author    Paolo Bonzini <pbonzini@redhat.com>  2022-02-05 08:58:25 +0300
committer Paolo Bonzini <pbonzini@redhat.com>  2022-02-05 08:58:25 +0300
commit    7e6a6b400db8048bd1c06e497e338388413cf5bc (patch)
tree      794f9fcdc7a1bfb9a2812e90fc76809d810203b2 /drivers
parent    6e37ec8825a113bc2dd1b280be10e5ac6eb4f6b1 (diff)
parent    1dd498e5e26ad71e3e9130daf72cfb6a693fee03 (diff)
download  linux-7e6a6b400db8048bd1c06e497e338388413cf5bc.tar.xz
Merge tag 'kvmarm-fixes-5.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 5.17, take #2

- A couple of fixes when handling an exception while an SError has been delivered
- Workaround for Cortex-A510's single-step erratum
Diffstat (limited to 'drivers') (columns: file mode | file path | lines changed)
-rw-r--r--drivers/accessibility/speakup/speakup_acntpc.c2
-rw-r--r--drivers/accessibility/speakup/speakup_dtlk.c2
-rw-r--r--drivers/accessibility/speakup/speakup_keypc.c2
-rw-r--r--drivers/accessibility/speakup/spk_ttyio.c4
-rw-r--r--drivers/acpi/Kconfig42
-rw-r--r--drivers/acpi/Makefile4
-rw-r--r--drivers/acpi/ac.c43
-rw-r--r--drivers/acpi/acpi_apd.c13
-rw-r--r--drivers/acpi/acpi_pcc.c120
-rw-r--r--drivers/acpi/acpi_video.c5
-rw-r--r--drivers/acpi/acpica/acevents.h5
-rw-r--r--drivers/acpi/acpica/acobject.h1
-rw-r--r--drivers/acpi/acpica/actables.h8
-rw-r--r--drivers/acpi/acpica/dsopcode.c1
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evregion.c10
-rw-r--r--drivers/acpi/acpica/evrgnini.c52
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/excreate.c1
-rw-r--r--drivers/acpi/acpica/exfield.c7
-rw-r--r--drivers/acpi/acpica/exoparg1.c3
-rw-r--r--drivers/acpi/acpica/exregion.c15
-rw-r--r--drivers/acpi/acpica/hwesleep.c4
-rw-r--r--drivers/acpi/acpica/hwsleep.c4
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c2
-rw-r--r--drivers/acpi/acpica/tbdata.c93
-rw-r--r--drivers/acpi/acpica/tbfadt.c6
-rw-r--r--drivers/acpi/acpica/tbinstal.c15
-rw-r--r--drivers/acpi/acpica/tbprint.c3
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxfload.c52
-rw-r--r--drivers/acpi/acpica/utdelete.c1
-rw-r--r--drivers/acpi/apei/einj.c3
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/battery.c46
-rw-r--r--drivers/acpi/bus.c6
-rw-r--r--drivers/acpi/cppc_acpi.c170
-rw-r--r--drivers/acpi/device_pm.c31
-rw-r--r--drivers/acpi/device_sysfs.c3
-rw-r--r--drivers/acpi/dock.c3
-rw-r--r--drivers/acpi/dptf/dptf_pch_fivr.c3
-rw-r--r--drivers/acpi/dptf/dptf_power.c2
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c6
-rw-r--r--drivers/acpi/ec.c317
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/fan.h1
-rw-r--r--drivers/acpi/internal.h14
-rw-r--r--drivers/acpi/nfit/core.c4
-rw-r--r--drivers/acpi/numa/srat.c64
-rw-r--r--drivers/acpi/pci_link.c12
-rw-r--r--drivers/acpi/pci_root.c12
-rw-r--r--drivers/acpi/pfr_telemetry.c435
-rw-r--r--drivers/acpi/pfr_update.c575
-rw-r--r--drivers/acpi/pmic/intel_pmic.c14
-rw-r--r--drivers/acpi/pmic/intel_pmic.h8
-rw-r--r--drivers/acpi/pmic/intel_pmic_bxtwc.c3
-rw-r--r--drivers/acpi/pmic/intel_pmic_bytcrc.c3
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtcrc.c3
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c3
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtwc.c3
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c24
-rw-r--r--drivers/acpi/power.c19
-rw-r--r--drivers/acpi/proc.c2
-rw-r--r--drivers/acpi/processor_driver.c10
-rw-r--r--drivers/acpi/processor_idle.c11
-rw-r--r--drivers/acpi/processor_thermal.c13
-rw-r--r--drivers/acpi/property.c11
-rw-r--r--drivers/acpi/resource.c4
-rw-r--r--drivers/acpi/scan.c186
-rw-r--r--drivers/acpi/sleep.c31
-rw-r--r--drivers/acpi/spcr.c9
-rw-r--r--drivers/acpi/sysfs.c3
-rw-r--r--drivers/acpi/tables.c93
-rw-r--r--drivers/acpi/thermal.c11
-rw-r--r--drivers/acpi/video_detect.c6
-rw-r--r--drivers/acpi/x86/s2idle.c4
-rw-r--r--drivers/acpi/x86/utils.c305
-rw-r--r--drivers/amba/bus.c71
-rw-r--r--drivers/android/binder.c437
-rw-r--r--drivers/ata/Kconfig44
-rw-r--r--drivers/ata/acard-ahci.c4
-rw-r--r--drivers/ata/ahci.c24
-rw-r--r--drivers/ata/ahci_brcm.c4
-rw-r--r--drivers/ata/ahci_ceva.c5
-rw-r--r--drivers/ata/ahci_qoriq.c4
-rw-r--r--drivers/ata/ahci_xgene.c12
-rw-r--r--drivers/ata/ata_piix.c11
-rw-r--r--drivers/ata/libahci.c33
-rw-r--r--drivers/ata/libahci_platform.c14
-rw-r--r--drivers/ata/libata-acpi.c69
-rw-r--r--drivers/ata/libata-core.c231
-rw-r--r--drivers/ata/libata-eh.c72
-rw-r--r--drivers/ata/libata-pmp.c8
-rw-r--r--drivers/ata/libata-sata.c11
-rw-r--r--drivers/ata/libata-scsi.c170
-rw-r--r--drivers/ata/libata-sff.c88
-rw-r--r--drivers/ata/libata-trace.c47
-rw-r--r--drivers/ata/libata-transport.c48
-rw-r--r--drivers/ata/libata.h5
-rw-r--r--drivers/ata/pata_ali.c4
-rw-r--r--drivers/ata/pata_arasan_cf.c3
-rw-r--r--drivers/ata/pata_atp867x.c105
-rw-r--r--drivers/ata/pata_cmd640.c2
-rw-r--r--drivers/ata/pata_cmd64x.c4
-rw-r--r--drivers/ata/pata_cs5520.c4
-rw-r--r--drivers/ata/pata_cs5536.c4
-rw-r--r--drivers/ata/pata_cypress.c2
-rw-r--r--drivers/ata/pata_ep93xx.c1
-rw-r--r--drivers/ata/pata_hpt366.c5
-rw-r--r--drivers/ata/pata_hpt37x.c20
-rw-r--r--drivers/ata/pata_hpt3x2n.c12
-rw-r--r--drivers/ata/pata_it821x.c66
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c6
-rw-r--r--drivers/ata/pata_marvell.c9
-rw-r--r--drivers/ata/pata_netcell.c5
-rw-r--r--drivers/ata/pata_octeon_cf.c54
-rw-r--r--drivers/ata/pata_of_platform.c15
-rw-r--r--drivers/ata/pata_pdc2027x.c71
-rw-r--r--drivers/ata/pata_pdc202xx_old.c2
-rw-r--r--drivers/ata/pata_platform.c2
-rw-r--r--drivers/ata/pata_rz1000.c4
-rw-r--r--drivers/ata/pata_serverworks.c4
-rw-r--r--drivers/ata/pata_sil680.c9
-rw-r--r--drivers/ata/pata_via.c12
-rw-r--r--drivers/ata/pdc_adma.c33
-rw-r--r--drivers/ata/sata_dwc_460ex.c165
-rw-r--r--drivers/ata/sata_fsl.c212
-rw-r--r--drivers/ata/sata_gemini.c4
-rw-r--r--drivers/ata/sata_inic162x.c4
-rw-r--r--drivers/ata/sata_mv.c132
-rw-r--r--drivers/ata/sata_nv.c54
-rw-r--r--drivers/ata/sata_promise.c31
-rw-r--r--drivers/ata/sata_qstor.c15
-rw-r--r--drivers/ata/sata_rcar.c26
-rw-r--r--drivers/ata/sata_sil.c1
-rw-r--r--drivers/ata/sata_sil24.c5
-rw-r--r--drivers/ata/sata_sx4.c148
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/base/Kconfig11
-rw-r--r--drivers/base/arch_numa.c68
-rw-r--r--drivers/base/arch_topology.c42
-rw-r--r--drivers/base/auxiliary.c152
-rw-r--r--drivers/base/bus.c4
-rw-r--r--drivers/base/core.c38
-rw-r--r--drivers/base/dd.c7
-rw-r--r--drivers/base/devtmpfs.c17
-rw-r--r--drivers/base/firmware_loader/builtin/Makefile4
-rw-r--r--drivers/base/firmware_loader/fallback.c7
-rw-r--r--drivers/base/firmware_loader/fallback.h11
-rw-r--r--drivers/base/firmware_loader/fallback_table.c25
-rw-r--r--drivers/base/node.c3
-rw-r--r--drivers/base/platform-msi.c234
-rw-r--r--drivers/base/platform.c9
-rw-r--r--drivers/base/power/runtime.c98
-rw-r--r--drivers/base/power/trace.c6
-rw-r--r--drivers/base/property.c166
-rw-r--r--drivers/base/regmap/regmap-debugfs.c2
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/base/swnode.c2
-rw-r--r--drivers/base/test/test_async_driver_probe.c14
-rw-r--r--drivers/base/topology.c28
-rw-r--r--drivers/block/Kconfig11
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/amiflop.c3
-rw-r--r--drivers/block/aoe/aoecmd.c2
-rw-r--r--drivers/block/ataflop.c7
-rw-r--r--drivers/block/brd.c74
-rw-r--r--drivers/block/drbd/drbd_main.c4
-rw-r--r--drivers/block/drbd/drbd_protocol.h6
-rw-r--r--drivers/block/drbd/drbd_receiver.c3
-rw-r--r--drivers/block/floppy.c13
-rw-r--r--drivers/block/loop.c119
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c88
-rw-r--r--drivers/block/n64cart.c2
-rw-r--r--drivers/block/null_blk/main.c13
-rw-r--r--drivers/block/null_blk/trace.h2
-rw-r--r--drivers/block/paride/bpck.c1
-rw-r--r--drivers/block/paride/pcd.c5
-rw-r--r--drivers/block/paride/pd.c6
-rw-r--r--drivers/block/paride/pf.c5
-rw-r--r--drivers/block/pktcdvd.c310
-rw-r--r--drivers/block/ps3vram.c1
-rw-r--r--drivers/block/rbd.c11
-rw-r--r--drivers/block/rnbd/rnbd-clt-sysfs.c3
-rw-r--r--drivers/block/rnbd/rnbd-clt.c10
-rw-r--r--drivers/block/rnbd/rnbd-clt.h2
-rw-r--r--drivers/block/rnbd/rnbd-srv.c16
-rw-r--r--drivers/block/rnbd/rnbd-srv.h2
-rw-r--r--drivers/block/rsxx/Makefile3
-rw-r--r--drivers/block/rsxx/config.c197
-rw-r--r--drivers/block/rsxx/core.c1126
-rw-r--r--drivers/block/rsxx/cregs.c789
-rw-r--r--drivers/block/rsxx/dev.c306
-rw-r--r--drivers/block/rsxx/dma.c1085
-rw-r--r--drivers/block/rsxx/rsxx.h33
-rw-r--r--drivers/block/rsxx/rsxx_cfg.h58
-rw-r--r--drivers/block/rsxx/rsxx_priv.h418
-rw-r--r--drivers/block/sunvdc.c19
-rw-r--r--drivers/block/swim.c1
-rw-r--r--drivers/block/swim3.c2
-rw-r--r--drivers/block/sx8.c4
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/block/xen-blkfront.c26
-rw-r--r--drivers/block/z2ram.c1
-rw-r--r--drivers/block/zram/zram_drv.c12
-rw-r--r--drivers/bluetooth/Kconfig6
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/bfusb.c3
-rw-r--r--drivers/bluetooth/btbcm.c51
-rw-r--r--drivers/bluetooth/btintel.c68
-rw-r--r--drivers/bluetooth/btintel.h2
-rw-r--r--drivers/bluetooth/btmrvl_main.c2
-rw-r--r--drivers/bluetooth/btmtk.c290
-rw-r--r--drivers/bluetooth/btmtk.h111
-rw-r--r--drivers/bluetooth/btmtksdio.c535
-rw-r--r--drivers/bluetooth/btqca.c48
-rw-r--r--drivers/bluetooth/btqca.h2
-rw-r--r--drivers/bluetooth/btsdio.c2
-rw-r--r--drivers/bluetooth/btusb.c588
-rw-r--r--drivers/bluetooth/hci_bcm.c8
-rw-r--r--drivers/bluetooth/hci_h4.c4
-rw-r--r--drivers/bluetooth/hci_ldisc.c5
-rw-r--r--drivers/bluetooth/hci_qca.c9
-rw-r--r--drivers/bluetooth/hci_vhci.c122
-rw-r--r--drivers/bluetooth/virtio_bt.c5
-rw-r--r--drivers/bus/fsl-mc/dprc-driver.c8
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-allocator.c9
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-msi.c79
-rw-r--r--drivers/bus/imx-weim.c18
-rw-r--r--drivers/bus/mhi/core/boot.c2
-rw-r--r--drivers/bus/mhi/core/init.c4
-rw-r--r--drivers/bus/mhi/core/internal.h9
-rw-r--r--drivers/bus/mhi/core/main.c24
-rw-r--r--drivers/bus/mhi/core/pm.c39
-rw-r--r--drivers/bus/mhi/pci_generic.c56
-rw-r--r--drivers/bus/mvebu-mbus.c5
-rw-r--r--drivers/bus/tegra-gmi.c50
-rw-r--r--drivers/cdrom/cdrom.c23
-rw-r--r--drivers/cdrom/gdrom.c1
-rw-r--r--drivers/char/agp/amd64-agp.c24
-rw-r--r--drivers/char/agp/intel-gtt.c1
-rw-r--r--drivers/char/agp/sis-agp.c25
-rw-r--r--drivers/char/agp/via-agp.c25
-rw-r--r--drivers/char/applicom.c4
-rw-r--r--drivers/char/hpet.c22
-rw-r--r--drivers/char/hw_random/Kconfig26
-rw-r--r--drivers/char/hw_random/Makefile2
-rw-r--r--drivers/char/hw_random/cavium-rng-vf.c194
-rw-r--r--drivers/char/hw_random/cavium-rng.c11
-rw-r--r--drivers/char/hw_random/cn10k-rng.c181
-rw-r--r--drivers/char/hw_random/tx4939-rng.c157
-rw-r--r--drivers/char/hw_random/virtio-rng.c2
-rw-r--r--drivers/char/mwave/3780i.h2
-rw-r--r--drivers/char/random.c869
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c122
-rw-r--r--drivers/char/tpm/tpm-chip.c37
-rw-r--r--drivers/char/tpm/tpm-sysfs.c3
-rw-r--r--drivers/char/tpm/tpm2-cmd.c6
-rw-r--r--drivers/char/tpm/tpm_tis_core.c14
-rw-r--r--drivers/char/tpm/tpm_tis_i2c_cr50.c16
-rw-r--r--drivers/char/tpm/tpm_tis_spi_cr50.c20
-rw-r--r--drivers/char/virtio_console.c4
-rw-r--r--drivers/clk/Kconfig27
-rw-r--r--drivers/clk/Makefile6
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c13
-rw-r--r--drivers/clk/clk-bm1880.c20
-rw-r--r--drivers/clk/clk-gate.c35
-rw-r--r--drivers/clk/clk-gemini.c2
-rw-r--r--drivers/clk/clk-lan966x.c293
-rw-r--r--drivers/clk/clk-si5341.c2
-rw-r--r--drivers/clk/clk-stm32f4.c4
-rw-r--r--drivers/clk/clk-stm32mp1.c2
-rw-r--r--drivers/clk/clk-tps68470.c261
-rw-r--r--drivers/clk/clk.c80
-rw-r--r--drivers/clk/imx/clk-imx8mn.c6
-rw-r--r--drivers/clk/imx/clk-imx8mp.c2
-rw-r--r--drivers/clk/imx/clk-imx8ulp.c1
-rw-r--r--drivers/clk/imx/clk-pllv1.c17
-rw-r--r--drivers/clk/imx/clk-pllv3.c6
-rw-r--r--drivers/clk/ingenic/jz4760-cgu.c10
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c5
-rw-r--r--drivers/clk/mediatek/Kconfig17
-rw-r--r--drivers/clk/mediatek/Makefile4
-rw-r--r--drivers/clk/mediatek/clk-gate.c24
-rw-r--r--drivers/clk/mediatek/clk-mt7986-apmixed.c100
-rw-r--r--drivers/clk/mediatek/clk-mt7986-eth.c132
-rw-r--r--drivers/clk/mediatek/clk-mt7986-infracfg.c224
-rw-r--r--drivers/clk/mediatek/clk-mt7986-topckgen.c342
-rw-r--r--drivers/clk/meson/gxbb.c44
-rw-r--r--drivers/clk/qcom/Kconfig24
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c166
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h3
-rw-r--r--drivers/clk/qcom/clk-rpmh.c52
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c31
-rw-r--r--drivers/clk/qcom/gcc-msm8976.c4155
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c1
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c2
-rw-r--r--drivers/clk/qcom/gcc-sdx65.c1611
-rw-r--r--drivers/clk/qcom/gcc-sm6350.c1
-rw-r--r--drivers/clk/qcom/gcc-sm8350.c1
-rw-r--r--drivers/clk/qcom/gcc-sm8450.c3304
-rw-r--r--drivers/clk/qcom/lpasscc-sc7280.c1
-rw-r--r--drivers/clk/qcom/lpasscc-sdm845.c1
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/q6sstop-qcs404.c1
-rw-r--r--drivers/clk/qcom/turingcc-qcs404.c1
-rw-r--r--drivers/clk/renesas/Kconfig13
-rw-r--r--drivers/clk/renesas/Makefile2
-rw-r--r--drivers/clk/renesas/r8a774a1-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/r8a774b1-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/r8a774c0-cpg-mssr.c9
-rw-r--r--drivers/clk/renesas/r8a774e1-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c3
-rw-r--r--drivers/clk/renesas/r8a77990-cpg-mssr.c9
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c3
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c343
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c183
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c81
-rw-r--r--drivers/clk/renesas/rcar-cpg-lib.c211
-rw-r--r--drivers/clk/renesas/rcar-cpg-lib.h7
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c24
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h4
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c305
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.h76
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c60
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h3
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c18
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.h9
-rw-r--r--drivers/clk/samsung/Makefile2
-rw-r--r--drivers/clk/samsung/clk-cpu.c2
-rw-r--r--drivers/clk/samsung/clk-cpu.h7
-rw-r--r--drivers/clk/samsung/clk-exynos-arm64.c94
-rw-r--r--drivers/clk/samsung/clk-exynos-arm64.h20
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c54
-rw-r--r--drivers/clk/samsung/clk-exynos4.c41
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c21
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c29
-rw-r--r--drivers/clk/samsung/clk-exynos7885.c597
-rw-r--r--drivers/clk/samsung/clk-exynos850.c366
-rw-r--r--drivers/clk/samsung/clk-pll.c1
-rw-r--r--drivers/clk/samsung/clk-pll.h1
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c6
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c8
-rw-r--r--drivers/clk/samsung/clk-s5pv210.c8
-rw-r--r--drivers/clk/samsung/clk.c14
-rw-r--r--drivers/clk/samsung/clk.h36
-rw-r--r--drivers/clk/socfpga/clk-agilex.c4
-rw-r--r--drivers/clk/socfpga/clk-gate.c4
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c2
-rw-r--r--drivers/clk/socfpga/clk-s10.c4
-rw-r--r--drivers/clk/st/clkgen-fsyn.c13
-rw-r--r--drivers/clk/st/clkgen-mux.c11
-rw-r--r--drivers/clk/starfive/Kconfig9
-rw-r--r--drivers/clk/starfive/Makefile3
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7100.c689
-rw-r--r--drivers/clk/sunxi-ng/Kconfig49
-rw-r--r--drivers/clk/sunxi-ng/Makefile101
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c140
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1-r.h17
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1.c1390
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1.h15
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c58
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a100.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c56
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.c33
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c40
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c35
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c40
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c9
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c62
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r.c65
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c57
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c40
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.h78
-rw-r--r--drivers/clk/sunxi-ng/ccu_frac.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu_gate.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu_gate.h32
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.h49
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.h33
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_phase.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_reset.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_sdm.c6
-rw-r--r--drivers/clk/tegra/Makefile1
-rw-r--r--drivers/clk/tegra/clk-device.c199
-rw-r--r--drivers/clk/tegra/clk-pll.c2
-rw-r--r--drivers/clk/tegra/clk-super.c2
-rw-r--r--drivers/clk/tegra/clk-tegra114.c2
-rw-r--r--drivers/clk/tegra/clk-tegra20.c77
-rw-r--r--drivers/clk/tegra/clk-tegra30.c116
-rw-r--r--drivers/clk/tegra/clk.c75
-rw-r--r--drivers/clk/tegra/clk.h2
-rw-r--r--drivers/clk/ti/adpll.c2
-rw-r--r--drivers/clk/visconti/Kconfig9
-rw-r--r--drivers/clk/visconti/Makefile5
-rw-r--r--drivers/clk/visconti/clkc-tmpv770x.c291
-rw-r--r--drivers/clk/visconti/clkc.c206
-rw-r--r--drivers/clk/visconti/clkc.h76
-rw-r--r--drivers/clk/visconti/pll-tmpv770x.c85
-rw-r--r--drivers/clk/visconti/pll.c339
-rw-r--r--drivers/clk/visconti/pll.h62
-rw-r--r--drivers/clk/visconti/reset.c107
-rw-r--r--drivers/clk/visconti/reset.h36
-rw-r--r--drivers/clk/x86/clk-fch.c48
-rw-r--r--drivers/clk/zynq/pll.c12
-rw-r--r--drivers/clocksource/Kconfig12
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/exynos_mct.c52
-rw-r--r--drivers/clocksource/renesas-ostm.c39
-rw-r--r--drivers/clocksource/timer-imx-sysctr.c6
-rw-r--r--drivers/clocksource/timer-msc313e.c253
-rw-r--r--drivers/clocksource/timer-pistachio.c3
-rw-r--r--drivers/comedi/comedi.h1528
-rw-r--r--drivers/comedi/comedi_buf.c3
-rw-r--r--drivers/comedi/comedi_fops.c2
-rw-r--r--drivers/comedi/comedi_pci.c3
-rw-r--r--drivers/comedi/comedi_pci.h57
-rw-r--r--drivers/comedi/comedi_pcmcia.c3
-rw-r--r--drivers/comedi/comedi_pcmcia.h49
-rw-r--r--drivers/comedi/comedi_usb.c3
-rw-r--r--drivers/comedi/comedi_usb.h42
-rw-r--r--drivers/comedi/comedidev.h1054
-rw-r--r--drivers/comedi/comedilib.h26
-rw-r--r--drivers/comedi/drivers.c3
-rw-r--r--drivers/comedi/drivers/8255.c5
-rw-r--r--drivers/comedi/drivers/8255.h42
-rw-r--r--drivers/comedi/drivers/8255_pci.c6
-rw-r--r--drivers/comedi/drivers/addi_apci_1032.c2
-rw-r--r--drivers/comedi/drivers/addi_apci_1500.c2
-rw-r--r--drivers/comedi/drivers/addi_apci_1516.c2
-rw-r--r--drivers/comedi/drivers/addi_apci_1564.c2
-rw-r--r--drivers/comedi/drivers/addi_apci_16xx.c3
-rw-r--r--drivers/comedi/drivers/addi_apci_2032.c2
-rw-r--r--drivers/comedi/drivers/addi_apci_2200.c2
-rw-r--r--drivers/comedi/drivers/addi_apci_3120.c2
-rw-r--r--drivers/comedi/drivers/addi_apci_3501.c2
-rw-r--r--drivers/comedi/drivers/addi_apci_3xxx.c3
-rw-r--r--drivers/comedi/drivers/addi_watchdog.c2
-rw-r--r--drivers/comedi/drivers/adl_pci6208.c3
-rw-r--r--drivers/comedi/drivers/adl_pci7x3x.c3
-rw-r--r--drivers/comedi/drivers/adl_pci8164.c3
-rw-r--r--drivers/comedi/drivers/adl_pci9111.c5
-rw-r--r--drivers/comedi/drivers/adl_pci9118.c5
-rw-r--r--drivers/comedi/drivers/adq12b.c3
-rw-r--r--drivers/comedi/drivers/adv_pci1710.c5
-rw-r--r--drivers/comedi/drivers/adv_pci1720.c3
-rw-r--r--drivers/comedi/drivers/adv_pci1723.c3
-rw-r--r--drivers/comedi/drivers/adv_pci1724.c3
-rw-r--r--drivers/comedi/drivers/adv_pci1760.c3
-rw-r--r--drivers/comedi/drivers/adv_pci_dio.c8
-rw-r--r--drivers/comedi/drivers/aio_aio12_8.c7
-rw-r--r--drivers/comedi/drivers/aio_iiro_16.c3
-rw-r--r--drivers/comedi/drivers/amplc_dio200.c2
-rw-r--r--drivers/comedi/drivers/amplc_dio200_common.c7
-rw-r--r--drivers/comedi/drivers/amplc_dio200_pci.c3
-rw-r--r--drivers/comedi/drivers/amplc_pc236.c3
-rw-r--r--drivers/comedi/drivers/amplc_pc236_common.c5
-rw-r--r--drivers/comedi/drivers/amplc_pc263.c2
-rw-r--r--drivers/comedi/drivers/amplc_pci224.c6
-rw-r--r--drivers/comedi/drivers/amplc_pci230.c8
-rw-r--r--drivers/comedi/drivers/amplc_pci236.c3
-rw-r--r--drivers/comedi/drivers/amplc_pci263.c3
-rw-r--r--drivers/comedi/drivers/c6xdigio.c3
-rw-r--r--drivers/comedi/drivers/cb_das16_cs.c6
-rw-r--r--drivers/comedi/drivers/cb_pcidas.c7
-rw-r--r--drivers/comedi/drivers/cb_pcidas64.c5
-rw-r--r--drivers/comedi/drivers/cb_pcidda.c6
-rw-r--r--drivers/comedi/drivers/cb_pcimdas.c7
-rw-r--r--drivers/comedi/drivers/cb_pcimdda.c6
-rw-r--r--drivers/comedi/drivers/comedi_8254.c6
-rw-r--r--drivers/comedi/drivers/comedi_8254.h134
-rw-r--r--drivers/comedi/drivers/comedi_8255.c5
-rw-r--r--drivers/comedi/drivers/comedi_bond.c6
-rw-r--r--drivers/comedi/drivers/comedi_isadma.c6
-rw-r--r--drivers/comedi/drivers/comedi_isadma.h114
-rw-r--r--drivers/comedi/drivers/comedi_parport.c3
-rw-r--r--drivers/comedi/drivers/comedi_test.c4
-rw-r--r--drivers/comedi/drivers/contec_pci_dio.c3
-rw-r--r--drivers/comedi/drivers/dac02.c3
-rw-r--r--drivers/comedi/drivers/daqboard2000.c5
-rw-r--r--drivers/comedi/drivers/das08.c7
-rw-r--r--drivers/comedi/drivers/das08_cs.c3
-rw-r--r--drivers/comedi/drivers/das08_isa.c2
-rw-r--r--drivers/comedi/drivers/das08_pci.c3
-rw-r--r--drivers/comedi/drivers/das16.c10
-rw-r--r--drivers/comedi/drivers/das16m1.c7
-rw-r--r--drivers/comedi/drivers/das1800.c8
-rw-r--r--drivers/comedi/drivers/das6402.c6
-rw-r--r--drivers/comedi/drivers/das800.c6
-rw-r--r--drivers/comedi/drivers/dmm32at.c5
-rw-r--r--drivers/comedi/drivers/dt2801.c2
-rw-r--r--drivers/comedi/drivers/dt2811.c3
-rw-r--r--drivers/comedi/drivers/dt2814.c3
-rw-r--r--drivers/comedi/drivers/dt2815.c3
-rw-r--r--drivers/comedi/drivers/dt2817.c2
-rw-r--r--drivers/comedi/drivers/dt282x.c6
-rw-r--r--drivers/comedi/drivers/dt3000.c3
-rw-r--r--drivers/comedi/drivers/dt9812.c3
-rw-r--r--drivers/comedi/drivers/dyna_pci10xx.c3
-rw-r--r--drivers/comedi/drivers/fl512.c3
-rw-r--r--drivers/comedi/drivers/gsc_hpdi.c3
-rw-r--r--drivers/comedi/drivers/icp_multi.c3
-rw-r--r--drivers/comedi/drivers/ii_pci20kc.c2
-rw-r--r--drivers/comedi/drivers/jr3_pci.c3
-rw-r--r--drivers/comedi/drivers/ke_counter.c3
-rw-r--r--drivers/comedi/drivers/me4000.c5
-rw-r--r--drivers/comedi/drivers/me_daq.c3
-rw-r--r--drivers/comedi/drivers/mf6x4.c3
-rw-r--r--drivers/comedi/drivers/mite.c3
-rw-r--r--drivers/comedi/drivers/mpc624.c3
-rw-r--r--drivers/comedi/drivers/multiq3.c3
-rw-r--r--drivers/comedi/drivers/ni_6527.c3
-rw-r--r--drivers/comedi/drivers/ni_65xx.c3
-rw-r--r--drivers/comedi/drivers/ni_660x.c3
-rw-r--r--drivers/comedi/drivers/ni_670x.c3
-rw-r--r--drivers/comedi/drivers/ni_at_a2150.c8
-rw-r--r--drivers/comedi/drivers/ni_at_ao.c6
-rw-r--r--drivers/comedi/drivers/ni_atmio.c5
-rw-r--r--drivers/comedi/drivers/ni_atmio16d.c5
-rw-r--r--drivers/comedi/drivers/ni_daq_700.c3
-rw-r--r--drivers/comedi/drivers/ni_daq_dio24.c5
-rw-r--r--drivers/comedi/drivers/ni_labpc.c3
-rw-r--r--drivers/comedi/drivers/ni_labpc_common.c7
-rw-r--r--drivers/comedi/drivers/ni_labpc_cs.c3
-rw-r--r--drivers/comedi/drivers/ni_labpc_isadma.c5
-rw-r--r--drivers/comedi/drivers/ni_labpc_pci.c3
-rw-r--r--drivers/comedi/drivers/ni_mio_common.c2
-rw-r--r--drivers/comedi/drivers/ni_mio_cs.c4
-rw-r--r--drivers/comedi/drivers/ni_pcidio.c3
-rw-r--r--drivers/comedi/drivers/ni_pcimio.c4
-rw-r--r--drivers/comedi/drivers/ni_routes.c3
-rw-r--r--drivers/comedi/drivers/ni_routes.h2
-rw-r--r--drivers/comedi/drivers/ni_routing/ni_route_values.h2
-rw-r--r--drivers/comedi/drivers/ni_routing/tools/.gitignore1
-rw-r--r--drivers/comedi/drivers/ni_routing/tools/Makefile29
-rw-r--r--drivers/comedi/drivers/ni_tio.h2
-rw-r--r--drivers/comedi/drivers/ni_usb6501.c3
-rw-r--r--drivers/comedi/drivers/pcl711.c6
-rw-r--r--drivers/comedi/drivers/pcl724.c5
-rw-r--r--drivers/comedi/drivers/pcl726.c3
-rw-r--r--drivers/comedi/drivers/pcl730.c2
-rw-r--r--drivers/comedi/drivers/pcl812.c8
-rw-r--r--drivers/comedi/drivers/pcl816.c8
-rw-r--r--drivers/comedi/drivers/pcl818.c8
-rw-r--r--drivers/comedi/drivers/pcm3724.c5
-rw-r--r--drivers/comedi/drivers/pcmad.c2
-rw-r--r--drivers/comedi/drivers/pcmda12.c2
-rw-r--r--drivers/comedi/drivers/pcmmio.c3
-rw-r--r--drivers/comedi/drivers/pcmuio.c3
-rw-r--r--drivers/comedi/drivers/quatech_daqp_cs.c3
-rw-r--r--drivers/comedi/drivers/rtd520.c5
-rw-r--r--drivers/comedi/drivers/rti800.c2
-rw-r--r--drivers/comedi/drivers/rti802.c2
-rw-r--r--drivers/comedi/drivers/s526.c2
-rw-r--r--drivers/comedi/drivers/s626.c3
-rw-r--r--drivers/comedi/drivers/ssv_dnp.c2
-rw-r--r--drivers/comedi/drivers/usbdux.c3
-rw-r--r--drivers/comedi/drivers/usbduxfast.c2
-rw-r--r--drivers/comedi/drivers/usbduxsigma.c3
-rw-r--r--drivers/comedi/drivers/vmk80xx.c3
-rw-r--r--drivers/comedi/kcomedilib/kcomedilib_main.c6
-rw-r--r--drivers/comedi/proc.c2
-rw-r--r--drivers/comedi/range.c2
-rw-r--r--drivers/connector/cn_proc.c2
-rw-r--r--drivers/counter/104-quad-8.c175
-rw-r--r--drivers/counter/counter-core.c183
-rw-r--r--drivers/counter/ftm-quaddec.c36
-rw-r--r--drivers/counter/intel-qep.c46
-rw-r--r--drivers/counter/interrupt-cnt.c38
-rw-r--r--drivers/counter/microchip-tcb-capture.c44
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c51
-rw-r--r--drivers/counter/stm32-timer-cnt.c48
-rw-r--r--drivers/counter/ti-eqep.c52
-rw-r--r--drivers/cpufreq/Kconfig.x86 17
-rw-r--r--drivers/cpufreq/Makefile5
-rw-r--r--drivers/cpufreq/amd-pstate-trace.c2
-rw-r--r--drivers/cpufreq/amd-pstate-trace.h77
-rw-r--r--drivers/cpufreq/amd-pstate.c645
-rw-r--r--drivers/cpufreq/cpufreq.c9
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c5
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c5
-rw-r--r--drivers/cpufreq/intel_pstate.c121
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c33
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c39
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/cpuidle/sysfs.c8
-rw-r--r--drivers/crypto/Kconfig12
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c21
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h1
-rw-r--r--drivers/crypto/atmel-aes.c6
-rw-r--r--drivers/crypto/caam/caamalg.c6
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c2
-rw-r--r--drivers/crypto/caam/caamhash.c3
-rw-r--r--drivers/crypto/caam/caampkc.c3
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_main.c5
-rw-r--r--drivers/crypto/ccp/ccp-dev.c2
-rw-r--r--drivers/crypto/ccp/sev-dev.c259
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.c1
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c7
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c2
-rw-r--r--drivers/crypto/hisilicon/qm.c483
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c2
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c12
-rw-r--r--drivers/crypto/keembay/keembay-ocs-ecc.c1
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_main.c4
-rw-r--r--drivers/crypto/marvell/octeontx2/Makefile2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c108
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h20
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf.h3
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c18
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c315
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h7
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c5
-rw-r--r--drivers/crypto/omap-aes.c2
-rw-r--r--drivers/crypto/omap-des.c8
-rw-r--r--drivers/crypto/qat/Kconfig1
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c154
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h2
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_drv.c33
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c12
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c15
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h1
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_drv.c6
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c12
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c15
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h1
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_drv.c6
-rw-r--r--drivers/crypto/qat/qat_common/Makefile6
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h47
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_engine.c8
-rw-r--r--drivers/crypto/qat/qat_common/adf_admin.c47
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg.c1
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg_common.h13
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg_strings.h3
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h42
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.c105
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.h22
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_pfvf.c381
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_pfvf.h29
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_hw_data.c69
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_hw_data.h17
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_pfvf.c148
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_pfvf.h17
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c11
-rw-r--r--drivers/crypto/qat/qat_common/adf_isr.c111
-rw-r--r--drivers/crypto/qat/qat_common/adf_pf2vf_msg.c416
-rw-r--r--drivers/crypto/qat/qat_common/adf_pf2vf_msg.h93
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_msg.h259
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c52
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h18
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c346
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h13
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_utils.c65
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_utils.h31
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c167
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h23
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c368
-rw-r--r--drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h17
-rw-r--r--drivers/crypto/qat/qat_common/adf_sriov.c59
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf2pf_msg.c48
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf_isr.c106
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h4
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_hw.h13
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.c25
-rw-r--r--drivers/crypto/qat/qat_common/qat_hal.c41
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c44
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h2
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c15
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h1
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_drv.c6
-rw-r--r--drivers/crypto/qce/aead.c2
-rw-r--r--drivers/crypto/qce/sha.c2
-rw-r--r--drivers/crypto/qce/skcipher.c2
-rw-r--r--drivers/crypto/sa2ul.c19
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c4
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c988
-rw-r--r--drivers/crypto/stm32/stm32-hash.c6
-rw-r--r--drivers/crypto/ux500/cryp/cryp.h2
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c26
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c8
-rw-r--r--drivers/cxl/Kconfig1
-rw-r--r--drivers/cxl/acpi.c237
-rw-r--r--drivers/cxl/core/Makefile2
-rw-r--r--drivers/cxl/core/bus.c26
-rw-r--r--drivers/cxl/core/mbox.c186
-rw-r--r--drivers/cxl/core/memdev.c55
-rw-r--r--drivers/cxl/core/pmem.c20
-rw-r--r--drivers/cxl/core/regs.c8
-rw-r--r--drivers/cxl/cxl.h10
-rw-r--r--drivers/cxl/cxlmem.h37
-rw-r--r--drivers/cxl/pci.c120
-rw-r--r--drivers/cxl/pmem.c85
-rw-r--r--drivers/dax/Kconfig13
-rw-r--r--drivers/dax/Makefile3
-rw-r--r--drivers/dax/bus.c62
-rw-r--r--drivers/dax/bus.h14
-rw-r--r--drivers/dax/device.c132
-rw-r--r--drivers/dax/pmem.c (renamed from drivers/dax/pmem/core.c)36
-rw-r--r--drivers/dax/pmem/Makefile1
-rw-r--r--drivers/dax/pmem/compat.c72
-rw-r--r--drivers/dax/pmem/pmem.c30
-rw-r--r--drivers/dax/super.c272
-rw-r--r--drivers/devfreq/Kconfig9
-rw-r--r--drivers/devfreq/Makefile1
-rw-r--r--drivers/devfreq/devfreq.c4
-rw-r--r--drivers/devfreq/sun8i-a33-mbus.c511
-rw-r--r--drivers/dma-buf/Makefile3
-rw-r--r--drivers/dma-buf/dma-buf-sysfs-stats.c2
-rw-r--r--drivers/dma-buf/dma-buf.c32
-rw-r--r--drivers/dma-buf/dma-fence-array.c6
-rw-r--r--drivers/dma-buf/dma-fence.c17
-rw-r--r--drivers/dma-buf/dma-resv.c26
-rw-r--r--drivers/dma-buf/heaps/cma_heap.c6
-rw-r--r--drivers/dma-buf/selftests.h1
-rw-r--r--drivers/dma-buf/st-dma-resv.c371
-rw-r--r--drivers/dma/at_xdmac.c194
-rw-r--r--drivers/dma/dma-jz4780.c118
-rw-r--r--drivers/dma/dmaengine.c7
-rw-r--r--drivers/dma/idxd/device.c222
-rw-r--r--drivers/dma/idxd/dma.c40
-rw-r--r--drivers/dma/idxd/idxd.h67
-rw-r--r--drivers/dma/idxd/init.c196
-rw-r--r--drivers/dma/idxd/irq.c239
-rw-r--r--drivers/dma/idxd/registers.h15
-rw-r--r--drivers/dma/idxd/submit.c69
-rw-r--r--drivers/dma/idxd/sysfs.c215
-rw-r--r--drivers/dma/ioat/sysfs.c3
-rw-r--r--drivers/dma/mmp_pdma.c6
-rw-r--r--drivers/dma/mv_xor_v2.c16
-rw-r--r--drivers/dma/pch_dma.c2
-rw-r--r--drivers/dma/ppc4xx/adma.c3
-rw-r--r--drivers/dma/pxa_dma.c7
-rw-r--r--drivers/dma/qcom/gpi.c4
-rw-r--r--drivers/dma/qcom/hidma.c44
-rw-r--r--drivers/dma/qcom/qcom_adm.c56
-rw-r--r--drivers/dma/sh/rcar-dmac.c17
-rw-r--r--drivers/dma/sh/shdma-base.c14
-rw-r--r--drivers/dma/sprd-dma.c3
-rw-r--r--drivers/dma/stm32-mdma.c78
-rw-r--r--drivers/dma/tegra20-apb-dma.c6
-rw-r--r--drivers/dma/ti/Makefile3
-rw-r--r--drivers/dma/ti/edma.c3
-rw-r--r--drivers/dma/ti/k3-psil-j721s2.c167
-rw-r--r--drivers/dma/ti/k3-psil-priv.h1
-rw-r--r--drivers/dma/ti/k3-psil.c1
-rw-r--r--drivers/dma/ti/k3-udma-private.c6
-rw-r--r--drivers/dma/ti/k3-udma.c15
-rw-r--r--drivers/dma/uniphier-xdmac.c5
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c133
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c17
-rw-r--r--drivers/edac/Kconfig2
-rw-r--r--drivers/edac/amd64_edac.c311
-rw-r--r--drivers/edac/amd64_edac.h8
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/mce_amd.c146
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/edac/sifive_edac.c2
-rw-r--r--drivers/edac/synopsys_edac.c52
-rw-r--r--drivers/extcon/extcon-usb-gpio.c2
-rw-r--r--drivers/extcon/extcon.c14
-rw-r--r--drivers/firmware/arm_scmi/virtio.c2
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c156
-rw-r--r--drivers/firmware/dmi-sysfs.c7
-rw-r--r--drivers/firmware/edd.c9
-rw-r--r--drivers/firmware/efi/efi-init.c5
-rw-r--r--drivers/firmware/efi/efi.c7
-rw-r--r--drivers/firmware/efi/efivars.c3
-rw-r--r--drivers/firmware/efi/esrt.c4
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c6
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c73
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c12
-rw-r--r--drivers/firmware/efi/libstub/efistub.h30
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c26
-rw-r--r--drivers/firmware/efi/runtime-map.c3
-rw-r--r--drivers/firmware/google/Kconfig6
-rw-r--r--drivers/firmware/memmap.c3
-rw-r--r--drivers/firmware/qemu_fw_cfg.c26
-rw-r--r--drivers/firmware/sysfb_simplefb.c8
-rw-r--r--drivers/firmware/ti_sci.c2
-rw-r--r--drivers/firmware/xilinx/zynqmp.c45
-rw-r--r--drivers/fpga/altera-cvp.c12
-rw-r--r--drivers/fpga/altera-fpga2sdram.c12
-rw-r--r--drivers/fpga/altera-freeze-bridge.c10
-rw-r--r--drivers/fpga/altera-hps2fpga.c12
-rw-r--r--drivers/fpga/altera-pr-ip-core.c7
-rw-r--r--drivers/fpga/altera-ps-spi.c9
-rw-r--r--drivers/fpga/dfl-fme-br.c10
-rw-r--r--drivers/fpga/dfl-fme-mgr.c22
-rw-r--r--drivers/fpga/dfl-fme-region.c17
-rw-r--r--drivers/fpga/dfl.c12
-rw-r--r--drivers/fpga/fpga-bridge.c122
-rw-r--r--drivers/fpga/fpga-mgr.c215
-rw-r--r--drivers/fpga/fpga-region.c119
-rw-r--r--drivers/fpga/ice40-spi.c9
-rw-r--r--drivers/fpga/machxo2-spi.c9
-rw-r--r--drivers/fpga/of-fpga-region.c12
-rw-r--r--drivers/fpga/socfpga-a10.c16
-rw-r--r--drivers/fpga/socfpga.c9
-rw-r--r--drivers/fpga/stratix10-soc.c18
-rw-r--r--drivers/fpga/ts73xx-fpga.c9
-rw-r--r--drivers/fpga/versal-fpga.c9
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c17
-rw-r--r--drivers/fpga/xilinx-spi.c11
-rw-r--r--drivers/fpga/zynq-fpga.c16
-rw-r--r--drivers/fpga/zynqmp-fpga.c9
-rw-r--r--drivers/gnss/Kconfig11
-rw-r--r--drivers/gnss/Makefile3
-rw-r--r--drivers/gnss/mtk.c2
-rw-r--r--drivers/gnss/serial.c2
-rw-r--r--drivers/gnss/sirf.c2
-rw-r--r--drivers/gnss/ubx.c2
-rw-r--r--drivers/gnss/usb.c214
-rw-r--r--drivers/gpio/Kconfig29
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-adnp.c1
-rw-r--r--drivers/gpio/gpio-aggregator.c11
-rw-r--r--drivers/gpio/gpio-amdpt.c12
-rw-r--r--drivers/gpio/gpio-arizona.c5
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c32
-rw-r--r--drivers/gpio/gpio-aspeed.c52
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-bd70528.c230
-rw-r--r--drivers/gpio/gpio-bd71828.c1
-rw-r--r--drivers/gpio/gpio-brcmstb.c3
-rw-r--r--drivers/gpio/gpio-creg-snps.c2
-rw-r--r--drivers/gpio/gpio-davinci.c1
-rw-r--r--drivers/gpio/gpio-dwapb.c15
-rw-r--r--drivers/gpio/gpio-eic-sprd.c1
-rw-r--r--drivers/gpio/gpio-em.c1
-rw-r--r--drivers/gpio/gpio-ge.c1
-rw-r--r--drivers/gpio/gpio-grgpio.c1
-rw-r--r--drivers/gpio/gpio-gw-pld.c2
-rw-r--r--drivers/gpio/gpio-idt3243x.c6
-rw-r--r--drivers/gpio/gpio-lpc32xx.c2
-rw-r--r--drivers/gpio/gpio-max3191x.c2
-rw-r--r--drivers/gpio/gpio-ml-ioh.c52
-rw-r--r--drivers/gpio/gpio-mockup.c23
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c6
-rw-r--r--drivers/gpio/gpio-msc313.c266
-rw-r--r--drivers/gpio/gpio-mt7621.c6
-rw-r--r--drivers/gpio/gpio-mvebu.c1
-rw-r--r--drivers/gpio/gpio-omap.c3
-rw-r--r--drivers/gpio/gpio-palmas.c4
-rw-r--r--drivers/gpio/gpio-pch.c42
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c1
-rw-r--r--drivers/gpio/gpio-pxa.c8
-rw-r--r--drivers/gpio/gpio-raspberrypi-exp.c1
-rw-r--r--drivers/gpio/gpio-rcar.c16
-rw-r--r--drivers/gpio/gpio-rda.c3
-rw-r--r--drivers/gpio/gpio-regmap.c6
-rw-r--r--drivers/gpio/gpio-rockchip.c21
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c1
-rw-r--r--drivers/gpio/gpio-sch.c2
-rw-r--r--drivers/gpio/gpio-sim.c1592
-rw-r--r--drivers/gpio/gpio-sprd.c1
-rw-r--r--drivers/gpio/gpio-sta2x11.c2
-rw-r--r--drivers/gpio/gpio-stmpe.c1
-rw-r--r--drivers/gpio/gpio-tc3589x.c1
-rw-r--r--drivers/gpio/gpio-tegra186.c129
-rw-r--r--drivers/gpio/gpio-tps65218.c3
-rw-r--r--drivers/gpio/gpio-tps6586x.c5
-rw-r--r--drivers/gpio/gpio-tps65910.c6
-rw-r--r--drivers/gpio/gpio-ts5500.c11
-rw-r--r--drivers/gpio/gpio-twl6040.c5
-rw-r--r--drivers/gpio/gpio-vf610.c1
-rw-r--r--drivers/gpio/gpio-virtio.c2
-rw-r--r--drivers/gpio/gpio-wm831x.c5
-rw-r--r--drivers/gpio/gpio-xlp.c147
-rw-r--r--drivers/gpio/gpiolib-acpi.c54
-rw-r--r--drivers/gpio/gpiolib-of.c3
-rw-r--r--drivers/gpio/gpiolib.c73
-rw-r--r--drivers/gpu/drm/Kconfig23
-rw-r--r--drivers/gpu/drm/Makefile32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c155
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c218
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c262
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c141
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c388
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c54
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c151
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c998
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c153
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c31
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c47
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c35
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c69
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c346
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c98
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h46
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c368
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c217
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c52
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c191
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c467
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c1080
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c217
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c130
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c163
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c489
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c | 1889
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c | 284
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_status.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/link_hwss.h | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c | 32
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/irq_service.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 42
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 61
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 89
-rw-r--r-- drivers/gpu/drm/amd/display/include/ddc_service_types.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/include/logger_types.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h | 7
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h | 12
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h | 12
-rw-r--r-- drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h | 10
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 57
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 4
-rw-r--r-- drivers/gpu/drm/amd/include/yellow_carp_offset.h | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 21
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 8
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 31
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h | 18
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu_types.h | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu_v13_0.h | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 9
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 14
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 12
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 10
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 143
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 22
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 10
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 87
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 10
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 10
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 148
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 37
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 34
-rw-r--r-- drivers/gpu/drm/arm/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/arm/display/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/aspeed/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/ast/Makefile | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 3
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 3
-rw-r--r-- drivers/gpu/drm/ast/ast_i2c.c | 152
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 151
-rw-r--r-- drivers/gpu/drm/ast/ast_tables.h | 2
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511.h | 1
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 15
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7533.c | 20
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.c | 495
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.h | 23
-rw-r--r-- drivers/gpu/drm/bridge/display-connector.c | 88
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt8912b.c | 31
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611.c | 66
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 65
-rw-r--r-- drivers/gpu/drm/bridge/lvds-codec.c | 36
-rw-r--r-- drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 40
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8640.c | 347
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h | 4
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c | 9
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 13
-rw-r--r-- drivers/gpu/drm/bridge/tc358768.c | 94
-rw-r--r-- drivers/gpu/drm/bridge/tc358775.c | 50
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi83.c | 128
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 475
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 192
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 245
-rw-r--r-- drivers/gpu/drm/drm_atomic_uapi.c | 6
-rw-r--r-- drivers/gpu/drm/drm_auth.c | 12
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 205
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 261
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 1
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 13
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_format_helper.c | 311
-rw-r--r-- drivers/gpu/drm/drm_fourcc.c | 3
-rw-r--r-- drivers/gpu/drm/drm_gem_atomic_helper.c | 14
-rw-r--r-- drivers/gpu/drm/drm_gem_cma_helper.c | 95
-rw-r--r-- drivers/gpu/drm/drm_gem_shmem_helper.c | 153
-rw-r--r-- drivers/gpu/drm/drm_gem_vram_helper.c | 4
-rw-r--r-- drivers/gpu/drm/drm_hashtab.c | 10
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 2
-rw-r--r-- drivers/gpu/drm/drm_legacy.h | 40
-rw-r--r-- drivers/gpu/drm/drm_mipi_dbi.c | 40
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 4
-rw-r--r-- drivers/gpu/drm/drm_modeset_lock.c | 9
-rw-r--r-- drivers/gpu/drm/drm_nomodeset.c | 24
-rw-r--r-- drivers/gpu/drm/drm_of.c | 33
-rw-r--r-- drivers/gpu/drm/drm_panel_orientation_quirks.c | 18
-rw-r--r-- drivers/gpu/drm/drm_privacy_screen.c | 467
-rw-r--r-- drivers/gpu/drm/drm_privacy_screen_x86.c | 89
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c | 48
-rw-r--r-- drivers/gpu/drm/drm_sysfs.c | 25
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 41
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 33
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 10
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 6
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 1
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_sched.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 13
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 49
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 20
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 43
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.h | 5
-rw-r--r-- drivers/gpu/drm/fsl-dcu/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/gma500/framebuffer.c | 52
-rw-r--r-- drivers/gpu/drm/gma500/gem.c | 234
-rw-r--r-- drivers/gpu/drm/gma500/gem.h | 28
-rw-r--r-- drivers/gpu/drm/gma500/gma_display.c | 51
-rw-r--r-- drivers/gpu/drm/gma500/gtt.c | 326
-rw-r--r-- drivers/gpu/drm/gma500/gtt.h | 29
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_crtc.c | 3
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.c | 12
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_display.c | 17
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_drv.h | 2
-rw-r--r-- drivers/gpu/drm/gud/gud_pipe.c | 14
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 52
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_modeset.c | 5
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 9
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_dp.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_hdmi.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_plane.c | 141
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.h | 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 107
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.h | 23
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.c | 143
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 215
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 205
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.h | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 153
-rw-r--r-- drivers/gpu/drm/i915/display/intel_combo_phy.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c | 19
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc.c | 155
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc.h | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cursor.c | 69
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 386
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 104
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 1985
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 47
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 111
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 136
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.h | 102
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_trace.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_trace.h | 587
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 127
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 166
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 61
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 45
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 32
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpt.c | 62
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpt.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.h | 42
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.h | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.c | 717
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.h | 33
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_pin.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 1831
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.h | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fdi.c | 141
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fdi.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.c | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 117
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lpe_audio.c | 42
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_display.c | 501
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_display.h | 27
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_refclk.c | 648
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_refclk.h | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane_initial.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 125
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.h | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_quirks.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_phy.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 289
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c | 82
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.h | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.c | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/skl_scaler.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/skl_universal_plane.c | 511
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.h | 19
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi_pll.h | 38
-rw-r--r-- drivers/gpu/drm/i915/dma_resv_utils.c | 17
-rw-r--r-- drivers/gpu/drm/i915/dma_resv_utils.h | 13
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 35
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 18
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 26
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_create.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 22
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 195
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_internal.c | 44
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 92
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 48
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 48
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 32
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_region.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 195
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 137
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 19
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 784
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.h | 41
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 627
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h | 41
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_wait.c | 106
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gemfs.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 134
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 71
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c | 26
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 44
-rw-r--r-- drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 151
-rw-r--r-- drivers/gpu/drm/i915/gt/gen6_ppgtt.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 34
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_types.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 68
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 40
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_stats.h | 33
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 84
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_user.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 430
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 121
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_debugfs.h | 21
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_types.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_migrate.c | 32
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_migrate.h | 9
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_mocs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ppgtt.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rc6.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_region_lmem.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 60
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 143
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 477
-rw-r--r-- drivers/gpu/drm/i915/gt/mock_engine.c | 50
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_context.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 35
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_execlists.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 22
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_migrate.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_mocs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_reset.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_ring_submission.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_slpc.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_timeline.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 42
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 18
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 33
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 13
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c | 62
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 157
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h | 13
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 367
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 75
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 188
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 18
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 175
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/fb_decoder.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_active.c | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_active.h | 17
-rw-r--r-- drivers/gpu/drm/i915/i915_active_types.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 43
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs_params.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_deps.c | 237
-rw-r--r-- drivers/gpu/drm/i915/i915_deps.h | 45
-rw-r--r-- drivers/gpu/drm/i915/i915_driver.c (renamed from drivers/gpu/drm/i915/i915_drv.c) | 93
-rw-r--r-- drivers/gpu/drm/i915/i915_driver.h | 24
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 388
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 51
-rw-r--r-- drivers/gpu/drm/i915/i915_getparam.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 234
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_iosf_mbi.h | 42
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 84
-rw-r--r-- drivers/gpu/drm/i915/i915_mm.c | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_mm.h | 35
-rw-r--r-- drivers/gpu/drm/i915/i915_module.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 129
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 24
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 585
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 149
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 30
-rw-r--r-- drivers/gpu/drm/i915/i915_scatterlist.c | 62
-rw-r--r-- drivers/gpu/drm/i915/i915_scatterlist.h | 76
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.c | 81
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.h | 23
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence_work.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_switcheroo.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 41
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 506
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 526
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_snapshot.c | 134
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_snapshot.h | 112
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_types.h | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 64
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 25
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_memory_region.c | 154
-rw-r--r-- drivers/gpu/drm/i915/intel_memory_region.h | 9
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 119
-rw-r--r-- drivers/gpu/drm/i915/intel_pm_types.h | 76
-rw-r--r-- drivers/gpu/drm/i915/intel_region_ttm.c | 50
-rw-r--r-- drivers/gpu/drm/i915/intel_region_ttm.h | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_step.c | 77
-rw-r--r-- drivers/gpu/drm/i915/intel_step.h | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 54
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.h | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_wopcm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp.c | 5
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp.h | 15
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 3
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_tee.c | 15
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_types.h | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_active.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 25
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 18
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c | 24
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_selftest.c | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_sw_fence.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_vma.c | 17
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_flush_test.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_live_test.c | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_reset.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_memory_region.c | 12
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_uncore.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/lib_sw_fence.c | 8
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 34
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gtt.c | 12
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_region.c | 19
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_uncore.c | 2
-rw-r--r-- drivers/gpu/drm/i915/vlv_sideband.c | 3
-rw-r--r-- drivers/gpu/drm/imx/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/imx/dcss/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/ingenic/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 283
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-ipu.c | 127
-rw-r--r-- drivers/gpu/drm/kmb/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/kmb/kmb_drv.c | 4
-rw-r--r-- drivers/gpu/drm/lima/lima_device.c | 1
-rw-r--r-- drivers/gpu/drm/lima/lima_gem.c | 20
-rw-r--r-- drivers/gpu/drm/lima/lima_sched.c | 4
-rw-r--r-- drivers/gpu/drm/mcde/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ccorr.c | 6
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 20
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 6
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 175
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 217
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 23
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 142
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_plane.c | 3
-rw-r--r-- drivers/gpu/drm/meson/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/meson/Makefile | 3
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.c | 71
-rw-r--r-- drivers/gpu/drm/meson/meson_dw_hdmi.c | 342
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_cvbs.c | 284
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_cvbs.h (renamed from drivers/gpu/drm/meson/meson_venc_cvbs.h) | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_hdmi.c | 447
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_hdmi.h | 12
-rw-r--r-- drivers/gpu/drm/meson/meson_venc_cvbs.c | 293
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.c | 3
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 4
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/msm/Makefile | 8
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 3
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 3
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 3
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 55
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 25
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 17
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 10
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 104
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 10
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_hfi.h | 11
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_device.c | 56
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 41
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 38
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 17
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c | 11
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 18
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 3
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 98
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 44
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 150
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 40
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 25
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 564
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 14
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 22
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c | 56
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot.c | 27
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot.h | 14
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 9
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.c | 23
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.c | 59
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.h | 1
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_drm.c | 75
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_hpd.c | 2
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_hpd.h | 2
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_link.c | 19
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.c | 71
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.h | 7
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_host.c | 77
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_manager.c | 37
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 17
-rw-r--r-- drivers/gpu/drm/msm/edp/edp.c | 198
-rw-r--r-- drivers/gpu/drm/msm/edp/edp.h | 77
-rw-r--r-- drivers/gpu/drm/msm/edp/edp.xml.h | 388
-rw-r--r-- drivers/gpu/drm/msm/edp/edp_aux.c | 265
-rw-r--r-- drivers/gpu/drm/msm/edp/edp_bridge.c | 111
-rw-r--r-- drivers/gpu/drm/msm/edp/edp_connector.c | 132
-rw-r--r-- drivers/gpu/drm/msm/edp/edp_ctrl.c | 1373
-rw-r--r-- drivers/gpu/drm/msm/edp/edp_phy.c | 98
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.c | 26
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.h | 19
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 83
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_hpd.c (renamed from drivers/gpu/drm/msm/hdmi/hdmi_connector.c) | 158
-rw-r--r-- drivers/gpu/drm/msm/msm_debugfs.c | 127
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 122
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 37
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 13
-rw-r--r-- drivers/gpu/drm/msm/msm_fence.h | 12
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 29
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 25
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.h | 69
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_devfreq.c | 112
-rw-r--r-- drivers/gpu/drm/msm/msm_kms.h | 11
-rw-r--r-- drivers/gpu/drm/msm/msm_perf.c | 9
-rw-r--r-- drivers/gpu/drm/msm/msm_rd.c | 16
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.c | 4
-rw-r--r-- drivers/gpu/drm/mxsfb/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/disp.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/base907c.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/corec57d.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/crc.c | 64
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/crc.h | 7
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/crc907d.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/crcc37d.c | 45
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/crcc37d.h | 40
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/crcc57d.c | 58
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 15
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/head.c | 38
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/head.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/head907d.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/head917d.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/headc37d.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/headc57d.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndw.c | 15
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndw.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h | 69
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_backlight.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 76
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/core/client.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/base.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c | 37
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c | 31
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/Makefile | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dispc.c | 44
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dsi.c | 14
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dss.c | 11
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dss.h | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c | 14
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h | 14
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi_phy.c | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi_pll.c | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/venc.c | 11
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/video-pll.c | 8
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_dmm_priv.h | 10
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 10
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_dmm_tiler.h | 10
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.c | 196
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.h | 24
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fb.c | 33
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fb.h | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c | 79
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.h | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 34
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_overlay.c | 212
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_overlay.h | 35
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_plane.c | 349
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_plane.h | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/tcm-sita.c | 10
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 43
-rw-r--r-- drivers/gpu/drm/panel/Makefile | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-abt-y030xx067a.c | 14
-rw-r--r-- drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c | 445
-rw-r--r-- drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 108
-rw-r--r-- drivers/gpu/drm/panel/panel-dsi-cm.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-edp.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c | 13
-rw-r--r-- drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c | 29
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 265
-rw-r--r-- drivers/gpu/drm/panel/panel-innolux-ej030na.c | 14
-rw-r--r-- drivers/gpu/drm/panel/panel-innolux-p079zca.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c | 323
-rw-r--r-- drivers/gpu/drm/panel/panel-jdi-lt070me05000.c | 8
-rw-r--r-- drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c | 8
-rw-r--r-- drivers/gpu/drm/panel/panel-lvds.c | 18
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt35950.c | 702
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt36672a.c | 10
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt39016.c | 20
-rw-r--r-- drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c | 8
-rw-r--r-- drivers/gpu/drm/panel/panel-ronbo-rb070d30.c | 8
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c | 21
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-sofef00.c | 17
-rw-r--r-- drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c | 21
-rw-r--r-- drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c | 8
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 86
-rw-r--r-- drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c | 552
-rw-r--r-- drivers/gpu/drm/panel/panel-tpo-td043mtea1.c | 14
-rw-r--r-- drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c | 25
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_drv.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem.c | 22
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c | 5
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 6
-rw-r--r-- drivers/gpu/drm/pl111/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_debugfs.c | 17
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 44
-rw-r--r-- drivers/gpu/drm/radeon/radeon_sync.c | 22
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vce.c | 2
-rw-r--r-- drivers/gpu/drm/rcar-du/Kconfig | 31
-rw-r--r-- drivers/gpu/drm/rcar-du/Makefile | 1
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 31
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 6
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 10
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c | 819
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h | 172
-rw-r--r-- drivers/gpu/drm/rockchip/Makefile | 1
-rw-r--r-- drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 82
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 23
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 163
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h | 24
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 44
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 7
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 4
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c | 15
-rw-r--r-- drivers/gpu/drm/shmobile/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/sprd/Kconfig | 13
-rw-r--r-- drivers/gpu/drm/sprd/Makefile | 8
-rw-r--r-- drivers/gpu/drm/sprd/megacores_pll.c | 305
-rw-r--r-- drivers/gpu/drm/sprd/sprd_dpu.c | 880
-rw-r--r-- drivers/gpu/drm/sprd/sprd_dpu.h | 109
-rw-r--r-- drivers/gpu/drm/sprd/sprd_drm.c | 205
-rw-r--r-- drivers/gpu/drm/sprd/sprd_drm.h | 19
-rw-r--r-- drivers/gpu/drm/sprd/sprd_dsi.c | 1073
-rw-r--r-- drivers/gpu/drm/sprd/sprd_dsi.h | 126
-rw-r--r-- drivers/gpu/drm/sti/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/stm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/sun4i/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_drv.c | 2
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 4
-rw-r--r-- drivers/gpu/drm/tegra/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/tegra/Makefile | 3
-rw-r--r-- drivers/gpu/drm/tegra/dc.c | 194
-rw-r--r-- drivers/gpu/drm/tegra/dc.h | 3
-rw-r--r-- drivers/gpu/drm/tegra/drm.c | 30
-rw-r--r-- drivers/gpu/drm/tegra/drm.h | 1
-rw-r--r-- drivers/gpu/drm/tegra/gem.c | 171
-rw-r--r-- drivers/gpu/drm/tegra/gr2d.c | 151
-rw-r--r-- drivers/gpu/drm/tegra/gr3d.c | 353
-rw-r--r-- drivers/gpu/drm/tegra/hdmi.c | 183
-rw-r--r-- drivers/gpu/drm/tegra/hub.h | 1
-rw-r--r-- drivers/gpu/drm/tegra/nvdec.c | 466
-rw-r--r-- drivers/gpu/drm/tegra/plane.c | 65
-rw-r--r-- drivers/gpu/drm/tegra/plane.h | 2
-rw-r--r-- drivers/gpu/drm/tegra/rgb.c | 53
-rw-r--r-- drivers/gpu/drm/tegra/submit.c | 77
-rw-r--r-- drivers/gpu/drm/tegra/uapi.c | 68
-rw-r--r-- drivers/gpu/drm/tegra/uapi.h | 5
-rw-r--r-- drivers/gpu/drm/tegra/vic.c | 61
-rw-r--r-- drivers/gpu/drm/tidss/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/tidss/tidss_drv.c | 13
-rw-r--r-- drivers/gpu/drm/tilcdc/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/tiny/Kconfig | 31
-rw-r--r-- drivers/gpu/drm/tiny/Makefile | 1
-rw-r--r-- drivers/gpu/drm/tiny/bochs.c | 3
-rw-r--r-- drivers/gpu/drm/tiny/cirrus.c | 28
-rw-r--r-- drivers/gpu/drm/tiny/ili9163.c | 225
-rw-r--r-- drivers/gpu/drm/tiny/repaper.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/simpledrm.c | 41
-rw-r--r-- drivers/gpu/drm/tiny/st7586.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_module.c | 4
-rw-r--r-- drivers/gpu/drm/ttm/ttm_range_manager.c | 11
-rw-r--r-- drivers/gpu/drm/tve200/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/v3d/v3d_bo.c | 26
-rw-r--r-- drivers/gpu/drm/v3d/v3d_gem.c | 4
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_drv.c | 5
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_main.c | 4
-rw-r--r-- drivers/gpu/drm/vc4/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_bo.c | 10
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 91
-rw-r--r-- drivers/gpu/drm/vc4/vc4_debugfs.c | 7
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.h | 37
-rw-r--r-- drivers/gpu/drm/vc4/vc4_dsi.c | 14
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 630
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.h | 37
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi_phy.c | 37
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi_regs.h | 8
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hvs.c | 26
-rw-r--r-- drivers/gpu/drm/vc4/vc4_kms.c | 130
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 203
-rw-r--r-- drivers/gpu/drm/vc4/vc4_regs.h | 19
-rw-r--r-- drivers/gpu/drm/vc4/vc4_txp.c | 4
-rw-r--r-- drivers/gpu/drm/vgem/vgem_drv.c | 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.c | 3
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 4
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_kms.c | 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_object.c | 29
-rw-r--r-- drivers/gpu/drm/vmwgfx/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/Makefile | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h | 7
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga_escape.h | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/device_include/svga_reg.h | 14
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_memory.c | 683
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_memory.h | 96
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_object.c | 180
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_object.h | 59
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 45
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_binding.h | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 584
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 7
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 43
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 30
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 26
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 92
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 159
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 117
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 13
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 45
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 294
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 16
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c | 199
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h | 83
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 47
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 20
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 17
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 91
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c | 29
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_so.c | 21
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_so.h | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c | 21
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 158
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c | 90
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 184
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 135
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 74
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_va.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 35
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 53
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front.c | 16
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front_gem.c | 108
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front_gem.h | 7
-rw-r--r-- drivers/gpu/drm/xlnx/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/xlnx/zynqmp_disp.c | 9
-rw-r--r-- drivers/gpu/host1x/Kconfig | 1
-rw-r--r-- drivers/gpu/host1x/bus.c | 80
-rw-r--r-- drivers/gpu/host1x/channel.c | 8
-rw-r--r-- drivers/gpu/host1x/debug.c | 15
-rw-r--r-- drivers/gpu/host1x/dev.c | 185
-rw-r--r-- drivers/gpu/host1x/dev.h | 5
-rw-r--r-- drivers/gpu/host1x/hw/channel_hw.c | 44
-rw-r--r-- drivers/gpu/host1x/intr.c | 3
-rw-r--r-- drivers/gpu/host1x/job.c | 160
-rw-r--r-- drivers/gpu/host1x/job.h | 6
-rw-r--r-- drivers/gpu/host1x/syncpt.c | 5
-rw-r--r-- drivers/greybus/es2.c | 2
-rw-r--r-- drivers/hid/Kconfig | 14
-rw-r--r-- drivers/hid/Makefile | 1
-rw-r--r-- drivers/hid/hid-apple.c | 146
-rw-r--r-- drivers/hid/hid-core.c | 93
-rw-r--r-- drivers/hid/hid-debug.c | 35
-rw-r--r-- drivers/hid/hid-ids.h | 6
-rw-r--r-- drivers/hid/hid-input.c | 28
-rw-r--r-- drivers/hid/hid-letsketch.c | 322
-rw-r--r-- drivers/hid/hid-magicmouse.c | 115
-rw-r--r-- drivers/hid/hid-multitouch.c | 3
-rw-r--r-- drivers/hid/hid-quirks.c | 1
-rw-r--r-- drivers/hid/hid-tmff.c | 8
-rw-r--r-- drivers/hid/hid-uclogic-params.c | 31
-rw-r--r-- drivers/hid/hid-vivaldi.c | 41
-rw-r--r-- drivers/hid/hidraw.c | 34
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-acpi.c | 2
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-core.c | 24
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-of-goodix.c | 2
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-of.c | 10
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid.h | 2
-rw-r--r-- drivers/hid/intel-ish-hid/ipc/ipc.c | 6
-rw-r--r-- drivers/hid/intel-ish-hid/ishtp-fw-loader.c | 3
-rw-r--r-- drivers/hid/surface-hid/surface_hid_core.c | 25
-rw-r--r-- drivers/hid/uhid.c | 49
-rw-r--r-- drivers/hid/usbhid/hid-core.c | 19
-rw-r--r-- drivers/hid/wacom_wac.c | 39
-rw-r--r-- drivers/hsi/hsi_core.c | 1
-rw-r--r-- drivers/hv/channel_mgmt.c | 2
-rw-r--r-- drivers/hv/hv_balloon.c | 7
-rw-r--r-- drivers/hv/hv_common.c | 15
-rw-r--r-- drivers/hv/vmbus_drv.c | 6
-rw-r--r-- drivers/hwmon/Kconfig | 51
-rw-r--r-- drivers/hwmon/Makefile | 4
-rw-r--r-- drivers/hwmon/adm1021.c | 23
-rw-r--r-- drivers/hwmon/adm1031.c | 3
-rw-r--r-- drivers/hwmon/adt7470.c | 3
-rw-r--r-- drivers/hwmon/asus_wmi_ec_sensors.c | 621
-rw-r--r-- drivers/hwmon/asus_wmi_sensors.c | 664
-rw-r--r-- drivers/hwmon/dell-smm-hwmon.c | 72
-rw-r--r-- drivers/hwmon/f71882fg.c | 2
-rw-r--r-- drivers/hwmon/ina238.c | 644
-rw-r--r-- drivers/hwmon/jc42.c | 4
-rw-r--r-- drivers/hwmon/k10temp.c | 36
-rw-r--r-- drivers/hwmon/lm90.c | 21
-rw-r--r-- drivers/hwmon/ltc2992.c | 3
-rw-r--r-- drivers/hwmon/mr75203.c | 2
-rw-r--r-- drivers/hwmon/nct6775.c | 16
-rw-r--r-- drivers/hwmon/ntc_thermistor.c | 299
-rw-r--r-- drivers/hwmon/nzxt-smart2.c | 829
-rw-r--r-- drivers/hwmon/pmbus/Kconfig | 29
-rw-r--r-- drivers/hwmon/pmbus/Makefile | 2
-rw-r--r-- drivers/hwmon/pmbus/delta-ahe50dc-fan.c | 114
-rw-r--r-- drivers/hwmon/pmbus/ir38064.c | 28
-rw-r--r-- drivers/hwmon/pmbus/mp5023.c | 67
-rw-r--r-- drivers/hwmon/raspberrypi-hwmon.c | 7
-rw-r--r-- drivers/hwmon/sht4x.c | 7
-rw-r--r-- drivers/hwmon/tmp401.c | 863
-rw-r--r-- drivers/hwmon/xgene-hwmon.c | 6
-rw-r--r-- drivers/hwspinlock/stm32_hwspinlock.c | 58
-rw-r--r-- drivers/hwtracing/coresight/coresight-cfg-preload.c | 9
-rw-r--r-- drivers/hwtracing/coresight/coresight-config.h | 9
-rw-r--r-- drivers/hwtracing/coresight/coresight-core.c | 2
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm4x-core.c | 11
-rw-r--r-- drivers/hwtracing/coresight/coresight-stm.c | 10
-rw-r--r-- drivers/hwtracing/coresight/coresight-syscfg-configfs.c | 87
-rw-r--r-- drivers/hwtracing/coresight/coresight-syscfg-configfs.h | 4
-rw-r--r-- drivers/hwtracing/coresight/coresight-syscfg.c | 315
-rw-r--r-- drivers/hwtracing/coresight/coresight-syscfg.h | 39
-rw-r--r-- drivers/i2c/busses/Kconfig | 28
-rw-r--r-- drivers/i2c/busses/Makefile | 1
-rw-r--r-- drivers/i2c/busses/i2c-aspeed.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-bcm2835.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.h | 13
-rw-r--r-- drivers/i2c/busses/i2c-designware-master.c | 7
-rw-r--r-- drivers/i2c/busses/i2c-designware-pcidrv.c | 51
-rw-r--r-- drivers/i2c/busses/i2c-designware-platdrv.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-exynos5.c | 110
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 288
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 92
-rw-r--r-- drivers/i2c/busses/i2c-mpc.c | 23
-rw-r--r-- drivers/i2c/busses/i2c-rcar.c | 26
-rw-r--r-- drivers/i2c/busses/i2c-riic.c | 10
-rw-r--r-- drivers/i2c/busses/i2c-rk3x.c | 7
-rw-r--r-- drivers/i2c/busses/i2c-sh_mobile.c | 60
-rw-r--r-- drivers/i2c/busses/i2c-stm32f7.c | 14
-rw-r--r-- drivers/i2c/busses/i2c-tegra.c | 69
-rw-r--r-- drivers/i2c/busses/i2c-virtio.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-xlp9xx.c | 7
-rw-r--r-- drivers/i2c/busses/i2c-xlr.c | 470
-rw-r--r-- drivers/i2c/i2c-core-acpi.c | 29
-rw-r--r-- drivers/i2c/i2c-core-base.c | 2
-rw-r--r-- drivers/i2c/muxes/i2c-mux-gpio.c | 53
-rw-r--r-- drivers/i3c/master.c | 3
-rw-r--r-- drivers/i3c/master/dw-i3c-master.c | 4
-rw-r--r-- drivers/i3c/master/mipi-i3c-hci/core.c | 2
-rw-r--r-- drivers/i3c/master/mipi-i3c-hci/dat_v1.c | 4
-rw-r--r-- drivers/i3c/master/mipi-i3c-hci/dma.c | 2
-rw-r--r-- drivers/i3c/master/mipi-i3c-hci/hci.h | 2
-rw-r--r-- drivers/i3c/master/svc-i3c-master.c | 341
-rw-r--r-- drivers/iio/Kconfig | 2
-rw-r--r-- drivers/iio/Makefile | 2
-rw-r--r-- drivers/iio/accel/bma180.c | 4
-rw-r--r-- drivers/iio/accel/bma220_spi.c | 6
-rw-r--r-- drivers/iio/accel/bmc150-accel-core.c | 2
-rw-r--r-- drivers/iio/accel/kxcjk-1013.c | 5
-rw-r--r-- drivers/iio/accel/mma7455_core.c | 3
-rw-r--r-- drivers/iio/accel/mma7660.c | 8
-rw-r--r-- drivers/iio/accel/mma8452.c | 2
-rw-r--r-- drivers/iio/accel/mma9553.c | 2
-rw-r--r-- drivers/iio/accel/sca3000.c | 17
-rw-r--r-- drivers/iio/accel/stk8312.c | 2
-rw-r--r-- drivers/iio/accel/stk8ba50.c | 3
-rw-r--r-- drivers/iio/adc/Kconfig | 21
-rw-r--r-- drivers/iio/adc/Makefile | 1
-rw-r--r-- drivers/iio/adc/ad7124.c | 2
-rw-r--r-- drivers/iio/adc/ad7192.c | 3
-rw-r--r-- drivers/iio/adc/ad7266.c | 3
-rw-r--r-- drivers/iio/adc/ad7606.h | 2
-rw-r--r-- drivers/iio/adc/ad_sigma_delta.c | 4
-rw-r--r-- drivers/iio/adc/at91-sama5d2_adc.c | 38
-rw-r--r-- drivers/iio/adc/axp20x_adc.c | 45
-rw-r--r-- drivers/iio/adc/envelope-detector.c | 3
-rw-r--r-- drivers/iio/adc/hi8435.c | 2
-rw-r--r-- drivers/iio/adc/imx7d_adc.c | 5
-rw-r--r-- drivers/iio/adc/ina2xx-adc.c | 15
-rw-r--r-- drivers/iio/adc/lpc18xx_adc.c | 6
-rw-r--r-- drivers/iio/adc/max9611.c | 20
-rw-r--r-- drivers/iio/adc/mcp3911.c | 9
-rw-r--r-- drivers/iio/adc/rcar-gyroadc.c | 3
-rw-r--r-- drivers/iio/adc/rzg2l_adc.c | 4
-rw-r--r-- drivers/iio/adc/stm32-adc.c | 3
-rw-r--r-- drivers/iio/adc/stmpe-adc.c | 5
-rw-r--r-- drivers/iio/adc/ti-adc081c.c | 22
-rw-r--r-- drivers/iio/adc/ti-adc12138.c | 14
-rw-r--r-- drivers/iio/adc/ti-ads1015.c | 10
-rw-r--r-- drivers/iio/adc/ti-ads124s08.c | 3
-rw-r--r-- drivers/iio/adc/ti-ads8688.c | 4
-rw-r--r-- drivers/iio/adc/xilinx-ams.c | 1451
-rw-r--r-- drivers/iio/adc/xilinx-xadc-core.c | 64
-rw-r--r-- drivers/iio/addac/Kconfig | 20
-rw-r--r-- drivers/iio/addac/Makefile | 7
-rw-r--r-- drivers/iio/addac/ad74413r.c | 1475
-rw-r--r-- drivers/iio/amplifiers/hmc425a.c | 2
-rw-r--r-- drivers/iio/buffer/industrialio-buffer-dmaengine.c | 2
-rw-r--r-- drivers/iio/chemical/atlas-sensor.c | 4
-rw-r--r-- drivers/iio/chemical/sunrise_co2.c | 4
-rw-r--r-- drivers/iio/chemical/vz89x.c | 2
-rw-r--r-- drivers/iio/common/scmi_sensors/scmi_iio.c | 57
-rw-r--r-- drivers/iio/common/st_sensors/st_sensors_core.c | 4
-rw-r--r-- drivers/iio/dac/Kconfig | 22
-rw-r--r-- drivers/iio/dac/Makefile | 2
-rw-r--r-- drivers/iio/dac/ad3552r.c | 1138
-rw-r--r-- drivers/iio/dac/ad5064.c | 4
-rw-r--r-- drivers/iio/dac/ad5380.c | 2
-rw-r--r-- drivers/iio/dac/ad5446.c | 2
-rw-r--r-- drivers/iio/dac/ad5504.c | 2
-rw-r--r-- drivers/iio/dac/ad5624r_spi.c | 2
-rw-r--r-- drivers/iio/dac/ad5686.c | 2
-rw-r--r-- drivers/iio/dac/ad5755.c | 152
-rw-r--r-- drivers/iio/dac/ad5758.c | 3
-rw-r--r-- drivers/iio/dac/ad5766.c | 13
-rw-r--r-- drivers/iio/dac/ad5791.c | 2
-rw-r--r-- drivers/iio/dac/ad7293.c | 934
-rw-r--r-- drivers/iio/dac/dpot-dac.c | 2
-rw-r--r-- drivers/iio/dac/lpc18xx_dac.c | 3
-rw-r--r-- drivers/iio/dac/max5821.c | 2
-rw-r--r-- drivers/iio/dac/mcp4725.c | 10
-rw-r--r-- drivers/iio/dac/stm32-dac.c | 2
-rw-r--r-- drivers/iio/dac/ti-dac082s085.c | 2
-rw-r--r-- drivers/iio/dac/ti-dac5571.c | 2
-rw-r--r-- drivers/iio/dac/ti-dac7311.c | 2
-rw-r--r-- drivers/iio/dummy/iio_simple_dummy_buffer.c | 2
-rw-r--r-- drivers/iio/filter/Kconfig | 18
-rw-r--r-- drivers/iio/filter/Makefile | 7
-rw-r--r-- drivers/iio/filter/admv8818.c | 665
-rw-r--r-- drivers/iio/frequency/Kconfig | 10
-rw-r--r-- drivers/iio/frequency/Makefile | 1
-rw-r--r-- drivers/iio/frequency/admv1013.c | 656
-rw-r--r-- drivers/iio/health/afe4403.c | 5
-rw-r--r-- drivers/iio/health/afe4404.c | 5
-rw-r--r-- drivers/iio/iio_core.h | 2
-rw-r--r-- drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c | 2
-rw-r--r-- drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c | 2
-rw-r--r-- drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c | 2
-rw-r--r-- drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c | 2
-rw-r--r-- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 4
-rw-r--r-- drivers/iio/industrialio-buffer.c | 20
-rw-r--r-- drivers/iio/industrialio-core.c | 6
-rw-r--r-- drivers/iio/industrialio-trigger.c | 36
-rw-r--r-- drivers/iio/light/cm3605.c | 6
-rw-r--r-- drivers/iio/light/gp2ap020a00f.c | 5
-rw-r--r-- drivers/iio/light/ltr501.c | 42
-rw-r--r-- drivers/iio/magnetometer/ak8975.c | 2
-rw-r--r-- drivers/iio/magnetometer/hmc5843_core.c | 4
-rw-r--r-- drivers/iio/magnetometer/mag3110.c | 6
-rw-r--r-- drivers/iio/potentiometer/mcp41010.c | 6
-rw-r--r-- drivers/iio/potentiostat/lmp91000.c | 4
-rw-r--r-- drivers/iio/pressure/bmp280-core.c | 11
-rw-r--r-- drivers/iio/pressure/bmp280-i2c.c | 2
-rw-r--r-- drivers/iio/pressure/bmp280-spi.c | 2
-rw-r--r-- drivers/iio/pressure/mpl3115.c | 16
-rw-r--r-- drivers/iio/pressure/ms5611.h | 6
-rw-r--r-- drivers/iio/pressure/ms5611_core.c | 7
-rw-r--r-- drivers/iio/pressure/ms5611_i2c.c | 11
-rw-r--r-- drivers/iio/pressure/ms5611_spi.c | 17
-rw-r--r-- drivers/iio/proximity/as3935.c | 6
-rw-r--r-- drivers/iio/test/iio-test-format.c | 123
-rw-r--r-- drivers/iio/trigger/iio-trig-interrupt.c | 4
-rw-r--r-- drivers/iio/trigger/iio-trig-sysfs.c | 4
-rw-r--r-- drivers/iio/trigger/stm32-timer-trigger.c | 4
-rw-r--r-- drivers/infiniband/core/cache.c | 13
-rw-r--r-- drivers/infiniband/core/cma.c | 18
-rw-r--r-- drivers/infiniband/core/device.c | 3
-rw-r--r-- drivers/infiniband/core/sysfs.c | 3
-rw-r--r-- drivers/infiniband/core/umem_odp.c | 3
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 1
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 9
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 3
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 11
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 12
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 1
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.c | 50
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.h | 7
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.c | 99
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.h | 9
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 5
-rw-r--r-- drivers/infiniband/hw/cxgb4/id_table.c | 17
-rw-r--r-- drivers/infiniband/hw/cxgb4/provider.c | 8
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 1
-rw-r--r-- drivers/infiniband/hw/hfi1/user_sdma.c | 8
-rw-r--r-- drivers/infiniband/hw/hns/Kconfig | 17
-rw-r--r-- drivers/infiniband/hw/hns/Makefile | 5
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_ah.c | 5
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_alloc.c | 3
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_cmd.c | 11
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_common.h | 202
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_cq.c | 13
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_db.c | 1
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_device.h | 108
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hem.c | 1
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 4675
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 1147
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 49
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 22
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_main.c | 85
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_mr.c | 32
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_pd.c | 17
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_qp.c | 93
-rw-r--r-- drivers/infiniband/hw/irdma/ctrl.c | 2
-rw-r--r-- drivers/infiniband/hw/irdma/hw.c | 20
-rw-r--r-- drivers/infiniband/hw/irdma/i40iw_if.c | 2
-rw-r--r-- drivers/infiniband/hw/irdma/main.c | 7
-rw-r--r-- drivers/infiniband/hw/irdma/pble.h | 2
-rw-r--r-- drivers/infiniband/hw/irdma/uda.c | 2
-rw-r--r-- drivers/infiniband/hw/irdma/verbs.c | 31
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 34
-rw-r--r-- drivers/infiniband/hw/mlx5/cq.c | 5
-rw-r--r-- drivers/infiniband/hw/mlx5/doorbell.c | 1
-rw-r--r-- drivers/infiniband/hw/mlx5/fs.c | 18
-rw-r--r-- drivers/infiniband/hw/mlx5/mad.c | 23
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 8
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 15
-rw-r--r-- drivers/infiniband/hw/mlx5/odp.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 1
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_allocator.c | 15
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_mr.c | 25
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.c | 20
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 16
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_main.c | 17
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 18
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 1
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c | 2
-rw-r--r-- drivers/infiniband/hw/qib/qib.h | 2
-rw-r--r-- drivers/infiniband/hw/qib/qib_file_ops.c | 2
-rw-r--r-- drivers/infiniband/hw/qib/qib_iba6120.c | 2
-rw-r--r-- drivers/infiniband/hw/qib/qib_iba7220.c | 2
-rw-r--r-- drivers/infiniband/hw/qib/qib_iba7322.c | 2
-rw-r--r-- drivers/infiniband/hw/qib/qib_verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/usnic/usnic_ib_sysfs.c | 3
-rw-r--r-- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 8
-rw-r--r-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c | 10
-rw-r--r-- drivers/infiniband/sw/rxe/Makefile | 1
-rw-r--r-- drivers/infiniband/sw/rxe/rxe.c | 4
-rw-r--r-- drivers/infiniband/sw/rxe/rxe.h | 2
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_comp.c | 8
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_cq.c | 24
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_loc.h | 10
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_mcast.c | 11
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_mr.c | 6
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_mw.c | 21
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.c | 9
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_opcode.c | 739
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_pool.c | 177
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_pool.h | 54
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_qp.c | 9
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_queue.c | 9
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_req.c | 16
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_srq.c | 2
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_sysfs.c | 119
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_task.c | 18
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_verbs.c | 34
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_verbs.h | 24
-rw-r--r-- drivers/infiniband/sw/siw/siw_main.c | 7
-rw-r--r-- drivers/infiniband/sw/siw/siw_verbs.c | 6
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 76
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.h | 23
-rw-r--r-- drivers/infiniband/ulp/iser/iser_initiator.c | 106
-rw-r--r-- drivers/infiniband/ulp/iser/iser_memory.c | 58
-rw-r--r-- drivers/infiniband/ulp/iser/iser_verbs.c | 138
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c | 8
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c | 145
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-clt.c | 1089
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-clt.h | 41
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-pri.h | 18
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c | 121
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-srv.c | 684
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-srv.h | 16
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs.c | 98
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs.h | 34
-rw-r--r-- drivers/input/ff-core.c | 2
-rw-r--r-- drivers/input/keyboard/gpio_keys.c | 2
-rw-r--r-- drivers/input/misc/axp20x-pek.c | 72
-rw-r--r-- drivers/input/misc/palmas-pwrbutton.c | 9
-rw-r--r-- drivers/input/mouse/byd.c | 2
-rw-r--r-- drivers/input/serio/serport.c | 5
-rw-r--r-- drivers/input/touchscreen/goodix.c | 127
-rw-r--r-- drivers/input/touchscreen/goodix.h | 1
-rw-r--r-- drivers/input/touchscreen/silead.c | 172
-rw-r--r-- drivers/input/touchscreen/ti_am335x_tsc.c | 20
-rw-r--r-- drivers/input/touchscreen/ucb1400_ts.c | 4
-rw-r--r-- drivers/input/touchscreen/wacom_i2c.c | 44
-rw-r--r-- drivers/input/touchscreen/zinitix.c | 22
-rw-r--r-- drivers/interconnect/qcom/Kconfig | 27
-rw-r--r-- drivers/interconnect/qcom/Makefile | 6
-rw-r--r-- drivers/interconnect/qcom/icc-rpm.c | 64
-rw-r--r-- drivers/interconnect/qcom/icc-rpm.h | 15
-rw-r--r-- drivers/interconnect/qcom/icc-rpmh.c | 10
-rw-r--r-- drivers/interconnect/qcom/msm8916.c | 4
-rw-r--r-- drivers/interconnect/qcom/msm8939.c | 5
-rw-r--r-- drivers/interconnect/qcom/msm8996.c | 2110
-rw-r--r-- drivers/interconnect/qcom/msm8996.h | 149
-rw-r--r-- drivers/interconnect/qcom/osm-l3.c | 20
-rw-r--r-- drivers/interconnect/qcom/qcm2290.c | 1363
-rw-r--r-- drivers/interconnect/qcom/sc7280.h | 2
-rw-r--r-- drivers/interconnect/qcom/sdm660.c | 7
-rw-r--r-- drivers/interconnect/qcom/sm8150.c | 1
-rw-r--r-- drivers/interconnect/qcom/sm8250.c | 1
-rw-r--r-- drivers/interconnect/qcom/sm8350.c | 1
-rw-r--r-- drivers/interconnect/qcom/sm8450.c | 1987
-rw-r--r-- drivers/interconnect/qcom/sm8450.h | 169
-rw-r--r-- drivers/iommu/amd/amd_iommu_types.h | 2
-rw-r--r-- drivers/iommu/amd/init.c | 109
-rw-r--r-- drivers/iommu/amd/io_pgtable.c | 110
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 2
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 23
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 5
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 3
-rw-r--r-- drivers/iommu/dma-iommu.c | 274
-rw-r--r-- drivers/iommu/intel/iommu.c | 111
-rw-r--r-- drivers/iommu/io-pgtable-arm-v7s.c | 6
-rw-r--r-- drivers/iommu/io-pgtable-arm.c | 9
-rw-r--r-- drivers/iommu/iommu.c | 3
-rw-r--r-- drivers/iommu/iova.c | 209
-rw-r--r-- drivers/iommu/s390-iommu.c | 10
-rw-r--r-- drivers/iommu/virtio-iommu.c | 117
-rw-r--r-- drivers/irqchip/irq-apple-aic.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v2m.c | 3
-rw-r--r-- drivers/irqchip/irq-gic-v3-its-pci-msi.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 203
-rw-r--r-- drivers/irqchip/irq-gic-v3-mbi.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 17
-rw-r--r-- drivers/irqchip/irq-imx-gpcv2.c | 2
-rw-r--r-- drivers/irqchip/irq-ingenic-tcu.c | 4
-rw-r--r-- drivers/irqchip/irq-loongson-pch-msi.c | 2
-rw-r--r-- drivers/irqchip/irq-mbigen.c | 4
-rw-r--r-- drivers/irqchip/irq-mvebu-icu.c | 12
-rw-r--r-- drivers/irqchip/irq-realtek-rtl.c | 18
-rw-r--r-- drivers/irqchip/irq-renesas-intc-irqpin.c | 9
-rw-r--r-- drivers/irqchip/irq-renesas-irqc.c | 9
-rw-r--r-- drivers/irqchip/irq-ti-sci-inta.c | 2
-rw-r--r-- drivers/irqchip/spear-shirq.c | 2
-rw-r--r-- drivers/isdn/capi/kcapi.c | 2
-rw-r--r-- drivers/leds/Kconfig | 10
-rw-r--r-- drivers/leds/Makefile | 4
-rw-r--r-- drivers/leds/blink/leds-lgm-sso.c | 1
-rw-r--r-- drivers/leds/flash/Kconfig | 13
-rw-r--r-- drivers/leds/flash/Makefile | 1
-rw-r--r-- drivers/leds/flash/leds-ktd2692.c | 2
-rw-r--r-- drivers/leds/flash/leds-mt6360.c | 910
-rw-r--r-- drivers/leds/led-class.c | 6
-rw-r--r-- drivers/leds/leds-fsg.c | 193
-rw-r--r-- drivers/leds/leds-lp50xx.c | 1
-rw-r--r-- drivers/leds/leds-lp55xx-common.c | 4
-rw-r--r-- drivers/leds/leds-tca6507.c | 7
-rw-r--r-- drivers/leds/simple/Kconfig | 11
-rw-r--r-- drivers/leds/simple/Makefile | 2
-rw-r--r-- drivers/leds/simple/simatic-ipc-leds.c | 202
-rw-r--r-- drivers/macintosh/mac_hid.c | 24
-rw-r--r-- drivers/macintosh/mediabay.c | 2
-rw-r--r-- drivers/mailbox/apple-mailbox.c | 4
-rw-r--r-- drivers/mailbox/bcm-flexrm-mailbox.c | 13
-rw-r--r-- drivers/mailbox/hi3660-mailbox.c | 18
-rw-r--r-- drivers/mailbox/imx-mailbox.c | 4
-rw-r--r-- drivers/mailbox/mailbox-mpfs.c | 2
-rw-r--r-- drivers/mailbox/mtk-cmdq-mailbox.c | 15
-rw-r--r-- drivers/mailbox/pcc.c | 10
-rw-r--r-- drivers/mailbox/qcom-ipcc.c | 99
-rw-r--r-- drivers/mailbox/zynqmp-ipi-mailbox.c | 1
-rw-r--r-- drivers/md/dm-integrity.c | 9
-rw-r--r-- drivers/md/dm-linear.c | 63
-rw-r--r-- drivers/md/dm-log-writes.c | 110
-rw-r--r-- drivers/md/dm-mpath.c | 1
-rw-r--r-- drivers/md/dm-stripe.c | 75
-rw-r--r-- drivers/md/dm-sysfs.c | 3
-rw-r--r-- drivers/md/dm-table.c | 22
-rw-r--r-- drivers/md/dm-writecache.c | 2
-rw-r--r-- drivers/md/dm.c | 110
-rw-r--r-- drivers/md/dm.h | 4
-rw-r--r-- drivers/md/md-cluster.c | 2
-rw-r--r-- drivers/md/md.c | 58
-rw-r--r-- drivers/md/md.h | 2
-rw-r--r-- drivers/md/persistent-data/dm-btree-remove.c | 173
-rw-r--r-- drivers/md/persistent-data/dm-btree-spine.c | 12
-rw-r--r-- drivers/md/persistent-data/dm-btree.c | 8
-rw-r--r-- drivers/md/persistent-data/dm-space-map-common.c | 5
-rw-r--r-- drivers/md/raid0.c | 38
-rw-r--r-- drivers/md/raid1-10.c | 6
-rw-r--r-- drivers/md/raid1.c | 83
-rw-r--r-- drivers/md/raid10.c | 107
-rw-r--r-- drivers/md/raid5.c | 67
-rw-r--r-- drivers/md/raid5.h | 4
-rw-r--r-- drivers/media/Kconfig | 8
-rw-r--r-- drivers/media/cec/core/cec-adap.c | 46
-rw-r--r-- drivers/media/cec/core/cec-api.c | 8
-rw-r--r-- drivers/media/cec/core/cec-core.c | 5
-rw-r--r-- drivers/media/cec/core/cec-pin-priv.h | 1
-rw-r--r-- drivers/media/cec/core/cec-pin.c | 32
-rw-r--r-- drivers/media/common/b2c2/flexcop.c | 11
-rw-r--r-- drivers/media/common/saa7146/saa7146_fops.c | 5
-rw-r--r-- drivers/media/common/videobuf2/frame_vector.c | 15
-rw-r--r-- drivers/media/common/videobuf2/videobuf2-dma-contig.c | 8
-rw-r--r-- drivers/media/dvb-core/dmxdev.c | 30
-rw-r--r-- drivers/media/dvb-core/dvb_demux.c | 12
-rw-r--r-- drivers/media/dvb-core/dvb_frontend.c | 21
-rw-r--r-- drivers/media/dvb-core/dvb_vb2.c | 4
-rw-r--r-- drivers/media/dvb-core/dvbdev.c | 12
-rw-r--r-- drivers/media/dvb-frontends/cx24113.c | 2
-rw-r--r-- drivers/media/dvb-frontends/dib8000.c | 4
-rw-r--r-- drivers/media/dvb-frontends/dib9000.c | 4
-rw-r--r-- drivers/media/dvb-frontends/drxd_hard.c | 8
-rw-r--r-- drivers/media/dvb-frontends/drxk_hard.c | 23
-rw-r--r-- drivers/media/dvb-frontends/m88ds3103.c | 6
-rw-r--r-- drivers/media/dvb-frontends/s5h1411.c | 2
-rw-r--r-- drivers/media/dvb-frontends/si2168.c | 43
-rw-r--r-- drivers/media/dvb-frontends/si2168_priv.h | 2
-rw-r--r-- drivers/media/dvb-frontends/si21xx.c | 7
-rw-r--r-- drivers/media/dvb-frontends/sp887x.c | 4
-rw-r--r-- drivers/media/dvb-frontends/stb6100.c | 2
-rw-r--r-- drivers/media/dvb-frontends/stv0367.c | 6
-rw-r--r-- drivers/media/i2c/Kconfig | 12
-rw-r--r-- drivers/media/i2c/Makefile | 1
-rw-r--r-- drivers/media/i2c/adv7511-v4l2.c | 22
-rw-r--r-- drivers/media/i2c/adv7604.c | 18
-rw-r--r-- drivers/media/i2c/adv7842.c | 10
-rw-r--r-- drivers/media/i2c/ccs/ccs-core.c | 12
-rw-r--r-- drivers/media/i2c/cx25840/cx25840-ir.c | 20
-rw-r--r-- drivers/media/i2c/dw9768.c | 6
-rw-r--r-- drivers/media/i2c/hi556.c | 70
-rw-r--r--drivers/media/i2c/hi846.c14
-rw-r--r--drivers/media/i2c/imx208.c82
-rw-r--r--drivers/media/i2c/imx274.c102
-rw-r--r--drivers/media/i2c/imx290.c2
-rw-r--r--drivers/media/i2c/imx319.c2
-rw-r--r--drivers/media/i2c/imx355.c2
-rw-r--r--drivers/media/i2c/max9286.c7
-rw-r--r--drivers/media/i2c/ov13858.c2
-rw-r--r--drivers/media/i2c/ov2740.c69
-rw-r--r--drivers/media/i2c/ov5670.c80
-rw-r--r--drivers/media/i2c/ov5675.c71
-rw-r--r--drivers/media/i2c/ov5693.c1537
-rw-r--r--drivers/media/i2c/ov8856.c162
-rw-r--r--drivers/media/i2c/ov8865.c478
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-spi.c6
-rw-r--r--drivers/media/mc/mc-devnode.c2
-rw-r--r--drivers/media/mc/mc-entity.c22
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c3
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c4
-rw-r--r--drivers/media/pci/cobalt/cobalt-cpld.c5
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c6
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c7
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c100
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.h16
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2-main.c27
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c6
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/pci/pt3/pt3.c58
-rw-r--r--drivers/media/pci/saa7134/saa7134-go7007.c7
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c7
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c8
-rw-r--r--drivers/media/pci/saa7146/mxb.c11
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c12
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c5
-rw-r--r--drivers/media/platform/Kconfig12
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c2
-rw-r--r--drivers/media/platform/aspeed-video.c14
-rw-r--r--drivers/media/platform/coda/coda-common.c16
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c21
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c6
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c12
-rw-r--r--drivers/media/platform/imx-pxp.c35
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c7
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c4
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h1
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c6
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c208
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c6
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c10
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h45
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c9
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.h1
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c4
-rw-r--r--drivers/media/platform/omap3isp/isp.c3
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c2
-rw-r--r--drivers/media/platform/omap3isp/omap3isp.h2
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-170.c7
-rw-r--r--drivers/media/platform/qcom/venus/core.c11
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c32
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c3
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c23
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c17
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c15
-rw-r--r--drivers/media/platform/rcar_fdp1.c7
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c6
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c9
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c1
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c12
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c4
-rw-r--r--drivers/media/platform/stm32/Makefile2
-rw-r--r--drivers/media/platform/stm32/dma2d/dma2d-hw.c133
-rw-r--r--drivers/media/platform/stm32/dma2d/dma2d-regs.h113
-rw-r--r--drivers/media/platform/stm32/dma2d/dma2d.c739
-rw-r--r--drivers/media/platform/stm32/dma2d/dma2d.h135
-rw-r--r--drivers/media/radio/radio-si476x.c6
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c20
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c16
-rw-r--r--drivers/media/rc/Kconfig2
-rw-r--r--drivers/media/rc/igorplugusb.c4
-rw-r--r--drivers/media/rc/iguanair.c3
-rw-r--r--drivers/media/rc/ir-rx51.c16
-rw-r--r--drivers/media/rc/ir-spi.c6
-rw-r--r--drivers/media/rc/lirc_dev.c5
-rw-r--r--drivers/media/rc/mceusb.c8
-rw-r--r--drivers/media/rc/pwm-ir-tx.c18
-rw-r--r--drivers/media/rc/redrat3.c22
-rw-r--r--drivers/media/rc/streamzap.c122
-rw-r--r--drivers/media/rc/winbond-cir.c1
-rw-r--r--drivers/media/spi/cxd2880-spi.c13
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c2
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_psi.c12
-rw-r--r--drivers/media/tuners/msi001.c7
-rw-r--r--drivers/media/tuners/mxl5005s.c14
-rw-r--r--drivers/media/tuners/r820t.c24
-rw-r--r--drivers/media/tuners/si2157.c333
-rw-r--r--drivers/media/tuners/si2157_priv.h44
-rw-r--r--drivers/media/tuners/tua9001.c6
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c7
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c28
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.h12
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c3
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c28
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c338
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c12
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c18
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c4
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c2
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_s5k83a.c13
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c41
-rw-r--r--drivers/media/usb/s2255/s2255drv.c4
-rw-r--r--drivers/media/usb/siano/smsusb.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c4
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c4
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c13
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c6
-rw-r--r--drivers/media/usb/uvc/uvc_video.c4
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/v4l2-core/Kconfig4
-rw-r--r--drivers/media/v4l2-core/Makefile1
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c180
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c20
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c21
-rw-r--r--drivers/media/v4l2-core/v4l2-vp9.c1850
-rw-r--r--drivers/memory/omap-gpmc.c50
-rw-r--r--drivers/memory/renesas-rpc-if.c109
-rw-r--r--drivers/message/fusion/mptbase.c155
-rw-r--r--drivers/message/fusion/mptctl.c82
-rw-r--r--drivers/message/fusion/mptlan.c90
-rw-r--r--drivers/message/fusion/mptsas.c94
-rw-r--r--drivers/mfd/Kconfig33
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/atmel-flexcom.c11
-rw-r--r--drivers/mfd/da9062-core.c12
-rw-r--r--drivers/mfd/intel-lpss-acpi.c7
-rw-r--r--drivers/mfd/intel-lpss-pci.c21
-rw-r--r--drivers/mfd/intel-lpss.c1
-rw-r--r--drivers/mfd/intel-lpss.h1
-rw-r--r--drivers/mfd/intel_pmt.c261
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c28
-rw-r--r--drivers/mfd/rohm-bd70528.c314
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/tps65910.c22
-rw-r--r--drivers/misc/cxl/Kconfig1
-rw-r--r--drivers/misc/cxl/sysfs.c3
-rw-r--r--drivers/misc/eeprom/at24.c68
-rw-r--r--drivers/misc/eeprom/at25.c217
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c46
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c389
-rw-r--r--drivers/misc/habanalabs/common/context.c39
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c97
-rw-r--r--drivers/misc/habanalabs/common/device.c387
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c253
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h301
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c150
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c195
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c5
-rw-r--r--drivers/misc/habanalabs/common/hwmon.c209
-rw-r--r--drivers/misc/habanalabs/common/irq.c14
-rw-r--r--drivers/misc/habanalabs/common/memory.c78
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c25
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu_v1.c18
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c56
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c313
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h4
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c4
-rw-r--r--drivers/misc/habanalabs/goya/goya.c165
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h14
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c4
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c31
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h62
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h4
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h19
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h18
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h20
-rw-r--r--drivers/misc/lattice-ecp3-config.c12
-rw-r--r--drivers/misc/lkdtm/Makefile4
-rw-r--r--drivers/misc/lkdtm/bugs.c16
-rw-r--r--drivers/misc/lkdtm/core.c8
-rw-r--r--drivers/misc/mei/bus.c67
-rw-r--r--drivers/misc/mei/client.c7
-rw-r--r--drivers/misc/mei/hbm.c20
-rw-r--r--drivers/misc/mei/hw-txe.c6
-rw-r--r--drivers/misc/mei/hw.h5
-rw-r--r--drivers/misc/mei/init.c1
-rw-r--r--drivers/misc/ocxl/file.c4
-rw-r--r--drivers/misc/pci_endpoint_test.c2
-rw-r--r--drivers/misc/sram.c1
-rw-r--r--drivers/misc/uacce/uacce.c12
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c6
-rw-r--r--drivers/misc/vmw_vmci/vmci_event.c3
-rw-r--r--drivers/mmc/core/block.c42
-rw-r--r--drivers/mmc/core/bus.c11
-rw-r--r--drivers/mmc/core/card.h36
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/mmc_ops.c89
-rw-r--r--drivers/mmc/core/mmc_ops.h2
-rw-r--r--drivers/mmc/core/pwrseq_simple.c2
-rw-r--r--drivers/mmc/core/queue.c2
-rw-r--r--drivers/mmc/core/quirks.h64
-rw-r--r--drivers/mmc/core/sd.c2
-rw-r--r--drivers/mmc/core/sdio.c5
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/au1xmmc.c4
-rw-r--r--drivers/mmc/host/bcm2835.c2
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c110
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c9
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c11
-rw-r--r--drivers/mmc/host/dw_mmc.c56
-rw-r--r--drivers/mmc/host/dw_mmc.h13
-rw-r--r--drivers/mmc/host/jz4740_mmc.c29
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-clkc.c2
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c5
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c5
-rw-r--r--drivers/mmc/host/mmc_spi.c16
-rw-r--r--drivers/mmc/host/mmci.c7
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c5
-rw-r--r--drivers/mmc/host/mtk-sd.c550
-rw-r--r--drivers/mmc/host/mxcmmc.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c36
-rw-r--r--drivers/mmc/host/renesas_sdhi.h4
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c47
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c21
-rw-r--r--drivers/mmc/host/sdhci-acpi.c78
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c4
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c34
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c62
-rw-r--r--drivers/mmc/host/sdhci-pci.h1
-rw-r--r--drivers/mmc/host/sdhci-tegra.c81
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c15
-rw-r--r--drivers/most/most_usb.c4
-rw-r--r--drivers/mtd/chips/gen_probe.c9
-rw-r--r--drivers/mtd/devices/mchp23k256.c4
-rw-r--r--drivers/mtd/devices/mchp48l640.c4
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c10
-rw-r--r--drivers/mtd/devices/sst25l.c4
-rw-r--r--drivers/mtd/hyperbus/rpc-if.c12
-rw-r--r--drivers/mtd/maps/Kconfig6
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c133
-rw-r--r--drivers/mtd/mtd_blkdevs.c26
-rw-r--r--drivers/mtd/mtdchar.c110
-rw-r--r--drivers/mtd/mtdcore.c21
-rw-r--r--drivers/mtd/mtdpart.c2
-rw-r--r--drivers/mtd/nand/core.c3
-rw-r--r--drivers/mtd/nand/onenand/onenand_bbt.c4
-rw-r--r--drivers/mtd/nand/raw/Kconfig12
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c73
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c53
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c5
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c1
-rw-r--r--drivers/mtd/nand/raw/nand_base.c70
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c3
-rw-r--r--drivers/mtd/nand/raw/omap2.c507
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c16
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c14
-rw-r--r--drivers/mtd/nand/raw/renesas-nand-controller.c1424
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c58
-rw-r--r--drivers/mtd/spi-nor/atmel.c79
-rw-r--r--drivers/mtd/spi-nor/catalyst.c15
-rw-r--r--drivers/mtd/spi-nor/core.c556
-rw-r--r--drivers/mtd/spi-nor/core.h217
-rw-r--r--drivers/mtd/spi-nor/eon.c33
-rw-r--r--drivers/mtd/spi-nor/esmt.c15
-rw-r--r--drivers/mtd/spi-nor/everspin.c12
-rw-r--r--drivers/mtd/spi-nor/fujitsu.c3
-rw-r--r--drivers/mtd/spi-nor/gigadevice.c59
-rw-r--r--drivers/mtd/spi-nor/intel.c12
-rw-r--r--drivers/mtd/spi-nor/issi.c62
-rw-r--r--drivers/mtd/spi-nor/macronix.c107
-rw-r--r--drivers/mtd/spi-nor/micron-st.c234
-rw-r--r--drivers/mtd/spi-nor/otp.c2
-rw-r--r--drivers/mtd/spi-nor/sfdp.c20
-rw-r--r--drivers/mtd/spi-nor/spansion.c184
-rw-r--r--drivers/mtd/spi-nor/sst.c96
-rw-r--r--drivers/mtd/spi-nor/swp.c2
-rw-r--r--drivers/mtd/spi-nor/winbond.c168
-rw-r--r--drivers/mtd/spi-nor/xilinx.c21
-rw-r--r--drivers/mtd/spi-nor/xmc.c10
-rw-r--r--drivers/mtd/ubi/block.c7
-rw-r--r--drivers/mtd/ubi/debug.c2
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/amt.c3
-rw-r--r--drivers/net/appletalk/ipddp.c1
-rw-r--r--drivers/net/bareudp.c54
-rw-r--r--drivers/net/bonding/bond_main.c130
-rw-r--r--drivers/net/bonding/bond_netlink.c15
-rw-r--r--drivers/net/bonding/bond_options.c28
-rw-r--r--drivers/net/bonding/bond_procfs.c10
-rw-r--r--drivers/net/bonding/bond_sysfs.c13
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/can/Makefile2
-rw-r--r--drivers/net/can/at91_can.c18
-rw-r--r--drivers/net/can/c_can/c_can.h1
-rw-r--r--drivers/net/can/c_can/c_can_ethtool.c4
-rw-r--r--drivers/net/can/c_can/c_can_main.c16
-rw-r--r--drivers/net/can/cc770/cc770.c16
-rw-r--r--drivers/net/can/dev/bittiming.c5
-rw-r--r--drivers/net/can/dev/dev.c9
-rw-r--r--drivers/net/can/dev/netlink.c33
-rw-r--r--drivers/net/can/dev/rx-offload.c7
-rw-r--r--drivers/net/can/flexcan/Makefile7
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c (renamed from drivers/net/can/flexcan.c)235
-rw-r--r--drivers/net/can/flexcan/flexcan-ethtool.c114
-rw-r--r--drivers/net/can/flexcan/flexcan.h163
-rw-r--r--drivers/net/can/grcan.c23
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c11
-rw-r--r--drivers/net/can/janz-ican3.c8
-rw-r--r--drivers/net/can/kvaser_pciefd.c16
-rw-r--r--drivers/net/can/m_can/m_can.c29
-rw-r--r--drivers/net/can/m_can/tcan4x5x-regmap.c2
-rw-r--r--drivers/net/can/mscan/mscan.c14
-rw-r--r--drivers/net/can/pch_can.c33
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c14
-rw-r--r--drivers/net/can/rcar/rcar_can.c22
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c22
-rw-r--r--drivers/net/can/sja1000/sja1000.c11
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c15
-rw-r--r--drivers/net/can/slcan.c11
-rw-r--r--drivers/net/can/softing/softing_cs.c2
-rw-r--r--drivers/net/can/softing/softing_fw.c11
-rw-r--r--drivers/net/can/softing/softing_main.c8
-rw-r--r--drivers/net/can/spi/hi311x.c83
-rw-r--r--drivers/net/can/spi/mcp251x.c34
-rw-r--r--drivers/net/can/spi/mcp251xfd/Makefile5
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c119
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c1083
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c1
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c269
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c260
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c260
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c205
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h36
-rw-r--r--drivers/net/can/sun4i_can.c84
-rw-r--r--drivers/net/can/ti_hecc.c8
-rw-r--r--drivers/net/can/usb/ems_usb.c14
-rw-r--r--drivers/net/can/usb/esd_usb2.c13
-rw-r--r--drivers/net/can/usb/etas_es58x/es581_4.c5
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c8
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c5
-rw-r--r--drivers/net/can/usb/gs_usb.c12
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h5
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c4
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c78
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c29
-rw-r--r--drivers/net/can/usb/mcba_usb.c23
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c10
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c20
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c11
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c12
-rw-r--r--drivers/net/can/usb/ucan.c17
-rw-r--r--drivers/net/can/usb/usb_8dev.c19
-rw-r--r--drivers/net/can/vcan.c7
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/can/xilinx_can.c26
-rw-r--r--drivers/net/dsa/b53/b53_common.c9
-rw-r--r--drivers/net/dsa/b53/b53_priv.h5
-rw-r--r--drivers/net/dsa/bcm_sf2.c54
-rw-r--r--drivers/net/dsa/bcm_sf2.h10
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h65
-rw-r--r--drivers/net/dsa/dsa_loop.c9
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c116
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c4
-rw-r--r--drivers/net/dsa/lan9303-core.c7
-rw-r--r--drivers/net/dsa/lantiq_gswip.c145
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c1
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c10
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h4
-rw-r--r--drivers/net/dsa/mt7530.c58
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c146
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c4
-rw-r--r--drivers/net/dsa/ocelot/Kconfig1
-rw-r--r--drivers/net/dsa/ocelot/felix.c109
-rw-r--r--drivers/net/dsa/ocelot/felix.h10
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c929
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c140
-rw-r--r--drivers/net/dsa/qca8k.c666
-rw-r--r--drivers/net/dsa/qca8k.h198
-rw-r--r--drivers/net/dsa/realtek-smi-core.c2
-rw-r--r--drivers/net/dsa/rtl8365mb.c20
-rw-r--r--drivers/net/dsa/rtl8366rb.c9
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_flower.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c163
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c86
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h24
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c7
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.h2
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c11
-rw-r--r--drivers/net/eql.c4
-rw-r--r--drivers/net/ethernet/3com/typhoon.c10
-rw-r--r--drivers/net/ethernet/8390/etherh.c6
-rw-r--r--drivers/net/ethernet/8390/hydra.c4
-rw-r--r--drivers/net/ethernet/8390/mac8390.c4
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c4
-rw-r--r--drivers/net/ethernet/8390/wd.c4
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/agere/et131x.c5
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c245
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.h18
-rw-r--r--drivers/net/ethernet/alteon/acenic.c9
-rw-r--r--drivers/net/ethernet/alteon/acenic.h1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c8
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h13
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c23
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c127
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h25
-rw-r--r--drivers/net/ethernet/amd/a2065.c18
-rw-r--r--drivers/net/ethernet/amd/ariadne.c20
-rw-r--r--drivers/net/ethernet/amd/atarilance.c7
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/hplance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/amd/mvme147.c14
-rw-r--r--drivers/net/ethernet/amd/ni65.c8
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c8
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/apple/bmac.c5
-rw-r--r--drivers/net/ethernet/apple/mace.c16
-rw-r--r--drivers/net/ethernet/apple/macmace.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c3
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c18
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c111
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c8
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c99
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c139
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c41
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c103
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c10
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c13
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c34
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c30
-rw-r--r--drivers/net/ethernet/cadence/macb.h3
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c133
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c3
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c3
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c8
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c24
-rw-r--r--drivers/net/ethernet/cortina/gemini.c17
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c18
-rw-r--r--drivers/net/ethernet/engleder/Kconfig39
-rw-r--r--drivers/net/ethernet/engleder/Makefile10
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h189
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c293
-rw-r--r--drivers/net/ethernet/engleder/tsnep_hw.h230
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c1272
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ptp.c218
-rw-r--r--drivers/net/ethernet/engleder/tsnep_selftests.c811
-rw-r--r--drivers/net/ethernet/engleder/tsnep_tc.c443
-rw-r--r--drivers/net/ethernet/ethoc.c17
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c14
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c142
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c10
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c81
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c9
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c48
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c12
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c32
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c21
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c28
-rw-r--r--drivers/net/ethernet/google/gve/gve.h23
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc.h20
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h24
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c86
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c117
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c5
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c73
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c610
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h458
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c525
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h136
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c115
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h39
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c904
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c116
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c591
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h434
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c116
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c1412
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h95
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c33
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c110
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c556
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h218
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c825
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h90
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c23
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c40
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c17
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c23
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c10
-rw-r--r--drivers/net/ethernet/i825xx/82596.c3
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c6
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c3
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c241
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h3
-rw-r--r--drivers/net/ethernet/intel/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/e100.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c29
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c52
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c137
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h1
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h115
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c60
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c804
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c75
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h30
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c558
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_cgu_regs.h116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c429
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h96
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c120
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c92
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c398
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c169
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c157
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c304
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c702
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h83
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h42
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c214
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c216
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.h41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c397
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c302
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c739
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c208
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c860
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h374
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c2794
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h345
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c402
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c557
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h56
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c298
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c468
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c3
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c156
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c192
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h7
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c39
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c1
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c323
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h19
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c62
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h5
-rw-r--r--drivers/net/ethernet/lantiq_etop.c55
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c107
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c10
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c432
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h3
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c229
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h70
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c66
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c7
-rw-r--r--drivers/net/ethernet/marvell/prestera/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h38
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c727
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h215
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c475
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.h30
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.c108
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.h18
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c353
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.h8
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c630
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h73
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c17
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c184
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.c214
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.h37
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.c1
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c9
-rw-r--r--drivers/net/ethernet/marvell/skge.c8
-rw-r--r--drivers/net/ethernet/marvell/sky2.c92
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c219
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h19
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c122
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c218
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c90
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c205
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1354
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c221
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c314
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c649
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c250
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h262
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c239
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h642
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c306
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c351
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c187
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c24
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c6
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c22
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c682
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c244
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h173
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c470
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c1002
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h278
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c506
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c127
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c406
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h871
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c544
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c317
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c27
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c75
-rw-r--r--drivers/net/ethernet/microsoft/mana/Makefile2
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h15
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c162
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c140
-rw-r--r--drivers/net/ethernet/mscc/Makefile4
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c304
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h15
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.c894
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.h166
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c128
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c91
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c103
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c535
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c523
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c17
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c6
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c27
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c6
-rw-r--r--drivers/net/ethernet/neterion/s2io.c25
-rw-r--r--drivers/net/ethernet/neterion/s2io.h1
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c31
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c70
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c10
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c3
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c102
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c100
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c42
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c21
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c91
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c38
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c9
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-ethtool.c8
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c8
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c71
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c18
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c11
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c29
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c3
-rw-r--r--drivers/net/ethernet/seeq/ether3.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c7
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c6
-rw-r--r--drivers/net/ethernet/sfc/efx.c3
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c15
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c14
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c14
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c4
-rw-r--r--drivers/net/ethernet/sfc/ptp.c3
-rw-r--r--drivers/net/ethernet/sfc/rx.c2
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c6
-rw-r--r--drivers/net/ethernet/socionext/netsec.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c255
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c121
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.c26
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c40
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c7
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c8
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c8
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c32
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h10
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c69
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c12
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c4
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c35
-rw-r--r--drivers/net/ethernet/vertexcom/Kconfig25
-rw-r--r--drivers/net/ethernet/vertexcom/Makefile6
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c769
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c221
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c9
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
-rw-r--r--drivers/net/fddi/skfp/hwmtm.c6
-rw-r--r--drivers/net/fddi/skfp/smt.c14
-rw-r--r--drivers/net/geneve.c9
-rw-r--r--drivers/net/hamradio/6pack.c4
-rw-r--r--drivers/net/hamradio/hdlcdrv.c1
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hamradio/scc.c1
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hyperv/hyperv_net.h5
-rw-r--r--drivers/net/hyperv/netvsc.c146
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c9
-rw-r--r--drivers/net/hyperv/rndis_filter.c2
-rw-r--r--drivers/net/ifb.c146
-rw-r--r--drivers/net/ipa/gsi.c114
-rw-r--r--drivers/net/ipa/gsi.h21
-rw-r--r--drivers/net/ipa/gsi_reg.h4
-rw-r--r--drivers/net/ipa/ipa_data-v4.5.c7
-rw-r--r--drivers/net/ipa/ipa_endpoint.c121
-rw-r--r--drivers/net/ipa/ipa_endpoint.h17
-rw-r--r--drivers/net/ipa/ipa_main.c6
-rw-r--r--drivers/net/ipa/ipa_mem.c4
-rw-r--r--drivers/net/ipa/ipa_modem.c10
-rw-r--r--drivers/net/ipa/ipa_modem.h3
-rw-r--r--drivers/net/ipa/ipa_table.c48
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c11
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/macvlan.c11
-rw-r--r--drivers/net/mctp/Kconfig18
-rw-r--r--drivers/net/mctp/Makefile1
-rw-r--r--drivers/net/mctp/mctp-serial.c515
-rw-r--r--drivers/net/mdio/Kconfig2
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c2
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c176
-rw-r--r--drivers/net/netconsole.c2
-rw-r--r--drivers/net/netdevsim/dev.c4
-rw-r--r--drivers/net/netdevsim/ethtool.c8
-rw-r--r--drivers/net/pcs/pcs-lynx.c36
-rw-r--r--drivers/net/phy/at803x.c2
-rw-r--r--drivers/net/phy/broadcom.c1
-rw-r--r--drivers/net/phy/dp83640.c3
-rw-r--r--drivers/net/phy/dp83869.c42
-rw-r--r--drivers/net/phy/marvell.c82
-rw-r--r--drivers/net/phy/mdio_bus.c18
-rw-r--r--drivers/net/phy/micrel.c135
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c3
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c220
-rw-r--r--drivers/net/phy/phy-core.c2
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/phy/phylink.c492
-rw-r--r--drivers/net/phy/sfp-bus.c5
-rw-r--r--drivers/net/phy/sfp.c25
-rw-r--r--drivers/net/ppp/ppp_async.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c12
-rw-r--r--drivers/net/ppp/ppp_synctty.c3
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c17
-rw-r--r--drivers/net/usb/lan78xx.c1214
-rw-r--r--drivers/net/usb/mcs7830.c12
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c8
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/veth.c36
-rw-r--r--drivers/net/virtio_net.c16
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c22
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c10
-rw-r--r--drivers/net/vrf.c9
-rw-r--r--drivers/net/vxlan.c10
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c62
-rw-r--r--drivers/net/wan/ixp4xx_hss.c261
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wireguard/noise.c45
-rw-r--r--drivers/net/wireguard/queueing.h4
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c83
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h110
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c33
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c28
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c55
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c272
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h52
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c46
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c12
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c41
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c78
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c49
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h58
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c686
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c247
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c57
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h19
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c74
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h56
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c71
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h34
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c962
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h17
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c49
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c248
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c214
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h17
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c120
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h200
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c633
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h119
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c50
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h19
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c45
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c9
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c12
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/ath/regd_common.h3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c96
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h29
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c74
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c125
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c41
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c21
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c2
-rw-r--r--drivers/net/wireless/cisco/airo.c22
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/system.h (renamed from drivers/net/wireless/intel/iwlwifi/fw/api/soc.h)16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c153
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c70
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c96
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/Makefile8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/internal.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h505
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c2001
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c409
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h733
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace-data.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace.h76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c184
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c227
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c189
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c261
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c105
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c261
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c109
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c152
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c343
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c8
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c16
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_download.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c5
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c24
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_wlan.h14
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c6
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/host.h10
-rw-r--r--drivers/net/wireless/marvell/libertas/tx.c5
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h36
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c28
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c3
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c90
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c122
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c200
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h127
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c169
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h521
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c227
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c205
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c267
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h841
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c136
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c160
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h153
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c51
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c10
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c28
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c80
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c41
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c100
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c388
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h143
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c91
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c88
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h77
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c69
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c63
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c10
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c114
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.h22
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c36
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c61
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h472
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c73
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c93
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h39
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h2170
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c21
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h11
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c158
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h60
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h25
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c375
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c25
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.c12201
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h91
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c9
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c2
-rw-r--r--drivers/net/wwan/Kconfig25
-rw-r--r--drivers/net/wwan/Makefile1
-rw-r--r--drivers/net/wwan/iosm/Makefile4
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_debugfs.c29
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_debugfs.h17
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c11
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h8
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c13
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.h9
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.c28
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.h1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.c18
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c49
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_port.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c182
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.h74
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.c3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.h10
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c4
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c907
-rw-r--r--drivers/net/wwan/wwan_core.c39
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nfc/fdp/i2c.c4
-rw-r--r--drivers/nfc/pn544/i2c.c2
-rw-r--r--drivers/nfc/st21nfca/i2c.c4
-rw-r--r--drivers/nfc/st21nfca/se.c10
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c2
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c26
-rw-r--r--drivers/ntb/msi.c22
-rw-r--r--drivers/nubus/proc.c36
-rw-r--r--drivers/nvdimm/Kconfig2
-rw-r--r--drivers/nvdimm/pmem.c38
-rw-r--r--drivers/nvdimm/virtio_pmem.c2
-rw-r--r--drivers/nvme/host/core.c11
-rw-r--r--drivers/nvme/host/fabrics.c25
-rw-r--r--drivers/nvme/host/fault_inject.c2
-rw-r--r--drivers/nvme/host/multipath.c41
-rw-r--r--drivers/nvme/host/nvme.h8
-rw-r--r--drivers/nvme/host/pci.c150
-rw-r--r--drivers/nvme/host/trace.h6
-rw-r--r--drivers/nvme/target/passthru.c3
-rw-r--r--drivers/nvmem/core.c2
-rw-r--r--drivers/nvmem/mtk-efuse.c13
-rw-r--r--drivers/of/base.c169
-rw-r--r--drivers/of/device.c2
-rw-r--r--drivers/of/fdt.c150
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/of/property.c17
-rw-r--r--drivers/of/unittest.c175
-rw-r--r--drivers/parisc/led.c4
-rw-r--r--drivers/parisc/pdc_stable.c7
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/access.c36
-rw-r--r--drivers/pci/controller/Kconfig10
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c18
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-plat.c6
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h2
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c8
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c4
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c81
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c37
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c152
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-hisi.c32
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c204
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c14
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c222
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c147
-rw-r--r--drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c84
-rw-r--r--drivers/pci/controller/pci-aardvark.c79
-rw-r--r--drivers/pci/controller/pci-hyperv.c329
-rw-r--r--drivers/pci/controller/pci-mvebu.c542
-rw-r--r--drivers/pci/controller/pci-rcar-gen2.c14
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c46
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c4
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c6
-rw-r--r--drivers/pci/controller/pci-xgene.c58
-rw-r--r--drivers/pci/controller/pcie-altera.c12
-rw-r--r--drivers/pci/controller/pcie-apple.c10
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c386
-rw-r--r--drivers/pci/controller/pcie-iproc-bcma.c22
-rw-r--r--drivers/pci/controller/pcie-iproc-platform.c16
-rw-r--r--drivers/pci/controller/pcie-iproc.c4
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c382
-rw-r--r--drivers/pci/controller/pcie-mediatek.c18
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c42
-rw-r--r--drivers/pci/controller/pcie-mt7621.c84
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c14
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c4
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c44
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c30
-rw-r--r--drivers/pci/controller/pcie-xilinx.c158
-rw-r--r--drivers/pci/controller/vmd.c61
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c2
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c2
-rw-r--r--drivers/pci/hotplug/TODO5
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c74
-rw-r--r--drivers/pci/hotplug/pciehp.h3
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c38
-rw-r--r--drivers/pci/msi/Makefile7
-rw-r--r--drivers/pci/msi/irqdomain.c280
-rw-r--r--drivers/pci/msi/legacy.c80
-rw-r--r--drivers/pci/msi/msi.c (renamed from drivers/pci/msi.c)954
-rw-r--r--drivers/pci/msi/msi.h39
-rw-r--r--drivers/pci/msi/pcidev_msi.c43
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/p2pdma.c4
-rw-r--r--drivers/pci/pci-bridge-emul.c119
-rw-r--r--drivers/pci/pci-sysfs.c7
-rw-r--r--drivers/pci/pci.c31
-rw-r--r--drivers/pci/pcie/aspm.c94
-rw-r--r--drivers/pci/pcie/dpc.c4
-rw-r--r--drivers/pci/pcie/pme.c4
-rw-r--r--drivers/pci/probe.c36
-rw-r--r--drivers/pci/proc.c10
-rw-r--r--drivers/pci/quirks.c28
-rw-r--r--drivers/pci/setup-res.c8
-rw-r--r--drivers/pci/slot.c3
-rw-r--r--drivers/pci/switch/switchtec.c11
-rw-r--r--drivers/pci/xen-pcifront.c4
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/pcmcia/at91_cf.c6
-rw-r--r--drivers/pcmcia/cs.c8
-rw-r--r--drivers/pcmcia/ds.c20
-rw-r--r--drivers/pcmcia/pcmcia_cis.c3
-rw-r--r--drivers/pcmcia/pcmcia_resource.c5
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c124
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c103
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x2xx.c44
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c12
-rw-r--r--drivers/pcmcia/socket_sysfs.c18
-rw-r--r--drivers/pcmcia/yenta_socket.c25
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm-cmn.c1111
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c73
-rw-r--r--drivers/perf/hisilicon/Kconfig9
-rw-r--r--drivers/perf/hisilicon/Makefile2
-rw-r--r--drivers/perf/hisilicon/hisi_pcie_pmu.c948
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c429
-rw-r--r--drivers/phy/amlogic/Kconfig10
-rw-r--r--drivers/phy/amlogic/Makefile1
-rw-r--r--drivers/phy/amlogic/phy-meson8-hdmi-tx.c160
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb2.c54
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c1312
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c6
-rw-r--r--drivers/phy/freescale/Kconfig8
-rw-r--r--drivers/phy/freescale/Makefile1
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c237
-rw-r--r--drivers/phy/intel/Kconfig10
-rw-r--r--drivers/phy/intel/Makefile1
-rw-r--r--drivers/phy/intel/phy-intel-thunderbay-emmc.c509
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c9
-rw-r--r--drivers/phy/mediatek/phy-mtk-io.h38
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c608
-rw-r--r--drivers/phy/mediatek/phy-mtk-xsphy.c140
-rw-r--r--drivers/phy/microchip/Kconfig8
-rw-r--r--drivers/phy/microchip/Makefile1
-rw-r--r--drivers/phy/microchip/lan966x_serdes.c545
-rw-r--r--drivers/phy/microchip/lan966x_serdes_regs.h209
-rw-r--r--drivers/phy/phy-can-transceiver.c4
-rw-r--r--drivers/phy/qualcomm/Kconfig10
-rw-r--r--drivers/phy/qualcomm/Makefile1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c674
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c313
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h104
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c260
-rw-r--r--drivers/phy/socionext/Kconfig2
-rw-r--r--drivers/phy/socionext/phy-uniphier-ahci.c201
-rw-r--r--drivers/phy/socionext/phy-uniphier-pcie.c70
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3hs.c4
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3ss.c14
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c10
-rw-r--r--drivers/phy/tegra/xusb.c2
-rw-r--r--drivers/phy/ti/phy-omap-control.c6
-rw-r--r--drivers/pinctrl/Kconfig463
-rw-r--r--drivers/pinctrl/Makefile47
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c1
-rw-r--r--drivers/pinctrl/aspeed/Kconfig2
-rw-r--r--drivers/pinctrl/bcm/Kconfig2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c15
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c163
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c1
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c3
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c5
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c17
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imxrt1050.c349
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c130
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c7
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c85
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c3
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c105
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c13
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c1
-rw-r--r--drivers/pinctrl/pinctrl-at91.c1
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c6
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c5
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c87
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c11
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c1
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c45
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c486
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c1
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c2
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c12
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c161
-rw-r--r--drivers/pinctrl/pinctrl-st.c116
-rw-r--r--drivers/pinctrl/pinctrl-starfive.c1354
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c1
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c3
-rw-r--r--drivers/pinctrl/pinctrl-thunderbay.c1322
-rw-r--r--drivers/pinctrl/pinctrl-xway.c1
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c10
-rw-r--r--drivers/pinctrl/pinmux.c2
-rw-r--r--drivers/pinctrl/pinmux.h4
-rw-r--r--drivers/pinctrl/qcom/Kconfig18
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c75
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c967
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c1689
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779a0.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c6
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c310
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c9
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c81
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h1
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c149
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c10
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h12
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c8
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c1
-rw-r--r--drivers/platform/mips/Kconfig6
-rw-r--r--drivers/platform/mips/Makefile1
-rw-r--r--drivers/platform/mips/ls2k-reset.c53
-rw-r--r--drivers/platform/surface/Kconfig7
-rw-r--r--drivers/platform/surface/aggregator/Kconfig1
-rw-r--r--drivers/platform/surface/aggregator/bus.c24
-rw-r--r--drivers/platform/surface/aggregator/bus.h3
-rw-r--r--drivers/platform/surface/aggregator/core.c3
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c32
-rw-r--r--drivers/platform/x86/Kconfig63
-rw-r--r--drivers/platform/x86/Makefile6
-rw-r--r--drivers/platform/x86/amd-pmc.c160
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c945
-rw-r--r--drivers/platform/x86/asus-wmi.c605
-rw-r--r--drivers/platform/x86/hp_accel.c27
-rw-r--r--drivers/platform/x86/intel/Kconfig11
-rw-r--r--drivers/platform/x86/intel/Makefile4
-rw-r--r--drivers/platform/x86/intel/crystal_cove_charger.c153
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile9
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c (renamed from drivers/platform/x86/intel/int3472/intel_skl_int3472_clk_and_regulator.c)2
-rw-r--r--drivers/platform/x86/intel/int3472/common.c82
-rw-r--r--drivers/platform/x86/intel/int3472/common.h (renamed from drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h)6
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c (renamed from drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c)51
-rw-r--r--drivers/platform/x86/intel/int3472/intel_skl_int3472_common.c106
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c (renamed from drivers/platform/x86/intel/int3472/intel_skl_int3472_tps68470.c)92
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.h25
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c158
-rw-r--r--drivers/platform/x86/intel/pmt/Kconfig4
-rw-r--r--drivers/platform/x86/intel/pmt/class.c21
-rw-r--r--drivers/platform/x86/intel/pmt/class.h5
-rw-r--r--drivers/platform/x86/intel/pmt/crashlog.c47
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c46
-rw-r--r--drivers/platform/x86/intel/uncore-frequency.c3
-rw-r--r--drivers/platform/x86/intel/vsec.c408
-rw-r--r--drivers/platform/x86/intel/vsec.h43
-rw-r--r--drivers/platform/x86/lenovo-yogabook-wmi.c408
-rw-r--r--drivers/platform/x86/pmc_atom.c54
-rw-r--r--drivers/platform/x86/simatic-ipc.c176
-rw-r--r--drivers/platform/x86/think-lmi.c327
-rw-r--r--drivers/platform/x86/think-lmi.h28
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c1141
-rw-r--r--drivers/platform/x86/toshiba_acpi.c16
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c38
-rw-r--r--drivers/platform/x86/uv_sysfs.c6
-rw-r--r--drivers/platform/x86/wmi.c27
-rw-r--r--drivers/platform/x86/x86-android-tablets.c870
-rw-r--r--drivers/pnp/isapnp/proc.c2
-rw-r--r--drivers/pnp/pnpbios/core.c6
-rw-r--r--drivers/pnp/pnpbios/proc.c4
-rw-r--r--drivers/power/reset/mt6323-poweroff.c3
-rw-r--r--drivers/power/supply/Kconfig12
-rw-r--r--drivers/power/supply/Makefile1
-rw-r--r--drivers/power/supply/ab8500-bm.h123
-rw-r--r--drivers/power/supply/ab8500-chargalg.h8
-rw-r--r--drivers/power/supply/ab8500_bmdata.c575
-rw-r--r--drivers/power/supply/ab8500_btemp.c65
-rw-r--r--drivers/power/supply/ab8500_chargalg.c315
-rw-r--r--drivers/power/supply/ab8500_charger.c544
-rw-r--r--drivers/power/supply/ab8500_fg.c373
-rw-r--r--drivers/power/supply/axp20x_battery.c6
-rw-r--r--drivers/power/supply/bd99954-charger.c24
-rw-r--r--drivers/power/supply/bq24190_charger.c6
-rw-r--r--drivers/power/supply/bq2515x_charger.c8
-rw-r--r--drivers/power/supply/bq256xx_charger.c24
-rw-r--r--drivers/power/supply/bq25890_charger.c37
-rw-r--r--drivers/power/supply/bq25980_charger.c6
-rw-r--r--drivers/power/supply/bq27xxx_battery.c38
-rw-r--r--drivers/power/supply/cw2015_battery.c20
-rw-r--r--drivers/power/supply/ingenic-battery.c14
-rw-r--r--drivers/power/supply/max77976_charger.c509
-rw-r--r--drivers/power/supply/power_supply_core.c80
-rw-r--r--drivers/power/supply/power_supply_sysfs.c57
-rw-r--r--drivers/power/supply/qcom_smbb.c5
-rw-r--r--drivers/power/supply/sc2731_charger.c8
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c22
-rw-r--r--drivers/power/supply/smb347-charger.c34
-rw-r--r--drivers/powercap/dtpm.c6
-rw-r--r--drivers/powercap/idle_inject.c2
-rw-r--r--drivers/powercap/intel_rapl_common.c61
-rw-r--r--drivers/ptp/ptp_ines.c4
-rw-r--r--drivers/ptp/ptp_vclock.c10
-rw-r--r--drivers/pwm/core.c165
-rw-r--r--drivers/pwm/pwm-img.c35
-rw-r--r--drivers/pwm/pwm-pxa.c16
-rw-r--r--drivers/pwm/pwm-tegra.c82
-rw-r--r--drivers/pwm/pwm-twl.c62
-rw-r--r--drivers/pwm/pwm-vt8500.c57
-rw-r--r--drivers/rapidio/switches/Kconfig11
-rw-r--r--drivers/rapidio/switches/Makefile2
-rw-r--r--drivers/rapidio/switches/tsi568.c195
-rw-r--r--drivers/rapidio/switches/tsi57x.c365
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/regulator/Kconfig19
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/bd718x7-regulator.c29
-rw-r--r--drivers/regulator/da9121-regulator.c117
-rw-r--r--drivers/regulator/da9121-regulator.h21
-rw-r--r--drivers/regulator/irq_helpers.c41
-rw-r--r--drivers/regulator/max20086-regulator.c332
-rw-r--r--drivers/regulator/mt6380-regulator.c6
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c2
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c27
-rw-r--r--drivers/regulator/qcom_smd-regulator.c100
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c39
-rw-r--r--drivers/regulator/rohm-regulator.c16
-rw-r--r--drivers/regulator/tps68470-regulator.c201
-rw-r--r--drivers/regulator/twl-regulator.c10
-rw-r--r--drivers/remoteproc/Kconfig15
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/imx_rproc.c9
-rw-r--r--drivers/remoteproc/ingenic_rproc.c5
-rw-r--r--drivers/remoteproc/mtk_scp_ipi.c4
-rw-r--r--drivers/remoteproc/qcom_pil_info.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5.c1
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c38
-rw-r--r--drivers/remoteproc/rcar_rproc.c224
-rw-r--r--drivers/remoteproc/remoteproc_core.c4
-rw-r--r--drivers/remoteproc/remoteproc_coredump.c2
-rw-r--r--drivers/remoteproc/st_slim_rproc.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c1
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c5
-rw-r--r--drivers/reset/Kconfig7
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-starfive-jh7100.c173
-rw-r--r--drivers/rpmsg/qcom_glink_native.c2
-rw-r--r--drivers/rpmsg/qcom_smd.c2
-rw-r--r--drivers/rpmsg/rpmsg_char.c29
-rw-r--r--drivers/rpmsg/rpmsg_core.c44
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c4
-rw-r--r--drivers/rtc/Kconfig24
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/dev.c6
-rw-r--r--drivers/rtc/rtc-cmos.c201
-rw-r--r--drivers/rtc/rtc-da9063.c16
-rw-r--r--drivers/rtc/rtc-ftrtc010.c8
-rw-r--r--drivers/rtc/rtc-gamecube.c377
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c182
-rw-r--r--drivers/rtc/rtc-pcf2127.c2
-rw-r--r--drivers/rtc/rtc-pcf85063.c97
-rw-r--r--drivers/rtc/rtc-pxa.c4
-rw-r--r--drivers/rtc/rtc-rs5c372.c185
-rw-r--r--drivers/rtc/rtc-rv8803.c6
-rw-r--r--drivers/rtc/rtc-sunplus.c362
-rw-r--r--drivers/s390/block/Kconfig2
-rw-r--r--drivers/s390/block/dasd_devmap.c3
-rw-r--r--drivers/s390/block/dcssblk.c26
-rw-r--r--drivers/s390/char/keyboard.h4
-rw-r--r--drivers/s390/char/sclp_early.c2
-rw-r--r--drivers/s390/char/sclp_sd.c3
-rw-r--r--drivers/s390/char/vmcp.c4
-rw-r--r--drivers/s390/cio/chsc_sch.c5
-rw-r--r--drivers/s390/cio/css.c19
-rw-r--r--drivers/s390/cio/device.c20
-rw-r--r--drivers/s390/cio/eadm_sch.c5
-rw-r--r--drivers/s390/cio/qdio.h7
-rw-r--r--drivers/s390/cio/qdio_main.c210
-rw-r--r--drivers/s390/cio/qdio_setup.c22
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c5
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c79
-rw-r--r--drivers/s390/crypto/zcrypt_api.c7
-rw-r--r--drivers/s390/net/ctcm_dbug.h1
-rw-r--r--drivers/s390/net/ctcm_fsms.c2
-rw-r--r--drivers/s390/net/lcs.c11
-rw-r--r--drivers/s390/net/qeth_core.h4
-rw-r--r--drivers/s390/net/qeth_core_main.c125
-rw-r--r--drivers/s390/net/qeth_ethtool.c4
-rw-r--r--drivers/s390/net/qeth_l2_main.c52
-rw-r--r--drivers/s390/net/qeth_l3_main.c13
-rw-r--r--drivers/s390/scsi/zfcp_fc.c13
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c19
-rw-r--r--drivers/scsi/3w-sas.c4
-rw-r--r--drivers/scsi/53c700.c1
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c6
-rw-r--r--drivers/scsi/atp870u.c1
-rw-r--r--drivers/scsi/bfa/bfad.c6
-rw-r--r--drivers/scsi/bfa/bfad_attr.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c20
-rw-r--r--drivers/scsi/ch.c8
-rw-r--r--drivers/scsi/dc395x.c3
-rw-r--r--drivers/scsi/elx/efct/efct_driver.c13
-rw-r--r--drivers/scsi/elx/efct/efct_hw.c10
-rw-r--r--drivers/scsi/elx/efct/efct_io.c2
-rw-r--r--drivers/scsi/elx/libefc/efc_cmds.c4
-rw-r--r--drivers/scsi/elx/libefc/efc_els.c12
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c14
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h7
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c402
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c37
-rw-r--r--drivers/scsi/hosts.c15
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/initio.c5
-rw-r--r--drivers/scsi/libsas/sas_discover.c1
-rw-r--r--drivers/scsi/libsas/sas_event.c77
-rw-r--r--drivers/scsi/libsas/sas_expander.c3
-rw-r--r--drivers/scsi/libsas/sas_init.c49
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c7
-rw-r--r--drivers/scsi/lpfc/lpfc.h97
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c62
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c45
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c48
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c83
-rw-r--r--drivers/scsi/megaraid.c84
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c27
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h603
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h59
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_init.h15
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h128
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_pci.h44
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h14
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h31
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h126
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_debug.h133
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c1428
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c771
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c21
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c87
-rw-r--r--drivers/scsi/mvsas/mv_sas.c5
-rw-r--r--drivers/scsi/myrb.c2
-rw-r--r--drivers/scsi/myrs.c13
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c3
-rw-r--r--drivers/scsi/pm8001/Makefile7
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c24
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c33
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c23
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h3
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c61
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h6
-rw-r--r--drivers/scsi/pm8001/pm80xx_tracepoints.c10
-rw-r--r--drivers/scsi/pm8001/pm80xx_tracepoints.h113
-rw-r--r--drivers/scsi/pmcraid.c5
-rw-r--r--drivers/scsi/qedf/qedf_io.c1
-rw-r--r--drivers/scsi/qedf/qedf_main.c9
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c4
-rw-r--r--drivers/scsi/scsi.c5
-rw-r--r--drivers/scsi/scsi_bsg.c2
-rw-r--r--drivers/scsi/scsi_debugfs.c1
-rw-r--r--drivers/scsi/scsi_error.c114
-rw-r--r--drivers/scsi/scsi_ioctl.c43
-rw-r--r--drivers/scsi/scsi_lib.c55
-rw-r--r--drivers/scsi/scsi_logging.c4
-rw-r--r--drivers/scsi/scsi_pm.c3
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/scsi_proc.c4
-rw-r--r--drivers/scsi/scsi_scan.c4
-rw-r--r--drivers/scsi/scsi_sysfs.c7
-rw-r--r--drivers/scsi/sd.c30
-rw-r--r--drivers/scsi/sd_zbc.c14
-rw-r--r--drivers/scsi/sg.c41
-rw-r--r--drivers/scsi/snic/snic_disc.c2
-rw-r--r--drivers/scsi/sr.c19
-rw-r--r--drivers/scsi/sr_vendor.c4
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/scsi/storvsc_drv.c54
-rw-r--r--drivers/scsi/ufs/Kconfig15
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210-pci.c1
-rw-r--r--drivers/scsi/ufs/ufs-exynos.c4
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c8
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c9
-rw-r--r--drivers/scsi/ufs/ufshcd.c358
-rw-r--r--drivers/scsi/ufs/ufshcd.h20
-rw-r--r--drivers/scsi/ufs/ufshci.h3
-rw-r--r--drivers/scsi/ufs/ufshpb.c5
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/apple/Kconfig22
-rw-r--r--drivers/soc/apple/Makefile2
-rw-r--r--drivers/soc/apple/apple-pmgr-pwrstate.c324
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-mips.c5
-rw-r--r--drivers/soc/canaan/Kconfig1
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c8
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c2
-rw-r--r--drivers/soc/imx/gpcv2.c29
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c77
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c15
-rw-r--r--drivers/soc/qcom/cpr.c2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c28
-rw-r--r--drivers/soc/qcom/qcom_aoss.c2
-rw-r--r--drivers/soc/qcom/qcom_stats.c13
-rw-r--r--drivers/soc/qcom/qmi_interface.c2
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c2
-rw-r--r--drivers/soc/qcom/rpmhpd.c326
-rw-r--r--drivers/soc/qcom/rpmpd.c42
-rw-r--r--drivers/soc/qcom/smem.c2
-rw-r--r--drivers/soc/qcom/socinfo.c3
-rw-r--r--drivers/soc/renesas/Kconfig15
-rw-r--r--drivers/soc/renesas/Makefile2
-rw-r--r--drivers/soc/renesas/r8a779a0-sysc.c380
-rw-r--r--drivers/soc/renesas/r8a779f0-sysc.c47
-rw-r--r--drivers/soc/renesas/rcar-gen4-sysc.c376
-rw-r--r--drivers/soc/renesas/rcar-gen4-sysc.h43
-rw-r--r--drivers/soc/renesas/rcar-rst.c50
-rw-r--r--drivers/soc/renesas/renesas-soc.c127
-rw-r--r--drivers/soc/samsung/Kconfig14
-rw-r--r--drivers/soc/samsung/Makefile2
-rw-r--r--drivers/soc/samsung/exynos-chipid.c3
-rw-r--r--drivers/soc/samsung/exynos-pmu.c2
-rw-r--r--drivers/soc/samsung/exynos-usi.c285
-rw-r--r--drivers/soc/tegra/common.c29
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c51
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra20.c33
-rw-r--r--drivers/soc/tegra/fuse/fuse.h1
-rw-r--r--drivers/soc/tegra/pmc.c41
-rw-r--r--drivers/soc/tegra/regulators-tegra20.c99
-rw-r--r--drivers/soc/tegra/regulators-tegra30.c122
-rw-r--r--drivers/soc/ti/k3-ringacc.c10
-rw-r--r--drivers/soc/ti/k3-socinfo.c3
-rw-r--r--drivers/soc/ti/knav_dma.c20
-rw-r--r--drivers/soc/ti/pruss.c2
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c94
-rw-r--r--drivers/soc/xilinx/Kconfig10
-rw-r--r--drivers/soc/xilinx/Makefile1
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c600
-rw-r--r--drivers/soc/xilinx/zynqmp_pm_domains.c91
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c55
-rw-r--r--drivers/soundwire/cadence_master.c36
-rw-r--r--drivers/soundwire/cadence_master.h14
-rw-r--r--drivers/soundwire/intel.c261
-rw-r--r--drivers/soundwire/intel_init.c2
-rw-r--r--drivers/soundwire/qcom.c14
-rw-r--r--drivers/soundwire/stream.c4
-rw-r--r--drivers/spi/Kconfig11
-rw-r--r--drivers/spi/spi-ar934x.c18
-rw-r--r--drivers/spi/spi-atmel.c38
-rw-r--r--drivers/spi/spi-bcm-qspi.c44
-rw-r--r--drivers/spi/spi-dln2.c4
-rw-r--r--drivers/spi/spi-dw-bt1.c9
-rw-r--r--drivers/spi/spi-dw-core.c179
-rw-r--r--drivers/spi/spi-dw-dma.c55
-rw-r--r--drivers/spi/spi-dw-mmio.c22
-rw-r--r--drivers/spi/spi-dw-pci.c60
-rw-r--r--drivers/spi/spi-dw.h182
-rw-r--r--drivers/spi/spi-fsl-lpspi.c6
-rw-r--r--drivers/spi/spi-geni-qcom.c36
-rw-r--r--drivers/spi/spi-hisi-kunpeng.c15
-rw-r--r--drivers/spi/spi-meson-spifc.c1
-rw-r--r--drivers/spi/spi-pic32.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c61
-rw-r--r--drivers/spi/spi-pxa2xx.h7
-rw-r--r--drivers/spi/spi-rpc-if.c4
-rw-r--r--drivers/spi/spi-rspi.c27
-rw-r--r--drivers/spi/spi-tegra20-slink.c9
-rw-r--r--drivers/spi/spi-tegra210-quad.c11
-rw-r--r--drivers/spi/spi-uniphier.c11
-rw-r--r--drivers/spi/spi-xlp.c8
-rw-r--r--drivers/spi/spi.c194
-rw-r--r--drivers/spi/spidev.c9
-rw-r--r--drivers/spmi/Kconfig11
-rw-r--r--drivers/spmi/Makefile1
-rw-r--r--drivers/spmi/spmi-mtk-pmif.c542
-rw-r--r--drivers/spmi/spmi-pmic-arb.c193
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c11
-rw-r--r--drivers/staging/fbtft/Kconfig6
-rw-r--r--drivers/staging/fbtft/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_sh1106.c7
-rw-r--r--drivers/staging/fbtft/fb_watterott.c302
-rw-r--r--drivers/staging/fbtft/fbtft.h41
-rw-r--r--drivers/staging/greybus/audio_manager_module.c3
-rw-r--r--drivers/staging/greybus/audio_topology.c107
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/atomisp/Makefile24
-rw-r--r--drivers/staging/media/atomisp/TODO191
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c119
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c121
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c130
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c313
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c118
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h3
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h3
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h207
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c248
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c92
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c43
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c312
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.h3
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c25
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_configs.c386
-rw-r--r--drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_params.c3420
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_configs.c386
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_states.c224
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c11
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_acc_types.h5
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_event_public.h17
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_configs.c321
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_configs.h119
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_params.c (renamed from drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_params.c)23
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_params.h3
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_isp_states.c (renamed from drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_states.c)0
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe.h4
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe_public.h26
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream.h4
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream_public.h19
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_local.h134
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c8
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h14
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c8
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c28
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h14
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c1
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h1
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h1
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h1
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c16
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c18
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c11
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c69
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h42
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h16
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c38
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c30
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h16
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c5
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c68
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h14
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c33
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h14
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h4
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c32
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h8
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h14
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_local.h128
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_global.h12
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_local.h26
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h3
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c513
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h24
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c116
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c11
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c3
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c1963
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_defs.h58
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.h3
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_internal.h36
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metadata.c17
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_metrics.c25
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c197
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.h17
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_morph.c17
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_shading.c82
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c249
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.h8
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_shading.c17
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.c131
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.h7
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_stream.c17
-rw-r--r--drivers/staging/media/atomisp/pci/system_global.h3
-rw-r--r--drivers/staging/media/hantro/Kconfig11
-rw-r--r--drivers/staging/media/hantro/Makefile10
-rw-r--r--drivers/staging/media/hantro/hantro.h50
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c51
-rw-r--r--drivers/staging/media/hantro/hantro_g2.c44
-rw-r--r--drivers/staging/media/hantro/hantro_g2_hevc_dec.c92
-rw-r--r--drivers/staging/media/hantro/hantro_g2_regs.h149
-rw-r--r--drivers/staging/media/hantro/hantro_g2_vp9_dec.c1022
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c7
-rw-r--r--drivers/staging/media/hantro/hantro_hevc.c79
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h131
-rw-r--r--drivers/staging/media/hantro/hantro_jpeg.c31
-rw-r--r--drivers/staging/media/hantro/hantro_jpeg.h4
-rw-r--r--drivers/staging/media/hantro/hantro_postproc.c84
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c13
-rw-r--r--drivers/staging/media/hantro/hantro_vp9.c240
-rw-r--r--drivers/staging/media/hantro/hantro_vp9.h102
-rw-r--r--drivers/staging/media/hantro/imx8m_vpu_hw.c58
-rw-r--r--drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c22
-rw-r--r--drivers/staging/media/hantro/rockchip_vpu_hw.c12
-rw-r--r--drivers/staging/media/hantro/sama5d4_vdec_hw.c3
-rw-r--r--drivers/staging/media/hantro/sunxi_vpu_hw.c86
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c2
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c2
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c42
-rw-r--r--drivers/staging/media/imx/imx-media.h4
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c9
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h42
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.c3
-rw-r--r--drivers/staging/media/max96712/Kconfig14
-rw-r--r--drivers/staging/media/max96712/Makefile2
-rw-r--r--drivers/staging/media/max96712/max96712.c440
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.c10
-rw-r--r--drivers/staging/media/rkvdec/Kconfig1
-rw-r--r--drivers/staging/media/rkvdec/Makefile2
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-vp9.c1072
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c43
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.h12
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c12
-rw-r--r--drivers/staging/media/tegra-vde/vde.c210
-rw-r--r--drivers/staging/media/tegra-vde/vde.h18
-rw-r--r--drivers/staging/media/tegra-video/vi.c12
-rw-r--r--drivers/staging/most/dim2/dim2.c28
-rw-r--r--drivers/staging/mt7621-dma/Kconfig7
-rw-r--r--drivers/staging/mt7621-dma/Makefile4
-rw-r--r--drivers/staging/mt7621-dma/TODO5
-rw-r--r--drivers/staging/mt7621-dma/hsdma-mt7621.c758
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi38
-rw-r--r--drivers/staging/pi433/pi433_if.c2
-rw-r--r--drivers/staging/pi433/rf69.c62
-rw-r--r--drivers/staging/pi433/rf69_enum.h12
-rw-r--r--drivers/staging/r8188eu/Makefile3
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c11
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c11
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c36
-rw-r--r--drivers/staging/r8188eu/core/rtw_efuse.c221
-rw-r--r--drivers/staging/r8188eu/core/rtw_ieee80211.c73
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c19
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c285
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c16
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c102
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c6
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c22
-rw-r--r--drivers/staging/r8188eu/core/rtw_rf.c2
-rw-r--r--drivers/staging/r8188eu/core/rtw_security.c22
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c2
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c23
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c2
-rw-r--r--drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c100
-rw-r--r--drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c27
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c1
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c1
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c1
-rw-r--r--drivers/staging/r8188eu/hal/HalPhyRf_8188e.c401
-rw-r--r--drivers/staging/r8188eu/hal/HalPwrSeqCmd.c84
-rw-r--r--drivers/staging/r8188eu/hal/odm.c903
-rw-r--r--drivers/staging/r8188eu/hal/odm_HWConfig.c20
-rw-r--r--drivers/staging/r8188eu/hal/odm_RTL8188E.c176
-rw-r--r--drivers/staging/r8188eu/hal/odm_RegConfig8188E.c60
-rw-r--r--drivers/staging/r8188eu/hal/odm_debug.c2
-rw-r--r--drivers/staging/r8188eu/hal/odm_interface.c93
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c13
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_dm.c25
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c1010
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c197
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rf6052.c65
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c2
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_sreset.c37
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_led.c94
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c4
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c263
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c32
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPhyCfg.h60
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPwrSeq.h148
-rw-r--r--drivers/staging/r8188eu/include/HalPhyRf_8188e.h5
-rw-r--r--drivers/staging/r8188eu/include/HalPwrSeqCmd.h53
-rw-r--r--drivers/staging/r8188eu/include/HalVerDef.h13
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h19
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h30
-rw-r--r--drivers/staging/r8188eu/include/ieee80211.h3
-rw-r--r--drivers/staging/r8188eu/include/ieee80211_ext.h271
-rw-r--r--drivers/staging/r8188eu/include/odm.h216
-rw-r--r--drivers/staging/r8188eu/include/odm_RTL8188E.h6
-rw-r--r--drivers/staging/r8188eu/include/odm_RegConfig8188E.h3
-rw-r--r--drivers/staging/r8188eu/include/odm_interface.h59
-rw-r--r--drivers/staging/r8188eu/include/odm_precomp.h54
-rw-r--r--drivers/staging/r8188eu/include/osdep_service.h32
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_dm.h7
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h100
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_led.h16
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_recv.h3
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h61
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_sreset.h13
-rw-r--r--drivers/staging/r8188eu/include/rtw_cmd.h1
-rw-r--r--drivers/staging/r8188eu/include/rtw_debug.h12
-rw-r--r--drivers/staging/r8188eu/include/rtw_eeprom.h31
-rw-r--r--drivers/staging/r8188eu/include/rtw_efuse.h104
-rw-r--r--drivers/staging/r8188eu/include/rtw_io.h2
-rw-r--r--drivers/staging/r8188eu/include/rtw_led.h116
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme.h12
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h5
-rw-r--r--drivers/staging/r8188eu/include/rtw_pwrctrl.h7
-rw-r--r--drivers/staging/r8188eu/include/rtw_recv.h1
-rw-r--r--drivers/staging/r8188eu/include/rtw_rf.h34
-rw-r--r--drivers/staging/r8188eu/include/usb_osintf.h1
-rw-r--r--drivers/staging/r8188eu/include/wifi.h59
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c235
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c69
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c42
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_ops_linux.c6
-rw-r--r--drivers/staging/ralink-gdma/Kconfig8
-rw-r--r--drivers/staging/ralink-gdma/Makefile4
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c917
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c17
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c4
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c13
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c28
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c14
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c5
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c2
-rw-r--r--drivers/staging/rts5208/rtsx.c16
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c2
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c8
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c12
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c89
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h12
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c143
-rw-r--r--drivers/staging/vc04_services/interface/TODO8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-common.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h2
-rw-r--r--drivers/staging/vt6655/card.c36
-rw-r--r--drivers/staging/vt6655/channel.c2
-rw-r--r--drivers/staging/vt6655/device.h10
-rw-r--r--drivers/staging/vt6655/device_main.c12
-rw-r--r--drivers/staging/vt6655/dpc.c2
-rw-r--r--drivers/staging/vt6655/rf.c54
-rw-r--r--drivers/staging/vt6655/rf.h2
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c8
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/tee/optee/Makefile1
-rw-r--r--drivers/tee/optee/core.c2
-rw-r--r--drivers/tee/optee/ffa_abi.c6
-rw-r--r--drivers/tee/optee/notif.c125
-rw-r--r--drivers/tee/optee/optee_msg.h9
-rw-r--r--drivers/tee/optee/optee_private.h28
-rw-r--r--drivers/tee/optee/optee_rpc_cmd.h31
-rw-r--r--drivers/tee/optee/optee_smc.h75
-rw-r--r--drivers/tee/optee/rpc.c71
-rw-r--r--drivers/tee/optee/smc_abi.c237
-rw-r--r--drivers/tee/tee_core.c10
-rw-r--r--drivers/thermal/Kconfig9
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/cpufreq_cooling.c6
-rw-r--r--drivers/thermal/imx8mm_thermal.c3
-rw-r--r--drivers/thermal/imx_thermal.c145
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c5
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h48
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h4
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c100
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c23
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c2
-rw-r--r--drivers/thermal/rzg2l_thermal.c242
-rw-r--r--drivers/thunderbolt/acpi.c15
-rw-r--r--drivers/thunderbolt/icm.c7
-rw-r--r--drivers/thunderbolt/lc.c24
-rw-r--r--drivers/thunderbolt/path.c42
-rw-r--r--drivers/thunderbolt/retimer.c28
-rw-r--r--drivers/thunderbolt/switch.c493
-rw-r--r--drivers/thunderbolt/tb.c91
-rw-r--r--drivers/thunderbolt/tb.h106
-rw-r--r--drivers/thunderbolt/tb_msgs.h47
-rw-r--r--drivers/thunderbolt/tb_regs.h113
-rw-r--r--drivers/thunderbolt/tmu.c337
-rw-r--r--drivers/thunderbolt/tunnel.c27
-rw-r--r--drivers/thunderbolt/tunnel.h9
-rw-r--r--drivers/thunderbolt/usb4.c52
-rw-r--r--drivers/thunderbolt/xdomain.c16
-rw-r--r--drivers/tty/goldfish.c12
-rw-r--r--drivers/tty/mips_ejtag_fdc.c22
-rw-r--r--drivers/tty/moxa.c4
-rw-r--r--drivers/tty/mxser.c306
-rw-r--r--drivers/tty/n_gsm.c15
-rw-r--r--drivers/tty/n_hdlc.c5
-rw-r--r--drivers/tty/n_tty.c694
-rw-r--r--drivers/tty/rpmsg_tty.c40
-rw-r--r--drivers/tty/serdev/core.c14
-rw-r--r--drivers/tty/serial/8250/8250.h12
-rw-r--r--drivers/tty/serial/8250/8250_alpha.c21
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c23
-rw-r--r--drivers/tty/serial/8250/8250_core.c9
-rw-r--r--drivers/tty/serial/8250/8250_dw.c3
-rw-r--r--drivers/tty/serial/8250/8250_of.c11
-rw-r--r--drivers/tty/serial/8250/8250_pci.c507
-rw-r--r--drivers/tty/serial/8250/8250_pericom.c214
-rw-r--r--drivers/tty/serial/8250/8250_port.c4
-rw-r--r--drivers/tty/serial/8250/Kconfig8
-rw-r--r--drivers/tty/serial/8250/Makefile3
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/altera_jtaguart.c11
-rw-r--r--drivers/tty/serial/altera_uart.c9
-rw-r--r--drivers/tty/serial/amba-pl010.c3
-rw-r--r--drivers/tty/serial/amba-pl011.c84
-rw-r--r--drivers/tty/serial/ar933x_uart.c12
-rw-r--r--drivers/tty/serial/atmel_serial.c16
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c10
-rw-r--r--drivers/tty/serial/fsl_lpuart.c12
-rw-r--r--drivers/tty/serial/imx.c13
-rw-r--r--drivers/tty/serial/lantiq.c34
-rw-r--r--drivers/tty/serial/liteuart.c2
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c2
-rw-r--r--drivers/tty/serial/meson_uart.c45
-rw-r--r--drivers/tty/serial/msm_serial.c15
-rw-r--r--drivers/tty/serial/pmac_zilog.c12
-rw-r--r--drivers/tty/serial/pxa.c12
-rw-r--r--drivers/tty/serial/samsung_tty.c78
-rw-r--r--drivers/tty/serial/serial_core.c49
-rw-r--r--drivers/tty/serial/sh-sci.c91
-rw-r--r--drivers/tty/serial/stm32-usart.c88
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/serial/sunsu.c3
-rw-r--r--drivers/tty/serial/uartlite.c2
-rw-r--r--drivers/tty/serial/vt8500_serial.c12
-rw-r--r--drivers/tty/tty_buffer.c279
-rw-r--r--drivers/tty/tty_io.c921
-rw-r--r--drivers/tty/tty_ldisc.c292
-rw-r--r--drivers/tty/tty_ldsem.c2
-rw-r--r--drivers/tty/tty_port.c223
-rw-r--r--drivers/tty/vt/keyboard.c18
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/uio/uio.c8
-rw-r--r--drivers/uio/uio_dmem_genirq.c6
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c14
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c2
-rw-r--r--drivers/usb/cdns3/core.h6
-rw-r--r--drivers/usb/cdns3/drd.c6
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c53
-rw-r--r--drivers/usb/chipidea/core.c1
-rw-r--r--drivers/usb/chipidea/otg.c5
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/common/debug.c1
-rw-r--r--drivers/usb/common/ulpi.c7
-rw-r--r--drivers/usb/core/driver.c3
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd.c25
-rw-r--r--drivers/usb/core/hub.c37
-rw-r--r--drivers/usb/core/port.c32
-rw-r--r--drivers/usb/core/urb.c12
-rw-r--r--drivers/usb/core/usb.c46
-rw-r--r--drivers/usb/dwc2/core.h6
-rw-r--r--drivers/usb/dwc2/drd.c51
-rw-r--r--drivers/usb/dwc2/gadget.c19
-rw-r--r--drivers/usb/dwc2/hcd.c7
-rw-r--r--drivers/usb/dwc2/platform.c63
-rw-r--r--drivers/usb/dwc3/core.h9
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c17
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c15
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c23
-rw-r--r--drivers/usb/dwc3/gadget.c59
-rw-r--r--drivers/usb/dwc3/host.c45
-rw-r--r--drivers/usb/gadget/composite.c39
-rw-r--r--drivers/usb/gadget/configfs.c39
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/function/f_midi.c48
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c1
-rw-r--r--drivers/usb/gadget/function/rndis.c4
-rw-r--r--drivers/usb/gadget/function/u_audio.c28
-rw-r--r--drivers/usb/gadget/legacy/inode.c18
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/dev.c19
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/ep0.c7
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c47
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h1
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c69
-rw-r--r--drivers/usb/gadget/udc/at91_udc.h8
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c8
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c1
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c4
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c56
-rw-r--r--drivers/usb/host/Kconfig11
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-brcm.c6
-rw-r--r--drivers/usb/host/fotg210-hcd.c11
-rw-r--r--drivers/usb/host/ohci-omap.c2
-rw-r--r--drivers/usb/host/ohci-s3c2410.c10
-rw-r--r--drivers/usb/host/ohci-spear.c2
-rw-r--r--drivers/usb/host/ohci-tmio.c5
-rw-r--r--drivers/usb/host/u132-hcd.c1
-rw-r--r--drivers/usb/host/uhci-platform.c9
-rw-r--r--drivers/usb/host/xen-hcd.c1609
-rw-r--r--drivers/usb/host/xhci-mtk.c16
-rw-r--r--drivers/usb/host/xhci-plat.c3
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/isp1760/isp1760-if.c16
-rw-r--r--drivers/usb/misc/ehset.c58
-rw-r--r--drivers/usb/misc/ftdi-elan.c1
-rw-r--r--drivers/usb/musb/am35x.c2
-rw-r--r--drivers/usb/musb/da8xx.c20
-rw-r--r--drivers/usb/musb/jz4740.c1
-rw-r--r--drivers/usb/musb/mediatek.c2
-rw-r--r--drivers/usb/musb/musb_dsps.c15
-rw-r--r--drivers/usb/musb/omap2430.c23
-rw-r--r--drivers/usb/musb/ux500.c18
-rw-r--r--drivers/usb/phy/phy-mv-usb.c5
-rw-r--r--drivers/usb/renesas_usbhs/common.c14
-rw-r--r--drivers/usb/renesas_usbhs/common.h1
-rw-r--r--drivers/usb/renesas_usbhs/mod.c14
-rw-r--r--drivers/usb/storage/sierra_ms.c2
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h10
-rw-r--r--drivers/usb/typec/Makefile3
-rw-r--r--drivers/usb/typec/class.c2
-rw-r--r--drivers/usb/typec/class.h10
-rw-r--r--drivers/usb/typec/port-mapper.c281
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c26
-rw-r--r--drivers/usb/typec/tcpm/tcpci.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c16
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c2
-rw-r--r--drivers/usb/usbip/usbip_event.c1
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c28
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c41
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h9
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c40
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c160
-rw-r--r--drivers/vdpa/vdpa.c163
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c21
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c2
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c8
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c19
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c16
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c15
-rw-r--r--drivers/vfio/vfio_iommu_type1.c2
-rw-r--r--drivers/vhost/test.c1
-rw-r--r--drivers/vhost/vdpa.c12
-rw-r--r--drivers/video/backlight/lp855x_bl.c134
-rw-r--r--drivers/video/backlight/qcom-wled.c130
-rw-r--r--drivers/video/console/vgacon.c21
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c14
-rw-r--r--drivers/video/fbdev/hyperv_fb.c16
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c14
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c20
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c12
-rw-r--r--drivers/video/fbdev/simplefb.c21
-rw-r--r--drivers/video/fbdev/vga16fb.c24
-rw-r--r--drivers/virt/acrn/ioreq.c3
-rw-r--r--drivers/virt/nitro_enclaves/Kconfig9
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev.c174
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev_test.c157
-rw-r--r--drivers/virt/nitro_enclaves/ne_pci_dev.c1
-rw-r--r--drivers/virtio/virtio.c6
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_input.c2
-rw-r--r--drivers/virtio/virtio_mem.c114
-rw-r--r--drivers/virtio/virtio_pci_legacy.c2
-rw-r--r--drivers/virtio/virtio_pci_legacy_dev.c4
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c2
-rw-r--r--drivers/virtio/virtio_ring.c4
-rw-r--r--drivers/virtio/virtio_vdpa.c7
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c26
-rw-r--r--drivers/w1/slaves/w1_therm.c7
-rw-r--r--drivers/watchdog/Kconfig109
-rw-r--r--drivers/watchdog/Makefile5
-rw-r--r--drivers/watchdog/apple_wdt.c226
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c317
-rw-r--r--drivers/watchdog/bcm7038_wdt.c15
-rw-r--r--drivers/watchdog/da9063_wdt.c12
-rw-r--r--drivers/watchdog/davinci_wdt.c2
-rw-r--r--drivers/watchdog/f71808e_wdt.c10
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c1
-rw-r--r--drivers/watchdog/msc313e_wdt.c4
-rw-r--r--drivers/watchdog/mtk_wdt.c2
-rw-r--r--drivers/watchdog/realtek_otto_wdt.c384
-rw-r--r--drivers/watchdog/rzg2l_wdt.c263
-rw-r--r--drivers/watchdog/s3c2410_wdt.c338
-rw-r--r--drivers/watchdog/simatic-ipc-wdt.c228
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/balloon.c20
-rw-r--r--drivers/xen/gntdev.c6
-rw-r--r--drivers/xen/unpopulated-alloc.c87
-rw-r--r--drivers/zorro/proc.c2
5009 files changed, 244308 insertions, 122747 deletions
diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
index c1ec087dca13..023172ca22ef 100644
--- a/drivers/accessibility/speakup/speakup_acntpc.c
+++ b/drivers/accessibility/speakup/speakup_acntpc.c
@@ -247,7 +247,7 @@ static void synth_flush(struct spk_synth *synth)
static int synth_probe(struct spk_synth *synth)
{
unsigned int port_val = 0;
- int i = 0;
+ int i;
pr_info("Probing for %s.\n", synth->long_name);
if (port_forced) {
diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c
index 92838d3ae9eb..a9dd5c45d237 100644
--- a/drivers/accessibility/speakup/speakup_dtlk.c
+++ b/drivers/accessibility/speakup/speakup_dtlk.c
@@ -316,7 +316,7 @@ static struct synth_settings *synth_interrogate(struct spk_synth *synth)
static int synth_probe(struct spk_synth *synth)
{
unsigned int port_val = 0;
- int i = 0;
+ int i;
struct synth_settings *sp;
pr_info("Probing for DoubleTalk.\n");
diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c
index 311f4aa0be22..1618be87bff1 100644
--- a/drivers/accessibility/speakup/speakup_keypc.c
+++ b/drivers/accessibility/speakup/speakup_keypc.c
@@ -254,7 +254,7 @@ static void synth_flush(struct spk_synth *synth)
static int synth_probe(struct spk_synth *synth)
{
unsigned int port_val = 0;
- int i = 0;
+ int i;
pr_info("Probing for %s.\n", synth->long_name);
if (port_forced) {
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 0d1f397cd896..08cf8a17754b 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -88,7 +88,7 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty,
}
if (!ldisc_data->buf_free)
- /* ttyio_in will tty_schedule_flip */
+ /* ttyio_in will tty_flip_buffer_push */
return 0;
/* Make sure the consumer has read buf before we have seen
@@ -312,7 +312,7 @@ static unsigned char ttyio_in(struct spk_synth *in_synth, int timeout)
mb();
ldisc_data->buf_free = true;
/* Let TTY push more characters */
- tty_schedule_flip(tty->port);
+ tty_flip_buffer_push(tty->port);
return rv;
}
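
The two hunks above move speakup from tty_schedule_flip() to tty_flip_buffer_push(). As a minimal sketch of the flip-buffer producer pattern these helpers serve (illustrative names, assuming a driver that owns the struct tty_port):

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Queue received bytes on @port and make them visible to readers. */
static void push_rx_bytes(struct tty_port *port,
			  const unsigned char *buf, size_t len)
{
	/* Copy into the port's flip buffer (may queue fewer on OOM) */
	tty_insert_flip_string(port, buf, len);
	/* Hand the queued bytes to the line discipline immediately */
	tty_flip_buffer_push(port);
}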
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index cdbdf68bd98f..ba45541b1f1f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -59,6 +59,9 @@ config ACPI_SYSTEM_POWER_STATES_SUPPORT
config ACPI_CCA_REQUIRED
bool
+config ACPI_TABLE_LIB
+ bool
+
config ACPI_DEBUGGER
bool "AML debugger interface"
select ACPI_DEBUG
@@ -517,6 +520,28 @@ config ACPI_CONFIGFS
userspace. The configurable ACPI groups will be visible under
/config/acpi, assuming configfs is mounted under /config.
+config ACPI_PFRUT
+ tristate "ACPI Platform Firmware Runtime Update and Telemetry"
+ depends on 64BIT
+ help
+ This mechanism allows certain pieces of the platform firmware
+ to be updated on the fly while the system is running (runtime),
+ without the need to restart it. This is key when the system must
+ be available 100% of the time and cannot afford the downtime of a
+ restart, or when the work it carries out is too important to be
+ interrupted and it is not practical to wait until it is complete.
+
+ The existing firmware code can be modified (driver update) or
+ extended by adding new code to the firmware (code injection).
+
+ In addition, the telemetry driver allows user space to fetch telemetry
+ data from the firmware with the help of the Platform Firmware Runtime
+ Telemetry interface.
+
+ To compile the drivers as modules, choose M here:
+ the modules will be called pfr_update and pfr_telemetry.
+
if ARM64
source "drivers/acpi/arm64/Kconfig"
@@ -524,6 +549,23 @@ config ACPI_PPTT
bool
endif
+config ACPI_PCC
+ bool "ACPI PCC Address Space"
+ depends on PCC
+ default y
+ help
+ The PCC Address Space, also referred to as the PCC Operation Region, is
+ the region of the PCC subspace that follows the PCC signature.
+
+ The PCC Operation Region works in conjunction with the PCC Table
+ (Platform Communications Channel Table). PCC subspaces that are
+ marked for use as PCC Operation Regions must not be used as PCC
+ subspaces for the standard ACPI features such as CPPC, RASF, PDTT and
+ MPST. These standard features must always use the PCC Table instead.
+
+ Enable this feature if you want to set up and install the PCC Address
+ Space handler to handle the PCC OpRegion in the firmware.
+
source "drivers/acpi/pmic/Kconfig"
config ACPI_VIOT
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 3018714e87d9..bb757148e7ba 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -9,7 +9,7 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# ACPI Boot-Time Table Parsing
#
ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y)
-tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ;
+tables.o: $(src)/../../include/$(CONFIG_ACPI_CUSTOM_DSDT_FILE) ;
endif
@@ -67,6 +67,7 @@ acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
acpi-$(CONFIG_ACPI_PRMT) += prmt.o
+acpi-$(CONFIG_ACPI_PCC) += acpi_pcc.o
# Address translation
acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
@@ -102,6 +103,7 @@ obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_acpi.o
obj-$(CONFIG_ACPI_SPCR_TABLE) += spcr.o
obj-$(CONFIG_ACPI_DEBUGGER_USER) += acpi_dbg.o
obj-$(CONFIG_ACPI_PPTT) += pptt.o
+obj-$(CONFIG_ACPI_PFRUT) += pfr_update.o pfr_telemetry.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 81aff651a0d4..db487ff9dd1b 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -48,19 +48,12 @@ static const struct acpi_device_id ac_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, ac_device_ids);
-/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
-static const struct acpi_ac_bl acpi_ac_blacklist[] = {
- { "INT33F4", -1 }, /* X-Powers AXP288 PMIC */
- { "INT34D3", 3 }, /* Intel Cherrytrail Whiskey Cove PMIC */
-};
-
#ifdef CONFIG_PM_SLEEP
static int acpi_ac_resume(struct device *dev);
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
static int ac_sleep_before_get_state_ms;
-static int ac_check_pmic = 1;
static int ac_only;
static struct acpi_driver acpi_ac_driver = {
@@ -200,12 +193,6 @@ static int __init thinkpad_e530_quirk(const struct dmi_system_id *d)
return 0;
}
-static int __init ac_do_not_check_pmic_quirk(const struct dmi_system_id *d)
-{
- ac_check_pmic = 0;
- return 0;
-}
-
static int __init ac_only_quirk(const struct dmi_system_id *d)
{
ac_only = 1;
@@ -215,13 +202,6 @@ static int __init ac_only_quirk(const struct dmi_system_id *d)
/* Please keep this list alphabetically sorted */
static const struct dmi_system_id ac_dmi_table[] __initconst = {
{
- /* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */
- .callback = ac_do_not_check_pmic_quirk,
- .matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
- },
- },
- {
/* Kodlix GK45 returning incorrect state */
.callback = ac_only_quirk,
.matches = {
@@ -229,15 +209,6 @@ static const struct dmi_system_id ac_dmi_table[] __initconst = {
},
},
{
- /* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
- .callback = ac_do_not_check_pmic_quirk,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "80XF"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
- },
- },
- {
/* Lenovo Thinkpad e530, see comment in acpi_ac_notify() */
.callback = thinkpad_e530_quirk,
.matches = {
@@ -341,23 +312,15 @@ static int acpi_ac_remove(struct acpi_device *device)
static int __init acpi_ac_init(void)
{
- unsigned int i;
int result;
if (acpi_disabled)
return -ENODEV;
- dmi_check_system(ac_dmi_table);
+ if (acpi_quirk_skip_acpi_ac_and_battery())
+ return -ENODEV;
- if (ac_check_pmic) {
- for (i = 0; i < ARRAY_SIZE(acpi_ac_blacklist); i++)
- if (acpi_dev_present(acpi_ac_blacklist[i].hid, "1",
- acpi_ac_blacklist[i].hrv)) {
- pr_info("found native %s PMIC, not loading\n",
- acpi_ac_blacklist[i].hid);
- return -ENODEV;
- }
- }
+ dmi_check_system(ac_dmi_table);
result = acpi_bus_register_driver(&acpi_ac_driver);
if (result < 0)
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 6e02448d15d9..e7934ba79b02 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -87,14 +87,23 @@ static int fch_misc_setup(struct apd_private_data *pdata)
if (ret < 0)
return -ENOENT;
- if (!acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj))
- clk_data->is_rv = obj->integer.value;
+ if (!acpi_dev_get_property(adev, "clk-name", ACPI_TYPE_STRING, &obj)) {
+ clk_data->name = devm_kzalloc(&adev->dev, obj->string.length,
+ GFP_KERNEL);
+
+ strcpy(clk_data->name, obj->string.pointer);
+ } else {
+ /* Set default name to mclk if entry missing in firmware */
+ clk_data->name = "mclk";
+ }
list_for_each_entry(rentry, &resource_list, node) {
clk_data->base = devm_ioremap(&adev->dev, rentry->res->start,
resource_size(rentry->res));
break;
}
+ if (!clk_data->base)
+ return -ENOMEM;
acpi_dev_free_resource_list(&resource_list);
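
The fch_misc_setup() change above reads an optional firmware-supplied clock name. A condensed sketch of that property lookup, assuming the same "clk-name" string property (the helper name is hypothetical):

#include <linux/acpi.h>

static const char *apd_clk_name(struct acpi_device *adev)
{
	const union acpi_object *obj;

	/* Property present in firmware: use the string it provides */
	if (!acpi_dev_get_property(adev, "clk-name", ACPI_TYPE_STRING, &obj))
		return obj->string.pointer;

	/* Entry missing in firmware: fall back to the default */
	return "mclk";
}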
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
new file mode 100644
index 000000000000..a12b55d81209
--- /dev/null
+++ b/drivers/acpi/acpi_pcc.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Author: Sudeep Holla <sudeep.holla@arm.com>
+ * Copyright 2021 Arm Limited
+ *
+ * The PCC Address Space, also referred to as the PCC Operation Region, is the
+ * region of the PCC subspace that follows the PCC signature. The PCC Operation
+ * Region works in conjunction with the PCC Table (Platform Communications
+ * Channel Table). PCC subspaces that are marked for use as PCC Operation
+ * Regions must not be used as PCC subspaces for the standard ACPI features
+ * such as CPPC, RASF, PDTT and MPST. These standard features must always use
+ * the PCC Table instead.
+ *
+ * This driver sets up the PCC Address Space and installs a handler to enable
+ * handling of the PCC OpRegion in the firmware.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/io.h>
+
+#include <acpi/pcc.h>
+
+struct pcc_data {
+ struct pcc_mbox_chan *pcc_chan;
+ void __iomem *pcc_comm_addr;
+ struct completion done;
+ struct mbox_client cl;
+ struct acpi_pcc_info ctx;
+};
+
+static struct acpi_pcc_info pcc_ctx;
+
+static void pcc_rx_callback(struct mbox_client *cl, void *m)
+{
+ struct pcc_data *data = container_of(cl, struct pcc_data, cl);
+
+ complete(&data->done);
+}
+
+static acpi_status
+acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
+ void *handler_context, void **region_context)
+{
+ struct pcc_data *data;
+ struct acpi_pcc_info *ctx = handler_context;
+ struct pcc_mbox_chan *pcc_chan;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return AE_NO_MEMORY;
+
+ data->cl.rx_callback = pcc_rx_callback;
+ data->cl.knows_txdone = true;
+ data->ctx.length = ctx->length;
+ data->ctx.subspace_id = ctx->subspace_id;
+ data->ctx.internal_buffer = ctx->internal_buffer;
+
+ init_completion(&data->done);
+ data->pcc_chan = pcc_mbox_request_channel(&data->cl, ctx->subspace_id);
+ if (IS_ERR(data->pcc_chan)) {
+ pr_err("Failed to find PCC channel for subspace %d\n",
+ ctx->subspace_id);
+ return AE_NOT_FOUND;
+ }
+
+ pcc_chan = data->pcc_chan;
+ data->pcc_comm_addr = acpi_os_ioremap(pcc_chan->shmem_base_addr,
+ pcc_chan->shmem_size);
+ if (!data->pcc_comm_addr) {
+ pr_err("Failed to ioremap PCC comm region mem for %d\n",
+ ctx->subspace_id);
+ return AE_NO_MEMORY;
+ }
+
+ *region_context = data;
+ return AE_OK;
+}
+
+static acpi_status
+acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
+ u32 bits, acpi_integer *value,
+ void *handler_context, void *region_context)
+{
+ int ret;
+ struct pcc_data *data = region_context;
+
+ reinit_completion(&data->done);
+
+ /* Write to Shared Memory */
+ memcpy_toio(data->pcc_comm_addr, (void *)value, data->ctx.length);
+
+ ret = mbox_send_message(data->pcc_chan->mchan, NULL);
+ if (ret < 0)
+ return AE_ERROR;
+
+ if (data->pcc_chan->mchan->mbox->txdone_irq)
+ wait_for_completion(&data->done);
+
+ mbox_client_txdone(data->pcc_chan->mchan, ret);
+
+ memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
+
+ return AE_OK;
+}
+
+void __init acpi_init_pcc(void)
+{
+ acpi_status status;
+
+ status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
+ ACPI_ADR_SPACE_PLATFORM_COMM,
+ &acpi_pcc_address_space_handler,
+ &acpi_pcc_address_space_setup,
+ &pcc_ctx);
+ if (ACPI_FAILURE(status))
+ pr_alert("OperationRegion handler could not be installed\n");
+}
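
Condensed, the read/write handler above performs one synchronous PCC round-trip. A sketch of that sequence, reusing the struct pcc_data from the file above (error handling trimmed; the helper name is hypothetical):

static int pcc_opregion_roundtrip(struct pcc_data *data, void *buf, u16 len)
{
	int ret;

	reinit_completion(&data->done);

	/* Stage the command in the PCC shared-memory region */
	memcpy_toio(data->pcc_comm_addr, buf, len);

	/* Ring the doorbell; the payload is already in shared memory */
	ret = mbox_send_message(data->pcc_chan->mchan, NULL);
	if (ret < 0)
		return ret;

	/* Transports with a completion interrupt signal pcc_rx_callback() */
	if (data->pcc_chan->mchan->mbox->txdone_irq)
		wait_for_completion(&data->done);
	mbox_client_txdone(data->pcc_chan->mchan, ret);

	/* Read the platform's response back out of shared memory */
	memcpy_fromio(buf, data->pcc_comm_addr, len);
	return 0;
}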
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 42ede059728c..990ff5b0aeb8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1733,13 +1733,12 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
{
struct acpi_device *device = context;
struct acpi_device *sibling;
- int result;
if (handle == device->handle)
return AE_CTRL_TERMINATE;
- result = acpi_bus_get_device(handle, &sibling);
- if (result)
+ sibling = acpi_fetch_acpi_dev(handle);
+ if (!sibling)
return AE_OK;
if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME))
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 82a75964343b..b29ba436944a 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -224,6 +224,11 @@ acpi_ev_pci_bar_region_setup(acpi_handle handle,
void *handler_context, void **region_context);
acpi_status
+acpi_ev_data_table_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context);
+
+acpi_status
acpi_ev_default_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 9db5ae0f79ea..0aa0d847cb25 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -138,6 +138,7 @@ struct acpi_object_region {
union acpi_operand_object *next;
acpi_physical_address address;
u32 length;
+ void *pointer; /* Only for data table regions */
};
struct acpi_object_method {
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index e2d0046799a2..533802fe73e9 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -35,7 +35,8 @@ acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
acpi_status
acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
- acpi_physical_address address, u8 flags);
+ acpi_physical_address address,
+ u8 flags, struct acpi_table_header *table);
void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc);
@@ -86,6 +87,7 @@ acpi_tb_release_table(struct acpi_table_header *table,
acpi_status
acpi_tb_install_standard_table(acpi_physical_address address,
u8 flags,
+ struct acpi_table_header *table,
u8 reload, u8 override, u32 *table_index);
void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc);
@@ -95,7 +97,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node);
acpi_status
acpi_tb_install_and_load_table(acpi_physical_address address,
- u8 flags, u8 override, u32 *table_index);
+ u8 flags,
+ struct acpi_table_header *table,
+ u8 override, u32 *table_index);
acpi_status acpi_tb_unload_table(u32 table_index);
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 639635291ab7..44c448269861 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -531,6 +531,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
obj_desc->region.length = table->length;
+ obj_desc->region.pointer = table;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
obj_desc,
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index c0cd7147a5a3..8f43d38dc4ca 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -386,7 +386,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node,
case ACPI_ADR_SPACE_DATA_TABLE:
handler = acpi_ex_data_table_space_handler;
- setup = NULL;
+ setup = acpi_ev_data_table_region_setup;
break;
default:
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 4ef43c8ef5e7..b9d77d327d38 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -162,6 +162,16 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
return_ACPI_STATUS(AE_NOT_EXIST);
}
+ if (region_obj->region.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ struct acpi_pcc_info *ctx =
+ handler_desc->address_space.context;
+
+ ctx->internal_buffer =
+ field_obj->field.internal_pcc_buffer;
+ ctx->length = (u16)region_obj->region.length;
+ ctx->subspace_id = (u8)region_obj->region.address;
+ }
+
/*
* We must exit the interpreter because the region setup will
* potentially execute control methods (for example, the _REG method
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 984c172453bf..d28dee929e61 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -408,6 +408,58 @@ acpi_ev_cmos_region_setup(acpi_handle handle,
/*******************************************************************************
*
+ * FUNCTION: acpi_ev_data_table_region_setup
+ *
+ * PARAMETERS: handle - Region we are interested in
+ * function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup a data_table_region
+ *
+ * MUTEX: Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_data_table_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ union acpi_operand_object *region_desc =
+ (union acpi_operand_object *)handle;
+ struct acpi_data_table_space_context *local_region_context;
+
+ ACPI_FUNCTION_TRACE(ev_data_table_region_setup);
+
+ if (function == ACPI_REGION_DEACTIVATE) {
+ if (*region_context) {
+ ACPI_FREE(*region_context);
+ *region_context = NULL;
+ }
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Create a new context */
+
+ local_region_context =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_data_table_space_context));
+ if (!(local_region_context)) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Save the data table pointer for use in the handler */
+
+ local_region_context->pointer = region_desc->region.pointer;
+
+ *region_context = local_region_context;
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ev_default_region_setup
*
* PARAMETERS: handle - Region we are interested in
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 0cd9b3738e76..6c2685a6a4c1 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -411,7 +411,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
acpi_ex_exit_interpreter();
status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
- TRUE, &table_index);
+ table, TRUE, &table_index);
acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 80b52ad55775..deb3674ae726 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -279,6 +279,7 @@ acpi_ex_create_region(u8 * aml_start,
obj_desc->region.space_id = space_id;
obj_desc->region.address = 0;
obj_desc->region.length = 0;
+ obj_desc->region.pointer = NULL;
obj_desc->region.node = node;
obj_desc->region.handler = NULL;
obj_desc->common.flags &=
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 06f3c9df1e22..8618500f23b3 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -330,12 +330,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
obj_desc->field.base_byte_offset,
source_desc->buffer.pointer, data_length);
- if ((obj_desc->field.region_obj->region.address ==
- PCC_MASTER_SUBSPACE
- && MASTER_SUBSPACE_COMMAND(obj_desc->field.
- base_byte_offset))
- || GENERIC_SUBSPACE_COMMAND(obj_desc->field.
- base_byte_offset)) {
+ if (MASTER_SUBSPACE_COMMAND(obj_desc->field.base_byte_offset)) {
/* Perform the write */
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index b639e930d642..44b7c350ed5c 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -1007,7 +1007,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
(walk_state, return_desc,
&temp_desc);
if (ACPI_FAILURE(status)) {
- goto cleanup;
+ return_ACPI_STATUS
+ (status);
}
return_desc = temp_desc;
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 82b713a9a193..48c19908fa4e 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -509,8 +509,15 @@ acpi_ex_data_table_space_handler(u32 function,
u64 *value,
void *handler_context, void *region_context)
{
+ struct acpi_data_table_space_context *mapping;
+ char *pointer;
+
ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
+ mapping = (struct acpi_data_table_space_context *) region_context;
+ pointer = ACPI_CAST_PTR(char, mapping->pointer) +
+ (address - ACPI_PTR_TO_PHYSADDR(mapping->pointer));
+
/*
* Perform the memory read or write. The bit_width was already
* validated.
@@ -518,14 +525,14 @@ acpi_ex_data_table_space_handler(u32 function,
switch (function) {
case ACPI_READ:
- memcpy(ACPI_CAST_PTR(char, value),
- ACPI_PHYSADDR_TO_PTR(address), ACPI_DIV_8(bit_width));
+ memcpy(ACPI_CAST_PTR(char, value), pointer,
+ ACPI_DIV_8(bit_width));
break;
case ACPI_WRITE:
- memcpy(ACPI_PHYSADDR_TO_PTR(address),
- ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
+ memcpy(pointer, ACPI_CAST_PTR(char, value),
+ ACPI_DIV_8(bit_width));
break;
default:
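
With virtually installed tables, the region's nominal physical address is just ACPI_PTR_TO_PHYSADDR() of the saved pointer, so the handler above derives a usable pointer from the mapping rather than round-tripping through ACPI_PHYSADDR_TO_PTR(). A sketch of that translation (the function name is hypothetical):

static char *data_table_ptr(struct acpi_data_table_space_context *mapping,
			    acpi_physical_address address)
{
	/* Offset of the access within the table, rebased onto the
	 * virtual pointer saved at region setup time. */
	return ACPI_CAST_PTR(char, mapping->pointer) +
	       (address - ACPI_PTR_TO_PHYSADDR(mapping->pointer));
}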
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 808fdf54aeeb..7ee2939c08cd 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -104,7 +104,9 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
/* Flush caches, as per ACPI specification */
- ACPI_FLUSH_CPU_CACHE();
+ if (sleep_state < ACPI_STATE_S4) {
+ ACPI_FLUSH_CPU_CACHE();
+ }
status = acpi_os_enter_sleep(sleep_state, sleep_control, 0);
if (status == AE_CTRL_TERMINATE) {
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 34a3825f25d3..5efa3d8e483e 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -110,7 +110,9 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
/* Flush caches, as per ACPI specification */
- ACPI_FLUSH_CPU_CACHE();
+ if (sleep_state < ACPI_STATE_S4) {
+ ACPI_FLUSH_CPU_CACHE();
+ }
status = acpi_os_enter_sleep(sleep_state, pm1a_control, pm1b_control);
if (status == AE_CTRL_TERMINATE) {
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index e4cde23a2906..ba77598ee43e 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -162,8 +162,6 @@ acpi_status acpi_enter_sleep_state_s4bios(void)
return_ACPI_STATUS(status);
}
- ACPI_FLUSH_CPU_CACHE();
-
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
(u32)acpi_gbl_FADT.s4_bios_request, 8);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index ebbca109edcb..20360a9db482 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -89,14 +89,27 @@ acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
{
/*
- * Initialize the table descriptor. Set the pointer to NULL, since the
- * table is not fully mapped at this time.
+ * Initialize the table descriptor. Set the pointer to NULL for external
+ * tables, since the table is not fully mapped at this time.
*/
memset(table_desc, 0, sizeof(struct acpi_table_desc));
table_desc->address = address;
table_desc->length = table->length;
table_desc->flags = flags;
ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature);
+
+ switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
+ case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
+ case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
+
+ table_desc->pointer = table;
+ break;
+
+ case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL:
+ default:
+
+ break;
+ }
}
/*******************************************************************************
@@ -132,9 +145,7 @@ acpi_tb_acquire_table(struct acpi_table_desc *table_desc,
case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
- table = ACPI_CAST_PTR(struct acpi_table_header,
- ACPI_PHYSADDR_TO_PTR(table_desc->
- address));
+ table = table_desc->pointer;
break;
default:
@@ -196,6 +207,8 @@ acpi_tb_release_table(struct acpi_table_header *table,
* PARAMETERS: table_desc - Table descriptor to be acquired
* address - Address of the table
* flags - Allocation flags of the table
+ * table - Pointer to the table (required for virtual
+ * origins, optional for physical)
*
* RETURN: Status
*
@@ -208,49 +221,52 @@ acpi_tb_release_table(struct acpi_table_header *table,
acpi_status
acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
- acpi_physical_address address, u8 flags)
+ acpi_physical_address address,
+ u8 flags, struct acpi_table_header *table)
{
- struct acpi_table_header *table_header;
+ u8 mapped_table = FALSE;
switch (flags & ACPI_TABLE_ORIGIN_MASK) {
case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL:
/* Get the length of the full table from the header */
- table_header =
- acpi_os_map_memory(address,
- sizeof(struct acpi_table_header));
- if (!table_header) {
- return (AE_NO_MEMORY);
+ if (!table) {
+ table =
+ acpi_os_map_memory(address,
+ sizeof(struct
+ acpi_table_header));
+ if (!table) {
+ return (AE_NO_MEMORY);
+ }
+
+ mapped_table = TRUE;
}
- acpi_tb_init_table_descriptor(table_desc, address, flags,
- table_header);
- acpi_os_unmap_memory(table_header,
- sizeof(struct acpi_table_header));
- return (AE_OK);
+ break;
case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
- table_header = ACPI_CAST_PTR(struct acpi_table_header,
- ACPI_PHYSADDR_TO_PTR(address));
- if (!table_header) {
- return (AE_NO_MEMORY);
+ if (!table) {
+ return (AE_BAD_PARAMETER);
}
- acpi_tb_init_table_descriptor(table_desc, address, flags,
- table_header);
- return (AE_OK);
+ break;
default:
- break;
+ /* Table is not valid yet */
+
+ return (AE_NO_MEMORY);
}
- /* Table is not valid yet */
+ acpi_tb_init_table_descriptor(table_desc, address, flags, table);
+ if (mapped_table) {
+ acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+ }
- return (AE_NO_MEMORY);
+ return (AE_OK);
}
/*******************************************************************************
@@ -335,7 +351,19 @@ void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc)
acpi_tb_release_table(table_desc->pointer, table_desc->length,
table_desc->flags);
- table_desc->pointer = NULL;
+
+ switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
+ case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL:
+
+ table_desc->pointer = NULL;
+ break;
+
+ case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
+ case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
+ default:
+
+ break;
+ }
return_VOID;
}
@@ -959,6 +987,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
*
* PARAMETERS: address - Physical address of the table
* flags - Allocation flags of the table
+ * table - Pointer to the table (required for
+ * virtual origins, optional for
+ * physical)
* override - Whether override should be performed
* table_index - Where table index is returned
*
@@ -970,7 +1001,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
acpi_status
acpi_tb_install_and_load_table(acpi_physical_address address,
- u8 flags, u8 override, u32 *table_index)
+ u8 flags,
+ struct acpi_table_header *table,
+ u8 override, u32 *table_index)
{
acpi_status status;
u32 i;
@@ -979,7 +1012,7 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
/* Install the table and load it into the namespace */
- status = acpi_tb_install_standard_table(address, flags, TRUE,
+ status = acpi_tb_install_standard_table(address, flags, table, TRUE,
override, &i);
if (ACPI_FAILURE(status)) {
goto exit;
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 5174abfa8af9..047bd094ba68 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -313,7 +313,7 @@ void acpi_tb_parse_fadt(void)
acpi_tb_install_standard_table((acpi_physical_address)acpi_gbl_FADT.
Xdsdt,
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
- FALSE, TRUE, &acpi_gbl_dsdt_index);
+ NULL, FALSE, TRUE, &acpi_gbl_dsdt_index);
/* If Hardware Reduced flag is set, there is no FACS */
@@ -322,14 +322,14 @@ void acpi_tb_parse_fadt(void)
acpi_tb_install_standard_table((acpi_physical_address)
acpi_gbl_FADT.facs,
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
- FALSE, TRUE,
+ NULL, FALSE, TRUE,
&acpi_gbl_facs_index);
}
if (acpi_gbl_FADT.Xfacs) {
acpi_tb_install_standard_table((acpi_physical_address)
acpi_gbl_FADT.Xfacs,
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
- FALSE, TRUE,
+ NULL, FALSE, TRUE,
&acpi_gbl_xfacs_index);
}
}
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 8d1e5b572493..5649f493a1ed 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -79,6 +79,8 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
* PARAMETERS: address - Address of the table (might be a virtual
* address depending on the table_flags)
* flags - Flags for the table
+ * table - Pointer to the table (required for virtual
+ * origins, optional for physical)
* reload - Whether reload should be performed
* override - Whether override should be performed
* table_index - Where the table index is returned
@@ -96,6 +98,7 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
acpi_status
acpi_tb_install_standard_table(acpi_physical_address address,
u8 flags,
+ struct acpi_table_header *table,
u8 reload, u8 override, u32 *table_index)
{
u32 i;
@@ -106,7 +109,8 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Acquire a temporary table descriptor for validation */
- status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags);
+ status =
+ acpi_tb_acquire_temp_table(&new_table_desc, address, flags, table);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
"Could not acquire table length at %8.8X%8.8X",
@@ -209,7 +213,8 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
if (ACPI_SUCCESS(status) && table) {
acpi_tb_acquire_temp_table(&new_table_desc,
ACPI_PTR_TO_PHYSADDR(table),
- ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL);
+ ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
+ table);
ACPI_ERROR_ONLY(override_type = "Logical");
goto finish_override;
}
@@ -220,7 +225,8 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
&address, &length);
if (ACPI_SUCCESS(status) && address && length) {
acpi_tb_acquire_temp_table(&new_table_desc, address,
- ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL);
+ ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
+ NULL);
ACPI_ERROR_ONLY(override_type = "Physical");
goto finish_override;
}
@@ -289,7 +295,8 @@ void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc)
if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) {
- ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address));
+ ACPI_FREE(table_desc->pointer);
+ table_desc->pointer = NULL;
}
table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL);
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 254823d494a2..4dac16bd63d3 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -101,7 +101,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
header->signature, ACPI_FORMAT_UINT64(address),
header->length));
- } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
+ } else if (ACPI_VALIDATE_RSDP_SIG(ACPI_CAST_PTR(struct acpi_table_rsdp,
+ header)->signature)) {
/* RSDP has no common fields */
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 4b9b329a5a92..5e8d50a4b6a9 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -328,7 +328,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
status = acpi_tb_install_standard_table(address,
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
- FALSE, TRUE,
+ NULL, FALSE, TRUE,
&table_index);
if (ACPI_SUCCESS(status) &&
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 38623049b962..87356d9ad613 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -227,9 +227,7 @@ unlock_and_exit:
*
* FUNCTION: acpi_install_table
*
- * PARAMETERS: address - Address of the ACPI table to be installed.
- * physical - Whether the address is a physical table
- * address or not
+ * PARAMETERS: table - Pointer to the ACPI table to be installed.
*
* RETURN: Status
*
@@ -240,22 +238,17 @@ unlock_and_exit:
******************************************************************************/
acpi_status ACPI_INIT_FUNCTION
-acpi_install_table(acpi_physical_address address, u8 physical)
+acpi_install_table(struct acpi_table_header *table)
{
acpi_status status;
- u8 flags;
u32 table_index;
ACPI_FUNCTION_TRACE(acpi_install_table);
- if (physical) {
- flags = ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL;
- } else {
- flags = ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL;
- }
-
- status = acpi_tb_install_standard_table(address, flags,
- FALSE, FALSE, &table_index);
+ status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table),
+ ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
+ table, FALSE, FALSE,
+ &table_index);
return_ACPI_STATUS(status);
}
@@ -264,6 +257,37 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_install_table)
/*******************************************************************************
*
+ * FUNCTION: acpi_install_physical_table
+ *
+ * PARAMETERS: address - Address of the ACPI table to be installed.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dynamically install an ACPI table.
+ * Note: This function should only be invoked after
+ * acpi_initialize_tables() and before acpi_load_tables().
+ *
+ ******************************************************************************/
+acpi_status ACPI_INIT_FUNCTION
+acpi_install_physical_table(acpi_physical_address address)
+{
+ acpi_status status;
+ u32 table_index;
+
+ ACPI_FUNCTION_TRACE(acpi_install_physical_table);
+
+ status = acpi_tb_install_standard_table(address,
+ ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
+ NULL, FALSE, FALSE,
+ &table_index);
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_install_physical_table)
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_load_table
*
* PARAMETERS: table - Pointer to a buffer containing the ACPI
@@ -298,7 +322,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table, u32 *table_idx)
ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
- FALSE, &table_index);
+ table, FALSE, &table_index);
if (table_idx) {
*table_idx = table_index;
}
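
Editorial note with a hedged sketch: the hunks above split the old two-mode acpi_install_table() into a pointer-based acpi_install_table() and a new acpi_install_physical_table(). The caller below is a minimal illustration, not part of the patch; the function and parameter names (install_extra_tables, mapped_table, table_pa) are hypothetical, and it assumes the usual ACPICA types from <acpi/acpi.h>. Like both entry points, it would run after acpi_initialize_tables() and before acpi_load_tables().

/* Hedged sketch: exercising both table-install entry points. */
static acpi_status __init install_extra_tables(struct acpi_table_header *mapped_table,
					       acpi_physical_address table_pa)
{
	acpi_status status;

	/* Table already mapped into kernel virtual memory: pass the pointer. */
	status = acpi_install_table(mapped_table);
	if (ACPI_FAILURE(status))
		return status;

	/* Table known only by physical address: ACPICA maps it internally. */
	return acpi_install_physical_table(table_pa);
}
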
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index e5ba9795ec69..8d7736d2d269 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -422,6 +422,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
ACPI_WARNING((AE_INFO,
"Obj %p, Reference Count is already zero, cannot decrement\n",
object));
+ return;
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_ALLOCATIONS,
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index edb2622fd35f..95cc2a9f3e05 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -545,7 +545,8 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
!= REGION_INTERSECTS) &&
(region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
- != REGION_INTERSECTS)))
+ != REGION_INTERSECTS) &&
+ !arch_is_platform_page(base_addr)))
return -EINVAL;
inject:
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0c8330ed1ffd..0c5c9acc6254 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -449,7 +449,7 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags)
return false;
pfn = PHYS_PFN(physical_addr);
- if (!pfn_valid(pfn)) {
+ if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
pr_warn_ratelimited(FW_WARN GHES_PFX
"Invalid address in generic error data: %#llx\n",
physical_addr);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 8afa85d6eb6a..ea31ae01458b 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -52,7 +52,7 @@ static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
static int battery_ac_is_broken;
-static int battery_check_pmic = 1;
+static int battery_quirk_notcharging;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -64,11 +64,6 @@ static const struct acpi_device_id battery_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, battery_device_ids);
-/* Lists of PMIC ACPI HIDs with an (often better) native battery driver */
-static const char * const acpi_battery_blacklist[] = {
- "INT33F4", /* X-Powers AXP288 PMIC */
-};
-
enum {
ACPI_BATTERY_ALARM_PRESENT,
ACPI_BATTERY_XINFO_PRESENT,
@@ -217,6 +212,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (battery_quirk_notcharging)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
else
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
break;
@@ -1104,10 +1101,9 @@ battery_ac_is_broken_quirk(const struct dmi_system_id *d)
return 0;
}
-static int __init
-battery_do_not_check_pmic_quirk(const struct dmi_system_id *d)
+static int __init battery_quirk_not_charging(const struct dmi_system_id *d)
{
- battery_check_pmic = 0;
+ battery_quirk_notcharging = 1;
return 0;
}
@@ -1140,19 +1136,16 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
},
},
{
- /* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */
- .callback = battery_do_not_check_pmic_quirk,
- .matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
- },
- },
- {
- /* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
- .callback = battery_do_not_check_pmic_quirk,
+ /*
+ * On Lenovo ThinkPads the BIOS specification defines
+ * a state in which the bits for charging and discharging
+ * are both set to 0. That state is "Not Charging".
+ */
+ .callback = battery_quirk_not_charging,
+ .ident = "Lenovo ThinkPad",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "80XF"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"),
},
},
{},
@@ -1279,19 +1272,12 @@ static struct acpi_driver acpi_battery_driver = {
static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
{
- unsigned int i;
int result;
- dmi_check_system(bat_dmi_table);
+ if (acpi_quirk_skip_acpi_ac_and_battery())
+ return;
- if (battery_check_pmic) {
- for (i = 0; i < ARRAY_SIZE(acpi_battery_blacklist); i++)
- if (acpi_dev_present(acpi_battery_blacklist[i], "1", -1)) {
- pr_info("found native %s PMIC, not loading\n",
- acpi_battery_blacklist[i]);
- return;
- }
- }
+ dmi_check_system(bat_dmi_table);
result = acpi_bus_register_driver(&acpi_battery_driver);
battery_driver_registered = (result == 0);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index fa923a929224..07f604832fd6 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -98,8 +98,8 @@ int acpi_bus_get_status(struct acpi_device *device)
acpi_status status;
unsigned long long sta;
- if (acpi_device_always_present(device)) {
- acpi_set_device_status(device, ACPI_STA_DEFAULT);
+ if (acpi_device_override_status(device, &sta)) {
+ acpi_set_device_status(device, sta);
return 0;
}
@@ -1043,6 +1043,7 @@ struct bus_type acpi_bus_type = {
.remove = acpi_device_remove,
.uevent = acpi_device_uevent,
};
+EXPORT_SYMBOL_GPL(acpi_bus_type);
/* --------------------------------------------------------------------------
Initialization/Cleanup
@@ -1320,6 +1321,7 @@ static int __init acpi_init(void)
pr_debug("%s: kset create error\n", __func__);
init_prmt();
+ acpi_init_pcc();
result = acpi_bus_init();
if (result) {
kobject_put(acpi_kobj);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index b62c87b8ce4a..866560cbb082 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -118,6 +118,8 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
*/
#define NUM_RETRIES 500ULL
+#define OVER_16BTS_MASK ~0xFFFFULL
+
#define define_one_cppc_ro(_name) \
static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
@@ -179,10 +181,11 @@ static struct attribute *cppc_attrs[] = {
&lowest_freq.attr,
NULL
};
+ATTRIBUTE_GROUPS(cppc);
static struct kobj_type cppc_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
- .default_attrs = cppc_attrs,
+ .default_groups = cppc_groups,
};
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
@@ -411,7 +414,7 @@ bool acpi_cpc_valid(void)
struct cpc_desc *cpc_ptr;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr)
return false;
@@ -604,47 +607,30 @@ static bool is_cppc_supported(int revision, int num_ent)
/*
* An example CPC table looks like the following.
*
- * Name(_CPC, Package()
- * {
- * 17,
- * NumEntries
- * 1,
- * // Revision
- * ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
- * // Highest Performance
- * ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
- * // Nominal Performance
- * ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
- * // Lowest Nonlinear Performance
- * ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
- * // Lowest Performance
- * ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
- * // Guaranteed Performance Register
- * ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
- * // Desired Performance Register
- * ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
- * ..
- * ..
- * ..
- *
- * }
+ * Name (_CPC, Package() {
+ * 17, // NumEntries
+ * 1, // Revision
+ * ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)}, // Highest Performance
+ * ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)}, // Nominal Performance
+ * ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)}, // Lowest Nonlinear Performance
+ * ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)}, // Lowest Performance
+ * ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)}, // Guaranteed Performance Register
+ * ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)}, // Desired Performance Register
+ * ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
+ * ...
+ * ...
+ * ...
+ * }
* Each Register() encodes how to access that specific register.
* e.g. a sample PCC entry has the following encoding:
*
- * Register (
- * PCC,
- * AddressSpaceKeyword
- * 8,
- * //RegisterBitWidth
- * 8,
- * //RegisterBitOffset
- * 0x30,
- * //RegisterAddress
- * 9
- * //AccessSize (subspace ID)
- * 0
- * )
- * }
+ * Register (
+ * PCC, // AddressSpaceKeyword
+ * 8, // RegisterBitWidth
+ * 8, // RegisterBitOffset
+ * 0x30, // RegisterAddress
+ * 9, // AccessSize (subspace ID)
+ * )
*/
#ifndef init_freq_invariance_cppc
@@ -746,9 +732,26 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
}
+ } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (gas_t->access_width < 1 || gas_t->access_width > 3) {
+ /*
+ * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
+ * SystemIO doesn't implement 64-bit
+ * registers.
+ */
+ pr_debug("Invalid access width %d for SystemIO register\n",
+ gas_t->access_width);
+ goto out_free;
+ }
+ if (gas_t->address & OVER_16BTS_MASK) {
+ /* SystemIO registers use 16-bit integer addresses */
+ pr_debug("Invalid IO port %llu for SystemIO register\n",
+ gas_t->address);
+ goto out_free;
+ }
} else {
if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
- /* Support only PCC ,SYS MEM and FFH type regs */
+ /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
pr_debug("Unsupported register type: %d\n", gas_t->space_id);
goto out_free;
}
@@ -912,18 +915,33 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
- int ret_val = 0;
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
if (reg_res->type == ACPI_TYPE_INTEGER) {
*val = reg_res->cpc_entry.int_value;
- return ret_val;
+ return 0;
}
*val = 0;
- if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ u32 width = 8 << (reg->access_width - 1);
+ u32 val_u32;
+ acpi_status status;
+
+ status = acpi_os_read_port((acpi_io_address)reg->address,
+ &val_u32, width);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("Error: Failed to read SystemIO port %llx\n",
+ reg->address);
+ return -EFAULT;
+ }
+
+ *val = val_u32;
+ return 0;
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
@@ -949,10 +967,10 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
default:
pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
reg->bit_width, pcc_ss_id);
- ret_val = -EFAULT;
+ return -EFAULT;
}
- return ret_val;
+ return 0;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
@@ -962,7 +980,20 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
- if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ u32 width = 8 << (reg->access_width - 1);
+ acpi_status status;
+
+ status = acpi_os_write_port((acpi_io_address)reg->address,
+ (u32)val, width);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("Error: Failed to write SystemIO port %llx\n",
+ reg->address);
+ return -EFAULT;
+ }
+
+ return 0;
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
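
Both SystemIO branches above share the same GAS access_width decode; a worked illustration follows (the helper name gas_io_width_bits is hypothetical, for exposition only):

/* Hedged sketch: access_width 1/2/3 selects an 8-/16-/32-bit port access. */
static inline u32 gas_io_width_bits(u8 access_width)
{
	return 8 << (access_width - 1);	/* 1 -> 8, 2 -> 16, 3 -> 32 */
}

SystemIO does not implement 64-bit registers, which is why acpi_cppc_processor_probe() rejects access_width values outside 1..3 earlier in this file.
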
@@ -1230,6 +1261,51 @@ out_err:
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
/**
+ * cppc_set_enable - Enable or disable CPPC on the processor by writing the
+ * Continuous Performance Control package EnableRegister field.
+ * @cpu: CPU for which to enable CPPC register.
+ * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
+ *
+ * Return: 0 for success, -ERRNO or -EIO otherwise.
+ */
+int cppc_set_enable(int cpu, bool enable)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_register_resource *enable_reg;
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret = -EINVAL;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -EINVAL;
+ }
+
+ enable_reg = &cpc_desc->cpc_regs[ENABLE];
+
+ if (CPC_IN_PCC(enable_reg)) {
+
+ if (pcc_ss_id < 0)
+ return -EIO;
+
+ ret = cpc_write(cpu, enable_reg, enable);
+ if (ret)
+ return ret;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+ return ret;
+ }
+
+ return cpc_write(cpu, enable_reg, enable);
+}
+EXPORT_SYMBOL_GPL(cppc_set_enable);
+
+/**
* cppc_set_perf - Set a CPU's performance controls.
* @cpu: CPU for which to set performance controls.
* @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
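
A hedged usage sketch for the newly exported cppc_set_enable() above; the wrapper name example_enable_cppc is hypothetical, it assumes the declaration from <acpi/cppc_acpi.h>, and a real caller would typically be a cpufreq driver:

static int example_enable_cppc(int cpu)
{
	int ret = cppc_set_enable(cpu, true);

	if (ret)
		pr_debug("CPPC enable failed for CPU%d: %d\n", cpu, ret);

	return ret;
}
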
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 19b33c028f35..cc6c97e7dcae 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -285,14 +285,12 @@ EXPORT_SYMBOL(acpi_device_set_power);
int acpi_bus_set_power(acpi_handle handle, int state)
{
- struct acpi_device *device;
- int result;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
- result = acpi_bus_get_device(handle, &device);
- if (result)
- return result;
+ if (device)
+ return acpi_device_set_power(device, state);
- return acpi_device_set_power(device, state);
+ return -ENODEV;
}
EXPORT_SYMBOL(acpi_bus_set_power);
@@ -410,21 +408,20 @@ EXPORT_SYMBOL_GPL(acpi_device_update_power);
int acpi_bus_update_power(acpi_handle handle, int *state_p)
{
- struct acpi_device *device;
- int result;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
- result = acpi_bus_get_device(handle, &device);
- return result ? result : acpi_device_update_power(device, state_p);
+ if (device)
+ return acpi_device_update_power(device, state_p);
+
+ return -ENODEV;
}
EXPORT_SYMBOL_GPL(acpi_bus_update_power);
bool acpi_bus_power_manageable(acpi_handle handle)
{
- struct acpi_device *device;
- int result;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
- result = acpi_bus_get_device(handle, &device);
- return result ? false : device->flags.power_manageable;
+ return device && device->flags.power_manageable;
}
EXPORT_SYMBOL(acpi_bus_power_manageable);
@@ -543,11 +540,9 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
bool acpi_bus_can_wakeup(acpi_handle handle)
{
- struct acpi_device *device;
- int result;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
- result = acpi_bus_get_device(handle, &device);
- return result ? false : device->wakeup.flags.valid;
+ return device && device->wakeup.flags.valid;
}
EXPORT_SYMBOL(acpi_bus_can_wakeup);
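
The conversions above all follow one idiom: acpi_fetch_acpi_dev() returns the struct acpi_device * directly (or NULL), replacing the acpi_bus_get_device() call-and-check pair. A minimal sketch of the resulting pattern (the helper name is hypothetical; the body mirrors acpi_bus_can_wakeup() above):

static bool example_handle_is_wakeup_capable(acpi_handle handle)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);

	return adev && adev->wakeup.flags.valid;
}
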
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 61271e61c307..d5d6403ba07b 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -53,6 +53,7 @@ static struct attribute *acpi_data_node_default_attrs[] = {
&data_node_path.attr,
NULL
};
+ATTRIBUTE_GROUPS(acpi_data_node_default);
#define to_data_node(k) container_of(k, struct acpi_data_node, kobj)
#define to_attr(a) container_of(a, struct acpi_data_node_attr, attr)
@@ -79,7 +80,7 @@ static void acpi_data_node_release(struct kobject *kobj)
static struct kobj_type acpi_data_node_ktype = {
.sysfs_ops = &acpi_data_node_sysfs_ops,
- .default_attrs = acpi_data_node_default_attrs,
+ .default_groups = acpi_data_node_default_groups,
.release = acpi_data_node_release,
};
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index c8e9b962e18c..a89bdbe00184 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -489,9 +489,8 @@ static ssize_t docked_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
- struct acpi_device *adev = NULL;
+ struct acpi_device *adev = acpi_fetch_acpi_dev(dock_station->handle);
- acpi_bus_get_device(dock_station->handle, &adev);
return sysfs_emit(buf, "%u\n", acpi_device_enumerated(adev));
}
static DEVICE_ATTR_RO(docked);
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index f4e9c2ef2f88..c0da24c9f8c3 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -46,7 +46,7 @@ release_buffer:
}
/*
- * Presentation of attributes which are defined for INT1045
+ * Presentation of attributes which are defined for INTC10xx
* They are:
* freq_mhz_low_clock : Set PCH FIVR switching freq for
* FIVR clock 19.2MHz and 24MHz
@@ -151,6 +151,7 @@ static int pch_fivr_remove(struct platform_device *pdev)
static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1045", 0},
{"INTC1049", 0},
+ {"INTC10A3", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index a24d5d7aa117..dc1f52a5b3f4 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -231,6 +231,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC1050", 0},
{"INTC1060", 0},
{"INTC1061", 0},
+ {"INTC10A4", 0},
+ {"INTC10A5", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index da5d5f0be2f2..42a556346548 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -37,6 +37,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC1050"},
{"INTC1060"},
{"INTC1061"},
+ {"INTC10A0"},
+ {"INTC10A1"},
+ {"INTC10A2"},
+ {"INTC10A3"},
+ {"INTC10A4"},
+ {"INTC10A5"},
{""},
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a6366d3f0c78..0077d2c85df8 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -92,8 +92,6 @@ enum ec_command {
enum {
EC_FLAGS_QUERY_ENABLED, /* Query is enabled */
- EC_FLAGS_QUERY_PENDING, /* Query is pending */
- EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */
EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */
@@ -166,12 +164,12 @@ struct acpi_ec_query {
struct transaction transaction;
struct work_struct work;
struct acpi_ec_query_handler *handler;
+ struct acpi_ec *ec;
};
-static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
-static void advance_transaction(struct acpi_ec *ec, bool interrupt);
+static int acpi_ec_submit_query(struct acpi_ec *ec);
+static bool advance_transaction(struct acpi_ec *ec, bool interrupt);
static void acpi_ec_event_handler(struct work_struct *work);
-static void acpi_ec_event_processor(struct work_struct *work);
struct acpi_ec *first_ec;
EXPORT_SYMBOL(first_ec);
@@ -443,24 +441,51 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
return true;
}
-static void acpi_ec_submit_query(struct acpi_ec *ec)
+static bool acpi_ec_submit_event(struct acpi_ec *ec)
{
acpi_ec_mask_events(ec);
if (!acpi_ec_event_enabled(ec))
- return;
- if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
+ return false;
+
+ if (ec->event_state == EC_EVENT_READY) {
ec_dbg_evt("Command(%s) submitted/blocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- ec->nr_pending_queries++;
- queue_work(ec_wq, &ec->work);
+
+ ec->event_state = EC_EVENT_IN_PROGRESS;
+ /*
+ * If events_to_process is greater than 0 at this point, the
+ * while () loop in acpi_ec_event_handler() is still running
+ * and incrementing events_to_process will cause it to invoke
+ * acpi_ec_submit_query() once more, so it is not necessary to
+ * queue up the event work to start the same loop again.
+ */
+ if (ec->events_to_process++ > 0)
+ return true;
+
+ ec->events_in_progress++;
+ return queue_work(ec_wq, &ec->work);
}
+
+ /*
+ * The event handling work has not been completed yet, so it needs to be
+ * flushed.
+ */
+ return true;
+}
+
+static void acpi_ec_complete_event(struct acpi_ec *ec)
+{
+ if (ec->event_state == EC_EVENT_IN_PROGRESS)
+ ec->event_state = EC_EVENT_COMPLETE;
}
-static void acpi_ec_complete_query(struct acpi_ec *ec)
+static void acpi_ec_close_event(struct acpi_ec *ec)
{
- if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+ if (ec->event_state != EC_EVENT_READY)
ec_dbg_evt("Command(%s) unblocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
+
+ ec->event_state = EC_EVENT_READY;
acpi_ec_unmask_events(ec);
}
@@ -487,12 +512,10 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
*/
static void acpi_ec_clear(struct acpi_ec *ec)
{
- int i, status;
- u8 value = 0;
+ int i;
for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
- status = acpi_ec_query(ec, &value);
- if (status || !value)
+ if (acpi_ec_submit_query(ec))
break;
}
if (unlikely(i == ACPI_EC_CLEAR_MAX))
@@ -518,7 +541,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
#ifdef CONFIG_PM_SLEEP
static void __acpi_ec_flush_work(void)
{
- drain_workqueue(ec_wq); /* flush ec->work */
+ flush_workqueue(ec_wq); /* flush ec->work */
flush_workqueue(ec_query_wq); /* flush queries */
}
@@ -549,8 +572,8 @@ void acpi_ec_flush_work(void)
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
- bool guarded = true;
unsigned long flags;
+ bool guarded;
spin_lock_irqsave(&ec->lock, flags);
/*
@@ -559,19 +582,15 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
* evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
* acceptable period.
*
- * The guarding period begins when EC_FLAGS_QUERY_PENDING is
- * flagged, which means SCI_EVT check has just been performed.
- * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
- * guarding should have already been performed (via
- * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
- * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
- * ACPI_EC_COMMAND_POLL state immediately.
+ * The guarding period is applicable if the event state is not
+ * EC_EVENT_READY, but otherwise if the current transaction is of the
+ * ACPI_EC_COMMAND_QUERY type, the guarding should have elapsed already
+ * and it should not be applied to let the transaction transition into
+ * the ACPI_EC_COMMAND_POLL state immediately.
*/
- if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
- ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
- !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
- (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
- guarded = false;
+ guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
+ ec->event_state != EC_EVENT_READY &&
+ (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
spin_unlock_irqrestore(&ec->lock, flags);
return guarded;
}
@@ -603,16 +622,26 @@ static int ec_transaction_completed(struct acpi_ec *ec)
static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
ec->curr->flags |= flag;
- if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
- if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
- flag == ACPI_EC_COMMAND_POLL)
- acpi_ec_complete_query(ec);
- if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
- flag == ACPI_EC_COMMAND_COMPLETE)
- acpi_ec_complete_query(ec);
- if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
- flag == ACPI_EC_COMMAND_COMPLETE)
- set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
+
+ if (ec->curr->command != ACPI_EC_COMMAND_QUERY)
+ return;
+
+ switch (ec_event_clearing) {
+ case ACPI_EC_EVT_TIMING_STATUS:
+ if (flag == ACPI_EC_COMMAND_POLL)
+ acpi_ec_close_event(ec);
+
+ return;
+
+ case ACPI_EC_EVT_TIMING_QUERY:
+ if (flag == ACPI_EC_COMMAND_COMPLETE)
+ acpi_ec_close_event(ec);
+
+ return;
+
+ case ACPI_EC_EVT_TIMING_EVENT:
+ if (flag == ACPI_EC_COMMAND_COMPLETE)
+ acpi_ec_complete_event(ec);
}
}
@@ -626,10 +655,11 @@ static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t
acpi_ec_mask_events(ec);
}
-static void advance_transaction(struct acpi_ec *ec, bool interrupt)
+static bool advance_transaction(struct acpi_ec *ec, bool interrupt)
{
struct transaction *t = ec->curr;
bool wakeup = false;
+ bool ret = false;
u8 status;
ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());
@@ -657,11 +687,9 @@ static void advance_transaction(struct acpi_ec *ec, bool interrupt)
*/
if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
- (!ec->nr_pending_queries ||
- test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
- clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
- acpi_ec_complete_query(ec);
- }
+ ec->event_state == EC_EVENT_COMPLETE)
+ acpi_ec_close_event(ec);
+
if (!t)
goto out;
}
@@ -696,10 +724,12 @@ static void advance_transaction(struct acpi_ec *ec, bool interrupt)
out:
if (status & ACPI_EC_FLAG_SCI)
- acpi_ec_submit_query(ec);
+ ret = acpi_ec_submit_event(ec);
if (wakeup && interrupt)
wake_up(&ec->wait);
+
+ return ret;
}
static void start_transaction(struct acpi_ec *ec)
@@ -1103,7 +1133,30 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
-static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
+static void acpi_ec_event_processor(struct work_struct *work)
+{
+ struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
+ struct acpi_ec_query_handler *handler = q->handler;
+ struct acpi_ec *ec = q->ec;
+
+ ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
+
+ if (handler->func)
+ handler->func(handler->data);
+ else if (handler->handle)
+ acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
+
+ ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
+
+ spin_lock_irq(&ec->lock);
+ ec->queries_in_progress--;
+ spin_unlock_irq(&ec->lock);
+
+ acpi_ec_put_query_handler(handler);
+ kfree(q);
+}
+
+static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
{
struct acpi_ec_query *q;
struct transaction *t;
@@ -1111,44 +1164,23 @@ static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
if (!q)
return NULL;
+
INIT_WORK(&q->work, acpi_ec_event_processor);
t = &q->transaction;
t->command = ACPI_EC_COMMAND_QUERY;
t->rdata = pval;
t->rlen = 1;
+ q->ec = ec;
return q;
}
-static void acpi_ec_delete_query(struct acpi_ec_query *q)
-{
- if (q) {
- if (q->handler)
- acpi_ec_put_query_handler(q->handler);
- kfree(q);
- }
-}
-
-static void acpi_ec_event_processor(struct work_struct *work)
-{
- struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
- struct acpi_ec_query_handler *handler = q->handler;
-
- ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
- if (handler->func)
- handler->func(handler->data);
- else if (handler->handle)
- acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
- ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
- acpi_ec_delete_query(q);
-}
-
-static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
+static int acpi_ec_submit_query(struct acpi_ec *ec)
{
+ struct acpi_ec_query *q;
u8 value = 0;
int result;
- struct acpi_ec_query *q;
- q = acpi_ec_create_query(&value);
+ q = acpi_ec_create_query(ec, &value);
if (!q)
return -ENOMEM;
@@ -1158,11 +1190,14 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
* bit to be cleared (and thus clearing the interrupt source).
*/
result = acpi_ec_transaction(ec, &q->transaction);
- if (!value)
- result = -ENODATA;
if (result)
goto err_exit;
+ if (!value) {
+ result = -ENODATA;
+ goto err_exit;
+ }
+
q->handler = acpi_ec_get_query_handler_by_value(ec, value);
if (!q->handler) {
result = -ENODATA;
@@ -1170,76 +1205,73 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
}
/*
- * It is reported that _Qxx are evaluated in a parallel way on
- * Windows:
+ * It is reported that _Qxx are evaluated in a parallel way on Windows:
* https://bugzilla.kernel.org/show_bug.cgi?id=94411
*
- * Put this log entry before schedule_work() in order to make
- * it appearing before any other log entries occurred during the
- * work queue execution.
+ * Put this log entry before queue_work() to make it appear in the log
+ * before any other messages emitted during workqueue handling.
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
- if (!queue_work(ec_query_wq, &q->work)) {
- ec_dbg_evt("Query(0x%02x) overlapped", value);
- result = -EBUSY;
- }
-err_exit:
- if (result)
- acpi_ec_delete_query(q);
- if (data)
- *data = value;
- return result;
-}
+ spin_lock_irq(&ec->lock);
-static void acpi_ec_check_event(struct acpi_ec *ec)
-{
- unsigned long flags;
+ ec->queries_in_progress++;
+ queue_work(ec_query_wq, &q->work);
- if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
- if (ec_guard(ec)) {
- spin_lock_irqsave(&ec->lock, flags);
- /*
- * Take care of the SCI_EVT unless no one else is
- * taking care of it.
- */
- if (!ec->curr)
- advance_transaction(ec, false);
- spin_unlock_irqrestore(&ec->lock, flags);
- }
- }
+ spin_unlock_irq(&ec->lock);
+
+ return 0;
+
+err_exit:
+ kfree(q);
+
+ return result;
}
static void acpi_ec_event_handler(struct work_struct *work)
{
- unsigned long flags;
struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
ec_dbg_evt("Event started");
- spin_lock_irqsave(&ec->lock, flags);
- while (ec->nr_pending_queries) {
- spin_unlock_irqrestore(&ec->lock, flags);
- (void)acpi_ec_query(ec, NULL);
- spin_lock_irqsave(&ec->lock, flags);
- ec->nr_pending_queries--;
- /*
- * Before exit, make sure that this work item can be
- * scheduled again. There might be QR_EC failures, leaving
- * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
- * item from being scheduled again.
- */
- if (!ec->nr_pending_queries) {
- if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
- ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
- acpi_ec_complete_query(ec);
- }
+ spin_lock_irq(&ec->lock);
+
+ while (ec->events_to_process) {
+ spin_unlock_irq(&ec->lock);
+
+ acpi_ec_submit_query(ec);
+
+ spin_lock_irq(&ec->lock);
+ ec->events_to_process--;
}
- spin_unlock_irqrestore(&ec->lock, flags);
+
+ /*
+ * Before exit, make sure that it will be possible to queue up the
+ * event handling work again regardless of whether or not the query
+ * queued up above is processed successfully.
+ */
+ if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT)
+ acpi_ec_complete_event(ec);
+ else
+ acpi_ec_close_event(ec);
+
+ spin_unlock_irq(&ec->lock);
ec_dbg_evt("Event stopped");
- acpi_ec_check_event(ec);
+ if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && ec_guard(ec)) {
+ spin_lock_irq(&ec->lock);
+
+ /* Take care of SCI_EVT unless someone else is doing that. */
+ if (!ec->curr)
+ advance_transaction(ec, false);
+
+ spin_unlock_irq(&ec->lock);
+ }
+
+ spin_lock_irq(&ec->lock);
+ ec->events_in_progress--;
+ spin_unlock_irq(&ec->lock);
}
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
@@ -2021,7 +2053,7 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
bool acpi_ec_dispatch_gpe(void)
{
- u32 ret;
+ bool work_in_progress = false;
if (!first_ec)
return acpi_any_gpe_status_set(U32_MAX);
@@ -2037,12 +2069,31 @@ bool acpi_ec_dispatch_gpe(void)
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
- ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
- if (ret == ACPI_INTERRUPT_HANDLED)
- pm_pr_dbg("ACPI EC GPE dispatched\n");
+ spin_lock_irq(&first_ec->lock);
+
+ if (acpi_ec_gpe_status_set(first_ec))
+ work_in_progress = advance_transaction(first_ec, false);
+
+ spin_unlock_irq(&first_ec->lock);
+
+ if (!work_in_progress)
+ return false;
+
+ pm_pr_dbg("ACPI EC GPE dispatched\n");
+
+ /* Drain EC work. */
+ do {
+ acpi_ec_flush_work();
+
+ pm_pr_dbg("ACPI EC work flushed\n");
+
+ spin_lock_irq(&first_ec->lock);
+
+ work_in_progress = first_ec->events_in_progress +
+ first_ec->queries_in_progress > 0;
- /* Flush the event and query workqueues. */
- acpi_ec_flush_work();
+ spin_unlock_irq(&first_ec->lock);
+ } while (work_in_progress && !pm_wakeup_pending());
return false;
}
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index fd39c14493ab..c074a0fae059 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -19,7 +19,7 @@ MODULE_DESCRIPTION("ACPI EC sysfs access driver");
MODULE_LICENSE("GPL");
static bool write_support;
-module_param(write_support, bool, 0644);
+module_param_hw(write_support, bool, other, 0644);
MODULE_PARM_DESC(write_support, "Dangerous, reboot and removal of battery may "
"be needed.");
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index dc9a6efa514b..dd9bb8ca2244 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -10,4 +10,5 @@
{"INT3404", }, /* Fan */ \
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
{"INTC1048", }, /* Fan for Alder Lake generation */ \
+ {"INTC10A2", }, /* Fan for Raptor Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index d91b560e8867..457e11d851b8 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -14,7 +14,7 @@
int early_acpi_osi_init(void);
int acpi_osi_init(void);
acpi_status acpi_os_initialize1(void);
-int acpi_scan_init(void);
+void acpi_scan_init(void);
#ifdef CONFIG_PCI
void acpi_pci_root_init(void);
void acpi_pci_link_init(void);
@@ -166,6 +166,13 @@ static inline void acpi_early_processor_osc(void) {}
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
+
+enum acpi_ec_event_state {
+ EC_EVENT_READY = 0, /* Event work can be submitted */
+ EC_EVENT_IN_PROGRESS, /* Event work is pending or being processed */
+ EC_EVENT_COMPLETE, /* Event work processing has completed */
+};
+
struct acpi_ec {
acpi_handle handle;
int gpe;
@@ -182,7 +189,10 @@ struct acpi_ec {
spinlock_t lock;
struct work_struct work;
unsigned long timestamp;
- unsigned long nr_pending_queries;
+ enum acpi_ec_event_state event_state;
+ unsigned int events_to_process;
+ unsigned int events_in_progress;
+ unsigned int queries_in_progress;
bool busy_polling;
unsigned int polling_guard;
};
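
An editorial summary of the event state flow these new fields implement in ec.c, reconstructed from the hunks above:

/*
 * EC_EVENT_READY       --(SCI_EVT set, acpi_ec_submit_event())--> EC_EVENT_IN_PROGRESS
 * EC_EVENT_IN_PROGRESS --(QUERY transaction completes)---------> EC_EVENT_COMPLETE
 * any state            --(acpi_ec_close_event())---------------> EC_EVENT_READY
 *
 * acpi_ec_close_event() also unmasks EC events, allowing the next
 * SCI_EVT to queue up the event handling work again.
 */
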
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 7dd80acf92c7..e5d7f2bda13f 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -678,10 +678,12 @@ static const char *spa_type_name(u16 type)
int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
+ guid_t guid;
int i;
+ import_guid(&guid, spa->range_guid);
for (i = 0; i < NFIT_UUID_MAX; i++)
- if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
+ if (guid_equal(to_nfit_uuid(i), &guid))
return i;
return -1;
}
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index b8795fc49097..3b818ab186be 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -254,9 +254,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
}
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
goto out_err;
- hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
- if (hotpluggable && !IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
- goto out_err;
+ hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
+ (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);
start = ma->base_address;
end = start + ma->length;
@@ -298,6 +297,47 @@ out_err_bad_srat:
out_err:
return -EINVAL;
}
+
+static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
+ void *arg, const unsigned long table_end)
+{
+ struct acpi_cedt_cfmws *cfmws;
+ int *fake_pxm = arg;
+ u64 start, end;
+ int node;
+
+ cfmws = (struct acpi_cedt_cfmws *)header;
+ start = cfmws->base_hpa;
+ end = cfmws->base_hpa + cfmws->window_size;
+
+ /* Skip if the SRAT already described the NUMA details for this HPA */
+ node = phys_to_target_node(start);
+ if (node != NUMA_NO_NODE)
+ return 0;
+
+ node = acpi_map_pxm_to_node(*fake_pxm);
+
+ if (node == NUMA_NO_NODE) {
+ pr_err("ACPI NUMA: Too many proximity domains while processing CFMWS.\n");
+ return -EINVAL;
+ }
+
+ if (numa_add_memblk(node, start, end) < 0) {
+ /* CXL driver must handle the NUMA_NO_NODE case */
+ pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
+ node, start, end);
+ }
+
+ /* Set the next available fake_pxm value */
+ (*fake_pxm)++;
+ return 0;
+}
+#else
+static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
+ void *arg, const unsigned long table_end)
+{
+ return 0;
+}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
static int __init acpi_parse_slit(struct acpi_table_header *table)
@@ -442,7 +482,7 @@ acpi_table_parse_srat(enum acpi_srat_type id,
int __init acpi_numa_init(void)
{
- int cnt = 0;
+ int i, fake_pxm, cnt = 0;
if (acpi_disabled)
return -EINVAL;
@@ -478,6 +518,22 @@ int __init acpi_numa_init(void)
/* SLIT: System Locality Information Table */
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
+ /*
+ * CXL Fixed Memory Window Structures (CFMWS) must be parsed
+ * after the SRAT. Create NUMA Nodes for CXL memory ranges that
+ * are defined in the CFMWS and not already defined in the SRAT.
+ * Initialize a fake_pxm as the first available PXM to emulate.
+ */
+
+ /* fake_pxm is the next unused PXM value after SRAT parsing */
+ for (i = 0, fake_pxm = -1; i < MAX_NUMNODES - 1; i++) {
+ if (node_to_pxm_map[i] > fake_pxm)
+ fake_pxm = node_to_pxm_map[i];
+ }
+ fake_pxm++;
+ acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws,
+ &fake_pxm);
+
if (cnt < 0)
return cnt;
else if (!parsed_numa_memblks)
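
A worked example of the fake_pxm computation above, with illustrative numbers: if the SRAT populated node_to_pxm_map with proximity domains 0..3, the loop leaves fake_pxm = 3, the increment makes it 4, and each CFMWS window not already covered by the SRAT is then assigned PXM 4, 5, 6, ... in turn by acpi_parse_cfmws().
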
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index cb7b900d9466..d54fb8e54671 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -606,12 +606,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name)
{
- int result;
- struct acpi_device *device;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_pci_link *link;
- result = acpi_bus_get_device(handle, &device);
- if (result) {
+ if (!device) {
acpi_handle_err(handle, "Invalid link device\n");
return -1;
}
@@ -658,12 +656,10 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
*/
int acpi_pci_link_free_irq(acpi_handle handle)
{
- struct acpi_device *device;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_pci_link *link;
- acpi_status result;
- result = acpi_bus_get_device(handle, &device);
- if (result) {
+ if (!device) {
acpi_handle_err(handle, "Invalid link device\n");
return -1;
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ab2f7dfb0c44..b76db99cced3 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -67,11 +67,10 @@ static struct acpi_scan_handler pci_root_handler = {
*/
int acpi_is_root_bridge(acpi_handle handle)
{
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
int ret;
- struct acpi_device *device;
- ret = acpi_bus_get_device(handle, &device);
- if (ret)
+ if (!device)
return 0;
ret = acpi_match_device_ids(device, root_device_ids);
@@ -215,11 +214,10 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
{
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_pci_root *root;
- struct acpi_device *device;
- if (acpi_bus_get_device(handle, &device) ||
- acpi_match_device_ids(device, root_device_ids))
+ if (!device || acpi_match_device_ids(device, root_device_ids))
return NULL;
root = acpi_driver_data(device);
@@ -324,7 +322,7 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
* acpi_pci_osc_control_set - Request control of PCI root _OSC features.
* @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
* @mask: Mask of _OSC bits to request control of, place to store control mask.
- * @req: Mask of _OSC bits the control of is essential to the caller.
+ * @support: _OSC supported capability.
*
* Run _OSC query for @mask and if that is successful, compare the returned
* mask of control bits with @req. If all of the @req bits are set in the
diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
new file mode 100644
index 000000000000..9abf350bd7a5
--- /dev/null
+++ b/drivers/acpi/pfr_telemetry.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI Platform Firmware Runtime Telemetry driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Chen Yu <yu.c.chen@intel.com>
+ *
+ * This driver allows user space to fetch telemetry data from the
+ * firmware with the help of the Platform Firmware Runtime Telemetry
+ * interface.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/uuid.h>
+
+#include <uapi/linux/pfrut.h>
+
+#define PFRT_LOG_EXEC_IDX 0
+#define PFRT_LOG_HISTORY_IDX 1
+
+#define PFRT_LOG_ERR 0
+#define PFRT_LOG_WARN 1
+#define PFRT_LOG_INFO 2
+#define PFRT_LOG_VERB 4
+
+#define PFRT_FUNC_SET_LEV 1
+#define PFRT_FUNC_GET_LEV 2
+#define PFRT_FUNC_GET_DATA 3
+
+#define PFRT_REVID_1 1
+#define PFRT_REVID_2 2
+#define PFRT_DEFAULT_REV_ID PFRT_REVID_1
+
+enum log_index {
+ LOG_STATUS_IDX = 0,
+ LOG_EXT_STATUS_IDX = 1,
+ LOG_MAX_SZ_IDX = 2,
+ LOG_CHUNK1_LO_IDX = 3,
+ LOG_CHUNK1_HI_IDX = 4,
+ LOG_CHUNK1_SZ_IDX = 5,
+ LOG_CHUNK2_LO_IDX = 6,
+ LOG_CHUNK2_HI_IDX = 7,
+ LOG_CHUNK2_SZ_IDX = 8,
+ LOG_ROLLOVER_CNT_IDX = 9,
+ LOG_RESET_CNT_IDX = 10,
+ LOG_NR_IDX
+};
+
+struct pfrt_log_device {
+ int index;
+ struct pfrt_log_info info;
+ struct device *parent_dev;
+ struct miscdevice miscdev;
+};
+
+/* pfrt_guid is the parameter for _DSM method */
+static const guid_t pfrt_log_guid =
+ GUID_INIT(0x75191659, 0x8178, 0x4D9D, 0xB8, 0x8F, 0xAC, 0x5E,
+ 0x5E, 0x93, 0xE8, 0xBF);
+
+static DEFINE_IDA(pfrt_log_ida);
+
+static inline struct pfrt_log_device *to_pfrt_log_dev(struct file *file)
+{
+ return container_of(file->private_data, struct pfrt_log_device, miscdev);
+}
+
+static int get_pfrt_log_data_info(struct pfrt_log_data_info *data_info,
+ struct pfrt_log_device *pfrt_log_dev)
+{
+ acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev);
+ union acpi_object *out_obj, in_obj, in_buf;
+ int ret = -EBUSY;
+
+ memset(data_info, 0, sizeof(*data_info));
+ memset(&in_obj, 0, sizeof(in_obj));
+ memset(&in_buf, 0, sizeof(in_buf));
+ in_obj.type = ACPI_TYPE_PACKAGE;
+ in_obj.package.count = 1;
+ in_obj.package.elements = &in_buf;
+ in_buf.type = ACPI_TYPE_INTEGER;
+ in_buf.integer.value = pfrt_log_dev->info.log_type;
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid,
+ pfrt_log_dev->info.log_revid, PFRT_FUNC_GET_DATA,
+ &in_obj, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return -EINVAL;
+
+ if (out_obj->package.count < LOG_NR_IDX ||
+ out_obj->package.elements[LOG_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_MAX_SZ_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK1_LO_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK1_HI_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK1_SZ_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK2_LO_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK2_HI_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK2_SZ_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_ROLLOVER_CNT_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_RESET_CNT_IDX].type != ACPI_TYPE_INTEGER)
+ goto free_acpi_buffer;
+
+ data_info->status = out_obj->package.elements[LOG_STATUS_IDX].integer.value;
+ data_info->ext_status =
+ out_obj->package.elements[LOG_EXT_STATUS_IDX].integer.value;
+ if (data_info->status != DSM_SUCCEED) {
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", data_info->status);
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n",
+ data_info->ext_status);
+ goto free_acpi_buffer;
+ }
+
+ data_info->max_data_size =
+ out_obj->package.elements[LOG_MAX_SZ_IDX].integer.value;
+ data_info->chunk1_addr_lo =
+ out_obj->package.elements[LOG_CHUNK1_LO_IDX].integer.value;
+ data_info->chunk1_addr_hi =
+ out_obj->package.elements[LOG_CHUNK1_HI_IDX].integer.value;
+ data_info->chunk1_size =
+ out_obj->package.elements[LOG_CHUNK1_SZ_IDX].integer.value;
+ data_info->chunk2_addr_lo =
+ out_obj->package.elements[LOG_CHUNK2_LO_IDX].integer.value;
+ data_info->chunk2_addr_hi =
+ out_obj->package.elements[LOG_CHUNK2_HI_IDX].integer.value;
+ data_info->chunk2_size =
+ out_obj->package.elements[LOG_CHUNK2_SZ_IDX].integer.value;
+ data_info->rollover_cnt =
+ out_obj->package.elements[LOG_ROLLOVER_CNT_IDX].integer.value;
+ data_info->reset_cnt =
+ out_obj->package.elements[LOG_RESET_CNT_IDX].integer.value;
+
+ ret = 0;
+
+free_acpi_buffer:
+ kfree(out_obj);
+
+ return ret;
+}
+
+static int set_pfrt_log_level(int level, struct pfrt_log_device *pfrt_log_dev)
+{
+ acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev);
+ union acpi_object *out_obj, *obj, in_obj, in_buf;
+ enum pfru_dsm_status status, ext_status;
+ int ret = 0;
+
+ memset(&in_obj, 0, sizeof(in_obj));
+ memset(&in_buf, 0, sizeof(in_buf));
+ in_obj.type = ACPI_TYPE_PACKAGE;
+ in_obj.package.count = 1;
+ in_obj.package.elements = &in_buf;
+ in_buf.type = ACPI_TYPE_INTEGER;
+ in_buf.integer.value = level;
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid,
+ pfrt_log_dev->info.log_revid, PFRT_FUNC_SET_LEV,
+ &in_obj, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return -EINVAL;
+
+ obj = &out_obj->package.elements[0];
+ status = obj->integer.value;
+ if (status != DSM_SUCCEED) {
+ obj = &out_obj->package.elements[1];
+ ext_status = obj->integer.value;
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", status);
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", ext_status);
+ ret = -EBUSY;
+ }
+
+ kfree(out_obj);
+
+ return ret;
+}
+
+static int get_pfrt_log_level(struct pfrt_log_device *pfrt_log_dev)
+{
+ acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev);
+ union acpi_object *out_obj, *obj;
+ enum pfru_dsm_status status, ext_status;
+ int ret = -EBUSY;
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid,
+ pfrt_log_dev->info.log_revid, PFRT_FUNC_GET_LEV,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return -EINVAL;
+
+ obj = &out_obj->package.elements[0];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ goto free_acpi_buffer;
+
+ status = obj->integer.value;
+ if (status != DSM_SUCCEED) {
+ obj = &out_obj->package.elements[1];
+ ext_status = obj->integer.value;
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", status);
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", ext_status);
+ goto free_acpi_buffer;
+ }
+
+ obj = &out_obj->package.elements[2];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ goto free_acpi_buffer;
+
+ ret = obj->integer.value;
+
+free_acpi_buffer:
+ kfree(out_obj);
+
+ return ret;
+}
+
+static int valid_log_level(u32 level)
+{
+ return level == PFRT_LOG_ERR || level == PFRT_LOG_WARN ||
+ level == PFRT_LOG_INFO || level == PFRT_LOG_VERB;
+}
+
+static int valid_log_type(u32 type)
+{
+ return type == PFRT_LOG_EXEC_IDX || type == PFRT_LOG_HISTORY_IDX;
+}
+
+static inline int valid_log_revid(u32 id)
+{
+ return id == PFRT_REVID_1 || id == PFRT_REVID_2;
+}
+
+static long pfrt_log_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct pfrt_log_device *pfrt_log_dev = to_pfrt_log_dev(file);
+ struct pfrt_log_data_info data_info;
+ struct pfrt_log_info info;
+ void __user *p;
+ int ret = 0;
+
+ p = (void __user *)arg;
+
+ switch (cmd) {
+ case PFRT_LOG_IOC_SET_INFO:
+ if (copy_from_user(&info, p, sizeof(info)))
+ return -EFAULT;
+
+ if (valid_log_revid(info.log_revid))
+ pfrt_log_dev->info.log_revid = info.log_revid;
+
+ if (valid_log_level(info.log_level)) {
+ ret = set_pfrt_log_level(info.log_level, pfrt_log_dev);
+ if (ret < 0)
+ return ret;
+
+ pfrt_log_dev->info.log_level = info.log_level;
+ }
+
+ if (valid_log_type(info.log_type))
+ pfrt_log_dev->info.log_type = info.log_type;
+
+ return 0;
+
+ case PFRT_LOG_IOC_GET_INFO:
+ ret = get_pfrt_log_level(pfrt_log_dev);
+ if (ret < 0)
+ return ret;
+
+ info.log_level = ret;
+
+ info.log_type = pfrt_log_dev->info.log_type;
+ info.log_revid = pfrt_log_dev->info.log_revid;
+ if (copy_to_user(p, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+
+ case PFRT_LOG_IOC_GET_DATA_INFO:
+ ret = get_pfrt_log_data_info(&data_info, pfrt_log_dev);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(p, &data_info, sizeof(struct pfrt_log_data_info)))
+ return -EFAULT;
+
+ return 0;
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int
+pfrt_log_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct pfrt_log_device *pfrt_log_dev;
+ struct pfrt_log_data_info info;
+ unsigned long psize, vsize;
+ phys_addr_t base_addr;
+ int ret;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EROFS;
+
+ /* changing from read to write with mprotect is not allowed */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ pfrt_log_dev = to_pfrt_log_dev(file);
+
+ ret = get_pfrt_log_data_info(&info, pfrt_log_dev);
+ if (ret)
+ return ret;
+
+ base_addr = (phys_addr_t)((info.chunk2_addr_hi << 32) | info.chunk2_addr_lo);
+ /* pfrt update has not been launched yet */
+ if (!base_addr)
+ return -ENODEV;
+
+ psize = info.max_data_size;
+ /* base address and total buffer size must be page aligned */
+ if (!PAGE_ALIGNED(base_addr) || !PAGE_ALIGNED(psize))
+ return -ENODEV;
+
+ vsize = vma->vm_end - vma->vm_start;
+ if (vsize > psize)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, vma->vm_start, PFN_DOWN(base_addr),
+ vsize, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static const struct file_operations acpi_pfrt_log_fops = {
+ .owner = THIS_MODULE,
+ .mmap = pfrt_log_mmap,
+ .unlocked_ioctl = pfrt_log_ioctl,
+ .llseek = noop_llseek,
+};
+
+static int acpi_pfrt_log_remove(struct platform_device *pdev)
+{
+ struct pfrt_log_device *pfrt_log_dev = platform_get_drvdata(pdev);
+
+ misc_deregister(&pfrt_log_dev->miscdev);
+
+ return 0;
+}
+
+static void pfrt_log_put_idx(void *data)
+{
+ struct pfrt_log_device *pfrt_log_dev = data;
+
+ ida_free(&pfrt_log_ida, pfrt_log_dev->index);
+}
+
+static int acpi_pfrt_log_probe(struct platform_device *pdev)
+{
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ struct pfrt_log_device *pfrt_log_dev;
+ int ret;
+
+ if (!acpi_has_method(handle, "_DSM")) {
+ dev_dbg(&pdev->dev, "Missing _DSM\n");
+ return -ENODEV;
+ }
+
+ pfrt_log_dev = devm_kzalloc(&pdev->dev, sizeof(*pfrt_log_dev), GFP_KERNEL);
+ if (!pfrt_log_dev)
+ return -ENOMEM;
+
+ ret = ida_alloc(&pfrt_log_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ pfrt_log_dev->index = ret;
+ ret = devm_add_action_or_reset(&pdev->dev, pfrt_log_put_idx, pfrt_log_dev);
+ if (ret)
+ return ret;
+
+ pfrt_log_dev->info.log_revid = PFRT_DEFAULT_REV_ID;
+ pfrt_log_dev->parent_dev = &pdev->dev;
+
+ pfrt_log_dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ pfrt_log_dev->miscdev.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pfrt%d",
+ pfrt_log_dev->index);
+ if (!pfrt_log_dev->miscdev.name)
+ return -ENOMEM;
+
+ pfrt_log_dev->miscdev.nodename = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "acpi_pfr_telemetry%d",
+ pfrt_log_dev->index);
+ if (!pfrt_log_dev->miscdev.nodename)
+ return -ENOMEM;
+
+ pfrt_log_dev->miscdev.fops = &acpi_pfrt_log_fops;
+ pfrt_log_dev->miscdev.parent = &pdev->dev;
+
+ ret = misc_register(&pfrt_log_dev->miscdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pfrt_log_dev);
+
+ return 0;
+}
+
+static const struct acpi_device_id acpi_pfrt_log_ids[] = {
+ {"INTC1081"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_pfrt_log_ids);
+
+static struct platform_driver acpi_pfrt_log_driver = {
+ .driver = {
+ .name = "pfr_telemetry",
+ .acpi_match_table = acpi_pfrt_log_ids,
+ },
+ .probe = acpi_pfrt_log_probe,
+ .remove = acpi_pfrt_log_remove,
+};
+module_platform_driver(acpi_pfrt_log_driver);
+
+MODULE_DESCRIPTION("Platform Firmware Runtime Update Telemetry driver");
+MODULE_LICENSE("GPL v2");
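
A hedged user-space sketch of the intended usage: query the buffer geometry with PFRT_LOG_IOC_GET_DATA_INFO, then mmap() the telemetry region read-only. It assumes the acpi_pfr_telemetry%d node name registered above with index 0 and the <linux/pfrut.h> UAPI header added by this series; error handling is abbreviated.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/pfrut.h>

int main(void)
{
	struct pfrt_log_data_info data_info;
	int fd = open("/dev/acpi_pfr_telemetry0", O_RDONLY);

	if (fd < 0)
		return 1;

	if (ioctl(fd, PFRT_LOG_IOC_GET_DATA_INFO, &data_info) == 0) {
		void *log = mmap(NULL, data_info.max_data_size, PROT_READ,
				 MAP_SHARED, fd, 0);

		if (log != MAP_FAILED) {
			/* parse telemetry records here */
			munmap(log, data_info.max_data_size);
		}
	}

	close(fd);
	return 0;
}
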
diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
new file mode 100644
index 000000000000..6bb0b778b5da
--- /dev/null
+++ b/drivers/acpi/pfr_update.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI Platform Firmware Runtime Update Device driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Chen Yu <yu.c.chen@intel.com>
+ *
+ * pfr_update driver is used for Platform Firmware Runtime
+ * Update, which includes code injection and driver update.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/uuid.h>
+
+#include <uapi/linux/pfrut.h>
+
+#define PFRU_FUNC_STANDARD_QUERY 0
+#define PFRU_FUNC_QUERY_UPDATE_CAP 1
+#define PFRU_FUNC_QUERY_BUF 2
+#define PFRU_FUNC_START 3
+
+#define PFRU_CODE_INJECT_TYPE 1
+#define PFRU_DRIVER_UPDATE_TYPE 2
+
+#define PFRU_REVID_1 1
+#define PFRU_REVID_2 2
+#define PFRU_DEFAULT_REV_ID PFRU_REVID_1
+
+enum cap_index {
+ CAP_STATUS_IDX = 0,
+ CAP_UPDATE_IDX = 1,
+ CAP_CODE_TYPE_IDX = 2,
+ CAP_FW_VER_IDX = 3,
+ CAP_CODE_RT_VER_IDX = 4,
+ CAP_DRV_TYPE_IDX = 5,
+ CAP_DRV_RT_VER_IDX = 6,
+ CAP_DRV_SVN_IDX = 7,
+ CAP_PLAT_ID_IDX = 8,
+ CAP_OEM_ID_IDX = 9,
+ CAP_OEM_INFO_IDX = 10,
+ CAP_NR_IDX
+};
+
+enum buf_index {
+ BUF_STATUS_IDX = 0,
+ BUF_EXT_STATUS_IDX = 1,
+ BUF_ADDR_LOW_IDX = 2,
+ BUF_ADDR_HI_IDX = 3,
+ BUF_SIZE_IDX = 4,
+ BUF_NR_IDX
+};
+
+enum update_index {
+ UPDATE_STATUS_IDX = 0,
+ UPDATE_EXT_STATUS_IDX = 1,
+ UPDATE_AUTH_TIME_LOW_IDX = 2,
+ UPDATE_AUTH_TIME_HI_IDX = 3,
+ UPDATE_EXEC_TIME_LOW_IDX = 4,
+ UPDATE_EXEC_TIME_HI_IDX = 5,
+ UPDATE_NR_IDX
+};
+
+enum pfru_start_action {
+ START_STAGE = 0,
+ START_ACTIVATE = 1,
+ START_STAGE_ACTIVATE = 2,
+};
+
+struct pfru_device {
+ u32 rev_id, index;
+ struct device *parent_dev;
+ struct miscdevice miscdev;
+};
+
+static DEFINE_IDA(pfru_ida);
+
+/*
+ * Manual reference:
+ * https://uefi.org/sites/default/files/resources/Intel_MM_OS_Interface_Spec_Rev100.pdf
+ *
+ * pfru_guid is the parameter for _DSM method
+ */
+static const guid_t pfru_guid =
+ GUID_INIT(0xECF9533B, 0x4A3C, 0x4E89, 0x93, 0x9E, 0xC7, 0x71,
+ 0x12, 0x60, 0x1C, 0x6D);
+
+/* pfru_code_inj_guid is the UUID to identify code injection EFI capsule file */
+static const guid_t pfru_code_inj_guid =
+ GUID_INIT(0xB2F84B79, 0x7B6E, 0x4E45, 0x88, 0x5F, 0x3F, 0xB9,
+ 0xBB, 0x18, 0x54, 0x02);
+
+/* pfru_drv_update_guid is the UUID to identify driver update EFI capsule file */
+static const guid_t pfru_drv_update_guid =
+ GUID_INIT(0x4569DD8C, 0x75F1, 0x429A, 0xA3, 0xD6, 0x24, 0xDE,
+ 0x80, 0x97, 0xA0, 0xDF);
+
+static inline int pfru_valid_revid(u32 id)
+{
+ return id == PFRU_REVID_1 || id == PFRU_REVID_2;
+}
+
+static inline struct pfru_device *to_pfru_dev(struct file *file)
+{
+ return container_of(file->private_data, struct pfru_device, miscdev);
+}
+
+static int query_capability(struct pfru_update_cap_info *cap_hdr,
+ struct pfru_device *pfru_dev)
+{
+ acpi_handle handle = ACPI_HANDLE(pfru_dev->parent_dev);
+ union acpi_object *out_obj;
+ int ret = -EINVAL;
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
+ pfru_dev->rev_id,
+ PFRU_FUNC_QUERY_UPDATE_CAP,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return ret;
+
+ if (out_obj->package.count < CAP_NR_IDX ||
+ out_obj->package.elements[CAP_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_UPDATE_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_CODE_TYPE_IDX].type != ACPI_TYPE_BUFFER ||
+ out_obj->package.elements[CAP_FW_VER_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_CODE_RT_VER_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_DRV_TYPE_IDX].type != ACPI_TYPE_BUFFER ||
+ out_obj->package.elements[CAP_DRV_RT_VER_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_DRV_SVN_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_PLAT_ID_IDX].type != ACPI_TYPE_BUFFER ||
+ out_obj->package.elements[CAP_OEM_ID_IDX].type != ACPI_TYPE_BUFFER ||
+ out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER)
+ goto free_acpi_buffer;
+
+ cap_hdr->status = out_obj->package.elements[CAP_STATUS_IDX].integer.value;
+ if (cap_hdr->status != DSM_SUCCEED) {
+ ret = -EBUSY;
+ dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", cap_hdr->status);
+ goto free_acpi_buffer;
+ }
+
+ cap_hdr->update_cap = out_obj->package.elements[CAP_UPDATE_IDX].integer.value;
+ memcpy(&cap_hdr->code_type,
+ out_obj->package.elements[CAP_CODE_TYPE_IDX].buffer.pointer,
+ out_obj->package.elements[CAP_CODE_TYPE_IDX].buffer.length);
+ cap_hdr->fw_version =
+ out_obj->package.elements[CAP_FW_VER_IDX].integer.value;
+ cap_hdr->code_rt_version =
+ out_obj->package.elements[CAP_CODE_RT_VER_IDX].integer.value;
+ memcpy(&cap_hdr->drv_type,
+ out_obj->package.elements[CAP_DRV_TYPE_IDX].buffer.pointer,
+ out_obj->package.elements[CAP_DRV_TYPE_IDX].buffer.length);
+ cap_hdr->drv_rt_version =
+ out_obj->package.elements[CAP_DRV_RT_VER_IDX].integer.value;
+ cap_hdr->drv_svn =
+ out_obj->package.elements[CAP_DRV_SVN_IDX].integer.value;
+ memcpy(&cap_hdr->platform_id,
+ out_obj->package.elements[CAP_PLAT_ID_IDX].buffer.pointer,
+ out_obj->package.elements[CAP_PLAT_ID_IDX].buffer.length);
+ memcpy(&cap_hdr->oem_id,
+ out_obj->package.elements[CAP_OEM_ID_IDX].buffer.pointer,
+ out_obj->package.elements[CAP_OEM_ID_IDX].buffer.length);
+ cap_hdr->oem_info_len =
+ out_obj->package.elements[CAP_OEM_INFO_IDX].buffer.length;
+
+ ret = 0;
+
+free_acpi_buffer:
+ kfree(out_obj);
+
+ return ret;
+}
+
+static int query_buffer(struct pfru_com_buf_info *info,
+ struct pfru_device *pfru_dev)
+{
+ acpi_handle handle = ACPI_HANDLE(pfru_dev->parent_dev);
+ union acpi_object *out_obj;
+ int ret = -EINVAL;
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
+ pfru_dev->rev_id, PFRU_FUNC_QUERY_BUF,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return ret;
+
+ if (out_obj->package.count < BUF_NR_IDX ||
+ out_obj->package.elements[BUF_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[BUF_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[BUF_ADDR_LOW_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[BUF_ADDR_HI_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER)
+ goto free_acpi_buffer;
+
+ info->status = out_obj->package.elements[BUF_STATUS_IDX].integer.value;
+ info->ext_status =
+ out_obj->package.elements[BUF_EXT_STATUS_IDX].integer.value;
+ if (info->status != DSM_SUCCEED) {
+ ret = -EBUSY;
+ dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", info->status);
+ dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", info->ext_status);
+
+ goto free_acpi_buffer;
+ }
+
+ info->addr_lo =
+ out_obj->package.elements[BUF_ADDR_LOW_IDX].integer.value;
+ info->addr_hi =
+ out_obj->package.elements[BUF_ADDR_HI_IDX].integer.value;
+ info->buf_size = out_obj->package.elements[BUF_SIZE_IDX].integer.value;
+
+ ret = 0;
+
+free_acpi_buffer:
+ kfree(out_obj);
+
+ return ret;
+}
+
+static int get_image_type(const struct efi_manage_capsule_image_header *img_hdr,
+ struct pfru_device *pfru_dev)
+{
+ const efi_guid_t *image_type_id = &img_hdr->image_type_id;
+
+ /* check whether this is a code injection or driver update */
+ if (guid_equal(image_type_id, &pfru_code_inj_guid))
+ return PFRU_CODE_INJECT_TYPE;
+
+ if (guid_equal(image_type_id, &pfru_drv_update_guid))
+ return PFRU_DRIVER_UPDATE_TYPE;
+
+ return -EINVAL;
+}
+
+static int adjust_efi_size(const struct efi_manage_capsule_image_header *img_hdr,
+ int size)
+{
+ /*
+ * The (u64 hw_ins) was introduced in UEFI spec version 2,
+ * and (u64 capsule_support) was introduced in version 3.
+ * The size needs to be adjusted accordingly. That is to
+ * say, version 1 should subtract the size of hw_ins+capsule_support,
+	 * and version 2 should subtract the size of capsule_support.
+ */
+ size += sizeof(struct efi_manage_capsule_image_header);
+ switch (img_hdr->ver) {
+ case 1:
+ return size - 2 * sizeof(u64);
+
+ case 2:
+ return size - sizeof(u64);
+
+ default:
+ /* only support version 1 and 2 */
+ return -EINVAL;
+ }
+}
+
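+/*
+ * Capsule layout as walked below (a sketch derived from this parsing
+ * code, not a normative description of the UEFI capsule format):
+ *
+ *   efi_capsule_header_t
+ *   struct efi_manage_capsule_header
+ *       offset_list[emb_drv_cnt + payload_cnt]
+ *   struct efi_manage_capsule_image_header (length depends on ->ver)
+ *   struct efi_image_auth (u64 monotonic count + auth_info certificate)
+ *   struct pfru_payload_hdr
+ */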
+static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
+ struct pfru_device *pfru_dev)
+{
+ struct pfru_payload_hdr *payload_hdr;
+ const efi_capsule_header_t *cap_hdr = data;
+ const struct efi_manage_capsule_header *m_hdr;
+ const struct efi_manage_capsule_image_header *m_img_hdr;
+ const struct efi_image_auth *auth;
+ int type, size;
+
+ /*
+ * If the code in the capsule is older than the current
+ * firmware code, the update will be rejected by the firmware,
+	 * so check the capsule's version upfront without engaging the
+	 * Management Mode update mechanism, which may be costly.
+ */
+ size = cap_hdr->headersize;
+ m_hdr = data + size;
+ /*
+	 * Current data structure size plus the variable-length offset array,
+	 * whose element count is (emb_drv_cnt + payload_cnt).
+ */
+ size += offsetof(struct efi_manage_capsule_header, offset_list) +
+ (m_hdr->emb_drv_cnt + m_hdr->payload_cnt) * sizeof(u64);
+ m_img_hdr = data + size;
+
+ type = get_image_type(m_img_hdr, pfru_dev);
+ if (type < 0)
+ return false;
+
+ size = adjust_efi_size(m_img_hdr, size);
+ if (size < 0)
+ return false;
+
+ auth = data + size;
+ size += sizeof(u64) + auth->auth_info.hdr.len;
+ payload_hdr = (struct pfru_payload_hdr *)(data + size);
+
+ /* finally compare the version */
+ if (type == PFRU_CODE_INJECT_TYPE)
+ return payload_hdr->rt_ver >= cap->code_rt_version;
+
+ return payload_hdr->rt_ver >= cap->drv_rt_version;
+}
+
+static void print_update_debug_info(struct pfru_updated_result *result,
+ struct pfru_device *pfru_dev)
+{
+ dev_dbg(pfru_dev->parent_dev, "Update result:\n");
+ dev_dbg(pfru_dev->parent_dev, "Authentication Time Low:%lld\n",
+ result->low_auth_time);
+ dev_dbg(pfru_dev->parent_dev, "Authentication Time High:%lld\n",
+ result->high_auth_time);
+ dev_dbg(pfru_dev->parent_dev, "Execution Time Low:%lld\n",
+ result->low_exec_time);
+ dev_dbg(pfru_dev->parent_dev, "Execution Time High:%lld\n",
+ result->high_exec_time);
+}
+
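+/*
+ * Evaluate the "start" _DSM function with the requested action (stage,
+ * activate, or both) and decode the status/timing package it returns.
+ */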
+static int start_update(int action, struct pfru_device *pfru_dev)
+{
+ union acpi_object *out_obj, in_obj, in_buf;
+ struct pfru_updated_result update_result;
+ acpi_handle handle;
+ int ret = -EINVAL;
+
+ memset(&in_obj, 0, sizeof(in_obj));
+ memset(&in_buf, 0, sizeof(in_buf));
+ in_obj.type = ACPI_TYPE_PACKAGE;
+ in_obj.package.count = 1;
+ in_obj.package.elements = &in_buf;
+ in_buf.type = ACPI_TYPE_INTEGER;
+ in_buf.integer.value = action;
+
+ handle = ACPI_HANDLE(pfru_dev->parent_dev);
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
+ pfru_dev->rev_id, PFRU_FUNC_START,
+ &in_obj, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return ret;
+
+ if (out_obj->package.count < UPDATE_NR_IDX ||
+ out_obj->package.elements[UPDATE_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[UPDATE_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER)
+ goto free_acpi_buffer;
+
+ update_result.status =
+ out_obj->package.elements[UPDATE_STATUS_IDX].integer.value;
+ update_result.ext_status =
+ out_obj->package.elements[UPDATE_EXT_STATUS_IDX].integer.value;
+
+ if (update_result.status != DSM_SUCCEED) {
+ ret = -EBUSY;
+ dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", update_result.status);
+ dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n",
+ update_result.ext_status);
+
+ goto free_acpi_buffer;
+ }
+
+ update_result.low_auth_time =
+ out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].integer.value;
+ update_result.high_auth_time =
+ out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].integer.value;
+ update_result.low_exec_time =
+ out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].integer.value;
+ update_result.high_exec_time =
+ out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].integer.value;
+
+ print_update_debug_info(&update_result, pfru_dev);
+ ret = 0;
+
+free_acpi_buffer:
+ kfree(out_obj);
+
+ return ret;
+}
+
+static long pfru_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct pfru_update_cap_info cap_hdr;
+ struct pfru_device *pfru_dev = to_pfru_dev(file);
+ void __user *p = (void __user *)arg;
+ u32 rev;
+ int ret;
+
+ switch (cmd) {
+ case PFRU_IOC_QUERY_CAP:
+ ret = query_capability(&cap_hdr, pfru_dev);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(p, &cap_hdr, sizeof(cap_hdr)))
+ return -EFAULT;
+
+ return 0;
+
+ case PFRU_IOC_SET_REV:
+ if (copy_from_user(&rev, p, sizeof(rev)))
+ return -EFAULT;
+
+ if (!pfru_valid_revid(rev))
+ return -EINVAL;
+
+ pfru_dev->rev_id = rev;
+
+ return 0;
+
+ case PFRU_IOC_STAGE:
+ return start_update(START_STAGE, pfru_dev);
+
+ case PFRU_IOC_ACTIVATE:
+ return start_update(START_ACTIVATE, pfru_dev);
+
+ case PFRU_IOC_STAGE_ACTIVATE:
+ return start_update(START_STAGE_ACTIVATE, pfru_dev);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
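+/*
+ * Illustrative userspace flow for this interface: open the misc device
+ * (e.g. /dev/acpi_pfr_update0), use PFRU_IOC_QUERY_CAP to check that the
+ * platform accepts updates, write() the EFI capsule into the device node
+ * (which lands it in the communication buffer below), then issue
+ * PFRU_IOC_STAGE_ACTIVATE to stage and activate it in one step.
+ */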
+static ssize_t pfru_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct pfru_device *pfru_dev = to_pfru_dev(file);
+ struct pfru_update_cap_info cap;
+ struct pfru_com_buf_info buf_info;
+ phys_addr_t phy_addr;
+ struct iov_iter iter;
+ struct iovec iov;
+ char *buf_ptr;
+ int ret;
+
+ ret = query_buffer(&buf_info, pfru_dev);
+ if (ret)
+ return ret;
+
+ if (len > buf_info.buf_size)
+ return -EINVAL;
+
+ iov.iov_base = (void __user *)buf;
+ iov.iov_len = len;
+ iov_iter_init(&iter, WRITE, &iov, 1, len);
+
+ /* map the communication buffer */
+ phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo);
+ buf_ptr = memremap(phy_addr, buf_info.buf_size, MEMREMAP_WB);
+ if (!buf_ptr)
+ return -ENOMEM;
+
+ if (!copy_from_iter_full(buf_ptr, len, &iter)) {
+ ret = -EINVAL;
+ goto unmap;
+ }
+
+ /* check if the capsule header has a valid version number */
+ ret = query_capability(&cap, pfru_dev);
+ if (ret)
+ goto unmap;
+
+ if (!applicable_image(buf_ptr, &cap, pfru_dev))
+ ret = -EINVAL;
+
+unmap:
+ memunmap(buf_ptr);
+
+ return ret ?: len;
+}
+
+static const struct file_operations acpi_pfru_fops = {
+ .owner = THIS_MODULE,
+ .write = pfru_write,
+ .unlocked_ioctl = pfru_ioctl,
+ .llseek = noop_llseek,
+};
+
+static int acpi_pfru_remove(struct platform_device *pdev)
+{
+ struct pfru_device *pfru_dev = platform_get_drvdata(pdev);
+
+ misc_deregister(&pfru_dev->miscdev);
+
+ return 0;
+}
+
+static void pfru_put_idx(void *data)
+{
+ struct pfru_device *pfru_dev = data;
+
+ ida_free(&pfru_ida, pfru_dev->index);
+}
+
+static int acpi_pfru_probe(struct platform_device *pdev)
+{
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ struct pfru_device *pfru_dev;
+ int ret;
+
+ if (!acpi_has_method(handle, "_DSM")) {
+ dev_dbg(&pdev->dev, "Missing _DSM\n");
+ return -ENODEV;
+ }
+
+ pfru_dev = devm_kzalloc(&pdev->dev, sizeof(*pfru_dev), GFP_KERNEL);
+ if (!pfru_dev)
+ return -ENOMEM;
+
+ ret = ida_alloc(&pfru_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ pfru_dev->index = ret;
+ ret = devm_add_action_or_reset(&pdev->dev, pfru_put_idx, pfru_dev);
+ if (ret)
+ return ret;
+
+ pfru_dev->rev_id = PFRU_DEFAULT_REV_ID;
+ pfru_dev->parent_dev = &pdev->dev;
+
+ pfru_dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ pfru_dev->miscdev.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pfru%d", pfru_dev->index);
+ if (!pfru_dev->miscdev.name)
+ return -ENOMEM;
+
+ pfru_dev->miscdev.nodename = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "acpi_pfr_update%d", pfru_dev->index);
+ if (!pfru_dev->miscdev.nodename)
+ return -ENOMEM;
+
+ pfru_dev->miscdev.fops = &acpi_pfru_fops;
+ pfru_dev->miscdev.parent = &pdev->dev;
+
+ ret = misc_register(&pfru_dev->miscdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pfru_dev);
+
+ return 0;
+}
+
+static const struct acpi_device_id acpi_pfru_ids[] = {
+ {"INTC1080"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_pfru_ids);
+
+static struct platform_driver acpi_pfru_driver = {
+ .driver = {
+ .name = "pfr_update",
+ .acpi_match_table = acpi_pfru_ids,
+ },
+ .probe = acpi_pfru_probe,
+ .remove = acpi_pfru_remove,
+};
+module_platform_driver(acpi_pfru_driver);
+
+MODULE_DESCRIPTION("Platform Firmware Runtime Update device driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index 9cde299eba88..f20dbda1a831 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -25,7 +25,7 @@ struct intel_pmic_opregion {
struct mutex lock;
struct acpi_lpat_conversion_table *lpat_table;
struct regmap *regmap;
- struct intel_pmic_opregion_data *data;
+ const struct intel_pmic_opregion_data *data;
struct intel_pmic_regs_handler_ctx ctx;
};
@@ -53,7 +53,7 @@ static acpi_status intel_pmic_power_handler(u32 function,
{
struct intel_pmic_opregion *opregion = region_context;
struct regmap *regmap = opregion->regmap;
- struct intel_pmic_opregion_data *d = opregion->data;
+ const struct intel_pmic_opregion_data *d = opregion->data;
int reg, bit, result;
if (bits != 32 || !value64)
@@ -95,7 +95,7 @@ static int pmic_read_temp(struct intel_pmic_opregion *opregion,
return 0;
}
- temp = acpi_lpat_raw_to_temp(opregion->lpat_table, raw_temp);
+ temp = opregion->data->lpat_raw_to_temp(opregion->lpat_table, raw_temp);
if (temp < 0)
return temp;
@@ -135,7 +135,7 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg,
static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg,
int bit, u32 function, u64 *value)
{
- struct intel_pmic_opregion_data *d = opregion->data;
+ const struct intel_pmic_opregion_data *d = opregion->data;
struct regmap *regmap = opregion->regmap;
if (!d->get_policy || !d->update_policy)
@@ -171,7 +171,7 @@ static acpi_status intel_pmic_thermal_handler(u32 function,
void *handler_context, void *region_context)
{
struct intel_pmic_opregion *opregion = region_context;
- struct intel_pmic_opregion_data *d = opregion->data;
+ const struct intel_pmic_opregion_data *d = opregion->data;
int reg, bit, result;
if (bits != 32 || !value64)
@@ -255,7 +255,7 @@ static acpi_status intel_pmic_regs_handler(u32 function,
int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
struct regmap *regmap,
- struct intel_pmic_opregion_data *d)
+ const struct intel_pmic_opregion_data *d)
{
acpi_status status = AE_OK;
struct intel_pmic_opregion *opregion;
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
u32 value, u32 mask)
{
- struct intel_pmic_opregion_data *d;
+ const struct intel_pmic_opregion_data *d;
int ret;
if (!intel_pmic_opregion) {
diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h
index 89379476a1df..d956b03a6ca0 100644
--- a/drivers/acpi/pmic/intel_pmic.h
+++ b/drivers/acpi/pmic/intel_pmic.h
@@ -2,6 +2,8 @@
#ifndef __INTEL_PMIC_H
#define __INTEL_PMIC_H
+#include <acpi/acpi_lpat.h>
+
struct pmic_table {
int address; /* operation region address */
int reg; /* corresponding thermal register */
@@ -17,6 +19,8 @@ struct intel_pmic_opregion_data {
int (*update_policy)(struct regmap *r, int reg, int bit, int enable);
int (*exec_mipi_pmic_seq_element)(struct regmap *r, u16 i2c_address,
u32 reg_address, u32 value, u32 mask);
+ int (*lpat_raw_to_temp)(struct acpi_lpat_conversion_table *lpat_table,
+ int raw);
struct pmic_table *power_table;
int power_table_count;
struct pmic_table *thermal_table;
@@ -25,6 +29,8 @@ struct intel_pmic_opregion_data {
int pmic_i2c_address;
};
-int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d);
+int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
+ struct regmap *regmap,
+ const struct intel_pmic_opregion_data *d);
#endif
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
index bd7621edd60b..e247615189fa 100644
--- a/drivers/acpi/pmic/intel_pmic_bxtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -369,13 +369,14 @@ intel_bxtwc_pmic_update_policy(struct regmap *regmap,
return regmap_update_bits(regmap, reg, mask, val);
}
-static struct intel_pmic_opregion_data intel_bxtwc_pmic_opregion_data = {
+static const struct intel_pmic_opregion_data intel_bxtwc_pmic_opregion_data = {
.get_power = intel_bxtwc_pmic_get_power,
.update_power = intel_bxtwc_pmic_update_power,
.get_raw_temp = intel_bxtwc_pmic_get_raw_temp,
.update_aux = intel_bxtwc_pmic_update_aux,
.get_policy = intel_bxtwc_pmic_get_policy,
.update_policy = intel_bxtwc_pmic_update_policy,
+ .lpat_raw_to_temp = acpi_lpat_raw_to_temp,
.power_table = power_table,
.power_table_count = ARRAY_SIZE(power_table),
.thermal_table = thermal_table,
diff --git a/drivers/acpi/pmic/intel_pmic_bytcrc.c b/drivers/acpi/pmic/intel_pmic_bytcrc.c
index 2a692cc4b7ae..9ea79f210965 100644
--- a/drivers/acpi/pmic/intel_pmic_bytcrc.c
+++ b/drivers/acpi/pmic/intel_pmic_bytcrc.c
@@ -271,13 +271,14 @@ static int intel_crc_pmic_update_policy(struct regmap *regmap,
return 0;
}
-static struct intel_pmic_opregion_data intel_crc_pmic_opregion_data = {
+static const struct intel_pmic_opregion_data intel_crc_pmic_opregion_data = {
.get_power = intel_crc_pmic_get_power,
.update_power = intel_crc_pmic_update_power,
.get_raw_temp = intel_crc_pmic_get_raw_temp,
.update_aux = intel_crc_pmic_update_aux,
.get_policy = intel_crc_pmic_get_policy,
.update_policy = intel_crc_pmic_update_policy,
+ .lpat_raw_to_temp = acpi_lpat_raw_to_temp,
.power_table = power_table,
.power_table_count= ARRAY_SIZE(power_table),
.thermal_table = thermal_table,
diff --git a/drivers/acpi/pmic/intel_pmic_chtcrc.c b/drivers/acpi/pmic/intel_pmic_chtcrc.c
index 2900dc3074d2..f9301c6f098e 100644
--- a/drivers/acpi/pmic/intel_pmic_chtcrc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtcrc.c
@@ -23,7 +23,8 @@
* intel_soc_pmic_exec_mipi_pmic_seq_element work on devices with a
* CHT Crystal Cove PMIC.
*/
-static struct intel_pmic_opregion_data intel_chtcrc_pmic_opregion_data = {
+static const struct intel_pmic_opregion_data intel_chtcrc_pmic_opregion_data = {
+ .lpat_raw_to_temp = acpi_lpat_raw_to_temp,
.pmic_i2c_address = 0x6e,
};
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index fef7831d0d63..418eec523025 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -94,10 +94,11 @@ static int chtdc_ti_pmic_get_raw_temp(struct regmap *regmap, int reg)
return ((buf[0] & 0x03) << 8) | buf[1];
}
-static struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
+static const struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
.get_power = chtdc_ti_pmic_get_power,
.update_power = chtdc_ti_pmic_update_power,
.get_raw_temp = chtdc_ti_pmic_get_raw_temp,
+ .lpat_raw_to_temp = acpi_lpat_raw_to_temp,
.power_table = chtdc_ti_power_table,
.power_table_count = ARRAY_SIZE(chtdc_ti_power_table),
.thermal_table = chtdc_ti_thermal_table,
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index 7ffd5624b8e1..f2c42f4c79ca 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -253,10 +253,11 @@ static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap,
* The thermal table and ops are empty, we do not support the Thermal opregion
* (DPTF) due to lacking documentation.
*/
-static struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = {
+static const struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = {
.get_power = intel_cht_wc_pmic_get_power,
.update_power = intel_cht_wc_pmic_update_power,
.exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element,
+ .lpat_raw_to_temp = acpi_lpat_raw_to_temp,
.power_table = power_table,
.power_table_count = ARRAY_SIZE(power_table),
};
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index cbe08e600fa3..61bbe4c24d87 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -293,11 +293,33 @@ static int intel_xpower_exec_mipi_pmic_seq_element(struct regmap *regmap,
return ret;
}
-static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
+static int intel_xpower_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+ int raw)
+{
+ struct acpi_lpat first = lpat_table->lpat[0];
+ struct acpi_lpat last = lpat_table->lpat[lpat_table->lpat_count - 1];
+
+ /*
+	 * Some LPAT tables in the ACPI device for the AXP288 PMIC for some
+	 * reason only describe a small temperature range, e.g. 27° - 37°
+	 * Celsius, resulting in errors when the tablet is idle in a cool room.
+ *
+ * To avoid these errors clamp the raw value to be inside the LPAT.
+ */
+ if (first.raw < last.raw)
+ raw = clamp(raw, first.raw, last.raw);
+ else
+ raw = clamp(raw, last.raw, first.raw);
+
+ return acpi_lpat_raw_to_temp(lpat_table, raw);
+}
+
+static const struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
.get_power = intel_xpower_pmic_get_power,
.update_power = intel_xpower_pmic_update_power,
.get_raw_temp = intel_xpower_pmic_get_raw_temp,
.exec_mipi_pmic_seq_element = intel_xpower_exec_mipi_pmic_seq_element,
+ .lpat_raw_to_temp = intel_xpower_lpat_raw_to_temp,
.power_table = power_table,
.power_table_count = ARRAY_SIZE(power_table),
.thermal_table = thermal_table,
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 5dcb02ededbc..8c4a73a1351e 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -81,9 +81,9 @@ struct acpi_power_resource *to_power_resource(struct acpi_device *device)
static struct acpi_power_resource *acpi_power_get_context(acpi_handle handle)
{
- struct acpi_device *device;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
- if (acpi_bus_get_device(handle, &device))
+ if (!device)
return NULL;
return to_power_resource(device);
@@ -716,6 +716,9 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
mutex_lock(&acpi_device_lock);
+ dev_dbg(&dev->dev, "Enabling wakeup power (count %d)\n",
+ dev->wakeup.prepare_count);
+
if (dev->wakeup.prepare_count++)
goto out;
@@ -734,8 +737,11 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
if (err) {
acpi_power_off_list(&dev->wakeup.resources);
dev->wakeup.prepare_count = 0;
+ goto out;
}
+ dev_dbg(&dev->dev, "Wakeup power enabled\n");
+
out:
mutex_unlock(&acpi_device_lock);
return err;
@@ -757,6 +763,9 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
mutex_lock(&acpi_device_lock);
+ dev_dbg(&dev->dev, "Disabling wakeup power (count %d)\n",
+ dev->wakeup.prepare_count);
+
/* Do nothing if wakeup power has not been enabled for this device. */
if (dev->wakeup.prepare_count <= 0)
goto out;
@@ -782,8 +791,11 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
if (err) {
dev_err(&dev->dev, "Cannot turn off wakeup power resources\n");
dev->wakeup.flags.valid = 0;
+ goto out;
}
+ dev_dbg(&dev->dev, "Wakeup power disabled\n");
+
out:
mutex_unlock(&acpi_device_lock);
return err;
@@ -916,15 +928,14 @@ static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource
struct acpi_device *acpi_add_power_resource(acpi_handle handle)
{
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_power_resource *resource;
- struct acpi_device *device = NULL;
union acpi_object acpi_object;
struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
acpi_status status;
u8 state_dummy;
int result;
- acpi_bus_get_device(handle, &device);
if (device)
return device;
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 0cca7991f186..4322f2da6d10 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -127,7 +127,7 @@ static int
acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
{
return single_open(file, acpi_system_wakeup_device_seq_show,
- PDE_DATA(inode));
+ pde_data(inode));
}
static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 77541f939be3..368a9edefd0c 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -98,8 +98,13 @@ static int acpi_soft_cpu_online(unsigned int cpu)
struct acpi_processor *pr = per_cpu(processors, cpu);
struct acpi_device *device;
- if (!pr || acpi_bus_get_device(pr->handle, &device))
+ if (!pr)
+ return 0;
+
+ device = acpi_fetch_acpi_dev(pr->handle);
+ if (!device)
return 0;
+
/*
* CPU got physically hotplugged and onlined for the first time:
* Initialize missing things.
@@ -125,9 +130,8 @@ static int acpi_soft_cpu_online(unsigned int cpu)
static int acpi_soft_cpu_dead(unsigned int cpu)
{
struct acpi_processor *pr = per_cpu(processors, cpu);
- struct acpi_device *device;
- if (!pr || acpi_bus_get_device(pr->handle, &device))
+ if (!pr || !acpi_fetch_acpi_dev(pr->handle))
return 0;
acpi_processor_reevaluate_tstate(pr, true);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 76ef1bcc8848..86560a28751b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -20,6 +20,7 @@
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
+#include <linux/minmax.h>
#include <acpi/processor.h>
/*
@@ -400,13 +401,10 @@ static int acpi_cst_latency_cmp(const void *a, const void *b)
static void acpi_cst_latency_swap(void *a, void *b, int n)
{
struct acpi_processor_cx *x = a, *y = b;
- u32 tmp;
if (!(x->valid && y->valid))
return;
- tmp = x->latency;
- x->latency = y->latency;
- y->latency = tmp;
+ swap(x->latency, y->latency);
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
@@ -567,7 +565,8 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
- ACPI_FLUSH_CPU_CACHE();
+ if (cx->type == ACPI_STATE_C3)
+ ACPI_FLUSH_CPU_CACHE();
while (1) {
@@ -1101,7 +1100,7 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
status = acpi_get_parent(handle, &pr_ahandle);
while (ACPI_SUCCESS(status)) {
- acpi_bus_get_device(pr_ahandle, &d);
+ d = acpi_fetch_acpi_dev(pr_ahandle);
handle = pr_ahandle;
if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index a3d34e3f9f94..d8b2dfcd59b5 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -53,10 +53,17 @@ static int phys_package_first_cpu(int cpu)
static int cpu_has_cpufreq(unsigned int cpu)
{
- struct cpufreq_policy policy;
- if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu))
+ struct cpufreq_policy *policy;
+
+ if (!acpi_processor_cpufreq_init)
return 0;
- return 1;
+
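+	/* Only check that a policy exists; drop the reference right away. */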
+ policy = cpufreq_cpu_get(cpu);
+ if (policy) {
+ cpufreq_cpu_put(policy);
+ return 1;
+ }
+ return 0;
}
static int cpufreq_get_max_state(unsigned int cpu)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 2366f54d8e9c..d0986bda2964 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -687,9 +687,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (index)
return -EINVAL;
- ret = acpi_bus_get_device(obj->reference.handle, &device);
- if (ret)
- return ret == -ENODEV ? -EINVAL : ret;
+ device = acpi_fetch_acpi_dev(obj->reference.handle);
+ if (!device)
+ return -EINVAL;
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
@@ -719,9 +719,8 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
struct fwnode_handle *ref_fwnode;
- ret = acpi_bus_get_device(element->reference.handle,
- &device);
- if (ret)
+ device = acpi_fetch_acpi_dev(element->reference.handle);
+ if (!device)
return -EINVAL;
nargs = 0;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 3c25ce8c95ba..c2d494784425 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -791,9 +791,9 @@ static acpi_status acpi_res_consumer_cb(acpi_handle handle, u32 depth,
{
struct resource *res = context;
struct acpi_device **consumer = (struct acpi_device **) ret;
- struct acpi_device *adev;
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
- if (acpi_bus_get_device(handle, &adev))
+ if (!adev)
return AE_OK;
if (acpi_dev_consumes_res(adev, res)) {
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 2c80765670bc..1331756d4cfc 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -19,6 +19,7 @@
#include <linux/dma-map-ops.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h>
+#include <linux/crc32.h>
#include "internal.h"
@@ -135,12 +136,12 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
void **ret_p)
{
- struct acpi_device *device = NULL;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_device_physical_node *pn;
bool second_pass = (bool)data;
acpi_status status = AE_OK;
- if (acpi_bus_get_device(handle, &device))
+ if (!device)
return AE_OK;
if (device->handler && !device->handler->hotplug.enabled) {
@@ -180,10 +181,10 @@ static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
void **ret_p)
{
- struct acpi_device *device = NULL;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_device_physical_node *pn;
- if (acpi_bus_get_device(handle, &device))
+ if (!device)
return AE_OK;
mutex_lock(&device->physical_node_lock);
@@ -599,6 +600,19 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
}
EXPORT_SYMBOL(acpi_bus_get_device);
+/**
+ * acpi_fetch_acpi_dev - Retrieve ACPI device object.
+ * @handle: ACPI handle associated with the requested ACPI device object.
+ *
+ * Return a pointer to the ACPI device object associated with @handle, if
+ * present, or NULL otherwise.
+ */
+struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle)
+{
+ return handle_to_device(handle, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_fetch_acpi_dev);
+
static void get_acpi_device(void *dev)
{
acpi_dev_get(dev);
@@ -654,6 +668,19 @@ static int acpi_tie_acpi_dev(struct acpi_device *adev)
return 0;
}
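+
+/*
+ * Cache a CRC of the device's _PLD (physical location of device) output
+ * so that kernel users can cheaply compare device locations without
+ * re-evaluating _PLD.
+ */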
+static void acpi_store_pld_crc(struct acpi_device *adev)
+{
+ struct acpi_pld_info *pld;
+ acpi_status status;
+
+ status = acpi_get_physical_device_location(adev->handle, &pld);
+ if (ACPI_FAILURE(status))
+ return;
+
+ adev->pld_crc = crc32(~0, pld, sizeof(*pld));
+ ACPI_FREE(pld);
+}
+
static int __acpi_device_add(struct acpi_device *device,
void (*release)(struct device *))
{
@@ -712,6 +739,8 @@ static int __acpi_device_add(struct acpi_device *device,
if (device->wakeup.flags.valid)
list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
+ acpi_store_pld_crc(device);
+
mutex_unlock(&acpi_device_lock);
if (device->parent)
@@ -797,9 +826,15 @@ static const char * const acpi_ignore_dep_ids[] = {
NULL
};
+/* List of HIDs for which we honor the deps of matching ACPI devices when checking _DEP lists. */
+static const char * const acpi_honor_dep_ids[] = {
+ "INT3472", /* Camera sensor PMIC / clk and regulator info */
+ NULL
+};
+
static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
{
- struct acpi_device *device = NULL;
+ struct acpi_device *device;
acpi_status status;
/*
@@ -814,7 +849,9 @@ static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
status = acpi_get_parent(handle, &handle);
if (ACPI_FAILURE(status))
return status == AE_NULL_ENTRY ? NULL : acpi_root;
- } while (acpi_bus_get_device(handle, &device));
+
+ device = acpi_fetch_acpi_dev(handle);
+ } while (!device);
return device;
}
@@ -1340,11 +1377,11 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
if (info->valid & ACPI_VALID_HID) {
acpi_add_id(pnp, info->hardware_id.string);
pnp->type.platform_id = 1;
- }
- if (info->valid & ACPI_VALID_CID) {
- cid_list = &info->compatible_id_list;
- for (i = 0; i < cid_list->count; i++)
- acpi_add_id(pnp, cid_list->ids[i].string);
+ if (info->valid & ACPI_VALID_CID) {
+ cid_list = &info->compatible_id_list;
+ for (i = 0; i < cid_list->count; i++)
+ acpi_add_id(pnp, cid_list->ids[i].string);
+ }
}
if (info->valid & ACPI_VALID_ADR) {
pnp->bus_address = info->address;
@@ -1695,6 +1732,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{
struct list_head resource_list;
bool is_serial_bus_slave = false;
+ static const struct acpi_device_id ignore_serial_bus_ids[] = {
/*
* These devices have multiple I2cSerialBus resources and an i2c-client
* must be instantiated for each, each with its own i2c_device_id.
@@ -1703,11 +1741,18 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
* drivers/platform/x86/i2c-multi-instantiate.c driver, which knows
* which i2c_device_id to use for each resource.
*/
- static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
{"BSG1160", },
{"BSG2150", },
{"INT33FE", },
{"INT3515", },
+ /*
+	 * HIDs of devices with a UartSerialBusV2 resource for which userspace
+	 * expects a regular tty cdev to be created (instead of the in-kernel
+	 * serdev) and which have a kernel driver that expects a platform_device,
+	 * such as the rfkill-gpio driver.
+ */
+ {"BCM4752", },
+ {"LNV4752", },
{}
};
@@ -1721,8 +1766,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
fwnode_property_present(&device->fwnode, "baud")))
return true;
- /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */
- if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids))
+ if (!acpi_match_device_ids(device, ignore_serial_bus_ids))
return false;
INIT_LIST_HEAD(&resource_list);
@@ -1762,8 +1806,12 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
struct acpi_dep_data *dep;
list_for_each_entry(dep, &acpi_dep_list, node) {
- if (dep->consumer == adev->handle)
+ if (dep->consumer == adev->handle) {
+ if (dep->honor_dep)
+ adev->flags.honor_deps = 1;
+
adev->dep_unmet++;
+ }
}
}
@@ -1967,7 +2015,7 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
for (count = 0, i = 0; i < dep_devices.count; i++) {
struct acpi_device_info *info;
struct acpi_dep_data *dep;
- bool skip;
+ bool skip, honor_dep;
status = acpi_get_object_info(dep_devices.handles[i], &info);
if (ACPI_FAILURE(status)) {
@@ -1976,6 +2024,7 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
}
skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids);
+ honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids);
kfree(info);
if (skip)
@@ -1989,6 +2038,7 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
dep->supplier = dep_devices.handles[i];
dep->consumer = handle;
+ dep->honor_dep = honor_dep;
mutex_lock(&acpi_dep_list_lock);
list_add_tail(&dep->node , &acpi_dep_list);
@@ -2003,11 +2053,10 @@ static bool acpi_bus_scan_second_pass;
static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
struct acpi_device **adev_p)
{
- struct acpi_device *device = NULL;
+ struct acpi_device *device = acpi_fetch_acpi_dev(handle);
acpi_object_type acpi_type;
int type;
- acpi_bus_get_device(handle, &device);
if (device)
goto out;
@@ -2155,8 +2204,8 @@ static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
register_dock_dependent_device(device, ejd);
acpi_bus_get_status(device);
- /* Skip devices that are not present. */
- if (!acpi_device_is_present(device)) {
+ /* Skip devices that are not ready for enumeration (e.g. not present) */
+ if (!acpi_dev_ready_for_enumeration(device)) {
device->flags.initialized = false;
acpi_device_clear_enumerated(device);
device->flags.power_manageable = 0;
@@ -2319,6 +2368,23 @@ void acpi_dev_clear_dependencies(struct acpi_device *supplier)
EXPORT_SYMBOL_GPL(acpi_dev_clear_dependencies);
/**
+ * acpi_dev_ready_for_enumeration - Check if the ACPI device is ready for enumeration
+ * @device: Pointer to the &struct acpi_device to check
+ *
+ * Check if the device is present and has no unmet dependencies.
+ *
+ * Return true if the device is ready for enumeration. Otherwise, return false.
+ */
+bool acpi_dev_ready_for_enumeration(const struct acpi_device *device)
+{
+ if (device->flags.honor_deps && device->dep_unmet)
+ return false;
+
+ return acpi_device_is_present(device);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_ready_for_enumeration);
+
+/**
* acpi_dev_get_first_consumer_dev - Return ACPI device dependent on @supplier
* @supplier: Pointer to the dependee device
*
@@ -2436,42 +2502,33 @@ int acpi_bus_register_early_device(int type)
}
EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
-static int acpi_bus_scan_fixed(void)
+static void acpi_bus_scan_fixed(void)
{
- int result = 0;
-
- /*
- * Enumerate all fixed-feature devices.
- */
if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
- struct acpi_device *device = NULL;
-
- result = acpi_add_single_object(&device, NULL,
- ACPI_BUS_TYPE_POWER_BUTTON, false);
- if (result)
- return result;
-
- device->flags.match_driver = true;
- result = device_attach(&device->dev);
- if (result < 0)
- return result;
-
- device_init_wakeup(&device->dev, true);
+ struct acpi_device *adev = NULL;
+
+ acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_POWER_BUTTON,
+ false);
+ if (adev) {
+ adev->flags.match_driver = true;
+ if (device_attach(&adev->dev) >= 0)
+ device_init_wakeup(&adev->dev, true);
+ else
+ dev_dbg(&adev->dev, "No driver\n");
+ }
}
if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
- struct acpi_device *device = NULL;
-
- result = acpi_add_single_object(&device, NULL,
- ACPI_BUS_TYPE_SLEEP_BUTTON, false);
- if (result)
- return result;
-
- device->flags.match_driver = true;
- result = device_attach(&device->dev);
+ struct acpi_device *adev = NULL;
+
+ acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_SLEEP_BUTTON,
+ false);
+ if (adev) {
+ adev->flags.match_driver = true;
+ if (device_attach(&adev->dev) < 0)
+ dev_dbg(&adev->dev, "No driver\n");
+ }
}
-
- return result < 0 ? result : 0;
}
static void __init acpi_get_spcr_uart_addr(void)
@@ -2492,9 +2549,8 @@ static void __init acpi_get_spcr_uart_addr(void)
static bool acpi_scan_initialized;
-int __init acpi_scan_init(void)
+void __init acpi_scan_init(void)
{
- int result;
acpi_status status;
struct acpi_table_stao *stao_ptr;
@@ -2544,33 +2600,23 @@ int __init acpi_scan_init(void)
/*
* Enumerate devices in the ACPI namespace.
*/
- result = acpi_bus_scan(ACPI_ROOT_OBJECT);
- if (result)
- goto out;
+ if (acpi_bus_scan(ACPI_ROOT_OBJECT))
+ goto unlock;
- result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
- if (result)
- goto out;
+ acpi_root = acpi_fetch_acpi_dev(ACPI_ROOT_OBJECT);
+ if (!acpi_root)
+ goto unlock;
/* Fixed feature devices do not exist on HW-reduced platform */
- if (!acpi_gbl_reduced_hardware) {
- result = acpi_bus_scan_fixed();
- if (result) {
- acpi_detach_data(acpi_root->handle,
- acpi_scan_drop_device);
- acpi_device_del(acpi_root);
- acpi_bus_put_acpi_device(acpi_root);
- goto out;
- }
- }
+ if (!acpi_gbl_reduced_hardware)
+ acpi_bus_scan_fixed();
acpi_turn_off_unused_power_resources();
acpi_scan_initialized = true;
- out:
+unlock:
mutex_unlock(&acpi_scan_lock);
- return result;
}
static struct acpi_probe_entry *ape;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index eaa47753b758..a60ff5dfed3a 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -73,7 +73,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
acpi_set_waking_vector(acpi_wakeup_address);
}
- ACPI_FLUSH_CPU_CACHE();
#endif
pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
acpi_enable_wakeup_devices(acpi_state);
@@ -566,8 +565,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
u32 acpi_state = acpi_target_sleep_state;
int error;
- ACPI_FLUSH_CPU_CACHE();
-
trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
switch (acpi_state) {
case ACPI_STATE_S1:
@@ -877,11 +874,11 @@ static inline void acpi_sleep_syscore_init(void) {}
#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
-static bool nosigcheck;
+static int sigcheck = -1; /* Default behaviour is just to warn */
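+/*
+ * sigcheck semantics: -1 warns on a hardware signature mismatch during
+ * resume (default), 0 disables the check entirely, and 1 additionally
+ * stores the signature in the swsusp image header so the boot kernel
+ * can refuse to resume on a mismatch.
+ */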
-void __init acpi_no_s4_hw_signature(void)
+void __init acpi_check_s4_hw_signature(int check)
{
- nosigcheck = true;
+ sigcheck = check;
}
static int acpi_hibernation_begin(pm_message_t stage)
@@ -903,8 +900,6 @@ static int acpi_hibernation_enter(void)
{
acpi_status status = AE_OK;
- ACPI_FLUSH_CPU_CACHE();
-
/* This shouldn't return. If it returns, we have a problem */
status = acpi_enter_sleep_state(ACPI_STATE_S4);
/* Reprogram control registers */
@@ -1009,12 +1004,28 @@ static void acpi_sleep_hibernate_setup(void)
hibernation_set_ops(old_suspend_ordering ?
&acpi_hibernation_ops_old : &acpi_hibernation_ops);
sleep_states[ACPI_STATE_S4] = 1;
- if (nosigcheck)
+ if (!sigcheck)
return;
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
- if (facs)
+ if (facs) {
+ /*
+		 * s4_hardware_signature is a local variable used only to warn
+		 * about a mismatch after we attempt to resume (in violation
+		 * of the ACPI specification).
+ */
s4_hardware_signature = facs->hardware_signature;
+
+ if (sigcheck > 0) {
+ /*
+ * If we're actually obeying the ACPI specification
+ * then the signature is written out as part of the
+ * swsusp header, in order to allow the boot kernel
+ * to gracefully decline to resume.
+ */
+ swsusp_hardware_signature = facs->hardware_signature;
+ }
+ }
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 25c2d0be953e..d589543875b8 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -107,8 +107,13 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
pr_info("SPCR table version %d\n", table->header.revision);
if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- switch (ACPI_ACCESS_BIT_WIDTH((
- table->serial_port.access_width))) {
+ u32 bit_width = table->serial_port.access_width;
+
+ if (bit_width > ACPI_ACCESS_BIT_MAX) {
+ pr_err("Unacceptable wide SPCR Access Width. Defaulting to byte size\n");
+ bit_width = ACPI_ACCESS_BIT_DEFAULT;
+ }
+ switch (ACPI_ACCESS_BIT_WIDTH((bit_width))) {
default:
pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
fallthrough;
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 00c0ebaab29f..a4b638bea6f1 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -939,10 +939,11 @@ static struct attribute *hotplug_profile_attrs[] = {
&hotplug_enabled_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(hotplug_profile);
static struct kobj_type acpi_hotplug_profile_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
- .default_attrs = hotplug_profile_attrs,
+ .default_groups = hotplug_profile_groups,
};
void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 71419eb16e09..0741a4933f62 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -35,12 +35,13 @@ static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" };
static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
-static int acpi_apic_instance __initdata;
+static int acpi_apic_instance __initdata_or_acpilib;
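+
+/*
+ * __initdata_or_acpilib / __init_or_acpilib keep these symbols available
+ * after boot when the table-parsing code is built as a library for
+ * runtime users (CONFIG_ACPI_TABLE_LIB, e.g. CXL CEDT parsing);
+ * otherwise they degrade to plain __initdata / __init.
+ */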
enum acpi_subtable_type {
ACPI_SUBTABLE_COMMON,
ACPI_SUBTABLE_HMAT,
ACPI_SUBTABLE_PRMT,
+ ACPI_SUBTABLE_CEDT,
};
struct acpi_subtable_entry {
@@ -52,7 +53,7 @@ struct acpi_subtable_entry {
* Disable table checksum verification for the early stage due to the size
* limitation of the current x86 early mapping implementation.
*/
-static bool acpi_verify_table_checksum __initdata = false;
+static bool acpi_verify_table_checksum __initdata_or_acpilib = false;
void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
@@ -216,7 +217,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
}
-static unsigned long __init
+static unsigned long __init_or_acpilib
acpi_get_entry_type(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@@ -226,11 +227,13 @@ acpi_get_entry_type(struct acpi_subtable_entry *entry)
return entry->hdr->hmat.type;
case ACPI_SUBTABLE_PRMT:
return 0;
+ case ACPI_SUBTABLE_CEDT:
+ return entry->hdr->cedt.type;
}
return 0;
}
-static unsigned long __init
+static unsigned long __init_or_acpilib
acpi_get_entry_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@@ -240,11 +243,13 @@ acpi_get_entry_length(struct acpi_subtable_entry *entry)
return entry->hdr->hmat.length;
case ACPI_SUBTABLE_PRMT:
return entry->hdr->prmt.length;
+ case ACPI_SUBTABLE_CEDT:
+ return entry->hdr->cedt.length;
}
return 0;
}
-static unsigned long __init
+static unsigned long __init_or_acpilib
acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@@ -254,20 +259,40 @@ acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
return sizeof(entry->hdr->hmat);
case ACPI_SUBTABLE_PRMT:
return sizeof(entry->hdr->prmt);
+ case ACPI_SUBTABLE_CEDT:
+ return sizeof(entry->hdr->cedt);
}
return 0;
}
-static enum acpi_subtable_type __init
+static enum acpi_subtable_type __init_or_acpilib
acpi_get_subtable_type(char *id)
{
if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
return ACPI_SUBTABLE_HMAT;
if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
return ACPI_SUBTABLE_PRMT;
+ if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
+ return ACPI_SUBTABLE_CEDT;
return ACPI_SUBTABLE_COMMON;
}
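+
+/*
+ * A sub-table handler comes in two flavors: ->handler takes only the
+ * entry, while ->handler_arg also receives the caller-supplied ->arg.
+ */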
+static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
+{
+ return proc->handler || proc->handler_arg;
+}
+
+static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
+ union acpi_subtable_headers *hdr,
+ unsigned long end)
+{
+ if (proc->handler)
+ return proc->handler(hdr, end);
+ if (proc->handler_arg)
+ return proc->handler_arg(hdr, proc->arg, end);
+ return -EINVAL;
+}
+
/**
* acpi_parse_entries_array - for each proc_num find a suitable subtable
*
@@ -291,10 +316,10 @@ acpi_get_subtable_type(char *id)
* On success returns sum of all matching entries for all proc handlers.
* Otherwise, -ENODEV or -EINVAL is returned.
*/
-static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
- struct acpi_table_header *table_header,
- struct acpi_subtable_proc *proc, int proc_num,
- unsigned int max_entries)
+static int __init_or_acpilib acpi_parse_entries_array(
+ char *id, unsigned long table_size,
+ struct acpi_table_header *table_header, struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries)
{
struct acpi_subtable_entry entry;
unsigned long table_end, subtable_len, entry_len;
@@ -318,8 +343,9 @@ static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
for (i = 0; i < proc_num; i++) {
if (acpi_get_entry_type(&entry) != proc[i].id)
continue;
- if (!proc[i].handler ||
- (!errs && proc[i].handler(entry.hdr, table_end))) {
+ if (!has_handler(&proc[i]) ||
+ (!errs &&
+ call_handler(&proc[i], entry.hdr, table_end))) {
errs++;
continue;
}
@@ -352,10 +378,9 @@ static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
return errs ? -EINVAL : count;
}
-int __init acpi_table_parse_entries_array(char *id,
- unsigned long table_size,
- struct acpi_subtable_proc *proc, int proc_num,
- unsigned int max_entries)
+int __init_or_acpilib acpi_table_parse_entries_array(
+ char *id, unsigned long table_size, struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries)
{
struct acpi_table_header *table_header = NULL;
int count;
@@ -386,21 +411,41 @@ int __init acpi_table_parse_entries_array(char *id,
return count;
}
-int __init acpi_table_parse_entries(char *id,
- unsigned long table_size,
- int entry_id,
- acpi_tbl_entry_handler handler,
- unsigned int max_entries)
+static int __init_or_acpilib __acpi_table_parse_entries(
+ char *id, unsigned long table_size, int entry_id,
+ acpi_tbl_entry_handler handler, acpi_tbl_entry_handler_arg handler_arg,
+ void *arg, unsigned int max_entries)
{
struct acpi_subtable_proc proc = {
.id = entry_id,
.handler = handler,
+ .handler_arg = handler_arg,
+ .arg = arg,
};
return acpi_table_parse_entries_array(id, table_size, &proc, 1,
max_entries);
}
+int __init_or_acpilib
+acpi_table_parse_cedt(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg)
+{
+ return __acpi_table_parse_entries(ACPI_SIG_CEDT,
+ sizeof(struct acpi_table_cedt), id,
+ NULL, handler_arg, arg, 0);
+}
+EXPORT_SYMBOL_ACPI_LIB(acpi_table_parse_cedt);
+
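+/*
+ * Example (illustrative, not part of this patch): a CXL platform driver
+ * can walk all CEDT sub-tables of one type with a context pointer, e.g.
+ *
+ *   acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, my_chbs_cb, &ctx);
+ *
+ * where my_chbs_cb() is an acpi_tbl_entry_handler_arg callback and
+ * "my_chbs_cb"/"ctx" are hypothetical names.
+ */
+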
+int __init acpi_table_parse_entries(char *id, unsigned long table_size,
+ int entry_id,
+ acpi_tbl_entry_handler handler,
+ unsigned int max_entries)
+{
+ return __acpi_table_parse_entries(id, table_size, entry_id, handler,
+ NULL, NULL, max_entries);
+}
+
int __init acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler, unsigned int max_entries)
{
@@ -500,7 +545,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
- ACPI_SIG_NHLT };
+ ACPI_SIG_NHLT, ACPI_SIG_AEST };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
@@ -723,7 +768,7 @@ static void __init acpi_table_initrd_scan(void)
/*
* Mark the table to avoid being used in
* acpi_table_initrd_override(). Though this is not possible
- * because override is disabled in acpi_install_table().
+ * because override is disabled in acpi_install_physical_table().
*/
if (test_and_set_bit(table_index, acpi_initrd_installed)) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
@@ -734,7 +779,7 @@ static void __init acpi_table_initrd_scan(void)
table->signature, table->oem_id,
table->oem_table_id);
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- acpi_install_table(acpi_tables_addr + table_offset, TRUE);
+ acpi_install_physical_table(acpi_tables_addr + table_offset);
next_table:
table_offset += table_length;
table_index++;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 95105db642b9..539660ef93c7 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -697,7 +697,6 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
struct acpi_device *device = cdev->devdata;
struct acpi_thermal *tz = thermal->devdata;
struct acpi_device *dev;
- acpi_status status;
acpi_handle handle;
int i;
int j;
@@ -715,8 +714,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
for (i = 0; i < tz->trips.passive.devices.count;
i++) {
handle = tz->trips.passive.devices.handles[i];
- status = acpi_bus_get_device(handle, &dev);
- if (ACPI_FAILURE(status) || dev != device)
+ dev = acpi_fetch_acpi_dev(handle);
+ if (dev != device)
continue;
if (bind)
result =
@@ -741,8 +740,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
j < tz->trips.active[i].devices.count;
j++) {
handle = tz->trips.active[i].devices.handles[j];
- status = acpi_bus_get_device(handle, &dev);
- if (ACPI_FAILURE(status) || dev != device)
+ dev = acpi_fetch_acpi_dev(handle);
+ if (dev != device)
continue;
if (bind)
result = thermal_zone_bind_cooling_device
@@ -1098,8 +1097,6 @@ static int acpi_thermal_resume(struct device *dev)
return -EINVAL;
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- if (!(&tz->trips.active[i]))
- break;
if (!tz->trips.active[i].flags.valid)
break;
tz->trips.active[i].flags.enabled = 1;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 068e393ea0c6..4f64713e9917 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -59,18 +59,16 @@ static void acpi_video_parse_cmdline(void)
static acpi_status
find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
{
+ struct acpi_device *acpi_dev = acpi_fetch_acpi_dev(handle);
long *cap = context;
struct pci_dev *dev;
- struct acpi_device *acpi_dev;
static const struct acpi_device_id video_ids[] = {
{ACPI_VIDEO_HID, 0},
{"", 0},
};
- if (acpi_bus_get_device(handle, &acpi_dev))
- return AE_OK;
- if (!acpi_match_device_ids(acpi_dev, video_ids)) {
+ if (acpi_dev && !acpi_match_device_ids(acpi_dev, video_ids)) {
dev = acpi_get_pci_dev(handle);
if (!dev)
return AE_OK;
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 1c48358b43ba..abc06e7f89d8 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -293,9 +293,9 @@ static void lpi_check_constraints(void)
for (i = 0; i < lpi_constraints_table_size; ++i) {
acpi_handle handle = lpi_constraints_table[i].handle;
- struct acpi_device *adev;
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
- if (!handle || acpi_bus_get_device(handle, &adev))
+ if (!adev)
continue;
acpi_handle_debug(handle,
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index f22f23933063..ffdeed5334d6 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -8,8 +8,11 @@
* Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../internal.h"
@@ -22,58 +25,77 @@
 * Some BIOS-es (temporarily) hide specific ACPI devices to work around Windows
* driver bugs. We use DMI matching to match known cases of this.
*
- * We work around this by always reporting ACPI_STA_DEFAULT for these
- * devices. Note this MUST only be done for devices where this is safe.
+ * Likewise, some not-actually-present devices are sometimes reported as
+ * present, which may cause issues.
+ *
+ * We work around this by using the below quirk list to override the status
+ * reported by the _STA method with a fixed value (ACPI_STA_DEFAULT or 0).
+ * Note this MUST only be done for devices where this is safe.
*
- * This forcing of devices to be present is limited to specific CPU (SoC)
- * models both to avoid potentially causing trouble on other models and
- * because some HIDs are re-used on different SoCs for completely
- * different devices.
+ * This status overriding is limited to specific CPU (SoC) models both to
+ * avoid potentially causing trouble on other models and because some HIDs
+ * are re-used on different SoCs for completely different devices.
*/
-struct always_present_id {
+struct override_status_id {
struct acpi_device_id hid[2];
struct x86_cpu_id cpu_ids[2];
struct dmi_system_id dmi_ids[2]; /* Optional */
const char *uid;
+ const char *path;
+ unsigned long long status;
};
-#define X86_MATCH(model) X86_MATCH_INTEL_FAM6_MODEL(model, NULL)
-
-#define ENTRY(hid, uid, cpu_models, dmi...) { \
+#define ENTRY(status, hid, uid, path, cpu_model, dmi...) { \
{ { hid, }, {} }, \
- { cpu_models, {} }, \
+ { X86_MATCH_INTEL_FAM6_MODEL(cpu_model, NULL), {} }, \
{ { .matches = dmi }, {} }, \
uid, \
+ path, \
+ status, \
}
-static const struct always_present_id always_present_ids[] = {
+#define PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \
+ ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_model, dmi)
+
+#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \
+ ENTRY(0, hid, uid, NULL, cpu_model, dmi)
+
+#define PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \
+ ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_model, dmi)
+
+#define NOT_PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \
+ ENTRY(0, "", NULL, path, cpu_model, dmi)
+
+static const struct override_status_id override_status_ids[] = {
/*
* Bay / Cherry Trail PWM directly poked by GPU driver in win10,
* but Linux uses a separate PWM driver, harmless if not used.
*/
- ENTRY("80860F09", "1", X86_MATCH(ATOM_SILVERMONT), {}),
- ENTRY("80862288", "1", X86_MATCH(ATOM_AIRMONT), {}),
+ PRESENT_ENTRY_HID("80860F09", "1", ATOM_SILVERMONT, {}),
+ PRESENT_ENTRY_HID("80862288", "1", ATOM_AIRMONT, {}),
+
+ /* The Xiaomi Mi Pad 2 uses PWM2 for touchkeys backlight control */
+ PRESENT_ENTRY_HID("80862289", "2", ATOM_AIRMONT, {
+ DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
+ }),
- /* Lenovo Yoga Book uses PWM2 for keyboard backlight control */
- ENTRY("80862289", "2", X86_MATCH(ATOM_AIRMONT), {
- DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
- }),
/*
* The INT0002 device is necessary to clear wakeup interrupt sources
* on Cherry Trail devices, without it we get nobody cared IRQ msgs.
*/
- ENTRY("INT0002", "1", X86_MATCH(ATOM_AIRMONT), {}),
+ PRESENT_ENTRY_HID("INT0002", "1", ATOM_AIRMONT, {}),
/*
* On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides
 * the touchscreen ACPI device until a certain amount of time
 * has passed after _SB.PCI0.GFX0.LCD.LCD1._ON gets called
* *and* _STA has been called at least 3 times since.
*/
- ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), {
+ PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
}),
- ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), {
+ PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"),
}),
@@ -81,54 +103,83 @@ static const struct always_present_id always_present_ids[] = {
/*
* The GPD win BIOS dated 20170221 has disabled the accelerometer, the
* drivers sometimes cause crashes under Windows and this is how the
- * manufacturer has solved this :| Note that the the DMI data is less
- * generic then it seems, a board_vendor of "AMI Corporation" is quite
- * rare and a board_name of "Default String" also is rare.
+ * manufacturer has solved this :| The DMI match may not seem unique,
+ * but it is. In the 67000+ DMI decode dumps from linux-hardware.org
+ * only 116 have board_vendor set to "AMI Corporation" and of those 116
+ * only the GPD win and pocket entries' board_name is "Default string".
*
* Unfortunately the GPD pocket also uses these strings and its BIOS
* was copy-pasted from the GPD win, so it has a disabled KIOX000A
* node which we should not enable, thus we also check the BIOS date.
*/
- ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), {
+ PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "02/21/2017")
}),
- ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), {
+ PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "03/20/2017")
}),
- ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), {
+ PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "05/25/2017")
}),
+
+ /*
+ * The GPD win/pocket have a PCI wifi card, but its DSDT has the SDIO
+ * mmc controller enabled and that has a child-device whose _PS3
+ * method sets a GPIO, causing the PCI wifi card to turn off.
+ * See above remark about uniqueness of the DMI match.
+ */
+ NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", ATOM_AIRMONT, {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ }),
};
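To illustrate the macro layer above, a new quirk is just one more array entry. The following is a made-up example (hypothetical HID, UID and DMI strings, not part of this change) showing how a device would be forced present on a specific board:

	/* Hypothetical illustration only -- not a real quirk entry: */
	PRESENT_ENTRY_HID("INTABCD", "1", ATOM_AIRMONT, {
		DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		DMI_MATCH(DMI_PRODUCT_NAME, "Example Tablet"),
	}),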
-bool acpi_device_always_present(struct acpi_device *adev)
+bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status)
{
bool ret = false;
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) {
- if (acpi_match_device_ids(adev, always_present_ids[i].hid))
+ for (i = 0; i < ARRAY_SIZE(override_status_ids); i++) {
+ if (!x86_match_cpu(override_status_ids[i].cpu_ids))
continue;
- if (!adev->pnp.unique_id ||
- strcmp(adev->pnp.unique_id, always_present_ids[i].uid))
+ if (override_status_ids[i].dmi_ids[0].matches[0].slot &&
+ !dmi_check_system(override_status_ids[i].dmi_ids))
continue;
- if (!x86_match_cpu(always_present_ids[i].cpu_ids))
- continue;
+ if (override_status_ids[i].path) {
+ struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL };
+ bool match;
- if (always_present_ids[i].dmi_ids[0].matches[0].slot &&
- !dmi_check_system(always_present_ids[i].dmi_ids))
- continue;
+ if (acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &path))
+ continue;
+
+ match = strcmp((char *)path.pointer, override_status_ids[i].path) == 0;
+ kfree(path.pointer);
+ if (!match)
+ continue;
+ } else {
+ if (acpi_match_device_ids(adev, override_status_ids[i].hid))
+ continue;
+
+ if (!adev->pnp.unique_id ||
+ strcmp(adev->pnp.unique_id, override_status_ids[i].uid))
+ continue;
+ }
+
+ *status = override_status_ids[i].status;
ret = true;
break;
}
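The precedence in acpi_device_override_status() is: CPU (SoC) model first, then the optional DMI match, then either the ACPI path or the HID + UID pair. A minimal stand-alone model of that ordering, with plain strings standing in for x86_match_cpu(), dmi_check_system() and acpi_get_name() (all names here are hypothetical), might look like this:

/* Hypothetical stand-alone model of the override_status matching order. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fake_quirk {
	const char *cpu;	/* must match first */
	const char *dmi;	/* optional, may be NULL */
	const char *path;	/* if set, match by ACPI path ... */
	const char *hid, *uid;	/* ... otherwise by HID + UID */
	unsigned long long status;
};

static bool quirk_matches(const struct fake_quirk *q, const char *cpu,
			  const char *dmi, const char *path,
			  const char *hid, const char *uid)
{
	if (strcmp(q->cpu, cpu))
		return false;
	if (q->dmi && (!dmi || strcmp(q->dmi, dmi)))
		return false;
	if (q->path)
		return path && !strcmp(q->path, path);
	return hid && uid && !strcmp(q->hid, hid) && !strcmp(q->uid, uid);
}

int main(void)
{
	static const struct fake_quirk q = {
		.cpu = "ATOM_AIRMONT", .dmi = "GPD win",
		.path = "\\_SB_.PCI0.SDHB.BRC1", .status = 0,
	};

	printf("match: %d\n", quirk_matches(&q, "ATOM_AIRMONT", "GPD win",
					    "\\_SB_.PCI0.SDHB.BRC1",
					    NULL, NULL));
	return 0;
}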
@@ -160,3 +211,183 @@ bool force_storage_d3(void)
{
return x86_match_cpu(storage_d3_cpu_ids);
}
+
+/*
+ * x86 ACPI boards which ship with only Android as their factory image usually
+ * declare a whole bunch of bogus I2C devices in their ACPI tables and sometimes
+ * there are issues with serdev devices on these boards too, e.g. the resource
+ * points to the wrong serdev_controller.
+ *
+ * Instantiating I2C / serdev devs for these bogus devs causes various issues,
+ * e.g. GPIO/IRQ resource conflicts because sometimes drivers do bind to them.
+ * The Android x86 kernel fork shipped on these devices has some special code
+ * to remove the bogus I2C clients (and AFAICT serdevs are ignored completely).
+ *
+ * The acpi_quirk_skip_*_enumeration() functions below are used by the I2C or
+ * serdev code to skip instantiating any I2C or serdev devs on broken boards.
+ *
+ * In case of I2C an exception is made for HIDs on the i2c_acpi_known_good_ids
+ * list. These are known to always be correct (and in case of the audio-codecs
+ * the drivers heavily rely on the codec being enumerated through ACPI).
+ *
+ * Note these boards typically do actually have I2C and serdev devices,
+ * just different ones than the ones described in their DSDT. The devices
+ * which are actually present are manually instantiated by the
+ * drivers/platform/x86/x86-android-tablets.c kernel module.
+ */
+#define ACPI_QUIRK_SKIP_I2C_CLIENTS BIT(0)
+#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(1)
+#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(2)
+#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(3)
+
+static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ /*
+ * 1. Devices with only the skip / don't-skip AC and battery quirks,
+ * sorted alphabetically.
+ */
+ {
+ /* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
+ },
+ .driver_data = (void *)ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY
+ },
+ {
+ /* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80XF"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
+ },
+ .driver_data = (void *)ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY
+ },
+
+ /*
+ * 2. Devices which also have the skip i2c/serdev quirks and which
+ * need the x86-android-tablets module to properly work.
+ */
+#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_TTY_UART2_SKIP |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
+ /* Whitelabel (sold as various brands) TM800A550L */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+#endif
+ {}
+};
+
+#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
+ { "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
+ { "INT33F4", 0 }, /* X-Powers AXP288 PMIC */
+ { "INT33FD", 0 }, /* Intel Crystal Cove PMIC */
+ { "NPCE69A", 0 }, /* Asus Transformer keyboard dock */
+ {}
+};
+
+bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
+{
+ const struct dmi_system_id *dmi_id;
+ long quirks;
+
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (!dmi_id)
+ return false;
+
+ quirks = (unsigned long)dmi_id->driver_data;
+ if (!(quirks & ACPI_QUIRK_SKIP_I2C_CLIENTS))
+ return false;
+
+ return acpi_match_device_ids(adev, i2c_acpi_known_good_ids);
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_i2c_client_enumeration);
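For context, the I2C side is expected to consult this helper early in its ACPI enumeration path. A rough sketch of such a caller (simplified and hypothetical, not the actual i2c-core-acpi.c code):

/* Sketch of an I2C enumeration caller (simplified, hypothetical): */
static void example_i2c_acpi_enumerate(struct acpi_device *adev)
{
	/* On DMI-quirked boards, only known-good HIDs get I2C clients. */
	if (acpi_quirk_skip_i2c_client_enumeration(adev))
		return;

	example_register_i2c_client(adev);	/* hypothetical helper */
}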
+
+int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+ struct acpi_device *adev = ACPI_COMPANION(controller_parent);
+ const struct dmi_system_id *dmi_id;
+ long quirks = 0;
+
+ *skip = false;
+
+ /* !dev_is_platform() to not match on PNP enumerated debug UARTs */
+ if (!adev || !adev->pnp.unique_id || !dev_is_platform(controller_parent))
+ return 0;
+
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (dmi_id)
+ quirks = (unsigned long)dmi_id->driver_data;
+
+ if (quirks & ACPI_QUIRK_UART1_TTY_UART2_SKIP) {
+ if (!strcmp(adev->pnp.unique_id, "1"))
+ return -ENODEV; /* Create tty cdev instead of serdev */
+
+ if (!strcmp(adev->pnp.unique_id, "2"))
+ *skip = true;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
+#endif
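The serdev variant returns an error when a plain tty should be created instead, and sets *skip when no serdev should be instantiated at all. A hypothetical caller sketch (the function names are made up; only the quirk helper is real):

/* Sketch of a serdev registration caller (simplified, hypothetical): */
static int example_register_uart(struct device *controller_parent)
{
	bool skip;
	int ret;

	ret = acpi_quirk_skip_serdev_enumeration(controller_parent, &skip);
	if (ret)
		return ret;	/* -ENODEV: fall back to a plain tty cdev */
	if (skip)
		return 0;	/* quietly skip serdev for this UART */

	return example_serdev_controller_add(controller_parent); /* hypothetical */
}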
+
+/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
+static const struct {
+ const char *hid;
+ int hrv;
+} acpi_skip_ac_and_battery_pmic_ids[] = {
+ { "INT33F4", -1 }, /* X-Powers AXP288 PMIC */
+ { "INT34D3", 3 }, /* Intel Cherrytrail Whiskey Cove PMIC */
+};
+
+bool acpi_quirk_skip_acpi_ac_and_battery(void)
+{
+ const struct dmi_system_id *dmi_id;
+ long quirks = 0;
+ int i;
+
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (dmi_id)
+ quirks = (unsigned long)dmi_id->driver_data;
+
+ if (quirks & ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY)
+ return true;
+
+ if (quirks & ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(acpi_skip_ac_and_battery_pmic_ids); i++) {
+ if (acpi_dev_present(acpi_skip_ac_and_battery_pmic_ids[i].hid, "1",
+ acpi_skip_ac_and_battery_pmic_ids[i].hrv)) {
+ pr_info_once("found native %s PMIC, skipping ACPI AC and battery devices\n",
+ acpi_skip_ac_and_battery_pmic_ids[i].hid);
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_acpi_ac_and_battery);
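The AC and battery drivers, in turn, are expected to bail out of their probe when this helper reports a native PMIC charger driver. Schematically (hypothetical function, not the actual ac.c/battery.c code):

/* Sketch of how an ACPI AC/battery probe might use the helper: */
static int example_power_supply_probe(void)
{
	if (acpi_quirk_skip_acpi_ac_and_battery())
		return -ENODEV;	/* a native PMIC charger driver wins */

	/* ... normal ACPI AC / battery registration ... */
	return 0;
}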
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 720aa6cdd402..e1a5eca3ae3c 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -21,8 +21,6 @@
#include <linux/reset.h>
#include <linux/of_irq.h>
-#include <asm/irq.h>
-
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
/* called on periphid match and class 0x9 coresight device. */
@@ -136,8 +134,6 @@ static ssize_t name##_show(struct device *_dev, \
static DEVICE_ATTR_RO(name)
amba_attr_func(id, "%08x\n", dev->periphid);
-amba_attr_func(irq0, "%u\n", dev->irq[0]);
-amba_attr_func(irq1, "%u\n", dev->irq[1]);
amba_attr_func(resource, "\t%016llx\t%016llx\t%016lx\n",
(unsigned long long)dev->res.start, (unsigned long long)dev->res.end,
dev->res.flags);
@@ -175,6 +171,28 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
+static int of_amba_device_decode_irq(struct amba_device *dev)
+{
+ struct device_node *node = dev->dev.of_node;
+ int i, irq = 0;
+
+ if (IS_ENABLED(CONFIG_OF_IRQ) && node) {
+ /* Decode the IRQs and address ranges */
+ for (i = 0; i < AMBA_NR_IRQS; i++) {
+ irq = of_irq_get(node, i);
+ if (irq < 0) {
+ if (irq == -EPROBE_DEFER)
+ return irq;
+ irq = 0;
+ }
+
+ dev->irq[i] = irq;
+ }
+ }
+
+ return 0;
+}
+
/*
* These are the device model conversion veneers; they convert the
* device model structures to our more specific structures.
@@ -187,6 +205,10 @@ static int amba_probe(struct device *dev)
int ret;
do {
+ ret = of_amba_device_decode_irq(pcdev);
+ if (ret)
+ break;
+
ret = of_clk_set_defaults(dev->of_node, false);
if (ret < 0)
break;
@@ -372,38 +394,12 @@ static void amba_device_release(struct device *dev)
kfree(d);
}
-static int of_amba_device_decode_irq(struct amba_device *dev)
-{
- struct device_node *node = dev->dev.of_node;
- int i, irq = 0;
-
- if (IS_ENABLED(CONFIG_OF_IRQ) && node) {
- /* Decode the IRQs and address ranges */
- for (i = 0; i < AMBA_NR_IRQS; i++) {
- irq = of_irq_get(node, i);
- if (irq < 0) {
- if (irq == -EPROBE_DEFER)
- return irq;
- irq = 0;
- }
-
- dev->irq[i] = irq;
- }
- }
-
- return 0;
-}
-
static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
{
u32 size;
void __iomem *tmp;
int i, ret;
- ret = of_amba_device_decode_irq(dev);
- if (ret)
- goto err_out;
-
ret = request_resource(parent, &dev->res);
if (ret)
goto err_out;
@@ -488,20 +484,9 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
skip_probe:
ret = device_add(&dev->dev);
- if (ret)
- goto err_release;
-
- if (dev->irq[0])
- ret = device_create_file(&dev->dev, &dev_attr_irq0);
- if (ret == 0 && dev->irq[1])
- ret = device_create_file(&dev->dev, &dev_attr_irq1);
- if (ret == 0)
- return ret;
-
- device_unregister(&dev->dev);
-
err_release:
- release_resource(&dev->res);
+ if (ret)
+ release_resource(&dev->res);
err_out:
return ret;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c75fb600740c..8351c5638880 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -69,7 +69,7 @@
#include <uapi/linux/android/binder.h>
-#include <asm/cacheflush.h>
+#include <linux/cacheflush.h>
#include "binder_internal.h"
#include "binder_trace.h"
@@ -1608,15 +1608,21 @@ static void binder_cleanup_transaction(struct binder_transaction *t,
/**
* binder_get_object() - gets object and checks for valid metadata
* @proc: binder_proc owning the buffer
+ * @u: sender's user pointer to base of buffer
* @buffer: binder_buffer that we're parsing.
* @offset: offset in the @buffer at which to validate an object.
* @object: struct binder_object to read into
*
- * Return: If there's a valid metadata object at @offset in @buffer, the
+ * Copy the binder object at the given offset into @object. If @u is
+ * provided then the copy is from the sender's buffer. If not, then
+ * it is copied from the target's @buffer.
+ *
+ * Return: If there's a valid metadata object at @offset, the
* size of that object. Otherwise, it returns zero. The object
* is read into the struct binder_object pointed to by @object.
*/
static size_t binder_get_object(struct binder_proc *proc,
+ const void __user *u,
struct binder_buffer *buffer,
unsigned long offset,
struct binder_object *object)
@@ -1626,10 +1632,16 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
- binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
- offset, read_size))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr))
return 0;
+ if (u) {
+ if (copy_from_user(object, u + offset, read_size))
+ return 0;
+ } else {
+ if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+ offset, read_size))
+ return 0;
+ }
/* Ok, now see if we read a complete object. */
hdr = &object->hdr;
@@ -1702,7 +1714,7 @@ static struct binder_buffer_object *binder_validate_ptr(
b, buffer_offset,
sizeof(object_offset)))
return NULL;
- object_size = binder_get_object(proc, b, object_offset, object);
+ object_size = binder_get_object(proc, NULL, b, object_offset, object);
if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
return NULL;
if (object_offsetp)
@@ -1767,7 +1779,8 @@ static bool binder_validate_fixup(struct binder_proc *proc,
unsigned long buffer_offset;
struct binder_object last_object;
struct binder_buffer_object *last_bbo;
- size_t object_size = binder_get_object(proc, b, last_obj_offset,
+ size_t object_size = binder_get_object(proc, NULL, b,
+ last_obj_offset,
&last_object);
if (object_size != sizeof(*last_bbo))
return false;
@@ -1882,7 +1895,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
buffer, buffer_offset,
sizeof(object_offset)))
- object_size = binder_get_object(proc, buffer,
+ object_size = binder_get_object(proc, NULL, buffer,
object_offset, &object);
if (object_size == 0) {
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
@@ -1933,7 +1946,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_FD: {
/*
* No need to close the file here since user-space
- * closes it for for successfully delivered
+ * closes it for successfully delivered
* transactions. For transactions that weren't
* delivered, the new fd was never allocated so
* there is no need to close and the fput on the
@@ -2220,16 +2233,258 @@ err_fd_not_accepted:
return ret;
}
-static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+/**
+ * struct binder_ptr_fixup - data to be fixed-up in target buffer
+ * @offset:     offset in target buffer to fixup
+ * @skip_size:  bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node:       list node
+ *
+ * This is used for the pointer fixup list (pf) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_ptr_fixup {
+ binder_size_t offset;
+ size_t skip_size;
+ binder_uintptr_t fixup_data;
+ struct list_head node;
+};
+
+/**
+ * struct binder_sg_copy - scatter-gather data to be copied
+ * @offset:       offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length:       bytes to copy
+ * @node:         list node
+ *
+ * This is used for the sg copy list (sgc) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_sg_copy {
+ binder_size_t offset;
+ const void __user *sender_uaddr;
+ size_t length;
+ struct list_head node;
+};
+
+/**
+ * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
+ * @alloc: binder_alloc associated with @buffer
+ * @buffer: binder buffer in target process
+ * @sgc_head: list_head of scatter-gather copy list
+ * @pf_head: list_head of pointer fixup list
+ *
+ * Processes all elements of @sgc_head, applying fixups from @pf_head
+ * and copying the scatter-gather data from the source process' user
+ * buffer to the target's buffer. It is expected that the list creation
+ * and processing all occurs during binder_transaction() so these lists
+ * are only accessed in local context.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ struct list_head *sgc_head,
+ struct list_head *pf_head)
+{
+ int ret = 0;
+ struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *pf =
+ list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
+ node);
+
+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < sgc->length) {
+ size_t copy_size;
+ size_t bytes_left = sgc->length - bytes_copied;
+ size_t offset = sgc->offset + bytes_copied;
+
+ /*
+ * We copy up to the fixup (pointed to by pf)
+ */
+ copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
+ : bytes_left;
+ if (!ret && copy_size)
+ ret = binder_alloc_copy_user_to_buffer(
+ alloc, buffer,
+ offset,
+ sgc->sender_uaddr + bytes_copied,
+ copy_size);
+ bytes_copied += copy_size;
+ if (copy_size != bytes_left) {
+ BUG_ON(!pf);
+ /* we stopped at a fixup offset */
+ if (pf->skip_size) {
+ /*
+ * we are just skipping. This is for
+ * BINDER_TYPE_FDA where the translated
+ * fds will be fixed up when we get
+ * to target context.
+ */
+ bytes_copied += pf->skip_size;
+ } else {
+ /* apply the fixup indicated by pf */
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(
+ alloc, buffer,
+ pf->offset,
+ &pf->fixup_data,
+ sizeof(pf->fixup_data));
+ bytes_copied += sizeof(pf->fixup_data);
+ }
+ list_del(&pf->node);
+ kfree(pf);
+ pf = list_first_entry_or_null(pf_head,
+ struct binder_ptr_fixup, node);
+ }
+ }
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+ BUG_ON(!list_empty(pf_head));
+ BUG_ON(!list_empty(sgc_head));
+
+ return ret > 0 ? -EINVAL : ret;
+}
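The interleaving done here (copy source bytes up to the next fixup offset, then either write the fixup value or leave a hole to be filled later) is the core of the change. A minimal user-space model of the same walk over a single scatter-gather block, assuming the fixup list is sorted and in range (which binder_add_fixup() guarantees in the kernel), could be:

/* User-space model of the deferred-copy walk (illustration only). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fixup {
	size_t offset;		/* absolute offset of the fixup in dst */
	uint64_t data;		/* value to write when skip_size == 0 */
	size_t skip_size;	/* bytes to leave unwritten (FDA case) */
};

/* Copy src into dst, stopping at each fixup to apply or skip it. */
static void deferred_copy(uint8_t *dst, const uint8_t *src, size_t len,
			  const struct fixup *pf, size_t nfixups)
{
	size_t copied = 0;

	while (copied < len) {
		size_t end = nfixups ? pf->offset : len;

		memcpy(dst + copied, src + copied, end - copied);
		copied = end;
		if (copied >= len)
			break;
		if (pf->skip_size) {
			copied += pf->skip_size;	/* hole, fixed later */
		} else {
			memcpy(dst + copied, &pf->data, sizeof(pf->data));
			copied += sizeof(pf->data);
		}
		pf++;
		nfixups--;
	}
}

int main(void)
{
	const uint8_t src[25] = "aaaaaaaabbbbbbbbcccccccc";
	uint8_t dst[25] = { 0 };
	const struct fixup pf = { .offset = 8, .data = 0x4242424242424242ULL };

	deferred_copy(dst, src, 24, &pf, 1);
	printf("%s\n", (char *)dst);	/* aaaaaaaaBBBBBBBBcccccccc */
	return 0;
}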
+
+/**
+ * binder_cleanup_deferred_txn_lists() - free specified lists
+ * @sgc_head: list_head of scatter-gather copy list
+ * @pf_head: list_head of pointer fixup list
+ *
+ * Called to clean up @sgc_head and @pf_head if there is an
+ * error.
+ */
+static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
+ struct list_head *pf_head)
+{
+ struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *pf, *tmppf;
+
+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+ list_del(&pf->node);
+ kfree(pf);
+ }
+}
+
+/**
+ * binder_defer_copy() - queue a scatter-gather buffer for copy
+ * @sgc_head: list_head of scatter-gather copy list
+ * @offset: binder buffer offset in target process
+ * @sender_uaddr: user address in source process
+ * @length: bytes to copy
+ *
+ * Specify a scatter-gather block to be copied. The actual copy must
+ * be deferred until all the needed fixups are identified and queued.
+ * Then the copy and fixups are done together so un-translated values
+ * from the source are never visible in the target buffer.
+ *
+ * We are guaranteed that repeated calls to this function will have
+ * monotonically increasing @offset values so the list will naturally
+ * be ordered.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
+ const void __user *sender_uaddr, size_t length)
+{
+ struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+
+ if (!bc)
+ return -ENOMEM;
+
+ bc->offset = offset;
+ bc->sender_uaddr = sender_uaddr;
+ bc->length = length;
+ INIT_LIST_HEAD(&bc->node);
+
+ /*
+ * We are guaranteed that the deferred copies are in-order
+ * so just add to the tail.
+ */
+ list_add_tail(&bc->node, sgc_head);
+
+ return 0;
+}
+
+/**
+ * binder_add_fixup() - queue a fixup to be applied to sg copy
+ * @pf_head: list_head of binder ptr fixup list
+ * @offset: binder buffer offset in target process
+ * @fixup: bytes to be copied for fixup
+ * @skip_size: bytes to skip when copying (fixup will be applied later)
+ *
+ * Add the specified fixup to a list ordered by @offset. When copying
+ * the scatter-gather buffers, the fixup will be copied instead of
+ * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
+ * will be applied later (in target process context), so we just skip
+ * the bytes specified by @skip_size. If @skip_size is 0, we copy the
+ * value in @fixup.
+ *
+ * This function is called *mostly* in @offset order, but there are
+ * exceptions. Since out-of-order inserts are relatively uncommon,
+ * we insert the new element by searching backward from the tail of
+ * the list.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
+ binder_uintptr_t fixup, size_t skip_size)
+{
+ struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ struct binder_ptr_fixup *tmppf;
+
+ if (!pf)
+ return -ENOMEM;
+
+ pf->offset = offset;
+ pf->fixup_data = fixup;
+ pf->skip_size = skip_size;
+ INIT_LIST_HEAD(&pf->node);
+
+ /* Fixups are *mostly* added in-order, but there are some
+ * exceptions. Look backwards through list for insertion point.
+ */
+ list_for_each_entry_reverse(tmppf, pf_head, node) {
+ if (tmppf->offset < pf->offset) {
+ list_add(&pf->node, &tmppf->node);
+ return 0;
+ }
+ }
+ /*
+ * if we get here, then the new offset is the lowest so
+ * insert at the head
+ */
+ list_add(&pf->node, pf_head);
+ return 0;
+}
+
+static int binder_translate_fd_array(struct list_head *pf_head,
+ struct binder_fd_array_object *fda,
+ const void __user *sender_ubuffer,
struct binder_buffer_object *parent,
+ struct binder_buffer_object *sender_uparent,
struct binder_transaction *t,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
{
binder_size_t fdi, fd_buf_size;
binder_size_t fda_offset;
+ const void __user *sender_ufda_base;
struct binder_proc *proc = thread->proc;
- struct binder_proc *target_proc = t->to_proc;
+ int ret;
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -2253,29 +2508,36 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
*/
fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
fda->parent_offset;
- if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
+ sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
+ fda->parent_offset;
+
+ if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
+ !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
return -EINVAL;
}
+ ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
+ if (ret)
+ return ret;
+
for (fdi = 0; fdi < fda->num_fds; fdi++) {
u32 fd;
- int ret;
binder_size_t offset = fda_offset + fdi * sizeof(fd);
+ binder_size_t sender_uoffset = fdi * sizeof(fd);
- ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
- &fd, t->buffer,
- offset, sizeof(fd));
+ ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
if (!ret)
ret = binder_translate_fd(fd, offset, t, thread,
in_reply_to);
- if (ret < 0)
- return ret;
+ if (ret)
+ return ret > 0 ? -EINVAL : ret;
}
return 0;
}
-static int binder_fixup_parent(struct binder_transaction *t,
+static int binder_fixup_parent(struct list_head *pf_head,
+ struct binder_transaction *t,
struct binder_thread *thread,
struct binder_buffer_object *bp,
binder_size_t off_start_offset,
@@ -2321,14 +2583,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
}
buffer_offset = bp->parent_offset +
(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
- if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
- &bp->buffer, sizeof(bp->buffer))) {
- binder_user_error("%d:%d got transaction with invalid parent offset\n",
- proc->pid, thread->pid);
- return -EINVAL;
- }
-
- return 0;
+ return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}
/**
@@ -2455,6 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t off_start_offset, off_end_offset;
binder_size_t off_min;
binder_size_t sg_buf_offset, sg_buf_end_offset;
+ binder_size_t user_offset = 0;
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -2469,6 +2725,12 @@ static void binder_transaction(struct binder_proc *proc,
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
+ struct list_head sgc_head;
+ struct list_head pf_head;
+ const void __user *user_buffer = (const void __user *)
+ (uintptr_t)tr->data.ptr.buffer;
+ INIT_LIST_HEAD(&sgc_head);
+ INIT_LIST_HEAD(&pf_head);
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -2782,19 +3044,6 @@ static void binder_transaction(struct binder_proc *proc,
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
- t->buffer, 0,
- (const void __user *)
- (uintptr_t)tr->data.ptr.buffer,
- tr->data_size)) {
- binder_user_error("%d:%d got transaction with invalid data ptr\n",
- proc->pid, thread->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -EFAULT;
- return_error_line = __LINE__;
- goto err_copy_data_failed;
- }
- if (binder_alloc_copy_user_to_buffer(
- &target_proc->alloc,
t->buffer,
ALIGN(tr->data_size, sizeof(void *)),
(const void __user *)
@@ -2837,6 +3086,7 @@ static void binder_transaction(struct binder_proc *proc,
size_t object_size;
struct binder_object object;
binder_size_t object_offset;
+ binder_size_t copy_size;
if (binder_alloc_copy_from_buffer(&target_proc->alloc,
&object_offset,
@@ -2848,8 +3098,27 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
- object_size = binder_get_object(target_proc, t->buffer,
- object_offset, &object);
+
+ /*
+ * Copy the source user buffer up to the next object
+ * that will be processed.
+ */
+ copy_size = object_offset - user_offset;
+ if (copy_size && (user_offset > object_offset ||
+ binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+ user_buffer + user_offset,
+ copy_size))) {
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+ object_size = binder_get_object(target_proc, user_buffer,
+ t->buffer, object_offset, &object);
if (object_size == 0 || object_offset < off_min) {
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid,
@@ -2861,6 +3130,11 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
+ /*
+ * Set offset to the next buffer fragment to be
+ * copied
+ */
+ user_offset = object_offset + object_size;
hdr = &object.hdr;
off_min = object_offset + object_size;
@@ -2923,6 +3197,8 @@ static void binder_transaction(struct binder_proc *proc,
case BINDER_TYPE_FDA: {
struct binder_object ptr_object;
binder_size_t parent_offset;
+ struct binder_object user_object;
+ size_t user_parent_size;
struct binder_fd_array_object *fda =
to_binder_fd_array_object(hdr);
size_t num_valid = (buffer_offset - off_start_offset) /
@@ -2954,11 +3230,35 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_parent;
}
- ret = binder_translate_fd_array(fda, parent, t, thread,
- in_reply_to);
- if (ret < 0) {
+ /*
+ * We need to read the user version of the parent
+ * object to get the original user offset
+ */
+ user_parent_size =
+ binder_get_object(proc, user_buffer, t->buffer,
+ parent_offset, &user_object);
+ if (user_parent_size != sizeof(user_object.bbo)) {
+ binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
+ proc->pid, thread->pid,
+ user_parent_size,
+ sizeof(user_object.bbo));
return_error = BR_FAILED_REPLY;
- return_error_param = ret;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_parent;
+ }
+ ret = binder_translate_fd_array(&pf_head, fda,
+ user_buffer, parent,
+ &user_object.bbo, t,
+ thread, in_reply_to);
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fda, sizeof(*fda));
+ if (ret) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret > 0 ? -EINVAL : ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
@@ -2980,19 +3280,14 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
- if (binder_alloc_copy_user_to_buffer(
- &target_proc->alloc,
- t->buffer,
- sg_buf_offset,
- (const void __user *)
- (uintptr_t)bp->buffer,
- bp->length)) {
- binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
- proc->pid, thread->pid);
- return_error_param = -EFAULT;
+ ret = binder_defer_copy(&sgc_head, sg_buf_offset,
+ (const void __user *)(uintptr_t)bp->buffer,
+ bp->length);
+ if (ret) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
return_error_line = __LINE__;
- goto err_copy_data_failed;
+ goto err_translate_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)
@@ -3001,7 +3296,8 @@ static void binder_transaction(struct binder_proc *proc,
num_valid = (buffer_offset - off_start_offset) /
sizeof(binder_size_t);
- ret = binder_fixup_parent(t, thread, bp,
+ ret = binder_fixup_parent(&pf_head, t,
+ thread, bp,
off_start_offset,
num_valid,
last_fixup_obj_off,
@@ -3028,6 +3324,30 @@ static void binder_transaction(struct binder_proc *proc,
goto err_bad_object_type;
}
}
+ /* Done processing objects, copy the rest of the buffer */
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+ user_buffer + user_offset,
+ tr->data_size - user_offset)) {
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+
+ ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
+ &sgc_head, &pf_head);
+ if (ret) {
+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
if (t->buffer->oneway_spam_suspect)
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
else
@@ -3101,6 +3421,7 @@ err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
+ binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, NULL, t->buffer,
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index a7da8ea7b3ed..cb54631fd950 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -146,7 +146,7 @@ config SATA_AHCI_PLATFORM
config AHCI_BRCM
tristate "Broadcom AHCI SATA support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
- ARCH_BCM_63XX
+ ARCH_BCM_63XX || COMPILE_TEST
select SATA_HOST
help
This option enables support for the AHCI SATA3 controller found on
@@ -156,7 +156,7 @@ config AHCI_BRCM
config AHCI_DA850
tristate "DaVinci DA850 AHCI SATA support"
- depends on ARCH_DAVINCI_DA850
+ depends on ARCH_DAVINCI_DA850 || COMPILE_TEST
select SATA_HOST
help
This option enables support for the DaVinci DA850 SoC's
@@ -166,7 +166,7 @@ config AHCI_DA850
config AHCI_DM816
tristate "DaVinci DM816 AHCI SATA support"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
select SATA_HOST
help
This option enables support for the DaVinci DM816 SoC's
@@ -206,7 +206,7 @@ config AHCI_CEVA
config AHCI_MTK
tristate "MediaTek AHCI SATA support"
- depends on ARCH_MEDIATEK
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select MFD_SYSCON
select SATA_HOST
help
@@ -217,7 +217,7 @@ config AHCI_MTK
config AHCI_MVEBU
tristate "Marvell EBU AHCI SATA support"
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU || COMPILE_TEST
select SATA_HOST
help
This option enables support for the Marvell EBU SoC's
@@ -236,7 +236,7 @@ config AHCI_OCTEON
config AHCI_SUNXI
tristate "Allwinner sunxi AHCI SATA support"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
select SATA_HOST
help
This option enables support for the Allwinner sunxi SoC's
@@ -246,7 +246,7 @@ config AHCI_SUNXI
config AHCI_TEGRA
tristate "NVIDIA Tegra AHCI SATA support"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
select SATA_HOST
help
This option enables support for the NVIDIA Tegra SoC's
@@ -256,7 +256,7 @@ config AHCI_TEGRA
config AHCI_XGENE
tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
- depends on PHY_XGENE
+ depends on PHY_XGENE || COMPILE_TEST
select SATA_HOST
help
This option enables support for APM X-Gene SoC SATA host controller.
@@ -273,7 +273,7 @@ config AHCI_QORIQ
config SATA_FSL
tristate "Freescale 3.0Gbps SATA support"
- depends on FSL_SOC
+ depends on FSL_SOC || COMPILE_TEST
select SATA_HOST
help
This option enables support for Freescale 3.0Gbps SATA controller.
@@ -294,7 +294,7 @@ config SATA_GEMINI
config SATA_AHCI_SEATTLE
tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
- depends on ARCH_SEATTLE
+ depends on ARCH_SEATTLE || COMPILE_TEST
select SATA_HOST
help
This option enables support for AMD Seattle SATA host controller.
@@ -432,18 +432,6 @@ config SATA_DWC_OLD_DMA
This option enables support for old device trees without the
"dmas" property.
-config SATA_DWC_DEBUG
- bool "Debugging driver version"
- depends on SATA_DWC
- help
- This option enables debugging output in the driver.
-
-config SATA_DWC_VDEBUG
- bool "Verbose debug output"
- depends on SATA_DWC_DEBUG
- help
- This option enables the taskfile dumping and NCQ debugging.
-
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
depends on ARCH_HIGHBANK || COMPILE_TEST
@@ -611,7 +599,7 @@ config PATA_ATP867X
config PATA_BK3710
tristate "Palmchip BK3710 PATA support"
- depends on ARCH_DAVINCI
+ depends on ARCH_DAVINCI || COMPILE_TEST
select PATA_TIMINGS
help
This option enables support for the integrated IDE controller on
@@ -649,7 +637,7 @@ config PATA_CS5530
config PATA_CS5535
tristate "CS5535 PATA support (Experimental)"
- depends on PCI && X86_32
+ depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
help
This option enables support for the NatSemi/AMD CS5535
companion chip used with the Geode processor family.
@@ -697,7 +685,7 @@ config PATA_EP93XX
config PATA_FTIDE010
tristate "Faraday Technology FTIDE010 PATA support"
depends on OF
- depends on ARM
+ depends on ARM || COMPILE_TEST
depends on SATA_GEMINI
help
This option enables support for the Faraday FTIDE010
@@ -760,7 +748,7 @@ config PATA_ICSIDE
config PATA_IMX
tristate "PATA support for Freescale iMX"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
select PATA_TIMINGS
help
This option enables support for the PATA host available on Freescale
@@ -981,7 +969,7 @@ config PATA_VIA
config PATA_PXA
tristate "PXA DMA-capable PATA support"
- depends on ARCH_PXA
+ depends on ARCH_PXA || COMPILE_TEST
help
This option enables support for a hard drive attached to the PXA CPU's bus.
@@ -1157,7 +1145,7 @@ config PATA_RZ1000
config PATA_SAMSUNG_CF
tristate "Samsung SoC PATA support"
- depends on SAMSUNG_DEV_IDE
+ depends on SAMSUNG_DEV_IDE || COMPILE_TEST
select PATA_TIMINGS
help
This option enables basic support for Samsung's S3C/S5P board
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 2a04e8abd397..536d4cb8f08b 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -185,8 +185,6 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
unsigned int si, last_si = 0;
- VPRINTK("ENTER\n");
-
/*
* Next, the S/G list.
*/
@@ -362,8 +360,6 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
struct ata_host *host;
int n_ports, i, rc;
- VPRINTK("ENTER\n");
-
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
ata_print_version_once(&pdev->dev, DRV_VERSION);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 1e1167e725a4..ab5811ef5a53 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -51,6 +51,7 @@ enum board_ids {
board_ahci,
board_ahci_ign_iferr,
board_ahci_mobile,
+ board_ahci_no_debounce_delay,
board_ahci_nomsi,
board_ahci_noncq,
board_ahci_nosntf,
@@ -141,6 +142,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_no_debounce_delay] = {
+ .flags = AHCI_FLAG_COMMON,
+ .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nomsi] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
@@ -437,6 +445,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
board_ahci_al },
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
+ { PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
{ PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
@@ -684,7 +693,7 @@ static void ahci_pci_init_controller(struct ata_host *host)
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
- VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+ dev_dbg(&pdev->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
}
@@ -700,8 +709,6 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
bool online;
int rc;
- DPRINTK("ENTER\n");
-
hpriv->stop_engine(ap);
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
@@ -709,8 +716,6 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
hpriv->start_engine(ap);
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
-
/* vt8251 doesn't clear BSY on signature FIS reception,
* request follow-up softreset.
*/
@@ -790,8 +795,6 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
bool online;
int rc, i;
- DPRINTK("ENTER\n");
-
hpriv->stop_engine(ap);
for (i = 0; i < 2; i++) {
@@ -829,7 +832,6 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
if (online)
*class = ahci_dev_classify(ap);
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
return rc;
}
@@ -1476,7 +1478,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
u32 irq_stat, irq_masked;
unsigned int handled = 1;
- VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
irq_stat = readl(mmio + HOST_IRQ_STAT);
@@ -1493,7 +1494,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
irq_stat = readl(mmio + HOST_IRQ_STAT);
spin_unlock(&host->lock);
} while (irq_stat);
- VPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
@@ -1657,7 +1657,7 @@ static ssize_t remapped_nvme_show(struct device *dev,
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
- return sprintf(buf, "%u\n", hpriv->remapped_nvme);
+ return sysfs_emit(buf, "%u\n", hpriv->remapped_nvme);
}
static DEVICE_ATTR_RO(remapped_nvme);
@@ -1673,8 +1673,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int n_ports, i, rc;
int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
- VPRINTK("ENTER\n");
-
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
ata_print_version_once(&pdev->dev, DRV_VERSION);
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 6e9c5ade4c2e..64dd8aa397d5 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -246,7 +246,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
}
static unsigned int brcm_ahci_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
struct ata_port *ap = dev->link->ap;
struct ata_host *host = ap->host;
@@ -333,7 +333,7 @@ static struct ata_port_operations ahci_brcm_platform_ops = {
static const struct ata_port_info ahci_brcm_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
- .link_flags = ATA_LFLAG_NO_DB_DELAY,
+ .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_brcm_platform_ops,
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index e9c7c07fd84c..acf59f51b356 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -92,9 +92,8 @@ struct ceva_ahci_priv {
};
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
- __le16 *__id = (__le16 *)id;
u32 err_mask;
err_mask = ata_do_dev_read_id(dev, tf, id);
@@ -104,7 +103,7 @@ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
* Since CEVA controller does not support device sleep feature, we
* need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data.
*/
- __id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+ id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
return 0;
}
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 5b46fc9aeb4a..bf5b388bd4e0 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -103,8 +103,6 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
int rc;
bool ls1021a_workaround = (qoriq_priv->type == AHCI_LS1021A);
- DPRINTK("ENTER\n");
-
hpriv->stop_engine(ap);
/*
@@ -146,8 +144,6 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
if (online)
*class = ahci_dev_classify(ap);
-
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
return rc;
}
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index dffc432b9d54..8e206379d699 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -193,7 +193,7 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
struct xgene_ahci_context *ctx = hpriv->plat_data;
int rc = 0;
u32 port_fbs;
- void *port_mmio = ahci_port_base(ap);
+ void __iomem *port_mmio = ahci_port_base(ap);
/*
* Write the pmp value to PxFBS.DEV
@@ -237,7 +237,7 @@ static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
* does not support DEVSLP.
*/
static unsigned int xgene_ahci_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
u32 err_mask;
@@ -454,7 +454,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
u32 rc;
- void *port_mmio = ahci_port_base(ap);
+ void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
/*
@@ -499,7 +499,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct xgene_ahci_context *ctx = hpriv->plat_data;
- void *port_mmio = ahci_port_base(ap);
+ void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
@@ -588,8 +588,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
void __iomem *mmio;
u32 irq_stat, irq_masked;
- VPRINTK("ENTER\n");
-
hpriv = host->private_data;
mmio = hpriv->mmio;
@@ -612,8 +610,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
spin_unlock(&host->lock);
- VPRINTK("EXIT\n");
-
return IRQ_RETVAL(rc);
}
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 0b2fcf0d1d6c..27b0d903f91f 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -77,6 +77,7 @@
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
+#include <trace/events/libata.h>
#define DRV_NAME "ata_piix"
#define DRV_VERSION "2.13"
@@ -816,10 +817,15 @@ static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
static bool piix_irq_check(struct ata_port *ap)
{
+ unsigned char host_stat;
+
if (unlikely(!ap->ioaddr.bmdma_addr))
return false;
- return ap->ops->bmdma_status(ap) & ATA_DMA_INTR;
+ host_stat = ap->ops->bmdma_status(ap);
+ trace_ata_bmdma_status(ap, host_stat);
+
+ return host_stat & ATA_DMA_INTR;
}
#ifdef CONFIG_PM_SLEEP
@@ -1345,7 +1351,6 @@ static void piix_init_pcs(struct ata_host *host,
new_pcs = pcs | map_db->port_enable;
if (new_pcs != pcs) {
- DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
pci_write_config_word(pdev, ICH5_PCS, new_pcs);
msleep(150);
}
@@ -1769,14 +1774,12 @@ static int __init piix_init(void)
{
int rc;
- DPRINTK("pci_register_driver\n");
rc = pci_register_driver(&piix_pci_driver);
if (rc)
return rc;
in_module_init = 0;
- DPRINTK("done\n");
return 0;
}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index f76b8418e6fb..0ed484e04fd6 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1234,12 +1234,12 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
- VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
+ dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
- VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+ dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
@@ -1270,10 +1270,10 @@ void ahci_init_controller(struct ata_host *host)
}
tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
+ dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
+ dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
}
EXPORT_SYMBOL_GPL(ahci_init_controller);
@@ -1300,7 +1300,7 @@ unsigned int ahci_dev_classify(struct ata_port *ap)
tf.lbal = (tmp >> 8) & 0xff;
tf.nsect = (tmp) & 0xff;
- return ata_dev_classify(&tf);
+ return ata_port_classify(ap, &tf);
}
EXPORT_SYMBOL_GPL(ahci_dev_classify);
@@ -1415,8 +1415,6 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
bool fbs_disabled = false;
int rc;
- DPRINTK("ENTER\n");
-
/* prepare for SRST (AHCI-1.1 10.4.1) */
rc = ahci_kick_engine(ap);
if (rc && rc != -EOPNOTSUPP)
@@ -1476,7 +1474,6 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
if (fbs_disabled)
ahci_enable_fbs(ap);
- DPRINTK("EXIT, class=%u\n", *class);
return 0;
fail:
@@ -1498,8 +1495,6 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class,
{
int pmp = sata_srst_pmp(link);
- DPRINTK("ENTER\n");
-
return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
EXPORT_SYMBOL_GPL(ahci_do_softreset);
@@ -1529,8 +1524,6 @@ static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
int rc;
u32 irq_sts;
- DPRINTK("ENTER\n");
-
rc = ahci_do_softreset(link, class, pmp, deadline,
ahci_bad_pmp_check_ready);
@@ -1564,8 +1557,6 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
struct ata_taskfile tf;
int rc;
- DPRINTK("ENTER\n");
-
hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
@@ -1581,7 +1572,6 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
if (*online)
*class = ahci_dev_classify(ap);
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_do_hardreset);
@@ -1620,8 +1610,6 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
unsigned int si;
- VPRINTK("ENTER\n");
-
/*
* Next, the S/G list.
*/
@@ -1695,7 +1683,6 @@ static void ahci_fbs_dec_intr(struct ata_port *ap)
u32 fbs = readl(port_mmio + PORT_FBS);
int retries = 3;
- DPRINTK("ENTER\n");
BUG_ON(!pp->fbs_enabled);
/* time to wait for DEC is not specified by AHCI spec,
@@ -1924,8 +1911,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
void __iomem *port_mmio = ahci_port_base(ap);
u32 status;
- VPRINTK("ENTER\n");
-
status = readl(port_mmio + PORT_IRQ_STAT);
writel(status, port_mmio + PORT_IRQ_STAT);
@@ -1933,8 +1918,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
ahci_handle_port_interrupt(ap, port_mmio, status);
spin_unlock(ap->lock);
- VPRINTK("EXIT\n");
-
return IRQ_HANDLED;
}
@@ -1951,9 +1934,7 @@ u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
ap = host->ports[i];
if (ap) {
ahci_port_intr(ap);
- VPRINTK("port %u\n", i);
} else {
- VPRINTK("port %u (no irq)\n", i);
if (ata_ratelimit())
dev_warn(host->dev,
"interrupt on disabled port %u\n", i);
@@ -1974,8 +1955,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
void __iomem *mmio;
u32 irq_stat, irq_masked;
- VPRINTK("ENTER\n");
-
hpriv = host->private_data;
mmio = hpriv->mmio;
@@ -2003,8 +1982,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
spin_unlock(&host->lock);
- VPRINTK("EXIT\n");
-
return IRQ_RETVAL(rc);
}
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 0910441321f7..18296443ccba 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -579,11 +579,8 @@ int ahci_platform_init_host(struct platform_device *pdev,
int i, irq, n_ports, rc;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(dev, "no irq\n");
+ if (irq < 0)
return irq;
- }
if (!irq)
return -EINVAL;
@@ -642,13 +639,8 @@ int ahci_platform_init_host(struct platform_device *pdev,
if (hpriv->cap & HOST_CAP_64) {
rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (rc) {
- rc = dma_coerce_mask_and_coherent(dev,
- DMA_BIT_MASK(32));
- if (rc) {
- dev_err(dev, "Failed to enable 64-bit DMA.\n");
- return rc;
- }
- dev_warn(dev, "Enable 32-bit DMA instead of 64-bit.\n");
+ dev_err(dev, "Failed to enable 64-bit DMA.\n");
+ return rc;
}
}
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 7a7d6642edcc..8cfa8c96bb13 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -402,7 +402,6 @@ EXPORT_SYMBOL_GPL(ata_acpi_stm);
*/
static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
{
- struct ata_port *ap = dev->link->ap;
acpi_status status;
struct acpi_buffer output;
union acpi_object *out_obj;
@@ -418,10 +417,6 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: ENTER: port#: %d\n",
- __func__, ap->port_no);
-
/* _GTF has no input parameters */
status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_GTF", NULL,
&output);
@@ -437,11 +432,9 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
}
if (!output.length || !output.pointer) {
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n",
- __func__,
- (unsigned long long)output.length,
- output.pointer);
+ ata_dev_dbg(dev, "Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n",
+ (unsigned long long)output.length,
+ output.pointer);
rc = -EINVAL;
goto out_free;
}
@@ -464,9 +457,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
rc = out_obj->buffer.length / REGS_PER_GTF;
if (gtf) {
*gtf = (void *)out_obj->buffer.pointer;
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: returning gtf=%p, gtf_count=%d\n",
- __func__, *gtf, rc);
+ ata_dev_dbg(dev, "returning gtf=%p, gtf_count=%d\n",
+ *gtf, rc);
}
return rc;
@@ -650,9 +642,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
struct ata_taskfile *pptf = NULL;
struct ata_taskfile tf, ptf, rtf;
unsigned int err_mask;
- const char *level;
const char *descr;
- char msg[60];
int rc;
if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0)
@@ -666,6 +656,8 @@ static int ata_acpi_run_tf(struct ata_device *dev,
pptf = &ptf;
}
+ descr = ata_get_cmd_name(tf.command);
+
if (!ata_acpi_filter_tf(dev, &tf, pptf)) {
rtf = tf;
err_mask = ata_exec_internal(dev, &rtf, NULL,
@@ -673,40 +665,42 @@ static int ata_acpi_run_tf(struct ata_device *dev,
switch (err_mask) {
case 0:
- level = KERN_DEBUG;
- snprintf(msg, sizeof(msg), "succeeded");
+ ata_dev_dbg(dev,
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
+ "(%s) succeeded\n",
+ tf.command, tf.feature, tf.nsect, tf.lbal,
+ tf.lbam, tf.lbah, tf.device, descr);
rc = 1;
break;
case AC_ERR_DEV:
- level = KERN_INFO;
- snprintf(msg, sizeof(msg),
- "rejected by device (Stat=0x%02x Err=0x%02x)",
- rtf.command, rtf.feature);
+ ata_dev_info(dev,
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
+ "(%s) rejected by device (Stat=0x%02x Err=0x%02x)",
+ tf.command, tf.feature, tf.nsect, tf.lbal,
+ tf.lbam, tf.lbah, tf.device, descr,
+ rtf.command, rtf.feature);
rc = 0;
break;
default:
- level = KERN_ERR;
- snprintf(msg, sizeof(msg),
- "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
- err_mask, rtf.command, rtf.feature);
+ ata_dev_err(dev,
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
+ "(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
+ tf.command, tf.feature, tf.nsect, tf.lbal,
+ tf.lbam, tf.lbah, tf.device, descr,
+ err_mask, rtf.command, rtf.feature);
rc = -EIO;
break;
}
} else {
- level = KERN_INFO;
- snprintf(msg, sizeof(msg), "filtered out");
+ ata_dev_info(dev,
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
+ "(%s) filtered out\n",
+ tf.command, tf.feature, tf.nsect, tf.lbal,
+ tf.lbam, tf.lbah, tf.device, descr);
rc = 0;
}
- descr = ata_get_cmd_descript(tf.command);
-
- ata_dev_printk(dev, level,
- "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n",
- tf.command, tf.feature, tf.nsect, tf.lbal,
- tf.lbam, tf.lbah, tf.device,
- (descr ? descr : "unknown"), msg);
-
return rc;
}
@@ -776,9 +770,8 @@ static int ata_acpi_push_id(struct ata_device *dev)
struct acpi_object_list input;
union acpi_object in_params[1];
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n",
- __func__, dev->devno, ap->port_no);
+ ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n",
+ __func__, dev->devno, ap->port_no);
/* Give the drive Identify data to the drive via the _SDD method */
/* _SDD: set up input parameters */
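The ata_acpi_run_tf() rework above shows the conversion pattern used throughout this series: instead of selecting a printk level at runtime and pre-formatting the message tail into a stack buffer, each branch calls the level-specific helper directly, and ata_dev_dbg() goes through dynamic debug, which is what lets the ata_msg_*() gates disappear. Roughly (a sketch; the helper names are the ones the diff already uses):

/* old shape: runtime level + stack buffer + one trailing printk */
level = KERN_ERR;
snprintf(msg, sizeof(msg), "failed (Emask=0x%x)", err_mask);
ata_dev_printk(dev, level, "ACPI cmd ... (%s) %s\n", descr, msg);

/* new shape: one level-specific call per branch, no buffer needed */
ata_dev_err(dev, "ACPI cmd ... (%s) failed (Emask=0x%x)\n",
	    descr, err_mask);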
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index aba0c67d1bd6..67f88027680a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -764,9 +764,6 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
head = track % dev->heads;
sect = (u32)block % dev->sectors + 1;
- DPRINTK("block %u track %u cyl %u head %u sect %u\n",
- (u32)block, track, cyl, head, sect);
-
/* Check whether the converted CHS can fit.
Cylinder: 0-65535
Head: 0-15
@@ -1010,32 +1007,21 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
* SEMB signature. This is worked around in
* ata_dev_read_id().
*/
- if ((tf->lbam == 0) && (tf->lbah == 0)) {
- DPRINTK("found ATA device by sig\n");
+ if (tf->lbam == 0 && tf->lbah == 0)
return ATA_DEV_ATA;
- }
- if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
- DPRINTK("found ATAPI device by sig\n");
+ if (tf->lbam == 0x14 && tf->lbah == 0xeb)
return ATA_DEV_ATAPI;
- }
- if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
- DPRINTK("found PMP device by sig\n");
+ if (tf->lbam == 0x69 && tf->lbah == 0x96)
return ATA_DEV_PMP;
- }
- if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
- DPRINTK("found SEMB device by sig (could be ATA device)\n");
+ if (tf->lbam == 0x3c && tf->lbah == 0xc3)
return ATA_DEV_SEMB;
- }
- if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
- DPRINTK("found ZAC device by sig\n");
+ if (tf->lbam == 0xcd && tf->lbah == 0xab)
return ATA_DEV_ZAC;
- }
- DPRINTK("unknown device\n");
return ATA_DEV_UNKNOWN;
}
EXPORT_SYMBOL_GPL(ata_dev_classify);
@@ -1355,6 +1341,7 @@ static int ata_hpa_resize(struct ata_device *dev)
/**
* ata_dump_id - IDENTIFY DEVICE info debugging output
+ * @dev: device from which the information is fetched
* @id: IDENTIFY DEVICE page to dump
*
* Dump selected 16-bit words from the given IDENTIFY DEVICE
@@ -1364,32 +1351,14 @@ static int ata_hpa_resize(struct ata_device *dev)
* caller.
*/
-static inline void ata_dump_id(const u16 *id)
-{
- DPRINTK("49==0x%04x "
- "53==0x%04x "
- "63==0x%04x "
- "64==0x%04x "
- "75==0x%04x \n",
- id[49],
- id[53],
- id[63],
- id[64],
- id[75]);
- DPRINTK("80==0x%04x "
- "81==0x%04x "
- "82==0x%04x "
- "83==0x%04x "
- "84==0x%04x \n",
- id[80],
- id[81],
- id[82],
- id[83],
- id[84]);
- DPRINTK("88==0x%04x "
- "93==0x%04x\n",
- id[88],
- id[93]);
+static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
+{
+ ata_dev_dbg(dev,
+ "49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n"
+ "80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n"
+ "88==0x%04x 93==0x%04x\n",
+ id[49], id[53], id[63], id[64], id[75], id[80],
+ id[81], id[82], id[83], id[84], id[88], id[93]);
}
/**
@@ -1602,9 +1571,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
else
ata_qc_complete(qc);
- if (ata_msg_warn(ap))
- ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
- command);
+ ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
+ command);
}
spin_unlock_irqrestore(ap->lock, flags);
@@ -1754,7 +1722,7 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
* this function is wrapped or replaced by the driver
*/
unsigned int ata_do_dev_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
id, sizeof(id[0]) * ATA_ID_WORDS, 0);
@@ -1794,9 +1762,6 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
int may_fallback = 1, tried_spinup = 0;
int rc;
- if (ata_msg_ctl(ap))
- ata_dev_dbg(dev, "%s: ENTER\n", __func__);
-
retry:
ata_tf_init(dev, &tf);
@@ -1830,9 +1795,9 @@ retry:
tf.flags |= ATA_TFLAG_POLLING;
if (ap->ops->read_id)
- err_mask = ap->ops->read_id(dev, &tf, id);
+ err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
else
- err_mask = ata_do_dev_read_id(dev, &tf, id);
+ err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
if (err_mask) {
if (err_mask & AC_ERR_NODEV_HINT) {
@@ -1879,10 +1844,10 @@ retry:
}
if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
- ata_dev_dbg(dev, "dumping IDENTIFY data, "
+ ata_dev_info(dev, "dumping IDENTIFY data, "
"class=%d may_fallback=%d tried_spinup=%d\n",
class, may_fallback, tried_spinup);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
}
@@ -1966,9 +1931,8 @@ retry:
return 0;
err_out:
- if (ata_msg_warn(ap))
- ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
- reason, err_mask);
+ ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
+ reason, err_mask);
return rc;
}
@@ -1996,7 +1960,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
unsigned int err_mask;
bool dma = false;
- DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
+ ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);
/*
* Return error without actually issuing the command on controllers
@@ -2390,7 +2354,6 @@ static void ata_dev_config_trusted(struct ata_device *dev)
static int ata_dev_config_lba(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
const u16 *id = dev->id;
const char *lba_desc;
char ncq_desc[24];
@@ -2412,7 +2375,7 @@ static int ata_dev_config_lba(struct ata_device *dev)
ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
/* print device info to dmesg */
- if (ata_msg_drv(ap) && ata_dev_print_info(dev))
+ if (ata_dev_print_info(dev))
ata_dev_info(dev,
"%llu sectors, multi %u: %s %s\n",
(unsigned long long)dev->n_sectors,
@@ -2423,7 +2386,6 @@ static int ata_dev_config_lba(struct ata_device *dev)
static void ata_dev_config_chs(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
const u16 *id = dev->id;
if (ata_id_current_chs_valid(id)) {
@@ -2439,7 +2401,7 @@ static void ata_dev_config_chs(struct ata_device *dev)
}
/* print device info to dmesg */
- if (ata_msg_drv(ap) && ata_dev_print_info(dev))
+ if (ata_dev_print_info(dev))
ata_dev_info(dev,
"%llu sectors, multi %u, CHS %u/%u/%u\n",
(unsigned long long)dev->n_sectors,
@@ -2566,14 +2528,11 @@ int ata_dev_configure(struct ata_device *dev)
char modelbuf[ATA_ID_PROD_LEN+1];
int rc;
- if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
- ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
+ if (!ata_dev_enabled(dev)) {
+ ata_dev_dbg(dev, "no device\n");
return 0;
}
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: ENTER\n", __func__);
-
/* set horkage */
dev->horkage |= ata_dev_blacklisted(dev);
ata_force_horkage(dev);
@@ -2621,13 +2580,12 @@ int ata_dev_configure(struct ata_device *dev)
return rc;
/* print device capabilities */
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev,
- "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
- "85:%04x 86:%04x 87:%04x 88:%04x\n",
- __func__,
- id[49], id[82], id[83], id[84],
- id[85], id[86], id[87], id[88]);
+ ata_dev_dbg(dev,
+ "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
+ "85:%04x 86:%04x 87:%04x 88:%04x\n",
+ __func__,
+ id[49], id[82], id[83], id[84],
+ id[85], id[86], id[87], id[88]);
/* initialize to-be-configured parameters */
dev->flags &= ~ATA_DFLAG_CFG_MASK;
@@ -2646,8 +2604,7 @@ int ata_dev_configure(struct ata_device *dev)
/* find max transfer mode; for printk only */
xfer_mask = ata_id_xfermask(id);
- if (ata_msg_probe(ap))
- ata_dump_id(id);
+ ata_dump_id(dev, id);
/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
@@ -2685,7 +2642,7 @@ int ata_dev_configure(struct ata_device *dev)
}
/* print device info to dmesg */
- if (ata_msg_drv(ap) && print_info)
+ if (print_info)
ata_dev_info(dev, "%s: %s, %s, max %s\n",
revbuf, modelbuf, fwrevbuf,
ata_mode_string(xfer_mask));
@@ -2705,7 +2662,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_cpr(dev);
dev->cdb_len = 32;
- if (ata_msg_drv(ap) && print_info)
+ if (print_info)
ata_dev_print_features(dev);
}
@@ -2718,8 +2675,7 @@ int ata_dev_configure(struct ata_device *dev)
rc = atapi_cdb_len(id);
if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
- if (ata_msg_warn(ap))
- ata_dev_warn(dev, "unsupported CDB len\n");
+ ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
rc = -EINVAL;
goto err_out_nosup;
}
@@ -2763,7 +2719,7 @@ int ata_dev_configure(struct ata_device *dev)
}
/* print device info to dmesg */
- if (ata_msg_drv(ap) && print_info)
+ if (print_info)
ata_dev_info(dev,
"ATAPI: %s, %s, max %s%s%s%s\n",
modelbuf, fwrevbuf,
@@ -2780,7 +2736,7 @@ int ata_dev_configure(struct ata_device *dev)
/* Limit PATA drive on SATA cable bridge transfers to udma5,
200 sectors */
if (ata_dev_knobble(dev)) {
- if (ata_msg_drv(ap) && print_info)
+ if (print_info)
ata_dev_info(dev, "applying bridge limits\n");
dev->udma_mask &= ATA_UDMA5;
dev->max_sectors = ATA_MAX_SECTORS;
@@ -2829,8 +2785,6 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
err_out_nosup:
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
return rc;
}
@@ -3373,8 +3327,8 @@ static int ata_dev_set_mode(struct ata_device *dev)
dev_err_whine = " (device error ignored)";
}
- DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
- dev->xfer_shift, (int)dev->xfer_mode);
+ ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
+ dev->xfer_shift, (int)dev->xfer_mode);
if (!(ehc->i.flags & ATA_EHI_QUIET) ||
ehc->i.flags & ATA_EHI_DID_HARDRESET)
@@ -3688,16 +3642,12 @@ void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
u32 serror;
- DPRINTK("ENTER\n");
-
/* reset complete, clear SError */
if (!sata_scr_read(link, SCR_ERROR, &serror))
sata_scr_write(link, SCR_ERROR, serror);
/* print link status */
sata_print_link_status(link);
-
- DPRINTK("EXIT\n");
}
EXPORT_SYMBOL_GPL(ata_std_postreset);
@@ -4324,7 +4274,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
unsigned int err_mask;
/* set up set-features taskfile */
- DPRINTK("set features - xfer mode\n");
+ ata_dev_dbg(dev, "set features - xfer mode\n");
/* Some controllers and ATAPI devices show flaky interrupt
* behavior after setting xfer mode. Use polling instead.
@@ -4346,7 +4296,6 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
/* On some disks, this command causes spin-up, so we need longer timeout */
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
- DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
}
@@ -4372,7 +4321,7 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
unsigned long timeout = 0;
/* set up set-features taskfile */
- DPRINTK("set features - SATA features\n");
+ ata_dev_dbg(dev, "set features - SATA features\n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_SET_FEATURES;
@@ -4386,7 +4335,6 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
- DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
}
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
@@ -4414,7 +4362,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
return AC_ERR_INVALID;
/* set up init dev params taskfile */
- DPRINTK("init dev params \n");
+ ata_dev_dbg(dev, "init dev params \n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_INIT_DEV_PARAMS;
@@ -4430,7 +4378,6 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
err_mask = 0;
- DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
}
@@ -4542,8 +4489,6 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
WARN_ON_ONCE(sg == NULL);
- VPRINTK("unmapping %u sg elements\n", qc->n_elem);
-
if (qc->n_elem)
dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
@@ -4569,13 +4514,10 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
unsigned int n_elem;
- VPRINTK("ENTER, ata%u\n", ap->print_id);
-
n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
if (n_elem < 1)
return -1;
- VPRINTK("%d sg elements mapped\n", n_elem);
qc->orig_n_elem = qc->n_elem;
qc->n_elem = n_elem;
qc->flags |= ATA_QCFLAG_DMAMAP;
@@ -4930,6 +4872,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
return;
}
+ trace_ata_qc_prep(qc);
qc->err_mask |= ap->ops->qc_prep(qc);
if (unlikely(qc->err_mask))
goto err;
@@ -5375,8 +5318,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
{
struct ata_port *ap;
- DPRINTK("ENTER\n");
-
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (!ap)
return NULL;
@@ -5388,15 +5329,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->host = host;
ap->dev = host->dev;
-#if defined(ATA_VERBOSE_DEBUG)
- /* turn on all debugging levels */
- ap->msg_enable = 0x00FF;
-#elif defined(ATA_DEBUG)
- ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
-#else
- ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
-#endif
-
mutex_init(&ap->scsi_scan_mutex);
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
@@ -5493,8 +5425,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
int i;
void *dr;
- DPRINTK("ENTER\n");
-
/* alloc a container for our list of ATA ports (buses) */
sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
host = kzalloc(sz, GFP_KERNEL);
@@ -5784,9 +5714,7 @@ int ata_port_probe(struct ata_port *ap)
__ata_port_probe(ap);
ata_port_wait_eh(ap);
} else {
- DPRINTK("ata%u: bus probe begin\n", ap->print_id);
rc = ata_bus_probe(ap);
- DPRINTK("ata%u: bus probe end\n", ap->print_id);
}
return rc;
}
@@ -6553,69 +6481,14 @@ const struct ata_port_info ata_dummy_port_info = {
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-/*
- * Utility print functions
- */
-void ata_port_printk(const struct ata_port *ap, const char *level,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- printk("%sata%u: %pV", level, ap->print_id, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(ata_port_printk);
-
-void ata_link_printk(const struct ata_link *link, const char *level,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- if (sata_pmp_attached(link->ap) || link->ap->slave_link)
- printk("%sata%u.%02u: %pV",
- level, link->ap->print_id, link->pmp, &vaf);
- else
- printk("%sata%u: %pV",
- level, link->ap->print_id, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(ata_link_printk);
-
-void ata_dev_printk(const struct ata_device *dev, const char *level,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- printk("%sata%u.%02u: %pV",
- level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
- &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(ata_dev_printk);
-
void ata_print_version(const struct device *dev, const char *version)
{
dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 1d4a6f1e88cd..7951fd946bf9 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -533,8 +533,6 @@ void ata_scsi_error(struct Scsi_Host *host)
unsigned long flags;
LIST_HEAD(eh_work_q);
- DPRINTK("ENTER\n");
-
spin_lock_irqsave(host->host_lock, flags);
list_splice_init(&host->eh_cmd_q, &eh_work_q);
spin_unlock_irqrestore(host->host_lock, flags);
@@ -548,7 +546,6 @@ void ata_scsi_error(struct Scsi_Host *host)
/* finish or retry handled scmd's and clean up */
WARN_ON(!list_empty(&eh_work_q));
- DPRINTK("EXIT\n");
}
/**
@@ -940,7 +937,7 @@ void ata_std_sched_eh(struct ata_port *ap)
ata_eh_set_pending(ap, 1);
scsi_schedule_eh(ap->scsi_host);
- DPRINTK("port EH scheduled\n");
+ trace_ata_std_sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);
@@ -1070,7 +1067,7 @@ static void __ata_port_freeze(struct ata_port *ap)
ap->pflags |= ATA_PFLAG_FROZEN;
- DPRINTK("ata%u port frozen\n", ap->print_id);
+ trace_ata_port_freeze(ap);
}
/**
@@ -1147,7 +1144,7 @@ void ata_eh_thaw_port(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
- DPRINTK("ata%u port thawed\n", ap->print_id);
+ trace_ata_port_thaw(ap);
}
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
@@ -1217,8 +1214,7 @@ void ata_dev_disable(struct ata_device *dev)
if (!ata_dev_enabled(dev))
return;
- if (ata_msg_drv(dev->link->ap))
- ata_dev_warn(dev, "disabled\n");
+ ata_dev_warn(dev, "disable device\n");
ata_acpi_on_disable(dev);
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
dev->class++;
@@ -1287,6 +1283,8 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
struct ata_eh_context *ehc = &link->eh_context;
unsigned long flags;
+ trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action);
+
spin_lock_irqsave(ap->lock, flags);
ata_eh_clear_action(link, dev, ehi, action);
@@ -1317,6 +1315,8 @@ void ata_eh_done(struct ata_link *link, struct ata_device *dev,
{
struct ata_eh_context *ehc = &link->eh_context;
+ trace_ata_eh_done(link, dev ? dev->devno : 0, action);
+
ata_eh_clear_action(link, dev, &ehc->i, action);
}
@@ -1421,8 +1421,6 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc,
return;
}
- DPRINTK("ATA request sense\n");
-
ata_tf_init(dev, &tf);
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
@@ -1463,8 +1461,6 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
struct ata_port *ap = dev->link->ap;
struct ata_taskfile tf;
- DPRINTK("ATAPI request sense\n");
-
memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* initialize sense_buf with the error register,
@@ -1928,8 +1924,6 @@ static void ata_eh_link_autopsy(struct ata_link *link)
u32 serror;
int rc;
- DPRINTK("ENTER\n");
-
if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
return;
@@ -2036,7 +2030,6 @@ static void ata_eh_link_autopsy(struct ata_link *link)
ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
}
- DPRINTK("EXIT\n");
}
/**
@@ -2086,16 +2079,15 @@ void ata_eh_autopsy(struct ata_port *ap)
}
/**
- * ata_get_cmd_descript - get description for ATA command
- * @command: ATA command code to get description for
+ * ata_get_cmd_name - get name for ATA command
+ * @command: ATA command code to get name for
*
- * Return a textual description of the given command, or NULL if the
- * command is not known.
+ * Return a textual name of the given command, or "unknown" if the command is not known.
*
* LOCKING:
* None
*/
-const char *ata_get_cmd_descript(u8 command)
+const char *ata_get_cmd_name(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
static const struct
@@ -2203,9 +2195,9 @@ const char *ata_get_cmd_descript(u8 command)
return cmd_descr[i].text;
#endif
- return NULL;
+ return "unknown";
}
-EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
+EXPORT_SYMBOL_GPL(ata_get_cmd_name);
/**
* ata_eh_link_report - report error handling to user
@@ -2354,12 +2346,9 @@ static void ata_eh_link_report(struct ata_link *link)
}
__scsi_format_command(cdb_buf, sizeof(cdb_buf),
cdb, cdb_len);
- } else {
- const char *descr = ata_get_cmd_descript(cmd->command);
- if (descr)
- ata_dev_err(qc->dev, "failed command: %s\n",
- descr);
- }
+ } else
+ ata_dev_err(qc->dev, "failed command: %s\n",
+ ata_get_cmd_name(cmd->command));
ata_dev_err(qc->dev,
"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
@@ -2596,12 +2585,19 @@ int ata_eh_reset(struct ata_link *link, int classify,
/* mark that this EH session started with reset */
ehc->last_reset = jiffies;
- if (reset == hardreset)
+ if (reset == hardreset) {
ehc->i.flags |= ATA_EHI_DID_HARDRESET;
- else
+ trace_ata_link_hardreset_begin(link, classes, deadline);
+ } else {
ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
+ trace_ata_link_softreset_begin(link, classes, deadline);
+ }
rc = ata_do_reset(link, reset, classes, deadline, true);
+ if (reset == hardreset)
+ trace_ata_link_hardreset_end(link, classes, rc);
+ else
+ trace_ata_link_softreset_end(link, classes, rc);
if (rc && rc != -EAGAIN) {
failed_link = link;
goto fail;
@@ -2615,8 +2611,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_link_info(slave, "hard resetting link\n");
ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
+ trace_ata_slave_hardreset_begin(slave, classes,
+ deadline);
tmp = ata_do_reset(slave, reset, classes, deadline,
false);
+ trace_ata_slave_hardreset_end(slave, classes, tmp);
switch (tmp) {
case -EAGAIN:
rc = -EAGAIN;
@@ -2644,7 +2643,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
}
ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
+ trace_ata_link_softreset_begin(link, classes, deadline);
rc = ata_do_reset(link, reset, classes, deadline, true);
+ trace_ata_link_softreset_end(link, classes, rc);
if (rc) {
failed_link = link;
goto fail;
@@ -2698,8 +2699,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
*/
if (postreset) {
postreset(link, classes);
- if (slave)
+ trace_ata_link_postreset(link, classes, rc);
+ if (slave) {
postreset(slave, classes);
+ trace_ata_slave_postreset(slave, classes, rc);
+ }
}
/*
@@ -2921,8 +2925,6 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
unsigned long flags;
int rc = 0;
- DPRINTK("ENTER\n");
-
/* For PATA drive side cable detection to work, IDENTIFY must
* be done backwards such that PDIAG- is released by the slave
* device before the master device is identified.
@@ -3036,7 +3038,6 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
err:
*r_failed_dev = dev;
- DPRINTK("EXIT rc=%d\n", rc);
return rc;
}
@@ -3551,8 +3552,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
int rc, nr_fails;
unsigned long flags, deadline;
- DPRINTK("ENTER\n");
-
/* prep for recovery */
ata_for_each_link(link, ap, EDGE) {
struct ata_eh_context *ehc = &link->eh_context;
@@ -3760,7 +3759,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
if (rc && r_failed_link)
*r_failed_link = link;
- DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
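The DPRINTK() ENTER/EXIT pairs removed above are either dropped outright or promoted to tracepoints such as trace_ata_port_freeze() and trace_ata_eh_done(); a tracepoint costs nothing when disabled and can be toggled per event at runtime. The real definitions live in include/trace/events/libata.h; a minimal sketch of the shape such an event takes (the field layout here is assumed, not quoted from the header):

TRACE_EVENT(ata_port_freeze,
	TP_PROTO(struct ata_port *ap),
	TP_ARGS(ap),
	TP_STRUCT__entry(
		__field(unsigned int, ata_port)	/* assumed field */
	),
	TP_fast_assign(
		__entry->ata_port = ap->print_id;
	),
	TP_printk("ata_port=%u", __entry->ata_port)
);

At runtime such an event is enabled with, e.g., echo 1 > /sys/kernel/debug/tracing/events/libata/ata_port_freeze/enable, which replaces rebuilding with ATA_DEBUG defined.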
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index ba7be3f38617..e2e9cbd405fa 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -652,8 +652,6 @@ static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
u32 *gscr = (void *)ap->sector_buf;
int rc;
- DPRINTK("ENTER\n");
-
ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
if (!ata_dev_enabled(dev)) {
@@ -686,12 +684,10 @@ static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
ata_eh_done(link, NULL, ATA_EH_REVALIDATE);
- DPRINTK("EXIT, rc=0\n");
return 0;
fail:
ata_dev_err(dev, "PMP revalidation failed (errno=%d)\n", rc);
- DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
@@ -759,8 +755,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
int detach = 0, rc = 0;
int reval_failed = 0;
- DPRINTK("ENTER\n");
-
if (dev->flags & ATA_DFLAG_DETACH) {
detach = 1;
rc = -ENODEV;
@@ -828,7 +822,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
/* okay, PMP resurrected */
ehc->i.flags = 0;
- DPRINTK("EXIT, rc=0\n");
return 0;
fail:
@@ -838,7 +831,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
else
ata_dev_disable(dev);
- DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index b9c77885b872..071158c0c44c 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -317,7 +317,7 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,
* immediately after resuming. Delay 200ms before
* debouncing.
*/
- if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
+ if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY))
ata_msleep(link->ap, 200);
/* is SControl restored correctly? */
@@ -533,8 +533,6 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
u32 scontrol;
int rc;
- DPRINTK("ENTER\n");
-
if (online)
*online = false;
@@ -610,7 +608,6 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
*online = false;
ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
}
- DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL_GPL(sata_link_hardreset);
@@ -876,7 +873,7 @@ static ssize_t ata_ncq_prio_enable_show(struct device *device,
ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
spin_unlock_irq(ap->lock);
- return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
+ return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
}
static ssize_t ata_ncq_prio_enable_store(struct device *device,
@@ -972,7 +969,7 @@ ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
- return snprintf(buf, 23, "%d\n", ap->em_message_type);
+ return sysfs_emit(buf, "%d\n", ap->em_message_type);
}
DEVICE_ATTR(em_message_type, S_IRUGO,
ata_scsi_em_message_type_show, NULL);
@@ -1261,8 +1258,6 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
{
int rc = 0;
- ata_scsi_dump_cdb(ap, cmd);
-
if (likely(ata_dev_enabled(ap->link.device)))
rc = __ata_scsi_queuecmd(cmd, ap->link.device);
else {
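The snprintf(buf, 20, ...) and snprintf(buf, 23, ...) calls in the sysfs show() callbacks above are converted to sysfs_emit(), which knows the buffer is a PAGE_SIZE sysfs page and checks for misuse instead of relying on hand-counted lengths. The resulting idiom, sketched with a hypothetical attribute (foo and value are placeholders, not names from this diff):

/* sketch, not part of the diff */
static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", value);	/* bounded to PAGE_SIZE */
}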
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 313e9475507b..ed8be585a98f 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -121,7 +121,7 @@ static ssize_t ata_scsi_park_show(struct device *device,
unlock:
spin_unlock_irq(ap->lock);
- return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
+ return rc ? rc : sysfs_emit(buf, "%u\n", msecs);
}
static ssize_t ata_scsi_park_store(struct device *device,
@@ -668,7 +668,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
/**
* ata_dump_status - user friendly display of error info
- * @id: id of the port in question
+ * @ap: the port in question
* @tf: ptr to filled out taskfile
*
* Decode and dump the ATA error/status registers for the user so
@@ -678,37 +678,32 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
* LOCKING:
* inherited from caller
*/
-static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
+static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
{
u8 stat = tf->command, err = tf->feature;
- pr_warn("ata%u: status=0x%02x { ", id, stat);
if (stat & ATA_BUSY) {
- pr_cont("Busy }\n"); /* Data is not valid in this case */
+ ata_port_warn(ap, "status=0x%02x {Busy} ", stat);
} else {
- if (stat & ATA_DRDY) pr_cont("DriveReady ");
- if (stat & ATA_DF) pr_cont("DeviceFault ");
- if (stat & ATA_DSC) pr_cont("SeekComplete ");
- if (stat & ATA_DRQ) pr_cont("DataRequest ");
- if (stat & ATA_CORR) pr_cont("CorrectedError ");
- if (stat & ATA_SENSE) pr_cont("Sense ");
- if (stat & ATA_ERR) pr_cont("Error ");
- pr_cont("}\n");
-
- if (err) {
- pr_warn("ata%u: error=0x%02x { ", id, err);
- if (err & ATA_ABORTED) pr_cont("DriveStatusError ");
- if (err & ATA_ICRC) {
- if (err & ATA_ABORTED)
- pr_cont("BadCRC ");
- else pr_cont("Sector ");
- }
- if (err & ATA_UNC) pr_cont("UncorrectableError ");
- if (err & ATA_IDNF) pr_cont("SectorIdNotFound ");
- if (err & ATA_TRK0NF) pr_cont("TrackZeroNotFound ");
- if (err & ATA_AMNF) pr_cont("AddrMarkNotFound ");
- pr_cont("}\n");
- }
+ ata_port_warn(ap, "status=0x%02x { %s%s%s%s%s%s%s} ", stat,
+ stat & ATA_DRDY ? "DriveReady " : "",
+ stat & ATA_DF ? "DeviceFault " : "",
+ stat & ATA_DSC ? "SeekComplete " : "",
+ stat & ATA_DRQ ? "DataRequest " : "",
+ stat & ATA_CORR ? "CorrectedError " : "",
+ stat & ATA_SENSE ? "Sense " : "",
+ stat & ATA_ERR ? "Error " : "");
+ if (err)
+ ata_port_warn(ap, "error=0x%02x {%s%s%s%s%s%s", err,
+ err & ATA_ABORTED ?
+ "DriveStatusError " : "",
+ err & ATA_ICRC ?
+ (err & ATA_ABORTED ?
+ "BadCRC " : "Sector ") : "",
+ err & ATA_UNC ? "UncorrectableError " : "",
+ err & ATA_IDNF ? "SectorIdNotFound " : "",
+ err & ATA_TRK0NF ? "TrackZeroNotFound " : "",
+ err & ATA_AMNF ? "AddrMarkNotFound " : "");
}
}
@@ -1299,8 +1294,6 @@ static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
u64 lba = 0;
u32 len;
- VPRINTK("six-byte command\n");
-
lba |= ((u64)(cdb[1] & 0x1f)) << 16;
lba |= ((u64)cdb[2]) << 8;
lba |= ((u64)cdb[3]);
@@ -1326,8 +1319,6 @@ static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
u64 lba = 0;
u32 len = 0;
- VPRINTK("ten-byte command\n");
-
lba |= ((u64)cdb[2]) << 24;
lba |= ((u64)cdb[3]) << 16;
lba |= ((u64)cdb[4]) << 8;
@@ -1355,8 +1346,6 @@ static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
u64 lba = 0;
u32 len = 0;
- VPRINTK("sixteen-byte command\n");
-
lba |= ((u64)cdb[2]) << 56;
lba |= ((u64)cdb[3]) << 48;
lba |= ((u64)cdb[4]) << 40;
@@ -1469,9 +1458,6 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
head = track % dev->heads;
sect = (u32)block % dev->sectors + 1;
- DPRINTK("block %u track %u cyl %u head %u sect %u\n",
- (u32)block, track, cyl, head, sect);
-
/* Check whether the converted CHS can fit.
Cylinder: 0-65535
Head: 0-15
@@ -1594,7 +1580,6 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
goto invalid_fld;
break;
default:
- DPRINTK("no-byte command\n");
fp = 0;
goto invalid_fld;
}
@@ -1672,7 +1657,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
cmd->result = SAM_STAT_GOOD;
if (need_sense && !ap->ops->error_handler)
- ata_dump_status(ap->print_id, &qc->result_tf);
+ ata_dump_status(ap, &qc->result_tf);
ata_qc_done(qc);
}
@@ -1710,8 +1695,6 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
struct ata_queued_cmd *qc;
int rc;
- VPRINTK("ENTER\n");
-
qc = ata_scsi_qc_new(dev, cmd);
if (!qc)
goto err_mem;
@@ -1742,13 +1725,11 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
/* select device, send command to hardware */
ata_qc_issue(qc);
- VPRINTK("EXIT\n");
return 0;
early_finish:
ata_qc_free(qc);
scsi_done(cmd);
- DPRINTK("EXIT - early finish (good or error)\n");
return 0;
err_did:
@@ -1756,12 +1737,10 @@ err_did:
cmd->result = (DID_ERROR << 16);
scsi_done(cmd);
err_mem:
- DPRINTK("EXIT - internal\n");
return 0;
defer:
ata_qc_free(qc);
- DPRINTK("EXIT - defer\n");
if (rc == ATA_DEFER_LINK)
return SCSI_MLQUEUE_DEVICE_BUSY;
else
@@ -1858,8 +1837,6 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
2
};
- VPRINTK("ENTER\n");
-
/* set scsi removable (RMB) bit per ata bit, or if the
* AHCI port says it's external (Hotplug-capable, eSATA).
*/
@@ -2294,8 +2271,6 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
u8 dpofua, bp = 0xff;
u16 fp;
- VPRINTK("ENTER\n");
-
six_byte = (scsicmd[0] == MODE_SENSE);
ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */
/*
@@ -2413,8 +2388,6 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
- VPRINTK("ENTER\n");
-
if (args->cmd->cmnd[0] == READ_CAPACITY) {
if (last_lba >= 0xffffffffULL)
last_lba = 0xffffffff;
@@ -2481,7 +2454,6 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
*/
static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
{
- VPRINTK("ENTER\n");
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
return 0;
@@ -2512,8 +2484,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct scsi_cmnd *cmd = qc->scsicmd;
- DPRINTK("ATAPI request sense\n");
-
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
#ifdef CONFIG_ATA_SFF
@@ -2552,8 +2522,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
qc->complete_fn = atapi_sense_complete;
ata_qc_issue(qc);
-
- DPRINTK("EXIT\n");
}
/*
@@ -2581,8 +2549,6 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
struct scsi_cmnd *cmd = qc->scsicmd;
unsigned int err_mask = qc->err_mask;
- VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
-
/* handle completion from new EH */
if (unlikely(qc->ap->ops->error_handler &&
(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
@@ -2663,7 +2629,6 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
qc->tf.flags |= ATA_TFLAG_WRITE;
- DPRINTK("direction: write\n");
}
qc->tf.command = ATA_CMD_PACKET;
@@ -3591,10 +3556,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
*/
if (len != CACHE_MPAGE_LEN - 2) {
- if (len < CACHE_MPAGE_LEN - 2)
- *fp = len;
- else
- *fp = CACHE_MPAGE_LEN - 2;
+ *fp = min(len, CACHE_MPAGE_LEN - 2);
return -EINVAL;
}
@@ -3647,10 +3609,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
*/
if (len != CONTROL_MPAGE_LEN - 2) {
- if (len < CONTROL_MPAGE_LEN - 2)
- *fp = len;
- else
- *fp = CONTROL_MPAGE_LEN - 2;
+ *fp = min(len, CONTROL_MPAGE_LEN - 2);
return -EINVAL;
}
@@ -3698,8 +3657,6 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
u8 buffer[64];
const u8 *p = buffer;
- VPRINTK("ENTER\n");
-
six_byte = (cdb[0] == MODE_SELECT);
if (six_byte) {
if (scmd->cmd_len < 5) {
@@ -3997,70 +3954,45 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
return NULL;
}
-/**
- * ata_scsi_dump_cdb - dump SCSI command contents to dmesg
- * @ap: ATA port to which the command was being sent
- * @cmd: SCSI command to dump
- *
- * Prints the contents of a SCSI command via printk().
- */
-
-void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd)
-{
-#ifdef ATA_VERBOSE_DEBUG
- struct scsi_device *scsidev = cmd->device;
-
- VPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
- ap->print_id,
- scsidev->channel, scsidev->id, scsidev->lun,
- cmd->cmnd);
-#endif
-}
-
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
{
u8 scsi_op = scmd->cmnd[0];
ata_xlat_func_t xlat_func;
- int rc = 0;
+
+ if (unlikely(!scmd->cmd_len))
+ goto bad_cdb_len;
if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
- if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
+ if (unlikely(scmd->cmd_len > dev->cdb_len))
goto bad_cdb_len;
xlat_func = ata_get_xlat_func(dev, scsi_op);
- } else {
- if (unlikely(!scmd->cmd_len))
- goto bad_cdb_len;
+ } else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
+ /* relay SCSI command to ATAPI device */
+ int len = COMMAND_SIZE(scsi_op);
- xlat_func = NULL;
- if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
- /* relay SCSI command to ATAPI device */
- int len = COMMAND_SIZE(scsi_op);
- if (unlikely(len > scmd->cmd_len ||
- len > dev->cdb_len ||
- scmd->cmd_len > ATAPI_CDB_LEN))
- goto bad_cdb_len;
+ if (unlikely(len > scmd->cmd_len ||
+ len > dev->cdb_len ||
+ scmd->cmd_len > ATAPI_CDB_LEN))
+ goto bad_cdb_len;
- xlat_func = atapi_xlat;
- } else {
- /* ATA_16 passthru, treat as an ATA command */
- if (unlikely(scmd->cmd_len > 16))
- goto bad_cdb_len;
+ xlat_func = atapi_xlat;
+ } else {
+ /* ATA_16 passthru, treat as an ATA command */
+ if (unlikely(scmd->cmd_len > 16))
+ goto bad_cdb_len;
- xlat_func = ata_get_xlat_func(dev, scsi_op);
- }
+ xlat_func = ata_get_xlat_func(dev, scsi_op);
}
if (xlat_func)
- rc = ata_scsi_translate(dev, scmd, xlat_func);
- else
- ata_scsi_simulate(dev, scmd);
+ return ata_scsi_translate(dev, scmd, xlat_func);
- return rc;
+ ata_scsi_simulate(dev, scmd);
+
+ return 0;
bad_cdb_len:
- DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
- scmd->cmd_len, scsi_op, dev->cdb_len);
scmd->result = DID_ERROR << 16;
scsi_done(scmd);
return 0;
@@ -4097,8 +4029,6 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
spin_lock_irqsave(ap->lock, irq_flags);
- ata_scsi_dump_cdb(ap, cmd);
-
dev = ata_scsi_find_dev(ap, scsidev);
if (likely(dev))
rc = __ata_scsi_queuecmd(cmd, dev);
@@ -4531,12 +4461,9 @@ void ata_scsi_hotplug(struct work_struct *work)
container_of(work, struct ata_port, hotplug_task.work);
int i;
- if (ap->pflags & ATA_PFLAG_UNLOADING) {
- DPRINTK("ENTER/EXIT - unloading\n");
+ if (ap->pflags & ATA_PFLAG_UNLOADING)
return;
- }
- DPRINTK("ENTER\n");
mutex_lock(&ap->scsi_scan_mutex);
/* Unplug detached devices. We cannot use link iterator here
@@ -4552,7 +4479,6 @@ void ata_scsi_hotplug(struct work_struct *work)
ata_scsi_scan_host(ap, 0);
mutex_unlock(&ap->scsi_scan_mutex);
- DPRINTK("EXIT\n");
}
/**
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index b71ea4a680b0..75217828dfe3 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -18,7 +18,7 @@
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>
-
+#include <trace/events/libata.h>
#include "libata.h"
static struct workqueue_struct *ata_sff_wq;
@@ -330,10 +330,6 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select);
static void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
- if (ata_msg_probe(ap))
- ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
- device, wait);
-
if (wait)
ata_wait_idle(ap);
@@ -409,12 +405,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
- VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->hob_feature,
- tf->hob_nsect,
- tf->hob_lbal,
- tf->hob_lbam,
- tf->hob_lbah);
}
if (is_addr) {
@@ -423,18 +413,10 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->lbal, ioaddr->lbal_addr);
iowrite8(tf->lbam, ioaddr->lbam_addr);
iowrite8(tf->lbah, ioaddr->lbah_addr);
- VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
- tf->feature,
- tf->nsect,
- tf->lbal,
- tf->lbam,
- tf->lbah);
}
- if (tf->flags & ATA_TFLAG_DEVICE) {
+ if (tf->flags & ATA_TFLAG_DEVICE)
iowrite8(tf->device, ioaddr->device_addr);
- VPRINTK("device 0x%X\n", tf->device);
- }
ata_wait_idle(ap);
}
@@ -494,8 +476,6 @@ EXPORT_SYMBOL_GPL(ata_sff_tf_read);
*/
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
-
iowrite8(tf->command, ap->ioaddr.command_addr);
ata_sff_pause(ap);
}
@@ -505,6 +485,7 @@ EXPORT_SYMBOL_GPL(ata_sff_exec_command);
* ata_tf_to_host - issue ATA taskfile to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
+ * @tag: tag of the associated command
*
* Issues ATA taskfile register set to ATA host controller,
* with proper synchronization with interrupt handler and
@@ -514,9 +495,12 @@ EXPORT_SYMBOL_GPL(ata_sff_exec_command);
* spin_lock_irqsave(host lock)
*/
static inline void ata_tf_to_host(struct ata_port *ap,
- const struct ata_taskfile *tf)
+ const struct ata_taskfile *tf,
+ unsigned int tag)
{
+ trace_ata_tf_load(ap, tf);
ap->ops->sff_tf_load(ap, tf);
+ trace_ata_exec_command(ap, tf, tag);
ap->ops->sff_exec_command(ap, tf);
}
@@ -680,7 +664,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;
- DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+ trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
/*
* Split the transfer when it splits a page boundary. Note that the
@@ -750,7 +734,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
/* send SCSI cdb */
- DPRINTK("send cdb\n");
+ trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
WARN_ON_ONCE(qc->dev->cdb_len < 12);
ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
@@ -768,6 +752,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
case ATAPI_PROT_DMA:
ap->hsm_task_state = HSM_ST_LAST;
/* initiate bmdma */
+ trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
ap->ops->bmdma_start(qc);
break;
#endif /* CONFIG_ATA_BMDMA */
@@ -820,7 +805,7 @@ next_sg:
/* don't cross page boundaries */
count = min(count, (unsigned int)PAGE_SIZE - offset);
- DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+ trace_atapi_pio_transfer_data(qc, offset, count);
/* do the actual data transfer */
buf = kmap_atomic(page);
@@ -888,8 +873,6 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
if (unlikely(!bytes))
goto atapi_check;
- VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
-
if (unlikely(__atapi_pio_bytes(qc, bytes)))
goto err_out;
ata_sff_sync(ap); /* flush */
@@ -1002,8 +985,7 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
fsm_start:
- DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
- ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
+ trace_ata_sff_hsm_state(qc, status);
switch (ap->hsm_task_state) {
case HSM_ST_FIRST:
@@ -1204,8 +1186,7 @@ fsm_start:
}
/* no more data to transfer */
- DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
- ap->print_id, qc->dev->devno, status);
+ trace_ata_sff_hsm_command_complete(qc, status);
WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
@@ -1262,7 +1243,7 @@ EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
void ata_sff_flush_pio_task(struct ata_port *ap)
{
- DPRINTK("ENTER\n");
+ trace_ata_sff_flush_pio_task(ap);
cancel_delayed_work_sync(&ap->sff_pio_task);
@@ -1279,9 +1260,6 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
spin_unlock_irq(ap->lock);
ap->sff_pio_task_link = NULL;
-
- if (ata_msg_ctl(ap))
- ata_port_dbg(ap, "%s: EXIT\n", __func__);
}
static void ata_sff_pio_task(struct work_struct *work)
@@ -1376,7 +1354,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_qc_set_polling(qc);
- ata_tf_to_host(ap, &qc->tf);
+ ata_tf_to_host(ap, &qc->tf, qc->tag);
ap->hsm_task_state = HSM_ST_LAST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
@@ -1388,7 +1366,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_qc_set_polling(qc);
- ata_tf_to_host(ap, &qc->tf);
+ ata_tf_to_host(ap, &qc->tf, qc->tag);
if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* PIO data out protocol */
@@ -1418,7 +1396,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_qc_set_polling(qc);
- ata_tf_to_host(ap, &qc->tf);
+ ata_tf_to_host(ap, &qc->tf, qc->tag);
ap->hsm_task_state = HSM_ST_FIRST;
@@ -1478,8 +1456,7 @@ static unsigned int __ata_sff_port_intr(struct ata_port *ap,
{
u8 status;
- VPRINTK("ata%u: protocol %d task_state %d\n",
- ap->print_id, qc->tf.protocol, ap->hsm_task_state);
+ trace_ata_sff_port_intr(qc, hsmv_on_idle);
/* Check whether we are expecting interrupt in this state */
switch (ap->hsm_task_state) {
@@ -1853,7 +1830,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
return ATA_DEV_NONE;
/* determine if device is ATA or ATAPI */
- class = ata_dev_classify(&tf);
+ class = ata_port_classify(ap, &tf);
if (class == ATA_DEV_UNKNOWN) {
/* If the device failed diagnostic, it's likely to
@@ -1956,8 +1933,6 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
-
if (ap->ioaddr.ctl_addr) {
/* software reset. causes dev0 to be selected */
iowrite8(ap->ctl, ioaddr->ctl_addr);
@@ -1995,8 +1970,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
int rc;
u8 err;
- DPRINTK("ENTER\n");
-
/* determine if device 0/1 are present */
if (ata_devchk(ap, 0))
devmask |= (1 << 0);
@@ -2007,7 +1980,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
ap->ops->sff_dev_select(ap, 0);
/* issue bus reset */
- DPRINTK("about to softreset, devmask=%x\n", devmask);
rc = ata_bus_softreset(ap, devmask, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
@@ -2022,7 +1994,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
classes[1] = ata_sff_dev_classify(&link->device[1],
devmask & (1 << 1), &err);
- DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_softreset);
@@ -2055,7 +2026,6 @@ int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
if (online)
*class = ata_sff_dev_classify(link->device, 1, NULL);
- DPRINTK("EXIT, class=%u\n", *class);
return rc;
}
EXPORT_SYMBOL_GPL(sata_sff_hardreset);
@@ -2085,10 +2055,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
ap->ops->sff_dev_select(ap, 0);
/* bail out if no device is present */
- if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
- DPRINTK("EXIT, no device\n");
+ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE)
return;
- }
/* set up device control */
if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
@@ -2123,7 +2091,6 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
&& count < 65536; count += 2)
ioread16(ap->ioaddr.data_addr);
- /* Can become DEBUG later */
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
@@ -2467,8 +2434,6 @@ static int ata_pci_init_one(struct pci_dev *pdev,
struct ata_host *host = NULL;
int rc;
- DPRINTK("ENTER\n");
-
pi = ata_sff_find_valid_pi(ppi);
if (!pi) {
dev_err(&pdev->dev, "no valid port_info specified\n");
@@ -2614,7 +2579,6 @@ static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
prd[pi].addr = cpu_to_le32(addr);
prd[pi].flags_len = cpu_to_le32(len & 0xffff);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
pi++;
sg_len -= len;
@@ -2674,7 +2638,6 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
prd[++pi].addr = cpu_to_le32(addr + 0x8000);
}
prd[pi].flags_len = cpu_to_le32(blen);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
pi++;
sg_len -= len;
@@ -2756,8 +2719,11 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
case ATA_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
ap->ops->bmdma_setup(qc); /* set up bmdma */
+ trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
ap->ops->bmdma_start(qc); /* initiate bmdma */
ap->hsm_task_state = HSM_ST_LAST;
break;
@@ -2765,7 +2731,9 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
case ATAPI_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
ap->ops->bmdma_setup(qc); /* set up bmdma */
ap->hsm_task_state = HSM_ST_FIRST;
@@ -2806,13 +2774,14 @@ unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
/* check status of DMA engine */
host_stat = ap->ops->bmdma_status(ap);
- VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
+ trace_ata_bmdma_status(ap, host_stat);
/* if it's not our irq... */
if (!(host_stat & ATA_DMA_INTR))
return ata_sff_idle_irq(ap);
/* before we do anything else, clear DMA-Start bit */
+ trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
bmdma_stopped = true;
@@ -2881,6 +2850,7 @@ void ata_bmdma_error_handler(struct ata_port *ap)
u8 host_stat;
host_stat = ap->ops->bmdma_status(ap);
+ trace_ata_bmdma_status(ap, host_stat);
/* BMDMA controllers indicate host bus error by
* setting DMA_ERR bit and timing out. As it wasn't
@@ -2892,6 +2862,7 @@ void ata_bmdma_error_handler(struct ata_port *ap)
thaw = true;
}
+ trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
/* if we're gonna thaw, make sure IRQ is clear */
@@ -2925,6 +2896,7 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
if (ata_is_dma(qc->tf.protocol)) {
spin_lock_irqsave(ap->lock, flags);
+ trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
spin_unlock_irqrestore(ap->lock, flags);
}
diff --git a/drivers/ata/libata-trace.c b/drivers/ata/libata-trace.c
index 08e001303a82..e0e4d0d5a100 100644
--- a/drivers/ata/libata-trace.c
+++ b/drivers/ata/libata-trace.c
@@ -39,6 +39,24 @@ libata_trace_parse_status(struct trace_seq *p, unsigned char status)
}
const char *
+libata_trace_parse_host_stat(struct trace_seq *p, unsigned char host_stat)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "{ ");
+ if (host_stat & ATA_DMA_INTR)
+ trace_seq_printf(p, "INTR ");
+ if (host_stat & ATA_DMA_ERR)
+ trace_seq_printf(p, "ERR ");
+ if (host_stat & ATA_DMA_ACTIVE)
+ trace_seq_printf(p, "ACTIVE ");
+ trace_seq_putc(p, '}');
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *
libata_trace_parse_eh_action(struct trace_seq *p, unsigned int eh_action)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -138,6 +156,35 @@ libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags)
}
const char *
+libata_trace_parse_tf_flags(struct trace_seq *p, unsigned int tf_flags)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "%x", tf_flags);
+ if (tf_flags) {
+ trace_seq_printf(p, "{ ");
+ if (tf_flags & ATA_TFLAG_LBA48)
+ trace_seq_printf(p, "LBA48 ");
+ if (tf_flags & ATA_TFLAG_ISADDR)
+ trace_seq_printf(p, "ISADDR ");
+ if (tf_flags & ATA_TFLAG_DEVICE)
+ trace_seq_printf(p, "DEV ");
+ if (tf_flags & ATA_TFLAG_WRITE)
+ trace_seq_printf(p, "WRITE ");
+ if (tf_flags & ATA_TFLAG_LBA)
+ trace_seq_printf(p, "LBA ");
+ if (tf_flags & ATA_TFLAG_FUA)
+ trace_seq_printf(p, "FUA ");
+ if (tf_flags & ATA_TFLAG_POLLING)
+ trace_seq_printf(p, "POLL ");
+ trace_seq_putc(p, '}');
+ }
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *
libata_trace_parse_subcmd(struct trace_seq *p, unsigned char cmd,
unsigned char feature, unsigned char hob_nsect)
{
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 34bb4608bdc6..ca129854a88c 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -163,7 +163,7 @@ static struct {
{ AC_ERR_INVALID, "InvalidArg" },
{ AC_ERR_OTHER, "Unknown" },
{ AC_ERR_NODEV_HINT, "NoDeviceHint" },
- { AC_ERR_NCQ, "NCQError" }
+ { AC_ERR_NCQ, "NCQError" }
};
ata_bitfield_name_match(err, ata_err_names)
@@ -321,13 +321,43 @@ int ata_tport_add(struct device *parent,
return error;
}
+/**
+ * ata_port_classify - determine device type based on ATA-spec signature
+ * @ap: ATA port device on which the classification should be run
+ * @tf: ATA taskfile register set for device to be identified
+ *
+ * A wrapper around ata_dev_classify() to provide additional logging
+ *
+ * RETURNS:
+ * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
+ * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
+ */
+unsigned int ata_port_classify(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ int i;
+ unsigned int class = ata_dev_classify(tf);
+
+ /* Start with index '1' to skip the 'unknown' entry */
+ for (i = 1; i < ARRAY_SIZE(ata_class_names); i++) {
+ if (ata_class_names[i].value == class) {
+ ata_port_dbg(ap, "found %s device by sig\n",
+ ata_class_names[i].name);
+ return class;
+ }
+ }
+
+ ata_port_info(ap, "found unknown device (class %u)\n", class);
+ return class;
+}
+EXPORT_SYMBOL_GPL(ata_port_classify);
/*
* ATA link attributes
*/
static int noop(int x) { return x; }
-#define ata_link_show_linkspeed(field, format) \
+#define ata_link_show_linkspeed(field, format) \
static ssize_t \
show_ata_link_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -416,7 +446,7 @@ int ata_tlink_add(struct ata_link *link)
dev->release = ata_tlink_release;
if (ata_is_host_link(link))
dev_set_name(dev, "link%d", ap->print_id);
- else
+ else
dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
transport_setup_device(dev);
@@ -472,7 +502,7 @@ ata_dev_attr(xfer, dma_mode);
ata_dev_attr(xfer, xfer_mode);
-#define ata_dev_show_simple(field, format_string, cast) \
+#define ata_dev_show_simple(field, format_string, cast) \
static ssize_t \
show_ata_dev_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -482,9 +512,9 @@ show_ata_dev_##field(struct device *dev, \
return scnprintf(buf, 20, format_string, cast ata_dev->field); \
}
-#define ata_dev_simple_attr(field, format_string, type) \
+#define ata_dev_simple_attr(field, format_string, type) \
ata_dev_show_simple(field, format_string, (type)) \
-static DEVICE_ATTR(field, S_IRUGO, \
+ static DEVICE_ATTR(field, S_IRUGO, \
show_ata_dev_##field, NULL)
ata_dev_simple_attr(spdn_cnt, "%d\n", int);
@@ -502,7 +532,7 @@ static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg)
seconds = div_u64_rem(ent->timestamp, HZ, &rem);
arg->written += sprintf(arg->buf + arg->written,
- "[%5llu.%09lu]", seconds,
+ "[%5llu.%09lu]", seconds,
rem * NSEC_PER_SEC / HZ);
arg->written += get_ata_err_names(ent->err_mask,
arg->buf + arg->written);
@@ -667,7 +697,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
dev->release = ata_tdev_release;
if (ata_is_host_link(link))
dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
- else
+ else
dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp);
transport_setup_device(dev);
@@ -689,7 +719,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
*/
#define SETUP_TEMPLATE(attrb, field, perm, test) \
- i->private_##attrb[count] = dev_attr_##field; \
+ i->private_##attrb[count] = dev_attr_##field; \
i->private_##attrb[count].attr.mode = perm; \
i->attrb[count] = &i->private_##attrb[count]; \
if (test) \
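ata_port_classify() added above preserves the "found %s device by sig" messages that the deleted DPRINTKs in ata_dev_classify() used to print, now scoped to a port and emitted via dynamic debug. Callers that hold a port switch over (as ata_sff_dev_classify() does earlier in this diff), while signature-only users keep the silent helper; a sketch of the split:

/* sketch, not part of the diff */
class = ata_dev_classify(&tf);		/* pure signature decode, no logging */
class = ata_port_classify(ap, &tf);	/* same decode plus ata_port_dbg() note */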
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 68cdd81d747c..51e01acdd241 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -148,7 +148,6 @@ extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
void ata_scsi_sdev_config(struct scsi_device *sdev);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
-void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd);
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
/* libata-eh.c */
@@ -166,7 +165,7 @@ extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
unsigned int action);
extern void ata_eh_autopsy(struct ata_port *ap);
-const char *ata_get_cmd_descript(u8 command);
+const char *ata_get_cmd_name(u8 command);
extern void ata_eh_report(struct ata_port *ap);
extern int ata_eh_reset(struct ata_link *link, int classify,
ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
@@ -179,7 +178,7 @@ extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
int (*map_fn)(struct ata_ering_entry *, void *),
- void *arg);
+ void *arg);
extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key);
extern unsigned int atapi_eh_request_sense(struct ata_device *dev,
u8 *sense_buf, u8 dfl_sense_key);
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index b7ff63ed3bbb..1b90cda27246 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -37,7 +37,7 @@
#define DRV_NAME "pata_ali"
#define DRV_VERSION "0.7.8"
-static int ali_atapi_dma = 0;
+static int ali_atapi_dma;
module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");
@@ -123,7 +123,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strstr(model_num, "WDC"))
- return mask &= ~ATA_MASK_UDMA;
+ mask &= ~ATA_MASK_UDMA;
return mask;
}
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 63f39440a9b4..24c3d5e1fca3 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -39,6 +39,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <trace/events/libata.h>
#define DRIVER_NAME "arasan_cf"
#define TIMEOUT msecs_to_jiffies(3000)
@@ -703,9 +704,11 @@ static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
case ATA_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf);
acdev->dma_status = 0;
acdev->qc = qc;
+ trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
arasan_cf_dma_start(acdev);
ap->hsm_task_state = HSM_ST_LAST;
break;
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 2bc5fc81efe3..779d660415c8 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -155,7 +155,7 @@ static int atp867x_get_active_clocks_shifted(struct ata_port *ap,
case 1 ... 6:
break;
default:
- printk(KERN_WARNING "ATP867X: active %dclk is invalid. "
+ ata_port_warn(ap, "ATP867X: active %dclk is invalid. "
"Using 12clk.\n", clk);
fallthrough;
case 9 ... 12:
@@ -171,7 +171,8 @@ active_clock_shift_done:
return clocks << ATP867X_IO_PIOSPD_ACTIVE_SHIFT;
}
-static int atp867x_get_recover_clocks_shifted(unsigned int clk)
+static int atp867x_get_recover_clocks_shifted(struct ata_port *ap,
+ unsigned int clk)
{
unsigned char clocks = clk;
@@ -188,7 +189,7 @@ static int atp867x_get_recover_clocks_shifted(unsigned int clk)
case 15:
break;
default:
- printk(KERN_WARNING "ATP867X: recover %dclk is invalid. "
+ ata_port_warn(ap, "ATP867X: recover %dclk is invalid. "
"Using default 12clk.\n", clk);
fallthrough;
case 12: /* default 12 clk */
@@ -225,7 +226,7 @@ static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev)
iowrite8(b, dp->dma_mode);
b = atp867x_get_active_clocks_shifted(ap, t.active) |
- atp867x_get_recover_clocks_shifted(t.recover);
+ atp867x_get_recover_clocks_shifted(ap, t.recover);
if (adev->devno & 1)
iowrite8(b, dp->slave_piospd);
@@ -233,7 +234,7 @@ static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev)
iowrite8(b, dp->mstr_piospd);
b = atp867x_get_active_clocks_shifted(ap, t.act8b) |
- atp867x_get_recover_clocks_shifted(t.rec8b);
+ atp867x_get_recover_clocks_shifted(ap, t.rec8b);
iowrite8(b, dp->eightb_piospd);
}
@@ -270,7 +271,6 @@ static struct ata_port_operations atp867x_ops = {
};
-#ifdef ATP867X_DEBUG
static void atp867x_check_res(struct pci_dev *pdev)
{
int i;
@@ -280,7 +280,7 @@ static void atp867x_check_res(struct pci_dev *pdev)
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
start = pci_resource_start(pdev, i);
len = pci_resource_len(pdev, i);
- printk(KERN_DEBUG "ATP867X: resource start:len=%lx:%lx\n",
+ dev_dbg(&pdev->dev, "ATP867X: resource start:len=%lx:%lx\n",
start, len);
}
}
@@ -290,49 +290,48 @@ static void atp867x_check_ports(struct ata_port *ap, int port)
struct ata_ioports *ioaddr = &ap->ioaddr;
struct atp867x_priv *dp = ap->private_data;
- printk(KERN_DEBUG "ATP867X: port[%d] addresses\n"
- " cmd_addr =0x%llx, 0x%llx\n"
- " ctl_addr =0x%llx, 0x%llx\n"
- " bmdma_addr =0x%llx, 0x%llx\n"
- " data_addr =0x%llx\n"
- " error_addr =0x%llx\n"
- " feature_addr =0x%llx\n"
- " nsect_addr =0x%llx\n"
- " lbal_addr =0x%llx\n"
- " lbam_addr =0x%llx\n"
- " lbah_addr =0x%llx\n"
- " device_addr =0x%llx\n"
- " status_addr =0x%llx\n"
- " command_addr =0x%llx\n"
- " dp->dma_mode =0x%llx\n"
- " dp->mstr_piospd =0x%llx\n"
- " dp->slave_piospd =0x%llx\n"
- " dp->eightb_piospd =0x%llx\n"
+ ata_port_dbg(ap, "ATP867X: port[%d] addresses\n"
+ " cmd_addr =0x%lx, 0x%lx\n"
+ " ctl_addr =0x%lx, 0x%lx\n"
+ " bmdma_addr =0x%lx, 0x%lx\n"
+ " data_addr =0x%lx\n"
+ " error_addr =0x%lx\n"
+ " feature_addr =0x%lx\n"
+ " nsect_addr =0x%lx\n"
+ " lbal_addr =0x%lx\n"
+ " lbam_addr =0x%lx\n"
+ " lbah_addr =0x%lx\n"
+ " device_addr =0x%lx\n"
+ " status_addr =0x%lx\n"
+ " command_addr =0x%lx\n"
+ " dp->dma_mode =0x%lx\n"
+ " dp->mstr_piospd =0x%lx\n"
+ " dp->slave_piospd =0x%lx\n"
+ " dp->eightb_piospd =0x%lx\n"
" dp->pci66mhz =0x%lx\n",
port,
- (unsigned long long)ioaddr->cmd_addr,
- (unsigned long long)ATP867X_IO_PORTBASE(ap, port),
- (unsigned long long)ioaddr->ctl_addr,
- (unsigned long long)ATP867X_IO_ALTSTATUS(ap, port),
- (unsigned long long)ioaddr->bmdma_addr,
- (unsigned long long)ATP867X_IO_DMABASE(ap, port),
- (unsigned long long)ioaddr->data_addr,
- (unsigned long long)ioaddr->error_addr,
- (unsigned long long)ioaddr->feature_addr,
- (unsigned long long)ioaddr->nsect_addr,
- (unsigned long long)ioaddr->lbal_addr,
- (unsigned long long)ioaddr->lbam_addr,
- (unsigned long long)ioaddr->lbah_addr,
- (unsigned long long)ioaddr->device_addr,
- (unsigned long long)ioaddr->status_addr,
- (unsigned long long)ioaddr->command_addr,
- (unsigned long long)dp->dma_mode,
- (unsigned long long)dp->mstr_piospd,
- (unsigned long long)dp->slave_piospd,
- (unsigned long long)dp->eightb_piospd,
+ (unsigned long)ioaddr->cmd_addr,
+ (unsigned long)ATP867X_IO_PORTBASE(ap, port),
+ (unsigned long)ioaddr->ctl_addr,
+ (unsigned long)ATP867X_IO_ALTSTATUS(ap, port),
+ (unsigned long)ioaddr->bmdma_addr,
+ (unsigned long)ATP867X_IO_DMABASE(ap, port),
+ (unsigned long)ioaddr->data_addr,
+ (unsigned long)ioaddr->error_addr,
+ (unsigned long)ioaddr->feature_addr,
+ (unsigned long)ioaddr->nsect_addr,
+ (unsigned long)ioaddr->lbal_addr,
+ (unsigned long)ioaddr->lbam_addr,
+ (unsigned long)ioaddr->lbah_addr,
+ (unsigned long)ioaddr->device_addr,
+ (unsigned long)ioaddr->status_addr,
+ (unsigned long)ioaddr->command_addr,
+ (unsigned long)dp->dma_mode,
+ (unsigned long)dp->mstr_piospd,
+ (unsigned long)dp->slave_piospd,
+ (unsigned long)dp->eightb_piospd,
(unsigned long)dp->pci66mhz);
}
-#endif
static int atp867x_set_priv(struct ata_port *ap)
{
@@ -370,8 +369,7 @@ static void atp867x_fixup(struct ata_host *host)
if (v < 0x80) {
v = 0x80;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, v);
- printk(KERN_DEBUG "ATP867X: set latency timer of device %s"
- " to %d\n", pci_name(pdev), v);
+ dev_dbg(&pdev->dev, "ATP867X: set latency timer to %d\n", v);
}
/*
@@ -419,13 +417,11 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
return rc;
host->iomap = pcim_iomap_table(pdev);
-#ifdef ATP867X_DEBUG
atp867x_check_res(pdev);
for (i = 0; i < PCI_STD_NUM_BARS; i++)
- printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
- (unsigned long long)(host->iomap[i]));
-#endif
+ dev_dbg(gdev, "ATP867X: iomap[%d]=0x%p\n", i,
+ host->iomap[i]);
/*
* request, iomap BARs and init port addresses accordingly
@@ -444,9 +440,8 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
if (rc)
return rc;
-#ifdef ATP867X_DEBUG
atp867x_check_ports(ap, i);
-#endif
+
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
(unsigned long)ioaddr->cmd_addr,
(unsigned long)ioaddr->ctl_addr);
@@ -486,7 +481,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
if (rc)
return rc;
- printk(KERN_INFO "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)",
+ dev_info(&pdev->dev, "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)",
pdev->device);
host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS);
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index d0bcabb58b44..1a3372a72213 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -61,7 +61,7 @@ static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev)
struct ata_device *pair = ata_dev_pair(adev);
if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
- printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+ ata_dev_err(adev, DRV_NAME ": mode computation failed.\n");
return;
}
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 1d74d89b5bed..5baa4a7819c1 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -116,7 +116,7 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
/* ata_timing_compute is smart and will produce timings for MWDMA
that don't violate the drive's PIO capabilities. */
if (ata_timing_compute(adev, mode, &t, T, 0) < 0) {
- printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+ ata_dev_err(adev, DRV_NAME ": mode computation failed.\n");
return;
}
if (ap->port_no) {
@@ -130,7 +130,7 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
}
}
- printk(KERN_DEBUG DRV_NAME ": active %d recovery %d setup %d.\n",
+ ata_dev_dbg(adev, DRV_NAME ": active %d recovery %d setup %d.\n",
t.active, t.recover, t.setup);
if (t.recover > 16) {
t.active += t.recover - 16;
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 247c14702624..24ce8665b1f9 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -153,12 +153,12 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Perform set up for DMA */
if (pci_enable_device_io(pdev)) {
- printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
+ dev_err(&pdev->dev, "unable to configure BAR2.\n");
return -ENODEV;
}
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
+ dev_err(&pdev->dev, "unable to configure DMA mask.\n");
return -ENODEV;
}
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 760ac6e65216..ab47aeb5587f 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -263,12 +263,12 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
ppi[1] = &ata_dummy_port_info;
if (use_msr)
- printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n");
+ dev_err(&dev->dev, DRV_NAME ": Using MSR regs instead of PCI\n");
cs5536_read(dev, CFG, &cfg);
if ((cfg & IDE_CFG_CHANEN) == 0) {
- printk(KERN_ERR DRV_NAME ": disabled by BIOS\n");
+ dev_err(&dev->dev, DRV_NAME ": disabled by BIOS\n");
return -ENODEV;
}
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 5b3a7a8ebef6..3be5d52a777b 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -62,7 +62,7 @@ static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
u32 addr;
if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) {
- printk(KERN_ERR DRV_NAME ": mome computation failed.\n");
+ ata_dev_err(adev, DRV_NAME ": mome computation failed.\n");
return;
}
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 46208ececbb6..b78f71c70f27 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -855,7 +855,6 @@ static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc)
&& count < 65536; count += 2)
ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DATA);
- /* Can become DEBUG later */
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count);
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 06b7c4a9ec95..778c893f276b 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -14,9 +14,6 @@
* TODO
* Look into engine reset on timeout errors. Should not be required.
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -183,7 +180,7 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
i = match_string(list, -1, model_num);
if (i >= 0) {
- pr_warn("%s is not supported for %s\n", modestr, list[i]);
+ ata_dev_warn(dev, "%s is not supported for %s\n", modestr, list[i]);
return 1;
}
return 0;
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index f242157bc81b..7abc7e04f656 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -14,9 +14,6 @@
* TODO
* Look into engine reset on timeout errors. Should not be required.
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -231,7 +228,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
i = match_string(list, -1, model_num);
if (i >= 0) {
- pr_warn("%s is not supported for %s\n", modestr, list[i]);
+ ata_dev_warn(dev, "%s is not supported for %s\n",
+ modestr, list[i]);
return 1;
}
return 0;
@@ -864,7 +862,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
chip_table = &hpt372;
break;
default:
- pr_err("Unknown HPT366 subtype, please report (%d)\n",
+ dev_err(&dev->dev,
+ "Unknown HPT366 subtype, please report (%d)\n",
rev);
return -ENODEV;
}
@@ -905,7 +904,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
*ppi = &info_hpt374_fn1;
break;
default:
- pr_err("PCI table is bogus, please report (%d)\n", dev->device);
+ dev_err(&dev->dev, "PCI table is bogus, please report (%d)\n",
+ dev->device);
return -ENODEV;
}
/* Ok so this is a chip we support */
@@ -953,7 +953,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
u8 sr;
u32 total = 0;
- pr_warn("BIOS has not set timing clocks\n");
+ dev_warn(&dev->dev, "BIOS has not set timing clocks\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
@@ -1009,7 +1009,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
(f_high << 16) | f_low | 0x100);
}
if (adjust == 8) {
- pr_err("DPLL did not stabilize!\n");
+ dev_err(&dev->dev, "DPLL did not stabilize!\n");
return -ENODEV;
}
if (dpll == 3)
@@ -1017,7 +1017,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
else
private_data = (void *)hpt37x_timings_50;
- pr_info("bus clock %dMHz, using %dMHz DPLL\n",
+ dev_info(&dev->dev, "bus clock %dMHz, using %dMHz DPLL\n",
MHz[clock_slot], MHz[dpll]);
} else {
private_data = (void *)chip_table->clocks[clock_slot];
@@ -1032,7 +1032,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (clock_slot < 2 && ppi[0] == &info_hpt370a)
ppi[0] = &info_hpt370a_33;
- pr_info("%s using %dMHz bus clock\n",
+ dev_info(&dev->dev, "%s using %dMHz bus clock\n",
chip_table->name, MHz[clock_slot]);
}
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 48eef338e050..1d9d4eec5b8a 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -15,9 +15,6 @@
* TODO
* Work out best PLL policy
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -420,7 +417,7 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
u16 sr;
u32 total = 0;
- pr_warn("BIOS clock data not set\n");
+ dev_warn(&pdev->dev, "BIOS clock data not set\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
@@ -530,7 +527,8 @@ hpt372n:
ppi[0] = &info_hpt372n;
break;
default:
- pr_err("PCI table is bogus, please report (%d)\n", dev->device);
+ dev_err(&dev->dev, "PCI table is bogus, please report (%d)\n",
+ dev->device);
return -ENODEV;
}
@@ -579,11 +577,11 @@ hpt372n:
pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
}
if (adjust == 8) {
- pr_err("DPLL did not stabilize!\n");
+ dev_err(&dev->dev, "DPLL did not stabilize!\n");
return -ENODEV;
}
- pr_info("bus clock %dMHz, using 66MHz DPLL\n", pci_mhz);
+ dev_info(&dev->dev, "bus clock %dMHz, using 66MHz DPLL\n", pci_mhz);
/*
* Set our private data up. We only need a few flags
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 0e2265978a34..8a5b4e0079ab 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -431,7 +431,8 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
case ATA_CMD_SET_FEATURES:
return ata_bmdma_qc_issue(qc);
}
- printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
+ ata_dev_dbg(qc->dev, "it821x: can't process command 0x%02X\n",
+ qc->tf.command);
return AC_ERR_DEV;
}
@@ -507,12 +508,14 @@ static void it821x_dev_config(struct ata_device *adev)
if (strstr(model_num, "Integrated Technology Express")) {
/* RAID mode */
- ata_dev_info(adev, "%sRAID%d volume",
- adev->id[147] ? "Bootable " : "",
- adev->id[129]);
- if (adev->id[129] != 1)
- pr_cont("(%dK stripe)", adev->id[146]);
- pr_cont("\n");
+ if (adev->id[129] == 1)
+ ata_dev_info(adev, "%sRAID%d volume\n",
+ adev->id[147] ? "Bootable " : "",
+ adev->id[129]);
+ else
+ ata_dev_info(adev, "%sRAID%d volume (%dK stripe)\n",
+ adev->id[147] ? "Bootable " : "",
+ adev->id[129], adev->id[146]);
}
/* This is a controller firmware triggered funny, don't
report the drive faulty! */
@@ -534,7 +537,7 @@ static void it821x_dev_config(struct ata_device *adev)
*/
static unsigned int it821x_read_id(struct ata_device *adev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
unsigned int err_mask;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -542,21 +545,20 @@ static unsigned int it821x_read_id(struct ata_device *adev,
err_mask = ata_do_dev_read_id(adev, tf, id);
if (err_mask)
return err_mask;
- ata_id_c_string(id, model_num, ATA_ID_PROD, sizeof(model_num));
+ ata_id_c_string((u16 *)id, model_num, ATA_ID_PROD, sizeof(model_num));
- id[83] &= ~(1 << 12); /* Cache flush is firmware handled */
- id[83] &= ~(1 << 13); /* Ditto for LBA48 flushes */
- id[84] &= ~(1 << 6); /* No FUA */
- id[85] &= ~(1 << 10); /* No HPA */
- id[76] = 0; /* No NCQ/AN etc */
+ id[83] &= cpu_to_le16(~(1 << 12)); /* Cache flush is firmware handled */
+ id[84] &= cpu_to_le16(~(1 << 6)); /* No FUA */
+ id[85] &= cpu_to_le16(~(1 << 10)); /* No HPA */
+ id[76] = 0; /* No NCQ/AN etc */
if (strstr(model_num, "Integrated Technology Express")) {
/* Set feature bits the firmware neglects */
- id[49] |= 0x0300; /* LBA, DMA */
- id[83] &= 0x7FFF;
- id[83] |= 0x4400; /* Word 83 is valid and LBA48 */
- id[86] |= 0x0400; /* LBA48 on */
- id[ATA_ID_MAJOR_VER] |= 0x1F;
+ id[49] |= cpu_to_le16(0x0300); /* LBA, DMA */
+ id[83] &= cpu_to_le16(0x7FFF);
+ id[83] |= cpu_to_le16(0x4400); /* Word 83 is valid and LBA48 */
+ id[86] |= cpu_to_le16(0x0400); /* LBA48 on */
+ id[ATA_ID_MAJOR_VER] |= cpu_to_le16(0x1F);
/* Clear the serial number because it's different each boot
which breaks validation on resume */
memset(&id[ATA_ID_SERNO], 0x20, ATA_ID_SERNO_LEN);
@@ -593,6 +595,7 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
/**
* it821x_display_disk - display disk setup
+ * @ap: ATA port
* @n: Device number
* @buf: Buffer block from firmware
*
@@ -600,7 +603,7 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
* by the firmware.
*/
-static void it821x_display_disk(int n, u8 *buf)
+static void it821x_display_disk(struct ata_port *ap, int n, u8 *buf)
{
unsigned char id[41];
int mode = 0;
@@ -633,13 +636,13 @@ static void it821x_display_disk(int n, u8 *buf)
else
strcpy(mbuf, "PIO");
if (buf[52] == 4)
- printk(KERN_INFO "%d: %-6s %-8s %s %s\n",
+ ata_port_info(ap, "%d: %-6s %-8s %s %s\n",
n, mbuf, types[buf[52]], id, cbl);
else
- printk(KERN_INFO "%d: %-6s %-8s Volume: %1d %s %s\n",
+ ata_port_info(ap, "%d: %-6s %-8s Volume: %1d %s %s\n",
n, mbuf, types[buf[52]], buf[53], id, cbl);
if (buf[125] < 100)
- printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]);
+ ata_port_info(ap, "%d: Rebuilding: %d%%\n", n, buf[125]);
}
/**
@@ -676,7 +679,7 @@ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
status = ioread8(ap->ioaddr.status_addr);
if (status & ATA_ERR) {
kfree(buf);
- printk(KERN_ERR "it821x_firmware_command: rejected\n");
+ ata_port_err(ap, "%s: rejected\n", __func__);
return NULL;
}
if (status & ATA_DRQ) {
@@ -686,7 +689,7 @@ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
usleep_range(500, 1000);
}
kfree(buf);
- printk(KERN_ERR "it821x_firmware_command: timeout\n");
+ ata_port_err(ap, "%s: timeout\n", __func__);
return NULL;
}
@@ -709,13 +712,13 @@ static void it821x_probe_firmware(struct ata_port *ap)
buf = it821x_firmware_command(ap, 0xFA, 512);
if (buf != NULL) {
- printk(KERN_INFO "pata_it821x: Firmware %02X/%02X/%02X%02X\n",
+ ata_port_info(ap, "pata_it821x: Firmware %02X/%02X/%02X%02X\n",
buf[505],
buf[506],
buf[507],
buf[508]);
for (i = 0; i < 4; i++)
- it821x_display_disk(i, buf + 128 * i);
+ it821x_display_disk(ap, i, buf + 128 * i);
kfree(buf);
}
}
@@ -771,7 +774,8 @@ static int it821x_port_start(struct ata_port *ap)
itdev->timing10 = 1;
/* Need to disable ATAPI DMA for this case */
if (!itdev->smart)
- printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n");
+ dev_warn(&pdev->dev,
+ "Revision 0x10, workarounds activated.\n");
}
return 0;
@@ -919,14 +923,14 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
} else {
/* Force the card into bypass mode if so requested */
if (it8212_noraid) {
- printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n");
+ dev_info(&pdev->dev, "forcing bypass mode.\n");
it821x_disable_raid(pdev);
}
pci_read_config_byte(pdev, 0x50, &conf);
conf &= 1;
- printk(KERN_INFO DRV_NAME": controller in %s mode.\n",
- mode[conf]);
+ dev_info(&pdev->dev, "controller in %s mode.\n", mode[conf]);
+
if (conf == 0)
ppi[0] = &info_passthru;
else
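[editor's note] The __le16 retyping in the it821x_read_id() hunk above matters on big-endian hosts: once the identify buffer is declared little-endian, a bare "id[83] &= ~(1 << 12)" would clear a bit in the wrong byte. A minimal sketch of the pattern the hunk adopts; clear_id_bit() is an illustrative name, not a libata helper:

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Clear one bit of an identify-data word held in a little-endian
 * buffer: build the mask in CPU order and convert it once, so the
 * AND is performed in the buffer's byte order regardless of host
 * endianness.
 */
static inline void clear_id_bit(__le16 *id, int word, int bit)
{
	id[word] &= cpu_to_le16(~(1 << bit));
}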
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 99c63087c8ae..17b557c91e1c 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -114,7 +114,7 @@ static void ixp4xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ixp4xx_pata *ixpp = ap->host->private_data;
- ata_dev_printk(adev, KERN_INFO, "configured for PIO%d 8bit\n",
+ ata_dev_info(adev, "configured for PIO%d 8bit\n",
adev->pio_mode - XFER_PIO_0);
ixp4xx_set_8bit_timing(ixpp, adev->pio_mode);
}
@@ -132,8 +132,8 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc,
struct ixp4xx_pata *ixpp = ap->host->private_data;
unsigned long flags;
- ata_dev_printk(adev, KERN_DEBUG, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE",
- buflen);
+ ata_dev_dbg(adev, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE",
+ buflen);
spin_lock_irqsave(ap->lock, flags);
/* set the expansion bus in 16bit mode and restore
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 361597d14c56..0c5a51970fbf 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -32,7 +32,6 @@
static int marvell_pata_active(struct pci_dev *pdev)
{
- int i;
u32 devices;
void __iomem *barp;
@@ -44,11 +43,6 @@ static int marvell_pata_active(struct pci_dev *pdev)
if (barp == NULL)
return -ENOMEM;
- printk("BAR5:");
- for(i = 0; i <= 0x0F; i++)
- printk("%02X:%02X ", i, ioread8(barp + i));
- printk("\n");
-
devices = ioread32(barp + 0x0C);
pci_iounmap(pdev, barp);
@@ -149,7 +143,8 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
#if IS_ENABLED(CONFIG_SATA_AHCI)
if (!marvell_pata_active(pdev)) {
- printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n");
+ dev_info(&pdev->dev,
+ "PATA port not active, deferring to AHCI driver.\n");
return -ENODEV;
}
#endif
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index a7ecc1a204b5..06929e77c491 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -21,12 +21,13 @@
/* No PIO or DMA methods needed for this device */
static unsigned int netcell_read_id(struct ata_device *adev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);
+
/* Firmware forgets to mark words 85-87 valid */
if (err_mask == 0)
- id[ATA_ID_CSF_DEFAULT] |= 0x4000;
+ id[ATA_ID_CSF_DEFAULT] |= cpu_to_le16(0x4000);
return err_mask;
}
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index b5a3f710d76d..05c2ab375756 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -19,7 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <scsi/scsi_host.h>
-
+#include <trace/events/libata.h>
#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
@@ -73,16 +73,12 @@ MODULE_PARM_DESC(enable_dma,
*/
static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
{
- unsigned int val;
-
/*
* Compute # of eclock periods to get desired duration in
* nanoseconds.
*/
- val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
+ return DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
1000 * tim_mult);
-
- return val;
}
static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
@@ -273,9 +269,9 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);
- pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
+ ata_dev_dbg(dev, "ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
ns_to_tim_reg(tim_mult, 60));
- pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
+ ata_dev_dbg(dev, "oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
@@ -440,7 +436,6 @@ static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
int rc;
u8 err;
- DPRINTK("about to softreset\n");
__raw_writew(ap->ctl, base + 0xe);
udelay(20);
__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
@@ -455,7 +450,6 @@ static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
/* determine by signature whether we have ATA or ATAPI devices */
classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
- DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
return 0;
}
@@ -479,23 +473,11 @@ static void octeon_cf_tf_load16(struct ata_port *ap,
__raw_writew(tf->hob_feature << 8, base + 0xc);
__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
- VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->hob_feature,
- tf->hob_nsect,
- tf->hob_lbal,
- tf->hob_lbam,
- tf->hob_lbah);
}
if (is_addr) {
__raw_writew(tf->feature << 8, base + 0xc);
__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
- VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->feature,
- tf->nsect,
- tf->lbal,
- tf->lbam,
- tf->lbah);
}
ata_wait_idle(ap);
}
@@ -516,20 +498,14 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
{
/* The base of the registers is at ioaddr.data_addr. */
void __iomem *base = ap->ioaddr.data_addr;
- u16 blob;
+ u16 blob = 0;
- if (tf->flags & ATA_TFLAG_DEVICE) {
- VPRINTK("device 0x%X\n", tf->device);
+ if (tf->flags & ATA_TFLAG_DEVICE)
blob = tf->device;
- } else {
- blob = 0;
- }
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
blob |= (tf->command << 8);
__raw_writew(blob, base + 6);
-
ata_wait_idle(ap);
}
@@ -543,12 +519,10 @@ static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
struct octeon_cf_port *cf_port;
cf_port = ap->private_data;
- DPRINTK("ENTER\n");
/* issue r/w command */
qc->cursg = qc->sg;
cf_port->dma_finished = 0;
ap->ops->sff_exec_command(ap, &qc->tf);
- DPRINTK("EXIT\n");
}
/**
@@ -563,8 +537,6 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
union cvmx_mio_boot_dma_intx mio_boot_dma_int;
struct scatterlist *sg;
- VPRINTK("%d scatterlists\n", qc->n_elem);
-
/* Get the scatter list entry we need to DMA into */
sg = qc->cursg;
BUG_ON(!sg);
@@ -605,10 +577,6 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
mio_boot_dma_cfg.s.adr = sg_dma_address(sg);
- VPRINTK("%s %d bytes address=%p\n",
- (mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
- (void *)(unsigned long)mio_boot_dma_cfg.s.adr);
-
cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}
@@ -627,9 +595,7 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
union cvmx_mio_boot_dma_intx dma_int;
u8 status;
- VPRINTK("ata%u: protocol %d task_state %d\n",
- ap->print_id, qc->tf.protocol, ap->hsm_task_state);
-
+ trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
if (ap->hsm_task_state != HSM_ST_LAST)
return 0;
@@ -678,7 +644,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(&host->lock, flags);
- DPRINTK("ENTER\n");
for (i = 0; i < host->n_ports; i++) {
u8 status;
struct ata_port *ap;
@@ -701,6 +666,7 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
if (!sg_is_last(qc->cursg)) {
qc->cursg = sg_next(qc->cursg);
handled = 1;
+ trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
octeon_cf_dma_start(qc);
continue;
} else {
@@ -732,7 +698,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
}
}
spin_unlock_irqrestore(&host->lock, flags);
- DPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
@@ -800,8 +765,11 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
case ATA_PROT_DMA:
WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
octeon_cf_dma_setup(qc); /* set up dma */
+ trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
octeon_cf_dma_start(qc); /* initiate dma */
ap->hsm_task_state = HSM_ST_LAST;
break;
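[editor's note] The ns_to_tim_reg() simplification earlier in this file keeps the same round-up arithmetic. A worked example under an assumed 800 MHz io clock (the real rate comes from octeon_get_io_clock_rate(); ns_to_ticks() is illustrative):

#include <linux/kernel.h>

/*
 * ticks = ceil(nsecs * clock_MHz / (1000 * tim_mult)); e.g. for
 * tim_mult = 4, nsecs = 60 and an 800 MHz eclock:
 * DIV_ROUND_UP(60 * 800, 4000) = DIV_ROUND_UP(48000, 4000) = 12.
 */
static unsigned int ns_to_ticks(unsigned int tim_mult, unsigned int nsecs,
				unsigned long clock_hz)
{
	return DIV_ROUND_UP(nsecs * (clock_hz / 1000000), 1000 * tim_mult);
}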
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 35aa158fc976..c3a40b717dcd 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -25,11 +25,12 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
struct device_node *dn = ofdev->dev.of_node;
struct resource io_res;
struct resource ctl_res;
- struct resource *irq_res;
+ struct resource irq_res;
unsigned int reg_shift = 0;
int pio_mode = 0;
int pio_mask;
bool use16bit;
+ int irq;
ret = of_address_to_resource(dn, 0, &io_res);
if (ret) {
@@ -45,7 +46,15 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
return -EINVAL;
}
- irq_res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
+ memset(&irq_res, 0, sizeof(irq_res));
+
+ irq = platform_get_irq_optional(ofdev, 0);
+ if (irq < 0 && irq != -ENXIO)
+ return irq;
+ if (irq > 0) {
+ irq_res.start = irq;
+ irq_res.end = irq;
+ }
of_property_read_u32(dn, "reg-shift", &reg_shift);
@@ -63,7 +72,7 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
pio_mask = 1 << pio_mode;
pio_mask |= (1 << pio_mode) - 1;
- return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res,
+ return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq > 0 ? &irq_res : NULL,
reg_shift, pio_mask, &pata_platform_sht,
use16bit);
}
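[editor's note] The pata_of_platform hunk above drops platform_get_resource(IORESOURCE_IRQ, ...) in favor of platform_get_irq_optional(), which can map the interrupt on demand and reports failures as a negative errno. A condensed sketch of the same error handling; probe_irq() is an illustrative wrapper, not a platform-core API:

#include <linux/platform_device.h>

static int probe_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0 && irq != -ENXIO)
		return irq;	/* real failure, e.g. -EPROBE_DEFER */
	if (irq > 0)
		return irq;	/* an interrupt line is described */
	return 0;		/* -ENXIO: none; the caller falls back to polling */
}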
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index effc1a09444d..4fbb3eed8b0b 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -30,13 +30,6 @@
#define DRV_NAME "pata_pdc2027x"
#define DRV_VERSION "1.0"
-#undef PDC_DEBUG
-
-#ifdef PDC_DEBUG
-#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define PDPRINTK(fmt, args...)
-#endif
enum {
PDC_MMIO_BAR = 5,
@@ -214,11 +207,11 @@ static int pdc2027x_cable_detect(struct ata_port *ap)
if (cgcr & (1 << 26))
goto cbl40;
- PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no);
+ ata_port_dbg(ap, "No cable or 80-conductor cable\n");
return ATA_CBL_PATA80;
cbl40:
- printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no);
+ ata_port_info(ap, DRV_NAME ":40-conductor cable detected\n");
return ATA_CBL_PATA40;
}
@@ -292,17 +285,17 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
unsigned int pio = adev->pio_mode - XFER_PIO_0;
u32 ctcr0, ctcr1;
- PDPRINTK("adev->pio_mode[%X]\n", adev->pio_mode);
+ ata_port_dbg(ap, "adev->pio_mode[%X]\n", adev->pio_mode);
/* Sanity check */
if (pio > 4) {
- printk(KERN_ERR DRV_NAME ": Unknown pio mode [%d] ignored\n", pio);
+ ata_port_err(ap, "Unknown pio mode [%d] ignored\n", pio);
return;
}
/* Set the PIO timing registers using value table for 133MHz */
- PDPRINTK("Set pio regs... \n");
+ ata_port_dbg(ap, "Set pio regs... \n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0xffff0000;
@@ -315,9 +308,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- PDPRINTK("Set pio regs done\n");
-
- PDPRINTK("Set to pio mode[%u] \n", pio);
+ ata_port_dbg(ap, "Set to pio mode[%u] \n", pio);
}
/**
@@ -350,7 +341,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
}
- PDPRINTK("Set udma regs... \n");
+ ata_port_dbg(ap, "Set udma regs... \n");
ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0xff000000;
@@ -359,16 +350,14 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- PDPRINTK("Set udma regs done\n");
-
- PDPRINTK("Set to udma mode[%u] \n", udma_mode);
+ ata_port_dbg(ap, "Set to udma mode[%u] \n", udma_mode);
} else if ((dma_mode >= XFER_MW_DMA_0) &&
(dma_mode <= XFER_MW_DMA_2)) {
/* Set the MDMA timing registers with value table for 133MHz */
unsigned int mdma_mode = dma_mode & 0x07;
- PDPRINTK("Set mdma regs... \n");
+ ata_port_dbg(ap, "Set mdma regs... \n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0x0000ffff;
@@ -376,11 +365,10 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
- PDPRINTK("Set mdma regs done\n");
- PDPRINTK("Set to mdma mode[%u] \n", mdma_mode);
+ ata_port_dbg(ap, "Set to mdma mode[%u] \n", mdma_mode);
} else {
- printk(KERN_ERR DRV_NAME ": Unknown dma mode [%u] ignored\n", dma_mode);
+ ata_port_err(ap, "Unknown dma mode [%u] ignored\n", dma_mode);
}
}
@@ -414,7 +402,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
ctcr1 |= (1 << 25);
iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
- PDPRINTK("Turn on prefetch\n");
+ ata_dev_dbg(dev, "Turn on prefetch\n");
} else {
pdc2027x_set_dmamode(ap, dev);
}
@@ -485,8 +473,8 @@ retry:
counter = (bccrh << 15) | bccrl;
- PDPRINTK("bccrh [%X] bccrl [%X]\n", bccrh, bccrl);
- PDPRINTK("bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
+ dev_dbg(host->dev, "bccrh [%X] bccrl [%X]\n", bccrh, bccrl);
+ dev_dbg(host->dev, "bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
/*
* The 30-bit decreasing counter is read in 2 pieces.
@@ -495,7 +483,7 @@ retry:
*/
if (retry && !(bccrh == bccrhv && bccrl >= bccrlv)) {
retry--;
- PDPRINTK("rereading counter\n");
+ dev_dbg(host->dev, "rereading counter\n");
goto retry;
}
@@ -520,20 +508,19 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
/* Sanity check */
if (unlikely(pll_clock_khz < 5000L || pll_clock_khz > 70000L)) {
- printk(KERN_ERR DRV_NAME ": Invalid PLL input clock %ldkHz, give up!\n", pll_clock_khz);
+ dev_err(host->dev, "Invalid PLL input clock %ldkHz, give up!\n",
+ pll_clock_khz);
return;
}
-#ifdef PDC_DEBUG
- PDPRINTK("pout_required is %ld\n", pout_required);
+ dev_dbg(host->dev, "pout_required is %ld\n", pout_required);
/* Show the current clock value of PLL control register
* (maybe already configured by the firmware)
*/
pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
- PDPRINTK("pll_ctl[%X]\n", pll_ctl);
-#endif
+ dev_dbg(host->dev, "pll_ctl[%X]\n", pll_ctl);
/*
* Calculate the ratio of F, R and OD
@@ -552,7 +539,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
R = 0x00;
} else {
/* Invalid ratio */
- printk(KERN_ERR DRV_NAME ": Invalid ratio %ld, give up!\n", ratio);
+ dev_err(host->dev, "Invalid ratio %ld, give up!\n", ratio);
return;
}
@@ -560,15 +547,15 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
if (unlikely(F < 0 || F > 127)) {
/* Invalid F */
- printk(KERN_ERR DRV_NAME ": F[%d] invalid!\n", F);
+ dev_err(host->dev, "F[%d] invalid!\n", F);
return;
}
- PDPRINTK("F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
+ dev_dbg(host->dev, "F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
pll_ctl = (R << 8) | F;
- PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
+ dev_dbg(host->dev, "Writing pll_ctl[%X]\n", pll_ctl);
iowrite16(pll_ctl, mmio_base + PDC_PLL_CTL);
ioread16(mmio_base + PDC_PLL_CTL); /* flush */
@@ -576,15 +563,13 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
/* Wait the PLL circuit to be stable */
msleep(30);
-#ifdef PDC_DEBUG
/*
* Show the current clock value of PLL control register
* (maybe configured by the firmware)
*/
pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
- PDPRINTK("pll_ctl[%X]\n", pll_ctl);
-#endif
+ dev_dbg(host->dev, "pll_ctl[%X]\n", pll_ctl);
return;
}
@@ -605,7 +590,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
/* Start the test mode */
scr = ioread32(mmio_base + PDC_SYS_CTL);
- PDPRINTK("scr[%X]\n", scr);
+ dev_dbg(host->dev, "scr[%X]\n", scr);
iowrite32(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
ioread32(mmio_base + PDC_SYS_CTL); /* flush */
@@ -622,7 +607,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
/* Stop the test mode */
scr = ioread32(mmio_base + PDC_SYS_CTL);
- PDPRINTK("scr[%X]\n", scr);
+ dev_dbg(host->dev, "scr[%X]\n", scr);
iowrite32(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
ioread32(mmio_base + PDC_SYS_CTL); /* flush */
@@ -632,8 +617,8 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 *
(100000000 / usec_elapsed);
- PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count);
- PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock);
+ dev_dbg(host->dev, "start[%ld] end[%ld] PLL input clock[%ld]HZ\n",
+ start_count, end_count, pll_clock);
return pll_clock;
}
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 0c5cbcd28d0d..b99849095853 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -38,8 +38,6 @@ static int pdc2026x_cable_detect(struct ata_port *ap)
static void pdc202xx_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
-
iowrite8(tf->command, ap->ioaddr.command_addr);
ndelay(400);
}
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 028329428b75..87c7c90676ca 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -128,6 +128,8 @@ int __pata_platform_probe(struct device *dev, struct resource *io_res,
ap = host->ports[0];
ap->ops = devm_kzalloc(dev, sizeof(*ap->ops), GFP_KERNEL);
+ if (!ap->ops)
+ return -ENOMEM;
ap->ops->inherits = &ata_sff_port_ops;
ap->ops->cable_detect = ata_cable_unknown;
ap->ops->set_mode = pata_platform_set_mode;
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 3722a67083fd..fb00c3e5fd19 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -69,7 +69,7 @@ static int rz1000_fifo_disable(struct pci_dev *pdev)
reg &= 0xDFFF;
if (pci_write_config_word(pdev, 0x40, reg) != 0)
return -1;
- printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
+ dev_info(&pdev->dev, "disabled chipset readahead.\n");
return 0;
}
@@ -97,7 +97,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
if (rz1000_fifo_disable(pdev) == 0)
return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL, 0);
- printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n");
+ dev_err(&pdev->dev, "failed to disable read-ahead on chipset.\n");
/* Not safe to use so skip */
return -ENODEV;
}
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index b602e303fb54..e410fe44177f 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -286,13 +286,13 @@ static int serverworks_fixup_osb4(struct pci_dev *pdev)
pci_read_config_dword(isa_dev, 0x64, &reg);
reg &= ~0x00002000; /* disable 600ns interrupt mask */
if (!(reg & 0x00004000))
- printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
+ dev_info(&pdev->dev, "UDMA not BIOS enabled.\n");
reg |= 0x00004000; /* enable UDMA/33 support */
pci_write_config_dword(isa_dev, 0x64, reg);
pci_dev_put(isa_dev);
return 0;
}
- printk(KERN_WARNING DRV_NAME ": Unable to find bridge.\n");
+ dev_warn(&pdev->dev, "Unable to find bridge.\n");
return -ENODEV;
}
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 43215a664b96..0da58ce20d82 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -212,7 +212,6 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
static void sil680_sff_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
iowrite8(tf->command, ap->ioaddr.command_addr);
ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
@@ -309,17 +308,17 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
switch (tmpbyte & 0x30) {
case 0x00:
- printk(KERN_INFO "sil680: 100MHz clock.\n");
+ dev_info(&pdev->dev, "sil680: 100MHz clock.\n");
break;
case 0x10:
- printk(KERN_INFO "sil680: 133MHz clock.\n");
+ dev_info(&pdev->dev, "sil680: 133MHz clock.\n");
break;
case 0x20:
- printk(KERN_INFO "sil680: Using PCI clock.\n");
+ dev_info(&pdev->dev, "sil680: Using PCI clock.\n");
break;
/* This last case is _NOT_ ok */
case 0x30:
- printk(KERN_ERR "sil680: Clock disabled ?\n");
+ dev_err(&pdev->dev, "sil680: Clock disabled ?\n");
}
return tmpbyte & 0x30;
}
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 475032048984..439ca882f73c 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -414,12 +414,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
- VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->hob_feature,
- tf->hob_nsect,
- tf->hob_lbal,
- tf->hob_lbam,
- tf->hob_lbah);
}
if (is_addr) {
@@ -428,12 +422,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->lbal, ioaddr->lbal_addr);
iowrite8(tf->lbam, ioaddr->lbam_addr);
iowrite8(tf->lbah, ioaddr->lbah_addr);
- VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
- tf->feature,
- tf->nsect,
- tf->lbal,
- tf->lbam,
- tf->lbah);
}
ata_wait_idle(ap);
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 5db55e1e2a61..35b823ac20c9 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -284,9 +284,6 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
*(__le32 *)(buf + i) =
(pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
i += 4;
-
- VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
- (unsigned long)addr, len);
}
if (likely(last_buf))
@@ -302,8 +299,6 @@ static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
u32 pkt_dma = (u32)pp->pkt_dma;
int i = 0;
- VPRINTK("ENTER\n");
-
adma_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
return AC_ERR_OK;
@@ -355,22 +350,6 @@ static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
i = adma_fill_sg(qc);
wmb(); /* flush PRDs and pkt to memory */
-#if 0
- /* dump out CPB + PRDs for debug */
- {
- int j, len = 0;
- static char obuf[2048];
- for (j = 0; j < i; ++j) {
- len += sprintf(obuf+len, "%02x ", buf[j]);
- if ((j & 7) == 7) {
- printk("%s\n", obuf);
- len = 0;
- }
- }
- if (len)
- printk("%s\n", obuf);
- }
-#endif
return AC_ERR_OK;
}
@@ -379,8 +358,6 @@ static inline void adma_packet_start(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
void __iomem *chan = ADMA_PORT_REGS(ap);
- VPRINTK("ENTER, ap %p\n", ap);
-
/* fire up the ADMA engine */
writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
}
@@ -475,8 +452,6 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
u8 status = ata_sff_check_status(ap);
if ((status & ATA_BUSY))
continue;
- DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
- ap->print_id, qc->tf.protocol, status);
/* complete taskfile transaction */
pp->state = adma_state_idle;
@@ -504,14 +479,10 @@ static irqreturn_t adma_intr(int irq, void *dev_instance)
struct ata_host *host = dev_instance;
unsigned int handled = 0;
- VPRINTK("ENTER\n");
-
spin_lock(&host->lock);
handled = adma_intr_pkt(host) | adma_intr_mmio(host);
spin_unlock(&host->lock);
- VPRINTK("EXIT\n");
-
return IRQ_RETVAL(handled);
}
@@ -547,8 +518,8 @@ static int adma_port_start(struct ata_port *ap)
return -ENOMEM;
/* paranoia? */
if ((pp->pkt_dma & 7) != 0) {
- printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
- (u32)pp->pkt_dma);
+ ata_port_err(ap, "bad alignment for pp->pkt_dma: %08x\n",
+ (u32)pp->pkt_dma);
return -ENOMEM;
}
ap->private_data = pp;
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 338c2e50f759..bec33d781ae0 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -14,15 +14,6 @@
* COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
*/
-#ifdef CONFIG_SATA_DWC_DEBUG
-#define DEBUG
-#endif
-
-#ifdef CONFIG_SATA_DWC_VDEBUG
-#define VERBOSE_DEBUG
-#define DEBUG_NCQ
-#endif
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -34,6 +25,7 @@
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>
+#include <trace/events/libata.h>
#include "libata.h"
@@ -182,10 +174,8 @@ enum {
* Prototypes
*/
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
-static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
- u32 check_status);
-static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
-static void sata_dwc_port_stop(struct ata_port *ap);
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
#ifdef CONFIG_SATA_DWC_OLD_DMA
@@ -215,9 +205,10 @@ static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
struct sata_dwc_device *hsdev = hsdevp->hsdev;
struct dw_dma_slave *dws = &sata_dwc_dma_dws;
+ struct device *dev = hsdev->dev;
dma_cap_mask_t mask;
- dws->dma_dev = hsdev->dev;
+ dws->dma_dev = dev;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -225,8 +216,7 @@ static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
/* Acquire DMA channel */
hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
if (!hsdevp->chan) {
- dev_err(hsdev->dev, "%s: dma channel unavailable\n",
- __func__);
+ dev_err(dev, "%s: dma channel unavailable\n", __func__);
return -EAGAIN;
}
@@ -236,26 +226,25 @@ static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
static int sata_dwc_dma_init_old(struct platform_device *pdev,
struct sata_dwc_device *hsdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
- hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
+ hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
if (!hsdev->dma)
return -ENOMEM;
- hsdev->dma->dev = &pdev->dev;
+ hsdev->dma->dev = dev;
hsdev->dma->id = pdev->id;
/* Get SATA DMA interrupt number */
hsdev->dma->irq = irq_of_parse_and_map(np, 1);
if (hsdev->dma->irq == NO_IRQ) {
- dev_err(&pdev->dev, "no SATA DMA irq\n");
+ dev_err(dev, "no SATA DMA irq\n");
return -ENODEV;
}
/* Get physical SATA DMA register base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
+ hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(hsdev->dma->regs))
return PTR_ERR(hsdev->dma->regs);
@@ -297,35 +286,6 @@ static const char *get_prot_descript(u8 protocol)
}
}
-static const char *get_dma_dir_descript(int dma_dir)
-{
- switch ((enum dma_data_direction)dma_dir) {
- case DMA_BIDIRECTIONAL:
- return "bidirectional";
- case DMA_TO_DEVICE:
- return "to device";
- case DMA_FROM_DEVICE:
- return "from device";
- default:
- return "none";
- }
-}
-
-static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf)
-{
- dev_vdbg(ap->dev,
- "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n",
- tf->command, get_prot_descript(tf->protocol), tf->flags,
- tf->device);
- dev_vdbg(ap->dev,
- "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n",
- tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah);
- dev_vdbg(ap->dev,
- "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
- tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
- tf->hob_lbah);
-}
-
static void dma_dwc_xfer_done(void *hsdev_instance)
{
unsigned long flags;
@@ -355,7 +315,7 @@ static void dma_dwc_xfer_done(void *hsdev_instance)
}
if ((hsdevp->dma_interrupt_count % 2) == 0)
- sata_dwc_dma_xfer_complete(ap, 1);
+ sata_dwc_dma_xfer_complete(ap);
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -553,6 +513,7 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
* active tag. It is the tag that matches the command about to
* be completed.
*/
+ trace_ata_bmdma_start(ap, &qc->tf, tag);
qc->ap->link.active_tag = tag;
sata_dwc_bmdma_start_by_tag(qc, tag);
@@ -586,7 +547,7 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
if (status & ATA_ERR) {
dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
- sata_dwc_qc_complete(ap, qc, 1);
+ sata_dwc_qc_complete(ap, qc);
handled = 1;
goto DONE;
}
@@ -611,13 +572,13 @@ DRVSTILLBUSY:
}
if ((hsdevp->dma_interrupt_count % 2) == 0)
- sata_dwc_dma_xfer_complete(ap, 1);
+ sata_dwc_dma_xfer_complete(ap);
} else if (ata_is_pio(qc->tf.protocol)) {
ata_sff_hsm_move(ap, qc, status, 0);
handled = 1;
goto DONE;
} else {
- if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+ if (unlikely(sata_dwc_qc_complete(ap, qc)))
goto DRVSTILLBUSY;
}
@@ -677,7 +638,7 @@ DRVSTILLBUSY:
if (status & ATA_ERR) {
dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
status);
- sata_dwc_qc_complete(ap, qc, 1);
+ sata_dwc_qc_complete(ap, qc);
handled = 1;
goto DONE;
}
@@ -692,9 +653,9 @@ DRVSTILLBUSY:
dev_warn(ap->dev, "%s: DMA not pending?\n",
__func__);
if ((hsdevp->dma_interrupt_count % 2) == 0)
- sata_dwc_dma_xfer_complete(ap, 1);
+ sata_dwc_dma_xfer_complete(ap);
} else {
- if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+ if (unlikely(sata_dwc_qc_complete(ap, qc)))
goto STILLBUSY;
}
continue;
@@ -749,7 +710,7 @@ static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
}
}
-static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
@@ -763,17 +724,6 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
return;
}
-#ifdef DEBUG_NCQ
- if (tag > 0) {
- dev_info(ap->dev,
- "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n",
- __func__, qc->hw_tag, qc->tf.command,
- get_dma_dir_descript(qc->dma_dir),
- get_prot_descript(qc->tf.protocol),
- sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
- }
-#endif
-
if (ata_is_dma(qc->tf.protocol)) {
if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
dev_err(ap->dev,
@@ -783,15 +733,14 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
}
hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
- sata_dwc_qc_complete(ap, qc, check_status);
+ sata_dwc_qc_complete(ap, qc);
ap->link.active_tag = ATA_TAG_POISON;
} else {
- sata_dwc_qc_complete(ap, qc, check_status);
+ sata_dwc_qc_complete(ap, qc);
}
}
-static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
- u32 check_status)
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
u8 status = 0;
u32 mask = 0x0;
@@ -799,7 +748,6 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
hsdev->sactive_queued = 0;
- dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
dev_err(ap->dev, "TX DMA PENDING\n");
@@ -980,9 +928,6 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
{
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
- dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
- ata_get_cmd_descript(tf->command), tag);
-
hsdevp->cmd_issued[tag] = cmd_issued;
/*
@@ -1005,12 +950,9 @@ static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
u8 tag = qc->hw_tag;
- if (ata_is_ncq(qc->tf.protocol)) {
- dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
- __func__, qc->ap->link.sactive, tag);
- } else {
+ if (!ata_is_ncq(qc->tf.protocol))
tag = 0;
- }
+
sata_dwc_bmdma_setup_by_tag(qc, tag);
}
@@ -1037,12 +979,6 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
start_dma = 0;
}
- dev_dbg(ap->dev,
- "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n",
- __func__, qc, tag, qc->tf.command,
- get_dma_dir_descript(qc->dma_dir), start_dma);
- sata_dwc_tf_dump(ap, &qc->tf);
-
if (start_dma) {
sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
if (reg & SATA_DWC_SERROR_ERR_BITS) {
@@ -1067,13 +1003,9 @@ static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
u8 tag = qc->hw_tag;
- if (ata_is_ncq(qc->tf.protocol)) {
- dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
- __func__, qc->ap->link.sactive, tag);
- } else {
+ if (!ata_is_ncq(qc->tf.protocol))
tag = 0;
- }
- dev_dbg(qc->ap->dev, "%s\n", __func__);
+
sata_dwc_bmdma_start_by_tag(qc, tag);
}
@@ -1084,16 +1016,6 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
-#ifdef DEBUG_NCQ
- if (qc->hw_tag > 0 || ap->link.sactive > 1)
- dev_info(ap->dev,
- "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
- __func__, ap->print_id, qc->tf.command,
- ata_get_cmd_descript(qc->tf.command),
- qc->hw_tag, get_prot_descript(qc->tf.protocol),
- ap->link.active_tag, ap->link.sactive);
-#endif
-
if (!ata_is_ncq(qc->tf.protocol))
tag = 0;
@@ -1110,11 +1032,9 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
sactive |= (0x00000001 << tag);
sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);
- dev_dbg(qc->ap->dev,
- "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
- __func__, tag, qc->ap->link.sactive, sactive);
-
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf);
+ trace_ata_exec_command(ap, &qc->tf, tag);
sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
SATA_DWC_CMD_ISSUED_PEND);
} else {
@@ -1207,6 +1127,8 @@ static const struct ata_port_info sata_dwc_port_info[] = {
static int sata_dwc_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct sata_dwc_device *hsdev;
u32 idr, versionr;
char *ver = (char *)&versionr;
@@ -1216,23 +1138,21 @@ static int sata_dwc_probe(struct platform_device *ofdev)
struct ata_host *host;
struct ata_port_info pi = sata_dwc_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
- struct device_node *np = ofdev->dev.of_node;
struct resource *res;
/* Allocate DWC SATA device */
- host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
- hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL);
+ host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
+ hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
if (!host || !hsdev)
return -ENOMEM;
host->private_data = hsdev;
/* Ioremap SATA registers */
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&ofdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
- dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
+ dev_dbg(dev, "ioremap done for SATA register address\n");
/* Synopsys DWC SATA specific Registers */
hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
@@ -1246,11 +1166,10 @@ static int sata_dwc_probe(struct platform_device *ofdev)
/* Read the ID and Version Registers */
idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
- dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
- idr, ver[0], ver[1], ver[2]);
+ dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);
/* Save dev for later use in dev_xxx() routines */
- hsdev->dev = &ofdev->dev;
+ hsdev->dev = dev;
/* Enable SATA Interrupts */
sata_dwc_enable_interrupts(hsdev);
@@ -1258,7 +1177,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
/* Get SATA interrupt number */
irq = irq_of_parse_and_map(np, 0);
if (irq == NO_IRQ) {
- dev_err(&ofdev->dev, "no SATA DMA irq\n");
+ dev_err(dev, "no SATA DMA irq\n");
return -ENODEV;
}
@@ -1270,7 +1189,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
}
#endif
- hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
+ hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
if (IS_ERR(hsdev->phy))
return PTR_ERR(hsdev->phy);
@@ -1285,7 +1204,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
*/
err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
if (err)
- dev_err(&ofdev->dev, "failed to activate host");
+ dev_err(dev, "failed to activate host");
return 0;
@@ -1309,7 +1228,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
sata_dwc_dma_exit_old(hsdev);
#endif
- dev_dbg(&ofdev->dev, "done\n");
+ dev_dbg(dev, "done\n");
return 0;
}
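
The sata_dwc probe() rework above rests on two common modernizations: caching &ofdev->dev in a local "dev" once, and folding the platform_get_resource() + devm_ioremap_resource() pair into a single devm_platform_get_and_ioremap_resource() call. A minimal sketch of that pattern, assuming an ordinary platform driver (the foo_* names are illustrative):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;	/* cache once, reuse in dev_*() calls */
	struct resource *res;
	void __iomem *base;

	/* one call replaces platform_get_resource() + devm_ioremap_resource() */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(dev, "mapped %pR\n", res);
	return 0;
}
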
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 3b31a4f596d8..da0152116d9f 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -221,10 +221,10 @@ enum {
* 4 Dwords per command slot, command header size == 64 Dwords.
*/
struct cmdhdr_tbl_entry {
- u32 cda;
- u32 prde_fis_len;
- u32 ttl;
- u32 desc_info;
+ __le32 cda;
+ __le32 prde_fis_len;
+ __le32 ttl;
+ __le32 desc_info;
};
/*
@@ -246,8 +246,10 @@ enum {
struct command_desc {
u8 cfis[8 * 4];
u8 sfis[8 * 4];
- u8 acmd[4 * 4];
- u8 fill[4 * 4];
+ struct_group(cdb,
+ u8 acmd[4 * 4];
+ u8 fill[4 * 4];
+ );
u32 prdt[SATA_FSL_MAX_PRD_DIRECT * 4];
u32 prdt_indirect[(SATA_FSL_MAX_PRD - SATA_FSL_MAX_PRD_DIRECT) * 4];
};
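
The struct_group() change above gives the acmd/fill pair one named member (cd->cdb), so the memset()/memcpy() later in sata_fsl_qc_prep() operate on a field whose full size the compiler can see, rather than writing 32 bytes "past" a 16-byte acmd[] array; that keeps FORTIFY_SOURCE and -Warray-bounds quiet without changing the on-device layout. A self-contained sketch of the idiom, with illustrative names:

#include <linux/stddef.h>	/* struct_group() */
#include <linux/string.h>
#include <linux/types.h>

struct foo_desc {
	u8 cfis[32];
	struct_group(cdb,	/* one named window over both arrays */
		u8 acmd[16];
		u8 fill[16];
	);
};

/* caller guarantees len <= 32, i.e. sizeof(d->cdb) */
static void foo_fill_cdb(struct foo_desc *d, const u8 *src, size_t len)
{
	memset(&d->cdb, 0, sizeof(d->cdb));	/* 32 bytes, bounds visible */
	memcpy(&d->cdb, src, len);
}
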
@@ -257,9 +259,9 @@ struct command_desc {
*/
struct prde {
- u32 dba;
+ __le32 dba;
u8 fill[2 * 4];
- u32 ddc_and_ext;
+ __le32 ddc_and_ext;
};
/*
@@ -311,16 +313,16 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
intr_coalescing_ticks = ticks;
spin_unlock_irqrestore(&host->lock, flags);
- DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
- intr_coalescing_count, intr_coalescing_ticks);
- DPRINTK("ICC register status: (hcr base: %p) = 0x%x\n",
- hcr_base, ioread32(hcr_base + ICC));
+ dev_dbg(host->dev, "interrupt coalescing, count = 0x%x, ticks = %x\n",
+ intr_coalescing_count, intr_coalescing_ticks);
+ dev_dbg(host->dev, "ICC register status: (hcr base: 0x%p) = 0x%x\n",
+ hcr_base, ioread32(hcr_base + ICC));
}
static ssize_t fsl_sata_intr_coalescing_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d %d\n",
+ return sysfs_emit(buf, "%d %d\n",
intr_coalescing_count, intr_coalescing_ticks);
}
@@ -355,9 +357,9 @@ static ssize_t fsl_sata_rx_watermark_show(struct device *dev,
spin_lock_irqsave(&host->lock, flags);
rx_watermark = ioread32(csr_base + TRANSCFG);
rx_watermark &= 0x1f;
-
spin_unlock_irqrestore(&host->lock, flags);
- return sprintf(buf, "%d\n", rx_watermark);
+
+ return sysfs_emit(buf, "%d\n", rx_watermark);
}
static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
@@ -385,25 +387,27 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
return strlen(buf);
}
-static inline unsigned int sata_fsl_tag(unsigned int tag,
+static inline unsigned int sata_fsl_tag(struct ata_port *ap,
+ unsigned int tag,
void __iomem *hcr_base)
{
/* We let libATA core do actual (queue) tag allocation */
if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
- DPRINTK("tag %d invalid : out of range\n", tag);
+ ata_port_dbg(ap, "tag %d invalid : out of range\n", tag);
return 0;
}
if (unlikely((ioread32(hcr_base + CQ)) & (1 << tag))) {
- DPRINTK("tag %d invalid : in use!!\n", tag);
+ ata_port_dbg(ap, "tag %d invalid : in use!!\n", tag);
return 0;
}
return tag;
}
-static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
+static void sata_fsl_setup_cmd_hdr_entry(struct ata_port *ap,
+ struct sata_fsl_port_priv *pp,
unsigned int tag, u32 desc_info,
u32 data_xfer_len, u8 num_prde,
u8 fis_len)
@@ -421,11 +425,11 @@ static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
pp->cmdslot[tag].ttl = cpu_to_le32(data_xfer_len & ~0x03);
pp->cmdslot[tag].desc_info = cpu_to_le32(desc_info | (tag & 0x1F));
- VPRINTK("cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n",
- pp->cmdslot[tag].cda,
- pp->cmdslot[tag].prde_fis_len,
- pp->cmdslot[tag].ttl, pp->cmdslot[tag].desc_info);
-
+ ata_port_dbg(ap, "cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n",
+ le32_to_cpu(pp->cmdslot[tag].cda),
+ le32_to_cpu(pp->cmdslot[tag].prde_fis_len),
+ le32_to_cpu(pp->cmdslot[tag].ttl),
+ le32_to_cpu(pp->cmdslot[tag].desc_info));
}
static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
@@ -447,8 +451,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
dma_addr_t indirect_ext_segment_paddr;
unsigned int si;
- VPRINTK("SATA FSL : cd = 0x%p, prd = 0x%p\n", cmd_desc, prd);
-
indirect_ext_segment_paddr = cmd_desc_paddr +
SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
@@ -456,9 +458,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
dma_addr_t sg_addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
- VPRINTK("SATA FSL : fill_sg, sg_addr = 0x%llx, sg_len = %d\n",
- (unsigned long long)sg_addr, sg_len);
-
/* warn if each s/g element is not dword aligned */
if (unlikely(sg_addr & 0x03))
ata_port_err(qc->ap, "s/g addr unaligned : 0x%llx\n",
@@ -469,7 +468,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) &&
sg_next(sg) != NULL) {
- VPRINTK("setting indirect prde\n");
prd_ptr_to_indirect_ext = prd;
prd->dba = cpu_to_le32(indirect_ext_segment_paddr);
indirect_ext_segment_sz = 0;
@@ -481,9 +479,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
prd->dba = cpu_to_le32(sg_addr);
prd->ddc_and_ext = cpu_to_le32(data_snoop | (sg_len & ~0x03));
- VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n",
- ttl_dwords, prd->dba, prd->ddc_and_ext);
-
++num_prde;
++prd;
if (prd_ptr_to_indirect_ext)
@@ -508,7 +503,7 @@ static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
struct sata_fsl_port_priv *pp = ap->private_data;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
- unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
+ unsigned int tag = sata_fsl_tag(ap, qc->hw_tag, hcr_base);
struct command_desc *cd;
u32 desc_info = CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE;
u32 num_prde = 0;
@@ -520,19 +515,11 @@ static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis);
- VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n",
- cd->cfis[0], cd->cfis[1], cd->cfis[2]);
-
- if (qc->tf.protocol == ATA_PROT_NCQ) {
- VPRINTK("FPDMA xfer,Sctor cnt[0:7],[8:15] = %d,%d\n",
- cd->cfis[3], cd->cfis[11]);
- }
-
/* setup "ACMD - atapi command" in cmd. desc. if this is ATAPI cmd */
if (ata_is_atapi(qc->tf.protocol)) {
desc_info |= ATAPI_CMD;
- memset((void *)&cd->acmd, 0, 32);
- memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len);
+ memset(&cd->cdb, 0, sizeof(cd->cdb));
+ memcpy(&cd->cdb, qc->cdb, qc->dev->cdb_len);
}
if (qc->flags & ATA_QCFLAG_DMAMAP)
@@ -543,10 +530,10 @@ static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
if (qc->tf.protocol == ATA_PROT_NCQ)
desc_info |= FPDMA_QUEUED_CMD;
- sata_fsl_setup_cmd_hdr_entry(pp, tag, desc_info, ttl_dwords,
+ sata_fsl_setup_cmd_hdr_entry(ap, pp, tag, desc_info, ttl_dwords,
num_prde, 5);
- VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
+ ata_port_dbg(ap, "SATA FSL : di = 0x%x, ttl = %d, num_prde = %d\n",
desc_info, ttl_dwords, num_prde);
return AC_ERR_OK;
@@ -557,9 +544,9 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
- unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
+ unsigned int tag = sata_fsl_tag(ap, qc->hw_tag, hcr_base);
- VPRINTK("xx_qc_issue called,CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n",
+ ata_port_dbg(ap, "CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base),
ioread32(CE + hcr_base), ioread32(CC + hcr_base));
@@ -569,10 +556,10 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
/* Simply queue command to the controller/device */
iowrite32(1 << tag, CQ + hcr_base);
- VPRINTK("xx_qc_issue called, tag=%d, CQ=0x%x, CA=0x%x\n",
+ ata_port_dbg(ap, "tag=%d, CQ=0x%x, CA=0x%x\n",
tag, ioread32(CQ + hcr_base), ioread32(CA + hcr_base));
- VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n",
+ ata_port_dbg(ap, "CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n",
ioread32(CE + hcr_base),
ioread32(DE + hcr_base),
ioread32(CC + hcr_base),
@@ -586,7 +573,7 @@ static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
struct sata_fsl_port_priv *pp = qc->ap->private_data;
struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
- unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
+ unsigned int tag = sata_fsl_tag(qc->ap, qc->hw_tag, hcr_base);
struct command_desc *cd;
cd = pp->cmdentry + tag;
@@ -613,7 +600,7 @@ static int sata_fsl_scr_write(struct ata_link *link,
return -EINVAL;
}
- VPRINTK("xx_scr_write, reg_in = %d\n", sc_reg);
+ ata_link_dbg(link, "reg_in = %d\n", sc_reg);
iowrite32(val, ssr_base + (sc_reg * 4));
return 0;
@@ -637,7 +624,7 @@ static int sata_fsl_scr_read(struct ata_link *link,
return -EINVAL;
}
- VPRINTK("xx_scr_read, reg_in = %d\n", sc_reg);
+ ata_link_dbg(link, "reg_in = %d\n", sc_reg);
*val = ioread32(ssr_base + (sc_reg * 4));
return 0;
@@ -649,18 +636,18 @@ static void sata_fsl_freeze(struct ata_port *ap)
void __iomem *hcr_base = host_priv->hcr_base;
u32 temp;
- VPRINTK("xx_freeze, CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n",
+ ata_port_dbg(ap, "CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base),
ioread32(CE + hcr_base), ioread32(DE + hcr_base));
- VPRINTK("CmdStat = 0x%x\n",
+ ata_port_dbg(ap, "CmdStat = 0x%x\n",
ioread32(host_priv->csr_base + COMMANDSTAT));
/* disable interrupts on the controller/port */
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
- VPRINTK("in xx_freeze : HControl = 0x%x, HStatus = 0x%x\n",
+ ata_port_dbg(ap, "HControl = 0x%x, HStatus = 0x%x\n",
ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
}
@@ -673,7 +660,7 @@ static void sata_fsl_thaw(struct ata_port *ap)
/* ack. any pending IRQs for this controller/port */
temp = ioread32(hcr_base + HSTATUS);
- VPRINTK("xx_thaw, pending IRQs = 0x%x\n", (temp & 0x3F));
+ ata_port_dbg(ap, "pending IRQs = 0x%x\n", (temp & 0x3F));
if (temp & 0x3F)
iowrite32((temp & 0x3F), hcr_base + HSTATUS);
@@ -682,7 +669,7 @@ static void sata_fsl_thaw(struct ata_port *ap)
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL);
- VPRINTK("xx_thaw : HControl = 0x%x, HStatus = 0x%x\n",
+ ata_port_dbg(ap, "HControl = 0x%x, HStatus = 0x%x\n",
ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
}
@@ -744,8 +731,9 @@ static int sata_fsl_port_start(struct ata_port *ap)
ap->private_data = pp;
- VPRINTK("CHBA = 0x%x, cmdentry_phys = 0x%x\n",
- pp->cmdslot_paddr, pp->cmdentry_paddr);
+ ata_port_dbg(ap, "CHBA = 0x%lx, cmdentry_phys = 0x%lx\n",
+ (unsigned long)pp->cmdslot_paddr,
+ (unsigned long)pp->cmdentry_paddr);
/* Now, update the CHBA register in host controller cmd register set */
iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
@@ -761,9 +749,9 @@ static int sata_fsl_port_start(struct ata_port *ap)
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp | HCONTROL_ONLINE_PHY_RST), hcr_base + HCONTROL);
- VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
- VPRINTK("CHBA = 0x%x\n", ioread32(hcr_base + CHBA));
+ ata_port_dbg(ap, "HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+ ata_port_dbg(ap, "HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ ata_port_dbg(ap, "CHBA = 0x%x\n", ioread32(hcr_base + CHBA));
return 0;
}
@@ -803,16 +791,15 @@ static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
temp = ioread32(hcr_base + SIGNATURE);
- VPRINTK("raw sig = 0x%x\n", temp);
- VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ ata_port_dbg(ap, "HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+ ata_port_dbg(ap, "HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
tf.lbah = (temp >> 24) & 0xff;
tf.lbam = (temp >> 16) & 0xff;
tf.lbal = (temp >> 8) & 0xff;
tf.nsect = temp & 0xff;
- return ata_dev_classify(&tf);
+ return ata_port_classify(ap, &tf);
}
static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
@@ -825,8 +812,6 @@ static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
int i = 0;
unsigned long start_jiffies;
- DPRINTK("in xx_hardreset\n");
-
try_offline_again:
/*
* Force host controller to go off-line, aborting current operations
@@ -852,9 +837,10 @@ try_offline_again:
goto try_offline_again;
}
- DPRINTK("hardreset, controller off-lined\n");
- VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ ata_port_dbg(ap, "hardreset, controller off-lined\n"
+ "HStatus = 0x%x HControl = 0x%x\n",
+ ioread32(hcr_base + HSTATUS),
+ ioread32(hcr_base + HCONTROL));
/*
* PHY reset should remain asserted for at least 1ms
@@ -882,9 +868,10 @@ try_offline_again:
goto err;
}
- DPRINTK("hardreset, controller off-lined & on-lined\n");
- VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ ata_port_dbg(ap, "controller off-lined & on-lined\n"
+ "HStatus = 0x%x HControl = 0x%x\n",
+ ioread32(hcr_base + HSTATUS),
+ ioread32(hcr_base + HCONTROL));
/*
* First, wait for the PHYRDY change to occur before waiting for
@@ -941,10 +928,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
u8 *cfis;
u32 Serror;
- DPRINTK("in xx_softreset\n");
-
if (ata_link_offline(link)) {
- DPRINTK("PHY reports no device\n");
*class = ATA_DEV_NONE;
return 0;
}
@@ -957,19 +941,17 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
* reached here, we can send a command to the target device
*/
- DPRINTK("Sending SRST/device reset\n");
-
ata_tf_init(link->device, &tf);
cfis = (u8 *) &pp->cmdentry->cfis;
/* device reset/SRST is a control register update FIS, uses tag0 */
- sata_fsl_setup_cmd_hdr_entry(pp, 0,
+ sata_fsl_setup_cmd_hdr_entry(ap, pp, 0,
SRST_CMD | CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, 0, 0, 5);
tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */
ata_tf_to_fis(&tf, pmp, 0, cfis);
- DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n",
+ ata_port_dbg(ap, "Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n",
cfis[0], cfis[1], cfis[2], cfis[3]);
/*
@@ -977,7 +959,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
* other commands are active on the controller/device
*/
- DPRINTK("@Softreset, CQ = 0x%x, CA = 0x%x, CC = 0x%x\n",
+ ata_port_dbg(ap, "CQ = 0x%x, CA = 0x%x, CC = 0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base), ioread32(CC + hcr_base));
@@ -990,15 +972,16 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
if (temp & 0x1) {
ata_port_warn(ap, "ATA_SRST issue failed\n");
- DPRINTK("Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n",
+ ata_port_dbg(ap, "Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base), ioread32(CC + hcr_base));
sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror);
- DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
- DPRINTK("Serror = 0x%x\n", Serror);
+ ata_port_dbg(ap, "HStatus = 0x%x HControl = 0x%x Serror = 0x%x\n",
+ ioread32(hcr_base + HSTATUS),
+ ioread32(hcr_base + HCONTROL),
+ Serror);
goto err;
}
@@ -1012,8 +995,9 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
* using ATA signature D2H register FIS to the host controller.
*/
- sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE,
- 0, 0, 5);
+ sata_fsl_setup_cmd_hdr_entry(ap, pp, 0,
+ CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE,
+ 0, 0, 5);
tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */
ata_tf_to_fis(&tf, pmp, 0, cfis);
@@ -1030,8 +1014,6 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
*/
iowrite32(0x01, CC + hcr_base); /* We know it will be cmd#0 always */
- DPRINTK("SATA FSL : Now checking device signature\n");
-
*class = ATA_DEV_NONE;
/* Verify if SStatus indicates device presence */
@@ -1045,9 +1027,8 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
*class = sata_fsl_dev_classify(ap);
- DPRINTK("class = %d\n", *class);
- VPRINTK("ccreg = 0x%x\n", ioread32(hcr_base + CC));
- VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
+ ata_port_dbg(ap, "ccreg = 0x%x\n", ioread32(hcr_base + CC));
+ ata_port_dbg(ap, "cereg = 0x%x\n", ioread32(hcr_base + CE));
}
return 0;
@@ -1058,10 +1039,7 @@ err:
static void sata_fsl_error_handler(struct ata_port *ap)
{
-
- DPRINTK("in xx_error_handler\n");
sata_pmp_error_handler(ap);
-
}
static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -1102,7 +1080,7 @@ static void sata_fsl_error_intr(struct ata_port *ap)
if (unlikely(SError & 0xFFFF0000))
sata_fsl_scr_write(&ap->link, SCR_ERROR, SError);
- DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
+ ata_port_dbg(ap, "hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
hstatus, cereg, ioread32(hcr_base + DE), SError);
/* handle fatal errors */
@@ -1119,7 +1097,7 @@ static void sata_fsl_error_intr(struct ata_port *ap)
/* Handle PHYRDY change notification */
if (hstatus & INT_ON_PHYRDY_CHG) {
- DPRINTK("SATA FSL: PHYRDY change indication\n");
+ ata_port_dbg(ap, "PHYRDY change indication\n");
/* Setup a soft-reset EH action */
ata_ehi_hotplugged(ehi);
@@ -1140,7 +1118,7 @@ static void sata_fsl_error_intr(struct ata_port *ap)
*/
abort = 1;
- DPRINTK("single device error, CE=0x%x, DE=0x%x\n",
+ ata_port_dbg(ap, "single device error, CE=0x%x, DE=0x%x\n",
ioread32(hcr_base + CE), ioread32(hcr_base + DE));
/* find out the offending link and qc */
@@ -1245,18 +1223,18 @@ static void sata_fsl_host_intr(struct ata_port *ap)
}
if (unlikely(SError & 0xFFFF0000)) {
- DPRINTK("serror @host_intr : 0x%x\n", SError);
+ ata_port_dbg(ap, "serror @host_intr : 0x%x\n", SError);
sata_fsl_error_intr(ap);
}
if (unlikely(hstatus & status_mask)) {
- DPRINTK("error interrupt!!\n");
+ ata_port_dbg(ap, "error interrupt!!\n");
sata_fsl_error_intr(ap);
return;
}
- VPRINTK("Status of all queues :\n");
- VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%llx\n",
+ ata_port_dbg(ap, "Status of all queues :\n");
+ ata_port_dbg(ap, "done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%llx\n",
done_mask,
ioread32(hcr_base + CA),
ioread32(hcr_base + CE),
@@ -1268,15 +1246,13 @@ static void sata_fsl_host_intr(struct ata_port *ap)
/* clear CC bit, this will also complete the interrupt */
iowrite32(done_mask, hcr_base + CC);
- DPRINTK("Status of all queues :\n");
- DPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
+ ata_port_dbg(ap, "Status of all queues: done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
done_mask, ioread32(hcr_base + CA),
ioread32(hcr_base + CE));
for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
if (done_mask & (1 << i))
- DPRINTK
- ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
+ ata_port_dbg(ap, "completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
i, ioread32(hcr_base + CC),
ioread32(hcr_base + CA));
}
@@ -1287,7 +1263,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
iowrite32(1, hcr_base + CC);
qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
- DPRINTK("completing non-ncq cmd, CC=0x%x\n",
+ ata_port_dbg(ap, "completing non-ncq cmd, CC=0x%x\n",
ioread32(hcr_base + CC));
if (qc) {
@@ -1295,7 +1271,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
}
} else {
/* Spurious Interrupt!! */
- DPRINTK("spurious interrupt!!, CC = 0x%x\n",
+ ata_port_dbg(ap, "spurious interrupt!!, CC = 0x%x\n",
ioread32(hcr_base + CC));
iowrite32(done_mask, hcr_base + CC);
return;
@@ -1315,8 +1291,6 @@ static irqreturn_t sata_fsl_interrupt(int irq, void *dev_instance)
interrupt_enables = ioread32(hcr_base + HSTATUS);
interrupt_enables &= 0x3F;
- DPRINTK("interrupt status 0x%x\n", interrupt_enables);
-
if (!interrupt_enables)
return IRQ_NONE;
@@ -1369,7 +1343,7 @@ static int sata_fsl_init_controller(struct ata_host *host)
iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
/* Disable interrupt coalescing control(icc), for the moment */
- DPRINTK("icc = 0x%x\n", ioread32(hcr_base + ICC));
+ dev_dbg(host->dev, "icc = 0x%x\n", ioread32(hcr_base + ICC));
iowrite32(0x01000000, hcr_base + ICC);
/* clear error registers, SError is cleared by libATA */
@@ -1388,8 +1362,8 @@ static int sata_fsl_init_controller(struct ata_host *host)
* callback, that should also initiate the OOB, COMINIT sequence
*/
- DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ dev_dbg(host->dev, "HStatus = 0x%x HControl = 0x%x\n",
+ ioread32(hcr_base + HSTATUS), ioread32(hcr_base + HCONTROL));
return 0;
}
@@ -1406,8 +1380,7 @@ static void sata_fsl_host_stop(struct ata_host *host)
* scsi mid-layer and libata interface structures
*/
static struct scsi_host_template sata_fsl_sht = {
- ATA_NCQ_SHT("sata_fsl"),
- .can_queue = SATA_FSL_QUEUE_DEPTH,
+ ATA_NCQ_SHT_QD("sata_fsl", SATA_FSL_QUEUE_DEPTH),
.sg_tablesize = SATA_FSL_MAX_PRD_USABLE,
.dma_boundary = ATA_DMA_BOUNDARY,
};
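
ATA_NCQ_SHT_QD(), added in include/linux/libata.h alongside this series, bundles the queue depth into the host-template initializer so drivers stop setting .can_queue separately. A usage sketch for a hypothetical driver (FOO_* constants are illustrative):

#include <linux/libata.h>
#include <scsi/scsi_host.h>

#define FOO_QUEUE_DEPTH	16	/* illustrative */
#define FOO_MAX_PRD	64	/* illustrative */

static struct scsi_host_template foo_sht = {
	ATA_NCQ_SHT_QD("foo_drv", FOO_QUEUE_DEPTH),
	.sg_tablesize	= FOO_MAX_PRD,
	.dma_boundary	= ATA_DMA_BOUNDARY,
};
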
@@ -1478,9 +1451,8 @@ static int sata_fsl_probe(struct platform_device *ofdev)
iowrite32(temp | TRANSCFG_RX_WATER_MARK, csr_base + TRANSCFG);
}
- DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG));
- DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc));
- DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE);
+ dev_dbg(&ofdev->dev, "@reset i/o = 0x%x\n",
+ ioread32(csr_base + TRANSCFG));
host_priv = kzalloc(sizeof(struct sata_fsl_host_priv), GFP_KERNEL);
if (!host_priv)
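
The __le32 annotations in cmdhdr_tbl_entry and prde above make the byte order of the device-visible descriptors explicit, so sparse can check that every store goes through cpu_to_le32() and every readback, as in the new ata_port_dbg() in sata_fsl_setup_cmd_hdr_entry(), goes through le32_to_cpu(). A minimal sketch of the pattern, with illustrative names:

#include <linux/types.h>
#include <asm/byteorder.h>

struct foo_slot {
	__le32 cda;	/* device-visible, always little-endian */
	__le32 ttl;
};

static void foo_slot_fill(struct foo_slot *s, u32 cda, u32 ttl)
{
	s->cda = cpu_to_le32(cda);		/* CPU -> LE on store */
	s->ttl = cpu_to_le32(ttl & ~0x03);
}

static u32 foo_slot_ttl(const struct foo_slot *s)
{
	return le32_to_cpu(s->ttl);		/* LE -> CPU on load */
}
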
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index f793564f3d78..440a63de20d0 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -253,12 +253,12 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
ret = clk_prepare_enable(sg->sata0_pclk);
if (ret) {
- pr_err("failed to enable SATA0 PCLK\n");
+ dev_err(dev, "failed to enable SATA0 PCLK\n");
return ret;
}
ret = clk_prepare_enable(sg->sata1_pclk);
if (ret) {
- pr_err("failed to enable SATA1 PCLK\n");
+ dev_err(dev, "failed to enable SATA1 PCLK\n");
clk_disable_unprepare(sg->sata0_pclk);
return ret;
}
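
The pr_err() to dev_err() conversion above is small but useful: dev_err() prefixes each message with the driver and device name, so clock failures stay attributable when several controllers are present. A sketch, with illustrative names:

#include <linux/clk.h>
#include <linux/device.h>

static int foo_enable_pclk(struct device *dev, struct clk *pclk)
{
	int ret = clk_prepare_enable(pclk);

	if (ret)
		/* logged as "foo_drv <dev-name>: failed to enable PCLK" */
		dev_err(dev, "failed to enable PCLK\n");
	return ret;
}
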
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e517bd8822a5..781901151d82 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -488,8 +488,6 @@ static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
bool is_data = ata_is_data(qc->tf.protocol);
unsigned int cdb_len = 0;
- VPRINTK("ENTER\n");
-
if (is_atapi)
cdb_len = qc->dev->cdb_len;
@@ -657,7 +655,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
}
inic_tf_read(ap, &tf);
- *class = ata_dev_classify(&tf);
+ *class = ata_port_classify(ap, &tf);
}
return 0;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index cae4c1eab102..53446b997740 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -579,7 +579,7 @@ struct mv_hw_ops {
void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
- int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
+ int (*reset_hc)(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc);
void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
@@ -606,7 +606,7 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
-static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
@@ -616,14 +616,14 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
-static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
-static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+static int mv_soc_reset_hc(struct ata_host *host,
void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
void __iomem *mmio);
@@ -1248,81 +1248,74 @@ static int mv_stop_edma(struct ata_port *ap)
return err;
}
-#ifdef ATA_DEBUG
-static void mv_dump_mem(void __iomem *start, unsigned bytes)
+static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
{
- int b, w;
+ int b, w, o;
+ unsigned char linebuf[38];
+
for (b = 0; b < bytes; ) {
- DPRINTK("%p: ", start + b);
- for (w = 0; b < bytes && w < 4; w++) {
- printk("%08x ", readl(start + b));
+ for (w = 0, o = 0; b < bytes && w < 4; w++) {
+ o += snprintf(linebuf + o, sizeof(linebuf) - o,
+ "%08x ", readl(start + b));
b += sizeof(u32);
}
- printk("\n");
+ dev_dbg(dev, "%s: %p: %s\n",
+ __func__, start + b, linebuf);
}
}
-#endif
-#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
+
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
-#ifdef ATA_DEBUG
- int b, w;
- u32 dw;
+ int b, w, o;
+ u32 dw = 0;
+ unsigned char linebuf[38];
+
for (b = 0; b < bytes; ) {
- DPRINTK("%02x: ", b);
- for (w = 0; b < bytes && w < 4; w++) {
+ for (w = 0, o = 0; b < bytes && w < 4; w++) {
(void) pci_read_config_dword(pdev, b, &dw);
- printk("%08x ", dw);
+ o += snprintf(linebuf + o, sizeof(linebuf) - o,
+ "%08x ", dw);
b += sizeof(u32);
}
- printk("\n");
+ dev_dbg(&pdev->dev, "%s: %02x: %s\n",
+ __func__, b, linebuf);
}
-#endif
}
-#endif
-static void mv_dump_all_regs(void __iomem *mmio_base, int port,
+
+static void mv_dump_all_regs(void __iomem *mmio_base,
struct pci_dev *pdev)
{
-#ifdef ATA_DEBUG
- void __iomem *hc_base = mv_hc_base(mmio_base,
- port >> MV_PORT_HC_SHIFT);
+ void __iomem *hc_base;
void __iomem *port_base;
int start_port, num_ports, p, start_hc, num_hcs, hc;
- if (0 > port) {
- start_hc = start_port = 0;
- num_ports = 8; /* shld be benign for 4 port devs */
- num_hcs = 2;
- } else {
- start_hc = port >> MV_PORT_HC_SHIFT;
- start_port = port;
- num_ports = num_hcs = 1;
- }
- DPRINTK("All registers for port(s) %u-%u:\n", start_port,
- num_ports > 1 ? num_ports - 1 : start_port);
+ start_hc = start_port = 0;
+ num_ports = 8; /* should be benign for 4 port devs */
+ num_hcs = 2;
+ dev_dbg(&pdev->dev,
+ "%s: All registers for port(s) %u-%u:\n", __func__,
+ start_port, num_ports > 1 ? num_ports - 1 : start_port);
- if (NULL != pdev) {
- DPRINTK("PCI config space regs:\n");
- mv_dump_pci_cfg(pdev, 0x68);
- }
- DPRINTK("PCI regs:\n");
- mv_dump_mem(mmio_base+0xc00, 0x3c);
- mv_dump_mem(mmio_base+0xd00, 0x34);
- mv_dump_mem(mmio_base+0xf00, 0x4);
- mv_dump_mem(mmio_base+0x1d00, 0x6c);
+ dev_dbg(&pdev->dev, "%s: PCI config space regs:\n", __func__);
+ mv_dump_pci_cfg(pdev, 0x68);
+
+ dev_dbg(&pdev->dev, "%s: PCI regs:\n", __func__);
+ mv_dump_mem(&pdev->dev, mmio_base+0xc00, 0x3c);
+ mv_dump_mem(&pdev->dev, mmio_base+0xd00, 0x34);
+ mv_dump_mem(&pdev->dev, mmio_base+0xf00, 0x4);
+ mv_dump_mem(&pdev->dev, mmio_base+0x1d00, 0x6c);
for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
hc_base = mv_hc_base(mmio_base, hc);
- DPRINTK("HC regs (HC %i):\n", hc);
- mv_dump_mem(hc_base, 0x1c);
+ dev_dbg(&pdev->dev, "%s: HC regs (HC %i):\n", __func__, hc);
+ mv_dump_mem(&pdev->dev, hc_base, 0x1c);
}
for (p = start_port; p < start_port + num_ports; p++) {
port_base = mv_port_base(mmio_base, p);
- DPRINTK("EDMA regs (port %i):\n", p);
- mv_dump_mem(port_base, 0x54);
- DPRINTK("SATA regs (port %i):\n", p);
- mv_dump_mem(port_base+0x300, 0x60);
+ dev_dbg(&pdev->dev, "%s: EDMA regs (port %i):\n", __func__, p);
+ mv_dump_mem(&pdev->dev, port_base, 0x54);
+ dev_dbg(&pdev->dev, "%s: SATA regs (port %i):\n", __func__, p);
+ mv_dump_mem(&pdev->dev, port_base+0x300, 0x60);
}
-#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
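
mv_dump_mem() above now accumulates four words per line into linebuf via snprintf() and emits one dev_dbg() per line, instead of the bare printk() continuations the old DPRINTK code relied on, which interleave badly under SMP. A sketch of the accumulate-then-emit idiom, assuming MMIO source data; note the sketch keeps the line's starting address for the prefix:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>

static void foo_dump_words(struct device *dev, void __iomem *start,
			   unsigned int bytes)
{
	char line[4 * 9 + 1];	/* four "%08x " fields plus NUL */
	unsigned int b = 0;

	while (b < bytes) {
		unsigned int line_start = b;
		int w, o = 0;

		for (w = 0; b < bytes && w < 4; w++, b += 4)
			o += scnprintf(line + o, sizeof(line) - o,
				       "%08x ", readl(start + b));
		dev_dbg(dev, "%p: %s\n", start + line_start, line);
	}
}

For plain memory rather than registers, print_hex_dump_debug() would do this without hand-rolling; here each word must come from readl().
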
@@ -2962,8 +2955,8 @@ static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
- DPRINTK("All regs @ PCI error\n");
- mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
+ dev_dbg(host->dev, "%s: All regs @ PCI error\n", __func__);
+ mv_dump_all_regs(mmio, to_pci_dev(host->dev));
writelfl(0, mmio + hpriv->irq_cause_offset);
@@ -3201,9 +3194,10 @@ static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
}
#undef ZERO
-static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc)
{
+ struct mv_host_priv *hpriv = host->private_data;
unsigned int hc, port;
for (hc = 0; hc < n_hc; hc++) {
@@ -3262,7 +3256,7 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
* LOCKING:
* Inherited from caller.
*/
-static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc)
{
void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
@@ -3282,7 +3276,7 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
break;
}
if (!(PCI_MASTER_EMPTY & t)) {
- printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
+ dev_err(host->dev, "PCI master won't flush\n");
rc = 1;
goto done;
}
@@ -3296,7 +3290,7 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
} while (!(GLOB_SFT_RST & t) && (i-- > 0));
if (!(GLOB_SFT_RST & t)) {
- printk(KERN_ERR DRV_NAME ": can't set global reset\n");
+ dev_err(host->dev, "can't set global reset\n");
rc = 1;
goto done;
}
@@ -3310,7 +3304,7 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
} while ((GLOB_SFT_RST & t) && (i-- > 0));
if (GLOB_SFT_RST & t) {
- printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
+ dev_err(host->dev, "can't clear global reset\n");
rc = 1;
}
done:
@@ -3479,9 +3473,10 @@ static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
#undef ZERO
-static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+static int mv_soc_reset_hc(struct ata_host *host,
void __iomem *mmio, unsigned int n_hc)
{
+ struct mv_host_priv *hpriv = host->private_data;
unsigned int port;
for (port = 0; port < hpriv->n_ports; port++)
@@ -3723,11 +3718,6 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
/* unmask all non-transient EDMA error interrupts */
writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
-
- VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
- readl(port_mmio + EDMA_CFG),
- readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
- readl(port_mmio + EDMA_ERR_IRQ_MASK));
}
static unsigned int mv_in_pcix_mode(struct ata_host *host)
@@ -3859,11 +3849,11 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
*
* Warn the user, lest they think we're just buggy.
*/
- printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
+ dev_warn(&pdev->dev, "Highpoint RocketRAID"
" BIOS CORRUPTS DATA on all attached drives,"
" regardless of if/how they are configured."
" BEWARE!\n");
- printk(KERN_WARNING DRV_NAME ": For data safety, do not"
+ dev_warn(&pdev->dev, "For data safety, do not"
" use sectors 8-9 on \"Legacy\" drives,"
" and avoid the final two gigabytes on"
" all RocketRAID BIOS initialized drives.\n");
@@ -3954,7 +3944,7 @@ static int mv_init_host(struct ata_host *host)
if (hpriv->ops->read_preamp)
hpriv->ops->read_preamp(hpriv, port, mmio);
- rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
+ rc = hpriv->ops->reset_hc(host, mmio, n_hc);
if (rc)
goto done;
@@ -3972,7 +3962,7 @@ static int mv_init_host(struct ata_host *host)
for (hc = 0; hc < n_hc; hc++) {
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
- VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
+ dev_dbg(host->dev, "HC%i: HC config=0x%08x HC IRQ cause "
"(before clear)=0x%08x\n", hc,
readl(hc_mmio + HC_CFG),
readl(hc_mmio + HC_IRQ_CAUSE));
@@ -4270,7 +4260,7 @@ static int mv_platform_resume(struct platform_device *pdev)
/* initialize adapter */
ret = mv_init_host(host);
if (ret) {
- printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ dev_err(&pdev->dev, "Error during HW init\n");
return ret;
}
ata_host_resume(host);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 16272c111208..7f14d0d31057 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -31,6 +31,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
+#include <trace/events/libata.h>
#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.5"
@@ -808,7 +809,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
struct nv_adma_port_priv *pp = ap->private_data;
u8 flags = pp->cpb[cpb_num].resp_flags;
- VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
+ ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);
if (unlikely((force_err ||
flags & (NV_CPB_RESP_ATA_ERR |
@@ -1100,8 +1101,6 @@ static int nv_adma_port_start(struct ata_port *ap)
struct pci_dev *pdev = to_pci_dev(dev);
u16 tmp;
- VPRINTK("ENTER\n");
-
/*
* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
* pad buffers.
@@ -1190,7 +1189,6 @@ static void nv_adma_port_stop(struct ata_port *ap)
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
- VPRINTK("ENTER\n");
writew(0, mmio + NV_ADMA_CTL);
}
@@ -1252,8 +1250,6 @@ static void nv_adma_setup_port(struct ata_port *ap)
void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
struct ata_ioports *ioport = &ap->ioaddr;
- VPRINTK("ENTER\n");
-
mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
ioport->cmd_addr = mmio;
@@ -1277,8 +1273,6 @@ static int nv_adma_host_init(struct ata_host *host)
unsigned int i;
u32 tmp32;
- VPRINTK("ENTER\n");
-
/* enable ADMA on the ports */
pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
@@ -1320,8 +1314,6 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
struct scatterlist *sg;
unsigned int si;
- VPRINTK("ENTER\n");
-
for_each_sg(qc->sg, sg, qc->n_elem, si) {
aprd = (si < 5) ? &cpb->aprd[si] :
&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
@@ -1378,8 +1370,6 @@ static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
if (qc->tf.protocol == ATA_PROT_NCQ)
ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
- VPRINTK("qc->flags = 0x%lx\n", qc->flags);
-
nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
if (qc->flags & ATA_QCFLAG_DMAMAP) {
@@ -1404,8 +1394,6 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
void __iomem *mmio = pp->ctl_block;
int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
- VPRINTK("ENTER\n");
-
/* We can't handle result taskfile with NCQ commands, since
retrieving the taskfile switches us out of ADMA mode and would abort
existing commands. */
@@ -1417,7 +1405,6 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
if (nv_adma_use_reg_mode(qc)) {
/* use ATA register mode */
- VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
@@ -1438,8 +1425,6 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
- DPRINTK("Issued tag %u\n", qc->hw_tag);
-
return 0;
}
@@ -1871,12 +1856,12 @@ static void nv_swncq_host_init(struct ata_host *host)
/* enable swncq */
tmp = readl(mmio + NV_CTL_MCP55);
- VPRINTK("HOST_CTL:0x%X\n", tmp);
+ dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp);
writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
/* enable irq intr */
tmp = readl(mmio + NV_INT_ENABLE_MCP55);
- VPRINTK("HOST_ENABLE:0x%X\n", tmp);
+ dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp);
writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
/* clear port irq */
@@ -2017,19 +2002,17 @@ static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
if (qc == NULL)
return 0;
- DPRINTK("Enter\n");
-
writel((1 << qc->hw_tag), pp->sactive_block);
pp->last_issue_tag = qc->hw_tag;
pp->dhfis_bits &= ~(1 << qc->hw_tag);
pp->dmafis_bits &= ~(1 << qc->hw_tag);
pp->qc_active |= (0x1 << qc->hw_tag);
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);
ap->ops->sff_exec_command(ap, &qc->tf);
- DPRINTK("Issued tag %u\n", qc->hw_tag);
-
return 0;
}
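
The removed "Issued tag %u" DPRINTKs are superseded by the libata tracepoints pulled in via <trace/events/libata.h> at the top of the file: trace_ata_tf_load() and trace_ata_exec_command() fire immediately before the corresponding SFF callbacks, so command issue can be observed through ftrace at runtime rather than by rebuilding with debug printouts. A sketch of the bracketing pattern used in both sata_nv and sata_dwc above:

#include <linux/libata.h>
#include <trace/events/libata.h>

static void foo_issue_tf(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	trace_ata_tf_load(ap, &qc->tf);			/* records the taskfile */
	ap->ops->sff_tf_load(ap, &qc->tf);
	trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);	/* records cmd + tag */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

Once enabled, the events typically appear under /sys/kernel/tracing/events/libata/.
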
@@ -2041,8 +2024,6 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.protocol != ATA_PROT_NCQ)
return ata_bmdma_qc_issue(qc);
- DPRINTK("Enter\n");
-
if (!pp->qc_active)
nv_swncq_issue_atacmd(ap, qc);
else
@@ -2087,6 +2068,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
u8 lack_dhfis = 0;
host_stat = ap->ops->bmdma_status(ap);
+ trace_ata_bmdma_status(ap, host_stat);
if (unlikely(host_stat & ATA_DMA_ERR)) {
/* error when transferring data to/from memory */
ata_ehi_clear_desc(ehi);
@@ -2109,7 +2091,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
if (!ap->qc_active) {
- DPRINTK("over\n");
+ ata_port_dbg(ap, "over\n");
nv_swncq_pp_reinit(ap);
return 0;
}
@@ -2124,12 +2106,12 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
*/
lack_dhfis = 1;
- DPRINTK("id 0x%x QC: qc_active 0x%llx,"
- "SWNCQ:qc_active 0x%X defer_bits %X "
- "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
- ap->print_id, ap->qc_active, pp->qc_active,
- pp->defer_queue.defer_bits, pp->dhfis_bits,
- pp->dmafis_bits, pp->last_issue_tag);
+ ata_port_dbg(ap, "QC: qc_active 0x%llx,"
+ "SWNCQ:qc_active 0x%X defer_bits %X "
+ "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
+ ap->qc_active, pp->qc_active,
+ pp->defer_queue.defer_bits, pp->dhfis_bits,
+ pp->dmafis_bits, pp->last_issue_tag);
nv_swncq_fis_reinit(ap);
@@ -2169,7 +2151,7 @@ static void nv_swncq_dmafis(struct ata_port *ap)
__ata_bmdma_stop(ap);
tag = nv_swncq_tag(ap);
- DPRINTK("dma setup tag 0x%x\n", tag);
+ ata_port_dbg(ap, "dma setup tag 0x%x\n", tag);
qc = ata_qc_from_tag(ap, tag);
if (unlikely(!qc))
@@ -2237,9 +2219,9 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
if (fis & NV_SWNCQ_IRQ_SDBFIS) {
pp->ncq_flags |= ncq_saw_sdb;
- DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
+ ata_port_dbg(ap, "SWNCQ: qc_active 0x%X "
"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
- ap->print_id, pp->qc_active, pp->dhfis_bits,
+ pp->qc_active, pp->dhfis_bits,
pp->dmafis_bits, readl(pp->sactive_block));
if (nv_swncq_sdbfis(ap) < 0)
goto irq_error;
@@ -2265,7 +2247,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
goto irq_exit;
if (pp->defer_queue.defer_bits) {
- DPRINTK("send next command\n");
+ ata_port_dbg(ap, "send next command\n");
qc = nv_swncq_qc_from_dq(ap);
nv_swncq_issue_atacmd(ap, qc);
}
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 7815da8ef9e5..b8465fef2ed2 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -596,7 +596,8 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
prd[idx].addr = cpu_to_le32(addr);
prd[idx].flags_len = cpu_to_le32(len & 0xffff);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+ ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n",
+ idx, addr, len);
idx++;
sg_len -= len;
@@ -609,17 +610,16 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
if (len > SG_COUNT_ASIC_BUG) {
u32 addr;
- VPRINTK("Splitting last PRD.\n");
-
addr = le32_to_cpu(prd[idx - 1].addr);
prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
+ ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n",
+ idx - 1, addr, SG_COUNT_ASIC_BUG);
addr = addr + len - SG_COUNT_ASIC_BUG;
len = SG_COUNT_ASIC_BUG;
prd[idx].addr = cpu_to_le32(addr);
prd[idx].flags_len = cpu_to_le32(len);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+ ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
idx++;
}
@@ -632,8 +632,6 @@ static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
struct pdc_port_priv *pp = qc->ap->private_data;
unsigned int i;
- VPRINTK("ENTER\n");
-
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
pdc_fill_sg(qc);
@@ -922,12 +920,8 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
u32 hotplug_status;
int is_sataii_tx4;
- VPRINTK("ENTER\n");
-
- if (!host || !host->iomap[PDC_MMIO_BAR]) {
- VPRINTK("QUICK EXIT\n");
+ if (!host || !host->iomap[PDC_MMIO_BAR])
return IRQ_NONE;
- }
host_mmio = host->iomap[PDC_MMIO_BAR];
@@ -946,23 +940,18 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
/* reading should also clear interrupts */
mask = readl(host_mmio + PDC_INT_SEQMASK);
- if (mask == 0xffffffff && hotplug_status == 0) {
- VPRINTK("QUICK EXIT 2\n");
+ if (mask == 0xffffffff && hotplug_status == 0)
goto done_irq;
- }
mask &= 0xffff; /* only 16 SEQIDs possible */
- if (mask == 0 && hotplug_status == 0) {
- VPRINTK("QUICK EXIT 3\n");
+ if (mask == 0 && hotplug_status == 0)
goto done_irq;
- }
writel(mask, host_mmio + PDC_INT_SEQMASK);
is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
for (i = 0; i < host->n_ports; i++) {
- VPRINTK("port %u\n", i);
ap = host->ports[i];
/* check for a plug or unplug event */
@@ -989,8 +978,6 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
}
}
- VPRINTK("EXIT\n");
-
done_irq:
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
@@ -1005,8 +992,6 @@ static void pdc_packet_start(struct ata_queued_cmd *qc)
unsigned int port_no = ap->port_no;
u8 seq = (u8) (port_no + 1);
- VPRINTK("ENTER, ap %p\n", ap);
-
writel(0x00000001, host_mmio + (seq * 4));
readl(host_mmio + (seq * 4)); /* flush */
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index ef00ab644afb..8ca0810aad26 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -252,9 +252,6 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
len = sg_dma_len(sg);
*(__le32 *)prd = cpu_to_le32(len);
prd += sizeof(u64);
-
- VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
- (unsigned long long)addr, len);
}
return si;
@@ -268,8 +265,6 @@ static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
u64 addr;
unsigned int nelem;
- VPRINTK("ENTER\n");
-
qs_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
return AC_ERR_OK;
@@ -304,8 +299,6 @@ static inline void qs_packet_start(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
- VPRINTK("ENTER, ap %p\n", ap);
-
writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
wmb(); /* flush PRDs and pkt to memory */
writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
@@ -374,8 +367,8 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
struct qs_port_priv *pp = ap->private_data;
struct ata_queued_cmd *qc;
- DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
- sff1, sff0, port_no, sHST, sDST);
+ dev_dbg(host->dev, "SFF=%08x%08x: sHST=%d sDST=%02x\n",
+ sff1, sff0, sHST, sDST);
handled = 1;
if (!pp || pp->state != qs_state_pkt)
continue;
@@ -435,14 +428,10 @@ static irqreturn_t qs_intr(int irq, void *dev_instance)
unsigned int handled = 0;
unsigned long flags;
- VPRINTK("ENTER\n");
-
spin_lock_irqsave(&host->lock, flags);
handled = qs_intr_pkt(host) | qs_intr_mmio(host);
spin_unlock_irqrestore(&host->lock, flags);
- VPRINTK("EXIT\n");
-
return IRQ_RETVAL(handled);
}
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 44b0ed8f6bb8..3d96b6faa3f0 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -323,8 +323,6 @@ static int sata_rcar_bus_softreset(struct ata_port *ap, unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
-
/* software reset. causes dev0 to be selected */
iowrite32(ap->ctl, ioaddr->ctl_addr);
udelay(20);
@@ -350,7 +348,6 @@ static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes,
devmask |= 1 << 0;
/* issue bus reset */
- DPRINTK("about to softreset, devmask=%x\n", devmask);
rc = sata_rcar_bus_softreset(ap, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
@@ -361,7 +358,6 @@ static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes,
/* determine by signature whether we have ATA or ATAPI devices */
classes[0] = ata_sff_dev_classify(&link->device[0], devmask, &err);
- DPRINTK("classes[0]=%u\n", classes[0]);
return 0;
}
@@ -383,12 +379,6 @@ static void sata_rcar_tf_load(struct ata_port *ap,
iowrite32(tf->hob_lbal, ioaddr->lbal_addr);
iowrite32(tf->hob_lbam, ioaddr->lbam_addr);
iowrite32(tf->hob_lbah, ioaddr->lbah_addr);
- VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->hob_feature,
- tf->hob_nsect,
- tf->hob_lbal,
- tf->hob_lbam,
- tf->hob_lbah);
}
if (is_addr) {
@@ -397,18 +387,10 @@ static void sata_rcar_tf_load(struct ata_port *ap,
iowrite32(tf->lbal, ioaddr->lbal_addr);
iowrite32(tf->lbam, ioaddr->lbam_addr);
iowrite32(tf->lbah, ioaddr->lbah_addr);
- VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
- tf->feature,
- tf->nsect,
- tf->lbal,
- tf->lbam,
- tf->lbah);
}
- if (tf->flags & ATA_TFLAG_DEVICE) {
+ if (tf->flags & ATA_TFLAG_DEVICE)
iowrite32(tf->device, ioaddr->device_addr);
- VPRINTK("device 0x%X\n", tf->device);
- }
ata_wait_idle(ap);
}
@@ -440,8 +422,6 @@ static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
static void sata_rcar_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
-
iowrite32(tf->command, ap->ioaddr.command_addr);
ata_sff_pause(ap);
}
@@ -499,7 +479,6 @@ static void sata_rcar_drain_fifo(struct ata_queued_cmd *qc)
count < 65536; count += 2)
ioread32(ap->ioaddr.data_addr);
- /* Can become DEBUG later */
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
}
@@ -543,7 +522,6 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
prd[si].addr = cpu_to_le32(addr);
prd[si].flags_len = cpu_to_le32(sg_len);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
}
/* end-of-table flag */
@@ -685,7 +663,7 @@ static void sata_rcar_serr_interrupt(struct ata_port *ap)
if (!serror)
return;
- DPRINTK("SError @host_intr: 0x%x\n", serror);
+ ata_port_dbg(ap, "SError @host_intr: 0x%x\n", serror);
/* first, analyze and record host port events */
ata_ehi_clear_desc(ehi);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 75321f1ceba5..3b989a52879d 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -307,7 +307,6 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
prd->addr = cpu_to_le32(addr);
prd->flags_len = cpu_to_le32(sg_len);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
last_prd = prd;
prd++;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index f99ec6f7d7c0..2fef6ce93f07 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -656,8 +656,6 @@ static int sil24_softreset(struct ata_link *link, unsigned int *class,
const char *reason;
int rc;
- DPRINTK("ENTER\n");
-
/* put the port into known state */
if (sil24_init_port(ap)) {
reason = "port not ready";
@@ -680,9 +678,8 @@ static int sil24_softreset(struct ata_link *link, unsigned int *class,
}
sil24_read_tf(ap, 0, &tf);
- *class = ata_dev_classify(&tf);
+ *class = ata_port_classify(ap, &tf);
- DPRINTK("EXIT, class=%u\n", *class);
return 0;
err:
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 4c01190a5e37..6ceec59cb291 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -78,6 +78,9 @@
#define DRV_NAME "sata_sx4"
#define DRV_VERSION "0.12"
+static int dimm_test;
+module_param(dimm_test, int, 0644);
+MODULE_PARM_DESC(dimm_test, "Enable DIMM test during startup (1 = enabled)");
enum {
PDC_MMIO_BAR = 3,
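
The new dimm_test parameter makes the slow power-on DIMM memory test opt-in; module_param(..., 0644) also exposes it as a writable module attribute in sysfs. The hunk that consumes the flag falls outside this excerpt, so the gating below is an assumed sketch (foo_run_dimm_test() is illustrative):

#include <linux/module.h>
#include <linux/libata.h>
#include <linux/errno.h>

static int dimm_test;	/* 0 = skip the slow startup memory test */
module_param(dimm_test, int, 0644);
MODULE_PARM_DESC(dimm_test, "Enable DIMM test during startup (1 = enabled)");

static bool foo_run_dimm_test(struct ata_host *host)
{
	return true;	/* stand-in; the real test exercises the DIMM window */
}

static int foo_dimm_init(struct ata_host *host)
{
	if (dimm_test && !foo_run_dimm_test(host)) {
		dev_err(host->dev, "DIMM test failed\n");
		return -ENODEV;
	}
	return 0;
}
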
@@ -211,10 +214,8 @@ static unsigned int pdc20621_i2c_read(struct ata_host *host,
u32 device, u32 subaddr, u32 *pdata);
static int pdc20621_prog_dimm0(struct ata_host *host);
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
-#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host,
void *psource, u32 offset, u32 size);
-#endif
static void pdc20621_put_to_dimm(struct ata_host *host,
void *psource, u32 offset, u32 size);
static void pdc20621_irq_clear(struct ata_port *ap);
@@ -308,15 +309,9 @@ static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
/* output ATA packet S/G table */
addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
(PDC_DIMM_DATA_STEP * portno);
- VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
+
buf32[dw] = cpu_to_le32(addr);
buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
-
- VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
- PDC_20621_DIMM_BASE +
- (PDC_DIMM_WINDOW_STEP * portno) +
- PDC_DIMM_APKT_PRD,
- buf32[dw], buf32[dw + 1]);
}
static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
@@ -332,12 +327,6 @@ static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
buf32[dw] = cpu_to_le32(addr);
buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
-
- VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
- PDC_20621_DIMM_BASE +
- (PDC_DIMM_WINDOW_STEP * portno) +
- PDC_DIMM_HPKT_PRD,
- buf32[dw], buf32[dw + 1]);
}
static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
@@ -351,7 +340,6 @@ static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
unsigned int dimm_sg = PDC_20621_DIMM_BASE +
(PDC_DIMM_WINDOW_STEP * portno) +
PDC_DIMM_APKT_PRD;
- VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
i = PDC_DIMM_ATA_PKT;
@@ -406,8 +394,6 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
unsigned int dimm_sg = PDC_20621_DIMM_BASE +
(PDC_DIMM_WINDOW_STEP * portno) +
PDC_DIMM_HPKT_PRD;
- VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
- VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
dw = PDC_DIMM_HOST_PKT >> 2;
@@ -424,14 +410,6 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
buf32[dw + 1] = cpu_to_le32(host_sg);
buf32[dw + 2] = cpu_to_le32(dimm_sg);
buf32[dw + 3] = 0;
-
- VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
- PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
- PDC_DIMM_HOST_PKT,
- buf32[dw + 0],
- buf32[dw + 1],
- buf32[dw + 2],
- buf32[dw + 3]);
}
static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
@@ -447,8 +425,6 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
- VPRINTK("ata%u: ENTER\n", ap->print_id);
-
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@@ -492,7 +468,8 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
readl(dimm_mmio); /* MMIO PCI posting flush */
- VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
+ ata_port_dbg(ap, "ata pkt buf ofs %u, prd size %u, mmio copied\n",
+ i, sgt_len);
}
static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
@@ -504,8 +481,6 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
unsigned int portno = ap->port_no;
unsigned int i;
- VPRINTK("ata%u: ENTER\n", ap->print_id);
-
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@@ -527,7 +502,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
readl(dimm_mmio); /* MMIO PCI posting flush */
- VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
+ ata_port_dbg(ap, "ata pkt buf ofs %u, mmio copied\n", i);
}
static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
@@ -601,7 +576,6 @@ static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
pp->hdma_cons++;
}
-#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
@@ -611,14 +585,10 @@ static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
dimm_mmio += PDC_DIMM_HOST_PKT;
- printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
- printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
- printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
- printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
+ ata_port_dbg(ap, "HDMA 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ readl(dimm_mmio), readl(dimm_mmio + 4),
+ readl(dimm_mmio + 8), readl(dimm_mmio + 12));
}
-#else
-static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
-#endif /* ATA_VERBOSE_DEBUG */
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
@@ -633,8 +603,6 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
- VPRINTK("ata%u: ENTER\n", ap->print_id);
-
wmb(); /* flush PRD, pkt writes */
port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
@@ -645,7 +613,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
pdc20621_dump_hdma(qc);
pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
- VPRINTK("queued ofs 0x%x (%u), seq %u\n",
+ ata_port_dbg(ap, "queued ofs 0x%x (%u), seq %u\n",
port_ofs + PDC_DIMM_HOST_PKT,
port_ofs + PDC_DIMM_HOST_PKT,
seq);
@@ -656,7 +624,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
writel(port_ofs + PDC_DIMM_ATA_PKT,
ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
- VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
+ ata_port_dbg(ap, "submitted ofs 0x%x (%u), seq %u\n",
port_ofs + PDC_DIMM_ATA_PKT,
port_ofs + PDC_DIMM_ATA_PKT,
seq);
@@ -696,14 +664,12 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
u8 status;
unsigned int handled = 0;
- VPRINTK("ENTER\n");
-
if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
(!(qc->tf.flags & ATA_TFLAG_WRITE))) {
/* step two - DMA from DIMM to host */
if (doing_hdma) {
- VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
+ ata_port_dbg(ap, "read hdma, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* get drive status; clear intr; complete txn */
qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
@@ -714,7 +680,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
/* step one - exec ATA command */
else {
u8 seq = (u8) (port_no + 1 + 4);
- VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
+ ata_port_dbg(ap, "read ata, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* submit hdma pkt */
@@ -729,7 +695,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
/* step one - DMA from host to DIMM */
if (doing_hdma) {
u8 seq = (u8) (port_no + 1);
- VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
+ ata_port_dbg(ap, "write hdma, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* submit ata pkt */
@@ -742,7 +708,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
/* step two - execute ATA command */
else {
- VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
+ ata_port_dbg(ap, "write ata, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* get drive status; clear intr; complete txn */
qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
@@ -755,7 +721,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
} else if (qc->tf.protocol == ATA_PROT_NODATA) {
status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
- DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
+ ata_port_dbg(ap, "BUS_NODATA (drv_stat 0x%X)\n", status);
qc->err_mask |= ac_err_mask(status);
ata_qc_complete(qc);
handled = 1;
@@ -781,29 +747,21 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
unsigned int handled = 0;
void __iomem *mmio_base;
- VPRINTK("ENTER\n");
-
- if (!host || !host->iomap[PDC_MMIO_BAR]) {
- VPRINTK("QUICK EXIT\n");
+ if (!host || !host->iomap[PDC_MMIO_BAR])
return IRQ_NONE;
- }
mmio_base = host->iomap[PDC_MMIO_BAR];
/* reading should also clear interrupts */
mmio_base += PDC_CHIP0_OFS;
mask = readl(mmio_base + PDC_20621_SEQMASK);
- VPRINTK("mask == 0x%x\n", mask);
- if (mask == 0xffffffff) {
- VPRINTK("QUICK EXIT 2\n");
+ if (mask == 0xffffffff)
return IRQ_NONE;
- }
+
mask &= 0xffff; /* only 16 tags possible */
- if (!mask) {
- VPRINTK("QUICK EXIT 3\n");
+ if (!mask)
return IRQ_NONE;
- }
spin_lock(&host->lock);
@@ -816,7 +774,8 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
else
ap = host->ports[port_no];
tmp = mask & (1 << i);
- VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
+ if (ap)
+ ata_port_dbg(ap, "seq %u, tmp %x\n", i, tmp);
if (tmp && ap) {
struct ata_queued_cmd *qc;
@@ -829,10 +788,6 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
spin_unlock(&host->lock);
- VPRINTK("mask == 0x%x\n", mask);
-
- VPRINTK("EXIT\n");
-
return IRQ_RETVAL(handled);
}
@@ -979,7 +934,6 @@ static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
}
-#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
u32 offset, u32 size)
{
@@ -1029,7 +983,6 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
memcpy_fromio(psource, dimm_mmio, size / 4);
}
}
-#endif
static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
@@ -1226,15 +1179,16 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
/* Turn on for ECC */
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0)) {
- pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
- PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ dev_err(host->dev,
+ "Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
return 1;
}
if (spd0 == 0x02) {
data |= (0x01 << 16);
writel(data, mmio + PDC_SDRAM_CONTROL);
readl(mmio + PDC_SDRAM_CONTROL);
- printk(KERN_ERR "Local DIMM ECC Enabled\n");
+ dev_err(host->dev, "Local DIMM ECC Enabled\n");
}
/* DIMM Initialization Select/Enable (bit 18/19) */
@@ -1274,7 +1228,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* Initialize Time Period Register */
writel(0xffffffff, mmio + PDC_TIME_PERIOD);
time_period = readl(mmio + PDC_TIME_PERIOD);
- VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
+ dev_dbg(host->dev, "Time Period Register (0x40): 0x%x\n", time_period);
/* Enable timer */
writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
@@ -1289,7 +1243,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
*/
tcount = readl(mmio + PDC_TIME_COUNTER);
- VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
+ dev_dbg(host->dev, "Time Counter Register (0x44): 0x%x\n", tcount);
/*
If SX4 is on PCI-X bus, after 3 seconds, the timer counter
@@ -1297,17 +1251,19 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
*/
if (tcount >= PCI_X_TCOUNT) {
ticks = (time_period - tcount);
- VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
+ dev_dbg(host->dev, "Num counters 0x%x (%d)\n", ticks, ticks);
clock = (ticks / 300000);
- VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
+ dev_dbg(host->dev, "10 * Internal clk = 0x%x (%d)\n",
+ clock, clock);
clock = (clock * 33);
- VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
+ dev_dbg(host->dev, "10 * Internal clk * 33 = 0x%x (%d)\n",
+ clock, clock);
/* PLL F Param (bit 22:16) */
fparam = (1400000 / clock) - 2;
- VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
+ dev_dbg(host->dev, "PLL F Param: 0x%x (%d)\n", fparam, fparam);
/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
pci_status = (0x8a001824 | (fparam << 16));
@@ -1315,7 +1271,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
pci_status = PCI_PLL_INIT;
/* Initialize PLL. */
- VPRINTK("pci_status: 0x%x\n", pci_status);
+ dev_dbg(host->dev, "pci_status: 0x%x\n", pci_status);
writel(pci_status, mmio + PDC_CTL_STATUS);
readl(mmio + PDC_CTL_STATUS);
@@ -1324,23 +1280,23 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
and program the DIMM Module Controller.
*/
if (!(speed = pdc20621_detect_dimm(host))) {
- printk(KERN_ERR "Detect Local DIMM Fail\n");
+ dev_err(host->dev, "Detect Local DIMM Fail\n");
return 1; /* DIMM error */
}
- VPRINTK("Local DIMM Speed = %d\n", speed);
+ dev_dbg(host->dev, "Local DIMM Speed = %d\n", speed);
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
- VPRINTK("Local DIMM Size = %dMB\n", size);
+ dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
/* Programming DIMM Module Global Control Register (index_CID0:88h) */
if (pdc20621_prog_dimm_global(host)) {
- printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
+ dev_err(host->dev,
+ "Programming DIMM Module Global Control Register Fail\n");
return 1;
}
-#ifdef ATA_VERBOSE_DEBUG
- {
+ if (dimm_test) {
u8 test_parttern1[40] =
{0x55,0xAA,'P','r','o','m','i','s','e',' ',
'N','o','t',' ','Y','e','t',' ',
@@ -1354,31 +1310,33 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
- printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
+ dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
test_parttern2[1], &(test_parttern2[2]));
pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
40);
- printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
+ test_parttern2[0],
+ test_parttern2[1], &(test_parttern2[2]));
pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
- printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
+ test_parttern2[0],
+ test_parttern2[1], &(test_parttern2[2]));
}
-#endif
	/* ECC initialization. */
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0)) {
- pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ dev_err(host->dev,
+ "Failed in i2c read: device=%#x, subaddr=%#x\n",
PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
return 1;
}
if (spd0 == 0x02) {
void *buf;
- VPRINTK("Start ECC initialization\n");
+ dev_dbg(host->dev, "Start ECC initialization\n");
addr = 0;
length = size * 1024 * 1024;
buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
@@ -1390,7 +1348,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
addr += ECC_ERASE_BUF_SZ;
}
kfree(buf);
- VPRINTK("Finish ECC initialization\n");
+ dev_dbg(host->dev, "Finish ECC initialization\n");
}
return 0;
}
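
The hunks above apply one conversion pattern throughout sata_sx4.c: compile-time VPRINTK()/printk() tracing becomes ata_port_dbg()/dev_dbg(), which is always compiled in and switched per call site through dynamic debug. A minimal sketch of the target style, assuming the driver context above (PDC_HDMA_CTLSTAT is the driver-local register offset used in these hunks):

/* Illustrative only -- not part of the patch. With dynamic debug
 * (CONFIG_DYNAMIC_DEBUG), the message is enabled at runtime, e.g.:
 *   echo 'file sata_sx4.c +p' > /sys/kernel/debug/dynamic_debug/control
 */
static void example_trace_hdma(struct ata_port *ap, void __iomem *mmio)
{
	ata_port_dbg(ap, "hdma ctl/stat 0x%x 0x%x\n",
		     readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
}
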
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index bc8e8d9f176b..3e726ee91fdc 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -178,7 +178,6 @@ static void ia_hack_tcq(IADEV *dev) {
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
u_short desc_num, i;
- struct sk_buff *skb;
struct ia_vcc *iavcc_r = NULL;
unsigned long delta;
static unsigned long timer = 0;
@@ -202,8 +201,7 @@ static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
else
dev->ffL.tcq_rd -= 2;
*(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
- if (!(skb = dev->desc_tbl[i].txskb) ||
- !(iavcc_r = dev->desc_tbl[i].iavcc))
+ if (!dev->desc_tbl[i].txskb || !(iavcc_r = dev->desc_tbl[i].iavcc))
printk("Fatal err, desc table vcc or skb is NULL\n");
else
iavcc_r->vc_desc_cnt--;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index ffcbe2bc460e..6f04b831a5c0 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -62,6 +62,17 @@ config DEVTMPFS_MOUNT
rescue mode with init=/bin/sh, even when the /dev directory
on the rootfs is completely empty.
+config DEVTMPFS_SAFE
+ bool "Use nosuid,noexec mount options on devtmpfs"
+ depends on DEVTMPFS
+ help
+ This instructs the kernel to include the MS_NOEXEC and MS_NOSUID mount
+ flags when mounting devtmpfs.
+
+ Notice: If enabled, things like /dev/mem cannot be mmapped
+ with the PROT_EXEC flag. This can break, for example, non-KMS
+ video drivers.
+
config STANDALONE
bool "Select only drivers that don't need compile-time external firmware"
default y
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index bc1876915457..eaa31e567d1e 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -14,7 +14,6 @@
#include <linux/of.h>
#include <asm/sections.h>
-#include <asm/pgalloc.h>
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
@@ -155,66 +154,6 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}
-static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
- size_t align)
-{
- int nid = early_cpu_to_node(cpu);
-
- return memblock_alloc_try_nid(size, align,
- __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-}
-
-static void __init pcpu_fc_free(void *ptr, size_t size)
-{
- memblock_free(ptr, size);
-}
-
-#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
-static void __init pcpu_populate_pte(unsigned long addr)
-{
- pgd_t *pgd = pgd_offset_k(addr);
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
-
- p4d = p4d_offset(pgd, addr);
- if (p4d_none(*p4d)) {
- pud_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- p4d_populate(&init_mm, p4d, new);
- }
-
- pud = pud_offset(p4d, addr);
- if (pud_none(*pud)) {
- pmd_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- pud_populate(&init_mm, pud, new);
- }
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- pte_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- pmd_populate_kernel(&init_mm, pmd, new);
- }
-
- return;
-
-err_alloc:
- panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
-}
-#endif
-
void __init setup_per_cpu_areas(void)
{
unsigned long delta;
@@ -229,7 +168,7 @@ void __init setup_per_cpu_areas(void)
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
pcpu_cpu_distance,
- pcpu_fc_alloc, pcpu_fc_free);
+ early_cpu_to_node);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
if (rc < 0)
pr_warn("PERCPU: %s allocator failed (%d), falling back to page size\n",
@@ -239,10 +178,7 @@ void __init setup_per_cpu_areas(void)
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
if (rc < 0)
- rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
- pcpu_fc_alloc,
- pcpu_fc_free,
- pcpu_populate_pte);
+ rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, early_cpu_to_node);
#endif
if (rc < 0)
panic("Failed to initialize percpu areas (err=%d).", rc);
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index ff16a36a908b..976154140f0b 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -22,6 +22,7 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
+static DEFINE_PER_CPU(u32, freq_factor) = 1;
static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
@@ -155,15 +156,49 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
DEFINE_PER_CPU(unsigned long, thermal_pressure);
-void topology_set_thermal_pressure(const struct cpumask *cpus,
- unsigned long th_pressure)
+/**
+ * topology_update_thermal_pressure() - Update thermal pressure for CPUs
+ * @cpus : The related CPUs for which capacity has been reduced
+ * @capped_freq : The maximum allowed frequency that CPUs can run at
+ *
+ * Update the value of thermal pressure for all @cpus in the mask. The
+ * cpumask should include all (online+offline) affected CPUs, to avoid
+ * operating on stale data when hot-plug is used for some CPUs. The
+ * @capped_freq reflects the currently allowed max CPUs frequency due to
+ * thermal capping. It might be also a boost frequency value, which is bigger
+ * than the internal 'freq_factor' max frequency. In such case the pressure
+ * value should simply be removed, since this is an indication that there is
+ * no thermal throttling. The @capped_freq must be provided in kHz.
+ */
+void topology_update_thermal_pressure(const struct cpumask *cpus,
+ unsigned long capped_freq)
{
+ unsigned long max_capacity, capacity, th_pressure;
+ u32 max_freq;
int cpu;
+ cpu = cpumask_first(cpus);
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ max_freq = per_cpu(freq_factor, cpu);
+
+ /* Convert to MHz scale which is used in 'freq_factor' */
+ capped_freq /= 1000;
+
+ /*
+ * Handle properly the boost frequencies, which should simply clean
+ * the thermal pressure value.
+ */
+ if (max_freq <= capped_freq)
+ capacity = max_capacity;
+ else
+ capacity = mult_frac(max_capacity, capped_freq, max_freq);
+
+ th_pressure = max_capacity - capacity;
+
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
-EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);
+EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
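
A worked example of the computation above, with hypothetical numbers: for max_capacity = 1024, a per-CPU freq_factor of 2000 (MHz) and capped_freq = 1500000 kHz, the conversion gives 1500 MHz, so capacity = 1024 * 1500 / 2000 = 768 and th_pressure = 1024 - 768 = 256; any capped_freq at or above 2000 MHz clears the pressure. The same arithmetic as a stand-alone sketch (mult_frac() open-coded here; the kernel macro additionally avoids intermediate overflow):

#include <stdio.h>

int main(void)
{
	unsigned long max_capacity = 1024;   /* arch_scale_cpu_capacity() */
	unsigned long max_freq = 2000;       /* per-CPU freq_factor, in MHz */
	unsigned long capped_freq = 1500000; /* kHz, as the API expects */
	unsigned long capacity, th_pressure;

	capped_freq /= 1000;                 /* convert to the MHz scale */
	if (max_freq <= capped_freq)
		capacity = max_capacity;     /* boost frequency: no pressure */
	else
		capacity = max_capacity * capped_freq / max_freq;

	th_pressure = max_capacity - capacity;
	printf("capacity=%lu pressure=%lu\n", capacity, th_pressure); /* 768 256 */
	return 0;
}
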
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
@@ -217,7 +252,6 @@ static void update_topology_flags_workfn(struct work_struct *work)
update_topology = 0;
}
-static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;
static int free_raw_capacity(void)
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 9230c9472bb0..8c5e65930617 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -17,6 +17,147 @@
#include <linux/auxiliary_bus.h>
#include "base.h"
+/**
+ * DOC: PURPOSE
+ *
+ * In some subsystems, the functionality of the core device (PCI/ACPI/other) is
+ * too complex for a single device to be managed by a monolithic driver (e.g.
+ * Sound Open Firmware), multiple devices might implement a common intersection
+ * of functionality (e.g. NICs + RDMA), or a driver may want to export an
+ * interface for another subsystem to drive (e.g. a SIOV Physical Function exporting
+ * Virtual Function management). A split of the functionality into child-
+ * devices representing sub-domains of functionality makes it possible to
+ * compartmentalize, layer, and distribute domain-specific concerns via a Linux
+ * device-driver model.
+ *
+ * An example for this kind of requirement is the audio subsystem where a
+ * single IP is handling multiple entities such as HDMI, Soundwire, local
+ * devices such as mics/speakers etc. The split for the core's functionality
+ * can be arbitrary or be defined by the DSP firmware topology and include
+ * hooks for test/debug. This allows for the audio core device to be minimal
+ * and focused on hardware-specific control and communication.
+ *
+ * Each auxiliary_device represents a part of its parent functionality. The
+ * generic behavior can be extended and specialized as needed by encapsulating
+ * an auxiliary_device within other domain-specific structures and the use of
+ * .ops callbacks. Devices on the auxiliary bus do not share any structures and
+ * the use of a communication channel with the parent is domain-specific.
+ *
+ * Note that ops are intended as a way to augment instance behavior within a
+ * class of auxiliary devices; they are not the mechanism for exporting common
+ * infrastructure from the parent. Consider EXPORT_SYMBOL_NS() to convey
+ * infrastructure from the parent module to the auxiliary module(s).
+ */
+
+/**
+ * DOC: USAGE
+ *
+ * The auxiliary bus is to be used when a driver and one or more kernel
+ * modules that share a common header file with the driver need a mechanism to
+ * connect and provide access to a shared object allocated by the
+ * auxiliary_device's registering driver. The registering driver for the
+ * auxiliary_device(s) and the kernel module(s) registering auxiliary_drivers
+ * can be from the same subsystem, or from multiple subsystems.
+ *
+ * The emphasis here is on a common generic interface that keeps subsystem
+ * customization out of the bus infrastructure.
+ *
+ * One example is a PCI network device that is RDMA-capable and exports a child
+ * device to be driven by an auxiliary_driver in the RDMA subsystem. The PCI
+ * driver allocates and registers an auxiliary_device for each physical
+ * function on the NIC. The RDMA driver registers an auxiliary_driver that
+ * claims each of these auxiliary_devices. This conveys data/ops published by
+ * the parent PCI device/driver to the RDMA auxiliary_driver.
+ *
+ * Another use case is for the PCI device to be split out into multiple sub
+ * functions. For each sub function an auxiliary_device is created. A PCI sub
+ * function driver binds to such devices and creates one or more of its own
+ * class devices. A PCI sub function auxiliary device is likely to be contained
+ * in a struct with additional attributes such as a user-defined sub function number
+ * and optional attributes such as resources and a link to the parent device.
+ * These attributes could be used by systemd/udev and hence should be
+ * initialized before a driver binds to an auxiliary_device.
+ *
+ * A key requirement for utilizing the auxiliary bus is that there is no
+ * dependency on a physical bus, device, register accesses or regmap support.
+ * These individual devices split from the core cannot live on the platform bus
+ * as they are not physical devices that are controlled by DT/ACPI. The same
+ * argument applies for not using MFD in this scenario as MFD relies on
+ * individual function devices being physical devices.
+ */
+
+/**
+ * DOC: EXAMPLE
+ *
+ * Auxiliary devices are created and registered by a subsystem-level core
+ * device that needs to break up its functionality into smaller fragments. One
+ * way to extend the scope of an auxiliary_device is to encapsulate it within a
+ * domain-specific structure defined by the parent device. This structure
+ * contains the auxiliary_device and any associated shared data/callbacks
+ * needed to establish the connection with the parent.
+ *
+ * An example is:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * struct auxiliary_device auxdev;
+ * void (*connect)(struct auxiliary_device *auxdev);
+ * void (*disconnect)(struct auxiliary_device *auxdev);
+ * void *data;
+ * };
+ *
+ * The parent device then registers the auxiliary_device by calling
+ * auxiliary_device_init(), and then auxiliary_device_add(), with the pointer
+ * to the auxdev member of the above structure. The parent provides a name for
+ * the auxiliary_device that, combined with the parent's KBUILD_MODNAME,
+ * creates a match_name that is used for matching and binding with a driver.
+ *
+ * Whenever an auxiliary_driver is registered, based on the match_name, the
+ * auxiliary_driver's probe() is invoked for the matching devices. The
+ * auxiliary_driver can also be encapsulated inside custom drivers that make
+ * the core device's functionality extensible by adding additional
+ * domain-specific ops as follows:
+ *
+ * .. code-block:: c
+ *
+ * struct my_ops {
+ * void (*send)(struct auxiliary_device *auxdev);
+ * void (*receive)(struct auxiliary_device *auxdev);
+ * };
+ *
+ *
+ * struct my_driver {
+ * struct auxiliary_driver auxiliary_drv;
+ * const struct my_ops ops;
+ * };
+ *
+ * An example of this type of usage is:
+ *
+ * .. code-block:: c
+ *
+ * const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * { },
+ * };
+ *
+ * const struct my_ops my_custom_ops = {
+ * .send = my_tx,
+ * .receive = my_rx,
+ * };
+ *
+ * const struct my_driver my_drv = {
+ * .auxiliary_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_probe,
+ * .remove = my_remove,
+ * .shutdown = my_shutdown,
+ * },
+ * .ops = my_custom_ops,
+ * };
+ */
+
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
@@ -117,7 +258,7 @@ static struct bus_type auxiliary_bus_type = {
* auxiliary_device_init - check auxiliary_device and initialize
* @auxdev: auxiliary device struct
*
- * This is the first step in the two-step process to register an
+ * This is the second step in the three-step process to register an
* auxiliary_device.
*
* When this function returns an error code, then the device_initialize will
@@ -155,7 +296,7 @@ EXPORT_SYMBOL_GPL(auxiliary_device_init);
* @auxdev: auxiliary bus device to add to the bus
* @modname: name of the parent device's driver module
*
- * This is the second step in the two-step process to register an
+ * This is the third step in the three-step process to register an
* auxiliary_device.
*
* This function must be called after a successful call to
@@ -202,6 +343,8 @@ EXPORT_SYMBOL_GPL(__auxiliary_device_add);
* This function returns a reference to a device that is 'found'
* for later use, as determined by the @match callback.
*
+ * The reference returned should be released with put_device().
+ *
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
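
A hedged sketch of a caller honouring the put_device() rule documented above; the match callback and the "foo_mod.foo_dev.0" name are hypothetical:

#include <linux/auxiliary_bus.h>
#include <linux/string.h>

static int example_match(struct device *dev, const void *data)
{
	return !strcmp(dev_name(dev), data);
}

static void example_lookup(void)
{
	struct auxiliary_device *auxdev;

	auxdev = auxiliary_find_device(NULL, "foo_mod.foo_dev.0", example_match);
	if (!auxdev)
		return;

	/* ... use auxdev ... */
	put_device(&auxdev->dev);	/* release the reference 'found' for us */
}
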
@@ -225,6 +368,11 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device);
* @auxdrv: auxiliary_driver structure
* @owner: owning module/driver
* @modname: KBUILD_MODNAME for parent driver
+ *
+ * The expectation is that users will call the "auxiliary_driver_register"
+ * macro so that the caller's KBUILD_MODNAME is automatically inserted for the
+ * modname parameter. Only if a user requires a custom name would this version
+ * be called directly.
*/
int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
struct module *owner, const char *modname)
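
For context, the usual registration shape the comment above refers to, with the auxiliary_driver_register()/module_auxiliary_driver() convenience macros supplying KBUILD_MODNAME; the driver name and callbacks here are hypothetical:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int ex_probe(struct auxiliary_device *auxdev,
		    const struct auxiliary_device_id *id)
{
	return 0;
}

static void ex_remove(struct auxiliary_device *auxdev)
{
}

static const struct auxiliary_device_id ex_id_table[] = {
	{ .name = "foo_mod.foo_dev" },
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, ex_id_table);

static struct auxiliary_driver ex_driver = {
	.name = "ex",		/* prefixed with this module's KBUILD_MODNAME */
	.id_table = ex_id_table,
	.probe = ex_probe,
	.remove = ex_remove,
};
module_auxiliary_driver(ex_driver);
MODULE_LICENSE("GPL");
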
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index bdc98c5713d5..97936ec49bde 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -163,9 +163,9 @@ static struct kobj_type bus_ktype = {
.release = bus_release,
};
-static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
+static int bus_uevent_filter(struct kobject *kobj)
{
- struct kobj_type *ktype = get_ktype(kobj);
+ const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &bus_ktype)
return 1;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index fd034d742447..7bb957b11861 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -485,8 +485,7 @@ static void device_link_release_fn(struct work_struct *work)
/* Ensure that all references to the link object have been dropped. */
device_link_synchronize_removal();
- while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put(link->supplier);
+ pm_runtime_release_supplier(link, true);
put_device(link->consumer);
put_device(link->supplier);
@@ -2261,9 +2260,9 @@ static struct kobj_type device_ktype = {
};
-static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
+static int dev_uevent_filter(struct kobject *kobj)
{
- struct kobj_type *ktype = get_ktype(kobj);
+ const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &device_ktype) {
struct device *dev = kobj_to_dev(kobj);
@@ -2275,7 +2274,7 @@ static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
return 0;
}
-static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
+static const char *dev_uevent_name(struct kobject *kobj)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2286,8 +2285,7 @@ static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
return NULL;
}
-static int dev_uevent(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env)
+static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
{
struct device *dev = kobj_to_dev(kobj);
int retval = 0;
@@ -2382,7 +2380,7 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
/* respect filter */
if (kset->uevent_ops && kset->uevent_ops->filter)
- if (!kset->uevent_ops->filter(kset, &dev->kobj))
+ if (!kset->uevent_ops->filter(&dev->kobj))
goto out;
env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
@@ -2390,7 +2388,7 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
return -ENOMEM;
/* let the kset specific function add its keys */
- retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
+ retval = kset->uevent_ops->uevent(&dev->kobj, env);
if (retval)
goto out;
@@ -2874,10 +2872,6 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->devres_head);
device_pm_init(dev);
set_dev_node(dev, NUMA_NO_NODE);
-#ifdef CONFIG_GENERIC_MSI_IRQ
- raw_spin_lock_init(&dev->msi_lock);
- INIT_LIST_HEAD(&dev->msi_list);
-#endif
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.defer_sync);
@@ -3029,6 +3023,23 @@ static inline struct kobject *get_glue_dir(struct device *dev)
return dev->kobj.parent;
}
+/**
+ * kobject_has_children - Returns whether a kobject has children.
+ * @kobj: the object to test
+ *
+ * This will return whether a kobject has other kobjects as children.
+ *
+ * It does NOT account for the presence of attribute files, only sub
+ * directories. It also assumes there is no concurrent addition or
+ * removal of such children, and thus relies on external locking.
+ */
+static inline bool kobject_has_children(struct kobject *kobj)
+{
+ WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
+
+ return kobj->sd && kobj->sd->dir.subdirs;
+}
+
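
An illustrative (hypothetical) caller of the new helper; the glue-directory cleanup below is the kind of user it was written for:

/* Only delete a glue dir once it no longer has child kobjects. */
static void example_cleanup_glue_dir(struct kobject *glue_dir)
{
	if (!kobject_has_children(glue_dir))
		kobject_del(glue_dir);
}
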
/*
* make sure cleaning up dir as the last step, we need to make
* sure .release handler of kobject is run with holding the
@@ -3582,7 +3593,6 @@ void device_del(struct device *dev)
device_pm_remove(dev);
driver_deferred_probe_del(dev);
device_platform_notify_remove(dev);
- device_remove_properties(dev);
device_links_purge(dev);
if (dev->bus)
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 68ea1f949daa..9eaaff2f556c 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -577,14 +577,14 @@ re_probe:
if (dev->bus->dma_configure) {
ret = dev->bus->dma_configure(dev);
if (ret)
- goto probe_failed;
+ goto pinctrl_bind_failed;
}
ret = driver_sysfs_add(dev);
if (ret) {
pr_err("%s: driver_sysfs_add(%s) failed\n",
__func__, dev_name(dev));
- goto probe_failed;
+ goto sysfs_failed;
}
if (dev->pm_domain && dev->pm_domain->activate) {
@@ -657,6 +657,8 @@ dev_groups_failed:
else if (drv->remove)
drv->remove(dev);
probe_failed:
+ driver_sysfs_remove(dev);
+sysfs_failed:
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -666,7 +668,6 @@ pinctrl_bind_failed:
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
- driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 8be352ab4ddb..f41063ac1aee 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -29,6 +29,12 @@
#include <uapi/linux/mount.h>
#include "base.h"
+#ifdef CONFIG_DEVTMPFS_SAFE
+#define DEVTMPFS_MFLAGS (MS_SILENT | MS_NOEXEC | MS_NOSUID)
+#else
+#define DEVTMPFS_MFLAGS (MS_SILENT)
+#endif
+
static struct task_struct *thread;
static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);
@@ -59,8 +65,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla
const char *dev_name, void *data)
{
struct super_block *s = mnt->mnt_sb;
+ int err;
+
atomic_inc(&s->s_active);
down_write(&s->s_umount);
+ err = reconfigure_single(s, flags, data);
+ if (err < 0) {
+ deactivate_locked_super(s);
+ return ERR_PTR(err);
+ }
return dget(s->s_root);
}
@@ -363,7 +376,7 @@ int __init devtmpfs_mount(void)
if (!thread)
return 0;
- err = init_mount("devtmpfs", "dev", "devtmpfs", MS_SILENT, NULL);
+ err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
else
@@ -412,7 +425,7 @@ static noinline int __init devtmpfs_setup(void *p)
err = ksys_unshare(CLONE_NEWNS);
if (err)
goto out;
- err = init_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
+ err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
goto out;
init_chdir("/.."); /* will traverse into overmounted root */
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index eb4be452062a..6c067dedc01e 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -3,10 +3,10 @@ obj-y += main.o
# Create $(fwdir) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a
# leading /, it's relative to $(srctree).
-fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR))
+fwdir := $(CONFIG_EXTRA_FIRMWARE_DIR)
fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
-firmware := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
+firmware := $(addsuffix .gen.o, $(CONFIG_EXTRA_FIRMWARE))
obj-y += $(firmware)
FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index d7d63c1aa993..4afb0e9312c0 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -199,11 +199,16 @@ static struct class firmware_class = {
int register_sysfs_loader(void)
{
- return class_register(&firmware_class);
+ int ret = class_register(&firmware_class);
+
+ if (ret != 0)
+ return ret;
+ return register_firmware_config_sysctl();
}
void unregister_sysfs_loader(void)
{
+ unregister_firmware_config_sysctl();
class_unregister(&firmware_class);
}
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 3af7205b302f..9f3055d3b4ca 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -42,6 +42,17 @@ void fw_fallback_set_default_timeout(void);
int register_sysfs_loader(void);
void unregister_sysfs_loader(void);
+#ifdef CONFIG_SYSCTL
+extern int register_firmware_config_sysctl(void);
+extern void unregister_firmware_config_sysctl(void);
+#else
+static inline int register_firmware_config_sysctl(void)
+{
+ return 0;
+}
+static inline void unregister_firmware_config_sysctl(void) { }
+#endif /* CONFIG_SYSCTL */
+
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 46a731dede6f..e5ac098d0742 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -4,6 +4,7 @@
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
@@ -24,7 +25,7 @@ struct firmware_fallback_config fw_fallback_config = {
EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
#ifdef CONFIG_SYSCTL
-struct ctl_table firmware_config_table[] = {
+static struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
.data = &fw_fallback_config.force_sysfs_fallback,
@@ -45,4 +46,24 @@ struct ctl_table firmware_config_table[] = {
},
{ }
};
-#endif
+
+static struct ctl_table_header *firmware_config_sysct_table_header;
+int register_firmware_config_sysctl(void)
+{
+ firmware_config_sysct_table_header =
+ register_sysctl("kernel/firmware_config",
+ firmware_config_table);
+ if (!firmware_config_sysct_table_header)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+
+void unregister_firmware_config_sysctl(void)
+{
+ unregister_sysctl_table(firmware_config_sysct_table_header);
+ firmware_config_sysct_table_header = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+
+#endif /* CONFIG_SYSCTL */
diff --git a/drivers/base/node.c b/drivers/base/node.c
index b5a4ba18f9f9..87acc47e8951 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -581,6 +581,9 @@ static const struct attribute_group node_dev_group = {
static const struct attribute_group *node_dev_groups[] = {
&node_dev_group,
+#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
+ &arch_node_dev_group,
+#endif
NULL
};
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 3d6c8f9caf43..296ea673d661 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -23,7 +23,6 @@
struct platform_msi_priv_data {
struct device *dev;
void *host_data;
- const struct attribute_group **msi_irq_groups;
msi_alloc_info_t arg;
irq_write_msi_msg_t write_msg;
int devid;
@@ -39,11 +38,9 @@ static DEFINE_IDA(platform_msi_devid_ida);
*/
static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
{
- u32 devid;
+ u32 devid = desc->dev->msi.data->platform_data->devid;
- devid = desc->platform.msi_priv_data->devid;
-
- return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
+ return (devid << (32 - DEV_ID_SHIFT)) | desc->msi_index;
}
static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
@@ -86,11 +83,8 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info)
static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
- struct platform_msi_priv_data *priv_data;
-
- priv_data = desc->platform.msi_priv_data;
- priv_data->write_msg(desc, msg);
+ desc->dev->msi.data->platform_data->write_msg(desc, msg);
}
static void platform_msi_update_chip_ops(struct msi_domain_info *info)
@@ -113,62 +107,6 @@ static void platform_msi_update_chip_ops(struct msi_domain_info *info)
info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
}
-static void platform_msi_free_descs(struct device *dev, int base, int nvec)
-{
- struct msi_desc *desc, *tmp;
-
- list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
- if (desc->platform.msi_index >= base &&
- desc->platform.msi_index < (base + nvec)) {
- list_del(&desc->list);
- free_msi_entry(desc);
- }
- }
-}
-
-static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
- int nvec,
- struct platform_msi_priv_data *data)
-
-{
- struct msi_desc *desc;
- int i, base = 0;
-
- if (!list_empty(dev_to_msi_list(dev))) {
- desc = list_last_entry(dev_to_msi_list(dev),
- struct msi_desc, list);
- base = desc->platform.msi_index + 1;
- }
-
- for (i = 0; i < nvec; i++) {
- desc = alloc_msi_entry(dev, 1, NULL);
- if (!desc)
- break;
-
- desc->platform.msi_priv_data = data;
- desc->platform.msi_index = base + i;
- desc->irq = virq ? virq + i : 0;
-
- list_add_tail(&desc->list, dev_to_msi_list(dev));
- }
-
- if (i != nvec) {
- /* Clean up the mess */
- platform_msi_free_descs(dev, base, nvec);
-
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int platform_msi_alloc_descs(struct device *dev, int nvec,
- struct platform_msi_priv_data *data)
-
-{
- return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
-}
-
/**
* platform_msi_create_irq_domain - Create a platform MSI interrupt domain
* @fwnode: Optional fwnode of the interrupt controller
@@ -191,6 +129,8 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
platform_msi_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
platform_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_DEV_SYSFS | MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
+ MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
@@ -199,49 +139,57 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
return domain;
}
-static struct platform_msi_priv_data *
-platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
+static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
{
struct platform_msi_priv_data *datap;
+ int err;
+
/*
* Limit the number of interrupts to 2048 per device. Should we
* need to bump this up, DEV_ID_SHIFT should be adjusted
* accordingly (which would impact the max number of MSI
* capable devices).
*/
- if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
- return ERR_PTR(-EINVAL);
+ if (!dev->msi.domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
+ return -EINVAL;
- if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
+ if (dev->msi.domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
dev_err(dev, "Incompatible msi_domain, giving up\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- /* Already had a helping of MSI? Greed... */
- if (!list_empty(dev_to_msi_list(dev)))
- return ERR_PTR(-EBUSY);
+ err = msi_setup_device_data(dev);
+ if (err)
+ return err;
+
+ /* Already initialized? */
+ if (dev->msi.data->platform_data)
+ return -EBUSY;
datap = kzalloc(sizeof(*datap), GFP_KERNEL);
if (!datap)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
datap->devid = ida_simple_get(&platform_msi_devid_ida,
0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
if (datap->devid < 0) {
- int err = datap->devid;
+ err = datap->devid;
kfree(datap);
- return ERR_PTR(err);
+ return err;
}
datap->write_msg = write_msi_msg;
datap->dev = dev;
-
- return datap;
+ dev->msi.data->platform_data = datap;
+ return 0;
}
-static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
+static void platform_msi_free_priv_data(struct device *dev)
{
+ struct platform_msi_priv_data *data = dev->msi.data->platform_data;
+
+ dev->msi.data->platform_data = NULL;
ida_simple_remove(&platform_msi_devid_ida, data->devid);
kfree(data);
}
@@ -258,35 +206,15 @@ static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg)
{
- struct platform_msi_priv_data *priv_data;
int err;
- priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (IS_ERR(priv_data))
- return PTR_ERR(priv_data);
-
- err = platform_msi_alloc_descs(dev, nvec, priv_data);
+ err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
if (err)
- goto out_free_priv_data;
+ return err;
- err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
+ err = msi_domain_alloc_irqs(dev->msi.domain, dev, nvec);
if (err)
- goto out_free_desc;
-
- priv_data->msi_irq_groups = msi_populate_sysfs(dev);
- if (IS_ERR(priv_data->msi_irq_groups)) {
- err = PTR_ERR(priv_data->msi_irq_groups);
- goto out_free_irqs;
- }
-
- return 0;
-
-out_free_irqs:
- msi_domain_free_irqs(dev->msi_domain, dev);
-out_free_desc:
- platform_msi_free_descs(dev, 0, nvec);
-out_free_priv_data:
- platform_msi_free_priv_data(priv_data);
+ platform_msi_free_priv_data(dev);
return err;
}
@@ -298,16 +226,8 @@ EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
*/
void platform_msi_domain_free_irqs(struct device *dev)
{
- if (!list_empty(dev_to_msi_list(dev))) {
- struct msi_desc *desc;
-
- desc = first_msi_entry(dev);
- msi_destroy_sysfs(dev, desc->platform.msi_priv_data->msi_irq_groups);
- platform_msi_free_priv_data(desc->platform.msi_priv_data);
- }
-
- msi_domain_free_irqs(dev->msi_domain, dev);
- platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
+ msi_domain_free_irqs(dev->msi.domain, dev);
+ platform_msi_free_priv_data(dev);
}
EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
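
The caller-visible contract of the reworked API is unchanged: allocate all vectors with a message-write callback, free them as a whole. A hedged consumer sketch (device specifics hypothetical):

#include <linux/msi.h>

static void ex_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* program the device doorbell from msg->address_hi, address_lo, data */
}

static int ex_setup(struct device *dev)
{
	int ret = platform_msi_domain_alloc_irqs(dev, 4, ex_write_msg);

	if (ret)
		return ret;
	/* ... request_irq() on the allocated interrupts ... */
	return 0;
}

static void ex_teardown(struct device *dev)
{
	platform_msi_domain_free_irqs(dev);
}
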
@@ -316,17 +236,20 @@ EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
* a platform-msi domain
* @domain: The platform-msi domain
*
- * Returns the private data provided when calling
- * platform_msi_create_device_domain.
+ * Return: The private data provided when calling
+ * platform_msi_create_device_domain().
*/
void *platform_msi_get_host_data(struct irq_domain *domain)
{
struct platform_msi_priv_data *data = domain->host_data;
+
return data->host_data;
}
+static struct lock_class_key platform_device_msi_lock_class;
+
/**
- * __platform_msi_create_device_domain - Create a platform-msi domain
+ * __platform_msi_create_device_domain - Create a platform-msi device domain
*
* @dev: The device generating the MSIs
* @nvec: The number of MSIs that need to be allocated
@@ -335,7 +258,11 @@ void *platform_msi_get_host_data(struct irq_domain *domain)
* @ops: The hierarchy domain operations to use
* @host_data: Private data associated to this domain
*
- * Returns an irqdomain for @nvec interrupts
+ * Return: An irqdomain for @nvec interrupts on success, NULL in case of error.
+ *
+ * This is for interrupt domains which stack on a platform-msi domain
+ * created by platform_msi_create_irq_domain(). @dev->msi.domain points to
+ * that platform-msi domain which is the parent for the new domain.
*/
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
@@ -349,12 +276,20 @@ __platform_msi_create_device_domain(struct device *dev,
struct irq_domain *domain;
int err;
- data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (IS_ERR(data))
+ err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+ if (err)
return NULL;
+ /*
+ * Use a separate lock class for the MSI descriptor mutex on
+ * platform MSI device domains because the descriptor mutex nests
+ * into the domain mutex. See alloc/free below.
+ */
+ lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class);
+
+ data = dev->msi.data->platform_data;
data->host_data = host_data;
- domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
+ domain = irq_domain_create_hierarchy(dev->msi.domain, 0,
is_tree ? 0 : nvec,
dev->fwnode, ops, data);
if (!domain)
@@ -370,61 +305,46 @@ __platform_msi_create_device_domain(struct device *dev,
free_domain:
irq_domain_remove(domain);
free_priv:
- platform_msi_free_priv_data(data);
+ platform_msi_free_priv_data(dev);
return NULL;
}
/**
- * platform_msi_domain_free - Free interrupts associated with a platform-msi
- * domain
+ * platform_msi_device_domain_free - Free interrupts associated with a platform-msi
+ * device domain
*
- * @domain: The platform-msi domain
+ * @domain: The platform-msi device domain
* @virq: The base irq from which to perform the free operation
- * @nvec: How many interrupts to free from @virq
+ * @nr_irqs: How many interrupts to free from @virq
*/
-void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nvec)
+void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
struct platform_msi_priv_data *data = domain->host_data;
- struct msi_desc *desc, *tmp;
- for_each_msi_entry_safe(desc, tmp, data->dev) {
- if (WARN_ON(!desc->irq || desc->nvec_used != 1))
- return;
- if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
- continue;
-
- irq_domain_free_irqs_common(domain, desc->irq, 1);
- list_del(&desc->list);
- free_msi_entry(desc);
- }
+
+ msi_lock_descs(data->dev);
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ msi_free_msi_descs_range(data->dev, MSI_DESC_ALL, virq, virq + nr_irqs - 1);
+ msi_unlock_descs(data->dev);
}
/**
- * platform_msi_domain_alloc - Allocate interrupts associated with
- * a platform-msi domain
+ * platform_msi_device_domain_alloc - Allocate interrupts associated with
+ * a platform-msi device domain
*
- * @domain: The platform-msi domain
+ * @domain: The platform-msi device domain
* @virq: The base irq from which to perform the allocate operation
- * @nr_irqs: How many interrupts to free from @virq
+ * @nr_irqs: How many interrupts to allocate from @virq
*
* Return 0 on success, or an error code on failure. Must be called
* with irq_domain_mutex held (which can only be done as part of a
* top-level interrupt allocation).
*/
-int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
struct platform_msi_priv_data *data = domain->host_data;
- int err;
-
- err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data);
- if (err)
- return err;
-
- err = msi_domain_populate_irqs(domain->parent, data->dev,
- virq, nr_irqs, &data->arg);
- if (err)
- platform_msi_domain_free(domain, virq, nr_irqs);
+ struct device *dev = data->dev;
- return err;
+ return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 598acf93a360..6cb04ac48bf0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -258,8 +258,9 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
int ret;
ret = platform_get_irq_optional(dev, num);
- if (ret < 0 && ret != -EPROBE_DEFER)
- dev_err(&dev->dev, "IRQ index %u not found\n", num);
+ if (ret < 0)
+ return dev_err_probe(&dev->dev, ret,
+ "IRQ index %u not found\n", num);
return ret;
}
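
The dev_err_probe() idiom used above is available to any driver: it returns the error it is given, logs real failures at error level, and records -EPROBE_DEFER as a deferral reason instead of spamming the log. A hypothetical probe sketch:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int ex_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");
	return 0;
}
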
@@ -762,6 +763,10 @@ EXPORT_SYMBOL_GPL(platform_device_del);
/**
* platform_device_register - add a platform-level device
* @pdev: platform device we're adding
+ *
+ * NOTE: _Never_ directly free @pdev after calling this function, even if it
+ * returned an error! Always use platform_device_put() to give up the
+ * reference initialised in this function instead.
*/
int platform_device_register(struct platform_device *pdev)
{
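
A sketch of the rule in the NOTE above: after a failed platform_device_register() the reference taken by the internal device_initialize() is still live, so it must be dropped with platform_device_put(), never by freeing @pdev directly (caller shape hypothetical):

static int example_register(struct platform_device *pdev)
{
	int ret = platform_device_register(pdev);

	if (ret)
		platform_device_put(pdev);	/* not kfree()! */
	return ret;
}
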
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index d504cd4ab3cb..2f3cce17219b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -305,19 +305,40 @@ static int rpm_get_suppliers(struct device *dev)
return 0;
}
+/**
+ * pm_runtime_release_supplier - Drop references to device link's supplier.
+ * @link: Target device link.
+ * @check_idle: Whether or not to check if the supplier device is idle.
+ *
+ * Drop all runtime PM references associated with @link to its supplier device
+ * and if @check_idle is set, check if that device is idle (and so it can be
+ * suspended).
+ */
+void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
+{
+ struct device *supplier = link->supplier;
+
+ /*
+ * The additional power.usage_count check is a safety net in case
+ * the rpm_active refcount becomes saturated, in which case
+ * refcount_dec_not_one() would return true forever, but it is not
+ * strictly necessary.
+ */
+ while (refcount_dec_not_one(&link->rpm_active) &&
+ atomic_read(&supplier->power.usage_count) > 0)
+ pm_runtime_put_noidle(supplier);
+
+ if (check_idle)
+ pm_request_idle(supplier);
+}
+
static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held()) {
-
- while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put_noidle(link->supplier);
-
- if (try_to_suspend)
- pm_request_idle(link->supplier);
- }
+ device_links_read_lock_held())
+ pm_runtime_release_supplier(link, try_to_suspend);
}
static void rpm_put_suppliers(struct device *dev)
@@ -742,13 +763,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
trace_rpm_resume_rcuidle(dev, rpmflags);
repeat:
- if (dev->power.runtime_error)
+ if (dev->power.runtime_error) {
retval = -EINVAL;
- else if (dev->power.disable_depth == 1 && dev->power.is_suspended
- && dev->power.runtime_status == RPM_ACTIVE)
- retval = 1;
- else if (dev->power.disable_depth > 0)
- retval = -EACCES;
+ } else if (dev->power.disable_depth > 0) {
+ if (dev->power.runtime_status == RPM_ACTIVE &&
+ dev->power.last_status == RPM_ACTIVE)
+ retval = 1;
+ else
+ retval = -EACCES;
+ }
if (retval)
goto out;
@@ -1410,8 +1433,10 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
/* Update time accounting before disabling PM-runtime. */
update_pm_runtime_accounting(dev);
- if (!dev->power.disable_depth++)
+ if (!dev->power.disable_depth++) {
__pm_runtime_barrier(dev);
+ dev->power.last_status = dev->power.runtime_status;
+ }
out:
spin_unlock_irq(&dev->power.lock);
@@ -1428,23 +1453,23 @@ void pm_runtime_enable(struct device *dev)
spin_lock_irqsave(&dev->power.lock, flags);
- if (dev->power.disable_depth > 0) {
- dev->power.disable_depth--;
-
- /* About to enable runtime pm, set accounting_timestamp to now */
- if (!dev->power.disable_depth)
- dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
- } else {
+ if (!dev->power.disable_depth) {
dev_warn(dev, "Unbalanced %s!\n", __func__);
+ goto out;
}
- WARN(!dev->power.disable_depth &&
- dev->power.runtime_status == RPM_SUSPENDED &&
- !dev->power.ignore_children &&
- atomic_read(&dev->power.child_count) > 0,
- "Enabling runtime PM for inactive device (%s) with active children\n",
- dev_name(dev));
+ if (--dev->power.disable_depth > 0)
+ goto out;
+
+ dev->power.last_status = RPM_INVALID;
+ dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+
+ if (dev->power.runtime_status == RPM_SUSPENDED &&
+ !dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0)
+ dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
+out:
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
@@ -1640,6 +1665,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
void pm_runtime_init(struct device *dev)
{
dev->power.runtime_status = RPM_SUSPENDED;
+ dev->power.last_status = RPM_INVALID;
dev->power.idle_notification = false;
dev->power.disable_depth = 1;
@@ -1722,8 +1748,6 @@ void pm_runtime_get_suppliers(struct device *dev)
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
- unsigned long flags;
- bool put;
int idx;
idx = device_links_read_lock();
@@ -1731,11 +1755,17 @@ void pm_runtime_put_suppliers(struct device *dev)
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held())
if (link->supplier_preactivated) {
+ bool put;
+
link->supplier_preactivated = false;
- spin_lock_irqsave(&dev->power.lock, flags);
+
+ spin_lock_irq(&dev->power.lock);
+
put = pm_runtime_status_suspended(dev) &&
refcount_dec_not_one(&link->rpm_active);
- spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ spin_unlock_irq(&dev->power.lock);
+
if (put)
pm_runtime_put(link->supplier);
}
@@ -1772,9 +1802,7 @@ void pm_runtime_drop_link(struct device_link *link)
return;
pm_runtime_drop_link_count(link->consumer);
-
- while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put(link->supplier);
+ pm_runtime_release_supplier(link, true);
}
static bool pm_runtime_need_not_resume(struct device *dev)
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 94665037f4a3..72b7a92337b1 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -120,7 +120,11 @@ static unsigned int read_magic_time(void)
struct rtc_time time;
unsigned int val;
- mc146818_get_time(&time);
+ if (mc146818_get_time(&time) < 0) {
+ pr_err("Unable to read current time from RTC\n");
+ return 0;
+ }
+
pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
val = time.tm_year; /* 100 years */
if (val > 100)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f1f35b48ab8b..e6497f6877ee 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -478,8 +478,17 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args)
{
- return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
- nargs, index, args);
+ int ret;
+
+ ret = fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ nargs, index, args);
+
+ if (ret < 0 && !IS_ERR_OR_NULL(fwnode) &&
+ !IS_ERR_OR_NULL(fwnode->secondary))
+ ret = fwnode_call_int_op(fwnode->secondary, get_reference_args,
+ prop, nargs_prop, nargs, index, args);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
@@ -508,54 +517,6 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_find_reference);
/**
- * device_remove_properties - Remove properties from a device object.
- * @dev: Device whose properties to remove.
- *
- * The function removes properties previously associated to the device
- * firmware node with device_add_properties(). Memory allocated to the
- * properties will also be released.
- */
-void device_remove_properties(struct device *dev)
-{
- struct fwnode_handle *fwnode = dev_fwnode(dev);
-
- if (!fwnode)
- return;
-
- if (is_software_node(fwnode->secondary)) {
- fwnode_remove_software_node(fwnode->secondary);
- set_secondary_fwnode(dev, NULL);
- }
-}
-EXPORT_SYMBOL_GPL(device_remove_properties);
-
-/**
- * device_add_properties - Add a collection of properties to a device object.
- * @dev: Device to add properties to.
- * @properties: Collection of properties to add.
- *
- * Associate a collection of device properties represented by @properties with
- * @dev. The function takes a copy of @properties.
- *
- * WARNING: The callers should not use this function if it is known that there
- * is no real firmware node associated with @dev! In that case the callers
- * should create a software node and assign it to @dev directly.
- */
-int device_add_properties(struct device *dev,
- const struct property_entry *properties)
-{
- struct fwnode_handle *fwnode;
-
- fwnode = fwnode_create_software_node(properties, NULL);
- if (IS_ERR(fwnode))
- return PTR_ERR(fwnode);
-
- set_secondary_fwnode(dev, fwnode);
- return 0;
-}
-EXPORT_SYMBOL_GPL(device_add_properties);
-
-/**
* fwnode_get_name - Return the name of a node
* @fwnode: The firmware node
*
@@ -959,6 +920,22 @@ int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
EXPORT_SYMBOL(fwnode_irq_get);
/**
+ * fwnode_iomap - Maps the memory mapped IO for a given fwnode
+ * @fwnode: Pointer to the firmware node
+ * @index: Index of the IO range
+ *
+ * Returns a pointer to the mapped memory, or NULL if the node cannot be mapped.
+ */
+void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index)
+{
+ if (IS_ENABLED(CONFIG_OF_ADDRESS) && is_of_node(fwnode))
+ return of_iomap(to_of_node(fwnode), index);
+
+ return NULL;
+}
+EXPORT_SYMBOL(fwnode_iomap);
+
+/**
* fwnode_graph_get_next_endpoint - Get next endpoint firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
@@ -1059,43 +1036,17 @@ fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint);
-/**
- * fwnode_graph_get_remote_node - get remote parent node for given port/endpoint
- * @fwnode: pointer to parent fwnode_handle containing graph port/endpoint
- * @port_id: identifier of the parent port node
- * @endpoint_id: identifier of the endpoint node
- *
- * Return: Remote fwnode handle associated with remote endpoint node linked
- * to @node. Use fwnode_node_put() on it when done.
- */
-struct fwnode_handle *
-fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id,
- u32 endpoint_id)
+static bool fwnode_graph_remote_available(struct fwnode_handle *ep)
{
- struct fwnode_handle *endpoint = NULL;
+ struct fwnode_handle *dev_node;
+ bool available;
- while ((endpoint = fwnode_graph_get_next_endpoint(fwnode, endpoint))) {
- struct fwnode_endpoint fwnode_ep;
- struct fwnode_handle *remote;
- int ret;
+ dev_node = fwnode_graph_get_remote_port_parent(ep);
+ available = fwnode_device_is_available(dev_node);
+ fwnode_handle_put(dev_node);
- ret = fwnode_graph_parse_endpoint(endpoint, &fwnode_ep);
- if (ret < 0)
- continue;
-
- if (fwnode_ep.port != port_id || fwnode_ep.id != endpoint_id)
- continue;
-
- remote = fwnode_graph_get_remote_port_parent(endpoint);
- if (!remote)
- return NULL;
-
- return fwnode_device_is_available(remote) ? remote : NULL;
- }
-
- return NULL;
+ return available;
}
-EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
/**
* fwnode_graph_get_endpoint_by_id - get endpoint by port and endpoint numbers
@@ -1111,8 +1062,8 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
* has not been found, look for the closest endpoint ID greater than the
* specified one and return the endpoint that corresponds to it, if present.
*
- * Do not return endpoints that belong to disabled devices, unless
- * FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
+ * Does not return endpoints that belong to disabled devices or endpoints that
+ * are unconnected, unless FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
*
* The returned endpoint needs to be released by calling fwnode_handle_put() on
* it when it is not needed any more.
@@ -1121,25 +1072,17 @@ struct fwnode_handle *
fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
u32 port, u32 endpoint, unsigned long flags)
{
- struct fwnode_handle *ep = NULL, *best_ep = NULL;
+ struct fwnode_handle *ep, *best_ep = NULL;
unsigned int best_ep_id = 0;
bool endpoint_next = flags & FWNODE_GRAPH_ENDPOINT_NEXT;
bool enabled_only = !(flags & FWNODE_GRAPH_DEVICE_DISABLED);
- while ((ep = fwnode_graph_get_next_endpoint(fwnode, ep))) {
+ fwnode_graph_for_each_endpoint(fwnode, ep) {
struct fwnode_endpoint fwnode_ep = { 0 };
int ret;
- if (enabled_only) {
- struct fwnode_handle *dev_node;
- bool available;
-
- dev_node = fwnode_graph_get_remote_port_parent(ep);
- available = fwnode_device_is_available(dev_node);
- fwnode_handle_put(dev_node);
- if (!available)
- continue;
- }
+ if (enabled_only && !fwnode_graph_remote_available(ep))
+ continue;
ret = fwnode_graph_parse_endpoint(ep, &fwnode_ep);
if (ret < 0)
@@ -1173,6 +1116,31 @@ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
/**
+ * fwnode_graph_get_endpoint_count - Count endpoints on a device node
+ * @fwnode: The node related to a device
+ * @flags: fwnode lookup flags
+ *
+ * Count endpoints in a device node. If FWNODE_GRAPH_DEVICE_DISABLED is set
+ * in @flags, unconnected endpoints and endpoints connected to disabled
+ * devices are counted as well.
+ */
+unsigned int fwnode_graph_get_endpoint_count(struct fwnode_handle *fwnode,
+ unsigned long flags)
+{
+ struct fwnode_handle *ep;
+ unsigned int count = 0;
+
+ fwnode_graph_for_each_endpoint(fwnode, ep) {
+ if (flags & FWNODE_GRAPH_DEVICE_DISABLED ||
+ fwnode_graph_remote_available(ep))
+ count++;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_count);
+
+/**
* fwnode_graph_parse_endpoint - parse common endpoint node properties
* @fwnode: pointer to endpoint fwnode_handle
* @endpoint: pointer to the fwnode endpoint data structure
@@ -1206,8 +1174,10 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
fwnode_graph_for_each_endpoint(fwnode, ep) {
node = fwnode_graph_get_remote_port_parent(ep);
- if (!fwnode_device_is_available(node))
+ if (!fwnode_device_is_available(node)) {
+ fwnode_handle_put(node);
continue;
+ }
ret = match(node, con_id, data);
fwnode_handle_put(node);
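[Editor's note] The property.c portion of this series adds two helpers, fwnode_iomap() and fwnode_graph_get_endpoint_count(), factors the remote-availability test into fwnode_graph_remote_available(), and plugs a node reference leak in fwnode_graph_devcon_match(). A hedged sketch of how a caller might use the two new helpers; demo_probe_graph() and the error-code choices are illustrative, not from this series:

static int demo_probe_graph(struct fwnode_handle *fwnode)
{
	void __iomem *base;
	unsigned int n;

	/* Map I/O range 0 behind the node; only OF nodes map for now. */
	base = fwnode_iomap(fwnode, 0);
	if (!base)
		return -ENOMEM;

	/* With flags == 0, endpoints whose remote device is disabled or
	 * unconnected are skipped, per the documentation above. */
	n = fwnode_graph_get_endpoint_count(fwnode, 0);

	return n ? 0 : -ENODEV;
}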
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index ad684d37c2da..817eda2075aa 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -598,7 +598,7 @@ void regmap_debugfs_init(struct regmap *map)
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
if (!map->debugfs_name)
- return;
+ return;
name = map->debugfs_name;
dummy_index++;
}
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 21a0c2562ec0..8f9fe5fd4707 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -647,6 +647,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
if (ret)
return ret;
+ regmap_debugfs_exit(map);
regmap_debugfs_init(map);
/* Add a devres resource for dev_get_regmap() */
@@ -876,6 +877,7 @@ struct regmap *__regmap_init(struct device *dev,
if (!bus) {
map->reg_read = config->reg_read;
map->reg_write = config->reg_write;
+ map->reg_update_bits = config->reg_update_bits;
map->defer_caching = false;
goto skip_format_initialization;
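[Editor's note] The second regmap hunk fixes a dropped callback: a bus-less regmap built from reg_read()/reg_write() callbacks never picked up the optional reg_update_bits() from its config. A hedged sketch of the no-bus configuration this affects; demo_* names are hypothetical:

static int demo_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	*val = 0;	/* read from the device here */
	return 0;
}

static int demo_reg_write(void *context, unsigned int reg, unsigned int val)
{
	return 0;	/* write to the device here */
}

/* Optional device-native read-modify-write, now honoured again. */
static int demo_reg_update_bits(void *context, unsigned int reg,
				unsigned int mask, unsigned int val)
{
	return 0;
}

static const struct regmap_config demo_regmap_config = {
	.reg_bits	 = 8,
	.val_bits	 = 8,
	.reg_read	 = demo_reg_read,
	.reg_write	 = demo_reg_write,
	.reg_update_bits = demo_reg_update_bits,
};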
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 4debcea4fb12..0a482212c7e8 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -529,7 +529,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
return -ENOENT;
if (nargs_prop) {
- error = property_entry_read_int_array(swnode->node->properties,
+ error = property_entry_read_int_array(ref->node->properties,
nargs_prop, sizeof(u32),
&nargs_prop_val, 1);
if (error)
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index 3bb7beb127a9..4d1976ca5072 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -104,7 +104,7 @@ static int __init test_async_probe_init(void)
struct platform_device **pdev = NULL;
int async_id = 0, sync_id = 0;
unsigned long long duration;
- ktime_t calltime, delta;
+ ktime_t calltime;
int err, nid, cpu;
pr_info("registering first set of asynchronous devices...\n");
@@ -133,8 +133,7 @@ static int __init test_async_probe_init(void)
goto err_unregister_async_devs;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
pr_info("registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe took too long\n");
@@ -161,8 +160,7 @@ static int __init test_async_probe_init(void)
async_id++;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
dev_info(&(*pdev)->dev,
"registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
@@ -197,8 +195,7 @@ static int __init test_async_probe_init(void)
goto err_unregister_sync_devs;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
pr_info("registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
dev_err(&(*pdev)->dev,
@@ -223,8 +220,7 @@ static int __init test_async_probe_init(void)
sync_id++;
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
dev_info(&(*pdev)->dev,
"registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
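[Editor's note] All four hunks above are the same simplification: ktime_ms_delta() folds the ktime_sub() + ktime_to_ms() pair into one call and removes the intermediate delta variable. The shape, as an illustrative wrapper:

static unsigned long long demo_elapsed_ms(ktime_t calltime)
{
	/* Equivalent to ktime_to_ms(ktime_sub(ktime_get(), calltime)). */
	return (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
}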
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8f2b641d0b8c..fc24e89f9592 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -45,11 +45,15 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
define_id_show_func(physical_package_id);
static DEVICE_ATTR_RO(physical_package_id);
+#ifdef TOPOLOGY_DIE_SYSFS
define_id_show_func(die_id);
static DEVICE_ATTR_RO(die_id);
+#endif
+#ifdef TOPOLOGY_CLUSTER_SYSFS
define_id_show_func(cluster_id);
static DEVICE_ATTR_RO(cluster_id);
+#endif
define_id_show_func(core_id);
static DEVICE_ATTR_RO(core_id);
@@ -66,19 +70,23 @@ define_siblings_read_func(core_siblings, core_cpumask);
static BIN_ATTR_RO(core_siblings, 0);
static BIN_ATTR_RO(core_siblings_list, 0);
+#ifdef TOPOLOGY_CLUSTER_SYSFS
define_siblings_read_func(cluster_cpus, cluster_cpumask);
static BIN_ATTR_RO(cluster_cpus, 0);
static BIN_ATTR_RO(cluster_cpus_list, 0);
+#endif
+#ifdef TOPOLOGY_DIE_SYSFS
define_siblings_read_func(die_cpus, die_cpumask);
static BIN_ATTR_RO(die_cpus, 0);
static BIN_ATTR_RO(die_cpus_list, 0);
+#endif
define_siblings_read_func(package_cpus, core_cpumask);
static BIN_ATTR_RO(package_cpus, 0);
static BIN_ATTR_RO(package_cpus_list, 0);
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
define_id_show_func(book_id);
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
@@ -86,7 +94,7 @@ static BIN_ATTR_RO(book_siblings, 0);
static BIN_ATTR_RO(book_siblings_list, 0);
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
define_id_show_func(drawer_id);
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
@@ -101,17 +109,21 @@ static struct bin_attribute *bin_attrs[] = {
&bin_attr_thread_siblings_list,
&bin_attr_core_siblings,
&bin_attr_core_siblings_list,
+#ifdef TOPOLOGY_CLUSTER_SYSFS
&bin_attr_cluster_cpus,
&bin_attr_cluster_cpus_list,
+#endif
+#ifdef TOPOLOGY_DIE_SYSFS
&bin_attr_die_cpus,
&bin_attr_die_cpus_list,
+#endif
&bin_attr_package_cpus,
&bin_attr_package_cpus_list,
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
&bin_attr_book_siblings,
&bin_attr_book_siblings_list,
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
&bin_attr_drawer_siblings,
&bin_attr_drawer_siblings_list,
#endif
@@ -120,13 +132,17 @@ static struct bin_attribute *bin_attrs[] = {
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
+#ifdef TOPOLOGY_DIE_SYSFS
&dev_attr_die_id.attr,
+#endif
+#ifdef TOPOLOGY_CLUSTER_SYSFS
&dev_attr_cluster_id.attr,
+#endif
&dev_attr_core_id.attr,
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
&dev_attr_book_id.attr,
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
&dev_attr_drawer_id.attr,
#endif
NULL
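[Editor's note] Note the discipline these hunks follow: each conditionally built attribute is guarded twice, once at its definition and once at its slot in the attribute array, because an unguarded array entry would reference an undefined symbol. Reduced to its shape, with TOPOLOGY_DEMO_SYSFS and demo_id as illustrative names:

#ifdef TOPOLOGY_DEMO_SYSFS
define_id_show_func(demo_id);
static DEVICE_ATTR_RO(demo_id);		/* defined only when the arch opts in */
#endif

static struct attribute *demo_default_attrs[] = {
#ifdef TOPOLOGY_DEMO_SYSFS
	&dev_attr_demo_id.attr,		/* ...and listed under the same guard */
#endif
	NULL
};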
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 2a51dfb09c8f..519b6d38d4df 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -392,17 +392,6 @@ config BLK_DEV_RBD
If unsure, say N.
-config BLK_DEV_RSXX
- tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
- depends on PCI
- select CRC32
- help
- Device driver for IBM's high speed PCIe SSD
- storage device: Flash Adapter 900GB Full Height.
-
- To compile this driver as a module, choose M here: the
- module will be called rsxx.
-
source "drivers/block/rnbd/Kconfig"
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 11a74f17c9ad..934a9c7c3a7c 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -34,7 +34,6 @@ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
-obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index bf5c124c5452..5a566f2fd533 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1505,7 +1505,7 @@ static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
- struct amiga_floppy_struct *floppy = rq->rq_disk->private_data;
+ struct amiga_floppy_struct *floppy = rq->q->disk->private_data;
blk_status_t err;
if (!spin_trylock_irq(&amiflop_lock))
@@ -1790,6 +1790,7 @@ static int fd_alloc_disk(int drive, int system)
disk->first_minor = drive + system;
disk->minors = 1;
disk->fops = &floppy_fops;
+ disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (system)
sprintf(disk->disk_name, "fd%d_msdos", drive);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 588889bea7c3..6af111f568e4 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -122,7 +122,7 @@ newtag(struct aoedev *d)
register ulong n;
n = jiffies & 0xffff;
- return n |= (++d->lasttag & 0x7fff) << 16;
+ return n | (++d->lasttag & 0x7fff) << 16;
}
static u32
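[Editor's note] The newtag() change drops a pointless compound assignment: the old "return n |= ..." modified a local that immediately went out of scope. A small, self-contained C demo of the tag layout the function produces; make_tag() is illustrative:

#include <stdio.h>

/* Build a 32-bit tag: low 16 bits from a timestamp, upper 15 bits from
 * a rolling counter, the same layout newtag() produces. */
static unsigned int make_tag(unsigned long stamp, unsigned int *counter)
{
	unsigned int n = stamp & 0xffff;

	/* Plain '|' suffices; '<<' binds tighter than '|', so no extra
	 * parentheses are needed around the shifted counter. */
	return n | (++*counter & 0x7fff) << 16;
}

int main(void)
{
	unsigned int counter = 0;

	printf("%08x\n", make_tag(0x12345678, &counter));
	printf("%08x\n", make_tag(0x12345679, &counter));
	return 0;
}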
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index bf769e6e32fe..5d819a466e2f 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1502,7 +1502,7 @@ static void setup_req_params( int drive )
static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct atari_floppy_struct *floppy = bd->rq->rq_disk->private_data;
+ struct atari_floppy_struct *floppy = bd->rq->q->disk->private_data;
int drive = floppy - unit;
int type = floppy->type;
@@ -1538,7 +1538,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!UDT) {
Probing = 1;
UDT = atari_disk_type + StartDiskType[DriveType];
- set_capacity(bd->rq->rq_disk, UDT->blocks);
+ set_capacity(bd->rq->q->disk, UDT->blocks);
UD.autoprobe = 1;
}
}
@@ -1558,7 +1558,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
}
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
- set_capacity(bd->rq->rq_disk, UDT->blocks);
+ set_capacity(bd->rq->q->disk, UDT->blocks);
UD.autoprobe = 0;
}
@@ -2000,6 +2000,7 @@ static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
disk->minors = 1;
sprintf(disk->disk_name, "fd%d", drive);
disk->fops = &floppy_fops;
+ disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->private_data = &unit[drive];
set_capacity(disk, MAX_DISK_SIZE * 2);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index a896ee175d86..6e3f2f0d2352 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -362,7 +362,6 @@ __setup("ramdisk_size=", ramdisk_size);
* (should share code eventually).
*/
static LIST_HEAD(brd_devices);
-static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;
static int brd_alloc(int i)
@@ -372,21 +371,14 @@ static int brd_alloc(int i)
char buf[DISK_NAME_LEN];
int err = -ENOMEM;
- mutex_lock(&brd_devices_mutex);
- list_for_each_entry(brd, &brd_devices, brd_list) {
- if (brd->brd_number == i) {
- mutex_unlock(&brd_devices_mutex);
+ list_for_each_entry(brd, &brd_devices, brd_list)
+ if (brd->brd_number == i)
return -EEXIST;
- }
- }
brd = kzalloc(sizeof(*brd), GFP_KERNEL);
- if (!brd) {
- mutex_unlock(&brd_devices_mutex);
+ if (!brd)
return -ENOMEM;
- }
brd->brd_number = i;
list_add_tail(&brd->brd_list, &brd_devices);
- mutex_unlock(&brd_devices_mutex);
spin_lock_init(&brd->brd_lock);
INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
@@ -405,7 +397,6 @@ static int brd_alloc(int i)
disk->minors = max_part;
disk->fops = &brd_fops;
disk->private_data = brd;
- disk->flags = GENHD_FL_EXT_DEVT;
strlcpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
@@ -430,9 +421,7 @@ static int brd_alloc(int i)
out_cleanup_disk:
blk_cleanup_disk(disk);
out_free_dev:
- mutex_lock(&brd_devices_mutex);
list_del(&brd->brd_list);
- mutex_unlock(&brd_devices_mutex);
kfree(brd);
return err;
}
@@ -442,15 +431,19 @@ static void brd_probe(dev_t dev)
brd_alloc(MINOR(dev) / max_part);
}
-static void brd_del_one(struct brd_device *brd)
+static void brd_cleanup(void)
{
- del_gendisk(brd->brd_disk);
- blk_cleanup_disk(brd->brd_disk);
- brd_free_pages(brd);
- mutex_lock(&brd_devices_mutex);
- list_del(&brd->brd_list);
- mutex_unlock(&brd_devices_mutex);
- kfree(brd);
+ struct brd_device *brd, *next;
+
+ debugfs_remove_recursive(brd_debugfs_dir);
+
+ list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
+ del_gendisk(brd->brd_disk);
+ blk_cleanup_disk(brd->brd_disk);
+ brd_free_pages(brd);
+ list_del(&brd->brd_list);
+ kfree(brd);
+ }
}
static inline void brd_check_and_reset_par(void)
@@ -474,9 +467,18 @@ static inline void brd_check_and_reset_par(void)
static int __init brd_init(void)
{
- struct brd_device *brd, *next;
int err, i;
+ brd_check_and_reset_par();
+
+ brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
+
+ for (i = 0; i < rd_nr; i++) {
+ err = brd_alloc(i);
+ if (err)
+ goto out_free;
+ }
+
/*
* brd module now has a feature to instantiate underlying device
* structure on-demand, provided that there is an access dev node.
@@ -492,28 +494,16 @@ static int __init brd_init(void)
* dynamically.
*/
- if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe))
- return -EIO;
-
- brd_check_and_reset_par();
-
- brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
-
- for (i = 0; i < rd_nr; i++) {
- err = brd_alloc(i);
- if (err)
- goto out_free;
+ if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
+ err = -EIO;
+ goto out_free;
}
pr_info("brd: module loaded\n");
return 0;
out_free:
- unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
- debugfs_remove_recursive(brd_debugfs_dir);
-
- list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
- brd_del_one(brd);
+ brd_cleanup();
pr_info("brd: module NOT loaded !!!\n");
return err;
@@ -521,13 +511,9 @@ out_free:
static void __exit brd_exit(void)
{
- struct brd_device *brd, *next;
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
- debugfs_remove_recursive(brd_debugfs_dir);
-
- list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
- brd_del_one(brd);
+ brd_cleanup();
pr_info("brd: module unloaded\n");
}
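[Editor's note] The brd restructuring boils down to one rule: allocate everything first, register the blkdev last, and funnel both the error path and module exit through a single cleanup helper, which also makes the mutex unnecessary. A hedged, self-contained sketch of that shape; demo_* names are hypothetical stand-ins for brd_alloc()/__register_blkdev()/brd_cleanup():

static int demo_alloc(int i) { return 0; }	/* stub */
static int demo_register(void) { return 0; }	/* stub */
static void demo_cleanup(void) { }		/* stub */

static int demo_init(int ndevs)
{
	int err, i;

	for (i = 0; i < ndevs; i++) {
		err = demo_alloc(i);
		if (err)
			goto out_free;
	}

	/* Registration comes last, so a failure here reuses the same
	 * teardown path as module exit. */
	if (demo_register()) {
		err = -5;	/* -EIO in the kernel */
		goto out_free;
	}
	return 0;

out_free:
	demo_cleanup();
	return err;
}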
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 53ba2dddba6e..6f450816c4fa 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -729,7 +729,8 @@ int drbd_send_sync_param(struct drbd_peer_device *peer_device)
cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
/* initialize verify_alg and csums_alg */
- memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
+ BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
+ memset(&p->algs, 0, sizeof(p->algs));
if (get_ldev(peer_device->device)) {
dc = rcu_dereference(peer_device->device->ldev->disk_conf);
@@ -2734,6 +2735,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
disk->first_minor = minor;
disk->minors = 1;
disk->fops = &drbd_ops;
+ disk->flags |= GENHD_FL_NO_PART;
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
index dea59c92ecc1..a882b65ab5d2 100644
--- a/drivers/block/drbd/drbd_protocol.h
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -283,8 +283,10 @@ struct p_rs_param_89 {
struct p_rs_param_95 {
u32 resync_rate;
- char verify_alg[SHARED_SECRET_MAX];
- char csums_alg[SHARED_SECRET_MAX];
+ struct_group(algs,
+ char verify_alg[SHARED_SECRET_MAX];
+ char csums_alg[SHARED_SECRET_MAX];
+ );
u32 c_plan_ahead;
u32 c_delay_target;
u32 c_fill_target;
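[Editor's note] struct_group() (include/linux/stddef.h) wraps the two algorithm-name arrays in a union so they can be addressed, sized, and cleared as one object, which lets fortified bounds checking accept the two-array memset() in drbd_send_sync_param(). A simplified, self-contained sketch of the idea; the macro here is a reduced stand-in for the kernel's version:

#include <assert.h>
#include <string.h>

#define struct_group(NAME, MEMBERS...) \
	union { \
		struct { MEMBERS }; \
		struct { MEMBERS } NAME; \
	}

#define SHARED_SECRET_MAX 64

struct p_rs_param_demo {
	unsigned int resync_rate;
	struct_group(algs,
		char verify_alg[SHARED_SECRET_MAX];
		char csums_alg[SHARED_SECRET_MAX];
	);
	unsigned int c_plan_ahead;
};

int main(void)
{
	struct p_rs_param_demo p;

	/* The group is cleared as one object, with the memset() bounded
	 * to exactly the two arrays. */
	assert(sizeof(p.algs) == 2 * SHARED_SECRET_MAX);
	memset(&p.algs, 0, sizeof(p.algs));
	return 0;
}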
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 1f740e42e457..6df2539e215b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3921,7 +3921,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
/* initialize verify_alg and csums_alg */
p = pi->data;
- memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
+ BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
+ memset(&p->algs, 0, sizeof(p->algs));
err = drbd_recv_all(peer_device->connection, p, header_size);
if (err)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index c4267da716fe..e611411a934c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1015,7 +1015,7 @@ static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
static void cancel_activity(void)
{
do_floppy = NULL;
- cancel_delayed_work_sync(&fd_timer);
+ cancel_delayed_work(&fd_timer);
cancel_work_sync(&floppy_work);
}
@@ -2259,7 +2259,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
static void floppy_end_request(struct request *req, blk_status_t error)
{
unsigned int nr_sectors = current_count_sectors;
- unsigned int drive = (unsigned long)req->rq_disk->private_data;
+ unsigned int drive = (unsigned long)req->q->disk->private_data;
/* current_count_sectors can be zero if transfer failed */
if (error)
@@ -2550,7 +2550,7 @@ static int make_raw_rw_request(void)
if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n"))
return 0;
- set_fdc((long)current_req->rq_disk->private_data);
+ set_fdc((long)current_req->q->disk->private_data);
raw_cmd = &default_raw_cmd;
raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
@@ -2792,7 +2792,7 @@ do_request:
return;
}
}
- drive = (long)current_req->rq_disk->private_data;
+ drive = (long)current_req->q->disk->private_data;
set_fdc(drive);
reschedule_timeout(current_drive, "redo fd request");
@@ -3081,6 +3081,8 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
}
}
+#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT)
+
static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)
{
@@ -3108,7 +3110,7 @@ loop:
ptr->resultcode = 0;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
- if (ptr->length <= 0)
+ if (ptr->length <= 0 || ptr->length >= MAX_LEN)
return -EINVAL;
ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
@@ -4503,6 +4505,7 @@ static int floppy_alloc_disk(unsigned int drive, unsigned int type)
disk->first_minor = TOMINOR(drive) | (type << 2);
disk->minors = 1;
disk->fops = &floppy_fops;
+ disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (type)
sprintf(disk->disk_name, "fd%d_type%d", drive, type);
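[Editor's note] The new MAX_LEN bound closes a path where a user-controlled ->length from the raw-command ioctl reached the DMA allocator unchecked. The shape of the added validation, with illustrative constants in place of MAX_ORDER and PAGE_SHIFT:

#define DEMO_PAGE_SHIFT	12
#define DEMO_MAX_ORDER	10
#define DEMO_MAX_LEN	(1UL << DEMO_MAX_ORDER << DEMO_PAGE_SHIFT)

static int demo_validate_len(long len)
{
	/* Reject nonpositive and oversized user-supplied lengths before
	 * they reach the allocator. */
	if (len <= 0 || (unsigned long)len >= DEMO_MAX_LEN)
		return -22;	/* -EINVAL in the kernel */
	return 0;
}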
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c3a36cfaa855..01cbbfc4e9e2 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -820,7 +820,7 @@ static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
{
- struct rb_node **node = &(lo->worker_tree.rb_node), *parent = NULL;
+ struct rb_node **node, *parent = NULL;
struct loop_worker *cur_worker, *worker = NULL;
struct work_struct *work;
struct list_head *cmd_list;
@@ -1061,7 +1061,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
lo->lo_flags |= LO_FLAGS_PARTSCAN;
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
if (partscan)
- lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
+ lo->lo_disk->flags &= ~GENHD_FL_NO_PART;
loop_global_unlock(lo, is_loop);
if (partscan)
@@ -1082,13 +1082,10 @@ out_putf:
return error;
}
-static int __loop_clr_fd(struct loop_device *lo, bool release)
+static void __loop_clr_fd(struct loop_device *lo)
{
- struct file *filp = NULL;
+ struct file *filp;
gfp_t gfp = lo->old_gfp_mask;
- int err = 0;
- bool partscan = false;
- int lo_number;
struct loop_worker *pos, *worker;
/*
@@ -1103,17 +1100,14 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
* became visible.
*/
+ /*
+ * Since this function is called upon "ioctl(LOOP_CLR_FD)" xor "close()
+ * after ioctl(LOOP_CLR_FD)", it is a sign of something going wrong if
+ * lo->lo_state has changed while waiting for lo->lo_mutex.
+ */
mutex_lock(&lo->lo_mutex);
- if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
- err = -ENXIO;
- goto out_unlock;
- }
-
- filp = lo->lo_backing_file;
- if (filp == NULL) {
- err = -EINVAL;
- goto out_unlock;
- }
+ BUG_ON(lo->lo_state != Lo_rundown);
+ mutex_unlock(&lo->lo_mutex);
if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
blk_queue_write_cache(lo->lo_queue, false, false);
@@ -1134,6 +1128,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
del_timer_sync(&lo->timer);
spin_lock_irq(&lo->lo_lock);
+ filp = lo->lo_backing_file;
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
@@ -1149,60 +1144,59 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
/* let user-space know about this change */
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(filp->f_mapping, gfp);
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
blk_mq_unfreeze_queue(lo->lo_queue);
- partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
- lo_number = lo->lo_number;
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
-out_unlock:
- mutex_unlock(&lo->lo_mutex);
- if (partscan) {
- /*
- * open_mutex has been held already in release path, so don't
- * acquire it if this function is called in such case.
- *
- * If the reread partition isn't from release path, lo_refcnt
- * must be at least one and it can only become zero when the
- * current holder is released.
- */
- if (!release)
- mutex_lock(&lo->lo_disk->open_mutex);
+
+ if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
+ int err;
+
+ mutex_lock(&lo->lo_disk->open_mutex);
err = bdev_disk_changed(lo->lo_disk, false);
- if (!release)
- mutex_unlock(&lo->lo_disk->open_mutex);
+ mutex_unlock(&lo->lo_disk->open_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
- __func__, lo_number, err);
+ __func__, lo->lo_number, err);
/* Device is gone, no point in returning error */
- err = 0;
}
- /*
- * lo->lo_state is set to Lo_unbound here after above partscan has
- * finished.
- *
- * There cannot be anybody else entering __loop_clr_fd() as
- * lo->lo_backing_file is already cleared and Lo_rundown state
- * protects us from all the other places trying to change the 'lo'
- * device.
- */
- mutex_lock(&lo->lo_mutex);
lo->lo_flags = 0;
if (!part_shift)
- lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
+ lo->lo_disk->flags |= GENHD_FL_NO_PART;
+
+ fput(filp);
+}
+
+static void loop_rundown_completed(struct loop_device *lo)
+{
+ mutex_lock(&lo->lo_mutex);
lo->lo_state = Lo_unbound;
mutex_unlock(&lo->lo_mutex);
+ module_put(THIS_MODULE);
+}
- /*
- * Need not hold lo_mutex to fput backing file. Calling fput holding
- * lo_mutex triggers a circular lock dependency possibility warning as
- * fput can take open_mutex which is usually taken before lo_mutex.
- */
- if (filp)
- fput(filp);
- return err;
+static void loop_rundown_workfn(struct work_struct *work)
+{
+ struct loop_device *lo = container_of(work, struct loop_device,
+ rundown_work);
+ struct block_device *bdev = lo->lo_device;
+ struct gendisk *disk = lo->lo_disk;
+
+ __loop_clr_fd(lo);
+ kobject_put(&bdev->bd_device.kobj);
+ module_put(disk->fops->owner);
+ loop_rundown_completed(lo);
+}
+
+static void loop_schedule_rundown(struct loop_device *lo)
+{
+ struct block_device *bdev = lo->lo_device;
+ struct gendisk *disk = lo->lo_disk;
+
+ __module_get(disk->fops->owner);
+ kobject_get(&bdev->bd_device.kobj);
+ INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
+ queue_work(system_long_wq, &lo->rundown_work);
}
static int loop_clr_fd(struct loop_device *lo)
@@ -1234,7 +1228,9 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_rundown;
mutex_unlock(&lo->lo_mutex);
- return __loop_clr_fd(lo, false);
+ __loop_clr_fd(lo);
+ loop_rundown_completed(lo);
+ return 0;
}
static int
@@ -1301,7 +1297,7 @@ out_unfreeze:
if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
!(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
- lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
+ lo->lo_disk->flags &= ~GENHD_FL_NO_PART;
partscan = true;
}
out_unlock:
@@ -1758,7 +1754,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
* In autoclear mode, stop the loop thread
* and remove configuration after last close.
*/
- __loop_clr_fd(lo, true);
+ loop_schedule_rundown(lo);
return;
} else if (lo->lo_state == Lo_bound) {
/*
@@ -2032,8 +2028,7 @@ static int loop_add(int i)
* userspace tools. Parameters like this in general should be avoided.
*/
if (!part_shift)
- disk->flags |= GENHD_FL_NO_PART_SCAN;
- disk->flags |= GENHD_FL_EXT_DEVT;
+ disk->flags |= GENHD_FL_NO_PART;
atomic_set(&lo->lo_refcnt, 0);
mutex_init(&lo->lo_mutex);
lo->lo_number = i;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 082d4b6bfc6a..918a7a2dc025 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -56,6 +56,7 @@ struct loop_device {
struct gendisk *lo_disk;
struct mutex lo_mutex;
bool idr_visible;
+ struct work_struct rundown_work;
};
struct loop_cmd {
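[Editor's note] The loop changes split teardown in two: __loop_clr_fd() does the work, and the autoclear path defers it to a workqueue so the final close never runs the heavyweight teardown, with its open_mutex and fput() interactions, in its own context. A hedged sketch of the schedule/run split; demo_* names are hypothetical:

#include <linux/workqueue.h>
#include <linux/module.h>

struct demo_dev {
	struct module *owner;
	struct work_struct rundown_work;
};

static void demo_teardown(struct demo_dev *d) { }	/* stub; may sleep */

static void demo_rundown_workfn(struct work_struct *work)
{
	struct demo_dev *d = container_of(work, struct demo_dev, rundown_work);

	demo_teardown(d);
	module_put(d->owner);	/* drop the reference taken at schedule time */
}

static void demo_schedule_rundown(struct demo_dev *d)
{
	__module_get(d->owner);	/* keep the module alive until the work runs */
	INIT_WORK(&d->rundown_work, demo_rundown_workfn);
	queue_work(system_long_wq, &d->rundown_work);
}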
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index c91b9010c1a6..e6005c232328 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -136,16 +136,15 @@ struct mtip_compat_ide_task_request_s {
* return value
* true if device removed, else false
*/
-static bool mtip_check_surprise_removal(struct pci_dev *pdev)
+static bool mtip_check_surprise_removal(struct driver_data *dd)
{
u16 vendor_id = 0;
- struct driver_data *dd = pci_get_drvdata(pdev);
if (dd->sr)
return true;
/* Read the vendorID from the configuration space */
- pci_read_config_word(pdev, 0x00, &vendor_id);
+ pci_read_config_word(dd->pdev, 0x00, &vendor_id);
if (vendor_id == 0xFFFF) {
dd->sr = true;
if (dd->queue)
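[Editor's note] The refactor keeps the detection idiom intact: a config-space read on a surprise-removed PCI device returns all-ones, so a vendor ID of 0xFFFF is the tell. As a hedged fragment, with demo_device_gone() as an illustrative name:

static bool demo_device_gone(struct pci_dev *pdev)
{
	u16 vendor_id = 0;

	/* Config reads from a removed device come back as all-ones. */
	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
	return vendor_id == 0xFFFF;
}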
@@ -447,7 +446,7 @@ static int mtip_device_reset(struct driver_data *dd)
{
int rv = 0;
- if (mtip_check_surprise_removal(dd->pdev))
+ if (mtip_check_surprise_removal(dd))
return 0;
if (mtip_hba_reset(dd) < 0)
@@ -727,7 +726,7 @@ static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
dev_warn(&dd->pdev->dev,
"Port stat errors %x unhandled\n",
(port_stat & ~PORT_IRQ_HANDLED));
- if (mtip_check_surprise_removal(dd->pdev))
+ if (mtip_check_surprise_removal(dd))
return;
}
if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
@@ -752,7 +751,7 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
/* Acknowledge the interrupt status on the port.*/
port_stat = readl(port->mmio + PORT_IRQ_STAT);
if (unlikely(port_stat == 0xFFFFFFFF)) {
- mtip_check_surprise_removal(dd->pdev);
+ mtip_check_surprise_removal(dd);
return IRQ_HANDLED;
}
writel(port_stat, port->mmio + PORT_IRQ_STAT);
@@ -796,7 +795,7 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
}
if (unlikely(port_stat & PORT_IRQ_ERR)) {
- if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
+ if (unlikely(mtip_check_surprise_removal(dd))) {
/* don't proceed further */
return IRQ_HANDLED;
}
@@ -915,7 +914,7 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
msleep(100);
- if (mtip_check_surprise_removal(port->dd->pdev))
+ if (mtip_check_surprise_removal(port->dd))
goto err_fault;
active = mtip_commands_active(port);
@@ -980,7 +979,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
return -EFAULT;
}
- if (mtip_check_surprise_removal(dd->pdev))
+ if (mtip_check_surprise_removal(dd))
return -EFAULT;
rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
@@ -1015,14 +1014,14 @@ static int mtip_exec_internal_command(struct mtip_port *port,
rq->timeout = timeout;
/* insert request and run queue */
- blk_execute_rq(NULL, rq, true);
+ blk_execute_rq(rq, true);
if (int_cmd->status) {
dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
fis->command, int_cmd->status);
rv = -EIO;
- if (mtip_check_surprise_removal(dd->pdev) ||
+ if (mtip_check_surprise_removal(dd) ||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)) {
dev_err(&dd->pdev->dev,
@@ -2513,7 +2512,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)))
return -EFAULT;
- if (mtip_check_surprise_removal(dd->pdev))
+ if (mtip_check_surprise_removal(dd))
return -EFAULT;
if (mtip_get_identify(dd->port, NULL) < 0)
@@ -2891,7 +2890,7 @@ static int mtip_hw_init(struct driver_data *dd)
time_before(jiffies, timeout)) {
mdelay(100);
}
- if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
+ if (unlikely(mtip_check_surprise_removal(dd))) {
timetaken = jiffies - timetaken;
dev_warn(&dd->pdev->dev,
"Surprise removal detected at %u ms\n",
@@ -4098,7 +4097,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
list_add(&dd->remove_list, &removing_list);
spin_unlock_irqrestore(&dev_lock, flags);
- mtip_check_surprise_removal(pdev);
+ mtip_check_surprise_removal(dd);
synchronize_irq(dd->pdev->irq);
/* Spin until workers are done */
@@ -4145,36 +4144,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
* 0 Success
* <0 Error
*/
-static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
+static int __maybe_unused mtip_pci_suspend(struct device *dev)
{
int rv = 0;
- struct driver_data *dd = pci_get_drvdata(pdev);
-
- if (!dd) {
- dev_err(&pdev->dev,
- "Driver private datastructure is NULL\n");
- return -EFAULT;
- }
+ struct driver_data *dd = dev_get_drvdata(dev);
set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
/* Disable ports & interrupts then send standby immediate */
rv = mtip_block_suspend(dd);
- if (rv < 0) {
- dev_err(&pdev->dev,
- "Failed to suspend controller\n");
- return rv;
- }
-
- /*
- * Save the pci config space to pdev structure &
- * disable the device
- */
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- /* Move to Low power state*/
- pci_set_power_state(pdev, PCI_D3hot);
+ if (rv < 0)
+ dev_err(dev, "Failed to suspend controller\n");
return rv;
}
@@ -4186,32 +4166,10 @@ static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
* 0 Success
* <0 Error
*/
-static int mtip_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused mtip_pci_resume(struct device *dev)
{
int rv = 0;
- struct driver_data *dd;
-
- dd = pci_get_drvdata(pdev);
- if (!dd) {
- dev_err(&pdev->dev,
- "Driver private datastructure is NULL\n");
- return -EFAULT;
- }
-
- /* Move the device to active State */
- pci_set_power_state(pdev, PCI_D0);
-
- /* Restore PCI configuration space */
- pci_restore_state(pdev);
-
- /* Enable the PCI device*/
- rv = pcim_enable_device(pdev);
- if (rv < 0) {
- dev_err(&pdev->dev,
- "Failed to enable card during resume\n");
- goto err;
- }
- pci_set_master(pdev);
+ struct driver_data *dd = dev_get_drvdata(dev);
/*
* Calls hbaReset, initPort, & startPort function
@@ -4219,9 +4177,8 @@ static int mtip_pci_resume(struct pci_dev *pdev)
*/
rv = mtip_block_resume(dd);
if (rv < 0)
- dev_err(&pdev->dev, "Unable to resume\n");
+ dev_err(dev, "Unable to resume\n");
-err:
clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
return rv;
@@ -4252,14 +4209,15 @@ static const struct pci_device_id mtip_pci_tbl[] = {
{ 0 }
};
+static SIMPLE_DEV_PM_OPS(mtip_pci_pm_ops, mtip_pci_suspend, mtip_pci_resume);
+
/* Structure that describes the PCI driver functions. */
static struct pci_driver mtip_pci_driver = {
.name = MTIP_DRV_NAME,
.id_table = mtip_pci_tbl,
.probe = mtip_pci_probe,
.remove = mtip_pci_remove,
- .suspend = mtip_pci_suspend,
- .resume = mtip_pci_resume,
+ .driver.pm = &mtip_pci_pm_ops,
.shutdown = mtip_pci_shutdown,
};
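[Editor's note] The suspend/resume conversion above is the standard migration from legacy PCI PM callbacks to dev_pm_ops: the PCI core now saves and restores config space and manages D-states, so the driver callbacks shrink to quiescing and reviving the hardware. The generic shape; demo_* names are hypothetical:

static int __maybe_unused demo_suspend(struct device *dev)
{
	struct demo_data *dd = dev_get_drvdata(dev);

	return demo_block_suspend(dd);	/* quiesce the hardware only */
}

static int __maybe_unused demo_resume(struct device *dev)
{
	struct demo_data *dd = dev_get_drvdata(dev);

	return demo_block_resume(dd);	/* no pci_set_power_state() etc. */
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
	.name		= "demo",
	.driver.pm	= &demo_pm_ops,
};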
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index 78282f01f581..4db9a8c244af 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -136,7 +136,7 @@ static int __init n64cart_probe(struct platform_device *pdev)
goto out;
disk->first_minor = 0;
- disk->flags = GENHD_FL_NO_PART_SCAN;
+ disk->flags = GENHD_FL_NO_PART;
disk->fops = &n64cart_fops;
disk->private_data = &pdev->dev;
strcpy(disk->disk_name, "n64cart");
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 323af5c9c802..13004beb48ca 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -340,9 +340,9 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
return 0;
/*
- * Make sure at least one queue exists for each of submit and poll.
+ * Make sure at least one submit queue exists.
*/
- if (!submit_queues || !poll_queues)
+ if (!submit_queues)
return -EINVAL;
/*
@@ -1574,7 +1574,9 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
- end_cmd(cmd);
+ if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
+ blk_mq_end_request_batch))
+ end_cmd(cmd);
nr++;
}
@@ -1850,7 +1852,6 @@ static int null_gendisk_register(struct nullb *nullb)
set_capacity(disk, size);
- disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
disk->first_minor = nullb->index;
disk->minors = 1;
@@ -1891,7 +1892,7 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
if (g_shared_tag_bitmap)
set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
set->driver_data = nullb;
- if (g_poll_queues)
+ if (poll_queues)
set->nr_maps = 3;
else
set->nr_maps = 1;
@@ -1918,8 +1919,6 @@ static int null_validate_conf(struct nullb_device *dev)
if (dev->poll_queues > g_poll_queues)
dev->poll_queues = g_poll_queues;
- else if (dev->poll_queues == 0)
- dev->poll_queues = 1;
dev->prev_poll_queues = dev->poll_queues;
dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
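[Editor's note] The poll-path hunk switches null_blk to batched completion. blk_mq_add_to_batch() defers the request into the caller-provided io_comp_batch and returns false when it cannot, in which case the request must be completed individually. A hedged fragment of that shape; req, iob, and the errno-style ioerror are assumed to be in scope in a ->poll() loop:

if (!blk_mq_add_to_batch(req, iob, ioerror, blk_mq_end_request_batch))
	blk_mq_end_request(req, errno_to_blk_status(ioerror));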
diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h
index ce3b430e88c5..86d6c12c603c 100644
--- a/drivers/block/null_blk/trace.h
+++ b/drivers/block/null_blk/trace.h
@@ -44,7 +44,7 @@ TRACE_EVENT(nullb_zone_op,
__entry->op = req_op(cmd->rq);
__entry->zone_no = zone_no;
__entry->zone_cond = zone_cond;
- __assign_disk_name(__entry->disk, cmd->rq->rq_disk);
+ __assign_disk_name(__entry->disk, cmd->rq->q->disk);
),
TP_printk("%s req=%-15s zone_no=%u zone_cond=%-10s",
__print_disk_name(__entry->disk),
diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c
index f5f63ca2889d..d880a9465e9b 100644
--- a/drivers/block/paride/bpck.c
+++ b/drivers/block/paride/bpck.c
@@ -28,6 +28,7 @@
#undef r2
#undef w2
+#undef PC
#define PC pi->private
#define r2() (PC=(in_p(2) & 0xff))
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index f6b1d63e96e1..f462ad67931a 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -690,7 +690,7 @@ static void pcd_request(void)
if (!pcd_req && !set_next_request())
return;
- cd = pcd_req->rq_disk->private_data;
+ cd = pcd_req->q->disk->private_data;
if (cd != pcd_current)
pcd_bufblk = -1;
pcd_current = cd;
@@ -928,8 +928,9 @@ static int pcd_init_unit(struct pcd_unit *cd, bool autoprobe, int port,
disk->minors = 1;
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
- disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
+ disk->event_flags = DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE;
if (!pi_init(cd->pi, autoprobe, port, mode, unit, protocol, delay,
pcd_buffer, PI_PCD, verbose, cd->name)) {
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index fba865058a17..3637c38c72f9 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -430,7 +430,7 @@ static void run_fsm(void)
int stop = 0;
if (!phase) {
- pd_current = pd_req->rq_disk->private_data;
+ pd_current = pd_req->q->disk->private_data;
pi_current = pd_current->pi;
phase = do_pd_io_start;
}
@@ -492,7 +492,7 @@ static enum action do_pd_io_start(void)
case REQ_OP_WRITE:
pd_block = blk_rq_pos(pd_req);
pd_count = blk_rq_cur_sectors(pd_req);
- if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
+ if (pd_block + pd_count > get_capacity(pd_req->q->disk))
return Fail;
pd_run = blk_rq_sectors(pd_req);
pd_buf = bio_data(pd_req->bio);
@@ -781,7 +781,7 @@ static int pd_special_command(struct pd_unit *disk,
req = blk_mq_rq_to_pdu(rq);
req->func = func;
- blk_execute_rq(disk->gd, rq, 0);
+ blk_execute_rq(rq, false);
blk_mq_free_request(rq);
return 0;
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bf8d0ef41a0a..292e9a4ce1b9 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -746,12 +746,12 @@ repeat:
if (!pf_req && !set_next_request())
return;
- pf_current = pf_req->rq_disk->private_data;
+ pf_current = pf_req->q->disk->private_data;
pf_block = blk_rq_pos(pf_req);
pf_run = blk_rq_sectors(pf_req);
pf_count = blk_rq_cur_sectors(pf_req);
- if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
+ if (pf_block + pf_count > get_capacity(pf_req->q->disk)) {
pf_end_request(BLK_STS_IOERR);
goto repeat;
}
@@ -942,6 +942,7 @@ static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
disk->minors = 1;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
+ disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->private_data = pf;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b53f648302c1..2b6b70a39e76 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -113,57 +113,10 @@ static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
-/*
- * create and register a pktcdvd kernel object.
- */
-static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
- const char* name,
- struct kobject* parent,
- struct kobj_type* ktype)
-{
- struct pktcdvd_kobj *p;
- int error;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return NULL;
- p->pd = pd;
- error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
- if (error) {
- kobject_put(&p->kobj);
- return NULL;
- }
- kobject_uevent(&p->kobj, KOBJ_ADD);
- return p;
-}
-/*
- * remove a pktcdvd kernel object.
- */
-static void pkt_kobj_remove(struct pktcdvd_kobj *p)
-{
- if (p)
- kobject_put(&p->kobj);
-}
-/*
- * default release function for pktcdvd kernel objects.
- */
-static void pkt_kobj_release(struct kobject *kobj)
-{
- kfree(to_pktcdvdkobj(kobj));
-}
-
-
/**********************************************************
- *
* sysfs interface for pktcdvd
* by (C) 2006 Thomas Maier <balagi@justmail.de>
- *
- **********************************************************/
-
-#define DEF_ATTR(_obj,_name,_mode) \
- static struct attribute _obj = { .name = _name, .mode = _mode }
-
-/**********************************************************
+
/sys/class/pktcdvd/pktcdvd[0-7]/
stat/reset
stat/packets_started
@@ -176,75 +129,94 @@ static void pkt_kobj_release(struct kobject *kobj)
write_queue/congestion_on
**********************************************************/
-DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
-DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
-DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
-DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
-DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
-DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);
-
-static struct attribute *kobj_pkt_attrs_stat[] = {
- &kobj_pkt_attr_st1,
- &kobj_pkt_attr_st2,
- &kobj_pkt_attr_st3,
- &kobj_pkt_attr_st4,
- &kobj_pkt_attr_st5,
- &kobj_pkt_attr_st6,
- NULL
-};
+static ssize_t packets_started_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
-DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
-DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
-DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644);
+ return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
+}
+static DEVICE_ATTR_RO(packets_started);
-static struct attribute *kobj_pkt_attrs_wqueue[] = {
- &kobj_pkt_attr_wq1,
- &kobj_pkt_attr_wq2,
- &kobj_pkt_attr_wq3,
- NULL
-};
+static ssize_t packets_finished_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
-static ssize_t kobj_pkt_show(struct kobject *kobj,
- struct attribute *attr, char *data)
+ return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
+}
+static DEVICE_ATTR_RO(packets_finished);
+
+static ssize_t kb_written_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
- int n = 0;
- int v;
- if (strcmp(attr->name, "packets_started") == 0) {
- n = sprintf(data, "%lu\n", pd->stats.pkt_started);
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
- } else if (strcmp(attr->name, "packets_finished") == 0) {
- n = sprintf(data, "%lu\n", pd->stats.pkt_ended);
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
+}
+static DEVICE_ATTR_RO(kb_written);
- } else if (strcmp(attr->name, "kb_written") == 0) {
- n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);
+static ssize_t kb_read_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
- } else if (strcmp(attr->name, "kb_read") == 0) {
- n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
+}
+static DEVICE_ATTR_RO(kb_read);
- } else if (strcmp(attr->name, "kb_read_gather") == 0) {
- n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);
+static ssize_t kb_read_gather_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
- } else if (strcmp(attr->name, "size") == 0) {
- spin_lock(&pd->lock);
- v = pd->bio_queue_size;
- spin_unlock(&pd->lock);
- n = sprintf(data, "%d\n", v);
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
+}
+static DEVICE_ATTR_RO(kb_read_gather);
- } else if (strcmp(attr->name, "congestion_off") == 0) {
- spin_lock(&pd->lock);
- v = pd->write_congestion_off;
- spin_unlock(&pd->lock);
- n = sprintf(data, "%d\n", v);
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
- } else if (strcmp(attr->name, "congestion_on") == 0) {
- spin_lock(&pd->lock);
- v = pd->write_congestion_on;
- spin_unlock(&pd->lock);
- n = sprintf(data, "%d\n", v);
+ if (len > 0) {
+ pd->stats.pkt_started = 0;
+ pd->stats.pkt_ended = 0;
+ pd->stats.secs_w = 0;
+ pd->stats.secs_rg = 0;
+ pd->stats.secs_r = 0;
}
+ return len;
+}
+static DEVICE_ATTR_WO(reset);
+
+static struct attribute *pkt_stat_attrs[] = {
+ &dev_attr_packets_finished.attr,
+ &dev_attr_packets_started.attr,
+ &dev_attr_kb_read.attr,
+ &dev_attr_kb_written.attr,
+ &dev_attr_kb_read_gather.attr,
+ &dev_attr_reset.attr,
+ NULL,
+};
+
+static const struct attribute_group pkt_stat_group = {
+ .name = "stat",
+ .attrs = pkt_stat_attrs,
+};
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
+
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
+ spin_unlock(&pd->lock);
return n;
}
+static DEVICE_ATTR_RO(size);
static void init_write_congestion_marks(int* lo, int* hi)
{
@@ -263,30 +235,56 @@ static void init_write_congestion_marks(int* lo, int* hi)
}
}
-static ssize_t kobj_pkt_store(struct kobject *kobj,
- struct attribute *attr,
- const char *data, size_t len)
+static ssize_t congestion_off_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
- int val;
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
- if (strcmp(attr->name, "reset") == 0 && len > 0) {
- pd->stats.pkt_started = 0;
- pd->stats.pkt_ended = 0;
- pd->stats.secs_w = 0;
- pd->stats.secs_rg = 0;
- pd->stats.secs_r = 0;
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
+ spin_unlock(&pd->lock);
+ return n;
+}
+
+static ssize_t congestion_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int val;
- } else if (strcmp(attr->name, "congestion_off") == 0
- && sscanf(data, "%d", &val) == 1) {
+ if (sscanf(buf, "%d", &val) == 1) {
spin_lock(&pd->lock);
pd->write_congestion_off = val;
init_write_congestion_marks(&pd->write_congestion_off,
&pd->write_congestion_on);
spin_unlock(&pd->lock);
+ }
+ return len;
+}
+static DEVICE_ATTR_RW(congestion_off);
+
+static ssize_t congestion_on_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
+
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
+ spin_unlock(&pd->lock);
+ return n;
+}
+
+static ssize_t congestion_on_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int val;
- } else if (strcmp(attr->name, "congestion_on") == 0
- && sscanf(data, "%d", &val) == 1) {
+ if (sscanf(buf, "%d", &val) == 1) {
spin_lock(&pd->lock);
pd->write_congestion_on = val;
init_write_congestion_marks(&pd->write_congestion_off,
@@ -295,44 +293,39 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
}
return len;
}
+static DEVICE_ATTR_RW(congestion_on);
-static const struct sysfs_ops kobj_pkt_ops = {
- .show = kobj_pkt_show,
- .store = kobj_pkt_store
+static struct attribute *pkt_wq_attrs[] = {
+ &dev_attr_congestion_on.attr,
+ &dev_attr_congestion_off.attr,
+ &dev_attr_size.attr,
+ NULL,
};
-static struct kobj_type kobj_pkt_type_stat = {
- .release = pkt_kobj_release,
- .sysfs_ops = &kobj_pkt_ops,
- .default_attrs = kobj_pkt_attrs_stat
+
+static const struct attribute_group pkt_wq_group = {
+ .name = "write_queue",
+ .attrs = pkt_wq_attrs,
};
-static struct kobj_type kobj_pkt_type_wqueue = {
- .release = pkt_kobj_release,
- .sysfs_ops = &kobj_pkt_ops,
- .default_attrs = kobj_pkt_attrs_wqueue
+
+static const struct attribute_group *pkt_groups[] = {
+ &pkt_stat_group,
+ &pkt_wq_group,
+ NULL,
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
if (class_pktcdvd) {
- pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
- "%s", pd->name);
+ pd->dev = device_create_with_groups(class_pktcdvd, NULL,
+ MKDEV(0, 0), pd, pkt_groups,
+ "%s", pd->name);
if (IS_ERR(pd->dev))
pd->dev = NULL;
}
- if (pd->dev) {
- pd->kobj_stat = pkt_kobj_create(pd, "stat",
- &pd->dev->kobj,
- &kobj_pkt_type_stat);
- pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
- &pd->dev->kobj,
- &kobj_pkt_type_wqueue);
- }
}
static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
- pkt_kobj_remove(pd->kobj_stat);
- pkt_kobj_remove(pd->kobj_wqueue);
if (class_pktcdvd)
device_unregister(pd->dev);
}
@@ -722,7 +715,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
if (cgc->quiet)
rq->rq_flags |= RQF_QUIET;
- blk_execute_rq(pd->bdev->bd_disk, rq, 0);
+ blk_execute_rq(rq, false);
if (scsi_req(rq)->result)
ret = -EIO;
out:
@@ -1107,7 +1100,6 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
sector_t zone = 0; /* Suppress gcc warning */
struct pkt_rb_node *node, *first_node;
struct rb_node *n;
- int wakeup;
atomic_set(&pd->scan_queue, 0);
@@ -1179,12 +1171,14 @@ try_next_bio:
spin_unlock(&pkt->lock);
}
/* check write congestion marks, and if bio_queue_size is
- below, wake up any waiters */
- wakeup = (pd->write_congestion_on > 0
- && pd->bio_queue_size <= pd->write_congestion_off);
+ * below, wake up any waiters
+ */
+ if (pd->congested &&
+ pd->bio_queue_size <= pd->write_congestion_off) {
+ pd->congested = false;
+ wake_up_var(&pd->congested);
+ }
spin_unlock(&pd->lock);
- if (wakeup)
- clear_bdi_congested(pd->disk->bdi, BLK_RW_ASYNC);
pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
pkt_set_state(pkt, PACKET_WAITING_STATE);
@@ -2356,7 +2350,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
}
spin_unlock(&pd->cdrw.active_list_lock);
- /*
+ /*
* Test if there is enough room left in the bio work queue
* (queue size >= congestion on mark).
* If not, wait till the work queue size is below the congestion off mark.
@@ -2364,12 +2358,20 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
- set_bdi_congested(bio->bi_bdev->bd_disk->bdi, BLK_RW_ASYNC);
- do {
+ struct wait_bit_queue_entry wqe;
+
+ init_wait_var_entry(&wqe, &pd->congested, 0);
+ for (;;) {
+ prepare_to_wait_event(__var_waitqueue(&pd->congested),
+ &wqe.wq_entry,
+ TASK_UNINTERRUPTIBLE);
+ if (pd->bio_queue_size <= pd->write_congestion_off)
+ break;
+ pd->congested = true;
spin_unlock(&pd->lock);
- congestion_wait(BLK_RW_ASYNC, HZ);
+ schedule();
spin_lock(&pd->lock);
- } while(pd->bio_queue_size > pd->write_congestion_off);
+ }
}
spin_unlock(&pd->lock);
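[Editor's note] pktcdvd drops the removed bdi congestion API in favour of the var-waitqueue primitives. The open-coded prepare_to_wait_event() loop above exists only because the condition must be sampled under pd->lock; where no lock is involved, the same pairing reduces to this hedged sketch, with demo_* names as stand-ins:

#include <linux/wait_bit.h>

static bool demo_congested;

static void demo_writer_side(void)
{
	/* Producer: clear the flag, then wake anyone waiting on it. */
	demo_congested = false;
	wake_up_var(&demo_congested);
}

static void demo_submitter_side(void)
{
	/* Consumer: sleep until the flag, identified by address, clears. */
	wait_var_event(&demo_congested, !demo_congested);
}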
@@ -2719,7 +2721,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
disk->first_minor = idx;
disk->minors = 1;
disk->fops = &pktcdvd_ops;
- disk->flags = GENHD_FL_REMOVABLE;
+ disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
strcpy(disk->disk_name, pd->name);
disk->private_data = pd;
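[Editor's note] The sysfs rework above replaces a hand-rolled kobject/ktype with stock device attributes: DEVICE_ATTR_RO()/DEVICE_ATTR_RW()/DEVICE_ATTR_WO() generate dev_attr_<name>, named attribute groups become the stat/ and write_queue/ subdirectories, and device_create_with_groups() registers everything with the device in one call. A hedged sketch of the pattern; demo_* names are hypothetical:

static ssize_t items_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct demo_priv *p = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", p->items);
}
static DEVICE_ATTR_RO(items);

static struct attribute *demo_stat_attrs[] = {
	&dev_attr_items.attr,
	NULL,
};

static const struct attribute_group demo_stat_group = {
	.name	= "stat",		/* creates <dev>/stat/ in sysfs */
	.attrs	= demo_stat_attrs,
};

static const struct attribute_group *demo_groups[] = {
	&demo_stat_group,
	NULL,
};

/* dev = device_create_with_groups(class, NULL, MKDEV(0, 0), priv,
 *				    demo_groups, "demo%d", id); */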
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index c1876646a4cb..4f90819e245e 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -742,6 +742,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
priv->gendisk = gendisk;
gendisk->major = ps3vram_major;
gendisk->minors = 1;
+ gendisk->flags |= GENHD_FL_NO_PART;
gendisk->fops = &ps3vram_fops;
gendisk->private_data = dev;
strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 953fa134cd3d..4203cdab8abf 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4924,12 +4924,10 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->dev_id);
disk->major = rbd_dev->major;
disk->first_minor = rbd_dev->minor;
- if (single_major) {
+ if (single_major)
disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT);
- disk->flags |= GENHD_FL_EXT_DEVT;
- } else {
+ else
disk->minors = RBD_MINORS_PER_MAJOR;
- }
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
@@ -6191,7 +6189,7 @@ static inline size_t next_token(const char **buf)
* These are the characters that produce nonzero for
* isspace() in the "C" and "POSIX" locales.
*/
- const char *spaces = " \f\n\r\t\v";
+ static const char spaces[] = " \f\n\r\t\v";
*buf += strspn(*buf, spaces); /* Find start of token */
@@ -6497,7 +6495,8 @@ static int rbd_add_parse_args(const char *buf,
pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
pctx.opts->trim = RBD_TRIM_DEFAULT;
- ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL);
+ ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL,
+ ',');
if (ret)
goto out_err;
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index 44e45af00e83..2be5d87a3ca6 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -452,6 +452,7 @@ static struct attribute *rnbd_dev_attrs[] = {
&rnbd_clt_nr_poll_queues.attr,
NULL,
};
+ATTRIBUTE_GROUPS(rnbd_dev);
void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
{
@@ -474,7 +475,7 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
static struct kobj_type rnbd_dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
- .default_attrs = rnbd_dev_attrs,
+ .default_groups = rnbd_dev_groups,
};
static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 2df0657cdf00..c08971de369f 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -196,7 +196,7 @@ rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
return per_cpu_ptr(sess->cpu_queues, bit);
} else if (cpu != 0) {
/* Search from 0 to cpu */
- bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
+ bit = find_first_bit(sess->cpu_queues_bm, cpu);
if (bit < cpu)
return per_cpu_ptr(sess->cpu_queues, bit);
}
@@ -393,7 +393,7 @@ static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
static void rnbd_softirq_done_fn(struct request *rq)
{
- struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+ struct rnbd_clt_dev *dev = rq->q->disk->private_data;
struct rnbd_clt_session *sess = dev->sess;
struct rnbd_iu *iu;
@@ -433,7 +433,7 @@ static void msg_conf(void *priv, int errno)
schedule_work(&iu->work);
}
-static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
+static int send_usr_msg(struct rtrs_clt_sess *rtrs, int dir,
struct rnbd_iu *iu, struct kvec *vec,
size_t len, struct scatterlist *sg, unsigned int sg_len,
void (*conf)(struct work_struct *work),
@@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
struct request *rq,
struct rnbd_iu *iu)
{
- struct rtrs_clt *rtrs = dev->sess->rtrs;
+ struct rtrs_clt_sess *rtrs = dev->sess->rtrs;
struct rtrs_permit *permit = iu->permit;
struct rnbd_msg_io msg;
struct rtrs_clt_req_ops req_ops;
@@ -1133,7 +1133,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
- struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+ struct rnbd_clt_dev *dev = rq->q->disk->private_data;
struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
int err;
blk_status_t ret = BLK_STS_IOERR;
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index 9ef8c4f306f2..0c2cae7f39b9 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -75,7 +75,7 @@ struct rnbd_cpu_qlist {
struct rnbd_clt_session {
struct list_head list;
- struct rtrs_clt *rtrs;
+ struct rtrs_clt_sess *rtrs;
wait_queue_head_t rtrs_waitq;
bool rtrs_ready;
struct rnbd_cpu_qlist __percpu
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index aafecfe97055..1ee808fc600c 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -263,15 +263,15 @@ out:
kfree(srv_sess);
}
-static int create_sess(struct rtrs_srv *rtrs)
+static int create_sess(struct rtrs_srv_sess *rtrs)
{
struct rnbd_srv_session *srv_sess;
- char sessname[NAME_MAX];
+ char pathname[NAME_MAX];
int err;
- err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname));
+ err = rtrs_srv_get_path_name(rtrs, pathname, sizeof(pathname));
if (err) {
- pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err);
+ pr_err("rtrs_srv_get_path_name(%s): %d\n", pathname, err);
return err;
}
@@ -284,8 +284,8 @@ static int create_sess(struct rtrs_srv *rtrs)
offsetof(struct rnbd_dev_blk_io, bio),
BIOSET_NEED_BVECS);
if (err) {
- pr_err("Allocating srv_session for session %s failed\n",
- sessname);
+ pr_err("Allocating srv_session for path %s failed\n",
+ pathname);
kfree(srv_sess);
return err;
}
@@ -298,14 +298,14 @@ static int create_sess(struct rtrs_srv *rtrs)
mutex_unlock(&sess_lock);
srv_sess->rtrs = rtrs;
- strscpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname));
+ strscpy(srv_sess->sessname, pathname, sizeof(srv_sess->sessname));
rtrs_srv_set_sess_priv(rtrs, srv_sess);
return 0;
}
-static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
+static int rnbd_srv_link_ev(struct rtrs_srv_sess *rtrs,
enum rtrs_srv_link_ev ev, void *priv)
{
struct rnbd_srv_session *srv_sess = priv;
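On the server side the rtrs_srv type becomes rtrs_srv_sess and the name accessor becomes rtrs_srv_get_path_name(), reflecting that the returned string identifies an RTRS path. The copy into srv_sess->sessname still goes through strscpy(), which, unlike strncpy(), always NUL-terminates and reports truncation. A short sketch of that copy, with hypothetical names:

    #include <linux/printk.h>
    #include <linux/string.h>

    static void copy_path(char *dst, size_t dstsz, const char *src)
    {
            /* strscpy() returns the copied length, or -E2BIG on truncation;
             * the destination is NUL-terminated either way. */
            if (strscpy(dst, src, dstsz) < 0)
                    pr_warn("path name truncated\n");
    }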
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 98ddc31eb408..e5604bce123a 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -20,7 +20,7 @@
struct rnbd_srv_session {
/* Entry inside global sess_list */
struct list_head list;
- struct rtrs_srv *rtrs;
+ struct rtrs_srv_sess *rtrs;
char sessname[NAME_MAX];
int queue_depth;
struct bio_set sess_bio_set;
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
deleted file mode 100644
index 7ef158099d33..000000000000
--- a/drivers/block/rsxx/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
-rsxx-objs := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
deleted file mode 100644
index 11ed1d9646b9..000000000000
--- a/drivers/block/rsxx/config.c
+++ /dev/null
@@ -1,197 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Filename: config.c
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#include <linux/types.h>
-#include <linux/crc32.h>
-#include <linux/swab.h>
-
-#include "rsxx_priv.h"
-#include "rsxx_cfg.h"
-
-static void initialize_config(struct rsxx_card_cfg *cfg)
-{
- cfg->hdr.version = RSXX_CFG_VERSION;
-
- cfg->data.block_size = RSXX_HW_BLK_SIZE;
- cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
- cfg->data.vendor_id = RSXX_VENDOR_ID_IBM;
- cfg->data.cache_order = (-1);
- cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
- cfg->data.intr_coal.count = 0;
- cfg->data.intr_coal.latency = 0;
-}
-
-static u32 config_data_crc32(struct rsxx_card_cfg *cfg)
-{
- /*
- * Return the complement of the CRC to ensure compatibility
- * (i.e. this is how early rsxx drivers did it.)
- */
-
- return ~crc32(~0, &cfg->data, sizeof(cfg->data));
-}
-
-
-/*----------------- Config Byte Swap Functions -------------------*/
-static void config_hdr_be_to_cpu(struct card_cfg_hdr *hdr)
-{
- hdr->version = be32_to_cpu((__force __be32) hdr->version);
- hdr->crc = be32_to_cpu((__force __be32) hdr->crc);
-}
-
-static void config_hdr_cpu_to_be(struct card_cfg_hdr *hdr)
-{
- hdr->version = (__force u32) cpu_to_be32(hdr->version);
- hdr->crc = (__force u32) cpu_to_be32(hdr->crc);
-}
-
-static void config_data_swab(struct rsxx_card_cfg *cfg)
-{
- u32 *data = (u32 *) &cfg->data;
- int i;
-
- for (i = 0; i < (sizeof(cfg->data) / 4); i++)
- data[i] = swab32(data[i]);
-}
-
-static void config_data_le_to_cpu(struct rsxx_card_cfg *cfg)
-{
- u32 *data = (u32 *) &cfg->data;
- int i;
-
- for (i = 0; i < (sizeof(cfg->data) / 4); i++)
- data[i] = le32_to_cpu((__force __le32) data[i]);
-}
-
-static void config_data_cpu_to_le(struct rsxx_card_cfg *cfg)
-{
- u32 *data = (u32 *) &cfg->data;
- int i;
-
- for (i = 0; i < (sizeof(cfg->data) / 4); i++)
- data[i] = (__force u32) cpu_to_le32(data[i]);
-}
-
-
-/*----------------- Config Operations ------------------*/
-static int rsxx_save_config(struct rsxx_cardinfo *card)
-{
- struct rsxx_card_cfg cfg;
- int st;
-
- memcpy(&cfg, &card->config, sizeof(cfg));
-
- if (unlikely(cfg.hdr.version != RSXX_CFG_VERSION)) {
- dev_err(CARD_TO_DEV(card),
- "Cannot save config with invalid version %d\n",
- cfg.hdr.version);
- return -EINVAL;
- }
-
- /* Convert data to little endian for the CRC calculation. */
- config_data_cpu_to_le(&cfg);
-
- cfg.hdr.crc = config_data_crc32(&cfg);
-
- /*
- * Swap the data from little endian to big endian so it can be
- * stored.
- */
- config_data_swab(&cfg);
- config_hdr_cpu_to_be(&cfg.hdr);
-
- st = rsxx_creg_write(card, CREG_ADD_CONFIG, sizeof(cfg), &cfg, 1);
- if (st)
- return st;
-
- return 0;
-}
-
-int rsxx_load_config(struct rsxx_cardinfo *card)
-{
- int st;
- u32 crc;
-
- st = rsxx_creg_read(card, CREG_ADD_CONFIG, sizeof(card->config),
- &card->config, 1);
- if (st) {
- dev_err(CARD_TO_DEV(card),
- "Failed reading card config.\n");
- return st;
- }
-
- config_hdr_be_to_cpu(&card->config.hdr);
-
- if (card->config.hdr.version == RSXX_CFG_VERSION) {
- /*
- * We calculate the CRC with the data in little endian, because
- * early drivers did not take big endian CPUs into account.
- * The data is always stored in big endian, so we need to byte
- * swap it before calculating the CRC.
- */
-
- config_data_swab(&card->config);
-
- /* Check the CRC */
- crc = config_data_crc32(&card->config);
- if (crc != card->config.hdr.crc) {
- dev_err(CARD_TO_DEV(card),
- "Config corruption detected!\n");
- dev_info(CARD_TO_DEV(card),
- "CRC (sb x%08x is x%08x)\n",
- card->config.hdr.crc, crc);
- return -EIO;
- }
-
- /* Convert the data to CPU byteorder */
- config_data_le_to_cpu(&card->config);
-
- } else if (card->config.hdr.version != 0) {
- dev_err(CARD_TO_DEV(card),
- "Invalid config version %d.\n",
- card->config.hdr.version);
- /*
- * Config version changes require special handling from the
- * user
- */
- return -EINVAL;
- } else {
- dev_info(CARD_TO_DEV(card),
- "Initializing card configuration.\n");
- initialize_config(&card->config);
- st = rsxx_save_config(card);
- if (st)
- return st;
- }
-
- card->config_valid = 1;
-
- dev_dbg(CARD_TO_DEV(card), "version: x%08x\n",
- card->config.hdr.version);
- dev_dbg(CARD_TO_DEV(card), "crc: x%08x\n",
- card->config.hdr.crc);
- dev_dbg(CARD_TO_DEV(card), "block_size: x%08x\n",
- card->config.data.block_size);
- dev_dbg(CARD_TO_DEV(card), "stripe_size: x%08x\n",
- card->config.data.stripe_size);
- dev_dbg(CARD_TO_DEV(card), "vendor_id: x%08x\n",
- card->config.data.vendor_id);
- dev_dbg(CARD_TO_DEV(card), "cache_order: x%08x\n",
- card->config.data.cache_order);
- dev_dbg(CARD_TO_DEV(card), "mode: x%08x\n",
- card->config.data.intr_coal.mode);
- dev_dbg(CARD_TO_DEV(card), "count: x%08x\n",
- card->config.data.intr_coal.count);
- dev_dbg(CARD_TO_DEV(card), "latency: x%08x\n",
- card->config.data.intr_coal.latency);
-
- return 0;
-}
-
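The deleted config code above preserves two quirks of early rsxx firmware: the config block lives on the card big-endian, but the CRC is computed over the little-endian image, and what gets stored is the complement of the kernel's crc32() result. Since crc32() omits the conventional final inversion, complementing it yields exactly the standard finalized CRC-32 — the subtlety is only that the check must reproduce the same byte order and the same form. A self-contained sketch of that checksum convention:

    #include <linux/crc32.h>

    /* ~crc32(~0, ...) == crc32(~0, ...) ^ 0xffffffff, i.e. the usual
     * init-all-ones, final-inversion CRC-32 that crc32() leaves undone. */
    static u32 rsxx_style_crc(const void *data, size_t len)
    {
            return ~crc32(~0, data, len);
    }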
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
deleted file mode 100644
index 8d9d69f5dfbc..000000000000
--- a/drivers/block/rsxx/core.c
+++ /dev/null
@@ -1,1126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Filename: core.c
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/reboot.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include <linux/genhd.h>
-#include <linux/idr.h>
-
-#include "rsxx_priv.h"
-#include "rsxx_cfg.h"
-
-#define NO_LEGACY 0
-#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */
-
-MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
-MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
-
-static unsigned int force_legacy = NO_LEGACY;
-module_param(force_legacy, uint, 0444);
-MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
-
-static unsigned int sync_start = 1;
-module_param(sync_start, uint, 0444);
-MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
- "until the card startup has completed.");
-
-static DEFINE_IDA(rsxx_disk_ida);
-
-/* --------------------Debugfs Setup ------------------- */
-
-static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
-{
- struct rsxx_cardinfo *card = m->private;
-
- seq_printf(m, "HWID 0x%08x\n",
- ioread32(card->regmap + HWID));
- seq_printf(m, "SCRATCH 0x%08x\n",
- ioread32(card->regmap + SCRATCH));
- seq_printf(m, "IER 0x%08x\n",
- ioread32(card->regmap + IER));
- seq_printf(m, "IPR 0x%08x\n",
- ioread32(card->regmap + IPR));
- seq_printf(m, "CREG_CMD 0x%08x\n",
- ioread32(card->regmap + CREG_CMD));
- seq_printf(m, "CREG_ADD 0x%08x\n",
- ioread32(card->regmap + CREG_ADD));
- seq_printf(m, "CREG_CNT 0x%08x\n",
- ioread32(card->regmap + CREG_CNT));
- seq_printf(m, "CREG_STAT 0x%08x\n",
- ioread32(card->regmap + CREG_STAT));
- seq_printf(m, "CREG_DATA0 0x%08x\n",
- ioread32(card->regmap + CREG_DATA0));
- seq_printf(m, "CREG_DATA1 0x%08x\n",
- ioread32(card->regmap + CREG_DATA1));
- seq_printf(m, "CREG_DATA2 0x%08x\n",
- ioread32(card->regmap + CREG_DATA2));
- seq_printf(m, "CREG_DATA3 0x%08x\n",
- ioread32(card->regmap + CREG_DATA3));
- seq_printf(m, "CREG_DATA4 0x%08x\n",
- ioread32(card->regmap + CREG_DATA4));
- seq_printf(m, "CREG_DATA5 0x%08x\n",
- ioread32(card->regmap + CREG_DATA5));
- seq_printf(m, "CREG_DATA6 0x%08x\n",
- ioread32(card->regmap + CREG_DATA6));
- seq_printf(m, "CREG_DATA7 0x%08x\n",
- ioread32(card->regmap + CREG_DATA7));
- seq_printf(m, "INTR_COAL 0x%08x\n",
- ioread32(card->regmap + INTR_COAL));
- seq_printf(m, "HW_ERROR 0x%08x\n",
- ioread32(card->regmap + HW_ERROR));
- seq_printf(m, "DEBUG0 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG0));
- seq_printf(m, "DEBUG1 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG1));
- seq_printf(m, "DEBUG2 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG2));
- seq_printf(m, "DEBUG3 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG3));
- seq_printf(m, "DEBUG4 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG4));
- seq_printf(m, "DEBUG5 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG5));
- seq_printf(m, "DEBUG6 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG6));
- seq_printf(m, "DEBUG7 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG7));
- seq_printf(m, "RECONFIG 0x%08x\n",
- ioread32(card->regmap + PCI_RECONFIG));
-
- return 0;
-}
-
-static int rsxx_attr_stats_show(struct seq_file *m, void *p)
-{
- struct rsxx_cardinfo *card = m->private;
- int i;
-
- for (i = 0; i < card->n_targets; i++) {
- seq_printf(m, "Ctrl %d CRC Errors = %d\n",
- i, card->ctrl[i].stats.crc_errors);
- seq_printf(m, "Ctrl %d Hard Errors = %d\n",
- i, card->ctrl[i].stats.hard_errors);
- seq_printf(m, "Ctrl %d Soft Errors = %d\n",
- i, card->ctrl[i].stats.soft_errors);
- seq_printf(m, "Ctrl %d Writes Issued = %d\n",
- i, card->ctrl[i].stats.writes_issued);
- seq_printf(m, "Ctrl %d Writes Failed = %d\n",
- i, card->ctrl[i].stats.writes_failed);
- seq_printf(m, "Ctrl %d Reads Issued = %d\n",
- i, card->ctrl[i].stats.reads_issued);
- seq_printf(m, "Ctrl %d Reads Failed = %d\n",
- i, card->ctrl[i].stats.reads_failed);
- seq_printf(m, "Ctrl %d Reads Retried = %d\n",
- i, card->ctrl[i].stats.reads_retried);
- seq_printf(m, "Ctrl %d Discards Issued = %d\n",
- i, card->ctrl[i].stats.discards_issued);
- seq_printf(m, "Ctrl %d Discards Failed = %d\n",
- i, card->ctrl[i].stats.discards_failed);
- seq_printf(m, "Ctrl %d DMA SW Errors = %d\n",
- i, card->ctrl[i].stats.dma_sw_err);
- seq_printf(m, "Ctrl %d DMA HW Faults = %d\n",
- i, card->ctrl[i].stats.dma_hw_fault);
- seq_printf(m, "Ctrl %d DMAs Cancelled = %d\n",
- i, card->ctrl[i].stats.dma_cancelled);
- seq_printf(m, "Ctrl %d SW Queue Depth = %d\n",
- i, card->ctrl[i].stats.sw_q_depth);
- seq_printf(m, "Ctrl %d HW Queue Depth = %d\n",
- i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
- }
-
- return 0;
-}
-
-static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rsxx_attr_stats_show, inode->i_private);
-}
-
-static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
-}
-
-static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- struct rsxx_cardinfo *card = file_inode(fp)->i_private;
- char *buf;
- int st;
-
- buf = kzalloc(cnt, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
- if (!st) {
- if (copy_to_user(ubuf, buf, cnt))
- st = -EFAULT;
- }
- kfree(buf);
- if (st)
- return st;
- *ppos += cnt;
- return cnt;
-}
-
-static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- struct rsxx_cardinfo *card = file_inode(fp)->i_private;
- char *buf;
- ssize_t st;
-
- buf = memdup_user(ubuf, cnt);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
- kfree(buf);
- if (st)
- return st;
- *ppos += cnt;
- return cnt;
-}
-
-static const struct file_operations debugfs_cram_fops = {
- .owner = THIS_MODULE,
- .read = rsxx_cram_read,
- .write = rsxx_cram_write,
-};
-
-static const struct file_operations debugfs_stats_fops = {
- .owner = THIS_MODULE,
- .open = rsxx_attr_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations debugfs_pci_regs_fops = {
- .owner = THIS_MODULE,
- .open = rsxx_attr_pci_regs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
-{
- struct dentry *debugfs_stats;
- struct dentry *debugfs_pci_regs;
- struct dentry *debugfs_cram;
-
- card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
- if (IS_ERR_OR_NULL(card->debugfs_dir))
- goto failed_debugfs_dir;
-
- debugfs_stats = debugfs_create_file("stats", 0444,
- card->debugfs_dir, card,
- &debugfs_stats_fops);
- if (IS_ERR_OR_NULL(debugfs_stats))
- goto failed_debugfs_stats;
-
- debugfs_pci_regs = debugfs_create_file("pci_regs", 0444,
- card->debugfs_dir, card,
- &debugfs_pci_regs_fops);
- if (IS_ERR_OR_NULL(debugfs_pci_regs))
- goto failed_debugfs_pci_regs;
-
- debugfs_cram = debugfs_create_file("cram", 0644,
- card->debugfs_dir, card,
- &debugfs_cram_fops);
- if (IS_ERR_OR_NULL(debugfs_cram))
- goto failed_debugfs_cram;
-
- return;
-failed_debugfs_cram:
- debugfs_remove(debugfs_pci_regs);
-failed_debugfs_pci_regs:
- debugfs_remove(debugfs_stats);
-failed_debugfs_stats:
- debugfs_remove(card->debugfs_dir);
-failed_debugfs_dir:
- card->debugfs_dir = NULL;
-}
-
-/*----------------- Interrupt Control & Handling -------------------*/
-
-static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
-{
- card->isr_mask = 0;
- card->ier_mask = 0;
-}
-
-static void __enable_intr(unsigned int *mask, unsigned int intr)
-{
- *mask |= intr;
-}
-
-static void __disable_intr(unsigned int *mask, unsigned int intr)
-{
- *mask &= ~intr;
-}
-
-/*
- * NOTE: Disabling the IER will disable the hardware interrupt.
- * Disabling the ISR will disable the software handling of the ISR bit.
- *
- * Enable/Disable interrupt functions assume the card->irq_lock
- * is held by the caller.
- */
-void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
-{
- if (unlikely(card->halt) ||
- unlikely(card->eeh_state))
- return;
-
- __enable_intr(&card->ier_mask, intr);
- iowrite32(card->ier_mask, card->regmap + IER);
-}
-
-void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
-{
- if (unlikely(card->eeh_state))
- return;
-
- __disable_intr(&card->ier_mask, intr);
- iowrite32(card->ier_mask, card->regmap + IER);
-}
-
-void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
- unsigned int intr)
-{
- if (unlikely(card->halt) ||
- unlikely(card->eeh_state))
- return;
-
- __enable_intr(&card->isr_mask, intr);
- __enable_intr(&card->ier_mask, intr);
- iowrite32(card->ier_mask, card->regmap + IER);
-}
-void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
- unsigned int intr)
-{
- if (unlikely(card->eeh_state))
- return;
-
- __disable_intr(&card->isr_mask, intr);
- __disable_intr(&card->ier_mask, intr);
- iowrite32(card->ier_mask, card->regmap + IER);
-}
-
-static irqreturn_t rsxx_isr(int irq, void *pdata)
-{
- struct rsxx_cardinfo *card = pdata;
- unsigned int isr;
- int handled = 0;
- int reread_isr;
- int i;
-
- spin_lock(&card->irq_lock);
-
- do {
- reread_isr = 0;
-
- if (unlikely(card->eeh_state))
- break;
-
- isr = ioread32(card->regmap + ISR);
- if (isr == 0xffffffff) {
- /*
- * A few systems seem to have an intermittent issue
- * where PCI reads return all Fs, but retrying the read
- * a little later will return as expected.
- */
- dev_info(CARD_TO_DEV(card),
- "ISR = 0xFFFFFFFF, retrying later\n");
- break;
- }
-
- isr &= card->isr_mask;
- if (!isr)
- break;
-
- for (i = 0; i < card->n_targets; i++) {
- if (isr & CR_INTR_DMA(i)) {
- if (card->ier_mask & CR_INTR_DMA(i)) {
- rsxx_disable_ier(card, CR_INTR_DMA(i));
- reread_isr = 1;
- }
- queue_work(card->ctrl[i].done_wq,
- &card->ctrl[i].dma_done_work);
- handled++;
- }
- }
-
- if (isr & CR_INTR_CREG) {
- queue_work(card->creg_ctrl.creg_wq,
- &card->creg_ctrl.done_work);
- handled++;
- }
-
- if (isr & CR_INTR_EVENT) {
- queue_work(card->event_wq, &card->event_work);
- rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
- handled++;
- }
- } while (reread_isr);
-
- spin_unlock(&card->irq_lock);
-
- return handled ? IRQ_HANDLED : IRQ_NONE;
-}
-
-/*----------------- Card Event Handler -------------------*/
-static const char *rsxx_card_state_to_str(unsigned int state)
-{
- static const char * const state_strings[] = {
- "Unknown", "Shutdown", "Starting", "Formatting",
- "Uninitialized", "Good", "Shutting Down",
- "Fault", "Read Only Fault", "dStroying"
- };
-
- return state_strings[ffs(state)];
-}
-
-static void card_state_change(struct rsxx_cardinfo *card,
- unsigned int new_state)
-{
- int st;
-
- dev_info(CARD_TO_DEV(card),
- "card state change detected.(%s -> %s)\n",
- rsxx_card_state_to_str(card->state),
- rsxx_card_state_to_str(new_state));
-
- card->state = new_state;
-
- /* Don't attach DMA interfaces if the card has an invalid config */
- if (!card->config_valid)
- return;
-
- switch (new_state) {
- case CARD_STATE_RD_ONLY_FAULT:
- dev_crit(CARD_TO_DEV(card),
- "Hardware has entered read-only mode!\n");
- /*
- * Fall through so the DMA devices can be attached and
- * the user can attempt to pull off their data.
- */
- fallthrough;
- case CARD_STATE_GOOD:
- st = rsxx_get_card_size8(card, &card->size8);
- if (st)
- dev_err(CARD_TO_DEV(card),
- "Failed attaching DMA devices\n");
-
- if (card->config_valid)
- set_capacity(card->gendisk, card->size8 >> 9);
- break;
-
- case CARD_STATE_FAULT:
- dev_crit(CARD_TO_DEV(card),
- "Hardware Fault reported!\n");
- fallthrough;
-
- /* Everything else, detach DMA interface if it's attached. */
- case CARD_STATE_SHUTDOWN:
- case CARD_STATE_STARTING:
- case CARD_STATE_FORMATTING:
- case CARD_STATE_UNINITIALIZED:
- case CARD_STATE_SHUTTING_DOWN:
- /*
- * dStroy is a term coined by marketing to represent the low level
- * secure erase.
- */
- case CARD_STATE_DSTROYING:
- set_capacity(card->gendisk, 0);
- break;
- }
-}
-
-static void card_event_handler(struct work_struct *work)
-{
- struct rsxx_cardinfo *card;
- unsigned int state;
- unsigned long flags;
- int st;
-
- card = container_of(work, struct rsxx_cardinfo, event_work);
-
- if (unlikely(card->halt))
- return;
-
- /*
- * Enable the interrupt now to avoid any weird race conditions where a
- * state change might occur while rsxx_get_card_state() is
- * processing a returned creg cmd.
- */
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- st = rsxx_get_card_state(card, &state);
- if (st) {
- dev_info(CARD_TO_DEV(card),
- "Failed reading state after event.\n");
- return;
- }
-
- if (card->state != state)
- card_state_change(card, state);
-
- if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
- rsxx_read_hw_log(card);
-}
-
-/*----------------- Card Operations -------------------*/
-static int card_shutdown(struct rsxx_cardinfo *card)
-{
- unsigned int state;
- signed long start;
- const int timeout = msecs_to_jiffies(120000);
- int st;
-
- /* We can't issue a shutdown if the card is in a transition state */
- start = jiffies;
- do {
- st = rsxx_get_card_state(card, &state);
- if (st)
- return st;
- } while (state == CARD_STATE_STARTING &&
- (jiffies - start < timeout));
-
- if (state == CARD_STATE_STARTING)
- return -ETIMEDOUT;
-
- /* Only issue a shutdown if we need to */
- if ((state != CARD_STATE_SHUTTING_DOWN) &&
- (state != CARD_STATE_SHUTDOWN)) {
- st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
- if (st)
- return st;
- }
-
- start = jiffies;
- do {
- st = rsxx_get_card_state(card, &state);
- if (st)
- return st;
- } while (state != CARD_STATE_SHUTDOWN &&
- (jiffies - start < timeout));
-
- if (state != CARD_STATE_SHUTDOWN)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int rsxx_eeh_frozen(struct pci_dev *dev)
-{
- struct rsxx_cardinfo *card = pci_get_drvdata(dev);
- int i;
- int st;
-
- dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");
-
- card->eeh_state = 1;
- rsxx_mask_interrupts(card);
-
- /*
- * We need to guarantee that the write for eeh_state and masking
- * interrupts does not become reordered. This will prevent a possible
- * race condition with the EEH code.
- */
- wmb();
-
- pci_disable_device(dev);
-
- st = rsxx_eeh_save_issued_dmas(card);
- if (st)
- return st;
-
- rsxx_eeh_save_issued_creg(card);
-
- for (i = 0; i < card->n_targets; i++) {
- if (card->ctrl[i].status.buf)
- dma_free_coherent(&card->dev->dev,
- STATUS_BUFFER_SIZE8,
- card->ctrl[i].status.buf,
- card->ctrl[i].status.dma_addr);
- if (card->ctrl[i].cmd.buf)
- dma_free_coherent(&card->dev->dev,
- COMMAND_BUFFER_SIZE8,
- card->ctrl[i].cmd.buf,
- card->ctrl[i].cmd.dma_addr);
- }
-
- return 0;
-}
-
-static void rsxx_eeh_failure(struct pci_dev *dev)
-{
- struct rsxx_cardinfo *card = pci_get_drvdata(dev);
- int i;
- int cnt = 0;
-
- dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");
-
- card->eeh_state = 1;
- card->halt = 1;
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
- &card->ctrl[i].queue,
- COMPLETE_DMA);
- spin_unlock_bh(&card->ctrl[i].queue_lock);
-
- cnt += rsxx_dma_cancel(&card->ctrl[i]);
-
- if (cnt)
- dev_info(CARD_TO_DEV(card),
- "Freed %d queued DMAs on channel %d\n",
- cnt, card->ctrl[i].id);
- }
-}
-
-static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
-{
- unsigned int status;
- int iter = 0;
-
- /* We need to wait for the hardware to reset */
- while (iter++ < 10) {
- status = ioread32(card->regmap + PCI_RECONFIG);
-
- if (status & RSXX_FLUSH_BUSY) {
- ssleep(1);
- continue;
- }
-
- if (status & RSXX_FLUSH_TIMEOUT)
- dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
- return 0;
- }
-
- /* Hardware failed resetting itself. */
- return -1;
-}
-
-static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
- pci_channel_state_t error)
-{
- int st;
-
- if (dev->revision < RSXX_EEH_SUPPORT)
- return PCI_ERS_RESULT_NONE;
-
- if (error == pci_channel_io_perm_failure) {
- rsxx_eeh_failure(dev);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- st = rsxx_eeh_frozen(dev);
- if (st) {
- dev_err(&dev->dev, "Slot reset setup failed\n");
- rsxx_eeh_failure(dev);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
-{
- struct rsxx_cardinfo *card = pci_get_drvdata(dev);
- unsigned long flags;
- int i;
- int st;
-
- dev_warn(&dev->dev,
- "IBM Flash Adapter PCI: recovering from slot reset.\n");
-
- st = pci_enable_device(dev);
- if (st)
- goto failed_hw_setup;
-
- pci_set_master(dev);
-
- st = rsxx_eeh_fifo_flush_poll(card);
- if (st)
- goto failed_hw_setup;
-
- rsxx_dma_queue_reset(card);
-
- for (i = 0; i < card->n_targets; i++) {
- st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
- if (st)
- goto failed_hw_buffers_init;
- }
-
- if (card->config_valid)
- rsxx_dma_configure(card);
-
- /* Clears the ISR register from spurious interrupts */
- st = ioread32(card->regmap + ISR);
-
- card->eeh_state = 0;
-
- spin_lock_irqsave(&card->irq_lock, flags);
- if (card->n_targets & RSXX_MAX_TARGETS)
- rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
- else
- rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- rsxx_kick_creg_queue(card);
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock(&card->ctrl[i].queue_lock);
- if (list_empty(&card->ctrl[i].queue)) {
- spin_unlock(&card->ctrl[i].queue_lock);
- continue;
- }
- spin_unlock(&card->ctrl[i].queue_lock);
-
- queue_work(card->ctrl[i].issue_wq,
- &card->ctrl[i].issue_dma_work);
- }
-
- dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");
-
- return PCI_ERS_RESULT_RECOVERED;
-
-failed_hw_buffers_init:
- for (i = 0; i < card->n_targets; i++) {
- if (card->ctrl[i].status.buf)
- dma_free_coherent(&card->dev->dev,
- STATUS_BUFFER_SIZE8,
- card->ctrl[i].status.buf,
- card->ctrl[i].status.dma_addr);
- if (card->ctrl[i].cmd.buf)
- dma_free_coherent(&card->dev->dev,
- COMMAND_BUFFER_SIZE8,
- card->ctrl[i].cmd.buf,
- card->ctrl[i].cmd.dma_addr);
- }
-failed_hw_setup:
- rsxx_eeh_failure(dev);
- return PCI_ERS_RESULT_DISCONNECT;
-
-}
-
-/*----------------- Driver Initialization & Setup -------------------*/
-/* Returns: 0 if the driver is compatible with the device
- -1 if the driver is NOT compatible with the device */
-static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
-{
- unsigned char pci_rev;
-
- pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
-
- if (pci_rev > RS70_PCI_REV_SUPPORTED)
- return -1;
- return 0;
-}
-
-static int rsxx_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct rsxx_cardinfo *card;
- int st;
- unsigned int sync_timeout;
-
- dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
-
- card = kzalloc(sizeof(*card), GFP_KERNEL);
- if (!card)
- return -ENOMEM;
-
- card->dev = dev;
- pci_set_drvdata(dev, card);
-
- st = ida_alloc(&rsxx_disk_ida, GFP_KERNEL);
- if (st < 0)
- goto failed_ida_get;
- card->disk_id = st;
-
- st = pci_enable_device(dev);
- if (st)
- goto failed_enable;
-
- pci_set_master(dev);
-
- st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
- if (st) {
- dev_err(CARD_TO_DEV(card),
- "No usable DMA configuration,aborting\n");
- goto failed_dma_mask;
- }
-
- st = pci_request_regions(dev, DRIVER_NAME);
- if (st) {
- dev_err(CARD_TO_DEV(card),
- "Failed to request memory region\n");
- goto failed_request_regions;
- }
-
- if (pci_resource_len(dev, 0) == 0) {
- dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
- st = -ENOMEM;
- goto failed_iomap;
- }
-
- card->regmap = pci_iomap(dev, 0, 0);
- if (!card->regmap) {
- dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
- st = -ENOMEM;
- goto failed_iomap;
- }
-
- spin_lock_init(&card->irq_lock);
- card->halt = 0;
- card->eeh_state = 0;
-
- spin_lock_irq(&card->irq_lock);
- rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
- spin_unlock_irq(&card->irq_lock);
-
- if (!force_legacy) {
- st = pci_enable_msi(dev);
- if (st)
- dev_warn(CARD_TO_DEV(card),
- "Failed to enable MSI\n");
- }
-
- st = request_irq(dev->irq, rsxx_isr, IRQF_SHARED,
- DRIVER_NAME, card);
- if (st) {
- dev_err(CARD_TO_DEV(card),
- "Failed requesting IRQ%d\n", dev->irq);
- goto failed_irq;
- }
-
- /************* Setup Processor Command Interface *************/
- st = rsxx_creg_setup(card);
- if (st) {
- dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n");
- goto failed_creg_setup;
- }
-
- spin_lock_irq(&card->irq_lock);
- rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
- spin_unlock_irq(&card->irq_lock);
-
- st = rsxx_compatibility_check(card);
- if (st) {
- dev_warn(CARD_TO_DEV(card),
- "Incompatible driver detected. Please update the driver.\n");
- st = -EINVAL;
- goto failed_compatiblity_check;
- }
-
- /************* Load Card Config *************/
- st = rsxx_load_config(card);
- if (st)
- dev_err(CARD_TO_DEV(card),
- "Failed loading card config\n");
-
- /************* Setup DMA Engine *************/
- st = rsxx_get_num_targets(card, &card->n_targets);
- if (st)
- dev_info(CARD_TO_DEV(card),
- "Failed reading the number of DMA targets\n");
-
- card->ctrl = kcalloc(card->n_targets, sizeof(*card->ctrl),
- GFP_KERNEL);
- if (!card->ctrl) {
- st = -ENOMEM;
- goto failed_dma_setup;
- }
-
- st = rsxx_dma_setup(card);
- if (st) {
- dev_info(CARD_TO_DEV(card),
- "Failed to setup DMA engine\n");
- goto failed_dma_setup;
- }
-
- /************* Setup Card Event Handler *************/
- card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
- if (!card->event_wq) {
- dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
- st = -ENOMEM;
- goto failed_event_handler;
- }
-
- INIT_WORK(&card->event_work, card_event_handler);
-
- st = rsxx_setup_dev(card);
- if (st)
- goto failed_create_dev;
-
- rsxx_get_card_state(card, &card->state);
-
- dev_info(CARD_TO_DEV(card),
- "card state: %s\n",
- rsxx_card_state_to_str(card->state));
-
- /*
- * Now that the DMA Engine and devices have been setup,
- * we can enable the event interrupt (it kicks off actions in
- * those layers so we couldn't enable it right away.)
- */
- spin_lock_irq(&card->irq_lock);
- rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
- spin_unlock_irq(&card->irq_lock);
-
- if (card->state == CARD_STATE_SHUTDOWN) {
- st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
- if (st)
- dev_crit(CARD_TO_DEV(card),
- "Failed issuing card startup\n");
- if (sync_start) {
- sync_timeout = SYNC_START_TIMEOUT;
-
- dev_info(CARD_TO_DEV(card),
- "Waiting for card to startup\n");
-
- do {
- ssleep(1);
- sync_timeout--;
-
- rsxx_get_card_state(card, &card->state);
- } while (sync_timeout &&
- (card->state == CARD_STATE_STARTING));
-
- if (card->state == CARD_STATE_STARTING) {
- dev_warn(CARD_TO_DEV(card),
- "Card startup timed out\n");
- card->size8 = 0;
- } else {
- dev_info(CARD_TO_DEV(card),
- "card state: %s\n",
- rsxx_card_state_to_str(card->state));
- st = rsxx_get_card_size8(card, &card->size8);
- if (st)
- card->size8 = 0;
- }
- }
- } else if (card->state == CARD_STATE_GOOD ||
- card->state == CARD_STATE_RD_ONLY_FAULT) {
- st = rsxx_get_card_size8(card, &card->size8);
- if (st)
- card->size8 = 0;
- }
-
- st = rsxx_attach_dev(card);
- if (st)
- goto failed_create_dev;
-
- /************* Setup Debugfs *************/
- rsxx_debugfs_dev_new(card);
-
- return 0;
-
-failed_create_dev:
- destroy_workqueue(card->event_wq);
- card->event_wq = NULL;
-failed_event_handler:
- rsxx_dma_destroy(card);
-failed_dma_setup:
-failed_compatiblity_check:
- destroy_workqueue(card->creg_ctrl.creg_wq);
- card->creg_ctrl.creg_wq = NULL;
-failed_creg_setup:
- spin_lock_irq(&card->irq_lock);
- rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
- spin_unlock_irq(&card->irq_lock);
- free_irq(dev->irq, card);
- if (!force_legacy)
- pci_disable_msi(dev);
-failed_irq:
- pci_iounmap(dev, card->regmap);
-failed_iomap:
- pci_release_regions(dev);
-failed_request_regions:
-failed_dma_mask:
- pci_disable_device(dev);
-failed_enable:
- ida_free(&rsxx_disk_ida, card->disk_id);
-failed_ida_get:
- kfree(card);
-
- return st;
-}
-
-static void rsxx_pci_remove(struct pci_dev *dev)
-{
- struct rsxx_cardinfo *card = pci_get_drvdata(dev);
- unsigned long flags;
- int st;
- int i;
-
- if (!card)
- return;
-
- dev_info(CARD_TO_DEV(card),
- "Removing PCI-Flash SSD.\n");
-
- rsxx_detach_dev(card);
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
- spin_unlock_irqrestore(&card->irq_lock, flags);
- }
-
- st = card_shutdown(card);
- if (st)
- dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");
-
- /* Sync outstanding event handlers. */
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- cancel_work_sync(&card->event_work);
-
- destroy_workqueue(card->event_wq);
- rsxx_destroy_dev(card);
- rsxx_dma_destroy(card);
- destroy_workqueue(card->creg_ctrl.creg_wq);
-
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- /* Prevent work_structs from re-queuing themselves. */
- card->halt = 1;
-
- debugfs_remove_recursive(card->debugfs_dir);
-
- free_irq(dev->irq, card);
-
- if (!force_legacy)
- pci_disable_msi(dev);
-
- rsxx_creg_destroy(card);
-
- pci_iounmap(dev, card->regmap);
-
- pci_disable_device(dev);
- pci_release_regions(dev);
-
- ida_free(&rsxx_disk_ida, card->disk_id);
- kfree(card);
-}
-
-static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
- /* We don't support suspend at this time. */
- return -ENOSYS;
-}
-
-static void rsxx_pci_shutdown(struct pci_dev *dev)
-{
- struct rsxx_cardinfo *card = pci_get_drvdata(dev);
- unsigned long flags;
- int i;
-
- if (!card)
- return;
-
- dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");
-
- rsxx_detach_dev(card);
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
- spin_unlock_irqrestore(&card->irq_lock, flags);
- }
-
- card_shutdown(card);
-}
-
-static const struct pci_error_handlers rsxx_err_handler = {
- .error_detected = rsxx_error_detected,
- .slot_reset = rsxx_slot_reset,
-};
-
-static const struct pci_device_id rsxx_pci_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
- {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
- {0,},
-};
-
-MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
-
-static struct pci_driver rsxx_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = rsxx_pci_ids,
- .probe = rsxx_pci_probe,
- .remove = rsxx_pci_remove,
- .suspend = rsxx_pci_suspend,
- .shutdown = rsxx_pci_shutdown,
- .err_handler = &rsxx_err_handler,
-};
-
-static int __init rsxx_core_init(void)
-{
- int st;
-
- st = rsxx_dev_init();
- if (st)
- return st;
-
- st = rsxx_dma_init();
- if (st)
- goto dma_init_failed;
-
- st = rsxx_creg_init();
- if (st)
- goto creg_init_failed;
-
- return pci_register_driver(&rsxx_pci_driver);
-
-creg_init_failed:
- rsxx_dma_cleanup();
-dma_init_failed:
- rsxx_dev_cleanup();
-
- return st;
-}
-
-static void __exit rsxx_core_cleanup(void)
-{
- pci_unregister_driver(&rsxx_pci_driver);
- rsxx_creg_cleanup();
- rsxx_dma_cleanup();
- rsxx_dev_cleanup();
-}
-
-module_init(rsxx_core_init);
-module_exit(rsxx_core_cleanup);
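rsxx_card_state_to_str() in the deleted core above leans on the card state being a one-hot bitmask: ffs() returns the 1-based position of the lowest set bit and 0 when no bit is set, so state 0 indexes "Unknown" and each single-bit state lands on its own string. A minimal sketch of the idiom with hypothetical states, plus the bounds check the original omits:

    #include <linux/bitops.h>
    #include <linux/kernel.h>

    #define ST_GOOD   0x01      /* hypothetical one-hot states */
    #define ST_FAULT  0x02

    static const char *state_str(unsigned int state)
    {
            static const char * const names[] = {
                    "Unknown", "Good", "Fault",
            };
            unsigned int idx = ffs(state);  /* ffs(0) == 0, ffs(0x2) == 2 */

            return idx < ARRAY_SIZE(names) ? names[idx] : "Unknown";
    }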
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
deleted file mode 100644
index 60ecd3f7cbd2..000000000000
--- a/drivers/block/rsxx/cregs.c
+++ /dev/null
@@ -1,789 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Filename: cregs.c
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#include <linux/completion.h>
-#include <linux/slab.h>
-
-#include "rsxx_priv.h"
-
-#define CREG_TIMEOUT_MSEC 10000
-
-typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
- struct creg_cmd *cmd,
- int st);
-
-struct creg_cmd {
- struct list_head list;
- creg_cmd_cb cb;
- void *cb_private;
- unsigned int op;
- unsigned int addr;
- int cnt8;
- void *buf;
- unsigned int stream;
- unsigned int status;
-};
-
-static struct kmem_cache *creg_cmd_pool;
-
-
-/*------------ Private Functions --------------*/
-
-#if defined(__LITTLE_ENDIAN)
-#define LITTLE_ENDIAN 1
-#elif defined(__BIG_ENDIAN)
-#define LITTLE_ENDIAN 0
-#else
-#error Unknown endianness!!! Aborting...
-#endif
-
-static int copy_to_creg_data(struct rsxx_cardinfo *card,
- int cnt8,
- void *buf,
- unsigned int stream)
-{
- int i = 0;
- u32 *data = buf;
-
- if (unlikely(card->eeh_state))
- return -EIO;
-
- for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
- /*
- * Firmware implementation makes it necessary to byte swap on
- * little endian processors.
- */
- if (LITTLE_ENDIAN && stream)
- iowrite32be(data[i], card->regmap + CREG_DATA(i));
- else
- iowrite32(data[i], card->regmap + CREG_DATA(i));
- }
-
- return 0;
-}
-
-
-static int copy_from_creg_data(struct rsxx_cardinfo *card,
- int cnt8,
- void *buf,
- unsigned int stream)
-{
- int i = 0;
- u32 *data = buf;
-
- if (unlikely(card->eeh_state))
- return -EIO;
-
- for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
- /*
- * Firmware implementation makes it necessary to byte swap on
- * little endian processors.
- */
- if (LITTLE_ENDIAN && stream)
- data[i] = ioread32be(card->regmap + CREG_DATA(i));
- else
- data[i] = ioread32(card->regmap + CREG_DATA(i));
- }
-
- return 0;
-}
-
-static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
-{
- int st;
-
- if (unlikely(card->eeh_state))
- return;
-
- iowrite32(cmd->addr, card->regmap + CREG_ADD);
- iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
-
- if (cmd->op == CREG_OP_WRITE) {
- if (cmd->buf) {
- st = copy_to_creg_data(card, cmd->cnt8,
- cmd->buf, cmd->stream);
- if (st)
- return;
- }
- }
-
- if (unlikely(card->eeh_state))
- return;
-
- /* Setting the valid bit will kick off the command. */
- iowrite32(cmd->op, card->regmap + CREG_CMD);
-}
-
-static void creg_kick_queue(struct rsxx_cardinfo *card)
-{
- if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
- return;
-
- card->creg_ctrl.active = 1;
- card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
- struct creg_cmd, list);
- list_del(&card->creg_ctrl.active_cmd->list);
- card->creg_ctrl.q_depth--;
-
- /*
- * We have to set the timer before we push the new command. Otherwise,
- * we could create a race condition that would occur if the timer
- * was not canceled, and expired after the new command was pushed,
- * but before the command was issued to hardware.
- */
- mod_timer(&card->creg_ctrl.cmd_timer,
- jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));
-
- creg_issue_cmd(card, card->creg_ctrl.active_cmd);
-}
-
-static int creg_queue_cmd(struct rsxx_cardinfo *card,
- unsigned int op,
- unsigned int addr,
- unsigned int cnt8,
- void *buf,
- int stream,
- creg_cmd_cb callback,
- void *cb_private)
-{
- struct creg_cmd *cmd;
-
- /* Don't queue stuff up if we're halted. */
- if (unlikely(card->halt))
- return -EINVAL;
-
- if (card->creg_ctrl.reset)
- return -EAGAIN;
-
- if (cnt8 > MAX_CREG_DATA8)
- return -EINVAL;
-
- cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&cmd->list);
-
- cmd->op = op;
- cmd->addr = addr;
- cmd->cnt8 = cnt8;
- cmd->buf = buf;
- cmd->stream = stream;
- cmd->cb = callback;
- cmd->cb_private = cb_private;
- cmd->status = 0;
-
- spin_lock_bh(&card->creg_ctrl.lock);
- list_add_tail(&cmd->list, &card->creg_ctrl.queue);
- card->creg_ctrl.q_depth++;
- creg_kick_queue(card);
- spin_unlock_bh(&card->creg_ctrl.lock);
-
- return 0;
-}
-
-static void creg_cmd_timed_out(struct timer_list *t)
-{
- struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
- struct creg_cmd *cmd;
-
- spin_lock(&card->creg_ctrl.lock);
- cmd = card->creg_ctrl.active_cmd;
- card->creg_ctrl.active_cmd = NULL;
- spin_unlock(&card->creg_ctrl.lock);
-
- if (cmd == NULL) {
- card->creg_ctrl.creg_stats.creg_timeout++;
- dev_warn(CARD_TO_DEV(card),
- "No active command associated with timeout!\n");
- return;
- }
-
- if (cmd->cb)
- cmd->cb(card, cmd, -ETIMEDOUT);
-
- kmem_cache_free(creg_cmd_pool, cmd);
-
-
- spin_lock(&card->creg_ctrl.lock);
- card->creg_ctrl.active = 0;
- creg_kick_queue(card);
- spin_unlock(&card->creg_ctrl.lock);
-}
-
-
-static void creg_cmd_done(struct work_struct *work)
-{
- struct rsxx_cardinfo *card;
- struct creg_cmd *cmd;
- int st = 0;
-
- card = container_of(work, struct rsxx_cardinfo,
- creg_ctrl.done_work);
-
- /*
- * The timer could not be cancelled for some reason,
- * race to pop the active command.
- */
- if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
- card->creg_ctrl.creg_stats.failed_cancel_timer++;
-
- spin_lock_bh(&card->creg_ctrl.lock);
- cmd = card->creg_ctrl.active_cmd;
- card->creg_ctrl.active_cmd = NULL;
- spin_unlock_bh(&card->creg_ctrl.lock);
-
- if (cmd == NULL) {
- dev_err(CARD_TO_DEV(card),
- "Spurious creg interrupt!\n");
- return;
- }
-
- card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
- cmd->status = card->creg_ctrl.creg_stats.stat;
- if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
- dev_err(CARD_TO_DEV(card),
- "Invalid status on creg command\n");
- /*
- * At this point we're probably reading garbage from HW. Don't
- * do anything else that could mess up the system and let
- * the sync function return an error.
- */
- st = -EIO;
- goto creg_done;
- } else if (cmd->status & CREG_STAT_ERROR) {
- st = -EIO;
- }
-
- if (cmd->op == CREG_OP_READ) {
- unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
-
- /* Paranoid Sanity Checks */
- if (!cmd->buf) {
- dev_err(CARD_TO_DEV(card),
- "Buffer not given for read.\n");
- st = -EIO;
- goto creg_done;
- }
- if (cnt8 != cmd->cnt8) {
- dev_err(CARD_TO_DEV(card),
- "count mismatch\n");
- st = -EIO;
- goto creg_done;
- }
-
- st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
- }
-
-creg_done:
- if (cmd->cb)
- cmd->cb(card, cmd, st);
-
- kmem_cache_free(creg_cmd_pool, cmd);
-
- spin_lock_bh(&card->creg_ctrl.lock);
- card->creg_ctrl.active = 0;
- creg_kick_queue(card);
- spin_unlock_bh(&card->creg_ctrl.lock);
-}
-
-static void creg_reset(struct rsxx_cardinfo *card)
-{
- struct creg_cmd *cmd = NULL;
- struct creg_cmd *tmp;
- unsigned long flags;
-
- /*
- * mutex_trylock is used here because if reset_lock is taken then a
- * reset is already happening. So, we can just go ahead and return.
- */
- if (!mutex_trylock(&card->creg_ctrl.reset_lock))
- return;
-
- card->creg_ctrl.reset = 1;
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- dev_warn(CARD_TO_DEV(card),
- "Resetting creg interface for recovery\n");
-
- /* Cancel outstanding commands */
- spin_lock_bh(&card->creg_ctrl.lock);
- list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
- list_del(&cmd->list);
- card->creg_ctrl.q_depth--;
- if (cmd->cb)
- cmd->cb(card, cmd, -ECANCELED);
- kmem_cache_free(creg_cmd_pool, cmd);
- }
-
- cmd = card->creg_ctrl.active_cmd;
- card->creg_ctrl.active_cmd = NULL;
- if (cmd) {
- if (timer_pending(&card->creg_ctrl.cmd_timer))
- del_timer_sync(&card->creg_ctrl.cmd_timer);
-
- if (cmd->cb)
- cmd->cb(card, cmd, -ECANCELED);
- kmem_cache_free(creg_cmd_pool, cmd);
-
- card->creg_ctrl.active = 0;
- }
- spin_unlock_bh(&card->creg_ctrl.lock);
-
- card->creg_ctrl.reset = 0;
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- mutex_unlock(&card->creg_ctrl.reset_lock);
-}
-
-/* Used for synchronous accesses */
-struct creg_completion {
- struct completion *cmd_done;
- int st;
- u32 creg_status;
-};
-
-static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
- struct creg_cmd *cmd,
- int st)
-{
- struct creg_completion *cmd_completion;
-
- cmd_completion = cmd->cb_private;
- BUG_ON(!cmd_completion);
-
- cmd_completion->st = st;
- cmd_completion->creg_status = cmd->status;
- complete(cmd_completion->cmd_done);
-}
-
-static int __issue_creg_rw(struct rsxx_cardinfo *card,
- unsigned int op,
- unsigned int addr,
- unsigned int cnt8,
- void *buf,
- int stream,
- unsigned int *hw_stat)
-{
- DECLARE_COMPLETION_ONSTACK(cmd_done);
- struct creg_completion completion;
- unsigned long timeout;
- int st;
-
- completion.cmd_done = &cmd_done;
- completion.st = 0;
- completion.creg_status = 0;
-
- st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
- &completion);
- if (st)
- return st;
-
- /*
- * This timeout is necessary for unresponsive hardware. The additional
- * 20 seconds is used to guarantee that each queued creg request has
- * time to complete.
- */
- timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
- card->creg_ctrl.q_depth + 20000);
-
- /*
- * The creg interface is guaranteed to complete. It has a timeout
- * mechanism that will kick in if hardware does not respond.
- */
- st = wait_for_completion_timeout(completion.cmd_done, timeout);
- if (st == 0) {
- /*
- * This is really bad, because the kernel timer did not
- * expire and notify us of a timeout!
- */
- dev_crit(CARD_TO_DEV(card),
- "cregs timer failed\n");
- creg_reset(card);
- return -EIO;
- }
-
- *hw_stat = completion.creg_status;
-
- if (completion.st) {
- /*
- * This read is needed to verify that there has not been any
- * extreme errors that might have occurred, i.e. EEH. The
- * function iowrite32 will not detect EEH errors, so it is
- * necessary that we recover if such an error is the reason
- * for the timeout. This is a dummy read.
- */
- ioread32(card->regmap + SCRATCH);
-
- dev_warn(CARD_TO_DEV(card),
- "creg command failed(%d x%08x)\n",
- completion.st, addr);
- return completion.st;
- }
-
- return 0;
-}
-
-static int issue_creg_rw(struct rsxx_cardinfo *card,
- u32 addr,
- unsigned int size8,
- void *data,
- int stream,
- int read)
-{
- unsigned int hw_stat;
- unsigned int xfer;
- unsigned int op;
- int st;
-
- op = read ? CREG_OP_READ : CREG_OP_WRITE;
-
- do {
- xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
-
- st = __issue_creg_rw(card, op, addr, xfer,
- data, stream, &hw_stat);
- if (st)
- return st;
-
- data = (char *)data + xfer;
- addr += xfer;
- size8 -= xfer;
- } while (size8);
-
- return 0;
-}
-
-/* ---------------------------- Public API ---------------------------------- */
-int rsxx_creg_write(struct rsxx_cardinfo *card,
- u32 addr,
- unsigned int size8,
- void *data,
- int byte_stream)
-{
- return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
-}
-
-int rsxx_creg_read(struct rsxx_cardinfo *card,
- u32 addr,
- unsigned int size8,
- void *data,
- int byte_stream)
-{
- return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
-}
-
-int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
-{
- return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
- sizeof(*state), state, 0);
-}
-
-int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
-{
- unsigned int size;
- int st;
-
- st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
- sizeof(size), &size, 0);
- if (st)
- return st;
-
- *size8 = (u64)size * RSXX_HW_BLK_SIZE;
- return 0;
-}
-
-int rsxx_get_num_targets(struct rsxx_cardinfo *card,
- unsigned int *n_targets)
-{
- return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
- sizeof(*n_targets), n_targets, 0);
-}
-
-int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
- u32 *capabilities)
-{
- return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
- sizeof(*capabilities), capabilities, 0);
-}
-
-int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
-{
- return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
- sizeof(cmd), &cmd, 0);
-}
-
-
-/*----------------- HW Log Functions -------------------*/
-static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
-{
- static char level;
-
- /*
- * New messages start with "<#>", where # is the log level. Messages
- * that extend past the log buffer will use the previous level
- */
- if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
- level = str[1];
- str += 3; /* Skip past the log level. */
- len -= 3;
- }
-
- switch (level) {
- case '0':
- dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '1':
- dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '2':
- dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '3':
- dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '4':
- dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '5':
- dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '6':
- dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '7':
- dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- default:
- dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- }
-}
-
-/*
- * The substrncpy function copies at most `count` bytes of the src string
- * (including the terminating '\0' character, if reached) into dest.
- * Returns the number of bytes copied to dest.
- */
-static int substrncpy(char *dest, const char *src, int count)
-{
- int max_cnt = count;
-
- while (count) {
- count--;
- *dest = *src;
- if (*dest == '\0')
- break;
- src++;
- dest++;
- }
- return max_cnt - count;
-}
-
-
-static void read_hw_log_done(struct rsxx_cardinfo *card,
- struct creg_cmd *cmd,
- int st)
-{
- char *buf;
- char *log_str;
- int cnt;
- int len;
- int off;
-
- buf = cmd->buf;
- off = 0;
-
- /* Failed getting the log message */
- if (st)
- return;
-
- while (off < cmd->cnt8) {
- log_str = &card->log.buf[card->log.buf_len];
- cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
- len = substrncpy(log_str, &buf[off], cnt);
-
- off += len;
- card->log.buf_len += len;
-
- /*
- * Flush the log if we've hit the end of a message or if we've
- * run out of buffer space.
- */
- if ((log_str[len - 1] == '\0') ||
- (card->log.buf_len == LOG_BUF_SIZE8)) {
- if (card->log.buf_len != 1) /* Don't log blank lines. */
- hw_log_msg(card, card->log.buf,
- card->log.buf_len);
- card->log.buf_len = 0;
- }
-
- }
-
- if (cmd->status & CREG_STAT_LOG_PENDING)
- rsxx_read_hw_log(card);
-}
-
-int rsxx_read_hw_log(struct rsxx_cardinfo *card)
-{
- int st;
-
- st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
- sizeof(card->log.tmp), card->log.tmp,
- 1, read_hw_log_done, NULL);
- if (st)
- dev_err(CARD_TO_DEV(card),
- "Failed getting log text\n");
-
- return st;
-}
-
-/*-------------- IOCTL REG Access ------------------*/
-static int issue_reg_cmd(struct rsxx_cardinfo *card,
- struct rsxx_reg_access *cmd,
- int read)
-{
- unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
-
- return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
- cmd->stream, &cmd->stat);
-}
-
-int rsxx_reg_access(struct rsxx_cardinfo *card,
- struct rsxx_reg_access __user *ucmd,
- int read)
-{
- struct rsxx_reg_access cmd;
- int st;
-
- st = copy_from_user(&cmd, ucmd, sizeof(cmd));
- if (st)
- return -EFAULT;
-
- if (cmd.cnt > RSXX_MAX_REG_CNT)
- return -EFAULT;
-
- st = issue_reg_cmd(card, &cmd, read);
- if (st)
- return st;
-
- st = put_user(cmd.stat, &ucmd->stat);
- if (st)
- return -EFAULT;
-
- if (read) {
- st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
- if (st)
- return -EFAULT;
- }
-
- return 0;
-}
-
-void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
-{
- struct creg_cmd *cmd = NULL;
-
- cmd = card->creg_ctrl.active_cmd;
- card->creg_ctrl.active_cmd = NULL;
-
- if (cmd) {
- del_timer_sync(&card->creg_ctrl.cmd_timer);
-
- spin_lock_bh(&card->creg_ctrl.lock);
- list_add(&cmd->list, &card->creg_ctrl.queue);
- card->creg_ctrl.q_depth++;
- card->creg_ctrl.active = 0;
- spin_unlock_bh(&card->creg_ctrl.lock);
- }
-}
-
-void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
-{
- spin_lock_bh(&card->creg_ctrl.lock);
- if (!list_empty(&card->creg_ctrl.queue))
- creg_kick_queue(card);
- spin_unlock_bh(&card->creg_ctrl.lock);
-}
-
-/*------------ Initialization & Setup --------------*/
-int rsxx_creg_setup(struct rsxx_cardinfo *card)
-{
- card->creg_ctrl.active_cmd = NULL;
-
- card->creg_ctrl.creg_wq =
- create_singlethread_workqueue(DRIVER_NAME"_creg");
- if (!card->creg_ctrl.creg_wq)
- return -ENOMEM;
-
- INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
- mutex_init(&card->creg_ctrl.reset_lock);
- INIT_LIST_HEAD(&card->creg_ctrl.queue);
- spin_lock_init(&card->creg_ctrl.lock);
- timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);
-
- return 0;
-}
-
-void rsxx_creg_destroy(struct rsxx_cardinfo *card)
-{
- struct creg_cmd *cmd;
- struct creg_cmd *tmp;
- int cnt = 0;
-
- /* Cancel outstanding commands */
- spin_lock_bh(&card->creg_ctrl.lock);
- list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
- list_del(&cmd->list);
- if (cmd->cb)
- cmd->cb(card, cmd, -ECANCELED);
- kmem_cache_free(creg_cmd_pool, cmd);
- cnt++;
- }
-
- if (cnt)
- dev_info(CARD_TO_DEV(card),
- "Canceled %d queue creg commands\n", cnt);
-
- cmd = card->creg_ctrl.active_cmd;
- card->creg_ctrl.active_cmd = NULL;
- if (cmd) {
- if (timer_pending(&card->creg_ctrl.cmd_timer))
- del_timer_sync(&card->creg_ctrl.cmd_timer);
-
- if (cmd->cb)
- cmd->cb(card, cmd, -ECANCELED);
- dev_info(CARD_TO_DEV(card),
- "Canceled active creg command\n");
- kmem_cache_free(creg_cmd_pool, cmd);
- }
- spin_unlock_bh(&card->creg_ctrl.lock);
-
- cancel_work_sync(&card->creg_ctrl.done_work);
-}
-
-
-int rsxx_creg_init(void)
-{
- creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
- if (!creg_cmd_pool)
- return -ENOMEM;
-
- return 0;
-}
-
-void rsxx_creg_cleanup(void)
-{
- kmem_cache_destroy(creg_cmd_pool);
-}
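__issue_creg_rw() in the deleted file above is the stock kernel recipe for putting a synchronous face on an asynchronous command queue: stack-allocate a completion, hand it to the completion callback via the cb_private pointer, and wait with a timeout scaled to the worst-case queue depth so a wedged card cannot hang the caller forever. A distilled sketch of the pattern, where submit_async() stands in for the driver's real queueing call:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct sync_ctx {
            struct completion done;
            int status;
    };

    static void on_done(void *priv, int status)
    {
            struct sync_ctx *ctx = priv;

            ctx->status = status;
            complete(&ctx->done);
    }

    static int do_sync_cmd(unsigned long timeout_ms)
    {
            struct sync_ctx ctx = { .status = 0 };
            int err;

            init_completion(&ctx.done);

            err = submit_async(on_done, &ctx);      /* hypothetical queueing call */
            if (err)
                    return err;

            /* wait_for_completion_timeout() returns 0 on timeout. A real
             * driver must cancel or reset here, as creg_reset() does, so
             * the callback cannot fire on a dead stack frame. */
            if (!wait_for_completion_timeout(&ctx.done,
                                             msecs_to_jiffies(timeout_ms)))
                    return -ETIMEDOUT;

            return ctx.status;
    }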
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
deleted file mode 100644
index dd33f1bdf3b8..000000000000
--- a/drivers/block/rsxx/dev.c
+++ /dev/null
@@ -1,306 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Filename: dev.c
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#include <linux/hdreg.h>
-#include <linux/genhd.h>
-#include <linux/blkdev.h>
-#include <linux/bio.h>
-
-#include <linux/fs.h>
-
-#include "rsxx_priv.h"
-
-static unsigned int blkdev_minors = 64;
-module_param(blkdev_minors, uint, 0444);
-MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)");
-
-/*
- * For now I'm making this tweakable in case any applications hit this limit.
- * If you see a "bio too big" error in the log you will need to raise this
- * value.
- */
-static unsigned int blkdev_max_hw_sectors = 1024;
-module_param(blkdev_max_hw_sectors, uint, 0444);
-MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");
-
-static unsigned int enable_blkdev = 1;
-module_param(enable_blkdev , uint, 0444);
-MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");
-
-
-struct rsxx_bio_meta {
- struct bio *bio;
- atomic_t pending_dmas;
- atomic_t error;
- unsigned long start_time;
-};
-
-static struct kmem_cache *bio_meta_pool;
-
-static void rsxx_submit_bio(struct bio *bio);
-
-/*----------------- Block Device Operations -----------------*/
-static int rsxx_blkdev_ioctl(struct block_device *bdev,
- fmode_t mode,
- unsigned int cmd,
- unsigned long arg)
-{
- struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
-
- switch (cmd) {
- case RSXX_GETREG:
- return rsxx_reg_access(card, (void __user *)arg, 1);
- case RSXX_SETREG:
- return rsxx_reg_access(card, (void __user *)arg, 0);
- }
-
- return -ENOTTY;
-}
-
-static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
- u64 blocks = card->size8 >> 9;
-
- /*
- * get geometry: Fake it. I haven't found any drivers that set
- * geo->start, so we won't either.
- */
- if (card->size8) {
- geo->heads = 64;
- geo->sectors = 16;
- do_div(blocks, (geo->heads * geo->sectors));
- geo->cylinders = blocks;
- } else {
- geo->heads = 0;
- geo->sectors = 0;
- geo->cylinders = 0;
- }
- return 0;
-}
-
-static const struct block_device_operations rsxx_fops = {
- .owner = THIS_MODULE,
- .submit_bio = rsxx_submit_bio,
- .getgeo = rsxx_getgeo,
- .ioctl = rsxx_blkdev_ioctl,
-};
-
-static void bio_dma_done_cb(struct rsxx_cardinfo *card,
- void *cb_data,
- unsigned int error)
-{
- struct rsxx_bio_meta *meta = cb_data;
-
- if (error)
- atomic_set(&meta->error, 1);
-
- if (atomic_dec_and_test(&meta->pending_dmas)) {
- if (!card->eeh_state && card->gendisk)
- bio_end_io_acct(meta->bio, meta->start_time);
-
- if (atomic_read(&meta->error))
- bio_io_error(meta->bio);
- else
- bio_endio(meta->bio);
- kmem_cache_free(bio_meta_pool, meta);
- }
-}
-
-static void rsxx_submit_bio(struct bio *bio)
-{
- struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
- struct rsxx_bio_meta *bio_meta;
- blk_status_t st = BLK_STS_IOERR;
-
- blk_queue_split(&bio);
-
- might_sleep();
-
- if (!card)
- goto req_err;
-
- if (bio_end_sector(bio) > get_capacity(card->gendisk))
- goto req_err;
-
- if (unlikely(card->halt))
- goto req_err;
-
- if (unlikely(card->dma_fault))
- goto req_err;
-
- if (bio->bi_iter.bi_size == 0) {
- dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
- goto req_err;
- }
-
- bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
- if (!bio_meta) {
- st = BLK_STS_RESOURCE;
- goto req_err;
- }
-
- bio_meta->bio = bio;
- atomic_set(&bio_meta->error, 0);
- atomic_set(&bio_meta->pending_dmas, 0);
-
- if (likely(!card->halt))
- bio_meta->start_time = bio_start_io_acct(bio);
-
- dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
- bio_data_dir(bio) ? 'W' : 'R', bio_meta,
- (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
-
- st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
- bio_dma_done_cb, bio_meta);
- if (st)
- goto queue_err;
-
- return;
-
-queue_err:
- kmem_cache_free(bio_meta_pool, bio_meta);
-req_err:
- if (st)
- bio->bi_status = st;
- bio_endio(bio);
-}
-
-/*----------------- Device Setup -------------------*/
-static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
-{
- unsigned char pci_rev;
-
- pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
-
- return (pci_rev >= RSXX_DISCARD_SUPPORT);
-}
-
-int rsxx_attach_dev(struct rsxx_cardinfo *card)
-{
- int err = 0;
-
- mutex_lock(&card->dev_lock);
-
- /* The block device requires the stripe size from the config. */
- if (enable_blkdev) {
- if (card->config_valid)
- set_capacity(card->gendisk, card->size8 >> 9);
- else
- set_capacity(card->gendisk, 0);
- err = device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
- if (err == 0)
- card->bdev_attached = 1;
- }
-
- mutex_unlock(&card->dev_lock);
-
- if (err)
- blk_cleanup_disk(card->gendisk);
-
- return err;
-}
-
-void rsxx_detach_dev(struct rsxx_cardinfo *card)
-{
- mutex_lock(&card->dev_lock);
-
- if (card->bdev_attached) {
- del_gendisk(card->gendisk);
- card->bdev_attached = 0;
- }
-
- mutex_unlock(&card->dev_lock);
-}
-
-int rsxx_setup_dev(struct rsxx_cardinfo *card)
-{
- unsigned short blk_size;
-
- mutex_init(&card->dev_lock);
-
- if (!enable_blkdev)
- return 0;
-
- card->major = register_blkdev(0, DRIVER_NAME);
- if (card->major < 0) {
- dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
- return -ENOMEM;
- }
-
- card->gendisk = blk_alloc_disk(blkdev_minors);
- if (!card->gendisk) {
- dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
- unregister_blkdev(card->major, DRIVER_NAME);
- return -ENOMEM;
- }
-
- if (card->config_valid) {
- blk_size = card->config.data.block_size;
- blk_queue_dma_alignment(card->gendisk->queue, blk_size - 1);
- blk_queue_logical_block_size(card->gendisk->queue, blk_size);
- }
-
- blk_queue_max_hw_sectors(card->gendisk->queue, blkdev_max_hw_sectors);
- blk_queue_physical_block_size(card->gendisk->queue, RSXX_HW_BLK_SIZE);
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, card->gendisk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->gendisk->queue);
- if (rsxx_discard_supported(card)) {
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->gendisk->queue);
- blk_queue_max_discard_sectors(card->gendisk->queue,
- RSXX_HW_BLK_SIZE >> 9);
- card->gendisk->queue->limits.discard_granularity =
- RSXX_HW_BLK_SIZE;
- card->gendisk->queue->limits.discard_alignment =
- RSXX_HW_BLK_SIZE;
- }
-
- snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
- "rsxx%d", card->disk_id);
- card->gendisk->major = card->major;
- card->gendisk->minors = blkdev_minors;
- card->gendisk->fops = &rsxx_fops;
- card->gendisk->private_data = card;
-
- return 0;
-}
-
-void rsxx_destroy_dev(struct rsxx_cardinfo *card)
-{
- if (!enable_blkdev)
- return;
-
- blk_cleanup_disk(card->gendisk);
- card->gendisk = NULL;
- unregister_blkdev(card->major, DRIVER_NAME);
-}
-
-int rsxx_dev_init(void)
-{
- bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
- if (!bio_meta_pool)
- return -ENOMEM;
-
- return 0;
-}
-
-void rsxx_dev_cleanup(void)
-{
- kmem_cache_destroy(bio_meta_pool);
-}
-
-
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
deleted file mode 100644
index ed182f3dd054..000000000000
--- a/drivers/block/rsxx/dma.c
+++ /dev/null
@@ -1,1085 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Filename: dma.c
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#include <linux/slab.h>
-#include "rsxx_priv.h"
-
-struct rsxx_dma {
- struct list_head list;
- u8 cmd;
- unsigned int laddr; /* Logical address */
- struct {
- u32 off;
- u32 cnt;
- } sub_page;
- dma_addr_t dma_addr;
- struct page *page;
- unsigned int pg_off; /* Page Offset */
- rsxx_dma_cb cb;
- void *cb_data;
-};
-
-/* This timeout is used to detect a stalled DMA channel */
-#define DMA_ACTIVITY_TIMEOUT msecs_to_jiffies(10000)
-
-struct hw_status {
- u8 status;
- u8 tag;
- __le16 count;
- __le32 _rsvd2;
- __le64 _rsvd3;
-} __packed;
-
-enum rsxx_dma_status {
- DMA_SW_ERR = 0x1,
- DMA_HW_FAULT = 0x2,
- DMA_CANCELLED = 0x4,
-};
-
-struct hw_cmd {
- u8 command;
- u8 tag;
- u8 _rsvd;
- u8 sub_page; /* Bit[0:2]: 512byte offset */
- /* Bit[4:6]: 512byte count */
- __le32 device_addr;
- __le64 host_addr;
-} __packed;
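-
-/*
- * Both hw_status and hw_cmd are 16 bytes, so the 4096-byte status and
- * command buffers (STATUS_BUFFER_SIZE8/COMMAND_BUFFER_SIZE8) each hold
- * 256 entries, indexed through RSXX_CS_IDX_MASK (0xff).
- */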
-
-enum rsxx_hw_cmd {
- HW_CMD_BLK_DISCARD = 0x70,
- HW_CMD_BLK_WRITE = 0x80,
- HW_CMD_BLK_READ = 0xC0,
- HW_CMD_BLK_RECON_READ = 0xE0,
-};
-
-enum rsxx_hw_status {
- HW_STATUS_CRC = 0x01,
- HW_STATUS_HARD_ERR = 0x02,
- HW_STATUS_SOFT_ERR = 0x04,
- HW_STATUS_FAULT = 0x08,
-};
-
-static struct kmem_cache *rsxx_dma_pool;
-
-struct dma_tracker {
- int next_tag;
- struct rsxx_dma *dma;
-};
-
-struct dma_tracker_list {
- spinlock_t lock;
- int head;
- struct dma_tracker list[];
-};
-
-
-/*----------------- Misc Utility Functions -------------------*/
-static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
-{
- unsigned long long tgt_addr8;
-
- tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
- card->_stripe.upper_mask) |
- ((addr8) & card->_stripe.lower_mask);
- do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
- return tgt_addr8;
-}
-
-static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
-{
- unsigned int tgt;
-
- tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;
-
- return tgt;
-}
-
-void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
-{
- /* Reset all DMA Command/Status Queues */
- iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
-}
-
-static unsigned int get_dma_size(struct rsxx_dma *dma)
-{
- if (dma->sub_page.cnt)
- return dma->sub_page.cnt << 9;
- else
- return RSXX_HW_BLK_SIZE;
-}
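-/* e.g. sub_page.cnt = 2 yields 1024 bytes; cnt = 0 means a full 4 KiB block. */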
-
-
-/*----------------- DMA Tracker -------------------*/
-static void set_tracker_dma(struct dma_tracker_list *trackers,
- int tag,
- struct rsxx_dma *dma)
-{
- trackers->list[tag].dma = dma;
-}
-
-static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
- int tag)
-{
- return trackers->list[tag].dma;
-}
-
-static int pop_tracker(struct dma_tracker_list *trackers)
-{
- int tag;
-
- spin_lock(&trackers->lock);
- tag = trackers->head;
- if (tag != -1) {
- trackers->head = trackers->list[tag].next_tag;
- trackers->list[tag].next_tag = -1;
- }
- spin_unlock(&trackers->lock);
-
- return tag;
-}
-
-static void push_tracker(struct dma_tracker_list *trackers, int tag)
-{
- spin_lock(&trackers->lock);
- trackers->list[tag].next_tag = trackers->head;
- trackers->head = tag;
- trackers->list[tag].dma = NULL;
- spin_unlock(&trackers->lock);
-}
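-
-/*
- * The trackers form a LIFO free list threaded through next_tag and
- * seeded 0..RSXX_MAX_OUTSTANDING_CMDS-1 in rsxx_dma_ctrl_init(); a
- * head of -1 means all 255 tags are in flight and issue must back off.
- */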
-
-
-/*----------------- Interrupt Coalescing -------------*/
-/*
- * Interrupt Coalescing Register Format:
- * Interrupt Timer (64ns units) [15:0]
- * Interrupt Count [24:16]
- * Reserved [31:25]
-*/
-#define INTR_COAL_LATENCY_MASK (0x0000ffff)
-
-#define INTR_COAL_COUNT_SHIFT 16
-#define INTR_COAL_COUNT_BITS 9
-#define INTR_COAL_COUNT_MASK (((1 << INTR_COAL_COUNT_BITS) - 1) << \
- INTR_COAL_COUNT_SHIFT)
-#define INTR_COAL_LATENCY_UNITS_NS 64
-
-
-static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
-{
- u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;
-
- if (mode == RSXX_INTR_COAL_DISABLED)
- return 0;
-
- return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
- (latency_units & INTR_COAL_LATENCY_MASK);
-
-}
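-/*
- * Worked example (coalescing enabled): count = 8, latency = 4096 ns
- * gives latency_units = 4096 / 64 = 0x40, so the register encodes as
- * (8 << 16) | 0x40 = 0x00080040.
- */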
-
-static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
-{
- int i;
- u32 q_depth = 0;
- u32 intr_coal;
-
- if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
- unlikely(card->eeh_state))
- return;
-
- for (i = 0; i < card->n_targets; i++)
- q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);
-
- intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
- q_depth / 2,
- card->config.data.intr_coal.latency);
- iowrite32(intr_coal, card->regmap + INTR_COAL);
-}
-
-/*----------------- RSXX DMA Handling -------------------*/
-static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
-{
- if (dma->cmd != HW_CMD_BLK_DISCARD) {
- if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
- dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- DMA_TO_DEVICE :
- DMA_FROM_DEVICE);
- }
- }
-
- kmem_cache_free(rsxx_dma_pool, dma);
-}
-
-static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
- struct rsxx_dma *dma,
- unsigned int status)
-{
- if (status & DMA_SW_ERR)
- ctrl->stats.dma_sw_err++;
- if (status & DMA_HW_FAULT)
- ctrl->stats.dma_hw_fault++;
- if (status & DMA_CANCELLED)
- ctrl->stats.dma_cancelled++;
-
- if (dma->cb)
- dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
-
- rsxx_free_dma(ctrl, dma);
-}
-
-int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
- struct list_head *q, unsigned int done)
-{
- struct rsxx_dma *dma;
- struct rsxx_dma *tmp;
- int cnt = 0;
-
- list_for_each_entry_safe(dma, tmp, q, list) {
- list_del(&dma->list);
- if (done & COMPLETE_DMA)
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
- else
- rsxx_free_dma(ctrl, dma);
- cnt++;
- }
-
- return cnt;
-}
-
-static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
- struct rsxx_dma *dma)
-{
- /*
- * Requeued DMAs go to the front of the queue so they are issued
- * first.
- */
- spin_lock_bh(&ctrl->queue_lock);
- ctrl->stats.sw_q_depth++;
- list_add(&dma->list, &ctrl->queue);
- spin_unlock_bh(&ctrl->queue_lock);
-}
-
-static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
- struct rsxx_dma *dma,
- u8 hw_st)
-{
- unsigned int status = 0;
- int requeue_cmd = 0;
-
- dev_dbg(CARD_TO_DEV(ctrl->card),
- "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
- dma->cmd, dma->laddr, hw_st);
-
- if (hw_st & HW_STATUS_CRC)
- ctrl->stats.crc_errors++;
- if (hw_st & HW_STATUS_HARD_ERR)
- ctrl->stats.hard_errors++;
- if (hw_st & HW_STATUS_SOFT_ERR)
- ctrl->stats.soft_errors++;
-
- switch (dma->cmd) {
- case HW_CMD_BLK_READ:
- if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
- if (ctrl->card->scrub_hard) {
- dma->cmd = HW_CMD_BLK_RECON_READ;
- requeue_cmd = 1;
- ctrl->stats.reads_retried++;
- } else {
- status |= DMA_HW_FAULT;
- ctrl->stats.reads_failed++;
- }
- } else if (hw_st & HW_STATUS_FAULT) {
- status |= DMA_HW_FAULT;
- ctrl->stats.reads_failed++;
- }
-
- break;
- case HW_CMD_BLK_RECON_READ:
- if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
- /* Data could not be reconstructed. */
- status |= DMA_HW_FAULT;
- ctrl->stats.reads_failed++;
- }
-
- break;
- case HW_CMD_BLK_WRITE:
- status |= DMA_HW_FAULT;
- ctrl->stats.writes_failed++;
-
- break;
- case HW_CMD_BLK_DISCARD:
- status |= DMA_HW_FAULT;
- ctrl->stats.discards_failed++;
-
- break;
- default:
- dev_err(CARD_TO_DEV(ctrl->card),
- "Unknown command in DMA!(cmd: x%02x "
- "laddr x%08x st: x%02x\n",
- dma->cmd, dma->laddr, hw_st);
- status |= DMA_SW_ERR;
-
- break;
- }
-
- if (requeue_cmd)
- rsxx_requeue_dma(ctrl, dma);
- else
- rsxx_complete_dma(ctrl, dma, status);
-}
-
-static void dma_engine_stalled(struct timer_list *t)
-{
- struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
- int cnt;
-
- if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
- unlikely(ctrl->card->eeh_state))
- return;
-
- if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
- /*
- * The dma engine was stalled because the SW_CMD_IDX write
- * was lost. Issue it again to recover.
- */
- dev_warn(CARD_TO_DEV(ctrl->card),
- "SW_CMD_IDX write was lost, re-writing...\n");
- iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
- mod_timer(&ctrl->activity_timer,
- jiffies + DMA_ACTIVITY_TIMEOUT);
- } else {
- dev_warn(CARD_TO_DEV(ctrl->card),
- "DMA channel %d has stalled, faulting interface.\n",
- ctrl->id);
- ctrl->card->dma_fault = 1;
-
- /* Clean up the DMA queue */
- spin_lock(&ctrl->queue_lock);
- cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
- spin_unlock(&ctrl->queue_lock);
-
- cnt += rsxx_dma_cancel(ctrl);
-
- if (cnt)
- dev_info(CARD_TO_DEV(ctrl->card),
- "Freed %d queued DMAs on channel %d\n",
- cnt, ctrl->id);
- }
-}
-
-static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
-{
- struct rsxx_dma *dma;
- int tag;
- int cmds_pending = 0;
- struct hw_cmd *hw_cmd_buf;
- int dir;
-
- hw_cmd_buf = ctrl->cmd.buf;
-
- if (unlikely(ctrl->card->halt) ||
- unlikely(ctrl->card->eeh_state))
- return;
-
- while (1) {
- spin_lock_bh(&ctrl->queue_lock);
- if (list_empty(&ctrl->queue)) {
- spin_unlock_bh(&ctrl->queue_lock);
- break;
- }
- spin_unlock_bh(&ctrl->queue_lock);
-
- tag = pop_tracker(ctrl->trackers);
- if (tag == -1)
- break;
-
- spin_lock_bh(&ctrl->queue_lock);
- dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
- list_del(&dma->list);
- ctrl->stats.sw_q_depth--;
- spin_unlock_bh(&ctrl->queue_lock);
-
- /*
- * This will catch any DMAs that slipped in right before the
- * fault, but were queued after all the other DMAs were
- * cancelled.
- */
- if (unlikely(ctrl->card->dma_fault)) {
- push_tracker(ctrl->trackers, tag);
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
- continue;
- }
-
- if (dma->cmd != HW_CMD_BLK_DISCARD) {
- if (dma->cmd == HW_CMD_BLK_WRITE)
- dir = DMA_TO_DEVICE;
- else
- dir = DMA_FROM_DEVICE;
-
- /*
- * The function dma_map_page is placed here because we
- * can only, by design, issue up to 255 commands to the
- * hardware at one time per DMA channel. So the maximum
- * amount of mapped memory would be 255 * 4 channels *
- * 4096 Bytes which is less than 2GB, the limit of a x8
- * Non-HWWD PCIe slot. This way the dma_map_page
- * function should never fail because of a lack of
- * mappable memory.
- */
- dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
- dma->pg_off, dma->sub_page.cnt << 9, dir);
- if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
- push_tracker(ctrl->trackers, tag);
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
- continue;
- }
- }
-
- set_tracker_dma(ctrl->trackers, tag, dma);
- hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
- hw_cmd_buf[ctrl->cmd.idx].tag = tag;
- hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
- hw_cmd_buf[ctrl->cmd.idx].sub_page =
- ((dma->sub_page.cnt & 0x7) << 4) |
- (dma->sub_page.off & 0x7);
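- /* e.g. off = 1, cnt = 2 encodes as (2 << 4) | 1 = 0x21 (see hw_cmd). */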
-
- hw_cmd_buf[ctrl->cmd.idx].device_addr =
- cpu_to_le32(dma->laddr);
-
- hw_cmd_buf[ctrl->cmd.idx].host_addr =
- cpu_to_le64(dma->dma_addr);
-
- dev_dbg(CARD_TO_DEV(ctrl->card),
- "Issue DMA%d(laddr %d tag %d) to idx %d\n",
- ctrl->id, dma->laddr, tag, ctrl->cmd.idx);
-
- ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
- cmds_pending++;
-
- if (dma->cmd == HW_CMD_BLK_WRITE)
- ctrl->stats.writes_issued++;
- else if (dma->cmd == HW_CMD_BLK_DISCARD)
- ctrl->stats.discards_issued++;
- else
- ctrl->stats.reads_issued++;
- }
-
- /* Let HW know we've queued commands. */
- if (cmds_pending) {
- atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
- mod_timer(&ctrl->activity_timer,
- jiffies + DMA_ACTIVITY_TIMEOUT);
-
- if (unlikely(ctrl->card->eeh_state)) {
- del_timer_sync(&ctrl->activity_timer);
- return;
- }
-
- iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
- }
-}
-
-static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
-{
- struct rsxx_dma *dma;
- unsigned long flags;
- u16 count;
- u8 status;
- u8 tag;
- struct hw_status *hw_st_buf;
-
- hw_st_buf = ctrl->status.buf;
-
- if (unlikely(ctrl->card->halt) ||
- unlikely(ctrl->card->dma_fault) ||
- unlikely(ctrl->card->eeh_state))
- return;
-
- count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
-
- while (count == ctrl->e_cnt) {
- /*
- * The read memory-barrier is necessary to keep aggressive
- * processors/optimizers (such as the PPC Apple G5) from
- * reordering the following status-buffer tag & status read
- * *before* the count read on subsequent iterations of the
- * loop!
- */
- rmb();
-
- status = hw_st_buf[ctrl->status.idx].status;
- tag = hw_st_buf[ctrl->status.idx].tag;
-
- dma = get_tracker_dma(ctrl->trackers, tag);
- if (dma == NULL) {
- spin_lock_irqsave(&ctrl->card->irq_lock, flags);
- rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
- spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
-
- dev_err(CARD_TO_DEV(ctrl->card),
- "No tracker for tag %d "
- "(idx %d id %d)\n",
- tag, ctrl->status.idx, ctrl->id);
- return;
- }
-
- dev_dbg(CARD_TO_DEV(ctrl->card),
- "Completing DMA%d"
- "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
- ctrl->id, dma->laddr, tag, status, count,
- ctrl->status.idx);
-
- atomic_dec(&ctrl->stats.hw_q_depth);
-
- mod_timer(&ctrl->activity_timer,
- jiffies + DMA_ACTIVITY_TIMEOUT);
-
- if (status)
- rsxx_handle_dma_error(ctrl, dma, status);
- else
- rsxx_complete_dma(ctrl, dma, 0);
-
- push_tracker(ctrl->trackers, tag);
-
- ctrl->status.idx = (ctrl->status.idx + 1) &
- RSXX_CS_IDX_MASK;
- ctrl->e_cnt++;
-
- count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
- }
-
- dma_intr_coal_auto_tune(ctrl->card);
-
- if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
- del_timer_sync(&ctrl->activity_timer);
-
- spin_lock_irqsave(&ctrl->card->irq_lock, flags);
- rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
- spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
-
- spin_lock_bh(&ctrl->queue_lock);
- if (ctrl->stats.sw_q_depth)
- queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
- spin_unlock_bh(&ctrl->queue_lock);
-}
-
-static void rsxx_schedule_issue(struct work_struct *work)
-{
- struct rsxx_dma_ctrl *ctrl;
-
- ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
-
- mutex_lock(&ctrl->work_lock);
- rsxx_issue_dmas(ctrl);
- mutex_unlock(&ctrl->work_lock);
-}
-
-static void rsxx_schedule_done(struct work_struct *work)
-{
- struct rsxx_dma_ctrl *ctrl;
-
- ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
-
- mutex_lock(&ctrl->work_lock);
- rsxx_dma_done(ctrl);
- mutex_unlock(&ctrl->work_lock);
-}
-
-static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
- struct list_head *q,
- unsigned int laddr,
- rsxx_dma_cb cb,
- void *cb_data)
-{
- struct rsxx_dma *dma;
-
- dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
- if (!dma)
- return BLK_STS_RESOURCE;
-
- dma->cmd = HW_CMD_BLK_DISCARD;
- dma->laddr = laddr;
- dma->dma_addr = 0;
- dma->sub_page.off = 0;
- dma->sub_page.cnt = 0;
- dma->page = NULL;
- dma->pg_off = 0;
- dma->cb = cb;
- dma->cb_data = cb_data;
-
- dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);
-
- list_add_tail(&dma->list, q);
-
- return 0;
-}
-
-static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
- struct list_head *q,
- int dir,
- unsigned int dma_off,
- unsigned int dma_len,
- unsigned int laddr,
- struct page *page,
- unsigned int pg_off,
- rsxx_dma_cb cb,
- void *cb_data)
-{
- struct rsxx_dma *dma;
-
- dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
- if (!dma)
- return BLK_STS_RESOURCE;
-
- dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
- dma->laddr = laddr;
- dma->sub_page.off = (dma_off >> 9);
- dma->sub_page.cnt = (dma_len >> 9);
- dma->page = page;
- dma->pg_off = pg_off;
- dma->cb = cb;
- dma->cb_data = cb_data;
-
- dev_dbg(CARD_TO_DEV(card),
- "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
- dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
- dma->sub_page.cnt, dma->page, dma->pg_off);
-
- /* Queue the DMA */
- list_add_tail(&dma->list, q);
-
- return 0;
-}
-
-blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
- struct bio *bio,
- atomic_t *n_dmas,
- rsxx_dma_cb cb,
- void *cb_data)
-{
- struct list_head dma_list[RSXX_MAX_TARGETS];
- struct bio_vec bvec;
- struct bvec_iter iter;
- unsigned long long addr8;
- unsigned int laddr;
- unsigned int bv_len;
- unsigned int bv_off;
- unsigned int dma_off;
- unsigned int dma_len;
- int dma_cnt[RSXX_MAX_TARGETS];
- int tgt;
- blk_status_t st;
- int i;
-
- addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
- atomic_set(n_dmas, 0);
-
- for (i = 0; i < card->n_targets; i++) {
- INIT_LIST_HEAD(&dma_list[i]);
- dma_cnt[i] = 0;
- }
-
- if (bio_op(bio) == REQ_OP_DISCARD) {
- bv_len = bio->bi_iter.bi_size;
-
- while (bv_len > 0) {
- tgt = rsxx_get_dma_tgt(card, addr8);
- laddr = rsxx_addr8_to_laddr(addr8, card);
-
- st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
- cb, cb_data);
- if (st)
- goto bvec_err;
-
- dma_cnt[tgt]++;
- atomic_inc(n_dmas);
- addr8 += RSXX_HW_BLK_SIZE;
- bv_len -= RSXX_HW_BLK_SIZE;
- }
- } else {
- bio_for_each_segment(bvec, bio, iter) {
- bv_len = bvec.bv_len;
- bv_off = bvec.bv_offset;
-
- while (bv_len > 0) {
- tgt = rsxx_get_dma_tgt(card, addr8);
- laddr = rsxx_addr8_to_laddr(addr8, card);
- dma_off = addr8 & RSXX_HW_BLK_MASK;
- dma_len = min(bv_len,
- RSXX_HW_BLK_SIZE - dma_off);
-
- st = rsxx_queue_dma(card, &dma_list[tgt],
- bio_data_dir(bio),
- dma_off, dma_len,
- laddr, bvec.bv_page,
- bv_off, cb, cb_data);
- if (st)
- goto bvec_err;
-
- dma_cnt[tgt]++;
- atomic_inc(n_dmas);
- addr8 += dma_len;
- bv_off += dma_len;
- bv_len -= dma_len;
- }
- }
- }
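- /*
- * Illustrative: a 16 KiB request starting at byte 0 has now been
- * split into four 4 KiB DMAs, each queued on the per-target list
- * the stripe map selected for its address.
- */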
-
- for (i = 0; i < card->n_targets; i++) {
- if (!list_empty(&dma_list[i])) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
- list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
- spin_unlock_bh(&card->ctrl[i].queue_lock);
-
- queue_work(card->ctrl[i].issue_wq,
- &card->ctrl[i].issue_dma_work);
- }
- }
-
- return 0;
-
-bvec_err:
- for (i = 0; i < card->n_targets; i++)
- rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
- FREE_DMA);
- return st;
-}
-
-
-/*----------------- DMA Engine Initialization & Setup -------------------*/
-int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
-{
- ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8,
- &ctrl->status.dma_addr, GFP_KERNEL);
- ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8,
- &ctrl->cmd.dma_addr, GFP_KERNEL);
- if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
- return -ENOMEM;
-
- memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
- iowrite32(lower_32_bits(ctrl->status.dma_addr),
- ctrl->regmap + SB_ADD_LO);
- iowrite32(upper_32_bits(ctrl->status.dma_addr),
- ctrl->regmap + SB_ADD_HI);
-
- memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
- iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
- iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
-
- ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
- if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
- dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
- ctrl->status.idx);
- return -EINVAL;
- }
- iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
- iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
-
- ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
- if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
- dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
- ctrl->cmd.idx);
- return -EINVAL;
- }
- iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
- iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
-
- return 0;
-}
-
-static int rsxx_dma_ctrl_init(struct pci_dev *dev,
- struct rsxx_dma_ctrl *ctrl)
-{
- int i;
- int st;
-
- memset(&ctrl->stats, 0, sizeof(ctrl->stats));
-
- ctrl->trackers = vmalloc(struct_size(ctrl->trackers, list,
- RSXX_MAX_OUTSTANDING_CMDS));
- if (!ctrl->trackers)
- return -ENOMEM;
-
- ctrl->trackers->head = 0;
- for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
- ctrl->trackers->list[i].next_tag = i + 1;
- ctrl->trackers->list[i].dma = NULL;
- }
- ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
- spin_lock_init(&ctrl->trackers->lock);
-
- spin_lock_init(&ctrl->queue_lock);
- mutex_init(&ctrl->work_lock);
- INIT_LIST_HEAD(&ctrl->queue);
-
- timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);
-
- ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
- if (!ctrl->issue_wq)
- return -ENOMEM;
-
- ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
- if (!ctrl->done_wq)
- return -ENOMEM;
-
- INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
- INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
-
- st = rsxx_hw_buffers_init(dev, ctrl);
- if (st)
- return st;
-
- return 0;
-}
-
-static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
- unsigned int stripe_size8)
-{
- if (!is_power_of_2(stripe_size8)) {
- dev_err(CARD_TO_DEV(card),
- "stripe_size is NOT a power of 2!\n");
- return -EINVAL;
- }
-
- card->_stripe.lower_mask = stripe_size8 - 1;
-
- card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
- card->_stripe.upper_shift = ffs(card->n_targets) - 1;
-
- card->_stripe.target_mask = card->n_targets - 1;
- card->_stripe.target_shift = ffs(stripe_size8) - 1;
-
- dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
- card->_stripe.lower_mask);
- dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
- card->_stripe.upper_shift);
- dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
- card->_stripe.upper_mask);
- dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
- card->_stripe.target_mask);
- dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
- card->_stripe.target_shift);
-
- return 0;
-}
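-/*
- * Worked example (stripe_size8 = 4096, n_targets = 2): target_shift =
- * 12 and target_mask = 1, so 4 KiB blocks alternate between the two
- * targets; block 3 (addr8 0x3000) maps to target 1, laddr 1.
- */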
-
-int rsxx_dma_configure(struct rsxx_cardinfo *card)
-{
- u32 intr_coal;
-
- intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
- card->config.data.intr_coal.count,
- card->config.data.intr_coal.latency);
- iowrite32(intr_coal, card->regmap + INTR_COAL);
-
- return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
-}
-
-int rsxx_dma_setup(struct rsxx_cardinfo *card)
-{
- unsigned long flags;
- int st;
- int i;
-
- dev_info(CARD_TO_DEV(card),
- "Initializing %d DMA targets\n",
- card->n_targets);
-
- /* Regmap is divided up into 4K chunks. One for each DMA channel */
- for (i = 0; i < card->n_targets; i++)
- card->ctrl[i].regmap = card->regmap + (i * 4096);
-
- card->dma_fault = 0;
-
- /* Reset the DMA queues */
- rsxx_dma_queue_reset(card);
-
- /************* Setup DMA Control *************/
- for (i = 0; i < card->n_targets; i++) {
- st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
- if (st)
- goto failed_dma_setup;
-
- card->ctrl[i].card = card;
- card->ctrl[i].id = i;
- }
-
- card->scrub_hard = 1;
-
- if (card->config_valid)
- rsxx_dma_configure(card);
-
- /* Enable the interrupts after all setup has completed. */
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
- spin_unlock_irqrestore(&card->irq_lock, flags);
- }
-
- return 0;
-
-failed_dma_setup:
- for (i = 0; i < card->n_targets; i++) {
- struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];
-
- if (ctrl->issue_wq) {
- destroy_workqueue(ctrl->issue_wq);
- ctrl->issue_wq = NULL;
- }
-
- if (ctrl->done_wq) {
- destroy_workqueue(ctrl->done_wq);
- ctrl->done_wq = NULL;
- }
-
- vfree(ctrl->trackers);
-
- if (ctrl->status.buf)
- dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
- ctrl->status.buf,
- ctrl->status.dma_addr);
- if (ctrl->cmd.buf)
- dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
- ctrl->cmd.buf, ctrl->cmd.dma_addr);
- }
-
- return st;
-}
-
-int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
-{
- struct rsxx_dma *dma;
- int i;
- int cnt = 0;
-
- /* Clean up issued DMAs */
- for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
- dma = get_tracker_dma(ctrl->trackers, i);
- if (dma) {
- atomic_dec(&ctrl->stats.hw_q_depth);
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
- push_tracker(ctrl->trackers, i);
- cnt++;
- }
- }
-
- return cnt;
-}
-
-void rsxx_dma_destroy(struct rsxx_cardinfo *card)
-{
- struct rsxx_dma_ctrl *ctrl;
- int i;
-
- for (i = 0; i < card->n_targets; i++) {
- ctrl = &card->ctrl[i];
-
- if (ctrl->issue_wq) {
- destroy_workqueue(ctrl->issue_wq);
- ctrl->issue_wq = NULL;
- }
-
- if (ctrl->done_wq) {
- destroy_workqueue(ctrl->done_wq);
- ctrl->done_wq = NULL;
- }
-
- if (timer_pending(&ctrl->activity_timer))
- del_timer_sync(&ctrl->activity_timer);
-
- /* Clean up the DMA queue */
- spin_lock_bh(&ctrl->queue_lock);
- rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
- spin_unlock_bh(&ctrl->queue_lock);
-
- rsxx_dma_cancel(ctrl);
-
- vfree(ctrl->trackers);
-
- dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
- ctrl->status.buf, ctrl->status.dma_addr);
- dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
- ctrl->cmd.buf, ctrl->cmd.dma_addr);
- }
-}
-
-int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
-{
- int i;
- int j;
- int cnt;
- struct rsxx_dma *dma;
- struct list_head *issued_dmas;
-
- issued_dmas = kcalloc(card->n_targets, sizeof(*issued_dmas),
- GFP_KERNEL);
- if (!issued_dmas)
- return -ENOMEM;
-
- for (i = 0; i < card->n_targets; i++) {
- INIT_LIST_HEAD(&issued_dmas[i]);
- cnt = 0;
- for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
- dma = get_tracker_dma(card->ctrl[i].trackers, j);
- if (dma == NULL)
- continue;
-
- if (dma->cmd == HW_CMD_BLK_WRITE)
- card->ctrl[i].stats.writes_issued--;
- else if (dma->cmd == HW_CMD_BLK_DISCARD)
- card->ctrl[i].stats.discards_issued--;
- else
- card->ctrl[i].stats.reads_issued--;
-
- if (dma->cmd != HW_CMD_BLK_DISCARD) {
- dma_unmap_page(&card->dev->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- DMA_TO_DEVICE :
- DMA_FROM_DEVICE);
- }
-
- list_add_tail(&dma->list, &issued_dmas[i]);
- push_tracker(card->ctrl[i].trackers, j);
- cnt++;
- }
-
- spin_lock_bh(&card->ctrl[i].queue_lock);
- list_splice(&issued_dmas[i], &card->ctrl[i].queue);
-
- atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
- card->ctrl[i].stats.sw_q_depth += cnt;
- card->ctrl[i].e_cnt = 0;
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- }
-
- kfree(issued_dmas);
-
- return 0;
-}
-
-int rsxx_dma_init(void)
-{
- rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
- if (!rsxx_dma_pool)
- return -ENOMEM;
-
- return 0;
-}
-
-
-void rsxx_dma_cleanup(void)
-{
- kmem_cache_destroy(rsxx_dma_pool);
-}
-
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h
deleted file mode 100644
index 4f84905a6fd2..000000000000
--- a/drivers/block/rsxx/rsxx.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
-* Filename: rsxx.h
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#ifndef __RSXX_H__
-#define __RSXX_H__
-
-/*----------------- IOCTL Definitions -------------------*/
-
-#define RSXX_MAX_DATA 8
-
-struct rsxx_reg_access {
- __u32 addr;
- __u32 cnt;
- __u32 stat;
- __u32 stream;
- __u32 data[RSXX_MAX_DATA];
-};
-
-#define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32)))
-
-#define RSXX_IOC_MAGIC 'r'
-
-#define RSXX_GETREG _IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access)
-#define RSXX_SETREG _IOWR(RSXX_IOC_MAGIC, 0x21, struct rsxx_reg_access)
-
-#endif /* __RSXX_H__ */
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h
deleted file mode 100644
index 2b79015f5849..000000000000
--- a/drivers/block/rsxx/rsxx_cfg.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
-* Filename: rsxx_cfg.h
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#ifndef __RSXX_CFG_H__
-#define __RSXX_CFG_H__
-
-/* NOTE: Config values will be saved in network byte order (i.e. Big endian) */
-#include <linux/types.h>
-
-/*
- * The card config version must match the driver's expected version. If it does
- * not, the DMA interfaces will not be attached and the user will need to
- * initialize/upgrade the card configuration using the card config utility.
- */
-#define RSXX_CFG_VERSION 4
-
-struct card_cfg_hdr {
- __u32 version;
- __u32 crc;
-};
-
-struct card_cfg_data {
- __u32 block_size;
- __u32 stripe_size;
- __u32 vendor_id;
- __u32 cache_order;
- struct {
- __u32 mode; /* Disabled, manual, auto-tune... */
- __u32 count; /* Number of intr to coalesce */
- __u32 latency;/* Max wait time (in ns) */
- } intr_coal;
-};
-
-struct rsxx_card_cfg {
- struct card_cfg_hdr hdr;
- struct card_cfg_data data;
-};
-
-/* Vendor ID Values */
-#define RSXX_VENDOR_ID_IBM 0
-#define RSXX_VENDOR_ID_DSI 1
-#define RSXX_VENDOR_COUNT 2
-
-/* Interrupt Coalescing Values */
-#define RSXX_INTR_COAL_DISABLED 0
-#define RSXX_INTR_COAL_EXPLICIT 1
-#define RSXX_INTR_COAL_AUTO_TUNE 2
-
-
-#endif /* __RSXX_CFG_H__ */
-
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
deleted file mode 100644
index 26c320c0d924..000000000000
--- a/drivers/block/rsxx/rsxx_priv.h
+++ /dev/null
@@ -1,418 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
-* Filename: rsxx_priv.h
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#ifndef __RSXX_PRIV_H__
-#define __RSXX_PRIV_H__
-
-#include <linux/semaphore.h>
-
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/sysfs.h>
-#include <linux/workqueue.h>
-#include <linux/bio.h>
-#include <linux/vmalloc.h>
-#include <linux/timer.h>
-#include <linux/ioctl.h>
-#include <linux/delay.h>
-
-#include "rsxx.h"
-#include "rsxx_cfg.h"
-
-struct proc_cmd;
-
-#define PCI_DEVICE_ID_FS70_FLASH 0x04A9
-#define PCI_DEVICE_ID_FS80_FLASH 0x04AA
-
-#define RS70_PCI_REV_SUPPORTED 4
-
-#define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "4.0.3.2516"
-
-/* Block size is 4096 */
-#define RSXX_HW_BLK_SHIFT 12
-#define RSXX_HW_BLK_SIZE (1 << RSXX_HW_BLK_SHIFT)
-#define RSXX_HW_BLK_MASK (RSXX_HW_BLK_SIZE - 1)
-
-#define MAX_CREG_DATA8 32
-#define LOG_BUF_SIZE8 128
-
-#define RSXX_MAX_OUTSTANDING_CMDS 255
-#define RSXX_CS_IDX_MASK 0xff
-
-#define STATUS_BUFFER_SIZE8 4096
-#define COMMAND_BUFFER_SIZE8 4096
-
-#define RSXX_MAX_TARGETS 8
-
-struct dma_tracker_list;
-
-/* DMA Command/Status Buffer structure */
-struct rsxx_cs_buffer {
- dma_addr_t dma_addr;
- void *buf;
- u32 idx;
-};
-
-struct rsxx_dma_stats {
- u32 crc_errors;
- u32 hard_errors;
- u32 soft_errors;
- u32 writes_issued;
- u32 writes_failed;
- u32 reads_issued;
- u32 reads_failed;
- u32 reads_retried;
- u32 discards_issued;
- u32 discards_failed;
- u32 done_rescheduled;
- u32 issue_rescheduled;
- u32 dma_sw_err;
- u32 dma_hw_fault;
- u32 dma_cancelled;
- u32 sw_q_depth; /* Number of DMAs on the SW queue. */
- atomic_t hw_q_depth; /* Number of DMAs queued to HW. */
-};
-
-struct rsxx_dma_ctrl {
- struct rsxx_cardinfo *card;
- int id;
- void __iomem *regmap;
- struct rsxx_cs_buffer status;
- struct rsxx_cs_buffer cmd;
- u16 e_cnt;
- spinlock_t queue_lock;
- struct list_head queue;
- struct workqueue_struct *issue_wq;
- struct work_struct issue_dma_work;
- struct workqueue_struct *done_wq;
- struct work_struct dma_done_work;
- struct timer_list activity_timer;
- struct dma_tracker_list *trackers;
- struct rsxx_dma_stats stats;
- struct mutex work_lock;
-};
-
-struct rsxx_cardinfo {
- struct pci_dev *dev;
- unsigned int halt;
- unsigned int eeh_state;
-
- void __iomem *regmap;
- spinlock_t irq_lock;
- unsigned int isr_mask;
- unsigned int ier_mask;
-
- struct rsxx_card_cfg config;
- int config_valid;
-
- /* Embedded CPU Communication */
- struct {
- spinlock_t lock;
- bool active;
- struct creg_cmd *active_cmd;
- struct workqueue_struct *creg_wq;
- struct work_struct done_work;
- struct list_head queue;
- unsigned int q_depth;
- /* Cache the creg status to prevent ioreads */
- struct {
- u32 stat;
- u32 failed_cancel_timer;
- u32 creg_timeout;
- } creg_stats;
- struct timer_list cmd_timer;
- struct mutex reset_lock;
- int reset;
- } creg_ctrl;
-
- struct {
- char tmp[MAX_CREG_DATA8];
- char buf[LOG_BUF_SIZE8]; /* terminated */
- int buf_len;
- } log;
-
- struct workqueue_struct *event_wq;
- struct work_struct event_work;
- unsigned int state;
- u64 size8;
-
- /* Lock the device attach/detach function */
- struct mutex dev_lock;
-
- /* Block Device Variables */
- bool bdev_attached;
- int disk_id;
- int major;
- struct gendisk *gendisk;
- struct {
- /* Used to convert a byte address to a device address. */
- u64 lower_mask;
- u64 upper_shift;
- u64 upper_mask;
- u64 target_mask;
- u64 target_shift;
- } _stripe;
- unsigned int dma_fault;
-
- int scrub_hard;
-
- int n_targets;
- struct rsxx_dma_ctrl *ctrl;
-
- struct dentry *debugfs_dir;
-};
-
-enum rsxx_pci_regmap {
- HWID = 0x00, /* Hardware Identification Register */
- SCRATCH = 0x04, /* Scratch/Debug Register */
- RESET = 0x08, /* Reset Register */
- ISR = 0x10, /* Interrupt Status Register */
- IER = 0x14, /* Interrupt Enable Register */
- IPR = 0x18, /* Interrupt Poll Register */
- CB_ADD_LO = 0x20, /* Command Host Buffer Address [31:0] */
- CB_ADD_HI = 0x24, /* Command Host Buffer Address [63:32]*/
- HW_CMD_IDX = 0x28, /* Hardware Processed Command Index */
- SW_CMD_IDX = 0x2C, /* Software Processed Command Index */
- SB_ADD_LO = 0x30, /* Status Host Buffer Address [31:0] */
- SB_ADD_HI = 0x34, /* Status Host Buffer Address [63:32] */
- HW_STATUS_CNT = 0x38, /* Hardware Status Counter */
- SW_STATUS_CNT = 0x3C, /* Deprecated */
- CREG_CMD = 0x40, /* CPU Command Register */
- CREG_ADD = 0x44, /* CPU Address Register */
- CREG_CNT = 0x48, /* CPU Count Register */
- CREG_STAT = 0x4C, /* CPU Status Register */
- CREG_DATA0 = 0x50, /* CPU Data Registers */
- CREG_DATA1 = 0x54,
- CREG_DATA2 = 0x58,
- CREG_DATA3 = 0x5C,
- CREG_DATA4 = 0x60,
- CREG_DATA5 = 0x64,
- CREG_DATA6 = 0x68,
- CREG_DATA7 = 0x6c,
- INTR_COAL = 0x70, /* Interrupt Coalescing Register */
- HW_ERROR = 0x74, /* Card Error Register */
- PCI_DEBUG0 = 0x78, /* PCI Debug Registers */
- PCI_DEBUG1 = 0x7C,
- PCI_DEBUG2 = 0x80,
- PCI_DEBUG3 = 0x84,
- PCI_DEBUG4 = 0x88,
- PCI_DEBUG5 = 0x8C,
- PCI_DEBUG6 = 0x90,
- PCI_DEBUG7 = 0x94,
- PCI_POWER_THROTTLE = 0x98,
- PERF_CTRL = 0x9c,
- PERF_TIMER_LO = 0xa0,
- PERF_TIMER_HI = 0xa4,
- PERF_RD512_LO = 0xa8,
- PERF_RD512_HI = 0xac,
- PERF_WR512_LO = 0xb0,
- PERF_WR512_HI = 0xb4,
- PCI_RECONFIG = 0xb8,
-};
-
-enum rsxx_intr {
- CR_INTR_DMA0 = 0x00000001,
- CR_INTR_CREG = 0x00000002,
- CR_INTR_DMA1 = 0x00000004,
- CR_INTR_EVENT = 0x00000008,
- CR_INTR_DMA2 = 0x00000010,
- CR_INTR_DMA3 = 0x00000020,
- CR_INTR_DMA4 = 0x00000040,
- CR_INTR_DMA5 = 0x00000080,
- CR_INTR_DMA6 = 0x00000100,
- CR_INTR_DMA7 = 0x00000200,
- CR_INTR_ALL_C = 0x0000003f,
- CR_INTR_ALL_G = 0x000003ff,
- CR_INTR_DMA_ALL = 0x000003f5,
- CR_INTR_ALL = 0xffffffff,
-};
-
-static inline int CR_INTR_DMA(int N)
-{
- static const unsigned int _CR_INTR_DMA[] = {
- CR_INTR_DMA0, CR_INTR_DMA1, CR_INTR_DMA2, CR_INTR_DMA3,
- CR_INTR_DMA4, CR_INTR_DMA5, CR_INTR_DMA6, CR_INTR_DMA7
- };
- return _CR_INTR_DMA[N];
-}
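-/* e.g. CR_INTR_DMA(2) == CR_INTR_DMA2 == 0x00000010. */
-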
-enum rsxx_pci_reset {
- DMA_QUEUE_RESET = 0x00000001,
-};
-
-enum rsxx_hw_fifo_flush {
- RSXX_FLUSH_BUSY = 0x00000002,
- RSXX_FLUSH_TIMEOUT = 0x00000004,
-};
-
-enum rsxx_pci_revision {
- RSXX_DISCARD_SUPPORT = 2,
- RSXX_EEH_SUPPORT = 3,
-};
-
-enum rsxx_creg_cmd {
- CREG_CMD_TAG_MASK = 0x0000FF00,
- CREG_OP_WRITE = 0x000000C0,
- CREG_OP_READ = 0x000000E0,
-};
-
-enum rsxx_creg_addr {
- CREG_ADD_CARD_CMD = 0x80001000,
- CREG_ADD_CARD_STATE = 0x80001004,
- CREG_ADD_CARD_SIZE = 0x8000100c,
- CREG_ADD_CAPABILITIES = 0x80001050,
- CREG_ADD_LOG = 0x80002000,
- CREG_ADD_NUM_TARGETS = 0x80003000,
- CREG_ADD_CRAM = 0xA0000000,
- CREG_ADD_CONFIG = 0xB0000000,
-};
-
-enum rsxx_creg_card_cmd {
- CARD_CMD_STARTUP = 1,
- CARD_CMD_SHUTDOWN = 2,
- CARD_CMD_LOW_LEVEL_FORMAT = 3,
- CARD_CMD_FPGA_RECONFIG_BR = 4,
- CARD_CMD_FPGA_RECONFIG_MAIN = 5,
- CARD_CMD_BACKUP = 6,
- CARD_CMD_RESET = 7,
- CARD_CMD_deprecated = 8,
- CARD_CMD_UNINITIALIZE = 9,
- CARD_CMD_DSTROY_EMERGENCY = 10,
- CARD_CMD_DSTROY_NORMAL = 11,
- CARD_CMD_DSTROY_EXTENDED = 12,
- CARD_CMD_DSTROY_ABORT = 13,
-};
-
-enum rsxx_card_state {
- CARD_STATE_SHUTDOWN = 0x00000001,
- CARD_STATE_STARTING = 0x00000002,
- CARD_STATE_FORMATTING = 0x00000004,
- CARD_STATE_UNINITIALIZED = 0x00000008,
- CARD_STATE_GOOD = 0x00000010,
- CARD_STATE_SHUTTING_DOWN = 0x00000020,
- CARD_STATE_FAULT = 0x00000040,
- CARD_STATE_RD_ONLY_FAULT = 0x00000080,
- CARD_STATE_DSTROYING = 0x00000100,
-};
-
-enum rsxx_led {
- LED_DEFAULT = 0x0,
- LED_IDENTIFY = 0x1,
- LED_SOAK = 0x2,
-};
-
-enum rsxx_creg_flash_lock {
- CREG_FLASH_LOCK = 1,
- CREG_FLASH_UNLOCK = 2,
-};
-
-enum rsxx_card_capabilities {
- CARD_CAP_SUBPAGE_WRITES = 0x00000080,
-};
-
-enum rsxx_creg_stat {
- CREG_STAT_STATUS_MASK = 0x00000003,
- CREG_STAT_SUCCESS = 0x1,
- CREG_STAT_ERROR = 0x2,
- CREG_STAT_CHAR_PENDING = 0x00000004, /* Character I/O pending bit */
- CREG_STAT_LOG_PENDING = 0x00000008, /* HW log message pending bit */
- CREG_STAT_TAG_MASK = 0x0000ff00,
-};
-
-enum rsxx_dma_finish {
- FREE_DMA = 0x0,
- COMPLETE_DMA = 0x1,
-};
-
-static inline unsigned int CREG_DATA(int N)
-{
- return CREG_DATA0 + (N << 2);
-}
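-/* e.g. CREG_DATA(3) == 0x50 + (3 << 2) == 0x5C == CREG_DATA3. */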
-
-/*----------------- Convenient Log Wrappers -------------------*/
-#define CARD_TO_DEV(__CARD) (&(__CARD)->dev->dev)
-
-/***** config.c *****/
-int rsxx_load_config(struct rsxx_cardinfo *card);
-
-/***** core.c *****/
-void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr);
-void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr);
-void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
- unsigned int intr);
-void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
- unsigned int intr);
-
-/***** dev.c *****/
-int rsxx_attach_dev(struct rsxx_cardinfo *card);
-void rsxx_detach_dev(struct rsxx_cardinfo *card);
-int rsxx_setup_dev(struct rsxx_cardinfo *card);
-void rsxx_destroy_dev(struct rsxx_cardinfo *card);
-int rsxx_dev_init(void);
-void rsxx_dev_cleanup(void);
-
-/***** dma.c ****/
-typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
- void *cb_data,
- unsigned int status);
-int rsxx_dma_setup(struct rsxx_cardinfo *card);
-void rsxx_dma_destroy(struct rsxx_cardinfo *card);
-int rsxx_dma_init(void);
-int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
- struct list_head *q,
- unsigned int done);
-int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
-void rsxx_dma_cleanup(void);
-void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
-int rsxx_dma_configure(struct rsxx_cardinfo *card);
-blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
- struct bio *bio,
- atomic_t *n_dmas,
- rsxx_dma_cb cb,
- void *cb_data);
-int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
-int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
-
-/***** cregs.c *****/
-int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
- unsigned int size8,
- void *data,
- int byte_stream);
-int rsxx_creg_read(struct rsxx_cardinfo *card,
- u32 addr,
- unsigned int size8,
- void *data,
- int byte_stream);
-int rsxx_read_hw_log(struct rsxx_cardinfo *card);
-int rsxx_get_card_state(struct rsxx_cardinfo *card,
- unsigned int *state);
-int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8);
-int rsxx_get_num_targets(struct rsxx_cardinfo *card,
- unsigned int *n_targets);
-int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
- u32 *capabilities);
-int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd);
-int rsxx_creg_setup(struct rsxx_cardinfo *card);
-void rsxx_creg_destroy(struct rsxx_cardinfo *card);
-int rsxx_creg_init(void);
-void rsxx_creg_cleanup(void);
-int rsxx_reg_access(struct rsxx_cardinfo *card,
- struct rsxx_reg_access __user *ucmd,
- int read);
-void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card);
-void rsxx_kick_creg_queue(struct rsxx_cardinfo *card);
-
-
-
-#endif /* __RSXX_PRIV_H__ */
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6f45a53f7cbf..146d85d80e0e 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -143,8 +143,8 @@ static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
unsigned command, unsigned long argument)
{
+ struct vdc_port *port = bdev->bd_disk->private_data;
int i;
- struct gendisk *disk;
switch (command) {
case CDROMMULTISESSION:
@@ -155,12 +155,15 @@ static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
return 0;
case CDROM_GET_CAPABILITY:
- disk = bdev->bd_disk;
-
- if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
+ if (!vdc_version_supported(port, 1, 1))
+ return -EINVAL;
+ switch (port->vdisk_mtype) {
+ case VD_MEDIA_TYPE_CD:
+ case VD_MEDIA_TYPE_DVD:
return 0;
- return -EINVAL;
-
+ default:
+ return -EINVAL;
+ }
default:
pr_debug(PFX "ioctl %08x not supported\n", command);
return -EINVAL;
@@ -459,7 +462,7 @@ static int __vdc_tx_trigger(struct vdc_port *port)
static int __send_request(struct request *req)
{
- struct vdc_port *port = req->rq_disk->private_data;
+ struct vdc_port *port = req->q->disk->private_data;
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
struct scatterlist sg[MAX_RING_COOKIES];
struct vdc_req_entry *rqe;
@@ -854,14 +857,12 @@ static int probe_disk(struct vdc_port *port)
switch (port->vdisk_mtype) {
case VD_MEDIA_TYPE_CD:
pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
- g->flags |= GENHD_FL_CD;
g->flags |= GENHD_FL_REMOVABLE;
set_disk_ro(g, 1);
break;
case VD_MEDIA_TYPE_DVD:
pr_info(PFX "Virtual DVD %s\n", port->disk_name);
- g->flags |= GENHD_FL_CD;
g->flags |= GENHD_FL_REMOVABLE;
set_disk_ro(g, 1);
break;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 821594cd1315..fef65a18d56f 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -840,6 +840,7 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->minors = 1;
sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
swd->unit[drive].disk->fops = &floppy_fops;
+ swd->unit[drive].disk->flags |= GENHD_FL_NO_PART;
swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 4b91c9aa5892..6c39f2c9f806 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1227,7 +1227,7 @@ static int swim3_attach(struct macio_dev *mdev,
disk->fops = &floppy_fops;
disk->private_data = fs;
disk->events = DISK_EVENT_MEDIA_CHANGE;
- disk->flags |= GENHD_FL_REMOVABLE;
+ disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
rc = add_disk(disk);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index d1676fe0da1a..b361583944b9 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(NULL, rq, true, NULL);
+ blk_execute_rq_nowait(rq, true, NULL);
return 0;
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(NULL, rq, true, NULL);
+ blk_execute_rq_nowait(rq, true, NULL);
return 0;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ae38776e30e..c443cd64fc9b 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -384,7 +384,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
if (err)
goto out;
- blk_execute_rq(vblk->disk, req, false);
+ blk_execute_rq(req, false);
err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
blk_mq_free_request(req);
@@ -843,7 +843,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->disk->minors = 1 << PART_BITS;
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
- vblk->disk->flags |= GENHD_FL_EXT_DEVT;
vblk->index = index;
/* configure queue flush support */
@@ -977,7 +976,7 @@ static void virtblk_remove(struct virtio_device *vdev)
mutex_lock(&vblk->vdev_mutex);
/* Stop all the virtqueues. */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
vblk->vdev = NULL;
@@ -996,7 +995,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
struct virtio_blk *vblk = vdev->priv;
/* Ensure we don't receive any more interrupts */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 914587aabca0..62125fd4af4a 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -510,7 +510,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
}
vbd->size = vbd_sz(vbd);
- if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
+ if (cdrom || disk_to_cdi(vbd->bdev->bd_disk))
vbd->type |= VDISK_CDROM;
if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
vbd->type |= VDISK_REMOVABLE;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 286cf1afad78..ccd0dd0c6b83 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -198,6 +198,7 @@ struct blkfront_info
struct gendisk *gd;
u16 sector_size;
unsigned int physical_sector_size;
+ unsigned long vdisk_info;
int vdevice;
blkif_vdev_t handle;
enum blkif_state connected;
@@ -505,6 +506,7 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
unsigned command, unsigned long argument)
{
+ struct blkfront_info *info = bdev->bd_disk->private_data;
int i;
switch (command) {
@@ -514,9 +516,9 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
return -EFAULT;
return 0;
case CDROM_GET_CAPABILITY:
- if (bdev->bd_disk->flags & GENHD_FL_CD)
- return 0;
- return -EINVAL;
+ if (!(info->vdisk_info & VDISK_CDROM))
+ return -EINVAL;
+ return 0;
default:
return -EINVAL;
}
@@ -1057,9 +1059,8 @@ static char *encode_disk_name(char *ptr, unsigned int n)
}
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
- struct blkfront_info *info,
- u16 vdisk_info, u16 sector_size,
- unsigned int physical_sector_size)
+ struct blkfront_info *info, u16 sector_size,
+ unsigned int physical_sector_size)
{
struct gendisk *gd;
int nr_minors = 1;
@@ -1157,15 +1158,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
xlvbd_flush(info);
- if (vdisk_info & VDISK_READONLY)
+ if (info->vdisk_info & VDISK_READONLY)
set_disk_ro(gd, 1);
-
- if (vdisk_info & VDISK_REMOVABLE)
+ if (info->vdisk_info & VDISK_REMOVABLE)
gd->flags |= GENHD_FL_REMOVABLE;
- if (vdisk_info & VDISK_CDROM)
- gd->flags |= GENHD_FL_CD;
-
return 0;
out_free_tag_set:
@@ -2313,7 +2310,6 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned long long sectors;
unsigned long sector_size;
unsigned int physical_sector_size;
- unsigned int binfo;
int err, i;
struct blkfront_ring_info *rinfo;
@@ -2351,7 +2347,7 @@ static void blkfront_connect(struct blkfront_info *info)
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"sectors", "%llu", &sectors,
- "info", "%u", &binfo,
+ "info", "%u", &info->vdisk_info,
"sector-size", "%lu", &sector_size,
NULL);
if (err) {
@@ -2380,7 +2376,7 @@ static void blkfront_connect(struct blkfront_info *info)
}
}
- err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
+ err = xlvbd_alloc_gendisk(sectors, info, sector_size,
physical_sector_size);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index ccc52c935faf..7a6ed83481b8 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -327,6 +327,7 @@ static int z2ram_register_disk(int minor)
disk->major = Z2RAM_MAJOR;
disk->first_minor = minor;
disk->minors = 1;
+ disk->flags |= GENHD_FL_NO_PART;
disk->fops = &z2_fops;
if (minor)
sprintf(disk->disk_name, "z2ram%d", minor);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 25071126995b..cb253d80d72b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1903,14 +1903,7 @@ static struct attribute *zram_disk_attrs[] = {
NULL,
};
-static const struct attribute_group zram_disk_attr_group = {
- .attrs = zram_disk_attrs,
-};
-
-static const struct attribute_group *zram_disk_attr_groups[] = {
- &zram_disk_attr_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(zram_disk);
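+/* ATTRIBUTE_GROUPS(zram_disk) generates the zram_disk_groups table used below. */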
/*
* Allocate and initialize new zram device. the function returns
@@ -1947,6 +1940,7 @@ static int zram_add(void)
zram->disk->major = zram_major;
zram->disk->first_minor = device_id;
zram->disk->minors = 1;
+ zram->disk->flags |= GENHD_FL_NO_PART;
zram->disk->fops = &zram_devops;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
@@ -1982,7 +1976,7 @@ static int zram_add(void)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
- ret = device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
+ ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 851842372c9b..36380e618ba4 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -19,6 +19,10 @@ config BT_QCA
tristate
select FW_LOADER
+config BT_MTK
+ tristate
+ select FW_LOADER
+
config BT_HCIBTUSB
tristate "HCI USB driver"
depends on USB
@@ -55,6 +59,7 @@ config BT_HCIBTUSB_BCM
config BT_HCIBTUSB_MTK
bool "MediaTek protocol support"
depends on BT_HCIBTUSB
+ select BT_MTK
default n
help
The MediaTek protocol support enables firmware download
@@ -383,6 +388,7 @@ config BT_ATH3K
config BT_MTKSDIO
tristate "MediaTek HCI SDIO driver"
depends on MMC
+ select BT_MTK
help
MediaTek Bluetooth HCI SDIO driver.
This driver is required if you want to use MediaTek Bluetooth
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 16286ea2655d..3321a8aea4a0 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
obj-$(CONFIG_BT_QCA) += btqca.o
+obj-$(CONFIG_BT_MTK) += btmtk.o
obj-$(CONFIG_BT_VIRTIO) += virtio_bt.o
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 5a321b4076aa..cab93935cc7f 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -628,6 +628,9 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
data->bulk_out_ep = bulk_out_ep->desc.bEndpointAddress;
data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize);
+ if (!data->bulk_pkt_size)
+ goto done;
+
rwlock_init(&data->lock);
data->reassembly = NULL;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index e4182acee488..d9ceca7a7935 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
+#include <linux/dmi.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -343,6 +344,52 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev)
return skb;
}
+static const struct dmi_system_id disable_broken_read_transmit_power[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,1"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,2"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,4"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir8,1"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir8,2"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,1"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,2"),
+ },
+ },
+ { }
+};
+
static int btbcm_read_info(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -363,6 +410,10 @@ static int btbcm_read_info(struct hci_dev *hdev)
bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]);
kfree_skb(skb);
+ /* Read DMI and disable broken Read LE Min/Max Tx Power */
+ if (dmi_first_match(disable_broken_read_transmit_power))
+ set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
+
return 0;
}
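[Editor's note] For context, a hedged sketch of the consumer side, which lives in the Bluetooth core rather than in this patch: the core is expected to test the quirk bit that btbcm_read_info() sets from the DMI table before issuing the broken Read LE Min/Max Tx Power command during init.

    /* Sketch only: assumed core-side gate on the quirk bit */
    static bool read_tx_power_allowed(struct hci_dev *hdev)
    {
            return !test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER,
                             &hdev->quirks);
    }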
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 9359bff47296..1a4f8b227eac 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -2081,14 +2081,16 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
if (ver->img_type == 0x03) {
btintel_clear_flag(hdev, INTEL_BOOTLOADER);
btintel_check_bdaddr(hdev);
- }
-
- /* If the OTP has no valid Bluetooth device address, then there will
- * also be no valid address for the operational firmware.
- */
- if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) {
- bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ } else {
+ /*
+		 * Check for a valid bd address in bootloader mode. The
+		 * device will be marked as unconfigured if an empty bd
+		 * address is found.
+ */
+ if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) {
+ bt_dev_info(hdev, "No device address configured");
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ }
}
btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
@@ -2353,8 +2355,15 @@ static int btintel_setup_combined(struct hci_dev *hdev)
* As a workaround, send HCI Reset command first which will reset the
* number of completed commands and allow normal command processing
* from now on.
+ *
+	 * Regarding the INTEL_BROKEN_SHUTDOWN_LED flag, these devices may be
+	 * in the SW_RFKILL ON state as a workaround for the LED issue during
+	 * the shutdown() procedure, and once the device is in the SW_RFKILL
+	 * ON state, the only way to exit it is to send the HCI_Reset command.
*/
- if (btintel_test_flag(hdev, INTEL_BROKEN_INITIAL_NCMD)) {
+ if (btintel_test_flag(hdev, INTEL_BROKEN_INITIAL_NCMD) ||
+ btintel_test_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED)) {
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
@@ -2426,12 +2435,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
- /* These devices have an issue with LED which doesn't
- * go off immediately during shutdown. Set the flag
- * here to send the LED OFF command during shutdown.
- */
- btintel_set_flag(hdev, INTEL_BROKEN_LED);
-
err = btintel_legacy_rom_setup(hdev, &ver);
break;
case 0x0b: /* SfP */
@@ -2466,6 +2469,10 @@ static int btintel_setup_combined(struct hci_dev *hdev)
goto exit_error;
}
+	/* memset ver_tlv to start with a clean state, as a few fields are
+	 * exclusive to bootloader mode and are not populated in operational
+	 * mode
+	 */
+ memset(&ver_tlv, 0, sizeof(ver_tlv));
/* For TLV type device, parse the tlv data */
err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
if (err) {
@@ -2492,10 +2499,14 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
- /* Some legacy bootloader devices from JfP supports both old
- * and TLV based HCI_Intel_Read_Version command. But we don't
- * want to use the TLV based setup routines for those legacy
- * bootloader device.
+		/* On some legacy bootloader devices starting from JfP, the
+		 * operational firmware supports both the old and the TLV based
+		 * HCI_Intel_Read_Version command, depending on the command
+		 * parameter.
+		 *
+		 * For the firmware upgrade case, the TLV based version cannot
+		 * be used because the firmware filename for legacy bootloader
+		 * devices is based on the old format.
*
* Also, it is not easy to convert TLV based version from the
* legacy version format.
@@ -2507,6 +2518,20 @@ static int btintel_setup_combined(struct hci_dev *hdev)
err = btintel_read_version(hdev, &ver);
if (err)
return err;
+
+		/* Apply the device-specific HCI quirks.
+		 *
+		 * All legacy bootloader devices support WBS.
+		 */
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+
+		/* Valid LE States quirk for the JfP/ThP family */
+ if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+
+ /* Setup MSFT Extension support */
+ btintel_set_msft_opcode(hdev, ver.hw_variant);
+
err = btintel_bootloader_setup(hdev, &ver);
break;
case 0x17:
@@ -2562,9 +2587,10 @@ static int btintel_shutdown_combined(struct hci_dev *hdev)
/* Some platforms have an issue with BT LED when the interface is
* down or BT radio is turned off, which takes 5 seconds to BT LED
- * goes off. This command turns off the BT LED immediately.
+	 * goes off. As a workaround, send HCI_Intel_SW_RFKILL to put the
+	 * device in the RFKILL ON state, which turns off the BT LED immediately.
*/
- if (btintel_test_flag(hdev, INTEL_BROKEN_LED)) {
+ if (btintel_test_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED)) {
skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
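[Editor's note] Taken together, the btintel.c hunks above implement a two-step LED workaround. A hedged sketch of the round trip for a device flagged INTEL_BROKEN_SHUTDOWN_LED; the function below is an illustration, not the driver's code:

    /* Sketch only: shutdown parks the device in SW_RFKILL (vendor
     * opcode 0xfc3f, HCI_Intel_SW_RFKILL) so the LED goes off at
     * once; the next setup must issue HCI_Reset to leave that state.
     */
    static int intel_led_workaround_roundtrip(struct hci_dev *hdev)
    {
            struct sk_buff *skb;

            skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT);
            if (IS_ERR(skb))
                    return PTR_ERR(skb);
            kfree_skb(skb);

            skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                 HCI_INIT_TIMEOUT);
            if (IS_ERR(skb))
                    return PTR_ERR(skb);
            kfree_skb(skb);
            return 0;
    }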
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index e500c0d7a729..c9b24e9299e2 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -150,7 +150,7 @@ enum {
INTEL_FIRMWARE_FAILED,
INTEL_BOOTING,
INTEL_BROKEN_INITIAL_NCMD,
- INTEL_BROKEN_LED,
+ INTEL_BROKEN_SHUTDOWN_LED,
INTEL_ROM_LEGACY,
__INTEL_NUM_FLAGS,
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 5ccbe4d459d0..181338f60530 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -1,4 +1,4 @@
-/**
+/*
* Marvell Bluetooth driver
*
* Copyright (C) 2009, Marvell International Ltd.
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
new file mode 100644
index 000000000000..526dfdf1fe01
--- /dev/null
+++ b/drivers/bluetooth/btmtk.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2021 MediaTek Inc.
+ *
+ */
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btmtk.h"
+
+#define VERSION "0.1"
+
+/* Constants for the MT79xx ROM patch download */
+#define MTK_FW_ROM_PATCH_HEADER_SIZE 32
+#define MTK_FW_ROM_PATCH_GD_SIZE 64
+#define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64
+#define MTK_SEC_MAP_COMMON_SIZE 12
+#define MTK_SEC_MAP_NEED_SEND_SIZE 52
+
+struct btmtk_patch_header {
+ u8 datetime[16];
+ u8 platform[4];
+ __le16 hwver;
+ __le16 swver;
+ __le32 magicnum;
+} __packed;
+
+struct btmtk_global_desc {
+ __le32 patch_ver;
+ __le32 sub_sys;
+ __le32 feature_opt;
+ __le32 section_num;
+} __packed;
+
+struct btmtk_section_map {
+ __le32 sectype;
+ __le32 secoffset;
+ __le32 secsize;
+ union {
+ __le32 u4SecSpec[13];
+ struct {
+ __le32 dlAddr;
+ __le32 dlsize;
+ __le32 seckeyidx;
+ __le32 alignlen;
+ __le32 sectype;
+ __le32 dlmodecrctype;
+ __le32 crc;
+ __le32 reserved[6];
+ } bin_info_spec;
+ };
+} __packed;
+
+int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ struct btmtk_global_desc *globaldesc = NULL;
+ struct btmtk_section_map *sectionmap;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ const u8 *fw_bin_ptr;
+ int err, dlen, i, status;
+ u8 flag, first_block, retry;
+ u32 section_num, dl_size, section_offset;
+ u8 cmd[64];
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_bin_ptr = fw_ptr;
+ globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
+ section_num = le32_to_cpu(globaldesc->section_num);
+
+ for (i = 0; i < section_num; i++) {
+ first_block = 1;
+ fw_ptr = fw_bin_ptr;
+ sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
+ MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i);
+
+ section_offset = le32_to_cpu(sectionmap->secoffset);
+ dl_size = le32_to_cpu(sectionmap->bin_info_spec.dlsize);
+
+ if (dl_size > 0) {
+ retry = 20;
+ while (retry > 0) {
+ cmd[0] = 0; /* 0 means legacy dl mode. */
+ memcpy(cmd + 1,
+ fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
+ MTK_FW_ROM_PATCH_GD_SIZE +
+ MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i +
+ MTK_SEC_MAP_COMMON_SIZE,
+ MTK_SEC_MAP_NEED_SEND_SIZE + 1);
+
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = &status;
+ wmt_params.flag = 0;
+ wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1;
+ wmt_params.data = &cmd;
+
+ err = wmt_cmd_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ if (status == BTMTK_WMT_PATCH_UNDONE) {
+ break;
+ } else if (status == BTMTK_WMT_PATCH_PROGRESS) {
+ msleep(100);
+ retry--;
+ } else if (status == BTMTK_WMT_PATCH_DONE) {
+ goto next_section;
+ } else {
+ bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
+ status);
+ err = -EIO;
+ goto err_release_fw;
+ }
+ }
+
+ fw_ptr += section_offset;
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
+ while (dl_size > 0) {
+ dlen = min_t(int, 250, dl_size);
+ if (first_block == 1) {
+ flag = 1;
+ first_block = 0;
+ } else if (dl_size - dlen <= 0) {
+ flag = 3;
+ } else {
+ flag = 2;
+ }
+
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = wmt_cmd_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ dl_size -= dlen;
+ fw_ptr += dlen;
+ }
+ }
+next_section:
+ continue;
+ }
+	/* Wait a few moments for the firmware activation to complete */
+ usleep_range(100000, 120000);
+
+err_release_fw:
+ release_firmware(fw);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btmtk_setup_firmware_79xx);
+
+int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err, dlen;
+ u8 flag, param;
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+	/* Power on the data RAM the firmware relies on. */
+ param = 1;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 3;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = wmt_cmd_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
+ goto err_release_fw;
+ }
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+	/* The patch header is 30 bytes and should be skipped */
+ if (fw_size < 30) {
+ err = -EINVAL;
+ goto err_release_fw;
+ }
+
+ fw_size -= 30;
+ fw_ptr += 30;
+ flag = 1;
+
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
+ while (fw_size > 0) {
+ dlen = min_t(int, 250, fw_size);
+
+ /* Tell device the position in sequence */
+ if (fw_size - dlen <= 0)
+ flag = 3;
+ else if (fw_size < fw->size - 30)
+ flag = 2;
+
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = wmt_cmd_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ fw_size -= dlen;
+ fw_ptr += dlen;
+ }
+
+ wmt_params.op = BTMTK_WMT_RST;
+ wmt_params.flag = 4;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = NULL;
+
+	/* Activate the function the firmware provides */
+ err = wmt_cmd_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ goto err_release_fw;
+ }
+
+	/* Wait a few moments for the firmware activation to complete */
+ usleep_range(10000, 12000);
+
+err_release_fw:
+ release_firmware(fw);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btmtk_setup_firmware);
+
+int btmtk_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ long ret;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1a, 6, bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "changing Mediatek device address failed (%ld)",
+ ret);
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_set_bdaddr);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");
+MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_MT7663);
+MODULE_FIRMWARE(FIRMWARE_MT7668);
+MODULE_FIRMWARE(FIRMWARE_MT7961);
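[Editor's note] The new btmtk module is transport-agnostic: each transport driver passes its own WMT command/response primitive as the wmt_cmd_sync callback. A hedged usage sketch; my_wmt_cmd_sync() and my_setup() are hypothetical glue, only btmtk_setup_firmware() and FIRMWARE_MT7668 come from the patch:

    static int my_wmt_cmd_sync(struct hci_dev *hdev,
                               struct btmtk_hci_wmt_params *params)
    {
            /* Send params->op/flag/data over the transport, wait for
             * the vendor event and fill *params->status if requested.
             */
            return 0;
    }

    static int my_setup(struct hci_dev *hdev)
    {
            return btmtk_setup_firmware(hdev, FIRMWARE_MT7668,
                                        my_wmt_cmd_sync);
    }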
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
new file mode 100644
index 000000000000..6e7b0c7567c0
--- /dev/null
+++ b/drivers/bluetooth/btmtk.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2021 MediaTek Inc. */
+
+#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
+#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
+
+#define HCI_WMT_MAX_EVENT_SIZE 64
+
+#define BTMTK_WMT_REG_READ 0x2
+
+enum {
+ BTMTK_WMT_PATCH_DWNLD = 0x1,
+ BTMTK_WMT_TEST = 0x2,
+ BTMTK_WMT_WAKEUP = 0x3,
+ BTMTK_WMT_HIF = 0x4,
+ BTMTK_WMT_FUNC_CTRL = 0x6,
+ BTMTK_WMT_RST = 0x7,
+ BTMTK_WMT_REGISTER = 0x8,
+ BTMTK_WMT_SEMAPHORE = 0x17,
+};
+
+enum {
+ BTMTK_WMT_INVALID,
+ BTMTK_WMT_PATCH_UNDONE,
+ BTMTK_WMT_PATCH_PROGRESS,
+ BTMTK_WMT_PATCH_DONE,
+ BTMTK_WMT_ON_UNDONE,
+ BTMTK_WMT_ON_DONE,
+ BTMTK_WMT_ON_PROGRESS,
+};
+
+struct btmtk_wmt_hdr {
+ u8 dir;
+ u8 op;
+ __le16 dlen;
+ u8 flag;
+} __packed;
+
+struct btmtk_hci_wmt_cmd {
+ struct btmtk_wmt_hdr hdr;
+ u8 data[];
+} __packed;
+
+struct btmtk_hci_wmt_evt {
+ struct hci_event_hdr hhdr;
+ struct btmtk_wmt_hdr whdr;
+} __packed;
+
+struct btmtk_hci_wmt_evt_funcc {
+ struct btmtk_hci_wmt_evt hwhdr;
+ __be16 status;
+} __packed;
+
+struct btmtk_hci_wmt_evt_reg {
+ struct btmtk_hci_wmt_evt hwhdr;
+ u8 rsv[2];
+ u8 num;
+ __le32 addr;
+ __le32 val;
+} __packed;
+
+struct btmtk_tci_sleep {
+ u8 mode;
+ __le16 duration;
+ __le16 host_duration;
+ u8 host_wakeup_pin;
+ u8 time_compensation;
+} __packed;
+
+struct btmtk_hci_wmt_params {
+ u8 op;
+ u8 flag;
+ u16 dlen;
+ const void *data;
+ u32 *status;
+};
+
+typedef int (*wmt_cmd_sync_func_t)(struct hci_dev *,
+ struct btmtk_hci_wmt_params *);
+
+#if IS_ENABLED(CONFIG_BT_MTK)
+
+int btmtk_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+
+int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync);
+
+int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync);
+#else
+
+static inline int btmtk_set_bdaddr(struct hci_dev *hdev,
+ const bdaddr_t *bdaddr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
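[Editor's note] With CONFIG_BT_MTK disabled, the static inline stubs above let callers compile unchanged and observe -EOPNOTSUPP at runtime. A brief hedged sketch; try_set_bdaddr() is an assumption, not driver code:

    static int try_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *addr)
    {
            int err = btmtk_set_bdaddr(hdev, addr);

            if (err == -EOPNOTSUPP)
                    bt_dev_warn(hdev, "btmtk support not built in");
            return err;
    }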
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 9872ef18f9fe..b5ea8d3bffaa 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -12,7 +12,6 @@
#include <asm/unaligned.h>
#include <linux/atomic.h>
-#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -28,26 +27,32 @@
#include <net/bluetooth/hci_core.h>
#include "h4_recv.h"
+#include "btmtk.h"
#define VERSION "0.1"
-#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
-#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
-
#define MTKBTSDIO_AUTOSUSPEND_DELAY 8000
static bool enable_autosuspend;
struct btmtksdio_data {
const char *fwname;
+ u16 chipid;
};
static const struct btmtksdio_data mt7663_data = {
.fwname = FIRMWARE_MT7663,
+ .chipid = 0x7663,
};
static const struct btmtksdio_data mt7668_data = {
.fwname = FIRMWARE_MT7668,
+ .chipid = 0x7668,
+};
+
+static const struct btmtksdio_data mt7921_data = {
+ .fwname = FIRMWARE_MT7961,
+ .chipid = 0x7921,
};
static const struct sdio_device_id btmtksdio_table[] = {
@@ -55,6 +60,8 @@ static const struct sdio_device_id btmtksdio_table[] = {
.driver_data = (kernel_ulong_t)&mt7663_data },
{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7668),
.driver_data = (kernel_ulong_t)&mt7668_data },
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7961),
+ .driver_data = (kernel_ulong_t)&mt7921_data },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
@@ -86,28 +93,13 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define MTK_REG_CRDR 0x1c
+#define MTK_REG_CRPLR 0x24
+
#define MTK_SDIO_BLOCK_SIZE 256
#define BTMTKSDIO_TX_WAIT_VND_EVT 1
-
-enum {
- MTK_WMT_PATCH_DWNLD = 0x1,
- MTK_WMT_TEST = 0x2,
- MTK_WMT_WAKEUP = 0x3,
- MTK_WMT_HIF = 0x4,
- MTK_WMT_FUNC_CTRL = 0x6,
- MTK_WMT_RST = 0x7,
- MTK_WMT_SEMAPHORE = 0x17,
-};
-
-enum {
- BTMTK_WMT_INVALID,
- BTMTK_WMT_PATCH_UNDONE,
- BTMTK_WMT_PATCH_DONE,
- BTMTK_WMT_ON_UNDONE,
- BTMTK_WMT_ON_DONE,
- BTMTK_WMT_ON_PROGRESS,
-};
+#define BTMTKSDIO_HW_TX_READY 2
+#define BTMTKSDIO_FUNC_ENABLED 3
struct mtkbtsdio_hdr {
__le16 len;
@@ -115,50 +107,12 @@ struct mtkbtsdio_hdr {
u8 bt_type;
} __packed;
-struct mtk_wmt_hdr {
- u8 dir;
- u8 op;
- __le16 dlen;
- u8 flag;
-} __packed;
-
-struct mtk_hci_wmt_cmd {
- struct mtk_wmt_hdr hdr;
- u8 data[256];
-} __packed;
-
-struct btmtk_hci_wmt_evt {
- struct hci_event_hdr hhdr;
- struct mtk_wmt_hdr whdr;
-} __packed;
-
-struct btmtk_hci_wmt_evt_funcc {
- struct btmtk_hci_wmt_evt hwhdr;
- __be16 status;
-} __packed;
-
-struct btmtk_tci_sleep {
- u8 mode;
- __le16 duration;
- __le16 host_duration;
- u8 host_wakeup_pin;
- u8 time_compensation;
-} __packed;
-
-struct btmtk_hci_wmt_params {
- u8 op;
- u8 flag;
- u16 dlen;
- const void *data;
- u32 *status;
-};
-
struct btmtksdio_dev {
struct hci_dev *hdev;
struct sdio_func *func;
struct device *dev;
- struct work_struct tx_work;
+ struct work_struct txrx_work;
unsigned long tx_state;
struct sk_buff_head txq;
@@ -172,29 +126,35 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
+ struct btmtk_hci_wmt_evt_reg *wmt_evt_reg;
u32 hlen, status = BTMTK_WMT_INVALID;
struct btmtk_hci_wmt_evt *wmt_evt;
- struct mtk_hci_wmt_cmd wc;
- struct mtk_wmt_hdr *hdr;
+ struct btmtk_hci_wmt_cmd *wc;
+ struct btmtk_wmt_hdr *hdr;
int err;
+ /* Send the WMT command and wait until the WMT event returns */
hlen = sizeof(*hdr) + wmt_params->dlen;
if (hlen > 255)
return -EINVAL;
- hdr = (struct mtk_wmt_hdr *)&wc;
+ wc = kzalloc(hlen, GFP_KERNEL);
+ if (!wc)
+ return -ENOMEM;
+
+ hdr = &wc->hdr;
hdr->dir = 1;
hdr->op = wmt_params->op;
hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
hdr->flag = wmt_params->flag;
- memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+ memcpy(wc->data, wmt_params->data, wmt_params->dlen);
set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
- err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_wc;
}
/* The vendor specific WMT commands are all answered by a vendor
@@ -211,13 +171,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_wc;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto err_free_wc;
}
/* Parse and handle the return WMT event */
@@ -230,13 +191,13 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
}
switch (wmt_evt->whdr.op) {
- case MTK_WMT_SEMAPHORE:
+ case BTMTK_WMT_SEMAPHORE:
if (wmt_evt->whdr.flag == 2)
status = BTMTK_WMT_PATCH_UNDONE;
else
status = BTMTK_WMT_PATCH_DONE;
break;
- case MTK_WMT_FUNC_CTRL:
+ case BTMTK_WMT_FUNC_CTRL:
wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
status = BTMTK_WMT_ON_DONE;
@@ -245,6 +206,19 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
else
status = BTMTK_WMT_ON_UNDONE;
break;
+ case BTMTK_WMT_PATCH_DWNLD:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_DONE;
+ else if (wmt_evt->whdr.flag == 1)
+ status = BTMTK_WMT_PATCH_PROGRESS;
+ else
+ status = BTMTK_WMT_PATCH_UNDONE;
+ break;
+ case BTMTK_WMT_REGISTER:
+ wmt_evt_reg = (struct btmtk_hci_wmt_evt_reg *)wmt_evt;
+ if (le16_to_cpu(wmt_evt->whdr.dlen) == 12)
+ status = le32_to_cpu(wmt_evt_reg->val);
+ break;
}
if (wmt_params->status)
@@ -253,6 +227,8 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
err_free_skb:
kfree_skb(bdev->evt_skb);
bdev->evt_skb = NULL;
+err_free_wc:
+ kfree(wc);
return err;
}
@@ -279,6 +255,7 @@ static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
sdio_hdr->reserved = cpu_to_le16(0);
sdio_hdr->bt_type = hci_skb_pkt_type(skb);
+ clear_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
if (err < 0)
@@ -301,32 +278,6 @@ static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
}
-static void btmtksdio_tx_work(struct work_struct *work)
-{
- struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
- tx_work);
- struct sk_buff *skb;
- int err;
-
- pm_runtime_get_sync(bdev->dev);
-
- sdio_claim_host(bdev->func);
-
- while ((skb = skb_dequeue(&bdev->txq))) {
- err = btmtksdio_tx_packet(bdev, skb);
- if (err < 0) {
- bdev->hdev->stat.err_tx++;
- skb_queue_head(&bdev->txq, skb);
- break;
- }
- }
-
- sdio_release_host(bdev->func);
-
- pm_runtime_mark_last_busy(bdev->dev);
- pm_runtime_put_autosuspend(bdev->dev);
-}
-
static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
@@ -374,8 +325,29 @@ err_out:
return err;
}
+static int btmtksdio_recv_acl(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
+
+ switch (handle) {
+ case 0xfc6f:
+ /* Firmware dump from device: when the firmware hangs, the
+ * device can no longer suspend and thus disable auto-suspend.
+ */
+ pm_runtime_forbid(bdev->dev);
+ fallthrough;
+ case 0x05ff:
+ case 0x05fe:
+ /* Firmware debug logging */
+ return hci_recv_diag(hdev, skb);
+ }
+
+ return hci_recv_frame(hdev, skb);
+}
+
static const struct h4_recv_pkt mtk_recv_pkts[] = {
- { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_ACL, .recv = btmtksdio_recv_acl },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = btmtksdio_recv_event },
};
@@ -477,70 +449,90 @@ err_kfree_skb:
return err;
}
-static void btmtksdio_interrupt(struct sdio_func *func)
+static void btmtksdio_txrx_work(struct work_struct *work)
{
- struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
- u32 int_status;
- u16 rx_size;
-
- /* It is required that the host gets ownership from the device before
- * accessing any register, however, if SDIO host is not being released,
- * a potential deadlock probably happens in a circular wait between SDIO
- * IRQ work and PM runtime work. So, we have to explicitly release SDIO
- * host here and claim again after the PM runtime work is all done.
- */
- sdio_release_host(bdev->func);
+ struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
+ txrx_work);
+ unsigned long txrx_timeout;
+ u32 int_status, rx_size;
+ struct sk_buff *skb;
+ int err;
pm_runtime_get_sync(bdev->dev);
sdio_claim_host(bdev->func);
/* Disable interrupt */
- sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
-
- int_status = sdio_readl(func, MTK_REG_CHISR, NULL);
-
- /* Ack an interrupt as soon as possible before any operation on
- * hardware.
- *
- * Note that we don't ack any status during operations to avoid race
- * condition between the host and the device such as it's possible to
- * mistakenly ack RX_DONE for the next packet and then cause interrupts
- * not be raised again but there is still pending data in the hardware
- * FIFO.
- */
- sdio_writel(func, int_status, MTK_REG_CHISR, NULL);
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
+
+ txrx_timeout = jiffies + 5 * HZ;
+
+ do {
+ int_status = sdio_readl(bdev->func, MTK_REG_CHISR, NULL);
+
+ /* Ack an interrupt as soon as possible before any operation on
+ * hardware.
+ *
+		 * Note that we don't ack any status during operations to avoid a
+		 * race condition between the host and the device: it is possible
+		 * to mistakenly ack RX_DONE for the next packet, after which the
+		 * interrupt is not raised again even though there is still
+		 * pending data in the hardware FIFO.
+ */
+ sdio_writel(bdev->func, int_status, MTK_REG_CHISR, NULL);
+
+ if (int_status & FW_OWN_BACK_INT)
+ bt_dev_dbg(bdev->hdev, "Get fw own back");
+
+ if (int_status & TX_EMPTY)
+ set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
+
+ else if (unlikely(int_status & TX_FIFO_OVERFLOW))
+ bt_dev_warn(bdev->hdev, "Tx fifo overflow");
+
+ if (test_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state)) {
+ skb = skb_dequeue(&bdev->txq);
+ if (skb) {
+ err = btmtksdio_tx_packet(bdev, skb);
+ if (err < 0) {
+ bdev->hdev->stat.err_tx++;
+ skb_queue_head(&bdev->txq, skb);
+ }
+ }
+ }
- if (unlikely(!int_status))
- bt_dev_err(bdev->hdev, "CHISR is 0");
+ if (int_status & RX_DONE_INT) {
+ rx_size = sdio_readl(bdev->func, MTK_REG_CRPLR, NULL);
+ rx_size = (rx_size & RX_PKT_LEN) >> 16;
+ if (btmtksdio_rx_packet(bdev, rx_size) < 0)
+ bdev->hdev->stat.err_rx++;
+ }
+ } while (int_status || time_is_before_jiffies(txrx_timeout));
- if (int_status & FW_OWN_BACK_INT)
- bt_dev_dbg(bdev->hdev, "Get fw own back");
+ /* Enable interrupt */
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, 0);
- if (int_status & TX_EMPTY)
- schedule_work(&bdev->tx_work);
- else if (unlikely(int_status & TX_FIFO_OVERFLOW))
- bt_dev_warn(bdev->hdev, "Tx fifo overflow");
+ sdio_release_host(bdev->func);
- if (int_status & RX_DONE_INT) {
- rx_size = (int_status & RX_PKT_LEN) >> 16;
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
+}
- if (btmtksdio_rx_packet(bdev, rx_size) < 0)
- bdev->hdev->stat.err_rx++;
- }
+static void btmtksdio_interrupt(struct sdio_func *func)
+{
+ struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
- /* Enable interrupt */
- sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
+ /* Disable interrupt */
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
- pm_runtime_mark_last_busy(bdev->dev);
- pm_runtime_put_autosuspend(bdev->dev);
+ schedule_work(&bdev->txrx_work);
}
static int btmtksdio_open(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ u32 status, val;
int err;
- u32 status;
sdio_claim_host(bdev->func);
@@ -548,6 +540,8 @@ static int btmtksdio_open(struct hci_dev *hdev)
if (err < 0)
goto err_release_host;
+ set_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
+
/* Get ownership from the device */
sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
if (err < 0)
@@ -580,13 +574,22 @@ static int btmtksdio_open(struct hci_dev *hdev)
	/* SDIO CMD 5 allows the SDIO device back to idle state and
* synchronous interrupt is supported in SDIO 4-bit mode
*/
- sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN,
- MTK_REG_CSDIOCSR, &err);
+ val = sdio_readl(bdev->func, MTK_REG_CSDIOCSR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ val |= SDIO_INT_CTL;
+ sdio_writel(bdev->func, val, MTK_REG_CSDIOCSR, &err);
if (err < 0)
goto err_release_irq;
- /* Setup write-1-clear for CHISR register */
- sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err);
+	/* Explicitly set the write-1-clear method */
+ val = sdio_readl(bdev->func, MTK_REG_CHCR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ val |= C_INT_CLR_CTRL;
+ sdio_writel(bdev->func, val, MTK_REG_CHCR, &err);
if (err < 0)
goto err_release_irq;
@@ -630,6 +633,8 @@ static int btmtksdio_close(struct hci_dev *hdev)
sdio_release_irq(bdev->func);
+ cancel_work_sync(&bdev->txrx_work);
+
/* Return ownership to the device */
sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);
@@ -638,6 +643,7 @@ static int btmtksdio_close(struct hci_dev *hdev)
if (err < 0)
bt_dev_err(bdev->hdev, "Cannot return ownership to device");
+ clear_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
sdio_disable_func(bdev->func);
sdio_release_host(bdev->func);
@@ -651,7 +657,7 @@ static int btmtksdio_flush(struct hci_dev *hdev)
skb_queue_purge(&bdev->txq);
- cancel_work_sync(&bdev->tx_work);
+ cancel_work_sync(&bdev->txrx_work);
return 0;
}
@@ -663,7 +669,7 @@ static int btmtksdio_func_query(struct hci_dev *hdev)
u8 param = 0;
/* Query whether the function is enabled */
- wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 4;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
@@ -678,111 +684,16 @@ static int btmtksdio_func_query(struct hci_dev *hdev)
return status;
}
-static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
-{
- struct btmtk_hci_wmt_params wmt_params;
- const struct firmware *fw;
- const u8 *fw_ptr;
- size_t fw_size;
- int err, dlen;
- u8 flag, param;
-
- err = request_firmware(&fw, fwname, &hdev->dev);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
- return err;
- }
-
- /* Power on data RAM the firmware relies on. */
- param = 1;
- wmt_params.op = MTK_WMT_FUNC_CTRL;
- wmt_params.flag = 3;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = NULL;
-
- err = mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
- goto free_fw;
- }
-
- fw_ptr = fw->data;
- fw_size = fw->size;
-
- /* The size of patch header is 30 bytes, should be skip */
- if (fw_size < 30) {
- err = -EINVAL;
- goto free_fw;
- }
-
- fw_size -= 30;
- fw_ptr += 30;
- flag = 1;
-
- wmt_params.op = MTK_WMT_PATCH_DWNLD;
- wmt_params.status = NULL;
-
- while (fw_size > 0) {
- dlen = min_t(int, 250, fw_size);
-
- /* Tell device the position in sequence */
- if (fw_size - dlen <= 0)
- flag = 3;
- else if (fw_size < fw->size - 30)
- flag = 2;
-
- wmt_params.flag = flag;
- wmt_params.dlen = dlen;
- wmt_params.data = fw_ptr;
-
- err = mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
- err);
- goto free_fw;
- }
-
- fw_size -= dlen;
- fw_ptr += dlen;
- }
-
- wmt_params.op = MTK_WMT_RST;
- wmt_params.flag = 4;
- wmt_params.dlen = 0;
- wmt_params.data = NULL;
- wmt_params.status = NULL;
-
- /* Activate funciton the firmware providing to */
- err = mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
- goto free_fw;
- }
-
- /* Wait a few moments for firmware activation done */
- usleep_range(10000, 12000);
-
-free_fw:
- release_firmware(fw);
- return err;
-}
-
-static int btmtksdio_setup(struct hci_dev *hdev)
+static int mt76xx_setup(struct hci_dev *hdev, const char *fwname)
{
- struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_params wmt_params;
- ktime_t calltime, delta, rettime;
struct btmtk_tci_sleep tci_sleep;
- unsigned long long duration;
struct sk_buff *skb;
int err, status;
u8 param = 0x1;
- calltime = ktime_get();
-
/* Query whether the firmware is already download */
- wmt_params.op = MTK_WMT_SEMAPHORE;
+ wmt_params.op = BTMTK_WMT_SEMAPHORE;
wmt_params.flag = 1;
wmt_params.dlen = 0;
wmt_params.data = NULL;
@@ -800,7 +711,7 @@ static int btmtksdio_setup(struct hci_dev *hdev)
}
/* Setup a firmware which the device definitely requires */
- err = mtk_setup_firmware(hdev, bdev->data->fwname);
+ err = btmtk_setup_firmware(hdev, fwname, mtk_hci_wmt_sync);
if (err < 0)
return err;
@@ -823,7 +734,7 @@ ignore_setup_fw:
}
/* Enable Bluetooth protocol */
- wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
@@ -852,6 +763,116 @@ ignore_func_on:
}
kfree_skb(skb);
+ return 0;
+}
+
+static int mt79xx_setup(struct hci_dev *hdev, const char *fwname)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ u8 param = 0x1;
+ int err;
+
+ err = btmtk_setup_firmware_79xx(hdev, fwname, mtk_hci_wmt_sync);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to setup 79xx firmware (%d)", err);
+ return err;
+ }
+
+ /* Enable Bluetooth protocol */
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ hci_set_msft_opcode(hdev, 0xFD30);
+ hci_set_aosp_capable(hdev);
+
+ return err;
+}
+
+static int btsdio_mtk_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ struct reg_read_cmd {
+ u8 type;
+ u8 rsv;
+ u8 num;
+ __le32 addr;
+ } __packed reg_read = {
+ .type = 1,
+ .num = 1,
+ };
+ u32 status;
+ int err;
+
+ reg_read.addr = cpu_to_le32(reg);
+ wmt_params.op = BTMTK_WMT_REGISTER;
+ wmt_params.flag = BTMTK_WMT_REG_READ;
+ wmt_params.dlen = sizeof(reg_read);
+ wmt_params.data = &reg_read;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to read reg(%d)", err);
+ return err;
+ }
+
+ *val = status;
+
+ return err;
+}
+
+static int btmtksdio_setup(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+ char fwname[64];
+ int err, dev_id;
+ u32 fw_version = 0;
+
+ calltime = ktime_get();
+ set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
+
+ switch (bdev->data->chipid) {
+ case 0x7921:
+ err = btsdio_mtk_reg_read(hdev, 0x70010200, &dev_id);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get device id (%d)", err);
+ return err;
+ }
+
+ err = btsdio_mtk_reg_read(hdev, 0x80021004, &fw_version);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw version (%d)", err);
+ return err;
+ }
+
+ snprintf(fwname, sizeof(fwname),
+ "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+ dev_id & 0xffff, (fw_version & 0xff) + 1);
+ err = mt79xx_setup(hdev, fwname);
+ if (err < 0)
+ return err;
+ break;
+ case 0x7663:
+ case 0x7668:
+ err = mt76xx_setup(hdev, bdev->data->fwname);
+ if (err < 0)
+ return err;
+ break;
+ default:
+ return -ENODEV;
+ }
+
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long)ktime_to_ns(delta) >> 10;
@@ -891,7 +912,7 @@ static int btmtksdio_shutdown(struct hci_dev *hdev)
pm_runtime_get_sync(bdev->dev);
/* Disable the device */
- wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
@@ -932,7 +953,7 @@ static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb_queue_tail(&bdev->txq, skb);
- schedule_work(&bdev->tx_work);
+ schedule_work(&bdev->txrx_work);
return 0;
}
@@ -955,7 +976,7 @@ static int btmtksdio_probe(struct sdio_func *func,
bdev->dev = &func->dev;
bdev->func = func;
- INIT_WORK(&bdev->tx_work, btmtksdio_tx_work);
+ INIT_WORK(&bdev->txrx_work, btmtksdio_txrx_work);
skb_queue_head_init(&bdev->txq);
/* Initialize and register HCI device */
@@ -976,6 +997,8 @@ static int btmtksdio_probe(struct sdio_func *func,
hdev->setup = btmtksdio_setup;
hdev->shutdown = btmtksdio_shutdown;
hdev->send = btmtksdio_send_frame;
+ hdev->set_bdaddr = btmtk_set_bdaddr;
+
SET_HCIDEV_DEV(hdev, &func->dev);
hdev->manufacturer = 70;
@@ -1042,6 +1065,11 @@ static int btmtksdio_runtime_suspend(struct device *dev)
if (!bdev)
return 0;
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ return 0;
+
+ sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
sdio_claim_host(bdev->func);
sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
@@ -1069,6 +1097,9 @@ static int btmtksdio_runtime_resume(struct device *dev)
if (!bdev)
return 0;
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ return 0;
+
sdio_claim_host(bdev->func);
sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
@@ -1112,5 +1143,3 @@ MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FIRMWARE_MT7663);
-MODULE_FIRMWARE(FIRMWARE_MT7668);
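[Editor's note] The btmtksdio rework above replaces in-IRQ RX handling and a separate TX work item with a single txrx worker. A hedged sketch of the loop shape; the register-access helpers are hypothetical stand-ins, and only TX_EMPTY/RX_DONE_INT come from the driver:

    /* Sketch only: mask the SDIO interrupt, drain TX/RX until the
     * controller reports no pending status (bounded by a timeout),
     * then unmask.
     */
    static void txrx_loop_shape(struct btmtksdio_dev *bdev)
    {
            unsigned long timeout = jiffies + 5 * HZ;
            u32 int_status;

            disable_sdio_interrupt(bdev);                   /* hypothetical */
            do {
                    int_status = read_and_ack_chisr(bdev);  /* hypothetical */
                    if (int_status & TX_EMPTY)
                            send_one_queued_packet(bdev);   /* hypothetical */
                    if (int_status & RX_DONE_INT)
                            receive_one_packet(bdev);       /* hypothetical */
            } while (int_status && time_is_after_jiffies(timeout));
            enable_sdio_interrupt(bdev);                    /* hypothetical */
    }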
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index be04d74037d2..c9064d34d830 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
#include <linux/firmware.h>
+#include <linux/vmalloc.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -141,6 +142,50 @@ out:
return err;
}
+static int qca_send_patch_config_cmd(struct hci_dev *hdev)
+{
+ const u8 cmd[] = { EDL_PATCH_CONFIG_CMD, 0x01, 0, 0, 0 };
+ struct sk_buff *skb;
+ struct edl_event_hdr *edl;
+ int err;
+
+ bt_dev_dbg(hdev, "QCA Patch config");
+
+ skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd),
+ cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err);
+ return err;
+ }
+
+ if (skb->len != 2) {
+ bt_dev_err(hdev, "QCA Patch config cmd size mismatch len %d", skb->len);
+ err = -EILSEQ;
+ goto out;
+ }
+
+ edl = (struct edl_event_hdr *)(skb->data);
+ if (!edl) {
+ bt_dev_err(hdev, "QCA Patch config with no header");
+ err = -EILSEQ;
+ goto out;
+ }
+
+ if (edl->cresp != EDL_PATCH_CONFIG_RES_EVT || edl->rtype != EDL_PATCH_CONFIG_CMD) {
+ bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
+ edl->rtype);
+ err = -EIO;
+ goto out;
+ }
+
+ err = 0;
+
+out:
+ kfree_skb(skb);
+ return err;
+}
+
static int qca_send_reset(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -551,6 +596,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
*/
rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
+ if (soc_type == QCA_WCN6750)
+ qca_send_patch_config_cmd(hdev);
+
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
if (qca_is_wcn399x(soc_type)) {
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 30afa7703afd..61e9a50e66ae 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -13,6 +13,7 @@
#define EDL_PATCH_TLV_REQ_CMD (0x1E)
#define EDL_GET_BUILD_INFO_CMD (0x20)
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
+#define EDL_PATCH_CONFIG_CMD (0x28)
#define MAX_SIZE_PER_TLV_SEGMENT (243)
#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
#define QCA_DISABLE_LOGGING (0xFC17)
@@ -24,6 +25,7 @@
#define EDL_CMD_EXE_STATUS_EVT (0x00)
#define EDL_SET_BAUDRATE_RSP_EVT (0x92)
#define EDL_NVM_ACCESS_CODE_EVT (0x0B)
+#define EDL_PATCH_CONFIG_RES_EVT (0x00)
#define QCA_DISABLE_LOGGING_SUB_OP (0x14)
#define EDL_TAG_ID_HCI (17)
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 199e8f7d426d..795be33f2892 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -295,6 +295,8 @@ static int btsdio_probe(struct sdio_func *func,
switch (func->device) {
case SDIO_DEVICE_ID_BROADCOM_43341:
case SDIO_DEVICE_ID_BROADCOM_43430:
+ case SDIO_DEVICE_ID_BROADCOM_4345:
+ case SDIO_DEVICE_ID_BROADCOM_43455:
case SDIO_DEVICE_ID_BROADCOM_4356:
return -ENODEV;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 75c83768c257..c30d131da784 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -16,6 +16,7 @@
#include <linux/of_irq.h>
#include <linux/suspend.h>
#include <linux/gpio/consumer.h>
+#include <linux/debugfs.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -24,13 +25,13 @@
#include "btintel.h"
#include "btbcm.h"
#include "btrtl.h"
+#include "btmtk.h"
#define VERSION "0.8"
static bool disable_scofix;
static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
-
static bool reset = true;
static struct usb_driver btusb_driver;
@@ -59,6 +60,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_WIDEBAND_SPEECH 0x400000
#define BTUSB_VALID_LE_STATES 0x800000
#define BTUSB_QCA_WCN6855 0x1000000
+#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED 0x2000000
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD 0x4000000
static const struct usb_device_id btusb_table[] = {
@@ -295,6 +297,24 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -365,10 +385,13 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED |
- BTUSB_INTEL_BROKEN_INITIAL_NCMD },
- { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL_COMBINED },
+ BTUSB_INTEL_BROKEN_INITIAL_NCMD |
+ BTUSB_INTEL_BROKEN_SHUTDOWN_LED },
+ { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL_COMBINED |
+ BTUSB_INTEL_BROKEN_SHUTDOWN_LED },
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_COMBINED },
- { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL_COMBINED },
+ { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL_COMBINED |
+ BTUSB_INTEL_BROKEN_SHUTDOWN_LED },
{ USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_COMBINED },
/* Other Intel Bluetooth devices */
@@ -384,6 +407,8 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x385a), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK |
@@ -423,6 +448,14 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ /* MediaTek MT7922A Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
@@ -549,8 +582,13 @@ struct btusb_data {
unsigned long flags;
- struct work_struct work;
- struct work_struct waker;
+ bool poll_sync;
+ int intr_interval;
+ struct work_struct work;
+ struct work_struct waker;
+ struct delayed_work rx_work;
+
+ struct sk_buff_head acl_q;
struct usb_anchor deferred;
struct usb_anchor tx_anchor;
@@ -715,6 +753,16 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags);
}
+static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
+{
+ if (data->intr_interval) {
+		/* Trigger dequeue immediately if an event is received */
+ schedule_delayed_work(&data->rx_work, 0);
+ }
+
+ return data->recv_event(data->hdev, skb);
+}
+
static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
@@ -760,7 +808,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
if (!hci_skb_expect(skb)) {
/* Complete frame */
- data->recv_event(data->hdev, skb);
+ btusb_recv_event(data, skb);
skb = NULL;
}
}
@@ -771,6 +819,20 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
return err;
}
+static int btusb_recv_acl(struct btusb_data *data, struct sk_buff *skb)
+{
+ /* Only queue ACL packet if intr_interval is set as it means
+ * force_poll_sync has been enabled.
+ */
+ if (!data->intr_interval)
+ return data->recv_acl(data->hdev, skb);
+
+ skb_queue_tail(&data->acl_q, skb);
+ schedule_delayed_work(&data->rx_work, data->intr_interval);
+
+ return 0;
+}
+
static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
@@ -818,7 +880,7 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
if (!hci_skb_expect(skb)) {
/* Complete frame */
- data->recv_acl(data->hdev, skb);
+ btusb_recv_acl(data, skb);
skb = NULL;
}
}
@@ -924,6 +986,8 @@ static void btusb_intr_complete(struct urb *urb)
if (err != -EPERM && err != -ENODEV)
bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
urb, -err);
+ if (err != -EPERM)
+ hci_cmd_sync_cancel(hdev, -err);
usb_unanchor_urb(urb);
}
}
@@ -967,9 +1031,33 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (err != -EPERM && err != -ENODEV)
bt_dev_err(hdev, "urb %p submission failed (%d)",
urb, -err);
+ if (err != -EPERM)
+ hci_cmd_sync_cancel(hdev, -err);
usb_unanchor_urb(urb);
}
+ /* Only initialize intr_interval if URB poll sync is enabled */
+ if (!data->poll_sync)
+ goto done;
+
+ /* The units are frames (milliseconds) for full and low speed devices,
+	 * and microframes (1/8 millisecond) for high-speed and SuperSpeed
+ * devices.
+ *
+ * This is done once on open/resume so it shouldn't change even if
+ * force_poll_sync changes.
+ */
+ switch (urb->dev->speed) {
+ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER: /* units are 125us */
+ data->intr_interval = usecs_to_jiffies(urb->interval * 125);
+ break;
+ default:
+ data->intr_interval = msecs_to_jiffies(urb->interval);
+ break;
+ }
+
+done:
usb_free_urb(urb);
return err;
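[Editor's note] A worked example of the interval conversion above, as a sketch under stated assumptions rather than driver code: SuperSpeed endpoints count urb->interval in 125 us service intervals, other speeds in milliseconds; intr_interval_jiffies() is a hypothetical helper mirroring the switch.

    #include <linux/jiffies.h>

    static unsigned long intr_interval_jiffies(bool superspeed, int interval)
    {
            /* e.g. a SuperSpeed interval of 8 -> 8 * 125us = 1 ms */
            return superspeed ? usecs_to_jiffies(interval * 125)
                              : msecs_to_jiffies(interval);
    }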
@@ -1322,10 +1410,13 @@ static void btusb_tx_complete(struct urb *urb)
if (!test_bit(HCI_RUNNING, &hdev->flags))
goto done;
- if (!urb->status)
+ if (!urb->status) {
hdev->stat.byte_tx += urb->transfer_buffer_length;
- else
+ } else {
+ if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)
+ hci_cmd_sync_cancel(hdev, -urb->status);
hdev->stat.err_tx++;
+ }
done:
spin_lock_irqsave(&data->txlock, flags);
@@ -1429,9 +1520,12 @@ static int btusb_close(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ cancel_delayed_work(&data->rx_work);
cancel_work_sync(&data->work);
cancel_work_sync(&data->waker);
+ skb_queue_purge(&data->acl_q);
+
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
clear_bit(BTUSB_BULK_RUNNING, &data->flags);
clear_bit(BTUSB_INTR_RUNNING, &data->flags);
@@ -1463,6 +1557,10 @@ static int btusb_flush(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ cancel_delayed_work(&data->rx_work);
+
+ skb_queue_purge(&data->acl_q);
+
usb_kill_anchored_urbs(&data->tx_anchor);
btusb_free_frags(data);
@@ -1826,6 +1924,17 @@ static void btusb_waker(struct work_struct *work)
usb_autopm_put_interface(data->intf);
}
+static void btusb_rx_work(struct work_struct *work)
+{
+ struct btusb_data *data = container_of(work, struct btusb_data,
+ rx_work.work);
+ struct sk_buff *skb;
+
+ /* Dequeue ACL data received during the interval */
+ while ((skb = skb_dequeue(&data->acl_q)))
+ data->recv_acl(data->hdev, skb);
+}
+
static int btusb_setup_bcm92035(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -2131,122 +2240,6 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
#define MTK_BT_RST_DONE 0x00000100
#define MTK_BT_RESET_WAIT_MS 100
#define MTK_BT_RESET_NUM_TRIES 10
-#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
-#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
-
-#define HCI_WMT_MAX_EVENT_SIZE 64
-/* It is for mt79xx download rom patch*/
-#define MTK_FW_ROM_PATCH_HEADER_SIZE 32
-#define MTK_FW_ROM_PATCH_GD_SIZE 64
-#define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64
-#define MTK_SEC_MAP_COMMON_SIZE 12
-#define MTK_SEC_MAP_NEED_SEND_SIZE 52
-
-enum {
- BTMTK_WMT_PATCH_DWNLD = 0x1,
- BTMTK_WMT_FUNC_CTRL = 0x6,
- BTMTK_WMT_RST = 0x7,
- BTMTK_WMT_SEMAPHORE = 0x17,
-};
-
-enum {
- BTMTK_WMT_INVALID,
- BTMTK_WMT_PATCH_UNDONE,
- BTMTK_WMT_PATCH_PROGRESS,
- BTMTK_WMT_PATCH_DONE,
- BTMTK_WMT_ON_UNDONE,
- BTMTK_WMT_ON_DONE,
- BTMTK_WMT_ON_PROGRESS,
-};
-
-struct btmtk_wmt_hdr {
- u8 dir;
- u8 op;
- __le16 dlen;
- u8 flag;
-} __packed;
-
-struct btmtk_hci_wmt_cmd {
- struct btmtk_wmt_hdr hdr;
- u8 data[];
-} __packed;
-
-struct btmtk_hci_wmt_evt {
- struct hci_event_hdr hhdr;
- struct btmtk_wmt_hdr whdr;
-} __packed;
-
-struct btmtk_hci_wmt_evt_funcc {
- struct btmtk_hci_wmt_evt hwhdr;
- __be16 status;
-} __packed;
-
-struct btmtk_tci_sleep {
- u8 mode;
- __le16 duration;
- __le16 host_duration;
- u8 host_wakeup_pin;
- u8 time_compensation;
-} __packed;
-
-struct btmtk_hci_wmt_params {
- u8 op;
- u8 flag;
- u16 dlen;
- const void *data;
- u32 *status;
-};
-
-struct btmtk_patch_header {
- u8 datetime[16];
- u8 platform[4];
- __le16 hwver;
- __le16 swver;
- __le32 magicnum;
-} __packed;
-
-struct btmtk_global_desc {
- __le32 patch_ver;
- __le32 sub_sys;
- __le32 feature_opt;
- __le32 section_num;
-} __packed;
-
-struct btmtk_section_map {
- __le32 sectype;
- __le32 secoffset;
- __le32 secsize;
- union {
- __le32 u4SecSpec[13];
- struct {
- __le32 dlAddr;
- __le32 dlsize;
- __le32 seckeyidx;
- __le32 alignlen;
- __le32 sectype;
- __le32 dlmodecrctype;
- __le32 crc;
- __le32 reserved[6];
- } bin_info_spec;
- };
-} __packed;
-
-static int btusb_set_bdaddr_mtk(struct hci_dev *hdev, const bdaddr_t *bdaddr)
-{
- struct sk_buff *skb;
- long ret;
-
- skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- bt_dev_err(hdev, "changing Mediatek device address failed (%ld)",
- ret);
- return ret;
- }
- kfree_skb(skb);
-
- return 0;
-}
static void btusb_mtk_wmt_recv(struct urb *urb)
{
@@ -2265,6 +2258,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
if (!skb) {
hdev->stat.err_rx++;
+ kfree(urb->setup_packet);
return;
}
@@ -2285,6 +2279,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
data->evt_skb = skb_clone(skb, GFP_ATOMIC);
if (!data->evt_skb) {
kfree_skb(skb);
+ kfree(urb->setup_packet);
return;
}
}
@@ -2293,6 +2288,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
if (err < 0) {
kfree_skb(data->evt_skb);
data->evt_skb = NULL;
+ kfree(urb->setup_packet);
return;
}
@@ -2303,6 +2299,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
wake_up_bit(&data->flags,
BTUSB_TX_WAIT_VND_EVT);
}
+ kfree(urb->setup_packet);
return;
} else if (urb->status == -ENOENT) {
/* Avoid suspend failed when usb_kill_urb */
@@ -2323,6 +2320,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
usb_anchor_urb(urb, &data->ctrl_anchor);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
+ kfree(urb->setup_packet);
/* -EPERM: urb is being killed;
* -ENODEV: device got disconnected
*/
@@ -2497,209 +2495,6 @@ err_free_wc:
return err;
}
-static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname)
-{
- struct btmtk_hci_wmt_params wmt_params;
- struct btmtk_global_desc *globaldesc = NULL;
- struct btmtk_section_map *sectionmap;
- const struct firmware *fw;
- const u8 *fw_ptr;
- const u8 *fw_bin_ptr;
- int err, dlen, i, status;
- u8 flag, first_block, retry;
- u32 section_num, dl_size, section_offset;
- u8 cmd[64];
-
- err = request_firmware(&fw, fwname, &hdev->dev);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
- return err;
- }
-
- fw_ptr = fw->data;
- fw_bin_ptr = fw_ptr;
- globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
- section_num = le32_to_cpu(globaldesc->section_num);
-
- for (i = 0; i < section_num; i++) {
- first_block = 1;
- fw_ptr = fw_bin_ptr;
- sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
- MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i);
-
- section_offset = le32_to_cpu(sectionmap->secoffset);
- dl_size = le32_to_cpu(sectionmap->bin_info_spec.dlsize);
-
- if (dl_size > 0) {
- retry = 20;
- while (retry > 0) {
- cmd[0] = 0; /* 0 means legacy dl mode. */
- memcpy(cmd + 1,
- fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
- MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i +
- MTK_SEC_MAP_COMMON_SIZE,
- MTK_SEC_MAP_NEED_SEND_SIZE + 1);
-
- wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
- wmt_params.status = &status;
- wmt_params.flag = 0;
- wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1;
- wmt_params.data = &cmd;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
- err);
- goto err_release_fw;
- }
-
- if (status == BTMTK_WMT_PATCH_UNDONE) {
- break;
- } else if (status == BTMTK_WMT_PATCH_PROGRESS) {
- msleep(100);
- retry--;
- } else if (status == BTMTK_WMT_PATCH_DONE) {
- goto next_section;
- } else {
- bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
- status);
- goto err_release_fw;
- }
- }
-
- fw_ptr += section_offset;
- wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
- wmt_params.status = NULL;
-
- while (dl_size > 0) {
- dlen = min_t(int, 250, dl_size);
- if (first_block == 1) {
- flag = 1;
- first_block = 0;
- } else if (dl_size - dlen <= 0) {
- flag = 3;
- } else {
- flag = 2;
- }
-
- wmt_params.flag = flag;
- wmt_params.dlen = dlen;
- wmt_params.data = fw_ptr;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
- err);
- goto err_release_fw;
- }
-
- dl_size -= dlen;
- fw_ptr += dlen;
- }
- }
-next_section:
- continue;
- }
- /* Wait a few moments for firmware activation done */
- usleep_range(100000, 120000);
-
-err_release_fw:
- release_firmware(fw);
-
- return err;
-}
-
-static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
-{
- struct btmtk_hci_wmt_params wmt_params;
- const struct firmware *fw;
- const u8 *fw_ptr;
- size_t fw_size;
- int err, dlen;
- u8 flag, param;
-
- err = request_firmware(&fw, fwname, &hdev->dev);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
- return err;
- }
-
- /* Power on data RAM the firmware relies on. */
- param = 1;
- wmt_params.op = BTMTK_WMT_FUNC_CTRL;
- wmt_params.flag = 3;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = NULL;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
- goto err_release_fw;
- }
-
- fw_ptr = fw->data;
- fw_size = fw->size;
-
- /* The size of patch header is 30 bytes, should be skip */
- if (fw_size < 30) {
- err = -EINVAL;
- goto err_release_fw;
- }
-
- fw_size -= 30;
- fw_ptr += 30;
- flag = 1;
-
- wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
- wmt_params.status = NULL;
-
- while (fw_size > 0) {
- dlen = min_t(int, 250, fw_size);
-
- /* Tell device the position in sequence */
- if (fw_size - dlen <= 0)
- flag = 3;
- else if (fw_size < fw->size - 30)
- flag = 2;
-
- wmt_params.flag = flag;
- wmt_params.dlen = dlen;
- wmt_params.data = fw_ptr;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
- err);
- goto err_release_fw;
- }
-
- fw_size -= dlen;
- fw_ptr += dlen;
- }
-
- wmt_params.op = BTMTK_WMT_RST;
- wmt_params.flag = 4;
- wmt_params.dlen = 0;
- wmt_params.data = NULL;
- wmt_params.status = NULL;
-
- /* Activate funciton the firmware providing to */
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
- goto err_release_fw;
- }
-
- /* Wait a few moments for firmware activation done */
- usleep_range(10000, 12000);
-
-err_release_fw:
- release_firmware(fw);
-
- return err;
-}
-
static int btusb_mtk_func_query(struct hci_dev *hdev)
{
struct btmtk_hci_wmt_params wmt_params;
@@ -2857,7 +2652,12 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, (fw_version & 0xff) + 1);
- err = btusb_mtk_setup_firmware_79xx(hdev, fw_bin_name);
+ err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
+ btusb_mtk_hci_wmt_sync);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
+ return err;
+ }
/* It's Device EndPoint Reset Option Register */
btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
@@ -2877,6 +2677,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
}
hci_set_msft_opcode(hdev, 0xFD30);
+ hci_set_aosp_capable(hdev);
goto done;
default:
bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
@@ -2903,7 +2704,8 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
}
/* Setup a firmware which the device definitely requires */
- err = btusb_mtk_setup_firmware(hdev, fwname);
+ err = btmtk_setup_firmware(hdev, fwname,
+ btusb_mtk_hci_wmt_sync);
if (err < 0)
return err;
@@ -3064,9 +2866,6 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
return hci_recv_frame(hdev, skb);
}
-MODULE_FIRMWARE(FIRMWARE_MT7663);
-MODULE_FIRMWARE(FIRMWARE_MT7668);
-
#ifdef CONFIG_PM
/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */
static int marvell_config_oob_wake(struct hci_dev *hdev)
@@ -3190,11 +2989,15 @@ static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev,
#define QCA_DFU_TIMEOUT 3000
#define QCA_FLAG_MULTI_NVM 0x80
+#define WCN6855_2_0_RAM_VERSION_GF 0x400c1200
+#define WCN6855_2_1_RAM_VERSION_GF 0x400c1211
+
struct qca_version {
__le32 rom_version;
__le32 patch_version;
__le32 ram_version;
- __le16 board_id;
+ __u8 chip_id;
+ __u8 platform_id;
__le16 flag;
__u8 reserved[4];
} __packed;
@@ -3221,6 +3024,7 @@ static const struct qca_device_info qca_devices_table[] = {
{ 0x00000302, 28, 4, 16 }, /* Rome 3.2 */
{ 0x00130100, 40, 4, 16 }, /* WCN6855 1.0 */
{ 0x00130200, 40, 4, 16 }, /* WCN6855 2.0 */
+ { 0x00130201, 40, 4, 16 }, /* WCN6855 2.1 */
};
static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
@@ -3375,6 +3179,50 @@ done:
return err;
}
+static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
+ const struct qca_version *ver)
+{
+ u32 rom_version = le32_to_cpu(ver->rom_version);
+ u16 flag = le16_to_cpu(ver->flag);
+
+ if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
+ /* The board_id is split into two bytes:
+ * the 1st byte is the chip ID and the 2nd byte is the platform ID.
+ * For example, in the little-endian board ID 0x010A, 0x01 is the
+ * platform ID and 0x0A is the chip ID.
+ * We have several platforms, and platform IDs are continuously added.
+ * Platform ID:
+ * 0x00 is for Mobile
+ * 0x01 is for X86
+ * 0x02 is for Automotive
+ * 0x03 is for Consumer electronics
+ */
+ u16 board_id = (ver->chip_id << 8) + ver->platform_id;
+ const char *variant;
+
+ switch (le32_to_cpu(ver->ram_version)) {
+ case WCN6855_2_0_RAM_VERSION_GF:
+ case WCN6855_2_1_RAM_VERSION_GF:
+ variant = "_gf";
+ break;
+ default:
+ variant = "";
+ break;
+ }
+
+ if (board_id == 0) {
+ snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin",
+ rom_version, variant);
+ } else {
+ snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin",
+ rom_version, variant, board_id);
+ }
+ } else {
+ snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin",
+ rom_version);
+ }
+
+}
+
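For illustration only (not part of the patch), a minimal user-space sketch of the naming scheme implemented above, assuming a hypothetical chip ID 0x0a, platform ID 0x01 and a GF RAM version:

	#include <stdio.h>

	int main(void)
	{
		unsigned char chip_id = 0x0a, platform_id = 0x01; /* hypothetical IDs */
		unsigned int rom_version = 0x00130201;            /* WCN6855 2.1 */
		unsigned short board_id = (chip_id << 8) + platform_id;
		char fwname[64];

		/* Mirrors the multi-NVM, non-zero board ID branch above. */
		snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x%s_%04x.bin",
			 rom_version, "_gf", board_id);
		printf("%s\n", fwname); /* qca/nvm_usb_00130201_gf_0a01.bin */
		return 0;
	}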
static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
struct qca_version *ver,
const struct qca_device_info *info)
@@ -3383,20 +3231,7 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
char fwname[64];
int err;
- if (((ver->flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
- /* if boardid equal 0, use default nvm without surfix */
- if (le16_to_cpu(ver->board_id) == 0x0) {
- snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
- le32_to_cpu(ver->rom_version));
- } else {
- snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x_%04x.bin",
- le32_to_cpu(ver->rom_version),
- le16_to_cpu(ver->board_id));
- }
- } else {
- snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
- le32_to_cpu(ver->rom_version));
- }
+ btusb_generate_qca_nvm_name(fwname, sizeof(fwname), ver);
err = request_firmware(&fw, fwname, &hdev->dev);
if (err) {
@@ -3661,6 +3496,49 @@ static int btusb_shutdown_qca(struct hci_dev *hdev)
return 0;
}
+static ssize_t force_poll_sync_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct btusb_data *data = file->private_data;
+ char buf[3];
+
+ buf[0] = data->poll_sync ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_poll_sync_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct btusb_data *data = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ /* Only allow changes while the adapter is down */
+ if (test_bit(HCI_UP, &data->hdev->flags))
+ return -EPERM;
+
+ if (data->poll_sync == enable)
+ return -EALREADY;
+
+ data->poll_sync = enable;
+
+ return count;
+}
+
+static const struct file_operations force_poll_sync_fops = {
+ .open = simple_open,
+ .read = force_poll_sync_read,
+ .write = force_poll_sync_write,
+ .llseek = default_llseek,
+};
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -3744,6 +3622,10 @@ static int btusb_probe(struct usb_interface *intf,
INIT_WORK(&data->work, btusb_work);
INIT_WORK(&data->waker, btusb_waker);
+ INIT_DELAYED_WORK(&data->rx_work, btusb_rx_work);
+
+ skb_queue_head_init(&data->acl_q);
+
init_usb_anchor(&data->deferred);
init_usb_anchor(&data->tx_anchor);
spin_lock_init(&data->txlock);
@@ -3857,6 +3739,9 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_INTEL_BROKEN_INITIAL_NCMD)
btintel_set_flag(hdev, INTEL_BROKEN_INITIAL_NCMD);
+
+ if (id->driver_info & BTUSB_INTEL_BROKEN_SHUTDOWN_LED)
+ btintel_set_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED);
}
if (id->driver_info & BTUSB_MARVELL)
@@ -3868,7 +3753,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
hdev->cmd_timeout = btusb_mtk_cmd_timeout;
- hdev->set_bdaddr = btusb_set_bdaddr_mtk;
+ hdev->set_bdaddr = btmtk_set_bdaddr;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
data->recv_acl = btusb_recv_acl_mtk;
}
@@ -4009,6 +3894,9 @@ static int btusb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
+ debugfs_create_file("force_poll_sync", 0644, hdev->debugfs, data,
+ &force_poll_sync_fops);
+
return 0;
out_free_dev:
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index ef54afa29357..d634a27bc850 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -1188,7 +1188,12 @@ static int bcm_probe(struct platform_device *pdev)
return -ENOMEM;
dev->dev = &pdev->dev;
- dev->irq = platform_get_irq(pdev, 0);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+
+ dev->irq = ret;
/* Initialize routing field to an unused value */
dev->pcm_int_params[0] = 0xff;
@@ -1508,7 +1513,6 @@ static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm4334-bt" },
{ .compatible = "brcm,bcm4345c5" },
- { .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
{ .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
{ .compatible = "brcm,bcm4335a0" },
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 4b3b14a34794..1d0cdf023243 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -252,7 +252,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
}
if (!dlen) {
- hu->padding = (skb->len - 1) % alignment;
+ hu->padding = (skb->len + 1) % alignment;
hu->padding = (alignment - hu->padding) % alignment;
/* No more data, complete frame */
@@ -260,7 +260,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
skb = NULL;
}
} else {
- hu->padding = (skb->len - 1) % alignment;
+ hu->padding = (skb->len + 1) % alignment;
hu->padding = (alignment - hu->padding) % alignment;
/* Complete frame */
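A quick sanity check of the corrected arithmetic, assuming alignment = 4: skb->len excludes the packet-type byte already consumed, so a 300-byte frame on the wire leaves skb->len = 299. The fixed code computes (299 + 1) % 4 = 0, hence no padding is expected, which matches the 300 bytes actually received; the old (skb->len - 1) form would have demanded 2 spurious padding bytes.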
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index ecdf8e034351..f537673ede17 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -739,14 +739,13 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
* Arguments:
*
* tty pointer to tty instance data
- * file pointer to open file object for device
* cmd IOCTL command code
* arg argument for IOCTL call (cmd dependent)
*
* Return Value: Command dependent
*/
-static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int hci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct hci_uart *hu = tty->disc_data;
int err = 0;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index dd768a8ed7cb..f6e91fb432a3 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1928,6 +1928,9 @@ static int qca_power_off(struct hci_dev *hdev)
hu->hdev->hw_error = NULL;
hu->hdev->cmd_timeout = NULL;
+ del_timer_sync(&qca->wake_retrans_timer);
+ del_timer_sync(&qca->tx_idle_timer);
+
/* Stop sending shutdown command if soc crashes. */
if (soc_type != QCA_ROME
&& qca->memdump_state == QCA_MEMDUMP_IDLE) {
@@ -2056,14 +2059,14 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (!qcadev->bt_en && data->soc_type == QCA_WCN6750) {
+ if (IS_ERR_OR_NULL(qcadev->bt_en) && data->soc_type == QCA_WCN6750) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
power_ctrl_enabled = false;
}
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
- if (!qcadev->sw_ctrl && data->soc_type == QCA_WCN6750)
+ if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && data->soc_type == QCA_WCN6750)
dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
@@ -2085,7 +2088,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (!qcadev->bt_en) {
+ if (IS_ERR_OR_NULL(qcadev->bt_en)) {
dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
power_ctrl_enabled = false;
}
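The fix relies on the return convention of devm_gpiod_get_optional(): NULL when the GPIO is simply not described, an ERR_PTR() on a real lookup failure, and a valid descriptor otherwise, so only IS_ERR_OR_NULL() catches both ways of not getting a usable pin. A minimal sketch of the convention (helper name hypothetical):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int example_get_enable_gpio(struct device *dev)
	{
		struct gpio_desc *gpiod;

		gpiod = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
		if (IS_ERR(gpiod))	/* lookup error, e.g. -EPROBE_DEFER */
			return PTR_ERR(gpiod);
		if (!gpiod)		/* property absent: optional pin not wired */
			dev_warn(dev, "enable gpio not described\n");
		return 0;
	}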
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index b45db0db347c..c443c3b0a4da 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -38,9 +38,12 @@ struct vhci_data {
struct mutex open_mutex;
struct delayed_work open_timeout;
+ struct work_struct suspend_work;
bool suspended;
bool wakeup;
+ __u16 msft_opcode;
+ bool aosp_capable;
};
static int vhci_open_dev(struct hci_dev *hdev)
@@ -114,6 +117,17 @@ static ssize_t force_suspend_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
+static void vhci_suspend_work(struct work_struct *work)
+{
+ struct vhci_data *data = container_of(work, struct vhci_data,
+ suspend_work);
+
+ if (data->suspended)
+ hci_suspend_dev(data->hdev);
+ else
+ hci_resume_dev(data->hdev);
+}
+
static ssize_t force_suspend_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -129,16 +143,10 @@ static ssize_t force_suspend_write(struct file *file,
if (data->suspended == enable)
return -EALREADY;
- if (enable)
- err = hci_suspend_dev(data->hdev);
- else
- err = hci_resume_dev(data->hdev);
-
- if (err)
- return err;
-
data->suspended = enable;
+ schedule_work(&data->suspend_work);
+
return count;
}
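The write handler now only records the requested state and defers the actual hci_suspend_dev()/hci_resume_dev() call to suspend_work, so the sleeping suspend/resume path no longer runs in the debugfs writer's context; vhci_release() flushes the work (see the flush_work() hunk further down) before the device is torn down.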
@@ -176,6 +184,8 @@ static ssize_t force_wakeup_write(struct file *file,
if (data->wakeup == enable)
return -EALREADY;
+ data->wakeup = enable;
+
return count;
}
@@ -186,6 +196,88 @@ static const struct file_operations force_wakeup_fops = {
.llseek = default_llseek,
};
+static int msft_opcode_set(void *data, u64 val)
+{
+ struct vhci_data *vhci = data;
+
+ if (val > 0xffff || hci_opcode_ogf(val) != 0x3f)
+ return -EINVAL;
+
+ if (vhci->msft_opcode)
+ return -EALREADY;
+
+ vhci->msft_opcode = val;
+
+ return 0;
+}
+
+static int msft_opcode_get(void *data, u64 *val)
+{
+ struct vhci_data *vhci = data;
+
+ *val = vhci->msft_opcode;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(msft_opcode_fops, msft_opcode_get, msft_opcode_set,
+ "%llu\n");
+
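Worked check of the validation above: HCI opcodes pack a 6-bit OGF above a 10-bit OCF and hci_opcode_ogf() is a right shift by 10, so a vendor opcode such as 0xfd30 passes (0xfd30 >> 10 == 0x3f) while, say, 0x0c03 (HCI_Reset, OGF 0x03) is rejected.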
+static ssize_t aosp_capable_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *vhci = file->private_data;
+ char buf[3];
+
+ buf[0] = vhci->aosp_capable ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t aosp_capable_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct vhci_data *vhci = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ if (!enable)
+ return -EINVAL;
+
+ if (vhci->aosp_capable)
+ return -EALREADY;
+
+ vhci->aosp_capable = enable;
+
+ return count;
+}
+
+static const struct file_operations aosp_capable_fops = {
+ .open = simple_open,
+ .read = aosp_capable_read,
+ .write = aosp_capable_write,
+ .llseek = default_llseek,
+};
+
+static int vhci_setup(struct hci_dev *hdev)
+{
+ struct vhci_data *vhci = hci_get_drvdata(hdev);
+
+ if (vhci->msft_opcode)
+ hci_set_msft_opcode(hdev, vhci->msft_opcode);
+
+ if (vhci->aosp_capable)
+ hci_set_aosp_capable(hdev);
+
+ return 0;
+}
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -228,6 +320,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->get_data_path_id = vhci_get_data_path_id;
hdev->get_codec_config_data = vhci_get_codec_config_data;
hdev->wakeup = vhci_wakeup;
+ hdev->setup = vhci_setup;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
/* bit 6 is for external configuration */
if (opcode & 0x40)
@@ -237,6 +331,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
if (opcode & 0x80)
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
@@ -251,6 +347,14 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
&force_wakeup_fops);
+ if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+ debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
+ &msft_opcode_fops);
+
+ if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+ debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
+ &aosp_capable_fops);
+
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
skb_put_u8(skb, 0xff);
@@ -440,6 +544,7 @@ static int vhci_open(struct inode *inode, struct file *file)
mutex_init(&data->open_mutex);
INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
+ INIT_WORK(&data->suspend_work, vhci_suspend_work);
file->private_data = data;
nonseekable_open(inode, file);
@@ -455,6 +560,7 @@ static int vhci_release(struct inode *inode, struct file *file)
struct hci_dev *hdev;
cancel_delayed_work_sync(&data->open_timeout);
+ flush_work(&data->suspend_work);
hdev = data->hdev;
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index 57908ce4fae8..67c21263f9e0 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -202,6 +202,9 @@ static void virtbt_rx_handle(struct virtio_bluetooth *vbt, struct sk_buff *skb)
hci_skb_pkt_type(skb) = pkt_type;
hci_recv_frame(vbt->hdev, skb);
break;
+ default:
+ kfree_skb(skb);
+ break;
}
}
@@ -364,7 +367,7 @@ static void virtbt_remove(struct virtio_device *vdev)
struct hci_dev *hdev = vbt->hdev;
hci_unregister_dev(hdev);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
hci_free_dev(hdev);
vbt->hdev = NULL;
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 315e830b6ecd..5e70f9775a0e 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -400,7 +400,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
struct fsl_mc_io *mc_io = mc_dev->mc_io;
- struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
+ int irq = mc_dev->irqs[0]->virq;
dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
irq_num, smp_processor_id());
@@ -409,7 +409,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
return IRQ_HANDLED;
mutex_lock(&mc_bus->scan_mutex);
- if (!msi_desc || msi_desc->irq != (u32)irq_num)
+ if (irq != (u32)irq_num)
goto out;
status = 0;
@@ -521,7 +521,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
* function that programs the MSI physically in the device
*/
error = devm_request_threaded_irq(&mc_dev->dev,
- irq->msi_desc->irq,
+ irq->virq,
dprc_irq0_handler,
dprc_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
@@ -771,7 +771,7 @@ static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
(void)disable_dprc_irq(mc_dev);
- devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+ devm_free_irq(&mc_dev->dev, irq->virq, &mc_dev->dev);
fsl_mc_free_irqs(mc_dev);
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 6c513556911e..dced427ca8ba 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -350,7 +350,6 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
unsigned int irq_count)
{
unsigned int i;
- struct msi_desc *msi_desc;
struct fsl_mc_device_irq *irq_resources;
struct fsl_mc_device_irq *mc_dev_irq;
int error;
@@ -388,16 +387,12 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
mc_dev_irq->resource.type = res_pool->type;
mc_dev_irq->resource.data = mc_dev_irq;
mc_dev_irq->resource.parent_pool = res_pool;
+ mc_dev_irq->virq = msi_get_virq(&mc_bus_dev->dev, i);
+ mc_dev_irq->resource.id = mc_dev_irq->virq;
INIT_LIST_HEAD(&mc_dev_irq->resource.node);
list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
}
- for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
- mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
- mc_dev_irq->msi_desc = msi_desc;
- mc_dev_irq->resource.id = msi_desc->irq;
- }
-
res_pool->max_count = irq_count;
res_pool->free_count = irq_count;
mc_bus->irq_resources = irq_resources;
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
index cf974870ba55..5e0e4393ce4d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-msi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -29,7 +29,7 @@ static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
* Make the base hwirq value for ICID*10000 so it is readable
* as a decimal value in /proc/interrupts.
*/
- return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+ return (irq_hw_number_t)(desc->msi_index + (dev->icid * 10000));
}
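Worked instance of the hwirq encoding above: ICID 3 with MSI index 2 yields hwirq 30002, so the ICID can be read off directly from the decimal value in /proc/interrupts.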
static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
@@ -58,11 +58,11 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
}
static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
- struct fsl_mc_device_irq *mc_dev_irq)
+ struct fsl_mc_device_irq *mc_dev_irq,
+ struct msi_desc *msi_desc)
{
int error;
struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
- struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
struct dprc_irq_cfg irq_cfg;
/*
@@ -122,14 +122,14 @@ static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_device_irq *mc_dev_irq =
- &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
+ &mc_bus->irq_resources[msi_desc->msi_index];
msi_desc->msg = *msg;
/*
* Program the MSI (paddr, value) pair in the device:
*/
- __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
+ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq, msi_desc);
}
static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
@@ -170,6 +170,7 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
fsl_mc_msi_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
fsl_mc_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
@@ -210,61 +211,21 @@ struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
return msi_domain;
}
-static void fsl_mc_msi_free_descs(struct device *dev)
-{
- struct msi_desc *desc, *tmp;
-
- list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
- list_del(&desc->list);
- free_msi_entry(desc);
- }
-}
-
-static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
-
-{
- unsigned int i;
- int error;
- struct msi_desc *msi_desc;
-
- for (i = 0; i < irq_count; i++) {
- msi_desc = alloc_msi_entry(dev, 1, NULL);
- if (!msi_desc) {
- dev_err(dev, "Failed to allocate msi entry\n");
- error = -ENOMEM;
- goto cleanup_msi_descs;
- }
-
- msi_desc->fsl_mc.msi_index = i;
- INIT_LIST_HEAD(&msi_desc->list);
- list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
- }
-
- return 0;
-
-cleanup_msi_descs:
- fsl_mc_msi_free_descs(dev);
- return error;
-}
-
-int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
- unsigned int irq_count)
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
{
struct irq_domain *msi_domain;
int error;
- if (!list_empty(dev_to_msi_list(dev)))
+ msi_domain = dev_get_msi_domain(dev);
+ if (!msi_domain)
return -EINVAL;
- error = fsl_mc_msi_alloc_descs(dev, irq_count);
- if (error < 0)
+ error = msi_setup_device_data(dev);
+ if (error)
return error;
- msi_domain = dev_get_msi_domain(dev);
- if (!msi_domain) {
- error = -EINVAL;
- goto cleanup_msi_descs;
- }
+ if (msi_first_desc(dev, MSI_DESC_ALL))
+ return -EINVAL;
/*
* NOTE: Calling this function will trigger the invocation of the
@@ -272,15 +233,8 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
*/
error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
- if (error) {
+ if (error)
dev_err(dev, "Failed to allocate IRQs\n");
- goto cleanup_msi_descs;
- }
-
- return 0;
-
-cleanup_msi_descs:
- fsl_mc_msi_free_descs(dev);
return error;
}
@@ -293,9 +247,4 @@ void fsl_mc_msi_domain_free_irqs(struct device *dev)
return;
msi_domain_free_irqs(msi_domain, dev);
-
- if (list_empty(dev_to_msi_list(dev)))
- return;
-
- fsl_mc_msi_free_descs(dev);
}
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 28bb65a5613f..bccb275b65ba 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -21,6 +21,7 @@ struct imx_weim_devtype {
unsigned int cs_stride;
unsigned int wcr_offset;
unsigned int wcr_bcm;
+ unsigned int wcr_cont_bclk;
};
static const struct imx_weim_devtype imx1_weim_devtype = {
@@ -41,6 +42,7 @@ static const struct imx_weim_devtype imx50_weim_devtype = {
.cs_stride = 0x18,
.wcr_offset = 0x90,
.wcr_bcm = BIT(0),
+ .wcr_cont_bclk = BIT(3),
};
static const struct imx_weim_devtype imx51_weim_devtype = {
@@ -206,8 +208,20 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) {
if (devtype->wcr_bcm) {
reg = readl(base + devtype->wcr_offset);
- writel(reg | devtype->wcr_bcm,
- base + devtype->wcr_offset);
+ reg |= devtype->wcr_bcm;
+
+ if (of_property_read_bool(pdev->dev.of_node,
+ "fsl,continuous-burst-clk")) {
+ if (devtype->wcr_cont_bclk) {
+ reg |= devtype->wcr_cont_bclk;
+ } else {
+ dev_err(&pdev->dev,
+ "continuous burst clk not supported.\n");
+ return -EINVAL;
+ }
+ }
+
+ writel(reg, base + devtype->wcr_offset);
} else {
dev_err(&pdev->dev, "burst clk mode not supported.\n");
return -EINVAL;
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index 0a972620a403..74295d3cc662 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -417,7 +417,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
}
/* wait for ready on pass through or any other execution environment */
- if (mhi_cntrl->ee != MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_PBL)
+ if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
goto fw_load_ready_state;
fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 5aaca6d0f52b..046f407dc5d6 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -79,7 +79,8 @@ static const char * const mhi_pm_state_str[] = {
const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
- int index = find_last_bit((unsigned long *)&state, 32);
+ unsigned long pm_state = state;
+ int index = find_last_bit(&pm_state, 32);
if (index >= ARRAY_SIZE(mhi_pm_state_str))
return "Invalid State";
@@ -788,6 +789,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
mhi_chan->offload_ch = ch_cfg->offload_channel;
mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
mhi_chan->pre_alloc = ch_cfg->auto_queue;
+ mhi_chan->wake_capable = ch_cfg->wake_capable;
/*
* If MHI host allocates buffers, then the channel direction
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 3a732afaf73e..e2e10474a9d9 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -390,7 +390,8 @@ extern const char * const mhi_ee_str[MHI_EE_MAX];
#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
ee == MHI_EE_EDL)
-
+#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS)
+#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL)
#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
ee == MHI_EE_FP)
@@ -681,8 +682,12 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
+
+/* Automatically allocate and queue inbound buffers */
+#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan);
+ struct mhi_chan *mhi_chan, unsigned int flags);
+
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index b15c5bc37dd4..ffde617f93a3 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -1065,7 +1065,7 @@ void mhi_ctrl_ev_task(unsigned long data)
return;
}
- /* Process ctrl events events */
+ /* Process ctrl events */
ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
/*
@@ -1430,7 +1430,7 @@ exit_unprepare_channel:
}
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan)
+ struct mhi_chan *mhi_chan, unsigned int flags)
{
int ret = 0;
struct device *dev = &mhi_chan->mhi_dev->dev;
@@ -1455,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
if (ret)
goto error_pm_state;
+ if (mhi_chan->dir == DMA_FROM_DEVICE)
+ mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
+
/* Pre-allocate buffer for xfer ring */
if (mhi_chan->pre_alloc) {
int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1464,6 +1467,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
while (nr_el--) {
void *buf;
struct mhi_buf_info info = { };
+
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
@@ -1609,8 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
read_unlock_bh(&mhi_cntrl->pm_lock);
}
-/* Move channel to start state */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
{
int ret, dir;
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1621,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
if (!mhi_chan)
continue;
- ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
if (ret)
goto error_open_chan;
}
@@ -1639,8 +1642,19 @@ error_open_chan:
return ret;
}
+
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+ return __mhi_prepare_for_transfer(mhi_dev, 0);
+}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
+{
+ return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
+
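A hedged sketch of how an MHI client driver might adopt the new entry point (client name hypothetical); inbound DL buffers are then allocated and re-queued by the core instead of by the client:

	#include <linux/mhi.h>

	static int example_mhi_probe(struct mhi_device *mhi_dev,
				     const struct mhi_device_id *id)
	{
		/* The core pre-allocates and queues inbound buffers for us. */
		return mhi_prepare_for_transfer_autoqueue(mhi_dev);
	}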
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index 547e6e769546..4aae0baea008 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -42,7 +42,7 @@
* L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
* LD_ERR_FATAL_DETECT -> DISABLE
*/
-static struct mhi_pm_transitions const dev_state_transitions[] = {
+static const struct mhi_pm_transitions dev_state_transitions[] = {
/* L0 States */
{
MHI_PM_DISABLE,
@@ -1053,7 +1053,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
enum mhi_ee_type current_ee;
enum dev_st_transition next_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- u32 val;
+ u32 interval_us = 25000; /* poll register field every 25 milliseconds */
int ret;
dev_info(dev, "Requested to power ON\n");
@@ -1070,10 +1070,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mutex_lock(&mhi_cntrl->pm_mutex);
mhi_cntrl->pm_state = MHI_PM_DISABLE;
- ret = mhi_init_irq_setup(mhi_cntrl);
- if (ret)
- goto error_setup_irq;
-
/* Setup BHI INTVEC */
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
@@ -1083,11 +1079,11 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
write_unlock_irq(&mhi_cntrl->pm_lock);
/* Confirm that the device is in valid exec env */
- if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
+ if (!MHI_POWER_UP_CAPABLE(current_ee)) {
dev_err(dev, "%s is not a valid EE for power on\n",
TO_MHI_EXEC_STR(current_ee));
ret = -EIO;
- goto error_async_power_up;
+ goto error_exit;
}
state = mhi_get_mhi_state(mhi_cntrl);
@@ -1096,20 +1092,12 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
if (state == MHI_STATE_SYS_ERR) {
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
- ret = wait_event_timeout(mhi_cntrl->state_event,
- MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
- mhi_read_reg_field(mhi_cntrl,
- mhi_cntrl->regs,
- MHICTRL,
- MHICTRL_RESET_MASK,
- MHICTRL_RESET_SHIFT,
- &val) ||
- !val,
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
- if (!ret) {
- ret = -EIO;
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
+ interval_us);
+ if (ret) {
dev_info(dev, "Failed to reset MHI due to syserr state\n");
- goto error_async_power_up;
+ goto error_exit;
}
/*
@@ -1119,6 +1107,10 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
}
+ ret = mhi_init_irq_setup(mhi_cntrl);
+ if (ret)
+ goto error_exit;
+
/* Transition to next state */
next_state = MHI_IN_PBL(current_ee) ?
DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
@@ -1131,10 +1123,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
return 0;
-error_async_power_up:
- mhi_deinit_free_irq(mhi_cntrl);
-
-error_setup_irq:
+error_exit:
mhi_cntrl->pm_state = MHI_PM_DISABLE;
mutex_unlock(&mhi_cntrl->pm_mutex);
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 4c577a731709..3a258a677df8 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -403,7 +403,50 @@ static const struct mhi_pci_dev_info mhi_mv31_info = {
.dma_data_width = 32,
};
+static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
+};
+
+static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
+ /* first ring is control+data and DIAG ring */
+ MHI_EVENT_CONFIG_CTRL(0, 2048),
+ /* Hardware channels request dedicated hardware event rings */
+ MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
+ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_sierra_em919x_config = {
+ .max_channels = 128,
+ .timeout_ms = 24000,
+ .num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
+ .ch_cfg = mhi_sierra_em919x_channels,
+ .num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
+ .event_cfg = modem_sierra_em919x_mhi_events,
+};
+
+static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
+ .name = "sierra-em919x",
+ .config = &modem_sierra_em919x_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
static const struct pci_device_id mhi_pci_id_table[] = {
+ /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
+ .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
@@ -423,6 +466,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W175 (sdx55), Based on Qualcomm new baseline */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(0x1269, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
@@ -529,18 +575,12 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
- err = pci_set_dma_mask(pdev, dma_mask);
+ err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
if (err) {
dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
return err;
}
- err = pci_set_consistent_dma_mask(pdev, dma_mask);
- if (err) {
- dev_err(&pdev->dev, "set consistent dma mask failed\n");
- return err;
- }
-
pci_set_master(pdev);
return 0;
@@ -1018,7 +1058,7 @@ static int __maybe_unused mhi_pci_freeze(struct device *dev)
* context.
*/
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
- mhi_power_down(mhi_cntrl, false);
+ mhi_power_down(mhi_cntrl, true);
mhi_unprepare_after_power_down(mhi_cntrl);
}
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index ea0424922de7..db612045616f 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -914,6 +914,7 @@ int mvebu_mbus_add_window_remap_by_id(unsigned int target,
return mvebu_mbus_alloc_window(s, base, size, remap, target, attribute);
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_remap_by_id);
int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
phys_addr_t base, size_t size)
@@ -921,6 +922,7 @@ int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
return mvebu_mbus_add_window_remap_by_id(target, attribute, base,
size, MVEBU_MBUS_NO_REMAP);
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_by_id);
int mvebu_mbus_del_window(phys_addr_t base, size_t size)
{
@@ -933,6 +935,7 @@ int mvebu_mbus_del_window(phys_addr_t base, size_t size)
mvebu_mbus_disable_window(&mbus_state, win);
return 0;
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_del_window);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
{
@@ -940,6 +943,7 @@ void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
return;
*res = mbus_state.pcie_mem_aperture;
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_mem_aperture);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
{
@@ -947,6 +951,7 @@ void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
return;
*res = mbus_state.pcie_io_aperture;
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_io_aperture);
int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
{
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index a6570789f7af..35b59f92fa66 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -13,8 +13,11 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
+
#define TEGRA_GMI_CONFIG 0x00
#define TEGRA_GMI_CONFIG_GO BIT(31)
#define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30)
@@ -54,9 +57,10 @@ static int tegra_gmi_enable(struct tegra_gmi *gmi)
{
int err;
- err = clk_prepare_enable(gmi->clk);
- if (err < 0) {
- dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ pm_runtime_enable(gmi->dev);
+ err = pm_runtime_resume_and_get(gmi->dev);
+ if (err) {
+ pm_runtime_disable(gmi->dev);
return err;
}
@@ -83,7 +87,9 @@ static void tegra_gmi_disable(struct tegra_gmi *gmi)
writel(config, gmi->base + TEGRA_GMI_CONFIG);
reset_control_assert(gmi->rst);
- clk_disable_unprepare(gmi->clk);
+
+ pm_runtime_put_sync_suspend(gmi->dev);
+ pm_runtime_force_suspend(gmi->dev);
}
static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
@@ -213,6 +219,7 @@ static int tegra_gmi_probe(struct platform_device *pdev)
if (!gmi)
return -ENOMEM;
+ platform_set_drvdata(pdev, gmi);
gmi->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -232,6 +239,10 @@ static int tegra_gmi_probe(struct platform_device *pdev)
return PTR_ERR(gmi->rst);
}
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
err = tegra_gmi_parse_dt(gmi);
if (err)
return err;
@@ -247,8 +258,6 @@ static int tegra_gmi_probe(struct platform_device *pdev)
return err;
}
- platform_set_drvdata(pdev, gmi);
-
return 0;
}
@@ -262,6 +271,34 @@ static int tegra_gmi_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev)
+{
+ struct tegra_gmi *gmi = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(gmi->clk);
+ if (err < 0) {
+ dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_gmi_runtime_suspend(struct device *dev)
+{
+ struct tegra_gmi *gmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(gmi->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_gmi_pm = {
+ SET_RUNTIME_PM_OPS(tegra_gmi_runtime_suspend, tegra_gmi_runtime_resume,
+ NULL)
+};
+
static const struct of_device_id tegra_gmi_id_table[] = {
{ .compatible = "nvidia,tegra20-gmi", },
{ .compatible = "nvidia,tegra30-gmi", },
@@ -275,6 +312,7 @@ static struct platform_driver tegra_gmi_driver = {
.driver = {
.name = "tegra-gmi",
.of_match_table = tegra_gmi_id_table,
+ .pm = &tegra_gmi_pm,
},
};
module_platform_driver(tegra_gmi_driver);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 9877e413fce3..1b57d4666e43 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3691,27 +3691,6 @@ static struct ctl_table cdrom_table[] = {
},
{ }
};
-
-static struct ctl_table cdrom_cdrom_table[] = {
- {
- .procname = "cdrom",
- .maxlen = 0,
- .mode = 0555,
- .child = cdrom_table,
- },
- { }
-};
-
-/* Make sure that /proc/sys/dev is there */
-static struct ctl_table cdrom_root_table[] = {
- {
- .procname = "dev",
- .maxlen = 0,
- .mode = 0555,
- .child = cdrom_cdrom_table,
- },
- { }
-};
static struct ctl_table_header *cdrom_sysctl_header;
static void cdrom_sysctl_register(void)
@@ -3721,7 +3700,7 @@ static void cdrom_sysctl_register(void)
if (!atomic_add_unless(&initialized, 1, 1))
return;
- cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
+ cdrom_sysctl_header = register_sysctl("dev/cdrom", cdrom_table);
/* set the defaults */
cdrom_sysctl_settings.autoclose = autoclose;
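register_sysctl() accepts a slash-separated path and creates the intermediate directories itself, which is exactly what the deleted cdrom_cdrom_table/cdrom_root_table boilerplate did by hand. A minimal sketch of the idiom, with a hypothetical table:

	#include <linux/sysctl.h>

	static int example_val;
	static struct ctl_table example_table[] = {
		{
			.procname = "value",
			.data = &example_val,
			.maxlen = sizeof(int),
			.mode = 0644,
			.proc_handler = proc_dointvec,
		},
		{ }
	};

	static struct ctl_table_header *example_header;

	static void example_sysctl_register(void)
	{
		/* Creates /proc/sys/dev/example and hooks the table under it. */
		example_header = register_sysctl("dev/example", example_table);
	}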
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index d50cc1fd34d5..faead41709bc 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -719,6 +719,7 @@ static void probe_gdrom_setupdisk(void)
gd.disk->major = gdrom_major;
gd.disk->first_minor = 1;
gd.disk->minors = 1;
+ gd.disk->flags |= GENHD_FL_NO_PART;
strcpy(gd.disk->disk_name, GDROM_DEV_NAME);
}
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index b40edae32817..dc78a4fb879e 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -588,20 +588,11 @@ static void agp_amd64_remove(struct pci_dev *pdev)
agp_bridges_found--;
}
-#ifdef CONFIG_PM
+#define agp_amd64_suspend NULL
-static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused agp_amd64_resume(struct device *dev)
{
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int agp_amd64_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
nforce3_agp_init(pdev);
@@ -609,8 +600,6 @@ static int agp_amd64_resume(struct pci_dev *pdev)
return amd_8151_configure();
}
-#endif /* CONFIG_PM */
-
static const struct pci_device_id agp_amd64_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
@@ -738,15 +727,14 @@ static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
{ }
};
+static SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, agp_amd64_suspend, agp_amd64_resume);
+
static struct pci_driver agp_amd64_pci_driver = {
.name = "agpgart-amd64",
.id_table = agp_amd64_pci_table,
.probe = agp_amd64_probe,
.remove = agp_amd64_remove,
-#ifdef CONFIG_PM
- .suspend = agp_amd64_suspend,
- .resume = agp_amd64_resume,
-#endif
+ .driver.pm = &agp_amd64_pm_ops,
};
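With dev_pm_ops the PCI core saves config space and handles the D-state transitions itself, which is all the deleted legacy suspend hook did, hence suspend can simply be NULL. The shape of the conversion, with hypothetical names:

	#include <linux/device.h>
	#include <linux/pm.h>

	static int __maybe_unused example_resume(struct device *dev)
	{
		/* Config space is already restored and the device is back in D0;
		 * only device-specific reprogramming is left to do here.
		 */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(example_pm_ops, NULL, example_resume);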
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 5bfdf222d5f9..c53cc9868cd8 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
+#include <linux/intel-iommu.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 14909fc5d767..f8a02f4bef1b 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -217,26 +217,14 @@ static void agp_sis_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#ifdef CONFIG_PM
+#define agp_sis_suspend NULL
-static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused agp_sis_resume(
+ __attribute__((unused)) struct device *dev)
{
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int agp_sis_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
return sis_driver.configure();
}
-#endif /* CONFIG_PM */
-
static const struct pci_device_id agp_sis_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
@@ -419,15 +407,14 @@ static const struct pci_device_id agp_sis_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
+static SIMPLE_DEV_PM_OPS(agp_sis_pm_ops, agp_sis_suspend, agp_sis_resume);
+
static struct pci_driver agp_sis_pci_driver = {
.name = "agpgart-sis",
.id_table = agp_sis_pci_table,
.probe = agp_sis_probe,
.remove = agp_sis_remove,
-#ifdef CONFIG_PM
- .suspend = agp_sis_suspend,
- .resume = agp_sis_resume,
-#endif
+ .driver.pm = &agp_sis_pm_ops,
};
static int __init agp_sis_init(void)
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 87a92a044570..a460ae352772 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -492,22 +492,11 @@ static void agp_via_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#ifdef CONFIG_PM
+#define agp_via_suspend NULL
-static int agp_via_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused agp_via_resume(struct device *dev)
{
- pci_save_state (pdev);
- pci_set_power_state (pdev, PCI_D3hot);
-
- return 0;
-}
-
-static int agp_via_resume(struct pci_dev *pdev)
-{
- struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-
- pci_set_power_state (pdev, PCI_D0);
- pci_restore_state(pdev);
+ struct agp_bridge_data *bridge = dev_get_drvdata(dev);
if (bridge->driver == &via_agp3_driver)
return via_configure_agp3();
@@ -517,8 +506,6 @@ static int agp_via_resume(struct pci_dev *pdev)
return 0;
}
-#endif /* CONFIG_PM */
-
/* must be the same order as name table above */
static const struct pci_device_id agp_via_pci_table[] = {
#define ID(x) \
@@ -567,16 +554,14 @@ static const struct pci_device_id agp_via_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
+static SIMPLE_DEV_PM_OPS(agp_via_pm_ops, agp_via_suspend, agp_via_resume);
static struct pci_driver agp_via_pci_driver = {
.name = "agpgart-via",
.id_table = agp_via_pci_table,
.probe = agp_via_probe,
.remove = agp_via_remove,
-#ifdef CONFIG_PM
- .suspend = agp_via_suspend,
- .resume = agp_via_resume,
-#endif
+ .driver.pm = &agp_via_pm_ops,
};
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index deb85a334c93..36203d3fa6ea 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -89,8 +89,8 @@ static struct applicom_board {
spinlock_t mutex;
} apbs[MAX_BOARD];
-static unsigned int irq = 0; /* interrupt number IRQ */
-static unsigned long mem = 0; /* physical segment of board */
+static unsigned int irq; /* interrupt number IRQ */
+static unsigned long mem; /* physical segment of board */
module_param_hw(irq, uint, irq, 0);
MODULE_PARM_DESC(irq, "IRQ of the Applicom board");
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4e5431f01450..563dfae3b8da 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -746,26 +746,6 @@ static struct ctl_table hpet_table[] = {
{}
};
-static struct ctl_table hpet_root[] = {
- {
- .procname = "hpet",
- .maxlen = 0,
- .mode = 0555,
- .child = hpet_table,
- },
- {}
-};
-
-static struct ctl_table dev_root[] = {
- {
- .procname = "dev",
- .maxlen = 0,
- .mode = 0555,
- .child = hpet_root,
- },
- {}
-};
-
static struct ctl_table_header *sysctl_header;
/*
@@ -1061,7 +1041,7 @@ static int __init hpet_init(void)
if (result < 0)
return -ENODEV;
- sysctl_header = register_sysctl_table(dev_root);
+ sysctl_header = register_sysctl("dev/hpet", hpet_table);
result = acpi_bus_register_driver(&hpet_acpi_driver);
if (result < 0) {
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 814b3d0ca7b7..9704963f9d50 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -226,19 +226,6 @@ config HW_RANDOM_VIRTIO
To compile this driver as a module, choose M here: the
module will be called virtio-rng. If unsure, say N.
-config HW_RANDOM_TX4939
- tristate "TX4939 Random Number Generator support"
- depends on SOC_TX4939
- default HW_RANDOM
- help
- This driver provides kernel-side support for the Random Number
- Generator hardware found on TX4939 SoC.
-
- To compile this driver as a module, choose M here: the
- module will be called tx4939-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_MXC_RNGA
tristate "Freescale i.MX RNGA Random Number Generator"
depends on SOC_IMX31
@@ -414,7 +401,7 @@ config HW_RANDOM_MESON
config HW_RANDOM_CAVIUM
tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+ depends on HW_RANDOM && PCI && ARM64
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -538,6 +525,17 @@ config HW_RANDOM_ARM_SMCCC_TRNG
To compile this driver as a module, choose M here: the
module will be called arm_smccc_trng.
+config HW_RANDOM_CN10K
+ tristate "Marvell CN10K Random Number Generator support"
+ depends on HW_RANDOM && PCI && ARM64
+ default HW_RANDOM
+ help
+ This driver provides support for the True Random Number
+ generator available in Marvell CN10K SoCs.
+
+ To compile this driver as a module, choose M here.
+ The module will be called cn10k_rng. If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index a5a1c765a394..584d47ba32f7 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
-obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o
@@ -46,3 +45,4 @@ obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
+obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index 3de4a6a443ef..6f66919652bf 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Hardware Random Number Generator support for Cavium, Inc.
- * Thunder processor family.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Hardware Random Number Generator support.
+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
*
* Copyright (C) 2016 Cavium, Inc.
*/
@@ -15,16 +12,146 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
+#include <asm/arch_timer.h>
+
+/* PCI device IDs */
+#define PCI_DEVID_CAVIUM_RNG_PF 0xA018
+#define PCI_DEVID_CAVIUM_RNG_VF 0xA033
+
+#define HEALTH_STATUS_REG 0x38
+
+/* RST device info */
+#define PCI_DEVICE_ID_RST_OTX2 0xA085
+#define RST_BOOT_REG 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+#define MSEC_TO_NSEC(x) ((x) * 1000000)
+
struct cavium_rng {
struct hwrng ops;
void __iomem *result;
+ void __iomem *pf_regbase;
+ struct pci_dev *pdev;
+ u64 clock_rate;
+ u64 prev_error;
+ u64 prev_time;
};
+static inline bool is_octeontx(struct pci_dev *pdev)
+{
+ if (midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_83XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)) ||
+ midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)) ||
+ midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)))
+ return true;
+
+ return false;
+}
+
+static u64 rng_get_coprocessor_clkrate(void)
+{
+ u64 ret = CLOCK_BASE_RATE * 16; /* Assume 800MHz as default */
+ struct pci_dev *pdev;
+ void __iomem *base;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_RST_OTX2, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto error_put_pdev;
+
+ /* RST: PNR_MUL * 50MHz gives the clock rate */
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT_REG) >> 33) & 0x3F);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
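Worked instance of the formula above: (readq(base + RST_BOOT_REG) >> 33) & 0x3F extracts the PNR_MUL field, so PNR_MUL = 16 gives 16 * 50 MHz = 800 MHz, the same value the function assumes as a fallback when the RST device cannot be mapped.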
+static int check_rng_health(struct cavium_rng *rng)
+{
+ u64 cur_err, cur_time;
+ u64 status, cycles;
+ u64 time_elapsed;
+
+ /* Skip checking health for OcteonTx */
+ if (!rng->pf_regbase)
+ return 0;
+
+ status = readq(rng->pf_regbase + HEALTH_STATUS_REG);
+ if (status & BIT_ULL(0)) {
+ dev_err(&rng->pdev->dev, "HWRNG: Startup health test failed\n");
+ return -EIO;
+ }
+
+ cycles = status >> 1;
+ if (!cycles)
+ return 0;
+
+ cur_time = arch_timer_read_counter();
+
+ /* RNM_HEALTH_STATUS[CYCLES_SINCE_HEALTH_FAILURE]
+ * Number of coprocessor cycles times 2 since the last failure.
+ * This field doesn't get cleared/updated until another failure.
+ */
+ cycles = cycles / 2;
+ cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */
+
+ /* Ignore errors that happened a long time ago, as these
+ * are most likely false positives.
+ */
+ if (cur_err > MSEC_TO_NSEC(10)) {
+ rng->prev_error = 0;
+ rng->prev_time = 0;
+ return 0;
+ }
+
+ if (rng->prev_error) {
+ /* Calculate time elapsed since last error
+ * One tick of CNTVCT is 10ns, since it runs at 100MHz.
+ */
+ time_elapsed = (cur_time - rng->prev_time) * 10;
+ time_elapsed += rng->prev_error;
+
+ /* Check whether the current error is a new one or the same old one.
+ * If it is a new one, assume there is a persistent issue with
+ * entropy and declare a hardware failure.
+ */
+ if (cur_err < time_elapsed) {
+ dev_err(&rng->pdev->dev, "HWRNG failure detected\n");
+ rng->prev_error = cur_err;
+ rng->prev_time = cur_time;
+ return -EIO;
+ }
+ }
+
+ rng->prev_error = cur_err;
+ rng->prev_time = cur_time;
+ return 0;
+}
+
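Worked instance of the conversion above, assuming the 800 MHz default clock: a CYCLES_SINCE_HEALTH_FAILURE field of 1600 means 800 coprocessor cycles (the field counts twice the cycles), and 800 * 1000000000 / 800000000 = 1000 ns, i.e. the failure occurred roughly 1 us before the read, well inside the 10 ms window, so it is treated as recent rather than discarded as a false positive.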
/* Read data from the RNG unit */
static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
{
struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
unsigned int size = max;
+ int err = 0;
+
+ err = check_rng_health(p);
+ if (err)
+ return err;
while (size >= 8) {
*((u64 *)dat) = readq(p->result);
@@ -39,6 +166,39 @@ static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
return max;
}
+static int cavium_map_pf_regs(struct cavium_rng *rng)
+{
+ struct pci_dev *pdev;
+
+ /* Health status is not supported on 83xx, skip mapping PF CSRs */
+ if (is_octeontx(rng->pdev)) {
+ rng->pf_regbase = NULL;
+ return 0;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_CAVIUM_RNG_PF, NULL);
+ if (!pdev) {
+ dev_err(&pdev->dev, "Cannot find RNG PF device\n");
+ return -EIO;
+ }
+
+ rng->pf_regbase = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!rng->pf_regbase) {
+ dev_err(&pdev->dev, "Failed to map PF CSR region\n");
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+
+ pci_dev_put(pdev);
+
+ /* Get co-processor clock rate */
+ rng->clock_rate = rng_get_coprocessor_clkrate();
+
+ return 0;
+}
+
/* Map Cavium RNG to an HWRNG object */
static int cavium_rng_probe_vf(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -50,6 +210,8 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
if (!rng)
return -ENOMEM;
+ rng->pdev = pdev;
+
/* Map the RNG result */
rng->result = pcim_iomap(pdev, 0, 0);
if (!rng->result) {
@@ -67,6 +229,11 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
pci_set_drvdata(pdev, rng);
+ /* Health status is available only at PF, hence map PF registers. */
+ ret = cavium_map_pf_regs(rng);
+ if (ret)
+ return ret;
+
ret = devm_hwrng_register(&pdev->dev, &rng->ops);
if (ret) {
dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
@@ -76,10 +243,18 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
return 0;
}
+/* Remove the VF */
+static void cavium_rng_remove_vf(struct pci_dev *pdev)
+{
+ struct cavium_rng *rng;
+
+ rng = pci_get_drvdata(pdev);
+ iounmap(rng->pf_regbase);
+}
static const struct pci_device_id cavium_rng_vf_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0},
- {0,},
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_VF) },
+ { 0, }
};
MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);
@@ -87,8 +262,9 @@ static struct pci_driver cavium_rng_vf_driver = {
.name = "cavium_rng_vf",
.id_table = cavium_rng_vf_id_table,
.probe = cavium_rng_probe_vf,
+ .remove = cavium_rng_remove_vf,
};
module_pci_driver(cavium_rng_vf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
index 63d6e68c24d2..b96579222408 100644
--- a/drivers/char/hw_random/cavium-rng.c
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Hardware Random Number Generator support for Cavium Inc.
- * Thunder processor family.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Hardware Random Number Generator support.
+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
*
* Copyright (C) 2016 Cavium, Inc.
*/
@@ -91,4 +88,4 @@ static struct pci_driver cavium_rng_pf_driver = {
module_pci_driver(cavium_rng_pf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
new file mode 100644
index 000000000000..35001c63648b
--- /dev/null
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K RVU Hardware Random Number Generator.
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+
+#include <linux/arm-smccc.h>
+
+/* CSRs */
+#define RNM_CTL_STATUS 0x000
+#define RNM_ENTROPY_STATUS 0x008
+#define RNM_CONST 0x030
+#define RNM_EBG_ENT 0x048
+#define RNM_PF_EBG_HEALTH 0x050
+#define RNM_PF_RANDOM 0x400
+#define RNM_TRNG_RESULT 0x408
+
+struct cn10k_rng {
+ void __iomem *reg_base;
+ struct hwrng ops;
+ struct pci_dev *pdev;
+};
+
+#define PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE 0xc2000b0f
+
+static int reset_rng_health_state(struct cn10k_rng *rng)
+{
+ struct arm_smccc_res res;
+
+ /* Send SMC service call to reset EBG health state */
+ arm_smccc_smc(PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != 0UL)
+ return -EIO;
+
+ return 0;
+}
+
+static int check_rng_health(struct cn10k_rng *rng)
+{
+ u64 status;
+ int err;
+
+ /* Skip checking health */
+ if (!rng->reg_base)
+ return 0;
+
+ status = readq(rng->reg_base + RNM_PF_EBG_HEALTH);
+ if (status & BIT_ULL(20)) {
+ err = reset_rng_health_state(rng);
+ if (err) {
+ dev_err(&rng->pdev->dev, "HWRNG: Health test failed (status=%llx)\n",
+ status);
+ dev_err(&rng->pdev->dev, "HWRNG: error during reset\n");
+ }
+ }
+ return 0;
+}
+
+static void cn10k_read_trng(struct cn10k_rng *rng, u64 *value)
+{
+ u64 upper, lower;
+
+ *value = readq(rng->reg_base + RNM_PF_RANDOM);
+
+ /* HW can run out of entropy if a large amount of random data is read in
+ * quick succession. Zeros may not be real random data from HW.
+ */
+ if (!*value) {
+ upper = readq(rng->reg_base + RNM_PF_RANDOM);
+ lower = readq(rng->reg_base + RNM_PF_RANDOM);
+ while (!(upper & 0x00000000FFFFFFFFULL))
+ upper = readq(rng->reg_base + RNM_PF_RANDOM);
+ while (!(lower & 0xFFFFFFFF00000000ULL))
+ lower = readq(rng->reg_base + RNM_PF_RANDOM);
+
+ *value = (upper & 0xFFFFFFFF00000000) | (lower & 0xFFFFFFFF);
+ }
+}
+
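For reference, the combining step of cn10k_read_trng() in isolation (a sketch; the driver above also re-reads RNM_PF_RANDOM while the sampled words look empty):

/* The splice used above: upper 32 bits from one 64-bit sample, lower
 * 32 bits from another. */
static u64 splice_halves(u64 upper, u64 lower)
{
	return (upper & 0xFFFFFFFF00000000ULL) | (lower & 0xFFFFFFFFULL);
}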
+static int cn10k_rng_read(struct hwrng *hwrng, void *data,
+ size_t max, bool wait)
+{
+ struct cn10k_rng *rng = (struct cn10k_rng *)hwrng->priv;
+ unsigned int size;
+ int err = 0;
+ u64 value;
+
+ err = check_rng_health(rng);
+ if (err)
+ return err;
+
+ size = max;
+
+ while (size >= 8) {
+ cn10k_read_trng(rng, &value);
+
+ *((u64 *)data) = (u64)value;
+ size -= 8;
+ data += 8;
+ }
+
+ while (size > 0) {
+ cn10k_read_trng(rng, &value);
+
+ *((u8 *)data) = (u8)value;
+ size--;
+ data++;
+ }
+
+ return max - size;
+}
+
+static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct cn10k_rng *rng;
+ int err;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->pdev = pdev;
+ pci_set_drvdata(pdev, rng);
+
+ rng->reg_base = pcim_iomap(pdev, 0, 0);
+ if (!rng->reg_base) {
+ dev_err(&pdev->dev, "Error while mapping CSRs, exiting\n");
+ return -ENOMEM;
+ }
+
+ rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "cn10k-rng-%s", dev_name(&pdev->dev));
+ if (!rng->ops.name)
+ return -ENOMEM;
+
+ rng->ops.read = cn10k_rng_read;
+ rng->ops.quality = 1000;
+ rng->ops.priv = (unsigned long)rng;
+
+ reset_rng_health_state(rng);
+
+ err = devm_hwrng_register(&pdev->dev, &rng->ops);
+ if (err) {
+ dev_err(&pdev->dev, "Could not register hwrng device.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void cn10k_rng_remove(struct pci_dev *pdev)
+{
+ /* Nothing to do */
+}
+
+static const struct pci_device_id cn10k_rng_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA098) }, /* RNG PF */
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, cn10k_rng_id_table);
+
+static struct pci_driver cn10k_rng_driver = {
+ .name = "cn10k_rng",
+ .id_table = cn10k_rng_id_table,
+ .probe = cn10k_rng_probe,
+ .remove = cn10k_rng_remove,
+};
+
+module_pci_driver(cn10k_rng_driver);
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION("Marvell CN10K HW RNG Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
deleted file mode 100644
index c8bd34e740fd..000000000000
--- a/drivers/char/hw_random/tx4939-rng.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * RNG driver for TX4939 Random Number Generators (RNG)
- *
- * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/gfp.h>
-
-#define TX4939_RNG_RCSR 0x00000000
-#define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8)
-
-#define TX4939_RNG_RCSR_INTE 0x00000008
-#define TX4939_RNG_RCSR_RST 0x00000004
-#define TX4939_RNG_RCSR_FIN 0x00000002
-#define TX4939_RNG_RCSR_ST 0x00000001
-
-struct tx4939_rng {
- struct hwrng rng;
- void __iomem *base;
- u64 databuf[3];
- unsigned int data_avail;
-};
-
-static void rng_io_start(void)
-{
-#ifndef CONFIG_64BIT
- /*
- * readq is reading a 64-bit register using a 64-bit load. On
- * a 32-bit kernel however interrupts or any other processor
- * exception would clobber the upper 32-bit of the processor
- * register so interrupts need to be disabled.
- */
- local_irq_disable();
-#endif
-}
-
-static void rng_io_end(void)
-{
-#ifndef CONFIG_64BIT
- local_irq_enable();
-#endif
-}
-
-static u64 read_rng(void __iomem *base, unsigned int offset)
-{
- return ____raw_readq(base + offset);
-}
-
-static void write_rng(u64 val, void __iomem *base, unsigned int offset)
-{
- return ____raw_writeq(val, base + offset);
-}
-
-static int tx4939_rng_data_present(struct hwrng *rng, int wait)
-{
- struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
- int i;
-
- if (rngdev->data_avail)
- return rngdev->data_avail;
- for (i = 0; i < 20; i++) {
- rng_io_start();
- if (!(read_rng(rngdev->base, TX4939_RNG_RCSR)
- & TX4939_RNG_RCSR_ST)) {
- rngdev->databuf[0] =
- read_rng(rngdev->base, TX4939_RNG_ROR(0));
- rngdev->databuf[1] =
- read_rng(rngdev->base, TX4939_RNG_ROR(1));
- rngdev->databuf[2] =
- read_rng(rngdev->base, TX4939_RNG_ROR(2));
- rngdev->data_avail =
- sizeof(rngdev->databuf) / sizeof(u32);
- /* Start RNG */
- write_rng(TX4939_RNG_RCSR_ST,
- rngdev->base, TX4939_RNG_RCSR);
- wait = 0;
- }
- rng_io_end();
- if (!wait)
- break;
- /* 90 bus clock cycles by default for generation */
- ndelay(90 * 5);
- }
- return rngdev->data_avail;
-}
-
-static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer)
-{
- struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
-
- rngdev->data_avail--;
- *buffer = *((u32 *)&rngdev->databuf + rngdev->data_avail);
- return sizeof(u32);
-}
-
-static int __init tx4939_rng_probe(struct platform_device *dev)
-{
- struct tx4939_rng *rngdev;
- int i;
-
- rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
- if (!rngdev)
- return -ENOMEM;
- rngdev->base = devm_platform_ioremap_resource(dev, 0);
- if (IS_ERR(rngdev->base))
- return PTR_ERR(rngdev->base);
-
- rngdev->rng.name = dev_name(&dev->dev);
- rngdev->rng.data_present = tx4939_rng_data_present;
- rngdev->rng.data_read = tx4939_rng_data_read;
-
- rng_io_start();
- /* Reset RNG */
- write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR);
- write_rng(0, rngdev->base, TX4939_RNG_RCSR);
- /* Start RNG */
- write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR);
- rng_io_end();
- /*
- * Drop first two results. From the datasheet:
- * The quality of the random numbers generated immediately
- * after reset can be insufficient. Therefore, do not use
- * random numbers obtained from the first and second
- * generations; use the ones from the third or subsequent
- * generation.
- */
- for (i = 0; i < 2; i++) {
- rngdev->data_avail = 0;
- if (!tx4939_rng_data_present(&rngdev->rng, 1))
- return -EIO;
- }
-
- platform_set_drvdata(dev, rngdev);
- return devm_hwrng_register(&dev->dev, &rngdev->rng);
-}
-
-static struct platform_driver tx4939_rng_driver = {
- .driver = {
- .name = "tx4939-rng",
- },
-};
-
-module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe);
-
-MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 0a7dde135db1..e856df7e285c 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -179,9 +179,9 @@ static void remove_common(struct virtio_device *vdev)
vi->data_avail = 0;
vi->data_idx = 0;
complete(&vi->have_data);
- vdev->config->reset(vdev);
if (vi->hwrng_register_done)
hwrng_unregister(&vi->hwrng);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
ida_simple_remove(&rng_index_ida, vi->index);
kfree(vi);
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
index 9ccb6b270b07..95164246afd1 100644
--- a/drivers/char/mwave/3780i.h
+++ b/drivers/char/mwave/3780i.h
@@ -68,7 +68,7 @@ typedef struct {
unsigned char ClockControl:1; /* RW: Clock control: 0=normal, 1=stop 3780i clocks */
unsigned char SoftReset:1; /* RW: Soft reset 0=normal, 1=soft reset active */
unsigned char ConfigMode:1; /* RW: Configuration mode, 0=normal, 1=config mode */
- unsigned char Reserved:5; /* 0: Reserved */
+ unsigned short Reserved:13; /* 0: Reserved */
} DSP_ISA_SLAVE_CONTROL;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 605969ed0f96..68613f0b6887 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,8 +1,7 @@
/*
* random.c -- A strong random number generator
*
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
- * Rights Reserved.
+ * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
*
@@ -78,12 +77,12 @@
* an *estimate* of how many bits of randomness have been stored into
* the random number generator's internal state.
*
- * When random bytes are desired, they are obtained by taking the SHA
- * hash of the contents of the "entropy pool". The SHA hash avoids
+ * When random bytes are desired, they are obtained by taking the BLAKE2s
+ * hash of the contents of the "entropy pool". The BLAKE2s hash avoids
* exposing the internal state of the entropy pool. It is believed to
* be computationally infeasible to derive any useful information
- * about the input of SHA from its output. Even if it is possible to
- * analyze SHA in some clever way, as long as the amount of data
+ * about the input of BLAKE2s from its output. Even if it is possible to
+ * analyze BLAKE2s in some clever way, as long as the amount of data
* returned from the generator is less than the inherent entropy in
* the pool, the output data is totally unpredictable. For this
* reason, the routine decreases its internal estimate of how many
@@ -93,7 +92,7 @@
* If this estimate goes to zero, the routine can still generate
* random numbers; however, an attacker may (at least in theory) be
* able to infer the future output of the generator from prior
- * outputs. This requires successful cryptanalysis of SHA, which is
+ * outputs. This requires successful cryptanalysis of BLAKE2s, which is
* not believed to be feasible, but there is a remote possibility.
* Nonetheless, these numbers should be useful for the vast majority
* of purposes.
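The extraction scheme described here is implemented in extract_buf() further down; stripped of locking, IV salting, and the mix-back into the pool, its core looks like this (a sketch against the in-tree <crypto/blake2s.h> API):

#include <crypto/blake2s.h>
#include <linux/string.h>

/* Hash the whole pool, emit only half of the hash; the real
 * extract_buf() also mixes the full hash back into the pool so that
 * backtracking is as hard as inverting BLAKE2s. */
static void extract_sketch(const u8 *pool, size_t pool_len,
			   u8 out[BLAKE2S_HASH_SIZE / 2])
{
	struct blake2s_state state;
	u8 hash[BLAKE2S_HASH_SIZE];

	blake2s_init(&state, sizeof(hash));
	blake2s_update(&state, pool, pool_len);
	blake2s_final(&state, hash);

	memcpy(out, hash, BLAKE2S_HASH_SIZE / 2);
	memzero_explicit(hash, sizeof(hash));
}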
@@ -102,7 +101,7 @@
* ===============================
*
* There are four exported interfaces; two for use within the kernel,
- * and two or use from userspace.
+ * and two for use from userspace.
*
* Exported interfaces ---- userspace output
* -----------------------------------------
@@ -125,7 +124,7 @@
*
* The primary kernel interface is
*
- * void get_random_bytes(void *buf, int nbytes);
+ * void get_random_bytes(void *buf, int nbytes);
*
* This interface will return the requested number of random bytes,
* and place it in the requested buffer. This is equivalent to a
@@ -133,10 +132,10 @@
*
* For less critical applications, there are the functions:
*
- * u32 get_random_u32()
- * u64 get_random_u64()
- * unsigned int get_random_int()
- * unsigned long get_random_long()
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned int get_random_int()
+ * unsigned long get_random_long()
*
* These are produced by a cryptographic RNG seeded from get_random_bytes,
* and so do not deplete the entropy pool as much. These are recommended
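A throwaway module exercising the kernel-side interfaces just listed (a sketch; error handling trimmed):

#include <linux/module.h>
#include <linux/random.h>

static int __init rng_demo_init(void)
{
	u8 key[16];

	get_random_bytes(key, sizeof(key));	/* cryptographic use */
	pr_info("rng_demo: u32=%u long=%lu key[0]=%02x\n",
		get_random_u32(), get_random_long(), key[0]);
	return 0;
}

static void __exit rng_demo_exit(void)
{
}

module_init(rng_demo_init);
module_exit(rng_demo_exit);
MODULE_LICENSE("GPL");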
@@ -198,10 +197,13 @@
* from the devices are:
*
* void add_device_randomness(const void *buf, unsigned int size);
- * void add_input_randomness(unsigned int type, unsigned int code,
+ * void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
- * void add_interrupt_randomness(int irq, int irq_flags);
- * void add_disk_randomness(struct gendisk *disk);
+ * void add_interrupt_randomness(int irq);
+ * void add_disk_randomness(struct gendisk *disk);
+ * void add_hwgenerator_randomness(const char *buffer, size_t count,
+ * size_t entropy);
+ * void add_bootloader_randomness(const void *buf, unsigned int size);
*
* add_device_randomness() is for adding data to the random pool that
* is likely to differ between two devices (or possibly even per boot).
@@ -228,6 +230,14 @@
* particular randomness source. They do this by keeping track of the
* first and second order deltas of the event timings.
*
+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+ * entropy as specified by the caller. If the entropy pool is full it will
+ * block until more entropy is needed.
+ *
+ * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
+ * add_device_randomness(), depending on whether or not the configuration
+ * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ *
* Ensuring unpredictability at system startup
* ============================================
*
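As a sketch of how drivers feed the input hooks above (netdev and hw_read_entropy() are hypothetical stand-ins):

#include <linux/etherdevice.h>
#include <linux/random.h>

static void demo_seed_from_device(struct net_device *netdev)
{
	u8 buf[32];
	int n;

	/* per-device but not secret: mixed in, no entropy credited */
	add_device_randomness(netdev->dev_addr, ETH_ALEN);

	/* true hardware RNG output: credit what we vouch for; may block
	 * if the pool is full (hw_read_entropy() is hypothetical) */
	n = hw_read_entropy(buf, sizeof(buf));
	if (n > 0)
		add_hwgenerator_randomness((const char *)buf, n, n * 8);
}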
@@ -286,8 +296,8 @@
* /dev/random and /dev/urandom created already, they can be created
* by using the commands:
*
- * mknod /dev/random c 1 8
- * mknod /dev/urandom c 1 9
+ * mknod /dev/random c 1 8
+ * mknod /dev/urandom c 1 9
*
* Acknowledgements:
* =================
@@ -327,7 +337,6 @@
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
-#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
@@ -336,7 +345,7 @@
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
-#include <crypto/sha1.h>
+#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
@@ -350,33 +359,11 @@
/* #define ADD_INTERRUPT_BENCH */
/*
- * Configuration information
- */
-#define INPUT_POOL_SHIFT 12
-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
-#define OUTPUT_POOL_SHIFT 10
-#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-#define EXTRACT_SIZE 10
-
-
-#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
-
-/*
- * To allow fractional bits to be tracked, the entropy_count field is
- * denominated in units of 1/8th bits.
- *
- * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in
- * credit_entropy_bits() needs to be 64 bits wide.
- */
-#define ENTROPY_SHIFT 3
-#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
-
-/*
* If the entropy count falls under this number of bits, then we
* should wake up processes which are selecting or polling on write
* access to /dev/random.
*/
-static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
+static int random_write_wakeup_bits = 28 * (1 << 5);
/*
* Originally, we used a primitive polynomial of degree .poolwords
@@ -395,7 +382,7 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
* Thanks to Colin Plumb for suggesting this.
*
* The mixing operation is much less sensitive than the output hash,
- * where we use SHA-1. All that we want of mixing operation is that
+ * where we use BLAKE2s. All that we want of the mixing operation is that
* it be a good non-cryptographic hash; i.e. it not produce collisions
* when fed "random" data of the sort we expect to see. As long as
* the pool state differs for different inputs, we have preserved the
@@ -423,14 +410,27 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
* polynomial which improves the resulting TGFSR polynomial to be
* irreducible, which we have made here.
*/
-static const struct poolinfo {
- int poolbitshift, poolwords, poolbytes, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
- int tap1, tap2, tap3, tap4, tap5;
-} poolinfo_table[] = {
- /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
+enum poolinfo {
+ POOL_WORDS = 128,
+ POOL_WORDMASK = POOL_WORDS - 1,
+ POOL_BYTES = POOL_WORDS * sizeof(u32),
+ POOL_BITS = POOL_BYTES * 8,
+ POOL_BITSHIFT = ilog2(POOL_BITS),
+
+ /* To allow fractional bits to be tracked, the entropy_count field is
+ * denominated in units of 1/8th bits. */
+ POOL_ENTROPY_SHIFT = 3,
+#define POOL_ENTROPY_BITS() (input_pool.entropy_count >> POOL_ENTROPY_SHIFT)
+ POOL_FRACBITS = POOL_BITS << POOL_ENTROPY_SHIFT,
+
/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
- { S(128), 104, 76, 51, 25, 1 },
+ POOL_TAP1 = 104,
+ POOL_TAP2 = 76,
+ POOL_TAP3 = 51,
+ POOL_TAP4 = 25,
+ POOL_TAP5 = 1,
+
+ EXTRACT_SIZE = BLAKE2S_HASH_SIZE / 2
};
/*
@@ -443,13 +443,17 @@ static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);
struct crng_state {
- __u32 state[16];
- unsigned long init_time;
- spinlock_t lock;
+ u32 state[16];
+ unsigned long init_time;
+ spinlock_t lock;
};
static struct crng_state primary_crng = {
.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
+ .state[0] = CHACHA_CONSTANT_EXPA,
+ .state[1] = CHACHA_CONSTANT_ND_3,
+ .state[2] = CHACHA_CONSTANT_2_BY,
+ .state[3] = CHACHA_CONSTANT_TE_K,
};
/*
@@ -461,13 +465,14 @@ static struct crng_state primary_crng = {
* its value (from 0->1->2).
*/
static int crng_init = 0;
+static bool crng_need_final_init = false;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
-#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
-static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
+#define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE)
+static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used);
+ u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -488,38 +493,23 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
*
**********************************************************************/
-struct entropy_store;
-struct entropy_store {
- /* read-only data: */
- const struct poolinfo *poolinfo;
- __u32 *pool;
- const char *name;
+static u32 input_pool_data[POOL_WORDS] __latent_entropy;
- /* read-write data: */
+static struct {
spinlock_t lock;
- unsigned short add_ptr;
- unsigned short input_rotate;
+ u16 add_ptr;
+ u16 input_rotate;
int entropy_count;
- unsigned int last_data_init:1;
- __u8 last_data[EXTRACT_SIZE];
+} input_pool = {
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int rsvd);
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips);
-
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
-static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static ssize_t extract_entropy(void *buf, size_t nbytes, int min);
+static ssize_t _extract_entropy(void *buf, size_t nbytes);
-static struct entropy_store input_pool = {
- .poolinfo = &poolinfo_table[0],
- .name = "input",
- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
- .pool = input_pool_data
-};
+static void crng_reseed(struct crng_state *crng, bool use_input_pool);
-static __u32 const twist_table[8] = {
+static const u32 twist_table[8] = {
0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
@@ -533,39 +523,31 @@ static __u32 const twist_table[8] = {
* it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits.
*/
-static void _mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+static void _mix_pool_bytes(const void *in, int nbytes)
{
- unsigned long i, tap1, tap2, tap3, tap4, tap5;
+ unsigned long i;
int input_rotate;
- int wordmask = r->poolinfo->poolwords - 1;
- const char *bytes = in;
- __u32 w;
+ const u8 *bytes = in;
+ u32 w;
- tap1 = r->poolinfo->tap1;
- tap2 = r->poolinfo->tap2;
- tap3 = r->poolinfo->tap3;
- tap4 = r->poolinfo->tap4;
- tap5 = r->poolinfo->tap5;
-
- input_rotate = r->input_rotate;
- i = r->add_ptr;
+ input_rotate = input_pool.input_rotate;
+ i = input_pool.add_ptr;
/* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) {
w = rol32(*bytes++, input_rotate);
- i = (i - 1) & wordmask;
+ i = (i - 1) & POOL_WORDMASK;
/* XOR in the various taps */
- w ^= r->pool[i];
- w ^= r->pool[(i + tap1) & wordmask];
- w ^= r->pool[(i + tap2) & wordmask];
- w ^= r->pool[(i + tap3) & wordmask];
- w ^= r->pool[(i + tap4) & wordmask];
- w ^= r->pool[(i + tap5) & wordmask];
+ w ^= input_pool_data[i];
+ w ^= input_pool_data[(i + POOL_TAP1) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP2) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP3) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP4) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP5) & POOL_WORDMASK];
/* Mix the result back in with a twist */
- r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+ input_pool_data[i] = (w >> 3) ^ twist_table[w & 7];
/*
* Normally, we add 7 bits of rotation to the pool.
@@ -576,33 +558,31 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
}
- r->input_rotate = input_rotate;
- r->add_ptr = i;
+ input_pool.input_rotate = input_rotate;
+ input_pool.add_ptr = i;
}
-static void __mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+static void __mix_pool_bytes(const void *in, int nbytes)
{
- trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
- _mix_pool_bytes(r, in, nbytes);
+ trace_mix_pool_bytes_nolock(nbytes, _RET_IP_);
+ _mix_pool_bytes(in, nbytes);
}
-static void mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+static void mix_pool_bytes(const void *in, int nbytes)
{
unsigned long flags;
- trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
- spin_lock_irqsave(&r->lock, flags);
- _mix_pool_bytes(r, in, nbytes);
- spin_unlock_irqrestore(&r->lock, flags);
+ trace_mix_pool_bytes(nbytes, _RET_IP_);
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(in, nbytes);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
}
struct fast_pool {
- __u32 pool[4];
- unsigned long last;
- unsigned short reg_idx;
- unsigned char count;
+ u32 pool[4];
+ unsigned long last;
+ u16 reg_idx;
+ u8 count;
};
/*
@@ -612,8 +592,8 @@ struct fast_pool {
*/
static void fast_mix(struct fast_pool *f)
{
- __u32 a = f->pool[0], b = f->pool[1];
- __u32 c = f->pool[2], d = f->pool[3];
+ u32 a = f->pool[0], b = f->pool[1];
+ u32 c = f->pool[2], d = f->pool[3];
a += b; c += d;
b = rol32(b, 6); d = rol32(d, 27);
@@ -657,17 +637,19 @@ static void process_random_ready_list(void)
* Use credit_entropy_bits_safe() if the value comes from userspace
* or otherwise should be checked for extreme values.
*/
-static void credit_entropy_bits(struct entropy_store *r, int nbits)
+static void credit_entropy_bits(int nbits)
{
- int entropy_count, orig;
- const int pool_size = r->poolinfo->poolfracbits;
- int nfrac = nbits << ENTROPY_SHIFT;
+ int entropy_count, entropy_bits, orig;
+ int nfrac = nbits << POOL_ENTROPY_SHIFT;
+
+ /* Ensure that the multiplication below never needs to be 64 bits wide. */
+ BUILD_BUG_ON(2 * (POOL_ENTROPY_SHIFT + POOL_BITSHIFT) > 31);
if (!nbits)
return;
retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(input_pool.entropy_count);
if (nfrac < 0) {
/* Debit */
entropy_count += nfrac;
@@ -694,50 +676,43 @@ retry:
* turns no matter how large nbits is.
*/
int pnfrac = nfrac;
- const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
+ const int s = POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2;
/* The +2 corresponds to the /4 in the denominator */
do {
- unsigned int anfrac = min(pnfrac, pool_size/2);
+ unsigned int anfrac = min(pnfrac, POOL_FRACBITS / 2);
unsigned int add =
- ((pool_size - entropy_count)*anfrac*3) >> s;
+ ((POOL_FRACBITS - entropy_count) * anfrac * 3) >> s;
entropy_count += add;
pnfrac -= anfrac;
- } while (unlikely(entropy_count < pool_size-2 && pnfrac));
+ } while (unlikely(entropy_count < POOL_FRACBITS - 2 && pnfrac));
}
if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy/overflow: pool %s count %d\n",
- r->name, entropy_count);
+ pr_warn("negative entropy/overflow: count %d\n", entropy_count);
entropy_count = 0;
- } else if (entropy_count > pool_size)
- entropy_count = pool_size;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ } else if (entropy_count > POOL_FRACBITS)
+ entropy_count = POOL_FRACBITS;
+ if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
goto retry;
- trace_credit_entropy_bits(r->name, nbits,
- entropy_count >> ENTROPY_SHIFT, _RET_IP_);
-
- if (r == &input_pool) {
- int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+ trace_credit_entropy_bits(nbits, entropy_count >> POOL_ENTROPY_SHIFT, _RET_IP_);
- if (crng_init < 2 && entropy_bits >= 128)
- crng_reseed(&primary_crng, r);
- }
+ entropy_bits = entropy_count >> POOL_ENTROPY_SHIFT;
+ if (crng_init < 2 && entropy_bits >= 128)
+ crng_reseed(&primary_crng, true);
}
-static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+static int credit_entropy_bits_safe(int nbits)
{
- const int nbits_max = r->poolinfo->poolwords * 32;
-
if (nbits < 0)
return -EINVAL;
/* Cap the value to avoid overflows */
- nbits = min(nbits, nbits_max);
+ nbits = min(nbits, POOL_BITS);
- credit_entropy_bits(r, nbits);
+ credit_entropy_bits(nbits);
return 0;
}
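The asymptotic credit above means the estimate approaches POOL_BITS but never reaches it from below. A standalone toy of the same arithmetic (constants copied from the enum; the termination condition is simplified):

#include <stdio.h>

int main(void)
{
	const int pool_frac = 4096 << 3;	/* POOL_FRACBITS */
	const int s = 12 + 3 + 2;	/* POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2 */
	int ec = 0;			/* entropy_count, in 1/8th bits */

	for (int i = 1; i <= 5; i++) {
		int nfrac = 512 << 3;	/* credit 512 bits */

		while (nfrac) {
			int anfrac = nfrac < pool_frac / 2 ? nfrac : pool_frac / 2;

			/* each chunk fills ~3/4 of the remaining gap,
			 * scaled by the chunk's share of the pool */
			ec += (long long)(pool_frac - ec) * anfrac * 3 >> s;
			nfrac -= anfrac;
		}
		printf("after %d x 512 credited bits: estimate = %d bits\n",
		       i, ec >> 3);
	}
	return 0;
}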
@@ -747,11 +722,10 @@ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
*
*********************************************************************/
-#define CRNG_RESEED_INTERVAL (300*HZ)
+#define CRNG_RESEED_INTERVAL (300 * HZ)
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
-#ifdef CONFIG_NUMA
/*
* Hack to deal with crazy userspace programs when they are all trying
* to access /dev/urandom in parallel. The programs are almost
@@ -759,7 +733,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
* their brain damage.
*/
static struct crng_state **crng_node_pool __read_mostly;
-#endif
static void invalidate_batched_entropy(void);
static void numa_crng_init(void);
@@ -773,9 +746,9 @@ early_param("random.trust_cpu", parse_trust_cpu);
static bool crng_init_try_arch(struct crng_state *crng)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ int i;
+ bool arch_init = true;
+ unsigned long rv;
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
@@ -791,9 +764,9 @@ static bool crng_init_try_arch(struct crng_state *crng)
static bool __init crng_init_try_arch_early(struct crng_state *crng)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ int i;
+ bool arch_init = true;
+ unsigned long rv;
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long_early(&rv) &&
@@ -807,35 +780,63 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
return arch_init;
}
-static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+static void crng_initialize_secondary(struct crng_state *crng)
{
chacha_init_consts(crng->state);
- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ _get_random_bytes(&crng->state[4], sizeof(u32) * 12);
crng_init_try_arch(crng);
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
static void __init crng_initialize_primary(struct crng_state *crng)
{
- chacha_init_consts(crng->state);
- _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
- if (crng_init_try_arch_early(crng) && trust_cpu) {
+ _extract_entropy(&crng->state[4], sizeof(u32) * 12);
+ if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
- pr_notice("crng done (trusting CPU's manufacturer)\n");
+ pr_notice("crng init done (trusting CPU's manufacturer)\n");
}
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
-#ifdef CONFIG_NUMA
+static void crng_finalize_init(struct crng_state *crng)
+{
+ if (crng != &primary_crng || crng_init >= 2)
+ return;
+ if (!system_wq) {
+ /* We can't call numa_crng_init until we have workqueues,
+ * so mark this for processing later. */
+ crng_need_final_init = true;
+ return;
+ }
+
+ invalidate_batched_entropy();
+ numa_crng_init();
+ crng_init = 2;
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ pr_notice("crng init done\n");
+ if (unseeded_warning.missed) {
+ pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
+ unseeded_warning.missed);
+ unseeded_warning.missed = 0;
+ }
+ if (urandom_warning.missed) {
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+ urandom_warning.missed);
+ urandom_warning.missed = 0;
+ }
+}
+
static void do_numa_crng_init(struct work_struct *work)
{
int i;
struct crng_state *crng;
struct crng_state **pool;
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL | __GFP_NOFAIL);
for_each_online_node(i) {
crng = kmalloc_node(sizeof(struct crng_state),
GFP_KERNEL | __GFP_NOFAIL, i);
@@ -843,8 +844,8 @@ static void do_numa_crng_init(struct work_struct *work)
crng_initialize_secondary(crng);
pool[i] = crng;
}
- mb();
- if (cmpxchg(&crng_node_pool, NULL, pool)) {
+ /* pairs with READ_ONCE() in select_crng() */
+ if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
for_each_node(i)
kfree(pool[i]);
kfree(pool);
@@ -855,20 +856,35 @@ static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
static void numa_crng_init(void)
{
- schedule_work(&numa_crng_init_work);
+ if (IS_ENABLED(CONFIG_NUMA))
+ schedule_work(&numa_crng_init_work);
+}
+
+static struct crng_state *select_crng(void)
+{
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ struct crng_state **pool;
+ int nid = numa_node_id();
+
+ /* pairs with cmpxchg_release() in do_numa_crng_init() */
+ pool = READ_ONCE(crng_node_pool);
+ if (pool && pool[nid])
+ return pool[nid];
+ }
+
+ return &primary_crng;
}
-#else
-static void numa_crng_init(void) {}
-#endif
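The cmpxchg_release()/READ_ONCE() pairing used by do_numa_crng_init() and select_crng() is the generic lock-free publish pattern; in isolation (struct foo and its callers are placeholders):

#include <linux/slab.h>

struct foo { int val; };		/* placeholder payload */

static struct foo *global_foo;

static void publish_foo(void)
{
	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL | __GFP_NOFAIL);

	p->val = 42;			/* construct fully first... */
	/* ...then publish with release semantics, so a reader that
	 * observes the pointer also observes the stores above */
	if (cmpxchg_release(&global_foo, NULL, p) != NULL)
		kfree(p);		/* lost the race; drop our copy */
}

static struct foo *get_foo(void)
{
	/* pairs with the cmpxchg_release() in publish_foo() */
	return READ_ONCE(global_foo);
}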
/*
* crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally.
+ * path. So we can't afford to dilly-dally. Returns the number of
+ * bytes processed from cp.
*/
-static int crng_fast_load(const char *cp, size_t len)
+static size_t crng_fast_load(const u8 *cp, size_t len)
{
unsigned long flags;
- char *p;
+ u8 *p;
+ size_t ret = 0;
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
@@ -876,10 +892,10 @@ static int crng_fast_load(const char *cp, size_t len)
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0;
}
- p = (unsigned char *) &primary_crng.state[4];
+ p = (u8 *)&primary_crng.state[4];
while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
- cp++; crng_init_cnt++; len--;
+ cp++; crng_init_cnt++; len--; ret++;
}
spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
@@ -887,7 +903,7 @@ static int crng_fast_load(const char *cp, size_t len)
crng_init = 1;
pr_notice("fast init done\n");
}
- return 1;
+ return ret;
}
/*
@@ -904,14 +920,14 @@ static int crng_fast_load(const char *cp, size_t len)
* like a fixed DMI table (for example), which might very well be
* unique to the machine, but is otherwise unvarying.
*/
-static int crng_slow_load(const char *cp, size_t len)
+static int crng_slow_load(const u8 *cp, size_t len)
{
- unsigned long flags;
- static unsigned char lfsr = 1;
- unsigned char tmp;
- unsigned i, max = CHACHA_KEY_SIZE;
- const char * src_buf = cp;
- char * dest_buf = (char *) &primary_crng.state[4];
+ unsigned long flags;
+ static u8 lfsr = 1;
+ u8 tmp;
+ unsigned int i, max = CHACHA_KEY_SIZE;
+ const u8 *src_buf = cp;
+ u8 *dest_buf = (u8 *)&primary_crng.state[4];
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
@@ -922,7 +938,7 @@ static int crng_slow_load(const char *cp, size_t len)
if (len > max)
max = len;
- for (i = 0; i < max ; i++) {
+ for (i = 0; i < max; i++) {
tmp = lfsr;
lfsr >>= 1;
if (tmp & 1)
@@ -935,17 +951,17 @@ static int crng_slow_load(const char *cp, size_t len)
return 1;
}
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+static void crng_reseed(struct crng_state *crng, bool use_input_pool)
{
- unsigned long flags;
- int i, num;
+ unsigned long flags;
+ int i, num;
union {
- __u8 block[CHACHA_BLOCK_SIZE];
- __u32 key[8];
+ u8 block[CHACHA_BLOCK_SIZE];
+ u32 key[8];
} buf;
- if (r) {
- num = extract_entropy(r, &buf, 32, 16, 0);
+ if (use_input_pool) {
+ num = extract_entropy(&buf, 32, 16);
if (num == 0)
return;
} else {
@@ -955,65 +971,38 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
spin_lock_irqsave(&crng->lock, flags);
for (i = 0; i < 8; i++) {
- unsigned long rv;
+ unsigned long rv;
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
rv = random_get_entropy();
- crng->state[i+4] ^= buf.key[i] ^ rv;
+ crng->state[i + 4] ^= buf.key[i] ^ rv;
}
memzero_explicit(&buf, sizeof(buf));
- crng->init_time = jiffies;
+ WRITE_ONCE(crng->init_time, jiffies);
spin_unlock_irqrestore(&crng->lock, flags);
- if (crng == &primary_crng && crng_init < 2) {
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- process_random_ready_list();
- wake_up_interruptible(&crng_init_wait);
- kill_fasync(&fasync, SIGIO, POLL_IN);
- pr_notice("crng init done\n");
- if (unseeded_warning.missed) {
- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
- unseeded_warning.missed);
- unseeded_warning.missed = 0;
- }
- if (urandom_warning.missed) {
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
- urandom_warning.missed = 0;
- }
- }
+ crng_finalize_init(crng);
}
-static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA_BLOCK_SIZE])
+static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
{
- unsigned long v, flags;
+ unsigned long flags, init_time;
- if (crng_ready() &&
- (time_after(crng_global_init_time, crng->init_time) ||
- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
- crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
+ if (crng_ready()) {
+ init_time = READ_ONCE(crng->init_time);
+ if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
+ time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
+ crng_reseed(crng, crng == &primary_crng);
+ }
spin_lock_irqsave(&crng->lock, flags);
- if (arch_get_random_long(&v))
- crng->state[14] ^= v;
chacha20_block(&crng->state[0], out);
if (crng->state[12] == 0)
crng->state[13]++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
+static void extract_crng(u8 out[CHACHA_BLOCK_SIZE])
{
- struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
- if (crng_node_pool)
- crng = crng_node_pool[numa_node_id()];
- if (crng == NULL)
-#endif
- crng = &primary_crng;
- _extract_crng(crng, out);
+ _extract_crng(select_crng(), out);
}
/*
@@ -1021,42 +1010,34 @@ static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used)
+ u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
- unsigned long flags;
- __u32 *s, *d;
- int i;
+ unsigned long flags;
+ u32 *s, *d;
+ int i;
- used = round_up(used, sizeof(__u32));
+ used = round_up(used, sizeof(u32));
if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
extract_crng(tmp);
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
+ s = (u32 *)&tmp[used];
d = &crng->state[4];
- for (i=0; i < 8; i++)
+ for (i = 0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
- struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
- if (crng_node_pool)
- crng = crng_node_pool[numa_node_id()];
- if (crng == NULL)
-#endif
- crng = &primary_crng;
- _crng_backtrack_protect(crng, tmp, used);
+ _crng_backtrack_protect(select_crng(), tmp, used);
}
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
+ u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1088,7 +1069,6 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
return ret;
}
-
/*********************************************************************
*
* Entropy input management
@@ -1121,8 +1101,8 @@ void add_device_randomness(const void *buf, unsigned int size)
trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
- _mix_pool_bytes(&input_pool, buf, size);
- _mix_pool_bytes(&input_pool, &time, sizeof(time));
+ _mix_pool_bytes(buf, size);
+ _mix_pool_bytes(&time, sizeof(time));
spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
@@ -1141,19 +1121,17 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
*/
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
- struct entropy_store *r;
struct {
long jiffies;
- unsigned cycles;
- unsigned num;
+ unsigned int cycles;
+ unsigned int num;
} sample;
long delta, delta2, delta3;
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
- r = &input_pool;
- mix_pool_bytes(r, &sample, sizeof(sample));
+ mix_pool_bytes(&sample, sizeof(sample));
/*
* Calculate number of bits of randomness we probably added.
@@ -1185,11 +1163,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* Round down by 1 bit on general principles,
* and limit entropy estimate to 12 bits.
*/
- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+ credit_entropy_bits(min_t(int, fls(delta >> 1), 11));
}
void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value)
+ unsigned int value)
{
static unsigned char last_value;
@@ -1200,7 +1178,7 @@ void add_input_randomness(unsigned int type, unsigned int code,
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
- trace_add_input_randomness(ENTROPY_BITS(&input_pool));
+ trace_add_input_randomness(POOL_ENTROPY_BITS());
}
EXPORT_SYMBOL_GPL(add_input_randomness);
@@ -1209,48 +1187,47 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;
-#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
-#define FIXED_1_2 (1 << (AVG_SHIFT-1))
+#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
+#define FIXED_1_2 (1 << (AVG_SHIFT - 1))
static void add_interrupt_bench(cycles_t start)
{
- long delta = random_get_entropy() - start;
-
- /* Use a weighted moving average */
- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
- avg_cycles += delta;
- /* And average deviation */
- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
- avg_deviation += delta;
+ long delta = random_get_entropy() - start;
+
+ /* Use a weighted moving average */
+ delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
+ avg_cycles += delta;
+ /* And average deviation */
+ delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
+ avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif
-static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
- __u32 *ptr = (__u32 *) regs;
+ u32 *ptr = (u32 *)regs;
unsigned int idx;
if (regs == NULL)
return 0;
idx = READ_ONCE(f->reg_idx);
- if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+ if (idx >= sizeof(struct pt_regs) / sizeof(u32))
idx = 0;
ptr += idx++;
WRITE_ONCE(f->reg_idx, idx);
return *ptr;
}
-void add_interrupt_randomness(int irq, int irq_flags)
+void add_interrupt_randomness(int irq)
{
- struct entropy_store *r;
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- __u32 c_high, j_high;
- __u64 ip;
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ cycles_t cycles = random_get_entropy();
+ u32 c_high, j_high;
+ u64 ip;
if (cycles == 0)
cycles = get_reg(fast_pool, regs);
@@ -1260,38 +1237,35 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_pool->pool[1] ^= now ^ c_high;
ip = regs ? instruction_pointer(regs) : _RET_IP_;
fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
- get_reg(fast_pool, regs);
+ fast_pool->pool[3] ^=
+ (sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);
fast_mix(fast_pool);
add_interrupt_bench(cycles);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
- crng_fast_load((char *) fast_pool->pool,
- sizeof(fast_pool->pool))) {
+ crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
fast_pool->count = 0;
fast_pool->last = now;
}
return;
}
- if ((fast_pool->count < 64) &&
- !time_after(now, fast_pool->last + HZ))
+ if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
return;
- r = &input_pool;
- if (!spin_trylock(&r->lock))
+ if (!spin_trylock(&input_pool.lock))
return;
fast_pool->last = now;
- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
- spin_unlock(&r->lock);
+ __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
+ spin_unlock(&input_pool.lock);
fast_pool->count = 0;
/* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, 1);
+ credit_entropy_bits(1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
@@ -1302,7 +1276,7 @@ void add_disk_randomness(struct gendisk *disk)
return;
/* first major is 1, so we get >= 0x200 here */
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
- trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
+ trace_add_disk_randomness(disk_devt(disk), POOL_ENTROPY_BITS());
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif
@@ -1317,43 +1291,36 @@ EXPORT_SYMBOL_GPL(add_disk_randomness);
* This function decides how many bytes to actually take from the
* given pool, and also debits the entropy count accordingly.
*/
-static size_t account(struct entropy_store *r, size_t nbytes, int min,
- int reserved)
+static size_t account(size_t nbytes, int min)
{
- int entropy_count, orig, have_bytes;
+ int entropy_count, orig;
size_t ibytes, nfrac;
- BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
+ BUG_ON(input_pool.entropy_count > POOL_FRACBITS);
/* Can we pull enough? */
retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
- ibytes = nbytes;
- /* never pull more than available */
- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
-
- if ((have_bytes -= reserved) < 0)
- have_bytes = 0;
- ibytes = min_t(size_t, ibytes, have_bytes);
- if (ibytes < min)
- ibytes = 0;
-
+ entropy_count = orig = READ_ONCE(input_pool.entropy_count);
if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy count: pool %s count %d\n",
- r->name, entropy_count);
+ pr_warn("negative entropy count: count %d\n", entropy_count);
entropy_count = 0;
}
- nfrac = ibytes << (ENTROPY_SHIFT + 3);
- if ((size_t) entropy_count > nfrac)
+
+ /* never pull more than available */
+ ibytes = min_t(size_t, nbytes, entropy_count >> (POOL_ENTROPY_SHIFT + 3));
+ if (ibytes < min)
+ ibytes = 0;
+ nfrac = ibytes << (POOL_ENTROPY_SHIFT + 3);
+ if ((size_t)entropy_count > nfrac)
entropy_count -= nfrac;
else
entropy_count = 0;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
goto retry;
- trace_debit_entropy(r->name, 8 * ibytes);
- if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
+ trace_debit_entropy(8 * ibytes);
+ if (ibytes && POOL_ENTROPY_BITS() < random_write_wakeup_bits) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
@@ -1366,77 +1333,59 @@ retry:
*
* Note: we assume that .poolwords is a multiple of 16 words.
*/
-static void extract_buf(struct entropy_store *r, __u8 *out)
+static void extract_buf(u8 *out)
{
- int i;
- union {
- __u32 w[5];
- unsigned long l[LONGS(20)];
- } hash;
- __u32 workspace[SHA1_WORKSPACE_WORDS];
+ struct blake2s_state state __aligned(__alignof__(unsigned long));
+ u8 hash[BLAKE2S_HASH_SIZE];
+ unsigned long *salt;
unsigned long flags;
+ blake2s_init(&state, sizeof(hash));
+
/*
* If we have an architectural hardware random number
- * generator, use it for SHA's initial vector
+ * generator, use it for BLAKE2's salt & personal fields.
*/
- sha1_init(hash.w);
- for (i = 0; i < LONGS(20); i++) {
+ for (salt = (unsigned long *)&state.h[4];
+ salt < (unsigned long *)&state.h[8]; ++salt) {
unsigned long v;
if (!arch_get_random_long(&v))
break;
- hash.l[i] = v;
+ *salt ^= v;
}
- /* Generate a hash across the pool, 16 words (512 bits) at a time */
- spin_lock_irqsave(&r->lock, flags);
- for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+ /* Generate a hash across the pool */
+ spin_lock_irqsave(&input_pool.lock, flags);
+ blake2s_update(&state, (const u8 *)input_pool_data, POOL_BYTES);
+ blake2s_final(&state, hash); /* final zeros out state */
/*
* We mix the hash back into the pool to prevent backtracking
* attacks (where the attacker knows the state of the pool
* plus the current outputs, and attempts to find previous
- * ouputs), unless the hash function can be inverted. By
- * mixing at least a SHA1 worth of hash data back, we make
+ * outputs), unless the hash function can be inverted. By
+ * mixing at least a hash worth of hash data back, we make
* brute-forcing the feedback as hard as brute-forcing the
* hash.
*/
- __mix_pool_bytes(r, hash.w, sizeof(hash.w));
- spin_unlock_irqrestore(&r->lock, flags);
-
- memzero_explicit(workspace, sizeof(workspace));
+ __mix_pool_bytes(hash, sizeof(hash));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
- /*
- * In case the hash function has some recognizable output
- * pattern, we fold it in half. Thus, we always feed back
- * twice as much data as we output.
+ /* Note that EXTRACT_SIZE is half of the hash size here, because above
+ * we've dumped the full length back into the mixer. By reducing the
+ * amount that we emit, we retain a level of forward secrecy.
*/
- hash.w[0] ^= hash.w[3];
- hash.w[1] ^= hash.w[4];
- hash.w[2] ^= rol32(hash.w[2], 16);
-
- memcpy(out, &hash, EXTRACT_SIZE);
- memzero_explicit(&hash, sizeof(hash));
+ memcpy(out, hash, EXTRACT_SIZE);
+ memzero_explicit(hash, sizeof(hash));
}
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips)
+static ssize_t _extract_entropy(void *buf, size_t nbytes)
{
ssize_t ret = 0, i;
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
+ u8 tmp[EXTRACT_SIZE];
while (nbytes) {
- extract_buf(r, tmp);
-
- if (fips) {
- spin_lock_irqsave(&r->lock, flags);
- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
- panic("Hardware RNG duplicated output!\n");
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- spin_unlock_irqrestore(&r->lock, flags);
- }
+ extract_buf(tmp);
i = min_t(int, nbytes, EXTRACT_SIZE);
memcpy(buf, tmp, i);
nbytes -= i;
@@ -1455,42 +1404,19 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
* returns it in a buffer.
*
* The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding while the
- * reserved parameter indicates how much entropy we must leave in the
- * pool after each pull to avoid starving other readers.
+ * failing to avoid races that defeat catastrophic reseeding.
*/
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int reserved)
+static ssize_t extract_entropy(void *buf, size_t nbytes, int min)
{
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
-
- /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
- if (fips_enabled) {
- spin_lock_irqsave(&r->lock, flags);
- if (!r->last_data_init) {
- r->last_data_init = 1;
- spin_unlock_irqrestore(&r->lock, flags);
- trace_extract_entropy(r->name, EXTRACT_SIZE,
- ENTROPY_BITS(r), _RET_IP_);
- extract_buf(r, tmp);
- spin_lock_irqsave(&r->lock, flags);
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- }
- spin_unlock_irqrestore(&r->lock, flags);
- }
-
- trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
- nbytes = account(r, nbytes, min, reserved);
-
- return _extract_entropy(r, buf, nbytes, fips_enabled);
+ trace_extract_entropy(nbytes, POOL_ENTROPY_BITS(), _RET_IP_);
+ nbytes = account(nbytes, min);
+ return _extract_entropy(buf, nbytes);
}
#define warn_unseeded_randomness(previous) \
- _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+ _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
-static void _warn_unseeded_randomness(const char *func_name, void *caller,
- void **previous)
+static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
const bool print_once = false;
@@ -1498,8 +1424,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
static bool print_once __read_mostly;
#endif
- if (print_once ||
- crng_ready() ||
+ if (print_once || crng_ready() ||
(previous && (caller == READ_ONCE(*previous))))
return;
WRITE_ONCE(*previous, caller);
@@ -1507,9 +1432,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
print_once = true;
#endif
if (__ratelimit(&unseeded_warning))
- printk_deferred(KERN_NOTICE "random: %s called from %pS "
- "with crng_init=%d\n", func_name, caller,
- crng_init);
+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
+ func_name, caller, crng_init);
}
/*
@@ -1524,7 +1448,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
+ u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -1552,7 +1476,6 @@ void get_random_bytes(void *buf, int nbytes)
}
EXPORT_SYMBOL(get_random_bytes);
-
/*
* Each time the timer fires, we expect that we got an unpredictable
* jump in the cycle counter. Even if the timer is running on another
@@ -1568,7 +1491,7 @@ EXPORT_SYMBOL(get_random_bytes);
*/
static void entropy_timer(struct timer_list *t)
{
- credit_entropy_bits(&input_pool, 1);
+ credit_entropy_bits(1);
}
/*
@@ -1591,15 +1514,15 @@ static void try_to_generate_entropy(void)
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
while (!crng_ready()) {
if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies+1);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ mod_timer(&stack.timer, jiffies + 1);
+ mix_pool_bytes(&stack.now, sizeof(stack.now));
schedule();
stack.now = random_get_entropy();
}
del_timer_sync(&stack.timer);
destroy_timer_on_stack(&stack.timer);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ mix_pool_bytes(&stack.now, sizeof(stack.now));
}
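A crude userspace analogue of the jitter being harvested here: sample a clock, yield, and look at the deltas (illustrative only; it measures scheduler noise, not the kernel's per-timer cycle-counter jumps):

#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t prev = now_ns();

	for (int i = 0; i < 8; i++) {
		uint64_t t;

		sched_yield();
		t = now_ns();
		printf("delta[%d] = %llu ns\n", i,
		       (unsigned long long)(t - prev));
		prev = t;
	}
	return 0;
}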
/*
@@ -1718,7 +1641,7 @@ EXPORT_SYMBOL(del_random_ready_callback);
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
int left = nbytes;
- char *p = buf;
+ u8 *p = buf;
trace_get_random_bytes_arch(left, _RET_IP_);
while (left) {
@@ -1740,26 +1663,24 @@ EXPORT_SYMBOL(get_random_bytes_arch);
/*
* init_std_data - initialize pool with system data
*
- * @r: pool to initialize
- *
* This function clears the pool's entropy count and mixes some system
* data into the pool to prepare it for use. The pool is not cleared
* as that can only decrease the entropy in the pool.
*/
-static void __init init_std_data(struct entropy_store *r)
+static void __init init_std_data(void)
{
int i;
ktime_t now = ktime_get_real();
unsigned long rv;
- mix_pool_bytes(r, &now, sizeof(now));
- for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
+ mix_pool_bytes(&now, sizeof(now));
+ for (i = POOL_BYTES; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
rv = random_get_entropy();
- mix_pool_bytes(r, &rv, sizeof(rv));
+ mix_pool_bytes(&rv, sizeof(rv));
}
- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+ mix_pool_bytes(utsname(), sizeof(*(utsname())));
}
/*
@@ -1774,7 +1695,9 @@ static void __init init_std_data(struct entropy_store *r)
*/
int __init rand_initialize(void)
{
- init_std_data(&input_pool);
+ init_std_data();
+ if (crng_need_final_init)
+ crng_finalize_init(&primary_crng);
crng_initialize_primary(&primary_crng);
crng_global_init_time = jiffies;
if (ratelimit_disable) {
@@ -1801,22 +1724,20 @@ void rand_initialize_disk(struct gendisk *disk)
}
#endif
-static ssize_t
-urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
- loff_t *ppos)
+static ssize_t urandom_read_nowarn(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
int ret;
- nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
+ nbytes = min_t(size_t, nbytes, INT_MAX >> (POOL_ENTROPY_SHIFT + 3));
ret = extract_crng_user(buf, nbytes);
- trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
+ trace_urandom_read(8 * nbytes, 0, POOL_ENTROPY_BITS());
return ret;
}
-static ssize_t
-urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
- unsigned long flags;
static int maxwarn = 10;
if (!crng_ready() && maxwarn > 0) {
@@ -1824,16 +1745,13 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (__ratelimit(&urandom_warning))
pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
current->comm, nbytes);
- spin_lock_irqsave(&primary_crng.lock, flags);
- crng_init_cnt = 0;
- spin_unlock_irqrestore(&primary_crng.lock, flags);
}
return urandom_read_nowarn(file, buf, nbytes, ppos);
}
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
int ret;
@@ -1843,8 +1761,7 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return urandom_read_nowarn(file, buf, nbytes, ppos);
}
-static __poll_t
-random_poll(struct file *file, poll_table * wait)
+static __poll_t random_poll(struct file *file, poll_table *wait)
{
__poll_t mask;
@@ -1853,16 +1770,15 @@ random_poll(struct file *file, poll_table * wait)
mask = 0;
if (crng_ready())
mask |= EPOLLIN | EPOLLRDNORM;
- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
+ if (POOL_ENTROPY_BITS() < random_write_wakeup_bits)
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
-static int
-write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+static int write_pool(const char __user *buffer, size_t count)
{
size_t bytes;
- __u32 t, buf[16];
+ u32 t, buf[16];
const char __user *p = buffer;
while (count > 0) {
@@ -1872,7 +1788,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
if (copy_from_user(&buf, p, bytes))
return -EFAULT;
- for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+ for (b = bytes; b > 0; b -= sizeof(u32), i++) {
if (!arch_get_random_int(&t))
break;
buf[i] ^= t;
@@ -1881,7 +1797,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
count -= bytes;
p += bytes;
- mix_pool_bytes(r, buf, bytes);
+ mix_pool_bytes(buf, bytes);
cond_resched();
}
@@ -1893,7 +1809,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
{
size_t ret;
- ret = write_pool(&input_pool, buffer, count);
+ ret = write_pool(buffer, count);
if (ret)
return ret;
@@ -1909,7 +1825,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RNDGETENTCNT:
/* inherently racy, no point locking */
- ent_count = ENTROPY_BITS(&input_pool);
+ ent_count = POOL_ENTROPY_BITS();
if (put_user(ent_count, p))
return -EFAULT;
return 0;
@@ -1918,7 +1834,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- return credit_entropy_bits_safe(&input_pool, ent_count);
+ return credit_entropy_bits_safe(ent_count);
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1928,11 +1844,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EINVAL;
if (get_user(size, p++))
return -EFAULT;
- retval = write_pool(&input_pool, (const char __user *)p,
- size);
+ retval = write_pool((const char __user *)p, size);
if (retval < 0)
return retval;
- return credit_entropy_bits_safe(&input_pool, ent_count);
+ return credit_entropy_bits_safe(ent_count);
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/*
@@ -1948,8 +1863,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (crng_init < 2)
return -ENODATA;
- crng_reseed(&primary_crng, &input_pool);
- crng_global_init_time = jiffies - 1;
+ crng_reseed(&primary_crng, true);
+ WRITE_ONCE(crng_global_init_time, jiffies - 1);
return 0;
default:
return -EINVAL;
@@ -1962,9 +1877,9 @@ static int random_fasync(int fd, struct file *filp, int on)
}
const struct file_operations random_fops = {
- .read = random_read,
+ .read = random_read,
.write = random_write,
- .poll = random_poll,
+ .poll = random_poll,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
@@ -1972,7 +1887,7 @@ const struct file_operations random_fops = {
};
const struct file_operations urandom_fops = {
- .read = urandom_read,
+ .read = urandom_read,
.write = random_write,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -1980,19 +1895,19 @@ const struct file_operations urandom_fops = {
.llseek = noop_llseek,
};
-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
- unsigned int, flags)
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
+ flags)
{
int ret;
- if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
return -EINVAL;
/*
* Requesting insecure and blocking randomness at the same time makes
* no sense.
*/
- if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL;
if (count > INT_MAX)
@@ -2019,7 +1934,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
#include <linux/sysctl.h>
static int min_write_thresh;
-static int max_write_thresh = INPUT_POOL_WORDS * 32;
+static int max_write_thresh = POOL_BITS;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];
@@ -2032,8 +1947,8 @@ static char sysctl_bootid[16];
* returned as an ASCII string in the standard UUID format; if via the
* sysctl system call, as 16 bytes of binary data.
*/
-static int proc_do_uuid(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
@@ -2062,13 +1977,13 @@ static int proc_do_uuid(struct ctl_table *table, int write,
/*
* Return entropy available scaled to integral bits
*/
-static int proc_do_entropy(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_do_entropy(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
int entropy_count;
- entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
+ entropy_count = *(int *)table->data >> POOL_ENTROPY_SHIFT;
fake_table.data = &entropy_count;
fake_table.maxlen = sizeof(entropy_count);
@@ -2076,9 +1991,8 @@ static int proc_do_entropy(struct ctl_table *table, int write,
return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}
-static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
-extern struct ctl_table random_table[];
-struct ctl_table random_table[] = {
+static int sysctl_poolsize = POOL_BITS;
+static struct ctl_table random_table[] = {
{
.procname = "poolsize",
.data = &sysctl_poolsize,
@@ -2140,7 +2054,18 @@ struct ctl_table random_table[] = {
#endif
{ }
};
-#endif /* CONFIG_SYSCTL */
+
+/*
+ * rand_initialize() is called before sysctl_init(),
+ * so we cannot call register_sysctl_init() in rand_initialize().
+ */
+static int __init random_sysctls_init(void)
+{
+ register_sysctl_init("kernel/random", random_table);
+ return 0;
+}
+device_initcall(random_sysctls_init);
+#endif /* CONFIG_SYSCTL */
struct batched_entropy {
union {
@@ -2160,7 +2085,7 @@ struct batched_entropy {
* point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};
u64 get_random_u64(void)
@@ -2185,7 +2110,7 @@ u64 get_random_u64(void)
EXPORT_SYMBOL(get_random_u64);
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};
u32 get_random_u32(void)
{
@@ -2217,7 +2142,7 @@ static void invalidate_batched_entropy(void)
int cpu;
unsigned long flags;
- for_each_possible_cpu (cpu) {
+ for_each_possible_cpu(cpu) {
struct batched_entropy *batched_entropy;
batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
@@ -2246,8 +2171,7 @@ static void invalidate_batched_entropy(void)
* Return: A page aligned address within [start, start + range). On error,
* @start is returned.
*/
-unsigned long
-randomize_page(unsigned long start, unsigned long range)
+unsigned long randomize_page(unsigned long start, unsigned long range)
{
if (!PAGE_ALIGNED(start)) {
range -= PAGE_ALIGN(start) - start;
@@ -2272,21 +2196,24 @@ randomize_page(unsigned long start, unsigned long range)
void add_hwgenerator_randomness(const char *buffer, size_t count,
size_t entropy)
{
- struct entropy_store *poolp = &input_pool;
-
if (unlikely(crng_init == 0)) {
- crng_fast_load(buffer, count);
- return;
+ size_t ret = crng_fast_load(buffer, count);
+ mix_pool_bytes(buffer, ret);
+ count -= ret;
+ buffer += ret;
+ if (!count || crng_init == 0)
+ return;
}
/* Suspend writing if we're above the trickle threshold.
	 * We'll be woken up again once below random_write_wakeup_bits,
* or when the calling thread is about to terminate.
*/
- wait_event_interruptible(random_write_wait, kthread_should_stop() ||
- ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
- mix_pool_bytes(poolp, buffer, count);
- credit_entropy_bits(poolp, entropy);
+ wait_event_interruptible(random_write_wait,
+ !system_wq || kthread_should_stop() ||
+ POOL_ENTROPY_BITS() <= random_write_wakeup_bits);
+ mix_pool_bytes(buffer, count);
+ credit_entropy_bits(entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
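
The conversion above drops the `struct entropy_store *` argument from the mixing and accounting helpers because only a single input pool remains. A minimal sketch of what the new single-pool helpers likely look like — the names follow the call sites in this hunk, the bodies are assumptions:

```c
/* Sketch only: POOL_* helpers assumed from the converted call sites. */
#define POOL_ENTROPY_SHIFT	3	/* fractional entropy bits */
#define POOL_ENTROPY_BITS()	(input_pool.entropy_count >> POOL_ENTROPY_SHIFT)

static void mix_pool_bytes(const void *in, size_t nbytes)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(in, nbytes);	/* lockless worker, assumed */
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
```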
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 4ec10ab5e576..ce9efb73c144 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -61,9 +61,7 @@ enum tis_defaults {
};
/*
- * clear_interruption clear the pending interrupt.
- * @param: tpm_dev, the tpm device device.
- * @return: the interrupt status value.
+ * clear the pending interrupt.
*/
static u8 clear_interruption(struct st33zp24_dev *tpm_dev)
{
@@ -72,12 +70,10 @@ static u8 clear_interruption(struct st33zp24_dev *tpm_dev)
tpm_dev->ops->recv(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
return interrupt;
-} /* clear_interruption() */
+}
/*
- * st33zp24_cancel, cancel the current command execution or
- * set STS to COMMAND READY.
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * cancel the current command execution or set STS to COMMAND READY.
*/
static void st33zp24_cancel(struct tpm_chip *chip)
{
@@ -86,12 +82,10 @@ static void st33zp24_cancel(struct tpm_chip *chip)
data = TPM_STS_COMMAND_READY;
tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
-} /* st33zp24_cancel() */
+}
/*
- * st33zp24_status return the TPM_STS register
- * @param: chip, the tpm chip description
- * @return: the TPM_STS register value.
+ * return the TPM_STS register
*/
static u8 st33zp24_status(struct tpm_chip *chip)
{
@@ -100,12 +94,10 @@ static u8 st33zp24_status(struct tpm_chip *chip)
tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1);
return data;
-} /* st33zp24_status() */
+}
/*
- * check_locality if the locality is active
- * @param: chip, the tpm chip description
- * @return: true if LOCALITY0 is active, otherwise false
+ * check whether the locality is active
*/
static bool check_locality(struct tpm_chip *chip)
{
@@ -120,13 +112,8 @@ static bool check_locality(struct tpm_chip *chip)
return true;
return false;
-} /* check_locality() */
+}
-/*
- * request_locality request the TPM locality
- * @param: chip, the chip description
- * @return: the active locality or negative value.
- */
static int request_locality(struct tpm_chip *chip)
{
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
@@ -153,12 +140,8 @@ static int request_locality(struct tpm_chip *chip)
/* could not get locality */
return -EACCES;
-} /* request_locality() */
+}
-/*
- * release_locality release the active locality
- * @param: chip, the tpm chip description.
- */
static void release_locality(struct tpm_chip *chip)
{
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
@@ -171,8 +154,6 @@ static void release_locality(struct tpm_chip *chip)
/*
* get_burstcount return the burstcount value
- * @param: chip, the chip description
- * return: the burstcount or negative value.
*/
static int get_burstcount(struct tpm_chip *chip)
{
@@ -200,18 +181,8 @@ static int get_burstcount(struct tpm_chip *chip)
msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
return -EBUSY;
-} /* get_burstcount() */
-
+}
-/*
- * wait_for_tpm_stat_cond
- * @param: chip, chip description
- * @param: mask, expected mask value
- * @param: check_cancel, does the command expected to be canceled ?
- * @param: canceled, did we received a cancel request ?
- * @return: true if status == mask or if the command is canceled.
- * false in other cases.
- */
static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
bool check_cancel, bool *canceled)
{
@@ -228,13 +199,7 @@ static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
}
/*
- * wait_for_stat wait for a TPM_STS value
- * @param: chip, the tpm chip description
- * @param: mask, the value mask to wait
- * @param: timeout, the timeout
- * @param: queue, the wait queue.
- * @param: check_cancel, does the command can be cancelled ?
- * @return: the tpm status, 0 if success, -ETIME if timeout is reached.
+ * wait for a TPM_STS value
*/
static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
wait_queue_head_t *queue, bool check_cancel)
@@ -292,15 +257,8 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
}
return -ETIME;
-} /* wait_for_stat() */
+}
-/*
- * recv_data receive data
- * @param: chip, the tpm chip description
- * @param: buf, the buffer where the data are received
- * @param: count, the number of data to receive
- * @return: the number of bytes read from TPM FIFO.
- */
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
@@ -325,12 +283,6 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
return size;
}
-/*
- * tpm_ioserirq_handler the serirq irq handler
- * @param: irq, the tpm chip description
- * @param: dev_id, the description of the chip
- * @return: the status of the handler.
- */
static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
{
struct tpm_chip *chip = dev_id;
@@ -341,16 +293,10 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
disable_irq_nosync(tpm_dev->irq);
return IRQ_HANDLED;
-} /* tpm_ioserirq_handler() */
+}
/*
- * st33zp24_send send TPM commands through the I2C bus.
- *
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
- * @param: buf, the buffer to send.
- * @param: count, the number of bytes to send.
- * @return: In case of success the number of bytes sent.
- * In other case, a < 0 value describing the issue.
+ * send TPM commands through the I2C bus.
*/
static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
size_t len)
@@ -431,14 +377,6 @@ out_err:
return ret;
}
-/*
- * st33zp24_recv received TPM response through TPM phy.
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
- * @param: buf, the buffer to store datas.
- * @param: count, the number of bytes to send.
- * @return: In case of success the number of bytes received.
- * In other case, a < 0 value describing the issue.
- */
static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
size_t count)
{
@@ -478,12 +416,6 @@ out:
return size;
}
-/*
- * st33zp24_req_canceled
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
- * @param: status, the TPM status.
- * @return: Does TPM ready to compute a new command ? true.
- */
static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status)
{
return (status == TPM_STS_COMMAND_READY);
@@ -501,11 +433,7 @@ static const struct tpm_class_ops st33zp24_tpm = {
};
/*
- * st33zp24_probe initialize the TPM device
- * @param: client, the i2c_client description (TPM I2C description).
- * @param: id, the i2c_device_id struct.
- * @return: 0 in case of success.
- * -1 in other case.
+ * initialize the TPM device
*/
int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
struct device *dev, int irq, int io_lpcpd)
@@ -583,11 +511,6 @@ _tpm_clean_answer:
}
EXPORT_SYMBOL(st33zp24_probe);
-/*
- * st33zp24_remove remove the TPM device
- * @param: tpm_data, the tpm phy.
- * @return: 0 in case of success.
- */
int st33zp24_remove(struct tpm_chip *chip)
{
tpm_chip_unregister(chip);
@@ -596,12 +519,6 @@ int st33zp24_remove(struct tpm_chip *chip)
EXPORT_SYMBOL(st33zp24_remove);
#ifdef CONFIG_PM_SLEEP
-/*
- * st33zp24_pm_suspend suspend the TPM device
- * @param: tpm_data, the tpm phy.
- * @param: mesg, the power management message.
- * @return: 0 in case of success.
- */
int st33zp24_pm_suspend(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -615,14 +532,9 @@ int st33zp24_pm_suspend(struct device *dev)
ret = tpm_pm_suspend(dev);
return ret;
-} /* st33zp24_pm_suspend() */
+}
EXPORT_SYMBOL(st33zp24_pm_suspend);
-/*
- * st33zp24_pm_resume resume the TPM device
- * @param: tpm_data, the tpm phy.
- * @return: 0 in case of success.
- */
int st33zp24_pm_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -640,7 +552,7 @@ int st33zp24_pm_resume(struct device *dev)
tpm1_do_selftest(chip);
}
return ret;
-} /* st33zp24_pm_resume() */
+}
EXPORT_SYMBOL(st33zp24_pm_resume);
#endif
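
The stripped banners used an ad-hoc `@param:` syntax that the kernel-doc tooling cannot parse, which is presumably why they are shortened rather than converted. For reference, a proper kernel-doc header for one of these functions would look roughly like this (illustrative, not part of the patch):

```c
/**
 * wait_for_stat() - wait for a TPM_STS value
 * @chip:         TPM chip descriptor
 * @mask:         status bits to wait for
 * @timeout:      timeout in jiffies
 * @queue:        wait queue to sleep on
 * @check_cancel: whether the command may be cancelled
 *
 * Return: 0 on success, -ETIME if the timeout expires.
 */
```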
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index ddaeceb7e109..b009e7479b70 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -444,7 +444,7 @@ static int tpm_add_char_device(struct tpm_chip *chip)
return rc;
}
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) {
rc = cdev_device_add(&chip->cdevs, &chip->devs);
if (rc) {
dev_err(&chip->devs,
@@ -474,13 +474,21 @@ static void tpm_del_char_device(struct tpm_chip *chip)
/* Make the driver uncallable. */
down_write(&chip->ops_sem);
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- if (!tpm_chip_start(chip)) {
- tpm2_shutdown(chip, TPM2_SU_CLEAR);
- tpm_chip_stop(chip);
+
+ /*
+	 * Check if chip->ops is still valid: if the controller driver's
+	 * shutdown handler unregisters the controller, we are called twice
+	 * and chip->ops is already NULL.
+ */
+ if (chip->ops) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ if (!tpm_chip_start(chip)) {
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_stop(chip);
+ }
}
+ chip->ops = NULL;
}
- chip->ops = NULL;
up_write(&chip->ops_sem);
}
@@ -488,7 +496,8 @@ static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
{
struct attribute **i;
- if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL))
+ if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL) ||
+ tpm_is_firmware_upgrade(chip))
return;
sysfs_remove_link(&chip->dev.parent->kobj, "ppi");
@@ -506,7 +515,8 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
struct attribute **i;
int rc;
- if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL))
+ if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL) ||
+ tpm_is_firmware_upgrade(chip))
return 0;
rc = compat_only_sysfs_link_entry_to_kobj(
@@ -536,7 +546,7 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
static int tpm_add_hwrng(struct tpm_chip *chip)
{
- if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip))
return 0;
snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
@@ -550,6 +560,9 @@ static int tpm_get_pcr_allocation(struct tpm_chip *chip)
{
int rc;
+ if (tpm_is_firmware_upgrade(chip))
+ return 0;
+
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ?
tpm2_get_pcr_allocation(chip) :
tpm1_get_pcr_allocation(chip);
@@ -612,7 +625,7 @@ int tpm_chip_register(struct tpm_chip *chip)
return 0;
out_hwrng:
- if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
hwrng_unregister(&chip->hwrng);
out_ppi:
tpm_bios_log_teardown(chip);
@@ -637,10 +650,10 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
void tpm_chip_unregister(struct tpm_chip *chip)
{
tpm_del_legacy_sysfs(chip);
- if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
cdev_device_del(&chip->cdevs, &chip->devs);
tpm_del_char_device(chip);
}
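
Every new `tpm_is_firmware_upgrade()` guard above skips functionality that a TPM stuck in field-upgrade mode cannot provide. The helper itself is not part of this hunk; it is presumably a one-line flag test along these lines:

```c
/* Assumed definition, matching TPM_CHIP_FLAG_FIRMWARE_UPGRADE being
 * set in tpm2_auto_startup() below. */
static inline bool tpm_is_firmware_upgrade(struct tpm_chip *chip)
{
	return chip->flags & TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
}
```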
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 63f03cfb8e6a..54c71473aa29 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -480,6 +480,9 @@ void tpm_sysfs_add_device(struct tpm_chip *chip)
WARN_ON(chip->groups_cnt != 0);
+ if (tpm_is_firmware_upgrade(chip))
+ return;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
else
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index a25815a6f625..4704fa553098 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -745,6 +745,12 @@ int tpm2_auto_startup(struct tpm_chip *chip)
rc = tpm2_get_cc_attrs_tbl(chip);
out:
+ if (rc == TPM2_RC_UPGRADE) {
+ dev_info(&chip->dev, "TPM in field upgrade mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
+
if (rc > 0)
rc = -ENODEV;
return rc;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index b2659a4c4016..dc56b976d816 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -950,9 +950,11 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
priv->phy_ops = phy_ops;
+ dev_set_drvdata(&chip->dev, priv);
+
rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
if (rc < 0)
- goto out_err;
+ return rc;
priv->manufacturer_id = vendor;
@@ -962,8 +964,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
}
- dev_set_drvdata(&chip->dev, priv);
-
if (is_bsw()) {
priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
ILB_REMAP_SIZE);
@@ -994,7 +994,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
intmask &= ~TPM_GLOBAL_INT_ENABLE;
+
+ rc = request_locality(chip, 0);
+ if (rc < 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+ release_locality(chip, 0);
rc = tpm_chip_start(chip);
if (rc)
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index c89278103703..f6c0affbb456 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -628,6 +628,19 @@ static bool tpm_cr50_i2c_req_canceled(struct tpm_chip *chip, u8 status)
return status == TPM_STS_COMMAND_READY;
}
+static bool tpm_cr50_i2c_is_firmware_power_managed(struct device *dev)
+{
+ u8 val;
+ int ret;
+
+	/* This flag should default to true when the device property is not present */
+ ret = device_property_read_u8(dev, "firmware-power-managed", &val);
+ if (ret)
+ return true;
+
+ return val;
+}
+
static const struct tpm_class_ops cr50_i2c = {
.flags = TPM_OPS_AUTO_STARTUP,
.status = &tpm_cr50_i2c_tis_status,
@@ -686,7 +699,8 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
/* cr50 is a TPM 2.0 chip */
chip->flags |= TPM_CHIP_FLAG_TPM2;
- chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+ if (tpm_cr50_i2c_is_firmware_power_managed(dev))
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
/* Default timeouts */
chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
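
With the property lookup defaulting to true, existing systems keep the old behavior, and only a board that explicitly sets `firmware-power-managed` to 0 opts out. A hedged sketch of how platform code could do that with a software node — the node and its attachment are hypothetical; only the property name comes from the driver:

```c
#include <linux/property.h>

/* Hypothetical board fixup: opt the cr50 out of firmware power
 * management; "firmware-power-managed" matches the driver lookup. */
static const struct property_entry cr50_props[] = {
	PROPERTY_ENTRY_U8("firmware-power-managed", 0),
	{ }
};

static const struct software_node cr50_swnode = {
	.properties = cr50_props,
};
```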
diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c
index ea759af25634..7bf123d3c537 100644
--- a/drivers/char/tpm/tpm_tis_spi_cr50.c
+++ b/drivers/char/tpm/tpm_tis_spi_cr50.c
@@ -36,6 +36,9 @@
#define TPM_CR50_FW_VER(l) (0x0f90 | ((l) << 12))
#define TPM_CR50_MAX_FW_VER_LEN 64
+/* Default quality for hwrng. */
+#define TPM_CR50_DEFAULT_RNG_QUALITY 700
+
struct cr50_spi_phy {
struct tpm_tis_spi_phy spi_phy;
@@ -182,6 +185,19 @@ static int cr50_spi_flow_control(struct tpm_tis_spi_phy *phy,
return 0;
}
+static bool tpm_cr50_spi_is_firmware_power_managed(struct device *dev)
+{
+ u8 val;
+ int ret;
+
+	/* This flag should default to true when the device property is not present */
+ ret = device_property_read_u8(dev, "firmware-power-managed", &val);
+ if (ret)
+ return true;
+
+ return val;
+}
+
static int tpm_tis_spi_cr50_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *in, const u8 *out)
{
@@ -264,6 +280,7 @@ int cr50_spi_probe(struct spi_device *spi)
phy = &cr50_phy->spi_phy;
phy->flow_control = cr50_spi_flow_control;
phy->wake_after = jiffies;
+ phy->priv.rng_quality = TPM_CR50_DEFAULT_RNG_QUALITY;
init_completion(&phy->ready);
cr50_phy->access_delay = CR50_NOIRQ_ACCESS_DELAY;
@@ -305,7 +322,8 @@ int cr50_spi_probe(struct spi_device *spi)
cr50_print_fw_version(&phy->priv);
chip = dev_get_drvdata(&spi->dev);
- chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+ if (tpm_cr50_spi_is_firmware_power_managed(&spi->dev))
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
return 0;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 660c5c388c29..2359889a35a0 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1958,7 +1958,7 @@ static void virtcons_remove(struct virtio_device *vdev)
spin_unlock_irq(&pdrvdata_lock);
/* Disable interrupts for vqs */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
/* Finish up work that's lined up */
if (use_multiport(portdev))
cancel_work_sync(&portdev->control_work);
@@ -2148,7 +2148,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
portdev = vdev->priv;
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
if (use_multiport(portdev))
virtqueue_disable_cb(portdev->c_ivq);
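
Both hunks swap the raw `vdev->config->reset(vdev)` indirection for `virtio_reset_device()`. The wrapper, introduced alongside this tree-wide conversion, is most likely just the same call behind a clearer name:

```c
/* Assumed wrapper for the conversion above; behavior is unchanged,
 * only the spelling at the call sites becomes self-describing. */
void virtio_reset_device(struct virtio_device *dev)
{
	dev->config->reset(dev);
}
EXPORT_SYMBOL_GPL(virtio_reset_device);
```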
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index c5b3dc97396a..ad4256d54361 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -169,6 +169,14 @@ config COMMON_CLK_CDCE706
help
This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
+config COMMON_CLK_TPS68470
+ tristate "Clock Driver for TI TPS68470 PMIC"
+ depends on I2C
+ depends on INTEL_SKL_INT3472 || COMPILE_TEST
+ select REGMAP_I2C
+ help
+ This driver supports the clocks provided by the TPS68470 PMIC.
+
config COMMON_CLK_CDCE925
tristate "Clock driver for TI CDCE913/925/937/949 devices"
depends on I2C
@@ -221,6 +229,13 @@ config COMMON_CLK_GEMINI
This driver supports the SoC clocks on the Cortina Systems Gemini
platform, also known as SL3516 or CS3516.
+config COMMON_CLK_LAN966X
+ bool "Generic Clock Controller driver for LAN966X SoC"
+ help
+	  This driver provides support for the Generic Clock Controller (GCK)
+	  on the LAN966X SoC. The GCK generates and supplies clocks to
+	  various peripherals within the SoC.
+
config COMMON_CLK_ASPEED
bool "Clock driver for Aspeed BMC SoCs"
depends on ARCH_ASPEED || COMPILE_TEST
@@ -339,16 +354,6 @@ config COMMON_CLK_STM32MP157
help
Support for stm32mp157 SoC family clocks
-config COMMON_CLK_STM32MP157_SCMI
- bool "stm32mp157 Clock driver with Trusted Firmware"
- depends on COMMON_CLK_STM32MP157
- select COMMON_CLK_SCMI
- select ARM_SCMI_PROTOCOL
- default y
- help
- Support for stm32mp157 SoC family clocks with Trusted Firmware using
- SCMI protocol.
-
config COMMON_CLK_STM32F
def_bool COMMON_CLK && (MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746)
help
@@ -412,11 +417,13 @@ source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sifive/Kconfig"
source "drivers/clk/socfpga/Kconfig"
source "drivers/clk/sprd/Kconfig"
+source "drivers/clk/starfive/Kconfig"
source "drivers/clk/sunxi/Kconfig"
source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
source "drivers/clk/uniphier/Kconfig"
+source "drivers/clk/visconti/Kconfig"
source "drivers/clk/x86/Kconfig"
source "drivers/clk/xilinx/Kconfig"
source "drivers/clk/zynqmp/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index e42312121e51..16e588630472 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
obj-$(CONFIG_COMMON_CLK_K210) += clk-k210.o
obj-$(CONFIG_LMK04832) += clk-lmk04832.o
+obj-$(CONFIG_COMMON_CLK_LAN966X) += clk-lan966x.o
obj-$(CONFIG_COMMON_CLK_LOCHNAGAR) += clk-lochnagar.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
+obj-$(CONFIG_COMMON_CLK_TPS68470) += clk-tps68470.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_VC5) += clk-versaclock5.o
@@ -109,13 +111,15 @@ obj-y += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
+obj-$(CONFIG_SOC_STARFIVE) += starfive/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
-obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/
+obj-y += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-y += versatile/
+obj-$(CONFIG_COMMON_CLK_VISCONTI) += visconti/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_X86) += x86/
endif
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index a254512965eb..3667b4d731e7 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -932,8 +932,7 @@ static int bcm2835_clock_is_on(struct clk_hw *hw)
static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
unsigned long rate,
- unsigned long parent_rate,
- bool round_up)
+ unsigned long parent_rate)
{
struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
const struct bcm2835_clock_data *data = clock->data;
@@ -945,10 +944,6 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
rem = do_div(temp, rate);
div = temp;
-
- /* Round up and mask off the unused bits */
- if (round_up && ((div & unused_frac_mask) != 0 || rem != 0))
- div += unused_frac_mask + 1;
div &= ~unused_frac_mask;
/* different clamping limits apply for a mash clock */
@@ -1079,7 +1074,7 @@ static int bcm2835_clock_set_rate(struct clk_hw *hw,
struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct bcm2835_cprman *cprman = clock->cprman;
const struct bcm2835_clock_data *data = clock->data;
- u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate, false);
+ u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate);
u32 ctl;
spin_lock(&cprman->regs_lock);
@@ -1130,7 +1125,7 @@ static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw,
if (!(BIT(parent_idx) & data->set_rate_parent)) {
*prate = clk_hw_get_rate(parent);
- *div = bcm2835_clock_choose_div(hw, rate, *prate, true);
+ *div = bcm2835_clock_choose_div(hw, rate, *prate);
*avgrate = bcm2835_clock_rate_from_divisor(clock, *prate, *div);
@@ -1216,7 +1211,7 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw,
rate = bcm2835_clock_choose_div_and_prate(hw, i, req->rate,
&div, &prate,
&avgrate);
- if (rate > best_rate && rate <= req->rate) {
+ if (abs(req->rate - rate) < abs(req->rate - best_rate)) {
best_parent = parent;
best_prate = prate;
best_rate = rate;
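
The determine_rate change replaces "best rate not above the request" with "smallest absolute deviation", so a candidate slightly above the request can now win. A standalone toy comparison — the rates are made up for illustration and this is not driver code:

```c
#include <stdlib.h>

/* Toy illustration of the two policies. With req = 48000 and
 * candidates {44100, 48128}: the old rule picks 44100, the new
 * abs()-based rule picks 48128 (deviation 128 vs 3900). */
static long pick_rate(long req, const long *rates, int n, int allow_above)
{
	long best = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!allow_above && rates[i] > req)
			continue;
		if (labs(req - rates[i]) < labs(req - best))
			best = rates[i];
	}
	return best;
}
```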
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index e6d6599d310a..fad78a22218e 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -522,14 +522,6 @@ static struct clk_hw *bm1880_clk_register_pll(struct bm1880_pll_hw_clock *pll_cl
return hw;
}
-static void bm1880_clk_unregister_pll(struct clk_hw *hw)
-{
- struct bm1880_pll_hw_clock *pll_hw = to_bm1880_pll_clk(hw);
-
- clk_hw_unregister(hw);
- kfree(pll_hw);
-}
-
static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks,
int num_clks,
struct bm1880_clock_data *data)
@@ -555,7 +547,7 @@ static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks,
err_clk:
while (i--)
- bm1880_clk_unregister_pll(data->hw_data.hws[clks[i].pll.id]);
+ clk_hw_unregister(data->hw_data.hws[clks[i].pll.id]);
return PTR_ERR(hw);
}
@@ -695,14 +687,6 @@ static struct clk_hw *bm1880_clk_register_div(struct bm1880_div_hw_clock *div_cl
return hw;
}
-static void bm1880_clk_unregister_div(struct clk_hw *hw)
-{
- struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
-
- clk_hw_unregister(hw);
- kfree(div_hw);
-}
-
static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks,
int num_clks,
struct bm1880_clock_data *data)
@@ -729,7 +713,7 @@ static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks,
err_clk:
while (i--)
- bm1880_clk_unregister_div(data->hw_data.hws[clks[i].div.id]);
+ clk_hw_unregister(data->hw_data.hws[clks[i].div.id]);
return PTR_ERR(hw);
}
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 070dc47e95a1..64283807600b 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -222,3 +223,37 @@ void clk_hw_unregister_gate(struct clk_hw *hw)
kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);
+
+static void devm_clk_hw_release_gate(struct device *dev, void *res)
+{
+ clk_hw_unregister_gate(*(struct clk_hw **)res);
+}
+
+struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock)
+{
+ struct clk_hw **ptr, *hw;
+
+ ptr = devres_alloc(devm_clk_hw_release_gate, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ hw = __clk_hw_register_gate(dev, np, name, parent_name, parent_hw,
+ parent_data, flags, reg, bit_idx,
+ clk_gate_flags, lock);
+
+ if (!IS_ERR(hw)) {
+ *ptr = hw;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return hw;
+}
+EXPORT_SYMBOL_GPL(__devm_clk_hw_register_gate);
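
Callers reach `__devm_clk_hw_register_gate()` through a `devm_clk_hw_register_gate()` convenience macro, used by the lan966x driver later in this series. A minimal usage sketch — the device name and register layout are hypothetical:

```c
/* Hypothetical probe: the gate is torn down automatically when the
 * device goes away, so no remove() counterpart is needed. */
static int foo_clk_probe(struct platform_device *pdev)
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);
	struct clk_hw *hw;

	if (IS_ERR(base))
		return PTR_ERR(base);

	hw = devm_clk_hw_register_gate(&pdev->dev, "foo_gate", "foo_parent",
				       0, base, 3 /* bit_idx */, 0, NULL);
	return PTR_ERR_OR_ZERO(hw);
}
```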
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index b51069e794ff..a23fa6d47ef1 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -50,7 +50,7 @@ static DEFINE_SPINLOCK(gemini_clk_lock);
#define PCI_DLL_TAP_SEL_MASK 0x1f
/**
- * struct gemini_data_data - Gemini gated clocks
+ * struct gemini_gate_data - Gemini gated clocks
* @bit_idx: the bit used to gate this clock in the clock register
* @name: the clock name
* @parent_name: the name of the parent clock
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
new file mode 100644
index 000000000000..d1535ac13e89
--- /dev/null
+++ b/drivers/clk/clk-lan966x.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip LAN966x SoC Clock driver.
+ *
+ * Copyright (C) 2021 Microchip Technology, Inc. and its subsidiaries
+ *
+ * Author: Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/microchip,lan966x.h>
+
+#define GCK_ENA BIT(0)
+#define GCK_SRC_SEL GENMASK(9, 8)
+#define GCK_PRESCALER GENMASK(23, 16)
+
+#define DIV_MAX 255
+
+static const char *clk_names[N_CLOCKS] = {
+ "qspi0", "qspi1", "qspi2", "sdmmc0",
+ "pi", "mcan0", "mcan1", "flexcom0",
+ "flexcom1", "flexcom2", "flexcom3",
+ "flexcom4", "timer1", "usb_refclk",
+};
+
+struct lan966x_gck {
+ struct clk_hw hw;
+ void __iomem *reg;
+};
+#define to_lan966x_gck(hw) container_of(hw, struct lan966x_gck, hw)
+
+static const struct clk_parent_data lan966x_gck_pdata[] = {
+ { .fw_name = "cpu", },
+ { .fw_name = "ddr", },
+ { .fw_name = "sys", },
+};
+
+static struct clk_init_data init = {
+ .parent_data = lan966x_gck_pdata,
+ .num_parents = ARRAY_SIZE(lan966x_gck_pdata),
+};
+
+struct clk_gate_soc_desc {
+ const char *name;
+ int bit_idx;
+};
+
+static const struct clk_gate_soc_desc clk_gate_desc[] = {
+ { "uhphs", 11 },
+ { "udphs", 10 },
+ { "mcramc", 9 },
+ { "hmatrix", 8 },
+ { }
+};
+
+static DEFINE_SPINLOCK(clk_gate_lock);
+static void __iomem *base;
+
+static int lan966x_gck_enable(struct clk_hw *hw)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 val = readl(gck->reg);
+
+ val |= GCK_ENA;
+ writel(val, gck->reg);
+
+ return 0;
+}
+
+static void lan966x_gck_disable(struct clk_hw *hw)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 val = readl(gck->reg);
+
+ val &= ~GCK_ENA;
+ writel(val, gck->reg);
+}
+
+static int lan966x_gck_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 div, val = readl(gck->reg);
+
+ if (rate == 0 || parent_rate == 0)
+ return -EINVAL;
+
+	/* Set prescaler */
+ div = parent_rate / rate;
+ val &= ~GCK_PRESCALER;
+ val |= FIELD_PREP(GCK_PRESCALER, (div - 1));
+ writel(val, gck->reg);
+
+ return 0;
+}
+
+static long lan966x_gck_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int div;
+
+ if (rate == 0 || *parent_rate == 0)
+ return -EINVAL;
+
+ if (rate >= *parent_rate)
+ return *parent_rate;
+
+ div = DIV_ROUND_CLOSEST(*parent_rate, rate);
+
+ return *parent_rate / div;
+}
+
+static unsigned long lan966x_gck_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 div, val = readl(gck->reg);
+
+ div = FIELD_GET(GCK_PRESCALER, val);
+
+ return parent_rate / (div + 1);
+}
+
+static int lan966x_gck_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *parent;
+ int i;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); ++i) {
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ /* Allowed prescaler divider range is 0-255 */
+ if (clk_hw_get_rate(parent) / req->rate <= DIV_MAX) {
+ req->best_parent_hw = parent;
+ req->best_parent_rate = clk_hw_get_rate(parent);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static u8 lan966x_gck_get_parent(struct clk_hw *hw)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 val = readl(gck->reg);
+
+ return FIELD_GET(GCK_SRC_SEL, val);
+}
+
+static int lan966x_gck_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 val = readl(gck->reg);
+
+ val &= ~GCK_SRC_SEL;
+ val |= FIELD_PREP(GCK_SRC_SEL, index);
+ writel(val, gck->reg);
+
+ return 0;
+}
+
+static const struct clk_ops lan966x_gck_ops = {
+ .enable = lan966x_gck_enable,
+ .disable = lan966x_gck_disable,
+ .set_rate = lan966x_gck_set_rate,
+ .round_rate = lan966x_gck_round_rate,
+ .recalc_rate = lan966x_gck_recalc_rate,
+ .determine_rate = lan966x_gck_determine_rate,
+ .set_parent = lan966x_gck_set_parent,
+ .get_parent = lan966x_gck_get_parent,
+};
+
+static struct clk_hw *lan966x_gck_clk_register(struct device *dev, int i)
+{
+ struct lan966x_gck *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->reg = base + (i * 4);
+ priv->hw.init = &init;
+ ret = devm_clk_hw_register(dev, &priv->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &priv->hw;
+};
+
+static int lan966x_gate_clk_register(struct device *dev,
+ struct clk_hw_onecell_data *hw_data,
+ void __iomem *gate_base)
+{
+ int i;
+
+ for (i = GCK_GATE_UHPHS; i < N_CLOCKS; ++i) {
+ int idx = i - GCK_GATE_UHPHS;
+
+ hw_data->hws[i] =
+ devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
+ "lan966x", 0, base,
+ clk_gate_desc[idx].bit_idx,
+ 0, &clk_gate_lock);
+
+ if (IS_ERR(hw_data->hws[i]))
+ return dev_err_probe(dev, PTR_ERR(hw_data->hws[i]),
+ "failed to register %s clock\n",
+ clk_gate_desc[idx].name);
+ }
+
+ return 0;
+}
+
+static int lan966x_clk_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *hw_data;
+ struct device *dev = &pdev->dev;
+ void __iomem *gate_base;
+ struct resource *res;
+ int i, ret;
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, N_CLOCKS),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ init.ops = &lan966x_gck_ops;
+
+ hw_data->num = GCK_GATE_UHPHS;
+
+ for (i = 0; i < GCK_GATE_UHPHS; i++) {
+ init.name = clk_names[i];
+ hw_data->hws[i] = lan966x_gck_clk_register(dev, i);
+ if (IS_ERR(hw_data->hws[i])) {
+ dev_err(dev, "failed to register %s clock\n",
+ init.name);
+ return PTR_ERR(hw_data->hws[i]);
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ gate_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gate_base))
+ return PTR_ERR(gate_base);
+
+ hw_data->num = N_CLOCKS;
+
+ ret = lan966x_gate_clk_register(dev, hw_data, gate_base);
+ if (ret)
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
+}
+
+static const struct of_device_id lan966x_clk_dt_ids[] = {
+ { .compatible = "microchip,lan966x-gck", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lan966x_clk_dt_ids);
+
+static struct platform_driver lan966x_clk_driver = {
+ .probe = lan966x_clk_probe,
+ .driver = {
+ .name = "lan966x-clk",
+ .of_match_table = lan966x_clk_dt_ids,
+ },
+};
+builtin_platform_driver(lan966x_clk_driver);
+
+MODULE_AUTHOR("Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>");
+MODULE_DESCRIPTION("LAN966X clock driver");
+MODULE_LICENSE("GPL v2");
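
From a consumer's point of view the new GCK clocks behave like any other: a rate request lands in `lan966x_gck_determine_rate()`, which picks the first parent whose divider fits the 8-bit prescaler. A hedged consumer sketch — the clock binding and rate are illustrative only:

```c
/* Hypothetical peripheral probe using one of the lan966x GCKs. */
static int foo_periph_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);	/* e.g. "flexcom0" via DT */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_rate(clk, 30000000);	/* divided down from a parent */
	if (ret)
		return ret;

	return clk_prepare_enable(clk);
}
```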
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 57ae183982d8..f7b41366666e 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -1740,7 +1740,7 @@ static int si5341_probe(struct i2c_client *client,
clk_prepare(data->clk[i].hw.clk);
}
- err = of_clk_add_hw_provider(client->dev.of_node, of_clk_si5341_get,
+ err = devm_of_clk_add_hw_provider(&client->dev, of_clk_si5341_get,
data);
if (err) {
dev_err(&client->dev, "unable to add clk provider\n");
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index af46176ad053..473dfe632cc5 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -129,7 +129,6 @@ static const struct stm32f4_gate_data stm32f429_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
};
static const struct stm32f4_gate_data stm32f469_gates[] __initconst = {
@@ -211,7 +210,6 @@ static const struct stm32f4_gate_data stm32f469_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
};
static const struct stm32f4_gate_data stm32f746_gates[] __initconst = {
@@ -286,7 +284,6 @@ static const struct stm32f4_gate_data stm32f746_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" },
- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
};
static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
@@ -364,7 +361,6 @@ static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" },
- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" },
};
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 4bd1fe7d8af4..863274aa50e3 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -2253,8 +2253,6 @@ static int stm32_rcc_reset_init(struct device *dev, void __iomem *base,
const struct stm32_rcc_match_data *data = match->data;
struct stm32_reset_data *reset_data = NULL;
- data = match->data;
-
reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
if (!reset_data)
return -ENOMEM;
diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c
new file mode 100644
index 000000000000..e5fbefd6ac2d
--- /dev/null
+++ b/drivers/clk/clk-tps68470.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock driver for TPS68470 PMIC
+ *
+ * Copyright (c) 2021 Red Hat Inc.
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ * Zaikuo Wang <zaikuo.wang@intel.com>
+ * Tianshu Qiu <tian.shu.qiu@intel.com>
+ * Jian Xu Zheng <jian.xu.zheng@intel.com>
+ * Yuning Pu <yuning.pu@intel.com>
+ * Antti Laakso <antti.laakso@intel.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/tps68470.h>
+#include <linux/regmap.h>
+
+#define TPS68470_CLK_NAME "tps68470-clk"
+
+#define to_tps68470_clkdata(clkd) \
+ container_of(clkd, struct tps68470_clkdata, clkout_hw)
+
+static struct tps68470_clkout_freqs {
+ unsigned long freq;
+ unsigned int xtaldiv;
+ unsigned int plldiv;
+ unsigned int postdiv;
+ unsigned int buckdiv;
+ unsigned int boostdiv;
+} clk_freqs[] = {
+/*
+ * The PLL is used to multiply the crystal oscillator
+ * frequency range of 3 MHz to 27 MHz by a programmable
+ * factor of F = (M/N)*(1/P) such that the outputs
+ * available at the HCLK_A or HCLK_B pins are in the range
+ * of 4 MHz to 64 MHz in increments of 0.1 MHz.
+ *
+ * hclk_# = osc_in * (((plldiv*2)+320) / (xtaldiv+30)) * (1 / 2^postdiv)
+ *
+ * PLL_REF_CLK should be as close as possible to 100 kHz
+ * PLL_REF_CLK = input clk / (XTALDIV[7:0] + 30)
+ *
+ * PLL_VCO_CLK = PLL_REF_CLK * (plldiv*2 + 320)
+ *
+ * BOOST should be as close as possible to 2 MHz
+ * BOOST = PLL_VCO_CLK / (BOOSTDIV[4:0] + 16)
+ *
+ * BUCK should be as close as possible to 5.2 MHz
+ * BUCK = PLL_VCO_CLK / (BUCKDIV[3:0] + 5)
+ *
+ * osc_in   xtaldiv   plldiv   postdiv   hclk_#
+ * 20 MHz   170       32       1         19.2 MHz
+ * 20 MHz   170       40       1         20 MHz
+ * 20 MHz   170       80       1         24 MHz
+ */
+ { 19200000, 170, 32, 1, 2, 3 },
+ { 20000000, 170, 40, 1, 3, 4 },
+ { 24000000, 170, 80, 1, 4, 8 },
+};
+
+struct tps68470_clkdata {
+ struct clk_hw clkout_hw;
+ struct regmap *regmap;
+ unsigned long rate;
+};
+
+static int tps68470_clk_is_prepared(struct clk_hw *hw)
+{
+ struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+ int val;
+
+ if (regmap_read(clkdata->regmap, TPS68470_REG_PLLCTL, &val))
+ return 0;
+
+ return val & TPS68470_PLL_EN_MASK;
+}
+
+static int tps68470_clk_prepare(struct clk_hw *hw)
+{
+ struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+
+ regmap_write(clkdata->regmap, TPS68470_REG_CLKCFG1,
+ (TPS68470_PLL_OUTPUT_ENABLE << TPS68470_OUTPUT_A_SHIFT) |
+ (TPS68470_PLL_OUTPUT_ENABLE << TPS68470_OUTPUT_B_SHIFT));
+
+ regmap_update_bits(clkdata->regmap, TPS68470_REG_PLLCTL,
+ TPS68470_PLL_EN_MASK, TPS68470_PLL_EN_MASK);
+
+ /*
+ * The PLLCTL reg lock bit is set by the PMIC after approx. 4ms and
+ * does not indicate a true lock, so just wait 4 ms.
+ */
+ usleep_range(4000, 5000);
+
+ return 0;
+}
+
+static void tps68470_clk_unprepare(struct clk_hw *hw)
+{
+ struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+
+ /* Disable clock first ... */
+ regmap_update_bits(clkdata->regmap, TPS68470_REG_PLLCTL, TPS68470_PLL_EN_MASK, 0);
+
+ /* ... and then tri-state the clock outputs. */
+ regmap_write(clkdata->regmap, TPS68470_REG_CLKCFG1, 0);
+}
+
+static unsigned long tps68470_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+
+ return clkdata->rate;
+}
+
+/*
+ * This returns the index of the clk_freqs[] cfg with the closest rate for
+ * use in tps68470_clk_round_rate(). tps68470_clk_set_rate() checks that
+ * the rate of the returned cfg is an exact match.
+ */
+static unsigned int tps68470_clk_cfg_lookup(unsigned long rate)
+{
+ long diff, best_diff = LONG_MAX;
+ unsigned int i, best_idx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(clk_freqs); i++) {
+ diff = clk_freqs[i].freq - rate;
+ if (diff == 0)
+ return i;
+
+ diff = abs(diff);
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_idx = i;
+ }
+ }
+
+ return best_idx;
+}
+
+static long tps68470_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int idx = tps68470_clk_cfg_lookup(rate);
+
+ return clk_freqs[idx].freq;
+}
+
+static int tps68470_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+ unsigned int idx = tps68470_clk_cfg_lookup(rate);
+
+ if (rate != clk_freqs[idx].freq)
+ return -EINVAL;
+
+ regmap_write(clkdata->regmap, TPS68470_REG_BOOSTDIV, clk_freqs[idx].boostdiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_BUCKDIV, clk_freqs[idx].buckdiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_PLLSWR, TPS68470_PLLSWR_DEFAULT);
+ regmap_write(clkdata->regmap, TPS68470_REG_XTALDIV, clk_freqs[idx].xtaldiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_PLLDIV, clk_freqs[idx].plldiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_POSTDIV, clk_freqs[idx].postdiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_POSTDIV2, clk_freqs[idx].postdiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_CLKCFG2, TPS68470_CLKCFG2_DRV_STR_2MA);
+
+ regmap_write(clkdata->regmap, TPS68470_REG_PLLCTL,
+ TPS68470_OSC_EXT_CAP_DEFAULT << TPS68470_OSC_EXT_CAP_SHIFT |
+ TPS68470_CLK_SRC_XTAL << TPS68470_CLK_SRC_SHIFT);
+
+ clkdata->rate = rate;
+
+ return 0;
+}
+
+static const struct clk_ops tps68470_clk_ops = {
+ .is_prepared = tps68470_clk_is_prepared,
+ .prepare = tps68470_clk_prepare,
+ .unprepare = tps68470_clk_unprepare,
+ .recalc_rate = tps68470_clk_recalc_rate,
+ .round_rate = tps68470_clk_round_rate,
+ .set_rate = tps68470_clk_set_rate,
+};
+
+static int tps68470_clk_probe(struct platform_device *pdev)
+{
+ struct tps68470_clk_platform_data *pdata = pdev->dev.platform_data;
+ struct clk_init_data tps68470_clk_initdata = {
+ .name = TPS68470_CLK_NAME,
+ .ops = &tps68470_clk_ops,
+ /* Changing the dividers when the PLL is on is not allowed */
+ .flags = CLK_SET_RATE_GATE,
+ };
+ struct tps68470_clkdata *tps68470_clkdata;
+ int ret;
+
+ tps68470_clkdata = devm_kzalloc(&pdev->dev, sizeof(*tps68470_clkdata),
+ GFP_KERNEL);
+ if (!tps68470_clkdata)
+ return -ENOMEM;
+
+ tps68470_clkdata->regmap = dev_get_drvdata(pdev->dev.parent);
+ tps68470_clkdata->clkout_hw.init = &tps68470_clk_initdata;
+
+ /* Set initial rate */
+ tps68470_clk_set_rate(&tps68470_clkdata->clkout_hw, clk_freqs[0].freq, 0);
+
+ ret = devm_clk_hw_register(&pdev->dev, &tps68470_clkdata->clkout_hw);
+ if (ret)
+ return ret;
+
+ ret = devm_clk_hw_register_clkdev(&pdev->dev, &tps68470_clkdata->clkout_hw,
+ TPS68470_CLK_NAME, NULL);
+ if (ret)
+ return ret;
+
+ if (pdata) {
+ ret = devm_clk_hw_register_clkdev(&pdev->dev,
+ &tps68470_clkdata->clkout_hw,
+ pdata->consumer_con_id,
+ pdata->consumer_dev_name);
+ }
+
+ return ret;
+}
+
+static struct platform_driver tps68470_clk_driver = {
+ .driver = {
+ .name = TPS68470_CLK_NAME,
+ },
+ .probe = tps68470_clk_probe,
+};
+
+/*
+ * The ACPI tps68470 probe-ordering depends on the clk/gpio/regulator drivers
+ * registering before the camera-sensor drivers that use them are bound.
+ * subsys_initcall() ensures this when the drivers are builtin.
+ */
+static int __init tps68470_clk_init(void)
+{
+ return platform_driver_register(&tps68470_clk_driver);
+}
+subsys_initcall(tps68470_clk_init);
+
+static void __exit tps68470_clk_exit(void)
+{
+ platform_driver_unregister(&tps68470_clk_driver);
+}
+module_exit(tps68470_clk_exit);
+
+MODULE_ALIAS("platform:tps68470-clk");
+MODULE_DESCRIPTION("Clock driver for the TPS68470 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 566ee2c78709..8de6a22498e7 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -424,19 +424,20 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
if (entry->hw) {
parent = entry->hw->core;
- /*
- * We have a direct reference but it isn't registered yet?
- * Orphan it and let clk_reparent() update the orphan status
- * when the parent is registered.
- */
- if (!parent)
- parent = ERR_PTR(-EPROBE_DEFER);
} else {
parent = clk_core_get(core, index);
if (PTR_ERR(parent) == -ENOENT && entry->name)
parent = clk_core_lookup(entry->name);
}
+ /*
+ * We have a direct reference but it isn't registered yet?
+ * Orphan it and let clk_reparent() update the orphan status
+ * when the parent is registered.
+ */
+ if (!parent)
+ parent = ERR_PTR(-EPROBE_DEFER);
+
/* Only cache it if it's not an error */
if (!IS_ERR(parent))
entry->core = parent;
@@ -2965,7 +2966,9 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
+ clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
+ clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@@ -3217,6 +3220,42 @@ static int current_parent_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(current_parent);
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+static ssize_t current_parent_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct clk_core *core = s->private;
+ struct clk_core *parent;
+ u8 idx;
+ int err;
+
+ err = kstrtou8_from_user(ubuf, count, 0, &idx);
+ if (err < 0)
+ return err;
+
+ parent = clk_core_get_parent_by_index(core, idx);
+ if (!parent)
+ return -ENOENT;
+
+ clk_prepare_lock();
+ err = clk_core_set_parent_nolock(core, parent);
+ clk_prepare_unlock();
+ if (err)
+ return err;
+
+ return count;
+}
+
+static const struct file_operations current_parent_rw_fops = {
+ .open = current_parent_open,
+ .write = current_parent_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
static int clk_duty_cycle_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
@@ -3282,8 +3321,12 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
debugfs_create_file("clk_prepare_enable", 0644, root, core,
&clk_prepare_enable_fops);
-#endif
+ if (core->num_parents > 1)
+ debugfs_create_file("clk_parent", 0644, root, core,
+ &current_parent_rw_fops);
+ else
+#endif
if (core->num_parents > 0)
debugfs_create_file("clk_parent", 0444, root, core,
&current_parent_fops);
@@ -3343,6 +3386,24 @@ static int __init clk_debug_init(void)
{
struct clk_core *core;
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+ pr_warn("\n");
+ pr_warn("********************************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
+ pr_warn("** **\n");
+ pr_warn("** This means that this kernel is built to expose clk operations **\n");
+ pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n");
+ pr_warn("** to userspace, which may compromise security on your system. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging the **\n");
+ pr_warn("** kernel, report this immediately to your vendor! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("********************************************************************\n");
+#endif
+
rootdir = debugfs_create_dir("clk", NULL);
debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
@@ -3413,9 +3474,6 @@ static int __clk_core_init(struct clk_core *core)
unsigned long rate;
int phase;
- if (!core)
- return -EINVAL;
-
clk_prepare_lock();
/*
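
With `CLOCK_ALLOW_WRITE_DEBUGFS` defined, `clk_parent` becomes writable and accepts a parent index. A userspace sketch exercising it — the clock path is hypothetical, and the boot banner added above exists precisely because this is debug-only:

```c
#include <fcntl.h>
#include <unistd.h>

/* Hypothetical: switch clock "foo_mux" to parent index 1 via the new
 * writable debugfs file. Requires CLOCK_ALLOW_WRITE_DEBUGFS. */
int main(void)
{
	int fd = open("/sys/kernel/debug/clk/foo_mux/clk_parent", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
```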
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index c55577604e16..021355a24708 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -277,9 +277,9 @@ static const char * const imx8mn_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audi
static const char * const imx8mn_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
-static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m",
- "sys_pll1_200m", "audio_pll2_out", "vpu_pll",
- "sys_pll1_80m", };
+static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "dummy",
+ "sys_pll1_200m", "audio_pll2_out", "sys_pll2_500m",
+ "dummy", "sys_pll1_80m", };
static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m",
"sys_pll2_166m", "sys_pll3_out", "audio_pll1_out",
"video_pll1_out", "osc_32k", };
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 12837304545d..c990ad37882b 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -700,7 +700,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_HDMI_ROOT] = imx_clk_hw_gate4("hdmi_root_clk", "hdmi_axi", ccm_base + 0x45f0, 0);
hws[IMX8MP_CLK_TSENSOR_ROOT] = imx_clk_hw_gate4("tsensor_root_clk", "ipg_root", ccm_base + 0x4620, 0);
hws[IMX8MP_CLK_VPU_ROOT] = imx_clk_hw_gate4("vpu_root_clk", "vpu_bus", ccm_base + 0x4630, 0);
- hws[IMX8MP_CLK_AUDIO_ROOT] = imx_clk_hw_gate4("audio_root_clk", "ipg_root", ccm_base + 0x4650, 0);
+ hws[IMX8MP_CLK_AUDIO_ROOT] = imx_clk_hw_gate4("audio_root_clk", "audio_ahb", ccm_base + 0x4650, 0);
hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
hws[IMX8MP_CLK_A53_CORE]->clk,
diff --git a/drivers/clk/imx/clk-imx8ulp.c b/drivers/clk/imx/clk-imx8ulp.c
index 6699437e17b8..8eb1af2d6429 100644
--- a/drivers/clk/imx/clk-imx8ulp.c
+++ b/drivers/clk/imx/clk-imx8ulp.c
@@ -559,6 +559,7 @@ static struct platform_driver imx8ulp_clk_driver = {
.probe = imx8ulp_clk_probe,
.driver = {
.name = KBUILD_MODNAME,
+ .suppress_bind_attrs = true,
.of_match_table = imx8ulp_clk_dt_ids,
},
};
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index 36ffb0525735..93ee81b28fc7 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -8,20 +8,19 @@
#include "clk.h"
+#define MFN_BITS (10)
+#define MFN_SIGN (BIT(MFN_BITS - 1))
+#define MFN_MASK (MFN_SIGN - 1)
+
/**
- * pll v1
+ * struct clk_pllv1 - IMX PLLv1 clock descriptor
*
- * @clk_hw clock source
- * @parent the parent clock name
- * @base base address of pll registers
+ * @hw: clock source
+ * @base: base address of pll registers
+ * @type: type of IMX_PLLV1
*
* PLL clock version 1, found on i.MX1/21/25/27/31/35
*/
-
-#define MFN_BITS (10)
-#define MFN_SIGN (BIT(MFN_BITS - 1))
-#define MFN_MASK (MFN_SIGN - 1)
-
struct clk_pllv1 {
struct clk_hw hw;
void __iomem *base;
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 20ee9611ba6e..eea32f87c60a 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -247,7 +247,7 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
div = rate / parent_rate;
temp64 = (u64) (rate - div * parent_rate);
temp64 *= mfd;
- do_div(temp64, parent_rate);
+ temp64 = div64_ul(temp64, parent_rate);
mfn = temp64;
temp64 = (u64)parent_rate;
@@ -277,7 +277,7 @@ static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
div = rate / parent_rate;
temp64 = (u64) (rate - div * parent_rate);
temp64 *= mfd;
- do_div(temp64, parent_rate);
+ temp64 = div64_ul(temp64, parent_rate);
mfn = temp64;
val = readl_relaxed(pll->base);
@@ -334,7 +334,7 @@ static struct clk_pllv3_vf610_mf clk_pllv3_vf610_rate_to_mf(
/* rate = parent_rate * (mfi + mfn/mfd) */
temp64 = rate - parent_rate * mf.mfi;
temp64 *= mf.mfd;
- do_div(temp64, parent_rate);
+ temp64 = div64_ul(temp64, parent_rate);
mf.mfn = temp64;
}
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index 080d492ac95c..8fdd383560fb 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -313,6 +313,16 @@ static const struct ingenic_cgu_clk_info jz4760_cgu_clocks[] = {
.parents = { JZ4760_CLK_H2CLK, },
.gate = { CGU_REG_CLKGR0, 21 },
},
+ [JZ4760_CLK_MDMA] = {
+ "mdma", CGU_CLK_GATE,
+ .parents = { JZ4760_CLK_HCLK, },
+ .gate = { CGU_REG_CLKGR0, 25 },
+ },
+ [JZ4760_CLK_BDMA] = {
+ "bdma", CGU_CLK_GATE,
+ .parents = { JZ4760_CLK_HCLK, },
+ .gate = { CGU_REG_CLKGR1, 0 },
+ },
[JZ4760_CLK_I2C0] = {
"i2c0", CGU_CLK_GATE,
.parents = { JZ4760_CLK_EXT, },
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index 8c6c1208f462..7ef91257630e 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -329,6 +329,11 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.parents = { JZ4770_CLK_H2CLK, },
.gate = { CGU_REG_CLKGR0, 21 },
},
+ [JZ4770_CLK_BDMA] = {
+ "bdma", CGU_CLK_GATE,
+ .parents = { JZ4770_CLK_H2CLK, },
+ .gate = { CGU_REG_CLKGR1, 0 },
+ },
[JZ4770_CLK_I2C0] = {
"i2c0", CGU_CLK_GATE,
.parents = { JZ4770_CLK_EXT, },
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 3ce6fb04d8ff..01ef02c54725 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -344,6 +344,23 @@ config COMMON_CLK_MT7629_HIFSYS
This driver supports MediaTek MT7629 HIFSYS clocks providing
to PCI-E and USB.
+config COMMON_CLK_MT7986
+ bool "Clock driver for MediaTek MT7986"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT7986 basic clocks and the clocks
+ required for various peripherals found on the MT7986 SoC.
+
+config COMMON_CLK_MT7986_ETHSYS
+ bool "Clock driver for MediaTek MT7986 ETHSYS"
+ depends on COMMON_CLK_MT7986
+ default COMMON_CLK_MT7986
+ help
+ This driver adds support for the Ethernet and SGMII clocks
+ required on the MediaTek MT7986 SoC.
+
config COMMON_CLK_MT8135
bool "Clock driver for MediaTek MT8135"
depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index dc96038a0155..7b0c2646ce4a 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -46,6 +46,10 @@ obj-$(CONFIG_COMMON_CLK_MT7622_AUDSYS) += clk-mt7622-aud.o
obj-$(CONFIG_COMMON_CLK_MT7629) += clk-mt7629.o
obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
+obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-apmixed.o
+obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-topckgen.o
+obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-infracfg.o
+obj-$(CONFIG_COMMON_CLK_MT7986_ETHSYS) += clk-mt7986-eth.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8167) += clk-mt8167.o
obj-$(CONFIG_COMMON_CLK_MT8167_AUDSYS) += clk-mt8167-aud.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index b02d2f74dd0d..5d88b428565b 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -16,28 +16,24 @@
#include "clk-mtk.h"
#include "clk-gate.h"
-static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
+static u32 mtk_get_clockgating(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
regmap_read(cg->regmap, cg->sta_ofs, &val);
- val &= BIT(cg->bit);
+ return val & BIT(cg->bit);
+}
- return val == 0;
+static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
+{
+ return mtk_get_clockgating(hw) == 0;
}
static int mtk_cg_bit_is_set(struct clk_hw *hw)
{
- struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- u32 val;
-
- regmap_read(cg->regmap, cg->sta_ofs, &val);
-
- val &= BIT(cg->bit);
-
- return val != 0;
+ return mtk_get_clockgating(hw) != 0;
}
static void mtk_cg_set_bit(struct clk_hw *hw)
@@ -57,17 +53,15 @@ static void mtk_cg_clr_bit(struct clk_hw *hw)
static void mtk_cg_set_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- u32 cgbit = BIT(cg->bit);
- regmap_update_bits(cg->regmap, cg->sta_ofs, cgbit, cgbit);
+ regmap_set_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
}
static void mtk_cg_clr_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- u32 cgbit = BIT(cg->bit);
- regmap_update_bits(cg->regmap, cg->sta_ofs, cgbit, 0);
+ regmap_clear_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
}
static int mtk_cg_enable(struct clk_hw *hw)
diff --git a/drivers/clk/mediatek/clk-mt7986-apmixed.c b/drivers/clk/mediatek/clk-mt7986-apmixed.c
new file mode 100644
index 000000000000..98ec3887585f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7986-apmixed.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mt7986-clk.h>
+#include <linux/clk.h>
+
+#define MT7986_PLL_FMAX (2500UL * MHZ)
+#define CON0_MT7986_RST_BAR BIT(27)
+
+#define PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ _div_table, _parent_name) \
+ { \
+ .id = _id, .name = _name, .reg = _reg, .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, .flags = _flags, \
+ .rst_bar_mask = CON0_MT7986_RST_BAR, .fmax = MT7986_PLL_FMAX, \
+ .pcwbits = _pcwbits, .pd_reg = _pd_reg, .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, .div_table = _div_table, \
+ .parent_name = _parent_name, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, \
+ _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) \
+ PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, NULL, \
+ "clkxtal")
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001, 0, 32,
+ 0x0200, 4, 0, 0x0204, 0),
+ PLL(CLK_APMIXED_NET2PLL, "net2pll", 0x0210, 0x021C, 0x00000001, 0, 32,
+ 0x0210, 4, 0, 0x0214, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0220, 0x022C, 0x00000001, 0, 32,
+ 0x0220, 4, 0, 0x0224, 0),
+ PLL(CLK_APMIXED_SGMPLL, "sgmpll", 0x0230, 0x023c, 0x00000001, 0, 32,
+ 0x0230, 4, 0, 0x0234, 0),
+ PLL(CLK_APMIXED_WEDMCUPLL, "wedmcupll", 0x0240, 0x024c, 0x00000001, 0,
+ 32, 0x0240, 4, 0, 0x0244, 0),
+ PLL(CLK_APMIXED_NET1PLL, "net1pll", 0x0250, 0x025c, 0x00000001, 0, 32,
+ 0x0250, 4, 0, 0x0254, 0),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x0260, 0x0270, 0x00000001, 0, 32, 0x0260,
+ 4, 0, 0x0264, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0278, 0x0288, 0x00000001, 0, 32,
+ 0x0278, 4, 0, 0x027c, 0),
+};
+
+static const struct of_device_id of_match_clk_mt7986_apmixed[] = {
+ { .compatible = "mediatek,mt7986-apmixedsys", },
+ {}
+};
+
+static int clk_mt7986_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(plls));
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
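+ /* The ARM PLL is assumed to clock the CPU cores, so keep it enabled. */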
+ clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r) {
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+ goto free_apmixed_data;
+ }
+ return r;
+
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt7986_apmixed_drv = {
+ .probe = clk_mt7986_apmixed_probe,
+ .driver = {
+ .name = "clk-mt7986-apmixed",
+ .of_match_table = of_match_clk_mt7986_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt7986_apmixed_drv);
diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
new file mode 100644
index 000000000000..495d023ccad7
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7986-eth.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7986-clk.h>
+
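+/*
+ * The SGMII and ETH gate banks below use a single read/write register
+ * for set/clear/status, hence the inverted no-set/clr gate ops.
+ */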
+static const struct mtk_gate_regs sgmii0_cg_regs = {
+ .set_ofs = 0xe4,
+ .clr_ofs = 0xe4,
+ .sta_ofs = 0xe4,
+};
+
+#define GATE_SGMII0(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &sgmii0_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate sgmii0_clks[] __initconst = {
+ GATE_SGMII0(CLK_SGMII0_TX250M_EN, "sgmii0_tx250m_en", "top_xtal", 2),
+ GATE_SGMII0(CLK_SGMII0_RX250M_EN, "sgmii0_rx250m_en", "top_xtal", 3),
+ GATE_SGMII0(CLK_SGMII0_CDR_REF, "sgmii0_cdr_ref", "top_xtal", 4),
+ GATE_SGMII0(CLK_SGMII0_CDR_FB, "sgmii0_cdr_fb", "top_xtal", 5),
+};
+
+static const struct mtk_gate_regs sgmii1_cg_regs = {
+ .set_ofs = 0xe4,
+ .clr_ofs = 0xe4,
+ .sta_ofs = 0xe4,
+};
+
+#define GATE_SGMII1(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &sgmii1_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate sgmii1_clks[] __initconst = {
+ GATE_SGMII1(CLK_SGMII1_TX250M_EN, "sgmii1_tx250m_en", "top_xtal", 2),
+ GATE_SGMII1(CLK_SGMII1_RX250M_EN, "sgmii1_rx250m_en", "top_xtal", 3),
+ GATE_SGMII1(CLK_SGMII1_CDR_REF, "sgmii1_cdr_ref", "top_xtal", 4),
+ GATE_SGMII1(CLK_SGMII1_CDR_FB, "sgmii1_cdr_fb", "top_xtal", 5),
+};
+
+static const struct mtk_gate_regs eth_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+#define GATE_ETH(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &eth_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate eth_clks[] __initconst = {
+ GATE_ETH(CLK_ETH_FE_EN, "eth_fe_en", "netsys_2x_sel", 6),
+ GATE_ETH(CLK_ETH_GP2_EN, "eth_gp2_en", "sgm_325m_sel", 7),
+ GATE_ETH(CLK_ETH_GP1_EN, "eth_gp1_en", "sgm_325m_sel", 8),
+ GATE_ETH(CLK_ETH_WOCPU1_EN, "eth_wocpu1_en", "netsys_mcu_sel", 14),
+ GATE_ETH(CLK_ETH_WOCPU0_EN, "eth_wocpu0_en", "netsys_mcu_sel", 15),
+};
+
+static void __init mtk_sgmiisys_0_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii0_clks));
+
+ mtk_clk_register_gates(node, sgmii0_clks, ARRAY_SIZE(sgmii0_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_sgmiisys_0, "mediatek,mt7986-sgmiisys_0",
+ mtk_sgmiisys_0_init);
+
+static void __init mtk_sgmiisys_1_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii1_clks));
+
+ mtk_clk_register_gates(node, sgmii1_clks, ARRAY_SIZE(sgmii1_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_sgmiisys_1, "mediatek,mt7986-sgmiisys_1",
+ mtk_sgmiisys_1_init);
+
+static void __init mtk_ethsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(eth_clks));
+
+ mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_ethsys, "mediatek,mt7986-ethsys_ck", mtk_ethsys_init);
diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
new file mode 100644
index 000000000000..f209c559fbc3
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mt7986-clk.h>
+#include <linux/clk.h>
+
+static DEFINE_SPINLOCK(mt7986_clk_lock);
+
+static const struct mtk_fixed_factor infra_divs[] = {
+ FACTOR(CLK_INFRA_SYSAXI_D2, "infra_sysaxi_d2", "sysaxi_sel", 1, 2),
+};
+
+static const char *const infra_uart_parent[] __initconst = { "csw_f26m_sel",
+ "uart_sel" };
+
+static const char *const infra_spi_parents[] __initconst = { "i2c_sel",
+ "spi_sel" };
+
+static const char *const infra_pwm_bsel_parents[] __initconst = {
+ "top_rtc_32p7k", "csw_f26m_sel", "infra_sysaxi_d2", "pwm_sel"
+};
+
+static const char *const infra_pcie_parents[] __initconst = {
+ "top_rtc_32p7k", "csw_f26m_sel", "top_xtal", "pextp_tl_ck_sel"
+};
+
+static const struct mtk_mux infra_muxes[] = {
+ /* MODULE_CLK_SEL_0 */
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_UART0_SEL, "infra_uart0_sel",
+ infra_uart_parent, 0x0018, 0x0010, 0x0014, 0, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_UART1_SEL, "infra_uart1_sel",
+ infra_uart_parent, 0x0018, 0x0010, 0x0014, 1, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_UART2_SEL, "infra_uart2_sel",
+ infra_uart_parent, 0x0018, 0x0010, 0x0014, 2, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_SPI0_SEL, "infra_spi0_sel",
+ infra_spi_parents, 0x0018, 0x0010, 0x0014, 4, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_SPI1_SEL, "infra_spi1_sel",
+ infra_spi_parents, 0x0018, 0x0010, 0x0014, 5, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM1_SEL, "infra_pwm1_sel",
+ infra_pwm_bsel_parents, 0x0018, 0x0010, 0x0014, 9,
+ 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM2_SEL, "infra_pwm2_sel",
+ infra_pwm_bsel_parents, 0x0018, 0x0010, 0x0014, 11,
+ 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_BSEL, "infra_pwm_bsel",
+ infra_pwm_bsel_parents, 0x0018, 0x0010, 0x0014, 13,
+ 2, -1, -1, -1),
+ /* MODULE_CLK_SEL_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PCIE_SEL, "infra_pcie_sel",
+ infra_pcie_parents, 0x0028, 0x0020, 0x0024, 0, 2,
+ -1, -1, -1),
+};
+
+static const struct mtk_gate_regs infra0_cg_regs = {
+ .set_ofs = 0x40,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x48,
+};
+
+static const struct mtk_gate_regs infra1_cg_regs = {
+ .set_ofs = 0x50,
+ .clr_ofs = 0x54,
+ .sta_ofs = 0x58,
+};
+
+static const struct mtk_gate_regs infra2_cg_regs = {
+ .set_ofs = 0x60,
+ .clr_ofs = 0x64,
+ .sta_ofs = 0x68,
+};
+
+#define GATE_INFRA0(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &infra0_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_INFRA1(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &infra1_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_INFRA2(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &infra2_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate infra_clks[] = {
+ /* INFRA0 */
+ GATE_INFRA0(CLK_INFRA_GPT_STA, "infra_gpt_sta", "infra_sysaxi_d2", 0),
+ GATE_INFRA0(CLK_INFRA_PWM_HCK, "infra_pwm_hck", "infra_sysaxi_d2", 1),
+ GATE_INFRA0(CLK_INFRA_PWM_STA, "infra_pwm_sta", "infra_pwm_bsel", 2),
+ GATE_INFRA0(CLK_INFRA_PWM1_CK, "infra_pwm1", "infra_pwm1_sel", 3),
+ GATE_INFRA0(CLK_INFRA_PWM2_CK, "infra_pwm2", "infra_pwm2_sel", 4),
+ GATE_INFRA0(CLK_INFRA_CQ_DMA_CK, "infra_cq_dma", "sysaxi_sel", 6),
+ GATE_INFRA0(CLK_INFRA_EIP97_CK, "infra_eip97", "eip_b_sel", 7),
+ GATE_INFRA0(CLK_INFRA_AUD_BUS_CK, "infra_aud_bus", "sysaxi_sel", 8),
+ GATE_INFRA0(CLK_INFRA_AUD_26M_CK, "infra_aud_26m", "csw_f26m_sel", 9),
+ GATE_INFRA0(CLK_INFRA_AUD_L_CK, "infra_aud_l", "aud_l_sel", 10),
+ GATE_INFRA0(CLK_INFRA_AUD_AUD_CK, "infra_aud_aud", "a1sys_sel", 11),
+ GATE_INFRA0(CLK_INFRA_AUD_EG2_CK, "infra_aud_eg2", "a_tuner_sel", 13),
+ GATE_INFRA0(CLK_INFRA_DRAMC_26M_CK, "infra_dramc_26m", "csw_f26m_sel",
+ 14),
+ GATE_INFRA0(CLK_INFRA_DBG_CK, "infra_dbg", "infra_sysaxi_d2", 15),
+ GATE_INFRA0(CLK_INFRA_AP_DMA_CK, "infra_ap_dma", "infra_sysaxi_d2", 16),
+ GATE_INFRA0(CLK_INFRA_SEJ_CK, "infra_sej", "infra_sysaxi_d2", 24),
+ GATE_INFRA0(CLK_INFRA_SEJ_13M_CK, "infra_sej_13m", "csw_f26m_sel", 25),
+ GATE_INFRA0(CLK_INFRA_TRNG_CK, "infra_trng", "sysaxi_sel", 26),
+ /* INFRA1 */
+ GATE_INFRA1(CLK_INFRA_THERM_CK, "infra_therm", "csw_f26m_sel", 0),
+ GATE_INFRA1(CLK_INFRA_I2C0_CK, "infra_i2c0", "i2c_sel", 1),
+ GATE_INFRA1(CLK_INFRA_UART0_CK, "infra_uart0", "infra_uart0_sel", 2),
+ GATE_INFRA1(CLK_INFRA_UART1_CK, "infra_uart1", "infra_uart1_sel", 3),
+ GATE_INFRA1(CLK_INFRA_UART2_CK, "infra_uart2", "infra_uart2_sel", 4),
+ GATE_INFRA1(CLK_INFRA_NFI1_CK, "infra_nfi1", "nfi1x_sel", 8),
+ GATE_INFRA1(CLK_INFRA_SPINFI1_CK, "infra_spinfi1", "spinfi_sel", 9),
+ GATE_INFRA1(CLK_INFRA_NFI_HCK_CK, "infra_nfi_hck", "infra_sysaxi_d2",
+ 10),
+ GATE_INFRA1(CLK_INFRA_SPI0_CK, "infra_spi0", "infra_spi0_sel", 11),
+ GATE_INFRA1(CLK_INFRA_SPI1_CK, "infra_spi1", "infra_spi1_sel", 12),
+ GATE_INFRA1(CLK_INFRA_SPI0_HCK_CK, "infra_spi0_hck", "infra_sysaxi_d2",
+ 13),
+ GATE_INFRA1(CLK_INFRA_SPI1_HCK_CK, "infra_spi1_hck", "infra_sysaxi_d2",
+ 14),
+ GATE_INFRA1(CLK_INFRA_FRTC_CK, "infra_frtc", "top_rtc_32k", 15),
+ GATE_INFRA1(CLK_INFRA_MSDC_CK, "infra_msdc", "emmc_416m_sel", 16),
+ GATE_INFRA1(CLK_INFRA_MSDC_HCK_CK, "infra_msdc_hck", "emmc_250m_sel",
+ 17),
+ GATE_INFRA1(CLK_INFRA_MSDC_133M_CK, "infra_msdc_133m", "sysaxi_sel",
+ 18),
+ GATE_INFRA1(CLK_INFRA_MSDC_66M_CK, "infra_msdc_66m", "infra_sysaxi_d2",
+ 19),
+ GATE_INFRA1(CLK_INFRA_ADC_26M_CK, "infra_adc_26m", "csw_f26m_sel", 20),
+ GATE_INFRA1(CLK_INFRA_ADC_FRC_CK, "infra_adc_frc", "csw_f26m_sel", 21),
+ GATE_INFRA1(CLK_INFRA_FBIST2FPC_CK, "infra_fbist2fpc", "nfi1x_sel", 23),
+ /* INFRA2 */
+ GATE_INFRA2(CLK_INFRA_IUSB_133_CK, "infra_iusb_133", "sysaxi_sel", 0),
+ GATE_INFRA2(CLK_INFRA_IUSB_66M_CK, "infra_iusb_66m", "infra_sysaxi_d2",
+ 1),
+ GATE_INFRA2(CLK_INFRA_IUSB_SYS_CK, "infra_iusb_sys", "u2u3_sys_sel", 2),
+ GATE_INFRA2(CLK_INFRA_IUSB_CK, "infra_iusb", "u2u3_sel", 3),
+ GATE_INFRA2(CLK_INFRA_IPCIE_CK, "infra_ipcie", "pextp_tl_ck_sel", 12),
+ GATE_INFRA2(CLK_INFRA_IPCIE_PIPE_CK, "infra_ipcie_pipe", "top_xtal",
+ 13),
+ GATE_INFRA2(CLK_INFRA_IPCIER_CK, "infra_ipcier", "csw_f26m_sel", 14),
+ GATE_INFRA2(CLK_INFRA_IPCIEB_CK, "infra_ipcieb", "sysaxi_sel", 15),
+};
+
+static int clk_mt7986_infracfg_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+ void __iomem *base;
+ int nr = ARRAY_SIZE(infra_divs) + ARRAY_SIZE(infra_muxes) +
+ ARRAY_SIZE(infra_clks);
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ clk_data = mtk_alloc_clk_data(nr);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_factors(infra_divs, ARRAY_SIZE(infra_divs), clk_data);
+ mtk_clk_register_muxes(infra_muxes, ARRAY_SIZE(infra_muxes), node,
+ &mt7986_clk_lock, clk_data);
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r) {
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+ goto free_infracfg_data;
+ }
+ return r;
+
+free_infracfg_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt7986_infracfg[] = {
+ { .compatible = "mediatek,mt7986-infracfg", },
+ {}
+};
+
+static struct platform_driver clk_mt7986_infracfg_drv = {
+ .probe = clk_mt7986_infracfg_probe,
+ .driver = {
+ .name = "clk-mt7986-infracfg",
+ .of_match_table = of_match_clk_mt7986_infracfg,
+ },
+};
+builtin_platform_driver(clk_mt7986_infracfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c
new file mode 100644
index 000000000000..8f6f79b6e31e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mt7986-clk.h>
+#include <linux/clk.h>
+
+static DEFINE_SPINLOCK(mt7986_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_XTAL, "top_xtal", "clkxtal", 40000000),
+ FIXED_CLK(CLK_TOP_JTAG, "top_jtag", "clkxtal", 50000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ /* XTAL */
+ FACTOR(CLK_TOP_XTAL_D2, "top_xtal_d2", "top_xtal", 1, 2),
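+ /* The 40 MHz crystal is divided down to the two ~32 kHz RTC references. */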
+ FACTOR(CLK_TOP_RTC_32K, "top_rtc_32k", "top_xtal", 1, 1250),
+ FACTOR(CLK_TOP_RTC_32P7K, "top_rtc_32p7k", "top_xtal", 1, 1220),
+ /* MPLL */
+ FACTOR(CLK_TOP_MPLL_D2, "top_mpll_d2", "mpll", 1, 2),
+ FACTOR(CLK_TOP_MPLL_D4, "top_mpll_d4", "mpll", 1, 4),
+ FACTOR(CLK_TOP_MPLL_D8, "top_mpll_d8", "mpll", 1, 8),
+ FACTOR(CLK_TOP_MPLL_D8_D2, "top_mpll_d8_d2", "mpll", 1, 16),
+ FACTOR(CLK_TOP_MPLL_D3_D2, "top_mpll_d3_d2", "mpll", 1, 6),
+ /* MMPLL */
+ FACTOR(CLK_TOP_MMPLL_D2, "top_mmpll_d2", "mmpll", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D4, "top_mmpll_d4", "mmpll", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D8, "top_mmpll_d8", "mmpll", 1, 8),
+ FACTOR(CLK_TOP_MMPLL_D8_D2, "top_mmpll_d8_d2", "mmpll", 1, 16),
+ FACTOR(CLK_TOP_MMPLL_D3_D8, "top_mmpll_d3_d8", "mmpll", 1, 24),
+ FACTOR(CLK_TOP_MMPLL_U2PHY, "top_mmpll_u2phy", "mmpll", 1, 30),
+ /* APLL2 */
+ FACTOR(CLK_TOP_APLL2_D4, "top_apll2_d4", "apll2", 1, 4),
+ /* NET1PLL */
+ FACTOR(CLK_TOP_NET1PLL_D4, "top_net1pll_d4", "net1pll", 1, 4),
+ FACTOR(CLK_TOP_NET1PLL_D5, "top_net1pll_d5", "net1pll", 1, 5),
+ FACTOR(CLK_TOP_NET1PLL_D5_D2, "top_net1pll_d5_d2", "net1pll", 1, 10),
+ FACTOR(CLK_TOP_NET1PLL_D5_D4, "top_net1pll_d5_d4", "net1pll", 1, 20),
+ FACTOR(CLK_TOP_NET1PLL_D8_D2, "top_net1pll_d8_d2", "net1pll", 1, 16),
+ FACTOR(CLK_TOP_NET1PLL_D8_D4, "top_net1pll_d8_d4", "net1pll", 1, 32),
+ /* NET2PLL */
+ FACTOR(CLK_TOP_NET2PLL_D4, "top_net2pll_d4", "net2pll", 1, 4),
+ FACTOR(CLK_TOP_NET2PLL_D4_D2, "top_net2pll_d4_d2", "net2pll", 1, 8),
+ FACTOR(CLK_TOP_NET2PLL_D3_D2, "top_net2pll_d3_d2", "net2pll", 1, 2),
+ /* WEDMCUPLL */
+ FACTOR(CLK_TOP_WEDMCUPLL_D5_D2, "top_wedmcupll_d5_d2", "wedmcupll", 1,
+ 10),
+};
+
+static const char *const nfi1x_parents[] __initconst = { "top_xtal",
+ "top_mmpll_d8",
+ "top_net1pll_d8_d2",
+ "top_net2pll_d3_d2",
+ "top_mpll_d4",
+ "top_mmpll_d8_d2",
+ "top_wedmcupll_d5_d2",
+ "top_mpll_d8" };
+
+static const char *const spinfi_parents[] __initconst = {
+ "top_xtal_d2", "top_xtal", "top_net1pll_d5_d4",
+ "top_mpll_d4", "top_mmpll_d8_d2", "top_wedmcupll_d5_d2",
+ "top_mmpll_d3_d8", "top_mpll_d8"
+};
+
+static const char *const spi_parents[] __initconst = {
+ "top_xtal", "top_mpll_d2", "top_mmpll_d8",
+ "top_net1pll_d8_d2", "top_net2pll_d3_d2", "top_net1pll_d5_d4",
+ "top_mpll_d4", "top_wedmcupll_d5_d2"
+};
+
+static const char *const uart_parents[] __initconst = { "top_xtal",
+ "top_mpll_d8",
+ "top_mpll_d8_d2" };
+
+static const char *const pwm_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d8_d2", "top_net1pll_d5_d4", "top_mpll_d4"
+};
+
+static const char *const i2c_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d5_d4", "top_mpll_d4", "top_net1pll_d8_d4"
+};
+
+static const char *const pextp_tl_ck_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d5_d4", "top_net2pll_d4_d2", "top_rtc_32k"
+};
+
+static const char *const emmc_250m_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d5_d2"
+};
+
+static const char *const emmc_416m_parents[] __initconst = { "top_xtal",
+ "mpll" };
+
+static const char *const f_26m_adc_parents[] __initconst = { "top_xtal",
+ "top_mpll_d8_d2" };
+
+static const char *const dramc_md32_parents[] __initconst = { "top_xtal",
+ "top_mpll_d2" };
+
+static const char *const sysaxi_parents[] __initconst = { "top_xtal",
+ "top_net1pll_d8_d2",
+ "top_net2pll_d4" };
+
+static const char *const sysapb_parents[] __initconst = { "top_xtal",
+ "top_mpll_d3_d2",
+ "top_net2pll_d4_d2" };
+
+static const char *const arm_db_main_parents[] __initconst = {
+ "top_xtal", "top_net2pll_d3_d2"
+};
+
+static const char *const arm_db_jtsel_parents[] __initconst = { "top_jtag",
+ "top_xtal" };
+
+static const char *const netsys_parents[] __initconst = { "top_xtal",
+ "top_mmpll_d4" };
+
+static const char *const netsys_500m_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d5"
+};
+
+static const char *const netsys_mcu_parents[] __initconst = {
+ "top_xtal", "wedmcupll", "top_mmpll_d2", "top_net1pll_d4",
+ "top_net1pll_d5"
+};
+
+static const char *const netsys_2x_parents[] __initconst = {
+ "top_xtal", "net2pll", "wedmcupll", "top_mmpll_d2"
+};
+
+static const char *const sgm_325m_parents[] __initconst = { "top_xtal",
+ "sgmpll" };
+
+static const char *const sgm_reg_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d8_d4"
+};
+
+static const char *const a1sys_parents[] __initconst = { "top_xtal",
+ "top_apll2_d4" };
+
+static const char *const conn_mcusys_parents[] __initconst = { "top_xtal",
+ "top_mmpll_d2" };
+
+static const char *const eip_b_parents[] __initconst = { "top_xtal",
+ "net2pll" };
+
+static const char *const aud_l_parents[] __initconst = { "top_xtal", "apll2",
+ "top_mpll_d8_d2" };
+
+static const char *const a_tuner_parents[] __initconst = { "top_xtal",
+ "top_apll2_d4",
+ "top_mpll_d8_d2" };
+
+static const char *const u2u3_sys_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d5_d4"
+};
+
+static const char *const da_u2_refsel_parents[] __initconst = {
+ "top_xtal", "top_mmpll_u2phy"
+};
+
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NFI1X_SEL, "nfi1x_sel", nfi1x_parents,
+ 0x000, 0x004, 0x008, 0, 3, 7, 0x1C0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINFI_SEL, "spinfi_sel", spinfi_parents,
+ 0x000, 0x004, 0x008, 8, 3, 15, 0x1C0, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x000,
+ 0x004, 0x008, 16, 3, 23, 0x1C0, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPIM_MST_SEL, "spim_mst_sel", spi_parents,
+ 0x000, 0x004, 0x008, 24, 3, 31, 0x1C0, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x010,
+ 0x014, 0x018, 0, 2, 7, 0x1C0, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x010,
+ 0x014, 0x018, 8, 2, 15, 0x1C0, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents, 0x010,
+ 0x014, 0x018, 16, 2, 23, 0x1C0, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_TL_SEL, "pextp_tl_ck_sel",
+ pextp_tl_ck_parents, 0x010, 0x014, 0x018, 24, 2,
+ 31, 0x1C0, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EMMC_250M_SEL, "emmc_250m_sel",
+ emmc_250m_parents, 0x020, 0x024, 0x028, 0, 1, 7,
+ 0x1C0, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EMMC_416M_SEL, "emmc_416m_sel",
+ emmc_416m_parents, 0x020, 0x024, 0x028, 8, 1, 15,
+ 0x1C0, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_F_26M_ADC_SEL, "f_26m_adc_sel",
+ f_26m_adc_parents, 0x020, 0x024, 0x028, 16, 1, 23,
+ 0x1C0, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DRAMC_SEL, "dramc_sel", f_26m_adc_parents,
+ 0x020, 0x024, 0x028, 24, 1, 31, 0x1C0, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DRAMC_MD32_SEL, "dramc_md32_sel",
+ dramc_md32_parents, 0x030, 0x034, 0x038, 0, 1, 7,
+ 0x1C0, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SYSAXI_SEL, "sysaxi_sel", sysaxi_parents,
+ 0x030, 0x034, 0x038, 8, 2, 15, 0x1C0, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SYSAPB_SEL, "sysapb_sel", sysapb_parents,
+ 0x030, 0x034, 0x038, 16, 2, 23, 0x1C0, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ARM_DB_MAIN_SEL, "arm_db_main_sel",
+ arm_db_main_parents, 0x030, 0x034, 0x038, 24, 1,
+ 31, 0x1C0, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ARM_DB_JTSEL, "arm_db_jtsel",
+ arm_db_jtsel_parents, 0x040, 0x044, 0x048, 0, 1, 7,
+ 0x1C0, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_SEL, "netsys_sel", netsys_parents,
+ 0x040, 0x044, 0x048, 8, 1, 15, 0x1C0, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_500M_SEL, "netsys_500m_sel",
+ netsys_500m_parents, 0x040, 0x044, 0x048, 16, 1,
+ 23, 0x1C0, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_MCU_SEL, "netsys_mcu_sel",
+ netsys_mcu_parents, 0x040, 0x044, 0x048, 24, 3, 31,
+ 0x1C0, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_2X_SEL, "netsys_2x_sel",
+ netsys_2x_parents, 0x050, 0x054, 0x058, 0, 2, 7,
+ 0x1C0, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_325M_SEL, "sgm_325m_sel",
+ sgm_325m_parents, 0x050, 0x054, 0x058, 8, 1, 15,
+ 0x1C0, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel",
+ sgm_reg_parents, 0x050, 0x054, 0x058, 16, 1, 23,
+ 0x1C0, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A1SYS_SEL, "a1sys_sel", a1sys_parents,
+ 0x050, 0x054, 0x058, 24, 1, 31, 0x1C0, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CONN_MCUSYS_SEL, "conn_mcusys_sel",
+ conn_mcusys_parents, 0x060, 0x064, 0x068, 0, 1, 7,
+ 0x1C0, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EIP_B_SEL, "eip_b_sel", eip_b_parents,
+ 0x060, 0x064, 0x068, 8, 1, 15, 0x1C0, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PCIE_PHY_SEL, "pcie_phy_sel",
+ f_26m_adc_parents, 0x060, 0x064, 0x068, 16, 1, 23,
+ 0x1C0, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB3_PHY_SEL, "usb3_phy_sel",
+ f_26m_adc_parents, 0x060, 0x064, 0x068, 24, 1, 31,
+ 0x1C0, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_F26M_SEL, "csw_f26m_sel",
+ f_26m_adc_parents, 0x070, 0x074, 0x078, 0, 1, 7,
+ 0x1C0, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_L_SEL, "aud_l_sel", aud_l_parents,
+ 0x070, 0x074, 0x078, 8, 2, 15, 0x1C0, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A_TUNER_SEL, "a_tuner_sel",
+ a_tuner_parents, 0x070, 0x074, 0x078, 16, 2, 23,
+ 0x1C0, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U2U3_SEL, "u2u3_sel", f_26m_adc_parents,
+ 0x070, 0x074, 0x078, 24, 1, 31, 0x1C4, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U2U3_SYS_SEL, "u2u3_sys_sel",
+ u2u3_sys_parents, 0x080, 0x084, 0x088, 0, 1, 7,
+ 0x1C4, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U2U3_XHCI_SEL, "u2u3_xhci_sel",
+ u2u3_sys_parents, 0x080, 0x084, 0x088, 8, 1, 15,
+ 0x1C4, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DA_U2_REFSEL, "da_u2_refsel",
+ da_u2_refsel_parents, 0x080, 0x084, 0x088, 16, 1,
+ 23, 0x1C4, 3),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DA_U2_CK_1P_SEL, "da_u2_ck_1p_sel",
+ da_u2_refsel_parents, 0x080, 0x084, 0x088, 24, 1,
+ 31, 0x1C4, 4),
+ /* CLK_CFG_9 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AP2CNN_HOST_SEL, "ap2cnn_host_sel",
+ sgm_reg_parents, 0x090, 0x094, 0x098, 0, 1, 7,
+ 0x1C4, 5),
+};
+
+static int clk_mt7986_topckgen_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+ void __iomem *base;
+ int nr = ARRAY_SIZE(top_fixed_clks) + ARRAY_SIZE(top_divs) +
+ ARRAY_SIZE(top_muxes);
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ clk_data = mtk_alloc_clk_data(nr);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ &mt7986_clk_lock, clk_data);
+
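+ /*
+ * The bus, DRAM and 26M muxes are assumed to feed always-on
+ * consumers, so take a permanent reference on them here.
+ */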
+ clk_prepare_enable(clk_data->clks[CLK_TOP_SYSAXI_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_SYSAPB_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_DRAMC_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_DRAMC_MD32_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_F26M_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_SGM_REG_SEL]);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r) {
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+ goto free_topckgen_data;
+ }
+ return r;
+
+free_topckgen_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt7986_topckgen[] = {
+ { .compatible = "mediatek,mt7986-topckgen", },
+ {}
+};
+
+static struct platform_driver clk_mt7986_topckgen_drv = {
+ .probe = clk_mt7986_topckgen_probe,
+ .driver = {
+ .name = "clk-mt7986-topckgen",
+ .of_match_table = of_match_clk_mt7986_topckgen,
+ },
+};
+builtin_platform_driver(clk_mt7986_topckgen_drv);
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index d6eed760327d..608e0e8ca49a 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -720,6 +720,35 @@ static struct clk_regmap gxbb_mpll0_div = {
.width = 14,
},
.sdm_en = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mpll_prediv.hw
+ },
+ .num_parents = 1,
+ },
+};
+
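+/*
+ * GXL keeps SDM_EN in HHI_MPLL_CNTL7 (bit 15) rather than HHI_MPLL_CNTL
+ * (bit 25); the divider is otherwise identical to the GXBB one.
+ */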
+static struct clk_regmap gxl_mpll0_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
.reg_off = HHI_MPLL_CNTL7,
.shift = 15,
.width = 1,
@@ -749,7 +778,16 @@ static struct clk_regmap gxbb_mpll0 = {
.hw.init = &(struct clk_init_data){
.name = "mpll0",
.ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) { &gxbb_mpll0_div.hw },
+ .parent_data = &(const struct clk_parent_data) {
+ /*
+ * Note:
+ * GXL and GXBB have different SDM_EN registers. We
+ * fallback to the global naming string mechanism so
+ * mpll0_div picks up the appropriate one.
+ */
+ .name = "mpll0_div",
+ .index = -1,
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -3044,7 +3082,7 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_VAPB_1] = &gxbb_vapb_1.hw,
[CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw,
[CLKID_VAPB] = &gxbb_vapb.hw,
- [CLKID_MPLL0_DIV] = &gxbb_mpll0_div.hw,
+ [CLKID_MPLL0_DIV] = &gxl_mpll0_div.hw,
[CLKID_MPLL1_DIV] = &gxbb_mpll1_div.hw,
[CLKID_MPLL2_DIV] = &gxbb_mpll2_div.hw,
[CLKID_MPLL_PREDIV] = &gxbb_mpll_prediv.hw,
@@ -3439,7 +3477,7 @@ static struct clk_regmap *const gxl_clk_regmaps[] = {
&gxbb_mpll0,
&gxbb_mpll1,
&gxbb_mpll2,
- &gxbb_mpll0_div,
+ &gxl_mpll0_div,
&gxbb_mpll1_div,
&gxbb_mpll2_div,
&gxbb_cts_amclk_div,
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 74efc82127e1..42c874194d1a 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -265,6 +265,14 @@ config MSM_MMCC_8974
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config MSM_GCC_8976
+ tristate "MSM8956/76 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on MSM8956/76 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, SD/eMMC, SATA, PCIe, etc.
+
config MSM_MMCC_8994
tristate "MSM8994 Multimedia Clock Controller"
select MSM_GCC_8994
@@ -564,6 +572,14 @@ config SM_CAMCC_8250
Support for the camera clock controller on SM8250 devices.
Say Y if you want to support camera devices and camera functionality.
+config SDX_GCC_65
+ tristate "SDX65 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SDX65 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_DISPCC_8250
tristate "SM8150 and SM8250 Display Clock Controller"
depends on SM_GCC_8150 || SM_GCC_8250
@@ -618,6 +634,14 @@ config SM_GCC_8350
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8450
+ tristate "SM8450 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8450 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GPUCC_8150
tristate "SM8150 Graphics Clock Controller"
select SM_GCC_8150
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 1718c34d3551..0d98ca9be67f 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_MSM_GCC_8939) += gcc-msm8939.o
obj-$(CONFIG_MSM_GCC_8953) += gcc-msm8953.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8976) += gcc-msm8976.o
obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
@@ -83,6 +84,7 @@ obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
+obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -90,6 +92,7 @@ obj-$(CONFIG_SM_GCC_6350) += gcc-sm6350.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
+obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 8f65b9bdafce..4406cf609aae 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
@@ -139,6 +140,20 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x28,
[PLL_OFF_STATUS] = 0x38,
},
+ [CLK_ALPHA_PLL_TYPE_LUCID_EVO] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_ALPHA_VAL] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U] = 0x24,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x2c,
+ [PLL_OFF_TEST_CTL_U] = 0x30,
+ [PLL_OFF_TEST_CTL_U1] = 0x34,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -175,6 +190,10 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define LUCID_5LPE_PLL_LATCH_INPUT BIT(14)
#define LUCID_5LPE_ENABLE_VOTE_RUN BIT(21)
+/* LUCID EVO PLL specific settings and offsets */
+#define LUCID_EVO_ENABLE_VOTE_RUN BIT(25)
+#define LUCID_EVO_PLL_L_VAL_MASK GENMASK(15, 0)
+
/* ZONDA PLL specific */
#define ZONDA_PLL_OUT_MASK 0xf
#define ZONDA_STAY_IN_CFA BIT(16)
@@ -204,7 +223,7 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
if (ret)
return ret;
- for (count = 100; count > 0; count--) {
+ for (count = 200; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@@ -1750,24 +1769,32 @@ static int alpha_pll_lucid_5lpe_set_rate(struct clk_hw *hw, unsigned long rate,
LUCID_5LPE_ALPHA_PLL_ACK_LATCH);
}
-static int clk_lucid_5lpe_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static int __clk_lucid_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate,
+ unsigned long enable_vote_run)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
- int i, val = 0, div, ret;
+ struct regmap *regmap = pll->clkr.regmap;
+ int i, val, div, ret;
u32 mask;
/*
* If the PLL is in FSM mode, then treat set_rate callback as a
* no-operation.
*/
- ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ ret = regmap_read(regmap, PLL_USER_CTL(pll), &val);
if (ret)
return ret;
- if (val & LUCID_5LPE_ENABLE_VOTE_RUN)
+ if (val & enable_vote_run)
return 0;
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the %s PLL\n",
+ clk_hw_get_name(&pll->clkr.hw));
+ return -EINVAL;
+ }
+
div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
for (i = 0; i < pll->num_post_div; i++) {
if (pll->post_div_table[i].div == div) {
@@ -1781,6 +1808,12 @@ static int clk_lucid_5lpe_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long
mask, val << pll->post_div_shift);
}
+static int clk_lucid_5lpe_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_lucid_pll_postdiv_set_rate(hw, rate, parent_rate, LUCID_5LPE_ENABLE_VOTE_RUN);
+}
+
const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
.prepare = alpha_pll_lucid_5lpe_prepare,
.enable = alpha_pll_lucid_5lpe_enable,
@@ -1960,3 +1993,124 @@ const struct clk_ops clk_alpha_pll_zonda_ops = {
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL(clk_alpha_pll_zonda_ops);
+
+static int alpha_pll_lucid_evo_enable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_read(regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & LUCID_EVO_ENABLE_VOTE_RUN) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_lock(pll);
+ }
+
+ /* Check if PLL is already enabled */
+ ret = trion_pll_is_enabled(pll, regmap);
+ if (ret < 0) {
+ return ret;
+ } else if (ret) {
+ pr_warn("%s PLL is already enabled\n", clk_hw_get_name(&pll->clkr.hw));
+ return 0;
+ }
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Set operation mode to RUN */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Enable the PLL outputs */
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable the global PLL outputs */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return ret;
+}
+
+static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_read(regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & LUCID_EVO_ENABLE_VOTE_RUN) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ /* Disable the global PLL output */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Disable the PLL outputs */
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, 0);
+ if (ret)
+ return;
+
+ /* Place the PLL mode in STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+}
+
+static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ u32 l, frac;
+
+ regmap_read(regmap, PLL_L_VAL(pll), &l);
+ l &= LUCID_EVO_PLL_L_VAL_MASK;
+ regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac);
+
+ return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll));
+}
+
+static int clk_lucid_evo_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_lucid_pll_postdiv_set_rate(hw, rate, parent_rate, LUCID_EVO_ENABLE_VOTE_RUN);
+}
+
+const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops = {
+ .enable = alpha_pll_lucid_evo_enable,
+ .disable = alpha_pll_lucid_evo_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = alpha_pll_lucid_evo_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_evo_ops);
+
+const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_lucid_evo_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_evo_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 55e4fa47912f..6e9907deaf30 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -17,6 +17,7 @@ enum {
CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_ZONDA,
+ CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -151,6 +152,8 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops;
extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_postdiv_zonda_ops clk_alpha_pll_postdiv_fabia_ops
+extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
+extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 441d7a20e6f3..74e57c84f60a 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -515,6 +515,32 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
/* Resource name must match resource id present in cmd-db */
DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4);
+DEFINE_CLK_RPMH_VRM(sm8450, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", 4);
+DEFINE_CLK_RPMH_VRM(sm8450, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 4);
+
+static struct clk_hw *sm8450_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK1] = &sm8450_ln_bb_clk1.hw,
+ [RPMH_LN_BB_CLK1_A] = &sm8450_ln_bb_clk1_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sm8450_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sm8450_ln_bb_clk2_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+ [RPMH_RF_CLK4] = &sm8350_rf_clk4.hw,
+ [RPMH_RF_CLK4_A] = &sm8350_rf_clk4_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8450 = {
+ .clks = sm8450_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8450_rpmh_clocks),
+};
+
static struct clk_hw *sc7280_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw,
[RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw,
@@ -556,6 +582,30 @@ static const struct clk_rpmh_desc clk_rpmh_sm6350 = {
.num_clks = ARRAY_SIZE(sm6350_rpmh_clocks),
};
+DEFINE_CLK_RPMH_VRM(sdx65, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", 4);
+
+static struct clk_hw *sdx65_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK1] = &sdx65_ln_bb_clk1.hw,
+ [RPMH_LN_BB_CLK1_A] = &sdx65_ln_bb_clk1_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+ [RPMH_RF_CLK4] = &sm8350_rf_clk4.hw,
+ [RPMH_RF_CLK4_A] = &sm8350_rf_clk4_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
+ [RPMH_QPIC_CLK] = &sdx55_qpic_clk.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sdx65 = {
+ .clks = sdx65_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sdx65_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -643,10 +693,12 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55},
+ { .compatible = "qcom,sdx65-rpmh-clk", .data = &clk_rpmh_sdx65},
{ .compatible = "qcom,sm6350-rpmh-clk", .data = &clk_rpmh_sm6350},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
{ .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
{ .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350},
+ { .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450},
{ .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
{ }
};
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 5776d85a1e5c..ea28e45ca371 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -17,7 +17,6 @@
#include <linux/soc/qcom/smd-rpm.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
-#include <dt-bindings/mfd/qcom-rpm.h>
#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
@@ -151,12 +150,6 @@ struct clk_smd_rpm_req {
__le32 value;
};
-struct rpm_cc {
- struct qcom_rpm *rpm;
- struct clk_smd_rpm **clks;
- size_t num_clks;
-};
-
struct rpm_smd_clk_desc {
struct clk_smd_rpm **clks;
size_t num_clks;
@@ -196,10 +189,6 @@ static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
};
- /* Buffered clock needs a binary value */
- if (r->rpm_res_type == QCOM_SMD_RPM_CLK_BUF_A)
- req.value = cpu_to_le32(!!req.value);
-
return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
@@ -214,10 +203,6 @@ static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
};
- /* Buffered clock needs a binary value */
- if (r->rpm_res_type == QCOM_SMD_RPM_CLK_BUF_A)
- req.value = cpu_to_le32(!!req.value);
-
return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
@@ -1159,20 +1144,19 @@ MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
static struct clk_hw *qcom_smdrpm_clk_hw_get(struct of_phandle_args *clkspec,
void *data)
{
- struct rpm_cc *rcc = data;
+ const struct rpm_smd_clk_desc *desc = data;
unsigned int idx = clkspec->args[0];
- if (idx >= rcc->num_clks) {
+ if (idx >= desc->num_clks) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
- return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
+ return desc->clks[idx] ? &desc->clks[idx]->hw : ERR_PTR(-ENOENT);
}
static int rpm_smd_clk_probe(struct platform_device *pdev)
{
- struct rpm_cc *rcc;
int ret;
size_t num_clks, i;
struct qcom_smd_rpm *rpm;
@@ -1192,13 +1176,6 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
rpm_smd_clks = desc->clks;
num_clks = desc->num_clks;
- rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
- if (!rcc)
- return -ENOMEM;
-
- rcc->clks = rpm_smd_clks;
- rcc->num_clks = num_clks;
-
for (i = 0; i < num_clks; i++) {
if (!rpm_smd_clks[i])
continue;
@@ -1224,7 +1201,7 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
}
ret = devm_of_clk_add_hw_provider(&pdev->dev, qcom_smdrpm_clk_hw_get,
- rcc);
+ (void *)desc);
if (ret)
goto err;
diff --git a/drivers/clk/qcom/gcc-msm8976.c b/drivers/clk/qcom/gcc-msm8976.c
new file mode 100644
index 000000000000..a8b15814933e
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8976.c
@@ -0,0 +1,4155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm Global Clock Controller driver for MSM8956/76
+ *
+ * Copyright (c) 2016-2021, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ *
+ * Driver cleanup and modernization
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ * Marijn Suijten <marijn.suijten@somainline.org>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8976.h>
+
+#include "clk-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_AUX,
+ P_GPLL0_OUT,
+ P_GPLL0_OUT_M,
+ P_GPLL0_OUT_MDP,
+ P_GPLL2_AUX,
+ P_GPLL2_OUT,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL4_AUX,
+ P_GPLL4_OUT,
+ P_GPLL4_GFX3D,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL6_AUX,
+ P_GPLL6_OUT,
+ P_GPLL6_GFX3D,
+ P_DSI0PLL,
+ P_DSI1PLL,
+ P_DSI0PLL_BYTE,
+ P_DSI1PLL_BYTE,
+ P_XO_A,
+ P_XO,
+};
+
+static struct clk_pll gpll0 = {
+ .l_reg = 0x21004,
+ .m_reg = 0x21008,
+ .n_reg = 0x2100c,
+ .config_reg = 0x21014,
+ .mode_reg = 0x21000,
+ .status_reg = 0x2101c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll0_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_vote",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ /* This clock is required for other ones to function. */
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll2 = {
+ .l_reg = 0x4a004,
+ .m_reg = 0x4a008,
+ .n_reg = 0x4a00c,
+ .config_reg = 0x4a014,
+ .mode_reg = 0x4a000,
+ .status_reg = 0x4a01c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll2_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2_vote",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static const struct pll_freq_tbl gpll3_freq_tbl[] = {
+ { 1100000000, 57, 7, 24, 0 },
+ { }
+};
+
+static struct clk_pll gpll3 = {
+ .l_reg = 0x22004,
+ .m_reg = 0x22008,
+ .n_reg = 0x2200c,
+ .config_reg = 0x22010,
+ .mode_reg = 0x22000,
+ .status_reg = 0x22024,
+ .status_bit = 17,
+ .freq_tbl = gpll3_freq_tbl,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll3_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3_vote",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+/* GPLL3 at 1100MHz, main output enabled. */
+static const struct pll_config gpll3_config = {
+ .l = 57,
+ .m = 7,
+ .n = 24,
+ .vco_val = 0x0,
+ .vco_mask = 0x3 << 20,
+ .pre_div_val = 0x0,
+ .pre_div_mask = 0x7 << 12,
+ .post_div_val = 0x0,
+ .post_div_mask = 0x3 << 8,
+ .mn_ena_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+};
+
+static struct clk_pll gpll4 = {
+ .l_reg = 0x24004,
+ .m_reg = 0x24008,
+ .n_reg = 0x2400c,
+ .config_reg = 0x24018,
+ .mode_reg = 0x24000,
+ .status_reg = 0x24024,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll4_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_vote",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll6 = {
+ .mode_reg = 0x37000,
+ .l_reg = 0x37004,
+ .m_reg = 0x37008,
+ .n_reg = 0x3700c,
+ .config_reg = 0x37014,
+ .status_reg = 0x3701c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll6_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6_vote",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll4_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_v1_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_v1_1[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_AUX, 3 },
+ { P_GPLL4_OUT, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+ { .hw = &gpll4_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_AUX, 3 },
+ { P_GPLL6_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct parent_map gcc_parent_map_4_fs[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT, 2 },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_GPLL4_OUT, 2 },
+ { P_GPLL6_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll4_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll4_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_7_mdp[] = {
+ { P_XO, 0 },
+ { P_GPLL6_OUT, 3 },
+ { P_GPLL0_OUT_MDP, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7_mdp[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll6_vote.hw },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4_8[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_8_a[] = {
+ { P_XO_A, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8_a[] = {
+ { .fw_name = "xo_a" },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_8_gp[] = {
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8_gp[] = {
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_XO, 0 },
+ { P_GPLL6_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_XO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .fw_name = "xo" },
+};
+
+static const struct parent_map gcc_parent_map_sdcc_ice[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_M, 3 },
+};
+
+static const struct parent_map gcc_parent_map_cci[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+};
+
+static const struct parent_map gcc_parent_map_cpp[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_AUX, 3 },
+};
+
+static const struct parent_map gcc_parent_map_mdss_pix0[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_mdss_pix0[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pll" },
+};
+
+static const struct parent_map gcc_parent_map_mdss_pix1[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 3 },
+ { P_DSI1PLL, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_mdss_pix1[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pll" },
+ { .fw_name = "dsi1pll" },
+};
+
+static const struct parent_map gcc_parent_map_mdss_byte0[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_mdss_byte0[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pllbyte" },
+};
+
+static const struct parent_map gcc_parent_map_mdss_byte1[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 3 },
+ { P_DSI1PLL_BYTE, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_mdss_byte1[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pllbyte" },
+ { .fw_name = "dsi1pllbyte" },
+};
+
+static const struct parent_map gcc_parent_map_gfx3d[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_GFX3D, 5 },
+ { P_GPLL6_GFX3D, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_gfx3d[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll4_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
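+/*
+ * Frequency table entries are F(rate, parent, pre_div, m, n): the RCG
+ * divides the chosen parent by pre_div, then scales by m/n when the
+ * MND counter is in use (m/n of 0 means bypass).
+ */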
+static const struct freq_tbl ftbl_aps_0_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(300000000, P_GPLL4_OUT, 4, 0, 0),
+ F(540000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 aps_0_clk_src = {
+ .cmd_rcgr = 0x78008,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_aps_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "aps_0_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_aps_1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(300000000, P_GPLL4_OUT, 4, 0, 0),
+ F(540000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 aps_1_clk_src = {
+ .cmd_rcgr = 0x79008,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_aps_1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "aps_1_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+ F(19200000, P_XO_A, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(88890000, P_GPLL0_OUT_MAIN, 9, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_a,
+ .freq_tbl = ftbl_apss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_ahb_clk_src",
+ .parent_data = gcc_parent_data_8_a,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8_a),
+ .ops = &clk_rcg2_ops,
+ /*
+ * This clock allows the CPUs to communicate with
+ * the rest of the SoC; without it, the cores would
+ * be cut off from everything else.
+ */
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x200c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x3014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x4000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x4024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x5000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x5024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
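+/*
+ * The UART rates are exact baud-rate multiples derived from the
+ * 800 MHz GPLL0 via M/N, e.g. 800 MHz * 72 / 15625 = 3.6864 MHz.
+ */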
+static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_MAIN, 1, 72, 15625),
+ F(7372800, P_GPLL0_OUT_MAIN, 1, 144, 15625),
+ F(14745600, P_GPLL0_OUT_MAIN, 1, 288, 15625),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 1, 3, 100),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 1, 25),
+ F(40000000, P_GPLL0_OUT_MAIN, 1, 1, 20),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 500),
+ F(48000000, P_GPLL0_OUT_MAIN, 1, 3, 50),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 8, 125),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 100),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1152, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 1, 3, 40),
+ F(64000000, P_GPLL0_OUT_MAIN, 1, 2, 25),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x2044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x3034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0xc00c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0xc024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0xd000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0xd014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0xf000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0xf024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x18000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x18024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0xc044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0xd034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
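+/* 37.5 MHz = 800 MHz (GPLL0 aux) * 3 / 64 through the MND counter. */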
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0_AUX, 1, 3, 64),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_cci,
+ .freq_tbl = ftbl_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(240000000, P_GPLL4_AUX, 5, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(480000000, P_GPLL4_AUX, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x58018,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_cpp,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi0_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x4e020,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi1_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x4f020,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_csi1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi2_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3c020,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_csi2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camss_gp0_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x54000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_gp,
+ .freq_tbl = ftbl_camss_gp0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk_src",
+ .parent_data = gcc_parent_data_8_gp,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8_gp),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camss_gp1_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x55000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_gp,
+ .freq_tbl = ftbl_camss_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk_src",
+ .parent_data = gcc_parent_data_8_gp,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8_gp),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
+ F(133330000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x57000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
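+/* Sensor MCLKs: 8 MHz = 800 MHz / 100, 24 MHz = 1080 MHz (GPLL6) / 45. */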
+static const struct freq_tbl ftbl_mclk_clk_src[] = {
+ F(8000000, P_GPLL0_OUT_MAIN, 1, 1, 100),
+ F(24000000, P_GPLL6_OUT, 1, 1, 45),
+ F(66670000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x52000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x53000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x5c000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk2_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi0phytimer_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x4e000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi1phytimer_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x4f000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_csi1phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camss_top_ahb_clk_src[] = {
+ F(40000000, P_GPLL0_OUT_MAIN, 10, 1, 2),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x5a000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_camss_top_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_top_ahb_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vfe0_clk_src[] = {
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL4_OUT, 4, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(466000000, P_GPLL2_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x58000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_vfe0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vfe1_clk_src[] = {
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL4_OUT, 4, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(466000000, P_GPLL2_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x58054,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_vfe1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_crypto_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "crypto_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x8004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_gp,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_vote.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp2_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x9004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_gp,
+ .freq_tbl = ftbl_gp2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_vote.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp3_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0xa004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_gp,
+ .freq_tbl = ftbl_gp3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_vote.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
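+/*
+ * The MDSS byte and pixel RCGs carry no frequency table: with
+ * CLK_SET_RATE_PARENT they track the DSI PLL, and the byte2/pixel ops
+ * only compute the local divider.
+ */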
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x4d044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_mdss_byte0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_data = gcc_parent_data_mdss_byte0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_byte0),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x4d0b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_mdss_byte1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = gcc_parent_data_mdss_byte1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_byte1),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_esc0_1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x4d05c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_parent_map_mdss_byte0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_data = gcc_parent_data_mdss_byte0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_byte0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x4d0a8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_parent_map_mdss_byte1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = gcc_parent_data_mdss_byte1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_byte1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(50000000, P_GPLL0_OUT_MDP, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MDP, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MDP, 8, 0, 0),
+ F(145454545, P_GPLL0_OUT_MDP, 5.5, 0, 0),
+ F(160000000, P_GPLL0_OUT_MDP, 5, 0, 0),
+ F(177777778, P_GPLL0_OUT_MDP, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MDP, 4, 0, 0),
+ F(270000000, P_GPLL6_OUT, 4, 0, 0),
+ F(320000000, P_GPLL0_OUT_MDP, 2.5, 0, 0),
+ F(360000000, P_GPLL6_OUT, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7_mdp,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_data = gcc_parent_data_7_mdp,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7_mdp),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x4d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_mdss_pix0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_data = gcc_parent_data_mdss_pix0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_pix0),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x4d0b8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_mdss_pix1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = gcc_parent_data_mdss_pix1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_pix1),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_vsync_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x4d02c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_vsync_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(228571429, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(240000000, P_GPLL6_GFX3D, 4.5, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL4_GFX3D, 4, 0, 0),
+ F(360000000, P_GPLL6_GFX3D, 3, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(432000000, P_GPLL6_GFX3D, 2.5, 0, 0),
+ F(480000000, P_GPLL4_GFX3D, 2.5, 0, 0),
+ F(540000000, P_GPLL6_GFX3D, 2, 0, 0),
+ F(600000000, P_GPLL4_GFX3D, 2, 0, 0),
+ { }
+};
+
+static const struct clk_init_data gfx3d_clk_params = {
+ .name = "gfx3d_clk_src",
+ .parent_data = gcc_parent_data_gfx3d,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_gfx3d),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_gfx3d,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .clkr.hw.init = &gfx3d_clk_params,
+};
+
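+/* 64 MHz = 800 MHz / 12.5; the RCG encodes half-integer pre-dividers. */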
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x44010,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_rbcpr_gfx_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_gfx_clk_src = {
+ .cmd_rcgr = 0x3a00c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_rbcpr_gfx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbcpr_gfx_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(342850000, P_GPLL4_OUT, 3.5, 0, 0),
+ F(400000000, P_GPLL4_OUT, 3, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_sdcc1_8976_v1_1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(186400000, P_GPLL2_OUT, 5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(372800000, P_GPLL2_OUT, 2.5, 0, 0),
+ { }
+};
+
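+/*
+ * MSM8976 v1.1 moves SDCC1's high rates from GPLL4 to GPLL2, so probe
+ * code can presumably swap in this init data and the v1.1 table above.
+ */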
+static const struct clk_init_data sdcc1_apps_clk_src_8976v1_1_init = {
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_v1_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_v1_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_M, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_M, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x5d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_sdcc_ice,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(40000000, P_GPLL0_OUT_MAIN, 10, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x43004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_sdcc2_4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc3_apps_clk_src = {
+ .cmd_rcgr = 0x39004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_sdcc2_4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc3_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb_fs_ic_clk_src[] = {
+ F(60000000, P_GPLL6_OUT_MAIN, 6, 1, 3),
+ { }
+};
+
+static struct clk_rcg2 usb_fs_ic_clk_src = {
+ .cmd_rcgr = 0x3f034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_usb_fs_ic_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_fs_ic_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb_fs_system_clk_src[] = {
+ F(64000000, P_GPLL0_OUT, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_fs_system_clk_src = {
+ .cmd_rcgr = 0x3f010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4_fs,
+ .freq_tbl = ftbl_usb_fs_system_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_fs_system_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
+ F(57140000, P_GPLL0_OUT_MAIN, 14, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(177780000, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x41010,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_usb_hs_system_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hs_system_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
+ F(72727200, P_GPLL0_OUT_MAIN, 11, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(228570000, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(310667000, P_GPLL2_AUX, 3, 0, 0),
+ F(360000000, P_GPLL6_AUX, 3, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(466000000, P_GPLL2_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x4c000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_vcodec0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aps_0_clk = {
+ .halt_reg = 0x78004,
+ .clkr = {
+ .enable_reg = 0x78004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_aps_0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &aps_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aps_1_clk = {
+ .halt_reg = 0x79004,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_aps_1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &aps_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
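+/*
+ * Branch clocks gate an already-generated rate; BRANCH_HALT makes the
+ * enable/disable path poll the halt register until the gate confirms.
+ */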
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x3010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x4020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0xd010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0xd00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0xf020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0xf01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x18020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x18020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x1801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0xc03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0xd02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_ahb_clk = {
+ .halt_reg = 0x5101c,
+ .clkr = {
+ .enable_reg = 0x5101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cci_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_clk = {
+ .halt_reg = 0x51018,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cci_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &cci_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cpp_ahb_clk = {
+ .halt_reg = 0x58040,
+ .clkr = {
+ .enable_reg = 0x58040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
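+/* No parent is modelled for the AXI branches; they gate a bus-fabric rate. */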
+static struct clk_branch gcc_camss_cpp_axi_clk = {
+ .halt_reg = 0x58064,
+ .clkr = {
+ .enable_reg = 0x58064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cpp_clk = {
+ .halt_reg = 0x5803c,
+ .clkr = {
+ .enable_reg = 0x5803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &cpp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0_ahb_clk = {
+ .halt_reg = 0x4e040,
+ .clkr = {
+ .enable_reg = 0x4e040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0_clk = {
+ .halt_reg = 0x4e03c,
+ .clkr = {
+ .enable_reg = 0x4e03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phy_clk = {
+ .halt_reg = 0x4e048,
+ .clkr = {
+ .enable_reg = 0x4e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phy_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0pix_clk = {
+ .halt_reg = 0x4e058,
+ .clkr = {
+ .enable_reg = 0x4e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0pix_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0rdi_clk = {
+ .halt_reg = 0x4e050,
+ .clkr = {
+ .enable_reg = 0x4e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0rdi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1_ahb_clk = {
+ .halt_reg = 0x4f040,
+ .clkr = {
+ .enable_reg = 0x4f040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1_clk = {
+ .halt_reg = 0x4f03c,
+ .clkr = {
+ .enable_reg = 0x4f03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phy_clk = {
+ .halt_reg = 0x4f048,
+ .clkr = {
+ .enable_reg = 0x4f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1phy_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1pix_clk = {
+ .halt_reg = 0x4f058,
+ .clkr = {
+ .enable_reg = 0x4f058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1pix_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1rdi_clk = {
+ .halt_reg = 0x4f050,
+ .clkr = {
+ .enable_reg = 0x4f050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1rdi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2_ahb_clk = {
+ .halt_reg = 0x3c040,
+ .clkr = {
+ .enable_reg = 0x3c040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2_clk = {
+ .halt_reg = 0x3c03c,
+ .clkr = {
+ .enable_reg = 0x3c03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2phy_clk = {
+ .halt_reg = 0x3c048,
+ .clkr = {
+ .enable_reg = 0x3c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2phy_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2pix_clk = {
+ .halt_reg = 0x3c058,
+ .clkr = {
+ .enable_reg = 0x3c058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2pix_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2rdi_clk = {
+ .halt_reg = 0x3c050,
+ .clkr = {
+ .enable_reg = 0x3c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2rdi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi_vfe0_clk = {
+ .halt_reg = 0x58050,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi_vfe1_clk = {
+ .halt_reg = 0x58074,
+ .clkr = {
+ .enable_reg = 0x58074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vfe1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_gp0_clk = {
+ .halt_reg = 0x54018,
+ .clkr = {
+ .enable_reg = 0x54018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_gp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_gp1_clk = {
+ .halt_reg = 0x55018,
+ .clkr = {
+ .enable_reg = 0x55018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ispif_ahb_clk = {
+ .halt_reg = 0x50004,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ispif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg0_clk = {
+ .halt_reg = 0x57020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &jpeg0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg_ahb_clk = {
+ .halt_reg = 0x57024,
+ .clkr = {
+ .enable_reg = 0x57024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg_axi_clk = {
+ .halt_reg = 0x57028,
+ .clkr = {
+ .enable_reg = 0x57028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x52018,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x53018,
+ .clkr = {
+ .enable_reg = 0x53018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+ .halt_reg = 0x5c018,
+ .clkr = {
+ .enable_reg = 0x5c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_micro_ahb_clk = {
+ .halt_reg = 0x5600c,
+ .clkr = {
+ .enable_reg = 0x5600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_micro_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x4e01c,
+ .clkr = {
+ .enable_reg = 0x4e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x4f01c,
+ .clkr = {
+ .enable_reg = 0x4f01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ahb_clk = {
+ .halt_reg = 0x56004,
+ .clkr = {
+ .enable_reg = 0x56004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x5a014,
+ .clkr = {
+ .enable_reg = 0x5a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe0_clk = {
+ .halt_reg = 0x58038,
+ .clkr = {
+ .enable_reg = 0x58038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe_ahb_clk = {
+ .halt_reg = 0x58044,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe_axi_clk = {
+ .halt_reg = 0x58048,
+ .clkr = {
+ .enable_reg = 0x58048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe1_ahb_clk = {
+ .halt_reg = 0x58060,
+ .clkr = {
+ .enable_reg = 0x58060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe1_axi_clk = {
+ .halt_reg = 0x58068,
+ .clkr = {
+ .enable_reg = 0x58068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe1_clk = {
+ .halt_reg = 0x5805c,
+ .clkr = {
+ .enable_reg = 0x5805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vfe1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcc_clk = {
+ .halt_reg = 0x77004,
+ .clkr = {
+ .enable_reg = 0x77004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_dcc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gmem_clk = {
+ .halt_reg = 0x59024,
+ .clkr = {
+ .enable_reg = 0x59024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_gmem_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x8000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0xa000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_ahb_clk = {
+ .halt_reg = 0x4d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_axi_clk = {
+ .halt_reg = 0x4d080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_byte0_clk = {
+ .halt_reg = 0x4d094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_byte1_clk = {
+ .halt_reg = 0x4d0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_esc0_clk = {
+ .halt_reg = 0x4d098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_esc1_clk = {
+ .halt_reg = 0x4d09c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_mdp_clk = {
+ .halt_reg = 0x4d088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_pclk0_clk = {
+ .halt_reg = 0x4d084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_pclk1_clk = {
+ .halt_reg = 0x4d0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_vsync_clk = {
+ .halt_reg = 0x4d090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x49000,
+ .clkr = {
+ .enable_reg = 0x49000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x59048,
+ .clkr = {
+ .enable_reg = 0x59048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_ahb_clk = {
+ .halt_reg = 0x59028,
+ .clkr = {
+ .enable_reg = 0x59028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_aon_clk = {
+ .halt_reg = 0x59044,
+ .clkr = {
+ .enable_reg = 0x59044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_aon_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gfx3d_clk = {
+ .halt_reg = 0x59020,
+ .clkr = {
+ .enable_reg = 0x59020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_gfx3d_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_timer_clk = {
+ .halt_reg = 0x59040,
+ .clkr = {
+ .enable_reg = 0x59040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_timer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x44004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x44004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rbcpr_gfx_ahb_clk = {
+ .halt_reg = 0x3a008,
+ .clkr = {
+ .enable_reg = 0x3a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_rbcpr_gfx_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rbcpr_gfx_clk = {
+ .halt_reg = 0x3a004,
+ .clkr = {
+ .enable_reg = 0x3a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_rbcpr_gfx_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &rbcpr_gfx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x5d014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x4301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x43018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_ahb_clk = {
+ .halt_reg = 0x3901c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc3_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_apps_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc3_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc3_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x4102c,
+ .clkr = {
+ .enable_reg = 0x4102c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_phy_cfg_ahb_clk = {
+ .halt_reg = 0x41030,
+ .clkr = {
+ .enable_reg = 0x41030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_phy_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_ahb_clk = {
+ .halt_reg = 0x3f008,
+ .clkr = {
+ .enable_reg = 0x3f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_fs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_ic_clk = {
+ .halt_reg = 0x3f030,
+ .clkr = {
+ .enable_reg = 0x3f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_fs_ic_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb_fs_ic_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_system_clk = {
+ .halt_reg = 0x3f004,
+ .clkr = {
+ .enable_reg = 0x3f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_fs_system_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb_fs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x41008,
+ .clkr = {
+ .enable_reg = 0x41008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x41004,
+ .clkr = {
+ .enable_reg = 0x41004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_system_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb_hs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_ahb_clk = {
+ .halt_reg = 0x4c020,
+ .clkr = {
+ .enable_reg = 0x4c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_axi_clk = {
+ .halt_reg = 0x4c024,
+ .clkr = {
+ .enable_reg = 0x4c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
+ .halt_reg = 0x4c02c,
+ .clkr = {
+ .enable_reg = 0x4c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_core0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_core1_vcodec0_clk = {
+ .halt_reg = 0x4c034,
+ .clkr = {
+ .enable_reg = 0x4c034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_core1_vcodec0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_vcodec0_clk = {
+ .halt_reg = 0x4c01c,
+ .clkr = {
+ .enable_reg = 0x4c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Vote clocks */
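+/*
+ * These branches are gated through a shared voting register (0x45004 or
+ * 0x4500c) rather than a dedicated enable register, so BRANCH_HALT_VOTED
+ * relaxes the halt check: another master's vote can keep the clock running
+ * after we drop ours.
+ */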
+static struct clk_branch gcc_apss_ahb_clk = {
+ .halt_reg = 0x4601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_axi_clk = {
+ .halt_reg = 0x46020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x1008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0xb008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpp_tbu_clk = {
+ .halt_reg = 0x12040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpp_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_1_tbu_clk = {
+ .halt_reg = 0x12098,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_1_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tbu_clk = {
+ .halt_reg = 0x12010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tcu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x12018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_tcu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gtcu_ahb_clk = {
+ .halt_reg = 0x12044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gtcu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_jpeg_tbu_clk = {
+ .halt_reg = 0x12034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_jpeg_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdp_rt_tbu_clk = {
+ .halt_reg = 0x1204c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdp_rt_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdp_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdp_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_smmu_cfg_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_cfg_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_1_tbu_clk = {
+ .halt_reg = 0x1209c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_1_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_tbu_clk = {
+ .halt_reg = 0x12014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vfe1_tbu_clk = {
+ .halt_reg = 0x12090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vfe1_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vfe_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vfe_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
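+/*
+ * GDSC power domains. Each cxcs[] array lists branch control registers
+ * whose RETAIN_MEM/RETAIN_PERIPH bits the gdsc driver toggles across power
+ * transitions so the domain's memories keep their state.
+ */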
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .cxcs = (unsigned int []){ 0x4c024, 0x4c01c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x4c028,
+ .cxcs = (unsigned int []){ 0x4c02c },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core1_gdsc = {
+ .gdscr = 0x4c030,
+ .pd = {
+ .name = "venus_core1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .cxcs = (unsigned int []){ 0x4d080, 0x4d088 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x5701c,
+ .cxcs = (unsigned int []){ 0x57020, 0x57028 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "jpeg_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe0_gdsc = {
+ .gdscr = 0x58034,
+ .cxcs = (unsigned int []){ 0x58038, 0x58048, 0x5600c, 0x58050 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "vfe0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe1_gdsc = {
+ .gdscr = 0x5806c,
+ .cxcs = (unsigned int []){ 0x5805c, 0x58068, 0x5600c, 0x58074 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "vfe1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc cpp_gdsc = {
+ .gdscr = 0x58078,
+ .cxcs = (unsigned int []){ 0x5803c, 0x58064 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "cpp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_cx_gdsc = {
+ .gdscr = 0x5904c,
+ .cxcs = (unsigned int []){ 0x59020 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc oxili_gx_gdsc = {
+ .gdscr = 0x5901c,
+ .clamp_io_ctrl = 0x5b00c,
+ .cxcs = (unsigned int []){ 0x59000, 0x59024 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "oxili_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .supply = "vdd_gfx",
+ .flags = CLAMP_IO,
+};
+
+static struct clk_regmap *gcc_msm8976_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL0_CLK_SRC] = &gpll0_vote,
+ [GPLL2_CLK_SRC] = &gpll2_vote,
+ [GPLL3_CLK_SRC] = &gpll3_vote,
+ [GPLL4_CLK_SRC] = &gpll4_vote,
+ [GPLL6_CLK_SRC] = &gpll6_vote,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_AXI_CLK] = &gcc_camss_cpp_axi_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI2_AHB_CLK] = &gcc_camss_csi2_ahb_clk.clkr,
+ [GCC_CAMSS_CSI2_CLK] = &gcc_camss_csi2_clk.clkr,
+ [GCC_CAMSS_CSI2PHY_CLK] = &gcc_camss_csi2phy_clk.clkr,
+ [GCC_CAMSS_CSI2PIX_CLK] = &gcc_camss_csi2pix_clk.clkr,
+ [GCC_CAMSS_CSI2RDI_CLK] = &gcc_camss_csi2rdi_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_CSI_VFE1_CLK] = &gcc_camss_csi_vfe1_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE_AHB_CLK] = &gcc_camss_vfe_ahb_clk.clkr,
+ [GCC_CAMSS_VFE_AXI_CLK] = &gcc_camss_vfe_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_AHB_CLK] = &gcc_camss_vfe1_ahb_clk.clkr,
+ [GCC_CAMSS_VFE1_AXI_CLK] = &gcc_camss_vfe1_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_CLK] = &gcc_camss_vfe1_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [GCC_MDSS_ESC1_CLK] = &gcc_mdss_esc1_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_RBCPR_GFX_AHB_CLK] = &gcc_rbcpr_gfx_ahb_clk.clkr,
+ [GCC_RBCPR_GFX_CLK] = &gcc_rbcpr_gfx_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC3_AHB_CLK] = &gcc_sdcc3_ahb_clk.clkr,
+ [GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr,
+ [GCC_USB_FS_AHB_CLK] = &gcc_usb_fs_ahb_clk.clkr,
+ [GCC_USB_FS_IC_CLK] = &gcc_usb_fs_ic_clk.clkr,
+ [GCC_USB_FS_SYSTEM_CLK] = &gcc_usb_fs_system_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_CORE1_VCODEC0_CLK] = &gcc_venus0_core1_vcodec0_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
+ [GCC_APSS_AXI_CLK] = &gcc_apss_axi_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_RT_TBU_CLK] = &gcc_mdp_rt_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_VENUS_1_TBU_CLK] = &gcc_venus_1_tbu_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE1_TBU_CLK] = &gcc_vfe1_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+ [GCC_APS_0_CLK] = &gcc_aps_0_clk.clkr,
+ [GCC_APS_1_CLK] = &gcc_aps_1_clk.clkr,
+ [APS_0_CLK_SRC] = &aps_0_clk_src.clkr,
+ [APS_1_CLK_SRC] = &aps_1_clk_src.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CAMSS_TOP_AHB_CLK_SRC] = &camss_top_ahb_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [RBCPR_GFX_CLK_SRC] = &rbcpr_gfx_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SDCC3_APPS_CLK_SRC] = &sdcc3_apps_clk_src.clkr,
+ [USB_FS_IC_CLK_SRC] = &usb_fs_ic_clk_src.clkr,
+ [USB_FS_SYSTEM_CLK_SRC] = &usb_fs_system_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [GCC_MDSS_BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [GCC_MDSS_BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [GCC_MDSS_BYTE1_CLK] = &gcc_mdss_byte1_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [GCC_MDSS_PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [GCC_MDSS_PCLK1_CLK] = &gcc_mdss_pclk1_clk.clkr,
+ [GCC_GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GCC_GFX3D_OXILI_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [GCC_GFX3D_BIMC_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_GFX3D_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [GCC_GFX3D_OXILI_AON_CLK] = &gcc_oxili_aon_clk.clkr,
+ [GCC_GFX3D_OXILI_GMEM_CLK] = &gcc_oxili_gmem_clk.clkr,
+ [GCC_GFX3D_OXILI_TIMER_CLK] = &gcc_oxili_timer_clk.clkr,
+ [GCC_GFX3D_TBU0_CLK] = &gcc_gfx_tbu_clk.clkr,
+ [GCC_GFX3D_TBU1_CLK] = &gcc_gfx_1_tbu_clk.clkr,
+ [GCC_GFX3D_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [GCC_GFX3D_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_msm8976_resets[] = {
+ [RST_CAMSS_MICRO_BCR] = { 0x56008 },
+ [RST_USB_HS_BCR] = { 0x41000 },
+ [RST_QUSB2_PHY_BCR] = { 0x4103c },
+ [RST_USB2_HS_PHY_ONLY_BCR] = { 0x41034 },
+ [RST_USB_HS_PHY_CFG_AHB_BCR] = { 0x41038 },
+ [RST_USB_FS_BCR] = { 0x3f000 },
+ [RST_CAMSS_CSI1PIX_BCR] = { 0x4f054 },
+ [RST_CAMSS_CSI_VFE1_BCR] = { 0x58070 },
+ [RST_CAMSS_VFE1_BCR] = { 0x5807c },
+ [RST_CAMSS_CPP_BCR] = { 0x58080 },
+};
+
+static struct gdsc *gcc_msm8976_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_CORE1_GDSC] = &venus_core1_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+ [CPP_GDSC] = &cpp_gdsc,
+ [OXILI_GX_GDSC] = &oxili_gx_gdsc,
+ [OXILI_CX_GDSC] = &oxili_cx_gdsc,
+};
+
+static const struct regmap_config gcc_msm8976_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7fffc,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8976_desc = {
+ .config = &gcc_msm8976_regmap_config,
+ .clks = gcc_msm8976_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8976_clocks),
+ .resets = gcc_msm8976_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8976_resets),
+ .gdscs = gcc_msm8976_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8976_gdscs),
+};
+
+static const struct of_device_id gcc_msm8976_match_table[] = {
+ { .compatible = "qcom,gcc-msm8976" }, /* Also valid for 8x56 */
+ { .compatible = "qcom,gcc-msm8976-v1.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8976_match_table);
+
+static int gcc_msm8976_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,gcc-msm8976-v1.1")) {
+ sdcc1_apps_clk_src.parent_map = gcc_parent_map_v1_1;
+ sdcc1_apps_clk_src.freq_tbl = ftbl_sdcc1_8976_v1_1_apps_clk_src;
+ sdcc1_apps_clk_src.clkr.hw.init = &sdcc1_apps_clk_src_8976v1_1_init;
+ }
+
+ regmap = qcom_cc_map(pdev, &gcc_msm8976_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Set Sleep and Wakeup cycles to 0 for GMEM clock */
+ ret = regmap_update_bits(regmap, gcc_oxili_gmem_clk.clkr.enable_reg, 0xff0, 0);
+ if (ret)
+ return ret;
+
+ clk_pll_configure_sr_hpm_lp(&gpll3, regmap, &gpll3_config, true);
+
+ /* Enable AUX2 clock for APSS */
+ ret = regmap_update_bits(regmap, 0x60000, BIT(2), BIT(2));
+ if (ret)
+ return ret;
+
+ /* Set Sleep cycles to 0 for OXILI clock */
+ ret = regmap_update_bits(regmap, gcc_oxili_gfx3d_clk.clkr.enable_reg, 0xf0, 0);
+ if (ret)
+ return ret;
+
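+ /* Register clocks, resets and GDSCs through the common qcom helper. */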
+ return qcom_cc_really_probe(pdev, &gcc_msm8976_desc, regmap);
+}
+
+static struct platform_driver gcc_msm8976_driver = {
+ .probe = gcc_msm8976_probe,
+ .driver = {
+ .name = "qcom,gcc-msm8976",
+ .of_match_table = gcc_msm8976_match_table,
+ },
+};
+
+static int __init gcc_msm8976_init(void)
+{
+ return platform_driver_register(&gcc_msm8976_driver);
+}
+core_initcall(gcc_msm8976_init);
+
+static void __exit gcc_msm8976_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8976_driver);
+}
+module_exit(gcc_msm8976_exit);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 702a9bdc0559..71aa630fa4bd 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 8fb6bd69f240..423627d49719 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -2917,7 +2917,7 @@ static struct clk_branch gcc_cfg_noc_lpass_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_cfg_noc_lpass_clk",
- .ops = &clk_branch2_ops,
+ .ops = &clk_branch2_aon_ops,
},
},
};
diff --git a/drivers/clk/qcom/gcc-sdx65.c b/drivers/clk/qcom/gcc-sdx65.c
new file mode 100644
index 000000000000..748ac15b5ed8
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sdx65.c
@@ -0,0 +1,1611 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdx65.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_PCIE_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
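+/*
+ * Each parent_map table pairs a parent identifier with the value programmed
+ * into the RCG/mux source field; the clk_parent_data array with the same
+ * ordering supplies the actual parent clocks.
+ */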
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 2 },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "pcie_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
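+/*
+ * Register-mapped source muxes for the PCIe aux/pipe and USB3 pipe clocks;
+ * the pipe muxes can park the clock on TCXO while the PHY output is down.
+ */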
+static struct clk_regmap_mux gcc_pcie_aux_clk_src = {
+ .reg = 0x43060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_pipe_clk_src = {
+ .reg = 0x43044,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_5,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_phy_pipe_clk_src = {
+ .reg = 0x1706c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
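+/*
+ * F(rate, source, divider, M, N): dividers are given in half-integer steps
+ * (e.g. 12.5) and encoded by the F() macro as 2 * div - 1; M/N program the
+ * MND fractional divider when non-zero.
+ */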
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1c024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_BI_TCXO, 10, 1, 2),
+ F(4800000, P_BI_TCXO, 4, 0, 0),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(15000000, P_GPLL0_OUT_EVEN, 5, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 12.5, 1, 2),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1c00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1e024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x20024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2000c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x22024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2200c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_EVEN, 1, 192, 15625),
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(16000000, P_GPLL0_OUT_EVEN, 1, 4, 75),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(19354839, P_GPLL0_OUT_MAIN, 15.5, 1, 2),
+ F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2),
+ F(20689655, P_GPLL0_OUT_MAIN, 14.5, 1, 2),
+ F(21428571, P_GPLL0_OUT_MAIN, 14, 1, 2),
+ F(22222222, P_GPLL0_OUT_MAIN, 13.5, 1, 2),
+ F(23076923, P_GPLL0_OUT_MAIN, 13, 1, 2),
+ F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(26086957, P_GPLL0_OUT_MAIN, 11.5, 1, 2),
+ F(27272727, P_GPLL0_OUT_MAIN, 11, 1, 2),
+ F(28571429, P_GPLL0_OUT_MAIN, 10.5, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75),
+ F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375),
+ F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1d00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1f00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x2100c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x2300c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x3000c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
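+
+ /*
+ * Unlike its siblings, this RCG points at gcc_parent_data_0_ao,
+ * presumably the "active-only" variants of the same parents
+ * (e.g. bi_tcxo_ao), so keeping the CPU subsystem AHB clock on
+ * does not hold a vote on the XO while the system sleeps.
+ */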
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x37004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x38004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x39004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_aux_phy_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_aux_phy_clk_src = {
+ .cmd_rcgr = 0x43048,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_aux_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_phy_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_rchng_phy_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_rchng_phy_clk_src = {
+ .cmd_rcgr = 0x43064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_rchng_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_rchng_phy_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x24010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x1a010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_master_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_master_clk_src = {
+ .cmd_rcgr = 0x17030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x17048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_aux_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb3_phy_aux_clk_src[] = {
+ F(1000000, P_BI_TCXO, 1, 5, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x17070,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
+ .reg = 0x30024,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_mock_utmi_postdiv_clk_src = {
+ .reg = 0x17060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
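+
+ /*
+ * Both post-dividers above use clk_regmap_div_ro_ops, which only
+ * read the divider field for recalc/round and never write it, so
+ * the kernel treats these dividers as fixed configuration.
+ */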
+
+static struct clk_branch gcc_ahb_pcie_link_clk = {
+ .halt_reg = 0x2e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb_pcie_link_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x1b004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
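+
+ /*
+ * BRANCH_HALT_VOTED branches are enabled through a shared voting
+ * register (0x6d008 here) where each bit is one master's vote;
+ * clearing our vote does not guarantee the clock stops, since
+ * another master may still be voting it on, so the halt check is
+ * relaxed accordingly.
+ */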
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x1c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x22008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x22008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x22004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x22004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x1b00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x21004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_uart3_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x23004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_uart4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x27004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x37000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x37000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x38000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x39000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_en = {
+ .halt_reg = 0x88004,
+ /*
+ * The clock controller does not handle the status bit for
+ * clocks whose GDSCs (power domains) are in HW-controlled mode,
+ * so skip the status-bit check for those clocks by setting the
+ * BRANCH_HALT_DELAY flag.
+ */
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x88004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_aux_clk = {
+ .halt_reg = 0x43034,
+ /*
+ * The clock controller does not handle the status bit for
+ * clocks whose GDSCs (power domains) are in HW-controlled mode,
+ * so skip the status-bit check for those clocks by setting the
+ * BRANCH_HALT_DELAY flag.
+ */
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x43034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_cfg_ahb_clk = {
+ .halt_reg = 0x4302c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4302c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_mstr_axi_clk = {
+ .halt_reg = 0x43024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_pipe_clk = {
+ .halt_reg = 0x4303c,
+ /*
+ * The clock controller does not handle the status bit for
+ * clocks whose GDSCs (power domains) are in HW-controlled mode,
+ * so skip the status-bit check for those clocks by setting the
+ * BRANCH_HALT_DELAY flag.
+ */
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x4303c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rchng_phy_clk = {
+ .halt_reg = 0x43030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_rchng_phy_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_rchng_phy_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_sleep_clk = {
+ .halt_reg = 0x43038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_aux_phy_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_axi_clk = {
+ .halt_reg = 0x4301c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4301c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_q2a_axi_clk = {
+ .halt_reg = 0x43018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x2400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x24004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x24004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x24008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx1_usb2_clkref_en = {
+ .halt_reg = 0x88008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x88008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx1_usb2_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x1a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x1a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0x1702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mstr_axi_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_slv_ahb_clk = {
+ .halt_reg = 0x17024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x17064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0x17004,
+ .pd = {
+ .name = "usb30_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_gdsc = {
+ .gdscr = 0x43004,
+ .pd = {
+ .name = "pcie_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
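+
+ /*
+ * The GDSCs above are the USB3 and PCIe power domains exposed by
+ * this clock controller; PWRSTS_OFF_ON indicates they support
+ * plain off/on transitions with no intermediate retention state.
+ */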
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x17068,
+ /*
+ * The clock controller does not handle the status bit for
+ * clocks whose GDSCs (power domains) are in HW-controlled mode,
+ * so skip the status-bit check for those clocks by setting the
+ * BRANCH_HALT_DELAY flag.
+ */
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_en = {
+ .halt_reg = 0x88000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x88000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x19008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x19008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x19008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_xo_div4_clk = {
+ .halt_reg = 0x2e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_xo_div4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_xo_pcie_link_clk = {
+ .halt_reg = 0x2e008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x2e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_xo_pcie_link_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_sdx65_clocks[] = {
+ [GCC_AHB_PCIE_LINK_CLK] = &gcc_ahb_pcie_link_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC] = &gcc_blsp1_qup1_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup1_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC] = &gcc_blsp1_qup2_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup2_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC] = &gcc_blsp1_qup3_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup3_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC] = &gcc_blsp1_qup4_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup4_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK_SRC] = &gcc_blsp1_uart1_apps_clk_src.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK_SRC] = &gcc_blsp1_uart2_apps_clk_src.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK_SRC] = &gcc_blsp1_uart3_apps_clk_src.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK_SRC] = &gcc_blsp1_uart4_apps_clk_src.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_PCIE_0_CLKREF_EN] = &gcc_pcie_0_clkref_en.clkr,
+ [GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr,
+ [GCC_PCIE_AUX_CLK_SRC] = &gcc_pcie_aux_clk_src.clkr,
+ [GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr,
+ [GCC_PCIE_CFG_AHB_CLK] = &gcc_pcie_cfg_ahb_clk.clkr,
+ [GCC_PCIE_MSTR_AXI_CLK] = &gcc_pcie_mstr_axi_clk.clkr,
+ [GCC_PCIE_PIPE_CLK] = &gcc_pcie_pipe_clk.clkr,
+ [GCC_PCIE_PIPE_CLK_SRC] = &gcc_pcie_pipe_clk_src.clkr,
+ [GCC_PCIE_RCHNG_PHY_CLK] = &gcc_pcie_rchng_phy_clk.clkr,
+ [GCC_PCIE_RCHNG_PHY_CLK_SRC] = &gcc_pcie_rchng_phy_clk_src.clkr,
+ [GCC_PCIE_SLEEP_CLK] = &gcc_pcie_sleep_clk.clkr,
+ [GCC_PCIE_SLV_AXI_CLK] = &gcc_pcie_slv_axi_clk.clkr,
+ [GCC_PCIE_SLV_Q2A_AXI_CLK] = &gcc_pcie_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_RX1_USB2_CLKREF_EN] = &gcc_rx1_usb2_clkref_en.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MASTER_CLK_SRC] = &gcc_usb30_master_clk_src.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_MSTR_AXI_CLK] = &gcc_usb30_mstr_axi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB30_SLV_AHB_CLK] = &gcc_usb30_slv_ahb_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK_SRC] = &gcc_usb3_phy_aux_clk_src.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK_SRC] = &gcc_usb3_phy_pipe_clk_src.clkr,
+ [GCC_USB3_PRIM_CLKREF_EN] = &gcc_usb3_prim_clkref_en.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_XO_DIV4_CLK] = &gcc_xo_div4_clk.clkr,
+ [GCC_XO_PCIE_LINK_CLK] = &gcc_xo_pcie_link_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+};
+
+static const struct qcom_reset_map gcc_sdx65_resets[] = {
+ [GCC_BLSP1_QUP1_BCR] = { 0x1c000 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x1e000 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x20000 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x22000 },
+ [GCC_BLSP1_UART1_BCR] = { 0x1d000 },
+ [GCC_BLSP1_UART2_BCR] = { 0x1f000 },
+ [GCC_BLSP1_UART3_BCR] = { 0x21000 },
+ [GCC_BLSP1_UART4_BCR] = { 0x23000 },
+ [GCC_PCIE_BCR] = { 0x43000 },
+ [GCC_PCIE_LINK_DOWN_BCR] = { 0x77000 },
+ [GCC_PCIE_NOCSR_COM_PHY_BCR] = { 0x78008 },
+ [GCC_PCIE_PHY_BCR] = { 0x44000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x78000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x78004 },
+ [GCC_PCIE_PHY_NOCSR_COM_PHY_BCR] = { 0x7800c },
+ [GCC_PDM_BCR] = { 0x24000 },
+ [GCC_QUSB2PHY_BCR] = { 0x19000 },
+ [GCC_SDCC1_BCR] = { 0x1a000 },
+ [GCC_TCSR_PCIE_BCR] = { 0x57000 },
+ [GCC_USB30_BCR] = { 0x17000 },
+ [GCC_USB3_PHY_BCR] = { 0x18000 },
+ [GCC_USB3PHY_PHY_BCR] = { 0x18004 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x19004 },
+};
+
+static struct gdsc *gcc_sdx65_gdscs[] = {
+ [USB30_GDSC] = &usb30_gdsc,
+ [PCIE_GDSC] = &pcie_gdsc,
+};
+
+static const struct regmap_config gcc_sdx65_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f101c,
+ .fast_io = true,
+};
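+
+ /*
+ * fast_io makes regmap protect accesses with a spinlock instead
+ * of a mutex, which is appropriate for plain MMIO registers.
+ */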
+
+static const struct qcom_cc_desc gcc_sdx65_desc = {
+ .config = &gcc_sdx65_regmap_config,
+ .clks = gcc_sdx65_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdx65_clocks),
+ .resets = gcc_sdx65_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdx65_resets),
+ .gdscs = gcc_sdx65_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdx65_gdscs),
+};
+
+static const struct of_device_id gcc_sdx65_match_table[] = {
+ { .compatible = "qcom,gcc-sdx65" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdx65_match_table);
+
+static int gcc_sdx65_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_sdx65_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ /*
+ * Keep the following clocks always on, as they are critical to
+ * the functioning of the system:
+ * GCC_SYS_NOC_CPUSS_AHB_CLK, GCC_CPUSS_AHB_CLK, GCC_CPUSS_GNOC_CLK
+ */
+ regmap_update_bits(regmap, 0x6d008, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21));
+ regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22));
+
+ return qcom_cc_really_probe(pdev, &gcc_sdx65_desc, regmap);
+}
+
+static struct platform_driver gcc_sdx65_driver = {
+ .probe = gcc_sdx65_probe,
+ .driver = {
+ .name = "gcc-sdx65",
+ .of_match_table = gcc_sdx65_match_table,
+ },
+};
+
+static int __init gcc_sdx65_init(void)
+{
+ return platform_driver_register(&gcc_sdx65_driver);
+}
+subsys_initcall(gcc_sdx65_init);
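+
+ /*
+ * Registering at subsys_initcall (rather than module_init) is the
+ * usual pattern for these clock providers: it brings the clocks
+ * up before consumer drivers start probing and cuts down on
+ * -EPROBE_DEFER churn when the driver is built in.
+ */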
+
+static void __exit gcc_sdx65_exit(void)
+{
+ platform_driver_unregister(&gcc_sdx65_driver);
+}
+module_exit(gcc_sdx65_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDX65 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index 3236706771b1..a4f7fba70393 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -4,6 +4,7 @@
* Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
*/
+#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c
index 6d0a9e2d5104..c3731f96c8e6 100644
--- a/drivers/clk/qcom/gcc-sm8350.c
+++ b/drivers/clk/qcom/gcc-sm8350.c
@@ -4,6 +4,7 @@
* Copyright (c) 2020-2021, Linaro Limited
*/
+#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
new file mode 100644
index 000000000000..593a195467ff
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -0,0 +1,3304 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sm8450.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PHY_AUX_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
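+
+ /*
+ * Each gcc_parent_map_N pairs with gcc_parent_data_N: the map
+ * translates the clk framework's parent index into the hardware
+ * mux select value, so the two arrays must stay in the same
+ * order. Sizing num_parents with ARRAY_SIZE() keeps them in sync.
+ */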
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_PCIE_0_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "pcie_0_pipe_clk", },
+ { .fw_name = "bi_tcxo", },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_PCIE_1_PHY_AUX_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "pcie_1_phy_aux_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_PCIE_1_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "pcie_1_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .fw_name = "ufs_phy_rx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .fw_name = "ufs_phy_rx_symbol_1_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .fw_name = "ufs_phy_tx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x7b060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
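+
+ /*
+ * These regmap muxes sit between a PHY-generated pipe/aux clock
+ * and the XO, presumably so the branch can be parked on the XO
+ * while the PHY is powered down; clk_regmap_mux_closest_ops
+ * selects whichever parent's rate is closest to the request.
+ */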
+
+static struct clk_regmap_mux gcc_pcie_1_phy_aux_clk_src = {
+ .reg = 0x9d080,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_5,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x9d064,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x87060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_8,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x870d0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_9,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x87050,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x49068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x74004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x75004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x76004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x7b064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x7b048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x9d068,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x9d04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x43010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x27014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
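+
+ /*
+ * The QUP RCGs name their clk_init_data separately instead of
+ * using anonymous literals, a pattern qcom drivers follow when
+ * probe code needs to reference the init data later (for
+ * instance when registering the QUP clocks for DFS).
+ */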
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x27148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x2727c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x273b0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x274e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s5_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(50000000, P_GCC_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x27618,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s5_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x2774c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x27880,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x28014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x28148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x2827c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x283b0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x284e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x28618,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x2874c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x2e014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x2e148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x2e27c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x2e3b0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x2e4e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x2e618,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
+ .cmd_rcgr = 0x2e74c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
+};
+
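+/*
+ * Frequency table entries use the F(rate, parent, pre_div, m, n) macro
+ * from clk-rcg.h, which encodes the pre-divider as (2 * div - 1). A
+ * non-zero m/n adds MND fractional division on top of the pre-divider:
+ * e.g. the 400 kHz entry below is the 19.2 MHz TCXO / 12 * (1/4). An
+ * empty entry terminates the table.
+ */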
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x24014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x26014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x8702c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x87074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x870a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x8708c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
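+/*
+ * The (2 * div - 1) encoding in F() is what allows the half-integer
+ * dividers below: with GPLL0 at 600 MHz (even output at 300 MHz),
+ * 600 / 4.5 gives the 133.33 MHz entry and 600 / 2.5 the 240 MHz one.
+ */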
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x49028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x49040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x4906c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
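+/*
+ * Simple register divider: shift/width locate the divider field inside
+ * the register. clk_regmap_div_ro_ops registers it read-only, so Linux
+ * reports the firmware-programmed post-divider without changing it.
+ */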
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x49058,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
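+/*
+ * Branch (gate) clocks. enable_reg/enable_mask control the gate and
+ * halt_reg is polled to confirm the clock actually changed state.
+ * halt_check selects the policy: BRANCH_HALT polls the local CBCR,
+ * BRANCH_HALT_VOTED only delays on disable (another master's vote in
+ * the shared enable registers at 0x62000/0x62008/0x62010 may keep the
+ * clock running), BRANCH_HALT_SKIP never polls, and BRANCH_HALT_DELAY
+ * just waits because there is no status bit to read. hwcg_reg/hwcg_bit
+ * expose the hardware clock-gating control for the branch.
+ */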
+static struct clk_branch gcc_aggre_noc_pcie_0_axi_clk = {
+ .halt_reg = 0x7b08c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7b08c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_1_axi_clk = {
+ .halt_reg = 0x9d098,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9d098,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x870d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x870d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x870d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
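+/*
+ * The *_hw_ctl_clk branches share the CBCR of the matching UFS clock;
+ * BIT(1) hands gate control to hardware, so the UFS controller can
+ * gate the clock autonomously while software still counts it enabled.
+ */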
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x870d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x870d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x870d4,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x49088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x49088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x36010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x36010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x36018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x36018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x20030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x20030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x49084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x49084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x81154,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x81154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_tbu_clk = {
+ .halt_reg = 0x9d094,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9d094,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_pcie_sf_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x3700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x3700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0x37014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x37014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x37014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
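+/*
+ * The *_clkref_en branches control the PHY reference clock enables
+ * (eUSB3, PCIe, UFS, USB3); each has a dedicated register at 0x9cxxx.
+ */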
+static struct clk_branch gcc_eusb3_0_clkref_en = {
+ .halt_reg = 0x9c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eusb3_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x74000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x74000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x75000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x76000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
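+/*
+ * These two branches gate GPLL0 outputs towards the GPU clock
+ * controller. They have no local halt register, hence
+ * BRANCH_HALT_DELAY, and are enabled only through the shared voting
+ * register.
+ */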
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x81010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x81010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x81018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x81018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x7b034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x7b030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_en = {
+ .halt_reg = 0x9c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x7b028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x7b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
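+/*
+ * The pipe clock is sourced from the PCIe PHY (via the
+ * gcc_pcie_0_pipe_clk_src mux defined earlier); BRANCH_HALT_SKIP is
+ * used because the status bit is only meaningful once the PHY is up.
+ */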
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x7b03c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x7b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x7b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x9d030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x9d02c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_en = {
+ .halt_reg = 0x9c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x9d024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_aux_clk = {
+ .halt_reg = 0x9d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x9d048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x9d040,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x9d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x9d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x43004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x43004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x43008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x37008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x37008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x37008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x81008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x81008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x7b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x42014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x42008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x42010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x4200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x33000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x27140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x27274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x273a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x274dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x27610,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x27744,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x27878,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x3314c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x33140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x28140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x28274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x283a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x284dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x28610,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x28744,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x3328c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x33280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x2e00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x2e140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x2e274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x2e3a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x2e4dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x2e610,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0x2e744,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x27004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x2e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2e004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x2e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x2400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_at_clk = {
+ .halt_reg = 0x24010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x24010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x24010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_at_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_0_clkref_en = {
+ .halt_reg = 0x9c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x87020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x87020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x87020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x87018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x87018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x87018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x87018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x87018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x87018,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x8706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8706c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x8706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8706c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x870a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x870a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x870a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x870a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x870a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x870a4,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
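+/*
+ * The UFS rx/tx symbol clocks are generated by the UFS PHY, so only a
+ * fixed delay (BRANCH_HALT_DELAY) is used instead of polling a halt
+ * bit that may not be valid while the PHY is down.
+ */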
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x87028,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x87028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x870c0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x870c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x87024,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x87024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x87064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x87064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x87064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x87064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x87064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x87064,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x49018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x49024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x49020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_0_clkref_en = {
+ .halt_reg = 0x9c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x4905c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4905c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x49060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x49064,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x49064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x42018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x42020,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x42020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
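+/*
+ * GDSCs (globally distributed switch controllers) are the power
+ * domains for the PCIe, UFS and USB blocks; gdscr is the collapse
+ * control register and PWRSTS_OFF_ON limits the domain to fully
+ * off/on with no intermediate retention state.
+ */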
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x7b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x9d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x87004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x49004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
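+/*
+ * Lookup table from the binding indices in
+ * include/dt-bindings/clock/qcom,gcc-sm8450.h to the clk_regmap
+ * pointers, used by the common qcom_cc code to resolve DT clock
+ * specifiers.
+ */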
+static struct clk_regmap *gcc_sm8450_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_0_AXI_CLK] = &gcc_aggre_noc_pcie_0_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_1_AXI_CLK] = &gcc_aggre_noc_pcie_1_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_TBU_CLK] = &gcc_ddrss_pcie_sf_tbu_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_EUSB3_0_CLKREF_EN] = &gcc_eusb3_0_clkref_en.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_EN] = &gcc_pcie_0_clkref_en.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_EN] = &gcc_pcie_1_clkref_en.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK] = &gcc_pcie_1_phy_aux_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK_SRC] = &gcc_pcie_1_phy_aux_clk_src.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC2_AT_CLK] = &gcc_sdcc2_at_clk.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SDCC4_AT_CLK] = &gcc_sdcc4_at_clk.clkr,
+ [GCC_UFS_0_CLKREF_EN] = &gcc_ufs_0_clkref_en.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_0_CLKREF_EN] = &gcc_usb3_0_clkref_en.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
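+/*
+ * Block resets, indexed by BCR (Block Control Reset) register offset.
+ * Entries with a second field also name the reset bit within the
+ * register (struct qcom_reset_map is { .reg, .bit }).
+ */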
+static const struct qcom_reset_map gcc_sm8450_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x36000 },
+ [GCC_DISPLAY_BCR] = { 0x37000 },
+ [GCC_GPU_BCR] = { 0x81000 },
+ [GCC_PCIE_0_BCR] = { 0x7b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x7c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x7c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x7c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x7c028 },
+ [GCC_PCIE_1_BCR] = { 0x9d000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x9e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x9e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x9e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x9e000 },
+ [GCC_PCIE_PHY_BCR] = { 0x7f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 },
+ [GCC_PDM_BCR] = { 0x43000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x27000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x28000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x2e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x22000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x22004 },
+ [GCC_SDCC2_BCR] = { 0x24000 },
+ [GCC_SDCC4_BCR] = { 0x26000 },
+ [GCC_UFS_PHY_BCR] = { 0x87000 },
+ [GCC_USB30_PRIM_BCR] = { 0x49000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x60008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x60014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x60000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x6000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x60004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x60010 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x7a000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x42018, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x42020, 2 },
+ [GCC_VIDEO_BCR] = { 0x42000 },
+};
+
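+/*
+ * QUP serial-engine RCGs that support DFS (Dynamic Frequency Switching).
+ * The probe routine below hands this list to qcom_cc_register_rcg_dfs(),
+ * which switches the RCGs over to their DFS frequency tables.
+ */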
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src),
+};
+
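+/* GDSC power domains (globally distributed switch controllers) */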
+static struct gdsc *gcc_sm8450_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+};
+
+static const struct regmap_config gcc_sm8450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f1030,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8450_desc = {
+ .config = &gcc_sm8450_regmap_config,
+ .clks = gcc_sm8450_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8450_clocks),
+ .resets = gcc_sm8450_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8450_resets),
+ .gdscs = gcc_sm8450_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8450_gdscs),
+};
+
+static const struct of_device_id gcc_sm8450_match_table[] = {
+ { .compatible = "qcom,gcc-sm8450" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8450_match_table);
+
+static int gcc_sm8450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /* FORCE_MEM_CORE_ON (bit 14): keep the UFS PHY ICE core memories powered even while the clock is gated */
+ regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14));
+
+ /* Keep the following critical clocks always on */
+ regmap_update_bits(regmap, 0x36004, BIT(0), BIT(0)); /* gcc_camera_ahb_clk */
+ regmap_update_bits(regmap, 0x36020, BIT(0), BIT(0)); /* gcc_camera_xo_clk */
+ regmap_update_bits(regmap, 0x37004, BIT(0), BIT(0)); /* gcc_disp_ahb_clk */
+ regmap_update_bits(regmap, 0x3701c, BIT(0), BIT(0)); /* gcc_disp_xo_clk */
+ regmap_update_bits(regmap, 0x81004, BIT(0), BIT(0)); /* gcc_gpu_cfg_ahb_clk */
+ regmap_update_bits(regmap, 0x42004, BIT(0), BIT(0)); /* gcc_video_ahb_clk */
+ regmap_update_bits(regmap, 0x42028, BIT(0), BIT(0)); /* gcc_video_xo_clk */
+
+ return qcom_cc_really_probe(pdev, &gcc_sm8450_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8450_driver = {
+ .probe = gcc_sm8450_probe,
+ .driver = {
+ .name = "gcc-sm8450",
+ .of_match_table = gcc_sm8450_match_table,
+ },
+};
+
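+/*
+ * Register early (subsys_initcall) so the global clocks are available
+ * to consumers that probe before module_init time.
+ */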
+static int __init gcc_sm8450_init(void)
+{
+ return platform_driver_register(&gcc_sm8450_driver);
+}
+subsys_initcall(gcc_sm8450_init);
+
+static void __exit gcc_sm8450_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8450_driver);
+}
+module_exit(gcc_sm8450_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8450 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
index 89f1ad6631da..b39ee1c9647b 100644
--- a/drivers/clk/qcom/lpasscc-sc7280.c
+++ b/drivers/clk/qcom/lpasscc-sc7280.c
@@ -3,6 +3,7 @@
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/qcom/lpasscc-sdm845.c b/drivers/clk/qcom/lpasscc-sdm845.c
index 56d3e9928892..7040da952728 100644
--- a/drivers/clk/qcom/lpasscc-sdm845.c
+++ b/drivers/clk/qcom/lpasscc-sdm845.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index fbfcf0006739..e9f971359155 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3,6 +3,7 @@
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/module.h>
diff --git a/drivers/clk/qcom/q6sstop-qcs404.c b/drivers/clk/qcom/q6sstop-qcs404.c
index 507386bee07d..780074e05841 100644
--- a/drivers/clk/qcom/q6sstop-qcs404.c
+++ b/drivers/clk/qcom/q6sstop-qcs404.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c
index 4543bda793f4..43184459228f 100644
--- a/drivers/clk/qcom/turingcc-qcs404.c
+++ b/drivers/clk/qcom/turingcc-qcs404.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/module.h>
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 6d0280751bb1..be6e6ae7448c 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -31,6 +31,7 @@ config CLK_RENESAS
select CLK_R8A77990 if ARCH_R8A77990
select CLK_R8A77995 if ARCH_R8A77995
select CLK_R8A779A0 if ARCH_R8A779A0
+ select CLK_R8A779F0 if ARCH_R8A779F0
select CLK_R9A06G032 if ARCH_R9A06G032
select CLK_R9A07G044 if ARCH_R9A07G044
select CLK_SH73A0 if ARCH_SH73A0
@@ -149,8 +150,11 @@ config CLK_R8A77995
config CLK_R8A779A0
bool "R-Car V3U clock support" if COMPILE_TEST
- select CLK_RCAR_CPG_LIB
- select CLK_RENESAS_CPG_MSSR
+ select CLK_RCAR_GEN4_CPG
+
+config CLK_R8A779F0
+ bool "R-Car S4-8 clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN4_CPG
config CLK_R9A06G032
bool "RZ/N1D clock support" if COMPILE_TEST
@@ -178,6 +182,11 @@ config CLK_RCAR_GEN3_CPG
select CLK_RCAR_CPG_LIB
select CLK_RENESAS_CPG_MSSR
+config CLK_RCAR_GEN4_CPG
+ bool "R-Car Gen4 clock support" if COMPILE_TEST
+ select CLK_RCAR_CPG_LIB
+ select CLK_RENESAS_CPG_MSSR
+
config CLK_RCAR_USB2_CLOCK_SEL
bool "Renesas R-Car USB2 clock selector support"
depends on ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 7d018700d08b..8b34db1a328c 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77990) += r8a77990-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
obj-$(CONFIG_CLK_R8A779A0) += r8a779a0-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A779F0) += r8a779f0-cpg-mssr.o
obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
obj-$(CONFIG_CLK_RCAR_CPG_LIB) += rcar-cpg-lib.o
obj-$(CONFIG_CLK_RCAR_GEN2_CPG) += rcar-gen2-cpg.o
obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
+obj-$(CONFIG_CLK_RCAR_GEN4_CPG) += rcar-gen4-cpg.o
obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o
obj-$(CONFIG_CLK_RZG2L) += rzg2l-cpg.o
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index 39b185d8e957..95dd56b64d64 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -100,10 +100,14 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A774A1_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A774A1_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A774A1_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A774A1_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A774A1_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A774A1_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A774A1_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A774A1_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A774A1_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A774A1_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A774A1_CLK_SD0, R8A774A1_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A774A1_CLK_SD1, R8A774A1_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A774A1_CLK_SD2, R8A774A1_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A774A1_CLK_SD3, R8A774A1_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A774A1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
index af602d83c8ce..56061b9b8437 100644
--- a/drivers/clk/renesas/r8a774b1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
@@ -97,10 +97,14 @@ static const struct cpg_core_clk r8a774b1_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A774B1_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A774B1_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A774B1_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A774B1_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A774B1_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A774B1_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A774B1_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A774B1_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A774B1_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A774B1_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A774B1_CLK_SD0, R8A774B1_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A774B1_CLK_SD1, R8A774B1_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A774B1_CLK_SD2, R8A774B1_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A774B1_CLK_SD3, R8A774B1_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A774B1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A774B1_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index 5b938eb2df25..b5eb5dc45d62 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -108,9 +108,12 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A774C0_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A774C0_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A774C0_CLK_SD0, CLK_SDSRC, 0x0074),
- DEF_GEN3_SD("sd1", R8A774C0_CLK_SD1, CLK_SDSRC, 0x0078),
- DEF_GEN3_SD("sd3", R8A774C0_CLK_SD3, CLK_SDSRC, 0x026c),
+ DEF_GEN3_SDH("sd0h", R8A774C0_CLK_SD0H, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SDH("sd1h", R8A774C0_CLK_SD1H, CLK_SDSRC, 0x0078),
+ DEF_GEN3_SDH("sd3h", R8A774C0_CLK_SD3H, CLK_SDSRC, 0x026c),
+ DEF_GEN3_SD("sd0", R8A774C0_CLK_SD0, R8A774C0_CLK_SD0H, 0x0074),
+ DEF_GEN3_SD("sd1", R8A774C0_CLK_SD1, R8A774C0_CLK_SD1H, 0x0078),
+ DEF_GEN3_SD("sd3", R8A774C0_CLK_SD3, R8A774C0_CLK_SD3H, 0x026c),
DEF_FIXED("cl", R8A774C0_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("cp", R8A774C0_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a774e1-cpg-mssr.c b/drivers/clk/renesas/r8a774e1-cpg-mssr.c
index 40c71466df37..2950f0db90ae 100644
--- a/drivers/clk/renesas/r8a774e1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774e1-cpg-mssr.c
@@ -100,10 +100,14 @@ static const struct cpg_core_clk r8a774e1_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A774E1_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A774E1_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A774E1_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A774E1_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A774E1_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A774E1_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A774E1_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A774E1_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A774E1_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A774E1_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A774E1_CLK_SD0, R8A774E1_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A774E1_CLK_SD1, R8A774E1_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A774E1_CLK_SD2, R8A774E1_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A774E1_CLK_SD3, R8A774E1_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A774E1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A774E1_CLK_CR, CLK_PLL1_DIV4, 2, 1),
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index d6b1d0148bfd..991a44315d71 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -104,10 +104,14 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A7795_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A7795_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A7795_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A7795_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, R8A7795_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, R8A7795_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, R8A7795_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, R8A7795_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A7795_CLK_CR, CLK_PLL1_DIV4, 2, 1),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 9c22977e42c2..7950313611ef 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -106,10 +106,14 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A7796_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7796_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A7796_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A7796_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A7796_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A7796_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A7796_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A7796_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A7796_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A7796_CLK_SD0, R8A7796_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A7796_CLK_SD1, R8A7796_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A7796_CLK_SD2, R8A7796_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, R8A7796_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A7796_CLK_CR, CLK_PLL1_DIV4, 2, 1),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index 7eee45a31b2a..d687c29efa3c 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -101,10 +101,14 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A77965_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A77965_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A77965_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A77965_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A77965_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A77965_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A77965_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A77965_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A77965_CLK_SD0, R8A77965_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A77965_CLK_SD1, R8A77965_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, R8A77965_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, R8A77965_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A77965_CLK_CR, CLK_PLL1_DIV4, 2, 1),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index 9fe372286c1e..f3cd64de4dc6 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -96,7 +96,8 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A77980_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A77980_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A77980_CLK_SD0, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SDH("sd0h", R8A77980_CLK_SD0H, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SD("sd0", R8A77980_CLK_SD0, R8A77980_CLK_SD0H, 0x0074),
DEF_FIXED("cl", R8A77980_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A77980_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index a582f2ec3294..faf60f7adc8d 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -100,9 +100,12 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A77990_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A77990_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A77990_CLK_SD0, CLK_SDSRC, 0x0074),
- DEF_GEN3_SD("sd1", R8A77990_CLK_SD1, CLK_SDSRC, 0x0078),
- DEF_GEN3_SD("sd3", R8A77990_CLK_SD3, CLK_SDSRC, 0x026c),
+ DEF_GEN3_SDH("sd0h", R8A77990_CLK_SD0H, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SDH("sd1h", R8A77990_CLK_SD1H, CLK_SDSRC, 0x0078),
+ DEF_GEN3_SDH("sd3h", R8A77990_CLK_SD3H, CLK_SDSRC, 0x026c),
+ DEF_GEN3_SD("sd0", R8A77990_CLK_SD0, R8A77990_CLK_SD0H, 0x0074),
+ DEF_GEN3_SD("sd1", R8A77990_CLK_SD1, R8A77990_CLK_SD1H, 0x0078),
+ DEF_GEN3_SD("sd3", R8A77990_CLK_SD3, R8A77990_CLK_SD3H, 0x026c),
DEF_FIXED("cl", R8A77990_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("cr", R8A77990_CLK_CR, CLK_PLL1D2, 2, 1),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index 81c0bc1e78af..7713cfd99c1d 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -103,7 +103,8 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_GEN3_PE("s3d2c", R8A77995_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2),
DEF_GEN3_PE("s3d4c", R8A77995_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4),
- DEF_GEN3_SD("sd0", R8A77995_CLK_SD0, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd0h", R8A77995_CLK_SD0H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SD("sd0", R8A77995_CLK_SD0, R8A77995_CLK_SD0H, 0x268),
DEF_DIV6P1("canfd", R8A77995_CLK_CANFD, CLK_PLL0D3, 0x244),
DEF_DIV6P1("mso", R8A77995_CLK_MSO, CLK_PLL1D2, 0x014),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index fbd7454f2beb..1c09d4ebe90f 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -10,46 +10,19 @@
* Copyright (C) 2015 Renesas Electronics Corp.
*/
-#include <linux/bug.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
-#include "rcar-cpg-lib.h"
#include "renesas-cpg-mssr.h"
-
-enum rcar_r8a779a0_clk_types {
- CLK_TYPE_R8A779A0_MAIN = CLK_TYPE_CUSTOM,
- CLK_TYPE_R8A779A0_PLL1,
- CLK_TYPE_R8A779A0_PLL2X_3X, /* PLL[23][01] */
- CLK_TYPE_R8A779A0_PLL5,
- CLK_TYPE_R8A779A0_Z,
- CLK_TYPE_R8A779A0_SD,
- CLK_TYPE_R8A779A0_MDSEL, /* Select parent/divider using mode pin */
- CLK_TYPE_R8A779A0_OSC, /* OSC EXTAL predivider and fixed divider */
- CLK_TYPE_R8A779A0_RPCSRC,
- CLK_TYPE_R8A779A0_RPC,
- CLK_TYPE_R8A779A0_RPCD2,
-};
-
-struct rcar_r8a779a0_cpg_pll_config {
- u8 extal_div;
- u8 pll1_mult;
- u8 pll1_div;
- u8 pll5_mult;
- u8 pll5_div;
- u8 osc_prediv;
-};
+#include "rcar-gen4-cpg.h"
enum clk_ids {
/* Core Clock Outputs exported to DT */
@@ -85,33 +58,18 @@ enum clk_ids {
};
#define DEF_PLL(_name, _id, _offset) \
- DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL2X_3X, CLK_MAIN, \
.offset = _offset)
-#define DEF_Z(_name, _id, _parent, _div, _offset) \
- DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_Z, _parent, .div = _div, \
- .offset = _offset)
-
-#define DEF_SD(_name, _id, _parent, _offset) \
- DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_SD, _parent, .offset = _offset)
-
-#define DEF_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \
- DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_MDSEL, \
- (_parent0) << 16 | (_parent1), \
- .div = (_div0) << 16 | (_div1), .offset = _md)
-
-#define DEF_OSC(_name, _id, _parent, _div) \
- DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_OSC, _parent, .div = _div)
-
static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
DEF_INPUT("extalr", CLK_EXTALR),
/* Internal Core Clocks */
- DEF_BASE(".main", CLK_MAIN, CLK_TYPE_R8A779A0_MAIN, CLK_EXTAL),
- DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_R8A779A0_PLL1, CLK_MAIN),
- DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_R8A779A0_PLL5, CLK_MAIN),
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
DEF_PLL(".pll20", CLK_PLL20, 0x0834),
DEF_PLL(".pll21", CLK_PLL21, 0x0838),
DEF_PLL(".pll30", CLK_PLL30, 0x083c),
@@ -128,14 +86,14 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL5_DIV4, 1, 1),
DEF_RATE(".oco", CLK_OCO, 32768),
- DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_R8A779A0_RPCSRC, CLK_PLL5),
- DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_R8A779A0_RPC, CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_R8A779A0_RPCD2,
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
+ DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2,
R8A779A0_CLK_RPC),
/* Core Clock Outputs */
- DEF_Z("z0", R8A779A0_CLK_Z0, CLK_PLL20, 2, 0),
- DEF_Z("z1", R8A779A0_CLK_Z1, CLK_PLL21, 2, 8),
+ DEF_GEN4_Z("z0", R8A779A0_CLK_Z0, CLK_TYPE_GEN4_Z, CLK_PLL20, 2, 0),
+ DEF_GEN4_Z("z1", R8A779A0_CLK_Z1, CLK_TYPE_GEN4_Z, CLK_PLL21, 2, 8),
DEF_FIXED("zx", R8A779A0_CLK_ZX, CLK_PLL20_DIV2, 2, 1),
DEF_FIXED("s1d1", R8A779A0_CLK_S1D1, CLK_S1, 1, 1),
DEF_FIXED("s1d2", R8A779A0_CLK_S1D2, CLK_S1, 2, 1),
@@ -159,15 +117,16 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED("cp", R8A779A0_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cl16mck", R8A779A0_CLK_CL16MCK, CLK_PLL1_DIV2, 64, 1),
- DEF_SD("sd0", R8A779A0_CLK_SD0, CLK_SDSRC, 0x870),
+ DEF_GEN4_SDH("sdh0", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870),
+ DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, 0x870),
DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880),
DEF_DIV6P1("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, 0x884),
- DEF_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
- DEF_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
+ DEF_GEN4_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
+ DEF_GEN4_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
};
static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
@@ -271,256 +230,6 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("vspx3", 1031, R8A779A0_CLK_S1D1),
};
-static const struct rcar_r8a779a0_cpg_pll_config *cpg_pll_config __initdata;
-static unsigned int cpg_clk_extalr __initdata;
-static u32 cpg_mode __initdata;
-
-/*
- * Z0 Clock & Z1 Clock
- */
-#define CPG_FRQCRB 0x00000804
-#define CPG_FRQCRB_KICK BIT(31)
-#define CPG_FRQCRC 0x00000808
-
-struct cpg_z_clk {
- struct clk_hw hw;
- void __iomem *reg;
- void __iomem *kick_reg;
- unsigned long max_rate; /* Maximum rate for normal mode */
- unsigned int fixed_div;
- u32 mask;
-};
-
-#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
-
-static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
-
- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
- 32 * zclk->fixed_div);
-}
-
-static int cpg_z_clk_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int min_mult, max_mult, mult;
- unsigned long rate, prate;
-
- rate = min(req->rate, req->max_rate);
- if (rate <= zclk->max_rate) {
- /* Set parent rate to initial value for normal modes */
- prate = zclk->max_rate;
- } else {
- /* Set increased parent rate for boost modes */
- prate = rate;
- }
- req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
- prate * zclk->fixed_div);
-
- prate = req->best_parent_rate / zclk->fixed_div;
- min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
- max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
- if (max_mult < min_mult)
- return -EINVAL;
-
- mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL, prate);
- mult = clamp(mult, min_mult, max_mult);
-
- req->rate = DIV_ROUND_CLOSEST_ULL((u64)prate * mult, 32);
- return 0;
-}
-
-static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- unsigned int i;
-
- mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
- parent_rate);
- mult = clamp(mult, 1U, 32U);
-
- if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
- return -EBUSY;
-
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
-
- /*
- * Set KICK bit in FRQCRB to update hardware setting and wait for
- * clock change completion.
- */
- cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
-
- /*
- * Note: There is no HW information about the worst case latency.
- *
- * Using experimental measurements, it seems that no more than
- * ~10 iterations are needed, independently of the CPU rate.
- * Since this value might be dependent on external xtal rate, pll1
- * rate or even the other emulation clocks rate, use 1000 as a
- * "super" safe value.
- */
- for (i = 1000; i; i--) {
- if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
- return 0;
-
- cpu_relax();
- }
-
- return -ETIMEDOUT;
-}
-
-static const struct clk_ops cpg_z_clk_ops = {
- .recalc_rate = cpg_z_clk_recalc_rate,
- .determine_rate = cpg_z_clk_determine_rate,
- .set_rate = cpg_z_clk_set_rate,
-};
-
-static struct clk * __init cpg_z_clk_register(const char *name,
- const char *parent_name,
- void __iomem *reg,
- unsigned int div,
- unsigned int offset)
-{
- struct clk_init_data init = {};
- struct cpg_z_clk *zclk;
- struct clk *clk;
-
- zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
- if (!zclk)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &cpg_z_clk_ops;
- init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = &parent_name;
- init.num_parents = 1;
-
- zclk->reg = reg + CPG_FRQCRC;
- zclk->kick_reg = reg + CPG_FRQCRB;
- zclk->hw.init = &init;
- zclk->mask = GENMASK(offset + 4, offset);
- zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */
-
- clk = clk_register(NULL, &zclk->hw);
- if (IS_ERR(clk)) {
- kfree(zclk);
- return clk;
- }
-
- zclk->max_rate = clk_hw_get_rate(clk_hw_get_parent(&zclk->hw)) /
- zclk->fixed_div;
- return clk;
-}
-
-/*
- * RPC Clocks
- */
-#define CPG_RPCCKCR 0x874
-
-static const struct clk_div_table cpg_rpcsrc_div_table[] = {
- { 0, 4 }, { 1, 6 }, { 2, 5 }, { 3, 6 }, { 0, 0 },
-};
-
-static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
- const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
-{
- const struct clk *parent;
- unsigned int mult = 1;
- unsigned int div = 1;
- u32 value;
-
- parent = clks[core->parent & 0xffff]; /* some types use high bits */
- if (IS_ERR(parent))
- return ERR_CAST(parent);
-
- switch (core->type) {
- case CLK_TYPE_R8A779A0_MAIN:
- div = cpg_pll_config->extal_div;
- break;
-
- case CLK_TYPE_R8A779A0_PLL1:
- mult = cpg_pll_config->pll1_mult;
- div = cpg_pll_config->pll1_div;
- break;
-
- case CLK_TYPE_R8A779A0_PLL2X_3X:
- value = readl(base + core->offset);
- mult = (((value >> 24) & 0x7f) + 1) * 2;
- break;
-
- case CLK_TYPE_R8A779A0_PLL5:
- mult = cpg_pll_config->pll5_mult;
- div = cpg_pll_config->pll5_div;
- break;
-
- case CLK_TYPE_R8A779A0_Z:
- return cpg_z_clk_register(core->name, __clk_get_name(parent),
- base, core->div, core->offset);
-
- case CLK_TYPE_R8A779A0_SD:
- return cpg_sd_clk_register(core->name, base, core->offset,
- __clk_get_name(parent), notifiers,
- false);
- break;
-
- case CLK_TYPE_R8A779A0_MDSEL:
- /*
- * Clock selectable between two parents and two fixed dividers
- * using a mode pin
- */
- if (cpg_mode & BIT(core->offset)) {
- div = core->div & 0xffff;
- } else {
- parent = clks[core->parent >> 16];
- if (IS_ERR(parent))
- return ERR_CAST(parent);
- div = core->div >> 16;
- }
- mult = 1;
- break;
-
- case CLK_TYPE_R8A779A0_OSC:
- /*
- * Clock combining OSC EXTAL predivider and a fixed divider
- */
- div = cpg_pll_config->osc_prediv * core->div;
- break;
-
- case CLK_TYPE_R8A779A0_RPCSRC:
- return clk_register_divider_table(NULL, core->name,
- __clk_get_name(parent), 0,
- base + CPG_RPCCKCR, 3, 2, 0,
- cpg_rpcsrc_div_table,
- &cpg_lock);
-
- case CLK_TYPE_R8A779A0_RPC:
- return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR,
- __clk_get_name(parent), notifiers);
-
- case CLK_TYPE_R8A779A0_RPCD2:
- return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR,
- __clk_get_name(parent));
-
- default:
- return ERR_PTR(-EINVAL);
- }
-
- return clk_register_fixed_factor(NULL, core->name,
- __clk_get_name(parent), 0, mult, div);
-}
-
static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
MOD_CLK_ID(907), /* RWDT */
};
@@ -539,17 +248,19 @@ static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
*/
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
-
-static const struct rcar_r8a779a0_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
- { 1, 128, 1, 192, 1, 16, },
- { 1, 106, 1, 160, 1, 19, },
- { 0, 0, 0, 0, 0, 0, },
- { 2, 128, 1, 192, 1, 32, },
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
+ /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
+ { 1, 128, 1, 0, 0, 0, 0, 192, 1, 0, 0, 16, },
+ { 1, 106, 1, 0, 0, 0, 0, 160, 1, 0, 0, 19, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 2, 128, 1, 0, 0, 0, 0, 192, 1, 0, 0, 32, },
};
+
static int __init r8a779a0_cpg_mssr_init(struct device *dev)
{
+ const struct rcar_gen4_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
int error;
error = rcar_rst_read_mode_pins(&cpg_mode);
@@ -557,10 +268,8 @@ static int __init r8a779a0_cpg_mssr_init(struct device *dev)
return error;
cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
- cpg_clk_extalr = CLK_EXTALR;
- spin_lock_init(&cpg_lock);
- return 0;
+ return rcar_gen4_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
}
const struct cpg_mssr_info r8a779a0_cpg_mssr_info __initconst = {
@@ -581,7 +290,7 @@ const struct cpg_mssr_info r8a779a0_cpg_mssr_info __initconst = {
/* Callbacks */
.init = r8a779a0_cpg_mssr_init,
- .cpg_clk_register = rcar_r8a779a0_cpg_clk_register,
+ .cpg_clk_register = rcar_gen4_cpg_clk_register,
- .reg_layout = CLK_REG_LAYOUT_RCAR_V3U,
+ .reg_layout = CLK_REG_LAYOUT_RCAR_GEN4,
};
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
new file mode 100644
index 000000000000..e6ec02c2c2a8
--- /dev/null
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a779f0 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * Based on r8a779a0-cpg-mssr.c
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a779f0-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen4-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A779F0_CLK_R,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL3,
+ CLK_PLL5,
+ CLK_PLL6,
+ CLK_PLL1_DIV2,
+ CLK_PLL2_DIV2,
+ CLK_PLL3_DIV2,
+ CLK_PLL5_DIV2,
+ CLK_PLL5_DIV4,
+ CLK_PLL6_DIV2,
+ CLK_S0,
+ CLK_SDSRC,
+ CLK_RPCSRC,
+ CLK_OCO,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
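+/*
+ * Core clocks: mostly fixed-factor dividers of the PLLs; the custom
+ * CLK_TYPE_GEN4_* types are handled by the shared R-Car Gen4 CPG
+ * library via rcar_gen4_cpg_clk_register().
+ */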
+static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
+ DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
+ DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
+ DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 2, 1),
+ DEF_FIXED(".pll5_div2", CLK_PLL5_DIV2, CLK_PLL5, 2, 1),
+ DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1),
+ DEF_FIXED(".pll6_div2", CLK_PLL6_DIV2, CLK_PLL6, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".sdsrc", CLK_SDSRC, CLK_TYPE_GEN4_SDSRC, CLK_PLL5),
+ DEF_RATE(".oco", CLK_OCO, 32768),
+
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
+ DEF_BASE(".rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("s0d2", R8A779F0_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A779F0_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A779F0_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("cl16m", R8A779F0_CLK_CL16M, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_mm", R8A779F0_CLK_S0D2_MM, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3_mm", R8A779F0_CLK_S0D3_MM, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4_mm", R8A779F0_CLK_S0D4_MM, CLK_S0, 4, 1),
+ DEF_FIXED("cl16m_mm", R8A779F0_CLK_CL16M_MM, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_rt", R8A779F0_CLK_S0D2_RT, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3_rt", R8A779F0_CLK_S0D3_RT, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4_rt", R8A779F0_CLK_S0D4_RT, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6_rt", R8A779F0_CLK_S0D6_RT, CLK_S0, 6, 1),
+ DEF_FIXED("cl16m_rt", R8A779F0_CLK_CL16M_RT, CLK_S0, 48, 1),
+ DEF_FIXED("s0d3_per", R8A779F0_CLK_S0D3_PER, CLK_S0, 3, 1),
+ DEF_FIXED("s0d6_per", R8A779F0_CLK_S0D6_PER, CLK_S0, 6, 1),
+ DEF_FIXED("s0d12_per", R8A779F0_CLK_S0D12_PER, CLK_S0, 12, 1),
+ DEF_FIXED("s0d24_per", R8A779F0_CLK_S0D24_PER, CLK_S0, 24, 1),
+ DEF_FIXED("cl16m_per", R8A779F0_CLK_CL16M_PER, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_hsc", R8A779F0_CLK_S0D2_HSC, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3_hsc", R8A779F0_CLK_S0D3_HSC, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4_hsc", R8A779F0_CLK_S0D4_HSC, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6_hsc", R8A779F0_CLK_S0D6_HSC, CLK_S0, 6, 1),
+ DEF_FIXED("s0d12_hsc", R8A779F0_CLK_S0D12_HSC, CLK_S0, 12, 1),
+ DEF_FIXED("cl16m_hsc", R8A779F0_CLK_CL16M_HSC, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_cc", R8A779F0_CLK_S0D2_CC, CLK_S0, 2, 1),
+ DEF_FIXED("rsw2", R8A779F0_CLK_RSW2, CLK_PLL5, 2, 1),
+ DEF_FIXED("cbfusa", R8A779F0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A779F0_CLK_CPEX, CLK_EXTAL, 2, 1),
+
+ DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, CLK_SDSRC, 0x870),
+ DEF_DIV6P1("mso", R8A779F0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+
+ DEF_GEN4_OSC("osc", R8A779F0_CLK_OSC, CLK_EXTAL, 8),
+ DEF_GEN4_MDSEL("r", R8A779F0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
+};
+
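+/* Module clocks: only the SCIF serial interfaces are wired up in this initial version */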
+static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+ DEF_MOD("scif0", 702, R8A779F0_CLK_S0D12_PER),
+ DEF_MOD("scif1", 703, R8A779F0_CLK_S0D12_PER),
+ DEF_MOD("scif3", 704, R8A779F0_CLK_S0D12_PER),
+ DEF_MOD("scif4", 705, R8A779F0_CLK_S0D12_PER),
+};
+
+/*
+ * CPG Clock Data
+ */
+/*
+ * MD EXTAL PLL1 PLL2 PLL3 PLL5 PLL6 OSC
+ * 14 13 (MHz)
+ * ----------------------------------------------------------------
+ * 0 0 16 / 1 x200 x150 x200 x200 x134 /15
+ * 0 1 20 / 1 x160 x120 x160 x160 x106 /19
+ * 1 0 Prohibited setting
+ * 1 1 40 / 2 x160 x120 x160 x160 x106 /38
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
+ (((md) & BIT(13)) >> 13))
+
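+/*
+ * Indexed by (MD14, MD13); the all-zero row marks the prohibited
+ * setting, which r8a779f0_cpg_mssr_init() rejects via extal_div == 0.
+ */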
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
+ /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
+ { 1, 200, 1, 150, 1, 200, 1, 200, 1, 134, 1, 15, },
+ { 1, 160, 1, 120, 1, 160, 1, 160, 1, 106, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 2, 160, 1, 120, 1, 160, 1, 160, 1, 106, 1, 38, },
+};
+
+static int __init r8a779f0_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen4_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (!cpg_pll_config->extal_div) {
+ dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
+ return -EINVAL;
+ }
+
+ return rcar_gen4_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a779f0_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a779f0_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a779f0_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a779f0_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a779f0_mod_clks),
+ .num_hw_mod_clks = 28 * 32,
+
+ /* Callbacks */
+ .init = r8a779f0_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen4_cpg_clk_register,
+
+ .reg_layout = CLK_REG_LAYOUT_RCAR_GEN4,
+};
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 47c16265fca9..79042bf46fe8 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -26,15 +26,15 @@ enum clk_ids {
CLK_PLL1,
CLK_PLL2,
CLK_PLL2_DIV2,
- CLK_PLL2_DIV16,
- CLK_PLL2_DIV20,
+ CLK_PLL2_DIV2_8,
+ CLK_PLL2_DIV2_10,
CLK_PLL3,
CLK_PLL3_400,
CLK_PLL3_533,
CLK_PLL3_DIV2,
+ CLK_PLL3_DIV2_2,
CLK_PLL3_DIV2_4,
CLK_PLL3_DIV2_4_2,
- CLK_PLL3_DIV4,
CLK_SEL_PLL3_3,
CLK_DIV_PLL3_C,
CLK_PLL4,
@@ -50,12 +50,21 @@ enum clk_ids {
CLK_PLL2_SDHI_266,
CLK_SD0_DIV4,
CLK_SD1_DIV4,
+ CLK_SEL_GPU2,
/* Module Clocks */
MOD_CLK_BASE,
};
/* Divider tables */
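+/* Each entry is { register value, divider }; a divider of 0 terminates the table. */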
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_1_32[] = {
{0, 1},
{1, 2},
@@ -69,6 +78,7 @@ static const struct clk_div_table dtable_1_32[] = {
static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
+static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" };
static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
/* External Clock Inputs */
@@ -94,13 +104,13 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED(".clk_400", CLK_PLL2_SDHI_400, CLK_PLL2_800, 1, 2),
DEF_FIXED(".clk_266", CLK_PLL2_SDHI_266, CLK_PLL2_SDHI_533, 1, 2),
- DEF_FIXED(".pll2_div16", CLK_PLL2_DIV16, CLK_PLL2, 1, 16),
- DEF_FIXED(".pll2_div20", CLK_PLL2_DIV20, CLK_PLL2, 1, 20),
+ DEF_FIXED(".pll2_div2_8", CLK_PLL2_DIV2_8, CLK_PLL2_DIV2, 1, 8),
+ DEF_FIXED(".pll2_div2_10", CLK_PLL2_DIV2_10, CLK_PLL2_DIV2, 1, 10),
DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2),
+ DEF_FIXED(".pll3_div2_2", CLK_PLL3_DIV2_2, CLK_PLL3_DIV2, 1, 2),
DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
- DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4),
DEF_MUX(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3,
sel_pll3_3, ARRAY_SIZE(sel_pll3_3), 0, CLK_MUX_READ_ONLY),
DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3,
@@ -108,13 +118,16 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_FOUT3, 1, 2),
DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
+ DEF_MUX(".sel_gpu2", CLK_SEL_GPU2, SEL_GPU2,
+ sel_gpu2, ARRAY_SIZE(sel_gpu2), 0, CLK_MUX_READ_ONLY),
/* Core output clk */
- DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1),
- DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV16, DIVPL2A,
+ DEF_DIV("I", R9A07G044_CLK_I, CLK_PLL1, DIVPL1A, dtable_1_8,
+ CLK_DIVIDER_HIWORD_MASK),
+ DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV2_8, DIVPL2A,
dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
DEF_FIXED("P0_DIV2", R9A07G044_CLK_P0_DIV2, R9A07G044_CLK_P0, 1, 2),
- DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV20, 1, 1),
+ DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV2_10, 1, 1),
DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4,
DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G044_CLK_P1, 1, 2),
@@ -132,6 +145,8 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
sel_shdi, ARRAY_SIZE(sel_shdi)),
DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
+ DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8,
+ CLK_DIVIDER_HIWORD_MASK),
};
static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
@@ -145,6 +160,24 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x52c, 0),
DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
0x52c, 1),
+ DEF_MOD("ostm0_pclk", R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0,
+ 0x534, 0),
+ DEF_MOD("ostm1_clk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
+ 0x534, 1),
+ DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
+ 0x534, 2),
+ DEF_MOD("wdt0_pclk", R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
+ 0x548, 0),
+ DEF_MOD("wdt0_clk", R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
+ 0x548, 1),
+ DEF_MOD("wdt1_pclk", R9A07G044_WDT1_PCLK, R9A07G044_CLK_P0,
+ 0x548, 2),
+ DEF_MOD("wdt1_clk", R9A07G044_WDT1_CLK, R9A07G044_OSCCLK,
+ 0x548, 3),
+ DEF_MOD("wdt2_pclk", R9A07G044_WDT2_PCLK, R9A07G044_CLK_P0,
+ 0x548, 4),
+ DEF_MOD("wdt2_clk", R9A07G044_WDT2_CLK, R9A07G044_OSCCLK,
+ 0x548, 5),
DEF_MOD("spi_clk2", R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
0x550, 0),
DEF_MOD("spi_clk", R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
@@ -165,6 +198,12 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x554, 6),
DEF_MOD("sdhi1_aclk", R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
0x554, 7),
+ DEF_MOD("gpu_clk", R9A07G044_GPU_CLK, R9A07G044_CLK_G,
+ 0x558, 0),
+ DEF_MOD("gpu_axi_clk", R9A07G044_GPU_AXI_CLK, R9A07G044_CLK_P1,
+ 0x558, 1),
+ DEF_MOD("gpu_ace_clk", R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
+ 0x558, 2),
DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
0x570, 0),
DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
@@ -217,6 +256,14 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x584, 4),
DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
0x588, 0),
+ DEF_MOD("sci1", R9A07G044_SCI1_CLKP, R9A07G044_CLK_P0,
+ 0x588, 1),
+ DEF_MOD("rspi0", R9A07G044_RSPI0_CLKB, R9A07G044_CLK_P0,
+ 0x590, 0),
+ DEF_MOD("rspi1", R9A07G044_RSPI1_CLKB, R9A07G044_CLK_P0,
+ 0x590, 1),
+ DEF_MOD("rspi2", R9A07G044_RSPI2_CLKB, R9A07G044_CLK_P0,
+ 0x590, 2),
DEF_MOD("canfd", R9A07G044_CANFD_PCLK, R9A07G044_CLK_P0,
0x594, 0),
DEF_MOD("gpio", R9A07G044_GPIO_HCLK, R9A07G044_OSCCLK,
@@ -225,6 +272,8 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x5a8, 0),
DEF_MOD("adc_pclk", R9A07G044_ADC_PCLK, R9A07G044_CLK_P0,
0x5a8, 1),
+ DEF_MOD("tsu_pclk", R9A07G044_TSU_PCLK, R9A07G044_CLK_TSU,
+ 0x5ac, 0),
};
static struct rzg2l_reset r9a07g044_resets[] = {
@@ -233,9 +282,18 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0),
DEF_RST(R9A07G044_DMAC_ARESETN, 0x82c, 0),
DEF_RST(R9A07G044_DMAC_RST_ASYNC, 0x82c, 1),
+ DEF_RST(R9A07G044_OSTM0_PRESETZ, 0x834, 0),
+ DEF_RST(R9A07G044_OSTM1_PRESETZ, 0x834, 1),
+ DEF_RST(R9A07G044_OSTM2_PRESETZ, 0x834, 2),
+ DEF_RST(R9A07G044_WDT0_PRESETN, 0x848, 0),
+ DEF_RST(R9A07G044_WDT1_PRESETN, 0x848, 1),
+ DEF_RST(R9A07G044_WDT2_PRESETN, 0x848, 2),
DEF_RST(R9A07G044_SPI_RST, 0x850, 0),
DEF_RST(R9A07G044_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A07G044_SDHI1_IXRST, 0x854, 1),
+ DEF_RST(R9A07G044_GPU_RESETN, 0x858, 0),
+ DEF_RST(R9A07G044_GPU_AXI_RESETN, 0x858, 1),
+ DEF_RST(R9A07G044_GPU_ACE_RESETN, 0x858, 2),
DEF_RST(R9A07G044_SSI0_RST_M2_REG, 0x870, 0),
DEF_RST(R9A07G044_SSI1_RST_M2_REG, 0x870, 1),
DEF_RST(R9A07G044_SSI2_RST_M2_REG, 0x870, 2),
@@ -256,6 +314,10 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_SCIF3_RST_SYSTEM_N, 0x884, 3),
DEF_RST(R9A07G044_SCIF4_RST_SYSTEM_N, 0x884, 4),
DEF_RST(R9A07G044_SCI0_RST, 0x888, 0),
+ DEF_RST(R9A07G044_SCI1_RST, 0x888, 1),
+ DEF_RST(R9A07G044_RSPI0_RST, 0x890, 0),
+ DEF_RST(R9A07G044_RSPI1_RST, 0x890, 1),
+ DEF_RST(R9A07G044_RSPI2_RST, 0x890, 2),
DEF_RST(R9A07G044_CANFD_RSTP_N, 0x894, 0),
DEF_RST(R9A07G044_CANFD_RSTC_N, 0x894, 1),
DEF_RST(R9A07G044_GPIO_RSTN, 0x898, 0),
@@ -263,6 +325,7 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_GPIO_SPARE_RESETN, 0x898, 2),
DEF_RST(R9A07G044_ADC_PRESETN, 0x8a8, 0),
DEF_RST(R9A07G044_ADC_ADRST_N, 0x8a8, 1),
+ DEF_RST(R9A07G044_TSU_PRESETN, 0x8ac, 0),
};
static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
index e93f0011eb07..e2e0447de190 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.c
+++ b/drivers/clk/renesas/rcar-cpg-lib.c
@@ -65,206 +65,49 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
/*
* SDn Clock
*/
-#define CPG_SD_STP_HCK BIT(9)
-#define CPG_SD_STP_CK BIT(8)
-
-#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
-#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
-
-#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
-{ \
- .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
- ((sd_srcfc) << 2) | \
- ((sd_fc) << 0), \
- .div = (sd_div), \
-}
-struct sd_div_table {
- u32 val;
- unsigned int div;
-};
+#define SDnSRCFC_SHIFT 2
+#define STPnHCK BIT(9 - SDnSRCFC_SHIFT)
-struct sd_clock {
- struct clk_hw hw;
- const struct sd_div_table *div_table;
- struct cpg_simple_notifier csn;
- unsigned int div_num;
- unsigned int cur_div_idx;
-};
-
-/* SDn divider
- * sd_srcfc sd_fc div
- * stp_hck (div) (div) = sd_srcfc x sd_fc
- *---------------------------------------------------------
- * 0 0 (1) 1 (4) 4 : SDR104 / HS200 / HS400 (8 TAP)
- * 0 1 (2) 1 (4) 8 : SDR50
- * 1 2 (4) 1 (4) 16 : HS / SDR25
- * 1 3 (8) 1 (4) 32 : NS / SDR12
- * 1 4 (16) 1 (4) 64
- * 0 0 (1) 0 (2) 2
- * 0 1 (2) 0 (2) 4 : SDR104 / HS200 / HS400 (4 TAP)
- * 1 2 (4) 0 (2) 8
- * 1 3 (8) 0 (2) 16
- * 1 4 (16) 0 (2) 32
- *
- * NOTE: There is a quirk option to ignore the first row of the dividers
- * table when searching for suitable settings. This is because HS400 on
- * early ES versions of H3 and M3-W requires a specific setting to work.
- */
-static const struct sd_div_table cpg_sd_div_table[] = {
-/* CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) */
- CPG_SD_DIV_TABLE_DATA(0, 0, 1, 4),
- CPG_SD_DIV_TABLE_DATA(0, 1, 1, 8),
- CPG_SD_DIV_TABLE_DATA(1, 2, 1, 16),
- CPG_SD_DIV_TABLE_DATA(1, 3, 1, 32),
- CPG_SD_DIV_TABLE_DATA(1, 4, 1, 64),
- CPG_SD_DIV_TABLE_DATA(0, 0, 0, 2),
- CPG_SD_DIV_TABLE_DATA(0, 1, 0, 4),
- CPG_SD_DIV_TABLE_DATA(1, 2, 0, 8),
- CPG_SD_DIV_TABLE_DATA(1, 3, 0, 16),
- CPG_SD_DIV_TABLE_DATA(1, 4, 0, 32),
+static const struct clk_div_table cpg_sdh_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 }, { STPnHCK | 3, 8 },
+ { STPnHCK | 4, 16 }, { 0, 0 },
};
-#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
-
-static int cpg_sd_clock_enable(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
- clock->div_table[clock->cur_div_idx].val &
- CPG_SD_STP_MASK);
-
- return 0;
-}
-
-static void cpg_sd_clock_disable(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
-}
-
-static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
+struct clk * __init cpg_sdh_clk_register(const char *name,
+ void __iomem *sdnckcr, const char *parent_name,
+ struct raw_notifier_head *notifiers)
{
- struct sd_clock *clock = to_sd_clock(hw);
-
- return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
-}
+ struct cpg_simple_notifier *csn;
+ struct clk *clk;
-static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
+ csn = kzalloc(sizeof(*csn), GFP_KERNEL);
+ if (!csn)
+ return ERR_PTR(-ENOMEM);
- return DIV_ROUND_CLOSEST(parent_rate,
- clock->div_table[clock->cur_div_idx].div);
-}
+ csn->reg = sdnckcr;
-static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned long calc_rate, diff;
- unsigned int i;
-
- for (i = 0; i < clock->div_num; i++) {
- calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
- clock->div_table[i].div);
- if (calc_rate < req->min_rate || calc_rate > req->max_rate)
- continue;
-
- diff = calc_rate > req->rate ? calc_rate - req->rate
- : req->rate - calc_rate;
- if (diff < diff_min) {
- best_rate = calc_rate;
- diff_min = diff;
- }
+ clk = clk_register_divider_table(NULL, name, parent_name, 0, sdnckcr,
+ SDnSRCFC_SHIFT, 8, 0, cpg_sdh_div_table,
+ &cpg_lock);
+ if (IS_ERR(clk)) {
+ kfree(csn);
+ return clk;
}
- if (best_rate == ULONG_MAX)
- return -EINVAL;
-
- req->rate = best_rate;
- return 0;
-}
-
-static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned int i;
-
- for (i = 0; i < clock->div_num; i++)
- if (rate == DIV_ROUND_CLOSEST(parent_rate,
- clock->div_table[i].div))
- break;
-
- if (i >= clock->div_num)
- return -EINVAL;
-
- clock->cur_div_idx = i;
-
- cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
- clock->div_table[i].val &
- (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
-
- return 0;
+ cpg_simple_notifier_register(notifiers, csn);
+ return clk;
}
-static const struct clk_ops cpg_sd_clock_ops = {
- .enable = cpg_sd_clock_enable,
- .disable = cpg_sd_clock_disable,
- .is_enabled = cpg_sd_clock_is_enabled,
- .recalc_rate = cpg_sd_clock_recalc_rate,
- .determine_rate = cpg_sd_clock_determine_rate,
- .set_rate = cpg_sd_clock_set_rate,
+static const struct clk_div_table cpg_sd_div_table[] = {
+ { 0, 2 }, { 1, 4 }, { 0, 0 },
};
struct clk * __init cpg_sd_clk_register(const char *name,
- void __iomem *base, unsigned int offset, const char *parent_name,
- struct raw_notifier_head *notifiers, bool skip_first)
+ void __iomem *sdnckcr, const char *parent_name)
{
- struct clk_init_data init = {};
- struct sd_clock *clock;
- struct clk *clk;
- u32 val;
-
- clock = kzalloc(sizeof(*clock), GFP_KERNEL);
- if (!clock)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &cpg_sd_clock_ops;
- init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = &parent_name;
- init.num_parents = 1;
-
- clock->csn.reg = base + offset;
- clock->hw.init = &init;
- clock->div_table = cpg_sd_div_table;
- clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
-
- if (skip_first) {
- clock->div_table++;
- clock->div_num--;
- }
-
- val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
- val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
- writel(val, clock->csn.reg);
-
- clk = clk_register(NULL, &clock->hw);
- if (IS_ERR(clk))
- goto free_clock;
-
- cpg_simple_notifier_register(notifiers, &clock->csn);
- return clk;
-
-free_clock:
- kfree(clock);
- return clk;
+ return clk_register_divider_table(NULL, name, parent_name, 0, sdnckcr,
+ 0, 2, 0, cpg_sd_div_table, &cpg_lock);
}
struct rpc_clock {
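The hunk above replaces the bespoke sd_clock implementation and its custom clk_ops with the generic divider-table helper: the STPnHCK stop bit is folded into the table values, so programming a divider also drives the clock-stop bit. A minimal out-of-tree sketch of the same pattern, reusing the STPnHCK/SDnSRCFC_SHIFT macros from the patch; my_base, MY_SDnCKCR and my_lock are hypothetical placeholders:

	/* Sketch only: an SDn-style clock whose divider field starts at
	 * bit 2 and whose table values may carry the STPnHCK stop bit.
	 */
	static const struct clk_div_table my_sdh_div_table[] = {
		{ 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 },
		{ STPnHCK | 3, 8 }, { STPnHCK | 4, 16 },
		{ 0, 0 },	/* sentinel */
	};

	static struct clk *my_register_sdh(const char *name, const char *parent)
	{
		/* The 8-bit field at SDnSRCFC_SHIFT covers both STPnHCK
		 * and the SRCFC divider bits.
		 */
		return clk_register_divider_table(NULL, name, parent, 0,
						  my_base + MY_SDnCKCR,
						  SDnSRCFC_SHIFT, 8, 0,
						  my_sdh_div_table, &my_lock);
	}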
diff --git a/drivers/clk/renesas/rcar-cpg-lib.h b/drivers/clk/renesas/rcar-cpg-lib.h
index 35c0217c2f8b..94627df1c94c 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.h
+++ b/drivers/clk/renesas/rcar-cpg-lib.h
@@ -26,9 +26,12 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set);
+struct clk * __init cpg_sdh_clk_register(const char *name,
+ void __iomem *sdnckcr, const char *parent_name,
+ struct raw_notifier_head *notifiers);
+
struct clk * __init cpg_sd_clk_register(const char *name,
- void __iomem *base, unsigned int offset, const char *parent_name,
- struct raw_notifier_head *notifiers, bool skip_first);
+ void __iomem *sdnckcr, const char *parent_name);
struct clk * __init cpg_rpc_clk_register(const char *name,
void __iomem *rpcckcr, const char *parent_name,
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 741f6e74bbcf..e668f23c75e7 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -312,29 +312,20 @@ static u32 cpg_quirks __initdata;
#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
-#define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */
static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
{
.soc_id = "r8a7795", .revision = "ES1.0",
- .data = (void *)(PLL_ERRATA | RCKCR_CKSEL | SD_SKIP_FIRST),
+ .data = (void *)(PLL_ERRATA | RCKCR_CKSEL),
},
{
.soc_id = "r8a7795", .revision = "ES1.*",
- .data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
- },
- {
- .soc_id = "r8a7795", .revision = "ES2.0",
- .data = (void *)SD_SKIP_FIRST,
+ .data = (void *)(RCKCR_CKSEL),
},
{
.soc_id = "r8a7796", .revision = "ES1.0",
- .data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
- },
- {
- .soc_id = "r8a7796", .revision = "ES1.1",
- .data = (void *)SD_SKIP_FIRST,
+ .data = (void *)(RCKCR_CKSEL),
},
{ /* sentinel */ }
};
@@ -401,10 +392,13 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
mult *= 2;
break;
+ case CLK_TYPE_GEN3_SDH:
+ return cpg_sdh_clk_register(core->name, base + core->offset,
+ __clk_get_name(parent), notifiers);
+
case CLK_TYPE_GEN3_SD:
- return cpg_sd_clk_register(core->name, base, core->offset,
- __clk_get_name(parent), notifiers,
- cpg_quirks & SD_SKIP_FIRST);
+ return cpg_sd_clk_register(core->name, base + core->offset,
+ __clk_get_name(parent));
case CLK_TYPE_GEN3_R:
if (cpg_quirks & RCKCR_CKSEL) {
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index 3d949c4a3244..2bc0afadf604 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -17,6 +17,7 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_PLL2,
CLK_TYPE_GEN3_PLL3,
CLK_TYPE_GEN3_PLL4,
+ CLK_TYPE_GEN3_SDH,
CLK_TYPE_GEN3_SD,
CLK_TYPE_GEN3_R,
CLK_TYPE_GEN3_MDSEL, /* Select parent/divider using mode pin */
@@ -32,6 +33,9 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_SOC_BASE,
};
+#define DEF_GEN3_SDH(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_SDH, _parent, .offset = _offset)
+
#define DEF_GEN3_SD(_name, _id, _parent, _offset) \
DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
new file mode 100644
index 000000000000..54ebf4b3c128
--- /dev/null
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R-Car Gen4 Clock Pulse Generator
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * Based on rcar-gen3-cpg.c
+ *
+ * Copyright (C) 2015-2018 Glider bvba
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen4-cpg.h"
+#include "rcar-cpg-lib.h"
+
+static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initconst;
+static unsigned int cpg_clk_extalr __initdata;
+static u32 cpg_mode __initdata;
+
+/*
+ * Z0 Clock & Z1 Clock
+ */
+#define CPG_FRQCRB 0x00000804
+#define CPG_FRQCRB_KICK BIT(31)
+#define CPG_FRQCRC 0x00000808
+
+struct cpg_z_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+ void __iomem *kick_reg;
+ unsigned long max_rate; /* Maximum rate for normal mode */
+ unsigned int fixed_div;
+ u32 mask;
+};
+
+#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
+
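+/*
+ * The FRQCRC divider field stores (32 - mult): the resulting rate is
+ * parent_rate * mult / 32, further divided by the fixed post-divider.
+ */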
+static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ u32 val;
+
+ val = readl(zclk->reg) & zclk->mask;
+ mult = 32 - (val >> __ffs(zclk->mask));
+
+ return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
+ 32 * zclk->fixed_div);
+}
+
+static int cpg_z_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int min_mult, max_mult, mult;
+ unsigned long rate, prate;
+
+ rate = min(req->rate, req->max_rate);
+ if (rate <= zclk->max_rate) {
+ /* Set parent rate to initial value for normal modes */
+ prate = zclk->max_rate;
+ } else {
+ /* Set increased parent rate for boost modes */
+ prate = rate;
+ }
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ prate * zclk->fixed_div);
+
+ prate = req->best_parent_rate / zclk->fixed_div;
+ min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
+ max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
+ if (max_mult < min_mult)
+ return -EINVAL;
+
+ mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL, prate);
+ mult = clamp(mult, min_mult, max_mult);
+
+ req->rate = DIV_ROUND_CLOSEST_ULL((u64)prate * mult, 32);
+ return 0;
+}
+
+static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ unsigned int i;
+
+ mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
+ parent_rate);
+ mult = clamp(mult, 1U, 32U);
+
+ if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
+ cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
+
+	/*
+	 * Note: There is no HW information about the worst-case latency.
+	 *
+	 * Experimental measurements suggest that no more than ~10
+	 * iterations are needed, independent of the CPU rate.  Since this
+	 * value might depend on the external xtal rate, the PLL1 rate, or
+	 * even other clock rates, use 1000 as a "super" safe value.
+	 */
+ for (i = 1000; i; i--) {
+ if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct clk_ops cpg_z_clk_ops = {
+ .recalc_rate = cpg_z_clk_recalc_rate,
+ .determine_rate = cpg_z_clk_determine_rate,
+ .set_rate = cpg_z_clk_set_rate,
+};
+
+static struct clk * __init cpg_z_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ unsigned int div,
+ unsigned int offset)
+{
+ struct clk_init_data init = {};
+ struct cpg_z_clk *zclk;
+ struct clk *clk;
+
+ zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
+ if (!zclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_z_clk_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ zclk->reg = reg + CPG_FRQCRC;
+ zclk->kick_reg = reg + CPG_FRQCRB;
+ zclk->hw.init = &init;
+ zclk->mask = GENMASK(offset + 4, offset);
+ zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */
+
+ clk = clk_register(NULL, &zclk->hw);
+ if (IS_ERR(clk)) {
+ kfree(zclk);
+ return clk;
+ }
+
+ zclk->max_rate = clk_hw_get_rate(clk_hw_get_parent(&zclk->hw)) /
+ zclk->fixed_div;
+ return clk;
+}
+
+/*
+ * RPC Clocks
+ */
+static const struct clk_div_table cpg_rpcsrc_div_table[] = {
+ { 0, 4 }, { 1, 6 }, { 2, 5 }, { 3, 6 }, { 0, 0 },
+};
+
+struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers)
+{
+ const struct clk *parent;
+ unsigned int mult = 1;
+ unsigned int div = 1;
+ u32 value;
+
+ parent = clks[core->parent & 0xffff]; /* some types use high bits */
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ switch (core->type) {
+ case CLK_TYPE_GEN4_MAIN:
+ div = cpg_pll_config->extal_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL1:
+ mult = cpg_pll_config->pll1_mult;
+ div = cpg_pll_config->pll1_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL2:
+ mult = cpg_pll_config->pll2_mult;
+ div = cpg_pll_config->pll2_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL3:
+ mult = cpg_pll_config->pll3_mult;
+ div = cpg_pll_config->pll3_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL5:
+ mult = cpg_pll_config->pll5_mult;
+ div = cpg_pll_config->pll5_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL6:
+ mult = cpg_pll_config->pll6_mult;
+ div = cpg_pll_config->pll6_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL2X_3X:
+ value = readl(base + core->offset);
+ mult = (((value >> 24) & 0x7f) + 1) * 2;
+ break;
+
+ case CLK_TYPE_GEN4_Z:
+ return cpg_z_clk_register(core->name, __clk_get_name(parent),
+ base, core->div, core->offset);
+
+ case CLK_TYPE_GEN4_SDSRC:
+ div = ((readl(base + SD0CKCR1) >> 29) & 0x03) + 4;
+ break;
+
+ case CLK_TYPE_GEN4_SDH:
+ return cpg_sdh_clk_register(core->name, base + core->offset,
+ __clk_get_name(parent), notifiers);
+
+ case CLK_TYPE_GEN4_SD:
+ return cpg_sd_clk_register(core->name, base + core->offset,
+ __clk_get_name(parent));
+
+ case CLK_TYPE_GEN4_MDSEL:
+ /*
+ * Clock selectable between two parents and two fixed dividers
+ * using a mode pin
+ */
+ if (cpg_mode & BIT(core->offset)) {
+ div = core->div & 0xffff;
+ } else {
+ parent = clks[core->parent >> 16];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+ div = core->div >> 16;
+ }
+ mult = 1;
+ break;
+
+ case CLK_TYPE_GEN4_OSC:
+ /*
+ * Clock combining OSC EXTAL predivider and a fixed divider
+ */
+ div = cpg_pll_config->osc_prediv * core->div;
+ break;
+
+ case CLK_TYPE_GEN4_RPCSRC:
+ return clk_register_divider_table(NULL, core->name,
+ __clk_get_name(parent), 0,
+ base + CPG_RPCCKCR, 3, 2, 0,
+ cpg_rpcsrc_div_table,
+ &cpg_lock);
+
+ case CLK_TYPE_GEN4_RPC:
+ return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR,
+ __clk_get_name(parent), notifiers);
+
+ case CLK_TYPE_GEN4_RPCD2:
+ return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR,
+ __clk_get_name(parent));
+
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ return clk_register_fixed_factor(NULL, core->name,
+ __clk_get_name(parent), 0, mult, div);
+}
+
+int __init rcar_gen4_cpg_init(const struct rcar_gen4_cpg_pll_config *config,
+ unsigned int clk_extalr, u32 mode)
+{
+ cpg_pll_config = config;
+ cpg_clk_extalr = clk_extalr;
+ cpg_mode = mode;
+
+ spin_lock_init(&cpg_lock);
+
+ return 0;
+}
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h
new file mode 100644
index 000000000000..afc8c024d538
--- /dev/null
+++ b/drivers/clk/renesas/rcar-gen4-cpg.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R-Car Gen4 Clock Pulse Generator
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __CLK_RENESAS_RCAR_GEN4_CPG_H__
+#define __CLK_RENESAS_RCAR_GEN4_CPG_H__
+
+enum rcar_gen4_clk_types {
+ CLK_TYPE_GEN4_MAIN = CLK_TYPE_CUSTOM,
+ CLK_TYPE_GEN4_PLL1,
+ CLK_TYPE_GEN4_PLL2,
+ CLK_TYPE_GEN4_PLL2X_3X, /* r8a779a0 only */
+ CLK_TYPE_GEN4_PLL3,
+ CLK_TYPE_GEN4_PLL5,
+ CLK_TYPE_GEN4_PLL6,
+ CLK_TYPE_GEN4_SDSRC,
+ CLK_TYPE_GEN4_SDH,
+ CLK_TYPE_GEN4_SD,
+ CLK_TYPE_GEN4_MDSEL, /* Select parent/divider using mode pin */
+ CLK_TYPE_GEN4_Z,
+ CLK_TYPE_GEN4_OSC, /* OSC EXTAL predivider and fixed divider */
+ CLK_TYPE_GEN4_RPCSRC,
+ CLK_TYPE_GEN4_RPC,
+ CLK_TYPE_GEN4_RPCD2,
+
+ /* SoC specific definitions start here */
+ CLK_TYPE_GEN4_SOC_BASE,
+};
+
+#define DEF_GEN4_SDH(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_SDH, _parent, .offset = _offset)
+
+#define DEF_GEN4_SD(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_SD, _parent, .offset = _offset)
+
+#define DEF_GEN4_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_MDSEL, \
+ (_parent0) << 16 | (_parent1), \
+ .div = (_div0) << 16 | (_div1), .offset = _md)
+
+#define DEF_GEN4_OSC(_name, _id, _parent, _div) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_OSC, _parent, .div = _div)
+
+#define DEF_GEN4_Z(_name, _id, _type, _parent, _div, _offset) \
+ DEF_BASE(_name, _id, _type, _parent, .div = _div, .offset = _offset)
+
+struct rcar_gen4_cpg_pll_config {
+ u8 extal_div;
+ u8 pll1_mult;
+ u8 pll1_div;
+ u8 pll2_mult;
+ u8 pll2_div;
+ u8 pll3_mult;
+ u8 pll3_div;
+ u8 pll5_mult;
+ u8 pll5_div;
+ u8 pll6_mult;
+ u8 pll6_div;
+ u8 osc_prediv;
+};
+
+#define CPG_RPCCKCR 0x874
+#define SD0CKCR1 0x8a4
+
+struct clk *rcar_gen4_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers);
+int rcar_gen4_cpg_init(const struct rcar_gen4_cpg_pll_config *config,
+ unsigned int clk_extalr, u32 mode);
+
+#endif
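A worked expansion of the DEF_GEN4_MDSEL packing may help; the names below are purely illustrative:

	DEF_GEN4_MDSEL("foo", MY_CLK_ID, 14, PARENT_A, 2, PARENT_B, 4)
		/* .parent = PARENT_A << 16 | PARENT_B */
		/* .div    = (2 << 16) | 4, .offset = 14 */

rcar_gen4_cpg_clk_register() then picks PARENT_B with divider 4 when mode pin MD14 is set, and PARENT_A with divider 2 when it is clear.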
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 21f762aa2131..5d2c3edbaa14 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -57,9 +57,11 @@ static const u16 mstpsr[] = {
0x9A0, 0x9A4, 0x9A8, 0x9AC,
};
-static const u16 mstpsr_for_v3u[] = {
+static const u16 mstpsr_for_gen4[] = {
0x2E00, 0x2E04, 0x2E08, 0x2E0C, 0x2E10, 0x2E14, 0x2E18, 0x2E1C,
- 0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38,
+ 0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38, 0x2E3C,
+ 0x2E40, 0x2E44, 0x2E48, 0x2E4C, 0x2E50, 0x2E54, 0x2E58, 0x2E5C,
+ 0x2E60, 0x2E64, 0x2E68, 0x2E6C,
};
/*
@@ -71,9 +73,11 @@ static const u16 smstpcr[] = {
0x990, 0x994, 0x998, 0x99C,
};
-static const u16 mstpcr_for_v3u[] = {
+static const u16 mstpcr_for_gen4[] = {
0x2D00, 0x2D04, 0x2D08, 0x2D0C, 0x2D10, 0x2D14, 0x2D18, 0x2D1C,
- 0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38,
+ 0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38, 0x2D3C,
+ 0x2D40, 0x2D44, 0x2D48, 0x2D4C, 0x2D50, 0x2D54, 0x2D58, 0x2D5C,
+ 0x2D60, 0x2D64, 0x2D68, 0x2D6C,
};
/*
@@ -95,9 +99,11 @@ static const u16 srcr[] = {
0x920, 0x924, 0x928, 0x92C,
};
-static const u16 srcr_for_v3u[] = {
+static const u16 srcr_for_gen4[] = {
0x2C00, 0x2C04, 0x2C08, 0x2C0C, 0x2C10, 0x2C14, 0x2C18, 0x2C1C,
- 0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38,
+ 0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38, 0x2C3C,
+ 0x2C40, 0x2C44, 0x2C48, 0x2C4C, 0x2C50, 0x2C54, 0x2C58, 0x2C5C,
+ 0x2C60, 0x2C64, 0x2C68, 0x2C6C,
};
/*
@@ -109,9 +115,11 @@ static const u16 srstclr[] = {
0x960, 0x964, 0x968, 0x96C,
};
-static const u16 srstclr_for_v3u[] = {
+static const u16 srstclr_for_gen4[] = {
0x2C80, 0x2C84, 0x2C88, 0x2C8C, 0x2C90, 0x2C94, 0x2C98, 0x2C9C,
- 0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8,
+ 0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8, 0x2CBC,
+ 0x2CC0, 0x2CC4, 0x2CC8, 0x2CCC, 0x2CD0, 0x2CD4, 0x2CD8, 0x2CDC,
+ 0x2CE0, 0x2CE4, 0x2CE8, 0x2CEC,
};
/**
@@ -158,7 +166,7 @@ struct cpg_mssr_priv {
struct {
u32 mask;
u32 val;
- } smstpcr_saved[ARRAY_SIZE(mstpsr_for_v3u)];
+ } smstpcr_saved[ARRAY_SIZE(mstpsr_for_gen4)];
struct clk *clks[];
};
@@ -552,6 +560,11 @@ void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
pm_clk_destroy(dev);
}
+static void cpg_mssr_genpd_remove(void *data)
+{
+ pm_genpd_remove(data);
+}
+
static int __init cpg_mssr_add_clk_domain(struct device *dev,
const unsigned int *core_pm_clks,
unsigned int num_core_pm_clks)
@@ -560,6 +573,7 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
struct generic_pm_domain *genpd;
struct cpg_mssr_clk_domain *pd;
size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);
+ int ret;
pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
if (!pd)
@@ -574,11 +588,17 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
GENPD_FLAG_ACTIVE_WAKEUP;
genpd->attach_dev = cpg_mssr_attach_dev;
genpd->detach_dev = cpg_mssr_detach_dev;
- pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+ ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, cpg_mssr_genpd_remove, genpd);
+ if (ret)
+ return ret;
+
cpg_mssr_clk_domain = pd;
- of_genpd_add_provider_simple(np, genpd);
- return 0;
+ return of_genpd_add_provider_simple(np, genpd);
}
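This hunk, like the matching rzg2l-cpg.c change further down, switches genpd registration to the managed-cleanup pattern, so a failure after pm_genpd_init() no longer leaks the domain. A minimal sketch of the pattern; the my_* names are hypothetical:

	static void my_genpd_remove(void *data)
	{
		pm_genpd_remove(data);	/* undo pm_genpd_init() */
	}

	static int my_add_domain(struct device *dev,
				 struct generic_pm_domain *genpd)
	{
		int ret;

		ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
		if (ret)
			return ret;

		/* Runs my_genpd_remove() now on failure, or at detach */
		return devm_add_action_or_reset(dev, my_genpd_remove, genpd);
	}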
#ifdef CONFIG_RESET_CONTROLLER
@@ -828,6 +848,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a779a0_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A779F0
+ {
+ .compatible = "renesas,r8a779f0-cpg-mssr",
+ .data = &r8a779f0_cpg_mssr_info,
+ },
+#endif
{ /* sentinel */ }
};
@@ -970,11 +996,11 @@ static int __init cpg_mssr_common_init(struct device *dev,
priv->reset_clear_regs = srstclr;
} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
priv->control_regs = stbcr;
- } else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_V3U) {
- priv->status_regs = mstpsr_for_v3u;
- priv->control_regs = mstpcr_for_v3u;
- priv->reset_regs = srcr_for_v3u;
- priv->reset_clear_regs = srstclr_for_v3u;
+ } else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4) {
+ priv->status_regs = mstpsr_for_gen4;
+ priv->control_regs = mstpcr_for_gen4;
+ priv->reset_regs = srcr_for_gen4;
+ priv->reset_clear_regs = srstclr_for_gen4;
} else {
error = -EINVAL;
goto out_err;
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 6b2a0ade482e..16810dd4e6ac 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -88,7 +88,7 @@ struct device_node;
enum clk_reg_layout {
CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3 = 0,
CLK_REG_LAYOUT_RZ_A,
- CLK_REG_LAYOUT_RCAR_V3U,
+ CLK_REG_LAYOUT_RCAR_GEN4,
};
/**
@@ -178,6 +178,7 @@ extern const struct cpg_mssr_info r8a77980_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77990_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779a0_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a779f0_cpg_mssr_info;
void __init cpg_mssr_early_init(struct device_node *np,
const struct cpg_mssr_info *info);
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 4021f6cabda4..edd0abe34a37 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -74,6 +74,7 @@ struct sd_hw_data {
* @clks: Array containing all Core and Module Clocks
* @num_core_clks: Number of Core Clocks in clks[]
* @num_mod_clks: Number of Module Clocks in clks[]
+ * @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
* @notifiers: Notifier chain to save/restore clock state for system resume
* @info: Pointer to platform data
@@ -850,10 +851,16 @@ static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device
pm_clk_destroy(dev);
}
+static void rzg2l_cpg_genpd_remove(void *data)
+{
+ pm_genpd_remove(data);
+}
+
static int __init rzg2l_cpg_add_clk_domain(struct device *dev)
{
struct device_node *np = dev->of_node;
struct generic_pm_domain *genpd;
+ int ret;
genpd = devm_kzalloc(dev, sizeof(*genpd), GFP_KERNEL);
if (!genpd)
@@ -864,10 +871,15 @@ static int __init rzg2l_cpg_add_clk_domain(struct device *dev)
GENPD_FLAG_ACTIVE_WAKEUP;
genpd->attach_dev = rzg2l_cpg_attach_dev;
genpd->detach_dev = rzg2l_cpg_detach_dev;
- pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+ ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+ if (ret)
+ return ret;
- of_genpd_add_provider_simple(np, genpd);
- return 0;
+ ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
+ if (ret)
+ return ret;
+
+ return of_genpd_add_provider_simple(np, genpd);
}
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index 7fb6b4030f72..5729d102034b 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -9,11 +9,14 @@
#ifndef __RENESAS_RZG2L_CPG_H__
#define __RENESAS_RZG2L_CPG_H__
+#define CPG_PL1_DDIV (0x200)
#define CPG_PL2_DDIV (0x204)
#define CPG_PL3A_DDIV (0x208)
+#define CPG_PL6_DDIV (0x210)
#define CPG_PL2SDHI_DSEL (0x218)
#define CPG_CLKSTATUS (0x280)
#define CPG_PL3_SSEL (0x408)
+#define CPG_PL6_SSEL (0x414)
#define CPG_PL6_ETH_SSEL (0x418)
#define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
@@ -29,16 +32,19 @@
#define DDIV_PACK(offset, bitpos, size) \
(((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
+#define DIVPL1A DDIV_PACK(CPG_PL1_DDIV, 0, 2)
#define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3)
#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3)
#define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3)
#define DIVPL3C DDIV_PACK(CPG_PL3A_DDIV, 8, 3)
+#define DIVGPU DDIV_PACK(CPG_PL6_DDIV, 0, 2)
#define SEL_PLL_PACK(offset, bitpos, size) \
(((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
#define SEL_PLL3_3 SEL_PLL_PACK(CPG_PL3_SSEL, 8, 1)
#define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1)
+#define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1)
#define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2)
#define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2)
@@ -168,6 +174,9 @@ struct rzg2l_reset {
* @num_mod_clks: Number of entries in mod_clks[]
* @num_hw_mod_clks: Number of Module Clocks supported by the hardware
*
+ * @resets: Array of Module Reset definitions
+ * @num_resets: Number of entries in resets[]
+ *
* @crit_mod_clks: Array with Module Clock IDs of critical clocks that
* should not be disabled without a knowledgeable driver
* @num_crit_mod_clks: Number of entries in crit_mod_clks[]
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index c46cf11e4d0b..0df74916a895 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -16,7 +16,9 @@ obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5-subcmu.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos-arm64.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 7f20d9aedaa9..3e62ade120c5 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -400,7 +400,7 @@ static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
}
/* helper function to register a CPU clock */
-int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
+static int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
unsigned int lookup_id, const char *name,
const struct clk_hw *parent, const struct clk_hw *alt_parent,
unsigned long offset, const struct exynos_cpuclk_cfg_data *cfg,
diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h
index af74686db9ef..fc9f67a3b22e 100644
--- a/drivers/clk/samsung/clk-cpu.h
+++ b/drivers/clk/samsung/clk-cpu.h
@@ -62,11 +62,4 @@ struct exynos_cpuclk {
#define CLK_CPU_HAS_E5433_REGS_LAYOUT (1 << 2)
};
-int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
- unsigned int lookup_id, const char *name,
- const struct clk_hw *parent, const struct clk_hw *alt_parent,
- unsigned long offset,
- const struct exynos_cpuclk_cfg_data *cfg,
- unsigned long num_cfgs, unsigned long flags);
-
#endif /* __SAMSUNG_CLK_CPU_H */
diff --git a/drivers/clk/samsung/clk-exynos-arm64.c b/drivers/clk/samsung/clk-exynos-arm64.c
new file mode 100644
index 000000000000..b921b9a1134a
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos-arm64.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Copyright (C) 2021 Dávid Virág <virag.david003@gmail.com>
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ * Author: Dávid Virág <virag.david003@gmail.com>
+ *
+ * This file contains shared functions used by some arm64 Exynos SoCs,
+ * such as Exynos7885 or Exynos850, to register and initialize CMUs.
+ */
+#include <linux/clk.h>
+#include <linux/of_address.h>
+
+#include "clk-exynos-arm64.h"
+
+/* Gate register bits */
+#define GATE_MANUAL BIT(20)
+#define GATE_ENABLE_HWACG BIT(28)
+
+/* Gate register offsets range */
+#define GATE_OFF_START 0x2000
+#define GATE_OFF_END 0x2fff
+
+/**
+ * exynos_arm64_init_clocks - Set the initial clock configuration
+ * @np: CMU device tree node with "reg" property (CMU addr)
+ * @reg_offs: Register offsets array for clocks to init
+ * @reg_offs_len: Number of register offsets in reg_offs array
+ *
+ * Set manual control mode for all gate clocks.
+ */
+static void __init exynos_arm64_init_clocks(struct device_node *np,
+ const unsigned long *reg_offs, size_t reg_offs_len)
+{
+ void __iomem *reg_base;
+ size_t i;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ for (i = 0; i < reg_offs_len; ++i) {
+ void __iomem *reg = reg_base + reg_offs[i];
+ u32 val;
+
+ /* Modify only gate clock registers */
+ if (reg_offs[i] < GATE_OFF_START || reg_offs[i] > GATE_OFF_END)
+ continue;
+
+ val = readl(reg);
+ val |= GATE_MANUAL;
+ val &= ~GATE_ENABLE_HWACG;
+ writel(val, reg);
+ }
+
+ iounmap(reg_base);
+}
+
+/**
+ * exynos_arm64_register_cmu - Register specified Exynos CMU domain
+ *		called from a platform driver's probe function
+ * called from platform driver probe function
+ * @np: CMU device tree node
+ * @cmu: CMU data
+ *
+ * Register the specified CMU domain, which involves the following steps:
+ *
+ * 1. Enable the parent clock of the @cmu CMU
+ * 2. Set the initial register configuration for the @cmu CMU clocks
+ * 3. Register the @cmu CMU clocks using the Samsung clock framework API
+ */
+void __init exynos_arm64_register_cmu(struct device *dev,
+ struct device_node *np, const struct samsung_cmu_info *cmu)
+{
+	/* Keep CMU parent clock running (needed for CMU register access) */
+ if (cmu->clk_name) {
+ struct clk *parent_clk;
+
+ if (dev)
+ parent_clk = clk_get(dev, cmu->clk_name);
+ else
+ parent_clk = of_clk_get_by_name(np, cmu->clk_name);
+
+ if (IS_ERR(parent_clk)) {
+ pr_err("%s: could not find bus clock %s; err = %ld\n",
+ __func__, cmu->clk_name, PTR_ERR(parent_clk));
+ } else {
+ clk_prepare_enable(parent_clk);
+ }
+ }
+
+ exynos_arm64_init_clocks(np, cmu->clk_regs, cmu->nr_clk_regs);
+ samsung_cmu_register_one(np, cmu);
+}
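exynos_arm64_register_cmu() is written to run both at early init (dev == NULL, as CMU_TOP does below via CLK_OF_DECLARE) and from a platform driver's probe, where clk_get() is used for the parent clock. A hedged sketch of the probe-time path; my_cmu_probe is hypothetical and assumes the match data carries a samsung_cmu_info:

	static int my_cmu_probe(struct platform_device *pdev)
	{
		const struct samsung_cmu_info *info;

		/* Hypothetical: match table .data points at the CMU data */
		info = of_device_get_match_data(&pdev->dev);
		exynos_arm64_register_cmu(&pdev->dev, pdev->dev.of_node, info);

		return 0;
	}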
diff --git a/drivers/clk/samsung/clk-exynos-arm64.h b/drivers/clk/samsung/clk-exynos-arm64.h
new file mode 100644
index 000000000000..0dd174693935
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos-arm64.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Copyright (C) 2021 Dávid Virág <virag.david003@gmail.com>
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ * Author: Dávid Virág <virag.david003@gmail.com>
+ *
+ * This file contains shared functions used by some arm64 Exynos SoCs,
+ * such as Exynos7885 or Exynos850, to register and initialize CMUs.
+ */
+
+#ifndef __CLK_EXYNOS_ARM64_H
+#define __CLK_EXYNOS_ARM64_H
+
+#include "clk.h"
+
+void exynos_arm64_register_cmu(struct device *dev,
+ struct device_node *np, const struct samsung_cmu_info *cmu);
+
+#endif /* __CLK_EXYNOS_ARM64_H */
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 17df7f9755aa..6cc65ccf867c 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -748,6 +748,31 @@ static const struct samsung_pll_clock exynos3250_plls[] __initconst = {
UPLL_LOCK, UPLL_CON0, exynos3250_pll_rates),
};
+#define E3250_CPU_DIV0(apll, pclk_dbg, atb, corem) \
+ (((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) | \
+ ((corem) << 4))
+#define E3250_CPU_DIV1(hpm, copy) \
+ (((hpm) << 4) | ((copy) << 0))
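+/*
+ * e.g. the 1 GHz row: E3250_CPU_DIV0(1, 7, 4, 1) packs the APLL,
+ * PCLK_DBG, ATB and COREM dividers into the single word 0x01740010.
+ */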
+
+static const struct exynos_cpuclk_cfg_data e3250_armclk_d[] __initconst = {
+ { 1000000, E3250_CPU_DIV0(1, 7, 4, 1), E3250_CPU_DIV1(7, 7), },
+ { 900000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 800000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 700000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 600000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 500000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 400000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 300000, E3250_CPU_DIV0(1, 5, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 200000, E3250_CPU_DIV0(1, 3, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 100000, E3250_CPU_DIV0(1, 1, 1, 1), E3250_CPU_DIV1(7, 7), },
+ { 0 },
+};
+
+static const struct samsung_cpu_clock exynos3250_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL_USER_C,
+ CLK_CPU_HAS_DIV1, 0x14200, e3250_armclk_d),
+};
+
static void __init exynos3_core_down_clock(void __iomem *reg_base)
{
unsigned int tmp;
@@ -780,46 +805,21 @@ static const struct samsung_cmu_info cmu_info __initconst = {
.nr_gate_clks = ARRAY_SIZE(gate_clks),
.fixed_factor_clks = fixed_factor_clks,
.nr_fixed_factor_clks = ARRAY_SIZE(fixed_factor_clks),
+ .cpu_clks = exynos3250_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(exynos3250_cpu_clks),
.nr_clk_ids = CLK_NR_CLKS,
.clk_regs = exynos3250_cmu_clk_regs,
.nr_clk_regs = ARRAY_SIZE(exynos3250_cmu_clk_regs),
};
-#define E3250_CPU_DIV0(apll, pclk_dbg, atb, corem) \
- (((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) | \
- ((corem) << 4))
-#define E3250_CPU_DIV1(hpm, copy) \
- (((hpm) << 4) | ((copy) << 0))
-
-static const struct exynos_cpuclk_cfg_data e3250_armclk_d[] __initconst = {
- { 1000000, E3250_CPU_DIV0(1, 7, 4, 1), E3250_CPU_DIV1(7, 7), },
- { 900000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 800000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 700000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 600000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 500000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 400000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 300000, E3250_CPU_DIV0(1, 5, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 200000, E3250_CPU_DIV0(1, 3, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 100000, E3250_CPU_DIV0(1, 1, 1, 1), E3250_CPU_DIV1(7, 7), },
- { 0 },
-};
-
static void __init exynos3250_cmu_init(struct device_node *np)
{
struct samsung_clk_provider *ctx;
- struct clk_hw **hws;
ctx = samsung_cmu_register_one(np, &cmu_info);
if (!ctx)
return;
- hws = ctx->clk_data.hws;
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_MOUT_MPLL_USER_C],
- 0x14200, e3250_armclk_d, ARRAY_SIZE(e3250_armclk_d),
- CLK_CPU_HAS_DIV1);
-
exynos3_core_down_clock(ctx->reg_base);
}
CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index bf13e29a655c..22009cb53428 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -437,7 +437,7 @@ static const struct samsung_mux_clock exynos4_mux_clks[] __initconst = {
/* list of mux clocks supported in exynos4210 soc */
static const struct samsung_mux_clock exynos4210_mux_early[] __initconst = {
- MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
+ MUX(CLK_MOUT_VPLLSRC, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
};
static const struct samsung_mux_clock exynos4210_mux_clks[] __initconst = {
@@ -603,7 +603,7 @@ static const struct samsung_div_clock exynos4_div_clks[] __initconst = {
DIV(0, "div_periph", "div_core2", DIV_CPU0, 12, 3),
DIV(0, "div_atb", "mout_core", DIV_CPU0, 16, 3),
DIV(0, "div_pclk_dbg", "div_atb", DIV_CPU0, 20, 3),
- DIV(0, "div_core2", "div_core", DIV_CPU0, 28, 3),
+ DIV(CLK_DIV_CORE2, "div_core2", "div_core", DIV_CPU0, 28, 3),
DIV(0, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
DIV(0, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
DIV(0, "div_clkout_cpu", "mout_clkout_cpu", CLKOUT_CMU_CPU, 8, 6),
@@ -1228,6 +1228,16 @@ static const struct exynos_cpuclk_cfg_data e4412_armclk_d[] __initconst = {
{ 0 },
};
+static const struct samsung_cpu_clock exynos4210_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_SCLK_MPLL,
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1, 0x14200, e4210_armclk_d),
+};
+
+static const struct samsung_cpu_clock exynos4412_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL_USER_C,
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1, 0x14200, e4412_armclk_d),
+};
+
/* register exynos4 clocks */
static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
@@ -1254,21 +1264,21 @@ static void __init exynos4_clk_init(struct device_node *np,
samsung_clk_register_mux(ctx, exynos4210_mux_early,
ARRAY_SIZE(exynos4210_mux_early));
- if (_get_rate("fin_pll") == 24000000) {
+ if (clk_hw_get_rate(hws[CLK_FIN_PLL]) == 24000000) {
exynos4210_plls[apll].rate_table =
exynos4210_apll_rates;
exynos4210_plls[epll].rate_table =
exynos4210_epll_rates;
}
- if (_get_rate("mout_vpllsrc") == 24000000)
+ if (clk_hw_get_rate(hws[CLK_MOUT_VPLLSRC]) == 24000000)
exynos4210_plls[vpll].rate_table =
exynos4210_vpll_rates;
samsung_clk_register_pll(ctx, exynos4210_plls,
ARRAY_SIZE(exynos4210_plls), reg_base);
} else {
- if (_get_rate("fin_pll") == 24000000) {
+ if (clk_hw_get_rate(hws[CLK_FIN_PLL]) == 24000000) {
exynos4x12_plls[apll].rate_table =
exynos4x12_apll_rates;
exynos4x12_plls[epll].rate_table =
@@ -1304,10 +1314,8 @@ static void __init exynos4_clk_init(struct device_node *np,
samsung_clk_register_fixed_factor(ctx,
exynos4210_fixed_factor_clks,
ARRAY_SIZE(exynos4210_fixed_factor_clks));
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_SCLK_MPLL], 0x14200,
- e4210_armclk_d, ARRAY_SIZE(e4210_armclk_d),
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+ samsung_clk_register_cpu(ctx, exynos4210_cpu_clks,
+ ARRAY_SIZE(exynos4210_cpu_clks));
} else {
samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
ARRAY_SIZE(exynos4x12_mux_clks));
@@ -1318,11 +1326,8 @@ static void __init exynos4_clk_init(struct device_node *np,
samsung_clk_register_fixed_factor(ctx,
exynos4x12_fixed_factor_clks,
ARRAY_SIZE(exynos4x12_fixed_factor_clks));
-
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_MOUT_MPLL_USER_C], 0x14200,
- e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+ samsung_clk_register_cpu(ctx, exynos4412_cpu_clks,
+ ARRAY_SIZE(exynos4412_cpu_clks));
}
if (soc == EXYNOS4X12)
@@ -1344,9 +1349,11 @@ static void __init exynos4_clk_init(struct device_node *np,
pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n"
"\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n",
exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
- _get_rate("sclk_apll"), _get_rate("sclk_mpll"),
- _get_rate("sclk_epll"), _get_rate("sclk_vpll"),
- _get_rate("div_core2"));
+ clk_hw_get_rate(hws[CLK_SCLK_APLL]),
+ clk_hw_get_rate(hws[CLK_SCLK_MPLL]),
+ clk_hw_get_rate(hws[CLK_SCLK_EPLL]),
+ clk_hw_get_rate(hws[CLK_SCLK_VPLL]),
+ clk_hw_get_rate(hws[CLK_DIV_CORE2]));
}
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 06588fab408a..113df773ee44 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -239,7 +239,7 @@ static const struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __
};
static const struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initconst = {
- MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
+ MUX(CLK_MOUT_VPLLSRC, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
};
static const struct samsung_mux_clock exynos5250_mux_clks[] __initconst = {
@@ -351,7 +351,7 @@ static const struct samsung_div_clock exynos5250_div_clks[] __initconst = {
*/
DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(0, "div_apll", "mout_apll", DIV_CPU0, 24, 3),
- DIV(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3),
+ DIV(CLK_DIV_ARM2, "div_arm2", "div_arm", DIV_CPU0, 28, 3),
/*
* CMU_TOP
@@ -772,6 +772,11 @@ static const struct exynos_cpuclk_cfg_data exynos5250_armclk_d[] __initconst = {
{ 0 },
};
+static const struct samsung_cpu_clock exynos5250_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL, CLK_CPU_HAS_DIV1, 0x200,
+ exynos5250_armclk_d),
+};
+
static const struct of_device_id ext_clk_match[] __initconst = {
{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
{ },
@@ -801,12 +806,12 @@ static void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_register_mux(ctx, exynos5250_pll_pmux_clks,
ARRAY_SIZE(exynos5250_pll_pmux_clks));
- if (_get_rate("fin_pll") == 24 * MHZ) {
+ if (clk_hw_get_rate(hws[CLK_FIN_PLL]) == 24 * MHZ) {
exynos5250_plls[epll].rate_table = epll_24mhz_tbl;
exynos5250_plls[apll].rate_table = apll_24mhz_tbl;
}
- if (_get_rate("mout_vpllsrc") == 24 * MHZ)
+ if (clk_hw_get_rate(hws[CLK_MOUT_VPLLSRC]) == 24 * MHZ)
exynos5250_plls[vpll].rate_table = vpll_24mhz_tbl;
samsung_clk_register_pll(ctx, exynos5250_plls,
@@ -822,10 +827,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
ARRAY_SIZE(exynos5250_div_clks));
samsung_clk_register_gate(ctx, exynos5250_gate_clks,
ARRAY_SIZE(exynos5250_gate_clks));
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_MOUT_MPLL], 0x200,
- exynos5250_armclk_d, ARRAY_SIZE(exynos5250_armclk_d),
- CLK_CPU_HAS_DIV1);
+ samsung_clk_register_cpu(ctx, exynos5250_cpu_clks,
+ ARRAY_SIZE(exynos5250_cpu_clks));
/*
* Enable arm clock down (in idle) and set arm divider
@@ -855,6 +858,6 @@ static void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_of_add_provider(np, ctx);
pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
- _get_rate("div_arm2"));
+ clk_hw_get_rate(hws[CLK_DIV_ARM2]));
}
CLK_OF_DECLARE_DRIVER(exynos5250_clk, "samsung,exynos5250-clock", exynos5250_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 3ccd4eabd2a6..caad74dee297 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -1551,6 +1551,20 @@ static const struct exynos_cpuclk_cfg_data exynos5420_kfcclk_d[] __initconst = {
{ 0 },
};
+static const struct samsung_cpu_clock exynos5420_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MSPLL_CPU, 0, 0x200,
+ exynos5420_eglclk_d),
+ CPU_CLK(CLK_KFC_CLK, "kfcclk", CLK_MOUT_KPLL, CLK_MOUT_MSPLL_KFC, 0, 0x28200,
+ exynos5420_kfcclk_d),
+};
+
+static const struct samsung_cpu_clock exynos5800_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MSPLL_CPU, 0, 0x200,
+ exynos5800_eglclk_d),
+ CPU_CLK(CLK_KFC_CLK, "kfcclk", CLK_MOUT_KPLL, CLK_MOUT_MSPLL_KFC, 0, 0x28200,
+ exynos5420_kfcclk_d),
+};
+
static const struct of_device_id ext_clk_match[] __initconst = {
{ .compatible = "samsung,exynos5420-oscclk", .data = (void *)0, },
{ },
@@ -1580,7 +1594,7 @@ static void __init exynos5x_clk_init(struct device_node *np,
ARRAY_SIZE(exynos5x_fixed_rate_ext_clks),
ext_clk_match);
- if (_get_rate("fin_pll") == 24 * MHZ) {
+ if (clk_hw_get_rate(hws[CLK_FIN_PLL]) == 24 * MHZ) {
exynos5x_plls[apll].rate_table = exynos5420_pll2550x_24mhz_tbl;
exynos5x_plls[epll].rate_table = exynos5420_epll_24mhz_tbl;
exynos5x_plls[kpll].rate_table = exynos5420_pll2550x_24mhz_tbl;
@@ -1625,17 +1639,12 @@ static void __init exynos5x_clk_init(struct device_node *np,
}
if (soc == EXYNOS5420) {
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_MOUT_MSPLL_CPU], 0x200,
- exynos5420_eglclk_d, ARRAY_SIZE(exynos5420_eglclk_d), 0);
+ samsung_clk_register_cpu(ctx, exynos5420_cpu_clks,
+ ARRAY_SIZE(exynos5420_cpu_clks));
} else {
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_MOUT_MSPLL_CPU], 0x200,
- exynos5800_eglclk_d, ARRAY_SIZE(exynos5800_eglclk_d), 0);
+ samsung_clk_register_cpu(ctx, exynos5800_cpu_clks,
+ ARRAY_SIZE(exynos5800_cpu_clks));
}
- exynos_register_cpu_clock(ctx, CLK_KFC_CLK, "kfcclk",
- hws[CLK_MOUT_KPLL], hws[CLK_MOUT_MSPLL_KFC], 0x28200,
- exynos5420_kfcclk_d, ARRAY_SIZE(exynos5420_kfcclk_d), 0);
samsung_clk_extended_sleep_init(reg_base,
exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c
new file mode 100644
index 000000000000..a7b106302706
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos7885.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Dávid Virág <virag.david003@gmail.com>
+ * Author: Dávid Virág <virag.david003@gmail.com>
+ *
+ * Common Clock Framework support for Exynos7885 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/exynos7885.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x12060000) */
+#define PLL_LOCKTIME_PLL_SHARED0 0x0000
+#define PLL_LOCKTIME_PLL_SHARED1 0x0004
+#define PLL_CON0_PLL_SHARED0 0x0100
+#define PLL_CON0_PLL_SHARED1 0x0120
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_G3D 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_SPI0 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_SPI1 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART0 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART1 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART2 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_USI0 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_USI1 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_USI2 0x1078
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x181c
+#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1820
+#define CLK_CON_DIV_CLKCMU_CORE_G3D 0x1824
+#define CLK_CON_DIV_CLKCMU_PERI_BUS 0x1874
+#define CLK_CON_DIV_CLKCMU_PERI_SPI0 0x1878
+#define CLK_CON_DIV_CLKCMU_PERI_SPI1 0x187c
+#define CLK_CON_DIV_CLKCMU_PERI_UART0 0x1880
+#define CLK_CON_DIV_CLKCMU_PERI_UART1 0x1884
+#define CLK_CON_DIV_CLKCMU_PERI_UART2 0x1888
+#define CLK_CON_DIV_CLKCMU_PERI_USI0 0x188c
+#define CLK_CON_DIV_CLKCMU_PERI_USI1 0x1890
+#define CLK_CON_DIV_CLKCMU_PERI_USI2 0x1894
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x189c
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18a0
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18a4
+#define CLK_CON_DIV_PLL_SHARED0_DIV5 0x18a8
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x18ac
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x18b0
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18b4
+#define CLK_CON_GAT_GATE_CLKCMUC_PERI_UART1 0x2004
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_G3D 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_BUS 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_SPI0 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_SPI1 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_UART0 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_UART2 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_USI0 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_USI1 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_USI2 0x2098
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_G3D,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_SPI0,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_SPI1,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART0,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART1,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART2,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI0,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI1,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI2,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CORE_CCI,
+ CLK_CON_DIV_CLKCMU_CORE_G3D,
+ CLK_CON_DIV_CLKCMU_PERI_BUS,
+ CLK_CON_DIV_CLKCMU_PERI_SPI0,
+ CLK_CON_DIV_CLKCMU_PERI_SPI1,
+ CLK_CON_DIV_CLKCMU_PERI_UART0,
+ CLK_CON_DIV_CLKCMU_PERI_UART1,
+ CLK_CON_DIV_CLKCMU_PERI_UART2,
+ CLK_CON_DIV_CLKCMU_PERI_USI0,
+ CLK_CON_DIV_CLKCMU_PERI_USI1,
+ CLK_CON_DIV_CLKCMU_PERI_USI2,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED0_DIV5,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_GAT_GATE_CLKCMUC_PERI_UART1,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_G3D,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_SPI0,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_SPI1,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART0,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART2,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI0,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI1,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI2,
+};
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON0_PLL_SHARED0,
+ NULL),
+ PLL(pll_1417x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON0_PLL_SHARED1,
+ NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
+PNAME(mout_core_bus_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared0_div3" };
+PNAME(mout_core_cci_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared0_div3" };
+PNAME(mout_core_g3d_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared0_div3" };
+
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_PERI */
+PNAME(mout_peri_bus_p) = { "dout_shared0_div4", "dout_shared1_div4" };
+PNAME(mout_peri_spi0_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_spi1_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_uart0_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_uart1_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_uart2_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_usi0_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_usi1_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_usi2_p) = { "oscclk", "dout_shared0_div4" };
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ /* CORE */
+ MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
+ MUX(CLK_MOUT_CORE_CCI, "mout_core_cci", mout_core_cci_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_CCI, 0, 2),
+ MUX(CLK_MOUT_CORE_G3D, "mout_core_g3d", mout_core_g3d_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_G3D, 0, 2),
+
+ /* PERI */
+ MUX(CLK_MOUT_PERI_BUS, "mout_peri_bus", mout_peri_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_BUS, 0, 1),
+ MUX(CLK_MOUT_PERI_SPI0, "mout_peri_spi0", mout_peri_spi0_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_SPI0, 0, 1),
+ MUX(CLK_MOUT_PERI_SPI1, "mout_peri_spi1", mout_peri_spi1_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_SPI1, 0, 1),
+ MUX(CLK_MOUT_PERI_UART0, "mout_peri_uart0", mout_peri_uart0_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART0, 0, 1),
+ MUX(CLK_MOUT_PERI_UART1, "mout_peri_uart1", mout_peri_uart1_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART1, 0, 1),
+ MUX(CLK_MOUT_PERI_UART2, "mout_peri_uart2", mout_peri_uart2_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART2, 0, 1),
+ MUX(CLK_MOUT_PERI_USI0, "mout_peri_usi0", mout_peri_usi0_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI0, 0, 1),
+ MUX(CLK_MOUT_PERI_USI1, "mout_peri_usi1", mout_peri_usi1_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI1, 0, 1),
+ MUX(CLK_MOUT_PERI_USI2, "mout_peri_usi2", mout_peri_usi2_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI2, 0, 1),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ /* TOP */
+ DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "fout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "fout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "fout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+ DIV(CLK_DOUT_SHARED0_DIV5, "dout_shared0_div5", "fout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV5, 0, 3),
+ DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "fout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "fout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "fout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+
+ /* CORE */
+ DIV(CLK_DOUT_CORE_BUS, "dout_core_bus", "gout_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 3),
+ DIV(CLK_DOUT_CORE_CCI, "dout_core_cci", "gout_core_cci",
+ CLK_CON_DIV_CLKCMU_CORE_CCI, 0, 3),
+ DIV(CLK_DOUT_CORE_G3D, "dout_core_g3d", "gout_core_g3d",
+ CLK_CON_DIV_CLKCMU_CORE_G3D, 0, 3),
+
+ /* PERI */
+ DIV(CLK_DOUT_PERI_BUS, "dout_peri_bus", "gout_peri_bus",
+ CLK_CON_DIV_CLKCMU_PERI_BUS, 0, 4),
+ DIV(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "gout_peri_spi0",
+ CLK_CON_DIV_CLKCMU_PERI_SPI0, 0, 6),
+ DIV(CLK_DOUT_PERI_SPI1, "dout_peri_spi1", "gout_peri_spi1",
+ CLK_CON_DIV_CLKCMU_PERI_SPI1, 0, 6),
+ DIV(CLK_DOUT_PERI_UART0, "dout_peri_uart0", "gout_peri_uart0",
+ CLK_CON_DIV_CLKCMU_PERI_UART0, 0, 4),
+ DIV(CLK_DOUT_PERI_UART1, "dout_peri_uart1", "gout_peri_uart1",
+ CLK_CON_DIV_CLKCMU_PERI_UART1, 0, 4),
+ DIV(CLK_DOUT_PERI_UART2, "dout_peri_uart2", "gout_peri_uart2",
+ CLK_CON_DIV_CLKCMU_PERI_UART2, 0, 4),
+ DIV(CLK_DOUT_PERI_USI0, "dout_peri_usi0", "gout_peri_usi0",
+ CLK_CON_DIV_CLKCMU_PERI_USI0, 0, 4),
+ DIV(CLK_DOUT_PERI_USI1, "dout_peri_usi1", "gout_peri_usi1",
+ CLK_CON_DIV_CLKCMU_PERI_USI1, 0, 4),
+ DIV(CLK_DOUT_PERI_USI2, "dout_peri_usi2", "gout_peri_usi2",
+ CLK_CON_DIV_CLKCMU_PERI_USI2, 0, 4),
+};
+
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ /* CORE */
+ GATE(CLK_GOUT_CORE_BUS, "gout_core_bus", "mout_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_CCI, "gout_core_cci", "mout_core_cci",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_CCI, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_G3D, "gout_core_g3d", "mout_core_g3d",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_G3D, 21, 0, 0),
+
+ /* PERI */
+ GATE(CLK_GOUT_PERI_BUS, "gout_peri_bus", "mout_peri_bus",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_SPI0, "gout_peri_spi0", "mout_peri_spi0",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_SPI0, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_SPI1, "gout_peri_spi1", "mout_peri_spi1",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_SPI1, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_UART0, "gout_peri_uart0", "mout_peri_uart0",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART0, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_UART1, "gout_peri_uart1", "mout_peri_uart1",
+ CLK_CON_GAT_GATE_CLKCMUC_PERI_UART1, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_UART2, "gout_peri_uart2", "mout_peri_uart2",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART2, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_USI0, "gout_peri_usi0", "mout_peri_usi0",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI0, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_USI1, "gout_peri_usi1", "mout_peri_usi1",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI1, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_USI2, "gout_peri_usi2", "mout_peri_usi2",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI2, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = TOP_NR_CLK,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos7885_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos7885_cmu_top, "samsung,exynos7885-cmu-top",
+ exynos7885_cmu_top_init);
+
+/* ---- CMU_PERI ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_PERI (0x10010000) */
+#define PLL_CON0_MUX_CLKCMU_PERI_BUS_USER 0x0100
+#define PLL_CON0_MUX_CLKCMU_PERI_SPI0_USER 0x0120
+#define PLL_CON0_MUX_CLKCMU_PERI_SPI1_USER 0x0140
+#define PLL_CON0_MUX_CLKCMU_PERI_UART0_USER 0x0160
+#define PLL_CON0_MUX_CLKCMU_PERI_UART1_USER 0x0180
+#define PLL_CON0_MUX_CLKCMU_PERI_UART2_USER 0x01a0
+#define PLL_CON0_MUX_CLKCMU_PERI_USI0_USER 0x01c0
+#define PLL_CON0_MUX_CLKCMU_PERI_USI1_USER 0x01e0
+#define PLL_CON0_MUX_CLKCMU_PERI_USI2_USER 0x0200
+#define CLK_CON_GAT_GOUT_PERI_GPIO_TOP_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK 0x202c
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK 0x2030
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_3_PCLK 0x2034
+#define CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK 0x2044
+#define CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK 0x2048
+#define CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_PERI_I2C_7_PCLK 0x2054
+#define CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK 0x205c
+#define CLK_CON_GAT_GOUT_PERI_SPI_0_EXT_CLK 0x2060
+#define CLK_CON_GAT_GOUT_PERI_SPI_1_PCLK 0x2064
+#define CLK_CON_GAT_GOUT_PERI_SPI_1_EXT_CLK 0x2068
+#define CLK_CON_GAT_GOUT_PERI_UART_0_EXT_UCLK 0x206c
+#define CLK_CON_GAT_GOUT_PERI_UART_0_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_PERI_UART_1_EXT_UCLK 0x2074
+#define CLK_CON_GAT_GOUT_PERI_UART_1_PCLK 0x2078
+#define CLK_CON_GAT_GOUT_PERI_UART_2_EXT_UCLK 0x207c
+#define CLK_CON_GAT_GOUT_PERI_UART_2_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_PERI_USI0_PCLK 0x2084
+#define CLK_CON_GAT_GOUT_PERI_USI0_SCLK 0x2088
+#define CLK_CON_GAT_GOUT_PERI_USI1_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_PERI_USI1_SCLK 0x2090
+#define CLK_CON_GAT_GOUT_PERI_USI2_PCLK 0x2094
+#define CLK_CON_GAT_GOUT_PERI_USI2_SCLK 0x2098
+#define CLK_CON_GAT_GOUT_PERI_MCT_PCLK 0x20a0
+#define CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK 0x20b0
+#define CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER0_PCLK 0x20b4
+#define CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER1_PCLK 0x20b8
+
+static const unsigned long peri_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERI_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI0_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI1_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_UART0_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_UART1_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_UART2_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_USI0_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_USI1_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_USI2_USER,
+ CLK_CON_GAT_GOUT_PERI_GPIO_TOP_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_3_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_7_PCLK,
+ CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_0_EXT_CLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_1_EXT_CLK,
+ CLK_CON_GAT_GOUT_PERI_UART_0_EXT_UCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_1_EXT_UCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_2_EXT_UCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_USI0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_USI0_SCLK,
+ CLK_CON_GAT_GOUT_PERI_USI1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_USI1_SCLK,
+ CLK_CON_GAT_GOUT_PERI_USI2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_USI2_SCLK,
+ CLK_CON_GAT_GOUT_PERI_MCT_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK,
+ CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER1_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_PERI */
+PNAME(mout_peri_bus_user_p) = { "oscclk", "dout_peri_bus" };
+PNAME(mout_peri_spi0_user_p) = { "oscclk", "dout_peri_spi0" };
+PNAME(mout_peri_spi1_user_p) = { "oscclk", "dout_peri_spi1" };
+PNAME(mout_peri_uart0_user_p) = { "oscclk", "dout_peri_uart0" };
+PNAME(mout_peri_uart1_user_p) = { "oscclk", "dout_peri_uart1" };
+PNAME(mout_peri_uart2_user_p) = { "oscclk", "dout_peri_uart2" };
+PNAME(mout_peri_usi0_user_p) = { "oscclk", "dout_peri_usi0" };
+PNAME(mout_peri_usi1_user_p) = { "oscclk", "dout_peri_usi1" };
+PNAME(mout_peri_usi2_user_p) = { "oscclk", "dout_peri_usi2" };
+
+static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERI_BUS_USER, "mout_peri_bus_user", mout_peri_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_PERI_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_SPI0_USER, "mout_peri_spi0_user", mout_peri_spi0_user_p,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI0_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_SPI1_USER, "mout_peri_spi1_user", mout_peri_spi1_user_p,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI1_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_UART0_USER, "mout_peri_uart0_user",
+ mout_peri_uart0_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART0_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_UART1_USER, "mout_peri_uart1_user",
+ mout_peri_uart1_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART1_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_UART2_USER, "mout_peri_uart2_user",
+ mout_peri_uart2_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART2_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_USI0_USER, "mout_peri_usi0_user",
+ mout_peri_usi0_user_p, PLL_CON0_MUX_CLKCMU_PERI_USI0_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_USI1_USER, "mout_peri_usi1_user",
+ mout_peri_usi1_user_p, PLL_CON0_MUX_CLKCMU_PERI_USI1_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_USI2_USER, "mout_peri_usi2_user",
+ mout_peri_usi2_user_p, PLL_CON0_MUX_CLKCMU_PERI_USI2_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
+ /* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
+ GATE(CLK_GOUT_GPIO_TOP_PCLK, "gout_gpio_top_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_GPIO_TOP_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI2C0_PCLK, "gout_hsi2c0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C1_PCLK, "gout_hsi2c1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C2_PCLK, "gout_hsi2c2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C3_PCLK, "gout_hsi2c3_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_3_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C0_PCLK, "gout_i2c0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C1_PCLK, "gout_i2c1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C2_PCLK, "gout_i2c2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C3_PCLK, "gout_i2c3_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C4_PCLK, "gout_i2c4_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C5_PCLK, "gout_i2c5_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C6_PCLK, "gout_i2c6_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C7_PCLK, "gout_i2c7_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_7_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PWM_MOTOR_PCLK, "gout_pwm_motor_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI0_EXT_CLK, "gout_spi0_ipclk", "mout_peri_spi0_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_0_EXT_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI1_PCLK, "gout_spi1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI1_EXT_CLK, "gout_spi1_ipclk", "mout_peri_spi1_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_1_EXT_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART0_EXT_UCLK, "gout_uart0_ext_uclk", "mout_peri_uart0_user",
+ CLK_CON_GAT_GOUT_PERI_UART_0_EXT_UCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART0_PCLK, "gout_uart0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_UART_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART1_EXT_UCLK, "gout_uart1_ext_uclk", "mout_peri_uart1_user",
+ CLK_CON_GAT_GOUT_PERI_UART_1_EXT_UCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART1_PCLK, "gout_uart1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_UART_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART2_EXT_UCLK, "gout_uart2_ext_uclk", "mout_peri_uart2_user",
+ CLK_CON_GAT_GOUT_PERI_UART_2_EXT_UCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART2_PCLK, "gout_uart2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_UART_2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI0_PCLK, "gout_usi0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_USI0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI0_SCLK, "gout_usi0_sclk", "mout_peri_usi0_user",
+ CLK_CON_GAT_GOUT_PERI_USI0_SCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI1_PCLK, "gout_usi1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_USI1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI1_SCLK, "gout_usi1_sclk", "mout_peri_usi1_user",
+ CLK_CON_GAT_GOUT_PERI_USI1_SCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI2_PCLK, "gout_usi2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_USI2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI2_SCLK, "gout_usi2_sclk", "mout_peri_usi2_user",
+ CLK_CON_GAT_GOUT_PERI_USI2_SCLK, 21, 0, 0),
+ GATE(CLK_GOUT_MCT_PCLK, "gout_mct_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_MCT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_WDT0_PCLK, "gout_wdt0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_WDT1_PCLK, "gout_wdt1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER1_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peri_cmu_info __initconst = {
+ .mux_clks = peri_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peri_mux_clks),
+ .gate_clks = peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peri_gate_clks),
+ .nr_clk_ids = PERI_NR_CLK,
+ .clk_regs = peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peri_clk_regs),
+ .clk_name = "dout_peri_bus",
+};
+
+static void __init exynos7885_cmu_peri_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &peri_cmu_info);
+}
+
+/* Register CMU_PERI early, as it's needed for MCT timer */
+CLK_OF_DECLARE(exynos7885_cmu_peri, "samsung,exynos7885-cmu-peri",
+ exynos7885_cmu_peri_init);
+
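+/*
+ * Illustrative sketch, not part of this patch: a peripheral driver
+ * consumes one of the PERI gates above through the generic clk API,
+ * assuming its DT node carries e.g.
+ * clocks = <&cmu_peri CLK_GOUT_UART0_PCLK> and that <linux/clk.h> is
+ * included. Everything inside the #if 0 block is made up for the
+ * example; only devm_clk_get() and clk_prepare_enable() are real
+ * kernel APIs.
+ */
+#if 0
+static int example_uart_probe(struct platform_device *pdev)
+{
+	struct clk *pclk;
+
+	/* con_id NULL: take the first clock from the "clocks" property */
+	pclk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pclk))
+		return PTR_ERR(pclk);
+
+	/* ungates bit 21 of CLK_CON_GAT_GOUT_PERI_UART_0_PCLK */
+	return clk_prepare_enable(pclk);
+}
+#endif
+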
+/* ---- CMU_CORE ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_CORE (0x12000000) */
+#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0100
+#define PLL_CON0_MUX_CLKCMU_CORE_CCI_USER 0x0120
+#define PLL_CON0_MUX_CLKCMU_CORE_G3D_USER 0x0140
+#define CLK_CON_MUX_MUX_CLK_CORE_GIC 0x1000
+#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
+#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2054
+#define CLK_CON_GAT_GOUT_CORE_GIC400_CLK 0x2058
+
+static const unsigned long core_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_CORE_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_CORE_CCI_USER,
+ PLL_CON0_MUX_CLKCMU_CORE_G3D_USER,
+ CLK_CON_MUX_MUX_CLK_CORE_GIC,
+ CLK_CON_DIV_DIV_CLK_CORE_BUSP,
+ CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK,
+ CLK_CON_GAT_GOUT_CORE_GIC400_CLK,
+};
+
+/* List of parent clocks for Muxes in CMU_CORE */
+PNAME(mout_core_bus_user_p) = { "oscclk", "dout_core_bus" };
+PNAME(mout_core_cci_user_p) = { "oscclk", "dout_core_cci" };
+PNAME(mout_core_g3d_user_p) = { "oscclk", "dout_core_g3d" };
+PNAME(mout_core_gic_p) = { "dout_core_busp", "oscclk" };
+
+static const struct samsung_mux_clock core_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CORE_BUS_USER, "mout_core_bus_user", mout_core_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_CCI_USER, "mout_core_cci_user", mout_core_cci_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_CCI_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_G3D_USER, "mout_core_g3d_user", mout_core_g3d_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_G3D_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_GIC, "mout_core_gic", mout_core_gic_p,
+ CLK_CON_MUX_MUX_CLK_CORE_GIC, 0, 1),
+};
+
+static const struct samsung_div_clock core_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CORE_BUSP, "dout_core_busp", "mout_core_bus_user",
+ CLK_CON_DIV_DIV_CLK_CORE_BUSP, 0, 2),
+};
+
+static const struct samsung_gate_clock core_gate_clks[] __initconst = {
+ /* CCI (interconnect) clock must always be running */
+ GATE(CLK_GOUT_CCI_ACLK, "gout_cci_aclk", "mout_core_cci_user",
+ CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK, 21, CLK_IS_CRITICAL, 0),
+ /* GIC (interrupt controller) clock must always be running */
+ GATE(CLK_GOUT_GIC400_CLK, "gout_gic400_clk", "mout_core_gic",
+ CLK_CON_GAT_GOUT_CORE_GIC400_CLK, 21, CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info core_cmu_info __initconst = {
+ .mux_clks = core_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(core_mux_clks),
+ .div_clks = core_div_clks,
+ .nr_div_clks = ARRAY_SIZE(core_div_clks),
+ .gate_clks = core_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(core_gate_clks),
+ .nr_clk_ids = CORE_NR_CLK,
+ .clk_regs = core_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(core_clk_regs),
+ .clk_name = "dout_core_bus",
+};
+
+/* ---- platform_driver ----------------------------------------------------- */
+
+static int __init exynos7885_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos7885_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos7885-cmu-core",
+ .data = &core_cmu_info,
+ }, {
+ },
+};
+
+static struct platform_driver exynos7885_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos7885-cmu",
+ .of_match_table = exynos7885_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos7885_cmu_probe,
+};
+
+static int __init exynos7885_cmu_init(void)
+{
+ return platform_driver_register(&exynos7885_cmu_driver);
+}
+core_initcall(exynos7885_cmu_init);
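+
+/*
+ * Editorial note: CMU_TOP and CMU_PERI above are registered through
+ * CLK_OF_DECLARE(), i.e. from of_clk_init() before timers and the
+ * platform bus come up, because the MCT timer already needs its clocks
+ * at that point. CMU_CORE goes through the platform driver instead,
+ * the usual path for CMUs that may later grow runtime PM support.
+ */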
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index 2294989e244c..cd9725f1dbf7 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -9,56 +9,13 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/exynos850.h>
#include "clk.h"
-
-/* Gate register bits */
-#define GATE_MANUAL BIT(20)
-#define GATE_ENABLE_HWACG BIT(28)
-
-/* Gate register offsets range */
-#define GATE_OFF_START 0x2000
-#define GATE_OFF_END 0x2fff
-
-/**
- * exynos850_init_clocks - Set clocks initial configuration
- * @np: CMU device tree node with "reg" property (CMU addr)
- * @reg_offs: Register offsets array for clocks to init
- * @reg_offs_len: Number of register offsets in reg_offs array
- *
- * Set manual control mode for all gate clocks.
- */
-static void __init exynos850_init_clocks(struct device_node *np,
- const unsigned long *reg_offs, size_t reg_offs_len)
-{
- void __iomem *reg_base;
- size_t i;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
-
- for (i = 0; i < reg_offs_len; ++i) {
- void __iomem *reg = reg_base + reg_offs[i];
- u32 val;
-
- /* Modify only gate clock registers */
- if (reg_offs[i] < GATE_OFF_START || reg_offs[i] > GATE_OFF_END)
- continue;
-
- val = readl(reg);
- val |= GATE_MANUAL;
- val &= ~GATE_ENABLE_HWACG;
- writel(val, reg);
- }
-
- iounmap(reg_base);
-}
+#include "clk-exynos-arm64.h"
/* ---- CMU_TOP ------------------------------------------------------------- */
@@ -72,6 +29,7 @@ static void __init exynos850_init_clocks(struct device_node *np,
#define PLL_CON3_PLL_SHARED0 0x014c
#define PLL_CON0_PLL_SHARED1 0x0180
#define PLL_CON3_PLL_SHARED1 0x018c
+#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1000
#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
#define CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD 0x101c
@@ -83,6 +41,7 @@ static void __init exynos850_init_clocks(struct device_node *np,
#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1070
#define CLK_CON_MUX_MUX_CLKCMU_PERI_IP 0x1074
#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART 0x1078
+#define CLK_CON_DIV_CLKCMU_APM_BUS 0x180c
#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1820
#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1824
#define CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD 0x1828
@@ -100,6 +59,7 @@ static void __init exynos850_init_clocks(struct device_node *np,
#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x1898
#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x189c
#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18a0
+#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2008
#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
#define CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD 0x2024
@@ -122,6 +82,7 @@ static const unsigned long top_clk_regs[] __initconst = {
PLL_CON3_PLL_SHARED0,
PLL_CON0_PLL_SHARED1,
PLL_CON3_PLL_SHARED1,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD,
@@ -133,6 +94,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
CLK_CON_MUX_MUX_CLKCMU_PERI_IP,
CLK_CON_MUX_MUX_CLKCMU_PERI_UART,
+ CLK_CON_DIV_CLKCMU_APM_BUS,
CLK_CON_DIV_CLKCMU_CORE_BUS,
CLK_CON_DIV_CLKCMU_CORE_CCI,
CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD,
@@ -150,6 +112,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_PLL_SHARED1_DIV2,
CLK_CON_DIV_PLL_SHARED1_DIV3,
CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD,
@@ -183,6 +146,8 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" };
PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
PNAME(mout_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_APM */
+PNAME(mout_clkcmu_apm_bus_p) = { "dout_shared0_div4", "pll_shared1_div4" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
PNAME(mout_core_bus_p) = { "dout_shared1_div2", "dout_shared0_div3",
"dout_shared1_div3", "dout_shared0_div4" };
@@ -222,6 +187,10 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_MMC_PLL, "mout_mmc_pll", mout_mmc_pll_p,
PLL_CON0_PLL_MMC, 4, 1),
+ /* APM */
+ MUX(CLK_MOUT_CLKCMU_APM_BUS, "mout_clkcmu_apm_bus",
+ mout_clkcmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
+
/* CORE */
MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
@@ -268,6 +237,10 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2",
CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+ /* APM */
+ DIV(CLK_DOUT_CLKCMU_APM_BUS, "dout_clkcmu_apm_bus",
+ "gout_clkcmu_apm_bus", CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+
/* CORE */
DIV(CLK_DOUT_CORE_BUS, "dout_core_bus", "gout_core_bus",
CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
@@ -310,6 +283,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(CLK_GOUT_CORE_SSS, "gout_core_sss", "mout_core_sss",
CLK_CON_GAT_GATE_CLKCMU_CORE_SSS, 21, 0, 0),
+ /* APM */
+ GATE(CLK_GOUT_CLKCMU_APM_BUS, "gout_clkcmu_apm_bus",
+ "mout_clkcmu_apm_bus", CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, 0, 0),
+
/* DPU */
GATE(CLK_GOUT_DPU, "gout_dpu", "mout_dpu",
CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
@@ -347,13 +324,248 @@ static const struct samsung_cmu_info top_cmu_info __initconst = {
static void __init exynos850_cmu_top_init(struct device_node *np)
{
- exynos850_init_clocks(np, top_clk_regs, ARRAY_SIZE(top_clk_regs));
- samsung_cmu_register_one(np, &top_cmu_info);
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
}
+/* Register CMU_TOP early, as it's a dependency for other early domains */
CLK_OF_DECLARE(exynos850_cmu_top, "samsung,exynos850-cmu-top",
exynos850_cmu_top_init);
+/* ---- CMU_APM ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_APM (0x11800000) */
+#define PLL_CON0_MUX_CLKCMU_APM_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLK_RCO_APM_I3C_USER 0x0610
+#define PLL_CON0_MUX_CLK_RCO_APM_USER 0x0620
+#define PLL_CON0_MUX_DLL_USER 0x0630
+#define CLK_CON_MUX_MUX_CLKCMU_CHUB_BUS 0x1000
+#define CLK_CON_MUX_MUX_CLK_APM_BUS 0x1004
+#define CLK_CON_MUX_MUX_CLK_APM_I3C 0x1008
+#define CLK_CON_DIV_CLKCMU_CHUB_BUS 0x1800
+#define CLK_CON_DIV_DIV_CLK_APM_BUS 0x1804
+#define CLK_CON_DIV_DIV_CLK_APM_I3C 0x1808
+#define CLK_CON_GAT_CLKCMU_CMGP_BUS 0x2000
+#define CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS 0x2014
+#define CLK_CON_GAT_GOUT_APM_APBIF_GPIO_ALIVE_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_APM_APBIF_RTC_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_APM_APBIF_TOP_RTC_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_PCLK 0x2034
+#define CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_SCLK 0x2038
+#define CLK_CON_GAT_GOUT_APM_SPEEDY_APM_PCLK 0x20bc
+#define CLK_CON_GAT_GOUT_APM_SYSREG_APM_PCLK 0x20c0
+
+static const unsigned long apm_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_APM_BUS_USER,
+ PLL_CON0_MUX_CLK_RCO_APM_I3C_USER,
+ PLL_CON0_MUX_CLK_RCO_APM_USER,
+ PLL_CON0_MUX_DLL_USER,
+ CLK_CON_MUX_MUX_CLKCMU_CHUB_BUS,
+ CLK_CON_MUX_MUX_CLK_APM_BUS,
+ CLK_CON_MUX_MUX_CLK_APM_I3C,
+ CLK_CON_DIV_CLKCMU_CHUB_BUS,
+ CLK_CON_DIV_DIV_CLK_APM_BUS,
+ CLK_CON_DIV_DIV_CLK_APM_I3C,
+ CLK_CON_GAT_CLKCMU_CMGP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS,
+ CLK_CON_GAT_GOUT_APM_APBIF_GPIO_ALIVE_PCLK,
+ CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK,
+ CLK_CON_GAT_GOUT_APM_APBIF_RTC_PCLK,
+ CLK_CON_GAT_GOUT_APM_APBIF_TOP_RTC_PCLK,
+ CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_PCLK,
+ CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_SCLK,
+ CLK_CON_GAT_GOUT_APM_SPEEDY_APM_PCLK,
+ CLK_CON_GAT_GOUT_APM_SYSREG_APM_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_APM */
+PNAME(mout_apm_bus_user_p) = { "oscclk_rco_apm", "dout_clkcmu_apm_bus" };
+PNAME(mout_rco_apm_i3c_user_p) = { "oscclk_rco_apm", "clk_rco_i3c_pmic" };
+PNAME(mout_rco_apm_user_p) = { "oscclk_rco_apm", "clk_rco_apm__alv" };
+PNAME(mout_dll_user_p) = { "oscclk_rco_apm", "clk_dll_dco" };
+PNAME(mout_clkcmu_chub_bus_p) = { "mout_apm_bus_user", "mout_dll_user" };
+PNAME(mout_apm_bus_p) = { "mout_rco_apm_user", "mout_apm_bus_user",
+ "mout_dll_user", "oscclk_rco_apm" };
+PNAME(mout_apm_i3c_p) = { "dout_apm_i3c", "mout_rco_apm_i3c_user" };
+
+static const struct samsung_fixed_rate_clock apm_fixed_clks[] __initconst = {
+ FRATE(CLK_RCO_I3C_PMIC, "clk_rco_i3c_pmic", NULL, 0, 491520000),
+ FRATE(OSCCLK_RCO_APM, "oscclk_rco_apm", NULL, 0, 24576000),
+ FRATE(CLK_RCO_APM__ALV, "clk_rco_apm__alv", NULL, 0, 49152000),
+ FRATE(CLK_DLL_DCO, "clk_dll_dco", NULL, 0, 360000000),
+};
+
+static const struct samsung_mux_clock apm_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_APM_BUS_USER, "mout_apm_bus_user", mout_apm_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_APM_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_RCO_APM_I3C_USER, "mout_rco_apm_i3c_user",
+ mout_rco_apm_i3c_user_p, PLL_CON0_MUX_CLK_RCO_APM_I3C_USER, 4, 1),
+ MUX(CLK_MOUT_RCO_APM_USER, "mout_rco_apm_user", mout_rco_apm_user_p,
+ PLL_CON0_MUX_CLK_RCO_APM_USER, 4, 1),
+ MUX(CLK_MOUT_DLL_USER, "mout_dll_user", mout_dll_user_p,
+ PLL_CON0_MUX_DLL_USER, 4, 1),
+ MUX(CLK_MOUT_CLKCMU_CHUB_BUS, "mout_clkcmu_chub_bus",
+ mout_clkcmu_chub_bus_p, CLK_CON_MUX_MUX_CLKCMU_CHUB_BUS, 0, 1),
+ MUX(CLK_MOUT_APM_BUS, "mout_apm_bus", mout_apm_bus_p,
+ CLK_CON_MUX_MUX_CLK_APM_BUS, 0, 2),
+ MUX(CLK_MOUT_APM_I3C, "mout_apm_i3c", mout_apm_i3c_p,
+ CLK_CON_MUX_MUX_CLK_APM_I3C, 0, 1),
+};
+
+static const struct samsung_div_clock apm_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLKCMU_CHUB_BUS, "dout_clkcmu_chub_bus",
+ "gout_clkcmu_chub_bus",
+ CLK_CON_DIV_CLKCMU_CHUB_BUS, 0, 3),
+ DIV(CLK_DOUT_APM_BUS, "dout_apm_bus", "mout_apm_bus",
+ CLK_CON_DIV_DIV_CLK_APM_BUS, 0, 3),
+ DIV(CLK_DOUT_APM_I3C, "dout_apm_i3c", "mout_apm_bus",
+ CLK_CON_DIV_DIV_CLK_APM_I3C, 0, 3),
+};
+
+static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CLKCMU_CMGP_BUS, "gout_clkcmu_cmgp_bus", "dout_apm_bus",
+ CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CLKCMU_CHUB_BUS, "gout_clkcmu_chub_bus",
+ "mout_clkcmu_chub_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_RTC_PCLK, "gout_rtc_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_APBIF_RTC_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_TOP_RTC_PCLK, "gout_top_rtc_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_APBIF_TOP_RTC_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I3C_PCLK, "gout_i3c_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I3C_SCLK, "gout_i3c_sclk", "mout_apm_i3c",
+ CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_SCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPEEDY_PCLK, "gout_speedy_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_SPEEDY_APM_PCLK, 21, 0, 0),
+ /* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
+ GATE(CLK_GOUT_GPIO_ALIVE_PCLK, "gout_gpio_alive_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_APBIF_GPIO_ALIVE_PCLK, 21, CLK_IGNORE_UNUSED,
+ 0),
+ GATE(CLK_GOUT_PMU_ALIVE_PCLK, "gout_pmu_alive_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SYSREG_APM_PCLK, "gout_sysreg_apm_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_SYSREG_APM_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info apm_cmu_info __initconst = {
+ .mux_clks = apm_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(apm_mux_clks),
+ .div_clks = apm_div_clks,
+ .nr_div_clks = ARRAY_SIZE(apm_div_clks),
+ .gate_clks = apm_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(apm_gate_clks),
+ .fixed_clks = apm_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(apm_fixed_clks),
+ .nr_clk_ids = APM_NR_CLK,
+ .clk_regs = apm_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(apm_clk_regs),
+ .clk_name = "dout_clkcmu_apm_bus",
+};
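+/*
+ * Editorial note: .clk_name names the CMU bus clock that the shared
+ * registration helper keeps enabled so the CMU's own registers stay
+ * accessible -- the same job the clk_get() + clk_prepare_enable()
+ * code removed from the probe function further below used to do.
+ */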
+
+/* ---- CMU_CMGP ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_CMGP (0x11c00000) */
+#define CLK_CON_MUX_CLK_CMGP_ADC 0x1000
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0 0x1004
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1 0x1008
+#define CLK_CON_DIV_DIV_CLK_CMGP_ADC 0x1800
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0 0x1804
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1 0x1808
+#define CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S0 0x200c
+#define CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S1 0x2010
+#define CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_CMGP_SYSREG_CMGP_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK 0x2044
+#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK 0x2048
+#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK 0x204c
+#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK 0x2050
+
+static const unsigned long cmgp_clk_regs[] __initconst = {
+ CLK_CON_MUX_CLK_CMGP_ADC,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1,
+ CLK_CON_DIV_DIV_CLK_CMGP_ADC,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1,
+ CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S0,
+ CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S1,
+ CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK,
+ CLK_CON_GAT_GOUT_CMGP_SYSREG_CMGP_PCLK,
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK,
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK,
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK,
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_CMGP */
+PNAME(mout_cmgp_usi0_p) = { "clk_rco_cmgp", "gout_clkcmu_cmgp_bus" };
+PNAME(mout_cmgp_usi1_p) = { "clk_rco_cmgp", "gout_clkcmu_cmgp_bus" };
+PNAME(mout_cmgp_adc_p) = { "oscclk", "dout_cmgp_adc" };
+
+static const struct samsung_fixed_rate_clock cmgp_fixed_clks[] __initconst = {
+ FRATE(CLK_RCO_CMGP, "clk_rco_cmgp", NULL, 0, 49152000),
+};
+
+static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CMGP_ADC, "mout_cmgp_adc", mout_cmgp_adc_p,
+ CLK_CON_MUX_CLK_CMGP_ADC, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1),
+};
+
+static const struct samsung_div_clock cmgp_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CMGP_ADC, "dout_cmgp_adc", "gout_clkcmu_cmgp_bus",
+ CLK_CON_DIV_DIV_CLK_CMGP_ADC, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5),
+ DIV(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5),
+};
+
+static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CMGP_ADC_S0_PCLK, "gout_adc_s0_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S0, 21, 0, 0),
+ GATE(CLK_GOUT_CMGP_ADC_S1_PCLK, "gout_adc_s1_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S1, 21, 0, 0),
+ /* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
+ GATE(CLK_GOUT_CMGP_GPIO_PCLK, "gout_gpio_cmgp_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMGP_USI0_IPCLK, "gout_cmgp_usi0_ipclk", "dout_cmgp_usi0",
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_CMGP_USI0_PCLK, "gout_cmgp_usi0_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_CMGP_USI1_IPCLK, "gout_cmgp_usi1_ipclk", "dout_cmgp_usi1",
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_CMGP_USI1_PCLK, "gout_cmgp_usi1_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SYSREG_CMGP_PCLK, "gout_sysreg_cmgp_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_SYSREG_CMGP_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info cmgp_cmu_info __initconst = {
+ .mux_clks = cmgp_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmgp_mux_clks),
+ .div_clks = cmgp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmgp_div_clks),
+ .gate_clks = cmgp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmgp_gate_clks),
+ .fixed_clks = cmgp_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(cmgp_fixed_clks),
+ .nr_clk_ids = CMGP_NR_CLK,
+ .clk_regs = cmgp_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmgp_clk_regs),
+ .clk_name = "gout_clkcmu_cmgp_bus",
+};
+
/* ---- CMU_HSI ------------------------------------------------------------- */
/* Register Offset definitions for CMU_HSI (0x13400000) */
@@ -413,8 +625,9 @@ static const struct samsung_gate_clock hsi_gate_clks[] __initconst = {
CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50, 21, 0, 0),
GATE(CLK_GOUT_USB_PHY_REF_CLK, "gout_usb_phy_ref", "oscclk",
CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26, 21, 0, 0),
+ /* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
GATE(CLK_GOUT_GPIO_HSI_PCLK, "gout_gpio_hsi_pclk", "mout_hsi_bus_user",
- CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_MMC_CARD_ACLK, "gout_mmc_card_aclk", "mout_hsi_bus_user",
CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin",
@@ -597,9 +810,10 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_WDT1_PCLK, "gout_wdt1_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK, 21, 0, 0),
+ /* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
GATE(CLK_GOUT_GPIO_PERI_PCLK, "gout_gpio_peri_pclk",
"mout_peri_bus_user",
- CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, 21, CLK_IGNORE_UNUSED, 0),
};
static const struct samsung_cmu_info peri_cmu_info __initconst = {
@@ -615,6 +829,15 @@ static const struct samsung_cmu_info peri_cmu_info __initconst = {
.clk_name = "dout_peri_bus",
};
+static void __init exynos850_cmu_peri_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &peri_cmu_info);
+}
+
+/* Register CMU_PERI early, as it's needed for MCT timer */
+CLK_OF_DECLARE(exynos850_cmu_peri, "samsung,exynos850-cmu-peri",
+ exynos850_cmu_peri_init);
+
/* ---- CMU_CORE ------------------------------------------------------------ */
/* Register Offset definitions for CMU_CORE (0x12000000) */
@@ -626,10 +849,12 @@ static const struct samsung_cmu_info peri_cmu_info __initconst = {
#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2038
#define CLK_CON_GAT_GOUT_CORE_GIC_CLK 0x2040
+#define CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK 0x2044
#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK 0x20e8
#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN 0x20ec
#define CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK 0x2128
#define CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK 0x212c
+#define CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK 0x2130
static const unsigned long core_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_CORE_BUS_USER,
@@ -640,10 +865,12 @@ static const unsigned long core_clk_regs[] __initconst = {
CLK_CON_DIV_DIV_CLK_CORE_BUSP,
CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK,
CLK_CON_GAT_GOUT_CORE_GIC_CLK,
+ CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK,
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK,
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN,
CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK,
CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK,
+ CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK,
};
/* List of parent clocks for Muxes in CMU_CORE */
@@ -673,10 +900,12 @@ static const struct samsung_div_clock core_div_clks[] __initconst = {
};
static const struct samsung_gate_clock core_gate_clks[] __initconst = {
+ /* CCI (interconnect) clock must always be running */
GATE(CLK_GOUT_CCI_ACLK, "gout_cci_aclk", "mout_core_cci_user",
- CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK, 21, CLK_IS_CRITICAL, 0),
+ /* GIC (interrupt controller) clock must always be running */
GATE(CLK_GOUT_GIC_CLK, "gout_gic_clk", "mout_core_gic",
- CLK_CON_GAT_GOUT_CORE_GIC_CLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_CORE_GIC_CLK, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_MMC_EMBD_ACLK, "gout_mmc_embd_aclk", "dout_core_busp",
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_MMC_EMBD_SDCLKIN, "gout_mmc_embd_sdclkin",
@@ -686,6 +915,12 @@ static const struct samsung_gate_clock core_gate_clks[] __initconst = {
CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_SSS_PCLK, "gout_sss_pclk", "dout_core_busp",
CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK, 21, 0, 0),
+ /* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
+ GATE(CLK_GOUT_GPIO_CORE_PCLK, "gout_gpio_core_pclk", "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_SYSREG_CORE_PCLK, "gout_sysreg_core_pclk",
+ "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK, 21, 0, 0),
};
static const struct samsung_cmu_info core_cmu_info __initconst = {
@@ -742,8 +977,10 @@ static const struct samsung_div_clock dpu_div_clks[] __initconst = {
};
static const struct samsung_gate_clock dpu_gate_clks[] __initconst = {
+ /* TODO: Should be enabled in DSIM driver */
GATE(CLK_GOUT_DPU_CMU_DPU_PCLK, "gout_dpu_cmu_dpu_pclk",
- "dout_dpu_busp", CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK, 21, 0, 0),
+ "dout_dpu_busp",
+ CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_DPU_DECON0_ACLK, "gout_dpu_decon0_aclk", "mout_dpu_user",
CLK_CON_GAT_GOUT_DPU_ACLK_DECON0, 21, 0, 0),
GATE(CLK_GOUT_DPU_DMA_ACLK, "gout_dpu_dma_aclk", "mout_dpu_user",
@@ -779,37 +1016,24 @@ static int __init exynos850_cmu_probe(struct platform_device *pdev)
{
const struct samsung_cmu_info *info;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
info = of_device_get_match_data(dev);
- exynos850_init_clocks(np, info->clk_regs, info->nr_clk_regs);
- samsung_cmu_register_one(np, info);
-
- /* Keep bus clock running, so it's possible to access CMU registers */
- if (info->clk_name) {
- struct clk *bus_clk;
-
- bus_clk = clk_get(dev, info->clk_name);
- if (IS_ERR(bus_clk)) {
- pr_err("%s: could not find bus clock %s; err = %ld\n",
- __func__, info->clk_name, PTR_ERR(bus_clk));
- } else {
- clk_prepare_enable(bus_clk);
- }
- }
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
return 0;
}
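+
+#if 0
+/*
+ * Editorial sketch, not part of this patch: judging from the code
+ * removed above, the shared helper declared in clk-exynos-arm64.h
+ * plausibly folds the old per-driver steps into one call, roughly as
+ * below. exynos_arm64_enable_bus_clk() and exynos_arm64_init_clocks()
+ * are assumed names, shown only to illustrate the flow.
+ */
+void __init exynos_arm64_register_cmu(struct device *dev,
+		struct device_node *np, const struct samsung_cmu_info *cmu)
+{
+	/* keep the CMU bus clock running so CMU registers stay accessible */
+	exynos_arm64_enable_bus_clk(dev, np, cmu);
+	/* force manual mode on all gates (was exynos850_init_clocks()) */
+	exynos_arm64_init_clocks(np, cmu->clk_regs, cmu->nr_clk_regs);
+	samsung_cmu_register_one(np, cmu);
+}
+#endif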
-/* CMUs which belong to Power Domains and need runtime PM to be implemented */
static const struct of_device_id exynos850_cmu_of_match[] = {
{
+ .compatible = "samsung,exynos850-cmu-apm",
+ .data = &apm_cmu_info,
+ }, {
+ .compatible = "samsung,exynos850-cmu-cmgp",
+ .data = &cmgp_cmu_info,
+ }, {
.compatible = "samsung,exynos850-cmu-hsi",
.data = &hsi_cmu_info,
}, {
- .compatible = "samsung,exynos850-cmu-peri",
- .data = &peri_cmu_info,
- }, {
.compatible = "samsung,exynos850-cmu-core",
.data = &core_cmu_info,
}, {
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 83d1b03647db..70cdc87f714e 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -1476,6 +1476,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_pll35xx_clk_ops;
break;
+ case pll_1417x:
case pll_0822x:
pll->enable_offs = PLL0822X_ENABLE_SHIFT;
pll->lock_offs = PLL0822X_LOCK_STAT_SHIFT;
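+ /*
+ * Editorial note: the new pll_1417x type deliberately shares this
+ * case with pll_0822x, i.e. it reuses the 0822x enable/lock bit
+ * positions and clock ops.
+ */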
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index a739f2b7ae80..c83a20195f6d 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -32,6 +32,7 @@ enum samsung_pll_type {
pll_2550xx,
pll_2650x,
pll_2650xx,
+ pll_1417x,
pll_1450x,
pll_1451x,
pll_1452x,
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index 5831d0606077..3d152a46169b 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -323,6 +323,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
void __iomem *base)
{
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
reg_base = base;
if (np) {
@@ -332,13 +333,14 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
}
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ hws = ctx->clk_data.hws;
/* Register external clocks only in non-dt cases */
if (!np)
s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
if (current_soc == S3C2410) {
- if (_get_rate("xti") == 12 * MHZ) {
+ if (clk_hw_get_rate(hws[XTI]) == 12 * MHZ) {
s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
}
@@ -348,7 +350,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
ARRAY_SIZE(s3c2410_plls), reg_base);
} else { /* S3C2440, S3C2442 */
- if (_get_rate("xti") == 12 * MHZ) {
+ if (clk_hw_get_rate(hws[XTI]) == 12 * MHZ) {
/*
* plls follow different calculation schemes, with the
* upll following the same scheme as the s3c2410 plls
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 56f95b63f71f..d6b432a26d63 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -394,6 +394,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
void __iomem *base)
{
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
reg_base = base;
is_s3c6400 = s3c6400;
@@ -405,6 +406,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
}
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ hws = ctx->clk_data.hws;
/* Register external clocks. */
if (!np)
@@ -459,8 +461,10 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
pr_info("%s clocks: apll = %lu, mpll = %lu\n"
"\tepll = %lu, arm_clk = %lu\n",
is_s3c6400 ? "S3C6400" : "S3C6410",
- _get_rate("fout_apll"), _get_rate("fout_mpll"),
- _get_rate("fout_epll"), _get_rate("armclk"));
+ clk_hw_get_rate(hws[MOUT_APLL]),
+ clk_hw_get_rate(hws[MOUT_MPLL]),
+ clk_hw_get_rate(hws[MOUT_EPLL]),
+ clk_hw_get_rate(hws[ARMCLK]));
}
static void __init s3c6400_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index e7b68ffe36de..4425186bdcab 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -741,8 +741,10 @@ static void __init __s5pv210_clk_init(struct device_node *np,
bool is_s5p6442)
{
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ hws = ctx->clk_data.hws;
samsung_clk_register_mux(ctx, early_mux_clks,
ARRAY_SIZE(early_mux_clks));
@@ -789,8 +791,10 @@ static void __init __s5pv210_clk_init(struct device_node *np,
pr_info("%s clocks: mout_apll = %ld, mout_mpll = %ld\n"
"\tmout_epll = %ld, mout_vpll = %ld\n",
is_s5p6442 ? "S5P6442" : "S5PV210",
- _get_rate("mout_apll"), _get_rate("mout_mpll"),
- _get_rate("mout_epll"), _get_rate("mout_vpll"));
+ clk_hw_get_rate(hws[MOUT_APLL]),
+ clk_hw_get_rate(hws[MOUT_MPLL]),
+ clk_hw_get_rate(hws[MOUT_EPLL]),
+ clk_hw_get_rate(hws[MOUT_VPLL]));
}
static void __init s5pv210_clk_dt_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 336243c6f120..bca4731b14ea 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -268,20 +268,6 @@ void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk);
}
-/* utility function to get the rate of a specified clock */
-unsigned long _get_rate(const char *clk_name)
-{
- struct clk *clk;
-
- clk = __clk_lookup(clk_name);
- if (!clk) {
- pr_err("%s: could not find clock %s\n", __func__, clk_name);
- return 0;
- }
-
- return clk_get_rate(clk);
-}
-
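+/*
+ * Editorial note: callers were converted to read rates straight from
+ * the clk_hw pointers they registered, e.g.
+ *	clk_hw_get_rate(ctx->clk_data.hws[XTI])
+ * instead of the removed name-based lookup
+ *	clk_get_rate(__clk_lookup("xti"))
+ * which avoids scanning every registered clock by string and cannot
+ * silently match an unrelated clock of the same name.
+ */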
#ifdef CONFIG_PM_SLEEP
static int samsung_clk_suspend(void)
{
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 26499e97275b..b46e83a2581f 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -337,54 +337,52 @@ struct samsung_cmu_info {
const char *clk_name;
};
-extern struct samsung_clk_provider *__init samsung_clk_init(
+struct samsung_clk_provider *samsung_clk_init(
struct device_node *np, void __iomem *base,
unsigned long nr_clks);
-extern void __init samsung_clk_of_add_provider(struct device_node *np,
+void samsung_clk_of_add_provider(struct device_node *np,
struct samsung_clk_provider *ctx);
-extern void __init samsung_clk_of_register_fixed_ext(
+void samsung_clk_of_register_fixed_ext(
struct samsung_clk_provider *ctx,
struct samsung_fixed_rate_clock *fixed_rate_clk,
unsigned int nr_fixed_rate_clk,
const struct of_device_id *clk_matches);
-extern void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
+void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
struct clk_hw *clk_hw, unsigned int id);
-extern void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
+void samsung_clk_register_alias(struct samsung_clk_provider *ctx,
const struct samsung_clock_alias *list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_fixed_rate(
+void samsung_clk_register_fixed_rate(
struct samsung_clk_provider *ctx,
const struct samsung_fixed_rate_clock *clk_list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_fixed_factor(
+void samsung_clk_register_fixed_factor(
struct samsung_clk_provider *ctx,
const struct samsung_fixed_factor_clock *list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
+void samsung_clk_register_mux(struct samsung_clk_provider *ctx,
const struct samsung_mux_clock *clk_list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
+void samsung_clk_register_div(struct samsung_clk_provider *ctx,
const struct samsung_div_clock *clk_list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
+void samsung_clk_register_gate(struct samsung_clk_provider *ctx,
const struct samsung_gate_clock *clk_list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
+void samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_list,
unsigned int nr_clk, void __iomem *base);
-extern void samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
+void samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
const struct samsung_cpu_clock *list, unsigned int nr_clk);
-extern struct samsung_clk_provider __init *samsung_cmu_register_one(
+struct samsung_clk_provider *samsung_cmu_register_one(
struct device_node *,
const struct samsung_cmu_info *);
-extern unsigned long _get_rate(const char *clk_name);
-
#ifdef CONFIG_PM_SLEEP
-extern void samsung_clk_extended_sleep_init(void __iomem *reg_base,
+void samsung_clk_extended_sleep_init(void __iomem *reg_base,
const unsigned long *rdump,
unsigned long nr_rdump,
const struct samsung_clk_reg_dump *rsuspend,
@@ -399,13 +397,13 @@ static inline void samsung_clk_extended_sleep_init(void __iomem *reg_base,
#define samsung_clk_sleep_init(reg_base, rdump, nr_rdump) \
samsung_clk_extended_sleep_init(reg_base, rdump, nr_rdump, NULL, 0)
-extern void samsung_clk_save(void __iomem *base,
+void samsung_clk_save(void __iomem *base,
struct samsung_clk_reg_dump *rd,
unsigned int num_regs);
-extern void samsung_clk_restore(void __iomem *base,
+void samsung_clk_restore(void __iomem *base,
const struct samsung_clk_reg_dump *rd,
unsigned int num_regs);
-extern struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
+struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
const unsigned long *rdump,
unsigned long nr_rdump);
diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
index bf8cd928c228..74d21bd82710 100644
--- a/drivers/clk/socfpga/clk-agilex.c
+++ b/drivers/clk/socfpga/clk-agilex.c
@@ -500,12 +500,10 @@ static int n5x_clkmgr_init(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct stratix10_clock_data *clk_data;
- struct resource *res;
void __iomem *base;
int i, num_clks;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
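+ /*
+ * Editorial note: devm_platform_ioremap_resource(pdev, 0) is the
+ * one-call form of the platform_get_resource(IORESOURCE_MEM) +
+ * devm_ioremap_resource() pair removed above; behaviour is unchanged.
+ */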
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index 1ec9678d8cd3..53d6e3ec4309 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -34,7 +34,7 @@ static u8 socfpga_clk_get_parent(struct clk_hw *hwclk)
if (streq(name, SOCFPGA_L4_MP_CLK)) {
l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
- return l4_src &= 0x1;
+ return l4_src & 0x1;
}
if (streq(name, SOCFPGA_L4_SP_CLK)) {
l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
@@ -43,7 +43,7 @@ static u8 socfpga_clk_get_parent(struct clk_hw *hwclk)
perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
if (streq(name, SOCFPGA_MMC_CLK))
- return perpll_src &= 0x3;
+ return perpll_src & 0x3;
if (streq(name, SOCFPGA_NAND_CLK) ||
streq(name, SOCFPGA_NAND_X_CLK))
return (perpll_src >> 2) & 3;
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 70076a80149d..e444e4a0ee53 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -113,7 +113,7 @@ static unsigned long clk_boot_clk_recalc_rate(struct clk_hw *hwclk,
SWCTRLBTCLKSEL_MASK) >>
SWCTRLBTCLKSEL_SHIFT);
div += 1;
- return parent_rate /= div;
+ return parent_rate / div;
}
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index b532d51faaee..4e508a844b3d 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -388,12 +388,10 @@ static int s10_clkmgr_init(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct stratix10_clock_data *clk_data;
- struct resource *res;
void __iomem *base;
int i, num_clks;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
pr_err("%s: failed to map clock registers\n", __func__);
return PTR_ERR(base);
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 164285d6be97..582a22c04919 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -988,9 +988,18 @@ static void __init st_of_quadfs_setup(struct device_node *np,
void __iomem *reg;
spinlock_t *lock;
+ /*
+ * First check for a reg property within the node itself, to keep
+ * backward compatibility; if reg doesn't exist, fall back to the
+ * parent node.
+ */
reg = of_iomap(np, 0);
- if (!reg)
- return;
+ if (!reg) {
+ reg = of_iomap(of_get_parent(np), 0);
+ if (!reg) {
+ pr_err("%s: Failed to get base address\n", __func__);
+ return;
+ }
+ }
clk_parent_name = of_clk_get_parent_name(np, 0);
if (!clk_parent_name)
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index ce583ded968a..ee39af7a0b72 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -57,10 +57,17 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np,
const char **parents;
int num_parents = 0;
+ /*
+ * First check for a reg property within the node itself, to keep
+ * backward compatibility; if reg doesn't exist, fall back to the
+ * parent node.
+ */
reg = of_iomap(np, 0);
if (!reg) {
- pr_err("%s: Failed to get base address\n", __func__);
- return;
+ reg = of_iomap(of_get_parent(np), 0);
+ if (!reg) {
+ pr_err("%s: Failed to get base address\n", __func__);
+ return;
+ }
}
parents = clkgen_mux_get_parents(np, &num_parents);
diff --git a/drivers/clk/starfive/Kconfig b/drivers/clk/starfive/Kconfig
new file mode 100644
index 000000000000..c0fa9d5e641f
--- /dev/null
+++ b/drivers/clk/starfive/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config CLK_STARFIVE_JH7100
+ bool "StarFive JH7100 clock support"
+ depends on SOC_STARFIVE || COMPILE_TEST
+ default SOC_STARFIVE
+ help
+ Say yes here to support the clock controller on the StarFive JH7100
+ SoC.
diff --git a/drivers/clk/starfive/Makefile b/drivers/clk/starfive/Makefile
new file mode 100644
index 000000000000..09759cc73530
--- /dev/null
+++ b/drivers/clk/starfive/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+# StarFive Clock
+obj-$(CONFIG_CLK_STARFIVE_JH7100) += clk-starfive-jh7100.o
diff --git a/drivers/clk/starfive/clk-starfive-jh7100.c b/drivers/clk/starfive/clk-starfive-jh7100.c
new file mode 100644
index 000000000000..25d31afa0f87
--- /dev/null
+++ b/drivers/clk/starfive/clk-starfive-jh7100.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive JH7100 Clock Generator Driver
+ *
+ * Copyright 2021 Ahmad Fatoum, Pengutronix
+ * Copyright (C) 2021 Glider bv
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/starfive-jh7100.h>
+
+/* external clocks */
+#define JH7100_CLK_OSC_SYS (JH7100_CLK_END + 0)
+#define JH7100_CLK_OSC_AUD (JH7100_CLK_END + 1)
+#define JH7100_CLK_GMAC_RMII_REF (JH7100_CLK_END + 2)
+#define JH7100_CLK_GMAC_GR_MII_RX (JH7100_CLK_END + 3)
+
+/* register fields */
+#define JH7100_CLK_ENABLE BIT(31)
+#define JH7100_CLK_INVERT BIT(30)
+#define JH7100_CLK_MUX_MASK GENMASK(27, 24)
+#define JH7100_CLK_MUX_SHIFT 24
+#define JH7100_CLK_DIV_MASK GENMASK(23, 0)
+
+/* clock data */
+#define JH7100_GATE(_idx, _name, _flags, _parent) [_idx] = { \
+ .name = _name, \
+ .flags = CLK_SET_RATE_PARENT | (_flags), \
+ .max = JH7100_CLK_ENABLE, \
+ .parents = { [0] = _parent }, \
+}
+
+#define JH7100__DIV(_idx, _name, _max, _parent) [_idx] = { \
+ .name = _name, \
+ .flags = 0, \
+ .max = _max, \
+ .parents = { [0] = _parent }, \
+}
+
+#define JH7100_GDIV(_idx, _name, _flags, _max, _parent) [_idx] = { \
+ .name = _name, \
+ .flags = _flags, \
+ .max = JH7100_CLK_ENABLE | (_max), \
+ .parents = { [0] = _parent }, \
+}
+
+#define JH7100__MUX(_idx, _name, _nparents, ...) [_idx] = { \
+ .name = _name, \
+ .flags = 0, \
+ .max = ((_nparents) - 1) << JH7100_CLK_MUX_SHIFT, \
+ .parents = { __VA_ARGS__ }, \
+}
+
+#define JH7100_GMUX(_idx, _name, _flags, _nparents, ...) [_idx] = { \
+ .name = _name, \
+ .flags = _flags, \
+ .max = JH7100_CLK_ENABLE | \
+ (((_nparents) - 1) << JH7100_CLK_MUX_SHIFT), \
+ .parents = { __VA_ARGS__ }, \
+}
+
+#define JH7100__INV(_idx, _name, _parent) [_idx] = { \
+ .name = _name, \
+ .flags = CLK_SET_RATE_PARENT, \
+ .max = JH7100_CLK_INVERT, \
+ .parents = { [0] = _parent }, \
+}
+
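+/*
+ * Editorial note: "max" doubles as a capability mask. A plain gate
+ * sets only JH7100_CLK_ENABLE (bit 31); a divider stores its maximum
+ * divider value in the low bits; a mux stores (nparents - 1) << 24.
+ * Gated variants OR the enable bit in, so e.g. JH7100_GDIV with
+ * _max = 8 yields .max = BIT(31) | 8, and later code can tell from
+ * "max" alone which register fields a given clock implements.
+ */
+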
+static const struct {
+ const char *name;
+ unsigned long flags;
+ u32 max;
+ u8 parents[4];
+} jh7100_clk_data[] __initconst = {
+ JH7100__MUX(JH7100_CLK_CPUNDBUS_ROOT, "cpundbus_root", 4,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_PLL0_OUT,
+ JH7100_CLK_PLL1_OUT,
+ JH7100_CLK_PLL2_OUT),
+ JH7100__MUX(JH7100_CLK_DLA_ROOT, "dla_root", 3,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_PLL1_OUT,
+ JH7100_CLK_PLL2_OUT),
+ JH7100__MUX(JH7100_CLK_DSP_ROOT, "dsp_root", 4,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_PLL0_OUT,
+ JH7100_CLK_PLL1_OUT,
+ JH7100_CLK_PLL2_OUT),
+ JH7100__MUX(JH7100_CLK_GMACUSB_ROOT, "gmacusb_root", 3,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_PLL0_OUT,
+ JH7100_CLK_PLL2_OUT),
+ JH7100__MUX(JH7100_CLK_PERH0_ROOT, "perh0_root", 2,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_PLL0_OUT),
+ JH7100__MUX(JH7100_CLK_PERH1_ROOT, "perh1_root", 2,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_PLL2_OUT),
+ JH7100__MUX(JH7100_CLK_VIN_ROOT, "vin_root", 3,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_PLL1_OUT,
+ JH7100_CLK_PLL2_OUT),
+ JH7100__MUX(JH7100_CLK_VOUT_ROOT, "vout_root", 3,
+ JH7100_CLK_OSC_AUD,
+ JH7100_CLK_PLL0_OUT,
+ JH7100_CLK_PLL2_OUT),
+ JH7100_GDIV(JH7100_CLK_AUDIO_ROOT, "audio_root", 0, 8, JH7100_CLK_PLL0_OUT),
+ JH7100__MUX(JH7100_CLK_CDECHIFI4_ROOT, "cdechifi4_root", 3,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_PLL1_OUT,
+ JH7100_CLK_PLL2_OUT),
+ JH7100__MUX(JH7100_CLK_CDEC_ROOT, "cdec_root", 3,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_PLL0_OUT,
+ JH7100_CLK_PLL1_OUT),
+ JH7100__MUX(JH7100_CLK_VOUTBUS_ROOT, "voutbus_root", 3,
+ JH7100_CLK_OSC_AUD,
+ JH7100_CLK_PLL0_OUT,
+ JH7100_CLK_PLL2_OUT),
+ JH7100__DIV(JH7100_CLK_CPUNBUS_ROOT_DIV, "cpunbus_root_div", 2, JH7100_CLK_CPUNDBUS_ROOT),
+ JH7100__DIV(JH7100_CLK_DSP_ROOT_DIV, "dsp_root_div", 4, JH7100_CLK_DSP_ROOT),
+ JH7100__DIV(JH7100_CLK_PERH0_SRC, "perh0_src", 4, JH7100_CLK_PERH0_ROOT),
+ JH7100__DIV(JH7100_CLK_PERH1_SRC, "perh1_src", 4, JH7100_CLK_PERH1_ROOT),
+ JH7100_GDIV(JH7100_CLK_PLL0_TESTOUT, "pll0_testout", 0, 31, JH7100_CLK_PERH0_SRC),
+ JH7100_GDIV(JH7100_CLK_PLL1_TESTOUT, "pll1_testout", 0, 31, JH7100_CLK_DLA_ROOT),
+ JH7100_GDIV(JH7100_CLK_PLL2_TESTOUT, "pll2_testout", 0, 31, JH7100_CLK_PERH1_SRC),
+ JH7100__MUX(JH7100_CLK_PLL2_REF, "pll2_refclk", 2,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_OSC_AUD),
+ JH7100__DIV(JH7100_CLK_CPU_CORE, "cpu_core", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH7100__DIV(JH7100_CLK_CPU_AXI, "cpu_axi", 8, JH7100_CLK_CPU_CORE),
+ JH7100__DIV(JH7100_CLK_AHB_BUS, "ahb_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH7100__DIV(JH7100_CLK_APB1_BUS, "apb1_bus", 8, JH7100_CLK_AHB_BUS),
+ JH7100__DIV(JH7100_CLK_APB2_BUS, "apb2_bus", 8, JH7100_CLK_AHB_BUS),
+ JH7100_GATE(JH7100_CLK_DOM3AHB_BUS, "dom3ahb_bus", CLK_IS_CRITICAL, JH7100_CLK_AHB_BUS),
+ JH7100_GATE(JH7100_CLK_DOM7AHB_BUS, "dom7ahb_bus", CLK_IS_CRITICAL, JH7100_CLK_AHB_BUS),
+ JH7100_GATE(JH7100_CLK_U74_CORE0, "u74_core0", CLK_IS_CRITICAL, JH7100_CLK_CPU_CORE),
+ JH7100_GDIV(JH7100_CLK_U74_CORE1, "u74_core1", CLK_IS_CRITICAL, 8, JH7100_CLK_CPU_CORE),
+ JH7100_GATE(JH7100_CLK_U74_AXI, "u74_axi", CLK_IS_CRITICAL, JH7100_CLK_CPU_AXI),
+ JH7100_GATE(JH7100_CLK_U74RTC_TOGGLE, "u74rtc_toggle", CLK_IS_CRITICAL, JH7100_CLK_OSC_SYS),
+ JH7100_GATE(JH7100_CLK_SGDMA2P_AXI, "sgdma2p_axi", 0, JH7100_CLK_CPU_AXI),
+ JH7100_GATE(JH7100_CLK_DMA2PNOC_AXI, "dma2pnoc_axi", 0, JH7100_CLK_CPU_AXI),
+ JH7100_GATE(JH7100_CLK_SGDMA2P_AHB, "sgdma2p_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100__DIV(JH7100_CLK_DLA_BUS, "dla_bus", 4, JH7100_CLK_DLA_ROOT),
+ JH7100_GATE(JH7100_CLK_DLA_AXI, "dla_axi", 0, JH7100_CLK_DLA_BUS),
+ JH7100_GATE(JH7100_CLK_DLANOC_AXI, "dlanoc_axi", 0, JH7100_CLK_DLA_BUS),
+ JH7100_GATE(JH7100_CLK_DLA_APB, "dla_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_VP6_CORE, "vp6_core", 0, 4, JH7100_CLK_DSP_ROOT_DIV),
+ JH7100__DIV(JH7100_CLK_VP6BUS_SRC, "vp6bus_src", 4, JH7100_CLK_DSP_ROOT),
+ JH7100_GDIV(JH7100_CLK_VP6_AXI, "vp6_axi", 0, 4, JH7100_CLK_VP6BUS_SRC),
+ JH7100__DIV(JH7100_CLK_VCDECBUS_SRC, "vcdecbus_src", 4, JH7100_CLK_CDECHIFI4_ROOT),
+ JH7100__DIV(JH7100_CLK_VDEC_BUS, "vdec_bus", 8, JH7100_CLK_VCDECBUS_SRC),
+ JH7100_GATE(JH7100_CLK_VDEC_AXI, "vdec_axi", 0, JH7100_CLK_VDEC_BUS),
+ JH7100_GATE(JH7100_CLK_VDECBRG_MAIN, "vdecbrg_mainclk", 0, JH7100_CLK_VDEC_BUS),
+ JH7100_GDIV(JH7100_CLK_VDEC_BCLK, "vdec_bclk", 0, 8, JH7100_CLK_VCDECBUS_SRC),
+ JH7100_GDIV(JH7100_CLK_VDEC_CCLK, "vdec_cclk", 0, 8, JH7100_CLK_CDEC_ROOT),
+ JH7100_GATE(JH7100_CLK_VDEC_APB, "vdec_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_JPEG_AXI, "jpeg_axi", 0, 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH7100_GDIV(JH7100_CLK_JPEG_CCLK, "jpeg_cclk", 0, 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH7100_GATE(JH7100_CLK_JPEG_APB, "jpeg_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_GC300_2X, "gc300_2x", 0, 8, JH7100_CLK_CDECHIFI4_ROOT),
+ JH7100_GATE(JH7100_CLK_GC300_AHB, "gc300_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100__DIV(JH7100_CLK_JPCGC300_AXIBUS, "jpcgc300_axibus", 8, JH7100_CLK_VCDECBUS_SRC),
+ JH7100_GATE(JH7100_CLK_GC300_AXI, "gc300_axi", 0, JH7100_CLK_JPCGC300_AXIBUS),
+ JH7100_GATE(JH7100_CLK_JPCGC300_MAIN, "jpcgc300_mainclk", 0, JH7100_CLK_JPCGC300_AXIBUS),
+ JH7100__DIV(JH7100_CLK_VENC_BUS, "venc_bus", 8, JH7100_CLK_VCDECBUS_SRC),
+ JH7100_GATE(JH7100_CLK_VENC_AXI, "venc_axi", 0, JH7100_CLK_VENC_BUS),
+ JH7100_GATE(JH7100_CLK_VENCBRG_MAIN, "vencbrg_mainclk", 0, JH7100_CLK_VENC_BUS),
+ JH7100_GDIV(JH7100_CLK_VENC_BCLK, "venc_bclk", 0, 8, JH7100_CLK_VCDECBUS_SRC),
+ JH7100_GDIV(JH7100_CLK_VENC_CCLK, "venc_cclk", 0, 8, JH7100_CLK_CDEC_ROOT),
+ JH7100_GATE(JH7100_CLK_VENC_APB, "venc_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_DDRPLL_DIV2, "ddrpll_div2", CLK_IS_CRITICAL, 2, JH7100_CLK_PLL1_OUT),
+ JH7100_GDIV(JH7100_CLK_DDRPLL_DIV4, "ddrpll_div4", CLK_IS_CRITICAL, 2, JH7100_CLK_DDRPLL_DIV2),
+ JH7100_GDIV(JH7100_CLK_DDRPLL_DIV8, "ddrpll_div8", CLK_IS_CRITICAL, 2, JH7100_CLK_DDRPLL_DIV4),
+ JH7100_GDIV(JH7100_CLK_DDROSC_DIV2, "ddrosc_div2", CLK_IS_CRITICAL, 2, JH7100_CLK_OSC_SYS),
+ JH7100_GMUX(JH7100_CLK_DDRC0, "ddrc0", CLK_IS_CRITICAL, 4,
+ JH7100_CLK_DDROSC_DIV2,
+ JH7100_CLK_DDRPLL_DIV2,
+ JH7100_CLK_DDRPLL_DIV4,
+ JH7100_CLK_DDRPLL_DIV8),
+ JH7100_GMUX(JH7100_CLK_DDRC1, "ddrc1", CLK_IS_CRITICAL, 4,
+ JH7100_CLK_DDROSC_DIV2,
+ JH7100_CLK_DDRPLL_DIV2,
+ JH7100_CLK_DDRPLL_DIV4,
+ JH7100_CLK_DDRPLL_DIV8),
+ JH7100_GATE(JH7100_CLK_DDRPHY_APB, "ddrphy_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100__DIV(JH7100_CLK_NOC_ROB, "noc_rob", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH7100__DIV(JH7100_CLK_NOC_COG, "noc_cog", 8, JH7100_CLK_DLA_ROOT),
+ JH7100_GATE(JH7100_CLK_NNE_AHB, "nne_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100__DIV(JH7100_CLK_NNEBUS_SRC1, "nnebus_src1", 4, JH7100_CLK_DSP_ROOT),
+ JH7100__MUX(JH7100_CLK_NNE_BUS, "nne_bus", 2,
+ JH7100_CLK_CPU_AXI,
+ JH7100_CLK_NNEBUS_SRC1),
+ JH7100_GATE(JH7100_CLK_NNE_AXI, "nne_axi", 0, JH7100_CLK_NNE_BUS),
+ JH7100_GATE(JH7100_CLK_NNENOC_AXI, "nnenoc_axi", 0, JH7100_CLK_NNE_BUS),
+ JH7100_GATE(JH7100_CLK_DLASLV_AXI, "dlaslv_axi", 0, JH7100_CLK_NNE_BUS),
+ JH7100_GATE(JH7100_CLK_DSPX2C_AXI, "dspx2c_axi", CLK_IS_CRITICAL, JH7100_CLK_NNE_BUS),
+ JH7100__DIV(JH7100_CLK_HIFI4_SRC, "hifi4_src", 4, JH7100_CLK_CDECHIFI4_ROOT),
+ JH7100__DIV(JH7100_CLK_HIFI4_COREFREE, "hifi4_corefree", 8, JH7100_CLK_HIFI4_SRC),
+ JH7100_GATE(JH7100_CLK_HIFI4_CORE, "hifi4_core", 0, JH7100_CLK_HIFI4_COREFREE),
+ JH7100__DIV(JH7100_CLK_HIFI4_BUS, "hifi4_bus", 8, JH7100_CLK_HIFI4_COREFREE),
+ JH7100_GATE(JH7100_CLK_HIFI4_AXI, "hifi4_axi", 0, JH7100_CLK_HIFI4_BUS),
+ JH7100_GATE(JH7100_CLK_HIFI4NOC_AXI, "hifi4noc_axi", 0, JH7100_CLK_HIFI4_BUS),
+ JH7100__DIV(JH7100_CLK_SGDMA1P_BUS, "sgdma1p_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH7100_GATE(JH7100_CLK_SGDMA1P_AXI, "sgdma1p_axi", 0, JH7100_CLK_SGDMA1P_BUS),
+ JH7100_GATE(JH7100_CLK_DMA1P_AXI, "dma1p_axi", 0, JH7100_CLK_SGDMA1P_BUS),
+ JH7100_GDIV(JH7100_CLK_X2C_AXI, "x2c_axi", CLK_IS_CRITICAL, 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH7100__DIV(JH7100_CLK_USB_BUS, "usb_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
+ JH7100_GATE(JH7100_CLK_USB_AXI, "usb_axi", 0, JH7100_CLK_USB_BUS),
+ JH7100_GATE(JH7100_CLK_USBNOC_AXI, "usbnoc_axi", 0, JH7100_CLK_USB_BUS),
+ JH7100__DIV(JH7100_CLK_USBPHY_ROOTDIV, "usbphy_rootdiv", 4, JH7100_CLK_GMACUSB_ROOT),
+ JH7100_GDIV(JH7100_CLK_USBPHY_125M, "usbphy_125m", 0, 8, JH7100_CLK_USBPHY_ROOTDIV),
+ JH7100_GDIV(JH7100_CLK_USBPHY_PLLDIV25M, "usbphy_plldiv25m", 0, 32, JH7100_CLK_USBPHY_ROOTDIV),
+ JH7100__MUX(JH7100_CLK_USBPHY_25M, "usbphy_25m", 2,
+ JH7100_CLK_OSC_SYS,
+ JH7100_CLK_USBPHY_PLLDIV25M),
+ JH7100__DIV(JH7100_CLK_AUDIO_DIV, "audio_div", 131072, JH7100_CLK_AUDIO_ROOT),
+ JH7100_GATE(JH7100_CLK_AUDIO_SRC, "audio_src", 0, JH7100_CLK_AUDIO_DIV),
+ JH7100_GATE(JH7100_CLK_AUDIO_12288, "audio_12288", 0, JH7100_CLK_OSC_AUD),
+ JH7100_GDIV(JH7100_CLK_VIN_SRC, "vin_src", 0, 4, JH7100_CLK_VIN_ROOT),
+ JH7100__DIV(JH7100_CLK_ISP0_BUS, "isp0_bus", 8, JH7100_CLK_VIN_SRC),
+ JH7100_GATE(JH7100_CLK_ISP0_AXI, "isp0_axi", 0, JH7100_CLK_ISP0_BUS),
+ JH7100_GATE(JH7100_CLK_ISP0NOC_AXI, "isp0noc_axi", 0, JH7100_CLK_ISP0_BUS),
+ JH7100_GATE(JH7100_CLK_ISPSLV_AXI, "ispslv_axi", 0, JH7100_CLK_ISP0_BUS),
+ JH7100__DIV(JH7100_CLK_ISP1_BUS, "isp1_bus", 8, JH7100_CLK_VIN_SRC),
+ JH7100_GATE(JH7100_CLK_ISP1_AXI, "isp1_axi", 0, JH7100_CLK_ISP1_BUS),
+ JH7100_GATE(JH7100_CLK_ISP1NOC_AXI, "isp1noc_axi", 0, JH7100_CLK_ISP1_BUS),
+ JH7100__DIV(JH7100_CLK_VIN_BUS, "vin_bus", 8, JH7100_CLK_VIN_SRC),
+ JH7100_GATE(JH7100_CLK_VIN_AXI, "vin_axi", 0, JH7100_CLK_VIN_BUS),
+ JH7100_GATE(JH7100_CLK_VINNOC_AXI, "vinnoc_axi", 0, JH7100_CLK_VIN_BUS),
+ JH7100_GDIV(JH7100_CLK_VOUT_SRC, "vout_src", 0, 4, JH7100_CLK_VOUT_ROOT),
+ JH7100__DIV(JH7100_CLK_DISPBUS_SRC, "dispbus_src", 4, JH7100_CLK_VOUTBUS_ROOT),
+ JH7100__DIV(JH7100_CLK_DISP_BUS, "disp_bus", 4, JH7100_CLK_DISPBUS_SRC),
+ JH7100_GATE(JH7100_CLK_DISP_AXI, "disp_axi", 0, JH7100_CLK_DISP_BUS),
+ JH7100_GATE(JH7100_CLK_DISPNOC_AXI, "dispnoc_axi", 0, JH7100_CLK_DISP_BUS),
+ JH7100_GATE(JH7100_CLK_SDIO0_AHB, "sdio0_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100_GDIV(JH7100_CLK_SDIO0_CCLKINT, "sdio0_cclkint", 0, 24, JH7100_CLK_PERH0_SRC),
+ JH7100__INV(JH7100_CLK_SDIO0_CCLKINT_INV, "sdio0_cclkint_inv", JH7100_CLK_SDIO0_CCLKINT),
+ JH7100_GATE(JH7100_CLK_SDIO1_AHB, "sdio1_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100_GDIV(JH7100_CLK_SDIO1_CCLKINT, "sdio1_cclkint", 0, 24, JH7100_CLK_PERH1_SRC),
+ JH7100__INV(JH7100_CLK_SDIO1_CCLKINT_INV, "sdio1_cclkint_inv", JH7100_CLK_SDIO1_CCLKINT),
+ JH7100_GATE(JH7100_CLK_GMAC_AHB, "gmac_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100__DIV(JH7100_CLK_GMAC_ROOT_DIV, "gmac_root_div", 8, JH7100_CLK_GMACUSB_ROOT),
+ JH7100_GDIV(JH7100_CLK_GMAC_PTP_REF, "gmac_ptp_refclk", 0, 31, JH7100_CLK_GMAC_ROOT_DIV),
+ JH7100_GDIV(JH7100_CLK_GMAC_GTX, "gmac_gtxclk", 0, 255, JH7100_CLK_GMAC_ROOT_DIV),
+ JH7100_GDIV(JH7100_CLK_GMAC_RMII_TX, "gmac_rmii_txclk", 0, 8, JH7100_CLK_GMAC_RMII_REF),
+ JH7100_GDIV(JH7100_CLK_GMAC_RMII_RX, "gmac_rmii_rxclk", 0, 8, JH7100_CLK_GMAC_RMII_REF),
+ JH7100__MUX(JH7100_CLK_GMAC_TX, "gmac_tx", 3,
+ JH7100_CLK_GMAC_GTX,
+ JH7100_CLK_GMAC_TX_INV,
+ JH7100_CLK_GMAC_RMII_TX),
+ JH7100__INV(JH7100_CLK_GMAC_TX_INV, "gmac_tx_inv", JH7100_CLK_GMAC_TX),
+ JH7100__MUX(JH7100_CLK_GMAC_RX_PRE, "gmac_rx_pre", 2,
+ JH7100_CLK_GMAC_GR_MII_RX,
+ JH7100_CLK_GMAC_RMII_RX),
+ JH7100__INV(JH7100_CLK_GMAC_RX_INV, "gmac_rx_inv", JH7100_CLK_GMAC_RX_PRE),
+ JH7100_GATE(JH7100_CLK_GMAC_RMII, "gmac_rmii", 0, JH7100_CLK_GMAC_RMII_REF),
+ JH7100_GDIV(JH7100_CLK_GMAC_TOPHYREF, "gmac_tophyref", 0, 127, JH7100_CLK_GMAC_ROOT_DIV),
+ JH7100_GATE(JH7100_CLK_SPI2AHB_AHB, "spi2ahb_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100_GDIV(JH7100_CLK_SPI2AHB_CORE, "spi2ahb_core", 0, 31, JH7100_CLK_PERH0_SRC),
+ JH7100_GATE(JH7100_CLK_EZMASTER_AHB, "ezmaster_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100_GATE(JH7100_CLK_E24_AHB, "e24_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100_GATE(JH7100_CLK_E24RTC_TOGGLE, "e24rtc_toggle", 0, JH7100_CLK_OSC_SYS),
+ JH7100_GATE(JH7100_CLK_QSPI_AHB, "qspi_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100_GATE(JH7100_CLK_QSPI_APB, "qspi_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_QSPI_REF, "qspi_refclk", 0, 31, JH7100_CLK_PERH0_SRC),
+ JH7100_GATE(JH7100_CLK_SEC_AHB, "sec_ahb", 0, JH7100_CLK_AHB_BUS),
+ JH7100_GATE(JH7100_CLK_AES, "aes_clk", 0, JH7100_CLK_SEC_AHB),
+ JH7100_GATE(JH7100_CLK_SHA, "sha_clk", 0, JH7100_CLK_SEC_AHB),
+ JH7100_GATE(JH7100_CLK_PKA, "pka_clk", 0, JH7100_CLK_SEC_AHB),
+ JH7100_GATE(JH7100_CLK_TRNG_APB, "trng_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GATE(JH7100_CLK_OTP_APB, "otp_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GATE(JH7100_CLK_UART0_APB, "uart0_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_UART0_CORE, "uart0_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH7100_GATE(JH7100_CLK_UART1_APB, "uart1_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_UART1_CORE, "uart1_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH7100_GATE(JH7100_CLK_SPI0_APB, "spi0_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_SPI0_CORE, "spi0_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH7100_GATE(JH7100_CLK_SPI1_APB, "spi1_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_SPI1_CORE, "spi1_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH7100_GATE(JH7100_CLK_I2C0_APB, "i2c0_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_I2C0_CORE, "i2c0_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH7100_GATE(JH7100_CLK_I2C1_APB, "i2c1_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GDIV(JH7100_CLK_I2C1_CORE, "i2c1_core", 0, 63, JH7100_CLK_PERH1_SRC),
+ JH7100_GATE(JH7100_CLK_GPIO_APB, "gpio_apb", 0, JH7100_CLK_APB1_BUS),
+ JH7100_GATE(JH7100_CLK_UART2_APB, "uart2_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GDIV(JH7100_CLK_UART2_CORE, "uart2_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GATE(JH7100_CLK_UART3_APB, "uart3_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GDIV(JH7100_CLK_UART3_CORE, "uart3_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GATE(JH7100_CLK_SPI2_APB, "spi2_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GDIV(JH7100_CLK_SPI2_CORE, "spi2_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GATE(JH7100_CLK_SPI3_APB, "spi3_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GDIV(JH7100_CLK_SPI3_CORE, "spi3_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GATE(JH7100_CLK_I2C2_APB, "i2c2_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GDIV(JH7100_CLK_I2C2_CORE, "i2c2_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GATE(JH7100_CLK_I2C3_APB, "i2c3_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GDIV(JH7100_CLK_I2C3_CORE, "i2c3_core", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GATE(JH7100_CLK_WDTIMER_APB, "wdtimer_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GDIV(JH7100_CLK_WDT_CORE, "wdt_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GDIV(JH7100_CLK_TIMER0_CORE, "timer0_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GDIV(JH7100_CLK_TIMER1_CORE, "timer1_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GDIV(JH7100_CLK_TIMER2_CORE, "timer2_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GDIV(JH7100_CLK_TIMER3_CORE, "timer3_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GDIV(JH7100_CLK_TIMER4_CORE, "timer4_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GDIV(JH7100_CLK_TIMER5_CORE, "timer5_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GDIV(JH7100_CLK_TIMER6_CORE, "timer6_coreclk", 0, 63, JH7100_CLK_PERH0_SRC),
+ JH7100_GATE(JH7100_CLK_VP6INTC_APB, "vp6intc_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GATE(JH7100_CLK_PWM_APB, "pwm_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GATE(JH7100_CLK_MSI_APB, "msi_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GATE(JH7100_CLK_TEMP_APB, "temp_apb", 0, JH7100_CLK_APB2_BUS),
+ JH7100_GDIV(JH7100_CLK_TEMP_SENSE, "temp_sense", 0, 31, JH7100_CLK_OSC_SYS),
+ JH7100_GATE(JH7100_CLK_SYSERR_APB, "syserr_apb", 0, JH7100_CLK_APB2_BUS),
+};
+
+struct jh7100_clk {
+ struct clk_hw hw;
+ unsigned int idx;
+ unsigned int max_div;
+};
+
+struct jh7100_clk_priv {
+ /* protect clk enable and set rate/parent from happening at the same time */
+ spinlock_t rmw_lock;
+ struct device *dev;
+ void __iomem *base;
+ struct clk_hw *pll[3];
+ struct jh7100_clk reg[JH7100_CLK_PLL0_OUT];
+};
+
+static struct jh7100_clk *jh7100_clk_from(struct clk_hw *hw)
+{
+ return container_of(hw, struct jh7100_clk, hw);
+}
+
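+/*
+ * Each jh7100_clk lives at a known index in the enclosing priv->reg[] array,
+ * so the private data can be recovered with container_of() on that slot.
+ */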
+static struct jh7100_clk_priv *jh7100_priv_from(struct jh7100_clk *clk)
+{
+ return container_of(clk, struct jh7100_clk_priv, reg[clk->idx]);
+}
+
+static u32 jh7100_clk_reg_get(struct jh7100_clk *clk)
+{
+ struct jh7100_clk_priv *priv = jh7100_priv_from(clk);
+ void __iomem *reg = priv->base + 4 * clk->idx;
+
+ return readl_relaxed(reg);
+}
+
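+/*
+ * All writable fields (enable, divider, mux, invert) share one 32-bit control
+ * register per clock, so updates are done as a locked read-modify-write.
+ */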
+static void jh7100_clk_reg_rmw(struct jh7100_clk *clk, u32 mask, u32 value)
+{
+ struct jh7100_clk_priv *priv = jh7100_priv_from(clk);
+ void __iomem *reg = priv->base + 4 * clk->idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ value |= readl_relaxed(reg) & ~mask;
+ writel_relaxed(value, reg);
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static int jh7100_clk_enable(struct clk_hw *hw)
+{
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+
+ jh7100_clk_reg_rmw(clk, JH7100_CLK_ENABLE, JH7100_CLK_ENABLE);
+ return 0;
+}
+
+static void jh7100_clk_disable(struct clk_hw *hw)
+{
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+
+ jh7100_clk_reg_rmw(clk, JH7100_CLK_ENABLE, 0);
+}
+
+static int jh7100_clk_is_enabled(struct clk_hw *hw)
+{
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+
+ return !!(jh7100_clk_reg_get(clk) & JH7100_CLK_ENABLE);
+}
+
+static unsigned long jh7100_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+ u32 div = jh7100_clk_reg_get(clk) & JH7100_CLK_DIV_MASK;
+
+ return div ? parent_rate / div : 0;
+}
+
+static unsigned long jh7100_clk_bestdiv(struct jh7100_clk *clk,
+ unsigned long rate, unsigned long parent)
+{
+ unsigned long max = clk->max_div;
+ unsigned long div = DIV_ROUND_UP(parent, rate);
+
+ return min(div, max);
+}
+
+static int jh7100_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+ unsigned long parent = req->best_parent_rate;
+ unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
+ unsigned long div = jh7100_clk_bestdiv(clk, rate, parent);
+ unsigned long result = parent / div;
+
+ /*
+ * we want the result clamped by min_rate and max_rate if possible:
+ * case 1: div hits the max divider value, which means it's less than
+ * parent / rate, so the result is greater than rate and min_rate in
+ * particular. we can't do anything about result > max_rate because the
+ * divider doesn't go any further.
+ * case 2: div = DIV_ROUND_UP(parent, rate) which means the result is
+ * always lower or equal to rate and max_rate. however the result may
+ * turn out lower than min_rate, but then the next higher rate is fine:
+ * div - 1 = ceil(parent / rate) - 1 < parent / rate
+ * and thus
+ * min_rate <= rate < parent / (div - 1)
+ */
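+	/*
+	 * Worked example (hypothetical numbers): parent = 100 MHz,
+	 * rate = 30 MHz, min_rate = 28 MHz gives div = 4 and
+	 * result = 25 MHz < min_rate, so div - 1 = 3 is used instead,
+	 * yielding ~33.3 MHz >= min_rate.
+	 */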
+ if (result < req->min_rate && div > 1)
+ result = parent / (div - 1);
+
+ req->rate = result;
+ return 0;
+}
+
+static int jh7100_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+ unsigned long div = jh7100_clk_bestdiv(clk, rate, parent_rate);
+
+ jh7100_clk_reg_rmw(clk, JH7100_CLK_DIV_MASK, div);
+ return 0;
+}
+
+static u8 jh7100_clk_get_parent(struct clk_hw *hw)
+{
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+ u32 value = jh7100_clk_reg_get(clk);
+
+ return (value & JH7100_CLK_MUX_MASK) >> JH7100_CLK_MUX_SHIFT;
+}
+
+static int jh7100_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+ u32 value = (u32)index << JH7100_CLK_MUX_SHIFT;
+
+ jh7100_clk_reg_rmw(clk, JH7100_CLK_MUX_MASK, value);
+ return 0;
+}
+
+static int jh7100_clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_mux_determine_rate_flags(hw, req, 0);
+}
+
+static int jh7100_clk_get_phase(struct clk_hw *hw)
+{
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+ u32 value = jh7100_clk_reg_get(clk);
+
+ return (value & JH7100_CLK_INVERT) ? 180 : 0;
+}
+
+static int jh7100_clk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+ u32 value;
+
+ if (degrees == 0)
+ value = 0;
+ else if (degrees == 180)
+ value = JH7100_CLK_INVERT;
+ else
+ return -EINVAL;
+
+ jh7100_clk_reg_rmw(clk, JH7100_CLK_INVERT, value);
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void jh7100_clk_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ static const struct debugfs_reg32 jh7100_clk_reg = {
+ .name = "CTRL",
+ .offset = 0,
+ };
+ struct jh7100_clk *clk = jh7100_clk_from(hw);
+ struct jh7100_clk_priv *priv = jh7100_priv_from(clk);
+ struct debugfs_regset32 *regset;
+
+ regset = devm_kzalloc(priv->dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return;
+
+ regset->regs = &jh7100_clk_reg;
+ regset->nregs = 1;
+ regset->base = priv->base + 4 * clk->idx;
+
+ debugfs_create_regset32("registers", 0400, dentry, regset);
+}
+#else
+#define jh7100_clk_debug_init NULL
+#endif
+
+static const struct clk_ops jh7100_clk_gate_ops = {
+ .enable = jh7100_clk_enable,
+ .disable = jh7100_clk_disable,
+ .is_enabled = jh7100_clk_is_enabled,
+ .debug_init = jh7100_clk_debug_init,
+};
+
+static const struct clk_ops jh7100_clk_div_ops = {
+ .recalc_rate = jh7100_clk_recalc_rate,
+ .determine_rate = jh7100_clk_determine_rate,
+ .set_rate = jh7100_clk_set_rate,
+ .debug_init = jh7100_clk_debug_init,
+};
+
+static const struct clk_ops jh7100_clk_gdiv_ops = {
+ .enable = jh7100_clk_enable,
+ .disable = jh7100_clk_disable,
+ .is_enabled = jh7100_clk_is_enabled,
+ .recalc_rate = jh7100_clk_recalc_rate,
+ .determine_rate = jh7100_clk_determine_rate,
+ .set_rate = jh7100_clk_set_rate,
+ .debug_init = jh7100_clk_debug_init,
+};
+
+static const struct clk_ops jh7100_clk_mux_ops = {
+ .determine_rate = jh7100_clk_mux_determine_rate,
+ .set_parent = jh7100_clk_set_parent,
+ .get_parent = jh7100_clk_get_parent,
+ .debug_init = jh7100_clk_debug_init,
+};
+
+static const struct clk_ops jh7100_clk_gmux_ops = {
+ .enable = jh7100_clk_enable,
+ .disable = jh7100_clk_disable,
+ .is_enabled = jh7100_clk_is_enabled,
+ .determine_rate = jh7100_clk_mux_determine_rate,
+ .set_parent = jh7100_clk_set_parent,
+ .get_parent = jh7100_clk_get_parent,
+ .debug_init = jh7100_clk_debug_init,
+};
+
+static const struct clk_ops jh7100_clk_inv_ops = {
+ .get_phase = jh7100_clk_get_phase,
+ .set_phase = jh7100_clk_set_phase,
+ .debug_init = jh7100_clk_debug_init,
+};
+
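+/*
+ * The "max" field encodes what a clock can do: a non-zero divider mask, mux
+ * bits and/or an enable bit select the matching clk_ops; a clock with none
+ * of them can only be an inverter.
+ */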
+static const struct clk_ops *__init jh7100_clk_ops(u32 max)
+{
+ if (max & JH7100_CLK_DIV_MASK) {
+ if (max & JH7100_CLK_ENABLE)
+ return &jh7100_clk_gdiv_ops;
+ return &jh7100_clk_div_ops;
+ }
+
+ if (max & JH7100_CLK_MUX_MASK) {
+ if (max & JH7100_CLK_ENABLE)
+ return &jh7100_clk_gmux_ops;
+ return &jh7100_clk_mux_ops;
+ }
+
+ if (max & JH7100_CLK_ENABLE)
+ return &jh7100_clk_gate_ops;
+
+ return &jh7100_clk_inv_ops;
+}
+
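+/*
+ * Translate a DT clock specifier: indices below JH7100_CLK_PLL0_OUT map to
+ * the register-backed clocks, the rest to the fixed-factor PLL outputs.
+ */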
+static struct clk_hw *jh7100_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct jh7100_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx < JH7100_CLK_PLL0_OUT)
+ return &priv->reg[idx].hw;
+
+ if (idx < JH7100_CLK_END)
+ return priv->pll[idx - JH7100_CLK_PLL0_OUT];
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int __init clk_starfive_jh7100_probe(struct platform_device *pdev)
+{
+ struct jh7100_clk_priv *priv;
+ unsigned int idx;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->rmw_lock);
+ priv->dev = &pdev->dev;
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
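+	/*
+	 * The PLLs are registered as fixed ratios of their reference: with
+	 * the usual 25 MHz osc_sys (an assumption; this is board dependent)
+	 * pll0_out runs at 1 GHz and pll1_out at 1.6 GHz.
+	 */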
+ priv->pll[0] = devm_clk_hw_register_fixed_factor(priv->dev, "pll0_out",
+ "osc_sys", 0, 40, 1);
+ if (IS_ERR(priv->pll[0]))
+ return PTR_ERR(priv->pll[0]);
+
+ priv->pll[1] = devm_clk_hw_register_fixed_factor(priv->dev, "pll1_out",
+ "osc_sys", 0, 64, 1);
+ if (IS_ERR(priv->pll[1]))
+ return PTR_ERR(priv->pll[1]);
+
+ priv->pll[2] = devm_clk_hw_register_fixed_factor(priv->dev, "pll2_out",
+ "pll2_refclk", 0, 55, 1);
+ if (IS_ERR(priv->pll[2]))
+ return PTR_ERR(priv->pll[2]);
+
+ for (idx = 0; idx < JH7100_CLK_PLL0_OUT; idx++) {
+ u32 max = jh7100_clk_data[idx].max;
+ struct clk_parent_data parents[4] = {};
+ struct clk_init_data init = {
+ .name = jh7100_clk_data[idx].name,
+ .ops = jh7100_clk_ops(max),
+ .parent_data = parents,
+ .num_parents = ((max & JH7100_CLK_MUX_MASK) >> JH7100_CLK_MUX_SHIFT) + 1,
+ .flags = jh7100_clk_data[idx].flags,
+ };
+ struct jh7100_clk *clk = &priv->reg[idx];
+ unsigned int i;
+
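+		/*
+		 * Parent indices outside this CCU's own range denote external
+		 * input clocks referenced by name from the device tree.
+		 */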
+ for (i = 0; i < init.num_parents; i++) {
+ unsigned int pidx = jh7100_clk_data[idx].parents[i];
+
+ if (pidx < JH7100_CLK_PLL0_OUT)
+ parents[i].hw = &priv->reg[pidx].hw;
+ else if (pidx < JH7100_CLK_END)
+ parents[i].hw = priv->pll[pidx - JH7100_CLK_PLL0_OUT];
+ else if (pidx == JH7100_CLK_OSC_SYS)
+ parents[i].fw_name = "osc_sys";
+ else if (pidx == JH7100_CLK_OSC_AUD)
+ parents[i].fw_name = "osc_aud";
+ else if (pidx == JH7100_CLK_GMAC_RMII_REF)
+ parents[i].fw_name = "gmac_rmii_ref";
+ else if (pidx == JH7100_CLK_GMAC_GR_MII_RX)
+ parents[i].fw_name = "gmac_gr_mii_rxclk";
+ }
+
+ clk->hw.init = &init;
+ clk->idx = idx;
+ clk->max_div = max & JH7100_CLK_DIV_MASK;
+
+ ret = devm_clk_hw_register(priv->dev, &clk->hw);
+ if (ret)
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(priv->dev, jh7100_clk_get, priv);
+}
+
+static const struct of_device_id clk_starfive_jh7100_match[] = {
+ { .compatible = "starfive,jh7100-clkgen" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_starfive_jh7100_driver = {
+ .driver = {
+ .name = "clk-starfive-jh7100",
+ .of_match_table = clk_starfive_jh7100_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_starfive_jh7100_driver, clk_starfive_jh7100_probe);
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index e76e1676f0f0..68a94e5af8ed 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config SUNXI_CCU
- bool "Clock support for Allwinner SoCs"
+ tristate "Clock support for Allwinner SoCs"
depends on ARCH_SUNXI || COMPILE_TEST
select RESET_CONTROLLER
default ARCH_SUNXI
@@ -8,42 +8,52 @@ config SUNXI_CCU
if SUNXI_CCU
config SUNIV_F1C100S_CCU
- bool "Support for the Allwinner newer F1C100s CCU"
+ tristate "Support for the Allwinner newer F1C100s CCU"
default MACH_SUNIV
depends on MACH_SUNIV || COMPILE_TEST
+config SUN20I_D1_CCU
+ tristate "Support for the Allwinner D1 CCU"
+ default RISCV && ARCH_SUNXI
+ depends on (RISCV && ARCH_SUNXI) || COMPILE_TEST
+
+config SUN20I_D1_R_CCU
+ tristate "Support for the Allwinner D1 PRCM CCU"
+ default RISCV && ARCH_SUNXI
+ depends on (RISCV && ARCH_SUNXI) || COMPILE_TEST
+
config SUN50I_A64_CCU
- bool "Support for the Allwinner A64 CCU"
+ tristate "Support for the Allwinner A64 CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN50I_A100_CCU
- bool "Support for the Allwinner A100 CCU"
+ tristate "Support for the Allwinner A100 CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN50I_A100_R_CCU
- bool "Support for the Allwinner A100 PRCM CCU"
+ tristate "Support for the Allwinner A100 PRCM CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN50I_H6_CCU
- bool "Support for the Allwinner H6 CCU"
+ tristate "Support for the Allwinner H6 CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN50I_H616_CCU
- bool "Support for the Allwinner H616 CCU"
+ tristate "Support for the Allwinner H616 CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN50I_H6_R_CCU
- bool "Support for the Allwinner H6 and H616 PRCM CCU"
+ tristate "Support for the Allwinner H6 and H616 PRCM CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN4I_A10_CCU
- bool "Support for the Allwinner A10/A20 CCU"
+ tristate "Support for the Allwinner A10/A20 CCU"
default MACH_SUN4I
default MACH_SUN7I
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
@@ -52,53 +62,54 @@ config SUN5I_CCU
bool "Support for the Allwinner sun5i family CCM"
default MACH_SUN5I
depends on MACH_SUN5I || COMPILE_TEST
+ depends on SUNXI_CCU=y
config SUN6I_A31_CCU
- bool "Support for the Allwinner A31/A31s CCU"
+ tristate "Support for the Allwinner A31/A31s CCU"
default MACH_SUN6I
depends on MACH_SUN6I || COMPILE_TEST
config SUN8I_A23_CCU
- bool "Support for the Allwinner A23 CCU"
+ tristate "Support for the Allwinner A23 CCU"
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A33_CCU
- bool "Support for the Allwinner A33 CCU"
+ tristate "Support for the Allwinner A33 CCU"
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A83T_CCU
- bool "Support for the Allwinner A83T CCU"
+ tristate "Support for the Allwinner A83T CCU"
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_H3_CCU
- bool "Support for the Allwinner H3 CCU"
+ tristate "Support for the Allwinner H3 CCU"
default MACH_SUN8I || (ARM64 && ARCH_SUNXI)
depends on MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN8I_V3S_CCU
- bool "Support for the Allwinner V3s CCU"
+ tristate "Support for the Allwinner V3s CCU"
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_DE2_CCU
- bool "Support for the Allwinner SoCs DE2 CCU"
+ tristate "Support for the Allwinner SoCs DE2 CCU"
default MACH_SUN8I || (ARM64 && ARCH_SUNXI)
config SUN8I_R40_CCU
- bool "Support for the Allwinner R40 CCU"
+ tristate "Support for the Allwinner R40 CCU"
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN9I_A80_CCU
- bool "Support for the Allwinner A80 CCU"
+ tristate "Support for the Allwinner A80 CCU"
default MACH_SUN9I
depends on MACH_SUN9I || COMPILE_TEST
config SUN8I_R_CCU
- bool "Support for Allwinner SoCs' PRCM CCUs"
+ tristate "Support for Allwinner SoCs' PRCM CCUs"
default MACH_SUN8I || (ARCH_SUNXI && ARM64)
endif
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 96c324306d97..ec931cb7aa14 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,44 +1,73 @@
# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SUNXI_CCU) += sunxi-ccu.o
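+# Core CCU helpers build into a single module so the SoC CCUs can be tristate.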
+
# Common objects
-obj-y += ccu_common.o
-obj-y += ccu_mmc_timing.o
-obj-y += ccu_reset.o
+sunxi-ccu-y += ccu_common.o
+sunxi-ccu-y += ccu_mmc_timing.o
+sunxi-ccu-y += ccu_reset.o
# Base clock types
-obj-y += ccu_div.o
-obj-y += ccu_frac.o
-obj-y += ccu_gate.o
-obj-y += ccu_mux.o
-obj-y += ccu_mult.o
-obj-y += ccu_phase.o
-obj-y += ccu_sdm.o
+sunxi-ccu-y += ccu_div.o
+sunxi-ccu-y += ccu_frac.o
+sunxi-ccu-y += ccu_gate.o
+sunxi-ccu-y += ccu_mux.o
+sunxi-ccu-y += ccu_mult.o
+sunxi-ccu-y += ccu_phase.o
+sunxi-ccu-y += ccu_sdm.o
# Multi-factor clocks
-obj-y += ccu_nk.o
-obj-y += ccu_nkm.o
-obj-y += ccu_nkmp.o
-obj-y += ccu_nm.o
-obj-y += ccu_mp.o
+sunxi-ccu-y += ccu_nk.o
+sunxi-ccu-y += ccu_nkm.o
+sunxi-ccu-y += ccu_nkmp.o
+sunxi-ccu-y += ccu_nm.o
+sunxi-ccu-y += ccu_mp.o
# SoC support
-obj-$(CONFIG_SUNIV_F1C100S_CCU) += ccu-suniv-f1c100s.o
-obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
-obj-$(CONFIG_SUN50I_A100_CCU) += ccu-sun50i-a100.o
-obj-$(CONFIG_SUN50I_A100_R_CCU) += ccu-sun50i-a100-r.o
-obj-$(CONFIG_SUN50I_H6_CCU) += ccu-sun50i-h6.o
-obj-$(CONFIG_SUN50I_H616_CCU) += ccu-sun50i-h616.o
-obj-$(CONFIG_SUN50I_H6_R_CCU) += ccu-sun50i-h6-r.o
-obj-$(CONFIG_SUN4I_A10_CCU) += ccu-sun4i-a10.o
-obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o
-obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o
-obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o
-obj-$(CONFIG_SUN8I_A33_CCU) += ccu-sun8i-a33.o
-obj-$(CONFIG_SUN8I_A83T_CCU) += ccu-sun8i-a83t.o
-obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o
-obj-$(CONFIG_SUN8I_V3S_CCU) += ccu-sun8i-v3s.o
-obj-$(CONFIG_SUN8I_DE2_CCU) += ccu-sun8i-de2.o
-obj-$(CONFIG_SUN8I_R_CCU) += ccu-sun8i-r.o
-obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o
-obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o
-obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o
-obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o
+obj-$(CONFIG_SUNIV_F1C100S_CCU) += suniv-f1c100s-ccu.o
+obj-$(CONFIG_SUN20I_D1_CCU) += sun20i-d1-ccu.o
+obj-$(CONFIG_SUN20I_D1_R_CCU) += sun20i-d1-r-ccu.o
+obj-$(CONFIG_SUN50I_A64_CCU) += sun50i-a64-ccu.o
+obj-$(CONFIG_SUN50I_A100_CCU) += sun50i-a100-ccu.o
+obj-$(CONFIG_SUN50I_A100_R_CCU) += sun50i-a100-r-ccu.o
+obj-$(CONFIG_SUN50I_H6_CCU) += sun50i-h6-ccu.o
+obj-$(CONFIG_SUN50I_H6_R_CCU) += sun50i-h6-r-ccu.o
+obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
+obj-$(CONFIG_SUN4I_A10_CCU) += sun4i-a10-ccu.o
+obj-$(CONFIG_SUN5I_CCU) += sun5i-ccu.o
+obj-$(CONFIG_SUN6I_A31_CCU) += sun6i-a31-ccu.o
+obj-$(CONFIG_SUN8I_A23_CCU) += sun8i-a23-ccu.o
+obj-$(CONFIG_SUN8I_A33_CCU) += sun8i-a33-ccu.o
+obj-$(CONFIG_SUN8I_A83T_CCU) += sun8i-a83t-ccu.o
+obj-$(CONFIG_SUN8I_H3_CCU) += sun8i-h3-ccu.o
+obj-$(CONFIG_SUN8I_R40_CCU) += sun8i-r40-ccu.o
+obj-$(CONFIG_SUN8I_V3S_CCU) += sun8i-v3s-ccu.o
+obj-$(CONFIG_SUN8I_DE2_CCU) += sun8i-de2-ccu.o
+obj-$(CONFIG_SUN8I_R_CCU) += sun8i-r-ccu.o
+obj-$(CONFIG_SUN9I_A80_CCU) += sun9i-a80-ccu.o
+obj-$(CONFIG_SUN9I_A80_CCU) += sun9i-a80-de-ccu.o
+obj-$(CONFIG_SUN9I_A80_CCU) += sun9i-a80-usb-ccu.o
+
+suniv-f1c100s-ccu-y += ccu-suniv-f1c100s.o
+sun20i-d1-ccu-y += ccu-sun20i-d1.o
+sun20i-d1-r-ccu-y += ccu-sun20i-d1-r.o
+sun50i-a64-ccu-y += ccu-sun50i-a64.o
+sun50i-a100-ccu-y += ccu-sun50i-a100.o
+sun50i-a100-r-ccu-y += ccu-sun50i-a100-r.o
+sun50i-h6-ccu-y += ccu-sun50i-h6.o
+sun50i-h6-r-ccu-y += ccu-sun50i-h6-r.o
+sun50i-h616-ccu-y += ccu-sun50i-h616.o
+sun4i-a10-ccu-y += ccu-sun4i-a10.o
+sun5i-ccu-y += ccu-sun5i.o
+sun6i-a31-ccu-y += ccu-sun6i-a31.o
+sun8i-a23-ccu-y += ccu-sun8i-a23.o
+sun8i-a33-ccu-y += ccu-sun8i-a33.o
+sun8i-a83t-ccu-y += ccu-sun8i-a83t.o
+sun8i-h3-ccu-y += ccu-sun8i-h3.o
+sun8i-r40-ccu-y += ccu-sun8i-r40.o
+sun8i-v3s-ccu-y += ccu-sun8i-v3s.o
+sun8i-de2-ccu-y += ccu-sun8i-de2.o
+sun8i-r-ccu-y += ccu-sun8i-r.o
+sun9i-a80-ccu-y += ccu-sun9i-a80.o
+sun9i-a80-de-ccu-y += ccu-sun9i-a80-de.o
+sun9i-a80-usb-ccu-y += ccu-sun9i-a80-usb.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
new file mode 100644
index 000000000000..9d3ffd3fb2c1
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+
+#include "ccu-sun20i-d1-r.h"
+
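+/* All four parents are external inputs referenced by name from the device tree. */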
+static const struct clk_parent_data r_ahb_apb0_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .fw_name = "pll-periph" },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX(r_ahb_clk, "r-ahb",
+ r_ahb_apb0_parents, 0x000,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ 0);
+static const struct clk_hw *r_ahb_hw = &r_ahb_clk.common.hw;
+
+static SUNXI_CCU_MP_DATA_WITH_MUX(r_apb0_clk, "r-apb0",
+ r_ahb_apb0_parents, 0x00c,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ 0);
+static const struct clk_hw *r_apb0_hw = &r_apb0_clk.common.hw;
+
+static SUNXI_CCU_GATE_HWS(bus_r_timer_clk, "bus-r-timer", &r_apb0_hw,
+ 0x11c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_r_twd_clk, "bus-r-twd", &r_apb0_hw,
+ 0x12c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_r_ppu_clk, "bus-r-ppu", &r_apb0_hw,
+ 0x1ac, BIT(0), 0);
+
+static const struct clk_parent_data r_ir_rx_parents[] = {
+ { .fw_name = "losc" },
+ { .fw_name = "hosc" },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(r_ir_rx_clk, "r-ir-rx",
+ r_ir_rx_parents, 0x1c0,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_r_ir_rx_clk, "bus-r-ir-rx", &r_apb0_hw,
+ 0x1cc, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_r_rtc_clk, "bus-r-rtc", &r_ahb_hw,
+ 0x20c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_r_cpucfg_clk, "bus-r-cpucfg", &r_apb0_hw,
+ 0x22c, BIT(0), 0);
+
+static struct ccu_common *sun20i_d1_r_ccu_clks[] = {
+ &r_ahb_clk.common,
+ &r_apb0_clk.common,
+ &bus_r_timer_clk.common,
+ &bus_r_twd_clk.common,
+ &bus_r_ppu_clk.common,
+ &r_ir_rx_clk.common,
+ &bus_r_ir_rx_clk.common,
+ &bus_r_rtc_clk.common,
+ &bus_r_cpucfg_clk.common,
+};
+
+static struct clk_hw_onecell_data sun20i_d1_r_hw_clks = {
+ .num = CLK_NUMBER,
+ .hws = {
+ [CLK_R_AHB] = &r_ahb_clk.common.hw,
+ [CLK_R_APB0] = &r_apb0_clk.common.hw,
+ [CLK_BUS_R_TIMER] = &bus_r_timer_clk.common.hw,
+ [CLK_BUS_R_TWD] = &bus_r_twd_clk.common.hw,
+ [CLK_BUS_R_PPU] = &bus_r_ppu_clk.common.hw,
+ [CLK_R_IR_RX] = &r_ir_rx_clk.common.hw,
+ [CLK_BUS_R_IR_RX] = &bus_r_ir_rx_clk.common.hw,
+ [CLK_BUS_R_RTC] = &bus_r_rtc_clk.common.hw,
+ [CLK_BUS_R_CPUCFG] = &bus_r_cpucfg_clk.common.hw,
+ },
+};
+
+static struct ccu_reset_map sun20i_d1_r_ccu_resets[] = {
+ [RST_BUS_R_TIMER] = { 0x11c, BIT(16) },
+ [RST_BUS_R_TWD] = { 0x12c, BIT(16) },
+ [RST_BUS_R_PPU] = { 0x1ac, BIT(16) },
+ [RST_BUS_R_IR_RX] = { 0x1cc, BIT(16) },
+ [RST_BUS_R_RTC] = { 0x20c, BIT(16) },
+ [RST_BUS_R_CPUCFG] = { 0x22c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun20i_d1_r_ccu_desc = {
+ .ccu_clks = sun20i_d1_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun20i_d1_r_ccu_clks),
+
+ .hw_clks = &sun20i_d1_r_hw_clks,
+
+ .resets = sun20i_d1_r_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun20i_d1_r_ccu_resets),
+};
+
+static int sun20i_d1_r_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun20i_d1_r_ccu_desc);
+}
+
+static const struct of_device_id sun20i_d1_r_ccu_ids[] = {
+ { .compatible = "allwinner,sun20i-d1-r-ccu" },
+ { }
+};
+
+static struct platform_driver sun20i_d1_r_ccu_driver = {
+ .probe = sun20i_d1_r_ccu_probe,
+ .driver = {
+ .name = "sun20i-d1-r-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun20i_d1_r_ccu_ids,
+ },
+};
+module_platform_driver(sun20i_d1_r_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.h b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.h
new file mode 100644
index 000000000000..afd4342209ee
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 frank@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _CCU_SUN20I_D1_R_H
+#define _CCU_SUN20I_D1_R_H
+
+#include <dt-bindings/clock/sun20i-d1-r-ccu.h>
+#include <dt-bindings/reset/sun20i-d1-r-ccu.h>
+
+#define CLK_R_APB0 1
+
+#define CLK_NUMBER (CLK_BUS_R_CPUCFG + 1)
+
+#endif /* _CCU_SUN20I_D1_R_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
new file mode 100644
index 000000000000..51058ba4db4d
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -0,0 +1,1390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "../clk.h"
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+
+#include "ccu-sun20i-d1.h"
+
+static const struct clk_parent_data osc24M[] = {
+ { .fw_name = "hosc" }
+};
+
+/*
+ * For the CPU PLL, the output divider is described as "only for testing"
+ * in the user manual. So it's not modelled and forced to 0.
+ */
+#define SUN20I_D1_PLL_CPUX_REG 0x000
+static struct ccu_mult pll_cpux_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .mult = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-cpux", osc24M,
+ &ccu_mult_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/* Some PLLs are input * N / div1 / P. Model them as NKMP with no K */
+#define SUN20I_D1_PLL_DDR0_REG 0x010
+static struct ccu_nkmp pll_ddr0_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x010,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-ddr0", osc24M,
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN20I_D1_PLL_PERIPH0_REG 0x020
+static struct ccu_nm pll_periph0_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x020,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-periph0-4x", osc24M,
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const struct clk_hw *pll_periph0_4x_hws[] = {
+ &pll_periph0_4x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_periph0_2x_clk, "pll-periph0-2x",
+ pll_periph0_4x_hws, 0x020, 16, 3, 0);
+static SUNXI_CCU_M_HWS(pll_periph0_800M_clk, "pll-periph0-800M",
+ pll_periph0_4x_hws, 0x020, 20, 3, 0);
+
+static const struct clk_hw *pll_periph0_2x_hws[] = {
+ &pll_periph0_2x_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_periph0_clk, "pll-periph0",
+ pll_periph0_2x_hws, 2, 1, 0);
+
+static const struct clk_hw *pll_periph0_hws[] = { &pll_periph0_clk.hw };
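+/* "pll-periph0-div3" is modelled as the 2x clock divided by 6 (same rate). */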
+static CLK_FIXED_FACTOR_HWS(pll_periph0_div3_clk, "pll-periph0-div3",
+ pll_periph0_2x_hws, 6, 1, 0);
+
+/*
+ * For Video PLLs, the output divider is described as "only for testing"
+ * in the user manual. So it's not modelled and forced to 0.
+ */
+#define SUN20I_D1_PLL_VIDEO0_REG 0x040
+static struct ccu_nm pll_video0_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x040,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video0-4x", osc24M,
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const struct clk_hw *pll_video0_4x_hws[] = {
+ &pll_video0_4x_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_video0_2x_clk, "pll-video0-2x",
+ pll_video0_4x_hws, 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_video0_clk, "pll-video0",
+ pll_video0_4x_hws, 4, 1, CLK_SET_RATE_PARENT);
+
+#define SUN20I_D1_PLL_VIDEO1_REG 0x048
+static struct ccu_nm pll_video1_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x048,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video1-4x", osc24M,
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const struct clk_hw *pll_video1_4x_hws[] = {
+ &pll_video1_4x_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_video1_2x_clk, "pll-video1-2x",
+ pll_video1_4x_hws, 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_video1_clk, "pll-video1",
+ pll_video1_4x_hws, 4, 1, CLK_SET_RATE_PARENT);
+
+#define SUN20I_D1_PLL_VE_REG 0x058
+static struct ccu_nkmp pll_ve_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x058,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-ve", osc24M,
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * PLL_AUDIO0 has m0, m1 dividers in addition to the usual N, M factors.
+ * Since we only need one frequency from this PLL (22.5792 x 4 == 90.3168 MHz),
+ * ignore them for now. Enforce the default for them, which is m1 = 0, m0 = 0.
+ * The M factor must be an even number to produce a 50% duty cycle output.
+ */
+#define SUN20I_D1_PLL_AUDIO0_REG 0x078
+static struct ccu_sdm_setting pll_audio0_sdm_table[] = {
+ { .rate = 90316800, .pattern = 0xc001288d, .m = 6, .n = 22 },
+};
+
+static struct ccu_nm pll_audio0_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(16, 6),
+ .sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
+ 0x178, BIT(31)),
+ .common = {
+ .reg = 0x078,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio0-4x", osc24M,
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const struct clk_hw *pll_audio0_4x_hws[] = {
+ &pll_audio0_4x_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_audio0_2x_clk, "pll-audio0-2x",
+ pll_audio0_4x_hws, 2, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_audio0_clk, "pll-audio0",
+ pll_audio0_4x_hws, 4, 1, 0);
+
+/*
+ * PLL_AUDIO1 doesn't need Fractional-N. The output is usually 614.4 MHz for
+ * audio. The ADC or DAC should divide the PLL output further to 24.576 MHz.
+ */
+#define SUN20I_D1_PLL_AUDIO1_REG 0x080
+static struct ccu_nm pll_audio1_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1),
+ .common = {
+ .reg = 0x080,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio1", osc24M,
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const struct clk_hw *pll_audio1_hws[] = {
+ &pll_audio1_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_audio1_div2_clk, "pll-audio1-div2",
+ pll_audio1_hws, 0x080, 16, 3, 0);
+static SUNXI_CCU_M_HWS(pll_audio1_div5_clk, "pll-audio1-div5",
+ pll_audio1_hws, 0x080, 20, 3, 0);
+
+/*
+ * The CPUX gate is not modelled - it is in a separate register (0x504)
+ * and has a special key field. The clock does not need to be ungated anyway.
+ */
+static const struct clk_parent_data cpux_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_cpux_clk.common.hw },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_periph0_800M_clk.common.hw },
+};
+static SUNXI_CCU_MUX_DATA(cpux_clk, "cpux", cpux_parents,
+ 0x500, 24, 3, CLK_SET_RATE_PARENT);
+
+static const struct clk_hw *cpux_hws[] = { &cpux_clk.common.hw };
+static SUNXI_CCU_M_HWS(cpux_axi_clk, "cpux-axi",
+ cpux_hws, 0x500, 0, 2, 0);
+static SUNXI_CCU_M_HWS(cpux_apb_clk, "cpux-apb",
+ cpux_hws, 0x500, 8, 2, 0);
+
+static const struct clk_parent_data psi_ahb_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX(psi_ahb_clk, "psi-ahb", psi_ahb_parents, 0x510,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const struct clk_parent_data apb0_apb1_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .hw = &psi_ahb_clk.common.hw },
+ { .hw = &pll_periph0_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX(apb0_clk, "apb0", apb0_apb1_parents, 0x520,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX(apb1_clk, "apb1", apb0_apb1_parents, 0x524,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const struct clk_hw *psi_ahb_hws[] = { &psi_ahb_clk.common.hw };
+static const struct clk_hw *apb0_hws[] = { &apb0_clk.common.hw };
+static const struct clk_hw *apb1_hws[] = { &apb1_clk.common.hw };
+
+static const struct clk_hw *de_di_g2d_parents[] = {
+ &pll_periph0_2x_clk.common.hw,
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+ &pll_audio1_div2_clk.common.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(de_clk, "de", de_di_g2d_parents, 0x600,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_de_clk, "bus-de", psi_ahb_hws,
+ 0x60c, BIT(0), 0);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(di_clk, "di", de_di_g2d_parents, 0x620,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_di_clk, "bus-di", psi_ahb_hws,
+ 0x62c, BIT(0), 0);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(g2d_clk, "g2d", de_di_g2d_parents, 0x630,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_g2d_clk, "bus-g2d", psi_ahb_hws,
+ 0x63c, BIT(0), 0);
+
+static const struct clk_parent_data ce_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_periph0_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x680,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ce_clk, "bus-ce", psi_ahb_hws,
+ 0x68c, BIT(0), 0);
+
+static const struct clk_hw *ve_parents[] = {
+ &pll_ve_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
+ 0, 5, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_ve_clk, "bus-ve", psi_ahb_hws,
+ 0x69c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dma_clk, "bus-dma", psi_ahb_hws,
+ 0x70c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_msgbox0_clk, "bus-msgbox0", psi_ahb_hws,
+ 0x71c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_msgbox1_clk, "bus-msgbox1", psi_ahb_hws,
+ 0x71c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_msgbox2_clk, "bus-msgbox2", psi_ahb_hws,
+ 0x71c, BIT(2), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_spinlock_clk, "bus-spinlock", psi_ahb_hws,
+ 0x72c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_hstimer_clk, "bus-hstimer", psi_ahb_hws,
+ 0x73c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(avs_clk, "avs", osc24M,
+ 0x740, BIT(31), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dbg_clk, "bus-dbg", psi_ahb_hws,
+ 0x78c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_pwm_clk, "bus-pwm", apb0_hws,
+ 0x7ac, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_iommu_clk, "bus-iommu", apb0_hws,
+ 0x7bc, BIT(0), 0);
+
+static const struct clk_hw *dram_parents[] = {
+ &pll_ddr0_clk.common.hw,
+ &pll_audio1_div2_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+ &pll_periph0_800M_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(dram_clk, "dram", dram_parents, 0x800,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), CLK_IS_CRITICAL);
+
+static CLK_FIXED_FACTOR_HW(mbus_clk, "mbus",
+ &dram_clk.common.hw, 4, 1, 0);
+
+static const struct clk_hw *mbus_hws[] = { &mbus_clk.hw };
+
+static SUNXI_CCU_GATE_HWS(mbus_dma_clk, "mbus-dma", mbus_hws,
+ 0x804, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(mbus_ve_clk, "mbus-ve", mbus_hws,
+ 0x804, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(mbus_ce_clk, "mbus-ce", mbus_hws,
+ 0x804, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(mbus_tvin_clk, "mbus-tvin", mbus_hws,
+ 0x804, BIT(7), 0);
+static SUNXI_CCU_GATE_HWS(mbus_csi_clk, "mbus-csi", mbus_hws,
+ 0x804, BIT(8), 0);
+static SUNXI_CCU_GATE_HWS(mbus_g2d_clk, "mbus-g2d", mbus_hws,
+ 0x804, BIT(10), 0);
+static SUNXI_CCU_GATE_HWS(mbus_riscv_clk, "mbus-riscv", mbus_hws,
+ 0x804, BIT(11), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dram_clk, "bus-dram", psi_ahb_hws,
+ 0x80c, BIT(0), CLK_IS_CRITICAL);
+
+static const struct clk_parent_data mmc0_mmc1_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data mmc2_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_periph0_800M_clk.common.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws,
+ 0x84c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_mmc1_clk, "bus-mmc1", psi_ahb_hws,
+ 0x84c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_mmc2_clk, "bus-mmc2", psi_ahb_hws,
+ 0x84c, BIT(2), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_uart0_clk, "bus-uart0", apb1_hws,
+ 0x90c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart1_clk, "bus-uart1", apb1_hws,
+ 0x90c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart2_clk, "bus-uart2", apb1_hws,
+ 0x90c, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart3_clk, "bus-uart3", apb1_hws,
+ 0x90c, BIT(3), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart4_clk, "bus-uart4", apb1_hws,
+ 0x90c, BIT(4), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart5_clk, "bus-uart5", apb1_hws,
+ 0x90c, BIT(5), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_i2c0_clk, "bus-i2c0", apb1_hws,
+ 0x91c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c1_clk, "bus-i2c1", apb1_hws,
+ 0x91c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c2_clk, "bus-i2c2", apb1_hws,
+ 0x91c, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c3_clk, "bus-i2c3", apb1_hws,
+ 0x91c, BIT(3), 0);
+
+static const struct clk_parent_data spi_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+ { .hw = &pll_audio1_div5_clk.common.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(spi0_clk, "spi0", spi_parents, 0x940,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(spi1_clk, "spi1", spi_parents, 0x944,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_spi0_clk, "bus-spi0", psi_ahb_hws,
+ 0x96c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_spi1_clk, "bus-spi1", psi_ahb_hws,
+ 0x96c, BIT(1), 0);
+
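+/* pll-periph0 / 24 = 25 MHz for the EMAC PHY, assuming the usual 600 MHz rate. */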
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(emac_25M_clk, "emac-25M", pll_periph0_hws,
+ 0x970, BIT(31) | BIT(30), 24, 0);
+
+static SUNXI_CCU_GATE_HWS(bus_emac_clk, "bus-emac", psi_ahb_hws,
+ 0x97c, BIT(0), 0);
+
+static const struct clk_parent_data ir_tx_ledc_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(ir_tx_clk, "ir-tx", ir_tx_ledc_parents, 0x9c0,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ir_tx_clk, "bus-ir-tx", apb0_hws,
+ 0x9cc, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_gpadc_clk, "bus-gpadc", apb0_hws,
+ 0x9ec, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ths_clk, "bus-ths", apb0_hws,
+ 0x9fc, BIT(0), 0);
+
+static const struct clk_hw *i2s_spdif_tx_parents[] = {
+ &pll_audio0_clk.hw,
+ &pll_audio0_4x_clk.common.hw,
+ &pll_audio1_div2_clk.common.hw,
+ &pll_audio1_div5_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(i2s0_clk, "i2s0", i2s_spdif_tx_parents, 0xa10,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(i2s1_clk, "i2s1", i2s_spdif_tx_parents, 0xa14,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(i2s2_clk, "i2s2", i2s_spdif_tx_parents, 0xa18,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_hw *i2s2_asrc_parents[] = {
+ &pll_audio0_4x_clk.common.hw,
+ &pll_periph0_clk.hw,
+ &pll_audio1_div2_clk.common.hw,
+ &pll_audio1_div5_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(i2s2_asrc_clk, "i2s2-asrc", i2s2_asrc_parents, 0xa1c,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_i2s0_clk, "bus-i2s0", apb0_hws,
+ 0xa20, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2s1_clk, "bus-i2s1", apb0_hws,
+ 0xa20, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2s2_clk, "bus-i2s2", apb0_hws,
+ 0xa20, BIT(2), 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(spdif_tx_clk, "spdif-tx", i2s_spdif_tx_parents, 0xa24,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_hw *spdif_rx_parents[] = {
+ &pll_periph0_clk.hw,
+ &pll_audio1_div2_clk.common.hw,
+ &pll_audio1_div5_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(spdif_rx_clk, "spdif-rx", spdif_rx_parents, 0xa28,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_spdif_clk, "bus-spdif", apb0_hws,
+ 0xa2c, BIT(0), 0);
+
+static const struct clk_hw *dmic_codec_parents[] = {
+ &pll_audio0_clk.hw,
+ &pll_audio1_div2_clk.common.hw,
+ &pll_audio1_div5_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(dmic_clk, "dmic", dmic_codec_parents, 0xa40,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dmic_clk, "bus-dmic", apb0_hws,
+ 0xa4c, BIT(0), 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(audio_dac_clk, "audio-dac", dmic_codec_parents, 0xa50,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(audio_adc_clk, "audio-adc", dmic_codec_parents, 0xa54,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_audio_clk, "bus-audio", apb0_hws,
+ 0xa5c, BIT(0), 0);
+
+/*
+ * The first parent is a 48 MHz input clock divided by 4. That 48 MHz clock is
+ * a 2x multiplier from osc24M synchronized by pll-periph0, and is also used by
+ * the OHCI module.
+ */
+static const struct clk_parent_data usb_ohci_parents[] = {
+ { .hw = &pll_periph0_clk.hw },
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+};
+static const struct ccu_mux_fixed_prediv usb_ohci_predivs[] = {
+ { .index = 0, .div = 50 },
+ { .index = 1, .div = 2 },
+};
+
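+/*
+ * Both predivs normalise their parent to 12 MHz: pll-periph0 / 50 (assuming
+ * the usual 600 MHz rate) and hosc / 2.
+ */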
+static struct ccu_mux usb_ohci0_clk = {
+ .enable = BIT(31),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = usb_ohci_predivs,
+ .n_predivs = ARRAY_SIZE(usb_ohci_predivs),
+ },
+ .common = {
+ .reg = 0xa70,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("usb-ohci0",
+ usb_ohci_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static struct ccu_mux usb_ohci1_clk = {
+ .enable = BIT(31),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = usb_ohci_predivs,
+ .n_predivs = ARRAY_SIZE(usb_ohci_predivs),
+ },
+ .common = {
+ .reg = 0xa74,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("usb-ohci1",
+ usb_ohci_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE_HWS(bus_ohci0_clk, "bus-ohci0", psi_ahb_hws,
+ 0xa8c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_ohci1_clk, "bus-ohci1", psi_ahb_hws,
+ 0xa8c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_ehci0_clk, "bus-ehci0", psi_ahb_hws,
+ 0xa8c, BIT(4), 0);
+static SUNXI_CCU_GATE_HWS(bus_ehci1_clk, "bus-ehci1", psi_ahb_hws,
+ 0xa8c, BIT(5), 0);
+static SUNXI_CCU_GATE_HWS(bus_otg_clk, "bus-otg", psi_ahb_hws,
+ 0xa8c, BIT(8), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_lradc_clk, "bus-lradc", apb0_hws,
+ 0xa9c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dpss_top_clk, "bus-dpss-top", psi_ahb_hws,
+ 0xabc, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(hdmi_24M_clk, "hdmi-24M", osc24M,
+ 0xb04, BIT(31), 0);
+
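+/* 1.2 GHz pll-periph0-2x / 36621 ≈ 32768 Hz, assuming the usual PLL rate. */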
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(hdmi_cec_32k_clk, "hdmi-cec-32k",
+ pll_periph0_2x_hws,
+ 0xb10, BIT(30), 36621, 0);
+
+static const struct clk_parent_data hdmi_cec_parents[] = {
+ { .fw_name = "losc" },
+ { .hw = &hdmi_cec_32k_clk.common.hw },
+};
+static SUNXI_CCU_MUX_DATA_WITH_GATE(hdmi_cec_clk, "hdmi-cec", hdmi_cec_parents, 0xb10,
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_hdmi_clk, "bus-hdmi", psi_ahb_hws,
+ 0xb1c, BIT(0), 0);
+
+static const struct clk_parent_data mipi_dsi_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_video0_2x_clk.hw },
+ { .hw = &pll_video1_2x_clk.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(mipi_dsi_clk, "mipi-dsi", mipi_dsi_parents, 0xb24,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_mipi_dsi_clk, "bus-mipi-dsi", psi_ahb_hws,
+ 0xb4c, BIT(0), 0);
+
+static const struct clk_hw *tcon_tve_parents[] = {
+ &pll_video0_clk.hw,
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_clk.hw,
+ &pll_video1_4x_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+ &pll_audio1_div2_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(tcon_lcd0_clk, "tcon-lcd0", tcon_tve_parents, 0xb60,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_tcon_lcd0_clk, "bus-tcon-lcd0", psi_ahb_hws,
+ 0xb7c, BIT(0), 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(tcon_tv_clk, "tcon-tv", tcon_tve_parents, 0xb80,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_tcon_tv_clk, "bus-tcon-tv", psi_ahb_hws,
+ 0xb9c, BIT(0), 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(tve_clk, "tve", tcon_tve_parents, 0xbb0,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_tve_top_clk, "bus-tve-top", psi_ahb_hws,
+ 0xbbc, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_tve_clk, "bus-tve", psi_ahb_hws,
+ 0xbbc, BIT(1), 0);
+
+static const struct clk_parent_data tvd_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_video0_clk.hw },
+ { .hw = &pll_video1_clk.hw },
+ { .hw = &pll_periph0_clk.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(tvd_clk, "tvd", tvd_parents, 0xbc0,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_tvd_top_clk, "bus-tvd-top", psi_ahb_hws,
+ 0xbdc, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_tvd_clk, "bus-tvd", psi_ahb_hws,
+ 0xbdc, BIT(1), 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(ledc_clk, "ledc", ir_tx_ledc_parents, 0xbf0,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ledc_clk, "bus-ledc", psi_ahb_hws,
+ 0xbfc, BIT(0), 0);
+
+static const struct clk_hw *csi_top_parents[] = {
+ &pll_periph0_2x_clk.common.hw,
+ &pll_video0_2x_clk.hw,
+ &pll_video1_2x_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(csi_top_clk, "csi-top", csi_top_parents, 0xc04,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data csi_mclk_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_video0_clk.hw },
+ { .hw = &pll_video1_clk.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+ { .hw = &pll_audio1_div5_clk.common.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents, 0xc08,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_csi_clk, "bus-csi", psi_ahb_hws,
+ 0xc1c, BIT(0), 0);
+
+static const struct clk_parent_data tpadc_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_audio0_clk.hw },
+};
+static SUNXI_CCU_MUX_DATA_WITH_GATE(tpadc_clk, "tpadc", tpadc_parents, 0xc50,
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_tpadc_clk, "bus-tpadc", apb0_hws,
+ 0xc5c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_tzma_clk, "bus-tzma", apb0_hws,
+ 0xc6c, BIT(0), 0);
+
+static const struct clk_parent_data dsp_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(dsp_clk, "dsp", dsp_parents, 0xc70,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dsp_cfg_clk, "bus-dsp-cfg", psi_ahb_hws,
+ 0xc7c, BIT(1), 0);
+
+/*
+ * The RISC-V gate is not modelled: it lives in a separate register
+ * (0xd04) and is protected by a key field. The clock is marked
+ * critical anyway.
+ */
+static const struct clk_parent_data riscv_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_800M_clk.common.hw },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_cpux_clk.common.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX(riscv_clk, "riscv", riscv_parents, 0xd00,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+/* The riscv-axi clk must be divided by at least 2. */
+static struct clk_div_table riscv_axi_table[] = {
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 3 },
+ { .val = 3, .div = 4 },
+ { /* Sentinel */ }
+};
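+/* The field encodes divider - 1, so divide-by-1 (value 0) is left out. */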
+static SUNXI_CCU_DIV_TABLE_HW(riscv_axi_clk, "riscv-axi", &riscv_clk.common.hw,
+ 0xd00, 8, 2, riscv_axi_table, 0);
+
+static SUNXI_CCU_GATE_HWS(bus_riscv_cfg_clk, "bus-riscv-cfg", psi_ahb_hws,
+ 0xd0c, BIT(0), CLK_IS_CRITICAL);
+
+static SUNXI_CCU_GATE_DATA(fanout_24M_clk, "fanout-24M", osc24M,
+ 0xf30, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA_WITH_PREDIV(fanout_12M_clk, "fanout-12M", osc24M,
+ 0xf30, BIT(1), 2, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_16M_clk, "fanout-16M", pll_periph0_2x_hws,
+ 0xf30, BIT(2), 75, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_25M_clk, "fanout-25M", pll_periph0_hws,
+ 0xf30, BIT(3), 24, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_32k_clk, "fanout-32k", pll_periph0_2x_hws,
+ 0xf30, BIT(4), 36621, 0);
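+/*
+ * Assuming the usual 600 MHz pll-periph0 rate, the fixed predividers
+ * above yield 1.2 GHz / 75 = 16 MHz, 600 MHz / 24 = 25 MHz and
+ * 1.2 GHz / 36621 = ~32768 Hz.
+ */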
+
+/*
+ * This clock has a second (N) divider, which is not modelled here;
+ * the probe routine forces it to 0.
+ */
+#define SUN20I_D1_FANOUT_27M_REG 0xf34
+static const struct clk_hw *fanout_27M_parents[] = {
+ &pll_video0_clk.hw,
+ &pll_video1_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(fanout_27M_clk, "fanout-27M", fanout_27M_parents, 0xf34,
+ 0, 5, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_HWS_WITH_GATE(fanout_pclk_clk, "fanout-pclk", apb0_hws, 0xf38,
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_hw *fanout_parents[] = {
+ &fanout_32k_clk.common.hw,
+ &fanout_12M_clk.common.hw,
+ &fanout_16M_clk.common.hw,
+ &fanout_24M_clk.common.hw,
+ &fanout_25M_clk.common.hw,
+ &fanout_27M_clk.common.hw,
+ &fanout_pclk_clk.common.hw,
+};
+static SUNXI_CCU_MUX_HW_WITH_GATE(fanout0_clk, "fanout0", fanout_parents, 0xf3c,
+ 0, 3, /* mux */
+ BIT(21), /* gate */
+ 0);
+static SUNXI_CCU_MUX_HW_WITH_GATE(fanout1_clk, "fanout1", fanout_parents, 0xf3c,
+ 3, 3, /* mux */
+ BIT(22), /* gate */
+ 0);
+static SUNXI_CCU_MUX_HW_WITH_GATE(fanout2_clk, "fanout2", fanout_parents, 0xf3c,
+ 6, 3, /* mux */
+ BIT(23), /* gate */
+ 0);
+
+static struct ccu_common *sun20i_d1_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_periph0_4x_clk.common,
+ &pll_periph0_2x_clk.common,
+ &pll_periph0_800M_clk.common,
+ &pll_video0_4x_clk.common,
+ &pll_video1_4x_clk.common,
+ &pll_ve_clk.common,
+ &pll_audio0_4x_clk.common,
+ &pll_audio1_clk.common,
+ &pll_audio1_div2_clk.common,
+ &pll_audio1_div5_clk.common,
+ &cpux_clk.common,
+ &cpux_axi_clk.common,
+ &cpux_apb_clk.common,
+ &psi_ahb_clk.common,
+ &apb0_clk.common,
+ &apb1_clk.common,
+ &de_clk.common,
+ &bus_de_clk.common,
+ &di_clk.common,
+ &bus_di_clk.common,
+ &g2d_clk.common,
+ &bus_g2d_clk.common,
+ &ce_clk.common,
+ &bus_ce_clk.common,
+ &ve_clk.common,
+ &bus_ve_clk.common,
+ &bus_dma_clk.common,
+ &bus_msgbox0_clk.common,
+ &bus_msgbox1_clk.common,
+ &bus_msgbox2_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_hstimer_clk.common,
+ &avs_clk.common,
+ &bus_dbg_clk.common,
+ &bus_pwm_clk.common,
+ &bus_iommu_clk.common,
+ &dram_clk.common,
+ &mbus_dma_clk.common,
+ &mbus_ve_clk.common,
+ &mbus_ce_clk.common,
+ &mbus_tvin_clk.common,
+ &mbus_csi_clk.common,
+ &mbus_g2d_clk.common,
+ &mbus_riscv_clk.common,
+ &bus_dram_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &bus_uart5_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_i2c3_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &emac_25M_clk.common,
+ &bus_emac_clk.common,
+ &ir_tx_clk.common,
+ &bus_ir_tx_clk.common,
+ &bus_gpadc_clk.common,
+ &bus_ths_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &i2s2_asrc_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &spdif_tx_clk.common,
+ &spdif_rx_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_dac_clk.common,
+ &audio_adc_clk.common,
+ &bus_audio_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_ohci1_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_otg_clk.common,
+ &bus_lradc_clk.common,
+ &bus_dpss_top_clk.common,
+ &hdmi_24M_clk.common,
+ &hdmi_cec_32k_clk.common,
+ &hdmi_cec_clk.common,
+ &bus_hdmi_clk.common,
+ &mipi_dsi_clk.common,
+ &bus_mipi_dsi_clk.common,
+ &tcon_lcd0_clk.common,
+ &bus_tcon_lcd0_clk.common,
+ &tcon_tv_clk.common,
+ &bus_tcon_tv_clk.common,
+ &tve_clk.common,
+ &bus_tve_top_clk.common,
+ &bus_tve_clk.common,
+ &tvd_clk.common,
+ &bus_tvd_top_clk.common,
+ &bus_tvd_clk.common,
+ &ledc_clk.common,
+ &bus_ledc_clk.common,
+ &csi_top_clk.common,
+ &csi_mclk_clk.common,
+ &bus_csi_clk.common,
+ &tpadc_clk.common,
+ &bus_tpadc_clk.common,
+ &bus_tzma_clk.common,
+ &dsp_clk.common,
+ &bus_dsp_cfg_clk.common,
+ &riscv_clk.common,
+ &riscv_axi_clk.common,
+ &bus_riscv_cfg_clk.common,
+ &fanout_24M_clk.common,
+ &fanout_12M_clk.common,
+ &fanout_16M_clk.common,
+ &fanout_25M_clk.common,
+ &fanout_32k_clk.common,
+ &fanout_27M_clk.common,
+ &fanout_pclk_clk.common,
+ &fanout0_clk.common,
+ &fanout1_clk.common,
+ &fanout2_clk.common,
+};
+
+static struct clk_hw_onecell_data sun20i_d1_hw_clks = {
+ .num = CLK_NUMBER,
+ .hws = {
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH0_4X] = &pll_periph0_4x_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.common.hw,
+ [CLK_PLL_PERIPH0_800M] = &pll_periph0_800M_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.hw,
+ [CLK_PLL_PERIPH0_DIV3] = &pll_periph0_div3_clk.hw,
+ [CLK_PLL_VIDEO0_4X] = &pll_video0_4x_clk.common.hw,
+ [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.hw,
+ [CLK_PLL_VIDEO1_4X] = &pll_video1_4x_clk.common.hw,
+ [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_AUDIO0_4X] = &pll_audio0_4x_clk.common.hw,
+ [CLK_PLL_AUDIO0_2X] = &pll_audio0_2x_clk.hw,
+ [CLK_PLL_AUDIO0] = &pll_audio0_clk.hw,
+ [CLK_PLL_AUDIO1] = &pll_audio1_clk.common.hw,
+ [CLK_PLL_AUDIO1_DIV2] = &pll_audio1_div2_clk.common.hw,
+ [CLK_PLL_AUDIO1_DIV5] = &pll_audio1_div5_clk.common.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_CPUX_AXI] = &cpux_axi_clk.common.hw,
+ [CLK_CPUX_APB] = &cpux_apb_clk.common.hw,
+ [CLK_PSI_AHB] = &psi_ahb_clk.common.hw,
+ [CLK_APB0] = &apb0_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_DI] = &di_clk.common.hw,
+ [CLK_BUS_DI] = &bus_di_clk.common.hw,
+ [CLK_G2D] = &g2d_clk.common.hw,
+ [CLK_BUS_G2D] = &bus_g2d_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MSGBOX0] = &bus_msgbox0_clk.common.hw,
+ [CLK_BUS_MSGBOX1] = &bus_msgbox1_clk.common.hw,
+ [CLK_BUS_MSGBOX2] = &bus_msgbox2_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_BUS_PWM] = &bus_pwm_clk.common.hw,
+ [CLK_BUS_IOMMU] = &bus_iommu_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_MBUS_DMA] = &mbus_dma_clk.common.hw,
+ [CLK_MBUS_VE] = &mbus_ve_clk.common.hw,
+ [CLK_MBUS_CE] = &mbus_ce_clk.common.hw,
+ [CLK_MBUS_TVIN] = &mbus_tvin_clk.common.hw,
+ [CLK_MBUS_CSI] = &mbus_csi_clk.common.hw,
+ [CLK_MBUS_G2D] = &mbus_g2d_clk.common.hw,
+ [CLK_MBUS_RISCV] = &mbus_riscv_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_BUS_UART5] = &bus_uart5_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_EMAC_25M] = &emac_25M_clk.common.hw,
+ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
+ [CLK_IR_TX] = &ir_tx_clk.common.hw,
+ [CLK_BUS_IR_TX] = &bus_ir_tx_clk.common.hw,
+ [CLK_BUS_GPADC] = &bus_gpadc_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_I2S2_ASRC] = &i2s2_asrc_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_SPDIF_TX] = &spdif_tx_clk.common.hw,
+ [CLK_SPDIF_RX] = &spdif_rx_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_AUDIO_DAC] = &audio_dac_clk.common.hw,
+ [CLK_AUDIO_ADC] = &audio_adc_clk.common.hw,
+ [CLK_BUS_AUDIO] = &bus_audio_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_LRADC] = &bus_lradc_clk.common.hw,
+ [CLK_BUS_DPSS_TOP] = &bus_dpss_top_clk.common.hw,
+ [CLK_HDMI_24M] = &hdmi_24M_clk.common.hw,
+ [CLK_HDMI_CEC_32K] = &hdmi_cec_32k_clk.common.hw,
+ [CLK_HDMI_CEC] = &hdmi_cec_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_MIPI_DSI] = &mipi_dsi_clk.common.hw,
+ [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw,
+ [CLK_TCON_LCD0] = &tcon_lcd0_clk.common.hw,
+ [CLK_BUS_TCON_LCD0] = &bus_tcon_lcd0_clk.common.hw,
+ [CLK_TCON_TV] = &tcon_tv_clk.common.hw,
+ [CLK_BUS_TCON_TV] = &bus_tcon_tv_clk.common.hw,
+ [CLK_TVE] = &tve_clk.common.hw,
+ [CLK_BUS_TVE_TOP] = &bus_tve_top_clk.common.hw,
+ [CLK_BUS_TVE] = &bus_tve_clk.common.hw,
+ [CLK_TVD] = &tvd_clk.common.hw,
+ [CLK_BUS_TVD_TOP] = &bus_tvd_top_clk.common.hw,
+ [CLK_BUS_TVD] = &bus_tvd_clk.common.hw,
+ [CLK_LEDC] = &ledc_clk.common.hw,
+ [CLK_BUS_LEDC] = &bus_ledc_clk.common.hw,
+ [CLK_CSI_TOP] = &csi_top_clk.common.hw,
+ [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_TPADC] = &tpadc_clk.common.hw,
+ [CLK_BUS_TPADC] = &bus_tpadc_clk.common.hw,
+ [CLK_BUS_TZMA] = &bus_tzma_clk.common.hw,
+ [CLK_DSP] = &dsp_clk.common.hw,
+ [CLK_BUS_DSP_CFG] = &bus_dsp_cfg_clk.common.hw,
+ [CLK_RISCV] = &riscv_clk.common.hw,
+ [CLK_RISCV_AXI] = &riscv_axi_clk.common.hw,
+ [CLK_BUS_RISCV_CFG] = &bus_riscv_cfg_clk.common.hw,
+ [CLK_FANOUT_24M] = &fanout_24M_clk.common.hw,
+ [CLK_FANOUT_12M] = &fanout_12M_clk.common.hw,
+ [CLK_FANOUT_16M] = &fanout_16M_clk.common.hw,
+ [CLK_FANOUT_25M] = &fanout_25M_clk.common.hw,
+ [CLK_FANOUT_32K] = &fanout_32k_clk.common.hw,
+ [CLK_FANOUT_27M] = &fanout_27M_clk.common.hw,
+ [CLK_FANOUT_PCLK] = &fanout_pclk_clk.common.hw,
+ [CLK_FANOUT0] = &fanout0_clk.common.hw,
+ [CLK_FANOUT1] = &fanout1_clk.common.hw,
+ [CLK_FANOUT2] = &fanout2_clk.common.hw,
+ },
+};
+
+static struct ccu_reset_map sun20i_d1_ccu_resets[] = {
+ [RST_MBUS] = { 0x540, BIT(30) },
+ [RST_BUS_DE] = { 0x60c, BIT(16) },
+ [RST_BUS_DI] = { 0x62c, BIT(16) },
+ [RST_BUS_G2D] = { 0x63c, BIT(16) },
+ [RST_BUS_CE] = { 0x68c, BIT(16) },
+ [RST_BUS_VE] = { 0x69c, BIT(16) },
+ [RST_BUS_DMA] = { 0x70c, BIT(16) },
+ [RST_BUS_MSGBOX0] = { 0x71c, BIT(16) },
+ [RST_BUS_MSGBOX1] = { 0x71c, BIT(17) },
+ [RST_BUS_MSGBOX2] = { 0x71c, BIT(18) },
+ [RST_BUS_SPINLOCK] = { 0x72c, BIT(16) },
+ [RST_BUS_HSTIMER] = { 0x73c, BIT(16) },
+ [RST_BUS_DBG] = { 0x78c, BIT(16) },
+ [RST_BUS_PWM] = { 0x7ac, BIT(16) },
+ [RST_BUS_DRAM] = { 0x80c, BIT(16) },
+ [RST_BUS_MMC0] = { 0x84c, BIT(16) },
+ [RST_BUS_MMC1] = { 0x84c, BIT(17) },
+ [RST_BUS_MMC2] = { 0x84c, BIT(18) },
+ [RST_BUS_UART0] = { 0x90c, BIT(16) },
+ [RST_BUS_UART1] = { 0x90c, BIT(17) },
+ [RST_BUS_UART2] = { 0x90c, BIT(18) },
+ [RST_BUS_UART3] = { 0x90c, BIT(19) },
+ [RST_BUS_UART4] = { 0x90c, BIT(20) },
+ [RST_BUS_UART5] = { 0x90c, BIT(21) },
+ [RST_BUS_I2C0] = { 0x91c, BIT(16) },
+ [RST_BUS_I2C1] = { 0x91c, BIT(17) },
+ [RST_BUS_I2C2] = { 0x91c, BIT(18) },
+ [RST_BUS_I2C3] = { 0x91c, BIT(19) },
+ [RST_BUS_SPI0] = { 0x96c, BIT(16) },
+ [RST_BUS_SPI1] = { 0x96c, BIT(17) },
+ [RST_BUS_EMAC] = { 0x97c, BIT(16) },
+ [RST_BUS_IR_TX] = { 0x9cc, BIT(16) },
+ [RST_BUS_GPADC] = { 0x9ec, BIT(16) },
+ [RST_BUS_THS] = { 0x9fc, BIT(16) },
+ [RST_BUS_I2S0] = { 0xa20, BIT(16) },
+ [RST_BUS_I2S1] = { 0xa20, BIT(17) },
+ [RST_BUS_I2S2] = { 0xa20, BIT(18) },
+ [RST_BUS_SPDIF] = { 0xa2c, BIT(16) },
+ [RST_BUS_DMIC] = { 0xa4c, BIT(16) },
+ [RST_BUS_AUDIO] = { 0xa5c, BIT(16) },
+ [RST_USB_PHY0] = { 0xa70, BIT(30) },
+ [RST_USB_PHY1] = { 0xa74, BIT(30) },
+ [RST_BUS_OHCI0] = { 0xa8c, BIT(16) },
+ [RST_BUS_OHCI1] = { 0xa8c, BIT(17) },
+ [RST_BUS_EHCI0] = { 0xa8c, BIT(20) },
+ [RST_BUS_EHCI1] = { 0xa8c, BIT(21) },
+ [RST_BUS_OTG] = { 0xa8c, BIT(24) },
+ [RST_BUS_LRADC] = { 0xa9c, BIT(16) },
+ [RST_BUS_DPSS_TOP] = { 0xabc, BIT(16) },
+ [RST_BUS_HDMI_MAIN] = { 0xb1c, BIT(16) },
+ [RST_BUS_HDMI_SUB] = { 0xb1c, BIT(17) },
+ [RST_BUS_MIPI_DSI] = { 0xb4c, BIT(16) },
+ [RST_BUS_TCON_LCD0] = { 0xb7c, BIT(16) },
+ [RST_BUS_TCON_TV] = { 0xb9c, BIT(16) },
+ [RST_BUS_LVDS0] = { 0xbac, BIT(16) },
+ [RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) },
+ [RST_BUS_TVE] = { 0xbbc, BIT(17) },
+ [RST_BUS_TVD_TOP] = { 0xbdc, BIT(16) },
+ [RST_BUS_TVD] = { 0xbdc, BIT(17) },
+ [RST_BUS_LEDC] = { 0xbfc, BIT(16) },
+ [RST_BUS_CSI] = { 0xc1c, BIT(16) },
+ [RST_BUS_TPADC] = { 0xc5c, BIT(16) },
+ [RST_DSP] = { 0xc7c, BIT(16) },
+ [RST_BUS_DSP_CFG] = { 0xc7c, BIT(17) },
+ [RST_BUS_DSP_DBG] = { 0xc7c, BIT(18) },
+ [RST_BUS_RISCV_CFG] = { 0xd0c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun20i_d1_ccu_desc = {
+ .ccu_clks = sun20i_d1_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun20i_d1_ccu_clks),
+
+ .hw_clks = &sun20i_d1_hw_clks,
+
+ .resets = sun20i_d1_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun20i_d1_ccu_resets),
+};
+
+static const u32 pll_regs[] = {
+ SUN20I_D1_PLL_CPUX_REG,
+ SUN20I_D1_PLL_DDR0_REG,
+ SUN20I_D1_PLL_PERIPH0_REG,
+ SUN20I_D1_PLL_VIDEO0_REG,
+ SUN20I_D1_PLL_VIDEO1_REG,
+ SUN20I_D1_PLL_VE_REG,
+ SUN20I_D1_PLL_AUDIO0_REG,
+ SUN20I_D1_PLL_AUDIO1_REG,
+};
+
+static const u32 pll_video_regs[] = {
+ SUN20I_D1_PLL_VIDEO0_REG,
+ SUN20I_D1_PLL_VIDEO1_REG,
+};
+
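+/*
+ * Before PLL_CPUX changes rate, this notifier temporarily reparents
+ * the riscv mux to pll-periph0 (bypass_index 4), then switches back
+ * once the new rate has stabilised, waiting delay_us after each
+ * switch.
+ */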
+static struct ccu_mux_nb sun20i_d1_riscv_nb = {
+ .common = &riscv_clk.common,
+ .cm = &riscv_clk.mux,
+ .delay_us = 1,
+ .bypass_index = 4, /* index of pll-periph0 */
+};
+
+static int sun20i_d1_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ u32 val;
+ int i, ret;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ /* Set the enable, LDO enable, and lock enable bits on all PLLs. */
+ for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
+ val = readl(reg + pll_regs[i]);
+ val |= BIT(31) | BIT(30) | BIT(29);
+ writel(val, reg + pll_regs[i]);
+ }
+
+ /* Force PLL_CPUX factor M to 0. */
+ val = readl(reg + SUN20I_D1_PLL_CPUX_REG);
+ val &= ~GENMASK(1, 0);
+ writel(val, reg + SUN20I_D1_PLL_CPUX_REG);
+
+ /*
+ * Force the output divider of video PLLs to 0.
+ *
+ * See the comment before the pll-video0 definition for the reason.
+ */
+ for (i = 0; i < ARRAY_SIZE(pll_video_regs); i++) {
+ val = readl(reg + pll_video_regs[i]);
+ val &= ~BIT(0);
+ writel(val, reg + pll_video_regs[i]);
+ }
+
+ /* Force the m1 and m0 dividers of PLL_AUDIO0 to 0. */
+ val = readl(reg + SUN20I_D1_PLL_AUDIO0_REG);
+ val &= ~(BIT(1) | BIT(0));
+ writel(val, reg + SUN20I_D1_PLL_AUDIO0_REG);
+
+ /* Force fanout-27M factor N to 0. */
+ val = readl(reg + SUN20I_D1_FANOUT_27M_REG);
+ val &= ~GENMASK(9, 8);
+ writel(val, reg + SUN20I_D1_FANOUT_27M_REG);
+
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun20i_d1_ccu_desc);
+ if (ret)
+ return ret;
+
+ /* Reparent CPU during PLL CPUX rate changes */
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun20i_d1_riscv_nb);
+
+ return 0;
+}
+
+static const struct of_device_id sun20i_d1_ccu_ids[] = {
+ { .compatible = "allwinner,sun20i-d1-ccu" },
+ { }
+};
+
+static struct platform_driver sun20i_d1_ccu_driver = {
+ .probe = sun20i_d1_ccu_probe,
+ .driver = {
+ .name = "sun20i-d1-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun20i_d1_ccu_ids,
+ },
+};
+module_platform_driver(sun20i_d1_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.h b/drivers/clk/sunxi-ng/ccu-sun20i-d1.h
new file mode 100644
index 000000000000..e303176f0d4e
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 frank@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _CCU_SUN20I_D1_H_
+#define _CCU_SUN20I_D1_H_
+
+#include <dt-bindings/clock/sun20i-d1-ccu.h>
+#include <dt-bindings/reset/sun20i-d1-ccu.h>
+
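+/* CLK_FANOUT2 is the highest clock index exported by this CCU. */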
+#define CLK_NUMBER (CLK_FANOUT2 + 1)
+
+#endif /* _CCU_SUN20I_D1_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index bd9a8782fec3..c19828f1aa0f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -7,7 +7,9 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -1425,18 +1427,19 @@ static const struct sunxi_ccu_desc sun7i_a20_ccu_desc = {
.num_resets = ARRAY_SIZE(sunxi_a10_a20_ccu_resets),
};
-static void __init sun4i_ccu_init(struct device_node *node,
- const struct sunxi_ccu_desc *desc)
+static int sun4i_a10_ccu_probe(struct platform_device *pdev)
{
+ const struct sunxi_ccu_desc *desc;
void __iomem *reg;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%s: Could not map the clock registers\n",
- of_node_full_name(node));
- return;
- }
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
val = readl(reg + SUN4I_PLL_AUDIO_REG);
@@ -1464,19 +1467,30 @@ static void __init sun4i_ccu_init(struct device_node *node,
val &= ~GENMASK(7, 6);
writel(val | (2 << 6), reg + SUN4I_AHB_REG);
- of_sunxi_ccu_probe(node, reg, desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
-static void __init sun4i_a10_ccu_setup(struct device_node *node)
-{
- sun4i_ccu_init(node, &sun4i_a10_ccu_desc);
-}
-CLK_OF_DECLARE(sun4i_a10_ccu, "allwinner,sun4i-a10-ccu",
- sun4i_a10_ccu_setup);
+static const struct of_device_id sun4i_a10_ccu_ids[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-ccu",
+ .data = &sun4i_a10_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun7i-a20-ccu",
+ .data = &sun7i_a20_ccu_desc,
+ },
+ { }
+};
-static void __init sun7i_a20_ccu_setup(struct device_node *node)
-{
- sun4i_ccu_init(node, &sun7i_a20_ccu_desc);
-}
-CLK_OF_DECLARE(sun7i_a20_ccu, "allwinner,sun7i-a20-ccu",
- sun7i_a20_ccu_setup);
+static struct platform_driver sun4i_a10_ccu_driver = {
+ .probe = sun4i_a10_ccu_probe,
+ .driver = {
+ .name = "sun4i-a10-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun4i_a10_ccu_ids,
+ },
+};
+module_platform_driver(sun4i_a10_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
index 804729e0a208..fddd6c877cec 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
@@ -5,7 +5,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -213,3 +212,6 @@ static struct platform_driver sun50i_a100_r_ccu_driver = {
},
};
module_platform_driver(sun50i_a100_r_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
index 1d475d5a3d91..5f93b5526e13 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -1275,3 +1274,6 @@ static struct platform_driver sun50i_a100_ccu_driver = {
},
};
module_platform_driver(sun50i_a100_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index a8c5a92b7d0c..41519185600a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -5,7 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -980,4 +980,7 @@ static struct platform_driver sun50i_a64_ccu_driver = {
.of_match_table = sun50i_a64_ccu_ids,
},
};
-builtin_platform_driver(sun50i_a64_ccu_driver);
+module_platform_driver(sun50i_a64_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index 54d1f96f4b68..a8c11c0b4e06 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -51,8 +51,6 @@
#define CLK_USB_OHCI1_12M 92
-#define CLK_DRAM 94
-
/* All the DRAM gates are exported */
/* And the DSI and GPU module clock is exported */
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index f30d7eb5424d..712e103382d8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -4,7 +4,8 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -221,30 +222,43 @@ static const struct sunxi_ccu_desc sun50i_h616_r_ccu_desc = {
.num_resets = ARRAY_SIZE(sun50i_h616_r_ccu_resets),
};
-static void __init sunxi_r_ccu_init(struct device_node *node,
- const struct sunxi_ccu_desc *desc)
+static int sun50i_h6_r_ccu_probe(struct platform_device *pdev)
{
+ const struct sunxi_ccu_desc *desc;
void __iomem *reg;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
- of_sunxi_ccu_probe(node, reg, desc);
-}
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
-static void __init sun50i_h6_r_ccu_setup(struct device_node *node)
-{
- sunxi_r_ccu_init(node, &sun50i_h6_r_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
-CLK_OF_DECLARE(sun50i_h6_r_ccu, "allwinner,sun50i-h6-r-ccu",
- sun50i_h6_r_ccu_setup);
-static void __init sun50i_h616_r_ccu_setup(struct device_node *node)
-{
- sunxi_r_ccu_init(node, &sun50i_h616_r_ccu_desc);
-}
-CLK_OF_DECLARE(sun50i_h616_r_ccu, "allwinner,sun50i-h616-r-ccu",
- sun50i_h616_r_ccu_setup);
+static const struct of_device_id sun50i_h6_r_ccu_ids[] = {
+ {
+ .compatible = "allwinner,sun50i-h6-r-ccu",
+ .data = &sun50i_h6_r_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun50i-h616-r-ccu",
+ .data = &sun50i_h616_r_ccu_desc,
+ },
+ { }
+};
+
+static struct platform_driver sun50i_h6_r_ccu_driver = {
+ .probe = sun50i_h6_r_ccu_probe,
+ .driver = {
+ .name = "sun50i-h6-r-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun50i_h6_r_ccu_ids,
+ },
+};
+module_platform_driver(sun50i_h6_r_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index e5672c10d065..1a5e418923f6 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -5,7 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -1254,4 +1254,7 @@ static struct platform_driver sun50i_h6_ccu_driver = {
.of_match_table = sun50i_h6_ccu_ids,
},
};
-builtin_platform_driver(sun50i_h6_ccu_driver);
+module_platform_driver(sun50i_h6_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 22eb18079a15..49a2474cf314 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -7,7 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -1082,17 +1082,15 @@ static const u32 usb2_clk_regs[] = {
SUN50I_H616_USB3_CLK_REG,
};
-static void __init sun50i_h616_ccu_setup(struct device_node *node)
+static int sun50i_h616_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
u32 val;
int i;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map clock registers\n", node);
- return;
- }
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Enable the lock bits and the output enable bits on all PLLs */
for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
@@ -1141,8 +1139,23 @@ static void __init sun50i_h616_ccu_setup(struct device_node *node)
val |= BIT(24);
writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG);
- of_sunxi_ccu_probe(node, reg, &sun50i_h616_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h616_ccu_desc);
}
-CLK_OF_DECLARE(sun50i_h616_ccu, "allwinner,sun50i-h616-ccu",
- sun50i_h616_ccu_setup);
+static const struct of_device_id sun50i_h616_ccu_ids[] = {
+ { .compatible = "allwinner,sun50i-h616-ccu" },
+ { }
+};
+
+static struct platform_driver sun50i_h616_ccu_driver = {
+ .probe = sun50i_h616_ccu_probe,
+ .driver = {
+ .name = "sun50i-h616-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun50i_h616_ccu_ids,
+ },
+};
+module_platform_driver(sun50i_h616_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 3df5c0b41580..0762deffb33c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -9,7 +9,8 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -1226,16 +1227,15 @@ static struct ccu_mux_nb sun6i_a31_cpu_nb = {
.bypass_index = 1, /* index of 24 MHz oscillator */
};
-static void __init sun6i_a31_ccu_setup(struct device_node *node)
+static int sun6i_a31_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
+ int ret;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN6I_A31_PLL_AUDIO_REG);
@@ -1257,10 +1257,30 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
val |= 0x3 << 12;
writel(val, reg + SUN6I_A31_AHB1_REG);
- of_sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun6i_a31_ccu_desc);
+ if (ret)
+ return ret;
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
&sun6i_a31_cpu_nb);
+
+ return 0;
}
-CLK_OF_DECLARE(sun6i_a31_ccu, "allwinner,sun6i-a31-ccu",
- sun6i_a31_ccu_setup);
+
+static const struct of_device_id sun6i_a31_ccu_ids[] = {
+ { .compatible = "allwinner,sun6i-a31-ccu" },
+ { }
+};
+
+static struct platform_driver sun6i_a31_ccu_driver = {
+ .probe = sun6i_a31_ccu_probe,
+ .driver = {
+ .name = "sun6i-a31-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun6i_a31_ccu_ids,
+ },
+};
+module_platform_driver(sun6i_a31_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 577bb235d658..e80cc3864e44 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -5,7 +5,8 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -724,16 +725,14 @@ static const struct sunxi_ccu_desc sun8i_a23_ccu_desc = {
.num_resets = ARRAY_SIZE(sun8i_a23_ccu_resets),
};
-static void __init sun8i_a23_ccu_setup(struct device_node *node)
+static int sun8i_a23_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_A23_PLL_AUDIO_REG);
@@ -745,7 +744,23 @@ static void __init sun8i_a23_ccu_setup(struct device_node *node)
val &= ~BIT(16);
writel(val, reg + SUN8I_A23_PLL_MIPI_REG);
- of_sunxi_ccu_probe(node, reg, &sun8i_a23_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun8i_a23_ccu_desc);
}
-CLK_OF_DECLARE(sun8i_a23_ccu, "allwinner,sun8i-a23-ccu",
- sun8i_a23_ccu_setup);
+
+static const struct of_device_id sun8i_a23_ccu_ids[] = {
+ { .compatible = "allwinner,sun8i-a23-ccu" },
+ { }
+};
+
+static struct platform_driver sun8i_a23_ccu_driver = {
+ .probe = sun8i_a23_ccu_probe,
+ .driver = {
+ .name = "sun8i-a23-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun8i_a23_ccu_ids,
+ },
+};
+module_platform_driver(sun8i_a23_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 8f65cd03f5ac..d12878a1ba9e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -5,7 +5,8 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -784,16 +785,15 @@ static struct ccu_mux_nb sun8i_a33_cpu_nb = {
.bypass_index = 1, /* index of 24 MHz oscillator */
};
-static void __init sun8i_a33_ccu_setup(struct device_node *node)
+static int sun8i_a33_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
+ int ret;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_A33_PLL_AUDIO_REG);
@@ -805,7 +805,9 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
val &= ~BIT(16);
writel(val, reg + SUN8I_A33_PLL_MIPI_REG);
- of_sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun8i_a33_ccu_desc);
+ if (ret)
+ return ret;
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb);
@@ -813,6 +815,24 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
/* Reparent CPU during PLL CPU rate changes */
ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
&sun8i_a33_cpu_nb);
+
+ return 0;
}
-CLK_OF_DECLARE(sun8i_a33_ccu, "allwinner,sun8i-a33-ccu",
- sun8i_a33_ccu_setup);
+
+static const struct of_device_id sun8i_a33_ccu_ids[] = {
+ { .compatible = "allwinner,sun8i-a33-ccu" },
+ { }
+};
+
+static struct platform_driver sun8i_a33_ccu_driver = {
+ .probe = sun8i_a33_ccu_probe,
+ .driver = {
+ .name = "sun8i-a33-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun8i_a33_ccu_ids,
+ },
+};
+module_platform_driver(sun8i_a33_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 3c310aea8cfa..76cbd9e9e89f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -5,7 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -920,4 +920,7 @@ static struct platform_driver sun8i_a83t_ccu_driver = {
.of_match_table = sun8i_a83t_ccu_ids,
},
};
-builtin_platform_driver(sun8i_a83t_ccu_driver);
+module_platform_driver(sun8i_a83t_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 573b5051d305..e7e3ddf4a227 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -5,8 +5,8 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -394,4 +394,7 @@ static struct platform_driver sunxi_de2_clk_driver = {
.of_match_table = sunxi_de2_clk_ids,
},
};
-builtin_platform_driver(sunxi_de2_clk_driver);
+module_platform_driver(sunxi_de2_clk_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index d2fc2903787d..e058cf691aea 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -5,7 +5,9 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -1137,24 +1139,29 @@ static struct ccu_mux_nb sun8i_h3_cpu_nb = {
.bypass_index = 1, /* index of 24 MHz oscillator */
};
-static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
- const struct sunxi_ccu_desc *desc)
+static int sun8i_h3_ccu_probe(struct platform_device *pdev)
{
+ const struct sunxi_ccu_desc *desc;
void __iomem *reg;
+ int ret;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_H3_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
writel(val | (0 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
- of_sunxi_ccu_probe(node, reg, desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
+ if (ret)
+ return ret;
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun8i_h3_pll_cpu_nb);
@@ -1162,18 +1169,31 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
/* Reparent CPU during PLL CPU rate changes */
ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
&sun8i_h3_cpu_nb);
-}
-static void __init sun8i_h3_ccu_setup(struct device_node *node)
-{
- sunxi_h3_h5_ccu_init(node, &sun8i_h3_ccu_desc);
+ return 0;
}
-CLK_OF_DECLARE(sun8i_h3_ccu, "allwinner,sun8i-h3-ccu",
- sun8i_h3_ccu_setup);
-static void __init sun50i_h5_ccu_setup(struct device_node *node)
-{
- sunxi_h3_h5_ccu_init(node, &sun50i_h5_ccu_desc);
-}
-CLK_OF_DECLARE(sun50i_h5_ccu, "allwinner,sun50i-h5-ccu",
- sun50i_h5_ccu_setup);
+static const struct of_device_id sun8i_h3_ccu_ids[] = {
+ {
+ .compatible = "allwinner,sun8i-h3-ccu",
+ .data = &sun8i_h3_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun50i-h5-ccu",
+ .data = &sun50i_h5_ccu_desc,
+ },
+ { }
+};
+
+static struct platform_driver sun8i_h3_ccu_driver = {
+ .probe = sun8i_h3_ccu_probe,
+ .driver = {
+ .name = "sun8i-h3-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun8i_h3_ccu_ids,
+ },
+};
+module_platform_driver(sun8i_h3_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
index d8c38447e11b..e13f3c4b57d0 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
@@ -42,8 +42,6 @@
/* The first bunch of module clocks are exported */
-#define CLK_DRAM 96
-
/* All the DRAM gates are exported */
/* Some more module clocks are exported */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 9e754d1f754a..5b7fab832a52 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -4,7 +4,8 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -254,37 +255,47 @@ static const struct sunxi_ccu_desc sun50i_a64_r_ccu_desc = {
.num_resets = ARRAY_SIZE(sun50i_a64_r_ccu_resets),
};
-static void __init sunxi_r_ccu_init(struct device_node *node,
- const struct sunxi_ccu_desc *desc)
+static int sun8i_r_ccu_probe(struct platform_device *pdev)
{
+ const struct sunxi_ccu_desc *desc;
void __iomem *reg;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
- of_sunxi_ccu_probe(node, reg, desc);
-}
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
-static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
-{
- sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
-CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu",
- sun8i_a83t_r_ccu_setup);
-static void __init sun8i_h3_r_ccu_setup(struct device_node *node)
-{
- sunxi_r_ccu_init(node, &sun8i_h3_r_ccu_desc);
-}
-CLK_OF_DECLARE(sun8i_h3_r_ccu, "allwinner,sun8i-h3-r-ccu",
- sun8i_h3_r_ccu_setup);
+static const struct of_device_id sun8i_r_ccu_ids[] = {
+ {
+ .compatible = "allwinner,sun8i-a83t-r-ccu",
+ .data = &sun8i_a83t_r_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-r-ccu",
+ .data = &sun8i_h3_r_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-r-ccu",
+ .data = &sun50i_a64_r_ccu_desc,
+ },
+ { }
+};
-static void __init sun50i_a64_r_ccu_setup(struct device_node *node)
-{
- sunxi_r_ccu_init(node, &sun50i_a64_r_ccu_desc);
-}
-CLK_OF_DECLARE(sun50i_a64_r_ccu, "allwinner,sun50i-a64-r-ccu",
- sun50i_a64_r_ccu_setup);
+static struct platform_driver sun8i_r_ccu_driver = {
+ .probe = sun8i_r_ccu_probe,
+ .driver = {
+ .name = "sun8i-r-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun8i_r_ccu_ids,
+ },
+};
+module_platform_driver(sun8i_r_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 8bb18d9add05..31eca0d3bc1e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -5,6 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -1371,4 +1372,7 @@ static struct platform_driver sun8i_r40_ccu_driver = {
.of_match_table = sun8i_r40_ccu_ids,
},
};
-builtin_platform_driver(sun8i_r40_ccu_driver);
+module_platform_driver(sun8i_r40_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index ce150f83ab54..87f87d6ea3ad 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -8,7 +8,9 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -805,38 +807,49 @@ static const struct sunxi_ccu_desc sun8i_v3_ccu_desc = {
.num_resets = ARRAY_SIZE(sun8i_v3_ccu_resets),
};
-static void __init sun8i_v3_v3s_ccu_init(struct device_node *node,
- const struct sunxi_ccu_desc *ccu_desc)
+static int sun8i_v3s_ccu_probe(struct platform_device *pdev)
{
+ const struct sunxi_ccu_desc *desc;
void __iomem *reg;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_V3S_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
writel(val, reg + SUN8I_V3S_PLL_AUDIO_REG);
- of_sunxi_ccu_probe(node, reg, ccu_desc);
-}
-
-static void __init sun8i_v3s_ccu_setup(struct device_node *node)
-{
- sun8i_v3_v3s_ccu_init(node, &sun8i_v3s_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
-static void __init sun8i_v3_ccu_setup(struct device_node *node)
-{
- sun8i_v3_v3s_ccu_init(node, &sun8i_v3_ccu_desc);
-}
+static const struct of_device_id sun8i_v3s_ccu_ids[] = {
+ {
+ .compatible = "allwinner,sun8i-v3-ccu",
+ .data = &sun8i_v3_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun8i-v3s-ccu",
+ .data = &sun8i_v3s_ccu_desc,
+ },
+ { }
+};
-CLK_OF_DECLARE(sun8i_v3s_ccu, "allwinner,sun8i-v3s-ccu",
- sun8i_v3s_ccu_setup);
+static struct platform_driver sun8i_v3s_ccu_driver = {
+ .probe = sun8i_v3s_ccu_probe,
+ .driver = {
+ .name = "sun8i-v3s-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun8i_v3s_ccu_ids,
+ },
+};
+module_platform_driver(sun8i_v3s_ccu_driver);
-CLK_OF_DECLARE(sun8i_v3_ccu, "allwinner,sun8i-v3-ccu",
- sun8i_v3_ccu_setup);
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
index 3cde2610f467..f2fe0e1cc3c0 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
@@ -5,7 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -270,4 +270,7 @@ static struct platform_driver sun9i_a80_de_clk_driver = {
.of_match_table = sun9i_a80_de_clk_ids,
},
};
-builtin_platform_driver(sun9i_a80_de_clk_driver);
+module_platform_driver(sun9i_a80_de_clk_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
index 0740e8978ae8..575ae4ccc65f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
@@ -5,7 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -138,4 +138,7 @@ static struct platform_driver sun9i_a80_usb_clk_driver = {
.of_match_table = sun9i_a80_usb_clk_ids,
},
};
-builtin_platform_driver(sun9i_a80_usb_clk_driver);
+module_platform_driver(sun9i_a80_usb_clk_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index d416af29e0d3..730fd8e28014 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -5,7 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -1245,4 +1245,7 @@ static struct platform_driver sun9i_a80_ccu_driver = {
.of_match_table = sun9i_a80_ccu_ids,
},
};
-builtin_platform_driver(sun9i_a80_ccu_driver);
+module_platform_driver(sun9i_a80_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index 61ad7ee91c11..ed097c4f780f 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -6,7 +6,8 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -522,23 +523,24 @@ static struct ccu_mux_nb suniv_cpu_nb = {
.bypass_index = 1, /* index of 24 MHz oscillator */
};
-static void __init suniv_f1c100s_ccu_setup(struct device_node *node)
+static int suniv_f1c100s_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
+ int ret;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 4 */
val = readl(reg + SUNIV_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
writel(val | (3 << 16), reg + SUNIV_PLL_AUDIO_REG);
- of_sunxi_ccu_probe(node, reg, &suniv_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &suniv_ccu_desc);
+ if (ret)
+ return ret;
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&suniv_pll_cpu_nb);
@@ -546,6 +548,24 @@ static void __init suniv_f1c100s_ccu_setup(struct device_node *node)
/* Reparent CPU during PLL CPU rate changes */
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
&suniv_cpu_nb);
+
+ return 0;
}
-CLK_OF_DECLARE(suniv_f1c100s_ccu, "allwinner,suniv-f1c100s-ccu",
- suniv_f1c100s_ccu_setup);
+
+static const struct of_device_id suniv_f1c100s_ccu_ids[] = {
+ { .compatible = "allwinner,suniv-f1c100s-ccu" },
+ { }
+};
+
+static struct platform_driver suniv_f1c100s_ccu_driver = {
+ .probe = suniv_f1c100s_ccu_probe,
+ .driver = {
+ .name = "suniv-f1c100s-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = suniv_f1c100s_ccu_ids,
+ },
+};
+module_platform_driver(suniv_f1c100s_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 31af8b6b5286..8d28a7a079d0 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include "ccu_common.h"
@@ -36,6 +37,7 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
}
+EXPORT_SYMBOL_NS_GPL(ccu_helper_wait_for_lock, SUNXI_CCU);
/*
* This clock notifier is called when the frequency of a PLL clock is
@@ -83,6 +85,7 @@ int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
return clk_notifier_register(pll_nb->common->hw.clk,
&pll_nb->clk_nb);
}
+EXPORT_SYMBOL_NS_GPL(ccu_pll_notifier_register, SUNXI_CCU);
static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
struct device_node *node, void __iomem *reg,
@@ -194,6 +197,7 @@ int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(devm_sunxi_ccu_probe, SUNXI_CCU);
void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
const struct sunxi_ccu_desc *desc)
@@ -211,3 +215,5 @@ void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
kfree(ccu);
}
}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index 4c297089483c..cb10a3ea23f9 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -141,3 +141,4 @@ const struct clk_ops ccu_div_ops = {
.recalc_rate = ccu_div_recalc_rate,
.set_rate = ccu_div_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_div_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
index 6682fde6043c..948e2b0c0c3b 100644
--- a/drivers/clk/sunxi-ng/ccu_div.h
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -108,6 +108,22 @@ struct ccu_div {
_shift, _width, _table, 0, \
_flags)
+#define SUNXI_CCU_DIV_TABLE_HW(_struct, _name, _parent, _reg, \
+ _shift, _width, \
+ _table, _flags) \
+ struct ccu_div _struct = { \
+ .div = _SUNXI_CCU_DIV_TABLE(_shift, _width, \
+ _table), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_HW(_name, \
+ _parent, \
+ &ccu_div_ops, \
+ _flags), \
+ } \
+ }
+
#define SUNXI_CCU_M_WITH_MUX_TABLE_GATE(_struct, _name, \
_parents, _table, \
_reg, \
@@ -166,6 +182,68 @@ struct ccu_div {
SUNXI_CCU_M_WITH_GATE(_struct, _name, _parent, _reg, \
_mshift, _mwidth, 0, _flags)
+#define SUNXI_CCU_M_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
+#define SUNXI_CCU_M_DATA_WITH_MUX(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _flags) \
+ SUNXI_CCU_M_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ 0, _flags)
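+
+/*
+ * A hypothetical example (names and register layout made up for
+ * illustration): an M divider in bits [3:0], a mux in bits [25:24]
+ * and a gate in bit 31 at register 0x950 would be declared as
+ *
+ *   SUNXI_CCU_M_DATA_WITH_MUX_GATE(foo_clk, "foo", foo_parents, 0x950,
+ *                                  0, 4, 24, 2, BIT(31), 0);
+ */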
+
+#define SUNXI_CCU_M_HW_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, \
+ _parents, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
+#define SUNXI_CCU_M_HWS_WITH_GATE(_struct, _name, _parent, _reg, \
+ _mshift, _mwidth, _gate, \
+ _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_HWS(_name, \
+ _parent, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
+#define SUNXI_CCU_M_HWS(_struct, _name, _parent, _reg, _mshift, \
+ _mwidth, _flags) \
+ SUNXI_CCU_M_HWS_WITH_GATE(_struct, _name, _parent, _reg, \
+ _mshift, _mwidth, 0, _flags)
+
static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index 44fcded8b354..b31f3ad946d6 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -18,6 +18,7 @@ bool ccu_frac_helper_is_enabled(struct ccu_common *common,
return !(readl(common->base + common->reg) & cf->enable);
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_is_enabled, SUNXI_CCU);
void ccu_frac_helper_enable(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -33,6 +34,7 @@ void ccu_frac_helper_enable(struct ccu_common *common,
writel(reg & ~cf->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_enable, SUNXI_CCU);
void ccu_frac_helper_disable(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -48,6 +50,7 @@ void ccu_frac_helper_disable(struct ccu_common *common,
writel(reg | cf->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_disable, SUNXI_CCU);
bool ccu_frac_helper_has_rate(struct ccu_common *common,
struct ccu_frac_internal *cf,
@@ -58,6 +61,7 @@ bool ccu_frac_helper_has_rate(struct ccu_common *common,
return (cf->rates[0] == rate) || (cf->rates[1] == rate);
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_has_rate, SUNXI_CCU);
unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -79,6 +83,7 @@ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
return (reg & cf->select) ? cf->rates[1] : cf->rates[0];
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_read_rate, SUNXI_CCU);
int ccu_frac_helper_set_rate(struct ccu_common *common,
struct ccu_frac_internal *cf,
@@ -107,3 +112,4 @@ int ccu_frac_helper_set_rate(struct ccu_common *common,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_set_rate, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index 3d5ca092b08f..a2115a21807d 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -24,6 +24,7 @@ void ccu_gate_helper_disable(struct ccu_common *common, u32 gate)
spin_unlock_irqrestore(common->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_disable, SUNXI_CCU);
static void ccu_gate_disable(struct clk_hw *hw)
{
@@ -49,6 +50,7 @@ int ccu_gate_helper_enable(struct ccu_common *common, u32 gate)
return 0;
}
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_enable, SUNXI_CCU);
static int ccu_gate_enable(struct clk_hw *hw)
{
@@ -64,6 +66,7 @@ int ccu_gate_helper_is_enabled(struct ccu_common *common, u32 gate)
return readl(common->base + common->reg) & gate;
}
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_is_enabled, SUNXI_CCU);
static int ccu_gate_is_enabled(struct clk_hw *hw)
{
@@ -124,3 +127,4 @@ const struct clk_ops ccu_gate_ops = {
.set_rate = ccu_gate_set_rate,
.recalc_rate = ccu_gate_recalc_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_gate_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_gate.h b/drivers/clk/sunxi-ng/ccu_gate.h
index c386689a952b..dc05ce06737a 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.h
+++ b/drivers/clk/sunxi-ng/ccu_gate.h
@@ -53,7 +53,7 @@ struct ccu_gate {
}
/*
- * The following two macros allow the re-use of the data structure
+ * The following macros allow the re-use of the data structure
* holding the parent info.
*/
#define SUNXI_CCU_GATE_HWS(_struct, _name, _parent, _reg, _gate, _flags) \
@@ -68,6 +68,21 @@ struct ccu_gate {
} \
}
+#define SUNXI_CCU_GATE_HWS_WITH_PREDIV(_struct, _name, _parent, _reg, \
+ _gate, _prediv, _flags) \
+ struct ccu_gate _struct = { \
+ .enable = _gate, \
+ .common = { \
+ .reg = _reg, \
+ .prediv = _prediv, \
+ .features = CCU_FEATURE_ALL_PREDIV, \
+ .hw.init = CLK_HW_INIT_HWS(_name, \
+ _parent, \
+ &ccu_gate_ops, \
+ _flags), \
+ } \
+ }
+
#define SUNXI_CCU_GATE_DATA(_struct, _name, _data, _reg, _gate, _flags) \
struct ccu_gate _struct = { \
.enable = _gate, \
@@ -81,6 +96,21 @@ struct ccu_gate {
} \
}
+#define SUNXI_CCU_GATE_DATA_WITH_PREDIV(_struct, _name, _parent, _reg, \
+ _gate, _prediv, _flags) \
+ struct ccu_gate _struct = { \
+ .enable = _gate, \
+ .common = { \
+ .reg = _reg, \
+ .prediv = _prediv, \
+ .features = CCU_FEATURE_ALL_PREDIV, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parent, \
+ &ccu_gate_ops, \
+ _flags), \
+ } \
+ }
+
static inline struct ccu_gate *hw_to_ccu_gate(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 9d3a76604d94..57cf2d615148 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -245,6 +245,7 @@ const struct clk_ops ccu_mp_ops = {
.recalc_rate = ccu_mp_recalc_rate,
.set_rate = ccu_mp_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_mp_ops, SUNXI_CCU);
/*
* Support for MMC timing mode switching
@@ -325,3 +326,4 @@ const struct clk_ops ccu_mp_mmc_ops = {
.recalc_rate = ccu_mp_mmc_recalc_rate,
.set_rate = ccu_mp_mmc_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_mp_mmc_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index b392e0d575b5..6e50f3728fb5 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -82,6 +82,55 @@ struct ccu_mp {
_muxshift, _muxwidth, \
0, _flags)
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
+
+#define SUNXI_CCU_MP_DATA_WITH_MUX(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _flags) \
+ SUNXI_CCU_MP_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ 0, _flags)
+
+#define SUNXI_CCU_MP_HW_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
+
static inline struct ccu_mp *hw_to_ccu_mp(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 7c8cf2e04e94..7bee217ef111 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -170,3 +170,4 @@ const struct clk_ops ccu_mult_ops = {
.recalc_rate = ccu_mult_recalc_rate,
.set_rate = ccu_mult_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_mult_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index 7d75da9a1f2e..2306a1cd83e4 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -64,6 +64,7 @@ unsigned long ccu_mux_helper_apply_prediv(struct ccu_common *common,
{
return parent_rate / ccu_mux_get_prediv(common, cm, parent_index);
}
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_apply_prediv, SUNXI_CCU);
static unsigned long ccu_mux_helper_unapply_prediv(struct ccu_common *common,
struct ccu_mux_internal *cm,
@@ -152,6 +153,7 @@ out:
req->rate = best_rate;
return 0;
}
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_determine_rate, SUNXI_CCU);
u8 ccu_mux_helper_get_parent(struct ccu_common *common,
struct ccu_mux_internal *cm)
@@ -174,6 +176,7 @@ u8 ccu_mux_helper_get_parent(struct ccu_common *common,
return parent;
}
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_get_parent, SUNXI_CCU);
int ccu_mux_helper_set_parent(struct ccu_common *common,
struct ccu_mux_internal *cm,
@@ -195,6 +198,7 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_set_parent, SUNXI_CCU);
static void ccu_mux_disable(struct clk_hw *hw)
{
@@ -251,6 +255,7 @@ const struct clk_ops ccu_mux_ops = {
.determine_rate = __clk_mux_determine_rate,
.recalc_rate = ccu_mux_recalc_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_mux_ops, SUNXI_CCU);
/*
 * This clock notifier is called when the frequency of the parent
@@ -285,3 +290,4 @@ int ccu_mux_notifier_register(struct clk *clk, struct ccu_mux_nb *mux_nb)
return clk_notifier_register(clk, &mux_nb->clk_nb);
}
+EXPORT_SYMBOL_NS_GPL(ccu_mux_notifier_register, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_mux.h b/drivers/clk/sunxi-ng/ccu_mux.h
index e31efc509b3d..2c1811a445b0 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.h
+++ b/drivers/clk/sunxi-ng/ccu_mux.h
@@ -72,6 +72,39 @@ struct ccu_mux {
SUNXI_CCU_MUX_TABLE_WITH_GATE(_struct, _name, _parents, NULL, \
_reg, _shift, _width, 0, _flags)
+#define SUNXI_CCU_MUX_DATA_WITH_GATE(_struct, _name, _parents, _reg, \
+ _shift, _width, _gate, _flags) \
+ struct ccu_mux _struct = { \
+ .enable = _gate, \
+ .mux = _SUNXI_CCU_MUX(_shift, _width), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_mux_ops, \
+ _flags), \
+ } \
+ }
+
+#define SUNXI_CCU_MUX_DATA(_struct, _name, _parents, _reg, \
+ _shift, _width, _flags) \
+ SUNXI_CCU_MUX_DATA_WITH_GATE(_struct, _name, _parents, _reg, \
+ _shift, _width, 0, _flags)
+
+#define SUNXI_CCU_MUX_HW_WITH_GATE(_struct, _name, _parents, _reg, \
+ _shift, _width, _gate, _flags) \
+ struct ccu_mux _struct = { \
+ .enable = _gate, \
+ .mux = _SUNXI_CCU_MUX(_shift, _width), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, \
+ _parents, \
+ &ccu_mux_ops, \
+ _flags), \
+ } \
+ }
+
static inline struct ccu_mux *hw_to_ccu_mux(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index aee68b00f3b2..c4fb82af97e8 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -157,3 +157,4 @@ const struct clk_ops ccu_nk_ops = {
.round_rate = ccu_nk_round_rate,
.set_rate = ccu_nk_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_nk_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index b9cfee0276ea..67da2c189b53 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -206,3 +206,4 @@ const struct clk_ops ccu_nkm_ops = {
.recalc_rate = ccu_nkm_recalc_rate,
.set_rate = ccu_nkm_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_nkm_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index bda87b38c45c..39413cb0985c 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -230,3 +230,4 @@ const struct clk_ops ccu_nkmp_ops = {
.round_rate = ccu_nkmp_round_rate,
.set_rate = ccu_nkmp_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_nkmp_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index e6bcc0a7170c..9ca9257f4426 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -238,3 +238,4 @@ const struct clk_ops ccu_nm_ops = {
.round_rate = ccu_nm_round_rate,
.set_rate = ccu_nm_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_nm_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_phase.c b/drivers/clk/sunxi-ng/ccu_phase.c
index 92ab8bd66427..e4cae2afe9db 100644
--- a/drivers/clk/sunxi-ng/ccu_phase.c
+++ b/drivers/clk/sunxi-ng/ccu_phase.c
@@ -121,3 +121,4 @@ const struct clk_ops ccu_phase_ops = {
.get_phase = ccu_phase_get_phase,
.set_phase = ccu_phase_set_phase,
};
+EXPORT_SYMBOL_NS_GPL(ccu_phase_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c
index 483100e45df3..6577aa18cb01 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.c
+++ b/drivers/clk/sunxi-ng/ccu_reset.c
@@ -75,3 +75,4 @@ const struct reset_control_ops ccu_reset_ops = {
.reset = ccu_reset_reset,
.status = ccu_reset_status,
};
+EXPORT_SYMBOL_NS_GPL(ccu_reset_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
index 79581a1c649a..41937ed0766d 100644
--- a/drivers/clk/sunxi-ng/ccu_sdm.c
+++ b/drivers/clk/sunxi-ng/ccu_sdm.c
@@ -20,6 +20,7 @@ bool ccu_sdm_helper_is_enabled(struct ccu_common *common,
return !!(readl(common->base + sdm->tuning_reg) & sdm->tuning_enable);
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_is_enabled, SUNXI_CCU);
void ccu_sdm_helper_enable(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -49,6 +50,7 @@ void ccu_sdm_helper_enable(struct ccu_common *common,
writel(reg | sdm->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_enable, SUNXI_CCU);
void ccu_sdm_helper_disable(struct ccu_common *common,
struct ccu_sdm_internal *sdm)
@@ -69,6 +71,7 @@ void ccu_sdm_helper_disable(struct ccu_common *common,
writel(reg & ~sdm->tuning_enable, common->base + sdm->tuning_reg);
spin_unlock_irqrestore(common->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_disable, SUNXI_CCU);
/*
* Sigma delta modulation provides a way to do fractional-N frequency
@@ -102,6 +105,7 @@ bool ccu_sdm_helper_has_rate(struct ccu_common *common,
return false;
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_has_rate, SUNXI_CCU);
unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -132,6 +136,7 @@ unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
/* We can't calculate the effective clock rate, so just fail. */
return 0;
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_read_rate, SUNXI_CCU);
int ccu_sdm_helper_get_factors(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -153,3 +158,4 @@ int ccu_sdm_helper_get_factors(struct ccu_common *common,
/* nothing found */
return -EINVAL;
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_get_factors, SUNXI_CCU);
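
The exports above cover the sigma-delta modulation (SDM) helpers, which
dither a divider to approximate fractional multipliers. A generic
fractional-N sketch follows; the 24-bit K field and the rate formula here
are illustrative assumptions, not the exact sunxi register layout.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative fractional-N arithmetic: dithering between N and N+1
 * yields an average multiplier of N + K/2^24, giving much finer rate
 * steps than an integer-N PLL. K is a hypothetical 24-bit field.
 */
static uint64_t frac_n_rate(uint64_t parent, uint32_t n, uint32_t k)
{
	return parent * n + ((parent * k) >> 24);
}

int main(void)
{
	/* 24 MHz reference, N = 32, K = 0x666666 (~0.4) -> ~777.6 MHz */
	printf("%llu Hz\n",
	       (unsigned long long)frac_n_rate(24000000, 32, 0x666666));
	return 0;
}
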
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 7b1816856eb5..a0715cdfc1a4 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += clk.o
obj-y += clk-audio-sync.o
+obj-y += clk-device.o
obj-y += clk-dfll.o
obj-y += clk-divider.o
obj-y += clk-periph.o
diff --git a/drivers/clk/tegra/clk-device.c b/drivers/clk/tegra/clk-device.c
new file mode 100644
index 000000000000..c58beaf8afbc
--- /dev/null
+++ b/drivers/clk/tegra/clk-device.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/common.h>
+
+#include "clk.h"
+
+/*
+ * This driver manages the performance state of the core power domain for
+ * the independent PLLs and system clocks. A virtual clock device is
+ * created for each such clock; see tegra_clk_dev_register().
+ */
+
+struct tegra_clk_device {
+ struct notifier_block clk_nb;
+ struct device *dev;
+ struct clk_hw *hw;
+ struct mutex lock;
+};
+
+static int tegra_clock_set_pd_state(struct tegra_clk_device *clk_dev,
+ unsigned long rate)
+{
+ struct device *dev = clk_dev->dev;
+ struct dev_pm_opp *opp;
+ unsigned int pstate;
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+ if (opp == ERR_PTR(-ERANGE)) {
+ /*
+ * Some clocks may be unused by a particular board, and they
+ * may have an uninitialized clock rate that is overly high.
+ * In this case the clock is expected to be disabled, but we
+ * still need to set up the performance state of the power
+ * domain rather than error out of clk initialization. A
+ * typical example is a PCIe clock on Android tablets.
+ */
+ dev_dbg(dev, "failed to find ceil OPP for %luHz\n", rate);
+ opp = dev_pm_opp_find_freq_floor(dev, &rate);
+ }
+
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to find OPP for %luHz: %pe\n", rate, opp);
+ return PTR_ERR(opp);
+ }
+
+ pstate = dev_pm_opp_get_required_pstate(opp, 0);
+ dev_pm_opp_put(opp);
+
+ return dev_pm_genpd_set_performance_state(dev, pstate);
+}
+
+static int tegra_clock_change_notify(struct notifier_block *nb,
+ unsigned long msg, void *data)
+{
+ struct clk_notifier_data *cnd = data;
+ struct tegra_clk_device *clk_dev;
+ int err = 0;
+
+ clk_dev = container_of(nb, struct tegra_clk_device, clk_nb);
+
+ mutex_lock(&clk_dev->lock);
+ switch (msg) {
+ case PRE_RATE_CHANGE:
+ if (cnd->new_rate > cnd->old_rate)
+ err = tegra_clock_set_pd_state(clk_dev, cnd->new_rate);
+ break;
+
+ case ABORT_RATE_CHANGE:
+ err = tegra_clock_set_pd_state(clk_dev, cnd->old_rate);
+ break;
+
+ case POST_RATE_CHANGE:
+ if (cnd->new_rate < cnd->old_rate)
+ err = tegra_clock_set_pd_state(clk_dev, cnd->new_rate);
+ break;
+
+ default:
+ break;
+ }
+ mutex_unlock(&clk_dev->lock);
+
+ return notifier_from_errno(err);
+}
+
+static int tegra_clock_sync_pd_state(struct tegra_clk_device *clk_dev)
+{
+ unsigned long rate;
+ int ret;
+
+ mutex_lock(&clk_dev->lock);
+
+ rate = clk_hw_get_rate(clk_dev->hw);
+ ret = tegra_clock_set_pd_state(clk_dev, rate);
+
+ mutex_unlock(&clk_dev->lock);
+
+ return ret;
+}
+
+static int tegra_clock_probe(struct platform_device *pdev)
+{
+ struct tegra_core_opp_params opp_params = {};
+ struct tegra_clk_device *clk_dev;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int err;
+
+ if (!dev->pm_domain)
+ return -EINVAL;
+
+ clk_dev = devm_kzalloc(dev, sizeof(*clk_dev), GFP_KERNEL);
+ if (!clk_dev)
+ return -ENOMEM;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk_dev->dev = dev;
+ clk_dev->hw = __clk_get_hw(clk);
+ clk_dev->clk_nb.notifier_call = tegra_clock_change_notify;
+ mutex_init(&clk_dev->lock);
+
+ platform_set_drvdata(pdev, clk_dev);
+
+ /*
+ * Runtime PM was already enabled for this device by the parent clk
+ * driver, and the power domain state should be synced under the
+ * clk_dev lock, hence we don't use the common OPP helper that
+ * initializes the OPP state. For some clocks the common OPP helper
+ * may fail to find a ceiling rate; this driver handles that case.
+ */
+ err = devm_tegra_core_dev_init_opp_table(dev, &opp_params);
+ if (err)
+ return err;
+
+ err = clk_notifier_register(clk, &clk_dev->clk_nb);
+ if (err) {
+ dev_err(dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ /*
+ * The driver is attaching to a potentially active/resumed clock, hence
+ * we need to sync the power domain performance state with the clock
+ * rate if the clock is resumed.
+ */
+ err = tegra_clock_sync_pd_state(clk_dev);
+ if (err)
+ goto unreg_clk;
+
+ return 0;
+
+unreg_clk:
+ clk_notifier_unregister(clk, &clk_dev->clk_nb);
+
+ return err;
+}
+
+/*
+ * The Tegra GENPD driver enables clocks during the NOIRQ phase. That
+ * can't be done for clocks served by this driver because runtime PM is
+ * unavailable in the NOIRQ phase. Keep clocks resumed during suspend to
+ * mitigate this. In practice this makes no power-management difference,
+ * since the voltage is kept at a nominal level during suspend anyway.
+ */
+static const struct dev_pm_ops tegra_clock_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_resume_and_get, pm_runtime_put)
+};
+
+static const struct of_device_id tegra_clock_match[] = {
+ { .compatible = "nvidia,tegra20-sclk" },
+ { .compatible = "nvidia,tegra30-sclk" },
+ { .compatible = "nvidia,tegra30-pllc" },
+ { .compatible = "nvidia,tegra30-plle" },
+ { .compatible = "nvidia,tegra30-pllm" },
+ { }
+};
+
+static struct platform_driver tegra_clock_driver = {
+ .driver = {
+ .name = "tegra-clock",
+ .of_match_table = tegra_clock_match,
+ .pm = &tegra_clock_pm,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra_clock_probe,
+};
+builtin_platform_driver(tegra_clock_driver);
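
clk-device.c above couples clock rates to GENPD performance states through a
clock notifier: the state is raised in PRE_RATE_CHANGE before an increase,
lowered in POST_RATE_CHANGE after a decrease, and restored on
ABORT_RATE_CHANGE. A minimal user-space model of that ordering invariant,
assuming a hypothetical linear rate-to-state mapping:

#include <assert.h>
#include <stdio.h>

static unsigned long cur_rate = 100, cur_pstate = 1;

/* Hypothetical mapping from clock rate to required performance state. */
static unsigned long required_pstate(unsigned long rate)
{
	return (rate + 99) / 100;
}

static void set_rate(unsigned long new_rate)
{
	unsigned long old_rate = cur_rate;

	if (new_rate > old_rate)		/* PRE_RATE_CHANGE */
		cur_pstate = required_pstate(new_rate);

	cur_rate = new_rate;			/* the rate change itself */

	if (new_rate < old_rate)		/* POST_RATE_CHANGE */
		cur_pstate = required_pstate(new_rate);

	/* The domain must always cover the currently programmed rate. */
	assert(cur_pstate >= required_pstate(cur_rate));
}

int main(void)
{
	set_rate(300);	/* state raised before the faster rate */
	set_rate(150);	/* state lowered only after the slower rate */
	printf("rate=%lu pstate=%lu\n", cur_rate, cur_pstate);
	return 0;
}

tegra_clock_sync_pd_state() plays the same role at probe time, syncing the
state for a clock that may already be running.
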
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index eaa079c177c3..100b5d9b7e26 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1914,7 +1914,7 @@ static struct clk *_tegra_clk_register_pll(struct tegra_clk_pll *pll,
/* Data in .init is copied by clk_register(), so stack variable OK */
pll->hw.init = &init;
- return clk_register(NULL, &pll->hw);
+ return tegra_clk_dev_register(&pll->hw);
}
struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 6099c6e9acd4..a98a420398fa 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -226,7 +226,7 @@ struct clk *tegra_clk_register_super_mux(const char *name,
/* Data in .init is copied by clk_register(), so stack variable OK */
super->hw.init = &init;
- clk = clk_register(NULL, &super->hw);
+ clk = tegra_clk_dev_register(&super->hw);
if (IS_ERR(clk))
kfree(super);
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index bc9e47a4cb60..ef718c4b3826 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1158,7 +1158,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA114_CLK_XUSB_HS_SRC, TEGRA114_CLK_XUSB_SS_DIV2, 61200000, 0 },
{ TEGRA114_CLK_XUSB_FALCON_SRC, TEGRA114_CLK_PLL_P, 204000000, 0 },
{ TEGRA114_CLK_XUSB_HOST_SRC, TEGRA114_CLK_PLL_P, 102000000, 0 },
- { TEGRA114_CLK_VDE, TEGRA114_CLK_CLK_MAX, 600000000, 0 },
+ { TEGRA114_CLK_VDE, TEGRA114_CLK_PLL_P, 408000000, 0 },
{ TEGRA114_CLK_SPDIF_IN_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA114_CLK_I2S0_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA114_CLK_I2S1_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 3664593a5ba4..be3c33441cfc 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -6,8 +6,11 @@
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <dt-bindings/clock/tegra20-car.h>
@@ -414,7 +417,7 @@ static struct tegra_clk_pll_params pll_e_params = {
.fixed_rate = 100000000,
};
-static struct tegra_devclk devclks[] __initdata = {
+static struct tegra_devclk devclks[] = {
{ .con_id = "pll_c", .dt_id = TEGRA20_CLK_PLL_C },
{ .con_id = "pll_c_out1", .dt_id = TEGRA20_CLK_PLL_C_OUT1 },
{ .con_id = "pll_p", .dt_id = TEGRA20_CLK_PLL_P },
@@ -710,13 +713,6 @@ static void tegra20_super_clk_init(void)
NULL);
clks[TEGRA20_CLK_CCLK] = clk;
- /* SCLK */
- clk = tegra_clk_register_super_mux("sclk", sclk_parents,
- ARRAY_SIZE(sclk_parents),
- CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
- clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
- clks[TEGRA20_CLK_SCLK] = clk;
-
/* twd */
clk = clk_register_fixed_factor(NULL, "twd", "cclk", 0, 1, 4);
clks[TEGRA20_CLK_TWD] = clk;
@@ -1014,7 +1010,7 @@ static struct tegra_cpu_car_ops tegra20_cpu_car_ops = {
#endif
};
-static struct tegra_clk_init_table init_table[] __initdata = {
+static struct tegra_clk_init_table init_table[] = {
{ TEGRA20_CLK_PLL_P, TEGRA20_CLK_CLK_MAX, 216000000, 1 },
{ TEGRA20_CLK_PLL_P_OUT1, TEGRA20_CLK_CLK_MAX, 28800000, 1 },
{ TEGRA20_CLK_PLL_P_OUT2, TEGRA20_CLK_CLK_MAX, 48000000, 1 },
@@ -1052,11 +1048,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 },
};
-static void __init tegra20_clock_apply_init_table(void)
-{
- tegra_init_from_table(init_table, clks, TEGRA20_CLK_CLK_MAX);
-}
-
/*
* Some clocks may be used by different drivers depending on the board
* configuration. List those here to register them twice in the clock lookup
@@ -1076,6 +1067,8 @@ static const struct of_device_id pmc_match[] __initconst = {
{ },
};
+static bool tegra20_car_initialized;
+
static struct clk *tegra20_clk_src_onecell_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -1083,6 +1076,16 @@ static struct clk *tegra20_clk_src_onecell_get(struct of_phandle_args *clkspec,
struct clk_hw *hw;
struct clk *clk;
+ /*
+ * Timer clocks are needed early; the rest of the clocks shouldn't be
+ * available to device drivers until the clock tree is fully initialized.
+ */
+ if (clkspec->args[0] != TEGRA20_CLK_RTC &&
+ clkspec->args[0] != TEGRA20_CLK_TWD &&
+ clkspec->args[0] != TEGRA20_CLK_TIMER &&
+ !tegra20_car_initialized)
+ return ERR_PTR(-EPROBE_DEFER);
+
clk = of_clk_src_onecell_get(clkspec, data);
if (IS_ERR(clk))
return clk;
@@ -1149,10 +1152,48 @@ static void __init tegra20_clock_init(struct device_node *np)
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA20_CLK_CLK_MAX);
tegra_add_of_provider(np, tegra20_clk_src_onecell_get);
- tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
-
- tegra_clk_apply_init_table = tegra20_clock_apply_init_table;
tegra_cpu_car_ops = &tegra20_cpu_car_ops;
}
-CLK_OF_DECLARE(tegra20, "nvidia,tegra20-car", tegra20_clock_init);
+CLK_OF_DECLARE_DRIVER(tegra20, "nvidia,tegra20-car", tegra20_clock_init);
+
+/*
+ * Clocks that use runtime PM can't be created at tegra20_clock_init()
+ * time because the driver core isn't initialized yet, and thus platform
+ * devices can't be created for the clocks. Hence we need to split clock
+ * registration into two phases. The first phase registers essential
+ * clocks which don't require RPM and are actually used during early
+ * boot. The second phase registers clocks which use RPM, once the
+ * device drivers' core API is ready.
+ */
+static int tegra20_car_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clks[TEGRA20_CLK_SCLK] = clk;
+
+ tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
+ tegra_init_from_table(init_table, clks, TEGRA20_CLK_CLK_MAX);
+ tegra20_car_initialized = true;
+
+ return 0;
+}
+
+static const struct of_device_id tegra20_car_match[] = {
+ { .compatible = "nvidia,tegra20-car" },
+ { }
+};
+
+static struct platform_driver tegra20_car_driver = {
+ .driver = {
+ .name = "tegra20-car",
+ .of_match_table = tegra20_car_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra20_car_probe,
+};
+builtin_platform_driver(tegra20_car_driver);
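
Both CAR drivers gate their OF clock provider on an "initialized" flag and
return -EPROBE_DEFER for all but the early timer clocks until the probe
above has run. On the consumer side the effect is ordinary deferred
probing; a sketch, assuming a hypothetical consumer driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		/* -EPROBE_DEFER propagates; the core retries probe later. */
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-clk-consumer",
	},
	.probe = example_probe,
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
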
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 64121bc66d85..04b496123820 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -7,8 +7,11 @@
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/clk/tegra.h>
#include <soc/tegra/pmc.h>
@@ -532,7 +535,7 @@ static unsigned long tegra30_input_freq[] = {
[12] = 26000000,
};
-static struct tegra_devclk devclks[] __initdata = {
+static struct tegra_devclk devclks[] = {
{ .con_id = "pll_c", .dt_id = TEGRA30_CLK_PLL_C },
{ .con_id = "pll_c_out1", .dt_id = TEGRA30_CLK_PLL_C_OUT1 },
{ .con_id = "pll_p", .dt_id = TEGRA30_CLK_PLL_P },
@@ -812,11 +815,6 @@ static void __init tegra30_pll_init(void)
{
struct clk *clk;
- /* PLLC */
- clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, pmc_base, 0,
- &pll_c_params, NULL);
- clks[TEGRA30_CLK_PLL_C] = clk;
-
/* PLLC_OUT1 */
clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
@@ -826,11 +824,6 @@ static void __init tegra30_pll_init(void)
0, NULL);
clks[TEGRA30_CLK_PLL_C_OUT1] = clk;
- /* PLLM */
- clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
- CLK_SET_RATE_GATE, &pll_m_params, NULL);
- clks[TEGRA30_CLK_PLL_M] = clk;
-
/* PLLM_OUT1 */
clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
@@ -880,9 +873,6 @@ static void __init tegra30_pll_init(void)
ARRAY_SIZE(pll_e_parents),
CLK_SET_RATE_NO_REPARENT,
clk_base + PLLE_AUX, 2, 1, 0, NULL);
- clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
- CLK_GET_RATE_NOCACHE, &pll_e_params, NULL);
- clks[TEGRA30_CLK_PLL_E] = clk;
}
static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
@@ -971,14 +961,6 @@ static void __init tegra30_super_clk_init(void)
NULL);
clks[TEGRA30_CLK_CCLK_LP] = clk;
- /* SCLK */
- clk = tegra_clk_register_super_mux("sclk", sclk_parents,
- ARRAY_SIZE(sclk_parents),
- CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
- clk_base + SCLK_BURST_POLICY,
- 0, 4, 0, 0, NULL);
- clks[TEGRA30_CLK_SCLK] = clk;
-
/* twd */
clk = clk_register_fixed_factor(NULL, "twd", "cclk_g",
CLK_SET_RATE_PARENT, 1, 2);
@@ -1214,7 +1196,7 @@ static struct tegra_cpu_car_ops tegra30_cpu_car_ops = {
#endif
};
-static struct tegra_clk_init_table init_table[] __initdata = {
+static struct tegra_clk_init_table init_table[] = {
{ TEGRA30_CLK_UARTA, TEGRA30_CLK_PLL_P, 408000000, 0 },
{ TEGRA30_CLK_UARTB, TEGRA30_CLK_PLL_P, 408000000, 0 },
{ TEGRA30_CLK_UARTC, TEGRA30_CLK_PLL_P, 408000000, 0 },
@@ -1259,11 +1241,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
-static void __init tegra30_clock_apply_init_table(void)
-{
- tegra_init_from_table(init_table, clks, TEGRA30_CLK_CLK_MAX);
-}
-
/*
* Some clocks may be used by different drivers depending on the board
* configuration. List those here to register them twice in the clock lookup
@@ -1294,12 +1271,24 @@ static struct tegra_audio_clk_info tegra30_audio_plls[] = {
{ "pll_a", &pll_a_params, tegra_clk_pll_a, "pll_p_out1" },
};
+static bool tegra30_car_initialized;
+
static struct clk *tegra30_clk_src_onecell_get(struct of_phandle_args *clkspec,
void *data)
{
struct clk_hw *hw;
struct clk *clk;
+ /*
+ * Timer clocks are needed early; the rest of the clocks shouldn't be
+ * available to device drivers until the clock tree is fully initialized.
+ */
+ if (clkspec->args[0] != TEGRA30_CLK_RTC &&
+ clkspec->args[0] != TEGRA30_CLK_TWD &&
+ clkspec->args[0] != TEGRA30_CLK_TIMER &&
+ !tegra30_car_initialized)
+ return ERR_PTR(-EPROBE_DEFER);
+
clk = of_clk_src_onecell_get(clkspec, data);
if (IS_ERR(clk))
return clk;
@@ -1357,10 +1346,75 @@ static void __init tegra30_clock_init(struct device_node *np)
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
tegra_add_of_provider(np, tegra30_clk_src_onecell_get);
+
+ tegra_cpu_car_ops = &tegra30_cpu_car_ops;
+}
+CLK_OF_DECLARE_DRIVER(tegra30, "nvidia,tegra30-car", tegra30_clock_init);
+
+/*
+ * Clocks that use runtime PM can't be created at tegra30_clock_init()
+ * time because the driver core isn't initialized yet, and thus platform
+ * devices can't be created for the clocks. Hence we need to split clock
+ * registration into two phases. The first phase registers essential
+ * clocks which don't require RPM and are actually used during early
+ * boot. The second phase registers clocks which use RPM, once the
+ * device drivers' core API is ready.
+ */
+static int tegra30_car_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ /* PLLC */
+ clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, pmc_base, 0,
+ &pll_c_params, NULL);
+ clks[TEGRA30_CLK_PLL_C] = clk;
+
+ /* PLLE */
+ clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
+ CLK_GET_RATE_NOCACHE, &pll_e_params, NULL);
+ clks[TEGRA30_CLK_PLL_E] = clk;
+
+ /* PLLM */
+ clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
+ CLK_SET_RATE_GATE, &pll_m_params, NULL);
+ clks[TEGRA30_CLK_PLL_M] = clk;
+
+ /* SCLK */
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ clk_base + SCLK_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ clks[TEGRA30_CLK_SCLK] = clk;
+
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
+ tegra_init_from_table(init_table, clks, TEGRA30_CLK_CLK_MAX);
+ tegra30_car_initialized = true;
- tegra_clk_apply_init_table = tegra30_clock_apply_init_table;
+ return 0;
+}
- tegra_cpu_car_ops = &tegra30_cpu_car_ops;
+static const struct of_device_id tegra30_car_match[] = {
+ { .compatible = "nvidia,tegra30-car" },
+ { }
+};
+
+static struct platform_driver tegra30_car_driver = {
+ .driver = {
+ .name = "tegra30-car",
+ .of_match_table = tegra30_car_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra30_car_probe,
+};
+
+/*
+ * The clock driver must be registered before the memory controller
+ * driver, which doesn't support deferred probing yet and is registered
+ * at arch init level.
+ */
+static int tegra30_car_init(void)
+{
+ return platform_driver_register(&tegra30_car_driver);
}
-CLK_OF_DECLARE(tegra30, "nvidia,tegra30-car", tegra30_clock_init);
+postcore_initcall(tegra30_car_init);
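
The postcore_initcall() above exists purely for ordering: postcore-level
initcalls run before arch-level ones, so the clock driver is guaranteed to
be registered before the memory controller driver that cannot defer. A
minimal sketch of that relationship, using two hypothetical built-in
drivers:

#include <linux/init.h>

static int __init clk_driver_register_early(void)
{
	/* Runs first: postcore initcall level. */
	return 0;
}
postcore_initcall(clk_driver_register_early);

static int __init mc_driver_register_later(void)
{
	/* Runs afterwards: arch initcall level, like the Tegra MC driver. */
	return 0;
}
arch_initcall(mc_driver_register_later);
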
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index f6cdce441cf7..26bda45813c0 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -9,14 +9,19 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/clk/tegra.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset-controller.h>
+#include <linux/string.h>
#include <soc/tegra/fuse.h>
#include "clk.h"
/* Global data of Tegra CPU CAR ops */
+static struct device_node *tegra_car_np;
static struct tegra_cpu_car_ops dummy_car_ops;
struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops;
@@ -261,8 +266,8 @@ void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
}
}
-void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
- struct clk *clks[], int clk_max)
+void tegra_init_from_table(struct tegra_clk_init_table *tbl,
+ struct clk *clks[], int clk_max)
{
struct clk *clk;
@@ -320,6 +325,8 @@ void __init tegra_add_of_provider(struct device_node *np,
{
int i;
+ tegra_car_np = np;
+
for (i = 0; i < clk_num; i++) {
if (IS_ERR(clks[i])) {
pr_err
@@ -348,7 +355,7 @@ void __init tegra_init_special_resets(unsigned int num,
special_reset_deassert = deassert;
}
-void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
+void tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
{
int i;
@@ -372,6 +379,68 @@ struct clk ** __init tegra_lookup_dt_id(int clk_id,
return NULL;
}
+static struct device_node *tegra_clk_get_of_node(struct clk_hw *hw)
+{
+ struct device_node *np;
+ char *node_name;
+
+ node_name = kstrdup(hw->init->name, GFP_KERNEL);
+ if (!node_name)
+ return NULL;
+
+ strreplace(node_name, '_', '-');
+
+ for_each_child_of_node(tegra_car_np, np) {
+ if (!strcmp(np->name, node_name))
+ break;
+ }
+
+ kfree(node_name);
+
+ return np;
+}
+
+struct clk *tegra_clk_dev_register(struct clk_hw *hw)
+{
+ struct platform_device *pdev, *parent;
+ const char *dev_name = NULL;
+ struct device *dev = NULL;
+ struct device_node *np;
+
+ np = tegra_clk_get_of_node(hw);
+
+ if (!of_device_is_available(np))
+ goto put_node;
+
+ dev_name = kasprintf(GFP_KERNEL, "tegra_clk_%s", hw->init->name);
+ if (!dev_name)
+ goto put_node;
+
+ parent = of_find_device_by_node(tegra_car_np);
+ if (parent) {
+ pdev = of_platform_device_create(np, dev_name, &parent->dev);
+ put_device(&parent->dev);
+
+ if (!pdev) {
+ pr_err("%s: failed to create device for %pOF\n",
+ __func__, np);
+ goto free_name;
+ }
+
+ dev = &pdev->dev;
+ pm_runtime_enable(dev);
+ } else {
+ WARN(1, "failed to find device for %pOF\n", tegra_car_np);
+ }
+
+free_name:
+ kfree(dev_name);
+put_node:
+ of_node_put(np);
+
+ return clk_register(dev, hw);
+}
+
tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
static int __init tegra_clocks_apply_init_table(void)
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 0c3ba0ccce1a..5d80d8b79b8e 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -927,4 +927,6 @@ struct clk *tegra20_clk_register_emc(void __iomem *ioaddr, bool low_jitter);
struct clk *tegra210_clk_register_emc(struct device_node *np,
void __iomem *regs);
+struct clk *tegra_clk_dev_register(struct clk_hw *hw);
+
#endif /* TEGRA_CLK_H */
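
tegra_clk_get_of_node() in clk.c above matches a clock to its device-tree
node by rewriting underscores to dashes, since clock names use "pll_c"
while node names use "pll-c". A user-space sketch of that normalization,
with the kernel's strreplace() open-coded:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *clk_name_to_node_name(const char *clk_name)
{
	char *node_name = strdup(clk_name);

	if (!node_name)
		return NULL;

	/* Equivalent of strreplace(node_name, '_', '-') in the kernel. */
	for (char *p = node_name; *p; p++)
		if (*p == '_')
			*p = '-';

	return node_name;
}

int main(void)
{
	char *name = clk_name_to_node_name("pll_c");

	if (name)
		printf("%s\n", name);	/* prints "pll-c" */
	free(name);
	return 0;
}
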
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index b341cd990be7..962502ca7ff0 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -807,7 +807,7 @@ static int ti_adpll_init_registers(struct ti_adpll_data *d)
static int ti_adpll_init_inputs(struct ti_adpll_data *d)
{
- const char *error = "need at least %i inputs";
+ static const char error[] = "need at least %i inputs";
struct clk *clock;
int nr_inputs;
diff --git a/drivers/clk/visconti/Kconfig b/drivers/clk/visconti/Kconfig
new file mode 100644
index 000000000000..1661097b0d92
--- /dev/null
+++ b/drivers/clk/visconti/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config COMMON_CLK_VISCONTI
+ bool "Support for Toshiba Visconti5 ARM SoC clock controllers"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ default ARCH_VISCONTI
+ help
+ Support for the Toshiba Visconti5 ARM SoC clock controller.
+ Say Y if you want to include clock support.
diff --git a/drivers/clk/visconti/Makefile b/drivers/clk/visconti/Makefile
new file mode 100644
index 000000000000..c1254fd52b31
--- /dev/null
+++ b/drivers/clk/visconti/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile for Toshiba Visconti clock
+
+obj-y += clkc.o pll.o reset.o
+obj-y += pll-tmpv770x.o clkc-tmpv770x.o
diff --git a/drivers/clk/visconti/clkc-tmpv770x.c b/drivers/clk/visconti/clkc-tmpv770x.c
new file mode 100644
index 000000000000..c2b2f41a85a4
--- /dev/null
+++ b/drivers/clk/visconti/clkc-tmpv770x.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti clock controller
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/toshiba,tmpv770x.h>
+#include <dt-bindings/reset/toshiba,tmpv770x.h>
+
+#include "clkc.h"
+#include "reset.h"
+
+static DEFINE_SPINLOCK(tmpv770x_clk_lock);
+static DEFINE_SPINLOCK(tmpv770x_rst_lock);
+
+static const struct clk_parent_data clks_parent_data[] = {
+ { .fw_name = "pipll1", .name = "pipll1", },
+};
+
+static const struct clk_parent_data pietherplls_parent_data[] = {
+ { .fw_name = "pietherpll", .name = "pietherpll", },
+};
+
+static const struct visconti_fixed_clk fixed_clk_tables[] = {
+ /* PLL1 */
+ /* PICMPT0/1, PITSC, PIUWDT, PISWDT, PISBUS, PIPMU, PIGPMU, PITMU */
+ /* PIEMM, PIMISC, PIGCOMM, PIDCOMM, PIMBUS, PIGPIO, PIPGM */
+ { TMPV770X_CLK_PIPLL1_DIV4, "pipll1_div4", "pipll1", 0, 1, 4, },
+ /* PISBUS */
+ { TMPV770X_CLK_PIPLL1_DIV2, "pipll1_div2", "pipll1", 0, 1, 2, },
+ /* PICOBUS_CLK */
+ { TMPV770X_CLK_PIPLL1_DIV1, "pipll1_div1", "pipll1", 0, 1, 1, },
+ /* PIDNNPLL */
+ /* CONN_CLK, PIMBUS, PICRC0/1 */
+ { TMPV770X_CLK_PIDNNPLL_DIV1, "pidnnpll_div1", "pidnnpll", 0, 1, 1, },
+ { TMPV770X_CLK_PIREFCLK, "pirefclk", "osc2-clk", 0, 1, 1, },
+ { TMPV770X_CLK_WDTCLK, "wdtclk", "osc2-clk", 0, 1, 1, },
+};
+
+static const struct visconti_clk_gate_table pietherpll_clk_gate_tables[] = {
+ /* pietherpll */
+ { TMPV770X_CLK_PIETHER_2P5M, "piether_2p5m",
+ pietherplls_parent_data, ARRAY_SIZE(pietherplls_parent_data),
+ CLK_SET_RATE_PARENT, 0x34, 0x134, 4, 200,
+ TMPV770X_RESET_PIETHER_2P5M, },
+ { TMPV770X_CLK_PIETHER_25M, "piether_25m",
+ pietherplls_parent_data, ARRAY_SIZE(pietherplls_parent_data),
+ CLK_SET_RATE_PARENT, 0x34, 0x134, 5, 20,
+ TMPV770X_RESET_PIETHER_25M, },
+ { TMPV770X_CLK_PIETHER_50M, "piether_50m",
+ pietherplls_parent_data, ARRAY_SIZE(pietherplls_parent_data),
+ CLK_SET_RATE_PARENT, 0x34, 0x134, 6, 10,
+ TMPV770X_RESET_PIETHER_50M, },
+ { TMPV770X_CLK_PIETHER_125M, "piether_125m",
+ pietherplls_parent_data, ARRAY_SIZE(pietherplls_parent_data),
+ CLK_SET_RATE_PARENT, 0x34, 0x134, 7, 4,
+ TMPV770X_RESET_PIETHER_125M, },
+};
+
+static const struct visconti_clk_gate_table clk_gate_tables[] = {
+ { TMPV770X_CLK_HOX, "hox",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x4c, 0x14c, 0, 1,
+ TMPV770X_RESET_HOX, },
+ { TMPV770X_CLK_PCIE_MSTR, "pcie_mstr",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x38, 0x138, 0, 1,
+ TMPV770X_RESET_PCIE_MSTR, },
+ { TMPV770X_CLK_PCIE_AUX, "pcie_aux",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x38, 0x138, 1, 24,
+ TMPV770X_RESET_PCIE_AUX, },
+ { TMPV770X_CLK_PIINTC, "piintc",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ CLK_IGNORE_UNUSED, 0x8, 0x108, 0, 2, //FIX!!
+ TMPV770X_RESET_PIINTC,},
+ { TMPV770X_CLK_PIETHER_BUS, "piether_bus",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x34, 0x134, 0, 2,
+ TMPV770X_RESET_PIETHER_BUS, }, /* BUS_CLK */
+ { TMPV770X_CLK_PISPI0, "pispi0",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 0, 2,
+ TMPV770X_RESET_PISPI0, },
+ { TMPV770X_CLK_PISPI1, "pispi1",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 1, 2,
+ TMPV770X_RESET_PISPI1, },
+ { TMPV770X_CLK_PISPI2, "pispi2",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 2, 2,
+ TMPV770X_RESET_PISPI2, },
+ { TMPV770X_CLK_PISPI3, "pispi3",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 3, 2,
+ TMPV770X_RESET_PISPI3,},
+ { TMPV770X_CLK_PISPI4, "pispi4",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 4, 2,
+ TMPV770X_RESET_PISPI4, },
+ { TMPV770X_CLK_PISPI5, "pispi5",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 5, 2,
+ TMPV770X_RESET_PISPI5},
+ { TMPV770X_CLK_PISPI6, "pispi6",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 6, 2,
+ TMPV770X_RESET_PISPI6,},
+ { TMPV770X_CLK_PIUART0, "piuart0",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ //CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2c, 0x12c, 0, 4,
+ 0, 0x2c, 0x12c, 0, 4,
+ TMPV770X_RESET_PIUART0,},
+ { TMPV770X_CLK_PIUART1, "piuart1",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ //CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2c, 0x12c, 1, 4,
+ 0, 0x2c, 0x12c, 1, 4,
+ TMPV770X_RESET_PIUART1, },
+ { TMPV770X_CLK_PIUART2, "piuart2",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x2c, 0x12c, 2, 4,
+ TMPV770X_RESET_PIUART2, },
+ { TMPV770X_CLK_PIUART3, "piuart3",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x2c, 0x12c, 3, 4,
+ TMPV770X_RESET_PIUART3, },
+ { TMPV770X_CLK_PII2C0, "pii2c0",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 0, 4,
+ TMPV770X_RESET_PII2C0, },
+ { TMPV770X_CLK_PII2C1, "pii2c1",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 1, 4,
+ TMPV770X_RESET_PII2C1, },
+ { TMPV770X_CLK_PII2C2, "pii2c2",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 2, 4,
+ TMPV770X_RESET_PII2C2, },
+ { TMPV770X_CLK_PII2C3, "pii2c3",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 3, 4,
+ TMPV770X_RESET_PII2C3,},
+ { TMPV770X_CLK_PII2C4, "pii2c4",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 4, 4,
+ TMPV770X_RESET_PII2C4, },
+ { TMPV770X_CLK_PII2C5, "pii2c5",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 5, 4,
+ TMPV770X_RESET_PII2C5, },
+ { TMPV770X_CLK_PII2C6, "pii2c6",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 6, 4,
+ TMPV770X_RESET_PII2C6, },
+ { TMPV770X_CLK_PII2C7, "pii2c7",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 7, 4,
+ TMPV770X_RESET_PII2C7, },
+ { TMPV770X_CLK_PII2C8, "pii2c8",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 8, 4,
+ TMPV770X_RESET_PII2C8, },
+ /* PIPCMIF */
+ { TMPV770X_CLK_PIPCMIF, "pipcmif",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x64, 0x164, 0, 4,
+ TMPV770X_RESET_PIPCMIF, },
+ /* PISYSTEM */
+ { TMPV770X_CLK_WRCK, "wrck",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x68, 0x168, 9, 32,
+ -1, }, /* No reset */
+ { TMPV770X_CLK_PICKMON, "pickmon",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x10, 0x110, 8, 4,
+ TMPV770X_RESET_PICKMON, },
+ { TMPV770X_CLK_SBUSCLK, "sbusclk",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x14, 0x114, 0, 4,
+ TMPV770X_RESET_SBUSCLK, },
+};
+
+static const struct visconti_reset_data clk_reset_data[] = {
+ [TMPV770X_RESET_PIETHER_2P5M] = { 0x434, 0x534, 4, },
+ [TMPV770X_RESET_PIETHER_25M] = { 0x434, 0x534, 5, },
+ [TMPV770X_RESET_PIETHER_50M] = { 0x434, 0x534, 6, },
+ [TMPV770X_RESET_PIETHER_125M] = { 0x434, 0x534, 7, },
+ [TMPV770X_RESET_HOX] = { 0x44c, 0x54c, 0, },
+ [TMPV770X_RESET_PCIE_MSTR] = { 0x438, 0x538, 0, },
+ [TMPV770X_RESET_PCIE_AUX] = { 0x438, 0x538, 1, },
+ [TMPV770X_RESET_PIINTC] = { 0x408, 0x508, 0, },
+ [TMPV770X_RESET_PIETHER_BUS] = { 0x434, 0x534, 0, },
+ [TMPV770X_RESET_PISPI0] = { 0x428, 0x528, 0, },
+ [TMPV770X_RESET_PISPI1] = { 0x428, 0x528, 1, },
+ [TMPV770X_RESET_PISPI2] = { 0x428, 0x528, 2, },
+ [TMPV770X_RESET_PISPI3] = { 0x428, 0x528, 3, },
+ [TMPV770X_RESET_PISPI4] = { 0x428, 0x528, 4, },
+ [TMPV770X_RESET_PISPI5] = { 0x428, 0x528, 5, },
+ [TMPV770X_RESET_PISPI6] = { 0x428, 0x528, 6, },
+ [TMPV770X_RESET_PIUART0] = { 0x42c, 0x52c, 0, },
+ [TMPV770X_RESET_PIUART1] = { 0x42c, 0x52c, 1, },
+ [TMPV770X_RESET_PIUART2] = { 0x42c, 0x52c, 2, },
+ [TMPV770X_RESET_PIUART3] = { 0x42c, 0x52c, 3, },
+ [TMPV770X_RESET_PII2C0] = { 0x430, 0x530, 0, },
+ [TMPV770X_RESET_PII2C1] = { 0x430, 0x530, 1, },
+ [TMPV770X_RESET_PII2C2] = { 0x430, 0x530, 2, },
+ [TMPV770X_RESET_PII2C3] = { 0x430, 0x530, 3, },
+ [TMPV770X_RESET_PII2C4] = { 0x430, 0x530, 4, },
+ [TMPV770X_RESET_PII2C5] = { 0x430, 0x530, 5, },
+ [TMPV770X_RESET_PII2C6] = { 0x430, 0x530, 6, },
+ [TMPV770X_RESET_PII2C7] = { 0x430, 0x530, 7, },
+ [TMPV770X_RESET_PII2C8] = { 0x430, 0x530, 8, },
+ [TMPV770X_RESET_PIPCMIF] = { 0x464, 0x564, 0, },
+ [TMPV770X_RESET_PICKMON] = { 0x410, 0x510, 8, },
+ [TMPV770X_RESET_SBUSCLK] = { 0x414, 0x514, 0, },
+};
+
+static int visconti_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct visconti_clk_provider *ctx;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int ret, i;
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ctx = visconti_init_clk(dev, regmap, TMPV770X_NR_CLK);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = visconti_register_reset_controller(dev, regmap, clk_reset_data,
+ TMPV770X_NR_RESET,
+ &visconti_reset_ops,
+ &tmpv770x_rst_lock);
+ if (ret) {
+ dev_err(dev, "Failed to register reset controller: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < (ARRAY_SIZE(fixed_clk_tables)); i++)
+ ctx->clk_data.hws[fixed_clk_tables[i].id] =
+ clk_hw_register_fixed_factor(NULL,
+ fixed_clk_tables[i].name,
+ fixed_clk_tables[i].parent,
+ fixed_clk_tables[i].flag,
+ fixed_clk_tables[i].mult,
+ fixed_clk_tables[i].div);
+
+ ret = visconti_clk_register_gates(ctx, clk_gate_tables,
+ ARRAY_SIZE(clk_gate_tables), clk_reset_data,
+ &tmpv770x_clk_lock);
+ if (ret) {
+ dev_err(dev, "Failed to register main clock gate: %d\n", ret);
+ return ret;
+ }
+
+ ret = visconti_clk_register_gates(ctx, pietherpll_clk_gate_tables,
+ ARRAY_SIZE(pietherpll_clk_gate_tables),
+ clk_reset_data, &tmpv770x_clk_lock);
+ if (ret) {
+ dev_err(dev, "Failed to register pietherpll clock gate: %d\n", ret);
+ return ret;
+ }
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &ctx->clk_data);
+}
+
+static const struct of_device_id visconti_clk_ids[] = {
+ { .compatible = "toshiba,tmpv7708-pismu", },
+ { }
+};
+
+static struct platform_driver visconti_clk_driver = {
+ .probe = visconti_clk_probe,
+ .driver = {
+ .name = "visconti-clk",
+ .of_match_table = visconti_clk_ids,
+ },
+};
+
+builtin_platform_driver(visconti_clk_driver);
diff --git a/drivers/clk/visconti/clkc.c b/drivers/clk/visconti/clkc.c
new file mode 100644
index 000000000000..56a8a4ffebca
--- /dev/null
+++ b/drivers/clk/visconti/clkc.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti clock controller
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "clkc.h"
+
+static inline struct visconti_clk_gate *to_visconti_clk_gate(struct clk_hw *hw)
+{
+ return container_of(hw, struct visconti_clk_gate, hw);
+}
+
+static int visconti_gate_clk_is_enabled(struct clk_hw *hw)
+{
+ struct visconti_clk_gate *gate = to_visconti_clk_gate(hw);
+ u32 clk = BIT(gate->ck_idx);
+ u32 val;
+
+ regmap_read(gate->regmap, gate->ckon_offset, &val);
+ return (val & clk) ? 1 : 0;
+}
+
+static void visconti_gate_clk_disable(struct clk_hw *hw)
+{
+ struct visconti_clk_gate *gate = to_visconti_clk_gate(hw);
+ u32 clk = BIT(gate->ck_idx);
+ unsigned long flags;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ if (!visconti_gate_clk_is_enabled(hw)) {
+ spin_unlock_irqrestore(gate->lock, flags);
+ return;
+ }
+
+ regmap_update_bits(gate->regmap, gate->ckoff_offset, clk, clk);
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int visconti_gate_clk_enable(struct clk_hw *hw)
+{
+ struct visconti_clk_gate *gate = to_visconti_clk_gate(hw);
+ u32 clk = BIT(gate->ck_idx);
+ unsigned long flags;
+
+ spin_lock_irqsave(gate->lock, flags);
+ regmap_update_bits(gate->regmap, gate->ckon_offset, clk, clk);
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops visconti_clk_gate_ops = {
+ .enable = visconti_gate_clk_enable,
+ .disable = visconti_gate_clk_disable,
+ .is_enabled = visconti_gate_clk_is_enabled,
+};
+
+static struct clk_hw *visconti_clk_register_gate(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ struct regmap *regmap,
+ const struct visconti_clk_gate_table *clks,
+ u32 rson_offset,
+ u32 rsoff_offset,
+ u8 rs_idx,
+ spinlock_t *lock)
+{
+ struct visconti_clk_gate *gate;
+ struct clk_parent_data *pdata;
+ struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->name = pdata->fw_name = parent_name;
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &visconti_clk_gate_ops;
+ init.flags = clks->flags;
+ init.parent_data = pdata;
+ init.num_parents = 1;
+
+ gate->regmap = regmap;
+ gate->ckon_offset = clks->ckon_offset;
+ gate->ckoff_offset = clks->ckoff_offset;
+ gate->ck_idx = clks->ck_idx;
+ gate->rson_offset = rson_offset;
+ gate->rsoff_offset = rsoff_offset;
+ gate->rs_idx = rs_idx;
+ gate->lock = lock;
+ gate->hw.init = &init;
+
+ hw = &gate->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ hw = ERR_PTR(ret);
+
+ return hw;
+}
+
+int visconti_clk_register_gates(struct visconti_clk_provider *ctx,
+ const struct visconti_clk_gate_table *clks,
+ int num_gate,
+ const struct visconti_reset_data *reset,
+ spinlock_t *lock)
+{
+ struct device *dev = ctx->dev;
+ int i;
+
+ for (i = 0; i < num_gate; i++) {
+ const char *parent_div_name = clks[i].parent_data[0].name;
+ struct clk_parent_data *pdata;
+ u32 rson_offset, rsoff_offset;
+ struct clk_hw *gate_clk;
+ struct clk_hw *div_clk;
+ char *dev_name;
+ u8 rs_idx;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s_div", clks[i].name);
+ if (!dev_name)
+ return -ENOMEM;
+
+ if (clks[i].rs_id >= 0) {
+ rson_offset = reset[clks[i].rs_id].rson_offset;
+ rsoff_offset = reset[clks[i].rs_id].rsoff_offset;
+ rs_idx = reset[clks[i].rs_id].rs_idx;
+ } else {
+ rson_offset = rsoff_offset = rs_idx = -1;
+ }
+
+ div_clk = devm_clk_hw_register_fixed_factor(dev,
+ dev_name,
+ parent_div_name,
+ 0, 1,
+ clks[i].div);
+ if (IS_ERR(div_clk))
+ return PTR_ERR(div_clk);
+
+ gate_clk = visconti_clk_register_gate(dev,
+ clks[i].name,
+ dev_name,
+ ctx->regmap,
+ &clks[i],
+ rson_offset,
+ rsoff_offset,
+ rs_idx,
+ lock);
+ if (IS_ERR(gate_clk)) {
+ dev_err(dev, "%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ return PTR_ERR(gate_clk);
+ }
+
+ ctx->clk_data.hws[clks[i].id] = gate_clk;
+ }
+
+ return 0;
+}
+
+struct visconti_clk_provider *visconti_init_clk(struct device *dev,
+ struct regmap *regmap,
+ unsigned long nr_clks)
+{
+ struct visconti_clk_provider *ctx;
+ int i;
+
+ ctx = devm_kzalloc(dev, struct_size(ctx, clk_data.hws, nr_clks), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_clks; ++i)
+ ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+ ctx->clk_data.num = nr_clks;
+
+ ctx->dev = dev;
+ ctx->regmap = regmap;
+
+ return ctx;
+}
diff --git a/drivers/clk/visconti/clkc.h b/drivers/clk/visconti/clkc.h
new file mode 100644
index 000000000000..09ed82ff64e4
--- /dev/null
+++ b/drivers/clk/visconti/clkc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Toshiba Visconti clock controller
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#ifndef _VISCONTI_CLKC_H_
+#define _VISCONTI_CLKC_H_
+
+#include <linux/mfd/syscon.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include "reset.h"
+
+struct visconti_clk_provider {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk_hw_onecell_data clk_data;
+};
+
+struct visconti_clk_gate_table {
+ unsigned int id;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 num_parents;
+ u8 flags;
+ u32 ckon_offset;
+ u32 ckoff_offset;
+ u8 ck_idx;
+ unsigned int div;
+ u8 rs_id;
+};
+
+struct visconti_fixed_clk {
+ unsigned int id;
+ const char *name;
+ const char *parent;
+ unsigned long flag;
+ unsigned int mult;
+ unsigned int div;
+};
+
+struct visconti_clk_gate {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 ckon_offset;
+ u32 ckoff_offset;
+ u8 ck_idx;
+ u8 flags;
+ u32 rson_offset;
+ u32 rsoff_offset;
+ u8 rs_idx;
+ spinlock_t *lock;
+};
+
+struct visconti_clk_provider *visconti_init_clk(struct device *dev,
+ struct regmap *regmap,
+ unsigned long nr_clks);
+int visconti_clk_register_gates(struct visconti_clk_provider *data,
+ const struct visconti_clk_gate_table *clks,
+ int num_gate,
+ const struct visconti_reset_data *reset,
+ spinlock_t *lock);
+#endif /* _VISCONTI_CLKC_H_ */
diff --git a/drivers/clk/visconti/pll-tmpv770x.c b/drivers/clk/visconti/pll-tmpv770x.c
new file mode 100644
index 000000000000..8360ccf88867
--- /dev/null
+++ b/drivers/clk/visconti/pll-tmpv770x.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti PLL controller
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/toshiba,tmpv770x.h>
+
+#include "pll.h"
+
+static DEFINE_SPINLOCK(tmpv770x_pll_lock);
+
+static const struct visconti_pll_rate_table pipll0_rates[] __initconst = {
+ VISCONTI_PLL_RATE(840000000, 0x1, 0x0, 0x1, 0x54, 0x000000, 0x2, 0x1),
+ VISCONTI_PLL_RATE(780000000, 0x1, 0x0, 0x1, 0x4e, 0x000000, 0x2, 0x1),
+ VISCONTI_PLL_RATE(600000000, 0x1, 0x0, 0x1, 0x3c, 0x000000, 0x2, 0x1),
+ { /* sentinel */ },
+};
+
+static const struct visconti_pll_rate_table piddrcpll_rates[] __initconst = {
+ VISCONTI_PLL_RATE(780000000, 0x1, 0x0, 0x1, 0x4e, 0x000000, 0x2, 0x1),
+ VISCONTI_PLL_RATE(760000000, 0x1, 0x0, 0x1, 0x4c, 0x000000, 0x2, 0x1),
+ { /* sentinel */ },
+};
+
+static const struct visconti_pll_rate_table pivoifpll_rates[] __initconst = {
+ VISCONTI_PLL_RATE(165000000, 0x1, 0x0, 0x1, 0x42, 0x000000, 0x4, 0x2),
+ VISCONTI_PLL_RATE(148500000, 0x1, 0x1, 0x1, 0x3b, 0x666666, 0x4, 0x2),
+ VISCONTI_PLL_RATE(96000000, 0x1, 0x0, 0x1, 0x30, 0x000000, 0x5, 0x2),
+ VISCONTI_PLL_RATE(74250000, 0x1, 0x1, 0x1, 0x3b, 0x666666, 0x4, 0x4),
+ VISCONTI_PLL_RATE(54000000, 0x1, 0x0, 0x1, 0x36, 0x000000, 0x5, 0x4),
+ VISCONTI_PLL_RATE(48000000, 0x1, 0x0, 0x1, 0x30, 0x000000, 0x5, 0x4),
+ VISCONTI_PLL_RATE(35750000, 0x1, 0x1, 0x1, 0x32, 0x0ccccc, 0x7, 0x4),
+ { /* sentinel */ },
+};
+
+static const struct visconti_pll_rate_table piimgerpll_rates[] __initconst = {
+ VISCONTI_PLL_RATE(165000000, 0x1, 0x0, 0x1, 0x42, 0x000000, 0x4, 0x2),
+ VISCONTI_PLL_RATE(96000000, 0x1, 0x0, 0x1, 0x30, 0x000000, 0x5, 0x2),
+ VISCONTI_PLL_RATE(54000000, 0x1, 0x0, 0x1, 0x36, 0x000000, 0x5, 0x4),
+ VISCONTI_PLL_RATE(48000000, 0x1, 0x0, 0x1, 0x30, 0x000000, 0x5, 0x4),
+ { /* sentinel */ },
+};
+
+static const struct visconti_pll_info pll_info[] __initconst = {
+ { TMPV770X_PLL_PIPLL0, "pipll0", "osc2-clk", 0x0, pipll0_rates },
+ { TMPV770X_PLL_PIDDRCPLL, "piddrcpll", "osc2-clk", 0x500, piddrcpll_rates },
+ { TMPV770X_PLL_PIVOIFPLL, "pivoifpll", "osc2-clk", 0x600, pivoifpll_rates },
+ { TMPV770X_PLL_PIIMGERPLL, "piimgerpll", "osc2-clk", 0x700, piimgerpll_rates },
+};
+
+static void __init tmpv770x_setup_plls(struct device_node *np)
+{
+ struct visconti_pll_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ return;
+
+ ctx = visconti_init_pll(np, reg_base, TMPV770X_NR_PLL);
+ if (IS_ERR(ctx)) {
+ iounmap(reg_base);
+ return;
+ }
+
+ ctx->clk_data.hws[TMPV770X_PLL_PIPLL1] =
+ clk_hw_register_fixed_rate(NULL, "pipll1", NULL, 0, 600000000);
+ ctx->clk_data.hws[TMPV770X_PLL_PIDNNPLL] =
+ clk_hw_register_fixed_rate(NULL, "pidnnpll", NULL, 0, 500000000);
+ ctx->clk_data.hws[TMPV770X_PLL_PIETHERPLL] =
+ clk_hw_register_fixed_rate(NULL, "pietherpll", NULL, 0, 500000000);
+
+ visconti_register_plls(ctx, pll_info, ARRAY_SIZE(pll_info), &tmpv770x_pll_lock);
+}
+
+CLK_OF_DECLARE(tmpv770x_plls, "toshiba,tmpv7708-pipllct", tmpv770x_setup_plls);
diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
new file mode 100644
index 000000000000..a484cb945d67
--- /dev/null
+++ b/drivers/clk/visconti/pll.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti PLL driver
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include "pll.h"
+
+struct visconti_pll {
+ struct clk_hw hw;
+ void __iomem *pll_base;
+ spinlock_t *lock;
+ unsigned long flags;
+ const struct visconti_pll_rate_table *rate_table;
+ size_t rate_count;
+ struct visconti_pll_provider *ctx;
+};
+
+#define PLL_CONF_REG 0x0000
+#define PLL_CTRL_REG 0x0004
+#define PLL_FRACMODE_REG 0x0010
+#define PLL_INTIN_REG 0x0014
+#define PLL_FRACIN_REG 0x0018
+#define PLL_REFDIV_REG 0x001c
+#define PLL_POSTDIV_REG 0x0020
+
+#define PLL_CONFIG_SEL BIT(0)
+#define PLL_PLLEN BIT(4)
+#define PLL_BYPASS BIT(16)
+#define PLL_INTIN_MASK GENMASK(11, 0)
+#define PLL_FRACIN_MASK GENMASK(23, 0)
+#define PLL_REFDIV_MASK GENMASK(5, 0)
+#define PLL_POSTDIV_MASK GENMASK(2, 0)
+
+#define PLL0_FRACMODE_DACEN BIT(4)
+#define PLL0_FRACMODE_DSMEN BIT(0)
+
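+/*
+ * Pack two table fields into a single register value: FRACMODE holds
+ * dacen (bit 4) and dsmen (bit 0), while the POSTDIV value holds
+ * postdiv2 (bits 6:4) and postdiv1 (bits 2:0).
+ */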
+#define PLL_CREATE_FRACMODE(table) (table->dacen << 4 | table->dsmen)
+#define PLL_CREATE_OSTDIV(table) (table->postdiv2 << 4 | table->postdiv1)
+
+static inline struct visconti_pll *to_visconti_pll(struct clk_hw *hw)
+{
+ return container_of(hw, struct visconti_pll, hw);
+}
+
+static void visconti_pll_get_params(struct visconti_pll *pll,
+ struct visconti_pll_rate_table *rate_table)
+{
+ u32 postdiv, val;
+
+ val = readl(pll->pll_base + PLL_FRACMODE_REG);
+
+ rate_table->dacen = FIELD_GET(PLL0_FRACMODE_DACEN, val);
+ rate_table->dsmen = FIELD_GET(PLL0_FRACMODE_DSMEN, val);
+
+ rate_table->fracin = readl(pll->pll_base + PLL_FRACIN_REG) & PLL_FRACIN_MASK;
+ rate_table->intin = readl(pll->pll_base + PLL_INTIN_REG) & PLL_INTIN_MASK;
+ rate_table->refdiv = readl(pll->pll_base + PLL_REFDIV_REG) & PLL_REFDIV_MASK;
+
+ postdiv = readl(pll->pll_base + PLL_POSTDIV_REG);
+ rate_table->postdiv1 = postdiv & PLL_POSTDIV_MASK;
+ rate_table->postdiv2 = (postdiv >> 4) & PLL_POSTDIV_MASK;
+}
+
+static const struct visconti_pll_rate_table *visconti_get_pll_settings(struct visconti_pll *pll,
+ unsigned long rate)
+{
+ const struct visconti_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ for (i = 0; i < pll->rate_count; i++)
+ if (rate == rate_table[i].rate)
+ return &rate_table[i];
+
+ return NULL;
+}
+
+static unsigned long visconti_get_pll_rate_from_data(struct visconti_pll *pll,
+ const struct visconti_pll_rate_table *rate)
+{
+ const struct visconti_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
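+ /*
+ * Compare every field after 'rate' in one shot; the entry whose
+ * divider settings match the current hardware state yields the rate.
+ */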
+ for (i = 0; i < pll->rate_count; i++)
+ if (memcmp(&rate_table[i].dacen, &rate->dacen,
+ sizeof(*rate) - sizeof(unsigned long)) == 0)
+ return rate_table[i].rate;
+
+ /* set default */
+ return rate_table[0].rate;
+}
+
+static long visconti_pll_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ const struct visconti_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ /* Assuming rate_table is sorted in descending order */
+ for (i = 0; i < pll->rate_count; i++)
+ if (rate >= rate_table[i].rate)
+ return rate_table[i].rate;
+
+ /* return minimum supported value */
+ return rate_table[i - 1].rate;
+}
+
+static unsigned long visconti_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ struct visconti_pll_rate_table rate_table;
+
+ memset(&rate_table, 0, sizeof(rate_table));
+ visconti_pll_get_params(pll, &rate_table);
+
+ return visconti_get_pll_rate_from_data(pll, &rate_table);
+}
+
+static int visconti_pll_set_params(struct visconti_pll *pll,
+ const struct visconti_pll_rate_table *rate_table)
+{
+ writel(PLL_CREATE_FRACMODE(rate_table), pll->pll_base + PLL_FRACMODE_REG);
+ writel(PLL_CREATE_OSTDIV(rate_table), pll->pll_base + PLL_POSTDIV_REG);
+ writel(rate_table->intin, pll->pll_base + PLL_INTIN_REG);
+ writel(rate_table->fracin, pll->pll_base + PLL_FRACIN_REG);
+ writel(rate_table->refdiv, pll->pll_base + PLL_REFDIV_REG);
+
+ return 0;
+}
+
+static int visconti_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ const struct visconti_pll_rate_table *rate_table;
+
+ rate_table = visconti_get_pll_settings(pll, rate);
+ if (!rate_table)
+ return -EINVAL;
+
+ return visconti_pll_set_params(pll, rate_table);
+}
+
+static int visconti_pll_is_enabled(struct clk_hw *hw)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ u32 reg;
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+
+ return (reg & PLL_PLLEN);
+}
+
+static int visconti_pll_enable(struct clk_hw *hw)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ const struct visconti_pll_rate_table *rate_table = pll->rate_table;
+ unsigned long flags;
+ u32 reg;
+
+ if (visconti_pll_is_enabled(hw))
+ return 0;
+
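+ /*
+ * Enable sequence: select register-based configuration, bypass the
+ * PLL while it is reprogrammed, toggle PLLEN with short settling
+ * delays, then remove the bypass once the PLL is running.
+ */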
+ spin_lock_irqsave(pll->lock, flags);
+
+ writel(PLL_CONFIG_SEL, pll->pll_base + PLL_CONF_REG);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg |= PLL_BYPASS;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ visconti_pll_set_params(pll, &rate_table[0]);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg &= ~PLL_PLLEN;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ udelay(1);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg |= PLL_PLLEN;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ udelay(40);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg &= ~PLL_BYPASS;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return 0;
+}
+
+static void visconti_pll_disable(struct clk_hw *hw)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ unsigned long flags;
+ u32 reg;
+
+ if (!visconti_pll_is_enabled(hw))
+ return;
+
+ spin_lock_irqsave(pll->lock, flags);
+
+ writel(PLL_CONFIG_SEL, pll->pll_base + PLL_CONF_REG);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg |= PLL_BYPASS;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg &= ~PLL_PLLEN;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static const struct clk_ops visconti_pll_ops = {
+ .enable = visconti_pll_enable,
+ .disable = visconti_pll_disable,
+ .is_enabled = visconti_pll_is_enabled,
+ .round_rate = visconti_pll_round_rate,
+ .recalc_rate = visconti_pll_recalc_rate,
+ .set_rate = visconti_pll_set_rate,
+};
+
+static struct clk_hw *visconti_register_pll(struct visconti_pll_provider *ctx,
+ const char *name,
+ const char *parent_name,
+ int offset,
+ const struct visconti_pll_rate_table *rate_table,
+ spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct visconti_pll *pll;
+ struct clk_hw *pll_hw_clk;
+ size_t len;
+ int ret;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = CLK_IGNORE_UNUSED;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
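+ /* count the rate table entries up to the zero-rate sentinel */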
+ for (len = 0; rate_table[len].rate != 0; )
+ len++;
+ pll->rate_count = len;
+ pll->rate_table = kmemdup(rate_table,
+ pll->rate_count * sizeof(struct visconti_pll_rate_table),
+ GFP_KERNEL);
+ if (WARN(!pll->rate_table, "%s: could not allocate rate table for %s\n", __func__, name)) {
+ kfree(pll);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.ops = &visconti_pll_ops;
+ pll->hw.init = &init;
+ pll->pll_base = ctx->reg_base + offset;
+ pll->lock = lock;
+ pll->ctx = ctx;
+
+ pll_hw_clk = &pll->hw;
+ ret = clk_hw_register(NULL, &pll->hw);
+ if (ret) {
+ pr_err("failed to register pll clock %s: %d\n", name, ret);
+ kfree(pll);
+ pll_hw_clk = ERR_PTR(ret);
+ }
+
+ return pll_hw_clk;
+}
+
+static void visconti_pll_add_lookup(struct visconti_pll_provider *ctx,
+ struct clk_hw *hw_clk,
+ unsigned int id)
+{
+ if (id)
+ ctx->clk_data.hws[id] = hw_clk;
+}
+
+void __init visconti_register_plls(struct visconti_pll_provider *ctx,
+ const struct visconti_pll_info *list,
+ unsigned int nr_plls,
+ spinlock_t *lock)
+{
+ int idx;
+
+ for (idx = 0; idx < nr_plls; idx++, list++) {
+ struct clk_hw *clk;
+
+ clk = visconti_register_pll(ctx,
+ list->name,
+ list->parent,
+ list->base_reg,
+ list->rate_table,
+ lock);
+ if (IS_ERR(clk)) {
+ pr_err("failed to register clock %s\n", list->name);
+ continue;
+ }
+
+ visconti_pll_add_lookup(ctx, clk, list->id);
+ }
+}
+
+struct visconti_pll_provider * __init visconti_init_pll(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_plls)
+{
+ struct visconti_pll_provider *ctx;
+ int i;
+
+ ctx = kzalloc(struct_size(ctx, clk_data.hws, nr_plls), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
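+ /* default each slot to -ENOENT until its PLL is actually registered */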
+ for (i = 0; i < nr_plls; ++i)
+ ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+
+ ctx->node = np;
+ ctx->reg_base = base;
+ ctx->clk_data.num = nr_plls;
+
+ return ctx;
+}
diff --git a/drivers/clk/visconti/pll.h b/drivers/clk/visconti/pll.h
new file mode 100644
index 000000000000..16dae35ab370
--- /dev/null
+++ b/drivers/clk/visconti/pll.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#ifndef _VISCONTI_PLL_H_
+#define _VISCONTI_PLL_H_
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+struct visconti_pll_provider {
+ void __iomem *reg_base;
+ struct regmap *regmap;
+ struct clk_hw_onecell_data clk_data;
+ struct device_node *node;
+};
+
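+/*
+ * One PLL operating point per entry; the dacen/dsmen, intin/fracin,
+ * refdiv and postdiv1/postdiv2 values are written verbatim to the
+ * FRACMODE, INTIN/FRACIN, REFDIV and POSTDIV registers for that rate.
+ */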
+#define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \
+ _refdiv, _intin, _fracin, _postdiv1, _postdiv2) \
+{ \
+ .rate = _rate, \
+ .dacen = _dacen, \
+ .dsmen = _dsmen, \
+ .refdiv = _refdiv, \
+ .intin = _intin, \
+ .fracin = _fracin, \
+ .postdiv1 = _postdiv1, \
+ .postdiv2 = _postdiv2 \
+}
+
+struct visconti_pll_rate_table {
+ unsigned long rate;
+ unsigned int dacen;
+ unsigned int dsmen;
+ unsigned int refdiv;
+ unsigned long intin;
+ unsigned long fracin;
+ unsigned int postdiv1;
+ unsigned int postdiv2;
+};
+
+struct visconti_pll_info {
+ unsigned int id;
+ const char *name;
+ const char *parent;
+ unsigned long base_reg;
+ const struct visconti_pll_rate_table *rate_table;
+};
+
+struct visconti_pll_provider * __init visconti_init_pll(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_plls);
+void visconti_register_plls(struct visconti_pll_provider *ctx,
+ const struct visconti_pll_info *list,
+ unsigned int nr_plls, spinlock_t *lock);
+
+#endif /* _VISCONTI_PLL_H_ */
diff --git a/drivers/clk/visconti/reset.c b/drivers/clk/visconti/reset.c
new file mode 100644
index 000000000000..e3c3d7804612
--- /dev/null
+++ b/drivers/clk/visconti/reset.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti ARM SoC reset controller
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "reset.h"
+
+static inline struct visconti_reset *to_visconti_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct visconti_reset, rcdev);
+}
+
+static int visconti_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct visconti_reset *reset = to_visconti_reset(rcdev);
+ const struct visconti_reset_data *data = &reset->resets[id];
+ u32 rst = BIT(data->rs_idx);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(reset->lock, flags);
+ ret = regmap_update_bits(reset->regmap, data->rson_offset, rst, rst);
+ spin_unlock_irqrestore(reset->lock, flags);
+
+ return ret;
+}
+
+static int visconti_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct visconti_reset *reset = to_visconti_reset(rcdev);
+ const struct visconti_reset_data *data = &reset->resets[id];
+ u32 rst = BIT(data->rs_idx);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(reset->lock, flags);
+ ret = regmap_update_bits(reset->regmap, data->rsoff_offset, rst, rst);
+ spin_unlock_irqrestore(reset->lock, flags);
+
+ return ret;
+}
+
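+/* pulse the reset line: assert, hold briefly, then deassert */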
+static int visconti_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ visconti_reset_assert(rcdev, id);
+ udelay(1);
+ visconti_reset_deassert(rcdev, id);
+
+ return 0;
+}
+
+static int visconti_reset_status(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct visconti_reset *reset = to_visconti_reset(rcdev);
+ const struct visconti_reset_data *data = &reset->resets[id];
+ unsigned long flags;
+ u32 reg;
+ int ret;
+
+ spin_lock_irqsave(reset->lock, flags);
+ ret = regmap_read(reset->regmap, data->rson_offset, &reg);
+ spin_unlock_irqrestore(reset->lock, flags);
+ if (ret)
+ return ret;
+
+ return !(reg & BIT(data->rs_idx)); /* rs_idx is a bit index, not a mask */
+}
+
+const struct reset_control_ops visconti_reset_ops = {
+ .assert = visconti_reset_assert,
+ .deassert = visconti_reset_deassert,
+ .reset = visconti_reset_reset,
+ .status = visconti_reset_status,
+};
+
+int visconti_register_reset_controller(struct device *dev,
+ struct regmap *regmap,
+ const struct visconti_reset_data *resets,
+ unsigned int num_resets,
+ const struct reset_control_ops *reset_ops,
+ spinlock_t *lock)
+{
+ struct visconti_reset *reset;
+
+ reset = devm_kzalloc(dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ reset->regmap = regmap;
+ reset->resets = resets;
+ reset->rcdev.ops = reset_ops;
+ reset->rcdev.nr_resets = num_resets;
+ reset->rcdev.of_node = dev->of_node;
+ reset->lock = lock;
+
+ return devm_reset_controller_register(dev, &reset->rcdev);
+}
diff --git a/drivers/clk/visconti/reset.h b/drivers/clk/visconti/reset.h
new file mode 100644
index 000000000000..229dffcbdc98
--- /dev/null
+++ b/drivers/clk/visconti/reset.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Toshiba ARM SoC reset controller driver
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#ifndef _VISCONTI_RESET_H_
+#define _VISCONTI_RESET_H_
+
+#include <linux/reset-controller.h>
+
+struct visconti_reset_data {
+ u32 rson_offset;
+ u32 rsoff_offset;
+ u8 rs_idx;
+};
+
+struct visconti_reset {
+ struct reset_controller_dev rcdev;
+ struct regmap *regmap;
+ const struct visconti_reset_data *resets;
+ spinlock_t *lock;
+};
+
+extern const struct reset_control_ops visconti_reset_ops;
+
+int visconti_register_reset_controller(struct device *dev,
+ struct regmap *regmap,
+ const struct visconti_reset_data *resets,
+ unsigned int num_resets,
+ const struct reset_control_ops *reset_ops,
+ spinlock_t *lock);
+#endif /* _VISCONTI_RESET_H_ */
diff --git a/drivers/clk/x86/clk-fch.c b/drivers/clk/x86/clk-fch.c
index 8f7c5142b0f0..fdc060e75839 100644
--- a/drivers/clk/x86/clk-fch.c
+++ b/drivers/clk/x86/clk-fch.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: MIT
/*
- * clock framework for AMD Stoney based clocks
+ * clock framework for AMD FCH controller block
*
* Copyright 2018 Advanced Micro Devices, Inc.
*/
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/pci.h>
#include <linux/platform_data/clk-fch.h>
#include <linux/platform_device.h>
@@ -26,22 +27,37 @@
#define ST_CLK_GATE 3
#define ST_MAX_CLKS 4
-#define RV_CLK_48M 0
-#define RV_CLK_GATE 1
-#define RV_MAX_CLKS 2
+#define CLK_48M_FIXED 0
+#define CLK_GATE_FIXED 1
+#define CLK_MAX_FIXED 2
+
+/* List of supported CPU IDs for the clk mux with 25 MHz clk support */
+#define AMD_CPU_ID_ST 0x1576
static const char * const clk_oscout1_parents[] = { "clk48MHz", "clk25MHz" };
static struct clk_hw *hws[ST_MAX_CLKS];
+static const struct pci_device_id fch_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_ST) },
+ { }
+};
+
static int fch_clk_probe(struct platform_device *pdev)
{
struct fch_clk_data *fch_data;
+ struct pci_dev *rdev;
fch_data = dev_get_platdata(&pdev->dev);
if (!fch_data || !fch_data->base)
return -EINVAL;
- if (!fch_data->is_rv) {
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!rdev) {
+ dev_err(&pdev->dev, "FCH device not found\n");
+ return -ENODEV;
+ }
+
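+ /*
+ * Only parts listed in fch_pci_ids (Stoney) expose the oscout1
+ * mux between 48 MHz and 25 MHz; all other FCHs just gate a
+ * fixed 48 MHz clock.
+ */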
+ if (pci_match_id(fch_pci_ids, rdev)) {
hws[ST_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
NULL, 0, 48000000);
hws[ST_CLK_25M] = clk_hw_register_fixed_rate(NULL, "clk25MHz",
@@ -59,34 +75,38 @@ static int fch_clk_probe(struct platform_device *pdev)
OSCCLKENB, CLK_GATE_SET_TO_DISABLE, NULL);
devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE],
- "oscout1", NULL);
+ fch_data->name, NULL);
} else {
- hws[RV_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
+ hws[CLK_48M_FIXED] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
NULL, 0, 48000000);
- hws[RV_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1",
+ hws[CLK_GATE_FIXED] = clk_hw_register_gate(NULL, "oscout1",
"clk48MHz", 0, fch_data->base + MISCCLKCNTL1,
- OSCCLKENB, CLK_GATE_SET_TO_DISABLE, NULL);
+ OSCCLKENB, 0, NULL);
- devm_clk_hw_register_clkdev(&pdev->dev, hws[RV_CLK_GATE],
- "oscout1", NULL);
+ devm_clk_hw_register_clkdev(&pdev->dev, hws[CLK_GATE_FIXED],
+ fch_data->name, NULL);
}
+ pci_dev_put(rdev);
return 0;
}
static int fch_clk_remove(struct platform_device *pdev)
{
int i, clks;
- struct fch_clk_data *fch_data;
+ struct pci_dev *rdev;
- fch_data = dev_get_platdata(&pdev->dev);
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!rdev)
+ return -ENODEV;
- clks = fch_data->is_rv ? RV_MAX_CLKS : ST_MAX_CLKS;
+ clks = pci_match_id(fch_pci_ids, rdev) ? ST_MAX_CLKS : CLK_MAX_FIXED;
for (i = 0; i < clks; i++)
clk_hw_unregister(hws[i]);
+ pci_dev_put(rdev);
return 0;
}
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index 54f4184de89a..e5f8fb704df2 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -12,7 +12,7 @@
#include <linux/io.h>
/**
- * struct zynq_pll
+ * struct zynq_pll - pll clock
* @hw: Handle between common and hardware-specific interfaces
* @pll_ctrl: PLL control register
* @pll_status: PLL status register
@@ -46,7 +46,7 @@ struct zynq_pll {
* @hw: Handle between common and hardware-specific interfaces
* @rate: Desired clock frequency
* @prate: Clock frequency of parent clock
- * Returns frequency closest to @rate the hardware can generate.
+ * Return: frequency closest to @rate the hardware can generate.
*/
static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
@@ -66,7 +66,7 @@ static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate,
* zynq_pll_recalc_rate() - Recalculate clock frequency
* @hw: Handle between common and hardware-specific interfaces
* @parent_rate: Clock frequency of parent clock
- * Returns current clock frequency.
+ * Return: current clock frequency.
*/
static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
@@ -87,7 +87,7 @@ static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
/**
* zynq_pll_is_enabled - Check if a clock is enabled
* @hw: Handle between common and hardware-specific interfaces
- * Returns 1 if the clock is enabled, 0 otherwise.
+ * Return: 1 if the clock is enabled, 0 otherwise.
*
* Not sure this is a good idea, but since disabled means bypassed for
* this clock implementation we say we are always enabled.
@@ -110,7 +110,7 @@ static int zynq_pll_is_enabled(struct clk_hw *hw)
/**
* zynq_pll_enable - Enable clock
* @hw: Handle between common and hardware-specific interfaces
- * Returns 0 on success
+ * Return: 0 on success
*/
static int zynq_pll_enable(struct clk_hw *hw)
{
@@ -179,7 +179,7 @@ static const struct clk_ops zynq_pll_ops = {
* @pll_status: Pointer to PLL status register
* @lock_index: Bit index to this PLL's lock status bit in @pll_status
* @lock: Register lock
- * Returns handle to the registered clock.
+ * Return: handle to the registered clock.
*/
struct clk *clk_register_zynq_pll(const char *name, const char *parent,
void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f65e31bab9ae..cfb8ea0df3b1 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -510,7 +510,8 @@ config SH_TIMER_MTU2
This hardware comes with 16-bit timer registers.
config RENESAS_OSTM
- bool "Renesas OSTM timer driver" if COMPILE_TEST
+ bool "Renesas OSTM timer driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
select CLKSRC_MMIO
select TIMER_OF
help
@@ -671,6 +672,15 @@ config MILBEAUT_TIMER
help
Enables the support for Milbeaut timer driver.
+config MSC313E_TIMER
+ bool "MSC313E timer driver" if COMPILE_TEST
+ select TIMER_OF
+ select CLKSRC_MMIO
+ help
+ Enables support for the MStar MSC313E timer driver.
+ This provides access to multiple interrupt-generating,
+ programmable, 32-bit free-running incrementing counters.
+
config INGENIC_TIMER
bool "Clocksource/timer using the TCU in Ingenic JZ SoCs"
default MACH_INGENIC
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index c17ee32a7151..fa5f624eadb6 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -88,3 +88,4 @@ obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o
+obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 5e3e96d3d1b9..6db3d5511b0f 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -467,7 +467,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERCPU;
- evt->rating = MCT_CLKEVENTS_RATING,
+ evt->rating = MCT_CLKEVENTS_RATING;
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
@@ -504,11 +504,14 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
return 0;
}
-static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+static int __init exynos4_timer_resources(struct device_node *np)
{
- int err, cpu;
struct clk *mct_clk, *tick_clk;
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: unable to ioremap mct address space\n", __func__);
+
tick_clk = of_clk_get_by_name(np, "fin_pll");
if (IS_ERR(tick_clk))
panic("%s: unable to determine tick clock rate\n", __func__);
@@ -519,9 +522,27 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
panic("%s: unable to retrieve mct clock instance\n", __func__);
clk_prepare_enable(mct_clk);
- reg_base = base;
- if (!reg_base)
- panic("%s: unable to ioremap mct address space\n", __func__);
+ return 0;
+}
+
+static int __init exynos4_timer_interrupts(struct device_node *np,
+ unsigned int int_type)
+{
+ int nr_irqs, i, err, cpu;
+
+ mct_int_type = int_type;
+
+ /* This driver uses only one global timer interrupt */
+ mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);
+
+ /*
+ * Find out the number of local irqs specified. The local
+ * timer irqs are specified after the four global timer
+ * irqs are specified.
+ */
+ nr_irqs = of_irq_count(np);
+ for (i = MCT_L0_IRQ; i < nr_irqs; i++)
+ mct_irqs[i] = irq_of_parse_and_map(np, i);
if (mct_int_type == MCT_INT_PPI) {
@@ -581,24 +602,13 @@ out_irq:
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
- u32 nr_irqs, i;
int ret;
- mct_int_type = int_type;
-
- /* This driver uses only one global timer interrupt */
- mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);
-
- /*
- * Find out the number of local irqs specified. The local
- * timer irqs are specified after the four global timer
- * irqs are specified.
- */
- nr_irqs = of_irq_count(np);
- for (i = MCT_L0_IRQ; i < nr_irqs; i++)
- mct_irqs[i] = irq_of_parse_and_map(np, i);
+ ret = exynos4_timer_resources(np);
+ if (ret)
+ return ret;
- ret = exynos4_timer_resources(np, of_iomap(np, 0));
+ ret = exynos4_timer_interrupts(np, int_type);
if (ret)
return ret;
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 3d06ba66008c..21d1392637b8 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -9,6 +9,8 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
@@ -159,6 +161,7 @@ static int __init ostm_init_clkevt(struct timer_of *to)
static int __init ostm_init(struct device_node *np)
{
+ struct reset_control *rstc;
struct timer_of *to;
int ret;
@@ -166,6 +169,14 @@ static int __init ostm_init(struct device_node *np)
if (!to)
return -ENOMEM;
+ rstc = of_reset_control_get_optional_exclusive(np, NULL);
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ goto err_free;
+ }
+
+ reset_control_deassert(rstc);
+
to->flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
if (system_clock) {
/*
@@ -178,7 +189,7 @@ static int __init ostm_init(struct device_node *np)
ret = timer_of_init(np, to);
if (ret)
- goto err_free;
+ goto err_reset;
/*
* First probed device will be used as system clocksource. Any
@@ -203,9 +214,35 @@ static int __init ostm_init(struct device_node *np)
err_cleanup:
timer_of_cleanup(to);
+err_reset:
+ reset_control_assert(rstc);
+ reset_control_put(rstc);
err_free:
kfree(to);
return ret;
}
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
+
+#ifdef CONFIG_ARCH_R9A07G044
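+/*
+ * On RZ/G2L (R9A07G044) the OSTM sits behind a reset controller, so the
+ * early TIMER_OF_DECLARE path can run before the reset can be deasserted;
+ * registering a platform driver as well allows the device to be probed
+ * once the reset controller is available.
+ */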
+static int __init ostm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return ostm_init(dev->of_node);
+}
+
+static const struct of_device_id ostm_of_table[] = {
+ { .compatible = "renesas,ostm", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver ostm_device_driver = {
+ .driver = {
+ .name = "renesas_ostm",
+ .of_match_table = of_match_ptr(ostm_of_table),
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(ostm_device_driver, ostm_probe);
+#endif
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 18b90fc56bfc..55a8e198d2a1 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -20,8 +20,8 @@
#define SYS_CTR_CLK_DIV 0x3
-static void __iomem *sys_ctr_base;
-static u32 cmpcr;
+static void __iomem *sys_ctr_base __ro_after_init;
+static u32 cmpcr __ro_after_init;
static void sysctr_timer_enable(bool enable)
{
@@ -119,7 +119,7 @@ static struct timer_of to_sysctr = {
static void __init sysctr_clockevent_init(void)
{
- to_sysctr.clkevt.cpumask = cpumask_of(0);
+ to_sysctr.clkevt.cpumask = cpu_possible_mask;
clockevents_config_and_register(&to_sysctr.clkevt,
timer_of_rate(&to_sysctr),
diff --git a/drivers/clocksource/timer-msc313e.c b/drivers/clocksource/timer-msc313e.c
new file mode 100644
index 000000000000..54c54ca7c786
--- /dev/null
+++ b/drivers/clocksource/timer-msc313e.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MStar timer driver
+ *
+ * Copyright (C) 2021 Daniel Palmer
+ * Copyright (C) 2021 Romain Perier
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#ifdef CONFIG_ARM
+#include <linux/delay.h>
+#endif
+
+#include "timer-of.h"
+
+#define TIMER_NAME "msc313e_timer"
+
+#define MSC313E_REG_CTRL 0x00
+#define MSC313E_REG_CTRL_TIMER_EN BIT(0)
+#define MSC313E_REG_CTRL_TIMER_TRIG BIT(1)
+#define MSC313E_REG_CTRL_TIMER_INT_EN BIT(8)
+#define MSC313E_REG_TIMER_MAX_LOW 0x08
+#define MSC313E_REG_TIMER_MAX_HIGH 0x0c
+#define MSC313E_REG_COUNTER_LOW 0x10
+#define MSC313E_REG_COUNTER_HIGH 0x14
+#define MSC313E_REG_TIMER_DIVIDE 0x18
+
+#define MSC313E_CLK_DIVIDER 9
+#define TIMER_SYNC_TICKS 3
+
+#ifdef CONFIG_ARM
+struct msc313e_delay {
+ void __iomem *base;
+ struct delay_timer delay;
+};
+static struct msc313e_delay msc313e_delay;
+#endif
+
+static void __iomem *msc313e_clksrc;
+
+static void msc313e_timer_stop(void __iomem *base)
+{
+ writew(0, base + MSC313E_REG_CTRL);
+}
+
+static void msc313e_timer_start(void __iomem *base, bool periodic)
+{
+ u16 reg;
+
+ reg = readw(base + MSC313E_REG_CTRL);
+ if (periodic)
+ reg |= MSC313E_REG_CTRL_TIMER_EN;
+ else
+ reg |= MSC313E_REG_CTRL_TIMER_TRIG;
+ writew(reg | MSC313E_REG_CTRL_TIMER_INT_EN, base + MSC313E_REG_CTRL);
+}
+
+static void msc313e_timer_setup(void __iomem *base, unsigned long delay)
+{
+ unsigned long flags;
+
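+ /* the 32-bit timeout is split across two 16-bit max-value registers */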
+ local_irq_save(flags);
+ writew(delay >> 16, base + MSC313E_REG_TIMER_MAX_HIGH);
+ writew(delay & 0xffff, base + MSC313E_REG_TIMER_MAX_LOW);
+ local_irq_restore(flags);
+}
+
+static unsigned long msc313e_timer_current_value(void __iomem *base)
+{
+ unsigned long flags;
+ u16 l, h;
+
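+ /* read the two 16-bit counter halves with interrupts disabled */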
+ local_irq_save(flags);
+ l = readw(base + MSC313E_REG_COUNTER_LOW);
+ h = readw(base + MSC313E_REG_COUNTER_HIGH);
+ local_irq_restore(flags);
+
+ return (((u32)h) << 16 | l);
+}
+
+static int msc313e_timer_clkevt_shutdown(struct clock_event_device *evt)
+{
+ struct timer_of *timer = to_timer_of(evt);
+
+ msc313e_timer_stop(timer_of_base(timer));
+
+ return 0;
+}
+
+static int msc313e_timer_clkevt_set_oneshot(struct clock_event_device *evt)
+{
+ struct timer_of *timer = to_timer_of(evt);
+
+ msc313e_timer_stop(timer_of_base(timer));
+ msc313e_timer_start(timer_of_base(timer), false);
+
+ return 0;
+}
+
+static int msc313e_timer_clkevt_set_periodic(struct clock_event_device *evt)
+{
+ struct timer_of *timer = to_timer_of(evt);
+
+ msc313e_timer_stop(timer_of_base(timer));
+ msc313e_timer_setup(timer_of_base(timer), timer_of_period(timer));
+ msc313e_timer_start(timer_of_base(timer), true);
+
+ return 0;
+}
+
+static int msc313e_timer_clkevt_next_event(unsigned long evt, struct clock_event_device *clkevt)
+{
+ struct timer_of *timer = to_timer_of(clkevt);
+
+ msc313e_timer_stop(timer_of_base(timer));
+ msc313e_timer_setup(timer_of_base(timer), evt);
+ msc313e_timer_start(timer_of_base(timer), false);
+
+ return 0;
+}
+
+static irqreturn_t msc313e_timer_clkevt_irq(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static u64 msc313e_timer_clksrc_read(struct clocksource *cs)
+{
+ return msc313e_timer_current_value(msc313e_clksrc) & cs->mask;
+}
+
+#ifdef CONFIG_ARM
+static unsigned long msc313e_read_delay_timer_read(void)
+{
+ return msc313e_timer_current_value(msc313e_delay.base);
+}
+#endif
+
+static u64 msc313e_timer_sched_clock_read(void)
+{
+ return msc313e_timer_current_value(msc313e_clksrc);
+}
+
+static struct clock_event_device msc313e_clkevt = {
+ .name = TIMER_NAME,
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = msc313e_timer_clkevt_shutdown,
+ .set_state_periodic = msc313e_timer_clkevt_set_periodic,
+ .set_state_oneshot = msc313e_timer_clkevt_set_oneshot,
+ .tick_resume = msc313e_timer_clkevt_shutdown,
+ .set_next_event = msc313e_timer_clkevt_next_event,
+};
+
+static int __init msc313e_clkevt_init(struct device_node *np)
+{
+ int ret;
+ struct timer_of *to;
+
+ to = kzalloc(sizeof(*to), GFP_KERNEL);
+ if (!to)
+ return -ENOMEM;
+
+ to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
+ to->of_irq.handler = msc313e_timer_clkevt_irq;
+ ret = timer_of_init(np, to);
+ if (ret) {
+ kfree(to);
+ return ret;
+ }
+
+ if (of_device_is_compatible(np, "sstar,ssd20xd-timer")) {
+ to->of_clk.rate = clk_get_rate(to->of_clk.clk) / MSC313E_CLK_DIVIDER;
+ to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ);
+ writew(MSC313E_CLK_DIVIDER - 1, timer_of_base(to) + MSC313E_REG_TIMER_DIVIDE);
+ }
+
+ msc313e_clkevt.cpumask = cpu_possible_mask;
+ msc313e_clkevt.irq = to->of_irq.irq;
+ to->clkevt = msc313e_clkevt;
+
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+ return 0;
+}
+
+static int __init msc313e_clksrc_init(struct device_node *np)
+{
+ struct timer_of to = { 0 };
+ int ret;
+ u16 reg;
+
+ to.flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
+ ret = timer_of_init(np, &to);
+ if (ret)
+ return ret;
+
+ msc313e_clksrc = timer_of_base(&to);
+ reg = readw(msc313e_clksrc + MSC313E_REG_CTRL);
+ reg |= MSC313E_REG_CTRL_TIMER_EN;
+ writew(reg, msc313e_clksrc + MSC313E_REG_CTRL);
+
+#ifdef CONFIG_ARM
+ msc313e_delay.base = timer_of_base(&to);
+ msc313e_delay.delay.read_current_timer = msc313e_read_delay_timer_read;
+ msc313e_delay.delay.freq = timer_of_rate(&to);
+
+ register_current_timer_delay(&msc313e_delay.delay);
+#endif
+
+ sched_clock_register(msc313e_timer_sched_clock_read, 32, timer_of_rate(&to));
+ return clocksource_mmio_init(timer_of_base(&to), TIMER_NAME, timer_of_rate(&to), 300, 32,
+ msc313e_timer_clksrc_read);
+}
+
+static int __init msc313e_timer_init(struct device_node *np)
+{
+ int ret = 0;
+ static int num_called;
+
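+ /* the first probed node becomes the clocksource, later ones clockevents */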
+ switch (num_called) {
+ case 0:
+ ret = msc313e_clksrc_init(np);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ ret = msc313e_clkevt_init(np);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ num_called++;
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(msc313, "mstar,msc313e-timer", msc313e_timer_init);
+TIMER_OF_DECLARE(ssd20xd, "sstar,ssd20xd-timer", msc313e_timer_init);
diff --git a/drivers/clocksource/timer-pistachio.c b/drivers/clocksource/timer-pistachio.c
index 6f37181a8c63..69c069e6f0a2 100644
--- a/drivers/clocksource/timer-pistachio.c
+++ b/drivers/clocksource/timer-pistachio.c
@@ -71,7 +71,8 @@ static u64 notrace
pistachio_clocksource_read_cycles(struct clocksource *cs)
{
struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
- u32 counter, overflow;
+ __maybe_unused u32 overflow;
+ u32 counter;
unsigned long flags;
/*
diff --git a/drivers/comedi/comedi.h b/drivers/comedi/comedi.h
deleted file mode 100644
index b5d00a006dbb..000000000000
--- a/drivers/comedi/comedi.h
+++ /dev/null
@@ -1,1528 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.0+ */
-/*
- * comedi.h
- * header file for COMEDI user API
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
- */
-
-#ifndef _COMEDI_H
-#define _COMEDI_H
-
-#define COMEDI_MAJORVERSION 0
-#define COMEDI_MINORVERSION 7
-#define COMEDI_MICROVERSION 76
-#define VERSION "0.7.76"
-
-/* comedi's major device number */
-#define COMEDI_MAJOR 98
-
-/*
- * maximum number of minor devices. This can be increased, although
- * kernel structures are currently statically allocated, thus you
- * don't want this to be much more than you actually use.
- */
-#define COMEDI_NDEVICES 16
-
-/* number of config options in the config structure */
-#define COMEDI_NDEVCONFOPTS 32
-
-/*
- * NOTE: 'comedi_config --init-data' is deprecated
- *
- * The following indexes in the config options were used by
- * comedi_config to pass firmware blobs from user space to the
- * comedi drivers. The request_firmware() hotplug interface is
- * now used by all comedi drivers instead.
- */
-
-/* length of nth chunk of firmware data -*/
-#define COMEDI_DEVCONF_AUX_DATA3_LENGTH 25
-#define COMEDI_DEVCONF_AUX_DATA2_LENGTH 26
-#define COMEDI_DEVCONF_AUX_DATA1_LENGTH 27
-#define COMEDI_DEVCONF_AUX_DATA0_LENGTH 28
-/* most significant 32 bits of pointer address (if needed) */
-#define COMEDI_DEVCONF_AUX_DATA_HI 29
-/* least significant 32 bits of pointer address */
-#define COMEDI_DEVCONF_AUX_DATA_LO 30
-#define COMEDI_DEVCONF_AUX_DATA_LENGTH 31 /* total data length */
-
-/* max length of device and driver names */
-#define COMEDI_NAMELEN 20
-
-/* packs and unpacks a channel/range number */
-
-#define CR_PACK(chan, rng, aref) \
- ((((aref) & 0x3) << 24) | (((rng) & 0xff) << 16) | (chan))
-#define CR_PACK_FLAGS(chan, range, aref, flags) \
- (CR_PACK(chan, range, aref) | ((flags) & CR_FLAGS_MASK))
-
-#define CR_CHAN(a) ((a) & 0xffff)
-#define CR_RANGE(a) (((a) >> 16) & 0xff)
-#define CR_AREF(a) (((a) >> 24) & 0x03)
-
-#define CR_FLAGS_MASK 0xfc000000
-#define CR_ALT_FILTER 0x04000000
-#define CR_DITHER CR_ALT_FILTER
-#define CR_DEGLITCH CR_ALT_FILTER
-#define CR_ALT_SOURCE 0x08000000
-#define CR_EDGE 0x40000000
-#define CR_INVERT 0x80000000
-
-#define AREF_GROUND 0x00 /* analog ref = analog ground */
-#define AREF_COMMON 0x01 /* analog ref = analog common */
-#define AREF_DIFF 0x02 /* analog ref = differential */
-#define AREF_OTHER 0x03 /* analog ref = other (undefined) */
-
-/* counters -- these are arbitrary values */
-#define GPCT_RESET 0x0001
-#define GPCT_SET_SOURCE 0x0002
-#define GPCT_SET_GATE 0x0004
-#define GPCT_SET_DIRECTION 0x0008
-#define GPCT_SET_OPERATION 0x0010
-#define GPCT_ARM 0x0020
-#define GPCT_DISARM 0x0040
-#define GPCT_GET_INT_CLK_FRQ 0x0080
-
-#define GPCT_INT_CLOCK 0x0001
-#define GPCT_EXT_PIN 0x0002
-#define GPCT_NO_GATE 0x0004
-#define GPCT_UP 0x0008
-#define GPCT_DOWN 0x0010
-#define GPCT_HWUD 0x0020
-#define GPCT_SIMPLE_EVENT 0x0040
-#define GPCT_SINGLE_PERIOD 0x0080
-#define GPCT_SINGLE_PW 0x0100
-#define GPCT_CONT_PULSE_OUT 0x0200
-#define GPCT_SINGLE_PULSE_OUT 0x0400
-
-/* instructions */
-
-#define INSN_MASK_WRITE 0x8000000
-#define INSN_MASK_READ 0x4000000
-#define INSN_MASK_SPECIAL 0x2000000
-
-#define INSN_READ (0 | INSN_MASK_READ)
-#define INSN_WRITE (1 | INSN_MASK_WRITE)
-#define INSN_BITS (2 | INSN_MASK_READ | INSN_MASK_WRITE)
-#define INSN_CONFIG (3 | INSN_MASK_READ | INSN_MASK_WRITE)
-#define INSN_DEVICE_CONFIG (INSN_CONFIG | INSN_MASK_SPECIAL)
-#define INSN_GTOD (4 | INSN_MASK_READ | INSN_MASK_SPECIAL)
-#define INSN_WAIT (5 | INSN_MASK_WRITE | INSN_MASK_SPECIAL)
-#define INSN_INTTRIG (6 | INSN_MASK_WRITE | INSN_MASK_SPECIAL)
-
-/* command flags */
-/* These flags are used in comedi_cmd structures */
-
-#define CMDF_BOGUS 0x00000001 /* do the motions */
-
-/* try to use a real-time interrupt while performing command */
-#define CMDF_PRIORITY 0x00000008
-
-/* wake up on end-of-scan events */
-#define CMDF_WAKE_EOS 0x00000020
-
-#define CMDF_WRITE 0x00000040
-
-#define CMDF_RAWDATA 0x00000080
-
-/* timer rounding definitions */
-#define CMDF_ROUND_MASK 0x00030000
-#define CMDF_ROUND_NEAREST 0x00000000
-#define CMDF_ROUND_DOWN 0x00010000
-#define CMDF_ROUND_UP 0x00020000
-#define CMDF_ROUND_UP_NEXT 0x00030000
-
-#define COMEDI_EV_START 0x00040000
-#define COMEDI_EV_SCAN_BEGIN 0x00080000
-#define COMEDI_EV_CONVERT 0x00100000
-#define COMEDI_EV_SCAN_END 0x00200000
-#define COMEDI_EV_STOP 0x00400000
-
-/* compatibility definitions */
-#define TRIG_BOGUS CMDF_BOGUS
-#define TRIG_RT CMDF_PRIORITY
-#define TRIG_WAKE_EOS CMDF_WAKE_EOS
-#define TRIG_WRITE CMDF_WRITE
-#define TRIG_ROUND_MASK CMDF_ROUND_MASK
-#define TRIG_ROUND_NEAREST CMDF_ROUND_NEAREST
-#define TRIG_ROUND_DOWN CMDF_ROUND_DOWN
-#define TRIG_ROUND_UP CMDF_ROUND_UP
-#define TRIG_ROUND_UP_NEXT CMDF_ROUND_UP_NEXT
-
-/* trigger sources */
-
-#define TRIG_ANY 0xffffffff
-#define TRIG_INVALID 0x00000000
-
-#define TRIG_NONE 0x00000001 /* never trigger */
-#define TRIG_NOW 0x00000002 /* trigger now + N ns */
-#define TRIG_FOLLOW 0x00000004 /* trigger on next lower level trig */
-#define TRIG_TIME 0x00000008 /* trigger at time N ns */
-#define TRIG_TIMER 0x00000010 /* trigger at rate N ns */
-#define TRIG_COUNT 0x00000020 /* trigger when count reaches N */
-#define TRIG_EXT 0x00000040 /* trigger on external signal N */
-#define TRIG_INT 0x00000080 /* trigger on comedi-internal signal N */
-#define TRIG_OTHER 0x00000100 /* driver defined */
-
-/* subdevice flags */
-
-#define SDF_BUSY 0x0001 /* device is busy */
-#define SDF_BUSY_OWNER 0x0002 /* device is busy with your job */
-#define SDF_LOCKED 0x0004 /* subdevice is locked */
-#define SDF_LOCK_OWNER 0x0008 /* you own lock */
-#define SDF_MAXDATA 0x0010 /* maxdata depends on channel */
-#define SDF_FLAGS 0x0020 /* flags depend on channel */
-#define SDF_RANGETYPE 0x0040 /* range type depends on channel */
-#define SDF_PWM_COUNTER 0x0080 /* PWM can automatically switch off */
-#define SDF_PWM_HBRIDGE 0x0100 /* PWM is signed (H-bridge) */
-#define SDF_CMD 0x1000 /* can do commands (deprecated) */
-#define SDF_SOFT_CALIBRATED 0x2000 /* subdevice uses software calibration */
-#define SDF_CMD_WRITE 0x4000 /* can do output commands */
-#define SDF_CMD_READ 0x8000 /* can do input commands */
-
-/* subdevice can be read (e.g. analog input) */
-#define SDF_READABLE 0x00010000
-/* subdevice can be written (e.g. analog output) */
-#define SDF_WRITABLE 0x00020000
-#define SDF_WRITEABLE SDF_WRITABLE /* spelling error in API */
-/* subdevice does not have externally visible lines */
-#define SDF_INTERNAL 0x00040000
-#define SDF_GROUND 0x00100000 /* can do aref=ground */
-#define SDF_COMMON 0x00200000 /* can do aref=common */
-#define SDF_DIFF 0x00400000 /* can do aref=diff */
-#define SDF_OTHER 0x00800000 /* can do aref=other */
-#define SDF_DITHER 0x01000000 /* can do dithering */
-#define SDF_DEGLITCH 0x02000000 /* can do deglitching */
-#define SDF_MMAP 0x04000000 /* can do mmap() */
-#define SDF_RUNNING 0x08000000 /* subdevice is acquiring data */
-#define SDF_LSAMPL 0x10000000 /* subdevice uses 32-bit samples */
-#define SDF_PACKED 0x20000000 /* subdevice can do packed DIO */
-
-/* subdevice types */
-
-/**
- * enum comedi_subdevice_type - COMEDI subdevice types
- * @COMEDI_SUBD_UNUSED: Unused subdevice.
- * @COMEDI_SUBD_AI: Analog input.
- * @COMEDI_SUBD_AO: Analog output.
- * @COMEDI_SUBD_DI: Digital input.
- * @COMEDI_SUBD_DO: Digital output.
- * @COMEDI_SUBD_DIO: Digital input/output.
- * @COMEDI_SUBD_COUNTER: Counter.
- * @COMEDI_SUBD_TIMER: Timer.
- * @COMEDI_SUBD_MEMORY: Memory, EEPROM, DPRAM.
- * @COMEDI_SUBD_CALIB: Calibration DACs.
- * @COMEDI_SUBD_PROC: Processor, DSP.
- * @COMEDI_SUBD_SERIAL: Serial I/O.
- * @COMEDI_SUBD_PWM: Pulse-Width Modulation output.
- */
-enum comedi_subdevice_type {
- COMEDI_SUBD_UNUSED,
- COMEDI_SUBD_AI,
- COMEDI_SUBD_AO,
- COMEDI_SUBD_DI,
- COMEDI_SUBD_DO,
- COMEDI_SUBD_DIO,
- COMEDI_SUBD_COUNTER,
- COMEDI_SUBD_TIMER,
- COMEDI_SUBD_MEMORY,
- COMEDI_SUBD_CALIB,
- COMEDI_SUBD_PROC,
- COMEDI_SUBD_SERIAL,
- COMEDI_SUBD_PWM
-};
-
-/* configuration instructions */
-
-/**
- * enum comedi_io_direction - COMEDI I/O directions
- * @COMEDI_INPUT: Input.
- * @COMEDI_OUTPUT: Output.
- * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output.
- *
- * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
- * report a direction. They may also be used in other places where a direction
- * needs to be specified.
- */
-enum comedi_io_direction {
- COMEDI_INPUT = 0,
- COMEDI_OUTPUT = 1,
- COMEDI_OPENDRAIN = 2
-};
-
-/**
- * enum configuration_ids - COMEDI configuration instruction codes
- * @INSN_CONFIG_DIO_INPUT: Configure digital I/O as input.
- * @INSN_CONFIG_DIO_OUTPUT: Configure digital I/O as output.
- * @INSN_CONFIG_DIO_OPENDRAIN: Configure digital I/O as open-drain (or open
- * collector) output.
- * @INSN_CONFIG_ANALOG_TRIG: Configure analog trigger.
- * @INSN_CONFIG_ALT_SOURCE: Configure alternate input source.
- * @INSN_CONFIG_DIGITAL_TRIG: Configure digital trigger.
- * @INSN_CONFIG_BLOCK_SIZE: Configure block size for DMA transfers.
- * @INSN_CONFIG_TIMER_1: Configure divisor for external clock.
- * @INSN_CONFIG_FILTER: Configure a filter.
- * @INSN_CONFIG_CHANGE_NOTIFY: Configure change notification for digital
- * inputs. (New drivers should use
- * %INSN_CONFIG_DIGITAL_TRIG instead.)
- * @INSN_CONFIG_SERIAL_CLOCK: Configure clock for serial I/O.
- * @INSN_CONFIG_BIDIRECTIONAL_DATA: Send and receive byte over serial I/O.
- * @INSN_CONFIG_DIO_QUERY: Query direction of digital I/O channel.
- * @INSN_CONFIG_PWM_OUTPUT: Configure pulse-width modulator output.
- * @INSN_CONFIG_GET_PWM_OUTPUT: Get pulse-width modulator output configuration.
- * @INSN_CONFIG_ARM: Arm a subdevice or channel.
- * @INSN_CONFIG_DISARM: Disarm a subdevice or channel.
- * @INSN_CONFIG_GET_COUNTER_STATUS: Get counter status.
- * @INSN_CONFIG_RESET: Reset a subdevice or channel.
- * @INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR: Configure counter/timer as
- * single pulse generator.
- * @INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR: Configure counter/timer as
- * pulse train generator.
- * @INSN_CONFIG_GPCT_QUADRATURE_ENCODER: Configure counter as a quadrature
- * encoder.
- * @INSN_CONFIG_SET_GATE_SRC: Set counter/timer gate source.
- * @INSN_CONFIG_GET_GATE_SRC: Get counter/timer gate source.
- * @INSN_CONFIG_SET_CLOCK_SRC: Set counter/timer master clock source.
- * @INSN_CONFIG_GET_CLOCK_SRC: Get counter/timer master clock source.
- * @INSN_CONFIG_SET_OTHER_SRC: Set counter/timer "other" source.
- * @INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: Get size (in bytes) of subdevice's
- * on-board FIFOs used during streaming
- * input/output.
- * @INSN_CONFIG_SET_COUNTER_MODE: Set counter/timer mode.
- * @INSN_CONFIG_8254_SET_MODE: (Deprecated) Same as
- * %INSN_CONFIG_SET_COUNTER_MODE.
- * @INSN_CONFIG_8254_READ_STATUS: Read status of 8254 counter channel.
- * @INSN_CONFIG_SET_ROUTING: Set routing for a channel.
- * @INSN_CONFIG_GET_ROUTING: Get routing for a channel.
- * @INSN_CONFIG_PWM_SET_PERIOD: Set PWM period in nanoseconds.
- * @INSN_CONFIG_PWM_GET_PERIOD: Get PWM period in nanoseconds.
- * @INSN_CONFIG_GET_PWM_STATUS: Get PWM status.
- * @INSN_CONFIG_PWM_SET_H_BRIDGE: Set PWM H bridge duty cycle and polarity for
- * a relay simultaneously.
- * @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity.
- * @INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS: Get the hardware timing restraints,
- * regardless of trigger sources.
- */
-enum configuration_ids {
- INSN_CONFIG_DIO_INPUT = COMEDI_INPUT,
- INSN_CONFIG_DIO_OUTPUT = COMEDI_OUTPUT,
- INSN_CONFIG_DIO_OPENDRAIN = COMEDI_OPENDRAIN,
- INSN_CONFIG_ANALOG_TRIG = 16,
-/* INSN_CONFIG_WAVEFORM = 17, */
-/* INSN_CONFIG_TRIG = 18, */
-/* INSN_CONFIG_COUNTER = 19, */
- INSN_CONFIG_ALT_SOURCE = 20,
- INSN_CONFIG_DIGITAL_TRIG = 21,
- INSN_CONFIG_BLOCK_SIZE = 22,
- INSN_CONFIG_TIMER_1 = 23,
- INSN_CONFIG_FILTER = 24,
- INSN_CONFIG_CHANGE_NOTIFY = 25,
-
- INSN_CONFIG_SERIAL_CLOCK = 26, /*ALPHA*/
- INSN_CONFIG_BIDIRECTIONAL_DATA = 27,
- INSN_CONFIG_DIO_QUERY = 28,
- INSN_CONFIG_PWM_OUTPUT = 29,
- INSN_CONFIG_GET_PWM_OUTPUT = 30,
- INSN_CONFIG_ARM = 31,
- INSN_CONFIG_DISARM = 32,
- INSN_CONFIG_GET_COUNTER_STATUS = 33,
- INSN_CONFIG_RESET = 34,
- INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001,
- INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002,
- INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003,
- INSN_CONFIG_SET_GATE_SRC = 2001,
- INSN_CONFIG_GET_GATE_SRC = 2002,
- INSN_CONFIG_SET_CLOCK_SRC = 2003,
- INSN_CONFIG_GET_CLOCK_SRC = 2004,
- INSN_CONFIG_SET_OTHER_SRC = 2005,
- INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE = 2006,
- INSN_CONFIG_SET_COUNTER_MODE = 4097,
- INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE,
- INSN_CONFIG_8254_READ_STATUS = 4098,
- INSN_CONFIG_SET_ROUTING = 4099,
- INSN_CONFIG_GET_ROUTING = 4109,
- INSN_CONFIG_PWM_SET_PERIOD = 5000,
- INSN_CONFIG_PWM_GET_PERIOD = 5001,
- INSN_CONFIG_GET_PWM_STATUS = 5002,
- INSN_CONFIG_PWM_SET_H_BRIDGE = 5003,
- INSN_CONFIG_PWM_GET_H_BRIDGE = 5004,
- INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS = 5005,
-};
-
-/**
- * enum device_configuration_ids - COMEDI configuration instruction codes global
- * to an entire device.
- * @INSN_DEVICE_CONFIG_TEST_ROUTE: Validate the possibility of a
- * globally-named route
- * @INSN_DEVICE_CONFIG_CONNECT_ROUTE: Connect a globally-named route
- * @INSN_DEVICE_CONFIG_DISCONNECT_ROUTE:Disconnect a globally-named route
- * @INSN_DEVICE_CONFIG_GET_ROUTES: Get a list of all globally-named routes
- * that are valid for a particular device.
- */
-enum device_config_route_ids {
- INSN_DEVICE_CONFIG_TEST_ROUTE = 0,
- INSN_DEVICE_CONFIG_CONNECT_ROUTE = 1,
- INSN_DEVICE_CONFIG_DISCONNECT_ROUTE = 2,
- INSN_DEVICE_CONFIG_GET_ROUTES = 3,
-};
-
-/**
- * enum comedi_digital_trig_op - operations for configuring a digital trigger
- * @COMEDI_DIGITAL_TRIG_DISABLE: Return digital trigger to its default,
- * inactive, unconfigured state.
- * @COMEDI_DIGITAL_TRIG_ENABLE_EDGES: Set rising and/or falling edge inputs
- * that each can fire the trigger.
- * @COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: Set a combination of high and/or low
- * level inputs that can fire the trigger.
- *
- * These are used with the %INSN_CONFIG_DIGITAL_TRIG configuration instruction.
- * The data for the configuration instruction is as follows...
- *
- * data[%0] = %INSN_CONFIG_DIGITAL_TRIG
- *
- * data[%1] = trigger ID
- *
- * data[%2] = configuration operation
- *
- * data[%3] = configuration parameter 1
- *
- * data[%4] = configuration parameter 2
- *
- * data[%5] = configuration parameter 3
- *
- * The trigger ID (data[%1]) is used to differentiate multiple digital triggers
- * belonging to the same subdevice. The configuration operation (data[%2]) is
- * one of the enum comedi_digital_trig_op values. The configuration
- * parameters (data[%3], data[%4], and data[%5]) depend on the operation; they
- * are not used with %COMEDI_DIGITAL_TRIG_DISABLE.
- *
- * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES and %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS,
- * configuration parameter 1 (data[%3]) contains a "left-shift" value that
- * specifies the input corresponding to bit 0 of configuration parameters 2
- * and 3. This is useful if the trigger has more than 32 inputs.
- *
- * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES, configuration parameter 2 (data[%4])
- * specifies which of up to 32 inputs have rising-edge sensitivity, and
- * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs
- * have falling-edge sensitivity that can fire the trigger.
- *
- * For %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS, configuration parameter 2 (data[%4])
- * specifies which of up to 32 inputs must be at a high level, and
- * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs
- * must be at a low level for the trigger to fire.
- *
- * Some sequences of %INSN_CONFIG_DIGITAL_TRIG instructions may have a (partly)
- * accumulative effect, depending on the low-level driver. This is useful
- * when setting up a trigger that has more than 32 inputs, or has a combination
- * of edge- and level-triggered inputs.
- */
-enum comedi_digital_trig_op {
- COMEDI_DIGITAL_TRIG_DISABLE = 0,
- COMEDI_DIGITAL_TRIG_ENABLE_EDGES = 1,
- COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = 2
-};
-
-/**
- * enum comedi_support_level - support level for a COMEDI feature
- * @COMEDI_UNKNOWN_SUPPORT: Unspecified support for feature.
- * @COMEDI_SUPPORTED: Feature is supported.
- * @COMEDI_UNSUPPORTED: Feature is unsupported.
- */
-enum comedi_support_level {
- COMEDI_UNKNOWN_SUPPORT = 0,
- COMEDI_SUPPORTED,
- COMEDI_UNSUPPORTED
-};
-
-/**
- * enum comedi_counter_status_flags - counter status bits
- * @COMEDI_COUNTER_ARMED: Counter is armed.
- * @COMEDI_COUNTER_COUNTING: Counter is counting.
- * @COMEDI_COUNTER_TERMINAL_COUNT: Counter reached terminal count.
- *
- * These bitwise values are used by the %INSN_CONFIG_GET_COUNTER_STATUS
- * configuration instruction to report the status of a counter.
- */
-enum comedi_counter_status_flags {
- COMEDI_COUNTER_ARMED = 0x1,
- COMEDI_COUNTER_COUNTING = 0x2,
- COMEDI_COUNTER_TERMINAL_COUNT = 0x4,
-};
-
-/* ioctls */
-
-#define CIO 'd'
-#define COMEDI_DEVCONFIG _IOW(CIO, 0, struct comedi_devconfig)
-#define COMEDI_DEVINFO _IOR(CIO, 1, struct comedi_devinfo)
-#define COMEDI_SUBDINFO _IOR(CIO, 2, struct comedi_subdinfo)
-#define COMEDI_CHANINFO _IOR(CIO, 3, struct comedi_chaninfo)
-/* _IOWR(CIO, 4, ...) is reserved */
-#define COMEDI_LOCK _IO(CIO, 5)
-#define COMEDI_UNLOCK _IO(CIO, 6)
-#define COMEDI_CANCEL _IO(CIO, 7)
-#define COMEDI_RANGEINFO _IOR(CIO, 8, struct comedi_rangeinfo)
-#define COMEDI_CMD _IOR(CIO, 9, struct comedi_cmd)
-#define COMEDI_CMDTEST _IOR(CIO, 10, struct comedi_cmd)
-#define COMEDI_INSNLIST _IOR(CIO, 11, struct comedi_insnlist)
-#define COMEDI_INSN _IOR(CIO, 12, struct comedi_insn)
-#define COMEDI_BUFCONFIG _IOR(CIO, 13, struct comedi_bufconfig)
-#define COMEDI_BUFINFO _IOWR(CIO, 14, struct comedi_bufinfo)
-#define COMEDI_POLL _IO(CIO, 15)
-#define COMEDI_SETRSUBD _IO(CIO, 16)
-#define COMEDI_SETWSUBD _IO(CIO, 17)
-
-/* structures */
-
-/**
- * struct comedi_insn - COMEDI instruction
- * @insn: COMEDI instruction type (%INSN_xxx).
- * @n: Length of @data[].
- * @data: Pointer to data array operated on by the instruction.
- * @subdev: Subdevice index.
- * @chanspec: A packed "chanspec" value consisting of channel number,
- * analog range index, analog reference type, and flags.
- * @unused: Reserved for future use.
- *
- * This is used with the %COMEDI_INSN ioctl, and indirectly with the
- * %COMEDI_INSNLIST ioctl.
- */
-struct comedi_insn {
- unsigned int insn;
- unsigned int n;
- unsigned int __user *data;
- unsigned int subdev;
- unsigned int chanspec;
- unsigned int unused[3];
-};
-
-/**
- * struct comedi_insnlist - list of COMEDI instructions
- * @n_insns: Number of COMEDI instructions.
- * @insns: Pointer to array COMEDI instructions.
- *
- * This is used with the %COMEDI_INSNLIST ioctl.
- */
-struct comedi_insnlist {
- unsigned int n_insns;
- struct comedi_insn __user *insns;
-};
-
-/**
- * struct comedi_cmd - COMEDI asynchronous acquisition command details
- * @subdev: Subdevice index.
- * @flags: Command flags (%CMDF_xxx).
- * @start_src: "Start acquisition" trigger source (%TRIG_xxx).
- * @start_arg: "Start acquisition" trigger argument.
- * @scan_begin_src: "Scan begin" trigger source.
- * @scan_begin_arg: "Scan begin" trigger argument.
- * @convert_src: "Convert" trigger source.
- * @convert_arg: "Convert" trigger argument.
- * @scan_end_src: "Scan end" trigger source.
- * @scan_end_arg: "Scan end" trigger argument.
- * @stop_src: "Stop acquisition" trigger source.
- * @stop_arg: "Stop acquisition" trigger argument.
- * @chanlist: Pointer to array of "chanspec" values, containing a
- * sequence of channel numbers packed with analog range
- * index, etc.
- * @chanlist_len: Number of channels in sequence.
- * @data: Pointer to miscellaneous set-up data (not used).
- * @data_len: Length of miscellaneous set-up data.
- *
- * This is used with the %COMEDI_CMD or %COMEDI_CMDTEST ioctl to set-up
- * or validate an asynchronous acquisition command. The ioctl may modify
- * the &struct comedi_cmd and copy it back to the caller.
- *
- * Optional command @flags values that can be ORed together...
- *
- * %CMDF_BOGUS - makes %COMEDI_CMD ioctl return error %EAGAIN instead of
- * starting the command.
- *
- * %CMDF_PRIORITY - requests "hard real-time" processing (which is not
- * supported in this version of COMEDI).
- *
- * %CMDF_WAKE_EOS - requests the command makes data available for reading
- * after every "scan" period.
- *
- * %CMDF_WRITE - marks the command as being in the "write" (to device)
- * direction. This does not need to be specified by the caller unless the
- * subdevice supports commands in either direction.
- *
- * %CMDF_RAWDATA - prevents the command from "munging" the data between the
- * COMEDI sample format and the raw hardware sample format.
- *
- * %CMDF_ROUND_NEAREST - requests timing periods to be rounded to nearest
- * supported values.
- *
- * %CMDF_ROUND_DOWN - requests timing periods to be rounded down to supported
- * values (frequencies rounded up).
- *
- * %CMDF_ROUND_UP - requests timing periods to be rounded up to supported
- * values (frequencies rounded down).
- *
- * Trigger source values for @start_src, @scan_begin_src, @convert_src,
- * @scan_end_src, and @stop_src...
- *
- * %TRIG_ANY - "all ones" value used to test which trigger sources are
- * supported.
- *
- * %TRIG_INVALID - "all zeroes" value used to indicate that all requested
- * trigger sources are invalid.
- *
- * %TRIG_NONE - never trigger (often used as a @stop_src value).
- *
- * %TRIG_NOW - trigger after '_arg' nanoseconds.
- *
- * %TRIG_FOLLOW - trigger follows another event.
- *
- * %TRIG_TIMER - trigger every '_arg' nanoseconds.
- *
- * %TRIG_COUNT - trigger when count '_arg' is reached.
- *
- * %TRIG_EXT - trigger on external signal specified by '_arg'.
- *
- * %TRIG_INT - trigger on internal, software trigger specified by '_arg'.
- *
- * %TRIG_OTHER - trigger on other, driver-defined signal specified by '_arg'.
- */
-struct comedi_cmd {
- unsigned int subdev;
- unsigned int flags;
-
- unsigned int start_src;
- unsigned int start_arg;
-
- unsigned int scan_begin_src;
- unsigned int scan_begin_arg;
-
- unsigned int convert_src;
- unsigned int convert_arg;
-
- unsigned int scan_end_src;
- unsigned int scan_end_arg;
-
- unsigned int stop_src;
- unsigned int stop_arg;
-
- unsigned int *chanlist;
- unsigned int chanlist_len;
-
- short __user *data;
- unsigned int data_len;
-};
-
-/**
- * struct comedi_chaninfo - used to retrieve per-channel information
- * @subdev: Subdevice index.
- * @maxdata_list: Optional pointer to per-channel maximum data values.
- * @flaglist: Optional pointer to per-channel flags.
- * @rangelist: Optional pointer to per-channel range types.
- * @unused: Reserved for future use.
- *
- * This is used with the %COMEDI_CHANINFO ioctl to get per-channel information
- * for the subdevice. Use of this requires knowledge of the number of channels
- * and subdevice flags obtained using the %COMEDI_SUBDINFO ioctl.
- *
- * The @maxdata_list member must be %NULL unless the %SDF_MAXDATA subdevice
- * flag is set. The @flaglist member must be %NULL unless the %SDF_FLAGS
- * subdevice flag is set. The @rangelist member must be %NULL unless the
- * %SDF_RANGETYPE subdevice flag is set. Otherwise, the arrays they point to
- * must be at least as long as the number of channels.
- */
-struct comedi_chaninfo {
- unsigned int subdev;
- unsigned int __user *maxdata_list;
- unsigned int __user *flaglist;
- unsigned int __user *rangelist;
- unsigned int unused[4];
-};
-
-/**
- * struct comedi_rangeinfo - used to retrieve the range table for a channel
- * @range_type: Encodes subdevice index (bits 27:24), channel index
- * (bits 23:16) and range table length (bits 15:0).
- * @range_ptr: Pointer to array of &struct comedi_krange to be filled
- * in with the range table for the channel or subdevice.
- *
- * This is used with the %COMEDI_RANGEINFO ioctl to retrieve the range table
- * for a specific channel (if the subdevice has the %SDF_RANGETYPE flag set to
- * indicate that the range table depends on the channel), or for the subdevice
- * as a whole (if the %SDF_RANGETYPE flag is clear, indicating the range table
- * is shared by all channels).
- *
- * The @range_type value is an input to the ioctl and comes from a previous
- * use of the %COMEDI_SUBDINFO ioctl (if the %SDF_RANGETYPE flag is clear),
- * or the %COMEDI_CHANINFO ioctl (if the %SDF_RANGETYPE flag is set).
- */
-struct comedi_rangeinfo {
- unsigned int range_type;
- void __user *range_ptr;
-};
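-
-/*
- * For illustration, the documented bit layout of @range_type unpacks as
- * follows ('rt' stands for a value previously obtained from the
- * %COMEDI_SUBDINFO or %COMEDI_CHANINFO ioctl):
- *
- *	unsigned int subdev = (rt >> 24) & 0xf;	(bits 27:24)
- *	unsigned int chan = (rt >> 16) & 0xff;	(bits 23:16)
- *	unsigned int len = rt & 0xffff;		(bits 15:0)
- *
- * The array at @range_ptr must therefore have room for 'len' elements of
- * &struct comedi_krange.
- */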
-
-/**
- * struct comedi_krange - describes a range in a range table
- * @min: Minimum value in millionths (1e-6) of a unit.
- * @max: Maximum value in millionths (1e-6) of a unit.
- * @flags: Indicates the units (in bits 7:0) OR'ed with optional flags.
- *
- * A range table is associated with a single channel, or with all channels in
- * a subdevice, and consists of a list of one or more ranges. A &struct
- * comedi_krange describes the physical range of units for one of those
- * ranges. Sample
- * values in COMEDI are unsigned from %0 up to some 'maxdata' value. The
- * mapping from sample values to physical units is assumed to be nominally
- * linear (for the purpose of describing the range), with sample value %0
- * mapping to @min, and the 'maxdata' sample value mapping to @max.
- *
- * The currently defined units are %UNIT_volt (%0), %UNIT_mA (%1), and
- * %UNIT_none (%2). The @min and @max values are the physical range multiplied
- * by 1e6, so a @max value of %1000000 (with %UNIT_volt) represents a maximal
- * value of 1 volt.
- *
- * The only defined flag value is %RF_EXTERNAL (%0x100), indicating that the
- * range needs to be multiplied by an external reference.
- */
-struct comedi_krange {
- int min;
- int max;
- unsigned int flags;
-};
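-
-/*
- * A worked example of the nominal linear mapping described above, under
- * the stated assumptions (raw samples from 0 to 'maxdata', @min and @max
- * in millionths of a unit):
- *
- *	double physical(unsigned int sample, unsigned int maxdata,
- *			const struct comedi_krange *r)
- *	{
- *		double lo = r->min * 1e-6, hi = r->max * 1e-6;
- *
- *		return lo + (hi - lo) * sample / maxdata;
- *	}
- *
- * With @min = %-10000000, @max = %10000000 (%UNIT_volt) and maxdata =
- * 65535, sample 0 maps to -10 V and sample 65535 maps to +10 V.
- */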
-
-/**
- * struct comedi_subdinfo - used to retrieve information about a subdevice
- * @type: Type of subdevice from &enum comedi_subdevice_type.
- * @n_chan: Number of channels the subdevice supports.
- * @subd_flags: A mixture of static and dynamic flags describing
- * aspects of the subdevice and its current state.
- * @timer_type: Timer type. Always set to %5 ("nanosecond timer").
- * @len_chanlist: Maximum length of a channel list if the subdevice
- * supports asynchronous acquisition commands.
- * @maxdata: Maximum sample value for all channels if the
- * %SDF_MAXDATA subdevice flag is clear.
- * @flags: Channel flags for all channels if the %SDF_FLAGS
- * subdevice flag is clear.
- * @range_type: The range type for all channels if the %SDF_RANGETYPE
- * subdevice flag is clear. Encodes the subdevice index
- * (bits 27:24), a dummy channel index %0 (bits 23:16),
- * and the range table length (bits 15:0).
- * @settling_time_0: Not used.
- * @insn_bits_support: Set to %COMEDI_SUPPORTED if the subdevice supports the
- * %INSN_BITS instruction, or to %COMEDI_UNSUPPORTED if it
- * does not.
- * @unused: Reserved for future use.
- *
- * This is used with the %COMEDI_SUBDINFO ioctl which copies an array of
- * &struct comedi_subdinfo back to user space, with one element per subdevice.
- * Use of this requires knowledge of the number of subdevices obtained from
- * the %COMEDI_DEVINFO ioctl.
- *
- * These are the @subd_flags values that may be ORed together...
- *
- * %SDF_BUSY - the subdevice is busy processing an asynchronous command or a
- * synchronous instruction.
- *
- * %SDF_BUSY_OWNER - the subdevice is busy processing an asynchronous
- * acquisition command started on the current file object (the file object
- * issuing the %COMEDI_SUBDINFO ioctl).
- *
- * %SDF_LOCKED - the subdevice is locked by a %COMEDI_LOCK ioctl.
- *
- * %SDF_LOCK_OWNER - the subdevice is locked by a %COMEDI_LOCK ioctl from the
- * current file object.
- *
- * %SDF_MAXDATA - maximum sample values are channel-specific.
- *
- * %SDF_FLAGS - channel flags are channel-specific.
- *
- * %SDF_RANGETYPE - range types are channel-specific.
- *
- * %SDF_PWM_COUNTER - PWM can switch off automatically.
- *
- * %SDF_PWM_HBRIDGE - PWM output is signed (H-bridge).
- *
- * %SDF_CMD - the subdevice supports asynchronous commands.
- *
- * %SDF_SOFT_CALIBRATED - the subdevice uses software calibration.
- *
- * %SDF_CMD_WRITE - the subdevice supports asynchronous commands in the output
- * ("write") direction.
- *
- * %SDF_CMD_READ - the subdevice supports asynchronous commands in the input
- * ("read") direction.
- *
- * %SDF_READABLE - the subdevice is readable (e.g. analog input).
- *
- * %SDF_WRITABLE (aliased as %SDF_WRITEABLE) - the subdevice is writable (e.g.
- * analog output).
- *
- * %SDF_INTERNAL - the subdevice has no externally visible lines.
- *
- * %SDF_GROUND - the subdevice can use ground as an analog reference.
- *
- * %SDF_COMMON - the subdevice can use a common analog reference.
- *
- * %SDF_DIFF - the subdevice can use differential inputs (or outputs).
- *
- * %SDF_OTHER - the subdevice can use some other analog reference.
- *
- * %SDF_DITHER - the subdevice can do dithering.
- *
- * %SDF_DEGLITCH - the subdevice can do deglitching.
- *
- * %SDF_MMAP - this is never set.
- *
- * %SDF_RUNNING - an asynchronous command is still running.
- *
- * %SDF_LSAMPL - the subdevice uses "long" (32-bit) samples (for asynchronous
- * command data).
- *
- * %SDF_PACKED - the subdevice packs several DIO samples into a single sample
- * (for asynchronous command data).
- *
- * No "channel flags" (@flags) values are currently defined.
- */
-struct comedi_subdinfo {
- unsigned int type;
- unsigned int n_chan;
- unsigned int subd_flags;
- unsigned int timer_type;
- unsigned int len_chanlist;
- unsigned int maxdata;
- unsigned int flags;
- unsigned int range_type;
- unsigned int settling_time_0;
- unsigned int insn_bits_support;
- unsigned int unused[8];
-};
-
-/**
- * struct comedi_devinfo - used to retrieve information about a COMEDI device
- * @version_code: COMEDI version code.
- * @n_subdevs: Number of subdevices the device has.
- * @driver_name: Null-terminated COMEDI driver name.
- * @board_name: Null-terminated COMEDI board name.
- * @read_subdevice: Index of the current "read" subdevice (%-1 if none).
- * @write_subdevice: Index of the current "write" subdevice (%-1 if none).
- * @unused: Reserved for future use.
- *
- * This is used with the %COMEDI_DEVINFO ioctl to get basic information about
- * the device.
- */
-struct comedi_devinfo {
- unsigned int version_code;
- unsigned int n_subdevs;
- char driver_name[COMEDI_NAMELEN];
- char board_name[COMEDI_NAMELEN];
- int read_subdevice;
- int write_subdevice;
- int unused[30];
-};
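-
-/*
- * Typical use, as a user-space sketch (the device path is an assumption):
- *
- *	struct comedi_devinfo info;
- *	int fd = open("/dev/comedi0", O_RDWR);
- *
- *	if (fd >= 0 && ioctl(fd, COMEDI_DEVINFO, &info) == 0)
- *		printf("%s on %s: %u subdevices\n",
- *		       info.driver_name, info.board_name, info.n_subdevs);
- */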
-
-/**
- * struct comedi_devconfig - used to configure a legacy COMEDI device
- * @board_name: Null-terminated string specifying the type of board
- * to configure.
- * @options: An array of integer configuration options.
- *
- * This is used with the %COMEDI_DEVCONFIG ioctl to configure a "legacy" COMEDI
- * device, such as an ISA card. Not all COMEDI drivers support this. Those
- * that do either expect the specified board name to match one of a list of
- * names registered with the COMEDI core, or expect the specified board name
- * to match the COMEDI driver name itself. The configuration options are
- * handled in a driver-specific manner.
- */
-struct comedi_devconfig {
- char board_name[COMEDI_NAMELEN];
- int options[COMEDI_NDEVCONFOPTS];
-};
-
-/**
- * struct comedi_bufconfig - used to set or get buffer size for a subdevice
- * @subdevice: Subdevice index.
- * @flags: Not used.
- * @maximum_size: Maximum allowed buffer size.
- * @size: Buffer size.
- * @unused: Reserved for future use.
- *
- * This is used with the %COMEDI_BUFCONFIG ioctl to get or configure the
- * maximum buffer size and current buffer size for a COMEDI subdevice that
- * supports asynchronous commands. If the subdevice does not support
- * asynchronous commands, @maximum_size and @size are ignored and set to 0.
- *
- * On ioctl input, non-zero values of @maximum_size and @size specify a
- * new maximum size and new current size (in bytes), respectively. These
- * will be rounded up to a multiple of %PAGE_SIZE. Specifying a new maximum
- * size requires admin capabilities.
- *
- * On ioctl output, @maximum_size and @size are set to the current maximum
- * buffer size and current buffer size, respectively.
- */
-struct comedi_bufconfig {
- unsigned int subdevice;
- unsigned int flags;
-
- unsigned int maximum_size;
- unsigned int size;
-
- unsigned int unused[4];
-};
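-
-/*
- * For illustration, a user-space sketch resizing the buffer of subdevice
- * 0 to 64 KiB ('fd' is assumed to be an open COMEDI device file
- * descriptor):
- *
- *	struct comedi_bufconfig bc;
- *
- *	memset(&bc, 0, sizeof(bc));
- *	bc.subdevice = 0;
- *	bc.size = 65536;
- *	ioctl(fd, COMEDI_BUFCONFIG, &bc);
- *
- * Leaving @maximum_size at 0 keeps the current maximum; changing it
- * requires admin capabilities, as noted above.
- */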
-
-/**
- * struct comedi_bufinfo - used to manipulate buffer position for a subdevice
- * @subdevice: Subdevice index.
- * @bytes_read: Specify amount to advance read position for an
- * asynchronous command in the input ("read") direction.
- * @buf_write_ptr: Current write position (index) within the buffer.
- * @buf_read_ptr: Current read position (index) within the buffer.
- * @buf_write_count: Total amount written, modulo 2^32.
- * @buf_read_count: Total amount read, modulo 2^32.
- * @bytes_written: Specify amount to advance write position for an
- * asynchronous command in the output ("write") direction.
- * @unused: Reserved for future use.
- *
- * This is used with the %COMEDI_BUFINFO ioctl to optionally advance the
- * current read or write position in an asynchronous acquisition data buffer,
- * and to get the current read and write positions in the buffer.
- */
-struct comedi_bufinfo {
- unsigned int subdevice;
- unsigned int bytes_read;
-
- unsigned int buf_write_ptr;
- unsigned int buf_read_ptr;
- unsigned int buf_write_count;
- unsigned int buf_read_count;
-
- unsigned int bytes_written;
-
- unsigned int unused[4];
-};
-
-/* range stuff */
-
-#define __RANGE(a, b) ((((a) & 0xffff) << 16) | ((b) & 0xffff))
-
-#define RANGE_OFFSET(a) (((a) >> 16) & 0xffff)
-#define RANGE_LENGTH(b) ((b) & 0xffff)
-
-#define RF_UNIT(flags) ((flags) & 0xff)
-#define RF_EXTERNAL 0x100
-
-#define UNIT_volt 0
-#define UNIT_mA 1
-#define UNIT_none 2
-
-#define COMEDI_MIN_SPEED 0xffffffffu
-
-/**********************************************************/
-/* everything after this line is ALPHA */
-/**********************************************************/
-
-/*
- * 8254 specific configuration.
- *
- * It supports two config commands:
- *
- * data[0] = INSN_CONFIG_SET_COUNTER_MODE
- * data[1] = the 8254 mode, i.e. one of
- * I8254_MODE0, I8254_MODE1, ..., I8254_MODE5,
- * OR'ed with:
- * I8254_BCD or I8254_BINARY
- *
- * data[0] = INSN_CONFIG_8254_READ_STATUS
- * data[1] <-- status byte returned here:
- * B7 = output
- * B6 = NULL count
- * B5 - B0 = current mode
- */
-
-enum i8254_mode {
- I8254_MODE0 = (0 << 1), /* Interrupt on terminal count */
- I8254_MODE1 = (1 << 1), /* Hardware retriggerable one-shot */
- I8254_MODE2 = (2 << 1), /* Rate generator */
- I8254_MODE3 = (3 << 1), /* Square wave mode */
- I8254_MODE4 = (4 << 1), /* Software triggered strobe */
- /* Hardware triggered strobe (retriggerable) */
- I8254_MODE5 = (5 << 1),
- /* Use binary-coded decimal instead of binary (pretty useless) */
- I8254_BCD = 1,
- I8254_BINARY = 0
-};
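-
-/*
- * For example, the data array of an %INSN_CONFIG instruction selecting
- * rate generator mode with binary counting would be set up as:
- *
- *	data[0] = INSN_CONFIG_SET_COUNTER_MODE;
- *	data[1] = I8254_MODE2 | I8254_BINARY;
- */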
-
-/* *** BEGIN GLOBALLY-NAMED NI TERMINALS/SIGNALS *** */
-
-/*
- * Common National Instruments Terminal/Signal names.
- * Some of these have no NI_ prefix as they are useful for non-NI hardware, such
- * as those that utilize the PXI/RTSI trigger lines.
- *
- * NOTE ABOUT THE CHOICE OF NAMES HERE AND THE CAMELSCRIPT:
- * The choice to use CamelScript and the exact names below is for
- * maintainability, clarity, similarity to manufacturer's documentation,
- * _and_ a mitigation for confusion that has plagued the use of these drivers
- * for years!
- *
- * More detail:
- * For many years, users have been significantly confused when trying to
- * understand how to connect to/from signals and terminals on NI hardware
- * using comedi. The major reason for this is that the actual register
- * values were exposed and users were required to use them. This caused
- * confusion for several reasons:
- * 1) The register values are _NOT_ in user documentation, but rather in
- * arcane locations, such as a few register programming manuals that are
- * increasingly hard to find and the NI MHDDK (comments in example code).
- * There is no one place to find the various valid values of the registers.
- * 2) The register values are _NOT_ completely consistent. There is no way to
- * develop an intuition for which values, or even which enums, one should use
- * for the various registers. Earlier versions of comedi attempted to name
- * enums such that a user might know which enums should be used for which
- * purposes, but the end-user still had to learn the register values to
- * wield this approach correctly.
- * 3) The names for signals and registers found in the various register level
- * programming manuals and vendor-provided documentation are _not_ even
- * close to the same names that are in the end-user documentation.
- *
- * Similar, albeit lesser, confusion plagued NI's own earlier drivers.
- * Before 2003, NI greatly simplified the situation for users
- * by releasing a new API that abstracted the names of signals/terminals to a
- * common and intuitive set of names.
- *
- * The names below mirror the names chosen and well documented by NI. These
- * names are exposed to the user via the comedilib user library. By keeping
- * the names below, in spite of the use of CamelScript, maintenance will be
- * greatly eased and confusion for users _and_ comedi developers will be
- * greatly reduced.
- */
-
-/*
- * Base of abstracted NI names.
- * The first 16 bits of *_arg are reserved for channel selection.
- * Since we only actually need the first 4 or 5 bits for all register values on
- * NI select registers anyways, we'll identify all values >= (1<<15) as being an
- * abstracted NI signal/terminal name.
- * These values are also used/returned by INSN_DEVICE_CONFIG_TEST_ROUTE,
- * INSN_DEVICE_CONFIG_CONNECT_ROUTE, INSN_DEVICE_CONFIG_DISCONNECT_ROUTE,
- * and INSN_DEVICE_CONFIG_GET_ROUTES.
- */
-#define NI_NAMES_BASE 0x8000u
-
-#define _TERM_N(base, n, x) ((base) + ((x) & ((n) - 1)))
-
-/*
- * Not all 64 allowed PFIs are necessarily valid; certainly not for all devices.
- */
-#define NI_PFI(x) _TERM_N(NI_NAMES_BASE, 64, x)
-/* 8 trigger lines by standard; some devices cannot talk to all eight. */
-#define TRIGGER_LINE(x) _TERM_N(NI_PFI(-1) + 1, 8, x)
-/* 4 RTSI shared MUXes to route signals to/from TRIGGER_LINES on NI hardware */
-#define NI_RTSI_BRD(x) _TERM_N(TRIGGER_LINE(-1) + 1, 4, x)
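-
-/*
- * Worked values for the macros above (base 0x8000, each block sized by
- * its 'n' argument, and x == -1 masked down to the last entry of the
- * block):
- *
- *	NI_PFI(0) == 0x8000		NI_PFI(-1) == 0x803f
- *	TRIGGER_LINE(0) == 0x8040	TRIGGER_LINE(-1) == 0x8047
- *	NI_RTSI_BRD(0) == 0x8048	NI_RTSI_BRD(-1) == 0x804b
- *
- * Each *_NAMES_BASE below thus starts immediately after the previous
- * block ends.
- */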
-
-/* *** Counter/timer names : 8 counters max *** */
-#define NI_MAX_COUNTERS 8
-#define NI_COUNTER_NAMES_BASE (NI_RTSI_BRD(-1) + 1)
-#define NI_CtrSource(x) _TERM_N(NI_COUNTER_NAMES_BASE, NI_MAX_COUNTERS, x)
-/* Gate, Aux, A,B,Z are all treated, at times as gates */
-#define NI_GATES_NAMES_BASE (NI_CtrSource(-1) + 1)
-#define NI_CtrGate(x) _TERM_N(NI_GATES_NAMES_BASE, NI_MAX_COUNTERS, x)
-#define NI_CtrAux(x) _TERM_N(NI_CtrGate(-1) + 1, NI_MAX_COUNTERS, x)
-#define NI_CtrA(x) _TERM_N(NI_CtrAux(-1) + 1, NI_MAX_COUNTERS, x)
-#define NI_CtrB(x) _TERM_N(NI_CtrA(-1) + 1, NI_MAX_COUNTERS, x)
-#define NI_CtrZ(x) _TERM_N(NI_CtrB(-1) + 1, NI_MAX_COUNTERS, x)
-#define NI_GATES_NAMES_MAX NI_CtrZ(-1)
-#define NI_CtrArmStartTrigger(x) _TERM_N(NI_CtrZ(-1) + 1, NI_MAX_COUNTERS, x)
-#define NI_CtrInternalOutput(x) \
- _TERM_N(NI_CtrArmStartTrigger(-1) + 1, NI_MAX_COUNTERS, x)
-/** external pin(s) labeled conveniently as Ctr<i>Out. */
-#define NI_CtrOut(x) _TERM_N(NI_CtrInternalOutput(-1) + 1, NI_MAX_COUNTERS, x)
-/** For Buffered sampling of ctr -- x series capability. */
-#define NI_CtrSampleClock(x) _TERM_N(NI_CtrOut(-1) + 1, NI_MAX_COUNTERS, x)
-#define NI_COUNTER_NAMES_MAX NI_CtrSampleClock(-1)
-
-enum ni_common_signal_names {
- /* PXI_Star: this is a non-NI-specific signal */
- PXI_Star = NI_COUNTER_NAMES_MAX + 1,
- PXI_Clk10,
- PXIe_Clk100,
- NI_AI_SampleClock,
- NI_AI_SampleClockTimebase,
- NI_AI_StartTrigger,
- NI_AI_ReferenceTrigger,
- NI_AI_ConvertClock,
- NI_AI_ConvertClockTimebase,
- NI_AI_PauseTrigger,
- NI_AI_HoldCompleteEvent,
- NI_AI_HoldComplete,
- NI_AI_ExternalMUXClock,
-	NI_AI_STOP, /* pulse signal that occurs when an update is finished(?) */
- NI_AO_SampleClock,
- NI_AO_SampleClockTimebase,
- NI_AO_StartTrigger,
- NI_AO_PauseTrigger,
- NI_DI_SampleClock,
- NI_DI_SampleClockTimebase,
- NI_DI_StartTrigger,
- NI_DI_ReferenceTrigger,
- NI_DI_PauseTrigger,
- NI_DI_InputBufferFull,
- NI_DI_ReadyForStartEvent,
- NI_DI_ReadyForTransferEventBurst,
- NI_DI_ReadyForTransferEventPipelined,
- NI_DO_SampleClock,
- NI_DO_SampleClockTimebase,
- NI_DO_StartTrigger,
- NI_DO_PauseTrigger,
- NI_DO_OutputBufferFull,
- NI_DO_DataActiveEvent,
- NI_DO_ReadyForStartEvent,
- NI_DO_ReadyForTransferEvent,
- NI_MasterTimebase,
- NI_20MHzTimebase,
- NI_80MHzTimebase,
- NI_100MHzTimebase,
- NI_200MHzTimebase,
- NI_100kHzTimebase,
- NI_10MHzRefClock,
- NI_FrequencyOutput,
- NI_ChangeDetectionEvent,
- NI_AnalogComparisonEvent,
- NI_WatchdogExpiredEvent,
- NI_WatchdogExpirationTrigger,
- NI_SCXI_Trig1,
- NI_LogicLow,
- NI_LogicHigh,
- NI_ExternalStrobe,
- NI_PFI_DO,
- NI_CaseGround,
- /* special internal signal used as variable source for RTSI bus: */
- NI_RGOUT0,
-
-	/* just a name to make the next definition convenient, regardless of above */
- _NI_NAMES_MAX_PLUS_1,
- NI_NUM_NAMES = _NI_NAMES_MAX_PLUS_1 - NI_NAMES_BASE,
-};
-
-/* *** END GLOBALLY-NAMED NI TERMINALS/SIGNALS *** */
-
-#define NI_USUAL_PFI_SELECT(x) (((x) < 10) ? (0x1 + (x)) : (0xb + (x)))
-#define NI_USUAL_RTSI_SELECT(x) (((x) < 7) ? (0xb + (x)) : 0x1b)
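-
-/*
- * For reference, these expand to the following register select values
- * (note the gap after PFI 9, and that RTSI lines past 6 all select 0x1b):
- *
- *	NI_USUAL_PFI_SELECT(0) == 0x01	NI_USUAL_PFI_SELECT(9) == 0x0a
- *	NI_USUAL_PFI_SELECT(10) == 0x15	NI_USUAL_RTSI_SELECT(0) == 0x0b
- *	NI_USUAL_RTSI_SELECT(7) == 0x1b
- */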
-
-/*
- * mode bits for NI general-purpose counters, set with
- * INSN_CONFIG_SET_COUNTER_MODE
- */
-#define NI_GPCT_COUNTING_MODE_SHIFT 16
-#define NI_GPCT_INDEX_PHASE_BITSHIFT 20
-#define NI_GPCT_COUNTING_DIRECTION_SHIFT 24
-enum ni_gpct_mode_bits {
- NI_GPCT_GATE_ON_BOTH_EDGES_BIT = 0x4,
- NI_GPCT_EDGE_GATE_MODE_MASK = 0x18,
- NI_GPCT_EDGE_GATE_STARTS_STOPS_BITS = 0x0,
- NI_GPCT_EDGE_GATE_STOPS_STARTS_BITS = 0x8,
- NI_GPCT_EDGE_GATE_STARTS_BITS = 0x10,
- NI_GPCT_EDGE_GATE_NO_STARTS_NO_STOPS_BITS = 0x18,
- NI_GPCT_STOP_MODE_MASK = 0x60,
- NI_GPCT_STOP_ON_GATE_BITS = 0x00,
- NI_GPCT_STOP_ON_GATE_OR_TC_BITS = 0x20,
- NI_GPCT_STOP_ON_GATE_OR_SECOND_TC_BITS = 0x40,
- NI_GPCT_LOAD_B_SELECT_BIT = 0x80,
- NI_GPCT_OUTPUT_MODE_MASK = 0x300,
- NI_GPCT_OUTPUT_TC_PULSE_BITS = 0x100,
- NI_GPCT_OUTPUT_TC_TOGGLE_BITS = 0x200,
- NI_GPCT_OUTPUT_TC_OR_GATE_TOGGLE_BITS = 0x300,
- NI_GPCT_HARDWARE_DISARM_MASK = 0xc00,
- NI_GPCT_NO_HARDWARE_DISARM_BITS = 0x000,
- NI_GPCT_DISARM_AT_TC_BITS = 0x400,
- NI_GPCT_DISARM_AT_GATE_BITS = 0x800,
- NI_GPCT_DISARM_AT_TC_OR_GATE_BITS = 0xc00,
- NI_GPCT_LOADING_ON_TC_BIT = 0x1000,
- NI_GPCT_LOADING_ON_GATE_BIT = 0x4000,
- NI_GPCT_COUNTING_MODE_MASK = 0x7 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_NORMAL_BITS =
- 0x0 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_QUADRATURE_X1_BITS =
- 0x1 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_QUADRATURE_X2_BITS =
- 0x2 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS =
- 0x3 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_TWO_PULSE_BITS =
- 0x4 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_COUNTING_MODE_SYNC_SOURCE_BITS =
- 0x6 << NI_GPCT_COUNTING_MODE_SHIFT,
- NI_GPCT_INDEX_PHASE_MASK = 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT,
- NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS =
- 0x0 << NI_GPCT_INDEX_PHASE_BITSHIFT,
- NI_GPCT_INDEX_PHASE_LOW_A_HIGH_B_BITS =
- 0x1 << NI_GPCT_INDEX_PHASE_BITSHIFT,
- NI_GPCT_INDEX_PHASE_HIGH_A_LOW_B_BITS =
- 0x2 << NI_GPCT_INDEX_PHASE_BITSHIFT,
- NI_GPCT_INDEX_PHASE_HIGH_A_HIGH_B_BITS =
- 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT,
- NI_GPCT_INDEX_ENABLE_BIT = 0x400000,
- NI_GPCT_COUNTING_DIRECTION_MASK =
- 0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
- NI_GPCT_COUNTING_DIRECTION_DOWN_BITS =
- 0x00 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
- NI_GPCT_COUNTING_DIRECTION_UP_BITS =
- 0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
- NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS =
- 0x2 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
- NI_GPCT_COUNTING_DIRECTION_HW_GATE_BITS =
- 0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT,
- NI_GPCT_RELOAD_SOURCE_MASK = 0xc000000,
- NI_GPCT_RELOAD_SOURCE_FIXED_BITS = 0x0,
- NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS = 0x4000000,
- NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS = 0x8000000,
- NI_GPCT_OR_GATE_BIT = 0x10000000,
- NI_GPCT_INVERT_OUTPUT_BIT = 0x20000000
-};
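-
-/*
- * For example (an illustrative combination, not a requirement of any
- * particular board), the data array of an %INSN_CONFIG instruction
- * selecting X4 quadrature decoding with the index input enabled:
- *
- *	data[0] = INSN_CONFIG_SET_COUNTER_MODE;
- *	data[1] = NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS |
- *		  NI_GPCT_INDEX_ENABLE_BIT;
- */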
-
-/*
- * Bits for setting a clock source with
- * INSN_CONFIG_SET_CLOCK_SRC when using NI general-purpose counters.
- */
-enum ni_gpct_clock_source_bits {
- NI_GPCT_CLOCK_SRC_SELECT_MASK = 0x3f,
- NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS = 0x0,
- NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS = 0x1,
- NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS = 0x2,
- NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS = 0x3,
- NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS = 0x4,
- NI_GPCT_NEXT_TC_CLOCK_SRC_BITS = 0x5,
- /* NI 660x-specific */
- NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS = 0x6,
- NI_GPCT_PXI10_CLOCK_SRC_BITS = 0x7,
- NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS = 0x8,
- NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS = 0x9,
- NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK = 0x30000000,
- NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS = 0x0,
- /* divide source by 2 */
- NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS = 0x10000000,
- /* divide source by 8 */
- NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS = 0x20000000,
- NI_GPCT_INVERT_CLOCK_SRC_BIT = 0x80000000
-};
-
-/* NI 660x-specific */
-#define NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(x) (0x10 + (x))
-
-#define NI_GPCT_RTSI_CLOCK_SRC_BITS(x) (0x18 + (x))
-
-/* no pfi on NI 660x */
-#define NI_GPCT_PFI_CLOCK_SRC_BITS(x) (0x20 + (x))
-
-/*
- * Possibilities for setting a gate source with
- * INSN_CONFIG_SET_GATE_SRC when using NI general-purpose counters.
- * May be bitwise-or'd with CR_EDGE or CR_INVERT.
- */
-enum ni_gpct_gate_select {
- /* m-series gates */
- NI_GPCT_TIMESTAMP_MUX_GATE_SELECT = 0x0,
- NI_GPCT_AI_START2_GATE_SELECT = 0x12,
- NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT = 0x13,
- NI_GPCT_NEXT_OUT_GATE_SELECT = 0x14,
- NI_GPCT_AI_START1_GATE_SELECT = 0x1c,
- NI_GPCT_NEXT_SOURCE_GATE_SELECT = 0x1d,
- NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT = 0x1e,
- NI_GPCT_LOGIC_LOW_GATE_SELECT = 0x1f,
- /* more gates for 660x */
- NI_GPCT_SOURCE_PIN_i_GATE_SELECT = 0x100,
- NI_GPCT_GATE_PIN_i_GATE_SELECT = 0x101,
- /* more gates for 660x "second gate" */
- NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT = 0x201,
- NI_GPCT_SELECTED_GATE_GATE_SELECT = 0x21e,
- /*
-	 * m-series "second gate" sources are unknown;
- * we should add them here with an offset of 0x300 when
- * known.
- */
- NI_GPCT_DISABLED_GATE_SELECT = 0x8000,
-};
-
-#define NI_GPCT_GATE_PIN_GATE_SELECT(x) (0x102 + (x))
-#define NI_GPCT_RTSI_GATE_SELECT(x) NI_USUAL_RTSI_SELECT(x)
-#define NI_GPCT_PFI_GATE_SELECT(x) NI_USUAL_PFI_SELECT(x)
-#define NI_GPCT_UP_DOWN_PIN_GATE_SELECT(x) (0x202 + (x))
-
-/*
- * Possibilities for setting a source with
- * INSN_CONFIG_SET_OTHER_SRC when using NI general-purpose counters.
- */
-enum ni_gpct_other_index {
- NI_GPCT_SOURCE_ENCODER_A,
- NI_GPCT_SOURCE_ENCODER_B,
- NI_GPCT_SOURCE_ENCODER_Z
-};
-
-enum ni_gpct_other_select {
- /* m-series gates */
- /* Still unknown, probably only need NI_GPCT_PFI_OTHER_SELECT */
- NI_GPCT_DISABLED_OTHER_SELECT = 0x8000,
-};
-
-#define NI_GPCT_PFI_OTHER_SELECT(x) NI_USUAL_PFI_SELECT(x)
-
-/*
- * start sources for ni general-purpose counters for use with
- * INSN_CONFIG_ARM
- */
-enum ni_gpct_arm_source {
- NI_GPCT_ARM_IMMEDIATE = 0x0,
- /*
- * Start both the counter and the adjacent paired counter simultaneously
- */
- NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1,
- /*
- * If the NI_GPCT_HW_ARM bit is set, we will pass the least significant
- * bits (3 bits for 660x or 5 bits for m-series) through to the
- * hardware. To select a hardware trigger, pass the appropriate select
- * bit, e.g.,
- * NI_GPCT_HW_ARM | NI_GPCT_AI_START1_GATE_SELECT or
- * NI_GPCT_HW_ARM | NI_GPCT_PFI_GATE_SELECT(pfi_number)
- */
- NI_GPCT_HW_ARM = 0x1000,
- NI_GPCT_ARM_UNKNOWN = NI_GPCT_HW_ARM, /* for backward compatibility */
-};
-
-/* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */
-enum ni_gpct_filter_select {
- NI_GPCT_FILTER_OFF = 0x0,
- NI_GPCT_FILTER_TIMEBASE_3_SYNC = 0x1,
- NI_GPCT_FILTER_100x_TIMEBASE_1 = 0x2,
- NI_GPCT_FILTER_20x_TIMEBASE_1 = 0x3,
- NI_GPCT_FILTER_10x_TIMEBASE_1 = 0x4,
- NI_GPCT_FILTER_2x_TIMEBASE_1 = 0x5,
- NI_GPCT_FILTER_2x_TIMEBASE_3 = 0x6
-};
-
-/*
- * PFI digital filtering options for ni m-series for use with
- * INSN_CONFIG_FILTER.
- */
-enum ni_pfi_filter_select {
- NI_PFI_FILTER_OFF = 0x0,
- NI_PFI_FILTER_125ns = 0x1,
- NI_PFI_FILTER_6425ns = 0x2,
- NI_PFI_FILTER_2550us = 0x3
-};
-
-/* master clock sources for ni mio boards and INSN_CONFIG_SET_CLOCK_SRC */
-enum ni_mio_clock_source {
- NI_MIO_INTERNAL_CLOCK = 0,
- /*
-	 * Doesn't work for m-series; use NI_MIO_PLL_RTSI_CLOCK() instead.
-	 * The NI_MIO_PLL_* sources are m-series only.
- */
- NI_MIO_RTSI_CLOCK = 1,
- NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK = 2,
- NI_MIO_PLL_PXI10_CLOCK = 3,
- NI_MIO_PLL_RTSI0_CLOCK = 4
-};
-
-#define NI_MIO_PLL_RTSI_CLOCK(x) (NI_MIO_PLL_RTSI0_CLOCK + (x))
-
-/*
- * Signals which can be routed to an NI RTSI pin with INSN_CONFIG_SET_ROUTING.
- * The numbers assigned are not arbitrary, they correspond to the bits required
- * to program the board.
- */
-enum ni_rtsi_routing {
- NI_RTSI_OUTPUT_ADR_START1 = 0,
- NI_RTSI_OUTPUT_ADR_START2 = 1,
- NI_RTSI_OUTPUT_SCLKG = 2,
- NI_RTSI_OUTPUT_DACUPDN = 3,
- NI_RTSI_OUTPUT_DA_START1 = 4,
- NI_RTSI_OUTPUT_G_SRC0 = 5,
- NI_RTSI_OUTPUT_G_GATE0 = 6,
- NI_RTSI_OUTPUT_RGOUT0 = 7,
- NI_RTSI_OUTPUT_RTSI_BRD_0 = 8,
- /* Pre-m-series always have RTSI clock on line 7 */
- NI_RTSI_OUTPUT_RTSI_OSC = 12
-};
-
-#define NI_RTSI_OUTPUT_RTSI_BRD(x) (NI_RTSI_OUTPUT_RTSI_BRD_0 + (x))
-
-/*
- * Signals which can be routed to an NI PFI pin on an m-series board with
- * INSN_CONFIG_SET_ROUTING. These numbers are also returned by
- * INSN_CONFIG_GET_ROUTING on pre-m-series boards, even though their routing
- * cannot be changed. The numbers assigned are not arbitrary, they correspond
- * to the bits required to program the board.
- */
-enum ni_pfi_routing {
- NI_PFI_OUTPUT_PFI_DEFAULT = 0,
- NI_PFI_OUTPUT_AI_START1 = 1,
- NI_PFI_OUTPUT_AI_START2 = 2,
- NI_PFI_OUTPUT_AI_CONVERT = 3,
- NI_PFI_OUTPUT_G_SRC1 = 4,
- NI_PFI_OUTPUT_G_GATE1 = 5,
- NI_PFI_OUTPUT_AO_UPDATE_N = 6,
- NI_PFI_OUTPUT_AO_START1 = 7,
- NI_PFI_OUTPUT_AI_START_PULSE = 8,
- NI_PFI_OUTPUT_G_SRC0 = 9,
- NI_PFI_OUTPUT_G_GATE0 = 10,
- NI_PFI_OUTPUT_EXT_STROBE = 11,
- NI_PFI_OUTPUT_AI_EXT_MUX_CLK = 12,
- NI_PFI_OUTPUT_GOUT0 = 13,
- NI_PFI_OUTPUT_GOUT1 = 14,
- NI_PFI_OUTPUT_FREQ_OUT = 15,
- NI_PFI_OUTPUT_PFI_DO = 16,
- NI_PFI_OUTPUT_I_ATRIG = 17,
- NI_PFI_OUTPUT_RTSI0 = 18,
- NI_PFI_OUTPUT_PXI_STAR_TRIGGER_IN = 26,
- NI_PFI_OUTPUT_SCXI_TRIG1 = 27,
- NI_PFI_OUTPUT_DIO_CHANGE_DETECT_RTSI = 28,
- NI_PFI_OUTPUT_CDI_SAMPLE = 29,
- NI_PFI_OUTPUT_CDO_UPDATE = 30
-};
-
-#define NI_PFI_OUTPUT_RTSI(x) (NI_PFI_OUTPUT_RTSI0 + (x))
-
-/*
- * Signals which can be routed to output on a NI PFI pin on a 660x board
- * with INSN_CONFIG_SET_ROUTING. The numbers assigned are
- * not arbitrary, they correspond to the bits required
- * to program the board. Lines 0 to 7 can only be set to
- * NI_660X_PFI_OUTPUT_DIO. Lines 32 to 39 can only be set to
- * NI_660X_PFI_OUTPUT_COUNTER.
- */
-enum ni_660x_pfi_routing {
- NI_660X_PFI_OUTPUT_COUNTER = 1, /* counter */
- NI_660X_PFI_OUTPUT_DIO = 2, /* static digital output */
-};
-
-/*
- * NI External Trigger lines. These values are not arbitrary, but are related
- * to the bits required to program the board (offset by 1 for historical
- * reasons).
- */
-#define NI_EXT_PFI(x) (NI_USUAL_PFI_SELECT(x) - 1)
-#define NI_EXT_RTSI(x) (NI_USUAL_RTSI_SELECT(x) - 1)
-
-/*
- * Clock sources for CDIO subdevice on NI m-series boards. Used as the
- * scan_begin_arg for a comedi_command. These sources may also be bitwise-or'd
- * with CR_INVERT to change polarity.
- */
-enum ni_m_series_cdio_scan_begin_src {
- NI_CDIO_SCAN_BEGIN_SRC_GROUND = 0,
- NI_CDIO_SCAN_BEGIN_SRC_AI_START = 18,
- NI_CDIO_SCAN_BEGIN_SRC_AI_CONVERT = 19,
- NI_CDIO_SCAN_BEGIN_SRC_PXI_STAR_TRIGGER = 20,
- NI_CDIO_SCAN_BEGIN_SRC_G0_OUT = 28,
- NI_CDIO_SCAN_BEGIN_SRC_G1_OUT = 29,
- NI_CDIO_SCAN_BEGIN_SRC_ANALOG_TRIGGER = 30,
- NI_CDIO_SCAN_BEGIN_SRC_AO_UPDATE = 31,
- NI_CDIO_SCAN_BEGIN_SRC_FREQ_OUT = 32,
- NI_CDIO_SCAN_BEGIN_SRC_DIO_CHANGE_DETECT_IRQ = 33
-};
-
-#define NI_CDIO_SCAN_BEGIN_SRC_PFI(x) NI_USUAL_PFI_SELECT(x)
-#define NI_CDIO_SCAN_BEGIN_SRC_RTSI(x) NI_USUAL_RTSI_SELECT(x)
-
-/*
- * scan_begin_src for scan_begin_arg==TRIG_EXT with analog output command on NI
- * boards. These scan begin sources can also be bitwise-or'd with CR_INVERT to
- * change polarity.
- */
-#define NI_AO_SCAN_BEGIN_SRC_PFI(x) NI_USUAL_PFI_SELECT(x)
-#define NI_AO_SCAN_BEGIN_SRC_RTSI(x) NI_USUAL_RTSI_SELECT(x)
-
-/*
- * Bits for setting a clock source with
- * INSN_CONFIG_SET_CLOCK_SRC when using NI frequency output subdevice.
- */
-enum ni_freq_out_clock_source_bits {
- NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC, /* 10 MHz */
-	NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC /* 100 kHz */
-};
-
-/*
- * Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
- * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver).
- */
-enum amplc_dio_clock_source {
- /*
-	 * Per-channel external clock input/output pin (the pin is only
-	 * an input when the clock source is set to this value;
-	 * otherwise it is an output)
- */
- AMPLC_DIO_CLK_CLKN,
- AMPLC_DIO_CLK_10MHZ, /* 10 MHz internal clock */
- AMPLC_DIO_CLK_1MHZ, /* 1 MHz internal clock */
- AMPLC_DIO_CLK_100KHZ, /* 100 kHz internal clock */
- AMPLC_DIO_CLK_10KHZ, /* 10 kHz internal clock */
- AMPLC_DIO_CLK_1KHZ, /* 1 kHz internal clock */
- /*
-	 * Output of the preceding counter channel (for channel 0, the
-	 * preceding channel is channel 2 of the preceding counter
-	 * subdevice; for the first counter subdevice, the preceding
-	 * subdevice is the last counter subdevice)
- */
- AMPLC_DIO_CLK_OUTNM1,
- AMPLC_DIO_CLK_EXT, /* per chip external input pin */
- /* the following are "enhanced" clock sources for PCIe models */
- AMPLC_DIO_CLK_VCC, /* clock input HIGH */
- AMPLC_DIO_CLK_GND, /* clock input LOW */
- AMPLC_DIO_CLK_PAT_PRESENT, /* "pattern present" signal */
- AMPLC_DIO_CLK_20MHZ /* 20 MHz internal clock */
-};
-
-/*
- * Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
- * timer subdevice on some Amplicon DIO PCIe boards (amplc_dio200 driver).
- */
-enum amplc_dio_ts_clock_src {
- AMPLC_DIO_TS_CLK_1GHZ, /* 1 ns period with 20 ns granularity */
- AMPLC_DIO_TS_CLK_1MHZ, /* 1 us period */
- AMPLC_DIO_TS_CLK_1KHZ /* 1 ms period */
-};
-
-/*
- * Values for setting a gate source with INSN_CONFIG_SET_GATE_SRC for
- * 8254 counter subdevices on Amplicon DIO boards (amplc_dio200 driver).
- */
-enum amplc_dio_gate_source {
- AMPLC_DIO_GAT_VCC, /* internal high logic level */
- AMPLC_DIO_GAT_GND, /* internal low logic level */
- AMPLC_DIO_GAT_GATN, /* per channel external gate input */
- /*
-	 * negated output of the counter channel two places back (for
-	 * channels 0 and 1, that is channel 1 or 2 of the preceding
-	 * counter subdevice; for the first counter subdevice, the
-	 * preceding subdevice is the last counter subdevice)
- */
- AMPLC_DIO_GAT_NOUTNM2,
- AMPLC_DIO_GAT_RESERVED4,
- AMPLC_DIO_GAT_RESERVED5,
- AMPLC_DIO_GAT_RESERVED6,
- AMPLC_DIO_GAT_RESERVED7,
- /* the following are "enhanced" gate sources for PCIe models */
- AMPLC_DIO_GAT_NGATN = 6, /* negated per channel gate input */
- /* non-negated output of counter channel minus 2 */
- AMPLC_DIO_GAT_OUTNM2,
- AMPLC_DIO_GAT_PAT_PRESENT, /* "pattern present" signal */
- AMPLC_DIO_GAT_PAT_OCCURRED, /* "pattern occurred" latched */
- AMPLC_DIO_GAT_PAT_GONE, /* "pattern gone away" latched */
- AMPLC_DIO_GAT_NPAT_PRESENT, /* negated "pattern present" */
- AMPLC_DIO_GAT_NPAT_OCCURRED, /* negated "pattern occurred" */
- AMPLC_DIO_GAT_NPAT_GONE /* negated "pattern gone away" */
-};
-
-/*
- * Values for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC for
- * the counter subdevice on the Kolter Electronic PCI-Counter board
- * (ke_counter driver).
- */
-enum ke_counter_clock_source {
-	KE_CLK_20MHZ, /* internal 20 MHz (default) */
-	KE_CLK_4MHZ, /* internal 4 MHz (option) */
- KE_CLK_EXT /* external clock on pin 21 of D-Sub */
-};
-
-#endif /* _COMEDI_H */
diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c
index 06bfc859ab31..393966c09740 100644
--- a/drivers/comedi/comedi_buf.c
+++ b/drivers/comedi/comedi_buf.c
@@ -9,8 +9,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
-
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"
#ifdef PAGE_KERNEL_NOCACHE
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 763cea8418f8..55a0cae04b8d 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -23,7 +23,7 @@
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/fs.h>
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
#include <linux/cdev.h>
#include <linux/io.h>
diff --git a/drivers/comedi/comedi_pci.c b/drivers/comedi/comedi_pci.c
index 54739af7eb71..cc2581902195 100644
--- a/drivers/comedi/comedi_pci.c
+++ b/drivers/comedi/comedi_pci.c
@@ -9,8 +9,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/**
* comedi_to_pci_dev() - Return PCI device attached to COMEDI device
diff --git a/drivers/comedi/comedi_pci.h b/drivers/comedi/comedi_pci.h
deleted file mode 100644
index 4e069440cbdc..000000000000
--- a/drivers/comedi/comedi_pci.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * comedi_pci.h
- * header file for Comedi PCI drivers
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- */
-
-#ifndef _COMEDI_PCI_H
-#define _COMEDI_PCI_H
-
-#include <linux/pci.h>
-
-#include "comedidev.h"
-
-/*
- * PCI Vendor IDs not in <linux/pci_ids.h>
- */
-#define PCI_VENDOR_ID_KOLTER 0x1001
-#define PCI_VENDOR_ID_ICP 0x104c
-#define PCI_VENDOR_ID_DT 0x1116
-#define PCI_VENDOR_ID_IOTECH 0x1616
-#define PCI_VENDOR_ID_CONTEC 0x1221
-#define PCI_VENDOR_ID_RTD 0x1435
-#define PCI_VENDOR_ID_HUMUSOFT 0x186c
-
-struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev);
-
-int comedi_pci_enable(struct comedi_device *dev);
-void comedi_pci_disable(struct comedi_device *dev);
-void comedi_pci_detach(struct comedi_device *dev);
-
-int comedi_pci_auto_config(struct pci_dev *pcidev, struct comedi_driver *driver,
- unsigned long context);
-void comedi_pci_auto_unconfig(struct pci_dev *pcidev);
-
-int comedi_pci_driver_register(struct comedi_driver *comedi_driver,
- struct pci_driver *pci_driver);
-void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
- struct pci_driver *pci_driver);
-
-/**
- * module_comedi_pci_driver() - Helper macro for registering a comedi PCI driver
- * @__comedi_driver: comedi_driver struct
- * @__pci_driver: pci_driver struct
- *
- * Helper macro for comedi PCI drivers which do not do anything special
- * in module init/exit. This eliminates a lot of boilerplate. Each
- * module may only use this macro once, and calling it replaces
- * module_init() and module_exit()
- */
-#define module_comedi_pci_driver(__comedi_driver, __pci_driver) \
- module_driver(__comedi_driver, comedi_pci_driver_register, \
- comedi_pci_driver_unregister, &(__pci_driver))
-
-#endif /* _COMEDI_PCI_H */
diff --git a/drivers/comedi/comedi_pcmcia.c b/drivers/comedi/comedi_pcmcia.c
index bb273bb202e6..c53aad0fc2ce 100644
--- a/drivers/comedi/comedi_pcmcia.c
+++ b/drivers/comedi/comedi_pcmcia.c
@@ -9,8 +9,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
-
-#include "comedi_pcmcia.h"
+#include <linux/comedi/comedi_pcmcia.h>
/**
* comedi_to_pcmcia_dev() - Return PCMCIA device attached to COMEDI device
diff --git a/drivers/comedi/comedi_pcmcia.h b/drivers/comedi/comedi_pcmcia.h
deleted file mode 100644
index f2f6e779645b..000000000000
--- a/drivers/comedi/comedi_pcmcia.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * comedi_pcmcia.h
- * header file for Comedi PCMCIA drivers
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- */
-
-#ifndef _COMEDI_PCMCIA_H
-#define _COMEDI_PCMCIA_H
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-
-#include "comedidev.h"
-
-struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *dev);
-
-int comedi_pcmcia_enable(struct comedi_device *dev,
- int (*conf_check)(struct pcmcia_device *p_dev,
- void *priv_data));
-void comedi_pcmcia_disable(struct comedi_device *dev);
-
-int comedi_pcmcia_auto_config(struct pcmcia_device *link,
- struct comedi_driver *driver);
-void comedi_pcmcia_auto_unconfig(struct pcmcia_device *link);
-
-int comedi_pcmcia_driver_register(struct comedi_driver *comedi_driver,
- struct pcmcia_driver *pcmcia_driver);
-void comedi_pcmcia_driver_unregister(struct comedi_driver *comedi_driver,
- struct pcmcia_driver *pcmcia_driver);
-
-/**
- * module_comedi_pcmcia_driver() - Helper macro for registering a comedi
- * PCMCIA driver
- * @__comedi_driver: comedi_driver struct
- * @__pcmcia_driver: pcmcia_driver struct
- *
- * Helper macro for comedi PCMCIA drivers which do not do anything special
- * in module init/exit. This eliminates a lot of boilerplate. Each
- * module may only use this macro once, and calling it replaces
- * module_init() and module_exit()
- */
-#define module_comedi_pcmcia_driver(__comedi_driver, __pcmcia_driver) \
- module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
- comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
-
-#endif /* _COMEDI_PCMCIA_H */
diff --git a/drivers/comedi/comedi_usb.c b/drivers/comedi/comedi_usb.c
index eea8ebf32ed0..d11ea148ebf8 100644
--- a/drivers/comedi/comedi_usb.c
+++ b/drivers/comedi/comedi_usb.c
@@ -8,8 +8,7 @@
*/
#include <linux/module.h>
-
-#include "comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
/**
* comedi_to_usb_interface() - Return USB interface attached to COMEDI device
diff --git a/drivers/comedi/comedi_usb.h b/drivers/comedi/comedi_usb.h
deleted file mode 100644
index 601e29d3891c..000000000000
--- a/drivers/comedi/comedi_usb.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* comedi_usb.h
- * header file for USB Comedi drivers
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- */
-
-#ifndef _COMEDI_USB_H
-#define _COMEDI_USB_H
-
-#include <linux/usb.h>
-
-#include "comedidev.h"
-
-struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev);
-struct usb_device *comedi_to_usb_dev(struct comedi_device *dev);
-
-int comedi_usb_auto_config(struct usb_interface *intf,
- struct comedi_driver *driver, unsigned long context);
-void comedi_usb_auto_unconfig(struct usb_interface *intf);
-
-int comedi_usb_driver_register(struct comedi_driver *comedi_driver,
- struct usb_driver *usb_driver);
-void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
- struct usb_driver *usb_driver);
-
-/**
- * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver
- * @__comedi_driver: comedi_driver struct
- * @__usb_driver: usb_driver struct
- *
- * Helper macro for comedi USB drivers which do not do anything special
- * in module init/exit. This eliminates a lot of boilerplate. Each
- * module may only use this macro once, and calling it replaces
- * module_init() and module_exit()
- */
-#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \
- module_driver(__comedi_driver, comedi_usb_driver_register, \
- comedi_usb_driver_unregister, &(__usb_driver))
-
-#endif /* _COMEDI_USB_H */
diff --git a/drivers/comedi/comedidev.h b/drivers/comedi/comedidev.h
deleted file mode 100644
index 0e1b95ef9a4d..000000000000
--- a/drivers/comedi/comedidev.h
+++ /dev/null
@@ -1,1054 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * comedidev.h
- * header file for kernel-only structures, variables, and constants
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
- */
-
-#ifndef _COMEDIDEV_H
-#define _COMEDIDEV_H
-
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <linux/spinlock_types.h>
-#include <linux/rwsem.h>
-#include <linux/kref.h>
-
-#include "comedi.h"
-
-#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
-#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \
- COMEDI_MINORVERSION, COMEDI_MICROVERSION)
-#define COMEDI_RELEASE VERSION
-
-#define COMEDI_NUM_BOARD_MINORS 0x30
-
-/**
- * struct comedi_subdevice - Working data for a COMEDI subdevice
- * @device: COMEDI device to which this subdevice belongs. (Initialized by
- * comedi_alloc_subdevices().)
- * @index: Index of this subdevice within device's array of subdevices.
- * (Initialized by comedi_alloc_subdevices().)
- * @type: Type of subdevice from &enum comedi_subdevice_type. (Initialized by
- * the low-level driver.)
- * @n_chan: Number of channels the subdevice supports. (Initialized by the
- * low-level driver.)
- * @subdev_flags: Various "SDF" flags indicating aspects of the subdevice to
- * the COMEDI core and user application. (Initialized by the low-level
- * driver.)
- * @len_chanlist: Maximum length of a channel list if the subdevice supports
- * asynchronous acquisition commands. (Optionally initialized by the
- * low-level driver, or changed from 0 to 1 during post-configuration.)
- * @private: Private data pointer which is either set by the low-level driver
- * itself, or by a call to comedi_alloc_spriv() which allocates storage.
- * In the latter case, the storage is automatically freed after the
- * low-level driver's "detach" handler is called for the device.
- * (Initialized by the low-level driver.)
- * @async: Pointer to &struct comedi_async if the subdevice supports
- * asynchronous acquisition commands. (Allocated and initialized during
- * post-configuration if needed.)
- * @lock: Pointer to a file object that performed a %COMEDI_LOCK ioctl on the
- * subdevice. (Initially NULL.)
- * @busy: Pointer to a file object that is performing an asynchronous
- * acquisition command on the subdevice. (Initially NULL.)
- * @runflags: Internal flags for use by COMEDI core, mostly indicating whether
- * an asynchronous acquisition command is running.
- * @spin_lock: Generic spin-lock for use by the COMEDI core and the low-level
- * driver. (Initialized by comedi_alloc_subdevices().)
- * @io_bits: Bit-mask indicating the channel directions for a DIO subdevice
- * with no more than 32 channels. A '1' at a bit position indicates the
- * corresponding channel is configured as an output. (Initialized by the
- * low-level driver for a DIO subdevice. Forced to all-outputs during
- * post-configuration for a digital output subdevice.)
- * @maxdata: If non-zero, this is the maximum raw data value of each channel.
- * If zero, the maximum data value is channel-specific. (Initialized by
- * the low-level driver.)
- * @maxdata_list: If the maximum data value is channel-specific, this points
- * to an array of maximum data values indexed by channel index.
- * (Initialized by the low-level driver.)
- * @range_table: If non-NULL, this points to a COMEDI range table for the
- * subdevice. If NULL, the range table is channel-specific. (Initialized
- * by the low-level driver, will be set to an "invalid" range table during
- * post-configuration if @range_table and @range_table_list are both
- * NULL.)
- * @range_table_list: If the COMEDI range table is channel-specific, this
- * points to an array of pointers to COMEDI range tables indexed by
- * channel number. (Initialized by the low-level driver.)
- * @chanlist: Not used.
- * @insn_read: Optional pointer to a handler for the %INSN_READ instruction.
- * (Initialized by the low-level driver, or set to a default handler
- * during post-configuration.)
- * @insn_write: Optional pointer to a handler for the %INSN_WRITE instruction.
- * (Initialized by the low-level driver, or set to a default handler
- * during post-configuration.)
- * @insn_bits: Optional pointer to a handler for the %INSN_BITS instruction
- * for a digital input, digital output or digital input/output subdevice.
- * (Initialized by the low-level driver, or set to a default handler
- * during post-configuration.)
- * @insn_config: Optional pointer to a handler for the %INSN_CONFIG
- * instruction. (Initialized by the low-level driver, or set to a default
- * handler during post-configuration.)
- * @do_cmd: If the subdevice supports asynchronous acquisition commands, this
- * points to a handler to set it up in hardware. (Initialized by the
- * low-level driver.)
- * @do_cmdtest: If the subdevice supports asynchronous acquisition commands,
- * this points to a handler used to check and possibly tweak a prospective
- * acquisition command without setting it up in hardware. (Initialized by
- * the low-level driver.)
- * @poll: If the subdevice supports asynchronous acquisition commands, this
- * is an optional pointer to a handler for the %COMEDI_POLL ioctl which
- * instructs the low-level driver to synchronize buffers. (Initialized by
- * the low-level driver if needed.)
- * @cancel: If the subdevice supports asynchronous acquisition commands, this
- * points to a handler used to terminate a running command. (Initialized
- * by the low-level driver.)
- * @buf_change: If the subdevice supports asynchronous acquisition commands,
- * this is an optional pointer to a handler that is called when the data
- * buffer for handling asynchronous commands is allocated or reallocated.
- * (Initialized by the low-level driver if needed.)
- * @munge: If the subdevice supports asynchronous acquisition commands and
- * uses DMA to transfer data from the hardware to the acquisition buffer,
- * this points to a function used to "munge" the data values from the
- * hardware into the format expected by COMEDI. (Initialized by the
- * low-level driver if needed.)
- * @async_dma_dir: If the subdevice supports asynchronous acquisition commands
- * and uses DMA to transfer data from the hardware to the acquisition
- * buffer, this sets the DMA direction for the buffer. (initialized to
- * %DMA_NONE by comedi_alloc_subdevices() and changed by the low-level
- * driver if necessary.)
- * @state: Handy bit-mask indicating the output states for a DIO or digital
- * output subdevice with no more than 32 channels. (Initialized by the
- * low-level driver.)
- * @class_dev: If the subdevice supports asynchronous acquisition commands,
- * this points to a sysfs comediX_subdY device where X is the minor device
- * number of the COMEDI device and Y is the subdevice number. The minor
- * device number for the sysfs device is allocated dynamically in the
- * range 48 to 255. This is used to allow the COMEDI device to be opened
- * with a different default read or write subdevice. (Allocated during
- * post-configuration if needed.)
- * @minor: If @class_dev is set, this is its dynamically allocated minor
- * device number. (Set during post-configuration if necessary.)
- * @readback: Optional pointer to memory allocated by
- * comedi_alloc_subdev_readback() used to hold the values written to
- * analog output channels so they can be read back. The storage is
- * automatically freed after the low-level driver's "detach" handler is
- * called for the device. (Initialized by the low-level driver.)
- *
- * This is the main control structure for a COMEDI subdevice. If the subdevice
- * supports asynchronous acquisition commands, additional information is stored
- * in the &struct comedi_async pointed to by @async.
- *
- * Most of the subdevice is initialized by the low-level driver's "attach" or
- * "auto_attach" handlers but parts of it are initialized by
- * comedi_alloc_subdevices(), and other parts are initialized during
- * post-configuration on return from that handler.
- *
- * A low-level driver that sets @insn_bits for a digital input, digital output,
- * or DIO subdevice may leave @insn_read and @insn_write uninitialized, in
- * which case they will be set to a default handler during post-configuration
- * that uses @insn_bits to emulate the %INSN_READ and %INSN_WRITE instructions.
- */
-struct comedi_subdevice {
- struct comedi_device *device;
- int index;
- int type;
- int n_chan;
- int subdev_flags;
- int len_chanlist; /* maximum length of channel/gain list */
-
- void *private;
-
- struct comedi_async *async;
-
- void *lock;
- void *busy;
- unsigned int runflags;
- spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */
-
- unsigned int io_bits;
-
- unsigned int maxdata; /* if maxdata==0, use list */
- const unsigned int *maxdata_list; /* list is channel specific */
-
- const struct comedi_lrange *range_table;
- const struct comedi_lrange *const *range_table_list;
-
- unsigned int *chanlist; /* driver-owned chanlist (not used) */
-
- int (*insn_read)(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
- int (*insn_write)(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
- int (*insn_bits)(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
- int (*insn_config)(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data);
-
- int (*do_cmd)(struct comedi_device *dev, struct comedi_subdevice *s);
- int (*do_cmdtest)(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd);
- int (*poll)(struct comedi_device *dev, struct comedi_subdevice *s);
- int (*cancel)(struct comedi_device *dev, struct comedi_subdevice *s);
-
- /* called when the buffer changes */
- int (*buf_change)(struct comedi_device *dev,
- struct comedi_subdevice *s);
-
- void (*munge)(struct comedi_device *dev, struct comedi_subdevice *s,
- void *data, unsigned int num_bytes,
- unsigned int start_chan_index);
- enum dma_data_direction async_dma_dir;
-
- unsigned int state;
-
- struct device *class_dev;
- int minor;
-
- unsigned int *readback;
-};
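-
-/*
- * An abbreviated sketch (hypothetical driver; the channel count, flags,
- * range table choice and 'my_ai_insn_read' handler are illustrative
- * assumptions) of an "attach" handler filling in an analog input
- * subdevice:
- *
- *	struct comedi_subdevice *s;
- *	int ret;
- *
- *	ret = comedi_alloc_subdevices(dev, 1);
- *	if (ret)
- *		return ret;
- *	s = &dev->subdevices[0];
- *	s->type = COMEDI_SUBD_AI;
- *	s->subdev_flags = SDF_READABLE | SDF_GROUND;
- *	s->n_chan = 8;
- *	s->maxdata = 0xffff;
- *	s->range_table = &range_bipolar10;
- *	s->insn_read = my_ai_insn_read;
- */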
-
-/**
- * struct comedi_buf_page - Describe a page of a COMEDI buffer
- * @virt_addr: Kernel address of page.
- * @dma_addr: DMA address of page if in DMA coherent memory.
- */
-struct comedi_buf_page {
- void *virt_addr;
- dma_addr_t dma_addr;
-};
-
-/**
- * struct comedi_buf_map - Describe pages in a COMEDI buffer
- * @dma_hw_dev: Low-level hardware &struct device pointer copied from the
- * COMEDI device's hw_dev member.
- * @page_list: Pointer to array of &struct comedi_buf_page, one for each
- * page in the buffer.
- * @n_pages: Number of pages in the buffer.
- * @dma_dir: DMA direction used to allocate pages of DMA coherent memory,
- * or %DMA_NONE if pages allocated from regular memory.
- * @refcount: &struct kref reference counter used to free the buffer.
- *
- * A COMEDI data buffer is allocated as individual pages, either in
- * conventional memory or DMA coherent memory, depending on the attached
- * low-level hardware device. (The buffer pages also get mapped into the
- * kernel's contiguous virtual address space pointed to by the 'prealloc_buf'
- * member of &struct comedi_async.)
- *
- * The buffer is normally freed when the COMEDI device is detached from the
- * low-level driver (which may happen due to device removal), but if it happens
- * to be mmapped at the time, the pages cannot be freed until the buffer has
- * been munmapped. That is what the reference counter is for. (The virtual
- * address space pointed to by 'prealloc_buf' is freed when the COMEDI device is
- * detached.)
- */
-struct comedi_buf_map {
- struct device *dma_hw_dev;
- struct comedi_buf_page *page_list;
- unsigned int n_pages;
- enum dma_data_direction dma_dir;
- struct kref refcount;
-};
-
-/**
- * struct comedi_async - Control data for asynchronous COMEDI commands
- * @prealloc_buf: Kernel virtual address of allocated acquisition buffer.
- * @prealloc_bufsz: Buffer size (in bytes).
- * @buf_map: Map of buffer pages.
- * @max_bufsize: Maximum allowed buffer size (in bytes).
- * @buf_write_count: "Write completed" count (in bytes, modulo 2**32).
- * @buf_write_alloc_count: "Allocated for writing" count (in bytes,
- * modulo 2**32).
- * @buf_read_count: "Read completed" count (in bytes, modulo 2**32).
- * @buf_read_alloc_count: "Allocated for reading" count (in bytes,
- * modulo 2**32).
- * @buf_write_ptr: Buffer position for writer.
- * @buf_read_ptr: Buffer position for reader.
- * @cur_chan: Current position in chanlist for scan (for those drivers that
- * use it).
- * @scans_done: The number of scans completed.
- * @scan_progress: Amount received or sent for current scan (in bytes).
- * @munge_chan: Current position in chanlist for "munging".
- * @munge_count: "Munge" count (in bytes, modulo 2**32).
- * @munge_ptr: Buffer position for "munging".
- * @events: Bit-vector of events that have occurred.
- * @cmd: Details of comedi command in progress.
- * @wait_head: Task wait queue for file reader or writer.
- * @cb_mask: Bit-vector of events that should wake waiting tasks.
- * @inttrig: Software trigger function for command, or NULL.
- *
- * Note about the ..._count and ..._ptr members:
- *
- * Think of the _Count values being integers of unlimited size, indexing
- * into a buffer of infinite length (though only an advancing portion
- * of the buffer of fixed length prealloc_bufsz is accessible at any
- * time). Then:
- *
- * Buf_Read_Count <= Buf_Read_Alloc_Count <= Munge_Count <=
- * Buf_Write_Count <= Buf_Write_Alloc_Count <=
- * (Buf_Read_Count + prealloc_bufsz)
- *
- * (Those aren't the actual members, apart from prealloc_bufsz.) When the
- * buffer is reset, those _Count values start at 0 and only increase in value,
- * maintaining the above inequalities until the next time the buffer is
- * reset. The buffer is divided into the following regions by the inequalities:
- *
- * [0, Buf_Read_Count):
- * old region no longer accessible
- *
- * [Buf_Read_Count, Buf_Read_Alloc_Count):
- * filled and munged region allocated for reading but not yet read
- *
- * [Buf_Read_Alloc_Count, Munge_Count):
- * filled and munged region not yet allocated for reading
- *
- * [Munge_Count, Buf_Write_Count):
- * filled region not yet munged
- *
- * [Buf_Write_Count, Buf_Write_Alloc_Count):
- * unfilled region allocated for writing but not yet written
- *
- * [Buf_Write_Alloc_Count, Buf_Read_Count + prealloc_bufsz):
- * unfilled region not yet allocated for writing
- *
- * [Buf_Read_Count + prealloc_bufsz, infinity):
- * unfilled region not yet accessible
- *
- * Data needs to be written into the buffer before it can be read out,
- * and may need to be converted (or "munged") between the two
- * operations. Extra unfilled buffer space may need to be allocated for
- * writing (advancing Buf_Write_Alloc_Count) before new data is written.
- * After writing new data, the newly filled space needs to be released
- * (advancing Buf_Write_Count). This also results in the new data being
- * "munged" (advancing Munge_Count). Before data is read out of the
- * buffer, extra space may need to be allocated for reading (advancing
- * Buf_Read_Alloc_Count). After the data has been read out, the space
- * needs to be released (advancing Buf_Read_Count).
- *
- * The actual members, buf_read_count, buf_read_alloc_count,
- * munge_count, buf_write_count, and buf_write_alloc_count take the
- * value of the corresponding capitalized _Count values modulo 2^32
- * (UINT_MAX+1). Subtracting a "lower" _count value from a "higher"
- * _count value gives the same answer as subtracting the "lower" _Count
- * value from the "higher" _Count value because prealloc_bufsz < UINT_MAX+1.
- * The modulo operation is done implicitly.
- *
- * The buf_read_ptr, munge_ptr, and buf_write_ptr members take the value
- * of the corresponding capitalized _Count values modulo prealloc_bufsz.
- * These correspond to byte indices in the physical buffer. The modulo
- * operation is done by subtracting prealloc_bufsz when the value
- * exceeds prealloc_bufsz (assuming prealloc_bufsz plus the increment is
- * less than or equal to UINT_MAX).
- */
-struct comedi_async {
- void *prealloc_buf;
- unsigned int prealloc_bufsz;
- struct comedi_buf_map *buf_map;
- unsigned int max_bufsize;
- unsigned int buf_write_count;
- unsigned int buf_write_alloc_count;
- unsigned int buf_read_count;
- unsigned int buf_read_alloc_count;
- unsigned int buf_write_ptr;
- unsigned int buf_read_ptr;
- unsigned int cur_chan;
- unsigned int scans_done;
- unsigned int scan_progress;
- unsigned int munge_chan;
- unsigned int munge_count;
- unsigned int munge_ptr;
- unsigned int events;
- struct comedi_cmd cmd;
- wait_queue_head_t wait_head;
- unsigned int cb_mask;
- int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s,
- unsigned int x);
-};
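[Editor's note: to make the implicit modulo arithmetic above concrete, a
minimal standalone sketch (not part of the header) showing that the unsigned
subtraction still yields the true byte count even when the counts straddle
the 2^32 wrap; the count values are hypothetical.]

#include <stdio.h>

int main(void)
{
	/* Hypothetical counts straddling the 2^32 wrap point. */
	unsigned int buf_read_count  = 0xfffffff0u;
	unsigned int buf_write_count = 0x00000010u;	/* wrapped past 0 */

	/* Unsigned subtraction wraps implicitly: 0x10 - 0xfffffff0 == 0x20. */
	printf("unread bytes: %u\n", buf_write_count - buf_read_count);
	return 0;
}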
-
-/**
- * enum comedi_cb - &struct comedi_async callback "events"
- * @COMEDI_CB_EOS: end-of-scan
- * @COMEDI_CB_EOA: end-of-acquisition/output
- * @COMEDI_CB_BLOCK: data has arrived, wakes up read() / write()
- * @COMEDI_CB_EOBUF: DEPRECATED: end of buffer
- * @COMEDI_CB_ERROR: card error during acquisition
- * @COMEDI_CB_OVERFLOW: buffer overflow/underflow
- * @COMEDI_CB_ERROR_MASK: events that indicate an error has occurred
- * @COMEDI_CB_CANCEL_MASK: events that will cancel an async command
- */
-enum comedi_cb {
- COMEDI_CB_EOS = BIT(0),
- COMEDI_CB_EOA = BIT(1),
- COMEDI_CB_BLOCK = BIT(2),
- COMEDI_CB_EOBUF = BIT(3),
- COMEDI_CB_ERROR = BIT(4),
- COMEDI_CB_OVERFLOW = BIT(5),
- /* masks */
- COMEDI_CB_ERROR_MASK = (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW),
- COMEDI_CB_CANCEL_MASK = (COMEDI_CB_EOA | COMEDI_CB_ERROR_MASK)
-};
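[Editor's note: these flags are normally ORed into s->async->events by a
low-level driver's interrupt handler and then processed by
comedi_handle_events(), which wakes any waiting reader/writer and cancels
the command on %COMEDI_CB_CANCEL_MASK events. A minimal sketch;
hw_fifo_overflowed() is an invented hardware test.]

static irqreturn_t my_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;

	if (hw_fifo_overflowed(dev))		/* invented hardware check */
		s->async->events |= COMEDI_CB_OVERFLOW;
	else
		s->async->events |= COMEDI_CB_BLOCK;

	comedi_handle_events(dev, s);
	return IRQ_HANDLED;
}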
-
-/**
- * struct comedi_driver - COMEDI driver registration
- * @driver_name: Name of driver.
- * @module: Owning module.
- * @attach: The optional "attach" handler for manually configured COMEDI
- * devices.
- * @detach: The "detach" handler for deconfiguring COMEDI devices.
- * @auto_attach: The optional "auto_attach" handler for automatically
- * configured COMEDI devices.
- * @num_names: Optional number of "board names" supported.
- * @board_name: Optional pointer to a pointer to a board name. The pointer
- * to a board name is embedded in an element of a driver-defined array
- * of static, read-only board type information.
- * @offset: Optional size of each element of the driver-defined array of
- * static, read-only board type information, i.e. the offset between each
- * pointer to a board name.
- *
- * This is used with comedi_driver_register() and comedi_driver_unregister() to
- * register and unregister a low-level COMEDI driver with the COMEDI core.
- *
- * If @num_names is non-zero, @board_name should be non-NULL, and @offset
- * should be at least sizeof(*board_name). These are used by the handler for
- * the %COMEDI_DEVCONFIG ioctl to match a hardware device and its driver by
- * board name. If @num_names is zero, the %COMEDI_DEVCONFIG ioctl matches a
- * hardware device and its driver by driver name. This is only useful if the
- * @attach handler is set. If @num_names is non-zero, the driver's @attach
- * handler will be called with the COMEDI device structure's board_ptr member
- * pointing to the matched pointer to a board name within the driver's private
- * array of static, read-only board type information.
- *
- * The @detach handler has two roles. If a COMEDI device was successfully
- * configured by the @attach or @auto_attach handler, it is called when the
- * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to
- * unloading of the driver, or due to device removal). It is also called when
- * the @attach or @auto_attach handler returns an error. Therefore, the
- * @attach or @auto_attach handlers can defer clean-up on error until the
- * @detach handler is called. If the @attach or @auto_attach handlers free
- * any resources themselves, they must prevent the @detach handler from
- * freeing the same resources. The @detach handler must not assume that all
- * resources requested by the @attach or @auto_attach handler were
- * successfully allocated.
- */
-struct comedi_driver {
- /* private: */
- struct comedi_driver *next; /* Next in list of COMEDI drivers. */
- /* public: */
- const char *driver_name;
- struct module *module;
- int (*attach)(struct comedi_device *dev, struct comedi_devconfig *it);
- void (*detach)(struct comedi_device *dev);
- int (*auto_attach)(struct comedi_device *dev, unsigned long context);
- unsigned int num_names;
- const char *const *board_name;
- int offset;
-};
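[Editor's note: a minimal sketch of the @board_name/@offset arrangement
described above. The board structure, names and my_attach() are invented;
only the registration fields follow the documented layout. On a match,
dev->board_ptr points at the 'name' member of the matched array element, so
drivers usually recover the element with a simple cast of dev->board_ptr.]

struct my_board_info {
	const char *name;	/* the pointer @board_name walks over */
	unsigned int ai_chans;
};

static const struct my_board_info my_boards[] = {
	{ .name = "board-a", .ai_chans = 8 },
	{ .name = "board-b", .ai_chans = 16 },
};

static struct comedi_driver my_driver = {
	.driver_name	= "my_driver",
	.module		= THIS_MODULE,
	.attach		= my_attach,		/* assumed defined elsewhere */
	.detach		= comedi_legacy_detach,
	.board_name	= &my_boards[0].name,
	.num_names	= ARRAY_SIZE(my_boards),
	.offset		= sizeof(struct my_board_info),
};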
-
-/**
- * struct comedi_device - Working data for a COMEDI device
- * @use_count: Number of open file objects.
- * @driver: Low-level COMEDI driver attached to this COMEDI device.
- * @pacer: Optional pointer to a dynamically allocated acquisition pacer
- * control. It is freed automatically after the COMEDI device is
- * detached from the low-level driver.
- * @private: Optional pointer to private data allocated by the low-level
- * driver. It is freed automatically after the COMEDI device is
- * detached from the low-level driver.
- * @class_dev: Sysfs comediX device.
- * @minor: Minor device number of COMEDI char device (0-47).
- * @detach_count: Counter incremented every time the COMEDI device is detached.
- * Used for checking a previous attachment is still valid.
- * @hw_dev: Optional pointer to the low-level hardware &struct device. It is
- * required for automatically configured COMEDI devices and optional for
- * COMEDI devices configured by the %COMEDI_DEVCONFIG ioctl, although
- * the bus-specific COMEDI functions only work if it is set correctly.
- * It is also passed to dma_alloc_coherent() for COMEDI subdevices that
- * have their 'async_dma_dir' member set to something other than
- * %DMA_NONE.
- * @board_name: Pointer to a COMEDI board name or a COMEDI driver name. When
- * the low-level driver's "attach" handler is called by the handler for
- *	the %COMEDI_DEVCONFIG ioctl, it points to a matched board name
- *	string if the 'num_names' member of the &struct comedi_driver is
- *	non-zero; otherwise it points to the low-level driver name string.
- *	When the low-level driver's "auto_attach" handler is called for an
- * automatically configured COMEDI device, it points to the low-level
- * driver name string. The low-level driver is free to change it in its
- * "attach" or "auto_attach" handler if it wishes.
- * @board_ptr: Optional pointer to private, read-only board type information in
- * the low-level driver. If the 'num_names' member of the &struct
- * comedi_driver is non-zero, the handler for the %COMEDI_DEVCONFIG ioctl
- * will point it to a pointer to a matched board name string within the
- * driver's private array of static, read-only board type information when
- * calling the driver's "attach" handler. The low-level driver is free to
- * change it.
- * @attached: Flag indicating that the COMEDI device is attached to a low-level
- * driver.
- * @ioenabled: Flag used to indicate that a PCI device has been enabled and
- * its regions requested.
- * @spinlock: Generic spin-lock for use by the low-level driver.
- * @mutex: Generic mutex for use by the COMEDI core module.
- * @attach_lock: &struct rw_semaphore used to guard against the COMEDI device
- * being detached while an operation is in progress. The down_write()
- * operation is only allowed while @mutex is held and is used when
- * changing @attached and @detach_count and calling the low-level driver's
- * "detach" handler. The down_read() operation is generally used without
- * holding @mutex.
- * @refcount: &struct kref reference counter for freeing COMEDI device.
- * @n_subdevices: Number of COMEDI subdevices allocated by the low-level
- * driver for this device.
- * @subdevices: Dynamically allocated array of COMEDI subdevices.
- * @mmio: Optional pointer to a remapped MMIO region set by the low-level
- * driver.
- * @iobase: Optional base of an I/O port region requested by the low-level
- * driver.
- * @iolen: Length of I/O port region requested at @iobase.
- * @irq: Optional IRQ number requested by the low-level driver.
- * @read_subdev: Optional pointer to a default COMEDI subdevice operated on by
- * the read() file operation. Set by the low-level driver.
- * @write_subdev: Optional pointer to a default COMEDI subdevice operated on by
- * the write() file operation. Set by the low-level driver.
- * @async_queue: Storage for fasync_helper().
- * @open: Optional pointer to a function set by the low-level driver to be
- * called when @use_count changes from 0 to 1.
- * @close: Optional pointer to a function set by the low-level driver to be
- *	called when @use_count changes from 1 to 0.
- * @insn_device_config: Optional pointer to a handler for all sub-instructions
- * except %INSN_DEVICE_CONFIG_GET_ROUTES of the %INSN_DEVICE_CONFIG
- * instruction. If this is not initialized by the low-level driver, a
- * default handler will be set during post-configuration.
- * @get_valid_routes: Optional pointer to a handler for the
- * %INSN_DEVICE_CONFIG_GET_ROUTES sub-instruction of the
- * %INSN_DEVICE_CONFIG instruction set. If this is not initialized by the
- * low-level driver, a default handler that copies zero routes back to the
- * user will be used.
- *
- * This is the main control data structure for a COMEDI device (as far as the
- * COMEDI core is concerned). There are two groups of COMEDI devices -
- * "legacy" devices that are configured by the handler for the
- * %COMEDI_DEVCONFIG ioctl, and automatically configured devices resulting
- * from a call to comedi_auto_config() as a result of a bus driver probe in
- * a low-level COMEDI driver. The "legacy" COMEDI devices are allocated
- * during module initialization if the "comedi_num_legacy_minors" module
- * parameter is non-zero and use minor device numbers from 0 to
- * comedi_num_legacy_minors minus one. The automatically configured COMEDI
- * devices are allocated on demand and use minor device numbers from
- * comedi_num_legacy_minors to 47.
- */
-struct comedi_device {
- int use_count;
- struct comedi_driver *driver;
- struct comedi_8254 *pacer;
- void *private;
-
- struct device *class_dev;
- int minor;
- unsigned int detach_count;
- struct device *hw_dev;
-
- const char *board_name;
- const void *board_ptr;
- unsigned int attached:1;
- unsigned int ioenabled:1;
- spinlock_t spinlock; /* generic spin-lock for low-level driver */
- struct mutex mutex; /* generic mutex for COMEDI core */
- struct rw_semaphore attach_lock;
- struct kref refcount;
-
- int n_subdevices;
- struct comedi_subdevice *subdevices;
-
- /* dumb */
- void __iomem *mmio;
- unsigned long iobase;
- unsigned long iolen;
- unsigned int irq;
-
- struct comedi_subdevice *read_subdev;
- struct comedi_subdevice *write_subdev;
-
- struct fasync_struct *async_queue;
-
- int (*open)(struct comedi_device *dev);
- void (*close)(struct comedi_device *dev);
- int (*insn_device_config)(struct comedi_device *dev,
- struct comedi_insn *insn, unsigned int *data);
- unsigned int (*get_valid_routes)(struct comedi_device *dev,
- unsigned int n_pairs,
- unsigned int *pair_data);
-};
-
-/*
- * function prototypes
- */
-
-void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
-
-struct comedi_device *comedi_dev_get_from_minor(unsigned int minor);
-int comedi_dev_put(struct comedi_device *dev);
-
-bool comedi_is_subdevice_running(struct comedi_subdevice *s);
-
-void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size);
-void comedi_set_spriv_auto_free(struct comedi_subdevice *s);
-
-int comedi_check_chanlist(struct comedi_subdevice *s,
- int n,
- unsigned int *chanlist);
-
-/* range stuff */
-
-#define RANGE(a, b) {(a) * 1e6, (b) * 1e6, 0}
-#define RANGE_ext(a, b) {(a) * 1e6, (b) * 1e6, RF_EXTERNAL}
-#define RANGE_mA(a, b) {(a) * 1e6, (b) * 1e6, UNIT_mA}
-#define RANGE_unitless(a, b) {(a) * 1e6, (b) * 1e6, 0}
-#define BIP_RANGE(a) {-(a) * 1e6, (a) * 1e6, 0}
-#define UNI_RANGE(a) {0, (a) * 1e6, 0}
-
-extern const struct comedi_lrange range_bipolar10;
-extern const struct comedi_lrange range_bipolar5;
-extern const struct comedi_lrange range_bipolar2_5;
-extern const struct comedi_lrange range_unipolar10;
-extern const struct comedi_lrange range_unipolar5;
-extern const struct comedi_lrange range_unipolar2_5;
-extern const struct comedi_lrange range_0_20mA;
-extern const struct comedi_lrange range_4_20mA;
-extern const struct comedi_lrange range_0_32mA;
-extern const struct comedi_lrange range_unknown;
-
-#define range_digital range_unipolar5
-
-/**
- * struct comedi_lrange - Describes a COMEDI range table
- * @length: Number of entries in the range table.
- * @range: Array of &struct comedi_krange, one for each range.
- *
- * Each element of @range[] describes the minimum and maximum physical range
- * and the type of units. Typically, the type of unit is %UNIT_volt
- * (i.e. volts) and the minimum and maximum are in millionths of a volt.
- * There may also be a flag that indicates the minimum and maximum are merely
- * scale factors for an unknown, external reference.
- */
-struct comedi_lrange {
- int length;
- struct comedi_krange range[];
-};
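[Editor's note: for illustration, a driver-defined table built with the
range macros above might look like this; my_ai_range is an invented
identifier. Each entry stores its limits in millionths of the unit, so
BIP_RANGE(10) covers -10000000..10000000.]

static const struct comedi_lrange my_ai_range = {
	4, {
		BIP_RANGE(10),		/* -10 V .. +10 V */
		BIP_RANGE(5),		/*  -5 V ..  +5 V */
		UNI_RANGE(10),		/*   0 V .. +10 V */
		RANGE_mA(4, 20)		/*  4 mA ..  20 mA */
	}
};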
-
-/**
- * comedi_range_is_bipolar() - Test if subdevice range is bipolar
- * @s: COMEDI subdevice.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is bipolar by checking whether its minimum value
- * is negative.
- *
- * Assumes @range is valid. Does not work for subdevices using a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is bipolar.
- * %false if the range is unipolar.
- */
-static inline bool comedi_range_is_bipolar(struct comedi_subdevice *s,
- unsigned int range)
-{
- return s->range_table->range[range].min < 0;
-}
-
-/**
- * comedi_range_is_unipolar() - Test if subdevice range is unipolar
- * @s: COMEDI subdevice.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is unipolar by checking whether its minimum value
- * is at least 0.
- *
- * Assumes @range is valid. Does not work for subdevices using a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is unipolar.
- * %false if the range is bipolar.
- */
-static inline bool comedi_range_is_unipolar(struct comedi_subdevice *s,
- unsigned int range)
-{
- return s->range_table->range[range].min >= 0;
-}
-
-/**
- * comedi_range_is_external() - Test if subdevice range is external
- * @s: COMEDI subdevice.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is externally referenced by checking whether its
- * %RF_EXTERNAL flag is set.
- *
- * Assumes @range is valid. Does not work for subdevices using a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is external.
- * %false if the range is internal.
- */
-static inline bool comedi_range_is_external(struct comedi_subdevice *s,
- unsigned int range)
-{
- return !!(s->range_table->range[range].flags & RF_EXTERNAL);
-}
-
-/**
- * comedi_chan_range_is_bipolar() - Test if channel-specific range is bipolar
- * @s: COMEDI subdevice.
- * @chan: The channel number.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is bipolar by checking whether its minimum value
- * is negative.
- *
- * Assumes @chan and @range are valid. Only works for subdevices with a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is bipolar.
- * %false if the range is unipolar.
- */
-static inline bool comedi_chan_range_is_bipolar(struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int range)
-{
- return s->range_table_list[chan]->range[range].min < 0;
-}
-
-/**
- * comedi_chan_range_is_unipolar() - Test if channel-specific range is unipolar
- * @s: COMEDI subdevice.
- * @chan: The channel number.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is unipolar by checking whether its minimum value
- * is at least 0.
- *
- * Assumes @chan and @range are valid. Only works for subdevices with a
- * channel-specific range table list.
- *
- * Return:
- * %true if the range is unipolar.
- * %false if the range is bipolar.
- */
-static inline bool comedi_chan_range_is_unipolar(struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int range)
-{
- return s->range_table_list[chan]->range[range].min >= 0;
-}
-
-/**
- * comedi_chan_range_is_external() - Test if channel-specific range is external
- * @s: COMEDI subdevice.
- * @chan: The channel number.
- * @range: Index of range within a range table.
- *
- * Tests whether a range is externally referenced by checking whether its
- * %RF_EXTERNAL flag is set.
- *
- * Assumes @chan and @range are valid. Only works for subdevices with a
- * channel-specific range table list.
- *
- * Return:
- *	%true if the range is external.
- *	%false if the range is internal.
- */
-static inline bool comedi_chan_range_is_external(struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int range)
-{
- return !!(s->range_table_list[chan]->range[range].flags & RF_EXTERNAL);
-}
-
-/**
- * comedi_offset_munge() - Convert between offset binary and 2's complement
- * @s: COMEDI subdevice.
- * @val: Value to be converted.
- *
- * Toggles the highest bit of a sample value to convert between offset binary
- * and 2's complement. Assumes that @s->maxdata is a power of 2 minus 1.
- *
- * Return: The converted value.
- */
-static inline unsigned int comedi_offset_munge(struct comedi_subdevice *s,
- unsigned int val)
-{
- return val ^ s->maxdata ^ (s->maxdata >> 1);
-}
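[Editor's note: a worked example as a standalone sketch. With a 16-bit
subdevice (s->maxdata == 0xffff) the expression reduces to val ^ 0x8000,
i.e. toggling the top bit.]

#include <stdio.h>

/* Standalone copy of the XOR trick, with maxdata passed explicitly. */
static unsigned int offset_munge(unsigned int maxdata, unsigned int val)
{
	return val ^ maxdata ^ (maxdata >> 1);
}

int main(void)
{
	/* 16-bit: 0x0000 <-> 0x8000 and 0xffff <-> 0x7fff. */
	printf("%#x %#x\n", offset_munge(0xffff, 0x0000),
	       offset_munge(0xffff, 0xffff));
	return 0;
}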
-
-/**
- * comedi_bytes_per_sample() - Determine subdevice sample size
- * @s: COMEDI subdevice.
- *
- * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
- * whether the %SDF_LSAMPL subdevice flag is set or not.
- *
- * Return: The subdevice sample size.
- */
-static inline unsigned int comedi_bytes_per_sample(struct comedi_subdevice *s)
-{
- return s->subdev_flags & SDF_LSAMPL ? sizeof(int) : sizeof(short);
-}
-
-/**
- * comedi_sample_shift() - Determine log2 of subdevice sample size
- * @s: COMEDI subdevice.
- *
- * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
- * whether the %SDF_LSAMPL subdevice flag is set or not. The log2 of the
- * sample size will be 2 or 1 and can be used as the right operand of a
- * bit-shift operator to multiply or divide something by the sample size.
- *
- * Return: log2 of the subdevice sample size.
- */
-static inline unsigned int comedi_sample_shift(struct comedi_subdevice *s)
-{
- return s->subdev_flags & SDF_LSAMPL ? 2 : 1;
-}
-
-/**
- * comedi_bytes_to_samples() - Convert a number of bytes to a number of samples
- * @s: COMEDI subdevice.
- * @nbytes: Number of bytes
- *
- * Return: The number of bytes divided by the subdevice sample size.
- */
-static inline unsigned int comedi_bytes_to_samples(struct comedi_subdevice *s,
- unsigned int nbytes)
-{
- return nbytes >> comedi_sample_shift(s);
-}
-
-/**
- * comedi_samples_to_bytes() - Convert a number of samples to a number of bytes
- * @s: COMEDI subdevice.
- * @nsamples: Number of samples.
- *
- * Return: The number of samples multiplied by the subdevice sample size.
- * (Does not check for arithmetic overflow.)
- */
-static inline unsigned int comedi_samples_to_bytes(struct comedi_subdevice *s,
- unsigned int nsamples)
-{
- return nsamples << comedi_sample_shift(s);
-}
-
-/**
- * comedi_check_trigger_src() - Trivially validate a comedi_cmd trigger source
- * @src: Pointer to the trigger source to validate.
- * @flags: Bitmask of valid %TRIG_* for the trigger.
- *
- * This is used in "step 1" of the do_cmdtest functions of comedi drivers
- * to validate the comedi_cmd triggers. Masking @src against @flags
- * allows the userspace comedilib to pass all the comedi_cmd
- * triggers as %TRIG_ANY and get back a bitmask of the valid trigger sources.
- *
- * Return:
- * 0 if trigger sources in *@src are all supported.
- * -EINVAL if any trigger source in *@src is unsupported.
- */
-static inline int comedi_check_trigger_src(unsigned int *src,
- unsigned int flags)
-{
- unsigned int orig_src = *src;
-
- *src = orig_src & flags;
- if (*src == TRIG_INVALID || *src != orig_src)
- return -EINVAL;
- return 0;
-}
-
-/**
- * comedi_check_trigger_is_unique() - Make sure a trigger source is unique
- * @src: The trigger source to check.
- *
- * Return:
- * 0 if no more than one trigger source is set.
- * -EINVAL if more than one trigger source is set.
- */
-static inline int comedi_check_trigger_is_unique(unsigned int src)
-{
- /* this test is true if more than one _src bit is set */
- if ((src & (src - 1)) != 0)
- return -EINVAL;
- return 0;
-}
-
-/**
- * comedi_check_trigger_arg_is() - Trivially validate a trigger argument
- * @arg: Pointer to the trigger arg to validate.
- * @val: The value the argument should be.
- *
- * Forces *@arg to be @val.
- *
- * Return:
- * 0 if *@arg was already @val.
- * -EINVAL if *@arg differed from @val.
- */
-static inline int comedi_check_trigger_arg_is(unsigned int *arg,
- unsigned int val)
-{
- if (*arg != val) {
- *arg = val;
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * comedi_check_trigger_arg_min() - Trivially validate a trigger argument min
- * @arg: Pointer to the trigger arg to validate.
- * @val: The minimum value the argument should be.
- *
- * Forces *@arg to be at least @val, setting it to @val if necessary.
- *
- * Return:
- * 0 if *@arg was already at least @val.
- * -EINVAL if *@arg was less than @val.
- */
-static inline int comedi_check_trigger_arg_min(unsigned int *arg,
- unsigned int val)
-{
- if (*arg < val) {
- *arg = val;
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * comedi_check_trigger_arg_max() - Trivially validate a trigger argument max
- * @arg: Pointer to the trigger arg to validate.
- * @val: The maximum value the argument should be.
- *
- * Forces *@arg to be no more than @val, setting it to @val if necessary.
- *
- * Return:
- *	0 if *@arg was already no more than @val.
- * -EINVAL if *@arg was greater than @val.
- */
-static inline int comedi_check_trigger_arg_max(unsigned int *arg,
- unsigned int val)
-{
- if (*arg > val) {
- *arg = val;
- return -EINVAL;
- }
- return 0;
-}
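[Editor's note: a condensed sketch of how these helpers combine in a
driver's (*do_cmdtest) callback; the supported trigger sources and the
10 us minimum scan period are invented.]

static int my_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
		      struct comedi_cmd *cmd)
{
	int err = 0;

	/* Step 1 : check if triggers are trivially valid */
	err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
	err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
	err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */
	err |= comedi_check_trigger_is_unique(cmd->stop_src);
	if (err)
		return 2;

	/* Step 3 : check if arguments are trivially valid */
	err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
	err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 10000);
	if (err)
		return 3;

	return 0;
}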
-
-/*
- * Must set dev->hw_dev if you wish to DMA directly into comedi's buffer.
- * Also useful for retrieving a previously configured hardware device of
- * known bus type. Set automatically for auto-configured devices.
- * Automatically set to NULL when detaching hardware device.
- */
-int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev);
-
-/**
- * comedi_buf_n_bytes_ready() - Determine amount of unread data in buffer
- * @s: COMEDI subdevice.
- *
- * Determines the number of bytes of unread data in the asynchronous
- * acquisition data buffer for a subdevice. The data in question might not
- * have been fully "munged" yet.
- *
- * Return: The amount of unread data in bytes.
- */
-static inline unsigned int comedi_buf_n_bytes_ready(struct comedi_subdevice *s)
-{
- return s->async->buf_write_count - s->async->buf_read_count;
-}
-
-unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s, unsigned int n);
-unsigned int comedi_buf_write_free(struct comedi_subdevice *s, unsigned int n);
-
-unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s);
-unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s, unsigned int n);
-unsigned int comedi_buf_read_free(struct comedi_subdevice *s, unsigned int n);
-
-unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
- const void *data, unsigned int nsamples);
-unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
- void *data, unsigned int nsamples);
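[Editor's note: a sketch of the usual producer side, draining a hardware
FIFO into the async buffer from an interrupt handler. The FIFO register
offset is invented; comedi_buf_write_samples() raises %COMEDI_CB_OVERFLOW
itself when the buffer is full.]

static void my_drain_fifo(struct comedi_device *dev,
			  struct comedi_subdevice *s, unsigned int nsamples)
{
	unsigned short sample;

	while (nsamples--) {
		sample = inw(dev->iobase + 0x00);	/* invented offset */
		if (!comedi_buf_write_samples(s, &sample, 1))
			break;		/* buffer full; overflow event set */
	}
}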
-
-/* drivers.c - general comedi driver functions */
-
-#define COMEDI_TIMEOUT_MS 1000
-
-int comedi_timeout(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn,
- int (*cb)(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned long context),
- unsigned long context);
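[Editor's note: typical use pairs comedi_timeout() with a small
end-of-conversion test; a sketch in which the status register and bit are
invented.]

static int my_ai_eoc(struct comedi_device *dev, struct comedi_subdevice *s,
		     struct comedi_insn *insn, unsigned long context)
{
	unsigned int status = inb(dev->iobase + 0x01);	/* invented */

	if (status & 0x80)		/* invented end-of-conversion bit */
		return 0;
	return -EBUSY;
}

/* ... then, in the (*insn_read) handler: */
ret = comedi_timeout(dev, s, insn, my_ai_eoc, 0);
if (ret)
	return ret;	/* -ETIMEDOUT after COMEDI_TIMEOUT_MS */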
-
-unsigned int comedi_handle_events(struct comedi_device *dev,
- struct comedi_subdevice *s);
-
-int comedi_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data,
- unsigned int mask);
-unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
- unsigned int *data);
-unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
- struct comedi_cmd *cmd);
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
-unsigned int comedi_nscans_left(struct comedi_subdevice *s,
- unsigned int nscans);
-unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
- unsigned int nsamples);
-void comedi_inc_scan_progress(struct comedi_subdevice *s,
- unsigned int num_bytes);
-
-void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size);
-int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices);
-int comedi_alloc_subdev_readback(struct comedi_subdevice *s);
-
-int comedi_readback_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-
-int comedi_load_firmware(struct comedi_device *dev, struct device *hw_dev,
- const char *name,
- int (*cb)(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context),
- unsigned long context);
-
-int __comedi_request_region(struct comedi_device *dev,
- unsigned long start, unsigned long len);
-int comedi_request_region(struct comedi_device *dev,
- unsigned long start, unsigned long len);
-void comedi_legacy_detach(struct comedi_device *dev);
-
-int comedi_auto_config(struct device *hardware_device,
- struct comedi_driver *driver, unsigned long context);
-void comedi_auto_unconfig(struct device *hardware_device);
-
-int comedi_driver_register(struct comedi_driver *driver);
-void comedi_driver_unregister(struct comedi_driver *driver);
-
-/**
- * module_comedi_driver() - Helper macro for registering a comedi driver
- * @__comedi_driver: comedi_driver struct
- *
- * Helper macro for comedi drivers which do not do anything special in module
- * init/exit. This eliminates a lot of boilerplate. Each module may only use
- * this macro once, and calling it replaces module_init() and module_exit().
- */
-#define module_comedi_driver(__comedi_driver) \
- module_driver(__comedi_driver, comedi_driver_register, \
- comedi_driver_unregister)
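[Editor's note: typical use, assuming a struct comedi_driver named
my_driver as sketched earlier.]

module_comedi_driver(my_driver);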
-
-#endif /* _COMEDIDEV_H */
diff --git a/drivers/comedi/comedilib.h b/drivers/comedi/comedilib.h
deleted file mode 100644
index 0223c9cd9215..000000000000
--- a/drivers/comedi/comedilib.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * comedilib.h
- * Header file for kcomedilib
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
- */
-
-#ifndef _LINUX_COMEDILIB_H
-#define _LINUX_COMEDILIB_H
-
-struct comedi_device *comedi_open(const char *path);
-int comedi_close(struct comedi_device *dev);
-int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
- unsigned int chan, unsigned int *io);
-int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
- unsigned int chan, unsigned int io);
-int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
- unsigned int mask, unsigned int *bits,
- unsigned int base_channel);
-int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
- unsigned int subd);
-int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice);
-
-#endif
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index 750a6ff3c03c..8eb1f699a857 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -17,8 +17,7 @@
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
-
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"
struct comedi_driver *comedi_drivers;
diff --git a/drivers/comedi/drivers/8255.c b/drivers/comedi/drivers/8255.c
index e23335c75867..ced8ea09d4fa 100644
--- a/drivers/comedi/drivers/8255.c
+++ b/drivers/comedi/drivers/8255.c
@@ -40,9 +40,8 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
static int dev_8255_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
diff --git a/drivers/comedi/drivers/8255.h b/drivers/comedi/drivers/8255.h
deleted file mode 100644
index ceae3ca52e60..000000000000
--- a/drivers/comedi/drivers/8255.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * module/8255.h
- * Header file for 8255
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
- */
-
-#ifndef _8255_H
-#define _8255_H
-
-#define I8255_SIZE 0x04
-
-#define I8255_DATA_A_REG 0x00
-#define I8255_DATA_B_REG 0x01
-#define I8255_DATA_C_REG 0x02
-#define I8255_CTRL_REG 0x03
-#define I8255_CTRL_C_LO_IO BIT(0)
-#define I8255_CTRL_B_IO BIT(1)
-#define I8255_CTRL_B_MODE BIT(2)
-#define I8255_CTRL_C_HI_IO BIT(3)
-#define I8255_CTRL_A_IO BIT(4)
-#define I8255_CTRL_A_MODE(x) ((x) << 5)
-#define I8255_CTRL_CW BIT(7)
-
-struct comedi_device;
-struct comedi_subdevice;
-
-int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
- unsigned long regbase);
-
-int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
- unsigned long regbase);
-
-unsigned long subdev_8255_regbase(struct comedi_subdevice *s);
-
-#endif
diff --git a/drivers/comedi/drivers/8255_pci.c b/drivers/comedi/drivers/8255_pci.c
index 5a810f0e532a..0fec048e3a53 100644
--- a/drivers/comedi/drivers/8255_pci.c
+++ b/drivers/comedi/drivers/8255_pci.c
@@ -53,10 +53,8 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
enum pci_8255_boardid {
BOARD_ADLINK_PCI7224,
diff --git a/drivers/comedi/drivers/addi_apci_1032.c b/drivers/comedi/drivers/addi_apci_1032.c
index 81a246fbcc01..8eec6d9402de 100644
--- a/drivers/comedi/drivers/addi_apci_1032.c
+++ b/drivers/comedi/drivers/addi_apci_1032.c
@@ -63,8 +63,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "amcc_s5933.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_1500.c b/drivers/comedi/drivers/addi_apci_1500.c
index b04c15dcfb57..c94c78588889 100644
--- a/drivers/comedi/drivers/addi_apci_1500.c
+++ b/drivers/comedi/drivers/addi_apci_1500.c
@@ -14,8 +14,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "amcc_s5933.h"
#include "z8536.h"
diff --git a/drivers/comedi/drivers/addi_apci_1516.c b/drivers/comedi/drivers/addi_apci_1516.c
index 274ec9fb030c..3c48b72dad9d 100644
--- a/drivers/comedi/drivers/addi_apci_1516.c
+++ b/drivers/comedi/drivers/addi_apci_1516.c
@@ -14,8 +14,8 @@
*/
#include <linux/module.h>
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "addi_watchdog.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_1564.c b/drivers/comedi/drivers/addi_apci_1564.c
index 06fc7ed96200..0cd40948bee7 100644
--- a/drivers/comedi/drivers/addi_apci_1564.c
+++ b/drivers/comedi/drivers/addi_apci_1564.c
@@ -68,8 +68,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "addi_tcw.h"
#include "addi_watchdog.h"
diff --git a/drivers/comedi/drivers/addi_apci_16xx.c b/drivers/comedi/drivers/addi_apci_16xx.c
index c306aa41df97..ec2c321d2431 100644
--- a/drivers/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/comedi/drivers/addi_apci_16xx.c
@@ -14,8 +14,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/addi_apci_2032.c b/drivers/comedi/drivers/addi_apci_2032.c
index e9a2b37a4ae0..e048dfc3ec77 100644
--- a/drivers/comedi/drivers/addi_apci_2032.c
+++ b/drivers/comedi/drivers/addi_apci_2032.c
@@ -16,8 +16,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "addi_watchdog.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_2200.c b/drivers/comedi/drivers/addi_apci_2200.c
index 4c5aee784bd9..00378c9dddc8 100644
--- a/drivers/comedi/drivers/addi_apci_2200.c
+++ b/drivers/comedi/drivers/addi_apci_2200.c
@@ -14,8 +14,8 @@
*/
#include <linux/module.h>
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "addi_watchdog.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_3120.c b/drivers/comedi/drivers/addi_apci_3120.c
index 1ed3b33d1a30..28a242e69721 100644
--- a/drivers/comedi/drivers/addi_apci_3120.c
+++ b/drivers/comedi/drivers/addi_apci_3120.c
@@ -14,8 +14,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "amcc_s5933.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_3501.c b/drivers/comedi/drivers/addi_apci_3501.c
index f0c9642f3f1a..ecb5552f1785 100644
--- a/drivers/comedi/drivers/addi_apci_3501.c
+++ b/drivers/comedi/drivers/addi_apci_3501.c
@@ -41,8 +41,8 @@
*/
#include <linux/module.h>
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "amcc_s5933.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_3xxx.c b/drivers/comedi/drivers/addi_apci_3xxx.c
index a90d59377e18..bc72273e6a29 100644
--- a/drivers/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/comedi/drivers/addi_apci_3xxx.c
@@ -15,8 +15,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#define CONV_UNIT_NS BIT(0)
#define CONV_UNIT_US BIT(1)
diff --git a/drivers/comedi/drivers/addi_watchdog.c b/drivers/comedi/drivers/addi_watchdog.c
index 69b323fb869f..ed87ab432020 100644
--- a/drivers/comedi/drivers/addi_watchdog.c
+++ b/drivers/comedi/drivers/addi_watchdog.c
@@ -10,7 +10,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "addi_tcw.h"
#include "addi_watchdog.h"
diff --git a/drivers/comedi/drivers/adl_pci6208.c b/drivers/comedi/drivers/adl_pci6208.c
index 9ae4cc523dd4..b27354a51f5c 100644
--- a/drivers/comedi/drivers/adl_pci6208.c
+++ b/drivers/comedi/drivers/adl_pci6208.c
@@ -24,8 +24,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI-6208/6216-GL register map
diff --git a/drivers/comedi/drivers/adl_pci7x3x.c b/drivers/comedi/drivers/adl_pci7x3x.c
index 8fc45638ff59..e9f22de9b6f1 100644
--- a/drivers/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/comedi/drivers/adl_pci7x3x.c
@@ -46,8 +46,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "plx9052.h"
diff --git a/drivers/comedi/drivers/adl_pci8164.c b/drivers/comedi/drivers/adl_pci8164.c
index d5e1bda81557..0c513a67a264 100644
--- a/drivers/comedi/drivers/adl_pci8164.c
+++ b/drivers/comedi/drivers/adl_pci8164.c
@@ -19,8 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#define PCI8164_AXIS(x) ((x) * 0x08)
#define PCI8164_CMD_MSTS_REG 0x00
diff --git a/drivers/comedi/drivers/adl_pci9111.c b/drivers/comedi/drivers/adl_pci9111.c
index a062c5ab20e9..c50f94272a74 100644
--- a/drivers/comedi/drivers/adl_pci9111.c
+++ b/drivers/comedi/drivers/adl_pci9111.c
@@ -42,11 +42,10 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
#include "plx9052.h"
-#include "comedi_8254.h"
#define PCI9111_FIFO_HALF_SIZE 512
diff --git a/drivers/comedi/drivers/adl_pci9118.c b/drivers/comedi/drivers/adl_pci9118.c
index cda3a4267dca..9a816c718303 100644
--- a/drivers/comedi/drivers/adl_pci9118.c
+++ b/drivers/comedi/drivers/adl_pci9118.c
@@ -78,11 +78,10 @@
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
#include "amcc_s5933.h"
-#include "comedi_8254.h"
/*
* PCI BAR2 Register map (dev->iobase)
diff --git a/drivers/comedi/drivers/adq12b.c b/drivers/comedi/drivers/adq12b.c
index d719f76709ef..19d765182006 100644
--- a/drivers/comedi/drivers/adq12b.c
+++ b/drivers/comedi/drivers/adq12b.c
@@ -48,8 +48,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/* address scheme (page 2.17 of the manual) */
#define ADQ12B_CTREG 0x00
diff --git a/drivers/comedi/drivers/adv_pci1710.c b/drivers/comedi/drivers/adv_pci1710.c
index 090607760be6..4f2639968260 100644
--- a/drivers/comedi/drivers/adv_pci1710.c
+++ b/drivers/comedi/drivers/adv_pci1710.c
@@ -30,10 +30,9 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
#include "amcc_s5933.h"
/*
diff --git a/drivers/comedi/drivers/adv_pci1720.c b/drivers/comedi/drivers/adv_pci1720.c
index 2fcd7e8e7d85..2619591ba301 100644
--- a/drivers/comedi/drivers/adv_pci1720.c
+++ b/drivers/comedi/drivers/adv_pci1720.c
@@ -42,8 +42,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI BAR2 Register map (dev->iobase)
diff --git a/drivers/comedi/drivers/adv_pci1723.c b/drivers/comedi/drivers/adv_pci1723.c
index 23660a9fdb9c..e2aedb152068 100644
--- a/drivers/comedi/drivers/adv_pci1723.c
+++ b/drivers/comedi/drivers/adv_pci1723.c
@@ -32,8 +32,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI Bar 2 I/O Register map (dev->iobase)
diff --git a/drivers/comedi/drivers/adv_pci1724.c b/drivers/comedi/drivers/adv_pci1724.c
index e8ab573c839f..bb43b7deeb56 100644
--- a/drivers/comedi/drivers/adv_pci1724.c
+++ b/drivers/comedi/drivers/adv_pci1724.c
@@ -38,8 +38,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI bar 2 Register I/O map (dev->iobase)
diff --git a/drivers/comedi/drivers/adv_pci1760.c b/drivers/comedi/drivers/adv_pci1760.c
index 6de8ab97d346..fcfc2e299110 100644
--- a/drivers/comedi/drivers/adv_pci1760.c
+++ b/drivers/comedi/drivers/adv_pci1760.c
@@ -22,8 +22,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI-1760 Register Map
diff --git a/drivers/comedi/drivers/adv_pci_dio.c b/drivers/comedi/drivers/adv_pci_dio.c
index 54c7419c8ca6..efa3e46b554b 100644
--- a/drivers/comedi/drivers/adv_pci_dio.c
+++ b/drivers/comedi/drivers/adv_pci_dio.c
@@ -23,11 +23,9 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register offset definitions
diff --git a/drivers/comedi/drivers/aio_aio12_8.c b/drivers/comedi/drivers/aio_aio12_8.c
index 4829115921a3..30b8a32204d8 100644
--- a/drivers/comedi/drivers/aio_aio12_8.c
+++ b/drivers/comedi/drivers/aio_aio12_8.c
@@ -22,10 +22,9 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c
index fe3876235075..b00fab0b89d4 100644
--- a/drivers/comedi/drivers/aio_iiro_16.c
+++ b/drivers/comedi/drivers/aio_iiro_16.c
@@ -30,8 +30,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#define AIO_IIRO_16_RELAY_0_7 0x00
#define AIO_IIRO_16_INPUT_0_7 0x01
diff --git a/drivers/comedi/drivers/amplc_dio200.c b/drivers/comedi/drivers/amplc_dio200.c
index fa19c9e7c56b..4544bcdd8a70 100644
--- a/drivers/comedi/drivers/amplc_dio200.c
+++ b/drivers/comedi/drivers/amplc_dio200.c
@@ -185,7 +185,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "amplc_dio200.h"
diff --git a/drivers/comedi/drivers/amplc_dio200_common.c b/drivers/comedi/drivers/amplc_dio200_common.c
index a3454130d5f8..ff651f2eb86c 100644
--- a/drivers/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/comedi/drivers/amplc_dio200_common.c
@@ -12,12 +12,11 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h> /* only for register defines */
+#include <linux/comedi/comedi_8254.h>
#include "amplc_dio200.h"
-#include "comedi_8254.h"
-#include "8255.h" /* only for register defines */
/* 200 series registers */
#define DIO200_IO_SIZE 0x20
diff --git a/drivers/comedi/drivers/amplc_dio200_pci.c b/drivers/comedi/drivers/amplc_dio200_pci.c
index 1bd7a42c8464..527994d82a1f 100644
--- a/drivers/comedi/drivers/amplc_dio200_pci.c
+++ b/drivers/comedi/drivers/amplc_dio200_pci.c
@@ -214,8 +214,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "amplc_dio200.h"
diff --git a/drivers/comedi/drivers/amplc_pc236.c b/drivers/comedi/drivers/amplc_pc236.c
index c377af1d5246..b21e0c906aab 100644
--- a/drivers/comedi/drivers/amplc_pc236.c
+++ b/drivers/comedi/drivers/amplc_pc236.c
@@ -32,8 +32,7 @@
*/
#include <linux/module.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "amplc_pc236.h"
diff --git a/drivers/comedi/drivers/amplc_pc236_common.c b/drivers/comedi/drivers/amplc_pc236_common.c
index 981d281e87a1..9f4f89b1ef23 100644
--- a/drivers/comedi/drivers/amplc_pc236_common.c
+++ b/drivers/comedi/drivers/amplc_pc236_common.c
@@ -11,11 +11,10 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
#include "amplc_pc236.h"
-#include "8255.h"
static void pc236_intr_update(struct comedi_device *dev, bool enable)
{
diff --git a/drivers/comedi/drivers/amplc_pc263.c b/drivers/comedi/drivers/amplc_pc263.c
index 68da6098ee84..d7f088a8a5e3 100644
--- a/drivers/comedi/drivers/amplc_pc263.c
+++ b/drivers/comedi/drivers/amplc_pc263.c
@@ -25,7 +25,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/* PC263 registers */
#define PC263_DO_0_7_REG 0x00
diff --git a/drivers/comedi/drivers/amplc_pci224.c b/drivers/comedi/drivers/amplc_pci224.c
index bcf6d61af863..5a04e55daeea 100644
--- a/drivers/comedi/drivers/amplc_pci224.c
+++ b/drivers/comedi/drivers/amplc_pci224.c
@@ -96,10 +96,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
/*
* PCI224/234 i/o space 1 (PCIBAR2) registers.
diff --git a/drivers/comedi/drivers/amplc_pci230.c b/drivers/comedi/drivers/amplc_pci230.c
index 8911dc2bd2c6..92ba8b8c0172 100644
--- a/drivers/comedi/drivers/amplc_pci230.c
+++ b/drivers/comedi/drivers/amplc_pci230.c
@@ -174,11 +174,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
/*
* PCI230 PCI configuration register information
diff --git a/drivers/comedi/drivers/amplc_pci236.c b/drivers/comedi/drivers/amplc_pci236.c
index e7f6fa4d101a..482eb261c333 100644
--- a/drivers/comedi/drivers/amplc_pci236.c
+++ b/drivers/comedi/drivers/amplc_pci236.c
@@ -34,8 +34,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "amplc_pc236.h"
#include "plx9052.h"
diff --git a/drivers/comedi/drivers/amplc_pci263.c b/drivers/comedi/drivers/amplc_pci263.c
index 9217973f1141..1609665c4b18 100644
--- a/drivers/comedi/drivers/amplc_pci263.c
+++ b/drivers/comedi/drivers/amplc_pci263.c
@@ -24,8 +24,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/* PCI263 registers */
#define PCI263_DO_0_7_REG 0x00
diff --git a/drivers/comedi/drivers/c6xdigio.c b/drivers/comedi/drivers/c6xdigio.c
index 786fd15698df..14b90d1c64dc 100644
--- a/drivers/comedi/drivers/c6xdigio.c
+++ b/drivers/comedi/drivers/c6xdigio.c
@@ -30,8 +30,7 @@
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/pnp.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/cb_das16_cs.c b/drivers/comedi/drivers/cb_das16_cs.c
index a5d171e71c33..8e0d2fa5f95d 100644
--- a/drivers/comedi/drivers/cb_das16_cs.c
+++ b/drivers/comedi/drivers/cb_das16_cs.c
@@ -27,10 +27,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-
-#include "../comedi_pcmcia.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedi_pcmcia.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/cb_pcidas.c b/drivers/comedi/drivers/cb_pcidas.c
index 2f20bd56ec6c..0c7576b967fc 100644
--- a/drivers/comedi/drivers/cb_pcidas.c
+++ b/drivers/comedi/drivers/cb_pcidas.c
@@ -54,11 +54,10 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
#include "amcc_s5933.h"
#define AI_BUFFER_SIZE 1024 /* max ai fifo size */
diff --git a/drivers/comedi/drivers/cb_pcidas64.c b/drivers/comedi/drivers/cb_pcidas64.c
index 41a8fea7f48a..ca6038a25f26 100644
--- a/drivers/comedi/drivers/cb_pcidas64.c
+++ b/drivers/comedi/drivers/cb_pcidas64.c
@@ -73,10 +73,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include "../comedi_pci.h"
-
-#include "8255.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
#include "plx9080.h"
#define TIMER_BASE 25 /* 40MHz master clock */
diff --git a/drivers/comedi/drivers/cb_pcidda.c b/drivers/comedi/drivers/cb_pcidda.c
index 78cf1603638c..c52204a6bda4 100644
--- a/drivers/comedi/drivers/cb_pcidda.c
+++ b/drivers/comedi/drivers/cb_pcidda.c
@@ -27,10 +27,8 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
#define EEPROM_SIZE 128 /* number of entries in eeprom */
/* maximum number of ao channels for supported boards */
diff --git a/drivers/comedi/drivers/cb_pcimdas.c b/drivers/comedi/drivers/cb_pcimdas.c
index 2292f69da4f4..8bdb00774f11 100644
--- a/drivers/comedi/drivers/cb_pcimdas.c
+++ b/drivers/comedi/drivers/cb_pcimdas.c
@@ -34,12 +34,11 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
#include "plx9052.h"
-#include "8255.h"
/*
* PCI Bar 1 Register map
diff --git a/drivers/comedi/drivers/cb_pcimdda.c b/drivers/comedi/drivers/cb_pcimdda.c
index 21fc7b3c5f60..bf8093a10315 100644
--- a/drivers/comedi/drivers/cb_pcimdda.c
+++ b/drivers/comedi/drivers/cb_pcimdda.c
@@ -67,10 +67,8 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
/* device ids of the cards we support -- currently only 1 card supported */
#define PCI_ID_PCIM_DDA06_16 0x0053
diff --git a/drivers/comedi/drivers/comedi_8254.c b/drivers/comedi/drivers/comedi_8254.c
index 4bf5daa9e885..b4185c1b2695 100644
--- a/drivers/comedi/drivers/comedi_8254.c
+++ b/drivers/comedi/drivers/comedi_8254.c
@@ -116,10 +116,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
static unsigned int __i8254_read(struct comedi_8254 *i8254, unsigned int reg)
{
diff --git a/drivers/comedi/drivers/comedi_8254.h b/drivers/comedi/drivers/comedi_8254.h
deleted file mode 100644
index d8264417e53c..000000000000
--- a/drivers/comedi/drivers/comedi_8254.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * comedi_8254.h
- * Generic 8254 timer/counter support
- * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * COMEDI - Linux Control and Measurement Device Interface
- * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- */
-
-#ifndef _COMEDI_8254_H
-#define _COMEDI_8254_H
-
-#include <linux/types.h>
-
-struct comedi_device;
-struct comedi_insn;
-struct comedi_subdevice;
-
-/*
- * Common oscillator base values in nanoseconds
- */
-#define I8254_OSC_BASE_10MHZ 100
-#define I8254_OSC_BASE_5MHZ 200
-#define I8254_OSC_BASE_4MHZ 250
-#define I8254_OSC_BASE_2MHZ 500
-#define I8254_OSC_BASE_1MHZ 1000
-#define I8254_OSC_BASE_100KHZ 10000
-#define I8254_OSC_BASE_10KHZ 100000
-#define I8254_OSC_BASE_1KHZ 1000000
-
-/*
- * I/O access size used to read/write registers
- */
-#define I8254_IO8 1
-#define I8254_IO16 2
-#define I8254_IO32 4
-
-/*
- * Register map for generic 8254 timer (I8254_IO8 with 0 regshift)
- */
-#define I8254_COUNTER0_REG 0x00
-#define I8254_COUNTER1_REG 0x01
-#define I8254_COUNTER2_REG 0x02
-#define I8254_CTRL_REG 0x03
-#define I8254_CTRL_SEL_CTR(x) ((x) << 6)
-#define I8254_CTRL_READBACK(x) (I8254_CTRL_SEL_CTR(3) | BIT(x))
-#define I8254_CTRL_READBACK_COUNT I8254_CTRL_READBACK(4)
-#define I8254_CTRL_READBACK_STATUS I8254_CTRL_READBACK(5)
-#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x))
-#define I8254_CTRL_RW(x) (((x) & 0x3) << 4)
-#define I8254_CTRL_LATCH I8254_CTRL_RW(0)
-#define I8254_CTRL_LSB_ONLY I8254_CTRL_RW(1)
-#define I8254_CTRL_MSB_ONLY I8254_CTRL_RW(2)
-#define I8254_CTRL_LSB_MSB I8254_CTRL_RW(3)
-
-/* counter maps zero to 0x10000 */
-#define I8254_MAX_COUNT 0x10000
-
-/**
- * struct comedi_8254 - private data used by this module
- * @iobase: PIO base address of the registers (in/out)
- * @mmio: MMIO base address of the registers (read/write)
- * @iosize: I/O size used to access the registers (b/w/l)
- * @regshift: register gap shift
- * @osc_base: cascaded oscillator speed in ns
- * @divisor: divisor for single counter
- * @divisor1: divisor loaded into first cascaded counter
- * @divisor2: divisor loaded into second cascaded counter
- * @next_div: next divisor for single counter
- * @next_div1: next divisor to use for first cascaded counter
- * @next_div2: next divisor to use for second cascaded counter
- * @clock_src: current clock source for each counter (driver specific)
- * @gate_src: current gate source for each counter (driver specific)
- * @busy: flags used to indicate that a counter is "busy"
- * @insn_config: driver specific (*insn_config) callback
- */
-struct comedi_8254 {
- unsigned long iobase;
- void __iomem *mmio;
- unsigned int iosize;
- unsigned int regshift;
- unsigned int osc_base;
- unsigned int divisor;
- unsigned int divisor1;
- unsigned int divisor2;
- unsigned int next_div;
- unsigned int next_div1;
- unsigned int next_div2;
- unsigned int clock_src[3];
- unsigned int gate_src[3];
- bool busy[3];
-
- int (*insn_config)(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-};
-
-unsigned int comedi_8254_status(struct comedi_8254 *i8254,
- unsigned int counter);
-unsigned int comedi_8254_read(struct comedi_8254 *i8254, unsigned int counter);
-void comedi_8254_write(struct comedi_8254 *i8254,
- unsigned int counter, unsigned int val);
-
-int comedi_8254_set_mode(struct comedi_8254 *i8254,
- unsigned int counter, unsigned int mode);
-int comedi_8254_load(struct comedi_8254 *i8254,
- unsigned int counter, unsigned int val, unsigned int mode);
-
-void comedi_8254_pacer_enable(struct comedi_8254 *i8254,
- unsigned int counter1, unsigned int counter2,
- bool enable);
-void comedi_8254_update_divisors(struct comedi_8254 *i8254);
-void comedi_8254_cascade_ns_to_timer(struct comedi_8254 *i8254,
- unsigned int *nanosec, unsigned int flags);
-void comedi_8254_ns_to_timer(struct comedi_8254 *i8254,
- unsigned int *nanosec, unsigned int flags);
-
-void comedi_8254_set_busy(struct comedi_8254 *i8254,
- unsigned int counter, bool busy);
-
-void comedi_8254_subdevice_init(struct comedi_subdevice *s,
- struct comedi_8254 *i8254);
-
-struct comedi_8254 *comedi_8254_init(unsigned long iobase,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift);
-struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift);
-
-#endif /* _COMEDI_8254_H */
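[Editor's note: for context, a sketch of how a low-level driver typically
uses this API; the register offset and counter wiring are invented. The
pacer is allocated at attach time and programmed from the command handlers;
per the comedi_device documentation, dev->pacer is freed automatically on
detach.]

/* At attach time: */
dev->pacer = comedi_8254_init(dev->iobase + 0x08,	/* invented offset */
			      I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
if (!dev->pacer)
	return -ENOMEM;

/* In (*do_cmdtest): round the requested scan period to the hardware. */
comedi_8254_cascade_ns_to_timer(dev->pacer, &cmd->scan_begin_arg, cmd->flags);

/* In (*do_cmd): load the divisors and start the cascaded counters. */
comedi_8254_update_divisors(dev->pacer);
comedi_8254_pacer_enable(dev->pacer, 1, 2, true);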
diff --git a/drivers/comedi/drivers/comedi_8255.c b/drivers/comedi/drivers/comedi_8255.c
index b7ca465933ee..5562b9cd0a17 100644
--- a/drivers/comedi/drivers/comedi_8255.c
+++ b/drivers/comedi/drivers/comedi_8255.c
@@ -29,9 +29,8 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
struct subdev_8255_private {
unsigned long regbase;
diff --git a/drivers/comedi/drivers/comedi_bond.c b/drivers/comedi/drivers/comedi_bond.c
index 4392b5927a99..78c39fa84177 100644
--- a/drivers/comedi/drivers/comedi_bond.c
+++ b/drivers/comedi/drivers/comedi_bond.c
@@ -40,9 +40,9 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
-#include "../comedi.h"
-#include "../comedilib.h"
-#include "../comedidev.h"
+#include <linux/comedi.h>
+#include <linux/comedi/comedilib.h>
+#include <linux/comedi/comedidev.h>
struct bonded_device {
struct comedi_device *dev;
diff --git a/drivers/comedi/drivers/comedi_isadma.c b/drivers/comedi/drivers/comedi_isadma.c
index 479b58e209ba..700982464c53 100644
--- a/drivers/comedi/drivers/comedi_isadma.c
+++ b/drivers/comedi/drivers/comedi_isadma.c
@@ -9,10 +9,8 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_isadma.h>
/**
* comedi_isadma_program - program and enable an ISA DMA transfer
diff --git a/drivers/comedi/drivers/comedi_isadma.h b/drivers/comedi/drivers/comedi_isadma.h
deleted file mode 100644
index 9d2b12db7e6e..000000000000
--- a/drivers/comedi/drivers/comedi_isadma.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * COMEDI ISA DMA support functions
- * Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
- */
-
-#ifndef _COMEDI_ISADMA_H
-#define _COMEDI_ISADMA_H
-
-#include <linux/types.h>
-
-struct comedi_device;
-struct device;
-
-/*
- * These are used to avoid issues when <asm/dma.h> and the DMA_MODE_
- * defines are not available.
- */
-#define COMEDI_ISADMA_READ 0
-#define COMEDI_ISADMA_WRITE 1
-
-/**
- * struct comedi_isadma_desc - cookie for ISA DMA
- * @virt_addr: virtual address of buffer
- * @hw_addr: hardware (bus) address of buffer
- * @chan: DMA channel
- * @maxsize: allocated size of buffer (in bytes)
- * @size: transfer size (in bytes)
- * @mode: DMA_MODE_READ or DMA_MODE_WRITE
- */
-struct comedi_isadma_desc {
- void *virt_addr;
- dma_addr_t hw_addr;
- unsigned int chan;
- unsigned int maxsize;
- unsigned int size;
- char mode;
-};
-
-/**
- * struct comedi_isadma - ISA DMA data
- * @dev: device to allocate non-coherent memory for
- * @desc: cookie for each DMA buffer
- * @n_desc: the number of cookies
- * @cur_dma: the current cookie in use
- * @chan: the first DMA channel requested
- * @chan2: the second DMA channel requested
- */
-struct comedi_isadma {
- struct device *dev;
- struct comedi_isadma_desc *desc;
- int n_desc;
- int cur_dma;
- unsigned int chan;
- unsigned int chan2;
-};
-
-#if IS_ENABLED(CONFIG_ISA_DMA_API)
-
-void comedi_isadma_program(struct comedi_isadma_desc *desc);
-unsigned int comedi_isadma_disable(unsigned int dma_chan);
-unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
- unsigned int size);
-unsigned int comedi_isadma_poll(struct comedi_isadma *dma);
-void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir);
-
-struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev,
- int n_desc, unsigned int dma_chan1,
- unsigned int dma_chan2,
- unsigned int maxsize, char dma_dir);
-void comedi_isadma_free(struct comedi_isadma *dma);
-
-#else /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
-
-static inline void comedi_isadma_program(struct comedi_isadma_desc *desc)
-{
-}
-
-static inline unsigned int comedi_isadma_disable(unsigned int dma_chan)
-{
- return 0;
-}
-
-static inline unsigned int
-comedi_isadma_disable_on_sample(unsigned int dma_chan, unsigned int size)
-{
- return 0;
-}
-
-static inline unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
-{
- return 0;
-}
-
-static inline void comedi_isadma_set_mode(struct comedi_isadma_desc *desc,
- char dma_dir)
-{
-}
-
-static inline struct comedi_isadma *
-comedi_isadma_alloc(struct comedi_device *dev, int n_desc,
- unsigned int dma_chan1, unsigned int dma_chan2,
- unsigned int maxsize, char dma_dir)
-{
- return NULL;
-}
-
-static inline void comedi_isadma_free(struct comedi_isadma *dma)
-{
-}
-
-#endif /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
-
-#endif /* #ifndef _COMEDI_ISADMA_H */
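The header deleted above is not going away: it moves verbatim to include/linux/comedi/comedi_isadma.h, and callers switch to the <linux/comedi/comedi_isadma.h> spelling (see the das16.c, das1800.c, dt282x.c and pcl81x hunks below). As a reminder of how the API it declares is meant to be used, here is a minimal sketch of a consumer; the driver name and its private struct are hypothetical, while the calls and fields are exactly those declared in the header above.

/* Illustrative sketch only; "mydev" and its fields are hypothetical. */
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_isadma.h>

struct mydev_private {
        struct comedi_isadma *dma;
};

static int mydev_attach_dma(struct comedi_device *dev, unsigned int dma_chan)
{
        struct mydev_private *devpriv = dev->private;

        /* One read descriptor on one channel (dma_chan2 == 0: unused). */
        devpriv->dma = comedi_isadma_alloc(dev, 1, dma_chan, 0,
                                           PAGE_SIZE, COMEDI_ISADMA_READ);
        return devpriv->dma ? 0 : -ENOMEM;
}

static void mydev_start_transfer(struct mydev_private *devpriv,
                                 unsigned int nbytes)
{
        struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];

        desc->size = min(nbytes, desc->maxsize);
        comedi_isadma_program(desc);    /* arms the ISA DMA controller */
}

static void mydev_detach_dma(struct comedi_device *dev)
{
        struct mydev_private *devpriv = dev->private;

        if (devpriv->dma) {
                comedi_isadma_disable(devpriv->dma->chan);
                comedi_isadma_free(devpriv->dma);
        }
}

Note that the CONFIG_ISA_DMA_API=n stubs in the header make all of this compile away cleanly on platforms without ISA DMA, so a consumer needs no ifdeffery of its own.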
diff --git a/drivers/comedi/drivers/comedi_parport.c b/drivers/comedi/drivers/comedi_parport.c
index 5338b5eea440..098738a688fe 100644
--- a/drivers/comedi/drivers/comedi_parport.c
+++ b/drivers/comedi/drivers/comedi_parport.c
@@ -57,8 +57,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index cbc225eb1991..0b5c0af1cebf 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -45,10 +45,8 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <asm/div64.h>
-
#include <linux/timer.h>
#include <linux/ktime.h>
#include <linux/jiffies.h>
diff --git a/drivers/comedi/drivers/contec_pci_dio.c b/drivers/comedi/drivers/contec_pci_dio.c
index b8fdd9c1f166..41d42ff14144 100644
--- a/drivers/comedi/drivers/contec_pci_dio.c
+++ b/drivers/comedi/drivers/contec_pci_dio.c
@@ -18,8 +18,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/dac02.c b/drivers/comedi/drivers/dac02.c
index 5ef8114c2c85..4b011d66d7b0 100644
--- a/drivers/comedi/drivers/dac02.c
+++ b/drivers/comedi/drivers/dac02.c
@@ -25,8 +25,7 @@
*/
#include <linux/module.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* The output range is selected by jumpering pins on the I/O connector.
diff --git a/drivers/comedi/drivers/daqboard2000.c b/drivers/comedi/drivers/daqboard2000.c
index f64e747078bd..c0a4e1b06fb3 100644
--- a/drivers/comedi/drivers/daqboard2000.c
+++ b/drivers/comedi/drivers/daqboard2000.c
@@ -96,10 +96,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
-#include "../comedi_pci.h"
-
-#include "8255.h"
#include "plx9080.h"
#define DB2K_FIRMWARE "daqboard2000_firmware.bin"
diff --git a/drivers/comedi/drivers/das08.c b/drivers/comedi/drivers/das08.c
index b50743c5b822..f8ab3af2e391 100644
--- a/drivers/comedi/drivers/das08.c
+++ b/drivers/comedi/drivers/das08.c
@@ -10,11 +10,10 @@
*/
#include <linux/module.h>
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-#include "comedi_8254.h"
#include "das08.h"
/*
diff --git a/drivers/comedi/drivers/das08_cs.c b/drivers/comedi/drivers/das08_cs.c
index 223479f9ea3c..6075efcf10d6 100644
--- a/drivers/comedi/drivers/das08_cs.c
+++ b/drivers/comedi/drivers/das08_cs.c
@@ -30,8 +30,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pcmcia.h"
+#include <linux/comedi/comedi_pcmcia.h>
#include "das08.h"
diff --git a/drivers/comedi/drivers/das08_isa.c b/drivers/comedi/drivers/das08_isa.c
index 8c4cfa821423..3d43b77cc9f4 100644
--- a/drivers/comedi/drivers/das08_isa.c
+++ b/drivers/comedi/drivers/das08_isa.c
@@ -29,7 +29,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "das08.h"
diff --git a/drivers/comedi/drivers/das08_pci.c b/drivers/comedi/drivers/das08_pci.c
index 1cd903336a4c..982f3ab0ccbd 100644
--- a/drivers/comedi/drivers/das08_pci.c
+++ b/drivers/comedi/drivers/das08_pci.c
@@ -23,8 +23,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "das08.h"
diff --git a/drivers/comedi/drivers/das16.c b/drivers/comedi/drivers/das16.c
index 4ac2622b0fac..937a69ce0977 100644
--- a/drivers/comedi/drivers/das16.c
+++ b/drivers/comedi/drivers/das16.c
@@ -63,12 +63,10 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
#define DAS16_DMA_SIZE 0xff00 /* size in bytes of allocated dma buffer */
diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c
index 75f3dbbe97ac..275effb77746 100644
--- a/drivers/comedi/drivers/das16m1.c
+++ b/drivers/comedi/drivers/das16m1.c
@@ -42,10 +42,9 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register map (dev->iobase)
diff --git a/drivers/comedi/drivers/das1800.c b/drivers/comedi/drivers/das1800.c
index f50891a6ee7d..f09608c0f4ff 100644
--- a/drivers/comedi/drivers/das1800.c
+++ b/drivers/comedi/drivers/das1800.c
@@ -73,11 +73,9 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
/* misc. defines */
#define DAS1800_SIZE 16 /* uses 16 io addresses */
diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c
index 96f4107b8054..1af394591e74 100644
--- a/drivers/comedi/drivers/das6402.c
+++ b/drivers/comedi/drivers/das6402.c
@@ -24,10 +24,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/das800.c b/drivers/comedi/drivers/das800.c
index bc08324f422f..4ca33f46eaa7 100644
--- a/drivers/comedi/drivers/das800.c
+++ b/drivers/comedi/drivers/das800.c
@@ -46,10 +46,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
#define N_CHAN_AI 8 /* number of analog input channels */
diff --git a/drivers/comedi/drivers/dmm32at.c b/drivers/comedi/drivers/dmm32at.c
index 56682f01242f..fe023c722aa3 100644
--- a/drivers/comedi/drivers/dmm32at.c
+++ b/drivers/comedi/drivers/dmm32at.c
@@ -29,9 +29,8 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
/* Board register addresses */
#define DMM32AT_AI_START_CONV_REG 0x00
diff --git a/drivers/comedi/drivers/dt2801.c b/drivers/comedi/drivers/dt2801.c
index 0d571d817b4e..230d25010f58 100644
--- a/drivers/comedi/drivers/dt2801.c
+++ b/drivers/comedi/drivers/dt2801.c
@@ -31,7 +31,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
#define DT2801_TIMEOUT 1000
diff --git a/drivers/comedi/drivers/dt2811.c b/drivers/comedi/drivers/dt2811.c
index 0eb5e6ba6916..dbb9f38da289 100644
--- a/drivers/comedi/drivers/dt2811.c
+++ b/drivers/comedi/drivers/dt2811.c
@@ -40,8 +40,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/dt2814.c b/drivers/comedi/drivers/dt2814.c
index ed44ce0d151b..c98a5a4a7aec 100644
--- a/drivers/comedi/drivers/dt2814.c
+++ b/drivers/comedi/drivers/dt2814.c
@@ -27,8 +27,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
#define DT2814_CSR 0
diff --git a/drivers/comedi/drivers/dt2815.c b/drivers/comedi/drivers/dt2815.c
index 5906f32aa01f..03ba2fd18a21 100644
--- a/drivers/comedi/drivers/dt2815.c
+++ b/drivers/comedi/drivers/dt2815.c
@@ -43,8 +43,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
#define DT2815_DATA 0
diff --git a/drivers/comedi/drivers/dt2817.c b/drivers/comedi/drivers/dt2817.c
index 7c1463e835d3..6738045c7531 100644
--- a/drivers/comedi/drivers/dt2817.c
+++ b/drivers/comedi/drivers/dt2817.c
@@ -25,7 +25,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#define DT2817_CR 0
#define DT2817_DATA 1
diff --git a/drivers/comedi/drivers/dt282x.c b/drivers/comedi/drivers/dt282x.c
index 2656b4b0e3d0..4ae80e6c7266 100644
--- a/drivers/comedi/drivers/dt282x.c
+++ b/drivers/comedi/drivers/dt282x.c
@@ -51,10 +51,8 @@
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_isadma.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/dt3000.c b/drivers/comedi/drivers/dt3000.c
index ec27aa4730d4..fc6e9c30e522 100644
--- a/drivers/comedi/drivers/dt3000.c
+++ b/drivers/comedi/drivers/dt3000.c
@@ -43,8 +43,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI BAR0 - dual-ported RAM location definitions (dev->mmio)
diff --git a/drivers/comedi/drivers/dt9812.c b/drivers/comedi/drivers/dt9812.c
index 704b04d2980d..b37b9d8eca0d 100644
--- a/drivers/comedi/drivers/dt9812.c
+++ b/drivers/comedi/drivers/dt9812.c
@@ -34,8 +34,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
#define DT9812_DIAGS_BOARD_INFO_ADDR 0xFBFF
#define DT9812_MAX_WRITE_CMD_PIPE_SIZE 32
diff --git a/drivers/comedi/drivers/dyna_pci10xx.c b/drivers/comedi/drivers/dyna_pci10xx.c
index c224422bb126..407a038fb3e0 100644
--- a/drivers/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/comedi/drivers/dyna_pci10xx.c
@@ -26,8 +26,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mutex.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#define READ_TIMEOUT 50
diff --git a/drivers/comedi/drivers/fl512.c b/drivers/comedi/drivers/fl512.c
index b715f30659fa..139e801fc358 100644
--- a/drivers/comedi/drivers/fl512.c
+++ b/drivers/comedi/drivers/fl512.c
@@ -21,8 +21,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
/*
diff --git a/drivers/comedi/drivers/gsc_hpdi.c b/drivers/comedi/drivers/gsc_hpdi.c
index e35e4a743714..c09d135df38d 100644
--- a/drivers/comedi/drivers/gsc_hpdi.c
+++ b/drivers/comedi/drivers/gsc_hpdi.c
@@ -34,8 +34,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "plx9080.h"
diff --git a/drivers/comedi/drivers/icp_multi.c b/drivers/comedi/drivers/icp_multi.c
index 16d2b78de83c..ac4b11dbd741 100644
--- a/drivers/comedi/drivers/icp_multi.c
+++ b/drivers/comedi/drivers/icp_multi.c
@@ -36,8 +36,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#define ICP_MULTI_ADC_CSR 0x00 /* R/W: ADC command/status register */
#define ICP_MULTI_ADC_CSR_ST BIT(0) /* Start ADC */
diff --git a/drivers/comedi/drivers/ii_pci20kc.c b/drivers/comedi/drivers/ii_pci20kc.c
index 399255dbe388..4a19bf8462be 100644
--- a/drivers/comedi/drivers/ii_pci20kc.c
+++ b/drivers/comedi/drivers/ii_pci20kc.c
@@ -30,7 +30,7 @@
#include <linux/module.h>
#include <linux/io.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c
index f963080dd61f..951c23fa0369 100644
--- a/drivers/comedi/drivers/jr3_pci.c
+++ b/drivers/comedi/drivers/jr3_pci.c
@@ -35,8 +35,7 @@
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/timer.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "jr3_pci.h"
diff --git a/drivers/comedi/drivers/ke_counter.c b/drivers/comedi/drivers/ke_counter.c
index bef1b20c1c8d..b825cf60e1e0 100644
--- a/drivers/comedi/drivers/ke_counter.c
+++ b/drivers/comedi/drivers/ke_counter.c
@@ -19,8 +19,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI BAR 0 Register I/O map
diff --git a/drivers/comedi/drivers/me4000.c b/drivers/comedi/drivers/me4000.c
index 0d3d4cafce2e..9aea02b86ed9 100644
--- a/drivers/comedi/drivers/me4000.c
+++ b/drivers/comedi/drivers/me4000.c
@@ -32,10 +32,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
#include "plx9052.h"
#define ME4000_FIRMWARE "me4000_firmware.bin"
diff --git a/drivers/comedi/drivers/me_daq.c b/drivers/comedi/drivers/me_daq.c
index ef18e387471b..076b15097afd 100644
--- a/drivers/comedi/drivers/me_daq.c
+++ b/drivers/comedi/drivers/me_daq.c
@@ -23,8 +23,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "plx9052.h"
diff --git a/drivers/comedi/drivers/mf6x4.c b/drivers/comedi/drivers/mf6x4.c
index 9da8dd748078..14f1d5e9cd59 100644
--- a/drivers/comedi/drivers/mf6x4.c
+++ b/drivers/comedi/drivers/mf6x4.c
@@ -18,8 +18,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/* Registers present in BAR0 memory region */
#define MF624_GPIOC_REG 0x54
diff --git a/drivers/comedi/drivers/mite.c b/drivers/comedi/drivers/mite.c
index 70960e3ba878..88f3cd6f54f1 100644
--- a/drivers/comedi/drivers/mite.c
+++ b/drivers/comedi/drivers/mite.c
@@ -38,8 +38,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/log2.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "mite.h"
diff --git a/drivers/comedi/drivers/mpc624.c b/drivers/comedi/drivers/mpc624.c
index 646f4c086204..9e51ff528ed1 100644
--- a/drivers/comedi/drivers/mpc624.c
+++ b/drivers/comedi/drivers/mpc624.c
@@ -44,8 +44,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
/* Offsets of different ports */
diff --git a/drivers/comedi/drivers/multiq3.c b/drivers/comedi/drivers/multiq3.c
index c1897aee9a9a..07ff5383da99 100644
--- a/drivers/comedi/drivers/multiq3.c
+++ b/drivers/comedi/drivers/multiq3.c
@@ -26,8 +26,7 @@
*/
#include <linux/module.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/ni_6527.c b/drivers/comedi/drivers/ni_6527.c
index f1a45cf7342a..ac5820085231 100644
--- a/drivers/comedi/drivers/ni_6527.c
+++ b/drivers/comedi/drivers/ni_6527.c
@@ -20,8 +20,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI BAR1 - Register memory map
diff --git a/drivers/comedi/drivers/ni_65xx.c b/drivers/comedi/drivers/ni_65xx.c
index 7cd8497420f2..58334de3b253 100644
--- a/drivers/comedi/drivers/ni_65xx.c
+++ b/drivers/comedi/drivers/ni_65xx.c
@@ -49,8 +49,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI BAR1 Register Map
diff --git a/drivers/comedi/drivers/ni_660x.c b/drivers/comedi/drivers/ni_660x.c
index e60d0125bcb2..0679bc39e0bc 100644
--- a/drivers/comedi/drivers/ni_660x.c
+++ b/drivers/comedi/drivers/ni_660x.c
@@ -26,8 +26,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "mite.h"
#include "ni_tio.h"
diff --git a/drivers/comedi/drivers/ni_670x.c b/drivers/comedi/drivers/ni_670x.c
index c197e47486be..c875d251c230 100644
--- a/drivers/comedi/drivers/ni_670x.c
+++ b/drivers/comedi/drivers/ni_670x.c
@@ -24,8 +24,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#define AO_VALUE_OFFSET 0x00
#define AO_CHAN_OFFSET 0x0c
diff --git a/drivers/comedi/drivers/ni_at_a2150.c b/drivers/comedi/drivers/ni_at_a2150.c
index 10ad7b88713e..df8d219e6723 100644
--- a/drivers/comedi/drivers/ni_at_a2150.c
+++ b/drivers/comedi/drivers/ni_at_a2150.c
@@ -39,11 +39,9 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
#define A2150_DMA_BUFFER_SIZE 0xff00 /* size in bytes of dma buffer */
diff --git a/drivers/comedi/drivers/ni_at_ao.c b/drivers/comedi/drivers/ni_at_ao.c
index 2a0fb4d460db..9f3147b72aa8 100644
--- a/drivers/comedi/drivers/ni_at_ao.c
+++ b/drivers/comedi/drivers/ni_at_ao.c
@@ -25,10 +25,8 @@
*/
#include <linux/module.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/ni_atmio.c b/drivers/comedi/drivers/ni_atmio.c
index 56c78da475e7..8876a1d24c56 100644
--- a/drivers/comedi/drivers/ni_atmio.c
+++ b/drivers/comedi/drivers/ni_atmio.c
@@ -73,12 +73,11 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <linux/isapnp.h>
+#include <linux/comedi/comedi_8255.h>
#include "ni_stc.h"
-#include "8255.h"
/* AT specific setup */
static const struct ni_board_struct ni_boards[] = {
diff --git a/drivers/comedi/drivers/ni_atmio16d.c b/drivers/comedi/drivers/ni_atmio16d.c
index dffce1aa3e69..9fa902529a8e 100644
--- a/drivers/comedi/drivers/ni_atmio16d.c
+++ b/drivers/comedi/drivers/ni_atmio16d.c
@@ -39,9 +39,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
/* Configuration and Status Registers */
#define COM_REG_1 0x00 /* wo 16 */
diff --git a/drivers/comedi/drivers/ni_daq_700.c b/drivers/comedi/drivers/ni_daq_700.c
index d40fc89f9cef..0ef20e9a8bc4 100644
--- a/drivers/comedi/drivers/ni_daq_700.c
+++ b/drivers/comedi/drivers/ni_daq_700.c
@@ -41,8 +41,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pcmcia.h"
+#include <linux/comedi/comedi_pcmcia.h>
/* daqcard700 registers */
#define DIO_W 0x04 /* WO 8bit */
diff --git a/drivers/comedi/drivers/ni_daq_dio24.c b/drivers/comedi/drivers/ni_daq_dio24.c
index 44fb65afc218..487733111023 100644
--- a/drivers/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/comedi/drivers/ni_daq_dio24.c
@@ -23,9 +23,8 @@
*/
#include <linux/module.h>
-#include "../comedi_pcmcia.h"
-
-#include "8255.h"
+#include <linux/comedi/comedi_pcmcia.h>
+#include <linux/comedi/comedi_8255.h>
static int dio24_auto_attach(struct comedi_device *dev,
unsigned long context)
diff --git a/drivers/comedi/drivers/ni_labpc.c b/drivers/comedi/drivers/ni_labpc.c
index 1f4a07bd1d26..b25a8e117072 100644
--- a/drivers/comedi/drivers/ni_labpc.c
+++ b/drivers/comedi/drivers/ni_labpc.c
@@ -48,8 +48,7 @@
*/
#include <linux/module.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "ni_labpc.h"
#include "ni_labpc_isadma.h"
diff --git a/drivers/comedi/drivers/ni_labpc_common.c b/drivers/comedi/drivers/ni_labpc_common.c
index dd97946eacaf..763249653228 100644
--- a/drivers/comedi/drivers/ni_labpc_common.c
+++ b/drivers/comedi/drivers/ni_labpc_common.c
@@ -12,11 +12,10 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
#include "ni_labpc.h"
#include "ni_labpc_regs.h"
#include "ni_labpc_isadma.h"
diff --git a/drivers/comedi/drivers/ni_labpc_cs.c b/drivers/comedi/drivers/ni_labpc_cs.c
index 4f7e2fe21254..62fecb50ec6e 100644
--- a/drivers/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/comedi/drivers/ni_labpc_cs.c
@@ -38,8 +38,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pcmcia.h"
+#include <linux/comedi/comedi_pcmcia.h>
#include "ni_labpc.h"
diff --git a/drivers/comedi/drivers/ni_labpc_isadma.c b/drivers/comedi/drivers/ni_labpc_isadma.c
index a551aca6e615..0652ca8345b6 100644
--- a/drivers/comedi/drivers/ni_labpc_isadma.c
+++ b/drivers/comedi/drivers/ni_labpc_isadma.c
@@ -10,10 +10,9 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_isadma.h>
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
#include "ni_labpc.h"
#include "ni_labpc_regs.h"
#include "ni_labpc_isadma.h"
diff --git a/drivers/comedi/drivers/ni_labpc_pci.c b/drivers/comedi/drivers/ni_labpc_pci.c
index ec180b0fedf7..e2a44bbd9fa6 100644
--- a/drivers/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/comedi/drivers/ni_labpc_pci.c
@@ -22,8 +22,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "ni_labpc.h"
diff --git a/drivers/comedi/drivers/ni_mio_common.c b/drivers/comedi/drivers/ni_mio_common.c
index 4f80a4991f95..d39998565808 100644
--- a/drivers/comedi/drivers/ni_mio_common.c
+++ b/drivers/comedi/drivers/ni_mio_common.c
@@ -43,7 +43,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/delay.h>
-#include "8255.h"
+#include <linux/comedi/comedi_8255.h>
#include "mite.h"
/* A timeout count */
diff --git a/drivers/comedi/drivers/ni_mio_cs.c b/drivers/comedi/drivers/ni_mio_cs.c
index 4f37b4e58f09..796f0b743772 100644
--- a/drivers/comedi/drivers/ni_mio_cs.c
+++ b/drivers/comedi/drivers/ni_mio_cs.c
@@ -28,10 +28,10 @@
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/comedi/comedi_pcmcia.h>
+#include <linux/comedi/comedi_8255.h>
-#include "../comedi_pcmcia.h"
#include "ni_stc.h"
-#include "8255.h"
/*
* AT specific setup
diff --git a/drivers/comedi/drivers/ni_pcidio.c b/drivers/comedi/drivers/ni_pcidio.c
index 623f8d08d13a..2d58e83420e8 100644
--- a/drivers/comedi/drivers/ni_pcidio.c
+++ b/drivers/comedi/drivers/ni_pcidio.c
@@ -42,8 +42,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "mite.h"
diff --git a/drivers/comedi/drivers/ni_pcimio.c b/drivers/comedi/drivers/ni_pcimio.c
index 6c813a490ba5..0b055321023d 100644
--- a/drivers/comedi/drivers/ni_pcimio.c
+++ b/drivers/comedi/drivers/ni_pcimio.c
@@ -94,9 +94,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
-
+#include <linux/comedi/comedi_pci.h>
#include <asm/byteorder.h>
#include "ni_stc.h"
diff --git a/drivers/comedi/drivers/ni_routes.c b/drivers/comedi/drivers/ni_routes.c
index f0f8cd424b30..f24eeb464eba 100644
--- a/drivers/comedi/drivers/ni_routes.c
+++ b/drivers/comedi/drivers/ni_routes.c
@@ -21,8 +21,7 @@
#include <linux/slab.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
-
-#include "../comedi.h"
+#include <linux/comedi.h>
#include "ni_routes.h"
#include "ni_routing/ni_route_values.h"
diff --git a/drivers/comedi/drivers/ni_routes.h b/drivers/comedi/drivers/ni_routes.h
index 036982315584..cff8a463a03f 100644
--- a/drivers/comedi/drivers/ni_routes.h
+++ b/drivers/comedi/drivers/ni_routes.h
@@ -27,7 +27,7 @@
#include <linux/bitops.h>
#endif
-#include "../comedi.h"
+#include <linux/comedi.h>
/**
* struct ni_route_set - Set of destinations with a common source.
diff --git a/drivers/comedi/drivers/ni_routing/ni_route_values.h b/drivers/comedi/drivers/ni_routing/ni_route_values.h
index 6e358efa6f7f..80880083ea41 100644
--- a/drivers/comedi/drivers/ni_routing/ni_route_values.h
+++ b/drivers/comedi/drivers/ni_routing/ni_route_values.h
@@ -20,7 +20,7 @@
#ifndef _COMEDI_DRIVERS_NI_ROUTINT_NI_ROUTE_VALUES_H
#define _COMEDI_DRIVERS_NI_ROUTINT_NI_ROUTE_VALUES_H
-#include "../../comedi.h"
+#include <linux/comedi.h>
#include <linux/types.h>
/*
diff --git a/drivers/comedi/drivers/ni_routing/tools/.gitignore b/drivers/comedi/drivers/ni_routing/tools/.gitignore
index e3ebffcd900e..c12f825db266 100644
--- a/drivers/comedi/drivers/ni_routing/tools/.gitignore
+++ b/drivers/comedi/drivers/ni_routing/tools/.gitignore
@@ -5,4 +5,5 @@ ni_values.py
convert_c_to_py
c/
csv/
+linux/
all_cfiles.c
diff --git a/drivers/comedi/drivers/ni_routing/tools/Makefile b/drivers/comedi/drivers/ni_routing/tools/Makefile
index 6e92a06a44cb..31212101b3bc 100644
--- a/drivers/comedi/drivers/ni_routing/tools/Makefile
+++ b/drivers/comedi/drivers/ni_routing/tools/Makefile
@@ -3,7 +3,7 @@
# ni_route_values.h
# ni_device_routes.h
# in order to do this, we are also generating a python representation (using
-# ctypesgen) of ../../comedi.h.
+# ctypesgen) of ../../../../../include/uapi/linux/comedi.h.
# This allows us to sort NI signal/terminal names numerically to use a binary
# search through the device_routes tables to find valid routes.
@@ -30,13 +30,21 @@ ALL:
everything : csv-files c-files csv-blank
-CPPFLAGS=-D"BIT(x)=(1UL<<(x))" -D__user=
+CPPFLAGS = -D__user=
+INC_UAPI = ../../../../../include/uapi
-comedi_h.py : ../../../comedi.h
+comedi_h.py: $(INC_UAPI)/linux/comedi.h
ctypesgen $< --include "sys/ioctl.h" --cpp 'gcc -E $(CPPFLAGS)' -o $@
-convert_c_to_py: all_cfiles.c
- gcc -g convert_c_to_py.c -o convert_c_to_py -std=c99
+convert_c_to_py: all_cfiles.c linux/comedi.h
+ gcc -g -I. convert_c_to_py.c -o convert_c_to_py -std=c99
+
+# Create a local 'linux/comedi.h' for use when compiling 'convert_c_to_py.c'
+# with the '-I.' option. (Cannot specify '-I../../../../../include/uapi'
+# because that interferes with inclusion of other system headers.)
+linux/comedi.h: $(INC_UAPI)/linux/comedi.h
+ mkdir -p linux
+ ln -snf ../$< $@
ni_values.py: convert_c_to_py
./convert_c_to_py
@@ -44,7 +52,7 @@ ni_values.py: convert_c_to_py
csv-files : ni_values.py comedi_h.py
./convert_py_to_csv.py
-csv-blank :
+csv-blank : comedi_h.py
./make_blank_csv.py
@echo New blank csv signal table in csv/blank_route_table.csv
@@ -62,17 +70,16 @@ clean-partial :
$(RM) -rf comedi_h.py ni_values.py convert_c_to_py all_cfiles.c *.pyc \
__pycache__/
-clean : partial_clean
- $(RM) -rf c/ csv/
+clean : clean-partial
+ $(RM) -rf c/ csv/ linux/
# Note: One could also use ctypeslib in order to generate these files. The
# caveat is that ctypeslib does not do a great job at handling macro functions.
# The make rules are as follows:
-# comedi.h.xml : ../../comedi.h
+# comedi.h.xml : $(INC_UAPI)/linux/comedi.h
# # note that we have to use PWD here to avoid h2xml finding a system
# # installed version of the comedilib/comedi.h file
-# h2xml ${PWD}/../../comedi.h -c -D__user="" -D"BIT(x)=(1<<(x))" \
-# -o comedi.h.xml
+# h2xml ${PWD}/$(INC_UAPI)/linux/comedi.h -c -D__user="" -o comedi.h.xml
#
# comedi_h.py : comedi.h.xml
# xml2py ./comedi.h.xml -o comedi_h.py
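The new linux/comedi.h symlink rule deserves a word: the C sources concatenated into all_cfiles.c now reach the UAPI header through the kernel-style spelling (see the ni_route_values.h hunk above), so the tool build needs a local 'linux/' directory that '-I.' can satisfy. A rough way to confirm the shim works is the following illustrative file, which is not part of the patch:

/* shim_demo.c: illustrative only; not part of this patch.
 * After `make linux/comedi.h` in the tools directory, build with:
 *         gcc -I. -std=c99 -c shim_demo.c
 * The -I. option lets ./linux/comedi.h (a symlink to the UAPI header)
 * resolve the kernel-style include below without pulling in the rest
 * of include/uapi.
 */
#include <linux/comedi.h>

unsigned int demo_aref = AREF_GROUND;   /* any UAPI symbol proves resolution */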
diff --git a/drivers/comedi/drivers/ni_tio.h b/drivers/comedi/drivers/ni_tio.h
index e7b05718df9b..9ae2221c3c18 100644
--- a/drivers/comedi/drivers/ni_tio.h
+++ b/drivers/comedi/drivers/ni_tio.h
@@ -8,7 +8,7 @@
#ifndef _COMEDI_NI_TIO_H
#define _COMEDI_NI_TIO_H
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
enum ni_gpct_register {
NITIO_G0_AUTO_INC,
diff --git a/drivers/comedi/drivers/ni_usb6501.c b/drivers/comedi/drivers/ni_usb6501.c
index c42987b74b1d..0dd9edf7bced 100644
--- a/drivers/comedi/drivers/ni_usb6501.c
+++ b/drivers/comedi/drivers/ni_usb6501.c
@@ -87,8 +87,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
#define NI6501_TIMEOUT 1000
diff --git a/drivers/comedi/drivers/pcl711.c b/drivers/comedi/drivers/pcl711.c
index bd6f42fe9e3c..05172c553c8a 100644
--- a/drivers/comedi/drivers/pcl711.c
+++ b/drivers/comedi/drivers/pcl711.c
@@ -29,10 +29,8 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
/*
* I/O port register map
diff --git a/drivers/comedi/drivers/pcl724.c b/drivers/comedi/drivers/pcl724.c
index 1a5799278a7a..948a0576c9ef 100644
--- a/drivers/comedi/drivers/pcl724.c
+++ b/drivers/comedi/drivers/pcl724.c
@@ -25,9 +25,8 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
struct pcl724_board {
const char *name;
diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c
index 88f25d7e76f7..0430630e6ebb 100644
--- a/drivers/comedi/drivers/pcl726.c
+++ b/drivers/comedi/drivers/pcl726.c
@@ -50,8 +50,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#define PCL726_AO_MSB_REG(x) (0x00 + ((x) * 2))
#define PCL726_AO_LSB_REG(x) (0x01 + ((x) * 2))
diff --git a/drivers/comedi/drivers/pcl730.c b/drivers/comedi/drivers/pcl730.c
index 32a29129e6e8..d2733cd5383d 100644
--- a/drivers/comedi/drivers/pcl730.c
+++ b/drivers/comedi/drivers/pcl730.c
@@ -25,7 +25,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c
index b87ab3840eee..70dbc129fcf5 100644
--- a/drivers/comedi/drivers/pcl812.c
+++ b/drivers/comedi/drivers/pcl812.c
@@ -114,11 +114,9 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/pcl816.c b/drivers/comedi/drivers/pcl816.c
index c368a337a0ae..a5e5320be648 100644
--- a/drivers/comedi/drivers/pcl816.c
+++ b/drivers/comedi/drivers/pcl816.c
@@ -35,11 +35,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/pcl818.c b/drivers/comedi/drivers/pcl818.c
index f4b4a686c710..29e503de8267 100644
--- a/drivers/comedi/drivers/pcl818.c
+++ b/drivers/comedi/drivers/pcl818.c
@@ -97,11 +97,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/pcm3724.c b/drivers/comedi/drivers/pcm3724.c
index 0cb1ad060402..e4103f9eeced 100644
--- a/drivers/comedi/drivers/pcm3724.c
+++ b/drivers/comedi/drivers/pcm3724.c
@@ -24,9 +24,8 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
/*
* Register I/O Map
diff --git a/drivers/comedi/drivers/pcmad.c b/drivers/comedi/drivers/pcmad.c
index eec89a0afb2f..976eda43881b 100644
--- a/drivers/comedi/drivers/pcmad.c
+++ b/drivers/comedi/drivers/pcmad.c
@@ -29,7 +29,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#define PCMAD_STATUS 0
#define PCMAD_LSB 1
diff --git a/drivers/comedi/drivers/pcmda12.c b/drivers/comedi/drivers/pcmda12.c
index 14ab1f0d1e9f..611f13bedca0 100644
--- a/drivers/comedi/drivers/pcmda12.c
+++ b/drivers/comedi/drivers/pcmda12.c
@@ -40,7 +40,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/* AI range is not configurable, it's set by jumpers on the board */
static const struct comedi_lrange pcmda12_ranges = {
diff --git a/drivers/comedi/drivers/pcmmio.c b/drivers/comedi/drivers/pcmmio.c
index 24a9568d3378..c2402239d551 100644
--- a/drivers/comedi/drivers/pcmmio.c
+++ b/drivers/comedi/drivers/pcmmio.c
@@ -66,8 +66,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/pcmuio.c b/drivers/comedi/drivers/pcmuio.c
index b299d648a0eb..33b24dbbb919 100644
--- a/drivers/comedi/drivers/pcmuio.c
+++ b/drivers/comedi/drivers/pcmuio.c
@@ -65,8 +65,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/quatech_daqp_cs.c b/drivers/comedi/drivers/quatech_daqp_cs.c
index fe4408ebf6b3..2a76c75c513b 100644
--- a/drivers/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/comedi/drivers/quatech_daqp_cs.c
@@ -41,8 +41,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pcmcia.h"
+#include <linux/comedi/comedi_pcmcia.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/rtd520.c b/drivers/comedi/drivers/rtd520.c
index 2d99a648b054..7e0ec1a2a2ca 100644
--- a/drivers/comedi/drivers/rtd520.c
+++ b/drivers/comedi/drivers/rtd520.c
@@ -85,10 +85,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
#include "plx9080.h"
/*
diff --git a/drivers/comedi/drivers/rti800.c b/drivers/comedi/drivers/rti800.c
index 327fd93b8b12..1b02e47bdb4c 100644
--- a/drivers/comedi/drivers/rti800.c
+++ b/drivers/comedi/drivers/rti800.c
@@ -42,7 +42,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/rti802.c b/drivers/comedi/drivers/rti802.c
index 195e2b1ac4c1..d66762a22258 100644
--- a/drivers/comedi/drivers/rti802.c
+++ b/drivers/comedi/drivers/rti802.c
@@ -22,7 +22,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/s526.c b/drivers/comedi/drivers/s526.c
index 085cf5b449e5..9245c679a3c4 100644
--- a/drivers/comedi/drivers/s526.c
+++ b/drivers/comedi/drivers/s526.c
@@ -27,7 +27,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/s626.c b/drivers/comedi/drivers/s626.c
index e7aba937d896..0e5f9a9a7fd3 100644
--- a/drivers/comedi/drivers/s626.c
+++ b/drivers/comedi/drivers/s626.c
@@ -55,8 +55,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "s626.h"
diff --git a/drivers/comedi/drivers/ssv_dnp.c b/drivers/comedi/drivers/ssv_dnp.c
index 016d315aa584..813bd0853b0b 100644
--- a/drivers/comedi/drivers/ssv_dnp.c
+++ b/drivers/comedi/drivers/ssv_dnp.c
@@ -19,7 +19,7 @@
/* include files ----------------------------------------------------------- */
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/* Some global definitions: the registers of the DNP ----------------------- */
/* */
diff --git a/drivers/comedi/drivers/usbdux.c b/drivers/comedi/drivers/usbdux.c
index 0350f303d557..92d514b3c1c3 100644
--- a/drivers/comedi/drivers/usbdux.c
+++ b/drivers/comedi/drivers/usbdux.c
@@ -73,8 +73,7 @@
#include <linux/input.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
-
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
/* constants for firmware upload and download */
#define USBDUX_FIRMWARE "usbdux_firmware.bin"
diff --git a/drivers/comedi/drivers/usbduxfast.c b/drivers/comedi/drivers/usbduxfast.c
index 4af012968cb6..39faae0ecb19 100644
--- a/drivers/comedi/drivers/usbduxfast.c
+++ b/drivers/comedi/drivers/usbduxfast.c
@@ -40,7 +40,7 @@
#include <linux/input.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
/*
* timeout for the USB-transfer
diff --git a/drivers/comedi/drivers/usbduxsigma.c b/drivers/comedi/drivers/usbduxsigma.c
index 54d7605e909f..2aaeaf44fbe5 100644
--- a/drivers/comedi/drivers/usbduxsigma.c
+++ b/drivers/comedi/drivers/usbduxsigma.c
@@ -40,8 +40,7 @@
#include <linux/fcntl.h>
#include <linux/compiler.h>
#include <asm/unaligned.h>
-
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
/* timeout for the USB-transfer in ms*/
#define BULK_TIMEOUT 1000
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 4b00a9ea611a..46023adc5395 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -35,8 +35,7 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
-
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
enum {
DEVICE_VMK8055,
diff --git a/drivers/comedi/kcomedilib/kcomedilib_main.c b/drivers/comedi/kcomedilib/kcomedilib_main.c
index df9bba1b69ed..43fbe1a63b14 100644
--- a/drivers/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/comedi/kcomedilib/kcomedilib_main.c
@@ -16,9 +16,9 @@
#include <linux/mm.h>
#include <linux/io.h>
-#include "../comedi.h"
-#include "../comedilib.h"
-#include "../comedidev.h"
+#include <linux/comedi.h>
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedilib.h>
MODULE_AUTHOR("David Schleef <ds@schleef.org>");
MODULE_DESCRIPTION("Comedi kernel library");
diff --git a/drivers/comedi/proc.c b/drivers/comedi/proc.c
index 8bc8e42beb90..2e4496633d3d 100644
--- a/drivers/comedi/proc.c
+++ b/drivers/comedi/proc.c
@@ -13,7 +13,7 @@
* was cool.
*/
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
diff --git a/drivers/comedi/range.c b/drivers/comedi/range.c
index a4e6fe0fb729..8f43cf88d784 100644
--- a/drivers/comedi/range.c
+++ b/drivers/comedi/range.c
@@ -8,7 +8,7 @@
*/
#include <linux/uaccess.h>
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"
const struct comedi_lrange range_bipolar10 = { 1, {BIP_RANGE(10)} };
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 646ad385e490..ccac1c453080 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -358,7 +358,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
* other namespaces.
*/
if ((current_user_ns() != &init_user_ns) ||
- (task_active_pid_ns(current) != &init_pid_ns))
+ !task_is_in_init_pid_ns(current))
return;
/* Can only change if privileged. */
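task_is_in_init_pid_ns() is presumably the trivial <linux/pid.h> wrapper sketched below; it is shown for reference only, since its definition is not part of this hunk:

/* Assumed definition from <linux/pid.h>; not part of this diff. */
static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
{
        return task_active_pid_ns(tsk) == &init_pid_ns;
}

With that definition the call site keeps its behaviour: proc connector events are still delivered only to listeners in the initial PID namespace.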
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 1cbd60aaed69..a17e51d65aca 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/isa.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
@@ -44,7 +45,6 @@ MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
* @ab_enable: array of A and B inputs enable configurations
* @preset_enable: array of set_to_preset_on_index attribute configurations
* @irq_trigger: array of current IRQ trigger function configurations
- * @next_irq_trigger: array of next IRQ trigger function configurations
* @synchronous_mode: array of index function synchronous mode configurations
* @index_polarity: array of index function polarity configurations
* @cable_fault_enable: differential encoder cable status enable configurations
@@ -52,7 +52,6 @@ MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
*/
struct quad8 {
spinlock_t lock;
- struct counter_device counter;
unsigned int fck_prescaler[QUAD8_NUM_COUNTERS];
unsigned int preset[QUAD8_NUM_COUNTERS];
unsigned int count_mode[QUAD8_NUM_COUNTERS];
@@ -61,7 +60,6 @@ struct quad8 {
unsigned int ab_enable[QUAD8_NUM_COUNTERS];
unsigned int preset_enable[QUAD8_NUM_COUNTERS];
unsigned int irq_trigger[QUAD8_NUM_COUNTERS];
- unsigned int next_irq_trigger[QUAD8_NUM_COUNTERS];
unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
unsigned int index_polarity[QUAD8_NUM_COUNTERS];
unsigned int cable_fault_enable;
@@ -113,7 +111,7 @@ static int quad8_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
unsigned int state;
/* Only Index signal levels can be read */
@@ -131,7 +129,7 @@ static int quad8_signal_read(struct counter_device *counter,
static int quad8_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int base_offset = priv->base + 2 * count->id;
unsigned int flags;
unsigned int borrow;
@@ -163,7 +161,7 @@ static int quad8_count_read(struct counter_device *counter,
static int quad8_count_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int base_offset = priv->base + 2 * count->id;
unsigned long irqflags;
int i;
@@ -213,7 +211,7 @@ static int quad8_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int id = count->id;
unsigned long irqflags;
@@ -243,7 +241,7 @@ static int quad8_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int id = count->id;
unsigned int *const quadrature_mode = priv->quadrature_mode + id;
unsigned int *const scale = priv->quadrature_scale + id;
@@ -305,7 +303,7 @@ static int quad8_direction_read(struct counter_device *counter,
struct counter_count *count,
enum counter_count_direction *direction)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
unsigned int ud_flag;
const unsigned int flag_addr = priv->base + 2 * count->id + 1;
@@ -335,7 +333,7 @@ static int quad8_action_read(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
int err;
enum counter_function function;
const size_t signal_a_id = count->synapses[0].signal->id;
@@ -390,7 +388,6 @@ static int quad8_action_read(struct counter_device *counter,
}
enum {
- QUAD8_EVENT_NONE = -1,
QUAD8_EVENT_CARRY = 0,
QUAD8_EVENT_COMPARE = 1,
QUAD8_EVENT_CARRY_BORROW = 2,
@@ -399,37 +396,52 @@ enum {
static int quad8_events_configure(struct counter_device *counter)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
unsigned long irq_enabled = 0;
unsigned long irqflags;
- size_t channel;
+ struct counter_event_node *event_node;
+ unsigned int next_irq_trigger;
unsigned long ior_cfg;
unsigned long base_offset;
spin_lock_irqsave(&priv->lock, irqflags);
- /* Enable interrupts for the requested channels, disable for the rest */
- for (channel = 0; channel < QUAD8_NUM_COUNTERS; channel++) {
- if (priv->next_irq_trigger[channel] == QUAD8_EVENT_NONE)
+ list_for_each_entry(event_node, &counter->events_list, l) {
+ switch (event_node->event) {
+ case COUNTER_EVENT_OVERFLOW:
+ next_irq_trigger = QUAD8_EVENT_CARRY;
+ break;
+ case COUNTER_EVENT_THRESHOLD:
+ next_irq_trigger = QUAD8_EVENT_COMPARE;
+ break;
+ case COUNTER_EVENT_OVERFLOW_UNDERFLOW:
+ next_irq_trigger = QUAD8_EVENT_CARRY_BORROW;
+ break;
+ case COUNTER_EVENT_INDEX:
+ next_irq_trigger = QUAD8_EVENT_INDEX;
+ break;
+ default:
+ /* should never reach this path */
+ spin_unlock_irqrestore(&priv->lock, irqflags);
+ return -EINVAL;
+ }
+
+ /* Skip configuration if it is the same as previously set */
+ if (priv->irq_trigger[event_node->channel] == next_irq_trigger)
continue;
- if (priv->irq_trigger[channel] != priv->next_irq_trigger[channel]) {
- /* Save new IRQ function configuration */
- priv->irq_trigger[channel] = priv->next_irq_trigger[channel];
+ /* Save new IRQ function configuration */
+ priv->irq_trigger[event_node->channel] = next_irq_trigger;
- /* Load configuration to I/O Control Register */
- ior_cfg = priv->ab_enable[channel] |
- priv->preset_enable[channel] << 1 |
- priv->irq_trigger[channel] << 3;
- base_offset = priv->base + 2 * channel + 1;
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
- }
-
- /* Reset next IRQ trigger function configuration */
- priv->next_irq_trigger[channel] = QUAD8_EVENT_NONE;
+ /* Load configuration to I/O Control Register */
+ ior_cfg = priv->ab_enable[event_node->channel] |
+ priv->preset_enable[event_node->channel] << 1 |
+ priv->irq_trigger[event_node->channel] << 3;
+ base_offset = priv->base + 2 * event_node->channel + 1;
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
/* Enable IRQ line */
- irq_enabled |= BIT(channel);
+ irq_enabled |= BIT(event_node->channel);
}
outb(irq_enabled, priv->base + QUAD8_REG_INDEX_INTERRUPT);
@@ -442,35 +454,20 @@ static int quad8_events_configure(struct counter_device *counter)
static int quad8_watch_validate(struct counter_device *counter,
const struct counter_watch *watch)
{
- struct quad8 *const priv = counter->priv;
+ struct counter_event_node *event_node;
if (watch->channel > QUAD8_NUM_COUNTERS - 1)
return -EINVAL;
switch (watch->event) {
case COUNTER_EVENT_OVERFLOW:
- if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
- priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_CARRY;
- else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_CARRY)
- return -EINVAL;
- return 0;
case COUNTER_EVENT_THRESHOLD:
- if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
- priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_COMPARE;
- else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_COMPARE)
- return -EINVAL;
- return 0;
case COUNTER_EVENT_OVERFLOW_UNDERFLOW:
- if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
- priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_CARRY_BORROW;
- else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_CARRY_BORROW)
- return -EINVAL;
- return 0;
case COUNTER_EVENT_INDEX:
- if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
- priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_INDEX;
- else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_INDEX)
- return -EINVAL;
+ list_for_each_entry(event_node, &counter->next_events_list, l)
+ if (watch->channel == event_node->channel &&
+ watch->event != event_node->event)
+ return -EINVAL;
return 0;
default:
return -EINVAL;
@@ -497,7 +494,7 @@ static int quad8_index_polarity_get(struct counter_device *counter,
struct counter_signal *signal,
u32 *index_polarity)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
*index_polarity = priv->index_polarity[channel_id];
@@ -509,7 +506,7 @@ static int quad8_index_polarity_set(struct counter_device *counter,
struct counter_signal *signal,
u32 index_polarity)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
const int base_offset = priv->base + 2 * channel_id + 1;
unsigned long irqflags;
@@ -538,7 +535,7 @@ static int quad8_synchronous_mode_get(struct counter_device *counter,
struct counter_signal *signal,
u32 *synchronous_mode)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
*synchronous_mode = priv->synchronous_mode[channel_id];
@@ -550,7 +547,7 @@ static int quad8_synchronous_mode_set(struct counter_device *counter,
struct counter_signal *signal,
u32 synchronous_mode)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
const int base_offset = priv->base + 2 * channel_id + 1;
unsigned long irqflags;
@@ -589,7 +586,7 @@ static int quad8_count_mode_read(struct counter_device *counter,
struct counter_count *count,
enum counter_count_mode *cnt_mode)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
/* Map 104-QUAD-8 count mode to Generic Counter count mode */
switch (priv->count_mode[count->id]) {
@@ -614,7 +611,7 @@ static int quad8_count_mode_write(struct counter_device *counter,
struct counter_count *count,
enum counter_count_mode cnt_mode)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
unsigned int count_mode;
unsigned int mode_cfg;
const int base_offset = priv->base + 2 * count->id + 1;
@@ -661,7 +658,7 @@ static int quad8_count_mode_write(struct counter_device *counter,
static int quad8_count_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
*enable = priv->ab_enable[count->id];
@@ -671,7 +668,7 @@ static int quad8_count_enable_read(struct counter_device *counter,
static int quad8_count_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int base_offset = priv->base + 2 * count->id;
unsigned long irqflags;
unsigned int ior_cfg;
@@ -699,7 +696,7 @@ static const char *const quad8_noise_error_states[] = {
static int quad8_error_noise_get(struct counter_device *counter,
struct counter_count *count, u32 *noise_error)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const int base_offset = priv->base + 2 * count->id + 1;
*noise_error = !!(inb(base_offset) & QUAD8_FLAG_E);
@@ -710,7 +707,7 @@ static int quad8_error_noise_get(struct counter_device *counter,
static int quad8_count_preset_read(struct counter_device *counter,
struct counter_count *count, u64 *preset)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
*preset = priv->preset[count->id];
@@ -736,7 +733,7 @@ static void quad8_preset_register_set(struct quad8 *const priv, const int id,
static int quad8_count_preset_write(struct counter_device *counter,
struct counter_count *count, u64 preset)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
/* Only 24-bit values are supported */
@@ -755,7 +752,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
static int quad8_count_ceiling_read(struct counter_device *counter,
struct counter_count *count, u64 *ceiling)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -780,7 +777,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
static int quad8_count_ceiling_write(struct counter_device *counter,
struct counter_count *count, u64 ceiling)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
/* Only 24-bit values are supported */
@@ -807,7 +804,7 @@ static int quad8_count_preset_enable_read(struct counter_device *counter,
struct counter_count *count,
u8 *preset_enable)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
*preset_enable = !priv->preset_enable[count->id];
@@ -818,7 +815,7 @@ static int quad8_count_preset_enable_write(struct counter_device *counter,
struct counter_count *count,
u8 preset_enable)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int base_offset = priv->base + 2 * count->id + 1;
unsigned long irqflags;
unsigned int ior_cfg;
@@ -845,7 +842,7 @@ static int quad8_signal_cable_fault_read(struct counter_device *counter,
struct counter_signal *signal,
u8 *cable_fault)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
unsigned long irqflags;
bool disabled;
@@ -875,7 +872,7 @@ static int quad8_signal_cable_fault_enable_read(struct counter_device *counter,
struct counter_signal *signal,
u8 *enable)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
*enable = !!(priv->cable_fault_enable & BIT(channel_id));
@@ -887,7 +884,7 @@ static int quad8_signal_cable_fault_enable_write(struct counter_device *counter,
struct counter_signal *signal,
u8 enable)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
unsigned long irqflags;
unsigned int cable_fault_enable;
@@ -913,7 +910,7 @@ static int quad8_signal_fck_prescaler_read(struct counter_device *counter,
struct counter_signal *signal,
u8 *prescaler)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
*prescaler = priv->fck_prescaler[signal->id / 2];
@@ -924,7 +921,7 @@ static int quad8_signal_fck_prescaler_write(struct counter_device *counter,
struct counter_signal *signal,
u8 prescaler)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
const int base_offset = priv->base + 2 * channel_id;
unsigned long irqflags;
@@ -1085,7 +1082,8 @@ static struct counter_count quad8_counts[] = {
static irqreturn_t quad8_irq_handler(int irq, void *private)
{
- struct quad8 *const priv = private;
+ struct counter_device *counter = private;
+ struct quad8 *const priv = counter_priv(counter);
const unsigned long base = priv->base;
unsigned long irq_status;
unsigned long channel;
@@ -1116,7 +1114,7 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
continue;
}
- counter_push_event(&priv->counter, event, channel);
+ counter_push_event(counter, event, channel);
}
/* Clear pending interrupts on device */
@@ -1127,6 +1125,7 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
static int quad8_probe(struct device *dev, unsigned int id)
{
+ struct counter_device *counter;
struct quad8 *priv;
int i, j;
unsigned int base_offset;
@@ -1138,19 +1137,19 @@ static int quad8_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
/* Initialize Counter device and driver data */
- priv->counter.name = dev_name(dev);
- priv->counter.parent = dev;
- priv->counter.ops = &quad8_ops;
- priv->counter.counts = quad8_counts;
- priv->counter.num_counts = ARRAY_SIZE(quad8_counts);
- priv->counter.signals = quad8_signals;
- priv->counter.num_signals = ARRAY_SIZE(quad8_signals);
- priv->counter.priv = priv;
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &quad8_ops;
+ counter->counts = quad8_counts;
+ counter->num_counts = ARRAY_SIZE(quad8_counts);
+ counter->signals = quad8_signals;
+ counter->num_signals = ARRAY_SIZE(quad8_signals);
priv->base = base[id];
spin_lock_init(&priv->lock);
@@ -1183,20 +1182,22 @@ static int quad8_probe(struct device *dev, unsigned int id)
outb(QUAD8_CTR_IOR, base_offset + 1);
/* Disable index function; negative index polarity */
outb(QUAD8_CTR_IDR, base_offset + 1);
- /* Initialize next IRQ trigger function configuration */
- priv->next_irq_trigger[i] = QUAD8_EVENT_NONE;
}
/* Disable Differential Encoder Cable Status for all channels */
outb(0xFF, base[id] + QUAD8_DIFF_ENCODER_CABLE_STATUS);
/* Enable all counters and enable interrupt function */
outb(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, base[id] + QUAD8_REG_CHAN_OP);
- err = devm_request_irq(dev, irq[id], quad8_irq_handler, IRQF_SHARED,
- priv->counter.name, priv);
+ err = devm_request_irq(&counter->dev, irq[id], quad8_irq_handler,
+ IRQF_SHARED, counter->name, counter);
if (err)
return err;
- return devm_counter_register(dev, &priv->counter);
+ err = devm_counter_add(dev, counter);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to add counter\n");
+
+ return 0;
}
static struct isa_driver quad8_driver = {
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
index 5acc54539623..869894b74741 100644
--- a/drivers/counter/counter-core.c
+++ b/drivers/counter/counter-core.c
@@ -15,6 +15,7 @@
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -24,12 +25,25 @@
/* Provides a unique ID for each counter device */
static DEFINE_IDA(counter_ida);
+struct counter_device_allochelper {
+ struct counter_device counter;
+
+ /*
+ * This is cache line aligned to ensure that the private data behaves
+ * as if it were kmalloced separately.
+ */
+ unsigned long privdata[] ____cacheline_aligned;
+};
+
static void counter_device_release(struct device *dev)
{
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter =
+ container_of(dev, struct counter_device, dev);
counter_chrdev_remove(counter);
ida_free(&counter_ida, dev->id);
+
+ kfree(container_of(counter, struct counter_device_allochelper, counter));
}
static struct device_type counter_device_type = {
@@ -45,62 +59,105 @@ static struct bus_type counter_bus_type = {
static dev_t counter_devt;
/**
- * counter_register - register Counter to the system
- * @counter: pointer to Counter to register
+ * counter_priv - access counter device private data
+ * @counter: counter device
*
- * This function registers a Counter to the system. A sysfs "counter" directory
- * will be created and populated with sysfs attributes correlating with the
- * Counter Signals, Synapses, and Counts respectively.
+ * Get the counter device private data
+ */
+void *counter_priv(const struct counter_device *const counter)
+{
+ struct counter_device_allochelper *ch =
+ container_of(counter, struct counter_device_allochelper, counter);
+
+ return &ch->privdata;
+}
+EXPORT_SYMBOL_GPL(counter_priv);
+
+/**
+ * counter_alloc - allocate a counter_device
+ * @sizeof_priv: size of the driver private data
+ *
+ * This is part one of counter registration. The structure is allocated
+ * dynamically to ensure the right lifetime for the embedded struct device.
*
- * RETURNS:
- * 0 on success, negative error number on failure.
+ * If this succeeds, call counter_put() to get rid of the counter_device again.
*/
-int counter_register(struct counter_device *const counter)
+struct counter_device *counter_alloc(size_t sizeof_priv)
{
- struct device *const dev = &counter->dev;
- int id;
+ struct counter_device_allochelper *ch;
+ struct counter_device *counter;
+ struct device *dev;
int err;
+ ch = kzalloc(sizeof(*ch) + sizeof_priv, GFP_KERNEL);
+ if (!ch)
+ return NULL;
+
+ counter = &ch->counter;
+ dev = &counter->dev;
+
/* Acquire unique ID */
- id = ida_alloc(&counter_ida, GFP_KERNEL);
- if (id < 0)
- return id;
+ err = ida_alloc(&counter_ida, GFP_KERNEL);
+ if (err < 0)
+ goto err_ida_alloc;
+ dev->id = err;
mutex_init(&counter->ops_exist_lock);
-
- /* Configure device structure for Counter */
- dev->id = id;
dev->type = &counter_device_type;
dev->bus = &counter_bus_type;
- dev->devt = MKDEV(MAJOR(counter_devt), id);
+ dev->devt = MKDEV(MAJOR(counter_devt), dev->id);
+
+ err = counter_chrdev_add(counter);
+ if (err < 0)
+ goto err_chrdev_add;
+
+ device_initialize(dev);
+
+ return counter;
+
+err_chrdev_add:
+
+ ida_free(&counter_ida, dev->id);
+err_ida_alloc:
+
+ kfree(ch);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(counter_alloc);
+
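+/**
+ * counter_put - release a counter_device allocated with counter_alloc()
+ * @counter: the counter to drop the reference of
+ *
+ * When the last reference is dropped, the embedded struct device is released
+ * and the allocation is freed.
+ */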
+void counter_put(struct counter_device *counter)
+{
+ put_device(&counter->dev);
+}
+EXPORT_SYMBOL_GPL(counter_put);
+
+/**
+ * counter_add - complete registration of a counter
+ * @counter: the counter to add
+ *
+ * This is part two of counter registration.
+ *
+ * If this succeeds, call counter_unregister() to get rid of the counter_device again.
+ */
+int counter_add(struct counter_device *counter)
+{
+ int err;
+ struct device *dev = &counter->dev;
+
if (counter->parent) {
dev->parent = counter->parent;
dev->of_node = counter->parent->of_node;
}
- device_initialize(dev);
- dev_set_drvdata(dev, counter);
err = counter_sysfs_add(counter);
if (err < 0)
- goto err_free_id;
-
- err = counter_chrdev_add(counter);
- if (err < 0)
- goto err_free_id;
-
- err = cdev_device_add(&counter->chrdev, dev);
- if (err < 0)
- goto err_remove_chrdev;
-
- return 0;
+ return err;
-err_remove_chrdev:
- counter_chrdev_remove(counter);
-err_free_id:
- put_device(dev);
- return err;
+ /* implies device_add(dev) */
+ return cdev_device_add(&counter->chrdev, dev);
}
-EXPORT_SYMBOL_GPL(counter_register);
+EXPORT_SYMBOL_GPL(counter_add);
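Taken together, counter_alloc(), counter_priv(), counter_add(), counter_put() and counter_unregister() give the full manual lifecycle. A minimal sketch of it, assuming a hypothetical "foo" driver (names and fields invented for illustration):

struct foo {
	void __iomem *regs;	/* hypothetical device registers */
};

static int foo_register(struct device *dev)
{
	struct counter_device *counter;
	struct foo *priv;
	int err;

	/* Part one: allocate the counter plus driver private data. */
	counter = counter_alloc(sizeof(*priv));
	if (!counter)
		return -ENOMEM;
	priv = counter_priv(counter);

	counter->name = dev_name(dev);
	counter->parent = dev;
	/* ... assign ops, counts and signals as in the drivers below ... */

	/* Part two: complete the registration. */
	err = counter_add(counter);
	if (err < 0) {
		counter_put(counter);	/* drops the last reference */
		return err;
	}

	return 0;
}

/* Teardown is counter_unregister(counter) followed by counter_put(counter). */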
/**
* counter_unregister - unregister Counter from the system
@@ -121,8 +178,6 @@ void counter_unregister(struct counter_device *const counter)
wake_up(&counter->events_wait);
mutex_unlock(&counter->ops_exist_lock);
-
- put_device(&counter->dev);
}
EXPORT_SYMBOL_GPL(counter_unregister);
@@ -131,30 +186,56 @@ static void devm_counter_release(void *counter)
counter_unregister(counter);
}
+static void devm_counter_put(void *counter)
+{
+ counter_put(counter);
+}
+
/**
- * devm_counter_register - Resource-managed counter_register
- * @dev: device to allocate counter_device for
- * @counter: pointer to Counter to register
+ * devm_counter_alloc - allocate a counter_device
+ * @dev: the device to register the release callback for
+ * @sizeof_priv: size of the driver private data
*
- * Managed counter_register. The Counter registered with this function is
- * automatically unregistered on driver detach. This function calls
- * counter_register internally. Refer to that function for more information.
+ * This is the device managed version of counter_alloc(). It registers a
+ * cleanup callback that takes care of calling counter_put().
+ */
+struct counter_device *devm_counter_alloc(struct device *dev, size_t sizeof_priv)
+{
+ struct counter_device *counter;
+ int err;
+
+ counter = counter_alloc(sizeof_priv);
+ if (!counter)
+ return NULL;
+
+ err = devm_add_action_or_reset(dev, devm_counter_put, counter);
+ if (err < 0)
+ return NULL;
+
+ return counter;
+}
+EXPORT_SYMBOL_GPL(devm_counter_alloc);
+
+/**
+ * devm_counter_add - complete registration of a counter
+ * @dev: the device to register the release callback for
+ * @counter: the counter to add
*
- * RETURNS:
- * 0 on success, negative error number on failure.
+ * This is the device managed version of counter_add(). It registers a
+ * cleanup callback that takes care of calling counter_unregister().
*/
-int devm_counter_register(struct device *dev,
- struct counter_device *const counter)
+int devm_counter_add(struct device *dev,
+ struct counter_device *const counter)
{
int err;
- err = counter_register(counter);
+ err = counter_add(counter);
if (err < 0)
return err;
return devm_add_action_or_reset(dev, devm_counter_release, counter);
}
-EXPORT_SYMBOL_GPL(devm_counter_register);
+EXPORT_SYMBOL_GPL(devm_counter_add);
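Most drivers use the device-managed pair instead, as every conversion below does. A probe sketch for the same hypothetical "foo" driver:

static int foo_probe(struct platform_device *pdev)
{
	struct counter_device *counter;
	struct foo *priv;
	int err;

	counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
	if (!counter)
		return -ENOMEM;
	priv = counter_priv(counter);

	/* ... fill in priv and counter->name, ops, counts, signals ... */

	err = devm_counter_add(&pdev->dev, counter);
	if (err < 0)
		return dev_err_probe(&pdev->dev, err, "Failed to add counter\n");

	return 0;
}

Both the final put and the unregister then happen automatically on driver detach.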
#define COUNTER_DEV_MAX 256
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 5ef0478709cd..2a58582a9df4 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -26,7 +26,6 @@
})
struct ftm_quaddec {
- struct counter_device counter;
struct platform_device *pdev;
void __iomem *ftm_base;
bool big_endian;
@@ -118,7 +117,7 @@ static void ftm_quaddec_disable(void *ftm)
static int ftm_quaddec_get_prescaler(struct counter_device *counter,
struct counter_count *count, u32 *cnt_mode)
{
- struct ftm_quaddec *ftm = counter->priv;
+ struct ftm_quaddec *ftm = counter_priv(counter);
uint32_t scflags;
ftm_read(ftm, FTM_SC, &scflags);
@@ -131,7 +130,7 @@ static int ftm_quaddec_get_prescaler(struct counter_device *counter,
static int ftm_quaddec_set_prescaler(struct counter_device *counter,
struct counter_count *count, u32 cnt_mode)
{
- struct ftm_quaddec *ftm = counter->priv;
+ struct ftm_quaddec *ftm = counter_priv(counter);
mutex_lock(&ftm->ftm_quaddec_mutex);
@@ -162,7 +161,7 @@ static int ftm_quaddec_count_read(struct counter_device *counter,
struct counter_count *count,
u64 *val)
{
- struct ftm_quaddec *const ftm = counter->priv;
+ struct ftm_quaddec *const ftm = counter_priv(counter);
uint32_t cntval;
ftm_read(ftm, FTM_CNT, &cntval);
@@ -176,7 +175,7 @@ static int ftm_quaddec_count_write(struct counter_device *counter,
struct counter_count *count,
const u64 val)
{
- struct ftm_quaddec *const ftm = counter->priv;
+ struct ftm_quaddec *const ftm = counter_priv(counter);
if (val != 0) {
dev_warn(&ftm->pdev->dev, "Can only accept '0' as new counter value\n");
@@ -259,17 +258,17 @@ static struct counter_count ftm_quaddec_counts = {
static int ftm_quaddec_probe(struct platform_device *pdev)
{
+ struct counter_device *counter;
struct ftm_quaddec *ftm;
struct device_node *node = pdev->dev.of_node;
struct resource *io;
int ret;
- ftm = devm_kzalloc(&pdev->dev, sizeof(*ftm), GFP_KERNEL);
- if (!ftm)
+ counter = devm_counter_alloc(&pdev->dev, sizeof(*ftm));
+ if (!counter)
return -ENOMEM;
-
- platform_set_drvdata(pdev, ftm);
+ ftm = counter_priv(counter);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io) {
@@ -285,14 +284,13 @@ static int ftm_quaddec_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to map memory region\n");
return -EINVAL;
}
- ftm->counter.name = dev_name(&pdev->dev);
- ftm->counter.parent = &pdev->dev;
- ftm->counter.ops = &ftm_quaddec_cnt_ops;
- ftm->counter.counts = &ftm_quaddec_counts;
- ftm->counter.num_counts = 1;
- ftm->counter.signals = ftm_quaddec_signals;
- ftm->counter.num_signals = ARRAY_SIZE(ftm_quaddec_signals);
- ftm->counter.priv = ftm;
+ counter->name = dev_name(&pdev->dev);
+ counter->parent = &pdev->dev;
+ counter->ops = &ftm_quaddec_cnt_ops;
+ counter->counts = &ftm_quaddec_counts;
+ counter->num_counts = 1;
+ counter->signals = ftm_quaddec_signals;
+ counter->num_signals = ARRAY_SIZE(ftm_quaddec_signals);
mutex_init(&ftm->ftm_quaddec_mutex);
@@ -302,9 +300,9 @@ static int ftm_quaddec_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_counter_register(&pdev->dev, &ftm->counter);
+ ret = devm_counter_add(&pdev->dev, counter);
if (ret)
- return ret;
+ return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
return 0;
}
diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c
index 0924d16de6e2..47a6a9dfc9e8 100644
--- a/drivers/counter/intel-qep.c
+++ b/drivers/counter/intel-qep.c
@@ -63,7 +63,6 @@
#define INTEL_QEP_CLK_PERIOD_NS 10
struct intel_qep {
- struct counter_device counter;
struct mutex lock;
struct device *dev;
void __iomem *regs;
@@ -109,7 +108,7 @@ static void intel_qep_init(struct intel_qep *qep)
static int intel_qep_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct intel_qep *const qep = counter->priv;
+ struct intel_qep *const qep = counter_priv(counter);
pm_runtime_get_sync(qep->dev);
*val = intel_qep_readl(qep, INTEL_QEPCOUNT);
@@ -176,7 +175,7 @@ static struct counter_synapse intel_qep_count_synapses[] = {
static int intel_qep_ceiling_read(struct counter_device *counter,
struct counter_count *count, u64 *ceiling)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
pm_runtime_get_sync(qep->dev);
*ceiling = intel_qep_readl(qep, INTEL_QEPMAX);
@@ -188,7 +187,7 @@ static int intel_qep_ceiling_read(struct counter_device *counter,
static int intel_qep_ceiling_write(struct counter_device *counter,
struct counter_count *count, u64 max)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
int ret = 0;
/* Intel QEP ceiling configuration only supports 32-bit values */
@@ -213,7 +212,7 @@ out:
static int intel_qep_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
*enable = qep->enabled;
@@ -223,7 +222,7 @@ static int intel_qep_enable_read(struct counter_device *counter,
static int intel_qep_enable_write(struct counter_device *counter,
struct counter_count *count, u8 val)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
u32 reg;
bool changed;
@@ -256,7 +255,7 @@ static int intel_qep_spike_filter_ns_read(struct counter_device *counter,
struct counter_count *count,
u64 *length)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
u32 reg;
pm_runtime_get_sync(qep->dev);
@@ -277,7 +276,7 @@ static int intel_qep_spike_filter_ns_write(struct counter_device *counter,
struct counter_count *count,
u64 length)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
u32 reg;
bool enable;
int ret = 0;
@@ -326,7 +325,7 @@ static int intel_qep_preset_enable_read(struct counter_device *counter,
struct counter_count *count,
u8 *preset_enable)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
u32 reg;
pm_runtime_get_sync(qep->dev);
@@ -341,7 +340,7 @@ static int intel_qep_preset_enable_read(struct counter_device *counter,
static int intel_qep_preset_enable_write(struct counter_device *counter,
struct counter_count *count, u8 val)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
u32 reg;
int ret = 0;
@@ -392,14 +391,16 @@ static struct counter_count intel_qep_counter_count[] = {
static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
+ struct counter_device *counter;
struct intel_qep *qep;
struct device *dev = &pci->dev;
void __iomem *regs;
int ret;
- qep = devm_kzalloc(dev, sizeof(*qep), GFP_KERNEL);
- if (!qep)
+ counter = devm_counter_alloc(dev, sizeof(*qep));
+ if (!counter)
return -ENOMEM;
+ qep = counter_priv(counter);
ret = pcim_enable_device(pci);
if (ret)
@@ -422,20 +423,23 @@ static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
intel_qep_init(qep);
pci_set_drvdata(pci, qep);
- qep->counter.name = pci_name(pci);
- qep->counter.parent = dev;
- qep->counter.ops = &intel_qep_counter_ops;
- qep->counter.counts = intel_qep_counter_count;
- qep->counter.num_counts = ARRAY_SIZE(intel_qep_counter_count);
- qep->counter.signals = intel_qep_signals;
- qep->counter.num_signals = ARRAY_SIZE(intel_qep_signals);
- qep->counter.priv = qep;
+ counter->name = pci_name(pci);
+ counter->parent = dev;
+ counter->ops = &intel_qep_counter_ops;
+ counter->counts = intel_qep_counter_count;
+ counter->num_counts = ARRAY_SIZE(intel_qep_counter_count);
+ counter->signals = intel_qep_signals;
+ counter->num_signals = ARRAY_SIZE(intel_qep_signals);
qep->enabled = false;
pm_runtime_put(dev);
pm_runtime_allow(dev);
- return devm_counter_register(&pci->dev, &qep->counter);
+ ret = devm_counter_add(&pci->dev, counter);
+ if (ret < 0)
+ return dev_err_probe(&pci->dev, ret, "Failed to add counter\n");
+
+ return 0;
}
static void intel_qep_remove(struct pci_dev *pci)
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
index 8514a87fcbee..9e99702470c2 100644
--- a/drivers/counter/interrupt-cnt.c
+++ b/drivers/counter/interrupt-cnt.c
@@ -16,7 +16,6 @@
struct interrupt_cnt_priv {
atomic_t count;
- struct counter_device counter;
struct gpio_desc *gpio;
int irq;
bool enabled;
@@ -37,7 +36,7 @@ static irqreturn_t interrupt_cnt_isr(int irq, void *dev_id)
static int interrupt_cnt_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- struct interrupt_cnt_priv *priv = counter->priv;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
*enable = priv->enabled;
@@ -47,7 +46,7 @@ static int interrupt_cnt_enable_read(struct counter_device *counter,
static int interrupt_cnt_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
- struct interrupt_cnt_priv *priv = counter->priv;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
if (priv->enabled == enable)
return 0;
@@ -85,7 +84,7 @@ static int interrupt_cnt_action_read(struct counter_device *counter,
static int interrupt_cnt_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct interrupt_cnt_priv *priv = counter->priv;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
*val = atomic_read(&priv->count);
@@ -95,7 +94,7 @@ static int interrupt_cnt_read(struct counter_device *counter,
static int interrupt_cnt_write(struct counter_device *counter,
struct counter_count *count, const u64 val)
{
- struct interrupt_cnt_priv *priv = counter->priv;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
if (val != (typeof(priv->count.counter))val)
return -ERANGE;
@@ -122,7 +121,7 @@ static int interrupt_cnt_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
{
- struct interrupt_cnt_priv *priv = counter->priv;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
int ret;
if (!priv->gpio)
@@ -148,12 +147,14 @@ static const struct counter_ops interrupt_cnt_ops = {
static int interrupt_cnt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct counter_device *counter;
struct interrupt_cnt_priv *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
priv->irq = platform_get_irq_optional(pdev, 0);
if (priv->irq == -ENXIO)
@@ -184,8 +185,8 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
if (!priv->signals.name)
return -ENOMEM;
- priv->counter.signals = &priv->signals;
- priv->counter.num_signals = 1;
+ counter->signals = &priv->signals;
+ counter->num_signals = 1;
priv->synapses.actions_list = interrupt_cnt_synapse_actions;
priv->synapses.num_actions = ARRAY_SIZE(interrupt_cnt_synapse_actions);
@@ -199,12 +200,11 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
priv->cnts.ext = interrupt_cnt_ext;
priv->cnts.num_ext = ARRAY_SIZE(interrupt_cnt_ext);
- priv->counter.priv = priv;
- priv->counter.name = dev_name(dev);
- priv->counter.parent = dev;
- priv->counter.ops = &interrupt_cnt_ops;
- priv->counter.counts = &priv->cnts;
- priv->counter.num_counts = 1;
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &interrupt_cnt_ops;
+ counter->counts = &priv->cnts;
+ counter->num_counts = 1;
irq_set_status_flags(priv->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(dev, priv->irq, interrupt_cnt_isr,
@@ -213,7 +213,11 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
if (ret)
return ret;
- return devm_counter_register(dev, &priv->counter);
+ ret = devm_counter_add(dev, counter);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to add counter\n");
+
+ return 0;
}
static const struct of_device_id interrupt_cnt_of_match[] = {
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 0ab1b2716784..00844445143b 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -24,7 +24,6 @@
struct mchp_tc_data {
const struct atmel_tcb_config *tc_cfg;
- struct counter_device counter;
struct regmap *regmap;
int qdec_mode;
int num_channels;
@@ -72,7 +71,7 @@ static int mchp_tc_count_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
if (priv->qdec_mode)
*function = COUNTER_FUNCTION_QUADRATURE_X4;
@@ -86,7 +85,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 bmr, cmr;
regmap_read(priv->regmap, ATMEL_TC_BMR, &bmr);
@@ -148,7 +147,7 @@ static int mchp_tc_count_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *lvl)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
bool sigstatus;
u32 sr;
@@ -169,7 +168,7 @@ static int mchp_tc_count_action_read(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 cmr;
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr);
@@ -197,7 +196,7 @@ static int mchp_tc_count_action_write(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action action)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 edge = ATMEL_TC_ETRGEDG_NONE;
/* QDEC mode is rising edge only */
@@ -230,7 +229,7 @@ static int mchp_tc_count_action_write(struct counter_device *counter,
static int mchp_tc_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 cnt;
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CV), &cnt);
@@ -296,6 +295,7 @@ static int mchp_tc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct atmel_tcb_config *tcb_config;
const struct of_device_id *match;
+ struct counter_device *counter;
struct mchp_tc_data *priv;
char clk_name[7];
struct regmap *regmap;
@@ -303,11 +303,10 @@ static int mchp_tc_probe(struct platform_device *pdev)
int channel;
int ret, i;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
-
- platform_set_drvdata(pdev, priv);
+ priv = counter_priv(counter);
match = of_match_node(atmel_tc_of_match, np->parent);
tcb_config = match->data;
@@ -362,16 +361,19 @@ static int mchp_tc_probe(struct platform_device *pdev)
priv->tc_cfg = tcb_config;
priv->regmap = regmap;
- priv->counter.name = dev_name(&pdev->dev);
- priv->counter.parent = &pdev->dev;
- priv->counter.ops = &mchp_tc_ops;
- priv->counter.num_counts = ARRAY_SIZE(mchp_tc_counts);
- priv->counter.counts = mchp_tc_counts;
- priv->counter.num_signals = ARRAY_SIZE(mchp_tc_count_signals);
- priv->counter.signals = mchp_tc_count_signals;
- priv->counter.priv = priv;
-
- return devm_counter_register(&pdev->dev, &priv->counter);
+ counter->name = dev_name(&pdev->dev);
+ counter->parent = &pdev->dev;
+ counter->ops = &mchp_tc_ops;
+ counter->num_counts = ARRAY_SIZE(mchp_tc_counts);
+ counter->counts = mchp_tc_counts;
+ counter->num_signals = ARRAY_SIZE(mchp_tc_count_signals);
+ counter->signals = mchp_tc_count_signals;
+
+ ret = devm_counter_add(&pdev->dev, counter);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
+
+ return 0;
}
static const struct of_device_id mchp_tc_dt_ids[] = {
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index 5168833b1fdf..68031d93ce89 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -20,7 +20,6 @@
#include <linux/types.h>
struct stm32_lptim_cnt {
- struct counter_device counter;
struct device *dev;
struct regmap *regmap;
struct clk *clk;
@@ -141,7 +140,7 @@ static const enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
static int stm32_lptim_cnt_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
u32 cnt;
int ret;
@@ -158,7 +157,7 @@ static int stm32_lptim_cnt_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (!priv->quadrature_mode) {
*function = COUNTER_FUNCTION_INCREASE;
@@ -177,7 +176,7 @@ static int stm32_lptim_cnt_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
@@ -200,7 +199,7 @@ static int stm32_lptim_cnt_enable_read(struct counter_device *counter,
struct counter_count *count,
u8 *enable)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
int ret;
ret = stm32_lptim_is_enabled(priv);
@@ -216,7 +215,7 @@ static int stm32_lptim_cnt_enable_write(struct counter_device *counter,
struct counter_count *count,
u8 enable)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
int ret;
/* Check nobody uses the timer, or already disabled/enabled */
@@ -241,7 +240,7 @@ static int stm32_lptim_cnt_ceiling_read(struct counter_device *counter,
struct counter_count *count,
u64 *ceiling)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
*ceiling = priv->ceiling;
@@ -252,7 +251,7 @@ static int stm32_lptim_cnt_ceiling_write(struct counter_device *counter,
struct counter_count *count,
u64 ceiling)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
@@ -277,7 +276,7 @@ static int stm32_lptim_cnt_action_read(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
enum counter_function function;
int err;
@@ -321,7 +320,7 @@ static int stm32_lptim_cnt_action_write(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action action)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
enum counter_function function;
int err;
@@ -411,14 +410,17 @@ static struct counter_count stm32_lptim_in1_counts = {
static int stm32_lptim_cnt_probe(struct platform_device *pdev)
{
struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct counter_device *counter;
struct stm32_lptim_cnt *priv;
+ int ret;
if (IS_ERR_OR_NULL(ddata))
return -EINVAL;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
priv->dev = &pdev->dev;
priv->regmap = ddata->regmap;
@@ -426,23 +428,26 @@ static int stm32_lptim_cnt_probe(struct platform_device *pdev)
priv->ceiling = STM32_LPTIM_MAX_ARR;
/* Initialize Counter device */
- priv->counter.name = dev_name(&pdev->dev);
- priv->counter.parent = &pdev->dev;
- priv->counter.ops = &stm32_lptim_cnt_ops;
+ counter->name = dev_name(&pdev->dev);
+ counter->parent = &pdev->dev;
+ counter->ops = &stm32_lptim_cnt_ops;
if (ddata->has_encoder) {
- priv->counter.counts = &stm32_lptim_enc_counts;
- priv->counter.num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals);
+ counter->counts = &stm32_lptim_enc_counts;
+ counter->num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals);
} else {
- priv->counter.counts = &stm32_lptim_in1_counts;
- priv->counter.num_signals = 1;
+ counter->counts = &stm32_lptim_in1_counts;
+ counter->num_signals = 1;
}
- priv->counter.num_counts = 1;
- priv->counter.signals = stm32_lptim_cnt_signals;
- priv->counter.priv = priv;
+ counter->num_counts = 1;
+ counter->signals = stm32_lptim_cnt_signals;
platform_set_drvdata(pdev, priv);
- return devm_counter_register(&pdev->dev, &priv->counter);
+ ret = devm_counter_add(&pdev->dev, counter);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 0546e932db0c..5779ae7c73cf 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -29,7 +29,6 @@ struct stm32_timer_regs {
};
struct stm32_timer_cnt {
- struct counter_device counter;
struct regmap *regmap;
struct clk *clk;
u32 max_arr;
@@ -47,7 +46,7 @@ static const enum counter_function stm32_count_functions[] = {
static int stm32_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cnt;
regmap_read(priv->regmap, TIM_CNT, &cnt);
@@ -59,7 +58,7 @@ static int stm32_count_read(struct counter_device *counter,
static int stm32_count_write(struct counter_device *counter,
struct counter_count *count, const u64 val)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 ceiling;
regmap_read(priv->regmap, TIM_ARR, &ceiling);
@@ -73,7 +72,7 @@ static int stm32_count_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 smcr;
regmap_read(priv->regmap, TIM_SMCR, &smcr);
@@ -100,7 +99,7 @@ static int stm32_count_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1, sms;
switch (function) {
@@ -140,7 +139,7 @@ static int stm32_count_direction_read(struct counter_device *counter,
struct counter_count *count,
enum counter_count_direction *direction)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
regmap_read(priv->regmap, TIM_CR1, &cr1);
@@ -153,7 +152,7 @@ static int stm32_count_direction_read(struct counter_device *counter,
static int stm32_count_ceiling_read(struct counter_device *counter,
struct counter_count *count, u64 *ceiling)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 arr;
regmap_read(priv->regmap, TIM_ARR, &arr);
@@ -166,7 +165,7 @@ static int stm32_count_ceiling_read(struct counter_device *counter,
static int stm32_count_ceiling_write(struct counter_device *counter,
struct counter_count *count, u64 ceiling)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
if (ceiling > priv->max_arr)
return -ERANGE;
@@ -181,7 +180,7 @@ static int stm32_count_ceiling_write(struct counter_device *counter,
static int stm32_count_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
regmap_read(priv->regmap, TIM_CR1, &cr1);
@@ -194,7 +193,7 @@ static int stm32_count_enable_read(struct counter_device *counter,
static int stm32_count_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
if (enable) {
@@ -317,31 +316,38 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct stm32_timer_cnt *priv;
+ struct counter_device *counter;
+ int ret;
if (IS_ERR_OR_NULL(ddata))
return -EINVAL;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
+
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
- priv->counter.name = dev_name(dev);
- priv->counter.parent = dev;
- priv->counter.ops = &stm32_timer_cnt_ops;
- priv->counter.counts = &stm32_counts;
- priv->counter.num_counts = 1;
- priv->counter.signals = stm32_signals;
- priv->counter.num_signals = ARRAY_SIZE(stm32_signals);
- priv->counter.priv = priv;
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &stm32_timer_cnt_ops;
+ counter->counts = &stm32_counts;
+ counter->num_counts = 1;
+ counter->signals = stm32_signals;
+ counter->num_signals = ARRAY_SIZE(stm32_signals);
platform_set_drvdata(pdev, priv);
/* Register Counter device */
- return devm_counter_register(dev, &priv->counter);
+ ret = devm_counter_add(dev, counter);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to add counter\n");
+
+ return 0;
}
static int __maybe_unused stm32_timer_cnt_suspend(struct device *dev)
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 09817c953f9a..0489d26eb47c 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -87,10 +87,15 @@ struct ti_eqep_cnt {
struct regmap *regmap16;
};
+static struct ti_eqep_cnt *ti_eqep_count_from_counter(struct counter_device *counter)
+{
+ return counter_priv(counter);
+}
+
static int ti_eqep_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 cnt;
regmap_read(priv->regmap32, QPOSCNT, &cnt);
@@ -102,7 +107,7 @@ static int ti_eqep_count_read(struct counter_device *counter,
static int ti_eqep_count_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 max;
regmap_read(priv->regmap32, QPOSMAX, &max);
@@ -116,7 +121,7 @@ static int ti_eqep_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qdecctl;
regmap_read(priv->regmap16, QDECCTL, &qdecctl);
@@ -143,7 +148,7 @@ static int ti_eqep_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
enum ti_eqep_count_func qsrc;
switch (function) {
@@ -173,7 +178,7 @@ static int ti_eqep_action_read(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
enum counter_function function;
u32 qdecctl;
int err;
@@ -245,7 +250,7 @@ static int ti_eqep_position_ceiling_read(struct counter_device *counter,
struct counter_count *count,
u64 *ceiling)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qposmax;
regmap_read(priv->regmap32, QPOSMAX, &qposmax);
@@ -259,7 +264,7 @@ static int ti_eqep_position_ceiling_write(struct counter_device *counter,
struct counter_count *count,
u64 ceiling)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
if (ceiling != (u32)ceiling)
return -ERANGE;
@@ -272,7 +277,7 @@ static int ti_eqep_position_ceiling_write(struct counter_device *counter,
static int ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qepctl;
regmap_read(priv->regmap16, QEPCTL, &qepctl);
@@ -285,7 +290,7 @@ static int ti_eqep_position_enable_read(struct counter_device *counter,
static int ti_eqep_position_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, enable ? -1 : 0);
@@ -368,13 +373,15 @@ static const struct regmap_config ti_eqep_regmap16_config = {
static int ti_eqep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct counter_device *counter;
struct ti_eqep_cnt *priv;
void __iomem *base;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -390,16 +397,15 @@ static int ti_eqep_probe(struct platform_device *pdev)
if (IS_ERR(priv->regmap16))
return PTR_ERR(priv->regmap16);
- priv->counter.name = dev_name(dev);
- priv->counter.parent = dev;
- priv->counter.ops = &ti_eqep_counter_ops;
- priv->counter.counts = ti_eqep_counts;
- priv->counter.num_counts = ARRAY_SIZE(ti_eqep_counts);
- priv->counter.signals = ti_eqep_signals;
- priv->counter.num_signals = ARRAY_SIZE(ti_eqep_signals);
- priv->counter.priv = priv;
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &ti_eqep_counter_ops;
+ counter->counts = ti_eqep_counts;
+ counter->num_counts = ARRAY_SIZE(ti_eqep_counts);
+ counter->signals = ti_eqep_signals;
+ counter->num_signals = ARRAY_SIZE(ti_eqep_signals);
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, counter);
/*
* Need to make sure power is turned on. On AM33xx, this comes from the
@@ -409,7 +415,7 @@ static int ti_eqep_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- err = counter_register(&priv->counter);
+ err = counter_add(counter);
if (err < 0) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -421,10 +427,10 @@ static int ti_eqep_probe(struct platform_device *pdev)
static int ti_eqep_remove(struct platform_device *pdev)
{
- struct ti_eqep_cnt *priv = platform_get_drvdata(pdev);
+ struct counter_device *counter = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- counter_unregister(&priv->counter);
+ counter_unregister(counter);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 92701a18bdd9..55516043b656 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -34,6 +34,23 @@ config X86_PCC_CPUFREQ
If in doubt, say N.
+config X86_AMD_PSTATE
+ tristate "AMD Processor P-State driver"
+ depends on X86 && ACPI
+ select ACPI_PROCESSOR
+ select ACPI_CPPC_LIB if X86_64
+ select CPU_FREQ_GOV_SCHEDUTIL if SMP
+ help
+ This adds a CPUFreq driver which utilizes a fine-grained
+ processor performance frequency control range instead of legacy
+ performance levels. _CPC needs to be present in the ACPI tables
+ of the system.
+
+ For details, take a look at:
+ <file:Documentation/admin-guide/pm/amd-pstate.rst>.
+
+ If in doubt, say N.
+
config X86_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
depends on ACPI_PROCESSOR
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 48ee5859030c..285de70af877 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -17,6 +17,10 @@ obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
+# Traces
+CFLAGS_amd-pstate-trace.o := -I$(src)
+amd_pstate-y := amd-pstate.o amd-pstate-trace.o
+
##################################################################################
# x86 drivers.
# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
@@ -25,6 +29,7 @@ obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
# speedstep-* is preferred over p4-clockmod.
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
+obj-$(CONFIG_X86_AMD_PSTATE) += amd_pstate.o
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
diff --git a/drivers/cpufreq/amd-pstate-trace.c b/drivers/cpufreq/amd-pstate-trace.c
new file mode 100644
index 000000000000..891b696dcd69
--- /dev/null
+++ b/drivers/cpufreq/amd-pstate-trace.c
@@ -0,0 +1,2 @@
+#define CREATE_TRACE_POINTS
+#include "amd-pstate-trace.h"
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
new file mode 100644
index 000000000000..647505957d4f
--- /dev/null
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * amd-pstate-trace.h - AMD Processor P-state Frequency Driver Tracer
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ */
+
+#if !defined(_AMD_PSTATE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _AMD_PSTATE_TRACE_H
+
+#include <linux/cpufreq.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_events.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amd_cpu
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE amd-pstate-trace
+
+#define TPS(x) tracepoint_string(x)
+
+TRACE_EVENT(amd_pstate_perf,
+
+ TP_PROTO(unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity,
+ unsigned int cpu_id,
+ bool changed,
+ bool fast_switch
+ ),
+
+ TP_ARGS(min_perf,
+ target_perf,
+ capacity,
+ cpu_id,
+ changed,
+ fast_switch
+ ),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, min_perf)
+ __field(unsigned long, target_perf)
+ __field(unsigned long, capacity)
+ __field(unsigned int, cpu_id)
+ __field(bool, changed)
+ __field(bool, fast_switch)
+ ),
+
+ TP_fast_assign(
+ __entry->min_perf = min_perf;
+ __entry->target_perf = target_perf;
+ __entry->capacity = capacity;
+ __entry->cpu_id = cpu_id;
+ __entry->changed = changed;
+ __entry->fast_switch = fast_switch;
+ ),
+
+ TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu cpu_id=%u changed=%s fast_switch=%s",
+ (unsigned long)__entry->min_perf,
+ (unsigned long)__entry->target_perf,
+ (unsigned long)__entry->capacity,
+ (unsigned int)__entry->cpu_id,
+ (__entry->changed) ? "true" : "false",
+ (__entry->fast_switch) ? "true" : "false"
+ )
+);
+
+#endif /* _AMD_PSTATE_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#include <trace/define_trace.h>
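Once this header is built with CREATE_TRACE_POINTS (see amd-pstate-trace.c above), the event should appear in tracefs under events/amd_cpu/amd_pstate_perf/, where it can be enabled like any other tracepoint.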
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
new file mode 100644
index 000000000000..9ce75ed11f8e
--- /dev/null
+++ b/drivers/cpufreq/amd-pstate.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * amd-pstate.c - AMD Processor P-state Frequency Driver
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ * AMD P-State introduces a new CPU performance scaling design for AMD
+ * processors using the ACPI Collaborative Performance and Power Control (CPPC)
+ * feature, which works with the AMD SMU firmware to provide a finer-grained
+ * frequency control range. It replaces the legacy ACPI P-States control and
+ * allows a flexible, low-latency interface for the Linux kernel to directly
+ * communicate performance hints to the hardware.
+ *
+ * AMD P-State is supported on recent AMD Zen-based CPU series, including some
+ * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
+ * AMD P-State supported system. There are two types of hardware implementations
+ * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution. The
+ * X86_FEATURE_CPPC CPU feature flag is used to distinguish between them.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/compiler.h>
+#include <linux/dmi.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/static_call.h>
+
+#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+#include "amd-pstate-trace.h"
+
+#define AMD_PSTATE_TRANSITION_LATENCY 0x20000
+#define AMD_PSTATE_TRANSITION_DELAY 500
+
+/*
+ * TODO: We need more time to fine-tune the shared memory solution together
+ * with the community.
+ *
+ * SUSE has reported some performance drops in CPU benchmarks, and we are
+ * working with them to fine-tune the shared memory solution. We therefore
+ * disable it by default, falling back to acpi-cpufreq on these processors,
+ * and add a module parameter so it can be enabled manually for debugging.
+ */
+static bool shared_mem = false;
+module_param(shared_mem, bool, 0444);
+MODULE_PARM_DESC(shared_mem,
+ "enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)");
+
+static struct cpufreq_driver amd_pstate_driver;
+
+/**
+ * struct amd_cpudata - private CPU data for AMD P-State
+ * @cpu: CPU number
+ * @req: constraint request to apply
+ * @cppc_req_cached: cached performance request hints
+ * @highest_perf: the maximum performance an individual processor may reach,
+ * assuming ideal conditions
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ * assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ * savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @max_freq: the frequency that maps to highest_perf
+ * @min_freq: the frequency that maps to lowest_perf
+ * @nominal_freq: the frequency that maps to nominal_perf
+ * @lowest_nonlinear_freq: the frequency that maps to lowest_nonlinear_perf
+ * @boost_supported: whether the processor or SBIOS supports boost mode
+ *
+ * The amd_cpudata is the key private data for each CPU thread in AMD P-State;
+ * it represents all the attributes and goals that AMD P-State requests at
+ * runtime.
+ */
+struct amd_cpudata {
+ int cpu;
+
+ struct freq_qos_request req[2];
+ u64 cppc_req_cached;
+
+ u32 highest_perf;
+ u32 nominal_perf;
+ u32 lowest_nonlinear_perf;
+ u32 lowest_perf;
+
+ u32 max_freq;
+ u32 min_freq;
+ u32 nominal_freq;
+ u32 lowest_nonlinear_freq;
+
+ bool boost_supported;
+};
+
+static inline int pstate_enable(bool enable)
+{
+ return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
+}
+
+static int cppc_enable(bool enable)
+{
+ int cpu, ret = 0;
+
+ for_each_present_cpu(cpu) {
+ ret = cppc_set_enable(cpu, enable);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
+
+static inline int amd_pstate_enable(bool enable)
+{
+ return static_call(amd_pstate_enable)(enable);
+}
+
+static int pstate_init_perf(struct amd_cpudata *cpudata)
+{
+ u64 cap1;
+
+ int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
+ &cap1);
+ if (ret)
+ return ret;
+
+ /*
+ * TODO: Introduce AMD specific power feature.
+ *
+ * CPPC entry doesn't indicate the highest performance in some ASICs.
+ */
+ WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+
+ WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
+ WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
+ WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+
+ return 0;
+}
+
+static int cppc_init_perf(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_caps cppc_perf;
+
+ int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+
+ WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
+ WRITE_ONCE(cpudata->lowest_nonlinear_perf,
+ cppc_perf.lowest_nonlinear_perf);
+ WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+
+ return 0;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
+
+static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
+{
+ return static_call(amd_pstate_init_perf)(cpudata);
+}
+
+static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, bool fast_switch)
+{
+ if (fast_switch)
+ wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
+ else
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ READ_ONCE(cpudata->cppc_req_cached));
+}
+
+static void cppc_update_perf(struct amd_cpudata *cpudata,
+ u32 min_perf, u32 des_perf,
+ u32 max_perf, bool fast_switch)
+{
+ struct cppc_perf_ctrls perf_ctrls;
+
+ perf_ctrls.max_perf = max_perf;
+ perf_ctrls.min_perf = min_perf;
+ perf_ctrls.desired_perf = des_perf;
+
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+}
+
+DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+
+static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+ u32 min_perf, u32 des_perf,
+ u32 max_perf, bool fast_switch)
+{
+ static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ max_perf, fast_switch);
+}
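The MSR-based pstate_* callbacks are the static-call defaults; on shared-memory systems the driver is expected to switch them to the cppc_* variants at init time (outside this hunk), roughly:

	/* Sketch: switch dispatch to the shared memory (CPPC) callbacks. */
	if (!boot_cpu_has(X86_FEATURE_CPPC) && shared_mem) {
		static_call_update(amd_pstate_enable, cppc_enable);
		static_call_update(amd_pstate_init_perf, cppc_init_perf);
		static_call_update(amd_pstate_update_perf, cppc_update_perf);
	}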
+
+static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, bool fast_switch)
+{
+ u64 prev = READ_ONCE(cpudata->cppc_req_cached);
+ u64 value = prev;
+
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+
+ value &= ~AMD_CPPC_DES_PERF(~0L);
+ value |= AMD_CPPC_DES_PERF(des_perf);
+
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(max_perf);
+
+ trace_amd_pstate_perf(min_perf, des_perf, max_perf,
+ cpudata->cpu, (value != prev), fast_switch);
+
+ if (value == prev)
+ return;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ amd_pstate_update_perf(cpudata, min_perf, des_perf,
+ max_perf, fast_switch);
+}
+
+static int amd_pstate_verify(struct cpufreq_policy_data *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+
+ return 0;
+}
+
+static int amd_pstate_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ unsigned long max_perf, min_perf, des_perf, cap_perf;
+
+ if (!cpudata->max_freq)
+ return -ENODEV;
+
+ cap_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ max_perf = cap_perf;
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+
+ des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
+ cpudata->max_freq);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ amd_pstate_update(cpudata, min_perf, des_perf,
+ max_perf, false);
+ cpufreq_freq_transition_end(policy, &freqs, false);
+
+ return 0;
+}
+
+static void amd_pstate_adjust_perf(unsigned int cpu,
+ unsigned long _min_perf,
+ unsigned long target_perf,
+ unsigned long capacity)
+{
+ unsigned long max_perf, min_perf, des_perf,
+ cap_perf, lowest_nonlinear_perf;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ cap_perf = READ_ONCE(cpudata->highest_perf);
+ lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+
+ des_perf = cap_perf;
+ if (target_perf < capacity)
+ des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
+
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+ if (_min_perf < capacity)
+ min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+
+ if (min_perf < lowest_nonlinear_perf)
+ min_perf = lowest_nonlinear_perf;
+
+ max_perf = cap_perf;
+ if (max_perf < min_perf)
+ max_perf = min_perf;
+
+ des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+
+ amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+}
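For intuition, a worked pass through the mapping above with illustrative numbers:

/*
 * Illustrative numbers only: with cap_perf = 166, capacity = 1024 and
 * target_perf = 512 from schedutil,
 *
 *	des_perf = DIV_ROUND_UP(166 * 512, 1024) = 83
 *
 * which is then clamped between min_perf (at least lowest_nonlinear_perf)
 * and max_perf (= cap_perf).
 */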
+
+static int amd_get_min_freq(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_caps cppc_perf;
+
+ int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ /* Switch to khz */
+ return cppc_perf.lowest_freq * 1000;
+}
+
+static int amd_get_max_freq(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_caps cppc_perf;
+ u32 max_perf, max_freq, nominal_freq, nominal_perf;
+ u64 boost_ratio;
+
+ int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ nominal_freq = cppc_perf.nominal_freq;
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ max_perf = READ_ONCE(cpudata->highest_perf);
+
+ boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
+ nominal_perf);
+
+ max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
+
+ /* Switch to khz */
+ return max_freq * 1000;
+}
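The boost ratio above is a fixed-point value with SCHED_CAPACITY_SHIFT (10) fractional bits. A worked example with illustrative numbers:

/*
 * Illustrative numbers only: with nominal_freq = 2800 MHz,
 * nominal_perf = 120 and highest_perf = 166,
 *
 *	boost_ratio = (166 << 10) / 120   = 1416  (~1.38x)
 *	max_freq    = (2800 * 1416) >> 10 = 3872 MHz
 *
 * returned to cpufreq as 3872000 kHz.
 */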
+
+static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_caps cppc_perf;
+
+ int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ /* Switch to khz */
+ return cppc_perf.nominal_freq * 1000;
+}
+
+static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_caps cppc_perf;
+ u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
+ nominal_freq, nominal_perf;
+ u64 lowest_nonlinear_ratio;
+
+ int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ nominal_freq = cppc_perf.nominal_freq;
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+
+ lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+
+ lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
+ nominal_perf);
+
+ lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
+
+ /* Switch to khz */
+ return lowest_nonlinear_freq * 1000;
+}
+
+static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
+
+ if (!cpudata->boost_supported) {
+ pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ return -EINVAL;
+ }
+
+ if (state)
+ policy->cpuinfo.max_freq = cpudata->max_freq;
+ else
+ policy->cpuinfo.max_freq = cpudata->nominal_freq;
+
+ policy->max = policy->cpuinfo.max_freq;
+
+ ret = freq_qos_update_request(&cpudata->req[1],
+ policy->cpuinfo.max_freq);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
+{
+ u32 highest_perf, nominal_perf;
+
+ highest_perf = READ_ONCE(cpudata->highest_perf);
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+
+ if (highest_perf <= nominal_perf)
+ return;
+
+ cpudata->boost_supported = true;
+ amd_pstate_driver.boost_enabled = true;
+}
+
+static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+{
+ int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ struct device *dev;
+ struct amd_cpudata *cpudata;
+
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
+ if (!cpudata)
+ return -ENOMEM;
+
+ cpudata->cpu = policy->cpu;
+
+ ret = amd_pstate_init_perf(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = amd_get_min_freq(cpudata);
+ max_freq = amd_get_max_freq(cpudata);
+ nominal_freq = amd_get_nominal_freq(cpudata);
+ lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+
+ if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
+ dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
+ min_freq, max_freq);
+ ret = -EINVAL;
+ goto free_cpudata1;
+ }
+
+ policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
+ policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
+
+ policy->min = min_freq;
+ policy->max = max_freq;
+
+ policy->cpuinfo.min_freq = min_freq;
+ policy->cpuinfo.max_freq = max_freq;
+
+	/* It will be updated by the governor */
+ policy->cur = policy->cpuinfo.min_freq;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC))
+ policy->fast_switch_possible = true;
+
+ ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
+ FREQ_QOS_MIN, policy->cpuinfo.min_freq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
+ goto free_cpudata1;
+ }
+
+ ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
+ FREQ_QOS_MAX, policy->cpuinfo.max_freq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
+ goto free_cpudata2;
+ }
+
+	/* Record the initial processor capability frequencies */
+ cpudata->max_freq = max_freq;
+ cpudata->min_freq = min_freq;
+ cpudata->nominal_freq = nominal_freq;
+ cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+
+ policy->driver_data = cpudata;
+
+ amd_pstate_boost_init(cpudata);
+
+ return 0;
+
+free_cpudata2:
+ freq_qos_remove_request(&cpudata->req[0]);
+free_cpudata1:
+ kfree(cpudata);
+ return ret;
+}
+
+static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata;
+
+ cpudata = policy->driver_data;
+
+ freq_qos_remove_request(&cpudata->req[1]);
+ freq_qos_remove_request(&cpudata->req[0]);
+ kfree(cpudata);
+
+ return 0;
+}
+
+/* Sysfs attributes */
+
+/*
+ * This frequency indicates the maximum hardware frequency.
+ * If boost is supported but not active, this frequency will be larger than
+ * the one shown in cpuinfo.
+ */
+static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int max_freq;
+ struct amd_cpudata *cpudata;
+
+ cpudata = policy->driver_data;
+
+ max_freq = amd_get_max_freq(cpudata);
+ if (max_freq < 0)
+ return max_freq;
+
+ return sprintf(&buf[0], "%u\n", max_freq);
+}
+
+static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int freq;
+ struct amd_cpudata *cpudata;
+
+ cpudata = policy->driver_data;
+
+ freq = amd_get_lowest_nonlinear_freq(cpudata);
+ if (freq < 0)
+ return freq;
+
+ return sprintf(&buf[0], "%u\n", freq);
+}
+
+/*
+ * On some ASICs, the highest_perf is not the one reported in the _CPC table,
+ * so we need to expose it to sysfs.
+ */
+static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
+ char *buf)
+{
+ u32 perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ perf = READ_ONCE(cpudata->highest_perf);
+
+ return sprintf(&buf[0], "%u\n", perf);
+}
+
+cpufreq_freq_attr_ro(amd_pstate_max_freq);
+cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
+
+cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+
+static struct freq_attr *amd_pstate_attr[] = {
+ &amd_pstate_max_freq,
+ &amd_pstate_lowest_nonlinear_freq,
+ &amd_pstate_highest_perf,
+ NULL,
+};
+
+static struct cpufreq_driver amd_pstate_driver = {
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
+ .verify = amd_pstate_verify,
+ .target = amd_pstate_target,
+ .init = amd_pstate_cpu_init,
+ .exit = amd_pstate_cpu_exit,
+ .set_boost = amd_pstate_set_boost,
+ .name = "amd-pstate",
+ .attr = amd_pstate_attr,
+};
+
+static int __init amd_pstate_init(void)
+{
+ int ret;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return -ENODEV;
+
+ if (!acpi_cpc_valid()) {
+ pr_debug("the _CPC object is not present in SBIOS\n");
+ return -ENODEV;
+ }
+
+ /* don't keep reloading if cpufreq_driver exists */
+ if (cpufreq_get_current_driver())
+ return -EEXIST;
+
+ /* capability check */
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ pr_debug("AMD CPPC MSR based functionality is supported\n");
+ amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
+ } else if (shared_mem) {
+ static_call_update(amd_pstate_enable, cppc_enable);
+ static_call_update(amd_pstate_init_perf, cppc_init_perf);
+ static_call_update(amd_pstate_update_perf, cppc_update_perf);
+ } else {
+		pr_info("This processor supports the shared memory solution; you can enable it with amd_pstate.shared_mem=1\n");
+ return -ENODEV;
+ }
+
+ /* enable amd pstate feature */
+ ret = amd_pstate_enable(true);
+ if (ret) {
+ pr_err("failed to enable amd-pstate with return %d\n", ret);
+ return ret;
+ }
+
+ ret = cpufreq_register_driver(&amd_pstate_driver);
+ if (ret)
+ pr_err("failed to register amd_pstate_driver with return %d\n",
+ ret);
+
+ return ret;
+}
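
On shared-memory systems the init path above retargets the driver's transport hooks with static_call_update() instead of indirect function pointers. A sketch of the underlying pattern, assuming pstate_enable() (MSR-based default) and cppc_enable() (shared-memory variant) exist as in the driver:

#include <linux/static_call.h>

static int pstate_enable(bool enable);	/* MSR-based default */
static int cppc_enable(bool enable);	/* shared-memory variant */

DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);

static inline int amd_pstate_enable(bool enable)
{
	return static_call(amd_pstate_enable)(enable);
}

/* at init time, on shared-memory systems: */
/* static_call_update(amd_pstate_enable, cppc_enable); */
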
+
+static void __exit amd_pstate_exit(void)
+{
+ cpufreq_unregister_driver(&amd_pstate_driver);
+
+ amd_pstate_enable(false);
+}
+
+module_init(amd_pstate_init);
+module_exit(amd_pstate_exit);
+
+MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
+MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 096c3848fa41..b8d95536ee22 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -924,7 +924,7 @@ cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
-static struct attribute *default_attrs[] = {
+static struct attribute *cpufreq_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
&cpuinfo_transition_latency.attr,
@@ -938,6 +938,7 @@ static struct attribute *default_attrs[] = {
&scaling_setspeed.attr,
NULL
};
+ATTRIBUTE_GROUPS(cpufreq);
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
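
For reference, ATTRIBUTE_GROUPS(cpufreq) above generates the cpufreq_groups array that the kobj_type change below consumes. Paraphrased from the macro in include/linux/sysfs.h, it expands to roughly:

static const struct attribute_group cpufreq_group = {
	.attrs = cpufreq_attrs,
};

static const struct attribute_group *cpufreq_groups[] = {
	&cpufreq_group,
	NULL,
};
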
@@ -1000,7 +1001,7 @@ static const struct sysfs_ops sysfs_ops = {
static struct kobj_type ktype_cpufreq = {
.sysfs_ops = &sysfs_ops,
- .default_attrs = default_attrs,
+ .default_groups = cpufreq_groups,
.release = cpufreq_sysfs_release,
};
@@ -1403,7 +1404,7 @@ static int cpufreq_online(unsigned int cpu)
ret = freq_qos_add_request(&policy->constraints,
policy->min_freq_req, FREQ_QOS_MIN,
- policy->min);
+ FREQ_QOS_MIN_DEFAULT_VALUE);
if (ret < 0) {
/*
* So we don't call freq_qos_remove_request() for an
@@ -1423,7 +1424,7 @@ static int cpufreq_online(unsigned int cpu)
ret = freq_qos_add_request(&policy->constraints,
policy->max_freq_req, FREQ_QOS_MAX,
- policy->max);
+ FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0) {
policy->max_freq_req = NULL;
goto out_destroy_policy;
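
The two hunks above stop seeding the QoS requests with the policy's current min/max and use the framework's "no constraint" defaults instead. For reference, the values (as defined in include/linux/pm_qos.h at the time of this series) are:

#define FREQ_QOS_MIN_DEFAULT_VALUE	0
#define FREQ_QOS_MAX_DEFAULT_VALUE	S32_MAX
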
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0879ec3c170c..08515f7e515f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -257,7 +257,7 @@ gov_attr_rw(ignore_nice_load);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);
-static struct attribute *cs_attributes[] = {
+static struct attribute *cs_attrs[] = {
&sampling_rate.attr,
&sampling_down_factor.attr,
&up_threshold.attr,
@@ -266,6 +266,7 @@ static struct attribute *cs_attributes[] = {
&freq_step.attr,
NULL
};
+ATTRIBUTE_GROUPS(cs);
/************************** sysfs end ************************/
@@ -315,7 +316,7 @@ static void cs_start(struct cpufreq_policy *policy)
static struct dbs_governor cs_governor = {
.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
- .kobj_type = { .default_attrs = cs_attributes },
+ .kobj_type = { .default_groups = cs_groups },
.gov_dbs_update = cs_dbs_update,
.alloc = cs_alloc,
.free = cs_free,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3b8f924771b4..6a41ea4729b8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -328,7 +328,7 @@ gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);
-static struct attribute *od_attributes[] = {
+static struct attribute *od_attrs[] = {
&sampling_rate.attr,
&up_threshold.attr,
&sampling_down_factor.attr,
@@ -337,6 +337,7 @@ static struct attribute *od_attributes[] = {
&io_is_busy.attr,
NULL
};
+ATTRIBUTE_GROUPS(od);
/************************** sysfs end ************************/
@@ -401,7 +402,7 @@ static struct od_ops od_ops = {
static struct dbs_governor od_dbs_gov = {
.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
- .kobj_type = { .default_attrs = od_attributes },
+ .kobj_type = { .default_groups = od_groups },
.gov_dbs_update = od_dbs_update,
.alloc = od_alloc,
.free = od_free,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dec2a5649ac1..bc7f7e6759bd 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -664,19 +664,29 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
* 3 balance_power
* 4 power
*/
+
+enum energy_perf_value_index {
+ EPP_INDEX_DEFAULT = 0,
+ EPP_INDEX_PERFORMANCE,
+ EPP_INDEX_BALANCE_PERFORMANCE,
+ EPP_INDEX_BALANCE_POWERSAVE,
+ EPP_INDEX_POWERSAVE,
+};
+
static const char * const energy_perf_strings[] = {
- "default",
- "performance",
- "balance_performance",
- "balance_power",
- "power",
+ [EPP_INDEX_DEFAULT] = "default",
+ [EPP_INDEX_PERFORMANCE] = "performance",
+ [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+ [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+ [EPP_INDEX_POWERSAVE] = "power",
NULL
};
-static const unsigned int epp_values[] = {
- HWP_EPP_PERFORMANCE,
- HWP_EPP_BALANCE_PERFORMANCE,
- HWP_EPP_BALANCE_POWERSAVE,
- HWP_EPP_POWERSAVE
+static unsigned int epp_values[] = {
+ [EPP_INDEX_DEFAULT] = 0, /* Unused index */
+ [EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE,
+ [EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE,
+ [EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE,
+ [EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE,
};
static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
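
With both tables indexed by enum energy_perf_value_index, the string shown in sysfs and the EPP value written to hardware can no longer drift apart. An illustrative helper (not part of this patch) showing the lockstep lookup:

#include <linux/string.h>

static unsigned int epp_value_for(const char *pref)
{
	int i;

	for (i = EPP_INDEX_PERFORMANCE; energy_perf_strings[i]; i++)
		if (!strcmp(pref, energy_perf_strings[i]))
			return epp_values[i];

	return epp_values[EPP_INDEX_BALANCE_PERFORMANCE];	/* fallback */
}
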
@@ -690,14 +700,14 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw
return epp;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
- if (epp == HWP_EPP_PERFORMANCE)
- return 1;
- if (epp == HWP_EPP_BALANCE_PERFORMANCE)
- return 2;
- if (epp == HWP_EPP_BALANCE_POWERSAVE)
- return 3;
- if (epp == HWP_EPP_POWERSAVE)
- return 4;
+ if (epp == epp_values[EPP_INDEX_PERFORMANCE])
+ return EPP_INDEX_PERFORMANCE;
+ if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE])
+ return EPP_INDEX_BALANCE_PERFORMANCE;
+ if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE])
+ return EPP_INDEX_BALANCE_POWERSAVE;
+ if (epp == epp_values[EPP_INDEX_POWERSAVE])
+ return EPP_INDEX_POWERSAVE;
*raw_epp = epp;
return 0;
} else if (boot_cpu_has(X86_FEATURE_EPB)) {
@@ -757,7 +767,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
if (use_raw)
epp = raw_epp;
else if (epp == -EINVAL)
- epp = epp_values[pref_index - 1];
+ epp = epp_values[pref_index];
/*
* To avoid confusion, refuse to set EPP to any values different
@@ -843,7 +853,7 @@ static ssize_t store_energy_performance_preference(
* upfront.
*/
if (!raw)
- epp = ret ? epp_values[ret - 1] : cpu->epp_default;
+ epp = ret ? epp_values[ret] : cpu->epp_default;
if (cpu->epp_cached != epp) {
int err;
@@ -1124,19 +1134,22 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
+static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
+ struct cpufreq_policy *policy)
+{
+ policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+ refresh_frequency_limits(policy);
+}
+
static void intel_pstate_update_max_freq(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
- struct cpudata *cpudata;
if (!policy)
return;
- cpudata = all_cpu_data[cpu];
- policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
- cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
-
- refresh_frequency_limits(policy);
+ __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
cpufreq_cpu_release(policy);
}
@@ -1584,8 +1597,15 @@ static void intel_pstate_notify_work(struct work_struct *work)
{
struct cpudata *cpudata =
container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
+
+ if (policy) {
+ intel_pstate_get_hwp_cap(cpudata);
+ __intel_pstate_update_max_freq(cpudata, policy);
+
+ cpufreq_cpu_release(policy);
+ }
- cpufreq_update_policy(cpudata->cpu);
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
@@ -1679,10 +1699,18 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
- if (cpudata->epp_default == -EINVAL)
- cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
intel_pstate_enable_hwp_interrupt(cpudata);
+
+ if (cpudata->epp_default >= 0)
+ return;
+
+ if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) {
+ cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+ } else {
+ cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
+ intel_pstate_set_epp(cpudata, cpudata->epp_default);
+ }
}
static int atom_get_min_pstate(void)
@@ -2486,18 +2514,14 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
* HWP needs some special consideration, because HWP_REQUEST uses
* abstract values to represent performance rather than pure ratios.
*/
- if (hwp_active) {
- intel_pstate_get_hwp_cap(cpu);
+ if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
+ int scaling = cpu->pstate.scaling;
+ int freq;
- if (cpu->pstate.scaling != perf_ctl_scaling) {
- int scaling = cpu->pstate.scaling;
- int freq;
-
- freq = max_policy_perf * perf_ctl_scaling;
- max_policy_perf = DIV_ROUND_UP(freq, scaling);
- freq = min_policy_perf * perf_ctl_scaling;
- min_policy_perf = DIV_ROUND_UP(freq, scaling);
- }
+ freq = max_policy_perf * perf_ctl_scaling;
+ max_policy_perf = DIV_ROUND_UP(freq, scaling);
+ freq = min_policy_perf * perf_ctl_scaling;
+ min_policy_perf = DIV_ROUND_UP(freq, scaling);
}
pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -3349,6 +3373,16 @@ static bool intel_pstate_hwp_is_enabled(void)
return !!(value & 0x1);
}
+static const struct x86_cpu_id intel_epp_balance_perf[] = {
+ /*
+ * Set EPP value as 102, this is the max suggested EPP
+ * which can result in one core turbo frequency for
+ * AlderLake Mobile CPUs.
+ */
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
+ {}
+};
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3438,6 +3472,13 @@ hwp_cpu_matched:
intel_pstate_sysfs_expose_params();
+ if (hwp_active) {
+ const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+
+ if (id)
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+ }
+
mutex_lock(&intel_pstate_driver_lock);
rc = intel_pstate_register_driver(default_driver);
mutex_unlock(&intel_pstate_driver_lock);
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 8ddbd0c5ce37..0a94c56ddad2 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -36,6 +36,8 @@ enum {
struct mtk_cpufreq_data {
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
+ struct resource *res;
+ void __iomem *base;
int nr_opp;
};
@@ -156,6 +158,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
{
struct mtk_cpufreq_data *data;
struct device *dev = &pdev->dev;
+ struct resource *res;
void __iomem *base;
int ret, i;
int index;
@@ -170,9 +173,26 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
if (index < 0)
return index;
- base = devm_platform_ioremap_resource(pdev, index);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (!res) {
+ dev_err(dev, "failed to get mem resource %d\n", index);
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), res->name)) {
+ dev_err(dev, "failed to request resource %pR\n", res);
+ return -EBUSY;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(dev, "failed to map resource %pR\n", res);
+ ret = -ENOMEM;
+ goto release_region;
+ }
+
+ data->base = base;
+ data->res = res;
for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
data->reg_bases[i] = base + offsets[i];
@@ -187,6 +207,9 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
policy->driver_data = data;
return 0;
+release_region:
+ release_mem_region(res->start, resource_size(res));
+ return ret;
}
static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
@@ -233,9 +256,13 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct mtk_cpufreq_data *data = policy->driver_data;
+ struct resource *res = data->res;
+ void __iomem *base = data->base;
/* HW should be in paused state now */
writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]);
+ iounmap(base);
+ release_mem_region(res->start, resource_size(res));
return 0;
}
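
The switch away from devm_platform_ioremap_resource() matters because devm-managed mappings are only released at device unbind, while this driver must release the per-policy mapping in its ->exit() callback. A sketch of the acquire/release pairing under that assumption (names mirror the driver; error handling trimmed):

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *acquire_regs(struct resource *res)
{
	if (!request_mem_region(res->start, resource_size(res), res->name))
		return NULL;

	return ioremap(res->start, resource_size(res));	/* may also fail */
}

static void release_regs(struct resource *res, void __iomem *base)
{
	iounmap(base);			/* release in reverse order */
	release_mem_region(res->start, resource_size(res));
}
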
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index a2be0df7e174..05f3d7876e44 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -46,6 +46,7 @@ struct qcom_cpufreq_data {
*/
struct mutex throttle_lock;
int throttle_irq;
+ char irq_name[15];
bool cancel_throttle;
struct delayed_work throttle_work;
struct cpufreq_policy *policy;
@@ -275,10 +276,10 @@ static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
- unsigned long max_capacity, capacity, freq_hz, throttled_freq;
struct cpufreq_policy *policy = data->policy;
int cpu = cpumask_first(policy->cpus);
struct device *dev = get_cpu_device(cpu);
+ unsigned long freq_hz, throttled_freq;
struct dev_pm_opp *opp;
unsigned int freq;
@@ -295,16 +296,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
throttled_freq = freq_hz / HZ_PER_KHZ;
- /* Update thermal pressure */
-
- max_capacity = arch_scale_cpu_capacity(cpu);
- capacity = mult_frac(max_capacity, throttled_freq, policy->cpuinfo.max_freq);
-
- /* Don't pass boost capacity to scheduler */
- if (capacity > max_capacity)
- capacity = max_capacity;
-
- arch_set_thermal_pressure(policy->cpus, max_capacity - capacity);
+ /* Update thermal pressure (the boost frequencies are accepted) */
+ arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
/*
* In the unlikely case policy is unregistered do not enable
@@ -342,9 +335,9 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
/* Disable interrupt and enable polling */
disable_irq_nosync(c_data->throttle_irq);
- qcom_lmh_dcvs_notify(c_data);
+ schedule_delayed_work(&c_data->throttle_work, 0);
- return 0;
+ return IRQ_HANDLED;
}
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
@@ -375,16 +368,17 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
struct qcom_cpufreq_data *data = policy->driver_data;
struct platform_device *pdev = cpufreq_get_driver_data();
- char irq_name[15];
int ret;
/*
* Look for LMh interrupt. If no interrupt line is specified /
* if there is an error, allow cpufreq to be enabled as usual.
*/
- data->throttle_irq = platform_get_irq(pdev, index);
- if (data->throttle_irq <= 0)
- return data->throttle_irq == -EPROBE_DEFER ? -EPROBE_DEFER : 0;
+ data->throttle_irq = platform_get_irq_optional(pdev, index);
+ if (data->throttle_irq == -ENXIO)
+ return 0;
+ if (data->throttle_irq < 0)
+ return data->throttle_irq;
data->cancel_throttle = false;
data->policy = policy;
@@ -392,14 +386,19 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
mutex_init(&data->throttle_lock);
INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
- snprintf(irq_name, sizeof(irq_name), "dcvsh-irq-%u", policy->cpu);
+ snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
- IRQF_ONESHOT, irq_name, data);
+ IRQF_ONESHOT, data->irq_name, data);
if (ret) {
- dev_err(&pdev->dev, "Error registering %s: %d\n", irq_name, ret);
+ dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
return 0;
}
+ ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
+ data->irq_name, data->throttle_irq);
+
return 0;
}
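
Moving irq_name into struct qcom_cpufreq_data fixes a use-after-return: request_threaded_irq() stores the name pointer (later shown in /proc/interrupts) rather than copying the string. The pattern being removed, as a sketch:

#include <linux/interrupt.h>

/* Buggy variant: the kernel keeps a pointer to a stack buffer. */
static int register_throttle_irq(int irq, irq_handler_t thread_fn,
				 void *data, unsigned int cpu)
{
	char irq_name[15];	/* dangles once this function returns */

	snprintf(irq_name, sizeof(irq_name), "dcvsh-irq-%u", cpu);
	return request_threaded_irq(irq, NULL, thread_fn, IRQF_ONESHOT,
				    irq_name, data);
}
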
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 2e5670446991..c4922684f305 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -34,7 +34,7 @@
* 1) Energy break even point
* 2) Performance impact
* 3) Latency tolerance (from pmqos infrastructure)
- * These these three factors are treated independently.
+ * These three factors are treated independently.
*
* Energy break even point
* -----------------------
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 469e18547d06..2b496a53cbca 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -335,6 +335,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
&attr_default_status.attr,
NULL
};
+ATTRIBUTE_GROUPS(cpuidle_state_default);
struct cpuidle_state_kobj {
struct cpuidle_state *state;
@@ -448,7 +449,7 @@ static void cpuidle_state_sysfs_release(struct kobject *kobj)
static struct kobj_type ktype_state_cpuidle = {
.sysfs_ops = &cpuidle_state_sysfs_ops,
- .default_attrs = cpuidle_state_default_attrs,
+ .default_groups = cpuidle_state_default_groups,
.release = cpuidle_state_sysfs_release,
};
@@ -505,7 +506,7 @@ error_state:
}
/**
- * cpuidle_remove_driver_sysfs - removes the cpuidle states sysfs attributes
+ * cpuidle_remove_state_sysfs - removes the cpuidle states sysfs attributes
* @device: the target device
*/
static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
@@ -591,10 +592,11 @@ static struct attribute *cpuidle_driver_default_attrs[] = {
&attr_driver_name.attr,
NULL
};
+ATTRIBUTE_GROUPS(cpuidle_driver_default);
static struct kobj_type ktype_driver_cpuidle = {
.sysfs_ops = &cpuidle_driver_sysfs_ops,
- .default_attrs = cpuidle_driver_default_attrs,
+ .default_groups = cpuidle_driver_default_groups,
.release = cpuidle_driver_sysfs_release,
};
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 51690e73153a..4f705674f94f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -213,6 +213,18 @@ config CRYPTO_AES_S390
key sizes and XTS mode is hardware accelerated for 256 and
512 bit keys.
+config CRYPTO_CHACHA_S390
+ tristate "ChaCha20 stream cipher"
+ depends on S390
+ select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
+ select CRYPTO_CHACHA20
+ help
+ This is the s390 SIMD implementation of the ChaCha20 stream
+ cipher (RFC 7539).
+
+ It is available as of z13.
+
config S390_PRNG
tristate "Pseudo random number generator device driver"
depends on S390
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 00194d1d9ae6..d8623c7e0d1d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -106,6 +106,24 @@ static const struct ce_variant ce_a64_variant = {
.trng = CE_ID_NOTSUPP,
};
+static const struct ce_variant ce_d1_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ALG_SHA384, CE_ALG_SHA512
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ { "ram", 0, 400000000 },
+ },
+ .esr = ESR_D1,
+ .prng = CE_ALG_PRNG,
+ .trng = CE_ALG_TRNG,
+};
+
static const struct ce_variant ce_r40_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
@@ -192,6 +210,7 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
break;
case ESR_A64:
+ case ESR_D1:
case ESR_H5:
case ESR_R40:
v >>= (flow * 4);
@@ -990,6 +1009,8 @@ static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
.data = &ce_h3_variant },
{ .compatible = "allwinner,sun8i-r40-crypto",
.data = &ce_r40_variant },
+ { .compatible = "allwinner,sun20i-d1-crypto",
+ .data = &ce_d1_variant },
{ .compatible = "allwinner,sun50i-a64-crypto",
.data = &ce_a64_variant },
{ .compatible = "allwinner,sun50i-h5-crypto",
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index cec781d5063c..624a5926f21f 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -94,6 +94,7 @@
#define ESR_R40 2
#define ESR_H5 3
#define ESR_H6 4
+#define ESR_D1 5
#define PRNG_DATA_SIZE (160 / 8)
#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8)
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 9391ccc03382..fe0558403191 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -960,6 +960,7 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
ctx = crypto_tfm_ctx(areq->tfm);
dd->areq = areq;
+ dd->ctx = ctx;
start_async = (areq != new_areq);
dd->is_async = start_async;
@@ -1274,7 +1275,6 @@ static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.dd = dd;
- ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_start;
return 0;
@@ -1291,7 +1291,6 @@ static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.dd = dd;
- ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_ctr_start;
return 0;
@@ -1783,7 +1782,6 @@ static int atmel_aes_gcm_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.dd = dd;
- ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_gcm_start;
return 0;
@@ -1927,7 +1925,6 @@ static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
crypto_skcipher_reqsize(ctx->fallback_tfm));
ctx->base.dd = dd;
- ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_xts_start;
return 0;
@@ -2154,7 +2151,6 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
auth_reqsize));
ctx->base.dd = dd;
- ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_authenc_start;
return 0;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 8697ae53b063..d3d8bb0a6990 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1533,6 +1533,9 @@ static int aead_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
aead_unmap(ctx->jrdev, rctx->edesc, req);
kfree(rctx->edesc);
@@ -1762,6 +1765,9 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
skcipher_unmap(ctx->jrdev, rctx->edesc, req);
kfree(rctx->edesc);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 8b8ed77d8715..6753f0e6e55d 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -5470,7 +5470,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
dpaa2_fd_set_flc(&fd, req->flc_dma);
- ppriv = this_cpu_ptr(priv->ppriv);
+ ppriv = raw_cpu_ptr(priv->ppriv);
for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
&fd);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e8a6d8bc43b5..36ef738e4a18 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -765,6 +765,9 @@ static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
ahash_unmap(jrdev, state->edesc, req, 0);
kfree(state->edesc);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index bf6275ffc4aa..886727576710 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -380,6 +380,9 @@ static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
rsa_pub_unmap(jrdev, req_ctx->edesc, req);
rsa_io_unmap(jrdev, req_ctx->edesc, req);
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 112b12a32542..c246920e6f54 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -104,17 +104,14 @@ static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
u32 nr_queues)
{
u32 i;
- size_t size;
int ret;
struct pending_queue *queue = NULL;
pqinfo->nr_queues = nr_queues;
pqinfo->qlen = qlen;
- size = (qlen * sizeof(struct pending_entry));
-
for_each_pending_queue(pqinfo, queue, i) {
- queue->head = kzalloc((size), GFP_KERNEL);
+ queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
if (!queue->head) {
ret = -ENOMEM;
goto pending_qfail;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 9ce4b68e9c48..c531d13d971f 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -31,7 +31,7 @@
#define MAX_CCPS 32
 /* Limit CCP use to a specified number of queues per device */
-static unsigned int nqueues = 0;
+static unsigned int nqueues;
module_param(nqueues, uint, 0444);
MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)");
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e09925d86bf3..8fd774a10edc 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -22,6 +22,7 @@
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/cpufeature.h>
+#include <linux/fs.h>
#include <asm/smp.h>
@@ -43,6 +44,14 @@ static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+static char *init_ex_path;
+module_param(init_ex_path, charp, 0444);
+MODULE_PARM_DESC(init_ex_path, " Path for INIT_EX data; if set try INIT_EX");
+
+static bool psp_init_on_probe = true;
+module_param(psp_init_on_probe, bool, 0444);
+MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init; otherwise, it will be initialized on the first command requiring it");
+
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
@@ -58,6 +67,14 @@ static int psp_timeout;
#define SEV_ES_TMR_SIZE (1024 * 1024)
static void *sev_es_tmr;
+/* INIT_EX NV Storage:
+ * The NV Storage is a 32KB area and must be 4KB page aligned. Use the page
+ * allocator to allocate the memory, which will return aligned memory for the
+ * specified allocation order.
+ */
+#define NV_LENGTH (32 * 1024)
+static void *sev_init_ex_buffer;
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -107,6 +124,7 @@ static int sev_cmd_buffer_len(int cmd)
{
switch (cmd) {
case SEV_CMD_INIT: return sizeof(struct sev_data_init);
+ case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex);
case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
@@ -141,6 +159,112 @@ static int sev_cmd_buffer_len(int cmd)
return 0;
}
+static void *sev_fw_alloc(unsigned long len)
+{
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL, get_order(len));
+ if (!page)
+ return NULL;
+
+ return page_address(page);
+}
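
alloc_pages() returns physically contiguous memory naturally aligned to the allocation order, which is what satisfies the INIT_EX 4KB-alignment requirement above. A usage sketch, assuming 4KB pages:

static void nv_buffer_demo(void)
{
	/* NV_LENGTH = 32KB, so get_order(NV_LENGTH) == 3 (2^3 pages) */
	void *buf = sev_fw_alloc(NV_LENGTH);

	if (buf)
		free_pages((unsigned long)buf, get_order(NV_LENGTH));
}
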
+
+static int sev_read_init_ex_file(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct file *fp;
+ ssize_t nread;
+
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (!sev_init_ex_buffer)
+ return -EOPNOTSUPP;
+
+ fp = filp_open(init_ex_path, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ int ret = PTR_ERR(fp);
+
+ dev_err(sev->dev,
+ "SEV: could not open %s for read, error %d\n",
+ init_ex_path, ret);
+ return ret;
+ }
+
+ nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
+ if (nread != NV_LENGTH) {
+ dev_err(sev->dev,
+			"SEV: failed to read %u bytes from the non-volatile memory area, ret %ld\n",
+ NV_LENGTH, nread);
+ return -EIO;
+ }
+
+ dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
+ filp_close(fp, NULL);
+
+ return 0;
+}
+
+static void sev_write_init_ex_file(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct file *fp;
+ loff_t offset = 0;
+ ssize_t nwrite;
+
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (!sev_init_ex_buffer)
+ return;
+
+ fp = filp_open(init_ex_path, O_CREAT | O_WRONLY, 0600);
+ if (IS_ERR(fp)) {
+ dev_err(sev->dev,
+ "SEV: could not open file for write, error %ld\n",
+ PTR_ERR(fp));
+ return;
+ }
+
+ nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
+ vfs_fsync(fp, 0);
+ filp_close(fp, NULL);
+
+ if (nwrite != NV_LENGTH) {
+ dev_err(sev->dev,
+			"SEV: failed to write %u bytes to the non-volatile memory area, ret %ld\n",
+ NV_LENGTH, nwrite);
+ return;
+ }
+
+ dev_dbg(sev->dev, "SEV: write successful to NV file\n");
+}
+
+static void sev_write_init_ex_file_if_required(int cmd_id)
+{
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (!sev_init_ex_buffer)
+ return;
+
+ /*
+	 * Only a few platform commands modify the SPI/NV area, and none of the
+	 * non-platform commands do: FACTORY_RESET, INIT(_EX), PDH_GEN,
+	 * PEK_CERT_IMPORT, and PEK_GEN.
+ */
+ switch (cmd_id) {
+ case SEV_CMD_FACTORY_RESET:
+ case SEV_CMD_INIT_EX:
+ case SEV_CMD_PDH_GEN:
+ case SEV_CMD_PEK_CERT_IMPORT:
+ case SEV_CMD_PEK_GEN:
+ break;
+ default:
+ return;
+ }
+
+ sev_write_init_ex_file();
+}
+
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
struct psp_device *psp = psp_master;
@@ -210,6 +334,8 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
cmd, reg & PSP_CMDRESP_ERR_MASK);
ret = -EIO;
+ } else {
+ sev_write_init_ex_file_if_required(cmd);
}
print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
@@ -236,37 +362,85 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret)
return rc;
}
-static int __sev_platform_init_locked(int *error)
+static int __sev_init_locked(int *error)
{
- struct psp_device *psp = psp_master;
struct sev_data_init data;
- struct sev_device *sev;
- int rc = 0;
- if (!psp || !psp->sev_data)
- return -ENODEV;
+ memset(&data, 0, sizeof(data));
+ if (sev_es_tmr) {
+ /*
+ * Do not include the encryption mask on the physical
+ * address of the TMR (firmware should clear it anyway).
+ */
+ data.tmr_address = __pa(sev_es_tmr);
- sev = psp->sev_data;
+ data.flags |= SEV_INIT_FLAGS_SEV_ES;
+ data.tmr_len = SEV_ES_TMR_SIZE;
+ }
- if (sev->state == SEV_STATE_INIT)
- return 0;
+ return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
+}
+
+static int __sev_init_ex_locked(int *error)
+{
+ struct sev_data_init_ex data;
+ int ret;
memset(&data, 0, sizeof(data));
- if (sev_es_tmr) {
- u64 tmr_pa;
+ data.length = sizeof(data);
+ data.nv_address = __psp_pa(sev_init_ex_buffer);
+ data.nv_len = NV_LENGTH;
+
+ ret = sev_read_init_ex_file();
+ if (ret)
+ return ret;
+ if (sev_es_tmr) {
/*
* Do not include the encryption mask on the physical
* address of the TMR (firmware should clear it anyway).
*/
- tmr_pa = __pa(sev_es_tmr);
+ data.tmr_address = __pa(sev_es_tmr);
data.flags |= SEV_INIT_FLAGS_SEV_ES;
- data.tmr_address = tmr_pa;
data.tmr_len = SEV_ES_TMR_SIZE;
}
- rc = __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
+ return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error);
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ int rc, psp_ret;
+ int (*init_function)(int *error);
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ sev = psp->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ init_function = sev_init_ex_buffer ? __sev_init_ex_locked :
+ __sev_init_locked;
+ rc = init_function(&psp_ret);
+ if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
+ /*
+ * Initialization command returned an integrity check failure
+ * status code, meaning that firmware load and validation of SEV
+ * related persistent data has failed. Retrying the
+ * initialization function should succeed by replacing the state
+ * with a reset state.
+ */
+ dev_dbg(sev->dev, "SEV: retrying INIT command");
+ rc = init_function(&psp_ret);
+ }
+ if (error)
+ *error = psp_ret;
+
if (rc)
return rc;
@@ -280,7 +454,10 @@ static int __sev_platform_init_locked(int *error)
dev_dbg(sev->dev, "SEV firmware initialized\n");
- return rc;
+ dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ return 0;
}
int sev_platform_init(int *error)
@@ -1034,6 +1211,12 @@ static void sev_firmware_shutdown(struct sev_device *sev)
get_order(SEV_ES_TMR_SIZE));
sev_es_tmr = NULL;
}
+
+ if (sev_init_ex_buffer) {
+ free_pages((unsigned long)sev_init_ex_buffer,
+ get_order(NV_LENGTH));
+ sev_init_ex_buffer = NULL;
+ }
}
void sev_dev_destroy(struct psp_device *psp)
@@ -1064,7 +1247,6 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
- struct page *tmr_page;
int error, rc;
if (!sev)
@@ -1079,37 +1261,32 @@ void sev_pci_init(void)
sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
+	/*
+	 * If an init_ex_path is provided, rely on INIT_EX for PSP
+	 * initialization instead of INIT.
+	 */
+ if (init_ex_path) {
+ sev_init_ex_buffer = sev_fw_alloc(NV_LENGTH);
+ if (!sev_init_ex_buffer) {
+ dev_err(sev->dev,
+ "SEV: INIT_EX NV memory allocation failed\n");
+ goto err;
+ }
+ }
+
/* Obtain the TMR memory area for SEV-ES use */
- tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE));
- if (tmr_page) {
- sev_es_tmr = page_address(tmr_page);
- } else {
- sev_es_tmr = NULL;
+ sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
+ if (!sev_es_tmr)
dev_warn(sev->dev,
"SEV: TMR allocation failed, SEV-ES support unavailable\n");
- }
-
- /* Initialize the platform */
- rc = sev_platform_init(&error);
- if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
- /*
- * INIT command returned an integrity check failure
- * status code, meaning that firmware load and
- * validation of SEV related persistent data has
- * failed and persistent state has been erased.
- * Retrying INIT command here should succeed.
- */
- dev_dbg(sev->dev, "SEV: retrying INIT command");
- rc = sev_platform_init(&error);
- }
- if (rc) {
- dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error);
+ if (!psp_init_on_probe)
return;
- }
- dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
- sev->api_minor, sev->build);
+ /* Initialize the platform */
+ rc = sev_platform_init(&error);
+ if (rc)
+ dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
+ error, rc);
return;
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 33fb27745d52..887162df50f9 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -101,7 +101,6 @@ void cc_req_mgr_fini(struct cc_drvdata *drvdata)
dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
#ifdef COMP_IN_WQ
- flush_workqueue(req_mgr_h->workq);
destroy_workqueue(req_mgr_h->workq);
#else
/* Kill tasklet */
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index a032c192ef1d..97d54c1465c2 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1177,13 +1177,10 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
static void hpre_key_to_big_end(u8 *data, int len)
{
int i, j;
- u8 tmp;
for (i = 0; i < len / 2; i++) {
j = len - i - 1;
- tmp = data[j];
- data[j] = data[i];
- data[i] = tmp;
+ swap(data[j], data[i]);
}
}
@@ -1865,7 +1862,7 @@ static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
*/
if (memcmp(ptr, p, ctx->key_sz) == 0) {
dev_err(dev, "gx is p!\n");
- return -EINVAL;
+ goto err;
} else if (memcmp(ptr, p, ctx->key_sz) > 0) {
hpre_curve25519_src_modulo_p(ptr);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 65a641396c07..ebfab3e14499 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -103,7 +103,7 @@
#define HPRE_QM_PM_FLR BIT(11)
#define HPRE_QM_SRIOV_FLR BIT(12)
-#define HPRE_SHAPER_TYPE_RATE 128
+#define HPRE_SHAPER_TYPE_RATE 640
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 52d6cca6262e..c5b84a5ea350 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -89,6 +89,10 @@
#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17
+#define QM_AEQE_CQN_MASK GENMASK(15, 0)
+#define QM_CQ_OVERFLOW 0
+#define QM_EQ_OVERFLOW 1
+#define QM_CQE_ERROR 2
#define QM_DOORBELL_CMD_SQ 0
#define QM_DOORBELL_CMD_CQ 1
@@ -122,6 +126,8 @@
#define QM_CQC_VFT 0x1
#define QM_VFT_CFG 0x100060
#define QM_VFT_CFG_OP_ENABLE 0x100054
+#define QM_PM_CTRL 0x100148
+#define QM_IDLE_DISABLE BIT(9)
#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
@@ -501,10 +507,30 @@ static const char * const qp_s[] = {
"none", "init", "start", "stop", "close",
};
-static const u32 typical_qos_val[QM_QOS_TYPICAL_NUM] = {100, 250, 500, 1000,
- 10000, 25000, 50000, 100000};
-static const u32 typical_qos_cbs_s[QM_QOS_TYPICAL_NUM] = {9, 10, 11, 12, 16,
- 17, 18, 19};
+struct qm_typical_qos_table {
+ u32 start;
+ u32 end;
+ u32 val;
+};
+
+/* the qos step is 100 */
+static struct qm_typical_qos_table shaper_cir_s[] = {
+ {100, 100, 4},
+ {200, 200, 3},
+ {300, 500, 2},
+ {600, 1000, 1},
+ {1100, 100000, 0},
+};
+
+static struct qm_typical_qos_table shaper_cbs_s[] = {
+ {100, 200, 9},
+ {300, 500, 11},
+ {600, 1000, 12},
+ {1100, 10000, 16},
+ {10100, 25000, 17},
+ {25100, 50000, 18},
+ {50100, 100000, 19}
+};
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
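
The two tables above replace the old fixed sample points with inclusive [start, end] buckets: a requested rate of 400, for example, lands in the 300..500 rows, giving cir_s = 2 and cbs_s = 11. A sketch of the shared lookup shape (the driver open-codes one loop per table):

#include <linux/types.h>

static u32 qos_table_lookup(const struct qm_typical_qos_table *t,
			    int n, u32 ir, u32 dfl)
{
	int i;

	for (i = 0; i < n; i++)
		if (ir >= t[i].start && ir <= t[i].end)
			return t[i].val;

	return dfl;	/* e.g. QM_SHAPER_MIN_CBS_S for the cbs table */
}
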
@@ -585,6 +611,75 @@ static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
return avail;
}
+static u32 qm_get_hw_error_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+}
+
+static u32 qm_get_dev_err_status(struct hisi_qm *qm)
+{
+ return qm->err_ini->get_dev_hw_err_status(qm);
+}
+
+/* Check if the error causes the master ooo block */
+static int qm_check_dev_error(struct hisi_qm *qm)
+{
+ u32 val, dev_val;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ val = qm_get_hw_error_status(qm);
+ dev_val = qm_get_dev_err_status(qm);
+
+ if (qm->ver < QM_HW_V3)
+ return (val & QM_ECC_MBIT) ||
+ (dev_val & qm->err_info.ecc_2bits_mask);
+
+ return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
+ (dev_val & (~qm->err_info.dev_ce_mask));
+}
+
+static int qm_wait_reset_finish(struct hisi_qm *qm)
+{
+ int delay = 0;
+
+ /* All reset requests need to be queued for processing */
+ while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qm_reset_prepare_ready(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ /*
+	 * On Kunpeng920, PF and VF on the host do not support
+	 * resetting at the same time.
+ */
+ if (qm->ver < QM_HW_V3)
+ return qm_wait_reset_finish(pf_qm);
+
+ return qm_wait_reset_finish(qm);
+}
+
+static void qm_reset_bit_clear(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ if (qm->ver < QM_HW_V3)
+ clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
+
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
+}
+
static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
u64 base, u16 queue, bool op)
{
@@ -707,6 +802,19 @@ static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
qm->ops->qm_db(qm, qn, cmd, index, priority);
}
+static void qm_disable_clock_gate(struct hisi_qm *qm)
+{
+ u32 val;
+
+	/* If qm enables clock gating on Kunpeng930, qos will be inaccurate. */
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl(qm->io_base + QM_PM_CTRL);
+ val |= QM_IDLE_DISABLE;
+ writel(val, qm->io_base + QM_PM_CTRL);
+}
+
static int qm_dev_mem_reset(struct hisi_qm *qm)
{
u32 val;
@@ -899,24 +1007,71 @@ static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
mb();
}
-static irqreturn_t qm_aeq_irq(int irq, void *data)
+static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
+{
+ struct hisi_qp *qp = &qm->qp_array[qp_id];
+
+ qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
+ hisi_qm_stop_qp(qp);
+ qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
+}
+
+static void qm_reset_function(struct hisi_qm *qm)
+{
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ if (qm_check_dev_error(pf_qm))
+ return;
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ dev_err(dev, "reset function not ready\n");
+ return;
+ }
+
+ ret = hisi_qm_stop(qm, QM_FLR);
+ if (ret) {
+ dev_err(dev, "failed to stop qm when reset function\n");
+ goto clear_bit;
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+ dev_err(dev, "failed to start qm when reset function\n");
+
+clear_bit:
+ qm_reset_bit_clear(qm);
+}
+
+static irqreturn_t qm_aeq_thread(int irq, void *data)
{
struct hisi_qm *qm = data;
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
- u32 type;
-
- atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
- if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
- return IRQ_NONE;
+ u32 type, qp_id;
while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
- if (type < ARRAY_SIZE(qm_fifo_overflow))
- dev_err(&qm->pdev->dev, "%s overflow\n",
- qm_fifo_overflow[type]);
- else
+ qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
+
+ switch (type) {
+ case QM_EQ_OVERFLOW:
+ dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
+ qm_reset_function(qm);
+ return IRQ_HANDLED;
+ case QM_CQ_OVERFLOW:
+ dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
+ qp_id);
+ fallthrough;
+ case QM_CQE_ERROR:
+ qm_disable_qp(qm, qp_id);
+ break;
+ default:
dev_err(&qm->pdev->dev, "unknown error type %u\n",
type);
+ break;
+ }
if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
qm->status.aeqc_phase = !qm->status.aeqc_phase;
@@ -926,13 +1081,24 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
aeqe++;
qm->status.aeq_head++;
}
-
- qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
}
+ qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
+
return IRQ_HANDLED;
}
+static irqreturn_t qm_aeq_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+
+ atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
+ if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
+ return IRQ_NONE;
+
+ return IRQ_WAKE_THREAD;
+}
+
static void qm_irq_unregister(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -988,12 +1154,14 @@ static void qm_init_prefetch(struct hisi_qm *qm)
}
/*
+ * acc_shaper_para_calc() computes the IR value from the qos formula; the
+ * return value is the expected qos.
* the formula:
* IR = X Mbps if ir = 1 means IR = 100 Mbps, if ir = 10000 means = 10Gbps
*
- * IR_b * (2 ^ IR_u) * 8
- * IR(Mbps) * 10 ^ -3 = -------------------------
- * Tick * (2 ^ IR_s)
+ * IR_b * (2 ^ IR_u) * 8000
+ * IR(Mbps) = -------------------------
+ * Tick * (2 ^ IR_s)
*/
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
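
A worked instance of the corrected formula, runnable in userspace; the Tick value here is hypothetical and only illustrates the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long ir_b = 100, ir_u = 0, ir_s = 1, tick = 400;
	unsigned long long ir = ir_b * (1ULL << ir_u) * 8000 /
				(tick * (1ULL << ir_s));

	printf("IR = %llu Mbps\n", ir);	/* 800000 / 800 = 1000 */
	return 0;
}
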
@@ -1003,17 +1171,28 @@ static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
static u32 acc_shaper_calc_cbs_s(u32 ir)
{
+ int table_size = ARRAY_SIZE(shaper_cbs_s);
int i;
- if (ir < typical_qos_val[0])
- return QM_SHAPER_MIN_CBS_S;
+ for (i = 0; i < table_size; i++) {
+ if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
+ return shaper_cbs_s[i].val;
+ }
+
+ return QM_SHAPER_MIN_CBS_S;
+}
+
+static u32 acc_shaper_calc_cir_s(u32 ir)
+{
+ int table_size = ARRAY_SIZE(shaper_cir_s);
+ int i;
- for (i = 1; i < QM_QOS_TYPICAL_NUM; i++) {
- if (ir >= typical_qos_val[i - 1] && ir < typical_qos_val[i])
- return typical_qos_cbs_s[i - 1];
+ for (i = 0; i < table_size; i++) {
+ if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
+ return shaper_cir_s[i].val;
}
- return typical_qos_cbs_s[QM_QOS_TYPICAL_NUM - 1];
+ return 0;
}
static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
@@ -1022,25 +1201,18 @@ static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
u32 error_rate;
factor->cbs_s = acc_shaper_calc_cbs_s(ir);
+ cir_s = acc_shaper_calc_cir_s(ir);
for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
- for (cir_s = 0; cir_s <= QM_QOS_MAX_CIR_S; cir_s++) {
- /** the formula is changed to:
- * IR_b * (2 ^ IR_u) * DIVISOR_CLK
- * IR(Mbps) = -------------------------
- * 768 * (2 ^ IR_s)
- */
- ir_calc = acc_shaper_para_calc(cir_b, cir_u,
- cir_s);
- error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
- if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
- factor->cir_b = cir_b;
- factor->cir_u = cir_u;
- factor->cir_s = cir_s;
-
- return 0;
- }
+ ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
+
+ error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
+ if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
+ factor->cir_b = cir_b;
+ factor->cir_u = cir_u;
+ factor->cir_s = cir_s;
+ return 0;
}
}
}
@@ -1126,10 +1298,10 @@ static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
{
+ u32 qos = qm->factor[fun_num].func_qos;
int ret, i;
- qm->factor[fun_num].func_qos = QM_QOS_MAX_VAL;
- ret = qm_get_shaper_para(QM_QOS_MAX_VAL * QM_QOS_RATE, &qm->factor[fun_num]);
+ ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
if (ret) {
dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
return ret;
@@ -2082,35 +2254,6 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
-static u32 qm_get_hw_error_status(struct hisi_qm *qm)
-{
- return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
-}
-
-static u32 qm_get_dev_err_status(struct hisi_qm *qm)
-{
- return qm->err_ini->get_dev_hw_err_status(qm);
-}
-
-/* Check if the error causes the master ooo block */
-static int qm_check_dev_error(struct hisi_qm *qm)
-{
- u32 val, dev_val;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- val = qm_get_hw_error_status(qm);
- dev_val = qm_get_dev_err_status(qm);
-
- if (qm->ver < QM_HW_V3)
- return (val & QM_ECC_MBIT) ||
- (dev_val & qm->err_info.ecc_2bits_mask);
-
- return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
- (dev_val & (~qm->err_info.dev_ce_mask));
-}
-
static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
{
struct qm_mailbox mailbox;
@@ -3399,6 +3542,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
dma_free_coherent(dev, qm->qdma.size,
qm->qdma.va, qm->qdma.dma);
}
+ up_write(&qm->qps_lock);
qm_irq_unregister(qm);
hisi_qm_pci_uninit(qm);
@@ -3406,8 +3550,6 @@ void hisi_qm_uninit(struct hisi_qm *qm)
uacce_remove(qm->uacce);
qm->uacce = NULL;
}
-
- up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
@@ -3473,6 +3615,22 @@ static void qm_init_eq_aeq_status(struct hisi_qm *qm)
status->aeqc_phase = true;
}
+static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
+{
+ /* Clear eq/aeq interrupt source */
+ qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
+ qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+
+ writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
+ writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
+}
+
+static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
+{
+ writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
+ writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
+}
+
static int qm_eq_ctx_cfg(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
@@ -3556,10 +3714,6 @@ static int __hisi_qm_start(struct hisi_qm *qm)
WARN_ON(!qm->qdma.va);
if (qm->fun_type == QM_HW_PF) {
- ret = qm_dev_mem_reset(qm);
- if (ret)
- return ret;
-
ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
if (ret)
return ret;
@@ -3578,9 +3732,7 @@ static int __hisi_qm_start(struct hisi_qm *qm)
return ret;
qm_init_prefetch(qm);
-
- writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
- writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
+ qm_enable_eq_aeq_interrupts(qm);
return 0;
}
@@ -3728,10 +3880,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
}
- /* Mask eq and aeq irq */
- writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
- writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
-
+ qm_disable_eq_aeq_interrupts(qm);
if (qm->fun_type == QM_HW_PF) {
ret = hisi_qm_set_vft(qm, 0, 0, 0);
if (ret < 0) {
@@ -4231,66 +4380,69 @@ static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
return 0;
}
+static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
+ unsigned long *val,
+ unsigned int *fun_index)
+{
+ char tbuf_bdf[QM_DBG_READ_LEN] = {0};
+ char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
+ u32 tmp1, device, function;
+ int ret, bus;
+
+ ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf);
+ if (ret != QM_QOS_PARAM_NUM)
+ return -EINVAL;
+
+ ret = qm_qos_value_init(val_buf, val);
+ if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) {
+		pci_err(qm->pdev, "invalid qos value, please set a value in 1~1000!\n");
+ return -EINVAL;
+ }
+
+ ret = sscanf(tbuf_bdf, "%u:%x:%u.%u", &tmp1, &bus, &device, &function);
+ if (ret != QM_QOS_BDF_PARAM_NUM) {
+		pci_err(qm->pdev, "invalid pci bdf value!\n");
+ return -EINVAL;
+ }
+
+ *fun_index = PCI_DEVFN(device, function);
+
+ return 0;
+}
+
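
qm_get_qos_value() expects a write of the form "<domain>:<bus>:<device>.<function> <qos>". A userspace sketch of the same parse (the sample BDF string is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int dom, bus, dev, fn;
	const char *bdf = "0000:81:00.0";	/* hypothetical function */

	if (sscanf(bdf, "%u:%x:%u.%u", &dom, &bus, &dev, &fn) == 4)
		printf("fun_index = %u\n", (dev << 3) | fn);	/* PCI_DEVFN */

	return 0;
}
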
static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct hisi_qm *qm = filp->private_data;
char tbuf[QM_DBG_READ_LEN];
- int tmp1, bus, device, function;
- char tbuf_bdf[QM_DBG_READ_LEN] = {0};
- char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
unsigned int fun_index;
- unsigned long val = 0;
+ unsigned long val;
int len, ret;
if (qm->fun_type == QM_HW_VF)
return -EINVAL;
- /* Mailbox and reset cannot be operated at the same time */
- if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
- pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
- return -EAGAIN;
- }
-
- if (*pos != 0) {
- ret = 0;
- goto err_get_status;
- }
+ if (*pos != 0)
+ return 0;
- if (count >= QM_DBG_READ_LEN) {
- ret = -ENOSPC;
- goto err_get_status;
- }
+ if (count >= QM_DBG_READ_LEN)
+ return -ENOSPC;
len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
- if (len < 0) {
- ret = len;
- goto err_get_status;
- }
+ if (len < 0)
+ return len;
tbuf[len] = '\0';
- ret = sscanf(tbuf, "%s %s", tbuf_bdf, val_buf);
- if (ret != QM_QOS_PARAM_NUM) {
- ret = -EINVAL;
- goto err_get_status;
- }
-
- ret = qm_qos_value_init(val_buf, &val);
- if (val == 0 || val > QM_QOS_MAX_VAL || ret) {
- pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n");
- ret = -EINVAL;
- goto err_get_status;
- }
+ ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
+ if (ret)
+ return ret;
- ret = sscanf(tbuf_bdf, "%d:%x:%d.%d", &tmp1, &bus, &device, &function);
- if (ret != QM_QOS_BDF_PARAM_NUM) {
- pci_err(qm->pdev, "input pci bdf value is error!\n");
- ret = -EINVAL;
- goto err_get_status;
+ /* Mailbox and reset cannot be operated at the same time */
+ if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
+ pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
+ return -EAGAIN;
}
- fun_index = device * 8 + function;
-
ret = qm_pm_get_sync(qm);
if (ret) {
ret = -EINVAL;
@@ -4304,6 +4456,8 @@ static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
goto err_put_sync;
}
+ pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
+ fun_index, val);
ret = count;
err_put_sync:
@@ -4728,46 +4882,6 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
return ret;
}
-static int qm_wait_reset_finish(struct hisi_qm *qm)
-{
- int delay = 0;
-
- /* All reset requests need to be queued for processing */
- while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
- msleep(++delay);
- if (delay > QM_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int qm_reset_prepare_ready(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
-
- /*
- * PF and VF on host do not support resetting at the
- * same time on Kunpeng920.
- */
- if (qm->ver < QM_HW_V3)
- return qm_wait_reset_finish(pf_qm);
-
- return qm_wait_reset_finish(qm);
-}
-
-static void qm_reset_bit_clear(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
-
- if (qm->ver < QM_HW_V3)
- clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
-
- clear_bit(QM_RESETTING, &qm->misc_ctl);
-}
-
static int qm_controller_reset_prepare(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5053,6 +5167,12 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
if (qm->err_ini->open_axi_master_ooo)
qm->err_ini->open_axi_master_ooo(qm);
+ ret = qm_dev_mem_reset(qm);
+ if (ret) {
+ pci_err(pdev, "failed to reset device memory\n");
+ return ret;
+ }
+
ret = qm_restart(qm);
if (ret) {
pci_err(pdev, "Failed to start QM!\n");
@@ -5267,8 +5387,10 @@ static int qm_irq_register(struct hisi_qm *qm)
return ret;
if (qm->ver > QM_HW_V1) {
- ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, 0, qm->dev_name, qm);
+ ret = request_threaded_irq(pci_irq_vector(pdev,
+ QM_AEQ_EVENT_IRQ_VECTOR),
+ qm_aeq_irq, qm_aeq_thread,
+ 0, qm->dev_name, qm);
if (ret)
goto err_aeq_irq;
@@ -5750,13 +5872,15 @@ err_init_qp_mem:
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- int ret, total_vfs;
+ int ret, total_func, i;
size_t off = 0;
- total_vfs = pci_sriov_get_totalvfs(qm->pdev);
- qm->factor = kcalloc(total_vfs + 1, sizeof(struct qm_shaper_factor), GFP_KERNEL);
+ total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
+ qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
if (!qm->factor)
return -ENOMEM;
+ for (i = 0; i < total_func; i++)
+ qm->factor[i].func_qos = QM_QOS_MAX_VAL;
#define QM_INIT_BUF(qm, type, num) do { \
(qm)->type = ((qm)->qdma.va + (off)); \
@@ -5825,6 +5949,15 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_irq_register;
}
+ if (qm->fun_type == QM_HW_PF) {
+ qm_disable_clock_gate(qm);
+ ret = qm_dev_mem_reset(qm);
+ if (ret) {
+ dev_err(dev, "failed to reset device memory\n");
+ goto err_irq_register;
+ }
+ }
+
if (qm->mode == UACCE_MODE_SVA) {
ret = qm_alloc_uacce(qm);
if (ret < 0)
@@ -5982,8 +6115,12 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm)
qm_cmd_init(qm);
hisi_qm_dev_err_init(qm);
+ qm_disable_clock_gate(qm);
+ ret = qm_dev_mem_reset(qm);
+ if (ret)
+ pci_err(pdev, "failed to reset device memory\n");
- return 0;
+ return ret;
}
/**
@@ -6038,7 +6175,7 @@ int hisi_qm_resume(struct device *dev)
if (ret)
pci_err(pdev, "failed to start qm(%d)\n", ret);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_resume);
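The hisi_qm QoS refactor above is worth a gloss: qm_get_qos_value() now splits the debugfs write into a PCI BDF token and a QoS token, validates the value against 1~1000, and packs device/function with PCI_DEVFN() instead of the old open-coded `device * 8 + function`. Below is a minimal user-space sketch of that parsing and packing; the PCI_DEVFN definition mirrors include/uapi/linux/pci.h, and the sample input string is hypothetical.

#include <stdio.h>

/* Same packing as the kernel's PCI_DEVFN(): slot in bits 7..3, function in bits 2..0 */
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
	/* Mirrors qm_get_qos_value(): "<domain>:<bus>:<device>.<function> <qos>" */
	const char *input = "0000:35:00.0 500"; /* hypothetical BDF and QoS value */
	unsigned int domain, bus, device, function;
	unsigned long qos;

	if (sscanf(input, "%u:%x:%u.%u %lu", &domain, &bus, &device, &function, &qos) != 5)
		return 1;
	if (qos == 0 || qos > 1000) /* the driver rejects values outside 1~1000 */
		return 1;

	printf("devfn=%u qos=%lu\n", PCI_DEVFN(device, function), qos);
	return 0;
}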
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 90551bf38b52..26d3ab1d308b 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -105,7 +105,7 @@
#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48
-#define SEC_SHAPER_TYPE_RATE 128
+#define SEC_SHAPER_TYPE_RATE 400
struct sec_hw_error {
u32 int_msk;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 873971ef9aee..678f8b58ec42 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -103,8 +103,8 @@
#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
-#define HZIP_SHAPER_RATE_COMPRESS 252
-#define HZIP_SHAPER_RATE_DECOMPRESS 229
+#define HZIP_SHAPER_RATE_COMPRESS 750
+#define HZIP_SHAPER_RATE_DECOMPRESS 140
#define HZIP_DELAY_1_US 1
#define HZIP_POLL_TIMEOUT_US 1000
@@ -364,15 +364,16 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
/* user domain configurations */
writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
- writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
if (qm->use_sva && qm->ver == QM_HW_V2) {
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_SGL_RUSER_32_63);
} else {
writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
}
/* let's open all compression/decompression cores */
@@ -829,7 +830,10 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
- qm->algs = "zlib\ngzip";
+ if (pdev->revision >= QM_HW_V3)
+ qm->algs = "zlib\ngzip\ndeflate\nlz77_zstd";
+ else
+ qm->algs = "zlib\ngzip";
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/keembay/keembay-ocs-ecc.c
index 679e6ae295e0..5d0785d3f1b5 100644
--- a/drivers/crypto/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/keembay/keembay-ocs-ecc.c
@@ -930,6 +930,7 @@ static int kmb_ocs_ecc_probe(struct platform_device *pdev)
ecc_dev->engine = crypto_engine_alloc_init(dev, 1);
if (!ecc_dev->engine) {
dev_err(dev, "Could not allocate crypto engine\n");
+ rc = -ENOMEM;
goto list_del;
}
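The keembay hunk fixes a silent-success bug: on entry to this branch, rc still held 0 from the previous successful step, so a failed crypto_engine_alloc_init() jumped to the cleanup label and probe returned 0. A compact user-space sketch of the anti-pattern and the one-line fix, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

static void *engine_alloc(void) { return NULL; /* simulate allocation failure */ }

static int probe(void)
{
	int rc = 0; /* still 0 from the last step that succeeded */
	void *engine = engine_alloc();

	if (!engine) {
		rc = -12; /* -ENOMEM: without this assignment we would return 0 on failure */
		goto out;
	}
	free(engine);
out:
	return rc;
}

int main(void)
{
	printf("probe -> %d\n", probe()); /* prints -12, not a bogus 0 */
	return 0;
}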
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index c076d0b3ad5f..b681bd2dc6ad 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -94,15 +94,13 @@ static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
u32 num_queues)
{
struct otx_cpt_pending_queue *queue = NULL;
- size_t size;
int ret;
u32 i;
pqinfo->num_queues = num_queues;
- size = (qlen * sizeof(struct otx_cpt_pending_entry));
for_each_pending_queue(pqinfo, queue, i) {
- queue->head = kzalloc((size), GFP_KERNEL);
+ queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
if (!queue->head) {
ret = -ENOMEM;
goto pending_qfail;
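Switching from kzalloc(qlen * size) to kcalloc(qlen, size, ...) moves the multiplication into the allocator, which fails cleanly if qlen * size would overflow instead of allocating a truncated buffer; sizeof(*queue->head) also keeps the element size tied to the pointer's type. A user-space sketch of the same overflow guard, with a stand-in entry type:

#include <stdint.h>
#include <stdlib.h>

struct pending_entry { uint64_t data[4]; }; /* stand-in for the driver's entry type */

/* calloc-style allocation: reject a product that would wrap instead of truncating it */
static void *alloc_entries(size_t qlen)
{
	if (qlen > SIZE_MAX / sizeof(struct pending_entry))
		return NULL;
	return calloc(qlen, sizeof(struct pending_entry));
}

int main(void)
{
	void *head = alloc_entries(1024);
	free(head);
	return 0;
}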
diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
index c242d22008c3..965297e96954 100644
--- a/drivers/crypto/marvell/octeontx2/Makefile
+++ b/drivers/crypto/marvell/octeontx2/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptpf.o rvu_cptvf.o
rvu_cptpf-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o \
- cn10k_cpt.o
+ cn10k_cpt.o otx2_cpt_devlink.o
rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
otx2_cptvf_algs.o cn10k_cpt.o
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index c5445b05f53c..fb56824cb0a6 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/crypto.h>
+#include <net/devlink.h>
#include "otx2_cpt_hw_types.h"
#include "rvu.h"
#include "mbox.h"
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
new file mode 100644
index 000000000000..bb02e0db3615
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Marvell. */
+
+#include "otx2_cpt_devlink.h"
+
+static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+
+ return otx2_cpt_dl_custom_egrp_create(cptpf, ctx);
+}
+
+static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+
+ return otx2_cpt_dl_custom_egrp_delete(cptpf, ctx);
+}
+
+static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+
+ otx2_cpt_print_uc_dbg_info(cptpf);
+
+ return 0;
+}
+
+enum otx2_cpt_dl_param_id {
+ OTX2_CPT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE,
+ OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE,
+};
+
+static const struct devlink_param otx2_cpt_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE,
+ "egrp_create", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_create,
+ NULL),
+ DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE,
+ "egrp_delete", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_delete,
+ NULL),
+};
+
+static int otx2_cpt_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, "rvu_cptpf");
+}
+
+static const struct devlink_ops otx2_cpt_devlink_ops = {
+ .info_get = otx2_cpt_devlink_info_get,
+};
+
+int otx2_cpt_register_dl(struct otx2_cptpf_dev *cptpf)
+{
+ struct device *dev = &cptpf->pdev->dev;
+ struct otx2_cpt_devlink *cpt_dl;
+ struct devlink *dl;
+ int ret;
+
+ dl = devlink_alloc(&otx2_cpt_devlink_ops,
+ sizeof(struct otx2_cpt_devlink), dev);
+ if (!dl) {
+ dev_warn(dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ cpt_dl = devlink_priv(dl);
+ cpt_dl->dl = dl;
+ cpt_dl->cptpf = cptpf;
+ cptpf->dl = dl;
+ ret = devlink_params_register(dl, otx2_cpt_dl_params,
+ ARRAY_SIZE(otx2_cpt_dl_params));
+ if (ret) {
+ dev_err(dev, "devlink params register failed with error %d",
+ ret);
+ devlink_free(dl);
+ return ret;
+ }
+
+ devlink_register(dl);
+
+ return 0;
+}
+
+void otx2_cpt_unregister_dl(struct otx2_cptpf_dev *cptpf)
+{
+ struct devlink *dl = cptpf->dl;
+
+ if (!dl)
+ return;
+
+ devlink_unregister(dl);
+ devlink_params_unregister(dl, otx2_cpt_dl_params,
+ ARRAY_SIZE(otx2_cpt_dl_params));
+ devlink_free(dl);
+}
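Both parameters are runtime-only driver strings, so they are driven through the devlink userspace tool. Going by the parser added later in this patch (otx2_cpt_dl_custom_egrp_create/_delete in otx2_cptpf_ucode.c), the create value is a ';'-separated list of engine counts ("se:N", "ie:N", "ae:N") followed by one or two microcode file names, and the delete value names a group as "egrp:<index>". With a hypothetical device address, usage would look roughly like:

	devlink dev param set pci/0000:02:00.0 name egrp_create \
		value "se:32;se_fw.bin" cmode runtime
	devlink dev param set pci/0000:02:00.0 name egrp_delete \
		value "egrp:2" cmode runtime

Reading either parameter back lands in otx2_cpt_dl_uc_info(), which only dumps the current engine-group state via pr_debug.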
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h
new file mode 100644
index 000000000000..8b7d88c5d519
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef __OTX2_CPT_DEVLINK_H
+#define __OTX2_CPT_DEVLINK_H
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+
+struct otx2_cpt_devlink {
+ struct devlink *dl;
+ struct otx2_cptpf_dev *cptpf;
+};
+
+/* Devlink APIs */
+int otx2_cpt_register_dl(struct otx2_cptpf_dev *cptpf);
+void otx2_cpt_unregister_dl(struct otx2_cptpf_dev *cptpf);
+
+#endif /* __OTX2_CPT_DEVLINK_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 5ebba86c65d9..05b2d9c650e1 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -53,6 +53,9 @@ struct otx2_cptpf_dev {
u8 enabled_vfs; /* Number of enabled VFs */
u8 kvf_limits; /* Kernel crypto limits */
bool has_cpt1;
+
+ /* Devlink */
+ struct devlink *dl;
};
irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 146a55ac4b9b..1720a5bb7016 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -4,6 +4,7 @@
#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
+#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
@@ -494,12 +495,11 @@ static ssize_t kvf_limits_store(struct device *dev,
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
int lfs_num;
+ int ret;
- if (kstrtoint(buf, 0, &lfs_num)) {
- dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
- lfs_num, num_online_cpus());
- return -EINVAL;
- }
+ ret = kstrtoint(buf, 0, &lfs_num);
+ if (ret)
+ return ret;
if (lfs_num < 1 || lfs_num > num_online_cpus()) {
dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
lfs_num, num_online_cpus());
@@ -767,8 +767,15 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
if (err)
goto cleanup_eng_grps;
+
+ err = otx2_cpt_register_dl(cptpf);
+ if (err)
+ goto sysfs_grp_del;
+
return 0;
+sysfs_grp_del:
+ sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
@@ -788,6 +795,7 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
return;
cptpf_sriov_disable(pdev);
+ otx2_cpt_unregister_dl(cptpf);
/* Delete sysfs entry created for kernel VF limits */
sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
/* Cleanup engine groups */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index dff34b3ec09e..4c8ebdf671ca 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -29,7 +29,8 @@ static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
bool found = false;
int i;
- if (eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
+ if (eng_grp->g->engs_num < 0 ||
+ eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
dev_err(dev, "unsupported number of engines %d on octeontx2\n",
eng_grp->g->engs_num);
return bmap;
@@ -1110,18 +1111,19 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
struct pci_dev *pdev = cptpf->pdev;
struct fw_info_t fw_info;
- int ret;
+ int ret = 0;
+ mutex_lock(&eng_grps->lock);
/*
* We don't create engine groups if it was already
* made (when user enabled VFs for the first time)
*/
if (eng_grps->is_grps_created)
- return 0;
+ goto unlock;
ret = cpt_ucode_load_fw(pdev, &fw_info);
if (ret)
- return ret;
+ goto unlock;
/*
* Create engine group with SE engines for kernel
@@ -1186,7 +1188,7 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
cpt_ucode_release_fw(&fw_info);
if (is_dev_otx2(pdev))
- return 0;
+ goto unlock;
/*
* Configure engine group mask to allow context prefetching
* for the groups.
@@ -1201,12 +1203,15 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
+ mutex_unlock(&eng_grps->lock);
return 0;
delete_eng_grp:
delete_engine_grps(pdev, eng_grps);
release_fw:
cpt_ucode_release_fw(&fw_info);
+unlock:
+ mutex_unlock(&eng_grps->lock);
return ret;
}
@@ -1286,6 +1291,7 @@ void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grp_info *grp;
int i, j;
+ mutex_lock(&eng_grps->lock);
delete_engine_grps(pdev, eng_grps);
/* Release memory */
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
@@ -1295,6 +1301,7 @@ void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
grp->engs[j].bmap = NULL;
}
}
+ mutex_unlock(&eng_grps->lock);
}
int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
@@ -1303,6 +1310,7 @@ int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grp_info *grp;
int i, j, ret;
+ mutex_init(&eng_grps->lock);
eng_grps->obj = pci_get_drvdata(pdev);
eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
@@ -1349,11 +1357,14 @@ static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
struct fw_info_t fw_info;
int ret;
+ mutex_lock(&eng_grps->lock);
ret = cpt_ucode_load_fw(pdev, &fw_info);
- if (ret)
+ if (ret) {
+ mutex_unlock(&eng_grps->lock);
return ret;
+ }
- uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
if (uc_info[0] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for AE\n");
ret = -EINVAL;
@@ -1396,12 +1407,14 @@ static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
goto delete_eng_grp;
cpt_ucode_release_fw(&fw_info);
+ mutex_unlock(&eng_grps->lock);
return 0;
delete_eng_grp:
delete_engine_grps(pdev, eng_grps);
release_fw:
cpt_ucode_release_fw(&fw_info);
+ mutex_unlock(&eng_grps->lock);
return ret;
}
@@ -1501,3 +1514,291 @@ delete_grps:
return ret;
}
+
+int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
+ struct device *dev = &cptpf->pdev->dev;
+ char *start, *val, *err_msg, *tmp;
+ int grp_idx = 0, ret = -EINVAL;
+ bool has_se, has_ie, has_ae;
+ struct fw_info_t fw_info;
+ int ucode_idx = 0;
+
+ if (!eng_grps->is_grps_created) {
+ dev_err(dev, "Not allowed before creating the default groups\n");
+ return -EINVAL;
+ }
+ err_msg = "Invalid engine group format";
+ strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
+ start = tmp_buf;
+
+ has_se = has_ie = has_ae = false;
+
+ for (;;) {
+ val = strsep(&start, ";");
+ if (!val)
+ break;
+ val = strim(val);
+ if (!*val)
+ continue;
+
+ if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
+ if (has_se || ucode_idx)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
+ has_se = true;
+ } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
+ if (has_ae || ucode_idx)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
+ has_ae = true;
+ } else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
+ if (has_ie || ucode_idx)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
+ has_ie = true;
+ } else {
+ if (ucode_idx > 1)
+ goto err_print;
+ if (!strlen(val))
+ goto err_print;
+ if (strnstr(val, " ", strlen(val)))
+ goto err_print;
+ ucode_filename[ucode_idx++] = val;
+ }
+ }
+
+ /* Validate input parameters */
+ if (!(grp_idx && ucode_idx))
+ goto err_print;
+
+ if (ucode_idx > 1 && grp_idx < 2)
+ goto err_print;
+
+ if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
+ err_msg = "Error max 2 engine types can be attached";
+ goto err_print;
+ }
+
+ if (grp_idx > 1) {
+ if ((engs[0].type + engs[1].type) !=
+ (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
+ err_msg = "Only combination of SE+IE engines is allowed";
+ goto err_print;
+ }
+ /* Keep SE engines at zero index */
+ if (engs[1].type == OTX2_CPT_SE_TYPES)
+ swap(engs[0], engs[1]);
+ }
+ mutex_lock(&eng_grps->lock);
+
+ if (cptpf->enabled_vfs) {
+ dev_err(dev, "Disable VFs before modifying engine groups\n");
+ ret = -EACCES;
+ goto err_unlock;
+ }
+ INIT_LIST_HEAD(&fw_info.ucodes);
+ ret = load_fw(dev, &fw_info, ucode_filename[0]);
+ if (ret) {
+ dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
+ goto err_unlock;
+ }
+ if (ucode_idx > 1) {
+ ret = load_fw(dev, &fw_info, ucode_filename[1]);
+ if (ret) {
+ dev_err(dev, "Unable to load firmware %s\n",
+ ucode_filename[1]);
+ goto release_fw;
+ }
+ }
+ uc_info[0] = get_ucode(&fw_info, engs[0].type);
+ if (uc_info[0] == NULL) {
+ dev_err(dev, "Unable to find firmware for %s\n",
+ get_eng_type_str(engs[0].type));
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ if (ucode_idx > 1) {
+ uc_info[1] = get_ucode(&fw_info, engs[1].type);
+ if (uc_info[1] == NULL) {
+ dev_err(dev, "Unable to find firmware for %s\n",
+ get_eng_type_str(engs[1].type));
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ }
+ ret = create_engine_group(dev, eng_grps, engs, grp_idx,
+ (void **)uc_info, 1);
+
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+err_unlock:
+ mutex_unlock(&eng_grps->lock);
+ return ret;
+err_print:
+ dev_err(dev, "%s\n", err_msg);
+ return ret;
+}
+
+int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ struct device *dev = &cptpf->pdev->dev;
+ char *tmp, *err_msg;
+ int egrp;
+ int ret;
+
+ err_msg = "Invalid input string format(ex: egrp:0)";
+ if (strncasecmp(ctx->val.vstr, "egrp", 4))
+ goto err_print;
+ tmp = ctx->val.vstr;
+ strsep(&tmp, ":");
+ if (!tmp)
+ goto err_print;
+ if (kstrtoint(tmp, 10, &egrp))
+ goto err_print;
+
+ if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
+ dev_err(dev, "Invalid engine group %d", egrp);
+ return -EINVAL;
+ }
+ if (!eng_grps->grp[egrp].is_enabled) {
+ dev_err(dev, "Error engine_group%d is not configured", egrp);
+ return -EINVAL;
+ }
+ mutex_lock(&eng_grps->lock);
+ ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
+ mutex_unlock(&eng_grps->lock);
+
+ return ret;
+
+err_print:
+ dev_err(dev, "%s\n", err_msg);
+ return -EINVAL;
+}
+
+static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
+ int size, int idx)
+{
+ struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
+ struct otx2_cpt_engs_rsvd *engs;
+ int len, i;
+
+ buf[0] = '\0';
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+ if (idx != -1 && idx != i)
+ continue;
+
+ if (eng_grp->mirror.is_ena)
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+ if (i > 0 && idx == -1) {
+ len = strlen(buf);
+ scnprintf(buf + len, size - len, ", ");
+ }
+
+ len = strlen(buf);
+ scnprintf(buf + len, size - len, "%d %s ",
+ mirrored_engs ? engs->count + mirrored_engs->count :
+ engs->count,
+ get_eng_type_str(engs->type));
+ if (mirrored_engs) {
+ len = strlen(buf);
+ scnprintf(buf + len, size - len,
+ "(%d shared with engine_group%d) ",
+ engs->count <= 0 ?
+ engs->count + mirrored_engs->count :
+ mirrored_engs->count,
+ eng_grp->mirror.idx);
+ }
+ }
+}
+
+void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
+{
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ struct otx2_cpt_eng_grp_info *mirrored_grp;
+ char engs_info[2 * OTX2_CPT_NAME_LENGTH];
+ struct otx2_cpt_eng_grp_info *grp;
+ struct otx2_cpt_engs_rsvd *engs;
+ u32 mask[5];
+ int i, j;
+
+ pr_debug("Engine groups global info");
+ pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt,
+ eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt);
+ pr_debug("free SE %d", eng_grps->avail.se_cnt);
+ pr_debug("free IE %d", eng_grps->avail.ie_cnt);
+ pr_debug("free AE %d", eng_grps->avail.ae_cnt);
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ pr_debug("engine_group%d, state %s", i,
+ grp->is_enabled ? "enabled" : "disabled");
+ if (grp->is_enabled) {
+ mirrored_grp = &eng_grps->grp[grp->mirror.idx];
+ pr_debug("Ucode0 filename %s, version %s",
+ grp->mirror.is_ena ?
+ mirrored_grp->ucode[0].filename :
+ grp->ucode[0].filename,
+ grp->mirror.is_ena ?
+ mirrored_grp->ucode[0].ver_str :
+ grp->ucode[0].ver_str);
+ if (is_2nd_ucode_used(grp))
+ pr_debug("Ucode1 filename %s, version %s",
+ grp->ucode[1].filename,
+ grp->ucode[1].ver_str);
+ }
+
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ engs = &grp->engs[j];
+ if (engs->type) {
+ get_engs_info(grp, engs_info,
+ 2 * OTX2_CPT_NAME_LENGTH, j);
+ pr_debug("Slot%d: %s", j, engs_info);
+ bitmap_to_arr32(mask, engs->bmap,
+ eng_grps->engs_num);
+ if (is_dev_otx2(cptpf->pdev))
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
+ mask[3], mask[2], mask[1],
+ mask[0]);
+ else
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
+ mask[4], mask[3], mask[2], mask[1],
+ mask[0]);
+ }
+ }
+ }
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
index fe019ab730b2..8f4d4e5f531a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -143,6 +143,7 @@ struct otx2_cpt_eng_grp_info {
};
struct otx2_cpt_eng_grps {
+ struct mutex lock;
struct otx2_cpt_eng_grp_info grp[OTX2_CPT_MAX_ENGINE_GROUPS];
struct otx2_cpt_engs_available avail;
void *obj; /* device specific data */
@@ -160,5 +161,9 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf);
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type);
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf);
-
+int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx);
+int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx);
+void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf);
#endif /* __OTX2_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 877a948469bd..2748a3327e39 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -1682,11 +1682,8 @@ static void swap_func(void *lptr, void *rptr, int size)
{
struct cpt_device_desc *ldesc = lptr;
struct cpt_device_desc *rdesc = rptr;
- struct cpt_device_desc desc;
- desc = *ldesc;
- *ldesc = *rdesc;
- *rdesc = desc;
+ swap(*ldesc, *rdesc);
}
int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9b968ac4ee7b..a196bb8b1701 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1302,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
static int omap_aes_resume(struct device *dev)
{
- pm_runtime_resume_and_get(dev);
+ pm_runtime_get_sync(dev);
return 0;
}
#endif
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index be77656864e3..538aff80869f 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -735,7 +735,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -752,7 +752,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -770,7 +770,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -787,7 +787,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
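The priority bump matters because the crypto API resolves a name such as "cbc(des3_ede)" to the registered implementation with the highest cra_priority, and the generic software DES/3DES algorithms register at 100, so at 100 the OMAP hardware driver could only tie with the C implementation, making selection order-dependent. A kernel-style sketch of the lookup that benefits (a sketch only, error handling elided):

#include <crypto/skcipher.h>

/* Allocating by algorithm name picks the highest-priority provider;
 * after this patch "cbc-des3-omap" (300) wins over the generic code (100).
 */
static struct crypto_skcipher *pick_des3(void)
{
	return crypto_alloc_skcipher("cbc(des3_ede)", 0, 0);
}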
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 77783feb62b2..4b90c0f22b03 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -13,6 +13,7 @@ config CRYPTO_DEV_QAT
select CRYPTO_SHA512
select CRYPTO_LIB_AES
select FW_LOADER
+ select CRC8
config CRYPTO_DEV_QAT_DH895xCC
tristate "Support for Intel(R) DH895xCC"
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index fd29861526d6..6d10edc40aca 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2020 Intel Corporation */
+/* Copyright(c) 2020 - 2021 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
+#include <adf_cfg.h>
#include <adf_common_drv.h>
-#include <adf_pf2vf_msg.h>
#include <adf_gen4_hw_data.h>
+#include <adf_gen4_pfvf.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -13,12 +14,18 @@ struct adf_fw_config {
char *obj_name;
};
-static struct adf_fw_config adf_4xxx_fw_config[] = {
+static struct adf_fw_config adf_4xxx_fw_cy_config[] = {
{0xF0, ADF_4XXX_SYM_OBJ},
{0xF, ADF_4XXX_ASYM_OBJ},
{0x100, ADF_4XXX_ADMIN_OBJ},
};
+static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
+ {0xF0, ADF_4XXX_DC_OBJ},
+ {0xF, ADF_4XXX_DC_OBJ},
+ {0x100, ADF_4XXX_ADMIN_OBJ},
+};
+
/* Worker thread to service arbiter mappings */
static const u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
0x5555555, 0x5555555, 0x5555555, 0x5555555,
@@ -32,6 +39,39 @@ static struct adf_hw_device_class adf_4xxx_class = {
.instances = 0,
};
+enum dev_services {
+ SVC_CY = 0,
+ SVC_DC,
+};
+
+static const char *const dev_cfg_services[] = {
+ [SVC_CY] = ADF_CFG_CY,
+ [SVC_DC] = ADF_CFG_DC,
+};
+
+static int get_service_enabled(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ ADF_SERVICES_ENABLED " param not found\n");
+ return ret;
+ }
+
+ ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
+ services);
+ if (ret < 0)
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
+ services);
+
+ return ret;
+}
+
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
return ADF_4XXX_ACCELERATORS_MASK;
@@ -96,23 +136,67 @@ static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 capabilities_cy, capabilities_dc;
u32 fusectl1;
- u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
- ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
- ICP_ACCEL_CAPABILITIES_AES_V2;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE)
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE)
- capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE)
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_SHA3 |
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+ ICP_ACCEL_CAPABILITIES_HKDF |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT |
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_AES_V2;
+
+ /* A set bit in fusectl1 means the feature is OFF in this SKU */
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ }
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ return capabilities_cy;
+ case SVC_DC:
+ return capabilities_dc;
+ }
- return capabilities;
+ return 0;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
@@ -191,29 +275,36 @@ static int adf_init_device(struct adf_accel_dev *accel_dev)
return ret;
}
-static int pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
-{
- return 0;
-}
-
static u32 uof_get_num_objs(void)
{
- return ARRAY_SIZE(adf_4xxx_fw_config);
-}
+ BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) !=
+ ARRAY_SIZE(adf_4xxx_fw_dc_config),
+ "Size mismatch between adf_4xxx_fw_*_config arrays");
-static char *uof_get_name(u32 obj_num)
-{
- return adf_4xxx_fw_config[obj_num].obj_name;
+ return ARRAY_SIZE(adf_4xxx_fw_cy_config);
}
-static u32 uof_get_ae_mask(u32 obj_num)
+static char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
{
- return adf_4xxx_fw_config[obj_num].ae_mask;
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ return adf_4xxx_fw_cy_config[obj_num].obj_name;
+ case SVC_DC:
+ return adf_4xxx_fw_dc_config[obj_num].obj_name;
+ }
+
+ return NULL;
}
-static u32 get_vf2pf_sources(void __iomem *pmisc_addr)
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
- /* For the moment do not report vf2pf sources */
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ return adf_4xxx_fw_cy_config[obj_num].ae_mask;
+ case SVC_DC:
+ return adf_4xxx_fw_dc_config[obj_num].ae_mask;
+ }
+
return 0;
}
@@ -222,12 +313,14 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->dev_class = &adf_4xxx_class;
hw_data->instance_id = adf_4xxx_class.instances++;
hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
hw_data->num_logical_accel = 1;
hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_enable_error_correction;
@@ -259,12 +352,11 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
- hw_data->enable_pfvf_comms = pfvf_comms_disabled;
- hw_data->get_vf2pf_sources = get_vf2pf_sources;
hw_data->disable_iov = adf_disable_sriov;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+ hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
}
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
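A note on the new service selection above: get_service_enabled() maps the ServicesEnabled configuration string onto dev_cfg_services[] with match_string(), so "sym;asym" yields index SVC_CY and "dc" yields SVC_DC, and every per-service decision (capability mask, UOF object names, AE masks) then switches on that index. A standalone sketch of the same lookup, with the kernel's match_string() open-coded:

#include <stdio.h>
#include <string.h>

static const char *const dev_cfg_services[] = { "sym;asym", "dc" };

/* Open-coded equivalent of the kernel's match_string(): index on hit, -1 otherwise */
static int match_service(const char *s)
{
	for (size_t i = 0; i < sizeof(dev_cfg_services) / sizeof(dev_cfg_services[0]); i++)
		if (!strcmp(dev_cfg_services[i], s))
			return (int)i;
	return -1;
}

int main(void)
{
	printf("%d %d %d\n",
	       match_service("sym;asym"), /* 0 -> SVC_CY */
	       match_service("dc"),       /* 1 -> SVC_DC */
	       match_service("bogus"));   /* -1 -> rejected */
	return 0;
}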
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
index 924bac6feb37..12e4fb9b40ce 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -37,6 +37,7 @@
/* Bank and ring configuration */
#define ADF_4XXX_NUM_RINGS_PER_BANK 2
+#define ADF_4XXX_NUM_BANKS_PER_VF 4
/* Error source registers */
#define ADF_4XXX_ERRSOU0 (0x41A200)
@@ -76,6 +77,7 @@
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
#define ADF_4XXX_SYM_OBJ "qat_4xxx_sym.bin"
+#define ADF_4XXX_DC_OBJ "qat_4xxx_dc.bin"
#define ADF_4XXX_ASYM_OBJ "qat_4xxx_asym.bin"
#define ADF_4XXX_ADMIN_OBJ "qat_4xxx_admin.bin"
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index 71ef065914b2..a6c78b9c730b 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -29,6 +29,29 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
adf_devmgr_rm_dev(accel_dev, NULL);
}
+static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ const char *config;
+ int ret;
+
+ config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ /* Default configuration is crypto only for even devices
+ * and compression for odd devices
+ */
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, config,
+ ADF_STR);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
@@ -227,8 +250,18 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
+ ret = adf_cfg_dev_init(accel_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize configuration.\n");
+ goto out_err;
+ }
+
/* Get accelerator capabilities mask */
hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
+ goto out_err;
+ }
/* Find and map all the device's BARS */
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 1fa690219d92..b941fe3713ff 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
-#include <adf_pf2vf_msg.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
#include "adf_c3xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -109,6 +109,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_gen2_enable_error_correction;
@@ -135,14 +136,9 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
- hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
- hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
- hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
- hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
- hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+ adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 3e69b520e82f..a9fbe57b32ae 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
-#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
#include "adf_c3xxxvf_hw_data.h"
static struct adf_hw_device_class c3xxxiov_class = {
@@ -47,11 +48,6 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_VF;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_C3XXXIOV_PF2VF_OFFSET;
-}
-
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -71,6 +67,7 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
@@ -86,13 +83,11 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
- hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
+ adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
index f5de4ce66014..6b4bf181d15b 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
@@ -12,7 +12,6 @@
#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
#define ADF_C3XXXIOV_ETR_BAR 0
#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
-#define ADF_C3XXXIOV_PF2VF_OFFSET 0x200
void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1df1b868978d..fa18d8009f53 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -171,11 +171,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
- init_completion(&accel_dev->vf.iov_msg_completion);
-
- ret = qat_crypto_dev_config(accel_dev);
- if (ret)
- goto out_err_free_reg;
+ init_completion(&accel_dev->vf.msg_received);
ret = adf_dev_init(accel_dev);
if (ret)
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index 0613db077689..b1eac2f81faa 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
-#include <adf_pf2vf_msg.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
#include "icp_qat_hw.h"
@@ -111,6 +111,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_gen2_enable_error_correction;
@@ -137,14 +138,9 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
- hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
- hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
- hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
- hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
- hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+ adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 3bee3e467363..0282038fca54 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
-#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
#include "adf_c62xvf_hw_data.h"
static struct adf_hw_device_class c62xiov_class = {
@@ -47,11 +48,6 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_VF;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_C62XIOV_PF2VF_OFFSET;
-}
-
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -71,6 +67,7 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
@@ -86,13 +83,11 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
- hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
+ adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
index 794778c48678..a1a62c003ebf 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
@@ -12,7 +12,6 @@
#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
#define ADF_C62XIOV_ETR_BAR 0
#define ADF_C62XIOV_ETR_MAX_BANKS 1
-#define ADF_C62XIOV_PF2VF_OFFSET 0x200
void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 8103bd81d617..686ec752d0e9 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -171,11 +171,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
- init_completion(&accel_dev->vf.iov_msg_completion);
-
- ret = qat_crypto_dev_config(accel_dev);
- if (ret)
- goto out_err_free_reg;
+ init_completion(&accel_dev->vf.msg_received);
ret = adf_dev_init(accel_dev);
if (ret)
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 9c57abdf56b7..7e191a42a5c7 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -19,5 +19,7 @@ intel_qat-objs := adf_cfg.o \
qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o \
- adf_vf2pf_msg.o adf_vf_isr.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
+ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
+ adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
+ adf_gen2_pfvf.o adf_gen4_pfvf.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 57d9ca08e611..2d4cd7c7cf33 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/ratelimit.h>
#include "adf_cfg_common.h"
+#include "adf_pfvf_msg.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
@@ -147,6 +148,19 @@ struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;
+struct adf_pfvf_ops {
+ int (*enable_comms)(struct adf_accel_dev *accel_dev);
+ u32 (*get_pf2vf_offset)(u32 i);
+ u32 (*get_vf2pf_offset)(u32 i);
+ u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
+ void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
+ void (*disable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
+ int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ u32 pfvf_offset, struct mutex *csr_lock);
+ struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver);
+};
+
struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
u32 (*get_accel_mask)(struct adf_hw_device_data *self);
@@ -157,7 +171,6 @@ struct adf_hw_device_data {
u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
u32 (*get_num_aes)(struct adf_hw_device_data *self);
u32 (*get_num_accels)(struct adf_hw_device_data *self);
- u32 (*get_pf2vf_offset)(u32 i);
void (*get_arb_info)(struct arb_info *arb_csrs_info);
void (*get_admin_info)(struct admin_info *admin_csrs_info);
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
@@ -176,35 +189,34 @@ struct adf_hw_device_data {
bool enable);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
- int (*enable_pfvf_comms)(struct adf_accel_dev *accel_dev);
- u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
- void (*enable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
- u32 vf_mask);
- void (*disable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
- u32 vf_mask);
+ int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
- char *(*uof_get_name)(u32 obj_num);
+ char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_num_objs)(void);
- u32 (*uof_get_ae_mask)(u32 obj_num);
+ u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
+ struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
u32 straps;
u32 accel_capabilities_mask;
+ u32 extended_dc_capabilities;
+ u32 clock_frequency;
u32 instance_id;
u16 accel_mask;
u32 ae_mask;
u32 admin_ae_mask;
u16 tx_rings_mask;
+ u16 ring_to_svc_map;
u8 tx_rx_gap;
u8 num_banks;
+ u16 num_banks_per_vf;
u8 num_rings_per_bank;
u8 num_accel;
u8 num_logical_accel;
u8 num_engines;
- u8 min_iov_compat_ver;
};
/* CSR write macro */
@@ -214,14 +226,22 @@ struct adf_hw_device_data {
/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
+#define ADF_CFG_NUM_SERVICES 4
+#define ADF_SRV_TYPE_BIT_LEN 3
+#define ADF_SRV_TYPE_MASK 0x7
+
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
GET_HW_DATA(accel_dev)->num_rings_per_bank
+#define GET_SRV_TYPE(accel_dev, idx) \
+ (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
+ & ADF_SRV_TYPE_MASK)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
+#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
struct adf_admin_comms;
@@ -238,6 +258,7 @@ struct adf_accel_vf_info {
struct ratelimit_state vf2pf_ratelimit;
u32 vf_nr;
bool init;
+ u8 vf_compat_ver;
};
struct adf_accel_dev {
@@ -265,9 +286,9 @@ struct adf_accel_dev {
char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
struct tasklet_struct pf2vf_bh_tasklet;
struct mutex vf2pf_lock; /* protect CSR access */
- struct completion iov_msg_completion;
- u8 compatible;
- u8 pf_version;
+ struct completion msg_received;
+ struct pfvf_message response; /* temp field holding pf2vf response */
+ u8 pf_compat_ver;
} vf;
};
bool is_vf;
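The new ring_to_svc_map field deserves a gloss: it packs one 3-bit adf_cfg_service_type code per ring pair, four pairs to a u16, with the ADF_CFG_SERV_RING_PAIR_n_SHIFT constants (0, 3, 6, 9) as the slot offsets; GET_SRV_TYPE() is the matching extractor. A standalone sketch of the packing and unpacking follows; the sample layout is illustrative, not necessarily a driver default:

#include <stdint.h>
#include <stdio.h>

enum svc { UNUSED = 0, CRYPTO, COMP, SYM, ASYM }; /* 3-bit service codes */

#define SRV_BITS 3
#define SRV_MASK 0x7

static uint16_t pack_map(enum svc s0, enum svc s1, enum svc s2, enum svc s3)
{
	return s0 | (s1 << 3) | (s2 << 6) | (s3 << 9);
}

/* Same extraction as GET_SRV_TYPE(accel_dev, idx) */
static unsigned int get_srv(uint16_t map, unsigned int idx)
{
	return (map >> (SRV_BITS * idx)) & SRV_MASK;
}

int main(void)
{
	uint16_t map = pack_map(ASYM, SYM, ASYM, SYM);

	for (unsigned int i = 0; i < 4; i++)
		printf("ring pair %u -> service %u\n", i, get_srv(map, i));
	return 0;
}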
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
index ca4eae8cdd0b..4ce2b666929e 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -22,8 +22,12 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
num_objs = hw_device->uof_get_num_objs();
for (i = 0; i < num_objs; i++) {
- obj_name = hw_device->uof_get_name(i);
- ae_mask = hw_device->uof_get_ae_mask(i);
+ obj_name = hw_device->uof_get_name(accel_dev, i);
+ ae_mask = hw_device->uof_get_ae_mask(accel_dev, i);
+ if (!obj_name || !ae_mask) {
+ dev_err(&GET_DEV(accel_dev), "Invalid UOF image\n");
+ goto out_err;
+ }
if (qat_uclo_set_cfg_ae_mask(loader, ae_mask)) {
dev_err(&GET_DEV(accel_dev),
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 43680e178242..498eb6f690e3 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -194,6 +194,35 @@ static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
+ u32 *capabilities)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_resp resp;
+ struct icp_qat_fw_init_admin_req req;
+ unsigned long ae_mask;
+ unsigned long ae;
+ int ret;
+
+ /* Target only service accelerator engines */
+ ae_mask = hw_device->ae_mask & ~hw_device->admin_ae_mask;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;
+
+ *capabilities = 0;
+ for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
+ ret = adf_send_admin(accel_dev, &req, &resp, 1ULL << ae);
+ if (ret)
+ return ret;
+
+ *capabilities |= resp.extended_features;
+ }
+
+ return 0;
+}
+
/**
* adf_send_admin_init() - Function sends init message to FW
* @accel_dev: Pointer to acceleration device.
@@ -204,8 +233,16 @@ static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
*/
int adf_send_admin_init(struct adf_accel_dev *accel_dev)
{
+ u32 dc_capabilities = 0;
int ret;
+ ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
+ return ret;
+ }
+ accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
+
ret = adf_set_fw_constants(accel_dev);
if (ret)
return ret;
@@ -218,9 +255,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *pmisc =
- &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
- void __iomem *csr = pmisc->virt_addr;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
struct admin_info admin_csrs_info;
u32 mailbox_offset, adminmsg_u, adminmsg_l;
void __iomem *mailbox;
@@ -254,13 +289,13 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
hw_data->get_admin_info(&admin_csrs_info);
mailbox_offset = admin_csrs_info.mailbox_offset;
- mailbox = csr + mailbox_offset;
+ mailbox = pmisc_addr + mailbox_offset;
adminmsg_u = admin_csrs_info.admin_msg_ur;
adminmsg_l = admin_csrs_info.admin_msg_lr;
reg_val = (u64)admin->phy_addr;
- ADF_CSR_WR(csr, adminmsg_u, upper_32_bits(reg_val));
- ADF_CSR_WR(csr, adminmsg_l, lower_32_bits(reg_val));
+ ADF_CSR_WR(pmisc_addr, adminmsg_u, upper_32_bits(reg_val));
+ ADF_CSR_WR(pmisc_addr, adminmsg_l, lower_32_bits(reg_val));
mutex_init(&admin->lock);
admin->mailbox_addr = mailbox;
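The adf_get_dc_capabilities() helper added above follows a simple aggregation idiom: send the same query to every non-admin accelerator engine and OR the per-engine responses into one capability mask, failing fast on the first error. A minimal, hedged sketch of that idiom in isolation (query_unit() is a hypothetical accessor, not part of this driver):
#include <linux/bitops.h>
#include <linux/types.h>
/* Sketch only: OR together the feature bits reported by each unit whose
 * bit is set in @unit_mask, mirroring the loop in adf_get_dc_capabilities().
 */
static int aggregate_caps(unsigned long unit_mask, unsigned int nunits,
			  int (*query_unit)(unsigned int unit, u32 *caps),
			  u32 *caps)
{
	unsigned int unit;
	u32 unit_caps;
	int ret;

	*caps = 0;
	for_each_set_bit(unit, &unit_mask, nunits) {
		ret = query_unit(unit, &unit_caps);
		if (ret)
			return ret;	/* fail fast, as the driver does */
		*caps |= unit_caps;
	}
	return 0;
}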
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index 575b6f002303..b5b208cbe5a1 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -297,3 +297,4 @@ int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
up_read(&cfg->lock);
return ret;
}
+EXPORT_SYMBOL_GPL(adf_cfg_get_param_value);
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
index 4fabb70b1f18..6e5de1dab97b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -19,6 +19,19 @@
#define ADF_MAX_DEVICES (32 * 32)
#define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES)
+#define ADF_CFG_SERV_RING_PAIR_0_SHIFT 0
+#define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3
+#define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6
+#define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9
+enum adf_cfg_service_type {
+ UNUSED = 0,
+ CRYPTO,
+ COMP,
+ SYM,
+ ASYM,
+ USED
+};
+
enum adf_cfg_val_type {
ADF_DEC,
ADF_HEX,
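The service enum above pairs with the ring-pair shifts: each ring pair owns a 3-bit field in a single ring-to-service map word (shifts 0, 3, 6 and 9), which the GET_SRV_TYPE() macro near the top of this patch reads back out. A hedged decode sketch, assuming ADF_SRV_TYPE_BIT_LEN and ADF_SRV_TYPE_MASK (defined elsewhere in the driver) are 3 and 0x7:
#define ADF_SRV_TYPE_BIT_LEN	3	/* assumed field width per ring pair */
#define ADF_SRV_TYPE_MASK	0x7	/* assumed single-field mask */
/* Sketch only: return the service configured on ring pair @idx */
static enum adf_cfg_service_type ring_pair_svc(u16 ring_to_svc_map,
					       unsigned int idx)
{
	return (ring_to_svc_map >> (ADF_SRV_TYPE_BIT_LEN * idx)) &
	       ADF_SRV_TYPE_MASK;
}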
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index 09651e1f937a..655248dbf962 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -22,6 +22,9 @@
#define ADF_RING_ASYM_BANK_NUM "BankAsymNumber"
#define ADF_CY "Cy"
#define ADF_DC "Dc"
+#define ADF_CFG_DC "dc"
+#define ADF_CFG_CY "sym;asym"
+#define ADF_SERVICES_ENABLED "ServicesEnabled"
#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index de94b76a6d2c..76f4f96ec5eb 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
#ifndef ADF_DRV_H
#define ADF_DRV_H
@@ -62,9 +62,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
-void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
-int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
-void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
@@ -117,6 +114,7 @@ void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
int qat_crypto_register(void);
int qat_crypto_unregister(void);
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev);
+int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev);
struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
void qat_alg_callback(void *resp);
@@ -131,6 +129,8 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev);
+
int qat_hal_init(struct adf_accel_dev *accel_dev);
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
int qat_hal_start(struct icp_qat_fw_loader_handle *handle);
@@ -193,17 +193,14 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
u32 vf_mask);
-void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
- u32 vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
u32 vf_mask);
-int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
+bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
+bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr);
+int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
-int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg);
-int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
-void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
void adf_exit_pf_wq(void);
int adf_init_vf_wq(void);
@@ -212,11 +209,6 @@ void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
#else
#define adf_sriov_configure NULL
-static inline int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
-{
- return 0;
-}
-
static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
@@ -229,15 +221,6 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
-static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
-{
- return 0;
-}
-
-static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
-{
-}
-
static inline int adf_init_pf_wq(void)
{
return 0;
@@ -261,4 +244,15 @@ static inline void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
}
#endif
+
+static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc;
+
+ pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+
+ return pmisc->virt_addr;
+}
+
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 262bdc05dab4..57035b7dd4b2 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -1,57 +1,10 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+#include "adf_common_drv.h"
#include "adf_gen2_hw_data.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>
-#define ADF_GEN2_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
-
-u32 adf_gen2_get_pf2vf_offset(u32 i)
-{
- return ADF_GEN2_PF2VF_OFFSET(i);
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_pf2vf_offset);
-
-u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
-{
- u32 errsou3, errmsk3, vf_int_mask;
-
- /* Get the interrupt sources triggered by VFs */
- errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
- vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);
-
- /* To avoid adding duplicate entries to work queue, clear
- * vf_int_mask_sets bits that are already masked in ERRMSK register.
- */
- errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
- vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);
-
- return vf_int_mask;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_vf2pf_sources);
-
-void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
-{
- /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
- & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
- }
-}
-EXPORT_SYMBOL_GPL(adf_gen2_enable_vf2pf_interrupts);
-
-void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
-{
- /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
- | ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
- }
-}
-EXPORT_SYMBOL_GPL(adf_gen2_disable_vf2pf_interrupts);
-
u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
{
if (!self || !self->accel_mask)
@@ -73,31 +26,29 @@ EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)
- [hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
unsigned long accel_mask = hw_data->accel_mask;
unsigned long ae_mask = hw_data->ae_mask;
- void __iomem *csr = misc_bar->virt_addr;
unsigned int val, i;
/* Enable Accel Engine error detection & correction */
for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
- val = ADF_CSR_RD(csr, ADF_GEN2_AE_CTX_ENABLES(i));
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
- ADF_CSR_WR(csr, ADF_GEN2_AE_CTX_ENABLES(i), val);
- val = ADF_CSR_RD(csr, ADF_GEN2_AE_MISC_CONTROL(i));
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
- ADF_CSR_WR(csr, ADF_GEN2_AE_MISC_CONTROL(i), val);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
}
/* Enable shared memory error detection & correction */
for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
- val = ADF_CSR_RD(csr, ADF_GEN2_UERRSSMSH(i));
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
val |= ADF_GEN2_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_GEN2_UERRSSMSH(i), val);
- val = ADF_CSR_RD(csr, ADF_GEN2_CERRSSMSH(i));
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
val |= ADF_GEN2_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_GEN2_CERRSSMSH(i), val);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
}
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
@@ -105,15 +56,9 @@ EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs)
{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- void __iomem *pmisc_addr;
- struct adf_bar *pmisc;
- int pmisc_id, i;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u32 reg;
-
- pmisc_id = hw_data->get_misc_bar_id(hw_data);
- pmisc = &GET_BARS(accel_dev)[pmisc_id];
- pmisc_addr = pmisc->virt_addr;
+ int i;
/* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
for (i = 0; i < num_a_regs; i++) {
@@ -259,21 +204,33 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
u32 legfuses;
u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_COMPRESSION;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
- if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
+ /* A set bit in legfuses means the feature is OFF in this SKU */
+ if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
- if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
+ if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
if ((straps | fuses) & ADF_POWERGATE_PKE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ if ((straps | fuses) & ADF_POWERGATE_DC)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
return capabilities;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
@@ -281,18 +238,12 @@ EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
unsigned long accel_mask = hw_data->accel_mask;
- void __iomem *pmisc_addr;
- struct adf_bar *pmisc;
- int pmisc_id;
u32 i = 0;
- pmisc_id = hw_data->get_misc_bar_id(hw_data);
- pmisc = &GET_BARS(accel_dev)[pmisc_id];
- pmisc_addr = pmisc->virt_addr;
-
/* Configures WDT timers */
for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
/* Enable WDT for sym and dc */
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index c169d704097d..f2e0451b11c0 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -4,6 +4,7 @@
#define ADF_GEN2_HW_DATA_H_
#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
/* Transport access */
#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
@@ -113,8 +114,16 @@ do { \
(ADF_ARB_REG_SLOT * (index)), value)
/* Power gating */
+#define ADF_POWERGATE_DC BIT(23)
#define ADF_POWERGATE_PKE BIT(24)
+/* Default ring mapping */
+#define ADF_GEN2_DEFAULT_RING_TO_SRV_MAP \
+ (CRYPTO << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
+ CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
+ UNUSED << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
+ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
+
/* WDT timers
*
* Timeout is in cycles. Clock speed may vary across products but this
@@ -136,19 +145,6 @@ do { \
#define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
#define ADF_GEN2_ERRSSMSH_EN BIT(3)
- /* VF2PF interrupts */
-#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
-#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
-#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
-#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9)
-
-u32 adf_gen2_get_pf2vf_offset(u32 i);
-u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_bar);
-void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
-void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
-
u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
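As a worked example, with the enum values from adf_cfg_common.h above (UNUSED = 0, CRYPTO = 1, COMP = 2), the ADF_GEN2_DEFAULT_RING_TO_SRV_MAP added here evaluates to (1 << 0) | (1 << 3) | (0 << 6) | (2 << 9) = 0x409: ring pairs 0 and 1 carry crypto, pair 2 is unused and pair 3 carries compression.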
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
new file mode 100644
index 000000000000..1a9072aac2ca
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen2_pfvf.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_vf_proto.h"
+#include "adf_pfvf_utils.h"
+
+ /* VF2PF interrupts */
+#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9)
+
+#define ADF_GEN2_PF_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_GEN2_VF_PF2VF_OFFSET 0x200
+
+#define ADF_GEN2_CSR_IN_USE 0x6AC2
+#define ADF_GEN2_CSR_IN_USE_MASK 0xFFFE
+
+enum gen2_csr_pos {
+ ADF_GEN2_CSR_PF2VF_OFFSET = 0,
+ ADF_GEN2_CSR_VF2PF_OFFSET = 16,
+};
+
+#define ADF_PFVF_GEN2_MSGTYPE_SHIFT 2
+#define ADF_PFVF_GEN2_MSGTYPE_MASK 0x0F
+#define ADF_PFVF_GEN2_MSGDATA_SHIFT 6
+#define ADF_PFVF_GEN2_MSGDATA_MASK 0x3FF
+
+static const struct pfvf_csr_format csr_gen2_fmt = {
+ { ADF_PFVF_GEN2_MSGTYPE_SHIFT, ADF_PFVF_GEN2_MSGTYPE_MASK },
+ { ADF_PFVF_GEN2_MSGDATA_SHIFT, ADF_PFVF_GEN2_MSGDATA_MASK },
+};
+
+#define ADF_PFVF_MSG_RETRY_DELAY 5
+#define ADF_PFVF_MSG_MAX_RETRIES 3
+
+static u32 adf_gen2_pf_get_pfvf_offset(u32 i)
+{
+ return ADF_GEN2_PF_PF2VF_OFFSET(i);
+}
+
+static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
+{
+ return ADF_GEN2_VF_PF2VF_OFFSET;
+}
+
+static u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+ u32 errsou3, errmsk3, vf_int_mask;
+
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+ vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+
+ /* To avoid adding duplicate entries to work queue, clear
+ * vf_int_mask bits that are already masked in the ERRMSK register.
+ */
+ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+ vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+
+ return vf_int_mask;
+}
+
+static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ u32 vf_mask)
+{
+ /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
+}
+
+static void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ u32 vf_mask)
+{
+ /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
+}
+
+static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
+{
+ return ADF_PFVF_INT << offset;
+}
+
+static u32 gen2_csr_msg_to_position(u32 csr_msg, enum gen2_csr_pos offset)
+{
+ return (csr_msg & 0xFFFF) << offset;
+}
+
+static u32 gen2_csr_msg_from_position(u32 csr_val, enum gen2_csr_pos offset)
+{
+ return (csr_val >> offset) & 0xFFFF;
+}
+
+static bool gen2_csr_is_in_use(u32 msg, enum gen2_csr_pos offset)
+{
+ return ((msg >> offset) & ADF_GEN2_CSR_IN_USE_MASK) == ADF_GEN2_CSR_IN_USE;
+}
+
+static void gen2_csr_clear_in_use(u32 *msg, enum gen2_csr_pos offset)
+{
+ *msg &= ~(ADF_GEN2_CSR_IN_USE_MASK << offset);
+}
+
+static void gen2_csr_set_in_use(u32 *msg, enum gen2_csr_pos offset)
+{
+ *msg |= (ADF_GEN2_CSR_IN_USE << offset);
+}
+
+static bool is_legacy_user_pfvf_message(u32 msg)
+{
+ return !(msg & ADF_PFVF_MSGORIGIN_SYSTEM);
+}
+
+static bool is_pf2vf_notification(u8 msg_type)
+{
+ switch (msg_type) {
+ case ADF_PF2VF_MSGTYPE_RESTARTING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_vf2pf_notification(u8 msg_type)
+{
+ switch (msg_type) {
+ case ADF_VF2PF_MSGTYPE_INIT:
+ case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct pfvf_gen2_params {
+ u32 pfvf_offset;
+ struct mutex *csr_lock; /* lock preventing concurrent access to the CSR */
+ enum gen2_csr_pos local_offset;
+ enum gen2_csr_pos remote_offset;
+ bool (*is_notification_message)(u8 msg_type);
+ u8 compat_ver;
+};
+
+static int adf_gen2_pfvf_send(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg,
+ struct pfvf_gen2_params *params)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ enum gen2_csr_pos remote_offset = params->remote_offset;
+ enum gen2_csr_pos local_offset = params->local_offset;
+ unsigned int retries = ADF_PFVF_MSG_MAX_RETRIES;
+ struct mutex *lock = params->csr_lock;
+ u32 pfvf_offset = params->pfvf_offset;
+ u32 int_bit;
+ u32 csr_val;
+ u32 csr_msg;
+ int ret;
+
+ /* Gen2 messages, both PF->VF and VF->PF, are all 16 bits long. This
+ * allows us to build and read messages as if they were all 0 based.
+ * However, send and receive are in a single shared 32-bit register,
+ * so we need to shift and/or mask the message half before decoding
+ * it and after encoding it. Which one to shift depends on the
+ * direction.
+ */
+
+ int_bit = gen2_csr_get_int_bit(local_offset);
+
+ csr_msg = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen2_fmt);
+ if (unlikely(!csr_msg))
+ return -EINVAL;
+
+ /* Prepare for CSR format, shifting the wire message in place and
+ * setting the in-use pattern.
+ */
+ csr_msg = gen2_csr_msg_to_position(csr_msg, local_offset);
+ gen2_csr_set_in_use(&csr_msg, remote_offset);
+
+ mutex_lock(lock);
+
+start:
+ /* Check if the PFVF CSR is in use by remote function */
+ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+ if (gen2_csr_is_in_use(csr_val, local_offset)) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "PFVF CSR in use by remote function\n");
+ goto retry;
+ }
+
+ /* Attempt to get ownership of the PFVF CSR */
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_msg | int_bit);
+
+ /* Wait for confirmation from remote that it received the message */
+ ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & int_bit),
+ ADF_PFVF_MSG_ACK_DELAY_US,
+ ADF_PFVF_MSG_ACK_MAX_DELAY_US,
+ true, pmisc_addr, pfvf_offset);
+ if (unlikely(ret < 0)) {
+ dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+ csr_val &= ~int_bit;
+ }
+
+ /* For fire-and-forget notifications, the receiver does not clear
+ * the in-use pattern. This is used to detect collisions.
+ */
+ if (params->is_notification_message(msg.type) && csr_val != csr_msg) {
+ /* Collision must have overwritten the message */
+ dev_err(&GET_DEV(accel_dev),
+ "Collision on notification - PFVF CSR overwritten by remote function\n");
+ goto retry;
+ }
+
+ /* If the far side did not clear the in-use pattern it is either
+ * 1) Notification - message left intact to detect collision
+ * 2) Older protocol (compatibility version < 3) on the far side
+ * where the sender is responsible for clearing the in-use
+ * pattern after the receiver has acknowledged receipt.
+ * In either case, clear the in-use pattern now.
+ */
+ if (gen2_csr_is_in_use(csr_val, remote_offset)) {
+ gen2_csr_clear_in_use(&csr_val, remote_offset);
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
+ }
+
+out:
+ mutex_unlock(lock);
+ return ret;
+
+retry:
+ if (--retries) {
+ msleep(ADF_PFVF_MSG_RETRY_DELAY);
+ goto start;
+ } else {
+ ret = -EBUSY;
+ goto out;
+ }
+}
+
+static struct pfvf_message adf_gen2_pfvf_recv(struct adf_accel_dev *accel_dev,
+ struct pfvf_gen2_params *params)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ enum gen2_csr_pos remote_offset = params->remote_offset;
+ enum gen2_csr_pos local_offset = params->local_offset;
+ u32 pfvf_offset = params->pfvf_offset;
+ struct pfvf_message msg = { 0 };
+ u32 int_bit;
+ u32 csr_val;
+ u16 csr_msg;
+
+ int_bit = gen2_csr_get_int_bit(local_offset);
+
+ /* Read message */
+ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+ if (!(csr_val & int_bit)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
+ return msg;
+ }
+
+ /* Extract the message from the CSR */
+ csr_msg = gen2_csr_msg_from_position(csr_val, local_offset);
+
+ /* Ignore legacy non-system (non-kernel) messages */
+ if (unlikely(is_legacy_user_pfvf_message(csr_msg))) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ignored non-system message (0x%.8x);\n", csr_val);
+ /* Because this must be a legacy message, the far side is
+ * responsible for clearing the in-use pattern, so don't clear it here.
+ */
+ return msg;
+ }
+
+ /* Return the pfvf_message format */
+ msg = adf_pfvf_message_of(accel_dev, csr_msg, &csr_gen2_fmt);
+
+ /* The in-use pattern is not cleared for notifications (so that
+ * it can be used for collision detection) or for older implementations.
+ */
+ if (params->compat_ver >= ADF_PFVF_COMPAT_FAST_ACK &&
+ !params->is_notification_message(msg.type))
+ gen2_csr_clear_in_use(&csr_val, remote_offset);
+
+ /* To ACK, clear the INT bit */
+ csr_val &= ~int_bit;
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
+
+ return msg;
+}
+
+static int adf_gen2_pf2vf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ u32 pfvf_offset, struct mutex *csr_lock)
+{
+ struct pfvf_gen2_params params = {
+ .csr_lock = csr_lock,
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .is_notification_message = is_pf2vf_notification,
+ };
+
+ return adf_gen2_pfvf_send(accel_dev, msg, &params);
+}
+
+static int adf_gen2_vf2pf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ u32 pfvf_offset, struct mutex *csr_lock)
+{
+ struct pfvf_gen2_params params = {
+ .csr_lock = csr_lock,
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .is_notification_message = is_vf2pf_notification,
+ };
+
+ return adf_gen2_pfvf_send(accel_dev, msg, &params);
+}
+
+static struct pfvf_message adf_gen2_pf2vf_recv(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver)
+{
+ struct pfvf_gen2_params params = {
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .is_notification_message = is_pf2vf_notification,
+ .compat_ver = compat_ver,
+ };
+
+ return adf_gen2_pfvf_recv(accel_dev, &params);
+}
+
+static struct pfvf_message adf_gen2_vf2pf_recv(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver)
+{
+ struct pfvf_gen2_params params = {
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .is_notification_message = is_vf2pf_notification,
+ .compat_ver = compat_ver,
+ };
+
+ return adf_gen2_pfvf_recv(accel_dev, &params);
+}
+
+void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+ pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
+ pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
+ pfvf_ops->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
+ pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+ pfvf_ops->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
+ pfvf_ops->send_msg = adf_gen2_pf2vf_send;
+ pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_pf_pfvf_ops);
+
+void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_enable_vf2pf_comms;
+ pfvf_ops->get_pf2vf_offset = adf_gen2_vf_get_pfvf_offset;
+ pfvf_ops->get_vf2pf_offset = adf_gen2_vf_get_pfvf_offset;
+ pfvf_ops->send_msg = adf_gen2_vf2pf_send;
+ pfvf_ops->recv_msg = adf_gen2_pf2vf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_vf_pfvf_ops);
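On Gen2 both directions share one 32-bit CSR, so adf_gen2_pfvf_send() above must frame the 16-bit wire message into the sender's half, stamp the IN_USE pattern into the other half and set the interrupt bit, all in a single value. A hedged sketch of just that framing step, reusing the constants and helpers from this file:
/* Sketch only: build the CSR value that adf_gen2_pfvf_send() writes.
 * @wire_msg is the 16-bit message already encoded by adf_pfvf_csr_msg_of().
 */
static u32 gen2_frame_csr(u16 wire_msg, enum gen2_csr_pos local,
			  enum gen2_csr_pos remote)
{
	u32 csr_val = (u32)wire_msg << local;	/* message in sender's half */

	csr_val |= ADF_GEN2_CSR_IN_USE << remote;	/* claim the channel */
	csr_val |= ADF_PFVF_INT << local;	/* triggers the remote's IRQ */

	return csr_val;
}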
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h
new file mode 100644
index 000000000000..a716545a764c
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_GEN2_PFVF_H
+#define ADF_GEN2_PFVF_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
+#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
+
+#if defined(CONFIG_PCI_IOV)
+void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+#else
+static inline void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+
+static inline void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+#endif
+
+#endif /* ADF_GEN2_PFVF_H */
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
index 000528327b29..3148a62938fd 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+#include <linux/iopoll.h>
#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
@@ -109,20 +111,13 @@ static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
u32 ssm_wdt_pke_high = 0;
u32 ssm_wdt_pke_low = 0;
u32 ssm_wdt_high = 0;
u32 ssm_wdt_low = 0;
- void __iomem *pmisc_addr;
- struct adf_bar *pmisc;
- int pmisc_id;
-
- pmisc_id = hw_data->get_misc_bar_id(hw_data);
- pmisc = &GET_BARS(accel_dev)[pmisc_id];
- pmisc_addr = pmisc->virt_addr;
/* Convert 64bit WDT timer value into 32bit values for
* mmio write to 32bit CSRs.
@@ -139,3 +134,61 @@ void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
+
+int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
+
+static int reset_ring_pair(void __iomem *csr, u32 bank_number)
+{
+ u32 status;
+ int ret;
+
+ /* Write 1 to BIT(0) of the rpresetctl register to trigger the reset.
+ * Since rpresetctl registers have no RW fields, no need to preserve
+ * values for other bits. Just write directly.
+ */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_RESET);
+
+ /* Read rpresetsts register and wait for rp reset to complete */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US,
+ ADF_RPRESET_POLL_TIMEOUT_US, true,
+ csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+ if (!ret) {
+ /* When rp reset is done, clear rpresetsts */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
+ ADF_WQM_CSR_RPRESETSTS_STATUS);
+ }
+
+ return ret;
+}
+
+int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
+ void __iomem *csr;
+ int ret;
+
+ if (bank_number >= hw_data->num_banks)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "ring pair reset for bank:%d\n", bank_number);
+
+ csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
+ ret = reset_ring_pair(csr, bank_number);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "ring pair reset failed (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
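reset_ring_pair() above relies on read_poll_timeout() from <linux/iopoll.h>: repeatedly invoke an accessor, sleep between reads, and return 0 once the condition on the read value holds, or -ETIMEDOUT if the deadline passes first. A hedged sketch of the same idiom, reusing the driver's ADF_CSR_RD accessor and the poll constants from adf_gen4_hw_data.h (STATUS_OFFSET and READY_BIT are hypothetical):
#include <linux/iopoll.h>
#define STATUS_OFFSET	0x10	/* hypothetical status CSR */
#define READY_BIT	BIT(0)	/* hypothetical ready flag */
/* Sketch only: wait up to the driver's 5 s reset timeout for READY_BIT */
static int wait_for_ready(void __iomem *csr)
{
	u32 status;

	/* args: read op, value, condition, sleep_us, timeout_us,
	 * sleep-before-first-read, then the op's own arguments
	 */
	return read_poll_timeout(ADF_CSR_RD, status, status & READY_BIT,
				 ADF_RPRESET_POLL_DELAY_US,
				 ADF_RPRESET_POLL_TIMEOUT_US, true,
				 csr, STATUS_OFFSET);
}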
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index b8fca1ff7aab..f0f71ca44ca3 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -4,6 +4,7 @@
#define ADF_GEN4_HW_CSR_DATA_H_
#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
/* Transport access */
#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
@@ -94,6 +95,13 @@ do { \
ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+/* Default ring mapping */
+#define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \
+ (ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
+ SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
+ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
+ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
+
/* WDT timers
*
* Timeout is in cycles. Clock speed may vary across products but this
@@ -106,6 +114,15 @@ do { \
#define ADF_SSMWDTPKEL_OFFSET 0x58
#define ADF_SSMWDTPKEH_OFFSET 0x60
+/* Ring reset */
+#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_RPRESET_POLL_DELAY_US 20
+#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3))
+#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
+#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
new file mode 100644
index 000000000000..8efbedf63bc8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_utils.h"
+
+#define ADF_4XXX_MAX_NUM_VFS 16
+
+#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
+#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
+
+/* VF2PF interrupt source registers */
+#define ADF_4XXX_VM2PF_SOU(i) (0x41A180 + ((i) * 4))
+#define ADF_4XXX_VM2PF_MSK(i) (0x41A1C0 + ((i) * 4))
+#define ADF_4XXX_VM2PF_INT_EN_MSK BIT(0)
+
+#define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2
+#define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F
+#define ADF_PFVF_GEN4_MSGDATA_SHIFT 8
+#define ADF_PFVF_GEN4_MSGDATA_MASK 0xFFFFFF
+
+static const struct pfvf_csr_format csr_gen4_fmt = {
+ { ADF_PFVF_GEN4_MSGTYPE_SHIFT, ADF_PFVF_GEN4_MSGTYPE_MASK },
+ { ADF_PFVF_GEN4_MSGDATA_SHIFT, ADF_PFVF_GEN4_MSGDATA_MASK },
+};
+
+static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
+{
+ return ADF_4XXX_PF2VM_OFFSET(i);
+}
+
+static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
+{
+ return ADF_4XXX_VM2PF_OFFSET(i);
+}
+
+static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+ int i;
+ u32 sou, mask;
+ int num_csrs = ADF_4XXX_MAX_NUM_VFS;
+ u32 vf_mask = 0;
+
+ for (i = 0; i < num_csrs; i++) {
+ sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU(i));
+ mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK(i));
+ sou &= ~mask;
+ vf_mask |= sou << i;
+ }
+
+ return vf_mask;
+}
+
+static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ u32 vf_mask)
+{
+ int num_csrs = ADF_4XXX_MAX_NUM_VFS;
+ unsigned long mask = vf_mask;
+ unsigned int val;
+ int i;
+
+ for_each_set_bit(i, &mask, num_csrs) {
+ unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
+
+ val = ADF_CSR_RD(pmisc_addr, offset) & ~ADF_4XXX_VM2PF_INT_EN_MSK;
+ ADF_CSR_WR(pmisc_addr, offset, val);
+ }
+}
+
+static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ u32 vf_mask)
+{
+ int num_csrs = ADF_4XXX_MAX_NUM_VFS;
+ unsigned long mask = vf_mask;
+ unsigned int val;
+ int i;
+
+ for_each_set_bit(i, &mask, num_csrs) {
+ unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
+
+ val = ADF_CSR_RD(pmisc_addr, offset) | ADF_4XXX_VM2PF_INT_EN_MSK;
+ ADF_CSR_WR(pmisc_addr, offset, val);
+ }
+}
+
+static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg, u32 pfvf_offset,
+ struct mutex *csr_lock)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 csr_val;
+ int ret;
+
+ csr_val = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen4_fmt);
+ if (unlikely(!csr_val))
+ return -EINVAL;
+
+ mutex_lock(csr_lock);
+
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val | ADF_PFVF_INT);
+
+ /* Wait for confirmation from remote that it received the message */
+ ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & ADF_PFVF_INT),
+ ADF_PFVF_MSG_ACK_DELAY_US,
+ ADF_PFVF_MSG_ACK_MAX_DELAY_US,
+ true, pmisc_addr, pfvf_offset);
+ if (ret < 0)
+ dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+
+ mutex_unlock(csr_lock);
+ return ret;
+}
+
+static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 csr_val;
+
+ /* Read message from the CSR */
+ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+
+ /* We can now acknowledge the message reception by clearing the
+ * interrupt bit
+ */
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val & ~ADF_PFVF_INT);
+
+ /* Return the pfvf_message format */
+ return adf_pfvf_message_of(accel_dev, csr_val, &csr_gen4_fmt);
+}
+
+void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+ pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
+ pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
+ pfvf_ops->get_vf2pf_sources = adf_gen4_get_vf2pf_sources;
+ pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
+ pfvf_ops->disable_vf2pf_interrupts = adf_gen4_disable_vf2pf_interrupts;
+ pfvf_ops->send_msg = adf_gen4_pfvf_send;
+ pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_pf_pfvf_ops);
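A design contrast with the Gen2 implementation earlier in this patch is worth noting: Gen4 gives each direction its own dedicated register (PF2VM and VM2PF are distinct CSRs), so adf_gen4_pfvf_send() needs no in-use pattern, no collision detection and no retry loop; the interrupt bit is the entire handshake.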
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h
new file mode 100644
index 000000000000..17d1b774d4a8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_GEN4_PFVF_H
+#define ADF_GEN4_PFVF_H
+
+#include "adf_accel_devices.h"
+
+#ifdef CONFIG_PCI_IOV
+void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+#else
+static inline void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+#endif
+
+#endif /* ADF_GEN4_PFVF_H */
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index e3749e5817d9..2edc63c6b6ca 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -69,7 +69,8 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
- if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
+ if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
+ !accel_dev->is_vf) {
dev_err(&GET_DEV(accel_dev), "Device not configured\n");
return -EFAULT;
}
@@ -117,10 +118,16 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
hw_data->enable_ints(accel_dev);
hw_data->enable_error_correction(accel_dev);
- ret = hw_data->enable_pfvf_comms(accel_dev);
+ ret = hw_data->pfvf_ops.enable_comms(accel_dev);
if (ret)
return ret;
+ if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
+ accel_dev->is_vf) {
+ if (qat_crypto_vf_dev_config(accel_dev))
+ return -EFAULT;
+ }
+
/*
* Subservice initialisation is divided into two stages: init and start.
* This is to facilitate any ordering dependencies between services
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index 40593c9449a2..4ca482aa69f7 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -54,52 +54,83 @@ static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
return IRQ_HANDLED;
}
-static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+#ifdef CONFIG_PCI_IOV
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
- struct adf_accel_dev *accel_dev = dev_ptr;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ unsigned long flags;
-#ifdef CONFIG_PCI_IOV
- /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
- if (accel_dev->pf.vf_info) {
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *pmisc =
- &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
- void __iomem *pmisc_addr = pmisc->virt_addr;
- unsigned long vf_mask;
-
- /* Get the interrupt sources triggered by VFs */
- vf_mask = hw_data->get_vf2pf_sources(pmisc_addr);
-
- if (vf_mask) {
- struct adf_accel_vf_info *vf_info;
- bool irq_handled = false;
- int i;
-
- /* Disable VF2PF interrupts for VFs with pending ints */
- adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
-
- /*
- * Handle VF2PF interrupt unless the VF is malicious and
- * is attempting to flood the host OS with VF2PF interrupts.
- */
- for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
- vf_info = accel_dev->pf.vf_info + i;
-
- if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
- dev_info(&GET_DEV(accel_dev),
- "Too many ints from VF%d\n",
- vf_info->vf_nr + 1);
- continue;
- }
-
- adf_schedule_vf2pf_handler(vf_info);
- irq_handled = true;
+ spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+ GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+ GET_PFVF_OPS(accel_dev)->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+static void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+ u32 vf_mask)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+
+ spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+ GET_PFVF_OPS(accel_dev)->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+}
+
+static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ bool irq_handled = false;
+ unsigned long vf_mask;
+
+ /* Get the interrupt sources triggered by VFs */
+ vf_mask = GET_PFVF_OPS(accel_dev)->get_vf2pf_sources(pmisc_addr);
+
+ if (vf_mask) {
+ struct adf_accel_vf_info *vf_info;
+ int i;
+
+ /* Disable VF2PF interrupts for VFs with pending ints */
+ adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
+
+ /*
+ * Handle VF2PF interrupt unless the VF is malicious and
+ * is attempting to flood the host OS with VF2PF interrupts.
+ */
+ for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
+ vf_info = accel_dev->pf.vf_info + i;
+
+ if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Too many ints from VF%d\n",
+ vf_info->vf_nr);
+ continue;
}
- if (irq_handled)
- return IRQ_HANDLED;
+ adf_schedule_vf2pf_handler(vf_info);
+ irq_handled = true;
}
}
+ return irq_handled;
+}
+#endif /* CONFIG_PCI_IOV */
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+ struct adf_accel_dev *accel_dev = dev_ptr;
+
+#ifdef CONFIG_PCI_IOV
+ /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+ if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
+ return IRQ_HANDLED;
#endif /* CONFIG_PCI_IOV */
dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
deleted file mode 100644
index 59860bdaedb6..000000000000
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ /dev/null
@@ -1,416 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#include <linux/delay.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_pf2vf_msg.h"
-
-#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
-#define ADF_PFVF_MSG_ACK_DELAY 2
-#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
-#define ADF_PFVF_MSG_RETRY_DELAY 5
-#define ADF_PFVF_MSG_MAX_RETRIES 3
-#define ADF_PFVF_MSG_RESP_TIMEOUT (ADF_PFVF_MSG_ACK_DELAY * \
- ADF_PFVF_MSG_ACK_MAX_RETRY + \
- ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
-
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
-{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
- void __iomem *pmisc_addr = pmisc->virt_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
- hw_data->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
- spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
-}
-
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
-{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
- void __iomem *pmisc_addr = pmisc->virt_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
- hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
- spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
-}
-
-void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
- u32 vf_mask)
-{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
- void __iomem *pmisc_addr = pmisc->virt_addr;
-
- spin_lock(&accel_dev->pf.vf2pf_ints_lock);
- hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
- spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
-}
-
-static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
-{
- struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- void __iomem *pmisc_bar_addr =
- pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
- u32 val, pf2vf_offset, count = 0;
- u32 local_in_use_mask, local_in_use_pattern;
- u32 remote_in_use_mask, remote_in_use_pattern;
- struct mutex *lock; /* lock preventing concurrent acces of CSR */
- u32 int_bit;
- int ret = 0;
-
- if (accel_dev->is_vf) {
- pf2vf_offset = hw_data->get_pf2vf_offset(0);
- lock = &accel_dev->vf.vf2pf_lock;
- local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
- local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
- remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
- remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
- int_bit = ADF_VF2PF_INT;
- } else {
- pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
- lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
- local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
- local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
- remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
- remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
- int_bit = ADF_PF2VF_INT;
- }
-
- mutex_lock(lock);
-
- /* Check if the PFVF CSR is in use by remote function */
- val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
- if ((val & remote_in_use_mask) == remote_in_use_pattern) {
- dev_dbg(&GET_DEV(accel_dev),
- "PFVF CSR in use by remote function\n");
- ret = -EBUSY;
- goto out;
- }
-
- msg &= ~local_in_use_mask;
- msg |= local_in_use_pattern;
-
- /* Attempt to get ownership of the PFVF CSR */
- ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
-
- /* Wait for confirmation from remote func it received the message */
- do {
- msleep(ADF_PFVF_MSG_ACK_DELAY);
- val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
- } while ((val & int_bit) && (count++ < ADF_PFVF_MSG_ACK_MAX_RETRY));
-
- if (val != msg) {
- dev_dbg(&GET_DEV(accel_dev),
- "Collision - PFVF CSR overwritten by remote function\n");
- ret = -EIO;
- goto out;
- }
-
- if (val & int_bit) {
- dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
- val &= ~int_bit;
- ret = -EIO;
- }
-
- /* Finished with the PFVF CSR; relinquish it and leave msg in CSR */
- ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
-out:
- mutex_unlock(lock);
- return ret;
-}
-
-/**
- * adf_iov_putmsg() - send PFVF message
- * @accel_dev: Pointer to acceleration device.
- * @msg: Message to send
- * @vf_nr: VF number to which the message will be sent if on PF, ignored
- * otherwise
- *
- * Function sends a message through the PFVF channel
- *
- * Return: 0 on success, error code otherwise.
- */
-static int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
-{
- u32 count = 0;
- int ret;
-
- do {
- ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
- if (ret)
- msleep(ADF_PFVF_MSG_RETRY_DELAY);
- } while (ret && (count++ < ADF_PFVF_MSG_MAX_RETRIES));
-
- return ret;
-}
-
-/**
- * adf_send_pf2vf_msg() - send PF to VF message
- * @accel_dev: Pointer to acceleration device
- * @vf_nr: VF number to which the message will be sent
- * @msg: Message to send
- *
- * This function allows the PF to send a message to a specific VF.
- *
- * Return: 0 on success, error code otherwise.
- */
-static int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, u32 msg)
-{
- return adf_iov_putmsg(accel_dev, msg, vf_nr);
-}
-
-/**
- * adf_send_vf2pf_msg() - send VF to PF message
- * @accel_dev: Pointer to acceleration device
- * @msg: Message to send
- *
- * This function allows the VF to send a message to the PF.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg)
-{
- return adf_iov_putmsg(accel_dev, msg, 0);
-}
-
-/**
- * adf_send_vf2pf_req() - send VF2PF request message
- * @accel_dev: Pointer to acceleration device.
- * @msg: Request message to send
- *
- * This function sends a message that requires a response from the VF to the PF
- * and waits for a reply.
- *
- * Return: 0 on success, error code otherwise.
- */
-static int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, u32 msg)
-{
- unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
- int ret;
-
- reinit_completion(&accel_dev->vf.iov_msg_completion);
-
- /* Send request from VF to PF */
- ret = adf_send_vf2pf_msg(accel_dev, msg);
- if (ret) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send request msg to PF\n");
- return ret;
- }
-
- /* Wait for response */
- if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
- timeout)) {
- dev_err(&GET_DEV(accel_dev),
- "PFVF request/response message timeout expired\n");
- return -EIO;
- }
-
- return 0;
-}
-
-void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
-{
- struct adf_accel_dev *accel_dev = vf_info->accel_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- int bar_id = hw_data->get_misc_bar_id(hw_data);
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
- void __iomem *pmisc_addr = pmisc->virt_addr;
- u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
-
- /* Read message from the VF */
- msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
- if (!(msg & ADF_VF2PF_INT)) {
- dev_info(&GET_DEV(accel_dev),
- "Spurious VF2PF interrupt, msg %X. Ignored\n", msg);
- goto out;
- }
-
- /* To ACK, clear the VF2PFINT bit */
- msg &= ~ADF_VF2PF_INT;
- ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
-
- if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
- /* Ignore legacy non-system (non-kernel) VF2PF messages */
- goto err;
-
- switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
- case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
- {
- u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
-
- resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
- (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
- ADF_PF2VF_MSGTYPE_SHIFT) |
- (ADF_PFVF_COMPAT_THIS_VERSION <<
- ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
-
- dev_dbg(&GET_DEV(accel_dev),
- "Compatibility Version Request from VF%d vers=%u\n",
- vf_nr + 1, vf_compat_ver);
-
- if (vf_compat_ver < hw_data->min_iov_compat_ver) {
- dev_err(&GET_DEV(accel_dev),
- "VF (vers %d) incompatible with PF (vers %d)\n",
- vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
- resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
- ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- } else if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION) {
- dev_err(&GET_DEV(accel_dev),
- "VF (vers %d) compat with PF (vers %d) unkn.\n",
- vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
- resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
- ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- } else {
- dev_dbg(&GET_DEV(accel_dev),
- "VF (vers %d) compatible with PF (vers %d)\n",
- vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
- resp |= ADF_PF2VF_VF_COMPATIBLE <<
- ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- }
- }
- break;
- case ADF_VF2PF_MSGTYPE_VERSION_REQ:
- dev_dbg(&GET_DEV(accel_dev),
- "Legacy VersionRequest received from VF%d 0x%x\n",
- vf_nr + 1, msg);
- resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
- (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
- ADF_PF2VF_MSGTYPE_SHIFT) |
- (ADF_PFVF_COMPAT_THIS_VERSION <<
- ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
- resp |= ADF_PF2VF_VF_COMPATIBLE <<
- ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- /* Set legacy major and minor version num */
- resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
- 1 << ADF_PF2VF_MINORVERSION_SHIFT;
- break;
- case ADF_VF2PF_MSGTYPE_INIT:
- {
- dev_dbg(&GET_DEV(accel_dev),
- "Init message received from VF%d 0x%x\n",
- vf_nr + 1, msg);
- vf_info->init = true;
- }
- break;
- case ADF_VF2PF_MSGTYPE_SHUTDOWN:
- {
- dev_dbg(&GET_DEV(accel_dev),
- "Shutdown message received from VF%d 0x%x\n",
- vf_nr + 1, msg);
- vf_info->init = false;
- }
- break;
- default:
- goto err;
- }
-
- if (resp && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
- dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
-
-out:
- /* re-enable interrupt on PF from this VF */
- adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
-
- return;
-err:
- dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
- vf_nr + 1, msg);
-}
-
-void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
-{
- struct adf_accel_vf_info *vf;
- u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
- (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
- int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
-
- for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
- if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
- dev_err(&GET_DEV(accel_dev),
- "Failed to send restarting msg to VF%d\n", i);
- }
-}
-
-static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
-{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 msg = 0;
- int ret;
-
- msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
- msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
- msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
- BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
-
- ret = adf_send_vf2pf_req(accel_dev, msg);
- if (ret) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Compatibility Version Request.\n");
- return ret;
- }
-
- /* Response from PF received, check compatibility */
- switch (accel_dev->vf.compatible) {
- case ADF_PF2VF_VF_COMPATIBLE:
- break;
- case ADF_PF2VF_VF_COMPAT_UNKNOWN:
- /* VF is newer than PF and decides whether it is compatible */
- if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) {
- accel_dev->vf.compatible = ADF_PF2VF_VF_COMPATIBLE;
- break;
- }
- fallthrough;
- case ADF_PF2VF_VF_INCOMPATIBLE:
- dev_err(&GET_DEV(accel_dev),
- "PF (vers %d) and VF (vers %d) are not compatible\n",
- accel_dev->vf.pf_version,
- ADF_PFVF_COMPAT_THIS_VERSION);
- return -EINVAL;
- default:
- dev_err(&GET_DEV(accel_dev),
- "Invalid response from PF; assume not compatible\n");
- return -EINVAL;
- }
- return ret;
-}
-
-/**
- * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
- *
- * @accel_dev: Pointer to acceleration device virtual function.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
-{
- adf_enable_pf2vf_interrupts(accel_dev);
- return adf_vf2pf_request_version(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
-
-/**
- * adf_enable_pf2vf_comms() - Function enables communication from pf to vf
- *
- * @accel_dev: Pointer to acceleration device virtual function.
- *
- * This function carries out the necessary steps to setup and start the PFVF
- * communication channel, if any.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
-{
- spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
deleted file mode 100644
index a7d8f8367345..000000000000
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#ifndef ADF_PF2VF_MSG_H
-#define ADF_PF2VF_MSG_H
-
-/*
- * PF<->VF Messaging
- * The PF has an array of 32-bit PF2VF registers, one for each VF. The
- * PF can access all these registers; each VF can access only the one
- * register associated with that particular VF.
- *
- * The register functionally is split into two parts:
- * The bottom half is for PF->VF messages. In particular when the first
- * bit of this register (bit 0) gets set an interrupt will be triggered
- * in the respective VF.
- * The top half is for VF->PF messages. In particular when the first bit
- * of this half of register (bit 16) gets set an interrupt will be triggered
- * in the PF.
- *
- * The remaining bits within this register are available to encode messages.
- * and implement a collision control mechanism to prevent concurrent use of
- * the PF2VF register by both the PF and VF.
- *
- * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
- * _______________________________________________
- * | | | | | | | | | | | | | | | | |
- * +-----------------------------------------------+
- * \___________________________/ \_________/ ^ ^
- * ^ ^ | |
- * | | | VF2PF Int
- * | | Message Origin
- * | Message Type
- * Message-specific Data/Reserved
- *
- * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
- * _______________________________________________
- * | | | | | | | | | | | | | | | | |
- * +-----------------------------------------------+
- * \___________________________/ \_________/ ^ ^
- * ^ ^ | |
- * | | | PF2VF Int
- * | | Message Origin
- * | Message Type
- * Message-specific Data/Reserved
- *
- * Message Origin (Should always be 1)
- * A legacy out-of-tree QAT driver allowed for a set of messages not supported
- * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
- *
- * When a PF or VF attempts to send a message in the lower or upper 16 bits,
- * respectively, the other 16 bits are written to first with a defined
- * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
- */
-
-#define ADF_PFVF_COMPAT_THIS_VERSION 0x1 /* PF<->VF compat */
-
-/* PF->VF messages */
-#define ADF_PF2VF_INT BIT(0)
-#define ADF_PF2VF_MSGORIGIN_SYSTEM BIT(1)
-#define ADF_PF2VF_MSGTYPE_MASK 0x0000003C
-#define ADF_PF2VF_MSGTYPE_SHIFT 2
-#define ADF_PF2VF_MSGTYPE_RESTARTING 0x01
-#define ADF_PF2VF_MSGTYPE_VERSION_RESP 0x02
-#define ADF_PF2VF_IN_USE_BY_PF 0x6AC20000
-#define ADF_PF2VF_IN_USE_BY_PF_MASK 0xFFFE0000
-
-/* PF->VF Version Response */
-#define ADF_PF2VF_VERSION_RESP_VERS_MASK 0x00003FC0
-#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT 6
-#define ADF_PF2VF_VERSION_RESP_RESULT_MASK 0x0000C000
-#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT 14
-#define ADF_PF2VF_MINORVERSION_SHIFT 6
-#define ADF_PF2VF_MAJORVERSION_SHIFT 10
-#define ADF_PF2VF_VF_COMPATIBLE 1
-#define ADF_PF2VF_VF_INCOMPATIBLE 2
-#define ADF_PF2VF_VF_COMPAT_UNKNOWN 3
-
-/* VF->PF messages */
-#define ADF_VF2PF_IN_USE_BY_VF 0x00006AC2
-#define ADF_VF2PF_IN_USE_BY_VF_MASK 0x0000FFFE
-#define ADF_VF2PF_INT BIT(16)
-#define ADF_VF2PF_MSGORIGIN_SYSTEM BIT(17)
-#define ADF_VF2PF_MSGTYPE_MASK 0x003C0000
-#define ADF_VF2PF_MSGTYPE_SHIFT 18
-#define ADF_VF2PF_MSGTYPE_INIT 0x3
-#define ADF_VF2PF_MSGTYPE_SHUTDOWN 0x4
-#define ADF_VF2PF_MSGTYPE_VERSION_REQ 0x5
-#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ 0x6
-
-/* VF->PF Compatible Version Request */
-#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
-
-#endif /* ADF_IOV_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
new file mode 100644
index 000000000000..9c37a2661392
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#ifndef ADF_PFVF_MSG_H
+#define ADF_PFVF_MSG_H
+
+#include <linux/bits.h>
+
+/*
+ * PF<->VF Gen2 Messaging format
+ *
+ * The PF has an array of 32-bit PF2VF registers, one for each VF. The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register functionality is split into two parts:
+ * The bottom half is for PF->VF messages. In particular when the first
+ * bit of this register (bit 0) gets set an interrupt will be triggered
+ * in the respective VF.
+ * The top half is for VF->PF messages. In particular when the first bit
+ * of this half of register (bit 16) gets set an interrupt will be triggered
+ * in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and implement a collision control mechanism to prevent concurrent use of
+ * the PF2VF register by both the PF and VF.
+ *
+ * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | VF2PF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | PF2VF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see function
+ * adf_gen2_pfvf_send() in adf_pf2vf_msg.c).
+ *
+ *
+ * PF<->VF Gen4 Messaging format
+ *
+ * Similarly to the gen2 messaging format, 32-bit registers are used for
+ * communication between the PF and the VFs. However, each PF/VF pair shares
+ * a dedicated pair of 32-bit registers to avoid collisions: one for PF to VF
+ * messages and one for VF to PF messages.
+ *
+ * Both the Interrupt bit and the Message Origin bit retain the same position
+ * and meaning, although non-system messages are now deprecated and not
+ * expected.
+ *
+ * 31 30 9 8 7 6 5 4 3 2 1 0
+ * _______________________________________________
+ * | | | . . . | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \_____________________/ \_______________/ ^ ^
+ * ^ ^ | |
+ * | | | PF/VF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * For both formats, the message reception is acknowledged by lowering the
+ * interrupt bit on the register where the message was sent.
+ */
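/* Illustrative sketch (standalone C, not part of the patch): the
 * receive-and-ack handshake described above, using the lower-half (PF->VF)
 * bit positions - interrupt at bit 0, Message Origin at bit 1, a 4-bit
 * message type at bits 2-5. A plain variable stands in for the shared CSR.
 */
#include <stdint.h>
#include <stdio.h>

#define PF2VF_INT        (1u << 0) /* set by the PF to interrupt the VF */
#define MSGORIGIN_SYSTEM (1u << 1) /* always 1 for messages from this driver */

static uint32_t csr = 0x00000007; /* pretend the PF wrote RESTARTING (type 1) */

int main(void)
{
	uint32_t msg = csr; /* VF side: read the shared register */

	if (!(msg & PF2VF_INT) || !(msg & MSGORIGIN_SYSTEM))
		return 0; /* spurious interrupt or legacy message: ignore */

	printf("got message type %u\n", (unsigned int)((msg >> 2) & 0xF));
	csr = msg & ~PF2VF_INT; /* ack: lower the interrupt bit */
	return 0;
}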
+
+/* PFVF message common bits */
+#define ADF_PFVF_INT BIT(0)
+#define ADF_PFVF_MSGORIGIN_SYSTEM BIT(1)
+
+/* Different generations have different CSR layouts; use this struct
+ * to abstract these differences away
+ */
+struct pfvf_message {
+ u8 type;
+ u32 data;
+};
+
+/* PF->VF messages */
+enum pf2vf_msgtype {
+ ADF_PF2VF_MSGTYPE_RESTARTING = 0x01,
+ ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02,
+ ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03,
+/* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
+ ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10,
+};
+
+/* VF->PF messages */
+enum vf2pf_msgtype {
+ ADF_VF2PF_MSGTYPE_INIT = 0x03,
+ ADF_VF2PF_MSGTYPE_SHUTDOWN = 0x04,
+ ADF_VF2PF_MSGTYPE_VERSION_REQ = 0x05,
+ ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ = 0x06,
+ ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07,
+ ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08,
+ ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09,
+/* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
+ ADF_VF2PF_MSGTYPE_RP_RESET = 0x10,
+};
+
+/* VF/PF compatibility version. */
+enum pfvf_compatibility_version {
+ /* Support for extended capabilities */
+ ADF_PFVF_COMPAT_CAPABILITIES = 0x02,
+ /* In-use pattern cleared by receiver */
+ ADF_PFVF_COMPAT_FAST_ACK = 0x03,
+ /* Ring to service mapping support for non-standard mappings */
+ ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04,
+ /* Reference to the latest version */
+ ADF_PFVF_COMPAT_THIS_VERSION = 0x04,
+};
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK GENMASK(7, 0)
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK GENMASK(9, 8)
+
+enum pf2vf_compat_response {
+ ADF_PF2VF_VF_COMPATIBLE = 0x01,
+ ADF_PF2VF_VF_INCOMPATIBLE = 0x02,
+ ADF_PF2VF_VF_COMPAT_UNKNOWN = 0x03,
+};
+
+enum ring_reset_result {
+ RPRESET_SUCCESS = 0x00,
+ RPRESET_NOT_SUPPORTED = 0x01,
+ RPRESET_INVAL_BANK = 0x02,
+ RPRESET_TIMEOUT = 0x03,
+};
+
+#define ADF_VF2PF_RNG_RESET_RP_MASK GENMASK(1, 0)
+#define ADF_VF2PF_RNG_RESET_RSVD_MASK GENMASK(25, 2)
+
+/* PF->VF Block Responses */
+#define ADF_PF2VF_BLKMSG_RESP_TYPE_MASK GENMASK(1, 0)
+#define ADF_PF2VF_BLKMSG_RESP_DATA_MASK GENMASK(9, 2)
+
+enum pf2vf_blkmsg_resp_type {
+ ADF_PF2VF_BLKMSG_RESP_TYPE_DATA = 0x00,
+ ADF_PF2VF_BLKMSG_RESP_TYPE_CRC = 0x01,
+ ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR = 0x02,
+};
+
+/* PF->VF Block Error Code */
+enum pf2vf_blkmsg_error {
+ ADF_PF2VF_INVALID_BLOCK_TYPE = 0x00,
+ ADF_PF2VF_INVALID_BYTE_NUM_REQ = 0x01,
+ ADF_PF2VF_PAYLOAD_TRUNCATED = 0x02,
+ ADF_PF2VF_UNSPECIFIED_ERROR = 0x03,
+};
+
+/* VF->PF Block Requests */
+#define ADF_VF2PF_LARGE_BLOCK_TYPE_MASK GENMASK(1, 0)
+#define ADF_VF2PF_LARGE_BLOCK_BYTE_MASK GENMASK(8, 2)
+#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK GENMASK(2, 0)
+#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK GENMASK(8, 3)
+#define ADF_VF2PF_SMALL_BLOCK_TYPE_MASK GENMASK(3, 0)
+#define ADF_VF2PF_SMALL_BLOCK_BYTE_MASK GENMASK(8, 4)
+#define ADF_VF2PF_BLOCK_CRC_REQ_MASK BIT(9)
+
+/* VF->PF Block Request Types
+ * 0..15 - 32 byte message
+ * 16..23 - 64 byte message
+ * 24..27 - 128 byte message
+ */
+enum vf2pf_blkmsg_req_type {
+ ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY = 0x02,
+ ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP = 0x03,
+};
+
+#define ADF_VF2PF_SMALL_BLOCK_TYPE_MAX \
+ (FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK))
+
+#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX \
+ (FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK) + \
+ ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1)
+
+#define ADF_VF2PF_LARGE_BLOCK_TYPE_MAX \
+ (FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK) + \
+ ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX)
+
+#define ADF_VF2PF_SMALL_BLOCK_BYTE_MAX \
+ FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK)
+
+#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX \
+ FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK)
+
+#define ADF_VF2PF_LARGE_BLOCK_BYTE_MAX \
+ FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK)
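/* Illustrative sketch: the capacities implied by the block request masks
 * above. GENMASK(h, l) spans bits l..h and FIELD_MAX() is the largest value
 * a field can hold; plain-C equivalents (with a GCC-style builtin) are used
 * here so the numbers can be checked standalone. Note how the small request
 * offers 16 types but only a 32-byte range, while the large request offers
 * 4 types over a 128-byte range, matching the comment further down.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t genmask(int h, int l) { return (~0u >> (31 - h)) & (~0u << l); }
static uint32_t field_max(uint32_t mask) { return mask >> __builtin_ctz(mask); }

int main(void)
{
	printf("small:  %u types, byte index 0..%u (32-byte messages)\n",
	       field_max(genmask(3, 0)) + 1, field_max(genmask(8, 4)));
	printf("medium: %u types, byte index 0..%u (64-byte messages)\n",
	       field_max(genmask(2, 0)) + 1, field_max(genmask(8, 3)));
	printf("large:  %u types, byte index 0..%u (128-byte messages)\n",
	       field_max(genmask(1, 0)) + 1, field_max(genmask(8, 2)));
	return 0;
}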
+
+struct pfvf_blkmsg_header {
+ u8 version;
+ u8 payload_size;
+} __packed;
+
+#define ADF_PFVF_BLKMSG_HEADER_SIZE (sizeof(struct pfvf_blkmsg_header))
+#define ADF_PFVF_BLKMSG_PAYLOAD_SIZE(blkmsg) (sizeof(blkmsg) - \
+ ADF_PFVF_BLKMSG_HEADER_SIZE)
+#define ADF_PFVF_BLKMSG_MSG_SIZE(blkmsg) (ADF_PFVF_BLKMSG_HEADER_SIZE + \
+ (blkmsg)->hdr.payload_size)
+#define ADF_PFVF_BLKMSG_MSG_MAX_SIZE 128
+
+/* PF->VF Block message header bytes */
+#define ADF_PFVF_BLKMSG_VER_BYTE 0
+#define ADF_PFVF_BLKMSG_LEN_BYTE 1
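/* Illustrative sketch: the header/payload size arithmetic above applied to
 * the capabilities_v2 message defined later in this file - a packed 2-byte
 * header followed by two u32 fields. Standalone C, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct pfvf_blkmsg_header { uint8_t version, payload_size; } __attribute__((packed));
struct capabilities_v2 {
	struct pfvf_blkmsg_header hdr;
	uint32_t ext_dc_caps, capabilities;
} __attribute__((packed));

int main(void)
{
	size_t hdr = sizeof(struct pfvf_blkmsg_header);        /* 2 */
	size_t payload = sizeof(struct capabilities_v2) - hdr; /* 8, carried in hdr.payload_size */

	printf("header=%zu payload=%zu total=%zu\n", hdr, payload, hdr + payload);
	return 0;
}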
+
+/* PF/VF Capabilities message values */
+enum blkmsg_capabilities_versions {
+ ADF_PFVF_CAPABILITIES_V1_VERSION = 0x01,
+ ADF_PFVF_CAPABILITIES_V2_VERSION = 0x02,
+ ADF_PFVF_CAPABILITIES_V3_VERSION = 0x03,
+};
+
+struct capabilities_v1 {
+ struct pfvf_blkmsg_header hdr;
+ u32 ext_dc_caps;
+} __packed;
+
+struct capabilities_v2 {
+ struct pfvf_blkmsg_header hdr;
+ u32 ext_dc_caps;
+ u32 capabilities;
+} __packed;
+
+struct capabilities_v3 {
+ struct pfvf_blkmsg_header hdr;
+ u32 ext_dc_caps;
+ u32 capabilities;
+ u32 frequency;
+} __packed;
+
+/* PF/VF Ring to service mapping values */
+enum blkmsg_ring_to_svc_versions {
+ ADF_PFVF_RING_TO_SVC_VERSION = 0x01,
+};
+
+struct ring_to_svc_map_v1 {
+ struct pfvf_blkmsg_header hdr;
+ u16 map;
+} __packed;
+
+#endif /* ADF_PFVF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c
new file mode 100644
index 000000000000..14c069f0d71a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_vf_info *vf;
+ struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send restarting msg to VF%d\n", i);
+ }
+}
+
+int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct capabilities_v2 caps_msg;
+
+ caps_msg.ext_dc_caps = hw_data->extended_dc_capabilities;
+ caps_msg.capabilities = hw_data->accel_capabilities_mask;
+
+ caps_msg.hdr.version = ADF_PFVF_CAPABILITIES_V2_VERSION;
+ caps_msg.hdr.payload_size =
+ ADF_PFVF_BLKMSG_PAYLOAD_SIZE(struct capabilities_v2);
+
+ memcpy(buffer, &caps_msg, sizeof(caps_msg));
+
+ return 0;
+}
+
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat)
+{
+ struct ring_to_svc_map_v1 rts_map_msg;
+
+ rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
+ rts_map_msg.hdr.version = ADF_PFVF_RING_TO_SVC_VERSION;
+ rts_map_msg.hdr.payload_size = ADF_PFVF_BLKMSG_PAYLOAD_SIZE(rts_map_msg);
+
+ memcpy(buffer, &rts_map_msg, sizeof(rts_map_msg));
+
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h
new file mode 100644
index 000000000000..e8982d1ac896
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_PF_MSG_H
+#define ADF_PFVF_PF_MSG_H
+
+#include "adf_accel_devices.h"
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+
+typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat);
+
+int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat);
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat);
+
+#endif /* ADF_PFVF_PF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
new file mode 100644
index 000000000000..588352de1ef0
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_utils.h"
+
+typedef u8 (*pf2vf_blkmsg_data_getter_fn)(u8 const *blkmsg, u8 byte);
+
+static const adf_pf2vf_blkmsg_provider pf2vf_blkmsg_providers[] = {
+ NULL, /* no message type defined for value 0 */
+ NULL, /* no message type defined for value 1 */
+ adf_pf_capabilities_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY */
+ adf_pf_ring_to_svc_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP */
+};
+
+/**
+ * adf_send_pf2vf_msg() - send PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr: VF number to which the message will be sent
+ * @msg: Message to send
+ *
+ * This function allows the PF to send a message to a specific VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg)
+{
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(vf_nr);
+
+ return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
+ &accel_dev->pf.vf_info[vf_nr].pf2vf_lock);
+}
+
+/**
+ * adf_recv_vf2pf_msg() - receive a VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr: Number of the VF from which the message will be received
+ *
+ * This function allows the PF to receive a message from a specific VF.
+ *
+ * Return: a valid message on success, zero otherwise.
+ */
+static struct pfvf_message adf_recv_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(vf_nr);
+
+ return pfvf_ops->recv_msg(accel_dev, pfvf_offset, vf_info->vf_compat_ver);
+}
+
+static adf_pf2vf_blkmsg_provider get_blkmsg_response_provider(u8 type)
+{
+ if (type >= ARRAY_SIZE(pf2vf_blkmsg_providers))
+ return NULL;
+
+ return pf2vf_blkmsg_providers[type];
+}
+
+/* Byte pf2vf_blkmsg_data_getter_fn callback */
+static u8 adf_pf2vf_blkmsg_get_byte(u8 const *blkmsg, u8 index)
+{
+ return blkmsg[index];
+}
+
+/* CRC pf2vf_blkmsg_data_getter_fn callback */
+static u8 adf_pf2vf_blkmsg_get_crc(u8 const *blkmsg, u8 count)
+{
+ /* count is 0-based, turn it into a length */
+ return adf_pfvf_calc_blkmsg_crc(blkmsg, count + 1);
+}
+
+static int adf_pf2vf_blkmsg_get_data(struct adf_accel_vf_info *vf_info,
+ u8 type, u8 byte, u8 max_size, u8 *data,
+ pf2vf_blkmsg_data_getter_fn data_getter)
+{
+ u8 blkmsg[ADF_PFVF_BLKMSG_MSG_MAX_SIZE] = { 0 };
+ struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+ adf_pf2vf_blkmsg_provider provider;
+ u8 msg_size;
+
+ provider = get_blkmsg_response_provider(type);
+
+ if (unlikely(!provider)) {
+ pr_err("QAT: No registered provider for message %d\n", type);
+ *data = ADF_PF2VF_INVALID_BLOCK_TYPE;
+ return -EINVAL;
+ }
+
+ if (unlikely((*provider)(accel_dev, blkmsg, vf_info->vf_compat_ver))) {
+ pr_err("QAT: unknown error from provider for message %d\n", type);
+ *data = ADF_PF2VF_UNSPECIFIED_ERROR;
+ return -EINVAL;
+ }
+
+ msg_size = ADF_PFVF_BLKMSG_HEADER_SIZE + blkmsg[ADF_PFVF_BLKMSG_LEN_BYTE];
+
+ if (unlikely(msg_size >= max_size)) {
+ pr_err("QAT: Invalid size %d provided for message type %d\n",
+ msg_size, type);
+ *data = ADF_PF2VF_PAYLOAD_TRUNCATED;
+ return -EINVAL;
+ }
+
+ if (unlikely(byte >= msg_size)) {
+ pr_err("QAT: Out-of-bound byte number %d (msg size %d)\n",
+ byte, msg_size);
+ *data = ADF_PF2VF_INVALID_BYTE_NUM_REQ;
+ return -EINVAL;
+ }
+
+ *data = data_getter(blkmsg, byte);
+ return 0;
+}
+
+static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
+ struct pfvf_message req)
+{
+ u8 resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR;
+ struct pfvf_message resp = { 0 };
+ u8 resp_data = 0;
+ u8 blk_type;
+ u8 blk_byte;
+ u8 byte_max;
+
+ switch (req.type) {
+ case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
+ blk_type = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK, req.data)
+ + ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX + 1;
+ blk_byte = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, req.data);
+ byte_max = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
+ break;
+ case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
+ blk_type = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK, req.data)
+ + ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1;
+ blk_byte = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, req.data);
+ byte_max = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
+ break;
+ case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
+ blk_type = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, req.data);
+ blk_byte = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, req.data);
+ byte_max = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
+ break;
+ }
+
+ /* Is this a request for CRC or data? */
+ if (FIELD_GET(ADF_VF2PF_BLOCK_CRC_REQ_MASK, req.data)) {
+ dev_dbg(&GET_DEV(vf_info->accel_dev),
+ "BlockMsg of type %d for CRC over %d bytes received from VF%d\n",
+ blk_type, blk_byte, vf_info->vf_nr);
+
+ if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
+ byte_max, &resp_data,
+ adf_pf2vf_blkmsg_get_crc))
+ resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_CRC;
+ } else {
+ dev_dbg(&GET_DEV(vf_info->accel_dev),
+ "BlockMsg of type %d for data byte %d received from VF%d\n",
+ blk_type, blk_byte, vf_info->vf_nr);
+
+ if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
+ byte_max, &resp_data,
+ adf_pf2vf_blkmsg_get_byte))
+ resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_DATA;
+ }
+
+ resp.type = ADF_PF2VF_MSGTYPE_BLKMSG_RESP;
+ resp.data = FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp_type) |
+ FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp_data);
+
+ return resp;
+}
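/* Illustrative sketch: how the 10-bit block response payload built above is
 * packed and unpacked. The masks mirror ADF_PF2VF_BLKMSG_RESP_{TYPE,DATA}_MASK
 * (GENMASK(1, 0) and GENMASK(9, 2)); plain-C FIELD_PREP/FIELD_GET stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define RESP_TYPE_MASK 0x003u
#define RESP_DATA_MASK 0x3FCu

int main(void)
{
	/* BLKMSG_RESP_TYPE_DATA (0) carrying data byte 0x42 */
	uint32_t data = (0x0u & RESP_TYPE_MASK) | ((0x42u << 2) & RESP_DATA_MASK);

	printf("resp.data=0x%03x type=%u byte=0x%02x\n",
	       data, data & RESP_TYPE_MASK, (data & RESP_DATA_MASK) >> 2);
	return 0;
}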
+
+static struct pfvf_message handle_rp_reset_req(struct adf_accel_dev *accel_dev, u8 vf_nr,
+ struct pfvf_message req)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct pfvf_message resp = {
+ .type = ADF_PF2VF_MSGTYPE_RP_RESET_RESP,
+ .data = RPRESET_SUCCESS
+ };
+ u32 bank_number;
+ u32 rsvd_field;
+
+ bank_number = FIELD_GET(ADF_VF2PF_RNG_RESET_RP_MASK, req.data);
+ rsvd_field = FIELD_GET(ADF_VF2PF_RNG_RESET_RSVD_MASK, req.data);
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring Pair Reset Message received from VF%d for bank 0x%x\n",
+ vf_nr, bank_number);
+
+ if (!hw_data->ring_pair_reset || rsvd_field) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring Pair Reset for VF%d is not supported\n", vf_nr);
+ resp.data = RPRESET_NOT_SUPPORTED;
+ goto out;
+ }
+
+ if (bank_number >= hw_data->num_banks_per_vf) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid bank number (0x%x) from VF%d for Ring Reset\n",
+ bank_number, vf_nr);
+ resp.data = RPRESET_INVAL_BANK;
+ goto out;
+ }
+
+ /* Convert the VF provided value to PF bank number */
+ bank_number = vf_nr * hw_data->num_banks_per_vf + bank_number;
+ if (hw_data->ring_pair_reset(accel_dev, bank_number)) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring pair reset for VF%d failure\n", vf_nr);
+ resp.data = RPRESET_TIMEOUT;
+ goto out;
+ }
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring pair reset for VF%d successfully\n", vf_nr);
+
+out:
+ return resp;
+}
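/* Illustrative sketch: the VF-relative to PF-absolute ring-pair (bank)
 * conversion performed above. num_banks_per_vf comes from device data;
 * the values below are made up for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_banks_per_vf = 4; /* hypothetical device layout */
	unsigned int vf_nr = 3, vf_bank = 2;
	unsigned int pf_bank = vf_nr * num_banks_per_vf + vf_bank;

	printf("VF%u bank %u -> PF bank %u\n", vf_nr, vf_bank, pf_bank); /* 14 */
	return 0;
}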
+
+static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
+ struct pfvf_message msg, struct pfvf_message *resp)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+
+ switch (msg.type) {
+ case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+ {
+ u8 vf_compat_ver = msg.data;
+ u8 compat;
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
+ vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
+
+ if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
+ compat = ADF_PF2VF_VF_COMPATIBLE;
+ else
+ compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
+
+ vf_info->vf_compat_ver = vf_compat_ver;
+
+ resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
+ resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK,
+ ADF_PFVF_COMPAT_THIS_VERSION) |
+ FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+ {
+ u8 compat;
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Legacy VersionRequest received from VF%d to PF (vers 1.1)\n",
+ vf_nr);
+
+ /* legacy driver, VF compat_ver is 0 */
+ vf_info->vf_compat_ver = 0;
+
+ /* PF always newer than legacy VF */
+ compat = ADF_PF2VF_VF_COMPATIBLE;
+
+ /* Set legacy major and minor version to the latest, 1.1 */
+ resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
+ resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK, 0x11) |
+ FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_INIT:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Init message received from VF%d\n", vf_nr);
+ vf_info->init = true;
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Shutdown message received from VF%d\n", vf_nr);
+ vf_info->init = false;
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
+ case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
+ case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
+ *resp = handle_blkmsg_req(vf_info, msg);
+ break;
+ case ADF_VF2PF_MSGTYPE_RP_RESET:
+ *resp = handle_rp_reset_req(accel_dev, vf_nr, msg);
+ break;
+ default:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Unknown message from VF%d (type 0x%.4x, data: 0x%.4x)\n",
+ vf_nr, msg.type, msg.data);
+ return -ENOMSG;
+ }
+
+ return 0;
+}
+
+bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct pfvf_message req;
+ struct pfvf_message resp = {0};
+
+ req = adf_recv_vf2pf_msg(accel_dev, vf_nr);
+ if (!req.type) /* Legacy or no message */
+ return true;
+
+ if (adf_handle_vf2pf_msg(accel_dev, vf_nr, req, &resp))
+ return false;
+
+ if (resp.type && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send response to VF%d\n", vf_nr);
+
+ return true;
+}
+
+/**
+ * adf_enable_pf2vf_comms() - Function enables communication from pf to vf
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * This function carries out the necessary steps to setup and start the PFVF
+ * communication channel, if any.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+ adf_pfvf_crc_init();
+ spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h
new file mode 100644
index 000000000000..165d266d023d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_PF_PROTO_H
+#define ADF_PFVF_PF_PROTO_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg);
+
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PFVF_PF_PROTO_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.c b/drivers/crypto/qat/qat_common/adf_pfvf_utils.c
new file mode 100644
index 000000000000..c5f6d77d4bb8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_utils.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/crc8.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_utils.h"
+
+/* CRC Calculation */
+DECLARE_CRC8_TABLE(pfvf_crc8_table);
+#define ADF_PFVF_CRC8_POLYNOMIAL 0x97
+
+void adf_pfvf_crc_init(void)
+{
+ crc8_populate_msb(pfvf_crc8_table, ADF_PFVF_CRC8_POLYNOMIAL);
+}
+
+u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len)
+{
+ return crc8(pfvf_crc8_table, buf, buf_len, CRC8_INIT_VALUE);
+}
+
+static bool set_value_on_csr_msg(struct adf_accel_dev *accel_dev, u32 *csr_msg,
+ u32 value, const struct pfvf_field_format *fmt)
+{
+ if (unlikely((value & fmt->mask) != value)) {
+ dev_err(&GET_DEV(accel_dev),
+ "PFVF message value 0x%X out of range, %u max allowed\n",
+ value, fmt->mask);
+ return false;
+ }
+
+ *csr_msg |= value << fmt->offset;
+
+ return true;
+}
+
+u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg,
+ const struct pfvf_csr_format *fmt)
+{
+ u32 csr_msg = 0;
+
+ if (!set_value_on_csr_msg(accel_dev, &csr_msg, msg.type, &fmt->type) ||
+ !set_value_on_csr_msg(accel_dev, &csr_msg, msg.data, &fmt->data))
+ return 0;
+
+ return csr_msg | ADF_PFVF_MSGORIGIN_SYSTEM;
+}
+
+struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 csr_msg,
+ const struct pfvf_csr_format *fmt)
+{
+ struct pfvf_message msg = { 0 };
+
+ msg.type = (csr_msg >> fmt->type.offset) & fmt->type.mask;
+ msg.data = (csr_msg >> fmt->data.offset) & fmt->data.mask;
+
+ if (unlikely(!msg.type))
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid PFVF msg with no type received\n");
+
+ return msg;
+}
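/* Illustrative sketch: a table-free equivalent of the MSB-first CRC-8 used
 * above (polynomial 0x97, init 0xFF as in CRC8_INIT_VALUE). The kernel uses
 * the table-driven crc8() from lib/crc8.c; this standalone version computes
 * the same values bit by bit.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t blkmsg_crc8(const uint8_t *buf, size_t len)
{
	uint8_t crc = 0xFF; /* CRC8_INIT_VALUE */

	while (len--) {
		crc ^= *buf++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x80) ? (crc << 1) ^ 0x97 : crc << 1;
	}
	return crc;
}

int main(void)
{
	const uint8_t hdr[] = { 0x02, 0x08 }; /* hypothetical version, payload_size */

	printf("crc = 0x%02x\n", blkmsg_crc8(hdr, sizeof(hdr)));
	return 0;
}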
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/qat/qat_common/adf_pfvf_utils.h
new file mode 100644
index 000000000000..2be048e2287b
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_utils.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_UTILS_H
+#define ADF_PFVF_UTILS_H
+
+#include <linux/types.h>
+#include "adf_pfvf_msg.h"
+
+/* How long to wait for far side to acknowledge receipt */
+#define ADF_PFVF_MSG_ACK_DELAY_US 4
+#define ADF_PFVF_MSG_ACK_MAX_DELAY_US (1 * USEC_PER_SEC)
+
+u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len);
+void adf_pfvf_crc_init(void);
+
+struct pfvf_field_format {
+ u8 offset;
+ u32 mask;
+};
+
+struct pfvf_csr_format {
+ struct pfvf_field_format type;
+ struct pfvf_field_format data;
+};
+
+u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ const struct pfvf_csr_format *fmt);
+struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg,
+ const struct pfvf_csr_format *fmt);
+
+#endif /* ADF_PFVF_UTILS_H */
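/* Illustrative sketch: how a generation-specific driver could describe its
 * CSR layout with pfvf_csr_format and pack a message the way
 * adf_pfvf_csr_msg_of() does. The offsets and masks below follow the Gen2
 * layout documented in adf_pfvf_msg.h (type in bits 2-5, data in bits 6-15)
 * but are hypothetical here; the real tables live in the gen-specific files.
 */
#include <stdint.h>
#include <stdio.h>

struct pfvf_field_format { uint8_t offset; uint32_t mask; };
struct pfvf_csr_format { struct pfvf_field_format type, data; };

static const struct pfvf_csr_format example_fmt = {
	.type = { .offset = 2, .mask = 0x0F },  /* 4-bit message type */
	.data = { .offset = 6, .mask = 0x3FF }, /* 10-bit message data */
};

int main(void)
{
	uint32_t csr = 0;

	csr |= (0x3u & example_fmt.type.mask) << example_fmt.type.offset; /* INIT */
	csr |= (0x0u & example_fmt.data.mask) << example_fmt.data.offset;
	csr |= 0x2; /* ADF_PFVF_MSGORIGIN_SYSTEM, BIT(1) */

	printf("csr = 0x%08x\n", csr); /* 0x0000000e */
	return 0;
}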
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
new file mode 100644
index 000000000000..14b222691c9c
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_vf_msg.h"
+#include "adf_pfvf_vf_proto.h"
+
+/**
+ * adf_vf2pf_notify_init() - send init msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to a PF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
+
+ if (adf_send_vf2pf_msg(accel_dev, msg)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Init event to PF\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
+
+/**
+ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to a PF
+ *
+ * Return: void
+ */
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN };
+
+ if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+ if (adf_send_vf2pf_msg(accel_dev, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
+
+int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+ u8 pf_version;
+ int compat;
+ int ret;
+ struct pfvf_message resp;
+ struct pfvf_message msg = {
+ .type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ,
+ .data = ADF_PFVF_COMPAT_THIS_VERSION,
+ };
+
+ BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
+
+ ret = adf_send_vf2pf_req(accel_dev, msg, &resp);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Compatibility Version Request.\n");
+ return ret;
+ }
+
+ pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data);
+ compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data);
+
+ /* Response from PF received, check compatibility */
+ switch (compat) {
+ case ADF_PF2VF_VF_COMPATIBLE:
+ break;
+ case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+ /* VF is newer than PF - compatible for now */
+ break;
+ case ADF_PF2VF_VF_INCOMPATIBLE:
+ dev_err(&GET_DEV(accel_dev),
+ "PF (vers %d) and VF (vers %d) are not compatible\n",
+ pf_version, ADF_PFVF_COMPAT_THIS_VERSION);
+ return -EINVAL;
+ default:
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid response from PF; assume not compatible\n");
+ return -EINVAL;
+ }
+
+ accel_dev->vf.pf_compat_ver = pf_version;
+ return 0;
+}
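/* Illustrative sketch: the version-response word round trip. The PF packs
 * its version and the compatibility verdict using the masks from
 * adf_pfvf_msg.h (ADF_PF2VF_VERSION_RESP_VERS_MASK = GENMASK(7, 0),
 * ADF_PF2VF_VERSION_RESP_RESULT_MASK = GENMASK(9, 8)), and the VF unpacks
 * them as done in adf_vf2pf_request_version() above. Standalone C.
 */
#include <stdint.h>
#include <stdio.h>

#define VERS_MASK   0x0FFu
#define RESULT_MASK 0x300u

int main(void)
{
	/* PF side: version 4, ADF_PF2VF_VF_COMPATIBLE (1) */
	uint32_t data = (4u & VERS_MASK) | ((1u << 8) & RESULT_MASK);

	/* VF side: FIELD_GET() equivalents */
	printf("pf_version=%u compat=%u (data=0x%03x)\n",
	       data & VERS_MASK, (data & RESULT_MASK) >> 8, data);
	return 0;
}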
+
+int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct capabilities_v3 cap_msg = { { 0 }, };
+ unsigned int len = sizeof(cap_msg);
+
+ if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
+ /* The PF is too old to support the extended capabilities */
+ return 0;
+
+ if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
+ (u8 *)&cap_msg, &len)) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to get block message response\n");
+ return -EFAULT;
+ }
+
+ switch (cap_msg.hdr.version) {
+ default:
+ /* Newer version received, handle only the known parts */
+ fallthrough;
+ case ADF_PFVF_CAPABILITIES_V3_VERSION:
+ if (likely(len >= sizeof(struct capabilities_v3)))
+ hw_data->clock_frequency = cap_msg.frequency;
+ else
+ dev_info(&GET_DEV(accel_dev), "Could not get frequency");
+ fallthrough;
+ case ADF_PFVF_CAPABILITIES_V2_VERSION:
+ if (likely(len >= sizeof(struct capabilities_v2)))
+ hw_data->accel_capabilities_mask = cap_msg.capabilities;
+ else
+ dev_info(&GET_DEV(accel_dev), "Could not get capabilities");
+ fallthrough;
+ case ADF_PFVF_CAPABILITIES_V1_VERSION:
+ if (likely(len >= sizeof(struct capabilities_v1))) {
+ hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps;
+ } else {
+ dev_err(&GET_DEV(accel_dev),
+ "Capabilities message truncated to %d bytes\n", len);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
+{
+ struct ring_to_svc_map_v1 rts_map_msg = { { 0 }, };
+ unsigned int len = sizeof(rts_map_msg);
+
+ if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
+ /* Use already set default mappings */
+ return 0;
+
+ if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
+ (u8 *)&rts_map_msg, &len)) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to get block message response\n");
+ return -EFAULT;
+ }
+
+ if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) {
+ dev_err(&GET_DEV(accel_dev),
+ "RING_TO_SVC message truncated to %d bytes\n", len);
+ return -EFAULT;
+ }
+
+ /* Only v1 at present */
+ accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h
new file mode 100644
index 000000000000..71bc0e3f1d93
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_VF_MSG_H
+#define ADF_PFVF_VF_MSG_H
+
+#if defined(CONFIG_PCI_IOV)
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+
+#endif /* ADF_PFVF_VF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c
new file mode 100644
index 000000000000..1015155b6374
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_utils.h"
+#include "adf_pfvf_vf_msg.h"
+#include "adf_pfvf_vf_proto.h"
+
+#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
+#define ADF_PFVF_MSG_ACK_DELAY 2
+#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
+
+/* How many times to retry if there is no response */
+#define ADF_PFVF_MSG_RESP_RETRIES 5
+#define ADF_PFVF_MSG_RESP_TIMEOUT (ADF_PFVF_MSG_ACK_DELAY * \
+ ADF_PFVF_MSG_ACK_MAX_RETRY + \
+ ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
+
+/**
+ * adf_send_vf2pf_msg() - send VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @msg: Message to send
+ *
+ * This function allows the VF to send a message to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
+{
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(0);
+
+ return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
+ &accel_dev->vf.vf2pf_lock);
+}
+
+/**
+ * adf_recv_pf2vf_msg() - receive a PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ *
+ * This function allows the VF to receive a message from the PF.
+ *
+ * Return: a valid message on success, zero otherwise.
+ */
+static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev)
+{
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(0);
+
+ return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->vf.pf_compat_ver);
+}
+
+/**
+ * adf_send_vf2pf_req() - send VF2PF request message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg: Request message to send
+ * @resp: Returned PF response
+ *
+ * This function sends a request message from the VF to the PF and waits
+ * for the corresponding response.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ struct pfvf_message *resp)
+{
+ unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
+ unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES;
+ int ret;
+
+ reinit_completion(&accel_dev->vf.msg_received);
+
+ /* Send request from VF to PF */
+ do {
+ ret = adf_send_vf2pf_msg(accel_dev, msg);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send request msg to PF\n");
+ return ret;
+ }
+
+ /* Wait for response, if it times out retry */
+ ret = wait_for_completion_timeout(&accel_dev->vf.msg_received,
+ timeout);
+ if (ret) {
+ if (likely(resp))
+ *resp = accel_dev->vf.response;
+
+ /* Once copied, set to an invalid value */
+ accel_dev->vf.response.type = 0;
+
+ return 0;
+ }
+
+ dev_err(&GET_DEV(accel_dev), "PFVF response message timeout\n");
+ } while (--retries);
+
+ return -EIO;
+}
+
+static int adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev, bool crc,
+ u8 *type, u8 *data)
+{
+ struct pfvf_message req = { 0 };
+ struct pfvf_message resp = { 0 };
+ u8 blk_type;
+ u8 blk_byte;
+ u8 msg_type;
+ u8 max_data;
+ int err;
+
+ /* Convert the block type to {small, medium, large} size category */
+ if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) {
+ msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ;
+ blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type);
+ blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data);
+ max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
+ } else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) {
+ msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ;
+ blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK,
+ *type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX);
+ blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data);
+ max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
+ } else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) {
+ msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ;
+ blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK,
+ *type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX);
+ blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data);
+ max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
+ } else {
+ dev_err(&GET_DEV(accel_dev), "Invalid message type %u\n", *type);
+ return -EINVAL;
+ }
+
+ /* Sanity check */
+ if (*data > max_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid byte %s %u for message type %u\n",
+ crc ? "count" : "index", *data, *type);
+ return -EINVAL;
+ }
+
+ /* Build the block message */
+ req.type = msg_type;
+ req.data = blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc);
+
+ err = adf_send_vf2pf_req(accel_dev, req, &resp);
+ if (err)
+ return err;
+
+ *type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data);
+ *data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data);
+
+ return 0;
+}
+
+static int adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev, u8 type,
+ u8 index, u8 *data)
+{
+ int ret;
+
+ ret = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index);
+ if (ret < 0)
+ return ret;
+
+ if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Unexpected BLKMSG response type %u, byte 0x%x\n",
+ type, index);
+ return -EFAULT;
+ }
+
+ *data = index;
+ return 0;
+}
+
+static int adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev, u8 type,
+ u8 bytes, u8 *crc)
+{
+ int ret;
+
+ /* The count of bytes refers to a length, so shift it to a 0-based
+ * count to avoid overflows. A CRC over 0 bytes is never requested,
+ * which makes the shift safe.
+ */
+ --bytes;
+
+ ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes);
+ if (ret < 0)
+ return ret;
+
+ if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Unexpected CRC BLKMSG response type %u, crc 0x%x\n",
+ type, bytes);
+ return -EFAULT;
+ }
+
+ *crc = bytes;
+ return 0;
+}
+
+/**
+ * adf_send_vf2pf_blkmsg_req() - retrieve block message
+ * @accel_dev: Pointer to acceleration VF device.
+ * @type: The block message type, see adf_pfvf_msg.h for allowed values
+ * @buffer: output buffer where the received data is placed
+ * @buffer_len: buffer length on input, the number of written bytes on output
+ *
+ * Request a message of type 'type' over the block message transport.
+ * This function will send the required number of block message requests and
+ * return the overall content back to the caller through the provided buffer.
+ * The buffer should be large enough to contain the requested message type,
+ * otherwise the response will be truncated.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
+ u8 *buffer, unsigned int *buffer_len)
+{
+ unsigned int index;
+ unsigned int msg_len;
+ int ret;
+ u8 remote_crc;
+ u8 local_crc;
+
+ if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) {
+ dev_err(&GET_DEV(accel_dev), "Invalid block message type %d\n",
+ type);
+ return -EINVAL;
+ }
+
+ if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Buffer size too small for a block message\n");
+ return -EINVAL;
+ }
+
+ ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
+ ADF_PFVF_BLKMSG_VER_BYTE,
+ &buffer[ADF_PFVF_BLKMSG_VER_BYTE]);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid version 0 received for block request %u", type);
+ return -EFAULT;
+ }
+
+ ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
+ ADF_PFVF_BLKMSG_LEN_BYTE,
+ &buffer[ADF_PFVF_BLKMSG_LEN_BYTE]);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid size 0 received for block request %u", type);
+ return -EFAULT;
+ }
+
+ /* We need to pick the minimum since there is no way to request a
+ * specific version. As a consequence any scenario is possible:
+ * - PF has a newer (longer) version which doesn't fit in the buffer
+ * - VF expects a newer (longer) version, so we must not ask for
+ * bytes in excess
+ * - PF and VF share the same version, no problem
+ */
+ msg_len = ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE];
+ msg_len = min(*buffer_len, msg_len);
+
+ /* Get the payload */
+ for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) {
+ ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, index,
+ &buffer[index]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc);
+ if (unlikely(ret))
+ return ret;
+
+ local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len);
+ if (unlikely(local_crc != remote_crc)) {
+ dev_err(&GET_DEV(accel_dev),
+ "CRC error on msg type %d. Local %02X, remote %02X\n",
+ type, local_crc, remote_crc);
+ return -EIO;
+ }
+
+ *buffer_len = msg_len;
+ return 0;
+}
+
+static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg)
+{
+ switch (msg.type) {
+ case ADF_PF2VF_MSGTYPE_RESTARTING:
+ dev_dbg(&GET_DEV(accel_dev), "Restarting message received from PF\n");
+
+ adf_pf2vf_handle_pf_restarting(accel_dev);
+ return false;
+ case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+ case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
+ case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Response Message received from PF (type 0x%.4x, data 0x%.4x)\n",
+ msg.type, msg.data);
+ accel_dev->vf.response = msg;
+ complete(&accel_dev->vf.msg_received);
+ return true;
+ default:
+ dev_err(&GET_DEV(accel_dev),
+ "Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n",
+ msg.type, msg.data);
+ }
+
+ return false;
+}
+
+bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg;
+
+ msg = adf_recv_pf2vf_msg(accel_dev);
+ if (msg.type) /* Valid message received */
+ return adf_handle_pf2vf_msg(accel_dev, msg);
+
+ /* No replies for PF->VF messages at present */
+
+ return true;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ adf_pfvf_crc_init();
+ adf_enable_pf2vf_interrupts(accel_dev);
+
+ ret = adf_vf2pf_request_version(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_vf2pf_get_capabilities(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_vf2pf_get_ring_to_svc(accel_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h
new file mode 100644
index 000000000000..f6ee9b38c0e1
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_VF_PROTO_H
+#define ADF_PFVF_VF_PROTO_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg);
+int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ struct pfvf_message *resp);
+int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
+ u8 *buffer, unsigned int *buffer_len);
+
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PFVF_VF_PROTO_H */
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 90ec057f9183..b960bca1f9d2 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -1,12 +1,15 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
-#include "adf_pf2vf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+
+#define ADF_VF2PF_RATELIMIT_INTERVAL 8
+#define ADF_VF2PF_RATELIMIT_BURST 130
static struct workqueue_struct *pf2vf_resp_wq;
@@ -19,8 +22,16 @@ static void adf_iov_send_resp(struct work_struct *work)
{
struct adf_pf2vf_resp *pf2vf_resp =
container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+ struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
+ struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+ u32 vf_nr = vf_info->vf_nr;
+ bool ret;
+
+ ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
+ if (ret)
+ /* re-enable interrupt on PF from this VF */
+ adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
- adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
kfree(pf2vf_resp);
}
@@ -50,11 +61,12 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
/* This ptr will be populated when VFs will be created */
vf_info->accel_dev = accel_dev;
vf_info->vf_nr = i;
+ vf_info->vf_compat_ver = 0;
mutex_init(&vf_info->pf2vf_lock);
ratelimit_state_init(&vf_info->vf2pf_ratelimit,
- DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
+ ADF_VF2PF_RATELIMIT_INTERVAL,
+ ADF_VF2PF_RATELIMIT_BURST);
}
/* Set Valid bits in AE Thread to PCIe Function Mapping */
@@ -62,7 +74,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
hw_data->configure_iov_threads(accel_dev, true);
/* Enable VF to PF interrupts for all VFs */
- if (hw_data->get_pf2vf_offset)
+ if (hw_data->pfvf_ops.get_pf2vf_offset)
adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
/*
@@ -92,13 +104,13 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
if (!accel_dev->pf.vf_info)
return;
- if (hw_data->get_pf2vf_offset)
+ if (hw_data->pfvf_ops.get_pf2vf_offset)
adf_pf2vf_notify_restarting(accel_dev);
pci_disable_sriov(accel_to_pci_dev(accel_dev));
/* Disable VF to PF interrupts */
- if (hw_data->get_pf2vf_offset)
+ if (hw_data->pfvf_ops.get_pf2vf_offset)
adf_disable_vf2pf_interrupts(accel_dev, GENMASK(31, 0));
/* Clear Valid bits in AE Thread to PCIe Function Mapping */
@@ -114,6 +126,32 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
+static int adf_sriov_prepare_restart(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+ if (!ret) {
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED,
+ services, ADF_STR);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* adf_sriov_configure() - Enable SRIOV for the device
* @pdev: Pointer to PCI device.
@@ -153,8 +191,9 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return -EBUSY;
}
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ ret = adf_sriov_prepare_restart(accel_dev);
+ if (ret)
+ return ret;
}
if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
deleted file mode 100644
index 8d11bb24cea0..000000000000
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_pf2vf_msg.h"
-
-/**
- * adf_vf2pf_notify_init() - send init msg to PF
- * @accel_dev: Pointer to acceleration VF device.
- *
- * Function sends an init message from the VF to a PF
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_send_vf2pf_msg(accel_dev, msg)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Init event to PF\n");
- return -EFAULT;
- }
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
- return 0;
-}
-EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
-
-/**
- * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
- * @accel_dev: Pointer to acceleration VF device.
- *
- * Function sends a shutdown message from the VF to a PF
- *
- * Return: void
- */
-void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
- if (adf_send_vf2pf_msg(accel_dev, msg))
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Shutdown event to PF\n");
-}
-EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index db5e7abbe5f3..86c3bd0c9c2b 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -15,7 +15,6 @@
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
-#include "adf_pf2vf_msg.h"
#define ADF_VINTSOU_OFFSET 0x204
#define ADF_VINTMSK_OFFSET 0x208
@@ -31,22 +30,16 @@ struct adf_vf_stop_data {
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
- struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- void __iomem *pmisc_bar_addr =
- pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
- ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x0);
+ ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
}
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
- struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- void __iomem *pmisc_bar_addr =
- pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
- ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x2);
+ ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
}
EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
@@ -85,78 +78,37 @@ static void adf_dev_stop_async(struct work_struct *work)
kfree(stop_data);
}
-static void adf_pf2vf_bh_handler(void *data)
+int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
{
- struct adf_accel_dev *accel_dev = data;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *pmisc =
- &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
- void __iomem *pmisc_bar_addr = pmisc->virt_addr;
- u32 msg;
-
- /* Read the message from PF */
- msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
- if (!(msg & ADF_PF2VF_INT)) {
- dev_info(&GET_DEV(accel_dev),
- "Spurious PF2VF interrupt, msg %X. Ignored\n", msg);
- goto out;
- }
+ struct adf_vf_stop_data *stop_data;
- if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
- /* Ignore legacy non-system (non-kernel) PF2VF messages */
- goto err;
-
- switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
- case ADF_PF2VF_MSGTYPE_RESTARTING: {
- struct adf_vf_stop_data *stop_data;
-
- dev_dbg(&GET_DEV(accel_dev),
- "Restarting msg received from PF 0x%x\n", msg);
-
- clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
- stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
- if (!stop_data) {
- dev_err(&GET_DEV(accel_dev),
- "Couldn't schedule stop for vf_%d\n",
- accel_dev->accel_id);
- return;
- }
- stop_data->accel_dev = accel_dev;
- INIT_WORK(&stop_data->work, adf_dev_stop_async);
- queue_work(adf_vf_stop_wq, &stop_data->work);
- /* To ack, clear the PF2VFINT bit */
- msg &= ~ADF_PF2VF_INT;
- ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
- return;
- }
- case ADF_PF2VF_MSGTYPE_VERSION_RESP:
- dev_dbg(&GET_DEV(accel_dev),
- "Version resp received from PF 0x%x\n", msg);
- accel_dev->vf.pf_version =
- (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
- ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
- accel_dev->vf.compatible =
- (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
- ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- complete(&accel_dev->vf.iov_msg_completion);
- break;
- default:
- goto err;
+ clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+ stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+ if (!stop_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "Couldn't schedule stop for vf_%d\n",
+ accel_dev->accel_id);
+ return -ENOMEM;
}
+ stop_data->accel_dev = accel_dev;
+ INIT_WORK(&stop_data->work, adf_dev_stop_async);
+ queue_work(adf_vf_stop_wq, &stop_data->work);
- /* To ack, clear the PF2VFINT bit */
- msg &= ~ADF_PF2VF_INT;
- ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+ return 0;
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+ struct adf_accel_dev *accel_dev = data;
+ bool ret;
+
+ ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
+ if (ret)
+ /* Re-enable PF2VF interrupts */
+ adf_enable_pf2vf_interrupts(accel_dev);
-out:
- /* Re-enable PF2VF interrupts */
- adf_enable_pf2vf_interrupts(accel_dev);
return;
-err:
- dev_err(&GET_DEV(accel_dev),
- "Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
- msg);
+
}
static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
index f05ad17fbdd6..afe59a7684ac 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -14,7 +14,8 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_COUNTERS_GET = 5,
ICP_QAT_FW_LOOPBACK = 6,
ICP_QAT_FW_HEARTBEAT_SYNC = 7,
- ICP_QAT_FW_HEARTBEAT_GET = 8
+ ICP_QAT_FW_HEARTBEAT_GET = 8,
+ ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
};
enum icp_qat_fw_init_admin_resp_status {
@@ -52,6 +53,7 @@ struct icp_qat_fw_init_admin_resp {
__u16 version_minor_num;
__u16 version_major_num;
};
+ __u32 extended_features;
};
__u64 opaque_data;
union {
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
index e39e8a2d51a7..433304cad2ed 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -91,7 +91,18 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_RAND = BIT(7),
ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
- /* Bits 10-25 are currently reserved */
+ /* Bits 10-11 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_HKDF = BIT(12),
+ ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13),
+ /* Bit 14 is currently reserved */
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
+ /* Bits 18-21 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25),
ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26)
};
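For orientation: drivers consume this enum by testing bits in the capability
word reported by firmware. A minimal standalone sketch (plain C; the mask
value below is hypothetical, the bit definitions mirror the enum above):

    #include <stdint.h>
    #include <stdio.h>

    #define ICP_ACCEL_CAPABILITIES_AESGCM_SPC   (1u << 16)
    #define ICP_ACCEL_CAPABILITIES_CHACHA_POLY  (1u << 17)

    int main(void)
    {
        /* Hypothetical capability word as reported by the firmware */
        uint32_t caps = ICP_ACCEL_CAPABILITIES_AESGCM_SPC;

        if (caps & ICP_ACCEL_CAPABILITIES_CHACHA_POLY)
            printf("chacha20-poly1305 offload available\n");
        else
            printf("chacha20-poly1305 not supported\n");
        return 0;
    }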
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index ece6776fbd53..7234c4940fae 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -8,6 +8,7 @@
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
+#include "adf_gen2_hw_data.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"
@@ -105,6 +106,30 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
}
/**
+ * qat_crypto_vf_dev_config() - create dev config required to create crypto inst.
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates the device configuration required to create
+ * asym, sym or crypto instances.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
+{
+ u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;
+
+ if (ring_to_svc_map != ADF_GEN2_DEFAULT_RING_TO_SRV_MAP) {
+ dev_err(&GET_DEV(accel_dev),
+ "Unsupported ring/service mapping present on PF");
+ return -EFAULT;
+ }
+
+ return qat_crypto_dev_config(accel_dev);
+}
+
+/**
* qat_crypto_dev_config() - create dev config required to create crypto inst.
*
* @accel_dev: Pointer to acceleration device.
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 12ca6b8764aa..4bfd8f3566f7 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -684,8 +684,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
{
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *misc_bar =
- &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
unsigned int max_en_ae_id = 0;
struct adf_bar *sram_bar;
unsigned int csr_val = 0;
@@ -715,18 +714,12 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->fcu_loaded_ae_csr = FCU_AE_LOADED_4XXX;
handle->chip_info->fcu_loaded_ae_pos = 0;
- handle->hal_cap_g_ctl_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_CAP_OFFSET_4XXX);
- handle->hal_cap_ae_xfer_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_AE_OFFSET_4XXX);
- handle->hal_ep_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_EP_OFFSET_4XXX);
+ handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET_4XXX;
+ handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET_4XXX;
+ handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET_4XXX;
handle->hal_cap_ae_local_csr_addr_v =
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
- + LOCAL_TO_XFER_REG_OFFSET);
+ + LOCAL_TO_XFER_REG_OFFSET);
break;
case PCI_DEVICE_ID_INTEL_QAT_C62X:
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
@@ -749,15 +742,9 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO;
handle->chip_info->fcu_loaded_ae_csr = FCU_STATUS;
handle->chip_info->fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
- handle->hal_cap_g_ctl_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_CAP_OFFSET);
- handle->hal_cap_ae_xfer_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_AE_OFFSET);
- handle->hal_ep_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_EP_OFFSET);
+ handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
+ handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
+ handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
handle->hal_cap_ae_local_csr_addr_v =
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+ LOCAL_TO_XFER_REG_OFFSET);
@@ -782,15 +769,9 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->fcu_dram_addr_lo = 0;
handle->chip_info->fcu_loaded_ae_csr = 0;
handle->chip_info->fcu_loaded_ae_pos = 0;
- handle->hal_cap_g_ctl_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_CAP_OFFSET);
- handle->hal_cap_ae_xfer_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_AE_OFFSET);
- handle->hal_ep_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_EP_OFFSET);
+ handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
+ handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
+ handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
handle->hal_cap_ae_local_csr_addr_v =
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+ LOCAL_TO_XFER_REG_OFFSET);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 8e2e1554dcf6..09599fe4d2f3 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
-#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
#include "adf_dh895xcc_hw_data.h"
#include "icp_qat_hw.h"
@@ -69,6 +69,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
return capabilities;
}
@@ -114,14 +116,19 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
{
- u32 errsou5, errmsk5, vf_int_mask;
+ u32 errsou3, errmsk3, errsou5, errmsk5, vf_int_mask;
- vf_int_mask = adf_gen2_get_vf2pf_sources(pmisc_bar);
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU3);
+ vf_int_mask = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3);
- /* Get the interrupt sources triggered by VFs, but to avoid duplicates
- * in the work queue, clear vf_int_mask_sets bits that are already
- * masked in ERRMSK register.
+	/* To avoid adding duplicate entries to the work queue, clear
+	 * vf_int_mask bits that are already masked in the ERRMSK register.
*/
+ errmsk3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK3);
+ vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3);
+
+ /* Do the same for ERRSOU5 */
errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
@@ -133,7 +140,11 @@ static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
- adf_gen2_enable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ & ~ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
/* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
if (vf_mask >> 16) {
@@ -147,7 +158,11 @@ static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
- adf_gen2_disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
/* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
if (vf_mask >> 16) {
@@ -176,6 +191,7 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_gen2_enable_error_correction;
@@ -201,14 +217,12 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_sbr;
- hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
- hw_data->get_vf2pf_sources = get_vf2pf_sources;
- hw_data->enable_vf2pf_interrupts = enable_vf2pf_interrupts;
- hw_data->disable_vf2pf_interrupts = disable_vf2pf_interrupts;
- hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+ adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ hw_data->pfvf_ops.get_vf2pf_sources = get_vf2pf_sources;
+ hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
+ hw_data->pfvf_ops.disable_vf2pf_interrupts = disable_vf2pf_interrupts;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 0af34dd8708a..aa17272a1507 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -25,6 +25,8 @@
#define ADF_DH895XCC_SMIA1_MASK 0x1
/* Masks for VF2PF interrupts */
+#define ADF_DH895XCC_ERR_REG_VF2PF_L(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask) (((vf_mask) & 0xFFFF) << 9)
#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask) ((vf_mask) >> 16)
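The new _L macros mirror the existing _U pair: VF2PF source bits for VFs 0-15
live in ERRSOU3/ERRMSK3 bit positions [24:9], while VFs 16-31 sit in
ERRSOU5/ERRMSK5. A standalone round-trip check of the packing (plain C,
illustrative values):

    #include <assert.h>
    #include <stdint.h>

    #define ERR_REG_VF2PF_L(vf_src)  (((vf_src) & 0x01FFFE00u) >> 9)
    #define ERR_MSK_VF2PF_L(vf_mask) (((vf_mask) & 0xFFFFu) << 9)

    int main(void)
    {
        uint32_t vf_mask = 0x0005;               /* VFs 0 and 2 */
        uint32_t reg = ERR_MSK_VF2PF_L(vf_mask); /* sets bits 9 and 11 */

        assert(reg == ((1u << 9) | (1u << 11)));
        assert(ERR_REG_VF2PF_L(reg) == vf_mask); /* unpacking round-trips */
        return 0;
    }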
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index 7c6ed6bc8abf..31c14d7e1c11 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
-#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
#include "adf_dh895xccvf_hw_data.h"
static struct adf_hw_device_class dh895xcciov_class = {
@@ -47,11 +48,6 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_VF;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_DH895XCCIOV_PF2VF_OFFSET;
-}
-
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -71,6 +67,7 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
@@ -86,13 +83,11 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
- hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
+ adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
index 306ebb71a408..6973fa967bc8 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -12,7 +12,6 @@
#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
#define ADF_DH895XCCIOV_ETR_BAR 0
#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
-#define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200
void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 99d90f3ea2b7..18756b2e1c91 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -171,11 +171,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
- init_completion(&accel_dev->vf.iov_msg_completion);
-
- ret = qat_crypto_dev_config(accel_dev);
- if (ret)
- goto out_err_free_reg;
+ init_completion(&accel_dev->vf.msg_received);
ret = adf_dev_init(accel_dev);
if (ret)
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 290e2446a2f3..97a530171f07 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -802,8 +802,8 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
ret = crypto_register_aead(alg);
if (ret) {
- kfree(tmpl);
dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
+ kfree(tmpl);
return ret;
}
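The reordering above (repeated in the sha.c and skcipher.c hunks that follow)
is a use-after-free fix: alg is embedded in the tmpl allocation, so logging
alg->base.cra_name after kfree(tmpl) reads freed memory. The same pattern in
miniature (plain C, hypothetical struct):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct tmpl { char cra_name[32]; /* ... */ };

    int main(void)
    {
        struct tmpl *t = malloc(sizeof(*t));

        if (!t)
            return 1;
        strcpy(t->cra_name, "cbc-aes-qce");

        /* Buggy order: free(t) first, then reading t->cra_name is a UAF */
        printf("%s registration failed\n", t->cra_name); /* log first... */
        free(t);                                         /* ...then free */
        return 0;
    }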
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 8e6fcf2c21cc..59159f5e64e5 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -498,8 +498,8 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
ret = crypto_register_ahash(alg);
if (ret) {
- kfree(tmpl);
dev_err(qce->dev, "%s registration failed\n", base->cra_name);
+ kfree(tmpl);
return ret;
}
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 8ff10928f581..3d27cd5210ef 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -484,8 +484,8 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
ret = crypto_register_skcipher(alg);
if (ret) {
- kfree(tmpl);
dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
+ kfree(tmpl);
return ret;
}
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index bcbc38dc6ae8..51b58e57153f 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -8,6 +8,7 @@
* Vitaly Andrianov
* Tero Kristo
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -646,8 +647,8 @@ static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
cmdl[upd_info->enc_offset.index] &=
~SA_CMDL_SOP_BYPASS_LEN_MASK;
cmdl[upd_info->enc_offset.index] |=
- ((u32)req->enc_offset <<
- __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
+ FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
+ req->enc_offset);
if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
__be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
@@ -666,8 +667,8 @@ static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
cmdl[upd_info->auth_offset.index] &=
~SA_CMDL_SOP_BYPASS_LEN_MASK;
cmdl[upd_info->auth_offset.index] |=
- ((u32)req->auth_offset <<
- __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
+ FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
+ req->auth_offset);
if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
req->auth_iv,
@@ -689,16 +690,16 @@ void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
u8 hash_size, u32 *swinfo)
{
swinfo[0] = sc_id;
- swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
+ swinfo[0] |= FIELD_PREP(SA_SW0_FLAGS_MASK, flags);
if (likely(cmdl_present))
- swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
- __ffs(SA_SW0_CMDL_INFO_MASK));
- swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));
+ swinfo[0] |= FIELD_PREP(SA_SW0_CMDL_INFO_MASK,
+ cmdl_offset | SA_SW0_CMDL_PRESENT);
+ swinfo[0] |= FIELD_PREP(SA_SW0_ENG_ID_MASK, eng_id);
swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
- swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
+ swinfo[2] |= FIELD_PREP(SA_SW2_EGRESS_LENGTH, hash_size);
}
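FIELD_PREP(mask, val) shifts val up to the mask's lowest set bit and clips it
to the mask, which is exactly the open-coded (val << __ffs(mask)) pattern it
replaces here (the kernel macro additionally performs compile-time range
checks). A userspace sketch of the equivalence, using a hand-rolled stand-in
and an illustrative mask:

    #include <assert.h>
    #include <stdint.h>

    /* Minimal stand-in for the kernel's FIELD_PREP() */
    #define FIELD_PREP(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

    int main(void)
    {
        uint32_t mask = 0x0000FF00u;                /* 8-bit field at bit 8 */

        assert(FIELD_PREP(mask, 0x2A) == 0x2A00u);
        assert(FIELD_PREP(mask, 0x1FF) == 0xFF00u); /* clipped by the mask */
        return 0;
    }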
/* Dump the security context */
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 75867c0b0017..be1bf39a317d 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -279,7 +279,7 @@ static struct shash_alg algs[] = {
.digestsize = CHKSUM_DIGEST_SIZE,
.base = {
.cra_name = "crc32",
- .cra_driver_name = DRIVER_NAME,
+ .cra_driver_name = "stm32-crc32-crc32",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
@@ -301,7 +301,7 @@ static struct shash_alg algs[] = {
.digestsize = CHKSUM_DIGEST_SIZE,
.base = {
.cra_name = "crc32c",
- .cra_driver_name = DRIVER_NAME,
+ .cra_driver_name = "stm32-crc32-crc32c",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 7389a0536ff0..59ef541123ae 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -37,7 +37,6 @@
/* Mode mask = bits [15..0] */
#define FLG_MODE_MASK GENMASK(15, 0)
/* Bit [31..16] status */
-#define FLG_CCM_PADDED_WA BIT(16)
/* Registers */
#define CRYP_CR 0x00000000
@@ -105,8 +104,6 @@
/* Misc */
#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
#define GCM_CTR_INIT 2
-#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
-#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
#define CRYP_AUTOSUSPEND_DELAY 50
struct stm32_cryp_caps {
@@ -144,26 +141,16 @@ struct stm32_cryp {
size_t authsize;
size_t hw_blocksize;
- size_t total_in;
- size_t total_in_save;
- size_t total_out;
- size_t total_out_save;
+ size_t payload_in;
+ size_t header_in;
+ size_t payload_out;
- struct scatterlist *in_sg;
struct scatterlist *out_sg;
- struct scatterlist *out_sg_save;
-
- struct scatterlist in_sgl;
- struct scatterlist out_sgl;
- bool sgs_copied;
-
- int in_sg_len;
- int out_sg_len;
struct scatter_walk in_walk;
struct scatter_walk out_walk;
- u32 last_ctr[4];
+ __be32 last_ctr[4];
u32 gcm_ctr;
};
@@ -245,6 +232,11 @@ static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
!(status & SR_BUSY), 10, 100000);
}
+static inline void stm32_cryp_enable(struct stm32_cryp *cryp)
+{
+ writel_relaxed(readl_relaxed(cryp->regs + CRYP_CR) | CR_CRYPEN, cryp->regs + CRYP_CR);
+}
+
static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp)
{
u32 status;
@@ -262,6 +254,7 @@ static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp)
}
static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp);
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err);
static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
{
@@ -283,103 +276,6 @@ static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
return cryp;
}
-static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total,
- size_t align)
-{
- int len = 0;
-
- if (!total)
- return 0;
-
- if (!IS_ALIGNED(total, align))
- return -EINVAL;
-
- while (sg) {
- if (!IS_ALIGNED(sg->offset, sizeof(u32)))
- return -EINVAL;
-
- if (!IS_ALIGNED(sg->length, align))
- return -EINVAL;
-
- len += sg->length;
- sg = sg_next(sg);
- }
-
- if (len != total)
- return -EINVAL;
-
- return 0;
-}
-
-static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp)
-{
- int ret;
-
- ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in,
- cryp->hw_blocksize);
- if (ret)
- return ret;
-
- ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out,
- cryp->hw_blocksize);
-
- return ret;
-}
-
-static void sg_copy_buf(void *buf, struct scatterlist *sg,
- unsigned int start, unsigned int nbytes, int out)
-{
- struct scatter_walk walk;
-
- if (!nbytes)
- return;
-
- scatterwalk_start(&walk, sg);
- scatterwalk_advance(&walk, start);
- scatterwalk_copychunks(buf, &walk, nbytes, out);
- scatterwalk_done(&walk, out, 0);
-}
-
-static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
-{
- void *buf_in, *buf_out;
- int pages, total_in, total_out;
-
- if (!stm32_cryp_check_io_aligned(cryp)) {
- cryp->sgs_copied = 0;
- return 0;
- }
-
- total_in = ALIGN(cryp->total_in, cryp->hw_blocksize);
- pages = total_in ? get_order(total_in) : 1;
- buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
-
- total_out = ALIGN(cryp->total_out, cryp->hw_blocksize);
- pages = total_out ? get_order(total_out) : 1;
- buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
-
- if (!buf_in || !buf_out) {
- dev_err(cryp->dev, "Can't allocate pages when unaligned\n");
- cryp->sgs_copied = 0;
- return -EFAULT;
- }
-
- sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0);
-
- sg_init_one(&cryp->in_sgl, buf_in, total_in);
- cryp->in_sg = &cryp->in_sgl;
- cryp->in_sg_len = 1;
-
- sg_init_one(&cryp->out_sgl, buf_out, total_out);
- cryp->out_sg_save = cryp->out_sg;
- cryp->out_sg = &cryp->out_sgl;
- cryp->out_sg_len = 1;
-
- cryp->sgs_copied = 1;
-
- return 0;
-}
-
static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv)
{
if (!iv)
@@ -481,16 +377,99 @@ static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg)
/* Wait for end of processing */
ret = stm32_cryp_wait_enable(cryp);
- if (ret)
+ if (ret) {
dev_err(cryp->dev, "Timeout (gcm init)\n");
+ return ret;
+ }
- return ret;
+ /* Prepare next phase */
+ if (cryp->areq->assoclen) {
+ cfg |= CR_PH_HEADER;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else if (stm32_cryp_get_input_text_len(cryp)) {
+ cfg |= CR_PH_PAYLOAD;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ }
+
+ return 0;
+}
+
+static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp)
+{
+ u32 cfg;
+ int err;
+
+ /* Check if whole header written */
+ if (!cryp->header_in) {
+ /* Wait for completion */
+ err = stm32_cryp_wait_busy(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (gcm/ccm header)\n");
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp, err);
+ return;
+ }
+
+ if (stm32_cryp_get_input_text_len(cryp)) {
+ /* Phase 3 : payload */
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_PAYLOAD | CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else {
+ /*
+ * Phase 4 : tag.
+			 * Nothing to read, nothing to write; the caller has
+			 * to end the request.
+ */
+ }
+ }
+}
+
+static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp)
+{
+ unsigned int i;
+ size_t written;
+ size_t len;
+ u32 alen = cryp->areq->assoclen;
+ u32 block[AES_BLOCK_32] = {0};
+ u8 *b8 = (u8 *)block;
+
+ if (alen <= 65280) {
+ /* Write first u32 of B1 */
+ b8[0] = (alen >> 8) & 0xFF;
+ b8[1] = alen & 0xFF;
+ len = 2;
+ } else {
+ /* Build the two first u32 of B1 */
+ b8[0] = 0xFF;
+ b8[1] = 0xFE;
+ b8[2] = (alen & 0xFF000000) >> 24;
+ b8[3] = (alen & 0x00FF0000) >> 16;
+ b8[4] = (alen & 0x0000FF00) >> 8;
+ b8[5] = alen & 0x000000FF;
+ len = 6;
+ }
+
+ written = min_t(size_t, AES_BLOCK_SIZE - len, alen);
+
+ scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0);
+ for (i = 0; i < AES_BLOCK_32; i++)
+ stm32_cryp_write(cryp, CRYP_DIN, block[i]);
+
+ cryp->header_in -= written;
+
+ stm32_crypt_gcmccm_end_header(cryp);
}
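The two branches above implement the CCM associated-data length formatting
from RFC 3610 / NIST SP 800-38C: lengths below 2^16 - 2^8 (65280) use a
two-byte big-endian field, while larger 32-bit lengths get the 0xFF 0xFE
prefix plus a four-byte big-endian field. A standalone encoder sketch
(hypothetical helper; 64-bit lengths with the 0xFF 0xFF prefix are not
handled):

    #include <stddef.h>
    #include <stdint.h>

    /* Encode the CCM a-data length; returns bytes written (2 or 6) */
    static size_t ccm_encode_alen(uint32_t alen, uint8_t out[6])
    {
        if (alen < 65280) {              /* 2^16 - 2^8 */
            out[0] = alen >> 8;
            out[1] = alen & 0xFF;
            return 2;
        }
        out[0] = 0xFF;                   /* marker for a 32-bit length */
        out[1] = 0xFE;
        out[2] = alen >> 24;
        out[3] = (alen >> 16) & 0xFF;
        out[4] = (alen >> 8) & 0xFF;
        out[5] = alen & 0xFF;
        return 6;
    }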
static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
{
int ret;
- u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
+ u32 iv_32[AES_BLOCK_32], b0_32[AES_BLOCK_32];
+ u8 *iv = (u8 *)iv_32, *b0 = (u8 *)b0_32;
__be32 *bd;
u32 *d;
unsigned int i, textlen;
@@ -531,10 +510,24 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
/* Wait for end of processing */
ret = stm32_cryp_wait_enable(cryp);
- if (ret)
+ if (ret) {
dev_err(cryp->dev, "Timeout (ccm init)\n");
+ return ret;
+ }
- return ret;
+ /* Prepare next phase */
+ if (cryp->areq->assoclen) {
+ cfg |= CR_PH_HEADER | CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* Write first (special) block (may move to next phase [payload]) */
+ stm32_cryp_write_ccm_first_header(cryp);
+ } else if (stm32_cryp_get_input_text_len(cryp)) {
+ cfg |= CR_PH_PAYLOAD;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ }
+
+ return 0;
}
static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
@@ -542,14 +535,11 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
int ret;
u32 cfg, hw_mode;
- pm_runtime_resume_and_get(cryp->dev);
+ pm_runtime_get_sync(cryp->dev);
/* Disable interrupt */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- /* Set key */
- stm32_cryp_hw_write_key(cryp);
-
/* Set configuration */
cfg = CR_DATA8 | CR_FFLUSH;
@@ -575,23 +565,36 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
/* AES ECB/CBC decrypt: run key preparation first */
if (is_decrypt(cryp) &&
((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) {
- stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN);
+ /* Configure in key preparation mode */
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP);
+ /* Set key only after full configuration done */
+ stm32_cryp_hw_write_key(cryp);
+
+		/* Start key preparation */
+ stm32_cryp_enable(cryp);
/* Wait for end of processing */
ret = stm32_cryp_wait_busy(cryp);
if (ret) {
dev_err(cryp->dev, "Timeout (key preparation)\n");
return ret;
}
- }
- cfg |= hw_mode;
+ cfg |= hw_mode | CR_DEC_NOT_ENC;
- if (is_decrypt(cryp))
- cfg |= CR_DEC_NOT_ENC;
+ /* Apply updated config (Decrypt + algo) and flush */
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else {
+ cfg |= hw_mode;
+ if (is_decrypt(cryp))
+ cfg |= CR_DEC_NOT_ENC;
- /* Apply config and flush (valid when CRYPEN = 0) */
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ /* Apply config and flush */
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* Set key only after configuration done */
+ stm32_cryp_hw_write_key(cryp);
+ }
switch (hw_mode) {
case CR_AES_GCM:
@@ -605,16 +608,6 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
if (ret)
return ret;
- /* Phase 2 : header (authenticated data) */
- if (cryp->areq->assoclen) {
- cfg |= CR_PH_HEADER;
- } else if (stm32_cryp_get_input_text_len(cryp)) {
- cfg |= CR_PH_PAYLOAD;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
- } else {
- cfg |= CR_PH_INIT;
- }
-
break;
case CR_DES_CBC:
@@ -629,11 +622,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
}
/* Enable now */
- cfg |= CR_CRYPEN;
-
- stm32_cryp_write(cryp, CRYP_CR, cfg);
-
- cryp->flags &= ~FLG_CCM_PADDED_WA;
+ stm32_cryp_enable(cryp);
return 0;
}
@@ -644,28 +633,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
/* Phase 4 : output tag */
err = stm32_cryp_read_auth_tag(cryp);
- if (!err && (!(is_gcm(cryp) || is_ccm(cryp))))
+ if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp))))
stm32_cryp_get_iv(cryp);
- if (cryp->sgs_copied) {
- void *buf_in, *buf_out;
- int pages, len;
-
- buf_in = sg_virt(&cryp->in_sgl);
- buf_out = sg_virt(&cryp->out_sgl);
-
- sg_copy_buf(buf_out, cryp->out_sg_save, 0,
- cryp->total_out_save, 1);
-
- len = ALIGN(cryp->total_in_save, cryp->hw_blocksize);
- pages = len ? get_order(len) : 1;
- free_pages((unsigned long)buf_in, pages);
-
- len = ALIGN(cryp->total_out_save, cryp->hw_blocksize);
- pages = len ? get_order(len) : 1;
- free_pages((unsigned long)buf_out, pages);
- }
-
pm_runtime_mark_last_busy(cryp->dev);
pm_runtime_put_autosuspend(cryp->dev);
@@ -674,8 +644,6 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
else
crypto_finalize_skcipher_request(cryp->engine, cryp->req,
err);
-
- memset(cryp->ctx->key, 0, cryp->ctx->keylen);
}
static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
@@ -801,7 +769,20 @@ static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL;
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
@@ -825,31 +806,61 @@ static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
}
static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
}
static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
}
static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
}
static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
}
static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
}
@@ -863,53 +874,122 @@ static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req)
return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM);
}
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
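Here iv[0] carries L' = L - 1, where L is the byte width of the CCM length
field; the spec allows 2 <= L <= 8, hence the 1..7 check, and the nonce
occupies the remaining 15 - L bytes of the 16-byte block. A worked check
(illustrative value):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t iv0 = 3;              /* L' = 3  ->  L = 4 */
        unsigned int L = iv0 + 1;

        assert(L >= 2 && L <= 8);     /* the range the check enforces */
        assert(15 - L == 11);         /* leaves an 11-byte nonce */
        return 0;
    }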
+
static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req)
{
+ int err;
+
+ err = crypto_ccm_check_iv(req->iv);
+ if (err)
+ return err;
+
return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT);
}
static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
{
+ int err;
+
+ err = crypto_ccm_check_iv(req->iv);
+ if (err)
+ return err;
+
return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
}
static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
}
static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
}
static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
}
static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
}
static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
}
static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
}
static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
}
static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
}
@@ -919,6 +999,7 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
struct stm32_cryp_ctx *ctx;
struct stm32_cryp *cryp;
struct stm32_cryp_reqctx *rctx;
+ struct scatterlist *in_sg;
int ret;
if (!req && !areq)
@@ -944,76 +1025,55 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
if (req) {
cryp->req = req;
cryp->areq = NULL;
- cryp->total_in = req->cryptlen;
- cryp->total_out = cryp->total_in;
+ cryp->header_in = 0;
+ cryp->payload_in = req->cryptlen;
+ cryp->payload_out = req->cryptlen;
+ cryp->authsize = 0;
} else {
/*
* Length of input and output data:
* Encryption case:
- * INPUT = AssocData || PlainText
+ * INPUT = AssocData || PlainText
* <- assoclen -> <- cryptlen ->
- * <------- total_in ----------->
*
- * OUTPUT = AssocData || CipherText || AuthTag
- * <- assoclen -> <- cryptlen -> <- authsize ->
- * <---------------- total_out ----------------->
+ * OUTPUT = AssocData || CipherText || AuthTag
+ * <- assoclen -> <-- cryptlen --> <- authsize ->
*
* Decryption case:
- * INPUT = AssocData || CipherText || AuthTag
- * <- assoclen -> <--------- cryptlen --------->
- * <- authsize ->
- * <---------------- total_in ------------------>
+	 *          INPUT  =   AssocData  ||     CipherText   ||      AuthTag
+ * <- assoclen ---> <---------- cryptlen ---------->
*
- * OUTPUT = AssocData || PlainText
- * <- assoclen -> <- crypten - authsize ->
- * <---------- total_out ----------------->
+ * OUTPUT = AssocData || PlainText
+ * <- assoclen -> <- cryptlen - authsize ->
*/
cryp->areq = areq;
cryp->req = NULL;
cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
- cryp->total_in = areq->assoclen + areq->cryptlen;
- if (is_encrypt(cryp))
- /* Append auth tag to output */
- cryp->total_out = cryp->total_in + cryp->authsize;
- else
- /* No auth tag in output */
- cryp->total_out = cryp->total_in - cryp->authsize;
+ if (is_encrypt(cryp)) {
+ cryp->payload_in = areq->cryptlen;
+ cryp->header_in = areq->assoclen;
+ cryp->payload_out = areq->cryptlen;
+ } else {
+ cryp->payload_in = areq->cryptlen - cryp->authsize;
+ cryp->header_in = areq->assoclen;
+ cryp->payload_out = cryp->payload_in;
+ }
}
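A worked example of the new bookkeeping, for an AEAD decrypt with
assoclen = 16, cryptlen = 48 and a 16-byte tag (illustrative numbers):

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t assoclen = 16, cryptlen = 48, authsize = 16;
        size_t header_in  = assoclen;            /* AAD, fed in phase 2 */
        size_t payload_in = cryptlen - authsize; /* ciphertext minus tag */

        assert(header_in == 16);
        assert(payload_in == 32);  /* payload_out mirrors payload_in */
        return 0;
    }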
- cryp->total_in_save = cryp->total_in;
- cryp->total_out_save = cryp->total_out;
+ in_sg = req ? req->src : areq->src;
+ scatterwalk_start(&cryp->in_walk, in_sg);
- cryp->in_sg = req ? req->src : areq->src;
cryp->out_sg = req ? req->dst : areq->dst;
- cryp->out_sg_save = cryp->out_sg;
-
- cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
- if (cryp->in_sg_len < 0) {
- dev_err(cryp->dev, "Cannot get in_sg_len\n");
- ret = cryp->in_sg_len;
- return ret;
- }
-
- cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
- if (cryp->out_sg_len < 0) {
- dev_err(cryp->dev, "Cannot get out_sg_len\n");
- ret = cryp->out_sg_len;
- return ret;
- }
-
- ret = stm32_cryp_copy_sgs(cryp);
- if (ret)
- return ret;
-
- scatterwalk_start(&cryp->in_walk, cryp->in_sg);
scatterwalk_start(&cryp->out_walk, cryp->out_sg);
if (is_gcm(cryp) || is_ccm(cryp)) {
/* In output, jump after assoc data */
- scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen);
- cryp->total_out -= cryp->areq->assoclen;
+ scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2);
}
+ if (is_ctr(cryp))
+ memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr));
+
ret = stm32_cryp_hw_init(cryp);
return ret;
}
@@ -1061,8 +1121,7 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
if (!cryp)
return -ENODEV;
- if (unlikely(!cryp->areq->assoclen &&
- !stm32_cryp_get_input_text_len(cryp))) {
+ if (unlikely(!cryp->payload_in && !cryp->header_in)) {
/* No input data to process: get tag and finish */
stm32_cryp_finish_req(cryp, 0);
return 0;
@@ -1071,43 +1130,10 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
return stm32_cryp_cpu_start(cryp);
}
-static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
- unsigned int n)
-{
- scatterwalk_advance(&cryp->out_walk, n);
-
- if (unlikely(cryp->out_sg->length == _walked_out)) {
- cryp->out_sg = sg_next(cryp->out_sg);
- if (cryp->out_sg) {
- scatterwalk_start(&cryp->out_walk, cryp->out_sg);
- return (sg_virt(cryp->out_sg) + _walked_out);
- }
- }
-
- return (u32 *)((u8 *)dst + n);
-}
-
-static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
- unsigned int n)
-{
- scatterwalk_advance(&cryp->in_walk, n);
-
- if (unlikely(cryp->in_sg->length == _walked_in)) {
- cryp->in_sg = sg_next(cryp->in_sg);
- if (cryp->in_sg) {
- scatterwalk_start(&cryp->in_walk, cryp->in_sg);
- return (sg_virt(cryp->in_sg) + _walked_in);
- }
- }
-
- return (u32 *)((u8 *)src + n);
-}
-
static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
{
- u32 cfg, size_bit, *dst, d32;
- u8 *d8;
- unsigned int i, j;
+ u32 cfg, size_bit;
+ unsigned int i;
int ret = 0;
/* Update Config */
@@ -1130,7 +1156,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_DIN, size_bit);
size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen :
- cryp->areq->cryptlen - AES_BLOCK_SIZE;
+ cryp->areq->cryptlen - cryp->authsize;
size_bit *= 8;
if (cryp->caps->swap_final)
size_bit = (__force u32)cpu_to_be32(size_bit);
@@ -1139,11 +1165,9 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_DIN, size_bit);
} else {
/* CCM: write CTR0 */
- u8 iv[AES_BLOCK_SIZE];
- u32 *iv32 = (u32 *)iv;
- __be32 *biv;
-
- biv = (void *)iv;
+ u32 iv32[AES_BLOCK_32];
+ u8 *iv = (u8 *)iv32;
+ __be32 *biv = (__be32 *)iv32;
memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
@@ -1165,39 +1189,18 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
}
if (is_encrypt(cryp)) {
+ u32 out_tag[AES_BLOCK_32];
+
/* Get and write tag */
- dst = sg_virt(cryp->out_sg) + _walked_out;
+ for (i = 0; i < AES_BLOCK_32; i++)
+ out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT);
- for (i = 0; i < AES_BLOCK_32; i++) {
- if (cryp->total_out >= sizeof(u32)) {
- /* Read a full u32 */
- *dst = stm32_cryp_read(cryp, CRYP_DOUT);
-
- dst = stm32_cryp_next_out(cryp, dst,
- sizeof(u32));
- cryp->total_out -= sizeof(u32);
- } else if (!cryp->total_out) {
- /* Empty fifo out (data from input padding) */
- stm32_cryp_read(cryp, CRYP_DOUT);
- } else {
- /* Read less than an u32 */
- d32 = stm32_cryp_read(cryp, CRYP_DOUT);
- d8 = (u8 *)&d32;
-
- for (j = 0; j < cryp->total_out; j++) {
- *((u8 *)dst) = *(d8++);
- dst = stm32_cryp_next_out(cryp, dst, 1);
- }
- cryp->total_out = 0;
- }
- }
+ scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1);
} else {
/* Get and check tag */
u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
- scatterwalk_map_and_copy(in_tag, cryp->in_sg,
- cryp->total_in_save - cryp->authsize,
- cryp->authsize, 0);
+ scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0);
for (i = 0; i < AES_BLOCK_32; i++)
out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT);
@@ -1217,115 +1220,59 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
{
u32 cr;
- if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) {
- cryp->last_ctr[3] = 0;
- cryp->last_ctr[2]++;
- if (!cryp->last_ctr[2]) {
- cryp->last_ctr[1]++;
- if (!cryp->last_ctr[1])
- cryp->last_ctr[0]++;
- }
+ if (unlikely(cryp->last_ctr[3] == cpu_to_be32(0xFFFFFFFF))) {
+ /*
+		 * In this case, the counter must be incremented manually,
+		 * as the hardware does not handle the u32 carry.
+ */
+ crypto_inc((u8 *)cryp->last_ctr, sizeof(cryp->last_ctr));
cr = stm32_cryp_read(cryp, CRYP_CR);
stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
- stm32_cryp_hw_write_iv(cryp, (__be32 *)cryp->last_ctr);
+ stm32_cryp_hw_write_iv(cryp, cryp->last_ctr);
stm32_cryp_write(cryp, CRYP_CR, cr);
}
- cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR);
- cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR);
- cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR);
- cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR);
+ /* The IV registers are BE */
+ cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
+ cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));
+ cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
+ cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
}
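crypto_inc() treats the buffer as a single big-endian integer and ripples the
carry across all 16 bytes, which is precisely the case the hardware misses
when the low IV word wraps. A minimal re-implementation sketch (hypothetical
helper mimicking the kernel function):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Big-endian increment with carry, as the kernel's crypto_inc() does */
    static void be_ctr_inc(uint8_t *ctr, size_t size)
    {
        for (size_t i = size; i > 0; i--)
            if (++ctr[i - 1] != 0)
                break;                /* stop once a byte does not wrap */
    }

    int main(void)
    {
        uint8_t ctr[16];

        memset(ctr, 0xFF, sizeof(ctr));
        ctr[0] = 0x00;
        be_ctr_inc(ctr, sizeof(ctr)); /* carry ripples up to byte 0 */
        assert(ctr[0] == 0x01 && ctr[15] == 0x00);
        return 0;
    }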
-static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
+static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
{
- unsigned int i, j;
- u32 d32, *dst;
- u8 *d8;
- size_t tag_size;
-
- /* Do no read tag now (if any) */
- if (is_encrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
- tag_size = cryp->authsize;
- else
- tag_size = 0;
-
- dst = sg_virt(cryp->out_sg) + _walked_out;
+ unsigned int i;
+ u32 block[AES_BLOCK_32];
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
- if (likely(cryp->total_out - tag_size >= sizeof(u32))) {
- /* Read a full u32 */
- *dst = stm32_cryp_read(cryp, CRYP_DOUT);
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
+ block[i] = stm32_cryp_read(cryp, CRYP_DOUT);
- dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
- cryp->total_out -= sizeof(u32);
- } else if (cryp->total_out == tag_size) {
- /* Empty fifo out (data from input padding) */
- d32 = stm32_cryp_read(cryp, CRYP_DOUT);
- } else {
- /* Read less than an u32 */
- d32 = stm32_cryp_read(cryp, CRYP_DOUT);
- d8 = (u8 *)&d32;
-
- for (j = 0; j < cryp->total_out - tag_size; j++) {
- *((u8 *)dst) = *(d8++);
- dst = stm32_cryp_next_out(cryp, dst, 1);
- }
- cryp->total_out = tag_size;
- }
- }
-
- return !(cryp->total_out - tag_size) || !cryp->total_in;
+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out), 1);
+ cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out);
}
static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
{
- unsigned int i, j;
- u32 *src;
- u8 d8[4];
- size_t tag_size;
-
- /* Do no write tag (if any) */
- if (is_decrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
- tag_size = cryp->authsize;
- else
- tag_size = 0;
-
- src = sg_virt(cryp->in_sg) + _walked_in;
+ unsigned int i;
+ u32 block[AES_BLOCK_32] = {0};
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
- if (likely(cryp->total_in - tag_size >= sizeof(u32))) {
- /* Write a full u32 */
- stm32_cryp_write(cryp, CRYP_DIN, *src);
+ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_in), 0);
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
+ stm32_cryp_write(cryp, CRYP_DIN, block[i]);
- src = stm32_cryp_next_in(cryp, src, sizeof(u32));
- cryp->total_in -= sizeof(u32);
- } else if (cryp->total_in == tag_size) {
- /* Write padding data */
- stm32_cryp_write(cryp, CRYP_DIN, 0);
- } else {
- /* Write less than an u32 */
- memset(d8, 0, sizeof(u32));
- for (j = 0; j < cryp->total_in - tag_size; j++) {
- d8[j] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
- }
-
- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
- cryp->total_in = tag_size;
- }
- }
+ cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in);
}
static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
{
int err;
- u32 cfg, tmp[AES_BLOCK_32];
- size_t total_in_ori = cryp->total_in;
- struct scatterlist *out_sg_ori = cryp->out_sg;
+ u32 cfg, block[AES_BLOCK_32] = {0};
unsigned int i;
/* 'Special workaround' procedure described in the datasheet */
@@ -1350,18 +1297,25 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
/* b) pad and write the last block */
stm32_cryp_irq_write_block(cryp);
- cryp->total_in = total_in_ori;
+	/* Wait for end of processing */
err = stm32_cryp_wait_output(cryp);
if (err) {
- dev_err(cryp->dev, "Timeout (write gcm header)\n");
+ dev_err(cryp->dev, "Timeout (write gcm last data)\n");
return stm32_cryp_finish_req(cryp, err);
}
/* c) get and store encrypted data */
- stm32_cryp_irq_read_data(cryp);
- scatterwalk_map_and_copy(tmp, out_sg_ori,
- cryp->total_in_save - total_in_ori,
- total_in_ori, 0);
+ /*
+ * Same code as stm32_cryp_irq_read_data(), but we want to store
+	 * the block value.
+ */
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
+ block[i] = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out), 1);
+ cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out);
/* d) change mode back to AES GCM */
cfg &= ~CR_ALGO_MASK;
@@ -1374,19 +1328,13 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_CR, cfg);
/* f) write padded data */
- for (i = 0; i < AES_BLOCK_32; i++) {
- if (cryp->total_in)
- stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
- else
- stm32_cryp_write(cryp, CRYP_DIN, 0);
-
- cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
- }
+ for (i = 0; i < AES_BLOCK_32; i++)
+ stm32_cryp_write(cryp, CRYP_DIN, block[i]);
/* g) Empty fifo out */
err = stm32_cryp_wait_output(cryp);
if (err) {
- dev_err(cryp->dev, "Timeout (write gcm header)\n");
+ dev_err(cryp->dev, "Timeout (write gcm padded data)\n");
return stm32_cryp_finish_req(cryp, err);
}
@@ -1399,16 +1347,14 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp)
{
- u32 cfg, payload_bytes;
+ u32 cfg;
	/* disable ip, set NPBLB and re-enable ip */
cfg = stm32_cryp_read(cryp, CRYP_CR);
cfg &= ~CR_CRYPEN;
stm32_cryp_write(cryp, CRYP_CR, cfg);
- payload_bytes = is_decrypt(cryp) ? cryp->total_in - cryp->authsize :
- cryp->total_in;
- cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT;
+ cfg |= (cryp->hw_blocksize - cryp->payload_in) << CR_NBPBL_SHIFT;
cfg |= CR_CRYPEN;
stm32_cryp_write(cryp, CRYP_CR, cfg);
}
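NPBLB simply tells the peripheral how many bytes of the final block are
padding; with the new bookkeeping that is hw_blocksize - payload_in. Worked
arithmetic (illustrative values):

    #include <assert.h>

    int main(void)
    {
        unsigned int hw_blocksize = 16, payload_in = 11;

        assert(hw_blocksize - payload_in == 5); /* value written to NPBLB */
        return 0;
    }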
@@ -1417,13 +1363,11 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
{
int err = 0;
u32 cfg, iv1tmp;
- u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32];
- size_t last_total_out, total_in_ori = cryp->total_in;
- struct scatterlist *out_sg_ori = cryp->out_sg;
+ u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32];
+ u32 block[AES_BLOCK_32] = {0};
unsigned int i;
/* 'Special workaround' procedure described in the datasheet */
- cryp->flags |= FLG_CCM_PADDED_WA;
/* a) disable ip */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@@ -1453,7 +1397,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
/* b) pad and write the last block */
stm32_cryp_irq_write_block(cryp);
- cryp->total_in = total_in_ori;
+	/* Wait for end of processing */
err = stm32_cryp_wait_output(cryp);
if (err) {
dev_err(cryp->dev, "Timeout (wite ccm padded data)\n");
@@ -1461,13 +1405,16 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
}
/* c) get and store decrypted data */
- last_total_out = cryp->total_out;
- stm32_cryp_irq_read_data(cryp);
+ /*
+ * Same code as stm32_cryp_irq_read_data(), but we want to store
+	 * the block value.
+ */
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
+ block[i] = stm32_cryp_read(cryp, CRYP_DOUT);
- memset(tmp, 0, sizeof(tmp));
- scatterwalk_map_and_copy(tmp, out_sg_ori,
- cryp->total_out_save - last_total_out,
- last_total_out, 0);
+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out), 1);
+ cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out);
/* d) Load again CRYP_CSGCMCCMxR */
for (i = 0; i < ARRAY_SIZE(cstmp2); i++)
@@ -1484,10 +1431,10 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_CR, cfg);
/* g) XOR and write padded data */
- for (i = 0; i < ARRAY_SIZE(tmp); i++) {
- tmp[i] ^= cstmp1[i];
- tmp[i] ^= cstmp2[i];
- stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
+ for (i = 0; i < ARRAY_SIZE(block); i++) {
+ block[i] ^= cstmp1[i];
+ block[i] ^= cstmp2[i];
+ stm32_cryp_write(cryp, CRYP_DIN, block[i]);
}
/* h) wait for completion */
@@ -1501,30 +1448,34 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
{
- if (unlikely(!cryp->total_in)) {
+ if (unlikely(!cryp->payload_in)) {
dev_warn(cryp->dev, "No more data to process\n");
return;
}
- if (unlikely(cryp->total_in < AES_BLOCK_SIZE &&
+ if (unlikely(cryp->payload_in < AES_BLOCK_SIZE &&
(stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) &&
is_encrypt(cryp))) {
/* Padding for AES GCM encryption */
- if (cryp->caps->padding_wa)
+ if (cryp->caps->padding_wa) {
/* Special case 1 */
- return stm32_cryp_irq_write_gcm_padded_data(cryp);
+ stm32_cryp_irq_write_gcm_padded_data(cryp);
+ return;
+ }
/* Setting padding bytes (NBBLB) */
stm32_cryp_irq_set_npblb(cryp);
}
- if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) &&
+ if (unlikely((cryp->payload_in < AES_BLOCK_SIZE) &&
(stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) &&
is_decrypt(cryp))) {
/* Padding for AES CCM decryption */
- if (cryp->caps->padding_wa)
+ if (cryp->caps->padding_wa) {
/* Special case 2 */
- return stm32_cryp_irq_write_ccm_padded_data(cryp);
+ stm32_cryp_irq_write_ccm_padded_data(cryp);
+ return;
+ }
/* Setting padding bytes (NBBLB) */
stm32_cryp_irq_set_npblb(cryp);
@@ -1536,192 +1487,60 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
stm32_cryp_irq_write_block(cryp);
}
-static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp)
+static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp)
{
- int err;
- unsigned int i, j;
- u32 cfg, *src;
-
- src = sg_virt(cryp->in_sg) + _walked_in;
-
- for (i = 0; i < AES_BLOCK_32; i++) {
- stm32_cryp_write(cryp, CRYP_DIN, *src);
-
- src = stm32_cryp_next_in(cryp, src, sizeof(u32));
- cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
-
- /* Check if whole header written */
- if ((cryp->total_in_save - cryp->total_in) ==
- cryp->areq->assoclen) {
- /* Write padding if needed */
- for (j = i + 1; j < AES_BLOCK_32; j++)
- stm32_cryp_write(cryp, CRYP_DIN, 0);
-
- /* Wait for completion */
- err = stm32_cryp_wait_busy(cryp);
- if (err) {
- dev_err(cryp->dev, "Timeout (gcm header)\n");
- return stm32_cryp_finish_req(cryp, err);
- }
-
- if (stm32_cryp_get_input_text_len(cryp)) {
- /* Phase 3 : payload */
- cfg = stm32_cryp_read(cryp, CRYP_CR);
- cfg &= ~CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
-
- cfg &= ~CR_PH_MASK;
- cfg |= CR_PH_PAYLOAD;
- cfg |= CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
- } else {
- /* Phase 4 : tag */
- stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- stm32_cryp_finish_req(cryp, 0);
- }
-
- break;
- }
-
- if (!cryp->total_in)
- break;
- }
-}
+ unsigned int i;
+ u32 block[AES_BLOCK_32] = {0};
+ size_t written;
-static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp)
-{
- int err;
- unsigned int i = 0, j, k;
- u32 alen, cfg, *src;
- u8 d8[4];
-
- src = sg_virt(cryp->in_sg) + _walked_in;
- alen = cryp->areq->assoclen;
-
- if (!_walked_in) {
- if (cryp->areq->assoclen <= 65280) {
- /* Write first u32 of B1 */
- d8[0] = (alen >> 8) & 0xFF;
- d8[1] = alen & 0xFF;
- d8[2] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
- d8[3] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
-
- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
- i++;
-
- cryp->total_in -= min_t(size_t, 2, cryp->total_in);
- } else {
- /* Build the two first u32 of B1 */
- d8[0] = 0xFF;
- d8[1] = 0xFE;
- d8[2] = alen & 0xFF000000;
- d8[3] = alen & 0x00FF0000;
-
- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
- i++;
-
- d8[0] = alen & 0x0000FF00;
- d8[1] = alen & 0x000000FF;
- d8[2] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
- d8[3] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
-
- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
- i++;
-
- cryp->total_in -= min_t(size_t, 2, cryp->total_in);
- }
- }
+ written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in);
- /* Write next u32 */
- for (; i < AES_BLOCK_32; i++) {
- /* Build an u32 */
- memset(d8, 0, sizeof(u32));
- for (k = 0; k < sizeof(u32); k++) {
- d8[k] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
-
- cryp->total_in -= min_t(size_t, 1, cryp->total_in);
- if ((cryp->total_in_save - cryp->total_in) == alen)
- break;
- }
+ scatterwalk_copychunks(block, &cryp->in_walk, written, 0);
+ for (i = 0; i < AES_BLOCK_32; i++)
+ stm32_cryp_write(cryp, CRYP_DIN, block[i]);
- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
-
- if ((cryp->total_in_save - cryp->total_in) == alen) {
- /* Write padding if needed */
- for (j = i + 1; j < AES_BLOCK_32; j++)
- stm32_cryp_write(cryp, CRYP_DIN, 0);
-
- /* Wait for completion */
- err = stm32_cryp_wait_busy(cryp);
- if (err) {
- dev_err(cryp->dev, "Timeout (ccm header)\n");
- return stm32_cryp_finish_req(cryp, err);
- }
-
- if (stm32_cryp_get_input_text_len(cryp)) {
- /* Phase 3 : payload */
- cfg = stm32_cryp_read(cryp, CRYP_CR);
- cfg &= ~CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
-
- cfg &= ~CR_PH_MASK;
- cfg |= CR_PH_PAYLOAD;
- cfg |= CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
- } else {
- /* Phase 4 : tag */
- stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- stm32_cryp_finish_req(cryp, 0);
- }
+ cryp->header_in -= written;
- break;
- }
- }
+ stm32_crypt_gcmccm_end_header(cryp);
}
static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
{
struct stm32_cryp *cryp = arg;
u32 ph;
+ u32 it_mask = stm32_cryp_read(cryp, CRYP_IMSCR);
if (cryp->irq_status & MISR_OUT)
/* Output FIFO IRQ: read data */
- if (unlikely(stm32_cryp_irq_read_data(cryp))) {
- /* All bytes processed, finish */
- stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- stm32_cryp_finish_req(cryp, 0);
- return IRQ_HANDLED;
- }
+ stm32_cryp_irq_read_data(cryp);
if (cryp->irq_status & MISR_IN) {
- if (is_gcm(cryp)) {
- ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
- if (unlikely(ph == CR_PH_HEADER))
- /* Write Header */
- stm32_cryp_irq_write_gcm_header(cryp);
- else
- /* Input FIFO IRQ: write data */
- stm32_cryp_irq_write_data(cryp);
- cryp->gcm_ctr++;
- } else if (is_ccm(cryp)) {
+ if (is_gcm(cryp) || is_ccm(cryp)) {
ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
if (unlikely(ph == CR_PH_HEADER))
/* Write Header */
- stm32_cryp_irq_write_ccm_header(cryp);
+ stm32_cryp_irq_write_gcmccm_header(cryp);
else
/* Input FIFO IRQ: write data */
stm32_cryp_irq_write_data(cryp);
+ if (is_gcm(cryp))
+ cryp->gcm_ctr++;
} else {
/* Input FIFO IRQ: write data */
stm32_cryp_irq_write_data(cryp);
}
}
+ /* Mask useless interrupts */
+ if (!cryp->payload_in && !cryp->header_in)
+ it_mask &= ~IMSCR_IN;
+ if (!cryp->payload_out)
+ it_mask &= ~IMSCR_OUT;
+ stm32_cryp_write(cryp, CRYP_IMSCR, it_mask);
+
+ if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out)
+ stm32_cryp_finish_req(cryp, 0);
+
return IRQ_HANDLED;
}
@@ -1742,7 +1561,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1759,7 +1578,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1777,7 +1596,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1795,7 +1614,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1812,7 +1631,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1830,7 +1649,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1847,7 +1666,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1877,7 +1696,7 @@ static struct aead_alg aead_algs[] = {
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
+ .cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
},
@@ -1897,7 +1716,7 @@ static struct aead_alg aead_algs[] = {
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
+ .cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
},
@@ -1955,7 +1774,8 @@ static int stm32_cryp_probe(struct platform_device *pdev)
cryp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(cryp->clk)) {
- dev_err(dev, "Could not get clock\n");
+ dev_err_probe(dev, PTR_ERR(cryp->clk), "Could not get clock\n");
+
return PTR_ERR(cryp->clk);
}
@@ -1973,7 +1793,11 @@ static int stm32_cryp_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
rst = devm_reset_control_get(dev, NULL);
- if (!IS_ERR(rst)) {
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ if (ret == -EPROBE_DEFER)
+ goto err_rst;
+ } else {
reset_control_assert(rst);
udelay(2);
reset_control_deassert(rst);
@@ -2024,9 +1848,7 @@ err_engine1:
spin_lock(&cryp_list.lock);
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
-
- pm_runtime_disable(dev);
- pm_runtime_put_noidle(dev);
+err_rst:
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 389de9e3302d..d33006d43f76 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
struct stm32_hash_request_ctx *rctx)
{
- pm_runtime_resume_and_get(hdev->dev);
+ pm_runtime_get_sync(hdev->dev);
if (!(HASH_FLAGS_INIT & hdev->flags)) {
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
@@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
u32 *preg;
unsigned int i;
- pm_runtime_resume_and_get(hdev->dev);
+ pm_runtime_get_sync(hdev->dev);
while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
cpu_relax();
@@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
preg = rctx->hw_context;
- pm_runtime_resume_and_get(hdev->dev);
+ pm_runtime_get_sync(hdev->dev);
stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
index db5713d7c940..59e1557a620a 100644
--- a/drivers/crypto/ux500/cryp/cryp.h
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -224,6 +224,7 @@ struct cryp_dma {
* @phybase: Pointer to physical memory location of the cryp device.
* @dev: Pointer to the devices dev structure.
* @clk: Pointer to the device's clock control.
+ * @irq: IRQ number
* @pwr_regulator: Pointer to the device's power control.
* @power_status: Current status of the power.
* @ctx_lock: Lock for current_ctx.
@@ -239,6 +240,7 @@ struct cryp_device_data {
phys_addr_t phybase;
struct device *dev;
struct clk *clk;
+ int irq;
struct regulator *pwr_regulator;
int power_status;
spinlock_t ctx_lock;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 30cdd5253929..97277b7150cb 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1257,7 +1257,6 @@ static int ux500_cryp_probe(struct platform_device *pdev)
{
int ret;
struct resource *res;
- struct resource *res_irq;
struct cryp_device_data *device_data;
struct cryp_protection_config prot = {
.privilege_access = CRYP_STATE_ENABLE
@@ -1341,15 +1340,13 @@ static int ux500_cryp_probe(struct platform_device *pdev)
goto out_power;
}
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq) {
- dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
- __func__);
- ret = -ENODEV;
+ device_data->irq = platform_get_irq(pdev, 0);
+ if (device_data->irq <= 0) {
+ ret = device_data->irq ? device_data->irq : -ENXIO;
goto out_power;
}
- ret = devm_request_irq(&pdev->dev, res_irq->start,
+ ret = devm_request_irq(&pdev->dev, device_data->irq,
cryp_interrupt_handler, 0, "cryp1", device_data);
if (ret) {
dev_err(dev, "[%s]: Unable to request IRQ", __func__);
@@ -1489,7 +1486,6 @@ static int ux500_cryp_suspend(struct device *dev)
int ret;
struct platform_device *pdev = to_platform_device(dev);
struct cryp_device_data *device_data;
- struct resource *res_irq;
struct cryp_ctx *temp_ctx = NULL;
dev_dbg(dev, "[%s]", __func__);
@@ -1501,11 +1497,7 @@ static int ux500_cryp_suspend(struct device *dev)
return -ENOMEM;
}
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq)
- dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__);
- else
- disable_irq(res_irq->start);
+ disable_irq(device_data->irq);
spin_lock(&device_data->ctx_lock);
if (!device_data->current_ctx)
@@ -1532,7 +1524,6 @@ static int ux500_cryp_resume(struct device *dev)
int ret = 0;
struct platform_device *pdev = to_platform_device(dev);
struct cryp_device_data *device_data;
- struct resource *res_irq;
struct cryp_ctx *temp_ctx = NULL;
dev_dbg(dev, "[%s]", __func__);
@@ -1556,11 +1547,8 @@ static int ux500_cryp_resume(struct device *dev)
if (ret)
dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
- else {
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res_irq)
- enable_irq(res_irq->start);
- }
+ else
+ enable_irq(device_data->irq);
return ret;
}
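
The ux500 hunks are the standard modernization from platform_get_resource(pdev, IORESOURCE_IRQ, 0) to platform_get_irq(), which also works when the interrupt is mapped through device tree, and the number is cached in device_data->irq so suspend/resume can toggle it without re-querying the resource. The idiom in isolation (example_setup_irq is hypothetical):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int example_setup_irq(struct platform_device *pdev,
			     irq_handler_t handler, void *data, int *out_irq)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;		/* may be -EPROBE_DEFER */

	*out_irq = irq;			/* cache for suspend/resume */
	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), data);
}

The driver's additional irq == 0 check (mapped to -ENXIO above) guards against the legacy "no IRQ" encoding of zero.
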
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index e2375d992308..8e977b7627cb 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -404,7 +404,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
free_engines:
virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
- vcrypto->vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
virtcrypto_del_vqs(vcrypto);
free_dev:
virtcrypto_devmgr_rm_dev(vcrypto);
@@ -436,7 +436,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
virtcrypto_del_vqs(vcrypto);
@@ -456,7 +456,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
@@ -492,7 +492,7 @@ static int virtcrypto_restore(struct virtio_device *vdev)
free_engines:
virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
- vcrypto->vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
virtcrypto_del_vqs(vcrypto);
return err;
}
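
The virtio hunks are a mechanical conversion to virtio_reset_device(), the core helper that wraps the former vdev->config->reset(vdev) indirection and gives virtio a single place to hang future reset-time checks. A freeze handler after the conversion, sketched with hypothetical names:

#include <linux/virtio.h>

static int example_freeze(struct virtio_device *vdev)
{
	virtio_reset_device(vdev);	/* was: vdev->config->reset(vdev) */
	/* ...free unused requests, stop the device, delete vqs... */
	return 0;
}
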
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index e6de221cc568..67c91378f2dd 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -51,6 +51,7 @@ config CXL_ACPI
tristate "CXL ACPI: Platform Support"
depends on ACPI
default CXL_BUS
+ select ACPI_TABLE_LIB
help
Enable support for host managed device memory (HDM) resources
published by a platform's ACPI CXL memory layout description. See
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index dadc7f64b9ff..3163167ecc3a 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -8,8 +8,6 @@
#include <linux/pci.h>
#include "cxl.h"
-static struct acpi_table_header *acpi_cedt;
-
/* Encoding defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
#define CFMWS_INTERLEAVE_WAYS(x) (1 << (x)->interleave_ways)
#define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)
@@ -74,134 +72,64 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
return 0;
}
-static void cxl_add_cfmws_decoders(struct device *dev,
- struct cxl_port *root_port)
+struct cxl_cfmws_context {
+ struct device *dev;
+ struct cxl_port *root_port;
+};
+
+static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
+ struct cxl_cfmws_context *ctx = arg;
+ struct cxl_port *root_port = ctx->root_port;
+ struct device *dev = ctx->dev;
struct acpi_cedt_cfmws *cfmws;
struct cxl_decoder *cxld;
- acpi_size len, cur = 0;
- void *cedt_subtable;
- int rc;
-
- len = acpi_cedt->length - sizeof(*acpi_cedt);
- cedt_subtable = acpi_cedt + 1;
-
- while (cur < len) {
- struct acpi_cedt_header *c = cedt_subtable + cur;
- int i;
-
- if (c->type != ACPI_CEDT_TYPE_CFMWS) {
- cur += c->length;
- continue;
- }
+ int rc, i;
- cfmws = cedt_subtable + cur;
+ cfmws = (struct acpi_cedt_cfmws *) header;
- if (cfmws->header.length < sizeof(*cfmws)) {
- dev_warn_once(dev,
- "CFMWS entry skipped:invalid length:%u\n",
- cfmws->header.length);
- cur += c->length;
- continue;
- }
-
- rc = cxl_acpi_cfmws_verify(dev, cfmws);
- if (rc) {
- dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
- cfmws->base_hpa, cfmws->base_hpa +
- cfmws->window_size - 1);
- cur += c->length;
- continue;
- }
-
- for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
- target_map[i] = cfmws->interleave_targets[i];
-
- cxld = cxl_decoder_alloc(root_port,
- CFMWS_INTERLEAVE_WAYS(cfmws));
- if (IS_ERR(cxld))
- goto next;
-
- cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
- cxld->target_type = CXL_DECODER_EXPANDER;
- cxld->range = (struct range) {
- .start = cfmws->base_hpa,
- .end = cfmws->base_hpa + cfmws->window_size - 1,
- };
- cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
- cxld->interleave_granularity =
- CFMWS_INTERLEAVE_GRANULARITY(cfmws);
-
- rc = cxl_decoder_add(cxld, target_map);
- if (rc)
- put_device(&cxld->dev);
- else
- rc = cxl_decoder_autoremove(dev, cxld);
- if (rc) {
- dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
- cfmws->base_hpa, cfmws->base_hpa +
- cfmws->window_size - 1);
- goto next;
- }
- dev_dbg(dev, "add: %s range %#llx-%#llx\n",
- dev_name(&cxld->dev), cfmws->base_hpa,
+ rc = cxl_acpi_cfmws_verify(dev, cfmws);
+ if (rc) {
+ dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
+ cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
-next:
- cur += c->length;
+ return 0;
}
-}
-
-static struct acpi_cedt_chbs *cxl_acpi_match_chbs(struct device *dev, u32 uid)
-{
- struct acpi_cedt_chbs *chbs, *chbs_match = NULL;
- acpi_size len, cur = 0;
- void *cedt_subtable;
- len = acpi_cedt->length - sizeof(*acpi_cedt);
- cedt_subtable = acpi_cedt + 1;
+ for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
+ target_map[i] = cfmws->interleave_targets[i];
- while (cur < len) {
- struct acpi_cedt_header *c = cedt_subtable + cur;
-
- if (c->type != ACPI_CEDT_TYPE_CHBS) {
- cur += c->length;
- continue;
- }
-
- chbs = cedt_subtable + cur;
-
- if (chbs->header.length < sizeof(*chbs)) {
- dev_warn_once(dev,
- "CHBS entry skipped: invalid length:%u\n",
- chbs->header.length);
- cur += c->length;
- continue;
- }
-
- if (chbs->uid != uid) {
- cur += c->length;
- continue;
- }
+ cxld = cxl_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws));
+ if (IS_ERR(cxld))
+ return 0;
- if (chbs_match) {
- dev_warn_once(dev,
- "CHBS entry skipped: duplicate UID:%u\n",
- uid);
- cur += c->length;
- continue;
- }
+ cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
+ cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->range = (struct range){
+ .start = cfmws->base_hpa,
+ .end = cfmws->base_hpa + cfmws->window_size - 1,
+ };
+ cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
+ cxld->interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY(cfmws);
- chbs_match = chbs;
- cur += c->length;
+ rc = cxl_decoder_add(cxld, target_map);
+ if (rc)
+ put_device(&cxld->dev);
+ else
+ rc = cxl_decoder_autoremove(dev, cxld);
+ if (rc) {
+ dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
+ cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1);
+ return 0;
}
+ dev_dbg(dev, "add: %s node: %d range %#llx-%#llx\n",
+ dev_name(&cxld->dev), phys_to_target_node(cxld->range.start),
+ cfmws->base_hpa, cfmws->base_hpa + cfmws->window_size - 1);
- return chbs_match ? chbs_match : ERR_PTR(-ENODEV);
-}
-
-static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs)
-{
- return IS_ERR(chbs) ? CXL_RESOURCE_NONE : chbs->base;
+ return 0;
}
__mock int match_add_root_ports(struct pci_dev *pdev, void *data)
@@ -355,12 +283,36 @@ static int add_host_bridge_uport(struct device *match, void *arg)
return rc;
}
+struct cxl_chbs_context {
+ struct device *dev;
+ unsigned long long uid;
+ resource_size_t chbcr;
+};
+
+static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct cxl_chbs_context *ctx = arg;
+ struct acpi_cedt_chbs *chbs;
+
+ if (ctx->chbcr)
+ return 0;
+
+ chbs = (struct acpi_cedt_chbs *) header;
+
+ if (ctx->uid != chbs->uid)
+ return 0;
+ ctx->chbcr = chbs->base;
+
+ return 0;
+}
+
static int add_host_bridge_dport(struct device *match, void *arg)
{
int rc;
acpi_status status;
unsigned long long uid;
- struct acpi_cedt_chbs *chbs;
+ struct cxl_chbs_context ctx;
struct cxl_port *root_port = arg;
struct device *host = root_port->dev.parent;
struct acpi_device *bridge = to_cxl_host_bridge(host, match);
@@ -376,14 +328,19 @@ static int add_host_bridge_dport(struct device *match, void *arg)
return -ENODEV;
}
- chbs = cxl_acpi_match_chbs(host, uid);
- if (IS_ERR(chbs)) {
+ ctx = (struct cxl_chbs_context) {
+ .dev = host,
+ .uid = uid,
+ };
+ acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx);
+
+ if (ctx.chbcr == 0) {
dev_warn(host, "No CHBS found for Host Bridge: %s\n",
dev_name(match));
return 0;
}
- rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs));
+ rc = cxl_add_dport(root_port, match, uid, ctx.chbcr);
if (rc) {
dev_err(host, "failed to add downstream port: %s\n",
dev_name(match));
@@ -417,40 +374,29 @@ static int add_root_nvdimm_bridge(struct device *match, void *data)
return 1;
}
-static u32 cedt_instance(struct platform_device *pdev)
-{
- const bool *native_acpi0017 = acpi_device_get_match_data(&pdev->dev);
-
- if (native_acpi0017 && *native_acpi0017)
- return 0;
-
- /* for cxl_test request a non-canonical instance */
- return U32_MAX;
-}
-
static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
- acpi_status status;
struct cxl_port *root_port;
struct device *host = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(host);
+ struct cxl_cfmws_context ctx;
root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(root_port))
return PTR_ERR(root_port);
dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));
- status = acpi_get_table(ACPI_SIG_CEDT, cedt_instance(pdev), &acpi_cedt);
- if (ACPI_FAILURE(status))
- return -ENXIO;
-
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
add_host_bridge_dport);
- if (rc)
- goto out;
+ if (rc < 0)
+ return rc;
- cxl_add_cfmws_decoders(host, root_port);
+ ctx = (struct cxl_cfmws_context) {
+ .dev = host,
+ .root_port = root_port,
+ };
+ acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
/*
* Root level scanned with host-bridge as dports, now scan host-bridges
@@ -458,24 +404,20 @@ static int cxl_acpi_probe(struct platform_device *pdev)
*/
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
add_host_bridge_uport);
- if (rc)
- goto out;
+ if (rc < 0)
+ return rc;
if (IS_ENABLED(CONFIG_CXL_PMEM))
rc = device_for_each_child(&root_port->dev, root_port,
add_root_nvdimm_bridge);
-
-out:
- acpi_put_table(acpi_cedt);
if (rc < 0)
return rc;
+
return 0;
}
-static bool native_acpi0017 = true;
-
static const struct acpi_device_id cxl_acpi_ids[] = {
- { "ACPI0017", (unsigned long) &native_acpi0017 },
+ { "ACPI0017" },
{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);
@@ -491,3 +433,4 @@ static struct platform_driver cxl_acpi_driver = {
module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS(ACPI);
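
The acpi.c rewrite drops both hand-rolled CEDT walkers in favor of acpi_table_parse_cedt(), which iterates all subtables of a given type and hands each one to a callback together with a caller-supplied context pointer; the table reference counting moves into the helper, which is why the acpi_get_table()/acpi_put_table() pair disappears from probe. The resulting shape, sketched for a hypothetical CHBS search mirroring cxl_get_chbcr() above:

#include <linux/acpi.h>

struct find_ctx {
	unsigned long long uid;	/* input: UID to match */
	resource_size_t base;	/* output: 0 until found */
};

/* Invoked once per ACPI_CEDT_TYPE_CHBS subtable. */
static int find_chbs_cb(union acpi_subtable_headers *header, void *arg,
			const unsigned long end)
{
	struct find_ctx *ctx = arg;
	struct acpi_cedt_chbs *chbs = (struct acpi_cedt_chbs *)header;

	if (!ctx->base && chbs->uid == ctx->uid)
		ctx->base = chbs->base;
	return 0;	/* a non-zero return aborts the walk */
}

/* Usage:
 *	struct find_ctx ctx = { .uid = uid };
 *	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, find_chbs_cb, &ctx);
 */
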
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 07eb8e1fb8a6..40ab50318daf 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CXL_BUS) += cxl_core.o
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl
+ccflags-y += -I$(srctree)/drivers/cxl
cxl_core-y := bus.o
cxl_core-y += pmem.o
cxl_core-y += regs.o
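
This one-line Makefile change works in tandem with the export churn in the hunks that follow: instead of stamping every export with a namespace implicitly via -DDEFAULT_SYMBOL_NAMESPACE=CXL, each symbol is exported explicitly with EXPORT_SYMBOL_NS_GPL(sym, CXL), and any consumer must declare the dependency, as cxl/acpi.c does above with MODULE_IMPORT_NS(ACPI). The provider/consumer pairing, with a hypothetical EXAMPLE namespace:

/* provider.c: place a symbol in the EXAMPLE namespace */
#include <linux/module.h>

int example_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(example_helper, EXAMPLE);

/* consumer.c: without this import, modpost warns at build time and
 * the module loader rejects the unimported use. */
MODULE_IMPORT_NS(EXAMPLE);
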
diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c
index ebd061d03950..3f9b98ecd18b 100644
--- a/drivers/cxl/core/bus.c
+++ b/drivers/cxl/core/bus.c
@@ -200,7 +200,7 @@ bool is_root_decoder(struct device *dev)
{
return dev->type == &cxl_decoder_root_type;
}
-EXPORT_SYMBOL_GPL(is_root_decoder);
+EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
@@ -209,7 +209,7 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_decoder, dev);
}
-EXPORT_SYMBOL_GPL(to_cxl_decoder);
+EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
static void cxl_dport_release(struct cxl_dport *dport)
{
@@ -376,7 +376,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(devm_cxl_add_port);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
@@ -451,7 +451,7 @@ err:
cxl_dport_release(dport);
return rc;
}
-EXPORT_SYMBOL_GPL(cxl_add_dport);
+EXPORT_SYMBOL_NS_GPL(cxl_add_dport, CXL);
static int decoder_populate_targets(struct cxl_decoder *cxld,
struct cxl_port *port, int *target_map)
@@ -485,9 +485,7 @@ out_unlock:
struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
{
- struct cxl_decoder *cxld, cxld_const_init = {
- .nr_targets = nr_targets,
- };
+ struct cxl_decoder *cxld;
struct device *dev;
int rc = 0;
@@ -497,13 +495,13 @@ struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
if (!cxld)
return ERR_PTR(-ENOMEM);
- memcpy(cxld, &cxld_const_init, sizeof(cxld_const_init));
rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
if (rc < 0)
goto err;
cxld->id = rc;
+ cxld->nr_targets = nr_targets;
dev = &cxld->dev;
device_initialize(dev);
device_set_pm_not_required(dev);
@@ -521,7 +519,7 @@ err:
kfree(cxld);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(cxl_decoder_alloc);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_alloc, CXL);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
@@ -550,7 +548,7 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
return device_add(dev);
}
-EXPORT_SYMBOL_GPL(cxl_decoder_add);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
static void cxld_unregister(void *dev)
{
@@ -561,7 +559,7 @@ int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
-EXPORT_SYMBOL_GPL(cxl_decoder_autoremove);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
/**
* __cxl_driver_register - register a driver for the cxl bus
@@ -594,13 +592,13 @@ int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
return driver_register(&cxl_drv->drv);
}
-EXPORT_SYMBOL_GPL(__cxl_driver_register);
+EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
driver_unregister(&cxl_drv->drv);
}
-EXPORT_SYMBOL_GPL(cxl_driver_unregister);
+EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
static int cxl_device_id(struct device *dev)
{
@@ -642,7 +640,7 @@ struct bus_type cxl_bus_type = {
.probe = cxl_bus_probe,
.remove = cxl_bus_remove,
};
-EXPORT_SYMBOL_GPL(cxl_bus_type);
+EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
static __init int cxl_core_init(void)
{
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 576796a5d9f3..be61a0d8016b 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -128,8 +128,8 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
}
/**
- * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
- * @cxlm: The CXL memory device to communicate with.
+ * cxl_mbox_send_cmd() - Send a mailbox command to a device.
+ * @cxlds: The device data for the operation
* @opcode: Opcode for the mailbox command.
* @in: The input payload for the mailbox command.
* @in_size: The length of the input payload
@@ -148,11 +148,9 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
* Mailbox commands may execute successfully yet the device itself reported an
* error. While this distinction can be useful for commands from userspace, the
* kernel will only be able to use results when both are successful.
- *
- * See __cxl_mem_mbox_send_cmd()
*/
-int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
- size_t in_size, void *out, size_t out_size)
+int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
+ size_t in_size, void *out, size_t out_size)
{
const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
struct cxl_mbox_cmd mbox_cmd = {
@@ -164,10 +162,10 @@ int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
};
int rc;
- if (out_size > cxlm->payload_size)
+ if (out_size > cxlds->payload_size)
return -E2BIG;
- rc = cxlm->mbox_send(cxlm, &mbox_cmd);
+ rc = cxlds->mbox_send(cxlds, &mbox_cmd);
if (rc)
return rc;
@@ -184,7 +182,7 @@ int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_mem_mbox_send_cmd);
+EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
@@ -211,7 +209,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
/**
* cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
- * @cxlm: &struct cxl_mem device whose mailbox will be used.
+ * @cxlds: The device data for the operation
* @send_cmd: &struct cxl_send_command copied in from userspace.
* @out_cmd: Sanitized and populated &struct cxl_mem_command.
*
@@ -228,7 +226,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
*
* See handle_mailbox_cmd_from_user()
*/
-static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
+static int cxl_validate_cmd_from_user(struct cxl_dev_state *cxlds,
const struct cxl_send_command *send_cmd,
struct cxl_mem_command *out_cmd)
{
@@ -243,7 +241,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
* supports, but output can be arbitrarily large (simply write out as
* much data as the hardware provides).
*/
- if (send_cmd->in.size > cxlm->payload_size)
+ if (send_cmd->in.size > cxlds->payload_size)
return -EINVAL;
/*
@@ -269,7 +267,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
* gets passed along without further checking, so it must be
* validated here.
*/
- if (send_cmd->out.size > cxlm->payload_size)
+ if (send_cmd->out.size > cxlds->payload_size)
return -EINVAL;
if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
@@ -294,11 +292,11 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
info = &c->info;
/* Check that the command is enabled for hardware */
- if (!test_bit(info->id, cxlm->enabled_cmds))
+ if (!test_bit(info->id, cxlds->enabled_cmds))
return -ENOTTY;
/* Check that the command is not claimed for exclusive kernel use */
- if (test_bit(info->id, cxlm->exclusive_cmds))
+ if (test_bit(info->id, cxlds->exclusive_cmds))
return -EBUSY;
/* Check the input buffer is the expected size */
@@ -356,7 +354,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
/**
* handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
- * @cxlm: The CXL memory device to communicate with.
+ * @cxlds: The device data for the operation
* @cmd: The validated command.
* @in_payload: Pointer to userspace's input payload.
* @out_payload: Pointer to userspace's output payload.
@@ -379,12 +377,12 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
*
* See cxl_send_cmd().
*/
-static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
+static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
const struct cxl_mem_command *cmd,
u64 in_payload, u64 out_payload,
s32 *size_out, u32 *retval)
{
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
struct cxl_mbox_cmd mbox_cmd = {
.opcode = cmd->opcode,
.size_in = cmd->info.size_in,
@@ -417,7 +415,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
"raw command path used\n");
- rc = cxlm->mbox_send(cxlm, &mbox_cmd);
+ rc = cxlds->mbox_send(cxlds, &mbox_cmd);
if (rc)
goto out;
@@ -447,7 +445,7 @@ out:
int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxlmd->dev;
struct cxl_send_command send;
struct cxl_mem_command c;
@@ -458,15 +456,15 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
if (copy_from_user(&send, s, sizeof(send)))
return -EFAULT;
- rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c);
+ rc = cxl_validate_cmd_from_user(cxlmd->cxlds, &send, &c);
if (rc)
return rc;
/* Prepare to handle a full payload for variable sized output */
if (c.info.size_out < 0)
- c.info.size_out = cxlm->payload_size;
+ c.info.size_out = cxlds->payload_size;
- rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload,
+ rc = handle_mailbox_cmd_from_user(cxlds, &c, send.in.payload,
send.out.payload, &send.out.size,
&send.retval);
if (rc)
@@ -478,13 +476,13 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
return 0;
}
-static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
+static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
u32 remaining = size;
u32 offset = 0;
while (remaining) {
- u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
+ u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
struct cxl_mbox_get_log log = {
.uuid = *uuid,
.offset = cpu_to_le32(offset),
@@ -492,8 +490,8 @@ static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
};
int rc;
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
- sizeof(log), out, xfer_size);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
+ out, xfer_size);
if (rc < 0)
return rc;
@@ -507,14 +505,14 @@ static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
/**
* cxl_walk_cel() - Walk through the Command Effects Log.
- * @cxlm: Device.
+ * @cxlds: The device data for the operation
* @size: Length of the Command Effects Log.
* @cel: CEL
*
* Iterate over each entry in the CEL and determine if the driver supports the
* command. If so, the command is enabled for the device and can be used later.
*/
-static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
+static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
struct cxl_cel_entry *cel_entry;
const int cel_entries = size / sizeof(*cel_entry);
@@ -527,26 +525,26 @@ static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
if (!cmd) {
- dev_dbg(cxlm->dev,
+ dev_dbg(cxlds->dev,
"Opcode 0x%04x unsupported by driver", opcode);
continue;
}
- set_bit(cmd->info.id, cxlm->enabled_cmds);
+ set_bit(cmd->info.id, cxlds->enabled_cmds);
}
}
-static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
+static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_supported_logs *ret;
int rc;
- ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
+ ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
- 0, ret, cxlm->payload_size);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
+ cxlds->payload_size);
if (rc < 0) {
kvfree(ret);
return ERR_PTR(rc);
@@ -567,23 +565,23 @@ static const uuid_t log_uuid[] = {
};
/**
- * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
- * @cxlm: The device.
+ * cxl_enumerate_cmds() - Enumerate commands for a device.
+ * @cxlds: The device data for the operation
*
* Returns 0 if enumerate completed successfully.
*
* CXL devices have optional support for certain commands. This function will
* determine the set of supported commands for the hardware and update the
- * enabled_cmds bitmap in the @cxlm.
+ * enabled_cmds bitmap in the @cxlds.
*/
-int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
+int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_supported_logs *gsl;
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
struct cxl_mem_command *cmd;
int i, rc;
- gsl = cxl_get_gsl(cxlm);
+ gsl = cxl_get_gsl(cxlds);
if (IS_ERR(gsl))
return PTR_ERR(gsl);
@@ -604,19 +602,19 @@ int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
goto out;
}
- rc = cxl_xfer_log(cxlm, &uuid, size, log);
+ rc = cxl_xfer_log(cxlds, &uuid, size, log);
if (rc) {
kvfree(log);
goto out;
}
- cxl_walk_cel(cxlm, size, log);
+ cxl_walk_cel(cxlds, size, log);
kvfree(log);
/* In case CEL was bogus, enable some default commands. */
cxl_for_each_cmd(cmd)
if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
- set_bit(cmd->info.id, cxlm->enabled_cmds);
+ set_bit(cmd->info.id, cxlds->enabled_cmds);
/* Found the required CEL */
rc = 0;
@@ -626,11 +624,11 @@ out:
kvfree(gsl);
return rc;
}
-EXPORT_SYMBOL_GPL(cxl_mem_enumerate_cmds);
+EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
/**
* cxl_mem_get_partition_info - Get partition info
- * @cxlm: cxl_mem instance to update partition info
+ * @cxlds: The device data for the operation
*
* Retrieve the current partition info for the device specified. The active
* values are the current capacity in bytes. If not 0, the 'next' values are
@@ -640,7 +638,7 @@ EXPORT_SYMBOL_GPL(cxl_mem_enumerate_cmds);
*
* See CXL 2.0 8.2.9.5.2.1 Get Partition Info
*/
-static int cxl_mem_get_partition_info(struct cxl_mem *cxlm)
+static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_partition_info {
__le64 active_volatile_cap;
@@ -650,124 +648,124 @@ static int cxl_mem_get_partition_info(struct cxl_mem *cxlm)
} __packed pi;
int rc;
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO,
- NULL, 0, &pi, sizeof(pi));
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
+ &pi, sizeof(pi));
if (rc)
return rc;
- cxlm->active_volatile_bytes =
+ cxlds->active_volatile_bytes =
le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlm->active_persistent_bytes =
+ cxlds->active_persistent_bytes =
le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlm->next_volatile_bytes =
+ cxlds->next_volatile_bytes =
le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlm->next_persistent_bytes =
+ cxlds->next_persistent_bytes =
le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
return 0;
}
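
The Get Partition Info payload (like the Identify payload below) expresses capacities in units of CXL_CAPACITY_MULTIPLIER, i.e. 256 MiB, so each little-endian field is scaled once on the way into the cached byte counts. A worked check of the conversion, with an illustrative raw value:

#include <linux/sizes.h>

#define CXL_CAPACITY_MULTIPLIER	SZ_256M	/* from cxlmem.h */

/* e.g. a raw capacity field of 0x40 units decodes to
 * 0x40 * 256 MiB = 0x400000000 bytes = 16 GiB. */
static inline u64 cap_units_to_bytes(u64 units)
{
	return units * CXL_CAPACITY_MULTIPLIER;
}
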
/**
- * cxl_mem_identify() - Send the IDENTIFY command to the device.
- * @cxlm: The device to identify.
+ * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
+ * @cxlds: The device data for the operation
*
* Return: 0 if identify was executed successfully.
*
* This will dispatch the identify command to the device and on success populate
* structures to be exported to sysfs.
*/
-int cxl_mem_identify(struct cxl_mem *cxlm)
+int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify id;
int rc;
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
- sizeof(id));
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
+ sizeof(id));
if (rc < 0)
return rc;
- cxlm->total_bytes =
+ cxlds->total_bytes =
le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlm->volatile_only_bytes =
+ cxlds->volatile_only_bytes =
le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlm->persistent_only_bytes =
+ cxlds->persistent_only_bytes =
le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlm->partition_align_bytes =
+ cxlds->partition_align_bytes =
le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
- dev_dbg(cxlm->dev,
+ dev_dbg(cxlds->dev,
"Identify Memory Device\n"
" total_bytes = %#llx\n"
" volatile_only_bytes = %#llx\n"
" persistent_only_bytes = %#llx\n"
" partition_align_bytes = %#llx\n",
- cxlm->total_bytes, cxlm->volatile_only_bytes,
- cxlm->persistent_only_bytes, cxlm->partition_align_bytes);
+ cxlds->total_bytes, cxlds->volatile_only_bytes,
+ cxlds->persistent_only_bytes, cxlds->partition_align_bytes);
- cxlm->lsa_size = le32_to_cpu(id.lsa_size);
- memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+ cxlds->lsa_size = le32_to_cpu(id.lsa_size);
+ memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_mem_identify);
+EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
-int cxl_mem_create_range_info(struct cxl_mem *cxlm)
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
int rc;
- if (cxlm->partition_align_bytes == 0) {
- cxlm->ram_range.start = 0;
- cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
- cxlm->pmem_range.start = cxlm->volatile_only_bytes;
- cxlm->pmem_range.end = cxlm->volatile_only_bytes +
- cxlm->persistent_only_bytes - 1;
+ if (cxlds->partition_align_bytes == 0) {
+ cxlds->ram_range.start = 0;
+ cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
+ cxlds->pmem_range.start = cxlds->volatile_only_bytes;
+ cxlds->pmem_range.end = cxlds->volatile_only_bytes +
+ cxlds->persistent_only_bytes - 1;
return 0;
}
- rc = cxl_mem_get_partition_info(cxlm);
+ rc = cxl_mem_get_partition_info(cxlds);
if (rc) {
- dev_err(cxlm->dev, "Failed to query partition information\n");
+ dev_err(cxlds->dev, "Failed to query partition information\n");
return rc;
}
- dev_dbg(cxlm->dev,
+ dev_dbg(cxlds->dev,
"Get Partition Info\n"
" active_volatile_bytes = %#llx\n"
" active_persistent_bytes = %#llx\n"
" next_volatile_bytes = %#llx\n"
" next_persistent_bytes = %#llx\n",
- cxlm->active_volatile_bytes, cxlm->active_persistent_bytes,
- cxlm->next_volatile_bytes, cxlm->next_persistent_bytes);
+ cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
+ cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);
- cxlm->ram_range.start = 0;
- cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;
+ cxlds->ram_range.start = 0;
+ cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;
- cxlm->pmem_range.start = cxlm->active_volatile_bytes;
- cxlm->pmem_range.end =
- cxlm->active_volatile_bytes + cxlm->active_persistent_bytes - 1;
+ cxlds->pmem_range.start = cxlds->active_volatile_bytes;
+ cxlds->pmem_range.end =
+ cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_mem_create_range_info);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
-struct cxl_mem *cxl_mem_create(struct device *dev)
+struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
- struct cxl_mem *cxlm;
+ struct cxl_dev_state *cxlds;
- cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL);
- if (!cxlm) {
+ cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
+ if (!cxlds) {
dev_err(dev, "No memory available\n");
return ERR_PTR(-ENOMEM);
}
- mutex_init(&cxlm->mbox_mutex);
- cxlm->dev = dev;
+ mutex_init(&cxlds->mbox_mutex);
+ cxlds->dev = dev;
- return cxlm;
+ return cxlds;
}
-EXPORT_SYMBOL_GPL(cxl_mem_create);
+EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
static struct dentry *cxl_debugfs;
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index bf1b04d00ff4..61029cb7ac62 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -37,9 +37,9 @@ static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
- return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
+ return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);
@@ -47,9 +47,9 @@ static ssize_t payload_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
- return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
+ return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
}
static DEVICE_ATTR_RO(payload_max);
@@ -57,9 +57,9 @@ static ssize_t label_storage_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
- return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
+ return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);
@@ -67,8 +67,8 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
- unsigned long long len = range_len(&cxlm->ram_range);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ unsigned long long len = range_len(&cxlds->ram_range);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -80,8 +80,8 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
- unsigned long long len = range_len(&cxlm->pmem_range);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ unsigned long long len = range_len(&cxlds->pmem_range);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -136,42 +136,42 @@ static const struct device_type cxl_memdev_type = {
/**
* set_exclusive_cxl_commands() - atomically disable user cxl commands
- * @cxlm: cxl_mem instance to modify
+ * @cxlds: The device state to operate on
* @cmds: bitmap of commands to mark exclusive
*
* Grab the cxl_memdev_rwsem in write mode to flush in-flight
* invocations of the ioctl path and then disable future execution of
* commands with the command ids set in @cmds.
*/
-void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
down_write(&cxl_memdev_rwsem);
- bitmap_or(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+ bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
-EXPORT_SYMBOL_GPL(set_exclusive_cxl_commands);
+EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
/**
* clear_exclusive_cxl_commands() - atomically enable user cxl commands
- * @cxlm: cxl_mem instance to modify
+ * @cxlds: The device state to modify
* @cmds: bitmap of commands to mark available for userspace
*/
-void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
down_write(&cxl_memdev_rwsem);
- bitmap_andnot(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+ bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
-EXPORT_SYMBOL_GPL(clear_exclusive_cxl_commands);
+EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
static void cxl_memdev_shutdown(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
down_write(&cxl_memdev_rwsem);
- cxlmd->cxlm = NULL;
+ cxlmd->cxlds = NULL;
up_write(&cxl_memdev_rwsem);
}
@@ -185,7 +185,7 @@ static void cxl_memdev_unregister(void *_cxlmd)
put_device(dev);
}
-static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
const struct file_operations *fops)
{
struct cxl_memdev *cxlmd;
@@ -204,7 +204,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
dev = &cxlmd->dev;
device_initialize(dev);
- dev->parent = cxlm->dev;
+ dev->parent = cxlds->dev;
dev->bus = &cxl_bus_type;
dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
dev->type = &cxl_memdev_type;
@@ -239,7 +239,7 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
int rc = -ENXIO;
down_read(&cxl_memdev_rwsem);
- if (cxlmd->cxlm)
+ if (cxlmd->cxlds)
rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
up_read(&cxl_memdev_rwsem);
@@ -276,15 +276,14 @@ static const struct file_operations cxl_memdev_fops = {
.llseek = noop_llseek,
};
-struct cxl_memdev *
-devm_cxl_add_memdev(struct cxl_mem *cxlm)
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
{
struct cxl_memdev *cxlmd;
struct device *dev;
struct cdev *cdev;
int rc;
- cxlmd = cxl_memdev_alloc(cxlm, &cxl_memdev_fops);
+ cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
if (IS_ERR(cxlmd))
return cxlmd;
@@ -297,14 +296,14 @@ devm_cxl_add_memdev(struct cxl_mem *cxlm)
* Activate ioctl operations, no cxl_memdev_rwsem manipulation
* needed as this is ordered with cdev_add() publishing the device.
*/
- cxlmd->cxlm = cxlm;
+ cxlmd->cxlds = cxlds;
cdev = &cxlmd->cdev;
rc = cdev_device_add(cdev, dev);
if (rc)
goto err;
- rc = devm_add_action_or_reset(cxlm->dev, cxl_memdev_unregister, cxlmd);
+ rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
if (rc)
return ERR_PTR(rc);
return cxlmd;
@@ -318,7 +317,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(devm_cxl_add_memdev);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
__init int cxl_memdev_init(void)
{
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index 5032f4c1c69d..b5fca97b0a07 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -49,12 +49,18 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
return NULL;
return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
+EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);
-__mock int match_nvdimm_bridge(struct device *dev, const void *data)
+bool is_cxl_nvdimm_bridge(struct device *dev)
{
return dev->type == &cxl_nvdimm_bridge_type;
}
+EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);
+
+__mock int match_nvdimm_bridge(struct device *dev, const void *data)
+{
+ return is_cxl_nvdimm_bridge(dev);
+}
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
{
@@ -65,7 +71,7 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
return NULL;
return to_cxl_nvdimm_bridge(dev);
}
-EXPORT_SYMBOL_GPL(cxl_find_nvdimm_bridge);
+EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
static struct cxl_nvdimm_bridge *
cxl_nvdimm_bridge_alloc(struct cxl_port *port)
@@ -167,7 +173,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);
static void cxl_nvdimm_release(struct device *dev)
{
@@ -191,7 +197,7 @@ bool is_cxl_nvdimm(struct device *dev)
{
return dev->type == &cxl_nvdimm_type;
}
-EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
+EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
@@ -200,7 +206,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
return NULL;
return container_of(dev, struct cxl_nvdimm, dev);
}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
+EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
{
@@ -262,4 +268,4 @@ err:
put_device(dev);
return rc;
}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 41de4a136ecd..e37e23bf4355 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -90,7 +90,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
}
}
}
-EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
+EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, CXL);
/**
* cxl_probe_device_regs() - Detect CXL Device register blocks
@@ -156,7 +156,7 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base,
}
}
}
-EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
+EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, CXL);
static void __iomem *devm_cxl_iomap_block(struct device *dev,
resource_size_t addr,
@@ -199,7 +199,7 @@ int cxl_map_component_regs(struct pci_dev *pdev,
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_map_component_regs);
+EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
int cxl_map_device_regs(struct pci_dev *pdev,
struct cxl_device_regs *regs,
@@ -246,4 +246,4 @@ int cxl_map_device_regs(struct pci_dev *pdev,
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_map_device_regs);
+EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 3af704e9b448..a5a0be3f088b 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -191,11 +191,18 @@ struct cxl_decoder {
int interleave_granularity;
enum cxl_decoder_type target_type;
unsigned long flags;
- const int nr_targets;
+ int nr_targets;
struct cxl_dport *target[];
};
+/**
+ * enum cxl_nvdimm_brige_state - state machine for managing bus rescans
+ * @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
+ * @CXL_NVB_DEAD: Set at bridge unregistration to preclude async probing
+ * @CXL_NVB_ONLINE: Target state after successful ->probe()
+ * @CXL_NVB_OFFLINE: Target state after ->remove() or failed ->probe()
+ */
enum cxl_nvdimm_brige_state {
CXL_NVB_NEW,
CXL_NVB_DEAD,
@@ -308,6 +315,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
+bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index c4f450ad434d..8d96d009ad90 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -33,13 +33,13 @@
* struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
* @dev: driver core device object
* @cdev: char dev core object for ioctl operations
- * @cxlm: pointer to the parent device driver data
+ * @cxlds: The device state backing this device
* @id: id number of this memdev instance.
*/
struct cxl_memdev {
struct device dev;
struct cdev cdev;
- struct cxl_mem *cxlm;
+ struct cxl_dev_state *cxlds;
int id;
};
@@ -48,7 +48,7 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
return container_of(dev, struct cxl_memdev, dev);
}
-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_mem *cxlm);
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
/**
* struct cxl_mbox_cmd - A command to be submitted to hardware.
@@ -90,9 +90,13 @@ struct cxl_mbox_cmd {
#define CXL_CAPACITY_MULTIPLIER SZ_256M
/**
- * struct cxl_mem - A CXL memory device
- * @dev: The device associated with this CXL device.
- * @cxlmd: Logical memory device chardev / interface
+ * struct cxl_dev_state - The driver device state
+ *
+ * cxl_dev_state represents the CXL driver/device state. It provides an
+ * interface to mailbox commands as well as some cached data about the device.
+ * Currently only memory devices are represented.
+ *
+ * @dev: The device associated with this CXL state
* @regs: Parsed register blocks
* @payload_size: Size of space for payload
* (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
@@ -117,9 +121,8 @@ struct cxl_mbox_cmd {
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
*/
-struct cxl_mem {
+struct cxl_dev_state {
struct device *dev;
- struct cxl_memdev *cxlmd;
struct cxl_regs regs;
@@ -142,7 +145,7 @@ struct cxl_mem {
u64 next_volatile_bytes;
u64 next_persistent_bytes;
- int (*mbox_send)(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd);
+ int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
};
enum cxl_opcode {
@@ -253,12 +256,12 @@ struct cxl_mem_command {
#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
};
-int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
- size_t in_size, void *out, size_t out_size);
-int cxl_mem_identify(struct cxl_mem *cxlm);
-int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm);
-int cxl_mem_create_range_info(struct cxl_mem *cxlm);
-struct cxl_mem *cxl_mem_create(struct device *dev);
-void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
-void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
+int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
+ size_t in_size, void *out, size_t out_size);
+int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
+int cxl_enumerate_cmds(struct cxl_dev_state *cxlds);
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds);
+struct cxl_dev_state *cxl_dev_state_create(struct device *dev);
+void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
+void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
#endif /* __CXL_MEM_H__ */
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index c734e21fb4e0..8dc91fd3396a 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -28,39 +28,39 @@
* - Registers a CXL mailbox with cxl_core.
*/
-#define cxl_doorbell_busy(cxlm) \
- (readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \
+#define cxl_doorbell_busy(cxlds) \
+ (readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \
CXLDEV_MBOX_CTRL_DOORBELL)
/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
-static int cxl_pci_mbox_wait_for_doorbell(struct cxl_mem *cxlm)
+static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
const unsigned long start = jiffies;
unsigned long end = start;
- while (cxl_doorbell_busy(cxlm)) {
+ while (cxl_doorbell_busy(cxlds)) {
end = jiffies;
if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
/* Check again in case preempted before timeout test */
- if (!cxl_doorbell_busy(cxlm))
+ if (!cxl_doorbell_busy(cxlds))
break;
return -ETIMEDOUT;
}
cpu_relax();
}
- dev_dbg(cxlm->dev, "Doorbell wait took %dms",
+ dev_dbg(cxlds->dev, "Doorbell wait took %dms",
jiffies_to_msecs(end) - jiffies_to_msecs(start));
return 0;
}
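
cxl_pci_mbox_wait_for_doorbell() is a poll-until-deadline loop with one deliberate wrinkle: after the deadline expires it samples the doorbell one last time, so a thread preempted across the timeout boundary does not return a spurious -ETIMEDOUT when the hardware actually completed in the meantime. The recheck-after-deadline pattern in isolation (poll_done and its parameters are hypothetical):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/processor.h>	/* cpu_relax() */
#include <linux/types.h>

static int poll_done(bool (*done)(void *), void *arg,
		     unsigned long timeout /* in jiffies */)
{
	const unsigned long start = jiffies;

	while (!done(arg)) {
		if (time_after(jiffies, start + timeout)) {
			if (done(arg))	/* final recheck */
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}
	return 0;
}
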
-static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm,
+static void cxl_pci_mbox_timeout(struct cxl_dev_state *cxlds,
struct cxl_mbox_cmd *mbox_cmd)
{
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
mbox_cmd->opcode, mbox_cmd->size_in);
@@ -68,7 +68,7 @@ static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm,
/**
* __cxl_pci_mbox_send_cmd() - Execute a mailbox command
- * @cxlm: The CXL memory device to communicate with.
+ * @cxlds: The device state to communicate with.
* @mbox_cmd: Command to send to the memory device.
*
* Context: Any context. Expects mbox_mutex to be held.
@@ -88,16 +88,16 @@ static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm,
* not need to coordinate with each other. The driver only uses the primary
* mailbox.
*/
-static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
+static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
struct cxl_mbox_cmd *mbox_cmd)
{
- void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
- struct device *dev = cxlm->dev;
+ void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
+ struct device *dev = cxlds->dev;
u64 cmd_reg, status_reg;
size_t out_len;
int rc;
- lockdep_assert_held(&cxlm->mbox_mutex);
+ lockdep_assert_held(&cxlds->mbox_mutex);
/*
* Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
@@ -117,7 +117,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
*/
/* #1 */
- if (cxl_doorbell_busy(cxlm)) {
+ if (cxl_doorbell_busy(cxlds)) {
dev_err_ratelimited(dev, "Mailbox re-busy after acquiring\n");
return -EBUSY;
}
@@ -134,22 +134,22 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
}
/* #2, #3 */
- writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
+ writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
/* #4 */
dev_dbg(dev, "Sending command\n");
writel(CXLDEV_MBOX_CTRL_DOORBELL,
- cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+ cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
/* #5 */
- rc = cxl_pci_mbox_wait_for_doorbell(cxlm);
+ rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
if (rc == -ETIMEDOUT) {
- cxl_pci_mbox_timeout(cxlm, mbox_cmd);
+ cxl_pci_mbox_timeout(cxlds, mbox_cmd);
return rc;
}
/* #6 */
- status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
+ status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
mbox_cmd->return_code =
FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
@@ -159,7 +159,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
}
/* #7 */
- cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
+ cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
/* #8 */
@@ -171,7 +171,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
* have requested less data than the hardware supplied even
* within spec.
*/
- size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);
+ size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);
memcpy_fromio(mbox_cmd->payload_out, payload, n);
mbox_cmd->size_out = n;
@@ -184,18 +184,18 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
/**
* cxl_pci_mbox_get() - Acquire exclusive access to the mailbox.
- * @cxlm: The memory device to gain access to.
+ * @cxlds: The device state to gain access to.
*
* Context: Any context. Takes the mbox_mutex.
* Return: 0 if exclusive access was acquired.
*/
-static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
+static int cxl_pci_mbox_get(struct cxl_dev_state *cxlds)
{
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
u64 md_status;
int rc;
- mutex_lock_io(&cxlm->mbox_mutex);
+ mutex_lock_io(&cxlds->mbox_mutex);
/*
* XXX: There is some amount of ambiguity in the 2.0 version of the spec
@@ -214,13 +214,13 @@ static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
* Mailbox Interface Ready bit. Therefore, waiting for the doorbell
* to be ready is sufficient.
*/
- rc = cxl_pci_mbox_wait_for_doorbell(cxlm);
+ rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
if (rc) {
dev_warn(dev, "Mailbox interface not ready\n");
goto out;
}
- md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
+ md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
rc = -EBUSY;
@@ -249,41 +249,41 @@ static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
return 0;
out:
- mutex_unlock(&cxlm->mbox_mutex);
+ mutex_unlock(&cxlds->mbox_mutex);
return rc;
}
/**
* cxl_pci_mbox_put() - Release exclusive access to the mailbox.
- * @cxlm: The CXL memory device to communicate with.
+ * @cxlds: The device state to communicate with.
*
* Context: Any context. Expects mbox_mutex to be held.
*/
-static void cxl_pci_mbox_put(struct cxl_mem *cxlm)
+static void cxl_pci_mbox_put(struct cxl_dev_state *cxlds)
{
- mutex_unlock(&cxlm->mbox_mutex);
+ mutex_unlock(&cxlds->mbox_mutex);
}
-static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
+static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
int rc;
- rc = cxl_pci_mbox_get(cxlm);
+ rc = cxl_pci_mbox_get(cxlds);
if (rc)
return rc;
- rc = __cxl_pci_mbox_send_cmd(cxlm, cmd);
- cxl_pci_mbox_put(cxlm);
+ rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
+ cxl_pci_mbox_put(cxlds);
return rc;
}
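
The trio above is a plain acquire/operate/release wrapper: cxl_pci_mbox_get() returns with mbox_mutex held on success, and cxl_pci_mbox_put() exists to document the pairing. A minimal user-space sketch of the same shape, assuming hypothetical names and a stand-in readiness check in place of the doorbell probe:

#include <pthread.h>

struct dev_state {
    pthread_mutex_t mbox_mutex;
    int ready;
};

static int mbox_get(struct dev_state *ds)
{
    pthread_mutex_lock(&ds->mbox_mutex);
    if (!ds->ready) {
        /* Bail out with the lock dropped, mirroring the error path. */
        pthread_mutex_unlock(&ds->mbox_mutex);
        return -1;
    }
    return 0; /* success: caller now holds mbox_mutex */
}

static void mbox_put(struct dev_state *ds)
{
    pthread_mutex_unlock(&ds->mbox_mutex);
}

static int mbox_send(struct dev_state *ds)
{
    int rc = mbox_get(ds);

    if (rc)
        return rc;
    /* ... issue the command while holding exclusive access ... */
    mbox_put(ds);
    return 0;
}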
-static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm)
+static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
{
- const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
+ const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
- cxlm->mbox_send = cxl_pci_mbox_send;
- cxlm->payload_size =
+ cxlds->mbox_send = cxl_pci_mbox_send;
+ cxlds->payload_size =
1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
/*
@@ -293,15 +293,15 @@ static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm)
* there's no point in going forward. If the size is too large, there's
* no harm in soft limiting it.
*/
- cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
- if (cxlm->payload_size < 256) {
- dev_err(cxlm->dev, "Mailbox is too small (%zub)",
- cxlm->payload_size);
+ cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
+ if (cxlds->payload_size < 256) {
+ dev_err(cxlds->dev, "Mailbox is too small (%zub)",
+ cxlds->payload_size);
return -ENXIO;
}
- dev_dbg(cxlm->dev, "Mailbox payload sized %zu",
- cxlm->payload_size);
+ dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
+ cxlds->payload_size);
return 0;
}
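
For reference, a hedged sketch of the payload sizing just above: the capability register encodes the size as a power-of-two exponent, which is soft-limited to 1 MiB and rejected below the 256-byte floor. The mask layout and register value here are assumptions for illustration:

#include <stddef.h>
#include <stdio.h>

#define PAYLOAD_SIZE_MASK 0x1f  /* assumed: low 5 bits hold the exponent */
#define SZ_1M (1u << 20)

static size_t mbox_payload_size(unsigned int cap)
{
    size_t size = (size_t)1 << (cap & PAYLOAD_SIZE_MASK);

    if (size > SZ_1M)               /* no harm in soft limiting it */
        size = SZ_1M;
    return size < 256 ? 0 : size;   /* too small to be usable */
}

int main(void)
{
    printf("%zu\n", mbox_payload_size(8));  /* 1 << 8 = 256 */
    return 0;
}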
@@ -379,18 +379,18 @@ static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
return 0;
}
-static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map)
+static int cxl_map_regs(struct cxl_dev_state *cxlds, struct cxl_register_map *map)
{
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
struct pci_dev *pdev = to_pci_dev(dev);
switch (map->reg_type) {
case CXL_REGLOC_RBI_COMPONENT:
- cxl_map_component_regs(pdev, &cxlm->regs.component, map);
+ cxl_map_component_regs(pdev, &cxlds->regs.component, map);
dev_dbg(dev, "Mapping component registers...\n");
break;
case CXL_REGLOC_RBI_MEMDEV:
- cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map);
+ cxl_map_device_regs(pdev, &cxlds->regs.device_regs, map);
dev_dbg(dev, "Probing device registers...\n");
break;
default:
@@ -475,7 +475,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
- struct cxl_mem *cxlm;
+ struct cxl_dev_state *cxlds;
int rc;
/*
@@ -489,39 +489,39 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- cxlm = cxl_mem_create(&pdev->dev);
- if (IS_ERR(cxlm))
- return PTR_ERR(cxlm);
+ cxlds = cxl_dev_state_create(&pdev->dev);
+ if (IS_ERR(cxlds))
+ return PTR_ERR(cxlds);
rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
if (rc)
return rc;
- rc = cxl_map_regs(cxlm, &map);
+ rc = cxl_map_regs(cxlds, &map);
if (rc)
return rc;
- rc = cxl_pci_setup_mailbox(cxlm);
+ rc = cxl_pci_setup_mailbox(cxlds);
if (rc)
return rc;
- rc = cxl_mem_enumerate_cmds(cxlm);
+ rc = cxl_enumerate_cmds(cxlds);
if (rc)
return rc;
- rc = cxl_mem_identify(cxlm);
+ rc = cxl_dev_state_identify(cxlds);
if (rc)
return rc;
- rc = cxl_mem_create_range_info(cxlm);
+ rc = cxl_mem_create_range_info(cxlds);
if (rc)
return rc;
- cxlmd = devm_cxl_add_memdev(cxlm);
+ cxlmd = devm_cxl_add_memdev(cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+ if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
return rc;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index ceb2115981e5..b65a272a2d6d 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -19,9 +19,9 @@ static struct workqueue_struct *cxl_pmem_wq;
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
-static void clear_exclusive(void *cxlm)
+static void clear_exclusive(void *cxlds)
{
- clear_exclusive_cxl_commands(cxlm, exclusive_cmds);
+ clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}
static void unregister_nvdimm(void *nvdimm)
@@ -34,7 +34,7 @@ static int cxl_nvdimm_probe(struct device *dev)
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
unsigned long flags = 0, cmd_mask = 0;
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_nvdimm_bridge *cxl_nvb;
struct nvdimm *nvdimm;
int rc;
@@ -49,8 +49,8 @@ static int cxl_nvdimm_probe(struct device *dev)
goto out;
}
- set_exclusive_cxl_commands(cxlm, exclusive_cmds);
- rc = devm_add_action_or_reset(dev, clear_exclusive, cxlm);
+ set_exclusive_cxl_commands(cxlds, exclusive_cmds);
+ rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
if (rc)
goto out;
@@ -80,7 +80,7 @@ static struct cxl_driver cxl_nvdimm_driver = {
.id = CXL_DEVICE_NVDIMM,
};
-static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
+static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
struct nd_cmd_get_config_size *cmd,
unsigned int buf_len)
{
@@ -88,14 +88,14 @@ static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
return -EINVAL;
*cmd = (struct nd_cmd_get_config_size) {
- .config_size = cxlm->lsa_size,
- .max_xfer = cxlm->payload_size,
+ .config_size = cxlds->lsa_size,
+ .max_xfer = cxlds->payload_size,
};
return 0;
}
-static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
+static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
struct nd_cmd_get_config_data_hdr *cmd,
unsigned int buf_len)
{
@@ -112,15 +112,14 @@ static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
.length = cmd->in_length,
};
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa,
- sizeof(get_lsa), cmd->out_buf,
- cmd->in_length);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
+ sizeof(get_lsa), cmd->out_buf, cmd->in_length);
cmd->status = 0;
return rc;
}
-static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
+static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
struct nd_cmd_set_config_hdr *cmd,
unsigned int buf_len)
{
@@ -144,9 +143,9 @@ static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
};
memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_SET_LSA, set_lsa,
- struct_size(set_lsa, data, cmd->in_length),
- NULL, 0);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
+ struct_size(set_lsa, data, cmd->in_length),
+ NULL, 0);
/*
* Set "firmware" status (4-packed bytes at the end of the input
@@ -164,18 +163,18 @@ static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
if (!test_bit(cmd, &cmd_mask))
return -ENOTTY;
switch (cmd) {
case ND_CMD_GET_CONFIG_SIZE:
- return cxl_pmem_get_config_size(cxlm, buf, buf_len);
+ return cxl_pmem_get_config_size(cxlds, buf, buf_len);
case ND_CMD_GET_CONFIG_DATA:
- return cxl_pmem_get_config_data(cxlm, buf, buf_len);
+ return cxl_pmem_get_config_data(cxlds, buf, buf_len);
case ND_CMD_SET_CONFIG_DATA:
- return cxl_pmem_set_config_data(cxlm, buf, buf_len);
+ return cxl_pmem_set_config_data(cxlds, buf, buf_len);
default:
return -ENOTTY;
}
@@ -266,14 +265,24 @@ static void cxl_nvb_update_state(struct work_struct *work)
put_device(&cxl_nvb->dev);
}
+static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
+{
+ /*
+ * Take a reference that the workqueue will drop if new work
+ * gets queued.
+ */
+ get_device(&cxl_nvb->dev);
+ if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
+ put_device(&cxl_nvb->dev);
+}
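
The new helper makes the reference handoff explicit: queue_work() returns false when the work item is already pending, in which case the reference taken for this submission is redundant and dropped, because the earlier submission still holds one for the eventual execution. A kernel-style sketch of the idiom, names hypothetical:

static void schedule_state_work(struct my_bridge *b)
{
    get_device(&b->dev);                    /* ref owned by the work item */
    if (!queue_work(my_wq, &b->state_work))
        put_device(&b->dev);                /* already queued: drop ours */
}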
+
static void cxl_nvdimm_bridge_remove(struct device *dev)
{
struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
if (cxl_nvb->state == CXL_NVB_ONLINE)
cxl_nvb->state = CXL_NVB_OFFLINE;
- if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
- get_device(&cxl_nvb->dev);
+ cxl_nvdimm_bridge_state_work(cxl_nvb);
}
static int cxl_nvdimm_bridge_probe(struct device *dev)
@@ -294,8 +303,7 @@ static int cxl_nvdimm_bridge_probe(struct device *dev)
}
cxl_nvb->state = CXL_NVB_ONLINE;
- if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
- get_device(&cxl_nvb->dev);
+ cxl_nvdimm_bridge_state_work(cxl_nvb);
return 0;
}
@@ -307,6 +315,31 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = {
.id = CXL_DEVICE_NVDIMM_BRIDGE,
};
+/*
+ * Return all bridges to the CXL_NVB_NEW state to invalidate any
+ * ->state_work referring to the now destroyed cxl_pmem_wq.
+ */
+static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+
+ if (!is_cxl_nvdimm_bridge(dev))
+ return 0;
+
+ cxl_nvb = to_cxl_nvdimm_bridge(dev);
+ device_lock(dev);
+ cxl_nvb->state = CXL_NVB_NEW;
+ device_unlock(dev);
+
+ return 0;
+}
+
+static void destroy_cxl_pmem_wq(void)
+{
+ destroy_workqueue(cxl_pmem_wq);
+ bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
+}
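
Note the ordering destroy_cxl_pmem_wq() enforces: the workqueue dies first, then every bridge is walked back to CXL_NVB_NEW so no surviving ->state_work can reference the freed queue. A kernel-style sketch of the bus_for_each_dev() callback shape, names hypothetical:

static int reset_one_bridge(struct device *dev, void *data)
{
    struct my_bridge *b;

    if (!is_my_bridge(dev))
        return 0;           /* not ours, keep walking */

    b = to_my_bridge(dev);
    device_lock(dev);       /* serialize against probe/remove */
    b->state = STATE_NEW;
    device_unlock(dev);
    return 0;               /* nonzero would abort the walk early */
}

/* bus_for_each_dev(&my_bus_type, NULL, NULL, reset_one_bridge); */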
+
static __init int cxl_pmem_init(void)
{
int rc;
@@ -332,7 +365,7 @@ static __init int cxl_pmem_init(void)
err_nvdimm:
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
- destroy_workqueue(cxl_pmem_wq);
+ destroy_cxl_pmem_wq();
return rc;
}
@@ -340,7 +373,7 @@ static __exit void cxl_pmem_exit(void)
{
cxl_driver_unregister(&cxl_nvdimm_driver);
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
- destroy_workqueue(cxl_pmem_wq);
+ destroy_cxl_pmem_wq();
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index d2834c2cfa10..5fdf269a822e 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -1,8 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config DAX_DRIVER
- select DAX
- bool
-
menuconfig DAX
tristate "DAX: direct access to differentiated memory"
select SRCU
@@ -70,13 +66,4 @@ config DEV_DAX_KMEM
Say N if unsure.
-config DEV_DAX_PMEM_COMPAT
- tristate "PMEM DAX: support the deprecated /sys/class/dax interface"
- depends on m && DEV_DAX_PMEM=m
- default DEV_DAX_PMEM
- help
- Older versions of the libdaxctl library expect to find all
- device-dax instances under /sys/class/dax. If libdaxctl in
- your distribution is older than v58 say M, otherwise say N.
-
endif
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index 9d4ba672d305..90a56ca3b345 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -2,10 +2,11 @@
obj-$(CONFIG_DAX) += dax.o
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o
+obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
dax-y := super.o
dax-y += bus.o
device_dax-y := device.o
+dax_pmem-y := pmem.o
-obj-y += pmem/
obj-y += hmem/
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 6cc4da4c713d..1dad813ee4a6 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -10,8 +10,6 @@
#include "dax-private.h"
#include "bus.h"
-static struct class *dax_class;
-
static DEFINE_MUTEX(dax_bus_lock);
#define DAX_NAME_LEN 30
@@ -129,11 +127,35 @@ ATTRIBUTE_GROUPS(dax_drv);
static int dax_bus_match(struct device *dev, struct device_driver *drv);
+/*
+ * Static dax regions are regions created by an external subsystem, such as
+ * nvdimm, where a single range is assigned. Their boundaries are defined by
+ * the external subsystem and are usually limited to one physical memory
+ * range. For example, for PMEM they are usually defined by NVDIMM namespace
+ * boundaries (i.e. a single contiguous range).
+ *
+ * On dynamic dax regions, the assigned region can be partitioned by the dax
+ * core into multiple subdivisions. A subdivision is represented by one
+ * /dev/daxN.M device composed of one or more potentially discontiguous ranges.
+ *
+ * When allocating a dax region, drivers must set whether it's static
+ * (IORESOURCE_DAX_STATIC). On static dax devices, the @pgmap is pre-assigned
+ * to dax core when calling devm_create_dev_dax(), whereas in dynamic dax
+ * devices it is NULL but afterwards allocated by dax core on device ->probe().
+ * Care is needed to make sure that dynamic dax devices are torn down with a
+ * cleared @pgmap field (see kill_dev_dax()).
+ */
static bool is_static(struct dax_region *dax_region)
{
return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}
+bool static_dev_dax(struct dev_dax *dev_dax)
+{
+ return is_static(dev_dax->region);
+}
+EXPORT_SYMBOL_GPL(static_dev_dax);
+
static u64 dev_dax_size(struct dev_dax *dev_dax)
{
u64 size = 0;
@@ -363,6 +385,14 @@ void kill_dev_dax(struct dev_dax *dev_dax)
kill_dax(dax_dev);
unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+
+ /*
+ * Dynamic dax regions have their pgmap allocated via devm_kzalloc()
+ * and thus freed by devm. Clear the pgmap to avoid stale pgmap
+ * ranges on probe() from previous reconfigurations of region devices.
+ */
+ if (!static_dev_dax(dev_dax))
+ dev_dax->pgmap = NULL;
}
EXPORT_SYMBOL_GPL(kill_dev_dax);
@@ -1323,14 +1353,17 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
}
/*
- * No 'host' or dax_operations since there is no access to this
- * device outside of mmap of the resulting character device.
+ * No dax_operations since there is no access to this device outside of
+ * mmap of the resulting character device.
*/
- dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC);
+ dax_dev = alloc_dax(dev_dax, NULL);
if (IS_ERR(dax_dev)) {
rc = PTR_ERR(dax_dev);
goto err_alloc_dax;
}
+ set_dax_synchronous(dax_dev);
+ set_dax_nocache(dax_dev);
+ set_dax_nomc(dax_dev);
/* a device_dax instance is dead while the driver is not attached */
kill_dax(dax_dev);
@@ -1343,10 +1376,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
inode = dax_inode(dax_dev);
dev->devt = inode->i_rdev;
- if (data->subsys == DEV_DAX_BUS)
- dev->bus = &dax_bus_type;
- else
- dev->class = dax_class;
+ dev->bus = &dax_bus_type;
dev->parent = parent;
dev->type = &dev_dax_type;
@@ -1445,22 +1475,10 @@ EXPORT_SYMBOL_GPL(dax_driver_unregister);
int __init dax_bus_init(void)
{
- int rc;
-
- if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) {
- dax_class = class_create(THIS_MODULE, "dax");
- if (IS_ERR(dax_class))
- return PTR_ERR(dax_class);
- }
-
- rc = bus_register(&dax_bus_type);
- if (rc)
- class_destroy(dax_class);
- return rc;
+ return bus_register(&dax_bus_type);
}
void __exit dax_bus_exit(void)
{
bus_unregister(&dax_bus_type);
- class_destroy(dax_class);
}
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 1e946ad7780a..fbb940293d6d 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -16,24 +16,15 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct range *range, int target_node, unsigned int align,
unsigned long flags);
-enum dev_dax_subsys {
- DEV_DAX_BUS = 0, /* zeroed dev_dax_data picks this by default */
- DEV_DAX_CLASS,
-};
-
struct dev_dax_data {
struct dax_region *dax_region;
struct dev_pagemap *pgmap;
- enum dev_dax_subsys subsys;
resource_size_t size;
int id;
};
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data);
-/* to be deleted when DEV_DAX_CLASS is removed */
-struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys);
-
struct dax_device_driver {
struct device_driver drv;
struct list_head ids;
@@ -48,10 +39,7 @@ int __dax_driver_register(struct dax_device_driver *dax_drv,
__dax_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
void dax_driver_unregister(struct dax_device_driver *dax_drv);
void kill_dev_dax(struct dev_dax *dev_dax);
-
-#if IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)
-int dev_dax_probe(struct dev_dax *dev_dax);
-#endif
+bool static_dev_dax(struct dev_dax *dev_dax);
/*
* While run_dax() is potentially a generic operation that could be
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index dd8222a42808..d33a0613ed0c 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -73,11 +73,39 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
return -1;
}
+static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+ unsigned long fault_size)
+{
+ unsigned long i, nr_pages = fault_size / PAGE_SIZE;
+ struct file *filp = vmf->vma->vm_file;
+ struct dev_dax *dev_dax = filp->private_data;
+ pgoff_t pgoff;
+
+ /* mapping is only set on the head */
+ if (dev_dax->pgmap->vmemmap_shift)
+ nr_pages = 1;
+
+ pgoff = linear_page_index(vmf->vma,
+ ALIGN(vmf->address, fault_size));
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
+
+ page = compound_head(page);
+ if (page->mapping)
+ continue;
+
+ page->mapping = filp->f_mapping;
+ page->index = pgoff + i;
+ }
+}
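
dax_set_mapping() recovers the file page offset from the faulting address via linear_page_index(). A plain-C sketch of that arithmetic with illustrative addresses:

#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long linear_page_index(unsigned long vm_start,
                                       unsigned long vm_pgoff,
                                       unsigned long address)
{
    return ((address - vm_start) >> PAGE_SHIFT) + vm_pgoff;
}

int main(void)
{
    /* Mapping begins at 0x7f0000000000 and covers file pages from 16. */
    unsigned long idx = linear_page_index(0x7f0000000000UL, 16,
                                          0x7f0000003000UL);

    printf("%lu\n", idx);   /* 3 pages in, so 16 + 3 = 19 */
    return 0;
}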
+
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
- struct vm_fault *vmf, pfn_t *pfn)
+ struct vm_fault *vmf)
{
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
+ pfn_t pfn;
unsigned int fault_size = PAGE_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
@@ -98,18 +126,21 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
- return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
+ dax_set_mapping(vmf, pfn, fault_size);
+
+ return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
- struct vm_fault *vmf, pfn_t *pfn)
+ struct vm_fault *vmf)
{
unsigned long pmd_addr = vmf->address & PMD_MASK;
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
+ pfn_t pfn;
unsigned int fault_size = PMD_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
@@ -138,19 +169,22 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
- return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
+ dax_set_mapping(vmf, pfn, fault_size);
+
+ return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
- struct vm_fault *vmf, pfn_t *pfn)
+ struct vm_fault *vmf)
{
unsigned long pud_addr = vmf->address & PUD_MASK;
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
+ pfn_t pfn;
unsigned int fault_size = PUD_SIZE;
@@ -180,13 +214,15 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
- return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
+ dax_set_mapping(vmf, pfn, fault_size);
+
+ return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
- struct vm_fault *vmf, pfn_t *pfn)
+ struct vm_fault *vmf)
{
return VM_FAULT_FALLBACK;
}
@@ -196,10 +232,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
enum page_entry_size pe_size)
{
struct file *filp = vmf->vma->vm_file;
- unsigned long fault_size;
vm_fault_t rc = VM_FAULT_SIGBUS;
int id;
- pfn_t pfn;
struct dev_dax *dev_dax = filp->private_data;
dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
@@ -209,43 +243,18 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
id = dax_read_lock();
switch (pe_size) {
case PE_SIZE_PTE:
- fault_size = PAGE_SIZE;
- rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
+ rc = __dev_dax_pte_fault(dev_dax, vmf);
break;
case PE_SIZE_PMD:
- fault_size = PMD_SIZE;
- rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
+ rc = __dev_dax_pmd_fault(dev_dax, vmf);
break;
case PE_SIZE_PUD:
- fault_size = PUD_SIZE;
- rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
+ rc = __dev_dax_pud_fault(dev_dax, vmf);
break;
default:
rc = VM_FAULT_SIGBUS;
}
- if (rc == VM_FAULT_NOPAGE) {
- unsigned long i;
- pgoff_t pgoff;
-
- /*
- * In the device-dax case the only possibility for a
- * VM_FAULT_NOPAGE result is when device-dax capacity is
- * mapped. No need to consider the zero page, or racing
- * conflicting mappings.
- */
- pgoff = linear_page_index(vmf->vma, vmf->address
- & ~(fault_size - 1));
- for (i = 0; i < fault_size / PAGE_SIZE; i++) {
- struct page *page;
-
- page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
- if (page->mapping)
- continue;
- page->mapping = filp->f_mapping;
- page->index = pgoff + i;
- }
- }
dax_read_unlock(id);
return rc;
@@ -398,17 +407,34 @@ int dev_dax_probe(struct dev_dax *dev_dax)
void *addr;
int rc, i;
- pgmap = dev_dax->pgmap;
- if (dev_WARN_ONCE(dev, pgmap && dev_dax->nr_range > 1,
- "static pgmap / multi-range device conflict\n"))
- return -EINVAL;
+ if (static_dev_dax(dev_dax)) {
+ if (dev_dax->nr_range > 1) {
+ dev_warn(dev,
+ "static pgmap / multi-range device conflict\n");
+ return -EINVAL;
+ }
- if (!pgmap) {
- pgmap = devm_kzalloc(dev, sizeof(*pgmap) + sizeof(struct range)
- * (dev_dax->nr_range - 1), GFP_KERNEL);
+ pgmap = dev_dax->pgmap;
+ } else {
+ if (dev_dax->pgmap) {
+ dev_warn(dev,
+ "dynamic-dax with pre-populated page map\n");
+ return -EINVAL;
+ }
+
+ pgmap = devm_kzalloc(dev,
+ struct_size(pgmap, ranges, dev_dax->nr_range - 1),
+ GFP_KERNEL);
if (!pgmap)
return -ENOMEM;
+
pgmap->nr_range = dev_dax->nr_range;
+ dev_dax->pgmap = pgmap;
+
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct range *range = &dev_dax->ranges[i].range;
+ pgmap->ranges[i] = *range;
+ }
}
for (i = 0; i < dev_dax->nr_range; i++) {
@@ -420,12 +446,12 @@ int dev_dax_probe(struct dev_dax *dev_dax)
i, range->start, range->end);
return -EBUSY;
}
- /* don't update the range for static pgmap */
- if (!dev_dax->pgmap)
- pgmap->ranges[i] = *range;
}
pgmap->type = MEMORY_DEVICE_GENERIC;
+ if (dev_dax->align > PAGE_SIZE)
+ pgmap->vmemmap_shift =
+ order_base_2(dev_dax->align >> PAGE_SHIFT);
addr = devm_memremap_pages(dev, pgmap);
if (IS_ERR(addr))
return PTR_ERR(addr);
@@ -433,11 +459,7 @@ int dev_dax_probe(struct dev_dax *dev_dax)
inode = dax_inode(dax_dev);
cdev = inode->i_cdev;
cdev_init(cdev, &dax_fops);
- if (dev->class) {
- /* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
- cdev->owner = dev->parent->driver->owner;
- } else
- cdev->owner = dev->driver->owner;
+ cdev->owner = dev->driver->owner;
cdev_set_parent(cdev, &dev->kobj);
rc = cdev_add(cdev, dev->devt, 1);
if (rc)
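
The probe rework above sizes the dynamic pgmap with struct_size(pgmap, ranges, nr_range - 1), the minus-one reflecting the range already embedded in the struct. A user-space analogue of that flexible-array allocation, types hypothetical:

#include <stdlib.h>

struct range { unsigned long start, end; };

struct pagemap {
    int nr_range;
    struct range ranges[1]; /* first range embedded in the struct */
};

static struct pagemap *pagemap_alloc(int nr_range)
{
    struct pagemap *p = calloc(1, sizeof(*p) +
                                  sizeof(struct range) * (nr_range - 1));

    if (p)
        p->nr_range = nr_range;
    return p;
}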
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem.c
index 062e8bc14223..f050ea78bb83 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem.c
@@ -3,11 +3,11 @@
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
-#include "../../nvdimm/pfn.h"
-#include "../../nvdimm/nd.h"
-#include "../bus.h"
+#include "../nvdimm/pfn.h"
+#include "../nvdimm/nd.h"
+#include "bus.h"
-struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
+static struct dev_dax *__dax_pmem_probe(struct device *dev)
{
struct range range;
int rc, id, region_id;
@@ -63,7 +63,6 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
.dax_region = dax_region,
.id = id,
.pgmap = &pgmap,
- .subsys = subsys,
.size = range_len(&range),
};
dev_dax = devm_create_dev_dax(&data);
@@ -73,7 +72,32 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
return dev_dax;
}
-EXPORT_SYMBOL_GPL(__dax_pmem_probe);
+
+static int dax_pmem_probe(struct device *dev)
+{
+ return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev));
+}
+
+static struct nd_device_driver dax_pmem_driver = {
+ .probe = dax_pmem_probe,
+ .drv = {
+ .name = "dax_pmem",
+ },
+ .type = ND_DRIVER_DAX_PMEM,
+};
+
+static int __init dax_pmem_init(void)
+{
+ return nd_driver_register(&dax_pmem_driver);
+}
+module_init(dax_pmem_init);
+
+static void __exit dax_pmem_exit(void)
+{
+ driver_unregister(&dax_pmem_driver.drv);
+}
+module_exit(dax_pmem_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
+MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile
index 010269f61d41..191c31f0d4f0 100644
--- a/drivers/dax/pmem/Makefile
+++ b/drivers/dax/pmem/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o
-obj-$(CONFIG_DEV_DAX_PMEM_COMPAT) += dax_pmem_compat.o
dax_pmem-y := pmem.o
dax_pmem_core-y := core.o
diff --git a/drivers/dax/pmem/compat.c b/drivers/dax/pmem/compat.c
deleted file mode 100644
index d81dc35fd65d..000000000000
--- a/drivers/dax/pmem/compat.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
-#include <linux/percpu-refcount.h>
-#include <linux/memremap.h>
-#include <linux/module.h>
-#include <linux/pfn_t.h>
-#include <linux/nd.h>
-#include "../bus.h"
-
-/* we need the private definitions to implement compat suport */
-#include "../dax-private.h"
-
-static int dax_pmem_compat_probe(struct device *dev)
-{
- struct dev_dax *dev_dax = __dax_pmem_probe(dev, DEV_DAX_CLASS);
- int rc;
-
- if (IS_ERR(dev_dax))
- return PTR_ERR(dev_dax);
-
- if (!devres_open_group(&dev_dax->dev, dev_dax, GFP_KERNEL))
- return -ENOMEM;
-
- device_lock(&dev_dax->dev);
- rc = dev_dax_probe(dev_dax);
- device_unlock(&dev_dax->dev);
-
- devres_close_group(&dev_dax->dev, dev_dax);
- if (rc)
- devres_release_group(&dev_dax->dev, dev_dax);
-
- return rc;
-}
-
-static int dax_pmem_compat_release(struct device *dev, void *data)
-{
- device_lock(dev);
- devres_release_group(dev, to_dev_dax(dev));
- device_unlock(dev);
-
- return 0;
-}
-
-static void dax_pmem_compat_remove(struct device *dev)
-{
- device_for_each_child(dev, NULL, dax_pmem_compat_release);
-}
-
-static struct nd_device_driver dax_pmem_compat_driver = {
- .probe = dax_pmem_compat_probe,
- .remove = dax_pmem_compat_remove,
- .drv = {
- .name = "dax_pmem_compat",
- },
- .type = ND_DRIVER_DAX_PMEM,
-};
-
-static int __init dax_pmem_compat_init(void)
-{
- return nd_driver_register(&dax_pmem_compat_driver);
-}
-module_init(dax_pmem_compat_init);
-
-static void __exit dax_pmem_compat_exit(void)
-{
- driver_unregister(&dax_pmem_compat_driver.drv);
-}
-module_exit(dax_pmem_compat_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Intel Corporation");
-MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c
index 0ae4238a0ef8..dfe91a2990fe 100644
--- a/drivers/dax/pmem/pmem.c
+++ b/drivers/dax/pmem/pmem.c
@@ -7,34 +7,4 @@
#include <linux/nd.h>
#include "../bus.h"
-static int dax_pmem_probe(struct device *dev)
-{
- return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev, DEV_DAX_BUS));
-}
-static struct nd_device_driver dax_pmem_driver = {
- .probe = dax_pmem_probe,
- .drv = {
- .name = "dax_pmem",
- },
- .type = ND_DRIVER_DAX_PMEM,
-};
-
-static int __init dax_pmem_init(void)
-{
- return nd_driver_register(&dax_pmem_driver);
-}
-module_init(dax_pmem_init);
-
-static void __exit dax_pmem_exit(void)
-{
- driver_unregister(&dax_pmem_driver.drv);
-}
-module_exit(dax_pmem_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Intel Corporation");
-#if !IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)
-/* For compat builds, don't load this module by default */
-MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
-#endif
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index b882cf8106ea..e3029389d809 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -7,10 +7,8 @@
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
-#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
-#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
@@ -21,15 +19,12 @@
* struct dax_device - anchor object for dax services
* @inode: core vfs
* @cdev: optional character interface for "device dax"
- * @host: optional name for lookups where the device path is not available
* @private: dax driver private data
* @flags: state and boolean properties
*/
struct dax_device {
- struct hlist_node list;
struct inode inode;
struct cdev cdev;
- const char *host;
void *private;
unsigned long flags;
const struct dax_operations *ops;
@@ -42,10 +37,6 @@ static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
-#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
-static struct hlist_head dax_host_list[DAX_HASH_SIZE];
-static DEFINE_SPINLOCK(dax_host_lock);
-
int dax_read_lock(void)
{
return srcu_read_lock(&dax_srcu);
@@ -58,169 +49,54 @@ void dax_read_unlock(int id)
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
-static int dax_host_hash(const char *host)
-{
- return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
-}
-
-#ifdef CONFIG_BLOCK
+#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
#include <linux/blkdev.h>
-int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
- pgoff_t *pgoff)
+static DEFINE_XARRAY(dax_hosts);
+
+int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
- sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
- phys_addr_t phys_off = (start_sect + sector) * 512;
+ return xa_insert(&dax_hosts, (unsigned long)disk, dax_dev, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(dax_add_host);
- if (pgoff)
- *pgoff = PHYS_PFN(phys_off);
- if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
- return -EINVAL;
- return 0;
+void dax_remove_host(struct gendisk *disk)
+{
+ xa_erase(&dax_hosts, (unsigned long)disk);
}
-EXPORT_SYMBOL(bdev_dax_pgoff);
+EXPORT_SYMBOL_GPL(dax_remove_host);
-#if IS_ENABLED(CONFIG_FS_DAX)
/**
- * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
- * @host: alternate name for the device registered by a dax driver
+ * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
+ * @bdev: block device to find a dax_device for
+ * @start_off: returns the byte offset into the dax_device that @bdev starts
*/
-static struct dax_device *dax_get_by_host(const char *host)
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
{
- struct dax_device *dax_dev, *found = NULL;
- int hash, id;
-
- if (!host)
- return NULL;
-
- hash = dax_host_hash(host);
-
- id = dax_read_lock();
- spin_lock(&dax_host_lock);
- hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
- if (!dax_alive(dax_dev)
- || strcmp(host, dax_dev->host) != 0)
- continue;
-
- if (igrab(&dax_dev->inode))
- found = dax_dev;
- break;
- }
- spin_unlock(&dax_host_lock);
- dax_read_unlock(id);
-
- return found;
-}
+ struct dax_device *dax_dev;
+ u64 part_size;
+ int id;
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
-{
if (!blk_queue_dax(bdev->bd_disk->queue))
return NULL;
- return dax_get_by_host(bdev->bd_disk->disk_name);
-}
-EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
-
-bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
-{
- bool dax_enabled = false;
- pgoff_t pgoff, pgoff_end;
- void *kaddr, *end_kaddr;
- pfn_t pfn, end_pfn;
- sector_t last_page;
- long len, len2;
- int err, id;
-
- if (blocksize != PAGE_SIZE) {
- pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
- return false;
- }
-
- if (!dax_dev) {
- pr_debug("%pg: error: dax unsupported by block device\n", bdev);
- return false;
- }
- err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
- if (err) {
+ *start_off = get_start_sect(bdev) * SECTOR_SIZE;
+ part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE;
+ if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) {
pr_info("%pg: error: unaligned partition for dax\n", bdev);
- return false;
- }
-
- last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
- err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
- if (err) {
- pr_info("%pg: error: unaligned partition for dax\n", bdev);
- return false;
+ return NULL;
}
id = dax_read_lock();
- len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
- len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
-
- if (len < 1 || len2 < 1) {
- pr_info("%pg: error: dax access failed (%ld)\n",
- bdev, len < 1 ? len : len2);
- dax_read_unlock(id);
- return false;
- }
-
- if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
- /*
- * An arch that has enabled the pmem api should also
- * have its drivers support pfn_t_devmap()
- *
- * This is a developer warning and should not trigger in
- * production. dax_flush() will crash since it depends
- * on being able to do (page_address(pfn_to_page())).
- */
- WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
- dax_enabled = true;
- } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
- struct dev_pagemap *pgmap, *end_pgmap;
-
- pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
- end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
- if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
- && pfn_t_to_page(pfn)->pgmap == pgmap
- && pfn_t_to_page(end_pfn)->pgmap == pgmap
- && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
- && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
- dax_enabled = true;
- put_dev_pagemap(pgmap);
- put_dev_pagemap(end_pgmap);
-
- }
+ dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
+ if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
+ dax_dev = NULL;
dax_read_unlock(id);
- if (!dax_enabled) {
- pr_info("%pg: error: dax support not enabled\n", bdev);
- return false;
- }
- return true;
-}
-EXPORT_SYMBOL_GPL(generic_fsdax_supported);
-
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len)
-{
- bool ret = false;
- int id;
-
- if (!dax_dev)
- return false;
-
- id = dax_read_lock();
- if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
- ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
- start, len);
- dax_read_unlock(id);
- return ret;
+ return dax_dev;
}
-EXPORT_SYMBOL_GPL(dax_supported);
-#endif /* CONFIG_FS_DAX */
-#endif /* CONFIG_BLOCK */
+EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
+#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
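
The XArray conversion collapses the old host-name hash into one pointer-keyed map: the gendisk pointer, cast to unsigned long, is the index, so registration and lookup are one call each. A kernel-style sketch of the resulting add/find pair, abbreviated from the code above:

static DEFINE_XARRAY(hosts);

static int host_add(struct gendisk *disk, struct dax_device *dax_dev)
{
    return xa_insert(&hosts, (unsigned long)disk, dax_dev, GFP_KERNEL);
}

static struct dax_device *host_find(struct gendisk *disk)
{
    return xa_load(&hosts, (unsigned long)disk);
}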
enum dax_device_flags {
/* !alive + rcu grace period == no new operations / mappings */
@@ -229,6 +105,10 @@ enum dax_device_flags {
DAXDEV_WRITE_CACHE,
/* flag to check if device supports synchronous flush */
DAXDEV_SYNC,
+ /* do not leave the caches dirty after writes */
+ DAXDEV_NOCACHE,
+ /* handle CPU fetch exceptions during reads */
+ DAXDEV_NOMC,
};
/**
@@ -270,9 +150,15 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
if (!dax_alive(dax_dev))
return 0;
- return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ /*
+ * The userspace address for the memory copy has already been validated
+ * via access_ok() in vfs_write, so use the 'no check' version to bypass
+ * the HARDENED_USERCOPY overhead.
+ */
+ if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags))
+ return _copy_from_iter_flushcache(addr, bytes, i);
+ return _copy_from_iter(addr, bytes, i);
}
-EXPORT_SYMBOL_GPL(dax_copy_from_iter);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
@@ -280,9 +166,15 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
if (!dax_alive(dax_dev))
return 0;
- return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+ /*
+ * The userspace address for the memory copy has already been validated
+ * via access_ok() in vfs_read, so use the 'no check' version to bypass
+ * the HARDENED_USERCOPY overhead.
+ */
+ if (test_bit(DAXDEV_NOMC, &dax_dev->flags))
+ return _copy_mc_to_iter(addr, bytes, i);
+ return _copy_to_iter(addr, bytes, i);
}
-EXPORT_SYMBOL_GPL(dax_copy_to_iter);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
@@ -332,17 +224,29 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev)
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
-bool __dax_synchronous(struct dax_device *dax_dev)
+bool dax_synchronous(struct dax_device *dax_dev)
{
return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
-EXPORT_SYMBOL_GPL(__dax_synchronous);
+EXPORT_SYMBOL_GPL(dax_synchronous);
-void __set_dax_synchronous(struct dax_device *dax_dev)
+void set_dax_synchronous(struct dax_device *dax_dev)
{
set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
-EXPORT_SYMBOL_GPL(__set_dax_synchronous);
+EXPORT_SYMBOL_GPL(set_dax_synchronous);
+
+void set_dax_nocache(struct dax_device *dax_dev)
+{
+ set_bit(DAXDEV_NOCACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_nocache);
+
+void set_dax_nomc(struct dax_device *dax_dev)
+{
+ set_bit(DAXDEV_NOMC, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_nomc);
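
The DAXDEV_NOCACHE/DAXDEV_NOMC bits replace the per-device ->copy_{from,to}_iter operations with flag tests on the I/O path. A trivial plain-C sketch of that flag plumbing, values illustrative:

#include <stdio.h>

enum { F_SYNC, F_NOCACHE, F_NOMC };

int main(void)
{
    unsigned long flags = 0;

    flags |= 1UL << F_NOCACHE;      /* set_dax_nocache() analogue */

    if (flags & (1UL << F_NOCACHE))
        puts("write path: use the flushcache copy");
    if (!(flags & (1UL << F_NOMC)))
        puts("read path: plain copy, no machine-check handling");
    return 0;
}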
bool dax_alive(struct dax_device *dax_dev)
{
@@ -363,12 +267,7 @@ void kill_dax(struct dax_device *dax_dev)
return;
clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
-
synchronize_srcu(&dax_srcu);
-
- spin_lock(&dax_host_lock);
- hlist_del_init(&dax_dev->list);
- spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);
@@ -400,8 +299,6 @@ static struct dax_device *to_dax_dev(struct inode *inode)
static void dax_free_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
- kfree(dax_dev->host);
- dax_dev->host = NULL;
if (inode->i_rdev)
ida_simple_remove(&dax_minor_ida, iminor(inode));
kmem_cache_free(dax_cache, dax_dev);
@@ -476,65 +373,30 @@ static struct dax_device *dax_dev_get(dev_t devt)
return dax_dev;
}
-static void dax_add_host(struct dax_device *dax_dev, const char *host)
-{
- int hash;
-
- /*
- * Unconditionally init dax_dev since it's coming from a
- * non-zeroed slab cache
- */
- INIT_HLIST_NODE(&dax_dev->list);
- dax_dev->host = host;
- if (!host)
- return;
-
- hash = dax_host_hash(host);
- spin_lock(&dax_host_lock);
- hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
- spin_unlock(&dax_host_lock);
-}
-
-struct dax_device *alloc_dax(void *private, const char *__host,
- const struct dax_operations *ops, unsigned long flags)
+struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
{
struct dax_device *dax_dev;
- const char *host;
dev_t devt;
int minor;
- if (ops && !ops->zero_page_range) {
- pr_debug("%s: error: device does not provide dax"
- " operation zero_page_range()\n",
- __host ? __host : "Unknown");
+ if (WARN_ON_ONCE(ops && !ops->zero_page_range))
return ERR_PTR(-EINVAL);
- }
-
- host = kstrdup(__host, GFP_KERNEL);
- if (__host && !host)
- return ERR_PTR(-ENOMEM);
minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
if (minor < 0)
- goto err_minor;
+ return ERR_PTR(-ENOMEM);
devt = MKDEV(MAJOR(dax_devt), minor);
dax_dev = dax_dev_get(devt);
if (!dax_dev)
goto err_dev;
- dax_add_host(dax_dev, host);
dax_dev->ops = ops;
dax_dev->private = private;
- if (flags & DAXDEV_F_SYNC)
- set_dax_synchronous(dax_dev);
-
return dax_dev;
err_dev:
ida_simple_remove(&dax_minor_ida, minor);
- err_minor:
- kfree(host);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index e87d01c0b76a..87eb2b837e68 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -132,6 +132,15 @@ config ARM_RK3399_DMC_DEVFREQ
It sets the frequency for the memory controller and reads the usage counts
from hardware.
+config ARM_SUN8I_A33_MBUS_DEVFREQ
+ tristate "sun8i/sun50i MBUS DEVFREQ Driver"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on COMMON_CLK
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ help
+ This adds the DEVFREQ driver for the MBUS controller in some
+ Allwinner sun8i (A33 through H3) and sun50i (A64 and H5) SoCs.
+
source "drivers/devfreq/event/Kconfig"
endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index a16333ea7034..0b6be92a25d9 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o
obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
+obj-$(CONFIG_ARM_SUN8I_A33_MBUS_DEVFREQ) += sun8i-a33-mbus.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o
# DEVFREQ Event Drivers
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 7906220d025c..a525a609dfc6 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -382,8 +382,8 @@ static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
if (devfreq_update_status(devfreq, new_freq))
- dev_err(&devfreq->dev,
- "Couldn't update frequency transition information.\n");
+ dev_warn(&devfreq->dev,
+ "Couldn't update frequency transition information.\n");
devfreq->previous_freq = new_freq;
diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c
new file mode 100644
index 000000000000..13d32213139f
--- /dev/null
+++ b/drivers/devfreq/sun8i-a33-mbus.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2020-2021 Samuel Holland <samuel@sholland.org>
+//
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define MBUS_CR 0x0000
+#define MBUS_CR_GET_DRAM_TYPE(x) (((x) >> 16) & 0x7)
+#define MBUS_CR_DRAM_TYPE_DDR2 2
+#define MBUS_CR_DRAM_TYPE_DDR3 3
+#define MBUS_CR_DRAM_TYPE_DDR4 4
+#define MBUS_CR_DRAM_TYPE_LPDDR2 6
+#define MBUS_CR_DRAM_TYPE_LPDDR3 7
+
+#define MBUS_TMR 0x000c
+#define MBUS_TMR_PERIOD(x) ((x) - 1)
+
+#define MBUS_PMU_CFG 0x009c
+#define MBUS_PMU_CFG_PERIOD(x) (((x) - 1) << 16)
+#define MBUS_PMU_CFG_UNIT (0x3 << 1)
+#define MBUS_PMU_CFG_UNIT_B (0x0 << 1)
+#define MBUS_PMU_CFG_UNIT_KB (0x1 << 1)
+#define MBUS_PMU_CFG_UNIT_MB (0x2 << 1)
+#define MBUS_PMU_CFG_ENABLE (0x1 << 0)
+
+#define MBUS_PMU_BWCR(n) (0x00a0 + (0x04 * (n)))
+
+#define MBUS_TOTAL_BWCR MBUS_PMU_BWCR(5)
+#define MBUS_TOTAL_BWCR_H616 MBUS_PMU_BWCR(13)
+
+#define MBUS_MDFSCR 0x0100
+#define MBUS_MDFSCR_BUFFER_TIMING (0x1 << 15)
+#define MBUS_MDFSCR_PAD_HOLD (0x1 << 13)
+#define MBUS_MDFSCR_BYPASS (0x1 << 4)
+#define MBUS_MDFSCR_MODE (0x1 << 1)
+#define MBUS_MDFSCR_MODE_DFS (0x0 << 1)
+#define MBUS_MDFSCR_MODE_CFS (0x1 << 1)
+#define MBUS_MDFSCR_START (0x1 << 0)
+
+#define MBUS_MDFSMRMR 0x0108
+
+#define DRAM_PWRCTL 0x0004
+#define DRAM_PWRCTL_SELFREF_EN (0x1 << 0)
+
+#define DRAM_RFSHTMG 0x0090
+#define DRAM_RFSHTMG_TREFI(x) ((x) << 16)
+#define DRAM_RFSHTMG_TRFC(x) ((x) << 0)
+
+#define DRAM_VTFCR 0x00b8
+#define DRAM_VTFCR_VTF_ENABLE (0x3 << 8)
+
+#define DRAM_ODTMAP 0x0120
+
+#define DRAM_DX_MAX 4
+
+#define DRAM_DXnGCR0(n) (0x0344 + 0x80 * (n))
+#define DRAM_DXnGCR0_DXODT (0x3 << 4)
+#define DRAM_DXnGCR0_DXODT_DYNAMIC (0x0 << 4)
+#define DRAM_DXnGCR0_DXODT_ENABLED (0x1 << 4)
+#define DRAM_DXnGCR0_DXODT_DISABLED (0x2 << 4)
+#define DRAM_DXnGCR0_DXEN (0x1 << 0)
+
+struct sun8i_a33_mbus_variant {
+ u32 min_dram_divider;
+ u32 max_dram_divider;
+ u32 odt_freq_mhz;
+};
+
+struct sun8i_a33_mbus {
+ const struct sun8i_a33_mbus_variant *variant;
+ void __iomem *reg_dram;
+ void __iomem *reg_mbus;
+ struct clk *clk_bus;
+ struct clk *clk_dram;
+ struct clk *clk_mbus;
+ struct devfreq *devfreq_dram;
+ struct devfreq_simple_ondemand_data gov_data;
+ struct devfreq_dev_profile profile;
+ u32 data_width;
+ u32 nominal_bw;
+ u32 odtmap;
+ u32 tREFI_ns;
+ u32 tRFC_ns;
+ unsigned long freq_table[];
+};
+
+/*
+ * The unit for this value is (MBUS clock cycles / MBUS_TMR_PERIOD). When
+ * MBUS_TMR_PERIOD is programmed to match the MBUS clock frequency in MHz, as
+ * it is during DRAM init and during probe, the resulting unit is microseconds.
+ */
+static int pmu_period = 50000;
+module_param(pmu_period, int, 0644);
+MODULE_PARM_DESC(pmu_period, "Bandwidth measurement period (microseconds)");
+
+static u32 sun8i_a33_mbus_get_peak_bw(struct sun8i_a33_mbus *priv)
+{
+ /* Returns the peak transfer (in KiB) during any single PMU period. */
+ return readl_relaxed(priv->reg_mbus + MBUS_TOTAL_BWCR);
+}
+
+static void sun8i_a33_mbus_restart_pmu_counters(struct sun8i_a33_mbus *priv)
+{
+ u32 pmu_cfg = MBUS_PMU_CFG_PERIOD(pmu_period) | MBUS_PMU_CFG_UNIT_KB;
+
+ /* All PMU counters are cleared on a disable->enable transition. */
+ writel_relaxed(pmu_cfg,
+ priv->reg_mbus + MBUS_PMU_CFG);
+ writel_relaxed(pmu_cfg | MBUS_PMU_CFG_ENABLE,
+ priv->reg_mbus + MBUS_PMU_CFG);
+}
+
+static void sun8i_a33_mbus_update_nominal_bw(struct sun8i_a33_mbus *priv,
+ u32 ddr_freq_mhz)
+{
+ /*
+ * Nominal bandwidth (KiB per PMU period):
+ *
+ * DDR transfers microseconds KiB
+ * ------------- * ------------ * --------
+ * microsecond PMU period transfer
+ */
+ priv->nominal_bw = ddr_freq_mhz * pmu_period * priv->data_width / 1024;
+}
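
A worked instance of the formula above, with assumed values: 600 MT/s DDR (600 transfers per microsecond), the default 50000 us PMU period, and a 32-bit (4-byte) data bus:

#include <stdio.h>

int main(void)
{
    unsigned int ddr_freq_mhz = 600, pmu_period = 50000, data_width = 4;

    /* 600 * 50000 * 4 / 1024 = 117187 KiB per PMU period */
    printf("%u KiB\n", ddr_freq_mhz * pmu_period * data_width / 1024);
    return 0;
}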
+
+static int sun8i_a33_mbus_set_dram_freq(struct sun8i_a33_mbus *priv,
+ unsigned long freq)
+{
+ u32 ddr_freq_mhz = freq / USEC_PER_SEC; /* DDR */
+ u32 dram_freq_mhz = ddr_freq_mhz / 2; /* SDR */
+ u32 mctl_freq_mhz = dram_freq_mhz / 2; /* HDR */
+ u32 dxodt, mdfscr, pwrctl, vtfcr;
+ u32 i, tREFI_32ck, tRFC_ck;
+ int ret;
+
+ /* The rate change is not effective until the MDFS process runs. */
+ ret = clk_set_rate(priv->clk_dram, freq);
+ if (ret)
+ return ret;
+
+ /* Disable automatic self-refresh and VTF before starting MDFS. */
+ pwrctl = readl_relaxed(priv->reg_dram + DRAM_PWRCTL) &
+ ~DRAM_PWRCTL_SELFREF_EN;
+ writel_relaxed(pwrctl, priv->reg_dram + DRAM_PWRCTL);
+ vtfcr = readl_relaxed(priv->reg_dram + DRAM_VTFCR);
+ writel_relaxed(vtfcr & ~DRAM_VTFCR_VTF_ENABLE,
+ priv->reg_dram + DRAM_VTFCR);
+
+ /* Set up MDFS and enable double buffering for timing registers. */
+ mdfscr = MBUS_MDFSCR_MODE_DFS |
+ MBUS_MDFSCR_BYPASS |
+ MBUS_MDFSCR_PAD_HOLD |
+ MBUS_MDFSCR_BUFFER_TIMING;
+ writel(mdfscr, priv->reg_mbus + MBUS_MDFSCR);
+
+ /* Update the buffered copy of RFSHTMG. */
+ tREFI_32ck = priv->tREFI_ns * mctl_freq_mhz / 1000 / 32;
+ tRFC_ck = DIV_ROUND_UP(priv->tRFC_ns * mctl_freq_mhz, 1000);
+ writel(DRAM_RFSHTMG_TREFI(tREFI_32ck) | DRAM_RFSHTMG_TRFC(tRFC_ck),
+ priv->reg_dram + DRAM_RFSHTMG);
+
+ /* Enable ODT if needed, or disable it to save power. */
+ if (priv->odtmap && dram_freq_mhz > priv->variant->odt_freq_mhz) {
+ dxodt = DRAM_DXnGCR0_DXODT_DYNAMIC;
+ writel(priv->odtmap, priv->reg_dram + DRAM_ODTMAP);
+ } else {
+ dxodt = DRAM_DXnGCR0_DXODT_DISABLED;
+ writel(0, priv->reg_dram + DRAM_ODTMAP);
+ }
+ for (i = 0; i < DRAM_DX_MAX; ++i) {
+ void __iomem *reg = priv->reg_dram + DRAM_DXnGCR0(i);
+
+ writel((readl(reg) & ~DRAM_DXnGCR0_DXODT) | dxodt, reg);
+ }
+
+ dev_dbg(priv->devfreq_dram->dev.parent,
+ "Setting DRAM to %u MHz, tREFI=%u, tRFC=%u, ODT=%s\n",
+ dram_freq_mhz, tREFI_32ck, tRFC_ck,
+ dxodt == DRAM_DXnGCR0_DXODT_DYNAMIC ? "dynamic" : "disabled");
+
+ /* Trigger hardware MDFS. */
+ writel(mdfscr | MBUS_MDFSCR_START, priv->reg_mbus + MBUS_MDFSCR);
+ ret = readl_poll_timeout_atomic(priv->reg_mbus + MBUS_MDFSCR, mdfscr,
+ !(mdfscr & MBUS_MDFSCR_START), 10, 1000);
+ if (ret)
+ return ret;
+
+ /* Disable double buffering. */
+ writel(0, priv->reg_mbus + MBUS_MDFSCR);
+
+ /* Restore VTF configuration. */
+ writel_relaxed(vtfcr, priv->reg_dram + DRAM_VTFCR);
+
+ /* Enable automatic self-refresh at the lowest frequency only. */
+ if (freq == priv->freq_table[0])
+ pwrctl |= DRAM_PWRCTL_SELFREF_EN;
+ writel_relaxed(pwrctl, priv->reg_dram + DRAM_PWRCTL);
+
+ sun8i_a33_mbus_restart_pmu_counters(priv);
+ sun8i_a33_mbus_update_nominal_bw(priv, ddr_freq_mhz);
+
+ return 0;
+}
+
+static int sun8i_a33_mbus_set_dram_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);
+ struct devfreq *devfreq = priv->devfreq_dram;
+ struct dev_pm_opp *opp;
+ int ret;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ dev_pm_opp_put(opp);
+
+ if (*freq == devfreq->previous_freq)
+ return 0;
+
+ ret = sun8i_a33_mbus_set_dram_freq(priv, *freq);
+ if (ret) {
+ dev_warn(dev, "failed to set DRAM frequency: %d\n", ret);
+ *freq = devfreq->previous_freq;
+ }
+
+ return ret;
+}
+
+static int sun8i_a33_mbus_get_dram_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);
+
+ stat->busy_time = sun8i_a33_mbus_get_peak_bw(priv);
+ stat->total_time = priv->nominal_bw;
+ stat->current_frequency = priv->devfreq_dram->previous_freq;
+
+ sun8i_a33_mbus_restart_pmu_counters(priv);
+
+ dev_dbg(dev, "Using %lu/%lu (%lu%%) at %lu MHz\n",
+ stat->busy_time, stat->total_time,
+ DIV_ROUND_CLOSEST(stat->busy_time * 100, stat->total_time),
+ stat->current_frequency / USEC_PER_SEC);
+
+ return 0;
+}
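
For context, a hedged sketch of how the simple_ondemand governor (assumed behavior; the real governor also picks a proportional target frequency) consumes this status: busy_time/total_time acts as a load ratio compared against the driver's upthreshold of 10:

#include <stdio.h>

int main(void)
{
    unsigned long busy = 5859, total = 117187;   /* KiB per PMU period */
    unsigned long load_pct = busy * 100 / total; /* ~5% */

    printf("load=%lu%% -> %s\n", load_pct,
           load_pct > 10 ? "raise DRAM frequency" : "hold or lower");
    return 0;
}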
+
+static int sun8i_a33_mbus_hw_init(struct device *dev,
+ struct sun8i_a33_mbus *priv,
+ unsigned long ddr_freq)
+{
+ u32 i, mbus_cr, mbus_freq_mhz;
+
+ /* Choose tREFI and tRFC to match the configured DRAM type. */
+ mbus_cr = readl_relaxed(priv->reg_mbus + MBUS_CR);
+ switch (MBUS_CR_GET_DRAM_TYPE(mbus_cr)) {
+ case MBUS_CR_DRAM_TYPE_DDR2:
+ case MBUS_CR_DRAM_TYPE_DDR3:
+ case MBUS_CR_DRAM_TYPE_DDR4:
+ priv->tREFI_ns = 7800;
+ priv->tRFC_ns = 350;
+ break;
+ case MBUS_CR_DRAM_TYPE_LPDDR2:
+ case MBUS_CR_DRAM_TYPE_LPDDR3:
+ priv->tREFI_ns = 3900;
+ priv->tRFC_ns = 210;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Save ODTMAP so it can be restored when raising the frequency. */
+ priv->odtmap = readl_relaxed(priv->reg_dram + DRAM_ODTMAP);
+
+ /* Compute the DRAM data bus width by counting enabled DATx8 blocks. */
+ for (i = 0; i < DRAM_DX_MAX; ++i) {
+ void __iomem *reg = priv->reg_dram + DRAM_DXnGCR0(i);
+
+ if (!(readl_relaxed(reg) & DRAM_DXnGCR0_DXEN))
+ break;
+ }
+ priv->data_width = i;
+
+ dev_dbg(dev, "Detected %u-bit %sDDRx with%s ODT\n",
+ priv->data_width * 8,
+ MBUS_CR_GET_DRAM_TYPE(mbus_cr) > 4 ? "LP" : "",
+ priv->odtmap ? "" : "out");
+
+ /* Program MBUS_TMR such that the PMU period unit is microseconds. */
+ mbus_freq_mhz = clk_get_rate(priv->clk_mbus) / USEC_PER_SEC;
+ writel_relaxed(MBUS_TMR_PERIOD(mbus_freq_mhz),
+ priv->reg_mbus + MBUS_TMR);
+
+ /* "Master Ready Mask Register" bits must be set or MDFS will block. */
+ writel_relaxed(0xffffffff, priv->reg_mbus + MBUS_MDFSMRMR);
+
+ sun8i_a33_mbus_restart_pmu_counters(priv);
+ sun8i_a33_mbus_update_nominal_bw(priv, ddr_freq / USEC_PER_SEC);
+
+ return 0;
+}
+
+static int __maybe_unused sun8i_a33_mbus_suspend(struct device *dev)
+{
+ struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk_bus);
+
+ return 0;
+}
+
+static int __maybe_unused sun8i_a33_mbus_resume(struct device *dev)
+{
+ struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(priv->clk_bus);
+}
+
+static int sun8i_a33_mbus_probe(struct platform_device *pdev)
+{
+ const struct sun8i_a33_mbus_variant *variant;
+ struct device *dev = &pdev->dev;
+ struct sun8i_a33_mbus *priv;
+ unsigned long base_freq;
+ unsigned int max_state;
+ const char *err;
+ int i, ret;
+
+ variant = device_get_match_data(dev);
+ if (!variant)
+ return -EINVAL;
+
+ max_state = variant->max_dram_divider - variant->min_dram_divider + 1;
+
+ priv = devm_kzalloc(dev, struct_size(priv, freq_table, max_state), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->variant = variant;
+
+ priv->reg_dram = devm_platform_ioremap_resource_byname(pdev, "dram");
+ if (IS_ERR(priv->reg_dram))
+ return PTR_ERR(priv->reg_dram);
+
+ priv->reg_mbus = devm_platform_ioremap_resource_byname(pdev, "mbus");
+ if (IS_ERR(priv->reg_mbus))
+ return PTR_ERR(priv->reg_mbus);
+
+ priv->clk_bus = devm_clk_get(dev, "bus");
+ if (IS_ERR(priv->clk_bus))
+ return dev_err_probe(dev, PTR_ERR(priv->clk_bus),
+ "failed to get bus clock\n");
+
+ priv->clk_dram = devm_clk_get(dev, "dram");
+ if (IS_ERR(priv->clk_dram))
+ return dev_err_probe(dev, PTR_ERR(priv->clk_dram),
+ "failed to get dram clock\n");
+
+ priv->clk_mbus = devm_clk_get(dev, "mbus");
+ if (IS_ERR(priv->clk_mbus))
+ return dev_err_probe(dev, PTR_ERR(priv->clk_mbus),
+ "failed to get mbus clock\n");
+
+ ret = clk_prepare_enable(priv->clk_bus);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to enable bus clock\n");
+
+ /* Lock the DRAM clock rate to keep priv->nominal_bw in sync. */
+ ret = clk_rate_exclusive_get(priv->clk_dram);
+ if (ret) {
+ err = "failed to lock dram clock rate\n";
+ goto err_disable_bus;
+ }
+
+ /* Lock the MBUS clock rate to keep MBUS_TMR_PERIOD in sync. */
+ ret = clk_rate_exclusive_get(priv->clk_mbus);
+ if (ret) {
+ err = "failed to lock mbus clock rate\n";
+ goto err_unlock_dram;
+ }
+
+ priv->gov_data.upthreshold = 10;
+ priv->gov_data.downdifferential = 5;
+
+ priv->profile.initial_freq = clk_get_rate(priv->clk_dram);
+ priv->profile.polling_ms = 1000;
+ priv->profile.target = sun8i_a33_mbus_set_dram_target;
+ priv->profile.get_dev_status = sun8i_a33_mbus_get_dram_status;
+ priv->profile.freq_table = priv->freq_table;
+ priv->profile.max_state = max_state;
+
+ ret = devm_pm_opp_set_clkname(dev, "dram");
+ if (ret) {
+ err = "failed to add OPP table\n";
+ goto err_unlock_mbus;
+ }
+
+ base_freq = clk_get_rate(clk_get_parent(priv->clk_dram));
+ for (i = 0; i < max_state; ++i) {
+ unsigned int div = variant->max_dram_divider - i;
+
+ priv->freq_table[i] = base_freq / div;
+
+ ret = dev_pm_opp_add(dev, priv->freq_table[i], 0);
+ if (ret) {
+ err = "failed to add OPPs\n";
+ goto err_remove_opps;
+ }
+ }
+
+ ret = sun8i_a33_mbus_hw_init(dev, priv, priv->profile.initial_freq);
+ if (ret) {
+ err = "failed to init hardware\n";
+ goto err_remove_opps;
+ }
+
+ priv->devfreq_dram = devfreq_add_device(dev, &priv->profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &priv->gov_data);
+ if (IS_ERR(priv->devfreq_dram)) {
+ ret = PTR_ERR(priv->devfreq_dram);
+ err = "failed to add devfreq device\n";
+ goto err_remove_opps;
+ }
+
+ /*
+ * This must be set manually after registering the devfreq device,
+ * because there is no way to select a dynamic OPP as the suspend OPP.
+ */
+ priv->devfreq_dram->suspend_freq = priv->freq_table[0];
+
+ return 0;
+
+err_remove_opps:
+ dev_pm_opp_remove_all_dynamic(dev);
+err_unlock_mbus:
+ clk_rate_exclusive_put(priv->clk_mbus);
+err_unlock_dram:
+ clk_rate_exclusive_put(priv->clk_dram);
+err_disable_bus:
+ clk_disable_unprepare(priv->clk_bus);
+
+ return dev_err_probe(dev, ret, err);
+}
+
+static int sun8i_a33_mbus_remove(struct platform_device *pdev)
+{
+ struct sun8i_a33_mbus *priv = platform_get_drvdata(pdev);
+ unsigned long initial_freq = priv->profile.initial_freq;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ devfreq_remove_device(priv->devfreq_dram);
+
+ ret = sun8i_a33_mbus_set_dram_freq(priv, initial_freq);
+ if (ret)
+ dev_warn(dev, "failed to restore DRAM frequency: %d\n", ret);
+
+ dev_pm_opp_remove_all_dynamic(dev);
+ clk_rate_exclusive_put(priv->clk_mbus);
+ clk_rate_exclusive_put(priv->clk_dram);
+ clk_disable_unprepare(priv->clk_bus);
+
+ return 0;
+}
+
+static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = {
+ .min_dram_divider = 1,
+ .max_dram_divider = 4,
+ .odt_freq_mhz = 400,
+};
+
+static const struct of_device_id sun8i_a33_mbus_of_match[] = {
+ { .compatible = "allwinner,sun50i-a64-mbus", .data = &sun50i_a64_mbus },
+ { .compatible = "allwinner,sun50i-h5-mbus", .data = &sun50i_a64_mbus },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sun8i_a33_mbus_of_match);
+
+static SIMPLE_DEV_PM_OPS(sun8i_a33_mbus_pm_ops,
+ sun8i_a33_mbus_suspend, sun8i_a33_mbus_resume);
+
+static struct platform_driver sun8i_a33_mbus_driver = {
+ .probe = sun8i_a33_mbus_probe,
+ .remove = sun8i_a33_mbus_remove,
+ .driver = {
+ .name = "sun8i-a33-mbus",
+ .of_match_table = sun8i_a33_mbus_of_match,
+ .pm = pm_ptr(&sun8i_a33_mbus_pm_ops),
+ },
+};
+module_platform_driver(sun8i_a33_mbus_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Allwinner sun8i/sun50i MBUS DEVFREQ Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 1ef021273a06..511805dbeb75 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o
dmabuf_selftests-y := \
selftest.o \
st-dma-fence.o \
- st-dma-fence-chain.o
+ st-dma-fence-chain.o \
+ st-dma-resv.o
obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
index 053baadcada9..2bba0babcb62 100644
--- a/drivers/dma-buf/dma-buf-sysfs-stats.c
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
@@ -132,7 +132,7 @@ void dma_buf_stats_teardown(struct dma_buf *dmabuf)
/* Statistics files do not need to send uevents. */
-static int dmabuf_sysfs_uevent_filter(struct kset *kset, struct kobject *kobj)
+static int dmabuf_sysfs_uevent_filter(struct kobject *kobj)
{
return 0;
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 6437b2e978fb..602b12d7470d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -299,10 +299,8 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
/**
* dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
- * The name of the dma-buf buffer can only be set when the dma-buf is not
- * attached to any devices. It could theoritically support changing the
- * name of the dma-buf if the same piece of memory is used for multiple
- * purpose between different devices.
+ * It could support changing the name of the dma-buf if the same
+ * piece of memory is used for multiple purposes between different devices.
*
* @dmabuf: [in] dmabuf buffer that will be renamed.
* @buf: [in] A piece of userspace memory that contains the name of
@@ -315,25 +313,16 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
- long ret = 0;
if (IS_ERR(name))
return PTR_ERR(name);
- dma_resv_lock(dmabuf->resv, NULL);
- if (!list_empty(&dmabuf->attachments)) {
- ret = -EBUSY;
- kfree(name);
- goto out_unlock;
- }
spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
spin_unlock(&dmabuf->name_lock);
-out_unlock:
- dma_resv_unlock(dmabuf->resv);
- return ret;
+ return 0;
}
static long dma_buf_ioctl(struct file *file,
@@ -1058,8 +1047,8 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
*
* Interfaces::
*
- * void \*dma_buf_vmap(struct dma_buf \*dmabuf)
- * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
+ * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct dma_buf_map \*map)
+ * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct dma_buf_map \*map)
*
* The vmap call can fail if there is no vmap support in the exporter, or if
* it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
@@ -1338,8 +1327,6 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
struct dma_buf *buf_obj;
struct dma_buf_attachment *attach_obj;
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
int count = 0, attach_count;
size_t size = 0;
int ret;
@@ -1370,14 +1357,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
buf_obj->name ?: "");
spin_unlock(&buf_obj->name_lock);
- dma_resv_for_each_fence(&cursor, buf_obj->resv, true, fence) {
- seq_printf(s, "\t%s fence: %s %s %ssignalled\n",
- dma_resv_iter_is_exclusive(&cursor) ?
- "Exclusive" : "Shared",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- dma_fence_is_signaled(fence) ? "" : "un");
- }
+ dma_resv_describe(buf_obj->resv, s);
seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index d3fbd950be94..3e07f961e2f3 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -104,7 +104,11 @@ static bool dma_fence_array_signaled(struct dma_fence *fence)
{
struct dma_fence_array *array = to_dma_fence_array(fence);
- return atomic_read(&array->num_pending) <= 0;
+ if (atomic_read(&array->num_pending) > 0)
+ return false;
+
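+ /*
+ * All callbacks have run once num_pending hits zero, so it is safe to
+ * clear any pending error state before reporting the array signaled.
+ */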
+ dma_fence_array_clear_pending_error(array);
+ return true;
}
static void dma_fence_array_release(struct dma_fence *fence)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 1e82ecd443fa..066400ed8841 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -15,6 +15,7 @@
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>
+#include <linux/seq_file.h>
#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>
@@ -908,6 +909,22 @@ err_free_cb:
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
+ * dma_fence_describe - Dump fence description into seq_file
+ * @fence: the fence to describe
+ * @seq: the seq_file to put the textual description into
+ *
+ * Dump a textual description of the fence and its state into the seq_file.
+ */
+void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
+{
+ seq_printf(seq, "%s %s seq %llu %ssignalled\n",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence), fence->seqno,
+ dma_fence_is_signaled(fence) ? "" : "un");
+}
+EXPORT_SYMBOL(dma_fence_describe);
+
+/**
* dma_fence_init - Initialize a custom fence.
* @fence: the fence to initialize
* @ops: the dma_fence_ops for operations on this fence
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 9eb2baa387d4..4deea75c0b9c 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -38,6 +38,7 @@
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
+#include <linux/seq_file.h>
/**
* DOC: Reservation Object Overview
@@ -304,8 +305,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
if (old)
i = old->shared_count;
- if (fence)
- dma_fence_get(fence);
+ dma_fence_get(fence);
write_seqcount_begin(&obj->seq);
/* write_seqcount_begin provides the necessary memory barrier */
@@ -666,6 +666,28 @@ bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
+/**
+ * dma_resv_describe - Dump description of the resv object into seq_file
+ * @obj: the reservation object
+ * @seq: the seq_file to dump the description into
+ *
+ * Dump a textual description of the fences inside a dma_resv object into
+ * the seq_file.
+ */
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&cursor, obj, true, fence) {
+ seq_printf(seq, "\t%s fence:",
+ dma_resv_iter_is_exclusive(&cursor) ?
+ "Exclusive" : "Shared");
+ dma_fence_describe(fence, seq);
+ }
+}
+EXPORT_SYMBOL_GPL(dma_resv_describe);
+
#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 0c05b79870f9..83f02bd51dda 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -124,10 +124,11 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
struct cma_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
+ mutex_lock(&buffer->lock);
+
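+ /* Check vmap_cnt under the lock so that vaddr cannot change under us. */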
if (buffer->vmap_cnt)
invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
- mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
@@ -144,10 +145,11 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
struct cma_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
+ mutex_lock(&buffer->lock);
+
if (buffer->vmap_cnt)
flush_kernel_vmap_range(buffer->vaddr, buffer->len);
- mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
index bc8cea67bf1e..97d73aaa31da 100644
--- a/drivers/dma-buf/selftests.h
+++ b/drivers/dma-buf/selftests.h
@@ -12,3 +12,4 @@
selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
selftest(dma_fence, dma_fence)
selftest(dma_fence_chain, dma_fence_chain)
+selftest(dma_resv, dma_resv)
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
new file mode 100644
index 000000000000..bc32b3eedcb6
--- /dev/null
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ * Copyright © 2021 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/dma-resv.h>
+
+#include "selftest.h"
+
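+/*
+ * All test fences share this one lock; dma_fence_init() only stores the
+ * pointer, so a single static spinlock is enough for the selftests.
+ */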
+static struct spinlock fence_lock;
+
+static const char *fence_name(struct dma_fence *f)
+{
+ return "selftest";
+}
+
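+/*
+ * Minimal fence ops: one helper serves as both the driver and timeline
+ * name callback, since the names only show up in debug output.
+ */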
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = fence_name,
+ .get_timeline_name = fence_name,
+};
+
+static struct dma_fence *alloc_fence(void)
+{
+ struct dma_fence *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ dma_fence_init(f, &fence_ops, &fence_lock, 0, 0);
+ return f;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_resv resv;
+ struct dma_fence *f;
+ int r;
+
+ f = alloc_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_signal(f);
+ dma_fence_put(f);
+
+ dma_resv_init(&resv);
+ r = dma_resv_lock(&resv, NULL);
+ if (r)
+ pr_err("Resv locking failed\n");
+ else
+ dma_resv_unlock(&resv);
+ dma_resv_fini(&resv);
+ return r;
+}
+
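+/*
+ * Add a single fence as either the exclusive or a shared fence and check
+ * that dma_resv_test_signaled() only reports true once the fence signals.
+ */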
+static int test_signaling(void *arg, bool shared)
+{
+ struct dma_resv resv;
+ struct dma_fence *f;
+ int r;
+
+ f = alloc_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_resv_init(&resv);
+ r = dma_resv_lock(&resv, NULL);
+ if (r) {
+ pr_err("Resv locking failed\n");
+ goto err_free;
+ }
+
+ if (shared) {
+ r = dma_resv_reserve_shared(&resv, 1);
+ if (r) {
+ pr_err("Resv shared slot allocation failed\n");
+ goto err_unlock;
+ }
+
+ dma_resv_add_shared_fence(&resv, f);
+ } else {
+ dma_resv_add_excl_fence(&resv, f);
+ }
+
+ if (dma_resv_test_signaled(&resv, shared)) {
+ pr_err("Resv unexpectedly signaled\n");
+ r = -EINVAL;
+ goto err_unlock;
+ }
+ dma_fence_signal(f);
+ if (!dma_resv_test_signaled(&resv, shared)) {
+ pr_err("Resv not reporting signaled\n");
+ r = -EINVAL;
+ goto err_unlock;
+ }
+err_unlock:
+ dma_resv_unlock(&resv);
+err_free:
+ dma_resv_fini(&resv);
+ dma_fence_put(f);
+ return r;
+}
+
+static int test_excl_signaling(void *arg)
+{
+ return test_signaling(arg, false);
+}
+
+static int test_shared_signaling(void *arg)
+{
+ return test_signaling(arg, true);
+}
+
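+/*
+ * Walk the reservation object with the locked iterator and check that
+ * exactly one fence is returned, with the expected usage.
+ */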
+static int test_for_each(void *arg, bool shared)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *f, *fence;
+ struct dma_resv resv;
+ int r;
+
+ f = alloc_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_resv_init(&resv);
+ r = dma_resv_lock(&resv, NULL);
+ if (r) {
+ pr_err("Resv locking failed\n");
+ goto err_free;
+ }
+
+ if (shared) {
+ r = dma_resv_reserve_shared(&resv, 1);
+ if (r) {
+ pr_err("Resv shared slot allocation failed\n");
+ goto err_unlock;
+ }
+
+ dma_resv_add_shared_fence(&resv, f);
+ } else {
+ dma_resv_add_excl_fence(&resv, f);
+ }
+
+ r = -ENOENT;
+ dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
+ if (!r) {
+ pr_err("More than one fence found\n");
+ r = -EINVAL;
+ goto err_unlock;
+ }
+ if (f != fence) {
+ pr_err("Unexpected fence\n");
+ r = -EINVAL;
+ goto err_unlock;
+ }
+ if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+ pr_err("Unexpected fence usage\n");
+ r = -EINVAL;
+ goto err_unlock;
+ }
+ r = 0;
+ }
+ if (r) {
+ pr_err("No fence found\n");
+ goto err_unlock;
+ }
+ dma_fence_signal(f);
+err_unlock:
+ dma_resv_unlock(&resv);
+err_free:
+ dma_resv_fini(&resv);
+ dma_fence_put(f);
+ return r;
+}
+
+static int test_excl_for_each(void *arg)
+{
+ return test_for_each(arg, false);
+}
+
+static int test_shared_for_each(void *arg)
+{
+ return test_for_each(arg, true);
+}
+
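+/*
+ * Like test_for_each(), but with the unlocked iterator; also forces one
+ * iterator restart by invalidating the cursor's cached seqcount.
+ */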
+static int test_for_each_unlocked(void *arg, bool shared)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *f, *fence;
+ struct dma_resv resv;
+ int r;
+
+ f = alloc_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_resv_init(&resv);
+ r = dma_resv_lock(&resv, NULL);
+ if (r) {
+ pr_err("Resv locking failed\n");
+ goto err_free;
+ }
+
+ if (shared) {
+ r = dma_resv_reserve_shared(&resv, 1);
+ if (r) {
+ pr_err("Resv shared slot allocation failed\n");
+ dma_resv_unlock(&resv);
+ goto err_free;
+ }
+
+ dma_resv_add_shared_fence(&resv, f);
+ } else {
+ dma_resv_add_excl_fence(&resv, f);
+ }
+ dma_resv_unlock(&resv);
+
+ r = -ENOENT;
+ dma_resv_iter_begin(&cursor, &resv, shared);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ if (!r) {
+ pr_err("More than one fence found\n");
+ r = -EINVAL;
+ goto err_iter_end;
+ }
+ if (!dma_resv_iter_is_restarted(&cursor)) {
+ pr_err("No restart flag\n");
+ goto err_iter_end;
+ }
+ if (f != fence) {
+ pr_err("Unexpected fence\n");
+ r = -EINVAL;
+ goto err_iter_end;
+ }
+ if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+ pr_err("Unexpected fence usage\n");
+ r = -EINVAL;
+ goto err_iter_end;
+ }
+
+ /* We use r as iteration state: -ENOENT first pass, -EINVAL second */
+ if (r == -ENOENT) {
+ r = -EINVAL;
+ /* Invalidate the cached seqcount; that should trigger a restart */
+ cursor.seq--;
+ } else if (r == -EINVAL) {
+ r = 0;
+ }
+ }
+ if (r)
+ pr_err("No fence found\n");
+err_iter_end:
+ dma_resv_iter_end(&cursor);
+ dma_fence_signal(f);
+err_free:
+ dma_resv_fini(&resv);
+ dma_fence_put(f);
+ return r;
+}
+
+static int test_excl_for_each_unlocked(void *arg)
+{
+ return test_for_each_unlocked(arg, false);
+}
+
+static int test_shared_for_each_unlocked(void *arg)
+{
+ return test_for_each_unlocked(arg, true);
+}
+
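+/*
+ * Check that dma_resv_get_fences() returns the fence in the expected slot:
+ * as the exclusive fence, or as the sole entry of the shared array.
+ */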
+static int test_get_fences(void *arg, bool shared)
+{
+ struct dma_fence *f, *excl = NULL, **fences = NULL;
+ struct dma_resv resv;
+ int r, i;
+
+ f = alloc_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_resv_init(&resv);
+ r = dma_resv_lock(&resv, NULL);
+ if (r) {
+ pr_err("Resv locking failed\n");
+ goto err_resv;
+ }
+
+ if (shared) {
+ r = dma_resv_reserve_shared(&resv, 1);
+ if (r) {
+ pr_err("Resv shared slot allocation failed\n");
+ dma_resv_unlock(&resv);
+ goto err_resv;
+ }
+
+ dma_resv_add_shared_fence(&resv, f);
+ } else {
+ dma_resv_add_excl_fence(&resv, f);
+ }
+ dma_resv_unlock(&resv);
+
+ r = dma_resv_get_fences(&resv, &excl, &i, &fences);
+ if (r) {
+ pr_err("get_fences failed\n");
+ goto err_free;
+ }
+
+ if (shared) {
+ if (excl != NULL) {
+ pr_err("get_fences returned unexpected excl fence\n");
+ r = -EINVAL;
+ goto err_free;
+ }
+ if (i != 1 || fences[0] != f) {
+ pr_err("get_fences returned unexpected shared fence\n");
+ r = -EINVAL;
+ goto err_free;
+ }
+ } else {
+ if (excl != f) {
+ pr_err("get_fences returned unexpected excl fence\n");
+ r = -EINVAL;
+ goto err_free;
+ }
+ if (i != 0) {
+ pr_err("get_fences returned unexpected shared fence\n");
+ r = -EINVAL;
+ goto err_free;
+ }
+ }
+
+ dma_fence_signal(f);
+err_free:
+ dma_fence_put(excl);
+ while (i--)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+err_resv:
+ dma_resv_fini(&resv);
+ dma_fence_put(f);
+ return r;
+}
+
+static int test_excl_get_fences(void *arg)
+{
+ return test_get_fences(arg, false);
+}
+
+static int test_shared_get_fences(void *arg)
+{
+ return test_get_fences(arg, true);
+}
+
+int dma_resv(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(test_excl_signaling),
+ SUBTEST(test_shared_signaling),
+ SUBTEST(test_excl_for_each),
+ SUBTEST(test_shared_for_each),
+ SUBTEST(test_excl_for_each_unlocked),
+ SUBTEST(test_shared_for_each_unlocked),
+ SUBTEST(test_excl_get_fences),
+ SUBTEST(test_shared_get_fences),
+ };
+
+ spin_lock_init(&fence_lock);
+ return subtests(tests, NULL);
+}
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 275a76f188ae..a1da2b4b6d73 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -99,6 +99,7 @@
#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
+#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27)
#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
@@ -252,15 +253,15 @@ struct at_xdmac {
/* Linked List Descriptor */
struct at_xdmac_lld {
- dma_addr_t mbr_nda; /* Next Descriptor Member */
- u32 mbr_ubc; /* Microblock Control Member */
- dma_addr_t mbr_sa; /* Source Address Member */
- dma_addr_t mbr_da; /* Destination Address Member */
- u32 mbr_cfg; /* Configuration Register */
- u32 mbr_bc; /* Block Control Register */
- u32 mbr_ds; /* Data Stride Register */
- u32 mbr_sus; /* Source Microblock Stride Register */
- u32 mbr_dus; /* Destination Microblock Stride Register */
+ u32 mbr_nda; /* Next Descriptor Member */
+ u32 mbr_ubc; /* Microblock Control Member */
+ u32 mbr_sa; /* Source Address Member */
+ u32 mbr_da; /* Destination Address Member */
+ u32 mbr_cfg; /* Configuration Register */
+ u32 mbr_bc; /* Block Control Register */
+ u32 mbr_ds; /* Data Stride Register */
+ u32 mbr_sus; /* Source Microblock Stride Register */
+ u32 mbr_dus; /* Destination Microblock Stride Register */
};
/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
@@ -385,9 +386,6 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
- if (at_xdmac_chan_is_enabled(atchan))
- return;
-
/* Set transfer as active to not try to start it again. */
first->active_xfer = true;
@@ -405,7 +403,8 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
*/
if (at_xdmac_chan_is_cyclic(atchan))
reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
- else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
+ else if ((first->lld.mbr_ubc &
+ AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
else
reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
@@ -476,13 +475,12 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&atchan->lock, irqflags);
cookie = dma_cookie_assign(tx);
+ list_add_tail(&desc->xfer_node, &atchan->xfers_list);
+ spin_unlock_irqrestore(&atchan->lock, irqflags);
+
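+ /* The first transfer is started later, from issue_pending(). */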
dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
__func__, atchan, desc);
- list_add_tail(&desc->xfer_node, &atchan->xfers_list);
- if (list_is_singular(&atchan->xfers_list))
- at_xdmac_start_xfer(atchan, desc);
- spin_unlock_irqrestore(&atchan->lock, irqflags);
return cookie;
}
@@ -733,7 +731,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!desc) {
dev_err(chan2dev(chan), "can't get descriptor\n");
if (first)
- list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
goto spin_unlock;
}
@@ -821,7 +820,8 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
if (!desc) {
dev_err(chan2dev(chan), "can't get descriptor\n");
if (first)
- list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
spin_unlock_irqrestore(&atchan->lock, irqflags);
return NULL;
}
@@ -1055,8 +1055,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
src_addr, dst_addr,
xt, chunk);
if (!desc) {
- list_splice_init(&first->descs_list,
- &atchan->free_descs_list);
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
return NULL;
}
@@ -1136,7 +1136,8 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (!desc) {
dev_err(chan2dev(chan), "can't get descriptor\n");
if (first)
- list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
return NULL;
}
@@ -1312,8 +1313,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
sg_dma_len(sg),
value);
if (!desc && first)
- list_splice_init(&first->descs_list,
- &atchan->free_descs_list);
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
if (!first)
first = desc;
@@ -1586,20 +1587,6 @@ spin_unlock:
return ret;
}
-/* Call must be protected by lock. */
-static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
- struct at_xdmac_desc *desc)
-{
- dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-
- /*
- * Remove the transfer from the transfer list then move the transfer
- * descriptors into the free descriptors list.
- */
- list_del(&desc->xfer_node);
- list_splice_init(&desc->descs_list, &atchan->free_descs_list);
-}
-
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
struct at_xdmac_desc *desc;
@@ -1608,14 +1595,14 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
* If channel is enabled, do nothing, advance_work will be triggered
* after the interruption.
*/
- if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
- desc = list_first_entry(&atchan->xfers_list,
- struct at_xdmac_desc,
- xfer_node);
- dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
- if (!desc->active_xfer)
- at_xdmac_start_xfer(atchan, desc);
- }
+ if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
+ return;
+
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+ xfer_node);
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+ if (!desc->active_xfer)
+ at_xdmac_start_xfer(atchan, desc);
}
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1623,16 +1610,22 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
struct at_xdmac_desc *desc;
struct dma_async_tx_descriptor *txd;
- if (!list_empty(&atchan->xfers_list)) {
- desc = list_first_entry(&atchan->xfers_list,
- struct at_xdmac_desc, xfer_node);
- txd = &desc->tx_dma_desc;
-
- if (txd->flags & DMA_PREP_INTERRUPT)
- dmaengine_desc_get_callback_invoke(txd, NULL);
+ spin_lock_irq(&atchan->lock);
+ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+ __func__, atchan->irq_status);
+ if (list_empty(&atchan->xfers_list)) {
+ spin_unlock_irq(&atchan->lock);
+ return;
}
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+ xfer_node);
+ spin_unlock_irq(&atchan->lock);
+ txd = &desc->tx_dma_desc;
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
+/* Called with atchan->lock held. */
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
@@ -1651,8 +1644,6 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
- spin_lock_irq(&atchan->lock);
-
/* Channel must be disabled first as it's not done automatically */
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
@@ -1662,8 +1653,6 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
struct at_xdmac_desc,
xfer_node);
- spin_unlock_irq(&atchan->lock);
-
/* Print bad descriptor's details if needed */
dev_dbg(chan2dev(&atchan->chan),
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
@@ -1677,50 +1666,52 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
{
struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
struct at_xdmac_desc *desc;
+ struct dma_async_tx_descriptor *txd;
u32 error_mask;
+ if (at_xdmac_chan_is_cyclic(atchan))
+ return at_xdmac_handle_cyclic(atchan);
+
+ error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
+ AT_XDMAC_CIS_ROIS;
+
+ spin_lock_irq(&atchan->lock);
+
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
__func__, atchan->irq_status);
- error_mask = AT_XDMAC_CIS_RBEIS
- | AT_XDMAC_CIS_WBEIS
- | AT_XDMAC_CIS_ROIS;
-
- if (at_xdmac_chan_is_cyclic(atchan)) {
- at_xdmac_handle_cyclic(atchan);
- } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
- || (atchan->irq_status & error_mask)) {
- struct dma_async_tx_descriptor *txd;
-
- if (atchan->irq_status & error_mask)
- at_xdmac_handle_error(atchan);
-
- spin_lock_irq(&atchan->lock);
- desc = list_first_entry(&atchan->xfers_list,
- struct at_xdmac_desc,
- xfer_node);
- dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
- if (!desc->active_xfer) {
- dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
- spin_unlock_irq(&atchan->lock);
- return;
- }
+ if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
+ !(atchan->irq_status & error_mask)) {
+ spin_unlock_irq(&atchan->lock);
+ return;
+ }
- txd = &desc->tx_dma_desc;
+ if (atchan->irq_status & error_mask)
+ at_xdmac_handle_error(atchan);
- at_xdmac_remove_xfer(atchan, desc);
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+ xfer_node);
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+ if (!desc->active_xfer) {
+ dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
spin_unlock_irq(&atchan->lock);
+ return;
+ }
- dma_cookie_complete(txd);
- if (txd->flags & DMA_PREP_INTERRUPT)
- dmaengine_desc_get_callback_invoke(txd, NULL);
+ txd = &desc->tx_dma_desc;
+ dma_cookie_complete(txd);
+ /* Remove the transfer from the transfer list. */
+ list_del(&desc->xfer_node);
+ spin_unlock_irq(&atchan->lock);
- dma_run_dependencies(txd);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
- spin_lock_irq(&atchan->lock);
- at_xdmac_advance_work(atchan);
- spin_unlock_irq(&atchan->lock);
- }
+ dma_run_dependencies(txd);
+
+ spin_lock_irq(&atchan->lock);
+ /* Move the xfer descriptors into the free descriptors list. */
+ list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
+ at_xdmac_advance_work(atchan);
+ spin_unlock_irq(&atchan->lock);
}
static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
@@ -1784,11 +1775,9 @@ static void at_xdmac_issue_pending(struct dma_chan *chan)
dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
- if (!at_xdmac_chan_is_cyclic(atchan)) {
- spin_lock_irqsave(&atchan->lock, flags);
- at_xdmac_advance_work(atchan);
- spin_unlock_irqrestore(&atchan->lock, flags);
- }
+ spin_lock_irqsave(&atchan->lock, flags);
+ at_xdmac_advance_work(atchan);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return;
}
@@ -1866,8 +1855,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
cpu_relax();
/* Cancel all pending transfers. */
- list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
- at_xdmac_remove_xfer(atchan, desc);
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
+ list_del(&desc->xfer_node);
+ list_splice_tail_init(&desc->descs_list,
+ &atchan->free_descs_list);
+ }
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
@@ -2031,7 +2023,7 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
static int at_xdmac_probe(struct platform_device *pdev)
{
struct at_xdmac *atxdmac;
- int irq, size, nr_channels, i, ret;
+ int irq, nr_channels, i, ret;
void __iomem *base;
u32 reg;
@@ -2056,9 +2048,9 @@ static int at_xdmac_probe(struct platform_device *pdev)
return -EINVAL;
}
- size = sizeof(*atxdmac);
- size += nr_channels * sizeof(struct at_xdmac_chan);
- atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ atxdmac = devm_kzalloc(&pdev->dev,
+ struct_size(atxdmac, chan, nr_channels),
+ GFP_KERNEL);
if (!atxdmac) {
dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
return -ENOMEM;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 96701dedcac8..fc513eb2b289 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -104,10 +104,10 @@
* descriptor base address in the upper 8 bits.
*/
struct jz4780_dma_hwdesc {
- uint32_t dcm;
- uint32_t dsa;
- uint32_t dta;
- uint32_t dtc;
+ u32 dcm;
+ u32 dsa;
+ u32 dta;
+ u32 dtc;
};
/* Size of allocations for hardware descriptor blocks. */
@@ -122,7 +122,8 @@ struct jz4780_dma_desc {
dma_addr_t desc_phys;
unsigned int count;
enum dma_transaction_type type;
- uint32_t status;
+ u32 transfer_type;
+ u32 status;
};
struct jz4780_dma_chan {
@@ -130,8 +131,8 @@ struct jz4780_dma_chan {
unsigned int id;
struct dma_pool *desc_pool;
- uint32_t transfer_type;
- uint32_t transfer_shift;
+ u32 transfer_type_tx, transfer_type_rx;
+ u32 transfer_shift;
struct dma_slave_config config;
struct jz4780_dma_desc *desc;
@@ -152,12 +153,12 @@ struct jz4780_dma_dev {
unsigned int irq;
const struct jz4780_dma_soc_data *soc_data;
- uint32_t chan_reserved;
+ u32 chan_reserved;
struct jz4780_dma_chan chan[];
};
struct jz4780_dma_filter_data {
- uint32_t transfer_type;
+ u32 transfer_type_tx, transfer_type_rx;
int channel;
};
@@ -179,26 +180,26 @@ static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
dma_device);
}
-static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
+static inline u32 jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
unsigned int chn, unsigned int reg)
{
return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}
static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
- unsigned int chn, unsigned int reg, uint32_t val)
+ unsigned int chn, unsigned int reg, u32 val)
{
writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}
-static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
+static inline u32 jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
unsigned int reg)
{
return readl(jzdma->ctrl_base + reg);
}
static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
- unsigned int reg, uint32_t val)
+ unsigned int reg, u32 val)
{
writel(val, jzdma->ctrl_base + reg);
}
@@ -226,9 +227,10 @@ static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}
-static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
- struct jz4780_dma_chan *jzchan, unsigned int count,
- enum dma_transaction_type type)
+static struct jz4780_dma_desc *
+jz4780_dma_desc_alloc(struct jz4780_dma_chan *jzchan, unsigned int count,
+ enum dma_transaction_type type,
+ enum dma_transfer_direction direction)
{
struct jz4780_dma_desc *desc;
@@ -248,6 +250,12 @@ static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
desc->count = count;
desc->type = type;
+
+ if (direction == DMA_DEV_TO_MEM)
+ desc->transfer_type = jzchan->transfer_type_rx;
+ else
+ desc->transfer_type = jzchan->transfer_type_tx;
+
return desc;
}
@@ -260,8 +268,8 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(desc);
}
-static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
- unsigned long val, uint32_t *shift)
+static u32 jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
+ unsigned long val, u32 *shift)
{
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
int ord = ffs(val) - 1;
@@ -303,7 +311,7 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
enum dma_transfer_direction direction)
{
struct dma_slave_config *config = &jzchan->config;
- uint32_t width, maxburst, tsz;
+ u32 width, maxburst, tsz;
if (direction == DMA_MEM_TO_DEV) {
desc->dcm = JZ_DMA_DCM_SAI;
@@ -361,7 +369,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
unsigned int i;
int err;
- desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
+ desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE, direction);
if (!desc)
return NULL;
@@ -410,7 +418,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
periods = buf_len / period_len;
- desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
+ desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC, direction);
if (!desc)
return NULL;
@@ -453,16 +461,16 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
struct jz4780_dma_desc *desc;
- uint32_t tsz;
+ u32 tsz;
- desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
+ desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY, 0);
if (!desc)
return NULL;
tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
&jzchan->transfer_shift);
- jzchan->transfer_type = JZ_DMA_DRT_AUTO;
+ desc->transfer_type = JZ_DMA_DRT_AUTO;
desc->desc[0].dsa = src;
desc->desc[0].dta = dest;
@@ -528,7 +536,7 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
/* Set transfer type. */
jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
- jzchan->transfer_type);
+ jzchan->desc->transfer_type);
/*
* Set the transfer count. This is redundant for a descriptor-driven
@@ -670,7 +678,7 @@ static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
{
const unsigned int soc_flags = jzdma->soc_data->flags;
struct jz4780_dma_desc *desc = jzchan->desc;
- uint32_t dcs;
+ u32 dcs;
bool ack = true;
spin_lock(&jzchan->vchan.lock);
@@ -727,7 +735,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
struct jz4780_dma_dev *jzdma = data;
unsigned int nb_channels = jzdma->soc_data->nb_channels;
unsigned long pending;
- uint32_t dmac;
+ u32 dmac;
int i;
pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
@@ -788,7 +796,8 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
return false;
}
- jzchan->transfer_type = data->transfer_type;
+ jzchan->transfer_type_tx = data->transfer_type_tx;
+ jzchan->transfer_type_rx = data->transfer_type_rx;
return true;
}
@@ -800,11 +809,17 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
struct jz4780_dma_filter_data data;
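+
+ /*
+ * Two-cell specifiers use one request type for both directions;
+ * three-cell specifiers carry separate TX and RX request types.
+ */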
- if (dma_spec->args_count != 2)
+ if (dma_spec->args_count == 2) {
+ data.transfer_type_tx = dma_spec->args[0];
+ data.transfer_type_rx = dma_spec->args[0];
+ data.channel = dma_spec->args[1];
+ } else if (dma_spec->args_count == 3) {
+ data.transfer_type_tx = dma_spec->args[0];
+ data.transfer_type_rx = dma_spec->args[1];
+ data.channel = dma_spec->args[2];
+ } else {
return NULL;
-
- data.transfer_type = dma_spec->args[0];
- data.channel = dma_spec->args[1];
+ }
if (data.channel > -1) {
if (data.channel >= jzdma->soc_data->nb_channels) {
@@ -822,7 +837,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
- jzdma->chan[data.channel].transfer_type = data.transfer_type;
+ jzdma->chan[data.channel].transfer_type_tx = data.transfer_type_tx;
+ jzdma->chan[data.channel].transfer_type_rx = data.transfer_type_rx;
return dma_get_slave_channel(
&jzdma->chan[data.channel].vchan.chan);
@@ -938,6 +954,14 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzchan->vchan.desc_free = jz4780_dma_desc_free;
}
+ /*
+ * On JZ4760, chan0 won't enable properly the first time.
+ * Enabling then disabling chan1 will magically make chan0 work
+ * correctly.
+ */
+ jz4780_dma_chan_enable(jzdma, 1);
+ jz4780_dma_chan_disable(jzdma, 1);
+
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_disable_clk;
@@ -1011,12 +1035,36 @@ static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};
+static const struct jz4780_dma_soc_data jz4760_mdma_soc_data = {
+ .nb_channels = 2,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4760_bdma_soc_data = {
+ .nb_channels = 3,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
.nb_channels = 5,
.transfer_ord_max = 6,
.flags = JZ_SOC_DATA_PER_CHAN_PM,
};
+static const struct jz4780_dma_soc_data jz4760b_mdma_soc_data = {
+ .nb_channels = 2,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4760b_bdma_soc_data = {
+ .nb_channels = 3,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 6,
@@ -1045,7 +1093,11 @@ static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
{ .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
+ { .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data },
+ { .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data },
{ .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
+ { .compatible = "ingenic,jz4760b-mdma", .data = &jz4760b_mdma_soc_data },
+ { .compatible = "ingenic,jz4760b-bdma", .data = &jz4760b_bdma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d9f7c097cfd6..2cfa8458b51b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1159,6 +1159,13 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_MEMCPY_SG");
+ return -EIO;
+ }
+
if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index fab412349f7f..573ad8b86804 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -19,30 +19,6 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
/* Interrupt control bits */
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
- struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
-
- pci_msi_mask_irq(data);
-}
-
-void idxd_mask_msix_vectors(struct idxd_device *idxd)
-{
- struct pci_dev *pdev = idxd->pdev;
- int msixcnt = pci_msix_vec_count(pdev);
- int i;
-
- for (i = 0; i < msixcnt; i++)
- idxd_mask_msix_vector(idxd, i);
-}
-
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
- struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
-
- pci_msi_unmask_irq(data);
-}
-
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
union genctrl_reg genctrl;
@@ -382,14 +358,24 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
lockdep_assert_held(&wq->wq_lock);
memset(wq->wqcfg, 0, idxd->wqcfg_size);
wq->type = IDXD_WQT_NONE;
- wq->size = 0;
- wq->group = NULL;
wq->threshold = 0;
wq->priority = 0;
wq->ats_dis = 0;
+ wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
+ wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
+ wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+}
+
+static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
+{
+ lockdep_assert_held(&wq->wq_lock);
+
+ idxd_wq_disable_cleanup(wq);
+ wq->size = 0;
+ wq->group = NULL;
}
static void idxd_wq_ref_release(struct percpu_ref *ref)
@@ -404,19 +390,31 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
int rc;
memset(&wq->wq_active, 0, sizeof(wq->wq_active));
- rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
+ rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
if (rc < 0)
return rc;
reinit_completion(&wq->wq_dead);
+ reinit_completion(&wq->wq_resurrect);
return 0;
}
-void idxd_wq_quiesce(struct idxd_wq *wq)
+void __idxd_wq_quiesce(struct idxd_wq *wq)
{
+ lockdep_assert_held(&wq->wq_lock);
+ reinit_completion(&wq->wq_resurrect);
percpu_ref_kill(&wq->wq_active);
+ complete_all(&wq->wq_resurrect);
wait_for_completion(&wq->wq_dead);
}
+void idxd_wq_quiesce(struct idxd_wq *wq)
+{
+ mutex_lock(&wq->wq_lock);
+ __idxd_wq_quiesce(wq);
+ mutex_unlock(&wq->wq_lock);
+}
+
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
@@ -572,7 +570,6 @@ void idxd_device_reset(struct idxd_device *idxd)
idxd_device_clear_state(idxd);
idxd->state = IDXD_DEV_DISABLED;
idxd_unmask_error_interrupts(idxd);
- idxd_msix_perm_setup(idxd);
spin_unlock(&idxd->dev_lock);
}
@@ -681,9 +678,9 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
memset(&group->grpcfg, 0, sizeof(group->grpcfg));
group->num_engines = 0;
group->num_wqs = 0;
- group->use_token_limit = false;
- group->tokens_allowed = 0;
- group->tokens_reserved = 0;
+ group->use_rdbuf_limit = false;
+ group->rdbufs_allowed = 0;
+ group->rdbufs_reserved = 0;
group->tc_a = -1;
group->tc_b = -1;
}
@@ -699,6 +696,7 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
+ idxd_wq_device_reset_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
}
@@ -711,36 +709,6 @@ void idxd_device_clear_state(struct idxd_device *idxd)
idxd_device_wqs_clear_state(idxd);
}
-void idxd_msix_perm_setup(struct idxd_device *idxd)
-{
- union msix_perm mperm;
- int i, msixcnt;
-
- msixcnt = pci_msix_vec_count(idxd->pdev);
- if (msixcnt < 0)
- return;
-
- mperm.bits = 0;
- mperm.pasid = idxd->pasid;
- mperm.pasid_en = device_pasid_enabled(idxd);
- for (i = 1; i < msixcnt; i++)
- iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
-void idxd_msix_perm_clear(struct idxd_device *idxd)
-{
- union msix_perm mperm;
- int i, msixcnt;
-
- msixcnt = pci_msix_vec_count(idxd->pdev);
- if (msixcnt < 0)
- return;
-
- mperm.bits = 0;
- for (i = 1; i < msixcnt; i++)
- iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
static void idxd_group_config_write(struct idxd_group *group)
{
struct idxd_device *idxd = group->idxd;
@@ -780,10 +748,10 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
int i;
struct device *dev = &idxd->pdev->dev;
- /* Setup bandwidth token limit */
- if (idxd->hw.gen_cap.config_en && idxd->token_limit) {
+ /* Setup bandwidth rdbuf limit */
+ if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
- reg.token_limit = idxd->token_limit;
+ reg.rdbuf_limit = idxd->rdbuf_limit;
iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
}
@@ -827,15 +795,12 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
}
+ if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
+ wq->size = WQ_DEFAULT_QUEUE_DEPTH;
+
/* byte 0-3 */
wq->wqcfg->wq_size = wq->size;
- if (wq->size == 0) {
- idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
- dev_warn(dev, "Incorrect work queue size: 0\n");
- return -EINVAL;
- }
-
/* bytes 4-7 */
wq->wqcfg->wq_thresh = wq->threshold;
@@ -924,13 +889,12 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
group->tc_b = group->grpcfg.flags.tc_b = 1;
else
group->grpcfg.flags.tc_b = group->tc_b;
- group->grpcfg.flags.use_token_limit = group->use_token_limit;
- group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
- if (group->tokens_allowed)
- group->grpcfg.flags.tokens_allowed =
- group->tokens_allowed;
+ group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
+ group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
+ if (group->rdbufs_allowed)
+ group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
else
- group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
+ group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
}
}
@@ -981,8 +945,6 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
if (!wq->group)
continue;
- if (!wq->size)
- continue;
if (wq_shared(wq) && !device_swq_supported(idxd)) {
idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
@@ -1123,7 +1085,7 @@ int idxd_device_load_config(struct idxd_device *idxd)
int i, rc;
reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
- idxd->token_limit = reg.token_limit;
+ idxd->rdbuf_limit = reg.rdbuf_limit;
for (i = 0; i < idxd->max_groups; i++) {
struct idxd_group *group = idxd->groups[i];
@@ -1142,6 +1104,106 @@ int idxd_device_load_config(struct idxd_device *idxd)
return 0;
}
+static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
+{
+ struct idxd_desc *desc, *itr;
+ struct llist_node *head;
+ LIST_HEAD(flist);
+ enum idxd_complete_type ctype;
+
+ spin_lock(&ie->list_lock);
+ head = llist_del_all(&ie->pending_llist);
+ if (head) {
+ llist_for_each_entry_safe(desc, itr, head, llnode)
+ list_add_tail(&desc->list, &ie->work_list);
+ }
+
+ list_for_each_entry_safe(desc, itr, &ie->work_list, list)
+ list_move_tail(&desc->list, &flist);
+ spin_unlock(&ie->list_lock);
+
+ list_for_each_entry_safe(desc, itr, &flist, list) {
+ list_del(&desc->list);
+ ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+ idxd_dma_complete_txd(desc, ctype, true);
+ }
+}
+
+static void idxd_device_set_perm_entry(struct idxd_device *idxd,
+ struct idxd_irq_entry *ie)
+{
+ union msix_perm mperm;
+
+ if (ie->pasid == INVALID_IOASID)
+ return;
+
+ mperm.bits = 0;
+ mperm.pasid = ie->pasid;
+ mperm.pasid_en = 1;
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
+ struct idxd_irq_entry *ie)
+{
+ iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+void idxd_wq_free_irq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_irq_entry *ie = &wq->ie;
+
+ synchronize_irq(ie->vector);
+ free_irq(ie->vector, ie);
+ idxd_flush_pending_descs(ie);
+ if (idxd->request_int_handles)
+ idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
+ idxd_device_clear_perm_entry(idxd, ie);
+ ie->vector = -1;
+ ie->int_handle = INVALID_INT_HANDLE;
+ ie->pasid = INVALID_IOASID;
+}
+
+int idxd_wq_request_irq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct idxd_irq_entry *ie;
+ int rc;
+
+ ie = &wq->ie;
+ ie->vector = pci_irq_vector(pdev, ie->id);
+ ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
+ idxd_device_set_perm_entry(idxd, ie);
+
+ rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
+ if (rc < 0) {
+ dev_err(dev, "Failed to request irq %d.\n", ie->vector);
+ goto err_irq;
+ }
+
+ if (idxd->request_int_handles) {
+ rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
+ IDXD_IRQ_MSIX);
+ if (rc < 0)
+ goto err_int_handle;
+ } else {
+ ie->int_handle = ie->id;
+ }
+
+ return 0;
+
+err_int_handle:
+ ie->int_handle = INVALID_INT_HANDLE;
+ free_irq(ie->vector, ie);
+err_irq:
+ idxd_device_clear_perm_entry(idxd, ie);
+ ie->pasid = INVALID_IOASID;
+ return rc;
+}
+
int __drv_enable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index c39e9483206a..bfff59617d04 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -21,20 +21,27 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
}
void idxd_dma_complete_txd(struct idxd_desc *desc,
- enum idxd_complete_type comp_type)
+ enum idxd_complete_type comp_type,
+ bool free_desc)
{
+ struct idxd_device *idxd = desc->wq->idxd;
struct dma_async_tx_descriptor *tx;
struct dmaengine_result res;
int complete = 1;
- if (desc->completion->status == DSA_COMP_SUCCESS)
+ if (desc->completion->status == DSA_COMP_SUCCESS) {
res.result = DMA_TRANS_NOERROR;
- else if (desc->completion->status)
+ } else if (desc->completion->status) {
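+ /*
+ * An invalid interrupt handle is a transient failure; try to
+ * queue the descriptor for resubmission before failing it.
+ */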
+ if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT &&
+ desc->completion->status == DSA_COMP_INT_HANDLE_INVAL &&
+ idxd_queue_int_handle_resubmit(desc))
+ return;
res.result = DMA_TRANS_WRITE_FAILED;
- else if (comp_type == IDXD_COMPLETE_ABORT)
+ } else if (comp_type == IDXD_COMPLETE_ABORT) {
res.result = DMA_TRANS_ABORTED;
- else
+ } else {
complete = 0;
+ }
tx = &desc->txd;
if (complete && tx->cookie) {
@@ -44,6 +51,9 @@ void idxd_dma_complete_txd(struct idxd_desc *desc,
tx->callback = NULL;
tx->callback_result = NULL;
}
+
+ if (free_desc)
+ idxd_free_desc(desc->wq, desc);
}
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
@@ -153,8 +163,10 @@ static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
rc = idxd_submit_desc(wq, desc);
- if (rc < 0)
+ if (rc < 0) {
+ idxd_free_desc(wq, desc);
return rc;
+ }
return cookie;
}
@@ -277,6 +289,14 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_KERNEL;
+
+ rc = idxd_wq_request_irq(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
+ dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
+ goto err_irq;
+ }
+
rc = __drv_enable_wq(wq);
if (rc < 0) {
dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
@@ -310,13 +330,15 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
return 0;
err_dma:
- idxd_wq_quiesce(wq);
+ __idxd_wq_quiesce(wq);
percpu_ref_exit(&wq->wq_active);
err_ref:
idxd_wq_free_resources(wq);
err_res_alloc:
__drv_disable_wq(wq);
err:
+ idxd_wq_free_irq(wq);
+err_irq:
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
return rc;
@@ -327,11 +349,13 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
mutex_lock(&wq->wq_lock);
- idxd_wq_quiesce(wq);
+ __idxd_wq_quiesce(wq);
idxd_unregister_dma_channel(wq);
idxd_wq_free_resources(wq);
__drv_disable_wq(wq);
percpu_ref_exit(&wq->wq_active);
+ idxd_wq_free_irq(wq);
+ wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 0cf8d3145870..da72eb15f610 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -10,6 +10,7 @@
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
+#include <linux/ioasid.h>
#include <linux/perf_event.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
@@ -51,6 +52,9 @@ enum idxd_type {
#define IDXD_NAME_SIZE 128
#define IDXD_PMU_EVENT_MAX 64
+#define IDXD_ENQCMDS_RETRIES 32
+#define IDXD_ENQCMDS_MAX_RETRIES 64
+
struct idxd_device_driver {
const char *name;
enum idxd_dev_type *type;
@@ -64,8 +68,8 @@ extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;
+#define INVALID_INT_HANDLE -1
struct idxd_irq_entry {
- struct idxd_device *idxd;
int id;
int vector;
struct llist_head pending_llist;
@@ -75,6 +79,8 @@ struct idxd_irq_entry {
* and irq thread processing error descriptor.
*/
spinlock_t list_lock;
+ int int_handle;
+ ioasid_t pasid;
};
struct idxd_group {
@@ -84,9 +90,9 @@ struct idxd_group {
int id;
int num_engines;
int num_wqs;
- bool use_token_limit;
- u8 tokens_allowed;
- u8 tokens_reserved;
+ bool use_rdbuf_limit;
+ u8 rdbufs_allowed;
+ u8 rdbufs_reserved;
int tc_a;
int tc_b;
};
@@ -145,6 +151,10 @@ struct idxd_cdev {
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10
+#define WQ_DEFAULT_QUEUE_DEPTH 16
+#define WQ_DEFAULT_MAX_XFER SZ_2M
+#define WQ_DEFAULT_MAX_BATCH 32
+
enum idxd_op_type {
IDXD_OP_BLOCK = 0,
IDXD_OP_NONBLOCK = 1,
@@ -164,13 +174,16 @@ struct idxd_dma_chan {
struct idxd_wq {
void __iomem *portal;
u32 portal_offset;
+ unsigned int enqcmds_retries;
struct percpu_ref wq_active;
struct completion wq_dead;
+ struct completion wq_resurrect;
struct idxd_dev idxd_dev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
struct idxd_device *idxd;
int id;
+ struct idxd_irq_entry ie;
enum idxd_wq_type type;
struct idxd_group *group;
int client_count;
@@ -251,6 +264,7 @@ struct idxd_device {
int id;
int major;
u32 cmd_status;
+ struct idxd_irq_entry ie; /* misc irq, msix 0 */
struct pci_dev *pdev;
void __iomem *reg_base;
@@ -266,6 +280,8 @@ struct idxd_device {
unsigned int pasid;
int num_groups;
+ int irq_cnt;
+ bool request_int_handles;
u32 msix_perm_offset;
u32 wqcfg_offset;
@@ -276,24 +292,20 @@ struct idxd_device {
u32 max_batch_size;
int max_groups;
int max_engines;
- int max_tokens;
+ int max_rdbufs;
int max_wqs;
int max_wq_size;
- int token_limit;
- int nr_tokens; /* non-reserved tokens */
+ int rdbuf_limit;
+ int nr_rdbufs; /* non-reserved read buffers */
unsigned int wqcfg_size;
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
- int num_wq_irqs;
- struct idxd_irq_entry *irq_entries;
struct idxd_dma_dev *idxd_dma;
struct workqueue_struct *wq;
struct work_struct work;
- int *int_handles;
-
struct idxd_pmu *idxd_pmu;
};
@@ -380,6 +392,21 @@ static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
idev->type = type;
}
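+
+/* MSI-X vector 0 is the misc interrupt; vector N (N >= 1) belongs to wq N - 1. */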
+static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
+{
+ return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
+}
+
+static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
+{
+ return container_of(ie, struct idxd_wq, ie);
+}
+
+static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
+{
+ return container_of(ie, struct idxd_device, ie);
+}
+
extern struct bus_type dsa_bus_type;
extern bool support_enqcmd;
@@ -518,17 +545,13 @@ void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);
+bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
/* device interrupt control */
-void idxd_msix_perm_setup(struct idxd_device *idxd);
-void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
-void idxd_mask_msix_vectors(struct idxd_device *idxd);
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
int idxd_register_idxd_drv(void);
@@ -564,13 +587,17 @@ int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
+void __idxd_wq_quiesce(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
+void idxd_wq_free_irq(struct idxd_wq *wq);
+int idxd_wq_request_irq(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
@@ -579,7 +606,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
- enum idxd_complete_type comp_type);
+ enum idxd_complete_type comp_type, bool free_desc);
/* cdev */
int idxd_cdev_register(void);
@@ -603,10 +630,4 @@ static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
- idxd_dma_complete_txd(desc, reason);
- idxd_free_desc(desc->wq, desc);
-}
-
#endif
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 7bf03f371ce1..08a5f4310188 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -72,7 +72,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
struct device *dev = &pdev->dev;
- struct idxd_irq_entry *irq_entry;
+ struct idxd_irq_entry *ie;
int i, msixcnt;
int rc = 0;
@@ -81,6 +81,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
dev_err(dev, "Not MSI-X interrupt capable.\n");
return -ENOSPC;
}
+ idxd->irq_cnt = msixcnt;
rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
if (rc != msixcnt) {
@@ -89,87 +90,34 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
}
dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
- /*
- * We implement 1 completion list per MSI-X entry except for
- * entry 0, which is for errors and others.
- */
- idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
- GFP_KERNEL, dev_to_node(dev));
- if (!idxd->irq_entries) {
- rc = -ENOMEM;
- goto err_irq_entries;
- }
-
- for (i = 0; i < msixcnt; i++) {
- idxd->irq_entries[i].id = i;
- idxd->irq_entries[i].idxd = idxd;
- idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
- spin_lock_init(&idxd->irq_entries[i].list_lock);
- }
-
- idxd_msix_perm_setup(idxd);
- irq_entry = &idxd->irq_entries[0];
- rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
- 0, "idxd-misc", irq_entry);
+ ie = idxd_get_ie(idxd, 0);
+ ie->vector = pci_irq_vector(pdev, 0);
+ rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
if (rc < 0) {
dev_err(dev, "Failed to allocate misc interrupt.\n");
goto err_misc_irq;
}
+ dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);
- dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
-
- /* first MSI-X entry is not for wq interrupts */
- idxd->num_wq_irqs = msixcnt - 1;
+ for (i = 0; i < idxd->max_wqs; i++) {
+ int msix_idx = i + 1;
- for (i = 1; i < msixcnt; i++) {
- irq_entry = &idxd->irq_entries[i];
+ ie = idxd_get_ie(idxd, msix_idx);
+ ie->id = msix_idx;
+ ie->int_handle = INVALID_INT_HANDLE;
+ ie->pasid = INVALID_IOASID;
- init_llist_head(&idxd->irq_entries[i].pending_llist);
- INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
- rc = request_threaded_irq(irq_entry->vector, NULL,
- idxd_wq_thread, 0, "idxd-portal", irq_entry);
- if (rc < 0) {
- dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
- goto err_wq_irqs;
- }
-
- dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
- if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
- /*
- * The MSIX vector enumeration starts at 1 with vector 0 being the
- * misc interrupt that handles non I/O completion events. The
- * interrupt handles are for IMS enumeration on guest. The misc
- * interrupt vector does not require a handle and therefore we start
- * the int_handles at index 0. Since 'i' starts at 1, the first
- * int_handles index will be 0.
- */
- rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1],
- IDXD_IRQ_MSIX);
- if (rc < 0) {
- free_irq(irq_entry->vector, irq_entry);
- goto err_wq_irqs;
- }
- dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]);
- }
+ spin_lock_init(&ie->list_lock);
+ init_llist_head(&ie->pending_llist);
+ INIT_LIST_HEAD(&ie->work_list);
}
idxd_unmask_error_interrupts(idxd);
return 0;
- err_wq_irqs:
- while (--i >= 0) {
- irq_entry = &idxd->irq_entries[i];
- free_irq(irq_entry->vector, irq_entry);
- if (i != 0)
- idxd_device_release_int_handle(idxd,
- idxd->int_handles[i], IDXD_IRQ_MSIX);
- }
err_misc_irq:
- /* Disable error interrupt generation */
idxd_mask_error_interrupts(idxd);
- idxd_msix_perm_clear(idxd);
- err_irq_entries:
pci_free_irq_vectors(pdev);
dev_err(dev, "No usable interrupts\n");
return rc;
@@ -178,26 +126,16 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
- struct idxd_irq_entry *irq_entry;
- int i, msixcnt;
+ struct idxd_irq_entry *ie;
+ int msixcnt;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt <= 0)
return;
- irq_entry = &idxd->irq_entries[0];
- free_irq(irq_entry->vector, irq_entry);
-
- for (i = 1; i < msixcnt; i++) {
-
- irq_entry = &idxd->irq_entries[i];
- if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
- idxd_device_release_int_handle(idxd, idxd->int_handles[i],
- IDXD_IRQ_MSIX);
- free_irq(irq_entry->vector, irq_entry);
- }
-
+ ie = idxd_get_ie(idxd, 0);
idxd_mask_error_interrupts(idxd);
+ free_irq(ie->vector, ie);
pci_free_irq_vectors(pdev);
}
@@ -237,8 +175,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
init_completion(&wq->wq_dead);
- wq->max_xfer_bytes = idxd->max_xfer_bytes;
- wq->max_batch_size = idxd->max_batch_size;
+ init_completion(&wq->wq_resurrect);
+ wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
+ wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+ wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
put_device(conf_dev);
@@ -379,13 +319,6 @@ static int idxd_setup_internals(struct idxd_device *idxd)
init_waitqueue_head(&idxd->cmd_waitq);
- if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
- idxd->int_handles = kcalloc_node(idxd->max_wqs, sizeof(int), GFP_KERNEL,
- dev_to_node(dev));
- if (!idxd->int_handles)
- return -ENOMEM;
- }
-
rc = idxd_setup_wqs(idxd);
if (rc < 0)
goto err_wqs;
@@ -416,7 +349,6 @@ static int idxd_setup_internals(struct idxd_device *idxd)
for (i = 0; i < idxd->max_wqs; i++)
put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
- kfree(idxd->int_handles);
return rc;
}
@@ -451,6 +383,10 @@ static void idxd_read_caps(struct idxd_device *idxd)
dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
}
+ /* check if the device supports requesting int handles */
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
+ idxd->request_int_handles = true;
+
idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
@@ -464,9 +400,9 @@ static void idxd_read_caps(struct idxd_device *idxd)
dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
idxd->max_groups = idxd->hw.group_cap.num_groups;
dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
- idxd->max_tokens = idxd->hw.group_cap.total_tokens;
- dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
- idxd->nr_tokens = idxd->max_tokens;
+ idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
+ dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
+ idxd->nr_rdbufs = idxd->max_rdbufs;
/* read engine capabilities */
idxd->hw.engine_cap.bits =
@@ -611,8 +547,6 @@ static int idxd_probe(struct idxd_device *idxd)
if (rc)
goto err_config;
- dev_dbg(dev, "IDXD interrupt setup complete.\n");
-
idxd->major = idxd_cdev_get_major(idxd);
rc = perfmon_pmu_init(idxd);
@@ -708,32 +642,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
}
-static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
-{
- struct idxd_desc *desc, *itr;
- struct llist_node *head;
-
- head = llist_del_all(&ie->pending_llist);
- if (!head)
- return;
-
- llist_for_each_entry_safe(desc, itr, head, llnode) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
- idxd_free_desc(desc->wq, desc);
- }
-}
-
-static void idxd_flush_work_list(struct idxd_irq_entry *ie)
-{
- struct idxd_desc *desc, *iter;
-
- list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
- list_del(&desc->list);
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
- idxd_free_desc(desc->wq, desc);
- }
-}
-
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
struct idxd_wq *wq;
@@ -746,47 +654,19 @@ void idxd_wqs_quiesce(struct idxd_device *idxd)
}
}
-static void idxd_release_int_handles(struct idxd_device *idxd)
-{
- struct device *dev = &idxd->pdev->dev;
- int i, rc;
-
- for (i = 0; i < idxd->num_wq_irqs; i++) {
- if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) {
- rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i],
- IDXD_IRQ_MSIX);
- if (rc < 0)
- dev_warn(dev, "irq handle %d release failed\n",
- idxd->int_handles[i]);
- else
- dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i]);
- }
- }
-}
-
static void idxd_shutdown(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
- int rc, i;
struct idxd_irq_entry *irq_entry;
- int msixcnt = pci_msix_vec_count(pdev);
+ int rc;
rc = idxd_device_disable(idxd);
if (rc)
dev_err(&pdev->dev, "Disabling device failed\n");
- dev_dbg(&pdev->dev, "%s called\n", __func__);
- idxd_mask_msix_vectors(idxd);
+ irq_entry = &idxd->ie;
+ synchronize_irq(irq_entry->vector);
idxd_mask_error_interrupts(idxd);
-
- for (i = 0; i < msixcnt; i++) {
- irq_entry = &idxd->irq_entries[i];
- synchronize_irq(irq_entry->vector);
- if (i == 0)
- continue;
- idxd_flush_pending_llist(irq_entry);
- idxd_flush_work_list(irq_entry);
- }
flush_workqueue(idxd->wq);
}
@@ -794,8 +674,6 @@ static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
struct idxd_irq_entry *irq_entry;
- int msixcnt = pci_msix_vec_count(pdev);
- int i;
idxd_unregister_devices(idxd);
/*
@@ -811,12 +689,8 @@ static void idxd_remove(struct pci_dev *pdev)
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- for (i = 0; i < msixcnt; i++) {
- irq_entry = &idxd->irq_entries[i];
- free_irq(irq_entry->vector, irq_entry);
- }
- idxd_msix_perm_clear(idxd);
- idxd_release_int_handles(idxd);
+ irq_entry = idxd_get_ie(idxd, 0);
+ free_irq(irq_entry->vector, irq_entry);
pci_free_irq_vectors(pdev);
pci_iounmap(pdev, idxd->reg_base);
iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
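
Note: the reworked idxd_setup_interrupts() above requests a threaded interrupt with a NULL primary handler and no IRQF_ONESHOT, which is permitted because MSI-X interrupt chips are marked one-shot safe. A minimal sketch of the general pattern, with hypothetical names (my_thread_fn, "my-dev" are illustrative, not part of the driver):

    #include <linux/interrupt.h>

    static irqreturn_t my_thread_fn(int irq, void *data)
    {
    	/* deferred handling runs here, in process context */
    	return IRQ_HANDLED;
    }

    static int my_request(unsigned int irq, void *data)
    {
    	/*
    	 * NULL primary handler: the core wakes the thread. For
    	 * non-MSI lines, IRQF_ONESHOT keeps the line masked until
    	 * my_thread_fn() returns.
    	 */
    	return request_threaded_irq(irq, NULL, my_thread_fn,
    				    IRQF_ONESHOT, "my-dev", data);
    }
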
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index cf2c8bc4f147..743ead5ebc57 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
+#include <linux/delay.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
@@ -22,6 +23,16 @@ struct idxd_fault {
struct idxd_device *idxd;
};
+struct idxd_resubmit {
+ struct work_struct work;
+ struct idxd_desc *desc;
+};
+
+struct idxd_int_handle_revoke {
+ struct work_struct work;
+ struct idxd_device *idxd;
+};
+
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -55,6 +66,162 @@ static void idxd_device_reinit(struct work_struct *work)
idxd_device_clear_state(idxd);
}
+/*
+ * The function sends a drain descriptor for the interrupt handle. The drain
+ * ensures that all descriptors with this interrupt handle are flushed and the
+ * resulting interrupt allows the cleanup of the outstanding descriptors.
+ */
+static void idxd_int_handle_revoke_drain(struct idxd_irq_entry *ie)
+{
+ struct idxd_wq *wq = ie_to_wq(ie);
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ struct dsa_hw_desc desc = {};
+ void __iomem *portal;
+ int rc;
+
+ /* Issue a simple drain operation with interrupt but no completion record */
+ desc.flags = IDXD_OP_FLAG_RCI;
+ desc.opcode = DSA_OPCODE_DRAIN;
+ desc.priv = 1;
+
+ if (ie->pasid != INVALID_IOASID)
+ desc.pasid = ie->pasid;
+ desc.int_handle = ie->int_handle;
+ portal = idxd_wq_portal_addr(wq);
+
+ /*
+ * The wmb() makes sure the descriptor is fully written before
+ * it is issued.
+ */
+ wmb();
+ if (wq_dedicated(wq)) {
+ iosubmit_cmds512(portal, &desc, 1);
+ } else {
+ rc = idxd_enqcmds(wq, portal, &desc);
+ /* This should not fail unless the hardware has failed. */
+ if (rc < 0)
+ dev_warn(dev, "Failed to submit drain desc on wq %d\n", wq->id);
+ }
+}
+
+static void idxd_abort_invalid_int_handle_descs(struct idxd_irq_entry *ie)
+{
+ LIST_HEAD(flist);
+ struct idxd_desc *d, *t;
+ struct llist_node *head;
+
+ spin_lock(&ie->list_lock);
+ head = llist_del_all(&ie->pending_llist);
+ if (head) {
+ llist_for_each_entry_safe(d, t, head, llnode)
+ list_add_tail(&d->list, &ie->work_list);
+ }
+
+ list_for_each_entry_safe(d, t, &ie->work_list, list) {
+ if (d->completion->status == DSA_COMP_INT_HANDLE_INVAL)
+ list_move_tail(&d->list, &flist);
+ }
+ spin_unlock(&ie->list_lock);
+
+ list_for_each_entry_safe(d, t, &flist, list) {
+ list_del(&d->list);
+ idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true);
+ }
+}
+
+static void idxd_int_handle_revoke(struct work_struct *work)
+{
+ struct idxd_int_handle_revoke *revoke =
+ container_of(work, struct idxd_int_handle_revoke, work);
+ struct idxd_device *idxd = revoke->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ int i, new_handle, rc;
+
+ if (!idxd->request_int_handles) {
+ kfree(revoke);
+ dev_warn(dev, "Unexpected int handle refresh interrupt.\n");
+ return;
+ }
+
+ /*
+ * The loop attempts to acquire a new interrupt handle for every
+ * interrupt vector that supports a handle. If a new handle is acquired
+ * and the wq is of kernel type, the driver kills the percpu_ref to
+ * pause all ongoing descriptor submissions, changes the interrupt
+ * handle, then revives the percpu_ref and wakes all pending submitters
+ * to try again. A drain is sent for the interrupt handle at the end to
+ * make sure all descriptors with an invalid int handle are processed.
+ */
+ for (i = 1; i < idxd->irq_cnt; i++) {
+ struct idxd_irq_entry *ie = idxd_get_ie(idxd, i);
+ struct idxd_wq *wq = ie_to_wq(ie);
+
+ if (ie->int_handle == INVALID_INT_HANDLE)
+ continue;
+
+ rc = idxd_device_request_int_handle(idxd, i, &new_handle, IDXD_IRQ_MSIX);
+ if (rc < 0) {
+ dev_warn(dev, "get int handle %d failed: %d\n", i, rc);
+ /*
+ * Failed to acquire a new interrupt handle. Kill the WQ and
+ * release all pending submitters. The submitters will get an
+ * error return code and handle it appropriately.
+ */
+ ie->int_handle = INVALID_INT_HANDLE;
+ idxd_wq_quiesce(wq);
+ idxd_abort_invalid_int_handle_descs(ie);
+ continue;
+ }
+
+ /* No change in interrupt handle, nothing needs to be done */
+ if (ie->int_handle == new_handle)
+ continue;
+
+ if (wq->state != IDXD_WQ_ENABLED || wq->type != IDXD_WQT_KERNEL) {
+ /*
+ * All the MSIX interrupts are allocated at once during probe.
+ * Therefore all interrupts must be updated even if the WQ
+ * doesn't support interrupt operations.
+ */
+ ie->int_handle = new_handle;
+ continue;
+ }
+
+ mutex_lock(&wq->wq_lock);
+ reinit_completion(&wq->wq_resurrect);
+
+ /* Kill percpu_ref to pause additional descriptor submissions */
+ percpu_ref_kill(&wq->wq_active);
+
+ /* Wait for all submitters quiesce before we change interrupt handle */
+ wait_for_completion(&wq->wq_dead);
+
+ ie->int_handle = new_handle;
+
+ /* Revive percpu ref and wake up all the waiting submitters */
+ percpu_ref_reinit(&wq->wq_active);
+ complete_all(&wq->wq_resurrect);
+ mutex_unlock(&wq->wq_lock);
+
+ /*
+ * The delay here waits for all MOVDIR64B commands that were
+ * issued before percpu_ref_kill() to reach the PCIe domain
+ * before the drain is issued. The driver needs to ensure that
+ * the drain descriptor does not overtake the other issued
+ * descriptors that contain the invalid interrupt handle;
+ * otherwise the interrupt of the drain descriptor cannot
+ * guarantee the cleanup of all the descriptors with the
+ * invalid interrupt handle.
+ */
+ if (wq_dedicated(wq))
+ udelay(100);
+ idxd_int_handle_revoke_drain(ie);
+ }
+ kfree(revoke);
+}
+
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
struct device *dev = &idxd->pdev->dev;
@@ -101,6 +268,23 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
err = true;
}
+ if (cause & IDXD_INTC_INT_HANDLE_REVOKED) {
+ struct idxd_int_handle_revoke *revoke;
+
+ val |= IDXD_INTC_INT_HANDLE_REVOKED;
+
+ revoke = kzalloc(sizeof(*revoke), GFP_ATOMIC);
+ if (revoke) {
+ revoke->idxd = idxd;
+ INIT_WORK(&revoke->work, idxd_int_handle_revoke);
+ queue_work(idxd->wq, &revoke->work);
+
+ } else {
+ dev_err(dev, "Failed to allocate work for int handle revoke\n");
+ idxd_wqs_quiesce(idxd);
+ }
+ }
+
if (cause & IDXD_INTC_CMD) {
val |= IDXD_INTC_CMD;
complete(idxd->cmd_done);
@@ -157,7 +341,7 @@ halt:
irqreturn_t idxd_misc_thread(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
- struct idxd_device *idxd = irq_entry->idxd;
+ struct idxd_device *idxd = ie_to_idxd(irq_entry);
int rc;
u32 cause;
@@ -177,6 +361,51 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
return IRQ_HANDLED;
}
+static void idxd_int_handle_resubmit_work(struct work_struct *work)
+{
+ struct idxd_resubmit *irw = container_of(work, struct idxd_resubmit, work);
+ struct idxd_desc *desc = irw->desc;
+ struct idxd_wq *wq = desc->wq;
+ int rc;
+
+ desc->completion->status = 0;
+ rc = idxd_submit_desc(wq, desc);
+ if (rc < 0) {
+ dev_dbg(&wq->idxd->pdev->dev, "Failed to resubmit desc %d to wq %d.\n",
+ desc->id, wq->id);
+ /*
+ * If the error is not -EAGAIN, the submission failed because the wq has
+ * been killed rather than because of an ENQCMDS failure. The driver needs
+ * to notify the submitter of the failure by reporting abort status.
+ *
+ * -EAGAIN comes from an ENQCMDS failure; idxd_submit_desc() handles the
+ * abort in that case.
+ */
+ if (rc != -EAGAIN) {
+ desc->completion->status = IDXD_COMP_DESC_ABORT;
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, false);
+ }
+ idxd_free_desc(wq, desc);
+ }
+ kfree(irw);
+}
+
+bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc)
+{
+ struct idxd_wq *wq = desc->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_resubmit *irw;
+
+ irw = kzalloc(sizeof(*irw), GFP_KERNEL);
+ if (!irw)
+ return false;
+
+ irw->desc = desc;
+ INIT_WORK(&irw->work, idxd_int_handle_resubmit_work);
+ queue_work(idxd->wq, &irw->work);
+ return true;
+}
+
static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
{
struct idxd_desc *desc, *t;
@@ -195,11 +424,11 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
- complete_desc(desc, IDXD_COMPLETE_ABORT);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
continue;
}
- complete_desc(desc, IDXD_COMPLETE_NORMAL);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
} else {
spin_lock(&irq_entry->list_lock);
list_add_tail(&desc->list,
@@ -238,11 +467,11 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
- complete_desc(desc, IDXD_COMPLETE_ABORT);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
continue;
}
- complete_desc(desc, IDXD_COMPLETE_NORMAL);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
}
}
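
The revoke and resubmit paths above share one pattern: allocate a small carrier struct, INIT_WORK() it, and queue it on the device workqueue. A minimal, self-contained sketch of that pattern (all names are hypothetical):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_work {
    	struct work_struct work;
    	int payload;
    };

    static void my_work_fn(struct work_struct *work)
    {
    	struct my_work *mw = container_of(work, struct my_work, work);

    	/* process mw->payload in process context */
    	kfree(mw);
    }

    static bool my_defer(struct workqueue_struct *wq, int payload)
    {
    	/* GFP_ATOMIC: the caller may be in interrupt context, as above */
    	struct my_work *mw = kzalloc(sizeof(*mw), GFP_ATOMIC);

    	if (!mw)
    		return false;
    	mw->payload = payload;
    	INIT_WORK(&mw->work, my_work_fn);
    	queue_work(wq, &mw->work);
    	return true;
    }
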
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 262c8220adbd..aa642aecdc0b 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -64,9 +64,9 @@ union wq_cap_reg {
union group_cap_reg {
struct {
u64 num_groups:8;
- u64 total_tokens:8;
- u64 token_en:1;
- u64 token_limit:1;
+ u64 total_rdbufs:8; /* formerly total_tokens */
+ u64 rdbuf_ctrl:1; /* formerly token_en */
+ u64 rdbuf_limit:1; /* formerly token_limit */
u64 rsvd:46;
};
u64 bits;
@@ -110,7 +110,7 @@ union offsets_reg {
#define IDXD_GENCFG_OFFSET 0x80
union gencfg_reg {
struct {
- u32 token_limit:8;
+ u32 rdbuf_limit:8;
u32 rsvd:4;
u32 user_int_en:1;
u32 rsvd2:19;
@@ -158,6 +158,7 @@ enum idxd_device_reset_type {
#define IDXD_INTC_OCCUPY 0x04
#define IDXD_INTC_PERFMON_OVFL 0x08
#define IDXD_INTC_HALT_STATE 0x10
+#define IDXD_INTC_INT_HANDLE_REVOKED 0x80000000
#define IDXD_CMD_OFFSET 0xa0
union idxd_command_reg {
@@ -287,10 +288,10 @@ union group_flags {
u32 tc_a:3;
u32 tc_b:3;
u32 rsvd:1;
- u32 use_token_limit:1;
- u32 tokens_reserved:8;
+ u32 use_rdbuf_limit:1;
+ u32 rdbufs_reserved:8;
u32 rsvd2:4;
- u32 tokens_allowed:8;
+ u32 rdbufs_allowed:8;
u32 rsvd3:4;
};
u32 bits;
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 83452fbbb168..e289fd48711a 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -21,15 +21,6 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
if (device_pasid_enabled(idxd))
desc->hw->pasid = idxd->pasid;
- /*
- * On host, MSIX vecotr 0 is used for misc interrupt. Therefore when we match
- * vector 1:1 to the WQ id, we need to add 1
- */
- if (!idxd->int_handles)
- desc->hw->int_handle = wq->id + 1;
- else
- desc->hw->int_handle = idxd->int_handles[wq->id];
-
return desc;
}
@@ -134,35 +125,58 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
spin_unlock(&ie->list_lock);
if (found)
- complete_desc(found, IDXD_COMPLETE_ABORT);
+ idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false);
/*
- * complete_desc() will return desc to allocator and the desc can be
- * acquired by a different process and the desc->list can be modified.
- * Delete desc from list so the list trasversing does not get corrupted
- * by the other process.
+ * Completing the descriptor returns it to the allocator, where it
+ * can be acquired by a different process that may then modify
+ * desc->list. Delete desc from the list so the list traversal does
+ * not get corrupted by the other process.
*/
list_for_each_entry_safe(d, t, &flist, list) {
list_del_init(&d->list);
- complete_desc(d, IDXD_COMPLETE_NORMAL);
+ idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true);
}
}
+/*
+ * ENQCMDS typically fails when the WQ is inactive or busy. On host submission, the
+ * driver limits the number of driver-allocated descriptors to the wq size and thus has
+ * good control over how many descriptors are submitted to a shared wq. However, when
+ * the swq is exported to a guest kernel, it may be shared with multiple guest kernels,
+ * which significantly increases the likelihood of ENQCMDS returning busy on the swq.
+ * A tunable retry mechanism allows the driver to keep trying for a while before giving
+ * up. The retry count can be tuned by the system administrator via a sysfs knob.
+ */
+int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
+{
+ int rc, retries = 0;
+
+ do {
+ rc = enqcmds(portal, desc);
+ if (rc == 0)
+ break;
+ cpu_relax();
+ } while (retries++ < wq->enqcmds_retries);
+
+ return rc;
+}
+
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
struct idxd_device *idxd = wq->idxd;
struct idxd_irq_entry *ie = NULL;
+ u32 desc_flags = desc->hw->flags;
void __iomem *portal;
int rc;
- if (idxd->state != IDXD_DEV_ENABLED) {
- idxd_free_desc(wq, desc);
+ if (idxd->state != IDXD_DEV_ENABLED)
return -EIO;
- }
if (!percpu_ref_tryget_live(&wq->wq_active)) {
- idxd_free_desc(wq, desc);
- return -ENXIO;
+ wait_for_completion(&wq->wq_resurrect);
+ if (!percpu_ref_tryget_live(&wq->wq_active))
+ return -ENXIO;
}
portal = idxd_wq_portal_addr(wq);
@@ -178,28 +192,21 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* Pending the descriptor to the lockless list for the irq_entry
* that we designated the descriptor to.
*/
- if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
- ie = &idxd->irq_entries[wq->id + 1];
+ if (desc_flags & IDXD_OP_FLAG_RCI) {
+ ie = &wq->ie;
+ desc->hw->int_handle = ie->int_handle;
llist_add(&desc->llnode, &ie->pending_llist);
}
if (wq_dedicated(wq)) {
iosubmit_cmds512(portal, desc->hw, 1);
} else {
- /*
- * It's not likely that we would receive queue full rejection
- * since the descriptor allocation gates at wq size. If we
- * receive a -EAGAIN, that means something went wrong such as the
- * device is not accepting descriptor at all.
- */
- rc = enqcmds(portal, desc->hw);
+ rc = idxd_enqcmds(wq, portal, desc->hw);
if (rc < 0) {
percpu_ref_put(&wq->wq_active);
/* abort operation frees the descriptor */
if (ie)
llist_abort_desc(wq, ie, desc);
- else
- idxd_free_desc(wq, desc);
return rc;
}
}
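
idxd_submit_desc() above gates submissions on a percpu_ref and, when the ref has been killed for an interrupt-handle change, parks submitters on wq_resurrect. A minimal sketch of that gate, under the assumption that the ref is revived with percpu_ref_reinit() and the completion signalled with complete_all(), as in idxd_int_handle_revoke() (names are hypothetical):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/percpu-refcount.h>

    /* hypothetical gate mirroring wq_active/wq_resurrect */
    static int my_enter(struct percpu_ref *active, struct completion *resurrect)
    {
    	if (percpu_ref_tryget_live(active))
    		return 0;
    	/* ref was killed; wait for it to be revived, then retry once */
    	wait_for_completion(resurrect);
    	if (!percpu_ref_tryget_live(active))
    		return -ENXIO;
    	/* matched by percpu_ref_put(active) when submission completes */
    	return 0;
    }
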
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index a9025be940db..7e19ab92b61a 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -99,31 +99,39 @@ struct device_type idxd_engine_device_type = {
/* Group attributes */
-static void idxd_set_free_tokens(struct idxd_device *idxd)
+static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
- int i, tokens;
+ int i, rdbufs;
- for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+ for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
struct idxd_group *g = idxd->groups[i];
- tokens += g->tokens_reserved;
+ rdbufs += g->rdbufs_reserved;
}
- idxd->nr_tokens = idxd->max_tokens - tokens;
+ idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
+}
+
+static ssize_t group_read_buffers_reserved_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+
+ return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}
static ssize_t group_tokens_reserved_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group = confdev_to_group(dev);
-
- return sysfs_emit(buf, "%u\n", group->tokens_reserved);
+ dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
+ return group_read_buffers_reserved_show(dev, attr, buf);
}
-static ssize_t group_tokens_reserved_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t group_read_buffers_reserved_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
@@ -143,33 +151,53 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (val > idxd->max_tokens)
+ if (val > idxd->max_rdbufs)
return -EINVAL;
- if (val > idxd->nr_tokens + group->tokens_reserved)
+ if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
return -EINVAL;
- group->tokens_reserved = val;
- idxd_set_free_tokens(idxd);
+ group->rdbufs_reserved = val;
+ idxd_set_free_rdbufs(idxd);
return count;
}
+static ssize_t group_tokens_reserved_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
+ return group_read_buffers_reserved_store(dev, attr, buf, count);
+}
+
static struct device_attribute dev_attr_group_tokens_reserved =
__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
group_tokens_reserved_store);
+static struct device_attribute dev_attr_group_read_buffers_reserved =
+ __ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
+ group_read_buffers_reserved_store);
+
+static ssize_t group_read_buffers_allowed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+
+ return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
+}
+
static ssize_t group_tokens_allowed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group = confdev_to_group(dev);
-
- return sysfs_emit(buf, "%u\n", group->tokens_allowed);
+ dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
+ return group_read_buffers_allowed_show(dev, attr, buf);
}
-static ssize_t group_tokens_allowed_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t group_read_buffers_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
@@ -190,29 +218,49 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
return -EPERM;
if (val < 4 * group->num_engines ||
- val > group->tokens_reserved + idxd->nr_tokens)
+ val > group->rdbufs_reserved + idxd->nr_rdbufs)
return -EINVAL;
- group->tokens_allowed = val;
+ group->rdbufs_allowed = val;
return count;
}
+static ssize_t group_tokens_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
+ return group_read_buffers_allowed_store(dev, attr, buf, count);
+}
+
static struct device_attribute dev_attr_group_tokens_allowed =
__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
group_tokens_allowed_store);
+static struct device_attribute dev_attr_group_read_buffers_allowed =
+ __ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
+ group_read_buffers_allowed_store);
+
+static ssize_t group_use_read_buffer_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+
+ return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
+}
+
static ssize_t group_use_token_limit_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group = confdev_to_group(dev);
-
- return sysfs_emit(buf, "%u\n", group->use_token_limit);
+ dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
+ return group_use_read_buffer_limit_show(dev, attr, buf);
}
-static ssize_t group_use_token_limit_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t group_use_read_buffer_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
@@ -232,17 +280,29 @@ static ssize_t group_use_token_limit_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (idxd->token_limit == 0)
+ if (idxd->rdbuf_limit == 0)
return -EPERM;
- group->use_token_limit = !!val;
+ group->use_rdbuf_limit = !!val;
return count;
}
+static ssize_t group_use_token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
+ return group_use_read_buffer_limit_store(dev, attr, buf, count);
+}
+
static struct device_attribute dev_attr_group_use_token_limit =
__ATTR(use_token_limit, 0644, group_use_token_limit_show,
group_use_token_limit_store);
+static struct device_attribute dev_attr_group_use_read_buffer_limit =
+ __ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
+ group_use_read_buffer_limit_store);
+
static ssize_t group_engines_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -387,8 +447,11 @@ static struct attribute *idxd_group_attributes[] = {
&dev_attr_group_work_queues.attr,
&dev_attr_group_engines.attr,
&dev_attr_group_use_token_limit.attr,
+ &dev_attr_group_use_read_buffer_limit.attr,
&dev_attr_group_tokens_allowed.attr,
+ &dev_attr_group_read_buffers_allowed.attr,
&dev_attr_group_tokens_reserved.attr,
+ &dev_attr_group_read_buffers_reserved.attr,
&dev_attr_group_traffic_class_a.attr,
&dev_attr_group_traffic_class_b.attr,
NULL,
@@ -945,6 +1008,41 @@ static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *at
static struct device_attribute dev_attr_wq_occupancy =
__ATTR(occupancy, 0444, wq_occupancy_show, NULL);
+static ssize_t wq_enqcmds_retries_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ if (wq_dedicated(wq))
+ return -EOPNOTSUPP;
+
+ return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
+}
+
+static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ int rc;
+ unsigned int retries;
+
+ if (wq_dedicated(wq))
+ return -EOPNOTSUPP;
+
+ rc = kstrtouint(buf, 10, &retries);
+ if (rc < 0)
+ return rc;
+
+ if (retries > IDXD_ENQCMDS_MAX_RETRIES)
+ retries = IDXD_ENQCMDS_MAX_RETRIES;
+
+ wq->enqcmds_retries = retries;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_enqcmds_retries =
+ __ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -961,6 +1059,7 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_max_batch_size.attr,
&dev_attr_wq_ats_disable.attr,
&dev_attr_wq_occupancy.attr,
+ &dev_attr_wq_enqcmds_retries.attr,
NULL,
};
@@ -1156,26 +1255,42 @@ static ssize_t errors_show(struct device *dev,
}
static DEVICE_ATTR_RO(errors);
+static ssize_t max_read_buffers_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
+}
+
static ssize_t max_tokens_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
+ return max_read_buffers_show(dev, attr, buf);
+}
+
+static DEVICE_ATTR_RO(max_tokens); /* deprecated */
+static DEVICE_ATTR_RO(max_read_buffers);
+
+static ssize_t read_buffer_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
struct idxd_device *idxd = confdev_to_idxd(dev);
- return sysfs_emit(buf, "%u\n", idxd->max_tokens);
+ return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}
-static DEVICE_ATTR_RO(max_tokens);
static ssize_t token_limit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd = confdev_to_idxd(dev);
-
- return sysfs_emit(buf, "%u\n", idxd->token_limit);
+ dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
+ return read_buffer_limit_show(dev, attr, buf);
}
-static ssize_t token_limit_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t read_buffer_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct idxd_device *idxd = confdev_to_idxd(dev);
unsigned long val;
@@ -1191,16 +1306,26 @@ static ssize_t token_limit_store(struct device *dev,
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
- if (!idxd->hw.group_cap.token_limit)
+ if (!idxd->hw.group_cap.rdbuf_limit)
return -EPERM;
- if (val > idxd->hw.group_cap.total_tokens)
+ if (val > idxd->hw.group_cap.total_rdbufs)
return -EINVAL;
- idxd->token_limit = val;
+ idxd->rdbuf_limit = val;
return count;
}
-static DEVICE_ATTR_RW(token_limit);
+
+static ssize_t token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
+ return read_buffer_limit_store(dev, attr, buf, count);
+}
+
+static DEVICE_ATTR_RW(token_limit); /* deprecated */
+static DEVICE_ATTR_RW(read_buffer_limit);
static ssize_t cdev_major_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1246,7 +1371,9 @@ static struct attribute *idxd_device_attributes[] = {
&dev_attr_state.attr,
&dev_attr_errors.attr,
&dev_attr_max_tokens.attr,
+ &dev_attr_max_read_buffers.attr,
&dev_attr_token_limit.attr,
+ &dev_attr_read_buffer_limit.attr,
&dev_attr_cdev_major.attr,
&dev_attr_cmd_status.attr,
NULL,
@@ -1268,8 +1395,6 @@ static void idxd_conf_device_release(struct device *dev)
kfree(idxd->groups);
kfree(idxd->wqs);
kfree(idxd->engines);
- kfree(idxd->irq_entries);
- kfree(idxd->int_handles);
ida_free(&idxd_ida, idxd->id);
kfree(idxd);
}
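
The hunks above mix DEVICE_ATTR_RO()/DEVICE_ATTR_RW() with open-coded __ATTR() initializers (the latter are needed when the sysfs name differs from the function prefix, as with the group_* attributes). The helper macros are roughly shorthand for:

    /* from <linux/device.h>, approximately */
    #define DEVICE_ATTR_RO(_name) \
    	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
    #define DEVICE_ATTR_RW(_name) \
    	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
    /* i.e. dev_attr_foo wired to foo_show()/foo_store() with 0444/0644 */
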
diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c
index aa44bcd6a356..168adf28c5b1 100644
--- a/drivers/dma/ioat/sysfs.c
+++ b/drivers/dma/ioat/sysfs.c
@@ -158,8 +158,9 @@ static struct attribute *ioat_attrs[] = {
&intr_coalesce_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(ioat);
struct kobj_type ioat_ktype = {
.sysfs_ops = &ioat_sysfs_ops,
- .default_attrs = ioat_attrs,
+ .default_groups = ioat_groups,
};
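
ATTRIBUTE_GROUPS(ioat) generates the NULL-terminated group array that .default_groups expects; expanded, it is approximately:

    #include <linux/sysfs.h>

    /* ioat_attrs is the attribute array defined just above */
    static const struct attribute_group ioat_group = {
    	.attrs = ioat_attrs,
    };
    static const struct attribute_group *ioat_groups[] = {
    	&ioat_group,
    	NULL,
    };
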
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a23563cd118b..5a53d7fcef01 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -727,12 +727,6 @@ static int mmp_pdma_config_write(struct dma_chan *dchan,
chan->dir = direction;
chan->dev_addr = addr;
- /* FIXME: drivers should be ported over to use the filter
- * function. Once that's done, the following two lines can
- * be removed.
- */
- if (cfg->slave_id)
- chan->drcmr = cfg->slave_id;
return 0;
}
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 9b0d463f89bb..9c8b4084ba2f 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -149,7 +149,7 @@ struct mv_xor_v2_descriptor {
* @desc_size: HW descriptor size
* @npendings: number of pending descriptors (for which tx_submit has
* @hw_queue_idx: HW queue index
- * @msi_desc: local interrupt descriptor information
+ * @irq: The Linux interrupt number
* been called, but not yet issue_pending)
*/
struct mv_xor_v2_device {
@@ -168,7 +168,7 @@ struct mv_xor_v2_device {
int desc_size;
unsigned int npendings;
unsigned int hw_queue_idx;
- struct msi_desc *msi_desc;
+ unsigned int irq;
};
/**
@@ -718,7 +718,6 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
int i, ret = 0;
struct dma_device *dma_dev;
struct mv_xor_v2_sw_desc *sw_desc;
- struct msi_desc *msi_desc;
BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
MV_XOR_V2_EXT_DESC_SIZE);
@@ -770,14 +769,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (ret)
goto disable_clk;
- msi_desc = first_msi_entry(&pdev->dev);
- if (!msi_desc) {
- ret = -ENODEV;
- goto free_msi_irqs;
- }
- xor_dev->msi_desc = msi_desc;
+ xor_dev->irq = msi_get_virq(&pdev->dev, 0);
- ret = devm_request_irq(&pdev->dev, msi_desc->irq,
+ ret = devm_request_irq(&pdev->dev, xor_dev->irq,
mv_xor_v2_interrupt_handler, 0,
dev_name(&pdev->dev), xor_dev);
if (ret)
@@ -892,7 +886,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
xor_dev->hw_desq_virt, xor_dev->hw_desq);
- devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);
+ devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev);
platform_msi_domain_free_irqs(&pdev->dev);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1da04112fcdb..c359decc07a3 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -835,7 +835,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
goto err_disable_pdev;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "Cannot set proper DMA config\n");
goto err_free_res;
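
pci_set_dma_mask() was a thin inline wrapper around dma_set_mask() on &pdev->dev, which is why the conversion above is mechanical. Where a driver also needs the coherent mask, the combined helper can be used; a short sketch (function name is illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int my_set_masks(struct pci_dev *pdev)
    {
    	/* sets the streaming and coherent DMA masks together */
    	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    }
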
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index e2b5129c5f84..5e46e347e28b 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3240,7 +3240,6 @@ static int ppc440spe_adma_dma2rxor_prep_src(
struct ppc440spe_rxor *cursor, int index,
int src_cnt, u32 addr)
{
- int rval = 0;
u32 sign;
struct ppc440spe_adma_desc_slot *desc = hdesc;
int i;
@@ -3348,7 +3347,7 @@ static int ppc440spe_adma_dma2rxor_prep_src(
break;
}
- return rval;
+ return 0;
}
/**
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 52d04641e361..6078cc81892e 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -909,13 +909,6 @@ static void pxad_get_config(struct pxad_chan *chan,
*dcmd |= PXA_DCMD_BURST16;
else if (maxburst == 32)
*dcmd |= PXA_DCMD_BURST32;
-
- /* FIXME: drivers should be ported over to use the filter
- * function. Once that's done, the following two lines can
- * be removed.
- */
- if (chan->cfg.slave_id)
- chan->drcmr = chan->cfg.slave_id;
}
static struct dma_async_tx_descriptor *
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 1a1b7d8458c9..94f3648f7483 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2206,10 +2206,8 @@ static int gpi_probe(struct platform_device *pdev)
/* set up irq */
ret = platform_get_irq(pdev, i);
- if (ret < 0) {
- dev_err(gpi_dev->dev, "platform_get_irq failed for %d:%d\n", i, ret);
+ if (ret < 0)
return ret;
- }
gpii->irq = ret;
/* set up channel specific register info */
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 23d64489d25f..65d054bb11aa 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -666,7 +666,7 @@ static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
struct device *dev = msi_desc_to_dev(desc);
struct hidma_dev *dmadev = dev_get_drvdata(dev);
- if (!desc->platform.msi_index) {
+ if (!desc->msi_index) {
writel(msg->address_lo, dmadev->dev_evca + 0x118);
writel(msg->address_hi, dmadev->dev_evca + 0x11C);
writel(msg->data, dmadev->dev_evca + 0x120);
@@ -678,11 +678,13 @@ static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
struct device *dev = dmadev->ddev.dev;
- struct msi_desc *desc;
+ int i, virq;
- /* free allocated MSI interrupts above */
- for_each_msi_entry(desc, dev)
- devm_free_irq(dev, desc->irq, &dmadev->lldev);
+ for (i = 0; i < HIDMA_MSI_INTS; i++) {
+ virq = msi_get_virq(dev, i);
+ if (virq)
+ devm_free_irq(dev, virq, &dmadev->lldev);
+ }
platform_msi_domain_free_irqs(dev);
#endif
@@ -692,45 +694,37 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- int rc;
- struct msi_desc *desc;
- struct msi_desc *failed_desc = NULL;
+ int rc, i, virq;
rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
hidma_write_msi_msg);
if (rc)
return rc;
- for_each_msi_entry(desc, &pdev->dev) {
- if (!desc->platform.msi_index)
- dmadev->msi_virqbase = desc->irq;
-
- rc = devm_request_irq(&pdev->dev, desc->irq,
+ for (i = 0; i < HIDMA_MSI_INTS; i++) {
+ virq = msi_get_virq(&pdev->dev, i);
+ rc = devm_request_irq(&pdev->dev, virq,
hidma_chirq_handler_msi,
0, "qcom-hidma-msi",
&dmadev->lldev);
- if (rc) {
- failed_desc = desc;
+ if (rc)
break;
- }
+ if (!i)
+ dmadev->msi_virqbase = virq;
}
if (rc) {
/* free allocated MSI interrupts above */
- for_each_msi_entry(desc, &pdev->dev) {
- if (desc == failed_desc)
- break;
- devm_free_irq(&pdev->dev, desc->irq,
- &dmadev->lldev);
+ for (--i; i >= 0; i--) {
+ virq = msi_get_virq(&pdev->dev, i);
+ devm_free_irq(&pdev->dev, virq, &dmadev->lldev);
}
+ dev_warn(&pdev->dev,
+ "failed to request MSI irq, falling back to wired IRQ\n");
} else {
/* Add callback to free MSIs on teardown */
hidma_ll_setup_irq(dmadev->lldev, true);
-
}
- if (rc)
- dev_warn(&pdev->dev,
- "failed to request MSI irq, falling back to wired IRQ\n");
return rc;
#else
return -EINVAL;
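
The hidma conversion above replaces for_each_msi_entry()/desc->irq with index-based lookup. msi_get_virq() returns the Linux interrupt number for a given MSI index, or 0 if none is allocated, which allows teardown loops like this sketch (names are hypothetical):

    #include <linux/device.h>
    #include <linux/msi.h>

    /* free per-vector IRQs without walking MSI descriptors directly */
    static void my_free_msi_irqs(struct device *dev, void *cookie, int nvec)
    {
    	int i;

    	for (i = 0; i < nvec; i++) {
    		unsigned int virq = msi_get_virq(dev, i);

    		if (virq)
    			devm_free_irq(dev, virq, cookie);
    	}
    }
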
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
index ee78bed8d60d..facdacf8aede 100644
--- a/drivers/dma/qcom/qcom_adm.c
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -140,6 +141,8 @@ struct adm_chan {
struct adm_async_desc *curr_txd;
struct dma_slave_config slave;
+ u32 crci;
+ u32 mux;
struct list_head node;
int error;
@@ -379,8 +382,8 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
return ERR_PTR(-EINVAL);
}
- crci = achan->slave.slave_id & 0xf;
- if (!crci || achan->slave.slave_id > 0x1f) {
+ crci = achan->crci & 0xf;
+ if (!crci || achan->crci > 0x1f) {
dev_err(adev->dev, "invalid crci value\n");
return ERR_PTR(-EINVAL);
}
@@ -403,9 +406,7 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
if (!async_desc)
return ERR_PTR(-ENOMEM);
- if (crci)
- async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
- ADM_CRCI_CTL_MUX_SEL : 0;
+ async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
async_desc->crci = crci;
async_desc->blk_size = blk_size;
async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
@@ -488,10 +489,13 @@ static int adm_terminate_all(struct dma_chan *chan)
static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
struct adm_chan *achan = to_adm_chan(chan);
+ struct qcom_adm_peripheral_config *config = cfg->peripheral_config;
unsigned long flag;
spin_lock_irqsave(&achan->vc.lock, flag);
memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
+ if (cfg->peripheral_size == sizeof(*config))
+ achan->crci = config->crci;
spin_unlock_irqrestore(&achan->vc.lock, flag);
return 0;
@@ -694,6 +698,45 @@ static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
achan->vc.desc_free = adm_dma_free_desc;
}
+/**
+ * adm_dma_xlate - translate a DT DMA specifier into a dma channel
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ * @ofdma: pointer to DMA controller data
+ *
+ * This can use either 1-cell or 2-cell formats, the first cell
+ * identifies the slave device, while the optional second cell
+ * contains the crci value.
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+static struct dma_chan *adm_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dma_device *dev = ofdma->of_dma_data;
+ struct dma_chan *chan, *candidate = NULL;
+ struct adm_chan *achan;
+
+ if (!dev || dma_spec->args_count > 2)
+ return NULL;
+
+ list_for_each_entry(chan, &dev->channels, device_node)
+ if (chan->chan_id == dma_spec->args[0]) {
+ candidate = chan;
+ break;
+ }
+
+ if (!candidate)
+ return NULL;
+
+ achan = to_adm_chan(candidate);
+ if (dma_spec->args_count == 2)
+ achan->crci = dma_spec->args[1];
+ else
+ achan->crci = 0;
+
+ return dma_get_slave_channel(candidate);
+}
+
static int adm_dma_probe(struct platform_device *pdev)
{
struct adm_device *adev;
@@ -838,8 +881,7 @@ static int adm_dma_probe(struct platform_device *pdev)
goto err_disable_clks;
}
- ret = of_dma_controller_register(pdev->dev.of_node,
- of_dma_xlate_by_chan_id,
+ ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate,
&adev->common);
if (ret)
goto err_unregister_dma;
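
On the client side, the CRCI now travels through dma_slave_config's peripheral_config blob instead of slave_id. A minimal sketch, assuming struct qcom_adm_peripheral_config from the new <linux/dma/qcom_adm.h> header carries a crci field as the driver code above implies (all values are illustrative):

    #include <linux/dma/qcom_adm.h>
    #include <linux/dmaengine.h>

    static int my_config(struct dma_chan *chan, dma_addr_t fifo)
    {
    	struct qcom_adm_peripheral_config periph = {
    		.crci = 1,			/* illustrative CRCI number */
    	};
    	struct dma_slave_config cfg = {
    		.src_addr = fifo,
    		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
    		.src_maxburst = 16,
    		.peripheral_config = &periph,
    		.peripheral_size = sizeof(periph),
    	};

    	return dmaengine_slave_config(chan, &cfg);
    }
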
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 5c7716fd6bc5..481f45c77ce1 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -236,7 +236,7 @@ struct rcar_dmac_of_data {
#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
#define RCAR_DMAOR_AE (1 << 2)
#define RCAR_DMAOR_DME (1 << 0)
-#define RCAR_DMACHCLR 0x0080 /* Not on R-Car V3U */
+#define RCAR_DMACHCLR 0x0080 /* Not on R-Car Gen4 */
#define RCAR_DMADPSEC 0x00a0
#define RCAR_DMASAR 0x0000
@@ -299,8 +299,8 @@ struct rcar_dmac_of_data {
#define RCAR_DMAFIXDAR 0x0014
#define RCAR_DMAFIXDPBASE 0x0060
-/* For R-Car V3U */
-#define RCAR_V3U_DMACHCLR 0x0100
+/* For R-Car Gen4 */
+#define RCAR_GEN4_DMACHCLR 0x0100
/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
@@ -345,7 +345,7 @@ static void rcar_dmac_chan_clear(struct rcar_dmac *dmac,
struct rcar_dmac_chan *chan)
{
if (dmac->chan_base)
- rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
+ rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1);
else
rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
}
@@ -357,7 +357,7 @@ static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac)
if (dmac->chan_base) {
for_each_rcar_dmac_chan(i, dmac, chan)
- rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
+ rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1);
} else {
rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
}
@@ -2009,7 +2009,7 @@ static const struct rcar_dmac_of_data rcar_dmac_data = {
.chan_offset_stride = 0x80,
};
-static const struct rcar_dmac_of_data rcar_v3u_dmac_data = {
+static const struct rcar_dmac_of_data rcar_gen4_dmac_data = {
.chan_offset_base = 0x0,
.chan_offset_stride = 0x1000,
};
@@ -2019,8 +2019,11 @@ static const struct of_device_id rcar_dmac_of_ids[] = {
.compatible = "renesas,rcar-dmac",
.data = &rcar_dmac_data,
}, {
+ .compatible = "renesas,rcar-gen4-dmac",
+ .data = &rcar_gen4_dmac_data,
+ }, {
.compatible = "renesas,dmac-r8a779a0",
- .data = &rcar_v3u_dmac_data,
+ .data = &rcar_gen4_dmac_data,
},
{ /* Sentinel */ }
};
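
With both compatibles pointing at rcar_gen4_dmac_data, probe code can fetch the per-SoC data without caring which entry matched; the usual accessor is of_device_get_match_data(). A sketch, assuming it runs inside the driver where struct rcar_dmac_of_data is visible:

    #include <linux/of_device.h>

    static const struct rcar_dmac_of_data *my_match_data(struct device *dev)
    {
    	/* returns the .data of the matching of_device_id entry */
    	return of_device_get_match_data(dev);
    }
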
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 7f72b3f4cd1a..158e5e7defae 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -787,14 +787,6 @@ static int shdma_config(struct dma_chan *chan,
return -EINVAL;
/*
- * overriding the slave_id through dma_slave_config is deprecated,
- * but possibly some out-of-tree drivers still do it.
- */
- if (WARN_ON_ONCE(config->slave_id &&
- config->slave_id != schan->real_slave_id))
- schan->real_slave_id = config->slave_id;
-
- /*
* We could lock this, but you shouldn't be configuring the
* channel, while using it...
*/
@@ -1042,9 +1034,7 @@ EXPORT_SYMBOL(shdma_cleanup);
static int __init shdma_enter(void)
{
- shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG),
- sizeof(long),
- GFP_KERNEL);
+ shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
if (!shdma_slave_used)
return -ENOMEM;
return 0;
@@ -1053,7 +1043,7 @@ module_init(shdma_enter);
static void __exit shdma_exit(void)
{
- kfree(shdma_slave_used);
+ bitmap_free(shdma_slave_used);
}
module_exit(shdma_exit);
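
bitmap_zalloc()/bitmap_free() hide the DIV_ROUND_UP(nbits, BITS_PER_LONG) sizing that the old kcalloc() call open-coded. A self-contained sketch of the pattern (my_used and the function names are hypothetical):

    #include <linux/bitmap.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static unsigned long *my_used;

    static int my_init(unsigned int nbits)
    {
    	/* allocates a zeroed bitmap large enough for nbits bits */
    	my_used = bitmap_zalloc(nbits, GFP_KERNEL);
    	return my_used ? 0 : -ENOMEM;
    }

    static void my_exit(void)
    {
    	bitmap_free(my_used);
    }
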
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 4357d2395e6b..7f158ef5672d 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -795,9 +795,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
return dst_datawidth;
}
- if (slave_cfg->slave_id)
- schan->dev_id = slave_cfg->slave_id;
-
hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
/*
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index d30a4a28d3bf..6f57ff0e7b37 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -10,6 +10,7 @@
* Inspired by stm32-dma.c and dma-jz4780.c
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
@@ -32,13 +33,6 @@
#include "virt-dma.h"
-/* MDMA Generic getter/setter */
-#define STM32_MDMA_SHIFT(n) (ffs(n) - 1)
-#define STM32_MDMA_SET(n, mask) (((n) << STM32_MDMA_SHIFT(mask)) & \
- (mask))
-#define STM32_MDMA_GET(n, mask) (((n) & (mask)) >> \
- STM32_MDMA_SHIFT(mask))
-
#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
@@ -80,8 +74,7 @@
#define STM32_MDMA_CCR_HEX BIT(13)
#define STM32_MDMA_CCR_BEX BIT(12)
#define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6)
-#define STM32_MDMA_CCR_PL(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CCR_PL_MASK)
+#define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n))
#define STM32_MDMA_CCR_TCIE BIT(5)
#define STM32_MDMA_CCR_BTIE BIT(4)
#define STM32_MDMA_CCR_BRTIE BIT(3)
@@ -99,48 +92,33 @@
#define STM32_MDMA_CTCR_BWM BIT(31)
#define STM32_MDMA_CTCR_SWRM BIT(30)
#define STM32_MDMA_CTCR_TRGM_MSK GENMASK(29, 28)
-#define STM32_MDMA_CTCR_TRGM(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_TRGM_MSK)
-#define STM32_MDMA_CTCR_TRGM_GET(n) STM32_MDMA_GET((n), \
- STM32_MDMA_CTCR_TRGM_MSK)
+#define STM32_MDMA_CTCR_TRGM(n) FIELD_PREP(STM32_MDMA_CTCR_TRGM_MSK, (n))
+#define STM32_MDMA_CTCR_TRGM_GET(n) FIELD_GET(STM32_MDMA_CTCR_TRGM_MSK, (n))
#define STM32_MDMA_CTCR_PAM_MASK GENMASK(27, 26)
-#define STM32_MDMA_CTCR_PAM(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTCR_PAM_MASK)
+#define STM32_MDMA_CTCR_PAM(n) FIELD_PREP(STM32_MDMA_CTCR_PAM_MASK, (n))
#define STM32_MDMA_CTCR_PKE BIT(25)
#define STM32_MDMA_CTCR_TLEN_MSK GENMASK(24, 18)
-#define STM32_MDMA_CTCR_TLEN(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_TLEN_MSK)
-#define STM32_MDMA_CTCR_TLEN_GET(n) STM32_MDMA_GET((n), \
- STM32_MDMA_CTCR_TLEN_MSK)
+#define STM32_MDMA_CTCR_TLEN(n) FIELD_PREP(STM32_MDMA_CTCR_TLEN_MSK, (n))
+#define STM32_MDMA_CTCR_TLEN_GET(n) FIELD_GET(STM32_MDMA_CTCR_TLEN_MSK, (n))
#define STM32_MDMA_CTCR_LEN2_MSK GENMASK(25, 18)
-#define STM32_MDMA_CTCR_LEN2(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_LEN2_MSK)
-#define STM32_MDMA_CTCR_LEN2_GET(n) STM32_MDMA_GET((n), \
- STM32_MDMA_CTCR_LEN2_MSK)
+#define STM32_MDMA_CTCR_LEN2(n) FIELD_PREP(STM32_MDMA_CTCR_LEN2_MSK, (n))
+#define STM32_MDMA_CTCR_LEN2_GET(n) FIELD_GET(STM32_MDMA_CTCR_LEN2_MSK, (n))
#define STM32_MDMA_CTCR_DBURST_MASK GENMASK(17, 15)
-#define STM32_MDMA_CTCR_DBURST(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTCR_DBURST_MASK)
+#define STM32_MDMA_CTCR_DBURST(n) FIELD_PREP(STM32_MDMA_CTCR_DBURST_MASK, (n))
#define STM32_MDMA_CTCR_SBURST_MASK GENMASK(14, 12)
-#define STM32_MDMA_CTCR_SBURST(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTCR_SBURST_MASK)
+#define STM32_MDMA_CTCR_SBURST(n) FIELD_PREP(STM32_MDMA_CTCR_SBURST_MASK, (n))
#define STM32_MDMA_CTCR_DINCOS_MASK GENMASK(11, 10)
-#define STM32_MDMA_CTCR_DINCOS(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_DINCOS_MASK)
+#define STM32_MDMA_CTCR_DINCOS(n) FIELD_PREP(STM32_MDMA_CTCR_DINCOS_MASK, (n))
#define STM32_MDMA_CTCR_SINCOS_MASK GENMASK(9, 8)
-#define STM32_MDMA_CTCR_SINCOS(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_SINCOS_MASK)
+#define STM32_MDMA_CTCR_SINCOS(n) FIELD_PREP(STM32_MDMA_CTCR_SINCOS_MASK, (n))
#define STM32_MDMA_CTCR_DSIZE_MASK GENMASK(7, 6)
-#define STM32_MDMA_CTCR_DSIZE(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTCR_DSIZE_MASK)
+#define STM32_MDMA_CTCR_DSIZE(n) FIELD_PREP(STM32_MDMA_CTCR_DSIZE_MASK, (n))
#define STM32_MDMA_CTCR_SSIZE_MASK GENMASK(5, 4)
-#define STM32_MDMA_CTCR_SSIZE(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTCR_SSIZE_MASK)
+#define STM32_MDMA_CTCR_SSIZE(n) FIELD_PREP(STM32_MDMA_CTCR_SSIZE_MASK, (n))
#define STM32_MDMA_CTCR_DINC_MASK GENMASK(3, 2)
-#define STM32_MDMA_CTCR_DINC(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_DINC_MASK)
+#define STM32_MDMA_CTCR_DINC(n) FIELD_PREP(STM32_MDMA_CTCR_DINC_MASK, (n))
#define STM32_MDMA_CTCR_SINC_MASK GENMASK(1, 0)
-#define STM32_MDMA_CTCR_SINC(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_SINC_MASK)
+#define STM32_MDMA_CTCR_SINC(n) FIELD_PREP(STM32_MDMA_CTCR_SINC_MASK, (n))
#define STM32_MDMA_CTCR_CFG_MASK (STM32_MDMA_CTCR_SINC_MASK \
| STM32_MDMA_CTCR_DINC_MASK \
| STM32_MDMA_CTCR_SINCOS_MASK \
@@ -151,16 +129,13 @@
/* MDMA Channel x block number of data register */
#define STM32_MDMA_CBNDTR(x) (0x54 + 0x40 * (x))
#define STM32_MDMA_CBNDTR_BRC_MK GENMASK(31, 20)
-#define STM32_MDMA_CBNDTR_BRC(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CBNDTR_BRC_MK)
-#define STM32_MDMA_CBNDTR_BRC_GET(n) STM32_MDMA_GET((n), \
- STM32_MDMA_CBNDTR_BRC_MK)
+#define STM32_MDMA_CBNDTR_BRC(n) FIELD_PREP(STM32_MDMA_CBNDTR_BRC_MK, (n))
+#define STM32_MDMA_CBNDTR_BRC_GET(n) FIELD_GET(STM32_MDMA_CBNDTR_BRC_MK, (n))
#define STM32_MDMA_CBNDTR_BRDUM BIT(19)
#define STM32_MDMA_CBNDTR_BRSUM BIT(18)
#define STM32_MDMA_CBNDTR_BNDT_MASK GENMASK(16, 0)
-#define STM32_MDMA_CBNDTR_BNDT(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CBNDTR_BNDT_MASK)
+#define STM32_MDMA_CBNDTR_BNDT(n) FIELD_PREP(STM32_MDMA_CBNDTR_BNDT_MASK, (n))
/* MDMA Channel x source address register */
#define STM32_MDMA_CSAR(x) (0x58 + 0x40 * (x))
@@ -171,11 +146,9 @@
/* MDMA Channel x block repeat address update register */
#define STM32_MDMA_CBRUR(x) (0x60 + 0x40 * (x))
#define STM32_MDMA_CBRUR_DUV_MASK GENMASK(31, 16)
-#define STM32_MDMA_CBRUR_DUV(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CBRUR_DUV_MASK)
+#define STM32_MDMA_CBRUR_DUV(n) FIELD_PREP(STM32_MDMA_CBRUR_DUV_MASK, (n))
#define STM32_MDMA_CBRUR_SUV_MASK GENMASK(15, 0)
-#define STM32_MDMA_CBRUR_SUV(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CBRUR_SUV_MASK)
+#define STM32_MDMA_CBRUR_SUV(n) FIELD_PREP(STM32_MDMA_CBRUR_SUV_MASK, (n))
/* MDMA Channel x link address register */
#define STM32_MDMA_CLAR(x) (0x64 + 0x40 * (x))
@@ -184,9 +157,8 @@
#define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x))
#define STM32_MDMA_CTBR_DBUS BIT(17)
#define STM32_MDMA_CTBR_SBUS BIT(16)
-#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(7, 0)
-#define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTBR_TSEL_MASK)
+#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(5, 0)
+#define STM32_MDMA_CTBR_TSEL(n) FIELD_PREP(STM32_MDMA_CTBR_TSEL_MASK, (n))
/* MDMA Channel x mask address register */
#define STM32_MDMA_CMAR(x) (0x70 + 0x40 * (x))
@@ -1279,7 +1251,7 @@ static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
u32 curr_hwdesc)
{
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
- struct stm32_mdma_hwdesc *hwdesc = desc->node[0].hwdesc;
+ struct stm32_mdma_hwdesc *hwdesc;
u32 cbndtr, residue, modulo, burst_size;
int i;
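
FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> derive the shift from the mask at compile time, replacing the driver-local STM32_MDMA_SET()/STM32_MDMA_GET() helpers removed above. A minimal sketch with an illustrative mask:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define MY_PL_MASK	GENMASK(7, 6)	/* illustrative 2-bit field */

    static u32 my_set_pl(u32 reg, u32 pl)
    {
    	/* clear the field, then shift the new value into place */
    	reg &= ~MY_PL_MASK;
    	return reg | FIELD_PREP(MY_PL_MASK, pl);
    }

    static u32 my_get_pl(u32 reg)
    {
    	/* mask out the field and shift it down to bit 0 */
    	return FIELD_GET(MY_PL_MASK, reg);
    }
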
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index b7260749e8ee..eaafcbe4ca94 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -343,12 +343,6 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
}
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
- if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
- sconfig->device_fc) {
- if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
- return -EINVAL;
- tdc->slave_id = sconfig->slave_id;
- }
tdc->config_init = true;
return 0;
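
With slave_id gone from dma_slave_config, clients identify the request line through firmware (DT "dmas"/"dma-names" properties) rather than at config time. A sketch of the client side (the "rx" name is illustrative):

    #include <linux/dmaengine.h>

    static struct dma_chan *my_get_rx_chan(struct device *dev)
    {
    	/* resolves the channel via the device's DT/ACPI DMA bindings */
    	return dma_request_chan(dev, "rx");
    }
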
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index bd496efadff7..1d4081a049b7 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
k3-psil-am654.o \
k3-psil-j721e.o \
k3-psil-j7200.o \
- k3-psil-am64.o
+ k3-psil-am64.o \
+ k3-psil-j721s2.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 35d81bd857f1..08e47f44d325 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1681,8 +1681,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
emr = val;
- for (i = find_next_bit(&emr, 32, 0); i < 32;
- i = find_next_bit(&emr, 32, i + 1)) {
+ for_each_set_bit(i, &emr, 32) {
int k = (j << 5) + i;
/* Clear the corresponding EMR bits */
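
for_each_set_bit() visits only the set bits and replaces the open-coded find_next_bit() loop above; a self-contained sketch (names are hypothetical):

    #include <linux/bitops.h>
    #include <linux/printk.h>

    static void my_report_errors(unsigned long emr)
    {
    	unsigned int i;

    	/* i takes the index of each set bit in emr, lowest first */
    	for_each_set_bit(i, &emr, 32)
    		pr_debug("error event on channel %u\n", i);
    }
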
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
new file mode 100644
index 000000000000..4c4172a4d271
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j721s2_src_ep_map[] = {
+ /* PDMA_MCASP - McASP0-4 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ PSIL_PDMA_MCASP(0x4403),
+ PSIL_PDMA_MCASP(0x4404),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ PSIL_PDMA_XY_PKT(0x4608),
+ PSIL_PDMA_XY_PKT(0x4609),
+ PSIL_PDMA_XY_PKT(0x460a),
+ PSIL_PDMA_XY_PKT(0x460b),
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0x4610),
+ PSIL_PDMA_XY_PKT(0x4611),
+ PSIL_PDMA_XY_PKT(0x4612),
+ PSIL_PDMA_XY_PKT(0x4613),
+ PSIL_PDMA_XY_PKT(0x4614),
+ PSIL_PDMA_XY_PKT(0x4615),
+ PSIL_PDMA_XY_PKT(0x4616),
+ PSIL_PDMA_XY_PKT(0x4617),
+ PSIL_PDMA_XY_PKT(0x4618),
+ PSIL_PDMA_XY_PKT(0x4619),
+ PSIL_PDMA_XY_PKT(0x461a),
+ PSIL_PDMA_XY_PKT(0x461b),
+ PSIL_PDMA_XY_PKT(0x461c),
+ PSIL_PDMA_XY_PKT(0x461d),
+ PSIL_PDMA_XY_PKT(0x461e),
+ PSIL_PDMA_XY_PKT(0x461f),
+ /* PDMA_USART_G0 - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA_USART_G1 - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA_USART_G2 - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ PSIL_PDMA_XY_TR(0x7402),
+ PSIL_PDMA_XY_TR(0x7403),
+ /* SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+ PSIL_SA2UL(0x7502, 0),
+ PSIL_SA2UL(0x7503, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j721s2_dst_ep_map[] = {
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+ PSIL_SA2UL(0xf501, 1),
+};
+
+struct psil_ep_map j721s2_ep_map = {
+ .name = "j721s2",
+ .src = j721s2_src_ep_map,
+ .src_count = ARRAY_SIZE(j721s2_src_ep_map),
+ .dst = j721s2_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j721s2_dst_ep_map),
+};
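
The new map is consumed through the existing PSI-L lookup helper; a hedged sketch of resolving one endpoint's configuration by thread ID (thread IDs as in the tables above; helper as declared in include/linux/dma/k3-psil.h):

#include <linux/dma/k3-psil.h>
#include <linux/err.h>

/* True if the given PSI-L thread is a packet-mode endpoint. */
static bool example_thread_is_pkt_mode(u32 thread_id)
{
	struct psil_ep_config *cfg = psil_get_ep_config(thread_id);

	if (IS_ERR(cfg))
		return false;

	return cfg->pkt_mode;	/* 1 for the PSIL_PDMA_XY_PKT() entries */
}
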
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index b74e192e3c2d..e51e179cdb56 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -41,5 +41,6 @@ extern struct psil_ep_map am654_ep_map;
extern struct psil_ep_map j721e_ep_map;
extern struct psil_ep_map j7200_ep_map;
extern struct psil_ep_map am64_ep_map;
+extern struct psil_ep_map j721s2_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 13ce7367d870..8867b4bd0c51 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -21,6 +21,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "J721E", .data = &j721e_ep_map },
{ .family = "J7200", .data = &j7200_ep_map },
{ .family = "AM64X", .data = &am64_ep_map },
+ { .family = "J721S2", .data = &j721s2_ep_map },
{ /* sentinel */ }
};
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index aada84f40723..d4f1e4e9603a 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -168,8 +168,7 @@ int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id)
{
const struct udma_oes_offsets *oes = &ud->soc_data->oes;
- return ti_sci_inta_msi_get_virq(ud->dev, udma_tflow_id +
- oes->pktdma_tchan_flow);
+ return msi_get_virq(ud->dev, udma_tflow_id + oes->pktdma_tchan_flow);
}
EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq);
@@ -177,7 +176,6 @@ int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id)
{
const struct udma_oes_offsets *oes = &ud->soc_data->oes;
- return ti_sci_inta_msi_get_virq(ud->dev, udma_rflow_id +
- oes->pktdma_rchan_flow);
+ return msi_get_virq(ud->dev, udma_rflow_id + oes->pktdma_rchan_flow);
}
EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 6e56d1cef5ee..d2d4cbe63e44 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -2313,8 +2313,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
/* Event from UDMA (TR events) only needed for slave TR mode channels */
if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
- uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
- irq_udma_idx);
+ uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
if (uc->irq_num_udma <= 0) {
dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
irq_udma_idx);
@@ -2486,7 +2485,7 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
uc->psil_paired = true;
}
- uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
if (uc->irq_num_ring <= 0) {
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
irq_ring_idx);
@@ -2503,8 +2502,7 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
/* Event from BCDMA (TR events) only needed for slave channels */
if (is_slave_direction(uc->config.dir)) {
- uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
- irq_udma_idx);
+ uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
if (uc->irq_num_udma <= 0) {
dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
irq_udma_idx);
@@ -2672,7 +2670,7 @@ static int pktdma_alloc_chan_resources(struct dma_chan *chan)
uc->psil_paired = true;
- uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
if (uc->irq_num_ring <= 0) {
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
irq_ring_idx);
@@ -4376,6 +4374,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "J721E", .data = &j721e_soc_data },
{ .family = "J7200", .data = &j7200_soc_data },
{ .family = "AM64X", .data = &am64_soc_data },
+	{ .family = "J721S2", .data = &j721e_soc_data },
{ /* sentinel */ }
};
@@ -5336,9 +5335,9 @@ static int udma_probe(struct platform_device *pdev)
if (IS_ERR(ud->ringacc))
return PTR_ERR(ud->ringacc);
- dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
- if (!dev->msi_domain) {
+ if (!dev->msi.domain) {
dev_err(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
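
The k3-udma hunks above are part of the generic MSI rework: the TI-specific ti_sci_inta_msi_get_virq() calls become the generic msi_get_virq(), and the MSI domain pointer moves from dev->msi_domain to dev->msi.domain. A hedged sketch of the new lookup:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/msi.h>

/* Resolve the Linux IRQ number backing MSI index @index on @dev. */
static int example_msi_irq(struct device *dev, unsigned int index)
{
	unsigned int virq = msi_get_virq(dev, index);

	return virq ? (int)virq : -ENXIO;	/* 0 means "not mapped" */
}
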
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
index d6b8a202474f..290836b7e1be 100644
--- a/drivers/dma/uniphier-xdmac.c
+++ b/drivers/dma/uniphier-xdmac.c
@@ -131,8 +131,9 @@ uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
struct uniphier_xdmac_desc *xd)
{
- u32 src_mode, src_addr, src_width;
- u32 dst_mode, dst_addr, dst_width;
+ u32 src_mode, src_width;
+ u32 dst_mode, dst_width;
+ dma_addr_t src_addr, dst_addr;
u32 val, its, tnum;
enum dma_slave_buswidth buswidth;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 4677ce08ed40..cd62bbb50e8b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2128,6 +2128,126 @@ error:
}
/**
+ * xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_cdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_cdma_desc_hw *hw;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+
+ if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
+ return NULL;
+
+ if (unlikely(!dst_sg || !src_sg))
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+ /*
+	 * Loop until either the source or the destination scatterlist is
+	 * exhausted.
+ */
+ while (true) {
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, chan->xdev->max_buffer_len);
+ if (len == 0)
+ goto fetch;
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_cdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+ hw = &segment->hw;
+ hw->control = len;
+ hw->src_addr = dma_src;
+ hw->dest_addr = dma_dst;
+ if (chan->ext_addr) {
+ hw->src_addr_msb = upper_32_bits(dma_src);
+ hw->dest_addr_msb = upper_32_bits(dma_dst);
+ }
+
+ if (prev) {
+ prev->hw.next_desc = segment->phys;
+ if (chan->ext_addr)
+ prev->hw.next_desc_msb =
+ upper_32_bits(segment->phys);
+ }
+
+ prev = segment;
+ dst_avail -= len;
+ src_avail -= len;
+ list_add_tail(&segment->node, &desc->segments);
+
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ if (list_empty(&desc->segments)) {
+ dev_err(chan->xdev->dev,
+ "%s: Zero-size SG transfer requested\n", __func__);
+ goto error;
+ }
+
+ /* Link the last hardware descriptor with the first. */
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+ prev->hw.next_desc = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -2860,7 +2980,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
}
/* Request the interrupt */
- chan->irq = irq_of_parse_and_map(node, chan->tdest);
+ chan->irq = of_irq_get(node, chan->tdest);
+ if (chan->irq < 0)
+ return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n");
err = request_irq(chan->irq, xdev->dma_config->irq_handler,
IRQF_SHARED, "xilinx-dma-controller", chan);
if (err) {
@@ -2934,8 +3056,11 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
dev_warn(xdev->dev, "missing dma-channels property\n");
- for (i = 0; i < nr_channels; i++)
- xilinx_dma_chan_probe(xdev, node);
+ for (i = 0; i < nr_channels; i++) {
+ ret = xilinx_dma_chan_probe(xdev, node);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -3115,7 +3240,9 @@ static int xilinx_dma_probe(struct platform_device *pdev)
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+ dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+ xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg;
/* Residue calculation is supported by only AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
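
With DMA_MEMCPY_SG advertised above, a dmaengine client can submit scatter-gather copies. A hedged sketch, assuming the dmaengine_prep_dma_memcpy_sg() wrapper that accompanied this capability in dmaengine.h:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_memcpy_sg(struct dma_chan *chan,
			     struct scatterlist *dst_sg, unsigned int dst_nents,
			     struct scatterlist *src_sg, unsigned int src_nents)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Prepare a descriptor copying src_sg into dst_sg. */
	tx = dmaengine_prep_dma_memcpy_sg(chan, dst_sg, dst_nents,
					  src_sg, src_nents,
					  DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
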
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index ce5c66e6897d..b0f4948b00a5 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/dma/xilinx_dpdma.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
@@ -1273,6 +1274,7 @@ static int xilinx_dpdma_config(struct dma_chan *dchan,
struct dma_slave_config *config)
{
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dpdma_peripheral_config *pconfig;
unsigned long flags;
/*
@@ -1282,15 +1284,18 @@ static int xilinx_dpdma_config(struct dma_chan *dchan,
* fixed both on the DPDMA side and on the DP controller side.
*/
- spin_lock_irqsave(&chan->lock, flags);
-
/*
- * Abuse the slave_id to indicate that the channel is part of a video
- * group.
+ * Use the peripheral_config to indicate that the channel is part
+ * of a video group. This requires matching use of the custom
+ * structure in each driver.
*/
- if (chan->id <= ZYNQMP_DPDMA_VIDEO2)
- chan->video_group = config->slave_id != 0;
+ pconfig = config->peripheral_config;
+ if (WARN_ON(pconfig && config->peripheral_size != sizeof(*pconfig)))
+ return -EINVAL;
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->id <= ZYNQMP_DPDMA_VIDEO2 && pconfig)
+ chan->video_group = pconfig->video_group;
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
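
The dpdma hunk replaces the deprecated slave_id channel with the typed peripheral_config mechanism; a hedged sketch of the consumer side, using the structure from include/linux/dma/xilinx_dpdma.h that the new #include pulls in:

#include <linux/dma/xilinx_dpdma.h>
#include <linux/dmaengine.h>

/* Mark a DPDMA video channel as part of the video group. */
static int example_join_video_group(struct dma_chan *chan)
{
	struct xilinx_dpdma_peripheral_config pconfig = {
		.video_group = true,
	};
	struct dma_slave_config config = {
		.direction = DMA_MEM_TO_DEV,
		.peripheral_config = &pconfig,
		.peripheral_size = sizeof(pconfig),
	};

	return dmaengine_slave_config(chan, &config);
}
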
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 2fc4c3f91fd5..58ab63642e72 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -484,7 +484,7 @@ config EDAC_ARMADA_XP
config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller"
- depends on ARCH_ZYNQ || ARCH_ZYNQMP
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA
help
Support for error detection and correction on the Synopsys DDR
memory controller.
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4fce75013674..fba609ada0e6 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -988,6 +988,281 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
return csrow;
}
+/* Protect the PCI config register pairs used for DF indirect access. */
+static DEFINE_MUTEX(df_indirect_mutex);
+
+/*
+ * Data Fabric Indirect Access uses FICAA/FICAD.
+ *
+ * Fabric Indirect Configuration Access Address (FICAA): Constructed based
+ * on the device's Instance Id and the PCI function and register offset of
+ * the desired register.
+ *
+ * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
+ * and FICAD HI registers but so far we only need the LO register.
+ *
+ * Use Instance Id 0xFF to indicate a broadcast read.
+ */
+#define DF_BROADCAST 0xFF
+static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
+{
+ struct pci_dev *F4;
+ u32 ficaa;
+ int err = -ENODEV;
+
+ if (node >= amd_nb_num())
+ goto out;
+
+ F4 = node_to_amd_nb(node)->link;
+ if (!F4)
+ goto out;
+
+ ficaa = (instance_id == DF_BROADCAST) ? 0 : 1;
+ ficaa |= reg & 0x3FC;
+ ficaa |= (func & 0x7) << 11;
+ ficaa |= instance_id << 16;
+
+ mutex_lock(&df_indirect_mutex);
+
+ err = pci_write_config_dword(F4, 0x5C, ficaa);
+ if (err) {
+ pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
+ goto out_unlock;
+ }
+
+ err = pci_read_config_dword(F4, 0x98, lo);
+ if (err)
+ pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
+
+out_unlock:
+ mutex_unlock(&df_indirect_mutex);
+
+out:
+ return err;
+}
+
+static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
+{
+ return __df_indirect_read(node, func, reg, instance_id, lo);
+}
+
+static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
+{
+ return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
+}
+
+struct addr_ctx {
+ u64 ret_addr;
+ u32 tmp;
+ u16 nid;
+ u8 inst_id;
+};
+
+static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
+{
+ u64 dram_base_addr, dram_limit_addr, dram_hole_base;
+
+ u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
+ u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
+ u8 intlv_addr_sel, intlv_addr_bit;
+ u8 num_intlv_bits, hashed_bit;
+ u8 lgcy_mmio_hole_en, base = 0;
+ u8 cs_mask, cs_id = 0;
+ bool hash_enabled = false;
+
+ struct addr_ctx ctx;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ /* Start from the normalized address */
+ ctx.ret_addr = norm_addr;
+
+ ctx.nid = nid;
+ ctx.inst_id = umc;
+
+ /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
+ if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
+ goto out_err;
+
+ /* Remove HiAddrOffset from normalized address, if enabled: */
+ if (ctx.tmp & BIT(0)) {
+ u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8;
+
+ if (norm_addr >= hi_addr_offset) {
+ ctx.ret_addr -= hi_addr_offset;
+ base = 1;
+ }
+ }
+
+ /* Read D18F0x110 (DramBaseAddress). */
+ if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
+ goto out_err;
+
+ /* Check if address range is valid. */
+ if (!(ctx.tmp & BIT(0))) {
+ pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
+ __func__, ctx.tmp);
+ goto out_err;
+ }
+
+ lgcy_mmio_hole_en = ctx.tmp & BIT(1);
+ intlv_num_chan = (ctx.tmp >> 4) & 0xF;
+ intlv_addr_sel = (ctx.tmp >> 8) & 0x7;
+ dram_base_addr = (ctx.tmp & GENMASK_ULL(31, 12)) << 16;
+
+ /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
+ if (intlv_addr_sel > 3) {
+ pr_err("%s: Invalid interleave address select %d.\n",
+ __func__, intlv_addr_sel);
+ goto out_err;
+ }
+
+ /* Read D18F0x114 (DramLimitAddress). */
+ if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
+ goto out_err;
+
+ intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
+ intlv_num_dies = (ctx.tmp >> 10) & 0x3;
+ dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
+
+ intlv_addr_bit = intlv_addr_sel + 8;
+
+ /* Re-use intlv_num_chan by setting it equal to log2(#channels) */
+ switch (intlv_num_chan) {
+ case 0: intlv_num_chan = 0; break;
+ case 1: intlv_num_chan = 1; break;
+ case 3: intlv_num_chan = 2; break;
+ case 5: intlv_num_chan = 3; break;
+ case 7: intlv_num_chan = 4; break;
+
+ case 8: intlv_num_chan = 1;
+ hash_enabled = true;
+ break;
+ default:
+ pr_err("%s: Invalid number of interleaved channels %d.\n",
+ __func__, intlv_num_chan);
+ goto out_err;
+ }
+
+ num_intlv_bits = intlv_num_chan;
+
+ if (intlv_num_dies > 2) {
+ pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
+ __func__, intlv_num_dies);
+ goto out_err;
+ }
+
+ num_intlv_bits += intlv_num_dies;
+
+ /* Add a bit if sockets are interleaved. */
+ num_intlv_bits += intlv_num_sockets;
+
+ /* Assert num_intlv_bits <= 4 */
+ if (num_intlv_bits > 4) {
+ pr_err("%s: Invalid interleave bits %d.\n",
+ __func__, num_intlv_bits);
+ goto out_err;
+ }
+
+ if (num_intlv_bits > 0) {
+ u64 temp_addr_x, temp_addr_i, temp_addr_y;
+ u8 die_id_bit, sock_id_bit, cs_fabric_id;
+
+ /*
+ * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
+ * This is the fabric id for this coherent slave. Use
+ * umc/channel# as instance id of the coherent slave
+ * for FICAA.
+ */
+ if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
+ goto out_err;
+
+ cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
+ die_id_bit = 0;
+
+ /* If interleaved over more than 1 channel: */
+ if (intlv_num_chan) {
+ die_id_bit = intlv_num_chan;
+ cs_mask = (1 << die_id_bit) - 1;
+ cs_id = cs_fabric_id & cs_mask;
+ }
+
+ sock_id_bit = die_id_bit;
+
+ /* Read D18F1x208 (SystemFabricIdMask). */
+ if (intlv_num_dies || intlv_num_sockets)
+ if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp))
+ goto out_err;
+
+ /* If interleaved over more than 1 die. */
+ if (intlv_num_dies) {
+ sock_id_bit = die_id_bit + intlv_num_dies;
+ die_id_shift = (ctx.tmp >> 24) & 0xF;
+ die_id_mask = (ctx.tmp >> 8) & 0xFF;
+
+ cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
+ }
+
+ /* If interleaved over more than 1 socket. */
+ if (intlv_num_sockets) {
+ socket_id_shift = (ctx.tmp >> 28) & 0xF;
+ socket_id_mask = (ctx.tmp >> 16) & 0xFF;
+
+ cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
+ }
+
+ /*
+ * The pre-interleaved address consists of XXXXXXIIIYYYYY
+ * where III is the ID for this CS, and XXXXXXYYYYY are the
+ * address bits from the post-interleaved address.
+ * "num_intlv_bits" has been calculated to tell us how many "I"
+ * bits there are. "intlv_addr_bit" tells us how many "Y" bits
+ * there are (where "I" starts).
+ */
+ temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
+ temp_addr_i = (cs_id << intlv_addr_bit);
+ temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
+ ctx.ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
+ }
+
+ /* Add dram base address */
+ ctx.ret_addr += dram_base_addr;
+
+ /* If legacy MMIO hole enabled */
+ if (lgcy_mmio_hole_en) {
+ if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp))
+ goto out_err;
+
+ dram_hole_base = ctx.tmp & GENMASK(31, 24);
+ if (ctx.ret_addr >= dram_hole_base)
+ ctx.ret_addr += (BIT_ULL(32) - dram_hole_base);
+ }
+
+ if (hash_enabled) {
+ /* Save some parentheses and grab ls-bit at the end. */
+ hashed_bit = (ctx.ret_addr >> 12) ^
+ (ctx.ret_addr >> 18) ^
+ (ctx.ret_addr >> 21) ^
+ (ctx.ret_addr >> 30) ^
+ cs_id;
+
+ hashed_bit &= BIT(0);
+
+ if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0)))
+ ctx.ret_addr ^= BIT(intlv_addr_bit);
+ }
+
+	/* Is the calculated system address above the DRAM limit address? */
+ if (ctx.ret_addr > dram_limit_addr)
+ goto out_err;
+
+ *sys_addr = ctx.ret_addr;
+ return 0;
+
+out_err:
+ return -EINVAL;
+}
+
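
A hedged sketch of how the translation helper above would be invoked for an error reported by a UMC MCA bank (node and channel values are placeholders supplied by the error record):

#include <linux/printk.h>
#include <linux/types.h>

static void example_report(u64 norm_addr, u16 nid, u8 umc)
{
	u64 sys_addr;

	/* Convert the UMC-normalized address to a system physical one. */
	if (umc_normaddr_to_sysaddr(norm_addr, nid, umc, &sys_addr)) {
		pr_warn("address translation failed\n");
		return;
	}

	pr_info("normalized 0x%016llx -> system 0x%016llx\n",
		norm_addr, sys_addr);
}
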
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
@@ -2650,6 +2925,26 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
+ [F19_M10H_CPUS] = {
+ .ctl_name = "F19h_M10h",
+ .f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
+ .max_mcs = 12,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
+ }
+ },
+ [F19_M50H_CPUS] = {
+ .ctl_name = "F19h_M50h",
+ .f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6,
+ .max_mcs = 2,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
+ }
+ },
};
/*
@@ -3687,11 +3982,25 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
break;
case 0x19:
- if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
+ if (pvt->model >= 0x10 && pvt->model <= 0x1f) {
+ fam_type = &family_types[F19_M10H_CPUS];
+ pvt->ops = &family_types[F19_M10H_CPUS].ops;
+ break;
+ } else if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
fam_type = &family_types[F17_M70H_CPUS];
pvt->ops = &family_types[F17_M70H_CPUS].ops;
fam_type->ctl_name = "F19h_M20h";
break;
+ } else if (pvt->model >= 0x50 && pvt->model <= 0x5f) {
+ fam_type = &family_types[F19_M50H_CPUS];
+ pvt->ops = &family_types[F19_M50H_CPUS].ops;
+ fam_type->ctl_name = "F19h_M50h";
+ break;
+ } else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) {
+ fam_type = &family_types[F19_M10H_CPUS];
+ pvt->ops = &family_types[F19_M10H_CPUS].ops;
+ fam_type->ctl_name = "F19h_MA0h";
+ break;
}
fam_type = &family_types[F19_CPUS];
pvt->ops = &family_types[F19_CPUS].ops;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 85aa820bc165..352bda9803f6 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -96,7 +96,7 @@
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8
-#define NUM_CONTROLLERS 8
+#define NUM_CONTROLLERS 12
#define ON true
#define OFF false
@@ -126,6 +126,10 @@
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
+#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad
+#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F0 0x166a
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F6 0x1670
/*
* Function 1 - Address Map
@@ -298,6 +302,8 @@ enum amd_families {
F17_M60H_CPUS,
F17_M70H_CPUS,
F19_CPUS,
+ F19_M10H_CPUS,
+ F19_M50H_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 9f82ca295353..9d9aabdec96b 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -162,6 +162,8 @@ const char * const edac_mem_types[] = {
[MEM_LPDDR4] = "Low-Power-DDR4-RAM",
[MEM_LRDDR4] = "Load-Reduced-DDR4-RAM",
[MEM_DDR5] = "Unbuffered-DDR5",
+ [MEM_RDDR5] = "Registered-DDR5",
+ [MEM_LRDDR5] = "Load-Reduced-DDR5-RAM",
[MEM_NVDIMM] = "Non-volatile-RAM",
[MEM_WIO2] = "Wide-IO-2",
[MEM_HBM2] = "High-bandwidth-memory-Gen2",
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 67dbf4c31271..cc5c63feb26a 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -399,6 +399,63 @@ static const char * const smca_mp5_mce_desc[] = {
"Instruction Tag Cache Bank B ECC or parity error",
};
+static const char * const smca_mpdma_mce_desc[] = {
+ "Main SRAM [31:0] bank ECC or parity error",
+ "Main SRAM [63:32] bank ECC or parity error",
+ "Main SRAM [95:64] bank ECC or parity error",
+ "Main SRAM [127:96] bank ECC or parity error",
+ "Data Cache Bank A ECC or parity error",
+ "Data Cache Bank B ECC or parity error",
+ "Data Tag Cache Bank A ECC or parity error",
+ "Data Tag Cache Bank B ECC or parity error",
+ "Instruction Cache Bank A ECC or parity error",
+ "Instruction Cache Bank B ECC or parity error",
+ "Instruction Tag Cache Bank A ECC or parity error",
+ "Instruction Tag Cache Bank B ECC or parity error",
+ "Data Cache Bank A ECC or parity error",
+ "Data Cache Bank B ECC or parity error",
+ "Data Tag Cache Bank A ECC or parity error",
+ "Data Tag Cache Bank B ECC or parity error",
+ "Instruction Cache Bank A ECC or parity error",
+ "Instruction Cache Bank B ECC or parity error",
+ "Instruction Tag Cache Bank A ECC or parity error",
+ "Instruction Tag Cache Bank B ECC or parity error",
+ "Data Cache Bank A ECC or parity error",
+ "Data Cache Bank B ECC or parity error",
+ "Data Tag Cache Bank A ECC or parity error",
+ "Data Tag Cache Bank B ECC or parity error",
+ "Instruction Cache Bank A ECC or parity error",
+ "Instruction Cache Bank B ECC or parity error",
+ "Instruction Tag Cache Bank A ECC or parity error",
+ "Instruction Tag Cache Bank B ECC or parity error",
+ "System Hub Read Buffer ECC or parity error",
+ "MPDMA TVF DVSEC Memory ECC or parity error",
+ "MPDMA TVF MMIO Mailbox0 ECC or parity error",
+ "MPDMA TVF MMIO Mailbox1 ECC or parity error",
+ "MPDMA TVF Doorbell Memory ECC or parity error",
+ "MPDMA TVF SDP Slave Memory 0 ECC or parity error",
+ "MPDMA TVF SDP Slave Memory 1 ECC or parity error",
+ "MPDMA TVF SDP Slave Memory 2 ECC or parity error",
+ "MPDMA TVF SDP Master Memory 0 ECC or parity error",
+ "MPDMA TVF SDP Master Memory 1 ECC or parity error",
+ "MPDMA TVF SDP Master Memory 2 ECC or parity error",
+ "MPDMA TVF SDP Master Memory 3 ECC or parity error",
+ "MPDMA TVF SDP Master Memory 4 ECC or parity error",
+ "MPDMA TVF SDP Master Memory 5 ECC or parity error",
+ "MPDMA TVF SDP Master Memory 6 ECC or parity error",
+ "MPDMA PTE Command FIFO ECC or parity error",
+ "MPDMA PTE Hub Data FIFO ECC or parity error",
+ "MPDMA PTE Internal Data FIFO ECC or parity error",
+ "MPDMA PTE Command Memory DMA ECC or parity error",
+ "MPDMA PTE Command Memory Internal ECC or parity error",
+ "MPDMA PTE DMA Completion FIFO ECC or parity error",
+ "MPDMA PTE Tablewalk Completion FIFO ECC or parity error",
+ "MPDMA PTE Descriptor Completion FIFO ECC or parity error",
+ "MPDMA PTE ReadOnly Completion FIFO ECC or parity error",
+ "MPDMA PTE DirectWrite Completion FIFO ECC or parity error",
+ "SDP Watchdog Timer expired",
+};
+
static const char * const smca_nbio_mce_desc[] = {
"ECC or Parity error",
"PCIE error",
@@ -448,7 +505,7 @@ static const char * const smca_xgmipcs_mce_desc[] = {
"Rx Replay Timeout Error",
"LinkSub Tx Timeout Error",
"LinkSub Rx Timeout Error",
- "Rx CMD Pocket Error",
+ "Rx CMD Packet Error",
};
static const char * const smca_xgmiphy_mce_desc[] = {
@@ -458,11 +515,66 @@ static const char * const smca_xgmiphy_mce_desc[] = {
"PHY APB error",
};
-static const char * const smca_waflphy_mce_desc[] = {
- "RAM ECC Error",
- "ARC instruction buffer parity error",
- "ARC data buffer parity error",
- "PHY APB error",
+static const char * const smca_nbif_mce_desc[] = {
+ "Timeout error from GMI",
+ "SRAM ECC error",
+ "NTB Error Event",
+ "SDP Parity error",
+};
+
+static const char * const smca_sata_mce_desc[] = {
+ "Parity error for port 0",
+ "Parity error for port 1",
+ "Parity error for port 2",
+ "Parity error for port 3",
+ "Parity error for port 4",
+ "Parity error for port 5",
+ "Parity error for port 6",
+ "Parity error for port 7",
+};
+
+static const char * const smca_usb_mce_desc[] = {
+ "Parity error or ECC error for S0 RAM0",
+ "Parity error or ECC error for S0 RAM1",
+ "Parity error or ECC error for S0 RAM2",
+ "Parity error for PHY RAM0",
+ "Parity error for PHY RAM1",
+ "AXI Slave Response error",
+};
+
+static const char * const smca_gmipcs_mce_desc[] = {
+ "Data Loss Error",
+ "Training Error",
+ "Replay Parity Error",
+ "Rx Fifo Underflow Error",
+ "Rx Fifo Overflow Error",
+ "CRC Error",
+ "BER Exceeded Error",
+ "Tx Fifo Underflow Error",
+ "Replay Buffer Parity Error",
+ "Tx Overflow Error",
+ "Replay Fifo Overflow Error",
+ "Replay Fifo Underflow Error",
+ "Elastic Fifo Overflow Error",
+ "Deskew Error",
+ "Offline Error",
+ "Data Startup Limit Error",
+ "FC Init Timeout Error",
+ "Recovery Timeout Error",
+ "Ready Serial Timeout Error",
+ "Ready Serial Attempt Error",
+ "Recovery Attempt Error",
+ "Recovery Relock Attempt Error",
+ "Deskew Abort Error",
+ "Rx Buffer Error",
+ "Rx LFDS Fifo Overflow Error",
+ "Rx LFDS Fifo Underflow Error",
+ "LinkSub Tx Timeout Error",
+ "LinkSub Rx Timeout Error",
+ "Rx CMD Packet Error",
+ "LFDS Training Timeout Error",
+ "LFDS FC Init Timeout Error",
+ "Data Loss Error",
};
struct smca_mce_desc {
@@ -490,12 +602,21 @@ static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_SMU] = { smca_smu_mce_desc, ARRAY_SIZE(smca_smu_mce_desc) },
[SMCA_SMU_V2] = { smca_smu2_mce_desc, ARRAY_SIZE(smca_smu2_mce_desc) },
[SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) },
+ [SMCA_MPDMA] = { smca_mpdma_mce_desc, ARRAY_SIZE(smca_mpdma_mce_desc) },
[SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) },
[SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) },
[SMCA_PCIE_V2] = { smca_pcie2_mce_desc, ARRAY_SIZE(smca_pcie2_mce_desc) },
[SMCA_XGMI_PCS] = { smca_xgmipcs_mce_desc, ARRAY_SIZE(smca_xgmipcs_mce_desc) },
+ /* NBIF and SHUB have the same error descriptions, for now. */
+ [SMCA_NBIF] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) },
+ [SMCA_SHUB] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) },
+ [SMCA_SATA] = { smca_sata_mce_desc, ARRAY_SIZE(smca_sata_mce_desc) },
+ [SMCA_USB] = { smca_usb_mce_desc, ARRAY_SIZE(smca_usb_mce_desc) },
+ [SMCA_GMI_PCS] = { smca_gmipcs_mce_desc, ARRAY_SIZE(smca_gmipcs_mce_desc) },
+ /* All the PHY bank types have the same error descriptions, for now. */
[SMCA_XGMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
- [SMCA_WAFL_PHY] = { smca_waflphy_mce_desc, ARRAY_SIZE(smca_waflphy_mce_desc) },
+ [SMCA_WAFL_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
+ [SMCA_GMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
};
static bool f12h_mc0_mce(u16 ec, u8 xec)
@@ -1045,20 +1166,13 @@ static void decode_mc6_mce(struct mce *m)
/* Decode errors according to Scalable MCA specification */
static void decode_smca_error(struct mce *m)
{
- struct smca_hwid *hwid;
- enum smca_bank_types bank_type;
+ enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank);
const char *ip_name;
u8 xec = XEC(m->status, xec_mask);
- if (m->bank >= ARRAY_SIZE(smca_banks))
+ if (bank_type >= N_SMCA_BANK_TYPES)
return;
- hwid = smca_banks[m->bank].hwid;
- if (!hwid)
- return;
-
- bank_type = hwid->bank_type;
-
if (bank_type == SMCA_RESERVED) {
pr_emerg(HW_ERR "Bank %d is reserved.\n", m->bank);
return;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 1522d4aa2ca6..9678ab97c7ac 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -3439,7 +3439,7 @@ MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
static int sbridge_probe(const struct x86_cpu_id *id)
{
- int rc = -ENODEV;
+ int rc;
u8 mc, num_mc = 0;
struct sbridge_dev *sbridge_dev;
struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
index 3a3dcb14ed99..ee800aec7d47 100644
--- a/drivers/edac/sifive_edac.c
+++ b/drivers/edac/sifive_edac.c
@@ -19,7 +19,7 @@ struct sifive_edac_priv {
struct edac_device_ctl_info *dci;
};
-/**
+/*
* EDAC error callback
*
* @event: non-zero if unrecoverable.
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 7d08627e738b..f05ff02c0656 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -101,6 +101,7 @@
/* DDR ECC Quirks */
#define DDR_ECC_INTR_SUPPORT BIT(0)
#define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
+#define DDR_ECC_INTR_SELF_CLEAR BIT(2)
/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
/* ECC Configuration Registers */
@@ -171,6 +172,10 @@
#define DDR_QOS_IRQ_EN_OFST 0x20208
#define DDR_QOS_IRQ_DB_OFST 0x2020C
+/* DDR QOS Interrupt register definitions */
+#define DDR_UE_MASK BIT(9)
+#define DDR_CE_MASK BIT(8)
+
/* ECC Corrected Error Register Mask and Shifts*/
#define ECC_CEADDR0_RW_MASK 0x3FFFF
#define ECC_CEADDR0_RNK_MASK BIT(24)
@@ -533,10 +538,16 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
priv = mci->pvt_info;
p_data = priv->p_data;
- regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
- regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
- if (!(regval & ECC_CE_UE_INTR_MASK))
- return IRQ_NONE;
+ /*
+ * v3.0 of the controller has the ce/ue bits cleared automatically,
+ * so this condition does not apply.
+ */
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
+ regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
+ regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
+ if (!(regval & ECC_CE_UE_INTR_MASK))
+ return IRQ_NONE;
+ }
status = p_data->get_error_info(priv);
if (status)
@@ -548,7 +559,9 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
edac_dbg(3, "Total error count CE %d UE %d\n",
priv->ce_cnt, priv->ue_cnt);
- writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
+ /* v3.0 of the controller does not have this register */
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
+ writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
return IRQ_HANDLED;
}
@@ -834,8 +847,13 @@ static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
static void enable_intr(struct synps_edac_priv *priv)
{
/* Enable UE/CE Interrupts */
- writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
- priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+ if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
+ writel(DDR_UE_MASK | DDR_CE_MASK,
+ priv->baseaddr + ECC_CLR_OFST);
+ else
+ writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+ priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+
}
static void disable_intr(struct synps_edac_priv *priv)
@@ -890,6 +908,19 @@ static const struct synps_platform_data zynqmp_edac_def = {
),
};
+static const struct synps_platform_data synopsys_edac_def = {
+ .get_error_info = zynqmp_get_error_info,
+ .get_mtype = zynqmp_get_mtype,
+ .get_dtype = zynqmp_get_dtype,
+ .get_ecc_state = zynqmp_get_ecc_state,
+ .quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
+#ifdef CONFIG_EDAC_DEBUG
+ | DDR_ECC_DATA_POISON_SUPPORT
+#endif
+ ),
+};
+
static const struct of_device_id synps_edac_match[] = {
{
.compatible = "xlnx,zynq-ddrc-a05",
@@ -900,6 +931,10 @@ static const struct of_device_id synps_edac_match[] = {
.data = (void *)&zynqmp_edac_def
},
{
+ .compatible = "snps,ddrc-3.80a",
+ .data = (void *)&synopsys_edac_def
+ },
+ {
/* end of table */
}
};
@@ -1352,8 +1387,7 @@ static int mc_probe(struct platform_device *pdev)
}
}
- if (of_device_is_compatible(pdev->dev.of_node,
- "xlnx,zynqmp-ddrc-2.40a"))
+ if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
setup_address_map(priv);
#endif
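
The quirk flag added above gates both the interrupt acknowledge and the enable path; a hedged sketch of the selection pattern, assuming of_device_get_match_data() resolves the per-compatible synps_platform_data:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_pick_platform_data(struct platform_device *pdev)
{
	const struct synps_platform_data *p_data;

	/* Returns the .data of the matched of_device_id entry. */
	p_data = of_device_get_match_data(&pdev->dev);
	if (!p_data)
		return -ENODEV;

	if (p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) {
		/* v3.x controller: CE/UE bits self-clear, no QOS IRQ ack. */
	}

	return 0;
}
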
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 0cb440bdd5cb..f2b65d967384 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* drivers/extcon/extcon-usb-gpio.c - USB GPIO extcon driver
*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index e7a9561a826d..a09e704fd0fa 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -576,19 +576,7 @@ EXPORT_SYMBOL_GPL(extcon_set_state);
*/
int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, bool state)
{
- int ret, index;
- unsigned long flags;
-
- index = find_cable_index_by_id(edev, id);
- if (index < 0)
- return index;
-
- /* Check whether the external connector's state is changed. */
- spin_lock_irqsave(&edev->lock, flags);
- ret = is_extcon_changed(edev, index, state);
- spin_unlock_irqrestore(&edev->lock, flags);
- if (!ret)
- return 0;
+ int ret;
ret = extcon_set_state(edev, id, state);
if (ret < 0)
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index 87039c5c03fd..eefcc4146749 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -452,7 +452,7 @@ static void scmi_vio_remove(struct virtio_device *vdev)
* outstanding message on any vqueue to be ignored by complete_cb: now
* we can just stop processing buffers and destroy the vqueues.
*/
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
/* Ensure scmi_vdev is visible as NULL */
smp_store_mb(scmi_vdev, NULL);
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 948dd8382686..e48108e694f8 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -12,16 +12,10 @@
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/firmware.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/workqueue.h>
#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/firmware/cirrus/wmfw.h>
@@ -622,7 +616,8 @@ static void cs_dsp_halo_show_fw_status(struct cs_dsp *dsp)
offs[0], offs[1], offs[2], offs[3]);
}
-static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg)
+static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg,
+ unsigned int off)
{
const struct cs_dsp_alg_region *alg_region = &ctl->alg_region;
struct cs_dsp *dsp = ctl->dsp;
@@ -635,7 +630,7 @@ static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg
return -EINVAL;
}
- *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset);
+ *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset + off);
return 0;
}
@@ -659,10 +654,12 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int
unsigned int reg;
int i, ret;
+ lockdep_assert_held(&dsp->pwr_lock);
+
if (!dsp->running)
return -EPERM;
- ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ ret = cs_dsp_coeff_base_reg(ctl, &reg, 0);
if (ret)
return ret;
@@ -716,14 +713,14 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int
EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_acked_control);
static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
- const void *buf, size_t len)
+ unsigned int off, const void *buf, size_t len)
{
struct cs_dsp *dsp = ctl->dsp;
void *scratch;
int ret;
unsigned int reg;
- ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ ret = cs_dsp_coeff_base_reg(ctl, &reg, off);
if (ret)
return ret;
@@ -749,38 +746,49 @@ static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
/**
* cs_dsp_coeff_write_ctrl() - Writes the given buffer to the given coefficient control
* @ctl: pointer to coefficient control
+ * @off: word offset at which data should be written
* @buf: the buffer to write to the given control
- * @len: the length of the buffer
+ * @len: the length of the buffer in bytes
*
* Must be called with pwr_lock held.
*
* Return: Zero for success, a negative number on error.
*/
-int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, const void *buf, size_t len)
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, const void *buf, size_t len)
{
int ret = 0;
+ if (!ctl)
+ return -ENOENT;
+
+ lockdep_assert_held(&ctl->dsp->pwr_lock);
+
+ if (len + off * sizeof(u32) > ctl->len)
+ return -EINVAL;
+
if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
ret = -EPERM;
else if (buf != ctl->cache)
- memcpy(ctl->cache, buf, len);
+ memcpy(ctl->cache + off * sizeof(u32), buf, len);
ctl->set = 1;
if (ctl->enabled && ctl->dsp->running)
- ret = cs_dsp_coeff_write_ctrl_raw(ctl, buf, len);
+ ret = cs_dsp_coeff_write_ctrl_raw(ctl, off, buf, len);
return ret;
}
EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_ctrl);
-static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len)
+static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, void *buf, size_t len)
{
struct cs_dsp *dsp = ctl->dsp;
void *scratch;
int ret;
unsigned int reg;
- ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ ret = cs_dsp_coeff_base_reg(ctl, &reg, off);
if (ret)
return ret;
@@ -806,28 +814,38 @@ static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, void *buf, s
/**
* cs_dsp_coeff_read_ctrl() - Reads the given coefficient control into the given buffer
* @ctl: pointer to coefficient control
+ * @off: word offset at which data should be read
* @buf: the buffer to store to the given control
- * @len: the length of the buffer
+ * @len: the length of the buffer in bytes
*
* Must be called with pwr_lock held.
*
* Return: Zero for success, a negative number on error.
*/
-int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len)
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, void *buf, size_t len)
{
int ret = 0;
+ if (!ctl)
+ return -ENOENT;
+
+ lockdep_assert_held(&ctl->dsp->pwr_lock);
+
+ if (len + off * sizeof(u32) > ctl->len)
+ return -EINVAL;
+
if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
if (ctl->enabled && ctl->dsp->running)
- return cs_dsp_coeff_read_ctrl_raw(ctl, buf, len);
+ return cs_dsp_coeff_read_ctrl_raw(ctl, off, buf, len);
else
return -EPERM;
} else {
if (!ctl->flags && ctl->enabled && ctl->dsp->running)
- ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len);
+ ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len);
if (buf != ctl->cache)
- memcpy(buf, ctl->cache, len);
+ memcpy(buf, ctl->cache + off * sizeof(u32), len);
}
return ret;
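
The coefficient read/write helpers now take a word offset and assert pwr_lock; a hedged sketch of a client writing one 32-bit word into a named control (control words are big-endian in DSP memory):

#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/mutex.h>
#include <linux/types.h>

static int example_write_word(struct cs_dsp *dsp, const char *name,
			      int type, unsigned int alg,
			      unsigned int off, u32 val)
{
	struct cs_dsp_coeff_ctl *ctl;
	__be32 word = cpu_to_be32(val);
	int ret;

	mutex_lock(&dsp->pwr_lock);	/* both helpers assert this lock */
	ctl = cs_dsp_get_ctl(dsp, name, type, alg);
	ret = cs_dsp_coeff_write_ctrl(ctl, off, &word, sizeof(word));
	mutex_unlock(&dsp->pwr_lock);

	return ret;	/* -ENOENT if the control was not found */
}
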
@@ -851,7 +869,7 @@ static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp)
* created so we don't need to do anything.
*/
if (!ctl->flags || (ctl->flags & WMFW_CTL_FLAG_READABLE)) {
- ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len);
+ ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len);
if (ret < 0)
return ret;
}
@@ -869,7 +887,7 @@ static int cs_dsp_coeff_sync_controls(struct cs_dsp *dsp)
if (!ctl->enabled)
continue;
if (ctl->set && !(ctl->flags & WMFW_CTL_FLAG_VOLATILE)) {
- ret = cs_dsp_coeff_write_ctrl_raw(ctl, ctl->cache,
+ ret = cs_dsp_coeff_write_ctrl_raw(ctl, 0, ctl->cache,
ctl->len);
if (ret < 0)
return ret;
@@ -1159,6 +1177,7 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp,
return -EINVAL;
break;
case WMFW_CTL_TYPE_HOSTEVENT:
+ case WMFW_CTL_TYPE_FWEVENT:
ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
WMFW_CTL_FLAG_SYS |
WMFW_CTL_FLAG_VOLATILE |
@@ -1459,6 +1478,8 @@ struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, in
{
struct cs_dsp_coeff_ctl *pos, *rslt = NULL;
+ lockdep_assert_held(&dsp->pwr_lock);
+
list_for_each_entry(pos, &dsp->ctl_list, list) {
if (!pos->subname)
continue;
@@ -1554,6 +1575,8 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
{
struct cs_dsp_alg_region *alg_region;
+ lockdep_assert_held(&dsp->pwr_lock);
+
list_for_each_entry(alg_region, &dsp->alg_regions, list) {
if (id == alg_region->alg && type == alg_region->type)
return alg_region;
@@ -1565,7 +1588,7 @@ EXPORT_SYMBOL_GPL(cs_dsp_find_alg_region);
static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
int type, __be32 id,
- __be32 base)
+ __be32 ver, __be32 base)
{
struct cs_dsp_alg_region *alg_region;
@@ -1575,6 +1598,7 @@ static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
alg_region->type = type;
alg_region->alg = be32_to_cpu(id);
+ alg_region->ver = be32_to_cpu(ver);
alg_region->base = be32_to_cpu(base);
list_add_tail(&alg_region->list, &dsp->alg_regions);
@@ -1624,14 +1648,14 @@ static void cs_dsp_parse_wmfw_v3_id_header(struct cs_dsp *dsp,
nalgs);
}
-static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, int nregions,
- const int *type, __be32 *base)
+static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver,
+ int nregions, const int *type, __be32 *base)
{
struct cs_dsp_alg_region *alg_region;
int i;
for (i = 0; i < nregions; i++) {
- alg_region = cs_dsp_create_region(dsp, type[i], id, base[i]);
+ alg_region = cs_dsp_create_region(dsp, type[i], id, ver, base[i]);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
}
@@ -1666,12 +1690,14 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp)
cs_dsp_parse_wmfw_id_header(dsp, &adsp1_id.fw, n_algs);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
- adsp1_id.fw.id, adsp1_id.zm);
+ adsp1_id.fw.id, adsp1_id.fw.ver,
+ adsp1_id.zm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
- adsp1_id.fw.id, adsp1_id.dm);
+ adsp1_id.fw.id, adsp1_id.fw.ver,
+ adsp1_id.dm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
@@ -1694,6 +1720,7 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp)
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
adsp1_alg[i].alg.id,
+ adsp1_alg[i].alg.ver,
adsp1_alg[i].dm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
@@ -1715,6 +1742,7 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp)
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
adsp1_alg[i].alg.id,
+ adsp1_alg[i].alg.ver,
adsp1_alg[i].zm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
@@ -1767,17 +1795,20 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
cs_dsp_parse_wmfw_id_header(dsp, &adsp2_id.fw, n_algs);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
- adsp2_id.fw.id, adsp2_id.xm);
+ adsp2_id.fw.id, adsp2_id.fw.ver,
+ adsp2_id.xm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
- adsp2_id.fw.id, adsp2_id.ym);
+ adsp2_id.fw.id, adsp2_id.fw.ver,
+ adsp2_id.ym);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
- adsp2_id.fw.id, adsp2_id.zm);
+ adsp2_id.fw.id, adsp2_id.fw.ver,
+ adsp2_id.zm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
@@ -1802,6 +1833,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
adsp2_alg[i].alg.id,
+ adsp2_alg[i].alg.ver,
adsp2_alg[i].xm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
@@ -1823,6 +1855,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
adsp2_alg[i].alg.id,
+ adsp2_alg[i].alg.ver,
adsp2_alg[i].ym);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
@@ -1844,6 +1877,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
adsp2_alg[i].alg.id,
+ adsp2_alg[i].alg.ver,
adsp2_alg[i].zm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
@@ -1869,7 +1903,7 @@ out:
return ret;
}
-static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id,
+static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver,
__be32 xm_base, __be32 ym_base)
{
static const int types[] = {
@@ -1878,7 +1912,7 @@ static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id,
};
__be32 bases[] = { xm_base, xm_base, ym_base, ym_base };
- return cs_dsp_create_regions(dsp, id, ARRAY_SIZE(types), types, bases);
+ return cs_dsp_create_regions(dsp, id, ver, ARRAY_SIZE(types), types, bases);
}
static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
@@ -1906,7 +1940,7 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
cs_dsp_parse_wmfw_v3_id_header(dsp, &halo_id.fw, n_algs);
- ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id,
+ ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id, halo_id.fw.ver,
halo_id.xm_base, halo_id.ym_base);
if (ret)
return ret;
@@ -1930,6 +1964,7 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
be32_to_cpu(halo_alg[i].ym_base));
ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
+ halo_alg[i].alg.ver,
halo_alg[i].xm_base,
halo_alg[i].ym_base);
if (ret)
@@ -1951,7 +1986,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
const struct cs_dsp_region *mem;
struct cs_dsp_alg_region *alg_region;
const char *region_name;
- int ret, pos, blocks, type, offset, reg;
+ int ret, pos, blocks, type, offset, reg, version;
+ char *text = NULL;
struct cs_dsp_buf *buf;
if (!firmware)
@@ -1973,6 +2009,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
switch (be32_to_cpu(hdr->rev) & 0xff) {
case 1:
+ case 2:
break;
default:
cs_dsp_err(dsp, "%s: Unsupported coefficient file format %d\n",
@@ -1995,6 +2032,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
type = le16_to_cpu(blk->type);
offset = le16_to_cpu(blk->offset);
+ version = le32_to_cpu(blk->ver) >> 8;
cs_dsp_dbg(dsp, "%s.%d: %x v%d.%d.%d\n",
file, blocks, le32_to_cpu(blk->id),
@@ -2008,6 +2046,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
region_name = "Unknown";
switch (type) {
case (WMFW_NAME_TEXT << 8):
+ text = kzalloc(le32_to_cpu(blk->len) + 1, GFP_KERNEL);
+ break;
case (WMFW_INFO_TEXT << 8):
case (WMFW_METADATA << 8):
break;
@@ -2052,6 +2092,16 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
alg_region = cs_dsp_find_alg_region(dsp, type,
le32_to_cpu(blk->id));
if (alg_region) {
+ if (version != alg_region->ver)
+ cs_dsp_warn(dsp,
+ "Algorithm coefficient version %d.%d.%d but expected %d.%d.%d\n",
+ (version >> 16) & 0xFF,
+ (version >> 8) & 0xFF,
+ version & 0xFF,
+ (alg_region->ver >> 16) & 0xFF,
+ (alg_region->ver >> 8) & 0xFF,
+ alg_region->ver & 0xFF);
+
reg = alg_region->base;
reg = dsp->ops->region_to_reg(mem, reg);
reg += offset;
@@ -2067,6 +2117,13 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
break;
}
+ if (text) {
+ memcpy(text, blk->data, le32_to_cpu(blk->len));
+ cs_dsp_info(dsp, "%s: %s\n", dsp->fw_name, text);
+ kfree(text);
+ text = NULL;
+ }
+
if (reg) {
if (le32_to_cpu(blk->len) >
firmware->size - pos - sizeof(*blk)) {
@@ -2117,6 +2174,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
out_fw:
regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
+ kfree(text);
return ret;
}
@@ -2600,6 +2658,12 @@ int cs_dsp_run(struct cs_dsp *dsp)
goto err;
}
+ if (dsp->client_ops->pre_run) {
+ ret = dsp->client_ops->pre_run(dsp);
+ if (ret)
+ goto err;
+ }
+
/* Sync set controls */
ret = cs_dsp_coeff_sync_controls(dsp);
if (ret != 0)
@@ -2680,10 +2744,16 @@ EXPORT_SYMBOL_GPL(cs_dsp_stop);
static int cs_dsp_halo_start_core(struct cs_dsp *dsp)
{
- return regmap_update_bits(dsp->regmap,
- dsp->base + HALO_CCM_CORE_CONTROL,
- HALO_CORE_RESET | HALO_CORE_EN,
- HALO_CORE_RESET | HALO_CORE_EN);
+ int ret;
+
+ ret = regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_RESET | HALO_CORE_EN,
+ HALO_CORE_RESET | HALO_CORE_EN);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_RESET, 0);
}
static void cs_dsp_halo_stop_core(struct cs_dsp *dsp)
@@ -2789,6 +2859,8 @@ int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int me
unsigned int reg;
int ret;
+ lockdep_assert_held(&dsp->pwr_lock);
+
if (!mem)
return -EINVAL;
@@ -2842,6 +2914,8 @@ int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_ad
__be32 val = cpu_to_be32(data & 0x00ffffffu);
unsigned int reg;
+ lockdep_assert_held(&dsp->pwr_lock);
+
if (!mem)
return -EINVAL;
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 8b8127fa8955..3a353776bd34 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -302,12 +302,12 @@ static struct attribute *dmi_sysfs_sel_attrs[] = {
&dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr,
NULL,
};
-
+ATTRIBUTE_GROUPS(dmi_sysfs_sel);
static struct kobj_type dmi_system_event_log_ktype = {
.release = dmi_entry_free,
.sysfs_ops = &dmi_sysfs_specialize_attr_ops,
- .default_attrs = dmi_sysfs_sel_attrs,
+ .default_groups = dmi_sysfs_sel_groups,
};
typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel,
@@ -518,6 +518,7 @@ static struct attribute *dmi_sysfs_entry_attrs[] = {
&dmi_sysfs_attr_entry_position.attr,
NULL,
};
+ATTRIBUTE_GROUPS(dmi_sysfs_entry);
static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
const struct dmi_header *dh,
@@ -565,7 +566,7 @@ static void dmi_sysfs_entry_release(struct kobject *kobj)
static struct kobj_type dmi_sysfs_entry_ktype = {
.release = dmi_sysfs_entry_release,
.sysfs_ops = &dmi_sysfs_attr_ops,
- .default_attrs = dmi_sysfs_entry_attrs,
+ .default_groups = dmi_sysfs_entry_groups,
};
static struct kset *dmi_kset;
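
The dmi-sysfs, edd, efivars and esrt hunks all apply the same kobject conversion: kobj_type::default_attrs is being retired in favour of default_groups, with ATTRIBUTE_GROUPS() generating the group array from an attribute list. A minimal sketch of the pattern (attribute definitions elided):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	/* &example_attr_foo.attr, ... */
	NULL,
};
ATTRIBUTE_GROUPS(example);		/* emits example_groups[] */

static struct kobj_type example_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= example_groups,
};
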
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 14d0970a7198..69353dd0ea22 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -574,14 +574,6 @@ static EDD_DEVICE_ATTR(interface, 0444, edd_show_interface, edd_has_edd30);
static EDD_DEVICE_ATTR(host_bus, 0444, edd_show_host_bus, edd_has_edd30);
static EDD_DEVICE_ATTR(mbr_signature, 0444, edd_show_mbr_signature, edd_has_mbr_signature);
-
-/* These are default attributes that are added for every edd
- * device discovered. There are none.
- */
-static struct attribute * def_attrs[] = {
- NULL,
-};
-
/* These attributes are conditional and only added for some devices. */
static struct edd_attribute * edd_attrs[] = {
&edd_attr_raw_data,
@@ -619,7 +611,6 @@ static void edd_release(struct kobject * kobj)
static struct kobj_type edd_ktype = {
.release = edd_release,
.sysfs_ops = &edd_attr_ops,
- .default_attrs = def_attrs,
};
static struct kset *edd_kset;
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index b19ce1a83f91..b2c829e95bd1 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -235,6 +235,11 @@ void __init efi_init(void)
}
reserve_regions();
+ /*
+	 * Any capping of the usable memory range must come after the
+	 * memblock_add() calls. Now that memblock is fully populated, it
+	 * is time to apply the cap.
+ */
+ early_init_dt_check_for_usable_mem_range();
efi_esrt_init();
efi_mokvar_table_init();
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index ae79c3300129..7de3f5b6e8d0 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -722,6 +722,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
systab_hdr->revision >> 16,
systab_hdr->revision & 0xffff,
vendor);
+
+ if (IS_ENABLED(CONFIG_X86_64) &&
+ systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
+ !strcmp(vendor, "Apple")) {
+ pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
+ efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
+ }
}
static __initdata char memory_type_name[][13] = {
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index e6b16b3a17a8..ea0bc39dc965 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -352,11 +352,12 @@ static struct attribute *def_attrs[] = {
&efivar_attr_raw_var.attr,
NULL,
};
+ATTRIBUTE_GROUPS(def);
static struct kobj_type efivar_ktype = {
.release = efivar_release,
.sysfs_ops = &efivar_attr_ops,
- .default_attrs = def_attrs,
+ .default_groups = def_groups,
};
static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index d5915272141f..2a2f52b017e7 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -146,6 +146,8 @@ static struct attribute *esre1_attrs[] = {
&esre_last_attempt_status.attr,
NULL
};
+ATTRIBUTE_GROUPS(esre1);
+
static void esre_release(struct kobject *kobj)
{
struct esre_entry *entry = to_entry(kobj);
@@ -157,7 +159,7 @@ static void esre_release(struct kobject *kobj)
static struct kobj_type esre1_ktype = {
.release = esre_release,
.sysfs_ops = &esre_attr_ops,
- .default_attrs = esre1_attrs,
+ .default_groups = esre1_groups,
};
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 2363fee9211c..9cc556013d08 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -119,9 +119,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
if (image->image_base != _text)
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
- if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
- efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
- EFI_KIMG_ALIGN >> 10);
+ if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
+ efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
+ SEGMENT_ALIGN >> 10);
kernel_size = _edata - _text;
kernel_memsize = kernel_size + (_end - _edata);
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index d489bdc645fe..3d972061c1b0 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -20,10 +20,10 @@
bool efi_nochunk;
bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE);
-bool efi_noinitrd;
int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
bool efi_novamap;
+static bool efi_noinitrd;
static bool efi_nosoftreserve;
static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
@@ -625,6 +625,47 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
load_addr, load_size);
}
+static const struct {
+ efi_tcg2_event_t event_data;
+ efi_tcg2_tagged_event_t tagged_event;
+ u8 tagged_event_data[];
+} initrd_tcg2_event = {
+ {
+ sizeof(initrd_tcg2_event) + sizeof("Linux initrd"),
+ {
+ sizeof(initrd_tcg2_event.event_data.event_header),
+ EFI_TCG2_EVENT_HEADER_VERSION,
+ 9,
+ EV_EVENT_TAG,
+ },
+ },
+ {
+ INITRD_EVENT_TAG_ID,
+ sizeof("Linux initrd"),
+ },
+ { "Linux initrd" },
+};
+
+static void efi_measure_initrd(unsigned long load_addr, unsigned long load_size)
+{
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_tcg2_protocol_t *tcg2 = NULL;
+ efi_status_t status;
+
+ efi_bs_call(locate_protocol, &tcg2_guid, NULL, (void **)&tcg2);
+ if (tcg2) {
+ status = efi_call_proto(tcg2, hash_log_extend_event,
+ 0, load_addr, load_size,
+ &initrd_tcg2_event.event_data);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to measure initrd data: 0x%lx\n",
+ status);
+ else
+ efi_info("Measured initrd data into PCR %d\n",
+ initrd_tcg2_event.event_data.event_header.pcr_index);
+ }
+}
+
/**
* efi_load_initrd() - Load initial RAM disk
* @image: EFI loaded image protocol
@@ -643,17 +684,25 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
{
efi_status_t status;
- if (!load_addr || !load_size)
- return EFI_INVALID_PARAMETER;
-
- status = efi_load_initrd_dev_path(load_addr, load_size, hard_limit);
- if (status == EFI_SUCCESS) {
- efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
- } else if (status == EFI_NOT_FOUND) {
- status = efi_load_initrd_cmdline(image, load_addr, load_size,
- soft_limit, hard_limit);
- if (status == EFI_SUCCESS && *load_size > 0)
- efi_info("Loaded initrd from command line option\n");
+ if (efi_noinitrd) {
+ *load_addr = *load_size = 0;
+ status = EFI_SUCCESS;
+ } else {
+ status = efi_load_initrd_dev_path(load_addr, load_size, hard_limit);
+ if (status == EFI_SUCCESS) {
+ efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
+ if (*load_size > 0)
+ efi_measure_initrd(*load_addr, *load_size);
+ } else if (status == EFI_NOT_FOUND) {
+ status = efi_load_initrd_cmdline(image, load_addr, load_size,
+ soft_limit, hard_limit);
+ if (status == EFI_SUCCESS && *load_size > 0)
+ efi_info("Loaded initrd from command line option\n");
+ }
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to load initrd: 0x%lx\n", status);
+ *load_addr = *load_size = 0;
+ }
}
return status;
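A note on the sizing in initrd_tcg2_event above: the structure ends in a flexible array member, so sizeof() covers only the two fixed headers, and the "Linux initrd" payload (including its NUL terminator) has to be added explicitly when computing event_size. A hypothetical compile-time check, not part of the patch, spells out that assumption:

/* Hypothetical check: the flexible array member adds nothing to sizeof(),
 * so event_size = both fixed headers + sizeof("Linux initrd"). */
_Static_assert(sizeof(initrd_tcg2_event) ==
	       sizeof(efi_tcg2_event_t) + sizeof(efi_tcg2_tagged_event_t),
	       "unexpected padding in initrd_tcg2_event");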
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 26e69788f27a..da93864d7abc 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -40,6 +40,8 @@
#ifdef CONFIG_ARM64
# define EFI_RT_VIRTUAL_LIMIT DEFAULT_MAP_WINDOW_64
+#elif defined(CONFIG_RISCV)
+# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE_MIN
#else
# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE
#endif
@@ -134,7 +136,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
enum efi_secureboot_mode secure_boot;
struct screen_info *si;
efi_properties_table_t *prop_tbl;
- unsigned long max_addr;
efi_system_table = sys_table_arg;
@@ -240,13 +241,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
if (!fdt_addr)
efi_info("Generating empty DTB\n");
- if (!efi_noinitrd) {
- max_addr = efi_get_max_initrd_addr(image_addr);
- status = efi_load_initrd(image, &initrd_addr, &initrd_size,
- ULONG_MAX, max_addr);
- if (status != EFI_SUCCESS)
- efi_err("Failed to load initrd!\n");
- }
+ efi_load_initrd(image, &initrd_addr, &initrd_size, ULONG_MAX,
+ efi_get_max_initrd_addr(image_addr));
efi_random_get_seed();
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index cde0a2ef507d..edb77b0621ea 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -31,7 +31,6 @@
extern bool efi_nochunk;
extern bool efi_nokaslr;
-extern bool efi_noinitrd;
extern int efi_loglevel;
extern bool efi_novamap;
@@ -667,6 +666,29 @@ union apple_properties_protocol {
typedef u32 efi_tcg2_event_log_format;
+#define INITRD_EVENT_TAG_ID 0x8F3B22ECU
+#define EV_EVENT_TAG 0x00000006U
+#define EFI_TCG2_EVENT_HEADER_VERSION 0x1
+
+struct efi_tcg2_event {
+ u32 event_size;
+ struct {
+ u32 header_size;
+ u16 header_version;
+ u32 pcr_index;
+ u32 event_type;
+ } __packed event_header;
+ /* u8[] event follows here */
+} __packed;
+
+struct efi_tcg2_tagged_event {
+ u32 tagged_event_id;
+ u32 tagged_event_data_size;
+ /* u8 tagged event data follows here */
+} __packed;
+
+typedef struct efi_tcg2_event efi_tcg2_event_t;
+typedef struct efi_tcg2_tagged_event efi_tcg2_tagged_event_t;
typedef union efi_tcg2_protocol efi_tcg2_protocol_t;
union efi_tcg2_protocol {
@@ -677,7 +699,11 @@ union efi_tcg2_protocol {
efi_physical_addr_t *,
efi_physical_addr_t *,
efi_bool_t *);
- void *hash_log_extend_event;
+ efi_status_t (__efiapi *hash_log_extend_event)(efi_tcg2_protocol_t *,
+ u64,
+ efi_physical_addr_t,
+ u64,
+ const efi_tcg2_event_t *);
void *submit_command;
void *get_active_pcr_banks;
void *set_active_pcr_banks;
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index f14c4ff5839f..01ddd4502e28 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -673,6 +673,7 @@ unsigned long efi_main(efi_handle_t handle,
unsigned long bzimage_addr = (unsigned long)startup_32;
unsigned long buffer_start, buffer_end;
struct setup_header *hdr = &boot_params->hdr;
+ unsigned long addr, size;
efi_status_t status;
efi_system_table = sys_table_arg;
@@ -761,22 +762,15 @@ unsigned long efi_main(efi_handle_t handle,
* arguments will be processed only if image is not NULL, which will be
* the case only if we were loaded via the PE entry point.
*/
- if (!efi_noinitrd) {
- unsigned long addr, size;
-
- status = efi_load_initrd(image, &addr, &size,
- hdr->initrd_addr_max, ULONG_MAX);
-
- if (status != EFI_SUCCESS) {
- efi_err("Failed to load initrd!\n");
- goto fail;
- }
- if (size > 0) {
- efi_set_u64_split(addr, &hdr->ramdisk_image,
- &boot_params->ext_ramdisk_image);
- efi_set_u64_split(size, &hdr->ramdisk_size,
- &boot_params->ext_ramdisk_size);
- }
+ status = efi_load_initrd(image, &addr, &size, hdr->initrd_addr_max,
+ ULONG_MAX);
+ if (status != EFI_SUCCESS)
+ goto fail;
+ if (size > 0) {
+ efi_set_u64_split(addr, &hdr->ramdisk_image,
+ &boot_params->ext_ramdisk_image);
+ efi_set_u64_split(size, &hdr->ramdisk_size,
+ &boot_params->ext_ramdisk_size);
}
/*
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index ad9ddefc9dcb..92a3d45a795c 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -79,6 +79,7 @@ static struct attribute *def_attrs[] = {
&map_attribute_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(def);
static const struct sysfs_ops map_attr_ops = {
.show = map_attr_show,
@@ -94,7 +95,7 @@ static void map_release(struct kobject *kobj)
static struct kobj_type __refdata map_ktype = {
.sysfs_ops = &map_attr_ops,
- .default_attrs = def_attrs,
+ .default_groups = def_groups,
.release = map_release,
};
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 97968aece54f..931544c9f63d 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -3,9 +3,9 @@ menuconfig GOOGLE_FIRMWARE
bool "Google Firmware Drivers"
default n
help
- These firmware drivers are used by Google's servers. They are
- only useful if you are working directly on one of their
- proprietary servers. If in doubt, say "N".
+ These firmware drivers are used by Google servers,
+ Chromebooks and other devices using coreboot firmware.
+ If in doubt, say "N".
if GOOGLE_FIRMWARE
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 24945e2da77b..8e59be3782cb 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -69,6 +69,7 @@ static struct attribute *def_attrs[] = {
&memmap_type_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(def);
static const struct sysfs_ops memmap_attr_ops = {
.show = memmap_attr_show,
@@ -118,7 +119,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
static struct kobj_type __refdata memmap_ktype = {
.release = release_firmware_map_entry,
.sysfs_ops = &memmap_attr_ops,
- .default_attrs = def_attrs,
+ .default_groups = def_groups,
};
/*
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 172c751a4f6c..a69399a6b7c0 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -388,14 +388,13 @@ static void fw_cfg_sysfs_cache_cleanup(void)
struct fw_cfg_sysfs_entry *entry, *next;
list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
- /* will end up invoking fw_cfg_sysfs_cache_delist()
- * via each object's release() method (i.e. destructor)
- */
+ fw_cfg_sysfs_cache_delist(entry);
+ kobject_del(&entry->kobj);
kobject_put(&entry->kobj);
}
}
-/* default_attrs: per-entry attributes and show methods */
+/* per-entry attributes and show methods */
#define FW_CFG_SYSFS_ATTR(_attr) \
struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \
@@ -428,6 +427,7 @@ static struct attribute *fw_cfg_sysfs_entry_attrs[] = {
&fw_cfg_sysfs_attr_name.attr,
NULL,
};
+ATTRIBUTE_GROUPS(fw_cfg_sysfs_entry);
/* sysfs_ops: find fw_cfg_[entry, attribute] and call appropriate show method */
static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a,
@@ -448,13 +448,12 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
{
struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
- fw_cfg_sysfs_cache_delist(entry);
kfree(entry);
}
/* kobj_type: ties together all properties required to register an entry */
static struct kobj_type fw_cfg_sysfs_entry_ktype = {
- .default_attrs = fw_cfg_sysfs_entry_attrs,
+ .default_groups = fw_cfg_sysfs_entry_groups,
.sysfs_ops = &fw_cfg_sysfs_attr_ops,
.release = fw_cfg_sysfs_release_entry,
};
@@ -601,20 +600,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
/* set file entry information */
entry->size = be32_to_cpu(f->size);
entry->select = be16_to_cpu(f->select);
- memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
+ strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
- if (err) {
- kobject_put(&entry->kobj);
- return err;
- }
+ if (err)
+ goto err_put_entry;
/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
if (err)
- goto err_add_raw;
+ goto err_del_entry;
/* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name);
@@ -623,9 +620,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
fw_cfg_sysfs_cache_enlist(entry);
return 0;
-err_add_raw:
+err_del_entry:
kobject_del(&entry->kobj);
- kfree(entry);
+err_put_entry:
+ kobject_put(&entry->kobj);
return err;
}
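The reworked error handling above follows the standard kobject ownership rule: once kobject_init_and_add() has been called, the object may only be freed through kobject_put(), which ends up in the ktype's release() callback; a kobject that is already live in sysfs is removed with kobject_del() first. In sketch form, with generic names:

err = kobject_init_and_add(&entry->kobj, &entry_ktype, parent_kobj,
			   "%d", key);
if (err) {
	kobject_put(&entry->kobj);	/* release() frees entry */
	return err;
}
/* on a later failure, once the kobject is visible in sysfs: */
kobject_del(&entry->kobj);		/* remove from sysfs */
kobject_put(&entry->kobj);		/* drop the final reference */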
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index b86761904949..303a491e520d 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -113,12 +113,16 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
sysfb_apply_efi_quirks(pd);
ret = platform_device_add_resources(pd, &res, 1);
- if (ret)
+ if (ret) {
+ platform_device_put(pd);
return ret;
+ }
ret = platform_device_add_data(pd, mode, sizeof(*mode));
- if (ret)
+ if (ret) {
+ platform_device_put(pd);
return ret;
+ }
return platform_device_add(pd);
}
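The fix follows the platform-device refcounting rule: a device obtained from platform_device_alloc() holds a reference that must be dropped with platform_device_put() on every error path taken before platform_device_add() succeeds. Condensed from the function above:

pd = platform_device_alloc("simple-framebuffer", 0);	/* takes a ref */
if (!pd)
	return -ENOMEM;

ret = platform_device_add_resources(pd, &res, 1);
if (ret) {
	platform_device_put(pd);	/* drop the ref on error */
	return ret;
}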
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 235c7e7869aa..5ae2040b8b02 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -1759,7 +1759,7 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
desc->num = resp->range_num;
desc->start_sec = resp->range_start_sec;
desc->num_sec = resp->range_num_sec;
- };
+ }
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 3dd45a7420dc..450c5f6a1cbf 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -23,6 +23,7 @@
#include <linux/hashtable.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/firmware/xlnx-event-manager.h>
#include "zynqmp-debug.h"
/* Max HashMap Order for PM API feature check (1<<7 = 128) */
@@ -38,6 +39,8 @@
static bool feature_check_enabled;
static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+static struct platform_device *em_dev;
+
/**
* struct pm_api_feature_data - PM API Feature data
* @pm_api_id: PM API Id, used as key to index into hashmap
@@ -160,7 +163,7 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_feature(u32 api_id)
+int zynqmp_pm_feature(const u32 api_id)
{
int ret;
u32 ret_payload[PAYLOAD_ARG_CNT];
@@ -197,6 +200,7 @@ static int zynqmp_pm_feature(u32 api_id)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
@@ -1117,6 +1121,29 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out)
EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
/**
+ * zynqmp_pm_register_notifier() - PM API to register a subsystem to be
+ *                                 notified about a specific event/error
+ * @node: Node ID to which the event is related
+ * @event: Mask of error events for which notification is requested
+ * @wake: Wake the subsystem upon capturing the event if value is 1
+ * @enable: Enable the registration for value 1, disable for value 0
+ *
+ * This function is used to register or unregister for a particular
+ * node-event combination in firmware.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event,
+ wake, enable, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier);
+
+/**
* zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
* @type: Shutdown or restart? 0 for shutdown, 1 for restart
* @subtype: Specifies which system should be restarted or shut down
@@ -1434,7 +1461,10 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
return ret;
/* Check PM API version number */
- zynqmp_pm_get_api_version(&pm_api_version);
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
+ if (ret)
+ return ret;
+
if (pm_api_version < ZYNQMP_PM_VERSION) {
panic("%s Platform Management API version error. Expected: v%d.%d - Found: v%d.%d\n",
__func__,
@@ -1468,6 +1498,15 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
zynqmp_pm_api_debugfs_init();
+ np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
+ if (np) {
+ em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
+ -1, NULL, 0);
+ if (IS_ERR(em_dev))
+ dev_err_probe(&pdev->dev, PTR_ERR(em_dev), "could not register xlnx_event_manager device\n");
+ }
+ of_node_put(np);
+
return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
@@ -1485,6 +1524,8 @@ static int zynqmp_firmware_remove(struct platform_device *pdev)
kfree(feature_data);
}
+ platform_device_unregister(em_dev);
+
return 0;
}
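For context, a consumer of the new notifier call might look roughly like the sketch below; the node and event values are hypothetical:

/* Hypothetical caller: enable notifications for an error event on a
 * node without waking the subsystem on delivery. */
ret = zynqmp_pm_register_notifier(node_id, event_mask,
				  0 /* wake */, 1 /* enable */);
if (ret)
	dev_err(dev, "failed to register PM notifier: %d\n", ret);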
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index ccf4546eff29..4ffb9da537d8 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -652,19 +652,15 @@ static int altera_cvp_probe(struct pci_dev *pdev,
snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
ALTERA_CVP_MGR_NAME, pci_name(pdev));
- mgr = devm_fpga_mgr_create(&pdev->dev, conf->mgr_name,
- &altera_cvp_ops, conf);
- if (!mgr) {
- ret = -ENOMEM;
+ mgr = fpga_mgr_register(&pdev->dev, conf->mgr_name,
+ &altera_cvp_ops, conf);
+ if (IS_ERR(mgr)) {
+ ret = PTR_ERR(mgr);
goto err_unmap;
}
pci_set_drvdata(pdev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- goto err_unmap;
-
return 0;
err_unmap:
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index a78e49c63c64..ff3a646fd9e3 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -121,17 +121,13 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
/* Get f2s bridge configuration saved in handoff register */
regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask);
- br = devm_fpga_bridge_create(dev, F2S_BRIDGE_NAME,
- &altera_fpga2sdram_br_ops, priv);
- if (!br)
- return -ENOMEM;
+ br = fpga_bridge_register(dev, F2S_BRIDGE_NAME,
+ &altera_fpga2sdram_br_ops, priv);
+ if (IS_ERR(br))
+ return PTR_ERR(br);
platform_set_drvdata(pdev, br);
- ret = fpga_bridge_register(br);
- if (ret)
- return ret;
-
dev_info(dev, "driver initialized with handoff %08x\n", priv->mask);
if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index 7d22a44d652e..445f4b011167 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -246,14 +246,14 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
priv->base_addr = base_addr;
- br = devm_fpga_bridge_create(dev, FREEZE_BRIDGE_NAME,
- &altera_freeze_br_br_ops, priv);
- if (!br)
- return -ENOMEM;
+ br = fpga_bridge_register(dev, FREEZE_BRIDGE_NAME,
+ &altera_freeze_br_br_ops, priv);
+ if (IS_ERR(br))
+ return PTR_ERR(br);
platform_set_drvdata(pdev, br);
- return fpga_bridge_register(br);
+ return 0;
}
static int altera_freeze_br_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index 77b95f251821..aa758426c22b 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -180,19 +180,15 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
}
}
- br = devm_fpga_bridge_create(dev, priv->name,
- &altera_hps2fpga_br_ops, priv);
- if (!br) {
- ret = -ENOMEM;
+ br = fpga_bridge_register(dev, priv->name,
+ &altera_hps2fpga_br_ops, priv);
+ if (IS_ERR(br)) {
+ ret = PTR_ERR(br);
goto err;
}
platform_set_drvdata(pdev, br);
- ret = fpga_bridge_register(br);
- if (ret)
- goto err;
-
return 0;
err:
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index dfdf21ed34c4..be0667968d33 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -191,11 +191,8 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
(val & ALT_PR_CSR_STATUS_MSK) >> ALT_PR_CSR_STATUS_SFT,
(int)(val & ALT_PR_CSR_PR_START));
- mgr = devm_fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, dev_name(dev), &alt_pr_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
}
EXPORT_SYMBOL_GPL(alt_pr_register);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 23bfd4d1ad0f..5e1e009dba89 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -302,12 +302,9 @@ static int altera_ps_probe(struct spi_device *spi)
snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s",
dev_driver_string(&spi->dev), dev_name(&spi->dev));
- mgr = devm_fpga_mgr_create(&spi->dev, conf->mgr_name,
- &altera_ps_ops, conf);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(&spi->dev, mgr);
+ mgr = devm_fpga_mgr_register(&spi->dev, conf->mgr_name,
+ &altera_ps_ops, conf);
+ return PTR_ERR_OR_ZERO(mgr);
}
static const struct spi_device_id altera_ps_spi_ids[] = {
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 3ff9f3a687ce..808d1f4d76df 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -68,14 +68,14 @@ static int fme_br_probe(struct platform_device *pdev)
priv->pdata = dev_get_platdata(dev);
- br = devm_fpga_bridge_create(dev, "DFL FPGA FME Bridge",
- &fme_bridge_ops, priv);
- if (!br)
- return -ENOMEM;
+ br = fpga_bridge_register(dev, "DFL FPGA FME Bridge",
+ &fme_bridge_ops, priv);
+ if (IS_ERR(br))
+ return PTR_ERR(br);
platform_set_drvdata(pdev, br);
- return fpga_bridge_register(br);
+ return 0;
}
static int fme_br_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
index 313420405d5e..af0785783b52 100644
--- a/drivers/fpga/dfl-fme-mgr.c
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -276,7 +276,7 @@ static void fme_mgr_get_compat_id(void __iomem *fme_pr,
static int fme_mgr_probe(struct platform_device *pdev)
{
struct dfl_fme_mgr_pdata *pdata = dev_get_platdata(&pdev->dev);
- struct fpga_compat_id *compat_id;
+ struct fpga_manager_info info = { 0 };
struct device *dev = &pdev->dev;
struct fme_mgr_priv *priv;
struct fpga_manager *mgr;
@@ -296,20 +296,16 @@ static int fme_mgr_probe(struct platform_device *pdev)
return PTR_ERR(priv->ioaddr);
}
- compat_id = devm_kzalloc(dev, sizeof(*compat_id), GFP_KERNEL);
- if (!compat_id)
+ info.name = "DFL FME FPGA Manager";
+ info.mops = &fme_mgr_ops;
+ info.priv = priv;
+ info.compat_id = devm_kzalloc(dev, sizeof(*info.compat_id), GFP_KERNEL);
+ if (!info.compat_id)
return -ENOMEM;
- fme_mgr_get_compat_id(priv->ioaddr, compat_id);
-
- mgr = devm_fpga_mgr_create(dev, "DFL FME FPGA Manager",
- &fme_mgr_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- mgr->compat_id = compat_id;
-
- return devm_fpga_mgr_register(dev, mgr);
+ fme_mgr_get_compat_id(priv->ioaddr, info.compat_id);
+ mgr = devm_fpga_mgr_register_full(dev, &info);
+ return PTR_ERR_OR_ZERO(mgr);
}
static struct platform_driver fme_mgr_driver = {
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 1eeb42af1012..4aebde0a7f1c 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -30,6 +30,7 @@ static int fme_region_get_bridges(struct fpga_region *region)
static int fme_region_probe(struct platform_device *pdev)
{
struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct fpga_region_info info = { 0 };
struct device *dev = &pdev->dev;
struct fpga_region *region;
struct fpga_manager *mgr;
@@ -39,20 +40,18 @@ static int fme_region_probe(struct platform_device *pdev)
if (IS_ERR(mgr))
return -EPROBE_DEFER;
- region = devm_fpga_region_create(dev, mgr, fme_region_get_bridges);
- if (!region) {
- ret = -ENOMEM;
+ info.mgr = mgr;
+ info.compat_id = mgr->compat_id;
+ info.get_bridges = fme_region_get_bridges;
+ info.priv = pdata;
+ region = fpga_region_register_full(dev, &info);
+ if (IS_ERR(region)) {
+ ret = PTR_ERR(region);
goto eprobe_mgr_put;
}
- region->priv = pdata;
- region->compat_id = mgr->compat_id;
platform_set_drvdata(pdev, region);
- ret = fpga_region_register(region);
- if (ret)
- goto eprobe_mgr_put;
-
dev_dbg(dev, "DFL FME FPGA Region probed\n");
return 0;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index f86666cf2c6a..599bb21d86af 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1407,19 +1407,15 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
if (!cdev)
return ERR_PTR(-ENOMEM);
- cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
- if (!cdev->region) {
- ret = -ENOMEM;
- goto free_cdev_exit;
- }
-
cdev->parent = info->dev;
mutex_init(&cdev->lock);
INIT_LIST_HEAD(&cdev->port_dev_list);
- ret = fpga_region_register(cdev->region);
- if (ret)
+ cdev->region = fpga_region_register(info->dev, NULL, NULL);
+ if (IS_ERR(cdev->region)) {
+ ret = PTR_ERR(cdev->region);
goto free_cdev_exit;
+ }
/* create and init build info for enumeration */
binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 798f55670646..16f2b164a178 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -312,36 +312,41 @@ static struct attribute *fpga_bridge_attrs[] = {
ATTRIBUTE_GROUPS(fpga_bridge);
/**
- * fpga_bridge_create - create and initialize a struct fpga_bridge
+ * fpga_bridge_register - create and register an FPGA Bridge device
* @parent: FPGA bridge device from pdev
* @name: FPGA bridge name
* @br_ops: pointer to structure of fpga bridge ops
* @priv: FPGA bridge private data
*
- * The caller of this function is responsible for freeing the bridge with
- * fpga_bridge_free(). Using devm_fpga_bridge_create() instead is recommended.
- *
- * Return: struct fpga_bridge or NULL
+ * Return: struct fpga_bridge pointer or ERR_PTR()
*/
-struct fpga_bridge *fpga_bridge_create(struct device *parent, const char *name,
- const struct fpga_bridge_ops *br_ops,
- void *priv)
+struct fpga_bridge *
+fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops,
+ void *priv)
{
struct fpga_bridge *bridge;
int id, ret;
+ if (!br_ops) {
+ dev_err(parent, "Attempt to register without fpga_bridge_ops\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (!name || !strlen(name)) {
dev_err(parent, "Attempt to register with no name!\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge)
- return NULL;
+ return ERR_PTR(-ENOMEM);
id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ ret = id;
goto error_kfree;
+ }
mutex_init(&bridge->mutex);
INIT_LIST_HEAD(&bridge->node);
@@ -350,17 +355,23 @@ struct fpga_bridge *fpga_bridge_create(struct device *parent, const char *name,
bridge->br_ops = br_ops;
bridge->priv = priv;
- device_initialize(&bridge->dev);
bridge->dev.groups = br_ops->groups;
bridge->dev.class = fpga_bridge_class;
bridge->dev.parent = parent;
bridge->dev.of_node = parent->of_node;
bridge->dev.id = id;
+ of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
ret = dev_set_name(&bridge->dev, "br%d", id);
if (ret)
goto error_device;
+ ret = device_register(&bridge->dev);
+ if (ret) {
+ put_device(&bridge->dev);
+ return ERR_PTR(ret);
+ }
+
return bridge;
error_device:
@@ -368,88 +379,7 @@ error_device:
error_kfree:
kfree(bridge);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(fpga_bridge_create);
-
-/**
- * fpga_bridge_free - free an fpga bridge created by fpga_bridge_create()
- * @bridge: FPGA bridge struct
- */
-void fpga_bridge_free(struct fpga_bridge *bridge)
-{
- ida_simple_remove(&fpga_bridge_ida, bridge->dev.id);
- kfree(bridge);
-}
-EXPORT_SYMBOL_GPL(fpga_bridge_free);
-
-static void devm_fpga_bridge_release(struct device *dev, void *res)
-{
- struct fpga_bridge *bridge = *(struct fpga_bridge **)res;
-
- fpga_bridge_free(bridge);
-}
-
-/**
- * devm_fpga_bridge_create - create and init a managed struct fpga_bridge
- * @parent: FPGA bridge device from pdev
- * @name: FPGA bridge name
- * @br_ops: pointer to structure of fpga bridge ops
- * @priv: FPGA bridge private data
- *
- * This function is intended for use in an FPGA bridge driver's probe function.
- * After the bridge driver creates the struct with devm_fpga_bridge_create(), it
- * should register the bridge with fpga_bridge_register(). The bridge driver's
- * remove function should call fpga_bridge_unregister(). The bridge struct
- * allocated with this function will be freed automatically on driver detach.
- * This includes the case of a probe function returning error before calling
- * fpga_bridge_register(), the struct will still get cleaned up.
- *
- * Return: struct fpga_bridge or NULL
- */
-struct fpga_bridge
-*devm_fpga_bridge_create(struct device *parent, const char *name,
- const struct fpga_bridge_ops *br_ops, void *priv)
-{
- struct fpga_bridge **ptr, *bridge;
-
- ptr = devres_alloc(devm_fpga_bridge_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- bridge = fpga_bridge_create(parent, name, br_ops, priv);
- if (!bridge) {
- devres_free(ptr);
- } else {
- *ptr = bridge;
- devres_add(parent, ptr);
- }
-
- return bridge;
-}
-EXPORT_SYMBOL_GPL(devm_fpga_bridge_create);
-
-/**
- * fpga_bridge_register - register an FPGA bridge
- *
- * @bridge: FPGA bridge struct
- *
- * Return: 0 for success, error code otherwise.
- */
-int fpga_bridge_register(struct fpga_bridge *bridge)
-{
- struct device *dev = &bridge->dev;
- int ret;
-
- ret = device_add(dev);
- if (ret)
- return ret;
-
- of_platform_populate(dev->of_node, NULL, NULL, dev);
-
- dev_info(dev->parent, "fpga bridge [%s] registered\n", bridge->name);
-
- return 0;
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(fpga_bridge_register);
@@ -475,6 +405,10 @@ EXPORT_SYMBOL_GPL(fpga_bridge_unregister);
static void fpga_bridge_dev_release(struct device *dev)
{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+ ida_simple_remove(&fpga_bridge_ida, bridge->dev.id);
+ kfree(bridge);
}
static int __init fpga_bridge_dev_init(void)
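With allocation and registration now combined, the bridge consumer pattern reduces to the sketch below (hypothetical driver names, mirroring the conversions earlier in this series):

/* probe: */
br = fpga_bridge_register(dev, "example bridge", &example_br_ops, priv);
if (IS_ERR(br))
	return PTR_ERR(br);
platform_set_drvdata(pdev, br);

/* remove: release() frees the ID and the struct after unregister. */
fpga_bridge_unregister(platform_get_drvdata(pdev));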
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index aa30889e2320..d49a9ce34568 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -592,49 +592,49 @@ void fpga_mgr_unlock(struct fpga_manager *mgr)
EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
/**
- * fpga_mgr_create - create and initialize an FPGA manager struct
+ * fpga_mgr_register_full - create and register an FPGA Manager device
* @parent: fpga manager device from pdev
- * @name: fpga manager name
- * @mops: pointer to structure of fpga manager ops
- * @priv: fpga manager private data
+ * @info: parameters for fpga manager
*
- * The caller of this function is responsible for freeing the struct with
- * fpga_mgr_free(). Using devm_fpga_mgr_create() instead is recommended.
+ * The caller of this function is responsible for calling fpga_mgr_unregister().
+ * Using devm_fpga_mgr_register_full() instead is recommended.
*
- * Return: pointer to struct fpga_manager or NULL
+ * Return: pointer to struct fpga_manager or ERR_PTR()
*/
-struct fpga_manager *fpga_mgr_create(struct device *parent, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv)
+struct fpga_manager *
+fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
{
+ const struct fpga_manager_ops *mops = info->mops;
struct fpga_manager *mgr;
int id, ret;
if (!mops) {
dev_err(parent, "Attempt to register without fpga_manager_ops\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
- if (!name || !strlen(name)) {
+ if (!info->name || !strlen(info->name)) {
dev_err(parent, "Attempt to register with no name!\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
- return NULL;
+ return ERR_PTR(-ENOMEM);
id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ ret = id;
goto error_kfree;
+ }
mutex_init(&mgr->ref_mutex);
- mgr->name = name;
- mgr->mops = mops;
- mgr->priv = priv;
+ mgr->name = info->name;
+ mgr->mops = info->mops;
+ mgr->priv = info->priv;
+ mgr->compat_id = info->compat_id;
- device_initialize(&mgr->dev);
mgr->dev.class = fpga_mgr_class;
mgr->dev.groups = mops->groups;
mgr->dev.parent = parent;
@@ -645,6 +645,19 @@ struct fpga_manager *fpga_mgr_create(struct device *parent, const char *name,
if (ret)
goto error_device;
+ /*
+ * Initialize framework state by requesting low level driver read state
+ * from device. FPGA may be in reset mode or may have been programmed
+ * by bootloader or EEPROM.
+ */
+ mgr->state = fpga_mgr_state(mgr);
+
+ ret = device_register(&mgr->dev);
+ if (ret) {
+ put_device(&mgr->dev);
+ return ERR_PTR(ret);
+ }
+
return mgr;
error_device:
@@ -652,96 +665,36 @@ error_device:
error_kfree:
kfree(mgr);
- return NULL;
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(fpga_mgr_create);
+EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
/**
- * fpga_mgr_free - free an FPGA manager created with fpga_mgr_create()
- * @mgr: fpga manager struct
- */
-void fpga_mgr_free(struct fpga_manager *mgr)
-{
- ida_simple_remove(&fpga_mgr_ida, mgr->dev.id);
- kfree(mgr);
-}
-EXPORT_SYMBOL_GPL(fpga_mgr_free);
-
-static void devm_fpga_mgr_release(struct device *dev, void *res)
-{
- struct fpga_mgr_devres *dr = res;
-
- fpga_mgr_free(dr->mgr);
-}
-
-/**
- * devm_fpga_mgr_create - create and initialize a managed FPGA manager struct
+ * fpga_mgr_register - create and register an FPGA Manager device
* @parent: fpga manager device from pdev
* @name: fpga manager name
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
*
- * This function is intended for use in an FPGA manager driver's probe function.
- * After the manager driver creates the manager struct with
- * devm_fpga_mgr_create(), it should register it with fpga_mgr_register(). The
- * manager driver's remove function should call fpga_mgr_unregister(). The
- * manager struct allocated with this function will be freed automatically on
- * driver detach. This includes the case of a probe function returning error
- * before calling fpga_mgr_register(), the struct will still get cleaned up.
+ * The caller of this function is responsible for calling fpga_mgr_unregister().
+ * Using devm_fpga_mgr_register() instead is recommended. This simple
+ * version of the register function should be sufficient for most users. The
+ * fpga_mgr_register_full() function is available for users that need to pass
+ * additional, optional parameters.
*
- * Return: pointer to struct fpga_manager or NULL
+ * Return: pointer to struct fpga_manager or ERR_PTR()
*/
-struct fpga_manager *devm_fpga_mgr_create(struct device *parent, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv)
+struct fpga_manager *
+fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv)
{
- struct fpga_mgr_devres *dr;
+ struct fpga_manager_info info = { 0 };
- dr = devres_alloc(devm_fpga_mgr_release, sizeof(*dr), GFP_KERNEL);
- if (!dr)
- return NULL;
+ info.name = name;
+ info.mops = mops;
+ info.priv = priv;
- dr->mgr = fpga_mgr_create(parent, name, mops, priv);
- if (!dr->mgr) {
- devres_free(dr);
- return NULL;
- }
-
- devres_add(parent, dr);
-
- return dr->mgr;
-}
-EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);
-
-/**
- * fpga_mgr_register - register an FPGA manager
- * @mgr: fpga manager struct
- *
- * Return: 0 on success, negative error code otherwise.
- */
-int fpga_mgr_register(struct fpga_manager *mgr)
-{
- int ret;
-
- /*
- * Initialize framework state by requesting low level driver read state
- * from device. FPGA may be in reset mode or may have been programmed
- * by bootloader or EEPROM.
- */
- mgr->state = fpga_mgr_state(mgr);
-
- ret = device_add(&mgr->dev);
- if (ret)
- goto error_device;
-
- dev_info(&mgr->dev, "%s registered\n", mgr->name);
-
- return 0;
-
-error_device:
- ida_simple_remove(&fpga_mgr_ida, mgr->dev.id);
-
- return ret;
+ return fpga_mgr_register_full(parent, &info);
}
EXPORT_SYMBOL_GPL(fpga_mgr_register);
@@ -765,14 +718,6 @@ void fpga_mgr_unregister(struct fpga_manager *mgr)
}
EXPORT_SYMBOL_GPL(fpga_mgr_unregister);
-static int fpga_mgr_devres_match(struct device *dev, void *res,
- void *match_data)
-{
- struct fpga_mgr_devres *dr = res;
-
- return match_data == dr->mgr;
-}
-
static void devm_fpga_mgr_unregister(struct device *dev, void *res)
{
struct fpga_mgr_devres *dr = res;
@@ -781,45 +726,67 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
}
/**
- * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
- * @dev: managing device for this FPGA manager
- * @mgr: fpga manager struct
+ * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register_full()
+ * @parent: fpga manager device from pdev
+ * @info: parameters for fpga manager
*
- * This is the devres variant of fpga_mgr_register() for which the unregister
+ * This is the devres variant of fpga_mgr_register_full() for which the unregister
* function will be called automatically when the managing device is detached.
*/
-int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr)
+struct fpga_manager *
+devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
{
struct fpga_mgr_devres *dr;
- int ret;
-
- /*
- * Make sure that the struct fpga_manager * that is passed in is
- * managed itself.
- */
- if (WARN_ON(!devres_find(dev, devm_fpga_mgr_release,
- fpga_mgr_devres_match, mgr)))
- return -EINVAL;
+ struct fpga_manager *mgr;
dr = devres_alloc(devm_fpga_mgr_unregister, sizeof(*dr), GFP_KERNEL);
if (!dr)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- ret = fpga_mgr_register(mgr);
- if (ret) {
+ mgr = fpga_mgr_register_full(parent, info);
+ if (IS_ERR(mgr)) {
devres_free(dr);
- return ret;
+ return mgr;
}
dr->mgr = mgr;
- devres_add(dev, dr);
+ devres_add(parent, dr);
- return 0;
+ return mgr;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
+
+/**
+ * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
+ * @parent: fpga manager device from pdev
+ * @name: fpga manager name
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * This is the devres variant of fpga_mgr_register() for which the
+ * unregister function will be called automatically when the managing
+ * device is detached.
+ */
+struct fpga_manager *
+devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv)
+{
+ struct fpga_manager_info info = { 0 };
+
+ info.name = name;
+ info.mops = mops;
+ info.priv = priv;
+
+ return devm_fpga_mgr_register_full(parent, &info);
}
EXPORT_SYMBOL_GPL(devm_fpga_mgr_register);
static void fpga_mgr_dev_release(struct device *dev)
{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ ida_simple_remove(&fpga_mgr_ida, mgr->dev.id);
+ kfree(mgr);
}
static int __init fpga_mgr_class_init(void)
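The _full variant takes an info structure so optional fields can be set before the device is created; dfl-fme-mgr above uses it to attach a compat_id. A condensed sketch with hypothetical names:

struct fpga_manager_info info = { 0 };

info.name = "Example FPGA Manager";	/* required */
info.mops = &example_mgr_ops;		/* required */
info.priv = priv;
info.compat_id = compat_id;		/* optional */

mgr = devm_fpga_mgr_register_full(dev, &info);
if (IS_ERR(mgr))
	return PTR_ERR(mgr);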
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index a4838715221f..b0ac18de4885 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -180,39 +180,42 @@ static struct attribute *fpga_region_attrs[] = {
ATTRIBUTE_GROUPS(fpga_region);
/**
- * fpga_region_create - alloc and init a struct fpga_region
+ * fpga_region_register_full - create and register an FPGA Region device
* @parent: device parent
- * @mgr: manager that programs this region
- * @get_bridges: optional function to get bridges to a list
- *
- * The caller of this function is responsible for freeing the resulting region
- * struct with fpga_region_free(). Using devm_fpga_region_create() instead is
- * recommended.
+ * @info: parameters for FPGA Region
*
- * Return: struct fpga_region or NULL
+ * Return: struct fpga_region or ERR_PTR()
*/
-struct fpga_region
-*fpga_region_create(struct device *parent,
- struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *))
+struct fpga_region *
+fpga_region_register_full(struct device *parent, const struct fpga_region_info *info)
{
struct fpga_region *region;
int id, ret = 0;
+ if (!info) {
+ dev_err(parent,
+ "Attempt to register without required info structure\n");
+ return ERR_PTR(-EINVAL);
+ }
+
region = kzalloc(sizeof(*region), GFP_KERNEL);
if (!region)
- return NULL;
+ return ERR_PTR(-ENOMEM);
id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ ret = id;
goto err_free;
+ }
+
+ region->mgr = info->mgr;
+ region->compat_id = info->compat_id;
+ region->priv = info->priv;
+ region->get_bridges = info->get_bridges;
- region->mgr = mgr;
- region->get_bridges = get_bridges;
mutex_init(&region->mutex);
INIT_LIST_HEAD(&region->bridge_list);
- device_initialize(&region->dev);
region->dev.class = fpga_region_class;
region->dev.parent = parent;
region->dev.of_node = parent->of_node;
@@ -222,6 +225,12 @@ struct fpga_region
if (ret)
goto err_remove;
+ ret = device_register(&region->dev);
+ if (ret) {
+ put_device(&region->dev);
+ return ERR_PTR(ret);
+ }
+
return region;
err_remove:
@@ -229,76 +238,32 @@ err_remove:
err_free:
kfree(region);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(fpga_region_create);
-
-/**
- * fpga_region_free - free an FPGA region created by fpga_region_create()
- * @region: FPGA region
- */
-void fpga_region_free(struct fpga_region *region)
-{
- ida_simple_remove(&fpga_region_ida, region->dev.id);
- kfree(region);
-}
-EXPORT_SYMBOL_GPL(fpga_region_free);
-
-static void devm_fpga_region_release(struct device *dev, void *res)
-{
- struct fpga_region *region = *(struct fpga_region **)res;
-
- fpga_region_free(region);
+ return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(fpga_region_register_full);
/**
- * devm_fpga_region_create - create and initialize a managed FPGA region struct
+ * fpga_region_register - create and register an FPGA Region device
* @parent: device parent
* @mgr: manager that programs this region
* @get_bridges: optional function to get bridges to a list
*
- * This function is intended for use in an FPGA region driver's probe function.
- * After the region driver creates the region struct with
- * devm_fpga_region_create(), it should register it with fpga_region_register().
- * The region driver's remove function should call fpga_region_unregister().
- * The region struct allocated with this function will be freed automatically on
- * driver detach. This includes the case of a probe function returning error
- * before calling fpga_region_register(), the struct will still get cleaned up.
+ * This simple version of the register function should be sufficient for most users.
+ * The fpga_region_register_full() function is available for users that need to
+ * pass additional, optional parameters.
*
- * Return: struct fpga_region or NULL
+ * Return: struct fpga_region or ERR_PTR()
*/
-struct fpga_region
-*devm_fpga_region_create(struct device *parent,
- struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *))
+struct fpga_region *
+fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *))
{
- struct fpga_region **ptr, *region;
-
- ptr = devres_alloc(devm_fpga_region_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
+ struct fpga_region_info info = { 0 };
- region = fpga_region_create(parent, mgr, get_bridges);
- if (!region) {
- devres_free(ptr);
- } else {
- *ptr = region;
- devres_add(parent, ptr);
- }
+ info.mgr = mgr;
+ info.get_bridges = get_bridges;
- return region;
-}
-EXPORT_SYMBOL_GPL(devm_fpga_region_create);
-
-/**
- * fpga_region_register - register an FPGA region
- * @region: FPGA region
- *
- * Return: 0 or -errno
- */
-int fpga_region_register(struct fpga_region *region)
-{
- return device_add(&region->dev);
+ return fpga_region_register_full(parent, &info);
}
EXPORT_SYMBOL_GPL(fpga_region_register);
@@ -316,6 +281,10 @@ EXPORT_SYMBOL_GPL(fpga_region_unregister);
static void fpga_region_dev_release(struct device *dev)
{
+ struct fpga_region *region = to_fpga_region(dev);
+
+ ida_simple_remove(&fpga_region_ida, region->dev.id);
+ kfree(region);
}
/**
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 029d3cdb918d..7cbb3558b844 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -178,12 +178,9 @@ static int ice40_fpga_probe(struct spi_device *spi)
return ret;
}
- mgr = devm_fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager",
- &ice40_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, "Lattice iCE40 FPGA Manager",
+ &ice40_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
}
static const struct of_device_id ice40_fpga_of_match[] = {
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index ea2ec3c6815c..905607992a12 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -370,12 +370,9 @@ static int machxo2_spi_probe(struct spi_device *spi)
return -EINVAL;
}
- mgr = devm_fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager",
- &machxo2_ops, spi);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, "Lattice MachXO2 SPI FPGA Manager",
+ &machxo2_ops, spi);
+ return PTR_ERR_OR_ZERO(mgr);
}
#ifdef CONFIG_OF
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index e3c25576b6b9..50b83057c048 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -405,16 +405,12 @@ static int of_fpga_region_probe(struct platform_device *pdev)
if (IS_ERR(mgr))
return -EPROBE_DEFER;
- region = devm_fpga_region_create(dev, mgr, of_fpga_region_get_bridges);
- if (!region) {
- ret = -ENOMEM;
+ region = fpga_region_register(dev, mgr, of_fpga_region_get_bridges);
+ if (IS_ERR(region)) {
+ ret = PTR_ERR(region);
goto eprobe_mgr_put;
}
- ret = fpga_region_register(region);
- if (ret)
- goto eprobe_mgr_put;
-
of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
platform_set_drvdata(pdev, region);
@@ -448,7 +444,7 @@ static struct platform_driver of_fpga_region_driver = {
};
/**
- * fpga_region_init - init function for fpga_region class
+ * of_fpga_region_init - init function for fpga_region class
* Creates the fpga_region class and registers a reconfig notifier.
*/
static int __init of_fpga_region_init(void)
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index 573d88bdf730..ac8e89b8a5cc 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -508,19 +508,15 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
return -EBUSY;
}
- mgr = devm_fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager",
- &socfpga_a10_fpga_mgr_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, mgr);
-
- ret = fpga_mgr_register(mgr);
- if (ret) {
+ mgr = fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
+ &socfpga_a10_fpga_mgr_ops, priv);
+ if (IS_ERR(mgr)) {
clk_disable_unprepare(priv->clk);
- return ret;
+ return PTR_ERR(mgr);
}
+ platform_set_drvdata(pdev, mgr);
+
return 0;
}
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 1f467173fc1f..7e0741f99696 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -571,12 +571,9 @@ static int socfpga_fpga_probe(struct platform_device *pdev)
if (ret)
return ret;
- mgr = devm_fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
- &socfpga_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, "Altera SOCFPGA FPGA Manager",
+ &socfpga_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
}
#ifdef CONFIG_OF
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 047fd7f23706..357cea58ec98 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -419,23 +419,16 @@ static int s10_probe(struct platform_device *pdev)
init_completion(&priv->status_return_completion);
- mgr = fpga_mgr_create(dev, "Stratix10 SOC FPGA Manager",
- &s10_ops, priv);
- if (!mgr) {
- dev_err(dev, "unable to create FPGA manager\n");
- ret = -ENOMEM;
- goto probe_err;
- }
-
- ret = fpga_mgr_register(mgr);
- if (ret) {
+ mgr = fpga_mgr_register(dev, "Stratix10 SOC FPGA Manager",
+ &s10_ops, priv);
+ if (IS_ERR(mgr)) {
dev_err(dev, "unable to register FPGA manager\n");
- fpga_mgr_free(mgr);
+ ret = PTR_ERR(mgr);
goto probe_err;
}
platform_set_drvdata(pdev, mgr);
- return ret;
+ return 0;
probe_err:
stratix10_svc_free_channel(priv->chan);
@@ -448,7 +441,6 @@ static int s10_remove(struct platform_device *pdev)
struct s10_priv *priv = mgr->priv;
fpga_mgr_unregister(mgr);
- fpga_mgr_free(mgr);
stratix10_svc_free_channel(priv->chan);
return 0;
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 167abb0b08d4..8e6e9c840d9d 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -116,12 +116,9 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
- mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
- &ts73xx_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(kdev, mgr);
+ mgr = devm_fpga_mgr_register(kdev, "TS-73xx FPGA Manager",
+ &ts73xx_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
}
static struct platform_driver ts73xx_fpga_driver = {
diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c
index 5b0dda304bd2..e1601b3a345b 100644
--- a/drivers/fpga/versal-fpga.c
+++ b/drivers/fpga/versal-fpga.c
@@ -54,12 +54,9 @@ static int versal_fpga_probe(struct platform_device *pdev)
return ret;
}
- mgr = devm_fpga_mgr_create(dev, "Xilinx Versal FPGA Manager",
- &versal_fpga_ops, NULL);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, "Xilinx Versal FPGA Manager",
+ &versal_fpga_ops, NULL);
+ return PTR_ERR_OR_ZERO(mgr);
}
static const struct of_device_id versal_fpga_of_match[] = {
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index e986ed47c4ed..2d9c491f7be9 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -140,22 +140,17 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
clk_disable(priv->clk);
- br = devm_fpga_bridge_create(&pdev->dev, priv->ipconfig->name,
- &xlnx_pr_decoupler_br_ops, priv);
- if (!br) {
- err = -ENOMEM;
- goto err_clk;
- }
-
- platform_set_drvdata(pdev, br);
-
- err = fpga_bridge_register(br);
- if (err) {
+ br = fpga_bridge_register(&pdev->dev, priv->ipconfig->name,
+ &xlnx_pr_decoupler_br_ops, priv);
+ if (IS_ERR(br)) {
+ err = PTR_ERR(br);
dev_err(&pdev->dev, "unable to register %s",
priv->ipconfig->name);
goto err_clk;
}
+ platform_set_drvdata(pdev, br);
+
return 0;
err_clk:
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index b6bcf1d9233d..e1a227e7ff2a 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -247,13 +247,10 @@ static int xilinx_spi_probe(struct spi_device *spi)
return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
"Failed to get DONE gpio\n");
- mgr = devm_fpga_mgr_create(&spi->dev,
- "Xilinx Slave Serial FPGA Manager",
- &xilinx_spi_ops, conf);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(&spi->dev, mgr);
+ mgr = devm_fpga_mgr_register(&spi->dev,
+ "Xilinx Slave Serial FPGA Manager",
+ &xilinx_spi_ops, conf);
+ return PTR_ERR_OR_ZERO(mgr);
}
#ifdef CONFIG_OF
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 9b75bd4f93d8..426aa34c6a0d 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -609,20 +609,16 @@ static int zynq_fpga_probe(struct platform_device *pdev)
clk_disable(priv->clk);
- mgr = devm_fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager",
- &zynq_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, mgr);
-
- err = fpga_mgr_register(mgr);
- if (err) {
+ mgr = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager",
+ &zynq_fpga_ops, priv);
+ if (IS_ERR(mgr)) {
dev_err(dev, "unable to register FPGA manager\n");
clk_unprepare(priv->clk);
- return err;
+ return PTR_ERR(mgr);
}
+ platform_set_drvdata(pdev, mgr);
+
return 0;
}
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index 7d3d5650c322..c60f20949c47 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -95,12 +95,9 @@ static int zynqmp_fpga_probe(struct platform_device *pdev)
priv->dev = dev;
- mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager",
- &zynqmp_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, "Xilinx ZynqMP FPGA Manager",
+ &zynqmp_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
}
#ifdef CONFIG_OF
diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig
index bd12e3d57baa..d7fe265c2869 100644
--- a/drivers/gnss/Kconfig
+++ b/drivers/gnss/Kconfig
@@ -54,4 +54,15 @@ config GNSS_UBX_SERIAL
If unsure, say N.
+config GNSS_USB
+ tristate "USB GNSS receiver support"
+ depends on USB
+ help
+ Say Y here if you have a GNSS receiver which uses a USB interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss-usb.
+
+ If unsure, say N.
+
endif # GNSS
diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile
index 451f11401ecc..bb2cbada3435 100644
--- a/drivers/gnss/Makefile
+++ b/drivers/gnss/Makefile
@@ -17,3 +17,6 @@ gnss-sirf-y := sirf.o
obj-$(CONFIG_GNSS_UBX_SERIAL) += gnss-ubx.o
gnss-ubx-y := ubx.o
+
+obj-$(CONFIG_GNSS_USB) += gnss-usb.o
+gnss-usb-y := usb.o
diff --git a/drivers/gnss/mtk.c b/drivers/gnss/mtk.c
index d1fc55560daf..c62b1211f4fe 100644
--- a/drivers/gnss/mtk.c
+++ b/drivers/gnss/mtk.c
@@ -126,7 +126,7 @@ static void mtk_remove(struct serdev_device *serdev)
if (data->vbackup)
regulator_disable(data->vbackup);
gnss_serial_free(gserial);
-};
+}
#ifdef CONFIG_OF
static const struct of_device_id mtk_of_match[] = {
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
index def64b36d994..5d8e9bfb24d0 100644
--- a/drivers/gnss/serial.c
+++ b/drivers/gnss/serial.c
@@ -165,7 +165,7 @@ void gnss_serial_free(struct gnss_serial *gserial)
{
gnss_put_device(gserial->gdev);
kfree(gserial);
-};
+}
EXPORT_SYMBOL_GPL(gnss_serial_free);
int gnss_serial_register(struct gnss_serial *gserial)
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 2ecb1d3e8eeb..bcb53ccfee4d 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -551,7 +551,7 @@ static void sirf_remove(struct serdev_device *serdev)
regulator_disable(data->vcc);
gnss_put_device(data->gdev);
-};
+}
#ifdef CONFIG_OF
static const struct of_device_id sirf_of_match[] = {
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
index 7b05bc40532e..c951be202ca2 100644
--- a/drivers/gnss/ubx.c
+++ b/drivers/gnss/ubx.c
@@ -126,7 +126,7 @@ static void ubx_remove(struct serdev_device *serdev)
if (data->v_bckp)
regulator_disable(data->v_bckp);
gnss_serial_free(gserial);
-};
+}
#ifdef CONFIG_OF
static const struct of_device_id ubx_of_match[] = {
diff --git a/drivers/gnss/usb.c b/drivers/gnss/usb.c
new file mode 100644
index 000000000000..028ce56b20ea
--- /dev/null
+++ b/drivers/gnss/usb.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic USB GNSS receiver driver
+ *
+ * Copyright (C) 2021 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#define GNSS_USB_READ_BUF_LEN 512
+#define GNSS_USB_WRITE_TIMEOUT 1000
+
+static const struct usb_device_id gnss_usb_id_table[] = {
+ { USB_DEVICE(0x1199, 0xb000) }, /* Sierra Wireless XM1210 */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, gnss_usb_id_table);
+
+struct gnss_usb {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct gnss_device *gdev;
+ struct urb *read_urb;
+ unsigned int write_pipe;
+};
+
+static void gnss_usb_rx_complete(struct urb *urb)
+{
+ struct gnss_usb *gusb = urb->context;
+ struct gnss_device *gdev = gusb->gdev;
+ int status = urb->status;
+ int len;
+ int ret;
+
+ switch (status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&gdev->dev, "urb stopped: %d\n", status);
+ return;
+ case -EPIPE:
+ dev_err(&gdev->dev, "urb stopped: %d\n", status);
+ return;
+ default:
+ dev_dbg(&gdev->dev, "nonzero urb status: %d\n", status);
+ goto resubmit;
+ }
+
+ len = urb->actual_length;
+ if (len == 0)
+ goto resubmit;
+
+ ret = gnss_insert_raw(gdev, urb->transfer_buffer, len);
+ if (ret < len)
+ dev_dbg(&gdev->dev, "dropped %d bytes\n", len - ret);
+resubmit:
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret && ret != -EPERM && ret != -ENODEV)
+ dev_err(&gdev->dev, "failed to resubmit urb: %d\n", ret);
+}
+
+static int gnss_usb_open(struct gnss_device *gdev)
+{
+ struct gnss_usb *gusb = gnss_get_drvdata(gdev);
+ int ret;
+
+ ret = usb_submit_urb(gusb->read_urb, GFP_KERNEL);
+ if (ret) {
+ if (ret != -EPERM && ret != -ENODEV)
+ dev_err(&gdev->dev, "failed to submit urb: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gnss_usb_close(struct gnss_device *gdev)
+{
+ struct gnss_usb *gusb = gnss_get_drvdata(gdev);
+
+ usb_kill_urb(gusb->read_urb);
+}
+
+static int gnss_usb_write_raw(struct gnss_device *gdev,
+ const unsigned char *buf, size_t count)
+{
+ struct gnss_usb *gusb = gnss_get_drvdata(gdev);
+ void *tbuf;
+ int ret;
+
+ tbuf = kmemdup(buf, count, GFP_KERNEL);
+ if (!tbuf)
+ return -ENOMEM;
+
+ ret = usb_bulk_msg(gusb->udev, gusb->write_pipe, tbuf, count, NULL,
+ GNSS_USB_WRITE_TIMEOUT);
+ kfree(tbuf);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct gnss_operations gnss_usb_gnss_ops = {
+ .open = gnss_usb_open,
+ .close = gnss_usb_close,
+ .write_raw = gnss_usb_write_raw,
+};
+
+static int gnss_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *in, *out;
+ struct gnss_device *gdev;
+ struct gnss_usb *gusb;
+ struct urb *urb;
+ size_t buf_len;
+ void *buf;
+ int ret;
+
+ ret = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL,
+ NULL);
+ if (ret)
+ return ret;
+
+ gusb = kzalloc(sizeof(*gusb), GFP_KERNEL);
+ if (!gusb)
+ return -ENOMEM;
+
+ gdev = gnss_allocate_device(&intf->dev);
+ if (!gdev) {
+ ret = -ENOMEM;
+ goto err_free_gusb;
+ }
+
+ gdev->ops = &gnss_usb_gnss_ops;
+ gdev->type = GNSS_TYPE_NMEA;
+ gnss_set_drvdata(gdev, gusb);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ ret = -ENOMEM;
+ goto err_put_gdev;
+ }
+
+ buf_len = max(usb_endpoint_maxp(in), GNSS_USB_READ_BUF_LEN);
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_free_urb;
+ }
+
+ usb_fill_bulk_urb(urb, udev,
+ usb_rcvbulkpipe(udev, usb_endpoint_num(in)),
+ buf, buf_len, gnss_usb_rx_complete, gusb);
+
+ gusb->intf = intf;
+ gusb->udev = udev;
+ gusb->gdev = gdev;
+ gusb->read_urb = urb;
+ gusb->write_pipe = usb_sndbulkpipe(udev, usb_endpoint_num(out));
+
+ ret = gnss_register_device(gdev);
+ if (ret)
+ goto err_free_buf;
+
+ usb_set_intfdata(intf, gusb);
+
+ return 0;
+
+err_free_buf:
+ kfree(buf);
+err_free_urb:
+ usb_free_urb(urb);
+err_put_gdev:
+ gnss_put_device(gdev);
+err_free_gusb:
+ kfree(gusb);
+
+ return ret;
+}
+
+static void gnss_usb_disconnect(struct usb_interface *intf)
+{
+ struct gnss_usb *gusb = usb_get_intfdata(intf);
+
+ gnss_deregister_device(gusb->gdev);
+
+ kfree(gusb->read_urb->transfer_buffer);
+ usb_free_urb(gusb->read_urb);
+ gnss_put_device(gusb->gdev);
+ kfree(gusb);
+}
+
+static struct usb_driver gnss_usb_driver = {
+ .name = "gnss-usb",
+ .probe = gnss_usb_probe,
+ .disconnect = gnss_usb_disconnect,
+ .id_table = gnss_usb_id_table,
+};
+module_usb_driver(gnss_usb_driver);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("Generic USB GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
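The new driver matches on explicit VID/PID pairs only, so a receiver that speaks plain NMEA over a bulk endpoint pair but is not yet in gnss_usb_id_table needs a table entry; the second ID below is a placeholder, not known hardware:

    /* Sketch: extending the match table (0x1234/0x5678 is hypothetical). */
    static const struct usb_device_id gnss_usb_id_table[] = {
            { USB_DEVICE(0x1199, 0xb000) }, /* Sierra Wireless XM1210 */
            { USB_DEVICE(0x1234, 0x5678) }, /* placeholder example */
            { }
    };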
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 60d9374c72c0..1c211b4c63be 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -731,14 +731,12 @@ config GPIO_XILINX
Say yes here to support the Xilinx FPGA GPIO device.
config GPIO_XLP
- tristate "Netlogic XLP GPIO support"
- depends on OF_GPIO && (CPU_XLP || ARCH_THUNDER2 || COMPILE_TEST)
+ tristate "Cavium ThunderX2 GPIO support"
+ depends on ARCH_THUNDER2 || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
- This driver provides support for GPIO interface on Netlogic XLP MIPS64
- SoCs. Currently supported XLP variants are XLP8XX, XLP3XX, XLP2XX,
- XLP9XX and XLP5XX. The same GPIO controller block is also present in
- Cavium's ThunderX2 CN99XX SoCs.
+	  This driver provides support for the GPIO interface on Cavium's
+	  ThunderX2 CN99XX SoCs (originally from Netlogic XLP).
If unsure, say N.
@@ -1133,17 +1131,6 @@ config GPIO_ARIZONA
help
Support for GPIOs on Wolfson Arizona class devices.
-config GPIO_BD70528
- tristate "ROHM BD70528 GPIO support"
- depends on MFD_ROHM_BD70528
- help
- Support for GPIOs on ROHM BD70528 PMIC. There are four GPIOs
- available on the ROHM PMIC in total. The GPIOs can also
- generate interrupts.
-
- This driver can also be built as a module. If so, the module
- will be called gpio-bd70528.
-
config GPIO_BD71815
tristate "ROHM BD71815 PMIC GPIO support"
depends on MFD_ROHM_BD71828
@@ -1694,6 +1681,14 @@ config GPIO_VIRTIO
These virtual GPIOs can be routed to real GPIOs or attached to
simulators on the host (like QEMU).
+config GPIO_SIM
+ tristate "GPIO Simulator Module"
+ select IRQ_SIM
+ select CONFIGFS_FS
+ help
+ This enables the GPIO simulator - a configfs-based GPIO testing
+ driver.
+
endmenu
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 71ee9fc2ff83..edbaa3cb343c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_GPIO_ASPEED_SGPIO) += gpio-aspeed-sgpio.o
obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BCM_XGS_IPROC) += gpio-xgs-iproc.o
-obj-$(CONFIG_GPIO_BD70528) += gpio-bd70528.o
obj-$(CONFIG_GPIO_BD71815) += gpio-bd71815.o
obj-$(CONFIG_GPIO_BD71828) += gpio-bd71828.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
@@ -133,6 +132,7 @@ obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o
obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o
+obj-$(CONFIG_GPIO_SIM) += gpio-sim.o
obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 8eedfc6451df..cc349d4e4973 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -458,7 +458,6 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
chip->ngpio = num_gpios;
chip->label = adnp->client->name;
chip->parent = &adnp->client->dev;
- chip->of_node = chip->parent->of_node;
chip->owner = THIS_MODULE;
if (is_irq_controller) {
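This is the first of many hunks in the GPIO part of this diff that drop explicit of_node assignments: gpiolib now derives the chip's firmware node from chip->parent (or from an explicitly assigned gc->fwnode, as in the gpio-dwapb hunk further down), so drivers only describe the parent device. A sketch of the two resulting idioms:

    /* Sketch: no more #ifdef CONFIG_OF_GPIO / of_node plumbing. */
    chip->parent = dev;           /* fwnode inherited from the parent device */
    /* or, for a bank with its own firmware description: */
    port->gc.fwnode = pp->fwnode;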
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index e9671d1660ef..869dc952cf45 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -371,6 +371,13 @@ static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
return gpiod_set_config(fwd->descs[offset], config);
}
+static int gpio_fwd_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_to_irq(fwd->descs[offset]);
+}
+
/**
* gpiochip_fwd_create() - Create a new GPIO forwarder
* @dev: Parent device pointer
@@ -411,7 +418,8 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
for (i = 0; i < ngpios; i++) {
struct gpio_chip *parent = gpiod_to_chip(descs[i]);
- dev_dbg(dev, "%u => gpio-%d\n", i, desc_to_gpio(descs[i]));
+ dev_dbg(dev, "%u => gpio %d irq %d\n", i,
+ desc_to_gpio(descs[i]), gpiod_to_irq(descs[i]));
if (gpiod_cansleep(descs[i]))
chip->can_sleep = true;
@@ -429,6 +437,7 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
chip->get_multiple = gpio_fwd_get_multiple_locked;
chip->set = gpio_fwd_set;
chip->set_multiple = gpio_fwd_set_multiple_locked;
+ chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
fwd->descs = descs;
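With the new to_irq callback, interrupts on forwarded lines are proxied to the parent chip, so a consumer of the aggregated chip can use the usual descriptor-to-IRQ mapping. A sketch on the consumer side, where my_handler and data are hypothetical:

    /* Sketch: mapping a forwarded line to an interrupt. */
    int irq = gpiod_to_irq(desc);

    if (irq < 0)
            return irq;
    ret = request_irq(irq, my_handler, IRQF_TRIGGER_FALLING, "example", data);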
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index bbf53e289141..8cfb353c3abb 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#define PT_TOTAL_GPIO 8
+#define PT_TOTAL_GPIO_EX 24
/* PCI-E MMIO register offsets */
#define PT_DIRECTION_REG 0x00
@@ -103,10 +104,8 @@ static int pt_gpio_probe(struct platform_device *pdev)
pt_gpio->gc.owner = THIS_MODULE;
pt_gpio->gc.request = pt_gpio_request;
pt_gpio->gc.free = pt_gpio_free;
- pt_gpio->gc.ngpio = PT_TOTAL_GPIO;
-#if defined(CONFIG_OF_GPIO)
- pt_gpio->gc.of_node = dev->of_node;
-#endif
+ pt_gpio->gc.ngpio = (uintptr_t)device_get_match_data(dev);
+
ret = gpiochip_add_data(&pt_gpio->gc, pt_gpio);
if (ret) {
dev_err(dev, "Failed to register GPIO lib\n");
@@ -133,8 +132,9 @@ static int pt_gpio_remove(struct platform_device *pdev)
}
static const struct acpi_device_id pt_gpio_acpi_match[] = {
- { "AMDF030", 0 },
- { "AMDIF030", 0 },
+ { "AMDF030", PT_TOTAL_GPIO },
+ { "AMDIF030", PT_TOTAL_GPIO },
+ { "AMDIF031", PT_TOTAL_GPIO_EX },
{ },
};
MODULE_DEVICE_TABLE(acpi, pt_gpio_acpi_match);
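The pin count now travels in the ACPI match table's driver_data and is read back with device_get_match_data(), so the new 24-pin AMDIF031 variant is a one-line table addition rather than another probe-time check. The core of the pattern, as a sketch:

    /* Sketch: per-variant configuration via match data. */
    gc->ngpio = (uintptr_t)device_get_match_data(dev);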
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 2bc173c352ce..02f9ae19cd44 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -151,6 +151,8 @@ static int arizona_gpio_probe(struct platform_device *pdev)
struct arizona_gpio *arizona_gpio;
int ret;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
arizona_gpio = devm_kzalloc(&pdev->dev, sizeof(*arizona_gpio),
GFP_KERNEL);
if (!arizona_gpio)
@@ -159,9 +161,6 @@ static int arizona_gpio_probe(struct platform_device *pdev)
arizona_gpio->arizona = arizona;
arizona_gpio->gpio_chip = template_chip;
arizona_gpio->gpio_chip.parent = &pdev->dev;
-#ifdef CONFIG_OF_GPIO
- arizona_gpio->gpio_chip.of_node = arizona->dev->of_node;
-#endif
switch (arizona->type) {
case WM5102:
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index b3a9b8488f11..454cefbeecf0 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -31,7 +31,7 @@ struct aspeed_sgpio {
struct gpio_chip chip;
struct irq_chip intc;
struct clk *pclk;
- spinlock_t lock;
+ raw_spinlock_t lock;
void __iomem *base;
int irq;
};
@@ -173,12 +173,12 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
enum aspeed_sgpio_reg reg;
int rc = 0;
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
@@ -215,11 +215,11 @@ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
unsigned long flags;
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
sgpio_set_value(gc, offset, val);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
@@ -236,9 +236,9 @@ static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int v
/* No special action is required for setting the direction; we'll
* error-out in sgpio_set_value if this isn't an output GPIO */
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
rc = sgpio_set_value(gc, offset, val);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
@@ -277,11 +277,11 @@ static void aspeed_sgpio_irq_ack(struct irq_data *d)
status_addr = bank_reg(gpio, bank, reg_irq_status);
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
iowrite32(bit, status_addr);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
@@ -296,7 +296,7 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
addr = bank_reg(gpio, bank, reg_irq_enable);
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
reg = ioread32(addr);
if (set)
@@ -306,7 +306,7 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
iowrite32(reg, addr);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static void aspeed_sgpio_irq_mask(struct irq_data *d)
@@ -355,7 +355,7 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
addr = bank_reg(gpio, bank, reg_irq_type0);
reg = ioread32(addr);
@@ -372,7 +372,7 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
reg = (reg & ~bit) | type2;
iowrite32(reg, addr);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
irq_set_handler_locked(d, handler);
@@ -467,7 +467,7 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
reg = bank_reg(gpio, to_bank(offset), reg_tolerance);
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
val = readl(reg);
@@ -478,7 +478,7 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
writel(val, reg);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
}
@@ -575,7 +575,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) | gpio_cnt_regval |
ASPEED_SGPIO_ENABLE, gpio->base + ASPEED_SGPIO_CTRL);
- spin_lock_init(&gpio->lock);
+ raw_spin_lock_init(&gpio->lock);
gpio->chip.parent = &pdev->dev;
gpio->chip.ngpio = nr_gpios * 2;
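Here, and in gpio-aspeed below, spinlock_t becomes raw_spinlock_t because the lock is taken from irq_chip callbacks that run in hard interrupt context; on PREEMPT_RT a plain spinlock_t turns into a sleeping lock, which is not allowed there. A sketch of the raw variant, which keeps spinning semantics on all configurations:

    /* Sketch: lock safe in hard-irq context even on PREEMPT_RT. */
    raw_spinlock_t lock;
    unsigned long flags;

    raw_spin_lock_init(&lock);
    raw_spin_lock_irqsave(&lock, flags);
    /* short, non-sleeping critical section */
    raw_spin_unlock_irqrestore(&lock, flags);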
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 3c8f20c57695..318a7d95a1a8 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -53,7 +53,7 @@ struct aspeed_gpio_config {
struct aspeed_gpio {
struct gpio_chip chip;
struct irq_chip irqc;
- spinlock_t lock;
+ raw_spinlock_t lock;
void __iomem *base;
int irq;
const struct aspeed_gpio_config *config;
@@ -413,14 +413,14 @@ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
unsigned long flags;
bool copro;
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
@@ -435,7 +435,7 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
if (!have_input(gpio, offset))
return -ENOTSUPP;
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
reg = ioread32(addr);
reg &= ~GPIO_BIT(offset);
@@ -445,7 +445,7 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
}
@@ -463,7 +463,7 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
if (!have_output(gpio, offset))
return -ENOTSUPP;
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
reg = ioread32(addr);
reg |= GPIO_BIT(offset);
@@ -474,7 +474,7 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
}
@@ -492,11 +492,11 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
if (!have_output(gpio, offset))
return GPIO_LINE_DIRECTION_IN;
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
val = ioread32(bank_reg(gpio, bank, reg_dir)) & GPIO_BIT(offset);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return val ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
@@ -539,14 +539,14 @@ static void aspeed_gpio_irq_ack(struct irq_data *d)
status_addr = bank_reg(gpio, bank, reg_irq_status);
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
iowrite32(bit, status_addr);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
@@ -565,7 +565,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
addr = bank_reg(gpio, bank, reg_irq_enable);
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
reg = ioread32(addr);
@@ -577,7 +577,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static void aspeed_gpio_irq_mask(struct irq_data *d)
@@ -629,7 +629,7 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
addr = bank_reg(gpio, bank, reg_irq_type0);
@@ -649,7 +649,7 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
irq_set_handler_locked(d, handler);
@@ -716,7 +716,7 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
treg = bank_reg(gpio, to_bank(offset), reg_tolerance);
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
val = readl(treg);
@@ -730,7 +730,7 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
}
@@ -856,7 +856,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
return rc;
}
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
if (timer_allocation_registered(gpio, offset)) {
rc = unregister_allocated_timer(gpio, offset);
@@ -916,7 +916,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
configure_timer(gpio, offset, i);
out:
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
@@ -927,13 +927,13 @@ static int disable_debounce(struct gpio_chip *chip, unsigned int offset)
unsigned long flags;
int rc;
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
rc = unregister_allocated_timer(gpio, offset);
if (!rc)
configure_timer(gpio, offset, 0);
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
@@ -1015,7 +1015,7 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
return -EINVAL;
bindex = offset >> 3;
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
/* Sanity check, this shouldn't happen */
if (gpio->cf_copro_bankmap[bindex] == 0xff) {
@@ -1036,7 +1036,7 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
if (bit)
*bit = GPIO_OFFSET(offset);
bail:
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio);
@@ -1060,7 +1060,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
return -EINVAL;
bindex = offset >> 3;
- spin_lock_irqsave(&gpio->lock, flags);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
/* Sanity check, this shouldn't happen */
if (gpio->cf_copro_bankmap[bindex] == 0) {
@@ -1074,7 +1074,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
aspeed_gpio_change_cmd_source(gpio, bank, bindex,
GPIO_CMDSRC_ARM);
bail:
- spin_unlock_irqrestore(&gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio);
@@ -1148,7 +1148,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
- spin_lock_init(&gpio->lock);
+ raw_spin_lock_init(&gpio->lock);
gpio_id = of_match_node(aspeed_gpio_of_table, pdev->dev.of_node);
if (!gpio_id)
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index d329a143f5ec..e84474494429 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -606,7 +606,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
kona_gpio->pdev = pdev;
platform_set_drvdata(pdev, kona_gpio);
- chip->of_node = dev->of_node;
+ chip->parent = dev;
chip->ngpio = kona_gpio->num_bank * GPIO_PER_BANK;
kona_gpio->irq_domain = irq_domain_add_linear(dev->of_node,
diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c
deleted file mode 100644
index 397a50d6bc65..000000000000
--- a/drivers/gpio/gpio-bd70528.c
+++ /dev/null
@@ -1,230 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 ROHM Semiconductors
-// gpio-bd70528.c ROHM BD70528MWV gpio driver
-
-#include <linux/gpio/driver.h>
-#include <linux/mfd/rohm-bd70528.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-#define GPIO_IN_REG(offset) (BD70528_REG_GPIO1_IN + (offset) * 2)
-#define GPIO_OUT_REG(offset) (BD70528_REG_GPIO1_OUT + (offset) * 2)
-
-struct bd70528_gpio {
- struct regmap *regmap;
- struct device *dev;
- struct gpio_chip gpio;
-};
-
-static int bd70528_set_debounce(struct bd70528_gpio *bdgpio,
- unsigned int offset, unsigned int debounce)
-{
- u8 val;
-
- switch (debounce) {
- case 0:
- val = BD70528_DEBOUNCE_DISABLE;
- break;
- case 1 ... 15000:
- val = BD70528_DEBOUNCE_15MS;
- break;
- case 15001 ... 30000:
- val = BD70528_DEBOUNCE_30MS;
- break;
- case 30001 ... 50000:
- val = BD70528_DEBOUNCE_50MS;
- break;
- default:
- dev_err(bdgpio->dev,
- "Invalid debounce value %u\n", debounce);
- return -EINVAL;
- }
- return regmap_update_bits(bdgpio->regmap, GPIO_IN_REG(offset),
- BD70528_DEBOUNCE_MASK, val);
-}
-
-static int bd70528_get_direction(struct gpio_chip *chip, unsigned int offset)
-{
- struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
- int val, ret;
-
- /* Do we need to do something to IRQs here? */
- ret = regmap_read(bdgpio->regmap, GPIO_OUT_REG(offset), &val);
- if (ret) {
- dev_err(bdgpio->dev, "Could not read gpio direction\n");
- return ret;
- }
- if (val & BD70528_GPIO_OUT_EN_MASK)
- return GPIO_LINE_DIRECTION_OUT;
-
- return GPIO_LINE_DIRECTION_IN;
-}
-
-static int bd70528_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
- unsigned long config)
-{
- struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
-
- switch (pinconf_to_config_param(config)) {
- case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- return regmap_update_bits(bdgpio->regmap,
- GPIO_OUT_REG(offset),
- BD70528_GPIO_DRIVE_MASK,
- BD70528_GPIO_OPEN_DRAIN);
- break;
- case PIN_CONFIG_DRIVE_PUSH_PULL:
- return regmap_update_bits(bdgpio->regmap,
- GPIO_OUT_REG(offset),
- BD70528_GPIO_DRIVE_MASK,
- BD70528_GPIO_PUSH_PULL);
- break;
- case PIN_CONFIG_INPUT_DEBOUNCE:
- return bd70528_set_debounce(bdgpio, offset,
- pinconf_to_config_argument(config));
- break;
- default:
- break;
- }
- return -ENOTSUPP;
-}
-
-static int bd70528_direction_input(struct gpio_chip *chip, unsigned int offset)
-{
- struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
-
- /* Do we need to do something to IRQs here? */
- return regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
- BD70528_GPIO_OUT_EN_MASK,
- BD70528_GPIO_OUT_DISABLE);
-}
-
-static void bd70528_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- int ret;
- struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
- u8 val = (value) ? BD70528_GPIO_OUT_HI : BD70528_GPIO_OUT_LO;
-
- ret = regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
- BD70528_GPIO_OUT_MASK, val);
- if (ret)
- dev_err(bdgpio->dev, "Could not set gpio to %d\n", value);
-}
-
-static int bd70528_direction_output(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
-
- bd70528_gpio_set(chip, offset, value);
- return regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
- BD70528_GPIO_OUT_EN_MASK,
- BD70528_GPIO_OUT_ENABLE);
-}
-
-#define GPIO_IN_STATE_MASK(offset) (BD70528_GPIO_IN_STATE_BASE << (offset))
-
-static int bd70528_gpio_get_o(struct bd70528_gpio *bdgpio, unsigned int offset)
-{
- int ret;
- unsigned int val;
-
- ret = regmap_read(bdgpio->regmap, GPIO_OUT_REG(offset), &val);
- if (!ret)
- ret = !!(val & BD70528_GPIO_OUT_MASK);
- else
- dev_err(bdgpio->dev, "GPIO (out) state read failed\n");
-
- return ret;
-}
-
-static int bd70528_gpio_get_i(struct bd70528_gpio *bdgpio, unsigned int offset)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(bdgpio->regmap, BD70528_REG_GPIO_STATE, &val);
-
- if (!ret)
- ret = !(val & GPIO_IN_STATE_MASK(offset));
- else
- dev_err(bdgpio->dev, "GPIO (in) state read failed\n");
-
- return ret;
-}
-
-static int bd70528_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- int ret;
- struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
-
- /*
- * There is a race condition where someone might be changing the
- * GPIO direction after we get it but before we read the value. But
- * application design where GPIO direction may be changed just when
- * we read GPIO value would be pointless as reader could not know
- * whether the returned high/low state is caused by input or output.
- * Or then there must be other ways to mitigate the issue. Thus
- * locking would make no sense.
- */
- ret = bd70528_get_direction(chip, offset);
- if (ret == GPIO_LINE_DIRECTION_OUT)
- ret = bd70528_gpio_get_o(bdgpio, offset);
- else if (ret == GPIO_LINE_DIRECTION_IN)
- ret = bd70528_gpio_get_i(bdgpio, offset);
- else
- dev_err(bdgpio->dev, "failed to read GPIO direction\n");
-
- return ret;
-}
-
-static int bd70528_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct bd70528_gpio *bdgpio;
- int ret;
-
- bdgpio = devm_kzalloc(dev, sizeof(*bdgpio), GFP_KERNEL);
- if (!bdgpio)
- return -ENOMEM;
- bdgpio->dev = dev;
- bdgpio->gpio.parent = dev->parent;
- bdgpio->gpio.label = "bd70528-gpio";
- bdgpio->gpio.owner = THIS_MODULE;
- bdgpio->gpio.get_direction = bd70528_get_direction;
- bdgpio->gpio.direction_input = bd70528_direction_input;
- bdgpio->gpio.direction_output = bd70528_direction_output;
- bdgpio->gpio.set_config = bd70528_gpio_set_config;
- bdgpio->gpio.can_sleep = true;
- bdgpio->gpio.get = bd70528_gpio_get;
- bdgpio->gpio.set = bd70528_gpio_set;
- bdgpio->gpio.ngpio = 4;
- bdgpio->gpio.base = -1;
-#ifdef CONFIG_OF_GPIO
- bdgpio->gpio.of_node = dev->parent->of_node;
-#endif
- bdgpio->regmap = dev_get_regmap(dev->parent, NULL);
- if (!bdgpio->regmap)
- return -ENODEV;
-
- ret = devm_gpiochip_add_data(dev, &bdgpio->gpio, bdgpio);
- if (ret)
- dev_err(dev, "gpio_init: Failed to add bd70528-gpio\n");
-
- return ret;
-}
-
-static struct platform_driver bd70528_gpio = {
- .driver = {
- .name = "bd70528-gpio"
- },
- .probe = bd70528_probe,
-};
-
-module_platform_driver(bd70528_gpio);
-
-MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("BD70528 voltage regulator driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:bd70528-gpio");
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index c8e382b53f2f..b2ccc320c7b5 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -121,7 +121,6 @@ static int bd71828_probe(struct platform_device *pdev)
* "gpio-reserved-ranges" and exclude them from control
*/
bdgpio->gpio.ngpio = 4;
- bdgpio->gpio.of_node = dev->parent->of_node;
bdgpio->regmap = dev_get_regmap(dev->parent, NULL);
if (!bdgpio->regmap)
return -ENODEV;
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 895a79936248..74ef89248867 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -703,9 +703,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
goto fail;
}
- gc->of_node = np;
gc->owner = THIS_MODULE;
- gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", dev->of_node);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
if (!gc->label) {
err = -ENOMEM;
goto fail;
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index 1d0827e79703..789384c6e178 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -163,12 +163,12 @@ static int creg_gpio_probe(struct platform_device *pdev)
spin_lock_init(&hcg->lock);
+ hcg->gc.parent = dev;
hcg->gc.label = dev_name(dev);
hcg->gc.base = -1;
hcg->gc.ngpio = ngpios;
hcg->gc.set = creg_gpio_set;
hcg->gc.direction_output = creg_gpio_dir_out;
- hcg->gc.of_node = dev->of_node;
ret = devm_gpiochip_add_data(dev, &hcg->gc, hcg);
if (ret)
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index cb5afaa7ed48..f960587f86a3 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -254,7 +254,6 @@ static int davinci_gpio_probe(struct platform_device *pdev)
#ifdef CONFIG_OF_GPIO
chips->chip.of_gpio_n_cells = 2;
chips->chip.parent = dev;
- chips->chip.of_node = dev->of_node;
chips->chip.request = gpiochip_generic_request;
chips->chip.free = gpiochip_generic_free;
#endif
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index f98fa33e1679..b0f3aca61974 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -15,7 +15,6 @@
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/reset.h>
@@ -53,7 +52,9 @@
#define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */
#define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */
+#define GPIO_REG_OFFSET_V1 0
#define GPIO_REG_OFFSET_V2 1
+#define GPIO_REG_OFFSET_MASK BIT(0)
#define GPIO_INTMASK_V2 0x44
#define GPIO_INTTYPE_LEVEL_V2 0x34
@@ -141,7 +142,7 @@ static inline u32 gpio_reg_v2_convert(unsigned int offset)
static inline u32 gpio_reg_convert(struct dwapb_gpio *gpio, unsigned int offset)
{
- if (gpio->flags & GPIO_REG_OFFSET_V2)
+ if ((gpio->flags & GPIO_REG_OFFSET_MASK) == GPIO_REG_OFFSET_V2)
return gpio_reg_v2_convert(offset);
return offset;
@@ -513,9 +514,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
return err;
}
-#ifdef CONFIG_OF_GPIO
- port->gc.of_node = to_of_node(pp->fwnode);
-#endif
+ port->gc.fwnode = pp->fwnode;
port->gc.ngpio = pp->ngpio;
port->gc.base = pp->gpio_base;
@@ -668,15 +667,15 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio)
}
static const struct of_device_id dwapb_of_match[] = {
- { .compatible = "snps,dw-apb-gpio", .data = (void *)0},
+ { .compatible = "snps,dw-apb-gpio", .data = (void *)GPIO_REG_OFFSET_V1},
{ .compatible = "apm,xgene-gpio-v2", .data = (void *)GPIO_REG_OFFSET_V2},
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dwapb_of_match);
static const struct acpi_device_id dwapb_acpi_match[] = {
- {"HISI0181", 0},
- {"APMC0D07", 0},
+ {"HISI0181", GPIO_REG_OFFSET_V1},
+ {"APMC0D07", GPIO_REG_OFFSET_V1},
{"APMC0D81", GPIO_REG_OFFSET_V2},
{ }
};
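The register-layout version moves from an ad-hoc truthy flag to a properly masked bit of the match data, leaving the remaining bits of the flags word free for future use. The decoding step, as shown in the hunk above:

    /* Sketch: compare the masked layout bit, not the whole flags word. */
    if ((gpio->flags & GPIO_REG_OFFSET_MASK) == GPIO_REG_OFFSET_V2)
            return gpio_reg_v2_convert(offset);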
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 865ab2b34fdd..8d722e026e9c 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -609,7 +609,6 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->chip.ngpio = pdata->num_eics;
sprd_eic->chip.base = -1;
sprd_eic->chip.parent = &pdev->dev;
- sprd_eic->chip.of_node = pdev->dev.of_node;
sprd_eic->chip.direction_input = sprd_eic_direction_input;
switch (sprd_eic->type) {
case SPRD_EIC_DEBOUNCE:
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 90b336e6ee27..858e6ebbb584 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -306,7 +306,6 @@ static int em_gio_probe(struct platform_device *pdev)
}
gpio_chip = &p->gpio_chip;
- gpio_chip->of_node = dev->of_node;
gpio_chip->direction_input = em_gio_direction_input;
gpio_chip->get = em_gio_get;
gpio_chip->direction_output = em_gio_direction_output;
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 636952769bc8..f6a3de99f7db 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -82,7 +82,6 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
gc->base = -1;
gc->ngpio = (u16)(uintptr_t)of_device_get_match_data(&pdev->dev);
gc->of_gpio_n_cells = 2;
- gc->of_node = pdev->dev.of_node;
/* This function adds a memory mapped GPIO chip */
ret = devm_gpiochip_add_data(&pdev->dev, gc, NULL);
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index f954359c9544..23d447e17a67 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -358,7 +358,6 @@ static int grgpio_probe(struct platform_device *ofdev)
priv->imask = gc->read_reg(regs + GRGPIO_IMASK);
priv->dev = &ofdev->dev;
- gc->of_node = np;
gc->owner = THIS_MODULE;
gc->to_irq = grgpio_to_irq;
gc->label = devm_kasprintf(&ofdev->dev, GFP_KERNEL, "%pOF", np);
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index 242112ff60ee..2109803ffb38 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -71,7 +71,6 @@ static int gw_pld_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- struct device_node *np = dev->of_node;
struct gw_pld *gw;
int ret;
@@ -82,7 +81,6 @@ static int gw_pld_probe(struct i2c_client *client,
gw->chip.base = -1;
gw->chip.can_sleep = true;
gw->chip.parent = dev;
- gw->chip.of_node = np;
gw->chip.owner = THIS_MODULE;
gw->chip.label = dev_name(dev);
gw->chip.ngpio = 8;
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
index 50003ad2e589..52b8b72ded77 100644
--- a/drivers/gpio/gpio-idt3243x.c
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -132,7 +132,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct idt_gpio_ctrl *ctrl;
- unsigned int parent_irq;
+ int parent_irq;
int ngpios;
int ret;
@@ -164,8 +164,8 @@ static int idt_gpio_probe(struct platform_device *pdev)
return PTR_ERR(ctrl->pic);
parent_irq = platform_get_irq(pdev, 0);
- if (!parent_irq)
- return -EINVAL;
+ if (parent_irq < 0)
+ return parent_irq;
girq = &ctrl->gc.irq;
girq->chip = &idt_gpio_irqchip;
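platform_get_irq() returns the IRQ number or a negative errno and never 0, so the result belongs in a signed variable checked for < 0; testing for !parent_irq, as the old code did, silently swallowed real errors (the gpio-mpc8xxx hunk below fixes the same bug). The canonical form:

    /* Sketch: correct platform IRQ lookup. */
    int irq = platform_get_irq(pdev, 0);

    if (irq < 0)
            return irq;    /* propagates -EPROBE_DEFER and friends */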
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 4e626c4235c2..d2b65cfb336e 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -512,10 +512,10 @@ static int lpc32xx_gpio_probe(struct platform_device *pdev)
return PTR_ERR(reg_base);
for (i = 0; i < ARRAY_SIZE(lpc32xx_gpiochip); i++) {
+ lpc32xx_gpiochip[i].chip.parent = &pdev->dev;
if (pdev->dev.of_node) {
lpc32xx_gpiochip[i].chip.of_xlate = lpc32xx_of_xlate;
lpc32xx_gpiochip[i].chip.of_gpio_n_cells = 3;
- lpc32xx_gpiochip[i].chip.of_node = pdev->dev.of_node;
lpc32xx_gpiochip[i].reg_base = reg_base;
}
devm_gpiochip_add_data(&pdev->dev, &lpc32xx_gpiochip[i].chip,
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index 310d1a248cae..51cd6f98d1c7 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -326,7 +326,7 @@ static void gpiod_set_array_single_value_cansleep(unsigned int ndescs,
bitmap_zero(values, ndescs);
gpiod_set_array_value_cansleep(ndescs, desc, info, values);
- kfree(values);
+ bitmap_free(values);
}
static struct gpio_descs *devm_gpiod_get_array_optional_count(
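Bitmaps obtained from bitmap_alloc()/bitmap_zalloc() are now released with the matching bitmap_free() instead of a bare kfree(); the behavior is the same today, but the symmetric API keeps the allocation strategy an implementation detail. A sketch:

    /* Sketch: symmetric bitmap alloc/free. */
    unsigned long *values = bitmap_zalloc(ndescs, GFP_KERNEL);

    if (!values)
            return -ENOMEM;
    /* ... operate on the bitmap ... */
    bitmap_free(values);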
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index efa9acdc320a..b060c4773698 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -98,9 +98,9 @@ static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
spin_lock_irqsave(&chip->spinlock, flags);
reg_val = ioread32(&chip->reg->regs[chip->ch].po);
if (val)
- reg_val |= (1 << nr);
+ reg_val |= BIT(nr);
else
- reg_val &= ~(1 << nr);
+ reg_val &= ~BIT(nr);
iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -110,7 +110,7 @@ static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct ioh_gpio *chip = gpiochip_get_data(gpio);
- return !!(ioread32(&chip->reg->regs[chip->ch].pi) & (1 << nr));
+ return !!(ioread32(&chip->reg->regs[chip->ch].pi) & BIT(nr));
}
static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
@@ -122,16 +122,16 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
- pm = ioread32(&chip->reg->regs[chip->ch].pm) &
- ((1 << num_ports[chip->ch]) - 1);
- pm |= (1 << nr);
+ pm = ioread32(&chip->reg->regs[chip->ch].pm);
+ pm &= BIT(num_ports[chip->ch]) - 1;
+ pm |= BIT(nr);
iowrite32(pm, &chip->reg->regs[chip->ch].pm);
reg_val = ioread32(&chip->reg->regs[chip->ch].po);
if (val)
- reg_val |= (1 << nr);
+ reg_val |= BIT(nr);
else
- reg_val &= ~(1 << nr);
+ reg_val &= ~BIT(nr);
iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -146,9 +146,9 @@ static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
- pm = ioread32(&chip->reg->regs[chip->ch].pm) &
- ((1 << num_ports[chip->ch]) - 1);
- pm &= ~(1 << nr);
+ pm = ioread32(&chip->reg->regs[chip->ch].pm);
+ pm &= BIT(num_ports[chip->ch]) - 1;
+ pm &= ~BIT(nr);
iowrite32(pm, &chip->reg->regs[chip->ch].pm);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -304,7 +304,7 @@ static void ioh_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct ioh_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base),
+ iowrite32(BIT(d->irq - chip->irq_base),
&chip->reg->regs[chip->ch].imaskclr);
}
@@ -313,7 +313,7 @@ static void ioh_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct ioh_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base),
+ iowrite32(BIT(d->irq - chip->irq_base),
&chip->reg->regs[chip->ch].imask);
}
@@ -326,7 +326,7 @@ static void ioh_irq_disable(struct irq_data *d)
spin_lock_irqsave(&chip->spinlock, flags);
ien = ioread32(&chip->reg->regs[chip->ch].ien);
- ien &= ~(1 << (d->irq - chip->irq_base));
+ ien &= ~BIT(d->irq - chip->irq_base);
iowrite32(ien, &chip->reg->regs[chip->ch].ien);
spin_unlock_irqrestore(&chip->spinlock, flags);
}
@@ -340,7 +340,7 @@ static void ioh_irq_enable(struct irq_data *d)
spin_lock_irqsave(&chip->spinlock, flags);
ien = ioread32(&chip->reg->regs[chip->ch].ien);
- ien |= 1 << (d->irq - chip->irq_base);
+ ien |= BIT(d->irq - chip->irq_base);
iowrite32(ien, &chip->reg->regs[chip->ch].ien);
spin_unlock_irqrestore(&chip->spinlock, flags);
}
@@ -401,6 +401,7 @@ static int ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
static int ioh_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
int ret;
int i, j;
struct ioh_gpio *chip;
@@ -410,19 +411,19 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
ret = pci_enable_device(pdev);
if (ret) {
- dev_err(&pdev->dev, "%s : pci_enable_device failed", __func__);
+ dev_err(dev, "%s : pci_enable_device failed", __func__);
goto err_pci_enable;
}
ret = pci_request_regions(pdev, KBUILD_MODNAME);
if (ret) {
- dev_err(&pdev->dev, "pci_request_regions failed-%d", ret);
+ dev_err(dev, "pci_request_regions failed-%d", ret);
goto err_request_regions;
}
base = pci_iomap(pdev, 1, 0);
if (!base) {
- dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
+ dev_err(dev, "%s : pci_iomap failed", __func__);
ret = -ENOMEM;
goto err_iomap;
}
@@ -435,7 +436,7 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
chip = chip_save;
for (i = 0; i < 8; i++, chip++) {
- chip->dev = &pdev->dev;
+ chip->dev = dev;
chip->base = base;
chip->reg = chip->base;
chip->ch = i;
@@ -443,17 +444,17 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
ioh_gpio_setup(chip, num_ports[i]);
ret = gpiochip_add_data(&chip->gpio, chip);
if (ret) {
- dev_err(&pdev->dev, "IOH gpio: Failed to register GPIO\n");
+ dev_err(dev, "IOH gpio: Failed to register GPIO\n");
goto err_gpiochip_add;
}
}
chip = chip_save;
for (j = 0; j < 8; j++, chip++) {
- irq_base = devm_irq_alloc_descs(&pdev->dev, -1, IOH_IRQ_BASE,
+ irq_base = devm_irq_alloc_descs(dev, -1, IOH_IRQ_BASE,
num_ports[j], NUMA_NO_NODE);
if (irq_base < 0) {
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"ml_ioh_gpio: Failed to get IRQ base num\n");
ret = irq_base;
goto err_gpiochip_add;
@@ -467,11 +468,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
}
chip = chip_save;
- ret = devm_request_irq(&pdev->dev, pdev->irq, ioh_gpio_handler,
+ ret = devm_request_irq(dev, pdev->irq, ioh_gpio_handler,
IRQF_SHARED, KBUILD_MODNAME, chip);
if (ret != 0) {
- dev_err(&pdev->dev,
- "%s request_irq failed\n", __func__);
+ dev_err(dev, "%s request_irq failed\n", __func__);
goto err_gpiochip_add;
}
@@ -498,7 +498,7 @@ err_request_regions:
err_pci_enable:
- dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret);
+ dev_err(dev, "%s Failed returns %d\n", __func__, ret);
return ret;
}
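Two mechanical cleanups run through the gpio-ml-ioh hunks: open-coded (1 << n) shifts become BIT(n) from <linux/bits.h>, and a local dev variable replaces the repeated &pdev->dev. The BIT() forms, as a sketch:

    /* Sketch: BIT() instead of open-coded shifts. */
    reg_val |= BIT(nr);                     /* set bit nr */
    reg_val &= ~BIT(nr);                    /* clear bit nr */
    pm &= BIT(num_ports[chip->ch]) - 1;     /* mask of the low N bits */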
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index d26bff29157b..8943cea92764 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -491,27 +491,6 @@ static void gpio_mockup_unregister_pdevs(void)
}
}
-static __init char **gpio_mockup_make_line_names(const char *label,
- unsigned int num_lines)
-{
- unsigned int i;
- char **names;
-
- names = kcalloc(num_lines + 1, sizeof(char *), GFP_KERNEL);
- if (!names)
- return NULL;
-
- for (i = 0; i < num_lines; i++) {
- names[i] = kasprintf(GFP_KERNEL, "%s-%u", label, i);
- if (!names[i]) {
- kfree_strarray(names, i);
- return NULL;
- }
- }
-
- return names;
-}
-
static int __init gpio_mockup_register_chip(int idx)
{
struct property_entry properties[GPIO_MOCKUP_MAX_PROP];
@@ -538,7 +517,7 @@ static int __init gpio_mockup_register_chip(int idx)
properties[prop++] = PROPERTY_ENTRY_U16("nr-gpios", ngpio);
if (gpio_mockup_named_lines) {
- line_names = gpio_mockup_make_line_names(chip_label, ngpio);
+ line_names = kasprintf_strarray(GFP_KERNEL, chip_label, ngpio);
if (!line_names)
return -ENOMEM;
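The driver's open-coded name generator is replaced by kasprintf_strarray() from the string_helpers library, which returns a NULL-terminated array of "prefix-0" ... "prefix-N-1" strings. A sketch of the pairing with kfree_strarray(), with a hypothetical label:

    /* Sketch: generated line names (label value hypothetical). */
    char **names = kasprintf_strarray(GFP_KERNEL, "gpio-mockup-a", 4);

    if (!names)
            return -ENOMEM;
    /* names[0] = "gpio-mockup-a-0", ..., names[4] = NULL */
    kfree_strarray(names, 4);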
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 70d6ae20b1da..a964e25ea620 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -47,7 +47,7 @@ struct mpc8xxx_gpio_chip {
unsigned offset, int value);
struct irq_domain *irq;
- unsigned int irqn;
+ int irqn;
};
/*
@@ -388,8 +388,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
}
mpc8xxx_gc->irqn = platform_get_irq(pdev, 0);
- if (!mpc8xxx_gc->irqn)
- return 0;
+ if (mpc8xxx_gc->irqn < 0)
+ return mpc8xxx_gc->irqn;
mpc8xxx_gc->irq = irq_domain_create_linear(fwnode,
MPC8XXX_GPIO_PINS,
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index da31a5ff7a2b..b2c90bdd39d0 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -221,6 +221,263 @@ static const unsigned int msc313_offsets[] = {
};
MSC313_GPIO_CHIPDATA(msc313);
+
+/*
+ * Unlike the msc313(e), the ssd20xd has a number of pins that are
+ * actually called "gpio", probably because they have no dedicated
+ * function.
+ */
+#define SSD20XD_PINNAME_GPIO0 "gpio0"
+#define SSD20XD_PINNAME_GPIO1 "gpio1"
+#define SSD20XD_PINNAME_GPIO2 "gpio2"
+#define SSD20XD_PINNAME_GPIO3 "gpio3"
+#define SSD20XD_PINNAME_GPIO4 "gpio4"
+#define SSD20XD_PINNAME_GPIO5 "gpio5"
+#define SSD20XD_PINNAME_GPIO6 "gpio6"
+#define SSD20XD_PINNAME_GPIO7 "gpio7"
+#define SSD20XD_PINNAME_GPIO10 "gpio10"
+#define SSD20XD_PINNAME_GPIO11 "gpio11"
+#define SSD20XD_PINNAME_GPIO12 "gpio12"
+#define SSD20XD_PINNAME_GPIO13 "gpio13"
+#define SSD20XD_PINNAME_GPIO14 "gpio14"
+#define SSD20XD_PINNAME_GPIO85 "gpio85"
+#define SSD20XD_PINNAME_GPIO86 "gpio86"
+#define SSD20XD_PINNAME_GPIO90 "gpio90"
+
+#define SSD20XD_GPIO_NAMES SSD20XD_PINNAME_GPIO0, \
+ SSD20XD_PINNAME_GPIO1, \
+ SSD20XD_PINNAME_GPIO2, \
+ SSD20XD_PINNAME_GPIO3, \
+ SSD20XD_PINNAME_GPIO4, \
+ SSD20XD_PINNAME_GPIO5, \
+ SSD20XD_PINNAME_GPIO6, \
+ SSD20XD_PINNAME_GPIO7, \
+ SSD20XD_PINNAME_GPIO10, \
+ SSD20XD_PINNAME_GPIO11, \
+ SSD20XD_PINNAME_GPIO12, \
+ SSD20XD_PINNAME_GPIO13, \
+ SSD20XD_PINNAME_GPIO14, \
+ SSD20XD_PINNAME_GPIO85, \
+ SSD20XD_PINNAME_GPIO86, \
+ SSD20XD_PINNAME_GPIO90
+
+#define SSD20XD_GPIO_OFF_GPIO0 0x0
+#define SSD20XD_GPIO_OFF_GPIO1 0x4
+#define SSD20XD_GPIO_OFF_GPIO2 0x8
+#define SSD20XD_GPIO_OFF_GPIO3 0xc
+#define SSD20XD_GPIO_OFF_GPIO4 0x10
+#define SSD20XD_GPIO_OFF_GPIO5 0x14
+#define SSD20XD_GPIO_OFF_GPIO6 0x18
+#define SSD20XD_GPIO_OFF_GPIO7 0x1c
+#define SSD20XD_GPIO_OFF_GPIO10 0x28
+#define SSD20XD_GPIO_OFF_GPIO11 0x2c
+#define SSD20XD_GPIO_OFF_GPIO12 0x30
+#define SSD20XD_GPIO_OFF_GPIO13 0x34
+#define SSD20XD_GPIO_OFF_GPIO14 0x38
+#define SSD20XD_GPIO_OFF_GPIO85 0x100
+#define SSD20XD_GPIO_OFF_GPIO86 0x104
+#define SSD20XD_GPIO_OFF_GPIO90 0x114
+
+#define SSD20XD_GPIO_OFFSETS SSD20XD_GPIO_OFF_GPIO0, \
+ SSD20XD_GPIO_OFF_GPIO1, \
+ SSD20XD_GPIO_OFF_GPIO2, \
+ SSD20XD_GPIO_OFF_GPIO3, \
+ SSD20XD_GPIO_OFF_GPIO4, \
+ SSD20XD_GPIO_OFF_GPIO5, \
+ SSD20XD_GPIO_OFF_GPIO6, \
+ SSD20XD_GPIO_OFF_GPIO7, \
+ SSD20XD_GPIO_OFF_GPIO10, \
+ SSD20XD_GPIO_OFF_GPIO11, \
+ SSD20XD_GPIO_OFF_GPIO12, \
+ SSD20XD_GPIO_OFF_GPIO13, \
+ SSD20XD_GPIO_OFF_GPIO14, \
+ SSD20XD_GPIO_OFF_GPIO85, \
+ SSD20XD_GPIO_OFF_GPIO86, \
+ SSD20XD_GPIO_OFF_GPIO90
+
+/* "ttl" pins: LCD interface pins */
+#define SSD20XD_PINNAME_TTL0 "ttl0"
+#define SSD20XD_PINNAME_TTL1 "ttl1"
+#define SSD20XD_PINNAME_TTL2 "ttl2"
+#define SSD20XD_PINNAME_TTL3 "ttl3"
+#define SSD20XD_PINNAME_TTL4 "ttl4"
+#define SSD20XD_PINNAME_TTL5 "ttl5"
+#define SSD20XD_PINNAME_TTL6 "ttl6"
+#define SSD20XD_PINNAME_TTL7 "ttl7"
+#define SSD20XD_PINNAME_TTL8 "ttl8"
+#define SSD20XD_PINNAME_TTL9 "ttl9"
+#define SSD20XD_PINNAME_TTL10 "ttl10"
+#define SSD20XD_PINNAME_TTL11 "ttl11"
+#define SSD20XD_PINNAME_TTL12 "ttl12"
+#define SSD20XD_PINNAME_TTL13 "ttl13"
+#define SSD20XD_PINNAME_TTL14 "ttl14"
+#define SSD20XD_PINNAME_TTL15 "ttl15"
+#define SSD20XD_PINNAME_TTL16 "ttl16"
+#define SSD20XD_PINNAME_TTL17 "ttl17"
+#define SSD20XD_PINNAME_TTL18 "ttl18"
+#define SSD20XD_PINNAME_TTL19 "ttl19"
+#define SSD20XD_PINNAME_TTL20 "ttl20"
+#define SSD20XD_PINNAME_TTL21 "ttl21"
+#define SSD20XD_PINNAME_TTL22 "ttl22"
+#define SSD20XD_PINNAME_TTL23 "ttl23"
+#define SSD20XD_PINNAME_TTL24 "ttl24"
+#define SSD20XD_PINNAME_TTL25 "ttl25"
+#define SSD20XD_PINNAME_TTL26 "ttl26"
+#define SSD20XD_PINNAME_TTL27 "ttl27"
+
+#define SSD20XD_TTL_PINNAMES SSD20XD_PINNAME_TTL0, \
+ SSD20XD_PINNAME_TTL1, \
+ SSD20XD_PINNAME_TTL2, \
+ SSD20XD_PINNAME_TTL3, \
+ SSD20XD_PINNAME_TTL4, \
+ SSD20XD_PINNAME_TTL5, \
+ SSD20XD_PINNAME_TTL6, \
+ SSD20XD_PINNAME_TTL7, \
+ SSD20XD_PINNAME_TTL8, \
+ SSD20XD_PINNAME_TTL9, \
+ SSD20XD_PINNAME_TTL10, \
+ SSD20XD_PINNAME_TTL11, \
+ SSD20XD_PINNAME_TTL12, \
+ SSD20XD_PINNAME_TTL13, \
+ SSD20XD_PINNAME_TTL14, \
+ SSD20XD_PINNAME_TTL15, \
+ SSD20XD_PINNAME_TTL16, \
+ SSD20XD_PINNAME_TTL17, \
+ SSD20XD_PINNAME_TTL18, \
+ SSD20XD_PINNAME_TTL19, \
+ SSD20XD_PINNAME_TTL20, \
+ SSD20XD_PINNAME_TTL21, \
+ SSD20XD_PINNAME_TTL22, \
+ SSD20XD_PINNAME_TTL23, \
+ SSD20XD_PINNAME_TTL24, \
+ SSD20XD_PINNAME_TTL25, \
+ SSD20XD_PINNAME_TTL26, \
+ SSD20XD_PINNAME_TTL27
+
+#define SSD20XD_TTL_OFFSET_TTL0 0x80
+#define SSD20XD_TTL_OFFSET_TTL1 0x84
+#define SSD20XD_TTL_OFFSET_TTL2 0x88
+#define SSD20XD_TTL_OFFSET_TTL3 0x8c
+#define SSD20XD_TTL_OFFSET_TTL4 0x90
+#define SSD20XD_TTL_OFFSET_TTL5 0x94
+#define SSD20XD_TTL_OFFSET_TTL6 0x98
+#define SSD20XD_TTL_OFFSET_TTL7 0x9c
+#define SSD20XD_TTL_OFFSET_TTL8 0xa0
+#define SSD20XD_TTL_OFFSET_TTL9 0xa4
+#define SSD20XD_TTL_OFFSET_TTL10 0xa8
+#define SSD20XD_TTL_OFFSET_TTL11 0xac
+#define SSD20XD_TTL_OFFSET_TTL12 0xb0
+#define SSD20XD_TTL_OFFSET_TTL13 0xb4
+#define SSD20XD_TTL_OFFSET_TTL14 0xb8
+#define SSD20XD_TTL_OFFSET_TTL15 0xbc
+#define SSD20XD_TTL_OFFSET_TTL16 0xc0
+#define SSD20XD_TTL_OFFSET_TTL17 0xc4
+#define SSD20XD_TTL_OFFSET_TTL18 0xc8
+#define SSD20XD_TTL_OFFSET_TTL19 0xcc
+#define SSD20XD_TTL_OFFSET_TTL20 0xd0
+#define SSD20XD_TTL_OFFSET_TTL21 0xd4
+#define SSD20XD_TTL_OFFSET_TTL22 0xd8
+#define SSD20XD_TTL_OFFSET_TTL23 0xdc
+#define SSD20XD_TTL_OFFSET_TTL24 0xe0
+#define SSD20XD_TTL_OFFSET_TTL25 0xe4
+#define SSD20XD_TTL_OFFSET_TTL26 0xe8
+#define SSD20XD_TTL_OFFSET_TTL27 0xec
+
+#define SSD20XD_TTL_OFFSETS SSD20XD_TTL_OFFSET_TTL0, \
+ SSD20XD_TTL_OFFSET_TTL1, \
+ SSD20XD_TTL_OFFSET_TTL2, \
+ SSD20XD_TTL_OFFSET_TTL3, \
+ SSD20XD_TTL_OFFSET_TTL4, \
+ SSD20XD_TTL_OFFSET_TTL5, \
+ SSD20XD_TTL_OFFSET_TTL6, \
+ SSD20XD_TTL_OFFSET_TTL7, \
+ SSD20XD_TTL_OFFSET_TTL8, \
+ SSD20XD_TTL_OFFSET_TTL9, \
+ SSD20XD_TTL_OFFSET_TTL10, \
+ SSD20XD_TTL_OFFSET_TTL11, \
+ SSD20XD_TTL_OFFSET_TTL12, \
+ SSD20XD_TTL_OFFSET_TTL13, \
+ SSD20XD_TTL_OFFSET_TTL14, \
+ SSD20XD_TTL_OFFSET_TTL15, \
+ SSD20XD_TTL_OFFSET_TTL16, \
+ SSD20XD_TTL_OFFSET_TTL17, \
+ SSD20XD_TTL_OFFSET_TTL18, \
+ SSD20XD_TTL_OFFSET_TTL19, \
+ SSD20XD_TTL_OFFSET_TTL20, \
+ SSD20XD_TTL_OFFSET_TTL21, \
+ SSD20XD_TTL_OFFSET_TTL22, \
+ SSD20XD_TTL_OFFSET_TTL23, \
+ SSD20XD_TTL_OFFSET_TTL24, \
+ SSD20XD_TTL_OFFSET_TTL25, \
+ SSD20XD_TTL_OFFSET_TTL26, \
+ SSD20XD_TTL_OFFSET_TTL27
+
+/* On the ssd20xd the two normal uarts have dedicated pins */
+#define SSD20XD_PINNAME_UART0_RX "uart0_rx"
+#define SSD20XD_PINNAME_UART0_TX "uart0_tx"
+
+#define SSD20XD_UART0_NAMES \
+ SSD20XD_PINNAME_UART0_RX, \
+ SSD20XD_PINNAME_UART0_TX
+
+#define SSD20XD_PINNAME_UART1_RX "uart1_rx"
+#define SSD20XD_PINNAME_UART1_TX "uart1_tx"
+
+#define SSD20XD_UART1_NAMES \
+ SSD20XD_PINNAME_UART1_RX, \
+ SSD20XD_PINNAME_UART1_TX
+
+#define SSD20XD_OFF_UART0_RX 0x60
+#define SSD20XD_OFF_UART0_TX 0x64
+
+#define SSD20XD_UART0_OFFSETS \
+ SSD20XD_OFF_UART0_RX, \
+ SSD20XD_OFF_UART0_TX
+
+#define SSD20XD_OFF_UART1_RX 0x68
+#define SSD20XD_OFF_UART1_TX 0x6c
+
+#define SSD20XD_UART1_OFFSETS \
+ SSD20XD_OFF_UART1_RX, \
+ SSD20XD_OFF_UART1_TX
+
+/*
+ * The ssd20x parts have the same pin names but a different ordering
+ * of the registers that control the gpio.
+ */
+#define SSD20XD_OFF_SD_D0 0x140
+#define SSD20XD_OFF_SD_D1 0x144
+#define SSD20XD_OFF_SD_D2 0x148
+#define SSD20XD_OFF_SD_D3 0x14c
+#define SSD20XD_OFF_SD_CMD 0x150
+#define SSD20XD_OFF_SD_CLK 0x154
+
+#define SSD20XD_SD_OFFSETS SSD20XD_OFF_SD_CLK, \
+ SSD20XD_OFF_SD_CMD, \
+ SSD20XD_OFF_SD_D0, \
+ SSD20XD_OFF_SD_D1, \
+ SSD20XD_OFF_SD_D2, \
+ SSD20XD_OFF_SD_D3
+
+static const char * const ssd20xd_names[] = {
+ FUART_NAMES,
+ SD_NAMES,
+ SSD20XD_UART0_NAMES,
+ SSD20XD_UART1_NAMES,
+ SSD20XD_TTL_PINNAMES,
+ SSD20XD_GPIO_NAMES,
+};
+
+static const unsigned int ssd20xd_offsets[] = {
+ FUART_OFFSETS,
+ SSD20XD_SD_OFFSETS,
+ SSD20XD_UART0_OFFSETS,
+ SSD20XD_UART1_OFFSETS,
+ SSD20XD_TTL_OFFSETS,
+ SSD20XD_GPIO_OFFSETS,
+};
+
+MSC313_GPIO_CHIPDATA(ssd20xd);
#endif
struct msc313_gpio {
@@ -344,7 +601,6 @@ static int msc313_gpio_probe(struct platform_device *pdev)
struct irq_domain *parent_domain;
struct device_node *parent_node;
struct device *dev = &pdev->dev;
- int ret;
match_data = of_device_get_match_data(dev);
if (!match_data)
@@ -399,8 +655,7 @@ static int msc313_gpio_probe(struct platform_device *pdev)
gpioirqchip->handler = handle_bad_irq;
gpioirqchip->default_type = IRQ_TYPE_NONE;
- ret = devm_gpiochip_add_data(dev, gpiochip, gpio);
- return ret;
+ return devm_gpiochip_add_data(dev, gpiochip, gpio);
}
static int msc313_gpio_remove(struct platform_device *pdev)
@@ -414,6 +669,10 @@ static const struct of_device_id msc313_gpio_of_match[] = {
.compatible = "mstar,msc313-gpio",
.data = &msc313_data,
},
+ {
+ .compatible = "sstar,ssd20xd-gpio",
+ .data = &ssd20xd_data,
+ },
#endif
{ }
};
@@ -456,5 +715,4 @@ static struct platform_driver msc313_gpio_driver = {
.probe = msc313_gpio_probe,
.remove = msc313_gpio_remove,
};
-
builtin_platform_driver(msc313_gpio_driver);
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index c3658a597a80..ccaad1cb3c2e 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -205,8 +205,7 @@ mediatek_gpio_xlate(struct gpio_chip *chip,
}
static int
-mediatek_gpio_bank_probe(struct device *dev,
- struct device_node *node, int bank)
+mediatek_gpio_bank_probe(struct device *dev, int bank)
{
struct mtk *mtk = dev_get_drvdata(dev);
struct mtk_gc *rg;
@@ -217,7 +216,6 @@ mediatek_gpio_bank_probe(struct device *dev,
memset(rg, 0, sizeof(*rg));
spin_lock_init(&rg->lock);
- rg->chip.of_node = node;
rg->bank = bank;
dat = mtk->base + GPIO_REG_DATA + (rg->bank * GPIO_BANK_STRIDE);
@@ -311,7 +309,7 @@ mediatek_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mtk);
for (i = 0; i < MTK_BANK_CNT; i++) {
- ret = mediatek_gpio_bank_probe(dev, np, i);
+ ret = mediatek_gpio_bank_probe(dev, i);
if (ret)
return ret;
}
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 8f429d9f3661..4c1f9e1091b7 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1183,7 +1183,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
mvchip->chip.ngpio = ngpios;
mvchip->chip.can_sleep = false;
- mvchip->chip.of_node = np;
mvchip->chip.dbg_show = mvebu_gpio_dbg_show;
if (soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 415e8df89d6f..e099c39e0355 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1419,9 +1419,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
bank->is_mpuio = pdata->is_mpuio;
bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
bank->regs = pdata->regs;
-#ifdef CONFIG_OF_GPIO
- bank->chip.of_node = of_node_get(node);
-#endif
if (node) {
if (!of_property_read_bool(node, "ti,gpio-always-on"))
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index e8e9029ba5bd..bac10c2faf56 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -170,9 +170,7 @@ static int palmas_gpio_probe(struct platform_device *pdev)
palmas_gpio->gpio_chip.set = palmas_gpio_set;
palmas_gpio->gpio_chip.get = palmas_gpio_get;
palmas_gpio->gpio_chip.parent = &pdev->dev;
-#ifdef CONFIG_OF_GPIO
- palmas_gpio->gpio_chip.of_node = pdev->dev.of_node;
-#endif
+
palmas_pdata = dev_get_platdata(palmas->dev);
if (palmas_pdata && palmas_pdata->gpio_base)
palmas_gpio->gpio_chip.base = palmas_pdata->gpio_base;
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index a552df298a97..3a0bd8795741 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -346,51 +346,45 @@ static int pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
static int pch_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
s32 ret;
struct pch_gpio *chip;
int irq_base;
- chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
- chip->dev = &pdev->dev;
+ chip->dev = dev;
ret = pcim_enable_device(pdev);
if (ret) {
- dev_err(&pdev->dev, "pci_enable_device FAILED");
+ dev_err(dev, "pci_enable_device FAILED");
return ret;
}
ret = pcim_iomap_regions(pdev, BIT(1), KBUILD_MODNAME);
if (ret) {
- dev_err(&pdev->dev, "pci_request_regions FAILED-%d", ret);
+ dev_err(dev, "pci_request_regions FAILED-%d", ret);
return ret;
}
chip->base = pcim_iomap_table(pdev)[1];
-
- if (pdev->device == 0x8803)
- chip->ioh = INTEL_EG20T_PCH;
- else if (pdev->device == 0x8014)
- chip->ioh = OKISEMI_ML7223m_IOH;
- else if (pdev->device == 0x8043)
- chip->ioh = OKISEMI_ML7223n_IOH;
-
+ chip->ioh = id->driver_data;
chip->reg = chip->base;
pci_set_drvdata(pdev, chip);
spin_lock_init(&chip->spinlock);
pch_gpio_setup(chip);
- ret = devm_gpiochip_add_data(&pdev->dev, &chip->gpio, chip);
+ ret = devm_gpiochip_add_data(dev, &chip->gpio, chip);
if (ret) {
- dev_err(&pdev->dev, "PCH gpio: Failed to register GPIO\n");
+ dev_err(dev, "PCH gpio: Failed to register GPIO\n");
return ret;
}
- irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0,
+ irq_base = devm_irq_alloc_descs(dev, -1, 0,
gpio_pins[chip->ioh], NUMA_NO_NODE);
if (irq_base < 0) {
- dev_warn(&pdev->dev, "PCH gpio: Failed to get IRQ base num\n");
+ dev_warn(dev, "PCH gpio: Failed to get IRQ base num\n");
chip->irq_base = -1;
return 0;
}
@@ -400,10 +394,10 @@ static int pch_gpio_probe(struct pci_dev *pdev,
iowrite32(BIT(gpio_pins[chip->ioh]) - 1, &chip->reg->imask);
iowrite32(BIT(gpio_pins[chip->ioh]) - 1, &chip->reg->ien);
- ret = devm_request_irq(&pdev->dev, pdev->irq, pch_gpio_handler,
+ ret = devm_request_irq(dev, pdev->irq, pch_gpio_handler,
IRQF_SHARED, KBUILD_MODNAME, chip);
if (ret) {
- dev_err(&pdev->dev, "request_irq failed\n");
+ dev_err(dev, "request_irq failed\n");
return ret;
}
@@ -439,10 +433,14 @@ static int __maybe_unused pch_gpio_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pch_gpio_pm_ops, pch_gpio_suspend, pch_gpio_resume);
static const struct pci_device_id pch_gpio_pcidev_id[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043) },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803),
+ .driver_data = INTEL_EG20T_PCH },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014),
+ .driver_data = OKISEMI_ML7223m_IOH },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043),
+ .driver_data = OKISEMI_ML7223n_IOH },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803),
+ .driver_data = INTEL_EG20T_PCH },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
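The pch_gpio hunk above replaces an open-coded if/else chain on
pdev->device with per-entry driver_data in the PCI ID table; the PCI core
hands the matched entry to probe(), so the variant lookup collapses to a
single assignment. A sketch of the same pattern under made-up names
(my_variant, my_ids, my_pci_probe):

	#include <linux/module.h>
	#include <linux/pci.h>

	enum my_variant { MY_VARIANT_A, MY_VARIANT_B };

	static const struct pci_device_id my_ids[] = {
		{ PCI_DEVICE(0x1234, 0x0001), .driver_data = MY_VARIANT_A },
		{ PCI_DEVICE(0x1234, 0x0002), .driver_data = MY_VARIANT_B },
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, my_ids);

	static int my_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
	{
		/* The matched table entry is passed in by the PCI core. */
		enum my_variant variant = id->driver_data;

		dev_info(&pdev->dev, "probing variant %d\n", variant);
		return 0;
	}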
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index 938285190566..e518490c4b68 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -331,7 +331,6 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.ngpio = SPRD_PMIC_EIC_NR;
pmic_eic->chip.base = -1;
pmic_eic->chip.parent = &pdev->dev;
- pmic_eic->chip.of_node = pdev->dev.of_node;
pmic_eic->chip.direction_input = sprd_pmic_eic_direction_input;
pmic_eic->chip.request = sprd_pmic_eic_request;
pmic_eic->chip.free = sprd_pmic_eic_free;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 382468e294e1..c7fbfa3ae43b 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -343,8 +343,7 @@ static int pxa_gpio_of_xlate(struct gpio_chip *gc,
}
#endif
-static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
- struct device_node *np, void __iomem *regbase)
+static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio, void __iomem *regbase)
{
int i, gpio, nbanks = DIV_ROUND_UP(ngpio, 32);
struct pxa_gpio_bank *bank;
@@ -354,6 +353,7 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
if (!pchip->banks)
return -ENOMEM;
+ pchip->chip.parent = pchip->dev;
pchip->chip.label = "gpio-pxa";
pchip->chip.direction_input = pxa_gpio_direction_input;
pchip->chip.direction_output = pxa_gpio_direction_output;
@@ -365,7 +365,6 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
pchip->chip.free = gpiochip_generic_free;
#ifdef CONFIG_OF_GPIO
- pchip->chip.of_node = np;
pchip->chip.of_xlate = pxa_gpio_of_xlate;
pchip->chip.of_gpio_n_cells = 2;
#endif
@@ -675,8 +674,7 @@ static int pxa_gpio_probe(struct platform_device *pdev)
}
/* Initialize GPIO chips */
- ret = pxa_init_gpio_chip(pchip, pxa_last_gpio + 1, pdev->dev.of_node,
- gpio_reg_base);
+ ret = pxa_init_gpio_chip(pchip, pxa_last_gpio + 1, gpio_reg_base);
if (ret) {
clk_put(clk);
return ret;
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index 64a552ecc2ad..3c414e0005fc 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -221,7 +221,6 @@ static int rpi_exp_gpio_probe(struct platform_device *pdev)
rpi_gpio->gc.parent = dev;
rpi_gpio->gc.label = MODULE_NAME;
rpi_gpio->gc.owner = THIS_MODULE;
- rpi_gpio->gc.of_node = np;
rpi_gpio->gc.base = -1;
rpi_gpio->gc.ngpio = NUM_GPIO;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index f7b653314e7e..bd2e16d6e21c 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -477,7 +477,6 @@ static void gpio_rcar_enable_inputs(struct gpio_rcar_priv *p)
static int gpio_rcar_probe(struct platform_device *pdev)
{
struct gpio_rcar_priv *p;
- struct resource *irq;
struct gpio_chip *gpio_chip;
struct irq_chip *irq_chip;
struct gpio_irq_chip *girq;
@@ -502,12 +501,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(dev, "missing IRQ\n");
- ret = -EINVAL;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto err0;
- }
+ p->irq_parent = ret;
p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base)) {
@@ -555,11 +552,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
goto err0;
}
- p->irq_parent = irq->start;
- if (devm_request_irq(dev, irq->start, gpio_rcar_irq_handler,
- IRQF_SHARED, name, p)) {
+ ret = devm_request_irq(dev, p->irq_parent, gpio_rcar_irq_handler,
+ IRQF_SHARED, name, p);
+ if (ret) {
dev_err(dev, "failed to request IRQ\n");
- ret = -ENOENT;
goto err1;
}
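Both gpio-rcar here and gpio-ts5500 at the end of this series convert
platform_get_resource(pdev, IORESOURCE_IRQ, 0) to platform_get_irq(),
which returns the Linux IRQ number directly and prints its own error
message (except for -EPROBE_DEFER). A sketch of the conversion, assuming
a generic handler and probe helper:

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int my_request_irq(struct platform_device *pdev)
	{
		int irq;

		/* Returns the IRQ number or a negative errno. */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		return devm_request_irq(&pdev->dev, irq, my_irq_handler,
					IRQF_SHARED, dev_name(&pdev->dev),
					pdev);
	}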
diff --git a/drivers/gpio/gpio-rda.c b/drivers/gpio/gpio-rda.c
index 463846431183..62ba18b3a602 100644
--- a/drivers/gpio/gpio-rda.c
+++ b/drivers/gpio/gpio-rda.c
@@ -197,7 +197,6 @@ static void rda_gpio_irq_handler(struct irq_desc *desc)
static int rda_gpio_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct rda_gpio *rda_gpio;
@@ -240,8 +239,6 @@ static int rda_gpio_probe(struct platform_device *pdev)
rda_gpio->chip.label = dev_name(dev);
rda_gpio->chip.ngpio = ngpios;
rda_gpio->chip.base = -1;
- rda_gpio->chip.parent = dev;
- rda_gpio->chip.of_node = np;
if (rda_gpio->irq >= 0) {
rda_gpio->irq_chip.name = "rda-gpio",
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 69c219742083..6383136cbe59 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -244,16 +244,12 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip = &gpio->gpio_chip;
chip->parent = config->parent;
+ chip->fwnode = config->fwnode;
chip->base = -1;
chip->ngpio = config->ngpio;
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
-#if defined(CONFIG_OF_GPIO)
- /* gpiolib will use of_node of the parent if chip->of_node is NULL */
- chip->of_node = to_of_node(config->fwnode);
-#endif /* CONFIG_OF_GPIO */
-
/*
* If our regmap is fast_io we should probably set can_sleep to false.
* Right now, the regmap doesn't save this property, nor is there any
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index ce63cbd14d69..a4c4e4584f5b 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -465,6 +465,22 @@ out:
return ret;
}
+static int rockchip_irq_reqres(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ return gpiochip_reqres_irq(&bank->gpio_chip, d->hwirq);
+}
+
+static void rockchip_irq_relres(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ gpiochip_relres_irq(&bank->gpio_chip, d->hwirq);
+}
+
static void rockchip_irq_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
@@ -536,6 +552,8 @@ static int rockchip_interrupts_register(struct rockchip_pin_bank *bank)
gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
+ gc->chip_types[0].chip.irq_request_resources = rockchip_irq_reqres;
+ gc->chip_types[0].chip.irq_release_resources = rockchip_irq_relres;
gc->wake_enabled = IRQ_MSK(bank->nr_pins);
/*
@@ -566,9 +584,6 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
gc->ngpio = bank->nr_pins;
gc->label = bank->name;
gc->parent = bank->dev;
-#ifdef CONFIG_OF_GPIO
- gc->of_node = of_node_get(bank->of_node);
-#endif
ret = gpiochip_add_data(gc, bank);
if (ret) {
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index b7c950658170..3e95da717fc9 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -192,7 +192,6 @@ static int sama5d2_piobu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, piobu);
piobu->chip.label = pdev->name;
piobu->chip.parent = &pdev->dev;
- piobu->chip.of_node = pdev->dev.of_node;
piobu->chip.owner = THIS_MODULE,
piobu->chip.get_direction = sama5d2_piobu_get_direction,
piobu->chip.direction_input = sama5d2_piobu_direction_input,
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 0600f71462b5..acda4c5052d3 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -139,7 +139,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num,
/*
* according to the datasheet, writing to the level register has no
* effect when GPIO is programmed as input.
- * Actually the the level register is read-only when configured as input.
+ * Actually the level register is read-only when configured as input.
* Thus presetting the output level before switching to output is _NOT_ possible.
* Hence we set the level after configuring the GPIO as output.
* But we cannot prevent a short low pulse if direction is set to high
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
new file mode 100644
index 000000000000..04b137eca8da
--- /dev/null
+++ b/drivers/gpio/gpio-sim.c
@@ -0,0 +1,1592 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * GPIO testing driver based on configfs.
+ *
+ * Copyright (C) 2021 Bartosz Golaszewski <brgl@bgdev.pl>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/completion.h>
+#include <linux/configfs.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irq_sim.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+#include <linux/sysfs.h>
+
+#include "gpiolib.h"
+
+#define GPIO_SIM_PROP_MAX 4 /* Max 3 properties + sentinel. */
+#define GPIO_SIM_NUM_ATTRS 3 /* value, pull and sentinel */
+
+static DEFINE_IDA(gpio_sim_ida);
+
+struct gpio_sim_chip {
+ struct gpio_chip gc;
+ unsigned long *direction_map;
+ unsigned long *value_map;
+ unsigned long *pull_map;
+ struct irq_domain *irq_sim;
+ struct mutex lock;
+ const struct attribute_group **attr_groups;
+};
+
+struct gpio_sim_attribute {
+ struct device_attribute dev_attr;
+ unsigned int offset;
+};
+
+static struct gpio_sim_attribute *
+to_gpio_sim_attr(struct device_attribute *dev_attr)
+{
+ return container_of(dev_attr, struct gpio_sim_attribute, dev_attr);
+}
+
+static int gpio_sim_apply_pull(struct gpio_sim_chip *chip,
+ unsigned int offset, int value)
+{
+ int irq, irq_type, ret;
+ struct gpio_desc *desc;
+ struct gpio_chip *gc;
+
+ gc = &chip->gc;
+ desc = &gc->gpiodev->descs[offset];
+
+ mutex_lock(&chip->lock);
+
+ if (test_bit(FLAG_REQUESTED, &desc->flags) &&
+ !test_bit(FLAG_IS_OUT, &desc->flags)) {
+ if (value == !!test_bit(offset, chip->value_map))
+ goto set_pull;
+
+ /*
+ * This is fine - it just means nobody is listening
+ * for interrupts on this line, otherwise
+ * irq_create_mapping() would have been called from
+ * the to_irq() callback.
+ */
+ irq = irq_find_mapping(chip->irq_sim, offset);
+ if (!irq)
+ goto set_value;
+
+ irq_type = irq_get_trigger_type(irq);
+
+ if ((value && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
+ (!value && (irq_type & IRQ_TYPE_EDGE_FALLING))) {
+ ret = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
+ true);
+ if (ret)
+ goto set_pull;
+ }
+ }
+
+set_value:
+ /* Change the value unless we're actively driving the line. */
+ if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
+ !test_bit(FLAG_IS_OUT, &desc->flags))
+ __assign_bit(offset, chip->value_map, value);
+
+set_pull:
+ __assign_bit(offset, chip->pull_map, value);
+ mutex_unlock(&chip->lock);
+ return 0;
+}
+
+static int gpio_sim_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+ int ret;
+
+ mutex_lock(&chip->lock);
+ ret = !!test_bit(offset, chip->value_map);
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static void gpio_sim_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->lock);
+ __assign_bit(offset, chip->value_map, value);
+ mutex_unlock(&chip->lock);
+}
+
+static int gpio_sim_get_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->lock);
+ bitmap_copy(bits, chip->value_map, gc->ngpio);
+ mutex_unlock(&chip->lock);
+
+ return 0;
+}
+
+static void gpio_sim_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->lock);
+ bitmap_copy(chip->value_map, bits, gc->ngpio);
+ mutex_unlock(&chip->lock);
+}
+
+static int gpio_sim_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->lock);
+ __clear_bit(offset, chip->direction_map);
+ __assign_bit(offset, chip->value_map, value);
+ mutex_unlock(&chip->lock);
+
+ return 0;
+}
+
+static int gpio_sim_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->lock);
+ __set_bit(offset, chip->direction_map);
+ mutex_unlock(&chip->lock);
+
+ return 0;
+}
+
+static int gpio_sim_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+ int direction;
+
+ mutex_lock(&chip->lock);
+ direction = !!test_bit(offset, chip->direction_map);
+ mutex_unlock(&chip->lock);
+
+ return direction ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+static int gpio_sim_set_config(struct gpio_chip *gc,
+ unsigned int offset, unsigned long config)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ return gpio_sim_apply_pull(chip, offset, 1);
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return gpio_sim_apply_pull(chip, offset, 0);
+ default:
+ break;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int gpio_sim_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+
+ return irq_create_mapping(chip->irq_sim, offset);
+}
+
+static void gpio_sim_free(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->lock);
+ __assign_bit(offset, chip->value_map, !!test_bit(offset, chip->pull_map));
+ mutex_unlock(&chip->lock);
+}
+
+static ssize_t gpio_sim_sysfs_val_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gpio_sim_attribute *line_attr = to_gpio_sim_attr(attr);
+ struct gpio_sim_chip *chip = dev_get_drvdata(dev);
+ int val;
+
+ mutex_lock(&chip->lock);
+ val = !!test_bit(line_attr->offset, chip->value_map);
+ mutex_unlock(&chip->lock);
+
+ return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t gpio_sim_sysfs_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ /*
+ * Not assigning this function will result in write() returning -EIO
+ * which is confusing. Return -EPERM explicitly.
+ */
+ return -EPERM;
+}
+
+static const char *const gpio_sim_sysfs_pull_strings[] = {
+ [0] = "pull-down",
+ [1] = "pull-up",
+};
+
+static ssize_t gpio_sim_sysfs_pull_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gpio_sim_attribute *line_attr = to_gpio_sim_attr(attr);
+ struct gpio_sim_chip *chip = dev_get_drvdata(dev);
+ int pull;
+
+ mutex_lock(&chip->lock);
+ pull = !!test_bit(line_attr->offset, chip->pull_map);
+ mutex_unlock(&chip->lock);
+
+ return sysfs_emit(buf, "%s\n", gpio_sim_sysfs_pull_strings[pull]);
+}
+
+static ssize_t gpio_sim_sysfs_pull_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct gpio_sim_attribute *line_attr = to_gpio_sim_attr(attr);
+ struct gpio_sim_chip *chip = dev_get_drvdata(dev);
+ int ret, pull;
+
+ pull = sysfs_match_string(gpio_sim_sysfs_pull_strings, buf);
+ if (pull < 0)
+ return pull;
+
+ ret = gpio_sim_apply_pull(chip, line_attr->offset, pull);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static void gpio_sim_mutex_destroy(void *data)
+{
+ struct mutex *lock = data;
+
+ mutex_destroy(lock);
+}
+
+static void gpio_sim_sysfs_remove(void *data)
+{
+ struct gpio_sim_chip *chip = data;
+
+ sysfs_remove_groups(&chip->gc.gpiodev->dev.kobj, chip->attr_groups);
+}
+
+static int gpio_sim_setup_sysfs(struct gpio_sim_chip *chip)
+{
+ struct device_attribute *val_dev_attr, *pull_dev_attr;
+ struct gpio_sim_attribute *val_attr, *pull_attr;
+ unsigned int num_lines = chip->gc.ngpio;
+ struct device *dev = chip->gc.parent;
+ struct attribute_group *attr_group;
+ struct attribute **attrs;
+ int i, ret;
+
+ chip->attr_groups = devm_kcalloc(dev, sizeof(*chip->attr_groups),
+ num_lines + 1, GFP_KERNEL);
+ if (!chip->attr_groups)
+ return -ENOMEM;
+
+ for (i = 0; i < num_lines; i++) {
+ attr_group = devm_kzalloc(dev, sizeof(*attr_group), GFP_KERNEL);
+ attrs = devm_kcalloc(dev, sizeof(*attrs),
+ GPIO_SIM_NUM_ATTRS, GFP_KERNEL);
+ val_attr = devm_kzalloc(dev, sizeof(*val_attr), GFP_KERNEL);
+ pull_attr = devm_kzalloc(dev, sizeof(*pull_attr), GFP_KERNEL);
+ if (!attr_group || !attrs || !val_attr || !pull_attr)
+ return -ENOMEM;
+
+ attr_group->name = devm_kasprintf(dev, GFP_KERNEL,
+ "sim_gpio%u", i);
+ if (!attr_group->name)
+ return -ENOMEM;
+
+ val_attr->offset = pull_attr->offset = i;
+
+ val_dev_attr = &val_attr->dev_attr;
+ pull_dev_attr = &pull_attr->dev_attr;
+
+ sysfs_attr_init(&val_dev_attr->attr);
+ sysfs_attr_init(&pull_dev_attr->attr);
+
+ val_dev_attr->attr.name = "value";
+ pull_dev_attr->attr.name = "pull";
+
+ val_dev_attr->attr.mode = pull_dev_attr->attr.mode = 0644;
+
+ val_dev_attr->show = gpio_sim_sysfs_val_show;
+ val_dev_attr->store = gpio_sim_sysfs_val_store;
+ pull_dev_attr->show = gpio_sim_sysfs_pull_show;
+ pull_dev_attr->store = gpio_sim_sysfs_pull_store;
+
+ attrs[0] = &val_dev_attr->attr;
+ attrs[1] = &pull_dev_attr->attr;
+
+ attr_group->attrs = attrs;
+ chip->attr_groups[i] = attr_group;
+ }
+
+ ret = sysfs_create_groups(&chip->gc.gpiodev->dev.kobj,
+ chip->attr_groups);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, gpio_sim_sysfs_remove, chip);
+}
+
+static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
+{
+ struct gpio_sim_chip *chip;
+ struct gpio_chip *gc;
+ const char *label;
+ u32 num_lines;
+ int ret;
+
+ ret = fwnode_property_read_u32(swnode, "ngpios", &num_lines);
+ if (ret)
+ return ret;
+
+ ret = fwnode_property_read_string(swnode, "gpio-sim,label", &label);
+ if (ret) {
+ label = devm_kasprintf(dev, GFP_KERNEL, "%s-%s",
+ dev_name(dev), fwnode_get_name(swnode));
+ if (!label)
+ return -ENOMEM;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->direction_map = devm_bitmap_alloc(dev, num_lines, GFP_KERNEL);
+ if (!chip->direction_map)
+ return -ENOMEM;
+
+ /* Default to input mode. */
+ bitmap_fill(chip->direction_map, num_lines);
+
+ chip->value_map = devm_bitmap_zalloc(dev, num_lines, GFP_KERNEL);
+ if (!chip->value_map)
+ return -ENOMEM;
+
+ chip->pull_map = devm_bitmap_zalloc(dev, num_lines, GFP_KERNEL);
+ if (!chip->pull_map)
+ return -ENOMEM;
+
+ chip->irq_sim = devm_irq_domain_create_sim(dev, NULL, num_lines);
+ if (IS_ERR(chip->irq_sim))
+ return PTR_ERR(chip->irq_sim);
+
+ mutex_init(&chip->lock);
+ ret = devm_add_action_or_reset(dev, gpio_sim_mutex_destroy,
+ &chip->lock);
+ if (ret)
+ return ret;
+
+ gc = &chip->gc;
+ gc->base = -1;
+ gc->ngpio = num_lines;
+ gc->label = label;
+ gc->owner = THIS_MODULE;
+ gc->parent = dev;
+ gc->fwnode = swnode;
+ gc->get = gpio_sim_get;
+ gc->set = gpio_sim_set;
+ gc->get_multiple = gpio_sim_get_multiple;
+ gc->set_multiple = gpio_sim_set_multiple;
+ gc->direction_output = gpio_sim_direction_output;
+ gc->direction_input = gpio_sim_direction_input;
+ gc->get_direction = gpio_sim_get_direction;
+ gc->set_config = gpio_sim_set_config;
+ gc->to_irq = gpio_sim_to_irq;
+ gc->free = gpio_sim_free;
+
+ ret = devm_gpiochip_add_data(dev, gc, chip);
+ if (ret)
+ return ret;
+
+ /* Used by sysfs and configfs callbacks. */
+ dev_set_drvdata(&gc->gpiodev->dev, chip);
+
+ return gpio_sim_setup_sysfs(chip);
+}
+
+static int gpio_sim_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fwnode_handle *swnode;
+ int ret;
+
+ device_for_each_child_node(dev, swnode) {
+ ret = gpio_sim_add_bank(swnode, dev);
+ if (ret) {
+ fwnode_handle_put(swnode);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id gpio_sim_of_match[] = {
+ { .compatible = "gpio-simulator" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpio_sim_of_match);
+
+static struct platform_driver gpio_sim_driver = {
+ .driver = {
+ .name = "gpio-sim",
+ .of_match_table = gpio_sim_of_match,
+ },
+ .probe = gpio_sim_probe,
+};
+
+struct gpio_sim_device {
+ struct config_group group;
+
+ /*
+ * If pdev is NULL, the device is 'pending' (waiting for configuration).
+ * Once the pointer is assigned, the device has been created and the
+ * item is 'live'.
+ */
+ struct platform_device *pdev;
+ int id;
+
+ /*
+ * Each configfs filesystem operation is protected with the subsystem
+ * mutex. Each separate attribute is protected with the buffer mutex.
+ * This structure, however, can be modified by the callbacks of
+ * different attributes, so we need another lock.
+ *
+ * We also use this lock to protect all data structures owned by
+ * this object.
+ */
+ struct mutex lock;
+
+ /*
+ * This is used to synchronously wait for the driver's probe to complete
+ * and to notify user-space about any errors.
+ */
+ struct notifier_block bus_notifier;
+ struct completion probe_completion;
+ bool driver_bound;
+
+ struct gpiod_hog *hogs;
+
+ struct list_head bank_list;
+};
+
+/* This is called with dev->lock already taken. */
+static int gpio_sim_bus_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct gpio_sim_device *simdev = container_of(nb,
+ struct gpio_sim_device,
+ bus_notifier);
+ struct device *dev = data;
+ char devname[32];
+
+ snprintf(devname, sizeof(devname), "gpio-sim.%u", simdev->id);
+
+ if (strcmp(dev_name(dev), devname) == 0) {
+ if (action == BUS_NOTIFY_BOUND_DRIVER)
+ simdev->driver_bound = true;
+ else if (action == BUS_NOTIFY_DRIVER_NOT_BOUND)
+ simdev->driver_bound = false;
+ else
+ return NOTIFY_DONE;
+
+ complete(&simdev->probe_completion);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct gpio_sim_device *to_gpio_sim_device(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_sim_device, group);
+}
+
+struct gpio_sim_bank {
+ struct config_group group;
+
+ /*
+ * We could have used the ci_parent field of the config_item but
+ * configfs is stupid and calls the item's release callback after
+ * already having cleared the parent pointer even though the parent
+ * is guaranteed to survive the child...
+ *
+ * So we need to store the pointer to the parent struct here. We can
+ * dereference it anywhere we need with no checks and no locking as
+ * it's guaranteed to survive the children and is protected by configfs
+ * locks.
+ *
+ * Same for other structures.
+ */
+ struct gpio_sim_device *parent;
+ struct list_head siblings;
+
+ char *label;
+ unsigned int num_lines;
+
+ struct list_head line_list;
+
+ struct fwnode_handle *swnode;
+};
+
+static struct gpio_sim_bank *to_gpio_sim_bank(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_sim_bank, group);
+}
+
+static struct gpio_sim_device *
+gpio_sim_bank_get_device(struct gpio_sim_bank *bank)
+{
+ return bank->parent;
+}
+
+struct gpio_sim_hog;
+
+struct gpio_sim_line {
+ struct config_group group;
+
+ struct gpio_sim_bank *parent;
+ struct list_head siblings;
+
+ unsigned int offset;
+ char *name;
+
+ /* There can only be one hog per line. */
+ struct gpio_sim_hog *hog;
+};
+
+static struct gpio_sim_line *to_gpio_sim_line(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_sim_line, group);
+}
+
+static struct gpio_sim_device *
+gpio_sim_line_get_device(struct gpio_sim_line *line)
+{
+ struct gpio_sim_bank *bank = line->parent;
+
+ return gpio_sim_bank_get_device(bank);
+}
+
+struct gpio_sim_hog {
+ struct config_item item;
+ struct gpio_sim_line *parent;
+
+ char *name;
+ int dir;
+};
+
+static struct gpio_sim_hog *to_gpio_sim_hog(struct config_item *item)
+{
+ return container_of(item, struct gpio_sim_hog, item);
+}
+
+static struct gpio_sim_device *gpio_sim_hog_get_device(struct gpio_sim_hog *hog)
+{
+ struct gpio_sim_line *line = hog->parent;
+
+ return gpio_sim_line_get_device(line);
+}
+
+static bool gpio_sim_device_is_live_unlocked(struct gpio_sim_device *dev)
+{
+ return !!dev->pdev;
+}
+
+static char *gpio_sim_strdup_trimmed(const char *str, size_t count)
+{
+ char *dup, *trimmed;
+
+ dup = kstrndup(str, count, GFP_KERNEL);
+ if (!dup)
+ return NULL;
+
+ trimmed = strstrip(dup);
+ memmove(dup, trimmed, strlen(trimmed) + 1);
+
+ return dup;
+}
+
+static ssize_t gpio_sim_device_config_dev_name_show(struct config_item *item,
+ char *page)
+{
+ struct gpio_sim_device *dev = to_gpio_sim_device(item);
+ struct platform_device *pdev;
+ int ret;
+
+ mutex_lock(&dev->lock);
+ pdev = dev->pdev;
+ if (pdev)
+ ret = sprintf(page, "%s\n", dev_name(&pdev->dev));
+ else
+ ret = sprintf(page, "gpio-sim.%d\n", dev->id);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+CONFIGFS_ATTR_RO(gpio_sim_device_config_, dev_name);
+
+static ssize_t
+gpio_sim_device_config_live_show(struct config_item *item, char *page)
+{
+ struct gpio_sim_device *dev = to_gpio_sim_device(item);
+ bool live;
+
+ mutex_lock(&dev->lock);
+ live = gpio_sim_device_is_live_unlocked(dev);
+ mutex_unlock(&dev->lock);
+
+ return sprintf(page, "%c\n", live ? '1' : '0');
+}
+
+static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank,
+ unsigned int *line_names_size)
+{
+ unsigned int max_offset = 0;
+ bool has_line_names = false;
+ struct gpio_sim_line *line;
+ char **line_names;
+
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->name) {
+ if (line->offset > max_offset)
+ max_offset = line->offset;
+
+ /*
+ * max_offset can stay at 0 so it's not an indicator
+ * of whether line names were configured at all.
+ */
+ has_line_names = true;
+ }
+ }
+
+ if (!has_line_names)
+ /*
+ * This is not an error - NULL means there are no line
+ * names configured.
+ */
+ return NULL;
+
+ *line_names_size = max_offset + 1;
+
+ line_names = kcalloc(*line_names_size, sizeof(*line_names), GFP_KERNEL);
+ if (!line_names)
+ return ERR_PTR(-ENOMEM);
+
+ list_for_each_entry(line, &bank->line_list, siblings)
+ line_names[line->offset] = line->name;
+
+ return line_names;
+}
+
+static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
+{
+ struct gpiod_hog *hog;
+
+ if (!dev->hogs)
+ return;
+
+ gpiod_remove_hogs(dev->hogs);
+
+ /* The hog array is terminated by a zeroed sentinel entry. */
+ for (hog = dev->hogs; hog->chip_label; hog++) {
+ kfree(hog->chip_label);
+ kfree(hog->line_name);
+ }
+
+ kfree(dev->hogs);
+ dev->hogs = NULL;
+}
+
+static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
+{
+ unsigned int num_hogs = 0, idx = 0;
+ struct gpio_sim_bank *bank;
+ struct gpio_sim_line *line;
+ struct gpiod_hog *hog;
+
+ list_for_each_entry(bank, &dev->bank_list, siblings) {
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->hog)
+ num_hogs++;
+ }
+ }
+
+ if (!num_hogs)
+ return 0;
+
+ /* Allocate one more for the sentinel. */
+ dev->hogs = kcalloc(num_hogs + 1, sizeof(*dev->hogs), GFP_KERNEL);
+ if (!dev->hogs)
+ return -ENOMEM;
+
+ list_for_each_entry(bank, &dev->bank_list, siblings) {
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (!line->hog)
+ continue;
+
+ hog = &dev->hogs[idx++];
+
+ /*
+ * We need to make this string manually because at this
+ * point the device doesn't exist yet and so dev_name()
+ * is not available.
+ */
+ hog->chip_label = kasprintf(GFP_KERNEL,
+ "gpio-sim.%u-%s", dev->id,
+ fwnode_get_name(bank->swnode));
+ if (!hog->chip_label) {
+ gpio_sim_remove_hogs(dev);
+ return -ENOMEM;
+ }
+
+ /*
+ * We need to duplicate this because the hog config
+ * item can be removed at any time (and we can't block
+ * it) and gpiolib doesn't make a deep copy of the hog
+ * data.
+ */
+ if (line->hog->name) {
+ hog->line_name = kstrdup(line->hog->name,
+ GFP_KERNEL);
+ if (!hog->line_name) {
+ gpio_sim_remove_hogs(dev);
+ return -ENOMEM;
+ }
+ }
+
+ hog->chip_hwnum = line->offset;
+ hog->dflags = line->hog->dir;
+ }
+ }
+
+ gpiod_add_hogs(dev->hogs);
+
+ return 0;
+}
+
+static struct fwnode_handle *
+gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
+ struct fwnode_handle *parent)
+{
+ struct property_entry properties[GPIO_SIM_PROP_MAX];
+ unsigned int prop_idx = 0, line_names_size = 0;
+ struct fwnode_handle *swnode;
+ char **line_names;
+
+ memset(properties, 0, sizeof(properties));
+
+ properties[prop_idx++] = PROPERTY_ENTRY_U32("ngpios", bank->num_lines);
+
+ if (bank->label && (strlen(bank->label) > 0))
+ properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label",
+ bank->label);
+
+ line_names = gpio_sim_make_line_names(bank, &line_names_size);
+ if (IS_ERR(line_names))
+ return ERR_CAST(line_names);
+
+ if (line_names)
+ properties[prop_idx++] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
+ "gpio-line-names",
+ line_names, line_names_size);
+
+ swnode = fwnode_create_software_node(properties, parent);
+ kfree(line_names);
+ return swnode;
+}
+
+static void gpio_sim_remove_swnode_recursive(struct fwnode_handle *swnode)
+{
+ struct fwnode_handle *child;
+
+ fwnode_for_each_child_node(swnode, child)
+ fwnode_remove_software_node(child);
+
+ fwnode_remove_software_node(swnode);
+}
+
+static bool gpio_sim_bank_labels_non_unique(struct gpio_sim_device *dev)
+{
+ struct gpio_sim_bank *this, *pos;
+
+ list_for_each_entry(this, &dev->bank_list, siblings) {
+ list_for_each_entry(pos, &dev->bank_list, siblings) {
+ if (this == pos || (!this->label || !pos->label))
+ continue;
+
+ if (strcmp(this->label, pos->label) == 0)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int gpio_sim_device_activate_unlocked(struct gpio_sim_device *dev)
+{
+ struct platform_device_info pdevinfo;
+ struct fwnode_handle *swnode;
+ struct platform_device *pdev;
+ struct gpio_sim_bank *bank;
+ int ret;
+
+ if (list_empty(&dev->bank_list))
+ return -ENODATA;
+
+ /*
+ * Non-unique GPIO device labels are a corner case we don't support,
+ * as they would interfere with the machine hogging mechanism and have
+ * little use in real life.
+ */
+ if (gpio_sim_bank_labels_non_unique(dev))
+ return -EINVAL;
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+
+ swnode = fwnode_create_software_node(NULL, NULL);
+ if (IS_ERR(swnode))
+ return PTR_ERR(swnode);
+
+ list_for_each_entry(bank, &dev->bank_list, siblings) {
+ bank->swnode = gpio_sim_make_bank_swnode(bank, swnode);
+ if (IS_ERR(bank->swnode)) {
+ ret = PTR_ERR(bank->swnode);
+ gpio_sim_remove_swnode_recursive(swnode);
+ return ret;
+ }
+ }
+
+ ret = gpio_sim_add_hogs(dev);
+ if (ret) {
+ gpio_sim_remove_swnode_recursive(swnode);
+ return ret;
+ }
+
+ pdevinfo.name = "gpio-sim";
+ pdevinfo.fwnode = swnode;
+ pdevinfo.id = dev->id;
+
+ reinit_completion(&dev->probe_completion);
+ dev->driver_bound = false;
+ bus_register_notifier(&platform_bus_type, &dev->bus_notifier);
+
+ pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(pdev)) {
+ bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
+ gpio_sim_remove_hogs(dev);
+ gpio_sim_remove_swnode_recursive(swnode);
+ return PTR_ERR(pdev);
+ }
+
+ wait_for_completion(&dev->probe_completion);
+ bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
+
+ if (!dev->driver_bound) {
+ /* Probe failed, check kernel log. */
+ platform_device_unregister(pdev);
+ gpio_sim_remove_hogs(dev);
+ gpio_sim_remove_swnode_recursive(swnode);
+ return -ENXIO;
+ }
+
+ dev->pdev = pdev;
+
+ return 0;
+}
+
+static void gpio_sim_device_deactivate_unlocked(struct gpio_sim_device *dev)
+{
+ struct fwnode_handle *swnode;
+
+ swnode = dev_fwnode(&dev->pdev->dev);
+ platform_device_unregister(dev->pdev);
+ gpio_sim_remove_swnode_recursive(swnode);
+ dev->pdev = NULL;
+ gpio_sim_remove_hogs(dev);
+}
+
+static ssize_t
+gpio_sim_device_config_live_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_sim_device *dev = to_gpio_sim_device(item);
+ bool live;
+ int ret;
+
+ ret = kstrtobool(page, &live);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if ((!live && !gpio_sim_device_is_live_unlocked(dev)) ||
+ (live && gpio_sim_device_is_live_unlocked(dev)))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_sim_device_activate_unlocked(dev);
+ else
+ gpio_sim_device_deactivate_unlocked(dev);
+
+ mutex_unlock(&dev->lock);
+
+ return ret ?: count;
+}
+
+CONFIGFS_ATTR(gpio_sim_device_config_, live);
+
+static struct configfs_attribute *gpio_sim_device_config_attrs[] = {
+ &gpio_sim_device_config_attr_dev_name,
+ &gpio_sim_device_config_attr_live,
+ NULL
+};
+
+struct gpio_sim_chip_name_ctx {
+ struct gpio_sim_device *dev;
+ char *page;
+};
+
+static int gpio_sim_emit_chip_name(struct device *dev, void *data)
+{
+ struct gpio_sim_chip_name_ctx *ctx = data;
+ struct fwnode_handle *swnode;
+ struct gpio_sim_bank *bank;
+
+ /* This would be the sysfs device exported in /sys/class/gpio. */
+ if (dev->class)
+ return 0;
+
+ swnode = dev_fwnode(dev);
+
+ list_for_each_entry(bank, &ctx->dev->bank_list, siblings) {
+ if (bank->swnode == swnode)
+ return sprintf(ctx->page, "%s\n", dev_name(dev));
+ }
+
+ return -ENODATA;
+}
+
+static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
+ char *page)
+{
+ struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
+ struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
+ struct gpio_sim_chip_name_ctx ctx = { dev, page };
+ int ret;
+
+ mutex_lock(&dev->lock);
+ if (gpio_sim_device_is_live_unlocked(dev))
+ ret = device_for_each_child(&dev->pdev->dev, &ctx,
+ gpio_sim_emit_chip_name);
+ else
+ ret = sprintf(page, "none\n");
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+CONFIGFS_ATTR_RO(gpio_sim_bank_config_, chip_name);
+
+static ssize_t
+gpio_sim_bank_config_label_show(struct config_item *item, char *page)
+{
+ struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
+ struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
+ int ret;
+
+ mutex_lock(&dev->lock);
+ ret = sprintf(page, "%s\n", bank->label ?: "");
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static ssize_t gpio_sim_bank_config_label_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
+ struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
+ char *trimmed;
+
+ mutex_lock(&dev->lock);
+
+ if (gpio_sim_device_is_live_unlocked(dev)) {
+ mutex_unlock(&dev->lock);
+ return -EBUSY;
+ }
+
+ trimmed = gpio_sim_strdup_trimmed(page, count);
+ if (!trimmed) {
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+
+ kfree(bank->label);
+ bank->label = trimmed;
+
+ mutex_unlock(&dev->lock);
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_sim_bank_config_, label);
+
+static ssize_t
+gpio_sim_bank_config_num_lines_show(struct config_item *item, char *page)
+{
+ struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
+ struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
+ int ret;
+
+ mutex_lock(&dev->lock);
+ ret = sprintf(page, "%u\n", bank->num_lines);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static ssize_t
+gpio_sim_bank_config_num_lines_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
+ struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
+ unsigned int num_lines;
+ int ret;
+
+ ret = kstrtouint(page, 0, &num_lines);
+ if (ret)
+ return ret;
+
+ if (num_lines == 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->lock);
+
+ if (gpio_sim_device_is_live_unlocked(dev)) {
+ mutex_unlock(&dev->lock);
+ return -EBUSY;
+ }
+
+ bank->num_lines = num_lines;
+
+ mutex_unlock(&dev->lock);
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_sim_bank_config_, num_lines);
+
+static struct configfs_attribute *gpio_sim_bank_config_attrs[] = {
+ &gpio_sim_bank_config_attr_chip_name,
+ &gpio_sim_bank_config_attr_label,
+ &gpio_sim_bank_config_attr_num_lines,
+ NULL
+};
+
+static ssize_t
+gpio_sim_line_config_name_show(struct config_item *item, char *page)
+{
+ struct gpio_sim_line *line = to_gpio_sim_line(item);
+ struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
+ int ret;
+
+ mutex_lock(&dev->lock);
+ ret = sprintf(page, "%s\n", line->name ?: "");
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static ssize_t gpio_sim_line_config_name_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_sim_line *line = to_gpio_sim_line(item);
+ struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
+ char *trimmed;
+
+ mutex_lock(&dev->lock);
+
+ if (gpio_sim_device_is_live_unlocked(dev)) {
+ mutex_unlock(&dev->lock);
+ return -EBUSY;
+ }
+
+ trimmed = gpio_sim_strdup_trimmed(page, count);
+ if (!trimmed) {
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+
+ kfree(line->name);
+ line->name = trimmed;
+
+ mutex_unlock(&dev->lock);
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_sim_line_config_, name);
+
+static struct configfs_attribute *gpio_sim_line_config_attrs[] = {
+ &gpio_sim_line_config_attr_name,
+ NULL
+};
+
+static ssize_t gpio_sim_hog_config_name_show(struct config_item *item,
+ char *page)
+{
+ struct gpio_sim_hog *hog = to_gpio_sim_hog(item);
+ struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog);
+ int ret;
+
+ mutex_lock(&dev->lock);
+ ret = sprintf(page, "%s\n", hog->name ?: "");
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static ssize_t gpio_sim_hog_config_name_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_sim_hog *hog = to_gpio_sim_hog(item);
+ struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog);
+ char *trimmed;
+
+ mutex_lock(&dev->lock);
+
+ if (gpio_sim_device_is_live_unlocked(dev)) {
+ mutex_unlock(&dev->lock);
+ return -EBUSY;
+ }
+
+ trimmed = gpio_sim_strdup_trimmed(page, count);
+ if (!trimmed) {
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+
+ kfree(hog->name);
+ hog->name = trimmed;
+
+ mutex_unlock(&dev->lock);
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_sim_hog_config_, name);
+
+static ssize_t gpio_sim_hog_config_direction_show(struct config_item *item,
+ char *page)
+{
+ struct gpio_sim_hog *hog = to_gpio_sim_hog(item);
+ struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog);
+ char *repr;
+ int dir;
+
+ mutex_lock(&dev->lock);
+ dir = hog->dir;
+ mutex_unlock(&dev->lock);
+
+ switch (dir) {
+ case GPIOD_IN:
+ repr = "input";
+ break;
+ case GPIOD_OUT_HIGH:
+ repr = "output-high";
+ break;
+ case GPIOD_OUT_LOW:
+ repr = "output-low";
+ break;
+ default:
+ /* This would be a programmer bug. */
+ WARN(1, "Unexpected hog direction value: %d", dir);
+ return -EINVAL;
+ }
+
+ return sprintf(page, "%s\n", repr);
+}
+
+static ssize_t
+gpio_sim_hog_config_direction_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_sim_hog *hog = to_gpio_sim_hog(item);
+ struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog);
+ char *trimmed;
+ int dir;
+
+ mutex_lock(&dev->lock);
+
+ if (gpio_sim_device_is_live_unlocked(dev)) {
+ mutex_unlock(&dev->lock);
+ return -EBUSY;
+ }
+
+ trimmed = gpio_sim_strdup_trimmed(page, count);
+ if (!trimmed) {
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+
+ if (strcmp(trimmed, "input") == 0)
+ dir = GPIOD_IN;
+ else if (strcmp(trimmed, "output-high") == 0)
+ dir = GPIOD_OUT_HIGH;
+ else if (strcmp(trimmed, "output-low") == 0)
+ dir = GPIOD_OUT_LOW;
+ else
+ dir = -EINVAL;
+
+ kfree(trimmed);
+
+ if (dir < 0) {
+ mutex_unlock(&dev->lock);
+ return dir;
+ }
+
+ hog->dir = dir;
+
+ mutex_unlock(&dev->lock);
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_sim_hog_config_, direction);
+
+static struct configfs_attribute *gpio_sim_hog_config_attrs[] = {
+ &gpio_sim_hog_config_attr_name,
+ &gpio_sim_hog_config_attr_direction,
+ NULL
+};
+
+static void gpio_sim_hog_config_item_release(struct config_item *item)
+{
+ struct gpio_sim_hog *hog = to_gpio_sim_hog(item);
+ struct gpio_sim_line *line = hog->parent;
+ struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog);
+
+ mutex_lock(&dev->lock);
+ line->hog = NULL;
+ mutex_unlock(&dev->lock);
+
+ kfree(hog->name);
+ kfree(hog);
+}
+
+static struct configfs_item_operations gpio_sim_hog_config_item_ops = {
+ .release = gpio_sim_hog_config_item_release,
+};
+
+static const struct config_item_type gpio_sim_hog_config_type = {
+ .ct_item_ops = &gpio_sim_hog_config_item_ops,
+ .ct_attrs = gpio_sim_hog_config_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *
+gpio_sim_line_config_make_hog_item(struct config_group *group, const char *name)
+{
+ struct gpio_sim_line *line = to_gpio_sim_line(&group->cg_item);
+ struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
+ struct gpio_sim_hog *hog;
+
+ if (strcmp(name, "hog") != 0)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&dev->lock);
+
+ hog = kzalloc(sizeof(*hog), GFP_KERNEL);
+ if (!hog) {
+ mutex_unlock(&dev->lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ config_item_init_type_name(&hog->item, name,
+ &gpio_sim_hog_config_type);
+
+ hog->dir = GPIOD_IN;
+ hog->name = NULL;
+ hog->parent = line;
+ line->hog = hog;
+
+ mutex_unlock(&dev->lock);
+
+ return &hog->item;
+}
+
+static void gpio_sim_line_config_group_release(struct config_item *item)
+{
+ struct gpio_sim_line *line = to_gpio_sim_line(item);
+ struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
+
+ mutex_lock(&dev->lock);
+ list_del(&line->siblings);
+ mutex_unlock(&dev->lock);
+
+ kfree(line->name);
+ kfree(line);
+}
+
+static struct configfs_item_operations gpio_sim_line_config_item_ops = {
+ .release = gpio_sim_line_config_group_release,
+};
+
+static struct configfs_group_operations gpio_sim_line_config_group_ops = {
+ .make_item = gpio_sim_line_config_make_hog_item,
+};
+
+static const struct config_item_type gpio_sim_line_config_type = {
+ .ct_item_ops = &gpio_sim_line_config_item_ops,
+ .ct_group_ops = &gpio_sim_line_config_group_ops,
+ .ct_attrs = gpio_sim_line_config_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *
+gpio_sim_bank_config_make_line_group(struct config_group *group,
+ const char *name)
+{
+ struct gpio_sim_bank *bank = to_gpio_sim_bank(&group->cg_item);
+ struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
+ struct gpio_sim_line *line;
+ unsigned int offset;
+ int ret, nchar;
+
+ ret = sscanf(name, "line%u%n", &offset, &nchar);
+ if (ret != 1 || nchar != strlen(name))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&dev->lock);
+
+ if (gpio_sim_device_is_live_unlocked(dev)) {
+ mutex_unlock(&dev->lock);
+ return ERR_PTR(-EBUSY);
+ }
+
+ line = kzalloc(sizeof(*line), GFP_KERNEL);
+ if (!line) {
+ mutex_unlock(&dev->lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ config_group_init_type_name(&line->group, name,
+ &gpio_sim_line_config_type);
+
+ line->parent = bank;
+ line->offset = offset;
+ list_add_tail(&line->siblings, &bank->line_list);
+
+ mutex_unlock(&dev->lock);
+
+ return &line->group;
+}
+
+static void gpio_sim_bank_config_group_release(struct config_item *item)
+{
+ struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
+ struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
+
+ mutex_lock(&dev->lock);
+ list_del(&bank->siblings);
+ mutex_unlock(&dev->lock);
+
+ kfree(bank->label);
+ kfree(bank);
+}
+
+static struct configfs_item_operations gpio_sim_bank_config_item_ops = {
+ .release = gpio_sim_bank_config_group_release,
+};
+
+static struct configfs_group_operations gpio_sim_bank_config_group_ops = {
+ .make_group = gpio_sim_bank_config_make_line_group,
+};
+
+static const struct config_item_type gpio_sim_bank_config_group_type = {
+ .ct_item_ops = &gpio_sim_bank_config_item_ops,
+ .ct_group_ops = &gpio_sim_bank_config_group_ops,
+ .ct_attrs = gpio_sim_bank_config_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *
+gpio_sim_device_config_make_bank_group(struct config_group *group,
+ const char *name)
+{
+ struct gpio_sim_device *dev = to_gpio_sim_device(&group->cg_item);
+ struct gpio_sim_bank *bank;
+
+ mutex_lock(&dev->lock);
+
+ if (gpio_sim_device_is_live_unlocked(dev)) {
+ mutex_unlock(&dev->lock);
+ return ERR_PTR(-EBUSY);
+ }
+
+ bank = kzalloc(sizeof(*bank), GFP_KERNEL);
+ if (!bank) {
+ mutex_unlock(&dev->lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ config_group_init_type_name(&bank->group, name,
+ &gpio_sim_bank_config_group_type);
+ bank->num_lines = 1;
+ bank->parent = dev;
+ INIT_LIST_HEAD(&bank->line_list);
+ list_add_tail(&bank->siblings, &dev->bank_list);
+
+ mutex_unlock(&dev->lock);
+
+ return &bank->group;
+}
+
+static void gpio_sim_device_config_group_release(struct config_item *item)
+{
+ struct gpio_sim_device *dev = to_gpio_sim_device(item);
+
+ mutex_lock(&dev->lock);
+ if (gpio_sim_device_is_live_unlocked(dev))
+ gpio_sim_device_deactivate_unlocked(dev);
+ mutex_unlock(&dev->lock);
+
+ mutex_destroy(&dev->lock);
+ ida_free(&gpio_sim_ida, dev->id);
+ kfree(dev);
+}
+
+static struct configfs_item_operations gpio_sim_device_config_item_ops = {
+ .release = gpio_sim_device_config_group_release,
+};
+
+static struct configfs_group_operations gpio_sim_device_config_group_ops = {
+ .make_group = gpio_sim_device_config_make_bank_group,
+};
+
+static const struct config_item_type gpio_sim_device_config_group_type = {
+ .ct_item_ops = &gpio_sim_device_config_item_ops,
+ .ct_group_ops = &gpio_sim_device_config_group_ops,
+ .ct_attrs = gpio_sim_device_config_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *
+gpio_sim_config_make_device_group(struct config_group *group, const char *name)
+{
+ struct gpio_sim_device *dev;
+ int id;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_alloc(&gpio_sim_ida, GFP_KERNEL);
+ if (id < 0) {
+ kfree(dev);
+ return ERR_PTR(id);
+ }
+
+ config_group_init_type_name(&dev->group, name,
+ &gpio_sim_device_config_group_type);
+ dev->id = id;
+ mutex_init(&dev->lock);
+ INIT_LIST_HEAD(&dev->bank_list);
+
+ dev->bus_notifier.notifier_call = gpio_sim_bus_notifier_call;
+ init_completion(&dev->probe_completion);
+
+ return &dev->group;
+}
+
+static struct configfs_group_operations gpio_sim_config_group_ops = {
+ .make_group = gpio_sim_config_make_device_group,
+};
+
+static const struct config_item_type gpio_sim_config_type = {
+ .ct_group_ops = &gpio_sim_config_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem gpio_sim_config_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "gpio-sim",
+ .ci_type = &gpio_sim_config_type,
+ },
+ },
+};
+
+static int __init gpio_sim_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&gpio_sim_driver);
+ if (ret) {
+ pr_err("Error %d while registering the platform driver\n", ret);
+ return ret;
+ }
+
+ config_group_init(&gpio_sim_config_subsys.su_group);
+ mutex_init(&gpio_sim_config_subsys.su_mutex);
+ ret = configfs_register_subsystem(&gpio_sim_config_subsys);
+ if (ret) {
+ pr_err("Error %d while registering the configfs subsystem %s\n",
+ ret, gpio_sim_config_subsys.su_group.cg_item.ci_namebuf);
+ mutex_destroy(&gpio_sim_config_subsys.su_mutex);
+ platform_driver_unregister(&gpio_sim_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(gpio_sim_init);
+
+static void __exit gpio_sim_exit(void)
+{
+ configfs_unregister_subsystem(&gpio_sim_config_subsys);
+ mutex_destroy(&gpio_sim_config_subsys.su_mutex);
+ platform_driver_unregister(&gpio_sim_driver);
+}
+module_exit(gpio_sim_exit);
+
+MODULE_AUTHOR("Bartosz Golaszewski <brgl@bgdev.pl");
+MODULE_DESCRIPTION("GPIO Simulator Module");
+MODULE_LICENSE("GPL");
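The configfs hierarchy registered above is driven entirely by mkdir and
attribute writes. A user-space sketch, assuming configfs is mounted at
/sys/kernel/config and using only the group and attribute names defined
by this driver (gpio-sim, num_lines, live):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/stat.h>
	#include <unistd.h>

	static int write_attr(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, strlen(val)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		/* Create a pending device with one 8-line bank... */
		mkdir("/sys/kernel/config/gpio-sim/chip0", 0755);
		mkdir("/sys/kernel/config/gpio-sim/chip0/bank0", 0755);
		write_attr("/sys/kernel/config/gpio-sim/chip0/bank0/num_lines",
			   "8");
		/* ...then commit it, which triggers gpio_sim_probe(). */
		return write_attr("/sys/kernel/config/gpio-sim/chip0/live",
				  "1");
	}

Once live, each line's state can be inspected through the per-line
sim_gpio<offset>/value and sim_gpio<offset>/pull attributes created by
gpio_sim_setup_sysfs().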
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index 9dd9dabb579e..9bff63990eee 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -237,7 +237,6 @@ static int sprd_gpio_probe(struct platform_device *pdev)
sprd_gpio->chip.ngpio = SPRD_GPIO_NR;
sprd_gpio->chip.base = -1;
sprd_gpio->chip.parent = &pdev->dev;
- sprd_gpio->chip.of_node = pdev->dev.of_node;
sprd_gpio->chip.request = sprd_gpio_request;
sprd_gpio->chip.free = sprd_gpio_free;
sprd_gpio->chip.get = sprd_gpio_get;
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 392fcab06ab8..e07cca0f8d35 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -324,7 +324,7 @@ static int gsta_alloc_irq_chip(struct gsta_gpio *chip)
if (rv)
return rv;
- /* Set up all all 128 interrupts: code from setup_generic_chip */
+ /* Set up all 128 interrupts: code from setup_generic_chip */
{
struct irq_chip_type *ct = gc->chip_types;
int i, j;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index dd4d58b4ae49..0fa4f0a93378 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -477,7 +477,6 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
stmpe_gpio->chip = template_chip;
stmpe_gpio->chip.ngpio = stmpe->num_gpios;
stmpe_gpio->chip.parent = &pdev->dev;
- stmpe_gpio->chip.of_node = np;
stmpe_gpio->chip.base = -1;
if (IS_ENABLED(CONFIG_DEBUG_FS))
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 8d158492488f..443fe975bf13 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -319,7 +319,6 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
tc3589x_gpio->chip.ngpio = tc3589x->num_gpio;
tc3589x_gpio->chip.parent = &pdev->dev;
tc3589x_gpio->chip.base = -1;
- tc3589x_gpio->chip.of_node = np;
girq = &tc3589x_gpio->chip.irq;
girq->chip = &tc3589x_gpio_irq_chip;
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index c026e7141e4e..34b36a8c035f 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -14,6 +14,8 @@
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/gpio/tegra194-gpio.h>
+#include <dt-bindings/gpio/tegra234-gpio.h>
+#include <dt-bindings/gpio/tegra241-gpio.h>
/* security registers */
#define TEGRA186_GPIO_CTL_SCR 0x0c
@@ -748,7 +750,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.names = (const char * const *)names;
#if defined(CONFIG_OF_GPIO)
- gpio->gpio.of_node = pdev->dev.of_node;
gpio->gpio.of_gpio_n_cells = 2;
gpio->gpio.of_xlate = tegra186_gpio_of_xlate;
#endif /* CONFIG_OF_GPIO */
@@ -972,6 +973,124 @@ static const struct tegra_gpio_soc tegra194_aon_soc = {
.num_irqs_per_bank = 8,
};
+#define TEGRA234_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA234_MAIN_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
+ }
+
+static const struct tegra_gpio_port tegra234_main_ports[] = {
+ TEGRA234_MAIN_GPIO_PORT( A, 0, 0, 8),
+ TEGRA234_MAIN_GPIO_PORT( B, 0, 3, 1),
+ TEGRA234_MAIN_GPIO_PORT( C, 5, 1, 8),
+ TEGRA234_MAIN_GPIO_PORT( D, 5, 2, 4),
+ TEGRA234_MAIN_GPIO_PORT( E, 5, 3, 8),
+ TEGRA234_MAIN_GPIO_PORT( F, 5, 4, 6),
+ TEGRA234_MAIN_GPIO_PORT( G, 4, 0, 8),
+ TEGRA234_MAIN_GPIO_PORT( H, 4, 1, 8),
+ TEGRA234_MAIN_GPIO_PORT( I, 4, 2, 7),
+ TEGRA234_MAIN_GPIO_PORT( J, 5, 0, 6),
+ TEGRA234_MAIN_GPIO_PORT( K, 3, 0, 8),
+ TEGRA234_MAIN_GPIO_PORT( L, 3, 1, 4),
+ TEGRA234_MAIN_GPIO_PORT( M, 2, 0, 8),
+ TEGRA234_MAIN_GPIO_PORT( N, 2, 1, 8),
+ TEGRA234_MAIN_GPIO_PORT( P, 2, 2, 8),
+ TEGRA234_MAIN_GPIO_PORT( Q, 2, 3, 8),
+ TEGRA234_MAIN_GPIO_PORT( R, 2, 4, 6),
+ TEGRA234_MAIN_GPIO_PORT( X, 1, 0, 8),
+ TEGRA234_MAIN_GPIO_PORT( Y, 1, 1, 8),
+ TEGRA234_MAIN_GPIO_PORT( Z, 1, 2, 8),
+ TEGRA234_MAIN_GPIO_PORT(AC, 0, 1, 8),
+ TEGRA234_MAIN_GPIO_PORT(AD, 0, 2, 4),
+ TEGRA234_MAIN_GPIO_PORT(AE, 3, 3, 2),
+ TEGRA234_MAIN_GPIO_PORT(AF, 3, 4, 4),
+ TEGRA234_MAIN_GPIO_PORT(AG, 3, 2, 8),
+};
+
+static const struct tegra_gpio_soc tegra234_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra234_main_ports),
+ .ports = tegra234_main_ports,
+ .name = "tegra234-gpio",
+ .instance = 0,
+ .num_irqs_per_bank = 8,
+};
+
+#define TEGRA234_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA234_AON_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
+ }
+
+static const struct tegra_gpio_port tegra234_aon_ports[] = {
+ TEGRA234_AON_GPIO_PORT(AA, 0, 4, 8),
+ TEGRA234_AON_GPIO_PORT(BB, 0, 5, 4),
+ TEGRA234_AON_GPIO_PORT(CC, 0, 2, 8),
+ TEGRA234_AON_GPIO_PORT(DD, 0, 3, 3),
+ TEGRA234_AON_GPIO_PORT(EE, 0, 0, 8),
+ TEGRA234_AON_GPIO_PORT(GG, 0, 1, 1),
+};
+
+static const struct tegra_gpio_soc tegra234_aon_soc = {
+ .num_ports = ARRAY_SIZE(tegra234_aon_ports),
+ .ports = tegra234_aon_ports,
+ .name = "tegra234-gpio-aon",
+ .instance = 1,
+ .num_irqs_per_bank = 8,
+};
+
+#define TEGRA241_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA241_MAIN_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
+ }
+
+static const struct tegra_gpio_port tegra241_main_ports[] = {
+ TEGRA241_MAIN_GPIO_PORT(A, 0, 0, 8),
+ TEGRA241_MAIN_GPIO_PORT(B, 0, 1, 8),
+ TEGRA241_MAIN_GPIO_PORT(C, 0, 2, 2),
+ TEGRA241_MAIN_GPIO_PORT(D, 0, 3, 6),
+ TEGRA241_MAIN_GPIO_PORT(E, 0, 4, 8),
+ TEGRA241_MAIN_GPIO_PORT(F, 1, 0, 8),
+ TEGRA241_MAIN_GPIO_PORT(G, 1, 1, 8),
+ TEGRA241_MAIN_GPIO_PORT(H, 1, 2, 8),
+ TEGRA241_MAIN_GPIO_PORT(J, 1, 3, 8),
+ TEGRA241_MAIN_GPIO_PORT(K, 1, 4, 4),
+ TEGRA241_MAIN_GPIO_PORT(L, 1, 5, 6),
+};
+
+static const struct tegra_gpio_soc tegra241_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra241_main_ports),
+ .ports = tegra241_main_ports,
+ .name = "tegra241-gpio",
+ .instance = 0,
+};
+
+#define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA241_AON_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
+ }
+
+static const struct tegra_gpio_port tegra241_aon_ports[] = {
+ TEGRA241_AON_GPIO_PORT(AA, 0, 0, 8),
+ TEGRA241_AON_GPIO_PORT(BB, 0, 0, 4),
+};
+
+static const struct tegra_gpio_soc tegra241_aon_soc = {
+ .num_ports = ARRAY_SIZE(tegra241_aon_ports),
+ .ports = tegra241_aon_ports,
+ .name = "tegra241-gpio-aon",
+ .instance = 1,
+};
+
static const struct of_device_id tegra186_gpio_of_match[] = {
{
.compatible = "nvidia,tegra186-gpio",
@@ -986,6 +1105,12 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
.compatible = "nvidia,tegra194-gpio-aon",
.data = &tegra194_aon_soc
}, {
+ .compatible = "nvidia,tegra234-gpio",
+ .data = &tegra234_main_soc
+ }, {
+ .compatible = "nvidia,tegra234-gpio-aon",
+ .data = &tegra234_aon_soc
+ }, {
/* sentinel */
}
};
@@ -996,6 +1121,8 @@ static const struct acpi_device_id tegra186_gpio_acpi_match[] = {
{ .id = "NVDA0208", .driver_data = (kernel_ulong_t)&tegra186_aon_soc },
{ .id = "NVDA0308", .driver_data = (kernel_ulong_t)&tegra194_main_soc },
{ .id = "NVDA0408", .driver_data = (kernel_ulong_t)&tegra194_aon_soc },
+ { .id = "NVDA0508", .driver_data = (kernel_ulong_t)&tegra241_main_soc },
+ { .id = "NVDA0608", .driver_data = (kernel_ulong_t)&tegra241_aon_soc },
{}
};
MODULE_DEVICE_TABLE(acpi, tegra186_gpio_acpi_match);
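Each entry in the port tables above is produced by a designated-initializer macro (the TEGRA234_AON_GPIO_PORT and TEGRA241_*_GPIO_PORT definitions in this hunk show the pattern). As a sketch, TEGRA234_AON_GPIO_PORT(AA, 0, 4, 8) expands to roughly:

[TEGRA234_AON_GPIO_PORT_AA] = {
	.name = "AA",	/* #_name stringizes the port name */
	.bank = 0,	/* register bank the port lives in */
	.port = 4,	/* port index within that bank */
	.pins = 8,	/* number of lines on this port */
},

Indexing the array by the port enum keeps the table order independent of the enum order and makes a missing port entry an all-zero hole rather than a silent shift.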
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 912382be48e1..e1d425a18854 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -196,9 +196,6 @@ static int tps65218_gpio_probe(struct platform_device *pdev)
tps65218_gpio->tps65218 = tps65218;
tps65218_gpio->gpio_chip = template_chip;
tps65218_gpio->gpio_chip.parent = &pdev->dev;
-#ifdef CONFIG_OF_GPIO
- tps65218_gpio->gpio_chip.of_node = pdev->dev.of_node;
-#endif
return devm_gpiochip_add_data(&pdev->dev, &tps65218_gpio->gpio_chip,
tps65218_gpio);
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index da0304b764a5..c5713524b581 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -77,6 +77,8 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
struct tps6586x_platform_data *pdata;
struct tps6586x_gpio *tps6586x_gpio;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
pdata = dev_get_platdata(pdev->dev.parent);
tps6586x_gpio = devm_kzalloc(&pdev->dev,
sizeof(*tps6586x_gpio), GFP_KERNEL);
@@ -97,9 +99,6 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq;
-#ifdef CONFIG_OF_GPIO
- tps6586x_gpio->gpio_chip.of_node = pdev->dev.parent->of_node;
-#endif
if (pdata && pdata->gpio_base)
tps6586x_gpio->gpio_chip.base = pdata->gpio_base;
else
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 7fa8c841081f..321e6945f0be 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -111,6 +111,8 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
int ret;
int i;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
tps65910_gpio = devm_kzalloc(&pdev->dev,
sizeof(*tps65910_gpio), GFP_KERNEL);
if (!tps65910_gpio)
@@ -137,9 +139,7 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
tps65910_gpio->gpio_chip.parent = &pdev->dev;
-#ifdef CONFIG_OF_GPIO
- tps65910_gpio->gpio_chip.of_node = tps65910->dev->of_node;
-#endif
+
if (pdata && pdata->gpio_base)
tps65910_gpio->gpio_chip.base = pdata->gpio_base;
else
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index c91890488402..b159e92a3612 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -317,22 +317,19 @@ static int ts5500_dio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const char *name = dev_name(dev);
struct ts5500_priv *priv;
- struct resource *res;
unsigned long flags;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "missing IRQ resource\n");
- return -EINVAL;
- }
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
priv = devm_kzalloc(dev, sizeof(struct ts5500_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
- priv->hwirq = res->start;
+ priv->hwirq = ret;
spin_lock_init(&priv->lock);
priv->gpio_chip.owner = THIS_MODULE;
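The ts5500 hunk above is the standard conversion from platform_get_resource(pdev, IORESOURCE_IRQ, 0) to platform_get_irq(): the latter performs any needed IRQ mapping and returns either a usable virq number or a negative errno, so the caller only has to propagate the error. A minimal sketch of the resulting pattern (foo_probe is a hypothetical driver, not code from this diff):

static int foo_probe(struct platform_device *pdev)
{
	int irq;

	/* A negative return is an errno; the core already logs the
	 * failure (except for probe deferral), so no dev_err() here. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* use irq directly, no struct resource dereference needed */
	return 0;
}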
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index 648fb418d775..6c3fbf382dba 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -80,6 +80,8 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
struct twl6040 *twl6040 = dev_get_drvdata(twl6040_core_dev);
int ret;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
twl6040gpo_chip.base = -1;
if (twl6040_get_revid(twl6040) < TWL6041_REV_ES2_0)
@@ -88,9 +90,6 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
 twl6040gpo_chip.ngpio = 1; /* twl6041 has 1 GPO */
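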
twl6040gpo_chip.parent = &pdev->dev;
-#ifdef CONFIG_OF_GPIO
- twl6040gpo_chip.of_node = twl6040_core_dev->of_node;
-#endif
ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, NULL);
if (ret < 0) {
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index e0f2b67558e7..20780c35da1b 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -298,7 +298,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
}
gc = &port->gc;
- gc->of_node = np;
gc->parent = dev;
gc->label = "vf610-gpio";
gc->ngpio = VF610_GPIO_PER_PORT;
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index 9f4941bc5760..fcc5e8c08973 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -450,7 +450,7 @@ static void virtio_gpio_request_vq(struct virtqueue *vq)
static void virtio_gpio_free_vqs(struct virtio_device *vdev)
{
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
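virtio_reset_device() is a wrapper that replaces the open-coded config-op dereference seen on the removed line; conceptually it is little more than the following sketch (the exact upstream body may differ):

void virtio_reset_device(struct virtio_device *dev)
{
	dev->config->reset(dev);
}

Routing resets through one helper gives the virtio core a single place to add checks or tracing later, instead of touching every driver again.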
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 9cf1e5ebb352..7eaf8a28638c 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -262,6 +262,8 @@ static int wm831x_gpio_probe(struct platform_device *pdev)
struct wm831x_pdata *pdata = &wm831x->pdata;
struct wm831x_gpio *wm831x_gpio;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
wm831x_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm831x_gpio),
GFP_KERNEL);
if (wm831x_gpio == NULL)
@@ -275,9 +277,6 @@ static int wm831x_gpio_probe(struct platform_device *pdev)
wm831x_gpio->gpio_chip.base = pdata->gpio_base;
else
wm831x_gpio->gpio_chip.base = -1;
-#ifdef CONFIG_OF_GPIO
- wm831x_gpio->gpio_chip.of_node = wm831x->dev->of_node;
-#endif
return devm_gpiochip_add_data(&pdev->dev, &wm831x_gpio->gpio_chip, wm831x_gpio);
}
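The tps6586x, tps65910, twl6040 and wm831x hunks all apply the same two-part conversion: drop the #ifdef CONFIG_OF_GPIO assignment of gpio_chip.of_node, and instead hand the MFD parent's firmware node to the child platform device once, up front. Condensed into a sketch (foo_gpio_probe is illustrative, not from this diff):

static int foo_gpio_probe(struct platform_device *pdev)
{
	/* Inherit the parent's fwnode; this covers both OF and
	 * ACPI, unlike the old of_node-only assignment. */
	device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));

	/* ... fill in the gpio_chip; gpiolib derives the fwnode
	 * from the parent device at gpiochip_add_data() time. */
	return 0;
}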
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 0d94d3aef752..0199f545335f 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -6,7 +6,6 @@
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -26,16 +25,6 @@
*
* where addr is the base address of that feature register and gpio is the pin.
*/
-#define GPIO_OUTPUT_EN 0x00
-#define GPIO_PADDRV 0x08
-#define GPIO_INT_EN00 0x18
-#define GPIO_INT_EN10 0x20
-#define GPIO_INT_EN20 0x28
-#define GPIO_INT_EN30 0x30
-#define GPIO_INT_POL 0x38
-#define GPIO_INT_TYPE 0x40
-#define GPIO_INT_STAT 0x48
-
#define GPIO_9XX_BYTESWAP 0x00
#define GPIO_9XX_CTRL 0x04
#define GPIO_9XX_OUTPUT_EN 0x14
@@ -52,14 +41,6 @@
#define GPIO_9XX_INT_TYPE 0x114
#define GPIO_9XX_INT_STAT 0x124
-#define GPIO_3XX_INT_EN00 0x18
-#define GPIO_3XX_INT_EN10 0x20
-#define GPIO_3XX_INT_EN20 0x28
-#define GPIO_3XX_INT_EN30 0x30
-#define GPIO_3XX_INT_POL 0x78
-#define GPIO_3XX_INT_TYPE 0x80
-#define GPIO_3XX_INT_STAT 0x88
-
/* Interrupt type register mask */
#define XLP_GPIO_IRQ_TYPE_LVL 0x0
#define XLP_GPIO_IRQ_TYPE_EDGE 0x1
@@ -72,16 +53,6 @@
#define XLP_GPIO_IRQ_BASE 768
#define XLP_MAX_NR_GPIO 96
-/* XLP variants supported by this driver */
-enum {
- XLP_GPIO_VARIANT_XLP832 = 1,
- XLP_GPIO_VARIANT_XLP316,
- XLP_GPIO_VARIANT_XLP208,
- XLP_GPIO_VARIANT_XLP980,
- XLP_GPIO_VARIANT_XLP532,
- GPIO_VARIANT_VULCAN
-};
-
struct xlp_gpio_priv {
struct gpio_chip chip;
DECLARE_BITMAP(gpio_enabled_mask, XLP_MAX_NR_GPIO);
@@ -257,44 +228,13 @@ static void xlp_gpio_set(struct gpio_chip *gc, unsigned gpio, int state)
xlp_gpio_set_reg(priv->gpio_paddrv, gpio, state);
}
-static const struct of_device_id xlp_gpio_of_ids[] = {
- {
- .compatible = "netlogic,xlp832-gpio",
- .data = (void *)XLP_GPIO_VARIANT_XLP832,
- },
- {
- .compatible = "netlogic,xlp316-gpio",
- .data = (void *)XLP_GPIO_VARIANT_XLP316,
- },
- {
- .compatible = "netlogic,xlp208-gpio",
- .data = (void *)XLP_GPIO_VARIANT_XLP208,
- },
- {
- .compatible = "netlogic,xlp980-gpio",
- .data = (void *)XLP_GPIO_VARIANT_XLP980,
- },
- {
- .compatible = "netlogic,xlp532-gpio",
- .data = (void *)XLP_GPIO_VARIANT_XLP532,
- },
- {
- .compatible = "brcm,vulcan-gpio",
- .data = (void *)GPIO_VARIANT_VULCAN,
- },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, xlp_gpio_of_ids);
-
static int xlp_gpio_probe(struct platform_device *pdev)
{
struct gpio_chip *gc;
struct gpio_irq_chip *girq;
struct xlp_gpio_priv *priv;
void __iomem *gpio_base;
- int irq_base, irq, err;
- int ngpio;
- u32 soc_type;
+ int irq, err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -308,62 +248,12 @@ static int xlp_gpio_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- if (pdev->dev.of_node) {
- soc_type = (uintptr_t)of_device_get_match_data(&pdev->dev);
- } else {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
- &pdev->dev);
- if (!acpi_id || !acpi_id->driver_data) {
- dev_err(&pdev->dev, "Unable to match ACPI ID\n");
- return -ENODEV;
- }
- soc_type = (uintptr_t) acpi_id->driver_data;
- }
-
- switch (soc_type) {
- case XLP_GPIO_VARIANT_XLP832:
- priv->gpio_out_en = gpio_base + GPIO_OUTPUT_EN;
- priv->gpio_paddrv = gpio_base + GPIO_PADDRV;
- priv->gpio_intr_stat = gpio_base + GPIO_INT_STAT;
- priv->gpio_intr_type = gpio_base + GPIO_INT_TYPE;
- priv->gpio_intr_pol = gpio_base + GPIO_INT_POL;
- priv->gpio_intr_en = gpio_base + GPIO_INT_EN00;
- ngpio = 41;
- break;
- case XLP_GPIO_VARIANT_XLP208:
- case XLP_GPIO_VARIANT_XLP316:
- priv->gpio_out_en = gpio_base + GPIO_OUTPUT_EN;
- priv->gpio_paddrv = gpio_base + GPIO_PADDRV;
- priv->gpio_intr_stat = gpio_base + GPIO_3XX_INT_STAT;
- priv->gpio_intr_type = gpio_base + GPIO_3XX_INT_TYPE;
- priv->gpio_intr_pol = gpio_base + GPIO_3XX_INT_POL;
- priv->gpio_intr_en = gpio_base + GPIO_3XX_INT_EN00;
-
- ngpio = (soc_type == XLP_GPIO_VARIANT_XLP208) ? 42 : 57;
- break;
- case XLP_GPIO_VARIANT_XLP980:
- case XLP_GPIO_VARIANT_XLP532:
- case GPIO_VARIANT_VULCAN:
- priv->gpio_out_en = gpio_base + GPIO_9XX_OUTPUT_EN;
- priv->gpio_paddrv = gpio_base + GPIO_9XX_PADDRV;
- priv->gpio_intr_stat = gpio_base + GPIO_9XX_INT_STAT;
- priv->gpio_intr_type = gpio_base + GPIO_9XX_INT_TYPE;
- priv->gpio_intr_pol = gpio_base + GPIO_9XX_INT_POL;
- priv->gpio_intr_en = gpio_base + GPIO_9XX_INT_EN00;
-
- if (soc_type == XLP_GPIO_VARIANT_XLP980)
- ngpio = 66;
- else if (soc_type == XLP_GPIO_VARIANT_XLP532)
- ngpio = 67;
- else
- ngpio = 70;
- break;
- default:
- dev_err(&pdev->dev, "Unknown Processor type!\n");
- return -ENODEV;
- }
+ priv->gpio_out_en = gpio_base + GPIO_9XX_OUTPUT_EN;
+ priv->gpio_paddrv = gpio_base + GPIO_9XX_PADDRV;
+ priv->gpio_intr_stat = gpio_base + GPIO_9XX_INT_STAT;
+ priv->gpio_intr_type = gpio_base + GPIO_9XX_INT_TYPE;
+ priv->gpio_intr_pol = gpio_base + GPIO_9XX_INT_POL;
+ priv->gpio_intr_en = gpio_base + GPIO_9XX_INT_EN00;
bitmap_zero(priv->gpio_enabled_mask, XLP_MAX_NR_GPIO);
@@ -373,8 +263,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
gc->label = dev_name(&pdev->dev);
gc->base = 0;
gc->parent = &pdev->dev;
- gc->ngpio = ngpio;
- gc->of_node = pdev->dev.of_node;
+ gc->ngpio = 70;
gc->direction_output = xlp_gpio_dir_output;
gc->direction_input = xlp_gpio_dir_input;
gc->set = xlp_gpio_set;
@@ -382,19 +271,6 @@ static int xlp_gpio_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
- /* XLP(MIPS) has fixed range for GPIO IRQs, Vulcan(ARM64) does not */
- if (soc_type != GPIO_VARIANT_VULCAN) {
- irq_base = devm_irq_alloc_descs(&pdev->dev, -1,
- XLP_GPIO_IRQ_BASE,
- gc->ngpio, 0);
- if (irq_base < 0) {
- dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
- return irq_base;
- }
- } else {
- irq_base = 0;
- }
-
girq = &gc->irq;
girq->chip = &xlp_gpio_irq_chip;
girq->parent_handler = xlp_gpio_generic_handler;
@@ -405,7 +281,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
if (!girq->parents)
return -ENOMEM;
girq->parents[0] = irq;
- girq->first = irq_base;
+ girq->first = 0;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
@@ -420,8 +296,8 @@ static int xlp_gpio_probe(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp_gpio_acpi_match[] = {
- { "BRCM9006", GPIO_VARIANT_VULCAN },
- { "CAV9006", GPIO_VARIANT_VULCAN },
+ { "BRCM9006" },
+ { "CAV9006" },
{},
};
MODULE_DEVICE_TABLE(acpi, xlp_gpio_acpi_match);
@@ -430,7 +306,6 @@ MODULE_DEVICE_TABLE(acpi, xlp_gpio_acpi_match);
static struct platform_driver xlp_gpio_driver = {
.driver = {
.name = "xlp-gpio",
- .of_match_table = xlp_gpio_of_ids,
.acpi_match_table = ACPI_PTR(xlp_gpio_acpi_match),
},
.probe = xlp_gpio_probe,
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 985e8589c58b..c0f6a25c3279 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -219,14 +219,13 @@ EXPORT_SYMBOL_GPL(acpi_gpio_get_io_resource);
static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
struct acpi_gpio_event *event)
{
+ struct device *parent = acpi_gpio->chip->parent;
int ret, value;
ret = request_threaded_irq(event->irq, NULL, event->handler,
event->irqflags | IRQF_ONESHOT, "ACPI:Event", event);
if (ret) {
- dev_err(acpi_gpio->chip->parent,
- "Failed to setup interrupt handler for %d\n",
- event->irq);
+ dev_err(parent, "Failed to setup interrupt handler for %d\n", event->irq);
return;
}
@@ -347,8 +346,7 @@ static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
return false;
err:
- pr_err_once("Error invalid value for gpiolib_acpi.ignore_wake: %s\n",
- ignore_wake);
+ pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_wake: %s\n", ignore_wake);
return false;
}
@@ -579,36 +577,24 @@ void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
}
EXPORT_SYMBOL_GPL(acpi_dev_remove_driver_gpios);
-static void devm_acpi_dev_release_driver_gpios(struct device *dev, void *res)
+static void acpi_dev_release_driver_gpios(void *adev)
{
- acpi_dev_remove_driver_gpios(ACPI_COMPANION(dev));
+ acpi_dev_remove_driver_gpios(adev);
}
int devm_acpi_dev_add_driver_gpios(struct device *dev,
const struct acpi_gpio_mapping *gpios)
{
- void *res;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
int ret;
- res = devres_alloc(devm_acpi_dev_release_driver_gpios, 0, GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), gpios);
- if (ret) {
- devres_free(res);
+ ret = acpi_dev_add_driver_gpios(adev, gpios);
+ if (ret)
return ret;
- }
- devres_add(dev, res);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_acpi_dev_add_driver_gpios);
-void devm_acpi_dev_remove_driver_gpios(struct device *dev)
-{
- WARN_ON(devres_release(dev, devm_acpi_dev_release_driver_gpios, NULL, NULL));
+ return devm_add_action_or_reset(dev, acpi_dev_release_driver_gpios, adev);
}
-EXPORT_SYMBOL_GPL(devm_acpi_dev_remove_driver_gpios);
+EXPORT_SYMBOL_GPL(devm_acpi_dev_add_driver_gpios);
static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
const char *name, int index,
@@ -941,7 +927,7 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
if (info.gpioint &&
(*dflags == GPIOD_OUT_LOW || *dflags == GPIOD_OUT_HIGH)) {
- dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+ dev_dbg(&adev->dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
return ERR_PTR(-ENOENT);
}
@@ -1056,10 +1042,17 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind
irq_flags = acpi_dev_get_irq_type(info.triggering,
info.polarity);
- /* Set type if specified and different than the current one */
- if (irq_flags != IRQ_TYPE_NONE &&
- irq_flags != irq_get_trigger_type(irq))
- irq_set_irq_type(irq, irq_flags);
+ /*
+ * If the IRQ is not already in use then set type
+ * if specified and different than the current one.
+ */
+ if (can_request_irq(irq, irq_flags)) {
+ if (irq_flags != IRQ_TYPE_NONE &&
+ irq_flags != irq_get_trigger_type(irq))
+ irq_set_irq_type(irq, irq_flags);
+ } else {
+ dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
+ }
return irq;
}
@@ -1346,6 +1339,9 @@ void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
/* Set default fwnode to parent's one if present */
if (gc->parent)
ACPI_COMPANION_SET(&gdev->dev, ACPI_COMPANION(gc->parent));
+
+ if (gc->fwnode)
+ device_set_node(&gdev->dev, gc->fwnode);
}
static int acpi_gpio_package_count(const union acpi_object *obj)
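The devm_acpi_dev_add_driver_gpios() rewrite just above trades a hand-rolled devres_alloc()/devres_free()/devres_add() dance for devm_add_action_or_reset(), which registers a teardown callback and, if the registration itself fails, invokes that callback immediately before returning the error. The shape of the conversion, reduced to a sketch with hypothetical names:

static void foo_undo(void *cookie)
{
	/* inverse of the setup step */
}

static int devm_foo_setup(struct device *dev, void *cookie)
{
	int ret = foo_setup(cookie);	/* hypothetical setup call */

	if (ret)
		return ret;

	/* On registration failure this runs foo_undo(cookie) itself,
	 * so the caller needs no manual unwind path. */
	return devm_add_action_or_reset(dev, foo_undo, cookie);
}

With a plain action there is no custom devres node left to release early, which is why devm_acpi_dev_remove_driver_gpios() goes away in the same hunk.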
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 0ad288ab6262..91dcf2c6cdd8 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -1046,6 +1046,9 @@ void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
if (gc->parent)
gdev->dev.of_node = gc->parent->of_node;
+ if (gc->fwnode)
+ gc->of_node = to_of_node(gc->fwnode);
+
/* If the gpiochip has an assigned OF node this takes precedence */
if (gc->of_node)
gdev->dev.of_node = gc->of_node;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index abfbf546d159..3859911b61e9 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -422,8 +422,16 @@ static int devprop_gpiochip_set_names(struct gpio_chip *chip)
if (count > chip->ngpio)
count = chip->ngpio;
- for (i = 0; i < count; i++)
- gdev->descs[i].name = names[chip->offset + i];
+ for (i = 0; i < count; i++) {
+ /*
+ * Allow overriding "fixed" names provided by the GPIO
+ * provider. The "fixed" names are more often than not
+ * generic and less informative than the names given in
+ * device properties.
+ */
+ if (names[chip->offset + i] && names[chip->offset + i][0])
+ gdev->descs[i].name = names[chip->offset + i];
+ }
kfree(names);
@@ -593,12 +601,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
- struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL;
- unsigned long flags;
- int ret = 0;
- unsigned i;
- int base = gc->base;
+ struct fwnode_handle *fwnode = NULL;
struct gpio_device *gdev;
+ unsigned long flags;
+ int base = gc->base;
+ unsigned int i;
+ int ret = 0;
+ u32 ngpios;
+
+ if (gc->fwnode)
+ fwnode = gc->fwnode;
+ else if (gc->parent)
+ fwnode = dev_fwnode(gc->parent);
/*
* First: allocate and populate the internal stat container, and
@@ -646,6 +660,26 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
goto err_free_dev_name;
}
+ /*
+ * Try the device properties if the driver didn't supply the number
+ * of GPIO lines.
+ */
+ if (gc->ngpio == 0) {
+ ret = device_property_read_u32(&gdev->dev, "ngpios", &ngpios);
+ if (ret == -ENODATA)
+ /*
+ * -ENODATA means the property was not found. Fall through
+ * with ngpios == 0 so the zero-lines check below reports
+ * the error to the user; any other error code means the
+ * supplied value is invalid, so propagate it as-is.
+ */
+ ngpios = 0;
+ else if (ret)
+ goto err_free_descs;
+
+ gc->ngpio = ngpios;
+ }
+
if (gc->ngpio == 0) {
chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
ret = -EINVAL;
@@ -708,10 +742,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
- if (gc->names)
+ if (gc->names) {
ret = gpiochip_set_desc_names(gc);
- else
- ret = devprop_gpiochip_set_names(gc);
+ if (ret)
+ goto err_remove_from_list;
+ }
+ ret = devprop_gpiochip_set_names(gc);
if (ret)
goto err_remove_from_list;
@@ -3487,11 +3523,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
*/
void gpiod_add_lookup_table(struct gpiod_lookup_table *table)
{
- mutex_lock(&gpio_lookup_lock);
-
- list_add_tail(&table->list, &gpio_lookup_list);
-
- mutex_unlock(&gpio_lookup_lock);
+ gpiod_add_lookup_tables(&table, 1);
}
EXPORT_SYMBOL_GPL(gpiod_add_lookup_table);
@@ -3540,6 +3572,17 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
}
EXPORT_SYMBOL_GPL(gpiod_add_hogs);
+void gpiod_remove_hogs(struct gpiod_hog *hogs)
+{
+ struct gpiod_hog *hog;
+
+ mutex_lock(&gpio_machine_hogs_mutex);
+ for (hog = &hogs[0]; hog->chip_label; hog++)
+ list_del(&hog->list);
+ mutex_unlock(&gpio_machine_hogs_mutex);
+}
+EXPORT_SYMBOL_GPL(gpiod_remove_hogs);
+
static struct gpiod_lookup_table *gpiod_find_lookup_table(struct device *dev)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
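After the gpiochip_add_data_with_key() hunk above, a driver may leave gc->ngpio at zero and let the core fetch the line count from the standard "ngpios" device property. From the driver's side the contract looks like this sketch:

/* Leave ngpio unset: 0 now means "read the ngpios property". */
gc->ngpio = 0;
ret = gpiochip_add_data(gc, priv);
if (ret)
	return ret;	/* -EINVAL if neither the driver nor the
			 * firmware supplied a line count */

Note the related reordering in the same hunk: driver-supplied names via gpiochip_set_desc_names() no longer short-circuit devprop_gpiochip_set_names(), so non-empty names from device properties now override the driver's generic ones.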
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0039df26854b..b1f22e457fd0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -8,6 +8,7 @@
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
+ select DRM_NOMODESET
select DRM_PANEL_ORIENTATION_QUIRKS
select HDMI
select FB_CMDLINE
@@ -211,20 +212,13 @@ config DRM_TTM_HELPER
Helpers for ttm-based gem objects
config DRM_GEM_CMA_HELPER
- bool
+ tristate
depends on DRM
help
Choose this if you need the GEM CMA helper functions
-config DRM_KMS_CMA_HELPER
- bool
- depends on DRM
- select DRM_GEM_CMA_HELPER
- help
- Choose this if you need the KMS CMA helper functions
-
config DRM_GEM_SHMEM_HELPER
- bool
+ tristate
depends on DRM && MMU
help
Choose this if you need the GEM shmem helper functions
@@ -394,6 +388,8 @@ source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+source "drivers/gpu/drm/sprd/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
depends on DRM && PCI && MMU && HYPERV
@@ -492,6 +488,15 @@ config DRM_EXPORT_FOR_TESTS
config DRM_PANEL_ORIENTATION_QUIRKS
tristate
+# Separate option because nomodeset parameter is global and expected built-in
+config DRM_NOMODESET
+ bool
+ default n
+
config DRM_LIB_RANDOM
bool
default n
+
+config DRM_PRIVACY_SCREEN
+ bool
+ default n
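DRM_NOMODESET is a separate bool option because the nomodeset kernel parameter has to be parsed even when DRM proper is built as a module; selecting it from the DRM menu entry pulls the small built-in object in unconditionally. The consumer side is a query that native drivers call early in probe; a hedged sketch (the helper name drm_firmware_drivers_only() comes from the same patch series, and the call site is illustrative):

static int foo_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	/* Honour nomodeset: step aside so only firmware-based
	 * framebuffer drivers (e.g. simpledrm) drive the display. */
	if (drm_firmware_drivers_only())
		return -ENODEV;

	/* ... normal device setup ... */
	return 0;
}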
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0dff40bb863c..301a44dc18e3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,37 +4,44 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
drm-y := drm_aperture.o drm_auth.o drm_cache.o \
- drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
+ drm_file.o drm_gem.o drm_ioctl.o \
drm_drv.o \
- drm_sysfs.o drm_hashtab.o drm_mm.o \
+ drm_sysfs.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o drm_displayid.o \
- drm_encoder_slave.o \
drm_trace_points.o drm_prime.o \
- drm_rect.o drm_vma_manager.o drm_flip_work.o \
+ drm_vma_manager.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
- drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
+ drm_client_modeset.o drm_atomic_uapi.o \
drm_managed.o drm_vblank_work.o
drm-$(CONFIG_DRM_LEGACY) += drm_agpsupport.o drm_bufs.o drm_context.o drm_dma.o \
- drm_legacy_misc.o drm_lock.o drm_memory.o drm_scatter.o \
- drm_vm.o
+ drm_hashtab.o drm_irq.o drm_legacy_misc.o drm_lock.o \
+ drm_memory.o drm_scatter.o drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
-drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
-drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_PCI) += drm_pci.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm-$(CONFIG_DRM_PRIVACY_SCREEN) += drm_privacy_screen.o drm_privacy_screen_x86.o
obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
+obj-$(CONFIG_DRM_NOMODESET) += drm_nomodeset.o
+
+drm_cma_helper-y := drm_gem_cma_helper.o
+drm_cma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_cma_helper.o
+obj-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_cma_helper.o
+
+drm_shmem_helper-y := drm_gem_shmem_helper.o
+obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
+
drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
@@ -42,18 +49,18 @@ drm_ttm_helper-y := drm_gem_ttm_helper.o
obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
- drm_dsc.o drm_probe_helper.o \
+ drm_dsc.o drm_encoder_slave.o drm_flip_work.o drm_hdcp.o \
+ drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o drm_gem_atomic_helper.o \
drm_gem_framebuffer_helper.o \
drm_atomic_state_helper.o drm_damage_helper.o \
- drm_format_helper.o drm_self_refresh_helper.o
+ drm_format_helper.o drm_self_refresh_helper.o drm_rect.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
-drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
@@ -127,3 +134,4 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-$(CONFIG_DRM_SPRD) += sprd/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 653726588956..7fedbb725e17 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -45,7 +45,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
- amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
+ amdgpu_gem.o amdgpu_ring.o \
amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7d67aec6f4a2..d8b854fcbffa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -458,7 +458,6 @@ struct amdgpu_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_abo;
- struct dma_fence *excl;
unsigned shared_count;
struct dma_fence **shared;
struct dma_fence_cb cb;
@@ -813,6 +812,7 @@ struct amd_powerplay {
#define AMDGPU_RESET_MAGIC_NUM 64
#define AMDGPU_MAX_DF_PERFMONS 4
+#define AMDGPU_PRODUCT_NAME_LEN 64
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -1084,7 +1084,7 @@ struct amdgpu_device {
/* Chip product information */
char product_number[16];
- char product_name[32];
+ char product_name[AMDGPU_PRODUCT_NAME_LEN];
char serial[20];
atomic_t throttling_logging_enabled;
@@ -1097,7 +1097,9 @@ struct amdgpu_device {
pci_channel_state_t pci_channel_state;
struct amdgpu_reset_control *reset_cntl;
- uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
+ uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
+
+ bool ram_is_direct_mapped;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1318,6 +1320,8 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+void amdgpu_device_halt(struct amdgpu_device *adev);
+
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
@@ -1361,8 +1365,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
-long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
int amdgpu_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 7077f21f0021..6ca1db3c243f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -72,7 +72,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
if (!kfd_initialized)
return;
- adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, vf);
+ adev->kfd.dev = kgd2kfd_probe(adev, vf);
if (adev->kfd.dev)
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -233,19 +233,16 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
return r;
}
-void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
if (amdgpu_device_should_recover_gpu(adev))
amdgpu_device_gpu_recover(adev, NULL);
}
-int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool cp_mqd_gfx9)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
struct amdgpu_bo_param bp;
int r;
@@ -314,7 +311,7 @@ allocate_mem_reserve_bo_failed:
return r;
}
-void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
{
struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
@@ -325,10 +322,9 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
amdgpu_bo_unref(&(bo));
}
-int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
void **mem_obj)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
struct amdgpu_bo_user *ubo;
struct amdgpu_bo_param bp;
@@ -355,18 +351,16 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
return 0;
}
-void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
{
struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;
amdgpu_bo_unref(&bo);
}
-uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
enum kgd_engine_type type)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
switch (type) {
case KGD_ENGINE_PFP:
return adev->gfx.pfp_fw_version;
@@ -399,11 +393,9 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
return 0;
}
-void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
struct kfd_local_mem_info *mem_info)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
memset(mem_info, 0, sizeof(*mem_info));
mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
@@ -428,19 +420,15 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
mem_info->mem_clk_max = 100;
}
-uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
if (adev->gfx.funcs->get_gpu_clock_counter)
return adev->gfx.funcs->get_gpu_clock_counter(adev);
return 0;
}
-uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
/* the sclk is in quanta of 10kHz */
if (amdgpu_sriov_vf(adev))
return adev->clock.default_sclk / 100;
@@ -450,9 +438,8 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
return 100;
}
-void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *cu_info)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
memset(cu_info, 0, sizeof(*cu_info));
@@ -473,13 +460,12 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
cu_info->lds_size = acu_info.lds_size;
}
-int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
- struct kgd_dev **dma_buf_kgd,
+int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
+ struct amdgpu_device **dmabuf_adev,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
uint32_t *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
@@ -507,8 +493,8 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
goto out_put;
r = 0;
- if (dma_buf_kgd)
- *dma_buf_kgd = (struct kgd_dev *)adev;
+ if (dmabuf_adev)
+ *dmabuf_adev = adev;
if (bo_size)
*bo_size = amdgpu_bo_size(bo);
if (metadata_buffer)
@@ -528,32 +514,11 @@ out_put:
return r;
}
-uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
+uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
+ struct amdgpu_device *src)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-
- return amdgpu_vram_mgr_usage(vram_man);
-}
-
-uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->gmc.xgmi.hive_id;
-}
-
-uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->unique_id;
-}
-
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
-{
- struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
- struct amdgpu_device *adev = (struct amdgpu_device *)dst;
+ struct amdgpu_device *peer_adev = src;
+ struct amdgpu_device *adev = dst;
int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
if (ret < 0) {
@@ -565,16 +530,18 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
return (uint8_t)ret;
}
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min)
+int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
+ struct amdgpu_device *src,
+ bool is_min)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)dst, *peer_adev;
+ struct amdgpu_device *adev = dst, *peer_adev;
int num_links;
if (adev->asic_type != CHIP_ALDEBARAN)
return 0;
if (src)
- peer_adev = (struct amdgpu_device *)src;
+ peer_adev = src;
/* num links returns 0 for indirect peers since indirect route is unknown. */
num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
@@ -589,9 +556,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev
return (num_links * 16 * 25000)/BITS_PER_BYTE;
}
-int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)dev;
int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
fls(adev->pm.pcie_mlw_mask)) - 1;
int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
@@ -647,39 +613,11 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
}
-uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->rmmio_remap.bus_addr;
-}
-
-uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->gds.gws_size;
-}
-
-uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->rev_id;
-}
-
-int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- return adev->gmc.noretry;
-}
-
-int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
+ enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct amdgpu_ring *ring;
@@ -730,10 +668,8 @@ err:
return ret;
}
-void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
amdgpu_dpm_switch_power_profile(adev,
PP_SMC_POWER_PROFILE_COMPUTE,
!idle);
@@ -747,10 +683,9 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
return false;
}
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
+ uint16_t vmid)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
if (adev->family == AMDGPU_FAMILY_AI) {
int i;
@@ -763,10 +698,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
return 0;
}
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
- enum TLB_FLUSH_TYPE flush_type)
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, enum TLB_FLUSH_TYPE flush_type)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
bool all_hub = false;
if (adev->family == AMDGPU_FAMILY_AI)
@@ -775,21 +709,18 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
}
-bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
+bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
return adev->have_atomics_support;
}
-void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd)
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct ras_err_data err_data = {0, 0, 0, NULL};
/* CPU MCA will handle page retirement if connected_to_cpu is 1 */
if (!adev->gmc.xgmi.connected_to_cpu)
- amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL);
- else
- amdgpu_amdkfd_gpu_reset(kgd);
+ amdgpu_umc_poison_handler(adev, &err_data, reset);
+ else if (reset)
+ amdgpu_amdkfd_gpu_reset(adev);
}
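Nearly every hunk in this file is the same mechanical change: functions that took an opaque struct kgd_dev * and immediately cast it (often via the get_amdgpu_device() helpers deleted in the per-ASIC files below) now take the real struct amdgpu_device * instead. Schematically, with foo and some_field as illustrative stand-ins:

/* before: opaque handle, cast at every entry point */
int foo(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->some_field;
}

/* after: the real type flows through, the cast disappears */
int foo(struct amdgpu_device *adev)
{
	return adev->some_field;
}

The cast only ever worked because the kgd_dev pointer always pointed at an amdgpu_device; passing the concrete type lets the compiler enforce that.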
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a15a4787c7ee..ac841ae8f5cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -144,14 +144,16 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
-int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
+ enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
-void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
-bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
- enum TLB_FLUSH_TYPE flush_type);
+void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
+bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
+ uint16_t vmid);
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, enum TLB_FLUSH_TYPE flush_type);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
@@ -159,7 +161,7 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
-void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
int queue_bit);
@@ -198,37 +200,35 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
}
#endif
/* Shared API */
-int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9);
-void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj);
-void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj);
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
+int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
+ void **mem_obj);
+void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
-uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
enum kgd_engine_type type);
-void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
struct kfd_local_mem_info *mem_info);
-uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);
-uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
-int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
- struct kgd_dev **dmabuf_kgd,
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
+void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev,
+ struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
+ struct amdgpu_device **dmabuf_adev,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
uint32_t *flags);
-uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
-uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
-uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
-uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
-uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
-uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
-int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min);
-int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
+uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
+ struct amdgpu_device *src);
+int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
+ struct amdgpu_device *src,
+ bool is_min);
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
@@ -258,45 +258,55 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
(&((struct amdgpu_fpriv *) \
((struct drm_file *)(drm_priv))->driver_priv)->vm)
-int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct file *filp, u32 pasid,
void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv);
+void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
+ void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
- struct kgd_dev *kgd, uint64_t va, uint64_t size,
+ struct amdgpu_device *adev, uint64_t va, uint64_t size,
void *drm_priv, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
+ bool *table_freed);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
- struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
-int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
+ struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
+int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
struct kgd_mem *mem, void **kptr, uint64_t *size);
-void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem);
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev,
+ struct kgd_mem *mem);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence **ef);
-int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *info);
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
struct dma_buf *dmabuf,
uint64_t va, void *drm_priv,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset);
-int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
-void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd);
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
+ bool reset);
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
+
+/**
+ * amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released
+ *
+ * Allows KFD to release its resources associated with the GEM object.
+ */
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
@@ -324,7 +334,7 @@ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf);
+struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
@@ -348,7 +358,7 @@ static inline void kgd2kfd_exit(void)
}
static inline
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
+struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
return NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 5a7f680bcb3f..abe93b3ff765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -57,11 +57,6 @@
(*dump)[i++][1] = RREG32(addr); \
} while (0)
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
return (struct v9_sdma_mqd *)mqd;
@@ -123,10 +118,9 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
return sdma_rlc_reg_offset;
}
-int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
@@ -193,11 +187,10 @@ int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
+int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
engine_id, queue_id);
uint32_t i = 0, reg;
@@ -225,9 +218,9 @@ int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev,
+ void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -244,10 +237,9 @@ bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
index ce08131b7b5f..756c1a5679c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
@@ -20,11 +20,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm);
-int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
+int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs);
-bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev,
+ void *mqd);
+int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 960acf68150a..7b7f4b2764c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -39,37 +39,26 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, mec, pipe, queue, vmid);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
static uint64_t get_queue_mask(struct amdgpu_device *adev,
@@ -81,33 +70,29 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
return 1ull << bit;
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
/* APE1 no longer exists on GFX9 */
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -150,22 +135,21 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
* but still works
*/
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
@@ -218,12 +202,11 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v10_sdma_mqd *)mqd;
}
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_compute_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, hqd_base, data;
@@ -231,7 +214,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
m = get_mqd(mqd);
pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
@@ -296,16 +279,15 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct v10_compute_mqd *m;
uint32_t mec, pipe;
@@ -313,7 +295,7 @@ static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -349,16 +331,15 @@ static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
out_unlock:
spin_unlock(&adev->gfx.kiq.ring_lock);
- release_queue(kgd);
+ release_queue(adev);
return r;
}
-static int kgd_hqd_dump(struct kgd_dev *kgd,
+static int kgd_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
@@ -372,13 +353,13 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
DUMP_REG(reg);
- release_queue(kgd);
+ release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -386,10 +367,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
@@ -456,11 +436,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
engine_id, queue_id);
uint32_t i = 0, reg;
@@ -488,15 +467,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -506,13 +485,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -529,12 +507,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
uint32_t temp;
@@ -548,7 +525,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
int retry;
#endif
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -633,20 +610,19 @@ loop:
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out.\n");
- release_queue(kgd);
+ release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
@@ -683,11 +659,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
@@ -696,12 +671,12 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
+static int kgd_address_watch_disable(struct amdgpu_device *adev)
{
return 0;
}
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
+static int kgd_address_watch_execute(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
@@ -710,11 +685,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd,
return 0;
}
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
+static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
@@ -735,18 +709,16 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return 0;
}
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
vmid);
@@ -757,12 +729,10 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
-static void program_trap_handler_settings(struct kgd_dev *kgd,
+static void program_trap_handler_settings(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
/*
* Program TBA registers
@@ -781,7 +751,7 @@ static void program_trap_handler_settings(struct kgd_dev *kgd,
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
upper_32_bits(tma_addr >> 8));
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
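The change in this file is mechanical across every callback: the opaque struct kgd_dev handle and its get_amdgpu_device() cast helper are dropped in favour of passing struct amdgpu_device directly. A minimal before/after sketch of that shape, where some_cb() and do_work() are hypothetical placeholders rather than functions from this patch:

/* Before: each callback received an opaque handle and cast it back. */
static int some_cb(struct kgd_dev *kgd, uint32_t arg)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);	/* (struct amdgpu_device *)kgd */

	return do_work(adev, arg);
}

/* After: the amdgpu_device pointer is passed through directly, so the
 * cast helper and the local variable disappear from every function. */
static int some_cb(struct amdgpu_device *adev, uint32_t arg)
{
	return do_work(adev, arg);
}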
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index dac0d751d5af..1f37d3574001 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -38,37 +38,26 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, mec, pipe, queue, vmid);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
static uint64_t get_queue_mask(struct amdgpu_device *adev,
@@ -80,34 +69,30 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
return 1ull << bit;
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid,
+static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
/* APE1 no longer exists on GFX9 */
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
/* ATC is defeatured on Sienna_Cichlid */
-static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid,
+static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid,
unsigned int vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;
/* Mapping vmid to pasid also for IH block */
@@ -118,22 +103,21 @@ static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid,
return 0;
}
-static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id)
+static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
@@ -188,12 +172,11 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v10_sdma_mqd *)mqd;
}
-static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_compute_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, hqd_base, data;
@@ -201,7 +184,7 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
m = get_mqd(mqd);
pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
/* HIQ is set during driver init period with vmid set to 0*/
if (m->cp_hqd_vmid == 0) {
@@ -281,16 +264,15 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct v10_compute_mqd *m;
uint32_t mec, pipe;
@@ -298,7 +280,7 @@ static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -334,16 +316,15 @@ static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
out_unlock:
spin_unlock(&adev->gfx.kiq.ring_lock);
- release_queue(kgd);
+ release_queue(adev);
return r;
}
-static int hqd_dump_v10_3(struct kgd_dev *kgd,
+static int hqd_dump_v10_3(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
@@ -357,13 +338,13 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
DUMP_REG(reg);
- release_queue(kgd);
+ release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -371,10 +352,9 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd,
return 0;
}
-static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hqd_sdma_load_v10_3(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
@@ -441,11 +421,10 @@ static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
+static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
engine_id, queue_id);
uint32_t i = 0, reg;
@@ -473,15 +452,15 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
return 0;
}
-static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -491,13 +470,13 @@ static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd)
+static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev,
+ void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -514,18 +493,17 @@ static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
uint32_t temp;
struct v10_compute_mqd *m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -555,20 +533,19 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue pipe %d queue %d preemption failed\n",
pipe_id, queue_id);
- release_queue(kgd);
+ release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
@@ -606,12 +583,12 @@ static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
}
-static int address_watch_disable_v10_3(struct kgd_dev *kgd)
+static int address_watch_disable_v10_3(struct amdgpu_device *adev)
{
return 0;
}
-static int address_watch_execute_v10_3(struct kgd_dev *kgd,
+static int address_watch_execute_v10_3(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
@@ -620,11 +597,10 @@ static int address_watch_execute_v10_3(struct kgd_dev *kgd,
return 0;
}
-static int wave_control_execute_v10_3(struct kgd_dev *kgd,
+static int wave_control_execute_v10_3(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
@@ -645,28 +621,24 @@ static int wave_control_execute_v10_3(struct kgd_dev *kgd,
return 0;
}
-static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd,
+static uint32_t address_watch_get_offset_v10_3(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return 0;
}
-static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/* SDMA is on gfxhub as well for Navi1* series */
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
-static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
+static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
/*
* Program TBA registers
@@ -685,15 +657,14 @@ static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
upper_32_bits(tma_addr >> 8));
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
#if 0
-uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
+uint32_t enable_debug_trap_v10_3(struct amdgpu_device *adev,
uint32_t trap_debug_wave_launch_mode,
uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
uint32_t orig_wave_cntl_value;
uint32_t orig_stall_vmid;
@@ -720,10 +691,8 @@ uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
return 0;
}
-uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd)
+uint32_t disable_debug_trap_v10_3(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
mutex_lock(&adev->grbm_idx_mutex);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
@@ -733,11 +702,10 @@ uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd)
return 0;
}
-uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd,
+uint32_t set_wave_launch_trap_override_v10_3(struct amdgpu_device *adev,
uint32_t trap_override,
uint32_t trap_mask)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
@@ -762,11 +730,10 @@ uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd,
return 0;
}
-uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd,
+uint32_t set_wave_launch_mode_v10_3(struct amdgpu_device *adev,
uint8_t wave_launch_mode,
uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
bool is_stall_mode;
bool is_mode_set;
@@ -805,16 +772,14 @@ uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd,
* sem_rearm_wait_time -- Wait Count for Semaphore re-arm.
* deq_retry_wait_time -- Wait Count for Global Wave Syncs.
*/
-void get_iq_wait_times_v10_3(struct kgd_dev *kgd,
+void get_iq_wait_times_v10_3(struct amdgpu_device *adev,
uint32_t *wait_times)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
}
-void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd,
+void build_grace_period_packet_info_v10_3(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index b91d27e39bad..36528dad7684 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -82,68 +82,54 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
mutex_lock(&adev->srbm_mutex);
WREG32(mmSRBM_GFX_CNTL, value);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
WREG32(mmSRBM_GFX_CNTL, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
WREG32(mmSH_MEM_BASES, sh_mem_bases);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -165,21 +151,20 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
return 0;
}
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
@@ -207,12 +192,11 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
return (struct cik_sdma_rlc_registers *)mqd;
}
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
@@ -220,7 +204,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
@@ -239,25 +223,24 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
- release_queue(kgd);
+ release_queue(adev);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32(mmCP_HQD_ACTIVE, data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_dump(struct kgd_dev *kgd,
+static int kgd_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do { \
@@ -271,7 +254,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
@@ -281,7 +264,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
DUMP_REG(reg);
- release_queue(kgd);
+ release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -289,10 +272,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
unsigned long end_jiffies;
uint32_t sdma_rlc_reg_offset;
@@ -345,11 +327,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
uint32_t i = 0, reg;
@@ -372,15 +353,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
act = RREG32(mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -390,13 +371,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32(mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -412,12 +392,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
enum hqd_dequeue_request_type type;
unsigned long flags, end_jiffies;
@@ -426,7 +405,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
if (amdgpu_in_reset(adev))
return -EIO;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
switch (reset_type) {
@@ -504,20 +483,19 @@ loop:
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out\n");
- release_queue(kgd);
+ release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
@@ -551,9 +529,8 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
+static int kgd_address_watch_disable(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
union TCP_WATCH_CNTL_BITS cntl;
unsigned int i;
@@ -571,13 +548,12 @@ static int kgd_address_watch_disable(struct kgd_dev *kgd)
return 0;
}
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
+static int kgd_address_watch_execute(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
uint32_t addr_lo)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
union TCP_WATCH_CNTL_BITS cntl;
cntl.u32All = cntl_val;
@@ -602,11 +578,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd,
return 0;
}
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
+static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data;
mutex_lock(&adev->grbm_idx_mutex);
@@ -627,18 +602,17 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}
-static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
@@ -646,21 +620,17 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static void set_scratch_backing_va(struct kgd_dev *kgd,
+static void set_scratch_backing_va(struct amdgpu_device *adev,
uint64_t va, uint32_t vmid)
{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID\n");
return;
@@ -676,10 +646,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
* @vmid: vmid pointer
* read vmid from register (CIK).
*/
-static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
+static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
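On CIK the SRBM bank selection is a raw mmSRBM_GFX_CNTL write rather than a grbm_select helper, but the locking discipline is the same in all of these files: every HQD register sequence is bracketed by acquire_queue()/release_queue(), which wrap lock_srbm()/unlock_srbm() and hence adev->srbm_mutex. A schematic of the idiom, with hqd_op() standing in for any of the callbacks above:

static int hqd_op(struct amdgpu_device *adev, uint32_t pipe_id,
		  uint32_t queue_id)
{
	/* Select the (mec, pipe, queue) register bank under srbm_mutex. */
	acquire_queue(adev, pipe_id, queue_id);

	/* Banked access: this register now refers to the selected HQD. */
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	/* Restore the default bank and drop srbm_mutex. */
	release_queue(adev);
	return 0;
}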
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 5ce0ce704a21..52832cd69a93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -39,68 +39,54 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
mutex_lock(&adev->srbm_mutex);
WREG32(mmSRBM_GFX_CNTL, value);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
WREG32(mmSRBM_GFX_CNTL, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
WREG32(mmSH_MEM_BASES, sh_mem_bases);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -123,21 +109,20 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
return 0;
}
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
@@ -165,12 +150,11 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct vi_sdma_mqd *)mqd;
}
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
@@ -178,7 +162,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
/* HIQ is set during driver init period with vmid set to 0*/
if (m->cp_hqd_vmid == 0) {
@@ -206,7 +190,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
* on ASICs that do not support context-save.
* EOP writes/reads can start anywhere in the ring.
*/
- if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
+ if (adev->asic_type != CHIP_TONGA) {
WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
@@ -226,25 +210,24 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
- release_queue(kgd);
+ release_queue(adev);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32(mmCP_HQD_ACTIVE, data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_dump(struct kgd_dev *kgd,
+static int kgd_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do { \
@@ -258,7 +241,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
@@ -268,7 +251,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
DUMP_REG(reg);
- release_queue(kgd);
+ release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -276,10 +259,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
unsigned long end_jiffies;
uint32_t sdma_rlc_reg_offset;
@@ -331,11 +313,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
uint32_t i = 0, reg;
@@ -367,15 +348,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
act = RREG32(mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -385,13 +366,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32(mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -407,12 +387,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
enum hqd_dequeue_request_type type;
unsigned long flags, end_jiffies;
@@ -422,7 +401,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
if (amdgpu_in_reset(adev))
return -EIO;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -502,20 +481,19 @@ loop:
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out.\n");
- release_queue(kgd);
+ release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
@@ -549,11 +527,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
@@ -561,12 +538,12 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
+static int kgd_address_watch_disable(struct amdgpu_device *adev)
{
return 0;
}
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
+static int kgd_address_watch_execute(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
@@ -575,11 +552,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd,
return 0;
}
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
+static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
@@ -600,28 +576,24 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return 0;
}
-static void set_scratch_backing_va(struct kgd_dev *kgd,
+static void set_scratch_backing_va(struct amdgpu_device *adev,
uint64_t va, uint32_t vmid)
{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID\n");
return;
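The kgd_hqd_load() comment about the srbm_mutex->mm_sem->reservation_ww_class_mutex dependency is worth pausing on: read_user_wptr() can fault on the user-space pointer, so the queue has to be released before the read and re-acquired afterwards. The shape of that dance, reproduced schematically from the hunk above:

/* Drop the SRBM selection (and srbm_mutex) across the possibly
 * faulting user access, then reselect before writing the HQD. */
release_queue(adev);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
acquire_queue(adev, pipe_id, queue_id);
if (valid_wptr)
	WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);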
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index bcc1cbeb8799..1abf662a0e91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -46,37 +46,26 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, mec, pipe, queue, vmid);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
static uint64_t get_queue_mask(struct amdgpu_device *adev,
@@ -88,33 +77,29 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
return 1ull << bit;
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
/* APE1 no longer exists on GFX9 */
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -171,22 +156,21 @@ int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
* but still works
*/
-int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
+ WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
@@ -233,19 +217,18 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
-int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, hqd_base, data;
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
@@ -296,7 +279,7 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
lower_32_bits((uintptr_t)wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uintptr_t)wptr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
+ WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
(uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
@@ -308,16 +291,15 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
struct v9_mqd *m;
uint32_t mec, pipe;
@@ -325,7 +307,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
m = get_mqd(mqd);
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -361,16 +343,15 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
out_unlock:
spin_unlock(&adev->gfx.kiq.ring_lock);
- release_queue(kgd);
+ release_queue(adev);
return r;
}
-int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
+int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
@@ -384,13 +365,13 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
DUMP_REG(reg);
- release_queue(kgd);
+ release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -398,10 +379,9 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
@@ -468,11 +448,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
engine_id, queue_id);
uint32_t i = 0, reg;
@@ -500,31 +479,30 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
- act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+ acquire_queue(adev, pipe_id, queue_id);
+ act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
high = upper_32_bits(queue_address >> 8);
- if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
- high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
+ if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
+ high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
@@ -541,12 +519,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
uint32_t temp;
@@ -555,7 +532,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
if (amdgpu_in_reset(adev))
return -EIO;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -579,25 +556,24 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
- temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+ temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out.\n");
- release_queue(kgd);
+ release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
@@ -634,11 +610,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
@@ -647,12 +622,12 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
+int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev)
{
return 0;
}
-int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
+int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
@@ -661,17 +636,16 @@ int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
return 0;
}
-int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
+int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
INSTANCE_BROADCAST_WRITES, 1);
@@ -686,18 +660,16 @@ int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
+uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset)
{
return 0;
}
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
vmid);
@@ -750,7 +722,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
- reg_val = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
+ reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
queue_slot);
*wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
if (*wave_cnt != 0)
@@ -804,7 +776,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
*
* Reading registers referenced above involves programming GRBM appropriately
*/
-void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
+void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
int *pasid_wave_cnt, int *max_waves_per_cu)
{
int qidx;
@@ -818,10 +790,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
int pasid_tmp;
int max_queue_cnt;
int vmid_wave_cnt = 0;
- struct amdgpu_device *adev;
DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
- adev = get_amdgpu_device(kgd);
lock_spi_csq_mutexes(adev);
soc15_grbm_select(adev, 1, 0, 0, 0);
@@ -839,8 +809,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
- queue_map = RREG32(SOC15_REG_OFFSET(GC, 0,
- mmSPI_CSQ_WF_ACTIVE_STATUS));
+ queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS);
/*
 * Assumption: queue map encodes the following schema: four
@@ -882,30 +851,28 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
adev->gfx.cu_info.max_waves_per_simd;
}
-void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
/*
* Program TBA registers
*/
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+ WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_LO,
lower_32_bits(tba_addr >> 8));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+ WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_HI,
upper_32_bits(tba_addr >> 8));
/*
* Program TMA registers
*/
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+ WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_LO,
lower_32_bits(tma_addr >> 8));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+ WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_HI,
upper_32_bits(tma_addr >> 8));
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
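The WREG32(SOC15_REG_OFFSET(...)) to WREG32_SOC15(...) conversions in the hunks above are mechanical: the convenience macro folds the per-IP register offset lookup into the write itself. A sketch of its shape, assuming the usual soc15_common.h definition (which may differ in detail):

#define WREG32_SOC15(ip, inst, reg, value) \
	WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
	       value)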
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index c63591106879..24be49df26fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -22,48 +22,49 @@
-void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases);
-int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid);
-int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id);
+int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
-int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off);
-int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
+int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs);
-bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id);
+int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
-int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd);
-int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
+int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev);
+int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
uint32_t addr_lo);
-int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
+int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd);
-uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
+uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset);
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid);
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base);
-void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
+void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
int *pasid_wave_cnt, int *max_waves_per_cu);
-void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6348559608ce..f9bab963a948 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -60,12 +60,6 @@ static const char * const domain_bit_to_string[] = {
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
-
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
struct kgd_mem *mem)
{
@@ -126,8 +120,19 @@ static size_t amdgpu_amdkfd_acc_size(uint64_t size)
PAGE_ALIGN(size);
}
+/**
+ * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by the size
+ * of the buffer, including any space reserved for control structures
+ *
+ * @adev: Device to which the allocated BO belongs
+ * @size: Size of the buffer, in bytes, encapsulated by the BO. This should be
+ * equivalent to amdgpu_bo_size(bo)
+ * @alloc_flag: Flag used in allocating the BO, as noted above
+ *
+ * Return: 0 on success, -ENOMEM otherwise
+ */
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain, bool sg)
+ uint64_t size, u32 alloc_flag)
{
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
@@ -137,20 +142,24 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
acc_size = amdgpu_amdkfd_acc_size(size);
vram_needed = 0;
- if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- /* TTM GTT memory */
+ if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
system_mem_needed = acc_size + size;
ttm_mem_needed = acc_size + size;
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
- /* Userptr */
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ system_mem_needed = acc_size;
+ ttm_mem_needed = acc_size;
+ vram_needed = size;
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = acc_size + size;
ttm_mem_needed = acc_size;
- } else {
- /* VRAM and SG */
+ } else if (alloc_flag &
+ (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
system_mem_needed = acc_size;
ttm_mem_needed = acc_size;
- if (domain == AMDGPU_GEM_DOMAIN_VRAM)
- vram_needed = size;
+ } else {
+ pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
+ return -ENOMEM;
}
spin_lock(&kfd_mem_limit.mem_limit_lock);
@@ -166,64 +175,72 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
(adev->kfd.vram_used + vram_needed >
adev->gmc.real_vram_size - reserved_for_pt)) {
ret = -ENOMEM;
- } else {
- kfd_mem_limit.system_mem_used += system_mem_needed;
- kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
- adev->kfd.vram_used += vram_needed;
+ goto release;
}
+ /* Update memory accounting by decreasing available system
+ * memory, TTM memory and GPU memory as computed above
+ */
+ adev->kfd.vram_used += vram_needed;
+ kfd_mem_limit.system_mem_used += system_mem_needed;
+ kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+
+release:
spin_unlock(&kfd_mem_limit.mem_limit_lock);
return ret;
}
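The flag-driven classification above replaces the old domain+sg scheme with the caller's KFD allocation flags. A minimal standalone sketch of the same decision tree, with flag values defined locally for illustration (the authoritative constants live in the KFD uapi header, and acc stands in for amdgpu_amdkfd_acc_size()):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; see include/uapi/linux/kfd_ioctl.h. */
#define ALLOC_FLAGS_VRAM       (1u << 0)
#define ALLOC_FLAGS_GTT        (1u << 1)
#define ALLOC_FLAGS_USERPTR    (1u << 2)
#define ALLOC_FLAGS_DOORBELL   (1u << 3)
#define ALLOC_FLAGS_MMIO_REMAP (1u << 4)

/* Mirrors the reserve path: split one BO allocation across the three
 * accounting pools (system, TTM, VRAM). Returns 0, or -1 on a bad type. */
static int classify(uint32_t flags, uint64_t size, uint64_t acc,
		    uint64_t *sys, uint64_t *ttm, uint64_t *vram)
{
	*sys = *ttm = *vram = 0;
	if (flags & ALLOC_FLAGS_GTT) {
		*sys = acc + size;
		*ttm = acc + size;
	} else if (flags & ALLOC_FLAGS_VRAM) {
		*sys = acc;
		*ttm = acc;
		*vram = size;
	} else if (flags & ALLOC_FLAGS_USERPTR) {
		*sys = acc + size;
		*ttm = acc;
	} else if (flags & (ALLOC_FLAGS_DOORBELL | ALLOC_FLAGS_MMIO_REMAP)) {
		*sys = acc;
		*ttm = acc;
	} else {
		return -1;
	}
	return 0;
}

int main(void)
{
	uint64_t sys, ttm, vram;

	assert(classify(ALLOC_FLAGS_VRAM, 4096, 256, &sys, &ttm, &vram) == 0);
	printf("vram bo: sys=%" PRIu64 " ttm=%" PRIu64 " vram=%" PRIu64 "\n",
	       sys, ttm, vram);
	return 0;
}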
static void unreserve_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain, bool sg)
+ uint64_t size, u32 alloc_flag)
{
size_t acc_size;
acc_size = amdgpu_amdkfd_acc_size(size);
spin_lock(&kfd_mem_limit.mem_limit_lock);
- if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+
+ if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
kfd_mem_limit.system_mem_used -= (acc_size + size);
kfd_mem_limit.ttm_mem_used -= (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ kfd_mem_limit.system_mem_used -= acc_size;
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ adev->kfd.vram_used -= size;
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= (acc_size + size);
kfd_mem_limit.ttm_mem_used -= acc_size;
- } else {
+ } else if (alloc_flag &
+ (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
kfd_mem_limit.system_mem_used -= acc_size;
kfd_mem_limit.ttm_mem_used -= acc_size;
- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- adev->kfd.vram_used -= size;
- WARN_ONCE(adev->kfd.vram_used < 0,
- "kfd VRAM memory accounting unbalanced");
- }
+ } else {
+ pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
+ goto release;
}
- WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
- "kfd system memory accounting unbalanced");
+
+ WARN_ONCE(adev->kfd.vram_used < 0,
+ "KFD VRAM memory accounting unbalanced");
WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
- "kfd TTM memory accounting unbalanced");
+ "KFD TTM memory accounting unbalanced");
+ WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
+ "KFD system memory accounting unbalanced");
+release:
spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u32 domain = bo->preferred_domains;
- bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
+ u32 alloc_flags = bo->kfd_bo->alloc_flags;
+ u64 size = amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
- domain = AMDGPU_GEM_DOMAIN_CPU;
- sg = false;
- }
-
- unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
+ unreserve_mem_limit(adev, size, alloc_flags);
kfree(bo->kfd_bo);
}
-
/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
* reservation object.
*
@@ -691,10 +708,12 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
va + bo_size, vm);
- if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
- amdgpu_xgmi_same_hive(adev, bo_adev))) {
- /* Mappings on the local GPU and VRAM mappings in the
- * local hive share the original BO
+ if (adev == bo_adev ||
+ (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) ||
+ (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && amdgpu_xgmi_same_hive(adev, bo_adev))) {
+ /* Mappings on the local GPU, or VRAM mappings in the
+ * local hive, or userptr mappings in IOMMU direct-map mode
+ * share the original BO
*/
attachment[i]->type = KFD_MEM_ATT_SHARED;
bo[i] = mem->bo;
@@ -1272,12 +1291,60 @@ create_evict_fence_fail:
return ret;
}
-int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+/**
+ * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO according to the following criteria
+ * @bo: Handle of buffer object being pinned
+ * @domain: Domain into which BO should be pinned
+ *
+ * - USERPTR BOs are UNPINNABLE and will return an error
+ * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
+ *   PIN count incremented. It is valid to PIN a BO multiple times
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
+{
+ int ret = 0;
+
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret))
+ return ret;
+
+ ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
+ if (ret)
+ pr_err("Error in Pinning BO to domain: %d\n", domain);
+
+ amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
+ amdgpu_bo_unreserve(bo);
+
+ return ret;
+}
+
+/**
+ * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO according to the following criteria
+ * @bo: Handle of buffer object being unpinned
+ *
+ * - Is an illegal request for USERPTR BOs and is ignored
+ * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
+ * PIN count decremented. Calls to UNPIN must balance calls to PIN
+ */
+static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
+{
+ int ret = 0;
+
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret))
+ return;
+
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+}
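The two helpers above establish a pairing contract: every successful pin must eventually be matched by exactly one unpin before the BO is freed. A toy model of that invariant (plain C, nothing here is amdgpu API):

#include <assert.h>

static int pin_count;

static void pin(void)   { pin_count++; }
static void unpin(void) { assert(pin_count > 0); pin_count--; }

int main(void)
{
	pin();		/* e.g. pinned at allocation of a DOORBELL/MMIO BO */
	pin();		/* pinning the same BO again is valid */
	unpin();
	unpin();	/* UNPIN calls must balance PIN calls */
	assert(pin_count == 0);
	return 0;
}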
+
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct file *filp, u32 pasid,
void **process_info,
struct dma_fence **ef)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_fpriv *drv_priv;
struct amdgpu_vm *avm;
int ret;
@@ -1353,12 +1420,12 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
}
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
+void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
+ void *drm_priv)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm;
- if (WARN_ON(!kgd || !drm_priv))
+ if (WARN_ON(!adev || !drm_priv))
return;
avm = drm_priv_to_vm(drm_priv);
@@ -1386,11 +1453,10 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
- struct kgd_dev *kgd, uint64_t va, uint64_t size,
+ struct amdgpu_device *adev, uint64_t va, uint64_t size,
void *drm_priv, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
enum ttm_bo_type bo_type = ttm_bo_type_device;
struct sg_table *sg = NULL;
@@ -1454,7 +1520,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
if (ret) {
pr_debug("Insufficient memory\n");
goto err_reserve_limit;
@@ -1495,6 +1561,15 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
ret = init_user_pages(*mem, user_addr);
if (ret)
goto allocate_init_user_pages_failed;
+ } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (ret) {
+ pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
+ goto err_pin_bo;
+ }
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
}
if (offset)
@@ -1503,13 +1578,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
return 0;
allocate_init_user_pages_failed:
+err_pin_bo:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
- unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+ unreserve_mem_limit(adev, size, flags);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
if (gobj)
@@ -1525,7 +1601,7 @@ err:
}
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
uint64_t *size)
{
struct amdkfd_process_info *process_info = mem->process_info;
@@ -1538,6 +1614,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
bool is_imported = false;
mutex_lock(&mem->lock);
+
+ /* Unpin MMIO/DOORBELL BOs that were pinned during allocation */
+ if (mem->alloc_flags &
+ (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
+ }
+
mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
is_imported = mem->is_imported;
mutex_unlock(&mem->lock);
@@ -1617,10 +1701,9 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem,
+ struct amdgpu_device *adev, struct kgd_mem *mem,
void *drm_priv, bool *table_freed)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
int ret;
struct amdgpu_bo *bo;
@@ -1747,7 +1830,7 @@ out:
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct amdkfd_process_info *process_info = avm->process_info;
@@ -1808,7 +1891,7 @@ out:
}
int amdgpu_amdkfd_gpuvm_sync_memory(
- struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
+ struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
{
struct amdgpu_sync sync;
int ret;
@@ -1824,7 +1907,7 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
return ret;
}
-int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
struct kgd_mem *mem, void **kptr, uint64_t *size)
{
int ret;
@@ -1880,7 +1963,8 @@ bo_reserve_failed:
return ret;
}
-void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem)
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev,
+ struct kgd_mem *mem)
{
struct amdgpu_bo *bo = mem->bo;
@@ -1890,12 +1974,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kg
amdgpu_bo_unreserve(bo);
}
-int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
- struct kfd_vm_fault_info *mem)
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
+ struct kfd_vm_fault_info *mem)
{
- struct amdgpu_device *adev;
-
- adev = (struct amdgpu_device *)kgd;
if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
mb();
@@ -1904,13 +1985,12 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
return 0;
}
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
struct dma_buf *dma_buf,
uint64_t va, void *drm_priv,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
@@ -2537,11 +2617,9 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
}
/* Returns GPU-specific tiling mode information */
-int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
config->gb_addr_config = adev->gfx.config.gb_addr_config;
config->tile_config_ptr = adev->gfx.config.tile_mode_array;
config->num_tile_configs =
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 97178b307ed6..4d4ddf026faf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -470,8 +470,8 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
/**
* amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
- * adev: amdgpu_device pointer
- * i2c_address: pointer to u8; if not NULL, will contain
+ * @adev: amdgpu_device pointer
+ * @i2c_address: pointer to u8; if not NULL, will contain
* the RAS EEPROM address if the function returns true
*
* Return true if VBIOS supports RAS EEPROM address reporting,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 7abe9500c0c6..d6d986be906a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
+#include "amdgpu.h"
#include "amd_acpi.h"
#define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0)
@@ -165,7 +166,7 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
}
/**
- * amdgpu_atpx_validate_functions - validate ATPX functions
+ * amdgpu_atpx_validate - validate ATPX functions
*
* @atpx: amdgpu atpx struct
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 0de66f59adb8..c16a2704ced6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -108,7 +108,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (amdgpu_connector->use_digital) {
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -116,7 +116,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -125,7 +125,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
dig_connector = amdgpu_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
- drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -149,7 +149,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
break;
}
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
/*
* Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
* much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
@@ -315,8 +315,10 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector)
if (!amdgpu_connector->edid) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
- (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) {
amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev);
+ drm_connector_update_edid_property(connector, amdgpu_connector->edid);
+ }
}
}
@@ -326,6 +328,7 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
kfree(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
+ drm_connector_update_edid_property(connector, NULL);
}
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
@@ -387,6 +390,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
+ if (!mode)
+ return NULL;
+
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -401,6 +407,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
@@ -1171,7 +1180,7 @@ static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector
(amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
return MODE_OK;
- } else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ } else if (connector->display_info.is_hdmi) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1463,7 +1472,7 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector
(amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return amdgpu_atombios_dp_mode_valid_helper(connector, mode);
} else {
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0311d799a010..06d07502a1f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -298,7 +298,6 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
{
s64 time_us, increment_us;
u64 free_vram, total_vram, used_vram;
- struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
/* Allow a maximum of 200 accumulated ms. This is basically per-IB
* throttling.
*
@@ -315,7 +314,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
}
total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
- used_vram = amdgpu_vram_mgr_usage(vram_man);
+ used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
spin_lock(&adev->mm_stats.lock);
@@ -362,7 +361,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
u64 total_vis_vram = adev->gmc.visible_vram_size;
u64 used_vis_vram =
- amdgpu_vram_mgr_vis_usage(vram_man);
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
if (used_vis_vram < total_vis_vram) {
u64 free_vis_vram = total_vis_vram - used_vis_vram;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 164d6a9e9fbb..25e2e5bf90eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1618,6 +1618,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
if (!debugfs_initialized())
return 0;
+ debugfs_create_x32("amdgpu_smu_debug", 0600, root,
+ &adev->pm.smu_debug_mask);
+
ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
&fops_ib_preempt);
if (IS_ERR(ent)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 694c3726e0f4..ed077de426d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
+#include <linux/iommu.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
@@ -331,7 +332,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
}
/**
- * amdgpu_device_vram_access - access vram by vram aperature
+ * amdgpu_device_aper_access - access vram by vram aperture
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
@@ -550,11 +551,11 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
-/*
- * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
+/**
+ * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
*
 * this function is invoked only for debugfs register access
- * */
+ */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
uint32_t reg, uint32_t v)
{
@@ -566,6 +567,8 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
+ } else if ((reg * 4) >= adev->rmmio_size) {
+ adev->pcie_wreg(adev, reg * 4, v);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
@@ -1100,7 +1103,7 @@ static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
+ * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
*
* @adev: amdgpu_device pointer
*
@@ -1447,7 +1450,7 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
break;
default:
- return -EINVAL;
+ break;
}
return 0;
@@ -2316,6 +2319,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ /* Try to reserve bad pages early */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_exchange_data(adev);
+
r = amdgpu_device_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
@@ -2614,11 +2621,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
- /* For XGMI + passthrough configuration on arcturus, enable light SBR */
- if (adev->asic_type == CHIP_ARCTURUS &&
- amdgpu_passthrough(adev) &&
- adev->gmc.xgmi.num_physical_nodes > 1)
- smu_set_light_sbr(&adev->smu, true);
+ /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
+ if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
+ adev->asic_type == CHIP_ALDEBARAN))
+ smu_handle_passthrough_sbr(&adev->smu, true);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
mutex_lock(&mgpu_info.mutex);
@@ -2657,6 +2663,36 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_device_smu_fini_early - smu hw_fini wrapper
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * For ASICs that need to disable the SMC first
+ */
+static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
+ return;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ adev->ip_blocks[i].status.hw = false;
+ break;
+ }
+ }
+}
+
static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
{
int i, r;
@@ -2677,21 +2713,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- /* need to disable SMC first */
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.hw)
- continue;
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
- adev->ip_blocks[i].status.hw = false;
- break;
- }
- }
+ /* Workaround for ASICs that need to disable the SMC first */
+ amdgpu_device_smu_fini_early(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.hw)
@@ -2733,8 +2756,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
amdgpu_virt_release_ras_err_handler_data(adev);
- amdgpu_ras_pre_fini(adev);
-
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
@@ -3373,6 +3394,22 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
return ret;
}
+/**
+ * amdgpu_device_check_iommu_direct_map - check if RAM is direct mapped to the GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * RAM is direct mapped to the GPU if the IOMMU is disabled or in passthrough mode
+ */
+static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
+{
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(adev->dev);
+ if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
+ adev->ram_is_direct_mapped = true;
+}
+
static const struct attribute *amdgpu_dev_attributes[] = {
&dev_attr_product_name.attr,
&dev_attr_product_number.attr,
@@ -3461,9 +3498,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
- r = amdgpu_device_init_apu_flags(adev);
- if (r)
- return r;
+ amdgpu_device_init_apu_flags(adev);
r = amdgpu_device_check_arguments(adev);
if (r)
@@ -3547,6 +3582,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* Need to get xgmi info early to decide the reset behavior */
+ if (adev->gmc.xgmi.supported) {
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
+ if (r)
+ return r;
+ }
+
/* enable PCIE atomic ops */
if (amdgpu_sriov_vf(adev))
adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
@@ -3693,8 +3735,6 @@ fence_driver_init:
/* Get a log2 for easy divisions. */
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
- amdgpu_fbdev_init(adev);
-
r = amdgpu_pm_sysfs_init(adev);
if (r) {
adev->pm_sysfs_en = false;
@@ -3778,6 +3818,8 @@ fence_driver_init:
queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
+ amdgpu_device_check_iommu_direct_map(adev);
+
return 0;
release_ras_con:
@@ -3791,6 +3833,7 @@ failed:
static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
{
+
/* Clear all CPU mappings pointing to this device */
unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
@@ -3811,7 +3854,7 @@ static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_fini - tear down the driver
+ * amdgpu_device_fini_hw - tear down the driver
*
* @adev: amdgpu_device pointer
*
@@ -3852,21 +3895,27 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_ucode_sysfs_fini(adev);
sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
- amdgpu_fbdev_fini(adev);
+ /* RAS features must be disabled before hw fini */
+ amdgpu_ras_pre_fini(adev);
amdgpu_device_ip_fini_early(adev);
amdgpu_irq_fini_hw(adev);
- ttm_device_clear_dma_mappings(&adev->mman.bdev);
+ if (adev->mman.initialized)
+ ttm_device_clear_dma_mappings(&adev->mman.bdev);
amdgpu_gart_dummy_page_fini(adev);
- amdgpu_device_unmap_mmio(adev);
+ if (drm_dev_is_unplugged(adev_to_drm(adev)))
+ amdgpu_device_unmap_mmio(adev);
+
}
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
+ int idx;
+
amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
release_firmware(adev->firmware.gpu_info_fw);
@@ -3891,6 +3940,14 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_unregister(adev->pdev);
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+ iounmap(adev->rmmio);
+ adev->rmmio = NULL;
+ amdgpu_device_doorbell_fini(adev);
+ drm_dev_exit(idx);
+ }
+
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
if (adev->mman.discovery_bin)
@@ -3911,8 +3968,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
*/
static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
{
- /* No need to evict vram on APUs for suspend to ram */
- if (adev->in_s3 && (adev->flags & AMD_IS_APU))
+ /* No need to evict vram on APUs for suspend to ram or s2idle */
+ if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
return;
if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
@@ -3948,7 +4005,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
drm_kms_helper_poll_disable(dev);
if (fbcon)
- amdgpu_fbdev_set_suspend(adev, 1);
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -3959,16 +4016,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (!adev->in_s0ix)
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
- /* First evict vram memory */
amdgpu_device_evict_resources(adev);
amdgpu_fence_driver_hw_fini(adev);
amdgpu_device_ip_suspend_phase2(adev);
- /* This second call to evict device resources is to evict
- * the gart page table using the CPU.
- */
- amdgpu_device_evict_resources(adev);
return 0;
}
@@ -4025,7 +4077,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
flush_delayed_work(&adev->delayed_init_work);
if (fbcon)
- amdgpu_fbdev_set_suspend(adev, 0);
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
drm_kms_helper_poll_enable(dev);
@@ -4294,6 +4346,9 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
bool from_hypervisor)
{
int r;
+ struct amdgpu_hive_info *hive = NULL;
amdgpu_amdkfd_pre_reset(adev);
@@ -4310,8 +4365,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
goto error;
amdgpu_virt_init_data_exchange(adev);
- /* we need recover gart prior to run SMC/CP/SDMA resume */
- amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
r = amdgpu_device_fw_loading(adev);
if (r)
@@ -4322,9 +4375,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
goto error;
- amdgpu_irq_gpu_reset_resume_helper(adev);
- r = amdgpu_ib_ring_tests(adev);
- amdgpu_amdkfd_post_reset(adev);
+ hive = amdgpu_get_xgmi_hive(adev);
+ /* Update PSP FW topology after reset */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(hive, adev);
+
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
+
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ r = amdgpu_ib_ring_tests(adev);
+ amdgpu_amdkfd_post_reset(adev);
+ }
error:
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
@@ -4387,33 +4450,24 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
if (amdgpu_gpu_recovery == -1) {
switch (adev->asic_type) {
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_TOPAZ:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- case CHIP_VEGAM:
- case CHIP_VEGA20:
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_VANGOGH:
- case CHIP_ALDEBARAN:
- break;
- default:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_VERDE:
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+#endif
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_CYAN_SKILLFISH:
goto disabled;
+ default:
+ break;
}
}
@@ -4621,10 +4675,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
amdgpu_inc_vram_lost(tmp_adev);
}
- r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
- if (r)
- goto out;
-
r = amdgpu_device_fw_loading(tmp_adev);
if (r)
return r;
@@ -4650,7 +4700,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (r)
goto out;
- amdgpu_fbdev_set_suspend(tmp_adev, 0);
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
/*
* The GPU enters bad state once faulty pages
@@ -4749,7 +4799,7 @@ static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgp
{
struct amdgpu_device *tmp_adev = NULL;
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
if (!hive) {
dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
return -ENODEV;
@@ -4961,7 +5011,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* We always reset all schedulers for device and all devices for XGMI
* hive so that should take care of them too.
*/
- hive = amdgpu_get_xgmi_hive(adev);
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
if (hive) {
if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
@@ -5002,7 +5053,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* to put adev in the 1st position.
*/
INIT_LIST_HEAD(&device_list);
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
list_add_tail(&tmp_adev->reset_list, &device_list);
if (!list_is_first(&adev->reset_list, &device_list))
@@ -5041,7 +5092,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- amdgpu_fbdev_set_suspend(tmp_adev, 1);
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
/* disable ras on ALL IPs */
if (!need_emergency_restart &&
@@ -5636,3 +5687,42 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
amdgpu_asic_invalidate_hdp(adev, ring);
}
+
+/**
+ * amdgpu_device_halt() - bring hardware to some kind of halt state
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Bring hardware to some kind of halt state so that no one can touch it
+ * anymore. It helps preserve the error context when an error occurs.
+ * Compared to a simple hang, the system stays stable at least for SSH
+ * access. It should then be trivial to inspect the hardware state and
+ * see what's going on. Implemented as follows:
+ *
+ * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
+ * clears all CPU mappings to the device, and disallows remappings through page faults
+ * 2. amdgpu_irq_disable_all() disables all interrupts
+ * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
+ * 4. set adev->no_hw_access to avoid potential crashes after step 5
+ * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
+ * 6. pci_disable_device() and pci_wait_for_pending_transaction()
+ * flush any in-flight DMA operations
+ */
+void amdgpu_device_halt(struct amdgpu_device *adev)
+{
+ struct pci_dev *pdev = adev->pdev;
+ struct drm_device *ddev = adev_to_drm(adev);
+
+ drm_dev_unplug(ddev);
+
+ amdgpu_irq_disable_all(adev);
+
+ amdgpu_fence_driver_hw_fini(adev);
+
+ adev->no_hw_access = true;
+
+ amdgpu_device_unmap_mmio(adev);
+
+ pci_disable_device(pdev);
+ pci_wait_for_pending_transaction(pdev);
+}
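Step 4 is what makes steps 5 and 6 safe: once adev->no_hw_access is set, the register-access helpers bail out instead of touching MMIO that is about to be unmapped. A hedged sketch of such a guard (the real checks live in the amdgpu register accessors and differ in detail):

static inline u32 guarded_rreg32(struct amdgpu_device *adev, u32 reg)
{
	if (adev->no_hw_access)
		return 0;	/* device halted: skip the MMIO read */
	return readl(((void __iomem *)adev->rmmio) + (reg * 4));
}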
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index bcc9343353b5..81bfee978b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -67,7 +67,8 @@
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
-MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
+#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
+MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMM_INDEX 0x0
@@ -179,7 +180,7 @@ static int hw_id_map[MAX_HWIP] = {
[DCI_HWIP] = DCI_HWID,
};
-static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
+static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)
{
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
@@ -189,6 +190,34 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin
return 0;
}
+static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
+{
+ const struct firmware *fw;
+ const char *fw_name;
+ int r;
+
+ switch (amdgpu_discovery) {
+ case 2:
+ fw_name = FIRMWARE_IP_DISCOVERY;
+ break;
+ default:
+ dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
+ return -EINVAL;
+ }
+
+ r = request_firmware(&fw, fw_name, adev->dev);
+ if (r) {
+ dev_err(adev->dev, "can't load firmware \"%s\"\n",
+ fw_name);
+ return r;
+ }
+
+ memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size);
+ release_firmware(fw);
+
+ return 0;
+}
+
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
uint16_t checksum = 0;
@@ -206,13 +235,44 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size
return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}
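A self-contained sketch of the verification flow, assuming the checksum is a simple byte-wise 16-bit sum (an assumption for illustration; amdgpu_discovery_calculate_checksum() above is authoritative):

#include <assert.h>
#include <stdint.h>

static uint16_t calc_checksum(const uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;

	while (size--)
		checksum += *data++;	/* assumed byte-wise accumulation */
	return checksum;
}

int main(void)
{
	const uint8_t table[] = { 0x10, 0x20, 0x30 };

	/* Same shape as the verify helper: compare against an expected value. */
	assert(calc_checksum(table, sizeof(table)) == 0x60);
	return 0;
}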
+static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
+{
+ struct binary_header *bhdr;
+ bhdr = (struct binary_header *)binary;
+
+ return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
+}
+
+static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
+{
+ /*
+ * So far, apply this quirk only on those Navy Flounder boards which
+ * have a bad harvest table for the VCN config.
+ */
+ if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
+ switch (adev->pdev->revision) {
+ case 0xC1:
+ case 0xC2:
+ case 0xC3:
+ case 0xC5:
+ case 0xC7:
+ case 0xCF:
+ case 0xDF:
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct gpu_info_header *ghdr;
- const struct firmware *fw;
uint16_t offset;
uint16_t size;
uint16_t checksum;
@@ -223,31 +283,32 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (!adev->mman.discovery_bin)
return -ENOMEM;
- if (amdgpu_discovery == 2) {
- r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev);
- if (r)
- goto get_from_vram;
- dev_info(adev->dev, "Using IP discovery from file\n");
- memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data,
- adev->mman.discovery_tmr_size);
- release_firmware(fw);
- } else {
-get_from_vram:
- r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
+ r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin);
+ if (r) {
+ dev_err(adev->dev, "failed to read ip discovery binary from vram\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ dev_warn(adev->dev, "invalid ip discovery binary signature read from vram\n");
+ /* retry reading the ip discovery binary from file */
+ r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
if (r) {
- DRM_ERROR("failed to read ip discovery binary\n");
+ dev_err(adev->dev, "failed to read ip discovery binary from file\n");
+ r = -EINVAL;
+ goto out;
+ }
+ /* check the ip discovery binary signature */
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ dev_warn(adev->dev, "invalid ip discovery binary signature read from file\n");
+ r = -EINVAL;
goto out;
}
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
- if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
- DRM_ERROR("invalid ip discovery binary signature\n");
- r = -EINVAL;
- goto out;
- }
-
offset = offsetof(struct binary_header, binary_checksum) +
sizeof(bhdr->binary_checksum);
size = le16_to_cpu(bhdr->binary_size) - offset;
@@ -255,7 +316,7 @@ get_from_vram:
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
size, checksum)) {
- DRM_ERROR("invalid ip discovery binary checksum\n");
+ dev_err(adev->dev, "invalid ip discovery binary checksum\n");
r = -EINVAL;
goto out;
}
@@ -266,14 +327,14 @@ get_from_vram:
ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
- DRM_ERROR("invalid ip discovery data table signature\n");
+ dev_err(adev->dev, "invalid ip discovery data table signature\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le16_to_cpu(ihdr->size), checksum)) {
- DRM_ERROR("invalid ip discovery data table checksum\n");
+ dev_err(adev->dev, "invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
}
@@ -285,7 +346,7 @@ get_from_vram:
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le32_to_cpu(ghdr->size), checksum)) {
- DRM_ERROR("invalid gc data table checksum\n");
+ dev_err(adev->dev, "invalid gc data table checksum\n");
r = -EINVAL;
goto out;
}
@@ -379,8 +440,18 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
ip->major, ip->minor,
ip->revision);
- if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+ if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
+ /* Bit [5:0]: original revision value
+ * Bit [7:6]: en/decode capability:
+ * 0b00 : VCN functions normally
+ * 0b10 : encode is disabled
+ * 0b01 : decode is disabled
+ */
+ adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ ip->revision & 0xc0;
+ ip->revision &= ~0xc0;
adev->vcn.num_vcn_inst++;
+ }
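A minimal demonstration of the split performed above: the capability bits [7:6] are saved per VCN instance, then masked off so the rest of the driver keeps seeing the plain revision value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t revision = 0x82;		/* [7:6] = 0b10: encode disabled */
	uint8_t vcn_config = revision & 0xc0;	/* keep the capability bits */

	revision &= ~0xc0;			/* recover the original revision */
	assert(vcn_config == 0x80 && revision == 0x02);
	return 0;
}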
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
@@ -472,14 +543,6 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n
return -EINVAL;
}
-
-int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
- int *major, int *minor, int *revision)
-{
- return amdgpu_discovery_get_ip_version(adev, VCN_HWID,
- vcn_instance, major, minor, revision);
-}
-
void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
@@ -509,10 +572,9 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
break;
}
}
- /* some IP discovery tables on Navy Flounder don't have this set correctly */
- if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
- (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)))
- adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+
+ amdgpu_discovery_harvest_config_quirk(adev);
+
if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
@@ -949,7 +1011,6 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 16):
- case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 0, 2):
case IP_VERSION(3, 0, 192):
@@ -986,7 +1047,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
break;
default:
- break;;
+ break;
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 0ea029e3b850..14537cec19db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -33,8 +33,6 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
int *major, int *minor, int *revision);
-int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
- int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index dc50c05f23fc..82011e75ed85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -83,9 +83,6 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
unsigned i;
int vpos, hpos;
- if (amdgpu_display_flip_handle_fence(work, &work->excl))
- return;
-
for (i = 0; i < work->shared_count; ++i)
if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
return;
@@ -203,7 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
+ r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL,
&work->shared_count, &work->shared);
if (unlikely(r != 0)) {
DRM_ERROR("failed to get fences for buffer\n");
@@ -253,7 +250,6 @@ unreserve:
cleanup:
amdgpu_bo_unref(&work->old_abo);
- dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
dma_fence_put(work->shared[i]);
kfree(work->shared);
@@ -1364,7 +1360,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ connector->display_info.is_hdmi &&
amdgpu_display_is_hdtv_mode(mode)))) {
if (amdgpu_encoder->underscan_hborder != 0)
amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
@@ -1603,13 +1599,10 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
continue;
}
robj = gem_to_amdgpu_bo(fb->obj[0]);
- /* don't unpin kernel fb objects */
- if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
- r = amdgpu_bo_reserve(robj, true);
- if (r == 0) {
- amdgpu_bo_unpin(robj);
- amdgpu_bo_unreserve(robj);
- }
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
}
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 7444484a12bf..579adfafe4d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -61,9 +61,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
attach->peer2peer = false;
- if (attach->dev->driver == adev->dev->driver)
- return 0;
-
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 99370bdd8c5b..4c83f1db8a24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -31,7 +31,6 @@
#include "amdgpu_drv.h"
#include <drm/drm_pciids.h>
-#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
@@ -315,9 +314,12 @@ module_param_named(dpm, amdgpu_dpm, int, 0444);
/**
* DOC: fw_load_type (int)
- * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
+ * Set different firmware loading type for debugging, if supported.
+ * Set to 0 to force direct loading if supported by the ASIC. Set
+ * to -1 to select the default loading mode for the ASIC, as defined
+ * by the driver. The default is -1 (auto).
*/
-MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
+MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = force direct if supported, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);
/**
@@ -1523,6 +1525,87 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
0x99A0,
0x99A2,
0x99A4,
+ /* radeon secondary ids */
+ 0x3171,
+ 0x3e70,
+ 0x4164,
+ 0x4165,
+ 0x4166,
+ 0x4168,
+ 0x4170,
+ 0x4171,
+ 0x4172,
+ 0x4173,
+ 0x496e,
+ 0x4a69,
+ 0x4a6a,
+ 0x4a6b,
+ 0x4a70,
+ 0x4a74,
+ 0x4b69,
+ 0x4b6b,
+ 0x4b6c,
+ 0x4c6e,
+ 0x4e64,
+ 0x4e65,
+ 0x4e66,
+ 0x4e67,
+ 0x4e68,
+ 0x4e69,
+ 0x4e6a,
+ 0x4e71,
+ 0x4f73,
+ 0x5569,
+ 0x556b,
+ 0x556d,
+ 0x556f,
+ 0x5571,
+ 0x5854,
+ 0x5874,
+ 0x5940,
+ 0x5941,
+ 0x5b72,
+ 0x5b73,
+ 0x5b74,
+ 0x5b75,
+ 0x5d44,
+ 0x5d45,
+ 0x5d6d,
+ 0x5d6f,
+ 0x5d72,
+ 0x5d77,
+ 0x5e6b,
+ 0x5e6d,
+ 0x7120,
+ 0x7124,
+ 0x7129,
+ 0x712e,
+ 0x712f,
+ 0x7162,
+ 0x7163,
+ 0x7166,
+ 0x7167,
+ 0x7172,
+ 0x7173,
+ 0x71a0,
+ 0x71a1,
+ 0x71a3,
+ 0x71a7,
+ 0x71bb,
+ 0x71e0,
+ 0x71e1,
+ 0x71e2,
+ 0x71e6,
+ 0x71e7,
+ 0x71f2,
+ 0x7269,
+ 0x726b,
+ 0x726e,
+ 0x72a0,
+ 0x72a8,
+ 0x72b1,
+ 0x72b3,
+ 0x793f,
};
static const struct pci_device_id pciidlist[] = {
@@ -1928,11 +2011,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
- if (flags == 0) {
- DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n");
- return -ENODEV;
- }
-
if (amdgpu_virtual_display ||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
@@ -2031,6 +2109,19 @@ retry_init:
goto err_pci;
}
+ /*
+ * 1. don't init fbdev on hw without DCE
+ * 2. don't init fbdev if there are no connectors
+ */
+ if (adev->mode_info.mode_config_initialized &&
+ !list_empty(&adev_to_drm(adev)->mode_config.connector_list)) {
+ /* select 8 bpp console on low vram cards */
+ if (adev->gmc.real_vram_size <= (32*1024*1024))
+ drm_fbdev_generic_setup(adev_to_drm(adev), 8);
+ else
+ drm_fbdev_generic_setup(adev_to_drm(adev), 32);
+ }
+
ret = amdgpu_debugfs_init(adev);
if (ret)
DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
@@ -2179,9 +2270,9 @@ static int amdgpu_pmops_suspend(struct device *dev)
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
- adev->in_s3 = true;
+ else
+ adev->in_s3 = true;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_s3 = false;
if (r)
return r;
if (!adev->in_s0ix)
@@ -2202,6 +2293,8 @@ static int amdgpu_pmops_resume(struct device *dev)
r = amdgpu_device_resume(drm_dev, true);
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = false;
+ else
+ adev->in_s3 = false;
return r;
}
@@ -2563,10 +2656,8 @@ static int __init amdgpu_init(void)
{
int r;
- if (vgacon_text_force()) {
- DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
+ if (drm_firmware_drivers_only())
return -EINVAL;
- }
r = amdgpu_sync_init();
if (r)
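
The suspend/resume hunks pair the two flags: suspend sets exactly one of in_s0ix or in_s3, and resume clears whichever one applies, instead of toggling in_s3 around every suspend. A condensed sketch of the pairing, using the fields and helpers from the hunks above:

/* Sketch: keeping in_s0ix and in_s3 mutually exclusive across suspend/resume. */
static int suspend_sketch(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	if (amdgpu_acpi_is_s0ix_active(adev))
		adev->in_s0ix = true;	/* modern standby */
	else
		adev->in_s3 = true;	/* classic S3; stays set until resume */

	return amdgpu_device_suspend(drm_dev, true);
}

static int resume_sketch(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int r = amdgpu_device_resume(drm_dev, true);

	/* clear whichever flag the suspend path set */
	if (amdgpu_acpi_is_s0ix_active(adev))
		adev->in_s0ix = false;
	else
		adev->in_s3 = false;
	return r;
}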
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index e3a4f7048042..8178323e4bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -45,4 +45,7 @@
long amdgpu_drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+long amdgpu_kms_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index af4ef84e27a7..c96e458ed088 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -222,7 +222,7 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_HDMIB:
if (amdgpu_connector->use_digital) {
/* HDMI 1.3 supports up to 340 MHz over single link */
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (pixel_clock > 340000)
return true;
else
@@ -244,7 +244,7 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
return false;
else {
/* HDMI 1.3 supports up to 340 MHz over single link */
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (pixel_clock > 340000)
return true;
else
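
Both hunks swap the EDID re-parse in drm_detect_hdmi_monitor() for connector->display_info.is_hdmi, which the DRM core caches when it parses the EDID. A rough sketch of the dual-link decision; the 165 MHz DVI single-link figure is an illustrative assumption, only the 340 MHz HDMI limit comes from the surrounding comments:

/* Sketch: dual-link check using the cached is_hdmi flag
 * (pixel_clock in kHz, as in the caller). */
static bool needs_duallink_sketch(struct drm_connector *connector,
				  u32 pixel_clock)
{
	if (connector->display_info.is_hdmi)	/* cached at EDID parse time */
		return pixel_clock > 340000;	/* HDMI 1.3 single-link limit */
	return pixel_clock > 165000;		/* assumed DVI single-link limit */
}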
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
deleted file mode 100644
index cd0acbea75da..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Copyright © 2007 David Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * David Airlie
- */
-
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/vga_switcheroo.h>
-
-#include <drm/amdgpu_drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-
-#include "amdgpu.h"
-#include "cikd.h"
-#include "amdgpu_gem.h"
-
-#include "amdgpu_display.h"
-
-/* object hierarchy -
- this contains a helper + a amdgpu fb
- the helper contains a pointer to amdgpu framebuffer baseclass.
-*/
-
-static int
-amdgpufb_open(struct fb_info *info, int user)
-{
- struct drm_fb_helper *fb_helper = info->par;
- int ret = pm_runtime_get_sync(fb_helper->dev->dev);
- if (ret < 0 && ret != -EACCES) {
- pm_runtime_mark_last_busy(fb_helper->dev->dev);
- pm_runtime_put_autosuspend(fb_helper->dev->dev);
- return ret;
- }
- return 0;
-}
-
-static int
-amdgpufb_release(struct fb_info *info, int user)
-{
- struct drm_fb_helper *fb_helper = info->par;
-
- pm_runtime_mark_last_busy(fb_helper->dev->dev);
- pm_runtime_put_autosuspend(fb_helper->dev->dev);
- return 0;
-}
-
-static const struct fb_ops amdgpufb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_open = amdgpufb_open,
- .fb_release = amdgpufb_release,
- .fb_fillrect = drm_fb_helper_cfb_fillrect,
- .fb_copyarea = drm_fb_helper_cfb_copyarea,
- .fb_imageblit = drm_fb_helper_cfb_imageblit,
-};
-
-
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
-{
- int aligned = width;
- int pitch_mask = 0;
-
- switch (cpp) {
- case 1:
- pitch_mask = 255;
- break;
- case 2:
- pitch_mask = 127;
- break;
- case 3:
- case 4:
- pitch_mask = 63;
- break;
- }
-
- aligned += pitch_mask;
- aligned &= ~pitch_mask;
- return aligned * cpp;
-}
-
-static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
-{
- struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
- int ret;
-
- ret = amdgpu_bo_reserve(abo, true);
- if (likely(ret == 0)) {
- amdgpu_bo_kunmap(abo);
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- drm_gem_object_put(gobj);
-}
-
-static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- const struct drm_format_info *info;
- struct amdgpu_device *adev = rfbdev->adev;
- struct drm_gem_object *gobj = NULL;
- struct amdgpu_bo *abo = NULL;
- bool fb_tiled = false; /* useful for testing */
- u32 tiling_flags = 0, domain;
- int ret;
- int aligned_size, size;
- int height = mode_cmd->height;
- u32 cpp;
- u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED;
-
- info = drm_get_format_info(adev_to_drm(adev), mode_cmd);
- cpp = info->cpp[0];
-
- /* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
- fb_tiled);
- domain = amdgpu_display_supported_domains(adev, flags);
- height = ALIGN(mode_cmd->height, 8);
- size = mode_cmd->pitches[0] * height;
- aligned_size = ALIGN(size, PAGE_SIZE);
- ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
- ttm_bo_type_device, NULL, &gobj);
- if (ret) {
- pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
- return -ENOMEM;
- }
- abo = gem_to_amdgpu_bo(gobj);
-
- if (fb_tiled)
- tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
-
- ret = amdgpu_bo_reserve(abo, false);
- if (unlikely(ret != 0))
- goto out_unref;
-
- if (tiling_flags) {
- ret = amdgpu_bo_set_tiling_flags(abo,
- tiling_flags);
- if (ret)
- dev_err(adev->dev, "FB failed to set tiling flags\n");
- }
-
- ret = amdgpu_bo_pin(abo, domain);
- if (ret) {
- amdgpu_bo_unreserve(abo);
- goto out_unref;
- }
-
- ret = amdgpu_ttm_alloc_gart(&abo->tbo);
- if (ret) {
- amdgpu_bo_unreserve(abo);
- dev_err(adev->dev, "%p bind failed\n", abo);
- goto out_unref;
- }
-
- ret = amdgpu_bo_kmap(abo, NULL);
- amdgpu_bo_unreserve(abo);
- if (ret) {
- goto out_unref;
- }
-
- *gobj_p = gobj;
- return 0;
-out_unref:
- amdgpufb_destroy_pinned_object(gobj);
- *gobj_p = NULL;
- return ret;
-}
-
-static int amdgpufb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
- struct amdgpu_device *adev = rfbdev->adev;
- struct fb_info *info;
- struct drm_framebuffer *fb = NULL;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_gem_object *gobj = NULL;
- struct amdgpu_bo *abo = NULL;
- int ret;
-
- memset(&mode_cmd, 0, sizeof(mode_cmd));
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon object %d\n", ret);
- return ret;
- }
-
- abo = gem_to_amdgpu_bo(gobj);
-
- /* okay we have an object now allocate the framebuffer */
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out;
- }
-
- ret = amdgpu_display_gem_fb_init(adev_to_drm(adev), &rfbdev->rfb,
- &mode_cmd, gobj);
- if (ret) {
- DRM_ERROR("failed to initialize framebuffer %d\n", ret);
- goto out;
- }
-
- fb = &rfbdev->rfb.base;
-
- /* setup helper */
- rfbdev->helper.fb = fb;
-
- info->fbops = &amdgpufb_ops;
-
- info->fix.smem_start = amdgpu_gmc_vram_cpu_pa(adev, abo);
- info->fix.smem_len = amdgpu_bo_size(abo);
- info->screen_base = amdgpu_bo_kptr(abo);
- info->screen_size = amdgpu_bo_size(abo);
-
- drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
-
- /* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base;
- info->apertures->ranges[0].size = adev->gmc.aper_size;
-
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
- if (info->screen_base == NULL) {
- ret = -ENOSPC;
- goto out;
- }
-
- DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
- DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
- DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
- DRM_INFO("fb depth is %d\n", fb->format->depth);
- DRM_INFO(" pitch is %d\n", fb->pitches[0]);
-
- vga_switcheroo_client_fb_set(adev->pdev, info);
- return 0;
-
-out:
- if (fb && ret) {
- drm_gem_object_put(gobj);
- drm_framebuffer_unregister_private(fb);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
- }
- return ret;
-}
-
-static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
-{
- struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
- int i;
-
- drm_fb_helper_unregister_fbi(&rfbdev->helper);
-
- if (rfb->base.obj[0]) {
- for (i = 0; i < rfb->base.format->num_planes; i++)
- drm_gem_object_put(rfb->base.obj[0]);
- amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
- rfb->base.obj[0] = NULL;
- drm_framebuffer_unregister_private(&rfb->base);
- drm_framebuffer_cleanup(&rfb->base);
- }
- drm_fb_helper_fini(&rfbdev->helper);
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
- .fb_probe = amdgpufb_create,
-};
-
-int amdgpu_fbdev_init(struct amdgpu_device *adev)
-{
- struct amdgpu_fbdev *rfbdev;
- int bpp_sel = 32;
- int ret;
-
- /* don't init fbdev on hw without DCE */
- if (!adev->mode_info.mode_config_initialized)
- return 0;
-
- /* don't init fbdev if there are no connectors */
- if (list_empty(&adev_to_drm(adev)->mode_config.connector_list))
- return 0;
-
- /* select 8 bpp console on low vram cards */
- if (adev->gmc.real_vram_size <= (32*1024*1024))
- bpp_sel = 8;
-
- rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
- if (!rfbdev)
- return -ENOMEM;
-
- rfbdev->adev = adev;
- adev->mode_info.rfbdev = rfbdev;
-
- drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper,
- &amdgpu_fb_helper_funcs);
-
- ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper);
- if (ret) {
- kfree(rfbdev);
- return ret;
- }
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display)
- drm_helper_disable_unused_functions(adev_to_drm(adev));
-
- drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
- return 0;
-}
-
-void amdgpu_fbdev_fini(struct amdgpu_device *adev)
-{
- if (!adev->mode_info.rfbdev)
- return;
-
- amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev);
- kfree(adev->mode_info.rfbdev);
- adev->mode_info.rfbdev = NULL;
-}
-
-void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
-{
- if (adev->mode_info.rfbdev)
- drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
- state);
-}
-
-int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
-{
- struct amdgpu_bo *robj;
- int size = 0;
-
- if (!adev->mode_info.rfbdev)
- return 0;
-
- robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]);
- size += amdgpu_bo_size(robj);
- return size;
-}
-
-bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
-{
- if (!adev->mode_info.rfbdev)
- return false;
- if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]))
- return true;
- return false;
-}
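
The roughly 390 deleted lines of driver-private fbdev emulation are replaced by a single call into the generic DRM fbdev helper, as the amdgpu_pci_probe() hunk earlier in this diff shows. A condensed sketch of the replacement logic:

/* Sketch: generic fbdev setup replacing the removed amdgpu_fb.c layer. */
static void fbdev_setup_sketch(struct amdgpu_device *adev)
{
	/* skip hw without a display engine or without any connector */
	if (!adev->mode_info.mode_config_initialized ||
	    list_empty(&adev_to_drm(adev)->mode_config.connector_list))
		return;

	/* 8 bpp console on low-VRAM parts, 32 bpp otherwise */
	drm_fbdev_generic_setup(adev_to_drm(adev),
				adev->gmc.real_vram_size <= (32 * 1024 * 1024)
				? 8 : 32);
}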
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 7709caeb233d..2a786e788627 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -56,6 +56,9 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
return true;
else
return false;
+ case CHIP_ALDEBARAN:
+ /* All Aldebaran SKUs have the FRU */
+ return true;
default:
return false;
}
@@ -88,13 +91,17 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
{
- unsigned char buff[34];
+ unsigned char buff[AMDGPU_PRODUCT_NAME_LEN+2];
u32 addrptr;
int size, len;
+ int offset = 2;
if (!is_fru_eeprom_supported(adev))
return 0;
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ offset = 0;
+
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.smu_i2c.algo) {
DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
@@ -131,15 +138,13 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
}
len = size;
- /* Product name should only be 32 characters. Any more,
- * and something could be wrong. Cap it at 32 to be safe
- */
- if (len >= sizeof(adev->product_name)) {
- DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
- len = sizeof(adev->product_name) - 1;
+ if (len >= AMDGPU_PRODUCT_NAME_LEN) {
+ DRM_WARN("FRU Product Name is larger than %d characters. This is likely a mistake",
+ AMDGPU_PRODUCT_NAME_LEN);
+ len = AMDGPU_PRODUCT_NAME_LEN - 1;
}
/* Start at the computed offset: non-Aldebaran FRUs use fields 0 and 1 of buff for the address */
- memcpy(adev->product_name, &buff[2], len);
+ memcpy(adev->product_name, &buff[offset], len);
adev->product_name[len] = '\0';
addrptr += size + 1;
@@ -157,7 +162,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
len = sizeof(adev->product_number) - 1;
}
- memcpy(adev->product_number, &buff[2], len);
+ memcpy(adev->product_number, &buff[offset], len);
adev->product_number[len] = '\0';
addrptr += size + 1;
@@ -184,7 +189,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
len = sizeof(adev->serial) - 1;
}
- memcpy(adev->serial, &buff[2], len);
+ memcpy(adev->serial, &buff[offset], len);
adev->serial[len] = '\0';
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index d3e4203f6217..645950a653a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -114,80 +114,12 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
*/
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gart.bo == NULL) {
- struct amdgpu_bo_param bp;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = adev->gart.table_size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
- bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
- r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
- if (r) {
- return r;
- }
- }
- return 0;
-}
-
-/**
- * amdgpu_gart_table_vram_pin - pin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Pin the GART page table in vram so it will not be moved
- * by the memory manager (pcie r4xx, r5xx+). These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
-{
- int r;
-
- r = amdgpu_bo_reserve(adev->gart.bo, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM);
- if (r) {
- amdgpu_bo_unreserve(adev->gart.bo);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr);
- if (r)
- amdgpu_bo_unpin(adev->gart.bo);
- amdgpu_bo_unreserve(adev->gart.bo);
- return r;
-}
-
-/**
- * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Unpin the GART page table in vram (pcie r4xx, r5xx+).
- * These asics require the gart table to be in video memory.
- */
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
-{
- int r;
+ if (adev->gart.bo != NULL)
+ return 0;
- if (adev->gart.bo == NULL) {
- return;
- }
- r = amdgpu_bo_reserve(adev->gart.bo, true);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(adev->gart.bo);
- amdgpu_bo_unpin(adev->gart.bo);
- amdgpu_bo_unreserve(adev->gart.bo);
- adev->gart.ptr = NULL;
- }
+ return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
+ NULL, (void *)&adev->gart.ptr);
}
/**
@@ -201,11 +133,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
*/
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
- if (adev->gart.bo == NULL) {
- return;
- }
- amdgpu_bo_unref(&adev->gart.bo);
- adev->gart.ptr = NULL;
+ amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);
}
/*
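
The open-coded create/pin/kmap and kunmap/unpin/unref sequences collapse into the kernel-BO helpers, which perform all three steps in a single call each. A minimal sketch of the pairing, with the signatures as used in the hunk above:

/* Sketch: kernel-BO helpers replacing open-coded create + pin + kmap. */
static int gart_table_alloc_sketch(struct amdgpu_device *adev)
{
	if (adev->gart.bo)	/* already allocated */
		return 0;

	/* creates the BO, pins it in VRAM and returns a CPU mapping */
	return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
				       NULL, (void *)&adev->gart.ptr);
}

static void gart_table_free_sketch(struct amdgpu_device *adev)
{
	/* unmaps, unpins and unrefs; tolerates a NULL BO */
	amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);
}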
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a1e63ba4c54a..c0d8f40a5b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -877,6 +877,32 @@ out:
return r;
}
+static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
+ int width,
+ int cpp,
+ bool tiled)
+{
+ int aligned = width;
+ int pitch_mask = 0;
+
+ switch (cpp) {
+ case 1:
+ pitch_mask = 255;
+ break;
+ case 2:
+ pitch_mask = 127;
+ break;
+ case 3:
+ case 4:
+ pitch_mask = 63;
+ break;
+ }
+
+ aligned += pitch_mask;
+ aligned &= ~pitch_mask;
+ return aligned * cpp;
+}
+
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
@@ -885,7 +911,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_gem_object *gobj;
uint32_t handle;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
u32 domain;
int r;
@@ -897,8 +924,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
if (adev->mman.buffer_funcs_enabled)
flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
- args->pitch = amdgpu_align_pitch(adev, args->width,
- DIV_ROUND_UP(args->bpp, 8), 0);
+ args->pitch = amdgpu_gem_align_pitch(adev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
domain = amdgpu_bo_get_preferred_domain(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 08478fce00f2..2430d6223c2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -350,6 +350,7 @@ static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
* amdgpu_gmc_filter_faults - filter VM faults
*
* @adev: amdgpu device structure
+ * @ih: interrupt ring the fault was received from
* @addr: address of the VM fault
* @pasid: PASID of the process causing the fault
* @timestamp: timestamp of the fault
@@ -358,7 +359,8 @@ static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
* True if the fault was filtered and should not be processed further.
* False if the fault is a new one and needs to be handled.
*/
-bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih, uint64_t addr,
uint16_t pasid, uint64_t timestamp)
{
struct amdgpu_gmc *gmc = &adev->gmc;
@@ -366,6 +368,10 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
struct amdgpu_gmc_fault *fault;
uint32_t hash;
+ /* Stale retry fault if timestamp goes backward */
+ if (amdgpu_ih_ts_after(timestamp, ih->processed_timestamp))
+ return true;
+
/* If we don't have space left in the ring buffer return immediately */
stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
AMDGPU_GMC_FAULT_TIMEOUT;
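
Fault filtering now has a cheap first gate: any retry fault whose timestamp is older than the IH ring's processed_timestamp is stale and dropped before the hash lookup. A one-function sketch, assuming the amdgpu_ih_ts_after() comparison defined in amdgpu_ih.h later in this diff:

/* Sketch: stale-fault gate in front of the hash-based filter. */
static bool fault_is_stale(struct amdgpu_ih_ring *ih, uint64_t timestamp)
{
	/* true when the ring has already processed past this fault's stamp */
	return amdgpu_ih_ts_after(timestamp, ih->processed_timestamp);
}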
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index e55201134a01..8458cebc6d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -316,7 +316,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
-bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih, uint64_t addr,
uint16_t pasid, uint64_t timestamp);
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 675a72ef305d..72022df264f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -77,10 +77,8 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- struct ttm_resource_manager *man;
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man));
+ return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr));
}
static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
@@ -206,30 +204,27 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
/**
* amdgpu_gtt_mgr_usage - return usage of GTT domain
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_gtt_mgr pointer
*
* Return how many bytes are used in the GTT domain
*/
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr)
{
- struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-
return atomic64_read(&mgr->used) * PAGE_SIZE;
}
/**
* amdgpu_gtt_mgr_recover - re-init gart
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_gtt_mgr pointer
*
* Re-init the gart for each known BO in the GTT.
*/
-int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
+int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
{
- struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
- struct amdgpu_device *adev;
struct amdgpu_gtt_node *node;
struct drm_mm_node *mm_node;
+ struct amdgpu_device *adev;
int r = 0;
adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
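
The GTT manager accessors now take the amdgpu_gtt_mgr itself, so callers that already own the adev skip the ttm_manager_type() lookup; when the device is needed, it is recovered from the embedded member with container_of(), as the recover hunk shows. A minimal sketch of that recovery:

/* Sketch: walking back from an embedded manager to its device. */
static struct amdgpu_device *mgr_to_adev_sketch(struct amdgpu_gtt_mgr *mgr)
{
	/* mgr is embedded at adev->mman.gtt_mgr */
	return container_of(mgr, struct amdgpu_device, mman.gtt_mgr);
}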
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 0c7963dfacad..3df146579ad9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -164,52 +164,32 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
}
}
-/* Waiter helper that checks current rptr matches or passes checkpoint wptr */
-static bool amdgpu_ih_has_checkpoint_processed(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih,
- uint32_t checkpoint_wptr,
- uint32_t *prev_rptr)
-{
- uint32_t cur_rptr = ih->rptr | (*prev_rptr & ~ih->ptr_mask);
-
- /* rptr has wrapped. */
- if (cur_rptr < *prev_rptr)
- cur_rptr += ih->ptr_mask + 1;
- *prev_rptr = cur_rptr;
-
- /* check ring is empty to workaround missing wptr overflow flag */
- return cur_rptr >= checkpoint_wptr ||
- (cur_rptr & ih->ptr_mask) == amdgpu_ih_get_wptr(adev, ih);
-}
-
/**
- * amdgpu_ih_wait_on_checkpoint_process - wait to process IVs up to checkpoint
+ * amdgpu_ih_wait_on_checkpoint_process_ts - wait to process IVs up to checkpoint
*
* @adev: amdgpu_device pointer
* @ih: ih ring to process
*
* Used to ensure ring has processed IVs up to the checkpoint write pointer.
*/
-int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
+int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- uint32_t checkpoint_wptr, rptr;
+ uint32_t checkpoint_wptr;
+ uint64_t checkpoint_ts;
+ long timeout = HZ;
if (!ih->enabled || adev->shutdown)
return -ENODEV;
checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
- /* Order wptr with rptr. */
+ /* Order wptr with ring data. */
rmb();
- rptr = READ_ONCE(ih->rptr);
-
- /* wptr has wrapped. */
- if (rptr > checkpoint_wptr)
- checkpoint_wptr += ih->ptr_mask + 1;
+ checkpoint_ts = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
- return wait_event_interruptible(ih->wait_process,
- amdgpu_ih_has_checkpoint_processed(adev, ih,
- checkpoint_wptr, &rptr));
+ return wait_event_interruptible_timeout(ih->wait_process,
+ amdgpu_ih_ts_after(checkpoint_ts, ih->processed_timestamp) ||
+ ih->rptr == amdgpu_ih_get_wptr(adev, ih), timeout);
}
/**
@@ -299,3 +279,18 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
/* wptr/rptr are in bytes! */
ih->rptr += 32;
}
+
+uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
+ signed int offset)
+{
+ uint32_t iv_size = 32;
+ uint32_t ring_index;
+ uint32_t dw1, dw2;
+
+ rptr += iv_size * offset;
+ ring_index = (rptr & ih->ptr_mask) >> 2;
+
+ dw1 = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw2 = le32_to_cpu(ih->ring[ring_index + 2]);
+ return dw1 | ((u64)(dw2 & 0xffff) << 32);
+}
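
The new helper pulls the 48-bit timestamp straight out of an IV slot: dword 1 carries bits 31:0 and the low half of dword 2 carries bits 47:32, while offset lets callers step relative to a ring pointer (the wait function above passes -1). A standalone sketch of the bit assembly, with made-up example values:

/* Standalone sketch: assembling the 48-bit IV timestamp the way
 * amdgpu_ih_decode_iv_ts_helper() does; the values are illustrative. */
#include <stdint.h>

static uint64_t iv_timestamp(uint32_t dw1, uint32_t dw2)
{
	/* dw1 = timestamp bits 31:0, dw2 bits 15:0 = timestamp bits 47:32 */
	return dw1 | ((uint64_t)(dw2 & 0xffff) << 32);
}
/* iv_timestamp(0x89abcdef, 0xdead4567) == 0x456789abcdef;
 * the upper 16 bits of dw2 carry other IV fields and are masked off. */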
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 0649b59830a5..dd1c2eded6b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -68,20 +68,30 @@ struct amdgpu_ih_ring {
/* For waiting on IH processing at checkpoint. */
wait_queue_head_t wait_process;
+ uint64_t processed_timestamp;
};
+/* return true if timestamp t2 is after t1, with 48-bit wraparound */
+#define amdgpu_ih_ts_after(t1, t2) \
+ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
+
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry);
+ uint64_t (*decode_iv_ts)(struct amdgpu_ih_ring *ih, u32 rptr,
+ signed int offset);
void (*set_rptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
};
#define amdgpu_ih_get_wptr(adev, ih) (adev)->irq.ih_funcs->get_wptr((adev), (ih))
#define amdgpu_ih_decode_iv(adev, iv) \
(adev)->irq.ih_funcs->decode_iv((adev), (ih), (iv))
+#define amdgpu_ih_decode_iv_ts(adev, ih, rptr, offset) \
+ (WARN_ON_ONCE(!(adev)->irq.ih_funcs->decode_iv_ts) ? 0 : \
+ (adev)->irq.ih_funcs->decode_iv_ts((ih), (rptr), (offset)))
#define amdgpu_ih_set_rptr(adev, ih) (adev)->irq.ih_funcs->set_rptr((adev), (ih))
int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
@@ -89,10 +99,12 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
unsigned int num_dw);
-int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih);
+int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih);
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry);
+uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
+ signed int offset);
#endif
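
The amdgpu_ih_ts_after() macro compares 48-bit stamps by shifting them into bits 63:16 of an int64_t, so the signed subtraction wraps exactly like the hardware counter. A standalone sketch demonstrating the wrap case:

/* Standalone sketch: why the shift trick in amdgpu_ih_ts_after() is
 * wrap-safe. The 48-bit stamps land in bits 63:16, so int64_t
 * subtraction overflows at the same point the counter wraps. */
#include <stdint.h>

static int ts_after(uint64_t t1, uint64_t t2)
{
	return ((int64_t)(t2 << 16) - (int64_t)(t1 << 16)) > 0;
}
/* ts_after(0xffffffffffffULL, 2ULL) == 1: t2 wrapped past t1,
 * while a naive "t2 > t1" comparison would return 0. */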
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
index 5cf142e849bb..a1cbd7c3deb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file amdgpu_ioc32.c
*
* 32-bit ioctl compatibility routines for the AMDGPU DRM.
@@ -37,12 +37,9 @@
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
- int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- ret = amdgpu_drm_ioctl(filp, cmd, arg);
-
- return ret;
+ return amdgpu_drm_ioctl(filp, cmd, arg);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index cc2e0c9cfe0a..f5cbc2747ac6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -333,7 +333,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (!amdgpu_device_has_dc_support(adev)) {
if (!adev->enable_virtual_display)
/* Disable vblank IRQs aggressively for power-saving */
- /* XXX: can this be enabled for DC? */
adev_to_drm(adev)->vblank_disable_immediate = true;
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
@@ -391,7 +390,7 @@ void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_fini - shut down interrupt handling
+ * amdgpu_irq_fini_sw - shut down interrupt handling
*
* @adev: amdgpu device pointer
*
@@ -529,6 +528,9 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
/* Send it to amdkfd as well if it isn't already handled */
if (!handled)
amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
+
+ if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
+ ih->processed_timestamp = entry.timestamp;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 09ad17944eb2..1ebb91db2274 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -678,13 +678,13 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
+ ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
- ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
+ ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GTT_USAGE:
- ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
+ ui64 = amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GDS_CONFIG: {
struct drm_amdgpu_info_gds gds_info;
@@ -715,8 +715,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
case AMDGPU_INFO_MEMORY: {
struct drm_amdgpu_memory_info mem;
- struct ttm_resource_manager *vram_man =
- ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
struct ttm_resource_manager *gtt_man =
ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
memset(&mem, 0, sizeof(mem));
@@ -725,7 +723,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
- amdgpu_vram_mgr_usage(vram_man);
+ amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -735,7 +733,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
atomic64_read(&adev->visible_pin_size),
mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
- amdgpu_vram_mgr_vis_usage(vram_man);
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
mem.cpu_accessible_vram.max_allocation =
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
@@ -744,7 +742,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
atomic64_read(&adev->gart_pin_size);
mem.gtt.heap_usage =
- amdgpu_gtt_mgr_usage(gtt_man);
+ amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 89fb372ed49c..6043bf6fd414 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -232,8 +232,6 @@ struct amdgpu_i2c_chan {
struct mutex mutex;
};
-struct amdgpu_fbdev;
-
struct amdgpu_afmt {
bool enabled;
int offset;
@@ -309,13 +307,6 @@ struct amdgpu_framebuffer {
uint64_t address;
};
-struct amdgpu_fbdev {
- struct drm_fb_helper helper;
- struct amdgpu_framebuffer rfb;
- struct list_head fbdev_list;
- struct amdgpu_device *adev;
-};
-
struct amdgpu_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
@@ -341,8 +332,6 @@ struct amdgpu_mode_info {
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
- /* pointer to fbdev info structure */
- struct amdgpu_fbdev *rfbdev;
/* firmware flags */
u32 firmware_flags;
/* pointer to backlight encoder */
@@ -631,15 +620,6 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
int *hpos, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
-/* fbdev layer */
-int amdgpu_fbdev_init(struct amdgpu_device *adev);
-void amdgpu_fbdev_fini(struct amdgpu_device *adev);
-void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
-int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
-bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
-
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
-
/* amdgpu_display.c */
void amdgpu_display_print_display_setup(struct drm_device *dev);
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4fcfc2313b8c..5661b82d84d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/dma-buf.h>
+#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
@@ -1032,9 +1033,14 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
/* On A+A platform, VRAM can be mapped as WB */
if (!adev->gmc.xgmi.connected_to_cpu) {
/* reserve PAT memory space to WC for VRAM */
- arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+ int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
adev->gmc.aper_size);
+ if (r) {
+ DRM_ERROR("Unable to set WC memtype for the aperture base\n");
+ return r;
+ }
+
/* Add an MTRR for the VRAM */
adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
adev->gmc.aper_size);
@@ -1056,7 +1062,18 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
*/
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
+ int idx;
+
amdgpu_ttm_fini(adev);
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ }
+ drm_dev_exit(idx);
+ }
}
/**
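
amdgpu_bo_init() now fails hard when the WC memtype reservation fails, and amdgpu_bo_fini() wraps the MTRR/memtype teardown in drm_dev_enter()/drm_dev_exit() so it is skipped once the device has been unplugged. A minimal sketch of the guard pattern:

/* Sketch: guarding hardware teardown against device unplug, as in
 * the amdgpu_bo_fini() hunk above. */
static void fini_sketch(struct amdgpu_device *adev)
{
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;	/* device already unplugged: nothing left to undo */

	if (!adev->gmc.xgmi.connected_to_cpu) {
		arch_phys_wc_del(adev->gmc.vram_mtrr);
		arch_io_free_memtype_wc(adev->gmc.aper_base,
					adev->gmc.aper_size);
	}
	drm_dev_exit(idx);
}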
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index 4eaec446b49d..0bb2466d539a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -69,6 +69,7 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
/**
* amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
*
+ * @adev: amdgpu_device pointer
* @nom: nominator
* @den: denominator
* @post_div: post divider
@@ -106,6 +107,7 @@ static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int n
/**
* amdgpu_pll_compute - compute PLL parameters
*
+ * @adev: amdgpu_device pointer
* @pll: information about the PLL
* @freq: requested frequency
* @dot_clock_p: resulting pixel clock
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 82e9ecf84352..71ee361d0972 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -233,6 +233,10 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_start))
+ return;
+
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
@@ -268,6 +272,10 @@ static void amdgpu_perf_read(struct perf_event *event)
pmu);
u64 count, prev;
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_get_count))
+ return;
+
do {
prev = local64_read(&hwc->prev_count);
@@ -297,6 +305,10 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
if (hwc->state & PERF_HES_UPTODATE)
return;
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_stop))
+ return;
+
switch (hwc->config_base) {
case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
@@ -326,6 +338,10 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
struct amdgpu_pmu_entry,
pmu);
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_start))
+ return -EINVAL;
+
switch (pe->pmu_perf_type) {
case AMDGPU_PMU_PERF_TYPE_DF:
hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF;
@@ -371,6 +387,9 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
struct amdgpu_pmu_entry *pe = container_of(event->pmu,
struct amdgpu_pmu_entry,
pmu);
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_stop))
+ return;
amdgpu_perf_stop(event, PERF_EF_UPDATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index d02c8637f909..786afe4f58f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -59,7 +59,7 @@ static DEVICE_ATTR_RO(mem_info_preempt_used);
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: TTM memory object
*
* Dummy, just count the space used without allocating resources or any limit.
*/
@@ -85,7 +85,7 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
* amdgpu_preempt_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated GTT again.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index c641f84649d6..dee17a0e1187 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -518,7 +518,7 @@ static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
return cmd;
}
-void release_psp_cmd_buf(struct psp_context *psp)
+static void release_psp_cmd_buf(struct psp_context *psp)
{
mutex_unlock(&psp->mutex);
}
@@ -2017,12 +2017,16 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
+ goto skip_pin_bo;
+
ret = psp_tmr_init(psp);
if (ret) {
DRM_ERROR("PSP tmr init failed!\n");
return ret;
}
+skip_pin_bo:
/*
* For ASICs with DF Cstate management centralized
* to PMFW, TMR setup should be performed after PMFW
@@ -2452,6 +2456,18 @@ skip_memalloc:
return ret;
}
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp, false, true);
+ /* Warning the XGMI seesion initialize failure
+ * Instead of stop driver initialization
+ */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
+ }
+
if (psp->ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 08133de21fdd..8f47c14ecbc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -867,9 +867,9 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
/* feature ctl end */
-void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
- struct ras_common_if *ras_block,
- struct ras_err_data *err_data)
+static void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_err_data *err_data)
{
switch (ras_block->sub_block_index) {
case AMDGPU_RAS_MCA_BLOCK__MP0:
@@ -892,6 +892,38 @@ void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
}
}
+static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ /*
+ * Choose the right query method according to
+ * whether the SMU supports querying error information
+ */
+ ret = smu_get_ecc_info(&adev->smu, (void *)&(ras->umc_ecc));
+ if (ret == -EOPNOTSUPP) {
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_count)
+ adev->umc.ras_funcs->query_ras_error_count(adev, err_data);
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_address)
+ adev->umc.ras_funcs->query_ras_error_address(adev, err_data);
+ } else if (!ret) {
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ecc_info_query_ras_error_count)
+ adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, err_data);
+
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ecc_info_query_ras_error_address)
+ adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, err_data);
+ }
+}
+
/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
struct ras_query_if *info)
@@ -905,15 +937,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__UMC:
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_count)
- adev->umc.ras_funcs->query_ras_error_count(adev, &err_data);
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_address)
- adev->umc.ras_funcs->query_ras_error_address(adev, &err_data);
+ amdgpu_ras_get_ecc_info(adev, &err_data);
break;
case AMDGPU_RAS_BLOCK__SDMA:
if (adev->sdma.funcs->query_ras_error_count) {
@@ -1137,9 +1161,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
/**
* amdgpu_ras_query_error_count -- Get error counts of all IPs
- * adev: pointer to AMD GPU device
- * ce_count: pointer to an integer to be set to the count of correctible errors.
- * ue_count: pointer to an integer to be set to the count of uncorrectible
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable
* errors.
*
* If set, @ce_count or @ue_count, count and return the corresponding
@@ -1568,6 +1592,7 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
/* Let IP handle its data, maybe we need get the output
* from the callback to update the error type/count, etc
*/
+ memset(&err_data, 0, sizeof(err_data));
ret = data->cb(obj->adev, &err_data, &entry);
/* ue will trigger an interrupt, and in that case
* we need do a reset to recovery the whole system.
@@ -1723,6 +1748,16 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
continue;
+ /*
+ * This is a workaround for Aldebaran: skip sending the message to
+ * the SMU to get the ecc_info table, because the SMU currently
+ * fails to handle that request.
+ * Remove this once the SMU handles the ecc_info table correctly.
+ */
+ if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
+ continue;
+
amdgpu_ras_query_error_status(adev, &info);
}
}
@@ -1804,8 +1839,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
- status = amdgpu_vram_mgr_query_page_status(
- ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+ status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
data->bps[i].retired_page);
if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
@@ -1906,8 +1940,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
goto out;
}
- amdgpu_vram_mgr_reserve_range(
- ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+ amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GPU_PAGE_SIZE);
@@ -1935,9 +1968,11 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
if (!con || !con->eh_data)
return 0;
+ mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
save_count = data->count - control->ras_num_recs;
+ mutex_unlock(&con->recovery_lock);
/* only new entries are saved */
if (save_count > 0) {
if (amdgpu_ras_eeprom_append(control,
@@ -2336,7 +2371,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
}
/* Init poison supported flag, the default value is false */
- if (adev->df.funcs &&
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ /* enabled by default when GPU is connected to CPU */
+ con->poison_supported = true;
+ }
+ else if (adev->df.funcs &&
adev->df.funcs->query_ras_poison_mode &&
adev->umc.ras_funcs &&
adev->umc.ras_funcs->query_ras_poison_mode) {
@@ -2477,7 +2516,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev,
amdgpu_ras_sysfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
- amdgpu_ras_feature_enable(adev, ras_block, 0);
}
/* do some init work after IP late init as dependence.
@@ -2647,7 +2685,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
* and error occurred in DramECC (Extended error code = 0) then only
* process the error, else bail out.
*/
- if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) &&
+ if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
(XEC(m->status, 0x3f) == 0x0)))
return NOTIFY_DONE;
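
amdgpu_ras_get_ecc_info() probes the SMU first and dispatches on the result: -EOPNOTSUPP falls back to the legacy UMC register queries, while success routes through the new ecc_info_* callbacks. A condensed sketch of the dispatch; the NULL checks and the address queries of the original are omitted for brevity:

/* Sketch: dispatch on smu_get_ecc_info()'s return value, as the new
 * amdgpu_ras_get_ecc_info() does (each path reduced to one call). */
static void ecc_dispatch_sketch(struct amdgpu_device *adev,
				struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = smu_get_ecc_info(&adev->smu, (void *)&(ras->umc_ecc));

	if (ret == -EOPNOTSUPP)	/* no ecc_info table: read UMC registers */
		adev->umc.ras_funcs->query_ras_error_count(adev, err_data);
	else if (!ret)		/* table retrieved: parse counts out of it */
		adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev,
								    err_data);
}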
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index e36f4de9fa55..1c708122d492 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -319,6 +319,19 @@ struct ras_common_if {
char name[32];
};
+#define MAX_UMC_CHANNEL_NUM 32
+
+struct ecc_info_per_ch {
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+};
+
+struct umc_ecc_info {
+ struct ecc_info_per_ch ecc[MAX_UMC_CHANNEL_NUM];
+};
+
struct amdgpu_ras {
/* ras infrastructure */
/* for ras itself. */
@@ -358,6 +371,9 @@ struct amdgpu_ras {
struct delayed_work ras_counte_delay_work;
atomic_t ras_ue_count;
atomic_t ras_ce_count;
+
+ /* record umc error info queried from smu */
+ struct umc_ecc_info umc_ecc;
};
struct ras_fs_data {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 862eb3c1c4c5..f7d8487799b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -252,41 +252,25 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner)
{
- struct dma_resv_list *flist;
+ struct dma_resv_iter cursor;
struct dma_fence *f;
- unsigned i;
- int r = 0;
+ int r;
if (resv == NULL)
return -EINVAL;
- /* always sync to the exclusive fence */
- f = dma_resv_excl_fence(resv);
- dma_fence_chain_for_each(f, f) {
- struct dma_fence_chain *chain = to_dma_fence_chain(f);
-
- if (amdgpu_sync_test_fence(adev, mode, owner, chain ?
- chain->fence : f)) {
- r = amdgpu_sync_fence(sync, f);
- dma_fence_put(f);
- if (r)
- return r;
- break;
- }
- }
-
- flist = dma_resv_shared_list(resv);
- if (!flist)
- return 0;
-
- for (i = 0; i < flist->shared_count; ++i) {
- f = rcu_dereference_protected(flist->shared[i],
- dma_resv_held(resv));
-
- if (amdgpu_sync_test_fence(adev, mode, owner, f)) {
- r = amdgpu_sync_fence(sync, f);
- if (r)
- return r;
+ dma_resv_for_each_fence(&cursor, resv, true, f) {
+ dma_fence_chain_for_each(f, f) {
+ struct dma_fence_chain *chain = to_dma_fence_chain(f);
+
+ if (amdgpu_sync_test_fence(adev, mode, owner, chain ?
+ chain->fence : f)) {
+ r = amdgpu_sync_fence(sync, f);
+ dma_fence_put(f);
+ if (r)
+ return r;
+ break;
+ }
}
}
return 0;
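
The manual exclusive-fence plus shared-list walk becomes a single dma_resv_for_each_fence() loop; the true argument includes shared fences, and the iterator takes care of the RCU and locking details the old code open-coded. A minimal usage sketch:

/* Sketch: iterating all fences of a reservation object with the
 * dma_resv iterator instead of touching the shared list directly. */
static void walk_fences_sketch(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f;

	dma_resv_for_each_fence(&cursor, resv, true /* also shared */, f) {
		/* exclusive fence first, then each shared fence */
		pr_debug("fence context %llu seqno %llu\n",
			 f->context, f->seqno);
	}
}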
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c875f1cdd2af..5c3f24069f2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -43,6 +43,7 @@
#include <linux/sizes.h>
#include <linux/module.h>
+#include <drm/drm_drv.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -116,17 +117,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = ttm_to_amdgpu_bo(bo);
if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) {
- struct dma_fence *fence;
- struct dma_resv *resv = &bo->base._resv;
-
- rcu_read_lock();
- fence = rcu_dereference(resv->fence_excl);
- if (fence && !fence->ops->signaled)
- dma_fence_enable_sw_signaling(fence);
-
placement->num_placement = 0;
placement->num_busy_placement = 0;
- rcu_read_unlock();
return;
}
@@ -922,11 +914,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
ttm->num_pages, bo_mem, ttm);
}
- if (bo_mem->mem_type == AMDGPU_PL_GDS ||
- bo_mem->mem_type == AMDGPU_PL_GWS ||
- bo_mem->mem_type == AMDGPU_PL_OA)
- return -EINVAL;
-
if (bo_mem->mem_type != TTM_PL_TT ||
!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
gtt->offset = AMDGPU_BO_INVALID_OFFSET;
@@ -1353,10 +1340,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
unsigned long num_pages = bo->resource->num_pages;
+ struct dma_resv_iter resv_cursor;
struct amdgpu_res_cursor cursor;
- struct dma_resv_list *flist;
struct dma_fence *f;
- int i;
/* Swapout? */
if (bo->resource->mem_type == TTM_PL_SYSTEM)
@@ -1370,14 +1356,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- flist = dma_resv_shared_list(bo->base.resv);
- if (flist) {
- for (i = 0; i < flist->shared_count; ++i) {
- f = rcu_dereference_protected(flist->shared[i],
- dma_resv_held(bo->base.resv));
- if (amdkfd_fence_check_mm(f, current->mm))
- return false;
- }
+ dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) {
+ if (amdkfd_fence_check_mm(f, current->mm))
+ return false;
}
switch (bo->resource->mem_type) {
@@ -1824,6 +1805,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*/
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
+ int idx;
if (!adev->mman.initialized)
return;
@@ -1838,6 +1820,15 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+ if (adev->mman.aper_base_kaddr)
+ iounmap(adev->mman.aper_base_kaddr);
+ adev->mman.aper_base_kaddr = NULL;
+
+ drm_dev_exit(idx);
+ }
+
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 7346ecff4438..f8f48be16d80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -114,8 +114,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
-int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
+uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr);
+int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
@@ -129,11 +129,11 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
void amdgpu_vram_mgr_free_sgt(struct device *dev,
enum dma_data_direction dir,
struct sg_table *sgt);
-uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man);
-int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
+uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr);
+uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
+int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size);
-int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
int amdgpu_ttm_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index a90029ee9733..46264a4002f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -23,6 +23,120 @@
#include "amdgpu_ras.h"
+static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry,
+ bool reset)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ ret = smu_get_ecc_info(&adev->smu, (void *)&(con->umc_ecc));
+ if (ret == -EOPNOTSUPP) {
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_count)
+ adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev, "Failed to alloc memory for "
+ "umc error address record!\n");
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
+ }
+ } else if (!ret) {
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ecc_info_query_ras_error_count)
+ adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ecc_info_query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev, "Failed to alloc memory for "
+ "umc error address record!\n");
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, ras_error_status);
+ }
+ }
+
+ /* only uncorrectable error needs gpu reset */
+ if (err_data->ue_count) {
+ dev_info(adev->dev, "%ld uncorrectable hardware errors "
+ "detected in UMC block\n",
+ err_data->ue_count);
+
+ if ((amdgpu_bad_page_threshold != 0) &&
+ err_data->err_addr_cnt) {
+ amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+ err_data->err_addr_cnt);
+ amdgpu_ras_save_bad_pages(adev);
+
+ if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
+ adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
+ }
+
+ if (reset)
+ amdgpu_ras_reset_gpu(adev);
+ }
+
+ kfree(err_data->err_addr);
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+ void *ras_error_status,
+ bool reset)
+{
+ int ret;
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+ ret =
+ amdgpu_umc_do_page_retirement(adev, ras_error_status, NULL, reset);
+
+ if (ret == AMDGPU_RAS_SUCCESS && obj) {
+ obj->err_data.ue_count += err_data->ue_count;
+ obj->err_data.ce_count += err_data->ce_count;
+ }
+
+ return ret;
+}
+
+static int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry)
+{
+ return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
+}
+
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
{
int r;
@@ -88,61 +202,6 @@ void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
}
}
-int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
- void *ras_error_status,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_count)
- adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
-
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_address &&
- adev->umc.max_ras_err_cnt_per_query) {
- err_data->err_addr =
- kcalloc(adev->umc.max_ras_err_cnt_per_query,
- sizeof(struct eeprom_table_record), GFP_KERNEL);
-
- /* still call query_ras_error_address to clear error status
- * even NOMEM error is encountered
- */
- if(!err_data->err_addr)
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record!\n");
-
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
- }
-
- /* only uncorrectable error needs gpu reset */
- if (err_data->ue_count) {
- dev_info(adev->dev, "%ld uncorrectable hardware errors "
- "detected in UMC block\n",
- err_data->ue_count);
-
- if ((amdgpu_bad_page_threshold != 0) &&
- err_data->err_addr_cnt) {
- amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
- err_data->err_addr_cnt);
- amdgpu_ras_save_bad_pages(adev);
-
- if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
- adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
- }
-
- amdgpu_ras_reset_gpu(adev);
- }
-
- kfree(err_data->err_addr);
- return AMDGPU_RAS_SUCCESS;
-}
-
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
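
The new amdgpu_umc_do_page_retirement() above merges both query paths (the SMU ECC-info table and the direct UMC callbacks) into one retirement sequence: count errors, allocate address records, query addresses (which also clears error status), retire the pages, and reset on uncorrectable errors. A minimal userspace sketch of that sequence; every name here is illustrative, not taken from the driver:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct err_record { uint64_t retired_page; };

    /* hypothetical stand-ins for the hardware query callbacks */
    static unsigned query_error_count(void) { return 2; }

    static void query_error_addresses(struct err_record *recs, unsigned max,
                                      unsigned *n)
    {
        /* in the driver this call also clears the error status registers,
         * which is why it still runs when the allocation above failed */
        unsigned i;

        *n = 0;
        if (!recs)
            return;
        for (i = 0; i < max && i < 2; i++)
            recs[(*n)++].retired_page = 0x1000ull * (i + 1);
    }

    int main(void)
    {
        unsigned max = 8, ue_count, addr_cnt = 0;
        struct err_record *recs;

        ue_count = query_error_count();
        recs = calloc(max, sizeof(*recs));
        if (!recs)
            fprintf(stderr, "no memory for error records, still clearing status\n");
        query_error_addresses(recs, max, &addr_cnt);

        if (ue_count && addr_cnt) {
            for (unsigned i = 0; i < addr_cnt; i++)
                printf("retiring page at 0x%llx\n",
                       (unsigned long long)recs[i].retired_page);
            /* the driver would also save the pages to EEPROM and, when
             * asked, trigger a GPU reset for uncorrectable errors */
        }
        free(recs);
        return 0;
    }
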
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 1f5fe2315236..b72194e8bfe5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -49,6 +49,10 @@ struct amdgpu_umc_ras_funcs {
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
+ void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
+ void *ras_error_status);
};
struct amdgpu_umc_funcs {
@@ -74,9 +78,9 @@ struct amdgpu_umc {
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
-int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
void *ras_error_status,
- struct amdgpu_iv_entry *entry);
+ bool reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 688bef1649b5..344f711ad144 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -434,7 +434,6 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
*
* @ring: ring we should submit the msg to
* @handle: VCE session handle to use
- * @bo: amdgpu object for which we query the offset
* @fence: optional fence to return
*
* Open up a stream for HW test
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 585961c2f5f2..9a19a6a57b23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -286,20 +286,13 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
bool ret = false;
+ int vcn_config = adev->vcn.vcn_config[vcn_instance];
- int major;
- int minor;
- int revision;
-
- /* if cannot find IP data, then this VCN does not exist */
- if (amdgpu_discovery_get_vcn_version(adev, vcn_instance, &major, &minor, &revision) != 0)
- return true;
-
- if ((type == VCN_ENCODE_RING) && (revision & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
+ if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
ret = true;
- } else if ((type == VCN_DECODE_RING) && (revision & VCN_BLOCK_DECODE_DISABLE_MASK)) {
+ } else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
ret = true;
- } else if ((type == VCN_UNIFIED_RING) && (revision & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
+ } else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
ret = true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index bfa27ea94804..5d3728b027d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -235,6 +235,7 @@ struct amdgpu_vcn {
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
+ uint8_t vcn_config[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
struct mutex vcn_pg_lock;
struct mutex vcn1_jpeg1_workaround;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 04cf9b207e62..07bc0f504713 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -283,17 +283,15 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
if (!*data)
- return -ENOMEM;
+ goto data_failure;
bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
- bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
+ if (!bps)
+ goto bps_failure;
- if (!bps || !bps_bo) {
- kfree(bps);
- kfree(bps_bo);
- kfree(*data);
- return -ENOMEM;
- }
+ bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
+ if (!bps_bo)
+ goto bps_bo_failure;
(*data)->bps = bps;
(*data)->bps_bo = bps_bo;
@@ -303,6 +301,13 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
virt->ras_init_done = true;
return 0;
+
+bps_bo_failure:
+ kfree(bps);
+bps_failure:
+ kfree(*data);
+data_failure:
+ return -ENOMEM;
}
static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
@@ -548,7 +553,6 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
struct amd_sriov_msg_vf2pf_info *vf2pf_info;
- struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
@@ -571,8 +575,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->driver_cert = 0;
vf2pf_info->os_info.all = 0;
- vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
- vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
+ vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20;
+ vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
@@ -617,16 +621,34 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
- uint64_t bp_block_offset = 0;
- uint32_t bp_block_size = 0;
- struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
-
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
if (adev->mman.fw_vram_usage_va != NULL) {
- adev->virt.vf2pf_update_interval_ms = 2000;
+ /* go through this logic in ip_init and in reset to init the workqueue */
+ amdgpu_virt_exchange_data(adev);
+
+ INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+ schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+ } else if (adev->bios != NULL) {
+ /* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ }
+}
+
+
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
+{
+ uint64_t bp_block_offset = 0;
+ uint32_t bp_block_size = 0;
+ struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
+
+ if (adev->mman.fw_vram_usage_va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
@@ -652,22 +674,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->virt.ras_init_done)
amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
}
- } else if (adev->bios != NULL) {
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
-
- amdgpu_virt_read_pf2vf_data(adev);
-
- return;
- }
-
- if (adev->virt.vf2pf_update_interval_ms != 0) {
- INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
- schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
}
+
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -710,6 +720,10 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
vi_set_virt_ops(adev);
break;
case CHIP_VEGA10:
+ soc15_set_virt_ops(adev);
+ /* send a dummy GPU_INIT_DATA request to host on vega10 */
+ amdgpu_virt_request_init_data(adev);
+ break;
case CHIP_VEGA20:
case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
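
The reworked amdgpu_virt_init_ras_err_handler_data() above replaces the combined free-everything error branch with the usual kernel goto-unwind ladder: each allocation gets a label that releases only what was allocated before it. A standalone sketch of the pattern, with hypothetical names:

    #include <stdlib.h>

    struct handler_data { int *bps; int *bps_bo; };

    /* each allocation gets a label that unwinds everything allocated
     * before it, so no allocation is freed twice or leaked */
    static int init_handler_data(struct handler_data **data, size_t n)
    {
        int *bps, *bps_bo;

        *data = malloc(sizeof(**data));
        if (!*data)
            goto data_failure;

        bps = calloc(n, sizeof(*bps));
        if (!bps)
            goto bps_failure;

        bps_bo = calloc(n, sizeof(*bps_bo));
        if (!bps_bo)
            goto bps_bo_failure;

        (*data)->bps = bps;
        (*data)->bps_bo = bps_bo;
        return 0;

    bps_bo_failure:
        free(bps);
    bps_failure:
        free(*data);
    data_failure:
        return -1; /* -ENOMEM in the kernel */
    }

    int main(void)
    {
        struct handler_data *d;

        if (init_handler_data(&d, 16))
            return 1;
        free(d->bps);
        free(d->bps_bo);
        free(d);
        return 0;
    }
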
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 8d4c20bb71c5..9adfb8d63280 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -308,6 +308,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_detect_virtualization(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index ac9a8cd21c4b..d99c8779b51e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -16,6 +16,8 @@
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
+#include "atom.h"
+#include "amdgpu_irq.h"
/**
* DOC: amdgpu_vkms
@@ -41,16 +43,16 @@ static const u32 amdgpu_vkms_formats[] = {
static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
{
- struct amdgpu_vkms_output *output = container_of(timer,
- struct amdgpu_vkms_output,
- vblank_hrtimer);
- struct drm_crtc *crtc = &output->crtc;
+ struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer);
+ struct drm_crtc *crtc = &amdgpu_crtc->base;
+ struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
u64 ret_overrun;
bool ret;
- ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+ ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer,
output->period_ns);
- WARN_ON(ret_overrun != 1);
+ if (ret_overrun != 1)
+ DRM_WARN("%s: vblank timer overrun\n", __func__);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
@@ -65,22 +67,21 @@ static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
drm_calc_timestamping_constants(crtc, &crtc->mode);
- hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate;
out->period_ns = ktime_set(0, vblank->framedur_ns);
- hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
+ hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL);
return 0;
}
static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
- struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- hrtimer_cancel(&out->vblank_hrtimer);
+ hrtimer_cancel(&amdgpu_crtc->vblank_timer);
}
static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
@@ -92,13 +93,14 @@ static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
unsigned int pipe = crtc->index;
struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (!READ_ONCE(vblank->enabled)) {
*vblank_time = ktime_get();
return true;
}
- *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
+ *vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires);
if (WARN_ON(*vblank_time == vblank->time))
return true;
@@ -142,15 +144,16 @@ static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ unsigned long flags;
if (crtc->state->event) {
- spin_lock(&crtc->dev->event_lock);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (drm_crtc_vblank_get(crtc) != 0)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
else
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
- spin_unlock(&crtc->dev->event_lock);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
crtc->state->event = NULL;
}
@@ -165,6 +168,8 @@ static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor)
{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
@@ -176,6 +181,17 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);
+ amdgpu_crtc->crtc_id = drm_crtc_index(crtc);
+ adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc;
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
+
+ hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;
+
return ret;
}
@@ -375,6 +391,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
int index)
{
struct drm_plane *plane;
+ uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID};
int ret;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
@@ -385,7 +402,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
&amdgpu_vkms_plane_funcs,
amdgpu_vkms_formats,
ARRAY_SIZE(amdgpu_vkms_formats),
- NULL, type, NULL);
+ modifiers, type, NULL);
if (ret) {
kfree(plane);
return ERR_PTR(ret);
@@ -396,12 +413,12 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
return plane;
}
-int amdgpu_vkms_output_init(struct drm_device *dev,
- struct amdgpu_vkms_output *output, int index)
+static int amdgpu_vkms_output_init(struct drm_device *dev,
+ struct amdgpu_vkms_output *output, int index)
{
struct drm_connector *connector = &output->connector;
struct drm_encoder *encoder = &output->encoder;
- struct drm_crtc *crtc = &output->crtc;
+ struct drm_crtc *crtc = &output->crtc.base;
struct drm_plane *primary, *cursor = NULL;
int ret;
@@ -465,6 +482,11 @@ static int amdgpu_vkms_sw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
+ sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
+ if (!adev->amdgpu_vkms_output)
+ return -ENOMEM;
+
adev_to_drm(adev)->max_vblank_count = 0;
adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;
@@ -481,10 +503,6 @@ static int amdgpu_vkms_sw_init(void *handle)
if (r)
return r;
- adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
- if (!adev->amdgpu_vkms_output)
- return -ENOMEM;
-
/* allocate crtcs, encoders, connectors */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
@@ -504,15 +522,16 @@ static int amdgpu_vkms_sw_fini(void *handle)
int i = 0;
for (i = 0; i < adev->mode_info.num_crtc; i++)
- if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function)
- hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer);
-
- kfree(adev->mode_info.bios_hardcoded_edid);
- kfree(adev->amdgpu_vkms_output);
+ if (adev->mode_info.crtcs[i])
+ hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
drm_kms_helper_poll_fini(adev_to_drm(adev));
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
+
+ kfree(adev->mode_info.bios_hardcoded_edid);
+ kfree(adev->amdgpu_vkms_output);
return 0;
}
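
The vblank hrtimer now lives in amdgpu_crtc and the WARN_ON on the hrtimer_forward_now() return value becomes a recoverable DRM_WARN. A userspace sketch of the forward-and-count-overruns contract the simulator relies on (timer_forward() is a stand-in for hrtimer_forward_now(), not a kernel API):

    #include <stdio.h>
    #include <stdint.h>

    /* push the expiry forward by whole periods past "now" and report how
     * many periods elapsed; exactly one overrun per tick is the normal
     * case, anything else means the timer fell behind */
    static uint64_t timer_forward(uint64_t *expires, uint64_t now,
                                  uint64_t period)
    {
        uint64_t overruns = 0;

        while (*expires <= now) {
            *expires += period;
            overruns++;
        }
        return overruns;
    }

    int main(void)
    {
        uint64_t expires = 0, period = 16666667; /* ~60 Hz in ns */
        uint64_t now = 16000000;                 /* just under one period */
        uint64_t ret = timer_forward(&expires, now, period);

        if (ret != 1)
            fprintf(stderr, "vblank timer overrun: %llu\n",
                    (unsigned long long)ret);
        printf("next expiry at %llu ns\n", (unsigned long long)expires);
        return 0;
    }
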
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
index 97f1b79c0724..4f8722ff37c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
@@ -10,15 +10,14 @@
#define YRES_MAX 16384
#define drm_crtc_to_amdgpu_vkms_output(target) \
- container_of(target, struct amdgpu_vkms_output, crtc)
+ container_of(target, struct amdgpu_vkms_output, crtc.base)
extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block;
struct amdgpu_vkms_output {
- struct drm_crtc crtc;
+ struct amdgpu_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
- struct hrtimer vblank_hrtimer;
ktime_t period_ns;
struct drm_pending_vblank_event *event;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0e7dc23f78e7..b37fc7d7d2c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -53,7 +53,7 @@
* can be mapped as snooped (cached system pages) or unsnooped
* (uncached system pages).
* Each VM has an ID associated with it and there is a page table
- * associated with each VMID. When execting a command buffer,
+ * associated with each VMID. When executing a command buffer,
* the kernel tells the ring what VMID to use for that command
* buffer. VMIDs are allocated dynamically as commands are submitted.
* The userspace drivers maintain their own address space and the kernel
@@ -2102,30 +2102,14 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
struct dma_resv *resv = vm->root.bo->tbo.base.resv;
- struct dma_fence *excl, **shared;
- unsigned i, shared_count;
- int r;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
- r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
- if (r) {
- /* Not enough memory to grab the fence list, as last resort
- * block for all the fences to complete.
- */
- dma_resv_wait_timeout(resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
- return;
- }
-
- /* Add a callback for each fence in the reservation object */
- amdgpu_vm_prt_get(adev);
- amdgpu_vm_add_prt_cb(adev, excl);
-
- for (i = 0; i < shared_count; ++i) {
+ dma_resv_for_each_fence(&cursor, resv, true, fence) {
+ /* Add a callback for each fence in the reservation object */
amdgpu_vm_prt_get(adev);
- amdgpu_vm_add_prt_cb(adev, shared[i]);
+ amdgpu_vm_add_prt_cb(adev, fence);
}
-
- kfree(shared);
}
/**
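
amdgpu_vm_prt_fini() above drops the kmalloc'ed fence snapshot in favour of dma_resv_for_each_fence(), which walks the fences in place through a cursor and therefore cannot fail with -ENOMEM. A toy model of the cursor-style iteration, with an invented list type and macro:

    #include <stdio.h>

    struct fence { int seqno; struct fence *next; };

    /* illustrative cursor, loosely modelled on dma_resv_iter: the caller
     * walks fences in place instead of snapshotting them into an array */
    struct fence_iter { struct fence *cur; };

    #define for_each_fence(it, head, f) \
        for ((it)->cur = (head); ((f) = (it)->cur) != NULL; (it)->cur = (f)->next)

    static void add_callback(struct fence *f)
    {
        printf("callback armed on fence %d\n", f->seqno);
    }

    int main(void)
    {
        struct fence f2 = { 2, NULL }, f1 = { 1, &f2 };
        struct fence_iter it;
        struct fence *f;

        for_each_fence(&it, &f1, f)
            add_callback(f);   /* one callback per fence, no temp array */
        return 0;
    }
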
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 7b2b0980ec41..7a2b487db57c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -96,10 +96,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- struct ttm_resource_manager *man;
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
+ return sysfs_emit(buf, "%llu\n",
+ amdgpu_vram_mgr_usage(&adev->mman.vram_mgr));
}
/**
@@ -116,10 +115,9 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- struct ttm_resource_manager *man;
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
+ return sysfs_emit(buf, "%llu\n",
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
}
/**
@@ -263,16 +261,15 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
/**
* amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
* @start: start address of the range in VRAM
* @size: size of the range
*
- * Reserve memory from start addess with the specified size in VRAM
+ * Reserve memory from start address with the specified size in VRAM
*/
-int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size)
{
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_vram_reservation *rsv;
rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
@@ -285,7 +282,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
spin_lock(&mgr->lock);
list_add_tail(&mgr->reservations_pending, &rsv->node);
- amdgpu_vram_mgr_do_reserve(man);
+ amdgpu_vram_mgr_do_reserve(&mgr->manager);
spin_unlock(&mgr->lock);
return 0;
@@ -294,7 +291,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
/**
* amdgpu_vram_mgr_query_page_status - query the reservation status
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
* @start: start address of a page in VRAM
*
* Returns:
@@ -302,10 +299,9 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
* 0: the page has been reserved
* -ENOENT: the input page is not a reservation
*/
-int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start)
{
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_vram_reservation *rsv;
int ret;
@@ -632,28 +628,24 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev,
/**
* amdgpu_vram_mgr_usage - how many bytes are used in this domain
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
*
* Returns how many bytes are used in this domain.
*/
-uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr)
{
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-
return atomic64_read(&mgr->usage);
}
/**
* amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
*
* Returns how many bytes are used in the visible part of VRAM
*/
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
{
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-
return atomic64_read(&mgr->vis_usage);
}
@@ -675,8 +667,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
spin_unlock(&mgr->lock);
drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
- man->size, amdgpu_vram_mgr_usage(man) >> 20,
- amdgpu_vram_mgr_vis_usage(man) >> 20);
+ man->size, amdgpu_vram_mgr_usage(mgr) >> 20,
+ amdgpu_vram_mgr_vis_usage(mgr) >> 20);
}
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
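
The amdgpu_vram_mgr API above switches from taking the embedded struct ttm_resource_manager and downcasting with to_vram_mgr() to taking the struct amdgpu_vram_mgr directly. A small sketch of the two call shapes, assuming an invented pair of types:

    #include <stdio.h>
    #include <stddef.h>

    struct resource_manager { unsigned long long size; };

    struct vram_mgr {
        struct resource_manager manager;  /* embedded base object */
        unsigned long long usage;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* old style: take the base pointer and downcast */
    static unsigned long long usage_via_base(struct resource_manager *man)
    {
        struct vram_mgr *mgr = container_of(man, struct vram_mgr, manager);
        return mgr->usage;
    }

    /* new style, as in the patch: callers that already hold the derived
     * object pass it directly and the downcast disappears */
    static unsigned long long usage_direct(struct vram_mgr *mgr)
    {
        return mgr->usage;
    }

    int main(void)
    {
        struct vram_mgr mgr = { { 1024 }, 512 };

        printf("%llu %llu\n", usage_via_base(&mgr.manager),
               usage_direct(&mgr));
        return 0;
    }
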
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 567df2db23ac..e8b8f28c2f72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -208,6 +208,7 @@ static struct attribute *amdgpu_xgmi_hive_attrs[] = {
&amdgpu_xgmi_hive_id,
NULL
};
+ATTRIBUTE_GROUPS(amdgpu_xgmi_hive);
static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -237,7 +238,7 @@ static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
struct kobj_type amdgpu_xgmi_hive_type = {
.release = amdgpu_xgmi_hive_release,
.sysfs_ops = &amdgpu_xgmi_hive_ops,
- .default_attrs = amdgpu_xgmi_hive_attrs,
+ .default_groups = amdgpu_xgmi_hive_groups,
};
static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
@@ -265,6 +266,11 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
+ if ((!adev->df.funcs) ||
+ (!adev->df.funcs->get_fica) ||
+ (!adev->df.funcs->set_fica))
+ return -EINVAL;
+
fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
if (fica_out != 0x1f)
pr_err("xGMI error counters not enabled!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 6134ed964027..a92d86e12718 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -469,7 +469,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
if (amdgpu_connector->use_digital &&
(amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE))
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else if (amdgpu_connector->use_digital)
@@ -488,7 +488,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
if (amdgpu_audio != 0) {
if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
@@ -506,7 +506,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
} else if (amdgpu_audio != 0) {
if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 54f28c075f21..f10ce740a29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1428,6 +1428,10 @@ static int cik_asic_reset(struct amdgpu_device *adev)
{
int r;
+ /* APUs don't have full asic reset */
+ if (adev->flags & AMD_IS_APU)
+ return 0;
+
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
dev_info(adev->dev, "BACO reset\n");
r = amdgpu_dpm_baco_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index b200b9e722d9..8318ee8339f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2092,22 +2092,18 @@ static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
return 1;
else
return 0;
- break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
return 3;
else
return 2;
- break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
return 5;
else
return 4;
- break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
return 6;
- break;
default:
DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index edb3e3b08eed..9189fb85a4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -63,6 +63,13 @@
#define mmGCEA_PROBE_MAP 0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX 0
+#define GFX9_RLCG_GC_WRITE_OLD (0x8 << 28)
+#define GFX9_RLCG_GC_WRITE (0x0 << 28)
+#define GFX9_RLCG_GC_READ (0x1 << 28)
+#define GFX9_RLCG_VFGATE_DISABLED 0x4000000
+#define GFX9_RLCG_WRONG_OPERATION_TYPE 0x2000000
+#define GFX9_RLCG_NOT_IN_RANGE 0x1000000
+
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
@@ -739,7 +746,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
-static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static u32 gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
{
static void *scratch_reg0;
static void *scratch_reg1;
@@ -748,21 +755,20 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
static void *spare_int;
static uint32_t grbm_cntl;
static uint32_t grbm_idx;
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+ u32 ret = 0;
+ u32 tmp;
scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
- scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
- scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG2_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG3_BASE_IDX] + mmSCRATCH_REG3)*4;
spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
- if (amdgpu_sriov_runtime(adev)) {
- pr_err("shouldn't call rlcg write register during runtime\n");
- return;
- }
-
if (offset == grbm_cntl || offset == grbm_idx) {
if (offset == grbm_cntl)
writel(v, scratch_reg2);
@@ -771,41 +777,95 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
} else {
- uint32_t i = 0;
- uint32_t retries = 50000;
-
+ /*
+ * SCRATCH_REG0 = read/write value
+ * SCRATCH_REG1[30:28] = command
+ * SCRATCH_REG1[19:0] = address in dword
+ * SCRATCH_REG1[26:24] = Error reporting
+ */
writel(v, scratch_reg0);
- writel(offset | 0x80000000, scratch_reg1);
+ writel(offset | flag, scratch_reg1);
writel(1, spare_int);
- for (i = 0; i < retries; i++) {
- u32 tmp;
+ for (i = 0; i < retries; i++) {
tmp = readl(scratch_reg1);
- if (!(tmp & 0x80000000))
+ if (!(tmp & flag))
break;
udelay(10);
}
- if (i >= retries)
- pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+
+ if (i >= retries) {
+ if (amdgpu_sriov_reg_indirect_gc(adev)) {
+ if (tmp & GFX9_RLCG_VFGATE_DISABLED)
+ pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset);
+ else if (tmp & GFX9_RLCG_WRONG_OPERATION_TYPE)
+ pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset);
+ else if (tmp & GFX9_RLCG_NOT_IN_RANGE)
+ pr_err("The register is not in range, program reg:0x%05x failed!\n", offset);
+ else
+ pr_err("Unknown error type, program reg:0x%05x failed!\n", offset);
+ } else
+ pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
+ }
}
+ ret = readl(scratch_reg0);
+
+ return ret;
+}
+
+static bool gfx_v9_0_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
+ int write, u32 *rlcg_flag)
+{
+
+ switch (hwip) {
+ case GC_HWIP:
+ if (amdgpu_sriov_reg_indirect_gc(adev)) {
+ *rlcg_flag = write ? GFX9_RLCG_GC_WRITE : GFX9_RLCG_GC_READ;
+
+ return true;
+ /* only in the new version are AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */
+ } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
+ *rlcg_flag = GFX9_RLCG_GC_WRITE_OLD;
+ return true;
+ }
+
+ break;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static u32 gfx_v9_0_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
+{
+ u32 rlcg_flag;
+
+ if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
+ return gfx_v9_0_rlcg_rw(adev, offset, 0, rlcg_flag);
+
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
+ return RREG32_NO_KIQ(offset);
+ else
+ return RREG32(offset);
}
static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset,
- u32 v, u32 acc_flags, u32 hwip)
+ u32 value, u32 acc_flags, u32 hwip)
{
- if ((acc_flags & AMDGPU_REGS_RLC) &&
- amdgpu_sriov_fullaccess(adev)) {
- gfx_v9_0_rlcg_w(adev, offset, v, acc_flags);
+ u32 rlcg_flag;
+ if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
+ gfx_v9_0_rlcg_rw(adev, offset, value, rlcg_flag);
return;
}
if (acc_flags & AMDGPU_REGS_NO_KIQ)
- WREG32_NO_KIQ(offset, v);
+ WREG32_NO_KIQ(offset, value);
else
- WREG32(offset, v);
+ WREG32(offset, value);
}
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
@@ -5135,7 +5195,7 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
else
- data = RREG32(reg);
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
@@ -5191,6 +5251,7 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.start = gfx_v9_0_rlc_start,
.update_spm_vmid = gfx_v9_0_update_spm_vmid,
.sriov_wreg = gfx_v9_0_sriov_wreg,
+ .sriov_rreg = gfx_v9_0_sriov_rreg,
.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
@@ -5796,16 +5857,16 @@ static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
TIME_STAMP_INT_ENABLE, 0);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
+ WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
TIME_STAMP_INT_ENABLE, 1);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
+ WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
break;
default:
break;
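
The rewritten gfx_v9_0_rlcg_rw() above turns the write-only scratch-register mailbox into a read/write one: the value travels in SCRATCH_REG0, the dword address plus a command in SCRATCH_REG1, and the caller polls until the command bits clear, with bits [26:24] reporting vfgate/range errors. A toy model of that handshake with a fake firmware side; the register layout follows the comment in the patch, everything else is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t scratch_reg0, scratch_reg1, spare_int;
    static uint32_t regfile[0x2000];

    /* stand-in for the RLC firmware: service the request and clear the
     * command bits in SCRATCH_REG1, which is the completion signal the
     * driver polls for */
    static void fake_firmware(void)
    {
        uint32_t offset = scratch_reg1 & 0x000fffff;
        uint32_t cmd = scratch_reg1 >> 28;

        if (cmd == 0x1)                      /* read */
            scratch_reg0 = regfile[offset];
        else                                 /* write */
            regfile[offset] = scratch_reg0;
        scratch_reg1 &= 0x000fffff;
    }

    static uint32_t rlcg_rw(uint32_t offset, uint32_t v, uint32_t flag)
    {
        int i, retries = 50000;

        scratch_reg0 = v;             /* SCRATCH_REG0 = read/write value  */
        scratch_reg1 = offset | flag; /* SCRATCH_REG1 = address | command */
        spare_int = 1;                /* ring the doorbell                */

        fake_firmware();
        for (i = 0; i < retries; i++)
            if (!(scratch_reg1 & flag))
                break;                /* firmware acked the request       */
        if (i >= retries)
            fprintf(stderr, "timeout programming reg 0x%05x\n", offset);
        return scratch_reg0;          /* read results come back in REG0   */
    }

    int main(void)
    {
        uint32_t GC_WRITE = 0x0u << 28, GC_READ = 0x1u << 28;

        rlcg_rw(0x1234, 0xdead, GC_WRITE);
        printf("read back: 0x%x\n", rlcg_rw(0x1234, 0, GC_READ));
        return 0;
    }
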
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 3ec5ff5a6dbe..38bb42727715 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -107,7 +107,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
/* Process it only if it's the first fault for this address */
if (entry->ih != &adev->irq.ih_soft &&
- amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
entry->timestamp))
return 1;
@@ -914,12 +914,6 @@ static int gmc_v10_0_sw_init(void *handle)
return r;
}
- if (adev->gmc.xgmi.supported) {
- r = adev->gfxhub.funcs->get_xgmi_info(adev);
- if (r)
- return r;
- }
-
r = gmc_v10_0_mc_init(adev);
if (r)
return r;
@@ -992,10 +986,14 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_gart_table_vram_pin(adev);
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
+ goto skip_pin_bo;
+
+ r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
if (r)
return r;
+skip_pin_bo:
r = adev->gfxhub.funcs->gart_enable(adev);
if (r)
return r;
@@ -1062,7 +1060,6 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
- amdgpu_gart_table_vram_unpin(adev);
}
static int gmc_v10_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 0fe714f54cca..cd6c38e083d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -476,7 +476,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
- r = amdgpu_gart_table_vram_pin(adev);
+ r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
if (r)
return r;
@@ -608,7 +608,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL3,
VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
(0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
- amdgpu_gart_table_vram_unpin(adev);
}
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0a50fdaced7e..ab8adbff9e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -620,7 +620,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
- r = amdgpu_gart_table_vram_pin(adev);
+ r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
if (r)
return r;
@@ -758,7 +758,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32(mmVM_L2_CNTL, tmp);
WREG32(mmVM_L2_CNTL2, 0);
- amdgpu_gart_table_vram_unpin(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 492ebed2915b..054733838292 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -515,10 +515,10 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
int r;
+ u32 tmp;
adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
if (!adev->gmc.vram_width) {
- u32 tmp;
int chansize, numchan;
/* Get VRAM information */
@@ -562,8 +562,15 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
adev->gmc.vram_width = numchan * chansize;
}
/* size in MB on si */
- adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ tmp = RREG32(mmCONFIG_MEMSIZE);
+ /* some boards may have garbage in the upper 16 bits */
+ if (tmp & 0xffff0000) {
+ DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+ if (tmp & 0xffff)
+ tmp &= 0xffff;
+ }
+ adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL;
+ adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_device_resize_fb_bar(adev);
@@ -837,7 +844,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
- r = amdgpu_gart_table_vram_pin(adev);
+ r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
if (r)
return r;
@@ -992,7 +999,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32(mmVM_L2_CNTL, tmp);
WREG32(mmVM_L2_CNTL2, 0);
- amdgpu_gart_table_vram_unpin(adev);
}
/**
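
gmc_v8_0_mc_init() above stops trusting the raw mmCONFIG_MEMSIZE readback because some boards leave garbage in the upper 16 bits. A tiny sketch of the sanitization, assuming the size-in-MiB convention stated in the patch:

    #include <stdio.h>
    #include <stdint.h>

    /* CONFIG_MEMSIZE reports VRAM size in MiB in the low 16 bits; some
     * boards leave garbage in the upper half of the register */
    static uint64_t vram_bytes(uint32_t config_memsize)
    {
        uint32_t tmp = config_memsize;

        if (tmp & 0xffff0000) {
            printf("probable bad vram size: 0x%08x\n", tmp);
            if (tmp & 0xffff)
                tmp &= 0xffff;   /* trust the low half if it is non-zero */
        }
        return (uint64_t)tmp * 1024 * 1024;
    }

    int main(void)
    {
        /* 0x2000 MiB survives the masking, i.e. 8 GiB */
        printf("%llu\n", (unsigned long long)vram_bytes(0xdead2000));
        return 0;
    }
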
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index d84523cf5f75..88c1eb9ad068 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -72,6 +72,9 @@
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
+
static const char *gfxhub_client_ids[] = {
"CB",
@@ -478,9 +481,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
hub = &adev->vmhub[j];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
+
+ if (j == AMDGPU_GFXHUB_0)
+ tmp = RREG32_SOC15_IP(GC, reg);
+ else
+ tmp = RREG32_SOC15_IP(MMHUB, reg);
+
tmp &= ~bits;
- WREG32(reg, tmp);
+
+ if (j == AMDGPU_GFXHUB_0)
+ WREG32_SOC15_IP(GC, reg, tmp);
+ else
+ WREG32_SOC15_IP(MMHUB, reg, tmp);
}
}
break;
@@ -489,9 +501,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
hub = &adev->vmhub[j];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
+
+ if (j == AMDGPU_GFXHUB_0)
+ tmp = RREG32_SOC15_IP(GC, reg);
+ else
+ tmp = RREG32_SOC15_IP(MMHUB, reg);
+
tmp |= bits;
- WREG32(reg, tmp);
+
+ if (j == AMDGPU_GFXHUB_0)
+ WREG32_SOC15_IP(GC, reg, tmp);
+ else
+ WREG32_SOC15_IP(MMHUB, reg, tmp);
}
}
break;
@@ -523,7 +544,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
/* Process it onyl if it's the first fault for this address */
if (entry->ih != &adev->irq.ih_soft &&
- amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
entry->timestamp))
return 1;
@@ -788,9 +809,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
if (use_semaphore) {
for (j = 0; j < adev->usec_timeout; j++) {
- /* a read return value of 1 means semaphore acuqire */
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
- hub->eng_distance * eng);
+ /* a read return value of 1 means semaphore acquire */
+ if (vmhub == AMDGPU_GFXHUB_0)
+ tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
+ else
+ tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
+
if (tmp & 0x1)
break;
udelay(1);
@@ -801,8 +825,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
do {
- WREG32_NO_KIQ(hub->vm_inv_eng0_req +
- hub->eng_distance * eng, inv_req);
+ if (vmhub == AMDGPU_GFXHUB_0)
+ WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+ else
+ WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
/*
* Issue a dummy read to wait for the ACK register to
@@ -815,8 +841,11 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
hub->eng_distance * eng);
for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
- hub->eng_distance * eng);
+ if (vmhub == AMDGPU_GFXHUB_0)
+ tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
+ else
+ tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
+
if (tmp & (1 << vmid))
break;
udelay(1);
@@ -827,13 +856,16 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
} while (inv_req);
/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
- if (use_semaphore)
+ if (use_semaphore) {
/*
* add semaphore release after invalidation,
* write with 0 means semaphore release
*/
- WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
- hub->eng_distance * eng, 0);
+ if (vmhub == AMDGPU_GFXHUB_0)
+ WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+ else
+ WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+ }
spin_unlock(&adev->gmc.invalidate_lock);
@@ -1105,6 +1137,8 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
unsigned size;
+ /* TODO move to DC so GMC doesn't need to hard-code DCN registers */
+
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
@@ -1113,7 +1147,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
- case IP_VERSION(2, 1, 0):
viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
size = (REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
@@ -1121,6 +1154,14 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
4);
break;
+ case IP_VERSION(2, 1, 0):
+ viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
+ size = (REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
+ 4);
+ break;
default:
viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
@@ -1294,7 +1335,8 @@ static int gmc_v9_0_late_init(void *handle)
if (!amdgpu_sriov_vf(adev) &&
(adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
- if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+ if (adev->df.funcs &&
+ adev->df.funcs->enable_ecc_force_par_wr_rmw)
adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
}
}
@@ -1505,9 +1547,11 @@ static int gmc_v9_0_sw_init(void *handle)
chansize = 64;
else
chansize = 128;
-
- numchan = adev->df.funcs->get_hbm_channel_number(adev);
- adev->gmc.vram_width = numchan * chansize;
+ if (adev->df.funcs &&
+ adev->df.funcs->get_hbm_channel_number) {
+ numchan = adev->df.funcs->get_hbm_channel_number(adev);
+ adev->gmc.vram_width = numchan * chansize;
+ }
}
adev->gmc.vram_type = vram_type;
@@ -1596,12 +1640,6 @@ static int gmc_v9_0_sw_init(void *handle)
}
adev->need_swiotlb = drm_need_swiotlb(44);
- if (adev->gmc.xgmi.supported) {
- r = adev->gfxhub.funcs->get_xgmi_info(adev);
- if (r)
- return r;
- }
-
r = gmc_v9_0_mc_init(adev);
if (r)
return r;
@@ -1714,10 +1752,14 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_gart_table_vram_pin(adev);
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
+ goto skip_pin_bo;
+
+ r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
if (r)
return r;
+skip_pin_bo:
r = adev->gfxhub.funcs->gart_enable(adev);
if (r)
return r;
@@ -1742,7 +1784,7 @@ static int gmc_v9_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool value;
- int r, i;
+ int i;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
@@ -1777,9 +1819,7 @@ static int gmc_v9_0_hw_init(void *handle)
if (adev->umc.funcs && adev->umc.funcs->init_registers)
adev->umc.funcs->init_registers(adev);
- r = gmc_v9_0_gart_enable(adev);
-
- return r;
+ return gmc_v9_0_gart_enable(adev);
}
/**
@@ -1793,7 +1833,6 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
- amdgpu_gart_table_vram_unpin(adev);
}
static int gmc_v9_0_hw_fini(void *handle)
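
Throughout gmc_v9_0.c above, raw RREG32/WREG32 calls are replaced by RREG32_SOC15_IP/WREG32_SOC15_IP variants selected by whether the register belongs to the GC or the MMHUB IP block. A minimal sketch of such hub-dispatched accessors, with invented register files:

    #include <stdio.h>
    #include <stdint.h>

    enum vmhub { GFXHUB_0, MMHUB_0 };

    /* the same logical register is reached through different IP blocks,
     * so reads and writes dispatch on which hub owns it */
    static uint32_t gc_regs[16], mmhub_regs[16];

    static uint32_t hub_read(enum vmhub hub, unsigned reg)
    {
        return hub == GFXHUB_0 ? gc_regs[reg] : mmhub_regs[reg];
    }

    static void hub_write(enum vmhub hub, unsigned reg, uint32_t v)
    {
        if (hub == GFXHUB_0)
            gc_regs[reg] = v;
        else
            mmhub_regs[reg] = v;
    }

    int main(void)
    {
        uint32_t tmp = hub_read(MMHUB_0, 3);

        hub_write(MMHUB_0, 3, tmp | 1);   /* e.g. setting interrupt bits */
        printf("0x%x\n", hub_read(MMHUB_0, 3));
        return 0;
    }
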
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 23b066bcffb2..56da5ab82987 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -180,6 +180,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
}
+ } else if (req == IDH_REQ_GPU_INIT_DATA) {
+ /* Dummy REQ_GPU_INIT_DATA handling */
+ r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
+ /* version is set to 0 since this is a dummy request */
+ adev->virt.req_init_data_ver = 0;
}
return 0;
@@ -252,11 +257,12 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reset by
* the VF FLR.
*/
- if (!down_write_trylock(&adev->reset_sem))
+ if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
return;
+ down_write(&adev->reset_sem);
+
amdgpu_virt_fini_data_exchange(adev);
- atomic_set(&adev->in_gpu_reset, 1);
xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
@@ -380,10 +386,16 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
+static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
+{
+ return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
+ .req_init_data = xgpu_ai_request_init_data,
};
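
xgpu_ai_mailbox_flr_work() above claims adev->in_gpu_reset with atomic_cmpxchg() before taking reset_sem with a blocking down_write(), instead of the old down_write_trylock() that could silently skip the FLR handling. A C11 sketch of the claim-then-lock shape (the lock itself is elided):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int in_gpu_reset;

    /* claim the flag with compare-and-swap first; only the winner goes on
     * to take the blocking lock, everyone else bails out early */
    static int begin_reset(void)
    {
        int expected = 0;

        if (!atomic_compare_exchange_strong(&in_gpu_reset, &expected, 1))
            return 0;            /* another reset is already in flight */

        /* down_write(&adev->reset_sem) would block here until readers
         * drain, instead of giving up the way trylock did */
        return 1;
    }

    int main(void)
    {
        int a = begin_reset();
        int b = begin_reset();

        printf("first claim: %d, second claim: %d\n", a, b);
        return 0;
    }
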
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index bd3b23171579..fa7e13e0459e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -26,7 +26,7 @@
#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
#define AI_MAILBOX_POLL_MSG_TIMEDOUT 6000
-#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000
+#define AI_MAILBOX_POLL_FLR_TIMEDOUT 10000
#define AI_MAILBOX_POLL_MSG_REP_MAX 11
enum idh_request {
@@ -35,6 +35,7 @@ enum idh_request {
IDH_REQ_GPU_FINI_ACCESS,
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
+ IDH_REQ_GPU_INIT_DATA,
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
@@ -48,6 +49,7 @@ enum idh_event {
IDH_SUCCESS,
IDH_FAIL,
IDH_QUERY_ALIVE,
+ IDH_REQ_GPU_INIT_DATA_READY,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index a35e6d87e537..477d0dde19c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -281,11 +281,12 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
* otherwise the mailbox msg will be ruined/reset by
* the VF FLR.
*/
- if (!down_write_trylock(&adev->reset_sem))
+ if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
return;
+ down_write(&adev->reset_sem);
+
amdgpu_virt_fini_data_exchange(adev);
- atomic_set(&adev->in_gpu_reset, 1);
xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 38241cf0e1f1..8ce5b8ca1fd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -716,6 +716,7 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs navi10_ih_funcs = {
.get_wptr = navi10_ih_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = navi10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 2176ef85f137..d0e76b36d4ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -277,13 +277,15 @@ static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
-static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
+static int psp_v11_0_bootloader_load_component(struct psp_context *psp,
+ struct psp_bin_desc *bin_desc,
+ enum psp_bootloader_cmd bl_cmd)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- /* Check tOS sign of life register to confirm sys driver and sOS
+ /* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
if (psp_v11_0_is_sos_alive(psp))
@@ -293,13 +295,13 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
if (ret)
return ret;
- /* Copy PSP KDB binary to memory */
- psp_copy_fw(psp, psp->kdb.start_addr, psp->kdb.size_bytes);
+ /* Copy PSP System Driver binary to memory */
+ psp_copy_fw(psp, bin_desc->start_addr, bin_desc->size_bytes);
- /* Provide the PSP KDB to bootloader */
+ /* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
- psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
+ psp_gfxdrv_command_reg = bl_cmd;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
@@ -308,69 +310,19 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
return ret;
}
-static int psp_v11_0_bootloader_load_spl(struct psp_context *psp)
+static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
{
- int ret;
- uint32_t psp_gfxdrv_command_reg = 0;
- struct amdgpu_device *adev = psp->adev;
-
- /* Check tOS sign of life register to confirm sys driver and sOS
- * are already been loaded.
- */
- if (psp_v11_0_is_sos_alive(psp))
- return 0;
-
- ret = psp_v11_0_wait_for_bootloader(psp);
- if (ret)
- return ret;
-
- /* Copy PSP SPL binary to memory */
- psp_copy_fw(psp, psp->spl.start_addr, psp->spl.size_bytes);
-
- /* Provide the PSP SPL to bootloader */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
- (uint32_t)(psp->fw_pri_mc_addr >> 20));
- psp_gfxdrv_command_reg = PSP_BL__LOAD_TOS_SPL_TABLE;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
- psp_gfxdrv_command_reg);
-
- ret = psp_v11_0_wait_for_bootloader(psp);
+ return psp_v11_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE);
+}
- return ret;
+static int psp_v11_0_bootloader_load_spl(struct psp_context *psp)
+{
+ return psp_v11_0_bootloader_load_component(psp, &psp->spl, PSP_BL__LOAD_TOS_SPL_TABLE);
}
static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
{
- int ret;
- uint32_t psp_gfxdrv_command_reg = 0;
- struct amdgpu_device *adev = psp->adev;
-
- /* Check sOS sign of life register to confirm sys driver and sOS
- * are already been loaded.
- */
- if (psp_v11_0_is_sos_alive(psp))
- return 0;
-
- ret = psp_v11_0_wait_for_bootloader(psp);
- if (ret)
- return ret;
-
- /* Copy PSP System Driver binary to memory */
- psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes);
-
- /* Provide the sys driver to bootloader */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
- (uint32_t)(psp->fw_pri_mc_addr >> 20));
- psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
- psp_gfxdrv_command_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- ret = psp_v11_0_wait_for_bootloader(psp);
-
- return ret;
+ return psp_v11_0_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV);
}
static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
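
psp_v11_0 above collapses three nearly identical bootloader loaders (KDB, SPL, sysdrv) into one helper parameterized by binary descriptor and bootloader command. A compact sketch of that consolidation with made-up descriptors:

    #include <stdio.h>

    /* three near-identical loaders collapse into one helper that takes
     * the binary descriptor and the bootloader command as parameters */
    struct bin_desc { const char *name; unsigned size; };

    enum bl_cmd { LOAD_KEY_DATABASE, LOAD_TOS_SPL_TABLE, LOAD_SYSDRV };

    static int load_component(const struct bin_desc *bin, enum bl_cmd cmd)
    {
        printf("copy %s (%u bytes), issue command %d\n",
               bin->name, bin->size, cmd);
        return 0;  /* wait_for_bootloader() result in the real driver */
    }

    static int load_kdb(void)
    {
        static const struct bin_desc kdb = { "kdb", 4096 };
        return load_component(&kdb, LOAD_KEY_DATABASE);
    }

    static int load_spl(void)
    {
        static const struct bin_desc spl = { "spl", 2048 };
        return load_component(&spl, LOAD_TOS_SPL_TABLE);
    }

    int main(void)
    {
        return load_kdb() || load_spl();
    }
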
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 853d1511b889..81e033549dda 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -481,8 +481,6 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
* sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
*
* @ring: amdgpu ring pointer
- * @job: job to retrieve vmid from
- * @ib: IB object to schedule
*
* flush the IB by graphics cache rinse.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 4d4d1aa51b8a..d3d6d5b045b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -368,8 +368,6 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
* sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse
*
* @ring: amdgpu ring pointer
- * @job: job to retrieve vmid from
- * @ib: IB object to schedule
*
* flush the IB by graphics cache rinse.
*/
@@ -544,9 +542,6 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
- AUTO_CTXSW_ENABLE, enable ? 1 : 0);
if (enable && amdgpu_sdma_phase_quantum) {
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
@@ -555,7 +550,13 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ }
}
}
@@ -578,10 +579,12 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
sdma_v5_2_rlc_stop(adev);
}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+ if (!amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+ }
}
}
@@ -610,7 +613,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -685,32 +689,34 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
sdma_v5_2_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr programmed */
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
- /* enable MCBP */
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
- (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
- SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+	/* SRIOV VF has no control over any of the registers below */
if (!amdgpu_sriov_vf(adev)) {
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+ /* enable MCBP */
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
+ SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+
/* unhalt engine */
temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
@@ -1438,13 +1444,14 @@ static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
u32 sdma_cntl;
-
u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL);
- sdma_cntl = RREG32(reg_offset);
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_cntl);
+ if (!amdgpu_sriov_vf(adev)) {
+ sdma_cntl = RREG32(reg_offset);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32(reg_offset, sdma_cntl);
+ }
return 0;
}
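
Every sdma_v5_2 hunk above applies the same rule: under SR-IOV the guest VF does not own the SDMA control registers (HALT, AUTO_CTXSW_ENABLE, TRAP_ENABLE, the semaphore timeout), so the read-modify-write is skipped and left to the host. A standalone sketch of that guarded access pattern, with an illustrative bit position rather than the real SDMA0_CNTL layout:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit position; not the real SDMA0_CNTL layout. */
#define TRAP_ENABLE_SHIFT 0
#define TRAP_ENABLE_MASK  (1u << TRAP_ENABLE_SHIFT)

static uint32_t mmio_shadow;                    /* stands in for the register */
static uint32_t rreg32(void)       { return mmio_shadow; }
static void     wreg32(uint32_t v) { mmio_shadow = v; }

static void set_trap_enable(bool is_vf, bool enable)
{
	uint32_t v;

	if (is_vf)
		return;   /* VF: register is owned by the host, skip it */

	v = rreg32();                                  /* read */
	v &= ~TRAP_ENABLE_MASK;                        /* modify */
	v |= (enable ? 1u : 0u) << TRAP_ENABLE_SHIFT;
	wreg32(v);                                     /* write */
}

int main(void)
{
	set_trap_enable(false, true);   /* bare metal / PF: write goes through */
	printf("SDMA0_CNTL = 0x%08x\n", (unsigned)mmio_shadow);
	set_trap_enable(true, false);   /* VF: left untouched */
	printf("SDMA0_CNTL = 0x%08x\n", (unsigned)mmio_shadow);
	return 0;
}
```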
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index de9b55383e9f..0fc1747e4a70 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -744,7 +744,7 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
vega10_reg_base_init(adev);
break;
case CHIP_RENOIR:
- /* It's safe to do ip discovery here for Renior,
+ /* It's safe to do ip discovery here for Renoir,
* it doesn't support SRIOV. */
if (amdgpu_discovery) {
r = amdgpu_discovery_reg_base_init(adev);
@@ -1238,7 +1238,9 @@ static int soc15_common_sw_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_add_irq_id(adev);
- adev->df.funcs->sw_init(adev);
+ if (adev->df.funcs &&
+ adev->df.funcs->sw_init)
+ adev->df.funcs->sw_init(adev);
return 0;
}
@@ -1250,7 +1252,10 @@ static int soc15_common_sw_fini(void *handle)
if (adev->nbio.ras_funcs &&
adev->nbio.ras_funcs->ras_fini)
adev->nbio.ras_funcs->ras_fini(adev);
- adev->df.funcs->sw_fini(adev);
+
+ if (adev->df.funcs &&
+ adev->df.funcs->sw_fini)
+ adev->df.funcs->sw_fini(adev);
return 0;
}
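
The soc15 change guards an optional ops table: with IP discovery some configurations can leave `adev->df.funcs` unset, so both the table pointer and the individual hook are checked before the call. A minimal compilable sketch of the pattern (types here are illustrative):

```c
#include <stdio.h>

/* Illustrative types; names mirror the driver's df.funcs table. */
struct df_funcs_demo {
	void (*sw_init)(void *handle);
	void (*sw_fini)(void *handle);
};

static void init_hook(void *handle) { (void)handle; puts("df sw_init"); }

static void maybe_sw_init(const struct df_funcs_demo *funcs, void *handle)
{
	if (funcs && funcs->sw_init)     /* table and hook are both optional */
		funcs->sw_init(handle);
}

int main(void)
{
	const struct df_funcs_demo present = { .sw_init = init_hook };

	maybe_sw_init(&present, NULL);   /* runs the hook */
	maybe_sw_init(NULL, NULL);       /* skipped; no NULL dereference */
	return 0;
}
```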
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 8a9ca87d8663..473767e03676 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -51,6 +51,8 @@
#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP)
+#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+
#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
AMDGPU_REGS_NO_KIQ, ip##_HWIP)
@@ -65,6 +67,9 @@
#define WREG32_SOC15_IP(ip, reg, value) \
__WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP)
+#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \
+ __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+
#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
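
The new `*_SOC15_IP_NO_KIQ` macros are variants of the existing per-IP accessors that pass `AMDGPU_REGS_NO_KIQ`, i.e. they bypass the KIQ-routed register path that SR-IOV guests normally use. The per-IP trick is token pasting: the `ip` argument is glued onto `_HWIP` to index the offset tables. A toy re-creation of that pasting scheme (the enum and array are assumptions for the demo):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed names for the demo; the driver's tables are per-ASIC. */
enum hwip_demo { GC_HWIP, MP0_HWIP, HWIP_MAX };

static uint32_t regs[HWIP_MAX][16];

/* ip##_HWIP pastes the ip argument onto the enum suffix, the same way the
 * SOC15 macros index adev's per-IP offset tables. */
#define DEMO_RREG32_IP(ip, reg)        (regs[ip##_HWIP][reg])
#define DEMO_WREG32_IP(ip, reg, value) (regs[ip##_HWIP][reg] = (value))

int main(void)
{
	DEMO_WREG32_IP(GC, 3, 0xdeadbeef);
	printf("GC reg 3 = 0x%08x\n", (unsigned)DEMO_RREG32_IP(GC, 3));
	return 0;
}
```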
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index f7ec3fe134e5..6dd1e19e8d43 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -50,6 +50,165 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
}
+static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+}
+
+static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t channel_index,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt;
+ uint64_t mc_umc_status;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+	/*
+	 * select the lower chip and check the error count;
+	 * skip adding it, the error counter is derived from mca_umc_status only
+	 */
+ ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_lo_chip;
+
+	/*
+	 * select the higher chip and check the error counter;
+	 * skip adding it, the error counter is derived from mca_umc_status only
+	 */
+ ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_hi_chip;
+
+	/* check for an SRAM correctable error;
+	   MCUMC_STATUS is a 64-bit register */
+ mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ *error_count += 1;
+}
+
+static void umc_v6_7_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t channel_index,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ *error_count += 1;
+}
+
+static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+ uint32_t channel_index = 0;
+
+	/* TODO: driver needs to toggle DF C-state to ensure
+	 * safe access to UMC registers. Will add the protection. */
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+ channel_index = get_umc_v6_7_channel_index(adev,
+ umc_inst,
+ ch_inst);
+ umc_v6_7_ecc_info_query_correctable_error_count(adev,
+ channel_index,
+ &(err_data->ce_count));
+		umc_v6_7_ecc_info_query_uncorrectable_error_count(adev,
+ channel_index,
+ &(err_data->ue_count));
+ }
+}
+
+static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t umc_reg_offset,
+ uint32_t ch_inst,
+ uint32_t umc_inst)
+{
+ uint64_t mc_umc_status, err_addr, retired_page;
+ struct eeprom_table_record *err_rec;
+ uint32_t channel_index;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+
+ if (mc_umc_status == 0)
+ return;
+
+ if (!err_data->err_addr)
+ return;
+
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+ /* calculate error address if ue/ce error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+ err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr;
+ err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we only save ue error information currently, ce is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_inst;
+
+ err_data->err_addr_cnt++;
+ }
+ }
+}
+
+static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+	/* TODO: driver needs to toggle DF C-state to ensure
+	 * safe access to UMC registers. Will add the protection
+	 * when the firmware interface is ready. */
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+ umc_v6_7_ecc_info_query_error_address(adev,
+ err_data,
+ umc_reg_offset,
+ ch_inst,
+ umc_inst);
+ }
+}
+
static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
@@ -327,4 +486,6 @@ const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
.query_ras_error_count = umc_v6_7_query_ras_error_count,
.query_ras_error_address = umc_v6_7_query_ras_error_address,
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
+ .ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
+ .ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
};
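
The `ecc_info_*` variants added above mirror the existing register-based queries but read cached values from `ras->umc_ecc` instead of touching UMC registers. The retired-page computation stitches a SoC physical address from three parts: the 8KB block of the channel-relative error address, the 256B slot selected by the channel index, and the offset inside that 256B block. A standalone sketch with stand-in macro bodies (the real definitions live in the driver's UMC headers and may use different shifts):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver's translation macros (assumed shift widths). */
#define DEMO_ADDR_OF_8KB_BLOCK(addr)    (((addr) & ~0xffULL) << 5)
#define DEMO_ADDR_OF_256B_BLOCK(ch)     ((uint64_t)(ch) << 8)
#define DEMO_OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
#define DEMO_GPU_PAGE_SHIFT 12          /* 4K pages assumed */

int main(void)
{
	uint64_t err_addr = 0x12345;    /* channel-relative error address */
	uint32_t channel_index = 7;     /* from channel_idx_tbl */

	/* stitch the SoC physical address from the three parts */
	uint64_t retired_page = DEMO_ADDR_OF_8KB_BLOCK(err_addr) |
				DEMO_ADDR_OF_256B_BLOCK(channel_index) |
				DEMO_OFFSET_IN_256B_BLOCK(err_addr);

	/* the driver records the page frame number, not the byte address */
	printf("soc pa 0x%llx -> pfn 0x%llx\n",
	       (unsigned long long)retired_page,
	       (unsigned long long)(retired_page >> DEMO_GPU_PAGE_SHIFT));
	return 0;
}
```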
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a9ca6988009e..3070466f54e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -640,6 +640,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index f51dfc38ac65..3b4eb8285943 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -688,6 +688,7 @@ const struct amd_ip_funcs vega20_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega20_ih_funcs = {
.get_wptr = vega20_ih_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = vega20_ih_set_rptr
};
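
Both vega IH blocks gain a `.decode_iv_ts` hook pointing at the shared `amdgpu_ih_decode_iv_ts_helper`, which extracts the timestamp from an interrupt-vector ring entry. A compilable sketch of the idea, assuming a 48-bit timestamp spread over the second and third dwords (the exact field placement is an assumption here):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed layout for the demo: timestamp low 32 bits in dword 1,
 * high 16 bits in the low half of dword 2. */
static uint64_t demo_decode_iv_ts(const uint32_t entry[8])
{
	return ((uint64_t)(entry[2] & 0xffff) << 32) | entry[1];
}

int main(void)
{
	uint32_t iv[8] = { 0, 0x89abcdef, 0x4567, 0, 0, 0, 0, 0 };

	printf("ts = 0x%llx\n", (unsigned long long)demo_decode_iv_ts(iv));
	return 0;
}
```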
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index fe9a7cc8d9eb..6645ebbd2696 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -956,6 +956,10 @@ static int vi_asic_reset(struct amdgpu_device *adev)
{
int r;
+ /* APUs don't have full asic reset */
+ if (adev->flags & AMD_IS_APU)
+ return 0;
+
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
dev_info(adev->dev, "BACO reset\n");
r = amdgpu_dpm_baco_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index f6233019f042..d60576ce10cd 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -43,15 +43,15 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
*/
if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
- dev->device_info->asic_family == CHIP_HAWAII) {
+ dev->adev->asic_type == CHIP_HAWAII) {
struct cik_ih_ring_entry *tmp_ihre =
(struct cik_ih_ring_entry *)patched_ihre;
*patched_flag = true;
*tmp_ihre = *ihre;
- vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
- ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid);
+ vmid = f2g->read_vmid_from_vmfault_reg(dev->adev);
+ ret = f2g->get_atc_vmid_pasid_mapping_info(dev->adev, vmid, &pasid);
tmp_ihre->ring_id &= 0x000000ff;
tmp_ihre->ring_id |= vmid << 8;
@@ -113,7 +113,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
kfd_process_vm_fault(dev->dqm, pasid);
memset(&info, 0, sizeof(info));
- amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info);
+ amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->adev, &info);
if (!info.page_addr && !info.status)
return;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 24ebd61395d8..4bfc0c8ab764 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -321,7 +321,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
/* Return gpu_id as doorbell offset for mmap usage */
args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
- if (KFD_IS_SOC15(dev->device_info->asic_family))
+ if (KFD_IS_SOC15(dev))
/* On SOC15 ASICs, include the doorbell offset within the
* process doorbell frame, which is 2 pages.
*/
@@ -580,7 +580,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
if (!dev)
return -EINVAL;
- if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ if (dev->adev->asic_type == CHIP_CARRIZO) {
pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
return -EINVAL;
}
@@ -631,7 +631,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
if (!dev || !dev->dbgmgr)
return -EINVAL;
- if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ if (dev->adev->asic_type == CHIP_CARRIZO) {
pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
return -EINVAL;
}
@@ -676,7 +676,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
if (!dev)
return -EINVAL;
- if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ if (dev->adev->asic_type == CHIP_CARRIZO) {
pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
return -EINVAL;
}
@@ -784,7 +784,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep,
if (!dev)
return -EINVAL;
- if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ if (dev->adev->asic_type == CHIP_CARRIZO) {
pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
return -EINVAL;
}
@@ -851,7 +851,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
dev = kfd_device_by_id(args->gpu_id);
if (dev)
/* Reading GPU clock counter from KGD */
- args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
+ args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->adev);
else
/* Node without GPU resource */
args->gpu_clock_counter = 0;
@@ -1041,7 +1041,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
goto out_unlock;
}
- err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
+ err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->adev,
mem, &kern_addr, &size);
if (err) {
pr_err("Failed to map event page to kernel\n");
@@ -1051,7 +1051,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
err = kfd_event_page_set(p, kern_addr, size);
if (err) {
pr_err("Failed to set event page\n");
- amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->kgd, mem);
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->adev, mem);
goto out_unlock;
}
@@ -1137,7 +1137,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
dev->kfd2kgd->set_scratch_backing_va(
- dev->kgd, args->va_addr, pdd->qpd.vmid);
+ dev->adev, args->va_addr, pdd->qpd.vmid);
return 0;
@@ -1158,7 +1158,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
if (!dev)
return -EINVAL;
- amdgpu_amdkfd_get_tile_config(dev->kgd, &config);
+ amdgpu_amdkfd_get_tile_config(dev->adev, &config);
args->gb_addr_config = config.gb_addr_config;
args->num_banks = config.num_banks;
@@ -1244,7 +1244,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
if (dev->use_iommu_v2)
return false;
- amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
+ amdgpu_amdkfd_get_local_mem_info(dev->adev, &mem_info);
if (mem_info.local_mem_size_private == 0 &&
mem_info.local_mem_size_public > 0)
return true;
@@ -1313,7 +1313,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
err = -EINVAL;
goto err_unlock;
}
- offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+ offset = dev->adev->rmmio_remap.bus_addr;
if (!offset) {
err = -ENOMEM;
goto err_unlock;
@@ -1321,7 +1321,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
}
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
- dev->kgd, args->va_addr, args->size,
+ dev->adev, args->va_addr, args->size,
pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
flags);
@@ -1353,7 +1353,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return 0;
err_free:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem,
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
pdd->drm_priv, NULL);
err_unlock:
mutex_unlock(&p->mutex);
@@ -1399,7 +1399,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
goto err_unlock;
}
- ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
+ ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev,
(struct kgd_mem *)mem, pdd->drm_priv, &size);
/* If freeing the buffer failed, leave the handle in place for
@@ -1484,7 +1484,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
goto get_mem_obj_from_handle_failed;
}
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- peer->kgd, (struct kgd_mem *)mem,
+ peer->adev, (struct kgd_mem *)mem,
peer_pdd->drm_priv, &table_freed);
if (err) {
pr_err("Failed to map to gpu %d/%d\n",
@@ -1496,7 +1496,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
mutex_unlock(&p->mutex);
- err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
+ err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
@@ -1593,7 +1593,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
goto get_mem_obj_from_handle_failed;
}
err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
- peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
+ peer->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
if (err) {
pr_err("Failed to unmap from gpu %d/%d\n",
i, args->n_devices);
@@ -1603,8 +1603,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
}
mutex_unlock(&p->mutex);
- if (dev->device_info->asic_family == CHIP_ALDEBARAN) {
- err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd,
+ if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) {
+ err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
(struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
@@ -1680,7 +1680,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
{
struct kfd_ioctl_get_dmabuf_info_args *args = data;
struct kfd_dev *dev = NULL;
- struct kgd_dev *dma_buf_kgd;
+ struct amdgpu_device *dmabuf_adev;
void *metadata_buffer = NULL;
uint32_t flags;
unsigned int i;
@@ -1700,15 +1700,15 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
}
/* Get dmabuf info from KGD */
- r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
- &dma_buf_kgd, &args->size,
+ r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
+ &dmabuf_adev, &args->size,
metadata_buffer, args->metadata_size,
&args->metadata_size, &flags);
if (r)
goto exit;
/* Reverse-lookup gpu_id from kgd pointer */
- dev = kfd_device_by_kgd(dma_buf_kgd);
+ dev = kfd_device_by_adev(dmabuf_adev);
if (!dev) {
r = -EINVAL;
goto exit;
@@ -1758,7 +1758,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
goto err_unlock;
}
- r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
+ r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->adev, dmabuf,
args->va_addr, pdd->drm_priv,
(struct kgd_mem **)&mem, &size,
NULL);
@@ -1779,7 +1779,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
return 0;
err_free:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem,
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
pdd->drm_priv, NULL);
err_unlock:
mutex_unlock(&p->mutex);
@@ -2066,7 +2066,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
- address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+ address = dev->adev->rmmio_remap.bus_addr;
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP;
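
The kfd_chardev conversion is mechanical: the opaque `struct kgd_dev *` handle is replaced by the concrete `struct amdgpu_device *`, which also lets direct field reads such as `dev->adev->rmmio_remap.bus_addr` replace accessor calls. A toy before/after of that change (types here are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative types; not the driver's real definitions. */
struct rmmio_remap_demo { uint64_t bus_addr; };
struct amdgpu_device_demo { struct rmmio_remap_demo rmmio_remap; };

/* old style: opaque handle plus a cross-module accessor */
typedef void kgd_dev_demo;
static uint64_t get_mmio_remap_phys_addr(kgd_dev_demo *kgd)
{
	return ((struct amdgpu_device_demo *)kgd)->rmmio_remap.bus_addr;
}

int main(void)
{
	struct amdgpu_device_demo adev = { .rmmio_remap = { 0xfed00000ULL } };

	uint64_t via_accessor = get_mmio_remap_phys_addr(&adev);
	uint64_t direct = adev.rmmio_remap.bus_addr;   /* new style */

	printf("0x%llx 0x%llx\n", (unsigned long long)via_accessor,
	       (unsigned long long)direct);
	return 0;
}
```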
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index cfedfb1e8596..9624bbe8b501 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1060,6 +1060,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
return -ENODEV;
/* same everything but the other direction */
props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
+ if (!props2)
+ return -ENOMEM;
+
props2->node_from = id_to;
props2->node_to = id_from;
props2->kobj = NULL;
@@ -1340,7 +1343,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
int ret;
unsigned int num_cu_shared;
- switch (kdev->device_info->asic_family) {
+ switch (kdev->adev->asic_type) {
case CHIP_KAVERI:
pcache_info = kaveri_cache_info;
num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
@@ -1377,67 +1380,71 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = vegam_cache_info;
num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
break;
- case CHIP_VEGA10:
- pcache_info = vega10_cache_info;
- num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
- break;
- case CHIP_VEGA12:
- pcache_info = vega12_cache_info;
- num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
- break;
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- pcache_info = vega20_cache_info;
- num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
- break;
- case CHIP_ALDEBARAN:
- pcache_info = aldebaran_cache_info;
- num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
- break;
- case CHIP_RAVEN:
- pcache_info = raven_cache_info;
- num_of_cache_types = ARRAY_SIZE(raven_cache_info);
- break;
- case CHIP_RENOIR:
- pcache_info = renoir_cache_info;
- num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_CYAN_SKILLFISH:
- pcache_info = navi10_cache_info;
- num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
- break;
- case CHIP_NAVI14:
- pcache_info = navi14_cache_info;
- num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
- break;
- case CHIP_SIENNA_CICHLID:
- pcache_info = sienna_cichlid_cache_info;
- num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
- break;
- case CHIP_NAVY_FLOUNDER:
- pcache_info = navy_flounder_cache_info;
- num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
- break;
- case CHIP_DIMGREY_CAVEFISH:
- pcache_info = dimgrey_cavefish_cache_info;
- num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
- break;
- case CHIP_VANGOGH:
- pcache_info = vangogh_cache_info;
- num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
- break;
- case CHIP_BEIGE_GOBY:
- pcache_info = beige_goby_cache_info;
- num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
- break;
- case CHIP_YELLOW_CARP:
- pcache_info = yellow_carp_cache_info;
- num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
- break;
default:
- return -EINVAL;
+ switch(KFD_GC_VERSION(kdev)) {
+ case IP_VERSION(9, 0, 1):
+ pcache_info = vega10_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
+ break;
+ case IP_VERSION(9, 2, 1):
+ pcache_info = vega12_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
+ break;
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ pcache_info = vega20_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
+ break;
+ case IP_VERSION(9, 4, 2):
+ pcache_info = aldebaran_cache_info;
+ num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
+ break;
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ pcache_info = raven_cache_info;
+ num_of_cache_types = ARRAY_SIZE(raven_cache_info);
+ break;
+ case IP_VERSION(9, 3, 0):
+ pcache_info = renoir_cache_info;
+ num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ pcache_info = navi10_cache_info;
+ num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
+ break;
+ case IP_VERSION(10, 1, 1):
+ pcache_info = navi14_cache_info;
+ num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
+ break;
+ case IP_VERSION(10, 3, 0):
+ pcache_info = sienna_cichlid_cache_info;
+ num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
+ break;
+ case IP_VERSION(10, 3, 2):
+ pcache_info = navy_flounder_cache_info;
+ num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
+ break;
+ case IP_VERSION(10, 3, 4):
+ pcache_info = dimgrey_cavefish_cache_info;
+ num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
+ break;
+ case IP_VERSION(10, 3, 1):
+ pcache_info = vangogh_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
+ break;
+ case IP_VERSION(10, 3, 5):
+ pcache_info = beige_goby_cache_info;
+ num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
+ break;
+ case IP_VERSION(10, 3, 3):
+ pcache_info = yellow_carp_cache_info;
+ num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
+ break;
+ default:
+ return -EINVAL;
+ }
}
*size_filled = 0;
@@ -1963,8 +1970,6 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
struct crat_subtype_iolink *sub_type_hdr,
uint32_t proximity_domain)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)kdev->kgd;
-
*avail_size -= sizeof(struct crat_subtype_iolink);
if (*avail_size < 0)
return -ENOMEM;
@@ -1981,7 +1986,7 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
/* Fill in IOLINK subtype.
* TODO: Fill-in other fields of iolink subtype
*/
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (kdev->adev->gmc.xgmi.connected_to_cpu) {
/*
* with host gpu xgmi link, host can access gpu memory whether
* or not pcie bar type is large, so always create bidirectional
@@ -1990,19 +1995,19 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->num_hops_xgmi = 1;
- if (adev->asic_type == CHIP_ALDEBARAN) {
+ if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) {
sub_type_hdr->minimum_bandwidth_mbs =
amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
- kdev->kgd, NULL, true);
+ kdev->adev, NULL, true);
sub_type_hdr->maximum_bandwidth_mbs =
sub_type_hdr->minimum_bandwidth_mbs;
}
} else {
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
sub_type_hdr->minimum_bandwidth_mbs =
- amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, true);
+ amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, true);
sub_type_hdr->maximum_bandwidth_mbs =
- amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, false);
+ amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, false);
}
sub_type_hdr->proximity_domain_from = proximity_domain;
@@ -2044,11 +2049,11 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
sub_type_hdr->proximity_domain_from = proximity_domain_from;
sub_type_hdr->proximity_domain_to = proximity_domain_to;
sub_type_hdr->num_hops_xgmi =
- amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
+ amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
sub_type_hdr->maximum_bandwidth_mbs =
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, peer_kdev->kgd, false);
+ amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false);
sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, NULL, true) : 0;
+ amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
return 0;
}
@@ -2114,7 +2119,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
cu->proximity_domain = proximity_domain;
- amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
cu->num_simd_per_cu = cu_info.simd_per_cu;
cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
cu->max_waves_simd = cu_info.max_waves_per_simd;
@@ -2145,7 +2150,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
* report the total FB size (public+private) as a single
* private heap.
*/
- amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(kdev->adev, &local_mem_info);
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);
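
kfd_crat.c keeps the legacy `asic_type` switch for pre-GFX9 parts and nests an `IP_VERSION`-keyed switch in the default branch for everything discovered via IP versioning. The packed version value is what makes the case labels work; the packing below matches the kernel's maj/min/rev layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Matches the kernel's packing: major/minor/revision in 16/8/8 bits. */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

int main(void)
{
	uint32_t gc = IP_VERSION(10, 3, 1);   /* Vangogh's GC block */

	switch (gc) {
	case IP_VERSION(10, 3, 1):
		printf("vangogh cache info selected\n");
		break;
	default:
		printf("unsupported GC version 0x%06x\n", (unsigned)gc);
	}
	return 0;
}
```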
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index 159add0f5aaa..1e30717b5253 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -41,7 +41,7 @@
static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
{
- dev->kfd2kgd->address_watch_disable(dev->kgd);
+ dev->kfd2kgd->address_watch_disable(dev->adev);
}
static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
@@ -322,7 +322,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *");
pdd->dev->kfd2kgd->address_watch_execute(
- dbgdev->dev->kgd,
+ dbgdev->dev->adev,
i,
cntl.u32All,
addrHi.u32All,
@@ -420,7 +420,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
aw_reg_add_dword =
dbgdev->dev->kfd2kgd->address_watch_get_offset(
- dbgdev->dev->kgd,
+ dbgdev->dev->adev,
i,
ADDRESS_WATCH_REG_CNTL);
@@ -431,7 +431,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
aw_reg_add_dword =
dbgdev->dev->kfd2kgd->address_watch_get_offset(
- dbgdev->dev->kgd,
+ dbgdev->dev->adev,
i,
ADDRESS_WATCH_REG_ADDR_HI);
@@ -441,7 +441,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
aw_reg_add_dword =
dbgdev->dev->kfd2kgd->address_watch_get_offset(
- dbgdev->dev->kgd,
+ dbgdev->dev->adev,
i,
ADDRESS_WATCH_REG_ADDR_LO);
@@ -457,7 +457,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
aw_reg_add_dword =
dbgdev->dev->kfd2kgd->address_watch_get_offset(
- dbgdev->dev->kgd,
+ dbgdev->dev->adev,
i,
ADDRESS_WATCH_REG_CNTL);
@@ -752,7 +752,7 @@ static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev,
pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
- return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->kgd,
+ return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->adev,
reg_gfx_index.u32All,
reg_sq_cmd.u32All);
}
@@ -784,7 +784,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
- (dev->kgd, vmid, &queried_pasid);
+ (dev->adev, vmid, &queried_pasid);
if (status && queried_pasid == p->pasid) {
pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
@@ -811,7 +811,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
/* for non DIQ we need to patch the VMID: */
reg_sq_cmd.bits.vm_id = vmid;
- dev->kfd2kgd->wave_control_execute(dev->kgd,
+ dev->kfd2kgd->wave_control_execute(dev->adev,
reg_gfx_index.u32All,
reg_sq_cmd.u32All);
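
The kfd_device.c rewrite that follows deletes the per-ASIC `kfd_device_info` tables and derives the same fields at probe time from the GC and SDMA IP versions. A compilable miniature of that derivation style (thresholds simplified; the real code also keys SDMA queue counts off the SDMA IP block, not GC):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

/* Field names mirror kfd_device_info; the thresholds are simplified. */
struct device_info_demo {
	unsigned doorbell_size;
	unsigned num_sdma_queues_per_engine;
	bool needs_pci_atomics;
};

static void device_info_init(struct device_info_demo *info, uint32_t gc_ver)
{
	bool soc15 = gc_ver >= IP_VERSION(9, 0, 1);

	info->doorbell_size = soc15 ? 8 : 4;
	/* rough stand-in: the driver keys this off the SDMA IP block */
	info->num_sdma_queues_per_engine =
		gc_ver >= IP_VERSION(9, 4, 0) ? 8 : 2;
	info->needs_pci_atomics = gc_ver >= IP_VERSION(10, 1, 1);
}

int main(void)
{
	struct device_info_demo info;

	device_info_init(&info, IP_VERSION(10, 3, 0));  /* Sienna Cichlid */
	printf("doorbell=%u sdma_q=%u atomics=%d\n", info.doorbell_size,
	       info.num_sdma_queues_per_engine, (int)info.needs_pci_atomics);
	return 0;
}
```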
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3b119db16003..2b65d0acae2c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -53,770 +53,310 @@ extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
-#ifdef KFD_SUPPORT_IOMMU_V2
-static const struct kfd_device_info kaveri_device_info = {
- .asic_family = CHIP_KAVERI,
- .asic_name = "kaveri",
- .gfx_target_version = 70000,
- .max_pasid_bits = 16,
- /* max num of queues for KV.TODO should be a dynamic value */
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = false,
- .needs_iommu_device = true,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info carrizo_device_info = {
- .asic_family = CHIP_CARRIZO,
- .asic_name = "carrizo",
- .gfx_target_version = 80001,
- .max_pasid_bits = 16,
- /* max num of queues for CZ.TODO should be a dynamic value */
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = true,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info raven_device_info = {
- .asic_family = CHIP_RAVEN,
- .asic_name = "raven",
- .gfx_target_version = 90002,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = true,
- .needs_pci_atomics = true,
- .num_sdma_engines = 1,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-#endif
-
-#ifdef CONFIG_DRM_AMDGPU_CIK
-static const struct kfd_device_info hawaii_device_info = {
- .asic_family = CHIP_HAWAII,
- .asic_name = "hawaii",
- .gfx_target_version = 70001,
- .max_pasid_bits = 16,
- /* max num of queues for KV.TODO should be a dynamic value */
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = false,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-#endif
-
-static const struct kfd_device_info tonga_device_info = {
- .asic_family = CHIP_TONGA,
- .asic_name = "tonga",
- .gfx_target_version = 80002,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = false,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info fiji_device_info = {
- .asic_family = CHIP_FIJI,
- .asic_name = "fiji",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info fiji_vf_device_info = {
- .asic_family = CHIP_FIJI,
- .asic_name = "fiji",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-
-static const struct kfd_device_info polaris10_device_info = {
- .asic_family = CHIP_POLARIS10,
- .asic_name = "polaris10",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info polaris10_vf_device_info = {
- .asic_family = CHIP_POLARIS10,
- .asic_name = "polaris10",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info polaris11_device_info = {
- .asic_family = CHIP_POLARIS11,
- .asic_name = "polaris11",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info polaris12_device_info = {
- .asic_family = CHIP_POLARIS12,
- .asic_name = "polaris12",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info vegam_device_info = {
- .asic_family = CHIP_VEGAM,
- .asic_name = "vegam",
- .gfx_target_version = 80003,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 4,
- .ih_ring_entry_size = 4 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_cik,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info vega10_device_info = {
- .asic_family = CHIP_VEGA10,
- .asic_name = "vega10",
- .gfx_target_version = 90000,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info vega10_vf_device_info = {
- .asic_family = CHIP_VEGA10,
- .asic_name = "vega10",
- .gfx_target_version = 90000,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info vega12_device_info = {
- .asic_family = CHIP_VEGA12,
- .asic_name = "vega12",
- .gfx_target_version = 90004,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info vega20_device_info = {
- .asic_family = CHIP_VEGA20,
- .asic_name = "vega20",
- .gfx_target_version = 90006,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info arcturus_device_info = {
- .asic_family = CHIP_ARCTURUS,
- .asic_name = "arcturus",
- .gfx_target_version = 90008,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 6,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info aldebaran_device_info = {
- .asic_family = CHIP_ALDEBARAN,
- .asic_name = "aldebaran",
- .gfx_target_version = 90010,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 3,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info renoir_device_info = {
- .asic_family = CHIP_RENOIR,
- .asic_name = "renoir",
- .gfx_target_version = 90012,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .supports_cwsr = true,
- .needs_iommu_device = false,
- .needs_pci_atomics = false,
- .num_sdma_engines = 1,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info navi10_device_info = {
- .asic_family = CHIP_NAVI10,
- .asic_name = "navi10",
- .gfx_target_version = 100100,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 145,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info navi12_device_info = {
- .asic_family = CHIP_NAVI12,
- .asic_name = "navi12",
- .gfx_target_version = 100101,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 145,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info navi14_device_info = {
- .asic_family = CHIP_NAVI14,
- .asic_name = "navi14",
- .gfx_target_version = 100102,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 145,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info sienna_cichlid_device_info = {
- .asic_family = CHIP_SIENNA_CICHLID,
- .asic_name = "sienna_cichlid",
- .gfx_target_version = 100300,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 4,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info navy_flounder_device_info = {
- .asic_family = CHIP_NAVY_FLOUNDER,
- .asic_name = "navy_flounder",
- .gfx_target_version = 100301,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info vangogh_device_info = {
- .asic_family = CHIP_VANGOGH,
- .asic_name = "vangogh",
- .gfx_target_version = 100303,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 1,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info dimgrey_cavefish_device_info = {
- .asic_family = CHIP_DIMGREY_CAVEFISH,
- .asic_name = "dimgrey_cavefish",
- .gfx_target_version = 100302,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info beige_goby_device_info = {
- .asic_family = CHIP_BEIGE_GOBY,
- .asic_name = "beige_goby",
- .gfx_target_version = 100304,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 1,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
-static const struct kfd_device_info yellow_carp_device_info = {
- .asic_family = CHIP_YELLOW_CARP,
- .asic_name = "yellow_carp",
- .gfx_target_version = 100305,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .no_atomic_fw_version = 92,
- .num_sdma_engines = 1,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 2,
-};
-
-static const struct kfd_device_info cyan_skillfish_device_info = {
- .asic_family = CHIP_CYAN_SKILLFISH,
- .asic_name = "cyan_skillfish",
- .gfx_target_version = 100103,
- .max_pasid_bits = 16,
- .max_no_of_hqd = 24,
- .doorbell_size = 8,
- .ih_ring_entry_size = 8 * sizeof(uint32_t),
- .event_interrupt_class = &event_interrupt_class_v9,
- .num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED,
- .needs_iommu_device = false,
- .supports_cwsr = true,
- .needs_pci_atomics = true,
- .num_sdma_engines = 2,
- .num_xgmi_sdma_engines = 0,
- .num_sdma_queues_per_engine = 8,
-};
-
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
+static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd)
{
- struct kfd_dev *kfd;
- const struct kfd_device_info *device_info;
- const struct kfd2kgd_calls *f2g;
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];
+
+ switch (sdma_version) {
+ case IP_VERSION(4, 0, 0):/* VEGA10 */
+ case IP_VERSION(4, 0, 1):/* VEGA12 */
+ case IP_VERSION(4, 1, 0):/* RAVEN */
+ case IP_VERSION(4, 1, 1):/* RAVEN */
+ case IP_VERSION(4, 1, 2):/* RENOIR */
+ case IP_VERSION(5, 2, 1):/* VANGOGH */
+ case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
+ kfd->device_info.num_sdma_queues_per_engine = 2;
+ break;
+ case IP_VERSION(4, 2, 0):/* VEGA20 */
+ case IP_VERSION(4, 2, 2):/* ARCTURUS */
+ case IP_VERSION(4, 4, 0):/* ALDEBARAN */
+ case IP_VERSION(5, 0, 0):/* NAVI10 */
+ case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
+ case IP_VERSION(5, 0, 2):/* NAVI14 */
+ case IP_VERSION(5, 0, 5):/* NAVI12 */
+ case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
+ case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
+ case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
+ case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
+ kfd->device_info.num_sdma_queues_per_engine = 8;
+ break;
+ default:
+		dev_warn(kfd_device,
+			"Defaulting to 8 SDMA queues per engine due to "
+			"an unrecognized SDMA IP version (SDMA0_HWIP: 0x%x).\n",
+			sdma_version);
+ kfd->device_info.num_sdma_queues_per_engine = 8;
+ }
+}
+
+static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
+{
+ uint32_t gc_version = KFD_GC_VERSION(kfd);
+
+ switch (gc_version) {
+ case IP_VERSION(9, 0, 1): /* VEGA10 */
+ case IP_VERSION(9, 1, 0): /* RAVEN */
+ case IP_VERSION(9, 2, 1): /* VEGA12 */
+ case IP_VERSION(9, 2, 2): /* RAVEN */
+ case IP_VERSION(9, 3, 0): /* RENOIR */
+ case IP_VERSION(9, 4, 0): /* VEGA20 */
+ case IP_VERSION(9, 4, 1): /* ARCTURUS */
+ case IP_VERSION(9, 4, 2): /* ALDEBARAN */
+ case IP_VERSION(10, 3, 1): /* VANGOGH */
+ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
+ case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
+ case IP_VERSION(10, 1, 10): /* NAVI10 */
+ case IP_VERSION(10, 1, 2): /* NAVI12 */
+ case IP_VERSION(10, 1, 1): /* NAVI14 */
+ case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
+ case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
+ case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
+ case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
+ kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
+ break;
+ default:
+ dev_warn(kfd_device, "v9 event interrupt handler is set due to "
+ "mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
+ kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
+ }
+}
+
+static void kfd_device_info_init(struct kfd_dev *kfd,
+ bool vf, uint32_t gfx_target_version)
+{
+ uint32_t gc_version = KFD_GC_VERSION(kfd);
+ uint32_t asic_type = kfd->adev->asic_type;
+
+ kfd->device_info.max_pasid_bits = 16;
+ kfd->device_info.max_no_of_hqd = 24;
+ kfd->device_info.num_of_watch_points = 4;
+ kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
+ kfd->device_info.gfx_target_version = gfx_target_version;
+
+ if (KFD_IS_SOC15(kfd)) {
+ kfd->device_info.doorbell_size = 8;
+ kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
+ kfd->device_info.supports_cwsr = true;
+
+ kfd_device_info_set_sdma_queue_num(kfd);
+
+ kfd_device_info_set_event_interrupt_class(kfd);
+
+ /* Raven */
+ if (gc_version == IP_VERSION(9, 1, 0) ||
+ gc_version == IP_VERSION(9, 2, 2))
+ kfd->device_info.needs_iommu_device = true;
+
+ if (gc_version < IP_VERSION(11, 0, 0)) {
+ /* Navi2x+, Navi1x+ */
+ if (gc_version >= IP_VERSION(10, 3, 0))
+ kfd->device_info.no_atomic_fw_version = 92;
+ else if (gc_version >= IP_VERSION(10, 1, 1))
+ kfd->device_info.no_atomic_fw_version = 145;
+
+ /* Navi1x+ */
+ if (gc_version >= IP_VERSION(10, 1, 1))
+ kfd->device_info.needs_pci_atomics = true;
+ }
+ } else {
+ kfd->device_info.doorbell_size = 4;
+ kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
+ kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
+ kfd->device_info.num_sdma_queues_per_engine = 2;
+
+ if (asic_type != CHIP_KAVERI &&
+ asic_type != CHIP_HAWAII &&
+ asic_type != CHIP_TONGA)
+ kfd->device_info.supports_cwsr = true;
+
+ if (asic_type == CHIP_KAVERI ||
+ asic_type == CHIP_CARRIZO)
+ kfd->device_info.needs_iommu_device = true;
+
+ if (asic_type != CHIP_HAWAII && !vf)
+ kfd->device_info.needs_pci_atomics = true;
+ }
+}
+
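For orientation: the helpers above replace the old static per-ASIC kfd_device_info tables with runtime initialization keyed on the GC/SDMA IP versions reported by IP discovery. A minimal standalone sketch of the same pattern (the IP_VER helper, struct, and values here are invented for illustration and are not kernel API):

    #include <stdbool.h>
    #include <stdint.h>

    #define IP_VER(maj, min, rev) (((uint32_t)(maj) << 16) | ((min) << 8) | (rev))

    struct dev_info {
        unsigned int doorbell_size;
        unsigned int ih_ring_entry_size;
        bool supports_cwsr;
    };

    /* Derive per-device settings from the GC IP version instead of a table */
    static void dev_info_init(struct dev_info *info, uint32_t gc_ver)
    {
        if (gc_ver >= IP_VER(9, 0, 1)) {        /* SOC15 and newer */
            info->doorbell_size = 8;
            info->ih_ring_entry_size = 8 * sizeof(uint32_t);
            info->supports_cwsr = true;
        } else {                                /* pre-SOC15 */
            info->doorbell_size = 4;
            info->ih_ring_entry_size = 4 * sizeof(uint32_t);
            info->supports_cwsr = false;
        }
    }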
+struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
+{
+ struct kfd_dev *kfd = NULL;
+ const struct kfd2kgd_calls *f2g = NULL;
struct pci_dev *pdev = adev->pdev;
+ uint32_t gfx_target_version = 0;
switch (adev->asic_type) {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
- if (vf)
- device_info = NULL;
- else
- device_info = &kaveri_device_info;
- f2g = &gfx_v7_kfd2kgd;
+ gfx_target_version = 70000;
+ if (!vf)
+ f2g = &gfx_v7_kfd2kgd;
break;
#endif
case CHIP_CARRIZO:
- if (vf)
- device_info = NULL;
- else
- device_info = &carrizo_device_info;
- f2g = &gfx_v8_kfd2kgd;
+ gfx_target_version = 80001;
+ if (!vf)
+ f2g = &gfx_v8_kfd2kgd;
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_HAWAII:
- if (vf)
- device_info = NULL;
- else
- device_info = &hawaii_device_info;
- f2g = &gfx_v7_kfd2kgd;
+ gfx_target_version = 70001;
+ if (!amdgpu_exp_hw_support)
+ pr_info("KFD support on Hawaii is experimental. See modparam exp_hw_support\n");
+ else if (!vf)
+ f2g = &gfx_v7_kfd2kgd;
break;
#endif
case CHIP_TONGA:
- if (vf)
- device_info = NULL;
- else
- device_info = &tonga_device_info;
- f2g = &gfx_v8_kfd2kgd;
+ gfx_target_version = 80002;
+ if (!vf)
+ f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_FIJI:
- if (vf)
- device_info = &fiji_vf_device_info;
- else
- device_info = &fiji_device_info;
+ gfx_target_version = 80003;
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_POLARIS10:
- if (vf)
- device_info = &polaris10_vf_device_info;
- else
- device_info = &polaris10_device_info;
+ gfx_target_version = 80003;
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_POLARIS11:
- if (vf)
- device_info = NULL;
- else
- device_info = &polaris11_device_info;
- f2g = &gfx_v8_kfd2kgd;
+ gfx_target_version = 80003;
+ if (!vf)
+ f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_POLARIS12:
- if (vf)
- device_info = NULL;
- else
- device_info = &polaris12_device_info;
- f2g = &gfx_v8_kfd2kgd;
+ gfx_target_version = 80003;
+ if (!vf)
+ f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_VEGAM:
- if (vf)
- device_info = NULL;
- else
- device_info = &vegam_device_info;
- f2g = &gfx_v8_kfd2kgd;
+ gfx_target_version = 80003;
+ if (!vf)
+ f2g = &gfx_v8_kfd2kgd;
break;
default:
switch (adev->ip_versions[GC_HWIP][0]) {
+ /* Vega 10 */
case IP_VERSION(9, 0, 1):
- if (vf)
- device_info = &vega10_vf_device_info;
- else
- device_info = &vega10_device_info;
+ gfx_target_version = 90000;
f2g = &gfx_v9_kfd2kgd;
break;
#ifdef KFD_SUPPORT_IOMMU_V2
+ /* Raven */
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
- if (vf)
- device_info = NULL;
- else
- device_info = &raven_device_info;
- f2g = &gfx_v9_kfd2kgd;
+ gfx_target_version = 90002;
+ if (!vf)
+ f2g = &gfx_v9_kfd2kgd;
break;
#endif
+ /* Vega12 */
case IP_VERSION(9, 2, 1):
- if (vf)
- device_info = NULL;
- else
- device_info = &vega12_device_info;
- f2g = &gfx_v9_kfd2kgd;
+ gfx_target_version = 90004;
+ if (!vf)
+ f2g = &gfx_v9_kfd2kgd;
break;
+ /* Renoir */
case IP_VERSION(9, 3, 0):
- if (vf)
- device_info = NULL;
- else
- device_info = &renoir_device_info;
- f2g = &gfx_v9_kfd2kgd;
+ gfx_target_version = 90012;
+ if (!vf)
+ f2g = &gfx_v9_kfd2kgd;
break;
+ /* Vega20 */
case IP_VERSION(9, 4, 0):
- if (vf)
- device_info = NULL;
- else
- device_info = &vega20_device_info;
- f2g = &gfx_v9_kfd2kgd;
+ gfx_target_version = 90006;
+ if (!vf)
+ f2g = &gfx_v9_kfd2kgd;
break;
+ /* Arcturus */
case IP_VERSION(9, 4, 1):
- device_info = &arcturus_device_info;
+ gfx_target_version = 90008;
f2g = &arcturus_kfd2kgd;
break;
+ /* Aldebaran */
case IP_VERSION(9, 4, 2):
- device_info = &aldebaran_device_info;
+ gfx_target_version = 90010;
f2g = &aldebaran_kfd2kgd;
break;
+ /* Navi10 */
case IP_VERSION(10, 1, 10):
- if (vf)
- device_info = NULL;
- else
- device_info = &navi10_device_info;
- f2g = &gfx_v10_kfd2kgd;
+ gfx_target_version = 100100;
+ if (!vf)
+ f2g = &gfx_v10_kfd2kgd;
break;
+ /* Navi12 */
case IP_VERSION(10, 1, 2):
- device_info = &navi12_device_info;
+ gfx_target_version = 100101;
f2g = &gfx_v10_kfd2kgd;
break;
+ /* Navi14 */
case IP_VERSION(10, 1, 1):
- if (vf)
- device_info = NULL;
- else
- device_info = &navi14_device_info;
- f2g = &gfx_v10_kfd2kgd;
+ gfx_target_version = 100102;
+ if (!vf)
+ f2g = &gfx_v10_kfd2kgd;
break;
+ /* Cyan Skillfish */
case IP_VERSION(10, 1, 3):
- if (vf)
- device_info = NULL;
- else
- device_info = &cyan_skillfish_device_info;
- f2g = &gfx_v10_kfd2kgd;
+ gfx_target_version = 100103;
+ if (!vf)
+ f2g = &gfx_v10_kfd2kgd;
break;
+ /* Sienna Cichlid */
case IP_VERSION(10, 3, 0):
- device_info = &sienna_cichlid_device_info;
+ gfx_target_version = 100300;
f2g = &gfx_v10_3_kfd2kgd;
break;
+ /* Navy Flounder */
case IP_VERSION(10, 3, 2):
- device_info = &navy_flounder_device_info;
+ gfx_target_version = 100301;
f2g = &gfx_v10_3_kfd2kgd;
break;
+ /* Van Gogh */
case IP_VERSION(10, 3, 1):
- if (vf)
- device_info = NULL;
- else
- device_info = &vangogh_device_info;
- f2g = &gfx_v10_3_kfd2kgd;
+ gfx_target_version = 100303;
+ if (!vf)
+ f2g = &gfx_v10_3_kfd2kgd;
break;
+ /* Dimgrey Cavefish */
case IP_VERSION(10, 3, 4):
- device_info = &dimgrey_cavefish_device_info;
+ gfx_target_version = 100302;
f2g = &gfx_v10_3_kfd2kgd;
break;
+ /* Beige Goby */
case IP_VERSION(10, 3, 5):
- device_info = &beige_goby_device_info;
+ gfx_target_version = 100304;
f2g = &gfx_v10_3_kfd2kgd;
break;
+ /* Yellow Carp */
case IP_VERSION(10, 3, 3):
- if (vf)
- device_info = NULL;
- else
- device_info = &yellow_carp_device_info;
- f2g = &gfx_v10_3_kfd2kgd;
+ gfx_target_version = 100305;
+ if (!vf)
+ f2g = &gfx_v10_3_kfd2kgd;
break;
default:
- return NULL;
+ break;
}
break;
}
- if (!device_info || !f2g) {
- dev_err(kfd_device, "%s %s not supported in kfd\n",
- amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
+ if (!f2g) {
+ if (adev->ip_versions[GC_HWIP][0])
+ dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
+ adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
+ else
+ dev_err(kfd_device, "%s %s not supported in kfd\n",
+ amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
return NULL;
}
@@ -824,8 +364,8 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
if (!kfd)
return NULL;
- kfd->kgd = kgd;
- kfd->device_info = device_info;
+ kfd->adev = adev;
+ kfd_device_info_init(kfd, vf, gfx_target_version);
kfd->pdev = pdev;
kfd->init_complete = false;
kfd->kfd2kgd = f2g;
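Note the structural change here: device_info is now embedded in struct kfd_dev by value and filled in by kfd_device_info_init() after allocation, rather than pointing at one of the (now deleted) shared static tables. That is why every accessor in the rest of the patch flips from ->device_info->x to ->device_info.x.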
@@ -844,24 +384,24 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
- if (cwsr_enable && kfd->device_info->supports_cwsr) {
- if (kfd->device_info->asic_family < CHIP_VEGA10) {
+ if (cwsr_enable && kfd->device_info.supports_cwsr) {
+ if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx8_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
- } else if (kfd->device_info->asic_family == CHIP_ARCTURUS) {
+ } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_arcturus_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
- } else if (kfd->device_info->asic_family == CHIP_ALDEBARAN) {
+ } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
- } else if (kfd->device_info->asic_family < CHIP_NAVI10) {
+ } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx9_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
- } else if (kfd->device_info->asic_family < CHIP_SIENNA_CICHLID) {
+ } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_nv1x_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
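The ordered comparisons above (<, ==) are valid because IP_VERSION packs major/minor/revision into one monotonically increasing integer; in amdgpu it is defined along the lines of the following (sketch; verify against the in-tree header):

    #define IP_VERSION(ver, maj, min) ((ver) << 16 | (maj) << 8 | (min))

    /* IP_VERSION(9, 4, 1)  == 0x00090401
     * IP_VERSION(10, 1, 1) == 0x000a0101
     * so any GFX9 version compares less than any GFX10 version,
     * which is what lets kfd_cwsr_init() pick trap handlers by range.
     */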
@@ -882,18 +422,17 @@ static int kfd_gws_init(struct kfd_dev *kfd)
if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
return 0;
- if (hws_gws_support
- || (kfd->device_info->asic_family == CHIP_VEGA10
- && kfd->mec2_fw_version >= 0x81b3)
- || (kfd->device_info->asic_family >= CHIP_VEGA12
- && kfd->device_info->asic_family <= CHIP_RAVEN
- && kfd->mec2_fw_version >= 0x1b3)
- || (kfd->device_info->asic_family == CHIP_ARCTURUS
- && kfd->mec2_fw_version >= 0x30)
- || (kfd->device_info->asic_family == CHIP_ALDEBARAN
- && kfd->mec2_fw_version >= 0x28))
- ret = amdgpu_amdkfd_alloc_gws(kfd->kgd,
- amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws);
+ if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
+ ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
+ && kfd->mec2_fw_version >= 0x81b3) ||
+ (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
+ && kfd->mec2_fw_version >= 0x1b3) ||
+ (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
+ && kfd->mec2_fw_version >= 0x30) ||
+ (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
+ && kfd->mec2_fw_version >= 0x28))))
+ ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
+ kfd->adev->gds.gws_size, &kfd->gws);
return ret;
}
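Decoded, the compound gate above enables GWS when the hws_gws_support module parameter forces it, or when a SOC15 part's MEC2 firmware meets a per-GC-version floor. A faithful standalone decoding (sketch only; assumes the SOC15 check already passed, firmware floors copied from the hunk):

    static bool gws_fw_ok(uint32_t gc_ver, uint32_t mec2_fw)
    {
        return (gc_ver == IP_VERSION(9, 0, 1) && mec2_fw >= 0x81b3) ||  /* Vega10 */
               (gc_ver <= IP_VERSION(9, 4, 0) && mec2_fw >= 0x1b3)  ||  /* ..Vega20 */
               (gc_ver == IP_VERSION(9, 4, 1) && mec2_fw >= 0x30)   ||  /* Arcturus */
               (gc_ver == IP_VERSION(9, 4, 2) && mec2_fw >= 0x28);      /* Aldebaran */
    }

Note that the second clause also matches Vega10 (IP_VERSION(9, 0, 1) <= IP_VERSION(9, 4, 0)), so the effective Vega10 firmware floor in the code as written is the weaker 0x1b3; the pre-patch asic_family bounds (CHIP_VEGA12..CHIP_RAVEN) excluded Vega10 from that clause.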
@@ -910,11 +449,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
unsigned int size, map_process_packet_size;
kfd->ddev = ddev;
- kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+ kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_MEC1);
- kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+ kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_MEC2);
- kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+ kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
@@ -927,16 +466,16 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
* 32 and 64-bit requests are possible and must be
* supported.
*/
- kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
+ kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
if (!kfd->pci_atomic_requested &&
- kfd->device_info->needs_pci_atomics &&
- (!kfd->device_info->no_atomic_fw_version ||
- kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
+ kfd->device_info.needs_pci_atomics &&
+ (!kfd->device_info.no_atomic_fw_version ||
+ kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
dev_info(kfd_device,
"skipped device %x:%x, PCI rejects atomics %d<%d\n",
kfd->pdev->vendor, kfd->pdev->device,
kfd->mec_fw_version,
- kfd->device_info->no_atomic_fw_version);
+ kfd->device_info.no_atomic_fw_version);
return false;
}
@@ -953,16 +492,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
- kfd->device_info->mqd_size_aligned;
+ kfd->device_info.mqd_size_aligned;
/*
* calculate max size of runlist packet.
* There can be only 2 packets at once
*/
- map_process_packet_size =
- kfd->device_info->asic_family == CHIP_ALDEBARAN ?
+ map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
sizeof(struct pm4_mes_map_process_aldebaran) :
- sizeof(struct pm4_mes_map_process);
+ sizeof(struct pm4_mes_map_process);
size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
+ sizeof(struct pm4_mes_runlist)) * 2;
@@ -974,7 +512,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
size += 512 * 1024;
if (amdgpu_amdkfd_alloc_gtt_mem(
- kfd->kgd, size, &kfd->gtt_mem,
+ kfd->adev, size, &kfd->gtt_mem,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
false)) {
dev_err(kfd_device, "Could not allocate %d bytes\n", size);
@@ -995,9 +533,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_doorbell_error;
}
- kfd->hive_id = amdgpu_amdkfd_get_hive_id(kfd->kgd);
+ kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
- kfd->noretry = amdgpu_amdkfd_get_noretry(kfd->kgd);
+ kfd->noretry = kfd->adev->gmc.noretry;
if (kfd_interrupt_init(kfd)) {
dev_err(kfd_device, "Error initializing interrupts\n");
@@ -1015,7 +553,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
*/
if (kfd_gws_init(kfd)) {
dev_err(kfd_device, "Could not allocate %d gws\n",
- amdgpu_amdkfd_get_num_gws(kfd->kgd));
+ kfd->adev->gds.gws_size);
goto gws_error;
}
@@ -1030,7 +568,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd_cwsr_init(kfd);
- svm_migrate_init((struct amdgpu_device *)kfd->kgd);
+ svm_migrate_init(kfd->adev);
if(kgd2kfd_resume_iommu(kfd))
goto device_iommu_error;
@@ -1068,10 +606,10 @@ kfd_interrupt_error:
kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
if (kfd->gws)
- amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
+ amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
@@ -1088,9 +626,9 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_doorbell_fini(kfd);
ida_destroy(&kfd->doorbell_ida);
kfd_gtt_sa_fini(kfd);
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
if (kfd->gws)
- amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
+ amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
}
kfree(kfd);
@@ -1229,7 +767,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
if (!kfd->init_complete)
return;
- if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
+ if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
dev_err_once(kfd_device, "Ring entry too small\n");
return;
}
@@ -1526,7 +1064,7 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
void kfd_inc_compute_active(struct kfd_dev *kfd)
{
if (atomic_inc_return(&kfd->compute_profile) == 1)
- amdgpu_amdkfd_set_compute_idle(kfd->kgd, false);
+ amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
}
void kfd_dec_compute_active(struct kfd_dev *kfd)
@@ -1534,7 +1072,7 @@ void kfd_dec_compute_active(struct kfd_dev *kfd)
int count = atomic_dec_return(&kfd->compute_profile);
if (count == 0)
- amdgpu_amdkfd_set_compute_idle(kfd->kgd, true);
+ amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
WARN_ONCE(count < 0, "Compute profile ref. count error");
}
@@ -1544,6 +1082,26 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}
+/* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines,
+ * and kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA engines.
+ * When the device has more than two engines, we reserve two for PCIe (to
+ * enable full duplex) and use the rest as XGMI engines.
+ */
+unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
+{
+ /* If XGMI is not supported, all SDMA engines are PCIe */
+ if (!kdev->adev->gmc.xgmi.supported)
+ return kdev->adev->sdma.num_instances;
+
+ return min(kdev->adev->sdma.num_instances, 2);
+}
+
+unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
+{
+ /* After reserving engines for PCIe, the rest are XGMI */
+ return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
+}
+
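A worked example of the split: an XGMI-capable device with sdma.num_instances = 8 gets min(8, 2) = 2 PCIe-optimized engines and 8 - 2 = 6 XGMI engines; a non-XGMI device with 2 instances gets 2 PCIe engines and 0 XGMI engines. The same arithmetic, standalone:

    #include <stdio.h>

    static unsigned int pcie_engines(unsigned int instances, int xgmi_supported)
    {
        if (!xgmi_supported)
            return instances;
        return instances < 2 ? instances : 2;   /* min(instances, 2) */
    }

    int main(void)
    {
        unsigned int n = 8;     /* hypothetical instance count */
        unsigned int pcie = pcie_engines(n, 1);

        printf("PCIe: %u, XGMI: %u\n", pcie, n - pcie); /* PCIe: 2, XGMI: 6 */
        return 0;
    }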
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 93e33dd84dd4..4b6814949aad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -47,7 +47,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param);
+ uint32_t filter_param, bool reset);
static int map_queues_cpsch(struct device_queue_manager *dqm);
@@ -99,38 +99,29 @@ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
return dqm->dev->shared_resources.num_pipe_per_mec;
}
-static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
-{
- return dqm->dev->device_info->num_sdma_engines;
-}
-
-static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
-{
- return dqm->dev->device_info->num_xgmi_sdma_engines;
-}
-
static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{
- return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
+ return kfd_get_num_sdma_engines(dqm->dev) +
+ kfd_get_num_xgmi_sdma_engines(dqm->dev);
}
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
- return dqm->dev->device_info->num_sdma_engines
- * dqm->dev->device_info->num_sdma_queues_per_engine;
+ return kfd_get_num_sdma_engines(dqm->dev) *
+ dqm->dev->device_info.num_sdma_queues_per_engine;
}
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
- return dqm->dev->device_info->num_xgmi_sdma_engines
- * dqm->dev->device_info->num_sdma_queues_per_engine;
+ return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
+ dqm->dev->device_info.num_sdma_queues_per_engine;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
return dqm->dev->kfd2kgd->program_sh_mem_settings(
- dqm->dev->kgd, qpd->vmid,
+ dqm->dev->adev, qpd->vmid,
qpd->sh_mem_config,
qpd->sh_mem_ape1_base,
qpd->sh_mem_ape1_limit,
@@ -157,7 +148,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
struct kfd_dev *dev = qpd->dqm->dev;
- if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
+ if (!KFD_IS_SOC15(dev)) {
/* On pre-SOC15 chips we need to use the queue ID to
* preserve the user mode ABI.
*/
@@ -202,7 +193,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
unsigned int old;
struct kfd_dev *dev = qpd->dqm->dev;
- if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
+ if (!KFD_IS_SOC15(dev) ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
return;
@@ -216,7 +207,7 @@ static void program_trap_handler_settings(struct device_queue_manager *dqm,
{
if (dqm->dev->kfd2kgd->program_trap_handler_settings)
dqm->dev->kfd2kgd->program_trap_handler_settings(
- dqm->dev->kgd, qpd->vmid,
+ dqm->dev->adev, qpd->vmid,
qpd->tba_addr, qpd->tma_addr);
}
@@ -250,21 +241,20 @@ static int allocate_vmid(struct device_queue_manager *dqm,
program_sh_mem_settings(dqm, qpd);
- if (dqm->dev->device_info->asic_family >= CHIP_VEGA10 &&
- dqm->dev->cwsr_enabled)
+ if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled)
program_trap_handler_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
* is called, i.e. when the first queue is created.
*/
- dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
+ dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
qpd->vmid,
qpd->page_table_base);
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
if (dqm->dev->kfd2kgd->set_scratch_backing_va)
- dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
+ dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
qpd->sh_hidden_private_base, qpd->vmid);
return 0;
@@ -283,7 +273,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
if (ret)
return ret;
- return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+ return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
pmf->release_mem_size / sizeof(uint32_t));
}
@@ -293,7 +283,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct queue *q)
{
/* On GFX v7, CP doesn't flush TC at dequeue */
- if (q->device->device_info->asic_family == CHIP_HAWAII)
+ if (q->device->adev->asic_type == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
pr_err("Failed to flush TC\n");
@@ -580,7 +570,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
/* Make sure the queue is unmapped before updating the MQD */
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
retval = unmap_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false);
if (retval) {
pr_err("unmap queue failed\n");
goto out_unlock;
@@ -776,7 +766,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
if (!list_empty(&qpd->queues_list)) {
dqm->dev->kfd2kgd->set_vm_context_page_table_base(
- dqm->dev->kgd,
+ dqm->dev->adev,
qpd->vmid,
qpd->page_table_base);
kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
@@ -954,7 +944,7 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
unsigned int vmid)
{
return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
- dqm->dev->kgd, pasid, vmid);
+ dqm->dev->adev, pasid, vmid);
}
static void init_interrupts(struct device_queue_manager *dqm)
@@ -963,7 +953,7 @@ static void init_interrupts(struct device_queue_manager *dqm)
for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
if (is_pipe_enabled(dqm, 0, i))
- dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
+ dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
@@ -1014,19 +1004,22 @@ static void uninitialize(struct device_queue_manager *dqm)
static int start_nocpsch(struct device_queue_manager *dqm)
{
+ int r = 0;
+
pr_info("SW scheduler is used");
init_interrupts(dqm);
- if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
- return pm_init(&dqm->packet_mgr, dqm);
- dqm->sched_running = true;
+ if (dqm->dev->adev->asic_type == CHIP_HAWAII)
+ r = pm_init(&dqm->packet_mgr, dqm);
+ if (!r)
+ dqm->sched_running = true;
- return 0;
+ return r;
}
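Beyond the adev conversion, this hunk fixes a latent bug: the old code returned pm_init()'s result directly on Hawaii, so dqm->sched_running = true was never reached on that ASIC; now the flag is set on every ASIC whenever startup succeeds.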
static int stop_nocpsch(struct device_queue_manager *dqm)
{
- if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ if (dqm->dev->adev->asic_type == CHIP_HAWAII)
pm_uninit(&dqm->packet_mgr, false);
dqm->sched_running = false;
@@ -1055,9 +1048,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
dqm->sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit;
q->properties.sdma_engine_id = q->sdma_id %
- get_num_sdma_engines(dqm);
+ kfd_get_num_sdma_engines(dqm->dev);
q->properties.sdma_queue_id = q->sdma_id /
- get_num_sdma_engines(dqm);
+ kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
if (dqm->xgmi_sdma_bitmap == 0) {
pr_err("No more XGMI SDMA queue to allocate\n");
@@ -1072,10 +1065,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
* assumes the first N engines are always
* PCIe-optimized ones
*/
- q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
- q->sdma_id % get_num_xgmi_sdma_engines(dqm);
+ q->properties.sdma_engine_id =
+ kfd_get_num_sdma_engines(dqm->dev) +
+ q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
q->properties.sdma_queue_id = q->sdma_id /
- get_num_xgmi_sdma_engines(dqm);
+ kfd_get_num_xgmi_sdma_engines(dqm->dev);
}
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
@@ -1132,7 +1126,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
res.queue_mask |= 1ull
<< amdgpu_queue_mask_bit_to_set_resource_bit(
- (struct amdgpu_device *)dqm->dev->kgd, i);
+ dqm->dev->adev, i);
}
res.gws_mask = ~0ull;
res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
@@ -1232,7 +1226,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
}
if (!dqm->is_hws_hang)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false);
hanging = dqm->is_hws_hang || dqm->is_resetting;
dqm->sched_running = false;
@@ -1428,7 +1422,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param)
+ uint32_t filter_param, bool reset)
{
int retval = 0;
struct mqd_manager *mqd_mgr;
@@ -1441,7 +1435,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
return retval;
retval = pm_send_unmap_queue(&dqm->packet_mgr, KFD_QUEUE_TYPE_COMPUTE,
- filter, filter_param, false, 0);
+ filter, filter_param, reset, 0);
if (retval)
return retval;
@@ -1485,6 +1479,21 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
return retval;
}
+/* only for compute queues */
+static int reset_queues_cpsch(struct device_queue_manager *dqm,
+ uint16_t pasid)
+{
+ int retval;
+
+ dqm_lock(dqm);
+
+ retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
+ pasid, true);
+
+ dqm_unlock(dqm);
+ return retval;
+}
+
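For orientation, the RAS-poison call path this patch wires up is:

    event_interrupt_poison_consumption()            /* kfd_int_process_v9.c, below */
        -> dev->dqm->ops.reset_queues(dqm, pasid)
            -> reset_queues_cpsch(dqm, pasid)
                -> unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
                                      pasid, true /* reset */)
                    -> pm_send_unmap_queue(..., reset, 0)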
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
@@ -1494,7 +1503,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
if (dqm->is_hws_hang)
return -EIO;
- retval = unmap_queues_cpsch(dqm, filter, filter_param);
+ retval = unmap_queues_cpsch(dqm, filter, filter_param, false);
if (retval)
return retval;
@@ -1847,10 +1856,10 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
get_num_all_sdma_engines(dqm) *
- dev->device_info->num_sdma_queues_per_engine +
+ dev->device_info.num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
- retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
+ retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
(void *)&(mem_obj->cpu_ptr), false);
@@ -1867,7 +1876,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
if (!dqm)
return NULL;
- switch (dev->device_info->asic_family) {
+ switch (dev->adev->asic_type) {
/* HWS is not available on Hawaii. */
case CHIP_HAWAII:
/* HWS depends on CWSR for timely dequeue. CWSR is not
@@ -1905,6 +1914,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.evict_process_queues = evict_process_queues_cpsch;
dqm->ops.restore_process_queues = restore_process_queues_cpsch;
dqm->ops.get_wave_state = get_wave_state;
+ dqm->ops.reset_queues = reset_queues_cpsch;
break;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
@@ -1930,7 +1940,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
goto out_free;
}
- switch (dev->device_info->asic_family) {
+ switch (dev->adev->asic_type) {
case CHIP_CARRIZO:
device_queue_manager_init_vi(&dqm->asic_ops);
break;
@@ -1952,31 +1962,16 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
device_queue_manager_init_vi_tonga(&dqm->asic_ops);
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- device_queue_manager_init_v9(&dqm->asic_ops);
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- device_queue_manager_init_v10_navi10(&dqm->asic_ops);
- break;
default:
- WARN(1, "Unexpected ASIC family %u",
- dev->device_info->asic_family);
- goto out_free;
+ if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
+ device_queue_manager_init_v10_navi10(&dqm->asic_ops);
+ else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
+ device_queue_manager_init_v9(&dqm->asic_ops);
+ else {
+ WARN(1, "Unexpected ASIC family %u",
+ dev->adev->asic_type);
+ goto out_free;
+ }
}
if (init_mqd_managers(dqm))
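The same collapse of per-chip case lists into two range checks recurs below in kfd_flat_memory.c and kfd_packet_manager.c; any future GFX9/GFX10 IP version then picks the right ops without editing a switch. The shape, in isolation (helper names hypothetical):

    if (gc_ver >= IP_VERSION(10, 1, 1))
        init_v10_ops();         /* Navi1x/Navi2x-style */
    else if (gc_ver >= IP_VERSION(9, 0, 1))
        init_v9_ops();          /* Vega/Arcturus/Aldebaran-style */
    else
        fail();                 /* pre-SOC15 parts matched explicit cases above */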
@@ -2000,7 +1995,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
- amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
@@ -2031,7 +2026,7 @@ static void kfd_process_hw_exception(struct work_struct *work)
{
struct device_queue_manager *dqm = container_of(work,
struct device_queue_manager, hw_exception_work);
- amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
+ amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
}
#if defined(CONFIG_DEBUG_FS)
@@ -2070,7 +2065,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
return 0;
}
- r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
+ r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
&dump, &n_regs);
if (!r) {
@@ -2092,7 +2087,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
continue;
r = dqm->dev->kfd2kgd->hqd_dump(
- dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ dqm->dev->adev, pipe, queue, &dump, &n_regs);
if (r)
break;
@@ -2106,10 +2101,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
for (queue = 0;
- queue < dqm->dev->device_info->num_sdma_queues_per_engine;
+ queue < dqm->dev->device_info.num_sdma_queues_per_engine;
queue++) {
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
- dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ dqm->dev->adev, pipe, queue, &dump, &n_regs);
if (r)
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 499fc0ea387f..e145e4deb53a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -81,6 +81,8 @@ struct device_process_node {
*
* @get_wave_state: Retrieves context save state and optionally copies the
* control stack, if kept in the MQD, to the given userspace address.
+ *
+ * @reset_queues: reset queues which consume RAS poison
*/
struct device_queue_manager_ops {
@@ -134,6 +136,9 @@ struct device_queue_manager_ops {
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size);
+
+ int (*reset_queues)(struct device_queue_manager *dqm,
+ uint16_t pasid);
};
struct device_queue_manager_asic_ops {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index b5c3d13643f1..f20434d9980e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -62,7 +62,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (dqm->dev->device_info->asic_family == CHIP_ALDEBARAN) {
+ if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) {
/* Aldebaran can safely support different XNACK modes
* per process
*/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 768d153acff4..0dbcf54657ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -48,7 +48,7 @@
/* # of doorbell bytes allocated for each process. */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)
{
- return roundup(kfd->device_info->doorbell_size *
+ return roundup(kfd->device_info.doorbell_size *
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
PAGE_SIZE);
}
@@ -180,7 +180,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
return NULL;
- inx *= kfd->device_info->doorbell_size / sizeof(u32);
+ inx *= kfd->device_info.doorbell_size / sizeof(u32);
/*
* Calculating the kernel doorbell offset using the first
@@ -201,7 +201,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
unsigned int inx;
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
- * sizeof(u32) / kfd->device_info->doorbell_size;
+ * sizeof(u32) / kfd->device_info.doorbell_size;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_available_index);
@@ -239,7 +239,7 @@ unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
return kfd->doorbell_base_dw_offset +
pdd->doorbell_index
* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
- doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
+ doorbell_id * kfd->device_info.doorbell_size / sizeof(u32);
}
uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
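A worked example of the offset math above, assuming KFD_MAX_NUM_OF_QUEUES_PER_PROCESS is 1024 and 4 KiB pages (both assumptions; check kfd_priv.h): with the 8-byte SOC15 doorbell,

    kfd_doorbell_process_slice() = roundup(8 * 1024, 4096) = 8192 bytes

so for a process with doorbell_index p and a queue with doorbell_id d:

    dw_offset = doorbell_base_dw_offset + p * 8192 / 4 + d * 8 / 4
              = doorbell_base_dw_offset + p * 2048 + d * 2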
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 3eea4edee355..afe72dd11325 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -935,8 +935,10 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
/* Workaround on Raven to not kill the process when memory is freed
* before IOMMU is able to finish processing all the excessive PPRs
*/
- if (dev->device_info->asic_family != CHIP_RAVEN &&
- dev->device_info->asic_family != CHIP_RENOIR) {
+
+ if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0)) {
mutex_lock(&p->event_mutex);
/* Lookup events by type and signal them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index d1388896f9c1..2e2b7ceb71db 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -394,7 +394,7 @@ int kfd_init_apertures(struct kfd_process *process)
pdd->gpuvm_base = pdd->gpuvm_limit = 0;
pdd->scratch_base = pdd->scratch_limit = 0;
} else {
- switch (dev->device_info->asic_family) {
+ switch (dev->adev->asic_type) {
case CHIP_KAVERI:
case CHIP_HAWAII:
case CHIP_CARRIZO:
@@ -406,29 +406,14 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_VEGAM:
kfd_init_apertures_vi(pdd, id);
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- kfd_init_apertures_v9(pdd, id);
- break;
default:
- WARN(1, "Unexpected ASIC family %u",
- dev->device_info->asic_family);
- return -EINVAL;
+ if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
+ kfd_init_apertures_v9(pdd, id);
+ else {
+ WARN(1, "Unexpected ASIC family %u",
+ dev->adev->asic_type);
+ return -EINVAL;
+ }
}
if (!dev->use_iommu_v2) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 543e7ea75593..e8bc28009c22 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -89,6 +89,44 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
+static void event_interrupt_poison_consumption(struct kfd_dev *dev,
+ uint16_t pasid, uint16_t source_id)
+{
+ int ret = -EINVAL;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return;
+
+ /* all queues of a process are unmapped at once */
+ if (atomic_read(&p->poison)) {
+ kfd_unref_process(p);
+ return;
+ }
+
+ atomic_set(&p->poison, 1);
+ kfd_unref_process(p);
+
+ switch (source_id) {
+ case SOC15_INTSRC_SQ_INTERRUPT_MSG:
+ if (dev->dqm->ops.reset_queues)
+ ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
+ break;
+ case SOC15_INTSRC_SDMA_ECC:
+ default:
+ break;
+ }
+
+ kfd_signal_poison_consumed_event(dev, pasid);
+
+ /* If resetting the queues succeeded, do page retirement without a GPU
+ * reset; if it failed, fall back to a full GPU reset.
+ */
+ if (!ret)
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+ else
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+}
+
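The atomic_read()/atomic_set() pair above gives first-one-wins behavior, so only one interrupt per process triggers the reset; note the small window between the read and the set, which the code evidently tolerates. A race-free variant of the same claim idiom, in portable C:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Returns true only for the single caller that flips 0 -> 1 */
    static bool claim_poison_handling(atomic_int *poison)
    {
        int expected = 0;

        return atomic_compare_exchange_strong(poison, &expected, 1);
    }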
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
@@ -135,7 +173,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
*patched_flag = true;
memcpy(patched_ihre, ih_ring_entry,
- dev->device_info->ih_ring_entry_size);
+ dev->device_info.ih_ring_entry_size);
pasid = dev->dqm->vmid_pasid[vmid];
@@ -159,6 +197,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
*/
return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
source_id == SOC15_INTSRC_SDMA_TRAP ||
+ source_id == SOC15_INTSRC_SDMA_ECC ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
((client_id == SOC15_IH_CLIENTID_VMC ||
@@ -230,8 +269,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
sq_intr_err);
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
- kfd_signal_poison_consumed_event(dev, pasid);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd);
+ event_interrupt_poison_consumption(dev, pasid, source_id);
return;
}
break;
@@ -252,8 +290,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
if (source_id == SOC15_INTSRC_SDMA_TRAP) {
kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
- kfd_signal_poison_consumed_event(dev, pasid);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd);
+ event_interrupt_poison_consumption(dev, pasid, source_id);
return;
}
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index bc47f6a44456..81887c2013c9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -54,7 +54,7 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
int r;
r = kfifo_alloc(&kfd->ih_fifo,
- KFD_IH_NUM_ENTRIES * kfd->device_info->ih_ring_entry_size,
+ KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size,
GFP_KERNEL);
if (r) {
dev_err(kfd_chardev(), "Failed to allocate IH fifo\n");
@@ -114,8 +114,8 @@ bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
int count;
count = kfifo_in(&kfd->ih_fifo, ih_ring_entry,
- kfd->device_info->ih_ring_entry_size);
- if (count != kfd->device_info->ih_ring_entry_size) {
+ kfd->device_info.ih_ring_entry_size);
+ if (count != kfd->device_info.ih_ring_entry_size) {
dev_err_ratelimited(kfd_chardev(),
"Interrupt ring overflow, dropping interrupt %d\n",
count);
@@ -133,11 +133,11 @@ static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
int count;
count = kfifo_out(&kfd->ih_fifo, ih_ring_entry,
- kfd->device_info->ih_ring_entry_size);
+ kfd->device_info.ih_ring_entry_size);
- WARN_ON(count && count != kfd->device_info->ih_ring_entry_size);
+ WARN_ON(count && count != kfd->device_info.ih_ring_entry_size);
- return count == kfd->device_info->ih_ring_entry_size;
+ return count == kfd->device_info.ih_ring_entry_size;
}
static void interrupt_wq(struct work_struct *work)
@@ -146,13 +146,13 @@ static void interrupt_wq(struct work_struct *work)
interrupt_work);
uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
- if (dev->device_info->ih_ring_entry_size > sizeof(ih_ring_entry)) {
+ if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
dev_err_once(kfd_chardev(), "Ring entry too small\n");
return;
}
while (dequeue_ih_ring_entry(dev, ih_ring_entry))
- dev->device_info->event_interrupt_class->interrupt_wq(dev,
+ dev->device_info.event_interrupt_class->interrupt_wq(dev,
ih_ring_entry);
}
@@ -163,7 +163,7 @@ bool interrupt_is_wanted(struct kfd_dev *dev,
/* integer and bitwise OR so there is no boolean short-circuiting */
unsigned int wanted = 0;
- wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
+ wanted |= dev->device_info.event_interrupt_class->interrupt_isr(dev,
ih_ring_entry, patched_ihre, flag);
return wanted != 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 73f2257acc23..66ad8d0b8f7f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -89,7 +89,7 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
}
pasid_limit = min_t(unsigned int,
- (unsigned int)(1 << kfd->device_info->max_pasid_bits),
+ (unsigned int)(1 << kfd->device_info.max_pasid_bits),
iommu_info.max_pasids);
if (!kfd_set_pasid_limit(pasid_limit)) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 64b4ac339904..16f8bc4ca7f6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -91,7 +91,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_gpu_addr = kq->pq->gpu_addr;
/* For CIK family asics, kq->eop_mem is not needed */
- if (dev->device_info->asic_family > CHIP_MULLINS) {
+ if (dev->adev->asic_type > CHIP_MULLINS) {
retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
if (retval != 0)
goto err_eop_allocate_vidmem;
@@ -111,7 +111,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
- retval = kfd_gtt_sa_allocate(dev, dev->device_info->doorbell_size,
+ retval = kfd_gtt_sa_allocate(dev, dev->device_info.doorbell_size,
&kq->wptr_mem);
if (retval != 0)
@@ -297,7 +297,7 @@ void kq_submit_packet(struct kernel_queue *kq)
}
pr_debug("\n");
#endif
- if (kq->dev->device_info->doorbell_size == 8) {
+ if (kq->dev->device_info.doorbell_size == 8) {
*kq->wptr64_kernel = kq->pending_wptr64;
write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
kq->pending_wptr64);
@@ -310,7 +310,7 @@ void kq_submit_packet(struct kernel_queue *kq)
void kq_rollback_packet(struct kernel_queue *kq)
{
- if (kq->dev->device_info->doorbell_size == 8) {
+ if (kq->dev->device_info.doorbell_size == 8) {
kq->pending_wptr64 = *kq->wptr64_kernel;
kq->pending_wptr = *kq->wptr_kernel %
(kq->queue->properties.queue_size / 4);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 9b9c2b9bf2ef..ed5385137f48 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -108,8 +108,8 @@ error_free:
* svm_migrate_copy_memory_gart - sdma copy data between ram and vram
*
* @adev: amdgpu device the sdma ring running
- * @src: source page address array
- * @dst: destination page address array
+ * @sys: system DMA pointer to be copied
+ * @vram: vram destination DMA pointer
* @npages: number of pages to copy
* @direction: enum MIGRATION_COPY_DIR
* @mfence: output, sdma fence to signal after sdma is done
@@ -549,7 +549,7 @@ static void svm_migrate_page_free(struct page *page)
if (svm_bo) {
pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
- svm_range_bo_unref(svm_bo);
+ svm_range_bo_unref_async(svm_bo);
}
}
@@ -938,7 +938,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
void *r;
/* Page migration works on Vega10 or newer */
- if (kfddev->device_info->asic_family < CHIP_VEGA10)
+ if (!KFD_IS_SOC15(kfddev))
return -EINVAL;
pgmap = &kfddev->pgmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index c021519af810..e2825ad4d699 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -71,7 +71,7 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
return NULL;
offset = (q->sdma_engine_id *
- dev->device_info->num_sdma_queues_per_engine +
+ dev->device_info.num_sdma_queues_per_engine +
q->sdma_queue_id) *
dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;
@@ -100,7 +100,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
struct kfd_cu_info cu_info;
uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
int i, se, sh, cu;
- amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);
if (cu_mask_count > cu_info.cu_active_number)
cu_mask_count = cu_info.cu_active_number;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 8128f4d312f1..e9a8e21e144e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -171,7 +171,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
- return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, wptr_mask, mms);
}
@@ -180,7 +180,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
(uint32_t __user *)p->write_ptr,
mms);
}
@@ -276,7 +276,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout,
+ return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -289,7 +289,7 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
@@ -297,7 +297,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
+ return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
pipe_id, queue_id);
}
@@ -306,7 +306,7 @@ static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
/*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 270160fc401b..d74d8a6ac27a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -148,7 +148,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
- r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, 0, mms);
return r;
@@ -158,7 +158,7 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
queue_id, p->doorbell_off);
}
@@ -239,7 +239,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->kgd, mqd, type, timeout,
+ (mm->dev->adev, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -254,7 +254,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_is_occupied(
- mm->dev->kgd, queue_address,
+ mm->dev->adev, queue_address,
pipe_id, queue_id);
}
@@ -320,7 +320,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
(uint32_t __user *)p->write_ptr,
mms);
}
@@ -363,14 +363,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}
static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 4e5932f54b5a..326eb2285029 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -108,7 +108,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
return NULL;
- retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
+ retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev,
ALIGN(q->ctl_stack_size, PAGE_SIZE) +
ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
&(mqd_mem_obj->gtt_mem),
@@ -199,7 +199,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
- return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, 0, mms);
}
@@ -208,7 +208,7 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
queue_id, p->doorbell_off);
}
@@ -291,7 +291,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->kgd, mqd, type, timeout,
+ (mm->dev->adev, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -301,7 +301,7 @@ static void free_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_dev *kfd = mm->dev;
if (mqd_mem_obj->gtt_mem) {
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->adev, mqd_mem_obj->gtt_mem);
kfree(mqd_mem_obj);
} else {
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
@@ -313,7 +313,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_is_occupied(
- mm->dev->kgd, queue_address,
+ mm->dev->adev, queue_address,
pipe_id, queue_id);
}
@@ -375,7 +375,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
(uint32_t __user *)p->write_ptr,
mms);
}
@@ -418,14 +418,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}
static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index cd9220eb8a7a..d456e950ce1d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -162,7 +162,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
- return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, wptr_mask, mms);
}
@@ -265,7 +265,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->kgd, mqd, type, timeout,
+ (mm->dev->adev, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -280,7 +280,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_is_occupied(
- mm->dev->kgd, queue_address,
+ mm->dev->adev, queue_address,
pipe_id, queue_id);
}
@@ -347,7 +347,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
(uint32_t __user *)p->write_ptr,
mms);
}
@@ -389,14 +389,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}
static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index e547f1f8c49f..1439420925a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -223,7 +223,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
- switch (dqm->dev->device_info->asic_family) {
+ switch (dqm->dev->adev->asic_type) {
case CHIP_KAVERI:
case CHIP_HAWAII:
/* PM4 packet structures on CIK are the same as on VI */
@@ -236,31 +236,16 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_VEGAM:
pm->pmf = &kfd_vi_pm_funcs;
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_ARCTURUS:
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- pm->pmf = &kfd_v9_pm_funcs;
- break;
- case CHIP_ALDEBARAN:
- pm->pmf = &kfd_aldebaran_pm_funcs;
- break;
default:
- WARN(1, "Unexpected ASIC family %u",
- dqm->dev->device_info->asic_family);
- return -EINVAL;
+ if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2))
+ pm->pmf = &kfd_aldebaran_pm_funcs;
+ else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
+ pm->pmf = &kfd_v9_pm_funcs;
+ else {
+ WARN(1, "Unexpected ASIC family %u",
+ dqm->dev->adev->asic_type);
+ return -EINVAL;
+ }
}
pm->dqm = dqm;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index 08442e7d9944..3c0658e32e93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -110,8 +110,8 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
- struct scheduling_resources *res)
+static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res)
{
struct pm4_mes_set_resources *packet;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 8fd48d0ed240..ea68f3b3a4e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -183,7 +183,8 @@ enum cache_policy {
cache_policy_noncoherent
};
-#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
+#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
+#define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
@@ -194,8 +195,6 @@ struct kfd_event_interrupt_class {
};
struct kfd_device_info {
- enum amd_asic_type asic_family;
- const char *asic_name;
uint32_t gfx_target_version;
const struct kfd_event_interrupt_class *event_interrupt_class;
unsigned int max_pasid_bits;
@@ -208,11 +207,12 @@ struct kfd_device_info {
bool needs_iommu_device;
bool needs_pci_atomics;
uint32_t no_atomic_fw_version;
- unsigned int num_sdma_engines;
- unsigned int num_xgmi_sdma_engines;
unsigned int num_sdma_queues_per_engine;
};
+unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev);
+unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev);
+
struct kfd_mem_obj {
uint32_t range_start;
uint32_t range_end;
@@ -228,9 +228,9 @@ struct kfd_vmid_info {
};
struct kfd_dev {
- struct kgd_dev *kgd;
+ struct amdgpu_device *adev;
- const struct kfd_device_info *device_info;
+ struct kfd_device_info device_info;
struct pci_dev *pdev;
struct drm_device *ddev;
@@ -856,6 +856,8 @@ struct kfd_process {
struct svm_range_list svms;
bool xnack_enabled;
+
+ atomic_t poison;
};
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
@@ -891,7 +893,7 @@ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
-int kfd_process_gpuid_from_kgd(struct kfd_process *p,
+int kfd_process_gpuid_from_adev(struct kfd_process *p,
struct amdgpu_device *adev, uint32_t *gpuid,
uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
@@ -984,7 +986,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
-struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
+struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b993011cfa64..d1145da5348f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -251,14 +251,13 @@ cleanup:
}
/**
- * @kfd_get_cu_occupancy - Collect number of waves in-flight on this device
+ * kfd_get_cu_occupancy - Collect number of waves in-flight on this device
* by current process. Translates acquired wave count into number of compute units
* that are occupied.
*
- * @atr: Handle of attribute that allows reporting of wave count. The attribute
+ * @attr: Handle of attribute that allows reporting of wave count. The attribute
* handle encapsulates GPU device it is associated with, thereby allowing collection
* of waves in flight, etc
- *
* @buffer: Handle of user provided buffer updated with wave count
*
* Return: Number of bytes written to user buffer or an error value
@@ -288,7 +287,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
/* Collect wave count from device if it supports */
wave_cnt = 0;
max_waves_per_cu = 0;
- dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt,
+ dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
&max_waves_per_cu);
/* Translate wave count to number of compute units */
@@ -462,6 +461,7 @@ static struct attribute *procfs_queue_attrs[] = {
&attr_queue_gpuid,
NULL
};
+ATTRIBUTE_GROUPS(procfs_queue);
static const struct sysfs_ops procfs_queue_ops = {
.show = kfd_procfs_queue_show,
@@ -469,7 +469,7 @@ static const struct sysfs_ops procfs_queue_ops = {
static struct kobj_type procfs_queue_type = {
.sysfs_ops = &procfs_queue_ops,
- .default_attrs = procfs_queue_attrs,
+ .default_groups = procfs_queue_groups,
};
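ATTRIBUTE_GROUPS(procfs_queue) generates a procfs_queue_group/procfs_queue_groups pair from procfs_queue_attrs, which is what allows the kobj_type to move from the removed default_attrs field to default_groups. A simplified userspace mock of roughly what the macro expands to (types reduced to a minimum, not the real sysfs structs):

#include <stdio.h>

struct attribute { const char *name; };
struct attribute_group { struct attribute **attrs; };

static struct attribute attr_queue_size = { "size" };
static struct attribute attr_queue_gpuid = { "gpuid" };

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_gpuid,
	NULL,
};

/* Roughly what ATTRIBUTE_GROUPS(procfs_queue) expands to. */
static const struct attribute_group procfs_queue_group = {
	.attrs = procfs_queue_attrs,
};
static const struct attribute_group *procfs_queue_groups[] = {
	&procfs_queue_group,
	NULL,
};

int main(void)
{
	for (const struct attribute_group **g = procfs_queue_groups; *g; g++)
		for (struct attribute **a = (*g)->attrs; *a; a++)
			printf("attr: %s\n", (*a)->name);
	return 0;
}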
static const struct sysfs_ops procfs_stats_ops = {
@@ -692,12 +692,12 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_dev *dev = pdd->dev;
if (kptr) {
- amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->adev, mem);
kptr = NULL;
}
- amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv);
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv,
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
NULL);
}
@@ -714,24 +714,24 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
struct kfd_dev *kdev = pdd->dev;
int err;
- err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+ err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
pdd->drm_priv, mem, NULL, flags);
if (err)
goto err_alloc_mem;
- err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, *mem,
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
pdd->drm_priv, NULL);
if (err)
goto err_map_mem;
- err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, *mem, true);
+ err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
if (kptr) {
- err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
+ err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->adev,
(struct kgd_mem *)*mem, kptr, NULL);
if (err) {
pr_debug("Map GTT BO to kernel failed\n");
@@ -742,10 +742,10 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
return err;
sync_memory_failed:
- amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->kgd, *mem, pdd->drm_priv);
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
err_map_mem:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, *mem, pdd->drm_priv,
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
NULL);
err_alloc_mem:
*mem = NULL;
@@ -940,10 +940,10 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
if (!peer_pdd->drm_priv)
continue;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
- peer_pdd->dev->kgd, mem, peer_pdd->drm_priv);
+ peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
}
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem,
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
pdd->drm_priv, NULL);
kfd_process_device_remove_obj_handle(pdd, id);
}
@@ -974,7 +974,7 @@ static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
if (!mem)
goto out;
- amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->adev, mem);
out:
mutex_unlock(&p->mutex);
@@ -1003,7 +1003,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
if (pdd->drm_file) {
amdgpu_amdkfd_gpuvm_release_process_vm(
- pdd->dev->kgd, pdd->drm_priv);
+ pdd->dev->adev, pdd->drm_priv);
fput(pdd->drm_file);
}
@@ -1011,7 +1011,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
get_order(KFD_CWSR_TBA_TMA_SIZE));
- kfree(pdd->qpd.doorbell_bitmap);
+ bitmap_free(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);
@@ -1317,14 +1317,13 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* support the SVM APIs and don't need to be considered
* for the XNACK mode selection.
*/
- if (dev->device_info->asic_family < CHIP_VEGA10)
+ if (!KFD_IS_SOC15(dev))
continue;
/* Aldebaran can always support XNACK because it can support
* per-process XNACK mode selection. But let the dev->noretry
* setting still influence the default XNACK mode.
*/
- if (supported &&
- dev->device_info->asic_family == CHIP_ALDEBARAN)
+ if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
continue;
/* GFXv10 and later GPUs do not support shader preemption
@@ -1332,7 +1331,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* management and memory-manager-related preemptions or
* even deadlocks.
*/
- if (dev->device_info->asic_family >= CHIP_NAVI10)
+ if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
return false;
if (dev->noretry)
@@ -1431,12 +1430,11 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
int range_start = dev->shared_resources.non_cp_doorbells_start;
int range_end = dev->shared_resources.non_cp_doorbells_end;
- if (!KFD_IS_SOC15(dev->device_info->asic_family))
+ if (!KFD_IS_SOC15(dev))
return 0;
- qpd->doorbell_bitmap =
- kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
- BITS_PER_BYTE), GFP_KERNEL);
+ qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ GFP_KERNEL);
if (!qpd->doorbell_bitmap)
return -ENOMEM;
@@ -1448,9 +1446,9 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
if (i >= range_start && i <= range_end) {
- set_bit(i, qpd->doorbell_bitmap);
- set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
- qpd->doorbell_bitmap);
+ __set_bit(i, qpd->doorbell_bitmap);
+ __set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ qpd->doorbell_bitmap);
}
}
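bitmap_zalloc()/bitmap_free() take a size in bits and hide the DIV_ROUND_UP arithmetic the old kzalloc call open-coded; __set_bit is the non-atomic variant, which is fine here because the bitmap is not yet visible to other threads. The same conversion is applied to pqm->queue_slot_bitmap further down. A userspace approximation of the helpers (assumed shapes, not the kernel implementations, which also take a gfp argument):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *bitmap_zalloc(unsigned int nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

static void bitmap_free(unsigned long *bm) { free(bm); }

/* Non-atomic set, like the kernel's __set_bit(). */
static void __set_bit(unsigned int nr, unsigned long *bm)
{
	bm[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit(unsigned int nr, const unsigned long *bm)
{
	return (bm[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	enum { MAX_QUEUES = 1024 };
	unsigned long *doorbells = bitmap_zalloc(MAX_QUEUES);

	if (!doorbells)
		return 1;
	__set_bit(3, doorbells);
	printf("bit 3 = %d, bit 4 = %d\n",
	       test_bit(3, doorbells), test_bit(4, doorbells));
	bitmap_free(doorbells);
	return 0;
}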
@@ -1547,7 +1545,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
dev = pdd->dev;
ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
- dev->kgd, drm_file, p->pasid,
+ dev->adev, drm_file, p->pasid,
&p->kgd_process_info, &p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
@@ -1779,14 +1777,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
}
int
-kfd_process_gpuid_from_kgd(struct kfd_process *p, struct amdgpu_device *adev,
+kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
uint32_t *gpuid, uint32_t *gpuidx)
{
- struct kgd_dev *kgd = (struct kgd_dev *)adev;
int i;
for (i = 0; i < p->n_pdds; i++)
- if (p->pdds[i] && p->pdds[i]->dev->kgd == kgd) {
+ if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
*gpuid = p->pdds[i]->dev->id;
*gpuidx = i;
return 0;
@@ -1951,10 +1948,10 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
* only happens when the first queue is created.
*/
if (pdd->qpd.vmid)
- amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
+ amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
pdd->qpd.vmid);
} else {
- amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
pdd->process->pasid, type);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 3627e7ac161b..5e5c84a8e1ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -118,7 +118,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
return ret;
pqn->q->gws = mem;
- pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0;
+ pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q, NULL);
@@ -135,9 +135,8 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
INIT_LIST_HEAD(&pqm->queues);
- pqm->queue_slot_bitmap =
- kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
- BITS_PER_BYTE), GFP_KERNEL);
+ pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ GFP_KERNEL);
if (!pqm->queue_slot_bitmap)
return -ENOMEM;
pqm->process = p;
@@ -159,7 +158,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
kfree(pqn);
}
- kfree(pqm->queue_slot_bitmap);
+ bitmap_free(pqm->queue_slot_bitmap);
pqm->queue_slot_bitmap = NULL;
}
@@ -220,7 +219,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
* Hence we also check the type as well
*/
if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
- max_queues = dev->device_info->max_no_of_hqd/2;
+ max_queues = dev->device_info.max_no_of_hqd/2;
if (pdd->qpd.queue_count >= max_queues)
return -ENOSPC;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index ed4bc5f844ce..deae12dc777d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -207,7 +207,6 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
uint64_t throttle_bitmask)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
/*
* ThermalThrottle msg = throttle_bitmask(8):
* thermal_interrupt_count(16):
@@ -223,14 +222,13 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n",
KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask,
- atomic64_read(&adev->smu.throttle_int_counter));
+ atomic64_read(&dev->adev->smu.throttle_int_counter));
add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
}
void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
struct amdgpu_task_info task_info;
/* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */
/* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n +
@@ -243,7 +241,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
return;
memset(&task_info, 0, sizeof(struct amdgpu_task_info));
- amdgpu_vm_get_task_info(adev, pasid, &task_info);
+ amdgpu_vm_get_task_info(dev->adev, pasid, &task_info);
/* Report VM faults from user applications, not retry from kernel */
if (!task_info.pid)
return;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 3cb4681c5f53..f2805ba74c80 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -107,7 +107,7 @@ static void svm_range_add_to_svms(struct svm_range *prange)
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange, prange->start, prange->last);
- list_add_tail(&prange->list, &prange->svms->list);
+ list_move_tail(&prange->list, &prange->svms->list);
prange->it_node.start = prange->start;
prange->it_node.last = prange->last;
interval_tree_insert(&prange->it_node, &prange->svms->objects);
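list_move_tail() differs from list_add_tail() in that it first unlinks the node from whatever list it is currently on; since the reworked svm_range_add pre-links new and cloned ranges on insert_list, a plain list_add_tail here would corrupt both lists. A tiny userspace reimplementation of the two primitives to show the difference (an assumed simplification of the kernel's list.h):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* list_move_tail = unlink from the current list, then append elsewhere. */
static void list_move_tail(struct list_head *n, struct list_head *h)
{
	list_del(n);
	list_add_tail(n, h);
}

int main(void)
{
	struct list_head a, b, node;

	INIT_LIST_HEAD(&a);
	INIT_LIST_HEAD(&b);
	list_add_tail(&node, &a);	/* node sits on list a (insert_list) */
	list_move_tail(&node, &b);	/* safe: unlinks from a first */
	printf("a empty: %d, b has node: %d\n", a.next == &a, b.next == &node);
	return 0;
}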
@@ -193,7 +193,6 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
struct kfd_process_device *pdd;
- struct amdgpu_device *adev;
pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
@@ -201,9 +200,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- r = svm_range_dma_map_dev(adev, prange, offset, npages,
+ r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
hmm_pfns, gpuidx);
if (r)
break;
@@ -297,8 +295,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
prange->last = last;
INIT_LIST_HEAD(&prange->list);
INIT_LIST_HEAD(&prange->update_list);
- INIT_LIST_HEAD(&prange->remove_list);
- INIT_LIST_HEAD(&prange->insert_list);
INIT_LIST_HEAD(&prange->svm_bo_list);
INIT_LIST_HEAD(&prange->deferred_list);
INIT_LIST_HEAD(&prange->child_list);
@@ -334,6 +330,8 @@ static void svm_range_bo_release(struct kref *kref)
struct svm_range_bo *svm_bo;
svm_bo = container_of(kref, struct svm_range_bo, kref);
+ pr_debug("svm_bo 0x%p\n", svm_bo);
+
spin_lock(&svm_bo->list_lock);
while (!list_empty(&svm_bo->range_list)) {
struct svm_range *prange =
@@ -367,12 +365,33 @@ static void svm_range_bo_release(struct kref *kref)
kfree(svm_bo);
}
-void svm_range_bo_unref(struct svm_range_bo *svm_bo)
+static void svm_range_bo_wq_release(struct work_struct *work)
{
- if (!svm_bo)
- return;
+ struct svm_range_bo *svm_bo;
- kref_put(&svm_bo->kref, svm_range_bo_release);
+ svm_bo = container_of(work, struct svm_range_bo, release_work);
+ svm_range_bo_release(&svm_bo->kref);
+}
+
+static void svm_range_bo_release_async(struct kref *kref)
+{
+ struct svm_range_bo *svm_bo;
+
+ svm_bo = container_of(kref, struct svm_range_bo, kref);
+ pr_debug("svm_bo 0x%p\n", svm_bo);
+ INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
+ schedule_work(&svm_bo->release_work);
+}
+
+void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
+{
+ kref_put(&svm_bo->kref, svm_range_bo_release_async);
+}
+
+static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
+{
+ if (svm_bo)
+ kref_put(&svm_bo->kref, svm_range_bo_release);
}
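svm_range_bo_unref_async() defers the final teardown to a workqueue so the last kref_put can happen from contexts where sleeping inside svm_range_bo_release() would be illegal. A rough userspace analogue that uses a detached thread in place of schedule_work() (the threading detail is an assumption made for the demo, not what the kernel does):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct obj {
	atomic_int ref;
};

static void obj_release(struct obj *o)
{
	/* Imagine this must sleep (takes mutexes, frees BOs, ...). */
	printf("releasing %p\n", (void *)o);
	free(o);
}

static void *release_worker(void *arg)
{
	obj_release(arg);
	return NULL;
}

/* Synchronous put: may run the sleeping release inline. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		obj_release(o);
}

/* Async put: the final reference hands the release off to a worker. */
static void obj_put_async(struct obj *o)
{
	pthread_t t;

	if (atomic_fetch_sub(&o->ref, 1) == 1) {
		pthread_create(&t, NULL, release_worker, o);
		pthread_detach(t);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->ref, 2);
	obj_put(o);		/* drops to 1, nothing released */
	obj_put_async(o);	/* final put, release runs in the worker */
	sleep(1);		/* crude: let the detached worker finish */
	return 0;
}

Build with -pthread. The synchronous svm_range_bo_unref() is kept for callers that are already allowed to sleep.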
static bool
@@ -581,7 +600,7 @@ svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
return NULL;
}
- return (struct amdgpu_device *)pdd->dev->kgd;
+ return pdd->dev->adev;
}
struct kfd_process_device *
@@ -593,7 +612,7 @@ svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
p = container_of(prange->svms, struct kfd_process, svms);
- r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
+ r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
if (r) {
pr_debug("failed to get device id by adev %p\n", adev);
return NULL;
@@ -706,6 +725,61 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
}
}
+static bool
+svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
+ uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
+{
+ uint32_t i;
+ int gpuidx;
+
+ for (i = 0; i < nattr; i++) {
+ switch (attrs[i].type) {
+ case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
+ if (prange->preferred_loc != attrs[i].value)
+ return false;
+ break;
+ case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
+ /* Prefetch should always trigger a migration even
+ * if the value of the attribute didn't change.
+ */
+ return false;
+ case KFD_IOCTL_SVM_ATTR_ACCESS:
+ case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
+ case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
+ gpuidx = kfd_process_gpuidx_from_gpuid(p,
+ attrs[i].value);
+ if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
+ if (test_bit(gpuidx, prange->bitmap_access) ||
+ test_bit(gpuidx, prange->bitmap_aip))
+ return false;
+ } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
+ if (!test_bit(gpuidx, prange->bitmap_access))
+ return false;
+ } else {
+ if (!test_bit(gpuidx, prange->bitmap_aip))
+ return false;
+ }
+ break;
+ case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
+ if ((prange->flags & attrs[i].value) != attrs[i].value)
+ return false;
+ break;
+ case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
+ if ((prange->flags & attrs[i].value) != 0)
+ return false;
+ break;
+ case KFD_IOCTL_SVM_ATTR_GRANULARITY:
+ if (prange->granularity != attrs[i].value)
+ return false;
+ break;
+ default:
+ WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
+ }
+ }
+
+ return true;
+}
+
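The new svm_range_is_same_attrs() lets svm_range_add treat a set_attr call that changes nothing as a no-op instead of cloning, splitting and re-validating the range. The idea in miniature, with a hypothetical two-field state (names invented for the demo):

#include <stdbool.h>
#include <stdio.h>

enum attr_type { ATTR_LOC, ATTR_GRANULARITY };
struct attr { enum attr_type type; unsigned int value; };
struct range { unsigned int loc, granularity; };

/* Return true only if applying attrs[] would leave the range unchanged. */
static bool same_attrs(const struct range *r,
		       const struct attr *attrs, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		switch (attrs[i].type) {
		case ATTR_LOC:
			if (r->loc != attrs[i].value)
				return false;
			break;
		case ATTR_GRANULARITY:
			if (r->granularity != attrs[i].value)
				return false;
			break;
		}
	}
	return true;
}

int main(void)
{
	struct range r = { .loc = 1, .granularity = 9 };
	struct attr same[] = { { ATTR_LOC, 1 }, { ATTR_GRANULARITY, 9 } };
	struct attr diff[] = { { ATTR_LOC, 2 } };

	printf("%d %d\n", same_attrs(&r, same, 2), same_attrs(&r, diff, 1));
	return 0;
}

Note the one deliberate exception in the kernel version: a prefetch attribute always reports "not the same", because a prefetch is expected to trigger a migration even when the value is unchanged.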
/**
* svm_range_debug_dump - print all range information from svms
* @svms: svm range list header
@@ -743,14 +817,6 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
}
}
-static bool
-svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new)
-{
- return (old->prefetch_loc == new->prefetch_loc &&
- old->flags == new->flags &&
- old->granularity == new->granularity);
-}
-
static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
uint64_t old_start, uint64_t old_n,
@@ -943,26 +1009,26 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
}
static int
-svm_range_split_tail(struct svm_range *prange, struct svm_range *new,
+svm_range_split_tail(struct svm_range *prange,
uint64_t new_last, struct list_head *insert_list)
{
struct svm_range *tail;
int r = svm_range_split(prange, prange->start, new_last, &tail);
if (!r)
- list_add(&tail->insert_list, insert_list);
+ list_add(&tail->list, insert_list);
return r;
}
static int
-svm_range_split_head(struct svm_range *prange, struct svm_range *new,
+svm_range_split_head(struct svm_range *prange,
uint64_t new_start, struct list_head *insert_list)
{
struct svm_range *head;
int r = svm_range_split(prange, new_start, prange->last, &head);
if (!r)
- list_add(&head->insert_list, insert_list);
+ list_add(&head->list, insert_list);
return r;
}
@@ -1053,8 +1119,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
if (domain == SVM_RANGE_VRAM_DOMAIN)
bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
+ switch (KFD_GC_VERSION(adev->kfd.dev)) {
+ case IP_VERSION(9, 4, 1):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_adev == adev) {
mapping_flags |= coherent ?
@@ -1070,7 +1136,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_adev == adev) {
mapping_flags |= coherent ?
@@ -1129,7 +1195,6 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
struct kfd_process_device *pdd;
struct dma_fence *fence = NULL;
- struct amdgpu_device *adev;
struct kfd_process *p;
uint32_t gpuidx;
int r = 0;
@@ -1145,9 +1210,9 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
+ r = svm_range_unmap_from_gpu(pdd->dev->adev,
+ drm_priv_to_vm(pdd->drm_priv),
start, last, &fence);
if (r)
break;
@@ -1159,7 +1224,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
if (r)
break;
}
- amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(pdd->dev->adev,
p->pasid, TLB_FLUSH_HEAVYWEIGHT);
}
@@ -1172,7 +1237,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned long npages, bool readonly, dma_addr_t *dma_addr,
struct amdgpu_device *bo_adev, struct dma_fence **fence)
{
- struct amdgpu_bo_va bo_va;
bool table_freed = false;
uint64_t pte_flags;
unsigned long last_start;
@@ -1185,9 +1249,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
last_start, last_start + npages - 1, readonly);
- if (prange->svm_bo && prange->ttm_res)
- bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
-
for (i = offset; i < offset + npages; i++) {
last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
@@ -1243,8 +1304,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct kfd_process *p;
p = container_of(prange->svms, struct kfd_process, svms);
- amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
- p->pasid, TLB_FLUSH_LEGACY);
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, p->pasid, TLB_FLUSH_LEGACY);
}
out:
return r;
@@ -1257,7 +1317,6 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
{
struct kfd_process_device *pdd;
struct amdgpu_device *bo_adev;
- struct amdgpu_device *adev;
struct kfd_process *p;
struct dma_fence *fence = NULL;
uint32_t gpuidx;
@@ -1276,19 +1335,18 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
pdd = kfd_bind_process_to_device(pdd->dev, p);
if (IS_ERR(pdd))
return -EINVAL;
- if (bo_adev && adev != bo_adev &&
- !amdgpu_xgmi_same_hive(adev, bo_adev)) {
+ if (bo_adev && pdd->dev->adev != bo_adev &&
+ !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
pr_debug("cannot map to device idx %d\n", gpuidx);
continue;
}
- r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
+ r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv),
prange, offset, npages, readonly,
prange->dma_addr[gpuidx],
bo_adev, wait ? &fence : NULL);
@@ -1322,7 +1380,6 @@ struct svm_validate_context {
static int svm_range_reserve_bos(struct svm_validate_context *ctx)
{
struct kfd_process_device *pdd;
- struct amdgpu_device *adev;
struct amdgpu_vm *vm;
uint32_t gpuidx;
int r;
@@ -1334,7 +1391,6 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
vm = drm_priv_to_vm(pdd->drm_priv);
ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
@@ -1356,9 +1412,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
r = -EINVAL;
goto unreserve_out;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv),
+ r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
+ drm_priv_to_vm(pdd->drm_priv),
svm_range_bo_validate, NULL);
if (r) {
pr_debug("failed %d validate pt bos\n", r);
@@ -1381,12 +1437,10 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
struct kfd_process_device *pdd;
- struct amdgpu_device *adev;
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- return SVM_ADEV_PGMAP_OWNER(adev);
+ return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}
/*
@@ -1660,6 +1714,10 @@ out_reschedule:
/**
* svm_range_evict - evict svm range
+ * @prange: svm range structure
+ * @mm: current process mm_struct
+ * @start: first page of the range being evicted
+ * @last: last page of the range being evicted
*
* Stop all queues of the process to ensure the GPU doesn't access the memory,
* then return to let the CPU evict the buffer and proceed with the CPU page
* table update.
@@ -1764,46 +1822,49 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
}
/**
- * svm_range_handle_overlap - split overlap ranges
- * @svms: svm range list header
- * @new: range added with this attributes
- * @start: range added start address, in pages
- * @last: range last address, in pages
- * @update_list: output, the ranges attributes are updated. For set_attr, this
- * will do validation and map to GPUs. For unmap, this will be
- * removed and unmap from GPUs
- * @insert_list: output, the ranges will be inserted into svms, attributes are
- * not changes. For set_attr, this will add into svms.
- * @remove_list:output, the ranges will be removed from svms
- * @left: the remaining range after overlap, For set_attr, this will be added
- * as new range.
+ * svm_range_add - add svm range and handle overlap
+ * @p: the process whose svms the range is added to
+ * @start: start address of the range, in pages, page size aligned
+ * @size: size of the range, in pages, page size aligned
+ * @nattr: number of attributes
+ * @attrs: array of attributes
+ * @update_list: output, the ranges that need validation and GPU mapping update
+ * @insert_list: output, the ranges that need to be inserted into svms
+ * @remove_list: output, the ranges that are replaced and need removal from svms
*
- * Total have 5 overlap cases.
+ * Check if the virtual address range has overlap with any existing ranges,
+ * split partly overlapping ranges and add new ranges in the gaps. All changes
+ * should be applied to the range_list and interval tree transactionally. If
+ * any range split or allocation fails, the entire update fails. Therefore any
+ * existing overlapping svm_ranges are cloned and the original svm_ranges left
+ * unchanged.
*
- * This function handles overlap of an address interval with existing
- * struct svm_ranges for applying new attributes. This may require
- * splitting existing struct svm_ranges. All changes should be applied to
- * the range_list and interval tree transactionally. If any split operation
- * fails, the entire update fails. Therefore the existing overlapping
- * svm_ranges are cloned and the original svm_ranges left unchanged. If the
- * transaction succeeds, the modified clones are added and the originals
- * freed. Otherwise the clones are removed and the old svm_ranges remain.
+ * If the transaction succeeds, the caller can update and insert clones and
+ * new ranges, then free the originals.
*
- * Context: The caller must hold svms->lock
+ * Otherwise the caller can free the clones and new ranges, while the old
+ * svm_ranges remain unchanged.
+ *
+ * Context: Process context, caller must hold svms->lock
+ *
+ * Return:
+ * 0 - OK, otherwise error code
*/
static int
-svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
- unsigned long start, unsigned long last,
- struct list_head *update_list,
- struct list_head *insert_list,
- struct list_head *remove_list,
- unsigned long *left)
+svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
+ uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
+ struct list_head *update_list, struct list_head *insert_list,
+ struct list_head *remove_list)
{
+ unsigned long last = start + size - 1UL;
+ struct svm_range_list *svms = &p->svms;
struct interval_tree_node *node;
struct svm_range *prange;
struct svm_range *tmp;
int r = 0;
+ pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
+
INIT_LIST_HEAD(update_list);
INIT_LIST_HEAD(insert_list);
INIT_LIST_HEAD(remove_list);
@@ -1811,37 +1872,44 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
node = interval_tree_iter_first(&svms->objects, start, last);
while (node) {
struct interval_tree_node *next;
- struct svm_range *old;
unsigned long next_start;
pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
node->last);
- old = container_of(node, struct svm_range, it_node);
+ prange = container_of(node, struct svm_range, it_node);
next = interval_tree_iter_next(node, start, last);
next_start = min(node->last, last) + 1;
- if (node->start < start || node->last > last) {
- /* node intersects the updated range, clone+split it */
+ if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
+ /* nothing to do */
+ } else if (node->start < start || node->last > last) {
+ /* node intersects the update range and its attributes
+ * will change. Clone and split it, apply updates only
+ * to the overlapping part
+ */
+ struct svm_range *old = prange;
+
prange = svm_range_clone(old);
if (!prange) {
r = -ENOMEM;
goto out;
}
- list_add(&old->remove_list, remove_list);
- list_add(&prange->insert_list, insert_list);
+ list_add(&old->update_list, remove_list);
+ list_add(&prange->list, insert_list);
+ list_add(&prange->update_list, update_list);
if (node->start < start) {
pr_debug("change old range start\n");
- r = svm_range_split_head(prange, new, start,
+ r = svm_range_split_head(prange, start,
insert_list);
if (r)
goto out;
}
if (node->last > last) {
pr_debug("change old range last\n");
- r = svm_range_split_tail(prange, new, last,
+ r = svm_range_split_tail(prange, last,
insert_list);
if (r)
goto out;
@@ -1850,22 +1918,18 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
/* The node is contained within start..last,
* just update it
*/
- prange = old;
- }
-
- if (!svm_range_is_same_attrs(prange, new))
list_add(&prange->update_list, update_list);
+ }
/* insert a new node if needed */
if (node->start > start) {
- prange = svm_range_new(prange->svms, start,
- node->start - 1);
+ prange = svm_range_new(svms, start, node->start - 1);
if (!prange) {
r = -ENOMEM;
goto out;
}
- list_add(&prange->insert_list, insert_list);
+ list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
}
@@ -1873,12 +1937,20 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
start = next_start;
}
- if (left && start <= last)
- *left = last - start + 1;
+ /* add a final range at the end if needed */
+ if (start <= last) {
+ prange = svm_range_new(svms, start, last);
+ if (!prange) {
+ r = -ENOMEM;
+ goto out;
+ }
+ list_add(&prange->list, insert_list);
+ list_add(&prange->update_list, update_list);
+ }
out:
if (r)
- list_for_each_entry_safe(prange, tmp, insert_list, insert_list)
+ list_for_each_entry_safe(prange, tmp, insert_list, list)
svm_range_free(prange);
return r;
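The reworked svm_range_add walks every overlap of [start, last] and, per node, either reuses it (same attributes), clones and splits it (partial overlap with an attribute change), or creates new ranges for the gaps, with one final range appended at the end if needed. The split geometry itself is simple; a standalone sketch for one overlapping node (hypothetical helper, pages as plain integers):

#include <stdio.h>

struct span { unsigned long start, last; };

/*
 * For an existing node and an update range [start, last], compute the
 * head piece (keeps old attrs), the overlapped piece (gets new attrs)
 * and the tail piece (keeps old attrs). Empty pieces get start > last.
 */
static void split_one(struct span node, unsigned long start,
		      unsigned long last, struct span *head,
		      struct span *mid, struct span *tail)
{
	head->start = node.start;
	head->last = node.start < start ? start - 1 : node.start - 1;
	mid->start = node.start > start ? node.start : start;
	mid->last = node.last < last ? node.last : last;
	tail->start = node.last > last ? last + 1 : node.last + 1;
	tail->last = node.last;
}

static void show(const char *name, struct span s)
{
	if (s.start <= s.last)
		printf("%s: [0x%lx 0x%lx]\n", name, s.start, s.last);
	else
		printf("%s: empty\n", name);
}

int main(void)
{
	struct span node = { 0x100, 0x1ff }, head, mid, tail;

	split_one(node, 0x180, 0x400, &head, &mid, &tail);
	show("head", head);	/* [0x100 0x17f], old attributes kept */
	show("mid", mid);	/* [0x180 0x1ff], new attributes applied */
	show("tail", tail);	/* empty, node ends inside the update */
	return 0;
}

The transactional part is handled by the lists: clones and new ranges go on insert_list/update_list, replaced originals on remove_list, and on any -ENOMEM everything on insert_list is freed so the tree is untouched.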
@@ -1962,7 +2034,6 @@ svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
static void svm_range_drain_retry_fault(struct svm_range_list *svms)
{
struct kfd_process_device *pdd;
- struct amdgpu_device *adev;
struct kfd_process *p;
int drain;
uint32_t i;
@@ -1980,9 +2051,9 @@ restart:
continue;
pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
+ amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
+ &pdd->dev->adev->irq.ih1);
pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
}
if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
@@ -2172,6 +2243,9 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
/**
* svm_range_cpu_invalidate_pagetables - interval notifier callback
+ * @mni: mmu_interval_notifier struct
+ * @range: mmu_notifier_range struct
+ * @cur_seq: value to pass to mmu_interval_set_seq()
*
* If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it
* is from migration, or CPU page invalidation callback.
@@ -2201,8 +2275,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
start = mni->interval_tree.start;
last = mni->interval_tree.last;
- start = (start > range->start ? start : range->start) >> PAGE_SHIFT;
- last = (last < (range->end - 1) ? last : range->end - 1) >> PAGE_SHIFT;
+ start = max(start, range->start) >> PAGE_SHIFT;
+ last = min(last, range->end - 1) >> PAGE_SHIFT;
pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
start, last, range->start >> PAGE_SHIFT,
(range->end - 1) >> PAGE_SHIFT,
@@ -2304,7 +2378,7 @@ svm_range_best_restore_location(struct svm_range *prange,
p = container_of(prange->svms, struct kfd_process, svms);
- r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx);
+ r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
if (r < 0) {
pr_debug("failed to get gpuid from kgd\n");
return -1;
@@ -2481,7 +2555,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
pr_debug("Failed to create prange in address [0x%llx]\n", addr);
return NULL;
}
- if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
+ if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
pr_debug("failed to get gpuid from kgd\n");
svm_range_free(prange);
return NULL;
@@ -2548,7 +2622,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
uint32_t gpuid;
int r;
- r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+ r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
if (r < 0)
return;
}
@@ -2895,59 +2969,6 @@ svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
}
/**
- * svm_range_add - add svm range and handle overlap
- * @p: the range add to this process svms
- * @start: page size aligned
- * @size: page size aligned
- * @nattr: number of attributes
- * @attrs: array of attributes
- * @update_list: output, the ranges need validate and update GPU mapping
- * @insert_list: output, the ranges need insert to svms
- * @remove_list: output, the ranges are replaced and need remove from svms
- *
- * Check if the virtual address range has overlap with the registered ranges,
- * split the overlapped range, copy and adjust pages address and vram nodes in
- * old and new ranges.
- *
- * Context: Process context, caller must hold svms->lock
- *
- * Return:
- * 0 - OK, otherwise error code
- */
-static int
-svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
- uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
- struct list_head *update_list, struct list_head *insert_list,
- struct list_head *remove_list)
-{
- uint64_t last = start + size - 1UL;
- struct svm_range_list *svms;
- struct svm_range new = {0};
- struct svm_range *prange;
- unsigned long left = 0;
- int r = 0;
-
- pr_debug("svms 0x%p [0x%llx 0x%llx]\n", &p->svms, start, last);
-
- svm_range_apply_attrs(p, &new, nattr, attrs);
-
- svms = &p->svms;
-
- r = svm_range_handle_overlap(svms, &new, start, last, update_list,
- insert_list, remove_list, &left);
- if (r)
- return r;
-
- if (left) {
- prange = svm_range_new(svms, last - left + 1, last);
- list_add(&prange->insert_list, insert_list);
- list_add(&prange->update_list, update_list);
- }
-
- return 0;
-}
-
-/**
* svm_range_best_prefetch_location - decide the best prefetch location
* @prange: svm range structure
*
@@ -2980,7 +3001,6 @@ svm_range_best_prefetch_location(struct svm_range *prange)
uint32_t best_loc = prange->prefetch_loc;
struct kfd_process_device *pdd;
struct amdgpu_device *bo_adev;
- struct amdgpu_device *adev;
struct kfd_process *p;
uint32_t gpuidx;
@@ -3008,12 +3028,11 @@ svm_range_best_prefetch_location(struct svm_range *prange)
pr_debug("failed to get device by idx 0x%x\n", gpuidx);
continue;
}
- adev = (struct amdgpu_device *)pdd->dev->kgd;
- if (adev == bo_adev)
+ if (pdd->dev->adev == bo_adev)
continue;
- if (!amdgpu_xgmi_same_hive(adev, bo_adev)) {
+ if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
best_loc = 0;
break;
}
@@ -3215,7 +3234,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
goto out;
}
/* Apply changes as a transaction */
- list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
+ list_for_each_entry_safe(prange, next, &insert_list, list) {
svm_range_add_to_svms(prange);
svm_range_add_notifier_locked(mm, prange);
}
@@ -3223,8 +3242,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
svm_range_apply_attrs(p, prange, nattr, attrs);
/* TODO: unmap ranges from GPU that lost access */
}
- list_for_each_entry_safe(prange, next, &remove_list,
- remove_list) {
+ list_for_each_entry_safe(prange, next, &remove_list, update_list) {
pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange, prange->start,
prange->last);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 6dc91c33e80f..949b477e2f4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -48,6 +48,7 @@ struct svm_range_bo {
struct work_struct eviction_work;
struct svm_range_list *svms;
uint32_t evicting;
+ struct work_struct release_work;
};
enum svm_work_list_ops {
@@ -75,8 +76,6 @@ struct svm_work_list_item {
* aligned, page size is (last - start + 1)
* @list: link list node, used to scan all ranges of svms
* @update_list:link list node used to add to update_list
- * @remove_list:link list node used to add to remove list
- * @insert_list:link list node used to add to insert list
* @mapping: bo_va mapping structure to create and update GPU page table
* @npages: number of pages
* @dma_addr: dma mapping address on each GPU for system memory physical page
@@ -112,8 +111,6 @@ struct svm_range {
struct interval_tree_node it_node;
struct list_head list;
struct list_head update_list;
- struct list_head remove_list;
- struct list_head insert_list;
uint64_t npages;
dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
struct ttm_resource *ttm_res;
@@ -195,7 +192,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
*/
#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)
-void svm_range_bo_unref(struct svm_range_bo *svm_bo);
+void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
#else
struct kfd_process;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index dd593ad0614a..948fbb39336e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -113,7 +113,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
return device;
}
-struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
+struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev)
{
struct kfd_topology_device *top_dev;
struct kfd_dev *device = NULL;
@@ -121,7 +121,7 @@ struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu && top_dev->gpu->kgd == kgd) {
+ if (top_dev->gpu && top_dev->gpu->adev == adev) {
device = top_dev->gpu;
break;
}
@@ -503,7 +503,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (dev->gpu) {
log_max_watch_addr =
- __ilog2_u32(dev->gpu->device_info->num_of_watch_points);
+ __ilog2_u32(dev->gpu->device_info.num_of_watch_points);
if (log_max_watch_addr) {
dev->node_props.capability |=
@@ -515,7 +515,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
}
- if (dev->gpu->device_info->asic_family == CHIP_TONGA)
+ if (dev->gpu->adev->asic_type == CHIP_TONGA)
dev->node_props.capability |=
HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
@@ -531,7 +531,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
- amdgpu_amdkfd_get_unique_id(dev->gpu->kgd));
+ dev->gpu->adev->unique_id);
}
@@ -1106,7 +1106,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
if (!gpu)
return 0;
- amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(gpu->adev, &local_mem_info);
local_mem_size = local_mem_info.local_mem_size_private +
local_mem_info.local_mem_size_public;
@@ -1189,7 +1189,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
* for APUs - If CRAT from ACPI reports more than one bank, then
* all the banks will report the same mem_clk_max information
*/
- amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info);
list_for_each_entry(mem, &dev->mem_props, list)
mem->mem_clk_max = local_mem_info.mem_clk_max;
@@ -1217,8 +1217,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev,
/* set gpu (dev) flags. */
} else {
if (!dev->gpu->pci_atomic_requested ||
- dev->gpu->device_info->asic_family ==
- CHIP_HAWAII)
+ dev->gpu->adev->asic_type == CHIP_HAWAII)
link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
}
@@ -1239,7 +1238,7 @@ static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev,
*/
if (inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS ||
(inbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
- to_dev->gpu->device_info->asic_family == CHIP_VEGA20)) {
+ KFD_GC_VERSION(to_dev->gpu) == IP_VERSION(9, 4, 0))) {
outbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
}
@@ -1286,7 +1285,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
void *crat_image = NULL;
size_t image_size = 0;
int proximity_domain;
- struct amdgpu_device *adev;
+ int i;
+ const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
INIT_LIST_HEAD(&temp_topology_device_list);
@@ -1296,10 +1296,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
- adev = (struct amdgpu_device *)(gpu->kgd);
-
/* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. */
- if (gpu->hive_id && adev->gmc.xgmi.connected_to_cpu) {
+ if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) {
struct kfd_topology_device *top_dev;
down_read(&topology_lock);
@@ -1372,45 +1370,48 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* needed for the topology
*/
- amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info);
- strncpy(dev->node_props.name, gpu->device_info->asic_name,
- KFD_TOPOLOGY_PUBLIC_NAME_SIZE);
+ for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1; i++) {
+ dev->node_props.name[i] = __tolower(asic_name[i]);
+ if (asic_name[i] == '\0')
+ break;
+ }
+ dev->node_props.name[i] = '\0';
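With asic_name removed from kfd_device_info, the topology node name is now derived by lower-casing amdgpu's ASIC name into the fixed-size sysfs buffer, always NUL-terminating. The loop in isolation (buffer size reduced for the demo):

#include <ctype.h>
#include <stdio.h>

#define NAME_SIZE 32

int main(void)
{
	const char *asic_name = "NAVI10";
	char name[NAME_SIZE];
	int i;

	/* Copy at most NAME_SIZE - 1 chars, lower-cased, then terminate. */
	for (i = 0; i < NAME_SIZE - 1; i++) {
		name[i] = (char)tolower((unsigned char)asic_name[i]);
		if (asic_name[i] == '\0')
			break;
	}
	name[i] = '\0';

	printf("%s\n", name); /* navi10 */
	return 0;
}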
dev->node_props.simd_arrays_per_engine =
cu_info.num_shader_arrays_per_engine;
- dev->node_props.gfx_target_version = gpu->device_info->gfx_target_version;
+ dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version;
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
dev->node_props.capability |=
- ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) <<
- HSA_CAP_ASIC_REVISION_SHIFT) &
+ ((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) &
HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->pdev);
dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
- amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
+ amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev);
dev->node_props.max_engine_clk_ccompute =
cpufreq_quick_get_max(0) / 1000;
dev->node_props.drm_render_minor =
gpu->shared_resources.drm_render_minor;
dev->node_props.hive_id = gpu->hive_id;
- dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines;
+ dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu);
dev->node_props.num_sdma_xgmi_engines =
- gpu->device_info->num_xgmi_sdma_engines;
+ kfd_get_num_xgmi_sdma_engines(gpu);
dev->node_props.num_sdma_queues_per_engine =
- gpu->device_info->num_sdma_queues_per_engine;
+ gpu->device_info.num_sdma_queues_per_engine;
dev->node_props.num_gws = (dev->gpu->gws &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
- amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
+ dev->gpu->adev->gds.gws_size : 0;
dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
- switch (dev->gpu->device_info->asic_family) {
+ switch (dev->gpu->adev->asic_type) {
case CHIP_KAVERI:
case CHIP_HAWAII:
case CHIP_TONGA:
@@ -1429,30 +1430,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
- HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
- HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
- break;
default:
- WARN(1, "Unexpected ASIC family %u",
- dev->gpu->device_info->asic_family);
+ if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1))
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ else
+ WARN(1, "Unexpected ASIC family %u",
+ dev->gpu->adev->asic_type);
}
/*
@@ -1469,7 +1454,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* because it doesn't consider masked out CUs
* max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
*/
- if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+ if (dev->gpu->adev->asic_type == CHIP_CARRIZO) {
dev->node_props.simd_count =
cu_info.simd_per_cu * cu_info.cu_active_number;
dev->node_props.max_waves_per_simd = 10;
@@ -1477,16 +1462,17 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
/* kfd only concerns sram ecc on GFX and HBM ecc on UMC */
dev->node_props.capability |=
- ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
+ ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
HSA_CAP_SRAM_EDCSUPPORTED : 0;
- dev->node_props.capability |= ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
+ dev->node_props.capability |=
+ ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
HSA_CAP_MEM_EDCSUPPORTED : 0;
- if (adev->asic_type != CHIP_VEGA10)
- dev->node_props.capability |= (adev->ras_enabled != 0) ?
+ if (KFD_GC_VERSION(dev->gpu) != IP_VERSION(9, 0, 1))
+ dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
HSA_CAP_RASEVENTNOTIFY : 0;
- if (KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev))
+ if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev))
dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
kfd_debug_print_topology();
@@ -1592,7 +1578,7 @@ void kfd_double_confirm_iommu_support(struct kfd_dev *gpu)
gpu->use_iommu_v2 = false;
- if (!gpu->device_info->needs_iommu_device)
+ if (!gpu->device_info.needs_iommu_device)
return;
down_read(&topology_lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index a8db017c9b8e..f0cc59d2fd5d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -25,38 +25,11 @@
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/kfd_sysfs.h>
#include "kfd_crat.h"
#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32
-#define HSA_CAP_HOT_PLUGGABLE 0x00000001
-#define HSA_CAP_ATS_PRESENT 0x00000002
-#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004
-#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008
-#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010
-#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020
-#define HSA_CAP_VA_LIMIT 0x00000040
-#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080
-#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
-#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
-#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000
-#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12
-
-#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
-#define HSA_CAP_DOORBELL_TYPE_1_0 0x1
-#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
-#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
-
-#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000 /* Old buggy user mode depends on this being 0 */
-#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
-#define HSA_CAP_RASEVENTNOTIFY 0x00200000
-#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000
-#define HSA_CAP_ASIC_REVISION_SHIFT 22
-#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000
-#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000
-#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000
-#define HSA_CAP_RESERVED 0xe00f8000
-
struct kfd_node_properties {
uint64_t hive_id;
uint32_t cpu_cores_count;
@@ -93,17 +66,6 @@ struct kfd_node_properties {
char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};
-#define HSA_MEM_HEAP_TYPE_SYSTEM 0
-#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1
-#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2
-#define HSA_MEM_HEAP_TYPE_GPU_GDS 3
-#define HSA_MEM_HEAP_TYPE_GPU_LDS 4
-#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5
-
-#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001
-#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002
-#define HSA_MEM_FLAGS_RESERVED 0xfffffffc
-
struct kfd_mem_properties {
struct list_head list;
uint32_t heap_type;
@@ -116,12 +78,6 @@ struct kfd_mem_properties {
struct attribute attr;
};
-#define HSA_CACHE_TYPE_DATA 0x00000001
-#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002
-#define HSA_CACHE_TYPE_CPU 0x00000004
-#define HSA_CACHE_TYPE_HSACU 0x00000008
-#define HSA_CACHE_TYPE_RESERVED 0xfffffff0
-
struct kfd_cache_properties {
struct list_head list;
uint32_t processor_id_low;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e727f1dd2a9a..7f9773f8dab6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -624,7 +624,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
- * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
* @adev: amdgpu_device pointer
* @notify: dmub notification structure
*
@@ -632,7 +632,8 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
* Copies dmub notification to DM which is to be read by the AUX command
* issuing thread and also signals the event to wake up the thread.
*/
-void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
{
if (adev->dm.dmub_notify)
memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
@@ -648,7 +649,8 @@ void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notific
* Dmub Hpd interrupt processing callback. Gets the display index through the
* link index and calls the helper to do the processing.
*/
-void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+static void dmub_hpd_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
{
struct amdgpu_dm_connector *aconnector;
struct amdgpu_dm_connector *hpd_aconnector = NULL;
@@ -656,7 +658,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
struct drm_connector_list_iter iter;
struct dc_link *link;
uint8_t link_index = 0;
- struct drm_device *dev = adev->dm.ddev;
+ struct drm_device *dev;
if (adev == NULL)
return;
@@ -673,6 +675,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
link_index = notify->link_index;
link = adev->dm.dc->links[link_index];
+ dev = adev->dm.ddev;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -705,8 +708,10 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
* to dmub interrupt handling thread
* Return: true if successfully registered, false if there is an existing registration
*/
-bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
-dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
+static bool register_dmub_notify_callback(struct amdgpu_device *adev,
+ enum dmub_notification_type type,
+ dmub_notify_interrupt_callback_t callback,
+ bool dmub_int_thread_offload)
{
if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
adev->dm.dmub_callback[type] = callback;
@@ -790,8 +795,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
plink = adev->dm.dc->links[notify.link_index];
if (plink) {
plink->hpd_status =
- notify.hpd_status ==
- DP_HPD_PLUG ? true : false;
+ notify.hpd_status == DP_HPD_PLUG;
}
}
queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
@@ -1158,6 +1162,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
}
+static void dm_dmub_hw_resume(struct amdgpu_device *adev)
+{
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ enum dmub_status status;
+ bool init;
+
+ if (!dmub_srv) {
+ /* DMUB isn't supported on the ASIC. */
+ return;
+ }
+
+ status = dmub_srv_is_hw_init(dmub_srv, &init);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("DMUB hardware init check failed: %d\n", status);
+
+ if (status == DMUB_STATUS_OK && init) {
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ } else {
+ /* Perform the full hardware initialization. */
+ dm_dmub_hw_init(adev);
+ }
+}
+
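dm_dmub_hw_resume() turns resume into a fast path: if the DMUB firmware already reports hardware-init, only wait for the auto-load to finish instead of redoing the full dm_dmub_hw_init(). A skeleton of that decision with mocked status values (illustrative only, not the dmub_srv API):

#include <stdbool.h>
#include <stdio.h>

enum status { STATUS_OK, STATUS_ERR };

static enum status srv_is_hw_init(bool *init) { *init = true; return STATUS_OK; }
static enum status srv_wait_for_auto_load(void) { return STATUS_OK; }
static void full_hw_init(void) { puts("full init"); }

static void hw_resume(void)
{
	bool init = false;

	if (srv_is_hw_init(&init) == STATUS_OK && init) {
		/* Fast path: firmware is up, just wait for auto-load. */
		if (srv_wait_for_auto_load() != STATUS_OK)
			puts("auto-load wait failed");
		else
			puts("resumed without re-init");
	} else {
		/* Slow path: bring the hardware up from scratch. */
		full_hw_init();
	}
}

int main(void)
{
	hw_resume();
	return 0;
}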
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
@@ -1459,8 +1489,21 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
init_data.flags.edp_no_power_sequencing = true;
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+#endif
+
init_data.flags.power_down_display_on_boot = true;
+ if (check_seamless_boot_capability(adev)) {
+ init_data.flags.power_down_display_on_boot = false;
+ init_data.flags.allow_seamless_boot_optimization = true;
+ DRM_INFO("Seamless boot condition check passed\n");
+ }
+
INIT_LIST_HEAD(&adev->dm.da_list);
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -1485,8 +1528,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
- if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
adev->dm.dc->debug.disable_dsc = true;
+ adev->dm.dc->debug.disable_dsc_edp = true;
+ }
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
adev->dm.dc->debug.disable_clock_gate = true;
@@ -2309,14 +2354,6 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
goto fail;
}
-
- res = dc_validate_global_state(dc, context, false);
-
- if (res != DC_OK) {
- DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
- goto fail;
- }
-
res = dc_commit_state(dc, context);
fail:
@@ -2600,15 +2637,6 @@ static int dm_resume(void *handle)
= 0xffffffff;
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /*
- * Resource allocation happens for link encoders for newer ASIC in
- * dc_validate_global_state, so we need to revalidate it.
- *
- * This shouldn't fail (it passed once before), so warn if it does.
- */
- WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
-#endif
WARN_ON(!dc_commit_state(dm->dc, dc_state));
@@ -2636,9 +2664,7 @@ static int dm_resume(void *handle)
amdgpu_dm_outbox_init(adev);
/* Before powering on DC we need to re-initialize DMUB. */
- r = dm_dmub_hw_init(adev);
- if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ dm_dmub_hw_resume(adev);
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -2965,13 +2991,12 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->edid =
(struct edid *)sink->dc_edid.raw_edid;
- drm_connector_update_edid_property(connector,
- aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
}
+ drm_connector_update_edid_property(connector, aconnector->edid);
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
update_connector_ext_caps(aconnector);
} else {
@@ -3039,7 +3064,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
drm_modeset_unlock_all(dev);
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
if (new_connection_type == dc_connection_none &&
@@ -3054,7 +3079,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
drm_modeset_unlock_all(dev);
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
}
mutex_unlock(&aconnector->hpd_lock);
@@ -3248,7 +3273,7 @@ out:
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
if (aconnector->fake_enable)
@@ -3261,7 +3286,7 @@ out:
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
}
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -4281,6 +4306,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
+ /*
+ * Disable vblank IRQs aggressively for power-saving.
+ *
+ * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
+ * is also supported.
+ */
+ adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -6065,12 +6098,74 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
struct dsc_dec_dpcd_caps *dsc_caps)
{
stream->timing.flags.DSC = 0;
+ dsc_caps->is_dsc_supported = false;
+
+ if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
+ sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ dsc_caps);
+ }
+}
+
+static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps,
+ uint32_t max_dsc_target_bpp_limit_override)
+{
+ const struct dc_link_settings *verified_link_cap = NULL;
+ uint32_t link_bw_in_kbps;
+ uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+ struct dc *dc = sink->ctx->dc;
+ struct dc_dsc_bw_range bw_range = {0};
+ struct dc_dsc_config dsc_cfg = {0};
+
+ verified_link_cap = dc_link_get_link_cap(stream->link);
+ link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
+ edp_min_bpp_x16 = 8 * 16;
+ edp_max_bpp_x16 = 8 * 16;
+
+ if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
+ edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
+
+ if (edp_max_bpp_x16 < edp_min_bpp_x16)
+ edp_min_bpp_x16 = edp_max_bpp_x16;
+
+ if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
+ dc->debug.dsc_min_slice_height_override,
+ edp_min_bpp_x16, edp_max_bpp_x16,
+ dsc_caps,
+ &stream->timing,
+ &bw_range)) {
+
+ if (bw_range.max_kbps < link_bw_in_kbps) {
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ 0,
+ &stream->timing,
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
+ }
+ return;
+ }
+ }
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
- dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
- dsc_caps);
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ link_bw_in_kbps,
+ &stream->timing,
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
}
}
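A standalone sketch (not part of the patch) of the bandwidth decision above, with made-up numbers: when the stream compressed at the maximum allowed bpp still fits in the link, the bpp is pinned to that maximum; otherwise a DSC config is computed to target the link bandwidth.

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical values, in kbps. */
		unsigned int link_bw_in_kbps = 10000000;
		unsigned int bw_range_max_kbps = 8000000; /* stream bw at max bpp */

		if (bw_range_max_kbps < link_bw_in_kbps)
			puts("pin bits_per_pixel to edp_max_bpp_x16");
		else
			puts("compute a DSC config that targets link_bw_in_kbps");
		return 0;
	}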
@@ -6081,6 +6176,9 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps;
uint32_t max_dsc_target_bpp_limit_override = 0;
+ struct dc *dc = sink->ctx->dc;
+ uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
+ uint32_t dsc_max_supported_bw_in_kbps;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
@@ -6093,17 +6191,43 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
+ dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
- if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
+
+ } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
max_dsc_target_bpp_limit_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
- stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
+ __func__, drm_connector->name);
+ }
+ } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+ timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ max_supported_bw_in_kbps = link_bandwidth_kbps;
+ dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
+
+ if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
+ max_supported_bw_in_kbps > 0 &&
+ dsc_max_supported_bw_in_kbps > 0)
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ dsc_max_supported_bw_in_kbps,
+ &stream->timing,
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+ __func__, drm_connector->name);
+ }
}
}
@@ -8247,15 +8371,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
- if (link->is_dig_mapping_flexible &&
- link->dc->res_pool->funcs->link_encs_assign) {
- link->link_enc =
- link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
- if (!link->link_enc)
- link->link_enc =
- link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
- }
-
+ link->link_enc = dp_get_link_enc(link);
+ ASSERT(link->link_enc);
if (link->link_enc)
aconnector->base.ycbcr_420_allowed =
link->link_enc->features.dp_ycbcr420_supported ? true : false;
@@ -10646,6 +10763,8 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state->dc_state = dc_new_plane_state;
+ dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
+
/* Tell DC to do a full surface update every time there
* is a plane change. Inefficient, but works for now.
*/
@@ -10658,6 +10777,24 @@ static int dm_update_plane_state(struct dc *dc,
return ret;
}
+static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ int *src_w, int *src_h)
+{
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_90:
+ case DRM_MODE_ROTATE_270:
+ *src_w = plane_state->src_h >> 16;
+ *src_h = plane_state->src_w >> 16;
+ break;
+ case DRM_MODE_ROTATE_0:
+ case DRM_MODE_ROTATE_180:
+ default:
+ *src_w = plane_state->src_w >> 16;
+ *src_h = plane_state->src_h >> 16;
+ break;
+ }
+}
+
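A standalone sketch (not part of the patch) of the math behind dm_get_oriented_plane_size() and the scale check below: DRM plane source sizes are 16.16 fixed point, and a 90/270 rotation swaps width and height before the x1000 integer scale ratio is computed.

	#include <stdio.h>

	int main(void)
	{
		unsigned int src_w = 64 << 16;	/* 64 px, 16.16 fixed point */
		unsigned int src_h = 32 << 16;	/* 32 px */
		unsigned int crtc_w = 128;	/* scanned out at 128 px wide */
		int rotated_90 = 1;		/* pretend DRM_MODE_ROTATE_90 */

		/* After a 90/270 rotation the oriented width is the source height. */
		unsigned int ow = rotated_90 ? (src_h >> 16) : (src_w >> 16);

		printf("scale_w = %u\n", crtc_w * 1000 / ow);	/* 4000 == 4x */
		return 0;
	}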
static int dm_check_crtc_cursor(struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state)
@@ -10666,6 +10803,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
struct drm_plane_state *new_cursor_state, *new_underlying_state;
int i;
int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+ int cursor_src_w, cursor_src_h;
+ int underlying_src_w, underlying_src_h;
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
* cursor per pipe but it's going to inherit the scaling and
@@ -10677,10 +10816,9 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
return 0;
}
- cursor_scale_w = new_cursor_state->crtc_w * 1000 /
- (new_cursor_state->src_w >> 16);
- cursor_scale_h = new_cursor_state->crtc_h * 1000 /
- (new_cursor_state->src_h >> 16);
+ dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
+ cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+ cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
/* Narrow down to non-cursor planes on the same CRTC as the cursor */
@@ -10691,10 +10829,10 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
if (!new_underlying_state->fb)
continue;
- underlying_scale_w = new_underlying_state->crtc_w * 1000 /
- (new_underlying_state->src_w >> 16);
- underlying_scale_h = new_underlying_state->crtc_h * 1000 /
- (new_underlying_state->src_h >> 16);
+ dm_get_oriented_plane_size(new_underlying_state,
+ &underlying_src_w, &underlying_src_h);
+ underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
+ underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
if (cursor_scale_w != underlying_scale_w ||
cursor_scale_h != underlying_scale_h) {
@@ -10779,7 +10917,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
enum dc_status status;
int ret, i;
bool lock_and_validation_needed = false;
- struct dm_crtc_state *dm_old_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_mst_fairness_vars vars[MAX_PIPES];
struct drm_dp_mst_topology_state *mst_state;
@@ -10789,8 +10927,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
trace_amdgpu_dm_atomic_check_begin(state);
ret = drm_atomic_helper_check_modeset(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
goto fail;
+ }
/* Check connector changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -10806,6 +10946,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
if (IS_ERR(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
ret = PTR_ERR(new_crtc_state);
goto fail;
}
@@ -10820,8 +10961,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = add_affected_mst_dsc_crtcs(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
goto fail;
+ }
}
}
}
@@ -10836,19 +10979,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
continue;
ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
goto fail;
+ }
if (!new_crtc_state->enable)
continue;
ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
goto fail;
+ }
ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
goto fail;
+ }
if (dm_old_crtc_state->dsc_force_changed)
new_crtc_state->mode_changed = true;
@@ -10885,6 +11034,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (IS_ERR(new_plane_state)) {
ret = PTR_ERR(new_plane_state);
+ DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
goto fail;
}
}
@@ -10897,8 +11047,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_plane_state,
false,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
+ }
}
/* Disable all crtcs which require disable */
@@ -10908,8 +11060,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state,
false,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
goto fail;
+ }
}
/* Enable all crtcs which require enable */
@@ -10919,8 +11073,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state,
true,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
goto fail;
+ }
}
/* Add new/modified planes */
@@ -10930,20 +11086,32 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_plane_state,
true,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
+ }
}
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
goto fail;
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->mpo_requested)
+ DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
+ }
/* Check cursor planes scaling */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
goto fail;
+ }
}
if (state->legacy_cursor_update) {
@@ -11030,20 +11198,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
if (lock_and_validation_needed) {
ret = dm_atomic_get_state(state, &dm_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
goto fail;
+ }
ret = do_aquire_global_lock(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
goto fail;
+ }
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
goto fail;
+ }
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
goto fail;
+ }
#endif
/*
@@ -11053,12 +11229,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* to get stuck in an infinite loop and hang eventually.
*/
ret = drm_dp_mst_atomic_check(state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
goto fail;
- status = dc_validate_global_state(dc, dm_state->context, false);
+ }
+ status = dc_validate_global_state(dc, dm_state->context, true);
if (status != DC_OK) {
- drm_dbg_atomic(dev,
- "DC global validation failure: %s (%d)",
+ DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
@@ -11180,7 +11357,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
input->offset = offset;
input->length = length;
- input->total_length = total_length;
+ input->cea_total_length = total_length;
memcpy(input->payload, data, length);
res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
@@ -11487,8 +11664,10 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
return value;
}
-int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
- uint8_t status_type, uint32_t *operation_result)
+static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
+ struct dc_context *ctx,
+ uint8_t status_type,
+ uint32_t *operation_result)
{
struct amdgpu_device *adev = ctx->driver_context;
int return_status = -1;
@@ -11559,3 +11738,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
(uint32_t *)operation_result);
}
+
+/*
+ * Check whether seamless boot is supported.
+ *
+ * So far we only support seamless boot on CHIP_VANGOGH.
+ * If everything goes well, we may consider expanding
+ * seamless boot to other ASICs.
+ */
+bool check_seamless_boot_capability(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ if (!adev->mman.keep_stolen_vga_memory)
+ return true;
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 37e61a88d49e..b9a69b0cef23 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,9 +50,9 @@
#define AMDGPU_DMUB_NOTIFICATION_MAX 5
-/**
+/*
* DMUB Async to Sync Mechanism Status
- **/
+ */
#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
@@ -626,6 +626,8 @@ struct dm_crtc_state {
bool cm_has_degamma;
bool cm_is_degamma_srgb;
+ bool mpo_requested;
+
int update_type;
int active_planes;
@@ -731,4 +733,7 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
struct dc_context *ctx, unsigned int link_index,
void *payload, void *operation_result);
+
+bool check_seamless_boot_capability(struct amdgpu_device *adev);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index a022e5bb30a5..a71177305bcd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -285,8 +285,12 @@ static int __set_input_tf(struct dc_transfer_func *func,
}
/**
+ * amdgpu_dm_verify_lut_sizes
+ * @crtc_state: the DRM CRTC state
+ *
* Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
* the expected size.
+ *
* Returns 0 on success.
*/
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 9d43ecb1f692..26719efa5396 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -824,6 +824,48 @@ static int dmub_fw_state_show(struct seq_file *m, void *data)
return seq_write(m, state_base, state_size);
}
+/* psr_capability_show() - show eDP panel PSR capability
+ *
+ * The read function: psr_capability_show
+ * Shows whether the sink and driver support PSR.
+ * If yes, the PSR version is appended.
+ *
+ * cat /sys/kernel/debug/dri/0/eDP-X/psr_capability
+ *
+ * Expected output:
+ * "Sink support: no\n" - if panel doesn't support PSR
+ * "Sink support: yes [0x01]\n" - if panel supports PSR1
+ * "Driver support: no\n" - if driver doesn't support PSR
+ * "Driver support: yes [0x01]\n" - if driver supports PSR1
+ */
+static int psr_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *link = aconnector->dc_link;
+
+ if (!link)
+ return -ENODEV;
+
+ if (link->type == dc_connection_none)
+ return -ENODEV;
+
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ return -ENODEV;
+
+ seq_printf(m, "Sink support: %s", yesno(link->dpcd_caps.psr_caps.psr_version != 0));
+ if (link->dpcd_caps.psr_caps.psr_version)
+ seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_caps.psr_version);
+ seq_puts(m, "\n");
+
+ seq_printf(m, "Driver support: %s", yesno(link->psr_settings.psr_feature_enabled));
+ if (link->psr_settings.psr_version)
+ seq_printf(m, " [0x%02x]", link->psr_settings.psr_version);
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
/*
* Returns the current and maximum output bpc for the connector.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
@@ -1243,7 +1285,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
} else if (param[0] == 0) {
if (!aconnector->dc_link)
goto unlock;
@@ -1265,7 +1307,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
}
unlock:
@@ -2467,6 +2509,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
#endif
DEFINE_SHOW_ATTRIBUTE(internal_display);
+DEFINE_SHOW_ATTRIBUTE(psr_capability);
static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
.owner = THIS_MODULE,
@@ -2712,6 +2755,138 @@ static const struct {
{"internal_display", &internal_display_fops}
};
+/*
+ * Returns the customized link rates (ILR) supported by this eDP panel.
+ * Example usage: cat /sys/kernel/debug/dri/0/eDP-x/ilr_setting
+ */
+static int edp_ilr_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private);
+ struct dc_link *link = aconnector->dc_link;
+ uint8_t supported_link_rates[16];
+ uint32_t link_rate_in_khz;
+ uint32_t entry = 0;
+ uint8_t dpcd_rev;
+
+ memset(supported_link_rates, 0, sizeof(supported_link_rates));
+ dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES,
+ supported_link_rates, sizeof(supported_link_rates));
+
+ dpcd_rev = link->dpcd_caps.dpcd_rev.raw;
+
+ if (dpcd_rev >= DP_DPCD_REV_13 &&
+ (supported_link_rates[entry+1] != 0 || supported_link_rates[entry] != 0)) {
+
+ for (entry = 0; entry < 16; entry += 2) {
+ link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
+ supported_link_rates[entry]) * 200;
+ seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz);
+ }
+ } else {
+ seq_printf(m, "ILR is not supported by this eDP panel.\n");
+ }
+
+ return 0;
+}
+
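A standalone sketch (not part of the patch) of the arithmetic in edp_ilr_show(): each DP_SUPPORTED_LINK_RATES entry is a little-endian 16-bit value in units of 200 kHz.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t raw[2] = { 0xA4, 0x1F };	/* 0x1FA4 = 8100 */
		uint32_t khz = (raw[1] * 0x100 + raw[0]) * 200;

		printf("%u kHz\n", khz);		/* 1620000 kHz, i.e. RBR */
		return 0;
	}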
+/*
+ * Set supported customized link rate to eDP panel.
+ *
+ * echo <lane_count> <link_rate option> > ilr_setting
+ *
+ * for example, supported ILR : [0] 1620000 kHz [1] 2160000 kHz [2] 2430000 kHz ...
+ * echo 4 1 > /sys/kernel/debug/dri/0/eDP-x/ilr_setting
+ * to set 4 lanes and 2.16 GHz
+ */
+static ssize_t edp_ilr_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
+ struct dc *dc = (struct dc *)link->dc;
+ struct dc_link_settings prefer_link_settings;
+ char *wr_buf = NULL;
+ const uint32_t wr_buf_size = 40;
+ /* 0: lane_count; 1: link_rate */
+ int max_param_num = 2;
+ uint8_t param_nums = 0;
+ long param[2];
+ bool valid_input = true;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOMEM;
+
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ if (param_nums <= 0) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ switch (param[0]) {
+ case LANE_COUNT_ONE:
+ case LANE_COUNT_TWO:
+ case LANE_COUNT_FOUR:
+ break;
+ default:
+ valid_input = false;
+ break;
+ }
+
+ if (param[1] >= link->dpcd_caps.edp_supported_link_rates_count)
+ valid_input = false;
+
+ if (!valid_input) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n");
+ prefer_link_settings.use_link_rate_set = false;
+ dc_link_set_preferred_training_settings(dc, NULL, NULL, link, true);
+ return size;
+ }
+
+ /* Save the user-forced lane_count and link_rate as the preferred
+ * settings; spread spectrum will not be changed.
+ */
+ prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+ prefer_link_settings.lane_count = param[0];
+ prefer_link_settings.use_link_rate_set = true;
+ prefer_link_settings.link_rate_set = param[1];
+ prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]];
+
+ mutex_lock(&adev->dm.dc_lock);
+ dc_link_set_preferred_training_settings(dc, &prefer_link_settings,
+ NULL, link, false);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ kfree(wr_buf);
+ return size;
+}
+
+static int edp_ilr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, edp_ilr_show, inode->i_private);
+}
+
+static const struct file_operations edp_ilr_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = edp_ilr_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = edp_ilr_write
+};
+
void connector_debugfs_init(struct amdgpu_dm_connector *connector)
{
int i;
@@ -2726,11 +2901,14 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
}
}
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector,
&current_backlight_fops);
debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector,
&target_backlight_fops);
+ debugfs_create_file("ilr_setting", 0644, dir, connector,
+ &edp_ilr_debugfs_fops);
}
for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) {
@@ -2909,10 +3087,13 @@ static int crc_win_update_set(void *data, u64 val)
struct amdgpu_device *adev = drm_to_adev(new_crtc->dev);
struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk;
+ if (!crc_rd_wrk)
+ return 0;
+
if (val) {
spin_lock_irq(&adev_to_drm(adev)->event_lock);
spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- if (crc_rd_wrk && crc_rd_wrk->crtc) {
+ if (crc_rd_wrk->crtc) {
old_crtc = crc_rd_wrk->crtc;
old_acrtc = to_amdgpu_crtc(old_crtc);
}
@@ -3190,6 +3371,32 @@ static int disable_hpd_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
disable_hpd_set, "%llu\n");
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+/*
+ * Temporary w/a to force the SST sequence in the M42D DP2 MST receiver
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst
+ */
+static int dp_force_sst_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ adev->dm.dc->debug.set_mst_en_for_sst = val;
+
+ return 0;
+}
+
+static int dp_force_sst_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.dc->debug.set_mst_en_for_sst;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get,
+ dp_force_sst_set, "%llu\n");
+#endif
+
/*
* Sets the DC visual confirm debug option from the given string.
* Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm
@@ -3299,6 +3506,10 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
adev, &mst_topo_fops);
debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
&dtn_log_fops);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev,
+ &dp_set_mst_en_for_sst_ops);
+#endif
debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
&visual_confirm_fops);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 8cbeeb7c986d..29f07c26d080 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -83,16 +83,17 @@ static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
* void
* */
enum dc_edid_status dm_helpers_parse_edid_caps(
- struct dc_context *ctx,
+ struct dc_link *link,
const struct dc_edid *edid,
struct dc_edid_caps *edid_caps)
{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+ struct drm_connector *connector = &aconnector->base;
struct edid *edid_buf = (struct edid *) edid->raw_edid;
struct cea_sad *sads;
int sad_count = -1;
int sadb_count = -1;
int i = 0;
- int j = 0;
uint8_t *sadb = NULL;
enum dc_edid_status result = EDID_OK;
@@ -111,23 +112,11 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->manufacture_week = edid_buf->mfg_week;
edid_caps->manufacture_year = edid_buf->mfg_year;
- /* One of the four detailed_timings stores the monitor name. It's
- * stored in an array of length 13. */
- for (i = 0; i < 4; i++) {
- if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
- while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
- if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
- break;
-
- edid_caps->display_name[j] =
- edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
- j++;
- }
- }
- }
+ drm_edid_get_monitor_name(edid_buf,
+ edid_caps->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
- edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
- (struct edid *) edid->raw_edid);
+ edid_caps->edid_hdmi = connector->display_info.is_hdmi;
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
@@ -584,9 +573,18 @@ bool dm_helpers_dp_write_dsc_enable(
ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
}
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
- ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
- DC_LOG_DC("Send DSC %s to sst display\n", enable_dsc ? "enable" : "disable");
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+#endif
+ ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
+ DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+ ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
+ DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
+ }
+#endif
}
return (ret > 0);
@@ -650,14 +648,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
/* We don't need the original edid anymore */
kfree(edid);
- /* connector->display_info will be parsed from EDID and saved
- * into drm_connector->display_info from edid by call stack
- * below:
- * drm_parse_ycbcr420_deep_color_info
- * drm_parse_hdmi_forum_vsdb
- * drm_parse_cea_ext
- * drm_add_display_info
- * drm_connector_update_edid_property
+ /* connector->display_info is parsed from EDID and saved
+ * into drm_connector->display_info
*
* drm_connector->display_info will be used by amdgpu_dm funcs,
* like fill_stream_properties_from_drm_display_mode
@@ -665,7 +657,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
amdgpu_dm_update_connector_after_detect(aconnector);
edid_status = dm_helpers_parse_edid_caps(
- ctx,
+ link,
&sink->dc_edid,
&sink->edid_caps);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c022e56f9459..c510638b4f99 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -26,6 +26,73 @@
#include "amdgpu_dm_psr.h"
#include "dc.h"
#include "dm_helpers.h"
+#include "amdgpu_dm.h"
+
+static bool link_get_psr_caps(struct dc_link *link)
+{
+ uint8_t psr_dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+ uint8_t edp_rev_dpcd_data;
+
+ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+ psr_dpcd_data, sizeof(psr_dpcd_data)))
+ return false;
+
+ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_EDP_DPCD_REV,
+ &edp_rev_dpcd_data, sizeof(edp_rev_dpcd_data)))
+ return false;
+
+ link->dpcd_caps.psr_caps.psr_version = psr_dpcd_data[0];
+ link->dpcd_caps.psr_caps.edp_revision = edp_rev_dpcd_data;
+
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ if (link->dpcd_caps.psr_caps.psr_version > 0x1) {
+ uint8_t alpm_dpcd_data;
+ uint8_t su_granularity_dpcd_data;
+
+ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_RECEIVER_ALPM_CAP,
+ &alpm_dpcd_data, sizeof(alpm_dpcd_data)))
+ return false;
+
+ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR2_SU_Y_GRANULARITY,
+ &su_granularity_dpcd_data, sizeof(su_granularity_dpcd_data)))
+ return false;
+
+ link->dpcd_caps.psr_caps.y_coordinate_required = psr_dpcd_data[1] & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+ link->dpcd_caps.psr_caps.su_granularity_required = psr_dpcd_data[1] & DP_PSR2_SU_GRANULARITY_REQUIRED;
+
+ link->dpcd_caps.psr_caps.alpm_cap = alpm_dpcd_data & DP_ALPM_CAP;
+ link->dpcd_caps.psr_caps.standby_support = alpm_dpcd_data & (1 << 1);
+
+ link->dpcd_caps.psr_caps.su_y_granularity = su_granularity_dpcd_data;
+ }
+#endif
+ return true;
+}
+
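A standalone sketch (not part of the patch) of the DPCD bit extraction in link_get_psr_caps(); the bit positions here are placeholders, the real masks come from the DRM DP helper headers.

	#include <stdint.h>
	#include <stdio.h>

	#define SU_Y_COORDINATE_REQUIRED	(1 << 4)	/* placeholder bit */
	#define SU_GRANULARITY_REQUIRED		(1 << 5)	/* placeholder bit */

	int main(void)
	{
		uint8_t psr_caps = 0x30;	/* pretend DPCD readback */

		printf("y coord required: %d\n",
		       !!(psr_caps & SU_Y_COORDINATE_REQUIRED));
		printf("su granularity required: %d\n",
		       !!(psr_caps & SU_GRANULARITY_REQUIRED));
		return 0;
	}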
+#ifdef CONFIG_DRM_AMD_DC_DCN
+static bool link_supports_psrsu(struct dc_link *link)
+{
+ struct dc *dc = link->ctx->dc;
+
+ if (!dc->caps.dmcub_support)
+ return false;
+
+ if (dc->ctx->dce_version < DCN_VERSION_3_1)
+ return false;
+
+ if (!link->dpcd_caps.psr_caps.alpm_cap ||
+ !link->dpcd_caps.psr_caps.y_coordinate_required)
+ return false;
+
+ if (link->dpcd_caps.psr_caps.su_granularity_required &&
+ !link->dpcd_caps.psr_caps.su_y_granularity)
+ return false;
+
+ return true;
+}
+#endif
/*
* amdgpu_dm_set_psr_caps() - set link psr capabilities
@@ -34,26 +101,34 @@
*/
void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
- uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
-
if (!(link->connector_signal & SIGNAL_TYPE_EDP))
return;
+
if (link->type == dc_connection_none)
return;
- if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
- dpcd_data, sizeof(dpcd_data))) {
- link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
-
- if (dpcd_data[0] == 0) {
- link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
- link->psr_settings.psr_feature_enabled = false;
- } else {
+
+ if (!link_get_psr_caps(link)) {
+ DRM_ERROR("amdgpu: Failed to read PSR Caps!\n");
+ return;
+ }
+
+ if (link->dpcd_caps.psr_caps.psr_version == 0) {
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->psr_settings.psr_feature_enabled = false;
+
+ } else {
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ if (link_supports_psrsu(link))
+ link->psr_settings.psr_version = DC_PSR_VERSION_SU_1;
+ else
+#endif
link->psr_settings.psr_version = DC_PSR_VERSION_1;
- link->psr_settings.psr_feature_enabled = true;
- }
- DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
+ link->psr_settings.psr_feature_enabled = true;
}
+
+ DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
}
/*
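A condensed standalone sketch (not part of the patch) of the version selection that amdgpu_dm_set_psr_caps() now performs: no DPCD PSR support means unsupported, PSR-SU when the sink and DCN generation qualify, PSR1 otherwise.

	#include <stdio.h>

	enum psr_ver { PSR_NONE, PSR_1, PSR_SU_1 };

	static enum psr_ver pick_psr(unsigned int dpcd_psr_version, int psrsu_ok)
	{
		if (dpcd_psr_version == 0)
			return PSR_NONE;
		return psrsu_ok ? PSR_SU_1 : PSR_1;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       pick_psr(0, 0), pick_psr(1, 0), pick_psr(3, 1));
		return 0;	/* prints: 0 1 2 */
	}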
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index a4bef4364afd..1e385d55e7fb 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2995,7 +2995,7 @@ static bool bios_parser2_construct(
&bp->object_info_tbl.revision);
if (bp->object_info_tbl.revision.major == 1
- && bp->object_info_tbl.revision.minor >= 4) {
+ && bp->object_info_tbl.revision.minor == 4) {
struct display_object_info_table_v1_4 *tbl_v1_4;
tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4,
@@ -3004,8 +3004,10 @@ static bool bios_parser2_construct(
return false;
bp->object_info_tbl.v1_4 = tbl_v1_4;
- } else
+ } else {
+ ASSERT(0);
return false;
+ }
dal_firmware_parser_init_cmd_tbl(bp);
dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index ff5bb152ef49..e6ef36de0825 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -2033,10 +2033,10 @@ static void calculate_bandwidth(
kfree(surface_type);
free_tiling_mode:
kfree(tiling_mode);
-free_yclk:
- kfree(yclk);
free_sclk:
kfree(sclk);
+free_yclk:
+ kfree(yclk);
}
/*******************************************************************************
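The dce_calcs hunk above swaps two cleanup labels so they unwind in reverse allocation order. A minimal standalone sketch (not part of the patch) of that goto-unwind pattern:

	#include <stdlib.h>

	static int example(void)
	{
		int ret = -1;
		int *yclk = malloc(sizeof(*yclk));
		int *sclk = NULL;

		if (!yclk)
			goto out;

		sclk = malloc(sizeof(*sclk));
		if (!sclk)
			goto free_yclk;	/* only yclk exists at this point */

		/* ... use both buffers ... */
		ret = 0;

		free(sclk);
	free_yclk:
		free(yclk);
	out:
		return ret;
	}

	int main(void)
	{
		return example();
	}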
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 6b248cd2a461..e447c74be713 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -503,7 +503,6 @@ static void dcn_bw_calc_rq_dlg_ttu(
//input[in_idx].dout.output_standard;
/*todo: soc->sr_enter_plus_exit_time??*/
- dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src);
dml1_extract_rq_regs(dml, rq_regs, rq_param);
@@ -739,7 +738,9 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}
-unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw_internal_rev, uint32_t pci_revision_id)
+static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family,
+ uint32_t hw_internal_rev,
+ uint32_t pci_revision_id)
{
/* for low power RV2 variants, the highest voltage level we want is 0 */
if ((chip_family == FAMILY_RV) &&
@@ -763,7 +764,7 @@ unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw
return 4;
}
-bool dcn_validate_bandwidth(
+bool dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 26f96ee32472..9200c8ce02ba 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -308,8 +308,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
case FAMILY_NV:
if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
- }
- if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+ } else if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
}
if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 76ec8ec92efd..60761ff3cbf1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -34,7 +34,7 @@
#include "rv1_clk_mgr_vbios_smu.h"
#include "rv1_clk_mgr_clk.h"
-void rv1_init_clocks(struct clk_mgr *clk_mgr)
+static void rv1_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index fe18bb9e19aa..06bab24d8e27 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -28,6 +28,8 @@
#include "reg_helper.h"
#include <linux/delay.h>
+#include "rv1_clk_mgr_vbios_smu.h"
+
#define MAX_INSTANCE 5
#define MAX_SEGMENT 5
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 2108bff49d4e..cac80ba69072 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -38,7 +38,6 @@
#include "clk/clk_11_0_0_offset.h"
#include "clk/clk_11_0_0_sh_mask.h"
-#include "irq/dcn20/irq_service_dcn20.h"
#undef FN
#define FN(reg_name, field_name) \
@@ -223,8 +222,6 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool force_reset = false;
bool p_state_change_support;
int total_plane_count;
- int irq_src;
- uint32_t hpd_state;
if (dc->work_arounds.skip_clock_update)
return;
@@ -242,13 +239,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->res_pool->pp_smu)
pp_smu = &dc->res_pool->pp_smu->nv_funcs;
- for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) {
- hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src);
- if (hpd_state)
- break;
- }
-
- if (display_count == 0 && !hpd_state)
+ if (display_count == 0)
enter_display_off = true;
if (enter_display_off == safe_to_lower) {
@@ -409,7 +400,7 @@ void dcn2_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.prev_p_state_change_support = true;
}
-void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+static void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct pp_smu_funcs_nv *pp_smu = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index db9950244c7b..fbdd0a92d146 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -74,42 +74,6 @@ static const struct clk_mgr_mask clk_mgr_mask = {
CLK_COMMON_MASK_SH_LIST_DCN201_BASE(_MASK)
};
-void dcn201_update_clocks_vbios(struct clk_mgr *clk_mgr,
- struct dc_state *context,
- bool safe_to_lower)
-{
- struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
-
- bool update_dppclk = false;
- bool update_dispclk = false;
-
- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
- clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
- update_dppclk = true;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
- clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
- update_dispclk = true;
- }
-
- if (update_dppclk || update_dispclk) {
- struct bp_set_dce_clock_parameters dce_clk_params;
- struct dc_bios *bp = clk_mgr->ctx->dc_bios;
-
- if (update_dispclk) {
- memset(&dce_clk_params, 0, sizeof(dce_clk_params));
- dce_clk_params.target_clock_frequency = new_clocks->dispclk_khz;
- dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
- dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
- bp->funcs->set_dce_clock(bp, &dce_clk_params);
- }
- /* currently there is no DCECLOCK_TYPE_DPPCLK type defined in VBIOS interface.
- * vbios program DPPCLK to the same DispCLK limitation
- */
- }
-}
-
static void dcn201_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
@@ -126,10 +90,8 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
- bool enter_display_off = false;
bool dpp_clock_lowered = false;
bool force_reset = false;
bool p_state_change_support;
@@ -145,10 +107,7 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
}
- display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
-
- if (display_count == 0)
- enter_display_off = true;
+ clk_mgr_helper_get_active_display_cnt(dc, context);
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz))
clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index ac2d4c4f04e4..f4dee0e48a67 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -42,7 +42,6 @@
#include "clk/clk_10_0_2_sh_mask.h"
#include "renoir_ip_offset.h"
-#include "irq/dcn21/irq_service_dcn21.h"
/* Constants */
@@ -56,9 +55,7 @@
/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
-int rn_get_active_display_cnt_wa(
- struct dc *dc,
- struct dc_state *context)
+static int rn_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context)
{
int i, display_count;
bool tmds_present = false;
@@ -77,7 +74,8 @@ int rn_get_active_display_cnt_wa(
const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
- if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
@@ -88,7 +86,7 @@ int rn_get_active_display_cnt_wa(
return display_count;
}
-void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
+static void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -122,7 +120,7 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
}
-void rn_update_clocks(struct clk_mgr *clk_mgr_base,
+static void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
@@ -130,11 +128,9 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
- int irq_src;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
- uint32_t hpd_state;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
@@ -151,14 +147,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = rn_get_active_display_cnt_wa(dc, context);
- for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) {
- hpd_state = dc_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src);
- if (hpd_state)
- break;
- }
-
/* if we can go lower, go lower */
- if (display_count == 0 && !hpd_state) {
+ if (display_count == 0) {
rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@@ -437,25 +427,14 @@ static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an
}
}
-/* This function produce translated logical clk state values*/
-void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
-{
- struct clk_state_registers_and_bypass sb = { 0 };
- struct clk_log_info log_info = { 0 };
-
- rn_dump_clk_registers(&sb, clk_mgr_base, &log_info);
-
- s->dprefclk_khz = sb.dprefclk * 1000;
-}
-
-void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+static void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
rn_vbios_smu_enable_pme_wa(clk_mgr);
}
-void rn_init_clocks(struct clk_mgr *clk_mgr)
+static void rn_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 9f7eed6688c4..8161a6ae410d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -33,6 +33,8 @@
#include "mp/mp_12_0_0_offset.h"
#include "mp/mp_12_0_0_sh_mask.h"
+#include "rn_clk_mgr_vbios_smu.h"
+
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
@@ -86,7 +88,9 @@ static uint32_t rn_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsig
}
-int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int msg_id, unsigned int param)
+static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ unsigned int msg_id,
+ unsigned int param)
{
uint32_t result;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 1861a147a7fa..f977f29907df 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
bool update_dispclk = false;
bool enter_display_off = false;
bool dpp_clock_lowered = false;
+ bool update_pstate_unsupported_clk = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool update_uclk = false;
@@ -299,13 +300,28 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+
+ // invalidate the current P-State forced min in certain dc_mode_softmax situations
+ if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) {
+ if ((new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) !=
+ (clk_mgr_base->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000))
+ update_pstate_unsupported_clk = true;
+ }
+
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) ||
+ update_pstate_unsupported_clk) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
- if (!clk_mgr_base->clks.p_state_change_support)
- dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ if (!clk_mgr_base->clks.p_state_change_support) {
+ if (dc->clk_mgr->dc_mode_softmax_enabled &&
+ new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ else
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ }
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
@@ -421,6 +437,24 @@ static void dcn3_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
}
+static void dcn3_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
+}
+static void dcn3_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
+}
+
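A standalone sketch (not part of the patch), with made-up numbers, of the dc_mode soft-max check added in dcn3_update_clocks(): the forced UCLK minimum only needs re-programming when the requested DRAM clock crosses the soft-max threshold.

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int softmax_mhz = 800;		/* hypothetical soft max */
		unsigned int cur_khz = 1000000;		/* above the soft max    */
		unsigned int new_khz = 700000;		/* below the soft max    */

		bool cur_under = cur_khz <= softmax_mhz * 1000;
		bool new_under = new_khz <= softmax_mhz * 1000;

		printf("reprogram UCLK min: %d\n", cur_under != new_under); /* 1 */
		return 0;
	}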
/* Get current memclk states, update bounding box */
static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
@@ -436,6 +470,8 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+ clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
+
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
@@ -505,6 +541,8 @@ static struct clk_mgr_funcs dcn3_funcs = {
.notify_wm_ranges = dcn3_notify_wm_ranges,
.set_hard_min_memclk = dcn3_set_hard_min_memclk,
.set_hard_max_memclk = dcn3_set_hard_max_memclk,
+ .set_max_memclk = dcn3_set_max_memclk,
+ .set_min_memclk = dcn3_set_min_memclk,
.get_memclk_states_from_smu = dcn3_get_memclk_states_from_smu,
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index 6ea642615854..d9920d91838d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -88,9 +88,9 @@ static uint32_t dcn301_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, u
return res_val;
}
-int dcn301_smu_send_msg_with_param(
- struct clk_mgr_internal *clk_mgr,
- unsigned int msg_id, unsigned int param)
+static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ unsigned int msg_id,
+ unsigned int param)
{
uint32_t result;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 3eee32faa208..48005def1164 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -89,9 +89,9 @@ static int vg_get_active_display_cnt_wa(
return display_count;
}
-void vg_update_clocks(struct clk_mgr *clk_mgr_base,
- struct dc_state *context,
- bool safe_to_lower)
+static void vg_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
@@ -367,18 +367,6 @@ static void vg_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an
}
}
-/* This function produce translated logical clk state values*/
-void vg_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
-{
-
- struct clk_state_registers_and_bypass sb = { 0 };
- struct clk_log_info log_info = { 0 };
-
- vg_dump_clk_registers(&sb, clk_mgr_base, &log_info);
-
- s->dprefclk_khz = sb.dprefclk * 1000;
-}
-
static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -386,7 +374,7 @@ static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn301_smu_enable_pme_wa(clk_mgr);
}
-void vg_init_clocks(struct clk_mgr *clk_mgr)
+static void vg_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
@@ -753,7 +741,7 @@ void vg_clk_mgr_construct(
sizeof(struct watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part);
- if (clk_mgr->smu_wm_set.wm_set == 0) {
+ if (!clk_mgr->smu_wm_set.wm_set) {
clk_mgr->smu_wm_set.wm_set = &dummy_wms;
clk_mgr->smu_wm_set.mc_address.quad_part = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 9df38e2ee4f4..4162ce40089b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -66,7 +66,7 @@
#define TO_CLK_MGR_DCN31(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn31, base)
-int dcn31_get_active_display_cnt_wa(
+static int dcn31_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
{
@@ -118,7 +118,7 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
}
}
-static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
+void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
@@ -285,7 +285,7 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn31_smu_enable_pme_wa(clk_mgr);
}
-static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
+void dcn31_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
@@ -295,7 +295,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}
-static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
+bool dcn31_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
if (a->dispclk_khz != b->dispclk_khz)
@@ -541,10 +541,9 @@ static unsigned int find_clk_for_voltage(
return clock;
}
-void dcn31_clk_mgr_helper_populate_bw_params(
- struct clk_mgr_internal *clk_mgr,
- struct integrated_info *bios_info,
- const DpmClocks_t *clock_table)
+static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
+ struct integrated_info *bios_info,
+ const DpmClocks_t *clock_table)
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
index f8f100535526..961b10a49486 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
@@ -39,6 +39,13 @@ struct clk_mgr_dcn31 {
struct dcn31_smu_watermark_set smu_wm_set;
};
+bool dcn31_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b);
+void dcn31_init_clocks(struct clk_mgr *clk_mgr);
+void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower);
+
void dcn31_clk_mgr_construct(struct dc_context *ctx,
struct clk_mgr_dcn31 *clk_mgr,
struct pp_smu_funcs *pp_smu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 8c2b77eb9459..a1011f3273f3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -95,9 +95,9 @@ static uint32_t dcn31_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
return res_val;
}
-int dcn31_smu_send_msg_with_param(
- struct clk_mgr_internal *clk_mgr,
- unsigned int msg_id, unsigned int param)
+static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ unsigned int msg_id,
+ unsigned int param)
{
uint32_t result;
@@ -119,6 +119,12 @@ int dcn31_smu_send_msg_with_param(
result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
+ if (result == VBIOSSMC_Result_Failed) {
+ ASSERT(0);
+ REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
+ return -1;
+ }
+
if (IS_SMU_TIMEOUT(result)) {
ASSERT(0);
dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 0ded4decee05..6f5528d34093 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -221,9 +221,9 @@ static bool create_links(
link = link_create(&link_init_params);
if (link) {
- dc->links[dc->link_count] = link;
- link->dc = dc;
- ++dc->link_count;
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
}
}
@@ -274,24 +274,6 @@ static bool create_links(
goto failed_alloc;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
- dc->caps.dp_hpo &&
- link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) {
- /* FPGA case - Allocate HPO DP link encoder */
- if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) {
- link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i];
-
- if (link->hpo_dp_link_enc == NULL) {
- BREAK_TO_DEBUGGER();
- goto failed_alloc;
- }
- link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
- link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
- }
- }
-#endif
-
link->link_status.dpcd_caps = &link->dpcd_caps;
enc_init.ctx = dc->ctx;
@@ -808,6 +790,10 @@ void dc_stream_set_static_screen_params(struct dc *dc,
static void dc_destruct(struct dc *dc)
{
+ // reset link encoder assignment table on destruct
+ if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
+ link_enc_cfg_init(dc, dc->current_state);
+
if (dc->current_state) {
dc_release_state(dc->current_state);
dc->current_state = NULL;
@@ -1016,8 +1002,6 @@ static bool dc_construct(struct dc *dc,
goto fail;
}
- dc_resource_state_construct(dc, dc->current_state);
-
if (!create_links(dc, init_params->num_virtual_links))
goto fail;
@@ -1027,8 +1011,7 @@ static bool dc_construct(struct dc *dc,
if (!create_link_encoders(dc))
goto fail;
- /* Initialise DIG link encoder resource tracking variables. */
- link_enc_cfg_init(dc, dc->current_state);
+ dc_resource_state_construct(dc, dc->current_state);
return true;
@@ -1421,20 +1404,34 @@ static void program_timing_sync(
status->timing_sync_info.master = false;
}
- /* remove any other unblanked pipes as they have already been synced */
- for (j = j + 1; j < group_size; j++) {
- bool is_blanked;
- if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
- is_blanked =
- pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
- else
- is_blanked =
- pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
- if (!is_blanked) {
- group_size--;
- pipe_set[j] = pipe_set[group_size];
- j--;
+ /* remove any other pipes that have already been synced */
+ if (dc->config.use_pipe_ctx_sync_logic) {
+ /* check each pipe's syncd to decide which pipes to remove */
+ for (j = 1; j < group_size; j++) {
+ if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ } else
+ /* link the slave pipe's syncd to the master pipe */
+ pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
+ }
+ } else {
+ for (j = j + 1; j < group_size; j++) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ }
}
}
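The syncd-based path above swap-removes pipes that already share the master's sync id and re-parents the rest. A minimal standalone C sketch of that grouping pass (the sync ids and pipe count are made up for illustration):

#include <stdio.h>

int main(void)
{
	/* hypothetical per-pipe sync ids; index 0 is the master */
	int syncd[] = { 0, 0, 3, 5 };
	int group_size = 4;

	for (int j = 1; j < group_size; j++) {
		if (syncd[j] == syncd[0]) {
			/* already synced with the master: swap-remove */
			syncd[j] = syncd[--group_size];
			j--;
		} else {
			/* re-parent the slave pipe to the master's id */
			syncd[j] = syncd[0];
		}
	}

	for (int j = 0; j < group_size; j++)
		printf("pipe %d syncd=%d\n", j, syncd[j]);
	return 0;
}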
@@ -1830,6 +1827,19 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
dc_stream_log(dc, stream);
}
+ /*
+ * Previous validation was performed with fast_validation = true and
+ * the full DML state required for hardware programming was skipped.
+ *
+ * Re-validate here to calculate these parameters / watermarks.
+ */
+ result = dc_validate_global_state(dc, context, false);
+ if (result != DC_OK) {
+ DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
+ dc_status_to_str(result), result);
+ return result;
+ }
+
result = dc_commit_state_no_check(dc, context);
return (result == DC_OK);
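The comment block above describes a two-phase flow: stream checks run with fast_validation = true, then commit re-runs the full pass to compute watermarks. A schematic C sketch of that pattern — validate() and the status enum are stand-ins, not the dc API:

#include <stdbool.h>
#include <stdio.h>

enum status { OK, FAIL };

/* stand-in for dc_validate_global_state(); the fast path skips the
 * expensive DML state needed for hardware programming */
static enum status validate(bool fast_validate)
{
	printf("validate(fast=%d)\n", fast_validate);
	return OK;
}

static enum status commit(void)
{
	/* earlier validation ran with fast_validate = true, so redo the
	 * full pass here before touching hardware */
	enum status result = validate(false);

	if (result != OK)
		return result;
	printf("program hardware\n");
	return OK;
}

int main(void)
{
	return commit() == OK ? 0 : 1;
}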
@@ -2870,7 +2880,8 @@ static void commit_planes_for_stream(struct dc *dc,
#endif
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (top_pipe_to_program &&
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_lock(stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -2979,12 +2990,12 @@ static void commit_planes_for_stream(struct dc *dc,
#ifdef CONFIG_DRM_AMD_DC_DCN
if (dc->debug.validate_dml_output) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
- if (cur_pipe.stream == NULL)
+ struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
+ if (cur_pipe->stream == NULL)
continue;
- cur_pipe.plane_res.hubp->funcs->validate_dml_output(
- cur_pipe.plane_res.hubp, dc->ctx,
+ cur_pipe->plane_res.hubp->funcs->validate_dml_output(
+ cur_pipe->plane_res.hubp, dc->ctx,
&context->res_ctx.pipe_ctx[i].rq_regs,
&context->res_ctx.pipe_ctx[i].dlg_regs,
&context->res_ctx.pipe_ctx[i].ttu_regs);
@@ -3426,7 +3437,7 @@ struct dc_sink *dc_link_add_remote_sink(
goto fail_add_sink;
edid_status = dm_helpers_parse_edid_caps(
- link->ctx,
+ link,
&dc_sink->dc_edid,
&dc_sink->edid_caps);
@@ -3583,6 +3594,98 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
}
+static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
+{
+ struct dc_state *context = dc->current_state;
+ struct hubp *hubp;
+ struct pipe_ctx *pipe;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream != NULL) {
+ dc->hwss.disable_pixel_data(dc, pipe, true);
+
+ // wait for double buffer
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ hubp = pipe->plane_res.hubp;
+ hubp->funcs->set_blank_regs(hubp, true);
+ }
+ }
+
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
+ dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream != NULL) {
+ dc->hwss.disable_pixel_data(dc, pipe, false);
+
+ hubp = pipe->plane_res.hubp;
+ hubp->funcs->set_blank_regs(hubp, false);
+ }
+ }
+}
+
+/**
+ * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
+ * @dc: pointer to dc of the dm calling this
+ * @enable: true = transition to DC mode, false = transition back to AC mode
+ *
+ * Some SoCs define additional clock limits when in DC (battery) mode. The DM
+ * should invoke this function when the platform undergoes a power-source
+ * transition so DC can apply or remove the limit. This interface may be
+ * disruptive to the onscreen content.
+ *
+ * Context: Triggered by the OS through the DM interface, or manually by
+ * escape calls. The caller must hold the dc lock while doing so.
+ *
+ * Return: none (void function)
+ *
+ */
+void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
+{
+ uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
+ unsigned int softMax, maxDPM, funcMin;
+ bool p_state_change_support;
+
+ if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
+ return;
+
+ softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
+ maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
+ funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
+ p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
+
+ if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
+ if (p_state_change_support) {
+ if (funcMin <= softMax)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
+ // else: No-Op
+ } else {
+ if (funcMin <= softMax)
+ blank_and_force_memclk(dc, true, softMax);
+ // else: No-Op
+ }
+ } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
+ if (p_state_change_support) {
+ if (funcMin <= softMax)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
+ // else: No-Op
+ } else {
+ if (funcMin <= softMax)
+ blank_and_force_memclk(dc, true, maxDPM);
+ // else: No-Op
+ }
+ }
+ dc->clk_mgr->dc_mode_softmax_enabled = enable;
+}
bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
struct dc_cursor_attributes *cursor_attr)
{
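dc_enable_dcmode_clk_limit() above branches on p-state support and the soft-max comparison before deciding between a plain clamp and a blank-and-force. A minimal standalone sketch of the same decision tree — the setters are stubs and every name here is illustrative, not the driver's API (flag bookkeeping omitted):

#include <stdbool.h>
#include <stdio.h>

static void set_max_memclk(unsigned int mhz)
{
	printf("max memclk -> %u MHz\n", mhz);
}

static void blank_and_force(unsigned int mhz)
{
	printf("blank pipes, force memclk -> %u MHz\n", mhz);
}

static void dcmode_clk_limit(bool enable, bool softmax_enabled,
			     bool p_state_change_support,
			     unsigned int func_min, unsigned int soft_max,
			     unsigned int max_dpm)
{
	if (enable == softmax_enabled)
		return; /* no power-source transition */
	if (func_min > soft_max)
		return; /* display needs more bandwidth than the soft max */
	if (p_state_change_support)
		set_max_memclk(enable ? soft_max : max_dpm);
	else
		blank_and_force(enable ? soft_max : max_dpm);
}

int main(void)
{
	dcmode_clk_limit(true, false, true, 800, 1000, 1600);	/* AC -> DC */
	dcmode_clk_limit(false, true, false, 800, 1000, 1600);	/* DC -> AC */
	return 0;
}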
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 21be2a684393..643762542e4d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -422,6 +422,8 @@ char *dc_status_to_str(enum dc_status status)
return "The operation is not supported.";
case DC_UNSUPPORTED_VALUE:
return "The value specified is not supported.";
+ case DC_NO_LINK_ENC_RESOURCE:
+ return "No link encoder resource";
case DC_ERROR_UNEXPECTED:
return "Unexpected error";
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index c0bdc23702c8..b5e570d33ca9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -66,31 +66,6 @@
/*******************************************************************************
* Private functions
******************************************************************************/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-static bool add_dp_hpo_link_encoder_to_link(struct dc_link *link)
-{
- struct hpo_dp_link_encoder *enc = resource_get_unused_hpo_dp_link_encoder(
- link->dc->res_pool);
-
- if (!link->hpo_dp_link_enc && enc) {
- link->hpo_dp_link_enc = enc;
- link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
- link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
- }
-
- return (link->hpo_dp_link_enc != NULL);
-}
-
-static void remove_dp_hpo_link_encoder_from_link(struct dc_link *link)
-{
- if (link->hpo_dp_link_enc) {
- link->hpo_dp_link_enc->hpd_source = HPD_SOURCEID_UNKNOWN;
- link->hpo_dp_link_enc->transmitter = TRANSMITTER_UNKNOWN;
- link->hpo_dp_link_enc = NULL;
- }
-}
-#endif
-
static void dc_link_destruct(struct dc_link *link)
{
int i;
@@ -118,12 +93,6 @@ static void dc_link_destruct(struct dc_link *link)
link->link_enc->funcs->destroy(&link->link_enc);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (link->hpo_dp_link_enc) {
- remove_dp_hpo_link_encoder_from_link(link);
- }
-#endif
-
if (link->local_sink)
dc_sink_release(link->local_sink);
@@ -270,10 +239,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
/* Link may not have physical HPD pin. */
if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->hpd_status)
- *type = dc_connection_single;
- else
+ if (link->is_hpd_pending || !link->hpd_status)
*type = dc_connection_none;
+ else
+ *type = dc_connection_single;
return true;
}
@@ -881,6 +850,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
enum dc_connection_type pre_connection_type = dc_connection_none;
bool perform_dp_seamless_boot = false;
const uint32_t post_oui_delay = 30; // 30ms
+ struct link_resource link_res = { 0 };
DC_LOGGER_INIT(link->ctx->logger);
@@ -975,7 +945,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING)
- add_dp_hpo_link_encoder_to_link(link);
+ link_res.hpo_dp_link_enc = resource_get_hpo_dp_link_enc_for_det_lt(
+ &link->dc->current_state->res_ctx,
+ link->dc->res_pool,
+ link);
#endif
if (link->type == dc_connection_mst_branch) {
@@ -986,7 +959,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
* empty which leads to allocate_mst_payload() has "0"
* pbn_per_slot value leading to exception on dc_fixpt_div()
*/
- dp_verify_mst_link_cap(link);
+ dp_verify_mst_link_cap(link, &link_res);
/*
* This call will initiate MST topology discovery. Which
@@ -1150,6 +1123,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
// verify link cap for SST non-seamless boot
if (!perform_dp_seamless_boot)
dp_verify_link_cap_with_retries(link,
+ &link_res,
&link->reported_link_cap,
LINK_TRAINING_MAX_VERIFY_RETRY);
} else {
@@ -1844,6 +1818,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
union down_spread_ctrl old_downspread;
union down_spread_ctrl new_downspread;
+ memset(&old_downspread, 0, sizeof(old_downspread));
+
core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
&old_downspread.raw, sizeof(old_downspread));
@@ -2015,6 +1991,57 @@ static enum dc_status enable_link_dp_mst(
return enable_link_dp(state, pipe_ctx);
}
+void dc_link_blank_all_dp_displays(struct dc *dc)
+{
+ unsigned int i;
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) ||
+ (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL))
+ continue;
+
+ /* DP 2.0 spec requires that we read LTTPR caps first */
+ dp_retrieve_lttpr_cap(dc->links[i]);
+ /* if any of the displays are lit up, turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ dc_link_blank_dp_stream(dc->links[i], true);
+ }
+}
+
+void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init)
+{
+ unsigned int j;
+ struct dc *dc = link->ctx->dc;
+ enum signal_type signal = link->connector_signal;
+
+ if ((signal == SIGNAL_TYPE_EDP) ||
+ (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ link->link_enc->funcs->get_dig_frontend &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+ unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+
+ if (fe != ENGINE_ID_UNKNOWN)
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+
+ if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+ dp_receiver_power_ctrl(link, false);
+ }
+}
+
static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
enum engine_id eng_id,
struct ext_hdmi_settings *settings)
@@ -2452,7 +2479,8 @@ static void write_i2c_redriver_setting(
DC_LOG_DEBUG("Set redriver failed");
}
-static void disable_link(struct dc_link *link, enum signal_type signal)
+static void disable_link(struct dc_link *link, const struct link_resource *link_res,
+ enum signal_type signal)
{
/*
* TODO: implement call for dp_set_hw_test_pattern
@@ -2471,20 +2499,20 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
struct dc_link_settings link_settings = link->cur_link_settings;
#endif
if (dc_is_dp_sst_signal(signal))
- dp_disable_link_phy(link, signal);
+ dp_disable_link_phy(link, link_res, signal);
else
- dp_disable_link_phy_mst(link, signal);
+ dp_disable_link_phy_mst(link, link_res, signal);
if (dc_is_dp_sst_signal(signal) ||
link->mst_stream_alloc_table.stream_count == 0) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
dp_set_fec_enable(link, false);
- dp_set_fec_ready(link, false);
+ dp_set_fec_ready(link, link_res, false);
}
#else
dp_set_fec_enable(link, false);
- dp_set_fec_ready(link, false);
+ dp_set_fec_ready(link, link_res, false);
#endif
}
} else {
@@ -2595,7 +2623,7 @@ static enum dc_status enable_link(
* new link settings.
*/
if (link->link_status.link_active) {
- disable_link(link, pipe_ctx->stream->signal);
+ disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
}
switch (pipe_ctx->stream->signal) {
@@ -2715,8 +2743,23 @@ static bool dp_active_dongle_validate_timing(
return false;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
+ struct dc_crtc_timing outputTiming = *timing;
+
+ if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
+ /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
+ outputTiming.flags.DSC = 0;
+ if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
+ return false;
+ } else { // DP to HDMI TMDS converter
+ if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
+ return false;
+ }
+#else
if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
return false;
+#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
}
@@ -2962,7 +3005,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
link->psr_settings.psr_power_opt = *power_opts;
if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
- psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt);
+ psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
}
/* Enable or Disable PSR */
@@ -3350,11 +3393,12 @@ static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_tim
/*
* Payload allocation/deallocation for SST introduced in DP2.0
*/
-enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate)
+static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
+ bool allocate)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct link_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
@@ -3436,7 +3480,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct link_encoder *link_encoder = NULL;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
#endif
struct dp_mst_stream_allocation_table proposed_table = {0};
@@ -3766,7 +3810,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct link_encoder *link_encoder = NULL;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
#endif
struct dp_mst_stream_allocation_table proposed_table = {0};
@@ -3927,109 +3971,73 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct link_encoder *link_enc = NULL;
- struct dc_state *state = pipe_ctx->stream->ctx->dc->current_state;
- struct link_enc_assignment link_enc_assign;
- int i;
-#endif
+ struct cp_psp_stream_config config = {0};
+ enum dp_panel_mode panel_mode =
+ dp_get_panel_mode(pipe_ctx->stream->link);
- if (cp_psp && cp_psp->funcs.update_stream_config) {
- struct cp_psp_stream_config config = {0};
- enum dp_panel_mode panel_mode =
- dp_get_panel_mode(pipe_ctx->stream->link);
+ if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
+ return;
- config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
- /*stream_enc_inst*/
- config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
- config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
-
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
- pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
- link_enc = pipe_ctx->stream->link->link_enc;
- else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
- link_enc = link_enc_cfg_get_link_enc_used_by_stream(
- pipe_ctx->stream->ctx->dc,
- pipe_ctx->stream);
- }
- // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0
- config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_enc = pipe_ctx->stream->link->link_enc;
+ else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(
+ pipe_ctx->stream->ctx->dc,
+ pipe_ctx->stream);
+ ASSERT(link_enc);
+ if (link_enc == NULL)
+ return;
- //look up the link_enc_assignment for the current pipe_ctx
- for (i = 0; i < state->stream_count; i++) {
- if (pipe_ctx->stream == state->streams[i]) {
- link_enc_assign = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
- }
- }
- // Add flag to guard new A0 DIG mapping
- if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true) {
- config.dig_be = link_enc_assign.eng_id;
- config.dio_output_type = pipe_ctx->stream->link->ep_type;
- config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- } else {
- config.dio_output_type = 0;
- config.dio_output_idx = 0;
- }
+ /* otg instance */
+ config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
- // Add flag to guard B0 implementation
- if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
- link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- link_enc = link_enc_assign.stream->link_enc;
+ /* dig front end */
+ config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
- // enum ID 1-4 maps to DPIA PHY ID 0-3
- config.phy_idx = link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1;
- } else { // for non DPIA mode over B0, ABCDE maps to 01564
+ /* stream encoder index */
+ config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ config.stream_enc_idx =
+ pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
+#endif
- switch (link_enc->transmitter) {
- case TRANSMITTER_UNIPHY_A:
- config.phy_idx = 0;
- break;
- case TRANSMITTER_UNIPHY_B:
- config.phy_idx = 1;
- break;
- case TRANSMITTER_UNIPHY_C:
- config.phy_idx = 5;
- break;
- case TRANSMITTER_UNIPHY_D:
- config.phy_idx = 6;
- break;
- case TRANSMITTER_UNIPHY_E:
- config.phy_idx = 4;
- break;
- default:
- config.phy_idx = 0;
- break;
- }
+ /* dig back end */
+ config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
- }
- }
- } else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
- link_enc = link_enc_cfg_get_link_enc_used_by_stream(
- pipe_ctx->stream->ctx->dc,
- pipe_ctx->stream);
- config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. */
- }
- ASSERT(link_enc);
- if (link_enc)
- config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- if (is_dp_128b_132b_signal(pipe_ctx)) {
- config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
- config.link_enc_idx = pipe_ctx->stream->link->hpo_dp_link_enc->inst;
- config.dp2_enabled = 1;
- }
+ /* link encoder index */
+ config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
#endif
- config.dpms_off = dpms_off;
- config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
- config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP);
- config.mst_enabled = (pipe_ctx->stream->signal ==
- SIGNAL_TYPE_DISPLAY_PORT_MST);
- cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
- }
+ /* dio output index */
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ /* phy index */
+ config.phy_idx = resource_transmitter_to_phy_idx(
+ pipe_ctx->stream->link->dc, link_enc->transmitter);
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ /* USB4 DPIA doesn't use a PHY in our SoC; initialize it to 0 */
+ config.phy_idx = 0;
+
+ /* stream properties */
+ config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
+ config.mst_enabled = (pipe_ctx->stream->signal ==
+ SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
+#endif
+ config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
+ 1 : 0;
+ config.dpms_off = dpms_off;
+
+ /* dm stream context */
+ config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+
+ cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
#endif
@@ -4050,7 +4058,7 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
stream->link->cur_link_settings = link_settings;
/* Enable clock, Configure lane count, and Enable Link Encoder*/
- enable_dp_hpo_output(stream->link, &stream->link->cur_link_settings);
+ enable_dp_hpo_output(stream->link, &pipe_ctx->link_res, &stream->link->cur_link_settings);
#ifdef DIAGS_BUILD
/* Workaround for FPGA HPO capture DP link data:
@@ -4100,12 +4108,12 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
}
- stream->link->hpo_dp_link_enc->funcs->update_stream_allocation_table(
- stream->link->hpo_dp_link_enc,
+ pipe_ctx->link_res.hpo_dp_link_enc->funcs->update_stream_allocation_table(
+ pipe_ctx->link_res.hpo_dp_link_enc,
&proposed_table);
- stream->link->hpo_dp_link_enc->funcs->set_throttled_vcp_size(
- stream->link->hpo_dp_link_enc,
+ pipe_ctx->link_res.hpo_dp_link_enc->funcs->set_throttled_vcp_size(
+ pipe_ctx->link_res.hpo_dp_link_enc,
pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
avg_time_slots_per_mtp);
@@ -4255,7 +4263,8 @@ void core_link_enable_stream(
/* eDP lit up by bios already, no need to enable again. */
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
apply_edp_fast_boot_optimization &&
- !pipe_ctx->stream->timing.flags.DSC) {
+ !pipe_ctx->stream->timing.flags.DSC &&
+ !pipe_ctx->next_odm_pipe) {
pipe_ctx->stream->dpms_off = false;
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
@@ -4293,7 +4302,8 @@ void core_link_enable_stream(
if (status != DC_FAIL_DP_LINK_TRAINING ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
if (false == stream->link->link_status.link_active)
- disable_link(stream->link, pipe_ctx->stream->signal);
+ disable_link(stream->link, &pipe_ctx->link_res,
+ pipe_ctx->stream->signal);
BREAK_TO_DEBUGGER();
return;
}
@@ -4442,14 +4452,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
* state machine.
* In DP2 or MST mode, our encoder will stay video active
*/
- disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
dc->hwss.disable_stream(pipe_ctx);
} else {
dc->hwss.disable_stream(pipe_ctx);
- disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
}
#else
- disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
dc->hwss.disable_stream(pipe_ctx);
#endif
@@ -4532,16 +4542,22 @@ void dc_link_set_drive_settings(struct dc *dc,
{
int i;
+ struct pipe_ctx *pipe = NULL;
+ const struct link_resource *link_res;
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i] == link)
- break;
- }
+ link_res = dc_link_get_cur_link_res(link);
- if (i >= dc->link_count)
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link) {
+ if (pipe->stream->link == link)
+ break;
+ }
+ }
+ if (pipe && link_res)
+ dc_link_dp_set_drive_settings(pipe->stream->link, link_res, lt_settings);
+ else
ASSERT_CRITICAL(false);
-
- dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
}
void dc_link_set_preferred_link_settings(struct dc *dc,
@@ -4602,11 +4618,9 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
if (link_setting != NULL) {
link->preferred_link_setting = *link_setting;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (dp_get_link_encoding_format(link_setting) ==
- DP_128b_132b_ENCODING && !link->hpo_dp_link_enc) {
- if (!add_dp_hpo_link_encoder_to_link(link))
- memset(&link->preferred_link_setting, 0, sizeof(link->preferred_link_setting));
- }
+ if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING)
+ /* TODO: add dc update for acquiring link res */
+ skip_immediate_retrain = true;
#endif
} else {
link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
@@ -4733,6 +4747,9 @@ void dc_link_overwrite_extended_receiver_cap(
bool dc_link_is_fec_supported(const struct dc_link *link)
{
+ /* TODO - use asic cap instead of link_enc->features
+ * we no longer know which link enc to use for this link before commit
+ */
struct link_encoder *link_enc = NULL;
/* Links supporting dynamically assigned link encoder will be assigned next
@@ -4762,6 +4779,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
link->local_sink &&
link->local_sink->edid_caps.panel_patch.disable_fec) ||
(link->connector_signal == SIGNAL_TYPE_EDP
+ // on eDP, only keep FEC enabled when DSC is also supported
+ && link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT == false
))
is_fec_disable = true;
@@ -4825,3 +4844,125 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
return kbps;
}
+
+const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link)
+{
+ int i;
+ struct pipe_ctx *pipe = NULL;
+ const struct link_resource *link_res = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) {
+ if (pipe->stream->link == link) {
+ link_res = &pipe->link_res;
+ break;
+ }
+ }
+ }
+
+ return link_res;
+}
+
+/**
+ * dc_get_cur_link_res_map() - take a snapshot of current link resource allocation state
+ * @dc: pointer to dc of the dm calling this
+ * @map: a dc link resource snapshot defined internally to dc.
+ *
+ * The DM needs to capture a snapshot of the current link resource allocation
+ * mapping and store it in its persistent storage.
+ *
+ * Some link resources are allocated on a first-come, first-served basis, so
+ * the allocation mapping depends on the original hotplug order. That
+ * information is lost the next time the driver loads. The snapshot is used
+ * to restore link resources to their previous state so the user gets a
+ * consistent link capability allocation across reboots.
+ *
+ * Return: none (void function)
+ *
+ */
+void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link *link;
+ uint8_t i;
+ uint32_t hpo_dp_recycle_map = 0;
+
+ *map = 0;
+
+ if (dc->caps.dp_hpo) {
+ for (i = 0; i < dc->caps.max_links; i++) {
+ link = dc->links[i];
+ if (link->link_status.link_active &&
+ dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING &&
+ dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING)
+ /* The HPO DP link encoder is considered recycled when the RX
+ * reports 128b/132b encoding capability but the current link
+ * doesn't use it.
+ */
+ hpo_dp_recycle_map |= (1 << i);
+ }
+ *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT);
+ }
+#endif
+}
+
+/**
+ * dc_restore_link_res_map() - restore link resource allocation state from a snapshot
+ * @dc: pointer to dc of the dm calling this
+ * @map: a dc link resource snapshot defined internally to dc.
+ *
+ * The DM needs to call this function after initial link detection on boot,
+ * and before the first commit of streams, to restore the link resource
+ * allocation state from the previous boot session.
+ *
+ * Some link resources are allocated on a first-come, first-served basis, so
+ * the allocation mapping depends on the original hotplug order. That
+ * information is lost the next time the driver loads. The snapshot is used
+ * to restore link resources to their previous state so the user gets a
+ * consistent link capability allocation across reboots.
+ *
+ * Return: none (void function)
+ *
+ */
+void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link *link;
+ uint8_t i;
+ unsigned int available_hpo_dp_count;
+ uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK)
+ >> LINK_RES_HPO_DP_REC_MAP__SHIFT;
+
+ if (dc->caps.dp_hpo) {
+ available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count;
+ /* remove excess 128b/132b encoding support for non-recycled links */
+ for (i = 0; i < dc->caps.max_links; i++) {
+ if ((hpo_dp_recycle_map & (1 << i)) == 0) {
+ link = dc->links[i];
+ if (link->type != dc_connection_none &&
+ dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
+ if (available_hpo_dp_count > 0)
+ available_hpo_dp_count--;
+ else
+ /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
+ }
+ }
+ }
+ /* remove excess 128b/132b encoding support for recycled links */
+ for (i = 0; i < dc->caps.max_links; i++) {
+ if ((hpo_dp_recycle_map & (1 << i)) != 0) {
+ link = dc->links[i];
+ if (link->type != dc_connection_none &&
+ dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
+ if (available_hpo_dp_count > 0)
+ available_hpo_dp_count--;
+ else
+ /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
+ }
+ }
+ }
+ }
+#endif
+}
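Taken together, dc_get_cur_link_res_map() and dc_restore_link_res_map() persist which links had their HPO DP encoder recycled, then hand the limited encoders back out, non-recycled links first. A compact userspace model of that bookkeeping — the shift/mask values, link count, and 3-encoder pool are all assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define REC_MAP_SHIFT 0
#define REC_MAP_MASK  0xFFFFu

/* capture: set bit i when link i's HPO encoder counts as recycled */
static uint32_t snapshot(const int *recycled, int n)
{
	uint32_t map = 0;

	for (int i = 0; i < n; i++)
		if (recycled[i])
			map |= 1u << i;
	return map << REC_MAP_SHIFT;
}

/* restore: serve non-recycled links first (pass 0), then recycled ones;
 * links left without an encoder get demoted (capped at HBR3) */
static void restore(uint32_t map, int n, int hpo_count, int *demoted)
{
	uint32_t rec = (map & REC_MAP_MASK) >> REC_MAP_SHIFT;

	for (int pass = 0; pass < 2; pass++)
		for (int i = 0; i < n; i++) {
			if ((int)((rec >> i) & 1) != pass)
				continue;
			if (hpo_count > 0)
				hpo_count--;
			else
				demoted[i] = 1;
		}
}

int main(void)
{
	int recycled[4] = { 0, 1, 0, 1 };
	int demoted[4] = { 0 };

	restore(snapshot(recycled, 4), 4, 3, demoted);
	for (int i = 0; i < 4; i++)
		printf("link %d demoted=%d\n", i, demoted[i]);
	return 0;
}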
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 60539b1f2a80..24dc662ec3e4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -626,7 +626,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
- payload->length ? true : false;
+ payload->length;
uint32_t payload_length = is_end_of_payload ?
payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
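The cleanup above drops a redundant "? true : false"; the surrounding chunking math is easiest to see in isolation. A small standalone sketch (the 16-byte chunk size is assumed here purely for illustration):

#include <stdio.h>

#define AUX_MAX_DATA_SIZE 16	/* assumed chunk size */

int main(void)
{
	unsigned int length = 37, retrieved = 0;

	do {
		int is_end = (retrieved + AUX_MAX_DATA_SIZE) >= length;
		unsigned int chunk = is_end ? length - retrieved
					    : AUX_MAX_DATA_SIZE;

		printf("chunk of %u bytes at offset %u%s\n",
		       chunk, retrieved, is_end ? " (last)" : "");
		retrieved += chunk;
	} while (retrieved < length);
	return 0;
}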
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 13bc69d6b679..4c3ab2575e4b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -100,6 +100,7 @@ static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
#endif
static bool decide_fallback_link_setting(
+ struct dc_link *link,
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
enum link_training_result training_result);
@@ -201,7 +202,7 @@ void dp_wait_for_training_aux_rd_interval(
uint32_t wait_in_micro_secs)
{
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (wait_in_micro_secs > 16000)
+ if (wait_in_micro_secs > 1000)
msleep(wait_in_micro_secs/1000);
else
udelay(wait_in_micro_secs);
@@ -398,6 +399,223 @@ static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
}
#endif
+static void vendor_specific_lttpr_wa_one_start(struct dc_link *link)
+{
+ const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0xff};
+ const uint8_t offset = dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+
+ if (offset != 0xFF)
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* W/A for certain LTTPR to reset their lane settings, part one of two */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data[0],
+ sizeof(vendor_lttpr_write_data));
+}
+
+static void vendor_specific_lttpr_wa_one_end(
+ struct dc_link *link,
+ uint8_t retry_count)
+{
+ const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0x0};
+ const uint8_t offset = dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+
+ if (!retry_count) {
+ if (offset != 0xFF)
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* W/A for certain LTTPR to reset their lane settings, part two of two */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data[0],
+ sizeof(vendor_lttpr_write_data));
+ }
+}
+
+static void vendor_specific_lttpr_wa_one_two(
+ struct dc_link *link,
+ const uint8_t rate)
+{
+ if (link->apply_vendor_specific_lttpr_link_rate_wa) {
+ uint8_t toggle_rate = 0x0;
+
+ if (rate == 0x6)
+ toggle_rate = 0xA;
+ else
+ toggle_rate = 0x6;
+
+ if (link->vendor_specific_lttpr_link_rate_wa == rate) {
+ /* W/A for certain LTTPR to reset internal state for link training */
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &toggle_rate,
+ 1);
+ }
+
+ /* Store the last attempted link rate for this link */
+ link->vendor_specific_lttpr_link_rate_wa = rate;
+ }
+}
+
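vendor_specific_lttpr_wa_one_two() above only issues the extra DP_LINK_BW_SET write when the same link rate is attempted twice in a row, toggling between 0x6 and 0xA so the LTTPR resets its training state. A minimal sketch of that toggle — dpcd_write() is a stand-in, and the real rate write happens later in dpcd_set_link_settings():

#include <stdint.h>
#include <stdio.h>

#define DP_LINK_BW_SET 0x00100

static void dpcd_write(uint32_t addr, uint8_t val)
{
	printf("DPCD[0x%05X] <- 0x%02X\n", (unsigned int)addr, val);
}

int main(void)
{
	uint8_t last_rate = 0x6;	/* rate from the previous attempt */
	uint8_t rate = 0x6;		/* same rate requested again */

	if (last_rate == rate) {
		uint8_t toggle = (rate == 0x6) ? 0xA : 0x6;

		dpcd_write(DP_LINK_BW_SET, toggle); /* kick the LTTPR */
	}
	last_rate = rate;			/* remember for next time */
	dpcd_write(DP_LINK_BW_SET, rate);	/* done later in the driver */
	return 0;
}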
+static void vendor_specific_lttpr_wa_three(
+ struct dc_link *link,
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX])
+{
+ const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63};
+ const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63};
+ const uint8_t offset = dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ uint32_t vendor_lttpr_read_address = 0xF0053;
+ uint8_t dprx_vs = 0;
+ uint8_t dprx_pe = 0;
+ uint8_t lane;
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ vendor_lttpr_read_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+
+ /* W/A to read lane settings requested by DPRX */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_read_dpcd(
+ link,
+ vendor_lttpr_read_address,
+ &dprx_vs,
+ 1);
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+ core_link_read_dpcd(
+ link,
+ vendor_lttpr_read_address,
+ &dprx_pe,
+ 1);
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE = (dprx_vs >> (2 * lane)) & 0x3;
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE = (dprx_pe >> (2 * lane)) & 0x3;
+ }
+}
+
+static void vendor_specific_lttpr_wa_three_dpcd(
+ struct dc_link *link,
+ union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX])
+{
+ union lane_adjust lane_adjust[LANE_COUNT_DP_MAX];
+ uint8_t lane = 0;
+
+ vendor_specific_lttpr_wa_three(link, lane_adjust);
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = lane_adjust[lane].bits.VOLTAGE_SWING_LANE;
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = lane_adjust[lane].bits.PRE_EMPHASIS_LANE;
+ }
+}
+
+static void vendor_specific_lttpr_wa_four(
+ struct dc_link *link,
+ bool apply_wa)
+{
+ const uint8_t vendor_lttpr_write_data_one[4] = {0x1, 0x55, 0x63, 0x8};
+ const uint8_t vendor_lttpr_write_data_two[4] = {0x1, 0x55, 0x63, 0x0};
+ const uint8_t offset = dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+#if defined(CONFIG_DRM_AMD_DC_DP2_0)
+ uint8_t sink_status = 0;
+ uint8_t i;
+#endif
+
+ if (offset != 0xFF)
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* W/A to pass through DPCD write of TPS=0 to DPRX */
+ if (apply_wa) {
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_one[0],
+ sizeof(vendor_lttpr_write_data_one));
+ }
+
+ /* clear training pattern set */
+ dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+
+ if (apply_wa) {
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_two[0],
+ sizeof(vendor_lttpr_write_data_two));
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DP2_0)
+ /* poll for intra-hop disable */
+ for (i = 0; i < 10; i++) {
+ if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
+ (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
+ break;
+ udelay(1000);
+ }
+#endif
+}
+
+static void vendor_specific_lttpr_wa_five(
+ struct dc_link *link,
+ const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
+ uint8_t lane_count)
+{
+ const uint32_t vendor_lttpr_write_address = 0xF004F;
+ const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ uint8_t lane = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Force LTTPR to output desired VS and PE */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+}
+
enum dc_status dpcd_set_link_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings)
@@ -430,7 +648,7 @@ enum dc_status dpcd_set_link_settings(
status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, 1);
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
lt_settings->link_settings.use_link_rate_set == true) {
rate = 0;
/* WA for some MUX chips that will power down with eDP and lose supported
@@ -452,6 +670,15 @@ enum dc_status dpcd_set_link_settings(
#else
rate = (uint8_t) (lt_settings->link_settings.link_rate);
#endif
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ vendor_specific_lttpr_wa_one_start(link);
+
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
+ vendor_specific_lttpr_wa_one_two(link, rate);
+
status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
@@ -1024,6 +1251,7 @@ bool dp_is_max_vs_reached(
static bool perform_post_lt_adj_req_sequence(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
enum dc_lane_count lane_count =
@@ -1087,6 +1315,7 @@ static bool perform_post_lt_adj_req_sequence(
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
dc_link_dp_set_drive_settings(link,
+ link_res,
lt_settings);
break;
}
@@ -1161,6 +1390,7 @@ enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
static enum link_training_result perform_channel_equalization_sequence(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t offset)
{
@@ -1183,12 +1413,12 @@ static enum link_training_result perform_channel_equalization_sequence(
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
#endif
- dp_set_hw_training_pattern(link, tr_pattern, offset);
+ dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
retries_ch_eq++) {
- dp_set_hw_lane_settings(link, lt_settings, offset);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
/* 2. update DPCD*/
if (!retries_ch_eq)
@@ -1211,6 +1441,12 @@ static enum link_training_result perform_channel_equalization_sequence(
dp_translate_training_aux_read_interval(
link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ wait_time_microsec = 16000;
+ }
+
dp_wait_for_training_aux_rd_interval(
link,
wait_time_microsec);
@@ -1246,18 +1482,20 @@ static enum link_training_result perform_channel_equalization_sequence(
}
static void start_clock_recovery_pattern_early(struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t offset)
{
DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
__func__);
- dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
- dp_set_hw_lane_settings(link, lt_settings, offset);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
udelay(400);
}
static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t offset)
{
@@ -1273,7 +1511,7 @@ static enum link_training_result perform_clock_recovery_sequence(
retry_count = 0;
if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
- dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
/* najeeb - The synaptics MST hub can put the LT in
* infinite loop by switching the VS
@@ -1290,6 +1528,7 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 1. call HWSS to set lane settings*/
dp_set_hw_lane_settings(
link,
+ link_res,
lt_settings,
offset);
@@ -1311,8 +1550,10 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 3. wait receiver to lock-on*/
wait_time_microsec = lt_settings->cr_pattern_time;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) {
+ wait_time_microsec = 16000;
+ }
dp_wait_for_training_aux_rd_interval(
link,
@@ -1329,6 +1570,13 @@ static enum link_training_result perform_clock_recovery_sequence(
dpcd_lane_adjust,
offset);
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ vendor_specific_lttpr_wa_one_end(link, retry_count);
+ vendor_specific_lttpr_wa_three(link, dpcd_lane_adjust);
+ }
+
/* 5. check CR done*/
if (dp_is_cr_done(lane_count, dpcd_lane_status))
return LINK_TRAINING_SUCCESS;
@@ -1379,13 +1627,14 @@ static enum link_training_result perform_clock_recovery_sequence(
static inline enum link_training_result dp_transition_to_video_idle(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
enum link_training_result status)
{
union lane_count_set lane_count_set = {0};
/* 4. mainlink output idle pattern*/
- dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
/*
* 5. post training adjust if required
@@ -1409,7 +1658,7 @@ static inline enum link_training_result dp_transition_to_video_idle(
}
if (status == LINK_TRAINING_SUCCESS &&
- perform_post_lt_adj_req_sequence(link, lt_settings) == false)
+ perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false)
status = LINK_TRAINING_LQA_FAIL;
lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
@@ -1852,10 +2101,11 @@ static void print_status_message(
void dc_link_dp_set_drive_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
/* program ASIC PHY settings*/
- dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
dp_hw_to_dpcd_lane_settings(lt_settings,
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
@@ -1866,6 +2116,7 @@ void dc_link_dp_set_drive_settings(
bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting)
{
struct link_training_settings lt_settings = {0};
@@ -1882,10 +2133,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
/* 1. Perform_clock_recovery_sequence. */
/* transmit training pattern for clock recovery */
- dp_set_hw_training_pattern(link, lt_settings.pattern_for_cr, DPRX);
+ dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX);
/* call HWSS to set lane settings*/
- dp_set_hw_lane_settings(link, &lt_settings, DPRX);
+ dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
/* wait receiver to lock-on*/
dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
@@ -1893,10 +2144,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
/* 2. Perform_channel_equalization_sequence. */
/* transmit training pattern for channel equalization. */
- dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX);
+ dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX);
/* call HWSS to set lane settings*/
- dp_set_hw_lane_settings(link, &lt_settings, DPRX);
+ dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
/* wait receiver to lock-on. */
dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
@@ -1904,7 +2155,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
/* 3. Perform_link_training_int. */
/* Mainlink output idle pattern. */
- dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
print_status_message(link, &lt_settings, LINK_TRAINING_SUCCESS);
@@ -1985,6 +2236,7 @@ static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
uint8_t loop_count;
@@ -1996,7 +2248,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
/* Transmit 128b/132b_TPS1 over Main-Link */
- dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX);
/* Set TRAINING_PATTERN_SET to 01h */
dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
@@ -2006,8 +2258,8 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- dp_set_hw_lane_settings(link, lt_settings, DPRX);
- dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX);
/* Set loop counter to start from 1 */
loop_count = 1;
@@ -2034,7 +2286,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
status = DP_128b_132b_LT_FAILED;
} else {
- dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
dpcd_set_lane_settings(link, lt_settings, DPRX);
}
loop_count++;
@@ -2063,6 +2315,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
/* Assumption: assume hardware has transmitted eq pattern */
@@ -2099,6 +2352,7 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
static enum link_training_result dp_perform_8b_10b_link_training(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
@@ -2108,7 +2362,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
uint8_t lane = 0;
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, lt_settings, DPRX);
+ start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, lt_settings);
@@ -2122,12 +2376,13 @@ static enum link_training_result dp_perform_8b_10b_link_training(
for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
repeater_id--) {
- status = perform_clock_recovery_sequence(link, lt_settings, repeater_id);
+ status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
if (status != LINK_TRAINING_SUCCESS)
break;
status = perform_channel_equalization_sequence(link,
+ link_res,
lt_settings,
repeater_id);
@@ -2142,9 +2397,10 @@ static enum link_training_result dp_perform_8b_10b_link_training(
}
if (status == LINK_TRAINING_SUCCESS) {
- status = perform_clock_recovery_sequence(link, lt_settings, DPRX);
+ status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
if (status == LINK_TRAINING_SUCCESS) {
status = perform_channel_equalization_sequence(link,
+ link_res,
lt_settings,
DPRX);
}
@@ -2156,6 +2412,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
#if defined(CONFIG_DRM_AMD_DC_DCN)
static enum link_training_result dp_perform_128b_132b_link_training(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
enum link_training_result result = LINK_TRAINING_SUCCESS;
@@ -2167,23 +2424,358 @@ static enum link_training_result dp_perform_128b_132b_link_training(
decide_8b_10b_training_settings(link,
&lt_settings->link_settings,
&legacy_settings);
- return dp_perform_8b_10b_link_training(link, &legacy_settings);
+ return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
}
dpcd_set_link_settings(link, lt_settings);
if (result == LINK_TRAINING_SUCCESS)
- result = dp_perform_128b_132b_channel_eq_done_sequence(link, lt_settings);
+ result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);
if (result == LINK_TRAINING_SUCCESS)
- result = dp_perform_128b_132b_cds_done_sequence(link, lt_settings);
+ result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);
return result;
}
#endif
+static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ const uint8_t offset = dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
+ const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ uint8_t lane = 0;
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
+ uint8_t toggle_rate;
+ uint8_t rate;
+
+ /* Only 8b/10b is supported */
+ ASSERT(dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING);
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+
+ /* Vendor specific: Reset lane settings */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* Vendor specific: Enable intercept */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_en[0],
+ sizeof(vendor_lttpr_write_data_intercept_en));
+
+ /* 1. set link rate, lane count and spread. */
+
+ downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
+
+ lane_count_set.bits.LANE_COUNT_SET =
+ lt_settings->link_settings.lane_count;
+
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+
+ if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+ }
+
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ rate = get_dpcd_link_rate(&lt_settings->link_settings);
+#else
+ rate = (uint8_t) (lt_settings->link_settings.link_rate);
+#endif
+
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+
+ if (link->vendor_specific_lttpr_link_rate_wa == rate) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &toggle_rate,
+ 1);
+ }
+
+ link->vendor_specific_lttpr_link_rate_wa = rate;
+
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+
+ /* 2. Perform link training */
+
+ /* Perform Clock Recovery Sequence */
+ if (status == LINK_TRAINING_SUCCESS) {
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ retries_cr = 0;
+ retry_count = 0;
+
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ /* 1. call HWSS to set lane settings */
+ dp_set_hw_lane_settings(
+ link,
+ link_res,
+ lt_settings,
+ 0);
+
+ /* 2. update DPCD of the receiver */
+ if (!retry_count) {
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the first iteration.
+ */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ lt_settings->pattern_for_cr,
+ 0);
+ /* Vendor specific: Disable intercept */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_dis[0],
+ sizeof(vendor_lttpr_write_data_intercept_dis));
+ } else {
+ vendor_lttpr_write_data_vs[3] = 0;
+ vendor_lttpr_write_data_pe[3] = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Vendor specific: Update VS and PE to DPRX requested value */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ dpcd_set_lane_settings(
+ link,
+ lt_settings,
+ 0);
+ }
+
+ /* 3. wait for receiver to lock-on */
+ wait_time_microsec = lt_settings->cr_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ 0);
+
+ /* 5. check CR done*/
+ if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ status = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* 6. max VS reached*/
+ if (dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* 7. same lane settings */
+ /* Note: settings are the same for all lanes,
+ * so comparing the first lane is sufficient
+ */
+ if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
+ __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+
+ }
+
+ status = dp_get_cr_failure(lane_count, dpcd_lane_status);
+ }
+
+ /* Perform Channel EQ Sequence */
+ if (status == LINK_TRAINING_SUCCESS) {
+ enum dc_dp_training_pattern tr_pattern;
+ uint32_t retries_ch_eq;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ /* Note: also check that TPS4 is a supported feature*/
+ tr_pattern = lt_settings->pattern_for_eq;
+
+ dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
+
+ status = LINK_TRAINING_EQ_FAIL_EQ;
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
+
+ vendor_lttpr_write_data_vs[3] = 0;
+ vendor_lttpr_write_data_pe[3] = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Vendor specific: Update VS and PE to DPRX requested value */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* 2. update DPCD*/
+ if (!retries_ch_eq)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the first iteration
+ */
+
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ tr_pattern, 0);
+ else
+ dpcd_set_lane_settings(link, lt_settings, 0);
+
+ /* 3. wait for receiver to lock-on*/
+ wait_time_microsec = lt_settings->eq_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested
+ * drive settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ 0);
+
+ /* 5. check CR done*/
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ status = LINK_TRAINING_EQ_FAIL_CR;
+ break;
+ }
+
+ /* 6. check CHEQ done*/
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ status = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* 7. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+ }
+
+ return status;
+}
+
+
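The loops above pack one 2-bit drive value per lane into the last byte of each 4-byte vendor write, with lane n occupying bits 2n+1..2n. A minimal standalone sketch of that packing (illustrative helper, not driver code):

#include <stdint.h>

/* Pack up to four 2-bit per-lane values into one byte, mirroring the
 * vendor_lttpr_write_data_vs/pe loops; lane n lands in bits [2n+1:2n]. */
static uint8_t pack_per_lane_2bit(const uint8_t val[4], int lane_count)
{
	uint8_t b = 0;
	int lane;

	for (lane = 0; lane < lane_count; lane++)
		b |= (val[lane] & 0x3) << (2 * lane);
	return b;	/* e.g. {2, 2, 2, 2} over 4 lanes -> 0xAA */
}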
enum link_training_result dc_link_dp_perform_link_training(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
bool skip_video_pattern)
{
@@ -2203,30 +2795,51 @@ enum link_training_result dc_link_dp_perform_link_training(
&lt_settings);
/* reset previous training states */
- dpcd_exit_training_mode(link);
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ link->apply_vendor_specific_lttpr_link_rate_wa = true;
+ vendor_specific_lttpr_wa_four(link, true);
+ } else {
+ dpcd_exit_training_mode(link);
+ }
/* configure link prior to entering training mode */
dpcd_configure_lttpr_mode(link, &lt_settings);
- dp_set_fec_ready(link, lt_settings.should_set_fec_ready);
+ dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
dpcd_configure_channel_coding(link, &lt_settings);
/* enter training mode:
* Per DP specs starting from here, DPTX device shall not issue
* Non-LT AUX transactions inside training mode.
*/
- if (encoding == DP_8b_10b_ENCODING)
- status = dp_perform_8b_10b_link_training(link, &lt_settings);
+ if (!link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ status = dc_link_dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
+ else if (encoding == DP_8b_10b_ENCODING)
+ status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
#if defined(CONFIG_DRM_AMD_DC_DCN)
else if (encoding == DP_128b_132b_ENCODING)
- status = dp_perform_128b_132b_link_training(link, &lt_settings);
+ status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings);
#endif
else
ASSERT(0);
- /* exit training mode and switch to video idle */
- dpcd_exit_training_mode(link);
+ /* exit training mode */
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ link->apply_vendor_specific_lttpr_link_rate_wa = false;
+ vendor_specific_lttpr_wa_four(link, (status != LINK_TRAINING_SUCCESS));
+ } else {
+ dpcd_exit_training_mode(link);
+ }
+
+ /* switch to video idle */
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
status = dp_transition_to_video_idle(link,
+ link_res,
&lt_settings,
status);
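The same gating expression recurs throughout this patch. As a reading aid, its chip/LTTPR half could be factored as below (illustrative helper, not in the driver); call sites combine it with link->dc->debug.apply_vendor_specific_lttpr_wa, asserted on the workaround enter/exit paths but negated when selecting the fixed VS/PE training sequence.

/* Illustrative predicate: true when the PHY uses fixed VS/PE drive and
 * the LTTPR chain runs in transparent mode. */
static inline bool dp_is_fixed_vs_transparent(const struct dc_link *link)
{
	return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
	       link->lttpr_mode == LTTPR_MODE_TRANSPARENT;
}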
@@ -2278,6 +2891,7 @@ bool perform_link_training_with_retries(
dp_enable_link_phy(
link,
+ &pipe_ctx->link_res,
signal,
pipe_ctx->clock_source->id,
&current_setting);
@@ -2305,23 +2919,24 @@ bool perform_link_training_with_retries(
dp_set_panel_mode(link, panel_mode);
if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &current_setting);
+ dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &current_setting);
return true;
} else {
/** @todo Consolidate USB4 DP and DPx.x training. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
status = dc_link_dpia_perform_link_training(link,
- &current_setting,
- skip_video_pattern);
+ &pipe_ctx->link_res,
+ &current_setting,
+ skip_video_pattern);
/* Transmit idle pattern once training successful. */
if (status == LINK_TRAINING_SUCCESS)
- dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE,
- NULL, 0);
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
} else {
status = dc_link_dp_perform_link_training(link,
- &current_setting,
- skip_video_pattern);
+ &pipe_ctx->link_res,
+ &current_setting,
+ skip_video_pattern);
}
if (status == LINK_TRAINING_SUCCESS)
@@ -2336,7 +2951,7 @@ bool perform_link_training_with_retries(
DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
__func__, (unsigned int)j + 1, attempts);
- dp_disable_link_phy(link, signal);
+ dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
/* Abort link training if failure due to sink being unplugged. */
if (status == LINK_TRAINING_ABORT) {
@@ -2349,7 +2964,7 @@ bool perform_link_training_with_retries(
uint32_t req_bw;
uint32_t link_bw;
- decide_fallback_link_setting(*link_setting, &current_setting, status);
+ decide_fallback_link_setting(link, *link_setting, &current_setting, status);
/* Fail link training if reduced link bandwidth no longer meets
* stream requirements.
*/
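The comment above implies a bandwidth re-check after each fallback step; a hedged sketch of that check, reusing the req_bw and link_bw locals declared above (the exact driver code sits outside this hunk):

req_bw = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
link_bw = dc_link_bandwidth_kbps(link, &current_setting);
if (req_bw > link_bw)
	break;	/* fallback setting can no longer carry the stream */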
@@ -2385,12 +3000,13 @@ static enum clock_source_id get_clock_source_id(struct dc_link *link)
return dp_cs_id;
}
-static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
+static void set_dp_mst_mode(struct dc_link *link, const struct link_resource *link_res,
+ bool mst_enable)
{
if (mst_enable == false &&
link->type == dc_connection_mst_branch) {
/* Disable MST on link. Use only local sink. */
- dp_disable_link_phy_mst(link, link->connector_signal);
+ dp_disable_link_phy_mst(link, link_res, link->connector_signal);
link->type = dc_connection_single;
link->local_sink = link->remote_sinks[0];
@@ -2401,7 +3017,7 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
link->type == dc_connection_single &&
link->remote_sinks[0] != NULL) {
/* Re-enable MST on link. */
- dp_disable_link_phy(link, link->connector_signal);
+ dp_disable_link_phy(link, link_res, link->connector_signal);
dp_enable_mst_on_sink(link, true);
link->type = dc_connection_mst_branch;
@@ -2427,6 +3043,7 @@ bool dc_link_dp_sync_lt_begin(struct dc_link *link)
enum link_training_result dc_link_dp_sync_lt_attempt(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *link_settings,
struct dc_link_training_overrides *lt_overrides)
{
@@ -2446,14 +3063,14 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
&lt_settings);
/* Setup MST Mode */
if (lt_overrides->mst_enable)
- set_dp_mst_mode(link, *lt_overrides->mst_enable);
+ set_dp_mst_mode(link, link_res, *lt_overrides->mst_enable);
/* Disable link */
- dp_disable_link_phy(link, link->connector_signal);
+ dp_disable_link_phy(link, link_res, link->connector_signal);
/* Enable link */
dp_cs_id = get_clock_source_id(link);
- dp_enable_link_phy(link, link->connector_signal,
+ dp_enable_link_phy(link, link_res, link->connector_signal,
dp_cs_id, link_settings);
/* Set FEC enable */
@@ -2461,7 +3078,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
#endif
fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
- dp_set_fec_ready(link, fec_enable);
+ dp_set_fec_ready(link, NULL, fec_enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
}
#endif
@@ -2478,7 +3095,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
/* Attempt to train with given link training settings */
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+ start_clock_recovery_pattern_early(link, link_res, &lt_settings, DPRX);
/* Set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
@@ -2486,9 +3103,10 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
/* 2. perform link training (set link training done
* to false is done as well)
*/
- lt_status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
+ lt_status = perform_clock_recovery_sequence(link, link_res, &lt_settings, DPRX);
if (lt_status == LINK_TRAINING_SUCCESS) {
lt_status = perform_channel_equalization_sequence(link,
+ link_res,
&lt_settings,
DPRX);
}
@@ -2509,11 +3127,11 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_link_settings link_settings = link->cur_link_settings;
#endif
- dp_disable_link_phy(link, link->connector_signal);
+ dp_disable_link_phy(link, NULL, link->connector_signal);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
#endif
- dp_set_fec_ready(link, false);
+ dp_set_fec_ready(link, NULL, false);
}
link->sync_lt_in_progress = false;
@@ -2568,7 +3186,8 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_
return false;
}
-static struct dc_link_settings get_max_link_cap(struct dc_link *link)
+static struct dc_link_settings get_max_link_cap(struct dc_link *link,
+ const struct link_resource *link_res)
{
struct dc_link_settings max_link_cap = {0};
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -2592,9 +3211,11 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link_enc)
link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (max_link_cap.link_rate >= LINK_RATE_UHBR10 &&
- !link->hpo_dp_link_enc)
- max_link_cap.link_rate = LINK_RATE_HIGH3;
+ if (max_link_cap.link_rate >= LINK_RATE_UHBR10) {
+ if (!link_res->hpo_dp_link_enc ||
+ link->dc->debug.disable_uhbr)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+ }
#endif
/* Lower link settings based on sink's link cap */
@@ -2612,7 +3233,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) {
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
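The widened condition above applies the repeater-cap clamp whenever any LTTPR is detected, not only in non-transparent mode. Restated as a hedged sketch (illustrative, and the matching link-rate clamp is assumed to follow outside this hunk):

if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) {
	if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
		max_link_cap.lane_count =
			link->dpcd_caps.lttpr_caps.max_lane_count;
	/* clamp max_link_cap.link_rate against lttpr_caps the same way */
}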
@@ -2751,6 +3372,7 @@ bool hpd_rx_irq_check_link_loss_status(
bool dp_verify_link_cap(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *known_limit_link_setting,
int *fail_count)
{
@@ -2768,7 +3390,7 @@ bool dp_verify_link_cap(
/* link training starts with the maximum common settings
* supported by both sink and ASIC.
*/
- max_link_cap = get_max_link_cap(link);
+ max_link_cap = get_max_link_cap(link, link_res);
initial_link_settings = get_common_supported_link_settings(
*known_limit_link_setting,
max_link_cap);
@@ -2808,7 +3430,7 @@ bool dp_verify_link_cap(
* find the physical link capability
*/
/* disable PHY done possible by BIOS, will be done by driver itself */
- dp_disable_link_phy(link, link->connector_signal);
+ dp_disable_link_phy(link, link_res, link->connector_signal);
dp_cs_id = get_clock_source_id(link);
@@ -2820,8 +3442,8 @@ bool dp_verify_link_cap(
*/
if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
link->dc->debug.usbc_combo_phy_reset_wa) {
- dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur);
- dp_disable_link_phy(link, link->connector_signal);
+ dp_enable_link_phy(link, link_res, link->connector_signal, dp_cs_id, cur);
+ dp_disable_link_phy(link, link_res, link->connector_signal);
}
do {
@@ -2832,6 +3454,7 @@ bool dp_verify_link_cap(
dp_enable_link_phy(
link,
+ link_res,
link->connector_signal,
dp_cs_id,
cur);
@@ -2842,6 +3465,7 @@ bool dp_verify_link_cap(
else {
status = dc_link_dp_perform_link_training(
link,
+ link_res,
cur,
skip_video_pattern);
if (status == LINK_TRAINING_SUCCESS)
@@ -2863,8 +3487,8 @@ bool dp_verify_link_cap(
* setting or before returning we'll enable it later
* based on the actual mode we're driving
*/
- dp_disable_link_phy(link, link->connector_signal);
- } while (!success && decide_fallback_link_setting(
+ dp_disable_link_phy(link, link_res, link->connector_signal);
+ } while (!success && decide_fallback_link_setting(link,
initial_link_settings, cur, status));
/* Link Training failed for all Link Settings
@@ -2887,6 +3511,7 @@ bool dp_verify_link_cap(
bool dp_verify_link_cap_with_retries(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *known_limit_link_setting,
int attempts)
{
@@ -2904,7 +3529,7 @@ bool dp_verify_link_cap_with_retries(
link->verified_link_cap.link_rate = LINK_RATE_LOW;
link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
break;
- } else if (dp_verify_link_cap(link,
+ } else if (dp_verify_link_cap(link, link_res,
known_limit_link_setting,
&fail_count) && fail_count == 0) {
success = true;
@@ -2916,13 +3541,13 @@ bool dp_verify_link_cap_with_retries(
}
bool dp_verify_mst_link_cap(
- struct dc_link *link)
+ struct dc_link *link, const struct link_resource *link_res)
{
struct dc_link_settings max_link_cap = {0};
if (dp_get_link_encoding_format(&link->reported_link_cap) ==
DP_8b_10b_ENCODING) {
- max_link_cap = get_max_link_cap(link);
+ max_link_cap = get_max_link_cap(link, link_res);
link->verified_link_cap = get_common_supported_link_settings(
link->reported_link_cap,
max_link_cap);
@@ -2931,6 +3556,7 @@ bool dp_verify_mst_link_cap(
else if (dp_get_link_encoding_format(&link->reported_link_cap) ==
DP_128b_132b_ENCODING) {
dp_verify_link_cap_with_retries(link,
+ link_res,
&link->reported_link_cap,
LINK_TRAINING_MAX_VERIFY_RETRY);
}
@@ -3116,6 +3742,7 @@ static bool decide_fallback_link_setting_max_bw_policy(
* and no further fallback could be done
*/
static bool decide_fallback_link_setting(
+ struct dc_link *link,
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
enum link_training_result training_result)
@@ -3123,7 +3750,8 @@ static bool decide_fallback_link_setting(
if (!current_link_setting)
return false;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING)
+ if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING ||
+ link->dc->debug.force_dp2_lt_fallback_method)
return decide_fallback_link_setting_max_bw_policy(&initial_link_settings,
current_link_setting);
#endif
@@ -3346,6 +3974,148 @@ bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *lin
return false;
}
+static bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw,
+ enum dc_link_rate max_link_rate)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ unsigned int policy = 0;
+
+ policy = link->ctx->dc->debug.force_dsc_edp_policy;
+ if (max_link_rate == LINK_RATE_UNKNOWN)
+ max_link_rate = link->verified_link_cap.link_rate;
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0)) {
+ /* for DSC enabled case, we search for minimum lane count */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = LINK_RATE_LOW;
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = false;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ max_link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+ if (policy) {
+ /* minimize lane */
+ if (current_link_setting.link_rate < max_link_rate) {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ } else {
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ current_link_setting.link_rate = initial_link_setting.link_rate;
+ } else
+ break;
+ }
+ } else {
+ /* minimize link rate */
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
+ }
+ }
+ return false;
+ }
+
+ /* if optimize edp link is supported */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ max_link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+ if (policy) {
+ /* minimize lane */
+ if (current_link_setting.link_rate_set <
+ link->dpcd_caps.edp_supported_link_rates_count
+ && current_link_setting.link_rate < max_link_rate) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ } else {
+ if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ } else
+ break;
+ }
+ } else {
+ /* minimize link rate */
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
+ }
+ }
+ }
+ return false;
+}
+
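A minimal usage sketch for the helper above (hedged; assumes a DSC-enabled eDP stream pointer in scope). Passing LINK_RATE_UNKNOWN lets the helper cap its search at link->verified_link_cap.link_rate:

struct dc_link_settings setting = {0};
uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);

if (!decide_edp_link_settings_with_dsc(link, &setting, req_bw,
				       LINK_RATE_UNKNOWN))
	DC_LOG_WARNING("no eDP link setting meets %u kbps\n", req_bw);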
static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
{
*link_setting = link->verified_link_cap;
@@ -3380,7 +4150,25 @@ void decide_link_settings(struct dc_stream_state *stream,
if (decide_mst_link_settings(link, link_setting))
return;
} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (decide_edp_link_settings(link, link_setting, req_bw))
+ /* enable edp link optimization for DSC eDP case */
+ if (stream->timing.flags.DSC) {
+ enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
+
+ if (link->ctx->dc->debug.force_dsc_edp_policy) {
+ /* calculate the max link rate cap from the same timing with DSC disabled */
+ struct dc_link_settings tmp_link_setting;
+ struct dc_crtc_timing tmp_timing = stream->timing;
+ uint32_t orig_req_bw;
+
+ tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
+ tmp_timing.flags.DSC = 0;
+ orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+ decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
+ max_link_rate = tmp_link_setting.link_rate;
+ }
+ if (decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate))
+ return;
+ } else if (decide_edp_link_settings(link, link_setting, req_bw))
return;
} else if (decide_dp_link_settings(link, link_setting, req_bw))
return;
@@ -3421,7 +4209,6 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
&psr_configuration.raw,
sizeof(psr_configuration.raw));
-
if (psr_configuration.bits.ENABLE) {
unsigned char dpcdbuf[3] = {0};
union psr_error_status psr_error_status;
@@ -3453,10 +4240,12 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
sizeof(psr_error_status.raw));
/* PSR error, disable and re-enable PSR */
- allow_active = false;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
- allow_active = true;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ if (link->psr_settings.psr_allow_active) {
+ allow_active = false;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ allow_active = true;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ }
return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -3534,6 +4323,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
&dpcd_lane_adjustment[0].raw,
sizeof(dpcd_lane_adjustment));
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ vendor_specific_lttpr_wa_three_dpcd(
+ link,
+ link_training_settings.dpcd_lane_settings);
+
/* get post cursor 2 parameters
* For DP 1.1a or earlier, this DPCD register's value is 0
* For DP 1.2 or later:
@@ -4153,6 +4949,56 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
return -1;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
+{
+ switch (bw) {
+ case 0b001:
+ return 9000000;
+ case 0b010:
+ return 18000000;
+ case 0b011:
+ return 24000000;
+ case 0b100:
+ return 32000000;
+ case 0b101:
+ return 40000000;
+ case 0b110:
+ return 48000000;
+ }
+
+ return 0;
+}
+
+/**
+ * Return the PCON's post-FRL link training supported BW if it is non-zero; otherwise return max_supported_frl_bw.
+ */
+static uint32_t intersect_frl_link_bw_support(
+ const uint32_t max_supported_frl_bw_in_kbps,
+ const union hdmi_encoded_link_bw hdmi_encoded_link_bw)
+{
+ uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
+
+ // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
+ if (hdmi_encoded_link_bw.bits.FRL_MODE) {
+ if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+ supported_bw_in_kbps = 48000000;
+ else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+ supported_bw_in_kbps = 40000000;
+ else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+ supported_bw_in_kbps = 32000000;
+ else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+ supported_bw_in_kbps = 24000000;
+ else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+ supported_bw_in_kbps = 18000000;
+ else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+ supported_bw_in_kbps = 9000000;
+ }
+
+ return supported_bw_in_kbps;
+}
+#endif
+
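A worked example of the two helpers above (hedged, unit-test style, values illustrative): the PCON reports a raw max of 0b110 (48 Gbps), but post-FRL training locked at 32 Gbps, so the intersected cap drops to 32,000,000 kbps.

union hdmi_encoded_link_bw post_frl = { .raw = 0 };
uint32_t max_kbps, bw_kbps;

post_frl.bits.FRL_MODE = 1;
post_frl.bits.BW_32Gbps = 1;

max_kbps = dc_link_bw_kbps_from_raw_frl_link_rate_data(0b110); /* 48000000 */
bw_kbps = intersect_frl_link_bw_support(max_kbps, post_frl);   /* 32000000 */
/* with FRL_MODE clear, bw_kbps would stay at max_kbps */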
static void read_dp_device_vendor_id(struct dc_link *link)
{
struct dp_device_vendor_id dp_id;
@@ -4264,6 +5110,27 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->dc->caps.hdmi_frl_pcon_support) {
+ union hdmi_encoded_link_bw hdmi_encoded_link_bw;
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
+ dc_link_bw_kbps_from_raw_frl_link_rate_data(
+ hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
+
+ // Intersect reported max link bw support with the supported link rate post FRL link training
+ if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
+ &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
+ hdmi_encoded_link_bw);
+ }
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ }
+#endif
+
if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
@@ -4454,7 +5321,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data,
sizeof(lttpr_dpcd_data));
if (status != DC_OK) {
- dm_error("%s: Read LTTPR caps data failed.\n", __func__);
+ DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__);
return false;
}
@@ -5218,7 +6085,7 @@ bool dc_link_dp_set_test_pattern(
DP_TEST_PATTERN_VIDEO_MODE) {
/* Set CRTC Test Pattern */
set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
- dp_set_hw_test_pattern(link, test_pattern,
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
(uint8_t *)p_custom_pattern,
(uint32_t)cust_pattern_size);
@@ -5240,8 +6107,18 @@ bool dc_link_dp_set_test_pattern(
if (is_dp_phy_pattern(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
- dp_set_hw_lane_settings(link, p_link_settings, DPRX);
- dpcd_set_lane_settings(link, p_link_settings, DPRX);
+ if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+ (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ dpcd_set_lane_settings(link, p_link_settings, DPRX);
+ vendor_specific_lttpr_wa_five(
+ link,
+ p_link_settings->dpcd_lane_settings,
+ p_link_settings->link_settings.lane_count);
+ } else {
+ dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX);
+ dpcd_set_lane_settings(link, p_link_settings, DPRX);
+ }
}
/* Blank stream if running test pattern */
@@ -5254,7 +6131,7 @@ bool dc_link_dp_set_test_pattern(
pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
}
- dp_set_hw_test_pattern(link, test_pattern,
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
(uint8_t *)p_custom_pattern,
(uint32_t)cust_pattern_size);
@@ -5574,7 +6451,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
return DP_PANEL_MODE_DEFAULT;
}
-enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
+enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready)
{
/* FEC has to be "set ready" before the link training.
* The policy is to always train with FEC
@@ -5665,6 +6542,23 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
}
}
+struct link_encoder *dp_get_link_enc(struct dc_link *link)
+{
+ struct link_encoder *link_enc;
+
+ link_enc = link->link_enc;
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc,
+ link);
+ if (!link->link_enc)
+ link_enc = link_enc_cfg_get_next_avail_link_enc(
+ link->ctx->dc);
+ }
+
+ return link_enc;
+}
+
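A short usage sketch for the new helper (hedged): with flexible DIG mapping, the encoder actually driving the link may differ from link->link_enc, so callers resolve it first.

struct link_encoder *link_enc = dp_get_link_enc(link);

ASSERT(link_enc);
if (link_enc)
	link_enc->funcs->disable_output(link_enc, link->connector_signal);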
void dpcd_set_source_specific_data(struct dc_link *link)
{
if (!link->dc->vendor_signature.is_valid) {
@@ -5885,7 +6779,10 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
- decide_edp_link_settings(link, &link_setting, req_bw);
+ if (!crtc_timing->flags.DSC)
+ decide_edp_link_settings(link, &link_setting, req_bw);
+ else
+ decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);
if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
@@ -6038,7 +6935,7 @@ bool dpcd_write_128b_132b_sst_payload_allocation_table(
}
}
retries++;
- udelay(5000);
+ msleep(5);
}
if (!result && retries == max_retries) {
@@ -6090,7 +6987,7 @@ bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link)
break;
}
- udelay(5000);
+ msleep(5);
}
if (result == ACT_FAILED) {
@@ -6121,8 +7018,21 @@ struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
{
+ /* If this assert is hit then we have a link encoder dynamic management issue */
+ ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
- pipe_ctx->stream->link->hpo_dp_link_enc &&
+ pipe_ctx->link_res.hpo_dp_link_enc &&
dc_is_dp_signal(pipe_ctx->stream->signal));
}
#endif
+
+void edp_panel_backlight_power_on(struct dc_link *link)
+{
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ if (link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, true);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index b1c9f77d6bf4..0e95bc5df4e7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -77,7 +77,9 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
* @param[in] link_setting Lane count, link rate and downspread control.
* @param[out] lt_settings Link settings and drive settings (voltage swing and pre-emphasis).
*/
-static enum link_training_result dpia_configure_link(struct dc_link *link,
+static enum link_training_result dpia_configure_link(
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings)
{
@@ -94,25 +96,25 @@ static enum link_training_result dpia_configure_link(struct dc_link *link,
lt_settings);
status = dpcd_configure_channel_coding(link, lt_settings);
- if (status != DC_OK && !link->hpd_status)
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
/* Configure lttpr mode */
status = dpcd_configure_lttpr_mode(link, lt_settings);
- if (status != DC_OK && !link->hpd_status)
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
/* Set link rate, lane count and spread. */
status = dpcd_set_link_settings(link, lt_settings);
- if (status != DC_OK && !link->hpd_status)
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
if (link->preferred_training_settings.fec_enable)
fec_enable = *link->preferred_training_settings.fec_enable;
else
fec_enable = true;
- status = dp_set_fec_ready(link, fec_enable);
- if (status != DC_OK && !link->hpd_status)
+ status = dp_set_fec_ready(link, link_res, fec_enable);
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
return LINK_TRAINING_SUCCESS;
@@ -252,7 +254,9 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
* @param hop The Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_cr_non_transparent(struct dc_link *link,
+static enum link_training_result dpia_training_cr_non_transparent(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t hop)
{
@@ -388,7 +392,7 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link
}
/* Abort link training if clock recovery failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
@@ -411,7 +415,9 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
*/
-static enum link_training_result dpia_training_cr_transparent(struct dc_link *link,
+static enum link_training_result dpia_training_cr_transparent(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
@@ -490,7 +496,7 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li
}
/* Abort link training if clock recovery failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
@@ -511,16 +517,18 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
* @param hop The Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_cr_phase(struct dc_link *link,
+static enum link_training_result dpia_training_cr_phase(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t hop)
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- result = dpia_training_cr_non_transparent(link, lt_settings, hop);
+ result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop);
else
- result = dpia_training_cr_transparent(link, lt_settings);
+ result = dpia_training_cr_transparent(link, link_res, lt_settings);
return result;
}
@@ -561,7 +569,9 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
* @param hop The Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_eq_non_transparent(struct dc_link *link,
+static enum link_training_result dpia_training_eq_non_transparent(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t hop)
{
@@ -675,7 +685,7 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link
}
/* Abort link training if equalization failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
@@ -700,7 +710,9 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
* @param hop The Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_eq_transparent(struct dc_link *link,
+static enum link_training_result dpia_training_eq_transparent(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings)
{
enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
@@ -758,7 +770,7 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li
}
/* Abort link training if equalization failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
@@ -779,16 +791,18 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
* @param hop The Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_eq_phase(struct dc_link *link,
+static enum link_training_result dpia_training_eq_phase(
+ struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings,
uint32_t hop)
{
enum link_training_result result;
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- result = dpia_training_eq_non_transparent(link, lt_settings, hop);
+ result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop);
else
- result = dpia_training_eq_transparent(link, lt_settings);
+ result = dpia_training_eq_transparent(link, link_res, lt_settings);
return result;
}
@@ -892,10 +906,10 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)
__func__,
link->link_id.enum_id - ENUM_ID_1,
link->lttpr_mode,
- link->hpd_status);
+ link->is_hpd_pending);
/* Abandon clean-up if sink unplugged. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
return;
if (hop != DPRX)
@@ -908,7 +922,9 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)
core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
}
-enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *link,
+enum link_training_result dc_link_dpia_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
bool skip_video_pattern)
{
@@ -918,7 +934,7 @@ enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *lin
int8_t repeater_id; /* Current hop. */
/* Configure link as prescribed in link_setting and set LTTPR mode. */
- result = dpia_configure_link(link, link_setting, &lt_settings);
+ result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
if (result != LINK_TRAINING_SUCCESS)
return result;
@@ -930,12 +946,12 @@ enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *lin
*/
for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) {
/* Clock recovery. */
- result = dpia_training_cr_phase(link, &lt_settings, repeater_id);
+ result = dpia_training_cr_phase(link, link_res, &lt_settings, repeater_id);
if (result != LINK_TRAINING_SUCCESS)
break;
/* Equalization. */
- result = dpia_training_eq_phase(link, &lt_settings, repeater_id);
+ result = dpia_training_eq_phase(link, link_res, &lt_settings, repeater_id);
if (result != LINK_TRAINING_SUCCESS)
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 25e48a8cbb78..a55944da8d53 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -118,7 +118,10 @@ static void remove_link_enc_assignment(
*/
if (get_stream_using_link_enc(state, eng_id) == NULL)
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id;
+
stream->link_enc = NULL;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL;
break;
}
}
@@ -148,6 +151,7 @@ static void add_link_enc_assignment(
.ep_type = stream->link->ep_type},
.eng_id = eng_id,
.stream = stream};
+ dc_stream_retain(stream);
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
break;
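The dc_stream_retain() added here pairs with the dc_stream_release() in clear_enc_assignments() below: a valid assignment now holds a stream reference for its lifetime. A hedged sketch of that ownership rule (illustrative helpers, not driver code):

static void assignment_set_stream(struct link_enc_assignment *a,
				  struct dc_stream_state *s)
{
	dc_stream_retain(s);	/* taken when the assignment is created */
	a->stream = s;
}

static void assignment_clear_stream(struct link_enc_assignment *a)
{
	if (a->stream) {
		dc_stream_release(a->stream);	/* dropped when it is cleared */
		a->stream = NULL;
	}
}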
@@ -227,7 +231,7 @@ static struct link_encoder *get_link_enc_used_by_link(
.link_id = link->link_id,
.ep_type = link->ep_type};
- for (i = 0; i < state->stream_count; i++) {
+ for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id))
@@ -237,28 +241,18 @@ static struct link_encoder *get_link_enc_used_by_link(
return link_enc;
}
/* Clear all link encoder assignments. */
-static void clear_enc_assignments(struct dc_state *state)
+static void clear_enc_assignments(const struct dc *dc, struct dc_state *state)
{
int i;
- enum engine_id eng_id;
- struct dc_stream_state *stream;
for (i = 0; i < MAX_PIPES; i++) {
state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false;
- eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id;
- stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream;
- if (eng_id != ENGINE_ID_UNKNOWN)
- state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id;
- if (stream)
- stream->link_enc = NULL;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN;
+ if (state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream != NULL) {
+ dc_stream_release(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream);
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL;
+ }
}
-}
-
-void link_enc_cfg_init(
- struct dc *dc,
- struct dc_state *state)
-{
- int i;
for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
if (dc->res_pool->link_encoders[i])
@@ -266,8 +260,13 @@ void link_enc_cfg_init(
else
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
}
+}
- clear_enc_assignments(state);
+void link_enc_cfg_init(
+ const struct dc *dc,
+ struct dc_state *state)
+{
+ clear_enc_assignments(dc, state);
state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
}
@@ -284,12 +283,9 @@ void link_enc_cfg_link_encs_assign(
ASSERT(state->stream_count == stream_count);
- if (stream_count == 0)
- clear_enc_assignments(state);
-
/* Release DIG link encoder resources before running assignment algorithm. */
- for (i = 0; i < stream_count; i++)
- dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
+ for (i = 0; i < dc->current_state->stream_count; i++)
+ dc->res_pool->funcs->link_enc_unassign(state, dc->current_state->streams[i]);
for (i = 0; i < MAX_PIPES; i++)
ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false);
@@ -544,6 +540,7 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
uint8_t dig_stream_count = 0;
int matching_stream_ptrs = 0;
int eng_ids_per_ep_id[MAX_PIPES] = {0};
+ int valid_bitmap = 0;
/* (1) No. valid entries same as stream count. */
for (i = 0; i < MAX_PIPES; i++) {
@@ -625,5 +622,15 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams;
ASSERT(is_valid);
+ if (is_valid == false) {
+ valid_bitmap =
+ (valid_entries & 0x1) |
+ ((valid_stream_ptrs & 0x1) << 1) |
+ ((valid_uniqueness & 0x1) << 2) |
+ ((valid_avail & 0x1) << 3) |
+ ((valid_streams & 0x1) << 4);
+ dm_error("Invalid link encoder assignments: 0x%x\n", valid_bitmap);
+ }
+
return is_valid;
}
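When the dm_error above fires, the bitmap can be unpacked with a decoder like this hedged sketch; the bit order matches the packing in link_enc_cfg_validate().

/* Illustrative decoder, not in the driver. */
static void decode_link_enc_valid_bitmap(int bitmap)
{
	dm_error("entries=%d stream_ptrs=%d uniqueness=%d avail=%d streams=%d\n",
		 (bitmap >> 0) & 0x1, (bitmap >> 1) & 0x1,
		 (bitmap >> 2) & 0x1, (bitmap >> 3) & 0x1,
		 (bitmap >> 4) & 0x1);
}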
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 368e834c6809..45d03d3a95c3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -71,6 +71,7 @@ void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
void dp_enable_link_phy(
struct dc_link *link,
+ const struct link_resource *link_res,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
@@ -135,7 +136,7 @@ void dp_enable_link_phy(
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
- enable_dp_hpo_output(link, link_settings);
+ enable_dp_hpo_output(link, link_res, link_settings);
} else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc_is_dp_sst_signal(signal)) {
link_enc->funcs->enable_dp_output(
@@ -236,12 +237,13 @@ bool edp_receiver_ready_T7(struct dc_link *link)
return result;
}
-void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
+void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
+ enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- struct hpo_dp_link_encoder *hpo_link_enc = link->hpo_dp_link_enc;
+ struct hpo_dp_link_encoder *hpo_link_enc = link_res->hpo_dp_link_enc;
#endif
struct link_encoder *link_enc;
@@ -260,7 +262,7 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
link->dc->hwss.edp_backlight_control(link, false);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
- disable_dp_hpo_output(link, signal);
+ disable_dp_hpo_output(link, link_res, signal);
else
link_enc->funcs->disable_output(link_enc, signal);
#else
@@ -274,7 +276,7 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&
hpo_link_enc)
- disable_dp_hpo_output(link, signal);
+ disable_dp_hpo_output(link, link_res, signal);
else
link_enc->funcs->disable_output(link_enc, signal);
#else
@@ -294,13 +296,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}
-void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
+void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
+ enum signal_type signal)
{
/* MST disable link only when no stream uses the link */
if (link->mst_stream_alloc_table.stream_count > 0)
return;
- dp_disable_link_phy(link, signal);
+ dp_disable_link_phy(link, link_res, signal);
/* set the sink to SST mode after disabling the link */
dp_enable_mst_on_sink(link, false);
@@ -308,6 +311,7 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
bool dp_set_hw_training_pattern(
struct dc_link *link,
+ const struct link_resource *link_res,
enum dc_dp_training_pattern pattern,
uint32_t offset)
{
@@ -338,7 +342,7 @@ bool dp_set_hw_training_pattern(
break;
}
- dp_set_hw_test_pattern(link, test_pattern, NULL, 0);
+ dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0);
return true;
}
@@ -349,6 +353,7 @@ bool dp_set_hw_training_pattern(
#endif
void dp_set_hw_lane_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct link_training_settings *link_settings,
uint32_t offset)
{
@@ -361,8 +366,8 @@ void dp_set_hw_lane_settings(
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link_settings->link_settings) ==
DP_128b_132b_ENCODING) {
- link->hpo_dp_link_enc->funcs->set_ffe(
- link->hpo_dp_link_enc,
+ link_res->hpo_dp_link_enc->funcs->set_ffe(
+ link_res->hpo_dp_link_enc,
&link_settings->link_settings,
link_settings->lane_settings[0].FFE_PRESET.raw);
} else if (dp_get_link_encoding_format(&link_settings->link_settings)
@@ -379,6 +384,7 @@ void dp_set_hw_lane_settings(
void dp_set_hw_test_pattern(
struct dc_link *link,
+ const struct link_resource *link_res,
enum dp_test_pattern test_pattern,
uint8_t *custom_pattern,
uint32_t custom_pattern_size)
@@ -406,8 +412,8 @@ void dp_set_hw_test_pattern(
#if defined(CONFIG_DRM_AMD_DC_DCN)
switch (link_encoding_format) {
case DP_128b_132b_ENCODING:
- link->hpo_dp_link_enc->funcs->set_link_test_pattern(
- link->hpo_dp_link_enc, &pattern_param);
+ link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
+ link_res->hpo_dp_link_enc, &pattern_param);
break;
case DP_8b_10b_ENCODING:
ASSERT(encoder);
@@ -446,7 +452,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i].stream_res.stream_enc);
/* disable any test pattern that might be active */
- dp_set_hw_test_pattern(link,
+ dp_set_hw_test_pattern(link, &pipes[i].link_res,
DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
dp_receiver_power_ctrl(link, false);
@@ -763,7 +769,9 @@ static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
}
}
-void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings)
+void enable_dp_hpo_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings)
{
const struct dc *dc = link->dc;
enum phyd32clk_clock_source phyd32clk;
@@ -789,10 +797,11 @@ void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *l
}
} else {
/* DP2.0 HW: call transmitter control to enable PHY */
- link->hpo_dp_link_enc->funcs->enable_link_phy(
- link->hpo_dp_link_enc,
+ link_res->hpo_dp_link_enc->funcs->enable_link_phy(
+ link_res->hpo_dp_link_enc,
link_settings,
- link->link_enc->transmitter);
+ link->link_enc->transmitter,
+ link->link_enc->hpd_source);
}
/* DCCG muxing and DTBCLK DTO */
@@ -806,24 +815,26 @@ void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *l
phyd32clk = get_phyd32clk_src(link);
dc->res_pool->dccg->funcs->enable_symclk32_le(
dc->res_pool->dccg,
- link->hpo_dp_link_enc->inst,
+ link_res->hpo_dp_link_enc->inst,
phyd32clk);
- link->hpo_dp_link_enc->funcs->link_enable(
- link->hpo_dp_link_enc,
- link_settings->lane_count);
+ link_res->hpo_dp_link_enc->funcs->link_enable(
+ link_res->hpo_dp_link_enc,
+ link_settings->lane_count);
}
}
-void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal)
+void disable_dp_hpo_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
{
const struct dc *dc = link->dc;
- link->hpo_dp_link_enc->funcs->link_disable(link->hpo_dp_link_enc);
+ link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
dc->res_pool->dccg->funcs->disable_symclk32_le(
dc->res_pool->dccg,
- link->hpo_dp_link_enc->inst);
+ link_res->hpo_dp_link_enc->inst);
dc->res_pool->dccg->funcs->set_physymclk(
dc->res_pool->dccg,
@@ -834,8 +845,8 @@ void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal)
dm_set_phyd32clk(dc->ctx, 0);
} else {
/* DP2.0 HW: call transmitter control to disable PHY */
- link->hpo_dp_link_enc->funcs->disable_link_phy(
- link->hpo_dp_link_enc,
+ link_res->hpo_dp_link_enc->funcs->disable_link_phy(
+ link_res->hpo_dp_link_enc,
signal);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index e2d9a46d0e1a..b3912ff9dc91 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1724,6 +1724,94 @@ static void update_hpo_dp_stream_engine_usage(
res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired;
}
}
+
+static inline int find_acquired_hpo_dp_link_enc_for_link(
+ const struct resource_context *res_ctx,
+ const struct dc_link *link)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_to_link_idx); i++)
+ if (res_ctx->hpo_dp_link_enc_ref_cnts[i] > 0 &&
+ res_ctx->hpo_dp_link_enc_to_link_idx[i] == link->link_index)
+ return i;
+
+ return -1;
+}
+
+static inline int find_free_hpo_dp_link_enc(const struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts); i++)
+ if (res_ctx->hpo_dp_link_enc_ref_cnts[i] == 0)
+ break;
+
+ return (i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts) &&
+ i < pool->hpo_dp_link_enc_count) ? i : -1;
+}
+
+static inline void acquire_hpo_dp_link_enc(
+ struct resource_context *res_ctx,
+ unsigned int link_index,
+ int enc_index)
+{
+ res_ctx->hpo_dp_link_enc_to_link_idx[enc_index] = link_index;
+ res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] = 1;
+}
+
+static inline void retain_hpo_dp_link_enc(
+ struct resource_context *res_ctx,
+ int enc_index)
+{
+ res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]++;
+}
+
+static inline void release_hpo_dp_link_enc(
+ struct resource_context *res_ctx,
+ int enc_index)
+{
+ ASSERT(res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] > 0);
+ res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]--;
+}
+
+static bool add_hpo_dp_link_enc_to_ctx(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ int enc_index;
+
+ enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link);
+
+ if (enc_index >= 0) {
+ retain_hpo_dp_link_enc(res_ctx, enc_index);
+ } else {
+ enc_index = find_free_hpo_dp_link_enc(res_ctx, pool);
+ if (enc_index >= 0)
+ acquire_hpo_dp_link_enc(res_ctx, stream->link->link_index, enc_index);
+ }
+
+ if (enc_index >= 0)
+ pipe_ctx->link_res.hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index];
+
+ return pipe_ctx->link_res.hpo_dp_link_enc != NULL;
+}
+
+static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ int enc_index;
+
+ enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link);
+
+ if (enc_index >= 0) {
+ release_hpo_dp_link_enc(res_ctx, enc_index);
+ pipe_ctx->link_res.hpo_dp_link_enc = NULL;
+ }
+}
#endif
/* TODO: release audio object */
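The helpers above implement a small reference-counted allocator: the first stream on a link claims a free HPO DP link encoder, later streams on the same link share it, and the encoder is only freed when the last reference drops. A minimal standalone sketch of that bookkeeping, with a hypothetical pool size:

#include <assert.h>
#include <stdio.h>

#define MAX_HPO_LINK_ENC 2	/* hypothetical pool size */

static int ref_cnts[MAX_HPO_LINK_ENC];
static unsigned enc_to_link[MAX_HPO_LINK_ENC];

static int find_acquired(unsigned link_index)
{
	for (int i = 0; i < MAX_HPO_LINK_ENC; i++)
		if (ref_cnts[i] > 0 && enc_to_link[i] == link_index)
			return i;
	return -1;
}

static int find_free(void)
{
	for (int i = 0; i < MAX_HPO_LINK_ENC; i++)
		if (ref_cnts[i] == 0)
			return i;
	return -1;
}

static int acquire(unsigned link_index)
{
	int i = find_acquired(link_index);

	if (i >= 0) {		/* same link: share the encoder, bump the count */
		ref_cnts[i]++;
		return i;
	}
	i = find_free();
	if (i >= 0) {		/* first stream on this link: claim a free encoder */
		enc_to_link[i] = link_index;
		ref_cnts[i] = 1;
	}
	return i;		/* -1 maps to DC_NO_LINK_ENC_RESOURCE above */
}

static void release(int i)
{
	assert(ref_cnts[i] > 0);
	ref_cnts[i]--;
}

int main(void)
{
	int a = acquire(0), b = acquire(0), c = acquire(1);

	printf("link0 -> enc %d (second stream shares: enc %d), link1 -> enc %d\n",
	       a, b, c);
	release(b);
	release(a);
	release(c);
	return 0;
}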
@@ -1886,6 +1974,7 @@ enum dc_status dc_remove_stream_from_ctx(
&new_ctx->res_ctx, dc->res_pool,
del_pipe->stream_res.hpo_dp_stream_enc,
false);
+ remove_hpo_dp_link_enc_from_ctx(&new_ctx->res_ctx, del_pipe, del_pipe->stream);
}
#endif
@@ -2082,7 +2171,6 @@ static void mark_seamless_boot_stream(
{
struct dc_bios *dcb = dc->ctx->dc_bios;
- /* TODO: Check Linux */
if (dc->config.allow_seamless_boot_optimization &&
!dcb->funcs->is_accelerated_mode(dcb)) {
if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
@@ -2162,6 +2250,8 @@ enum dc_status resource_map_pool_resources(
&context->res_ctx, pool,
pipe_ctx->stream_res.hpo_dp_stream_enc,
true);
+ if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, pool, pipe_ctx, stream))
+ return DC_NO_LINK_ENC_RESOURCE;
}
}
#endif
@@ -2228,6 +2318,9 @@ void dc_resource_state_construct(
struct dc_state *dst_ctx)
{
dst_ctx->clk_mgr = dc->clk_mgr;
+
+ /* Initialise DIG link encoder resource tracking variables. */
+ link_enc_cfg_init(dc, dst_ctx);
}
@@ -2510,17 +2603,7 @@ static void set_avi_info_frame(
/* TODO : We should handle YCC quantization */
/* but we do not have matrix calculation */
- if (stream->qy_bit == 1) {
- if (color_space == COLOR_SPACE_SRGB ||
- color_space == COLOR_SPACE_2020_RGB_FULLRANGE)
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
- color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- else
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- } else
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
///VIC
format = stream->timing.timing_3d_format;
@@ -2844,6 +2927,8 @@ bool pipe_need_reprogram(
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc)
return true;
+ if (pipe_ctx_old->link_res.hpo_dp_link_enc != pipe_ctx->link_res.hpo_dp_link_enc)
+ return true;
#endif
/* DIG link encoder resource assignment for stream changed. */
@@ -3112,21 +3197,109 @@ void get_audio_check(struct audio_info *aud_modes,
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
-struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
- const struct resource_pool *pool)
+struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
+ const struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct dc_link *link)
{
- uint8_t i;
- struct hpo_dp_link_encoder *enc = NULL;
+ struct hpo_dp_link_encoder *hpo_dp_link_enc = NULL;
+ int enc_index;
- ASSERT(pool->hpo_dp_link_enc_count <= MAX_HPO_DP2_LINK_ENCODERS);
+ enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, link);
- for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
- if (pool->hpo_dp_link_enc[i]->transmitter == TRANSMITTER_UNKNOWN) {
- enc = pool->hpo_dp_link_enc[i];
- break;
+ if (enc_index < 0)
+ enc_index = find_free_hpo_dp_link_enc(res_ctx, pool);
+
+ if (enc_index >= 0)
+ hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index];
+
+ return hpo_dp_link_enc;
+}
+#endif
+
+void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
+ struct dc_state *context)
+{
+ int i, j;
+ struct pipe_ctx *pipe_ctx_old, *pipe_ctx, *pipe_ctx_syncd;
+
+ /* If a pipe's backend is reset, its syncd status must be reset too */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+
+ /* Reset all the syncd pipes from the disabled pipe */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ pipe_ctx_syncd = &context->res_ctx.pipe_ctx[j];
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_syncd) == pipe_ctx_old->pipe_idx) ||
+ !IS_PIPE_SYNCD_VALID(pipe_ctx_syncd))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_syncd, j);
+ }
}
}
+}
+
+void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
+ struct dc_state *context,
+ uint8_t disabled_master_pipe_idx)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx, *pipe_ctx_check;
- return enc;
+ pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx];
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) ||
+ !IS_PIPE_SYNCD_VALID(pipe_ctx))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx, disabled_master_pipe_idx);
+
+ /* for the disabled master pipe, assert if any slave pipe is still syncd to it */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_check = &context->res_ctx.pipe_ctx[i];
+
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) &&
+ IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx))
+ DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
+ i, disabled_master_pipe_idx);
+ }
}
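Both helpers above lean on the GET/SET/IS_PIPE_SYNCD macros, which are not part of this hunk. A plausible standalone model, assuming pipe_syncd packs a master pipe index together with a valid flag (the flag value here is an assumption):

#include <stdio.h>

#define PIPE_COUNT 4
#define SYNCD_VALID 0x80	/* hypothetical valid flag inside pipe_syncd */

struct pipe { unsigned char pipe_syncd; };

static int get_syncd(const struct pipe *p)   { return p->pipe_syncd & ~SYNCD_VALID; }
static int is_valid(const struct pipe *p)    { return p->pipe_syncd & SYNCD_VALID; }
static void set_syncd(struct pipe *p, int m) { p->pipe_syncd = SYNCD_VALID | m; }

int main(void)
{
	struct pipe pipes[PIPE_COUNT] = { { 0 } };
	int disabled = 1;		/* master pipe being torn down */

	set_syncd(&pipes[2], disabled);	/* pipe 2 was syncd to master 1 */

	/* mirror of reset_syncd_pipes_from_disabled_pipes(): anything syncd
	 * to the disabled master, or carrying invalid syncd state, gets
	 * re-pointed at itself */
	for (int j = 0; j < PIPE_COUNT; j++)
		if (get_syncd(&pipes[j]) == disabled || !is_valid(&pipes[j]))
			set_syncd(&pipes[j], j);

	for (int j = 0; j < PIPE_COUNT; j++)
		printf("pipe %d syncd to %d\n", j, get_syncd(&pipes[j]));
	return 0;
}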
+
+uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
+{
+ /* TODO - get transmitter to phy idx mapping from DMUB */
+ uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc->ctx->dce_version == DCN_VERSION_3_1 &&
+ dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ phy_idx = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ phy_idx = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ phy_idx = 5;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ phy_idx = 6;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ phy_idx = 4;
+ break;
+ default:
+ phy_idx = 0;
+ break;
+ }
+ }
#endif
+ return phy_idx;
+}
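For parts other than Yellow Carp B0 the PHY index simply tracks the UNIPHY instance; B0 remaps C/D/E. A standalone sketch of the same mapping expressed as a lookup table:

#include <stdio.h>

enum transmitter { UNIPHY_A, UNIPHY_B, UNIPHY_C, UNIPHY_D, UNIPHY_E };

/* Default: the PHY index tracks the UNIPHY instance. Yellow Carp B0
 * remaps C/D/E exactly as the switch above does. */
static unsigned phy_idx(enum transmitter tx, int is_yellow_carp_b0)
{
	static const unsigned b0_map[] = { 0, 1, 5, 6, 4 };

	return is_yellow_carp_b0 ? b0_map[tx] : (unsigned)tx;
}

int main(void)
{
	for (int t = UNIPHY_A; t <= UNIPHY_E; t++)
		printf("UNIPHY_%c: default %u, YC-B0 %u\n",
		       'A' + t, phy_idx(t, 0), phy_idx(t, 1));
	return 0;
}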
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index a249a0e5edd0..4b5e4d8e7735 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -33,14 +33,6 @@
* Private functions
******************************************************************************/
-static void dc_sink_destruct(struct dc_sink *sink)
-{
- if (sink->dc_container_id) {
- kfree(sink->dc_container_id);
- sink->dc_container_id = NULL;
- }
-}
-
static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
{
@@ -75,7 +67,7 @@ void dc_sink_retain(struct dc_sink *sink)
static void dc_sink_free(struct kref *kref)
{
struct dc_sink *sink = container_of(kref, struct dc_sink, refcount);
- dc_sink_destruct(sink);
+ kfree(sink->dc_container_id);
kfree(sink);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 618e7989176f..288e7b01f561 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.160"
+#define DC_VER "3.2.167"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -75,6 +75,16 @@ enum dc_plane_type {
DC_PLANE_TYPE_DCN_UNIVERSAL,
};
+// Sizes defined as multiples of 64KB
+enum det_size {
+ DET_SIZE_DEFAULT = 0,
+ DET_SIZE_192KB = 3,
+ DET_SIZE_256KB = 4,
+ DET_SIZE_320KB = 5,
+ DET_SIZE_384KB = 6
+};
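Because the enum encodes DET buffer sizes as multiples of 64KB, converting a value back to kilobytes is a single multiply; a quick sketch:

#include <stdio.h>

/* mirrors the enum above: the value is the size in 64KB units */
enum det_size { DET_SIZE_DEFAULT = 0, DET_SIZE_192KB = 3, DET_SIZE_256KB = 4,
		DET_SIZE_320KB = 5, DET_SIZE_384KB = 6 };

int main(void)
{
	printf("%d KB\n", DET_SIZE_192KB * 64);	/* 3 * 64 = 192 */
	return 0;
}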
+
+
struct dc_plane_cap {
enum dc_plane_type type;
uint32_t blends_with_above : 1;
@@ -187,7 +197,9 @@ struct dc_caps {
struct dc_color_caps color;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dp_hpo;
+ bool hdmi_frl_pcon_support;
#endif
+ bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
};
@@ -332,6 +344,7 @@ struct dc_config {
uint8_t vblank_alignment_max_frame_time_diff;
bool is_asymmetric_memory;
bool is_single_rank_dimm;
+ bool use_pipe_ctx_sync_logic;
};
enum visual_confirm {
@@ -509,7 +522,8 @@ union dpia_debug_options {
uint32_t force_non_lttpr:1;
uint32_t extend_aux_rd_interval:1;
uint32_t disable_mst_dsc_work_around:1;
- uint32_t reserved:28;
+ uint32_t hpd_delay_in_ms:12;
+ uint32_t reserved:16;
} bits;
uint32_t raw;
};
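The new 12-bit hpd_delay_in_ms field shrinks reserved from 28 to 16 bits, so the union still packs into one 32-bit register. A sketch that checks the packing at compile time; the leading one-bit knob before force_non_lttpr is not visible in this hunk, so its name here is hypothetical:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

union dpia_debug_options_sketch {
	struct {
		uint32_t disable_dpia:1;	/* hypothetical leading knob */
		uint32_t force_non_lttpr:1;
		uint32_t extend_aux_rd_interval:1;
		uint32_t disable_mst_dsc_work_around:1;
		uint32_t hpd_delay_in_ms:12;
		uint32_t reserved:16;		/* 1+1+1+1+12+16 = 32 */
	} bits;
	uint32_t raw;
};

static_assert(sizeof(union dpia_debug_options_sketch) == sizeof(uint32_t),
	      "debug bits must pack into the single 32-bit raw register");

int main(void)
{
	union dpia_debug_options_sketch o = { .raw = 0 };

	o.bits.hpd_delay_in_ms = 500;	/* e.g. delay HPD handling by 500 ms */
	printf("raw = 0x%08x\n", o.raw);
	return 0;
}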
@@ -574,6 +588,8 @@ struct dc_debug_options {
bool native422_support;
bool disable_dsc;
enum visual_confirm visual_confirm;
+ int visual_confirm_rect_height;
+
bool sanity_checks;
bool max_disp_clk;
bool surface_trace;
@@ -668,11 +684,15 @@ struct dc_debug_options {
bool validate_dml_output;
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
+ bool disable_dsc_edp;
+ unsigned int force_dsc_edp_policy;
bool enable_dram_clock_change_one_display_vactive;
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* TODO - remove once tested */
bool legacy_dp2_lt;
bool set_mst_en_for_sst;
+ bool disable_uhbr;
+ bool force_dp2_lt_fallback_method;
#endif
union mem_low_power_enable_options enable_mem_low_power;
union root_clock_optimization_options root_clock_optimization;
@@ -685,11 +705,14 @@ struct dc_debug_options {
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
+ enum det_size crb_alloc_policy;
+ int crb_alloc_policy_min_disp_count;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_z10;
bool enable_sw_cntl_psr;
union dpia_debug_options dpia_debug;
#endif
+ bool apply_vendor_specific_lttpr_wa;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -1290,6 +1313,11 @@ struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ // 'true' if MST topology supports DSC passthrough for sink
+ // 'false' if MST topology does not support DSC passthrough
+ bool is_dsc_passthrough_supported;
+#endif
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
@@ -1405,6 +1433,9 @@ void dc_unlock_memory_clock_frequency(struct dc *dc);
*/
void dc_lock_memory_clock_frequency(struct dc *dc);
+/* set soft max for memclk, to be used for AC/DC switching clock limitations */
+void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable);
+
/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 360f3199ea6f..541376fabbef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -115,13 +115,44 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
}
}
+void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv)
+{
+ struct dmub_srv *dmub = dmub_srv->dmub;
+ struct dc_context *dc_ctx = dmub_srv->ctx;
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ status = dmub_srv_clear_inbox0_ack(dmub);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dmub_srv);
+ }
+}
+
+void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv)
+{
+ struct dmub_srv *dmub = dmub_srv->dmub;
+ struct dc_context *dc_ctx = dmub_srv->ctx;
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
+ dc_dmub_srv_log_diagnostic_data(dmub_srv);
+ }
+}
+
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_data_register data)
{
struct dmub_srv *dmub = dmub_srv->dmub;
- if (dmub->hw_funcs.send_inbox0_cmd)
- dmub->hw_funcs.send_inbox0_cmd(dmub, data);
- // TODO: Add wait command -- poll register for ACK
+ struct dc_context *dc_ctx = dmub_srv->ctx;
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ status = dmub_srv_send_inbox0_cmd(dmub, data);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error sending INBOX0 cmd\n");
+ dc_dmub_srv_log_diagnostic_data(dmub_srv);
+ }
}
bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 3e35eee7188c..7e4e2ec5915d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -68,6 +68,8 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu
void dc_dmub_trace_event_control(struct dc *dc, bool enable);
+void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
+void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index e68e9a86a4d9..353dac420f34 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -378,7 +378,14 @@ enum dpcd_downstream_port_detailed_type {
union dwnstream_port_caps_byte2 {
struct {
uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3;
+ uint8_t SOURCE_CONTROL_MODE_SUPPORT:1;
+ uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1;
+ uint8_t RESERVED:1;
+#else
uint8_t RESERVED:6;
+#endif
} bits;
uint8_t raw;
};
@@ -416,6 +423,30 @@ union dwnstream_port_caps_byte3_hdmi {
uint8_t raw;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+union hdmi_sink_encoded_link_bw_support {
+ struct {
+ uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3;
+ uint8_t RESERVED:5;
+ } bits;
+ uint8_t raw;
+};
+
+union hdmi_encoded_link_bw {
+ struct {
+ uint8_t FRL_MODE:1; // Bit 0
+ uint8_t BW_9Gbps:1;
+ uint8_t BW_18Gbps:1;
+ uint8_t BW_24Gbps:1;
+ uint8_t BW_32Gbps:1;
+ uint8_t BW_40Gbps:1;
+ uint8_t BW_48Gbps:1;
+ uint8_t RESERVED:1; // Bit 7
+ } bits;
+ uint8_t raw;
+};
+#endif
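dc_link_bw_kbps_from_raw_frl_link_rate_data(), declared further down in dc_link.h, presumably turns FRL capability data into a bandwidth figure. A hedged standalone sketch over the bitmask union above, assuming the Gbps values implied by the bit names and picking the highest rate the sink advertises:

#include <stdint.h>
#include <stdio.h>

/* Assumed mapping from the highest FRL bandwidth bit a sink advertises to
 * kbps; the Gbps figures come from the bit names above, and the real DC
 * helper may differ in detail. */
static uint32_t frl_bw_bits_to_kbps(uint8_t raw)
{
	static const struct { uint8_t bit; uint32_t kbps; } map[] = {
		{ 1 << 6, 48000000 }, { 1 << 5, 40000000 },
		{ 1 << 4, 32000000 }, { 1 << 3, 24000000 },
		{ 1 << 2, 18000000 }, { 1 << 1,  9000000 },
	};

	for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (raw & map[i].bit)
			return map[i].kbps;
	return 0;	/* only FRL_MODE (or nothing) set: no FRL bandwidth */
}

int main(void)
{
	printf("%u kbps\n", frl_bw_bits_to_kbps(0x0f));	/* up to 24G -> 24000000 */
	return 0;
}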
+
/*4-byte structure for detailed capabilities of a down-stream port
(DP-to-TMDS converter).*/
union dwnstream_portxcaps {
@@ -852,6 +883,15 @@ struct psr_caps {
unsigned char psr_version;
unsigned int psr_rfb_setup_time;
bool psr_exit_link_training_required;
+ unsigned char edp_revision;
+ unsigned char support_ver;
+ bool su_granularity_required;
+ bool y_coordinate_required;
+ uint8_t su_y_granularity;
+ bool alpm_cap;
+ bool standby_support;
+ uint8_t rate_control_caps;
+ unsigned int psr_power_opt_flag;
};
/* Length of router topology ID read from DPCD in bytes. */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 52355fe6994c..eac34f591a3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -741,6 +741,9 @@ struct dc_dsc_config {
uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */
bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */
+#endif
bool is_dp; /* indicate if DSC is applied based on DP's capability */
};
struct dc_crtc_timing {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index fad3d883ed89..c0e37ad0e26c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -30,6 +30,8 @@
#include "dc_types.h"
#include "grph_object_defs.h"
+struct link_resource;
+
enum dc_link_fec_state {
dc_link_fec_not_ready,
dc_link_fec_ready,
@@ -113,6 +115,7 @@ struct dc_link {
* DIG encoder. */
bool is_dig_mapping_flexible;
bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a newly received HPD */
bool edp_sink_present;
@@ -159,9 +162,6 @@ struct dc_link {
struct panel_cntl *panel_cntl;
struct link_encoder *link_enc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- struct hpo_dp_link_encoder *hpo_dp_link_enc;
-#endif
struct graphics_object_id link_id;
/* Endpoint type distinguishes display endpoints which do not have entries
* in the BIOS connector table from those that do. Helps when tracking link
@@ -185,6 +185,10 @@ struct dc_link {
/* Drive settings read from integrated info table */
struct dc_lane_settings bios_forced_drive_settings;
+ /* Vendor specific LTTPR workaround variables */
+ uint8_t vendor_specific_lttpr_link_rate_wa;
+ bool apply_vendor_specific_lttpr_link_rate_wa;
+
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
@@ -291,6 +295,10 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency);
+void dc_link_blank_all_dp_displays(struct dc *dc);
+
+void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init);
+
/* Request DC to detect if there is a Panel connected.
* boot - If this call is during initial boot.
* Return false for any type of detection failure or MST detection
@@ -302,7 +310,7 @@ enum dc_detect_reason {
DETECT_REASON_HPD,
DETECT_REASON_HPDRX,
DETECT_REASON_FALLBACK,
- DETECT_REASON_RETRAIN
+ DETECT_REASON_RETRAIN,
};
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
@@ -350,14 +358,17 @@ void dc_link_remove_remote_sink(
void dc_link_dp_set_drive_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
struct link_training_settings *lt_settings);
bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting);
enum link_training_result dc_link_dp_perform_link_training(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
bool skip_video_pattern);
@@ -365,6 +376,7 @@ bool dc_link_dp_sync_lt_begin(struct dc_link *link);
enum link_training_result dc_link_dp_sync_lt_attempt(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *link_setting,
struct dc_link_training_overrides *lt_settings);
@@ -442,6 +454,13 @@ bool dc_link_is_fec_supported(const struct dc_link *link);
bool dc_link_should_enable_fec(const struct dc_link *link);
#if defined(CONFIG_DRM_AMD_DC_DCN)
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
#endif
+
+const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link);
+/* take a snapshot of current link resource allocation state */
+void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
+/* restore link resource allocation state from a snapshot */
+void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 388457ffc0a8..0285a4b38d05 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -430,6 +430,7 @@ struct dc_dongle_caps {
uint32_t dp_hdmi_max_bpc;
uint32_t dp_hdmi_max_pixel_clk_in_khz;
#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint32_t dp_hdmi_frl_max_link_bw_in_kbps;
struct dc_dongle_dfp_cap_ext dfp_cap_ext;
#endif
};
@@ -950,6 +951,7 @@ enum dc_gpu_mem_alloc_type {
enum dc_psr_version {
DC_PSR_VERSION_1 = 0,
+ DC_PSR_VERSION_SU_1 = 1,
DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 27218ede150a..70eaac017624 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -67,9 +67,6 @@ static void write_indirect_azalia_reg(struct audio *audio,
/* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
AZALIA_ENDPOINT_REG_DATA, reg_data);
-
- DC_LOG_HW_AUDIO("AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
- reg_index, reg_data);
}
static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index)
@@ -85,9 +82,6 @@ static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index
/* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
- DC_LOG_HW_AUDIO("AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
- reg_index, value);
-
return value;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
index 5622d5e32d81..dbd2cfed0603 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -113,6 +113,7 @@ struct dce_audio_shift {
uint8_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO;
+ uint32_t CLOCK_GATING_DISABLE;
};
struct dce_audio_mask {
@@ -132,6 +133,7 @@ struct dce_audio_mask {
uint32_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO;
+ uint32_t CLOCK_GATING_DISABLE;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 1e77ffee71b3..f1c61d5aee6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -788,8 +788,9 @@ static bool dce110_link_encoder_validate_hdmi_output(
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
- if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
- adjusted_pix_clk_khz >= 300000)
+ if ((!enc110->base.features.flags.bits.HDMI_6GB_EN ||
+ enc110->base.ctx->dc->debug.hdmi20_disable) &&
+ adjusted_pix_clk_khz >= 300000)
return false;
if (enc110->base.ctx->dc->debug.hdmi20_disable &&
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index 9baf8ca0a920..b1b2e3c6f379 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -56,8 +56,11 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_cmd_lock_hw hw_lock_cmd)
{
union dmub_inbox0_data_register data = { 0 };
+
data.inbox0_cmd_lock_hw = hw_lock_cmd;
+ dc_dmub_srv_clear_inbox0_ack(dmub_srv);
dc_dmub_srv_send_inbox0_cmd(dmub_srv, data);
+ dc_dmub_srv_wait_for_inbox0_ack(dmub_srv);
}
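The lock manager now brackets the INBOX0 send with an explicit handshake: clear any stale ack, send the command, then poll until firmware raises the ack. A toy model of why the clear step matters:

#include <stdbool.h>
#include <stdio.h>

/* toy model: the ack bit must be cleared before sending so a stale ack
 * left over from the previous command cannot satisfy the new wait */
static bool ack_bit;

static void clear_ack(void)		{ ack_bit = false; }
static void send_cmd(unsigned data)	{ (void)data; ack_bit = true; /* fw acks */ }

static bool wait_for_ack(int timeout)
{
	while (timeout-- > 0)
		if (ack_bit)
			return true;
	return false;
}

int main(void)
{
	clear_ack();			/* dc_dmub_srv_clear_inbox0_ack() */
	send_cmd(0x1);			/* dc_dmub_srv_send_inbox0_cmd() */
	printf("acked: %d\n", wait_for_ack(100000)); /* ..._wait_for_inbox0_ack() */
	return 0;
}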
bool should_use_dmub_lock(struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 90eb8eedacf2..87ed48d5530d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -230,7 +230,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
/**
* Set PSR power optimization flags.
*/
-static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt)
+static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -239,7 +239,9 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt
cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR;
cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT;
cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data);
+ cmd.psr_set_power_opt.psr_set_power_opt_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
+ cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
@@ -327,6 +329,16 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us;
copy_settings_data->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
copy_settings_data->panel_inst = panel_inst;
+ copy_settings_data->dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1);
+
+ if (link->fec_state == dc_link_fec_enabled &&
+ (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
+ sizeof(link->dpcd_caps.sink_dev_id_str)) ||
+ !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
+ sizeof(link->dpcd_caps.sink_dev_id_str))))
+ copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1;
+ else
+ copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
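The force_wakeup_by_tps3 gate above keys off FEC state plus an exact sink device ID match. A standalone sketch of the same predicate, with hypothetical ID strings standing in for DP_SINK_DEVICE_STR_ID_1/2:

#include <stdio.h>
#include <string.h>

#define ID_LEN 6

/* hypothetical sink IDs; the real DP_SINK_DEVICE_STR_ID_1/2 strings live
 * in the DC headers */
static const char id1[ID_LEN] = "SINK_A";
static const char id2[ID_LEN] = "SINK_B";

static int force_wakeup_by_tps3(int fec_enabled, const char *sink_id)
{
	return fec_enabled &&
	       (!memcmp(sink_id, id1, ID_LEN) || !memcmp(sink_id, id2, ID_LEN));
}

int main(void)
{
	printf("%d\n", force_wakeup_by_tps3(1, "SINK_A"));	/* 1 */
	printf("%d\n", force_wakeup_by_tps3(1, "OTHERS"));	/* 0 */
	return 0;
}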
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 5dbd479660f1..01acc01cc191 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -46,7 +46,7 @@ struct dmub_psr_funcs {
void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst);
void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency,
uint8_t panel_inst);
- void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt);
+ void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst);
};
struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 24e47df526f6..f3ff141b706a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -69,6 +69,8 @@
#include "dcn10/dcn10_hw_sequencer.h"
+#include "dce110_hw_sequencer.h"
+
#define GAMMA_HW_POINTS_NUM 256
/*
@@ -1564,6 +1566,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
&pipe_ctx->stream->audio_info);
}
+ /* make sure no pipes are syncd to the pipe being enabled */
+ if (!pipe_ctx->stream->apply_seamless_boot_optimization && dc->config.use_pipe_ctx_sync_logic)
+ check_syncd_pipes_for_disabled_master_pipe(dc, context, pipe_ctx->pipe_idx);
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
@@ -1602,6 +1608,11 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
+ if (dc_is_embedded_signal(pipe_ctx->stream->signal) &&
+ pipe_ctx->stream_res.stream_enc->funcs->reset_fifo)
+ pipe_ctx->stream_res.stream_enc->funcs->reset_fifo(
+ pipe_ctx->stream_res.stream_enc);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
@@ -1655,30 +1666,12 @@ static enum dc_status apply_single_controller_ctx_to_hw(
static void power_down_encoders(struct dc *dc)
{
- int i, j;
+ int i;
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
- if ((signal == SIGNAL_TYPE_EDP) ||
- (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
- if (dc->links[i]->link_enc->funcs->get_dig_frontend &&
- dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) {
- unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
- dc->links[i]->link_enc);
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
-
- if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
- dp_receiver_power_ctrl(dc->links[i], false);
- }
+ dc_link_blank_dp_stream(dc->links[i], false);
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
@@ -1805,7 +1798,6 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
struct dc_stream_state *edp_streams[MAX_NUM_EDP];
struct dc_link *edp_link_with_sink = NULL;
struct dc_link *edp_link = NULL;
- struct dc_stream_state *edp_stream = NULL;
struct dce_hwseq *hws = dc->hwseq;
int edp_with_sink_num;
int edp_num;
@@ -1826,27 +1818,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
get_edp_streams(context, edp_streams, &edp_stream_num);
// Check fastboot support, disable on DCE8 because of blank screens
- if (edp_num && dc->ctx->dce_version != DCE_VERSION_8_0 &&
+ if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 &&
dc->ctx->dce_version != DCE_VERSION_8_1 &&
dc->ctx->dce_version != DCE_VERSION_8_3) {
for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i];
+ if (edp_link != edp_streams[0]->link)
+ continue;
// enable fastboot if backend is enabled on eDP
- if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) {
- /* Set optimization flag on eDP stream*/
- if (edp_stream_num && edp_link->link_status.link_active) {
- edp_stream = edp_streams[0];
- can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing);
- edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
- if (can_apply_edp_fast_boot)
- DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n");
-
- break;
- }
+ if (edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ edp_link->link_status.link_active) {
+ struct dc_stream_state *edp_stream = edp_streams[0];
+
+ can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing);
+ edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
+ if (can_apply_edp_fast_boot)
+ DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n");
+
+ break;
}
}
// We are trying to enable eDP, don't power down VDD
- if (edp_stream_num)
+ if (can_apply_edp_fast_boot)
keep_edp_vdd_on = true;
}
@@ -2307,6 +2301,10 @@ enum dc_status dce110_apply_ctx_to_hw(
enum dc_status status;
int i;
+ /* reset syncd pipes from disabled pipes */
+ if (dc->config.use_pipe_ctx_sync_logic)
+ reset_syncd_pipes_from_disabled_pipes(dc, context);
+
/* Reset old context */
/* look up the targets that have been removed since last commit */
hws->funcs.reset_hw_ctx_wrap(dc, context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 91fdfcd8a14e..db7ca4b0cdb9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -119,14 +119,6 @@ void dpp_read_state(struct dpp *dpp_base,
}
}
-/* Program gamut remap in bypass mode */
-void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
-{
- REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
- CM_GAMUT_REMAP_MODE, 0);
- /* Gamut remap in bypass */
-}
-
#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
bool dpp1_get_optimal_number_of_taps(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index 44293d66b46b..f607a0e28f14 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -39,6 +39,10 @@
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
+#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3
+#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1
+#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10
+
#define REG(reg)\
dpp->tf_regs->reg
@@ -85,51 +89,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-static void dpp1_dscl_set_overscan(
- struct dcn10_dpp *dpp,
- const struct scaler_data *data)
-{
- uint32_t left = data->recout.x;
- uint32_t top = data->recout.y;
-
- int right = data->h_active - data->recout.x - data->recout.width;
- int bottom = data->v_active - data->recout.y - data->recout.height;
-
- if (right < 0) {
- BREAK_TO_DEBUGGER();
- right = 0;
- }
- if (bottom < 0) {
- BREAK_TO_DEBUGGER();
- bottom = 0;
- }
-
- REG_SET_2(DSCL_EXT_OVERSCAN_LEFT_RIGHT, 0,
- EXT_OVERSCAN_LEFT, left,
- EXT_OVERSCAN_RIGHT, right);
-
- REG_SET_2(DSCL_EXT_OVERSCAN_TOP_BOTTOM, 0,
- EXT_OVERSCAN_BOTTOM, bottom,
- EXT_OVERSCAN_TOP, top);
-}
-
-static void dpp1_dscl_set_otg_blank(
- struct dcn10_dpp *dpp, const struct scaler_data *data)
-{
- uint32_t h_blank_start = data->h_active;
- uint32_t h_blank_end = 0;
- uint32_t v_blank_start = data->v_active;
- uint32_t v_blank_end = 0;
-
- REG_SET_2(OTG_H_BLANK, 0,
- OTG_H_BLANK_START, h_blank_start,
- OTG_H_BLANK_END, h_blank_end);
-
- REG_SET_2(OTG_V_BLANK, 0,
- OTG_V_BLANK_START, v_blank_start,
- OTG_V_BLANK_END, v_blank_end);
-}
-
static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
{
if (depth == LB_PIXEL_DEPTH_30BPP)
@@ -551,58 +510,6 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
return LB_MEMORY_CONFIG_0;
}
-void dpp1_dscl_set_scaler_auto_scale(
- struct dpp *dpp_base,
- const struct scaler_data *scl_data)
-{
- enum lb_memory_config lb_config;
- struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode(
- dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
- bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
- && scl_data->format <= PIXEL_FORMAT_VIDEO_END;
-
- dpp1_dscl_set_overscan(dpp, scl_data);
-
- dpp1_dscl_set_otg_blank(dpp, scl_data);
-
- REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
-
- if (dscl_mode == DSCL_MODE_DSCL_BYPASS)
- return;
-
- lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data);
- dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
-
- if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
- return;
-
- /* TODO: v_min */
- REG_SET_3(DSCL_AUTOCAL, 0,
- AUTOCAL_MODE, AUTOCAL_MODE_AUTOSCALE,
- AUTOCAL_NUM_PIPE, 0,
- AUTOCAL_PIPE_ID, 0);
-
- /* Black offsets */
- if (ycbcr)
- REG_SET_2(SCL_BLACK_OFFSET, 0,
- SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
- SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
- else
-
- REG_SET_2(SCL_BLACK_OFFSET, 0,
- SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
- SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
-
- REG_SET_4(SCL_TAP_CONTROL, 0,
- SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1,
- SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1,
- SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1,
- SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1);
-
- dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr);
-}
-
static void dpp1_dscl_set_manual_ratio_init(
struct dcn10_dpp *dpp, const struct scaler_data *data)
@@ -685,9 +592,17 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
const struct rect *recout)
{
int visual_confirm_on = 0;
+ unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT;
+
if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
visual_confirm_on = 1;
+ /* Check bounds to ensure the VC bar height was set to a sane value */
+ if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) &&
+ (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) {
+ visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height;
+ }
+
REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT in the active OTG area */
RECOUT_START_X, recout->x,
@@ -699,7 +614,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
RECOUT_HEIGHT, recout->height
- - visual_confirm_on * 2 * (dpp->base.inst + 1));
+ - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height));
}
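With visual confirm enabled, each pipe now trims 2 * (inst + bar_height) lines from RECOUT_HEIGHT, where the bar height comes from the clamped debug option (default 3) instead of the fixed 1 used before. A worked sketch of the arithmetic:

#include <stdio.h>

#define VC_HEIGHT_DEFAULT 3
#define VC_HEIGHT_MIN 1
#define VC_HEIGHT_MAX 10

/* mirrors dpp1_dscl_set_recout(): with visual confirm on, each pipe trims
 * 2 * (inst + bar) lines off RECOUT so the per-pipe bars stack visibly */
static int recout_height(int height, int inst, int dbg_height, int vc_on)
{
	int bar = VC_HEIGHT_DEFAULT;

	if (dbg_height >= VC_HEIGHT_MIN && dbg_height <= VC_HEIGHT_MAX)
		bar = dbg_height;
	return height - vc_on * 2 * (inst + bar);
}

int main(void)
{
	/* 1080 lines, pipe 0, debug height 5 -> 1080 - 2 * (0 + 5) = 1070 */
	printf("%d\n", recout_height(1080, 0, 5, 1));
	return 0;
}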
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 04d7bddc915b..530a72e3eefe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -77,9 +77,9 @@
#define PGFSM_POWER_ON 0
#define PGFSM_POWER_OFF 2
-void print_microsec(struct dc_context *dc_ctx,
- struct dc_log_buffer_ctx *log_ctx,
- uint32_t ref_cycle)
+static void print_microsec(struct dc_context *dc_ctx,
+ struct dc_log_buffer_ctx *log_ctx,
+ uint32_t ref_cycle)
{
const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
@@ -132,7 +132,8 @@ static void log_mpc_crc(struct dc *dc,
REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
-void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
+static void dcn10_log_hubbub_state(struct dc *dc,
+ struct dc_log_buffer_ctx *log_ctx)
{
struct dc_context *dc_ctx = dc->ctx;
struct dcn_hubbub_wm wm;
@@ -467,8 +468,6 @@ void dcn10_log_hw_state(struct dc *dc,
log_mpc_crc(dc, log_ctx);
{
- int hpo_dp_link_enc_count = 0;
-
if (pool->hpo_dp_stream_enc_count > 0) {
DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
@@ -499,18 +498,14 @@ void dcn10_log_hw_state(struct dc *dc,
}
/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
- for (i = 0; i < dc->link_count; i++)
- if (dc->links[i]->hpo_dp_link_enc)
- hpo_dp_link_enc_count++;
-
- if (hpo_dp_link_enc_count) {
+ if (pool->hpo_dp_link_enc_count) {
DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
- for (i = 0; i < dc->link_count; i++) {
- struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc;
+ for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
+ struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
- if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) {
+ if (hpo_dp_link_enc->funcs->read_state) {
hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
hpo_dp_link_enc->inst,
@@ -1362,11 +1357,53 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
tg->funcs->tg_init(tg);
}
+
+ /* Power gate DSCs */
+ if (hws->funcs.dsc_pg_control != NULL) {
+ uint32_t num_opps = 0;
+ uint32_t opp_id_src0 = OPP_ID_INVALID;
+ uint32_t opp_id_src1 = OPP_ID_INVALID;
+
+ // Step 1: Find out which OPTC is running and has DSC enabled.
+ // We can't use res_pool->res_cap->num_timing_generator to check,
+ // because it records the default number of display pipes built into
+ // the driver, not the number present on the current chip. Some ASICs
+ // are fused to fewer display pipes than the default; the driver
+ // obtains the real count in the dcnxx_resource_construct function.
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ uint32_t optc_dsc_state = 0;
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg)) {
+ if (tg->funcs->get_dsc_status)
+ tg->funcs->get_dsc_status(tg, &optc_dsc_state);
+ // Only one OPTC can have DSC on, so exit the loop on the first hit.
+ // A non-zero state value means DSC is enabled.
+ if (optc_dsc_state != 0) {
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+ break;
+ }
+ }
+ }
+
+ // Step 2: Power down every DSC except the one feeding the running OPTC.
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
+ struct dcn_dsc_state s = {0};
+
+ dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
+
+ if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
+ s.dsc_clock_en && s.dsc_fw_en)
+ continue;
+
+ hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
+ }
+ }
}
void dcn10_init_hw(struct dc *dc)
{
- int i, j;
+ int i;
struct abm *abm = dc->res_pool->abm;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dce_hwseq *hws = dc->hwseq;
@@ -1468,43 +1505,8 @@ void dcn10_init_hw(struct dc *dc)
dmub_enable_outbox_notification(dc);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot) {
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
- continue;
-
- /* DP 2.0 requires that LTTPR Caps be read first */
- dp_retrieve_lttpr_cap(dc->links[i]);
-
- /*
- * If any of the displays are lit up turn them off.
- * The reason is that some MST hubs cannot be turned off
- * completely until we tell them to do so.
- * If not turned off, then displays connected to MST hub
- * won't light up.
- */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
- /* blank dp stream before power off receiver*/
- if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
- unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
- dp_receiver_power_ctrl(dc->links[i], false);
- }
- }
- }
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -1970,10 +1972,9 @@ static bool wait_for_reset_trigger_to_occur(
return rc;
}
-uint64_t reduceSizeAndFraction(
- uint64_t *numerator,
- uint64_t *denominator,
- bool checkUint32Bounary)
+static uint64_t reduceSizeAndFraction(uint64_t *numerator,
+ uint64_t *denominator,
+ bool checkUint32Bounary)
{
int i;
bool ret = checkUint32Bounary == false;
@@ -2021,7 +2022,7 @@ uint64_t reduceSizeAndFraction(
return ret;
}
-bool is_low_refresh_rate(struct pipe_ctx *pipe)
+static bool is_low_refresh_rate(struct pipe_ctx *pipe)
{
uint32_t master_pipe_refresh_rate =
pipe->stream->timing.pix_clk_100hz * 100 /
@@ -2030,7 +2031,8 @@ bool is_low_refresh_rate(struct pipe_ctx *pipe)
return master_pipe_refresh_rate <= 30;
}
-uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
+static uint8_t get_clock_divider(struct pipe_ctx *pipe,
+ bool account_low_refresh_rate)
{
uint32_t clock_divider = 1;
uint32_t numpipes = 1;
@@ -2050,10 +2052,8 @@ uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
return clock_divider;
}
-int dcn10_align_pixel_clocks(
- struct dc *dc,
- int group_size,
- struct pipe_ctx *grouped_pipes[])
+static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
+ struct pipe_ctx *grouped_pipes[])
{
struct dc_context *dc_ctx = dc->ctx;
int i, master = -1, embedded = -1;
@@ -2342,7 +2342,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
}
-void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
+static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
struct vm_system_aperture_param apt = {0};
@@ -2624,7 +2624,7 @@ static void dcn10_update_dchubp_dpp(
/* new calculated dispclk, dppclk are stored in
* context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
* dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
- * dcn_validate_bandwidth compute new dispclk, dppclk.
+ * dcn10_validate_bandwidth computes new dispclk, dppclk.
* dispclk will put in use after optimize_bandwidth when
* ramp_up_dispclk_with_dpp is called.
* there are two places for dppclk be put in use. One location
@@ -2638,7 +2638,7 @@ static void dcn10_update_dchubp_dpp(
* for example, eDP + external dp, change resolution of DP from
* 1920x1080x144hz to 1280x960x60hz.
* before change: dispclk = 337889 dppclk = 337889
- * change mode, dcn_validate_bandwidth calculate
+ * change mode, dcn10_validate_bandwidth calculates
* dispclk = 143122 dppclk = 143122
* update_dchubp_dpp be executed before dispclk be updated,
* dispclk = 337889, but dppclk use new value dispclk /2 =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 2dc4b4e4ba02..f4b34c110eae 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -646,8 +646,9 @@ static bool dcn10_link_encoder_validate_hdmi_output(
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
- if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
- adjusted_pix_clk_100hz >= 3000000)
+ if ((!enc10->base.features.flags.bits.HDMI_6GB_EN ||
+ enc10->base.ctx->dc->debug.hdmi20_disable) &&
+ adjusted_pix_clk_100hz >= 3000000)
return false;
if (enc10->base.ctx->dc->debug.hdmi20_disable &&
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index d54d731415d7..2c409356f512 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -348,36 +348,6 @@ void opp1_program_stereo(
*/
}
-void opp1_program_oppbuf(
- struct output_pixel_processor *opp,
- struct oppbuf_params *oppbuf)
-{
- struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
-
- /* Program the oppbuf active width to be the frame width from mpc */
- REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, oppbuf->active_width);
-
- /* Specifies the number of segments in multi-segment mode (DP-MSO operation)
- * description "In 1/2/4 segment mode, specifies the horizontal active width in pixels of the display panel.
- * In 4 segment split left/right mode, specifies the horizontal 1/2 active width in pixels of the display panel.
- * Used to determine segment boundaries in multi-segment mode. Used to determine the width of the vertical active space in 3D frame packed modes.
- * OPPBUF_ACTIVE_WIDTH must be integer divisible by the total number of segments."
- */
- REG_UPDATE(OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, oppbuf->mso_segmentation);
-
- /* description "Specifies the number of overlap pixels (1-8 overlapping pixels supported), used in multi-segment mode (DP-MSO operation)" */
- REG_UPDATE(OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, oppbuf->mso_overlap_pixel_num);
-
- /* description "Specifies the number of times a pixel is replicated (0-15 pixel replications supported).
- * A value of 0 disables replication. The total number of times a pixel is output is OPPBUF_PIXEL_REPETITION + 1."
- */
- REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition);
-
- /* Controls the number of padded pixels at the end of a segment */
- if (REG(OPPBUF_CONTROL1))
- REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels);
-}
-
void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 3d2a2848857a..b1671b00ce40 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -132,22 +132,6 @@ void optc1_setup_vertical_interrupt2(
}
/**
- * Vupdate keepout can be set to a window to block the update lock for that pipe from changing.
- * Start offset begins with vstartup and goes for x number of clocks,
- * end offset starts from end of vupdate to x number of clocks.
- */
-void optc1_set_vupdate_keepout(struct timing_generator *optc,
- struct vupdate_keepout_params *params)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
- MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset,
- MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset,
- OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable);
-}
-
-/**
* program_timing_generator used by mode timing set
* Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
* Including SYNC. Call BIOS command table to program Timings.
@@ -876,7 +860,7 @@ void optc1_set_static_screen_control(
OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
-void optc1_setup_manual_trigger(struct timing_generator *optc)
+static void optc1_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -894,7 +878,7 @@ void optc1_setup_manual_trigger(struct timing_generator *optc)
OTG_TRIGA_CLEAR, 1);
}
-void optc1_program_manual_trigger(struct timing_generator *optc)
+static void optc1_program_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index f37551e00023..858b72149897 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -686,9 +686,8 @@ static struct output_pixel_processor *dcn10_opp_create(
return &opp->base;
}
-struct dce_aux *dcn10_aux_engine_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_aux *dcn10_aux_engine_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
@@ -724,9 +723,8 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dcn10_i2c_hw_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_i2c_hw *dcn10_i2c_hw_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
@@ -805,7 +803,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS4_CAPABLE = true
};
-struct link_encoder *dcn10_link_encoder_create(
+static struct link_encoder *dcn10_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn10_link_encoder *enc10 =
@@ -847,7 +845,7 @@ static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
-struct clock_source *dcn10_clock_source_create(
+static struct clock_source *dcn10_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -945,7 +943,7 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn10_hwseq_create,
};
-void dcn10_clock_source_destroy(struct clock_source **clk_src)
+static void dcn10_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -978,10 +976,8 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
pool->base.mpc = NULL;
}
- if (pool->base.hubbub != NULL) {
- kfree(pool->base.hubbub);
- pool->base.hubbub = NULL;
- }
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.opps[i] != NULL)
@@ -1011,14 +1007,10 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
- if (pool->base.hw_i2cs[i] != NULL) {
- kfree(pool->base.hw_i2cs[i]);
- pool->base.hw_i2cs[i] = NULL;
- }
- if (pool->base.sw_i2cs[i] != NULL) {
- kfree(pool->base.sw_i2cs[i]);
- pool->base.sw_i2cs[i] = NULL;
- }
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
}
for (i = 0; i < pool->base.audio_count; i++) {
@@ -1128,7 +1120,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-enum dc_status dcn10_add_stream_to_ctx(
+static enum dc_status dcn10_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *dc_stream)
@@ -1320,7 +1312,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
.panel_cntl_create = dcn10_panel_cntl_create,
- .validate_bandwidth = dcn_validate_bandwidth,
+ .validate_bandwidth = dcn10_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index b0c08ee6bc2c..bf4436d7aaab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -902,6 +902,19 @@ void enc1_stream_encoder_stop_dp_info_packets(
}
+void enc1_stream_encoder_reset_fifo(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* set DIG_START to 0x1 to reset FIFO */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(100);
+
+ /* write 0 to take the FIFO out of reset */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
+}
+
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
@@ -1587,6 +1600,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
+ .reset_fifo =
+ enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
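The enc1_stream_encoder_reset_fifo() addition above pulses DIG_START to flush the DIG front-end FIFO, and the new .reset_fifo entry in this funcs table (repeated in the dcn20 and dcn30 tables further down) exposes it to the hw sequencer. Below is a minimal, self-contained sketch of the pulse pattern, assuming stand-in register helpers; the names and the plain-variable register model are illustrative, not the driver's REG_UPDATE machinery.

    #include <stdint.h>

    static uint32_t dig_fe_cntl;    /* models the DIG_FE_CNTL register */

    /* Models REG_UPDATE(DIG_FE_CNTL, DIG_START, v): read-modify-write bit 0. */
    static void set_dig_start(uint32_t v)
    {
        dig_fe_cntl = (dig_fe_cntl & ~1u) | (v & 1u);
    }

    static void delay_us(unsigned int us) { (void)us; /* udelay() in-tree */ }

    /* Holding DIG_START at 1 puts the FIFO in reset; writing 0 releases it. */
    static void reset_fifo_sketch(void)
    {
        set_dig_start(1);   /* enter reset */
        delay_us(100);      /* mirrors the patch's udelay(100) settle time */
        set_dig_start(0);   /* leave reset, pixel data flows again */
    }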
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 687d7e4bf7ca..a146a41f68e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -626,6 +626,9 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
+void enc1_stream_encoder_reset_fifo(
+ struct stream_encoder *enc);
+
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index a9e420c7d75a..970b65efeac1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -251,20 +251,6 @@ static void dpp2_cnv_setup (
}
-void dpp2_cnv_set_bias_scale(
- struct dpp *dpp_base,
- struct dc_bias_and_scale *bias_and_scale)
-{
- struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
-
- REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red);
- REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green);
- REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue);
- REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red);
- REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green);
- REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue);
-}
-
/*compute the maximum number of lines that we can fit in the line buffer*/
void dscl2_calc_lb_num_partitions(
const struct scaler_data *scl_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 79b640e202eb..ef5c4c0f4d6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -162,6 +162,8 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);
REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en,
+ DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
index 880954ac0b02..994fb732a7cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -527,7 +527,7 @@ static const uint16_t filter_12tap_16p_183[108] = {
0, 84, 16328, 16032, 416, 1944, 1944, 416, 16032, 16328, 84, 0,
};
-const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
+static const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_16p_upscale;
@@ -539,7 +539,7 @@ const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
return filter_3tap_16p_183;
}
-const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio)
+static const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_16p_upscale;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 5adf42a7cc27..dc1752e9f461 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -192,9 +192,8 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}
-void hubp2_program_requestor(
- struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs)
+static void hubp2_program_requestor(struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -930,6 +929,16 @@ bool hubp2_is_flip_pending(struct hubp *hubp)
void hubp2_set_blank(struct hubp *hubp, bool blank)
{
+ hubp2_set_blank_regs(hubp, blank);
+
+ if (blank) {
+ hubp->mpcc_id = 0xf;
+ hubp->opp_id = OPP_ID_INVALID;
+ }
+}
+
+void hubp2_set_blank_regs(struct hubp *hubp, bool blank)
+{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t blank_en = blank ? 1 : 0;
@@ -951,9 +960,6 @@ void hubp2_set_blank(struct hubp *hubp, bool blank)
HUBP_NO_OUTSTANDING_REQ, 1,
1, 200);
}
-
- hubp->mpcc_id = 0xf;
- hubp->opp_id = OPP_ID_INVALID;
}
}
@@ -1285,7 +1291,7 @@ void hubp2_read_state(struct hubp *hubp)
}
-void hubp2_validate_dml_output(struct hubp *hubp,
+static void hubp2_validate_dml_output(struct hubp *hubp,
struct dc_context *ctx,
struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
@@ -1603,6 +1609,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp2_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
+ .set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp2_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
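The hubp2_set_blank() split above separates register programming from software pipe bookkeeping: the new hubp2_set_blank_regs(), exported via the .set_blank_regs hook added to this table, blanks pixel data without invalidating the pipe's mpcc_id/opp_id association, while the full set_blank() path still drops it. A condensed sketch of the layering, with simplified types (all names and the OPP sentinel value are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define MPCC_ID_INVALID 0xf    /* mirrors the 0xf sentinel in the patch */
    #define OPP_ID_INVALID  0xff   /* illustrative value only */

    struct fake_hubp { uint8_t mpcc_id, opp_id; bool blanked; };

    /* Register-only path: safe when the pipe should stay attached. */
    static void set_blank_regs(struct fake_hubp *h, bool blank)
    {
        h->blanked = blank;    /* models the HUBP_BLANK_EN programming */
    }

    /* Full path: also drops the SW association, as the original did. */
    static void set_blank(struct fake_hubp *h, bool blank)
    {
        set_blank_regs(h, blank);
        if (blank) {
            h->mpcc_id = MPCC_ID_INVALID;
            h->opp_id = OPP_ID_INVALID;
        }
    }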
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index eea2254b15e4..9204c3ef323b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -330,6 +330,7 @@ void hubp2_program_surface_config(
bool hubp2_is_flip_pending(struct hubp *hubp);
void hubp2_set_blank(struct hubp *hubp, bool blank);
+void hubp2_set_blank_regs(struct hubp *hubp, bool blank);
void hubp2_cursor_set_position(
struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e6af99ae3d9f..4991e93e5308 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -615,6 +615,11 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->pipe_idx);
}
+void dcn20_disable_pixel_data(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank)
+{
+ dcn20_blank_pixel_data(dc, pipe_ctx, blank);
+}
+
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
int opp_cnt)
{
@@ -1080,10 +1085,8 @@ static void dcn20_power_on_plane(
}
}
-void dcn20_enable_plane(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
+static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
{
//if (dc->debug.sanity_checks) {
// dcn10_verify_allow_pstate_change_high(dc);
@@ -1842,6 +1845,11 @@ void dcn20_optimize_bandwidth(
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
+ if (dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
+ context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
@@ -2406,7 +2414,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link(
pipe_ctx->stream_res.hpo_dp_stream_enc,
pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
- link->hpo_dp_link_enc->inst);
+ pipe_ctx->link_res.hpo_dp_link_enc->inst);
}
if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc)
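The new block in dcn20_optimize_bandwidth() above lowers the memory-clock cap back to the DC-mode softmax once the committed state fits under it; dcn30_prepare_bandwidth later in this series performs the inverse raise before a heavier state (see the sketch after the dcn30_hwseq.c diff below). A condensed sketch of the lower-side decision, using a simplified clk_mgr model with illustrative names:

    #include <stdbool.h>

    struct clk_state {
        int cur_dramclk_khz;      /* clocks currently programmed */
        int new_dramclk_khz;      /* requirement of the incoming context */
        int softmax_memclk_mhz;   /* DC-mode soft ceiling */
        bool softmax_enabled;
    };

    /* After committing a lighter state: if we had been forced above the
     * softmax and the new state fits under it, clamp back down. */
    static bool should_lower_to_softmax(const struct clk_state *c)
    {
        int softmax_khz = c->softmax_memclk_mhz * 1000;

        return c->softmax_enabled &&
               c->cur_dramclk_khz > softmax_khz &&
               c->new_dramclk_khz <= softmax_khz;
    }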
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 6bba191cd33e..33a36c02b2f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -53,6 +53,10 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_disable_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
void dcn20_blank_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 5cfd4b0afea5..91e4885b743e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -27,6 +27,8 @@
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20_hwseq.h"
+#include "dcn20_init.h"
+
static const struct hw_sequencer_funcs dcn20_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn10_init_hw,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 947eb0df3f12..15734db0cdea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -400,10 +400,9 @@ static void mpc20_program_ogam_pwl(
}
-void apply_DEDCN20_305_wa(
- struct mpc *mpc,
- int mpcc_id, enum dc_lut_mode current_mode,
- enum dc_lut_mode next_mode)
+static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id,
+ enum dc_lut_mode current_mode,
+ enum dc_lut_mode next_mode)
{
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
@@ -525,7 +524,7 @@ static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
mpcc->sm_cfg.enable = false;
}
-struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
+static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
{
struct mpcc *tmp_mpcc = tree->opp_list;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index c90b8516dcc1..0340fdd3f5fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -73,21 +73,6 @@ bool optc2_enable_crtc(struct timing_generator *optc)
}
/**
- * DRR double buffering control to select buffer point
- * for V_TOTAL, H_TOTAL, VTOTAL_MIN, VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers
- * Options: anytime, start of frame, dp start of frame (range timing)
- */
-void optc2_set_timing_db_mode(struct timing_generator *optc, bool enable)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- uint32_t blank_data_double_buffer_enable = enable ? 1 : 0;
-
- REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
- OTG_RANGE_TIMING_DBUF_UPDATE_MODE, blank_data_double_buffer_enable);
-}
-
-/**
*For the below, I'm not sure how your GSL parameters are stored in your env,
* so I will assume a gsl_params struct for now
*/
@@ -110,30 +95,6 @@ void optc2_set_gsl(struct timing_generator *optc,
}
-/* Use the gsl allow flip as the master update lock */
-void optc2_use_gsl_as_master_update_lock(struct timing_generator *optc,
- const struct gsl_params *params)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_UPDATE(OTG_GSL_CONTROL,
- OTG_MASTER_UPDATE_LOCK_GSL_EN, params->master_update_lock_gsl_en);
-}
-
-/* You can control the GSL timing by limiting GSL to a window (X,Y) */
-void optc2_set_gsl_window(struct timing_generator *optc,
- const struct gsl_params *params)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_SET_2(OTG_GSL_WINDOW_X, 0,
- OTG_GSL_WINDOW_START_X, params->gsl_window_start_x,
- OTG_GSL_WINDOW_END_X, params->gsl_window_end_x);
- REG_SET_2(OTG_GSL_WINDOW_Y, 0,
- OTG_GSL_WINDOW_START_Y, params->gsl_window_start_y,
- OTG_GSL_WINDOW_END_Y, params->gsl_window_end_y);
-}
-
void optc2_set_gsl_source_select(
struct timing_generator *optc,
int group_idx,
@@ -156,18 +117,6 @@ void optc2_set_gsl_source_select(
}
}
-/* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */
-void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc,
- int x_position,
- int line_num)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_SET_2(OTG_DSC_START_POSITION, 0,
- OTG_DSC_START_POSITION_X, x_position,
- OTG_DSC_START_POSITION_LINE_NUM, line_num);
-}
-
/* Set DSC-related configuration.
* dsc_mode: 0 disables DSC, other values enable DSC in specified format
* sc_bytes_per_pixel: Bytes per pixel in u3.28 format
@@ -190,6 +139,19 @@ void optc2_set_dsc_config(struct timing_generator *optc,
OPTC_DSC_SLICE_WIDTH, dsc_slice_width);
}
+/* Get DSC-related configuration.
+ * dsc_mode: 0 means DSC is disabled, other values indicate the DSC format in use
+ */
+void optc2_get_dsc_status(struct timing_generator *optc,
+ uint32_t *dsc_mode)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OPTC_DATA_FORMAT_CONTROL,
+ OPTC_DSC_MODE, dsc_mode);
+}
+
+
/*TEMP: Need to figure out inheritance model here.*/
bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
@@ -280,8 +242,8 @@ void optc2_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 1;
}
-void optc2_set_dwb_source(struct timing_generator *optc,
- uint32_t dwb_pipe_inst)
+static void optc2_set_dwb_source(struct timing_generator *optc,
+ uint32_t dwb_pipe_inst)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -293,7 +255,7 @@ void optc2_set_dwb_source(struct timing_generator *optc,
OPTC_DWB1_SOURCE_SELECT, optc->inst);
}
-void optc2_align_vblanks(
+static void optc2_align_vblanks(
struct timing_generator *optc_master,
struct timing_generator *optc_slave,
uint32_t master_pixel_clock_100Hz,
@@ -579,6 +541,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc2_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
.set_odm_combine = optc2_set_odm_combine,
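optc2_get_dsc_status() above is a read-back for the mode that optc2_set_dsc_config() programs into OPTC_DATA_FORMAT_CONTROL, wired into both the dcn20 table here and the dcn30 table below via .get_dsc_status. A sketch of how a caller might consume the hook, with the struct shapes reduced to the minimum (names illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct tg;    /* opaque timing generator */

    struct tg_funcs {
        void (*get_dsc_status)(struct tg *tg, uint32_t *dsc_mode);
    };

    /* DSC is active whenever the programmed mode is non-zero, matching
     * the setter's "0 disables DSC" convention. */
    static bool tg_dsc_enabled(struct tg *tg, const struct tg_funcs *funcs)
    {
        uint32_t mode = 0;

        if (funcs->get_dsc_status)
            funcs->get_dsc_status(tg, &mode);
        return mode != 0;
    }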
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index be19a6885fbf..f7968b9ca16e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -98,6 +98,9 @@ void optc2_set_dsc_config(struct timing_generator *optc,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
+void optc2_get_dsc_status(struct timing_generator *optc,
+ uint32_t *dsc_mode);
+
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 83f5d9aaffcb..2bc93df023ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3093,8 +3093,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
- if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled)
- || context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ if (link->link_index == 0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
return DCN_ZSTATE_SUPPORT_ALLOW;
else
return DCN_ZSTATE_SUPPORT_DISALLOW;
@@ -3796,6 +3795,8 @@ static bool dcn20_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.hdmi_frl_pcon_support = true;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
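The decide_zstate_support() change above tightens the single-eDP case: previously, PSR being enabled on link 0 or a stutter period above 5 ms was enough to allow Z-states; now both "primary eDP link" and "stutter period above 5 ms" must hold, and PSR alone no longer qualifies. A one-function sketch of the new predicate (parameter names illustrative):

    #include <stdbool.h>

    /* New rule: allow Z-states only on link 0 with a long stutter period. */
    static bool zstate_allow(int link_index, bool psr_enabled,
                             double stutter_period_us)
    {
        (void)psr_enabled;    /* no longer consulted after this patch */
        return link_index == 0 && stutter_period_us > 5000.0;
    }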
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index aab25ca8343a..8a70f92795c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -593,6 +593,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
+ .reset_fifo =
+ enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c
index f5bf04f7da25..9a3402148fde 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c
@@ -44,7 +44,8 @@
#define DC_LOGGER \
dccg->ctx->logger
-void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+static void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst,
+ int req_dppclk)
{
/* vbios handles it */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
index 6b6f74d4afd1..35dd4bac242a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
@@ -55,7 +55,7 @@ static void hubp201_program_surface_config(
hubp1_program_pixel_format(hubp, format);
}
-void hubp201_program_deadline(
+static void hubp201_program_deadline(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
@@ -63,9 +63,8 @@ void hubp201_program_deadline(
hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
}
-void hubp201_program_requestor(
- struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs)
+static void hubp201_program_requestor(struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
struct dcn201_hubp *hubp201 = TO_DCN201_HUBP(hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
index cfd09b3f705e..fe22530242d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -134,11 +134,12 @@ void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
PHYSICAL_ADDRESS_LOC addr;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dce_hwseq *hws = dc->hwseq;
- struct dc_plane_address uma = plane_state->address;
+ struct dc_plane_address uma;
if (plane_state == NULL)
return;
+ uma = plane_state->address;
addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
plane_address_in_gpu_space_to_uma(hws, &uma);
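The dcn201_update_plane_addr() hunk above is a use-before-check fix: the declaration's initializer dereferenced plane_state before the NULL guard ran, so a NULL plane would crash before the early return. Moving the copy after the check restores the intended ordering. The sketch below isolates the hazard with simplified types (names illustrative):

    #include <stddef.h>

    struct plane { int address; };

    static int broken(const struct plane *p)
    {
        int addr = p->address;    /* initializer dereferences p first */

        if (p == NULL)
            return -1;            /* too late: already crashed if p was NULL */
        return addr;
    }

    static int fixed(const struct plane *p)
    {
        int addr;

        if (p == NULL)
            return -1;
        addr = p->address;        /* copy only once p is known valid */
        return addr;
    }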
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
index a65e8f7801db..7f9ec59ef443 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
@@ -50,8 +50,8 @@
#define IND_REG(index) \
(enc10->link_regs->index)
-void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc,
- struct dc_link_settings *link_settings)
+static void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
{
uint32_t value1, value2;
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
@@ -66,7 +66,7 @@ void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc,
}
}
-bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+static bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
uint32_t value;
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
index faec0297ec0a..0bb7d3dd53fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
@@ -672,9 +672,8 @@ static struct output_pixel_processor *dcn201_opp_create(
return &opp->base;
}
-struct dce_aux *dcn201_aux_engine_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
@@ -706,9 +705,8 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
-struct dce_i2c_hw *dcn201_i2c_hw_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
@@ -789,7 +787,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS4_CAPABLE = true
};
-struct link_encoder *dcn201_link_encoder_create(
+static struct link_encoder *dcn201_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
@@ -811,7 +809,7 @@ struct link_encoder *dcn201_link_encoder_create(
return &enc10->base;
}
-struct clock_source *dcn201_clock_source_create(
+static struct clock_source *dcn201_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -906,7 +904,7 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn201_hwseq_create,
};
-void dcn201_clock_source_destroy(struct clock_source **clk_src)
+static void dcn201_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 36044cb8ec83..c5e200d09038 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -680,7 +680,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}
-void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
+static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 3de1bcf9b3d8..58e459c7e7d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -183,7 +183,7 @@ static void hubp21_setup(
}
-void hubp21_set_viewport(
+static void hubp21_set_viewport(
struct hubp *hubp,
const struct rect *viewport,
const struct rect *viewport_c)
@@ -225,8 +225,8 @@ void hubp21_set_viewport(
SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
-void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
- struct vm_system_aperture_param *apt)
+static void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
+ struct vm_system_aperture_param *apt)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
@@ -248,7 +248,7 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
SYSTEM_ACCESS_MODE, 0x3);
}
-void hubp21_validate_dml_output(struct hubp *hubp,
+static void hubp21_validate_dml_output(struct hubp *hubp,
struct dc_context *ctx,
struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
@@ -664,7 +664,8 @@ static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip
flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS);
}
-void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs)
+static void dmcub_PLAT_54186_wa(struct hubp *hubp,
+ struct surface_flip_registers *flip_regs)
{
struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
@@ -697,7 +698,7 @@ void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_
PERF_TRACE(); // TODO: remove after performance is stable.
}
-bool hubp21_program_surface_flip_and_addr(
+static bool hubp21_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
bool flip_immediate)
@@ -805,7 +806,7 @@ bool hubp21_program_surface_flip_and_addr(
return true;
}
-void hubp21_init(struct hubp *hubp)
+static void hubp21_init(struct hubp *hubp)
{
// DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta
// This is a chicken bit to enable the ECO fix.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 54c11ba550ae..b270f0b194dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -28,6 +28,8 @@
#include "dcn20/dcn20_hwseq.h"
#include "dcn21_hwseq.h"
+#include "dcn21_init.h"
+
static const struct hw_sequencer_funcs dcn21_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn10_init_hw,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
index aa46c35b05a2..0a1ba6e7081c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -203,7 +203,7 @@ static bool update_cfg_data(
return true;
}
-bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
+static bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
int value;
@@ -277,7 +277,7 @@ void dcn21_link_encoder_enable_dp_output(
}
-void dcn21_link_encoder_enable_dp_mst_output(
+static void dcn21_link_encoder_enable_dp_mst_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
@@ -288,9 +288,8 @@ void dcn21_link_encoder_enable_dp_mst_output(
dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
}
-void dcn21_link_encoder_disable_output(
- struct link_encoder *enc,
- enum signal_type signal)
+static void dcn21_link_encoder_disable_output(struct link_encoder *enc,
+ enum signal_type signal)
{
dcn10_link_encoder_disable_output(enc, signal);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 79313d1ab5d9..e5cc6bf45743 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -784,9 +784,8 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
-struct dce_i2c_hw *dcn21_i2c_hw_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_i2c_hw *dcn21_i2c_hw_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
@@ -1093,7 +1092,7 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
}
}
-void dcn21_calculate_wm(
+static void dcn21_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
@@ -1390,7 +1389,7 @@ validate_out:
* with DC_FP_START()/DC_FP_END(). Use the same approach as for
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
-bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
@@ -1480,8 +1479,8 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx)
return &hubbub->base;
}
-struct output_pixel_processor *dcn21_opp_create(
- struct dc_context *ctx, uint32_t inst)
+static struct output_pixel_processor *dcn21_opp_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dcn20_opp *opp =
kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
@@ -1496,9 +1495,8 @@ struct output_pixel_processor *dcn21_opp_create(
return &opp->base;
}
-struct timing_generator *dcn21_timing_generator_create(
- struct dc_context *ctx,
- uint32_t instance)
+static struct timing_generator *dcn21_timing_generator_create(struct dc_context *ctx,
+ uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_KERNEL);
@@ -1518,7 +1516,7 @@ struct timing_generator *dcn21_timing_generator_create(
return &tgn10->base;
}
-struct mpc *dcn21_mpc_create(struct dc_context *ctx)
+static struct mpc *dcn21_mpc_create(struct dc_context *ctx)
{
struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
GFP_KERNEL);
@@ -1545,8 +1543,8 @@ static void read_dce_straps(
}
-struct display_stream_compressor *dcn21_dsc_create(
- struct dc_context *ctx, uint32_t inst)
+static struct display_stream_compressor *dcn21_dsc_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dcn20_dsc *dsc =
kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
@@ -1683,9 +1681,8 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-struct stream_encoder *dcn21_stream_encoder_create(
- enum engine_id eng_id,
- struct dc_context *ctx)
+static struct stream_encoder *dcn21_stream_encoder_create(enum engine_id eng_id,
+ struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1 =
kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
@@ -1917,7 +1914,7 @@ static int dcn21_populate_dml_pipes_from_context(
return pipe_cnt;
}
-enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@@ -2028,6 +2025,8 @@ static bool dcn21_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.hdmi_frl_pcon_support = true;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index ebd9c35c914f..8daa12730bc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -50,22 +50,6 @@
enc1->base.ctx
-void convert_dc_info_packet_to_128(
- const struct dc_info_packet *info_packet,
- struct dc_info_packet_128 *info_packet_128)
-{
- unsigned int i;
-
- info_packet_128->hb0 = info_packet->hb0;
- info_packet_128->hb1 = info_packet->hb1;
- info_packet_128->hb2 = info_packet->hb2;
- info_packet_128->hb3 = info_packet->hb3;
-
- for (i = 0; i < 32; i++) {
- info_packet_128->sb[i] = info_packet->sb[i];
- }
-
-}
static void enc3_update_hdmi_info_packet(
struct dcn10_stream_encoder *enc1,
uint32_t packet_index,
@@ -489,7 +473,7 @@ static void enc3_dp_set_odm_combine(
}
/* setup stream encoder in dvi mode */
-void enc3_stream_encoder_dvi_set_stream_attribute(
+static void enc3_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
@@ -805,6 +789,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
+ .reset_fifo =
+ enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index c1d967ed6551..ab3918c0a15b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -41,8 +41,7 @@
dpp->tf_shift->field_name, dpp->tf_mask->field_name
-void dpp30_read_state(struct dpp *dpp_base,
- struct dcn_dpp_state *s)
+static void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
{
struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
@@ -373,7 +372,7 @@ void dpp3_set_cursor_attributes(
}
-bool dpp3_get_optimal_number_of_taps(
+static bool dpp3_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
@@ -474,22 +473,7 @@ bool dpp3_get_optimal_number_of_taps(
return true;
}
-void dpp3_cnv_set_bias_scale(
- struct dpp *dpp_base,
- struct dc_bias_and_scale *bias_and_scale)
-{
- struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
-
- REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red);
- REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green);
- REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue);
- REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red);
- REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green);
- REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue);
-}
-
-void dpp3_deferred_update(
- struct dpp *dpp_base)
+static void dpp3_deferred_update(struct dpp *dpp_base)
{
int bypass_state;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
@@ -751,8 +735,8 @@ static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base)
return mode;
}
-bool dpp3_program_blnd_lut(
- struct dpp *dpp_base, const struct pwl_params *params)
+static bool dpp3_program_blnd_lut(struct dpp *dpp_base,
+ const struct pwl_params *params)
{
enum dc_lut_mode current_mode;
enum dc_lut_mode next_mode;
@@ -1164,9 +1148,8 @@ static void dpp3_program_shaper_lutb_settings(
}
-bool dpp3_program_shaper(
- struct dpp *dpp_base,
- const struct pwl_params *params)
+static bool dpp3_program_shaper(struct dpp *dpp_base,
+ const struct pwl_params *params)
{
enum dc_lut_mode current_mode;
enum dc_lut_mode next_mode;
@@ -1355,9 +1338,8 @@ static void dpp3_select_3dlut_ram_mask(
REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0);
}
-bool dpp3_program_3dlut(
- struct dpp *dpp_base,
- struct tetrahedral_params *params)
+static bool dpp3_program_3dlut(struct dpp *dpp_base,
+ struct tetrahedral_params *params)
{
enum dc_lut_mode mode;
bool is_17x17x17;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index eac08926b574..6a4dcafb9bba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -490,6 +490,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
+ .set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index df2717116604..1db1ca19411d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -344,6 +344,17 @@ void dcn30_enable_writeback(
dwb->funcs->enable(dwb, &wb_info->dwb_params);
}
+void dcn30_prepare_bandwidth(struct dc *dc,
+ struct dc_state *context)
+{
+ if (dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
+ context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
+
+ dcn20_prepare_bandwidth(dc, context);
+}
+
void dcn30_disable_writeback(
struct dc *dc,
unsigned int dwb_pipe_inst)
@@ -437,7 +448,7 @@ void dcn30_init_hw(struct dc *dc)
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
- int i, j;
+ int i;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
@@ -534,41 +545,8 @@ void dcn30_init_hw(struct dc *dc)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot) {
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
- continue;
- /* DP 2.0 states that LTTPR regs must be read first */
- dp_retrieve_lttpr_cap(dc->links[i]);
-
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
- /* blank dp stream before power off receiver*/
- if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
- unsigned int fe;
-
- fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
- dc->links[i]->link_enc);
- if (fe == ENGINE_ID_UNKNOWN)
- continue;
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
- dp_receiver_power_ctrl(dc->links[i], false);
- }
- }
- }
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
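Two things happen in dcn30_hwseq.c above. First, the open-coded boot-time loop that read DP_SET_POWER, blanked lit streams, and powered receivers down is replaced by the dc_link_blank_all_dp_displays() helper, which also removes the now-unused j counter. Second, the new dcn30_prepare_bandwidth() is the counterpart of the optimize-side clamp sketched earlier: before programming a state that needs more than the DC-mode softmax, it lifts the memclk cap to the highest clock-table entry, then defers to dcn20_prepare_bandwidth(). A condensed sketch of the raise-side decision (simplified model, names illustrative):

    #include <stdbool.h>

    struct clk_state2 {
        int cur_dramclk_khz;
        int new_dramclk_khz;
        int softmax_memclk_mhz;
        int hardmax_memclk_mhz;   /* last clk_table entry in the real driver */
        bool softmax_enabled;
    };

    /* Before a heavier state: if we sit at or under the softmax but the
     * new state needs more, lift the cap to the hard max first. */
    static int memclk_cap_for_prepare(const struct clk_state2 *c)
    {
        int softmax_khz = c->softmax_memclk_mhz * 1000;

        if (c->softmax_enabled &&
            c->cur_dramclk_khz <= softmax_khz &&
            c->new_dramclk_khz > softmax_khz)
            return c->hardmax_memclk_mhz;

        return c->softmax_memclk_mhz;
    }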
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
index e9a0005288d3..73e7b690e82c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -27,7 +27,7 @@
#define __DC_HWSS_DCN30_H__
#include "hw_sequencer_private.h"
-
+#include "dcn20/dcn20_hwseq.h"
struct dc;
void dcn30_init_hw(struct dc *dc);
@@ -47,6 +47,9 @@ void dcn30_disable_writeback(
struct dc *dc,
unsigned int dwb_pipe_inst);
+void dcn30_prepare_bandwidth(struct dc *dc,
+ struct dc_state *context);
+
bool dcn30_mmhubbub_warmup(
struct dc *dc,
unsigned int num_dwb,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 93f32a312fee..bb347319de83 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -29,6 +29,8 @@
#include "dcn21/dcn21_hwseq.h"
#include "dcn30_hwseq.h"
+#include "dcn30_init.h"
+
static const struct hw_sequencer_funcs dcn30_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn30_init_hw,
@@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
index 1c4b171c68ad..7a93eff183d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
@@ -100,7 +100,7 @@ static void mmhubbub3_warmup_mcif(struct mcif_wb *mcif_wb,
REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false);
}
-void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb,
+static void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb,
struct mcif_buf_params *params,
unsigned int dest_height)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index 95149734378b..0ce0d6165f43 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1362,7 +1362,7 @@ uint32_t mpcc3_acquire_rmu(struct mpc *mpc, int mpcc_id, int rmu_idx)
return -1;
}
-int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
+static int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int rmu_idx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 5d9e6413d67a..f5e8916601d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -332,6 +332,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
.set_odm_bypass = optc3_set_odm_bypass,
.set_odm_combine = optc3_set_odm_combine,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 98852b586295..8ca26383b568 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -816,7 +816,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = false,
+ .p010 = true,
.ayuv = false,
},
@@ -875,7 +875,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
-void dcn30_dpp_destroy(struct dpp **dpp)
+static void dcn30_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
*dpp = NULL;
@@ -992,7 +992,7 @@ static struct mpc *dcn30_mpc_create(
return &mpc30->base;
}
-struct hubbub *dcn30_hubbub_create(struct dc_context *ctx)
+static struct hubbub *dcn30_hubbub_create(struct dc_context *ctx)
{
int i;
@@ -1143,9 +1143,8 @@ static struct afmt *dcn30_afmt_create(
return &afmt3->base;
}
-struct stream_encoder *dcn30_stream_encoder_create(
- enum engine_id eng_id,
- struct dc_context *ctx)
+static struct stream_encoder *dcn30_stream_encoder_create(enum engine_id eng_id,
+ struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1;
struct vpg *vpg;
@@ -1179,8 +1178,7 @@ struct stream_encoder *dcn30_stream_encoder_create(
return &enc1->base;
}
-struct dce_hwseq *dcn30_hwseq_create(
- struct dc_context *ctx)
+static struct dce_hwseq *dcn30_hwseq_create(struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
@@ -1880,7 +1878,6 @@ noinline bool dcn30_internal_validate_bw(
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
- DC_FP_START();
if (!pipe_cnt) {
out = true;
goto validate_out;
@@ -2106,7 +2103,6 @@ validate_fail:
out = false;
validate_out:
- DC_FP_END();
return out;
}
@@ -2308,7 +2304,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ DC_FP_END();
if (pipe_cnt == 0)
goto validate_out;
@@ -2639,6 +2637,8 @@ static bool dcn30_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.hdmi_frl_pcon_support = true;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
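The DC_FP_START()/DC_FP_END() moves above hoist the FP guards out of dcn30_internal_validate_bw() and around the call site in dcn30_validate_bandwidth(), so a single guard pair spans the whole floating-point region; the new dcn301_calculate_wm_and_dlg() wrapper earlier in this diff uses the same shape. In-tree these guards map to kernel_fpu_begin()/kernel_fpu_end(), which must bracket every FP instruction in kernel code. A minimal sketch of the wrapper pattern, with stand-in guard macros (names illustrative):

    static int fp_depth;    /* models the FP-context ownership */

    #define FP_START() (fp_depth++)
    #define FP_END()   (fp_depth--)

    static double internal_validate(double load)
    {
        /* FP math lives here; the caller owns the FP context. */
        return load * 1.05;
    }

    static double validate_bandwidth(double load)
    {
        double out;

        FP_START();                    /* one guard spans the whole FP region */
        out = internal_validate(load);
        FP_END();
        return out;
    }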
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index e85b695f2351..3d42a1a337ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -30,6 +30,8 @@
#include "dcn30/dcn30_hwseq.h"
#include "dcn301_hwseq.h"
+#include "dcn301_init.h"
+
static const struct hw_sequencer_funcs dcn301_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn10_init_hw,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
index 736bda30abc3..ad0df1a72a90 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
@@ -93,7 +93,7 @@ static unsigned int dcn301_get_16_bit_backlight_from_pwm(struct panel_cntl *pane
return (uint32_t)(current_backlight);
}
-uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+static uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
{
struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);
uint32_t value;
@@ -147,7 +147,7 @@ uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
return current_backlight;
}
-void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl)
+static void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl)
{
struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(*panel_cntl);
@@ -155,7 +155,7 @@ void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl)
*panel_cntl = NULL;
}
-bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+static bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl)
{
struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);
uint32_t value;
@@ -165,7 +165,7 @@ bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl)
return value;
}
-bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl)
+static bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl)
{
struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);
uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
@@ -177,7 +177,7 @@ bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl)
return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
}
-void dcn301_store_backlight_level(struct panel_cntl *panel_cntl)
+static void dcn301_store_backlight_level(struct panel_cntl *panel_cntl)
{
struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index e472b729d869..5d9637b07429 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -656,7 +656,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = false,
+ .p010 = true,
.ayuv = false,
},
@@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_clock_gate = true,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -717,15 +717,13 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = false,
};
-void dcn301_dpp_destroy(struct dpp **dpp)
+static void dcn301_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
*dpp = NULL;
}
-struct dpp *dcn301_dpp_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dpp *dcn301_dpp_create(struct dc_context *ctx, uint32_t inst)
{
struct dcn3_dpp *dpp =
kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
@@ -741,8 +739,8 @@ struct dpp *dcn301_dpp_create(
kfree(dpp);
return NULL;
}
-struct output_pixel_processor *dcn301_opp_create(
- struct dc_context *ctx, uint32_t inst)
+static struct output_pixel_processor *dcn301_opp_create(struct dc_context *ctx,
+ uint32_t inst)
{
struct dcn20_opp *opp =
kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
@@ -757,9 +755,7 @@ struct output_pixel_processor *dcn301_opp_create(
return &opp->base;
}
-struct dce_aux *dcn301_aux_engine_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_aux *dcn301_aux_engine_create(struct dc_context *ctx, uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
@@ -793,9 +789,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
-struct dce_i2c_hw *dcn301_i2c_hw_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct dce_i2c_hw *dcn301_i2c_hw_create(struct dc_context *ctx, uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
@@ -829,7 +823,7 @@ static struct mpc *dcn301_mpc_create(
return &mpc30->base;
}
-struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
+static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
{
int i;
@@ -860,9 +854,8 @@ struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
return &hubbub3->base;
}
-struct timing_generator *dcn301_timing_generator_create(
- struct dc_context *ctx,
- uint32_t instance)
+static struct timing_generator *dcn301_timing_generator_create(
+ struct dc_context *ctx, uint32_t instance)
{
struct optc *tgn10 =
kzalloc(sizeof(struct optc), GFP_KERNEL);
@@ -894,7 +887,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS4_CAPABLE = true
};
-struct link_encoder *dcn301_link_encoder_create(
+static struct link_encoder *dcn301_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
@@ -915,7 +908,7 @@ struct link_encoder *dcn301_link_encoder_create(
return &enc20->enc10.base;
}
-struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+static struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dcn301_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dcn301_panel_cntl), GFP_KERNEL);
@@ -997,9 +990,8 @@ static struct afmt *dcn301_afmt_create(
return &afmt3->base;
}
-struct stream_encoder *dcn301_stream_encoder_create(
- enum engine_id eng_id,
- struct dc_context *ctx)
+static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id,
+ struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1;
struct vpg *vpg;
@@ -1033,8 +1025,7 @@ struct stream_encoder *dcn301_stream_encoder_create(
return &enc1->base;
}
-struct dce_hwseq *dcn301_hwseq_create(
- struct dc_context *ctx)
+static struct dce_hwseq *dcn301_hwseq_create(struct dc_context *ctx)
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
@@ -1182,9 +1173,7 @@ static void dcn301_destruct(struct dcn301_resource_pool *pool)
dcn_dccg_destroy(&pool->base.dccg);
}
-struct hubp *dcn301_hubp_create(
- struct dc_context *ctx,
- uint32_t inst)
+static struct hubp *dcn301_hubp_create(struct dc_context *ctx, uint32_t inst)
{
struct dcn20_hubp *hubp2 =
kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
@@ -1201,7 +1190,7 @@ struct hubp *dcn301_hubp_create(
return NULL;
}
-bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
+static bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
@@ -1226,7 +1215,7 @@ bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
return true;
}
-bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+static bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
@@ -1391,6 +1380,17 @@ static void set_wm_ranges(
pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges);
}
+static void dcn301_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ DC_FP_START();
+ dcn301_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
+}
+
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
@@ -1449,9 +1449,7 @@ static bool dcn301_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
-#ifdef CONFIG_DRM_AMD_DC_DMUB
dc->caps.dmcub_support = true;
-#endif
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -1487,6 +1485,23 @@ static bool dcn301_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (ctx->dc_bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
+ dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
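The same VBIOS LTTPR query block is added verbatim to the dcn301 constructor above and to the dcn302 and dcn303 constructors below: probe an optional bios funcs hook, and record the capability only when the query succeeds and reports it enabled. If this pattern spreads further it could be factored into a helper along these lines; the helper is purely a sketch and does not exist in the patch:

    #include <stdbool.h>
    #include <stdint.h>

    enum bp_result { BP_RESULT_OK = 0, BP_RESULT_FAILURE };

    struct bios_funcs {
        enum bp_result (*get_lttpr_caps)(void *bios, uint8_t *enabled);
        enum bp_result (*get_lttpr_interop)(void *bios, uint8_t *enabled);
    };

    /* Query one optional VBIOS flag; false when the hook is absent or the
     * query fails, mirroring the open-coded blocks in the patch. */
    static bool query_bios_flag(void *bios,
                                enum bp_result (*fn)(void *, uint8_t *))
    {
        uint8_t enabled = 0;

        if (!fn)
            return false;
        return fn(bios, &enabled) == BP_RESULT_OK && enabled != 0;
    }

With such a helper each constructor would reduce to two assignments, e.g. caps.vbios_lttpr_enable = query_bios_flag(bios, funcs->get_lttpr_caps), at the cost of hiding which hook each capability comes from.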
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c
index d88b9011c502..eb375f30f5bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c
@@ -29,6 +29,8 @@
#include "dc.h"
+#include "dcn302_init.h"
+
void dcn302_hw_sequencer_construct(struct dc *dc)
{
dcn30_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 16e7059393fa..2e9cbfa7663b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -276,7 +276,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = false,
+ .p010 = true,
.ayuv = false,
},
.max_upscale_factor = {
@@ -1557,6 +1557,24 @@ static bool dcn302_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (ctx->dc_bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios,
+ &is_vbios_interop_enabled);
+ dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h
index a79c54bbc899..294bd757bcb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h
@@ -15,7 +15,11 @@
SR(DPPCLK_DTO_CTRL),\
DCCG_SRII(DTO_PARAM, DPPCLK, 0),\
DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
- SR(REFCLK_CNTL)
+ SR(REFCLK_CNTL),\
+ SR(DISPCLK_FREQ_CHANGE_CNTL),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1)
+
#define DCCG_MASK_SH_LIST_DCN3_03(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
@@ -25,6 +29,18 @@
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(REFCLK_CNTL, REFCLK_CLOCK_EN, mask_sh),\
- DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh)
+ DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh)
#endif //__DCN303_DCCG_H__
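Note: adding these registers to the SR()/DCCG_SRII() list and their fields to the mask/shift list is what makes them reachable through the generated register helpers. A hedged sketch of a field read, assuming it sits inside a dccg function where the driver's usual REG_GET macro and register context are in scope:

    uint32_t ramp_done = 0;

    /* Only resolves now that DISPCLK_FREQ_CHANGE_CNTL is in the register
     * list and DISPCLK_FREQ_RAMP_DONE is in the mask/shift list above. */
    REG_GET(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, &ramp_done);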
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
index aa5dbbade2bd..f499f8ab5e47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
@@ -9,6 +9,8 @@
#include "dcn30/dcn30_init.h"
#include "dc.h"
+#include "dcn303_init.h"
+
void dcn303_hw_sequencer_construct(struct dc *dc)
{
dcn30_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 87cec14b7870..2de687f64cf6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -254,7 +254,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = false,
+ .p010 = true,
.ayuv = false,
},
.max_upscale_factor = {
@@ -1500,6 +1500,23 @@ static bool dcn303_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (ctx->dc_bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
+ dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
index 815481a3ef54..ea4f8e06b07c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -462,7 +462,7 @@ void dccg31_set_physymclk(
}
/* Controls the generation of pixel valid for OTG in the OTG -> HPO case */
-void dccg31_set_dtbclk_dto(
+static void dccg31_set_dtbclk_dto(
struct dccg *dccg,
int dtbclk_inst,
int req_dtbclk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index ee6f13bef377..8b9b1a5309ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -67,6 +67,68 @@
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
+static uint8_t phy_id_from_transmitter(enum transmitter t)
+{
+ uint8_t phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ phy_id = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ phy_id = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ phy_id = 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ phy_id = 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ phy_id = 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ phy_id = 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ phy_id = 6;
+ break;
+ default:
+ phy_id = 0;
+ break;
+ }
+ return phy_id;
+}
+
+static bool has_query_dp_alt(struct link_encoder *enc)
+{
+ struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+
+ /* Supports development firmware and firmware >= 4.0.11 */
+ return dc_dmub_srv &&
+ !(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
+ dc_dmub_srv->dmub->fw_version <= DMUB_FW_VERSION(4, 0, 10));
+}
+
+static bool query_dp_alt_from_dmub(struct link_encoder *enc,
+ union dmub_rb_cmd *cmd)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
+ cmd->query_dp_alt.header.sub_type =
+ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
+ cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
+ cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+
+ if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd))
+ return false;
+
+ return true;
+}
+
void dcn31_link_encoder_set_dio_phy_mux(
struct link_encoder *enc,
enum encoder_type_select sel,
@@ -141,7 +203,7 @@ void dcn31_link_encoder_set_dio_phy_mux(
}
}
-void enc31_hw_init(struct link_encoder *enc)
+static void enc31_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
@@ -536,57 +598,90 @@ void dcn31_link_encoder_disable_output(
bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ union dmub_rb_cmd cmd;
uint32_t dp_alt_mode_disable;
- bool is_usb_c_alt_mode = false;
- if (enc->features.flags.bits.DP_IS_USB_C) {
- if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
- // [Note] no need to check hw_internal_rev once phy mux selection is ready
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
- } else {
+ /* Only applicable to USB-C PHY. */
+ if (!enc->features.flags.bits.DP_IS_USB_C)
+ return false;
+
+ /*
+ * Use the new interface from DMCUB if available.
+	 * Avoids hanging the RDPCSPIPE if DMCUB wasn't already running.
+ */
+ if (has_query_dp_alt(enc)) {
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return false;
+
+ return (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
+ }
+
+ /* Legacy path, avoid if possible. */
+ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+ &dp_alt_mode_disable);
+ } else {
/*
* B0 phys use a new set of registers to check whether alt mode is disabled.
* If the value == 1, alt mode is disabled; otherwise it is enabled.
*/
- if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
- || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
- || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
- } else {
- // [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
- }
+ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+ &dp_alt_mode_disable);
+ } else {
+ REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+ &dp_alt_mode_disable);
}
-
- is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
}
- return is_usb_c_alt_mode;
+ return (dp_alt_mode_disable == 0);
}
-void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
- struct dc_link_settings *link_settings)
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ union dmub_rb_cmd cmd;
uint32_t is_in_usb_c_dp4_mode = 0;
dcn10_link_encoder_get_max_link_cap(enc, link_settings);
- /* in usb c dp2 mode, max lane count is 2 */
- if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
- if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
- // [Note] no need to check hw_internal_rev once phy mux selection is ready
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+	/* Take the link cap directly if not USB-C. */
+ if (!enc->features.flags.bits.DP_IS_USB_C)
+ return;
+
+ /*
+ * Use the new interface from DMCUB if available.
+	 * Avoids hanging the RDPCSPIPE if DMCUB wasn't already running.
+ */
+ if (has_query_dp_alt(enc)) {
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return;
+
+ if (cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+
+ return;
+ }
+
+ /* Legacy path, avoid if possible. */
+ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+ &is_in_usb_c_dp4_mode);
+ } else {
+ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+ &is_in_usb_c_dp4_mode);
} else {
- if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
- || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
- || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
- } else {
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
- }
+ REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+ &is_in_usb_c_dp4_mode);
}
- if (!is_in_usb_c_dp4_mode)
- link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
}
+
+ if (!is_in_usb_c_dp4_mode)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
}
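Note: both call sites above share the same shape; a condensed sketch of the DMUB-first flow, using only fields visible in this patch:

    union dmub_rb_cmd cmd;

    if (has_query_dp_alt(enc) && query_dp_alt_from_dmub(enc, &cmd)) {
            /* One reply carries both alt-mode state and DP4 capability. */
            bool in_alt_mode = (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
            bool usb_c_dp4 = cmd.query_dp_alt.data.is_usb &&
                             cmd.query_dp_alt.data.is_dp4;
            /* act on in_alt_mode / usb_c_dp4 here */
    }
    /* otherwise fall back to the RDPCSTX/RDPCSPIPE register reads */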
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 6c08e21bb708..80dfaa4d4d81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -499,7 +499,8 @@ static enum bp_result link_transmitter_control(
void dcn31_hpo_dp_link_enc_enable_dp_output(
struct hpo_dp_link_encoder *enc,
const struct dc_link_settings *link_settings,
- enum transmitter transmitter)
+ enum transmitter transmitter,
+ enum hpd_source_id hpd_source)
{
struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
struct bp_transmitter_control cntl = { 0 };
@@ -508,6 +509,9 @@ void dcn31_hpo_dp_link_enc_enable_dp_output(
/* Set the transmitter */
enc3->base.transmitter = transmitter;
+ /* Set the hpd source */
+ enc3->base.hpd_source = hpd_source;
+
/* Enable the PHY */
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = ENGINE_ID_UNKNOWN;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
index 0706ccaf6fec..e324e9b83136 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
@@ -184,7 +184,8 @@ void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31,
void dcn31_hpo_dp_link_enc_enable_dp_output(
struct hpo_dp_link_encoder *enc,
const struct dc_link_settings *link_settings,
- enum transmitter transmitter);
+ enum transmitter transmitter,
+ enum hpd_source_id hpd_source);
void dcn31_hpo_dp_link_enc_disable_output(
struct hpo_dp_link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 565f12dd179a..5065904c7833 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -358,8 +358,8 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right;
v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
- hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0x80 : 0;
- vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0x80 : 0;
+ hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;
+ vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;
v_freq = hw_crtc_timing.pix_clk_100hz * 100;
/* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
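Note: in the DP MSA sync bytes, bit 7 (0x80) set denotes an active-low (negative) sync pulse per the spec's MSA layout, so positive polarity must encode as 0 — the previous mapping was inverted. Illustrative values after the fix:

    /* CTA-861 1080p60 uses +HSync/+VSync, so both bytes become 0. */
    hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;  /* 0 */
    vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;  /* 0 */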
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 4d4ac4ceb1e8..4206ce5bf9a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -112,7 +112,7 @@ void dcn31_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
- int i, j;
+ int i;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -192,50 +192,13 @@ void dcn31_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
- /* Power gate DSCs */
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
-
/* Enables outbox notifications for usb4 dpia */
if (dc->res_pool->usb4_dpia_count)
dmub_enable_outbox_notification(dc);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot) {
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
- continue;
-
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
- /* blank dp stream before power off receiver*/
- if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY &&
- dc->links[i]->link_enc->funcs->get_dig_frontend) {
- unsigned int fe;
-
- fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
- dc->links[i]->link_enc);
- if (fe == ENGINE_ID_UNKNOWN)
- continue;
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
- dp_receiver_power_ctrl(dc->links[i], false);
- }
- }
- }
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
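Note: the removed per-link loop now lives behind dc_link_blank_all_dp_displays(). Based purely on the code deleted above, its core is roughly the following (stream blanking via the DIG frontend elided for brevity; this is a sketch, not the helper's actual body):

    static void blank_all_dp_displays_sketch(struct dc *dc)
    {
            uint8_t dpcd_power_state = '\0';
            int i;

            for (i = 0; i < dc->link_count; i++) {
                    if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
                            continue;

                    /* If the sink is lit up (D0), blank it and power it down. */
                    if (core_link_read_dpcd(dc->links[i], DP_SET_POWER,
                                            &dpcd_power_state,
                                            sizeof(dpcd_power_state)) == DC_OK &&
                        dpcd_power_state == DP_POWER_STATE_D0)
                            dp_receiver_power_ctrl(dc->links[i], false);
            }
    }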
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 17e2f2bb29ec..d7559e5a99ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -31,6 +31,8 @@
#include "dcn301/dcn301_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
+#include "dcn31_init.h"
+
static const struct hw_sequencer_funcs dcn31_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn31_init_hw,
@@ -151,4 +153,9 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
dc->hwss.init_hw = dcn20_fpga_init_hw;
dc->hwseq->funcs.init_pipes = NULL;
}
+ if (dc->debug.disable_z10) {
+	/* HW does not support Z10, or SW has disabled it. */
+ dc->hwss.z10_restore = NULL;
+ dc->hwss.z10_save_init = NULL;
+ }
}
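Note: with Z10 disabled the hooks become NULL, so callers are expected to guard them, e.g.:

    if (dc->hwss.z10_restore)
            dc->hwss.z10_restore(dc);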
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index a4b1d98f0007..e8562fa11366 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -256,6 +256,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
.set_odm_bypass = optc3_set_odm_bypass,
.set_odm_combine = optc31_set_odm_combine,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 3b3721386571..83ece02380a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -65,7 +65,7 @@ static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cnt
return cmd.panel_cntl.data.current_backlight;
}
-uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
{
struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(panel_cntl);
struct dc_dmub_srv *dc_dmub_srv = panel_cntl->ctx->dmub_srv;
@@ -96,7 +96,7 @@ uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
return cmd.panel_cntl.data.current_backlight;
}
-void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl)
+static void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl)
{
struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(*panel_cntl);
@@ -104,7 +104,7 @@ void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl)
*panel_cntl = NULL;
}
-bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+static bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
{
union dmub_rb_cmd cmd;
@@ -114,7 +114,7 @@ bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
return cmd.panel_cntl.data.is_backlight_on;
}
-bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
+static bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
{
union dmub_rb_cmd cmd;
@@ -124,7 +124,7 @@ bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
return cmd.panel_cntl.data.is_powered_on;
}
-void dcn31_store_backlight_level(struct panel_cntl *panel_cntl)
+static void dcn31_store_backlight_level(struct panel_cntl *panel_cntl)
{
union dmub_rb_cmd cmd;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 27afbe6ec0fe..8d64187478e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -493,7 +493,8 @@ static const struct dcn31_apg_mask apg_mask = {
SE_DCN3_REG_LIST(id)\
}
-static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
+/* Some encoders won't be initialized here - but they're logical, not physical. */
+static const struct dcn10_stream_enc_registers stream_enc_regs[ENGINE_ID_COUNT] = {
stream_enc_regs(0),
stream_enc_regs(1),
stream_enc_regs(2),
@@ -976,7 +977,7 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = true,
.nv12 = true,
.fp16 = true,
- .p010 = false,
+ .p010 = true,
.ayuv = false,
},
@@ -1031,6 +1032,7 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.optimize_edp_link_rate = true,
.enable_sw_cntl_psr = true,
+ .apply_vendor_specific_lttpr_wa = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1278,7 +1280,7 @@ static struct link_encoder *dcn31_link_enc_create_minimal(
return &enc20->enc10.base;
}
-struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dcn31_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL);
@@ -1782,6 +1784,7 @@ static int dcn31_populate_dml_pipes_from_context(
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
+ bool upscaled = false;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
@@ -1793,6 +1796,11 @@ static int dcn31_populate_dml_pipes_from_context(
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
/*
* Immediate flip can be set dynamically after enabling the plane.
* We need to require support for immediate flip or underflow can be
@@ -1837,6 +1845,11 @@ static int dcn31_populate_dml_pipes_from_context(
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
pipes[0].pipe.src.unbounded_req_mode = true;
}
+ } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
+ && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
}
return pipe_cnt;
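Note: the CRB policy above sizes the DET in 64 KB segments; illustrative arithmetic only (the policy value 3 is a made-up example, and DET_SIZE_DEFAULT is assumed smaller):

    /* crb_alloc_policy == 3  =>  det_buffer_size_kbytes = 3 * 64 = 192,
     * the same DET size the >= 3-stream upscaled fallback hard-codes. */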
@@ -1971,7 +1984,7 @@ static void dcn31_calculate_wm_and_dlg_fp(
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- if (dc->config.forced_clocks) {
+ if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
@@ -2207,6 +2220,8 @@ static bool dcn31_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
+ dc->caps.hdmi_frl_pcon_support = true;
+ dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
@@ -2245,6 +2260,9 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+	/* Use pipe-context-based OTG sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 511f9e1159c7..4229369c57f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -34,12 +34,12 @@ struct cp_psp_stream_config {
uint8_t dig_fe;
uint8_t link_enc_idx;
uint8_t stream_enc_idx;
- uint8_t phy_idx;
uint8_t dio_output_idx;
- uint8_t dio_output_type;
+ uint8_t phy_idx;
uint8_t assr_enabled;
uint8_t mst_enabled;
uint8_t dp2_enabled;
+ uint8_t usb4_enabled;
void *dm_stream_ctx;
bool dpms_off;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 0fe66b080a03..7f94e3f70d7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -59,7 +59,7 @@ void dm_helpers_free_gpu_mem(
void *pvMem);
enum dc_edid_status dm_helpers_parse_edid_caps(
- struct dc_context *ctx,
+ struct dc_link *link,
const struct dc_edid *edid,
struct dc_edid_caps *edid_caps);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 246071c72f6b..548cdef8a8ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -1576,8 +1576,6 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 015e7f2c0b16..0fc9f3e3ffae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -1577,8 +1577,6 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 46c433c0bcb0..618f4b682ab1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -1688,8 +1688,6 @@ void dml21_rq_dlg_get_dlg_reg(
mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
@@ -1711,14 +1709,6 @@ void dml21_rq_dlg_get_dlg_reg(
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
-void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param)
-{
- memset(arb_param, 0, sizeof(*arb_param));
- arb_param->max_req_outstanding = 256;
- arb_param->min_req_outstanding = 68;
- arb_param->sat_level_us = 60;
-}
-
static void calculate_ttu_cursor(
struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index aef854270054..747167083dea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -1858,8 +1858,6 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 94c32832a0e7..0a7a33864973 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -327,7 +327,7 @@ void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
}
-void dcn301_calculate_wm_and_dlg(struct dc *dc,
+void dcn301_calculate_wm_and_dlg_fp(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
index fc7065d17842..774b0fdfc80b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
@@ -34,7 +34,7 @@ void dcn301_fpu_set_wm_ranges(int i,
void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info);
-void dcn301_calculate_wm_and_dlg(struct dc *dc,
+void dcn301_calculate_wm_and_dlg_fp(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 7e937bdcea00..6feb23432f8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -422,62 +422,8 @@ static void CalculateUrgentBurstFactor(
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
- int MaxInterDCNTileRepeaters,
int MaxPrefetchMode,
- double FinalDRAMClockChangeLatency,
- double SREnterPlusExitTime,
- int ReturnBusWidth,
- int RoundTripPingLatencyCycles,
- int ReorderingBytes,
- int PixelChunkSizeInKByte,
- int MetaChunkSize,
- bool GPUVMEnable,
- int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- int NumberOfActivePlanes,
- double HostVMMinPageSize,
- int HostVMMaxNonCachedPageTableLevels,
- bool DynamicMetadataVMEnabled,
- enum immediate_flip_requirement ImmediateFlipRequirement,
- bool ProgressiveToInterlaceUnitInOPP,
- double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation,
- double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency,
- int VTotal[],
- int VActive[],
- int DynamicMetadataTransmittedBytes[],
- int DynamicMetadataLinesBeforeActiveRequired[],
- bool Interlace[],
- double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
- double RequiredDISPCLK[][2],
- double UrgLatency[],
- unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
- double ProjectedDCFCLKDeepSleep[][2],
- double MaximumVStartup[][2][DC__NUM_DPP__MAX],
- double TotalVActivePixelBandwidth[][2],
- double TotalVActiveCursorBandwidth[][2],
- double TotalMetaRowBandwidth[][2],
- double TotalDPTERowBandwidth[][2],
- unsigned int TotalNumberOfActiveDPP[][2],
- unsigned int TotalNumberOfDCCActiveDPP[][2],
- int dpte_group_bytes[],
- double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
- double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
- int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int BytePerPixelY[],
- int BytePerPixelC[],
- int HTotal[],
- double PixelClock[],
- double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
- double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
- double MetaRowBytes[][2][DC__NUM_DPP__MAX],
- bool DynamicMetadataEnable[],
- double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
- double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double DCFCLKPerState[],
- double DCFCLKState[][2]);
+ int ReorderingBytes);
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
@@ -3949,6 +3895,102 @@ static double TruncToValidBPP(
return BPP_INVALID;
}
+static noinline void CalculatePrefetchSchedulePerPlane(
+ struct display_mode_lib *mode_lib,
+ double HostVMInefficiencyFactor,
+ int i,
+ unsigned j,
+ unsigned k)
+{
+ struct vba_vars_st *v = &mode_lib->vba;
+ Pipe myPipe;
+
+ myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k];
+ myPipe.DISPCLK = v->RequiredDISPCLK[i][j];
+ myPipe.PixelClock = v->PixelClock[k];
+ myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j];
+ myPipe.DPPPerPlane = v->NoOfDPP[i][j][k];
+ myPipe.ScalerEnabled = v->ScalerEnabled[k];
+ myPipe.SourceScan = v->SourceScan[k];
+ myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k];
+ myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k];
+ myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k];
+ myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k];
+ myPipe.InterlaceEnable = v->Interlace[k];
+ myPipe.NumberOfCursors = v->NumberOfCursors[k];
+ myPipe.VBlank = v->VTotal[k] - v->VActive[k];
+ myPipe.HTotal = v->HTotal[k];
+ myPipe.DCCEnable = v->DCCEnable[k];
+ myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
+ || v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1;
+ myPipe.SourcePixelFormat = v->SourcePixelFormat[k];
+ myPipe.BytePerPixelY = v->BytePerPixelY[k];
+ myPipe.BytePerPixelC = v->BytePerPixelC[k];
+ myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP;
+ v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
+ mode_lib,
+ HostVMInefficiencyFactor,
+ &myPipe,
+ v->DSCDelayPerState[i][k],
+ v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
+ v->DPPCLKDelaySCL,
+ v->DPPCLKDelaySCLLBOnly,
+ v->DPPCLKDelayCNVCCursor,
+ v->DISPCLKDelaySubtotal,
+ v->SwathWidthYThisState[k] / v->HRatio[k],
+ v->OutputFormat[k],
+ v->MaxInterDCNTileRepeaters,
+ dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
+ v->MaximumVStartup[i][j][k],
+ v->GPUVMMaxPageTableLevels,
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->HostVMMinPageSize,
+ v->DynamicMetadataEnable[k],
+ v->DynamicMetadataVMEnabled,
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
+ v->DynamicMetadataTransmittedBytes[k],
+ v->UrgLatency[i],
+ v->ExtraLatency,
+ v->TimeCalc,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+ v->DPTEBytesPerRow[i][j][k],
+ v->PrefetchLinesY[i][j][k],
+ v->SwathWidthYThisState[k],
+ v->PrefillY[k],
+ v->MaxNumSwY[k],
+ v->PrefetchLinesC[i][j][k],
+ v->SwathWidthCThisState[k],
+ v->PrefillC[k],
+ v->MaxNumSwC[k],
+ v->swath_width_luma_ub_this_state[k],
+ v->swath_width_chroma_ub_this_state[k],
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k],
+ v->TWait,
+ &v->DSTXAfterScaler[k],
+ &v->DSTYAfterScaler[k],
+ &v->LineTimesForPrefetch[k],
+ &v->PrefetchBW[k],
+ &v->LinesForMetaPTE[k],
+ &v->LinesForMetaAndDPTERow[k],
+ &v->VRatioPreY[i][j][k],
+ &v->VRatioPreC[i][j][k],
+ &v->RequiredPrefetchPixelDataBWLuma[i][j][k],
+ &v->RequiredPrefetchPixelDataBWChroma[i][j][k],
+ &v->NoTimeForDynamicMetadata[i][j][k],
+ &v->Tno_bw[k],
+ &v->prefetch_vmrow_bw[k],
+ &v->dummy7[k],
+ &v->dummy8[k],
+ &v->dummy13[k],
+ &v->VUpdateOffsetPix[k],
+ &v->VUpdateWidthPix[k],
+ &v->VReadyOffsetPix[k]);
+}
+
void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
@@ -5079,66 +5121,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- if (v->UseMinimumRequiredDCFCLK == true) {
- UseMinimumDCFCLK(
- mode_lib,
- v->MaxInterDCNTileRepeaters,
- MaxPrefetchMode,
- v->DRAMClockChangeLatency,
- v->SREnterPlusExitTime,
- v->ReturnBusWidth,
- v->RoundTripPingLatencyCycles,
- ReorderingBytes,
- v->PixelChunkSizeInKByte,
- v->MetaChunkSize,
- v->GPUVMEnable,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->NumberOfActivePlanes,
- v->HostVMMinPageSize,
- v->HostVMMaxNonCachedPageTableLevels,
- v->DynamicMetadataVMEnabled,
- v->ImmediateFlipRequirement[0],
- v->ProgressiveToInterlaceUnitInOPP,
- v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation,
- v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency,
- v->VTotal,
- v->VActive,
- v->DynamicMetadataTransmittedBytes,
- v->DynamicMetadataLinesBeforeActiveRequired,
- v->Interlace,
- v->RequiredDPPCLK,
- v->RequiredDISPCLK,
- v->UrgLatency,
- v->NoOfDPP,
- v->ProjectedDCFCLKDeepSleep,
- v->MaximumVStartup,
- v->TotalVActivePixelBandwidth,
- v->TotalVActiveCursorBandwidth,
- v->TotalMetaRowBandwidth,
- v->TotalDPTERowBandwidth,
- v->TotalNumberOfActiveDPP,
- v->TotalNumberOfDCCActiveDPP,
- v->dpte_group_bytes,
- v->PrefetchLinesY,
- v->PrefetchLinesC,
- v->swath_width_luma_ub_all_states,
- v->swath_width_chroma_ub_all_states,
- v->BytePerPixelY,
- v->BytePerPixelC,
- v->HTotal,
- v->PixelClock,
- v->PDEAndMetaPTEBytesPerFrame,
- v->DPTEBytesPerRow,
- v->MetaRowBytes,
- v->DynamicMetadataEnable,
- v->VActivePixelBandwidth,
- v->VActiveCursorBandwidth,
- v->ReadBandwidthLuma,
- v->ReadBandwidthChroma,
- v->DCFCLKPerState,
- v->DCFCLKState);
- }
+ if (v->UseMinimumRequiredDCFCLK == true)
+ UseMinimumDCFCLK(mode_lib, MaxPrefetchMode, ReorderingBytes);
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
@@ -5276,92 +5260,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SREnterPlusExitTime);
for (k = 0; k < v->NumberOfActivePlanes; k++) {
- Pipe myPipe;
-
- myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k];
- myPipe.DISPCLK = v->RequiredDISPCLK[i][j];
- myPipe.PixelClock = v->PixelClock[k];
- myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j];
- myPipe.DPPPerPlane = v->NoOfDPP[i][j][k];
- myPipe.ScalerEnabled = v->ScalerEnabled[k];
- myPipe.SourceScan = v->SourceScan[k];
- myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k];
- myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k];
- myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k];
- myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k];
- myPipe.InterlaceEnable = v->Interlace[k];
- myPipe.NumberOfCursors = v->NumberOfCursors[k];
- myPipe.VBlank = v->VTotal[k] - v->VActive[k];
- myPipe.HTotal = v->HTotal[k];
- myPipe.DCCEnable = v->DCCEnable[k];
- myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
- || v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1;
- myPipe.SourcePixelFormat = v->SourcePixelFormat[k];
- myPipe.BytePerPixelY = v->BytePerPixelY[k];
- myPipe.BytePerPixelC = v->BytePerPixelC[k];
- myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP;
- v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
- mode_lib,
- HostVMInefficiencyFactor,
- &myPipe,
- v->DSCDelayPerState[i][k],
- v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
- v->DPPCLKDelaySCL,
- v->DPPCLKDelaySCLLBOnly,
- v->DPPCLKDelayCNVCCursor,
- v->DISPCLKDelaySubtotal,
- v->SwathWidthYThisState[k] / v->HRatio[k],
- v->OutputFormat[k],
- v->MaxInterDCNTileRepeaters,
- dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
- v->MaximumVStartup[i][j][k],
- v->GPUVMMaxPageTableLevels,
- v->GPUVMEnable,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->HostVMMinPageSize,
- v->DynamicMetadataEnable[k],
- v->DynamicMetadataVMEnabled,
- v->DynamicMetadataLinesBeforeActiveRequired[k],
- v->DynamicMetadataTransmittedBytes[k],
- v->UrgLatency[i],
- v->ExtraLatency,
- v->TimeCalc,
- v->PDEAndMetaPTEBytesPerFrame[i][j][k],
- v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->PrefetchLinesY[i][j][k],
- v->SwathWidthYThisState[k],
- v->PrefillY[k],
- v->MaxNumSwY[k],
- v->PrefetchLinesC[i][j][k],
- v->SwathWidthCThisState[k],
- v->PrefillC[k],
- v->MaxNumSwC[k],
- v->swath_width_luma_ub_this_state[k],
- v->swath_width_chroma_ub_this_state[k],
- v->SwathHeightYThisState[k],
- v->SwathHeightCThisState[k],
- v->TWait,
- &v->DSTXAfterScaler[k],
- &v->DSTYAfterScaler[k],
- &v->LineTimesForPrefetch[k],
- &v->PrefetchBW[k],
- &v->LinesForMetaPTE[k],
- &v->LinesForMetaAndDPTERow[k],
- &v->VRatioPreY[i][j][k],
- &v->VRatioPreC[i][j][k],
- &v->RequiredPrefetchPixelDataBWLuma[i][j][k],
- &v->RequiredPrefetchPixelDataBWChroma[i][j][k],
- &v->NoTimeForDynamicMetadata[i][j][k],
- &v->Tno_bw[k],
- &v->prefetch_vmrow_bw[k],
- &v->dummy7[k],
- &v->dummy8[k],
- &v->dummy13[k],
- &v->VUpdateOffsetPix[k],
- &v->VUpdateWidthPix[k],
- &v->VReadyOffsetPix[k]);
+ CalculatePrefetchSchedulePerPlane(mode_lib,
+ HostVMInefficiencyFactor,
+ i, j, k);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -7249,69 +7150,15 @@ static double CalculateUrgentLatency(
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
- int MaxInterDCNTileRepeaters,
int MaxPrefetchMode,
- double FinalDRAMClockChangeLatency,
- double SREnterPlusExitTime,
- int ReturnBusWidth,
- int RoundTripPingLatencyCycles,
- int ReorderingBytes,
- int PixelChunkSizeInKByte,
- int MetaChunkSize,
- bool GPUVMEnable,
- int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- int NumberOfActivePlanes,
- double HostVMMinPageSize,
- int HostVMMaxNonCachedPageTableLevels,
- bool DynamicMetadataVMEnabled,
- enum immediate_flip_requirement ImmediateFlipRequirement,
- bool ProgressiveToInterlaceUnitInOPP,
- double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation,
- double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency,
- int VTotal[],
- int VActive[],
- int DynamicMetadataTransmittedBytes[],
- int DynamicMetadataLinesBeforeActiveRequired[],
- bool Interlace[],
- double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
- double RequiredDISPCLK[][2],
- double UrgLatency[],
- unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
- double ProjectedDCFCLKDeepSleep[][2],
- double MaximumVStartup[][2][DC__NUM_DPP__MAX],
- double TotalVActivePixelBandwidth[][2],
- double TotalVActiveCursorBandwidth[][2],
- double TotalMetaRowBandwidth[][2],
- double TotalDPTERowBandwidth[][2],
- unsigned int TotalNumberOfActiveDPP[][2],
- unsigned int TotalNumberOfDCCActiveDPP[][2],
- int dpte_group_bytes[],
- double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
- double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
- int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int BytePerPixelY[],
- int BytePerPixelC[],
- int HTotal[],
- double PixelClock[],
- double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
- double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
- double MetaRowBytes[][2][DC__NUM_DPP__MAX],
- bool DynamicMetadataEnable[],
- double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
- double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double DCFCLKPerState[],
- double DCFCLKState[][2])
+ int ReorderingBytes)
{
struct vba_vars_st *v = &mode_lib->vba;
int dummy1, i, j, k;
double NormalEfficiency, dummy2, dummy3;
double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2];
- NormalEfficiency = PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0;
+ NormalEfficiency = v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0;
for (i = 0; i < v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX];
@@ -7329,61 +7176,61 @@ static void UseMinimumDCFCLK(
double MinimumTvmPlus2Tr0;
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j]
- + NoOfDPP[i][j][k] * DPTEBytesPerRow[i][j][k] / (15.75 * HTotal[k] / PixelClock[k]);
+ + v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
- NoOfDPPState[k] = NoOfDPP[i][j][k];
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
+ NoOfDPPState[k] = v->NoOfDPP[i][j][k];
}
- MinimumTWait = CalculateTWait(MaxPrefetchMode, FinalDRAMClockChangeLatency, UrgLatency[i], SREnterPlusExitTime);
- NonDPTEBandwidth = TotalVActivePixelBandwidth[i][j] + TotalVActiveCursorBandwidth[i][j] + TotalMetaRowBandwidth[i][j];
- DPTEBandwidth = (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) ?
- TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : TotalDPTERowBandwidth[i][j];
+ MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime);
+ NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j];
+ DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ?
+ TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j];
DCFCLKRequiredForAverageBandwidth = dml_max3(
- ProjectedDCFCLKDeepSleep[i][j],
- (NonDPTEBandwidth + TotalDPTERowBandwidth[i][j]) / ReturnBusWidth
- / (MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
- (NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / ReturnBusWidth);
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ (NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth
+ / (v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
+ (NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / v->ReturnBusWidth);
ExtraLatencyBytes = CalculateExtraLatencyBytes(
ReorderingBytes,
- TotalNumberOfActiveDPP[i][j],
- PixelChunkSizeInKByte,
- TotalNumberOfDCCActiveDPP[i][j],
- MetaChunkSize,
- GPUVMEnable,
- HostVMEnable,
- NumberOfActivePlanes,
+ v->TotalNumberOfActiveDPP[i][j],
+ v->PixelChunkSizeInKByte,
+ v->TotalNumberOfDCCActiveDPP[i][j],
+ v->MetaChunkSize,
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->NumberOfActivePlanes,
NoOfDPPState,
- dpte_group_bytes,
+ v->dpte_group_bytes,
1,
- HostVMMinPageSize,
- HostVMMaxNonCachedPageTableLevels);
- ExtraLatencyCycles = RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / ReturnBusWidth;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ v->HostVMMinPageSize,
+ v->HostVMMaxNonCachedPageTableLevels);
+ ExtraLatencyCycles = v->RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double DCFCLKCyclesRequiredInPrefetch;
double ExpectedPrefetchBWAcceleration;
double PrefetchTime;
- PixelDCFCLKCyclesRequiredInPrefetch[k] = (PrefetchLinesY[i][j][k] * swath_width_luma_ub_all_states[i][j][k] * BytePerPixelY[k]
- + PrefetchLinesC[i][j][k] * swath_width_chroma_ub_all_states[i][j][k] * BytePerPixelC[k]) / NormalEfficiency / ReturnBusWidth;
+ PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k]
+ + v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth;
DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k]
- + PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / ReturnBusWidth * (GPUVMMaxPageTableLevels > 2 ? 1 : 0)
- + 2 * DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / ReturnBusWidth
- + 2 * MetaRowBytes[i][j][k] / NormalEfficiency / ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
- PrefetchPixelLinesTime[k] = dml_max(PrefetchLinesY[i][j][k], PrefetchLinesC[i][j][k]) * HTotal[k] / PixelClock[k];
- ExpectedPrefetchBWAcceleration = (VActivePixelBandwidth[i][j][k] + VActiveCursorBandwidth[i][j][k])
- / (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
+ + v->PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 1 : 0)
+ + 2 * v->DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth
+ + 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
+ PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k];
+ ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k])
+ / (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]);
DynamicMetadataVMExtraLatency[k] =
- (GPUVMEnable == true && DynamicMetadataEnable[k] == true && DynamicMetadataVMEnabled == true) ?
- UrgLatency[i] * GPUVMMaxPageTableLevels * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
- PrefetchTime = (MaximumVStartup[i][j][k] - 1) * HTotal[k] / PixelClock[k] - MinimumTWait
- - UrgLatency[i]
- * ((GPUVMMaxPageTableLevels <= 2 ? GPUVMMaxPageTableLevels : GPUVMMaxPageTableLevels - 2)
- * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1)
+ (v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ?
+ v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
+ PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait
+ - v->UrgLatency[i]
+ * ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels : v->GPUVMMaxPageTableLevels - 2)
+ * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1)
- DynamicMetadataVMExtraLatency[k];
if (PrefetchTime > 0) {
@@ -7392,14 +7239,14 @@ static void UseMinimumDCFCLK(
/ (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch);
DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k]
* dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration;
- if (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) {
+ if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k]
- + NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / ReturnBusWidth;
+ + NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth;
}
} else {
- DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
- if (DynamicMetadataEnable[k] == true) {
+ if (v->DynamicMetadataEnable[k] == true) {
double TSetupPipe;
double TdmbfPipe;
double TdmsksPipe;
@@ -7407,17 +7254,17 @@ static void UseMinimumDCFCLK(
double AllowedTimeForUrgentExtraLatency;
CalculateVupdateAndDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
- RequiredDPPCLK[i][j][k],
- RequiredDISPCLK[i][j],
- ProjectedDCFCLKDeepSleep[i][j],
- PixelClock[k],
- HTotal[k],
- VTotal[k] - VActive[k],
- DynamicMetadataTransmittedBytes[k],
- DynamicMetadataLinesBeforeActiveRequired[k],
- Interlace[k],
- ProgressiveToInterlaceUnitInOPP,
+ v->MaxInterDCNTileRepeaters,
+ v->RequiredDPPCLK[i][j][k],
+ v->RequiredDISPCLK[i][j],
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->PixelClock[k],
+ v->HTotal[k],
+ v->VTotal[k] - v->VActive[k],
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
+ v->Interlace[k],
+ v->ProgressiveToInterlaceUnitInOPP,
&TSetupPipe,
&TdmbfPipe,
&TdmecPipe,
@@ -7425,31 +7272,31 @@ static void UseMinimumDCFCLK(
&dummy1,
&dummy2,
&dummy3);
- AllowedTimeForUrgentExtraLatency = MaximumVStartup[i][j][k] * HTotal[k] / PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe
+ AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe
- TdmsksPipe - DynamicMetadataVMExtraLatency[k];
if (AllowedTimeForUrgentExtraLatency > 0) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max(
DCFCLKRequiredForPeakBandwidthPerPlane[k],
ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);
} else {
- DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
}
}
DCFCLKRequiredForPeakBandwidth = 0;
- for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k];
}
- MinimumTvmPlus2Tr0 = UrgLatency[i]
- * (GPUVMEnable == true ?
- (HostVMEnable == true ?
- (GPUVMMaxPageTableLevels + 2) * (HostVMMaxNonCachedPageTableLevels + 1) - 1 : GPUVMMaxPageTableLevels + 1) :
+ MinimumTvmPlus2Tr0 = v->UrgLatency[i]
+ * (v->GPUVMEnable == true ?
+ (v->HostVMEnable == true ?
+ (v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) :
0);
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double MaximumTvmPlus2Tr0PlusTsw;
- MaximumTvmPlus2Tr0PlusTsw = (MaximumVStartup[i][j][k] - 2) * HTotal[k] / PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
+ MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) {
- DCFCLKRequiredForPeakBandwidth = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i];
} else {
DCFCLKRequiredForPeakBandwidth = dml_max3(
DCFCLKRequiredForPeakBandwidth,
@@ -7457,7 +7304,7 @@ static void UseMinimumDCFCLK(
(2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));
}
}
- DCFCLKState[i][j] = dml_min(DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
+ v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
}
}
}
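Note: the diff above collapses UseMinimumDCFCLK() from roughly fifty parameters to three by reading shared state through the VBA context, the same pattern the new CalculatePrefetchSchedulePerPlane() helper uses. The resulting shape, in outline:

    static void UseMinimumDCFCLK(struct display_mode_lib *mode_lib,
                                 int MaxPrefetchMode, int ReorderingBytes)
    {
            struct vba_vars_st *v = &mode_lib->vba;

            /* Former parameters are now read through v->..., e.g.: */
            double NormalEfficiency =
                    v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0;
            /* ... */
    }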
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 6905ef1e75a6..d76251fd1566 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -73,6 +73,7 @@ struct display_mode_lib {
struct vba_vars_st vba;
struct dal_logger *logger;
struct dml_funcs funcs;
+ struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6];
};
void dml_init_instance(struct display_mode_lib *lib,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index d46a2733024c..8f9f1d607f7c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -546,7 +546,6 @@ struct _vcs_dpi_display_dlg_sys_params_st {
double t_sr_wm_us;
double t_extra_us;
double mem_trip_us;
- double t_srx_delay_us;
double deepsleep_dcfclk_mhz;
double total_flip_bw;
unsigned int total_flip_bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 71ea503cb32f..412e75eb4704 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -142,9 +142,6 @@ void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _v
dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us);
dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us);
dml_print(
- "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n",
- dlg_sys_param->t_srx_delay_us);
- dml_print(
"DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n",
dlg_sys_param->deepsleep_dcfclk_mhz);
dml_print(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 59dc2c5b58dd..3df559c591f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -1331,10 +1331,6 @@ void dml1_rq_dlg_get_dlg_params(
if (dual_plane)
DTRACE("DLG: %s: swath_height_c = %d", __func__, swath_height_c);
- DTRACE(
- "DLG: %s: t_srx_delay_us = %3.2f",
- __func__,
- (double) dlg_sys_param->t_srx_delay_us);
DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us);
DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset);
DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
new file mode 100644
index 000000000000..789f7562cdc7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
@@ -0,0 +1,1889 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dml_wrapper.h"
+#include "resource.h"
+#include "core_types.h"
+#include "dsc.h"
+#include "clk_mgr.h"
+
+#ifndef DC_LOGGER_INIT
+#define DC_LOGGER_INIT
+#undef DC_LOG_WARNING
+#define DC_LOG_WARNING
+#endif
+
+#define DML_WRAPPER_TRANSLATION_
+#include "dml_wrapper_translation.c"
+#undef DML_WRAPPER_TRANSLATION_
+
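+/* Video surface formats and RGBE+alpha carry a second (chroma/alpha) plane. */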
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+static void build_clamping_params(struct dc_stream_state *stream)
+{
+ stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
+ stream->clamping.c_depth = stream->timing.display_color_depth;
+ stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
+}
+
+static void get_pixel_clock_parameters(
+ const struct pipe_ctx *pipe_ctx,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+
+	/* TODO: Is this halved for YCbCr 4:2:0? In that case we might want to move
+	 * the pixel clock normalization for HDMI up to here instead of doing it
+	 * in pll_adjust_pix_clk.
+	 */
+ pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
+ pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
+ pixel_clk_params->signal_type = pipe_ctx->stream->signal;
+ pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
+	/* TODO: un-hardcode */
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+ pixel_clk_params->flags.ENABLE_SS = 0;
+ pixel_clk_params->color_depth =
+ stream->timing.display_color_depth;
+ pixel_clk_params->flags.DISPLAY_BLANKED = 1;
+ pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding ==
+ PIXEL_ENCODING_YCBCR420);
+ pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
+	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+		pixel_clk_params->color_depth = COLOR_DEPTH_888;
+	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+		pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2;
+	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+		pixel_clk_params->requested_pix_clk_100hz *= 2;
+}
+
+/* Forward declaration: defined below, first used by build_pipe_hw_param(). */
+static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+		struct bit_depth_reduction_params *fmt_bit_depth);
+
+static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+{
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+
+ if (pipe_ctx->clock_source)
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings);
+
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ build_clamping_params(pipe_ctx->stream);
+
+ return DC_OK;
+}
+
+static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ struct bit_depth_reduction_params *fmt_bit_depth)
+{
+ enum dc_dither_option option = stream->dither_option;
+ enum dc_pixel_encoding pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
+
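+	/* DEFAULT maps to spatial dithering at the stream's color depth, or disable. */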
+ if (option == DITHER_OPTION_DEFAULT) {
+ switch (stream->timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ option = DITHER_OPTION_SPATIAL6;
+ break;
+ case COLOR_DEPTH_888:
+ option = DITHER_OPTION_SPATIAL8;
+ break;
+ case COLOR_DEPTH_101010:
+ option = DITHER_OPTION_SPATIAL10;
+ break;
+ default:
+ option = DITHER_OPTION_DISABLE;
+ }
+ }
+
+ if (option == DITHER_OPTION_DISABLE)
+ return;
+
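+	/* Truncation: TRUNCATE_DEPTH encodes the target depth (0 = 6 bpc, 1 = 8 bpc, 2 = 10 bpc). */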
+ if (option == DITHER_OPTION_TRUN6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
+ } else if (option == DITHER_OPTION_TRUN8 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
+ } else if (option == DITHER_OPTION_TRUN10 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ }
+
+ /* special case - Formatter can only reduce by 4 bits at most.
+ * When reducing from 12 to 6 bits,
+ * HW recommends we use trunc with round mode
+ * (if we did nothing, trunc to 10 bits would be used)
+ * note that any 12->10 bit reduction is ignored prior to DCE8,
+ * as the input was 10 bits.
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ fmt_bit_depth->flags.TRUNCATE_MODE = 1;
+ }
+
+ /* spatial dither
+ * note that spatial modes 1-3 are never used
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL10 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ }
+
+ if (option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL10) {
+ fmt_bit_depth->flags.FRAME_RANDOM = 0;
+ } else {
+ fmt_bit_depth->flags.FRAME_RANDOM = 1;
+ }
+
+	/* temporal dither */
+ if (option == DITHER_OPTION_FM6 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_SPATIAL10_FM6 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
+ } else if (option == DITHER_OPTION_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM8) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
+ } else if (option == DITHER_OPTION_FM10) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
+ }
+
+ fmt_bit_depth->pixel_encoding = pixel_encoding;
+}
+
+bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+{
+ int i;
+
+ /* Validate DSC config, dsc count validation is already done */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dsc_config dsc_cfg;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ /* Only need to validate top pipe */
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
+ continue;
+
+ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
+ + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
+ + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe != NULL;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+
+ if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
+ return false;
+ }
+ return true;
+}
+
+enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
+{
+ enum dc_status status = DC_OK;
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ status = build_pipe_hw_param(pipe_ctx);
+
+ return status;
+}
+
+void dml_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct display_stream_compressor **dsc,
+ int pipe_idx)
+{
+ int i;
+ const struct resource_pool *pool = dc->res_pool;
+ struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
+
+ ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
+ *dsc = NULL;
+
+ /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
+ if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+ *dsc = pool->dscs[pipe_idx];
+ res_ctx->is_dsc_acquired[pipe_idx] = true;
+ return;
+ }
+
+	/* Return the old DSC to avoid having to redo the assignment */
+	if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+		*dsc = dsc_old;
+		res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+		return;
+	}
+ }
+
+ /* Find first free DSC */
+ for (i = 0; i < pool->res_cap->num_dsc; i++)
+ if (!res_ctx->is_dsc_acquired[i]) {
+ *dsc = pool->dscs[i];
+ res_ctx->is_dsc_acquired[i] = true;
+ break;
+ }
+}
+
+static bool dml_split_stream_for_mpc_or_odm(
+ const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm)
+{
+ int pipe_idx = sec_pipe->pipe_idx;
+ const struct resource_pool *pool = dc->res_pool;
+
+ *sec_pipe = *pri_pipe;
+
+ sec_pipe->pipe_idx = pipe_idx;
+ sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+ sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+ sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+ sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+ sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+ sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+ sec_pipe->stream_res.dsc = NULL;
+ if (odm) {
+ if (pri_pipe->next_odm_pipe) {
+ ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
+ sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
+ sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
+ }
+ if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
+ pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
+ sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
+ }
+ if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
+ pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
+ sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
+ }
+ pri_pipe->next_odm_pipe = sec_pipe;
+ sec_pipe->prev_odm_pipe = pri_pipe;
+ ASSERT(sec_pipe->top_pipe == NULL);
+
+ if (!sec_pipe->top_pipe)
+ sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+ else
+ sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+ if (sec_pipe->stream->timing.flags.DSC == 1) {
+ dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+ ASSERT(sec_pipe->stream_res.dsc);
+ if (sec_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+ } else {
+ if (pri_pipe->bottom_pipe) {
+ ASSERT(pri_pipe->bottom_pipe != sec_pipe);
+ sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
+ sec_pipe->bottom_pipe->top_pipe = sec_pipe;
+ }
+ pri_pipe->bottom_pipe = sec_pipe;
+ sec_pipe->top_pipe = pri_pipe;
+
+ ASSERT(pri_pipe->plane_state);
+ }
+
+ return true;
+}
+
+static struct pipe_ctx *dml_find_split_pipe(
+ struct dc *dc,
+ struct dc_state *context,
+ int old_index)
+{
+ struct pipe_ctx *pipe = NULL;
+ int i;
+
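+	/*
+	 * Preference order: the caller-suggested old index first, then a free
+	 * pipe that was a head pipe in the current state, then any free pipe.
+	 */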
+ if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[old_index];
+ pipe->pipe_idx = old_index;
+ }
+
+ if (!pipe)
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
+ if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ pipe->pipe_idx = i;
+ break;
+ }
+ }
+ }
+
+ /*
+ * May need to fix pipes getting tossed from 1 opp to another on flip
+ * Add for debugging transient underflow during topology updates:
+ * ASSERT(pipe);
+ */
+ if (!pipe)
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ pipe->pipe_idx = i;
+ break;
+ }
+ }
+
+ return pipe;
+}
+
+static void dml_release_dsc(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct display_stream_compressor **dsc)
+{
+ int i;
+
+ for (i = 0; i < pool->res_cap->num_dsc; i++)
+ if (pool->dscs[i] == *dsc) {
+ res_ctx->is_dsc_acquired[i] = false;
+ *dsc = NULL;
+ break;
+ }
+}
+
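+/* Count the additional pipes above and below that blend the same plane. */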
+static int dml_get_num_mpc_splits(struct pipe_ctx *pipe)
+{
+ int mpc_split_count = 0;
+ struct pipe_ctx *other_pipe = pipe->bottom_pipe;
+
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->bottom_pipe;
+ }
+ other_pipe = pipe->top_pipe;
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->top_pipe;
+ }
+
+ return mpc_split_count;
+}
+
+static bool dml_enough_pipes_for_subvp(struct dc *dc,
+ struct dc_state *context)
+{
+ int i = 0;
+ int num_pipes = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state)
+ num_pipes++;
+ }
+
+ // Sub-VP only possible if the number of "real" pipes is
+ // less than or equal to half the number of available pipes
+ if (num_pipes * 2 > dc->res_pool->pipe_count)
+ return false;
+
+ return true;
+}
+
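+/*
+ * Decide per-pipe split/merge actions: split[i] holds the desired way-count
+ * (0 = none, 2 = two-way, 4 = four-way) and merge[i] marks pipes that should
+ * fold back into their parent. Returns the (possibly adjusted) voltage level.
+ */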
+static int dml_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ int *split,
+ bool *merge)
+{
+ int i, pipe_idx, vlevel_split;
+ int plane_count = 0;
+ bool force_split = false;
+ bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
+ struct vba_vars_st *v = &context->bw_ctx.dml.vba;
+ int max_mpc_comb = v->maxMpcComb;
+
+ if (context->stream_count > 1) {
+ if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
+ avoid_split = true;
+ } else if (dc->debug.force_single_disp_pipe_split)
+ force_split = true;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		/*
+ * Workaround for avoiding pipe-split in cases where we'd split
+ * planes that are too small, resulting in splits that aren't
+ * valid for the scaler.
+ */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16))
+ avoid_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
+ if (pipe->stream && !pipe->prev_odm_pipe &&
+ (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+ ++plane_count;
+ }
+ if (plane_count > dc->res_pool->pipe_count / 2)
+ avoid_split = true;
+
+ /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct dc_crtc_timing timing;
+
+		if (!pipe->stream)
+			continue;
+
+		timing = pipe->stream->timing;
+		if (timing.h_border_left + timing.h_border_right
+				+ timing.v_border_top + timing.v_border_bottom > 0) {
+			avoid_split = true;
+			break;
+		}
+ }
+
+	/* The avoid-split loop looks for the lowest voltage level that leaves the most pipes unsplit */
+ if (avoid_split) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
+ if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+ v->ModeSupport[vlevel][0])
+ break;
+ /* Impossible to not split this pipe */
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ vlevel = vlevel_split;
+ else
+ max_mpc_comb = 0;
+ pipe_idx++;
+ }
+ v->maxMpcComb = max_mpc_comb;
+ }
+
+	/* The split loop decides which pipes to split, based on DML outputs and DC debug flags */
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ int pipe_plane = v->pipe_plane[pipe_idx];
+ bool split4mpc = context->stream_count == 1 && plane_count == 1
+ && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
+
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
+ split[i] = 4;
+ else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
+ split[i] = 2;
+
+ if ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))
+ split[i] = 2;
+ if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 2;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+ }
+ if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 4;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
+ }
+		/* 4:2:0 format workaround */
+ if (pipe->stream->timing.h_addressable > 7680 &&
+ pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ split[i] = 4;
+ }
+
+ v->ODMCombineEnabled[pipe_plane] =
+ v->ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ if (dml_get_num_mpc_splits(pipe) == 1) {
+				/* If an MPC split is needed but the pipe is already 2-way split */
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 MPC */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 MPC */
+ else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 2 -> 1 MPC */
+ } else if (dml_get_num_mpc_splits(pipe) == 3) {
+				/* If an MPC split is needed but the pipe is already 4-way split */
+ if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
+ || !pipe->bottom_pipe)) {
+ merge[i] = true; /* 4 -> 2 MPC */
+ } else if (split[i] == 0 && pipe->top_pipe &&
+ pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 4 -> 1 MPC */
+ split[i] = 0;
+ } else if (dml_get_num_mpc_splits(pipe)) {
+ /* ODM -> MPC transition */
+ if (pipe->prev_odm_pipe) {
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ } else {
+ if (dml_get_num_mpc_splits(pipe) == 1) {
+				/* If an ODM split is needed but the pipe is already 2-way split */
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 ODM */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 ODM */
+ else if (pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ } else if (dml_get_num_mpc_splits(pipe) == 3) {
+				/* If an ODM split is needed but the pipe is already 4-way split */
+ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
+ || !pipe->next_odm_pipe)) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* 4 -> 2 ODM */
+ } else if (split[i] == 0 && pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ split[i] = 0;
+ } else if (dml_get_num_mpc_splits(pipe)) {
+ /* MPC -> ODM transition */
+ ASSERT(0); /* NOT expected yet */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ }
+
+ /* Adjust dppclk when split is forced, do not bother with dispclk */
+ if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
+ v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
+ pipe_idx++;
+ }
+
+ return vlevel;
+}
+
+static void dml_set_phantom_stream_timing(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *ref_pipe,
+ struct dc_stream_state *phantom_stream)
+{
+ // phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width
+ uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 +
+ dc->caps.subvp_fw_processing_delay_us +
+ dc->caps.subvp_pstate_allow_width_us;
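+	// Convert the blackout time into lines: lines = time_in_s * pixel_clock_in_hz / h_total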
+ uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) *
+ (ref_pipe->stream->timing.pix_clk_100hz * 100) /
+ (double)ref_pipe->stream->timing.h_total;
+ uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start;
+
+ phantom_stream->dst.y = 0;
+ phantom_stream->dst.height = phantom_vactive;
+ phantom_stream->src.y = 0;
+ phantom_stream->src.height = phantom_vactive;
+
+ phantom_stream->timing.v_addressable = phantom_vactive;
+ phantom_stream->timing.v_front_porch = 1;
+ phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
+ phantom_stream->timing.v_front_porch +
+ phantom_stream->timing.v_sync_width +
+ phantom_bp;
+}
+
+static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *ref_pipe)
+{
+ struct dc_stream_state *phantom_stream = NULL;
+
+ phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
+ phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
+ phantom_stream->dpms_off = true;
+ phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
+ phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
+ ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
+ ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+
+ /* stream has limited viewport and small timing */
+ memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
+ memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src));
+ memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
+ dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream);
+
+ dc_add_stream_to_ctx(dc, context, phantom_stream);
+ dc->hwss.apply_ctx_to_hw(dc, context);
+ return phantom_stream;
+}
+
+static void dml_enable_phantom_plane(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *phantom_stream,
+ struct pipe_ctx *main_pipe)
+{
+ struct dc_plane_state *phantom_plane = NULL;
+ struct dc_plane_state *prev_phantom_plane = NULL;
+ struct pipe_ctx *curr_pipe = main_pipe;
+
+ while (curr_pipe) {
+ if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
+ phantom_plane = prev_phantom_plane;
+ else
+ phantom_plane = dc_create_plane_state(dc);
+
+ memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
+ memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
+ sizeof(phantom_plane->scaling_quality));
+ memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect));
+ memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect));
+ memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect));
+ memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size,
+ sizeof(phantom_plane->plane_size));
+ memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info,
+ sizeof(phantom_plane->tiling_info));
+ memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc));
+ /* Currently compat_level is undefined in dc_state
+ * phantom_plane->compat_level = curr_pipe->plane_state->compat_level;
+ */
+ phantom_plane->format = curr_pipe->plane_state->format;
+ phantom_plane->rotation = curr_pipe->plane_state->rotation;
+ phantom_plane->visible = curr_pipe->plane_state->visible;
+
+		/* The phantom (shadow) pipe has a small viewport. */
+ phantom_plane->clip_rect.y = 0;
+ phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
+
+ dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
+
+ curr_pipe = curr_pipe->bottom_pipe;
+ prev_phantom_plane = phantom_plane;
+ }
+}
+
+static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		// Only construct a phantom stream for top pipes that have a plane enabled
+ if (!pipe->top_pipe && pipe->plane_state && pipe->stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ struct dc_stream_state *phantom_stream = NULL;
+
+ phantom_stream = dml_enable_phantom_stream(dc, context, pipe);
+ dml_enable_phantom_plane(dc, context, phantom_stream, pipe);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && pipe->stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ pipe->stream->use_dynamic_meta = false;
+ pipe->plane_state->flip_immediate = false;
+ if (!resource_build_scaling_params(pipe)) {
+				// TODO: log and remove this phantom pipe, since building its scaling params failed
+ }
+ }
+ }
+}
+
+static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ bool removed_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		// Remove the planes and stream of any phantom pipe
+ if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ dc_rem_all_planes_for_stream(dc, pipe->stream, context);
+ dc_remove_stream_from_ctx(dc, context, pipe->stream);
+ removed_pipe = true;
+ }
+
+ // Clear all phantom stream info
+ if (pipe->stream) {
+ pipe->stream->mall_stream_config.type = SUBVP_NONE;
+ pipe->stream->mall_stream_config.paired_stream = NULL;
+ }
+ }
+ if (removed_pipe)
+ dc->hwss.apply_ctx_to_hw(dc, context);
+}
+
+/*
+ * If the input state contains no upstream planes for a particular pipe (i.e. only timing)
+ * we need to populate some "conservative" plane information as DML cannot handle "no planes"
+ */
+static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe)
+{
+ pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled;
+ pipe->src.source_scan = dm_horz;
+ pipe->src.sw_mode = dm_sw_4kb_s;
+ pipe->src.macro_tile_size = dm_64k_tile;
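+	/* Clamp the synthetic viewport to 1920x1080 to keep the default plane conservative. */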
+ pipe->src.viewport_width = timing->h_addressable;
+ if (pipe->src.viewport_width > 1920)
+ pipe->src.viewport_width = 1920;
+ pipe->src.viewport_height = timing->v_addressable;
+ if (pipe->src.viewport_height > 1080)
+ pipe->src.viewport_height = 1080;
+ pipe->src.surface_height_y = pipe->src.viewport_height;
+ pipe->src.surface_width_y = pipe->src.viewport_width;
+ pipe->src.surface_height_c = pipe->src.viewport_height;
+ pipe->src.surface_width_c = pipe->src.viewport_width;
+ pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256;
+ pipe->src.source_format = dm_444_32;
+ pipe->dest.recout_width = pipe->src.viewport_width;
+ pipe->dest.recout_height = pipe->src.viewport_height;
+ pipe->dest.full_recout_width = pipe->dest.recout_width;
+ pipe->dest.full_recout_height = pipe->dest.recout_height;
+ pipe->scale_ratio_depth.lb_depth = dm_lb_16;
+ pipe->scale_ratio_depth.hscl_ratio = 1.0;
+ pipe->scale_ratio_depth.vscl_ratio = 1.0;
+ pipe->scale_ratio_depth.scl_enable = 0;
+ pipe->scale_taps.htaps = 1;
+ pipe->scale_taps.vtaps = 1;
+ pipe->dest.vtotal_min = timing->v_total;
+ pipe->dest.vtotal_max = timing->v_total;
+
+ if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) {
+ pipe->src.viewport_width /= 2;
+ pipe->dest.recout_width /= 2;
+ } else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) {
+ pipe->src.viewport_width /= 4;
+ pipe->dest.recout_width /= 4;
+ }
+
+ pipe->src.dcc = false;
+ pipe->src.dcc_rate = 1;
+}
+
+/*
+ * If the pipe is not blending (i.e. pipe_ctx->top_pipe == NULL), its hsplit
+ * group is its own pipe index. Otherwise, all pipes in the same blending tree
+ * share the hsplit group ID of the top-most pipe.
+ *
+ * If the pipe ctx is ODM-combined, similar logic follows for the ODM chain.
+ */
+static void populate_hsplit_group_from_dc_pipe_ctx(const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
+{
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
+
+ if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state
+ == dc_pipe_ctx->plane_state) {
+ struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe;
+ int split_idx = 0;
+
+ while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
+ == dc_pipe_ctx->plane_state) {
+ first_pipe = first_pipe->top_pipe;
+ split_idx++;
+ }
+
+		/* Treat a 4-to-1 MPC combine as an MPO of two 2-to-1 combines */
+ if (split_idx == 0)
+ e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ else if (split_idx == 1)
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
+ else if (split_idx == 2)
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx;
+
+ } else if (dc_pipe_ctx->prev_odm_pipe) {
+ struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe;
+
+ while (first_pipe->prev_odm_pipe)
+ first_pipe = first_pipe->prev_odm_pipe;
+ e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ }
+}
+
+static void populate_dml_from_dc_pipe_ctx(const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale)
+{
+ const struct dc_plane_state *pln = dc_pipe_ctx->plane_state;
+ const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data;
+
+ e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate;
+ e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln)
+ || (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln)
+ || e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
+
+ /* stereo is not split */
+ if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
+ pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
+ e2e_pipe->pipe.src.is_hsplit = false;
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
+ }
+
+ e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
+ || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
+ e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y;
+ e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y;
+ e2e_pipe->pipe.src.viewport_width = scl->viewport.width;
+ e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width;
+ e2e_pipe->pipe.src.viewport_height = scl->viewport.height;
+ e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height;
+ e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width;
+ e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height;
+ e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width;
+ e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height;
+ e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
+ e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
+
+ if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA
+ || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
+ e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
+ e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
+ e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
+ } else {
+ e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
+ e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
+ }
+ e2e_pipe->pipe.src.dcc = pln->dcc.enable;
+ e2e_pipe->pipe.src.dcc_rate = 1;
+ e2e_pipe->pipe.dest.recout_width = scl->recout.width;
+ e2e_pipe->pipe.dest.recout_height = scl->recout.height;
+ e2e_pipe->pipe.dest.full_recout_height = scl->recout.height;
+ e2e_pipe->pipe.dest.full_recout_width = scl->recout.width;
+	if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1) {
+		e2e_pipe->pipe.dest.full_recout_width *= 2;
+	} else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1) {
+		e2e_pipe->pipe.dest.full_recout_width *= 4;
+	} else {
+ struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pln) {
+ e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ split_pipe = dc_pipe_ctx->top_pipe;
+ while (split_pipe && split_pipe->plane_state == pln) {
+ e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->top_pipe;
+ }
+ }
+
+ e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16;
+ e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.scl_enable =
+ scl->ratios.vert.value != dc_fixpt_one.value
+ || scl->ratios.horz.value != dc_fixpt_one.value
+ || scl->ratios.vert_c.value != dc_fixpt_one.value
+ || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
+ || always_scale; /*support always scale*/
+ e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps;
+ e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
+ e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps;
+ e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
+
+ /* Currently compat_level is not defined. Commenting it until further resolution
+ * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) {
+ swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
+ &e2e_pipe->pipe.src.sw_mode);
+ e2e_pipe->pipe.src.macro_tile_size =
+ swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
+ } else {
+ gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode,
+ pln->compat_level,
+ &e2e_pipe->pipe.src.sw_mode);
+ e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile;
+ }*/
+
+ e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format);
+}
+
+static void populate_dml_cursor_parameters_from_dc_pipe_ctx(const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
+{
+	/*
+	 * A graphics plane carries one cursor; NV12 (video) and phantom planes
+	 * carry none. This feeds the BW calculations for cursor on/off.
+	 */
+ if (dc_pipe_ctx->plane_state &&
+ (dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
+ dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM))
+ e2e_pipe->pipe.src.num_cursors = 0;
+ else
+ e2e_pipe->pipe.src.num_cursors = 1;
+
+ e2e_pipe->pipe.src.cur0_src_width = 256;
+ e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit;
+}
+
+static int populate_dml_pipes_from_context_base(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int pipe_cnt, i;
+ bool synchronized_vblank = true;
+ struct resource_context *res_ctx = &context->res_ctx;
+
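+	/*
+	 * First pass: compare every stream against the first active one; if any
+	 * pair can be neither timing- nor vblank-synchronized (or sync is
+	 * disabled via debug), report all vblanks to DML as unsynchronized.
+	 */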
+ for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ if (pipe_cnt < 0) {
+ pipe_cnt = i;
+ continue;
+ }
+
+ if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ if (dc->debug.disable_timing_sync ||
+ (!resource_are_streams_timing_synchronizable(
+ res_ctx->pipe_ctx[pipe_cnt].stream,
+ res_ctx->pipe_ctx[i].stream) &&
+ !resource_are_vblanks_synchronizable(
+ res_ctx->pipe_ctx[pipe_cnt].stream,
+ res_ctx->pipe_ctx[i].stream))) {
+ synchronized_vblank = false;
+ break;
+ }
+ }
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+		struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
+		struct audio_check aud_check = {0};
+
+		if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ /* todo:
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
+ pipes[pipe_cnt].pipe.src.dcc = 0;
+ pipes[pipe_cnt].pipe.src.vm = 0;*/
+
+ pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+
+ pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
+		/* todo: rotation? */
+ pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
+ if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
+ /* 1/2 vblank */
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
+ (timing->v_total - timing->v_addressable
+ - timing->v_border_top - timing->v_border_bottom) / 2;
+ /* 36 bytes dp, 32 hdmi */
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
+ dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
+ }
+ pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
+
+ dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest);
+ pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
+ pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
+
+ pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
+
+ pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]);
+
+ populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
+
+ pipes[pipe_cnt].dout.dp_lanes = 4;
+ pipes[pipe_cnt].dout.is_virtual = 0;
+ pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal);
+ if (pipes[pipe_cnt].dout.output_type < 0) {
+ pipes[pipe_cnt].dout.output_type = dm_dp;
+ pipes[pipe_cnt].dout.is_virtual = 1;
+ }
+
+ populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout);
+
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
+ pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
+
+ /* todo: default max for now, until there is logic reflecting this in dc*/
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ /*fill up the audio sample rate (unit in kHz)*/
+ get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
+ pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
+
+ populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
+
+ if (!res_ctx->pipe_ctx[i].plane_state) {
+ populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe);
+ } else {
+ populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale);
+ }
+
+ pipe_cnt++;
+ }
+
+ /* populate writeback information */
+ if (dc->res_pool)
+ dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
+
+ return pipe_cnt;
+}
+
+static int dml_populate_dml_pipes_from_context(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+
+ populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ pipe_cnt++;
+ }
+ dc->config.enable_4to1MPC = false;
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format)) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ }
+
+ return pipe_cnt;
+}
+
+static void dml_full_validate_bw_helper(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *vlevel,
+ int *split,
+ bool *merge,
+ int *pipe_cnt)
+{
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ /*
+ * DML favors voltage over p-state, but we're more interested in
+ * supporting p-state over voltage. We can't support p-state in
+ * prefetch mode > 0 so try capping the prefetch mode to start.
+ */
+ context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
+ dm_allow_self_refresh_and_mclk_switch;
+ *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ /* This may adjust vlevel and maxMpcComb */
+ if (*vlevel < context->bw_ctx.dml.soc.num_states)
+ *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+
+ /* Conditions for setting up phantom pipes for SubVP:
+ * 1. Not force disable SubVP
+ * 2. Full update (i.e. !fast_validate)
+ * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
+ * 4. Display configuration passes validation
+ * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
+ */
+ if (!dc->debug.force_disable_subvp &&
+ dml_enough_pipes_for_subvp(dc, context) &&
+ *vlevel < context->bw_ctx.dml.soc.num_states &&
+ (vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
+ dc->debug.force_subvp_mclk_switch)) {
+
+ dml_add_phantom_pipes(dc, context);
+
+ /* Create input to DML based on new context which includes phantom pipes
+ * TODO: Input to DML should mark which pipes are phantom
+ */
+ *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
+ *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ if (*vlevel < context->bw_ctx.dml.soc.num_states) {
+ memset(split, 0, MAX_PIPES * sizeof(*split));
+ memset(merge, 0, MAX_PIPES * sizeof(*merge));
+ *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ }
+
+ // If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
+ // remove phantom pipes and repopulate dml pipes
+ if (*vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ dml_remove_phantom_pipes(dc, context);
+ *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
+ }
+ }
+}
+
+static void dcn20_adjust_adaptive_sync_v_startup(
+ const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t asic_blank_end = 0;
+ uint32_t asic_blank_start = 0;
+ uint32_t newVstartup = 0;
+
+ patched_crtc_timing = *dc_crtc_timing;
+
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ if (patched_crtc_timing.v_front_porch < 2)
+ patched_crtc_timing.v_front_porch = 2;
+ } else {
+ if (patched_crtc_timing.v_front_porch < 1)
+ patched_crtc_timing.v_front_porch = 1;
+ }
+
+ /* blank_start = frame end - front porch */
+ asic_blank_start = patched_crtc_timing.v_total -
+ patched_crtc_timing.v_front_porch;
+
+ /* blank_end = blank_start - active */
+ asic_blank_end = asic_blank_start -
+ patched_crtc_timing.v_border_bottom -
+ patched_crtc_timing.v_addressable -
+ patched_crtc_timing.v_border_top;
+
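+	/* The candidate vstartup is the whole vertical blanking: blank end plus the front porch. */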
+ newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+
+ *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
+}
+
+static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
+{
+ return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
+ pipe_ctx->link_res.hpo_dp_link_enc &&
+ dc_is_dp_signal(pipe_ctx->stream->signal));
+}
+
+static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
+{
+	int i;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+#if defined (CONFIG_DRM_AMD_DC_DP2_0)
+ if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ return true;
+#endif
+ }
+ return false;
+}
+
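+/* Override the SOC latency inputs with watermark set A values when that entry is valid. */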
+static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ }
+}
+
+static bool dml_internal_validate(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out,
+ bool fast_validate)
+{
+ bool out = false;
+ bool repopulate_pipes = false;
+ int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
+ bool newly_split[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx, vlevel;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ // For each full update, remove all existing phantom pipes first
+ dml_remove_phantom_pipes(dc, context);
+
+ dml_update_soc_for_wm_a(dc, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state) {
+ // On initial pass through DML, we intend to use MALL for SS on all
+ // (non-PSR) surfaces with none using MALL for P-State
+			// 'mall_plane_config' is not a member of 'dc_plane_state'; commented out until mall_plane_config is supported in dc_plane_state
+ //if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+ // pipe->plane_state->mall_plane_config.use_mall_for_ss = true;
+ }
+ }
+ pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ if (!fast_validate) {
+ dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
+ }
+
+ if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ /*
+ * If mode is unsupported or there's still no p-state support then
+ * fall back to favoring voltage.
+ *
+ * We don't actually support prefetch mode 2, so require that we
+ * at least support prefetch mode 1.
+ */
+ context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
+ dm_allow_self_refresh;
+
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+ if (vlevel < context->bw_ctx.dml.soc.num_states) {
+ memset(split, 0, sizeof(split));
+ memset(merge, 0, sizeof(merge));
+ vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ }
+ }
+
+ dml_log_mode_support_params(&context->bw_ctx.dml);
+
+ if (vlevel == context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
+
+ if (!pipe->stream)
+ continue;
+
+ /* We only support full screen mpo with ODM */
+ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
+ && pipe->plane_state && mpo_pipe
+ && memcmp(&mpo_pipe->plane_res.scl_data.recout,
+ &pipe->plane_res.scl_data.recout,
+ sizeof(struct rect)) != 0) {
+ ASSERT(mpo_pipe->plane_state != pipe->plane_state);
+ goto validate_fail;
+ }
+ pipe_idx++;
+ }
+
+ /* merge pipes if necessary */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		/* skip pipes that don't need merging */
+ if (!merge[i])
+ continue;
+
+ /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
+ if (pipe->prev_odm_pipe) {
+			/* split off the ODM pipe */
+ pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
+ if (pipe->next_odm_pipe)
+ pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
+
+ pipe->bottom_pipe = NULL;
+ pipe->next_odm_pipe = NULL;
+ pipe->plane_state = NULL;
+ pipe->stream = NULL;
+ pipe->top_pipe = NULL;
+ pipe->prev_odm_pipe = NULL;
+ if (pipe->stream_res.dsc)
+ dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
+ memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
+ memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ repopulate_pipes = true;
+ } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ struct pipe_ctx *top_pipe = pipe->top_pipe;
+ struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
+
+ top_pipe->bottom_pipe = bottom_pipe;
+ if (bottom_pipe)
+ bottom_pipe->top_pipe = top_pipe;
+
+ pipe->top_pipe = NULL;
+ pipe->bottom_pipe = NULL;
+ pipe->plane_state = NULL;
+ pipe->stream = NULL;
+ memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
+ memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ repopulate_pipes = true;
+		} else
+			ASSERT(0); /* Should never try to merge master pipe */
+ }
+
+ for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *hsplit_pipe = NULL;
+ bool odm;
+ int old_index = -1;
+
+ if (!pipe->stream || newly_split[i])
+ continue;
+
+ pipe_idx++;
+ odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
+
+ if (!pipe->plane_state && !odm)
+ continue;
+
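+		/* Prefer reusing the pipe that backed this split in the current state. */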
+ if (split[i]) {
+ if (odm) {
+ if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+ else if (old_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->pipe_idx;
+ } else {
+ if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+ else if (old_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->pipe_idx;
+ }
+ hsplit_pipe = dml_find_split_pipe(dc, context, old_index);
+ ASSERT(hsplit_pipe);
+ if (!hsplit_pipe)
+ goto validate_fail;
+
+ if (!dml_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ pipe, hsplit_pipe, odm))
+ goto validate_fail;
+
+ newly_split[hsplit_pipe->pipe_idx] = true;
+ repopulate_pipes = true;
+ }
+ if (split[i] == 4) {
+ struct pipe_ctx *pipe_4to1;
+
+ if (odm && old_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->pipe_idx;
+ else if (!odm && old_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->pipe_idx;
+ else
+ old_index = -1;
+ pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
+ ASSERT(pipe_4to1);
+ if (!pipe_4to1)
+ goto validate_fail;
+ if (!dml_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ pipe, pipe_4to1, odm))
+ goto validate_fail;
+ newly_split[pipe_4to1->pipe_idx] = true;
+
+ if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+ else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+ else
+ old_index = -1;
+ pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
+ ASSERT(pipe_4to1);
+ if (!pipe_4to1)
+ goto validate_fail;
+ if (!dml_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ hsplit_pipe, pipe_4to1, odm))
+ goto validate_fail;
+ newly_split[pipe_4to1->pipe_idx] = true;
+ }
+ if (odm)
+ dml_build_mapped_resource(dc, context, pipe->stream);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state) {
+ if (!resource_build_scaling_params(pipe))
+ goto validate_fail;
+ }
+ }
+
+	/* Validate the actual DSC count per stream */
+ if (!dml_validate_dsc(dc, context)) {
+ vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
+ goto validate_fail;
+ }
+
+ if (repopulate_pipes)
+ pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ *vlevel_out = vlevel;
+ *pipe_cnt_out = pipe_cnt;
+
+ out = true;
+ goto validate_out;
+
+validate_fail:
+ out = false;
+
+validate_out:
+ return out;
+}
+
+static void dml_calculate_dlg_params(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int i, pipe_idx;
+ int plane_count;
+
+ /* Writeback MCIF_WB arbitration parameters */
+ if (dc->res_pool)
+ dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
+
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
+ context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support =
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
+ != dm_dram_clock_change_unsupported;
+
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+	/* 'z9_support' is not a member of 'dc_clocks'; commented out until dc_clocks supports it:
+ * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
+ DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
+ */
+ plane_count = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+ }
+
+ /* Commented out as per above error for now.
+ if (plane_count == 0)
+ context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
+ */
+ context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
+ /* TODO : Uncomment the below line and make changes
+ * as per DML nomenclature once it is available.
+ * context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = context->bw_ctx.dml.vba.fclk_pstate_support;
+ */
+
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+ pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
+ context->res_ctx.pipe_ctx[i].unbounded_req = false;
+ } else {
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
+ context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
+ }
+
+ if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+ context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
+ pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+ context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+ pipe_idx++;
+ }
+	/* save an original dppclk copy */
+ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
+ context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
+ context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
+ context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes
+ - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
+
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
+ &context->res_ctx.pipe_ctx[i].dlg_regs,
+ &context->res_ctx.pipe_ctx[i].ttu_regs,
+ pipes,
+ pipe_cnt,
+ pipe_idx,
+ cstate_en,
+ context->bw_ctx.bw.dcn.clk.p_state_change_support,
+ false, false, true);
+
+ context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
+ &context->res_ctx.pipe_ctx[i].rq_regs,
+ &pipes[pipe_idx].pipe);
+ pipe_idx++;
+ }
+}
+
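
A hedged note on the unit conversions in dml_calculate_dlg_params(): DML reports
clocks in MHz (and DRAM speed in MT/s) as doubles, while dc_clocks stores integer
kHz; the divide-by-16 matches the memclk_mhz * 16 = dram_speed_mts relation used
later in this file. Example values are illustrative:

	double dispclk_mhz = 1372.0;        /* DML output, MHz */
	double dram_speed_mts = 16000.0;    /* DML DRAMSpeed, MT/s */

	int dispclk_khz = (int)(dispclk_mhz * 1000);         /* 1372000 */
	int dramclk_khz = (int)(dram_speed_mts * 1000 / 16); /* 1000000 */
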
+static void dml_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int i, pipe_idx, vlevel_temp = 0;
+
+ double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+ double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+ dm_dram_clock_change_unsupported;
+
+ /* Set B:
+	 * For Set B calculations use clocks from clock_limits[2] when available, i.e. when SMU is present;
+	 * otherwise use an arbitrary low value from the spreadsheet for DCFCLK, as lower is safer for
+	 * watermark calculations and covers bootup clocks.
+ * DCFCLK: soc.clock_limits[2] when available
+ * UCLK: soc.clock_limits[2] when available
+ */
+ if (context->bw_ctx.dml.soc.num_states > 2) {
+ vlevel_temp = 2;
+ dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
+ } else
+ dcfclk = 615; //DCFCLK Vmin_lv
+
+ pipes[0].clks_cfg.voltage = vlevel_temp;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 4;
+ context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8;
+
+ /* Set D:
+ * All clocks min.
+ * DCFCLK: Min, as reported by PM FW when available
+ * UCLK : Min, as reported by PM FW when available
+	 * sr_enter_exit/sr_exit should be lower than those used for DRAM (TBD after bringup or later; use values as decided in Clk Mgr)
+ */
+
+ if (context->bw_ctx.dml.soc.num_states > 2) {
+ vlevel_temp = 0;
+ dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
+ } else
+ dcfclk = 615; //DCFCLK Vmin_lv
+
+ pipes[0].clks_cfg.voltage = vlevel_temp;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 4;
+ context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8;
+
+ /* Set C, for Dummy P-State:
+ * All clocks min.
+ * DCFCLK: Min, as reported by PM FW, when available
+ * UCLK : Min, as reported by PM FW, when available
+ * pstate latency as per UCLK state dummy pstate latency
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ unsigned int min_dram_speed_mts_margin = 160;
+
+ if ((!pstate_en))
+ min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+		/* Find the largest table entry that is lower than the DRAM speed; anything lower than DPM0 still uses DPM0 */
+ for (i = 3; i > 0; i--)
+ if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+ break;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+ context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 4;
+ context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8;
+
+ if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
+		/* The only difference between A and C is p-state latency. If p-state is not supported
+		 * with full p-state latency, we want to calculate DLG based on dummy p-state latency.
+		 * Set A's p-state watermark was previously set to 0 when p-state is unsupported; keep
+		 * that behavior for now.
+		 */
+ context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+ } else {
+ /* Set A:
+ * All clocks min.
+ * DCFCLK: Min, as reported by PM FW, when available
+ * UCLK: Min, as reported by PM FW, when available
+ */
+ dml_update_soc_for_wm_a(dc, context);
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ }
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+
+ context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+ dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
+ if (!pstate_en)
+ /* Restore full p-state latency */
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+}
+
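
A sketch of the dummy p-state lookup in dml_calculate_wm_and_dlg(): the 4-entry
table is walked from the highest index down, stopping at the first entry whose
DRAM speed is below the current speed plus margin, with index 0 (DPM0) as the
floor. Table contents here are made up for illustration:

	struct {
		unsigned int dram_speed_mts;
		double dummy_pstate_latency_us;
	} tbl[4] = {
		{ 1600, 38.0 }, { 8000, 9.0 }, { 16000, 5.0 }, { 18000, 4.5 },
	};
	unsigned int cur_mts = 16000, margin = 160;
	int i;

	for (i = 3; i > 0; i--)
		if (cur_mts + margin > tbl[i].dram_speed_mts)
			break;
	/* i == 2: 16160 > 16000, so that entry's dummy latency is used */
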
+bool dml_validate(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
+ out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+
+ if (pipe_cnt == 0)
+ goto validate_out;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dml_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
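
A hedged usage sketch for dml_validate(): callers typically run a fast pass for
mode filtering and a full pass (watermarks and DLG) before committing a state.
commit_state() is a hypothetical next step, not a driver function:

	if (dml_validate(dc, context, true) &&   /* fast: skips WM/DLG */
	    dml_validate(dc, context, false))    /* full validation */
		commit_state(dc, context);
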
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
new file mode 100644
index 000000000000..4ec5310a2962
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifdef DML_WRAPPER_TRANSLATION_
+
+static void gfx10array_mode_to_dml_params(
+ enum array_mode_values array_mode,
+ enum legacy_tiling_compat_level compat_level,
+ unsigned int *sw_mode)
+{
+ switch (array_mode) {
+ case DC_ARRAY_LINEAR_ALLIGNED:
+ case DC_ARRAY_LINEAR_GENERAL:
+ *sw_mode = dm_sw_linear;
+ break;
+ case DC_ARRAY_2D_TILED_THIN1:
+// DC_LEGACY_TILING_ADDR_GEN_ZERO is undefined in the current code, hence this path is disabled
+#if 0
+ if (compat_level == DC_LEGACY_TILING_ADDR_GEN_ZERO)
+ *sw_mode = dm_sw_gfx7_2d_thin_l_vp;
+ else
+ *sw_mode = dm_sw_gfx7_2d_thin_gl;
+#endif
+ break;
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+}
+
+static void swizzle_to_dml_params(
+ enum swizzle_mode_values swizzle,
+ unsigned int *sw_mode)
+{
+ switch (swizzle) {
+ case DC_SW_LINEAR:
+ *sw_mode = dm_sw_linear;
+ break;
+ case DC_SW_4KB_S:
+ *sw_mode = dm_sw_4kb_s;
+ break;
+ case DC_SW_4KB_S_X:
+ *sw_mode = dm_sw_4kb_s_x;
+ break;
+ case DC_SW_4KB_D:
+ *sw_mode = dm_sw_4kb_d;
+ break;
+ case DC_SW_4KB_D_X:
+ *sw_mode = dm_sw_4kb_d_x;
+ break;
+ case DC_SW_64KB_S:
+ *sw_mode = dm_sw_64kb_s;
+ break;
+ case DC_SW_64KB_S_X:
+ *sw_mode = dm_sw_64kb_s_x;
+ break;
+ case DC_SW_64KB_S_T:
+ *sw_mode = dm_sw_64kb_s_t;
+ break;
+ case DC_SW_64KB_D:
+ *sw_mode = dm_sw_64kb_d;
+ break;
+ case DC_SW_64KB_D_X:
+ *sw_mode = dm_sw_64kb_d_x;
+ break;
+ case DC_SW_64KB_D_T:
+ *sw_mode = dm_sw_64kb_d_t;
+ break;
+ case DC_SW_64KB_R_X:
+ *sw_mode = dm_sw_64kb_r_x;
+ break;
+ case DC_SW_VAR_S:
+ *sw_mode = dm_sw_var_s;
+ break;
+ case DC_SW_VAR_S_X:
+ *sw_mode = dm_sw_var_s_x;
+ break;
+ case DC_SW_VAR_D:
+ *sw_mode = dm_sw_var_d;
+ break;
+ case DC_SW_VAR_D_X:
+ *sw_mode = dm_sw_var_d_x;
+ break;
+
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+}
+
+static void dc_timing_to_dml_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_dest_params_st *dest)
+{
+ dest->hblank_start = timing->h_total - timing->h_front_porch;
+ dest->hblank_end = dest->hblank_start
+ - timing->h_addressable
+ - timing->h_border_left
+ - timing->h_border_right;
+ dest->vblank_start = timing->v_total - timing->v_front_porch;
+ dest->vblank_end = dest->vblank_start
+ - timing->v_addressable
+ - timing->v_border_top
+ - timing->v_border_bottom;
+ dest->htotal = timing->h_total;
+ dest->vtotal = timing->v_total;
+ dest->hactive = timing->h_addressable;
+ dest->vactive = timing->v_addressable;
+ dest->interlaced = timing->flags.INTERLACE;
+ dest->pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
+ if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+ dest->pixel_rate_mhz *= 2;
+}
+
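
A worked example of the blank arithmetic in dc_timing_to_dml_timing(), using
standard CEA 1920x1080@60 timing (h_total 2200, h_front_porch 88, h_addressable
1920, no borders):

	hblank_start = 2200 - 88           = 2112
	hblank_end   = 2112 - 1920 - 0 - 0 = 192   /* hsync + back porch */

The vertical fields follow the same pattern with v_total and v_front_porch.
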
+static enum odm_combine_mode get_dml_odm_combine(const struct pipe_ctx *pipe)
+{
+ int odm_split_count = 0;
+ enum odm_combine_mode combine_mode = dm_odm_combine_mode_disabled;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+
+ // Traverse pipe tree to determine odm split count
+ while (next_pipe) {
+ odm_split_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_split_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+
+ // Translate split to DML odm combine factor
+ switch (odm_split_count) {
+ case 1:
+ combine_mode = dm_odm_combine_mode_2to1;
+ break;
+ case 3:
+ combine_mode = dm_odm_combine_mode_4to1;
+ break;
+ default:
+ combine_mode = dm_odm_combine_mode_disabled;
+ }
+
+ return combine_mode;
+}
+
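
A compact restatement of the translation in get_dml_odm_combine(): one ODM
neighbor means two pipes share the stream (2:1 combine), three neighbors mean
four pipes (4:1), and any other count maps to disabled. Illustrative only:

	int odm_split_count = 3;   /* neighbors found in the pipe chain */
	enum odm_combine_mode m =
		(odm_split_count == 1) ? dm_odm_combine_mode_2to1 :
		(odm_split_count == 3) ? dm_odm_combine_mode_4to1 :
					 dm_odm_combine_mode_disabled;
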
+static int get_dml_output_type(enum signal_type dc_signal)
+{
+ int dml_output_type = -1;
+
+ switch (dc_signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ dml_output_type = dm_dp;
+ break;
+ case SIGNAL_TYPE_EDP:
+ dml_output_type = dm_edp;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ dml_output_type = dm_hdmi;
+ break;
+ default:
+ break;
+ }
+
+ return dml_output_type;
+}
+
+static void populate_color_depth_and_encoding_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_output_params_st *dout)
+{
+ int output_bpc = 0;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ output_bpc = 6;
+ break;
+ case COLOR_DEPTH_888:
+ output_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ output_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ output_bpc = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ output_bpc = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ output_bpc = 16;
+ break;
+ case COLOR_DEPTH_999:
+ output_bpc = 9;
+ break;
+ case COLOR_DEPTH_111111:
+ output_bpc = 11;
+ break;
+ default:
+ output_bpc = 8;
+ break;
+ }
+
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ dout->output_format = dm_444;
+ dout->output_bpp = output_bpc * 3;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ dout->output_format = dm_420;
+ dout->output_bpp = (output_bpc * 3.0) / 2;
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ if (timing->flags.DSC && !timing->dsc_cfg.ycbcr422_simple)
+ dout->output_format = dm_n422;
+ else
+ dout->output_format = dm_s422;
+ dout->output_bpp = output_bpc * 2;
+ break;
+ default:
+ dout->output_format = dm_444;
+ dout->output_bpp = output_bpc * 3;
+ }
+}
+
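
A worked example of the output_bpp math above: at 10 bpc, RGB/4:4:4 carries
10 * 3 = 30 bpp, 4:2:0 carries 10 * 3.0 / 2 = 15 bpp, and 4:2:2 carries
10 * 2 = 20 bpp, matching the chroma subsampling ratios.
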
+static enum source_format_class dc_source_format_to_dml_source_format(enum surface_pixel_format dc_format)
+{
+ enum source_format_class dml_format = dm_444_32;
+
+ switch (dc_format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ dml_format = dm_420_8;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ dml_format = dm_420_10;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ dml_format = dm_444_64;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ dml_format = dm_444_16;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ dml_format = dm_444_8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ dml_format = dm_rgbe_alpha;
+ break;
+ default:
+ dml_format = dm_444_32;
+ break;
+ }
+
+ return dml_format;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
index 3ee858f311d1..ec636d06e18c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
@@ -61,16 +61,6 @@ static double dsc_roundf(double num)
return (int)(num);
}
-static double dsc_ceil(double num)
-{
- double retval = (int)num;
-
- if (retval != num && num > 0)
- retval = num + 1;
-
- return (int)retval;
-}
-
static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
enum max_min max_min, float bpp)
{
@@ -103,7 +93,7 @@ static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
TABLE_CASE(420, 12, min);
}
- if (table == 0)
+ if (!table)
return;
index = (bpp - table[0].bpp) * 2;
@@ -268,24 +258,3 @@ void _do_calc_rc_params(struct rc_params *rc,
rc->rc_buf_thresh[13] = 8064;
}
-u32 _do_bytes_per_pixel_calc(int slice_width,
- u16 drm_bpp,
- bool is_navite_422_or_420)
-{
- float bpp;
- u32 bytes_per_pixel;
- double d_bytes_per_pixel;
-
- dc_assert_fp_enabled();
-
- bpp = ((float)drm_bpp / 16.0);
- d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
- // TODO: Make sure the formula for calculating this is precise (ceiling
- // vs. floor, and at what point they should be applied)
- if (is_navite_422_or_420)
- d_bytes_per_pixel /= 2;
-
- bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
-
- return bytes_per_pixel;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
index b93b95409fbe..cad244c023cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
@@ -78,10 +78,6 @@ struct qp_entry {
typedef struct qp_entry qp_table[];
-u32 _do_bytes_per_pixel_calc(int slice_width,
- u16 drm_bpp,
- bool is_navite_422_or_420);
-
void _do_calc_rc_params(struct rc_params *rc,
enum colour_mode cm,
enum bits_per_comp bpc,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 0321b4446e05..9c74564cbd8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -455,6 +455,7 @@ static bool intersect_dsc_caps(
if (pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420)
dsc_common_caps->bpp_increment_div = min(dsc_common_caps->bpp_increment_div, (uint32_t)8);
+ dsc_common_caps->edp_sink_max_bits_per_pixel = dsc_sink_caps->edp_max_bits_per_pixel;
dsc_common_caps->is_dp = dsc_sink_caps->is_dp;
return true;
}
@@ -513,6 +514,13 @@ static bool decide_dsc_bandwidth_range(
range->min_target_bpp_x16 = preferred_bpp_x16;
}
}
+ /* TODO - make this value generic to all signal types */
+ else if (dsc_caps->edp_sink_max_bits_per_pixel) {
+ /* apply max bpp limitation from edp sink */
+ range->max_target_bpp_x16 = MIN(dsc_caps->edp_sink_max_bits_per_pixel,
+ max_bpp_x16);
+ range->min_target_bpp_x16 = min_bpp_x16;
+ }
else {
range->max_target_bpp_x16 = max_bpp_x16;
range->min_target_bpp_x16 = min_bpp_x16;
@@ -574,7 +582,7 @@ static bool decide_dsc_target_bpp_x16(
return *target_bpp_x16 != 0;
}
-#define MIN_AVAILABLE_SLICES_SIZE 4
+#define MIN_AVAILABLE_SLICES_SIZE 6
static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *available_slices)
{
@@ -860,6 +868,10 @@ static bool setup_dsc_config(
min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
is_dsc_possible = (min_slices_h <= max_slices_h);
+
+ if (min_slices_h == 0 && max_slices_h == 0)
+ is_dsc_possible = false;
+
if (!is_dsc_possible)
goto done;
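
A hedged note on the added guard: when neither encoder nor sink reports any
slice capability, min_slices_h and max_slices_h are both 0, so the earlier
(min_slices_h <= max_slices_h) test alone would report DSC as possible:

	int min_slices_h = 0, max_slices_h = 0;            /* no slice caps */
	bool is_dsc_possible = (min_slices_h <= max_slices_h);  /* true */
	if (min_slices_h == 0 && max_slices_h == 0)
		is_dsc_possible = false;                   /* corrected */
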
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index b19d3aeb5962..e97cf09be9d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -60,31 +60,3 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
pps->dsc_version_minor);
DC_FP_END();
}
-
-/**
- * calc_dsc_bytes_per_pixel - calculate bytes per pixel
- * @pps: DRM struct with all required DSC values
- *
- * Based on the information inside drm_dsc_config, this function calculates the
- * total of bytes per pixel.
- *
- * @note This calculation requires float point operation, most of it executes
- * under kernel_fpu_{begin,end}.
- *
- * Return:
- * Return the number of bytes per pixel
- */
-u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps)
-
-{
- u32 ret;
- u16 drm_bpp = pps->bits_per_pixel;
- int slice_width = pps->slice_width;
- bool is_navite_422_or_420 = pps->native_422 || pps->native_420;
-
- DC_FP_START();
- ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp,
- is_navite_422_or_420);
- DC_FP_END();
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index c2340e001b57..80921c1c0d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -30,7 +30,6 @@
#include "dml/dsc/rc_calc_fpu.h"
void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps);
-u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 1e19dd674e5a..7e306aa3e2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -100,8 +100,7 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
int ret;
struct rc_params rc;
struct drm_dsc_config dsc_cfg;
-
- dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps);
+ unsigned long long tmp;
calc_rc_params(&rc, pps);
dsc_params->pps = *pps;
@@ -113,6 +112,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64;
ret = drm_dsc_compute_rc_parameters(&dsc_cfg);
+ tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1);
+ do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP
+ dsc_params->bytes_per_pixel = (uint32_t)tmp;
copy_pps_fields(&dsc_params->pps, &dsc_cfg);
dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits;
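
A hedged worked example of the fixed-point round-up that replaces the FPU
helper: bytes_per_pixel is stored as ceil(slice_chunk_size * 2^28 /
slice_width), i.e. a u4.28 fixed-point value. For a 1920-pixel slice with a
1920-byte chunk (8 bpp):

	unsigned long long tmp;
	uint32_t slice_chunk_size = 1920, slice_width = 1920;

	tmp = (unsigned long long)slice_chunk_size * 0x10000000
		+ (slice_width - 1);
	do_div(tmp, slice_width);   /* 0x10000000 == 1.0 byte per pixel */

Adding slice_width - 1 before the divide is what turns the truncating do_div()
into a ceiling.
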
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index d34b0b0eea65..444182a97e6e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -53,6 +53,8 @@ enum dc_status {
DC_NOT_SUPPORTED = 24,
DC_UNSUPPORTED_VALUE = 25,
+ DC_NO_LINK_ENC_RESOURCE = 26,
+
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 6fc6488c54c0..943240e2809e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -334,6 +334,20 @@ struct plane_resource {
struct dcn_fe_bandwidth bw;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#define LINK_RES_HPO_DP_REC_MAP__MASK 0xFFFF
+#define LINK_RES_HPO_DP_REC_MAP__SHIFT 0
+#endif
+
+/* all mappable hardware resources used to enable a link */
+struct link_resource {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_dp_link_enc;
+#else
+ void *dummy;
+#endif
+};
+
union pipe_update_flags {
struct {
uint32_t enable : 1;
@@ -361,12 +375,14 @@ struct pipe_ctx {
struct plane_resource plane_res;
struct stream_resource stream_res;
+ struct link_resource link_res;
struct clock_source *clock_source;
struct pll_settings pll_settings;
uint8_t pipe_idx;
+ uint8_t pipe_idx_syncd;
struct pipe_ctx *top_pipe;
struct pipe_ctx *bottom_pipe;
@@ -411,6 +427,8 @@ struct resource_context {
struct link_enc_cfg_context link_enc_cfg_ctx;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
+ unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
+ int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool is_mpc_3dlut_acquired[MAX_PIPES];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index a6d3d859754a..cd52813a8432 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -56,16 +56,19 @@ enum {
bool dp_verify_link_cap(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *known_limit_link_setting,
int *fail_count);
bool dp_verify_link_cap_with_retries(
struct dc_link *link,
+ const struct link_resource *link_res,
struct dc_link_settings *known_limit_link_setting,
int attempts);
bool dp_verify_mst_link_cap(
- struct dc_link *link);
+ struct dc_link *link,
+ const struct link_resource *link_res);
bool dp_validate_mode_timing(
struct dc_link *link,
@@ -168,8 +171,9 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
struct dc_link *link,
enum dc_dp_training_pattern pattern);
-enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready);
+enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
+struct link_encoder *dp_get_link_enc(struct dc_link *link);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update);
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
@@ -210,11 +214,16 @@ bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link);
struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
const struct dc_stream_state *stream,
const struct dc_link *link);
-void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings);
-void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal);
+void enable_dp_hpo_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings);
+void disable_dp_hpo_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link);
bool dp_retrieve_lttpr_cap(struct dc_link *link);
+void edp_panel_backlight_power_on(struct dc_link *link);
#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
index 974d703e3771..74dafd0f9d3d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
@@ -91,8 +91,9 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
* DPIA equivalent of dc_link_dp_perfrorm_link_training.
* Aborts link training upon detection of sink unplug.
*/
-enum link_training_result
-dc_link_dpia_perform_link_training(struct dc_link *link,
+enum link_training_result dc_link_dpia_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
bool skip_video_pattern);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 806f3041db14..337c0161e72d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -619,7 +619,7 @@ struct dcn_ip_params {
};
extern const struct dcn_ip_params dcn10_ip_defaults;
-bool dcn_validate_bandwidth(
+bool dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h
new file mode 100644
index 000000000000..5dcfbd8e2697
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DML_WRAPPER_H_
+#define DML_WRAPPER_H_
+
+#include "dc.h"
+#include "dml/display_mode_vba.h"
+
+bool dml_validate(struct dc *dc, struct dc_state *context, bool fast_validate);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index a17e5de3b100..c920c4b6077d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -211,6 +211,8 @@ struct dummy_pstate_entry {
struct clk_bw_params {
unsigned int vram_type;
unsigned int num_channels;
+ unsigned int dispclk_vco_khz;
+ unsigned int dc_mode_softmax_memclk;
struct clk_limit_table clk_table;
struct wm_table wm_table;
struct dummy_pstate_entry dummy_pstate_table[4];
@@ -261,6 +263,10 @@ struct clk_mgr_funcs {
/* Send message to PMFW to set hard max memclk frequency to highest DPM */
void (*set_hard_max_memclk)(struct clk_mgr *clk_mgr);
+ /* Custom set a memclk freq range*/
+ void (*set_max_memclk)(struct clk_mgr *clk_mgr, unsigned int memclk_mhz);
+ void (*set_min_memclk)(struct clk_mgr *clk_mgr, unsigned int memclk_mhz);
+
/* Get current memclk states from PMFW, update relevant structures */
void (*get_memclk_states_from_smu)(struct clk_mgr *clk_mgr);
@@ -274,6 +280,7 @@ struct clk_mgr {
struct dc_clocks clks;
bool psr_allow_active_cache;
bool force_smu_not_present;
+ bool dc_mode_softmax_enabled;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes
int dentist_vco_freq_khz;
struct clk_state_registers_and_bypass boot_snapshot;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index f94135c6e3c2..346f0ba73e86 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -61,6 +61,8 @@ struct dcn_dsc_state {
uint32_t dsc_pic_height;
uint32_t dsc_slice_bpg_offset;
uint32_t dsc_chunk_size;
+ uint32_t dsc_fw_en;
+ uint32_t dsc_opp_source;
};
@@ -88,6 +90,7 @@ struct dsc_enc_caps {
int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */
int32_t max_slice_width;
uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */
+ uint32_t edp_sink_max_bits_per_pixel;
bool is_dp;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 80e1a32bc63d..2c031586f4e6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -139,6 +139,7 @@ struct hubp_funcs {
bool (*hubp_is_flip_pending)(struct hubp *hubp);
void (*set_blank)(struct hubp *hubp, bool blank);
+ void (*set_blank_regs)(struct hubp *hubp, bool blank);
void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
void (*set_cursor_attributes)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index bb0e91756ddd..2ce15cd10d80 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -268,7 +268,8 @@ struct hpo_dp_link_encoder_funcs {
void (*enable_link_phy)(struct hpo_dp_link_encoder *enc,
const struct dc_link_settings *link_settings,
- enum transmitter transmitter);
+ enum transmitter transmitter,
+ enum hpd_source_id hpd_source);
void (*disable_link_phy)(struct hpo_dp_link_encoder *link_enc,
enum signal_type signal);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index c88e113b94d1..073f8b667eff 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -164,6 +164,10 @@ struct stream_encoder_funcs {
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
+ void (*reset_fifo)(
+ struct stream_encoder *enc
+ );
+
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 7390baf916b5..c29320b3855d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -290,6 +290,8 @@ struct timing_generator_funcs {
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
+ void (*get_dsc_status)(struct timing_generator *optc,
+ uint32_t *dsc_mode);
void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing);
void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index d50f4bd06b5d..05053f3b4ab7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -64,6 +64,7 @@ struct hw_sequencer_funcs {
enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
struct dc_state *context);
void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
int num_planes, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
index 10dcf6a5e9b1..a4e43b4826e0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
@@ -36,7 +36,7 @@
* Initialise link encoder resource tracking.
*/
void link_enc_cfg_init(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index ba664bc49595..69d63763a10e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -32,6 +32,7 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
void dp_enable_link_phy(
struct dc_link *link,
+ const struct link_resource *link_res,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
@@ -42,22 +43,27 @@ void edp_add_delay_for_T9(struct dc_link *link);
bool edp_receiver_ready_T9(struct dc_link *link);
bool edp_receiver_ready_T7(struct dc_link *link);
-void dp_disable_link_phy(struct dc_link *link, enum signal_type signal);
+void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
+ enum signal_type signal);
-void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);
+void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
+ enum signal_type signal);
bool dp_set_hw_training_pattern(
struct dc_link *link,
+ const struct link_resource *link_res,
enum dc_dp_training_pattern pattern,
uint32_t offset);
void dp_set_hw_lane_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct link_training_settings *link_settings,
uint32_t offset);
void dp_set_hw_test_pattern(
struct dc_link *link,
+ const struct link_resource *link_res,
enum dp_test_pattern test_pattern,
uint8_t *custom_pattern,
uint32_t custom_pattern_size);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 372c0898facd..dbfe6690ded8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -34,6 +34,10 @@
#define MEMORY_TYPE_HBM 2
+#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0)
+#define GET_PIPE_SYNCD_FROM_PIPE(pipe) ((pipe)->pipe_idx_syncd & 0x7F)
+#define SET_PIPE_SYNCD_TO_PIPE(pipe, pipe_syncd) ((pipe)->pipe_idx_syncd = (0x80 | pipe_syncd))
+
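
A sketch of the pipe_idx_syncd encoding defined above: bit 7 marks the field
valid and bits 0-6 hold the synced master pipe index.

	SET_PIPE_SYNCD_TO_PIPE(pipe, 3);   /* pipe_idx_syncd == 0x83 */
	IS_PIPE_SYNCD_VALID(pipe);         /* 1 */
	GET_PIPE_SYNCD_FROM_PIPE(pipe);    /* 3 */
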
enum dce_version resource_parse_asic_id(
struct hw_asic_id asic_id);
@@ -202,8 +206,19 @@ int get_num_mpc_splits(struct pipe_ctx *pipe);
int get_num_odm_splits(struct pipe_ctx *pipe);
#if defined(CONFIG_DRM_AMD_DC_DCN)
-struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
- const struct resource_pool *pool);
+struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
+ const struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct dc_link *link);
#endif
+void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
+ struct dc_state *context);
+
+void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
+ struct dc_state *context,
+ uint8_t disabled_master_pipe_idx);
+
+uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index 378cc11aa047..6b5fedd9ace0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -185,16 +185,18 @@ bool dal_irq_service_dummy_set(struct irq_service *irq_service,
const struct irq_source_info *info,
bool enable)
{
- DC_LOG_ERROR("%s: called for non-implemented irq source\n",
- __func__);
+ DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n",
+ __func__, info->src_id, info->ext_id);
+
return false;
}
bool dal_irq_service_dummy_ack(struct irq_service *irq_service,
const struct irq_source_info *info)
{
- DC_LOG_ERROR("%s: called for non-implemented irq source\n",
- __func__);
+ DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n",
+ __func__, info->src_id, info->ext_id);
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index 34f43cb650f8..cf072e2347d3 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -40,10 +40,9 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-enum dc_irq_source to_dal_irq_source_dcn10(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 9ccafe007b23..c4b067d01895 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -132,31 +132,6 @@ enum dc_irq_source to_dal_irq_source_dcn20(
}
}
-uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source)
-{
- const struct irq_source_info *info;
- uint32_t addr;
- uint32_t value;
- uint32_t current_status;
-
- info = find_irq_source_info(irq_service, source);
- if (!info)
- return 0;
-
- addr = info->status_reg;
- if (!addr)
- return 0;
-
- value = dm_read_reg(irq_service->ctx, addr);
- current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE);
-
- return current_status;
-}
-
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
index 4d69ab24ca25..aee4b37999f1 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
@@ -31,6 +31,4 @@
struct irq_service *dal_irq_service_dcn20_create(
struct irq_service_init_data *init_data);
-uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index a47f68634fc3..aa708b61142f 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -39,10 +39,9 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-enum dc_irq_source to_dal_irq_source_dcn201(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn201(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 78940cb20e10..0f15bcada4e9 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -40,10 +40,9 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-enum dc_irq_source to_dal_irq_source_dcn21(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
@@ -135,31 +134,6 @@ enum dc_irq_source to_dal_irq_source_dcn21(
return DC_IRQ_SOURCE_INVALID;
}
-uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source)
-{
- const struct irq_source_info *info;
- uint32_t addr;
- uint32_t value;
- uint32_t current_status;
-
- info = find_irq_source_info(irq_service, source);
- if (!info)
- return 0;
-
- addr = info->status_reg;
- if (!addr)
- return 0;
-
- value = dm_read_reg(irq_service->ctx, addr);
- current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE);
-
- return current_status;
-}
-
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
index 616470e32380..da2bd0e93d7a 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
@@ -31,6 +31,4 @@
struct irq_service *dal_irq_service_dcn21_create(
struct irq_service_init_data *init_data);
-uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
index 38e0ade60c7b..1b88e4e627fd 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
@@ -36,10 +36,9 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-enum dc_irq_source to_dal_irq_source_dcn31(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 4db1133e4466..a2a4fbeb83f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)
*irq_service = NULL;
}
-const struct irq_source_info *find_irq_source_info(
+static const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
index e60b82480093..dbfcb096eedd 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -69,10 +69,6 @@ struct irq_service {
const struct irq_service_funcs *funcs;
};
-const struct irq_source_info *find_irq_source_info(
- struct irq_service *irq_service,
- enum dc_irq_source source);
-
void dal_irq_service_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data);
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index cd204eef073b..83855b8a32e9 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -360,6 +360,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*get_gpint_dataout)(struct dmub_srv *dmub);
+ void (*clear_inbox0_ack_register)(struct dmub_srv *dmub);
+ uint32_t (*read_inbox0_ack_register)(struct dmub_srv *dmub);
void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
uint32_t (*get_current_time)(struct dmub_srv *dmub);
@@ -409,6 +411,7 @@ struct dmub_srv {
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
struct dmub_rb inbox1_rb;
+ uint32_t inbox1_last_wptr;
/**
* outbox1_rb is accessed without locks (dal & dc)
* and to be used only in dmub_srv_stat_get_notification()
@@ -735,6 +738,45 @@ bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_
bool dmub_srv_should_detect(struct dmub_srv *dmub);
+/**
+ * dmub_srv_send_inbox0_cmd() - Send command to DMUB using INBOX0
+ * @dmub: the dmub service
+ * @data: the data to be sent in the INBOX0 command
+ *
+ * Send command by writing directly to INBOX0 WPTR
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ */
+enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
+
+/**
+ * dmub_srv_wait_for_inbox0_ack() - wait for DMUB to ACK INBOX0 command
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Wait for DMUB to ACK the INBOX0 message
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ * DMUB_STATUS_TIMEOUT - wait for ack timed out
+ */
+enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us);
+
+/**
+ * dmub_srv_clear_inbox0_ack() - clear ACK register for INBOX0
+ * @dmub: the dmub service
+ *
+ * Clear ACK register for INBOX0
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ */
+enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
+
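
A hedged usage sketch for the INBOX0 helpers declared above; the
clear-before-send ordering is an assumption based on the ack protocol the
kernel-doc describes, and error handling is elided:

	union dmub_inbox0_data_register data = { 0 };

	dmub_srv_clear_inbox0_ack(dmub);
	dmub_srv_send_inbox0_cmd(dmub, data);
	dmub_srv_wait_for_inbox0_ack(dmub, 100000); /* 100 ms timeout */
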
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index c29a67ccef17..873ecd04e01d 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -46,10 +46,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e
+#define DMUB_FW_VERSION_GIT_HASH 0xbaf06b95
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 91
+#define DMUB_FW_VERSION_REVISION 98
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -173,13 +173,6 @@ extern "C" {
#endif
/**
- * Number of nanoseconds per DMUB tick.
- * DMCUB_TIMER_CURRENT increments in DMUB ticks, which are 10ns by default.
- * If DMCUB_TIMER_WINDOW is non-zero this will no longer be true.
- */
-#define NS_PER_DMUB_TICK 10
-
-/**
* union dmub_addr - DMUB physical/virtual 64-bit address.
*/
union dmub_addr {
@@ -208,10 +201,9 @@ union dmub_psr_debug_flags {
uint32_t use_hw_lock_mgr : 1;
/**
- * Unused.
- * TODO: Remove.
+	 * Use TPS3 signal when restoring the main link.
*/
- uint32_t log_line_nums : 1;
+ uint32_t force_wakeup_by_tps3 : 1;
} bitfields;
/**
@@ -416,7 +408,14 @@ enum dmub_cmd_vbios_type {
* Enables or disables power gating.
*/
DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
+ /**
+ * Controls embedded panels.
+ */
DMUB_CMD__VBIOS_LVTMA_CONTROL = 15,
+ /**
+ * Query DP alt status on a transmitter.
+ */
+ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26,
};
//==============================================================================
@@ -1550,10 +1549,14 @@ struct dmub_cmd_psr_copy_settings_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /*
+ * DSC enable status in driver
+ */
+ uint8_t dsc_enable_status;
/**
- * Explicit padding to 4 byte boundary.
+	 * Explicit padding to 4 byte boundary (3 bytes).
*/
- uint8_t pad3[4];
+ uint8_t pad3[3];
};
/**
@@ -2398,6 +2401,24 @@ struct dmub_rb_cmd_lvtma_control {
};
/**
+ * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command.
+ */
+struct dmub_rb_cmd_transmitter_query_dp_alt_data {
+ uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */
+ uint8_t is_usb; /**< whether the PHY is in USB mode */
+ uint8_t is_dp_alt_disable; /**< whether DP alt mode is disabled */
+ uint8_t is_dp4; /**< whether DP is using 4 lanes */
+};
+
+/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command.
+ */
+struct dmub_rb_cmd_transmitter_query_dp_alt {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */
+};
+
+/**
* Maximum number of bytes a chunk sent to DMUB for parsing
*/
#define DMUB_EDID_CEA_DATA_CHUNK_BYTES 8
@@ -2408,7 +2429,7 @@ struct dmub_rb_cmd_lvtma_control {
struct dmub_cmd_send_edid_cea {
uint16_t offset; /**< offset into the CEA block */
uint8_t length; /**< number of bytes in payload to copy as part of CEA block */
- uint16_t total_length; /**< total length of the CEA block */
+ uint16_t cea_total_length; /**< total length of the CEA block */
uint8_t payload[DMUB_EDID_CEA_DATA_CHUNK_BYTES]; /**< data chunk of the CEA block */
uint8_t pad[3]; /**< padding and for future expansion */
};
@@ -2605,6 +2626,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_lvtma_control lvtma_control;
/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command.
+ */
+ struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt;
+ /**
* Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command.
*/
struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control;
@@ -2722,7 +2747,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const union dmub_rb_cmd *cmd)
{
- uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt);
const uint64_t *src = (const uint64_t *)cmd;
uint8_t i;
@@ -2840,7 +2865,7 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb,
static inline bool dmub_rb_out_front(struct dmub_rb *rb,
union dmub_rb_out_cmd *cmd)
{
- const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
+ const uint64_t volatile *src = (const uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->rptr);
uint64_t *dst = (uint64_t *)cmd;
uint8_t i;
@@ -2888,7 +2913,7 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
uint32_t wptr = rb->wrpt;
while (rptr != wptr) {
- uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ uint64_t volatile *data = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rptr);
//uint64_t volatile *p = (uint64_t volatile *)data;
uint64_t temp;
uint8_t i;
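Editor's note: the three ring-buffer hunks above all replace element-pointer arithmetic with byte arithmetic. The old form divided a byte offset by sizeof(uint64_t) before adding it to a uint64_t pointer, which silently rounds down whenever the offset is not a multiple of 8; the new form applies the offset in bytes and only then dereferences. A standalone illustration (values hypothetical; in practice the pointers should be command-aligned, but the byte-based form is robust either way):

	uint8_t base[64];
	uint32_t rptr = 12;	/* byte offset, deliberately not 8-byte aligned */

	/* old: 12 / 8 == 1 element, i.e. base + 8 -- offset truncated */
	uint64_t *old_ptr = (uint64_t *)base + rptr / sizeof(uint64_t);

	/* new: base + 12 exactly, whatever the alignment of rptr */
	uint64_t *new_ptr = (uint64_t *)((uint8_t *)base + rptr);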
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 56d400ffa7ac..9280f2abd973 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -100,24 +100,9 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb)
}
static const struct dmub_fw_meta_info *
-dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
+dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset)
{
const union dmub_fw_meta *meta;
- const uint8_t *blob = NULL;
- uint32_t blob_size = 0;
- uint32_t meta_offset = 0;
-
- if (params->fw_bss_data && params->bss_data_size) {
- /* Legacy metadata region. */
- blob = params->fw_bss_data;
- blob_size = params->bss_data_size;
- meta_offset = DMUB_FW_META_OFFSET;
- } else if (params->fw_inst_const && params->inst_const_size) {
- /* Combined metadata region. */
- blob = params->fw_inst_const;
- blob_size = params->inst_const_size;
- meta_offset = 0;
- }
if (!blob || !blob_size)
return NULL;
@@ -134,6 +119,32 @@ dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
return &meta->info;
}
+static const struct dmub_fw_meta_info *
+dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
+{
+ const struct dmub_fw_meta_info *info = NULL;
+
+ if (params->fw_bss_data && params->bss_data_size) {
+ /* Legacy metadata region. */
+ info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data,
+ params->bss_data_size,
+ DMUB_FW_META_OFFSET);
+ } else if (params->fw_inst_const && params->inst_const_size) {
+ /* Combined metadata region - can be aligned to 16-bytes. */
+ uint32_t i;
+
+ for (i = 0; i < 16; ++i) {
+ info = dmub_get_fw_meta_info_from_blob(
+ params->fw_inst_const, params->inst_const_size, i);
+
+ if (info)
+ break;
+ }
+ }
+
+ return info;
+}
+
static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
@@ -598,6 +609,8 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
{
+ struct dmub_rb flush_rb;
+
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
@@ -606,9 +619,14 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
* been flushed to framebuffer memory. Otherwise DMCUB might
* read back stale, fully invalid or partially invalid data.
*/
- dmub_rb_flush_pending(&dmub->inbox1_rb);
+ flush_rb = dmub->inbox1_rb;
+ flush_rb.rptr = dmub->inbox1_last_wptr;
+ dmub_rb_flush_pending(&flush_rb);
+
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+
+ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
return DMUB_STATUS_OK;
}
@@ -831,3 +849,38 @@ bool dmub_srv_should_detect(struct dmub_srv *dmub)
return dmub->hw_funcs.should_detect(dmub);
}
+
+enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub)
+{
+ if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register)
+ return DMUB_STATUS_INVALID;
+
+ dmub->hw_funcs.clear_inbox0_ack_register(dmub);
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us)
+{
+ uint32_t i = 0;
+ uint32_t ack = 0;
+
+ if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i <= timeout_us; i++) {
+ ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
+ if (ack)
+ return DMUB_STATUS_OK;
+ }
+ return DMUB_STATUS_TIMEOUT;
+}
+
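Editor's note: the ACK loop above performs timeout_us register reads back to back, so the parameter bounds iterations rather than wall-clock time unless each read happens to cost about a microsecond. If wall-clock accuracy were required, a per-iteration delay would be the usual fix; a sketch, not part of this patch:

	for (i = 0; i <= timeout_us; i++) {
		ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
		if (ack)
			return DMUB_STATUS_OK;
		udelay(1);	/* make each iteration ~1 us of wall-clock time */
	}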
+enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub,
+ union dmub_inbox0_data_register data)
+{
+ if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd)
+ return DMUB_STATUS_INVALID;
+
+ dmub->hw_funcs.send_inbox0_cmd(dmub, data);
+ return DMUB_STATUS_OK;
+}
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 4de59b66bb1a..a2b80514d83e 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -35,6 +35,7 @@
#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
#define DP_BRANCH_DEVICE_ID_006037 0x006037
+#define DP_DEVICE_ID_38EC11 0x38EC11
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
DDC_RESULT_SUCESSFULL,
@@ -117,4 +118,7 @@ struct av_sync_data {
uint8_t aud_del_ins3;/* DPCD 0002Dh */
};
+static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
+static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};
+
#endif /* __DAL_DDC_SERVICE_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 370fad883e33..f093b49c5e6e 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -72,9 +72,7 @@
#define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__)
#define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__)
#define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#define DC_LOG_DP2(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#endif
struct dal_logger;
@@ -126,9 +124,7 @@ enum dc_log_type {
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
LOG_DP2,
-#endif
LOG_SECTION_TOTAL_COUNT
};
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index 6d648c889866..f7420c3f5672 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -104,6 +104,7 @@ struct mod_hdcp_displayport {
uint8_t rev;
uint8_t assr_enabled;
uint8_t mst_enabled;
+ uint8_t usb4_enabled;
};
struct mod_hdcp_hdmi {
@@ -249,7 +250,6 @@ struct mod_hdcp_link {
uint8_t ddc_line;
uint8_t link_enc_idx;
uint8_t phy_idx;
- uint8_t dio_output_type;
uint8_t dio_output_id;
uint8_t hdcp_supported_informational;
union {
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 4b9e68a79f06..f57a1478f0fe 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -231,6 +231,8 @@ enum DC_FEATURE_MASK {
DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1
DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
+ DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default
+ DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
};
enum DC_DEBUG_MASK {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h
index 6d0052ce6bed..da6d380c948b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h
@@ -354,5 +354,12 @@
#define mmMP1_SMN_EXT_SCRATCH7 0x03c7
#define mmMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
+/*
+ * addressBlock: mp_SmuMp1Pub_MmuDec
+ * base address: 0x0
+ */
+#define smnMP1_PMI_3_START 0x3030204
+#define smnMP1_PMI_3_FIFO 0x3030208
+#define smnMP1_PMI_3 0x3030600
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h
index 136fb5de6a4c..a5ae2a801254 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h
@@ -959,5 +959,17 @@
#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+// MP1_PMI_3_START
+#define MP1_PMI_3_START__ENABLE_MASK 0x80000000L
+// MP1_PMI_3_FIFO
+#define MP1_PMI_3_FIFO__DEPTH_MASK 0x00000fffL
+
+// MP1_PMI_3_START
+#define MP1_PMI_3_START__ENABLE__SHIFT 0x0000001f
+// MP1_PMI_3_FIFO
+#define MP1_PMI_3_FIFO__DEPTH__SHIFT 0x00000000
+
+
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h
index 79eae0256dbd..8072b0a6376d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h
@@ -20753,8 +20753,6 @@
// addressBlock: nbio_nbif0_gdc_GDCDEC
// base address: 0xd0000000
-#define regGDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL 0x2ffc0eda
-#define regGDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL_BASE_IDX 5
#define regGDC1_NGDC_SDP_PORT_CTRL 0x2ffc0ee2
#define regGDC1_NGDC_SDP_PORT_CTRL_BASE_IDX 5
#define regGDC1_SHUB_REGS_IF_CTL 0x2ffc0ee3
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h
index e27fdc0c643c..54b0e4623971 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h
@@ -1,4 +1,3 @@
-
/*
* Copyright (C) 2020 Advanced Micro Devices, Inc.
*
@@ -108541,17 +108540,6 @@
// addressBlock: nbio_nbif0_gdc_GDCDEC
-//GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN0_FAST_WRITE_RESPONSE_EN__SHIFT 0x0
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN1_FAST_WRITE_RESPONSE_EN__SHIFT 0x1
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN2_FAST_WRITE_RESPONSE_EN__SHIFT 0x2
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN3_FAST_WRITE_RESPONSE_EN__SHIFT 0x3
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__FWR_NORMAL_ARB_MODE__SHIFT 0x10
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN0_FAST_WRITE_RESPONSE_EN_MASK 0x00000001L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN1_FAST_WRITE_RESPONSE_EN_MASK 0x00000002L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN2_FAST_WRITE_RESPONSE_EN_MASK 0x00000004L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN3_FAST_WRITE_RESPONSE_EN_MASK 0x00000008L
-#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__FWR_NORMAL_ARB_MODE_MASK 0x00010000L
//GDC1_NGDC_SDP_PORT_CTRL
#define GDC1_NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS__SHIFT 0x0
#define GDC1_NGDC_SDP_PORT_CTRL__NGDC_OBFF_HW_URGENT_EARLY_WAKEUP_EN__SHIFT 0xf
diff --git a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
index 9cb5f3631c60..ce79e5de8ce3 100644
--- a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
@@ -25,15 +25,15 @@
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
+struct IP_BASE_INSTANCE
{
unsigned int segment[MAX_SEGMENT];
-};
-
-struct IP_BASE
+} __maybe_unused;
+
+struct IP_BASE
{
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
+} __maybe_unused;
static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C00, 0, 0, 0, 0 } },
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index c84bd7b2cf59..ac941f62cbed 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -33,12 +33,11 @@
#include <linux/dma-fence.h>
struct pci_dev;
+struct amdgpu_device;
#define KGD_MAX_QUEUES 128
struct kfd_dev;
-struct kgd_dev;
-
struct kgd_mem;
enum kfd_preempt_type {
@@ -228,61 +227,61 @@ struct tile_config {
*/
struct kfd2kgd_calls {
/* Register access functions */
- void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
+ void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
- int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, u32 pasid,
+ int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid);
- int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);
+ int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id);
- int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
- int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd,
+ int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off);
- int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
+ int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm);
- int (*hqd_dump)(struct kgd_dev *kgd,
+ int (*hqd_dump)(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs);
- int (*hqd_sdma_dump)(struct kgd_dev *kgd,
+ int (*hqd_sdma_dump)(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs);
- bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-
- int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
+ bool (*hqd_is_occupied)(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id);
- bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);
+ int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
+ uint32_t reset_type, unsigned int timeout,
+ uint32_t pipe_id, uint32_t queue_id);
+
+ bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);
- int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
+ int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd,
unsigned int timeout);
- int (*address_watch_disable)(struct kgd_dev *kgd);
- int (*address_watch_execute)(struct kgd_dev *kgd,
+ int (*address_watch_disable)(struct amdgpu_device *adev);
+ int (*address_watch_execute)(struct amdgpu_device *adev,
unsigned int watch_point_id,
uint32_t cntl_val,
uint32_t addr_hi,
uint32_t addr_lo);
- int (*wave_control_execute)(struct kgd_dev *kgd,
+ int (*wave_control_execute)(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd);
- uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
+ uint32_t (*address_watch_get_offset)(struct amdgpu_device *adev,
unsigned int watch_point_id,
unsigned int reg_offset);
- bool (*get_atc_vmid_pasid_mapping_info)(
- struct kgd_dev *kgd,
+ bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev,
uint8_t vmid,
uint16_t *p_pasid);
@@ -290,16 +289,16 @@ struct kfd2kgd_calls {
* passed to the shader by the CP. It's the user mode driver's
* responsibility.
*/
- void (*set_scratch_backing_va)(struct kgd_dev *kgd,
+ void (*set_scratch_backing_va)(struct amdgpu_device *adev,
uint64_t va, uint32_t vmid);
- void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
+ void (*set_vm_context_page_table_base)(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base);
- uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
+ uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev);
- void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt,
- int *max_waves_per_cu);
- void (*program_trap_handler_settings)(struct kgd_dev *kgd,
+ void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
+ int *wave_cnt, int *max_waves_per_cu);
+ void (*program_trap_handler_settings)(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
};
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index bac15c466733..5c0867ebcfce 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -153,6 +153,10 @@ enum PP_SMC_POWER_PROFILE {
PP_SMC_POWER_PROFILE_COUNT,
};
+extern const char * const amdgpu_pp_profile_name[PP_SMC_POWER_PROFILE_COUNT];
+
enum {
PP_GROUP_UNKNOWN = 0,
PP_GROUP_GFX = 1,
diff --git a/drivers/gpu/drm/amd/include/yellow_carp_offset.h b/drivers/gpu/drm/amd/include/yellow_carp_offset.h
index 76b9eb3f441d..28a56b56bcaf 100644
--- a/drivers/gpu/drm/amd/include/yellow_carp_offset.h
+++ b/drivers/gpu/drm/amd/include/yellow_carp_offset.h
@@ -9,12 +9,12 @@
struct IP_BASE_INSTANCE
{
unsigned int segment[MAX_SEGMENT];
-};
+} __maybe_unused;
struct IP_BASE
{
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
+} __maybe_unused;
static const struct IP_BASE ACP_BASE = { { { { 0x02403800, 0x00480000, 0, 0, 0, 0 } },
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 41472ed99253..e2cae97f4ff1 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -82,6 +82,16 @@ static const struct hwmon_temp_label {
{PP_TEMP_MEM, "mem"},
};
+const char * const amdgpu_pp_profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"
+};
+
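Editor's note: with the table exported, each hwmgr/ppt backend can drop its private copy and index the shared array by the power-profile enum from kgd_pp_interface.h, for example:

	/* Sketch: the enum value selects the human-readable name. */
	enum PP_SMC_POWER_PROFILE mode = PP_SMC_POWER_PROFILE_VIDEO;
	const char *name = amdgpu_pp_profile_name[mode];	/* "VIDEO" */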
/**
* DOC: power_dpm_state
*
@@ -2080,7 +2090,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(unique_id)) {
if (asic_type != CHIP_VEGA10 &&
asic_type != CHIP_VEGA20 &&
- asic_type != CHIP_ARCTURUS)
+ asic_type != CHIP_ARCTURUS &&
+ asic_type != CHIP_ALDEBARAN)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_features)) {
if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
@@ -2123,6 +2134,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
}
}
+ /* setting should not be allowed from VF */
+ if (amdgpu_sriov_vf(adev)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+
#undef DEVICE_ATTR_IS
return 0;
@@ -3759,5 +3776,7 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
adev,
&amdgpu_debugfs_pm_prv_buffer_fops,
adev->pm.smu_prv_buffer_size);
+
+ amdgpu_smu_stb_debug_fs_init(adev);
#endif
}
diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
index 35fa0d8e92dd..ab66a4b9e438 100644
--- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
@@ -102,7 +102,9 @@
#define PPSMC_MSG_GfxDriverResetRecovery 0x42
#define PPSMC_MSG_BoardPowerCalibration 0x43
-#define PPSMC_Message_Count 0x44
+#define PPSMC_MSG_HeavySBR 0x45
+#define PPSMC_Message_Count 0x46
+
//PPSMC Reset Types
#define PPSMC_RESET_TYPE_WARM_RESET 0x00
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 16e3f72d31b9..c464a045000d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -423,6 +423,9 @@ enum ip_power_state {
POWER_STATE_OFF,
};
+/* Used to mask smu debug modes */
+#define SMU_DEBUG_HALT_ON_ERROR 0x1
+
struct amdgpu_pm {
struct mutex mutex;
u32 current_sclk;
@@ -460,6 +463,11 @@ struct amdgpu_pm {
struct list_head pm_attr_list;
atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM];
+
+ /*
+ * 0 = disabled (default), otherwise enable corresponding debug mode
+ */
+ uint32_t smu_debug_mask;
};
#define R600_SSTU_DFLT 0
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 3557f4e7fc30..ba7565bc8104 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -324,6 +324,7 @@ enum smu_table_id
SMU_TABLE_OVERDRIVE,
SMU_TABLE_I2C_COMMANDS,
SMU_TABLE_PACE,
+ SMU_TABLE_ECCINFO,
SMU_TABLE_COUNT,
};
@@ -340,6 +341,7 @@ struct smu_table_context
void *max_sustainable_clocks;
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
+ void *ecc_table;
struct smu_table tables[SMU_TABLE_COUNT];
/*
* The driver table is just a staging buffer for
@@ -472,7 +474,14 @@ struct cmn2asic_mapping {
int map_to;
};
+struct stb_context {
+ uint32_t stb_buf_size;
+ bool enabled;
+ spinlock_t lock;
+};
+
#define WORKLOAD_POLICY_MAX 7
+
struct smu_context
{
struct amdgpu_device *adev;
@@ -559,6 +568,8 @@ struct smu_context
uint16_t cpu_core_num;
struct smu_user_dpm_profile user_dpm_profile;
+
+ struct stb_context stb_context;
};
struct i2c_adapter;
@@ -1246,9 +1257,9 @@ struct pptable_funcs {
int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
/**
- * @set_light_sbr: Set light sbr mode for the SMU.
+ * @smu_handle_passthrough_sbr: Send message to SMU about special handling for SBR.
*/
- int (*set_light_sbr)(struct smu_context *smu, bool enable);
+ int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable);
/**
* @wait_for_event: Wait for events from SMU.
@@ -1261,6 +1272,17 @@ struct pptable_funcs {
* of SMUBUS table.
*/
int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);
+
+ /**
+ * @get_ecc_table: message SMU to get ECC INFO table.
+ */
+ ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
+
+
+ /**
+ * @stb_collect_info: Collects Smart Trace Buffers data.
+ */
+ int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size);
};
typedef enum {
@@ -1393,10 +1415,13 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
-int smu_set_light_sbr(struct smu_context *smu, bool enable);
+int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);
int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
uint64_t event_arg);
+int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
+int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
+void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
index a017983ff1fa..0f67c56c2863 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
@@ -140,6 +140,8 @@
#define MAX_SW_I2C_COMMANDS 24
+#define ALDEBARAN_UMC_CHANNEL_NUM 32
+
typedef enum {
I2C_CONTROLLER_PORT_0, //CKSVII2C0
I2C_CONTROLLER_PORT_1, //CKSVII2C1
@@ -507,6 +509,19 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsDebugTable_t;
+typedef struct {
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+
+ uint32_t eccPadding;
+} EccInfo_t;
+
+typedef struct {
+ EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM];
+} EccInfoTable_t;
+
// These defines are used with the following messages:
// SMC_MSG_TransferTableDram2Smu
// SMC_MSG_TransferTableSmu2Dram
@@ -517,6 +532,7 @@ typedef struct {
#define TABLE_SMU_METRICS 4
#define TABLE_DRIVER_SMU_CONFIG 5
#define TABLE_I2C_COMMANDS 6
-#define TABLE_COUNT 7
+#define TABLE_ECCINFO 7
+#define TABLE_COUNT 8
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 18b862a90fbe..ff8a0bcbd290 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -229,7 +229,8 @@
__SMU_DUMMY_MAP(BoardPowerCalibration), \
__SMU_DUMMY_MAP(RequestGfxclk), \
__SMU_DUMMY_MAP(ForceGfxVid), \
- __SMU_DUMMY_MAP(UnforceGfxVid),
+ __SMU_DUMMY_MAP(UnforceGfxVid), \
+ __SMU_DUMMY_MAP(HeavySBR),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 2d422e6a9feb..acb3be292096 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -312,7 +312,7 @@ int smu_v11_0_deep_sleep_control(struct smu_context *smu,
void smu_v11_0_interrupt_work(struct smu_context *smu);
-int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable);
+int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable);
int smu_v11_0_restore_user_od_settings(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
index e5d3b0d1a032..44af23ae059e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
@@ -27,7 +27,9 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
-#define SMU13_DRIVER_IF_VERSION_ALDE 0x07
+#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
+
+#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -216,7 +218,6 @@ int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
int smu_v13_0_baco_enter(struct smu_context *smu);
int smu_v13_0_baco_exit(struct smu_context *smu);
-int smu_v13_0_mode1_reset(struct smu_context *smu);
int smu_v13_0_mode2_reset(struct smu_context *smu);
int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 619f8d305292..3ab67b232cd4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1556,7 +1556,7 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
static int pp_asic_reset_mode_2(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
+ int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 1f406f21b452..9ddd8491ff00 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1439,13 +1439,6 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{70, 90, 0, 0,},
{30, 60, 0, 6,},
};
- static const char *profile_name[6] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE"};
static const char *title[6] = {"NUM",
"MODE_NAME",
"BUSY_SET_POINT",
@@ -1463,7 +1456,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
- i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
profile_mode_setting[i][0], profile_mode_setting[i][1],
profile_mode_setting[i][2], profile_mode_setting[i][3]);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 611969bf4520..cd99db0dc2be 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5498,14 +5498,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
uint32_t i, size = 0;
uint32_t len;
- static const char *profile_name[7] = {"BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
-
static const char *title[8] = {"NUM",
"MODE_NAME",
"SCLK_UP_HYST",
@@ -5529,7 +5521,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
for (i = 0; i < len; i++) {
if (i == hwmgr->power_profile_mode) {
size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
- i, profile_name[i], "*",
+ i, amdgpu_pp_profile_name[i], "*",
data->current_profile_setting.sclk_up_hyst,
data->current_profile_setting.sclk_down_hyst,
data->current_profile_setting.sclk_activity,
@@ -5540,12 +5532,12 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
}
if (smu7_profiling[i].bupdate_sclk)
size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ",
- i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
+ i, amdgpu_pp_profile_name[i], smu7_profiling[i].sclk_up_hyst,
smu7_profiling[i].sclk_down_hyst,
smu7_profiling[i].sclk_activity);
else
size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ",
- i, profile_name[i], "-", "-", "-");
+ i, amdgpu_pp_profile_name[i], "-", "-", "-");
if (smu7_profiling[i].bupdate_mclk)
size += sysfs_emit_at(buf, size, "%16d %16d %16d\n",
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index e6336654c565..3f040be0d158 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -5097,13 +5097,6 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{70, 90, 0, 0,},
{30, 60, 0, 6,},
};
- static const char *profile_name[7] = {"BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
static const char *title[6] = {"NUM",
"MODE_NAME",
"BUSY_SET_POINT",
@@ -5121,11 +5114,12 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
- i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
profile_mode_setting[i][0], profile_mode_setting[i][1],
profile_mode_setting[i][2], profile_mode_setting[i][3]);
+
size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
- profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
data->custom_profile_mode[0], data->custom_profile_mode[1],
data->custom_profile_mode[2], data->custom_profile_mode[3]);
return size;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 85d55ab4e369..97b3ad369046 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -3980,14 +3980,6 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
DpmActivityMonitorCoeffInt_t activity_monitor;
uint32_t i, size = 0;
uint16_t workload_type = 0;
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
static const char *title[] = {
"PROFILE_INDEX(NAME)",
"CLOCK_TYPE(NAME)",
@@ -4021,7 +4013,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
return result);
size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
- i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 9d7d64fdf410..d93d28c1af95 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -277,8 +277,12 @@ static int smu_dpm_set_power_gate(void *handle,
struct smu_context *smu = handle;
int ret = 0;
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
+ dev_WARN(smu->adev->dev,
+ "SMU uninitialized but power %s requested for %u!\n",
+ gate ? "gate" : "ungate", block_type);
return -EOPNOTSUPP;
+ }
switch (block_type) {
/*
@@ -1153,6 +1157,8 @@ static int smu_smc_hw_setup(struct smu_context *smu)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 0, 12):
ret = smu_system_features_control(smu, true);
+ if (ret)
+ dev_err(adev->dev, "Failed system features control!\n");
break;
default:
break;
@@ -1277,8 +1283,10 @@ static int smu_smc_hw_setup(struct smu_context *smu)
}
ret = smu_notify_display_change(smu);
- if (ret)
+ if (ret) {
+ dev_err(adev->dev, "Failed to notify display change!\n");
return ret;
+ }
/*
* Set min deep sleep dce fclk with bootup value from vbios via
@@ -1286,8 +1294,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
*/
ret = smu_set_min_dcef_deep_sleep(smu,
smu->smu_table.boot_values.dcefclk / 100);
- if (ret)
- return ret;
return ret;
}
@@ -1344,7 +1350,6 @@ static int smu_hw_init(void *handle)
}
if (smu->is_apu) {
- smu_powergate_sdma(&adev->smu, false);
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
smu_set_gfx_cgpg(&adev->smu, true);
@@ -1512,10 +1517,6 @@ static int smu_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- if (smu->is_apu) {
- smu_powergate_sdma(&adev->smu, true);
- }
-
smu_dpm_set_vcn_enable(smu, false);
smu_dpm_set_jpeg_enable(smu, false);
@@ -3063,16 +3064,30 @@ static int smu_gfx_state_change_set(void *handle,
return ret;
}
-int smu_set_light_sbr(struct smu_context *smu, bool enable)
+int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
int ret = 0;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_light_sbr)
- ret = smu->ppt_funcs->set_light_sbr(smu, enable);
+ if (smu->ppt_funcs->smu_handle_passthrough_sbr)
+ ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
+{
+ int ret = -EOPNOTSUPP;
+
+ mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs &&
+ smu->ppt_funcs->get_ecc_info)
+ ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
mutex_unlock(&smu->mutex);
return ret;
}
static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
@@ -3164,3 +3179,107 @@ int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
return ret;
}
+
+int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
+{
+ if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
+ return -EOPNOTSUPP;
+
+ /* Confirm the buffer allocated is of correct size */
+ if (size != smu->stb_context.stb_buf_size)
+ return -EINVAL;
+
+ /*
+ * No need to lock smu mutex as we access STB directly through MMIO
+ * and not going through SMU messaging route (for now at least).
+ * For registers access rely on implementation internal locking.
+ */
+ return smu->ppt_funcs->stb_collect_info(smu, buf, size);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct smu_context *smu = &adev->smu;
+ unsigned char *buf;
+ int r;
+
+ buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
+ if (r)
+ goto out;
+
+ filp->private_data = buf;
+
+ return 0;
+
+out:
+ kvfree(buf);
+ return r;
+}
+
+static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct smu_context *smu = &adev->smu;
+
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf,
+ size,
+ pos, filp->private_data,
+ smu->stb_context.stb_buf_size);
+}
+
+static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
+{
+ kvfree(filp->private_data);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+/*
+ * We have to define not only a read method but also
+ * open and release methods: .read returns at most
+ * PAGE_SIZE of data per call and is therefore invoked
+ * multiple times. We allocate the STB buffer in .open
+ * and release it in .release.
+ */
+static const struct file_operations smu_stb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = smu_stb_debugfs_open,
+ .read = smu_stb_debugfs_read,
+ .release = smu_stb_debugfs_release,
+ .llseek = default_llseek,
+};
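Editor's note: once registered (see amdgpu_smu_stb_debug_fs_init below), the dump is an ordinary debugfs file. A minimal userspace reader might look like the sketch below; the /sys/kernel/debug/dri/0 path depends on the DRM minor and the debugfs mount point, so treat it as an assumption:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_smu_stb_dump", O_RDONLY);

		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t)n, stdout);	/* raw STB contents */
		close(fd);
		return 0;
	}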
+
+#endif
+
+void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+ struct smu_context *smu = &adev->smu;
+
+ if (!smu->stb_context.stb_buf_size)
+ return;
+
+ debugfs_create_file_size("amdgpu_smu_stb_dump",
+ S_IRUSR,
+ adev_to_drm(adev)->primary->debugfs_root,
+ adev,
+ &smu_stb_debugfs_fops,
+ smu->stb_context.stb_buf_size);
+#endif
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index fd1d30a93db5..505d2fb94fd9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -295,16 +295,6 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu)
return -ENOMEM;
smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
- smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
- GFP_KERNEL);
- if (!smu_dpm->dpm_current_power_state)
- return -ENOMEM;
-
- smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
- GFP_KERNEL);
- if (!smu_dpm->dpm_request_power_state)
- return -ENOMEM;
-
return 0;
}
@@ -1389,14 +1379,6 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
char *buf)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
static const char *title[] = {
"PROFILE_INDEX(NAME)",
"CLOCK_TYPE(NAME)",
@@ -1453,7 +1435,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
}
size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
if (smu_version >= 0x360d00) {
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
@@ -2490,7 +2472,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.deep_sleep_control = smu_v11_0_deep_sleep_control,
.get_fan_parameters = arcturus_get_fan_parameters,
.interrupt_work = smu_v11_0_interrupt_work,
- .set_light_sbr = smu_v11_0_set_light_sbr,
+ .smu_handle_passthrough_sbr = smu_v11_0_handle_passthrough_sbr,
.set_mp1_state = smu_cmn_set_mp1_state,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 60a557068ea4..2bb7816b245a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1713,14 +1713,6 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
DpmActivityMonitorCoeffInt_t activity_monitor;
uint32_t i, size = 0;
int16_t workload_type = 0;
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
static const char *title[] = {
"PROFILE_INDEX(NAME)",
"CLOCK_TYPE(NAME)",
@@ -1759,7 +1751,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
}
size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index a4108025fe29..777f717c37ae 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -80,6 +80,9 @@
(*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\
} while(0)
+/* STB FIFO depth is in 64bit units */
+#define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8
+
static int get_table_size(struct smu_context *smu)
{
if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
@@ -650,6 +653,8 @@ static int sienna_cichlid_allocate_dpm_context(struct smu_context *smu)
return 0;
}
+static void sienna_cichlid_stb_init(struct smu_context *smu);
+
static int sienna_cichlid_init_smc_tables(struct smu_context *smu)
{
int ret = 0;
@@ -662,6 +667,8 @@ static int sienna_cichlid_init_smc_tables(struct smu_context *smu)
if (ret)
return ret;
+ sienna_cichlid_stb_init(smu);
+
return smu_v11_0_init_smc_tables(smu);
}
@@ -1171,7 +1178,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, uint32_t mask)
{
struct amdgpu_device *adev = smu->adev;
- int ret = 0, size = 0;
+ int ret = 0;
uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
soft_min_level = mask ? (ffs(mask) - 1) : 0;
@@ -1216,7 +1223,7 @@ forec_level_out:
if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
amdgpu_gfx_off_ctrl(adev, true);
- return size;
+ return 0;
}
static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
@@ -1342,14 +1349,6 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
uint32_t i, size = 0;
int16_t workload_type = 0;
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
static const char *title[] = {
"PROFILE_INDEX(NAME)",
"CLOCK_TYPE(NAME)",
@@ -1388,7 +1387,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
}
size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
@@ -2135,7 +2134,13 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
static int sienna_cichlid_run_btc(struct smu_context *smu)
{
- return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+ int res;
+
+ res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+ if (res)
+ dev_err(smu->adev->dev, "RunDcBtc failed!\n");
+
+ return res;
}
static int sienna_cichlid_baco_enter(struct smu_context *smu)
@@ -3619,6 +3624,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->energy_accumulator =
use_metrics_v2 ? metrics_v2->EnergyAccumulator : metrics->EnergyAccumulator;
+ if (metrics->CurrGfxVoltageOffset)
+ gpu_metrics->voltage_gfx =
+ (155000 - 625 * metrics->CurrGfxVoltageOffset) / 100;
+ if (metrics->CurrMemVidOffset)
+ gpu_metrics->voltage_mem =
+ (155000 - 625 * metrics->CurrMemVidOffset) / 100;
+ if (metrics->CurrSocVoltageOffset)
+ gpu_metrics->voltage_soc =
+ (155000 - 625 * metrics->CurrSocVoltageOffset) / 100;
+
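Editor's note: assuming the quotient is reported in millivolts (the units are an assumption, not stated in this hunk), a CurrGfxVoltageOffset of 40 would yield (155000 - 625 * 40) / 100 = 130000 / 100 = 1300, i.e. 1.30 V.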
average_gfx_activity = use_metrics_v2 ? metrics_v2->AverageGfxActivity : metrics->AverageGfxActivity;
if (average_gfx_activity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
gpu_metrics->average_gfxclk_frequency =
@@ -3793,6 +3808,53 @@ static int sienna_cichlid_set_mp1_state(struct smu_context *smu,
return ret;
}
+static void sienna_cichlid_stb_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t reg;
+
+ reg = RREG32_PCIE(MP1_Public | smnMP1_PMI_3_START);
+ smu->stb_context.enabled = REG_GET_FIELD(reg, MP1_PMI_3_START, ENABLE);
+
+ /* STB is disabled */
+ if (!smu->stb_context.enabled)
+ return;
+
+ spin_lock_init(&smu->stb_context.lock);
+
+ /* STB buffer size in bytes as function of FIFO depth */
+ reg = RREG32_PCIE(MP1_Public | smnMP1_PMI_3_FIFO);
+ smu->stb_context.stb_buf_size = 1 << REG_GET_FIELD(reg, MP1_PMI_3_FIFO, DEPTH);
+ smu->stb_context.stb_buf_size *= SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES;
+
+ dev_info(smu->adev->dev, "STB initialized to %d entries\n",
+ smu->stb_context.stb_buf_size / SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES);
+}
+
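Editor's note, for a sense of scale (the DEPTH value is hypothetical): a FIFO DEPTH of 11 gives a buffer of (1 << 11) * 8 = 16384 bytes, which the message above reports as 2048 entries.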
+static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
+ void *buf,
+ uint32_t size)
+{
+ uint32_t *p = buf;
+ struct amdgpu_device *adev = smu->adev;
+
+ /* No need to disable interrupts for now, as we don't take this lock from an ISR yet */
+ spin_lock(&smu->stb_context.lock);
+
+ /*
+ * Read the STB FIFO in units of 32bit since this is the accessor window
+ * (register width) we have.
+ */
+ buf = ((char *) buf) + size;
+ while ((void *)p < buf)
+ *p++ = cpu_to_le32(RREG32_PCIE(MP1_Public | smnMP1_PMI_3));
+
+ spin_unlock(&smu->stb_context.lock);
+
+ return 0;
+}
+
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -3882,6 +3944,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.interrupt_work = smu_v11_0_interrupt_work,
.gpo_control = sienna_cichlid_gpo_control,
.set_mp1_state = sienna_cichlid_set_mp1_state,
+ .stb_collect_info = sienna_cichlid_stb_get_data_direct,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 28b7c0562b99..4e9e2cf39859 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1724,7 +1724,7 @@ int smu_v11_0_mode1_reset(struct smu_context *smu)
return ret;
}
-int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable)
+int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
int ret = 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index c02ed65ffa38..5cb07ed227fb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1039,14 +1039,6 @@ failed:
static int vangogh_get_power_profile_mode(struct smu_context *smu,
char *buf)
{
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
uint32_t i, size = 0;
int16_t workload_type = 0;
@@ -1066,7 +1058,7 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
continue;
size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
}
return size;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 145f13b8c977..25c4b135f830 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1095,14 +1095,6 @@ static int renoir_set_watermarks_table(
static int renoir_get_power_profile_mode(struct smu_context *smu,
char *buf)
{
- static const char *profile_name[] = {
- "BOOTUP_DEFAULT",
- "3D_FULL_SCREEN",
- "POWER_SAVING",
- "VIDEO",
- "VR",
- "COMPUTE",
- "CUSTOM"};
uint32_t i, size = 0;
int16_t workload_type = 0;
@@ -1121,7 +1113,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
continue;
size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
- i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
}
return size;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 7d50827cf0a8..4885c4ae78b7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -78,6 +78,12 @@
#define smnPCIE_ESM_CTRL 0x111003D0
+/*
+ * SMU supports the ECCTABLE since version 68.42.0;
+ * use this to check whether the ECCTABLE feature is supported.
+ */
+#define SUPPORT_ECCTABLE_SMU_VERSION 0x00442a00
+
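Editor's note: the constant packs major/minor/patch into one byte each, so 0x00442a00 decodes as 0x44.0x2a.0x00 = 68.42.0, matching the comment. The 0x00440700 and 0x00442c00 checks later in this file decode the same way, to 68.7.0 and 68.44.0.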
static const struct smu_temperature_range smu13_thermal_policy[] =
{
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
@@ -135,6 +141,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0),
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
MSG_MAP(BoardPowerCalibration, PPSMC_MSG_BoardPowerCalibration, 0),
+ MSG_MAP(HeavySBR, PPSMC_MSG_HeavySBR, 0),
};
static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -190,6 +197,7 @@ static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(SMU_METRICS),
TAB_MAP(DRIVER_SMU_CONFIG),
TAB_MAP(I2C_COMMANDS),
+ TAB_MAP(ECCINFO),
};
static const uint8_t aldebaran_throttler_map[] = {
@@ -223,6 +231,9 @@ static int aldebaran_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
@@ -235,6 +246,10 @@ static int aldebaran_tables_init(struct smu_context *smu)
return -ENOMEM;
}
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+ if (!smu_table->ecc_table)
+ return -ENOMEM;
+
return 0;
}
@@ -248,16 +263,6 @@ static int aldebaran_allocate_dpm_context(struct smu_context *smu)
return -ENOMEM;
smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
- smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
- GFP_KERNEL);
- if (!smu_dpm->dpm_current_power_state)
- return -ENOMEM;
-
- smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
- GFP_KERNEL);
- if (!smu_dpm->dpm_request_power_state)
- return -ENOMEM;
-
return 0;
}
@@ -1601,7 +1606,8 @@ out_unlock:
mutex_unlock(&smu->metrics_lock);
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
- sprintf(adev->serial, "%016llx", adev->unique_id);
+ if (adev->serial[0] == '\0')
+ sprintf(adev->serial, "%016llx", adev->unique_id);
}
static bool aldebaran_is_baco_supported(struct smu_context *smu)
@@ -1619,10 +1625,18 @@ static int aldebaran_set_df_cstate(struct smu_context *smu,
static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
- return smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GmiPwrDnControl,
- en ? 0 : 1,
- NULL);
+ struct amdgpu_device *adev = smu->adev;
+
+ /* The message only works on the master die; a NACK is sent
+ * back for other dies, so only send it on the master die.
+ */
+ if (!adev->smuio.funcs->get_socket_id(adev) &&
+ !adev->smuio.funcs->get_die_id(adev))
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ en ? 0 : 1,
+ NULL);
+ else
+ return 0;
}
static const struct throttling_logging_label {
@@ -1765,6 +1779,98 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static int aldebaran_check_ecc_table_support(struct smu_context *smu)
+{
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret) {
+ /* report unsupported if we failed to get the SMU version */
+ ret = -EOPNOTSUPP;
+ }
+
+ if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
+ ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
+static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ EccInfoTable_t *ecc_table = NULL;
+ struct ecc_info_per_ch *ecc_info_per_channel = NULL;
+ int i, ret = 0;
+ struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
+
+ ret = aldebaran_check_ecc_table_support(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ECCINFO,
+ 0,
+ smu_table->ecc_table,
+ false);
+ if (ret) {
+ dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n");
+ return ret;
+ }
+
+ ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
+
+ for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo[i].mca_umc_addr;
+ }
+
+ return ret;
+}
+
+static int aldebaran_mode1_reset(struct smu_context *smu)
+{
+ u32 smu_version, fatal_err, param;
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ fatal_err = 0;
+ param = SMU_RESET_MODE_1;
+
+ /*
+ * PM FW supports SMU_MSG_GfxDeviceDriverReset since 68.07.
+ */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (smu_version < 0x00440700) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ } else {
+ /* Fatal error triggered by RAS; PMFW supports the
+ * flag since 68.44.0.
+ */
+ if ((smu_version >= 0x00442c00) && ras &&
+ atomic_read(&ras->in_recovery))
+ fatal_err = 1;
+
+ param |= (fatal_err << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GfxDeviceDriverReset, param, NULL);
+ }
+
+ if (!ret)
+ msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+}
+
static int aldebaran_mode2_reset(struct smu_context *smu)
{
u32 smu_version;
@@ -1816,6 +1922,14 @@ out:
return ret;
}
+static int aldebaran_smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_HeavySBR,
+ enable ? 1 : 0, NULL);
+}
+
static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
{
#if 0
@@ -1925,13 +2039,15 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.get_gpu_metrics = aldebaran_get_gpu_metrics,
.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
- .mode1_reset = smu_v13_0_mode1_reset,
+ .smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
+ .mode1_reset = aldebaran_mode1_reset,
.set_mp1_state = aldebaran_set_mp1_state,
.mode2_reset = aldebaran_mode2_reset,
.wait_for_event = smu_v13_0_wait_for_event,
.i2c_init = aldebaran_i2c_control_init,
.i2c_fini = aldebaran_i2c_control_fini,
.send_hbm_bad_pages_num = aldebaran_smu_send_hbm_bad_page_num,
+ .get_ecc_info = aldebaran_get_ecc_info,
};
void aldebaran_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 19a5d2c39c8d..b54790d3483e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -60,8 +60,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
#define SMU13_VOLTAGE_SCALE 4
-#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
-
#define LINK_WIDTH_MAX 6
#define LINK_SPEED_MAX 3
@@ -214,7 +212,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
if (smu->is_apu)
adev->pm.fw_version = smu_version;
- switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
break;
@@ -223,13 +221,15 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
break;
default:
- dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
- smu->adev->ip_versions[MP1_HWIP][0]);
+ dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
+ adev->ip_versions[MP1_HWIP][0]);
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
break;
}
- dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
+	/* only for dGPU w/ SMU13 */
+	if (adev->pm.fw)
+		dev_dbg(adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
smu_version, smu_major, smu_minor, smu_debug);
/*
@@ -241,11 +241,11 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
- dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+ dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
- dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
+ dev_warn(adev->dev, "SMU driver if version not matched\n");
}
return ret;
@@ -433,8 +433,10 @@ int smu_v13_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = NULL;
+ kfree(smu_table->ecc_table);
kfree(smu_table->metrics_table);
kfree(smu_table->watermarks_table);
+ smu_table->ecc_table = NULL;
smu_table->metrics_table = NULL;
smu_table->watermarks_table = NULL;
smu_table->metrics_time = 0;
@@ -1427,25 +1429,6 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu)
return ret;
}
-int smu_v13_0_mode1_reset(struct smu_context *smu)
-{
- u32 smu_version;
- int ret = 0;
- /*
- * PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07
- */
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version < 0x00440700)
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
- else
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_1, NULL);
-
- if (!ret)
- msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
-
- return ret;
-}
-
static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
uint64_t event_arg)
{
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index ea6f50c08c5f..ee1a312fd497 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -94,10 +94,10 @@ static void smu_cmn_read_arg(struct smu_context *smu,
/**
* __smu_cmn_poll_stat -- poll for a status from the SMU
- * smu: a pointer to SMU context
+ * @smu: a pointer to SMU context
*
* Returns the status of the SMU, which could be,
- * 0, the SMU is busy with your previous command;
+ * 0, the SMU is busy with your command;
* 1, execution status: success, execution result: success;
* 0xFF, execution status: success, execution result: failure;
* 0xFE, unknown command;
@@ -257,10 +257,11 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg_index,
uint32_t param)
{
+ struct amdgpu_device *adev = smu->adev;
u32 reg;
int res;
- if (smu->adev->no_hw_access)
+ if (adev->no_hw_access)
return 0;
reg = __smu_cmn_poll_stat(smu);
@@ -272,6 +273,12 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
__smu_cmn_send_msg(smu, msg_index, param);
res = 0;
Out:
+ if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
+ res && (res != -ETIME)) {
+ amdgpu_device_halt(adev);
+ WARN_ON(1);
+ }
+
return res;
}
@@ -288,9 +295,18 @@ Out:
int smu_cmn_wait_for_response(struct smu_context *smu)
{
u32 reg;
+ int res;
reg = __smu_cmn_poll_stat(smu);
- return __smu_cmn_reg2errno(smu, reg);
+ res = __smu_cmn_reg2errno(smu, reg);
+
+ if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
+ res && (res != -ETIME)) {
+ amdgpu_device_halt(smu->adev);
+ WARN_ON(1);
+ }
+
+ return res;
}
/**
@@ -328,10 +344,11 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
uint32_t param,
uint32_t *read_arg)
{
+ struct amdgpu_device *adev = smu->adev;
int res, index;
u32 reg;
- if (smu->adev->no_hw_access)
+ if (adev->no_hw_access)
return 0;
index = smu_cmn_to_asic_specific_index(smu,
@@ -352,11 +369,16 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
__smu_cmn_send_msg(smu, (uint16_t) index, param);
reg = __smu_cmn_poll_stat(smu);
res = __smu_cmn_reg2errno(smu, reg);
- if (res == -EREMOTEIO)
+ if (res != 0)
__smu_cmn_reg_print_error(smu, reg, index, param, msg);
if (read_arg)
smu_cmn_read_arg(smu, read_arg);
Out:
+ if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
+ amdgpu_device_halt(adev);
+ WARN_ON(1);
+ }
+
mutex_unlock(&smu->message_lock);
return res;
}
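
The three smu_cmn.c hunks above repeat one debug pattern: when SMU_DEBUG_HALT_ON_ERROR is set in adev->pm.smu_debug_mask, a failed message halts the device so the error state can be inspected. A hedged sketch of the shared logic, factored into a helper with a hypothetical name (note the last hunk halts even on -ETIME, unlike the first two):

	static inline void smu_debug_halt_on_error(struct amdgpu_device *adev,
						   int res)
	{
		if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
		    res && (res != -ETIME)) {
			amdgpu_device_halt(adev);	/* freeze GPU for inspection */
			WARN_ON(1);			/* leave a backtrace in dmesg */
		}
	}
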
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index 3a9e966e0e78..58a242871b28 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -6,7 +6,6 @@ config DRM_HDLCD
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
@@ -27,7 +26,6 @@ config DRM_MALI_DISPLAY
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
index cec0639e3aa1..e91598b60781 100644
--- a/drivers/gpu/drm/arm/display/Kconfig
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -4,7 +4,6 @@ config DRM_KOMEDA
depends on DRM && OF
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
index 5e95bcea43e9..024ccab14f88 100644
--- a/drivers/gpu/drm/aspeed/Kconfig
+++ b/drivers/gpu/drm/aspeed/Kconfig
@@ -5,7 +5,7 @@ config DRM_ASPEED_GFX
depends on (COMPILE_TEST || ARCH_ASPEED)
depends on MMU
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DMA_CMA if HAVE_DMA_CONTIGUOUS
select CMA if HAVE_DMA_CONTIGUOUS
select MFD_SYSCON
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 438a2d05b115..21f71160bc3e 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -3,6 +3,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ast-y := ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o
+ast-y := ast_drv.o ast_i2c.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 86d5cd7b6318..6d8613f6fe1c 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -26,7 +26,6 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -233,7 +232,7 @@ static struct pci_driver ast_pci_driver = {
static int __init ast_init(void)
{
- if (vgacon_text_force() && ast_modeset == -1)
+ if (drm_firmware_drivers_only() && ast_modeset == -1)
return -EINVAL;
if (ast_modeset == 0)
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 2cfce7dc95af..00bfa41ff7cb 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -357,4 +357,7 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
+/* ast_i2c.c */
+struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+
#endif
diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c
new file mode 100644
index 000000000000..93e91c36d649
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_i2c.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+
+static void ast_i2c_setsda(void *i2c_priv, int data)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = to_ast_private(i2c->dev);
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static void ast_i2c_setscl(void *i2c_priv, int clock)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = to_ast_private(i2c->dev);
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((clock & 0x01) ? 0 : 1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static int ast_i2c_getsda(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = to_ast_private(i2c->dev);
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
+
+ return val & 1 ? 1 : 0;
+}
+
+static int ast_i2c_getscl(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = to_ast_private(i2c->dev);
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
+
+ return val & 1 ? 1 : 0;
+}
+
+static void ast_i2c_release(struct drm_device *dev, void *res)
+{
+ struct ast_i2c_chan *i2c = res;
+
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
+struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+{
+ struct ast_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = dev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "AST i2c bit bus");
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 20;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = ast_i2c_setsda;
+ i2c->bit.setscl = ast_i2c_setscl;
+ i2c->bit.getsda = ast_i2c_getsda;
+ i2c->bit.getscl = ast_i2c_getscl;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ drm_err(dev, "Failed to register bit i2c\n");
+ goto out_kfree;
+ }
+
+ ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c);
+ if (ret)
+ return NULL;
+ return i2c;
+
+out_kfree:
+ kfree(i2c);
+ return NULL;
+}
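
ast_i2c_create() above leans on DRM-managed cleanup: drmm_add_action_or_reset() queues ast_i2c_release() to run when the drm_device is released, and if the action cannot be queued it is invoked immediately, so the NULL return does not leak the adapter. A minimal consumer sketch, assuming the shape of ast_connector_init() further below ('connector' and 'funcs' stand in for the real objects):

	struct ast_i2c_chan *i2c = ast_i2c_create(dev);

	if (i2c)
		drm_connector_init_with_ddc(dev, connector, funcs,
					    DRM_MODE_CONNECTOR_VGA,
					    &i2c->adapter);
	else
		drm_connector_init(dev, connector, funcs,
				   DRM_MODE_CONNECTOR_VGA);
	/* no explicit teardown; ast_i2c_release() runs at device release */
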
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index d5c98f79d58d..956c8982192b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -47,9 +47,6 @@
#include "ast_drv.h"
#include "ast_tables.h"
-static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
-static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
-
static inline void ast_load_palette_index(struct ast_private *ast,
u8 index, u8 red, u8 green,
u8 blue)
@@ -1213,9 +1210,9 @@ static int ast_get_modes(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
struct ast_private *ast = to_ast_private(connector->dev);
- struct edid *edid;
- int ret;
+ struct edid *edid = NULL;
bool flags = false;
+ int ret;
if (ast->tx_chip_type == AST_TX_DP501) {
ast->dp501_maxclk = 0xff;
@@ -1229,7 +1226,7 @@ static int ast_get_modes(struct drm_connector *connector)
else
kfree(edid);
}
- if (!flags)
+ if (!flags && ast_connector->i2c)
edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
if (edid) {
drm_connector_update_edid_property(&ast_connector->base, edid);
@@ -1303,14 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
return flags;
}
-static void ast_connector_destroy(struct drm_connector *connector)
-{
- struct ast_connector *ast_connector = to_ast_connector(connector);
-
- ast_i2c_destroy(ast_connector->i2c);
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
.get_modes = ast_get_modes,
.mode_valid = ast_mode_valid,
@@ -1319,7 +1308,7 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
static const struct drm_connector_funcs ast_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = ast_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -1335,10 +1324,13 @@ static int ast_connector_init(struct drm_device *dev)
if (!ast_connector->i2c)
drm_err(dev, "failed to add ddc bus for connector\n");
- drm_connector_init_with_ddc(dev, connector,
- &ast_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &ast_connector->i2c->adapter);
+ if (ast_connector->i2c)
+ drm_connector_init_with_ddc(dev, connector, &ast_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &ast_connector->i2c->adapter);
+ else
+ drm_connector_init(dev, connector, &ast_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
drm_connector_helper_add(connector, &ast_connector_helper_funcs);
@@ -1416,124 +1408,3 @@ int ast_mode_config_init(struct ast_private *ast)
return 0;
}
-
-static int get_clock(void *i2c_priv)
-{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
- uint32_t val, val2, count, pass;
-
- count = 0;
- pass = 0;
- val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
- do {
- val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
- if (val == val2) {
- pass++;
- } else {
- pass = 0;
- val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
- }
- } while ((pass < 5) && (count++ < 0x10000));
-
- return val & 1 ? 1 : 0;
-}
-
-static int get_data(void *i2c_priv)
-{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
- uint32_t val, val2, count, pass;
-
- count = 0;
- pass = 0;
- val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
- do {
- val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
- if (val == val2) {
- pass++;
- } else {
- pass = 0;
- val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
- }
- } while ((pass < 5) && (count++ < 0x10000));
-
- return val & 1 ? 1 : 0;
-}
-
-static void set_clock(void *i2c_priv, int clock)
-{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
- int i;
- u8 ujcrb7, jtemp;
-
- for (i = 0; i < 0x10000; i++) {
- ujcrb7 = ((clock & 0x01) ? 0 : 1);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
- jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
- if (ujcrb7 == jtemp)
- break;
- }
-}
-
-static void set_data(void *i2c_priv, int data)
-{
- struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
- int i;
- u8 ujcrb7, jtemp;
-
- for (i = 0; i < 0x10000; i++) {
- ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
- jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
- if (ujcrb7 == jtemp)
- break;
- }
-}
-
-static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
-{
- struct ast_i2c_chan *i2c;
- int ret;
-
- i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
- if (!i2c)
- return NULL;
-
- i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = dev->dev;
- i2c->dev = dev;
- i2c_set_adapdata(&i2c->adapter, i2c);
- snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
- "AST i2c bit bus");
- i2c->adapter.algo_data = &i2c->bit;
-
- i2c->bit.udelay = 20;
- i2c->bit.timeout = 2;
- i2c->bit.data = i2c;
- i2c->bit.setsda = set_data;
- i2c->bit.setscl = set_clock;
- i2c->bit.getsda = get_data;
- i2c->bit.getscl = get_clock;
- ret = i2c_bit_add_bus(&i2c->adapter);
- if (ret) {
- drm_err(dev, "Failed to register bit i2c\n");
- goto out_free;
- }
-
- return i2c;
-out_free:
- kfree(i2c);
- return NULL;
-}
-
-static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
-{
- if (!i2c)
- return;
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
-}
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index d9eb353a4bf0..dbe1cc620f6e 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -282,8 +282,6 @@ static const struct ast_vbios_enhtable res_1360x768[] = {
};
static const struct ast_vbios_enhtable res_1600x900[] = {
- {1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A },
{1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT), 60, 1, 0x3A },
diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig
index 5f67f001553b..8ae679f1a518 100644
--- a/drivers/gpu/drm/atmel-hlcdc/Kconfig
+++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig
@@ -4,7 +4,6 @@ config DRM_ATMEL_HLCDC
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_PANEL
help
Choose this option if you have an ATMEL SoC with an HLCDC display
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 431b6e12a81f..61db5a66b493 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -182,6 +182,7 @@ config DRM_PARADE_PS8622
config DRM_PARADE_PS8640
tristate "Parade PS8640 MIPI DSI to eDP Converter"
depends on OF
+ select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 05e3abb5a0c9..592ecfcf00ca 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -401,7 +401,6 @@ void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
-void adv7533_detach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 76555ae64e9c..f8e5da148599 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -910,9 +910,6 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- if (adv->type == ADV7533 || adv->type == ADV7535)
- ret = adv7533_attach_dsi(adv);
-
if (adv->i2c_main->irq)
regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0),
ADV7511_INT0_HPD);
@@ -1288,8 +1285,18 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
drm_bridge_add(&adv7511->bridge);
adv7511_audio_init(dev, adv7511);
+
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535) {
+ ret = adv7533_attach_dsi(adv7511);
+ if (ret)
+ goto err_unregister_audio;
+ }
+
return 0;
+err_unregister_audio:
+ adv7511_audio_exit(adv7511);
+ drm_bridge_remove(&adv7511->bridge);
err_unregister_cec:
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
@@ -1307,8 +1314,6 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
- adv7533_detach_dsi(adv7511);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 59d718bde8c4..eb7579dec40a 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -153,11 +153,10 @@ int adv7533_attach_dsi(struct adv7511 *adv)
return -EPROBE_DEFER;
}
- dsi = mipi_dsi_device_register_full(host, &info);
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
dev_err(dev, "failed to create dsi device\n");
- ret = PTR_ERR(dsi);
- goto err_dsi_device;
+ return PTR_ERR(dsi);
}
adv->dsi = dsi;
@@ -167,24 +166,13 @@ int adv7533_attach_dsi(struct adv7511 *adv)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
- ret = mipi_dsi_attach(dsi);
+ ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
dev_err(dev, "failed to attach dsi to host\n");
- goto err_dsi_attach;
+ return ret;
}
return 0;
-
-err_dsi_attach:
- mipi_dsi_device_unregister(dsi);
-err_dsi_device:
- return ret;
-}
-
-void adv7533_detach_dsi(struct adv7511 *adv)
-{
- mipi_dsi_detach(adv->dsi);
- mipi_dsi_device_unregister(adv->dsi);
}
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
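
The same conversion repeats in the Lontium drivers below (lt8912b, lt9611, lt9611uxc): devm_mipi_dsi_device_register_full() and devm_mipi_dsi_attach() bind the DSI device and its attachment to the caller's struct device, which is what lets each driver drop its unwind labels and .detach callbacks. A condensed sketch of the resulting attach path (lane count and format are illustrative):

	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);		/* unregistered automatically */

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;

	ret = devm_mipi_dsi_attach(dev, dsi);	/* detached automatically */
	if (ret < 0)
		return ret;
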
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index cab6c8b92efd..6a4f20fccf84 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -998,11 +998,21 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
if (!blocking)
return 0;
+ /*
+ * db[1]!=0: entering PSR, wait for fully active remote frame buffer.
+ * db[1]==0: exiting PSR, wait for either
+ * (a) ACTIVE_RESYNC - the sink "must display the
+ * incoming active frames from the Source device with no visible
+ * glitches and/or artifacts", even though timings may still be
+ * re-synchronizing; or
+ * (b) INACTIVE - the transition is fully complete.
+ */
ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status,
psr_status >= 0 &&
((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) ||
- (!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500,
- DP_TIMEOUT_PSR_LOOP_MS * 1000);
+ (!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC ||
+ psr_status == DP_PSR_SINK_INACTIVE))),
+ 1500, DP_TIMEOUT_PSR_LOOP_MS * 1000);
if (ret) {
dev_warn(dp->dev, "Failed to apply PSR %d\n", ret);
return ret;
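
For context: readx_poll_timeout(op, args, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> re-reads op(args) into val every sleep_us microseconds until cond holds or timeout_us elapses, returning 0 on success and -ETIMEDOUT otherwise. A roughly equivalent open-coded loop for the call above, offered as a sketch only:

	unsigned long timeout = jiffies +
				usecs_to_jiffies(DP_TIMEOUT_PSR_LOOP_MS * 1000);

	for (;;) {
		psr_status = analogix_dp_get_psr_status(dp);
		if (psr_status >= 0 &&
		    ((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) ||
		     (!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC ||
				      psr_status == DP_PSR_SINK_INACTIVE))))
			break;				/* condition met */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;		/* poll timed out */
		usleep_range(1500, 3000);		/* sleep_us = 1500 */
	}
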
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 1a871f6b6822..2346dbcc505f 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -32,6 +32,8 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <media/v4l2-fwnode.h>
+#include <sound/hdmi-codec.h>
#include <video/display_timing.h>
#include "anx7625.h"
@@ -166,6 +168,20 @@ static int anx7625_write_and_or(struct anx7625_data *ctx,
offset, (val & and_mask) | (or_mask));
}
+static int anx7625_config_bit_matrix(struct anx7625_data *ctx)
+{
+ int i, ret;
+
+ ret = anx7625_write_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CONTROL_REGISTER, 0x80);
+ for (i = 0; i < 13; i++)
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ VIDEO_BIT_MATRIX_12 + i,
+ 0x18 + i);
+
+ return ret;
+}
+
static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx)
{
return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS);
@@ -191,10 +207,10 @@ static int wait_aux_op_finish(struct anx7625_data *ctx)
AP_AUX_CTRL_STATUS);
if (val < 0 || (val & 0x0F)) {
DRM_DEV_ERROR(dev, "aux status %02x\n", val);
- val = -EIO;
+ return -EIO;
}
- return val;
+ return 0;
}
static int anx7625_video_mute_control(struct anx7625_data *ctx,
@@ -221,38 +237,6 @@ static int anx7625_video_mute_control(struct anx7625_data *ctx,
return ret;
}
-static int anx7625_config_audio_input(struct anx7625_data *ctx)
-{
- struct device *dev = &ctx->client->dev;
- int ret;
-
- /* Channel num */
- ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
- AUDIO_CHANNEL_STATUS_6, I2S_CH_2 << 5);
-
- /* FS */
- ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
- AUDIO_CHANNEL_STATUS_4,
- 0xf0, AUDIO_FS_48K);
- /* Word length */
- ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
- AUDIO_CHANNEL_STATUS_5,
- 0xf0, AUDIO_W_LEN_24_24MAX);
- /* I2S */
- ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client,
- AUDIO_CHANNEL_STATUS_6, I2S_SLAVE_MODE);
- ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client,
- AUDIO_CONTROL_REGISTER, ~TDM_TIMING_MODE);
- /* Audio change flag */
- ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
- AP_AV_STATUS, AP_AUDIO_CHG);
-
- if (ret < 0)
- DRM_DEV_ERROR(dev, "fail to config audio.\n");
-
- return ret;
-}
-
/* Reduction of fraction a/b */
static void anx7625_reduction_of_a_fraction(unsigned long *a, unsigned long *b)
{
@@ -431,7 +415,7 @@ static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx)
ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client,
MIPI_LANE_CTRL_0, 0xfc);
ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client,
- MIPI_LANE_CTRL_0, 3);
+ MIPI_LANE_CTRL_0, ctx->pdata.mipi_lanes - 1);
/* Htotal */
htotal = ctx->dt.hactive.min + ctx->dt.hfront_porch.min +
@@ -615,6 +599,76 @@ static int anx7625_dsi_config(struct anx7625_data *ctx)
return ret;
}
+static int anx7625_api_dpi_config(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ u16 freq = ctx->dt.pixelclock.min / 1000;
+ int ret;
+
+ /* configure pixel clock */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ PIXEL_CLOCK_L, freq & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ PIXEL_CLOCK_H, (freq >> 8));
+
+ /* set DPI mode */
+ /* set to DPI PLL module sel */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_DIGITAL_PLL_9, 0x20);
+ /* power down MIPI */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_LANE_CTRL_10, 0x08);
+ /* enable DPI mode */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_DIGITAL_PLL_18, 0x1C);
+ /* set first edge */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ VIDEO_CONTROL_0, 0x06);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "IO error : dpi phy set failed.\n");
+
+ return ret;
+}
+
+static int anx7625_dpi_config(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "config dpi\n");
+
+ /* DSC disable */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ R_DSC_CTRL_0, ~DSC_EN);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "IO error : disable dsc failed.\n");
+ return ret;
+ }
+
+ ret = anx7625_config_bit_matrix(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "config bit matrix failed.\n");
+ return ret;
+ }
+
+ ret = anx7625_api_dpi_config(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi phy(dpi) setup failed.\n");
+ return ret;
+ }
+
+ /* set MIPI RX EN */
+ ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_MIPI_RX_EN);
+ /* clear mute flag */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, (u8)~AP_MIPI_MUTE);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "IO error : enable mipi rx failed.\n");
+
+ return ret;
+}
+
static void anx7625_dp_start(struct anx7625_data *ctx)
{
int ret;
@@ -625,9 +679,10 @@ static void anx7625_dp_start(struct anx7625_data *ctx)
return;
}
- anx7625_config_audio_input(ctx);
-
- ret = anx7625_dsi_config(ctx);
+ if (ctx->pdata.is_dpi)
+ ret = anx7625_dpi_config(ctx);
+ else
+ ret = anx7625_dsi_config(ctx);
if (ret < 0)
DRM_DEV_ERROR(dev, "MIPI phy setup error.\n");
@@ -795,7 +850,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
int count, blocks_num;
u8 pblock_buf[MAX_DPCD_BUFFER_SIZE];
u8 i, j;
- u8 g_edid_break = 0;
+ int g_edid_break = 0;
int ret;
struct device *dev = &ctx->client->dev;
@@ -826,7 +881,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
g_edid_break = edid_read(ctx, offset,
pblock_buf);
- if (g_edid_break)
+ if (g_edid_break < 0)
break;
memcpy(&pedid_blocks_buf[offset],
@@ -1075,6 +1130,7 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
return;
}
+ ctx->hpd_status = 1;
ctx->hpd_high_cnt++;
/* Not support HDCP */
@@ -1084,8 +1140,10 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10);
/* Interrupt for DRM */
ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01);
- if (ret < 0)
+ if (ret < 0) {
+		DRM_DEV_ERROR(dev, "failed to set HDCP/auth\n");
return;
+ }
ret = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, 0x86);
if (ret < 0)
@@ -1104,6 +1162,10 @@ static void anx7625_hpd_polling(struct anx7625_data *ctx)
int ret, val;
struct device *dev = &ctx->client->dev;
+	/* Interrupt mode, no need to poll HPD status, just return */
+ if (ctx->pdata.intp_irq)
+ return;
+
ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
ctx, val,
((val & HPD_STATUS) || (val < 0)),
@@ -1131,6 +1193,21 @@ static void anx7625_remove_edid(struct anx7625_data *ctx)
ctx->slimport_edid_p.edid_block_num = -1;
}
+static void anx7625_dp_adjust_swing(struct anx7625_data *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->pdata.dp_lane0_swing_reg_cnt; i++)
+ anx7625_reg_write(ctx, ctx->i2c.tx_p1_client,
+ DP_TX_LANE0_SWING_REG0 + i,
+ ctx->pdata.lane0_reg_data[i] & 0xFF);
+
+ for (i = 0; i < ctx->pdata.dp_lane1_swing_reg_cnt; i++)
+ anx7625_reg_write(ctx, ctx->i2c.tx_p1_client,
+ DP_TX_LANE1_SWING_REG0 + i,
+ ctx->pdata.lane1_reg_data[i] & 0xFF);
+}
+
static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
{
struct device *dev = &ctx->client->dev;
@@ -1146,9 +1223,8 @@ static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
} else {
DRM_DEV_DEBUG_DRIVER(dev, " HPD high\n");
anx7625_start_dp_work(ctx);
+ anx7625_dp_adjust_swing(ctx);
}
-
- ctx->hpd_status = 1;
}
static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
@@ -1225,20 +1301,75 @@ static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static int anx7625_get_swing_setting(struct device *dev,
+ struct anx7625_platform_data *pdata)
+{
+ int num_regs;
+
+ if (of_get_property(dev->of_node,
+ "analogix,lane0-swing", &num_regs)) {
+ if (num_regs > DP_TX_SWING_REG_CNT)
+ num_regs = DP_TX_SWING_REG_CNT;
+
+ pdata->dp_lane0_swing_reg_cnt = num_regs;
+ of_property_read_u32_array(dev->of_node, "analogix,lane0-swing",
+ pdata->lane0_reg_data, num_regs);
+ }
+
+ if (of_get_property(dev->of_node,
+ "analogix,lane1-swing", &num_regs)) {
+ if (num_regs > DP_TX_SWING_REG_CNT)
+ num_regs = DP_TX_SWING_REG_CNT;
+
+ pdata->dp_lane1_swing_reg_cnt = num_regs;
+ of_property_read_u32_array(dev->of_node, "analogix,lane1-swing",
+ pdata->lane1_reg_data, num_regs);
+ }
+
+ return 0;
+}
+
static int anx7625_parse_dt(struct device *dev,
struct anx7625_platform_data *pdata)
{
- struct device_node *np = dev->of_node;
+ struct device_node *np = dev->of_node, *ep0;
struct drm_panel *panel;
int ret;
+ int bus_type, mipi_lanes;
+
+ anx7625_get_swing_setting(dev, pdata);
+ pdata->is_dpi = 1; /* default dpi mode */
pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0);
if (!pdata->mipi_host_node) {
DRM_DEV_ERROR(dev, "fail to get internal panel.\n");
return -ENODEV;
}
- DRM_DEV_DEBUG_DRIVER(dev, "found dsi host node.\n");
+ bus_type = V4L2_FWNODE_BUS_TYPE_PARALLEL;
+ mipi_lanes = MAX_LANES_SUPPORT;
+ ep0 = of_graph_get_endpoint_by_regs(np, 0, 0);
+ if (ep0) {
+ if (of_property_read_u32(ep0, "bus-type", &bus_type))
+ bus_type = 0;
+
+ mipi_lanes = of_property_count_u32_elems(ep0, "data-lanes");
+ }
+
+ if (bus_type == V4L2_FWNODE_BUS_TYPE_PARALLEL) /* bus type is Parallel(DSI) */
+ pdata->is_dpi = 0;
+
+ pdata->mipi_lanes = mipi_lanes;
+ if (pdata->mipi_lanes > MAX_LANES_SUPPORT || pdata->mipi_lanes <= 0)
+ pdata->mipi_lanes = MAX_LANES_SUPPORT;
+
+ if (pdata->is_dpi)
+ DRM_DEV_DEBUG_DRIVER(dev, "found MIPI DPI host node.\n");
+ else
+ DRM_DEV_DEBUG_DRIVER(dev, "found MIPI DSI host node.\n");
+
+ if (of_property_read_bool(np, "analogix,audio-enable"))
+ pdata->audio_en = 1;
ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
if (ret < 0) {
@@ -1301,9 +1432,215 @@ static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
{
struct device *dev = &ctx->client->dev;
- DRM_DEV_DEBUG_DRIVER(dev, "sink detect, return connected\n");
+ DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n");
- return connector_status_connected;
+ if (ctx->pdata.panel_bridge)
+ return connector_status_connected;
+
+ return ctx->hpd_status ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static int anx7625_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *params)
+{
+ struct anx7625_data *ctx = dev_get_drvdata(dev);
+ int wl, ch, rate;
+ int ret = 0;
+
+ if (fmt->fmt != HDMI_DSP_A) {
+ DRM_DEV_ERROR(dev, "only supports DSP_A\n");
+ return -EINVAL;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "setting %d Hz, %d bit, %d channels\n",
+ params->sample_rate, params->sample_width,
+ params->cea.channels);
+
+ ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6,
+ ~I2S_SLAVE_MODE,
+ TDM_SLAVE_MODE);
+
+ /* Word length */
+ switch (params->sample_width) {
+ case 16:
+ wl = AUDIO_W_LEN_16_20MAX;
+ break;
+ case 18:
+ wl = AUDIO_W_LEN_18_20MAX;
+ break;
+ case 20:
+ wl = AUDIO_W_LEN_20_20MAX;
+ break;
+ case 24:
+ wl = AUDIO_W_LEN_24_24MAX;
+ break;
+ default:
+		DRM_DEV_DEBUG_DRIVER(dev, "word length: %d bit not supported",
+ params->sample_width);
+ return -EINVAL;
+ }
+ ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_5,
+ 0xf0, wl);
+
+ /* Channel num */
+ switch (params->cea.channels) {
+ case 2:
+ ch = I2S_CH_2;
+ break;
+ case 4:
+ ch = TDM_CH_4;
+ break;
+ case 6:
+ ch = TDM_CH_6;
+ break;
+ case 8:
+ ch = TDM_CH_8;
+ break;
+ default:
+		DRM_DEV_DEBUG_DRIVER(dev, "channel number: %d not supported",
+ params->cea.channels);
+ return -EINVAL;
+ }
+ ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6, 0x1f, ch << 5);
+ if (ch > I2S_CH_2)
+ ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6, AUDIO_LAYOUT);
+ else
+ ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6, ~AUDIO_LAYOUT);
+
+ /* FS */
+ switch (params->sample_rate) {
+ case 32000:
+ rate = AUDIO_FS_32K;
+ break;
+ case 44100:
+ rate = AUDIO_FS_441K;
+ break;
+ case 48000:
+ rate = AUDIO_FS_48K;
+ break;
+ case 88200:
+ rate = AUDIO_FS_882K;
+ break;
+ case 96000:
+ rate = AUDIO_FS_96K;
+ break;
+ case 176400:
+ rate = AUDIO_FS_1764K;
+ break;
+ case 192000:
+ rate = AUDIO_FS_192K;
+ break;
+ default:
+		DRM_DEV_DEBUG_DRIVER(dev, "sample rate: %d not supported",
+ params->sample_rate);
+ return -EINVAL;
+ }
+ ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_4,
+ 0xf0, rate);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_AUDIO_CHG);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "IO error : config audio.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void anx7625_audio_shutdown(struct device *dev, void *data)
+{
+ DRM_DEV_DEBUG_DRIVER(dev, "stop audio\n");
+}
+
+static int anx7625_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint)
+{
+ struct of_endpoint of_ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * HDMI sound should be located at the external DPI port.
+	 * There is no good way to tell whether the endpoint belongs
+	 * to the internal (DSI) or the external (DPI) bridge.
+	 */
+ return 0;
+}
+
+static void
+anx7625_audio_update_connector_status(struct anx7625_data *ctx,
+ enum drm_connector_status status)
+{
+ if (ctx->plugged_cb && ctx->codec_dev) {
+ ctx->plugged_cb(ctx->codec_dev,
+ status == connector_status_connected);
+ }
+}
+
+static int anx7625_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct anx7625_data *ctx = data;
+
+ ctx->plugged_cb = fn;
+ ctx->codec_dev = codec_dev;
+ anx7625_audio_update_connector_status(ctx, anx7625_sink_detect(ctx));
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops anx7625_codec_ops = {
+ .hw_params = anx7625_audio_hw_params,
+ .audio_shutdown = anx7625_audio_shutdown,
+ .get_dai_id = anx7625_hdmi_i2s_get_dai_id,
+ .hook_plugged_cb = anx7625_audio_hook_plugged_cb,
+};
+
+static void anx7625_unregister_audio(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+
+ if (ctx->audio_pdev) {
+ platform_device_unregister(ctx->audio_pdev);
+ ctx->audio_pdev = NULL;
+ }
+
+	DRM_DEV_DEBUG_DRIVER(dev, "unbound from %s", HDMI_CODEC_DRV_NAME);
+}
+
+static int anx7625_register_audio(struct device *dev, struct anx7625_data *ctx)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &anx7625_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+ .data = ctx,
+ };
+
+ ctx->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+
+ if (IS_ERR(ctx->audio_pdev))
+ return PTR_ERR(ctx->audio_pdev);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "bound to %s", HDMI_CODEC_DRV_NAME);
+
+ return 0;
}
static int anx7625_attach_dsi(struct anx7625_data *ctx)
@@ -1316,6 +1653,7 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
.channel = 0,
.node = NULL,
};
+ int ret;
DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");
@@ -1325,22 +1663,22 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
return -EINVAL;
}
- dsi = mipi_dsi_device_register_full(host, &info);
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
DRM_DEV_ERROR(dev, "fail to create dsi device.\n");
return -EINVAL;
}
- dsi->lanes = 4;
+ dsi->lanes = ctx->pdata.mipi_lanes;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_VIDEO_HSE;
- if (mipi_dsi_attach(dsi) < 0) {
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret) {
DRM_DEV_ERROR(dev, "fail to attach dsi to host.\n");
- mipi_dsi_device_unregister(dsi);
- return -EINVAL;
+ return ret;
}
ctx->dsi = dsi;
@@ -1350,16 +1688,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
return 0;
}
-static void anx7625_bridge_detach(struct drm_bridge *bridge)
-{
- struct anx7625_data *ctx = bridge_to_anx7625(bridge);
-
- if (ctx->dsi) {
- mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
- }
-}
-
static int anx7625_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@@ -1376,12 +1704,6 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
return -ENODEV;
}
- err = anx7625_attach_dsi(ctx);
- if (err) {
- DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", err);
- return err;
- }
-
if (ctx->pdata.panel_bridge) {
err = drm_bridge_attach(bridge->encoder,
ctx->pdata.panel_bridge,
@@ -1475,6 +1797,10 @@ static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge,
DRM_DEV_DEBUG_DRIVER(dev, "drm mode fixup set\n");
+	/* No fixup needed for external monitors */
+ if (!ctx->pdata.panel_bridge)
+ return true;
+
hsync = mode->hsync_end - mode->hsync_start;
hfp = mode->hsync_start - mode->hdisplay;
hbp = mode->htotal - mode->hsync_end;
@@ -1624,7 +1950,6 @@ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.attach = anx7625_bridge_attach,
- .detach = anx7625_bridge_detach,
.disable = anx7625_bridge_disable,
.mode_valid = anx7625_bridge_mode_valid,
.mode_set = anx7625_bridge_mode_set,
@@ -1851,14 +2176,39 @@ static int anx7625_i2c_probe(struct i2c_client *client,
platform->bridge.funcs = &anx7625_bridge_funcs;
platform->bridge.of_node = client->dev.of_node;
- platform->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
- platform->bridge.type = DRM_MODE_CONNECTOR_eDP;
+ platform->bridge.ops = DRM_BRIDGE_OP_EDID;
+ if (!platform->pdata.panel_bridge)
+ platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
+ DRM_BRIDGE_OP_DETECT;
+ platform->bridge.type = platform->pdata.panel_bridge ?
+ DRM_MODE_CONNECTOR_eDP :
+ DRM_MODE_CONNECTOR_DisplayPort;
+
drm_bridge_add(&platform->bridge);
+ if (!platform->pdata.is_dpi) {
+ ret = anx7625_attach_dsi(platform);
+ if (ret) {
+			DRM_DEV_ERROR(dev, "Failed to attach to dsi: %d\n", ret);
+ goto unregister_bridge;
+ }
+ }
+
+ if (platform->pdata.audio_en)
+ anx7625_register_audio(dev, platform);
+
DRM_DEV_DEBUG_DRIVER(dev, "probe done\n");
return 0;
+unregister_bridge:
+ drm_bridge_remove(&platform->bridge);
+
+ if (!platform->pdata.low_power_mode)
+ pm_runtime_put_sync_suspend(&client->dev);
+
+ anx7625_unregister_i2c_dummy_clients(platform);
+
free_wq:
if (platform->workqueue)
destroy_workqueue(platform->workqueue);
@@ -1883,6 +2233,9 @@ static int anx7625_i2c_remove(struct i2c_client *client)
anx7625_unregister_i2c_dummy_clients(platform);
+ if (platform->pdata.audio_en)
+ anx7625_unregister_audio(platform);
+
kfree(platform);
return 0;
}
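
The audio path added above is only active when the device tree opts in: anx7625_parse_dt() sets pdata->audio_en from the "analogix,audio-enable" property, and probe then registers an hdmi-audio-codec platform device whose ops call back into the bridge. A sketch of the opt-in, kept in a C comment; the node name and unit address are assumed for illustration:

	/*
	 * DT opt-in assumed by the probe/remove hunks above:
	 *
	 *	anx7625@58 {
	 *		compatible = "analogix,anx7625";
	 *		analogix,audio-enable;
	 *	};
	 */
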
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index 6dcf64c703f9..3d79b6fb13c8 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -111,6 +111,7 @@
#define AUDIO_CHANNEL_STATUS_6 0xd5
#define TDM_SLAVE_MODE 0x10
#define I2S_SLAVE_MODE 0x08
+#define AUDIO_LAYOUT 0x01
#define AUDIO_CONTROL_REGISTER 0xe6
#define TDM_TIMING_MODE 0x08
@@ -141,12 +142,20 @@
#define HORIZONTAL_BACK_PORCH_H 0x22 /* Bit[7:4] are reserved */
/******** END of I2C Address 0x72 *********/
+
+/***************************************************************/
+/* Register definition of device address 0x7a */
+#define DP_TX_SWING_REG_CNT 0x14
+#define DP_TX_LANE0_SWING_REG0 0x00
+#define DP_TX_LANE1_SWING_REG0 0x14
+/******** END of I2C Address 0x7a *********/
+
/***************************************************************/
/* Register definition of device address 0x7e */
#define I2C_ADDR_7E_FLASH_CONTROLLER 0x7E
-#define FLASH_LOAD_STA 0x05
+#define FLASH_LOAD_STA 0x05
#define FLASH_LOAD_STA_CHK BIT(7)
#define XTAL_FRQ_SEL 0x3F
@@ -349,12 +358,21 @@ struct s_edid_data {
/***************** Display End *****************/
+#define MAX_LANES_SUPPORT 4
+
struct anx7625_platform_data {
struct gpio_desc *gpio_p_on;
struct gpio_desc *gpio_reset;
struct regulator_bulk_data supplies[3];
struct drm_bridge *panel_bridge;
int intp_irq;
+ int is_dpi;
+ int mipi_lanes;
+ int audio_en;
+ int dp_lane0_swing_reg_cnt;
+ int lane0_reg_data[DP_TX_SWING_REG_CNT];
+ int dp_lane1_swing_reg_cnt;
+ int lane1_reg_data[DP_TX_SWING_REG_CNT];
u32 low_power_mode;
struct device_node *mipi_host_node;
};
@@ -371,6 +389,7 @@ struct anx7625_i2c_client {
struct anx7625_data {
struct anx7625_platform_data pdata;
+ struct platform_device *audio_pdev;
int hpd_status;
int hpd_high_cnt;
/* Lock for work queue */
@@ -379,6 +398,8 @@ struct anx7625_data {
struct anx7625_i2c_client i2c;
struct i2c_client *last_client;
struct s_edid_data slimport_edid_p;
+ struct device *codec_dev;
+ hdmi_codec_plugged_cb plugged_cb;
struct work_struct work;
struct workqueue_struct *workqueue;
char edid_block;
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 05eb759da6fc..d24f5b90feab 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
@@ -87,10 +88,95 @@ static struct edid *display_connector_get_edid(struct drm_bridge *bridge,
return drm_get_edid(connector, conn->bridge.ddc);
}
+/*
+ * Since this bridge is tied to the connector, it acts like a passthrough,
+ * so concerning the output bus formats, either pass the bus formats from the
+ * previous bridge or return fallback data as done in
+ * drm_atomic_bridge_chain_select_bus_fmts().
+ * This supports negotiation if the bridge chain has all bits in place.
+ */
+static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
+ struct drm_bridge_state *prev_bridge_state;
+
+ if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) {
+ struct drm_connector *conn = conn_state->connector;
+ u32 *out_bus_fmts;
+
+ *num_output_fmts = 1;
+ out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
+ if (!out_bus_fmts)
+ return NULL;
+
+ if (conn->display_info.num_bus_formats &&
+ conn->display_info.bus_formats)
+ out_bus_fmts[0] = conn->display_info.bus_formats[0];
+ else
+ out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
+
+ return out_bus_fmts;
+ }
+
+ prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ prev_bridge);
+
+ return prev_bridge->funcs->atomic_get_output_bus_fmts(prev_bridge, prev_bridge_state,
+ crtc_state, conn_state,
+ num_output_fmts);
+}
+
+/*
+ * Since this bridge is tied to the connector, it acts like a passthrough,
+ * so concerning the input bus formats, either pass the bus formats from the
+ * previous bridge or fall back to MEDIA_BUS_FMT_FIXED (as
+ * select_bus_fmt_recursive() does) when atomic_get_input_bus_fmts is not
+ * implemented. This supports negotiation if the bridge chain has all bits
+ * in place.
+ */
+static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
+ struct drm_bridge_state *prev_bridge_state;
+
+ if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) {
+ u32 *in_bus_fmts;
+
+ *num_input_fmts = 1;
+ in_bus_fmts = kmalloc(sizeof(*in_bus_fmts), GFP_KERNEL);
+ if (!in_bus_fmts)
+ return NULL;
+
+ in_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
+
+ return in_bus_fmts;
+ }
+
+ prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ prev_bridge);
+
+ return prev_bridge->funcs->atomic_get_input_bus_fmts(prev_bridge, prev_bridge_state,
+ crtc_state, conn_state, output_fmt,
+ num_input_fmts);
+}
+
static const struct drm_bridge_funcs display_connector_bridge_funcs = {
.attach = display_connector_attach,
.detect = display_connector_detect,
.get_edid = display_connector_get_edid,
+ .atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts,
+ .atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
};
static irqreturn_t display_connector_hpd_irq(int irq, void *arg)
@@ -107,7 +193,7 @@ static int display_connector_probe(struct platform_device *pdev)
{
struct display_connector *conn;
unsigned int type;
- const char *label;
+ const char *label = NULL;
int ret;
conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
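
The negotiation helpers above make the connector bridge transparent: when the previous bridge implements the atomic bus-format hooks they are simply forwarded, otherwise the fallbacks mirror what drm_atomic_bridge_chain_select_bus_fmts() and select_bus_fmt_recursive() would choose. The output-side fallback reduces to this sketch:

	u32 fmt;

	if (conn->display_info.num_bus_formats &&
	    conn->display_info.bus_formats)
		fmt = conn->display_info.bus_formats[0];	/* sink-reported */
	else
		fmt = MEDIA_BUS_FMT_FIXED;	/* opaque; drivers must agree */
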
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 1b0c7eaf6c84..c642d1e02b2f 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -472,11 +472,11 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
return -EPROBE_DEFER;
}
- dsi = mipi_dsi_device_register_full(host, &info);
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
ret = PTR_ERR(dsi);
dev_err(dev, "failed to create dsi device (%d)\n", ret);
- goto err_dsi_device;
+ return ret;
}
lt->dsi = dsi;
@@ -489,24 +489,13 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
MIPI_DSI_MODE_LPM |
MIPI_DSI_MODE_NO_EOT_PACKET;
- ret = mipi_dsi_attach(dsi);
+ ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
dev_err(dev, "failed to attach dsi to host\n");
- goto err_dsi_attach;
+ return ret;
}
return 0;
-
-err_dsi_attach:
- mipi_dsi_device_unregister(dsi);
-err_dsi_device:
- return ret;
-}
-
-static void lt8912_detach_dsi(struct lt8912 *lt)
-{
- mipi_dsi_detach(lt->dsi);
- mipi_dsi_device_unregister(lt->dsi);
}
static int lt8912_bridge_connector_init(struct drm_bridge *bridge)
@@ -555,10 +544,6 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
if (ret)
goto error;
- ret = lt8912_attach_dsi(lt);
- if (ret)
- goto error;
-
lt->is_attached = true;
return 0;
@@ -573,7 +558,6 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
struct lt8912 *lt = bridge_to_lt8912(bridge);
if (lt->is_attached) {
- lt8912_detach_dsi(lt);
lt8912_hard_power_off(lt);
drm_connector_unregister(&lt->connector);
drm_connector_cleanup(&lt->connector);
@@ -718,8 +702,15 @@ static int lt8912_probe(struct i2c_client *client,
drm_bridge_add(&lt->bridge);
+ ret = lt8912_attach_dsi(lt);
+ if (ret)
+ goto err_attach;
+
return 0;
+err_attach:
+ drm_bridge_remove(&lt->bridge);
+ lt8912_free_i2c(lt);
err_i2c:
lt8912_put_dt(lt);
err_dt_parse:
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 29b1ce2140ab..dafb1b47c15f 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -586,7 +586,7 @@ lt9611_connector_detect(struct drm_connector *connector, bool force)
int connected = 0;
regmap_read(lt9611->regmap, 0x825e, &reg_val);
- connected = (reg_val & BIT(2));
+ connected = (reg_val & BIT(0));
lt9611->status = connected ? connector_status_connected :
connector_status_disconnected;
@@ -760,6 +760,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
const struct mipi_dsi_device_info info = { "lt9611", 0, NULL };
struct mipi_dsi_device *dsi;
struct mipi_dsi_host *host;
+ struct device *dev = lt9611->dev;
int ret;
host = of_find_mipi_dsi_host_by_node(dsi_node);
@@ -768,7 +769,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
return ERR_PTR(-EPROBE_DEFER);
}
- dsi = mipi_dsi_device_register_full(host, &info);
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
dev_err(lt9611->dev, "failed to create dsi device\n");
return dsi;
@@ -779,29 +780,15 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_VIDEO_HSE;
- ret = mipi_dsi_attach(dsi);
+ ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
- dev_err(lt9611->dev, "failed to attach dsi to host\n");
- mipi_dsi_device_unregister(dsi);
+ dev_err(dev, "failed to attach dsi to host\n");
return ERR_PTR(ret);
}
return dsi;
}
-static void lt9611_bridge_detach(struct drm_bridge *bridge)
-{
- struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
-
- if (lt9611->dsi1) {
- mipi_dsi_detach(lt9611->dsi1);
- mipi_dsi_device_unregister(lt9611->dsi1);
- }
-
- mipi_dsi_detach(lt9611->dsi0);
- mipi_dsi_device_unregister(lt9611->dsi0);
-}
-
static int lt9611_connector_init(struct drm_bridge *bridge, struct lt9611 *lt9611)
{
int ret;
@@ -838,28 +825,7 @@ static int lt9611_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- /* Attach primary DSI */
- lt9611->dsi0 = lt9611_attach_dsi(lt9611, lt9611->dsi0_node);
- if (IS_ERR(lt9611->dsi0))
- return PTR_ERR(lt9611->dsi0);
-
- /* Attach secondary DSI, if specified */
- if (lt9611->dsi1_node) {
- lt9611->dsi1 = lt9611_attach_dsi(lt9611, lt9611->dsi1_node);
- if (IS_ERR(lt9611->dsi1)) {
- ret = PTR_ERR(lt9611->dsi1);
- goto err_unregister_dsi0;
- }
- }
-
return 0;
-
-err_unregister_dsi0:
- lt9611_bridge_detach(bridge);
- drm_connector_cleanup(&lt9611->connector);
- mipi_dsi_device_unregister(lt9611->dsi0);
-
- return ret;
}
static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
@@ -926,7 +892,7 @@ static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
int connected;
regmap_read(lt9611->regmap, 0x825e, &reg_val);
- connected = reg_val & BIT(2);
+ connected = reg_val & BIT(0);
lt9611->status = connected ? connector_status_connected :
connector_status_disconnected;
@@ -952,7 +918,6 @@ static void lt9611_bridge_hpd_enable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.attach = lt9611_bridge_attach,
- .detach = lt9611_bridge_detach,
.mode_valid = lt9611_bridge_mode_valid,
.enable = lt9611_bridge_enable,
.disable = lt9611_bridge_disable,
@@ -1181,10 +1146,29 @@ static int lt9611_probe(struct i2c_client *client,
drm_bridge_add(&lt9611->bridge);
+ /* Attach primary DSI */
+ lt9611->dsi0 = lt9611_attach_dsi(lt9611, lt9611->dsi0_node);
+ if (IS_ERR(lt9611->dsi0)) {
+ ret = PTR_ERR(lt9611->dsi0);
+ goto err_remove_bridge;
+ }
+
+ /* Attach secondary DSI, if specified */
+ if (lt9611->dsi1_node) {
+ lt9611->dsi1 = lt9611_attach_dsi(lt9611, lt9611->dsi1_node);
+ if (IS_ERR(lt9611->dsi1)) {
+ ret = PTR_ERR(lt9611->dsi1);
+ goto err_remove_bridge;
+ }
+ }
+
lt9611_enable_hpd_interrupts(lt9611);
return lt9611_audio_init(dev, lt9611);
+err_remove_bridge:
+ drm_bridge_remove(&lt9611->bridge);
+
err_disable_regulators:
regulator_bulk_disable(ARRAY_SIZE(lt9611->supplies), lt9611->supplies);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 010657ea7af7..33f9716da0ee 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -258,17 +258,18 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
const struct mipi_dsi_device_info info = { "lt9611uxc", 0, NULL };
struct mipi_dsi_device *dsi;
struct mipi_dsi_host *host;
+ struct device *dev = lt9611uxc->dev;
int ret;
host = of_find_mipi_dsi_host_by_node(dsi_node);
if (!host) {
- dev_err(lt9611uxc->dev, "failed to find dsi host\n");
+ dev_err(dev, "failed to find dsi host\n");
return ERR_PTR(-EPROBE_DEFER);
}
- dsi = mipi_dsi_device_register_full(host, &info);
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
- dev_err(lt9611uxc->dev, "failed to create dsi device\n");
+ dev_err(dev, "failed to create dsi device\n");
return dsi;
}
@@ -277,10 +278,9 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_VIDEO_HSE;
- ret = mipi_dsi_attach(dsi);
+ ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
- dev_err(lt9611uxc->dev, "failed to attach dsi to host\n");
- mipi_dsi_device_unregister(dsi);
+ dev_err(dev, "failed to attach dsi to host\n");
return ERR_PTR(ret);
}
@@ -355,19 +355,6 @@ static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc
return drm_connector_attach_encoder(&lt9611uxc->connector, bridge->encoder);
}
-static void lt9611uxc_bridge_detach(struct drm_bridge *bridge)
-{
- struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
-
- if (lt9611uxc->dsi1) {
- mipi_dsi_detach(lt9611uxc->dsi1);
- mipi_dsi_device_unregister(lt9611uxc->dsi1);
- }
-
- mipi_dsi_detach(lt9611uxc->dsi0);
- mipi_dsi_device_unregister(lt9611uxc->dsi0);
-}
-
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@@ -380,27 +367,7 @@ static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- /* Attach primary DSI */
- lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node);
- if (IS_ERR(lt9611uxc->dsi0))
- return PTR_ERR(lt9611uxc->dsi0);
-
- /* Attach secondary DSI, if specified */
- if (lt9611uxc->dsi1_node) {
- lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi1_node);
- if (IS_ERR(lt9611uxc->dsi1)) {
- ret = PTR_ERR(lt9611uxc->dsi1);
- goto err_unregister_dsi0;
- }
- }
-
return 0;
-
-err_unregister_dsi0:
- mipi_dsi_detach(lt9611uxc->dsi0);
- mipi_dsi_device_unregister(lt9611uxc->dsi0);
-
- return ret;
}
static enum drm_mode_status
@@ -544,7 +511,6 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = {
.attach = lt9611uxc_bridge_attach,
- .detach = lt9611uxc_bridge_detach,
.mode_valid = lt9611uxc_bridge_mode_valid,
.mode_set = lt9611uxc_bridge_mode_set,
.detect = lt9611uxc_bridge_detect,
@@ -980,8 +946,27 @@ retry:
drm_bridge_add(&lt9611uxc->bridge);
+ /* Attach primary DSI */
+ lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node);
+ if (IS_ERR(lt9611uxc->dsi0)) {
+ ret = PTR_ERR(lt9611uxc->dsi0);
+ goto err_remove_bridge;
+ }
+
+ /* Attach secondary DSI, if specified */
+ if (lt9611uxc->dsi1_node) {
+ lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi1_node);
+ if (IS_ERR(lt9611uxc->dsi1)) {
+ ret = PTR_ERR(lt9611uxc->dsi1);
+ goto err_remove_bridge;
+ }
+ }
+
return lt9611uxc_audio_init(dev, lt9611uxc);
+err_remove_bridge:
+ drm_bridge_remove(&lt9611uxc->bridge);
+
err_disable_regulators:
regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies);
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index ad460b96c0a3..702ea803a743 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -14,12 +14,14 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
struct lvds_codec {
struct device *dev;
struct drm_bridge bridge;
struct drm_bridge *panel_bridge;
+ struct drm_bridge_timings timings;
struct regulator *vcc;
struct gpio_desc *powerdown_gpio;
u32 connector_type;
@@ -118,7 +120,7 @@ static int lvds_codec_probe(struct platform_device *pdev)
struct device_node *bus_node;
struct drm_panel *panel;
struct lvds_codec *lvds_codec;
- const char *mapping;
+ u32 val;
int ret;
lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
@@ -174,32 +176,38 @@ static int lvds_codec_probe(struct platform_device *pdev)
return -ENXIO;
}
- ret = of_property_read_string(bus_node, "data-mapping",
- &mapping);
+ ret = drm_of_lvds_get_data_mapping(bus_node);
of_node_put(bus_node);
- if (ret < 0) {
+ if (ret == -ENODEV) {
dev_warn(dev, "missing 'data-mapping' DT property\n");
+ } else if (ret) {
+ dev_err(dev, "invalid 'data-mapping' DT property\n");
+ return ret;
} else {
- if (!strcmp(mapping, "jeida-18")) {
- lvds_codec->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG;
- } else if (!strcmp(mapping, "jeida-24")) {
- lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
- } else if (!strcmp(mapping, "vesa-24")) {
- lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
- } else {
- dev_err(dev, "invalid 'data-mapping' DT property\n");
- return -EINVAL;
- }
+ lvds_codec->bus_format = ret;
lvds_codec->bridge.funcs = &funcs_decoder;
}
}
/*
+	 * The encoder might sample data on a different clock edge than the
+	 * display; for example, the OnSemi FIN3385 has a dedicated strapping
+	 * pin to select the sampling edge.
+ */
+ if (lvds_codec->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+ !of_property_read_u32(dev->of_node, "pclk-sample", &val)) {
+ lvds_codec->timings.input_bus_flags = val ?
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE :
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+ }
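+	/*
+	 * Example (value is illustrative): a "pclk-sample = <1>" property in
+	 * the codec node selects DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE above.
+	 */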
+
+ /*
* The panel_bridge bridge is attached to the panel's of_node,
* but we need a bridge attached to our of_node for our user
* to look up.
*/
lvds_codec->bridge.of_node = dev->of_node;
+ lvds_codec->bridge.timings = &lvds_codec->timings;
drm_bridge_add(&lvds_codec->bridge);
platform_set_drvdata(pdev, lvds_codec);
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index d2808c4a6fb1..cce98bf2a4e7 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -306,19 +306,10 @@ out:
mutex_unlock(&ge_b850v3_lvds_dev_mutex);
}
-static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
- const struct i2c_device_id *id)
+static int ge_b850v3_register(void)
{
+ struct i2c_client *stdp4028_i2c = ge_b850v3_lvds_ptr->stdp4028_i2c;
struct device *dev = &stdp4028_i2c->dev;
- int ret;
-
- ret = ge_b850v3_lvds_init(dev);
-
- if (ret)
- return ret;
-
- ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
- i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
/* drm bridge initialization */
ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
@@ -343,6 +334,27 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
"ge-b850v3-lvds-dp", ge_b850v3_lvds_ptr);
}
+static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &stdp4028_i2c->dev;
+ int ret;
+
+ ret = ge_b850v3_lvds_init(dev);
+
+ if (ret)
+ return ret;
+
+ ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
+ i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
+
+ /* Only register after both bridges are probed */
+ if (!ge_b850v3_lvds_ptr->stdp2690_i2c)
+ return 0;
+
+ return ge_b850v3_register();
+}
+
static int stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
{
ge_b850v3_lvds_remove();
@@ -386,7 +398,11 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c;
i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr);
- return 0;
+ /* Only register after both bridges are probed */
+ if (!ge_b850v3_lvds_ptr->stdp4028_i2c)
+ return 0;
+
+ return ge_b850v3_register();
}
static int stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 3aaa90913bf8..818704bf5e86 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -9,10 +9,12 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_dp_aux_bus.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -100,7 +102,7 @@ struct ps8640 {
struct regulator_bulk_data supplies[2];
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_powerdown;
- bool powered;
+ bool pre_enabled;
};
static const struct regmap_config ps8640_regmap_config[] = {
@@ -148,8 +150,46 @@ static inline struct ps8640 *aux_to_ps8640(struct drm_dp_aux *aux)
return container_of(aux, struct ps8640, aux);
}
-static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
- struct drm_dp_aux_msg *msg)
+static bool ps8640_of_panel_on_aux_bus(struct device *dev)
+{
+ struct device_node *bus, *panel;
+
+ bus = of_get_child_by_name(dev->of_node, "aux-bus");
+ if (!bus)
+ return false;
+
+ panel = of_get_child_by_name(bus, "panel");
+ of_node_put(bus);
+ if (!panel)
+ return false;
+ of_node_put(panel);
+
+ return true;
+}
+
+static int ps8640_ensure_hpd(struct ps8640 *ps_bridge)
+{
+ struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
+ struct device *dev = &ps_bridge->page[PAGE2_TOP_CNTL]->dev;
+ int status;
+ int ret;
+
+ /*
+ * Apparently something about the firmware in the chip signals that
+ * HPD goes high by reporting GPIO9 as high (even though HPD isn't
+ * actually connected to GPIO9).
+ */
+ ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
+ status & PS_GPIO9, 20 * 1000, 200 * 1000);
+
+ if (ret < 0)
+ dev_warn(dev, "HPD didn't go high: %d\n", ret);
+
+ return ret;
+}
+
+static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
{
struct ps8640 *ps_bridge = aux_to_ps8640(aux);
struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL];
@@ -274,38 +314,49 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
return len;
}
-static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
- const enum ps8640_vdo_control ctrl)
+static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct ps8640 *ps_bridge = aux_to_ps8640(aux);
+ struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
+ int ret;
+
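+	/* Power the bridge and wait for HPD before forwarding the AUX message */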
+ pm_runtime_get_sync(dev);
+ ret = ps8640_ensure_hpd(ps_bridge);
+ if (!ret)
+ ret = ps8640_aux_transfer_msg(aux, msg);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static void ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
+ const enum ps8640_vdo_control ctrl)
{
struct regmap *map = ps_bridge->regmap[PAGE3_DSI_CNTL1];
+ struct device *dev = &ps_bridge->page[PAGE3_DSI_CNTL1]->dev;
u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl };
int ret;
ret = regmap_bulk_write(map, PAGE3_SET_ADD,
vdo_ctrl_buf, sizeof(vdo_ctrl_buf));
- if (ret < 0) {
- DRM_ERROR("failed to %sable VDO: %d\n",
- ctrl == ENABLE ? "en" : "dis", ret);
- return ret;
- }
-
- return 0;
+ if (ret < 0)
+ dev_err(dev, "failed to %sable VDO: %d\n",
+ ctrl == ENABLE ? "en" : "dis", ret);
}
-static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
+static int __maybe_unused ps8640_resume(struct device *dev)
{
- struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
- int ret, status;
-
- if (ps_bridge->powered)
- return;
+ struct ps8640 *ps_bridge = dev_get_drvdata(dev);
+ int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies),
ps_bridge->supplies);
if (ret < 0) {
- DRM_ERROR("cannot enable regulators %d\n", ret);
- return;
+ dev_err(dev, "cannot enable regulators %d\n", ret);
+ return ret;
}
gpiod_set_value(ps_bridge->gpio_powerdown, 0);
@@ -314,86 +365,78 @@ static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
gpiod_set_value(ps_bridge->gpio_reset, 0);
/*
- * Wait for the ps8640 embedded MCU to be ready
- * First wait 200ms and then check the MCU ready flag every 20ms
+ * Mystery 200 ms delay for the "MCU to be ready". It's unclear if
+ * this is truly necessary since the MCU will already signal that
+ * things are "good to go" by signaling HPD on "gpio 9". See
+ * ps8640_ensure_hpd(). For now we'll keep this mystery delay just in
+ * case.
*/
msleep(200);
- ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
- status & PS_GPIO9, 20 * 1000, 200 * 1000);
-
- if (ret < 0) {
- DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", ret);
- goto err_regulators_disable;
- }
-
- msleep(50);
-
- /*
- * The Manufacturer Command Set (MCS) is a device dependent interface
- * intended for factory programming of the display module default
- * parameters. Once the display module is configured, the MCS shall be
- * disabled by the manufacturer. Once disabled, all MCS commands are
- * ignored by the display interface.
- */
-
- ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0);
- if (ret < 0) {
- DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret);
- goto err_regulators_disable;
- }
-
- /* Switch access edp panel's edid through i2c */
- ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN);
- if (ret < 0) {
- DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret);
- goto err_regulators_disable;
- }
-
- ps_bridge->powered = true;
-
- return;
-
-err_regulators_disable:
- regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
- ps_bridge->supplies);
+ return 0;
}
-static void ps8640_bridge_poweroff(struct ps8640 *ps_bridge)
+static int __maybe_unused ps8640_suspend(struct device *dev)
{
+ struct ps8640 *ps_bridge = dev_get_drvdata(dev);
int ret;
- if (!ps_bridge->powered)
- return;
-
gpiod_set_value(ps_bridge->gpio_reset, 1);
gpiod_set_value(ps_bridge->gpio_powerdown, 1);
ret = regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
ps_bridge->supplies);
if (ret < 0)
- DRM_ERROR("cannot disable regulators %d\n", ret);
+ dev_err(dev, "cannot disable regulators %d\n", ret);
- ps_bridge->powered = false;
+ return ret;
}
+static const struct dev_pm_ops ps8640_pm_ops = {
+ SET_RUNTIME_PM_OPS(ps8640_suspend, ps8640_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
static void ps8640_pre_enable(struct drm_bridge *bridge)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
+ struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
int ret;
- ps8640_bridge_poweron(ps_bridge);
+ pm_runtime_get_sync(dev);
+ ps8640_ensure_hpd(ps_bridge);
- ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE);
+ /*
+ * The Manufacturer Command Set (MCS) is a device dependent interface
+ * intended for factory programming of the display module default
+ * parameters. Once the display module is configured, the MCS shall be
+ * disabled by the manufacturer. Once disabled, all MCS commands are
+ * ignored by the display interface.
+ */
+
+ ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0);
if (ret < 0)
- ps8640_bridge_poweroff(ps_bridge);
+ dev_warn(dev, "failed write PAGE2_MCS_EN: %d\n", ret);
+
+	/* Switch to accessing the eDP panel's EDID through I2C */
+ ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN);
+ if (ret < 0)
+ dev_warn(dev, "failed write PAGE2_MCS_EN: %d\n", ret);
+
+ ps8640_bridge_vdo_control(ps_bridge, ENABLE);
+
+ ps_bridge->pre_enabled = true;
}
static void ps8640_post_disable(struct drm_bridge *bridge)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ ps_bridge->pre_enabled = false;
+
ps8640_bridge_vdo_control(ps_bridge, DISABLE);
- ps8640_bridge_poweroff(ps_bridge);
+ pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev);
}
static int ps8640_bridge_attach(struct drm_bridge *bridge,
@@ -401,68 +444,21 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
struct device *dev = &ps_bridge->page[0]->dev;
- struct device_node *in_ep, *dsi_node;
- struct mipi_dsi_device *dsi;
- struct mipi_dsi_host *host;
int ret;
- const struct mipi_dsi_device_info info = { .type = "ps8640",
- .channel = 0,
- .node = NULL,
- };
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- /* port@0 is ps8640 dsi input port */
- in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
- if (!in_ep)
- return -ENODEV;
-
- dsi_node = of_graph_get_remote_port_parent(in_ep);
- of_node_put(in_ep);
- if (!dsi_node)
- return -ENODEV;
-
- host = of_find_mipi_dsi_host_by_node(dsi_node);
- of_node_put(dsi_node);
- if (!host)
- return -ENODEV;
-
- dsi = mipi_dsi_device_register_full(host, &info);
- if (IS_ERR(dsi)) {
- dev_err(dev, "failed to create dsi device\n");
- ret = PTR_ERR(dsi);
- return ret;
- }
-
- ps_bridge->dsi = dsi;
-
- dsi->host = host;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
- MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
- dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->lanes = NUM_MIPI_LANES;
- ret = mipi_dsi_attach(dsi);
- if (ret) {
- dev_err(dev, "failed to attach dsi device: %d\n", ret);
- goto err_dsi_attach;
- }
-
+ ps_bridge->aux.drm_dev = bridge->dev;
ret = drm_dp_aux_register(&ps_bridge->aux);
if (ret) {
dev_err(dev, "failed to register DP AUX channel: %d\n", ret);
- goto err_aux_register;
+ return ret;
}
/* Attach the panel-bridge to the dsi bridge */
return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
&ps_bridge->bridge, flags);
-
-err_aux_register:
- mipi_dsi_detach(dsi);
-err_dsi_attach:
- mipi_dsi_device_unregister(dsi);
- return ret;
}
static void ps8640_bridge_detach(struct drm_bridge *bridge)
@@ -474,7 +470,7 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
- bool poweroff = !ps_bridge->powered;
+ bool poweroff = !ps_bridge->pre_enabled;
struct edid *edid;
/*
@@ -504,6 +500,12 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
return edid;
}
+static void ps8640_runtime_disable(void *data)
+{
+ pm_runtime_dont_use_autosuspend(data);
+ pm_runtime_disable(data);
+}
+
static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.attach = ps8640_bridge_attach,
.detach = ps8640_bridge_detach,
@@ -512,6 +514,53 @@ static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.pre_enable = ps8640_pre_enable,
};
+static int ps8640_bridge_host_attach(struct device *dev, struct ps8640 *ps_bridge)
+{
+ struct device_node *in_ep, *dsi_node;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+ const struct mipi_dsi_device_info info = { .type = "ps8640",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ /* port@0 is ps8640 dsi input port */
+ in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
+ if (!in_ep)
+ return -ENODEV;
+
+ dsi_node = of_graph_get_remote_port_parent(in_ep);
+ of_node_put(in_ep);
+ if (!dsi_node)
+ return -ENODEV;
+
+ host = of_find_mipi_dsi_host_by_node(dsi_node);
+ of_node_put(dsi_node);
+ if (!host)
+ return -EPROBE_DEFER;
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ return PTR_ERR(dsi);
+ }
+
+ ps_bridge->dsi = dsi;
+
+ dsi->host = host;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = NUM_MIPI_LANES;
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int ps8640_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -525,17 +574,6 @@ static int ps8640_probe(struct i2c_client *client)
if (!ps_bridge)
return -ENOMEM;
- /* port@1 is ps8640 output port */
- ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
- if (ret < 0)
- return ret;
- if (!panel)
- return -ENODEV;
-
- ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(ps_bridge->panel_bridge))
- return PTR_ERR(ps_bridge->panel_bridge);
-
ps_bridge->supplies[0].supply = "vdd33";
ps_bridge->supplies[1].supply = "vdd12";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
@@ -558,9 +596,16 @@ static int ps8640_probe(struct i2c_client *client)
ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
ps_bridge->bridge.of_node = dev->of_node;
- ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP;
+ /*
+	 * In the device tree, if the panel is listed under the aux-bus of the
+	 * bridge node, the panel driver should be able to retrieve the EDID by
+	 * itself using the aux-bus. So let's not set DRM_BRIDGE_OP_EDID here.
+ */
+ if (!ps8640_of_panel_on_aux_bus(&client->dev))
+ ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
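+
+	/*
+	 * Hypothetical DT fragment (names illustrative) that would make the
+	 * check above skip DRM_BRIDGE_OP_EDID:
+	 *
+	 *	aux-bus {
+	 *		panel {
+	 *			compatible = "edp-panel";
+	 *		};
+	 *	};
+	 */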
+
ps_bridge->page[PAGE0_DP_CNTL] = client;
ps_bridge->regmap[PAGE0_DP_CNTL] = devm_regmap_init_i2c(client, ps8640_regmap_config);
@@ -587,9 +632,46 @@ static int ps8640_probe(struct i2c_client *client)
ps_bridge->aux.transfer = ps8640_aux_transfer;
drm_dp_aux_init(&ps_bridge->aux);
+ pm_runtime_enable(dev);
+ /*
+ * Powering on ps8640 takes ~300ms. To avoid wasting time on power
+	 * cycling ps8640 too often, set autosuspend_delay to 1000ms to ensure
+	 * the bridge doesn't suspend in between each _aux_transfer_msg() call
+ * during EDID read (~20ms in my experiment) and in between the last
+ * _aux_transfer_msg() call during EDID read and the _pre_enable() call
+ * (~100ms in my experiment).
+ */
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+ pm_suspend_ignore_children(dev, true);
+ ret = devm_add_action_or_reset(dev, ps8640_runtime_disable, dev);
+ if (ret)
+ return ret;
+
+ devm_of_dp_aux_populate_ep_devices(&ps_bridge->aux);
+
+ /* port@1 is ps8640 output port */
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
+ if (ret < 0)
+ return ret;
+ if (!panel)
+ return -ENODEV;
+
+ ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(ps_bridge->panel_bridge))
+ return PTR_ERR(ps_bridge->panel_bridge);
+
drm_bridge_add(&ps_bridge->bridge);
+ ret = ps8640_bridge_host_attach(dev, ps_bridge);
+ if (ret)
+ goto err_bridge_remove;
+
return 0;
+
+err_bridge_remove:
+ drm_bridge_remove(&ps_bridge->bridge);
+ return ret;
}
static int ps8640_remove(struct i2c_client *client)
@@ -613,6 +695,7 @@ static struct i2c_driver ps8640_driver = {
.driver = {
.name = "ps8640",
.of_match_table = ps8640_match,
+ .pm = &ps8640_pm_ops,
},
};
module_i2c_driver(ps8640_driver);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index d0db1acf11d7..7d2ed0ed2fe2 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -320,13 +320,17 @@ static int dw_hdmi_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_dw_hdmi *dw = substream->private_data;
void __iomem *base = dw->data.base;
+ u8 *eld;
int ret;
runtime->hw = dw_hdmi_hw;
- ret = snd_pcm_hw_constraint_eld(runtime, dw->data.eld);
- if (ret < 0)
- return ret;
+ eld = dw->data.get_eld(dw->data.hdmi);
+ if (eld) {
+ ret = snd_pcm_hw_constraint_eld(runtime, eld);
+ if (ret < 0)
+ return ret;
+ }
ret = snd_pcm_limit_hw_rates(runtime);
if (ret < 0)
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
index cb07dc0da5a7..f72d27208ebe 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
@@ -9,15 +9,15 @@ struct dw_hdmi_audio_data {
void __iomem *base;
int irq;
struct dw_hdmi *hdmi;
- u8 *eld;
+ u8 *(*get_eld)(struct dw_hdmi *hdmi);
};
struct dw_hdmi_i2s_audio_data {
struct dw_hdmi *hdmi;
- u8 *eld;
void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
u8 (*read)(struct dw_hdmi *hdmi, int offset);
+ u8 *(*get_eld)(struct dw_hdmi *hdmi);
};
#endif
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index feb04f127b55..f50b47ac11a8 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -135,8 +135,15 @@ static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf,
size_t len)
{
struct dw_hdmi_i2s_audio_data *audio = data;
+ u8 *eld;
+
+ eld = audio->get_eld(audio->hdmi);
+ if (eld)
+ memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len));
+ else
+		/* Pass an empty ELD if the connector is not available */
+ memset(buf, 0, len);
- memcpy(buf, audio->eld, min_t(size_t, MAX_ELD_BYTES, len));
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index f08d0fded61f..54d8fdad395f 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -757,6 +757,14 @@ static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable)
hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
}
+static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi)
+{
+ if (!hdmi->curr_conn)
+ return NULL;
+
+ return hdmi->curr_conn->eld;
+}
+
static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi)
{
hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
@@ -3413,6 +3421,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
+ hdmi->bridge.interlace_allowed = true;
#ifdef CONFIG_OF
hdmi->bridge.of_node = pdev->dev.of_node;
#endif
@@ -3431,7 +3440,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
audio.base = hdmi->regs;
audio.irq = irq;
audio.hdmi = hdmi;
- audio.eld = hdmi->connector.eld;
+ audio.get_eld = hdmi_audio_get_eld;
hdmi->enable_audio = dw_hdmi_ahb_audio_enable;
hdmi->disable_audio = dw_hdmi_ahb_audio_disable;
@@ -3444,7 +3453,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
struct dw_hdmi_i2s_audio_data audio;
audio.hdmi = hdmi;
- audio.eld = hdmi->connector.eld;
+ audio.get_eld = hdmi_audio_get_eld;
audio.write = hdmi_writeb;
audio.read = hdmi_readb;
hdmi->enable_audio = dw_hdmi_i2s_audio_enable;
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index a3db532bbdd1..fd585bf925fe 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -237,6 +237,10 @@ static void tc358768_hw_enable(struct tc358768_priv *priv)
if (priv->enabled)
return;
+ ret = clk_prepare_enable(priv->refclk);
+ if (ret < 0)
+ dev_err(priv->dev, "error enabling refclk (%d)\n", ret);
+
ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
if (ret < 0)
dev_err(priv->dev, "error enabling regulators (%d)\n", ret);
@@ -274,6 +278,8 @@ static void tc358768_hw_disable(struct tc358768_priv *priv)
if (ret < 0)
dev_err(priv->dev, "error disabling regulators (%d)\n", ret);
+ clk_disable_unprepare(priv->refclk);
+
priv->enabled = false;
}
@@ -625,12 +631,19 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ unsigned long mode_flags = dsi_dev->mode_flags;
u32 val, val2, lptxcnt, hact, data_type;
const struct drm_display_mode *mode;
u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk;
- u32 dsiclk, dsibclk;
+ u32 dsiclk, dsibclk, video_start;
+ const u32 internal_delay = 40;
int ret, i;
+ if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+ dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n");
+ mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ }
+
tc358768_hw_enable(priv);
ret = tc358768_sw_reset(priv);
@@ -657,23 +670,27 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
case MIPI_DSI_FMT_RGB888:
val |= (0x3 << 4);
hact = mode->hdisplay * 3;
+ video_start = (mode->htotal - mode->hsync_start) * 3;
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
break;
case MIPI_DSI_FMT_RGB666:
val |= (0x4 << 4);
hact = mode->hdisplay * 3;
+ video_start = (mode->htotal - mode->hsync_start) * 3;
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
val |= (0x4 << 4) | BIT(3);
hact = mode->hdisplay * 18 / 8;
+ video_start = (mode->htotal - mode->hsync_start) * 18 / 8;
data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
break;
case MIPI_DSI_FMT_RGB565:
val |= (0x5 << 4);
hact = mode->hdisplay * 2;
+ video_start = (mode->htotal - mode->hsync_start) * 2;
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
break;
default:
@@ -684,7 +701,8 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
}
/* VSDly[9:0] */
- tc358768_write(priv, TC358768_VSDLY, 1);
+ video_start = max(video_start, internal_delay + 1) - internal_delay;
+ tc358768_write(priv, TC358768_VSDLY, video_start);
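+	/*
+	 * Worked example (illustrative mode, RGB888): htotal = 2200 and
+	 * hsync_start = 2008 give video_start = (2200 - 2008) * 3 = 576
+	 * bytes, so VSDLY = max(576, 41) - 40 = 536.
+	 */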
tc358768_write(priv, TC358768_DATAFMT, val);
tc358768_write(priv, TC358768_DSITX_DT, data_type);
@@ -764,7 +782,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
val |= BIT(i + 1);
tc358768_write(priv, TC358768_HSTXVREGEN, val);
- if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
/* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
@@ -772,31 +790,61 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
dsibclk_nsk) - 2;
- val |= val2 << 16;
+ val = val << 16 | val2;
dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
tc358768_write(priv, TC358768_BTACNTRL1, val);
/* START[0] */
tc358768_write(priv, TC358768_STARTCNTRL, 1);
- /* Set event mode */
- tc358768_write(priv, TC358768_DSI_EVENT, 1);
-
- /* vsw (+ vbp) */
- tc358768_write(priv, TC358768_DSI_VSW,
- mode->vtotal - mode->vsync_start);
- /* vbp (not used in event mode) */
- tc358768_write(priv, TC358768_DSI_VBPR, 0);
- /* vact */
- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
-
- /* (hsw + hbp) * byteclk * ndl / pclk */
- val = (u32)div_u64((mode->htotal - mode->hsync_start) *
- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
- mode->clock * 1000);
- tc358768_write(priv, TC358768_DSI_HSW, val);
- /* hbp (not used in event mode) */
- tc358768_write(priv, TC358768_DSI_HBPR, 0);
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ /* Set pulse mode */
+ tc358768_write(priv, TC358768_DSI_EVENT, 0);
+
+ /* vact */
+ tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
+
+ /* vsw */
+ tc358768_write(priv, TC358768_DSI_VSW,
+ mode->vsync_end - mode->vsync_start);
+ /* vbp */
+ tc358768_write(priv, TC358768_DSI_VBPR,
+ mode->vtotal - mode->vsync_end);
+
+ /* hsw * byteclk * ndl / pclk */
+ val = (u32)div_u64((mode->hsync_end - mode->hsync_start) *
+ ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+ mode->clock * 1000);
+ tc358768_write(priv, TC358768_DSI_HSW, val);
+
+ /* hbp * byteclk * ndl / pclk */
+ val = (u32)div_u64((mode->htotal - mode->hsync_end) *
+ ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+ mode->clock * 1000);
+ tc358768_write(priv, TC358768_DSI_HBPR, val);
+ } else {
+ /* Set event mode */
+ tc358768_write(priv, TC358768_DSI_EVENT, 1);
+
+ /* vact */
+ tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
+
+ /* vsw (+ vbp) */
+ tc358768_write(priv, TC358768_DSI_VSW,
+ mode->vtotal - mode->vsync_start);
+ /* vbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_VBPR, 0);
+
+ /* (hsw + hbp) * byteclk * ndl / pclk */
+ val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+ ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+ mode->clock * 1000);
+ tc358768_write(priv, TC358768_DSI_HSW, val);
+
+ /* hbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_HBPR, 0);
+ }
+
/* hact (bytes) */
tc358768_write(priv, TC358768_DSI_HACT, hact);
@@ -822,7 +870,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
if (!(dsi_dev->mode_flags & MIPI_DSI_MODE_LPM))
val |= TC358768_DSI_CONTROL_TXMD;
- if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
val |= TC358768_DSI_CONTROL_HSCKMD;
if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 2272adcc5b4a..2c76331b251d 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -594,11 +594,26 @@ static int tc_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, tc->panel_bridge,
+ &tc->bridge, flags);
+}
+
+static const struct drm_bridge_funcs tc_bridge_funcs = {
+ .attach = tc_bridge_attach,
+ .pre_enable = tc_bridge_pre_enable,
+ .enable = tc_bridge_enable,
+ .mode_valid = tc_mode_valid,
+ .post_disable = tc_bridge_post_disable,
+};
+
+static int tc_attach_host(struct tc_data *tc)
+{
struct device *dev = &tc->i2c->dev;
struct mipi_dsi_host *host;
struct mipi_dsi_device *dsi;
int ret;
-
const struct mipi_dsi_device_info info = { .type = "tc358775",
.channel = 0,
.node = NULL,
@@ -610,11 +625,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge,
return -EPROBE_DEFER;
}
- dsi = mipi_dsi_device_register_full(host, &info);
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
dev_err(dev, "failed to create dsi device\n");
- ret = PTR_ERR(dsi);
- goto err_dsi_device;
+ return PTR_ERR(dsi);
}
tc->dsi = dsi;
@@ -623,29 +637,15 @@ static int tc_bridge_attach(struct drm_bridge *bridge,
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
- ret = mipi_dsi_attach(dsi);
+ ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
dev_err(dev, "failed to attach dsi to host\n");
- goto err_dsi_attach;
+ return ret;
}
- /* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, tc->panel_bridge,
- &tc->bridge, flags);
-err_dsi_attach:
- mipi_dsi_device_unregister(dsi);
-err_dsi_device:
- return ret;
+ return 0;
}
-static const struct drm_bridge_funcs tc_bridge_funcs = {
- .attach = tc_bridge_attach,
- .pre_enable = tc_bridge_pre_enable,
- .enable = tc_bridge_enable,
- .mode_valid = tc_mode_valid,
- .post_disable = tc_bridge_post_disable,
-};
-
static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
@@ -709,7 +709,15 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, tc);
+ ret = tc_attach_host(tc);
+ if (ret)
+ goto err_bridge_remove;
+
return 0;
+
+err_bridge_remove:
+ drm_bridge_remove(&tc->bridge);
+ return ret;
}
static int tc_remove(struct i2c_client *client)
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index ba1160ec6d6e..945f08de45f1 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -245,47 +245,9 @@ static int sn65dsi83_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- struct device *dev = ctx->dev;
- struct mipi_dsi_device *dsi;
- struct mipi_dsi_host *host;
- int ret = 0;
-
- const struct mipi_dsi_device_info info = {
- .type = "sn65dsi83",
- .channel = 0,
- .node = NULL,
- };
-
- host = of_find_mipi_dsi_host_by_node(ctx->host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
-
- dsi = mipi_dsi_device_register_full(host, &info);
- if (IS_ERR(dsi)) {
- return dev_err_probe(dev, PTR_ERR(dsi),
- "failed to create dsi device\n");
- }
-
- ctx->dsi = dsi;
-
- dsi->lanes = ctx->dsi_lanes;
- dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
-
- ret = mipi_dsi_attach(dsi);
- if (ret < 0) {
- dev_err(dev, "failed to attach dsi to host\n");
- goto err_dsi_attach;
- }
return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
&ctx->bridge, flags);
-
-err_dsi_attach:
- mipi_dsi_device_unregister(dsi);
- return ret;
}
static void sn65dsi83_detach(struct drm_bridge *bridge)
@@ -295,28 +257,9 @@ static void sn65dsi83_detach(struct drm_bridge *bridge)
if (!ctx->dsi)
return;
- mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
- drm_bridge_remove(&ctx->bridge);
ctx->dsi = NULL;
}
-static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
-
- /*
- * Reset the chip, pull EN line low for t_reset=10ms,
- * then high for t_en=1ms.
- */
- regcache_mark_dirty(ctx->regmap);
- gpiod_set_value(ctx->enable_gpio, 0);
- usleep_range(10000, 11000);
- gpiod_set_value(ctx->enable_gpio, 1);
- usleep_range(1000, 1100);
-}
-
static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx,
const struct drm_display_mode *mode)
{
@@ -394,6 +337,10 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
u16 val;
int ret;
+ /* Deassert reset */
+ gpiod_set_value(ctx->enable_gpio, 1);
+ usleep_range(1000, 1100);
+
/* Get the LVDS format from the bridge state. */
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
@@ -540,18 +487,11 @@ static void sn65dsi83_atomic_disable(struct drm_bridge *bridge,
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- /* Clear reset, disable PLL */
- regmap_write(ctx->regmap, REG_RC_RESET, 0x00);
- regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
-}
-
-static void sn65dsi83_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
-
- /* Put the chip in reset, pull EN line low. */
+	/* Put the chip in reset, pull EN line low, and ensure 10ms reset low timing. */
gpiod_set_value(ctx->enable_gpio, 0);
+ usleep_range(10000, 11000);
+
+ regcache_mark_dirty(ctx->regmap);
}
static enum drm_mode_status
@@ -597,10 +537,8 @@ sn65dsi83_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
static const struct drm_bridge_funcs sn65dsi83_funcs = {
.attach = sn65dsi83_attach,
.detach = sn65dsi83_detach,
- .atomic_pre_enable = sn65dsi83_atomic_pre_enable,
.atomic_enable = sn65dsi83_atomic_enable,
.atomic_disable = sn65dsi83_atomic_disable,
- .atomic_post_disable = sn65dsi83_atomic_post_disable,
.mode_valid = sn65dsi83_mode_valid,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
@@ -664,6 +602,44 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
return 0;
}
+static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
+{
+ struct device *dev = ctx->dev;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ const struct mipi_dsi_device_info info = {
+ .type = "sn65dsi83",
+ .channel = 0,
+ .node = NULL,
+ };
+ int ret;
+
+ host = of_find_mipi_dsi_host_by_node(ctx->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi))
+ return dev_err_probe(dev, PTR_ERR(dsi),
+ "failed to create dsi device\n");
+
+ ctx->dsi = dsi;
+
+ dsi->lanes = ctx->dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int sn65dsi83_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -685,10 +661,13 @@ static int sn65dsi83_probe(struct i2c_client *client,
model = id->driver_data;
}
+	/* Put the chip in reset, pull EN line low, and ensure 10ms reset low timing. */
ctx->enable_gpio = devm_gpiod_get(ctx->dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(ctx->enable_gpio))
return PTR_ERR(ctx->enable_gpio);
+ usleep_range(10000, 11000);
+
ret = sn65dsi83_parse_dt(ctx, model);
if (ret)
return ret;
@@ -704,13 +683,22 @@ static int sn65dsi83_probe(struct i2c_client *client,
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
+ ret = sn65dsi83_host_attach(ctx);
+ if (ret)
+ goto err_remove_bridge;
+
return 0;
+
+err_remove_bridge:
+ drm_bridge_remove(&ctx->bridge);
+ return ret;
}
static int sn65dsi83_remove(struct i2c_client *client)
{
struct sn65dsi83 *ctx = i2c_get_clientdata(client);
+ drm_bridge_remove(&ctx->bridge);
of_node_put(ctx->host_node);
return 0;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 6154bed0af5b..dab8f76618f3 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -4,7 +4,9 @@
* datasheet: https://www.ti.com/lit/ds/symlink/sn65dsi86.pdf
*/
+#include <linux/atomic.h>
#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
@@ -15,6 +17,7 @@
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
+#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -91,6 +94,13 @@
#define SN_ML_TX_MODE_REG 0x96
#define ML_TX_MAIN_LINK_OFF 0
#define ML_TX_NORMAL_MODE BIT(0)
+#define SN_PWM_PRE_DIV_REG 0xA0
+#define SN_BACKLIGHT_SCALE_REG 0xA1
+#define BACKLIGHT_SCALE_MAX 0xFFFF
+#define SN_BACKLIGHT_REG 0xA3
+#define SN_PWM_EN_INV_REG 0xA5
+#define SN_PWM_INV_MASK BIT(0)
+#define SN_PWM_EN_MASK BIT(1)
#define SN_AUX_CMD_STATUS_REG 0xF4
#define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3)
#define AUX_IRQ_STATUS_AUX_SHORT BIT(5)
@@ -113,11 +123,14 @@
#define SN_LINK_TRAINING_TRIES 10
+#define SN_PWM_GPIO_IDX 3 /* 4th GPIO */
+
/**
* struct ti_sn65dsi86 - Platform data for ti-sn65dsi86 driver.
* @bridge_aux: AUX-bus sub device for MIPI-to-eDP bridge functionality.
* @gpio_aux: AUX-bus sub device for GPIO controller functionality.
* @aux_aux: AUX-bus sub device for eDP AUX channel functionality.
+ * @pwm_aux: AUX-bus sub device for PWM controller functionality.
*
* @dev: Pointer to the top level (i2c) device.
* @regmap: Regmap for accessing i2c.
@@ -145,11 +158,17 @@
* bitmap so we can do atomic ops on it without an extra
* lock so concurrent users of our 4 GPIOs don't stomp on
* each other's read-modify-write.
+ *
+ * @pchip: pwm_chip if the PWM is exposed.
+ * @pwm_enabled: Used to track if the PWM signal is currently enabled.
+ * @pwm_pin_busy: Track if GPIO4 is currently requested for GPIO or PWM.
+ * @pwm_refclk_freq: Cache for the reference clock input to the PWM.
*/
struct ti_sn65dsi86 {
struct auxiliary_device bridge_aux;
struct auxiliary_device gpio_aux;
struct auxiliary_device aux_aux;
+ struct auxiliary_device pwm_aux;
struct device *dev;
struct regmap *regmap;
@@ -172,6 +191,12 @@ struct ti_sn65dsi86 {
struct gpio_chip gchip;
DECLARE_BITMAP(gchip_output, SN_NUM_GPIOS);
#endif
+#if defined(CONFIG_PWM)
+ struct pwm_chip pchip;
+ bool pwm_enabled;
+ atomic_t pwm_pin_busy;
+#endif
+ unsigned int pwm_refclk_freq;
};
static const struct regmap_range ti_sn65dsi86_volatile_ranges[] = {
@@ -188,13 +213,30 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = {
.val_bits = 8,
.volatile_table = &ti_sn_bridge_volatile_table,
.cache_type = REGCACHE_NONE,
+ .max_register = 0xFF,
};
+static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata,
+ unsigned int reg, u16 *val)
+{
+ u8 buf[2];
+ int ret;
+
+ ret = regmap_bulk_read(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
+ if (ret)
+ return ret;
+
+ *val = buf[0] | (buf[1] << 8);
+
+ return 0;
+}
+
static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata,
unsigned int reg, u16 val)
{
- regmap_write(pdata->regmap, reg, val & 0xFF);
- regmap_write(pdata->regmap, reg + 1, val >> 8);
+ u8 buf[2] = { val & 0xff, val >> 8 };
+
+ regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
}
static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata)
@@ -253,6 +295,12 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
REFCLK_FREQ(i));
+
+ /*
+ * The PWM refclk is based on the value written to SN_DPPLL_SRC_REG,
+ * regardless of its actual sourcing.
+ */
+ pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i];
}
static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
@@ -655,58 +703,24 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
return container_of(bridge, struct ti_sn65dsi86, bridge);
}
-static int ti_sn_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
+static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
{
- int ret, val;
- struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ int val;
struct mipi_dsi_host *host;
struct mipi_dsi_device *dsi;
+ struct device *dev = pdata->dev;
const struct mipi_dsi_device_info info = { .type = "ti_sn_bridge",
.channel = 0,
.node = NULL,
- };
-
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
-
- pdata->aux.drm_dev = bridge->dev;
- ret = drm_dp_aux_register(&pdata->aux);
- if (ret < 0) {
- drm_err(bridge->dev, "Failed to register DP AUX channel: %d\n", ret);
- return ret;
- }
-
- ret = ti_sn_bridge_connector_init(pdata);
- if (ret < 0)
- goto err_conn_init;
+ };
- /*
- * TODO: ideally finding host resource and dsi dev registration needs
- * to be done in bridge probe. But some existing DSI host drivers will
- * wait for any of the drm_bridge/drm_panel to get added to the global
- * bridge/panel list, before completing their probe. So if we do the
- * dsi dev registration part in bridge probe, before populating in
- * the global bridge list, then it will cause deadlock as dsi host probe
- * will never complete, neither our bridge probe. So keeping it here
- * will satisfy most of the existing host drivers. Once the host driver
- * is fixed we can move the below code to bridge probe safely.
- */
host = of_find_mipi_dsi_host_by_node(pdata->host_node);
- if (!host) {
- DRM_ERROR("failed to find dsi host\n");
- ret = -ENODEV;
- goto err_dsi_host;
- }
+ if (!host)
+ return -EPROBE_DEFER;
- dsi = mipi_dsi_device_register_full(host, &info);
- if (IS_ERR(dsi)) {
- DRM_ERROR("failed to create dsi device\n");
- ret = PTR_ERR(dsi);
- goto err_dsi_host;
- }
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
/* TODO: setting to 4 MIPI lanes always for now */
dsi->lanes = 4;
@@ -714,18 +728,38 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
/* check if continuous dsi clock is required or not */
- pm_runtime_get_sync(pdata->dev);
+ pm_runtime_get_sync(dev);
regmap_read(pdata->regmap, SN_DPPLL_SRC_REG, &val);
- pm_runtime_put_autosuspend(pdata->dev);
+ pm_runtime_put_autosuspend(dev);
if (!(val & DPPLL_CLK_SRC_DSICLK))
dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS;
- ret = mipi_dsi_attach(dsi);
+ pdata->dsi = dsi;
+
+ return devm_mipi_dsi_attach(dev, dsi);
+}
+
+static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ int ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
+ pdata->aux.drm_dev = bridge->dev;
+ ret = drm_dp_aux_register(&pdata->aux);
if (ret < 0) {
- DRM_ERROR("failed to attach dsi to host\n");
- goto err_dsi_attach;
+ drm_err(bridge->dev, "Failed to register DP AUX channel: %d\n", ret);
+ return ret;
}
- pdata->dsi = dsi;
+
+ ret = ti_sn_bridge_connector_init(pdata);
+ if (ret < 0)
+ goto err_conn_init;
/* We never want the next bridge to *also* create a connector: */
flags |= DRM_BRIDGE_ATTACH_NO_CONNECTOR;
@@ -734,14 +768,10 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
&pdata->bridge, flags);
if (ret < 0)
- goto err_dsi_detach;
+ goto err_dsi_host;
return 0;
-err_dsi_detach:
- mipi_dsi_detach(dsi);
-err_dsi_attach:
- mipi_dsi_device_unregister(dsi);
err_dsi_host:
drm_connector_cleanup(&pdata->connector);
err_conn_init:
@@ -1227,7 +1257,17 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
drm_bridge_add(&pdata->bridge);
+ ret = ti_sn_attach_host(pdata);
+ if (ret) {
+ dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n");
+ goto err_remove_bridge;
+ }
+
return 0;
+
+err_remove_bridge:
+ drm_bridge_remove(&pdata->bridge);
+ return ret;
}
static void ti_sn_bridge_remove(struct auxiliary_device *adev)
@@ -1237,11 +1277,6 @@ static void ti_sn_bridge_remove(struct auxiliary_device *adev)
if (!pdata)
return;
- if (pdata->dsi) {
- mipi_dsi_detach(pdata->dsi);
- mipi_dsi_device_unregister(pdata->dsi);
- }
-
drm_bridge_remove(&pdata->bridge);
of_node_put(pdata->host_node);
@@ -1260,9 +1295,287 @@ static struct auxiliary_driver ti_sn_bridge_driver = {
};
/* -----------------------------------------------------------------------------
- * GPIO Controller
+ * PWM Controller
*/
+#if defined(CONFIG_PWM)
+static int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata)
+{
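+	/* atomic_xchg() returns the previous value: non-zero means the PWM pin is already claimed */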
+ return atomic_xchg(&pdata->pwm_pin_busy, 1) ? -EBUSY : 0;
+}
+
+static void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata)
+{
+ atomic_set(&pdata->pwm_pin_busy, 0);
+}
+
+static struct ti_sn65dsi86 *pwm_chip_to_ti_sn_bridge(struct pwm_chip *chip)
+{
+ return container_of(chip, struct ti_sn65dsi86, pchip);
+}
+
+static int ti_sn_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip);
+
+ return ti_sn_pwm_pin_request(pdata);
+}
+
+static void ti_sn_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip);
+
+ ti_sn_pwm_pin_release(pdata);
+}
+
+/*
+ * Limitations:
+ * - The PWM signal is not driven when the chip is powered down or in its
+ *   reset state, and the driver does not implement the "suspend state"
+ *   described in the documentation. In order to save power, state->enabled is
+ *   interpreted as denoting whether the signal is expected to be valid, and
+ *   is used to determine if the chip needs to be kept powered.
+ * - Changing both period and duty_cycle is not done atomically, and neither
+ *   are the multi-byte register updates, so the output might briefly be
+ *   undefined during an update.
+ */
+static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip);
+ unsigned int pwm_en_inv;
+ unsigned int backlight;
+ unsigned int pre_div;
+ unsigned int scale;
+ u64 period_max;
+ u64 period;
+ int ret;
+
+ if (!pdata->pwm_enabled) {
+ ret = pm_runtime_get_sync(pdata->dev);
+ if (ret < 0) {
+ pm_runtime_put_sync(pdata->dev);
+ return ret;
+ }
+ }
+
+ if (state->enabled) {
+ if (!pdata->pwm_enabled) {
+ /*
+ * The chip might have been powered down while we
+ * didn't hold a PM runtime reference, so mux in the
+ * PWM function on the GPIO pin again.
+ */
+ ret = regmap_update_bits(pdata->regmap, SN_GPIO_CTRL_REG,
+ SN_GPIO_MUX_MASK << (2 * SN_PWM_GPIO_IDX),
+ SN_GPIO_MUX_SPECIAL << (2 * SN_PWM_GPIO_IDX));
+ if (ret) {
+ dev_err(pdata->dev, "failed to mux in PWM function\n");
+ goto out;
+ }
+ }
+
+ /*
+ * Per the datasheet the PWM frequency is given by:
+ *
+ * REFCLK_FREQ
+ * PWM_FREQ = -----------------------------------
+ * PWM_PRE_DIV * BACKLIGHT_SCALE + 1
+ *
+ * However, after careful review the author is convinced that
+		 * the documentation has lost some parentheses around
+ * "BACKLIGHT_SCALE + 1".
+ *
+ * With the period T_pwm = 1/PWM_FREQ this can be written:
+ *
+ * T_pwm * REFCLK_FREQ = PWM_PRE_DIV * (BACKLIGHT_SCALE + 1)
+ *
+ * In order to keep BACKLIGHT_SCALE within its 16 bits,
+ * PWM_PRE_DIV must be:
+ *
+ * T_pwm * REFCLK_FREQ
+ * PWM_PRE_DIV >= -------------------------
+ * BACKLIGHT_SCALE_MAX + 1
+ *
+ * To simplify the search and to favour higher resolution of
+ * the duty cycle over accuracy of the period, the lowest
+ * possible PWM_PRE_DIV is used. Finally the scale is
+ * calculated as:
+ *
+ * T_pwm * REFCLK_FREQ
+ * BACKLIGHT_SCALE = ---------------------- - 1
+ * PWM_PRE_DIV
+ *
+ * Here T_pwm is represented in seconds, so appropriate scaling
+ * to nanoseconds is necessary.
+ */
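+
+		/*
+		 * Worked example (numbers illustrative, not from the datasheet):
+		 * REFCLK_FREQ = 19.2 MHz with a requested 20 kHz PWM
+		 * (period = 50000 ns) at 50% duty (duty_cycle = 25000 ns) gives:
+		 *
+		 *   T_pwm * REFCLK_FREQ = 50e-6 * 19.2e6        = 960
+		 *   PWM_PRE_DIV     = DIV_ROUND_UP(960, 65536)  = 1
+		 *   BACKLIGHT_SCALE = 960 / 1 - 1               = 959
+		 *   BACKLIGHT       = 25e-6 * 19.2e6 / 1        = 480
+		 *
+		 * so duty/period = 480 / (959 + 1) = 0.5, as requested.
+		 */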
+
+ /* Minimum T_pwm is 1 / REFCLK_FREQ */
+ if (state->period <= NSEC_PER_SEC / pdata->pwm_refclk_freq) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Maximum T_pwm is 255 * (65535 + 1) / REFCLK_FREQ
+ * Limit period to this to avoid overflows
+ */
+ period_max = div_u64((u64)NSEC_PER_SEC * 255 * (65535 + 1),
+ pdata->pwm_refclk_freq);
+ period = min(state->period, period_max);
+
+ pre_div = DIV64_U64_ROUND_UP(period * pdata->pwm_refclk_freq,
+ (u64)NSEC_PER_SEC * (BACKLIGHT_SCALE_MAX + 1));
+ scale = div64_u64(period * pdata->pwm_refclk_freq, (u64)NSEC_PER_SEC * pre_div) - 1;
+
+ /*
+ * The documentation has the duty ratio given as:
+ *
+ * duty BACKLIGHT
+ * ------- = ---------------------
+ * period BACKLIGHT_SCALE + 1
+ *
+ * Solve for BACKLIGHT, substituting BACKLIGHT_SCALE according
+ * to definition above and adjusting for nanosecond
+ * representation of duty cycle gives us:
+ */
+ backlight = div64_u64(state->duty_cycle * pdata->pwm_refclk_freq,
+ (u64)NSEC_PER_SEC * pre_div);
+ if (backlight > scale)
+ backlight = scale;
+
+ ret = regmap_write(pdata->regmap, SN_PWM_PRE_DIV_REG, pre_div);
+ if (ret) {
+ dev_err(pdata->dev, "failed to update PWM_PRE_DIV\n");
+ goto out;
+ }
+
+ ti_sn65dsi86_write_u16(pdata, SN_BACKLIGHT_SCALE_REG, scale);
+ ti_sn65dsi86_write_u16(pdata, SN_BACKLIGHT_REG, backlight);
+ }
+
+ pwm_en_inv = FIELD_PREP(SN_PWM_EN_MASK, state->enabled) |
+ FIELD_PREP(SN_PWM_INV_MASK, state->polarity == PWM_POLARITY_INVERSED);
+ ret = regmap_write(pdata->regmap, SN_PWM_EN_INV_REG, pwm_en_inv);
+ if (ret) {
+ dev_err(pdata->dev, "failed to update PWM_EN/PWM_INV\n");
+ goto out;
+ }
+
+ pdata->pwm_enabled = state->enabled;
+out:
+
+ if (!pdata->pwm_enabled)
+ pm_runtime_put_sync(pdata->dev);
+
+ return ret;
+}
+
+static void ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip);
+ unsigned int pwm_en_inv;
+ unsigned int pre_div;
+ u16 backlight;
+ u16 scale;
+ int ret;
+
+ ret = regmap_read(pdata->regmap, SN_PWM_EN_INV_REG, &pwm_en_inv);
+ if (ret)
+ return;
+
+ ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_SCALE_REG, &scale);
+ if (ret)
+ return;
+
+ ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_REG, &backlight);
+ if (ret)
+ return;
+
+ ret = regmap_read(pdata->regmap, SN_PWM_PRE_DIV_REG, &pre_div);
+ if (ret)
+ return;
+
+ state->enabled = FIELD_GET(SN_PWM_EN_MASK, pwm_en_inv);
+ if (FIELD_GET(SN_PWM_INV_MASK, pwm_en_inv))
+ state->polarity = PWM_POLARITY_INVERSED;
+ else
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pre_div * (scale + 1),
+ pdata->pwm_refclk_freq);
+ state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pre_div * backlight,
+ pdata->pwm_refclk_freq);
+
+ if (state->duty_cycle > state->period)
+ state->duty_cycle = state->period;
+}
+
+static const struct pwm_ops ti_sn_pwm_ops = {
+ .request = ti_sn_pwm_request,
+ .free = ti_sn_pwm_free,
+ .apply = ti_sn_pwm_apply,
+ .get_state = ti_sn_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static int ti_sn_pwm_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
+
+ pdata->pchip.dev = pdata->dev;
+ pdata->pchip.ops = &ti_sn_pwm_ops;
+ pdata->pchip.npwm = 1;
+ pdata->pchip.of_xlate = of_pwm_single_xlate;
+ pdata->pchip.of_pwm_n_cells = 1;
+
+ return pwmchip_add(&pdata->pchip);
+}
+
+static void ti_sn_pwm_remove(struct auxiliary_device *adev)
+{
+ struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
+
+ pwmchip_remove(&pdata->pchip);
+
+ if (pdata->pwm_enabled)
+ pm_runtime_put_sync(pdata->dev);
+}
+
+static const struct auxiliary_device_id ti_sn_pwm_id_table[] = {
+ { .name = "ti_sn65dsi86.pwm", },
+ {},
+};
+
+static struct auxiliary_driver ti_sn_pwm_driver = {
+ .name = "pwm",
+ .probe = ti_sn_pwm_probe,
+ .remove = ti_sn_pwm_remove,
+ .id_table = ti_sn_pwm_id_table,
+};
+
+static int __init ti_sn_pwm_register(void)
+{
+ return auxiliary_driver_register(&ti_sn_pwm_driver);
+}
+
+static void ti_sn_pwm_unregister(void)
+{
+ auxiliary_driver_unregister(&ti_sn_pwm_driver);
+}
+
+#else
+static inline int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return 0; }
+static inline void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) {}
+
+static inline int ti_sn_pwm_register(void) { return 0; }
+static inline void ti_sn_pwm_unregister(void) {}
+#endif
+
+/* -----------------------------------------------------------------------------
+ * GPIO Controller
+ */
#if defined(CONFIG_OF_GPIO)
static int tn_sn_bridge_of_xlate(struct gpio_chip *chip,
@@ -1395,10 +1708,25 @@ static int ti_sn_bridge_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
+static int ti_sn_bridge_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
+
+ if (offset == SN_PWM_GPIO_IDX)
+ return ti_sn_pwm_pin_request(pdata);
+
+ return 0;
+}
+
static void ti_sn_bridge_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
+ struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
+
/* We won't keep pm_runtime if we're input, so switch there on free */
ti_sn_bridge_gpio_direction_input(chip, offset);
+
+ if (offset == SN_PWM_GPIO_IDX)
+ ti_sn_pwm_pin_release(pdata);
}
static const char * const ti_sn_bridge_gpio_names[SN_NUM_GPIOS] = {
@@ -1420,6 +1748,7 @@ static int ti_sn_gpio_probe(struct auxiliary_device *adev,
pdata->gchip.owner = THIS_MODULE;
pdata->gchip.of_xlate = tn_sn_bridge_of_xlate;
pdata->gchip.of_gpio_n_cells = 2;
+ pdata->gchip.request = ti_sn_bridge_gpio_request;
pdata->gchip.free = ti_sn_bridge_gpio_free;
pdata->gchip.get_direction = ti_sn_bridge_gpio_get_direction;
pdata->gchip.direction_input = ti_sn_bridge_gpio_direction_input;
@@ -1546,10 +1875,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client,
* ordering. The bridge wants the panel to be there when it probes.
* The panel wants its HPD GPIO (provided by sn65dsi86 on some boards)
* when it probes. The panel and maybe backlight might want the DDC
- * bus. Soon the PWM provided by the bridge chip will have the same
- * problem. Having sub-devices allows the some sub devices to finish
- * probing even if others return -EPROBE_DEFER and gets us around the
- * problems.
+	 * bus or the pwm_chip. Having sub-devices allows some of the
+	 * sub-devices to finish probing even if others return -EPROBE_DEFER
+	 * and gets us around the problems.
*/
if (IS_ENABLED(CONFIG_OF_GPIO)) {
@@ -1558,6 +1886,12 @@ static int ti_sn65dsi86_probe(struct i2c_client *client,
return ret;
}
+ if (IS_ENABLED(CONFIG_PWM)) {
+ ret = ti_sn65dsi86_add_aux_device(pdata, &pdata->pwm_aux, "pwm");
+ if (ret)
+ return ret;
+ }
+
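
The sub-device scheme described in the comment above amounts to registering each function (GPIO, PWM, AUX, bridge) as its own auxiliary device so each can probe, and -EPROBE_DEFER, independently. A hedged sketch of such a helper, assuming caller-managed teardown (the driver's actual ti_sn65dsi86_add_aux_device() additionally ties cleanup to the parent device):

	/* Needs <linux/auxiliary_bus.h>; names here are illustrative. */
	static int example_add_aux_device(struct device *parent,
					  struct auxiliary_device *adev,
					  const char *name)
	{
		int ret;

		adev->name = name;
		adev->dev.parent = parent;

		ret = auxiliary_device_init(adev);
		if (ret)
			return ret;

		ret = auxiliary_device_add(adev);
		if (ret)
			auxiliary_device_uninit(adev);

		return ret;
	}
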
/*
* NOTE: At the end of the AUX channel probe we'll add the aux device
* for the bridge. This is because the bridge can't be used until the
@@ -1601,10 +1935,14 @@ static int __init ti_sn65dsi86_init(void)
if (ret)
goto err_main_was_registered;
- ret = auxiliary_driver_register(&ti_sn_aux_driver);
+ ret = ti_sn_pwm_register();
if (ret)
goto err_gpio_was_registered;
+ ret = auxiliary_driver_register(&ti_sn_aux_driver);
+ if (ret)
+ goto err_pwm_was_registered;
+
ret = auxiliary_driver_register(&ti_sn_bridge_driver);
if (ret)
goto err_aux_was_registered;
@@ -1613,6 +1951,8 @@ static int __init ti_sn65dsi86_init(void)
err_aux_was_registered:
auxiliary_driver_unregister(&ti_sn_aux_driver);
+err_pwm_was_registered:
+ ti_sn_pwm_unregister();
err_gpio_was_registered:
ti_sn_gpio_unregister();
err_main_was_registered:
@@ -1626,6 +1966,7 @@ static void __exit ti_sn65dsi86_exit(void)
{
auxiliary_driver_unregister(&ti_sn_bridge_driver);
auxiliary_driver_unregister(&ti_sn_aux_driver);
+ ti_sn_pwm_unregister();
ti_sn_gpio_unregister();
i2c_del_driver(&ti_sn65dsi86_driver);
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index ff1416cd609a..88cd992df356 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -74,7 +74,7 @@ int drm_crtc_commit_wait(struct drm_crtc_commit *commit)
ret = wait_for_completion_timeout(&commit->hw_done, timeout);
if (!ret) {
- DRM_ERROR("hw_done timed out\n");
+ drm_err(commit->crtc->dev, "hw_done timed out\n");
return -ETIMEDOUT;
}
@@ -84,7 +84,7 @@ int drm_crtc_commit_wait(struct drm_crtc_commit *commit)
*/
ret = wait_for_completion_timeout(&commit->flip_done, timeout);
if (!ret) {
- DRM_ERROR("flip_done timed out\n");
+ drm_err(commit->crtc->dev, "flip_done timed out\n");
return -ETIMEDOUT;
}
@@ -140,7 +140,7 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
state->dev = dev;
- DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
+ drm_dbg_atomic(dev, "Allocated atomic state %p\n", state);
return 0;
fail:
@@ -191,7 +191,7 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
struct drm_mode_config *config = &dev->mode_config;
int i;
- DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
+ drm_dbg_atomic(dev, "Clearing atomic state %p\n", state);
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i].ptr;
@@ -301,7 +301,7 @@ void __drm_atomic_state_free(struct kref *ref)
drm_atomic_state_clear(state);
- DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
+ drm_dbg_atomic(state->dev, "Freeing atomic state %p\n", state);
if (config->funcs->atomic_state_free) {
config->funcs->atomic_state_free(state);
@@ -358,8 +358,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
state->crtcs[index].ptr = crtc;
crtc_state->state = state;
- DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
- crtc->base.id, crtc->name, crtc_state, state);
+ drm_dbg_atomic(state->dev, "Added [CRTC:%d:%s] %p state to %p\n",
+ crtc->base.id, crtc->name, crtc_state, state);
return crtc_state;
}
@@ -379,8 +379,9 @@ static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
*/
if (new_crtc_state->active && !new_crtc_state->enable) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] active without enabled\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
@@ -390,15 +391,17 @@ static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
*/
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] enabled without mode blob\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] disabled with mode blob\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
@@ -414,8 +417,9 @@ static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
*/
if (new_crtc_state->event &&
!new_crtc_state->active && !old_crtc_state->active) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] requesting event but off\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
@@ -460,8 +464,9 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
return 0;
if (writeback_job->fb && !state->crtc) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
- connector->base.id, connector->name);
+ drm_dbg_atomic(connector->dev,
+ "[CONNECTOR:%d:%s] framebuffer without CRTC\n",
+ connector->base.id, connector->name);
return -EINVAL;
}
@@ -470,16 +475,18 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
state->crtc);
if (writeback_job->fb && !crtc_state->active) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
- connector->base.id, connector->name,
- state->crtc->base.id);
+ drm_dbg_atomic(connector->dev,
+ "[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
+ connector->base.id, connector->name,
+ state->crtc->base.id);
return -EINVAL;
}
if (!writeback_job->fb) {
if (writeback_job->out_fence) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
- connector->base.id, connector->name);
+ drm_dbg_atomic(connector->dev,
+ "[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+ connector->base.id, connector->name);
return -EINVAL;
}
@@ -537,8 +544,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
state->planes[index].new_state = plane_state;
plane_state->state = state;
- DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
- plane->base.id, plane->name, plane_state, state);
+ drm_dbg_atomic(plane->dev, "Added [PLANE:%d:%s] %p state to %p\n",
+ plane->base.id, plane->name, plane_state, state);
if (plane_state->crtc) {
struct drm_crtc_state *crtc_state;
@@ -594,12 +601,12 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
/* either *both* CRTC and FB must be set, or neither */
if (crtc && !fb) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
- plane->base.id, plane->name);
+ drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no FB\n",
+ plane->base.id, plane->name);
return -EINVAL;
} else if (fb && !crtc) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
- plane->base.id, plane->name);
+ drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] FB set but no CRTC\n",
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -609,9 +616,10 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
- DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
- crtc->base.id, crtc->name,
- plane->base.id, plane->name);
+ drm_dbg_atomic(plane->dev,
+ "Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
+ crtc->base.id, crtc->name,
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -619,9 +627,10 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
ret = drm_plane_check_pixel_format(plane, fb->format->format,
fb->modifier);
if (ret) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n",
- plane->base.id, plane->name,
- &fb->format->format, fb->modifier);
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n",
+ plane->base.id, plane->name,
+ &fb->format->format, fb->modifier);
return ret;
}
@@ -630,10 +639,11 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
new_plane_state->crtc_h > INT_MAX ||
new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
- plane->base.id, plane->name,
- new_plane_state->crtc_w, new_plane_state->crtc_h,
- new_plane_state->crtc_x, new_plane_state->crtc_y);
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane->base.id, plane->name,
+ new_plane_state->crtc_w, new_plane_state->crtc_h,
+ new_plane_state->crtc_x, new_plane_state->crtc_y);
return -ERANGE;
}
@@ -645,18 +655,19 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
new_plane_state->src_x > fb_width - new_plane_state->src_w ||
new_plane_state->src_h > fb_height ||
new_plane_state->src_y > fb_height - new_plane_state->src_h) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
- "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
- plane->base.id, plane->name,
- new_plane_state->src_w >> 16,
- ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
- new_plane_state->src_h >> 16,
- ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
- new_plane_state->src_x >> 16,
- ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
- new_plane_state->src_y >> 16,
- ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
- fb->width, fb->height);
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+ plane->base.id, plane->name,
+ new_plane_state->src_w >> 16,
+ ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
+ new_plane_state->src_h >> 16,
+ ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
+ new_plane_state->src_x >> 16,
+ ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
+ new_plane_state->src_y >> 16,
+ ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
+ fb->width, fb->height);
return -ENOSPC;
}
@@ -671,9 +682,10 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
clips->y1 < 0 ||
clips->x2 > fb_width ||
clips->y2 > fb_height) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
- plane->base.id, plane->name, clips->x1,
- clips->y1, clips->x2, clips->y2);
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
+ plane->base.id, plane->name, clips->x1,
+ clips->y1, clips->x2, clips->y2);
return -EINVAL;
}
clips++;
@@ -681,8 +693,9 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
}
if (plane_switching_crtc(old_plane_state, new_plane_state)) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
- plane->base.id, plane->name);
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] switching CRTC directly\n",
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -846,8 +859,9 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
state->num_private_objs = num_objs;
- DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
- obj, obj_state, state);
+ drm_dbg_atomic(state->dev,
+ "Added new private object %p state %p to %p\n",
+ obj, obj_state, state);
return obj_state;
}
@@ -1027,7 +1041,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
state->connectors[index].ptr = connector;
connector_state->state = state;
- DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
+ drm_dbg_atomic(connector->dev, "Added [CONNECTOR:%d:%s] %p state to %p\n",
connector->base.id, connector->name,
connector_state, state);
@@ -1160,8 +1174,9 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
if (!encoder)
return 0;
- DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n",
- encoder->base.id, encoder->name, state);
+ drm_dbg_atomic(encoder->dev,
+ "Adding all bridges for [encoder:%d:%s] to %p\n",
+ encoder->base.id, encoder->name, state);
drm_for_each_bridge_in_chain(encoder, bridge) {
/* Skip bridges that don't implement the atomic state hooks. */
@@ -1213,8 +1228,9 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
if (ret)
return ret;
- DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
- crtc->base.id, crtc->name, state);
+ drm_dbg_atomic(crtc->dev,
+ "Adding all current connectors for [CRTC:%d:%s] to %p\n",
+ crtc->base.id, crtc->name, state);
/*
* Changed connectors are already in @state, so only need to look
@@ -1267,8 +1283,9 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
- DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
- crtc->base.id, crtc->name, state);
+ drm_dbg_atomic(crtc->dev,
+ "Adding all current planes for [CRTC:%d:%s] to %p\n",
+ crtc->base.id, crtc->name, state);
drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
struct drm_plane_state *plane_state =
@@ -1308,16 +1325,18 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
unsigned int affected_crtc = 0;
int i, ret = 0;
- DRM_DEBUG_ATOMIC("checking %p\n", state);
+ drm_dbg_atomic(dev, "checking %p\n", state);
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
- requested_crtc |= drm_crtc_mask(crtc);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->enable)
+ requested_crtc |= drm_crtc_mask(crtc);
+ }
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
if (ret) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
- plane->base.id, plane->name);
+ drm_dbg_atomic(dev, "[PLANE:%d:%s] atomic core check failed\n",
+ plane->base.id, plane->name);
return ret;
}
}
@@ -1325,8 +1344,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
if (ret) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] atomic core check failed\n",
+ crtc->base.id, crtc->name);
return ret;
}
}
@@ -1334,8 +1353,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_connector_check(conn, conn_state);
if (ret) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
- conn->base.id, conn->name);
+ drm_dbg_atomic(dev, "[CONNECTOR:%d:%s] atomic core check failed\n",
+ conn->base.id, conn->name);
return ret;
}
}
@@ -1344,8 +1363,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
ret = config->funcs->atomic_check(state->dev, state);
if (ret) {
- DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
- state, ret);
+ drm_dbg_atomic(dev, "atomic driver check for %p failed: %d\n",
+ state, ret);
return ret;
}
}
@@ -1353,15 +1372,17 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (!state->allow_modeset) {
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] requires full modeset\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
}
}
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
- affected_crtc |= drm_crtc_mask(crtc);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->enable)
+ affected_crtc |= drm_crtc_mask(crtc);
+ }
/*
* For commits that allow modesets drivers can add other CRTCs to the
@@ -1374,8 +1395,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
* so compositors know what's going on.
*/
if (affected_crtc != requested_crtc) {
- DRM_DEBUG_ATOMIC("driver added CRTC to commit: requested 0x%x, affected 0x%0x\n",
- requested_crtc, affected_crtc);
+ drm_dbg_atomic(dev,
+ "driver added CRTC to commit: requested 0x%x, affected 0x%0x\n",
+ requested_crtc, affected_crtc);
WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n",
requested_crtc, affected_crtc);
}
@@ -1407,7 +1429,7 @@ int drm_atomic_commit(struct drm_atomic_state *state)
if (ret)
return ret;
- DRM_DEBUG_ATOMIC("committing %p\n", state);
+ drm_dbg_atomic(state->dev, "committing %p\n", state);
return config->funcs->atomic_commit(state->dev, state, false);
}
@@ -1436,7 +1458,7 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
if (ret)
return ret;
- DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
+ drm_dbg_atomic(state->dev, "committing %p nonblocking\n", state);
return config->funcs->atomic_commit(state->dev, state, true);
}
@@ -1633,11 +1655,11 @@ void drm_atomic_print_new_state(const struct drm_atomic_state *state,
int i;
if (!p) {
- DRM_ERROR("invalid drm printer\n");
+ drm_err(state->dev, "invalid drm printer\n");
return;
}
- DRM_DEBUG_ATOMIC("checking %p\n", state);
+ drm_dbg_atomic(state->dev, "checking %p\n", state);
for_each_new_plane_in_state(state, plane, plane_state, i)
drm_atomic_plane_print_state(p, plane_state);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2c0c6ec92820..9603193d2fa1 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -132,9 +132,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
if (new_encoder) {
if (encoder_mask & drm_encoder_mask(new_encoder)) {
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
- new_encoder->base.id, new_encoder->name,
- connector->base.id, connector->name);
+ drm_dbg_atomic(connector->dev,
+ "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
+ new_encoder->base.id, new_encoder->name,
+ connector->base.id, connector->name);
return -EINVAL;
}
@@ -169,11 +170,12 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
continue;
if (!disable_conflicting_encoders) {
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
- encoder->base.id, encoder->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- connector->base.id, connector->name);
+ drm_dbg_atomic(connector->dev,
+ "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
+ encoder->base.id, encoder->name,
+ connector->state->crtc->base.id,
+ connector->state->crtc->name,
+ connector->base.id, connector->name);
ret = -EINVAL;
goto out;
}
@@ -184,10 +186,11 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
goto out;
}
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
- encoder->base.id, encoder->name,
- new_conn_state->crtc->base.id, new_conn_state->crtc->name,
- connector->base.id, connector->name);
+ drm_dbg_atomic(connector->dev,
+ "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
+ encoder->base.id, encoder->name,
+ new_conn_state->crtc->base.id, new_conn_state->crtc->name,
+ connector->base.id, connector->name);
crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
@@ -268,9 +271,10 @@ steal_encoder(struct drm_atomic_state *state,
encoder_crtc = old_connector_state->crtc;
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
- encoder->base.id, encoder->name,
- encoder_crtc->base.id, encoder_crtc->name);
+ drm_dbg_atomic(encoder->dev,
+ "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
+ encoder->base.id, encoder->name,
+ encoder_crtc->base.id, encoder_crtc->name);
set_best_encoder(state, new_connector_state, NULL);
@@ -291,9 +295,8 @@ update_connector_routing(struct drm_atomic_state *state,
struct drm_encoder *new_encoder;
struct drm_crtc_state *crtc_state;
- DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
if (old_connector_state->crtc != new_connector_state->crtc) {
if (old_connector_state->crtc) {
@@ -308,9 +311,8 @@ update_connector_routing(struct drm_atomic_state *state,
}
if (!new_connector_state->crtc) {
- DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
set_best_encoder(state, new_connector_state, NULL);
@@ -339,8 +341,9 @@ update_connector_routing(struct drm_atomic_state *state,
*/
if (!state->duplicated && drm_connector_is_unregistered(connector) &&
crtc_state->active) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
- connector->base.id, connector->name);
+ drm_dbg_atomic(connector->dev,
+ "[CONNECTOR:%d:%s] is not registered\n",
+ connector->base.id, connector->name);
return -EINVAL;
}
@@ -354,31 +357,33 @@ update_connector_routing(struct drm_atomic_state *state,
new_encoder = drm_connector_get_single_encoder(connector);
if (!new_encoder) {
- DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ drm_dbg_atomic(connector->dev,
+ "No suitable encoder found for [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
return -EINVAL;
}
if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
- new_encoder->base.id,
- new_encoder->name,
- new_connector_state->crtc->base.id,
- new_connector_state->crtc->name);
+ drm_dbg_atomic(connector->dev,
+ "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
+ new_encoder->base.id,
+ new_encoder->name,
+ new_connector_state->crtc->base.id,
+ new_connector_state->crtc->name);
return -EINVAL;
}
if (new_encoder == new_connector_state->best_encoder) {
set_best_encoder(state, new_connector_state, new_encoder);
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
- connector->base.id,
- connector->name,
- new_encoder->base.id,
- new_encoder->name,
- new_connector_state->crtc->base.id,
- new_connector_state->crtc->name);
+ drm_dbg_atomic(connector->dev,
+ "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
+ connector->base.id,
+ connector->name,
+ new_encoder->base.id,
+ new_encoder->name,
+ new_connector_state->crtc->base.id,
+ new_connector_state->crtc->name);
return 0;
}
@@ -389,13 +394,14 @@ update_connector_routing(struct drm_atomic_state *state,
crtc_state->connectors_changed = true;
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
- connector->base.id,
- connector->name,
- new_encoder->base.id,
- new_encoder->name,
- new_connector_state->crtc->base.id,
- new_connector_state->crtc->name);
+ drm_dbg_atomic(connector->dev,
+ "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
+ connector->base.id,
+ connector->name,
+ new_encoder->base.id,
+ new_encoder->name,
+ new_connector_state->crtc->base.id,
+ new_connector_state->crtc->name);
return 0;
}
@@ -443,7 +449,7 @@ mode_fixup(struct drm_atomic_state *state)
new_crtc_state,
new_conn_state);
if (ret) {
- DRM_DEBUG_ATOMIC("Bridge atomic check failed\n");
+ drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
return ret;
}
@@ -451,16 +457,18 @@ mode_fixup(struct drm_atomic_state *state)
ret = funcs->atomic_check(encoder, new_crtc_state,
new_conn_state);
if (ret) {
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
- encoder->base.id, encoder->name);
+ drm_dbg_atomic(encoder->dev,
+ "[ENCODER:%d:%s] check failed\n",
+ encoder->base.id, encoder->name);
return ret;
}
} else if (funcs && funcs->mode_fixup) {
ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
&new_crtc_state->adjusted_mode);
if (!ret) {
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
- encoder->base.id, encoder->name);
+ drm_dbg_atomic(encoder->dev,
+ "[ENCODER:%d:%s] fixup failed\n",
+ encoder->base.id, encoder->name);
return -EINVAL;
}
}
@@ -483,8 +491,8 @@ mode_fixup(struct drm_atomic_state *state)
ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
&new_crtc_state->adjusted_mode);
if (!ret) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
}
@@ -502,8 +510,9 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
ret = drm_encoder_mode_valid(encoder, mode);
if (ret != MODE_OK) {
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
- encoder->base.id, encoder->name);
+ drm_dbg_atomic(encoder->dev,
+ "[ENCODER:%d:%s] mode_valid() failed\n",
+ encoder->base.id, encoder->name);
return ret;
}
@@ -511,14 +520,14 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
mode);
if (ret != MODE_OK) {
- DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
+ drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
return ret;
}
ret = drm_crtc_mode_valid(crtc, mode);
if (ret != MODE_OK) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] mode_valid() failed\n",
+ crtc->base.id, crtc->name);
return ret;
}
@@ -619,14 +628,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n",
+ crtc->base.id, crtc->name);
new_crtc_state->mode_changed = true;
}
if (old_crtc_state->enable != new_crtc_state->enable) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n",
+ crtc->base.id, crtc->name);
/*
* For clarity this assignment is done here, but
@@ -641,14 +650,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
}
if (old_crtc_state->active != new_crtc_state->active) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n",
+ crtc->base.id, crtc->name);
new_crtc_state->active_changed = true;
}
if (new_crtc_state->enable != has_connectors) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
@@ -708,10 +717,11 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
- crtc->base.id, crtc->name,
- new_crtc_state->enable ? 'y' : 'n',
- new_crtc_state->active ? 'y' : 'n');
+ drm_dbg_atomic(dev,
+ "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
+ crtc->base.id, crtc->name,
+ new_crtc_state->enable ? 'y' : 'n',
+ new_crtc_state->active ? 'y' : 'n');
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret != 0)
@@ -818,7 +828,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
}
if (!crtc_state->enable && !can_update_disabled) {
- DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
+ drm_dbg_kms(plane_state->plane->dev,
+ "Cannot update plane of a disabled CRTC.\n");
return -EINVAL;
}
@@ -828,7 +839,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (hscale < 0 || vscale < 0) {
- DRM_DEBUG_KMS("Invalid scaling of plane\n");
+ drm_dbg_kms(plane_state->plane->dev,
+ "Invalid scaling of plane\n");
drm_rect_debug_print("src: ", &plane_state->src, true);
drm_rect_debug_print("dst: ", &plane_state->dst, false);
return -ERANGE;
@@ -852,7 +864,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
return 0;
if (!can_position && !drm_rect_equals(dst, &clip)) {
- DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
+ drm_dbg_kms(plane_state->plane->dev,
+ "Plane must cover entire CRTC\n");
drm_rect_debug_print("dst: ", dst, false);
drm_rect_debug_print("clip: ", &clip, false);
return -EINVAL;
@@ -904,8 +917,9 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
ret = funcs->atomic_check(plane, state);
if (ret) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
- plane->base.id, plane->name);
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.id, plane->name);
return ret;
}
}
@@ -920,8 +934,9 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
ret = funcs->atomic_check(crtc, state);
if (ret) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] atomic driver check failed\n",
+ crtc->base.id, crtc->name);
return ret;
}
}
@@ -1001,7 +1016,7 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
* it's in self refresh mode and needs to be fully disabled.
*/
return old_state->active ||
- (old_state->self_refresh_active && !new_state->enable) ||
+ (old_state->self_refresh_active && !new_state->active) ||
new_state->self_refresh_active;
}
@@ -1049,8 +1064,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = encoder->helper_private;
- DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
- encoder->base.id, encoder->name);
+ drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
@@ -1087,8 +1102,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = crtc->helper_private;
- DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
/* Right function depends upon target state. */
@@ -1229,8 +1244,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = crtc->helper_private;
if (new_crtc_state->enable && funcs->mode_set_nofb) {
- DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
funcs->mode_set_nofb(crtc);
}
@@ -1254,8 +1269,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!new_crtc_state->mode_changed)
continue;
- DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
- encoder->base.id, encoder->name);
+ drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
@@ -1357,8 +1372,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
funcs = crtc->helper_private;
if (new_crtc_state->enable) {
- DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
if (funcs->atomic_enable)
funcs->atomic_enable(crtc, old_state);
else if (funcs->commit)
@@ -1381,8 +1396,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
encoder = new_conn_state->best_encoder;
funcs = encoder->helper_private;
- DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
- encoder->base.id, encoder->name);
+ drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
@@ -1551,8 +1566,8 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
- crtc->base.id, crtc->name);
+ drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n",
+ crtc->base.id, crtc->name);
}
if (old_state->fake_commit)
@@ -1739,8 +1754,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
*/
if (old_plane_state->commit &&
!try_wait_for_completion(&old_plane_state->commit->hw_done)) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] inflight previous commit preventing async commit\n",
- plane->base.id, plane->name);
+ drm_dbg_atomic(dev,
+ "[PLANE:%d:%s] inflight previous commit preventing async commit\n",
+ plane->base.id, plane->name);
return -EBUSY;
}
@@ -1962,8 +1978,9 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock)
*/
if (!completed && nonblock) {
spin_unlock(&crtc->commit_lock);
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] busy with a previous commit\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] busy with a previous commit\n",
+ crtc->base.id, crtc->name);
return -EBUSY;
}
@@ -1985,8 +2002,8 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock)
ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
10*HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
- crtc->base.id, crtc->name);
+ drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n",
+ crtc->base.id, crtc->name);
drm_crtc_commit_put(stall_commit);
@@ -2150,8 +2167,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
*/
if (nonblock && old_conn_state->commit &&
!try_wait_for_completion(&old_conn_state->commit->flip_done)) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] busy with a previous commit\n",
- conn->base.id, conn->name);
+ drm_dbg_atomic(conn->dev,
+ "[CONNECTOR:%d:%s] busy with a previous commit\n",
+ conn->base.id, conn->name);
return -EBUSY;
}
@@ -2171,8 +2189,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
*/
if (nonblock && old_plane_state->commit &&
!try_wait_for_completion(&old_plane_state->commit->flip_done)) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] busy with a previous commit\n",
- plane->base.id, plane->name);
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] busy with a previous commit\n",
+ plane->base.id, plane->name);
return -EBUSY;
}
@@ -2218,22 +2237,25 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
ret = drm_crtc_commit_wait(old_crtc_state->commit);
if (ret)
- DRM_ERROR("[CRTC:%d:%s] commit wait timed out\n",
- crtc->base.id, crtc->name);
+ drm_err(crtc->dev,
+ "[CRTC:%d:%s] commit wait timed out\n",
+ crtc->base.id, crtc->name);
}
for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
ret = drm_crtc_commit_wait(old_conn_state->commit);
if (ret)
- DRM_ERROR("[CONNECTOR:%d:%s] commit wait timed out\n",
- conn->base.id, conn->name);
+ drm_err(conn->dev,
+ "[CONNECTOR:%d:%s] commit wait timed out\n",
+ conn->base.id, conn->name);
}
for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
ret = drm_crtc_commit_wait(old_plane_state->commit);
if (ret)
- DRM_ERROR("[PLANE:%d:%s] commit wait timed out\n",
- plane->base.id, plane->name);
+ drm_err(plane->dev,
+ "[PLANE:%d:%s] commit wait timed out\n",
+ plane->base.id, plane->name);
}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
@@ -3120,7 +3142,9 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
ret = drm_atomic_helper_disable_all(dev, &ctx);
if (ret)
- DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
+ drm_err(dev,
+ "Disabling all crtc's during unload failed with %i\n",
+ ret);
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
@@ -3380,8 +3404,9 @@ static int page_flip_common(struct drm_atomic_state *state,
/* Make sure we don't accidentally do a full modeset. */
state->allow_modeset = false;
if (!crtc_state->active) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] disabled, rejecting legacy flip\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 909f31833181..9781722519c3 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -773,7 +773,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->scaling_mode = val;
} else if (property == config->content_protection_property) {
if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
+ drm_dbg_kms(dev, "only drivers can set CP Enabled\n");
return -EINVAL;
}
state->content_protection = val;
@@ -797,6 +797,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
fence_ptr);
} else if (property == connector->max_bpc_property) {
state->max_requested_bpc = val;
+ } else if (property == connector->privacy_screen_sw_state_property) {
+ state->privacy_screen_sw_state = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
@@ -874,6 +876,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = 0;
} else if (property == connector->max_bpc_property) {
*val = state->max_requested_bpc;
+ } else if (property == connector->privacy_screen_sw_state_property) {
+ *val = state->privacy_screen_sw_state;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 60a6b21474b1..6e433d465f41 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -106,7 +106,7 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
auth->magic = file_priv->magic;
mutex_unlock(&dev->master_mutex);
- DRM_DEBUG("%u\n", auth->magic);
+ drm_dbg_core(dev, "%u\n", auth->magic);
return ret < 0 ? ret : 0;
}
@@ -117,7 +117,7 @@ int drm_authmagic(struct drm_device *dev, void *data,
struct drm_auth *auth = data;
struct drm_file *file;
- DRM_DEBUG("%u\n", auth->magic);
+ drm_dbg_core(dev, "%u\n", auth->magic);
mutex_lock(&dev->master_mutex);
file = idr_find(&file_priv->master->magic_map, auth->magic);
@@ -274,7 +274,9 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
}
if (file_priv->master->lessor != NULL) {
- DRM_DEBUG_LEASE("Attempt to set lessee %d as master\n", file_priv->master->lessee_id);
+ drm_dbg_lease(dev,
+ "Attempt to set lessee %d as master\n",
+ file_priv->master->lessee_id);
ret = -EINVAL;
goto out_unlock;
}
@@ -315,7 +317,9 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
}
if (file_priv->master->lessor != NULL) {
- DRM_DEBUG_LEASE("Attempt to drop lessee %d as master\n", file_priv->master->lessee_id);
+ drm_dbg_lease(dev,
+ "Attempt to drop lessee %d as master\n",
+ file_priv->master->lessee_id);
ret = -EINVAL;
goto out_unlock;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 52e20c68813b..a50c82bc2b2f 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -28,6 +28,7 @@
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_sysfs.h>
#include <linux/uaccess.h>
@@ -462,6 +463,11 @@ void drm_connector_cleanup(struct drm_connector *connector)
DRM_CONNECTOR_REGISTERED))
drm_connector_unregister(connector);
+ if (connector->privacy_screen) {
+ drm_privacy_screen_put(connector->privacy_screen);
+ connector->privacy_screen = NULL;
+ }
+
if (connector->tile_group) {
drm_mode_put_tile_group(dev, connector->tile_group);
connector->tile_group = NULL;
@@ -541,7 +547,11 @@ int drm_connector_register(struct drm_connector *connector)
connector->registration_state = DRM_CONNECTOR_REGISTERED;
/* Let userspace know we have a new connector */
- drm_sysfs_hotplug_event(connector->dev);
+ drm_sysfs_connector_hotplug_event(connector);
+
+ if (connector->privacy_screen)
+ drm_privacy_screen_register_notifier(connector->privacy_screen,
+ &connector->privacy_screen_notifier);
mutex_lock(&connector_list_lock);
list_add_tail(&connector->global_connector_list_entry, &connector_list);
@@ -578,6 +588,11 @@ void drm_connector_unregister(struct drm_connector *connector)
list_del_init(&connector->global_connector_list_entry);
mutex_unlock(&connector_list_lock);
+ if (connector->privacy_screen)
+ drm_privacy_screen_unregister_notifier(
+ connector->privacy_screen,
+ &connector->privacy_screen_notifier);
+
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
@@ -1271,6 +1286,46 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* For DVI-I and TVout there is also a matching property "select subconnector"
* allowing to switch between signal types.
* DP subconnector corresponds to a downstream port.
+ *
+ * privacy-screen sw-state, privacy-screen hw-state:
+ * These 2 optional properties can be used to query the state of the
+ * electronic privacy screen that is available on some displays, and in
+ * some cases also to control that state. If a driver implements these
+ * properties then both properties must be present.
+ *
+ * "privacy-screen hw-state" is read-only and reflects the actual state
+ * of the privacy-screen, possible values: "Enabled", "Disabled",
+ * "Enabled-locked", "Disabled-locked". The locked states indicate
+ * that the state cannot be changed through the DRM API. E.g. there
+ * might be devices where the firmware-setup options, or a hardware
+ * slider-switch, offer always on / off modes.
+ *
+ * "privacy-screen sw-state" can be set to change the privacy-screen state
+ * when not locked. In this case the driver must update the hw-state
+ * property to reflect the new state on completion of the commit of the
+ * sw-state property. Setting the sw-state property when the hw-state is
+ * locked must be interpreted by the driver as a request to change the
+ * state to the set state when the hw-state becomes unlocked. E.g. if
+ * "privacy-screen hw-state" is "Enabled-locked" and the sw-state
+ * gets set to "Disabled" followed by the user unlocking the state by
+ * changing the slider-switch position, then the driver must set the
+ * state to "Disabled" upon receiving the unlock event.
+ *
+ * In some cases the privacy-screen's actual state might change outside of
+ * control of the DRM code. E.g. there might be a firmware handled hotkey
+ * which toggles the actual state, or the actual state might be changed
+ * through another userspace API such as writing /proc/acpi/ibm/lcdshadow.
+ * In this case the driver must update both the hw-state and the sw-state
+ * to reflect the new value, overwriting (and thus discarding) any pending
+ * state requests in the sw-state.
+ *
+ * Note that the ability for the state to change outside of control of
+ * the DRM master process means that userspace must not cache the value
+ * of the sw-state. Caching the sw-state value and including it in later
+ * atomic commits may lead to overriding a state change done through e.g.
+ * a firmware handled hotkey. Therefore userspace must not include the
+ * privacy-screen sw-state in an atomic commit unless it wants to change
+ * its value.
*/
int drm_connector_create_standard_properties(struct drm_device *dev)
@@ -2365,6 +2420,154 @@ int drm_connector_set_panel_orientation_with_quirk(
}
EXPORT_SYMBOL(drm_connector_set_panel_orientation_with_quirk);
+static const struct drm_prop_enum_list privacy_screen_enum[] = {
+ { PRIVACY_SCREEN_DISABLED, "Disabled" },
+ { PRIVACY_SCREEN_ENABLED, "Enabled" },
+ { PRIVACY_SCREEN_DISABLED_LOCKED, "Disabled-locked" },
+ { PRIVACY_SCREEN_ENABLED_LOCKED, "Enabled-locked" },
+};
+
+/**
+ * drm_connector_create_privacy_screen_properties - create the drm connector's
+ * privacy-screen properties.
+ * @connector: connector for which to create the privacy-screen properties
+ *
+ * This function creates the "privacy-screen sw-state" and "privacy-screen
+ * hw-state" properties for the connector. They are not attached.
+ */
+void
+drm_connector_create_privacy_screen_properties(struct drm_connector *connector)
+{
+ if (connector->privacy_screen_sw_state_property)
+ return;
+
+ /* Note sw-state only supports the first 2 values of the enum */
+ connector->privacy_screen_sw_state_property =
+ drm_property_create_enum(connector->dev, DRM_MODE_PROP_ENUM,
+ "privacy-screen sw-state",
+ privacy_screen_enum, 2);
+
+ connector->privacy_screen_hw_state_property =
+ drm_property_create_enum(connector->dev,
+ DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ENUM,
+ "privacy-screen hw-state",
+ privacy_screen_enum,
+ ARRAY_SIZE(privacy_screen_enum));
+}
+EXPORT_SYMBOL(drm_connector_create_privacy_screen_properties);
+
+/**
+ * drm_connector_attach_privacy_screen_properties - attach the drm connector's
+ * privacy-screen properties.
+ * @connector: connector on which to attach the privacy-screen properties
+ *
+ * This function attaches the "privacy-screen sw-state" and "privacy-screen
+ * hw-state" properties to the connector. The initial state of both is set
+ * to "Disabled".
+ */
+void
+drm_connector_attach_privacy_screen_properties(struct drm_connector *connector)
+{
+ if (!connector->privacy_screen_sw_state_property)
+ return;
+
+ drm_object_attach_property(&connector->base,
+ connector->privacy_screen_sw_state_property,
+ PRIVACY_SCREEN_DISABLED);
+
+ drm_object_attach_property(&connector->base,
+ connector->privacy_screen_hw_state_property,
+ PRIVACY_SCREEN_DISABLED);
+}
+EXPORT_SYMBOL(drm_connector_attach_privacy_screen_properties);
+
+static void drm_connector_update_privacy_screen_properties(
+ struct drm_connector *connector, bool set_sw_state)
+{
+ enum drm_privacy_screen_status sw_state, hw_state;
+
+ drm_privacy_screen_get_state(connector->privacy_screen,
+ &sw_state, &hw_state);
+
+ if (set_sw_state)
+ connector->state->privacy_screen_sw_state = sw_state;
+ drm_object_property_set_value(&connector->base,
+ connector->privacy_screen_hw_state_property, hw_state);
+}
+
+static int drm_connector_privacy_screen_notifier(
+ struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct drm_connector *connector =
+ container_of(nb, struct drm_connector, privacy_screen_notifier);
+ struct drm_device *dev = connector->dev;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_connector_update_privacy_screen_properties(connector, true);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ drm_sysfs_connector_status_event(connector,
+ connector->privacy_screen_sw_state_property);
+ drm_sysfs_connector_status_event(connector,
+ connector->privacy_screen_hw_state_property);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * drm_connector_attach_privacy_screen_provider - attach a privacy-screen to
+ * the connector
+ * @connector: connector to attach the privacy-screen to
+ * @priv: drm_privacy_screen to attach
+ *
+ * Create and attach the standard privacy-screen properties and register
+ * a generic notifier for generating sysfs-connector-status-events
+ * on external changes to the privacy-screen status.
+ * This function takes ownership of the passed in drm_privacy_screen and will
+ * call drm_privacy_screen_put() on it when the connector is destroyed.
+ */
+void drm_connector_attach_privacy_screen_provider(
+ struct drm_connector *connector, struct drm_privacy_screen *priv)
+{
+ connector->privacy_screen = priv;
+ connector->privacy_screen_notifier.notifier_call =
+ drm_connector_privacy_screen_notifier;
+
+ drm_connector_create_privacy_screen_properties(connector);
+ drm_connector_update_privacy_screen_properties(connector, true);
+ drm_connector_attach_privacy_screen_properties(connector);
+}
+EXPORT_SYMBOL(drm_connector_attach_privacy_screen_provider);
+
+/**
+ * drm_connector_update_privacy_screen - update connector's privacy-screen sw-state
+ * @connector_state: connector-state to update the privacy-screen for
+ *
+ * This function calls drm_privacy_screen_set_sw_state() on the connector's
+ * privacy-screen.
+ *
+ * If the connector has no privacy-screen, then this is a no-op.
+ */
+void drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state)
+{
+ struct drm_connector *connector = connector_state->connector;
+ int ret;
+
+ if (!connector->privacy_screen)
+ return;
+
+ ret = drm_privacy_screen_set_sw_state(connector->privacy_screen,
+ connector_state->privacy_screen_sw_state);
+ if (ret) {
+ drm_err(connector->dev, "Error updating privacy-screen sw_state\n");
+ return;
+ }
+
+ /* The hw_state property value may have changed, update it. */
+ drm_connector_update_privacy_screen_properties(connector, false);
+}
+EXPORT_SYMBOL(drm_connector_update_privacy_screen);
+
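
Taken together, a connector driver would wire these helpers up roughly as follows; a hedged sketch in which only drm_privacy_screen_get() and the drm_connector_*privacy_screen*() calls are the real API, everything else is illustrative:

	static int example_attach_privacy_screen(struct drm_connector *connector)
	{
		struct drm_privacy_screen *ps;

		/* NULL con_id: take the provider bound to this device */
		ps = drm_privacy_screen_get(connector->dev->dev, NULL);
		if (IS_ERR(ps))
			return PTR_ERR(ps); /* may be -EPROBE_DEFER */

		/* The connector takes ownership and put()s it on cleanup */
		drm_connector_attach_privacy_screen_provider(connector, ps);
		return 0;
	}

The commit path then calls drm_connector_update_privacy_screen() with the new connector state for each affected connector once the pipe is enabled.
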
int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 4d0d1e8e51fa..23f9073bc473 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -154,38 +154,155 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor);
-void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
- const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+static int __8b10b_clock_recovery_delay_us(const struct drm_dp_aux *aux, u8 rd_interval)
{
- unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
-
if (rd_interval > 4)
- drm_dbg_kms(aux->drm_dev, "%s: AUX interval %lu, out of range (max 4)\n",
+ drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x (max 4)\n",
aux->name, rd_interval);
- if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
- rd_interval = 100;
- else
- rd_interval *= 4 * USEC_PER_MSEC;
+ if (rd_interval == 0)
+ return 100;
- usleep_range(rd_interval, rd_interval * 2);
+ return rd_interval * 4 * USEC_PER_MSEC;
}
-EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
- unsigned long rd_interval)
+static int __8b10b_channel_eq_delay_us(const struct drm_dp_aux *aux, u8 rd_interval)
{
if (rd_interval > 4)
- drm_dbg_kms(aux->drm_dev, "%s: AUX interval %lu, out of range (max 4)\n",
+ drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x (max 4)\n",
aux->name, rd_interval);
if (rd_interval == 0)
- rd_interval = 400;
+ return 400;
+
+ return rd_interval * 4 * USEC_PER_MSEC;
+}
+
+static int __128b132b_channel_eq_delay_us(const struct drm_dp_aux *aux, u8 rd_interval)
+{
+ switch (rd_interval) {
+ default:
+ drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x\n",
+ aux->name, rd_interval);
+ fallthrough;
+ case DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US:
+ return 400;
+ case DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS:
+ return 4000;
+ case DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS:
+ return 8000;
+ case DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS:
+ return 12000;
+ case DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS:
+ return 16000;
+ case DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS:
+ return 32000;
+ case DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS:
+ return 64000;
+ }
+}
+
+/*
+ * The link training delays are different for:
+ *
+ * - Clock recovery vs. channel equalization
+ * - DPRX vs. LTTPR
+ * - 128b/132b vs. 8b/10b
+ * - DPCD rev 1.3 vs. later
+ *
+ * Get the correct delay in us, reading DPCD if necessary.
+ */
+static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy, bool uhbr, bool cr)
+{
+ int (*parse)(const struct drm_dp_aux *aux, u8 rd_interval);
+ unsigned int offset;
+ u8 rd_interval, mask;
+
+ if (dp_phy == DP_PHY_DPRX) {
+ if (uhbr) {
+ if (cr)
+ return 100;
+
+ offset = DP_128B132B_TRAINING_AUX_RD_INTERVAL;
+ mask = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
+ parse = __128b132b_channel_eq_delay_us;
+ } else {
+ if (cr && dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
+ return 100;
+
+ offset = DP_TRAINING_AUX_RD_INTERVAL;
+ mask = DP_TRAINING_AUX_RD_MASK;
+ if (cr)
+ parse = __8b10b_clock_recovery_delay_us;
+ else
+ parse = __8b10b_channel_eq_delay_us;
+ }
+ } else {
+ if (uhbr) {
+ offset = DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy);
+ mask = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
+ parse = __128b132b_channel_eq_delay_us;
+ } else {
+ if (cr)
+ return 100;
+
+ offset = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy);
+ mask = DP_TRAINING_AUX_RD_MASK;
+ parse = __8b10b_channel_eq_delay_us;
+ }
+ }
+
+ if (offset < DP_RECEIVER_CAP_SIZE) {
+ rd_interval = dpcd[offset];
+ } else {
+ if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) {
+ drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n",
+ aux->name);
+ /* arbitrary default delay */
+ return 400;
+ }
+ }
+
+ return parse(aux, rd_interval & mask);
+}
+
+int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy, bool uhbr)
+{
+ return __read_delay(aux, dpcd, dp_phy, uhbr, true);
+}
+EXPORT_SYMBOL(drm_dp_read_clock_recovery_delay);
+
+int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy, bool uhbr)
+{
+ return __read_delay(aux, dpcd, dp_phy, uhbr, false);
+}
+EXPORT_SYMBOL(drm_dp_read_channel_eq_delay);
+
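
A link-training loop is expected to consume the two new helpers as in this sketch (everything except the helpers themselves is illustrative):

	/* Wait the spec-mandated interval before polling link status during
	 * clock recovery; channel equalization uses
	 * drm_dp_read_channel_eq_delay() the same way.
	 */
	static void example_cr_delay(struct drm_dp_aux *aux,
				     const u8 dpcd[DP_RECEIVER_CAP_SIZE],
				     enum drm_dp_phy dp_phy, bool uhbr)
	{
		int delay_us = drm_dp_read_clock_recovery_delay(aux, dpcd,
								dp_phy, uhbr);

		usleep_range(delay_us, delay_us * 2);
	}
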
+void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ u8 rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
+ int delay_us;
+
+ if (dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
+ delay_us = 100;
else
- rd_interval *= 4 * USEC_PER_MSEC;
+ delay_us = __8b10b_clock_recovery_delay_us(aux, rd_interval);
- usleep_range(rd_interval, rd_interval * 2);
+ usleep_range(delay_us, delay_us * 2);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ u8 rd_interval)
+{
+ int delay_us = __8b10b_channel_eq_delay_us(aux, rd_interval);
+
+ usleep_range(delay_us, delay_us * 2);
}
void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
@@ -3173,6 +3290,10 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
int ret;
u8 buf[2] = { 0 };
+ /* The panel uses the PWM for controlling brightness levels */
+ if (!bl->aux_set)
+ return 0;
+
if (bl->lsb_reg_used) {
buf[0] = (level & 0xff00) >> 8;
buf[1] = (level & 0x00ff);
@@ -3199,7 +3320,7 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
int ret;
u8 buf;
- /* The panel uses something other then DPCD for enabling its backlight */
+ /* This panel uses the EDP_BL_PWR GPIO for enablement */
if (!bl->aux_enable)
return 0;
@@ -3234,11 +3355,11 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
* restoring any important backlight state such as the given backlight level, the brightness byte
* count, backlight frequency, etc.
*
- * Note that certain panels, while supporting brightness level controls over DPCD, may not support
- * having their backlights enabled via the standard %DP_EDP_DISPLAY_CONTROL_REGISTER. On such panels
- * &drm_edp_backlight_info.aux_enable will be set to %false, this function will skip the step of
- * programming the %DP_EDP_DISPLAY_CONTROL_REGISTER, and the driver must perform the required
- * implementation specific step for enabling the backlight after calling this function.
+ * Note that certain panels do not support being enabled or disabled via DPCD, but instead require
+ * that the driver handle enabling/disabling the panel through implementation-specific means using
+ * the EDP_BL_PWR GPIO. For such panels, &drm_edp_backlight_info.aux_enable will be set to %false,
+ * this function becomes a no-op, and the driver is expected to handle powering the panel on using
+ * the EDP_BL_PWR GPIO.
*
* Returns: %0 on success, negative error code on failure.
*/
@@ -3246,27 +3367,18 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
const u16 level)
{
int ret;
- u8 dpcd_buf, new_dpcd_buf;
+ u8 dpcd_buf;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf);
- if (ret != 1) {
- drm_dbg_kms(aux->drm_dev,
- "%s: Failed to read backlight mode: %d\n", aux->name, ret);
- return ret < 0 ? ret : -EIO;
- }
-
- new_dpcd_buf = dpcd_buf;
-
- if ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) != DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
- new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
- new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
+ if (bl->aux_set)
+ dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
+ else
+ dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM;
- if (bl->pwmgen_bit_count) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
- if (ret != 1)
- drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
- aux->name, ret);
- }
+ if (bl->pwmgen_bit_count) {
+ ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
+ if (ret != 1)
+ drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
+ aux->name, ret);
}
if (bl->pwm_freq_pre_divider) {
@@ -3276,16 +3388,14 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
"%s: Failed to write aux backlight frequency: %d\n",
aux->name, ret);
else
- new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
+ dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
}
- if (new_dpcd_buf != dpcd_buf) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf);
- if (ret != 1) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n",
- aux->name, ret);
- return ret < 0 ? ret : -EIO;
- }
+ ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
+ if (ret != 1) {
+ drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n",
+ aux->name, ret);
+ return ret < 0 ? ret : -EIO;
}
ret = drm_edp_backlight_set_level(aux, bl, level);
@@ -3304,12 +3414,13 @@ EXPORT_SYMBOL(drm_edp_backlight_enable);
* @aux: The DP AUX channel to use
* @bl: Backlight capability info from drm_edp_backlight_init()
*
- * This function handles disabling DPCD backlight controls on a panel over AUX. Note that some
- * panels have backlights that are enabled/disabled by other means, despite having their brightness
- * values controlled through DPCD. On such panels &drm_edp_backlight_info.aux_enable will be set to
- * %false, this function will become a no-op (and we will skip updating
- * %DP_EDP_DISPLAY_CONTROL_REGISTER), and the driver must take care to perform it's own
- * implementation specific step for disabling the backlight.
+ * This function handles disabling DPCD backlight controls on a panel over AUX.
+ *
+ * Note that certain panels do not support being enabled or disabled via DPCD, but instead require
+ * that the driver handle enabling/disabling the panel through implementation-specific means using
+ * the EDP_BL_PWR GPIO. For such panels, &drm_edp_backlight_info.aux_enable will be set to %false,
+ * this function becomes a no-op, and the driver is expected to handle powering the panel off using
+ * the EDP_BL_PWR GPIO.
*
* Returns: %0 on success or no-op, negative error code on failure.
*/
@@ -3333,6 +3444,9 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
int ret;
u8 pn, pn_min, pn_max;
+ if (!bl->aux_set)
+ return 0;
+
ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
if (ret != 1) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
@@ -3418,7 +3532,7 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
}
static inline int
-drm_edp_backlight_probe_level(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
u8 *current_mode)
{
int ret;
@@ -3433,6 +3547,9 @@ drm_edp_backlight_probe_level(struct drm_dp_aux *aux, struct drm_edp_backlight_i
}
*current_mode = (mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK);
+ if (!bl->aux_set)
+ return 0;
+
if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
int size = 1 + bl->lsb_reg_used;
@@ -3463,7 +3580,7 @@ drm_edp_backlight_probe_level(struct drm_dp_aux *aux, struct drm_edp_backlight_i
* @bl: The &drm_edp_backlight_info struct to fill out with information on the backlight
* @driver_pwm_freq_hz: Optional PWM frequency from the driver in hz
* @edp_dpcd: A cached copy of the eDP DPCD
- * @current_level: Where to store the probed brightness level
+ * @current_level: Where to store the probed brightness level, if any
* @current_mode: Where to store the currently set backlight control mode
*
 * Initializes a &drm_edp_backlight_info struct by probing @aux for its backlight capabilities,
@@ -3483,24 +3600,38 @@ drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl
if (edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP)
bl->aux_enable = true;
+ if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)
+ bl->aux_set = true;
if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
bl->lsb_reg_used = true;
+ /* Sanity check caps */
+ if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Panel supports neither AUX or PWM brightness control? Aborting\n",
+ aux->name);
+ return -EINVAL;
+ }
+
ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd);
if (ret < 0)
return ret;
- ret = drm_edp_backlight_probe_level(aux, bl, current_mode);
+ ret = drm_edp_backlight_probe_state(aux, bl, current_mode);
if (ret < 0)
return ret;
*current_level = ret;
drm_dbg_kms(aux->drm_dev,
- "%s: Found backlight level=%d/%d pwm_freq_pre_divider=%d mode=%x\n",
- aux->name, *current_level, bl->max, bl->pwm_freq_pre_divider, *current_mode);
- drm_dbg_kms(aux->drm_dev,
- "%s: Backlight caps: pwmgen_bit_count=%d lsb_reg_used=%d aux_enable=%d\n",
- aux->name, bl->pwmgen_bit_count, bl->lsb_reg_used, bl->aux_enable);
+ "%s: Found backlight: aux_set=%d aux_enable=%d mode=%d\n",
+ aux->name, bl->aux_set, bl->aux_enable, *current_mode);
+ if (bl->aux_set) {
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Backlight caps: level=%d/%d pwm_freq_pre_divider=%d lsb_reg_used=%d\n",
+ aux->name, *current_level, bl->max, bl->pwm_freq_pre_divider,
+ bl->lsb_reg_used);
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_edp_backlight_init);
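/*
 * A minimal sketch of the probe-time flow after this change, assuming 'aux'
 * and a cached 'edp_dpcd' are in scope (hypothetical driver code, inside a
 * probe function; error handling trimmed):
 */
struct drm_edp_backlight_info bl = {};
u16 current_level;
u8 current_mode;
int ret;

ret = drm_edp_backlight_init(aux, &bl, 0, edp_dpcd,
			     &current_level, &current_mode);
if (ret < 0)
	return ret;

/* Becomes a no-op on panels driven via the EDP_BL_PWR GPIO */
ret = drm_edp_backlight_enable(aux, &bl, current_level);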
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index f3d79eda94bb..8b3822142fed 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -5511,6 +5511,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
mutex_init(&mgr->topology_ref_history_lock);
+ stack_depot_init();
#endif
INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_LIST_HEAD(&mgr->destroy_port_list);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7a5097467ba5..8214a0b1ab7f 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -43,6 +43,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>
+#include <drm/drm_privacy_screen_machine.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -581,6 +582,7 @@ static int drm_dev_init(struct drm_device *dev,
const struct drm_driver *driver,
struct device *parent)
{
+ struct inode *inode;
int ret;
if (!drm_core_init_complete) {
@@ -617,13 +619,15 @@ static int drm_dev_init(struct drm_device *dev,
if (ret)
return ret;
- dev->anon_inode = drm_fs_inode_new();
- if (IS_ERR(dev->anon_inode)) {
- ret = PTR_ERR(dev->anon_inode);
+ inode = drm_fs_inode_new();
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
goto err;
}
+ dev->anon_inode = inode;
+
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
if (ret)
@@ -1029,6 +1033,7 @@ static const struct file_operations drm_stub_fops = {
static void drm_core_exit(void)
{
+ drm_privacy_screen_lookup_exit();
unregister_chrdev(DRM_MAJOR, "drm");
debugfs_remove(drm_debugfs_root);
drm_sysfs_destroy();
@@ -1056,6 +1061,8 @@ static int __init drm_core_init(void)
if (ret < 0)
goto error;
+ drm_privacy_screen_lookup_init();
+
drm_core_init_complete = true;
DRM_DEBUG("Initialized\n");
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 22bf690910b2..ed43b987d306 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2344,7 +2344,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
return PTR_ERR(fbi);
fbi->fbops = &drm_fbdev_fb_ops;
- fbi->screen_size = fb->height * fb->pitches[0];
+ fbi->screen_size = sizes->surface_height * fb->pitches[0];
fbi->fix.smem_len = fbi->screen_size;
drm_fb_helper_fill_info(fbi, fb_helper, sizes);
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 69fde60e36b3..0f28dd2bdd72 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -17,71 +17,91 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
-static unsigned int clip_offset(struct drm_rect *clip,
- unsigned int pitch, unsigned int cpp)
+static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp)
{
return clip->y1 * pitch + clip->x1 * cpp;
}
/**
+ * drm_fb_clip_offset - Returns the clip rectangle's byte offset in a framebuffer
+ * @pitch: Framebuffer line pitch in bytes
+ * @format: Framebuffer format
+ * @clip: Clip rectangle
+ *
+ * Returns:
+ * The byte offset of the clip rectangle's top-left corner within the framebuffer.
+ */
+unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format,
+ const struct drm_rect *clip)
+{
+ return clip_offset(clip, pitch, format->cpp[0]);
+}
+EXPORT_SYMBOL(drm_fb_clip_offset);
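/*
 * Worked example with illustrative values: for an XRGB8888 framebuffer
 * (cpp = 4) with a 4096-byte pitch and a clip starting at x1 = 16, y1 = 10,
 * the helper returns 10 * 4096 + 16 * 4 = 41024, the byte offset of the
 * clip's top-left pixel.
 */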
+
+/**
* drm_fb_memcpy - Copy clip buffer
* @dst: Destination buffer
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @vaddr: Source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* This function does not apply clipping on dst, i.e. the destination
- * is a small buffer containing the clip rect only.
+ * is at the top-left corner.
*/
-void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
+void drm_fb_memcpy(void *dst, unsigned int dst_pitch, const void *vaddr,
+ const struct drm_framebuffer *fb, const struct drm_rect *clip)
{
unsigned int cpp = fb->format->cpp[0];
size_t len = (clip->x2 - clip->x1) * cpp;
unsigned int y, lines = clip->y2 - clip->y1;
+ if (!dst_pitch)
+ dst_pitch = len;
+
vaddr += clip_offset(clip, fb->pitches[0], cpp);
for (y = 0; y < lines; y++) {
memcpy(dst, vaddr, len);
vaddr += fb->pitches[0];
- dst += len;
+ dst += dst_pitch;
}
}
EXPORT_SYMBOL(drm_fb_memcpy);
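/*
 * The new dst_pitch argument keeps both call styles available; a sketch with
 * assumed destination buffers ('shadow', 'screen' and 'screen_pitch' are
 * hypothetical):
 */
drm_fb_memcpy(shadow, 0, vaddr, fb, &clip);            /* packed, pitch = clip width * cpp */
drm_fb_memcpy(screen, screen_pitch, vaddr, fb, &clip); /* explicitly pitched destination */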
/**
- * drm_fb_memcpy_dstclip - Copy clip buffer
+ * drm_fb_memcpy_toio - Copy clip buffer
* @dst: Destination buffer (iomem)
* @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @vaddr: Source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * This function applies clipping on dst, i.e. the destination is a
- * full (iomem) framebuffer but only the clip rect content is copied over.
+ * This function does not apply clipping on dst, i.e. the destination
+ * is at the top-left corner.
*/
-void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch,
- void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
+void drm_fb_memcpy_toio(void __iomem *dst, unsigned int dst_pitch, const void *vaddr,
+ const struct drm_framebuffer *fb, const struct drm_rect *clip)
{
unsigned int cpp = fb->format->cpp[0];
- unsigned int offset = clip_offset(clip, dst_pitch, cpp);
size_t len = (clip->x2 - clip->x1) * cpp;
unsigned int y, lines = clip->y2 - clip->y1;
- vaddr += offset;
- dst += offset;
+ if (!dst_pitch)
+ dst_pitch = len;
+
+ vaddr += clip_offset(clip, fb->pitches[0], cpp);
for (y = 0; y < lines; y++) {
memcpy_toio(dst, vaddr, len);
vaddr += fb->pitches[0];
dst += dst_pitch;
}
}
-EXPORT_SYMBOL(drm_fb_memcpy_dstclip);
+EXPORT_SYMBOL(drm_fb_memcpy_toio);
/**
* drm_fb_swab - Swap bytes into clip buffer
* @dst: Destination buffer
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @src: Source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
@@ -91,21 +111,27 @@ EXPORT_SYMBOL(drm_fb_memcpy_dstclip);
* time to speed up slow uncached reads.
*
* This function does not apply clipping on dst, i.e. the destination
- * is a small buffer containing the clip rect only.
+ * is at the top-left corner.
*/
-void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool cached)
+void drm_fb_swab(void *dst, unsigned int dst_pitch, const void *src,
+ const struct drm_framebuffer *fb, const struct drm_rect *clip,
+ bool cached)
{
u8 cpp = fb->format->cpp[0];
size_t len = drm_rect_width(clip) * cpp;
- u16 *src16, *dst16 = dst;
- u32 *src32, *dst32 = dst;
+ const u16 *src16;
+ const u32 *src32;
+ u16 *dst16;
+ u32 *dst32;
unsigned int x, y;
void *buf = NULL;
if (WARN_ON_ONCE(cpp != 2 && cpp != 4))
return;
+ if (!dst_pitch)
+ dst_pitch = len;
+
if (!cached)
buf = kmalloc(len, GFP_KERNEL);
@@ -121,6 +147,9 @@ void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
src32 = src;
}
+ dst16 = dst;
+ dst32 = dst;
+
for (x = clip->x1; x < clip->x2; x++) {
if (cpp == 4)
*dst32++ = swab32(*src32++);
@@ -129,13 +158,14 @@ void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
}
src += fb->pitches[0];
+ dst += dst_pitch;
}
kfree(buf);
}
EXPORT_SYMBOL(drm_fb_swab);
-static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, __le32 *sbuf, unsigned int pixels)
+static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, const __le32 *sbuf, unsigned int pixels)
{
unsigned int x;
u32 pix;
@@ -151,23 +181,24 @@ static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, __le32 *sbuf, unsigned int
/**
* drm_fb_xrgb8888_to_rgb332 - Convert XRGB8888 to RGB332 clip buffer
* @dst: RGB332 destination buffer
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @src: XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* Drivers can use this function for RGB332 devices that don't natively support XRGB8888.
- *
- * This function does not apply clipping on dst, i.e. the destination is a small buffer
- * containing the clip rect only.
*/
-void drm_fb_xrgb8888_to_rgb332(void *dst, void *src, struct drm_framebuffer *fb,
- struct drm_rect *clip)
+void drm_fb_xrgb8888_to_rgb332(void *dst, unsigned int dst_pitch, const void *src,
+ const struct drm_framebuffer *fb, const struct drm_rect *clip)
{
size_t width = drm_rect_width(clip);
size_t src_len = width * sizeof(u32);
unsigned int y;
void *sbuf;
+ if (!dst_pitch)
+ dst_pitch = width;
+
/* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */
sbuf = kmalloc(src_len, GFP_KERNEL);
if (!sbuf)
@@ -178,14 +209,14 @@ void drm_fb_xrgb8888_to_rgb332(void *dst, void *src, struct drm_framebuffer *fb,
memcpy(sbuf, src, src_len);
drm_fb_xrgb8888_to_rgb332_line(dst, sbuf, width);
src += fb->pitches[0];
- dst += width;
+ dst += dst_pitch;
}
kfree(sbuf);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
-static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf,
+static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, const u32 *sbuf,
unsigned int pixels,
bool swab)
{
@@ -206,6 +237,7 @@ static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf,
/**
* drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
* @dst: RGB565 destination buffer
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @vaddr: XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
@@ -213,13 +245,10 @@ static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf,
*
* Drivers can use this function for RGB565 devices that don't natively
* support XRGB8888.
- *
- * This function does not apply clipping on dst, i.e. the destination
- * is a small buffer containing the clip rect only.
*/
-void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swab)
+void drm_fb_xrgb8888_to_rgb565(void *dst, unsigned int dst_pitch, const void *vaddr,
+ const struct drm_framebuffer *fb, const struct drm_rect *clip,
+ bool swab)
{
size_t linepixels = clip->x2 - clip->x1;
size_t src_len = linepixels * sizeof(u32);
@@ -227,6 +256,9 @@ void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
unsigned y, lines = clip->y2 - clip->y1;
void *sbuf;
+ if (!dst_pitch)
+ dst_pitch = dst_len;
+
/*
* The cma memory is write-combined so reads are uncached.
* Speed up by fetching one line at a time.
@@ -240,7 +272,7 @@ void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
memcpy(sbuf, vaddr, src_len);
drm_fb_xrgb8888_to_rgb565_line(dst, sbuf, linepixels, swab);
vaddr += fb->pitches[0];
- dst += dst_len;
+ dst += dst_pitch;
}
kfree(sbuf);
@@ -248,9 +280,9 @@ void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
/**
- * drm_fb_xrgb8888_to_rgb565_dstclip - Convert XRGB8888 to RGB565 clip buffer
+ * drm_fb_xrgb8888_to_rgb565_toio - Convert XRGB8888 to RGB565 clip buffer
* @dst: RGB565 destination buffer (iomem)
- * @dst_pitch: destination buffer pitch
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @vaddr: XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
@@ -258,37 +290,36 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
*
* Drivers can use this function for RGB565 devices that don't natively
* support XRGB8888.
- *
- * This function applies clipping on dst, i.e. the destination is a
- * full (iomem) framebuffer but only the clip rect content is copied over.
*/
-void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
- void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swab)
+void drm_fb_xrgb8888_to_rgb565_toio(void __iomem *dst, unsigned int dst_pitch,
+ const void *vaddr, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool swab)
{
size_t linepixels = clip->x2 - clip->x1;
size_t dst_len = linepixels * sizeof(u16);
unsigned y, lines = clip->y2 - clip->y1;
void *dbuf;
+ if (!dst_pitch)
+ dst_pitch = dst_len;
+
dbuf = kmalloc(dst_len, GFP_KERNEL);
if (!dbuf)
return;
vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
- dst += clip_offset(clip, dst_pitch, sizeof(u16));
for (y = 0; y < lines; y++) {
drm_fb_xrgb8888_to_rgb565_line(dbuf, vaddr, linepixels, swab);
memcpy_toio(dst, dbuf, dst_len);
vaddr += fb->pitches[0];
- dst += dst_len;
+ dst += dst_pitch;
}
kfree(dbuf);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_dstclip);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_toio);
-static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf,
+static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, const u32 *sbuf,
unsigned int pixels)
{
unsigned int x;
@@ -303,24 +334,25 @@ static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf,
/**
* drm_fb_xrgb8888_to_rgb888 - Convert XRGB8888 to RGB888 clip buffer
* @dst: RGB888 destination buffer
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @src: XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* Drivers can use this function for RGB888 devices that don't natively
* support XRGB8888.
- *
- * This function does not apply clipping on dst, i.e. the destination
- * is a small buffer containing the clip rect only.
*/
-void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb,
- struct drm_rect *clip)
+void drm_fb_xrgb8888_to_rgb888(void *dst, unsigned int dst_pitch, const void *src,
+ const struct drm_framebuffer *fb, const struct drm_rect *clip)
{
size_t width = drm_rect_width(clip);
size_t src_len = width * sizeof(u32);
unsigned int y;
void *sbuf;
+ if (!dst_pitch)
+ dst_pitch = width * 3;
+
/* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */
sbuf = kmalloc(src_len, GFP_KERNEL);
if (!sbuf)
@@ -331,7 +363,7 @@ void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb,
memcpy(sbuf, src, src_len);
drm_fb_xrgb8888_to_rgb888_line(dst, sbuf, width);
src += fb->pitches[0];
- dst += width * 3;
+ dst += dst_pitch;
}
kfree(sbuf);
@@ -339,48 +371,103 @@ void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb,
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
/**
- * drm_fb_xrgb8888_to_rgb888_dstclip - Convert XRGB8888 to RGB888 clip buffer
+ * drm_fb_xrgb8888_to_rgb888_toio - Convert XRGB8888 to RGB888 clip buffer
 * @dst: RGB888 destination buffer (iomem)
- * @dst_pitch: destination buffer pitch
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @vaddr: XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
* Drivers can use this function for RGB888 devices that don't natively
* support XRGB8888.
- *
- * This function applies clipping on dst, i.e. the destination is a
- * full (iomem) framebuffer but only the clip rect content is copied over.
*/
-void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
- void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
+void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch,
+ const void *vaddr, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
size_t linepixels = clip->x2 - clip->x1;
size_t dst_len = linepixels * 3;
unsigned y, lines = clip->y2 - clip->y1;
void *dbuf;
+ if (!dst_pitch)
+ dst_pitch = dst_len;
+
dbuf = kmalloc(dst_len, GFP_KERNEL);
if (!dbuf)
return;
vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
- dst += clip_offset(clip, dst_pitch, sizeof(u16));
for (y = 0; y < lines; y++) {
drm_fb_xrgb8888_to_rgb888_line(dbuf, vaddr, linepixels);
memcpy_toio(dst, dbuf, dst_len);
vaddr += fb->pitches[0];
- dst += dst_len;
+ dst += dst_pitch;
}
kfree(dbuf);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_dstclip);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_toio);
+
+static void drm_fb_xrgb8888_to_xrgb2101010_line(u32 *dbuf, const u32 *sbuf,
+ unsigned int pixels)
+{
+ unsigned int x;
+ u32 val32;
+
+ for (x = 0; x < pixels; x++) {
+ val32 = ((sbuf[x] & 0x000000FF) << 2) |
+ ((sbuf[x] & 0x0000FF00) << 4) |
+ ((sbuf[x] & 0x00FF0000) << 6);
+ *dbuf++ = val32 | ((val32 >> 8) & 0x00300C03);
+ }
+}
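/*
 * The shifts place each 8-bit channel in the top bits of its 10-bit field;
 * OR-ing in (val32 >> 8) & 0x00300C03 replicates the two MSBs into the low
 * bits so full-scale values stay full-scale. Worked example for a white
 * pixel, sbuf[x] = 0x00ffffff:
 *   val32 = (0xff << 2) | (0xff00 << 4) | (0xff0000 << 6) = 0x3fcff3fc
 *   val32 | ((val32 >> 8) & 0x00300c03)                   = 0x3fffffff
 * i.e. every 0xff channel expands to the 10-bit maximum 0x3ff.
 */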
+
+/**
+ * drm_fb_xrgb8888_to_xrgb2101010_toio - Convert XRGB8888 to XRGB2101010 clip
+ * buffer
+ * @dst: XRGB2101010 destination buffer (iomem)
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for XRGB2101010 devices that don't natively
+ * support XRGB8888.
+ */
+void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst,
+ unsigned int dst_pitch, const void *vaddr,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t dst_len = linepixels * sizeof(u32);
+ unsigned int y, lines = clip->y2 - clip->y1;
+ void *dbuf;
+
+ if (!dst_pitch)
+ dst_pitch = dst_len;
+
+ dbuf = kmalloc(dst_len, GFP_KERNEL);
+ if (!dbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < lines; y++) {
+ drm_fb_xrgb8888_to_xrgb2101010_line(dbuf, vaddr, linepixels);
+ memcpy_toio(dst, dbuf, dst_len);
+ vaddr += fb->pitches[0];
+ dst += dst_pitch;
+ }
+
+ kfree(dbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio);
/**
* drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
* @dst: 8-bit grayscale destination buffer
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @vaddr: XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
@@ -394,16 +481,21 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_dstclip);
*
* ITU BT.601 is used for the RGB -> luma (brightness) conversion.
*/
-void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
+void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr,
+ const struct drm_framebuffer *fb, const struct drm_rect *clip)
{
unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
unsigned int x, y;
void *buf;
- u32 *src;
+ u8 *dst8;
+ u32 *src32;
if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888))
return;
+
+ if (!dst_pitch)
+ dst_pitch = drm_rect_width(clip);
+
/*
* The cma memory is write-combined so reads are uncached.
* Speed up by fetching one line at a time.
@@ -412,20 +504,22 @@ void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
if (!buf)
return;
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
+ dst8 = dst;
+ src32 = memcpy(buf, vaddr, len);
for (x = clip->x1; x < clip->x2; x++) {
- u8 r = (*src & 0x00ff0000) >> 16;
- u8 g = (*src & 0x0000ff00) >> 8;
- u8 b = *src & 0x000000ff;
+ u8 r = (*src32 & 0x00ff0000) >> 16;
+ u8 g = (*src32 & 0x0000ff00) >> 8;
+ u8 b = *src32 & 0x000000ff;
/* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dst++ = (3 * r + 6 * g + b) / 10;
- src++;
+ *dst8++ = (3 * r + 6 * g + b) / 10;
+ src32++;
}
+
+ vaddr += fb->pitches[0];
+ dst += dst_pitch;
}
kfree(buf);
@@ -433,7 +527,7 @@ void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
/**
- * drm_fb_blit_rect_dstclip - Copy parts of a framebuffer to display memory
+ * drm_fb_blit_toio - Copy parts of a framebuffer to display memory
* @dst: The display memory to copy to
* @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @dst_format: FOURCC code of the display's color format
@@ -445,17 +539,14 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
* formats of the display and the framebuffer mismatch, the blit function
* will attempt to convert between them.
*
- * Use drm_fb_blit_dstclip() to copy the full framebuffer.
- *
* Returns:
* 0 on success, or
* -EINVAL if the color-format conversion failed, or
* a negative error code otherwise.
*/
-int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch,
- uint32_t dst_format, void *vmap,
- struct drm_framebuffer *fb,
- struct drm_rect *clip)
+int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_format,
+ const void *vmap, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
uint32_t fb_format = fb->format->format;
@@ -464,58 +555,32 @@ int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch,
fb_format = DRM_FORMAT_XRGB8888;
if (dst_format == DRM_FORMAT_ARGB8888)
dst_format = DRM_FORMAT_XRGB8888;
+ if (fb_format == DRM_FORMAT_ARGB2101010)
+ fb_format = DRM_FORMAT_XRGB2101010;
+ if (dst_format == DRM_FORMAT_ARGB2101010)
+ dst_format = DRM_FORMAT_XRGB2101010;
if (dst_format == fb_format) {
- drm_fb_memcpy_dstclip(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_memcpy_toio(dst, dst_pitch, vmap, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_RGB565) {
if (fb_format == DRM_FORMAT_XRGB8888) {
- drm_fb_xrgb8888_to_rgb565_dstclip(dst, dst_pitch,
- vmap, fb, clip,
- false);
+ drm_fb_xrgb8888_to_rgb565_toio(dst, dst_pitch, vmap, fb, clip, false);
return 0;
}
} else if (dst_format == DRM_FORMAT_RGB888) {
if (fb_format == DRM_FORMAT_XRGB8888) {
- drm_fb_xrgb8888_to_rgb888_dstclip(dst, dst_pitch,
- vmap, fb, clip);
+ drm_fb_xrgb8888_to_rgb888_toio(dst, dst_pitch, vmap, fb, clip);
+ return 0;
+ }
+ } else if (dst_format == DRM_FORMAT_XRGB2101010) {
+ if (fb_format == DRM_FORMAT_XRGB8888) {
+ drm_fb_xrgb8888_to_xrgb2101010_toio(dst, dst_pitch, vmap, fb, clip);
return 0;
}
}
return -EINVAL;
}
-EXPORT_SYMBOL(drm_fb_blit_rect_dstclip);
-
-/**
- * drm_fb_blit_dstclip - Copy framebuffer to display memory
- * @dst: The display memory to copy to
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @dst_format: FOURCC code of the display's color format
- * @vmap: The framebuffer memory to copy from
- * @fb: The framebuffer to copy from
- *
- * This function copies a full framebuffer to display memory. If the formats
- * of the display and the framebuffer mismatch, the copy function will
- * attempt to convert between them.
- *
- * See drm_fb_blit_rect_dstclip() for more information.
- *
- * Returns:
- * 0 on success, or a negative error code otherwise.
- */
-int drm_fb_blit_dstclip(void __iomem *dst, unsigned int dst_pitch,
- uint32_t dst_format, void *vmap,
- struct drm_framebuffer *fb)
-{
- struct drm_rect fullscreen = {
- .x1 = 0,
- .x2 = fb->width,
- .y1 = 0,
- .y2 = fb->height,
- };
- return drm_fb_blit_rect_dstclip(dst, dst_pitch, dst_format, vmap, fb,
- &fullscreen);
-}
-EXPORT_SYMBOL(drm_fb_blit_dstclip);
+EXPORT_SYMBOL(drm_fb_blit_toio);
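/*
 * Since the _toio helpers no longer apply destination clipping, callers that
 * write into a full framebuffer pair them with drm_fb_clip_offset(). A sketch
 * with assumed fbdev-style variables ('screen_base', 'screen_pitch',
 * 'dst_format' and 'vmap' are hypothetical):
 */
void __iomem *dst = screen_base + drm_fb_clip_offset(screen_pitch, fb->format, &rect);
int ret = drm_fb_blit_toio(dst, screen_pitch, dst_format, vmap, fb, &rect);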
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 25837b1d6639..07741b678798 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -269,6 +269,9 @@ const struct drm_format_info *__drm_format_info(u32 format)
.num_planes = 3, .char_per_block = { 2, 2, 2 },
.block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0,
.vsub = 0, .is_yuv = true },
+ { .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2,
+ .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
};
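/*
 * For reference on the new P030 entry: the format packs three 10-bit luma
 * samples MSB-aligned into one 32-bit word, hence a 3-pixel-wide block needs
 * char_per_block = 4 bytes (3 * 10 = 30 bits <= 32); the 2x2-subsampled
 * CbCr plane packs three Cb/Cr pairs the same way into 8 bytes per block.
 */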
unsigned int i;
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index e570398abd78..c3189afe10cb 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -143,6 +143,7 @@
*/
int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
+ struct dma_resv_iter cursor;
struct drm_gem_object *obj;
struct dma_fence *fence;
@@ -150,9 +151,18 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
return 0;
obj = drm_gem_fb_get_obj(state->fb, 0);
- fence = dma_resv_get_excl_unlocked(obj->resv);
- drm_atomic_set_fence_for_plane(state, fence);
+ dma_resv_iter_begin(&cursor, obj->resv, false);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ /* TODO: Currently there should be only one write fence, so this
+ * here works fine. But drm_atomic_set_fence_for_plane() should
+ * be changed to be able to handle more fences in general for
+ * multiple BOs per fb anyway. */
+ dma_fence_get(fence);
+ break;
+ }
+ dma_resv_iter_end(&cursor);
+ drm_atomic_set_fence_for_plane(state, fence);
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 9d05674550a4..cefd0cbf9deb 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -31,14 +32,18 @@
* The DRM GEM/CMA helpers use this allocator as a means to provide buffer
* objects that are physically contiguous in memory. This is useful for
* display drivers that are unable to map scattered buffers via an IOMMU.
+ *
+ * For GEM callback helpers in struct &drm_gem_object functions, see the
+ * correspondingly named functions with an _object_ infix (e.g.,
+ * drm_gem_cma_object_vmap() wraps drm_gem_cma_vmap()). These helpers perform
+ * the necessary type conversion.
*/
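/*
 * A sketch of the assumed wrapper shape behind the _object_ naming convention
 * described above (the real definitions live in the drm_gem_cma_helper
 * header):
 */
static void drm_gem_cma_object_free(struct drm_gem_object *obj)
{
	drm_gem_cma_free(to_drm_gem_cma_obj(obj));
}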
static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
- .free = drm_gem_cma_free_object,
- .print_info = drm_gem_cma_print_info,
- .get_sg_table = drm_gem_cma_get_sg_table,
- .vmap = drm_gem_cma_vmap,
- .mmap = drm_gem_cma_mmap,
+ .free = drm_gem_cma_object_free,
+ .print_info = drm_gem_cma_object_print_info,
+ .get_sg_table = drm_gem_cma_object_get_sg_table,
+ .vmap = drm_gem_cma_object_vmap,
+ .mmap = drm_gem_cma_object_mmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
@@ -62,18 +67,21 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
struct drm_gem_object *gem_obj;
int ret = 0;
- if (drm->driver->gem_create_object)
+ if (drm->driver->gem_create_object) {
gem_obj = drm->driver->gem_create_object(drm, size);
- else
- gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
- if (!gem_obj)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(gem_obj))
+ return ERR_CAST(gem_obj);
+ cma_obj = to_drm_gem_cma_obj(gem_obj);
+ } else {
+ cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!cma_obj)
+ return ERR_PTR(-ENOMEM);
+ gem_obj = &cma_obj->base;
+ }
if (!gem_obj->funcs)
gem_obj->funcs = &drm_gem_cma_default_funcs;
- cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
-
if (private) {
drm_gem_private_object_init(drm, gem_obj, size);
@@ -191,18 +199,16 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
}
/**
- * drm_gem_cma_free_object - free resources associated with a CMA GEM object
- * @gem_obj: GEM object to free
+ * drm_gem_cma_free - free resources associated with a CMA GEM object
+ * @cma_obj: CMA GEM object to free
*
* This function frees the backing memory of the CMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
* If the buffer is imported and the virtual address is set, it is released.
- * Drivers using the CMA helpers should set this as their
- * &drm_gem_object_funcs.free callback.
*/
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
+ struct drm_gem_object *gem_obj = &cma_obj->base;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);
if (gem_obj->import_attach) {
@@ -223,7 +229,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
kfree(cma_obj);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+EXPORT_SYMBOL_GPL(drm_gem_cma_free);
/**
* drm_gem_cma_dumb_create_internal - create a dumb buffer object
@@ -370,18 +376,15 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
/**
* drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
+ * @cma_obj: CMA GEM object
* @p: DRM printer
* @indent: Tab indentation level
- * @obj: GEM object
*
- * This function can be used as the &drm_driver->gem_print_info callback.
- * It prints paddr and vaddr for use in e.g. debugfs output.
+ * This function prints paddr and vaddr for use in e.g. debugfs output.
*/
-void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj)
+void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
+ struct drm_printer *p, unsigned int indent)
{
- const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
@@ -390,18 +393,17 @@ EXPORT_SYMBOL(drm_gem_cma_print_info);
/**
* drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
* pages for a CMA GEM object
- * @obj: GEM object
+ * @cma_obj: CMA GEM object
*
- * This function exports a scatter/gather table by
- * calling the standard DMA mapping API. Drivers using the CMA helpers should
- * set this as their &drm_gem_object_funcs.get_sg_table callback.
+ * This function exports a scatter/gather table by calling the standard
+ * DMA mapping API.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
*/
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj)
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+ struct drm_gem_object *obj = &cma_obj->base;
struct sg_table *sgt;
int ret;
@@ -467,23 +469,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
/**
* drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
* address space
- * @obj: GEM object
+ * @cma_obj: CMA GEM object
* @map: Returns the kernel virtual address of the CMA GEM object's backing
* store.
*
- * This function maps a buffer into the kernel's
- * virtual address space. Since the CMA buffers are already mapped into the
- * kernel virtual address space this simply returns the cached virtual
- * address. Drivers using the CMA helpers should set this as their DRM
- * driver's &drm_gem_object_funcs.vmap callback.
+ * This function maps a buffer into the kernel's virtual address space.
+ * Since the CMA buffers are already mapped into the kernel virtual address
+ * space this simply returns the cached virtual address.
*
* Returns:
* 0 on success, or a negative error code otherwise.
*/
-int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map)
{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
dma_buf_map_set_vaddr(map, cma_obj->vaddr);
return 0;
@@ -492,20 +490,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
/**
* drm_gem_cma_mmap - memory-map an exported CMA GEM object
- * @obj: GEM object
+ * @cma_obj: CMA GEM object
* @vma: VMA for the area to be mapped
*
* This function maps a buffer into a userspace process's address space.
* In addition to the usual GEM VMA setup it immediately faults in the entire
- * object instead of using on-demand faulting. Drivers that use the CMA
- * helpers should set this as their &drm_gem_object_funcs.mmap callback.
+ * object instead of using on-demand faulting.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *obj = &cma_obj->base;
int ret;
/*
@@ -516,8 +513,6 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags &= ~VM_PFNMAP;
- cma_obj = to_drm_gem_cma_obj(obj);
-
if (cma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
@@ -583,3 +578,7 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
return obj;
}
EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
+
+MODULE_DESCRIPTION("DRM CMA memory-management helpers");
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index bca0de92802e..621924116eb4 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -5,6 +5,7 @@
#include <linux/dma-buf.h>
#include <linux/export.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
@@ -29,17 +30,22 @@ MODULE_IMPORT_NS(DMA_BUF);
*
* This library provides helpers for GEM objects backed by shmem buffers
* allocated using anonymous pageable memory.
+ *
+ * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
+ * For GEM callback helpers in struct &drm_gem_object functions, see the
+ * correspondingly named functions with an _object_ infix (e.g.,
+ * drm_gem_shmem_object_vmap() wraps drm_gem_shmem_vmap()). These helpers
+ * perform the necessary type conversion.
*/
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
- .free = drm_gem_shmem_free_object,
- .print_info = drm_gem_shmem_print_info,
- .pin = drm_gem_shmem_pin,
- .unpin = drm_gem_shmem_unpin,
- .get_sg_table = drm_gem_shmem_get_sg_table,
- .vmap = drm_gem_shmem_vmap,
- .vunmap = drm_gem_shmem_vunmap,
- .mmap = drm_gem_shmem_mmap,
+ .free = drm_gem_shmem_object_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
};
static struct drm_gem_shmem_object *
@@ -51,14 +57,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
size = PAGE_ALIGN(size);
- if (dev->driver->gem_create_object)
+ if (dev->driver->gem_create_object) {
obj = dev->driver->gem_create_object(dev, size);
- else
- obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
- shmem = to_drm_gem_shmem_obj(obj);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+ shmem = to_drm_gem_shmem_obj(obj);
+ } else {
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return ERR_PTR(-ENOMEM);
+ obj = &shmem->base;
+ }
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
@@ -119,16 +128,15 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
/**
- * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
- * @obj: GEM object to free
+ * drm_gem_shmem_free - Free resources associated with a shmem GEM object
+ * @shmem: shmem GEM object to free
*
* This function cleans up the GEM object state and frees the memory used to
- * store the object itself. It should be used to implement
- * &drm_gem_object_funcs.free.
+ * store the object itself.
*/
-void drm_gem_shmem_free_object(struct drm_gem_object *obj)
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct drm_gem_object *obj = &shmem->base;
WARN_ON(shmem->vmap_use_count);
@@ -152,7 +160,7 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
mutex_destroy(&shmem->vmap_lock);
kfree(shmem);
}
-EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
@@ -247,19 +255,16 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
/**
* drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
- * @obj: GEM object
+ * @shmem: shmem GEM object
*
* This function makes sure the backing pages are pinned in memory while the
- * buffer is exported. It should only be used to implement
- * &drm_gem_object_funcs.pin.
+ * buffer is exported.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_pin(struct drm_gem_object *obj)
+int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-
WARN_ON(shmem->base.import_attach);
return drm_gem_shmem_get_pages(shmem);
@@ -268,15 +273,13 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
/**
* drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
- * @obj: GEM object
+ * @shmem: shmem GEM object
*
* This function removes the requirement that the backing pages are pinned in
- * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
+ * memory.
*/
-void drm_gem_shmem_unpin(struct drm_gem_object *obj)
+void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-
WARN_ON(shmem->base.import_attach);
drm_gem_shmem_put_pages(shmem);
@@ -342,20 +345,16 @@ err_zero_use:
* store.
*
* This function makes sure that a contiguous kernel virtual address mapping
- * exists for the buffer backing the shmem GEM object.
- *
- * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
- * also be called by drivers directly, in which case it will hide the
- * differences between dma-buf imported and natively allocated objects.
+ * exists for the buffer backing the shmem GEM object. It hides the differences
+ * between dma-buf imported and natively allocated objects.
*
* Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
int ret;
ret = mutex_lock_interruptible(&shmem->vmap_lock);
@@ -398,21 +397,18 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
* drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
* zero.
*
- * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
- * also be called by drivers directly, in which case it will hide the
- * differences between dma-buf imported and natively allocated objects.
+ * This function hides the differences between dma-buf imported and natively
+ * allocated objects.
*/
-void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-
mutex_lock(&shmem->vmap_lock);
drm_gem_shmem_vunmap_locked(shmem, map);
mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
-struct drm_gem_shmem_object *
+static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
struct drm_device *dev, size_t size,
uint32_t *handle)
@@ -436,15 +432,12 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
return shmem;
}
-EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
/* Update madvise status; returns true if not purged, otherwise
 * false or -errno.
*/
-int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
+int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-
mutex_lock(&shmem->pages_lock);
if (shmem->madv >= 0)
@@ -458,14 +451,14 @@ int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
-void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
+ struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
- dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
+ dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
shmem->sgt = NULL;
@@ -484,18 +477,15 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
*/
shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
- invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
- 0, (loff_t)-1);
+ invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
-bool drm_gem_shmem_purge(struct drm_gem_object *obj)
+bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-
if (!mutex_trylock(&shmem->pages_lock))
return false;
- drm_gem_shmem_purge_locked(obj);
+ drm_gem_shmem_purge_locked(shmem);
mutex_unlock(&shmem->pages_lock);
return true;
@@ -603,19 +593,18 @@ static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
/**
* drm_gem_shmem_mmap - Memory-map a shmem GEM object
- * @obj: gem object
+ * @shmem: shmem GEM object
* @vma: VMA for the area to be mapped
*
* This function implements an augmented version of the GEM DRM file mmap
- * operation for shmem objects. Drivers which employ the shmem helpers should
- * use this function as their &drm_gem_object_funcs.mmap handler.
+ * operation for shmem objects.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
- struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj = &shmem->base;
int ret;
if (obj->import_attach) {
@@ -626,8 +615,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
return dma_buf_mmap(obj->dma_buf, vma, 0);
}
- shmem = to_drm_gem_shmem_obj(obj);
-
ret = drm_gem_shmem_get_pages(shmem);
if (ret) {
drm_gem_vm_close(vma);
@@ -646,17 +633,13 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
/**
* drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
+ * @shmem: shmem GEM object
* @p: DRM printer
* @indent: Tab indentation level
- * @obj: GEM object
- *
- * This implements the &drm_gem_object_funcs.info callback.
*/
-void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj)
+void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
+ struct drm_printer *p, unsigned int indent)
{
- const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-
drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
@@ -666,12 +649,10 @@ EXPORT_SYMBOL(drm_gem_shmem_print_info);
/**
* drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
* pages for a shmem GEM object
- * @obj: GEM object
+ * @shmem: shmem GEM object
*
* This function exports a scatter/gather table suitable for PRIME usage by
- * calling the standard DMA mapping API. Drivers should not call this function
- * directly, instead it should only be used as an implementation for
- * &drm_gem_object_funcs.get_sg_table.
+ * calling the standard DMA mapping API.
*
 * Drivers that need to acquire a scatter/gather table for objects need to call
* drm_gem_shmem_get_pages_sgt() instead.
@@ -679,9 +660,9 @@ EXPORT_SYMBOL(drm_gem_shmem_print_info);
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
*/
-struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
+struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct drm_gem_object *obj = &shmem->base;
WARN_ON(shmem->base.import_attach);
@@ -692,7 +673,7 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
/**
* drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
* scatter/gather table for a shmem GEM object.
- * @obj: GEM object
+ * @shmem: shmem GEM object
*
* This function returns a scatter/gather table suitable for driver usage. If
* the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
@@ -705,10 +686,10 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
* Returns:
* A pointer to the scatter/gather table of pinned pages or errno on failure.
*/
-struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
+ struct drm_gem_object *obj = &shmem->base;
int ret;
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
struct sg_table *sgt;
if (shmem->sgt)
@@ -720,7 +701,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
if (ret)
return ERR_PTR(ret);
- sgt = drm_gem_shmem_get_sg_table(&shmem->base);
+ sgt = drm_gem_shmem_get_sg_table(shmem);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_put_pages;
@@ -777,3 +758,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
+
+MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index bfa386b98134..3f00192215d1 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -197,8 +197,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
if (dev->driver->gem_create_object) {
gem = dev->driver->gem_create_object(dev, size);
- if (!gem)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(gem))
+ return ERR_CAST(gem);
gbo = drm_gem_vram_of_gem(gem);
} else {
gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index c50fa6f0709f..60afa1865559 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -32,16 +32,16 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include <linux/export.h>
#include <linux/hash.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <drm/drm_hashtab.h>
#include <drm/drm_print.h>
+#include "drm_legacy.h"
+
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int size = 1 << order;
@@ -58,7 +58,6 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
}
return 0;
}
-EXPORT_SYMBOL(drm_ht_create);
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
@@ -135,7 +134,6 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
}
return 0;
}
-EXPORT_SYMBOL(drm_ht_insert_item);
/*
* Just insert an item and return any "bits" bit key that hasn't been
@@ -164,7 +162,6 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it
}
return 0;
}
-EXPORT_SYMBOL(drm_ht_just_insert_please);
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
struct drm_hash_item **item)
@@ -178,7 +175,6 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
*item = hlist_entry(list, struct drm_hash_item, head);
return 0;
}
-EXPORT_SYMBOL(drm_ht_find_item);
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
@@ -197,7 +193,6 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
hlist_del_init_rcu(&item->head);
return 0;
}
-EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
@@ -206,4 +201,3 @@ void drm_ht_remove(struct drm_open_hash *ht)
ht->table = NULL;
}
}
-EXPORT_SYMBOL(drm_ht_remove);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 13e1d5c4ec82..d327638e15ee 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -66,7 +66,6 @@
#include "drm_internal.h"
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
static int drm_legacy_irq_install(struct drm_device *dev, int irq)
{
int ret;
@@ -203,4 +202,3 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data,
return -EINVAL;
}
}
-#endif
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index c9206840c87f..70c9dba114a6 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -35,9 +35,47 @@
#include <drm/drm_legacy.h>
struct agp_memory;
+struct drm_buf_desc;
struct drm_device;
struct drm_file;
-struct drm_buf_desc;
+struct drm_hash_item;
+struct drm_open_hash;
+
+/*
+ * Hash-table Support
+ */
+
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+/* drm_hashtab.c */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add);
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
+
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+void drm_ht_remove(struct drm_open_hash *ht);
+#endif
+
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item
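/*
 * A minimal lookup sketch under the rule above (hypothetical caller; 'ht' is
 * a struct drm_open_hash and 'key' is assumed):
 */
struct drm_hash_item *item;
int err;

rcu_read_lock();
err = drm_ht_find_item_rcu(&ht, key, &item);
rcu_read_unlock();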
/*
* Generic DRM Contexts
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 71b646c4131f..0327d595e028 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -15,9 +15,10 @@
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
@@ -200,30 +201,38 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
- void *src = cma_obj->vaddr;
+ struct dma_buf_map map[DRM_FORMAT_MAX_PLANES];
+ struct dma_buf_map data[DRM_FORMAT_MAX_PLANES];
+ void *src;
int ret;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
+ ret = drm_gem_fb_vmap(fb, map, data);
+ if (ret)
+ goto out_drm_gem_fb_end_cpu_access;
+ src = data[0].vaddr; /* TODO: Use mapping abstraction properly */
+
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(dst, src, fb, clip, !gem->import_attach);
+ drm_fb_swab(dst, 0, src, fb, clip, !gem->import_attach);
else
- drm_fb_memcpy(dst, src, fb, clip);
+ drm_fb_memcpy(dst, 0, src, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(dst, 0, src, fb, clip, swap);
break;
default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
&fb->format->format);
- return -EINVAL;
+ ret = -EINVAL;
}
+ drm_gem_fb_vunmap(fb, map);
+out_drm_gem_fb_end_cpu_access:
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return ret;
@@ -249,8 +258,8 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
+ struct dma_buf_map map[DRM_FORMAT_MAX_PLANES];
+ struct dma_buf_map data[DRM_FORMAT_MAX_PLANES];
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
@@ -266,6 +275,10 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (!drm_dev_enter(fb->dev, &idx))
return;
+ ret = drm_gem_fb_vmap(fb, map, data);
+ if (ret)
+ goto err_drm_dev_exit;
+
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -277,7 +290,7 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (ret)
goto err_msg;
} else {
- tr = cma_obj->vaddr;
+ tr = data[0].vaddr; /* TODO: Use mapping abstraction properly */
}
mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
@@ -289,6 +302,9 @@ err_msg:
if (ret)
drm_err_once(fb->dev, "Failed to update display %d\n", ret);
+ drm_gem_fb_vunmap(fb, map);
+
+err_drm_dev_exit:
drm_dev_exit(idx);
}
@@ -1117,8 +1133,8 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi,
/*
* Even though it's not the SPI device that does DMA (the master does),
- * the dma mask is necessary for the dma_alloc_wc() in
- * drm_gem_cma_create(). The dma_addr returned will be a physical
+ * the dma mask is necessary for the dma_alloc_wc() in the GEM code
+ * (e.g., drm_gem_cma_create()). The dma_addr returned will be a physical
* address which might be different from the bus address, but this is
* not a problem since the address will not be used.
* The virtual address is used in the transfer and the SPI core
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 7d1c578388d3..8257f9d4f619 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -980,6 +980,10 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
add_hole(&mm->head_node);
mm->scan_active = 0;
+
+#ifdef CONFIG_DRM_DEBUG_MM
+ stack_depot_init();
+#endif
}
EXPORT_SYMBOL(drm_mm_init);
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index c97323365675..918065982db4 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -107,6 +107,11 @@ static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
kfree(buf);
}
+
+static void __drm_stack_depot_init(void)
+{
+ stack_depot_init();
+}
#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
static depot_stack_handle_t __drm_stack_depot_save(void)
{
@@ -115,6 +120,9 @@ static depot_stack_handle_t __drm_stack_depot_save(void)
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
}
+static void __drm_stack_depot_init(void)
+{
+}
#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */
/**
@@ -359,6 +367,7 @@ void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
ww_mutex_init(&lock->mutex, &crtc_ww_class);
INIT_LIST_HEAD(&lock->head);
+ __drm_stack_depot_init();
}
EXPORT_SYMBOL(drm_modeset_lock_init);
diff --git a/drivers/gpu/drm/drm_nomodeset.c b/drivers/gpu/drm/drm_nomodeset.c
new file mode 100644
index 000000000000..f3978d5bd3a1
--- /dev/null
+++ b/drivers/gpu/drm/drm_nomodeset.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/types.h>
+
+static bool drm_nomodeset;
+
+bool drm_firmware_drivers_only(void)
+{
+ return drm_nomodeset;
+}
+EXPORT_SYMBOL(drm_firmware_drivers_only);
+
+static int __init disable_modeset(char *str)
+{
+ drm_nomodeset = true;
+
+ pr_warn("Booted with the nomodeset parameter. Only the system framebuffer will be available\n");
+
+ return 1;
+}
+
+/* Disable kernel modesetting */
+__setup("nomodeset", disable_modeset);
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 37c34146eea8..59d368ea006b 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -402,3 +402,36 @@ int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
}
EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order);
+
+/**
+ * drm_of_lvds_get_data_mapping - Get LVDS data mapping
+ * @port: DT port node of the LVDS source or sink
+ *
+ * Convert DT "data-mapping" property string value into media bus format value.
+ *
+ * Return:
+ * * MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - data-mapping is "jeida-18"
+ * * MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA - data-mapping is "jeida-24"
+ * * MEDIA_BUS_FMT_RGB888_1X7X4_SPWG - data-mapping is "vesa-24"
+ * * -EINVAL - the "data-mapping" property is unsupported
+ * * -ENODEV - the "data-mapping" property is missing
+ */
+int drm_of_lvds_get_data_mapping(const struct device_node *port)
+{
+ const char *mapping;
+ int ret;
+
+ ret = of_property_read_string(port, "data-mapping", &mapping);
+ if (ret < 0)
+ return -ENODEV;
+
+ if (!strcmp(mapping, "jeida-18"))
+ return MEDIA_BUS_FMT_RGB666_1X7X3_SPWG;
+ if (!strcmp(mapping, "jeida-24"))
+ return MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
+ if (!strcmp(mapping, "vesa-24"))
+ return MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(drm_of_lvds_get_data_mapping);
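/*
 * A sketch of how a panel or bridge driver might consume the new
 * helper; the surrounding probe code and the fallback format are
 * assumptions, not part of this patch:
 */
static int my_lvds_get_bus_format(struct device *dev,
				  const struct device_node *port,
				  u32 *bus_format)
{
	int ret = drm_of_lvds_get_data_mapping(port);

	if (ret == -ENODEV)
		ret = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;	/* assumed default */
	else if (ret < 0)
		return dev_err_probe(dev, ret, "unsupported data-mapping\n");

	*bus_format = ret;

	return 0;
}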
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index a9359878f4ed..b910978d3e48 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -115,6 +115,12 @@ static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = {
+ .width = 1600,
+ .height = 2560,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -262,6 +268,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Book X90F / X91F / X91L */
+ .matches = {
+ /* Non-exact match to match all versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* OneGX1 Pro */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
@@ -269,6 +281,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
+ }, { /* OneXPlayer */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ },
+ .driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
new file mode 100644
index 000000000000..beaf99e9120a
--- /dev/null
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2020 - 2021 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <drm/drm_privacy_screen_machine.h>
+#include <drm/drm_privacy_screen_consumer.h>
+#include <drm/drm_privacy_screen_driver.h>
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * This class allows non-KMS drivers, e.g. from drivers/platform/x86, to
+ * register a privacy-screen device, which the KMS drivers can then use
+ * to implement the standard privacy-screen properties, see
+ * :ref:`Standard Connector Properties<standard_connector_properties>`.
+ *
+ * KMS drivers using a privacy-screen class device are advised to use the
+ * drm_connector_attach_privacy_screen_provider() and
+ * drm_connector_update_privacy_screen() helpers for dealing with this.
+ */
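/*
 * A rough consumer-side sketch (not part of this patch): a KMS driver
 * looks up the provider for a connector and attaches it, deferring
 * probe when the provider is known but not yet registered. The init
 * function and the use of connector->name as con_id are illustrative;
 * drm_connector_attach_privacy_screen_provider() lives in
 * drm_connector.c.
 */
static int my_connector_init_privacy_screen(struct drm_connector *connector,
					    struct device *dev)
{
	struct drm_privacy_screen *priv;

	priv = drm_privacy_screen_get(dev, connector->name);
	if (PTR_ERR(priv) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (IS_ERR(priv))
		return 0;	/* -ENODEV: no privacy screen on this display */

	drm_connector_attach_privacy_screen_provider(connector, priv);

	return 0;
}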
+
+#define to_drm_privacy_screen(dev) \
+ container_of(dev, struct drm_privacy_screen, dev)
+
+static DEFINE_MUTEX(drm_privacy_screen_lookup_lock);
+static LIST_HEAD(drm_privacy_screen_lookup_list);
+
+static DEFINE_MUTEX(drm_privacy_screen_devs_lock);
+static LIST_HEAD(drm_privacy_screen_devs);
+
+/*** drm_privacy_screen_machine.h functions ***/
+
+/**
+ * drm_privacy_screen_lookup_add - add an entry to the static privacy-screen
+ * lookup list
+ * @lookup: lookup list entry to add
+ *
+ * Add an entry to the static privacy-screen lookup list. Note that the
+ * &struct list_head which is part of the &struct drm_privacy_screen_lookup
+ * gets added to a list owned by the privacy-screen core. So the passed-in
+ * &struct drm_privacy_screen_lookup must not be freed until it is removed
+ * from the lookup list by calling drm_privacy_screen_lookup_remove().
+ */
+void drm_privacy_screen_lookup_add(struct drm_privacy_screen_lookup *lookup)
+{
+ mutex_lock(&drm_privacy_screen_lookup_lock);
+ list_add(&lookup->list, &drm_privacy_screen_lookup_list);
+ mutex_unlock(&drm_privacy_screen_lookup_lock);
+}
+EXPORT_SYMBOL(drm_privacy_screen_lookup_add);
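/*
 * A machine-side sketch (not part of this patch): a platform driver
 * registers a static lookup entry. The provider name matches the
 * thinkpad_acpi example in drm_privacy_screen_x86.c below; the con_id
 * is hypothetical. The entry must stay allocated until it is removed
 * with drm_privacy_screen_lookup_remove().
 */
static struct drm_privacy_screen_lookup my_lookup = {
	.dev_id = NULL,		/* NULL acts as a wildcard */
	.con_id = "eDP-1",	/* hypothetical connector name */
	.provider = "privacy_screen-thinkpad_acpi",
};

static void my_lookup_register(void)
{
	drm_privacy_screen_lookup_add(&my_lookup);
}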
+
+/**
+ * drm_privacy_screen_lookup_remove - remove an entry from the static
+ * privacy-screen lookup list
+ * @lookup: lookup list entry to remove
+ *
+ * Remove an entry previously added with drm_privacy_screen_lookup_add()
+ * from the static privacy-screen lookup list.
+ */
+void drm_privacy_screen_lookup_remove(struct drm_privacy_screen_lookup *lookup)
+{
+ mutex_lock(&drm_privacy_screen_lookup_lock);
+ list_del(&lookup->list);
+ mutex_unlock(&drm_privacy_screen_lookup_lock);
+}
+EXPORT_SYMBOL(drm_privacy_screen_lookup_remove);
+
+/*** drm_privacy_screen_consumer.h functions ***/
+
+static struct drm_privacy_screen *drm_privacy_screen_get_by_name(
+ const char *name)
+{
+ struct drm_privacy_screen *priv;
+ struct device *dev = NULL;
+
+ mutex_lock(&drm_privacy_screen_devs_lock);
+
+ list_for_each_entry(priv, &drm_privacy_screen_devs, list) {
+ if (strcmp(dev_name(&priv->dev), name) == 0) {
+ dev = get_device(&priv->dev);
+ break;
+ }
+ }
+
+ mutex_unlock(&drm_privacy_screen_devs_lock);
+
+ return dev ? to_drm_privacy_screen(dev) : NULL;
+}
+
+/**
+ * drm_privacy_screen_get - get a privacy-screen provider
+ * @dev: consumer-device for which to get a privacy-screen provider
+ * @con_id: (video)connector name for which to get a privacy-screen provider
+ *
+ * Get a privacy-screen provider for a privacy-screen attached to the
+ * display described by the @dev and @con_id parameters.
+ *
+ * Return:
+ * * A pointer to a &struct drm_privacy_screen on success.
+ * * ERR_PTR(-ENODEV) if no matching privacy-screen is found
+ * * ERR_PTR(-EPROBE_DEFER) if there is a matching privacy-screen,
+ * but it has not been registered yet.
+ */
+struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev,
+ const char *con_id)
+{
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct drm_privacy_screen_lookup *l;
+ struct drm_privacy_screen *priv;
+ const char *provider = NULL;
+ int match, best = -1;
+
+ /*
+ * For now we only support using a static lookup table, which is
+ * populated by the drm_privacy_screen_arch_init() call. This should
+ * be extended with device-tree / fw_node lookup when support is added
+ * for device-tree based hardware with a privacy-screen.
+ *
+ * The lookup algorithm was shamelessly taken from the clock
+ * framework:
+ *
+ * We do slightly fuzzy matching here:
+ * An entry with a NULL ID is assumed to be a wildcard.
+ * If an entry has a device ID, it must match.
+ * If an entry has a connection ID, it must match.
+ * Then we take the most specific entry - with the following order
+ * of precedence: dev+con > dev only > con only.
+ */
+ mutex_lock(&drm_privacy_screen_lookup_lock);
+
+ list_for_each_entry(l, &drm_privacy_screen_lookup_list, list) {
+ match = 0;
+
+ if (l->dev_id) {
+ if (!dev_id || strcmp(l->dev_id, dev_id))
+ continue;
+
+ match += 2;
+ }
+
+ if (l->con_id) {
+ if (!con_id || strcmp(l->con_id, con_id))
+ continue;
+
+ match += 1;
+ }
+
+ if (match > best) {
+ provider = l->provider;
+ best = match;
+ }
+ }
+
+ mutex_unlock(&drm_privacy_screen_lookup_lock);
+
+ if (!provider)
+ return ERR_PTR(-ENODEV);
+
+ priv = drm_privacy_screen_get_by_name(provider);
+ if (!priv)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return priv;
+}
+EXPORT_SYMBOL(drm_privacy_screen_get);
+
+/**
+ * drm_privacy_screen_put - release a privacy-screen reference
+ * @priv: privacy screen reference to release
+ *
+ * Release a privacy-screen provider reference gotten through
+ * drm_privacy_screen_get(). May be called with a NULL or ERR_PTR,
+ * in which case it is a no-op.
+ */
+void drm_privacy_screen_put(struct drm_privacy_screen *priv)
+{
+ if (IS_ERR_OR_NULL(priv))
+ return;
+
+ put_device(&priv->dev);
+}
+EXPORT_SYMBOL(drm_privacy_screen_put);
+
+/**
+ * drm_privacy_screen_set_sw_state - set a privacy-screen's sw-state
+ * @priv: privacy screen to set the sw-state for
+ * @sw_state: new sw-state value to set
+ *
+ * Set the sw-state of a privacy screen. If the privacy-screen is not
+ * in a locked hw-state, then the actual state and hw-state of the privacy-screen
+ * will be immediately updated to the new value. If the privacy-screen is
+ * in a locked hw-state, then the new sw-state will be remembered as the
+ * requested state to put the privacy-screen in when it becomes unlocked.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status sw_state)
+{
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->ops) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * As per the DRM connector properties documentation, setting the
+ * sw_state while the hw_state is locked is allowed. In this case
+ * it is a no-op other than storing the new sw_state so that it
+ * can be honored when the state gets unlocked.
+ * Also skip the set if the hw is already in the desired state.
+ */
+ if (priv->hw_state >= PRIVACY_SCREEN_DISABLED_LOCKED ||
+ priv->hw_state == sw_state) {
+ priv->sw_state = sw_state;
+ goto out;
+ }
+
+ ret = priv->ops->set_sw_state(priv, sw_state);
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_privacy_screen_set_sw_state);
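/*
 * A sketch of the locked-state rule above (the caller function is
 * hypothetical): a set against a locked hw-state still succeeds and
 * merely records the wish, which the caller can observe via
 * drm_privacy_screen_get_state().
 */
static void my_apply_sw_state(struct drm_privacy_screen *priv,
			      enum drm_privacy_screen_status val)
{
	enum drm_privacy_screen_status sw, hw;

	if (drm_privacy_screen_set_sw_state(priv, val))
		return;	/* -ENODEV: the provider has been unregistered */

	drm_privacy_screen_get_state(priv, &sw, &hw);
	if (hw >= PRIVACY_SCREEN_DISABLED_LOCKED)
		pr_debug("hw-state is locked; sw-state stored for later\n");
}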
+
+/**
+ * drm_privacy_screen_get_state - get privacy-screen's current state
+ * @priv: privacy screen to get the state for
+ * @sw_state_ret: address where to store the privacy-screens current sw-state
+ * @hw_state_ret: address where to store the privacy-screens current hw-state
+ *
+ * Get the current state of a privacy-screen, both the sw-state and the
+ * hw-state.
+ */
+void drm_privacy_screen_get_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status *sw_state_ret,
+ enum drm_privacy_screen_status *hw_state_ret)
+{
+ mutex_lock(&priv->lock);
+ *sw_state_ret = priv->sw_state;
+ *hw_state_ret = priv->hw_state;
+ mutex_unlock(&priv->lock);
+}
+EXPORT_SYMBOL(drm_privacy_screen_get_state);
+
+/**
+ * drm_privacy_screen_register_notifier - register a notifier
+ * @priv: Privacy screen to register the notifier with
+ * @nb: Notifier-block for the notifier to register
+ *
+ * Register a notifier with the privacy-screen to be notified of changes made
+ * to the privacy-screen state from outside of the privacy-screen class.
+ * E.g. the state may be changed by the hardware itself in response to a
+ * hotkey press.
+ *
+ * The notifier is called with no locks held. The new hw_state and sw_state
+ * can be retrieved using the drm_privacy_screen_get_state() function.
+ * A pointer to the drm_privacy_screen's struct is passed as the void *data
+ * argument of the notifier_block's notifier_call.
+ *
+ * The notifier will NOT be called when changes are made through
+ * drm_privacy_screen_set_sw_state(). It is only called for external changes.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&priv->notifier_head, nb);
+}
+EXPORT_SYMBOL(drm_privacy_screen_register_notifier);
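/*
 * A consumer notifier sketch, following the rules above: the callback
 * runs without locks, @data is the privacy screen, and the new states
 * are re-read via drm_privacy_screen_get_state(). Names are
 * hypothetical.
 */
static int my_privacy_screen_notify(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct drm_privacy_screen *priv = data;
	enum drm_privacy_screen_status sw, hw;

	drm_privacy_screen_get_state(priv, &sw, &hw);
	/* e.g. update the connector property and send a hotplug event */

	return NOTIFY_OK;
}

static struct notifier_block my_privacy_screen_nb = {
	.notifier_call = my_privacy_screen_notify,
};

/* drm_privacy_screen_register_notifier(priv, &my_privacy_screen_nb); */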
+
+/**
+ * drm_privacy_screen_unregister_notifier - unregister a notifier
+ * @priv: Privacy screen to unregister the notifier from
+ * @nb: Notifier-block for the notifier to unregister
+ *
+ * Unregister a notifier registered with drm_privacy_screen_register_notifier().
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&priv->notifier_head, nb);
+}
+EXPORT_SYMBOL(drm_privacy_screen_unregister_notifier);
+
+/*** drm_privacy_screen_driver.h functions ***/
+
+static ssize_t sw_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_privacy_screen *priv = to_drm_privacy_screen(dev);
+ const char * const sw_state_names[] = {
+ "Disabled",
+ "Enabled",
+ };
+ ssize_t ret;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->ops)
+ ret = -ENODEV;
+ else if (WARN_ON(priv->sw_state >= ARRAY_SIZE(sw_state_names)))
+ ret = -ENXIO;
+ else
+ ret = sprintf(buf, "%s\n", sw_state_names[priv->sw_state]);
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+/*
+ * RO: Do not allow setting the sw_state through sysfs; this MUST be done
+ * through the drm_properties on the drm_connector.
+ */
+static DEVICE_ATTR_RO(sw_state);
+
+static ssize_t hw_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_privacy_screen *priv = to_drm_privacy_screen(dev);
+ const char * const hw_state_names[] = {
+ "Disabled",
+ "Enabled",
+ "Disabled, locked",
+ "Enabled, locked",
+ };
+ ssize_t ret;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->ops)
+ ret = -ENODEV;
+ else if (WARN_ON(priv->hw_state >= ARRAY_SIZE(hw_state_names)))
+ ret = -ENXIO;
+ else
+ ret = sprintf(buf, "%s\n", hw_state_names[priv->hw_state]);
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+static DEVICE_ATTR_RO(hw_state);
+
+static struct attribute *drm_privacy_screen_attrs[] = {
+ &dev_attr_sw_state.attr,
+ &dev_attr_hw_state.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(drm_privacy_screen);
+
+static struct device_type drm_privacy_screen_type = {
+ .name = "privacy_screen",
+ .groups = drm_privacy_screen_groups,
+};
+
+static void drm_privacy_screen_device_release(struct device *dev)
+{
+ struct drm_privacy_screen *priv = to_drm_privacy_screen(dev);
+
+ kfree(priv);
+}
+
+/**
+ * drm_privacy_screen_register - register a privacy-screen
+ * @parent: parent-device for the privacy-screen
+ * @ops: &struct drm_privacy_screen_ops pointer with ops for the privacy-screen
+ *
+ * Create and register a privacy-screen.
+ *
+ * Return:
+ * * A pointer to the created privacy-screen on success.
+ * * An ERR_PTR(errno) on failure.
+ */
+struct drm_privacy_screen *drm_privacy_screen_register(
+ struct device *parent, const struct drm_privacy_screen_ops *ops)
+{
+ struct drm_privacy_screen *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&priv->lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&priv->notifier_head);
+
+ priv->dev.class = drm_class;
+ priv->dev.type = &drm_privacy_screen_type;
+ priv->dev.parent = parent;
+ priv->dev.release = drm_privacy_screen_device_release;
+ dev_set_name(&priv->dev, "privacy_screen-%s", dev_name(parent));
+ priv->ops = ops;
+
+ priv->ops->get_hw_state(priv);
+
+ ret = device_register(&priv->dev);
+ if (ret) {
+ put_device(&priv->dev);
+ return ERR_PTR(ret);
+ }
+
+ mutex_lock(&drm_privacy_screen_devs_lock);
+ list_add(&priv->list, &drm_privacy_screen_devs);
+ mutex_unlock(&drm_privacy_screen_devs_lock);
+
+ return priv;
+}
+EXPORT_SYMBOL(drm_privacy_screen_register);
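/*
 * A provider-side sketch (not part of this patch): a minimal ops table
 * and registration. The ops member names follow their use in this file
 * (set_sw_state, get_hw_state); the my_hw_*() accessors stand in for
 * real hardware access and are purely hypothetical.
 */
static bool my_hw_read(void)
{
	return false;	/* hypothetical: read the state from the hardware */
}

static int my_hw_write(bool enable)
{
	return 0;	/* hypothetical: program the hardware */
}

static int my_set_sw_state(struct drm_privacy_screen *priv,
			   enum drm_privacy_screen_status sw_state)
{
	int ret = my_hw_write(sw_state == PRIVACY_SCREEN_ENABLED);

	if (ret)
		return ret;

	/* called with priv->lock held, see drm_privacy_screen_set_sw_state() */
	priv->sw_state = sw_state;
	priv->hw_state = sw_state;

	return 0;
}

static void my_get_hw_state(struct drm_privacy_screen *priv)
{
	priv->hw_state = my_hw_read() ? PRIVACY_SCREEN_ENABLED :
					PRIVACY_SCREEN_DISABLED;
	priv->sw_state = priv->hw_state;
}

static const struct drm_privacy_screen_ops my_ops = {
	.set_sw_state = my_set_sw_state,
	.get_hw_state = my_get_hw_state,
};

/* in the provider's probe: priv = drm_privacy_screen_register(&pdev->dev, &my_ops); */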
+
+/**
+ * drm_privacy_screen_unregister - unregister privacy-screen
+ * @priv: privacy-screen to unregister
+ *
+ * Unregister a privacy-screen registered with drm_privacy_screen_register().
+ * May be called with a NULL or ERR_PTR, in which case it is a no-op.
+ */
+void drm_privacy_screen_unregister(struct drm_privacy_screen *priv)
+{
+ if (IS_ERR_OR_NULL(priv))
+ return;
+
+ mutex_lock(&drm_privacy_screen_devs_lock);
+ list_del(&priv->list);
+ mutex_unlock(&drm_privacy_screen_devs_lock);
+
+ mutex_lock(&priv->lock);
+ priv->ops = NULL;
+ mutex_unlock(&priv->lock);
+
+ device_unregister(&priv->dev);
+}
+EXPORT_SYMBOL(drm_privacy_screen_unregister);
+
+/**
+ * drm_privacy_screen_call_notifier_chain - notify consumers of state change
+ * @priv: Privacy screen whose state has changed
+ *
+ * A privacy-screen provider driver can call this function upon external
+ * changes to the privacy-screen state. E.g. the state may be changed by the
+ * hardware itself in response to a hotkey press.
+ * This function must be called without holding the privacy-screen lock.
+ * The driver must update sw_state and hw_state to reflect the new state before
+ * calling this function.
+ * The expected behavior from the driver upon receiving an external state
+ * change event is: 1. take the lock; 2. update sw_state and hw_state;
+ * 3. release the lock; 4. call drm_privacy_screen_call_notifier_chain().
+ */
+void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv)
+{
+ blocking_notifier_call_chain(&priv->notifier_head, 0, priv);
+}
+EXPORT_SYMBOL(drm_privacy_screen_call_notifier_chain);
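/*
 * A provider-side sketch of the sequence spelled out above; the hotkey
 * handler itself is hypothetical:
 */
static void my_handle_hotkey_toggle(struct drm_privacy_screen *priv,
				    enum drm_privacy_screen_status new_state)
{
	mutex_lock(&priv->lock);	/* 1. take the lock */
	priv->sw_state = new_state;	/* 2. update sw_state and hw_state */
	priv->hw_state = new_state;
	mutex_unlock(&priv->lock);	/* 3. release the lock */

	drm_privacy_screen_call_notifier_chain(priv);	/* 4. notify consumers */
}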
diff --git a/drivers/gpu/drm/drm_privacy_screen_x86.c b/drivers/gpu/drm/drm_privacy_screen_x86.c
new file mode 100644
index 000000000000..e7aa74ad0b24
--- /dev/null
+++ b/drivers/gpu/drm/drm_privacy_screen_x86.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <drm/drm_privacy_screen_machine.h>
+
+#ifdef CONFIG_X86
+static struct drm_privacy_screen_lookup arch_lookup;
+
+struct arch_init_data {
+ struct drm_privacy_screen_lookup lookup;
+ bool (*detect)(void);
+};
+
+#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
+static acpi_status __init acpi_set_handle(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ *(acpi_handle *)return_value = handle;
+ return AE_CTRL_TERMINATE;
+}
+
+static bool __init detect_thinkpad_privacy_screen(void)
+{
+ union acpi_object obj = { .type = ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { .count = 1, .pointer = &obj, };
+ acpi_handle ec_handle = NULL;
+ unsigned long long output;
+ acpi_status status;
+
+ if (acpi_disabled)
+ return false;
+
+ /* Get embedded-controller handle */
+ status = acpi_get_devices("PNP0C09", acpi_set_handle, NULL, &ec_handle);
+ if (ACPI_FAILURE(status) || !ec_handle)
+ return false;
+
+ /* And call the privacy-screen get-status method */
+ status = acpi_evaluate_integer(ec_handle, "HKEY.GSSS", &args, &output);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ return (output & 0x10000) ? true : false;
+}
+#endif
+
+static const struct arch_init_data arch_init_data[] __initconst = {
+#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
+ {
+ .lookup = {
+ .dev_id = NULL,
+ .con_id = NULL,
+ .provider = "privacy_screen-thinkpad_acpi",
+ },
+ .detect = detect_thinkpad_privacy_screen,
+ },
+#endif
+};
+
+void __init drm_privacy_screen_lookup_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arch_init_data); i++) {
+ if (!arch_init_data[i].detect())
+ continue;
+
+ pr_info("Found '%s' privacy-screen provider\n",
+ arch_init_data[i].lookup.provider);
+
+ /* Make a copy because arch_init_data is __initconst */
+ arch_lookup = arch_init_data[i].lookup;
+ drm_privacy_screen_lookup_add(&arch_lookup);
+ break;
+ }
+}
+
+void drm_privacy_screen_lookup_exit(void)
+{
+ if (arch_lookup.provider)
+ drm_privacy_screen_lookup_remove(&arch_lookup);
+}
+#endif /* ifdef CONFIG_X86 */
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 61d5c57f23e1..682359512996 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -604,6 +604,9 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
*
* This function must be called from process context with no mode
* setting locks held.
+ *
+ * If only a single connector has changed, consider calling
+ * drm_kms_helper_connector_hotplug_event() instead.
*/
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
@@ -616,6 +619,26 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
+/**
+ * drm_kms_helper_connector_hotplug_event - fire off a KMS connector hotplug event
+ * @connector: drm_connector which has changed
+ *
+ * This is the same as drm_kms_helper_hotplug_event(), except it fires a more
+ * fine-grained uevent for a single connector.
+ */
+void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ /* send a uevent + call fbdev */
+ drm_sysfs_connector_hotplug_event(connector);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+
+ drm_client_dev_hotplug(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_connector_hotplug_event);
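/*
 * A sketch of the intended use (the threaded irq handler is
 * hypothetical): a driver that knows exactly which connector changed
 * sends the fine-grained per-connector event rather than the
 * device-wide drm_kms_helper_hotplug_event().
 */
static irqreturn_t my_hpd_irq_thread(int irq, void *arg)
{
	struct drm_connector *connector = arg;

	/* ... re-detect connector->status here ... */

	drm_kms_helper_connector_hotplug_event(connector);

	return IRQ_HANDLED;
}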
+
static void output_poll_execute(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
@@ -865,7 +888,7 @@ bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector)
mutex_unlock(&dev->mode_config.mutex);
if (changed) {
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Sent hotplug event\n",
connector->base.id,
connector->name);
@@ -904,9 +927,9 @@ EXPORT_SYMBOL(drm_connector_helper_hpd_irq_event);
*/
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
- struct drm_connector *connector;
+ struct drm_connector *connector, *first_changed_connector = NULL;
struct drm_connector_list_iter conn_iter;
- bool changed = false;
+ int changed = 0;
if (!dev->mode_config.poll_enabled)
return false;
@@ -918,16 +941,25 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- if (check_connector_changed(connector))
- changed = true;
+ if (check_connector_changed(connector)) {
+ if (!first_changed_connector) {
+ drm_connector_get(connector);
+ first_changed_connector = connector;
+ }
+
+ changed++;
+ }
}
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
- if (changed) {
+ if (changed == 1)
+ drm_kms_helper_connector_hotplug_event(first_changed_connector);
+ else if (changed > 0)
drm_kms_helper_hotplug_event(dev);
- DRM_DEBUG_KMS("Sent hotplug event\n");
- }
+
+ if (first_changed_connector)
+ drm_connector_put(first_changed_connector);
return changed;
}
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 76ff6ec3421b..430e00b16eec 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -410,6 +410,31 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
/**
+ * drm_sysfs_connector_hotplug_event - generate a DRM uevent for any connector
+ * change
+ * @connector: connector which has changed
+ *
+ * Send a uevent for the DRM connector specified by @connector. This will send
+ * a uevent with the properties HOTPLUG=1 and CONNECTOR.
+ */
+void drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ char hotplug_str[] = "HOTPLUG=1", conn_id[21];
+ char *envp[] = { hotplug_str, conn_id, NULL };
+
+ snprintf(conn_id, sizeof(conn_id),
+ "CONNECTOR=%u", connector->base.id);
+
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] generating connector hotplug event\n",
+ connector->base.id, connector->name);
+
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_sysfs_connector_hotplug_event);
+
+/**
* drm_sysfs_connector_status_event - generate a DRM uevent for connector
* property status change
* @connector: connector on which property status changed
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 7dcc6392792d..0b756ecb1bc2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -589,6 +589,7 @@ static int compare_str(struct device *dev, void *data)
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *first_node = NULL;
struct component_match *match = NULL;
if (!dev->platform_data) {
@@ -598,6 +599,9 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
if (!of_device_is_available(core_node))
continue;
+ if (!first_node)
+ first_node = core_node;
+
drm_of_component_match_add(&pdev->dev, &match,
compare_of, core_node);
}
@@ -609,6 +613,32 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
component_match_add(dev, &match, compare_str, names[i]);
}
+ /*
+ * PTA and MTLB can have 40 bit base addresses, but
+ * unfortunately, an entry in the MTLB can only point to a
+ * 32 bit base address of a STLB. Moreover, to initialize the
+ * MMU we need a command buffer with a 32 bit address because
+ * without an MMU there is only an identity mapping between
+ * the internal 32 bit addresses and the bus addresses.
+ *
+ * To make things easy, we set the dma_coherent_mask to 32
+ * bit to make sure we are allocating the command buffers and
+ * TLBs in the lower 4 GiB address space.
+ */
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_dbg(&pdev->dev, "No suitable DMA available\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Apply the same DMA configuration to the virtual etnaviv
+ * device as the GPU we found. This assumes that all Vivante
+ * GPUs in the system share the same DMA constraints.
+ */
+ if (first_node)
+ of_dma_configure(&pdev->dev, first_node, true);
+
return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}
@@ -653,21 +683,12 @@ static int __init etnaviv_init(void)
if (!of_device_is_available(np))
continue;
- pdev = platform_device_alloc("etnaviv", -1);
+ pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE);
if (!pdev) {
ret = -ENOMEM;
of_node_put(np);
goto unregister_platform_driver;
}
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-
- /*
- * Apply the same DMA configuration to the virtual etnaviv
- * device as the GPU we found. This assumes that all Vivante
- * GPUs in the system share the same DMA constraints.
- */
- of_dma_configure(&pdev->dev, np, true);
ret = platform_device_add(pdev);
if (ret) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index f0b2540e60e4..d5314aa28ff7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -424,45 +424,24 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
}
#ifdef CONFIG_DEBUG_FS
-static void etnaviv_gem_describe_fence(struct dma_fence *fence,
- const char *type, struct seq_file *m)
-{
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- seq_printf(m, "\t%9s: %s %s seq %llu\n",
- type,
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- fence->seqno);
-}
-
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct dma_resv *robj = obj->resv;
- struct dma_resv_list *fobj;
- struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
+ int r;
seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, etnaviv_obj->vaddr, obj->size);
- rcu_read_lock();
- fobj = dma_resv_shared_list(robj);
- if (fobj) {
- unsigned int i, shared_count = fobj->shared_count;
-
- for (i = 0; i < shared_count; i++) {
- fence = rcu_dereference(fobj->shared[i]);
- etnaviv_gem_describe_fence(fence, "Shared", m);
- }
- }
+ r = dma_resv_lock(robj, NULL);
+ if (r)
+ return;
- fence = dma_resv_excl_fence(robj);
- if (fence)
- etnaviv_gem_describe_fence(fence, "Exclusive", m);
- rcu_read_unlock();
+ dma_resv_describe(robj, m);
+ dma_resv_unlock(robj);
}
void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 486259e154af..a17313282e8b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -189,13 +189,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
- ret = dma_resv_get_fences(robj, &bo->excl,
+ ret = dma_resv_get_fences(robj, NULL,
&bo->nr_shared,
&bo->shared);
if (ret)
return ret;
} else {
- bo->excl = dma_resv_get_excl_unlocked(robj);
+ bo->excl = dma_fence_get(dma_resv_excl_fence(robj));
}
}
@@ -469,6 +469,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
+ args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
+ DRM_ERROR("submit arguments out of size limits\n");
+ return -EINVAL;
+ }
+
/*
* Copy the command submission and bo array to kernel space in
* one go, and do this outside of any locks.
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 242a5fd8b932..37018bc55810 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1047,7 +1047,7 @@ pm_put:
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
- unsigned int i = 0;
+ unsigned int i;
dev_err(gpu->dev, "recover hung GPU!\n");
@@ -1060,7 +1060,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
/* complete all events, the GPU won't do it after the reset */
spin_lock(&gpu->event_spinlock);
- for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
+ for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
complete(&gpu->event_free);
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
spin_unlock(&gpu->event_spinlock);
@@ -1658,7 +1658,7 @@ etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
-static struct thermal_cooling_device_ops cooling_ops = {
+static const struct thermal_cooling_device_ops cooling_ops = {
.get_max_state = etnaviv_gpu_cooling_get_max_state,
.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 1c75c8ed5bce..85eddd492774 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -130,6 +130,7 @@ struct etnaviv_gpu {
/* hang detection */
u32 hangcheck_dma_addr;
+ u32 hangcheck_fence;
void __iomem *mmio;
int irq;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 180bb633d5c5..58f593b278c1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -107,8 +107,10 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*/
dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
change = dma_addr - gpu->hangcheck_dma_addr;
- if (change < 0 || change > 16) {
+ if (gpu->completed_fence != gpu->hangcheck_fence ||
+ change < 0 || change > 16) {
gpu->hangcheck_dma_addr = dma_addr;
+ gpu->hangcheck_fence = gpu->completed_fence;
goto out_no_timeout;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index d8f1cf4d6b69..9743b6b17447 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -102,16 +102,7 @@ static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_RENDER_ALLOW),
};
-static const struct file_operations exynos_drm_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = exynos_drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .release = drm_release,
-};
+DEFINE_DRM_GEM_FOPS(exynos_drm_driver_fops);
static const struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM
@@ -124,7 +115,7 @@ static const struct drm_driver exynos_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = exynos_drm_gem_prime_import,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
- .gem_prime_mmap = exynos_drm_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 8d137857818c..32a36572b894 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -13,7 +13,6 @@
#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
@@ -265,7 +264,7 @@ struct exynos_dsi {
struct clk **clks;
struct regulator_bulk_data supplies[2];
int irq;
- int te_gpio;
+ struct gpio_desc *te_gpio;
u32 pll_clk_rate;
u32 burst_clk_rate;
@@ -1298,14 +1297,14 @@ static void exynos_dsi_enable_irq(struct exynos_dsi *dsi)
{
enable_irq(dsi->irq);
- if (gpio_is_valid(dsi->te_gpio))
- enable_irq(gpio_to_irq(dsi->te_gpio));
+ if (dsi->te_gpio)
+ enable_irq(gpiod_to_irq(dsi->te_gpio));
}
static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
{
- if (gpio_is_valid(dsi->te_gpio))
- disable_irq(gpio_to_irq(dsi->te_gpio));
+ if (dsi->te_gpio)
+ disable_irq(gpiod_to_irq(dsi->te_gpio));
disable_irq(dsi->irq);
}
@@ -1335,42 +1334,31 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
int ret;
int te_gpio_irq;
- dsi->te_gpio = of_get_named_gpio(panel->of_node, "te-gpios", 0);
- if (dsi->te_gpio == -ENOENT)
- return 0;
-
- if (!gpio_is_valid(dsi->te_gpio)) {
- ret = dsi->te_gpio;
- dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret);
- goto out;
- }
-
- ret = gpio_request(dsi->te_gpio, "te_gpio");
- if (ret) {
- dev_err(dsi->dev, "gpio request failed with %d\n", ret);
- goto out;
+ dsi->te_gpio = devm_gpiod_get_optional(dsi->dev, "te", GPIOD_IN);
+ if (IS_ERR(dsi->te_gpio)) {
+ dev_err(dsi->dev, "gpio request failed with %ld\n",
+ PTR_ERR(dsi->te_gpio));
+ return PTR_ERR(dsi->te_gpio);
}
- te_gpio_irq = gpio_to_irq(dsi->te_gpio);
+ te_gpio_irq = gpiod_to_irq(dsi->te_gpio);
ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
if (ret) {
dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
- gpio_free(dsi->te_gpio);
- goto out;
+ gpiod_put(dsi->te_gpio);
+ return ret;
}
-out:
- return ret;
+ return 0;
}
static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
{
- if (gpio_is_valid(dsi->te_gpio)) {
- free_irq(gpio_to_irq(dsi->te_gpio), dsi);
- gpio_free(dsi->te_gpio);
- dsi->te_gpio = -ENOENT;
+ if (dsi->te_gpio) {
+ free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
+ gpiod_put(dsi->te_gpio);
}
}
@@ -1745,9 +1733,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
if (!dsi)
return -ENOMEM;
- /* To be checked as invalid one */
- dsi->te_gpio = -ENOENT;
-
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
INIT_LIST_HEAD(&dsi->transfer_list);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 5147f5929be7..02c97b9ca926 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -15,6 +15,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/exynos_drm.h>
@@ -39,25 +40,8 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
struct drm_fb_helper *helper = info->par;
struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
- unsigned long vm_size;
- int ret;
-
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-
- vm_size = vma->vm_end - vma->vm_start;
-
- if (vm_size > exynos_gem->size)
- return -EINVAL;
- ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
- exynos_gem->dma_addr, exynos_gem->size,
- exynos_gem->dma_attrs);
- if (ret < 0) {
- DRM_DEV_ERROR(to_dma_dev(helper->dev), "failed to mmap.\n");
- return ret;
- }
-
- return 0;
+ return drm_gem_prime_mmap(&exynos_gem->base, vma);
}
static const struct fb_ops exynos_drm_fb_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index ecfd82d0afb7..023f54ee61a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -782,8 +782,8 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
sc->hratio = (src_w << 14) / (dst_w << hfactor);
sc->vratio = (src_h << 14) / (dst_h << vfactor);
- sc->up_h = (dst_w >= src_w) ? true : false;
- sc->up_v = (dst_h >= src_h) ? true : false;
+ sc->up_h = (dst_w >= src_w);
+ sc->up_v = (dst_h >= src_h);
DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
sc->hratio, sc->vratio, sc->up_h, sc->up_v);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0a0c042a3155..3e493f48e0d4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -20,6 +20,8 @@
MODULE_IMPORT_NS(DMA_BUF);
+static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
struct drm_device *dev = exynos_gem->base.dev;
@@ -138,6 +140,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
.free = exynos_drm_gem_free_object,
.get_sg_table = exynos_drm_gem_prime_get_sg_table,
+ .mmap = exynos_drm_gem_mmap,
.vm_ops = &exynos_drm_gem_vm_ops,
};
@@ -357,12 +360,16 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
return 0;
}
-static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
int ret;
+ if (obj->import_attach)
+ return dma_buf_mmap(obj->dma_buf, vma, 0);
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
exynos_gem->flags);
@@ -388,26 +395,6 @@ err_close_vm:
return ret;
}
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj;
- int ret;
-
- /* set vm_area_struct. */
- ret = drm_gem_mmap(filp, vma);
- if (ret < 0) {
- DRM_ERROR("failed to mmap.\n");
- return ret;
- }
-
- obj = vma->vm_private_data;
-
- if (obj->import_attach)
- return dma_buf_mmap(obj->dma_buf, vma, 0);
-
- return exynos_drm_gem_mmap_obj(obj, vma);
-}
-
/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
@@ -469,15 +456,3 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
exynos_gem->sgt = sgt;
return &exynos_gem->base;
}
-
-int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- return exynos_drm_gem_mmap_obj(obj, vma);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index a23272fb96fb..79d7e1a87419 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -96,9 +96,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-/* set vm_flags and we can change the vm attribute to other one at here. */
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-
/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
@@ -107,7 +104,5 @@ struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
-int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
#endif
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index d7dd8ba90e3a..e95e96c565ba 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -3,8 +3,8 @@ config DRM_FSL_DCU
tristate "DRM Support for Freescale DCU"
depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
+ select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_PANEL
select REGMAP_MMIO
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 321e416489a9..45df9de22007 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -25,7 +25,6 @@
#include "framebuffer.h"
#include "gem.h"
-#include "gtt.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
@@ -82,14 +81,13 @@ static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
struct drm_framebuffer *fb = vma->vm_private_data;
struct drm_device *dev = fb->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
+ struct psb_gem_object *pobj = to_psb_gem_object(fb->obj[0]);
int page_num;
int i;
unsigned long address;
vm_fault_t ret = VM_FAULT_SIGBUS;
unsigned long pfn;
- unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
- gtt->offset;
+ unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + pobj->offset;
page_num = vma_pages(vma);
address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
@@ -226,31 +224,6 @@ static struct drm_framebuffer *psb_framebuffer_create
}
/**
- * psbfb_alloc - allocate frame buffer memory
- * @dev: the DRM device
- * @aligned_size: space needed
- *
- * Allocate the frame buffer. In the usual case we get a GTT range that
- * is stolen memory backed and life is simple. If there isn't sufficient
- * we fail as we don't have the virtual mapping space to really vmap it
- * and the kernel console code can't handle non linear framebuffers.
- *
- * Re-address this as and if the framebuffer layer grows this ability.
- */
-static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
-{
- struct gtt_range *backing;
- /* Begin by trying to use stolen memory backing */
- backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
- if (backing) {
- backing->gem.funcs = &psb_gem_object_funcs;
- drm_gem_private_object_init(dev, &backing->gem, aligned_size);
- return backing;
- }
- return NULL;
-}
-
-/**
* psbfb_create - create a framebuffer
* @fb_helper: the framebuffer helper
* @sizes: specification of the layout
@@ -268,7 +241,8 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
struct drm_mode_fb_cmd2 mode_cmd;
int size;
int ret;
- struct gtt_range *backing;
+ struct psb_gem_object *backing;
+ struct drm_gem_object *obj;
u32 bpp, depth;
mode_cmd.width = sizes->surface_width;
@@ -286,24 +260,25 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
size = ALIGN(size, PAGE_SIZE);
/* Allocate the framebuffer in the GTT with stolen page backing */
- backing = psbfb_alloc(dev, size);
- if (backing == NULL)
- return -ENOMEM;
+ backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
+ if (IS_ERR(backing))
+ return PTR_ERR(backing);
+ obj = &backing->base;
memset(dev_priv->vram_addr + backing->offset, 0, size);
info = drm_fb_helper_alloc_fbi(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
- goto out;
+ goto err_drm_gem_object_put;
}
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
- fb = psb_framebuffer_create(dev, &mode_cmd, &backing->gem);
+ fb = psb_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
- goto out;
+ goto err_drm_gem_object_put;
}
fb_helper->fb = fb;
@@ -334,8 +309,9 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
return 0;
-out:
- psb_gtt_free_range(dev, backing);
+
+err_drm_gem_object_put:
+ drm_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 5ae54c9d2819..8d65af80bb08 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -13,24 +13,105 @@
#include <linux/pagemap.h>
+#include <asm/set_memory.h>
+
#include <drm/drm.h>
#include <drm/drm_vma_manager.h>
#include "gem.h"
#include "psb_drv.h"
+int psb_gem_pin(struct psb_gem_object *pobj)
+{
+ struct drm_gem_object *obj = &pobj->base;
+ struct drm_device *dev = obj->dev;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ u32 gpu_base = dev_priv->gtt.gatt_start;
+ struct page **pages;
+ unsigned int npages;
+ int ret;
+
+ mutex_lock(&dev_priv->gtt_mutex);
+
+ if (pobj->in_gart || pobj->stolen)
+ goto out; /* already mapped */
+
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto err_mutex_unlock;
+ }
+
+ npages = obj->size / PAGE_SIZE;
+
+ set_pages_array_wc(pages, npages);
+
+ psb_gtt_insert_pages(dev_priv, &pobj->resource, pages);
+ psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages,
+ (gpu_base + pobj->offset), npages, 0, 0,
+ PSB_MMU_CACHED_MEMORY);
+
+ pobj->npage = npages;
+ pobj->pages = pages;
+
+out:
+ ++pobj->in_gart;
+ mutex_unlock(&dev_priv->gtt_mutex);
+
+ return 0;
+
+err_mutex_unlock:
+ mutex_unlock(&dev_priv->gtt_mutex);
+ return ret;
+}
+
+void psb_gem_unpin(struct psb_gem_object *pobj)
+{
+ struct drm_gem_object *obj = &pobj->base;
+ struct drm_device *dev = obj->dev;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ u32 gpu_base = dev_priv->gtt.gatt_start;
+
+ mutex_lock(&dev_priv->gtt_mutex);
+
+ WARN_ON(!pobj->in_gart);
+
+ --pobj->in_gart;
+
+ if (pobj->in_gart || pobj->stolen)
+ goto out;
+
+ psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
+ (gpu_base + pobj->offset), pobj->npage, 0, 0);
+ psb_gtt_remove_pages(dev_priv, &pobj->resource);
+
+ /* Reset caching flags */
+ set_pages_array_wb(pobj->pages, pobj->npage);
+
+ drm_gem_put_pages(obj, pobj->pages, true, false);
+ pobj->pages = NULL;
+ pobj->npage = 0;
+
+out:
+ mutex_unlock(&dev_priv->gtt_mutex);
+}
+
static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
static void psb_gem_free_object(struct drm_gem_object *obj)
{
- struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
+ struct psb_gem_object *pobj = to_psb_gem_object(obj);
- /* Remove the list map if one is present */
- drm_gem_free_mmap_offset(obj);
drm_gem_object_release(obj);
- /* This must occur last as it frees up the memory of the GEM object */
- psb_gtt_free_range(obj->dev, gtt);
+ /* Undo the mmap pin if we are destroying the object */
+ if (pobj->mmapping)
+ psb_gem_unpin(pobj);
+
+ WARN_ON(pobj->in_gart && !pobj->stolen);
+
+ release_resource(&pobj->resource);
+ kfree(pobj);
}
static const struct vm_operations_struct psb_gem_vm_ops = {
@@ -39,63 +120,60 @@ static const struct vm_operations_struct psb_gem_vm_ops = {
.close = drm_gem_vm_close,
};
-const struct drm_gem_object_funcs psb_gem_object_funcs = {
+static const struct drm_gem_object_funcs psb_gem_object_funcs = {
.free = psb_gem_free_object,
.vm_ops = &psb_gem_vm_ops,
};
-/**
- * psb_gem_create - create a mappable object
- * @file: the DRM file of the client
- * @dev: our device
- * @size: the size requested
- * @handlep: returned handle (opaque number)
- * @stolen: unused
- * @align: unused
- *
- * Create a GEM object, fill in the boilerplate and attach a handle to
- * it so that userspace can speak about it. This does the core work
- * for the various methods that do/will create GEM objects for things
- */
-int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
- u32 *handlep, int stolen, u32 align)
+struct psb_gem_object *
+psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align)
{
- struct gtt_range *r;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct psb_gem_object *pobj;
+ struct drm_gem_object *obj;
int ret;
- u32 handle;
size = roundup(size, PAGE_SIZE);
- /* Allocate our object - for now a direct gtt range which is not
- stolen memory backed */
- r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE);
- if (r == NULL) {
- dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
- return -ENOSPC;
- }
- r->gem.funcs = &psb_gem_object_funcs;
- /* Initialize the extra goodies GEM needs to do all the hard work */
- if (drm_gem_object_init(dev, &r->gem, size) != 0) {
- psb_gtt_free_range(dev, r);
- /* GEM doesn't give an error code so use -ENOMEM */
- dev_err(dev->dev, "GEM init failed for %lld\n", size);
- return -ENOMEM;
+ pobj = kzalloc(sizeof(*pobj), GFP_KERNEL);
+ if (!pobj)
+ return ERR_PTR(-ENOMEM);
+ obj = &pobj->base;
+
+ /* GTT resource */
+
+ ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen,
+ &pobj->offset);
+ if (ret)
+ goto err_kfree;
+
+ if (stolen) {
+ pobj->stolen = true;
+ pobj->in_gart = 1;
}
- /* Limit the object to 32bit mappings */
- mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
- /* Give the object a handle so we can carry it more easily */
- ret = drm_gem_handle_create(file, &r->gem, &handle);
- if (ret) {
- dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
- &r->gem, size);
- drm_gem_object_release(&r->gem);
- psb_gtt_free_range(dev, r);
- return ret;
+
+ /* GEM object */
+
+ obj->funcs = &psb_gem_object_funcs;
+
+ if (stolen) {
+ drm_gem_private_object_init(dev, obj, size);
+ } else {
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto err_release_resource;
+
+ /* Limit the object to 32-bit mappings */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
}
- /* We have the initial and handle reference but need only one now */
- drm_gem_object_put(&r->gem);
- *handlep = handle;
- return 0;
+
+ return pobj;
+
+err_release_resource:
+ release_resource(&pobj->resource);
+err_kfree:
+ kfree(pobj);
+ return ERR_PTR(ret);
}
/**
@@ -111,10 +189,40 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
- args->size = args->pitch * args->height;
- return psb_gem_create(file, dev, args->size, &args->handle, 0,
- PAGE_SIZE);
+ size_t pitch, size;
+ struct psb_gem_object *pobj;
+ struct drm_gem_object *obj;
+ u32 handle;
+ int ret;
+
+ pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+ pitch = ALIGN(pitch, 64);
+
+ size = pitch * args->height;
+ size = roundup(size, PAGE_SIZE);
+ if (!size)
+ return -EINVAL;
+
+ pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE);
+ if (IS_ERR(pobj))
+ return PTR_ERR(pobj);
+ obj = &pobj->base;
+
+ ret = drm_gem_handle_create(file, obj, &handle);
+ if (ret)
+ goto err_drm_gem_object_put;
+
+ drm_gem_object_put(obj);
+
+ args->pitch = pitch;
+ args->size = size;
+ args->handle = handle;
+
+ return 0;
+
+err_drm_gem_object_put:
+ drm_gem_object_put(obj);
+ return ret;
}
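
For reference, the dumb-buffer arithmetic in psb_gem_dumb_create() works out as follows. This is a freestanding sketch, not driver code: DIV_ROUND_UP and a power-of-two ALIGN_UP are re-implemented locally and PAGE_SIZE is assumed to be 4096.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1)) /* power-of-two 'a' only */

int main(void)
{
	unsigned long width = 1024, height = 768, bpp = 32;
	unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned long pitch, size;

	pitch = ALIGN_UP(width * DIV_ROUND_UP(bpp, 8), 64);	/* 4096 */
	size = ALIGN_UP(pitch * height, page_size);		/* 3145728 */
	printf("pitch=%lu size=%lu\n", pitch, size);
	return 0;
}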
/**
@@ -137,7 +245,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj;
- struct gtt_range *r;
+ struct psb_gem_object *pobj;
int err;
vm_fault_t ret;
unsigned long pfn;
@@ -149,7 +257,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
dev = obj->dev;
dev_priv = to_drm_psb_private(dev);
- r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
+ pobj = to_psb_gem_object(obj);
/* Make sure we don't parallel update on a fault, nor move or remove
something from beneath our feet */
@@ -157,14 +265,14 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
/* For now the mmap pins the object and it stays pinned. As things
stand that will do us no harm */
- if (r->mmapping == 0) {
- err = psb_gtt_pin(r);
+ if (pobj->mmapping == 0) {
+ err = psb_gem_pin(pobj);
if (err < 0) {
dev_err(dev->dev, "gma500: pin failed: %d\n", err);
ret = vmf_error(err);
goto fail;
}
- r->mmapping = 1;
+ pobj->mmapping = 1;
}
/* Page relative to the VMA start - we must calculate this ourselves
@@ -172,10 +280,10 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/* CPU view of the page, don't go via the GART for CPU writes */
- if (r->stolen)
- pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+ if (pobj->stolen)
+ pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT;
else
- pfn = page_to_pfn(r->pages[page_offset]);
+ pfn = page_to_pfn(pobj->pages[page_offset]);
ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
mutex_unlock(&dev_priv->mmap_mutex);
diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h
index bae6454ead29..79cced40c87f 100644
--- a/drivers/gpu/drm/gma500/gem.h
+++ b/drivers/gpu/drm/gma500/gem.h
@@ -8,11 +8,33 @@
#ifndef _GEM_H
#define _GEM_H
+#include <linux/kernel.h>
+
+#include <drm/drm_gem.h>
+
struct drm_device;
-extern const struct drm_gem_object_funcs psb_gem_object_funcs;
+struct psb_gem_object {
+ struct drm_gem_object base;
+
+ struct resource resource; /* GTT resource for our allocation */
+ u32 offset; /* GTT offset of our object */
+ int in_gart; /* Currently in the GART (ref ct) */
+ bool stolen; /* Backed from stolen RAM */
+ bool mmapping; /* Is mmappable */
+ struct page **pages; /* Backing pages if present */
+ int npage; /* Number of backing pages */
+};
+
+static inline struct psb_gem_object *to_psb_gem_object(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct psb_gem_object, base);
+}
+
+struct psb_gem_object *
+psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align);
-extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
- u64 size, u32 *handlep, int stolen, u32 align);
+int psb_gem_pin(struct psb_gem_object *pobj);
+void psb_gem_unpin(struct psb_gem_object *pobj);
#endif
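
to_psb_gem_object() above is the standard container_of() upcast from the embedded drm_gem_object back to the wrapping structure. A freestanding illustration of the pattern with toy types (none of these names are from the driver):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };
struct derived {
	struct base base;	/* embedded member; its offset may be nonzero */
	int extra;
};

int main(void)
{
	struct derived d = { .base = { .id = 1 }, .extra = 2 };
	struct base *b = &d.base;
	struct derived *back = container_of(b, struct derived, base);

	printf("extra=%d\n", back->extra);	/* prints 2 */
	return 0;
}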
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index cbcecbaa041b..99da3118131a 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -15,6 +15,7 @@
#include <drm/drm_vblank.h>
#include "framebuffer.h"
+#include "gem.h"
#include "gma_display.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
@@ -54,7 +55,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct gtt_range *gtt;
+ struct psb_gem_object *pobj;
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -70,14 +71,14 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
goto gma_pipe_cleaner;
}
- gtt = to_gtt_range(fb->obj[0]);
+ pobj = to_psb_gem_object(fb->obj[0]);
/* We are displaying this buffer, make sure it is actually loaded
into the GTT */
- ret = psb_gtt_pin(gtt);
+ ret = psb_gem_pin(pobj);
if (ret < 0)
goto gma_pipe_set_base_exit;
- start = gtt->offset;
+ start = pobj->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
@@ -125,7 +126,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
gma_pipe_cleaner:
/* If there was a previous display we can now unpin it */
if (old_fb)
- psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));
+ psb_gem_unpin(to_psb_gem_object(old_fb->obj[0]));
gma_pipe_set_base_exit:
gma_power_end(dev);
@@ -331,8 +332,8 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
uint32_t temp;
size_t addr = 0;
- struct gtt_range *gt;
- struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
+ struct psb_gem_object *pobj;
+ struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj;
struct drm_gem_object *obj;
void *tmp_dst, *tmp_src;
int ret = 0, i, cursor_pages;
@@ -348,9 +349,8 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
/* Unpin the old GEM object */
if (gma_crtc->cursor_obj) {
- gt = container_of(gma_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
+ pobj = to_psb_gem_object(gma_crtc->cursor_obj);
+ psb_gem_unpin(pobj);
drm_gem_object_put(gma_crtc->cursor_obj);
gma_crtc->cursor_obj = NULL;
}
@@ -375,40 +375,40 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
goto unref_cursor;
}
- gt = container_of(obj, struct gtt_range, gem);
+ pobj = to_psb_gem_object(obj);
/* Pin the memory into the GTT */
- ret = psb_gtt_pin(gt);
+ ret = psb_gem_pin(pobj);
if (ret) {
dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
goto unref_cursor;
}
if (dev_priv->ops->cursor_needs_phys) {
- if (cursor_gt == NULL) {
+ if (!cursor_pobj) {
dev_err(dev->dev, "No hardware cursor mem available");
ret = -ENOMEM;
goto unref_cursor;
}
/* Prevent overflow */
- if (gt->npage > 4)
+ if (pobj->npage > 4)
cursor_pages = 4;
else
- cursor_pages = gt->npage;
+ cursor_pages = pobj->npage;
/* Copy the cursor to cursor mem */
- tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
+ tmp_dst = dev_priv->vram_addr + cursor_pobj->offset;
for (i = 0; i < cursor_pages; i++) {
- tmp_src = kmap(gt->pages[i]);
+ tmp_src = kmap(pobj->pages[i]);
memcpy(tmp_dst, tmp_src, PAGE_SIZE);
- kunmap(gt->pages[i]);
+ kunmap(pobj->pages[i]);
tmp_dst += PAGE_SIZE;
}
addr = gma_crtc->cursor_addr;
} else {
- addr = gt->offset;
+ addr = pobj->offset;
gma_crtc->cursor_addr = addr;
}
@@ -425,8 +425,8 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
/* unpin the old bo */
if (gma_crtc->cursor_obj) {
- gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
- psb_gtt_unpin(gt);
+ pobj = to_psb_gem_object(gma_crtc->cursor_obj);
+ psb_gem_unpin(pobj);
drm_gem_object_put(gma_crtc->cursor_obj);
}
@@ -483,14 +483,14 @@ void gma_crtc_commit(struct drm_crtc *crtc)
void gma_crtc_disable(struct drm_crtc *crtc)
{
- struct gtt_range *gt;
+ struct psb_gem_object *pobj;
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
- gt = to_gtt_range(crtc->primary->fb->obj[0]);
- psb_gtt_unpin(gt);
+ pobj = to_psb_gem_object(crtc->primary->fb->obj[0]);
+ psb_gem_unpin(pobj);
}
}
@@ -498,6 +498,9 @@ void gma_crtc_destroy(struct drm_crtc *crtc)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ if (gma_crtc->cursor_pobj)
+ drm_gem_object_put(&gma_crtc->cursor_pobj->base);
+
kfree(gma_crtc->crtc_state);
drm_crtc_cleanup(crtc);
kfree(gma_crtc);
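
The cursor-copy loop kept by this hunk still uses kmap()/kunmap(). On kernels that provide kmap_local_page(), the same loop could be written with the local-mapping API; a hedged sketch under that assumption, reusing the hunk's variable names:

	for (i = 0; i < cursor_pages; i++) {
		tmp_src = kmap_local_page(pobj->pages[i]); /* assumes kmap_local_page() is available */
		memcpy(tmp_dst, tmp_src, PAGE_SIZE);
		kunmap_local(tmp_src);
		tmp_dst += PAGE_SIZE;
	}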
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 55a2a6919533..309ffe921bfd 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -7,10 +7,7 @@
* Alan Cox <alan@linux.intel.com>
*/
-#include <linux/shmem_fs.h>
-
-#include <asm/set_memory.h>
-
+#include "gem.h" /* TODO: for struct psb_gem_object, see psb_gtt_restore() */
#include "psb_drv.h"
@@ -18,6 +15,33 @@
* GTT resource allocator - manage page mappings in GTT space
*/
+int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res,
+ const char *name, resource_size_t size, resource_size_t align,
+ bool stolen, u32 *offset)
+{
+ struct resource *root = pdev->gtt_mem;
+ resource_size_t start, end;
+ int ret;
+
+ if (stolen) {
+ /* The start of the GTT is backed by stolen pages. */
+ start = root->start;
+ end = root->start + pdev->gtt.stolen_size - 1;
+ } else {
+ /* The rest is backed by system pages. */
+ start = root->start + pdev->gtt.stolen_size;
+ end = root->end;
+ }
+
+ res->name = name;
+ ret = allocate_resource(root, res, size, start, end, align, NULL, NULL);
+ if (ret)
+ return ret;
+ *offset = res->start - root->start;
+
+ return 0;
+}
+
/**
* psb_gtt_mask_pte - generate GTT pte entry
* @pfn: page number to encode
@@ -43,281 +67,62 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
return (pfn << PAGE_SHIFT) | mask;
}
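
psb_gtt_mask_pte() encodes a PTE as the page-frame number shifted back up into a bus address with type bits OR'ed into the low bits. A standalone check of that encoding (4 KiB pages assumed; 0x1 is a made-up type mask standing in for PSB_MMU_CACHED_MEMORY's real bits):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	uint32_t pfn = 0x1234;
	uint32_t mask = 0x1;	/* hypothetical type bits */
	uint32_t pte = (pfn << PAGE_SHIFT) | mask;

	printf("pte=0x%08x\n", pte);	/* 0x01234001 */
	return 0;
}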
-/**
- * psb_gtt_entry - find the GTT entries for a gtt_range
- * @dev: our DRM device
- * @r: our GTT range
- *
- * Given a gtt_range object return the GTT offset of the page table
- * entries for this gtt_range
- */
-static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+static u32 __iomem *psb_gtt_entry(struct drm_psb_private *pdev, const struct resource *res)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- unsigned long offset;
+ unsigned long offset = res->start - pdev->gtt_mem->start;
- offset = r->resource.start - dev_priv->gtt_mem->start;
-
- return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
+ return pdev->gtt_map + (offset >> PAGE_SHIFT);
}
-/**
- * psb_gtt_insert - put an object into the GTT
- * @dev: our DRM device
- * @r: our GTT range
- * @resume: on resume
- *
- * Take our preallocated GTT range and insert the GEM object into
- * the GTT. This is protected via the gtt mutex which the caller
- * must hold.
+/*
+ * Take our preallocated GTT range and insert the GEM object into
+ * the GTT. This is protected via the gtt mutex which the caller
+ * must hold.
*/
-static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
- int resume)
+void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res,
+ struct page **pages)
{
+ resource_size_t npages, i;
u32 __iomem *gtt_slot;
u32 pte;
- struct page **pages;
- int i;
- if (r->pages == NULL) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- WARN_ON(r->stolen); /* refcount these maybe ? */
-
- gtt_slot = psb_gtt_entry(dev, r);
- pages = r->pages;
+ /* Write our page entries into the GTT itself */
- if (!resume) {
- /* Make sure changes are visible to the GPU */
- set_pages_array_wc(pages, r->npage);
- }
+ npages = resource_size(res) >> PAGE_SHIFT;
+ gtt_slot = psb_gtt_entry(pdev, res);
- /* Write our page entries into the GTT itself */
- for (i = 0; i < r->npage; i++) {
- pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
- PSB_MMU_CACHED_MEMORY);
- iowrite32(pte, gtt_slot++);
+ for (i = 0; i < npages; ++i, ++gtt_slot) {
+ pte = psb_gtt_mask_pte(page_to_pfn(pages[i]), PSB_MMU_CACHED_MEMORY);
+ iowrite32(pte, gtt_slot);
}
/* Make sure all the entries are set before we return */
ioread32(gtt_slot - 1);
-
- return 0;
}
-/**
- * psb_gtt_remove - remove an object from the GTT
- * @dev: our DRM device
- * @r: our GTT range
- *
- * Remove a preallocated GTT range from the GTT. Overwrite all the
- * page table entries with the dummy page. This is protected via the gtt
- * mutex which the caller must hold.
+/*
+ * Remove a preallocated GTT range from the GTT. Overwrite all the
+ * page table entries with the dummy page. This is protected via the gtt
+ * mutex which the caller must hold.
*/
-static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ resource_size_t npages, i;
u32 __iomem *gtt_slot;
u32 pte;
- int i;
-
- WARN_ON(r->stolen);
-
- gtt_slot = psb_gtt_entry(dev, r);
- pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
- PSB_MMU_CACHED_MEMORY);
-
- for (i = 0; i < r->npage; i++)
- iowrite32(pte, gtt_slot++);
- ioread32(gtt_slot - 1);
- set_pages_array_wb(r->pages, r->npage);
-}
-
-/**
- * psb_gtt_attach_pages - attach and pin GEM pages
- * @gt: the gtt range
- *
- * Pin and build an in kernel list of the pages that back our GEM object.
- * While we hold this the pages cannot be swapped out. This is protected
- * via the gtt mutex which the caller must hold.
- */
-static int psb_gtt_attach_pages(struct gtt_range *gt)
-{
- struct page **pages;
-
- WARN_ON(gt->pages);
-
- pages = drm_gem_get_pages(&gt->gem);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
- gt->npage = gt->gem.size / PAGE_SIZE;
- gt->pages = pages;
-
- return 0;
-}
-
-/**
- * psb_gtt_detach_pages - attach and pin GEM pages
- * @gt: the gtt range
- *
- * Undo the effect of psb_gtt_attach_pages. At this point the pages
- * must have been removed from the GTT as they could now be paged out
- * and move bus address. This is protected via the gtt mutex which the
- * caller must hold.
- */
-static void psb_gtt_detach_pages(struct gtt_range *gt)
-{
- drm_gem_put_pages(&gt->gem, gt->pages, true, false);
- gt->pages = NULL;
-}
-/**
- * psb_gtt_pin - pin pages into the GTT
- * @gt: range to pin
- *
- * Pin a set of pages into the GTT. The pins are refcounted so that
- * multiple pins need multiple unpins to undo.
- *
- * Non GEM backed objects treat this as a no-op as they are always GTT
- * backed objects.
- */
-int psb_gtt_pin(struct gtt_range *gt)
-{
- int ret = 0;
- struct drm_device *dev = gt->gem.dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- u32 gpu_base = dev_priv->gtt.gatt_start;
+ /* Install scratch page for the resource */
- mutex_lock(&dev_priv->gtt_mutex);
+ pte = psb_gtt_mask_pte(page_to_pfn(pdev->scratch_page), PSB_MMU_CACHED_MEMORY);
- if (gt->in_gart == 0 && gt->stolen == 0) {
- ret = psb_gtt_attach_pages(gt);
- if (ret < 0)
- goto out;
- ret = psb_gtt_insert(dev, gt, 0);
- if (ret < 0) {
- psb_gtt_detach_pages(gt);
- goto out;
- }
- psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
- gt->pages, (gpu_base + gt->offset),
- gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
- }
- gt->in_gart++;
-out:
- mutex_unlock(&dev_priv->gtt_mutex);
- return ret;
-}
+ npages = resource_size(res) >> PAGE_SHIFT;
+ gtt_slot = psb_gtt_entry(pdev, res);
-/**
- * psb_gtt_unpin - Drop a GTT pin requirement
- * @gt: range to pin
- *
- * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
- * will be removed from the GTT which will also drop the page references
- * and allow the VM to clean up or page stuff.
- *
- * Non GEM backed objects treat this as a no-op as they are always GTT
- * backed objects.
- */
-void psb_gtt_unpin(struct gtt_range *gt)
-{
- struct drm_device *dev = gt->gem.dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- u32 gpu_base = dev_priv->gtt.gatt_start;
+ for (i = 0; i < npages; ++i, ++gtt_slot)
+ iowrite32(pte, gtt_slot);
- mutex_lock(&dev_priv->gtt_mutex);
-
- WARN_ON(!gt->in_gart);
-
- gt->in_gart--;
- if (gt->in_gart == 0 && gt->stolen == 0) {
- psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
- (gpu_base + gt->offset), gt->npage, 0, 0);
- psb_gtt_remove(dev, gt);
- psb_gtt_detach_pages(gt);
- }
-
- mutex_unlock(&dev_priv->gtt_mutex);
-}
-
-/*
- * GTT resource allocator - allocate and manage GTT address space
- */
-
-/**
- * psb_gtt_alloc_range - allocate GTT address space
- * @dev: Our DRM device
- * @len: length (bytes) of address space required
- * @name: resource name
- * @backed: resource should be backed by stolen pages
- * @align: requested alignment
- *
- * Ask the kernel core to find us a suitable range of addresses
- * to use for a GTT mapping.
- *
- * Returns a gtt_range structure describing the object, or NULL on
- * error. On successful return the resource is both allocated and marked
- * as in use.
- */
-struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
- const char *name, int backed, u32 align)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct gtt_range *gt;
- struct resource *r = dev_priv->gtt_mem;
- int ret;
- unsigned long start, end;
-
- if (backed) {
- /* The start of the GTT is the stolen pages */
- start = r->start;
- end = r->start + dev_priv->gtt.stolen_size - 1;
- } else {
- /* The rest we will use for GEM backed objects */
- start = r->start + dev_priv->gtt.stolen_size;
- end = r->end;
- }
-
- gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
- if (gt == NULL)
- return NULL;
- gt->resource.name = name;
- gt->stolen = backed;
- gt->in_gart = backed;
- /* Ensure this is set for non GEM objects */
- gt->gem.dev = dev;
- ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
- len, start, end, align, NULL, NULL);
- if (ret == 0) {
- gt->offset = gt->resource.start - r->start;
- return gt;
- }
- kfree(gt);
- return NULL;
-}
-
-/**
- * psb_gtt_free_range - release GTT address space
- * @dev: our DRM device
- * @gt: a mapping created with psb_gtt_alloc_range
- *
- * Release a resource that was allocated with psb_gtt_alloc_range. If the
- * object has been pinned by mmap users we clean this up here currently.
- */
-void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
-{
- /* Undo the mmap pin if we are destroying the object */
- if (gt->mmapping) {
- psb_gtt_unpin(gt);
- gt->mmapping = 0;
- }
- WARN_ON(gt->in_gart && !gt->stolen);
- release_resource(&gt->resource);
- kfree(gt);
+ /* Make sure all the entries are set before we return */
+ ioread32(gtt_slot - 1);
}
static void psb_gtt_alloc(struct drm_device *dev)
@@ -498,7 +303,7 @@ int psb_gtt_restore(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct resource *r = dev_priv->gtt_mem->child;
- struct gtt_range *range;
+ struct psb_gem_object *pobj;
unsigned int restored = 0, total = 0, size = 0;
/* On resume, the gtt_mutex is already initialized */
@@ -506,10 +311,15 @@ int psb_gtt_restore(struct drm_device *dev)
psb_gtt_init(dev, 1);
while (r != NULL) {
- range = container_of(r, struct gtt_range, resource);
- if (range->pages) {
- psb_gtt_insert(dev, range, 1);
- size += range->resource.end - range->resource.start;
+ /*
+ * TODO: GTT restoration needs a refactoring, so that we don't have to touch
+ * struct psb_gem_object here. The type represents a GEM object and is
+ * not related to the GTT itself.
+ */
+ pobj = container_of(r, struct psb_gem_object, resource);
+ if (pobj->pages) {
+ psb_gtt_insert_pages(dev_priv, &pobj->resource, pobj->pages);
+ size += pobj->resource.end - pobj->resource.start;
restored++;
}
r = r->sibling;
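
psb_gtt_restore() reaches every live allocation by walking the GTT root resource's children over the ->sibling links. The traversal, reduced to its skeleton (a sketch; 'root' stands for dev_priv->gtt_mem):

	struct resource *r;

	for (r = root->child; r; r = r->sibling) {
		/* each 'r' is one allocation made against 'root' */
	}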
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index 2bf165849ebe..ff1dcdd1ff52 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -10,6 +10,8 @@
#include <drm/drm_gem.h>
+struct drm_psb_private;
+
/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
struct psb_gtt {
uint32_t gatt_start;
@@ -26,27 +28,14 @@ struct psb_gtt {
/* Exported functions */
extern int psb_gtt_init(struct drm_device *dev, int resume);
extern void psb_gtt_takedown(struct drm_device *dev);
+extern int psb_gtt_restore(struct drm_device *dev);
-/* Each gtt_range describes an allocation in the GTT area */
-struct gtt_range {
- struct resource resource; /* Resource for our allocation */
- u32 offset; /* GTT offset of our object */
- struct drm_gem_object gem; /* GEM high level stuff */
- int in_gart; /* Currently in the GART (ref ct) */
- bool stolen; /* Backed from stolen RAM */
- bool mmapping; /* Is mmappable */
- struct page **pages; /* Backing pages if present */
- int npage; /* Number of backing pages */
-};
+int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res,
+ const char *name, resource_size_t size, resource_size_t align,
+ bool stolen, u32 *offset);
-#define to_gtt_range(x) container_of(x, struct gtt_range, gem)
+void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res,
+ struct page **pages);
+void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res);
-extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
- const char *name, int backed,
- u32 align);
-extern void psb_gtt_kref_put(struct gtt_range *gt);
-extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
-extern int psb_gtt_pin(struct gtt_range *gt);
-extern void psb_gtt_unpin(struct gtt_range *gt);
-extern int psb_gtt_restore(struct drm_device *dev);
#endif
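
Taken together, the reworked interface above is used in three steps: reserve an address range, map pages into it, and later tear both down. A hedged sketch of a caller (error unwinding trimmed; real users embed the struct resource in a longer-lived object, as psb_gem_create() does):

	struct resource res;
	u32 offset;
	int ret;

	ret = psb_gtt_allocate_resource(pdev, &res, "example", size,
					PAGE_SIZE, false /* system pages */,
					&offset);
	if (ret)
		return ret;

	psb_gtt_insert_pages(pdev, &res, pages);
	/* ... the object is GPU-visible at 'offset' ... */
	psb_gtt_remove_pages(pdev, &res);
	release_resource(&res);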
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index c6b115954b7d..36c7c2686c90 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -10,6 +10,7 @@
#include <drm/drm_fourcc.h>
#include "framebuffer.h"
+#include "gem.h"
#include "gma_display.h"
#include "power.h"
#include "psb_drv.h"
@@ -608,7 +609,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
- start = to_gtt_range(fb->obj[0])->offset;
+ start = to_psb_gem_object(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 7a10bb39ef0b..65cf1c79dd7c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -19,6 +19,7 @@
#include <acpi/video.h>
#include <drm/drm.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
@@ -448,6 +449,17 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct drm_device *dev;
int ret;
+ /*
+ * We cannot yet easily find the framebuffer's location in memory. So
+ * remove all framebuffers here.
+ *
+ * TODO: Refactor psb_driver_load() to map vdc_reg earlier. Then we
+ * might be able to read the framebuffer range from the device.
+ */
+ ret = drm_aperture_remove_framebuffers(true, &driver);
+ if (ret)
+ return ret;
+
ret = pcim_enable_device(pdev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index f5f259fde88e..d5f95212934e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -12,6 +12,7 @@
#include <drm/drm_plane_helper.h>
#include "framebuffer.h"
+#include "gem.h"
#include "gma_display.h"
#include "power.h"
#include "psb_drv.h"
@@ -454,23 +455,21 @@ static void psb_intel_cursor_init(struct drm_device *dev,
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
- struct gtt_range *cursor_gt;
+ struct psb_gem_object *cursor_pobj;
if (dev_priv->ops->cursor_needs_phys) {
/* Allocate 4 pages of stolen mem for a hardware cursor. That
* is enough for the 64 x 64 ARGB cursors we support.
*/
- cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1,
- PAGE_SIZE);
- if (!cursor_gt) {
- gma_crtc->cursor_gt = NULL;
+ cursor_pobj = psb_gem_create(dev, 4 * PAGE_SIZE, "cursor", true, PAGE_SIZE);
+ if (IS_ERR(cursor_pobj)) {
+ gma_crtc->cursor_pobj = NULL;
goto out;
}
- gma_crtc->cursor_gt = cursor_gt;
- gma_crtc->cursor_addr = dev_priv->stolen_base +
- cursor_gt->offset;
+ gma_crtc->cursor_pobj = cursor_pobj;
+ gma_crtc->cursor_addr = dev_priv->stolen_base + cursor_pobj->offset;
} else {
- gma_crtc->cursor_gt = NULL;
+ gma_crtc->cursor_pobj = NULL;
}
out:
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 5340225d6997..db3e757328fe 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -140,7 +140,7 @@ struct gma_crtc {
int pipe;
int plane;
uint32_t cursor_addr;
- struct gtt_range *cursor_gt;
+ struct psb_gem_object *cursor_pobj;
u8 lut_adj[256];
struct psb_intel_framebuffer *fbdev_fb;
/* a mode_set for fbdev users on this crtc */
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index daf75c178c2b..a150a5a4b5d4 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -74,7 +74,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
if (!buf)
return 0;
- drm_fb_xrgb8888_to_gray8(buf, src, fb, rect);
+ drm_fb_xrgb8888_to_gray8(buf, 0, src, fb, rect);
pix8 = buf;
for (y = 0; y < height; y++) {
@@ -190,23 +190,23 @@ retry:
goto end_cpu_access;
}
} else if (format->format == DRM_FORMAT_R8) {
- drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, rect);
+ drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, rect);
} else if (format->format == DRM_FORMAT_RGB332) {
- drm_fb_xrgb8888_to_rgb332(buf, vaddr, fb, rect);
+ drm_fb_xrgb8888_to_rgb332(buf, 0, vaddr, fb, rect);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(buf, vaddr, fb, rect, gud_is_big_endian());
+ drm_fb_xrgb8888_to_rgb565(buf, 0, vaddr, fb, rect, gud_is_big_endian());
} else if (format->format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(buf, vaddr, fb, rect);
+ drm_fb_xrgb8888_to_rgb888(buf, 0, vaddr, fb, rect);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
} else if (gud_is_big_endian() && format->cpp[0] > 1) {
- drm_fb_swab(buf, vaddr, fb, rect, !import_attach);
+ drm_fb_swab(buf, 0, vaddr, fb, rect, !import_attach);
} else if (compression && !import_attach && pitch == fb->pitches[0]) {
/* can compress directly from the framebuffer */
buf = vaddr + rect->y1 * pitch;
} else {
- drm_fb_memcpy(buf, vaddr, fb, rect);
+ drm_fb_memcpy(buf, 0, vaddr, fb, rect);
}
memset(req, 0, sizeof(*req));
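
All the drm_fb_* conversion helpers touched here gained a leading destination-pitch parameter; passing 0, as the hunk does, asks the helper to derive the pitch from the clip width. A hedged sketch of supplying an explicit pitch instead ('dst_pitch' is a caller-chosen stride, not something this driver computes):

	/* one destination line per clip row, RGB565 at 2 bytes per pixel */
	unsigned int dst_pitch = drm_rect_width(rect) * 2;

	drm_fb_xrgb8888_to_rgb565(buf, dst_pitch, vaddr, fb, rect, false);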
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
index 290553e2f6b4..b770f7662830 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Kconfig
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -4,7 +4,6 @@ config DRM_HISI_KIRIN
depends on DRM && OF && ARM64
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_MIPI_DSI
help
Choose this option if you have a HiSilicon Kirin chipset (hi6220).
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 952cfdb1961d..1d556482bb46 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -81,7 +81,7 @@ struct dsi_hw_ctx {
struct dw_dsi {
struct drm_encoder encoder;
- struct drm_bridge *bridge;
+ struct device *dev;
struct mipi_dsi_host host;
struct drm_display_mode cur_mode;
struct dsi_hw_ctx *ctx;
@@ -720,10 +720,13 @@ static int dw_drm_encoder_init(struct device *dev,
return 0;
}
+static const struct component_ops dsi_ops;
static int dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *mdsi)
{
struct dw_dsi *dsi = host_to_dsi(host);
+ struct device *dev = host->dev;
+ int ret;
if (mdsi->lanes < 1 || mdsi->lanes > 4) {
DRM_ERROR("dsi device params invalid\n");
@@ -734,13 +737,20 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
dsi->format = mdsi->format;
dsi->mode_flags = mdsi->mode_flags;
+ ret = component_add(dev, &dsi_ops);
+ if (ret)
+ return ret;
+
return 0;
}
static int dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *mdsi)
{
- /* do nothing */
+ struct device *dev = host->dev;
+
+ component_del(dev, &dsi_ops);
+
return 0;
}
@@ -768,7 +778,17 @@ static int dsi_host_init(struct device *dev, struct dw_dsi *dsi)
static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
{
struct drm_encoder *encoder = &dsi->encoder;
- struct drm_bridge *bridge = dsi->bridge;
+ struct drm_bridge *bridge;
+ struct device_node *np = dsi->dev->of_node;
+ int ret;
+
+ /*
+ * Get the endpoint node. In our case, dsi has one output port1
+ * to which the external HDMI bridge is connected.
+ */
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &bridge);
+ if (ret)
+ return ret;
/* associate the bridge to dsi encoder */
return drm_bridge_attach(encoder, bridge, NULL, 0);
@@ -785,10 +805,6 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = dsi_host_init(dev, dsi);
- if (ret)
- return ret;
-
ret = dsi_bridge_init(drm_dev, dsi);
if (ret)
return ret;
@@ -809,17 +825,7 @@ static const struct component_ops dsi_ops = {
static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
{
struct dsi_hw_ctx *ctx = dsi->ctx;
- struct device_node *np = pdev->dev.of_node;
struct resource *res;
- int ret;
-
- /*
- * Get the endpoint node. In our case, dsi has one output port1
- * to which the external HDMI bridge is connected.
- */
- ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
- if (ret)
- return ret;
ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(ctx->pclk)) {
@@ -852,6 +858,7 @@ static int dsi_probe(struct platform_device *pdev)
dsi = &data->dsi;
ctx = &data->ctx;
dsi->ctx = ctx;
+ dsi->dev = &pdev->dev;
ret = dsi_parse_dt(pdev, dsi);
if (ret)
@@ -859,12 +866,19 @@ static int dsi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- return component_add(&pdev->dev, &dsi_ops);
+ ret = dsi_host_init(&pdev->dev, dsi);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int dsi_remove(struct platform_device *pdev)
{
- component_del(&pdev->dev, &dsi_ops);
+ struct dsi_data *data = platform_get_drvdata(pdev);
+ struct dw_dsi *dsi = &data->dsi;
+
+ mipi_dsi_host_unregister(&dsi->host);
return 0;
}
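
The net effect of the dw_drm_dsi.c changes is a reordered bring-up: the component is added when the DSI peripheral attaches rather than at probe time, so the aggregate DRM device only binds once the downstream bridge can be resolved. The resulting sequence, sketched from the function names above:

/*
 * dsi_probe()
 *   dsi_parse_dt()
 *   dsi_host_init()       registers the MIPI DSI host
 *       ...peripheral driver probes...
 * dsi_host_attach()
 *   component_add()       the aggregate device may now bind
 * dsi_bind()
 *   dsi_bridge_init()     resolves the bridge from OF graph port 1
 * dsi_host_detach()
 *   component_del()       undone in the reverse direction
 */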
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 8c97a20dfe23..93f51e70a951 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -23,13 +23,16 @@ static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
struct drm_rect *rect)
{
struct hyperv_drm_device *hv = to_hv(fb->dev);
+ void __iomem *dst = hv->vram;
void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
int idx;
if (!drm_dev_enter(&hv->dev, &idx))
return -ENODEV;
- drm_fb_memcpy_dstclip(hv->vram, fb->pitches[0], vmap, fb, rect);
+ dst += drm_fb_clip_offset(fb->pitches[0], fb->format, rect);
+ drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, rect);
+
drm_dev_exit(idx);
return 0;
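
drm_fb_clip_offset() used above reduces to the usual rows-then-columns byte offset into the destination, which here is __iomem memory, hence the copy goes through drm_fb_memcpy_toio() rather than a plain memcpy. A standalone check of the arithmetic (XRGB8888 at 4 bytes per pixel assumed):

#include <stdio.h>

int main(void)
{
	unsigned long pitch = 4096;	/* bytes per scanline */
	unsigned long cpp = 4;		/* assumed XRGB8888 */
	unsigned long x1 = 100, y1 = 50;	/* clip origin */
	unsigned long offset = y1 * pitch + x1 * cpp;

	printf("offset=%lu\n", offset);	/* 50 * 4096 + 100 * 4 = 205200 */
	return 0;
}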
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 84b6fc70cbf5..a4c94dc2e216 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -21,7 +21,7 @@ config DRM_I915
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
select SYNC_FILE
- select IOSF_MBI
+ select IOSF_MBI if X86
select CRC32
select SND_HDA_I915 if SND_HDA_CORE
select CEC_CORE if CEC_NOTIFIER
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 660bb03de6fc..1b62b9f65196 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -30,7 +30,7 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
# core driver code
-i915-y += i915_drv.o \
+i915-y += i915_driver.o \
i915_config.o \
i915_irq.o \
i915_getparam.o \
@@ -60,7 +60,6 @@ i915-y += i915_drv.o \
# core library code
i915-y += \
- dma_resv_utils.o \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
@@ -154,6 +153,7 @@ gem-y += \
gem/i915_gem_throttle.o \
gem/i915_gem_tiling.o \
gem/i915_gem_ttm.o \
+ gem/i915_gem_ttm_move.o \
gem/i915_gem_ttm_pm.o \
gem/i915_gem_userptr.o \
gem/i915_gem_wait.o \
@@ -163,6 +163,7 @@ i915-y += \
i915_active.o \
i915_buddy.o \
i915_cmd_parser.o \
+ i915_deps.o \
i915_gem_evict.o \
i915_gem_gtt.o \
i915_gem_ww.o \
@@ -173,6 +174,7 @@ i915-y += \
i915_trace_points.o \
i915_ttm_buddy_manager.o \
i915_vma.o \
+ i915_vma_snapshot.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -226,6 +228,8 @@ i915-y += \
display/intel_hotplug.o \
display/intel_lpe_audio.o \
display/intel_overlay.o \
+ display/intel_pch_display.o \
+ display/intel_pch_refclk.o \
display/intel_plane_initial.o \
display/intel_psr.o \
display/intel_quirks.o \
@@ -256,6 +260,7 @@ i915-y += \
display/intel_crt.o \
display/intel_ddi.o \
display/intel_ddi_buf_trans.o \
+ display/intel_display_trace.o \
display/intel_dp.o \
display/intel_dp_aux.o \
display/intel_dp_aux_backlight.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index dc41868d01ef..f37677df6ebf 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -9,6 +9,7 @@
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index f5b4dd5b4275..06e00b1eaa7c 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -8,6 +8,7 @@
#include "g4x_hdmi.h"
#include "intel_audio.h"
#include "intel_connector.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index b1439ba78f67..85950ff67609 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
+#include "intel_fbc.h"
#include "intel_sprite.h"
#include "i9xx_plane.h"
@@ -60,22 +61,11 @@ static const u32 vlv_primary_formats[] = {
DRM_FORMAT_XBGR16161616F,
};
-static const u64 i9xx_format_modifiers[] = {
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
- }
switch (format) {
case DRM_FORMAT_C8:
@@ -92,13 +82,8 @@ static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
- }
switch (format) {
case DRM_FORMAT_C8:
@@ -136,6 +121,15 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
return i9xx_plane == PLANE_A;
}
+static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ if (i9xx_plane_has_fbc(dev_priv, i9xx_plane))
+ return dev_priv->fbc;
+ else
+ return NULL;
+}
+
static bool i9xx_plane_has_windowing(struct intel_plane *plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -272,7 +266,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
u32 alignment = intel_surf_alignment(fb, 0);
int cpp = fb->format->cpp[0];
- while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].stride) {
+ while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
if (offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
@@ -418,38 +412,25 @@ static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
return DIV_ROUND_UP(pixel_rate * num, den);
}
-static void i9xx_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static void i9xx_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 linear_offset;
- int x = plane_state->view.color_plane[0].x;
- int y = plane_state->view.color_plane[0].y;
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_y = plane_state->uapi.dst.y1;
- int crtc_w = drm_rect_width(&plane_state->uapi.dst);
- int crtc_h = drm_rect_height(&plane_state->uapi.dst);
unsigned long irqflags;
- u32 dspaddr_offset;
- u32 dspcntr;
-
- dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
-
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- if (DISPLAY_VER(dev_priv) >= 4)
- dspaddr_offset = plane_state->view.color_plane[0].offset;
- else
- dspaddr_offset = linear_offset;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
- plane_state->view.color_plane[0].stride);
+ plane_state->view.color_plane[0].mapping_stride);
if (DISPLAY_VER(dev_priv) < 4) {
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
+
/*
* PLANE_A doesn't actually have a full window
* generator but let's assume we still need to
@@ -459,7 +440,39 @@ static void i9xx_update_plane(struct intel_plane *plane,
(crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
((crtc_h - 1) << 16) | (crtc_w - 1));
- } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ int x = plane_state->view.color_plane[0].x;
+ int y = plane_state->view.color_plane[0].y;
+ u32 dspcntr, dspaddr_offset, linear_offset;
+ unsigned long irqflags;
+
+ dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ dspaddr_offset = plane_state->view.color_plane[0].offset;
+ else
+ dspaddr_offset = linear_offset;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
+
intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
(crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
@@ -493,8 +506,22 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void i9xx_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+static void i830_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ /*
+ * On i830/i845 all registers are self-arming [ALM040].
+ *
+ * Additional breakage on i830 causes register reads to return
+ * the last latched value instead of the last written value [ALM026].
+ */
+ i9xx_plane_update_noarm(plane, crtc_state, plane_state);
+ i9xx_plane_update_arm(plane, crtc_state, plane_state);
+}
+
+static void i9xx_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -768,6 +795,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
+ const u64 *modifiers;
const u32 *formats;
int num_formats;
int ret, zpos;
@@ -789,12 +817,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->id = PLANE_PRIMARY;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
- if (plane->has_fbc) {
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
- }
+ intel_fbc_add_plane(i9xx_plane_fbc(dev_priv, plane->i9xx_plane), plane);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
formats = vlv_primary_formats;
@@ -851,8 +874,13 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->max_stride = ilk_primary_max_stride;
}
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
+ if (IS_I830(dev_priv) || IS_I845G(dev_priv)) {
+ plane->update_arm = i830_plane_update_arm;
+ } else {
+ plane->update_noarm = i9xx_plane_update_noarm;
+ plane->update_arm = i9xx_plane_update_arm;
+ }
+ plane->disable_arm = i9xx_plane_disable_arm;
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
@@ -875,21 +903,26 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->disable_flip_done = ilk_primary_disable_flip_done;
}
+ modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
+
if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
formats, num_formats,
- i9xx_format_modifiers,
+ modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
formats, num_formats,
- i9xx_format_modifiers,
+ modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane %c",
plane_name(plane->i9xx_plane));
+
+ kfree(modifiers);
+
if (ret)
goto fail;
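
Note the ownership contract introduced here: intel_fb_plane_get_modifiers() hands back a heap-allocated, DRM_FORMAT_MOD_INVALID-terminated array, drm_universal_plane_init() copies it internally, and the caller frees its copy right after the call, whether or not init succeeded. Sketched:

/*
 * modifiers = intel_fb_plane_get_modifiers(...);   heap-allocated array
 * drm_universal_plane_init(..., modifiers, ...);   copies the array
 * kfree(modifiers);                                safe even if init failed
 * if (ret)
 *         goto fail;
 */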
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 71fbdcddd31f..5781e9fac8b4 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -28,6 +28,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
+#include "icl_dsi.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_combo_phy.h"
@@ -36,6 +37,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_dsi.h"
+#include "intel_dsi_vbt.h"
#include "intel_panel.h"
#include "intel_vdsc.h"
#include "skl_scaler.h"
@@ -183,6 +185,8 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
if (enable_lpdt)
tmp |= LP_DATA_TRANSFER;
+ else
+ tmp &= ~LP_DATA_TRANSFER;
tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT);
@@ -1226,7 +1230,9 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
/* step5: program and powerup panel */
gen11_dsi_powerup_panel(encoder);
- intel_dsc_enable(encoder, pipe_config);
+ intel_dsc_dsi_pps_write(encoder, pipe_config);
+
+ intel_dsc_enable(pipe_config);
/* step6c: configure transcoder timings */
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
@@ -1623,7 +1629,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
- ret = intel_dsc_compute_params(encoder, crtc_state);
+ ret = intel_dsc_compute_params(crtc_state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h
new file mode 100644
index 000000000000..b4861b56b5b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/icl_dsi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __ICL_DSI_H__
+#define __ICL_DSI_H__
+
+struct drm_i915_private;
+struct intel_crtc_state;
+
+void icl_dsi_init(struct drm_i915_private *i915);
+void icl_dsi_frame_update(struct intel_crtc_state *crtc_state);
+
+#endif /* __ICL_DSI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index b4e7ac51aa31..a62550711e98 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -139,6 +139,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode ||
+ new_conn_state->base.privacy_screen_sw_state != old_conn_state->base.privacy_screen_sw_state ||
!drm_connector_atomic_hdr_metadata_equal(old_state, new_state))
crtc_state->mode_changed = true;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 0be8c00e3db9..c2c512cd8ec0 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -35,14 +35,16 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
-#include "i915_trace.h"
+#include "gt/intel_rps.h"
+
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
+#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_pm.h"
#include "intel_sprite.h"
-#include "gt/intel_rps.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
struct intel_plane *plane)
@@ -394,7 +396,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
const struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
const struct intel_plane_state *new_master_plane_state;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, plane->pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -469,31 +471,72 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
return NULL;
}
-void intel_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+void intel_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ trace_intel_plane_update_noarm(&plane->base, crtc);
+
+ if (plane->update_noarm)
+ plane->update_noarm(plane, crtc_state, plane_state);
+}
+
+void intel_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- trace_intel_update_plane(&plane->base, crtc);
+ trace_intel_plane_update_arm(&plane->base, crtc);
if (crtc_state->uapi.async_flip && plane->async_flip)
plane->async_flip(plane, crtc_state, plane_state, true);
else
- plane->update_plane(plane, crtc_state, plane_state);
+ plane->update_arm(plane, crtc_state, plane_state);
}
-void intel_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+void intel_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc_state);
+ trace_intel_plane_disable_arm(&plane->base, crtc);
+ plane->disable_arm(plane, crtc_state);
+}
+
+void intel_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (new_crtc_state->uapi.async_flip)
+ return;
+
+ /*
+ * Since we only write non-arming registers here,
+ * the order does not matter even for skl+.
+ */
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+
+ /* TODO: for mailbox updates this should be skipped */
+ if (new_plane_state->uapi.visible ||
+ new_plane_state->planar_slave)
+ intel_plane_update_noarm(plane, new_crtc_state, new_plane_state);
+ }
}
-void skl_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -515,17 +558,20 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, plane);
+ /*
+ * TODO: for mailbox updates intel_plane_update_noarm()
+ * would have to be called here as well.
+ */
if (new_plane_state->uapi.visible ||
- new_plane_state->planar_slave) {
- intel_update_plane(plane, new_crtc_state, new_plane_state);
- } else {
- intel_disable_plane(plane, new_crtc_state);
- }
+ new_plane_state->planar_slave)
+ intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
+ else
+ intel_plane_disable_arm(plane, new_crtc_state);
}
}
-void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -539,10 +585,14 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
+ /*
+ * TODO: for mailbox updates intel_plane_update_noarm()
+ * would have to be called here as well.
+ */
if (new_plane_state->uapi.visible)
- intel_update_plane(plane, new_crtc_state, new_plane_state);
+ intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
else
- intel_disable_plane(plane, new_crtc_state);
+ intel_plane_disable_arm(plane, new_crtc_state);
}
}
@@ -738,6 +788,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
i915_gem_object_wait_priority(obj, 0, &attr);
if (!new_plane_state->uapi.fence) { /* implicit fencing */
+ struct dma_resv_iter cursor;
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&state->commit_ready,
@@ -748,12 +799,12 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret < 0)
goto unpin_fb;
- fence = dma_resv_get_excl_unlocked(obj->base.resv);
- if (fence) {
+ dma_resv_iter_begin(&cursor, obj->base.resv, false);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
add_rps_boost_after_vblank(new_plane_state->hw.crtc,
fence);
- dma_fence_put(fence);
}
+ dma_resv_iter_end(&cursor);
} else {
add_rps_boost_after_vblank(new_plane_state->hw.crtc,
new_plane_state->uapi.fence);
@@ -768,7 +819,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
if (!state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, true);
+ intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true);
state->rps_interactive = true;
}
@@ -802,7 +853,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
return;
if (state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, false);
+ intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false);
state->rps_interactive = false;
}
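
The switch from dma_resv_get_excl_unlocked() to the iterator above is the general lockless pattern for walking a reservation object's fences. The idiom in isolation ('resv' is an assumed struct dma_resv pointer; false restricts the walk to the exclusive fence, matching the hunk):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, false);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* 'fence' is valid only inside the body; the iterator
		 * holds the reference and may restart if the object
		 * is modified concurrently. */
	}
	dma_resv_iter_end(&cursor);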
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 62e5a2a77fd4..7907f601598e 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -30,20 +30,25 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
struct intel_crtc *crtc);
void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state);
-void intel_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-void intel_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
+void intel_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
-void skl_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+void intel_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
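
The renamed entry points encode the new two-phase commit: non-self-arming ("noarm") register writes are flushed for all planes first, and the arming writes happen inside the vblank-evasion critical section. The intended per-CRTC ordering, reconstructed from these declarations (locking and the surrounding loop elided):

/*
 * intel_update_planes_on_crtc(state, crtc);   noarm writes, outside evasion
 *     ...vblank evasion begins...
 * skl_arm_planes_on_crtc(state, crtc);        or i9xx_arm_planes_on_crtc()
 *     ...vblank evasion ends...
 */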
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 03e8c05a74f6..3bdca0fe2cee 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -31,6 +31,7 @@
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_cdclk.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_lpe_audio.h"
@@ -62,6 +63,15 @@
* struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
*/
+struct intel_audio_funcs {
+ void (*audio_codec_enable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ void (*audio_codec_disable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+};
+
/* DP N/M table */
#define LC_810M 810000
#define LC_540M 540000
@@ -388,7 +398,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio_component;
+ struct i915_audio_component *acomp = dev_priv->audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
const struct dp_aud_n_m *nm;
@@ -436,7 +446,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio_component;
+ struct i915_audio_component *acomp = dev_priv->audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
int n, rate;
@@ -494,7 +504,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n",
transcoder_name(cpu_transcoder));
- mutex_lock(&dev_priv->av_mutex);
+ mutex_lock(&dev_priv->audio.mutex);
/* Disable timestamps */
tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
@@ -512,7 +522,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
- mutex_unlock(&dev_priv->av_mutex);
+ mutex_unlock(&dev_priv->audio.mutex);
}
static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
@@ -641,7 +651,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
"Enable audio codec on transcoder %s, %u bytes ELD\n",
transcoder_name(cpu_transcoder), drm_eld_size(eld));
- mutex_lock(&dev_priv->av_mutex);
+ mutex_lock(&dev_priv->audio.mutex);
/* Enable Audio WA for 4k DSC usecases */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
@@ -679,7 +689,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
- mutex_unlock(&dev_priv->av_mutex);
+ mutex_unlock(&dev_priv->audio.mutex);
}
static void ilk_audio_codec_disable(struct intel_encoder *encoder,
@@ -826,7 +836,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio_component;
+ struct i915_audio_component *acomp = dev_priv->audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
@@ -848,17 +858,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
- if (dev_priv->audio_funcs)
- dev_priv->audio_funcs->audio_codec_enable(encoder,
+ if (dev_priv->audio.funcs)
+ dev_priv->audio.funcs->audio_codec_enable(encoder,
crtc_state,
conn_state);
- mutex_lock(&dev_priv->av_mutex);
+ mutex_lock(&dev_priv->audio.mutex);
encoder->audio_connector = connector;
/* referred in audio callbacks */
- dev_priv->av_enc_map[pipe] = encoder;
- mutex_unlock(&dev_priv->av_mutex);
+ dev_priv->audio.encoder_map[pipe] = encoder;
+ mutex_unlock(&dev_priv->audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -888,20 +898,20 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio_component;
+ struct i915_audio_component *acomp = dev_priv->audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
- if (dev_priv->audio_funcs)
- dev_priv->audio_funcs->audio_codec_disable(encoder,
+ if (dev_priv->audio.funcs)
+ dev_priv->audio.funcs->audio_codec_disable(encoder,
old_crtc_state,
old_conn_state);
- mutex_lock(&dev_priv->av_mutex);
+ mutex_lock(&dev_priv->audio.mutex);
encoder->audio_connector = NULL;
- dev_priv->av_enc_map[pipe] = NULL;
- mutex_unlock(&dev_priv->av_mutex);
+ dev_priv->audio.encoder_map[pipe] = NULL;
+ mutex_unlock(&dev_priv->audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -931,19 +941,53 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
};
/**
- * intel_init_audio_hooks - Set up chip specific audio hooks
+ * intel_audio_hooks_init - Set up chip specific audio hooks
* @dev_priv: device private
*/
-void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
+void intel_audio_hooks_init(struct drm_i915_private *dev_priv)
{
if (IS_G4X(dev_priv)) {
- dev_priv->audio_funcs = &g4x_audio_funcs;
+ dev_priv->audio.funcs = &g4x_audio_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->audio_funcs = &ilk_audio_funcs;
+ dev_priv->audio.funcs = &ilk_audio_funcs;
} else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) {
- dev_priv->audio_funcs = &hsw_audio_funcs;
+ dev_priv->audio.funcs = &hsw_audio_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->audio_funcs = &ilk_audio_funcs;
+ dev_priv->audio.funcs = &ilk_audio_funcs;
+ }
+}
+
+struct aud_ts_cdclk_m_n {
+ u8 m;
+ u16 n;
+};
+
+void intel_audio_cdclk_change_pre(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 13)
+ intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0);
+}
+
+static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
+{
+ if (refclk == 24000)
+ aud_ts->m = 12;
+ else
+ aud_ts->m = 15;
+
+ aud_ts->n = cdclk * aud_ts->m / 24000;
+}
+
+void intel_audio_cdclk_change_post(struct drm_i915_private *i915)
+{
+ struct aud_ts_cdclk_m_n aud_ts;
+
+ if (DISPLAY_VER(i915) >= 13) {
+ get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts);
+
+ intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n);
+ intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
+ drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n);
}
}
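The two helpers above only program AUD_TS_CDCLK_M/N on display 13+; as a quick standalone sanity check of the arithmetic, in the same kHz units the driver uses (the 38.4 MHz refclk and 652.8 MHz cdclk are example inputs only, not values asserted by the patch):

#include <stdio.h>

/* mirror of the selection above: M depends only on the refclk */
static void aud_ts_m_n(int refclk, int cdclk, int *m, int *n)
{
        *m = (refclk == 24000) ? 12 : 15;
        *n = cdclk * *m / 24000;
}

int main(void)
{
        int m, n;

        aud_ts_m_n(38400, 652800, &m, &n);
        printf("M=%d N=%d\n", m, n);    /* M=15 N=408 */
        return 0;
}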
@@ -976,7 +1020,7 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc;
int ret;
- crtc = intel_get_first_crtc(dev_priv);
+ crtc = intel_first_crtc(dev_priv);
if (!crtc)
return;
@@ -1014,13 +1058,13 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK);
- if (dev_priv->audio_power_refcount++ == 0) {
+ if (dev_priv->audio.power_refcount++ == 0) {
if (DISPLAY_VER(dev_priv) >= 9) {
intel_de_write(dev_priv, AUD_FREQ_CNTRL,
- dev_priv->audio_freq_cntrl);
+ dev_priv->audio.freq_cntrl);
drm_dbg_kms(&dev_priv->drm,
"restored AUD_FREQ_CNTRL to 0x%x\n",
- dev_priv->audio_freq_cntrl);
+ dev_priv->audio.freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
@@ -1041,7 +1085,7 @@ static void i915_audio_component_put_power(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
- if (--dev_priv->audio_power_refcount == 0)
+ if (--dev_priv->audio.power_refcount == 0)
if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
@@ -1093,7 +1137,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
/*
* get the intel_encoder for the given port and pipe;
* the intel_encoder is saved indexed by pipe
- * MST & (pipe >= 0): return the av_enc_map[pipe],
+ * MST & (pipe >= 0): return the audio.encoder_map[pipe],
* when port is matched
* MST & (pipe < 0): this is invalid
* Non-MST & (pipe >= 0): only pipe = 0 (the first device entry)
@@ -1108,10 +1152,10 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
/* MST */
if (pipe >= 0) {
if (drm_WARN_ON(&dev_priv->drm,
- pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+ pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map)))
return NULL;
- encoder = dev_priv->av_enc_map[pipe];
+ encoder = dev_priv->audio.encoder_map[pipe];
/*
* at bootup, the audio driver may not know whether it is
* MST or not, so it will poll all the port & pipe
@@ -1127,7 +1171,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
return NULL;
for_each_pipe(dev_priv, pipe) {
- encoder = dev_priv->av_enc_map[pipe];
+ encoder = dev_priv->audio.encoder_map[pipe];
if (encoder == NULL)
continue;
@@ -1145,7 +1189,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct i915_audio_component *acomp = dev_priv->audio_component;
+ struct i915_audio_component *acomp = dev_priv->audio.component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
@@ -1155,7 +1199,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
return 0;
cookie = i915_audio_component_get_power(kdev);
- mutex_lock(&dev_priv->av_mutex);
+ mutex_lock(&dev_priv->audio.mutex);
/* 1. get the pipe */
encoder = get_saved_enc(dev_priv, port, pipe);
@@ -1174,7 +1218,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
hsw_audio_config_update(encoder, crtc->config);
unlock:
- mutex_unlock(&dev_priv->av_mutex);
+ mutex_unlock(&dev_priv->audio.mutex);
i915_audio_component_put_power(kdev, cookie);
return err;
}
@@ -1188,13 +1232,13 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
const u8 *eld;
int ret = -EINVAL;
- mutex_lock(&dev_priv->av_mutex);
+ mutex_lock(&dev_priv->audio.mutex);
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder) {
drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
port_name(port));
- mutex_unlock(&dev_priv->av_mutex);
+ mutex_unlock(&dev_priv->audio.mutex);
return ret;
}
@@ -1206,7 +1250,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
memcpy(buf, eld, min(max_bytes, ret));
}
- mutex_unlock(&dev_priv->av_mutex);
+ mutex_unlock(&dev_priv->audio.mutex);
return ret;
}
@@ -1241,7 +1285,7 @@ static int i915_audio_component_bind(struct device *i915_kdev,
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
- dev_priv->audio_component = acomp;
+ dev_priv->audio.component = acomp;
drm_modeset_unlock_all(&dev_priv->drm);
return 0;
@@ -1256,14 +1300,14 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
drm_modeset_lock_all(&dev_priv->drm);
acomp->base.ops = NULL;
acomp->base.dev = NULL;
- dev_priv->audio_component = NULL;
+ dev_priv->audio.component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
device_link_remove(hda_kdev, i915_kdev);
- if (dev_priv->audio_power_refcount)
+ if (dev_priv->audio.power_refcount)
drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
- dev_priv->audio_power_refcount);
+ dev_priv->audio.power_refcount);
}
static const struct component_ops i915_audio_component_bind_ops = {
@@ -1327,10 +1371,13 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
aud_freq, aud_freq_init);
- dev_priv->audio_freq_cntrl = aud_freq;
+ dev_priv->audio.freq_cntrl = aud_freq;
}
- dev_priv->audio_component_registered = true;
+ /* init with current cdclk */
+ intel_audio_cdclk_change_post(dev_priv);
+
+ dev_priv->audio.component_registered = true;
}
/**
@@ -1342,11 +1389,11 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
*/
static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->audio_component_registered)
+ if (!dev_priv->audio.component_registered)
return;
component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
- dev_priv->audio_component_registered = false;
+ dev_priv->audio.component_registered = false;
}
/**
@@ -1368,7 +1415,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv)
*/
void intel_audio_deinit(struct drm_i915_private *dev_priv)
{
- if ((dev_priv)->lpe_audio.platdev != NULL)
+ if ((dev_priv)->audio.lpe.platdev != NULL)
intel_lpe_audio_teardown(dev_priv);
else
i915_audio_component_cleanup(dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index a3657c7a7ba2..63b22131dc45 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -11,13 +11,15 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
-void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
+void intel_audio_hooks_init(struct drm_i915_private *dev_priv);
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
+void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
+void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 2b1423a43437..9d989c9f5da4 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1555,12 +1555,24 @@ static const u8 gen9bc_tgp_ddc_pin_map[] = {
[DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP,
};
+static const u8 adlp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [ADLP_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP,
+ [ADLP_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP,
+ [ADLP_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP,
+ [ADLP_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
{
const u8 *ddc_pin_map;
int n_entries;
- if (IS_ALDERLAKE_S(i915)) {
+ if (IS_ALDERLAKE_P(i915)) {
+ ddc_pin_map = adlp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
+ } else if (IS_ALDERLAKE_S(i915)) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
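The ADL-P addition above plugs into the usual bounds-checked table lookup that map_ddc_pin() applies to every platform map. A minimal standalone sketch of that pattern (the helper name here is illustrative, not from the patch):

#include <stddef.h>

static unsigned char lookup_ddc_pin(const unsigned char *map, size_t n_entries,
                                    unsigned char vbt_pin)
{
        /* unknown or unmapped VBT pins translate to 0 (no GMBUS pin) */
        if (vbt_pin < n_entries && map[vbt_pin] != 0)
                return map[vbt_pin];

        return 0;
}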
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 8d9d888e9316..2da4aacc956b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -27,6 +27,9 @@ struct intel_qgv_info {
u8 num_points;
u8 num_psf_points;
u8 t_bl;
+ u8 max_numchannels;
+ u8 channel_width;
+ u8 deinterleave;
};
static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
@@ -42,7 +45,7 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
else
dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
- sp->dclk = dclk_ratio * dclk_reference;
+ sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
if (val & DG1_GEAR_TYPE)
@@ -69,6 +72,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
int point)
{
u32 val = 0, val2 = 0;
+ u16 dclk;
int ret;
ret = sandybridge_pcode_read(dev_priv,
@@ -78,7 +82,8 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- sp->dclk = val & 0xffff;
+ dclk = val & 0xffff;
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@@ -133,7 +138,8 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
}
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
- struct intel_qgv_info *qi)
+ struct intel_qgv_info *qi,
+ bool is_y_tile)
{
const struct dram_info *dram_info = &dev_priv->dram_info;
int i, ret;
@@ -141,20 +147,44 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
- if (DISPLAY_VER(dev_priv) == 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
- qi->t_bl = 4;
+ qi->t_bl = is_y_tile ? 8 : 4;
+ qi->max_numchannels = 2;
+ qi->channel_width = 64;
+ qi->deinterleave = is_y_tile ? 1 : 2;
break;
case INTEL_DRAM_DDR5:
- qi->t_bl = 8;
+ qi->t_bl = is_y_tile ? 16 : 8;
+ qi->max_numchannels = 4;
+ qi->channel_width = 32;
+ qi->deinterleave = is_y_tile ? 1 : 2;
+ break;
+ case INTEL_DRAM_LPDDR4:
+ if (IS_ROCKETLAKE(dev_priv)) {
+ qi->t_bl = 8;
+ qi->max_numchannels = 4;
+ qi->channel_width = 32;
+ qi->deinterleave = 2;
+ break;
+ }
+ fallthrough;
+ case INTEL_DRAM_LPDDR5:
+ qi->t_bl = 16;
+ qi->max_numchannels = 8;
+ qi->channel_width = 16;
+ qi->deinterleave = is_y_tile ? 2 : 4;
break;
default:
qi->t_bl = 16;
+ qi->max_numchannels = 1;
break;
}
- else if (DISPLAY_VER(dev_priv) == 11)
+ else if (DISPLAY_VER(dev_priv) == 11) {
qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
+ qi->max_numchannels = 1;
+ }
if (drm_WARN_ON(&dev_priv->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
@@ -193,12 +223,6 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
return 0;
}
-static int icl_calc_bw(int dclk, int num, int den)
-{
- /* multiples of 16.666MHz (100/6) */
- return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
-}
-
static int adl_calc_psf_bw(int clk)
{
/*
@@ -240,7 +264,7 @@ static const struct intel_sa_info tgl_sa_info = {
};
static const struct intel_sa_info rkl_sa_info = {
- .deburst = 16,
+ .deburst = 8,
.deprogbwlimit = 20, /* GB/s */
.displayrtids = 128,
.derating = 10,
@@ -265,35 +289,130 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
- int deinterleave;
- int ipqdepth, ipqdepthpch;
+ int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
+ int num_groups = ARRAY_SIZE(dev_priv->max_bw);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi);
+ ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
dclk_max = icl_sagv_max_dclk(&qi);
+ maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10);
+ ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
+ qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
+
+ for (i = 0; i < num_groups; i++) {
+ struct intel_bw_info *bi = &dev_priv->max_bw[i];
+ int clpchgroup;
+ int j;
+
+ clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
+ bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+
+ bi->num_qgv_points = qi.num_points;
+ bi->num_psf_gv_points = qi.num_psf_points;
+
+ for (j = 0; j < qi.num_points; j++) {
+ const struct intel_qgv_point *sp = &qi.points[j];
+ int ct, bw;
+
+ /*
+ * Max row cycle time
+ *
+ * FIXME what is the logic behind the
+ * assumed burst length?
+ */
+ ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
+ (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
+ bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);
- ipqdepthpch = 16;
+ bi->deratedbw[j] = min(maxdebw,
+ bw * (100 - sa->derating) / 100);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
+ i, j, bi->num_planes, bi->deratedbw[j]);
+ }
+ }
+ /*
+ * If SAGV is disabled in BIOS, we always get 1
+ * SAGV point, but we can't send PCode commands to restrict it,
+ * as they would fail and be pointless anyway.
+ */
+ if (qi.num_points == 1)
+ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ else
+ dev_priv->sagv_status = I915_SAGV_ENABLED;
+
+ return 0;
+}
+
+static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+{
+ struct intel_qgv_info qi = {};
+ const struct dram_info *dram_info = &dev_priv->dram_info;
+ bool is_y_tile = true; /* assume y tile may be used */
+ int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int ipqdepth, ipqdepthpch = 16;
+ int dclk_max;
+ int maxdebw, peakbw;
+ int clperchgroup;
+ int num_groups = ARRAY_SIZE(dev_priv->max_bw);
+ int i, ret;
+
+ ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to get memory subsystem information, ignoring bandwidth limits");
+ return ret;
+ }
+
+ if (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5)
+ num_channels *= 2;
+
+ qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
+
+ if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
+ qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
+
+ if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels)
+ drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
+ if (qi.max_numchannels != 0)
+ num_channels = min_t(u8, num_channels, qi.max_numchannels);
+
+ dclk_max = icl_sagv_max_dclk(&qi);
+
+ peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
+ maxdebw = min(sa->deprogbwlimit * 1000, peakbw * 6 / 10); /* 60% */
- maxdebw = min(sa->deprogbwlimit * 1000,
- icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
+ /*
+ * clperchgroup = 4kpagespermempage * clperchperblock,
+ * clperchperblock = 8 / num_channels * interleave
+ */
+ clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
- for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+ for (i = 0; i < num_groups; i++) {
struct intel_bw_info *bi = &dev_priv->max_bw[i];
+ struct intel_bw_info *bi_next;
int clpchgroup;
int j;
- clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
- bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+ clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
+
+ /* only groups 0..num_groups-2 have a next group to fill in */
+ if (i < num_groups - 1) {
+ bi_next = &dev_priv->max_bw[i + 1];
+
+ if (clpchgroup < clperchgroup)
+ bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+ else
+ bi_next->num_planes = 0;
+ }
bi->num_qgv_points = qi.num_points;
bi->num_psf_gv_points = qi.num_psf_points;
@@ -310,7 +429,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
*/
ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
(clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
- bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
+ bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);
bi->deratedbw[j] = min(maxdebw,
bw * (100 - sa->derating) / 100);
@@ -329,9 +448,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
"BW%d / PSF GV %d: num_planes=%d bw=%u\n",
i, j, bi->num_planes, bi->psf_bw[j]);
}
-
- if (bi->num_planes == 1)
- break;
}
/*
@@ -395,6 +511,34 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
return 0;
}
+static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
+ int num_planes, int qgv_point)
+{
+ int i;
+
+ /*
+ * Let's return max bw for 0 planes
+ */
+ num_planes = max(1, num_planes);
+
+ for (i = ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) {
+ const struct intel_bw_info *bi =
+ &dev_priv->max_bw[i];
+
+ /*
+ * Pcode will not expose all QGV points when
+ * SAGV is forced to off/min/med/max.
+ */
+ if (qgv_point >= bi->num_qgv_points)
+ return UINT_MAX;
+
+ if (num_planes <= bi->num_planes)
+ return bi->deratedbw[qgv_point];
+ }
+
+ return dev_priv->max_bw[0].deratedbw[qgv_point];
+}
+
static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
int psf_gv_point)
{
@@ -412,13 +556,13 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (IS_DG2(dev_priv))
dg2_get_bw_info(dev_priv);
else if (IS_ALDERLAKE_P(dev_priv))
- icl_get_bw_info(dev_priv, &adlp_sa_info);
+ tgl_get_bw_info(dev_priv, &adlp_sa_info);
else if (IS_ALDERLAKE_S(dev_priv))
- icl_get_bw_info(dev_priv, &adls_sa_info);
+ tgl_get_bw_info(dev_priv, &adls_sa_info);
else if (IS_ROCKETLAKE(dev_priv))
- icl_get_bw_info(dev_priv, &rkl_sa_info);
+ tgl_get_bw_info(dev_priv, &rkl_sa_info);
else if (DISPLAY_VER(dev_priv) == 12)
- icl_get_bw_info(dev_priv, &tgl_sa_info);
+ tgl_get_bw_info(dev_priv, &tgl_sa_info);
else if (DISPLAY_VER(dev_priv) == 11)
icl_get_bw_info(dev_priv, &icl_sa_info);
}
@@ -490,7 +634,7 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active())
+ if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active(dev_priv))
data_rate = data_rate * 105 / 100;
return data_rate;
@@ -746,7 +890,10 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
for (i = 0; i < num_qgv_points; i++) {
unsigned int max_data_rate;
- max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
+ if (DISPLAY_VER(dev_priv) > 11)
+ max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i);
+ else
+ max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
/*
* We need to know which qgv point gives us
* maximum bandwidth in order to disable SAGV
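To make the derated bandwidth math above concrete, here is a standalone sketch with made-up QGV point timings; units follow the driver (dclk in MHz after the 16.666 MHz unit conversion, bandwidth in MB/s), and none of these numbers come from real hardware:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        /* assumed QGV point: 500 MHz dclk, timings in dclk cycles */
        int dclk = 500, t_rc = 36, t_rp = 16, t_rcd = 16, t_rdpre = 8, t_bl = 16;
        int clpchgroup = 4, num_channels = 4, derating = 10;
        int ct, bw;

        /* max row cycle time, as computed in icl/tgl_get_bw_info() */
        ct = t_rp + t_rcd + (clpchgroup - 1) * t_bl + t_rdpre;
        if (t_rc > ct)
                ct = t_rc;

        bw = DIV_ROUND_UP(dclk * clpchgroup * 32 * num_channels, ct);
        printf("bw=%d MB/s derated=%d MB/s\n",
               bw, bw * (100 - derating) / 100);    /* bw=2910 derated=2619 */
        return 0;
}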
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 868dd43a7542..c30cf8d2b835 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -24,8 +24,11 @@
#include <linux/time.h>
#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_pcode.h"
@@ -66,7 +69,7 @@ void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config);
}
-int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state)
+static int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
return dev_priv->cdclk_funcs->bw_calc_min_cdclk(state);
@@ -1211,6 +1214,19 @@ static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
+static bool has_cdclk_squasher(struct drm_i915_private *i915)
+{
+ return IS_DG2(i915);
+}
+
+struct intel_cdclk_vals {
+ u32 cdclk;
+ u16 refclk;
+ u16 waveform;
+ u8 divider; /* CD2X divider * 2 */
+ u8 ratio;
+};
+
static const struct intel_cdclk_vals bxt_cdclk_table[] = {
{ .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
{ .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
@@ -1312,12 +1328,19 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
};
static const struct intel_cdclk_vals dg2_cdclk_table[] = {
- { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
- { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
- { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
- { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
- { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
- { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 },
+ { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 },
+ { .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 },
+ { .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a },
+ { .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa },
+ { .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
{}
};
@@ -1453,6 +1476,7 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
+ u32 squash_ctl = 0;
u32 divider;
int div;
@@ -1490,7 +1514,21 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
return;
}
- cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);
+ if (has_cdclk_squasher(dev_priv))
+ squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL);
+
+ if (squash_ctl & CDCLK_SQUASH_ENABLE) {
+ u16 waveform;
+ int size;
+
+ size = REG_FIELD_GET(CDCLK_SQUASH_WINDOW_SIZE_MASK, squash_ctl) + 1;
+ waveform = REG_FIELD_GET(CDCLK_SQUASH_WAVEFORM_MASK, squash_ctl) >> (16 - size);
+
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(hweight16(waveform) *
+ cdclk_config->vco, size * div);
+ } else {
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);
+ }
out:
/*
@@ -1625,6 +1663,26 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
}
}
+static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
+ int cdclk)
+{
+ const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ int i;
+
+ if (cdclk == dev_priv->cdclk.hw.bypass)
+ return 0;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ table[i].cdclk == cdclk)
+ return table[i].waveform;
+
+ drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->cdclk.hw.ref);
+
+ return 0xffff;
+}
+
static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
@@ -1632,6 +1690,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u32 val;
+ u16 waveform;
+ int clock;
int ret;
/* Inform power controller of upcoming frequency change. */
@@ -1675,7 +1735,24 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
bxt_de_pll_enable(dev_priv, vco);
}
- val = bxt_cdclk_cd2x_div_sel(dev_priv, cdclk, vco) |
+ waveform = cdclk_squash_waveform(dev_priv, cdclk);
+
+ if (waveform)
+ clock = vco / 2;
+ else
+ clock = cdclk;
+
+ if (has_cdclk_squasher(dev_priv)) {
+ u32 squash_ctl = 0;
+
+ if (waveform)
+ squash_ctl = CDCLK_SQUASH_ENABLE |
+ CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform;
+
+ intel_de_write(dev_priv, CDCLK_SQUASH_CTL, squash_ctl);
+ }
+
+ val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) |
bxt_cdclk_cd2x_pipe(dev_priv, pipe) |
skl_cdclk_decimal(cdclk);
@@ -1689,7 +1766,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, CDCLK_CTL, val);
if (pipe != INVALID_PIPE)
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
if (DISPLAY_VER(dev_priv) >= 11) {
ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
@@ -1727,7 +1804,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
- int cdclk, vco;
+ int cdclk, clock, vco;
intel_update_cdclk(dev_priv);
intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
@@ -1763,8 +1840,12 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
expected = skl_cdclk_decimal(cdclk);
/* Figure out what CD2X divider we should be using for this cdclk */
- expected |= bxt_cdclk_cd2x_div_sel(dev_priv,
- dev_priv->cdclk.hw.cdclk,
+ if (has_cdclk_squasher(dev_priv))
+ clock = dev_priv->cdclk.hw.vco / 2;
+ else
+ clock = dev_priv->cdclk.hw.cdclk;
+
+ expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock,
dev_priv->cdclk.hw.vco);
/*
@@ -1880,6 +1961,25 @@ static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
a->ref == b->ref;
}
+static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
+{
+ /*
+ * FIXME should store a bit more state in intel_cdclk_config
+ * to differentiate squasher vs. cd2x divider properly. For
+ * the moment all platforms with squasher use a fixed cd2x
+ * divider.
+ */
+ if (!has_cdclk_squasher(dev_priv))
+ return false;
+
+ return a->cdclk != b->cdclk &&
+ a->vco != 0 &&
+ a->vco == b->vco &&
+ a->ref == b->ref;
+}
+
/**
* intel_cdclk_needs_modeset - Determine if changing between the CDCLK
* configurations requires a modeset on all pipes
@@ -1917,7 +2017,17 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv))
return false;
+ /*
+ * FIXME should store a bit more state in intel_cdclk_config
+ * to differentiate squasher vs. cd2x divider properly. For
+ * the moment all platforms with squasher use a fixed cd2x
+ * divider.
+ */
+ if (has_cdclk_squasher(dev_priv))
+ return false;
+
return a->cdclk != b->cdclk &&
+ a->vco != 0 &&
a->vco == b->vco &&
a->ref == b->ref;
}
@@ -1975,6 +2085,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_psr_pause(intel_dp);
}
+ intel_audio_cdclk_change_pre(dev_priv);
+
/*
* Lock aux/gmbus while we change cdclk in case those
* functions use cdclk. Not all platforms/ports do,
@@ -2003,6 +2115,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_psr_resume(intel_dp);
}
+ intel_audio_cdclk_change_post(dev_priv);
+
if (drm_WARN(&dev_priv->drm,
intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
"cdclk state doesn't match!\n")) {
@@ -2524,6 +2638,58 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
return to_intel_cdclk_state(cdclk_state);
}
+int intel_cdclk_atomic_check(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state;
+ const struct intel_cdclk_state *new_cdclk_state;
+ struct intel_plane_state *plane_state;
+ struct intel_bw_state *new_bw_state;
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+ enum pipe pipe;
+ int ret;
+ int i;
+
+ /*
+ * active_planes bitmask has been updated, and potentially affected
+ * planes are part of the state. We can now compute the minimum cdclk
+ * for each plane.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
+ if (ret)
+ return ret;
+ }
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+
+ if (new_cdclk_state &&
+ old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
+ *need_cdclk_calc = true;
+
+ ret = intel_cdclk_bw_calc_min_cdclk(state);
+ if (ret)
+ return ret;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+
+ if (!new_cdclk_state || !new_bw_state)
+ return 0;
+
+ for_each_pipe(i915, pipe) {
+ min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
+
+ /* Currently do this change only if we need to increase */
+ if (new_bw_state->min_cdclk > min_cdclk)
+ *need_cdclk_calc = true;
+ }
+
+ return 0;
+}
+
int intel_cdclk_init(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state *cdclk_state;
@@ -2587,7 +2753,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
struct intel_crtc_state *crtc_state;
pipe = ilog2(new_cdclk_state->active_pipes);
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
@@ -2597,9 +2763,14 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
pipe = INVALID_PIPE;
}
- if (intel_cdclk_can_crawl(dev_priv,
- &old_cdclk_state->actual,
- &new_cdclk_state->actual)) {
+ if (intel_cdclk_can_squash(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk via squasher\n");
+ } else if (intel_cdclk_can_crawl(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
drm_dbg_kms(&dev_priv->drm,
"Can change cdclk via crawl\n");
} else if (pipe != INVALID_PIPE) {
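The squashed cdclk readout added to bxt_get_cdclk() reduces to cdclk = hweight16(waveform) * vco / (size * div). As a standalone check against the new DG2 table above (vco = 38400 kHz * ratio 34, CD2X div 2, window size 16; plain division suffices here because these values divide evenly, so DIV_ROUND_CLOSEST is not needed):

#include <stdio.h>

static int hweight16(unsigned short v)
{
        int n = 0;

        for (; v; v &= v - 1)   /* clear lowest set bit */
                n++;
        return n;
}

int main(void)
{
        int vco = 38400 * 34, div = 2, size = 16;
        unsigned short waveforms[] = { 0x8888, 0xaaaa, 0xffff };

        for (int i = 0; i < 3; i++)
                printf("waveform 0x%04x -> cdclk %d kHz\n", waveforms[i],
                       hweight16(waveforms[i]) * vco / (size * div));
        /* 0x8888 -> 163200, 0xaaaa -> 326400, 0xffff -> 652800 */
        return 0;
}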
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 309b3f394e24..fc638522e445 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -16,13 +16,6 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc_state;
-struct intel_cdclk_vals {
- u32 cdclk;
- u16 refclk;
- u8 divider; /* CD2X divider * 2 */
- u8 ratio;
-};
-
struct intel_cdclk_state {
struct intel_global_state base;
@@ -70,7 +63,8 @@ void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config);
-int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state);
+int intel_cdclk_atomic_check(struct intel_atomic_state *state,
+ bool *need_cdclk_calc);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 5359b7305a78..de3ded1e327a 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -26,7 +26,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
-#include "intel_dsi.h"
+#include "vlv_dsi_pll.h"
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -552,8 +552,8 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc,
lut = blob->data;
for (i = 0; i < 256; i++)
- intel_de_write(dev_priv, PALETTE(pipe, i),
- i9xx_lut_8(&lut[i]));
+ intel_de_write_fw(dev_priv, PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
}
static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
@@ -576,15 +576,15 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0),
- i965_lut_10p6_ldw(&lut[i]));
- intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1),
- i965_lut_10p6_udw(&lut[i]));
+ intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
+ i965_lut_10p6_ldw(&lut[i]));
+ intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
+ i965_lut_10p6_udw(&lut[i]));
}
- intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red);
- intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green);
- intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue);
+ intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red);
+ intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green);
+ intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue);
}
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
@@ -618,8 +618,8 @@ static void ilk_load_lut_8(struct intel_crtc *crtc,
lut = blob->data;
for (i = 0; i < 256; i++)
- intel_de_write(dev_priv, LGC_PALETTE(pipe, i),
- i9xx_lut_8(&lut[i]));
+ intel_de_write_fw(dev_priv, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
}
static void ilk_load_lut_10(struct intel_crtc *crtc,
@@ -631,8 +631,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++)
- intel_de_write(dev_priv, PREC_PALETTE(pipe, i),
- ilk_lut_10(&lut[i]));
+ intel_de_write_fw(dev_priv, PREC_PALETTE(pipe, i),
+ ilk_lut_10(&lut[i]));
}
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -681,16 +681,16 @@ static void ivb_load_lut_10(struct intel_crtc *crtc,
const struct drm_color_lut *entry =
&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++);
- intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
- ilk_lut_10(entry));
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), prec_index++);
+ intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe),
+ ilk_lut_10(entry));
}
/*
* Reset the index, otherwise it prevents the legacy palette from
* being written properly.
*/
- intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
/* On BDW+ the index auto increment mode actually works */
@@ -704,23 +704,23 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
- prec_index | PAL_PREC_AUTO_INCREMENT);
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
/* We discard half the user entries in split gamma mode */
const struct drm_color_lut *entry =
&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
- ilk_lut_10(entry));
+ intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe),
+ ilk_lut_10(entry));
}
/*
* Reset the index, otherwise it prevents the legacy palette from
* being written properly.
*/
- intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
@@ -808,6 +808,14 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
}
}
+static int glk_degamma_lut_size(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 13)
+ return 131;
+ else
+ return 35;
+}
+
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -821,14 +829,14 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
- intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
- PRE_CSC_GAMC_AUTO_INCREMENT);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
/*
- * First 33 entries represent range from 0 to 1.0
- * 34th and 35th entry will represent extended range
+ * First lut_size entries represent range from 0 to 1.0
+ * 3 additional lut entries will represent extended range
* inputs 3.0 and 7.0 respectively, currently clamped
* at 1.0. Since the precision is 16bit, the user
* value can be directly filled to register.
@@ -839,15 +847,15 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
- intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe),
- lut[i].green);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe),
+ lut[i].green);
}
/* Clamp values > 1.0. */
- while (i++ < 35)
- intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ while (i++ < glk_degamma_lut_size(dev_priv))
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
- intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
}
static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
@@ -862,21 +870,21 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
- intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
- PRE_CSC_GAMC_AUTO_INCREMENT);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
u32 v = (i << 16) / (lut_size - 1);
- intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), v);
}
/* Clamp values > 1.0. */
while (i++ < 35)
- intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
- intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
}
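In isolation, the ramp written by glk_load_degamma_lut_linear() above is v = (i << 16) / (lut_size - 1): a linear sweep from 0 to 1.0 in the hardware's 16-bit fixed-point format. A quick check, assuming a 33-entry LUT purely for illustration:

#include <stdio.h>

int main(void)
{
        int lut_size = 33;      /* assumed size, for illustration only */

        for (int i = 0; i < lut_size; i += 16)
                printf("entry %2d -> 0x%05x\n", i, (i << 16) / (lut_size - 1));
        /* entry 0 -> 0x00000, entry 16 -> 0x08000, entry 32 -> 0x10000 (1.0) */
        return 0;
}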
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -1071,10 +1079,10 @@ static void chv_load_cgm_degamma(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0),
- chv_cgm_degamma_ldw(&lut[i]));
- intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1),
- chv_cgm_degamma_udw(&lut[i]));
+ intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0),
+ chv_cgm_degamma_ldw(&lut[i]));
+ intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1),
+ chv_cgm_degamma_udw(&lut[i]));
}
}
@@ -1105,10 +1113,10 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0),
- chv_cgm_gamma_ldw(&lut[i]));
- intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1),
- chv_cgm_gamma_udw(&lut[i]));
+ intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0),
+ chv_cgm_gamma_ldw(&lut[i]));
+ intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1),
+ chv_cgm_gamma_udw(&lut[i]));
}
}
@@ -1131,8 +1139,8 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state)
else
i965_load_luts(crtc_state);
- intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe),
- crtc_state->cgm_mode);
+ intel_de_write_fw(dev_priv, CGM_PIPE_MODE(crtc->pipe),
+ crtc_state->cgm_mode);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -1574,6 +1582,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 gamma_mode = 0;
if (crtc_state->hw.degamma_lut)
@@ -1586,6 +1596,13 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
if (!crtc_state->hw.gamma_lut ||
crtc_state_is_legacy_gamma(crtc_state))
gamma_mode |= GAMMA_MODE_MODE_8BIT;
+ /*
+ * Enable 10bit gamma for D13
+ * ToDo: Extend to Logarithmic Gamma once the new UAPI
+ * is accepted and implemented by a userspace consumer
+ */
+ else if (DISPLAY_VER(i915) >= 13)
+ gamma_mode |= GAMMA_MODE_MODE_10BIT;
else
gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED;
@@ -1808,7 +1825,7 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- u32 val = intel_de_read(dev_priv, PALETTE(pipe, i));
+ u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i));
i9xx_lut_8_pack(&lut[i], val);
}
@@ -1843,15 +1860,15 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
- u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));
+ u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
+ u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
i965_lut_10p6_pack(&lut[i], ldw, udw);
}
- lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
- lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
- lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));
+ lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 0)));
+ lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 1)));
+ lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 2)));
return blob;
}
@@ -1886,8 +1903,8 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size; i++) {
- u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
- u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
+ u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
+ u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
chv_cgm_gamma_pack(&lut[i], ldw, udw);
}
@@ -1922,7 +1939,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));
+ u32 val = intel_de_read_fw(dev_priv, LGC_PALETTE(pipe, i));
i9xx_lut_8_pack(&lut[i], val);
}
@@ -1947,7 +1964,7 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size; i++) {
- u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));
+ u32 val = intel_de_read_fw(dev_priv, PREC_PALETTE(pipe, i));
ilk_lut_10_pack(&lut[i], val);
}
@@ -1999,16 +2016,16 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
lut = blob->data;
- intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
- prec_index | PAL_PREC_AUTO_INCREMENT);
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
- u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
+ u32 val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe));
ilk_lut_10_pack(&lut[i], val);
}
- intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0);
return blob;
}
@@ -2050,17 +2067,17 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc)
lut = blob->data;
- intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe),
- PAL_PREC_AUTO_INCREMENT);
+ intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < 9; i++) {
- u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
- u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+ u32 ldw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+ u32 udw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
icl_lut_multi_seg_pack(&lut[i], ldw, udw);
}
- intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
+ intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
/*
* FIXME readouts from PAL_PREC_DATA register aren't giving
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 634e8d449457..f628e0542933 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -301,7 +301,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
val &= ~PWR_DOWN_LN_MASK;
- val |= lane_mask << PWR_DOWN_LN_SHIFT;
+ val |= lane_mask;
intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 1c161eeed82f..6a3893c8ff22 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -45,6 +45,7 @@
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
+#include "intel_pch_display.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
@@ -143,7 +144,7 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
static void hsw_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ lpt_pch_get_config(pipe_config);
hsw_ddi_get_config(encoder, pipe_config);
@@ -152,8 +153,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
-
- pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -247,6 +246,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_crtc_vblank_off(old_crtc_state);
@@ -261,10 +261,9 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
- lpt_disable_pch_transcoder(dev_priv);
- lpt_disable_iclkip(dev_priv);
+ lpt_pch_disable(state, crtc);
- intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state);
+ hsw_fdi_disable(encoder);
drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
@@ -316,14 +315,14 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
intel_enable_transcoder(crtc_state);
- lpt_pch_enable(crtc_state);
+ lpt_pch_enable(state, crtc);
intel_crtc_vblank_on(crtc_state);
intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
- intel_wait_for_vblank(dev_priv, pipe);
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
+ intel_crtc_wait_for_next_vblank(crtc);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -722,7 +721,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
intel_uncore_posting_read(uncore, pipeconf_reg);
/* Wait for the next vblank to substitute
* border color for color info */
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 254e67141a77..16c3ca66d9f0 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -3,29 +3,31 @@
* Copyright © 2020 Intel Corporation
*/
#include <linux/kernel.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank_work.h>
-#include "i915_trace.h"
#include "i915_vgpu.h"
-
+#include "i9xx_plane.h"
+#include "icl_dsi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
#include "intel_display_debugfs.h"
+#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_pipe_crc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vrr.h"
-#include "i9xx_plane.h"
#include "skl_universal_plane.h"
static void assert_vblank_disabled(struct drm_crtc *crtc)
@@ -34,6 +36,38 @@ static void assert_vblank_disabled(struct drm_crtc *crtc)
drm_crtc_vblank_put(crtc);
}
+struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915)
+{
+ return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0));
+}
+
+struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+ enum pipe pipe)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ if (crtc->pipe == pipe)
+ return crtc;
+ }
+
+ return NULL;
+}
+
+void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
+{
+ drm_crtc_wait_one_vblank(&crtc->base);
+}
+
+void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
+ enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+
+ if (crtc->active)
+ intel_crtc_wait_for_next_vblank(crtc);
+}
+
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -167,6 +201,8 @@ static void intel_crtc_destroy(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ cpu_latency_qos_remove_request(&crtc->vblank_pm_qos);
+
drm_crtc_cleanup(&crtc->base);
kfree(crtc);
}
@@ -323,18 +359,6 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
if (ret)
goto fail;
- BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
- dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
- dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
-
- if (DISPLAY_VER(dev_priv) < 9) {
- enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
-
- BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
- dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
- dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
- }
-
if (DISPLAY_VER(dev_priv) >= 11)
drm_crtc_create_scaling_filter_property(&crtc->base,
BIT(DRM_SCALING_FILTER_DEFAULT) |
@@ -344,6 +368,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_crtc_crc_init(crtc);
+ cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);
+
drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
@@ -354,6 +380,65 @@ fail:
return ret;
}
+static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->hw.active &&
+ !intel_crtc_needs_modeset(crtc_state) &&
+ !crtc_state->preload_luts &&
+ (crtc_state->uapi.color_mgmt_changed ||
+ crtc_state->update_pipe);
+}
+
+static void intel_crtc_vblank_work(struct kthread_work *base)
+{
+ struct drm_vblank_work *work = to_drm_vblank_work(base);
+ struct intel_crtc_state *crtc_state =
+ container_of(work, typeof(*crtc_state), vblank_work);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ trace_intel_crtc_vblank_work_start(crtc);
+
+ intel_color_load_luts(crtc_state);
+
+ if (crtc_state->uapi.event) {
+ spin_lock_irq(&crtc->base.dev->event_lock);
+ drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event);
+ crtc_state->uapi.event = NULL;
+ spin_unlock_irq(&crtc->base.dev->event_lock);
+ }
+
+ trace_intel_crtc_vblank_work_end(crtc);
+}
+
+static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base,
+ intel_crtc_vblank_work);
+ /*
+ * Interrupt latency is critical for getting the vblank
+ * work executed as early as possible during the vblank.
+ */
+ cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0);
+}
+
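For reference, the cpu_latency_qos_*() calls paired across intel_crtc_init()/intel_crtc_destroy() and this function follow the usual add/update/remove lifecycle from <linux/pm_qos.h>. A minimal sketch of that lifecycle, using a hypothetical holder structure:

#include <linux/pm_qos.h>

struct demo {
	struct pm_qos_request pm_qos; /* stands in for crtc->vblank_pm_qos */
};

static void demo_init(struct demo *d)
{
	/* registers the request without imposing any latency constraint yet */
	cpu_latency_qos_add_request(&d->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static void demo_latency_critical(struct demo *d)
{
	/* request 0us CPU wakeup latency while the critical work is pending */
	cpu_latency_qos_update_request(&d->pm_qos, 0);
	/* ... latency-critical work ... */
	/* drop the constraint again once the work has completed */
	cpu_latency_qos_update_request(&d->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static void demo_fini(struct demo *d)
{
	cpu_latency_qos_remove_request(&d->pm_qos);
}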
+void intel_wait_for_vblank_workers(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!intel_crtc_needs_vblank_work(crtc_state))
+ continue;
+
+ drm_vblank_work_flush(&crtc_state->vblank_work);
+ cpu_latency_qos_update_request(&crtc->vblank_pm_qos,
+ PM_QOS_DEFAULT_VALUE);
+ }
+}
+
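The drm_vblank_work machinery used by the two functions above comes from <drm/drm_vblank_work.h> and pairs init/schedule/flush, exactly as intel_pipe_update_end() and intel_wait_for_vblank_workers() do further down. A self-contained sketch of that lifecycle, with a hypothetical handler:

#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

/* hypothetical handler; the driver's real one is intel_crtc_vblank_work() */
static void demo_vblank_work(struct kthread_work *base)
{
	struct drm_vblank_work *work = to_drm_vblank_work(base);

	/* runs on the per-CRTC vblank worker thread, close to vblank start */
	(void)work;
}

static void demo_run(struct drm_crtc *crtc, struct drm_vblank_work *work)
{
	drm_vblank_work_init(work, crtc, demo_vblank_work);

	/* target the next vblank; false = run anyway if that vblank is missed */
	drm_vblank_work_schedule(work,
				 drm_crtc_accurate_vblank_count(crtc) + 1,
				 false);

	/* wait for the handler to finish before freeing anything it touches */
	drm_vblank_work_flush(work);
}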
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
@@ -387,7 +472,7 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays.
*/
-void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
+void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -402,10 +487,17 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
if (new_crtc_state->uapi.async_flip)
return;
- if (new_crtc_state->vrr.enable)
- vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
- else
+ if (intel_crtc_needs_vblank_work(new_crtc_state))
+ intel_crtc_vblank_work_init(new_crtc_state);
+
+ if (new_crtc_state->vrr.enable) {
+ if (intel_vrr_is_push_sent(new_crtc_state))
+ vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
+ else
+ vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+ } else {
vblank_start = intel_mode_vblank_start(adjusted_mode);
+ }
/* FIXME needs to be calibrated sensibly */
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
@@ -554,7 +646,11 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
* Would be slightly nice to just grab the vblank count and arm the
* event outside of the critical section - the spinlock might spin for a
* while ... */
- if (new_crtc_state->uapi.event) {
+ if (intel_crtc_needs_vblank_work(new_crtc_state)) {
+ drm_vblank_work_schedule(&new_crtc_state->vblank_work,
+ drm_crtc_accurate_vblank_count(&crtc->base) + 1,
+ false);
+ } else if (new_crtc_state->uapi.event) {
drm_WARN_ON(&dev_priv->drm,
drm_crtc_vblank_get(&crtc->base) != 0);
@@ -566,11 +662,24 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
new_crtc_state->uapi.event = NULL;
}
- local_irq_enable();
-
- /* Send VRR Push to terminate Vblank */
+ /*
+ * Send VRR Push to terminate Vblank. If we are already in vblank
+ * this has to be done _after_ sampling the frame counter, as
+ * otherwise the push would immediately terminate the vblank and
+ * the sampled frame counter would correspond to the next frame
+ * instead of the current frame.
+ *
+ * There is a tiny race here (iff vblank evasion failed us) where
+ * we might sample the frame counter just before vmax vblank start
+ * but the push would be sent just after it. That would cause the
+ * push to affect the next frame instead of the current frame,
+ * which would cause the next frame to terminate already at vmin
+ * vblank start instead of vmax vblank start.
+ */
intel_vrr_send_push(new_crtc_state);
+ local_irq_enable();
+
if (intel_vgpu_active(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index a5ae997581aa..73077137fb99 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -8,11 +8,16 @@
#include <linux/types.h>
+enum i9xx_plane_id;
enum pipe;
+struct drm_display_mode;
struct drm_i915_private;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs);
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
@@ -21,5 +26,14 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
+void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
+struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
+struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+ enum pipe pipe);
+void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
+ enum pipe pipe);
+void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 11842f212613..16d34685d83f 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -28,11 +28,6 @@ static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static const u64 cursor_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
@@ -195,7 +190,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
{
return CURSOR_ENABLE |
CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(plane_state->view.color_plane[0].stride);
+ CURSOR_STRIDE(plane_state->view.color_plane[0].mapping_stride);
}
static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
@@ -234,7 +229,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
}
drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
- plane_state->view.color_plane[0].stride != fb->pitches[0]);
+ plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);
switch (fb->pitches[0]) {
case 256:
@@ -253,9 +248,10 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
return 0;
}
-static void i845_update_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+/* TODO: split into noarm+arm pair */
+static void i845_cursor_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
u32 cntl = 0, base = 0, pos = 0, size = 0;
@@ -298,10 +294,10 @@ static void i845_update_cursor(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void i845_disable_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+static void i845_cursor_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- i845_update_cursor(plane, crtc_state, NULL);
+ i845_cursor_update_arm(plane, crtc_state, NULL);
}
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -455,7 +451,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
}
drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
- plane_state->view.color_plane[0].stride != fb->pitches[0]);
+ plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);
if (fb->pitches[0] !=
drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
@@ -488,9 +484,10 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return 0;
}
-static void i9xx_update_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+/* TODO: split into noarm+arm pair */
+static void i9xx_cursor_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -562,10 +559,10 @@ static void i9xx_update_cursor(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void i9xx_disable_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+static void i9xx_cursor_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- i9xx_update_cursor(plane, crtc_state, NULL);
+ i9xx_cursor_update_arm(plane, crtc_state, NULL);
}
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -605,8 +602,10 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
- return modifier == DRM_FORMAT_MOD_LINEAR &&
- format == DRM_FORMAT_ARGB8888;
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
+ return false;
+
+ return format == DRM_FORMAT_ARGB8888;
}
static int
@@ -717,10 +716,12 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- if (new_plane_state->uapi.visible)
- intel_update_plane(plane, crtc_state, new_plane_state);
- else
- intel_disable_plane(plane, crtc_state);
+ if (new_plane_state->uapi.visible) {
+ intel_plane_update_noarm(plane, crtc_state, new_plane_state);
+ intel_plane_update_arm(plane, crtc_state, new_plane_state);
+ } else {
+ intel_plane_disable_arm(plane, crtc_state);
+ }
intel_plane_unpin_fb(old_plane_state);
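The noarm/arm split used in the hunk above separates register writes that take effect immediately (noarm) from the final write that arms the double-buffered update for the next vblank (arm). A sketch of the calling convention, assuming the intel_plane_update_noarm()/intel_plane_update_arm()/intel_plane_disable_arm() hooks introduced alongside this change:

/* sketch of the split update path, for illustration */
static void demo_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	if (plane_state->uapi.visible) {
		/* write the non-arming registers first ... */
		intel_plane_update_noarm(plane, crtc_state, plane_state);
		/* ... then the arming register that latches everything */
		intel_plane_update_arm(plane, crtc_state, plane_state);
	} else {
		intel_plane_disable_arm(plane, crtc_state);
	}
}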
@@ -754,6 +755,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *cursor;
int ret, zpos;
+ u64 *modifiers;
cursor = intel_plane_alloc();
if (IS_ERR(cursor))
@@ -766,14 +768,14 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
cursor->max_stride = i845_cursor_max_stride;
- cursor->update_plane = i845_update_cursor;
- cursor->disable_plane = i845_disable_cursor;
+ cursor->update_arm = i845_cursor_update_arm;
+ cursor->disable_arm = i845_cursor_disable_arm;
cursor->get_hw_state = i845_cursor_get_hw_state;
cursor->check_plane = i845_check_cursor;
} else {
cursor->max_stride = i9xx_cursor_max_stride;
- cursor->update_plane = i9xx_update_cursor;
- cursor->disable_plane = i9xx_disable_cursor;
+ cursor->update_arm = i9xx_cursor_update_arm;
+ cursor->disable_arm = i9xx_cursor_disable_arm;
cursor->get_hw_state = i9xx_cursor_get_hw_state;
cursor->check_plane = i9xx_check_cursor;
}
@@ -784,13 +786,18 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
cursor->cursor.size = ~0;
+ modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_NONE);
+
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
- cursor_format_modifiers,
+ modifiers,
DRM_PLANE_TYPE_CURSOR,
"cursor %c", pipe_name(pipe));
+
+ kfree(modifiers);
+
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index cfb567df71b3..cab505277595 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -25,6 +25,7 @@
*
*/
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
@@ -321,10 +322,11 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
+ /* CRT dotclock is determined via other means */
if (pipe_config->has_pch_encoder)
- dotclock = intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->fdi_m_n);
- else if (intel_crtc_has_dp_encoder(pipe_config))
+ return;
+
+ if (intel_crtc_has_dp_encoder(pipe_config))
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
@@ -1039,7 +1041,6 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int level = intel_ddi_level(encoder, crtc_state, 0);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int n_entries, ln;
@@ -1068,32 +1069,36 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
- val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
- RCOMP_SCALAR_MASK);
- val |= SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel);
- val |= SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel);
- /* Program Rcomp scalar for every table entry */
- val |= RCOMP_SCALAR(0x98);
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
+ for (ln = 0; ln < 4; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy),
+ SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK,
+ SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) |
+ SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) |
+ RCOMP_SCALAR(0x98));
+ }
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln < 4; ln++) {
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
- val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
- CURSOR_COEFF_MASK);
- val |= POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1);
- val |= POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2);
- val |= CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff);
- intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK,
+ POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) |
+ POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) |
+ CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff));
}
/* Program PORT_TX_DW7 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN(0, phy));
- val &= ~N_SCALAR_MASK;
- val |= N_SCALAR(trans->entries[level].icl.dw7_n_scalar);
- intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
+ for (ln = 0; ln < 4; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy),
+ N_SCALAR_MASK,
+ N_SCALAR(trans->entries[level].icl.dw7_n_scalar));
+ }
}
static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
@@ -1124,16 +1129,14 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln < 4; ln++) {
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
- val &= ~LOADGEN_SELECT;
- val |= icl_combo_phy_loadgen_select(crtc_state, ln);
- intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ LOADGEN_SELECT,
+ icl_combo_phy_loadgen_select(crtc_state, ln));
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
- val |= SUS_CLOCK_CONFIG;
- intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ 0, SUS_CLOCK_CONFIG);
/* 4. Clear training enable to change swing values */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
@@ -1154,10 +1157,8 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
- int level = intel_ddi_level(encoder, crtc_state, 0);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
- u32 val;
if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
@@ -1166,53 +1167,51 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port));
- val &= ~CRI_USE_FS32;
- intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val);
-
- val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port));
- val &= ~CRI_USE_FS32;
- intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val);
+ intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port),
+ CRI_USE_FS32, 0);
+ intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port),
+ CRI_USE_FS32, 0);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
- val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
- val |= CRI_TXDEEMPH_OVERRIDE_17_12(
- trans->entries[level].mg.cri_txdeemph_override_17_12);
- intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);
-
- val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
- val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
- val |= CRI_TXDEEMPH_OVERRIDE_17_12(
- trans->entries[level].mg.cri_txdeemph_override_17_12);
- intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
+ int level;
+
+ level = intel_ddi_level(encoder, crtc_state, 2 * ln + 0);
+
+ intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port),
+ CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
+ CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
+
+ level = intel_ddi_level(encoder, crtc_state, 2 * ln + 1);
+
+ intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port),
+ CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
+ CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port));
- val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
- CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
- val |= CRI_TXDEEMPH_OVERRIDE_5_0(
- trans->entries[level].mg.cri_txdeemph_override_5_0) |
- CRI_TXDEEMPH_OVERRIDE_11_6(
- trans->entries[level].mg.cri_txdeemph_override_11_6) |
- CRI_TXDEEMPH_OVERRIDE_EN;
- intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);
-
- val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port));
- val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
- CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
- val |= CRI_TXDEEMPH_OVERRIDE_5_0(
- trans->entries[level].mg.cri_txdeemph_override_5_0) |
- CRI_TXDEEMPH_OVERRIDE_11_6(
- trans->entries[level].mg.cri_txdeemph_override_11_6) |
- CRI_TXDEEMPH_OVERRIDE_EN;
- intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);
+ int level;
+
+ level = intel_ddi_level(encoder, crtc_state, 2 * ln + 0);
+
+ intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port),
+ CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
+ CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
+ CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
+ CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) |
+ CRI_TXDEEMPH_OVERRIDE_EN);
+
+ level = intel_ddi_level(encoder, crtc_state, 2 * ln + 1);
+
+ intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port),
+ CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
+ CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
+ CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
+ CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) |
+ CRI_TXDEEMPH_OVERRIDE_EN);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -1223,50 +1222,34 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port));
- if (crtc_state->port_clock < 300000)
- val |= CFG_LOW_RATE_LKREN_EN;
- else
- val &= ~CFG_LOW_RATE_LKREN_EN;
- intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val);
+ intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port),
+ CFG_LOW_RATE_LKREN_EN,
+ crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port));
- val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
- if (crtc_state->port_clock <= 500000) {
- val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
- } else {
- val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
- CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
- }
- intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val);
-
- val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port));
- val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
- if (crtc_state->port_clock <= 500000) {
- val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
- } else {
- val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
- CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
- }
- intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val);
+ intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port),
+ CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
+ CFG_AMI_CK_DIV_OVERRIDE_EN,
+ crtc_state->port_clock > 500000 ?
+ CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
+ CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
+
+ intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port),
+ CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
+ CFG_AMI_CK_DIV_OVERRIDE_EN,
+ crtc_state->port_clock > 500000 ?
+ CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
+ CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = intel_de_read(dev_priv,
- MG_TX1_PISO_READLOAD(ln, tc_port));
- val |= CRI_CALCINIT;
- intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
- val);
-
- val = intel_de_read(dev_priv,
- MG_TX2_PISO_READLOAD(ln, tc_port));
- val |= CRI_CALCINIT;
- intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
- val);
+ intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ 0, CRI_CALCINIT);
+ intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ 0, CRI_CALCINIT);
}
}
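Every conversion in this hunk funnels through intel_de_rmw(i915, reg, clear, set), which collapses the read/mask/or/write sequences it replaces. A sketch of the equivalence (illustrative helper, not the driver's actual definition):

/* what a clear/set read-modify-write helper boils down to */
static u32 demo_rmw(struct drm_i915_private *i915, i915_reg_t reg,
		    u32 clear, u32 set)
{
	u32 old, val;

	old = intel_de_read(i915, reg);
	val = (old & ~clear) | set;
	intel_de_write(i915, reg, val);

	return old; /* intel_de_rmw() likewise returns the pre-modify value */
}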
@@ -1275,9 +1258,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
- int level = intel_ddi_level(encoder, crtc_state, 0);
const struct intel_ddi_buf_trans *trans;
- u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
@@ -1287,33 +1268,58 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
- DKL_TX_DE_EMPAHSIS_COEFF_MASK |
- DKL_TX_VSWING_CONTROL_MASK);
- dpcnt_val = DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing);
- dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis);
- dpcnt_val |= DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot);
-
for (ln = 0; ln < 2; ln++) {
+ int level;
+
intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
HIP_INDEX_VAL(tc_port, ln));
intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0);
- /* All the registers are RMW */
- val = intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port));
- val &= ~dpcnt_mask;
- val |= dpcnt_val;
- intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val);
+ level = intel_ddi_level(encoder, crtc_state, 2 * ln + 0);
+
+ intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port),
+ DKL_TX_PRESHOOT_COEFF_MASK |
+ DKL_TX_DE_EMPAHSIS_COEFF_MASK |
+ DKL_TX_VSWING_CONTROL_MASK,
+ DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
+ DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
+ DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
+
+ level = intel_ddi_level(encoder, crtc_state, 2 * ln + 1);
- val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port));
- val &= ~dpcnt_mask;
- val |= dpcnt_val;
- intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val);
+ intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port),
+ DKL_TX_PRESHOOT_COEFF_MASK |
+ DKL_TX_DE_EMPAHSIS_COEFF_MASK |
+ DKL_TX_VSWING_CONTROL_MASK,
+ DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
+ DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
+ DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
- val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port));
- val &= ~DKL_TX_DP20BITMODE;
- intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val);
+ intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
+ DKL_TX_DP20BITMODE, 0);
+
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ u32 val;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ if (ln == 0) {
+ val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0);
+ val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(2);
+ } else {
+ val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(3);
+ val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(3);
+ }
+ } else {
+ val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0);
+ val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0);
+ }
+
+ intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
+ DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK |
+ DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK,
+ val);
+ }
}
}
@@ -1938,7 +1944,7 @@ void intel_ddi_enable_clock(struct intel_encoder *encoder,
encoder->enable_clock(encoder, crtc_state);
}
-static void intel_ddi_disable_clock(struct intel_encoder *encoder)
+void intel_ddi_disable_clock(struct intel_encoder *encoder)
{
if (encoder->disable_clock)
encoder->disable_clock(encoder);
@@ -2385,7 +2391,10 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5.k Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
- intel_dsc_enable(encoder, crtc_state);
+
+ intel_dsc_dp_pps_write(encoder, crtc_state);
+
+ intel_dsc_enable(crtc_state);
}
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2519,8 +2528,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
+
+ intel_dsc_dp_pps_write(encoder, crtc_state);
+
if (!crtc_state->bigjoiner)
- intel_dsc_enable(encoder, crtc_state);
+ intel_dsc_enable(crtc_state);
}
static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2585,8 +2597,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_dsc_dp_pps_write(encoder, crtc_state);
+
if (!crtc_state->bigjoiner)
- intel_dsc_enable(encoder, crtc_state);
+ intel_dsc_enable(crtc_state);
}
static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2824,12 +2838,10 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
}
if (old_crtc_state->bigjoiner_linked_crtc) {
- struct intel_atomic_state *state =
- to_intel_atomic_state(old_crtc_state->uapi.state);
- struct intel_crtc *slave =
+ struct intel_crtc *slave_crtc =
old_crtc_state->bigjoiner_linked_crtc;
const struct intel_crtc_state *old_slave_crtc_state =
- intel_atomic_get_old_crtc_state(state, slave);
+ intel_atomic_get_old_crtc_state(state, slave_crtc);
intel_crtc_vblank_off(old_slave_crtc_state);
@@ -2866,41 +2878,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_tc_port_put_link(dig_port);
}
-void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
-
- /*
- * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
- * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
- * step 13 is the correct place for it. Step 18 is where it was
- * originally before the BUN.
- */
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-
- intel_disable_ddi_buf(encoder, old_crtc_state);
- intel_ddi_disable_clock(encoder);
-
- val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_PCDCLK;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_PLL_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-}
-
static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
@@ -2951,6 +2928,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
if (port == PORT_A && DISPLAY_VER(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp, crtc_state);
+ drm_connector_update_privacy_screen(conn_state);
intel_edp_backlight_on(crtc_state, conn_state);
if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink)
@@ -3095,6 +3073,12 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
intel_dp->link_trained = false;
+ if (old_crtc_state->has_audio)
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
+
+ intel_drrs_disable(intel_dp, old_crtc_state);
+ intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
@@ -3112,6 +3096,10 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = old_conn_state->connector;
+ if (old_crtc_state->has_audio)
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
+
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
drm_dbg_kms(&i915->drm,
@@ -3119,25 +3107,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
connector->base.id, connector->name);
}
-static void intel_pre_disable_ddi(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct intel_dp *intel_dp;
-
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder, old_crtc_state,
- old_conn_state);
-
- if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
- return;
-
- intel_dp = enc_to_intel_dp(encoder);
- intel_drrs_disable(intel_dp, old_crtc_state);
- intel_psr_disable(intel_dp, old_crtc_state);
-}
-
static void intel_disable_ddi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -3166,6 +3135,7 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
intel_drrs_update(intel_dp, crtc_state);
intel_backlight_update(state, encoder, crtc_state, conn_state);
+ drm_connector_update_privacy_screen(conn_state);
}
void intel_ddi_update_pipe(struct intel_atomic_state *state,
@@ -3195,8 +3165,14 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
intel_tc_port_get_link(enc_to_dig_port(encoder),
required_lanes);
- if (crtc_state && crtc_state->hw.active)
+ if (crtc_state && crtc_state->hw.active) {
+ struct intel_crtc *slave_crtc = crtc_state->bigjoiner_linked_crtc;
+
intel_update_active_dpll(state, crtc, encoder);
+
+ if (slave_crtc)
+ intel_update_active_dpll(state, slave_crtc, encoder);
+ }
}
static void
@@ -3552,18 +3528,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
return;
- if (pipe_config->bigjoiner_slave) {
- /* read out pipe settings from master */
- enum transcoder save = pipe_config->cpu_transcoder;
-
- /* Our own transcoder needs to be disabled when reading it in intel_ddi_read_func_ctl() */
- WARN_ON(pipe_config->output_types);
- pipe_config->cpu_transcoder = (enum transcoder)pipe_config->bigjoiner_linked_crtc->pipe;
- intel_ddi_read_func_ctl(encoder, pipe_config);
- pipe_config->cpu_transcoder = save;
- } else {
- intel_ddi_read_func_ctl(encoder, pipe_config);
- }
+ intel_ddi_read_func_ctl(encoder, pipe_config);
intel_ddi_mso_get_config(encoder, pipe_config);
@@ -3591,8 +3556,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
- if (!pipe_config->bigjoiner_slave)
- ddi_dotclock_get(pipe_config);
+ ddi_dotclock_get(pipe_config);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
@@ -3983,6 +3947,19 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
return NULL;
}
+ if (dig_port->base.type == INTEL_OUTPUT_EDP) {
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_privacy_screen *privacy_screen;
+
+ privacy_screen = drm_privacy_screen_get(dev->dev, NULL);
+ if (!IS_ERR(privacy_screen)) {
+ drm_connector_attach_privacy_screen_provider(&connector->base,
+ privacy_screen);
+ } else if (PTR_ERR(privacy_screen) != -ENODEV) {
+ drm_warn(dev, "Error getting privacy-screen\n");
+ }
+ }
+
return connector;
}
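drm_privacy_screen_get() reports "no screen fitted" as ERR_PTR(-ENODEV) rather than NULL, which is why the code above only warns for other errors. A sketch of the same lookup/attach pattern in isolation (assumed includes shown; the attach call takes over the reference):

#include <linux/err.h>
#include <drm/drm_privacy_screen_consumer.h>

static void demo_attach_privacy_screen(struct drm_connector *connector,
				       struct device *dev)
{
	struct drm_privacy_screen *ps = drm_privacy_screen_get(dev, NULL);

	if (!IS_ERR(ps)) {
		/* hands the reference over to the connector */
		drm_connector_attach_privacy_screen_provider(connector, ps);
	} else if (PTR_ERR(ps) != -ENODEV) {
		/* -ENODEV just means no privacy screen exists; stay quiet */
		pr_warn("Error getting privacy-screen\n");
	}
}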
@@ -4472,7 +4449,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->enable = intel_enable_ddi;
encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
encoder->pre_enable = intel_ddi_pre_enable;
- encoder->pre_disable = intel_pre_disable_ddi;
encoder->disable = intel_disable_ddi;
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index d6971717ef9c..c2fea6562917 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -6,11 +6,11 @@
#ifndef __INTEL_DDI_H__
#define __INTEL_DDI_H__
-#include "intel_display.h"
#include "i915_reg.h"
struct drm_connector_state;
struct drm_i915_private;
+struct intel_atomic_state;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
@@ -18,6 +18,8 @@ struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
struct intel_shared_dpll;
+enum pipe;
+enum port;
enum transcoder;
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
@@ -30,6 +32,7 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state);
void intel_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_clock(struct intel_encoder *encoder);
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct intel_shared_dpll *pll);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 78cd8f77b49d..e2dfb93a82bd 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -477,14 +477,14 @@ static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = {
static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
- { .icl = { 0xA, 0x47, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */
- { .icl = { 0xC, 0x64, 0x34, 0x00, 0x0B } }, /* 350 700 6.0 */
- { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x47, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x64, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 350 900 8.2 */
{ .icl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
- { .icl = { 0xC, 0x64, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */
+ { .icl = { 0xC, 0x64, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */
{ .icl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */
{ .icl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
- { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
@@ -1032,6 +1032,21 @@ bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
}
+static bool use_edp_hobl(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ return i915->vbt.edp.hobl && !intel_dp->hobl_failed;
+}
+
+static bool use_edp_low_vswing(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.edp.low_vswing;
+}
+
static const struct intel_ddi_buf_trans *
intel_get_buf_trans(const struct intel_ddi_buf_trans *trans, int *num_entries)
{
@@ -1057,14 +1072,12 @@ bdw_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
return intel_get_buf_trans(&bdw_trans_fdi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&bdw_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
- i915->vbt.edp.low_vswing)
+ use_edp_low_vswing(encoder))
return intel_get_buf_trans(&bdw_trans_edp, n_entries);
else
return intel_get_buf_trans(&bdw_trans_dp, n_entries);
@@ -1094,12 +1107,10 @@ skl_y_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
- i915->vbt.edp.low_vswing)
+ use_edp_low_vswing(encoder))
return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries);
else
return _skl_get_buf_trans_dp(encoder, &skl_y_trans_dp, n_entries);
@@ -1110,12 +1121,10 @@ skl_u_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
- i915->vbt.edp.low_vswing)
+ use_edp_low_vswing(encoder))
return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries);
else
return _skl_get_buf_trans_dp(encoder, &skl_u_trans_dp, n_entries);
@@ -1126,12 +1135,10 @@ skl_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
- i915->vbt.edp.low_vswing)
+ use_edp_low_vswing(encoder))
return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries);
else
return _skl_get_buf_trans_dp(encoder, &skl_trans_dp, n_entries);
@@ -1142,12 +1149,10 @@ kbl_y_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
- i915->vbt.edp.low_vswing)
+ use_edp_low_vswing(encoder))
return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries);
else
return _skl_get_buf_trans_dp(encoder, &kbl_y_trans_dp, n_entries);
@@ -1158,12 +1163,10 @@ kbl_u_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
- i915->vbt.edp.low_vswing)
+ use_edp_low_vswing(encoder))
return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries);
else
return _skl_get_buf_trans_dp(encoder, &kbl_u_trans_dp, n_entries);
@@ -1174,12 +1177,10 @@ kbl_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
- i915->vbt.edp.low_vswing)
+ use_edp_low_vswing(encoder))
return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries);
else
return _skl_get_buf_trans_dp(encoder, &kbl_trans_dp, n_entries);
@@ -1190,12 +1191,10 @@ bxt_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&bxt_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
- i915->vbt.edp.low_vswing)
+ use_edp_low_vswing(encoder))
return intel_get_buf_trans(&bxt_trans_edp, n_entries);
else
return intel_get_buf_trans(&bxt_trans_dp, n_entries);
@@ -1215,12 +1214,10 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
if (crtc_state->port_clock > 540000) {
return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
- } else if (dev_priv->vbt.edp.low_vswing) {
+ } else if (use_edp_low_vswing(encoder)) {
return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
}
@@ -1282,12 +1279,10 @@ ehl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
- dev_priv->vbt.edp.low_vswing)
+ use_edp_low_vswing(encoder))
return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
return intel_get_buf_trans(&ehl_combo_phy_trans_dp, n_entries);
@@ -1309,12 +1304,10 @@ jsl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
- dev_priv->vbt.edp.low_vswing)
+ use_edp_low_vswing(encoder))
return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries);
@@ -1346,16 +1339,13 @@ tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
if (crtc_state->port_clock > 540000) {
return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
- } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
+ } else if (use_edp_hobl(encoder)) {
return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
- } else if (dev_priv->vbt.edp.low_vswing) {
+ } else if (use_edp_low_vswing(encoder)) {
return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
}
@@ -1394,16 +1384,13 @@ dg1_get_combo_buf_trans_edp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
if (crtc_state->port_clock > 540000)
return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
- else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed)
+ else if (use_edp_hobl(encoder))
return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
- else if (dev_priv->vbt.edp.low_vswing)
+ else if (use_edp_low_vswing(encoder))
return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
else
@@ -1439,16 +1426,13 @@ rkl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
if (crtc_state->port_clock > 540000) {
return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
- } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
+ } else if (use_edp_hobl(encoder)) {
return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
- } else if (dev_priv->vbt.edp.low_vswing) {
+ } else if (use_edp_low_vswing(encoder)) {
return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
}
@@ -1485,14 +1469,11 @@ adls_get_combo_buf_trans_edp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
if (crtc_state->port_clock > 540000)
return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr3, n_entries);
- else if (i915->vbt.edp.hobl && !intel_dp->hobl_failed)
+ else if (use_edp_hobl(encoder))
return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries);
- else if (i915->vbt.edp.low_vswing)
+ else if (use_edp_low_vswing(encoder))
return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr2, n_entries);
else
return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
@@ -1527,16 +1508,13 @@ adlp_get_combo_buf_trans_edp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
if (crtc_state->port_clock > 540000) {
return intel_get_buf_trans(&adlp_combo_phy_trans_edp_hbr3,
n_entries);
- } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
+ } else if (use_edp_hobl(encoder)) {
return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
- } else if (dev_priv->vbt.edp.low_vswing) {
+ } else if (use_edp_low_vswing(encoder)) {
return intel_get_buf_trans(&adlp_combo_phy_trans_edp_up_to_hbr2,
n_entries);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index ec403e46a328..bf7ce684dd8e 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -41,6 +42,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
@@ -70,11 +72,10 @@
#include "gt/gen8_ppgtt.h"
-#include "pxp/intel_pxp.h"
-
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "i915_drv.h"
+#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -96,6 +97,8 @@
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_panel.h"
+#include "intel_pch_display.h"
+#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
@@ -103,19 +106,15 @@
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
-#include "intel_sbi.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "vlv_dsi_pll.h"
#include "vlv_sideband.h"
-
-static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-static void ilk_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
+#include "vlv_dsi.h"
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
@@ -341,6 +340,14 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
is_trans_port_sync_slave(crtc_state);
}
+static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->bigjoiner_slave)
+ return crtc_state->bigjoiner_linked_crtc;
+ else
+ return to_intel_crtc(crtc_state->uapi.crtc);
+}
+
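A usage sketch for the helper above: a slave pipe in a bigjoiner pair defers to its master's state, so callers resolve the master CRTC first (hypothetical caller; intel_atomic_get_new_crtc_state() is the existing state lookup):

/* hypothetical caller, for illustration */
static struct intel_crtc_state *
demo_master_crtc_state(struct intel_atomic_state *state,
		       const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = intel_master_crtc(crtc_state);

	return intel_atomic_get_new_crtc_state(state, master);
}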
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -454,80 +461,6 @@ static void assert_planes_disabled(struct intel_crtc *crtc)
assert_plane_disabled(plane);
}
-void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 val;
- bool enabled;
-
- val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
- enabled = !!(val & TRANS_ENABLE);
- I915_STATE_WARN(enabled,
- "transcoder assertion failed, should be off on pipe %c but is still active\n",
- pipe_name(pipe));
-}
-
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
- i915_reg_t dp_reg)
-{
- enum pipe port_pipe;
- bool state;
-
- state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
-
- I915_STATE_WARN(state && port_pipe == pipe,
- "PCH DP %c enabled on transcoder %c, should be disabled\n",
- port_name(port), pipe_name(pipe));
-
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
- "IBX PCH DP %c still using transcoder B\n",
- port_name(port));
-}
-
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
- i915_reg_t hdmi_reg)
-{
- enum pipe port_pipe;
- bool state;
-
- state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
-
- I915_STATE_WARN(state && port_pipe == pipe,
- "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
- port_name(port), pipe_name(pipe));
-
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
- "IBX PCH HDMI %c still using transcoder B\n",
- port_name(port));
-}
-
-static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- enum pipe port_pipe;
-
- assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
-
- I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
- port_pipe == pipe,
- "PCH VGA enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
-
- I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
- port_pipe == pipe,
- "PCH LVDS enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
-
- /* PCH SDVOB multiplex with HDMIB */
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
-}
-
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port,
unsigned int expected_mask)
@@ -562,154 +495,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
expected_mask);
}
-static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 val, pipeconf_val;
-
- /* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
-
- /* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, pipe);
- assert_fdi_rx_enabled(dev_priv, pipe);
-
- if (HAS_PCH_CPT(dev_priv)) {
- reg = TRANS_CHICKEN2(pipe);
- val = intel_de_read(dev_priv, reg);
- /*
- * Workaround: Set the timing override bit
- * before enabling the pch transcoder.
- */
- val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
- /* Configure frame start delay to match the CPU */
- val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
- }
-
- reg = PCH_TRANSCONF(pipe);
- val = intel_de_read(dev_priv, reg);
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
-
- if (HAS_PCH_IBX(dev_priv)) {
- /* Configure frame start delay to match the CPU */
- val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
-
- /*
- * Make the BPC in transcoder be consistent with
- * that in pipeconf reg. For HDMI we must use 8bpc
- * here for both 8bpc and 12bpc.
- */
- val &= ~PIPECONF_BPC_MASK;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- val |= PIPECONF_8BPC;
- else
- val |= pipeconf_val & PIPECONF_BPC_MASK;
- }
-
- val &= ~TRANS_INTERLACE_MASK;
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
- if (HAS_PCH_IBX(dev_priv) &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
- val |= TRANS_LEGACY_INTERLACED_ILK;
- else
- val |= TRANS_INTERLACED;
- } else {
- val |= TRANS_PROGRESSIVE;
- }
-
- intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
- if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
- drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
- pipe_name(pipe));
-}
-
-static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- u32 val, pipeconf_val;
-
- /* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
- assert_fdi_rx_enabled(dev_priv, PIPE_A);
-
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
- /* Workaround: set timing override bit. */
- val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
- /* Configure frame start delay to match the CPU */
- val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
-
- val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
-
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
- PIPECONF_INTERLACED_ILK)
- val |= TRANS_INTERLACED;
- else
- val |= TRANS_PROGRESSIVE;
-
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
- if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
- TRANS_STATE_ENABLE, 100))
- drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
-}
-
-static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- i915_reg_t reg;
- u32 val;
-
- /* FDI relies on the transcoder */
- assert_fdi_tx_disabled(dev_priv, pipe);
- assert_fdi_rx_disabled(dev_priv, pipe);
-
- /* Ports must be off as well */
- assert_pch_ports_disabled(dev_priv, pipe);
-
- reg = PCH_TRANSCONF(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, reg, val);
- /* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
- pipe_name(pipe));
-
- if (HAS_PCH_CPT(dev_priv)) {
- /* Workaround: Clear the timing override chicken bit again. */
- reg = TRANS_CHICKEN2(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, reg, val);
- }
-}
-
-void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = intel_de_read(dev_priv, LPT_TRANSCONF);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
- /* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
- TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
-
- /* Workaround: clear timing override bit. */
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
-}
-
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -823,14 +608,6 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
intel_wait_for_pipe_off(old_crtc_state);
}
-bool
-intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- u64 modifier)
-{
- return info->is_yuv &&
- info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
-}
-
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
unsigned int size = 0;
@@ -850,7 +627,11 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
unsigned int plane_size;
- plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+ if (rem_info->plane[i].linear)
+ plane_size = rem_info->plane[i].size;
+ else
+ plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+
if (plane_size == 0)
continue;
@@ -869,7 +650,7 @@ bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
return DISPLAY_VER(dev_priv) < 4 ||
- (plane->has_fbc &&
+ (plane->fbc &&
plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
@@ -885,7 +666,7 @@ u32 intel_fb_xy_to_linear(int x, int y,
{
const struct drm_framebuffer *fb = state->hw.fb;
unsigned int cpp = fb->format->cpp[color_plane];
- unsigned int pitch = state->view.color_plane[color_plane].stride;
+ unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
return y * pitch + x * cpp;
}
@@ -904,136 +685,6 @@ void intel_add_fb_offsets(int *x, int *y,
*y += state->view.color_plane[color_plane].y;
}
-/*
- * From the Sky Lake PRM:
- * "The Color Control Surface (CCS) contains the compression status of
- * the cache-line pairs. The compression state of the cache-line pair
- * is specified by 2 bits in the CCS. Each CCS cache-line represents
- * an area on the main surface of 16x16 sets of 128 byte Y-tiled
- * cache-line-pairs. CCS is always Y tiled."
- *
- * Since cache line pairs refer to horizontally adjacent cache lines,
- * each cache line in the CCS corresponds to an area of 32x16 cache
- * lines on the main surface. Since each pixel is 4 bytes, this gives
- * us a ratio of one byte in the CCS for each 8x16 pixels in the
- * main surface.
- */
-static const struct drm_format_info skl_ccs_formats[] = {
- { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
- .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
- { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
- .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
- { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
- .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
- { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
- .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
-};
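A quick sanity check of the ratio claimed in the comment above (illustrative arithmetic only, not part of the original source):

	/*
	 * Illustrative: one 64B CCS cache line covers 32x16 main-surface
	 * cache lines, i.e. 512 * 64B = 32KiB of main surface = 8192
	 * pixels at 4 bpp. Each CCS byte thus covers 8192 / 64 = 128
	 * pixels = one 8x16 block, matching the .cpp = { 4, 1 },
	 * .hsub = 8, .vsub = 16 encoding in skl_ccs_formats[] above.
	 */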
-
-/*
- * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
- * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
- * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
- * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
- * the main surface.
- */
-static const struct drm_format_info gen12_ccs_formats[] = {
- { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
- .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 1, .vsub = 1, },
- { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
- .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 1, .vsub = 1, },
- { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
- .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 1, .vsub = 1, .has_alpha = true },
- { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
- .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 1, .vsub = 1, .has_alpha = true },
- { .format = DRM_FORMAT_YUYV, .num_planes = 2,
- .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 2, .vsub = 1, .is_yuv = true },
- { .format = DRM_FORMAT_YVYU, .num_planes = 2,
- .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 2, .vsub = 1, .is_yuv = true },
- { .format = DRM_FORMAT_UYVY, .num_planes = 2,
- .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 2, .vsub = 1, .is_yuv = true },
- { .format = DRM_FORMAT_VYUY, .num_planes = 2,
- .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 2, .vsub = 1, .is_yuv = true },
- { .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
- .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 1, .vsub = 1, .is_yuv = true },
- { .format = DRM_FORMAT_NV12, .num_planes = 4,
- .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
- .hsub = 2, .vsub = 2, .is_yuv = true },
- { .format = DRM_FORMAT_P010, .num_planes = 4,
- .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
- .hsub = 2, .vsub = 2, .is_yuv = true },
- { .format = DRM_FORMAT_P012, .num_planes = 4,
- .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
- .hsub = 2, .vsub = 2, .is_yuv = true },
- { .format = DRM_FORMAT_P016, .num_planes = 4,
- .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
- .hsub = 2, .vsub = 2, .is_yuv = true },
-};
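The same arithmetic, spelled out for the Gen-12 layout (illustrative only):

	/*
	 * Illustrative: one 64B CCS cache line covers 4 Y-tiles of
	 * 32x32 pixels each, i.e. 4 * 32 * 32 = 4096 pixels at 4 bpp,
	 * so each CCS byte covers 4096 / 64 = 64 pixels, which is the
	 * 1B per 2x32 pixels ratio stated above.
	 */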
-
-/*
- * Same as gen12_ccs_formats[] above, but with additional surface used
- * to pass Clear Color information in plane 2 with 64 bits of data.
- */
-static const struct drm_format_info gen12_ccs_cc_formats[] = {
- { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
- .hsub = 1, .vsub = 1, },
- { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
- .hsub = 1, .vsub = 1, },
- { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
- .hsub = 1, .vsub = 1, .has_alpha = true },
- { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
- .hsub = 1, .vsub = 1, .has_alpha = true },
-};
-
-static const struct drm_format_info *
-lookup_format_info(const struct drm_format_info formats[],
- int num_formats, u32 format)
-{
- int i;
-
- for (i = 0; i < num_formats; i++) {
- if (formats[i].format == format)
- return &formats[i];
- }
-
- return NULL;
-}
-
-static const struct drm_format_info *
-intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
-{
- switch (cmd->modifier[0]) {
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- return lookup_format_info(skl_ccs_formats,
- ARRAY_SIZE(skl_ccs_formats),
- cmd->pixel_format);
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- return lookup_format_info(gen12_ccs_formats,
- ARRAY_SIZE(gen12_ccs_formats),
- cmd->pixel_format);
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- return lookup_format_info(gen12_ccs_cc_formats,
- ARRAY_SIZE(gen12_ccs_cc_formats),
- cmd->pixel_format);
- default:
- return NULL;
- }
-}
-
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier)
{
@@ -1048,7 +699,7 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
 * the highest stride limits of them all;
 * in case pipe A is disabled, use the first pipe from pipe_mask.
*/
- crtc = intel_get_first_crtc(dev_priv);
+ crtc = intel_first_crtc(dev_priv);
if (!crtc)
return 0;
@@ -1126,7 +777,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
*/
if (HAS_GMCH(dev_priv) &&
intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
@@ -1135,8 +786,8 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- intel_disable_plane(plane, crtc_state);
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_plane_disable_arm(plane, crtc_state);
+ intel_crtc_wait_for_next_vblank(crtc);
}
unsigned int
@@ -1192,7 +843,7 @@ __intel_display_resume(struct drm_device *dev,
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(&dev_priv->gt));
+ intel_has_gpu_reset(to_gt(dev_priv)));
}
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
@@ -1211,14 +862,14 @@ void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
return;
/* We have a modeset vs reset deadlock, defensively unbreak it. */
- set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
+ set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
smp_mb__after_atomic();
- wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
+ wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
drm_dbg_kms(&dev_priv->drm,
"Modeset potentially stuck, unbreaking through wedging\n");
- intel_gt_set_wedged(&dev_priv->gt);
+ intel_gt_set_wedged(to_gt(dev_priv));
}
/*
@@ -1269,7 +920,7 @@ void intel_display_finish_reset(struct drm_i915_private *dev_priv)
return;
/* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
+ if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
return;
state = fetch_and_zero(&dev_priv->modeset_restore_state);
@@ -1307,27 +958,7 @@ unlock:
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
- clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
-}
-
-static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
-{
- if (crtc_state->pch_pfit.enabled &&
- (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
- crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
- return false;
-
- if (crtc_state->dsc.compression_enable)
- return false;
-
- if (crtc_state->has_psr2)
- return false;
-
- if (crtc_state->splitter.enable)
- return false;
-
- return true;
+ clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
}
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
@@ -1353,19 +984,18 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
*/
tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
- if (IS_DG2(dev_priv)) {
- /*
- * Underrun recovery must always be disabled on DG2. However
- * the chicken bit meaning is inverted compared to other
- * platforms.
- */
+ /*
+ * Underrun recovery must always be disabled on display 13+.
+ * On DG2 the chicken bit meaning is inverted compared to other platforms.
+ */
+ if (IS_DG2(dev_priv))
tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
- } else if (DISPLAY_VER(dev_priv) >= 13) {
- if (underrun_recovery_supported(crtc_state))
- tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
- else
- tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
- }
+ else if (DISPLAY_VER(dev_priv) >= 13)
+ tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
+
+ /* Wa_14010547955:dg2 */
+ if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
+ tmp |= DG2_RENDER_CCSTAG_4_3_EN;
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
@@ -1387,7 +1017,7 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
if (cleanup_done)
continue;
- drm_crtc_wait_one_vblank(crtc);
+ intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
return true;
}
@@ -1395,158 +1025,6 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
return false;
}
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
-{
- u32 temp;
-
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
-
- mutex_lock(&dev_priv->sb_lock);
-
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
- temp |= SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
-
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-/* Program iCLKIP clock to the desired frequency */
-static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int clock = crtc_state->hw.adjusted_mode.crtc_clock;
- u32 divsel, phaseinc, auxdiv, phasedir = 0;
- u32 temp;
-
- lpt_disable_iclkip(dev_priv);
-
- /* The iCLK virtual clock root frequency is in MHz,
- * but the adjusted_mode->crtc_clock is in KHz. To get the
- * divisors, it is necessary to divide one by another, so we
- * convert the virtual clock precision to KHz here for higher
- * precision.
- */
- for (auxdiv = 0; auxdiv < 2; auxdiv++) {
- u32 iclk_virtual_root_freq = 172800 * 1000;
- u32 iclk_pi_range = 64;
- u32 desired_divisor;
-
- desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
- clock << auxdiv);
- divsel = (desired_divisor / iclk_pi_range) - 2;
- phaseinc = desired_divisor % iclk_pi_range;
-
- /*
- * Near 20MHz is a corner case which is
- * out of range for the 7-bit divisor
- */
- if (divsel <= 0x7f)
- break;
- }
-
- /* This should not happen with any sane values */
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
- ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
- ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
-
- drm_dbg_kms(&dev_priv->drm,
- "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
- clock, auxdiv, divsel, phasedir, phaseinc);
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* Program SSCDIVINTPHASE6 */
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
- temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
- temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
- temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
- temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
- temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
- temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
-
- /* Program SSCAUXDIV */
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
- temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
- temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
- intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
-
- /* Enable modulator and associated divider */
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
- temp &= ~SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
-
- mutex_unlock(&dev_priv->sb_lock);
-
- /* Wait for initialization time */
- udelay(24);
-
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
-}
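A worked example of the divisor search above, using a hypothetical 148.5 MHz pixel clock (the numbers are illustrative, not taken from this patch):

	/*
	 * Illustrative: crtc_clock = 148500 kHz, auxdiv = 0:
	 *   desired_divisor = DIV_ROUND_CLOSEST(172800000, 148500) = 1164
	 *   divsel          = 1164 / 64 - 2 = 16  (fits the 7-bit field)
	 *   phaseinc        = 1164 % 64     = 12
	 *
	 * For the ~20 MHz corner case, e.g. crtc_clock = 20000 kHz, the
	 * auxdiv = 0 pass gives divsel = 8640 / 64 - 2 = 133 > 0x7f, so
	 * the loop retries with auxdiv = 1: divsel = 4320 / 64 - 2 = 65.
	 *
	 * lpt_get_iclkip() below inverts the computation:
	 *   (16 + 2) * 64 + 12 = 1164, and
	 *   DIV_ROUND_CLOSEST(172800000, 1164 << 0) = 148454 kHz,
	 * reproducing the requested clock to within the PI step size.
	 */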
-
-int lpt_get_iclkip(struct drm_i915_private *dev_priv)
-{
- u32 divsel, phaseinc, auxdiv;
- u32 iclk_virtual_root_freq = 172800 * 1000;
- u32 iclk_pi_range = 64;
- u32 desired_divisor;
- u32 temp;
-
- if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
- return 0;
-
- mutex_lock(&dev_priv->sb_lock);
-
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
- if (temp & SBI_SSCCTL_DISABLE) {
- mutex_unlock(&dev_priv->sb_lock);
- return 0;
- }
-
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
- divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
- SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
- phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
- SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
-
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
- auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
- SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
-
- mutex_unlock(&dev_priv->sb_lock);
-
- desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
-
- return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
- desired_divisor << auxdiv);
-}
-
-static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
- enum pipe pch_transcoder)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
- intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
-
- intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
-}
-
/*
* Finds the encoder associated with the given CRTC. This can only be
* used when we know that the CRTC isn't feeding multiple encoders!
@@ -1555,15 +1033,17 @@ struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_connector_state *connector_state;
const struct drm_connector *connector;
struct intel_encoder *encoder = NULL;
+ struct intel_crtc *master_crtc;
int num_encoders = 0;
int i;
+ master_crtc = intel_master_crtc(crtc_state);
+
for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
- if (connector_state->crtc != &crtc->base)
+ if (connector_state->crtc != &master_crtc->base)
continue;
encoder = to_intel_encoder(connector_state->best_encoder);
@@ -1572,111 +1052,11 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
drm_WARN(encoder->base.dev, num_encoders != 1,
"%d encoders for pipe %c\n",
- num_encoders, pipe_name(crtc->pipe));
+ num_encoders, pipe_name(master_crtc->pipe));
return encoder;
}
-/*
- * Enable PCH resources required for PCH ports:
- * - PCH PLLs
- * - FDI training & RX/TX
- * - update transcoder timings
- * - DP transcoding bits
- * - transcoder
- */
-static void ilk_pch_enable(const struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- u32 temp;
-
- assert_pch_transcoder_disabled(dev_priv, pipe);
-
- /* For PCH output, training FDI link */
- intel_fdi_link_train(crtc, crtc_state);
-
- /* We need to program the right clock selection before writing the pixel
- * multiplier into the DPLL. */
- if (HAS_PCH_CPT(dev_priv)) {
- u32 sel;
-
- temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
- temp |= TRANS_DPLL_ENABLE(pipe);
- sel = TRANS_DPLLB_SEL(pipe);
- if (crtc_state->shared_dpll ==
- intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
- temp |= sel;
- else
- temp &= ~sel;
- intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
- }
-
- /* XXX: pch pll's can be enabled any time before we enable the PCH
- * transcoder, and we actually should do this to not upset any PCH
- * transcoder that already use the clock when we share it.
- *
- * Note that enable_shared_dpll tries to do the right thing, but
- * get_shared_dpll unconditionally resets the pll - we need that to have
- * the right LVDS enable sequence. */
- intel_enable_shared_dpll(crtc_state);
-
- /* set transcoder timing, panel must allow it */
- assert_pps_unlocked(dev_priv, pipe);
- ilk_pch_transcoder_set_timings(crtc_state, pipe);
-
- intel_fdi_normal_train(crtc);
-
- /* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev_priv) &&
- intel_crtc_has_dp_encoder(crtc_state)) {
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
- i915_reg_t reg = TRANS_DP_CTL(pipe);
- enum port port;
-
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK |
- TRANS_DP_BPC_MASK);
- temp |= TRANS_DP_OUTPUT_ENABLE;
- temp |= bpc << 9; /* same format but at 11:9 */
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
-
- port = intel_get_crtc_new_encoder(state, crtc_state)->port;
- drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
- temp |= TRANS_DP_PORT_SEL(port);
-
- intel_de_write(dev_priv, reg, temp);
- }
-
- ilk_enable_pch_transcoder(crtc_state);
-}
-
-void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
- assert_pch_transcoder_disabled(dev_priv, PIPE_A);
-
- lpt_program_iclkip(crtc_state);
-
- /* Set transcoder timing. */
- ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
-
- lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
-}
-
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1784,7 +1164,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
}
/* We need to wait for a vblank before we can disable the plane. */
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
@@ -1919,7 +1299,7 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- return crtc_state->uapi.async_flip && intel_vtd_active() &&
+ return crtc_state->uapi.async_flip && intel_vtd_active(i915) &&
(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}
@@ -2015,7 +1395,6 @@ static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
@@ -2041,7 +1420,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
}
if (need_vbl_wait)
- intel_wait_for_vblank(i915, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
}
static void intel_pre_plane_update(struct intel_atomic_state *state,
@@ -2054,11 +1433,13 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
+ intel_psr_pre_plane_update(state, crtc);
+
if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
hsw_disable_ips(old_crtc_state);
if (intel_fbc_pre_update(state, crtc))
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
if (!needs_async_flip_vtd_wa(old_crtc_state) &&
needs_async_flip_vtd_wa(new_crtc_state))
@@ -2090,7 +1471,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*/
if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
/*
* IVB workaround: must disable low power watermarks for at least
@@ -2101,7 +1482,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*/
if (old_crtc_state->hw.active &&
new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
/*
* If we're doing a modeset we don't need to do any
@@ -2165,7 +1546,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- intel_disable_plane(plane, new_crtc_state);
+ intel_plane_disable_arm(plane, new_crtc_state);
if (old_plane_state->uapi.visible)
fb_bits |= plane->frontbuffer_bit;
@@ -2199,10 +1580,30 @@ intel_connector_primary_encoder(struct intel_connector *connector)
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
struct drm_connector_state *new_conn_state;
struct drm_connector *connector;
int i;
+ /*
+ * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
+ * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
+ */
+ if (i915->dpll.mgr) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
+ new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
+ }
+ }
+
+ if (!state->modeset)
+ return;
+
for_each_new_connector_in_state(&state->base, connector, new_conn_state,
i) {
struct intel_connector *intel_connector;
@@ -2229,6 +1630,9 @@ static void intel_encoders_update_complete(struct intel_atomic_state *state)
struct drm_connector *connector;
int i;
+ if (!state->modeset)
+ return;
+
for_each_new_connector_in_state(&state->base, connector, new_conn_state,
i) {
struct intel_connector *intel_connector;
@@ -2316,28 +1720,6 @@ static void intel_encoders_enable(struct intel_atomic_state *state,
}
}
-static void intel_encoders_pre_disable(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct drm_connector_state *old_conn_state;
- struct drm_connector *conn;
- int i;
-
- for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(old_conn_state->best_encoder);
-
- if (old_conn_state->crtc != &crtc->base)
- continue;
-
- if (encoder->pre_disable)
- encoder->pre_disable(state, encoder, old_crtc_state,
- old_conn_state);
- }
-}
-
static void intel_encoders_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -2432,7 +1814,7 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- plane->disable_plane(plane, crtc_state);
+ plane->disable_arm(plane, crtc_state);
}
static void ilk_crtc_enable(struct intel_atomic_state *state,
@@ -2500,7 +1882,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_enable_transcoder(new_crtc_state);
if (new_crtc_state->has_pch_encoder)
- ilk_pch_enable(state, new_crtc_state);
+ ilk_pch_enable(state, crtc);
intel_crtc_vblank_on(new_crtc_state);
@@ -2516,8 +1898,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
* in case there are more corner cases we don't know about.
*/
if (new_crtc_state->has_pch_encoder) {
- intel_wait_for_vblank(dev_priv, pipe);
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
+ intel_crtc_wait_for_next_vblank(crtc);
}
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -2592,42 +1974,39 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(master->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *master_crtc_state;
+ struct intel_crtc *master_crtc;
struct drm_connector_state *conn_state;
struct drm_connector *conn;
struct intel_encoder *encoder = NULL;
int i;
- if (crtc_state->bigjoiner_slave)
- master = crtc_state->bigjoiner_linked_crtc;
-
- master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
+ master_crtc = intel_master_crtc(crtc_state);
+ master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
- if (conn_state->crtc != &master->base)
+ if (conn_state->crtc != &master_crtc->base)
continue;
encoder = to_intel_encoder(conn_state->best_encoder);
break;
}
- if (!crtc_state->bigjoiner_slave) {
- /* need to enable VDSC, which we skipped in pre-enable */
- intel_dsc_enable(encoder, crtc_state);
- } else {
- /*
- * Enable sequence steps 1-7 on bigjoiner master
- */
- intel_encoders_pre_pll_enable(state, master);
- if (master_crtc_state->shared_dpll)
- intel_enable_shared_dpll(master_crtc_state);
- intel_encoders_pre_enable(state, master);
+ /*
+ * Enable sequence steps 1-7 on bigjoiner master
+ */
+ if (crtc_state->bigjoiner_slave)
+ intel_encoders_pre_pll_enable(state, master_crtc);
- /* and DSC on slave */
- intel_dsc_enable(NULL, crtc_state);
- }
+ if (crtc_state->shared_dpll)
+ intel_enable_shared_dpll(crtc_state);
+
+ if (crtc_state->bigjoiner_slave)
+ intel_encoders_pre_enable(state, master_crtc);
+
+ /* need to enable VDSC, which we skipped in pre-enable */
+ intel_dsc_enable(crtc_state);
if (DISPLAY_VER(dev_priv) >= 13)
intel_uncompressed_joiner_enable(crtc_state);
@@ -2720,7 +2099,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
if (psl_clkgate_wa) {
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
}
@@ -2728,8 +2107,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
* to change the workaround. */
hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
- intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
- intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
+ struct intel_crtc *wa_crtc;
+
+ wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
+
+ intel_crtc_wait_for_next_vblank(wa_crtc);
+ intel_crtc_wait_for_next_vblank(wa_crtc);
}
}
@@ -2774,33 +2157,12 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
ilk_pfit_disable(old_crtc_state);
if (old_crtc_state->has_pch_encoder)
- ilk_fdi_disable(crtc);
+ ilk_pch_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
- if (old_crtc_state->has_pch_encoder) {
- ilk_disable_pch_transcoder(dev_priv, pipe);
-
- if (HAS_PCH_CPT(dev_priv)) {
- i915_reg_t reg;
- u32 temp;
-
- /* disable TRANS_DP_CTL */
- reg = TRANS_DP_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_PORT_SEL_MASK);
- temp |= TRANS_DP_PORT_SEL_NONE;
- intel_de_write(dev_priv, reg, temp);
-
- /* disable DPLL_SEL */
- temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
- temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
- intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
- }
-
- ilk_fdi_pll_disable(crtc);
- }
+ if (old_crtc_state->has_pch_encoder)
+ ilk_pch_post_disable(state, crtc);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -2809,12 +2171,17 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
static void hsw_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
/*
* FIXME collapse everything to one hook.
* Need care with mst->ddi interactions.
*/
- intel_encoders_disable(state, crtc);
- intel_encoders_post_disable(state, crtc);
+ if (!old_crtc_state->bigjoiner_slave) {
+ intel_encoders_disable(state, crtc);
+ intel_encoders_post_disable(state, crtc);
+ }
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -3171,7 +2538,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
/* prevents spurious underruns */
if (DISPLAY_VER(dev_priv) == 2)
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
}
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
@@ -3202,7 +2569,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
* wait for planes to fully turn off before disabling the pipe.
*/
if (DISPLAY_VER(dev_priv) == 2)
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
intel_encoders_disable(state, crtc);
@@ -4306,414 +3673,6 @@ out:
return ret;
}
-static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- int i;
- u32 val, final;
- bool has_lvds = false;
- bool has_cpu_edp = false;
- bool has_panel = false;
- bool has_ck505 = false;
- bool can_ssc = false;
- bool using_ssc_source = false;
-
- /* We need to take the global config into account */
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- has_panel = true;
- has_lvds = true;
- break;
- case INTEL_OUTPUT_EDP:
- has_panel = true;
- if (encoder->port == PORT_A)
- has_cpu_edp = true;
- break;
- default:
- break;
- }
- }
-
- if (HAS_PCH_IBX(dev_priv)) {
- has_ck505 = dev_priv->vbt.display_clock_mode;
- can_ssc = has_ck505;
- } else {
- has_ck505 = false;
- can_ssc = true;
- }
-
- /* Check if any DPLLs are using the SSC source */
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
- u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
-
- if (!(temp & DPLL_VCO_ENABLE))
- continue;
-
- if ((temp & PLL_REF_INPUT_MASK) ==
- PLLB_REF_INPUT_SPREADSPECTRUMIN) {
- using_ssc_source = true;
- break;
- }
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
- has_panel, has_lvds, has_ck505, using_ssc_source);
-
- /* Ironlake: try to set up the display ref clock before DPLL
- * enabling. This is only under the driver's control after
- * PCH B stepping; earlier chipset steppings should
- * ignore this setting.
- */
- val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
-
- /* As we must carefully and slowly disable/enable each source in turn,
- * compute the final state we want first and check if we need to
- * make any changes at all.
- */
- final = val;
- final &= ~DREF_NONSPREAD_SOURCE_MASK;
- if (has_ck505)
- final |= DREF_NONSPREAD_CK505_ENABLE;
- else
- final |= DREF_NONSPREAD_SOURCE_ENABLE;
-
- final &= ~DREF_SSC_SOURCE_MASK;
- final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
- final &= ~DREF_SSC1_ENABLE;
-
- if (has_panel) {
- final |= DREF_SSC_SOURCE_ENABLE;
-
- if (intel_panel_use_ssc(dev_priv) && can_ssc)
- final |= DREF_SSC1_ENABLE;
-
- if (has_cpu_edp) {
- if (intel_panel_use_ssc(dev_priv) && can_ssc)
- final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- else
- final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- } else
- final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- } else if (using_ssc_source) {
- final |= DREF_SSC_SOURCE_ENABLE;
- final |= DREF_SSC1_ENABLE;
- }
-
- if (final == val)
- return;
-
- /* Always enable nonspread source */
- val &= ~DREF_NONSPREAD_SOURCE_MASK;
-
- if (has_ck505)
- val |= DREF_NONSPREAD_CK505_ENABLE;
- else
- val |= DREF_NONSPREAD_SOURCE_ENABLE;
-
- if (has_panel) {
- val &= ~DREF_SSC_SOURCE_MASK;
- val |= DREF_SSC_SOURCE_ENABLE;
-
- /* SSC must be turned on before enabling the CPU output */
- if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
- val |= DREF_SSC1_ENABLE;
- } else
- val &= ~DREF_SSC1_ENABLE;
-
- /* Get SSC going before enabling the outputs */
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
- udelay(200);
-
- val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-
- /* Enable CPU source on CPU attached eDP */
- if (has_cpu_edp) {
- if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm,
- "Using SSC on eDP\n");
- val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- } else
- val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- } else
- val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
- udelay(200);
- } else {
- drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
-
- val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-
- /* Turn off CPU output */
- val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
- udelay(200);
-
- if (!using_ssc_source) {
- drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
-
- /* Turn off the SSC source */
- val &= ~DREF_SSC_SOURCE_MASK;
- val |= DREF_SSC_SOURCE_DISABLE;
-
- /* Turn off SSC1 */
- val &= ~DREF_SSC1_ENABLE;
-
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
- udelay(200);
- }
- }
-
- BUG_ON(val != final);
-}
-
-/* Implements 3 different sequences from BSpec chapter "Display iCLK
- * Programming" based on the parameters passed:
- * - Sequence to enable CLKOUT_DP
- * - Sequence to enable CLKOUT_DP without spread
- * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
- */
-static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
- bool with_spread, bool with_fdi)
-{
- u32 reg, tmp;
-
- if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
- "FDI requires downspread\n"))
- with_spread = true;
- if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
- with_fdi, "LP PCH doesn't have FDI\n"))
- with_fdi = false;
-
- mutex_lock(&dev_priv->sb_lock);
-
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- tmp &= ~SBI_SSCCTL_DISABLE;
- tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-
- udelay(24);
-
- if (with_spread) {
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- tmp &= ~SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-
- if (with_fdi)
- lpt_fdi_program_mphy(dev_priv);
- }
-
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
- tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
- tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
- intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
-
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-/* Sequence to disable CLKOUT_DP */
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
-{
- u32 reg, tmp;
-
- mutex_lock(&dev_priv->sb_lock);
-
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
- tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
- tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
- intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
-
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- if (!(tmp & SBI_SSCCTL_DISABLE)) {
- if (!(tmp & SBI_SSCCTL_PATHALT)) {
- tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- udelay(32);
- }
- tmp |= SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-#define BEND_IDX(steps) ((50 + (steps)) / 5)
-
-static const u16 sscdivintphase[] = {
- [BEND_IDX( 50)] = 0x3B23,
- [BEND_IDX( 45)] = 0x3B23,
- [BEND_IDX( 40)] = 0x3C23,
- [BEND_IDX( 35)] = 0x3C23,
- [BEND_IDX( 30)] = 0x3D23,
- [BEND_IDX( 25)] = 0x3D23,
- [BEND_IDX( 20)] = 0x3E23,
- [BEND_IDX( 15)] = 0x3E23,
- [BEND_IDX( 10)] = 0x3F23,
- [BEND_IDX( 5)] = 0x3F23,
- [BEND_IDX( 0)] = 0x0025,
- [BEND_IDX( -5)] = 0x0025,
- [BEND_IDX(-10)] = 0x0125,
- [BEND_IDX(-15)] = 0x0125,
- [BEND_IDX(-20)] = 0x0225,
- [BEND_IDX(-25)] = 0x0225,
- [BEND_IDX(-30)] = 0x0325,
- [BEND_IDX(-35)] = 0x0325,
- [BEND_IDX(-40)] = 0x0425,
- [BEND_IDX(-45)] = 0x0425,
- [BEND_IDX(-50)] = 0x0525,
-};
-
-/*
- * Bend CLKOUT_DP
- * steps -50 to 50 inclusive, in steps of 5
- * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
- * change in clock period = -(steps / 10) * 5.787 ps
- */
-static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
-{
- u32 tmp;
- int idx = BEND_IDX(steps);
-
- if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
- return;
-
- if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
- return;
-
- mutex_lock(&dev_priv->sb_lock);
-
- if (steps % 10 != 0)
- tmp = 0xAAAAAAAB;
- else
- tmp = 0x00000000;
- intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
-
- tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
- tmp &= 0xffff0000;
- tmp |= sscdivintphase[idx];
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
-
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-#undef BEND_IDX
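A quick check of the table index math (illustrative):

	/*
	 * Illustrative: BEND_IDX() maps every legal step value onto the
	 * 21-entry sscdivintphase[] table above:
	 *   BEND_IDX(-50) = (50 - 50) / 5 = 0
	 *   BEND_IDX(0)   = (50 +  0) / 5 = 10
	 *   BEND_IDX(50)  = (50 + 50) / 5 = 20
	 * and, per the comment above, steps = 10 changes the clock
	 * period by -(10 / 10) * 5.787 ps = -5.787 ps, i.e. it speeds
	 * the clock up slightly.
	 */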
-
-static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
-{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
-
- if ((ctl & SPLL_PLL_ENABLE) == 0)
- return false;
-
- if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
- (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
- return true;
-
- if (IS_BROADWELL(dev_priv) &&
- (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
- return true;
-
- return false;
-}
-
-static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
- enum intel_dpll_id id)
-{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
-
- if ((ctl & WRPLL_PLL_ENABLE) == 0)
- return false;
-
- if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
- return true;
-
- if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
- (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
- (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
- return true;
-
- return false;
-}
-
-static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- bool has_fdi = false;
-
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_ANALOG:
- has_fdi = true;
- break;
- default:
- break;
- }
- }
-
- /*
- * The BIOS may have decided to use the PCH SSC
- * reference so we must not disable it until the
- * relevant PLLs have stopped relying on it. We'll
- * just leave the PCH SSC reference enabled in case
- * any active PLL is using it. It will get disabled
- * after runtime suspend if we don't have FDI.
- *
- * TODO: Move the whole reference clock handling
- * to the modeset sequence proper so that we can
- * actually enable/disable/reconfigure these things
- * safely. To do that we need to introduce a real
- * clock hierarchy. That would also allow us to do
- * clock bending finally.
- */
- dev_priv->pch_ssc_use = 0;
-
- if (spll_uses_pch_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
- }
-
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
- }
-
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
- }
-
- if (dev_priv->pch_ssc_use)
- return;
-
- if (has_fdi) {
- lpt_bend_clkout_dp(dev_priv, 0);
- lpt_enable_clkout_dp(dev_priv, true, true);
- } else {
- lpt_disable_clkout_dp(dev_priv);
- }
-}
-
-/*
- * Initialize reference clocks when the driver loads
- */
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ilk_init_pch_refclk(dev_priv);
- else if (HAS_PCH_LPT(dev_priv))
- lpt_init_pch_refclk(dev_priv);
-}
-
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -4978,8 +3937,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
&pipe_config->dp_m2_n2);
}
-static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n, NULL);
@@ -5116,50 +4075,9 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pipe_color_config(pipe_config);
intel_color_get_config(pipe_config);
- if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
- struct intel_shared_dpll *pll;
- enum intel_dpll_id pll_id;
- bool pll_active;
-
- pipe_config->has_pch_encoder = true;
-
- tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
- pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
- FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
- ilk_get_fdi_m_n_config(crtc, pipe_config);
-
- if (HAS_PCH_IBX(dev_priv)) {
- /*
- * The pipe->pch transcoder and pch transcoder->pll
- * mapping is fixed.
- */
- pll_id = (enum intel_dpll_id) crtc->pipe;
- } else {
- tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
- if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
- pll_id = DPLL_ID_PCH_PLL_B;
- else
- pll_id = DPLL_ID_PCH_PLL_A;
- }
-
- pipe_config->shared_dpll =
- intel_get_shared_dpll_by_id(dev_priv, pll_id);
- pll = pipe_config->shared_dpll;
-
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state);
- drm_WARN_ON(dev, !pll_active);
-
- tmp = pipe_config->dpll_hw_state.dpll;
- pipe_config->pixel_multiplier =
- ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
- >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+ pipe_config->pixel_multiplier = 1;
- ilk_pch_clock_get(crtc, pipe_config);
- } else {
- pipe_config->pixel_multiplier = 1;
- }
+ ilk_pch_get_config(pipe_config);
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -5174,6 +4092,16 @@ out:
return ret;
}
+static u8 bigjoiner_pipes(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 12)
+ return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
+ else if (DISPLAY_VER(i915) >= 11)
+ return BIT(PIPE_B) | BIT(PIPE_C);
+ else
+ return 0;
+}
+
static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
@@ -5189,6 +4117,54 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
return tmp & TRANS_DDI_FUNC_ENABLE;
}
+static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
+{
+ u8 master_pipes = 0, slave_pipes = 0;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+
+ if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
+ continue;
+
+ power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
+ u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+
+ if (!(tmp & BIG_JOINER_ENABLE))
+ continue;
+
+ if (tmp & MASTER_BIG_JOINER_ENABLE)
+ master_pipes |= BIT(pipe);
+ else
+ slave_pipes |= BIT(pipe);
+ }
+
+ if (DISPLAY_VER(dev_priv) < 13)
+ continue;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
+ u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+
+ if (tmp & UNCOMPRESSED_JOINER_MASTER)
+ master_pipes |= BIT(pipe);
+ if (tmp & UNCOMPRESSED_JOINER_SLAVE)
+ slave_pipes |= BIT(pipe);
+ }
+ }
+
+ /* Bigjoiner pipes should always be consecutive master and slave */
+ drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
+ "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
+ master_pipes, slave_pipes);
+
+ return slave_pipes;
+}
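To illustrate the consecutive master/slave invariant checked above (hypothetical pipe assignment):

	/*
	 * Illustrative: with pipes A and C acting as bigjoiner masters,
	 * master_pipes = BIT(PIPE_A) | BIT(PIPE_C) = 0b0101, and the
	 * slaves must be the immediately following pipes B and D, i.e.
	 * slave_pipes = 0b1010 = master_pipes << 1. The same assumption
	 * is what lets hsw_enabled_transcoders() below treat pipe - 1
	 * as a slave pipe's master transcoder.
	 */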
+
static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
{
u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
@@ -5250,10 +4226,18 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
enabled_transcoders |= BIT(cpu_transcoder);
}
+ /* single pipe or bigjoiner master */
cpu_transcoder = (enum transcoder) crtc->pipe;
if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
+ /* bigjoiner slave -> consider the master pipe's transcoder as well */
+ if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
+ cpu_transcoder = (enum transcoder) crtc->pipe - 1;
+ if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ enabled_transcoders |= BIT(cpu_transcoder);
+ }
+
return enabled_transcoders;
}
@@ -5374,45 +4358,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
-static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- enum port port;
- u32 tmp;
-
- if (transcoder_is_dsi(cpu_transcoder)) {
- port = (cpu_transcoder == TRANSCODER_DSI_A) ?
- PORT_A : PORT_B;
- } else {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (!(tmp & TRANS_DDI_FUNC_ENABLE))
- return;
- if (DISPLAY_VER(dev_priv) >= 12)
- port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
- else
- port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
- }
-
- /*
- * Haswell has only FDI/PCH transcoder A, which is connected to
- * DDI E. So just check whether this pipe is wired to DDI E and whether
- * the PCH transcoder is on.
- */
- if (DISPLAY_VER(dev_priv) < 9 &&
- (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
- pipe_config->has_pch_encoder = true;
-
- tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
- FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
- ilk_get_fdi_m_n_config(crtc, pipe_config);
- }
-}
-
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -5439,21 +4384,12 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
intel_uncompressed_joiner_get_config(pipe_config);
- if (!active) {
- /* bigjoiner slave doesn't enable transcoder */
- if (!pipe_config->bigjoiner_slave)
- goto out;
-
- active = true;
- pipe_config->pixel_multiplier = 1;
+ if (!active)
+ goto out;
- /* we cannot read out most state, so don't bother... */
- pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
- } else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
- DISPLAY_VER(dev_priv) >= 11) {
- hsw_get_ddi_port_state(crtc, pipe_config);
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+ DISPLAY_VER(dev_priv) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
- }
if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
intel_vrr_get_config(crtc, pipe_config);
@@ -5521,10 +4457,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
}
- if (pipe_config->bigjoiner_slave) {
- /* Cannot be read out as a slave, set to 0. */
- pipe_config->pixel_multiplier = 0;
- } else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
+ if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
intel_de_read(dev_priv,
@@ -5721,7 +4654,8 @@ found:
drm_atomic_state_put(state);
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
+
return true;
fail:
@@ -5782,8 +4716,8 @@ static int i9xx_pll_refclk(struct drm_device *dev,
}
/* Returns the clock of the currently programmed mode of the given pipe. */
-static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5893,24 +4827,6 @@ int intel_dotclock_calculate(int link_freq,
return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
-static void ilk_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- /* read out port_clock from the DPLL */
- i9xx_crtc_clock_get(crtc, pipe_config);
-
- /*
- * In case there is an active pipe without active ports,
- * we may need some idea for the dotclock anyway.
- * Calculate one based on the FDI configuration.
- */
- pipe_config->hw.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
- &pipe_config->fdi_m_n);
-}
-
/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
@@ -5924,7 +4840,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
if (!encoder->get_hw_state(encoder, &pipe))
return NULL;
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -6245,6 +5161,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
linked_state->color_ctl = plane_state->color_ctl;
linked_state->view = plane_state->view;
+ linked_state->decrypt = plane_state->decrypt;
intel_plane_copy_hw_state(linked_state, plane_state);
linked_state->uapi.src = plane_state->uapi.src;
@@ -6252,13 +5169,13 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
if (icl_is_hdr_plane(dev_priv, plane->id)) {
if (linked->id == PLANE_SPRITE5)
- plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
else if (linked->id == PLANE_SPRITE4)
- plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
else if (linked->id == PLANE_SPRITE3)
- plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
else if (linked->id == PLANE_SPRITE2)
- plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
else
MISSING_CASE(linked->id);
}
@@ -6371,8 +5288,6 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
crtc_state->update_wm_post = true;
if (mode_changed && crtc_state->hw.enable &&
- dev_priv->dpll_funcs &&
- !crtc_state->bigjoiner_slave &&
!drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
if (ret)
@@ -6928,18 +5843,15 @@ static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
{
- const struct intel_crtc_state *from_crtc_state = crtc_state;
-
- if (crtc_state->bigjoiner_slave) {
- from_crtc_state = intel_atomic_get_new_crtc_state(state,
- crtc_state->bigjoiner_linked_crtc);
+ const struct intel_crtc_state *master_crtc_state;
+ struct intel_crtc *master_crtc;
- /* No need to copy state if the master state is unchanged */
- if (!from_crtc_state)
- return;
- }
+ master_crtc = intel_master_crtc(crtc_state);
+ master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
- intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
+ /* No need to copy state if the master state is unchanged */
+ if (master_crtc_state)
+ intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
}
static void
@@ -6982,7 +5894,6 @@ copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
const struct intel_crtc_state *from_crtc_state)
{
struct intel_crtc_state *saved_state;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
if (!saved_state)
@@ -7012,8 +5923,8 @@ copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
crtc_state->bigjoiner_slave = true;
- crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
- crtc_state->has_audio = false;
+ crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
+ crtc_state->has_audio = from_crtc_state->has_audio;
return 0;
}
@@ -7609,51 +6520,48 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(output_types);
- /* FIXME do the readout properly and get rid of this quirk */
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
-
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
-
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
-
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
-
- PIPE_CONF_CHECK_I(pixel_multiplier);
-
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
+
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
+
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
+
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
+
+ PIPE_CONF_CHECK_I(pixel_multiplier);
+
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_INTERLACE);
+
+ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
- DRM_MODE_FLAG_INTERLACE);
-
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
- PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
- DRM_MODE_FLAG_PHSYNC);
- PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
- DRM_MODE_FLAG_NHSYNC);
- PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
- DRM_MODE_FLAG_PVSYNC);
- PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
- DRM_MODE_FLAG_NVSYNC);
- }
+ DRM_MODE_FLAG_PHSYNC);
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_NHSYNC);
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_PVSYNC);
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_NVSYNC);
}
PIPE_CONF_CHECK_I(output_format);
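
The PIPE_CONF_CHECK_*() family compares one field of current_config against pipe_config and records any mismatch. A simplified sketch of the integer variant (the real macro also distinguishes fastset checks when reporting) might look like:

#define PIPE_CONF_CHECK_I(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %i, found %i)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)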
@@ -7665,9 +6573,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_BOOL(has_infoframe);
- /* FIXME do the readout properly and get rid of this quirk */
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
- PIPE_CONF_CHECK_BOOL(fec_enable);
+ PIPE_CONF_CHECK_BOOL(fec_enable);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
@@ -7696,9 +6602,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
}
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
- /* FIXME do the readout properly and get rid of this quirk */
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
- PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
PIPE_CONF_CHECK_X(gamma_mode);
if (IS_CHERRYVIEW(dev_priv))
@@ -7725,11 +6629,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(double_wide);
- if (dev_priv->dpll.mgr)
+ if (dev_priv->dpll.mgr) {
PIPE_CONF_CHECK_P(shared_dpll);
- /* FIXME do the readout properly and get rid of this quirk */
- if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
@@ -7763,19 +6665,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
}
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
- PIPE_CONF_CHECK_X(dsi_pll.ctrl);
- PIPE_CONF_CHECK_X(dsi_pll.div);
+ PIPE_CONF_CHECK_X(dsi_pll.ctrl);
+ PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
- PIPE_CONF_CHECK_I(pipe_bpp);
+ if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
+ PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
- PIPE_CONF_CHECK_I(min_voltage_level);
- }
+ PIPE_CONF_CHECK_I(min_voltage_level);
if (current_config->has_psr || pipe_config->has_psr)
PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
@@ -8049,7 +6949,7 @@ verify_crtc_state(struct intel_crtc *crtc,
struct intel_encoder *encoder;
struct intel_crtc_state *pipe_config = old_crtc_state;
struct drm_atomic_state *state = old_crtc_state->uapi.state;
- struct intel_crtc *master = crtc;
+ struct intel_crtc *master_crtc;
__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
intel_crtc_free_hw_state(old_crtc_state);
@@ -8077,10 +6977,9 @@ verify_crtc_state(struct intel_crtc *crtc,
"(expected %i, found %i)\n",
new_crtc_state->hw.active, crtc->active);
- if (new_crtc_state->bigjoiner_slave)
- master = new_crtc_state->bigjoiner_linked_crtc;
+ master_crtc = intel_master_crtc(new_crtc_state);
- for_each_encoder_on_crtc(dev, &master->base, encoder) {
+ for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
enum pipe pipe;
bool active;
@@ -8090,7 +6989,7 @@ verify_crtc_state(struct intel_crtc *crtc,
encoder->base.base.id, active,
new_crtc_state->hw.active);
- I915_STATE_WARN(active && master->pipe != pipe,
+ I915_STATE_WARN(active && master_crtc->pipe != pipe,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
@@ -8101,10 +7000,6 @@ verify_crtc_state(struct intel_crtc *crtc,
if (!new_crtc_state->hw.active)
return;
- if (new_crtc_state->bigjoiner_slave)
- /* No PLLs set for slave */
- pipe_config->shared_dpll = NULL;
-
intel_pipe_config_sanity_check(dev_priv, pipe_config);
if (!intel_pipe_config_compare(new_crtc_state,
@@ -8223,9 +7118,6 @@ verify_mpllb_state(struct intel_atomic_state *state,
if (!new_crtc_state->hw.active)
return;
- if (new_crtc_state->bigjoiner_slave)
- return;
-
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
@@ -8607,28 +7499,13 @@ static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
return 0;
}
-static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0;
-}
-
-static bool pxp_is_borked(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
-}
-
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_plane_state *plane_state;
struct intel_plane *plane;
- struct intel_plane_state *new_plane_state;
- struct intel_plane_state *old_plane_state;
struct intel_crtc *crtc;
- const struct drm_framebuffer *fb;
int i, ret;
ret = icl_add_linked_planes(state);
@@ -8676,72 +7553,6 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state)
return ret;
}
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- new_plane_state = intel_atomic_get_new_plane_state(state, plane);
- old_plane_state = intel_atomic_get_old_plane_state(state, plane);
- fb = new_plane_state->hw.fb;
- if (fb) {
- new_plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
- new_plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
- } else {
- new_plane_state->decrypt = old_plane_state->decrypt;
- new_plane_state->force_black = old_plane_state->force_black;
- }
- }
-
- return 0;
-}
-
-static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_cdclk_state *old_cdclk_state;
- const struct intel_cdclk_state *new_cdclk_state;
- struct intel_plane_state *plane_state;
- struct intel_bw_state *new_bw_state;
- struct intel_plane *plane;
- int min_cdclk = 0;
- enum pipe pipe;
- int ret;
- int i;
- /*
- * active_planes bitmask has been updated, and potentially
- * affected planes are part of the state. We can now
- * compute the minimum cdclk for each plane.
- */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
- if (ret)
- return ret;
- }
-
- old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
-
- if (new_cdclk_state &&
- old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
- *need_cdclk_calc = true;
-
- ret = intel_cdclk_bw_calc_min_cdclk(state);
- if (ret)
- return ret;
-
- new_bw_state = intel_atomic_get_new_bw_state(state);
-
- if (!new_cdclk_state || !new_bw_state)
- return 0;
-
- for_each_pipe(dev_priv, pipe) {
- min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
-
- /*
- * Currently do this change only if we need to increase
- */
- if (new_bw_state->min_cdclk > min_cdclk)
- *need_cdclk_calc = true;
- }
-
return 0;
}
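
The cdclk logic deleted above is not lost: a later hunk switches the caller to intel_cdclk_atomic_check(), so the same checks presumably move wholesale into intel_cdclk.c behind a declaration along these lines:

/* sketch of the assumed relocated interface, intel_cdclk.h */
int intel_cdclk_atomic_check(struct intel_atomic_state *state,
                             bool *need_cdclk_calc);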
@@ -8790,13 +7601,13 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
- struct intel_crtc *slave, *master;
+ struct intel_crtc *slave_crtc, *master_crtc;
/* slave being enabled, is the master still claiming this crtc? */
if (old_crtc_state->bigjoiner_slave) {
- slave = crtc;
- master = old_crtc_state->bigjoiner_linked_crtc;
- master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
+ slave_crtc = crtc;
+ master_crtc = old_crtc_state->bigjoiner_linked_crtc;
+ master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
goto claimed;
}
@@ -8804,17 +7615,17 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
if (!new_crtc_state->bigjoiner)
return 0;
- slave = intel_dsc_get_bigjoiner_secondary(crtc);
- if (!slave) {
+ slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
+ if (!slave_crtc) {
DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
"CRTC + 1 to be used, doesn't exist\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
- new_crtc_state->bigjoiner_linked_crtc = slave;
- slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
- master = crtc;
+ new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
+ slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
+ master_crtc = crtc;
if (IS_ERR(slave_crtc_state))
return PTR_ERR(slave_crtc_state);
@@ -8823,15 +7634,15 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
goto claimed;
DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
- slave->base.base.id, slave->base.name);
+ slave_crtc->base.base.id, slave_crtc->base.name);
return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
claimed:
DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
"[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
- slave->base.base.id, slave->base.name,
- master->base.base.id, master->base.name);
+ slave_crtc->base.base.id, slave_crtc->base.name,
+ master_crtc->base.base.id, master_crtc->base.name);
return -EINVAL;
}
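
intel_dsc_get_bigjoiner_secondary() is not shown in this patch; given that the error message above demands "CRTC + 1", a plausible sketch is simply a pipe+1 lookup that yields NULL when no such pipe exists:

struct intel_crtc *
intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc)
{
        /* the bigjoiner slave is assumed to be the very next pipe */
        return intel_crtc_for_pipe(to_i915(primary_crtc->base.dev),
                                   primary_crtc->pipe + 1);
}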
@@ -8865,35 +7676,37 @@ static void kill_bigjoiner_slave(struct intel_atomic_state *state,
* correspond to the last vblank and have no relation to the actual time when
* the flip done event was sent.
*/
-static int intel_atomic_check_async(struct intel_atomic_state *state)
+static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
const struct intel_plane_state *new_plane_state, *old_plane_state;
- struct intel_crtc *crtc;
struct intel_plane *plane;
int i;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (intel_crtc_needs_modeset(new_crtc_state)) {
- drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
- return -EINVAL;
- }
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- if (!new_crtc_state->hw.active) {
- drm_dbg_kms(&i915->drm, "CRTC inactive\n");
- return -EINVAL;
- }
- if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
- drm_dbg_kms(&i915->drm,
- "Active planes cannot be changed during async flip\n");
- return -EINVAL;
- }
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
+ drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
+ return -EINVAL;
+ }
+
+ if (!new_crtc_state->hw.active) {
+ drm_dbg_kms(&i915->drm, "CRTC inactive\n");
+ return -EINVAL;
+ }
+ if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
+ drm_dbg_kms(&i915->drm,
+ "Active planes cannot be changed during async flip\n");
+ return -EINVAL;
}
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
+ if (plane->pipe != crtc->pipe)
+ continue;
+
/*
* TODO: Async flip is only supported through the page flip IOCTL
* as of now. So support currently added for primary plane only.
@@ -8920,8 +7733,14 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
return -EINVAL;
}
- if (old_plane_state->view.color_plane[0].stride !=
- new_plane_state->view.color_plane[0].stride) {
+ if (new_plane_state->hw.fb->format->num_planes > 1) {
+ drm_dbg_kms(&i915->drm,
+ "Planar formats not supported with async flips\n");
+ return -EINVAL;
+ }
+
+ if (old_plane_state->view.color_plane[0].mapping_stride !=
+ new_plane_state->view.color_plane[0].mapping_stride) {
drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
return -EINVAL;
}
@@ -9177,7 +7996,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- intel_fbc_choose_crtc(dev_priv, state);
ret = intel_compute_global_watermarks(state);
if (ret)
goto fail;
@@ -9186,7 +8004,7 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- ret = intel_atomic_check_cdclk(state, &any_ms);
+ ret = intel_cdclk_atomic_check(state, &any_ms);
if (ret)
goto fail;
@@ -9209,10 +8027,14 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ ret = intel_fbc_atomic_check(state);
+ if (ret)
+ goto fail;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip) {
- ret = intel_atomic_check_async(state);
+ ret = intel_atomic_check_async(state, crtc);
if (ret)
goto fail;
}
@@ -9417,15 +8239,17 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc);
+ intel_update_planes_on_crtc(state, crtc);
+
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
commit_pipe_pre_planes(state, crtc);
if (DISPLAY_VER(dev_priv) >= 9)
- skl_update_planes_on_crtc(state, crtc);
+ skl_arm_planes_on_crtc(state, crtc);
else
- i9xx_update_planes_on_crtc(state, crtc);
+ i9xx_arm_planes_on_crtc(state, crtc);
commit_pipe_post_planes(state, crtc);
@@ -9449,23 +8273,6 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
-
- intel_encoders_pre_disable(state, crtc);
-
- intel_crtc_disable_planes(state, crtc);
-
- /*
- * We still need special handling for disabling bigjoiner master
- * and slaves since for slave we do not have encoder or plls
- * so we dont need to disable those.
- */
- if (old_crtc_state->bigjoiner) {
- intel_crtc_disable_planes(state,
- old_crtc_state->bigjoiner_linked_crtc);
- old_crtc_state->bigjoiner_linked_crtc->active = false;
- }
-
/*
* We need to disable pipe CRC before disabling the pipe,
* or we race against vblank off.
@@ -9490,10 +8297,22 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
u32 handled = 0;
int i;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (!old_crtc_state->hw.active)
+ continue;
+
+ intel_pre_plane_update(state, crtc);
+ intel_crtc_disable_planes(state, crtc);
+ }
+
/* Only disable port sync and MST slaves */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
if (!old_crtc_state->hw.active)
@@ -9505,10 +8324,10 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
* Slave vblanks are masked until the master's vblank.
*/
if (!is_trans_port_sync_slave(old_crtc_state) &&
- !intel_dp_mst_is_slave_trans(old_crtc_state))
+ !intel_dp_mst_is_slave_trans(old_crtc_state) &&
+ !old_crtc_state->bigjoiner_slave)
continue;
- intel_pre_plane_update(state, crtc);
intel_old_crtc_state_disables(state, old_crtc_state,
new_crtc_state, crtc);
handled |= BIT(crtc->pipe);
@@ -9518,21 +8337,14 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!intel_crtc_needs_modeset(new_crtc_state) ||
- (handled & BIT(crtc->pipe)) ||
- old_crtc_state->bigjoiner_slave)
+ (handled & BIT(crtc->pipe)))
continue;
- intel_pre_plane_update(state, crtc);
- if (old_crtc_state->bigjoiner) {
- struct intel_crtc *slave =
- old_crtc_state->bigjoiner_linked_crtc;
-
- intel_pre_plane_update(state, slave);
- }
+ if (!old_crtc_state->hw.active)
+ continue;
- if (old_crtc_state->hw.active)
- intel_old_crtc_state_disables(state, old_crtc_state,
- new_crtc_state, crtc);
+ intel_old_crtc_state_disables(state, old_crtc_state,
+ new_crtc_state, crtc);
}
}
@@ -9610,7 +8422,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
&old_crtc_state->wm.skl.ddb) &&
(update_pipes | modeset_pipes))
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
}
}
@@ -9701,19 +8513,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
for (;;) {
prepare_to_wait(&intel_state->commit_ready.wait,
&wait_fence, TASK_UNINTERRUPTIBLE);
- prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
I915_RESET_MODESET),
&wait_reset, TASK_UNINTERRUPTIBLE);
if (i915_sw_fence_done(&intel_state->commit_ready) ||
- test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
+ test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
break;
schedule();
}
finish_wait(&intel_state->commit_ready.wait, &wait_fence);
- finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
I915_RESET_MODESET),
&wait_reset);
}
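
dev_priv->gt is no longer dereferenced directly; to_gt() is assumed to be a trivial accessor for the (currently single) primary GT, roughly:

/* sketch, assuming a single primary GT at this point in the driver */
static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
        return &i915->gt;
}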
@@ -9752,10 +8564,14 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
struct drm_framebuffer *fb = plane_state->hw.fb;
+ int cc_plane;
int ret;
- if (!fb ||
- fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ if (!fb)
+ continue;
+
+ cc_plane = intel_fb_rc_ccs_cc_plane(fb);
+ if (cc_plane < 0)
continue;
/*
@@ -9772,7 +8588,7 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s
* GPU write on it.
*/
ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
- fb->offsets[2] + 16,
+ fb->offsets[cc_plane] + 16,
&plane_state->ccval,
sizeof(plane_state->ccval));
/* The above could only fail if the FB obj has an unexpected backing store type. */
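
intel_fb_rc_ccs_cc_plane() replaces the open-coded modifier test. A hedged sketch, assuming the clear-color data occupies the last color plane of a GEN12 RC_CCS_CC framebuffer (index 2, matching the old fb->offsets[2] access):

/* hypothetical sketch: clear-color plane index, or -1 if none */
static int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb)
{
        if (fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
                return -1;

        return fb->format->num_planes - 1;
}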
@@ -9840,11 +8656,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
}
- if (state->modeset)
- intel_encoders_update_prepare(state);
+ intel_encoders_update_prepare(state);
intel_dbuf_pre_plane_update(state);
- intel_psr_pre_plane_update(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
@@ -9854,11 +8668,12 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display->commit_modeset_enables(state);
- if (state->modeset) {
- intel_encoders_update_complete(state);
+ intel_encoders_update_complete(state);
+ if (state->modeset)
intel_set_cdclk_post_plane_update(state);
- }
+
+ intel_wait_for_vblank_workers(state);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
@@ -9874,13 +8689,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
intel_crtc_disable_flip_done(state, crtc);
-
- if (new_crtc_state->hw.active &&
- !intel_crtc_needs_modeset(new_crtc_state) &&
- !new_crtc_state->preload_luts &&
- (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe))
- intel_color_load_luts(new_crtc_state);
}
/*
@@ -9967,7 +8775,7 @@ static void intel_atomic_commit_work(struct work_struct *work)
intel_atomic_commit_tail(state);
}
-static int __i915_sw_fence_call
+static int
intel_atomic_commit_ready(struct i915_sw_fence *fence,
enum i915_sw_fence_notify notify)
{
@@ -10114,8 +8922,8 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
struct intel_plane *plane;
for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- plane->pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
+ plane->pipe);
plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
}
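
intel_get_crtc_for_pipe() is renamed to intel_crtc_for_pipe() throughout; the helper itself is presumably unchanged, i.e. a lookup of the pipe-to-CRTC mapping along the lines of:

/* sketch of the assumed helper */
static inline struct intel_crtc *
intel_crtc_for_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
        return i915->pipe_to_crtc_mapping[pipe];
}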
@@ -10580,7 +9388,7 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
- .get_format_info = intel_get_format_info,
+ .get_format_info = intel_fb_get_format_info,
.output_poll_changed = intel_fbdev_output_poll_changed,
.mode_valid = intel_mode_valid,
.atomic_check = intel_atomic_check,
@@ -10640,7 +9448,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
return;
intel_init_cdclk_hooks(dev_priv);
- intel_init_audio_hooks(dev_priv);
+ intel_audio_hooks_init(dev_priv);
intel_dpll_init_clock_hook(dev_priv);
@@ -11108,7 +9916,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
/* 640x480@60Hz, ~25175 kHz */
struct dpll clock = {
.m1 = 18,
@@ -11181,7 +9989,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
pipe_name(pipe));
@@ -11233,7 +10041,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
"[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
plane->base.base.id, plane->base.name);
- plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
intel_plane_disable_noatomic(plane_crtc, plane);
}
}
@@ -11486,7 +10294,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
visible = plane->get_hw_state(plane, &pipe);
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
intel_set_plane_visible(crtc_state, plane_state, visible);
@@ -11553,7 +10361,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
@@ -11628,9 +10436,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_plane *plane;
int min_cdclk = 0;
- if (crtc_state->bigjoiner_slave)
- continue;
-
if (crtc_state->hw.active) {
/*
* The initial mode needs to be set in order to keep
@@ -11690,39 +10495,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
intel_bw_crtc_update(bw_state, crtc_state);
intel_pipe_config_sanity_check(dev_priv, crtc_state);
-
- /* discard our incomplete slave state, copy it from master */
- if (crtc_state->bigjoiner && crtc_state->hw.active) {
- struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
- struct intel_crtc_state *slave_crtc_state =
- to_intel_crtc_state(slave->base.state);
-
- copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
- slave->base.mode = crtc->base.mode;
-
- cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
- cdclk_state->min_voltage_level[slave->pipe] =
- crtc_state->min_voltage_level;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- /*
- * FIXME don't have the fb yet, so can't
- * use intel_plane_data_rate() :(
- */
- if (plane_state->uapi.visible)
- crtc_state->data_rate[plane->id] =
- 4 * crtc_state->pixel_rate;
- else
- crtc_state->data_rate[plane->id] = 0;
- }
-
- intel_bw_crtc_update(bw_state, slave_crtc_state);
- drm_calc_timestamping_constants(&slave->base,
- &slave_crtc_state->hw.adjusted_mode);
- }
}
}
@@ -12027,7 +10799,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
destroy_workqueue(i915->flip_wq);
destroy_workqueue(i915->modeset_wq);
- intel_fbc_cleanup_cfb(i915);
+ intel_fbc_cleanup(i915);
}
/* part #3: call after gem init */
@@ -12042,6 +10814,27 @@ void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
intel_bios_driver_remove(i915);
}
+bool intel_modeset_probe_defer(struct pci_dev *pdev)
+{
+ struct drm_privacy_screen *privacy_screen;
+
+ /*
+ * apple-gmux is needed on dual GPU MacBook Pro
+ * to probe the panel if we're the inactive GPU.
+ */
+ if (vga_switcheroo_client_probe_defer(pdev))
+ return true;
+
+ /* If the LCD panel has a privacy-screen, wait for it */
+ privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
+ if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
+ return true;
+
+ drm_privacy_screen_put(privacy_screen);
+
+ return false;
+}
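
intel_modeset_probe_defer() is expected to be called from the PCI probe path before any hardware is touched; a sketch of the assumed call site:

/* sketch of the assumed caller in i915_pci.c */
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        /* defer before touching the device if a dependency isn't ready */
        if (intel_modeset_probe_defer(pdev))
                return -EPROBE_DEFER;

        /* ... normal probe sequence continues ... */
        return 0;
}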
+
void intel_display_driver_register(struct drm_i915_private *i915)
{
if (!HAS_DISPLAY(i915))
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 0c76bf57f86b..b61b75248ded 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -57,6 +57,7 @@ struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;
+struct pci_dev;
enum i915_gpio {
GPIOA,
@@ -346,9 +347,33 @@ enum phy_fia {
FIA3,
};
+enum hpd_pin {
+ HPD_NONE = 0,
+ HPD_TV = HPD_NONE, /* TV is known to be unreliable */
+ HPD_CRT,
+ HPD_SDVO_B,
+ HPD_SDVO_C,
+ HPD_PORT_A,
+ HPD_PORT_B,
+ HPD_PORT_C,
+ HPD_PORT_D,
+ HPD_PORT_E,
+ HPD_PORT_TC1,
+ HPD_PORT_TC2,
+ HPD_PORT_TC3,
+ HPD_PORT_TC4,
+ HPD_PORT_TC5,
+ HPD_PORT_TC6,
+
+ HPD_NUM_PINS
+};
+
+#define for_each_hpd_pin(__pin) \
+ for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
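
Since HPD_TV aliases HPD_NONE, the iterator starts at HPD_CRT and walks every real pin; for example:

enum hpd_pin pin;

/* visits HPD_CRT through HPD_PORT_TC6 */
for_each_hpd_pin(pin)
        drm_dbg_kms(&dev_priv->drm, "hpd pin %d\n", pin);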
+
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
- for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p))
+ for_each_if(INTEL_INFO(__dev_priv)->display.pipe_mask & BIT(__p))
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for_each_pipe(__dev_priv, __p) \
@@ -356,7 +381,7 @@ enum phy_fia {
#define for_each_cpu_transcoder(__dev_priv, __t) \
for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
- for_each_if (INTEL_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t))
+ for_each_if (INTEL_INFO(__dev_priv)->display.cpu_transcoder_mask & BIT(__t))
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
for_each_cpu_transcoder(__dev_priv, __t) \
@@ -521,7 +546,6 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n, bool fec_enable);
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
enum drm_mode_status
@@ -542,9 +566,6 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
-void lpt_pch_enable(const struct intel_crtc_state *crtc_state);
-void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
@@ -580,10 +601,6 @@ struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
-void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
bool intel_fuzzy_clock_check(int clock1, int clock2);
void intel_display_prepare_reset(struct drm_i915_private *dev_priv);
@@ -592,8 +609,11 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
+void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
@@ -610,9 +630,6 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
-bool
-intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- u64 modifier);
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
@@ -624,6 +641,7 @@ void intel_display_driver_register(struct drm_i915_private *i915);
void intel_display_driver_unregister(struct drm_i915_private *i915);
/* modesetting */
+bool intel_modeset_probe_defer(struct pci_dev *pdev);
void intel_modeset_init_hw(struct drm_i915_private *i915);
int intel_modeset_init_noirq(struct drm_i915_private *i915);
int intel_modeset_init_nogem(struct drm_i915_private *i915);
@@ -632,7 +650,6 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915);
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
int intel_modeset_all_pipes(struct intel_atomic_state *state);
/* modesetting asserts */
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index e04767695530..572445299b04 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -40,83 +40,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
return 0;
}
-static int i915_fbc_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_fbc *fbc = &dev_priv->fbc;
- intel_wakeref_t wakeref;
-
- if (!HAS_FBC(dev_priv))
- return -ENODEV;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&fbc->lock);
-
- if (intel_fbc_is_active(dev_priv))
- seq_puts(m, "FBC enabled\n");
- else
- seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
-
- if (intel_fbc_is_active(dev_priv)) {
- u32 mask;
-
- if (DISPLAY_VER(dev_priv) >= 8)
- mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
- else if (DISPLAY_VER(dev_priv) >= 7)
- mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
- else if (DISPLAY_VER(dev_priv) >= 5)
- mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
- else if (IS_G4X(dev_priv))
- mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
- else
- mask = intel_de_read(dev_priv, FBC_STATUS) &
- (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
-
- seq_printf(m, "Compressing: %s\n", yesno(mask));
- }
-
- mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_fbc_false_color_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
- return -ENODEV;
-
- *val = dev_priv->fbc.false_color;
-
- return 0;
-}
-
-static int i915_fbc_false_color_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- u32 reg;
-
- if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
- return -ENODEV;
-
- mutex_lock(&dev_priv->fbc.lock);
-
- reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
- dev_priv->fbc.false_color = val;
-
- intel_de_write(dev_priv, ILK_DPFC_CONTROL,
- val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
-
- mutex_unlock(&dev_priv->fbc.lock);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
- i915_fbc_false_color_get, i915_fbc_false_color_set,
- "%llu\n");
-
static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -303,8 +226,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
};
val = intel_de_read(dev_priv,
EDP_PSR2_STATUS(intel_dp->psr.transcoder));
- status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
- EDP_PSR2_STATUS_STATE_SHIFT;
+ status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
} else {
@@ -503,28 +425,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- int i;
-
- mutex_lock(&power_domains->lock);
-
- seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
- for (i = 0; i < power_domains->power_well_count; i++) {
- struct i915_power_well *power_well;
- enum intel_display_power_domain power_domain;
-
- power_well = &power_domains->power_wells[i];
- seq_printf(m, "%-25s %d\n", power_well->desc->name,
- power_well->count);
-
- for_each_power_domain(power_domain, power_well->desc->domains)
- seq_printf(m, " %-23s %d\n",
- intel_display_power_domain_str(power_domain),
- power_domains->domain_use_count[power_domain]);
- }
+ struct drm_i915_private *i915 = node_to_i915(m->private);
- mutex_unlock(&power_domains->lock);
+ intel_display_power_debug(i915, m);
return 0;
}
@@ -2095,9 +1998,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
return ret;
}
- ret = intel_fbc_reset_underrun(dev_priv);
- if (ret)
- return ret;
+ intel_fbc_reset_underrun(dev_priv);
return cnt;
}
@@ -2111,7 +2012,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
- {"i915_fbc_status", i915_fbc_status, 0},
{"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
{"i915_opregion", i915_opregion, 0},
@@ -2136,7 +2036,6 @@ static const struct {
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
- {"i915_fbc_false_color", &i915_fbc_false_color_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
@@ -2163,6 +2062,8 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
drm_debugfs_create_files(intel_display_debugfs_list,
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
+
+ intel_fbc_debugfs_register(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 1672604f9ef7..05babdcf5f2e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -15,6 +15,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
+#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_pps.h"
@@ -23,6 +24,98 @@
#include "intel_vga.h"
#include "vlv_sideband.h"
+struct i915_power_well_ops {
+ /*
+ * Synchronize the well's hw state to match the current sw state, for
+ * example enable/disable it based on the current refcount. Called
+ * during driver init and resume time, possibly after first calling
+ * the enable/disable handlers.
+ */
+ void (*sync_hw)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Enable the well and resources that depend on it (for example
+ * interrupts located on the well). Called after the 0->1 refcount
+ * transition.
+ */
+ void (*enable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Disable the well and resources that depend on it. Called after
+ * the 1->0 refcount transition.
+ */
+ void (*disable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /* Returns the hw enabled state. */
+ bool (*is_enabled)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+};
+
+struct i915_power_well_regs {
+ i915_reg_t bios;
+ i915_reg_t driver;
+ i915_reg_t kvmr;
+ i915_reg_t debug;
+};
+
+/* Power well structure for haswell */
+struct i915_power_well_desc {
+ const char *name;
+ bool always_on;
+ u64 domains;
+ /* unique identifier for this power well */
+ enum i915_power_well_id id;
+ /*
+ * Arbitrary data associated with this power well. Platform and power
+ * well specific.
+ */
+ union {
+ struct {
+ /*
+ * request/status flag index in the PUNIT power well
+ * control/status registers.
+ */
+ u8 idx;
+ } vlv;
+ struct {
+ enum dpio_phy phy;
+ } bxt;
+ struct {
+ const struct i915_power_well_regs *regs;
+ /*
+ * request/status flag index in the power well
+ * constrol/status registers.
+ */
+ u8 idx;
+ /* Mask of pipes whose IRQ logic is backed by the pw */
+ u8 irq_pipe_mask;
+ /*
+ * Instead of waiting for the status bit to ack enables,
+ * just wait a specific amount of time and then consider
+ * the well enabled.
+ */
+ u16 fixed_enable_delay;
+ /* The pw is backing the VGA functionality */
+ bool has_vga:1;
+ bool has_fuses:1;
+ /*
+ * The pw is for an ICL+ TypeC PHY port in
+ * Thunderbolt mode.
+ */
+ bool is_tc_tbt:1;
+ } hsw;
+ };
+ const struct i915_power_well_ops *ops;
+};
+
+struct i915_power_well {
+ const struct i915_power_well_desc *desc;
+ /* power well enable/disable usage count */
+ int count;
+ /* cached hw enabled state */
+ bool hw_enabled;
+};
+
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
@@ -154,8 +247,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "MODESET";
case POWER_DOMAIN_GT_IRQ:
return "GT_IRQ";
- case POWER_DOMAIN_DPLL_DC_OFF:
- return "DPLL_DC_OFF";
+ case POWER_DOMAIN_DC_OFF:
+ return "DC_OFF";
case POWER_DOMAIN_TC_COLD_OFF:
return "TC_COLD_OFF";
default:
@@ -434,6 +527,11 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
+
+ /* Wa_16013190616:adlp */
+ if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
+
/*
* For PW1 we have to wait both for the PW0/PG0 fuse state
* before enabling the power well and PW1/PG1's own fuse
@@ -894,7 +992,7 @@ static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
u32 target_dc_state)
{
- u32 states[] = {
+ static const u32 states[] = {
DC_STATE_EN_UPTO_DC6,
DC_STATE_EN_UPTO_DC5,
DC_STATE_EN_DC3CO,
@@ -2802,7 +2900,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
ICL_PW_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
+ BIT_ULL(POWER_DOMAIN_DC_OFF) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define ICL_DDI_IO_A_POWER_DOMAINS ( \
@@ -3105,6 +3203,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
@@ -5271,7 +5370,7 @@ static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
- unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
+ unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
u32 mask, val, i;
if (IS_ALDERLAKE_P(dev_priv))
@@ -5731,7 +5830,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
enum intel_dram_type type = dev_priv->dram_info.type;
u8 num_channels = dev_priv->dram_info.num_channels;
const struct buddy_page_mask *table;
- unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
+ unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
int config, i;
/* BW_BUDDY registers are not used on dgpu's beyond DG1 */
@@ -6390,3 +6489,28 @@ void intel_display_power_resume(struct drm_i915_private *i915)
hsw_disable_pc8(i915);
}
}
+
+void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+
+ seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+ for (i = 0; i < power_domains->power_well_count; i++) {
+ struct i915_power_well *power_well;
+ enum intel_display_power_domain power_domain;
+
+ power_well = &power_domains->power_wells[i];
+ seq_printf(m, "%-25s %d\n", power_well->desc->name,
+ power_well->count);
+
+ for_each_power_domain(power_domain, power_well->desc->domains)
+ seq_printf(m, " %-23s %d\n",
+ intel_display_power_domain_str(power_domain),
+ power_domains->domain_use_count[power_domain]);
+ }
+
+ mutex_unlock(&power_domains->lock);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 0612e4b6e3c8..686d18eaa24c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -6,11 +6,13 @@
#ifndef __INTEL_DISPLAY_POWER_H__
#define __INTEL_DISPLAY_POWER_H__
-#include "intel_display.h"
#include "intel_runtime_pm.h"
#include "i915_reg.h"
+enum dpio_channel;
+enum dpio_phy;
struct drm_i915_private;
+struct i915_power_well;
struct intel_encoder;
enum intel_display_power_domain {
@@ -117,7 +119,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
- POWER_DOMAIN_DPLL_DC_OFF,
+ POWER_DOMAIN_DC_OFF,
POWER_DOMAIN_TC_COLD_OFF,
POWER_DOMAIN_INIT,
@@ -155,100 +157,6 @@ enum i915_power_well_id {
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
(tran) + POWER_DOMAIN_TRANSCODER_A)
-struct i915_power_well;
-
-struct i915_power_well_ops {
- /*
- * Synchronize the well's hw state to match the current sw state, for
- * example enable/disable it based on the current refcount. Called
- * during driver init and resume time, possibly after first calling
- * the enable/disable handlers.
- */
- void (*sync_hw)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /*
- * Enable the well and resources that depend on it (for example
- * interrupts located on the well). Called after the 0->1 refcount
- * transition.
- */
- void (*enable)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /*
- * Disable the well and resources that depend on it. Called after
- * the 1->0 refcount transition.
- */
- void (*disable)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /* Returns the hw enabled state. */
- bool (*is_enabled)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
-};
-
-struct i915_power_well_regs {
- i915_reg_t bios;
- i915_reg_t driver;
- i915_reg_t kvmr;
- i915_reg_t debug;
-};
-
-/* Power well structure for haswell */
-struct i915_power_well_desc {
- const char *name;
- bool always_on;
- u64 domains;
- /* unique identifier for this power well */
- enum i915_power_well_id id;
- /*
- * Arbitraty data associated with this power well. Platform and power
- * well specific.
- */
- union {
- struct {
- /*
- * request/status flag index in the PUNIT power well
- * control/status registers.
- */
- u8 idx;
- } vlv;
- struct {
- enum dpio_phy phy;
- } bxt;
- struct {
- const struct i915_power_well_regs *regs;
- /*
- * request/status flag index in the power well
- * constrol/status registers.
- */
- u8 idx;
- /* Mask of pipes whose IRQ logic is backed by the pw */
- u8 irq_pipe_mask;
- /*
- * Instead of waiting for the status bit to ack enables,
- * just wait a specific amount of time and then consider
- * the well enabled.
- */
- u16 fixed_enable_delay;
- /* The pw is backing the VGA functionality */
- bool has_vga:1;
- bool has_fuses:1;
- /*
- * The pw is for an ICL+ TypeC PHY port in
- * Thunderbolt mode.
- */
- bool is_tc_tbt:1;
- } hsw;
- };
- const struct i915_power_well_ops *ops;
-};
-
-struct i915_power_well {
- const struct i915_power_well_desc *desc;
- /* power well enable/disable usage count */
- int count;
- /* cached hw enabled state */
- bool hw_enabled;
-};
-
struct i915_power_domains {
/*
* Power wells needed for initialization at driver init and suspend
@@ -391,6 +299,8 @@ intel_display_power_put_all_in_set(struct drm_i915_private *i915,
intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
}
+void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m);
+
/*
* FIXME: We should probably switch this to a 0-based scheme to be consistent
* with how we now name/number DBUF_CTL instances.
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.c b/drivers/gpu/drm/i915/display/intel_display_trace.c
new file mode 100644
index 000000000000..737979ada869
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "intel_display_trace.h"
+#endif
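
This follows the standard kernel tracepoint pattern: exactly one translation unit defines CREATE_TRACE_POINTS before including the event header, so the generated trace_*() bodies are emitted once; every other file includes the header plainly and calls the generated stubs, e.g.:

/* sketch of a consumer elsewhere in intel_display */
#include "intel_display_trace.h"

static void example_pipe_enable_path(struct intel_crtc *crtc)
{
        /* emits the intel_pipe_enable event defined in the header */
        trace_intel_pipe_enable(crtc);
}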
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
new file mode 100644
index 000000000000..4043e1276383
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -0,0 +1,587 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+
+#if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __INTEL_DISPLAY_TRACE_H__
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include "i915_drv.h"
+#include "intel_crtc.h"
+#include "intel_display_types.h"
+
+TRACE_EVENT(intel_pipe_enable,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(enum pipe, pipe)
+ ),
+ TP_fast_assign(
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *it__;
+ for_each_intel_crtc(&dev_priv->drm, it__) {
+ __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+ __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
+ }
+ __entry->pipe = crtc->pipe;
+ ),
+
+ TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_disable,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(enum pipe, pipe)
+ ),
+
+ TP_fast_assign(
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *it__;
+ for_each_intel_crtc(&dev_priv->drm, it__) {
+ __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+ __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
+ }
+ __entry->pipe = crtc->pipe;
+ ),
+
+ TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_crc,
+ TP_PROTO(struct intel_crtc *crtc, const u32 *crcs),
+ TP_ARGS(crtc, crcs),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __array(u32, crcs, 5)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
+ __entry->crcs[0], __entry->crcs[1], __entry->crcs[2],
+ __entry->crcs[3], __entry->crcs[4])
+);
+
+TRACE_EVENT(intel_cpu_fifo_underrun,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+ TP_ARGS(dev_priv, pipe),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ __entry->pipe = pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_pch_fifo_underrun,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder),
+ TP_ARGS(dev_priv, pch_transcoder),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ enum pipe pipe = pch_transcoder;
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ __entry->pipe = pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pch transcoder %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_memory_cxsr,
+ TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new),
+ TP_ARGS(dev_priv, old, new),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(bool, old)
+ __field(bool, new)
+ ),
+
+ TP_fast_assign(
+ struct intel_crtc *crtc;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
+ }
+ __entry->old = old;
+ __entry->new = new;
+ ),
+
+ TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ onoff(__entry->old), onoff(__entry->new),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(g4x_wm,
+ TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm),
+ TP_ARGS(crtc, wm),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u16, primary)
+ __field(u16, sprite)
+ __field(u16, cursor)
+ __field(u16, sr_plane)
+ __field(u16, sr_cursor)
+ __field(u16, sr_fbc)
+ __field(u16, hpll_plane)
+ __field(u16, hpll_cursor)
+ __field(u16, hpll_fbc)
+ __field(bool, cxsr)
+ __field(bool, hpll)
+ __field(bool, fbc)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
+ __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
+ __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
+ __entry->sr_plane = wm->sr.plane;
+ __entry->sr_cursor = wm->sr.cursor;
+ __entry->sr_fbc = wm->sr.fbc;
+ __entry->hpll_plane = wm->hpll.plane;
+ __entry->hpll_cursor = wm->hpll.cursor;
+ __entry->hpll_fbc = wm->hpll.fbc;
+ __entry->cxsr = wm->cxsr;
+ __entry->hpll = wm->hpll_en;
+ __entry->fbc = wm->fbc_en;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
+ __entry->primary, __entry->sprite, __entry->cursor,
+ yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc,
+ yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc,
+ yesno(__entry->fbc))
+);
+
+TRACE_EVENT(vlv_wm,
+ TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm),
+ TP_ARGS(crtc, wm),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, level)
+ __field(u32, cxsr)
+ __field(u32, primary)
+ __field(u32, sprite0)
+ __field(u32, sprite1)
+ __field(u32, cursor)
+ __field(u32, sr_plane)
+ __field(u32, sr_cursor)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->level = wm->level;
+ __entry->cxsr = wm->cxsr;
+ __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
+ __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
+ __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1];
+ __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
+ __entry->sr_plane = wm->sr.plane;
+ __entry->sr_cursor = wm->sr.cursor;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->level, __entry->cxsr,
+ __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor,
+ __entry->sr_plane, __entry->sr_cursor)
+);
+
+TRACE_EVENT(vlv_fifo_size,
+ TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size),
+ TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, sprite0_start)
+ __field(u32, sprite1_start)
+ __field(u32, fifo_size)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->sprite0_start = sprite0_start;
+ __entry->sprite1_start = sprite1_start;
+ __entry->fifo_size = fifo_size;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->sprite0_start,
+ __entry->sprite1_start, __entry->fifo_size)
+);
+
+TRACE_EVENT(intel_plane_update_noarm,
+ TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
+ TP_ARGS(plane, crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __array(int, src, 4)
+ __array(int, dst, 4)
+ __string(name, plane->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, plane->name);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
+ memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
+ ),
+
+ TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
+ pipe_name(__entry->pipe), __get_str(name),
+ __entry->frame, __entry->scanline,
+ DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
+ DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
+);
+
+TRACE_EVENT(intel_plane_update_arm,
+ TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
+ TP_ARGS(plane, crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __array(int, src, 4)
+ __array(int, dst, 4)
+ __string(name, plane->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, plane->name);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
+ memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
+ ),
+
+ TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
+ pipe_name(__entry->pipe), __get_str(name),
+ __entry->frame, __entry->scanline,
+ DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
+ DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
+);
+
+TRACE_EVENT(intel_plane_disable_arm,
+ TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
+ TP_ARGS(plane, crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __string(name, plane->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, plane->name);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, plane %s, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __get_str(name),
+ __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_activate,
+ TP_PROTO(struct intel_plane *plane),
+ TP_ARGS(plane),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ plane->pipe);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_deactivate,
+ TP_PROTO(struct intel_plane *plane),
+ TP_ARGS(plane),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ plane->pipe);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_nuke,
+ TP_PROTO(struct intel_plane *plane),
+ TP_ARGS(plane),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ plane->pipe);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_crtc_vblank_work_start,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline)
+);
+
+TRACE_EVENT(intel_crtc_vblank_work_end,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline)
+);
+
+TRACE_EVENT(intel_pipe_update_start,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, min)
+ __field(u32, max)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->min = crtc->debug.min_vbl;
+ __entry->max = crtc->debug.max_vbl;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(intel_pipe_update_vblank_evaded,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, min)
+ __field(u32, max)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = crtc->debug.start_vbl_count;
+ __entry->scanline = crtc->debug.scanline_start;
+ __entry->min = crtc->debug.min_vbl;
+ __entry->max = crtc->debug.max_vbl;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(intel_pipe_update_end,
+ TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
+ TP_ARGS(crtc, frame, scanline_end),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = frame;
+ __entry->scanline = scanline_end;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline)
+);
+
+TRACE_EVENT(intel_frontbuffer_invalidate,
+ TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin),
+ TP_ARGS(frontbuffer_bits, origin),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, frontbuffer_bits)
+ __field(unsigned int, origin)
+ ),
+
+ TP_fast_assign(
+ __entry->frontbuffer_bits = frontbuffer_bits;
+ __entry->origin = origin;
+ ),
+
+ TP_printk("frontbuffer_bits=0x%08x, origin=%u",
+ __entry->frontbuffer_bits, __entry->origin)
+);
+
+TRACE_EVENT(intel_frontbuffer_flush,
+ TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin),
+ TP_ARGS(frontbuffer_bits, origin),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, frontbuffer_bits)
+ __field(unsigned int, origin)
+ ),
+
+ TP_fast_assign(
+ __entry->frontbuffer_bits = frontbuffer_bits;
+ __entry->origin = origin;
+ ),
+
+ TP_printk("frontbuffer_bits=0x%08x, origin=%u",
+ __entry->frontbuffer_bits, __entry->origin)
+);
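
The two frontbuffer events above trace a plane-ownership bitmask rather than a pipe, so a single invalidate/flush event names every plane touched by a frontbuffer operation. A hedged call-site sketch (the bit values are made up; ORIGIN_FLIP is one of i915's fb_op_origin values):

static void example_flush_two_planes(unsigned int plane_a_bit,
                                     unsigned int plane_b_bit)
{
        trace_intel_frontbuffer_flush(plane_a_bit | plane_b_bit, ORIGIN_FLIP);
}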
+
+#endif /* __INTEL_DISPLAY_TRACE_H__ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/display
+#define TRACE_INCLUDE_FILE intel_display_trace
+#include <trace/define_trace.h>
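
Each TRACE_EVENT() in the header above expands into a trace_<name>() stub that the display code calls at the matching point, and the TP_printk() format is what appears in the tracefs ring buffer. A minimal sketch of how two of these events bracket a pipe update (the example_ function is illustrative, not part of the driver):

static void example_pipe_update(struct intel_crtc *crtc)
{
        trace_intel_pipe_update_start(crtc);

        /* ... vblank-evasion critical section runs here ... */

        trace_intel_pipe_update_end(crtc,
                                    intel_crtc_get_vblank_counter(crtc),
                                    intel_get_crtc_scanline(crtc));
}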
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index aa7238245b0e..c9c6efadf8b4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -28,6 +28,7 @@
#include <linux/async.h>
#include <linux/i2c.h>
+#include <linux/pm_qos.h>
#include <linux/pwm.h>
#include <linux/sched/clock.h>
@@ -35,20 +36,30 @@
#include <drm/drm_crtc.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_dsc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_work.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
-#include "i915_drv.h"
+#include "i915_vma.h"
+#include "i915_vma_types.h"
+#include "intel_bios.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_dpll_mgr.h"
+#include "intel_pm_types.h"
struct drm_printer;
struct __intel_global_objs_state;
struct intel_ddi_buf_trans;
+struct intel_fbc;
+struct intel_connector;
/*
* Display related stuff
@@ -115,7 +126,8 @@ struct intel_fb_view {
* bytes for 0/180 degree rotation
* pixels for 90/270 degree rotation
*/
- unsigned int stride;
+ unsigned int mapping_stride;
+ unsigned int scanout_stride;
} color_plane[4];
};
@@ -194,10 +206,6 @@ struct intel_encoder {
void (*update_complete)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
- void (*pre_disable)(struct intel_atomic_state *,
- struct intel_encoder *,
- const struct intel_crtc_state *,
- const struct drm_connector_state *);
void (*disable)(struct intel_atomic_state *,
struct intel_encoder *,
const struct intel_crtc_state *,
@@ -687,6 +695,8 @@ struct intel_plane_state {
/* Clear Color Value */
u64 ccval;
+
+ const char *no_fbc_reason;
};
struct intel_initial_plane_config {
@@ -949,7 +959,6 @@ struct intel_crtc_state {
* accordingly.
*/
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
-#define PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE (1<<1) /* bigjoiner slave, partial readout */
unsigned long quirks;
unsigned fb_bits; /* framebuffers to flip */
@@ -1118,8 +1127,6 @@ struct intel_crtc_state {
bool crc_enabled;
- bool enable_fbc;
-
bool double_wide;
int pbn;
@@ -1241,6 +1248,9 @@ struct intel_crtc_state {
u8 link_count;
u8 pixel_overlap;
} splitter;
+
+ /* for loading single buffered registers during vblank */
+ struct drm_vblank_work vblank_work;
};
enum intel_pipe_crc_source {
@@ -1325,6 +1335,9 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
+ /* for loading single buffered registers during vblank */
+ struct pm_qos_request vblank_pm_qos;
+
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc;
#endif
@@ -1335,8 +1348,6 @@ struct intel_plane {
enum i9xx_plane_id i9xx_plane;
enum plane_id id;
enum pipe pipe;
- bool has_fbc;
- bool has_ccs;
bool need_async_flip_disable_wa;
u32 frontbuffer_bit;
@@ -1344,6 +1355,8 @@ struct intel_plane {
u32 base, cntl, size;
} cursor;
+ struct intel_fbc *fbc;
+
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
@@ -1362,11 +1375,17 @@ struct intel_plane {
unsigned int (*max_stride)(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
- void (*update_plane)(struct intel_plane *plane,
+ /* Write all non-self-arming plane registers */
+ void (*update_noarm)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
- void (*disable_plane)(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
+ /* Write all self-arming plane registers */
+ void (*update_arm)(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+ /* Disable the plane, must arm */
+ void (*disable_arm)(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
@@ -1563,6 +1582,8 @@ struct intel_dp {
int num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
bool use_rate_select;
+ /* Max sink lane count as reported by DP_MAX_LANE_COUNT */
+ int max_sink_lane_count;
/* intersection of source and sink rates */
int num_common_rates;
int common_rates[DP_MAX_SUPPORTED_RATES];
@@ -1760,35 +1781,6 @@ vlv_pipe_to_channel(enum pipe pipe)
}
}
-static inline bool intel_pipe_valid(struct drm_i915_private *i915, enum pipe pipe)
-{
- return (pipe >= 0 &&
- pipe < ARRAY_SIZE(i915->pipe_to_crtc_mapping) &&
- INTEL_INFO(i915)->pipe_mask & BIT(pipe) &&
- i915->pipe_to_crtc_mapping[pipe]);
-}
-
-static inline struct intel_crtc *
-intel_get_first_crtc(struct drm_i915_private *dev_priv)
-{
- return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0));
-}
-
-static inline struct intel_crtc *
-intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- /* pipe_to_crtc_mapping may have hole on any of 3 display pipe system */
- drm_WARN_ON(&dev_priv->drm,
- !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe)));
- return dev_priv->pipe_to_crtc_mapping[pipe];
-}
-
-static inline struct intel_crtc *
-intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum i9xx_plane_id plane)
-{
- return dev_priv->plane_to_crtc_mapping[plane];
-}
-
struct intel_load_detect_pipe {
struct drm_atomic_state *restore_state;
};
@@ -1898,11 +1890,7 @@ dp_to_lspcon(struct intel_dp *intel_dp)
return &dp_to_dig_port(intel_dp)->lspcon;
}
-static inline struct drm_i915_private *
-dp_to_i915(struct intel_dp *intel_dp)
-{
- return to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
-}
+#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
(intel_dp)->psr.source_support)
@@ -2006,33 +1994,6 @@ intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state)
return drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
}
-static inline void
-intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
- drm_crtc_wait_one_vblank(&crtc->base);
-}
-
-static inline void
-intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
- if (crtc->active)
- intel_wait_for_vblank(dev_priv, pipe);
-}
-
-static inline bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
-{
- return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR;
-}
-
-static inline bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
-{
- return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
-}
-
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
{
return i915_ggtt_offset(plane_state->ggtt_vma);
@@ -2044,20 +2005,4 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}
-static inline bool is_ccs_modifier(u64 modifier)
-{
- return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
-}
-
-static inline bool is_gen12_ccs_modifier(u64 modifier)
-{
- return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
-}
-
#endif /* __INTEL_DISPLAY_TYPES_H__ */
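
The intel_plane vtable change above splits the old update_plane()/disable_plane() pair into three hooks: update_noarm() writes registers that do not take effect immediately, while update_arm() and disable_arm() perform the self-arming write that latches the whole update at once. A hedged caller sketch (the example_ wrapper is illustrative, not driver code):

static void example_plane_commit(struct intel_plane *plane,
                                 const struct intel_crtc_state *crtc_state,
                                 const struct intel_plane_state *plane_state)
{
        /* non-arming registers can be written outside the critical section */
        if (plane->update_noarm)
                plane->update_noarm(plane, crtc_state, plane_state);

        /* ... enter the vblank-evasion window ... */

        /* the self-arming write latches everything written above */
        plane->update_arm(plane, crtc_state, plane_state);
}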
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index aef69522f0be..a69b28d65a9b 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -45,8 +45,10 @@
#define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 12)
-#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 12)
+#define GEN13_DMC_MAX_FW_SIZE 0x20000
+
+#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 14)
+#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 14)
MODULE_FIRMWARE(ADLP_DMC_PATH);
#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
@@ -682,7 +684,7 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
if (IS_ALDERLAKE_P(dev_priv)) {
dmc->fw_path = ADLP_DMC_PATH;
dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
- dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
+ dmc->max_fw_size = GEN13_DMC_MAX_FW_SIZE;
} else if (IS_ALDERLAKE_S(dev_priv)) {
dmc->fw_path = ADLS_DMC_PATH;
dmc->required_version = ADLS_DMC_VERSION_REQUIRED;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index c3c00ff03869..b20f3441ca60 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -20,6 +20,8 @@ enum {
DMC_FW_MAIN = 0,
DMC_FW_PIPEA,
DMC_FW_PIPEB,
+ DMC_FW_PIPEC,
+ DMC_FW_PIPED,
DMC_FW_MAX
};
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index a552f05a67e5..b5e2508db1cf 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -47,6 +47,7 @@
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
+#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -128,7 +129,7 @@ static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
}
/* update sink rates from dpcd */
-static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
+static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
{
static const int dp_rates[] = {
162000, 270000, 540000, 810000
@@ -198,6 +199,54 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
intel_dp->num_sink_rates = i;
}
+static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &intel_dig_port->base;
+
+ intel_dp_set_dpcd_sink_rates(intel_dp);
+
+ if (intel_dp->num_sink_rates)
+ return;
+
+ drm_err(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name);
+
+ intel_dp_set_default_sink_rates(intel_dp);
+}
+
+static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
+{
+ intel_dp->max_sink_lane_count = 1;
+}
+
+static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &intel_dig_port->base;
+
+ intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ switch (intel_dp->max_sink_lane_count) {
+ case 1:
+ case 2:
+ case 4:
+ return;
+ }
+
+ drm_err(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ intel_dp->max_sink_lane_count);
+
+ intel_dp_set_default_max_sink_lane_count(intel_dp);
+}
+
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
@@ -220,10 +269,19 @@ static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
intel_dp->num_common_rates, max_rate);
}
+static int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
+{
+ if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
+ index < 0 || index >= intel_dp->num_common_rates))
+ return 162000;
+
+ return intel_dp->common_rates[index];
+}
+
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
- return intel_dp->common_rates[intel_dp->num_common_rates - 1];
+ return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}
/* Theoretical max between source and sink */
@@ -231,7 +289,7 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
int source_max = dig_port->max_lanes;
- int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+ int sink_max = intel_dp->max_sink_lane_count;
int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
@@ -243,7 +301,15 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
- return intel_dp->max_link_lane_count;
+ switch (intel_dp->max_link_lane_count) {
+ case 1:
+ case 2:
+ case 4:
+ return intel_dp->max_link_lane_count;
+ default:
+ MISSING_CASE(intel_dp->max_link_lane_count);
+ return 1;
+ }
}
/*
@@ -555,13 +621,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
if (index > 0) {
if (intel_dp_is_edp(intel_dp) &&
!intel_dp_can_link_train_fallback_for_edp(intel_dp,
- intel_dp->common_rates[index - 1],
+ intel_dp_common_rate(intel_dp, index - 1),
lane_count)) {
drm_dbg_kms(&i915->drm,
"Retrying Link training for eDP with same parameters\n");
return 0;
}
- intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
+ intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1);
intel_dp->max_link_lane_count = lane_count;
} else if (lane_count > 1) {
if (intel_dp_is_edp(intel_dp) &&
@@ -1001,14 +1067,11 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int len;
len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
- if (drm_WARN_ON(&i915->drm, len <= 0))
- return 162000;
- return intel_dp->common_rates[len - 1];
+ return intel_dp_common_rate(intel_dp, len - 1);
}
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
@@ -1205,7 +1268,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
output_bpp);
for (i = 0; i < intel_dp->num_common_rates; i++) {
- link_rate = intel_dp->common_rates[i];
+ link_rate = intel_dp_common_rate(intel_dp, i);
if (link_rate < limits->min_rate ||
link_rate > limits->max_rate)
continue;
@@ -1284,7 +1347,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
else
vdsc_cfg->slice_height = 2;
- ret = intel_dsc_compute_params(encoder, crtc_state);
+ ret = intel_dsc_compute_params(crtc_state);
if (ret)
return ret;
@@ -1453,17 +1516,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
&pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
- int common_len;
int ret;
- common_len = intel_dp_common_len_rate_limit(intel_dp,
- intel_dp->max_link_rate);
-
- /* No common link rates between source and sink */
- drm_WARN_ON(encoder->base.dev, common_len <= 0);
-
- limits.min_rate = intel_dp->common_rates[0];
- limits.max_rate = intel_dp->common_rates[common_len - 1];
+ limits.min_rate = intel_dp_common_rate(intel_dp, 0);
+ limits.max_rate = intel_dp_max_link_rate(intel_dp);
limits.min_lane_count = 1;
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
@@ -2154,6 +2210,18 @@ static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
return max_frl_rate;
}
+static bool
+intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
+ u8 max_frl_bw_mask, u8 *frl_trained_mask)
+{
+ if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) &&
+ drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL &&
+ *frl_trained_mask >= max_frl_bw_mask)
+ return true;
+
+ return false;
+}
+
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
@@ -2164,10 +2232,6 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
u8 max_frl_bw_mask = 0, frl_trained_mask;
bool is_active;
- ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
- if (ret < 0)
- return ret;
-
max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
@@ -2179,6 +2243,12 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
if (max_frl_bw <= 0)
return -EINVAL;
+ max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
+ drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);
+
+ if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
+ goto frl_trained;
+
ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
if (ret < 0)
return ret;
@@ -2188,7 +2258,6 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
if (!is_active)
return -ETIMEDOUT;
- max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
DP_PCON_ENABLE_SEQUENTIAL_LINK);
if (ret < 0)
@@ -2204,19 +2273,15 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
* Wait for FRL to be completed
* Check if the HDMI Link is up and active.
*/
- wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);
+ wait_for(is_active =
+ intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
+ TIMEOUT_HDMI_LINK_ACTIVE_MS);
if (!is_active)
return -ETIMEDOUT;
- /* Verify HDMI Link configuration shows FRL Mode */
- if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
- DP_PCON_HDMI_MODE_FRL) {
- drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
- return -EINVAL;
- }
- drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
-
+frl_trained:
+ drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
intel_dp->frl.is_trained = true;
drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
@@ -2234,6 +2299,28 @@ static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
return false;
}
+static
+int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
+{
+ int ret;
+ u8 buf = 0;
+
+ /* Set PCON source control mode */
+ buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE;
+
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+ if (ret < 0)
+ return ret;
+
+ /* Set HDMI LINK ENABLE */
+ buf |= DP_PCON_ENABLE_HDMI_LINK;
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -2252,7 +2339,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
int ret, mode;
drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
- ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+ ret = intel_dp_pcon_set_tmds_mode(intel_dp);
mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
@@ -2614,6 +2701,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp->use_rate_select = true;
else
intel_dp_set_sink_rates(intel_dp);
+ intel_dp_set_max_sink_lane_count(intel_dp);
intel_dp_set_common_rates(intel_dp);
intel_dp_reset_max_link_params(intel_dp);
@@ -2659,6 +2747,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
drm_dp_is_branch(intel_dp->dpcd));
intel_dp_set_sink_rates(intel_dp);
+ intel_dp_set_max_sink_lane_count(intel_dp);
intel_dp_set_common_rates(intel_dp);
}
@@ -3817,7 +3906,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
to_intel_crtc_state(crtc->base.state);
/* Keep underrun reporting disabled until things are stable */
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder)
@@ -4971,7 +5060,7 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
DRM_MODE_LINK_STATUS_BAD);
mutex_unlock(&connector->dev->mode_config.mutex);
/* Send Hotplug uevent so userspace can reprobe */
- drm_kms_helper_hotplug_event(connector->dev);
+ drm_kms_helper_connector_hotplug_event(connector);
}
bool
@@ -5025,6 +5114,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_set_source_rates(intel_dp);
intel_dp_set_default_sink_rates(intel_dp);
+ intel_dp_set_default_max_sink_lane_count(intel_dp);
intel_dp_set_common_rates(intel_dp);
intel_dp_reset_max_link_params(intel_dp);
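
A recurring pattern in the intel_dp.c changes above: raw common_rates[] indexing is funneled through intel_dp_common_rate(), which drm_WARN_ON()s an out-of-range index and falls back to the 162000 kHz RBR rate, and the sink's DPCD lane count is validated against the legal 1/2/4 values once at read time. A hedged sketch of a caller relying on the bounds-checked accessor (the example_ name is illustrative):

static int example_rate_span(struct intel_dp *intel_dp)
{
        int min_rate = intel_dp_common_rate(intel_dp, 0);
        int max_rate = intel_dp_common_rate(intel_dp,
                                            intel_dp->num_common_rates - 1);

        /* both lookups degrade safely to 162000 on a corrupt rate table */
        return max_rate - min_rate;
}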
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 3897468140e0..97cf3cac0105 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -34,6 +34,7 @@
* for some reason.
*/
+#include "i915_drv.h"
#include "intel_backlight.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -287,6 +288,12 @@ intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u3
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ if (!panel->backlight.edp.vesa.info.aux_set) {
+ const u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
+
+ intel_backlight_set_pwm_level(conn_state, pwm_level);
+ }
+
drm_edp_backlight_set_level(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
}
@@ -299,8 +306,13 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
if (!panel->backlight.edp.vesa.info.aux_enable) {
- u32 pwm_level = intel_backlight_invert_pwm_level(connector,
- panel->backlight.pwm_level_max);
+ u32 pwm_level;
+
+ if (!panel->backlight.edp.vesa.info.aux_set)
+ pwm_level = intel_backlight_level_to_pwm(connector, level);
+ else
+ pwm_level = intel_backlight_invert_pwm_level(connector,
+ panel->backlight.pwm_level_max);
panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
}
@@ -337,7 +349,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
if (ret < 0)
return ret;
- if (!panel->backlight.edp.vesa.info.aux_enable) {
+ if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
drm_err(&i915->drm,
@@ -346,14 +358,27 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
return ret;
}
}
- panel->backlight.max = panel->backlight.edp.vesa.info.max;
- panel->backlight.min = 0;
- if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
- panel->backlight.level = current_level;
- panel->backlight.enabled = panel->backlight.level != 0;
+
+ if (panel->backlight.edp.vesa.info.aux_set) {
+ panel->backlight.max = panel->backlight.edp.vesa.info.max;
+ panel->backlight.min = 0;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
+ panel->backlight.level = current_level;
+ panel->backlight.enabled = panel->backlight.level != 0;
+ } else {
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
+ }
} else {
- panel->backlight.level = panel->backlight.max;
- panel->backlight.enabled = false;
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
+ panel->backlight.level = panel->backlight.pwm_funcs->get(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
+ } else {
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
+ }
}
return 0;
@@ -437,11 +462,17 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
}
/*
- * A lot of eDP panels in the wild will report supporting both the
- * Intel proprietary backlight control interface, and the VESA
- * backlight control interface. Many of these panels are liars though,
- * and will only work with the Intel interface. So, always probe for
- * that first.
+ * Since Intel has its own backlight control interface, the majority of machines out there
+ * using DPCD backlight controls with Intel GPUs will be using this interface as opposed to
+ * the VESA interface. However, other GPUs (such as Nvidia's) will always use the VESA
+ * interface. This means that there are quite a number of panels out there that will advertise
+ * support for both interfaces, primarily systems with Intel/Nvidia hybrid GPU setups.
+ *
+ * There's a catch to this though: on many panels that advertise support for both
+ * interfaces, the VESA backlight interface will stop working once we've programmed the
+ * panel with Intel's OUI - which is also required for us to be able to detect Intel's
+ * backlight interface at all. This means that the only sensible way for us to detect both
+ * interfaces is to probe for Intel's first, and VESA's second.
*/
if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
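
The rewritten comment above encodes an ordering constraint: programming the source OUI, which is required merely to detect Intel's proprietary interface, can disable the VESA interface on dual-capability panels, so the Intel probe must always run first. Schematically (the use_*() helpers are hypothetical placeholders for the setup paths in this file):

if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector))
        use_intel_interface();          /* probe the OUI-gated interface first */
else if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector))
        use_vesa_interface();           /* only then fall back to VESA */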
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 85676c953e0a..9451f336f28f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -21,11 +21,11 @@
* IN THE SOFTWARE.
*/
+#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
-
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
@@ -301,7 +301,10 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
- return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
+ DISPLAY_VER(i915) >= 11;
}
/* 128b/132b */
@@ -683,15 +686,6 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
return true;
}
-static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
- enum drm_dp_phy dp_phy)
-{
- if (dp_phy == DP_PHY_DPRX)
- drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd);
- else
- drm_dp_lttpr_link_train_clock_recovery_delay();
-}
-
static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
const u8 old_link_status[DP_LINK_STATUS_SIZE],
const u8 new_link_status[DP_LINK_STATUS_SIZE])
@@ -750,6 +744,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
u8 link_status[DP_LINK_STATUS_SIZE];
bool max_vswing_reached = false;
char phy_name[10];
+ int delay_us;
+
+ delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
+ intel_dp->dpcd, dp_phy,
+ intel_dp_is_uhbr(crtc_state));
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
@@ -777,7 +776,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
- intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);
+ usleep_range(delay_us, 2 * delay_us);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
@@ -895,19 +894,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
return DP_TRAINING_PATTERN_2;
}
-static void
-intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
- enum drm_dp_phy dp_phy)
-{
- if (dp_phy == DP_PHY_DPRX) {
- drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd);
- } else {
- const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
-
- drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps);
- }
-}
-
/*
* Perform the link training channel equalization phase on the given DP PHY
* using one of training pattern 2, 3 or 4 depending on the source and
@@ -925,6 +911,11 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
char phy_name[10];
+ int delay_us;
+
+ delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
+ intel_dp->dpcd, dp_phy,
+ intel_dp_is_uhbr(crtc_state));
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
@@ -944,8 +935,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
}
for (tries = 0; tries < 5; tries++) {
- intel_dp_link_training_channel_equalization_delay(intel_dp,
- dp_phy);
+ usleep_range(delay_us, 2 * delay_us);
+
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm,
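
Both training phases above now follow the same shape: read the required delay for the target PHY once from the DPCD caps, then sleep inside the retry loop with usleep_range(), replacing the old fixed drm_dp_*_delay() helpers. A condensed, hedged sketch of the clock-recovery side (the example_ function is illustrative):

static void example_cr_loop(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *crtc_state,
                            enum drm_dp_phy dp_phy, int max_cr_tries)
{
        int delay_us, cr_tries;

        delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
                                                    intel_dp->dpcd, dp_phy,
                                                    intel_dp_is_uhbr(crtc_state));

        for (cr_tries = 0; cr_tries < max_cr_tries; cr_tries++) {
                usleep_range(delay_us, 2 * delay_us);   /* minimum plus slack */
                /* ... read link status, adjust vswing/pre-emphasis ... */
        }
}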
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 89d701e8ae9d..b8bc7d397c81 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -231,6 +231,7 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector_iter;
+ int ret = 0;
if (DISPLAY_VER(dev_priv) < 12)
return 0;
@@ -243,7 +244,6 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
struct intel_digital_connector_state *conn_iter_state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- int ret;
if (connector_iter->mst_port != connector->mst_port ||
connector_iter == connector)
@@ -252,8 +252,8 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
conn_iter_state = intel_atomic_get_digital_connector_state(state,
connector_iter);
if (IS_ERR(conn_iter_state)) {
- drm_connector_list_iter_end(&connector_list_iter);
- return PTR_ERR(conn_iter_state);
+ ret = PTR_ERR(conn_iter_state);
+ break;
}
if (!conn_iter_state->base.crtc)
@@ -262,20 +262,18 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
crtc = to_intel_crtc(conn_iter_state->base.crtc);
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state)) {
- drm_connector_list_iter_end(&connector_list_iter);
- return PTR_ERR(crtc_state);
+ ret = PTR_ERR(crtc_state);
+ break;
}
ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
- if (ret) {
- drm_connector_list_iter_end(&connector_list_iter);
- return ret;
- }
+ if (ret)
+ break;
crtc_state->uapi.mode_changed = true;
}
drm_connector_list_iter_end(&connector_list_iter);
- return 0;
+ return ret;
}
static int
@@ -348,16 +346,6 @@ static void wait_for_act_sent(struct intel_encoder *encoder,
drm_dp_check_act_status(&intel_dp->mst_mgr);
}
-static void intel_mst_pre_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder, old_crtc_state,
- old_conn_state);
-}
-
static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -382,6 +370,9 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
if (ret) {
drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
}
+ if (old_crtc_state->has_audio)
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
}
static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
@@ -916,7 +907,6 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
- intel_encoder->pre_disable = intel_mst_pre_disable_dp;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->update_pipe = intel_ddi_update_pipe;
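
The intel_dp_mst_atomic_master_trans_check() rework above converges all error paths on a single drm_connector_list_iter_end() call instead of ending the iterator separately in each branch. The general shape of that pattern, as a hedged standalone sketch (do_something() is a placeholder):

static int example_check_all(struct drm_device *dev)
{
        struct drm_connector_list_iter iter;
        struct drm_connector *connector;
        int ret = 0;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                ret = do_something(connector); /* placeholder work item */
                if (ret)
                        break;  /* fall through to the single cleanup */
        }
        drm_connector_list_iter_end(&iter);

        return ret;
}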
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 04a7af8340ca..1ce0c171f4fb 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1823,7 +1823,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
struct intel_crtc_state *crtc_state;
crtc_state = intel_crtc_state_alloc(crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 0a7e04db04be..fc8fda77483a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -26,6 +26,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
+#include "intel_pch_refclk.h"
#include "intel_tc.h"
/**
@@ -3740,7 +3741,7 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
* domain.
*/
pll->wakeref = intel_display_power_get(dev_priv,
- POWER_DOMAIN_DPLL_DC_OFF);
+ POWER_DOMAIN_DC_OFF);
}
icl_pll_power_enable(dev_priv, pll, enable_reg);
@@ -3847,7 +3848,7 @@ static void combo_pll_disable(struct drm_i915_private *dev_priv,
if (IS_JSL_EHL(dev_priv) &&
pll->info->id == DPLL_ID_EHL_DPLL4)
- intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
pll->wakeref);
}
@@ -4231,7 +4232,7 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
if (IS_JSL_EHL(i915) && pll->on &&
pll->info->id == DPLL_ID_EHL_DPLL4) {
pll->wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_DPLL_DC_OFF);
+ POWER_DOMAIN_DC_OFF);
}
pll->state.pipe_mask = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2f59d863be4c..ef2889753807 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -27,7 +27,6 @@
#include <linux/types.h>
-#include "intel_display.h"
#include "intel_wakeref.h"
/*FIXME: Move this to a more appropriate place. */
@@ -37,6 +36,7 @@
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
+enum tc_port;
struct drm_device;
struct drm_i915_private;
struct intel_atomic_state;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 8f7b1f7534a4..8f674745e7e0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -167,6 +167,64 @@ void intel_dpt_unpin(struct i915_address_space *vm)
i915_vma_put(dpt->vma);
}
+/**
+ * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
+ * @i915: device instance
+ *
+ * Restore the memory mapping during system resume for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table. The contents of these page
+ * tables are not stored in the hibernation image during S4 and S3RST->S4
+ * transitions, so here we reprogram the PTE entries in those tables.
+ *
+ * This function must be called after the mappings in GGTT have been restored
+ * by calling i915_ggtt_resume().
+ */
+void intel_dpt_resume(struct drm_i915_private *i915)
+{
+ struct drm_framebuffer *drm_fb;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ mutex_lock(&i915->drm.mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, &i915->drm) {
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+
+ if (fb->dpt_vm)
+ i915_ggtt_resume_vm(fb->dpt_vm);
+ }
+ mutex_unlock(&i915->drm.mode_config.fb_lock);
+}
+
+/**
+ * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
+ * @i915: device instance
+ *
+ * Suspend the memory mapping during system suspend for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table.
+ *
+ * This function must be called before the mappings in GGTT are suspended
+ * by calling i915_ggtt_suspend().
+ */
+void intel_dpt_suspend(struct drm_i915_private *i915)
+{
+ struct drm_framebuffer *drm_fb;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ mutex_lock(&i915->drm.mode_config.fb_lock);
+
+ drm_for_each_fb(drm_fb, &i915->drm) {
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+
+ if (fb->dpt_vm)
+ i915_ggtt_suspend_vm(fb->dpt_vm);
+ }
+
+ mutex_unlock(&i915->drm.mode_config.fb_lock);
+}
+
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
@@ -206,7 +264,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
vm = &dpt->vm;
- vm->gt = &i915->gt;
+ vm->gt = to_gt(i915);
vm->i915 = i915;
vm->dma = i915->drm.dev;
vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
@@ -221,8 +279,6 @@ intel_dpt_create(struct intel_framebuffer *fb)
vm->vma_ops.bind_vma = dpt_bind_vma;
vm->vma_ops.unbind_vma = dpt_unbind_vma;
- vm->vma_ops.set_pages = ggtt_set_pages;
- vm->vma_ops.clear_pages = clear_pages;
vm->pte_encode = gen8_ggtt_pte_encode;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index 45142b8f849f..e18a9f767b11 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -6,6 +6,8 @@
#ifndef __INTEL_DPT_H__
#define __INTEL_DPT_H__
+struct drm_i915_private;
+
struct i915_address_space;
struct i915_vma;
struct intel_framebuffer;
@@ -13,6 +15,8 @@ struct intel_framebuffer;
void intel_dpt_destroy(struct i915_address_space *vm);
struct i915_vma *intel_dpt_pin(struct i915_address_space *vm);
void intel_dpt_unpin(struct i915_address_space *vm);
+void intel_dpt_suspend(struct drm_i915_private *i915);
+void intel_dpt_resume(struct drm_i915_private *i915);
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb);
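
The kernel-doc for the two new entry points above pins down their ordering relative to the GGTT helpers: DPT page tables live in GGTT, so they are saved before GGTT suspend and restored only after GGTT resume. A hedged ordering sketch (the example_ wrappers are illustrative, and the ggtt field access reflects the i915 layout of this era):

static void example_display_suspend(struct drm_i915_private *i915)
{
        intel_dpt_suspend(i915);        /* save DPT PTEs while GGTT is live */
        i915_ggtt_suspend(&i915->ggtt);
}

static void example_display_resume(struct drm_i915_private *i915)
{
        i915_ggtt_resume(&i915->ggtt);
        intel_dpt_resume(i915);         /* reprogram DPT PTEs afterwards */
}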
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 62a8a69f9f5d..83a69a4a4fea 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -100,7 +100,7 @@ void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
u32 reg_val;
if (!dsb) {
- intel_de_write(dev_priv, reg, val);
+ intel_de_write_fw(dev_priv, reg, val);
return;
}
buf = dsb->cmd_buf;
@@ -177,7 +177,7 @@ void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
dsb = crtc_state->dsb;
if (!dsb) {
- intel_de_write(dev_priv, reg, val);
+ intel_de_write_fw(dev_priv, reg, val);
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 6b0301ba046e..a50422e03a7e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -4,6 +4,8 @@
*/
#include <drm/drm_mipi_dsi.h>
+
+#include "i915_drv.h"
#include "intel_dsi.h"
#include "intel_panel.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index fbc40ffdc02e..a3a906cb097e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -166,57 +166,15 @@ static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
return enc_to_intel_dsi(encoder)->ports;
}
-/* icl_dsi.c */
-void icl_dsi_init(struct drm_i915_private *dev_priv);
-void icl_dsi_frame_update(struct intel_crtc_state *crtc_state);
-
-/* intel_dsi.c */
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
enum drm_panel_orientation
intel_dsi_get_panel_orientation(struct intel_connector *connector);
-
-/* vlv_dsi.c */
-void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
-enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
int intel_dsi_get_modes(struct drm_connector *connector);
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
const struct mipi_dsi_host_ops *funcs,
enum port port);
-void vlv_dsi_init(struct drm_i915_private *dev_priv);
-
-/* vlv_dsi_pll.c */
-int vlv_dsi_pll_compute(struct intel_encoder *encoder,
- struct intel_crtc_state *config);
-void vlv_dsi_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *config);
-void vlv_dsi_pll_disable(struct intel_encoder *encoder);
-u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
- struct intel_crtc_state *config);
-void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
-
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
-int bxt_dsi_pll_compute(struct intel_encoder *encoder,
- struct intel_crtc_state *config);
-void bxt_dsi_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *config);
-void bxt_dsi_pll_disable(struct intel_encoder *encoder);
-u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
- struct intel_crtc_state *config);
-void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
-
-void assert_dsi_pll_enabled(struct drm_i915_private *i915);
-void assert_dsi_pll_disabled(struct drm_i915_private *i915);
-
-/* intel_dsi_vbt.c */
-bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
-void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
-void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
-void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
- enum mipi_seq seq_id);
-void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
-void intel_dsi_log_params(struct intel_dsi *intel_dsi);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index f61ed82e8867..7d234429e71e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -71,6 +71,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
u8 data[2] = {};
enum port port;
size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
+ unsigned long mode_flags;
if (len == 1) {
data[0] = level;
@@ -81,8 +82,11 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mode_flags = dsi_device->mode_flags;
+ dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
&data, len);
+ dsi_device->mode_flags = mode_flags;
}
}
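
The dcs_set_backlight() change above temporarily clears MIPI_DSI_MODE_LPM so the brightness command goes out in high-speed mode, then restores the caller's flags. The same save/clear/restore shape as a hedged standalone sketch:

static void example_hs_dcs_brightness(struct mipi_dsi_device *dsi,
                                      const u8 *data, size_t len)
{
        unsigned long saved_flags = dsi->mode_flags;

        dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;  /* force the HS transfer path */
        mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, data, len);
        dsi->mode_flags = saved_flags;          /* restore for later transfers */
}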
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index f241bedb8597..0da91849efde 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -41,6 +41,8 @@
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
+#include "intel_dsi_vbt.h"
+#include "vlv_dsi.h"
#include "vlv_sideband.h"
#define MIPI_TRANSFER_MODE_SHIFT 0
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
new file mode 100644
index 000000000000..dc642c1fe7ef
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DSI_VBT_H__
+#define __INTEL_DSI_VBT_H__
+
+#include <linux/types.h>
+
+enum mipi_seq;
+struct intel_dsi;
+
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id);
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
+void intel_dsi_log_params(struct intel_dsi *intel_dsi);
+
+#endif /* __INTEL_DSI_VBT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index cb511b2b7069..23cfe2e5ce2a 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -6,6 +6,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper.h>
+#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
@@ -13,26 +14,465 @@
#define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
-bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
+/*
+ * From the Sky Lake PRM:
+ * "The Color Control Surface (CCS) contains the compression status of
+ * the cache-line pairs. The compression state of the cache-line pair
+ * is specified by 2 bits in the CCS. Each CCS cache-line represents
+ * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
+ * cache-line-pairs. CCS is always Y tiled."
+ *
+ * Since a cache-line pair refers to two horizontally adjacent cache lines,
+ * each cache line in the CCS corresponds to an area of 32x16 cache
+ * lines on the main surface. Since each pixel is 4 bytes, this gives
+ * us a ratio of one byte in the CCS for each 8x16 pixels in the
+ * main surface.
+ */
+static const struct drm_format_info skl_ccs_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+};
+
+/*
+ * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
+ * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
+ * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
+ * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
+ * the main surface.
+ */
+static const struct drm_format_info gen12_ccs_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_YUYV, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVYU, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_UYVY, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VYUY, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV12, .num_planes = 4,
+ .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P010, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P012, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P016, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+};
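
Spelling out the arithmetic in the comment above gen12_ccs_formats[]: 4 bits of CCS per 128B cache-line pair means one CCS byte covers two pairs, i.e. 256 main-surface bytes, which at 4 bytes per pixel is exactly a 2x32-pixel slice of a Y-tile. A hypothetical helper capturing that 1:256 ratio:

static u32 example_gen12_ccs_bytes(u32 main_surface_bytes)
{
        /* 1 CCS byte per 256 bytes of main surface, rounded up */
        return DIV_ROUND_UP(main_surface_bytes, 256);
}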
+
+/*
+ * Same as gen12_ccs_formats[] above, but with additional surface used
+ * to pass Clear Color information in plane 2 with 64 bits of data.
+ */
+static const struct drm_format_info gen12_ccs_cc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+};
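+/*
+ * Note that plane 2 above carries only the single 64 bit Clear Color
+ * value rather than per-pixel data, hence its .char_per_block of 0: its
+ * size is not derived from the fb dimensions.
+ */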
+
+struct intel_modifier_desc {
+ u64 modifier;
+ struct {
+ u8 from;
+ u8 until;
+ } display_ver;
+#define DISPLAY_VER_ALL { 0, -1 }
+
+ const struct drm_format_info *formats;
+ int format_count;
+#define FORMAT_OVERRIDE(format_list) \
+ .formats = format_list, \
+ .format_count = ARRAY_SIZE(format_list)
+
+ u8 plane_caps;
+
+ struct {
+ u8 cc_planes:3;
+ u8 packed_aux_planes:4;
+ u8 planar_aux_planes:4;
+ } ccs;
+};
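+/*
+ * As an example, in the gen12 RC_CCS_CC entry below .ccs.cc_planes =
+ * BIT(2) marks color plane 2 as the clear color plane, while
+ * .ccs.packed_aux_planes = BIT(1) marks color plane 1 as the CCS AUX
+ * plane of packed formats.
+ */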
+
+#define INTEL_PLANE_CAP_CCS_MASK (INTEL_PLANE_CAP_CCS_RC | \
+ INTEL_PLANE_CAP_CCS_RC_CC | \
+ INTEL_PLANE_CAP_CCS_MC)
+#define INTEL_PLANE_CAP_TILING_MASK (INTEL_PLANE_CAP_TILING_X | \
+ INTEL_PLANE_CAP_TILING_Y | \
+ INTEL_PLANE_CAP_TILING_Yf)
+#define INTEL_PLANE_CAP_TILING_NONE 0
+
+static const struct intel_modifier_desc intel_modifiers[] = {
+ {
+ .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
+ .display_ver = { 12, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_MC,
+
+ .ccs.packed_aux_planes = BIT(1),
+ .ccs.planar_aux_planes = BIT(2) | BIT(3),
+
+ FORMAT_OVERRIDE(gen12_ccs_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ .display_ver = { 12, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC,
+
+ .ccs.packed_aux_planes = BIT(1),
+
+ FORMAT_OVERRIDE(gen12_ccs_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
+ .display_ver = { 12, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC_CC,
+
+ .ccs.cc_planes = BIT(2),
+ .ccs.packed_aux_planes = BIT(1),
+
+ FORMAT_OVERRIDE(gen12_ccs_cc_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_Yf_TILED_CCS,
+ .display_ver = { 9, 11 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Yf | INTEL_PLANE_CAP_CCS_RC,
+
+ .ccs.packed_aux_planes = BIT(1),
+
+ FORMAT_OVERRIDE(skl_ccs_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_Y_TILED_CCS,
+ .display_ver = { 9, 11 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC,
+
+ .ccs.packed_aux_planes = BIT(1),
+
+ FORMAT_OVERRIDE(skl_ccs_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_Yf_TILED,
+ .display_ver = { 9, 11 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Yf,
+ }, {
+ .modifier = I915_FORMAT_MOD_Y_TILED,
+ .display_ver = { 9, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Y,
+ }, {
+ .modifier = I915_FORMAT_MOD_X_TILED,
+ .display_ver = DISPLAY_VER_ALL,
+ .plane_caps = INTEL_PLANE_CAP_TILING_X,
+ }, {
+ .modifier = DRM_FORMAT_MOD_LINEAR,
+ .display_ver = DISPLAY_VER_ALL,
+ },
+};
+
+static const struct intel_modifier_desc *lookup_modifier_or_null(u64 modifier)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++)
+ if (intel_modifiers[i].modifier == modifier)
+ return &intel_modifiers[i];
+
+ return NULL;
+}
+
+static const struct intel_modifier_desc *lookup_modifier(u64 modifier)
+{
+ const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier);
+
+ if (WARN_ON(!md))
+ return &intel_modifiers[0];
+
+ return md;
+}
+
+static const struct drm_format_info *
+lookup_format_info(const struct drm_format_info formats[],
+ int num_formats, u32 format)
+{
+ int i;
+
+ for (i = 0; i < num_formats; i++) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * intel_fb_get_format_info: Get modifier specific format information
+ * @cmd: FB add command structure
+ *
+ * Returns:
+ * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0],
+ * or %NULL if the modifier doesn't override the format.
+ */
+const struct drm_format_info *
+intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+{
+ const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]);
+
+ if (!md || !md->formats)
+ return NULL;
+
+ return lookup_format_info(md->formats, md->format_count, cmd->pixel_format);
+}
+
+static bool plane_caps_contain_any(u8 caps, u8 mask)
+{
+ return caps & mask;
+}
+
+static bool plane_caps_contain_all(u8 caps, u8 mask)
+{
+ return (caps & mask) == mask;
+}
+
+/**
+ * intel_fb_is_ccs_modifier: Check if a modifier is a CCS modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a render, render with color clear or
+ * media compression modifier.
+ */
+bool intel_fb_is_ccs_modifier(u64 modifier)
+{
+ return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+ INTEL_PLANE_CAP_CCS_MASK);
+}
+
+/**
+ * intel_fb_is_rc_ccs_cc_modifier: Check if a modifier is an RC CCS CC modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a render with color clear modifier.
+ */
+bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier)
+{
+ return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+ INTEL_PLANE_CAP_CCS_RC_CC);
+}
+
+/**
+ * intel_fb_is_mc_ccs_modifier: Check if a modifier is an MC CCS modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a media compression modifier.
+ */
+bool intel_fb_is_mc_ccs_modifier(u64 modifier)
+{
+ return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+ INTEL_PLANE_CAP_CCS_MC);
+}
+
+static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md,
+ u8 display_ver_from, u8 display_ver_until)
+{
+ return md->display_ver.from <= display_ver_until &&
+ display_ver_from <= md->display_ver.until;
+}
+
+static bool plane_has_modifier(struct drm_i915_private *i915,
+ u8 plane_caps,
+ const struct intel_modifier_desc *md)
+{
+ if (!IS_DISPLAY_VER(i915, md->display_ver.from, md->display_ver.until))
+ return false;
+
+ if (!plane_caps_contain_all(plane_caps, md->plane_caps))
+ return false;
+
+ return true;
+}
+
+/**
+ * intel_fb_plane_get_modifiers: Get the modifiers for the given platform and plane capabilities
+ * @i915: i915 device instance
+ * @plane_caps: capabilities for the plane the modifiers are queried for
+ *
+ * Returns:
+ * Returns the list of modifiers allowed by the @i915 platform and @plane_caps.
+ * The caller must free the returned buffer.
+ */
+u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915,
+ u8 plane_caps)
+{
+ u64 *list, *p;
+ int count = 1; /* +1 for invalid modifier terminator */
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) {
+ if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i]))
+ count++;
+ }
+
+ list = kmalloc_array(count, sizeof(*list), GFP_KERNEL);
+ if (drm_WARN_ON(&i915->drm, !list))
+ return NULL;
+
+ p = list;
+ for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) {
+ if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i]))
+ *p++ = intel_modifiers[i].modifier;
+ }
+ *p++ = DRM_FORMAT_MOD_INVALID;
+
+ return list;
+}
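+/*
+ * A typical caller passes the returned list to drm_universal_plane_init(),
+ * which copies it, and then kfree()s it once the plane is initialized.
+ */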
+
+/**
+ * intel_fb_plane_supports_modifier: Determine if a modifier is supported by the given plane
+ * @plane: Plane to check the modifier support for
+ * @modifier: The modifier to check the support for
+ *
+ * Returns:
+ * %true if the @modifier is supported on @plane.
+ */
+bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier)
+{
+ int i;
+
+ for (i = 0; i < plane->base.modifier_count; i++)
+ if (plane->base.modifiers[i] == modifier)
+ return true;
+
+ return false;
+}
+
+static bool format_is_yuv_semiplanar(const struct intel_modifier_desc *md,
+ const struct drm_format_info *info)
{
- if (!is_ccs_modifier(fb->modifier))
+ int yuv_planes;
+
+ if (!info->is_yuv)
return false;
- return plane >= fb->format->num_planes / 2;
+ if (plane_caps_contain_any(md->plane_caps, INTEL_PLANE_CAP_CCS_MASK))
+ yuv_planes = 4;
+ else
+ yuv_planes = 2;
+
+ return info->num_planes == yuv_planes;
+}
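+/*
+ * E.g. NV12 has 2 color planes on its own, but 4 (2 data + 2 CCS AUX
+ * planes) when combined with a media compression modifier, as in the
+ * gen12_ccs_formats[] table above.
+ */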
+
+/**
+ * intel_format_info_is_yuv_semiplanar: Check if the given format is YUV semiplanar
+ * @info: format to check
+ * @modifier: modifier used with the format
+ *
+ * Returns:
+ * %true if @info / @modifier is YUV semiplanar.
+ */
+bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+ u64 modifier)
+{
+ return format_is_yuv_semiplanar(lookup_modifier(modifier), info);
+}
+
+static u8 ccs_aux_plane_mask(const struct intel_modifier_desc *md,
+ const struct drm_format_info *format)
+{
+ if (format_is_yuv_semiplanar(md, format))
+ return md->ccs.planar_aux_planes;
+ else
+ return md->ccs.packed_aux_planes;
+}
+
+/**
+ * intel_fb_is_ccs_aux_plane: Check if a framebuffer color plane is a CCS AUX plane
+ * @fb: Framebuffer
+ * @color_plane: color plane index to check
+ *
+ * Returns:
+ * Returns %true if @fb's color plane at index @color_plane is a CCS AUX plane.
+ */
+bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane)
+{
+ const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
+
+ return ccs_aux_plane_mask(md, fb->format) & BIT(color_plane);
+}
+
+/**
+ * intel_fb_is_gen12_ccs_aux_plane: Check if a framebuffer color plane is a GEN12 CCS AUX plane
+ * @fb: Framebuffer
+ * @color_plane: color plane index to check
+ *
+ * Returns:
+ * Returns %true if @fb's color plane at index @color_plane is a GEN12 CCS AUX plane.
+ */
+static bool intel_fb_is_gen12_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane)
+{
+ const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
+
+ return check_modifier_display_ver_range(md, 12, 13) &&
+ ccs_aux_plane_mask(md, fb->format) & BIT(color_plane);
}
-bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
+/**
+ * intel_fb_rc_ccs_cc_plane: Get the CCS CC color plane index for a framebuffer
+ * @fb: Framebuffer
+ *
+ * Returns:
+ * Returns the index of the color clear plane for @fb, or -1 if @fb is not a
+ * framebuffer using a render compression/color clear modifier.
+ */
+int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb)
{
- return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
+ const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
+
+ if (!md->ccs.cc_planes)
+ return -1;
+
+ drm_WARN_ON_ONCE(fb->dev, hweight8(md->ccs.cc_planes) > 1);
+
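+	/* E.g. cc_planes == BIT(2) yields color plane 2. */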
+ return ilog2((int)md->ccs.cc_planes);
}
-bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
+static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int color_plane)
{
- return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
- plane == 2;
+ return intel_fb_rc_ccs_cc_plane(fb) == color_plane;
}
-bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane)
+static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane)
{
return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
color_plane == 1;
@@ -41,12 +481,13 @@ bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane)
bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
{
return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
- is_gen12_ccs_plane(fb, color_plane);
+ intel_fb_is_gen12_ccs_aux_plane(fb, color_plane) ||
+ is_gen12_ccs_cc_plane(fb, color_plane);
}
int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
{
- drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+ drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) ||
(main_plane && main_plane >= fb->format->num_planes / 2));
return fb->format->num_planes / 2 + main_plane;
@@ -54,7 +495,7 @@ int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
{
- drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+ drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) ||
ccs_plane < fb->format->num_planes / 2);
if (is_gen12_ccs_cc_plane(fb, ccs_plane))
@@ -63,35 +504,12 @@ int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
return ccs_plane - fb->format->num_planes / 2;
}
-static unsigned int gen12_aligned_scanout_stride(const struct intel_framebuffer *fb,
- int color_plane)
-{
- struct drm_i915_private *i915 = to_i915(fb->base.dev);
- unsigned int stride = fb->base.pitches[color_plane];
-
- if (IS_ALDERLAKE_P(i915))
- return roundup_pow_of_two(max(stride,
- 8u * intel_tile_width_bytes(&fb->base, color_plane)));
-
- return stride;
-}
-
static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_plane)
{
- struct drm_i915_private *i915 = to_i915(fb->base.dev);
int main_plane = skl_ccs_to_main_plane(&fb->base, ccs_plane);
unsigned int main_stride = fb->base.pitches[main_plane];
unsigned int main_tile_width = intel_tile_width_bytes(&fb->base, main_plane);
- /*
- * On ADL-P the AUX stride must align with a power-of-two aligned main
- * surface stride. The stride of the allocated main surface object can
- * be less than this POT stride, which is then autopadded to the POT
- * size.
- */
- if (IS_ALDERLAKE_P(i915))
- main_stride = gen12_aligned_scanout_stride(fb, main_plane);
-
return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64;
}
@@ -99,7 +517,7 @@ int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
{
struct drm_i915_private *i915 = to_i915(fb->dev);
- if (is_ccs_modifier(fb->modifier))
+ if (intel_fb_is_ccs_modifier(fb->modifier))
return main_to_ccs_plane(fb, main_plane);
else if (DISPLAY_VER(i915) < 11 &&
intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
@@ -128,13 +546,14 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
else
return 512;
case I915_FORMAT_MOD_Y_TILED_CCS:
- if (is_ccs_plane(fb, color_plane))
+ if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return 128;
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (is_ccs_plane(fb, color_plane))
+ if (intel_fb_is_ccs_aux_plane(fb, color_plane) ||
+ is_gen12_ccs_cc_plane(fb, color_plane))
return 64;
fallthrough;
case I915_FORMAT_MOD_Y_TILED:
@@ -143,7 +562,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
else
return 512;
case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (is_ccs_plane(fb, color_plane))
+ if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return 128;
fallthrough;
case I915_FORMAT_MOD_Yf_TILED:
@@ -199,7 +618,7 @@ static void intel_tile_block_dims(const struct drm_framebuffer *fb, int color_pl
{
intel_tile_dims(fb, color_plane, tile_width, tile_height);
- if (is_gen12_ccs_plane(fb, color_plane))
+ if (intel_fb_is_gen12_ccs_aux_plane(fb, color_plane))
*tile_height = 1;
}
@@ -223,20 +642,33 @@ intel_fb_align_height(const struct drm_framebuffer *fb,
static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
- switch (fb_modifier) {
- case I915_FORMAT_MOD_X_TILED:
- return I915_TILING_X;
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps &
+ INTEL_PLANE_CAP_TILING_MASK;
+
+ switch (tiling_caps) {
+ case INTEL_PLANE_CAP_TILING_Y:
return I915_TILING_Y;
+ case INTEL_PLANE_CAP_TILING_X:
+ return I915_TILING_X;
+ case INTEL_PLANE_CAP_TILING_Yf:
+ case INTEL_PLANE_CAP_TILING_NONE:
+ return I915_TILING_NONE;
default:
+ MISSING_CASE(tiling_caps);
return I915_TILING_NONE;
}
}
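+/*
+ * From display version 13 onwards all tiled (i.e. non-linear) scanout
+ * surfaces are mapped via the display page table (DPT).
+ */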
+static bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
+{
+ return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR;
+}
+
+bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
+{
+ return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
+}
+
unsigned int intel_cursor_alignment(const struct drm_i915_private *i915)
{
if (IS_I830(i915))
@@ -271,7 +703,7 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
return 512 * 4096;
/* AUX_DIST needs only 4K alignment */
- if (is_ccs_plane(fb, color_plane))
+ if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return 4096;
if (is_semiplanar_uv_plane(fb, color_plane)) {
@@ -330,7 +762,7 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
	 * TODO: Deduce the subsampling from the char block for all CCS
* formats and planes.
*/
- if (!is_gen12_ccs_plane(fb, color_plane)) {
+ if (!intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) {
*hsub = fb->format->hsub;
*vsub = fb->format->vsub;
@@ -357,24 +789,13 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h)
{
- struct drm_i915_private *i915 = to_i915(fb->base.dev);
- int main_plane = is_ccs_plane(&fb->base, color_plane) ?
+ int main_plane = intel_fb_is_ccs_aux_plane(&fb->base, color_plane) ?
skl_ccs_to_main_plane(&fb->base, color_plane) : 0;
unsigned int main_width = fb->base.width;
unsigned int main_height = fb->base.height;
int main_hsub, main_vsub;
int hsub, vsub;
- /*
- * On ADL-P the CCS AUX surface layout always aligns with the
- * power-of-two aligned main surface stride. The main surface
- * stride in the allocated FB object may not be power-of-two
- * sized, in which case it is auto-padded to the POT size.
- */
- if (IS_ALDERLAKE_P(i915) && is_ccs_plane(&fb->base, color_plane))
- main_width = gen12_aligned_scanout_stride(fb, 0) /
- fb->base.format->cpp[0];
-
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane);
intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane);
@@ -409,6 +830,20 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
return new_offset;
}
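+/*
+ * For example, with pitch = 4096 and cpp = 4, the pixel at x = 2, y = 1
+ * relative to old_offset = 0 becomes x = 2, y = 0 when rebased to
+ * new_offset = 4096, i.e. to the start of the second line.
+ */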
+static u32 intel_adjust_linear_offset(int *x, int *y,
+ unsigned int cpp,
+ unsigned int pitch,
+ u32 old_offset,
+ u32 new_offset)
+{
+ old_offset += *y * pitch + *x * cpp;
+
+ *y = (old_offset - new_offset) / pitch;
+ *x = ((old_offset - new_offset) - *y * pitch) / cpp;
+
+ return new_offset;
+}
+
static u32 intel_adjust_aligned_offset(int *x, int *y,
const struct drm_framebuffer *fb,
int color_plane,
@@ -439,10 +874,8 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
tile_size, pitch_tiles,
old_offset, new_offset);
} else {
- old_offset += *y * pitch + *x * cpp;
-
- *y = (old_offset - new_offset) / pitch;
- *x = ((old_offset - new_offset) - *y * pitch) / cpp;
+ intel_adjust_linear_offset(x, y, cpp, pitch,
+ old_offset, new_offset);
}
return new_offset;
@@ -459,7 +892,7 @@ u32 intel_plane_adjust_aligned_offset(int *x, int *y,
{
return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
state->hw.rotation,
- state->view.color_plane[color_plane].stride,
+ state->view.color_plane[color_plane].mapping_stride,
old_offset, new_offset);
}
@@ -540,7 +973,7 @@ u32 intel_plane_compute_aligned_offset(int *x, int *y,
struct drm_i915_private *i915 = to_i915(intel_plane->base.dev);
const struct drm_framebuffer *fb = state->hw.fb;
unsigned int rotation = state->hw.rotation;
- int pitch = state->view.color_plane[color_plane].stride;
+ int pitch = state->view.color_plane[color_plane].mapping_stride;
u32 alignment;
if (intel_plane->id == PLANE_CURSOR)
@@ -562,6 +995,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
u32 alignment;
if (DISPLAY_VER(i915) >= 12 &&
+ !intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) &&
is_semiplanar_uv_plane(fb, color_plane))
alignment = intel_tile_row_size(fb, color_plane);
else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
@@ -610,7 +1044,7 @@ static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane
int ccs_x, ccs_y;
int main_x, main_y;
- if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
+ if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane))
return 0;
/*
@@ -673,7 +1107,7 @@ static bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
* The new CCS hash mode isn't compatible with remapping as
* the virtual address of the pages affects the compressed data.
*/
- if (is_ccs_modifier(fb->modifier))
+ if (intel_fb_is_ccs_modifier(fb->modifier))
return false;
/* Linear needs a page aligned stride for remapping */
@@ -699,11 +1133,11 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
{
if (drm_rotation_90_or_270(rotation))
- return fb->rotated_view.color_plane[color_plane].stride;
+ return fb->rotated_view.color_plane[color_plane].mapping_stride;
else if (intel_fb_needs_pot_stride_remap(fb))
- return fb->remapped_view.color_plane[color_plane].stride;
+ return fb->remapped_view.color_plane[color_plane].mapping_stride;
else
- return fb->normal_view.color_plane[color_plane].stride;
+ return fb->normal_view.color_plane[color_plane].mapping_stride;
}
static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
@@ -814,18 +1248,32 @@ plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane,
unsigned int pitch_tiles)
{
if (intel_fb_needs_pot_stride_remap(fb)) {
- unsigned int min_stride = is_ccs_plane(&fb->base, color_plane) ? 2 : 8;
/*
	 * ADL_P, the only platform needing a POT stride, has a minimum
- * of 8 main surface and 2 CCS AUX stride tiles.
+ * of 8 main surface tiles.
*/
- return roundup_pow_of_two(max(pitch_tiles, min_stride));
+ return roundup_pow_of_two(max(pitch_tiles, 8u));
} else {
return pitch_tiles;
}
}
static unsigned int
+plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
+ unsigned int tile_width,
+ unsigned int src_stride_tiles, unsigned int dst_stride_tiles)
+{
+ unsigned int stride_tiles;
+
+ if (IS_ALDERLAKE_P(to_i915(fb->base.dev)))
+ stride_tiles = src_stride_tiles;
+ else
+ stride_tiles = dst_stride_tiles;
+
+ return stride_tiles * tile_width * fb->base.format->cpp[color_plane];
+}
+
+static unsigned int
plane_view_width_tiles(const struct intel_framebuffer *fb, int color_plane,
const struct fb_plane_view_dims *dims,
int x)
@@ -841,11 +1289,31 @@ plane_view_height_tiles(const struct intel_framebuffer *fb, int color_plane,
return DIV_ROUND_UP(y + dims->height, dims->tile_height);
}
+static unsigned int
+plane_view_linear_tiles(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims,
+ int x, int y)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ unsigned int size;
+
+ size = (y + dims->height) * fb->base.pitches[color_plane] +
+ x * fb->base.format->cpp[color_plane];
+
+ return DIV_ROUND_UP(size, intel_tile_size(i915));
+}
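+/*
+ * E.g. a 1920x1080 linear XRGB8888 fb with a 7680 byte pitch and x = y = 0
+ * spans DIV_ROUND_UP(1080 * 7680, 4096) = 2025 tiles, assuming the usual
+ * 4 KiB tile size.
+ */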
+
#define assign_chk_ovf(i915, var, val) ({ \
drm_WARN_ON(&(i915)->drm, overflows_type(val, var)); \
(var) = (val); \
})
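+/*
+ * Bitfield variant of assign_chk_ovf(): sizeof()/typeof() based overflow
+ * checks can't be applied to bitfields, so assign first and then compare
+ * the stored value against the original one.
+ */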
+#define assign_bfld_chk_ovf(i915, var, val) ({ \
+ (var) = (val); \
+ drm_WARN_ON(&(i915)->drm, (var) != (val)); \
+ (var); \
+})
+
static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_plane,
const struct fb_plane_view_dims *dims,
u32 obj_offset, u32 gtt_offset, int x, int y,
@@ -860,12 +1328,26 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
struct drm_rect r;
u32 size = 0;
- assign_chk_ovf(i915, remap_info->offset, obj_offset);
- assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims));
- assign_chk_ovf(i915, remap_info->width, plane_view_width_tiles(fb, color_plane, dims, x));
- assign_chk_ovf(i915, remap_info->height, plane_view_height_tiles(fb, color_plane, dims, y));
+ assign_bfld_chk_ovf(i915, remap_info->offset, obj_offset);
+
+ if (intel_fb_is_gen12_ccs_aux_plane(&fb->base, color_plane)) {
+ remap_info->linear = 1;
+
+ assign_chk_ovf(i915, remap_info->size,
+ plane_view_linear_tiles(fb, color_plane, dims, x, y));
+ } else {
+ remap_info->linear = 0;
+
+ assign_chk_ovf(i915, remap_info->src_stride,
+ plane_view_src_stride_tiles(fb, color_plane, dims));
+ assign_chk_ovf(i915, remap_info->width,
+ plane_view_width_tiles(fb, color_plane, dims, x));
+ assign_chk_ovf(i915, remap_info->height,
+ plane_view_height_tiles(fb, color_plane, dims, y));
+ }
if (view->gtt.type == I915_GGTT_VIEW_ROTATED) {
+ drm_WARN_ON(&i915->drm, remap_info->linear);
check_array_bounds(i915, view->gtt.rotated.plane, color_plane);
assign_chk_ovf(i915, remap_info->dst_stride,
@@ -881,7 +1363,8 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
color_plane_info->x = r.x1;
color_plane_info->y = r.y1;
- color_plane_info->stride = remap_info->dst_stride * tile_height;
+ color_plane_info->mapping_stride = remap_info->dst_stride * tile_height;
+ color_plane_info->scanout_stride = color_plane_info->mapping_stride;
size += remap_info->dst_stride * remap_info->width;
@@ -900,16 +1383,29 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
gtt_offset = aligned_offset;
}
- assign_chk_ovf(i915, remap_info->dst_stride,
- plane_view_dst_stride_tiles(fb, color_plane, remap_info->width));
-
color_plane_info->x = x;
color_plane_info->y = y;
- color_plane_info->stride = remap_info->dst_stride * tile_width *
- fb->base.format->cpp[color_plane];
+ if (remap_info->linear) {
+ color_plane_info->mapping_stride = fb->base.pitches[color_plane];
+ color_plane_info->scanout_stride = color_plane_info->mapping_stride;
- size += remap_info->dst_stride * remap_info->height;
+ size += remap_info->size;
+ } else {
+ unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane,
+ remap_info->width);
+
+ assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
+ color_plane_info->mapping_stride = dst_stride *
+ tile_width *
+ fb->base.format->cpp[color_plane];
+ color_plane_info->scanout_stride =
+ plane_view_scanout_stride(fb, color_plane, tile_width,
+ remap_info->src_stride,
+ dst_stride);
+
+ size += dst_stride * remap_info->height;
+ }
}
/*
@@ -917,10 +1413,16 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
* the x/y offsets. x,y will hold the first pixel of the framebuffer
* plane from the start of the remapped/rotated gtt mapping.
*/
- intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y,
- tile_width, tile_height,
- tile_size, remap_info->dst_stride,
- gtt_offset * tile_size, 0);
+ if (remap_info->linear)
+ intel_adjust_linear_offset(&color_plane_info->x, &color_plane_info->y,
+ fb->base.format->cpp[color_plane],
+ color_plane_info->mapping_stride,
+ gtt_offset * tile_size, 0);
+ else
+ intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y,
+ tile_width, tile_height,
+ tile_size, remap_info->dst_stride,
+ gtt_offset * tile_size, 0);
return size;
}
@@ -933,15 +1435,10 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane,
const struct fb_plane_view_dims *dims,
int x, int y)
{
- struct drm_i915_private *i915 = to_i915(fb->base.dev);
unsigned int tiles;
if (is_surface_linear(&fb->base, color_plane)) {
- unsigned int size;
-
- size = (y + dims->height) * fb->base.pitches[color_plane] +
- x * fb->base.format->cpp[color_plane];
- tiles = DIV_ROUND_UP(size, intel_tile_size(i915));
+ tiles = plane_view_linear_tiles(fb, color_plane, dims, x, y);
} else {
tiles = plane_view_src_stride_tiles(fb, color_plane, dims) *
plane_view_height_tiles(fb, color_plane, dims, y);
@@ -1030,7 +1527,9 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
*/
fb->normal_view.color_plane[i].x = x;
fb->normal_view.color_plane[i].y = y;
- fb->normal_view.color_plane[i].stride = fb->base.pitches[i];
+ fb->normal_view.color_plane[i].mapping_stride = fb->base.pitches[i];
+ fb->normal_view.color_plane[i].scanout_stride =
+ fb->normal_view.color_plane[i].mapping_stride;
offset = calc_plane_aligned_offset(fb, i, &x, &y);
@@ -1080,7 +1579,7 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- drm_WARN_ON(&i915->drm, is_ccs_modifier(fb->modifier));
+ drm_WARN_ON(&i915->drm, intel_fb_is_ccs_modifier(fb->modifier));
/* Make src coordinates relative to the viewport */
drm_rect_translate(&plane_state->uapi.src,
@@ -1143,7 +1642,7 @@ u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
*
* The new CCS hash mode makes remapping impossible
*/
- if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
+ if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) ||
intel_modifier_uses_dpt(dev_priv, modifier))
return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
else if (DISPLAY_VER(dev_priv) >= 7)
@@ -1168,27 +1667,19 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* we need the stride to be page aligned.
*/
if (fb->pitches[color_plane] > max_stride &&
- !is_ccs_modifier(fb->modifier))
+ !intel_fb_is_ccs_modifier(fb->modifier))
return intel_tile_size(dev_priv);
else
return 64;
}
tile_width = intel_tile_width_bytes(fb, color_plane);
- if (is_ccs_modifier(fb->modifier)) {
- /*
- * On ADL-P the stride must be either 8 tiles or a stride
- * that is aligned to 16 tiles, required by the 16 tiles =
- * 64 kbyte CCS AUX PTE granularity, allowing CCS FBs to be
- * remapped.
- */
- if (IS_ALDERLAKE_P(dev_priv))
- tile_width *= fb->pitches[0] <= tile_width * 8 ? 8 : 16;
+ if (intel_fb_is_ccs_modifier(fb->modifier)) {
/*
* On TGL the surface stride must be 4 tile aligned, mapped by
* one 64 byte cacheline on the CCS AUX surface.
*/
- else if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
tile_width *= 4;
/*
* Display WA #0531: skl,bxt,kbl,glk
@@ -1224,7 +1715,7 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
return 0;
/* FIXME other color planes? */
- stride = plane_state->view.color_plane[0].stride;
+ stride = plane_state->view.color_plane[0].mapping_stride;
max_stride = plane->max_stride(plane, fb->format->format,
fb->modifier, rotation);
@@ -1430,7 +1921,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
+ if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) {
int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i);
if (fb->pitches[i] != ccs_aux_stride) {
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 1cbdd84502bd..ba9df8986c1e 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_FB_H__
#define __INTEL_FB_H__
+#include <linux/bits.h>
#include <linux/types.h>
struct drm_device;
@@ -16,12 +17,34 @@ struct drm_i915_private;
struct drm_mode_fb_cmd2;
struct intel_fb_view;
struct intel_framebuffer;
+struct intel_plane;
struct intel_plane_state;
-bool is_ccs_plane(const struct drm_framebuffer *fb, int plane);
-bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane);
-bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane);
-bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane);
+#define INTEL_PLANE_CAP_NONE 0
+#define INTEL_PLANE_CAP_CCS_RC BIT(0)
+#define INTEL_PLANE_CAP_CCS_RC_CC BIT(1)
+#define INTEL_PLANE_CAP_CCS_MC BIT(2)
+#define INTEL_PLANE_CAP_TILING_X BIT(3)
+#define INTEL_PLANE_CAP_TILING_Y BIT(4)
+#define INTEL_PLANE_CAP_TILING_Yf BIT(5)
+
+bool intel_fb_is_ccs_modifier(u64 modifier);
+bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier);
+bool intel_fb_is_mc_ccs_modifier(u64 modifier);
+
+bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane);
+int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb);
+
+u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915,
+ u8 plane_caps);
+bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier);
+
+const struct drm_format_info *
+intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+
+bool
+intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+ u64 modifier);
bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane);
@@ -67,4 +90,6 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *user_mode_cmd);
+bool intel_fb_uses_dpt(const struct drm_framebuffer *fb);
+
#endif /* __INTEL_FB_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 3f77f3013584..31c15e5fca95 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -7,13 +7,13 @@
* DOC: display pinning helpers
*/
-#include "intel_display_types.h"
-#include "intel_fb_pin.h"
-#include "intel_fb.h"
+#include "gem/i915_gem_object.h"
+#include "i915_drv.h"
+#include "intel_display_types.h"
#include "intel_dpt.h"
-
-#include "gem/i915_gem_object.h"
+#include "intel_fb.h"
+#include "intel_fb_pin.h"
static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
@@ -142,13 +142,11 @@ retry:
if (ret)
goto err;
- if (!ret) {
- vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
- view, pinctl);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unpin;
- }
+ vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
+ view, pinctl);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unpin;
}
if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 1f66de77a6b1..160fd2bdafe5 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -41,26 +41,71 @@
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
-#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_cdclk.h"
#include "intel_de.h"
+#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
-/*
- * For SKL+, the plane source size used by the hardware is based on the value we
- * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
- * we wrote to PIPESRC.
- */
-static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
- int *width, int *height)
-{
- if (width)
- *width = cache->plane.src_w;
- if (height)
- *height = cache->plane.src_h;
-}
+struct intel_fbc_funcs {
+ void (*activate)(struct intel_fbc *fbc);
+ void (*deactivate)(struct intel_fbc *fbc);
+ bool (*is_active)(struct intel_fbc *fbc);
+ bool (*is_compressing)(struct intel_fbc *fbc);
+ void (*nuke)(struct intel_fbc *fbc);
+ void (*program_cfb)(struct intel_fbc *fbc);
+ void (*set_false_color)(struct intel_fbc *fbc, bool enable);
+};
+
+struct intel_fbc_state {
+ struct intel_plane *plane;
+ unsigned int cfb_stride;
+ unsigned int cfb_size;
+ unsigned int fence_y_offset;
+ u16 override_cfb_stride;
+ u16 interval;
+ s8 fence_id;
+};
+
+struct intel_fbc {
+ struct drm_i915_private *i915;
+ const struct intel_fbc_funcs *funcs;
+
+ /*
+ * This is always the inner lock when overlapping with
+ * struct_mutex and it's the outer lock when overlapping
+ * with stolen_lock.
+ */
+ struct mutex lock;
+ unsigned int possible_framebuffer_bits;
+ unsigned int busy_bits;
+
+ struct drm_mm_node compressed_fb;
+ struct drm_mm_node compressed_llb;
+
+ u8 limit;
+
+ bool false_color;
+
+ bool active;
+ bool activated;
+ bool flip_pending;
+
+ bool underrun_detected;
+ struct work_struct underrun_work;
+
+ /*
+ * This structure contains everything that's relevant to program the
+ * hardware registers. When we want to figure out if we need to disable
+	 * and re-enable FBC for a new configuration, we just check if there's
+ * something different in the struct. The genx_fbc_activate functions
+ * are supposed to read from it in order to program the registers.
+ */
+ struct intel_fbc_state state;
+ const char *no_fbc_reason;
+};
/* plane stride in pixels */
static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
@@ -68,7 +113,7 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int stride;
- stride = plane_state->view.color_plane[0].stride;
+ stride = plane_state->view.color_plane[0].mapping_stride;
if (!drm_rotation_90_or_270(plane_state->hw.rotation))
stride /= fb->format->cpp[0];
@@ -76,24 +121,25 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane
}
/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int _intel_fbc_cfb_stride(const struct intel_fbc_state_cache *cache)
+static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
- return cache->fb.stride * cpp;
+ return intel_fbc_plane_stride(plane_state) * cpp;
}
/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int skl_fbc_min_cfb_stride(struct drm_i915_private *i915,
- const struct intel_fbc_state_cache *cache)
+static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
unsigned int limit = 4; /* 1:4 compression limit is the worst case */
unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
+ unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
unsigned int height = 4; /* FBC segment is 4 lines */
unsigned int stride;
/* minimum segment stride we can use */
- stride = cache->plane.src_w * cpp * height / limit;
+ stride = width * cpp * height / limit;
/*
* Wa_16011863758: icl+
@@ -113,10 +159,10 @@ static unsigned int skl_fbc_min_cfb_stride(struct drm_i915_private *i915,
}
/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int intel_fbc_cfb_stride(struct drm_i915_private *i915,
- const struct intel_fbc_state_cache *cache)
+static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
- unsigned int stride = _intel_fbc_cfb_stride(cache);
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ unsigned int stride = _intel_fbc_cfb_stride(plane_state);
/*
* At least some of the platforms require each 4 line segment to
@@ -124,98 +170,202 @@ static unsigned int intel_fbc_cfb_stride(struct drm_i915_private *i915,
* that regardless of the compression limit we choose later.
*/
if (DISPLAY_VER(i915) >= 9)
- return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(i915, cache));
+ return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
else
return stride;
}
-static unsigned int intel_fbc_cfb_size(struct drm_i915_private *dev_priv,
- const struct intel_fbc_state_cache *cache)
+static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
{
- int lines = cache->plane.src_h;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ int lines = drm_rect_height(&plane_state->uapi.src) >> 16;
- if (DISPLAY_VER(dev_priv) == 7)
+ if (DISPLAY_VER(i915) == 7)
lines = min(lines, 2048);
- else if (DISPLAY_VER(dev_priv) >= 8)
+ else if (DISPLAY_VER(i915) >= 8)
lines = min(lines, 2560);
- return lines * intel_fbc_cfb_stride(dev_priv, cache);
+ return lines * intel_fbc_cfb_stride(plane_state);
+}
+
+static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
+ unsigned int stride = _intel_fbc_cfb_stride(plane_state);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ /*
+ * Override stride in 64 byte units per 4 line segment.
+ *
+ * Gen9 hw miscalculates cfb stride for linear as
+ * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
+ * we always need to use the override there.
+ */
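+	/*
+	 * E.g. a 10240 byte cfb stride is programmed as 10240 * 4 / 64 = 640
+	 * units.
+	 */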
+ if (stride != stride_aligned ||
+ (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
+ return stride_aligned * 4 / 64;
+
+ return 0;
}
-static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
+static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ unsigned int cfb_stride;
+ u32 fbc_ctl;
+
+ cfb_stride = fbc_state->cfb_stride / fbc->limit;
+
+ /* FBC_CTL wants 32B or 64B units */
+ if (DISPLAY_VER(i915) == 2)
+ cfb_stride = (cfb_stride / 32) - 1;
+ else
+ cfb_stride = (cfb_stride / 64) - 1;
+
+ fbc_ctl = FBC_CTL_PERIODIC |
+ FBC_CTL_INTERVAL(fbc_state->interval) |
+ FBC_CTL_STRIDE(cfb_stride);
+
+ if (IS_I945GM(i915))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+
+ if (fbc_state->fence_id >= 0)
+ fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id);
+
+ return fbc_ctl;
+}
+
+static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ u32 fbc_ctl2;
+
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
+ FBC_CTL_PLANE(fbc_state->plane->i9xx_plane);
+
+ if (fbc_state->fence_id >= 0)
+ fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;
+
+ return fbc_ctl2;
+}
+
+static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
u32 fbc_ctl;
/* Disable compression */
- fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
+ fbc_ctl = intel_de_read(i915, FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
fbc_ctl &= ~FBC_CTL_EN;
- intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
+ intel_de_write(i915, FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
+ if (intel_de_wait_for_clear(i915, FBC_STATUS,
FBC_STAT_COMPRESSING, 10)) {
- drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
+ drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
return;
}
}
-static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
+static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
- const struct intel_fbc_reg_params *params = &fbc->params;
- int cfb_pitch;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
int i;
- u32 fbc_ctl;
-
- cfb_pitch = params->cfb_stride / fbc->limit;
-
- /* FBC_CTL wants 32B or 64B units */
- if (DISPLAY_VER(dev_priv) == 2)
- cfb_pitch = (cfb_pitch / 32) - 1;
- else
- cfb_pitch = (cfb_pitch / 64) - 1;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- intel_de_write(dev_priv, FBC_TAG(i), 0);
-
- if (DISPLAY_VER(dev_priv) == 4) {
- u32 fbc_ctl2;
-
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
- fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
- if (params->fence_id >= 0)
- fbc_ctl2 |= FBC_CTL_CPU_FENCE;
- intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
- intel_de_write(dev_priv, FBC_FENCE_OFF,
- params->fence_y_offset);
+ intel_de_write(i915, FBC_TAG(i), 0);
+
+ if (DISPLAY_VER(i915) == 4) {
+ intel_de_write(i915, FBC_CONTROL2,
+ i965_fbc_ctl2(fbc));
+ intel_de_write(i915, FBC_FENCE_OFF,
+ fbc_state->fence_y_offset);
}
- /* enable it... */
- fbc_ctl = FBC_CTL_INTERVAL(params->interval);
- fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev_priv))
- fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff);
- if (params->fence_id >= 0)
- fbc_ctl |= FBC_CTL_FENCENO(params->fence_id);
- intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
+ intel_de_write(i915, FBC_CONTROL,
+ FBC_CTL_EN | i8xx_fbc_ctl(fbc));
+}
+
+static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
+{
+ return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
+{
+ return intel_de_read(fbc->i915, FBC_STATUS) &
+ (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
+}
+
+static void i8xx_fbc_nuke(struct intel_fbc *fbc)
+{
+ struct intel_fbc_state *fbc_state = &fbc->state;
+ enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
+ struct drm_i915_private *dev_priv = fbc->i915;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
+ spin_unlock_irq(&dev_priv->uncore.lock);
}
-static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
+static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
- return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN;
+ struct drm_i915_private *i915 = fbc->i915;
+
+ GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
+ fbc->compressed_fb.start, U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
+ fbc->compressed_llb.start, U32_MAX));
+
+ intel_de_write(i915, FBC_CFB_BASE,
+ i915->dsm.start + fbc->compressed_fb.start);
+ intel_de_write(i915, FBC_LL_BASE,
+ i915->dsm.start + fbc->compressed_llb.start);
}
-static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915)
+static const struct intel_fbc_funcs i8xx_fbc_funcs = {
+ .activate = i8xx_fbc_activate,
+ .deactivate = i8xx_fbc_deactivate,
+ .is_active = i8xx_fbc_is_active,
+ .is_compressing = i8xx_fbc_is_compressing,
+ .nuke = i8xx_fbc_nuke,
+ .program_cfb = i8xx_fbc_program_cfb,
+};
+
+static void i965_fbc_nuke(struct intel_fbc *fbc)
{
- switch (i915->fbc.limit) {
+ struct intel_fbc_state *fbc_state = &fbc->state;
+ enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
+ struct drm_i915_private *dev_priv = fbc->i915;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+static const struct intel_fbc_funcs i965_fbc_funcs = {
+ .activate = i8xx_fbc_activate,
+ .deactivate = i8xx_fbc_deactivate,
+ .is_active = i8xx_fbc_is_active,
+ .is_compressing = i8xx_fbc_is_compressing,
+ .nuke = i965_fbc_nuke,
+ .program_cfb = i8xx_fbc_program_cfb,
+};
+
+static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
+{
+ switch (fbc->limit) {
default:
- MISSING_CASE(i915->fbc.limit);
+ MISSING_CASE(fbc->limit);
fallthrough;
case 1:
return DPFC_CTL_LIMIT_1X;
@@ -226,260 +376,306 @@ static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915)
}
}
-static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
+static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
- struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
u32 dpfc_ctl;
- dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
+ dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
+ DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);
- dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
+ if (IS_G4X(i915))
+ dpfc_ctl |= DPFC_CTL_SR_EN;
- if (params->fence_id >= 0) {
- dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
- intel_de_write(dev_priv, DPFC_FENCE_YOFF,
- params->fence_y_offset);
- } else {
- intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
+ if (fbc_state->fence_id >= 0) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;
+
+ if (DISPLAY_VER(i915) < 6)
+ dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
}
- /* enable it... */
- intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ return dpfc_ctl;
}
-static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
+static void g4x_fbc_activate(struct intel_fbc *fbc)
{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+
+ intel_de_write(i915, DPFC_FENCE_YOFF,
+ fbc_state->fence_y_offset);
+
+ intel_de_write(i915, DPFC_CONTROL,
+ DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
+}
+
+static void g4x_fbc_deactivate(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL);
+ dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl);
+ intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
}
}
-static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
+static bool g4x_fbc_is_active(struct intel_fbc *fbc)
{
- return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
+ return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv)
+static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
{
- struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
- enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+ return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+}
- spin_lock_irq(&dev_priv->uncore.lock);
- intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
- intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
- spin_unlock_irq(&dev_priv->uncore.lock);
+static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
}
-static void i965_fbc_recompress(struct drm_i915_private *dev_priv)
+static const struct intel_fbc_funcs g4x_fbc_funcs = {
+ .activate = g4x_fbc_activate,
+ .deactivate = g4x_fbc_deactivate,
+ .is_active = g4x_fbc_is_active,
+ .is_compressing = g4x_fbc_is_compressing,
+ .nuke = i965_fbc_nuke,
+ .program_cfb = g4x_fbc_program_cfb,
+};
+
+static void ilk_fbc_activate(struct intel_fbc *fbc)
{
- struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
- enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+ struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
- spin_lock_irq(&dev_priv->uncore.lock);
- intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
- intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_de_write(i915, ILK_DPFC_FENCE_YOFF,
+ fbc_state->fence_y_offset);
+
+ intel_de_write(i915, ILK_DPFC_CONTROL,
+ DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
-/* This function forces a CFB recompression through the nuke operation. */
-static void snb_fbc_recompress(struct drm_i915_private *dev_priv)
+static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
- intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
- intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ intel_de_write(i915, ILK_DPFC_CONTROL, dpfc_ctl);
+ }
+}
+
+static bool ilk_fbc_is_active(struct intel_fbc *fbc)
+{
+ return intel_de_read(fbc->i915, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ return intel_de_read(fbc->i915, ILK_DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+}
- trace_intel_fbc_nuke(fbc->crtc);
+static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
- if (DISPLAY_VER(dev_priv) >= 6)
- snb_fbc_recompress(dev_priv);
- else if (DISPLAY_VER(dev_priv) >= 4)
- i965_fbc_recompress(dev_priv);
- else
- i8xx_fbc_recompress(dev_priv);
+ intel_de_write(i915, ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
}
-static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
+static const struct intel_fbc_funcs ilk_fbc_funcs = {
+ .activate = ilk_fbc_activate,
+ .deactivate = ilk_fbc_deactivate,
+ .is_active = ilk_fbc_is_active,
+ .is_compressing = ilk_fbc_is_compressing,
+ .nuke = i965_fbc_nuke,
+ .program_cfb = ilk_fbc_program_cfb,
+};
+
+static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
- struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
- u32 dpfc_ctl;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 ctl = 0;
- dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
+ if (fbc_state->fence_id >= 0)
+ ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);
- dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
+ intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
+ intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
+}
- if (params->fence_id >= 0) {
- dpfc_ctl |= DPFC_CTL_FENCE_EN;
- if (IS_IRONLAKE(dev_priv))
- dpfc_ctl |= params->fence_id;
- if (IS_SANDYBRIDGE(dev_priv)) {
- intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | params->fence_id);
- intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
- params->fence_y_offset);
- }
- } else {
- if (IS_SANDYBRIDGE(dev_priv)) {
- intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
- intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
- }
- }
+static void snb_fbc_activate(struct intel_fbc *fbc)
+{
+ snb_fbc_program_fence(fbc);
- intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
- params->fence_y_offset);
- /* enable it... */
- intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ ilk_fbc_activate(fbc);
}
-static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
+static void snb_fbc_nuke(struct intel_fbc *fbc)
{
- u32 dpfc_ctl;
+ struct drm_i915_private *i915 = fbc->i915;
- /* Disable compression */
- dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl);
- }
+ intel_de_write(i915, MSG_FBC_REND_STATE, FBC_REND_NUKE);
+ intel_de_posting_read(i915, MSG_FBC_REND_STATE);
+}
+
+static const struct intel_fbc_funcs snb_fbc_funcs = {
+ .activate = snb_fbc_activate,
+ .deactivate = ilk_fbc_deactivate,
+ .is_active = ilk_fbc_is_active,
+ .is_compressing = ilk_fbc_is_compressing,
+ .nuke = snb_fbc_nuke,
+ .program_cfb = ilk_fbc_program_cfb,
+};
+
+static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 val = 0;
+
+ if (fbc_state->override_cfb_stride)
+ val |= FBC_STRIDE_OVERRIDE |
+ FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
+
+ intel_de_write(i915, GLK_FBC_STRIDE, val);
}
-static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
+static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
- return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 val = 0;
+
+ /* Display WA #0529: skl, kbl, bxt. */
+ if (fbc_state->override_cfb_stride)
+ val |= CHICKEN_FBC_STRIDE_OVERRIDE |
+ CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
+
+ intel_de_rmw(i915, CHICKEN_MISC_4,
+ CHICKEN_FBC_STRIDE_OVERRIDE |
+ CHICKEN_FBC_STRIDE_MASK, val);
}
-static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
+static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
- const struct intel_fbc_reg_params *params = &fbc->params;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
u32 dpfc_ctl;
- if (DISPLAY_VER(dev_priv) >= 10) {
- u32 val = 0;
+ dpfc_ctl = g4x_dpfc_ctl_limit(fbc);
- if (params->override_cfb_stride)
- val |= FBC_STRIDE_OVERRIDE |
- FBC_STRIDE(params->override_cfb_stride / fbc->limit);
+ if (IS_IVYBRIDGE(i915))
+ dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);
- intel_de_write(dev_priv, GLK_FBC_STRIDE, val);
- } else if (DISPLAY_VER(dev_priv) == 9) {
- u32 val = 0;
+ if (fbc_state->fence_id >= 0)
+ dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;
- /* Display WA #0529: skl, kbl, bxt. */
- if (params->override_cfb_stride)
- val |= CHICKEN_FBC_STRIDE_OVERRIDE |
- CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit);
+ if (fbc->false_color)
+ dpfc_ctl |= DPFC_CTL_FALSE_COLOR;
- intel_de_rmw(dev_priv, CHICKEN_MISC_4,
- CHICKEN_FBC_STRIDE_OVERRIDE |
- CHICKEN_FBC_STRIDE_MASK, val);
- }
+ return dpfc_ctl;
+}
- dpfc_ctl = 0;
- if (IS_IVYBRIDGE(dev_priv))
- dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
-
- dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
-
- if (params->fence_id >= 0) {
- dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
- intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | params->fence_id);
- intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
- params->fence_y_offset);
- } else if (dev_priv->ggtt.num_fences) {
- intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
- intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
- }
+static void ivb_fbc_activate(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ if (DISPLAY_VER(i915) >= 10)
+ glk_fbc_program_cfb_stride(fbc);
+ else if (DISPLAY_VER(i915) == 9)
+ skl_fbc_program_cfb_stride(fbc);
- if (dev_priv->fbc.false_color)
- dpfc_ctl |= FBC_CTL_FALSE_COLOR;
+ if (i915->ggtt.num_fences)
+ snb_fbc_program_fence(fbc);
- intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ intel_de_write(i915, ILK_DPFC_CONTROL,
+ DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
}
-static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
+static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
{
- if (DISPLAY_VER(dev_priv) >= 5)
- return ilk_fbc_is_active(dev_priv);
- else if (IS_GM45(dev_priv))
- return g4x_fbc_is_active(dev_priv);
- else
- return i8xx_fbc_is_active(dev_priv);
+ return intel_de_read(fbc->i915, ILK_DPFC_STATUS2) & DPFC_COMP_SEG_MASK_IVB;
}
-static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
+static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
+ bool enable)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL,
+ DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
+}
- trace_intel_fbc_activate(fbc->crtc);
+static const struct intel_fbc_funcs ivb_fbc_funcs = {
+ .activate = ivb_fbc_activate,
+ .deactivate = ilk_fbc_deactivate,
+ .is_active = ilk_fbc_is_active,
+ .is_compressing = ivb_fbc_is_compressing,
+ .nuke = snb_fbc_nuke,
+ .program_cfb = ilk_fbc_program_cfb,
+ .set_false_color = ivb_fbc_set_false_color,
+};
+
+static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
+{
+ return fbc->funcs->is_active(fbc);
+}
+
+static void intel_fbc_hw_activate(struct intel_fbc *fbc)
+{
+ trace_intel_fbc_activate(fbc->state.plane);
fbc->active = true;
fbc->activated = true;
- if (DISPLAY_VER(dev_priv) >= 7)
- gen7_fbc_activate(dev_priv);
- else if (DISPLAY_VER(dev_priv) >= 5)
- ilk_fbc_activate(dev_priv);
- else if (IS_GM45(dev_priv))
- g4x_fbc_activate(dev_priv);
- else
- i8xx_fbc_activate(dev_priv);
+ fbc->funcs->activate(fbc);
}
-static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
+static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- trace_intel_fbc_deactivate(fbc->crtc);
+ trace_intel_fbc_deactivate(fbc->state.plane);
fbc->active = false;
- if (DISPLAY_VER(dev_priv) >= 5)
- ilk_fbc_deactivate(dev_priv);
- else if (IS_GM45(dev_priv))
- g4x_fbc_deactivate(dev_priv);
- else
- i8xx_fbc_deactivate(dev_priv);
+ fbc->funcs->deactivate(fbc);
}
-/**
- * intel_fbc_is_active - Is FBC active?
- * @dev_priv: i915 device instance
- *
- * This function is used to verify the current state of FBC.
- *
- * FIXME: This should be tracked in the plane config eventually
- * instead of queried at runtime for most callers.
- */
-bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
+static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
+{
+ return fbc->funcs->is_compressing(fbc);
+}
+
+static void intel_fbc_nuke(struct intel_fbc *fbc)
{
- return dev_priv->fbc.active;
+ trace_intel_fbc_nuke(fbc->state.plane);
+
+ fbc->funcs->nuke(fbc);
}
-static void intel_fbc_activate(struct drm_i915_private *dev_priv)
+static void intel_fbc_activate(struct intel_fbc *fbc)
{
- intel_fbc_hw_activate(dev_priv);
- intel_fbc_recompress(dev_priv);
+ intel_fbc_hw_activate(fbc);
+ intel_fbc_nuke(fbc);
+
+ fbc->no_fbc_reason = NULL;
}
-static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
- const char *reason)
+static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct drm_i915_private *i915 = fbc->i915;
- drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
if (fbc->active)
- intel_fbc_hw_deactivate(dev_priv);
+ intel_fbc_hw_deactivate(fbc);
fbc->no_fbc_reason = reason;
}
@@ -492,7 +688,7 @@ static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
return BIT_ULL(32);
}
-static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv)
+static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
{
u64 end;
@@ -500,24 +696,24 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv)
* reserved range size, so it always assumes the maximum (8mb) is used.
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
- if (IS_BROADWELL(dev_priv) || (DISPLAY_VER(dev_priv) == 9 &&
- !IS_BROXTON(dev_priv)))
- end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
+ if (IS_BROADWELL(i915) ||
+ (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
+ end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
else
end = U64_MAX;
- return min(end, intel_fbc_cfb_base_max(dev_priv));
+ return min(end, intel_fbc_cfb_base_max(i915));
}
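
As a worked illustration of the carve-out above (a sketch; the 64 MiB stolen size is an assumption): on Broadwell, or display version 9 other than Broxton, the CFB must end 8 MiB below the top of stolen memory.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t dsm_size = 64ull << 20;        /* assumed stolen size */
            uint64_t end = dsm_size - 8 * 1024 * 1024;

            printf("CFB must end below byte %llu\n",
                   (unsigned long long)end);
            return 0;
    }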
-static int intel_fbc_min_limit(int fb_cpp)
+static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
{
- return fb_cpp == 2 ? 2 : 1;
+ return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
}
-static int intel_fbc_max_limit(struct drm_i915_private *dev_priv)
+static int intel_fbc_max_limit(struct drm_i915_private *i915)
{
/* WaFbcOnly1to1Ratio:ctg */
- if (IS_G4X(dev_priv))
+ if (IS_G4X(i915))
return 1;
/*
@@ -527,23 +723,23 @@ static int intel_fbc_max_limit(struct drm_i915_private *dev_priv)
return 4;
}
-static int find_compression_limit(struct drm_i915_private *dev_priv,
+static int find_compression_limit(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
- u64 end = intel_fbc_stolen_end(dev_priv);
+ struct drm_i915_private *i915 = fbc->i915;
+ u64 end = intel_fbc_stolen_end(i915);
int ret, limit = min_limit;
size /= limit;
/* Try to over-allocate to reduce reallocations and fragmentation. */
- ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
+ ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
size <<= 1, 4096, 0, end);
if (ret == 0)
return limit;
- for (; limit <= intel_fbc_max_limit(dev_priv); limit <<= 1) {
- ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
+ for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
+ ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
return limit;
@@ -552,34 +748,34 @@ static int find_compression_limit(struct drm_i915_private *dev_priv,
return 0;
}
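
A self-contained model of the search above: over-allocate (2x) at the minimum limit first, then retry with the request halved as the compression limit doubles. The stub allocator and the 32 MiB pool stand in for i915_gem_stolen_insert_node_in_range() and real stolen memory:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long long pool = 32ull << 20; /* hypothetical stolen pool */

    static bool stolen_alloc(unsigned long long size)
    {
            return size <= pool;    /* stub: succeeds if the request fits */
    }

    static int find_limit(unsigned long long size, int min_limit, int max_limit)
    {
            int limit = min_limit;

            size /= limit;

            /* try to over-allocate to reduce reallocations and fragmentation */
            if (stolen_alloc(size <<= 1))
                    return limit;

            /* first iteration halves back to the exact size at min_limit */
            for (; limit <= max_limit; limit <<= 1) {
                    if (stolen_alloc(size >>= 1))
                            return limit;
            }

            return 0;       /* nothing fit at any limit */
    }

    int main(void)
    {
            /* a 48 MiB request against a 32 MiB pool resolves to limit 2 */
            printf("limit = %d\n", find_limit(48ull << 20, 1, 4));
            return 0;
    }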
-static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
+static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct drm_i915_private *i915 = fbc->i915;
int ret;
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(&i915->drm,
drm_mm_node_allocated(&fbc->compressed_fb));
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(&i915->drm,
drm_mm_node_allocated(&fbc->compressed_llb));
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
- ret = i915_gem_stolen_insert_node(dev_priv, &fbc->compressed_llb,
+ if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
+ ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
4096, 4096);
if (ret)
goto err;
}
- ret = find_compression_limit(dev_priv, size, min_limit);
+ ret = find_compression_limit(fbc, size, min_limit);
if (!ret)
goto err_llb;
else if (ret > min_limit)
- drm_info_once(&dev_priv->drm,
+ drm_info_once(&i915->drm,
"Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
fbc->limit = ret;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
fbc->compressed_fb.size, fbc->limit);
@@ -587,83 +783,69 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
err_llb:
if (drm_mm_node_allocated(&fbc->compressed_llb))
- i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb);
+ i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
err:
- if (drm_mm_initialized(&dev_priv->mm.stolen))
- drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ if (drm_mm_initialized(&i915->mm.stolen))
+ drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
-static void intel_fbc_program_cfb(struct drm_i915_private *dev_priv)
+static void intel_fbc_program_cfb(struct intel_fbc *fbc)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- if (DISPLAY_VER(dev_priv) >= 5) {
- intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
- fbc->compressed_fb.start);
- } else if (IS_GM45(dev_priv)) {
- intel_de_write(dev_priv, DPFC_CB_BASE,
- fbc->compressed_fb.start);
- } else {
- GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
- fbc->compressed_fb.start,
- U32_MAX));
- GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
- fbc->compressed_llb.start,
- U32_MAX));
-
- intel_de_write(dev_priv, FBC_CFB_BASE,
- dev_priv->dsm.start + fbc->compressed_fb.start);
- intel_de_write(dev_priv, FBC_LL_BASE,
- dev_priv->dsm.start + fbc->compressed_llb.start);
- }
+ fbc->funcs->program_cfb(fbc);
}
-static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct drm_i915_private *i915 = fbc->i915;
- if (WARN_ON(intel_fbc_hw_is_active(dev_priv)))
+ if (WARN_ON(intel_fbc_hw_is_active(fbc)))
return;
if (drm_mm_node_allocated(&fbc->compressed_llb))
- i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb);
+ i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
if (drm_mm_node_allocated(&fbc->compressed_fb))
- i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+ i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
}
-void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+void intel_fbc_cleanup(struct drm_i915_private *i915)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc *fbc = i915->fbc;
- if (!HAS_FBC(dev_priv))
+ if (!fbc)
return;
mutex_lock(&fbc->lock);
- __intel_fbc_cleanup_cfb(dev_priv);
+ __intel_fbc_cleanup_cfb(fbc);
mutex_unlock(&fbc->lock);
+
+ kfree(fbc);
}
-static bool stride_is_valid(struct drm_i915_private *dev_priv,
- u64 modifier, unsigned int stride)
+static bool stride_is_valid(const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int stride = intel_fbc_plane_stride(plane_state) *
+ fb->format->cpp[0];
+
/* This should have been caught earlier. */
- if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
+ if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0))
return false;
/* Below are the additional FBC restrictions. */
if (stride < 512)
return false;
- if (DISPLAY_VER(dev_priv) == 2 || DISPLAY_VER(dev_priv) == 3)
+ if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3)
return stride == 4096 || stride == 8192;
- if (DISPLAY_VER(dev_priv) == 4 && !IS_G4X(dev_priv) && stride < 2048)
+ if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048)
return false;
/* Display WA #1105: skl,bxt,kbl,cfl,glk */
- if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
- modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
+ if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
return false;
if (stride > 16384)
@@ -672,20 +854,22 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
return true;
}
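
A condensed, standalone model of the common-path rules above (display version 5+, without the gen2/3 fixed-stride and WA #1105 special cases); the sample strides are made up:

    #include <stdbool.h>
    #include <stdio.h>

    static bool stride_ok(unsigned int stride)
    {
            if (stride & (64 - 1))  /* must be 64 byte aligned */
                    return false;
            return stride >= 512 && stride <= 16384;
    }

    int main(void)
    {
            printf("1920*4 = 7680: %d\n", stride_ok(1920 * 4)); /* 1 */
            printf("          400: %d\n", stride_ok(400));      /* 0 */
            return 0;
    }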
-static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
- u32 pixel_format)
+static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
- switch (pixel_format) {
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ switch (fb->format->format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
return true;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(i915) == 2)
return false;
/* WaFbcOnly1to1Ratio:ctg */
- if (IS_G4X(dev_priv))
+ if (IS_G4X(i915))
return false;
return true;
default:
@@ -693,13 +877,16 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
}
}
-static bool rotation_is_valid(struct drm_i915_private *dev_priv,
- u32 pixel_format, unsigned int rotation)
+static bool rotation_is_valid(const struct intel_plane_state *plane_state)
{
- if (DISPLAY_VER(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+
+ if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 &&
drm_rotation_90_or_270(rotation))
return false;
- else if (DISPLAY_VER(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
+ else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) &&
rotation != DRM_MODE_ROTATE_0)
return false;
@@ -712,19 +899,18 @@ static bool rotation_is_valid(struct drm_i915_private *dev_priv,
* the X and Y offset registers. That's why we include the src x/y offsets
* instead of just looking at the plane size.
*/
-static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
+static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
unsigned int effective_w, effective_h, max_w, max_h;
- if (DISPLAY_VER(dev_priv) >= 10) {
+ if (DISPLAY_VER(i915) >= 10) {
max_w = 5120;
max_h = 4096;
- } else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
+ } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
max_w = 4096;
max_h = 4096;
- } else if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) {
+ } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
max_w = 4096;
max_h = 2048;
} else {
@@ -732,22 +918,24 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
max_h = 1536;
}
- intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
- &effective_h);
- effective_w += fbc->state_cache.plane.adjusted_x;
- effective_h += fbc->state_cache.plane.adjusted_y;
+ effective_w = plane_state->view.color_plane[0].x +
+ (drm_rect_width(&plane_state->uapi.src) >> 16);
+ effective_h = plane_state->view.color_plane[0].y +
+ (drm_rect_height(&plane_state->uapi.src) >> 16);
return effective_w <= max_w && effective_h <= max_h;
}
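
A quick standalone check of the logic above with illustrative numbers: a 4096x2160 source with a color-plane y offset of 8 fits the display version 10+ limits but exceeds the G4X/ILK-class 4096x2048 cap.

    #include <stdio.h>

    int main(void)
    {
            unsigned int x = 0, y = 8;              /* assumed plane offsets */
            unsigned int src_w = 4096, src_h = 2160;
            unsigned int eff_w = x + src_w, eff_h = y + src_h;

            printf("glk+ (5120x4096): %d\n", eff_w <= 5120 && eff_h <= 4096);
            printf("g4x  (4096x2048): %d\n", eff_w <= 4096 && eff_h <= 2048);
            return 0;
    }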
-static bool tiling_is_valid(struct drm_i915_private *dev_priv,
- u64 modifier)
+static bool tiling_is_valid(const struct intel_plane_state *plane_state)
{
- switch (modifier) {
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
- return DISPLAY_VER(dev_priv) >= 9;
+ return DISPLAY_VER(i915) >= 9;
case I915_FORMAT_MOD_X_TILED:
return true;
default:
@@ -755,210 +943,163 @@ static bool tiling_is_valid(struct drm_i915_private *dev_priv,
}
}
-static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static void intel_fbc_update_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_state_cache *cache = &fbc->state_cache;
- struct drm_framebuffer *fb = plane_state->hw.fb;
-
- cache->plane.visible = plane_state->uapi.visible;
- if (!cache->plane.visible)
- return;
-
- cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
-
- cache->plane.rotation = plane_state->hw.rotation;
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- cache->plane.adjusted_x = plane_state->view.color_plane[0].x;
- cache->plane.adjusted_y = plane_state->view.color_plane[0].y;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ struct intel_fbc *fbc = plane->fbc;
+ struct intel_fbc_state *fbc_state = &fbc->state;
- cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
+ WARN_ON(plane_state->no_fbc_reason);
- cache->fb.format = fb->format;
- cache->fb.modifier = fb->modifier;
- cache->fb.stride = intel_fbc_plane_stride(plane_state);
+ fbc_state->plane = plane;
/* FBC1 compression interval: arbitrary choice of 1 second */
- cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
+ fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
- cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
+ fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
- drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
+ drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
!plane_state->ggtt_vma->fence);
if (plane_state->flags & PLANE_HAS_FENCE &&
plane_state->ggtt_vma->fence)
- cache->fence_id = plane_state->ggtt_vma->fence->id;
+ fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
else
- cache->fence_id = -1;
+ fbc_state->fence_id = -1;
- cache->psr2_active = crtc_state->has_psr2;
+ fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
+ fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
+ fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
}
-static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
+static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- return intel_fbc_cfb_size(dev_priv, &fbc->state_cache) >
- fbc->compressed_fb.size * fbc->limit;
-}
-
-static u16 intel_fbc_override_cfb_stride(struct drm_i915_private *dev_priv,
- const struct intel_fbc_state_cache *cache)
-{
- unsigned int stride = _intel_fbc_cfb_stride(cache);
- unsigned int stride_aligned = intel_fbc_cfb_stride(dev_priv, cache);
-
- /*
- * Override stride in 64 byte units per 4 line segment.
+ /* The use of a CPU fence is one of two ways to detect writes by the
+ * CPU to the scanout and trigger updates to the FBC.
*
- * Gen9 hw miscalculates cfb stride for linear as
- * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
- * we always need to use the override there.
+ * The other method is by software tracking (see
+ * intel_fbc_invalidate/flush()), which manually notifies FBC and nukes
+ * the current compressed buffer so that it gets recompressed.
+ *
+ * Note that it is possible for a tiled surface to be unmappable (and
+ * so have no fence associated with it) due to aperture constraints
+ * at the time of pinning.
+ *
+ * FIXME with 90/270 degree rotation we should use the fence on
+ * the normal GTT view (the rotated view doesn't even have a
+ * fence). Would need changes to the FBC fence Y offset as well.
+ * For now this will effectively disable FBC with 90/270 degree
+ * rotation.
*/
- if (stride != stride_aligned ||
- (DISPLAY_VER(dev_priv) == 9 &&
- cache->fb.modifier == DRM_FORMAT_MOD_LINEAR))
- return stride_aligned * 4 / 64;
-
- return 0;
+ return DISPLAY_VER(i915) >= 9 ||
+ (plane_state->flags & PLANE_HAS_FENCE &&
+ plane_state->ggtt_vma->fence);
}
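
The rule above boils down to a two-input predicate; a minimal model (the display versions and fence availability are illustrative): gen9+ can rely on software frontbuffer tracking alone, older hardware needs a fence.

    #include <stdbool.h>
    #include <stdio.h>

    static bool fence_ok(int display_ver, bool has_fence)
    {
            /* gen9+ can fall back to software frontbuffer tracking */
            return display_ver >= 9 || has_fence;
    }

    int main(void)
    {
            printf("ilk without fence: %d\n", fence_ok(5, false)); /* 0 */
            printf("skl without fence: %d\n", fence_ok(9, false)); /* 1 */
            return 0;
    }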
-static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- if (intel_vgpu_active(dev_priv)) {
- fbc->no_fbc_reason = "VGPU is active";
- return false;
- }
-
- if (!dev_priv->params.enable_fbc) {
- fbc->no_fbc_reason = "disabled per module param or by default";
- return false;
- }
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct intel_fbc *fbc = plane->fbc;
- if (fbc->underrun_detected) {
- fbc->no_fbc_reason = "underrun detected";
- return false;
- }
+ return intel_fbc_min_limit(plane_state) <= fbc->limit &&
+ intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit;
+}
- return true;
+static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
+{
+ return !plane_state->no_fbc_reason &&
+ intel_fbc_is_fence_ok(plane_state) &&
+ intel_fbc_is_cfb_ok(plane_state);
}
-static bool intel_fbc_can_activate(struct intel_crtc *crtc)
+static int intel_fbc_check_plane(struct intel_atomic_state *state,
+ struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+ const struct intel_crtc_state *crtc_state;
+ struct intel_fbc *fbc = plane->fbc;
- if (!intel_fbc_can_enable(dev_priv))
- return false;
+ if (!fbc)
+ return 0;
- if (!cache->plane.visible) {
- fbc->no_fbc_reason = "primary plane not visible";
- return false;
+ if (intel_vgpu_active(i915)) {
+ plane_state->no_fbc_reason = "VGPU active";
+ return 0;
}
- /* We don't need to use a state cache here since this information is
- * global for all CRTC.
- */
- if (fbc->underrun_detected) {
- fbc->no_fbc_reason = "underrun detected";
- return false;
+ if (!i915->params.enable_fbc) {
+ plane_state->no_fbc_reason = "disabled per module param or by default";
+ return 0;
}
- if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
- fbc->no_fbc_reason = "incompatible mode";
- return false;
+ if (!plane_state->uapi.visible) {
+ plane_state->no_fbc_reason = "plane not visible";
+ return 0;
}
- if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
- fbc->no_fbc_reason = "mode too large for compression";
- return false;
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ plane_state->no_fbc_reason = "interlaced mode not supported";
+ return 0;
}
- /* The use of a CPU fence is one of two ways to detect writes by the
- * CPU to the scanout and trigger updates to the FBC.
- *
- * The other method is by software tracking (see
- * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
- * the current compressed buffer and recompress it.
- *
- * Note that is possible for a tiled surface to be unmappable (and
- * so have no fence associated with it) due to aperture constraints
- * at the time of pinning.
- *
- * FIXME with 90/270 degree rotation we should use the fence on
- * the normal GTT view (the rotated view doesn't even have a
- * fence). Would need changes to the FBC fence Y offset as well.
- * For now this will effectively disable FBC with 90/270 degree
- * rotation.
- */
- if (DISPLAY_VER(dev_priv) < 9 && cache->fence_id < 0) {
- fbc->no_fbc_reason = "framebuffer not tiled or fenced";
- return false;
+ if (crtc_state->double_wide) {
+ plane_state->no_fbc_reason = "double wide pipe not supported";
+ return 0;
}
- if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
- fbc->no_fbc_reason = "pixel format is invalid";
+ /*
+ * Display 12+ does not support FBC with PSR2.
+ * The recommendation is to keep this combination disabled.
+ * Bspec: 50422 HSD: 14010260002
+ */
+ if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
+ plane_state->no_fbc_reason = "PSR2 enabled";
- return false;
+ return 0;
}
- if (!rotation_is_valid(dev_priv, cache->fb.format->format,
- cache->plane.rotation)) {
- fbc->no_fbc_reason = "rotation unsupported";
- return false;
+ if (!pixel_format_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "pixel format not supported";
+ return 0;
}
- if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
- fbc->no_fbc_reason = "tiling unsupported";
- return false;
+ if (!tiling_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "tiling not supported";
+ return 0;
}
- if (!stride_is_valid(dev_priv, cache->fb.modifier,
- cache->fb.stride * cache->fb.format->cpp[0])) {
- fbc->no_fbc_reason = "framebuffer stride not supported";
- return false;
+ if (!rotation_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "rotation not supported";
+ return 0;
}
- if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
- cache->fb.format->has_alpha) {
- fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
- return false;
+ if (!stride_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "stride not supported";
+ return 0;
}
- /* WaFbcExceedCdClockThreshold:hsw,bdw */
- if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
- cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
- fbc->no_fbc_reason = "pixel rate is too big";
+ if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+ fb->format->has_alpha) {
+ plane_state->no_fbc_reason = "per-pixel alpha not supported";
- return false;
+ return 0;
}
- /* It is possible for the required CFB size change without a
- * crtc->disable + crtc->enable since it is possible to change the
- * stride without triggering a full modeset. Since we try to
- * over-allocate the CFB, there's a chance we may keep FBC enabled even
- * if this happens, but if we exceed the current CFB size we'll have to
- * disable FBC. Notice that it would be possible to disable FBC, wait
- * for a frame, free the stolen node, then try to reenable FBC in case
- * we didn't get any invalidate/deactivate calls, but this would require
- * a lot of tracking just for a specific case. If we conclude it's an
- * important case, we can implement it later. */
- if (intel_fbc_cfb_size_changed(dev_priv)) {
- fbc->no_fbc_reason = "CFB requirements changed";
- return false;
+ if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ plane_state->no_fbc_reason = "plane size too big";
+ return 0;
}
/*
@@ -966,238 +1107,211 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (DISPLAY_VER(dev_priv) >= 9 &&
- (fbc->state_cache.plane.adjusted_y & 3)) {
- fbc->no_fbc_reason = "plane Y offset is misaligned";
+ if (DISPLAY_VER(i915) >= 9 &&
+ plane_state->view.color_plane[0].y & 3) {
+ plane_state->no_fbc_reason = "plane start Y offset misaligned";
- return false;
+ return 0;
}
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
- if (DISPLAY_VER(dev_priv) >= 11 &&
- (cache->plane.src_h + cache->plane.adjusted_y) % 4) {
- fbc->no_fbc_reason = "plane height + offset is non-modulo of 4";
- return false;
- }
-
- /*
- * Display 12+ is not supporting FBC with PSR2.
- * Recommendation is to keep this combination disabled
- * Bspec: 50422 HSD: 14010260002
- */
- if (fbc->state_cache.psr2_active && DISPLAY_VER(dev_priv) >= 12) {
- fbc->no_fbc_reason = "not supported with PSR2";
+ if (DISPLAY_VER(i915) >= 11 &&
+ (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
+ plane_state->no_fbc_reason = "plane end Y offset misaligned";
- return false;
+ return 0;
}
- return true;
-}
-
-static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
- struct intel_fbc_reg_params *params)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_state_cache *cache = &fbc->state_cache;
-
- /* Since all our fields are integer types, use memset here so the
- * comparison function can rely on memcmp because the padding will be
- * zero. */
- memset(params, 0, sizeof(*params));
-
- params->fence_id = cache->fence_id;
- params->fence_y_offset = cache->fence_y_offset;
-
- params->interval = cache->interval;
+ /* WaFbcExceedCdClockThreshold:hsw,bdw */
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ const struct intel_cdclk_state *cdclk_state;
- params->crtc.pipe = crtc->pipe;
- params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
- params->fb.format = cache->fb.format;
- params->fb.modifier = cache->fb.modifier;
- params->fb.stride = cache->fb.stride;
+ if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
+ plane_state->no_fbc_reason = "pixel rate too high";
+ return 0;
+ }
+ }
- params->cfb_stride = intel_fbc_cfb_stride(dev_priv, cache);
- params->cfb_size = intel_fbc_cfb_size(dev_priv, cache);
- params->override_cfb_stride = intel_fbc_override_cfb_stride(dev_priv, cache);
+ plane_state->no_fbc_reason = NULL;
- params->plane_visible = cache->plane.visible;
+ return 0;
}
-static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_fbc *fbc = &dev_priv->fbc;
- const struct intel_fbc_state_cache *cache = &fbc->state_cache;
- const struct intel_fbc_reg_params *params = &fbc->params;
- if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
- return false;
+static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ const struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
+ const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;
- if (!params->plane_visible)
+ if (drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
return false;
- if (!intel_fbc_can_activate(crtc))
+ if (!intel_fbc_is_ok(old_plane_state) ||
+ !intel_fbc_is_ok(new_plane_state))
return false;
- if (params->fb.format != cache->fb.format)
+ if (old_fb->format->format != new_fb->format->format)
return false;
- if (params->fb.modifier != cache->fb.modifier)
+ if (old_fb->modifier != new_fb->modifier)
return false;
- if (params->fb.stride != cache->fb.stride)
+ if (intel_fbc_plane_stride(old_plane_state) !=
+ intel_fbc_plane_stride(new_plane_state))
return false;
- if (params->cfb_stride != intel_fbc_cfb_stride(dev_priv, cache))
+ if (intel_fbc_cfb_stride(old_plane_state) !=
+ intel_fbc_cfb_stride(new_plane_state))
return false;
- if (params->cfb_size != intel_fbc_cfb_size(dev_priv, cache))
+ if (intel_fbc_cfb_size(old_plane_state) !=
+ intel_fbc_cfb_size(new_plane_state))
return false;
- if (params->override_cfb_stride != intel_fbc_override_cfb_stride(dev_priv, cache))
+ if (intel_fbc_override_cfb_stride(old_plane_state) !=
+ intel_fbc_override_cfb_stride(new_plane_state))
return false;
return true;
}
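
The flip-nuke test is essentially an equality check over the compression-relevant parameters; a standalone sketch with a simplified parameter set (the fields are a made-up subset of the real plane state):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct fbc_params {     /* simplified, illustrative subset */
            unsigned int format, modifier, stride, cfb_stride, cfb_size;
    };

    static bool can_flip_nuke(const struct fbc_params *old,
                              const struct fbc_params *new)
    {
            /* any change forces a full deactivate/reactivate instead */
            return !memcmp(old, new, sizeof(*old));
    }

    int main(void)
    {
            struct fbc_params a = { 1, 2, 7680, 7680, 7680 * 1080 };
            struct fbc_params b = a;

            printf("unchanged:  %d\n", can_flip_nuke(&a, &b)); /* 1 */
            b.stride = 8192;
            printf("new stride: %d\n", can_flip_nuke(&a, &b)); /* 0 */
            return 0;
    }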
-bool intel_fbc_pre_update(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
{
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *plane_state =
- intel_atomic_get_new_plane_state(state, plane);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- const char *reason = "update pending";
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_fbc *fbc = plane->fbc;
bool need_vblank_wait = false;
- if (!plane->has_fbc || !plane_state)
- return need_vblank_wait;
-
- mutex_lock(&fbc->lock);
+ fbc->flip_pending = true;
- if (fbc->crtc != crtc)
- goto unlock;
+ if (intel_fbc_can_flip_nuke(state, crtc, plane))
+ return need_vblank_wait;
- intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
- fbc->flip_pending = true;
+ intel_fbc_deactivate(fbc, "update pending");
- if (!intel_fbc_can_flip_nuke(crtc_state)) {
- intel_fbc_deactivate(dev_priv, reason);
-
- /*
- * Display WA #1198: glk+
- * Need an extra vblank wait between FBC disable and most plane
- * updates. Bspec says this is only needed for plane disable, but
- * that is not true. Touching most plane registers will cause the
- * corruption to appear. Also SKL/derivatives do not seem to be
- * affected.
- *
- * TODO: could optimize this a bit by sampling the frame
- * counter when we disable FBC (if it was already done earlier)
- * and skipping the extra vblank wait before the plane update
- * if at least one frame has already passed.
- */
- if (fbc->activated &&
- DISPLAY_VER(dev_priv) >= 10)
- need_vblank_wait = true;
- fbc->activated = false;
- }
-unlock:
- mutex_unlock(&fbc->lock);
+ /*
+ * Display WA #1198: glk+
+ * Need an extra vblank wait between FBC disable and most plane
+ * updates. Bspec says this is only needed for plane disable, but
+ * that is not true. Touching most plane registers will cause the
+ * corruption to appear. Also SKL/derivatives do not seem to be
+ * affected.
+ *
+ * TODO: could optimize this a bit by sampling the frame
+ * counter when we disable FBC (if it was already done earlier)
+ * and skipping the extra vblank wait before the plane update
+ * if at least one frame has already passed.
+ */
+ if (fbc->activated && DISPLAY_VER(i915) >= 10)
+ need_vblank_wait = true;
+ fbc->activated = false;
return need_vblank_wait;
}
-/**
- * __intel_fbc_disable - disable FBC
- * @dev_priv: i915 device instance
- *
- * This is the low level function that actually disables FBC. Callers should
- * grab the FBC lock.
- */
-static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_crtc *crtc = fbc->crtc;
+ const struct intel_plane_state *plane_state;
+ bool need_vblank_wait = false;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_fbc *fbc = plane->fbc;
- drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
- drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
- drm_WARN_ON(&dev_priv->drm, fbc->active);
+ if (!fbc || plane->pipe != crtc->pipe)
+ continue;
- drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n",
- pipe_name(crtc->pipe));
+ mutex_lock(&fbc->lock);
- __intel_fbc_cleanup_cfb(dev_priv);
+ if (fbc->state.plane == plane)
+ need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane);
- fbc->crtc = NULL;
+ mutex_unlock(&fbc->lock);
+ }
+
+ return need_vblank_wait;
}
-static void __intel_fbc_post_update(struct intel_crtc *crtc)
+static void __intel_fbc_disable(struct intel_fbc *fbc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
+ struct drm_i915_private *i915 = fbc->i915;
+ struct intel_plane *plane = fbc->state.plane;
- if (fbc->crtc != crtc)
- return;
+ drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&i915->drm, fbc->active);
- fbc->flip_pending = false;
+ drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
+ plane->base.base.id, plane->base.name);
- if (!dev_priv->params.enable_fbc) {
- intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
- __intel_fbc_disable(dev_priv);
+ __intel_fbc_cleanup_cfb(fbc);
- return;
- }
+ fbc->state.plane = NULL;
+}
- intel_fbc_get_reg_params(crtc, &fbc->params);
+static void __intel_fbc_post_update(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
- if (!intel_fbc_can_activate(crtc))
- return;
+ drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
if (!fbc->busy_bits)
- intel_fbc_activate(dev_priv);
+ intel_fbc_activate(fbc);
else
- intel_fbc_deactivate(dev_priv, "frontbuffer write");
+ intel_fbc_deactivate(fbc, "frontbuffer write");
}
void intel_fbc_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- const struct intel_plane_state *plane_state =
- intel_atomic_get_new_plane_state(state, plane);
- struct intel_fbc *fbc = &dev_priv->fbc;
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
- if (!plane->has_fbc || !plane_state)
- return;
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_fbc *fbc = plane->fbc;
- mutex_lock(&fbc->lock);
- __intel_fbc_post_update(crtc);
- mutex_unlock(&fbc->lock);
+ if (!fbc || plane->pipe != crtc->pipe)
+ continue;
+
+ mutex_lock(&fbc->lock);
+
+ if (fbc->state.plane == plane) {
+ fbc->flip_pending = false;
+ __intel_fbc_post_update(fbc);
+ }
+
+ mutex_unlock(&fbc->lock);
+ }
}
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
- if (fbc->crtc)
- return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
+ if (fbc->state.plane)
+ return fbc->state.plane->frontbuffer_bit;
else
return fbc->possible_framebuffer_bits;
}
-void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+void intel_fbc_invalidate(struct drm_i915_private *i915,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc *fbc = i915->fbc;
- if (!HAS_FBC(dev_priv))
+ if (!fbc)
return;
if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
@@ -1207,18 +1321,18 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
- if (fbc->crtc && fbc->busy_bits)
- intel_fbc_deactivate(dev_priv, "frontbuffer write");
+ if (fbc->state.plane && fbc->busy_bits)
+ intel_fbc_deactivate(fbc, "frontbuffer write");
mutex_unlock(&fbc->lock);
}
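
A standalone model of the busy-bit bookkeeping shared by intel_fbc_invalidate() and intel_fbc_flush(); the flush side clears the bits in context not visible in this hunk, so that part is an assumption here, and the bit values are arbitrary:

    #include <stdio.h>

    static unsigned int busy_bits;
    static const unsigned int fbc_bit = 0x1;    /* assumed tracked plane bit */

    static void invalidate(unsigned int frontbuffer_bits)
    {
            busy_bits |= fbc_bit & frontbuffer_bits;
            if (busy_bits)
                    printf("deactivate: frontbuffer write\n");
    }

    static void flush(unsigned int frontbuffer_bits)
    {
            busy_bits &= ~frontbuffer_bits; /* assumed, done in elided context */
            if (!busy_bits && (frontbuffer_bits & fbc_bit))
                    printf("nuke: recompress the CFB\n");
    }

    int main(void)
    {
            invalidate(0x1);        /* CPU starts writing the scanout */
            flush(0x1);             /* write finished -> recompress */
            return 0;
    }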
-void intel_fbc_flush(struct drm_i915_private *dev_priv,
+void intel_fbc_flush(struct drm_i915_private *i915,
unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc *fbc = i915->fbc;
- if (!HAS_FBC(dev_priv))
+ if (!fbc)
return;
mutex_lock(&fbc->lock);
@@ -1228,143 +1342,85 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
goto out;
- if (!fbc->busy_bits && fbc->crtc &&
+ if (!fbc->busy_bits && fbc->state.plane &&
(frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
if (fbc->active)
- intel_fbc_recompress(dev_priv);
+ intel_fbc_nuke(fbc);
else if (!fbc->flip_pending)
- __intel_fbc_post_update(fbc->crtc);
+ __intel_fbc_post_update(fbc);
}
out:
mutex_unlock(&fbc->lock);
}
-/**
- * intel_fbc_choose_crtc - select a CRTC to enable FBC on
- * @dev_priv: i915 device instance
- * @state: the atomic state structure
- *
- * This function looks at the proposed state for CRTCs and planes, then chooses
- * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
- * true.
- *
- * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
- * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
- */
-void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
- struct intel_atomic_state *state)
+int intel_fbc_atomic_check(struct intel_atomic_state *state)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_plane *plane;
struct intel_plane_state *plane_state;
- bool crtc_chosen = false;
+ struct intel_plane *plane;
int i;
- mutex_lock(&fbc->lock);
-
- /* Does this atomic commit involve the CRTC currently tied to FBC? */
- if (fbc->crtc &&
- !intel_atomic_get_new_crtc_state(state, fbc->crtc))
- goto out;
-
- if (!intel_fbc_can_enable(dev_priv))
- goto out;
-
- /* Simply choose the first CRTC that is compatible and has a visible
- * plane. We could go for fancier schemes such as checking the plane
- * size, but this would just affect the few platforms that don't tie FBC
- * to pipe or plane A. */
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
-
- if (!plane->has_fbc)
- continue;
+ int ret;
- if (!plane_state->uapi.visible)
- continue;
-
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
-
- crtc_state->enable_fbc = true;
- crtc_chosen = true;
- break;
+ ret = intel_fbc_check_plane(state, plane);
+ if (ret)
+ return ret;
}
- if (!crtc_chosen)
- fbc->no_fbc_reason = "no suitable CRTC for FBC";
-
-out:
- mutex_unlock(&fbc->lock);
+ return 0;
}
-/**
- * intel_fbc_enable: tries to enable FBC on the CRTC
- * @crtc: the CRTC
- * @state: corresponding &drm_crtc_state for @crtc
- *
- * This function checks if the given CRTC was chosen for FBC, then enables it if
- * possible. Notice that it doesn't activate FBC. It is valid to call
- * intel_fbc_enable multiple times for the same pipe without an
- * intel_fbc_disable in the middle, as long as it is deactivated.
- */
-static void intel_fbc_enable(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void __intel_fbc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_state_cache *cache = &fbc->state_cache;
- int min_limit;
+ struct intel_fbc *fbc = plane->fbc;
- if (!plane->has_fbc || !plane_state)
- return;
+ if (fbc->state.plane) {
+ if (fbc->state.plane != plane)
+ return;
- min_limit = intel_fbc_min_limit(plane_state->hw.fb ?
- plane_state->hw.fb->format->cpp[0] : 0);
+ if (intel_fbc_is_ok(plane_state)) {
+ intel_fbc_update_state(state, crtc, plane);
+ return;
+ }
- mutex_lock(&fbc->lock);
+ __intel_fbc_disable(fbc);
+ }
- if (fbc->crtc) {
- if (fbc->crtc != crtc)
- goto out;
+ drm_WARN_ON(&i915->drm, fbc->active);
- if (fbc->limit >= min_limit &&
- !intel_fbc_cfb_size_changed(dev_priv))
- goto out;
+ fbc->no_fbc_reason = plane_state->no_fbc_reason;
+ if (fbc->no_fbc_reason)
+ return;
- __intel_fbc_disable(dev_priv);
+ if (!intel_fbc_is_fence_ok(plane_state)) {
+ fbc->no_fbc_reason = "framebuffer not fenced";
+ return;
}
- drm_WARN_ON(&dev_priv->drm, fbc->active);
-
- intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
-
- /* FIXME crtc_state->enable_fbc lies :( */
- if (!cache->plane.visible)
- goto out;
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "FIFO underrun";
+ return;
+ }
- if (intel_fbc_alloc_cfb(dev_priv,
- intel_fbc_cfb_size(dev_priv, cache), min_limit)) {
- cache->plane.visible = false;
+ if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state),
+ intel_fbc_min_limit(plane_state))) {
fbc->no_fbc_reason = "not enough stolen memory";
- goto out;
+ return;
}
- drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
+ plane->base.base.id, plane->base.name);
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
- fbc->crtc = crtc;
+ intel_fbc_update_state(state, crtc, plane);
- intel_fbc_program_cfb(dev_priv);
-out:
- mutex_unlock(&fbc->lock);
+ intel_fbc_program_cfb(fbc);
}
/**
@@ -1375,114 +1431,122 @@ out:
*/
void intel_fbc_disable(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
- if (!plane->has_fbc)
- return;
+ for_each_intel_plane(&i915->drm, plane) {
+ struct intel_fbc *fbc = plane->fbc;
- mutex_lock(&fbc->lock);
- if (fbc->crtc == crtc)
- __intel_fbc_disable(dev_priv);
- mutex_unlock(&fbc->lock);
+ if (!fbc || plane->pipe != crtc->pipe)
+ continue;
+
+ mutex_lock(&fbc->lock);
+ if (fbc->state.plane == plane)
+ __intel_fbc_disable(fbc);
+ mutex_unlock(&fbc->lock);
+ }
}
-/**
- * intel_fbc_update: enable/disable FBC on the CRTC
- * @state: atomic state
- * @crtc: the CRTC
- *
- * This function checks if the given CRTC was chosen for FBC, then enables it if
- * possible. Notice that it doesn't activate FBC. It is valid to call
- * intel_fbc_update multiple times for the same pipe without an
- * intel_fbc_disable in the middle.
- */
void intel_fbc_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
- if (crtc_state->update_pipe && !crtc_state->enable_fbc)
- intel_fbc_disable(crtc);
- else
- intel_fbc_enable(state, crtc);
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (!fbc || plane->pipe != crtc->pipe)
+ continue;
+
+ mutex_lock(&fbc->lock);
+
+ if (crtc_state->update_pipe && plane_state->no_fbc_reason) {
+ if (fbc->state.plane == plane)
+ __intel_fbc_disable(fbc);
+ } else {
+ __intel_fbc_enable(state, crtc, plane);
+ }
+
+ mutex_unlock(&fbc->lock);
+ }
}
/**
* intel_fbc_global_disable - globally disable FBC
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function disables FBC regardless of which CRTC is associated with it.
*/
-void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
+void intel_fbc_global_disable(struct drm_i915_private *i915)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc *fbc = i915->fbc;
- if (!HAS_FBC(dev_priv))
+ if (!fbc)
return;
mutex_lock(&fbc->lock);
- if (fbc->crtc) {
- drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
- __intel_fbc_disable(dev_priv);
- }
+ if (fbc->state.plane)
+ __intel_fbc_disable(fbc);
mutex_unlock(&fbc->lock);
}
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, fbc.underrun_work);
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
+ struct drm_i915_private *i915 = fbc->i915;
mutex_lock(&fbc->lock);
/* Maybe we were scheduled twice. */
- if (fbc->underrun_detected || !fbc->crtc)
+ if (fbc->underrun_detected || !fbc->state.plane)
goto out;
- drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
+ drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
fbc->underrun_detected = true;
- intel_fbc_deactivate(dev_priv, "FIFO underrun");
+ intel_fbc_deactivate(fbc, "FIFO underrun");
+ if (!fbc->flip_pending)
+ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe));
+ __intel_fbc_disable(fbc);
out:
mutex_unlock(&fbc->lock);
}
/*
* intel_fbc_reset_underrun - reset FBC fifo underrun status.
- * @dev_priv: i915 device instance
+ * @i915: the i915 device
*
* See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
* want to re-enable FBC after an underrun to increase test coverage.
*/
-int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
+void intel_fbc_reset_underrun(struct drm_i915_private *i915)
{
- int ret;
+ struct intel_fbc *fbc = i915->fbc;
- cancel_work_sync(&dev_priv->fbc.underrun_work);
+ if (!fbc)
+ return;
- ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
- if (ret)
- return ret;
+ cancel_work_sync(&fbc->underrun_work);
- if (dev_priv->fbc.underrun_detected) {
- drm_dbg_kms(&dev_priv->drm,
+ mutex_lock(&fbc->lock);
+
+ if (fbc->underrun_detected) {
+ drm_dbg_kms(&i915->drm,
"Re-allowing FBC after fifo underrun\n");
- dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
+ fbc->no_fbc_reason = "FIFO underrun cleared";
}
- dev_priv->fbc.underrun_detected = false;
- mutex_unlock(&dev_priv->fbc.lock);
-
- return 0;
+ fbc->underrun_detected = false;
+ mutex_unlock(&fbc->lock);
}
/**
* intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
- * @dev_priv: i915 device instance
+ * @i915: i915 device
*
* Without FBC, most underruns are harmless and don't really cause too many
* problems, except for an annoying message on dmesg. With FBC, underruns can
@@ -1494,11 +1558,11 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
*
* This function is called from the IRQ handler.
*/
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc *fbc = i915->fbc;
- if (!HAS_FBC(dev_priv))
+ if (!fbc)
return;
/* There's no guarantee that underrun_detected won't be set to true
@@ -1522,26 +1586,26 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
* space to change the value during runtime without sanitizing it again. IGT
* relies on being able to change i915.enable_fbc at runtime.
*/
-static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
+static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
{
- if (dev_priv->params.enable_fbc >= 0)
- return !!dev_priv->params.enable_fbc;
+ if (i915->params.enable_fbc >= 0)
+ return !!i915->params.enable_fbc;
- if (!HAS_FBC(dev_priv))
+ if (!HAS_FBC(i915))
return 0;
- if (IS_BROADWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 9)
+ if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
return 1;
return 0;
}
-static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
+static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
{
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
- if (intel_vtd_active() &&
- (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
- drm_info(&dev_priv->drm,
+ if (intel_vtd_active(i915) &&
+ (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
+ drm_info(&i915->drm,
"Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
return true;
}
@@ -1549,38 +1613,171 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
return false;
}
+void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
+{
+ if (!fbc)
+ return;
+
+ plane->fbc = fbc;
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
+}
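
For context, a hedged sketch of the expected call site for intel_fbc_add_plane(); in this series it would live in the plane initialization code rather than this file, and the exact guard shown is an assumption:

    /* illustrative wiring during plane init, not part of this file */
    if (plane->id == PLANE_PRIMARY)
            intel_fbc_add_plane(i915->fbc, plane);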
+
+static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915)
+{
+ struct intel_fbc *fbc;
+
+ fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
+ if (!fbc)
+ return NULL;
+
+ fbc->i915 = i915;
+ INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
+ mutex_init(&fbc->lock);
+
+ if (DISPLAY_VER(i915) >= 7)
+ fbc->funcs = &ivb_fbc_funcs;
+ else if (DISPLAY_VER(i915) == 6)
+ fbc->funcs = &snb_fbc_funcs;
+ else if (DISPLAY_VER(i915) == 5)
+ fbc->funcs = &ilk_fbc_funcs;
+ else if (IS_G4X(i915))
+ fbc->funcs = &g4x_fbc_funcs;
+ else if (DISPLAY_VER(i915) == 4)
+ fbc->funcs = &i965_fbc_funcs;
+ else
+ fbc->funcs = &i8xx_fbc_funcs;
+
+ return fbc;
+}
+
/**
* intel_fbc_init - Initialize FBC
- * @dev_priv: the i915 device
+ * @i915: the i915 device
*
* This function might be called during PM init process.
*/
-void intel_fbc_init(struct drm_i915_private *dev_priv)
+void intel_fbc_init(struct drm_i915_private *i915)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc *fbc;
- INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
- mutex_init(&fbc->lock);
- fbc->active = false;
+ if (!drm_mm_initialized(&i915->mm.stolen))
+ mkwrite_device_info(i915)->display.has_fbc = false;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
- mkwrite_device_info(dev_priv)->display.has_fbc = false;
+ if (need_fbc_vtd_wa(i915))
+ mkwrite_device_info(i915)->display.has_fbc = false;
- if (need_fbc_vtd_wa(dev_priv))
- mkwrite_device_info(dev_priv)->display.has_fbc = false;
+ i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
+ drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
+ i915->params.enable_fbc);
- dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
- dev_priv->params.enable_fbc);
+ if (!HAS_FBC(i915))
+ return;
- if (!HAS_FBC(dev_priv)) {
- fbc->no_fbc_reason = "unsupported by this chipset";
+ fbc = intel_fbc_create(i915);
+ if (!fbc)
return;
- }
/* We still don't have any sort of hardware state readout for FBC, so
* deactivate it in case the BIOS activated it to make sure software
* matches the hardware state. */
- if (intel_fbc_hw_is_active(dev_priv))
- intel_fbc_hw_deactivate(dev_priv);
+ if (intel_fbc_hw_is_active(fbc))
+ intel_fbc_hw_deactivate(fbc);
+
+ i915->fbc = fbc;
+}
+
+static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
+{
+ struct intel_fbc *fbc = m->private;
+ struct drm_i915_private *i915 = fbc->i915;
+ struct intel_plane *plane;
+ intel_wakeref_t wakeref;
+
+ drm_modeset_lock_all(&i915->drm);
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ mutex_lock(&fbc->lock);
+
+ if (fbc->active) {
+ seq_puts(m, "FBC enabled\n");
+ seq_printf(m, "Compressing: %s\n",
+ yesno(intel_fbc_is_compressing(fbc)));
+ } else {
+ seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+ }
+
+ for_each_intel_plane(&i915->drm, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (plane->fbc != fbc)
+ continue;
+
+ seq_printf(m, "%c [PLANE:%d:%s]: %s\n",
+ fbc->state.plane == plane ? '*' : ' ',
+ plane->base.base.id, plane->base.name,
+ plane_state->no_fbc_reason ?: "FBC possible");
+ }
+
+ mutex_unlock(&fbc->lock);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ drm_modeset_unlock_all(&i915->drm);
+
+ return 0;
+}
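
Read back through debugfs, the format strings above would produce output along these lines when FBC is active on one plane (the plane IDs, names, and the second plane's reason are illustrative):

    FBC enabled
    Compressing: yes
    * [PLANE:31:plane 1A]: FBC possible
      [PLANE:35:plane 1B]: plane not visible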
+
+DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status);
+
+static int intel_fbc_debugfs_false_color_get(void *data, u64 *val)
+{
+ struct intel_fbc *fbc = data;
+
+ *val = fbc->false_color;
+
+ return 0;
+}
+
+static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
+{
+ struct intel_fbc *fbc = data;
+
+ mutex_lock(&fbc->lock);
+
+ fbc->false_color = val;
+
+ if (fbc->active)
+ fbc->funcs->set_false_color(fbc, fbc->false_color);
+
+ mutex_unlock(&fbc->lock);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
+ intel_fbc_debugfs_false_color_get,
+ intel_fbc_debugfs_false_color_set,
+ "%llu\n");
+
+static void intel_fbc_debugfs_add(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_fbc_status", 0444,
+ minor->debugfs_root, fbc,
+ &intel_fbc_debugfs_status_fops);
+
+ if (fbc->funcs->set_false_color)
+ debugfs_create_file("i915_fbc_false_color", 0644,
+ minor->debugfs_root, fbc,
+ &intel_fbc_debugfs_false_color_fops);
+}
+
+void intel_fbc_debugfs_register(struct drm_i915_private *i915)
+{
+ struct intel_fbc *fbc = i915->fbc;
+
+ if (fbc)
+ intel_fbc_debugfs_add(fbc);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index b97d908738e6..07ad0411fcc3 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -8,22 +8,22 @@
#include <linux/types.h>
-#include "intel_frontbuffer.h"
-
+enum fb_op_origin;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_fbc;
+struct intel_plane;
struct intel_plane_state;
-void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
- struct intel_atomic_state *state);
-bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+int intel_fbc_atomic_check(struct intel_atomic_state *state);
bool intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_cleanup(struct drm_i915_private *dev_priv);
void intel_fbc_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
@@ -33,8 +33,9 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
-void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
-int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
+void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915);
+void intel_fbc_reset_underrun(struct drm_i915_private *i915);
+void intel_fbc_debugfs_register(struct drm_i915_private *i915);
#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index dd2cf0c59921..3d6e22923601 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -4,11 +4,11 @@
*/
#include "intel_atomic.h"
+#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
-#include "intel_sbi.h"
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -158,7 +158,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
if (pipe_config->fdi_lanes <= 2)
return 0;
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
+ other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -179,7 +179,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return -EINVAL;
}
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
+ other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -887,6 +887,43 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
DP_TP_CTL_ENABLE);
}
+void hsw_fdi_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val;
+
+ /*
+ * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+ * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+ * step 13 is the correct place for it. Step 18 is where it was
+ * originally before the BUN.
+ */
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ val &= ~FDI_RX_ENABLE;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
+
+ val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
+ val &= ~DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val);
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ intel_ddi_disable_clock(encoder);
+
+ val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
+
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ val &= ~FDI_PCDCLK;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
+
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ val &= ~FDI_RX_PLL_ENABLE;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
+}
+
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1006,104 +1043,6 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
udelay(100);
}
-static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
-
- if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
-
- if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
-}
-
-/* WaMPhyProgramming:hsw */
-void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- lpt_fdi_reset_mphy(dev_priv);
-
- tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
- tmp &= ~(0xFF << 24);
- tmp |= (0x12 << 24);
- intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
- tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
- tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
- tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
- tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
- tmp &= ~0xFF;
- tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
- tmp &= ~0xFF;
- tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
- tmp &= ~(0xFF << 16);
- tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
- tmp &= ~(0xFF << 16);
- tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
-}
-
static const struct intel_fdi_funcs ilk_funcs = {
.fdi_link_train = ilk_fdi_link_train,
};
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
index 640d6585c137..1cdb86172702 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.h
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -23,8 +23,8 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void hsw_fdi_disable(struct intel_encoder *encoder);
void intel_fdi_pll_freq_update(struct drm_i915_private *i915);
-void lpt_fdi_program_mphy(struct drm_i915_private *i915);
void intel_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index eb841960840d..d636d21fa9ce 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -26,8 +26,8 @@
*/
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_de.h"
+#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fifo_underrun.h"
@@ -61,7 +61,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
lockdep_assert_held(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
if (crtc->cpu_fifo_underrun_disabled)
return false;
@@ -79,7 +79,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
lockdep_assert_held(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
if (crtc->pch_fifo_underrun_disabled)
return false;
@@ -279,7 +279,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
bool old;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -348,7 +348,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
bool enable)
{
struct intel_crtc *crtc =
- intel_get_crtc_for_pipe(dev_priv, pch_transcoder);
+ intel_crtc_for_pipe(dev_priv, pch_transcoder);
unsigned long flags;
bool old;
@@ -391,7 +391,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
u32 underruns = 0;
/* We may be called too early in init, thanks BIOS! */
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 0492446cd04a..791248f812aa 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -55,14 +55,13 @@
* cancelled as soon as busyness is detected.
*/
-#include "display/intel_dp.h"
-
#include "i915_drv.h"
-#include "i915_trace.h"
+#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_drrs.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
-#include "intel_drrs.h"
#include "intel_psr.h"
/**
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index a88441edc8f9..ff0c37b079aa 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -28,7 +28,7 @@
#include <linux/kref.h>
#include "gem/i915_gem_object_types.h"
-#include "i915_active.h"
+#include "i915_active_types.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index ceb1bf8a8c3c..3b8b84177085 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -334,6 +334,15 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
algo->data = bus;
}
+static bool has_gmbus_irq(struct drm_i915_private *i915)
+{
+ /*
+ * encoder->shutdown() may want to use GMBUS
+ * after irqs have already been disabled.
+ */
+ return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915);
+}
+
static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
{
DEFINE_WAIT(wait);
@@ -344,7 +353,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves.
*/
- if (!HAS_GMBUS_IRQ(dev_priv))
+ if (!has_gmbus_irq(dev_priv))
irq_en = 0;
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
@@ -375,7 +384,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
/* Important: The hw handles only the first bit, so set only one! */
irq_enable = 0;
- if (HAS_GMBUS_IRQ(dev_priv))
+ if (has_gmbus_irq(dev_priv))
irq_enable = GMBUS_IDLE_EN;
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 371736bdc01f..3b5b9e7b05b7 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1800,6 +1800,11 @@ static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi,
READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
}
+static bool intel_hdmi_is_ycbcr420(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420;
+}
+
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits,
bool has_hdmi_sink)
@@ -1864,8 +1869,12 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_OK;
}
-static int intel_hdmi_port_clock(int clock, int bpc)
+static int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
{
+ /* YCBCR420 TMDS rate requirement is half the pixel clock */
+ if (ycbcr420_output)
+ clock /= 2;
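+
+ /*
+ * Example: a 297000 kHz pixel clock in YCbCr 4:2:0 is halved to
+ * 148500 kHz; at 12bpc the TMDS clock is then 148500 * 12 / 8 =
+ * 222750 kHz.
+ */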
+
/*
* Need to adjust the port link by:
* 1.5x for 12bpc
@@ -1874,18 +1883,29 @@ static int intel_hdmi_port_clock(int clock, int bpc)
return clock * bpc / 8;
}
-static bool intel_hdmi_bpc_possible(struct drm_connector *connector,
- int bpc, bool has_hdmi_sink, bool ycbcr420_output)
+static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc)
+{
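+ /*
+ * Source limits: 12bpc needs a non-GMCH platform, 10bpc needs
+ * display version 11 or later; 8bpc always works.
+ */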
+ switch (bpc) {
+ case 12:
+ return !HAS_GMCH(i915);
+ case 10:
+ return DISPLAY_VER(i915) >= 11;
+ case 8:
+ return true;
+ default:
+ MISSING_CASE(bpc);
+ return false;
+ }
+}
+
+static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
+ int bpc, bool has_hdmi_sink, bool ycbcr420_output)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
const struct drm_display_info *info = &connector->display_info;
const struct drm_hdmi_info *hdmi = &info->hdmi;
switch (bpc) {
case 12:
- if (HAS_GMCH(i915))
- return false;
-
if (!has_hdmi_sink)
return false;
@@ -1894,9 +1914,6 @@ static bool intel_hdmi_bpc_possible(struct drm_connector *connector,
else
return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36;
case 10:
- if (DISPLAY_VER(i915) < 11)
- return false;
-
if (!has_hdmi_sink)
return false;
@@ -1916,26 +1933,26 @@ static enum drm_mode_status
intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
bool has_hdmi_sink, bool ycbcr420_output)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
enum drm_mode_status status;
- if (ycbcr420_output)
- clock /= 2;
-
/* check if we can do 8bpc */
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8),
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 8, ycbcr420_output),
true, has_hdmi_sink);
/* if we can't do 8bpc we may still be able to do 12bpc */
if (status != MODE_OK &&
- intel_hdmi_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output))
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12),
+ intel_hdmi_source_bpc_possible(i915, 12) &&
+ intel_hdmi_sink_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output))
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 12, ycbcr420_output),
true, has_hdmi_sink);
/* if we can't do 8,12bpc we may still be able to do 10bpc */
if (status != MODE_OK &&
- intel_hdmi_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output))
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10),
+ intel_hdmi_source_bpc_possible(i915, 10) &&
+ intel_hdmi_sink_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output))
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 10, ycbcr420_output),
true, has_hdmi_sink);
return status;
@@ -2000,7 +2017,7 @@ bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (connector_state->crtc != crtc_state->uapi.crtc)
continue;
- if (!intel_hdmi_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
+ if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
return false;
}
@@ -2015,6 +2032,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ if (!intel_hdmi_source_bpc_possible(dev_priv, bpc))
+ return false;
+
/*
* HDMI deep color affects the clocks, so it's only possible
* when not cloning with other encoder types.
@@ -2023,7 +2043,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
return false;
/* Display Wa_1405510057:icl,ehl */
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ if (intel_hdmi_is_ycbcr420(crtc_state) &&
bpc == 10 && DISPLAY_VER(dev_priv) == 11 &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
@@ -2031,8 +2051,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
return intel_hdmi_deep_color_possible(crtc_state, bpc,
crtc_state->has_hdmi_sink,
- crtc_state->output_format ==
- INTEL_OUTPUT_FORMAT_YCBCR420);
+ intel_hdmi_is_ycbcr420(crtc_state));
}
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
@@ -2040,12 +2059,13 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
int clock)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state);
int bpc;
for (bpc = 12; bpc >= 10; bpc -= 2) {
if (hdmi_deep_color_possible(crtc_state, bpc) &&
hdmi_port_clock_valid(intel_hdmi,
- intel_hdmi_port_clock(clock, bpc),
+ intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output),
true, crtc_state->has_hdmi_sink) == MODE_OK)
return bpc;
}
@@ -2065,13 +2085,10 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
- /* YCBCR420 TMDS rate requirement is half the pixel clock */
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- clock /= 2;
-
bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock);
- crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
+ crtc_state->port_clock = intel_hdmi_tmds_clock(clock, bpc,
+ intel_hdmi_is_ycbcr420(crtc_state));
/*
* pipe_bpp could already be below 8bpc due to
@@ -2141,34 +2158,44 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
+static enum intel_output_format
+intel_hdmi_output_format(struct intel_connector *connector,
+ bool ycbcr_420_output)
+{
+ if (connector->base.ycbcr_420_allowed && ycbcr_420_output)
+ return INTEL_OUTPUT_FORMAT_YCBCR420;
+ else
+ return INTEL_OUTPUT_FORMAT_RGB;
+}
+
static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_connector *connector = conn_state->connector;
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ const struct drm_display_info *info = &connector->base.display_info;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
int ret;
- bool ycbcr_420_only;
- ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, adjusted_mode);
- if (connector->ycbcr_420_allowed && ycbcr_420_only) {
- crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- } else {
- if (!connector->ycbcr_420_allowed && ycbcr_420_only)
- drm_dbg_kms(&i915->drm,
- "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
+ crtc_state->output_format = intel_hdmi_output_format(connector, ycbcr_420_only);
+
+ if (ycbcr_420_only && !intel_hdmi_is_ycbcr420(crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
}
ret = intel_hdmi_compute_clock(encoder, crtc_state);
if (ret) {
- if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 &&
- connector->ycbcr_420_allowed &&
- drm_mode_is_420_also(&connector->display_info, adjusted_mode)) {
- crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- ret = intel_hdmi_compute_clock(encoder, crtc_state);
- }
+ if (intel_hdmi_is_ycbcr420(crtc_state) ||
+ !connector->base.ycbcr_420_allowed ||
+ !drm_mode_is_420_also(info, adjusted_mode))
+ return ret;
+
+ crtc_state->output_format = intel_hdmi_output_format(connector, true);
+ ret = intel_hdmi_compute_clock(encoder, crtc_state);
}
return ret;
@@ -2208,7 +2235,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ if (intel_hdmi_is_ycbcr420(pipe_config)) {
ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 7f3c638c8950..4970bf146c4a 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -74,7 +74,7 @@
#include "intel_de.h"
#include "intel_lpe_audio.h"
-#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
+#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL)
static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
@@ -96,7 +96,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return ERR_PTR(-ENOMEM);
}
- rsc[0].start = rsc[0].end = dev_priv->lpe_audio.irq;
+ rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq;
rsc[0].flags = IORESOURCE_IRQ;
rsc[0].name = "hdmi-lpe-audio-irq";
@@ -148,7 +148,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
* than us fiddle with its internals.
*/
- platform_device_unregister(dev_priv->lpe_audio.platdev);
+ platform_device_unregister(dev_priv->audio.lpe.platdev);
}
static void lpe_audio_irq_unmask(struct irq_data *d)
@@ -167,7 +167,7 @@ static struct irq_chip lpe_audio_irqchip = {
static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
{
- int irq = dev_priv->lpe_audio.irq;
+ int irq = dev_priv->audio.lpe.irq;
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
irq_set_chip_and_handler_name(irq,
@@ -204,15 +204,15 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
{
int ret;
- dev_priv->lpe_audio.irq = irq_alloc_desc(0);
- if (dev_priv->lpe_audio.irq < 0) {
+ dev_priv->audio.lpe.irq = irq_alloc_desc(0);
+ if (dev_priv->audio.lpe.irq < 0) {
drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
- dev_priv->lpe_audio.irq);
- ret = dev_priv->lpe_audio.irq;
+ dev_priv->audio.lpe.irq);
+ ret = dev_priv->audio.lpe.irq;
goto err;
}
- drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq);
+ drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq);
ret = lpe_audio_irq_init(dev_priv);
@@ -223,10 +223,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
goto err_free_irq;
}
- dev_priv->lpe_audio.platdev = lpe_audio_platdev_create(dev_priv);
+ dev_priv->audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
- if (IS_ERR(dev_priv->lpe_audio.platdev)) {
- ret = PTR_ERR(dev_priv->lpe_audio.platdev);
+ if (IS_ERR(dev_priv->audio.lpe.platdev)) {
+ ret = PTR_ERR(dev_priv->audio.lpe.platdev);
drm_err(&dev_priv->drm,
"Failed to create lpe audio platform device: %d\n",
ret);
@@ -241,10 +241,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
return 0;
err_free_irq:
- irq_free_desc(dev_priv->lpe_audio.irq);
+ irq_free_desc(dev_priv->audio.lpe.irq);
err:
- dev_priv->lpe_audio.irq = -1;
- dev_priv->lpe_audio.platdev = NULL;
+ dev_priv->audio.lpe.irq = -1;
+ dev_priv->audio.lpe.platdev = NULL;
return ret;
}
@@ -262,7 +262,7 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
if (!HAS_LPE_AUDIO(dev_priv))
return;
- ret = generic_handle_irq(dev_priv->lpe_audio.irq);
+ ret = generic_handle_irq(dev_priv->audio.lpe.irq);
if (ret)
drm_err_ratelimited(&dev_priv->drm,
"error handling LPE audio irq: %d\n", ret);
@@ -303,10 +303,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
lpe_audio_platdev_destroy(dev_priv);
- irq_free_desc(dev_priv->lpe_audio.irq);
+ irq_free_desc(dev_priv->audio.lpe.irq);
- dev_priv->lpe_audio.irq = -1;
- dev_priv->lpe_audio.platdev = NULL;
+ dev_priv->audio.lpe.irq = -1;
+ dev_priv->audio.lpe.platdev = NULL;
}
/**
@@ -333,7 +333,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
if (!HAS_LPE_AUDIO(dev_priv))
return;
- pdata = dev_get_platdata(&dev_priv->lpe_audio.platdev->dev);
+ pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev);
ppdata = &pdata->port[port - PORT_B];
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
@@ -361,7 +361,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
}
if (pdata->notify_audio_lpe)
- pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev, port - PORT_B);
+ pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B);
spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
}
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 7e3f5c6ca484..1a376e9a1ff3 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1382,7 +1382,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!HAS_OVERLAY(dev_priv))
return;
- engine = dev_priv->gt.engine[RCS0];
+ engine = to_gt(dev_priv)->engine[RCS0];
if (!engine || !engine->kernel_context)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
new file mode 100644
index 000000000000..a55c4bfacd0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "g4x_dp.h"
+#include "intel_crt.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+#include "intel_lvds.h"
+#include "intel_pch_display.h"
+#include "intel_pch_refclk.h"
+#include "intel_pps.h"
+#include "intel_sdvo.h"
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t dp_reg)
+{
+ enum pipe port_pipe;
+ bool state;
+
+ state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
+
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH DP %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
+
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH DP %c still using transcoder B\n",
+ port_name(port));
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t hdmi_reg)
+{
+ enum pipe port_pipe;
+ bool state;
+
+ state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
+
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
+
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH HDMI %c still using transcoder B\n",
+ port_name(port));
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ enum pipe port_pipe;
+
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
+
+ I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ /* PCH SDVOB is multiplexed with HDMIB */
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
+}
+
+static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ u32 val;
+ bool enabled;
+
+ val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
+ enabled = !!(val & TRANS_ENABLE);
+ I915_STATE_WARN(enabled,
+ "transcoder assertion failed, should be off on pipe %c but is still active\n",
+ pipe_name(pipe));
+}
+
+static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
+ enum pipe pch_transcoder)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
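+ /* Copy the CPU transcoder timings over to the PCH transcoder */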
+ intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
+ intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
+ intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
+ intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
+
+ intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
+ intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
+ intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
+ intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
+}
+
+static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 val, pipeconf_val;
+
+ /* Make sure PCH DPLL is enabled */
+ assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, pipe);
+ assert_fdi_rx_enabled(dev_priv, pipe);
+
+ if (HAS_PCH_CPT(dev_priv)) {
+ reg = TRANS_CHICKEN2(pipe);
+ val = intel_de_read(dev_priv, reg);
+ /*
+ * Workaround: Set the timing override bit
+ * before enabling the pch transcoder.
+ */
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
+ intel_de_write(dev_priv, reg, val);
+ }
+
+ reg = PCH_TRANSCONF(pipe);
+ val = intel_de_read(dev_priv, reg);
+ pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_FRAME_START_DELAY_MASK;
+ val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
+
+ /*
+ * Make the BPC in transcoder be consistent with
+ * that in pipeconf reg. For HDMI we must use 8bpc
+ * here for both 8bpc and 12bpc.
+ */
+ val &= ~PIPECONF_BPC_MASK;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ val |= PIPECONF_8BPC;
+ else
+ val |= pipeconf_val & PIPECONF_BPC_MASK;
+ }
+
+ val &= ~TRANS_INTERLACE_MASK;
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
+ if (HAS_PCH_IBX(dev_priv) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
+ val |= TRANS_LEGACY_INTERLACED_ILK;
+ else
+ val |= TRANS_INTERLACED;
+ } else {
+ val |= TRANS_PROGRESSIVE;
+ }
+
+ intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
+ if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
+ drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
+ pipe_name(pipe));
+}
+
+static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 val;
+
+ /* FDI relies on the transcoder */
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+
+ /* Ports must be off as well */
+ assert_pch_ports_disabled(dev_priv, pipe);
+
+ reg = PCH_TRANSCONF(pipe);
+ val = intel_de_read(dev_priv, reg);
+ val &= ~TRANS_ENABLE;
+ intel_de_write(dev_priv, reg, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
+ drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+ pipe_name(pipe));
+
+ if (HAS_PCH_CPT(dev_priv)) {
+ /* Workaround: Clear the timing override chicken bit again. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = intel_de_read(dev_priv, reg);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ intel_de_write(dev_priv, reg, val);
+ }
+}
+
+/*
+ * Enable PCH resources required for PCH ports:
+ * - PCH PLLs
+ * - FDI training & RX/TX
+ * - update transcoder timings
+ * - DP transcoding bits
+ * - transcoder
+ */
+void ilk_pch_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 temp;
+
+ assert_pch_transcoder_disabled(dev_priv, pipe);
+
+ /* For PCH output, training FDI link */
+ intel_fdi_link_train(crtc, crtc_state);
+
+ /*
+ * We need to program the right clock selection
+ * before writing the pixel multiplier into the DPLL.
+ */
+ if (HAS_PCH_CPT(dev_priv)) {
+ u32 sel;
+
+ temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
+ temp |= TRANS_DPLL_ENABLE(pipe);
+ sel = TRANS_DPLLB_SEL(pipe);
+ if (crtc_state->shared_dpll ==
+ intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
+ temp |= sel;
+ else
+ temp &= ~sel;
+ intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
+ }
+
+ /*
+ * XXX: PCH PLLs can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to avoid upsetting any
+ * PCH transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_shared_dpll tries to do the right thing, but
+ * get_shared_dpll unconditionally resets the pll - we need that
+ * to have the right LVDS enable sequence.
+ */
+ intel_enable_shared_dpll(crtc_state);
+
+ /* set transcoder timing, panel must allow it */
+ assert_pps_unlocked(dev_priv, pipe);
+ ilk_pch_transcoder_set_timings(crtc_state, pipe);
+
+ intel_fdi_normal_train(crtc);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev_priv) &&
+ intel_crtc_has_dp_encoder(crtc_state)) {
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ i915_reg_t reg = TRANS_DP_CTL(pipe);
+ enum port port;
+
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
+ temp |= TRANS_DP_OUTPUT_ENABLE;
+ temp |= bpc << 9; /* same format but at 11:9 */
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+ port = intel_get_crtc_new_encoder(state, crtc_state)->port;
+ drm_WARN_ON(&dev_priv->drm, port < PORT_B || port > PORT_D);
+ temp |= TRANS_DP_PORT_SEL(port);
+
+ intel_de_write(dev_priv, reg, temp);
+ }
+
+ ilk_enable_pch_transcoder(crtc_state);
+}
+
+void ilk_pch_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ ilk_fdi_disable(crtc);
+}
+
+void ilk_pch_post_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ ilk_disable_pch_transcoder(crtc);
+
+ if (HAS_PCH_CPT(dev_priv)) {
+ i915_reg_t reg;
+ u32 temp;
+
+ /* disable TRANS_DP_CTL */
+ reg = TRANS_DP_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_PORT_SEL_MASK);
+ temp |= TRANS_DP_PORT_SEL_NONE;
+ intel_de_write(dev_priv, reg, temp);
+
+ /* disable DPLL_SEL */
+ temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
+ temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
+ intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
+ }
+
+ ilk_fdi_pll_disable(crtc);
+}
+
+static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /* read out port_clock from the DPLL */
+ i9xx_crtc_clock_get(crtc, crtc_state);
+
+ /*
+ * If there is an active pipe without active ports, we still
+ * need some notion of the dotclock. Calculate one based on
+ * the FDI configuration.
+ */
+ crtc_state->hw.adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, crtc_state),
+ &crtc_state->fdi_m_n);
+}
+
+void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll;
+ enum pipe pipe = crtc->pipe;
+ enum intel_dpll_id pll_id;
+ bool pll_active;
+ u32 tmp;
+
+ if ((intel_de_read(dev_priv, PCH_TRANSCONF(pipe)) & TRANS_ENABLE) == 0)
+ return;
+
+ crtc_state->has_pch_encoder = true;
+
+ tmp = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
+ crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ ilk_get_fdi_m_n_config(crtc, crtc_state);
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ /*
+ * The pipe->pch transcoder and pch transcoder->pll
+ * mapping is fixed.
+ */
+ pll_id = (enum intel_dpll_id) pipe;
+ } else {
+ tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
+ if (tmp & TRANS_DPLLB_SEL(pipe))
+ pll_id = DPLL_ID_PCH_PLL_B;
+ else
+ pll_id = DPLL_ID_PCH_PLL_A;
+ }
+
+ crtc_state->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+ pll = crtc_state->shared_dpll;
+
+ pll_active = intel_dpll_get_hw_state(dev_priv, pll,
+ &crtc_state->dpll_hw_state);
+ drm_WARN_ON(&dev_priv->drm, !pll_active);
+
+ tmp = crtc_state->dpll_hw_state.dpll;
+ crtc_state->pixel_multiplier =
+ ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
+ >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+
+ ilk_pch_clock_get(crtc_state);
+}
+
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 val, pipeconf_val;
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
+ assert_fdi_rx_enabled(dev_priv, PIPE_A);
+
+ val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
+ /* Workaround: set timing override bit. */
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
+ intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+
+ val = TRANS_ENABLE;
+ pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
+
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+ PIPECONF_INTERLACED_ILK)
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
+ intel_de_write(dev_priv, LPT_TRANSCONF, val);
+ if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 100))
+ drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = intel_de_read(dev_priv, LPT_TRANSCONF);
+ val &= ~TRANS_ENABLE;
+ intel_de_write(dev_priv, LPT_TRANSCONF, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 50))
+ drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
+
+ /* Workaround: clear timing override bit. */
+ val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+}
+
+void lpt_pch_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ assert_pch_transcoder_disabled(dev_priv, PIPE_A);
+
+ lpt_program_iclkip(crtc_state);
+
+ /* Set transcoder timing. */
+ ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
+
+ lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
+}
+
+void lpt_pch_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ lpt_disable_pch_transcoder(dev_priv);
+
+ lpt_disable_iclkip(dev_priv);
+}
+
+void lpt_pch_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp;
+
+ if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
+ return;
+
+ crtc_state->has_pch_encoder = true;
+
+ tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ ilk_get_fdi_m_n_config(crtc, crtc_state);
+
+ crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h
new file mode 100644
index 000000000000..2c387fe3a467
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_PCH_DISPLAY_H_
+#define _INTEL_PCH_DISPLAY_H_
+
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+
+void ilk_pch_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void ilk_pch_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void ilk_pch_post_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void ilk_pch_get_config(struct intel_crtc_state *crtc_state);
+
+void lpt_pch_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void lpt_pch_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void lpt_pch_get_config(struct intel_crtc_state *crtc_state);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
new file mode 100644
index 000000000000..b688fd87e3da
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_panel.h"
+#include "intel_pch_refclk.h"
+#include "intel_sbi.h"
+
+static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
+
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
+}
+
+/* WaMPhyProgramming:hsw */
+static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ lpt_fdi_reset_mphy(dev_priv);
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+{
+ u32 temp;
+
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp |= SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+/* Program iCLKIP clock to the desired frequency */
+void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
+ u32 divsel, phaseinc, auxdiv, phasedir = 0;
+ u32 temp;
+
+ lpt_disable_iclkip(dev_priv);
+
+ /* The iCLK virtual clock root frequency is in MHz, but the
+ * adjusted_mode->crtc_clock is in kHz. To compute the divisors
+ * we must divide one by the other, so convert the virtual clock
+ * to kHz here to preserve precision.
+ */
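+ /*
+ * Worked example: for a 108000 kHz mode and auxdiv=0,
+ * desired_divisor = 172800000 / 108000 = 1600, so
+ * divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0.
+ */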
+ for (auxdiv = 0; auxdiv < 2; auxdiv++) {
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor;
+
+ desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
+ clock << auxdiv);
+ divsel = (desired_divisor / iclk_pi_range) - 2;
+ phaseinc = desired_divisor % iclk_pi_range;
+
+ /*
+ * Near 20MHz is a corner case which is
+ * out of range for the 7-bit divisor
+ */
+ if (divsel <= 0x7f)
+ break;
+ }
+
+ /* This should not happen with any sane values */
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ clock, auxdiv, divsel, phasedir, phaseinc);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Program SSCDIVINTPHASE6 */
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+ temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+
+ /* Program SSCAUXDIV */
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+
+ /* Enable modulator and associated divider */
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp &= ~SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ /* Wait for initialization time */
+ udelay(24);
+
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+}
+
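+/*
+ * Read the iCLKIP configuration back out of the hardware and
+ * reconstruct the resulting frequency; this is the inverse of
+ * lpt_program_iclkip().
+ */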
+int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+{
+ u32 divsel, phaseinc, auxdiv;
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor;
+ u32 temp;
+
+ if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ return 0;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ if (temp & SBI_SSCCTL_DISABLE) {
+ mutex_unlock(&dev_priv->sb_lock);
+ return 0;
+ }
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
+ SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
+ phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
+ SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
+ SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
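+ /* e.g. divsel=23, phaseinc=0, auxdiv=0 -> (23 + 2) * 64 = 1600,
+ * giving 172800000 / 1600 = 108000 kHz */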
+
+ return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
+ desired_divisor << auxdiv);
+}
+
+/* Implements 3 different sequences from BSpec chapter "Display iCLK
+ * Programming" based on the parameters passed:
+ * - Sequence to enable CLKOUT_DP
+ * - Sequence to enable CLKOUT_DP without spread
+ * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
+ */
+static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+ bool with_spread, bool with_fdi)
+{
+ u32 reg, tmp;
+
+ if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ "FDI requires downspread\n"))
+ with_spread = true;
+ if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
+ with_fdi = false;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ udelay(24);
+
+ if (with_spread) {
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ if (with_fdi)
+ lpt_fdi_program_mphy(dev_priv);
+ }
+
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+/* Sequence to disable CLKOUT_DP */
+void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+{
+ u32 reg, tmp;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ if (!(tmp & SBI_SSCCTL_DISABLE)) {
+ if (!(tmp & SBI_SSCCTL_PATHALT)) {
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ udelay(32);
+ }
+ tmp |= SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+#define BEND_IDX(steps) ((50 + (steps)) / 5)
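+/* Maps steps in [-50, 50] (multiples of 5) to table indices 0..20 */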
+
+static const u16 sscdivintphase[] = {
+ [BEND_IDX( 50)] = 0x3B23,
+ [BEND_IDX( 45)] = 0x3B23,
+ [BEND_IDX( 40)] = 0x3C23,
+ [BEND_IDX( 35)] = 0x3C23,
+ [BEND_IDX( 30)] = 0x3D23,
+ [BEND_IDX( 25)] = 0x3D23,
+ [BEND_IDX( 20)] = 0x3E23,
+ [BEND_IDX( 15)] = 0x3E23,
+ [BEND_IDX( 10)] = 0x3F23,
+ [BEND_IDX( 5)] = 0x3F23,
+ [BEND_IDX( 0)] = 0x0025,
+ [BEND_IDX( -5)] = 0x0025,
+ [BEND_IDX(-10)] = 0x0125,
+ [BEND_IDX(-15)] = 0x0125,
+ [BEND_IDX(-20)] = 0x0225,
+ [BEND_IDX(-25)] = 0x0225,
+ [BEND_IDX(-30)] = 0x0325,
+ [BEND_IDX(-35)] = 0x0325,
+ [BEND_IDX(-40)] = 0x0425,
+ [BEND_IDX(-45)] = 0x0425,
+ [BEND_IDX(-50)] = 0x0525,
+};
+
+/*
+ * Bend CLKOUT_DP
+ * steps: -50 to +50 inclusive, in increments of 5
+ * < 0 slows the clock down, > 0 speeds it up, 0 == no bend (135MHz)
+ * change in clock period = -(steps / 10) * 5.787 ps
+ */
+static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
+{
+ u32 tmp;
+ int idx = BEND_IDX(steps);
+
+ if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
+ return;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ if (steps % 10 != 0)
+ tmp = 0xAAAAAAAB;
+ else
+ tmp = 0x00000000;
+ intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
+ tmp &= 0xffff0000;
+ tmp |= sscdivintphase[idx];
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+#undef BEND_IDX
+
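+/*
+ * Check whether the SPLL currently runs off the PCH SSC reference,
+ * either directly, or via the muxed reference when the CPU-side SSC
+ * is fused off.
+ */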
+static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+{
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
+
+ if ((ctl & SPLL_PLL_ENABLE) == 0)
+ return false;
+
+ if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
+ (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
+ return true;
+
+ if (IS_BROADWELL(dev_priv) &&
+ (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
+ return true;
+
+ return false;
+}
+
+static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id id)
+{
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
+
+ if ((ctl & WRPLL_PLL_ENABLE) == 0)
+ return false;
+
+ if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
+ return true;
+
+ if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
+ (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
+ (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
+ return true;
+
+ return false;
+}
+
+static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ bool has_fdi = false;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
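+ /* Only the analog (VGA) output uses FDI on LPT */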
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_fdi = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * The BIOS may have decided to use the PCH SSC
+ * reference so we must not disable it until the
+ * relevant PLLs have stopped relying on it. We'll
+ * just leave the PCH SSC reference enabled in case
+ * any active PLL is using it. It will get disabled
+ * after runtime suspend if we don't have FDI.
+ *
+ * TODO: Move the whole reference clock handling
+ * to the modeset sequence proper so that we can
+ * actually enable/disable/reconfigure these things
+ * safely. To do that we need to introduce a real
+ * clock hierarchy. That would also allow us to do
+ * clock bending finally.
+ */
+ dev_priv->pch_ssc_use = 0;
+
+ if (spll_uses_pch_ssc(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
+ }
+
+ if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
+ drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
+ }
+
+ if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
+ drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
+ }
+
+ if (dev_priv->pch_ssc_use)
+ return;
+
+ if (has_fdi) {
+ lpt_bend_clkout_dp(dev_priv, 0);
+ lpt_enable_clkout_dp(dev_priv, true, true);
+ } else {
+ lpt_disable_clkout_dp(dev_priv);
+ }
+}
+
+static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ int i;
+ u32 val, final;
+ bool has_lvds = false;
+ bool has_cpu_edp = false;
+ bool has_panel = false;
+ bool has_ck505 = false;
+ bool can_ssc = false;
+ bool using_ssc_source = false;
+
+ /* We need to take the global config into account */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ has_panel = true;
+ has_lvds = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ has_panel = true;
+ if (encoder->port == PORT_A)
+ has_cpu_edp = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ has_ck505 = dev_priv->vbt.display_clock_mode;
+ can_ssc = has_ck505;
+ } else {
+ has_ck505 = false;
+ can_ssc = true;
+ }
+
+ /* Check if any DPLLs are using the SSC source */
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
+
+ if (!(temp & DPLL_VCO_ENABLE))
+ continue;
+
+ if ((temp & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ using_ssc_source = true;
+ break;
+ }
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+ has_panel, has_lvds, has_ck505, using_ssc_source);
+
+	/* Ironlake: try to set up the display reference clock before
+	 * DPLL enabling. This is only under the driver's control after
+	 * PCH B stepping; earlier chipset steppings should ignore this
+	 * setting.
+	 */
+ val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
+
+ /* As we must carefully and slowly disable/enable each source in turn,
+ * compute the final state we want first and check if we need to
+ * make any changes at all.
+ */
+ final = val;
+ final &= ~DREF_NONSPREAD_SOURCE_MASK;
+ if (has_ck505)
+ final |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ final |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+ final &= ~DREF_SSC_SOURCE_MASK;
+ final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ final &= ~DREF_SSC1_ENABLE;
+
+ if (has_panel) {
+ final |= DREF_SSC_SOURCE_ENABLE;
+
+ if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ final |= DREF_SSC1_ENABLE;
+
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ else
+ final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else {
+ final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ }
+ } else if (using_ssc_source) {
+ final |= DREF_SSC_SOURCE_ENABLE;
+ final |= DREF_SSC1_ENABLE;
+ }
+
+ if (final == val)
+ return;
+
+ /* Always enable nonspread source */
+ val &= ~DREF_NONSPREAD_SOURCE_MASK;
+
+ if (has_ck505)
+ val |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ val |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+ if (has_panel) {
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_ENABLE;
+
+ /* SSC must be turned on before enabling the CPU output */
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
+ val |= DREF_SSC1_ENABLE;
+ } else {
+ val &= ~DREF_SSC1_ENABLE;
+ }
+
+ /* Get SSC going before enabling the outputs */
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ udelay(200);
+
+ val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Using SSC on eDP\n");
+ val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ } else {
+ val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ }
+ } else {
+ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ }
+
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ udelay(200);
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
+
+ val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Turn off CPU output */
+ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ udelay(200);
+
+ if (!using_ssc_source) {
+ drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
+
+ /* Turn off the SSC source */
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_DISABLE;
+
+ /* Turn off SSC1 */
+ val &= ~DREF_SSC1_ENABLE;
+
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ udelay(200);
+ }
+ }
+
+ BUG_ON(val != final);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+{
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
+ ilk_init_pch_refclk(dev_priv);
+ else if (HAS_PCH_LPT(dev_priv))
+ lpt_init_pch_refclk(dev_priv);
+}
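
ilk_init_pch_refclk() above is a compute-then-commit sequence: read PCH_DREF_CONTROL once, build the complete target value, return early when nothing changes, and otherwise step the hardware toward the target one source at a time with a posting read and a 200 us settle after each write. A minimal sketch of that pattern, using hypothetical register helpers and an illustrative bit layout rather than the i915 API:

	#include <stdbool.h>
	#include <stdint.h>

	uint32_t ref_ctl_read(void);		/* assumed MMIO accessors */
	void ref_ctl_write(uint32_t val);
	void settle_200us(void);

	#define SSC_SOURCE_ENABLE (1u << 0)	/* illustrative bit layout */
	#define CPU_OUTPUT_ENABLE (1u << 1)

	void update_ref_clock(bool want_ssc, bool want_cpu_output)
	{
		uint32_t val = ref_ctl_read();
		uint32_t final = val;

		/* 1) compute the complete target state up front */
		final &= ~(SSC_SOURCE_ENABLE | CPU_OUTPUT_ENABLE);
		if (want_ssc)
			final |= SSC_SOURCE_ENABLE;
		if (want_cpu_output)
			final |= CPU_OUTPUT_ENABLE;

		/* 2) no change means no register traffic at all */
		if (final == val)
			return;

		/* 3) sources before consumers: bring the SSC source to its
		 * target first, let it settle, then switch the CPU output */
		val = (val & ~SSC_SOURCE_ENABLE) | (final & SSC_SOURCE_ENABLE);
		ref_ctl_write(val);
		settle_200us();

		val = (val & ~CPU_OUTPUT_ENABLE) | (final & CPU_OUTPUT_ENABLE);
		ref_ctl_write(val);
		settle_200us();
	}
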
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
new file mode 100644
index 000000000000..12ab2c75a800
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_PCH_REFCLK_H_
+#define _INTEL_PCH_REFCLK_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_crtc_state;
+
+void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
+int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
+void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index dcd698a02da2..01ce1d72297f 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -3,11 +3,12 @@
* Copyright © 2021 Intel Corporation
*/
-#include "intel_display_types.h"
-#include "intel_plane_initial.h"
+#include "i915_drv.h"
#include "intel_atomic_plane.h"
#include "intel_display.h"
+#include "intel_display_types.h"
#include "intel_fb.h"
+#include "intel_plane_initial.h"
static bool
intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 7a205fd5023b..a1a663f362e7 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -28,13 +28,13 @@
#include "i915_drv.h"
#include "intel_atomic.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_snps_phy.h"
-#include "intel_sprite.h"
#include "skl_universal_plane.h"
/**
@@ -588,7 +588,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (IS_ALDERLAKE_P(dev_priv))
+ return trans == TRANSCODER_A || trans == TRANSCODER_B;
+ else if (DISPLAY_VER(dev_priv) >= 12)
return trans == TRANSCODER_A;
else
return trans == TRANSCODER_EDP;
@@ -1346,6 +1348,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp))
@@ -1358,6 +1361,9 @@ void intel_psr_pause(struct intel_dp *intel_dp)
return;
}
+	/* If we ever hit this, we will need to add a refcount to pause/resume */
+ drm_WARN_ON(&dev_priv->drm, psr->paused);
+
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
psr->paused = true;
@@ -1463,10 +1469,19 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
val |= plane_state->uapi.dst.x1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
- /* TODO: consider auxiliary surfaces */
- x = plane_state->uapi.src.x1 >> 16;
- y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
+ x = plane_state->view.color_plane[color_plane].x;
+
+ /*
+ * From Bspec: UV surface Start Y Position = half of Y plane Y
+ * start position.
+ */
+ if (!color_plane)
+ y = plane_state->view.color_plane[color_plane].y + clip->y1;
+ else
+ y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
+
val = y << 16 | x;
+
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
val);
@@ -1558,9 +1573,6 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
* also planes are not updated if they have a negative X
 * position, so for now we do a full update in these cases
*
- * TODO: We are missing multi-planar formats handling, until it is
- * implemented it will send full frame updates.
- *
 * Plane scaling and rotation are not supported by selective fetch, and both
 * properties can change without a modeset, so they need to be checked at every
 * atomic commit.
@@ -1570,7 +1582,6 @@ static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state
if (plane_state->uapi.dst.y1 < 0 ||
plane_state->uapi.dst.x1 < 0 ||
plane_state->scaler_id >= 0 ||
- plane_state->hw.fb->format->num_planes > 1 ||
plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
return false;
@@ -1696,6 +1707,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
struct drm_rect *sel_fetch_area, inter;
+ struct intel_plane *linked = new_plane_state->planar_linked_plane;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
!new_plane_state->uapi.visible)
@@ -1714,6 +1726,24 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
crtc_state->update_planes |= BIT(plane->id);
+
+ /*
+		 * Sel_fetch_area is calculated for the UV plane. Use the
+		 * same area for the Y plane as well.
+ */
+ if (linked) {
+ struct intel_plane_state *linked_new_plane_state;
+ struct drm_rect *linked_sel_fetch_area;
+
+ linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
+ if (IS_ERR(linked_new_plane_state))
+ return PTR_ERR(linked_new_plane_state);
+
+ linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
+ linked_sel_fetch_area->y1 = sel_fetch_area->y1;
+ linked_sel_fetch_area->y2 = sel_fetch_area->y2;
+ crtc_state->update_planes |= BIT(linked->id);
+ }
}
skip_sel_fetch_set_loop:
@@ -1721,11 +1751,17 @@ skip_sel_fetch_set_loop:
return 0;
}
-static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
+void intel_psr_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
+ if (!HAS_PSR(i915))
+ return;
+
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1740,6 +1776,7 @@ static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state,
* - All planes will go inactive
* - Changing between PSR versions
*/
+ needs_to_disable |= intel_crtc_needs_modeset(crtc_state);
needs_to_disable |= !crtc_state->has_psr;
needs_to_disable |= !crtc_state->active_planes;
needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled;
@@ -1751,20 +1788,6 @@ static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state,
}
}
-void intel_psr_pre_plane_update(const struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- if (!HAS_PSR(dev_priv))
- return;
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
- _intel_psr_pre_plane_update(state, crtc_state);
-}
-
static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
@@ -1809,15 +1832,21 @@ void intel_psr_post_plane_update(const struct intel_atomic_state *state)
_intel_psr_post_plane_update(state, crtc_state);
}
-/**
- * psr_wait_for_idle - wait for PSR1 to idle
- * @intel_dp: Intel DP
- * @out_value: PSR status in case of failure
- *
- * Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
- *
- */
-static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
+static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /*
+ * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
+	 * As all higher states have bit 4 of the PSR2 state set, we can just
+	 * wait for EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
+ */
+ return intel_de_wait_for_clear(dev_priv,
+ EDP_PSR2_STATUS(intel_dp->psr.transcoder),
+ EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
+}
+
+static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1827,15 +1856,13 @@ static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
* exit training time + 1.5 ms of aux channel handshake. 50 ms is
* defensive enough to cover everything.
*/
- return __intel_wait_for_register(&dev_priv->uncore,
- EDP_PSR_STATUS(intel_dp->psr.transcoder),
- EDP_PSR_STATUS_STATE_MASK,
- EDP_PSR_STATUS_STATE_IDLE, 2, 50,
- out_value);
+ return intel_de_wait_for_clear(dev_priv,
+ EDP_PSR_STATUS(intel_dp->psr.transcoder),
+ EDP_PSR_STATUS_STATE_MASK, 50);
}
/**
- * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * intel_psr_wait_for_idle - wait for PSR to be ready for a pipe update
* @new_crtc_state: new CRTC state
*
* This function is expected to be called from pipe_update_start() where it is
@@ -1852,19 +1879,23 @@ void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
new_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- u32 psr_status;
+ int ret;
mutex_lock(&intel_dp->psr.lock);
- if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
+
+ if (!intel_dp->psr.enabled) {
mutex_unlock(&intel_dp->psr.lock);
continue;
}
- /* when the PSR1 is enabled */
- if (psr_wait_for_idle(intel_dp, &psr_status))
- drm_err(&dev_priv->drm,
- "PSR idle timed out 0x%x, atomic update may fail\n",
- psr_status);
+ if (intel_dp->psr.psr2_enabled)
+ ret = _psr2_ready_for_pipe_update_locked(intel_dp);
+ else
+ ret = _psr1_ready_for_pipe_update_locked(intel_dp);
+
+ if (ret)
+ drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
+
mutex_unlock(&intel_dp->psr.lock);
}
}
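
_psr2_ready_for_pipe_update_locked() leans on the status encoding noted in its comment: every PSR2 state at or above DEEP_SLEEP shares the high bit of the state field, so a single wait-for-clear on that bit stands in for "state < DEEP_SLEEP". A standalone sketch of the bit trick, using a hypothetical 4-bit encoding rather than the real register layout:

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Hypothetical 4-bit PSR2 state field in which DEEP_SLEEP (0x8) and
	 * every deeper state share the top bit; shallower states clear it.
	 */
	#define PSR2_STATE_MASK       0xfu
	#define PSR2_STATE_DEEP_SLEEP 0x8u

	static bool psr2_ready_for_pipe_update(uint32_t status_reg)
	{
		uint32_t state = status_reg & PSR2_STATE_MASK;

		/* equivalent to: state < PSR2_STATE_DEEP_SLEEP */
		return (state & PSR2_STATE_DEEP_SLEEP) == 0;
	}
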
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index facffbacd357..f6526d9ccfdc 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -6,21 +6,23 @@
#ifndef __INTEL_PSR_H__
#define __INTEL_PSR_H__
-#include "intel_frontbuffer.h"
+#include <linux/types.h>
+enum fb_op_origin;
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
struct intel_crtc_state;
struct intel_dp;
-struct intel_crtc;
-struct intel_atomic_state;
-struct intel_plane_state;
-struct intel_plane;
struct intel_encoder;
+struct intel_plane;
+struct intel_plane_state;
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
-void intel_psr_pre_plane_update(const struct intel_atomic_state *state);
+void intel_psr_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_psr_post_plane_update(const struct intel_atomic_state *state);
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
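
The header cleanup above swaps the intel_frontbuffer.h include for <linux/types.h> plus forward declarations, since every argument crosses this API as a pointer; note that the bare `enum fb_op_origin;` forward declaration relies on a GCC extension, as ISO C has no incomplete enum types. The pattern in miniature, with hypothetical names:

	/* widget.h: callers only pass pointers, so no heavy include is needed */
	#ifndef _WIDGET_H_
	#define _WIDGET_H_

	#include <linux/types.h>

	struct widget;			/* defined in the implementation header */

	void widget_update(struct widget *w, bool force);

	#endif
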
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 8a52b7a16774..c8488f5ebd04 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -5,6 +5,7 @@
#include <linux/dmi.h>
+#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_quirks.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 2dc6c3742ba2..76e1188b01d4 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1842,7 +1842,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
intel_sdvo_write_sdvox(intel_sdvo, temp);
for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 5e20f340730f..09f405e4d363 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -58,7 +58,6 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- int level = intel_ddi_level(encoder, crtc_state, 0);
int n_entries, ln;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
@@ -66,6 +65,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
return;
for (ln = 0; ln < 4; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
u32 val = 0;
val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing);
@@ -186,6 +186,7 @@ static const struct intel_mpllb_state dg2_dp_uhbr10_100 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) |
REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
.mpllb_div2 =
REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
@@ -369,6 +370,7 @@ static const struct intel_mpllb_state dg2_dp_uhbr10_38_4 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) |
REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
.mpllb_div2 =
REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 08116f41da26..2357a1301f48 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -40,14 +40,15 @@
#include <drm/drm_rect.h>
#include "i915_drv.h"
-#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "i9xx_plane.h"
#include "intel_atomic_plane.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_fb.h"
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
-#include "i9xx_plane.h"
#include "intel_vrr.h"
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
@@ -118,7 +119,7 @@ static void i9xx_plane_linear_gamma(u16 gamma[8])
}
static void
-chv_update_csc(const struct intel_plane_state *plane_state)
+chv_sprite_update_csc(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -190,7 +191,7 @@ chv_update_csc(const struct intel_plane_state *plane_state)
#define COS_0 1
static void
-vlv_update_clrc(const struct intel_plane_state *plane_state)
+vlv_sprite_update_clrc(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -393,7 +394,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
-static void vlv_update_gamma(const struct intel_plane_state *plane_state)
+static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -417,45 +418,54 @@ static void vlv_update_gamma(const struct intel_plane_state *plane_state)
}
static void
-vlv_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+vlv_sprite_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
- u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
- u32 linear_offset;
- const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
+ plane_state->view.color_plane[0].mapping_stride);
+ intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+vlv_sprite_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
+ u32 sprctl, linear_offset;
unsigned long irqflags;
- u32 sprctl;
sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
- /* Sizes are 0 based */
- crtc_w--;
- crtc_h--;
-
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
- plane_state->view.color_plane[0].stride);
- intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
- (crtc_h << 16) | crtc_w);
- intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
-
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
- chv_update_csc(plane_state);
+ chv_sprite_update_csc(plane_state);
if (key->flags) {
intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id),
@@ -466,6 +476,8 @@ vlv_update_plane(struct intel_plane *plane,
key->max_value);
}
+ intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
+
intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset);
intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x);
@@ -478,15 +490,15 @@ vlv_update_plane(struct intel_plane *plane,
intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- vlv_update_clrc(plane_state);
- vlv_update_gamma(plane_state);
+ vlv_sprite_update_clrc(plane_state);
+ vlv_sprite_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-vlv_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+vlv_sprite_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -502,8 +514,8 @@ vlv_disable_plane(struct intel_plane *plane,
}
static bool
-vlv_plane_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
+vlv_sprite_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
@@ -805,7 +817,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
i++;
}
-static void ivb_update_gamma(const struct intel_plane_state *plane_state)
+static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -835,48 +847,56 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state)
}
static void
-ivb_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+ivb_sprite_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
- u32 linear_offset;
- const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
- u32 x = plane_state->view.color_plane[0].x;
- u32 y = plane_state->view.color_plane[0].y;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- u32 sprctl, sprscale = 0;
+ u32 sprscale = 0;
unsigned long irqflags;
- sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
- crtc_w--;
- crtc_h--;
-
if (crtc_w != src_w || crtc_h != src_h)
- sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
-
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+ sprscale = SPRITE_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
- plane_state->view.color_plane[0].stride);
+ plane_state->view.color_plane[0].mapping_stride);
intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, SPRSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1));
if (IS_IVYBRIDGE(dev_priv))
intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+ivb_sprite_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
+ u32 x = plane_state->view.color_plane[0].x;
+ u32 y = plane_state->view.color_plane[0].y;
+ u32 sprctl, linear_offset;
+ unsigned long irqflags;
+
+ sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
if (key->flags) {
intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value);
intel_de_write_fw(dev_priv, SPRKEYMSK(pipe),
@@ -902,14 +922,14 @@ ivb_update_plane(struct intel_plane *plane,
intel_de_write_fw(dev_priv, SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- ivb_update_gamma(plane_state);
+ ivb_sprite_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-ivb_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+ivb_sprite_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -927,8 +947,8 @@ ivb_disable_plane(struct intel_plane *plane,
}
static bool
-ivb_plane_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
+ivb_sprite_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
@@ -1106,7 +1126,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
return dvscntr;
}
-static void g4x_update_gamma(const struct intel_plane_state *plane_state)
+static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1136,7 +1156,7 @@ static void ilk_sprite_linear_gamma(u16 gamma[17])
gamma[i] = (i << 10) / 16;
}
-static void ilk_update_gamma(const struct intel_plane_state *plane_state)
+static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1163,47 +1183,55 @@ static void ilk_update_gamma(const struct intel_plane_state *plane_state)
}
static void
-g4x_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+g4x_sprite_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- u32 dvssurf_offset = plane_state->view.color_plane[0].offset;
- u32 linear_offset;
- const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
- u32 x = plane_state->view.color_plane[0].x;
- u32 y = plane_state->view.color_plane[0].y;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- u32 dvscntr, dvsscale = 0;
+ u32 dvsscale = 0;
unsigned long irqflags;
- dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
- crtc_w--;
- crtc_h--;
-
if (crtc_w != src_w || crtc_h != src_h)
- dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
-
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+ dvsscale = DVS_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
- plane_state->view.color_plane[0].stride);
+ plane_state->view.color_plane[0].mapping_stride);
intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, DVSSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1));
intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+g4x_sprite_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 dvssurf_offset = plane_state->view.color_plane[0].offset;
+ u32 x = plane_state->view.color_plane[0].x;
+ u32 y = plane_state->view.color_plane[0].y;
+ u32 dvscntr, linear_offset;
+ unsigned long irqflags;
+
+ dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
if (key->flags) {
intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value);
intel_de_write_fw(dev_priv, DVSKEYMSK(pipe),
@@ -1224,16 +1252,16 @@ g4x_update_plane(struct intel_plane *plane,
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
if (IS_G4X(dev_priv))
- g4x_update_gamma(plane_state);
+ g4x_sprite_update_gamma(plane_state);
else
- ilk_update_gamma(plane_state);
+ ilk_sprite_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-g4x_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+g4x_sprite_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1250,8 +1278,8 @@ g4x_disable_plane(struct intel_plane *plane,
}
static bool
-g4x_plane_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
+g4x_sprite_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
@@ -1299,7 +1327,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- unsigned int stride = plane_state->view.color_plane[0].stride;
+ unsigned int stride = plane_state->view.color_plane[0].mapping_stride;
unsigned int cpp = fb->format->cpp[0];
unsigned int width_bytes;
int min_width, min_height;
@@ -1540,8 +1568,8 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
*/
if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
struct intel_crtc *crtc =
- intel_get_crtc_for_pipe(dev_priv,
- to_intel_plane(plane)->pipe);
+ intel_crtc_for_pipe(dev_priv,
+ to_intel_plane(plane)->pipe);
plane_state = drm_atomic_get_plane_state(state,
crtc->base.primary);
@@ -1567,7 +1595,7 @@ out:
return ret;
}
-static const u32 g4x_plane_formats[] = {
+static const u32 g4x_sprite_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
@@ -1575,13 +1603,7 @@ static const u32 g4x_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const u64 i9xx_plane_format_modifiers[] = {
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u32 snb_plane_formats[] = {
+static const u32 snb_sprite_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB2101010,
@@ -1594,7 +1616,7 @@ static const u32 snb_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const u32 vlv_plane_formats[] = {
+static const u32 vlv_sprite_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1629,13 +1651,8 @@ static const u32 chv_pipe_b_sprite_formats[] = {
static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
- }
switch (format) {
case DRM_FORMAT_XRGB8888:
@@ -1655,13 +1672,8 @@ static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
- }
switch (format) {
case DRM_FORMAT_XRGB8888:
@@ -1686,13 +1698,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
- }
switch (format) {
case DRM_FORMAT_C8:
@@ -1762,9 +1769,10 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
return plane;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- plane->update_plane = vlv_update_plane;
- plane->disable_plane = vlv_disable_plane;
- plane->get_hw_state = vlv_plane_get_hw_state;
+ plane->update_noarm = vlv_sprite_update_noarm;
+ plane->update_arm = vlv_sprite_update_arm;
+ plane->disable_arm = vlv_sprite_disable_arm;
+ plane->get_hw_state = vlv_sprite_get_hw_state;
plane->check_plane = vlv_sprite_check;
plane->max_stride = i965_plane_max_stride;
plane->min_cdclk = vlv_plane_min_cdclk;
@@ -1773,16 +1781,16 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
formats = chv_pipe_b_sprite_formats;
num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats);
} else {
- formats = vlv_plane_formats;
- num_formats = ARRAY_SIZE(vlv_plane_formats);
+ formats = vlv_sprite_formats;
+ num_formats = ARRAY_SIZE(vlv_sprite_formats);
}
- modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
} else if (DISPLAY_VER(dev_priv) >= 7) {
- plane->update_plane = ivb_update_plane;
- plane->disable_plane = ivb_disable_plane;
- plane->get_hw_state = ivb_plane_get_hw_state;
+ plane->update_noarm = ivb_sprite_update_noarm;
+ plane->update_arm = ivb_sprite_update_arm;
+ plane->disable_arm = ivb_sprite_disable_arm;
+ plane->get_hw_state = ivb_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
@@ -1793,28 +1801,27 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->min_cdclk = ivb_sprite_min_cdclk;
}
- formats = snb_plane_formats;
- num_formats = ARRAY_SIZE(snb_plane_formats);
- modifiers = i9xx_plane_format_modifiers;
+ formats = snb_sprite_formats;
+ num_formats = ARRAY_SIZE(snb_sprite_formats);
plane_funcs = &snb_sprite_funcs;
} else {
- plane->update_plane = g4x_update_plane;
- plane->disable_plane = g4x_disable_plane;
- plane->get_hw_state = g4x_plane_get_hw_state;
+ plane->update_noarm = g4x_sprite_update_noarm;
+ plane->update_arm = g4x_sprite_update_arm;
+ plane->disable_arm = g4x_sprite_disable_arm;
+ plane->get_hw_state = g4x_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
plane->max_stride = g4x_sprite_max_stride;
plane->min_cdclk = g4x_sprite_min_cdclk;
- modifiers = i9xx_plane_format_modifiers;
if (IS_SANDYBRIDGE(dev_priv)) {
- formats = snb_plane_formats;
- num_formats = ARRAY_SIZE(snb_plane_formats);
+ formats = snb_sprite_formats;
+ num_formats = ARRAY_SIZE(snb_sprite_formats);
plane_funcs = &snb_sprite_funcs;
} else {
- formats = g4x_plane_formats;
- num_formats = ARRAY_SIZE(g4x_plane_formats);
+ formats = g4x_sprite_formats;
+ num_formats = ARRAY_SIZE(g4x_sprite_formats);
plane_funcs = &g4x_sprite_funcs;
}
@@ -1833,11 +1840,15 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->id = PLANE_SPRITE0 + sprite;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
+ modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
+
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, sprite));
+ kfree(modifiers);
+
if (ret)
goto fail;
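
The bulk of this hunk splits each sprite update into an update_noarm/update_arm pair: registers that merely latch (stride, position, size) are written in the noarm phase, while the SPSURF/SPRSURF/DVSSURF write that actually arms the double-buffered update stays in the arm phase together with everything that must land atomically with it. A rough sketch of the two-phase shape, with hypothetical types rather than the real struct intel_plane:

	struct plane;
	struct plane_state;

	struct plane_funcs {
		/* phase 1: program latched registers; nothing takes effect yet */
		void (*update_noarm)(struct plane *p, const struct plane_state *s);
		/* phase 2: the arming write (the surface address) makes the
		 * whole set of latched values take effect on the next vblank */
		void (*update_arm)(struct plane *p, const struct plane_state *s);
	};

	static void commit_plane(const struct plane_funcs *f,
				 struct plane *p, const struct plane_state *s)
	{
		f->update_noarm(p, s);	/* may run well before the deadline */
		f->update_arm(p, s);	/* must run inside the vblank evasion window */
	}
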
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index c085eb87705c..4f63e4967731 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -27,14 +27,10 @@ struct intel_plane_state;
#define VBLANK_EVASION_TIME_US 100
#endif
-int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
- int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 88a398df9621..8a39989b87ad 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -36,6 +36,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
@@ -924,8 +925,7 @@ intel_enable_tv(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(dev);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
- intel_wait_for_vblank(dev_priv,
- to_intel_crtc(pipe_config->uapi.crtc)->pipe);
+ intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
intel_de_write(dev_priv, TV_CTL,
intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
@@ -1618,7 +1618,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
intel_de_write(dev_priv, TV_DAC, tv_dac);
intel_de_posting_read(dev_priv, TV_DAC);
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
type = -1;
tv_dac = intel_de_read(dev_priv, TV_DAC);
@@ -1651,7 +1651,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
intel_de_posting_read(dev_priv, TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index a2108a8f544d..f043d85ba64d 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -330,7 +330,12 @@ enum vbt_gmbus_ddi {
ADLS_DDC_BUS_PORT_TC1 = 0x2,
ADLS_DDC_BUS_PORT_TC2,
ADLS_DDC_BUS_PORT_TC3,
- ADLS_DDC_BUS_PORT_TC4
+ ADLS_DDC_BUS_PORT_TC4,
+ ADLP_DDC_BUS_PORT_TC1 = 0x3,
+ ADLP_DDC_BUS_PORT_TC2,
+ ADLP_DDC_BUS_PORT_TC3,
+ ADLP_DDC_BUS_PORT_TC4
+
};
#define DP_AUX_A 0x40
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 2275f99ce9d7..9b05f93ed8bc 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -6,12 +6,14 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
#include <linux/limits.h>
+
#include "i915_drv.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
-#include "intel_vdsc.h"
#include "intel_qp_tables.h"
+#include "intel_vdsc.h"
enum ROW_INDEX_BPP {
ROW_INDEX_6BPP = 0,
@@ -442,10 +444,10 @@ calculate_rc_params(struct rc_parameters *rc,
}
}
-int intel_dsc_compute_params(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
const struct rc_parameters *rc_params;
@@ -598,7 +600,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_422_ENABLE;
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
- drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0,
pps_val);
@@ -622,7 +624,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
/* Populate PICTURE_PARAMETER_SET_1 registers */
pps_val = 0;
pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
- drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1,
pps_val);
@@ -647,7 +649,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
- drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2,
pps_val);
@@ -672,7 +674,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
- drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3,
pps_val);
@@ -697,7 +699,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
- drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4,
pps_val);
@@ -722,7 +724,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
- drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5,
pps_val);
@@ -749,7 +751,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
- drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6,
pps_val);
@@ -774,7 +776,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
- drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7,
pps_val);
@@ -799,7 +801,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
- drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8,
pps_val);
@@ -824,7 +826,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
- drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9,
pps_val);
@@ -851,7 +853,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
- drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10,
pps_val);
@@ -879,7 +881,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
vdsc_cfg->slice_width) |
DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
- drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
+ drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16,
pps_val);
@@ -906,8 +908,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
rc_buf_thresh_dword[i / 4] |=
(u32)(vdsc_cfg->rc_buf_thresh[i] <<
BITS_PER_BYTE * (i % 4));
- drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i,
- rc_buf_thresh_dword[i / 4]);
+ drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i,
+ rc_buf_thresh_dword[i / 4]);
}
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
@@ -963,8 +965,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
RC_MAX_QP_SHIFT) |
(vdsc_cfg->rc_range_params[i].range_min_qp <<
RC_MIN_QP_SHIFT)) << 16 * (i % 2));
- drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i,
- rc_range_params_dword[i / 2]);
+ drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i,
+ rc_range_params_dword[i / 2]);
}
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
@@ -1055,8 +1057,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
}
}
-static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -1064,6 +1066,9 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
struct drm_dsc_picture_parameter_set pps;
enum port port;
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
drm_dsc_pps_payload_pack(&pps, vdsc_cfg);
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1074,14 +1079,16 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
}
}
-static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header);
@@ -1105,25 +1112,16 @@ static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_tran
ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2;
}
-static struct intel_crtc *
-_get_crtc_for_pipe(struct drm_i915_private *i915, enum pipe pipe)
-{
- if (!intel_pipe_valid(i915, pipe))
- return NULL;
-
- return intel_get_crtc_for_pipe(i915, pipe);
-}
-
struct intel_crtc *
intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc)
{
- return _get_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1);
+ return intel_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1);
}
static struct intel_crtc *
intel_dsc_get_bigjoiner_primary(const struct intel_crtc *secondary_crtc)
{
- return _get_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1);
+ return intel_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1);
}
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
@@ -1142,8 +1140,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
}
}
-void intel_dsc_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1155,13 +1152,6 @@ void intel_dsc_enable(struct intel_encoder *encoder,
intel_dsc_pps_configure(crtc_state);
- if (!crtc_state->bigjoiner_slave) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- intel_dsc_dsi_pps_write(encoder, crtc_state);
- else
- intel_dsc_dp_pps_write(encoder, crtc_state);
- }
-
dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
if (crtc_state->dsc.dsc_split) {
dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 0c5d80a572da..4ec75f715986 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -15,15 +15,17 @@ struct intel_encoder;
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state);
-void intel_dsc_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
+void intel_dsc_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
-int intel_dsc_compute_params(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
+int intel_dsc_compute_params(struct intel_crtc_state *pipe_config);
void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc);
+void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VDSC_H__ */
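
With intel_dsc_enable() reduced to a crtc_state-only signature, writing the PPS moves to the encoders, which call the now-exported intel_dsc_dsi_pps_write()/intel_dsc_dp_pps_write() themselves; both helpers return early when crtc_state->dsc.compression_enable is false, so callers need no guard of their own. A hedged sketch of what an encoder enable path might look like under this convention (the call site is illustrative, not lifted from the driver):

	static void encoder_pre_enable_dsc(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state,
					   bool is_dsi)
	{
		/* safe to call unconditionally: both helpers bail out
		 * when DSC compression is not enabled for this state */
		if (is_dsi)
			intel_dsc_dsi_pps_write(encoder, crtc_state);
		else
			intel_dsc_dp_pps_write(encoder, crtc_state);

		intel_dsc_enable(crtc_state);
	}
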
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index c335b1dbafcf..139e8936edc5 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -60,7 +60,7 @@ intel_vrr_check_modeset(struct intel_atomic_state *state)
* Between those two points the vblank exit starts (and hence registers get
* latched) ASAP after a push is sent.
*
- * framestart_delay is programmable 0-3.
+ * framestart_delay is programmable 1-4.
*/
static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
{
@@ -138,13 +138,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
i915->window2_delay;
else
/*
- * FIXME: s/4/framestart_delay+1/ to get consistent
+ * FIXME: s/4/framestart_delay/ to get consistent
* earliest/latest points for register latching regardless
* of the framestart_delay used?
*
* FIXME: this really needs the extra scanline to provide consistent
* behaviour for all framestart_delay values. Otherwise with
- * framestart_delay==3 we will end up extending the min vblank by
+ * framestart_delay==4 we will end up extending the min vblank by
* one extra line.
*/
crtc_state->vrr.pipeline_full =
@@ -193,6 +193,18 @@ void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
TRANS_PUSH_EN | TRANS_PUSH_SEND);
}
+bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!crtc_state->vrr.enable)
+ return false;
+
+ return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND;
+}
+
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 96f9c9c27ab9..1c2da572693d 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -23,6 +23,7 @@ void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
void intel_vrr_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state);
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
void intel_vrr_get_config(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
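
intel_vrr_is_push_sent() gives callers a way to tell whether a previously sent push is still pending in TRANS_PUSH: it returns false outright when VRR is disabled, and otherwise reports the TRANS_PUSH_SEND bit, which the hardware clears once the push is consumed. A hypothetical polling wrapper built on it (illustrative only; the real consumer would live in the vblank evasion path):

	#include <linux/delay.h>
	#include <linux/types.h>

	#include "intel_vrr.h"

	/* Hypothetical wrapper: poll until the hardware consumes the push,
	 * giving up after max_tries samples. */
	static bool vrr_push_consumed(const struct intel_crtc_state *crtc_state,
				      int max_tries)
	{
		while (max_tries--) {
			if (!intel_vrr_is_push_sent(crtc_state))
				return true;	/* TRANS_PUSH_SEND has cleared */
			udelay(10);
		}
		return false;
	}
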
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 37eabeff8197..c2e94118566b 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -4,6 +4,7 @@
*/
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_fb.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index a0e53a3b267a..93a385396512 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
+#include "intel_fbc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -163,50 +164,6 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_XVYU16161616,
};
-static const u64 skl_plane_format_modifiers_noccs[] = {
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 skl_plane_format_modifiers_ccs[] = {
- I915_FORMAT_MOD_Yf_TILED_CCS,
- I915_FORMAT_MOD_Y_TILED_CCS,
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
- I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 adlp_step_a_plane_format_modifiers[] = {
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
switch (format) {
@@ -464,9 +421,19 @@ static int icl_plane_min_width(const struct drm_framebuffer *fb,
}
}
-static int icl_plane_max_width(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
+static int icl_hdr_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ return 4096;
+ else
+ return 5120;
+}
+
+static int icl_sdr_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
{
return 5120;
}
@@ -633,7 +600,7 @@ static u32 skl_plane_stride(const struct intel_plane_state *plane_state,
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
- u32 stride = plane_state->view.color_plane[color_plane].stride;
+ u32 stride = plane_state->view.color_plane[color_plane].scanout_stride;
if (color_plane >= fb->format->num_planes)
return 0;
@@ -642,8 +609,8 @@ static u32 skl_plane_stride(const struct intel_plane_state *plane_state,
}
static void
-skl_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+skl_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
@@ -716,13 +683,13 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
case DRM_FORMAT_XYUV8888:
return PLANE_CTL_FORMAT_XYUV;
case DRM_FORMAT_YUYV:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YUYV;
case DRM_FORMAT_YVYU:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YVYU;
case DRM_FORMAT_UYVY:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_UYVY;
case DRM_FORMAT_VYUY:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_VYUY;
case DRM_FORMAT_NV12:
return PLANE_CTL_FORMAT_NV12;
case DRM_FORMAT_P010:
@@ -985,6 +952,9 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}
+ if (plane_state->force_black)
+ plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+
return plane_color_ctl;
}
@@ -1008,74 +978,60 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
}
}
-static void intel_load_plane_csc_black(struct intel_plane *intel_plane)
+static u32 skl_plane_surf(const struct intel_plane_state *plane_state,
+ int color_plane)
{
- struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- enum pipe pipe = intel_plane->pipe;
- enum plane_id plane = intel_plane->id;
- u16 postoff = 0;
+ u32 plane_surf;
- drm_dbg_kms(&dev_priv->drm, "plane color CTM to black %s:%d\n",
- intel_plane->base.name, plane);
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 0), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 1), 0);
+ plane_surf = intel_plane_ggtt_offset(plane_state) +
+ skl_surf_address(plane_state, color_plane);
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 2), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 3), 0);
+ if (plane_state->decrypt)
+ plane_surf |= PLANE_SURF_DECRYPT;
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 4), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 5), 0);
+ return plane_surf;
+}
- intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
+static void icl_plane_csc_load_black(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0);
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0);
+
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0);
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0);
+
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0);
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 0), postoff);
- intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 1), postoff);
- intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 2), postoff);
+ intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0);
+ intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0);
+ intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0);
+
+ intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0);
+ intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0);
+ intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
}
static void
-skl_program_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
+skl_program_plane_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 surf_addr = skl_surf_address(plane_state, color_plane);
u32 stride = skl_plane_stride(plane_state, color_plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- int aux_plane = skl_main_to_aux_plane(fb, color_plane);
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
- u32 x = plane_state->view.color_plane[color_plane].x;
- u32 y = plane_state->view.color_plane[color_plane].y;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- u8 alpha = plane_state->hw.alpha >> 8;
- u32 plane_color_ctl = 0, aux_dist = 0;
unsigned long irqflags;
- u32 keymsk, keymax, plane_surf;
- u32 plane_ctl = plane_state->ctl;
-
- plane_ctl |= skl_plane_ctl_crtc(crtc_state);
-
- if (DISPLAY_VER(dev_priv) >= 10)
- plane_color_ctl = plane_state->color_ctl |
- glk_plane_color_ctl_crtc(crtc_state);
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
-
- keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
-
- keymsk = key->channel_mask & 0x7ffffff;
- if (alpha < 0xff)
- keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
/* The scaler will handle the output position */
if (plane_state->scaler_id >= 0) {
@@ -1083,40 +1039,83 @@ skl_program_plane(struct intel_plane *plane,
crtc_y = 0;
}
- if (aux_plane) {
- aux_dist = skl_surf_address(plane_state, aux_plane) - surf_addr;
-
- if (DISPLAY_VER(dev_priv) < 12)
- aux_dist |= skl_plane_stride(plane_state, aux_plane);
- }
-
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ /*
+	 * FIXME: a PXP session invalidation can hit at any time, even during
+	 * or after the commit; when it does, the displayed content is garbage.
+ */
+ if (plane_state->force_black)
+ icl_plane_csc_load_black(plane);
+
intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
(crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
- (src_h << 16) | src_w);
+ ((src_h - 1) << 16) | (src_w - 1));
- intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
+ if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) {
+ intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0),
+ lower_32_bits(plane_state->ccval));
+ intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1),
+ upper_32_bits(plane_state->ccval));
+ }
if (icl_is_hdr_plane(dev_priv, plane_id))
intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
plane_state->cus_ctl);
- if (DISPLAY_VER(dev_priv) >= 10)
- intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
- plane_color_ctl);
-
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
- intel_uncore_write64_fw(&dev_priv->uncore,
- PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
-
skl_write_plane_wm(plane, crtc_state);
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+skl_program_plane_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_plane = skl_main_to_aux_plane(fb, color_plane);
+ u32 x = plane_state->view.color_plane[color_plane].x;
+ u32 y = plane_state->view.color_plane[color_plane].y;
+ u32 keymsk, keymax, aux_dist = 0, plane_color_ctl = 0;
+ u8 alpha = plane_state->hw.alpha >> 8;
+ u32 plane_ctl = plane_state->ctl;
+ unsigned long irqflags;
+
+ plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ plane_color_ctl = plane_state->color_ctl |
+ glk_plane_color_ctl_crtc(crtc_state);
+
+ keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
+
+ keymsk = key->channel_mask & 0x7ffffff;
+ if (alpha < 0xff)
+ keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
+
+ if (aux_plane) {
+ aux_dist = skl_surf_address(plane_state, aux_plane) -
+ skl_surf_address(plane_state, color_plane);
+
+ if (DISPLAY_VER(dev_priv) < 12)
+ aux_dist |= skl_plane_stride(plane_state, aux_plane);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
key->min_value);
intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
@@ -1125,17 +1124,22 @@ skl_program_plane(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
(y << 16) | x);
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
+
if (DISPLAY_VER(dev_priv) < 11)
intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
(plane_state->view.color_plane[1].y << 16) |
plane_state->view.color_plane[1].x);
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+ if (DISPLAY_VER(dev_priv) >= 10)
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
/*
* Enable the scaler before the plane so that we don't
* get a catastrophic underrun even if the two operations
* end up happening in two different frames.
+ *
+ * TODO: split into noarm+arm pair
*/
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
@@ -1146,23 +1150,8 @@ skl_program_plane(struct intel_plane *plane,
* the control register just before the surface register.
*/
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- plane_surf = intel_plane_ggtt_offset(plane_state) + surf_addr;
- plane_color_ctl = intel_de_read_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id));
-
- /*
- * FIXME: pxp session invalidation can hit any time even at time of commit
- * or after the commit, display content will be garbage.
- */
- if (plane_state->decrypt) {
- plane_surf |= PLANE_SURF_DECRYPT;
- } else if (plane_state->force_black) {
- intel_load_plane_csc_black(plane);
- plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
- }
-
- intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
- plane_color_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), plane_surf);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ skl_plane_surf(plane_state, color_plane));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1177,7 +1166,6 @@ skl_plane_async_flip(struct intel_plane *plane,
unsigned long irqflags;
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- u32 surf_addr = plane_state->view.color_plane[0].offset;
u32 plane_ctl = plane_state->ctl;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
@@ -1189,15 +1177,29 @@ skl_plane_async_flip(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
+ skl_plane_surf(plane_state, 0));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-skl_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+skl_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ int color_plane = 0;
+
+ if (plane_state->planar_linked_plane && !plane_state->planar_slave)
+ /* Program the UV plane on planar master */
+ color_plane = 1;
+
+ skl_program_plane_noarm(plane, crtc_state, plane_state, color_plane);
+}
+
+static void
+skl_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
int color_plane = 0;
@@ -1205,7 +1207,7 @@ skl_update_plane(struct intel_plane *plane,
/* Program the UV plane on planar master */
color_plane = 1;
- skl_program_plane(plane, crtc_state, plane_state, color_plane);
+ skl_program_plane_arm(plane, crtc_state, plane_state, color_plane);
}
static bool intel_format_is_p01x(u32 format)
@@ -1232,7 +1234,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
return 0;
if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
- is_ccs_modifier(fb->modifier)) {
+ intel_fb_is_ccs_modifier(fb->modifier)) {
drm_dbg_kms(&dev_priv->drm,
"RC support only with 0/180 degree rotation (%x)\n",
rotation);
@@ -1284,13 +1286,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
/* Y-tiling is not supported in IF-ID Interlace mode */
if (crtc_state->hw.enable &&
crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
- (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
+ fb->modifier != DRM_FORMAT_MOD_LINEAR &&
+ fb->modifier != I915_FORMAT_MOD_X_TILED) {
drm_dbg_kms(&dev_priv->drm,
"Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
@@ -1487,7 +1484,7 @@ int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = fb->format->cpp[0];
- while ((*x + w) * cpp > plane_state->view.color_plane[0].stride) {
+ while ((*x + w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
if (*offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
@@ -1536,7 +1533,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
* they match with the main surface x/y offsets.
*/
- if (is_ccs_modifier(fb->modifier)) {
+ if (intel_fb_is_ccs_modifier(fb->modifier)) {
while (!skl_check_main_ccs_coordinates(plane_state, x, y,
offset, aux_plane)) {
if (offset == 0)
@@ -1600,7 +1597,7 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
offset = intel_plane_compute_aligned_offset(&x, &y,
plane_state, uv_plane);
- if (is_ccs_modifier(fb->modifier)) {
+ if (intel_fb_is_ccs_modifier(fb->modifier)) {
int ccs_plane = main_to_ccs_plane(fb, uv_plane);
u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset;
u32 alignment = intel_surf_alignment(fb, uv_plane);
@@ -1656,8 +1653,7 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int hsub, vsub;
int x, y;
- if (!is_ccs_plane(fb, ccs_plane) ||
- is_gen12_ccs_cc_plane(fb, ccs_plane))
+ if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane))
continue;
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
@@ -1699,7 +1695,7 @@ static int skl_check_plane_surface(struct intel_plane_state *plane_state)
* Handle the AUX surface first since the main surface setup depends on
* it.
*/
- if (is_ccs_modifier(fb->modifier)) {
+ if (intel_fb_is_ccs_modifier(fb->modifier)) {
ret = skl_check_ccs_aux_surface(plane_state);
if (ret)
return ret;
@@ -1737,6 +1733,18 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb)
}
}
+static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ return intel_pxp_key_check(&to_gt(i915)->pxp, obj, false) == 0;
+}
+
+static bool pxp_is_borked(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
+}
+
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
@@ -1781,6 +1789,11 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
+ plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
+ }
+
/* HW only has 8 bits pixel precision, disable plane if invisible */
if (!(plane_state->hw.alpha >> 8))
plane_state->uapi.visible = false;
@@ -1812,6 +1825,15 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
}
+static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (skl_plane_has_fbc(dev_priv, pipe, plane_id))
+ return dev_priv->fbc;
+ else
+ return NULL;
+}
+
static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -1870,49 +1892,20 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
}
}
-static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- if (plane_id == PLANE_CURSOR)
- return false;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- return true;
-
- if (IS_GEMINILAKE(dev_priv))
- return pipe != PIPE_C;
-
- return pipe != PIPE_C &&
- (plane_id == PLANE_PRIMARY ||
- plane_id == PLANE_SPRITE0);
-}
-
static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
struct intel_plane *plane = to_intel_plane(_plane);
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- break;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (!plane->has_ccs)
- return false;
- break;
- default:
+ if (!intel_fb_plane_supports_modifier(plane, modifier))
return false;
- }
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
+ if (intel_fb_is_ccs_modifier(modifier))
return true;
fallthrough;
case DRM_FORMAT_RGB565:
@@ -1953,52 +1946,20 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
- if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D0))
- return false;
-
- /* Wa_22011186057 */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- return false;
-
- return plane_id < PLANE_SPRITE4;
-}
-
static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
- struct drm_i915_private *dev_priv = to_i915(_plane->dev);
struct intel_plane *plane = to_intel_plane(_plane);
- switch (modifier) {
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
- return false;
- fallthrough;
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- break;
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- /* Wa_22011186057 */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- return false;
- break;
- default:
+ if (!intel_fb_plane_supports_modifier(plane, modifier))
return false;
- }
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
+ if (intel_fb_is_ccs_modifier(modifier))
return true;
fallthrough;
case DRM_FORMAT_YUYV:
@@ -2010,7 +1971,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
- if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
+ if (intel_fb_is_mc_ccs_modifier(modifier))
return true;
fallthrough;
case DRM_FORMAT_RGB565:
@@ -2039,18 +2000,6 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
}
}
-static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- /* Wa_22011186057 */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- return adlp_step_a_plane_format_modifiers;
- else if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
- return gen12_plane_format_modifiers_mc_ccs;
- else
- return gen12_plane_format_modifiers_rc_ccs;
-}
-
static const struct drm_plane_funcs skl_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -2091,6 +2040,64 @@ skl_plane_disable_flip_done(struct intel_plane *plane)
spin_unlock_irq(&i915->irq_lock);
}
+static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ /* Wa_22011186057 */
+ if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ return false;
+
+ if (DISPLAY_VER(i915) >= 11)
+ return true;
+
+ if (IS_GEMINILAKE(i915))
+ return pipe != PIPE_C;
+
+ return pipe != PIPE_C &&
+ (plane_id == PLANE_PRIMARY ||
+ plane_id == PLANE_SPRITE0);
+}
+
+static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
+ enum plane_id plane_id)
+{
+ if (DISPLAY_VER(i915) < 12)
+ return false;
+
+ /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
+ if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
+ IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
+ return false;
+
+ /* Wa_22011186057 */
+ if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ return false;
+
+ return plane_id < PLANE_SPRITE4;
+}
+
+static u8 skl_get_plane_caps(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ u8 caps = INTEL_PLANE_CAP_TILING_X;
+
+ if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915))
+ caps |= INTEL_PLANE_CAP_TILING_Y;
+ if (DISPLAY_VER(i915) < 12)
+ caps |= INTEL_PLANE_CAP_TILING_Yf;
+
+ if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
+ caps |= INTEL_PLANE_CAP_CCS_RC;
+ if (DISPLAY_VER(i915) >= 12)
+ caps |= INTEL_PLANE_CAP_CCS_RC_CC;
+ }
+
+ if (gen12_plane_has_mc_ccs(i915, plane_id))
+ caps |= INTEL_PLANE_CAP_CCS_MC;
+
+ return caps;
+}
+
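skl_get_plane_caps() folds the per-platform tiling and CCS rules into one bitmask that intel_fb_plane_get_modifiers() can translate into a modifier list. A hypothetical query against it (illustrative helper, not in this patch):

static bool example_plane_supports_mc_ccs(struct drm_i915_private *i915,
					  enum pipe pipe, enum plane_id plane_id)
{
	/* caps is a bitmask; CCS_MC covers the gen12 media-compression modifiers */
	return skl_get_plane_caps(i915, pipe, plane_id) & INTEL_PLANE_CAP_CCS_MC;
}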
struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
@@ -2113,16 +2120,14 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->id = plane_id;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
- plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
- if (plane->has_fbc) {
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
- }
+ intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane);
if (DISPLAY_VER(dev_priv) >= 11) {
plane->min_width = icl_plane_min_width;
- plane->max_width = icl_plane_max_width;
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ plane->max_width = icl_hdr_plane_max_width;
+ else
+ plane->max_width = icl_sdr_plane_max_width;
plane->max_height = icl_plane_max_height;
plane->min_cdclk = icl_plane_min_cdclk;
} else if (DISPLAY_VER(dev_priv) >= 10) {
@@ -2136,8 +2141,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
}
plane->max_stride = skl_plane_max_stride;
- plane->update_plane = skl_update_plane;
- plane->disable_plane = skl_disable_plane;
+ plane->update_noarm = skl_plane_update_noarm;
+ plane->update_arm = skl_plane_update_arm;
+ plane->disable_arm = skl_plane_disable_arm;
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
@@ -2159,29 +2165,28 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
formats = skl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
- plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
- if (DISPLAY_VER(dev_priv) >= 12) {
- modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
+ if (DISPLAY_VER(dev_priv) >= 12)
plane_funcs = &gen12_plane_funcs;
- } else {
- if (plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
+ else
plane_funcs = &skl_plane_funcs;
- }
if (plane_id == PLANE_PRIMARY)
plane_type = DRM_PLANE_TYPE_PRIMARY;
else
plane_type = DRM_PLANE_TYPE_OVERLAY;
+ modifiers = intel_fb_plane_get_modifiers(dev_priv,
+ skl_get_plane_caps(dev_priv, pipe, plane_id));
+
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
pipe_name(pipe));
+
+ kfree(modifiers);
+
if (ret)
goto fail;
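The update hooks are now split into a noarm/arm pair: skl_program_plane_noarm() issues the writes that may happen ahead of time, while skl_program_plane_arm() ends with the PLANE_SURF write that latches everything at once. A sketch of the contract a caller is expected to honour (hypothetical dispatch; the real one lives in the atomic plane code):

static void example_commit_plane(struct intel_plane *plane,
				 const struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state)
{
	/* outside the vblank-evasion critical section */
	if (plane->update_noarm)
		plane->update_noarm(plane, crtc_state, plane_state);

	/* ... enter vblank evasion ... */

	/* arms the double-buffered registers via the PLANE_SURF write */
	plane->update_arm(plane, crtc_state, plane_state);

	/* ... leave vblank evasion ... */
}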
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 07584695514b..20141f33ed64 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -38,9 +38,12 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
+#include "intel_dsi_vbt.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
#include "skl_scaler.h"
+#include "vlv_dsi.h"
+#include "vlv_dsi_pll.h"
#include "vlv_sideband.h"
/* return pixels in terms of txbyteclkhs */
@@ -1258,7 +1261,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 pclk;
+
drm_dbg_kms(&dev_priv->drm, "\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
@@ -1270,6 +1275,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
pclk = vlv_dsi_get_pclk(encoder, pipe_config);
}
+ if (intel_dsi->dual_link)
+ pclk *= 2;
+
if (pclk) {
pipe_config->hw.adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
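With dual-link DSI each link carries half of the pixels, so the pixel clock recovered from one link's PLL must be doubled to reconstruct the mode clock; for example, a panel needing ~154 MHz runs each link at ~77 MHz. The rule in isolation (illustrative helper):

static u32 example_total_pclk(const struct intel_dsi *intel_dsi, u32 link_pclk)
{
	/* each link carries half the pixels, so double the recovered clock */
	return intel_dsi->dual_link ? link_pclk * 2 : link_pclk;
}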
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h
new file mode 100644
index 000000000000..0c2b279df9d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __VLV_DSI_H__
+#define __VLV_DSI_H__
+
+#include <linux/types.h>
+
+enum port;
+struct drm_i915_private;
+struct intel_dsi;
+
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+void vlv_dsi_init(struct drm_i915_private *dev_priv);
+
+#endif /* __VLV_DSI_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 5413b52ab6ba..1b81797dd02e 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -31,6 +31,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
+#include "vlv_dsi_pll.h"
#include "vlv_sideband.h"
static const u16 lfsr_converts[] = {
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
new file mode 100644
index 000000000000..ab9291ad1e79
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __VLV_DSI_PLL_H__
+#define __VLV_DSI_PLL_H__
+
+#include <linux/types.h>
+
+enum port;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void vlv_dsi_pll_disable(struct intel_encoder *encoder);
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void bxt_dsi_pll_disable(struct intel_encoder *encoder);
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+
+void assert_dsi_pll_enabled(struct drm_i915_private *i915);
+void assert_dsi_pll_disabled(struct drm_i915_private *i915);
+
+#endif /* __VLV_DSI_PLL_H__ */
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
deleted file mode 100644
index 7df91b7e4ca8..000000000000
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2020 Intel Corporation
- */
-
-#include <linux/dma-resv.h>
-
-#include "dma_resv_utils.h"
-
-void dma_resv_prune(struct dma_resv *resv)
-{
- if (dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled(resv, true))
- dma_resv_add_excl_fence(resv, NULL);
- dma_resv_unlock(resv);
- }
-}
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.h b/drivers/gpu/drm/i915/dma_resv_utils.h
deleted file mode 100644
index b9d8fb5f8367..000000000000
--- a/drivers/gpu/drm/i915/dma_resv_utils.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2020 Intel Corporation
- */
-
-#ifndef DMA_RESV_UTILS_H
-#define DMA_RESV_UTILS_H
-
-struct dma_resv;
-
-void dma_resv_prune(struct dma_resv *resv);
-
-#endif /* DMA_RESV_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 7358bebef15c..470fdfd61a0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -115,8 +115,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
- struct dma_resv_list *list;
- unsigned int seq;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
int err;
err = -ENOENT;
@@ -142,27 +142,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
-retry:
- seq = raw_read_seqcount(&obj->base.resv->seq);
-
- /* Translate the exclusive fence to the READ *and* WRITE engine */
- args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
-
- /* Translate shared fences to READ set of engines */
- list = dma_resv_shared_list(obj->base.resv);
- if (list) {
- unsigned int shared_count = list->shared_count, i;
-
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *fence =
- rcu_dereference(list->shared[i]);
-
+ args->busy = 0;
+ dma_resv_iter_begin(&cursor, obj->base.resv, true);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ if (dma_resv_iter_is_restarted(&cursor))
+ args->busy = 0;
+
+ if (dma_resv_iter_is_exclusive(&cursor))
+ /* Translate the exclusive fence to the READ *and* WRITE engine */
+ args->busy |= busy_check_writer(fence);
+ else
+ /* Translate shared fences to READ set of engines */
args->busy |= busy_check_reader(fence);
- }
}
-
- if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
- goto retry;
+ dma_resv_iter_end(&cursor);
err = 0;
out:
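The seqcount retry loop is replaced by the unlocked dma_resv iterator, which signals a concurrent modification through dma_resv_iter_is_restarted() so the caller can drop state gathered on the stale pass. The pattern in isolation (illustrative helper, not from this patch):

static u32 example_collect_busy(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	u32 busy = 0;

	dma_resv_iter_begin(&cursor, resv, true); /* true: shared fences too */
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			busy = 0; /* concurrent update: restart accumulation */

		busy |= 1; /* stand-in for busy_check_reader()/writer() */
	}
	dma_resv_iter_end(&cursor);

	return busy;
}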
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index f0435c6feb68..8a248003dfae 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -69,10 +69,16 @@ static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct clflush *clflush;
assert_object_held(obj);
+ if (IS_DGFX(i915)) {
+ WARN_ON_ONCE(obj->cache_dirty);
+ return false;
+ }
+
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
@@ -105,16 +111,24 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
if (clflush) {
i915_sw_fence_await_reservation(&clflush->base.chain,
obj->base.resv, NULL, true,
- i915_fence_timeout(to_i915(obj->base.dev)),
+ i915_fence_timeout(i915),
I915_FENCE_GFP);
dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
dma_fence_work_commit(&clflush->base);
+ /*
+	 * We must have successfully populated the pages (since we are
+ * holding a pin on the pages as per the flush worker) to reach
+ * this point, which must mean we have already done the required
+ * flush-on-acquire, hence resetting cache_dirty here should be
+ * safe.
+ */
+ obj->cache_dirty = false;
} else if (obj->mm.pages) {
__do_clflush(obj);
+ obj->cache_dirty = false;
} else {
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
}
- obj->cache_dirty = false;
return true;
}
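After this change, cache_dirty is cleared only on the two branches that actually flushed (the async fence worker and the synchronous __do_clflush()), never on discrete parts, which bail out at the top. The invariant, sketched (illustrative only):

static void example_mark_clean(struct drm_i915_gem_object *obj, bool flushed)
{
	/* only a path that really flushed may drop the dirty flag */
	if (flushed)
		obj->cache_dirty = false;
}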
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index c37c9f0d8167..00327b750fbb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -237,7 +237,7 @@ static int proto_context_set_persistence(struct drm_i915_private *i915,
* collateral damage, and we should not pretend we can by
* exposing the interface.
*/
- if (!intel_has_reset_engine(&i915->gt))
+ if (!intel_has_reset_engine(to_gt(i915)))
return -ENODEV;
pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
@@ -254,7 +254,7 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
if (!protected) {
pc->uses_protected_content = false;
- } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) {
+ } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
ret = -ENODEV;
} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
!(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
@@ -268,8 +268,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
*/
pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (!intel_pxp_is_active(&i915->gt.pxp))
- ret = intel_pxp_start(&i915->gt.pxp);
+ if (!intel_pxp_is_active(&to_gt(i915)->pxp))
+ ret = intel_pxp_start(&to_gt(i915)->pxp);
}
return ret;
@@ -479,7 +479,7 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
!IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
drm_dbg(&i915->drm,
- "Bonding on gen12+ aside from TGL, RKL, and ADL_S not supported\n");
+ "Bonding not supported on this platform\n");
return -ENODEV;
}
@@ -572,7 +572,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
intel_engine_mask_t prev_mask;
/* FIXME: This is NIY for execlists */
- if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
+ if (!(intel_uc_uses_guc_submission(&to_gt(i915)->uc)))
return -ENODEV;
if (get_user(slot, &ext->engine_index))
@@ -833,7 +833,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
sseu = &pc->legacy_rcs_sseu;
}
- ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
+ ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
if (ret)
return ret;
@@ -1001,7 +1001,7 @@ static void free_engines_rcu(struct rcu_head *rcu)
free_engines(engines);
}
-static int __i915_sw_fence_call
+static int
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_gem_engines *engines =
@@ -1044,7 +1044,7 @@ static struct i915_gem_engines *alloc_engines(unsigned int count)
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
struct intel_sseu rcs_sseu)
{
- const struct intel_gt *gt = &ctx->i915->gt;
+ const struct intel_gt *gt = to_gt(ctx->i915);
struct intel_engine_cs *engine;
struct i915_gem_engines *e, *err;
enum intel_engine_id id;
@@ -1521,7 +1521,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
* collateral damage, and we should not pretend we can by
* exposing the interface.
*/
- if (!intel_has_reset_engine(&ctx->i915->gt))
+ if (!intel_has_reset_engine(to_gt(ctx->i915)))
return -ENODEV;
i915_gem_context_clear_persistence(ctx);
@@ -1559,7 +1559,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
} else if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(&i915->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(i915), 0);
if (IS_ERR(ppgtt)) {
drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -1742,7 +1742,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (args->flags)
return -EINVAL;
- ppgtt = i915_ppgtt_create(&i915->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(i915), 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -2194,7 +2194,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
return -EINVAL;
- ret = intel_gt_terminally_wedged(&i915->gt);
+ ret = intel_gt_terminally_wedged(to_gt(i915));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 8955d6abcef1..9402d4bf4ffc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -379,7 +379,7 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data
if (ext.flags)
return -EINVAL;
- if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp))
+ if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp))
return -ENODEV;
ext_data->flags |= I915_BO_PROTECTED;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index e8a58c997170..1b526039a60d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -248,8 +248,19 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
if (IS_ERR(pages))
return PTR_ERR(pages);
- /* XXX: consider doing a vmap flush or something */
- if (!HAS_LLC(i915) || i915_gem_object_can_bypass_llc(obj))
+ /*
+ * DG1 is special here since it still snoops transactions even with
+ * CACHE_NONE. This is not the case with other HAS_SNOOP platforms. We
+ * might need to revisit this as we add new discrete platforms.
+ *
+ * XXX: Consider doing a vmap flush or something, where possible.
+	 * Currently we just do a heavy-handed wbinvd_on_all_cpus() here since
+ * the underlying sg_table might not even point to struct pages, so we
+ * can't just call drm_clflush_sg or similar, like we do elsewhere in
+ * the driver.
+ */
+ if (i915_gem_object_can_bypass_llc(obj) ||
+ (!HAS_LLC(i915) && !IS_DG1(i915)))
wbinvd_on_all_cpus();
sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
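The new condition in isolation (illustrative helper): DG1 keeps snooping CPU caches even with CACHE_NONE, so it is carved out of the heavy wbinvd_on_all_cpus() path that other non-LLC platforms still need:

static bool example_needs_wbinvd(struct drm_i915_private *i915,
				 struct drm_i915_gem_object *obj)
{
	/* DG1 snoops even with CACHE_NONE; other !LLC parts do not */
	return i915_gem_object_can_bypass_llc(obj) ||
	       (!HAS_LLC(i915) && !IS_DG1(i915));
}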
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index b684a62bf3b0..26532c07d467 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -18,10 +18,32 @@
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ if (IS_DGFX(i915))
+ return false;
+
return !(obj->cache_level == I915_CACHE_NONE ||
obj->cache_level == I915_CACHE_WT);
}
+bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ if (obj->cache_dirty)
+ return false;
+
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ return true;
+
+ if (IS_DGFX(i915))
+ return false;
+
+ /* Currently in use by HW (display engine)? Keep flushed. */
+ return i915_gem_object_is_framebuffer(obj);
+}
+
static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cb0bf6ffd0e3..3a5b247be738 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -29,6 +29,7 @@
#include "i915_gem_ioctls.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
+#include "i915_vma_snapshot.h"
struct eb_vma {
struct i915_vma *vma;
@@ -307,11 +308,15 @@ struct i915_execbuffer {
struct eb_fence *fences;
unsigned long num_fences;
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+ struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1];
+#endif
};
static int eb_parse(struct i915_execbuffer *eb);
static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
static void eb_unpin_engine(struct i915_execbuffer *eb);
+static void eb_capture_release(struct i915_execbuffer *eb);
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
@@ -990,7 +995,7 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
}
if (!(ev->flags & EXEC_OBJECT_WRITE)) {
- err = dma_resv_reserve_shared(vma->resv, 1);
+ err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
if (err)
return err;
}
@@ -1043,6 +1048,7 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
i915_vma_put(vma);
}
+ eb_capture_release(eb);
eb_unpin_engine(eb);
}
@@ -1092,6 +1098,47 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
return &i915->ggtt;
}
+static void reloc_cache_unmap(struct reloc_cache *cache)
+{
+ void *vaddr;
+
+ if (!cache->vaddr)
+ return;
+
+ vaddr = unmask_page(cache->vaddr);
+ if (cache->vaddr & KMAP)
+ kunmap_atomic(vaddr);
+ else
+ io_mapping_unmap_atomic((void __iomem *)vaddr);
+}
+
+static void reloc_cache_remap(struct reloc_cache *cache,
+ struct drm_i915_gem_object *obj)
+{
+ void *vaddr;
+
+ if (!cache->vaddr)
+ return;
+
+ if (cache->vaddr & KMAP) {
+ struct page *page = i915_gem_object_get_page(obj, cache->page);
+
+ vaddr = kmap_atomic(page);
+ cache->vaddr = unmask_flags(cache->vaddr) |
+ (unsigned long)vaddr;
+ } else {
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+ unsigned long offset;
+
+ offset = cache->node.start;
+ if (!drm_mm_node_allocated(&cache->node))
+ offset += cache->page << PAGE_SHIFT;
+
+ cache->vaddr = (unsigned long)
+ io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+ }
+}
+
static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
{
void *vaddr;
@@ -1356,10 +1403,17 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* batchbuffers.
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- GRAPHICS_VER(eb->i915) == 6) {
+ GRAPHICS_VER(eb->i915) == 6 &&
+ !i915_vma_is_bound(target->vma, I915_VMA_GLOBAL_BIND)) {
+ struct i915_vma *vma = target->vma;
+
+ reloc_cache_unmap(&eb->reloc_cache);
+ mutex_lock(&vma->vm->mutex);
err = i915_vma_bind(target->vma,
target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
+ mutex_unlock(&vma->vm->mutex);
+ reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
if (err)
return err;
}
@@ -1880,36 +1934,113 @@ eb_find_first_request_added(struct i915_execbuffer *eb)
return NULL;
}
-static int eb_move_to_gpu(struct i915_execbuffer *eb)
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+/* Stage with GFP_KERNEL allocations before we enter the signaling critical path */
+static void eb_capture_stage(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
- unsigned int i = count;
- int err = 0, j;
+ unsigned int i = count, j;
+ struct i915_vma_snapshot *vsnap;
while (i--) {
struct eb_vma *ev = &eb->vma[i];
struct i915_vma *vma = ev->vma;
unsigned int flags = ev->flags;
- struct drm_i915_gem_object *obj = vma->obj;
- assert_vma_held(vma);
+ if (!(flags & EXEC_OBJECT_CAPTURE))
+ continue;
- if (flags & EXEC_OBJECT_CAPTURE) {
+ vsnap = i915_vma_snapshot_alloc(GFP_KERNEL);
+ if (!vsnap)
+ continue;
+
+ i915_vma_snapshot_init(vsnap, vma, "user");
+ for_each_batch_create_order(eb, j) {
struct i915_capture_list *capture;
- for_each_batch_create_order(eb, j) {
- if (!eb->requests[j])
- break;
+ capture = kmalloc(sizeof(*capture), GFP_KERNEL);
+ if (!capture)
+ continue;
- capture = kmalloc(sizeof(*capture), GFP_KERNEL);
- if (capture) {
- capture->next =
- eb->requests[j]->capture_list;
- capture->vma = vma;
- eb->requests[j]->capture_list = capture;
- }
- }
+ capture->next = eb->capture_lists[j];
+ capture->vma_snapshot = i915_vma_snapshot_get(vsnap);
+ eb->capture_lists[j] = capture;
+ }
+ i915_vma_snapshot_put(vsnap);
+ }
+}
+
+/* Commit once we're in the critical path */
+static void eb_capture_commit(struct i915_execbuffer *eb)
+{
+ unsigned int j;
+
+ for_each_batch_create_order(eb, j) {
+ struct i915_request *rq = eb->requests[j];
+
+ if (!rq)
+ break;
+
+ rq->capture_list = eb->capture_lists[j];
+ eb->capture_lists[j] = NULL;
+ }
+}
+
+/*
+ * Release anything that didn't get committed due to errors.
+ * The capture_list will otherwise be freed at request retire.
+ */
+static void eb_capture_release(struct i915_execbuffer *eb)
+{
+ unsigned int j;
+
+ for_each_batch_create_order(eb, j) {
+ if (eb->capture_lists[j]) {
+ i915_request_free_capture_list(eb->capture_lists[j]);
+ eb->capture_lists[j] = NULL;
}
+ }
+}
+
+static void eb_capture_list_clear(struct i915_execbuffer *eb)
+{
+ memset(eb->capture_lists, 0, sizeof(eb->capture_lists));
+}
+
+#else
+
+static void eb_capture_stage(struct i915_execbuffer *eb)
+{
+}
+
+static void eb_capture_commit(struct i915_execbuffer *eb)
+{
+}
+
+static void eb_capture_release(struct i915_execbuffer *eb)
+{
+}
+
+static void eb_capture_list_clear(struct i915_execbuffer *eb)
+{
+}
+
+#endif
+
+static int eb_move_to_gpu(struct i915_execbuffer *eb)
+{
+ const unsigned int count = eb->buffer_count;
+ unsigned int i = count;
+ int err = 0, j;
+
+ while (i--) {
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
+ unsigned int flags = ev->flags;
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ assert_vma_held(vma);
/*
* If the GPU is not _reading_ through the CPU cache, we need
@@ -1990,6 +2121,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
/* Unconditionally flush any chipset caches (for streaming writes). */
intel_gt_chipset_flush(eb->gt);
+ eb_capture_commit(eb);
+
return 0;
err_skip:
@@ -2164,7 +2297,7 @@ static int eb_parse(struct i915_execbuffer *eb)
goto err_trampoline;
}
- err = dma_resv_reserve_shared(shadow->resv, 1);
+ err = dma_resv_reserve_shared(shadow->obj->base.resv, 1);
if (err)
goto err_trampoline;
@@ -2276,9 +2409,9 @@ static int eb_submit(struct i915_execbuffer *eb)
return err;
}
-static int num_vcs_engines(const struct drm_i915_private *i915)
+static int num_vcs_engines(struct drm_i915_private *i915)
{
- return hweight_long(VDBOX_MASK(&i915->gt));
+ return hweight_long(VDBOX_MASK(to_gt(i915)));
}
/*
@@ -3114,7 +3247,7 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
/* Allocate a request for this batch buffer nice and early. */
eb->requests[i] = i915_request_create(eb_find_context(eb, i));
if (IS_ERR(eb->requests[i])) {
- out_fence = ERR_PTR(PTR_ERR(eb->requests[i]));
+ out_fence = ERR_CAST(eb->requests[i]);
eb->requests[i] = NULL;
return out_fence;
}
@@ -3132,13 +3265,14 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
}
/*
- * Whilst this request exists, batch_obj will be on the
- * active_list, and so will hold the active reference. Only when
- * this request is retired will the batch_obj be moved onto
- * the inactive_list and lose its active reference. Hence we do
- * not need to explicitly hold another reference here.
+ * Not really on stack, but we don't want to call
+ * kfree on the batch_snapshot when we put it, so use the
+ * _onstack interface.
*/
- eb->requests[i]->batch = eb->batches[i]->vma;
+ if (eb->batches[i]->vma)
+ i915_vma_snapshot_init_onstack(&eb->requests[i]->batch_snapshot,
+ eb->batches[i]->vma,
+ "batch");
if (eb->batch_pool) {
GEM_BUG_ON(intel_context_is_parallel(eb->context));
intel_gt_buffer_pool_mark_active(eb->batch_pool,
@@ -3187,6 +3321,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.fences = NULL;
eb.num_fences = 0;
+ eb_capture_list_clear(&eb);
+
memset(eb.requests, 0, sizeof(struct i915_request *) *
ARRAY_SIZE(eb.requests));
eb.composite_fence = NULL;
@@ -3273,6 +3409,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
}
ww_acquire_done(&eb.ww.ctx);
+ eb_capture_stage(&eb);
out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
if (IS_ERR(out_fence)) {
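The capture-list work is now two-phase: eb_capture_stage() performs all GFP_KERNEL allocation before the dma-fence signalling critical path is entered, and eb_capture_commit() only does pointer assignment, which can neither fail nor sleep. The two phases in miniature (illustrative helpers built from the calls above):

/* stage: may sleep and may fail, so it runs before the critical path */
static struct i915_capture_list *
example_stage_one(struct i915_vma_snapshot *vsnap)
{
	struct i915_capture_list *capture;

	capture = kmalloc(sizeof(*capture), GFP_KERNEL);
	if (capture)
		capture->vma_snapshot = i915_vma_snapshot_get(vsnap);

	return capture;
}

/* commit: plain pointer handover, safe inside the critical path */
static void example_commit_one(struct i915_request *rq,
			       struct i915_capture_list *capture)
{
	capture->next = rq->capture_list;
	rq->capture_list = capture;
}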
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index a57a6b7013c2..c5150a1ee3d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -145,24 +145,10 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
.put_pages = i915_gem_object_put_pages_internal,
};
-/**
- * i915_gem_object_create_internal: create an object with volatile pages
- * @i915: the i915 device
- * @size: the size in bytes of backing storage to allocate for the object
- *
- * Creates a new object that wraps some internal memory for private use.
- * This object is not backed by swappable storage, and as such its contents
- * are volatile and only valid whilst pinned. If the object is reaped by the
- * shrinker, its pages and data will be discarded. Equally, it is not a full
- * GEM object and so not valid for access from userspace. This makes it useful
- * for hardware interfaces like ringbuffers (which are pinned from the time
- * the request is written to the time the hardware stops accessing it), but
- * not for contexts (which need to be preserved when not active for later
- * reuse). Note that it is not cleared upon allocation.
- */
struct drm_i915_gem_object *
-i915_gem_object_create_internal(struct drm_i915_private *i915,
- phys_addr_t size)
+__i915_gem_object_create_internal(struct drm_i915_private *i915,
+ const struct drm_i915_gem_object_ops *ops,
+ phys_addr_t size)
{
static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
@@ -179,7 +165,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, 0);
+ i915_gem_object_init(obj, ops, &lock_class, 0);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
/*
@@ -199,3 +185,25 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
return obj;
}
+
+/**
+ * i915_gem_object_create_internal: create an object with volatile pages
+ * @i915: the i915 device
+ * @size: the size in bytes of backing storage to allocate for the object
+ *
+ * Creates a new object that wraps some internal memory for private use.
+ * This object is not backed by swappable storage, and as such its contents
+ * are volatile and only valid whilst pinned. If the object is reaped by the
+ * shrinker, its pages and data will be discarded. Equally, it is not a full
+ * GEM object and so not valid for access from userspace. This makes it useful
+ * for hardware interfaces like ringbuffers (which are pinned from the time
+ * the request is written to the time the hardware stops accessing it), but
+ * not for contexts (which need to be preserved when not active for later
+ * reuse). Note that it is not cleared upon allocation.
+ */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *i915,
+ phys_addr_t size)
+{
+ return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 65fc6ff5f59d..1478c02a82cb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -17,6 +17,7 @@
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
+#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
@@ -72,7 +73,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
- if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
+ if (args->flags & I915_MMAP_WC && !pat_enabled())
return -ENODEV;
obj = i915_gem_object_lookup(file, args->handle);
@@ -537,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
struct i915_mmap_offset *mmo, *mn;
+ if (obj->ops->unmap_virtual)
+ obj->ops->unmap_virtual(obj);
+
spin_lock(&obj->mmo.lock);
rbtree_postorder_for_each_entry_safe(mmo, mn,
&obj->mmo.offsets, offset) {
@@ -645,7 +649,7 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
goto insert;
/* Attempt to reap some mmap space from dead objects */
- err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
+ err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
NULL);
if (err)
goto err;
@@ -736,7 +740,7 @@ i915_gem_dumb_mmap_offset(struct drm_file *file,
if (HAS_LMEM(to_i915(dev)))
mmap_type = I915_MMAP_TYPE_FIXED;
- else if (boot_cpu_has(X86_FEATURE_PAT))
+ else if (pat_enabled())
mmap_type = I915_MMAP_TYPE_WC;
else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
return -ENODEV;
@@ -792,7 +796,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
break;
case I915_MMAP_OFFSET_WC:
- if (!boot_cpu_has(X86_FEATURE_PAT))
+ if (!pat_enabled())
return -ENODEV;
type = I915_MMAP_TYPE_WC;
break;
@@ -802,7 +806,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
break;
case I915_MMAP_OFFSET_UC:
- if (!boot_cpu_has(X86_FEATURE_PAT))
+ if (!pat_enabled())
return -ENODEV;
type = I915_MMAP_TYPE_UC;
break;
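The switch to pat_enabled() matters because boot_cpu_has(X86_FEATURE_PAT) only reports the CPUID capability, while PAT can be disabled at runtime (e.g. booting with "nopat"); pat_enabled() reflects the effective state. The check in isolation (illustrative helper):

static bool example_can_use_wc_mmap(void)
{
	/* effective PAT state, not merely the CPUID feature bit */
	return pat_enabled();
}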
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 1e426a42a36c..d87b508b59b1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -31,6 +31,7 @@
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
+#include "i915_gem_ttm.h"
#include "i915_memcpy.h"
#include "i915_trace.h"
@@ -91,7 +92,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
}
/**
- * i915_gem_object_fini - Clean up a GEM object initialization
+ * __i915_gem_object_fini - Clean up a GEM object initialization
* @obj: The gem object to cleanup
*
* This function cleans up gem object fields that are set up by
@@ -107,25 +108,29 @@ void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
}
/**
- * Mark up the object's coherency levels for a given cache_level
+ * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
+ * for a given cache_level
* @obj: #drm_i915_gem_object
* @cache_level: cache level
*/
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
obj->cache_level = cache_level;
if (cache_level != I915_CACHE_NONE)
obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
I915_BO_CACHE_COHERENT_FOR_WRITE);
- else if (HAS_LLC(to_i915(obj->base.dev)))
+ else if (HAS_LLC(i915))
obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
else
obj->cache_coherent = 0;
obj->cache_dirty =
- !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
+ !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
+ !IS_DGFX(i915);
}
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
@@ -257,6 +262,8 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
*/
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
+ assert_object_held(obj);
+
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -323,7 +330,16 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
obj->ops->delayed_free(obj);
continue;
}
+
+ if (!i915_gem_object_trylock(obj, NULL)) {
+ /* busy, toss it back to the pile */
+ if (llist_add(&obj->freed, &i915->mm.free_list))
+ queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
+ continue;
+ }
+
__i915_gem_object_pages_fini(obj);
+ i915_gem_object_unlock(obj);
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
@@ -343,7 +359,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
- container_of(work, struct drm_i915_private, mm.free_work);
+ container_of(work, struct drm_i915_private, mm.free_work.work);
i915_gem_flush_free_objects(i915);
}
@@ -364,15 +380,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
atomic_inc(&i915->mm.free_count);
/*
- * This serializes freeing with the shrinker. Since the free
- * is delayed, first by RCU then by the workqueue, we want the
- * shrinker to be able to free pages of unreferenced objects,
- * or else we may oom whilst there are plenty of deferred
- * freed objects.
- */
- i915_gem_object_make_unshrinkable(obj);
-
- /*
* Since we require blocking on struct_mutex to unbind the freed
* object from the GPU before releasing resources back to the
* system, we can not do that directly from the RCU callback (which may
@@ -384,7 +391,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_work(i915->wq, &i915->mm.free_work);
+ queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
}
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -456,7 +463,7 @@ i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset
* from can't cross a page boundary. The caller must ensure that @obj pages
* are pinned and that @obj is synced wrt. any related writes.
*
- * Returns 0 on success or -ENODEV if the type of @obj's backing store is
+ * Return: %0 on success or -ENODEV if the type of @obj's backing store is
* unsupported.
*/
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
@@ -709,7 +716,7 @@ bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
void i915_gem_init__objects(struct drm_i915_private *i915)
{
- INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
void i915_objects_module_exit(void)
@@ -732,6 +739,57 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
.export = i915_gem_prime_export,
};
+/**
+ * i915_gem_object_get_moving_fence - Get the object's moving fence if any
+ * @obj: The object whose moving fence to get.
+ *
+ * A non-signaled moving fence means that there is an async operation
+ * pending on the object that needs to be waited on before setting up
+ * any GPU or CPU PTEs to the object's pages.
+ *
+ * Return: A refcounted pointer to the object's moving fence if any,
+ * NULL otherwise.
+ */
+struct dma_fence *
+i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
+{
+ return dma_fence_get(i915_gem_to_ttm(obj)->moving);
+}
+
+/**
+ * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
+ * @obj: The object whose moving fence to wait for.
+ * @intr: Whether to wait interruptibly.
+ *
+ * If the moving fence signaled without an error, it is detached from the
+ * object and put.
+ *
+ * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
+ * negative error code if the async operation represented by the
+ * moving fence failed.
+ */
+int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
+ bool intr)
+{
+ struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
+ int ret;
+
+ assert_object_held(obj);
+ if (!fence)
+ return 0;
+
+ ret = dma_fence_wait(fence, intr);
+ if (ret)
+ return ret;
+
+ if (fence->error)
+ return fence->error;
+
+ i915_gem_to_ttm(obj)->moving = NULL;
+ dma_fence_put(fence);
+ return 0;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 59201801cec5..f66d46882ea7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -93,7 +93,6 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915);
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
-void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
/**
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
@@ -211,9 +210,13 @@ static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object
return __i915_gem_object_lock(obj, ww, true);
}
-static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww)
{
- return dma_resv_trylock(obj->base.resv);
+ if (!ww)
+ return dma_resv_trylock(obj->base.resv);
+ else
+ return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
@@ -296,6 +299,12 @@ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
+}
+
+static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
@@ -449,7 +458,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
}
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
/**
@@ -512,11 +521,18 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
+struct dma_fence *
+i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
+
+int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
+ bool intr);
+
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
+bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -533,25 +549,15 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
+void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
+void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
-static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- if (obj->cache_dirty)
- return false;
-
- if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
- return true;
-
- /* Currently in use by HW (display engine)? Keep flushed. */
- return i915_gem_object_is_framebuffer(obj);
-}
-
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
- if (cpu_write_needs_clflush(obj))
+ if (i915_gem_cpu_write_needs_clflush(obj))
obj->cache_dirty = true;
}
@@ -613,6 +619,14 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
enum intel_memory_type type);
+int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
+ size_t size, struct intel_memory_region *mr,
+ struct address_space *mapping,
+ unsigned int max_segment);
+void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
+ bool dirty, bool backup);
+void __shmem_writeback(size_t size, struct address_space *mapping);
+
#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
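
i915_gem_object_trylock() now optionally takes an acquire context. A sketch of the shape, using the real dma-resv/ww-mutex API but a hypothetical wrapper name: without a context a plain trylock suffices; with one, ww_mutex_trylock() records the lock in the transaction so a later contended ww_mutex_lock() in the same ww dance can detect the deadlock and back off cleanly.

#include <linux/dma-resv.h>

static bool try_lock_resv(struct dma_resv *resv, struct ww_acquire_ctx *ctx)
{
	if (!ctx)
		return dma_resv_trylock(resv);

	/* Records the lock in @ctx so -EDEADLK handling works later on. */
	return ww_mutex_trylock(&resv->lock, ctx);
}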
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index da85169006d4..0dd107dcecc2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -34,9 +34,11 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_NO_MMAP BIT(3)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
+/* Skip the shrinker management in set_pages/unset_pages */
+#define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY BIT(3)
+#define I915_GEM_OBJECT_NO_MMAP BIT(4)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -54,14 +56,18 @@ struct drm_i915_gem_object_ops {
int (*get_pages)(struct drm_i915_gem_object *obj);
void (*put_pages)(struct drm_i915_gem_object *obj,
struct sg_table *pages);
- void (*truncate)(struct drm_i915_gem_object *obj);
+ int (*truncate)(struct drm_i915_gem_object *obj);
void (*writeback)(struct drm_i915_gem_object *obj);
+ int (*shrinker_release_pages)(struct drm_i915_gem_object *obj,
+ bool no_gpu_wait,
+ bool should_writeback);
int (*pread)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pread *arg);
int (*pwrite)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg);
u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
+ void (*unmap_virtual)(struct drm_i915_gem_object *obj);
int (*dmabuf_export)(struct drm_i915_gem_object *obj);
@@ -305,6 +311,7 @@ struct drm_i915_gem_object {
#define I915_BO_READONLY BIT(6)
#define I915_TILING_QUIRK_BIT 7 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(8)
+#define I915_BO_WAS_BOUND_BIT 9
/**
* @mem_flags - Mutable placement-related flags
*
@@ -486,9 +493,37 @@ struct drm_i915_gem_object {
* instead go through the pin/unpin interfaces.
*/
atomic_t pages_pin_count;
+
+ /**
+ * @shrink_pin: Prevents the pages from being made visible to
+ * the shrinker, while the shrink_pin is non-zero. Most users
+ * should pretty much never have to care about this, outside of
+ * some special use cases.
+ *
+ * By default most objects will start out as visible to the
+ * shrinker (if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the
+ * backing pages are attached to the object, like in
+ * __i915_gem_object_set_pages(). They will then be removed from
+ * the shrinker list once the pages are released.
+ *
+ * The @shrink_pin is incremented by calling
+ * i915_gem_object_make_unshrinkable(), which will also remove
+ * the object from the shrinker list, if the pin count was zero.
+ *
+ * Callers will then typically call
+ * i915_gem_object_make_shrinkable() or
+ * i915_gem_object_make_purgeable() to decrement the pin count,
+ * and make the pages visible again.
+ */
atomic_t shrink_pin;
/**
+ * @ttm_shrinkable: True when the object is using shmem pages
+ * underneath. Protected by the object lock.
+ */
+ bool ttm_shrinkable;
+
+ /**
* Priority list of potential placements for this object.
*/
struct intel_memory_region **placements;
@@ -512,6 +547,7 @@ struct drm_i915_gem_object {
*/
struct list_head region_link;
+ struct i915_refct_sgt *rsgt;
struct sg_table *pages;
void *mapping;
@@ -547,7 +583,7 @@ struct drm_i915_gem_object {
struct i915_gem_object_page_iter get_dma_page;
/**
- * Element within i915->mm.unbound_list or i915->mm.bound_list,
+ * Element within i915->mm.shrink_list or i915->mm.purge_list,
* locked by i915->mm.obj_lock.
*/
struct list_head link;
@@ -565,7 +601,7 @@ struct drm_i915_gem_object {
} mm;
struct {
- struct sg_table *cached_io_st;
+ struct i915_refct_sgt *cached_io_rsgt;
struct i915_gem_object_page_iter get_io_page;
struct drm_i915_gem_object *backup;
bool created:1;
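
The @shrink_pin documentation above boils down to a balanced pair of calls. A sketch under that stated contract (illustrative only; whoever hides the object must eventually make it shrinkable or purgeable again):

static void with_shrinker_hidden(struct drm_i915_gem_object *obj)
{
	/* shrink_pin++: drop the object from the shrinker lists */
	i915_gem_object_make_unshrinkable(obj);

	/* ... work on the pages without the shrinker racing us ... */

	/* shrink_pin--: visible again on mm.shrink_list (WILLNEED) */
	i915_gem_object_make_shrinkable(obj);
}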
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 8eb1c3a6fc9c..a50f884973bc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -10,6 +10,8 @@
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
+#include "gt/intel_gt.h"
+
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -26,6 +28,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
/* Make the pages coherent with the GPU (flushing any swapin). */
if (obj->cache_dirty) {
+ WARN_ON_ONCE(IS_DGFX(i915));
obj->write_domain = 0;
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(pages);
@@ -68,7 +71,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
shrinkable = false;
}
- if (shrinkable) {
+ if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
struct list_head *list;
unsigned long flags;
@@ -158,11 +161,12 @@ retry:
}
/* Immediately discard the backing storage */
-void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- drm_gem_free_mmap_offset(&obj->base);
if (obj->ops->truncate)
- obj->ops->truncate(obj);
+ return obj->ops->truncate(obj);
+
+ return 0;
}
/* Try to discard unwanted pages */
@@ -208,7 +212,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_volatile(obj))
obj->mm.madv = I915_MADV_WILLNEED;
- i915_gem_object_make_unshrinkable(obj);
+ if (!i915_gem_object_has_self_managed_shrink_list(obj))
+ i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
unmap_object(obj, page_mask_bits(obj->mm.mapping));
@@ -218,6 +223,14 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+ if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
+ intel_gt_invalidate_tlbs(to_gt(i915));
+ }
+
return pages;
}
@@ -414,8 +427,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
}
if (!ptr) {
- if (GEM_WARN_ON(type == I915_MAP_WC &&
- !static_cpu_has(X86_FEATURE_PAT)))
+ err = i915_gem_object_wait_moving_fence(obj, true);
+ if (err) {
+ ptr = ERR_PTR(err);
+ goto err_unpin;
+ }
+
+ if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
ptr = ERR_PTR(-ENODEV);
else if (i915_gem_object_has_struct_page(obj))
ptr = i915_gem_object_map_page(obj, type);
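
The hunk adding I915_BO_WAS_BOUND_BIT handling is worth restating on its own: the bit is set when the object is bound into the GTT, and test_and_clear_bit() here means each bind/unbind cycle pays for at most one TLB invalidation, while the _if_active runtime-PM form skips the flush entirely when the device is suspended (its TLBs will not survive resume anyway). The same logic as the hunk above, lifted into a stand-alone helper for clarity:

static void flush_tlbs_on_unbind(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/* Only the first unbind after a bind needs to invalidate. */
	if (!test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags))
		return;

	/* Skip entirely if the device is runtime-suspended. */
	with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
		intel_gt_invalidate_tlbs(to_gt(i915));
}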
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 7986612f48fa..ca6faffcc496 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -19,6 +19,7 @@
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct scatterlist *sg;
struct sg_table *st;
dma_addr_t dma;
@@ -73,7 +74,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
dst += PAGE_SIZE;
}
- intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+ intel_gt_chipset_flush(to_gt(i915));
/* We're no longer struct page backed */
obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
@@ -140,6 +141,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
{
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
int err;
err = i915_gem_object_wait(obj,
@@ -159,7 +161,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
- intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+ intel_gt_chipset_flush(to_gt(i915));
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 726b40e1fbb0..ac56124760e1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -35,7 +35,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- intel_gt_suspend_prepare(&i915->gt);
+ intel_gt_suspend_prepare(to_gt(i915));
i915_gem_drain_freed_objects(i915);
}
@@ -153,7 +153,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition.
*/
- intel_gt_suspend_late(&i915->gt);
+ intel_gt_suspend_late(to_gt(i915));
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
@@ -223,7 +223,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- intel_gt_resume(&i915->gt);
+ intel_gt_resume(to_gt(i915));
ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
GEM_WARN_ON(ret);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index a016ccec36f3..a4350227e9ae 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -11,7 +11,7 @@
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
struct intel_memory_region *mem)
{
- obj->mm.region = intel_memory_region_get(mem);
+ obj->mm.region = mem;
mutex_lock(&mem->objects.lock);
list_add(&obj->mm.region_link, &mem->objects.list);
@@ -25,8 +25,6 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
mutex_lock(&mem->objects.lock);
list_del(&obj->mm.region_link);
mutex_unlock(&mem->objects.lock);
-
- intel_memory_region_put(mem);
}
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index d77da59fae04..cc9fe258fba7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -25,62 +25,67 @@ static void check_release_pagevec(struct pagevec *pvec)
cond_resched();
}
-static int shmem_get_pages(struct drm_i915_gem_object *obj)
+void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
+ bool dirty, bool backup)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct intel_memory_region *mem = obj->mm.region;
- const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ struct sgt_iter sgt_iter;
+ struct pagevec pvec;
+ struct page *page;
+
+ mapping_clear_unevictable(mapping);
+
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, st) {
+ if (dirty)
+ set_page_dirty(page);
+
+ if (backup)
+ mark_page_accessed(page);
+
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
+
+ sg_free_table(st);
+}
+
+int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
+ size_t size, struct intel_memory_region *mr,
+ struct address_space *mapping,
+ unsigned int max_segment)
+{
+ const unsigned long page_count = size / PAGE_SIZE;
unsigned long i;
- struct address_space *mapping;
- struct sg_table *st;
struct scatterlist *sg;
- struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
- unsigned int max_segment = i915_sg_segment_size();
- unsigned int sg_page_sizes;
gfp_t noreclaim;
int ret;
/*
- * Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
- GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
-
- /*
* If there's no chance of allocating enough pages for the whole
* object, bail early.
*/
- if (obj->base.size > resource_size(&mem->region))
+ if (size > resource_size(&mr->region))
return -ENOMEM;
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
+ if (sg_alloc_table(st, page_count, GFP_KERNEL))
return -ENOMEM;
-rebuild_st:
- if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
- kfree(st);
- return -ENOMEM;
- }
-
/*
* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*
* Fail silently without starting the shrinker
*/
- mapping = obj->base.filp->f_mapping;
mapping_set_unevictable(mapping);
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
sg = st->sgl;
st->nents = 0;
- sg_page_sizes = 0;
for (i = 0; i < page_count; i++) {
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
@@ -135,10 +140,9 @@ rebuild_st:
if (!i ||
sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) {
- if (i) {
- sg_page_sizes |= sg->length;
+ if (i)
sg = sg_next(sg);
- }
+
st->nents++;
sg_set_page(sg, page, PAGE_SIZE, 0);
} else {
@@ -149,14 +153,67 @@ rebuild_st:
/* Check that the i965g/gm workaround works. */
GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
}
- if (sg) { /* loop terminated early; short sg table */
- sg_page_sizes |= sg->length;
+ if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
- }
/* Trim unused sg entries to avoid wasting memory. */
i915_sg_trim(st);
+ return 0;
+err_sg:
+ sg_mark_end(sg);
+ if (sg != st->sgl) {
+ shmem_sg_free_table(st, mapping, false, false);
+ } else {
+ mapping_clear_unevictable(mapping);
+ sg_free_table(st);
+ }
+
+ /*
+ * shmemfs first checks if there is enough memory to allocate the page
+ * and reports ENOSPC should there be insufficient, along with the usual
+ * ENOMEM for a genuine allocation failure.
+ *
+ * We use ENOSPC in our driver to mean that we have run out of aperture
+ * space and so want to translate the error from shmemfs back to our
+ * usual understanding of ENOMEM.
+ */
+ if (ret == -ENOSPC)
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+static int shmem_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_memory_region *mem = obj->mm.region;
+ struct address_space *mapping = obj->base.filp->f_mapping;
+ const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ unsigned int max_segment = i915_sg_segment_size();
+ struct sg_table *st;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ int ret;
+
+ /*
+ * Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+
+rebuild_st:
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
+ max_segment);
+ if (ret)
+ goto err_st;
+
ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) {
/*
@@ -168,6 +225,7 @@ rebuild_st:
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
sg_free_table(st);
+ kfree(st);
max_segment = PAGE_SIZE;
goto rebuild_st;
@@ -185,28 +243,12 @@ rebuild_st:
if (i915_gem_object_can_bypass_llc(obj))
obj->cache_dirty = true;
- __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+ __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
return 0;
-err_sg:
- sg_mark_end(sg);
err_pages:
- mapping_clear_unevictable(mapping);
- if (sg != st->sgl) {
- struct pagevec pvec;
-
- pagevec_init(&pvec);
- for_each_sgt_page(page, sgt_iter, st) {
- if (!pagevec_add(&pvec, page))
- check_release_pagevec(&pvec);
- }
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
- }
- sg_free_table(st);
- kfree(st);
-
+ shmem_sg_free_table(st, mapping, false, false);
/*
* shmemfs first checks if there is enough memory to allocate the page
* and reports ENOSPC should there be insufficient, along with the usual
@@ -216,13 +258,16 @@ err_pages:
* space and so want to translate the error from shmemfs back to our
* usual understanding of ENOMEM.
*/
+err_st:
if (ret == -ENOSPC)
ret = -ENOMEM;
+ kfree(st);
+
return ret;
}
-static void
+static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
/*
@@ -234,12 +279,12 @@ shmem_truncate(struct drm_i915_gem_object *obj)
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->mm.madv = __I915_MADV_PURGED;
obj->mm.pages = ERR_PTR(-EFAULT);
+
+ return 0;
}
-static void
-shmem_writeback(struct drm_i915_gem_object *obj)
+void __shmem_writeback(size_t size, struct address_space *mapping)
{
- struct address_space *mapping;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = SWAP_CLUSTER_MAX,
@@ -255,10 +300,9 @@ shmem_writeback(struct drm_i915_gem_object *obj)
* instead of invoking writeback so they are aged and paged out
* as normal.
*/
- mapping = obj->base.filp->f_mapping;
/* Begin writeback on each dirty page */
- for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
struct page *page;
page = find_lock_page(mapping, i);
@@ -281,6 +325,12 @@ put:
}
}
+static void
+shmem_writeback(struct drm_i915_gem_object *obj)
+{
+ __shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
+}
+
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -313,11 +363,6 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
- struct sgt_iter sgt_iter;
- struct pagevec pvec;
- struct page *page;
-
- GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));
__i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
@@ -325,25 +370,10 @@ void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj, pages);
- mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
-
- pagevec_init(&pvec);
- for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty)
- set_page_dirty(page);
-
- if (obj->mm.madv == I915_MADV_WILLNEED)
- mark_page_accessed(page);
-
- if (!pagevec_add(&pvec, page))
- check_release_pagevec(&pvec);
- }
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
- obj->mm.dirty = false;
-
- sg_free_table(pages);
+ shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
+ obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
kfree(pages);
+ obj->mm.dirty = false;
}
static void
@@ -634,9 +664,10 @@ static int init_shmem(struct intel_memory_region *mem)
return 0; /* Don't error, we can simply fallback to the kernel mnt */
}
-static void release_shmem(struct intel_memory_region *mem)
+static int release_shmem(struct intel_memory_region *mem)
{
i915_gemfs_fini(mem->i915);
+ return 0;
}
static const struct intel_memory_region_ops shmem_region_ops = {
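
shmem_sg_alloc_table() and shmem_sg_free_table() are now exported so the TTM backend can reuse them. A usage sketch based on the signatures declared earlier (error handling trimmed; demo_shmem_sg() is hypothetical). Note that free must name the same mapping the allocation pinned pages from, and the dirty/backup flags decide whether pages are marked dirty and aged on release:

static int demo_shmem_sg(struct drm_i915_private *i915,
			 struct intel_memory_region *mr,
			 struct address_space *mapping, size_t size)
{
	struct sg_table st;
	int ret;

	ret = shmem_sg_alloc_table(i915, &st, size, mr, mapping,
				   i915_sg_segment_size());
	if (ret)
		return ret;	/* -ENOSPC already translated to -ENOMEM */

	/* ... dma_map_sgtable(), use the pages ... */

	/* Release against the same mapping; dirty the pages, don't age them. */
	shmem_sg_free_table(&st, mapping, true, false);
	return 0;
}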
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 5ab136ffdeb2..cc927e49d21f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -15,7 +15,6 @@
#include "gt/intel_gt_requests.h"
-#include "dma_resv_utils.h"
#include "i915_trace.h"
static bool swap_available(void)
@@ -37,8 +36,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
-static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
- unsigned long shrink, bool trylock_vm)
+static int drop_pages(struct drm_i915_gem_object *obj,
+ unsigned long shrink, bool trylock_vm)
{
unsigned long flags;
@@ -56,19 +55,25 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
return false;
}
-static void try_to_writeback(struct drm_i915_gem_object *obj,
- unsigned int flags)
+static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
{
+ if (obj->ops->shrinker_release_pages)
+ return obj->ops->shrinker_release_pages(obj,
+ !(flags & I915_SHRINK_ACTIVE),
+ flags & I915_SHRINK_WRITEBACK);
+
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
- return;
+ return 0;
case __I915_MADV_PURGED:
- return;
+ return 0;
}
if (flags & I915_SHRINK_WRITEBACK)
i915_gem_object_writeback(obj);
+
+ return 0;
}
/**
@@ -148,7 +153,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
*/
if (shrink & I915_SHRINK_ACTIVE)
/* Retire requests to unpin all idle contexts */
- intel_gt_retire_requests(&i915->gt);
+ intel_gt_retire_requests(to_gt(i915));
/*
* As we may completely rewrite the (un)bound list whilst unbinding
@@ -209,27 +214,23 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- err = 0;
- if (unsafe_drop_pages(obj, shrink, trylock_vm)) {
- /* May arrive from get_pages on another bo */
- if (!ww) {
- if (!i915_gem_object_trylock(obj))
- goto skip;
- } else {
- err = i915_gem_object_lock(obj, ww);
- if (err)
- goto skip;
- }
-
- if (!__i915_gem_object_put_pages(obj)) {
- try_to_writeback(obj, shrink);
- count += obj->base.size >> PAGE_SHIFT;
- }
- if (!ww)
- i915_gem_object_unlock(obj);
+ /* May arrive from get_pages on another bo */
+ if (!ww) {
+ if (!i915_gem_object_trylock(obj, NULL))
+ goto skip;
+ } else {
+ err = i915_gem_object_lock(obj, ww);
+ if (err)
+ goto skip;
}
- dma_resv_prune(obj->base.resv);
+ if (drop_pages(obj, shrink, trylock_vm) &&
+ !__i915_gem_object_put_pages(obj) &&
+ !try_to_writeback(obj, shrink))
+ count += obj->base.size >> PAGE_SHIFT;
+
+ if (!ww)
+ i915_gem_object_unlock(obj);
scanned += obj->base.size >> PAGE_SHIFT;
skip:
@@ -404,12 +405,18 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
list_for_each_entry_safe(vma, next,
&i915->ggtt.vm.bound_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
+ struct drm_i915_gem_object *obj = vma->obj;
if (!vma->iomap || i915_vma_is_active(vma))
continue;
+ if (!i915_gem_object_trylock(obj, NULL))
+ continue;
+
if (__i915_vma_unbind(vma) == 0)
freed_pages += count;
+
+ i915_gem_object_unlock(obj);
}
mutex_unlock(&i915->ggtt.vm.mutex);
@@ -458,6 +465,16 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
+/**
+ * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By
+ * default all object types that support shrinking (see IS_SHRINKABLE) will also
+ * make the object visible to the shrinker after allocating the system memory
+ * pages.
+ * @obj: The GEM object.
+ *
+ * This is typically used for special kernel internal objects that can't be
+ * easily processed by the shrinker, for example because they are perma-pinned.
+ */
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = obj_to_i915(obj);
@@ -482,13 +499,12 @@ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
-static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
- struct list_head *head)
+static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
+ struct list_head *head)
{
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;
- GEM_BUG_ON(!i915_gem_object_has_pages(obj));
if (!i915_gem_object_is_shrinkable(obj))
return;
@@ -508,14 +524,67 @@ static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+/**
+ * __i915_gem_object_make_shrinkable - Move the object to the tail of the
+ * shrinkable list. Objects on this list might be swapped out. Used with
+ * WILLNEED objects.
+ * @obj: The GEM object.
+ *
+ * DO NOT USE. This is intended to be called on very special objects that don't
+ * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
+ * underneath.
+ */
+void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
+{
+ ___i915_gem_object_make_shrinkable(obj,
+ &obj_to_i915(obj)->mm.shrink_list);
+}
+
+/**
+ * __i915_gem_object_make_purgeable - Move the object to the tail of the
+ * purgeable list. Objects on this list might be swapped out. Used with
+ * DONTNEED objects.
+ * @obj: The GEM object.
+ *
+ * DO NOT USE. This is intended to be called on very special objects that don't
+ * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
+ * underneath.
+ */
+void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
+{
+ ___i915_gem_object_make_shrinkable(obj,
+ &obj_to_i915(obj)->mm.purge_list);
+}
+
+/**
+ * i915_gem_object_make_shrinkable - Move the object to the tail of the
+ * shrinkable list. Objects on this list might be swapped out. Used with
+ * WILLNEED objects.
+ * @obj: The GEM object.
+ *
+ * MUST only be called on objects which have backing pages.
+ *
+ * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
+ */
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
- __i915_gem_object_make_shrinkable(obj,
- &obj_to_i915(obj)->mm.shrink_list);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ __i915_gem_object_make_shrinkable(obj);
}
+/**
+ * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable
+ * list. Used with DONTNEED objects. Unlike with shrinkable objects, the
+ * shrinker will attempt to discard the backing pages, instead of trying to swap
+ * them out.
+ * @obj: The GEM object.
+ *
+ * MUST only be called on objects which have backing pages.
+ *
+ * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
+ */
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
- __i915_gem_object_make_shrinkable(obj,
- &obj_to_i915(obj)->mm.purge_list);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ __i915_gem_object_make_purgeable(obj);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index ddd37ccb1362..7df50fd6cc7b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -399,7 +399,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
return 0;
}
- if (intel_vtd_active() && GRAPHICS_VER(i915) < 8) {
+ if (intel_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
drm_notice(&i915->drm,
"%s, disabling use of stolen memory\n",
"DMAR active");
@@ -488,6 +488,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
return 0;
}
+ /* Exclude the reserved region from driver use */
+ mem->region.end = reserved_base - 1;
+
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
@@ -653,7 +656,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
- if (WARN_ON(!i915_gem_object_trylock(obj)))
+ if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
return -EBUSY;
i915_gem_object_init_memory_region(obj, mem);
@@ -720,9 +723,10 @@ static int init_stolen_smem(struct intel_memory_region *mem)
return i915_gem_init_stolen(mem);
}
-static void release_stolen_smem(struct intel_memory_region *mem)
+static int release_stolen_smem(struct intel_memory_region *mem)
{
i915_gem_cleanup_stolen(mem->i915);
+ return 0;
}
static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
@@ -759,10 +763,11 @@ err_fini:
return err;
}
-static void release_stolen_lmem(struct intel_memory_region *mem)
+static int release_stolen_lmem(struct intel_memory_region *mem)
{
io_mapping_fini(&mem->iomap);
i915_gem_cleanup_stolen(mem->i915);
+ return 0;
}
static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
@@ -778,6 +783,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
struct intel_uncore *uncore = &i915->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_memory_region *mem;
+ resource_size_t min_page_size;
resource_size_t io_start;
resource_size_t lmem_size;
u64 lmem_base;
@@ -789,8 +795,11 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
lmem_size = pci_resource_len(pdev, 2) - lmem_base;
io_start = pci_resource_start(pdev, 2) + lmem_base;
+ min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
+ I915_GTT_PAGE_SIZE_4K;
+
mem = intel_memory_region_create(i915, lmem_base, lmem_size,
- I915_GTT_PAGE_SIZE_4K, io_start,
+ min_page_size, io_start,
type, instance,
&i915_region_stolen_lmem_ops);
if (IS_ERR(mem))
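
The only non-mechanical change in the stolen-LMEM setup is the minimum page size: platforms whose GTT demands 64K granularity for local memory must create the region with that allocation unit, while everyone else stays on 4K. A one-line sketch of the selection (illustrative helper name):

static resource_size_t stolen_lmem_min_page_size(const struct drm_i915_private *i915)
{
	return HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
				     I915_GTT_PAGE_SIZE_4K;
}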
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index 1929d6cf4150..75501db71041 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -38,12 +38,13 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
{
const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_private *i915 = to_i915(dev);
struct i915_gem_context *ctx;
unsigned long idx;
long ret;
/* ABI: return -EIO if already wedged */
- ret = intel_gt_terminally_wedged(&to_i915(dev)->gt);
+ ret = intel_gt_terminally_wedged(to_gt(i915));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 74a1ffd0d7dd..de3fe79b665a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -14,13 +14,9 @@
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
-
-#include "gt/intel_engine_pm.h"
-#include "gt/intel_gt.h"
-#include "gt/intel_migrate.h"
-
#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
#define I915_TTM_PRIO_HAS_PAGES 2
@@ -34,7 +30,9 @@
* struct i915_ttm_tt - TTM page vector with additional private information
* @ttm: The base TTM page vector.
* @dev: The struct device used for dma mapping and unmapping.
- * @cached_st: The cached scatter-gather table.
+ * @cached_rsgt: The cached scatter-gather table.
+ * @is_shmem: Set if using shmem.
+ * @filp: The shmem file, if using shmem backend.
*
* Note that DMA may be going on right up to the point where the page-
* vector is unpopulated in delayed destroy. Hence keep the
@@ -45,7 +43,10 @@
struct i915_ttm_tt {
struct ttm_tt ttm;
struct device *dev;
- struct sg_table *cached_st;
+ struct i915_refct_sgt cached_rsgt;
+
+ bool is_shmem;
+ struct file *filp;
};
static const struct ttm_place sys_placement_flags = {
@@ -103,37 +104,15 @@ static int i915_ttm_err_to_gem(int err)
return err;
}
-static bool gpu_binds_iomem(struct ttm_resource *mem)
-{
- return mem->mem_type != TTM_PL_SYSTEM;
-}
-
-static bool cpu_maps_iomem(struct ttm_resource *mem)
-{
- /* Once / if we support GGTT, this is also false for cached ttm_tts */
- return mem->mem_type != TTM_PL_SYSTEM;
-}
-
-static enum i915_cache_level
-i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
- struct ttm_tt *ttm)
-{
- return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) &&
- ttm->caching == ttm_cached) ? I915_CACHE_LLC :
- I915_CACHE_NONE;
-}
-
-static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
-
static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{
/*
- * Objects only allowed in system get cached cpu-mappings.
- * Other objects get WC mapping for now. Even if in system.
+ * Objects only allowed in system get cached cpu-mappings, or when
+ * evicting lmem-only buffers to system for swapping. Other objects get
+ * WC mapping for now. Even if in system.
*/
- if (obj->mm.region->type == INTEL_MEMORY_SYSTEM &&
- obj->mm.n_placements <= 1)
+ if (obj->mm.n_placements <= 1)
return ttm_cached;
return ttm_write_combined;
@@ -179,15 +158,103 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
placement->busy_placement = busy;
}
+static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
+{
+ struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
+ struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
+ struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+ const unsigned int max_segment = i915_sg_segment_size();
+ const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;
+ struct file *filp = i915_tt->filp;
+ struct sgt_iter sgt_iter;
+ struct sg_table *st;
+ struct page *page;
+ unsigned long i;
+ int err;
+
+ if (!filp) {
+ struct address_space *mapping;
+ gfp_t mask;
+
+ filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+
+ mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+
+ mapping = filp->f_mapping;
+ mapping_set_gfp_mask(mapping, mask);
+ GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
+
+ i915_tt->filp = filp;
+ }
+
+ st = &i915_tt->cached_rsgt.table;
+ err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping,
+ max_segment);
+ if (err)
+ return err;
+
+ err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (err)
+ goto err_free_st;
+
+ i = 0;
+ for_each_sgt_page(page, sgt_iter, st)
+ ttm->pages[i++] = page;
+
+ if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
+ ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
+
+ return 0;
+
+err_free_st:
+ shmem_sg_free_table(st, filp->f_mapping, false, false);
+
+ return err;
+}
+
+static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)
+{
+ struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+ bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;
+ struct sg_table *st = &i915_tt->cached_rsgt.table;
+
+ shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping,
+ backup, backup);
+}
+
+static void i915_ttm_tt_release(struct kref *ref)
+{
+ struct i915_ttm_tt *i915_tt =
+ container_of(ref, typeof(*i915_tt), cached_rsgt.kref);
+ struct sg_table *st = &i915_tt->cached_rsgt.table;
+
+ GEM_WARN_ON(st->sgl);
+
+ kfree(i915_tt);
+}
+
+static const struct i915_refct_sgt_ops tt_rsgt_ops = {
+ .release = i915_ttm_tt_release
+};
+
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ enum ttm_caching caching;
struct i915_ttm_tt *i915_tt;
int ret;
+ if (!obj)
+ return NULL;
+
i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
if (!i915_tt)
return NULL;
@@ -196,38 +263,66 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
man->use_tt)
page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
- ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
- i915_ttm_select_tt_caching(obj));
- if (ret) {
- kfree(i915_tt);
- return NULL;
+ caching = i915_ttm_select_tt_caching(obj);
+ if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
+ page_flags |= TTM_TT_FLAG_EXTERNAL |
+ TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+ i915_tt->is_shmem = true;
}
+ ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching);
+ if (ret)
+ goto err_free;
+
+ __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size,
+ &tt_rsgt_ops);
+
i915_tt->dev = obj->base.dev->dev;
return &i915_tt->ttm;
+
+err_free:
+ kfree(i915_tt);
+ return NULL;
+}
+
+static int i915_ttm_tt_populate(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
+{
+ struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+
+ if (i915_tt->is_shmem)
+ return i915_ttm_tt_shmem_populate(bdev, ttm, ctx);
+
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+ struct sg_table *st = &i915_tt->cached_rsgt.table;
- if (i915_tt->cached_st) {
- dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
- DMA_BIDIRECTIONAL, 0);
- sg_free_table(i915_tt->cached_st);
- kfree(i915_tt->cached_st);
- i915_tt->cached_st = NULL;
+ if (st->sgl)
+ dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
+
+ if (i915_tt->is_shmem) {
+ i915_ttm_tt_shmem_unpopulate(ttm);
+ } else {
+ sg_free_table(st);
+ ttm_pool_free(&bdev->pool, ttm);
}
- ttm_pool_free(&bdev->pool, ttm);
}
static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
+ if (i915_tt->filp)
+ fput(i915_tt->filp);
+
ttm_tt_fini(ttm);
- kfree(i915_tt);
+ i915_refct_sgt_put(&i915_tt->cached_rsgt);
}
static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
@@ -235,6 +330,17 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ if (!obj)
+ return false;
+
+ /*
+ * EXTERNAL objects should never be swapped out by TTM; instead we need
+ * to handle that ourselves. TTM will already skip such objects for us,
+ * but we would like to avoid grabbing locks for no good reason.
+ */
+ if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
+ return false;
+
/* Will do for now. Our pinned objects are still on TTM's LRU lists */
return i915_gem_object_evictable(obj);
}
@@ -245,28 +351,19 @@ static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
*placement = i915_sys_placement;
}
-static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
-{
- struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- int ret;
-
- ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
- if (ret)
- return ret;
-
- ret = __i915_gem_object_put_pages(obj);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
+/**
+ * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information
+ * @obj: The GEM object
+ * This function frees any LMEM-related information that is cached on
+ * the object. For example, the radix tree for fast page lookup and the
+ * cached refcounted sg-table.
+ */
+void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
{
struct radix_tree_iter iter;
void __rcu **slot;
- if (!obj->ttm.cached_io_st)
+ if (!obj->ttm.cached_io_rsgt)
return;
rcu_read_lock();
@@ -274,93 +371,106 @@ static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
rcu_read_unlock();
- sg_free_table(obj->ttm.cached_io_st);
- kfree(obj->ttm.cached_io_st);
- obj->ttm.cached_io_st = NULL;
+ i915_refct_sgt_put(obj->ttm.cached_io_rsgt);
+ obj->ttm.cached_io_rsgt = NULL;
}
-static void
-i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
+/**
+ * i915_ttm_purge - Clear an object of its memory
+ * @obj: The object
+ *
+ * This function is called to clear an object of its memory when it is
+ * marked as not needed anymore.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int i915_ttm_purge(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct i915_ttm_tt *i915_tt =
+ container_of(bo->ttm, typeof(*i915_tt), ttm);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ };
+ struct ttm_placement place = {};
+ int ret;
- if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
- obj->write_domain = I915_GEM_DOMAIN_WC;
- obj->read_domains = I915_GEM_DOMAIN_WC;
- } else {
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- }
-}
+ if (obj->mm.madv == __I915_MADV_PURGED)
+ return 0;
-static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
-{
- struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- unsigned int cache_level;
- unsigned int i;
+ ret = ttm_bo_validate(bo, &place, &ctx);
+ if (ret)
+ return ret;
- /*
- * If object was moved to an allowable region, update the object
- * region to consider it migrated. Note that if it's currently not
- * in an allowable region, it's evicted and we don't update the
- * object region.
- */
- if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
- for (i = 0; i < obj->mm.n_placements; ++i) {
- struct intel_memory_region *mr = obj->mm.placements[i];
-
- if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
- mr != obj->mm.region) {
- i915_gem_object_release_memory_region(obj);
- i915_gem_object_init_memory_region(obj, mr);
- break;
- }
- }
+ if (bo->ttm && i915_tt->filp) {
+ /*
+ * The below fput() (which eventually calls shmem_truncate()) might
+ * be delayed by a worker, so when called directly to purge the
+ * pages (like by the shrinker) we should try to be more
+ * aggressive and release the pages immediately.
+ */
+ shmem_truncate_range(file_inode(i915_tt->filp),
+ 0, (loff_t)-1);
+ fput(fetch_and_zero(&i915_tt->filp));
}
- obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
-
- obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
- I915_BO_FLAG_STRUCT_PAGE;
+ obj->write_domain = 0;
+ obj->read_domains = 0;
+ i915_ttm_adjust_gem_after_move(obj);
+ i915_ttm_free_cached_io_rsgt(obj);
+ obj->mm.madv = __I915_MADV_PURGED;
- cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
- bo->ttm);
- i915_gem_object_set_cache_coherency(obj, cache_level);
+ return 0;
}
-static void i915_ttm_purge(struct drm_i915_gem_object *obj)
+static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
+ bool no_wait_gpu,
+ bool should_writeback)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct i915_ttm_tt *i915_tt =
+ container_of(bo->ttm, typeof(*i915_tt), ttm);
struct ttm_operation_ctx ctx = {
.interruptible = true,
- .no_wait_gpu = false,
+ .no_wait_gpu = no_wait_gpu,
};
struct ttm_placement place = {};
int ret;
- if (obj->mm.madv == __I915_MADV_PURGED)
- return;
+ if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+ return 0;
+
+ GEM_BUG_ON(!i915_tt->is_shmem);
+
+ if (!i915_tt->filp)
+ return 0;
+
+ ret = ttm_bo_wait_ctx(bo, &ctx);
+ if (ret)
+ return ret;
+
+ switch (obj->mm.madv) {
+ case I915_MADV_DONTNEED:
+ return i915_ttm_purge(obj);
+ case __I915_MADV_PURGED:
+ return 0;
+ }
+
+ if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)
+ return 0;
- /* TTM's purge interface. Note that we might be reentering. */
+ bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
ret = ttm_bo_validate(bo, &place, &ctx);
- if (!ret) {
- obj->write_domain = 0;
- obj->read_domains = 0;
- i915_ttm_adjust_gem_after_move(obj);
- i915_ttm_free_cached_io_st(obj);
- obj->mm.madv = __I915_MADV_PURGED;
+ if (ret) {
+ bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
+ return ret;
}
-}
-static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
-{
- struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- int ret = i915_ttm_move_notify(bo);
+ if (should_writeback)
+ __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
- GEM_WARN_ON(ret);
- GEM_WARN_ON(obj->ttm.cached_io_st);
- if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
- i915_ttm_purge(obj);
+ return 0;
}
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
@@ -369,232 +479,115 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
if (likely(obj)) {
__i915_gem_object_pages_fini(obj);
- i915_ttm_free_cached_io_st(obj);
+ i915_ttm_free_cached_io_rsgt(obj);
}
}
-static struct intel_memory_region *
-i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
-{
- struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
-
- /* There's some room for optimization here... */
- GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
- ttm_mem_type < I915_PL_LMEM0);
- if (ttm_mem_type == I915_PL_SYSTEM)
- return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
- 0);
-
- return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
- ttm_mem_type - I915_PL_LMEM0);
-}
-
-static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
+static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
struct sg_table *st;
int ret;
- if (i915_tt->cached_st)
- return i915_tt->cached_st;
-
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- return ERR_PTR(-ENOMEM);
+ if (i915_tt->cached_rsgt.table.sgl)
+ return i915_refct_sgt_get(&i915_tt->cached_rsgt);
+ st = &i915_tt->cached_rsgt.table;
ret = sg_alloc_table_from_pages_segment(st,
ttm->pages, ttm->num_pages,
0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
i915_sg_segment_size(), GFP_KERNEL);
if (ret) {
- kfree(st);
+ st->sgl = NULL;
return ERR_PTR(ret);
}
ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
if (ret) {
sg_free_table(st);
- kfree(st);
return ERR_PTR(ret);
}
- i915_tt->cached_st = st;
- return st;
+ return i915_refct_sgt_get(&i915_tt->cached_rsgt);
}
-static struct sg_table *
+/**
+ * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the
+ * resource memory
+ * @obj: The GEM object used for sg-table caching
+ * @res: The struct ttm_resource for which an sg-table is requested.
+ *
+ * This function returns a refcounted sg-table representing the memory
+ * pointed to by @res. If @res is the object's current resource it may also
+ * cache the sg_table on the object or attempt to access an already cached
+ * sg-table. The refcounted sg-table needs to be put when no longer in use.
+ *
+ * Return: A valid pointer to a struct i915_refct_sgt or error pointer on
+ * failure.
+ */
+struct i915_refct_sgt *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
struct ttm_resource *res)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- if (!gpu_binds_iomem(res))
+ if (!i915_ttm_gtt_binds_lmem(res))
return i915_ttm_tt_get_st(bo->ttm);
/*
* If CPU mapping differs, we need to add the ttm_tt pages to
* the resulting st. Might make sense for GGTT.
*/
- GEM_WARN_ON(!cpu_maps_iomem(res));
- return intel_region_ttm_resource_to_st(obj->mm.region, res);
-}
-
-static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
- bool clear,
- struct ttm_resource *dst_mem,
- struct ttm_tt *dst_ttm,
- struct sg_table *dst_st)
-{
- struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
- bdev);
- struct ttm_resource_manager *src_man =
- ttm_manager_type(bo->bdev, bo->resource->mem_type);
- struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- struct sg_table *src_st;
- struct i915_request *rq;
- struct ttm_tt *src_ttm = bo->ttm;
- enum i915_cache_level src_level, dst_level;
- int ret;
+ GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res));
+ if (bo->resource == res) {
+ if (!obj->ttm.cached_io_rsgt) {
+ struct i915_refct_sgt *rsgt;
- if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt))
- return -EINVAL;
+ rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
+ res);
+ if (IS_ERR(rsgt))
+ return rsgt;
- dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
- if (clear) {
- if (bo->type == ttm_bo_type_kernel)
- return -EINVAL;
-
- intel_engine_pm_get(i915->gt.migrate.context->engine);
- ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
- dst_st->sgl, dst_level,
- gpu_binds_iomem(dst_mem),
- 0, &rq);
-
- if (!ret && rq) {
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
- }
- intel_engine_pm_put(i915->gt.migrate.context->engine);
- } else {
- src_st = src_man->use_tt ? i915_ttm_tt_get_st(src_ttm) :
- obj->ttm.cached_io_st;
-
- src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
- intel_engine_pm_get(i915->gt.migrate.context->engine);
- ret = intel_context_migrate_copy(i915->gt.migrate.context,
- NULL, src_st->sgl, src_level,
- gpu_binds_iomem(bo->resource),
- dst_st->sgl, dst_level,
- gpu_binds_iomem(dst_mem),
- &rq);
- if (!ret && rq) {
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
+ obj->ttm.cached_io_rsgt = rsgt;
}
- intel_engine_pm_put(i915->gt.migrate.context->engine);
+ return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
}
- return ret;
+ return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
}
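+/*
+ * Hedged usage sketch, not part of this patch: per the kernel-doc above,
+ * callers are expected to balance the implicit get with
+ * i915_refct_sgt_put() once done with the table, e.g.:
+ *
+ * rsgt = i915_ttm_resource_get_st(obj, bo->resource);
+ * if (IS_ERR(rsgt))
+ * return PTR_ERR(rsgt);
+ * ...use rsgt->table...
+ * i915_refct_sgt_put(rsgt);
+ */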
-static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
- struct ttm_resource *dst_mem,
- struct ttm_tt *dst_ttm,
- struct sg_table *dst_st,
- bool allow_accel)
+static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{
- int ret = -EINVAL;
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ int err;
- if (allow_accel)
- ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, dst_st);
- if (ret) {
- struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- struct intel_memory_region *dst_reg, *src_reg;
- union {
- struct ttm_kmap_iter_tt tt;
- struct ttm_kmap_iter_iomap io;
- } _dst_iter, _src_iter;
- struct ttm_kmap_iter *dst_iter, *src_iter;
-
- dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
- src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
- GEM_BUG_ON(!dst_reg || !src_reg);
-
- dst_iter = !cpu_maps_iomem(dst_mem) ?
- ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) :
- ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
- dst_st, dst_reg->region.start);
-
- src_iter = !cpu_maps_iomem(bo->resource) ?
- ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
- ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
- obj->ttm.cached_io_st,
- src_reg->region.start);
-
- ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
- }
+ WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
+
+ err = i915_ttm_move_notify(bo);
+ if (err)
+ return err;
+
+ return i915_ttm_purge(obj);
}
-static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *dst_mem,
- struct ttm_place *hop)
+static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- struct ttm_resource_manager *dst_man =
- ttm_manager_type(bo->bdev, dst_mem->mem_type);
- struct ttm_tt *ttm = bo->ttm;
- struct sg_table *dst_st;
- bool clear;
int ret;
- /* Sync for now. We could do the actual copy async. */
- ret = ttm_bo_wait_ctx(bo, ctx);
- if (ret)
- return ret;
+ if (!obj)
+ return;
ret = i915_ttm_move_notify(bo);
- if (ret)
- return ret;
-
- if (obj->mm.madv != I915_MADV_WILLNEED) {
+ GEM_WARN_ON(ret);
+ GEM_WARN_ON(obj->ttm.cached_io_rsgt);
+ if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
i915_ttm_purge(obj);
- ttm_resource_free(bo, &dst_mem);
- return 0;
- }
-
- /* Populate ttm with pages if needed. Typically system memory. */
- if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
- ret = ttm_tt_populate(bo->bdev, ttm, ctx);
- if (ret)
- return ret;
- }
-
- dst_st = i915_ttm_resource_get_st(obj, dst_mem);
- if (IS_ERR(dst_st))
- return PTR_ERR(dst_st);
-
- clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
- if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
- __i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_st, true);
-
- ttm_bo_move_sync_cleanup(bo, dst_mem);
- i915_ttm_adjust_domains_after_move(obj);
- i915_ttm_free_cached_io_st(obj);
-
- if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) {
- obj->ttm.cached_io_st = dst_st;
- obj->ttm.get_io_page.sg_pos = dst_st->sgl;
- obj->ttm.get_io_page.sg_idx = 0;
- }
-
- i915_ttm_adjust_gem_after_move(obj);
- return 0;
}
static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
- if (!cpu_maps_iomem(mem))
+ if (!i915_ttm_cpu_maps_iomem(mem))
return 0;
mem->bus.caching = ttm_write_combined;
@@ -607,19 +600,26 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
struct scatterlist *sg;
+ unsigned long base;
unsigned int ofs;
+ GEM_BUG_ON(!obj);
GEM_WARN_ON(bo->ttm);
+ base = obj->mm.region->iomap.base - obj->mm.region->region.start;
sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}
+/*
+ * All callbacks need to take care not to downcast a struct ttm_buffer_object
+ * without checking its subclass, since it might be a TTM ghost object.
+ */
static struct ttm_device_funcs i915_ttm_bo_driver = {
.ttm_tt_create = i915_ttm_tt_create,
+ .ttm_tt_populate = i915_ttm_tt_populate,
.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
.ttm_tt_destroy = i915_ttm_tt_destroy,
.eviction_valuable = i915_ttm_eviction_valuable,
@@ -649,7 +649,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
.interruptible = true,
.no_wait_gpu = false,
};
- struct sg_table *st;
int real_num_busy;
int ret;
@@ -676,7 +675,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
return i915_ttm_err_to_gem(ret);
}
- i915_ttm_adjust_lru(obj);
if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
if (ret)
@@ -687,14 +685,19 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
}
if (!i915_gem_object_has_pages(obj)) {
- /* Object either has a page vector or is an iomem object */
- st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
- if (IS_ERR(st))
- return PTR_ERR(st);
+ struct i915_refct_sgt *rsgt =
+ i915_ttm_resource_get_st(obj, bo->resource);
- __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
+ if (IS_ERR(rsgt))
+ return PTR_ERR(rsgt);
+
+ GEM_BUG_ON(obj->mm.rsgt);
+ obj->mm.rsgt = rsgt;
+ __i915_gem_object_set_pages(obj, &rsgt->table,
+ i915_sg_dma_sizes(rsgt->table.sgl));
}
+ i915_ttm_adjust_lru(obj);
return ret;
}
@@ -766,12 +769,21 @@ static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
* and shrinkers will move it out if needed.
*/
- i915_ttm_adjust_lru(obj);
+ if (obj->mm.rsgt)
+ i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
}
-static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
+/**
+ * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists.
+ * @obj: The object
+ */
+void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct i915_ttm_tt *i915_tt =
+ container_of(bo->ttm, typeof(*i915_tt), ttm);
+ bool shrinkable =
+ bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm);
/*
* Don't manipulate the TTM LRUs while in TTM bo destruction.
@@ -781,10 +793,53 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
return;
/*
+ * We skip managing the shrinker LRU in set_pages() and just manage
+ * everything here. This does at least solve the issue with having
+ * temporary shmem mappings (like with evicted lmem) not being visible to
+ * the shrinker. Only our shmem objects are shrinkable; everything else
+ * we keep as unshrinkable.
+ *
+ * To make sure everything plays nice we keep an extra shrink pin in TTM
+ * if the underlying pages are not currently shrinkable. Once we release
+ * our pin, like when the pages are moved to shmem, the pages will then
+ * be added to the shrinker LRU, assuming the caller isn't also holding
+ * a pin.
+ *
+ * TODO: consider also bumping the shrinker list here when we have
+ * already unpinned it, which should give us something more like an LRU.
+ *
+ * TODO: There is a small window of opportunity for this function to
+ * get called from eviction after we've dropped the last GEM refcount,
+ * but before the TTM deleted flag is set on the object. Avoid
+ * adjusting the shrinker list in such cases, since the object is
+ * not available to the shrinker anyway due to its zero refcount.
+ * To fix this properly we should move to a TTM shrinker LRU list for
+ * these objects.
+ */
+ if (kref_get_unless_zero(&obj->base.refcount)) {
+ if (shrinkable != obj->mm.ttm_shrinkable) {
+ if (shrinkable) {
+ if (obj->mm.madv == I915_MADV_WILLNEED)
+ __i915_gem_object_make_shrinkable(obj);
+ else
+ __i915_gem_object_make_purgeable(obj);
+ } else {
+ i915_gem_object_make_unshrinkable(obj);
+ }
+
+ obj->mm.ttm_shrinkable = shrinkable;
+ }
+ i915_gem_object_put(obj);
+ }
+
+ /*
* Put on the correct LRU list depending on the MADV status
*/
spin_lock(&bo->bdev->lru_lock);
- if (obj->mm.madv != I915_MADV_WILLNEED) {
+ if (shrinkable) {
+ /* Try to keep shmem_tt from being considered for shrinking. */
+ bo->priority = TTM_MAX_BO_PRIORITY - 1;
+ } else if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
@@ -823,15 +878,44 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
struct vm_area_struct *area = vmf->vma;
- struct drm_i915_gem_object *obj =
- i915_ttm_to_gem(area->vm_private_data);
+ struct ttm_buffer_object *bo = area->vm_private_data;
+ struct drm_device *dev = bo->base.dev;
+ struct drm_i915_gem_object *obj;
+ vm_fault_t ret;
+ int idx;
+
+ obj = i915_ttm_to_gem(bo);
+ if (!obj)
+ return VM_FAULT_SIGBUS;
/* Sanity check that we allow writing into this object */
if (unlikely(i915_gem_object_is_readonly(obj) &&
area->vm_flags & VM_WRITE))
return VM_FAULT_SIGBUS;
- return ttm_bo_vm_fault(vmf);
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ if (obj->mm.madv != I915_MADV_WILLNEED) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
+
+ if (drm_dev_enter(dev, &idx)) {
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
+ drm_dev_exit(idx);
+ } else {
+ ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+ }
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+ i915_ttm_adjust_lru(obj);
+
+ dma_resv_unlock(bo->base.resv);
+ return ret;
}
static int
@@ -880,16 +964,27 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
return drm_vma_node_offset_addr(&obj->base.vma_node);
}
+static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
+{
+ ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+}
+
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.name = "i915_gem_object_ttm",
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
+ I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
.get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages,
- .truncate = i915_ttm_purge,
+ .truncate = i915_ttm_truncate,
+ .shrinker_release_pages = i915_ttm_shrinker_release_pages,
+
.adjust_lru = i915_ttm_adjust_lru,
.delayed_free = i915_ttm_delayed_free,
.migrate = i915_ttm_migrate,
+
.mmap_offset = i915_ttm_mmap_offset,
+ .unmap_virtual = i915_ttm_unmap_virtual,
.mmap_ops = &vm_ops_ttm,
};
@@ -901,6 +996,18 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
mutex_destroy(&obj->ttm.get_io_page.lock);
if (obj->ttm.created) {
+ /*
+ * We freely manage the shrinker LRU outside of the mm.pages life
+ * cycle. As a result, when destroying the object, we should be
+ * extra paranoid and ensure we remove it from the LRU before
+ * we free the object.
+ *
+ * Touching the ttm_shrinkable outside of the object lock here
+ * should be safe now that the last GEM object ref was dropped.
+ */
+ if (obj->mm.ttm_shrinkable)
+ i915_gem_object_make_unshrinkable(obj);
+
i915_ttm_backup_free(obj);
/* This releases all gem object bindings to the backend. */
@@ -940,10 +1047,9 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
/* Don't put on a region list until we're either locked or fully initialized. */
- obj->mm.region = intel_memory_region_get(mem);
+ obj->mm.region = mem;
INIT_LIST_HEAD(&obj->mm.region_link);
- i915_gem_object_make_unshrinkable(obj);
INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->ttm.get_io_page.lock);
bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
@@ -955,6 +1061,14 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
GEM_BUG_ON(page_size && obj->mm.n_placements);
/*
+ * Keep an extra shrink pin to prevent the object from being made
+ * shrinkable too early. If the ttm_tt is ever allocated in shmem, we
+ * drop the pin. The TTM backend manages the shrinker LRU itself,
+ * outside of the normal mm.pages life cycle.
+ */
+ i915_gem_object_make_unshrinkable(obj);
+
+ /*
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
* destructor until obj->ttm.created is true.
@@ -980,6 +1094,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
static const struct intel_memory_region_ops ttm_system_region_ops = {
.init_object = __i915_gem_ttm_object_init,
+ .release = intel_region_ttm_fini,
};
struct intel_memory_region *
@@ -999,50 +1114,3 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915,
intel_memory_region_set_name(mr, "system-ttm");
return mr;
}
-
-/**
- * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
- * another
- * @dst: The destination object
- * @src: The source object
- * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
- * @intr: Whether to perform waits interruptible:
- *
- * Note: The caller is responsible for assuring that the underlying
- * TTM objects are populated if needed and locked.
- *
- * Return: Zero on success. Negative error code on error. If @intr == true,
- * then it may return -ERESTARTSYS or -EINTR.
- */
-int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
- struct drm_i915_gem_object *src,
- bool allow_accel, bool intr)
-{
- struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
- struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
- struct ttm_operation_ctx ctx = {
- .interruptible = intr,
- };
- struct sg_table *dst_st;
- int ret;
-
- assert_object_held(dst);
- assert_object_held(src);
-
- /*
- * Sync for now. This will change with async moves.
- */
- ret = ttm_bo_wait_ctx(dst_bo, &ctx);
- if (!ret)
- ret = ttm_bo_wait_ctx(src_bo, &ctx);
- if (ret)
- return ret;
-
- dst_st = gpu_binds_iomem(dst_bo->resource) ?
- dst->ttm.cached_io_st : i915_ttm_tt_get_st(dst_bo->ttm);
-
- __i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm,
- dst_st, allow_accel);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 0b7291dd897c..9d698ad00853 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -5,6 +5,8 @@
#ifndef _I915_GEM_TTM_H_
#define _I915_GEM_TTM_H_
+#include <drm/ttm/ttm_placement.h>
+
#include "gem/i915_gem_object_types.h"
/**
@@ -35,7 +37,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo);
static inline struct drm_i915_gem_object *
i915_ttm_to_gem(struct ttm_buffer_object *bo)
{
- if (GEM_WARN_ON(bo->destroy != i915_ttm_bo_destroy))
+ if (bo->destroy != i915_ttm_bo_destroy)
return NULL;
return container_of(bo, struct drm_i915_gem_object, __do_not_access);
@@ -47,10 +49,6 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
resource_size_t page_size,
unsigned int flags);
-int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
- struct drm_i915_gem_object *src,
- bool allow_accel, bool intr);
-
/* Internal I915 TTM declarations and definitions below. */
#define I915_PL_LMEM0 TTM_PL_PRIV
@@ -60,4 +58,37 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
struct ttm_placement *i915_ttm_sys_placement(void);
+void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj);
+
+struct i915_refct_sgt *
+i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
+ struct ttm_resource *res);
+
+void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
+
+int i915_ttm_purge(struct drm_i915_gem_object *obj);
+
+/**
+ * i915_ttm_gtt_binds_lmem - Should the memory be viewed as LMEM by the GTT?
+ * @mem: struct ttm_resource representing the memory.
+ *
+ * Return: true if memory should be viewed as LMEM for GTT binding purposes,
+ * false otherwise.
+ */
+static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem)
+{
+ return mem->mem_type != I915_PL_SYSTEM;
+}
+
+/**
+ * i915_ttm_cpu_maps_iomem - Should the memory be viewed as IOMEM by the CPU?
+ * @mem: struct ttm_resource representing the memory.
+ *
+ * Return: true if memory should be viewed as IOMEM for CPU mapping purposes.
+ */
+static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
+{
+ /* Once / if we support GGTT, this is also false for cached ttm_tts */
+ return mem->mem_type != I915_PL_SYSTEM;
+}
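+
+/*
+ * Illustrative sketch, not authoritative: both helpers currently key off
+ * the TTM placement alone, so e.g. the io_mem_reserve() callback earlier
+ * in this patch reduces to:
+ *
+ * if (!i915_ttm_cpu_maps_iomem(mem))
+ * return 0;
+ * mem->bus.caching = ttm_write_combined;
+ * ...
+ *
+ * Once / if GGTT-bound cached ttm_tts are supported, the two predicates
+ * may diverge, as the comment above notes.
+ */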
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
new file mode 100644
index 000000000000..ee9612a3ee5e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_bo_driver.h>
+
+#include "i915_deps.h"
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
+
+#include "gem/i915_gem_object.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_ttm_move.h"
+
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_migrate.h"
+
+/**
+ * DOC: Selftest failure modes for failsafe migration:
+ *
+ * For fail_gpu_migration, the scheduled gpu blit is always a clear blit
+ * rather than a copy blit, and then we force the failure paths as if
+ * the blit fence returned an error.
+ *
+ * For fail_work_allocation we fail the kmalloc of the async worker and
+ * instead sync the gpu blit. If that then fails, or fail_gpu_migration is
+ * set to true, a memcpy operation is performed synchronously.
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+static bool fail_gpu_migration;
+static bool fail_work_allocation;
+
+void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
+ bool work_allocation)
+{
+ fail_gpu_migration = gpu_migration;
+ fail_work_allocation = work_allocation;
+}
+#endif
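+
+/*
+ * Minimal usage sketch, mirroring igt_lmem_pages_failsafe_migrate()
+ * further down this series: wrap the migration under test in the failure
+ * modes, then restore the defaults afterwards.
+ *
+ * i915_ttm_migrate_set_failure_modes(true, false);
+ * err = igt_lmem_pages_migrate(arg);
+ * i915_ttm_migrate_set_failure_modes(false, false);
+ */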
+
+static enum i915_cache_level
+i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
+ struct ttm_tt *ttm)
+{
+ return ((HAS_LLC(i915) || HAS_SNOOP(i915)) &&
+ !i915_ttm_gtt_binds_lmem(res) &&
+ ttm->caching == ttm_cached) ? I915_CACHE_LLC :
+ I915_CACHE_NONE;
+}
+
+static struct intel_memory_region *
+i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
+{
+ struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
+
+ /* There's some room for optimization here... */
+ GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
+ ttm_mem_type < I915_PL_LMEM0);
+ if (ttm_mem_type == I915_PL_SYSTEM)
+ return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
+ 0);
+
+ return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
+ ttm_mem_type - I915_PL_LMEM0);
+}
+
+/**
+ * i915_ttm_adjust_domains_after_move - Adjust the GEM domains after a
+ * TTM move
+ * @obj: The gem object
+ */
+void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+
+ if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
+ obj->write_domain = I915_GEM_DOMAIN_WC;
+ obj->read_domains = I915_GEM_DOMAIN_WC;
+ } else {
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ }
+}
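+
+/*
+ * Informal sketch of the mapping above, for illustration only: an object
+ * sitting in iomem, or one backed by a non-cached ttm_tt, ends up with
+ *
+ * obj->write_domain == obj->read_domains == I915_GEM_DOMAIN_WC
+ *
+ * while a cached system-memory ttm_tt yields I915_GEM_DOMAIN_CPU.
+ */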
+
+/**
+ * i915_ttm_adjust_gem_after_move - Adjust the GEM state after a TTM move
+ * @obj: The gem object
+ *
+ * Adjusts the GEM object's region, mem_flags and cache coherency after a
+ * TTM move.
+ */
+void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ unsigned int cache_level;
+ unsigned int i;
+
+ /*
+ * If the object was moved to an allowable region, update the object
+ * region to consider it migrated. Note that if it's currently not
+ * in an allowable region, it's evicted and we don't update the
+ * object region.
+ */
+ if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+ for (i = 0; i < obj->mm.n_placements; ++i) {
+ struct intel_memory_region *mr = obj->mm.placements[i];
+
+ if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+ mr != obj->mm.region) {
+ i915_gem_object_release_memory_region(obj);
+ i915_gem_object_init_memory_region(obj, mr);
+ break;
+ }
+ }
+ }
+
+ obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+
+ obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+ I915_BO_FLAG_STRUCT_PAGE;
+
+ cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+ bo->ttm);
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+}
+
+/**
+ * i915_ttm_move_notify - Prepare an object for move
+ * @bo: The ttm buffer object.
+ *
+ * This function prepares an object for move by removing all GPU bindings,
+ * removing all CPU mappings and finally releasing the pages sg-table.
+ *
+ * Return: 0 if successful, negative error code on error.
+ */
+int i915_ttm_move_notify(struct ttm_buffer_object *bo)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ int ret;
+
+ ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ if (ret)
+ return ret;
+
+ ret = __i915_gem_object_put_pages(obj);
+ if (ret)
+ return ret;
+
+ return 0;
+}
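+
+/*
+ * Sketch of the expected ordering, mirroring i915_ttm_move() below
+ * (illustration only): this runs before the backing store is touched,
+ * and a purge follows when the pages are no longer wanted:
+ *
+ * ret = i915_ttm_move_notify(bo);
+ * if (ret)
+ * return ret;
+ * if (obj->mm.madv != I915_MADV_WILLNEED)
+ * i915_ttm_purge(obj);
+ */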
+
+static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
+ bool clear,
+ struct ttm_resource *dst_mem,
+ struct ttm_tt *dst_ttm,
+ struct sg_table *dst_st,
+ const struct i915_deps *deps)
+{
+ struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
+ bdev);
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct i915_request *rq;
+ struct ttm_tt *src_ttm = bo->ttm;
+ enum i915_cache_level src_level, dst_level;
+ int ret;
+
+ if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915)))
+ return ERR_PTR(-EINVAL);
+
+ /* With fail_gpu_migration, we always perform a GPU clear. */
+ if (I915_SELFTEST_ONLY(fail_gpu_migration))
+ clear = true;
+
+ dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
+ if (clear) {
+ if (bo->type == ttm_bo_type_kernel &&
+ !I915_SELFTEST_ONLY(fail_gpu_migration))
+ return ERR_PTR(-EINVAL);
+
+ intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
+ ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, deps,
+ dst_st->sgl, dst_level,
+ i915_ttm_gtt_binds_lmem(dst_mem),
+ 0, &rq);
+ } else {
+ struct i915_refct_sgt *src_rsgt =
+ i915_ttm_resource_get_st(obj, bo->resource);
+
+ if (IS_ERR(src_rsgt))
+ return ERR_CAST(src_rsgt);
+
+ src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
+ intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
+ ret = intel_context_migrate_copy(to_gt(i915)->migrate.context,
+ deps, src_rsgt->table.sgl,
+ src_level,
+ i915_ttm_gtt_binds_lmem(bo->resource),
+ dst_st->sgl, dst_level,
+ i915_ttm_gtt_binds_lmem(dst_mem),
+ &rq);
+
+ i915_refct_sgt_put(src_rsgt);
+ }
+
+ intel_engine_pm_put(to_gt(i915)->migrate.context->engine);
+
+ if (ret && rq) {
+ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(rq);
+ }
+
+ return ret ? ERR_PTR(ret) : &rq->fence;
+}
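+
+/*
+ * Informal caller contract (sketch only): on success the blit request's
+ * fence is returned with a reference the caller must drop, as
+ * __i915_ttm_move() below does:
+ *
+ * fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, st, deps);
+ * if (!IS_ERR(fence)) {
+ * ...arm the error intercept or consume the fence...
+ * dma_fence_put(fence);
+ * }
+ */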
+
+/**
+ * struct i915_ttm_memcpy_arg - argument for the bo memcpy functionality.
+ * @_dst_iter: Storage space for the destination kmap iterator.
+ * @_src_iter: Storage space for the source kmap iterator.
+ * @dst_iter: Pointer to the destination kmap iterator.
+ * @src_iter: Pointer to the source kmap iterator.
+ * @num_pages: Number of pages to copy or clear.
+ * @clear: Whether to clear instead of copy.
+ * @src_rsgt: Refcounted scatter-gather list of source memory.
+ * @dst_rsgt: Refcounted scatter-gather list of destination memory.
+ */
+struct i915_ttm_memcpy_arg {
+ union {
+ struct ttm_kmap_iter_tt tt;
+ struct ttm_kmap_iter_iomap io;
+ } _dst_iter,
+ _src_iter;
+ struct ttm_kmap_iter *dst_iter;
+ struct ttm_kmap_iter *src_iter;
+ unsigned long num_pages;
+ bool clear;
+ struct i915_refct_sgt *src_rsgt;
+ struct i915_refct_sgt *dst_rsgt;
+};
+
+/**
+ * struct i915_ttm_memcpy_work - Async memcpy worker under a dma-fence.
+ * @fence: The dma-fence.
+ * @work: The work struct used for the memcpy work.
+ * @lock: The fence lock. Not used to protect anything else ATM.
+ * @irq_work: Low latency worker to signal the fence since it can't be done
+ * from the callback for lockdep reasons.
+ * @cb: Callback for the accelerated migration fence.
+ * @arg: The argument for the memcpy functionality.
+ */
+struct i915_ttm_memcpy_work {
+ struct dma_fence fence;
+ struct work_struct work;
+ /* The fence lock */
+ spinlock_t lock;
+ struct irq_work irq_work;
+ struct dma_fence_cb cb;
+ struct i915_ttm_memcpy_arg arg;
+};
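+
+/*
+ * Rough lifecycle sketch (illustration only, see the helpers below):
+ *
+ * copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL);
+ * i915_ttm_memcpy_init(&copy_work->arg, bo, clear, dst_mem, dst_ttm,
+ * dst_rsgt);
+ * fence = i915_ttm_memcpy_work_arm(copy_work, dep);
+ *
+ * If @dep signals without error, __memcpy_irq_work() signals the fence
+ * and releases the arg; on error (or injected failure) __memcpy_work()
+ * first runs the memcpy from the ordinary work queue.
+ */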
+
+static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg)
+{
+ ttm_move_memcpy(arg->clear, arg->num_pages,
+ arg->dst_iter, arg->src_iter);
+}
+
+static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg,
+ struct ttm_buffer_object *bo, bool clear,
+ struct ttm_resource *dst_mem,
+ struct ttm_tt *dst_ttm,
+ struct i915_refct_sgt *dst_rsgt)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct intel_memory_region *dst_reg, *src_reg;
+
+ dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
+ src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
+ GEM_BUG_ON(!dst_reg || !src_reg);
+
+ arg->dst_iter = !i915_ttm_cpu_maps_iomem(dst_mem) ?
+ ttm_kmap_iter_tt_init(&arg->_dst_iter.tt, dst_ttm) :
+ ttm_kmap_iter_iomap_init(&arg->_dst_iter.io, &dst_reg->iomap,
+ &dst_rsgt->table, dst_reg->region.start);
+
+ arg->src_iter = !i915_ttm_cpu_maps_iomem(bo->resource) ?
+ ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) :
+ ttm_kmap_iter_iomap_init(&arg->_src_iter.io, &src_reg->iomap,
+ &obj->ttm.cached_io_rsgt->table,
+ src_reg->region.start);
+ arg->clear = clear;
+ arg->num_pages = bo->base.size >> PAGE_SHIFT;
+
+ arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt);
+ arg->src_rsgt = clear ? NULL :
+ i915_ttm_resource_get_st(obj, bo->resource);
+}
+
+static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg)
+{
+ i915_refct_sgt_put(arg->src_rsgt);
+ i915_refct_sgt_put(arg->dst_rsgt);
+}
+
+static void __memcpy_work(struct work_struct *work)
+{
+ struct i915_ttm_memcpy_work *copy_work =
+ container_of(work, typeof(*copy_work), work);
+ struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
+ bool cookie = dma_fence_begin_signalling();
+
+ i915_ttm_move_memcpy(arg);
+ dma_fence_end_signalling(cookie);
+
+ dma_fence_signal(&copy_work->fence);
+
+ i915_ttm_memcpy_release(arg);
+ dma_fence_put(&copy_work->fence);
+}
+
+static void __memcpy_irq_work(struct irq_work *irq_work)
+{
+ struct i915_ttm_memcpy_work *copy_work =
+ container_of(irq_work, typeof(*copy_work), irq_work);
+ struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
+
+ dma_fence_signal(&copy_work->fence);
+ i915_ttm_memcpy_release(arg);
+ dma_fence_put(&copy_work->fence);
+}
+
+static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct i915_ttm_memcpy_work *copy_work =
+ container_of(cb, typeof(*copy_work), cb);
+
+ if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) {
+ INIT_WORK(&copy_work->work, __memcpy_work);
+ queue_work(system_unbound_wq, &copy_work->work);
+ } else {
+ init_irq_work(&copy_work->irq_work, __memcpy_irq_work);
+ irq_work_queue(&copy_work->irq_work);
+ }
+}
+
+static const char *get_driver_name(struct dma_fence *fence)
+{
+ return "i915_ttm_memcpy_work";
+}
+
+static const char *get_timeline_name(struct dma_fence *fence)
+{
+ return "unbound";
+}
+
+static const struct dma_fence_ops dma_fence_memcpy_ops = {
+ .get_driver_name = get_driver_name,
+ .get_timeline_name = get_timeline_name,
+};
+
+static struct dma_fence *
+i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work,
+ struct dma_fence *dep)
+{
+ int ret;
+
+ spin_lock_init(&work->lock);
+ dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0);
+ dma_fence_get(&work->fence);
+ ret = dma_fence_add_callback(dep, &work->cb, __memcpy_cb);
+ if (ret) {
+ if (ret != -ENOENT)
+ dma_fence_wait(dep, false);
+
+ return ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL :
+ dep->error);
+ }
+
+ return &work->fence;
+}
+
+static struct dma_fence *
+__i915_ttm_move(struct ttm_buffer_object *bo,
+ const struct ttm_operation_ctx *ctx, bool clear,
+ struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm,
+ struct i915_refct_sgt *dst_rsgt, bool allow_accel,
+ const struct i915_deps *move_deps)
+{
+ struct i915_ttm_memcpy_work *copy_work = NULL;
+ struct i915_ttm_memcpy_arg _arg, *arg = &_arg;
+ struct dma_fence *fence = ERR_PTR(-EINVAL);
+
+ if (allow_accel) {
+ fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm,
+ &dst_rsgt->table, move_deps);
+
+ /*
+ * We only need to intercept the error when moving to lmem.
+ * When moving to system, TTM or shmem will provide us with
+ * cleared pages.
+ */
+ if (!IS_ERR(fence) && !i915_ttm_gtt_binds_lmem(dst_mem) &&
+ !I915_SELFTEST_ONLY(fail_gpu_migration ||
+ fail_work_allocation))
+ goto out;
+ }
+
+ /* If we've scheduled gpu migration, try to arm the error intercept. */
+ if (!IS_ERR(fence)) {
+ struct dma_fence *dep = fence;
+
+ if (!I915_SELFTEST_ONLY(fail_work_allocation))
+ copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL);
+
+ if (copy_work) {
+ arg = &copy_work->arg;
+ i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
+ dst_rsgt);
+ fence = i915_ttm_memcpy_work_arm(copy_work, dep);
+ } else {
+ dma_fence_wait(dep, false);
+ fence = ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ?
+ -EINVAL : fence->error);
+ }
+ dma_fence_put(dep);
+
+ if (!IS_ERR(fence))
+ goto out;
+ } else if (move_deps) {
+ int err = i915_deps_sync(move_deps, ctx);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ /* Error intercept failed or no accelerated migration to start with */
+ if (!copy_work)
+ i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
+ dst_rsgt);
+ i915_ttm_move_memcpy(arg);
+ i915_ttm_memcpy_release(arg);
+ kfree(copy_work);
+
+ return NULL;
+out:
+ if (!fence && copy_work) {
+ i915_ttm_memcpy_release(arg);
+ kfree(copy_work);
+ }
+
+ return fence;
+}
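+
+/*
+ * Rough control flow of __i915_ttm_move() above, for orientation only:
+ *
+ * 1) Try i915_ttm_accel_move(); a blit to system memory may return early.
+ * 2) On a scheduled blit, arm an i915_ttm_memcpy_work error intercept.
+ * 3) If arming (or the blit itself) failed, fall back to a synchronous
+ *    ttm_move_memcpy().
+ * 4) A NULL return means the copy already completed synchronously here.
+ */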
+
+static int
+prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
+ struct i915_deps *deps)
+{
+ int ret;
+
+ ret = i915_deps_add_dependency(deps, bo->moving, ctx);
+ if (!ret)
+ ret = i915_deps_add_resv(deps, bo->base.resv, ctx);
+
+ return ret;
+}
+
+/**
+ * i915_ttm_move - The TTM move callback used by i915.
+ * @bo: The buffer object.
+ * @evict: Whether this is an eviction.
+ * @ctx: The struct ttm_operation_ctx.
+ * @dst_mem: The destination ttm resource.
+ * @hop: If we need multihop, what temporary memory type to move to.
+ *
+ * Return: 0 if successful, negative error code otherwise.
+ */
+int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *dst_mem,
+ struct ttm_place *hop)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct ttm_resource_manager *dst_man =
+ ttm_manager_type(bo->bdev, dst_mem->mem_type);
+ struct dma_fence *migration_fence = NULL;
+ struct ttm_tt *ttm = bo->ttm;
+ struct i915_refct_sgt *dst_rsgt;
+ bool clear;
+ int ret;
+
+ if (GEM_WARN_ON(!obj)) {
+ ttm_bo_move_null(bo, dst_mem);
+ return 0;
+ }
+
+ ret = i915_ttm_move_notify(bo);
+ if (ret)
+ return ret;
+
+ if (obj->mm.madv != I915_MADV_WILLNEED) {
+ i915_ttm_purge(obj);
+ ttm_resource_free(bo, &dst_mem);
+ return 0;
+ }
+
+ /* Populate ttm with pages if needed. Typically system memory. */
+ if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
+ ret = ttm_tt_populate(bo->bdev, ttm, ctx);
+ if (ret)
+ return ret;
+ }
+
+ dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem);
+ if (IS_ERR(dst_rsgt))
+ return PTR_ERR(dst_rsgt);
+
+ clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
+ if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) {
+ struct i915_deps deps;
+
+ i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+ ret = prev_deps(bo, ctx, &deps);
+ if (ret) {
+ i915_refct_sgt_put(dst_rsgt);
+ return ret;
+ }
+
+ migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, bo->ttm,
+ dst_rsgt, true, &deps);
+ i915_deps_fini(&deps);
+ }
+
+ /* We can possibly get an -ERESTARTSYS here */
+ if (IS_ERR(migration_fence)) {
+ i915_refct_sgt_put(dst_rsgt);
+ return PTR_ERR(migration_fence);
+ }
+
+ if (migration_fence) {
+ ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict,
+ true, dst_mem);
+ if (ret) {
+ dma_fence_wait(migration_fence, false);
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
+ }
+ dma_fence_put(migration_fence);
+ } else {
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
+ }
+
+ i915_ttm_adjust_domains_after_move(obj);
+ i915_ttm_free_cached_io_rsgt(obj);
+
+ if (i915_ttm_gtt_binds_lmem(dst_mem) || i915_ttm_cpu_maps_iomem(dst_mem)) {
+ obj->ttm.cached_io_rsgt = dst_rsgt;
+ obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl;
+ obj->ttm.get_io_page.sg_idx = 0;
+ } else {
+ i915_refct_sgt_put(dst_rsgt);
+ }
+
+ i915_ttm_adjust_lru(obj);
+ i915_ttm_adjust_gem_after_move(obj);
+ return 0;
+}
+
+/**
+ * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
+ * another
+ * @dst: The destination object
+ * @src: The source object
+ * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
+ * @intr: Whether to perform waits interruptibly.
+ *
+ * Note: The caller is responsible for ensuring that the underlying
+ * TTM objects are populated if needed and locked.
+ *
+ * Return: Zero on success. Negative error code on error. If @intr == true,
+ * then it may return -ERESTARTSYS or -EINTR.
+ */
+int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
+ struct drm_i915_gem_object *src,
+ bool allow_accel, bool intr)
+{
+ struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
+ struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = intr,
+ };
+ struct i915_refct_sgt *dst_rsgt;
+ struct dma_fence *copy_fence;
+ struct i915_deps deps;
+ int ret;
+
+ assert_object_held(dst);
+ assert_object_held(src);
+ i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+
+ ret = dma_resv_reserve_shared(src_bo->base.resv, 1);
+ if (ret)
+ return ret;
+
+ ret = i915_deps_add_resv(&deps, dst_bo->base.resv, &ctx);
+ if (ret)
+ return ret;
+
+ ret = i915_deps_add_resv(&deps, src_bo->base.resv, &ctx);
+ if (ret)
+ return ret;
+
+ dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource);
+ copy_fence = __i915_ttm_move(src_bo, &ctx, false, dst_bo->resource,
+ dst_bo->ttm, dst_rsgt, allow_accel,
+ &deps);
+
+ i915_deps_fini(&deps);
+ i915_refct_sgt_put(dst_rsgt);
+ if (IS_ERR_OR_NULL(copy_fence))
+ return PTR_ERR_OR_ZERO(copy_fence);
+
+ dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
+ dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);
+
+ dma_fence_put(copy_fence);
+
+ return 0;
+}
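+
+/*
+ * Hedged usage sketch; the suspend backup path in i915_gem_ttm_pm.c
+ * below is a real caller. With both objects locked and populated:
+ *
+ * err = i915_gem_obj_copy_ttm(backup, obj, allow_gpu, false);
+ *
+ * On return the copy fence has already been installed in both objects'
+ * reservation objects and put, so no further cleanup is needed here.
+ */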
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h
new file mode 100644
index 000000000000..d2e7f149e05c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef _I915_GEM_TTM_MOVE_H_
+#define _I915_GEM_TTM_MOVE_H_
+
+#include <linux/types.h>
+
+#include "i915_selftest.h"
+
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+struct ttm_place;
+struct ttm_resource;
+struct ttm_tt;
+
+struct drm_i915_gem_object;
+struct i915_refct_sgt;
+
+int i915_ttm_move_notify(struct ttm_buffer_object *bo);
+
+I915_SELFTEST_DECLARE(void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
+ bool work_allocation));
+
+int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
+ struct drm_i915_gem_object *src,
+ bool allow_accel, bool intr);
+
+/* Internal I915 TTM declarations and definitions below. */
+
+int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *dst_mem,
+ struct ttm_place *hop);
+
+void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj);
+
+void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 3b6d14b5c604..9aad84059d56 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -12,6 +12,7 @@
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
/**
@@ -79,6 +80,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
GEM_WARN_ON(err);
+ ttm_bo_wait_ctx(backup_bo, &ctx);
obj->ttm.backup = backup;
return 0;
@@ -169,6 +171,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);
GEM_WARN_ON(err);
+ ttm_bo_wait_ctx(backup_bo, &ctx);
obj->ttm.backup = NULL;
err = 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 3173c9f9a040..3cc01c30dd62 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -529,7 +529,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- if (!dev_priv->gt.vm->has_read_only)
+ if (!to_gt(dev_priv)->vm->has_read_only)
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index f909aaa09d9c..dab3d30c09a0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -10,7 +10,6 @@
#include "gt/intel_engine.h"
-#include "dma_resv_utils.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -25,7 +24,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
return timeout;
if (dma_fence_is_i915(fence))
- return i915_request_wait(to_request(fence), flags, timeout);
+ return i915_request_wait_timeout(to_request(fence), flags, timeout);
return dma_fence_wait_timeout(fence,
flags & I915_WAIT_INTERRUPTIBLE,
@@ -37,58 +36,22 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int flags,
long timeout)
{
- struct dma_fence *excl;
- bool prune_fences = false;
-
- if (flags & I915_WAIT_ALL) {
- struct dma_fence **shared;
- unsigned int count, i;
- int ret;
-
- ret = dma_resv_get_fences(resv, &excl, &count, &shared);
- if (ret)
- return ret;
-
- for (i = 0; i < count; i++) {
- timeout = i915_gem_object_wait_fence(shared[i],
- flags, timeout);
- if (timeout < 0)
- break;
-
- dma_fence_put(shared[i]);
- }
-
- for (; i < count; i++)
- dma_fence_put(shared[i]);
- kfree(shared);
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ long ret = timeout ?: 1;
+
+ dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ ret = i915_gem_object_wait_fence(fence, flags, timeout);
+ if (ret <= 0)
+ break;
- /*
- * If both shared fences and an exclusive fence exist,
- * then by construction the shared fences must be later
- * than the exclusive fence. If we successfully wait for
- * all the shared fences, we know that the exclusive fence
- * must all be signaled. If all the shared fences are
- * signaled, we can prune the array and recover the
- * floating references on the fences/requests.
- */
- prune_fences = count && timeout >= 0;
- } else {
- excl = dma_resv_get_excl_unlocked(resv);
+ if (timeout)
+ timeout = ret;
}
+ dma_resv_iter_end(&cursor);
- if (excl && timeout >= 0)
- timeout = i915_gem_object_wait_fence(excl, flags, timeout);
-
- dma_fence_put(excl);
-
- /*
- * Opportunistically prune the fences iff we know they have *all* been
- * signaled.
- */
- if (prune_fences)
- dma_resv_prune(resv);
-
- return timeout;
+ return ret;
}
static void fence_set_priority(struct dma_fence *fence,
@@ -151,32 +114,13 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr)
{
- struct dma_fence *excl;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
- if (flags & I915_WAIT_ALL) {
- struct dma_fence **shared;
- unsigned int count, i;
- int ret;
-
- ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
- &shared);
- if (ret)
- return ret;
-
- for (i = 0; i < count; i++) {
- i915_gem_fence_wait_priority(shared[i], attr);
- dma_fence_put(shared[i]);
- }
-
- kfree(shared);
- } else {
- excl = dma_resv_get_excl_unlocked(obj->base.resv);
- }
-
- if (excl) {
- i915_gem_fence_wait_priority(excl, attr);
- dma_fence_put(excl);
- }
+ dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL);
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ i915_gem_fence_wait_priority(fence, attr);
+ dma_resv_iter_end(&cursor);
return 0;
}
@@ -196,7 +140,11 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
timeout = i915_gem_object_wait_reservation(obj->base.resv,
flags, timeout);
- return timeout < 0 ? timeout : 0;
+
+ if (timeout < 0)
+ return timeout;
+
+ return !timeout ? -ETIME : 0;
}
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
@@ -306,6 +254,6 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
unsigned int flags)
{
might_sleep();
- /* NOP for now. */
- return 0;
+
+ return i915_gem_object_wait_moving_fence(obj, !!(flags & I915_WAIT_INTERRUPTIBLE));
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index dbdbdc344d87..7271fbf813fa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -12,6 +12,7 @@
int i915_gemfs_init(struct drm_i915_private *i915)
{
+ char huge_opt[] = "huge=within_size"; /* r/w */
struct file_system_type *type;
struct vfsmount *gemfs;
char *opts;
@@ -31,10 +32,8 @@ int i915_gemfs_init(struct drm_i915_private *i915)
*/
opts = NULL;
- if (intel_vtd_active()) {
+ if (intel_vtd_active(i915)) {
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
- static char huge_opt[] = "huge=within_size"; /* r/w */
-
opts = huge_opt;
drm_info(&i915->drm,
"Transparent Hugepage mode '%s'\n",
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index b2003133deaf..11f0aa65f8a3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -22,6 +22,22 @@
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"
+static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915,
+ struct file *file)
+{
+ struct i915_gem_context *ctx = live_context(i915, file);
+ struct i915_address_space *vm;
+
+ if (IS_ERR(ctx))
+ return ctx;
+
+ vm = ctx->vm;
+ if (vm)
+ WRITE_ONCE(vm->scrub_64K, true);
+
+ return ctx;
+}
+
static const unsigned int page_sizes[] = {
I915_GTT_PAGE_SIZE_2M,
I915_GTT_PAGE_SIZE_64K,
@@ -552,7 +568,7 @@ out_unpin:
out_put:
i915_gem_object_put(obj);
out_region:
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
return err;
}
@@ -959,6 +975,8 @@ static int igt_mock_ppgtt_64K(void *arg)
__i915_gem_object_put_pages(obj);
i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
+
+ i915_gem_drain_freed_objects(i915);
}
}
@@ -1080,10 +1098,6 @@ static int __igt_write_huge(struct intel_context *ce,
if (IS_ERR(vma))
return PTR_ERR(vma);
- err = i915_vma_unbind(vma);
- if (err)
- return err;
-
err = i915_vma_pin(vma, size, 0, flags | offset);
if (err) {
/*
@@ -1117,7 +1131,7 @@ out_vma_unpin:
return err;
}
-static int igt_write_huge(struct i915_gem_context *ctx,
+static int igt_write_huge(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj)
{
struct i915_gem_engines *engines;
@@ -1127,6 +1141,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
IGT_TIMEOUT(end_time);
unsigned int max_page_size;
unsigned int count;
+ struct i915_gem_context *ctx;
+ struct file *file;
u64 max;
u64 num;
u64 size;
@@ -1134,6 +1150,16 @@ static int igt_write_huge(struct i915_gem_context *ctx,
int i, n;
int err = 0;
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = hugepage_ctx(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
size = obj->base.size;
@@ -1153,7 +1179,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
}
i915_gem_context_unlock_engines(ctx);
if (!n)
- return 0;
+ goto out;
/*
* To keep things interesting when alternating between engines in our
@@ -1215,6 +1241,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
kfree(order);
+out:
+ fput(file);
return err;
}
@@ -1277,8 +1305,7 @@ static u32 igt_random_size(struct rnd_state *prng,
static int igt_ppgtt_smoke_huge(void *arg)
{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
I915_RND_STATE(prng);
struct {
@@ -1302,6 +1329,7 @@ static int igt_ppgtt_smoke_huge(void *arg)
u32 min = backends[i].min;
u32 max = backends[i].max;
u32 size = max;
+
try_again:
size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
@@ -1336,7 +1364,7 @@ try_again:
goto out_unpin;
}
- err = igt_write_huge(ctx, obj);
+ err = igt_write_huge(i915, obj);
if (err) {
pr_err("%s write-huge failed with size=%u, i=%d\n",
__func__, size, i);
@@ -1363,8 +1391,7 @@ out_put:
static int igt_ppgtt_sanity_check(void *arg)
{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_private *i915 = arg;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct {
igt_create_fn fn;
@@ -1431,7 +1458,7 @@ static int igt_ppgtt_sanity_check(void *arg)
if (pages)
obj->mm.page_sizes.sg = pages;
- err = igt_write_huge(ctx, obj);
+ err = igt_write_huge(i915, obj);
i915_gem_object_lock(obj, NULL);
i915_gem_object_unpin_pages(obj);
@@ -1458,15 +1485,27 @@ out:
static int igt_tmpfs_fallback(void *arg)
{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_private *i915 = arg;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ struct file *file;
u32 *vaddr;
int err = 0;
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = hugepage_ctx(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+ vm = i915_gem_context_get_eb_vm(ctx);
+
/*
* Make sure that we don't burst into a ball of flames upon falling back
* to tmpfs, which we rely on if on the off-chance we encounter a failure
@@ -1510,33 +1549,47 @@ out_restore:
i915->mm.gemfs = gemfs;
i915_vm_put(vm);
+out:
+ fput(file);
return err;
}
static int igt_shrink_thp(void *arg)
{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
+ struct drm_i915_private *i915 = arg;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
struct drm_i915_gem_object *obj;
struct i915_gem_engines_iter it;
struct intel_context *ce;
struct i915_vma *vma;
+ struct file *file;
unsigned int flags = PIN_USER;
unsigned int n;
bool should_swap;
- int err = 0;
+ int err;
+
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("missing THP support, skipping\n");
+ return 0;
+ }
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = hugepage_ctx(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+ vm = i915_gem_context_get_eb_vm(ctx);
/*
* Sanity check shrinking huge-paged object -- make sure nothing blows
* up.
*/
- if (!igt_can_allocate_thp(i915)) {
- pr_info("missing THP support, skipping\n");
- goto out_vm;
- }
-
obj = i915_gem_object_create_shmem(i915, SZ_2M);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -1626,7 +1679,8 @@ out_put:
i915_gem_object_put(obj);
out_vm:
i915_vm_put(vm);
-
+out:
+ fput(file);
return err;
}
@@ -1651,7 +1705,7 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
- ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1687,36 +1741,14 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ppgtt_smoke_huge),
SUBTEST(igt_ppgtt_sanity_check),
};
- struct i915_gem_context *ctx;
- struct i915_address_space *vm;
- struct file *file;
- int err;
if (!HAS_PPGTT(i915)) {
pr_info("PPGTT not supported, skipping live-selftests\n");
return 0;
}
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_file;
- }
-
- vm = ctx->vm;
- if (vm)
- WRITE_ONCE(vm->scrub_64K, true);
-
- err = i915_subtests(tests, ctx);
-
-out_file:
- fput(file);
- return err;
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 8402ed925a69..75947e9dada2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -592,7 +592,7 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_client_tiled_blits),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index b32f7fed2d9c..3f41fe5ec9d4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -88,9 +88,9 @@ static int live_nop_switch(void *arg)
rq = i915_request_get(this);
i915_request_add(this);
}
- if (i915_request_wait(rq, 0, HZ) < 0) {
+ if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(to_gt(i915));
i915_request_put(rq);
err = -EIO;
goto out_file;
@@ -146,7 +146,7 @@ static int live_nop_switch(void *arg)
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(to_gt(i915));
i915_request_put(rq);
break;
}
@@ -1223,7 +1223,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
return 0;
if (flags & TEST_RESET)
- igt_global_reset_lock(&i915->gt);
+ igt_global_reset_lock(to_gt(i915));
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -1306,7 +1306,7 @@ out_put:
out_unlock:
if (flags & TEST_RESET)
- igt_global_reset_unlock(&i915->gt);
+ igt_global_reset_unlock(to_gt(i915));
if (ret)
pr_err("%s: Failed with %d!\n", name, ret);
@@ -1481,10 +1481,10 @@ static int check_scratch(struct i915_address_space *vm, u64 offset)
static int write_to_scratch(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
u64 offset, u32 value)
{
struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_request *rq;
struct i915_vma *vma;
@@ -1497,15 +1497,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
return err;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto out;
- }
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
if (GRAPHICS_VER(i915) >= 8) {
@@ -1569,17 +1563,19 @@ err_unpin:
i915_vma_unpin(vma);
out_vm:
i915_vm_put(vm);
-out:
- i915_gem_object_put(obj);
+
+ if (!err)
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
+
return err;
}
static int read_from_scratch(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
u64 offset, u32 *value)
{
struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
const u32 result = 0x100;
struct i915_request *rq;
@@ -1594,10 +1590,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
return err;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
if (GRAPHICS_VER(i915) >= 8) {
const u32 GPR0 = engine->mmio_base + 0x600;
@@ -1615,7 +1607,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto out;
+ goto err_unpin;
}
memset(cmd, POISON_INUSE, PAGE_SIZE);
@@ -1651,7 +1643,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto out;
+ goto err_unpin;
}
memset(cmd, POISON_INUSE, PAGE_SIZE);
@@ -1722,8 +1714,10 @@ err_unpin:
i915_vma_unpin(vma);
out_vm:
i915_vm_put(vm);
-out:
- i915_gem_object_put(obj);
+
+ if (!err)
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
+
return err;
}
@@ -1757,6 +1751,7 @@ static int igt_vm_isolation(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_a, *ctx_b;
+ struct drm_i915_gem_object *obj_a, *obj_b;
unsigned long num_engines, count;
struct intel_engine_cs *engine;
struct igt_live_test t;
@@ -1810,6 +1805,18 @@ static int igt_vm_isolation(void *arg)
vm_total = ctx_a->vm->total;
GEM_BUG_ON(ctx_b->vm->total != vm_total);
+ obj_a = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj_a)) {
+ err = PTR_ERR(obj_a);
+ goto out_file;
+ }
+
+ obj_b = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj_b)) {
+ err = PTR_ERR(obj_b);
+ goto put_a;
+ }
+
count = 0;
num_engines = 0;
for_each_uabi_engine(engine, i915) {
@@ -1832,13 +1839,13 @@ static int igt_vm_isolation(void *arg)
I915_GTT_PAGE_SIZE, vm_total,
sizeof(u32), alignof_dword);
- err = write_to_scratch(ctx_a, engine,
+ err = write_to_scratch(ctx_a, engine, obj_a,
offset, 0xdeadbeef);
if (err == 0)
- err = read_from_scratch(ctx_b, engine,
+ err = read_from_scratch(ctx_b, engine, obj_b,
offset, &value);
if (err)
- goto out_file;
+ goto put_b;
if (value != expected) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@@ -1847,7 +1854,7 @@ static int igt_vm_isolation(void *arg)
lower_32_bits(offset),
this);
err = -EINVAL;
- goto out_file;
+ goto put_b;
}
this++;
@@ -1858,6 +1865,10 @@ static int igt_vm_isolation(void *arg)
pr_info("Checked %lu scratch offsets across %lu engines\n",
count, num_engines);
+put_b:
+ i915_gem_object_put(obj_b);
+put_a:
+ i915_gem_object_put(obj_a);
out_file:
if (igt_live_test_end(&t))
err = -EIO;
@@ -1877,7 +1888,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_vm_isolation),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 4a6bb64c3a35..3cc74b0fed06 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -102,7 +102,7 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1);
if (IS_ERR(obj)) {
pr_err("__i915_gem_object_create_user failed with err=%ld\n",
- PTR_ERR(dmabuf));
+ PTR_ERR(obj));
err = PTR_ERR(obj);
goto out_ret;
}
@@ -158,7 +158,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
regions, num_regions);
if (IS_ERR(obj)) {
pr_err("__i915_gem_object_create_user failed with err=%ld\n",
- PTR_ERR(dmabuf));
+ PTR_ERR(obj));
err = PTR_ERR(obj);
goto out_ret;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index 28a700f08b49..ecb691c81d1e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -4,6 +4,7 @@
*/
#include "gt/intel_migrate.h"
+#include "gem/i915_gem_ttm_move.h"
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
bool fill)
@@ -227,17 +228,38 @@ out_put:
return err;
}
+static int igt_lmem_pages_failsafe_migrate(void *arg)
+{
+ int fail_gpu, fail_alloc, ret;
+
+ for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
+ for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
+ pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
+ fail_gpu, fail_alloc);
+ i915_ttm_migrate_set_failure_modes(fail_gpu,
+ fail_alloc);
+ ret = igt_lmem_pages_migrate(arg);
+ if (ret)
+ goto out_err;
+ }
+ }
+
+out_err:
+ i915_ttm_migrate_set_failure_modes(false, false);
+ return ret;
+}
+
int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_smem_create_migrate),
SUBTEST(igt_lmem_create_migrate),
SUBTEST(igt_same_create_migrate),
- SUBTEST(igt_lmem_pages_migrate),
+ SUBTEST(igt_lmem_pages_failsafe_migrate),
};
if (!HAS_LMEM(i915))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
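igt_lmem_pages_failsafe_migrate() above sweeps all four combinations of the two injected failure knobs and unconditionally clears them on the way out. The shape generalises to any fault-injection matrix; a compact userspace sketch with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static bool inject_gpu, inject_alloc;

static void set_failure_modes(bool gpu, bool alloc)
{
	inject_gpu = gpu;
	inject_alloc = alloc;
}

static int run_migrate_once(void)
{
	/* A real test would exercise migration here, honouring the
	 * injected failures; this stub always succeeds. */
	return 0;
}

int main(void)
{
	int fail_gpu, fail_alloc, ret = 0;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
			printf("modes: gpu=%d alloc=%d\n", fail_gpu, fail_alloc);
			set_failure_modes(fail_gpu, fail_alloc);
			ret = run_migrate_once();
			if (ret)
				goto out;
		}
	}
out:
	/* Always restore pristine state for whoever runs next. */
	set_failure_modes(false, false);
	return ret;
}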
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 6d30cdfa80f3..c6291429b00c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -84,6 +84,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
struct rnd_state *prng)
{
const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long page;
@@ -141,7 +142,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
if (offset >= obj->base.size)
goto out;
- intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
@@ -175,6 +176,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
{
const unsigned int nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;
unsigned long page;
int err;
@@ -234,7 +236,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
if (offset >= obj->base.size)
continue;
- intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
@@ -616,14 +618,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
- intel_gt_pm_get(&i915->gt);
- cancel_delayed_work_sync(&i915->gt.requests.retire_work);
+ intel_gt_pm_get(to_gt(i915));
+ cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
igt_flush_test(i915);
- intel_gt_pm_put(&i915->gt);
+ intel_gt_pm_put(to_gt(i915));
i915_gem_driver_register__shrinker(i915);
}
@@ -651,8 +653,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Disable background reaper */
disable_retire_worker(i915);
- GEM_BUG_ON(!i915->gt.awake);
- intel_gt_retire_requests(&i915->gt);
+ GEM_BUG_ON(!to_gt(i915)->awake);
+ intel_gt_retire_requests(to_gt(i915));
i915_gem_drain_freed_objects(i915);
/* Trim the device mmap space to only a page */
@@ -728,7 +730,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
break;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -942,7 +944,7 @@ static int __igt_mmap(struct drm_i915_private *i915,
}
if (type == I915_MMAP_TYPE_GTT)
- intel_gt_flush_ggtt_writes(&i915->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
err = wc_check(obj);
if (err == -ENXIO)
@@ -1049,7 +1051,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
goto out_unmap;
}
- intel_gt_flush_ggtt_writes(&i915->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
err = access_process_vm(current, addr, &x, sizeof(x), 0);
if (err != sizeof(x)) {
@@ -1065,7 +1067,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
goto out_unmap;
}
- intel_gt_flush_ggtt_writes(&i915->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
err = __get_user(y, ptr);
if (err) {
@@ -1165,7 +1167,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
}
if (type == I915_MMAP_TYPE_GTT)
- intel_gt_flush_ggtt_writes(&i915->gt);
+ intel_gt_flush_ggtt_writes(to_gt(i915));
for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
@@ -1366,20 +1368,10 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
}
}
- if (!obj->ops->mmap_ops) {
- err = check_absent(addr, obj->base.size);
- if (err) {
- pr_err("%s: was not absent\n", obj->mm.region->name);
- goto out_unmap;
- }
- } else {
- /* ttm allows access to evicted regions by design */
-
- err = check_present(addr, obj->base.size);
- if (err) {
- pr_err("%s: was not present\n", obj->mm.region->name);
- goto out_unmap;
- }
+ err = check_absent(addr, obj->base.size);
+ if (err) {
+ pr_err("%s: was not absent\n", obj->mm.region->name);
+ goto out_unmap;
}
out_unmap:
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 890191f286e3..6e9292918bfc 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -185,7 +185,6 @@ static void gen6_alloc_va_range(struct i915_address_space *vm,
pt = stash->pt[0];
__i915_gem_object_pin_pages(pt->base);
- i915_gem_object_make_unshrinkable(pt->base);
fill32_px(pt, vm->scratch[0]->encode);
@@ -262,30 +261,14 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- __i915_vma_put(ppgtt->vma);
-
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
mutex_destroy(&ppgtt->flush);
- mutex_destroy(&ppgtt->pin_mutex);
free_pd(&ppgtt->base.vm, ppgtt->base.pd);
}
-static int pd_vma_set_pages(struct i915_vma *vma)
-{
- vma->pages = ERR_PTR(-ENODEV);
- return 0;
-}
-
-static void pd_vma_clear_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
-
- vma->pages = NULL;
-}
-
static void pd_vma_bind(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma *vma,
@@ -325,43 +308,10 @@ static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
}
static const struct i915_vma_ops pd_vma_ops = {
- .set_pages = pd_vma_set_pages,
- .clear_pages = pd_vma_clear_pages,
.bind_vma = pd_vma_bind,
.unbind_vma = pd_vma_unbind,
};
-static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
-{
- struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(size > ggtt->vm.total);
-
- vma = i915_vma_alloc();
- if (!vma)
- return ERR_PTR(-ENOMEM);
-
- i915_active_init(&vma->active, NULL, NULL, 0);
-
- kref_init(&vma->ref);
- mutex_init(&vma->pages_mutex);
- vma->vm = i915_vm_get(&ggtt->vm);
- vma->ops = &pd_vma_ops;
- vma->private = ppgtt;
-
- vma->size = size;
- vma->fence_size = size;
- atomic_set(&vma->flags, I915_VMA_GGTT);
- vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
-
- INIT_LIST_HEAD(&vma->obj_link);
- INIT_LIST_HEAD(&vma->closed_link);
-
- return vma;
-}
-
int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
@@ -378,42 +328,92 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
return 0;
- if (mutex_lock_interruptible(&ppgtt->pin_mutex))
- return -EINTR;
+ /* grab the ppgtt resv to pin the object */
+ err = i915_vm_lock_objects(&ppgtt->base.vm, ww);
+ if (err)
+ return err;
/*
* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- err = 0;
- if (!atomic_read(&ppgtt->pin_count))
+ if (!atomic_read(&ppgtt->pin_count)) {
err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH);
+
+ GEM_BUG_ON(ppgtt->vma->fence);
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma));
+ }
if (!err)
atomic_inc(&ppgtt->pin_count);
- mutex_unlock(&ppgtt->pin_mutex);
return err;
}
-void gen6_ppgtt_unpin(struct i915_ppgtt *base)
+static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj)
{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ obj->mm.pages = ZERO_SIZE_PTR;
+ return 0;
+}
- GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
- if (atomic_dec_and_test(&ppgtt->pin_count))
- i915_vma_unpin(ppgtt->vma);
+static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
}
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
+static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = {
+ .name = "pd_dummy_obj",
+ .get_pages = pd_dummy_obj_get_pages,
+ .put_pages = pd_dummy_obj_put_pages,
+};
+
+static struct i915_page_directory *
+gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt)
{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt;
+ struct i915_page_directory *pd;
+ int err;
- if (!atomic_read(&ppgtt->pin_count))
- return;
+ pd = __alloc_pd(I915_PDES);
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
- i915_vma_unpin(ppgtt->vma);
- atomic_set(&ppgtt->pin_count, 0);
+ pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915,
+ &pd_dummy_obj_ops,
+ I915_PDES * SZ_4K);
+ if (IS_ERR(pd->pt.base)) {
+ err = PTR_ERR(pd->pt.base);
+ pd->pt.base = NULL;
+ goto err_pd;
+ }
+
+ pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm);
+ pd->pt.base->shares_resv_from = &ppgtt->base.vm;
+
+ ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL);
+ if (IS_ERR(ppgtt->vma)) {
+ err = PTR_ERR(ppgtt->vma);
+ ppgtt->vma = NULL;
+ goto err_pd;
+ }
+
+ /* The dummy object we create is special; override its ops. */
+ ppgtt->vma->ops = &pd_vma_ops;
+ ppgtt->vma->private = ppgtt;
+ return pd;
+
+err_pd:
+ free_pd(&ppgtt->base.vm, pd);
+ return ERR_PTR(err);
+}
+
+void gen6_ppgtt_unpin(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
+ if (atomic_dec_and_test(&ppgtt->pin_count))
+ i915_vma_unpin(ppgtt->vma);
}
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
@@ -427,7 +427,6 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
return ERR_PTR(-ENOMEM);
mutex_init(&ppgtt->flush);
- mutex_init(&ppgtt->pin_mutex);
ppgtt_init(&ppgtt->base, gt, 0);
ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
@@ -440,21 +439,16 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma;
+ ppgtt->base.vm.alloc_scratch_dma = alloc_pt_dma;
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
- ppgtt->base.pd = __alloc_pd(I915_PDES);
- if (!ppgtt->base.pd) {
- err = -ENOMEM;
- goto err_free;
- }
-
err = gen6_ppgtt_init_scratch(ppgtt);
if (err)
- goto err_pd;
+ goto err_free;
- ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
- if (IS_ERR(ppgtt->vma)) {
- err = PTR_ERR(ppgtt->vma);
+ ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
+ if (IS_ERR(ppgtt->base.pd)) {
+ err = PTR_ERR(ppgtt->base.pd);
goto err_scratch;
}
@@ -462,10 +456,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
err_scratch:
free_scratch(&ppgtt->base.vm);
-err_pd:
- free_pd(&ppgtt->base.vm, ppgtt->base.pd);
err_free:
- mutex_destroy(&ppgtt->pin_mutex);
kfree(ppgtt);
return ERR_PTR(err);
}
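gen6_alloc_top_pd() above backs the top-level page directory with a "dummy" GEM object whose get_pages/put_pages do no real allocation, then overrides the resulting VMA's ops. The underlying idea -- an object that satisfies an interface without owning backing store -- in a minimal userspace vtable sketch (hypothetical types):

#include <stdio.h>

struct obj;

/* A tiny ops table mirroring drm_i915_gem_object_ops in spirit. */
struct obj_ops {
	const char *name;
	int (*get_pages)(struct obj *o);
	void (*put_pages)(struct obj *o);
};

struct obj {
	const struct obj_ops *ops;
	void *pages;
};

static int dummy_get_pages(struct obj *o)
{
	/* No real allocation: publish a non-NULL sentinel, akin to
	 * the kernel's ZERO_SIZE_PTR for zero-sized buffers. */
	o->pages = (void *)16;
	return 0;
}

static void dummy_put_pages(struct obj *o)
{
	o->pages = NULL;	/* nothing was allocated */
}

static const struct obj_ops dummy_ops = {
	.name = "pd_dummy_obj",
	.get_pages = dummy_get_pages,
	.put_pages = dummy_put_pages,
};

int main(void)
{
	struct obj o = { .ops = &dummy_ops };

	if (o.ops->get_pages(&o))
		return 1;
	printf("%s has pages at %p\n", o.ops->name, o.pages);
	o.ops->put_pages(&o);
	return 0;
}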
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
index 6a61a5c3a85a..5e5cf2ec3309 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
@@ -19,7 +19,6 @@ struct gen6_ppgtt {
u32 pp_dir;
atomic_t pin_count;
- struct mutex pin_mutex;
bool scan_for_unused_pt;
};
@@ -71,7 +70,6 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww);
void gen6_ppgtt_unpin(struct i915_ppgtt *base);
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
void gen6_ppgtt_enable(struct intel_gt *gt);
void gen7_ppgtt_enable(struct intel_gt *gt);
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 461844dffd7e..e320610dd0b8 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -42,7 +42,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
vf_flush_wa = true;
/* WaForGAMHang:kbl */
- if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_C0))
+ if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0))
dc_flush_wa = true;
}
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 037a9a6e4889..b012c50f7ce7 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -18,7 +18,7 @@
static u64 gen8_pde_encode(const dma_addr_t addr,
const enum i915_cache_level level)
{
- u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;
+ u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE;
@@ -32,10 +32,10 @@ static u64 gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+ gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~_PAGE_RW;
+ pte &= ~GEN8_PAGE_RW;
if (flags & PTE_LM)
pte |= GEN12_PPGTT_PTE_LM;
@@ -301,7 +301,6 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
pt = stash->pt[!!lvl];
__i915_gem_object_pin_pages(pt->base);
- i915_gem_object_make_unshrinkable(pt->base);
fill_px(pt, vm->scratch[lvl]->encode);
@@ -652,7 +651,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch[0]->encode =
gen8_pte_encode(px_dma(vm->scratch[0]),
- I915_CACHE_LLC, pte_flags);
+ I915_CACHE_NONE, pte_flags);
for (i = 1; i <= vm->top; i++) {
struct drm_i915_gem_object *obj;
@@ -668,7 +667,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
}
fill_px(obj, vm->scratch[i - 1]->encode);
- obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);
+ obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE);
vm->scratch[i] = obj;
}
@@ -777,10 +776,29 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
*/
ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);
- if (HAS_LMEM(gt->i915))
+ if (HAS_LMEM(gt->i915)) {
ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
- else
+
+ /*
+ * On some platforms the hw has dropped support for 4K GTT pages
+ * when dealing with LMEM, and due to the design of 64K GTT
+ * pages in the hw, we can only mark the *entire* page-table as
+ * operating in 64K GTT mode, since the enable bit is still on
+ * the pde, and not the pte. And since we still need to allow
+ * 4K GTT pages for SMEM objects, we can't have a "normal" 4K
+ * page-table with scratch pointing to LMEM, since that's
+ * undefined from the hw pov. The simplest solution is to just
+ * move the 64K scratch page to SMEM on such platforms and call
+ * it a day, since that should work for all configurations.
+ */
+ if (HAS_64K_PAGES(gt->i915))
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ else
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_lmem;
+ } else {
ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ }
err = gen8_init_scratch(&ppgtt->vm);
if (err)
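The encode hunks above stop borrowing the CPU's _PAGE_PRESENT/_PAGE_RW macros and use i915-private GEN8_PAGE_* bits (added to intel_gtt.h later in this diff), so the GPU page-table layout no longer depends on the host architecture's PTE format. A standalone sketch of the encoding (bit positions match the new defines; PTE_READ_ONLY's value here is illustrative):

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t gen8_pte_t;

#define GEN8_PAGE_PRESENT (1ULL << 0)
#define GEN8_PAGE_RW      (1ULL << 1)
#define PTE_READ_ONLY     (1u << 0)	/* caller flag, hypothetical value */

static gen8_pte_t pte_encode(uint64_t addr, unsigned int flags)
{
	/* Valid + writable by default, mirroring gen8_pte_encode(). */
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (flags & PTE_READ_ONLY)
		pte &= ~GEN8_PAGE_RW;

	return pte;
}

int main(void)
{
	printf("rw pte: %#" PRIx64 "\n", pte_encode(0x100000, 0));
	printf("ro pte: %#" PRIx64 "\n", pte_encode(0x100000, PTE_READ_ONLY));
	return 0;
}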
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 5634d14052bc..ba083d800a08 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -219,7 +219,7 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
*/
err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
- if (!err && ce->ring->vma->obj)
+ if (!err)
err = i915_gem_object_lock(ce->ring->vma->obj, ww);
if (!err && ce->state)
err = i915_gem_object_lock(ce->state->obj, ww);
@@ -228,17 +228,17 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
if (err)
return err;
- err = i915_active_acquire(&ce->active);
+ err = ce->ops->pre_pin(ce, ww, &vaddr);
if (err)
goto err_ctx_unpin;
- err = ce->ops->pre_pin(ce, ww, &vaddr);
+ err = i915_active_acquire(&ce->active);
if (err)
- goto err_release;
+ goto err_post_unpin;
err = mutex_lock_interruptible(&ce->pin_mutex);
if (err)
- goto err_post_unpin;
+ goto err_release;
intel_engine_pm_might_get(ce->engine);
@@ -273,11 +273,11 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
err_unlock:
mutex_unlock(&ce->pin_mutex);
+err_release:
+ i915_active_release(&ce->active);
err_post_unpin:
if (!handoff)
ce->ops->post_unpin(ce);
-err_release:
- i915_active_release(&ce->active);
err_ctx_unpin:
intel_context_post_unpin(ce);
@@ -364,7 +364,7 @@ static int __intel_context_active(struct i915_active *active)
return 0;
}
-static int __i915_sw_fence_call
+static int
sw_fence_dummy_notify(struct i915_sw_fence *sf,
enum i915_sw_fence_notify state)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 246c37d72cd7..d8c74bbf9aae 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -211,7 +211,8 @@ static inline void intel_context_enter(struct intel_context *ce)
static inline void intel_context_mark_active(struct intel_context *ce)
{
- lockdep_assert_held(&ce->timeline->mutex);
+ lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
+ test_bit(CONTEXT_IS_PARKING, &ce->flags));
++ce->active_count;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 9e0177dc5484..30cd81ad8911 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -118,6 +118,7 @@ struct intel_context {
#define CONTEXT_LRCA_DIRTY 9
#define CONTEXT_GUC_INIT 10
#define CONTEXT_PERMA_PIN 11
+#define CONTEXT_IS_PARKING 12
struct {
u64 timeout_us;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index ff6753ccb129..352254e001b4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -325,6 +325,38 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
+ if (GRAPHICS_VER(gt->i915) >= 11) {
+ static const u32 engine_reset_domains[] = {
+ [RCS0] = GEN11_GRDOM_RENDER,
+ [BCS0] = GEN11_GRDOM_BLT,
+ [VCS0] = GEN11_GRDOM_MEDIA,
+ [VCS1] = GEN11_GRDOM_MEDIA2,
+ [VCS2] = GEN11_GRDOM_MEDIA3,
+ [VCS3] = GEN11_GRDOM_MEDIA4,
+ [VCS4] = GEN11_GRDOM_MEDIA5,
+ [VCS5] = GEN11_GRDOM_MEDIA6,
+ [VCS6] = GEN11_GRDOM_MEDIA7,
+ [VCS7] = GEN11_GRDOM_MEDIA8,
+ [VECS0] = GEN11_GRDOM_VECS,
+ [VECS1] = GEN11_GRDOM_VECS2,
+ [VECS2] = GEN11_GRDOM_VECS3,
+ [VECS3] = GEN11_GRDOM_VECS4,
+ };
+ GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
+ !engine_reset_domains[id]);
+ engine->reset_domain = engine_reset_domains[id];
+ } else {
+ static const u32 engine_reset_domains[] = {
+ [RCS0] = GEN6_GRDOM_RENDER,
+ [BCS0] = GEN6_GRDOM_BLT,
+ [VCS0] = GEN6_GRDOM_MEDIA,
+ [VCS1] = GEN8_GRDOM_MEDIA2,
+ [VECS0] = GEN6_GRDOM_VECS,
+ };
+ GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
+ !engine_reset_domains[id]);
+ engine->reset_domain = engine_reset_domains[id];
+ }
engine->i915 = i915;
engine->gt = gt;
engine->uncore = gt->uncore;
@@ -363,7 +395,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
DRIVER_CAPS(i915)->has_logical_contexts = true;
ewma__engine_latency_init(&engine->latency);
- seqcount_init(&engine->stats.lock);
+ seqcount_init(&engine->stats.execlists.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -1676,14 +1708,18 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
+ struct i915_vma_snapshot *vsnap = &rq->batch_snapshot;
void *ring;
int size;
+ if (!i915_vma_snapshot_present(vsnap))
+ vsnap = NULL;
+
drm_printf(m,
"[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
rq->head, rq->postfix, rq->tail,
- rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
- rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ vsnap ? upper_32_bits(vsnap->gtt_offset) : ~0u,
+ vsnap ? lower_32_bits(vsnap->gtt_offset) : ~0u);
size = rq->tail - rq->head;
if (rq->tail < rq->head)
@@ -1915,22 +1951,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
intel_engine_print_breadcrumbs(engine, m);
}
-static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
- ktime_t *now)
-{
- ktime_t total = engine->stats.total;
-
- /*
- * If the engine is executing something at the moment
- * add it to the total.
- */
- *now = ktime_get();
- if (READ_ONCE(engine->stats.active))
- total = ktime_add(total, ktime_sub(*now, engine->stats.start));
-
- return total;
-}
-
/**
* intel_engine_get_busy_time() - Return current accumulated engine busyness
* @engine: engine to report on
@@ -1940,15 +1960,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
*/
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{
- unsigned int seq;
- ktime_t total;
-
- do {
- seq = read_seqcount_begin(&engine->stats.lock);
- total = __intel_engine_get_busy_time(engine, now);
- } while (read_seqcount_retry(&engine->stats.lock, seq));
-
- return total;
+ return engine->busyness(engine, now);
}
struct intel_context *
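With the hunk above, intel_engine_get_busy_time() is reduced to a call through the new engine->busyness hook, letting execlists and GuC submission each plug in their own accounting. A userspace sketch of that backend dispatch, with hypothetical types:

#include <stdio.h>
#include <time.h>

typedef long long ktime_ns;

struct engine {
	const char *name;
	/* Backend-specific busyness, set at engine init time. */
	ktime_ns (*busyness)(struct engine *e, ktime_ns *now);
};

static ktime_ns now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (ktime_ns)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static ktime_ns execlists_busyness(struct engine *e, ktime_ns *now)
{
	*now = now_ns();
	return 123456;		/* would read the seqcount-protected stats */
}

/* The frontend stays backend-agnostic, as in the patch. */
static ktime_ns engine_get_busy_time(struct engine *e, ktime_ns *now)
{
	return e->busyness(e, now);
}

int main(void)
{
	struct engine rcs0 = { .name = "rcs0", .busyness = execlists_busyness };
	ktime_ns now, busy = engine_get_busy_time(&rcs0, &now);

	printf("%s busy %lldns at %lldns\n", rcs0.name, busy, now);
	return 0;
}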
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index a1334b48dde7..b0a4a2dbe3ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -26,7 +26,7 @@ static void dbg_poison_ce(struct intel_context *ce)
int type = i915_coherent_map_type(ce->engine->i915, obj, true);
void *map;
- if (!i915_gem_object_trylock(obj))
+ if (!i915_gem_object_trylock(obj, NULL))
return;
map = i915_gem_object_pin_map(obj, type);
@@ -80,39 +80,6 @@ static int __engine_unpark(struct intel_wakeref *wf)
return 0;
}
-#if IS_ENABLED(CONFIG_LOCKDEP)
-
-static unsigned long __timeline_mark_lock(struct intel_context *ce)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
-
- return flags;
-}
-
-static void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
-{
- mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
- local_irq_restore(flags);
-}
-
-#else
-
-static unsigned long __timeline_mark_lock(struct intel_context *ce)
-{
- return 0;
-}
-
-static void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
-{
-}
-
-#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
-
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct i915_request *rq = to_request(fence);
@@ -159,7 +126,6 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- unsigned long flags;
bool result = true;
/*
@@ -214,7 +180,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* engine->wakeref.count, we may see the request completion and retire
* it causing an underflow of the engine->wakeref.
*/
- flags = __timeline_mark_lock(ce);
+ set_bit(CONTEXT_IS_PARKING, &ce->flags);
GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
rq = __i915_request_create(ce, GFP_NOWAIT);
@@ -246,7 +212,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
result = false;
out_unlock:
- __timeline_mark_unlock(ce, flags);
+ clear_bit(CONTEXT_IS_PARKING, &ce->flags);
return result;
}
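Rather than faking a timeline-mutex acquisition for lockdep's benefit while parking, the hunks above set CONTEXT_IS_PARKING around the window and let the relaxed assertion in intel_context_mark_active() (see the intel_context.h hunk earlier) accept either condition. A userspace sketch of that "lock held or explicitly excused" assertion style:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t timeline_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool is_parking;
static _Thread_local int mutex_held;	/* poor man's lockdep */

static void mark_active(void)
{
	/* Either the timeline mutex is held, or we are in the special
	 * parking path that is serialised by other means. */
	assert(mutex_held || atomic_load(&is_parking));
}

int main(void)
{
	pthread_mutex_lock(&timeline_mutex);
	mutex_held = 1;
	mark_active();			/* normal request submission */
	mutex_held = 0;
	pthread_mutex_unlock(&timeline_mutex);

	atomic_store(&is_parking, true);
	mark_active();			/* parking path, lock not taken */
	atomic_store(&is_parking, false);
	return 0;
}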
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
index 24fbdd94351a..8e762d683e50 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_stats.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
@@ -15,45 +15,46 @@
static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
unsigned long flags;
- if (engine->stats.active) {
- engine->stats.active++;
+ if (stats->active) {
+ stats->active++;
return;
}
/* The writer is serialised; but the pmu reader may be from hardirq */
local_irq_save(flags);
- write_seqcount_begin(&engine->stats.lock);
+ write_seqcount_begin(&stats->lock);
- engine->stats.start = ktime_get();
- engine->stats.active++;
+ stats->start = ktime_get();
+ stats->active++;
- write_seqcount_end(&engine->stats.lock);
+ write_seqcount_end(&stats->lock);
local_irq_restore(flags);
- GEM_BUG_ON(!engine->stats.active);
+ GEM_BUG_ON(!stats->active);
}
static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
unsigned long flags;
- GEM_BUG_ON(!engine->stats.active);
- if (engine->stats.active > 1) {
- engine->stats.active--;
+ GEM_BUG_ON(!stats->active);
+ if (stats->active > 1) {
+ stats->active--;
return;
}
local_irq_save(flags);
- write_seqcount_begin(&engine->stats.lock);
+ write_seqcount_begin(&stats->lock);
- engine->stats.active--;
- engine->stats.total =
- ktime_add(engine->stats.total,
- ktime_sub(ktime_get(), engine->stats.start));
+ stats->active--;
+ stats->total = ktime_add(stats->total,
+ ktime_sub(ktime_get(), stats->start));
- write_seqcount_end(&engine->stats.lock);
+ write_seqcount_end(&stats->lock);
local_irq_restore(flags);
}
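The helpers above keep the writer cheap (one writer, irqs off) while readers retry through the sequence counter. A self-contained approximation of the reader/writer protocol with C11 atomics standing in for seqcount_t -- illustrative only, since the kernel primitive carries extra ordering and lockdep machinery:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;			/* even = stable, odd = in update */
static long long total, start;
static unsigned int active;

static void write_begin(void) { atomic_fetch_add_explicit(&seq, 1, memory_order_release); }
static void write_end(void)   { atomic_fetch_add_explicit(&seq, 1, memory_order_release); }

static void context_out(long long now)
{
	write_begin();
	active--;
	total += now - start;
	write_end();
}

static long long read_total(void)
{
	unsigned int s;
	long long t;

	do {
		/* Retry while a writer is mid-update or raced past us. */
		s = atomic_load_explicit(&seq, memory_order_acquire);
		t = total;
	} while ((s & 1) || s != atomic_load_explicit(&seq, memory_order_acquire));
	return t;
}

int main(void)
{
	active = 1;
	start = 100;
	context_out(150);
	printf("total=%lld\n", read_total());	/* prints total=50 */
	return 0;
}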
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index e0f773585c29..36365bdbe1ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -257,6 +257,55 @@ struct intel_engine_execlists {
#define INTEL_ENGINE_CS_MAX_NAME 8
+struct intel_engine_execlists_stats {
+ /**
+ * @active: Number of contexts currently scheduled in.
+ */
+ unsigned int active;
+
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqcount_t lock;
+
+ /**
+ * @total: Total time this engine was busy.
+ *
+ * Accumulated time not counting the most recent block in cases where
+ * engine is currently busy (active > 0).
+ */
+ ktime_t total;
+
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == 0, active is active > 0.
+ */
+ ktime_t start;
+};
+
+struct intel_engine_guc_stats {
+ /**
+ * @running: Active state of the engine when busyness was last sampled.
+ */
+ bool running;
+
+ /**
+ * @prev_total: Previous value of total runtime clock cycles.
+ */
+ u32 prev_total;
+
+ /**
+ * @total_gt_clks: Total gt clock cycles this engine was busy.
+ */
+ u64 total_gt_clks;
+
+ /**
+ * @start_gt_clk: GT clock time of last idle to active transition.
+ */
+ u64 start_gt_clk;
+};
+
struct intel_engine_cs {
struct drm_i915_private *i915;
struct intel_gt *gt;
@@ -269,6 +318,7 @@ struct intel_engine_cs {
unsigned int guc_id;
intel_engine_mask_t mask;
+ u32 reset_domain;
/**
* @logical_mask: logical mask of engine, reported to user space via
* query IOCTL and used to communicate with the GuC in logical space.
@@ -439,6 +489,12 @@ struct intel_engine_cs {
void (*add_active_request)(struct i915_request *rq);
void (*remove_active_request)(struct i915_request *rq);
+ /*
+ * Get engine busyness and the time at which the busyness was sampled.
+ */
+ ktime_t (*busyness)(struct intel_engine_cs *engine,
+ ktime_t *now);
+
struct intel_engine_execlists execlists;
/*
@@ -488,30 +544,10 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
struct {
- /**
- * @active: Number of contexts currently scheduled in.
- */
- unsigned int active;
-
- /**
- * @lock: Lock protecting the below fields.
- */
- seqcount_t lock;
-
- /**
- * @total: Total time this engine was busy.
- *
- * Accumulated time not counting the most recent block in cases
- * where engine is currently busy (active > 0).
- */
- ktime_t total;
-
- /**
- * @start: Timestamp of the last idle to active transition.
- *
- * Idle is defined as active == 0, active is active > 0.
- */
- ktime_t start;
+ union {
+ struct intel_engine_execlists_stats execlists;
+ struct intel_engine_guc_stats guc;
+ };
/**
* @rps: Utilisation at last RPS sampling.
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 8f8bea08e734..9ce85a845105 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -116,7 +116,7 @@ static void set_scheduler_caps(struct drm_i915_private *i915)
disabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
- if (intel_uc_uses_guc_submission(&i915->gt.uc))
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
for (i = 0; i < ARRAY_SIZE(map); i++) {
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index bedb80057046..a69df5e9e77a 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2186,7 +2186,8 @@ struct execlists_capture {
static void execlists_capture_work(struct work_struct *work)
{
struct execlists_capture *cap = container_of(work, typeof(*cap), work);
- const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ const gfp_t gfp = __GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN;
struct intel_engine_cs *engine = cap->rq->engine;
struct intel_gt_coredump *gt = cap->error->gt;
struct intel_engine_capture_vma *vma;
@@ -3293,6 +3294,38 @@ static void execlists_release(struct intel_engine_cs *engine)
lrc_fini_wa_ctx(engine);
}
+static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine,
+ ktime_t *now)
+{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+ ktime_t total = stats->total;
+
+ /*
+ * If the engine is executing something at the moment
+ * add it to the total.
+ */
+ *now = ktime_get();
+ if (READ_ONCE(stats->active))
+ total = ktime_add(total, ktime_sub(*now, stats->start));
+
+ return total;
+}
+
+static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine,
+ ktime_t *now)
+{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+ unsigned int seq;
+ ktime_t total;
+
+ do {
+ seq = read_seqcount_begin(&stats->lock);
+ total = __execlists_engine_busyness(engine, now);
+ } while (read_seqcount_retry(&stats->lock, seq));
+
+ return total;
+}
+
static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
@@ -3349,6 +3382,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_bb_start = gen8_emit_bb_start;
else
engine->emit_bb_start = gen8_emit_bb_start_noarb;
+
+ engine->busyness = execlists_engine_busyness;
}
static void logical_ring_default_irqs(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 57c97554393b..5263dda7f8d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -3,12 +3,14 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/agp_backend.h>
#include <linux/stop_machine.h>
#include <asm/set_memory.h>
#include <asm/smp.h>
#include <drm/i915_drm.h>
+#include <drm/intel-gtt.h>
#include "gem/i915_gem_lmem.h"
@@ -20,9 +22,6 @@
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma);
-
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -104,7 +103,7 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- if (!intel_vtd_active())
+ if (!intel_vtd_active(i915))
return false;
if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
@@ -116,17 +115,26 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
return false;
}
-void i915_ggtt_suspend(struct i915_ggtt *ggtt)
+/**
+ * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
+ * @vm: The VM to suspend the mappings for
+ *
+ * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
+ * DPT page table.
+ */
+void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
int open;
- mutex_lock(&ggtt->vm.mutex);
+ drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+
+ mutex_lock(&vm->mutex);
/* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&ggtt->vm.open, 0);
+ open = atomic_xchg(&vm->open, 0);
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
i915_vma_wait_for_bind(vma);
@@ -139,11 +147,17 @@ void i915_ggtt_suspend(struct i915_ggtt *ggtt)
}
}
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->invalidate(ggtt);
- atomic_set(&ggtt->vm.open, open);
+ vm->clear_range(vm, 0, vm->total);
- mutex_unlock(&ggtt->vm.mutex);
+ atomic_set(&vm->open, open);
+
+ mutex_unlock(&vm->mutex);
+}
+
+void i915_ggtt_suspend(struct i915_ggtt *ggtt)
+{
+ i915_ggtt_suspend_vm(&ggtt->vm);
+ ggtt->invalidate(ggtt);
intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
@@ -192,7 +206,7 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
- gen8_pte_t pte = addr | _PAGE_PRESENT;
+ gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
if (flags & PTE_LM)
pte |= GEN12_GGTT_PTE_LM;
@@ -875,21 +889,6 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
-int ggtt_set_pages(struct i915_vma *vma)
-{
- int ret;
-
- GEM_BUG_ON(vma->pages);
-
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
-
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
static void gen6_gmch_remove(struct i915_address_space *vm)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
@@ -924,6 +923,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
size = gen8_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
@@ -950,8 +950,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
@@ -1077,6 +1075,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.clear_range = nop_clear_range;
if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
@@ -1100,8 +1099,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
return ggtt_probe_common(ggtt, size);
}
@@ -1129,6 +1126,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
if (needs_idle_maps(i915)) {
drm_notice(&i915->drm,
@@ -1145,8 +1143,6 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
if (unlikely(ggtt->do_idle_maps))
drm_notice(&i915->drm,
@@ -1212,11 +1208,11 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
int ret;
- ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+ ret = ggtt_probe_hw(&i915->ggtt, to_gt(i915));
if (ret)
return ret;
- if (intel_vtd_active())
+ if (intel_vtd_active(i915))
drm_info(&i915->drm, "VT-d active for gfx access\n");
return 0;
@@ -1253,368 +1249,66 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
ggtt->invalidate(ggtt);
}
-void i915_ggtt_resume(struct i915_ggtt *ggtt)
+/**
+ * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
+ * @vm: The VM to restore the mappings for
+ *
+ * Restore the memory mappings for all objects mapped to HW via the GGTT or a
+ * DPT page table.
+ *
+ * Returns %true if restoring the mapping for any object that was in a write
+ * domain before suspend.
+ */
+bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
- bool flush = false;
+ bool write_domain_objs = false;
int open;
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
+ drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
/* First fill our portion of the GTT with scratch pages */
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+ vm->clear_range(vm, 0, vm->total);
/* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&ggtt->vm.open, 0);
+ open = atomic_xchg(&vm->open, 0);
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
+ list_for_each_entry(vma, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
unsigned int was_bound =
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- vma->ops->bind_vma(&ggtt->vm, NULL, vma,
+ vma->ops->bind_vma(vm, NULL, vma,
obj ? obj->cache_level : 0,
was_bound);
if (obj) { /* only used during resume => exclusive access */
- flush |= fetch_and_zero(&obj->write_domain);
+ write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
}
- atomic_set(&ggtt->vm.open, open);
- ggtt->invalidate(ggtt);
-
- if (flush)
- wbinvd_on_all_cpus();
-
- if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
- setup_private_pat(ggtt->vm.gt->uncore);
-
- intel_ggtt_restore_fences(ggtt);
-}
-
-static struct scatterlist *
-rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int src_stride, unsigned int dst_stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int column, row;
- unsigned int src_idx;
-
- for (column = 0; column < width; column++) {
- unsigned int left;
-
- src_idx = src_stride * (height - 1) + column + offset;
- for (row = 0; row < height; row++) {
- st->nents++;
- /*
- * We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * The only thing we need are DMA addresses.
- */
- sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) =
- i915_gem_object_get_dma_address(obj, src_idx);
- sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
- sg = sg_next(sg);
- src_idx -= src_stride;
- }
-
- left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
-
- if (!left)
- continue;
-
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, left, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = left;
- sg = sg_next(sg);
- }
-
- return sg;
-}
-
-static noinline struct sg_table *
-intel_rotate_pages(struct intel_rotation_info *rot_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_rotation_info_size(rot_info);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
- sg = rotate_pages(obj, rot_info->plane[i].offset,
- rot_info->plane[i].width, rot_info->plane[i].height,
- rot_info->plane[i].src_stride,
- rot_info->plane[i].dst_stride,
- st, sg);
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
-
- drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width,
- rot_info->plane[0].height, size);
-
- return ERR_PTR(ret);
-}
-
-static struct scatterlist *
-remap_pages(struct drm_i915_gem_object *obj,
- unsigned int offset, unsigned int alignment_pad,
- unsigned int width, unsigned int height,
- unsigned int src_stride, unsigned int dst_stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int row;
-
- if (!width || !height)
- return sg;
-
- if (alignment_pad) {
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, alignment_pad * 4096, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = alignment_pad * 4096;
- sg = sg_next(sg);
- }
-
- for (row = 0; row < height; row++) {
- unsigned int left = width * I915_GTT_PAGE_SIZE;
-
- while (left) {
- dma_addr_t addr;
- unsigned int length;
-
- /*
- * We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * The only thing we need are DMA addresses.
- */
-
- addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
-
- length = min(left, length);
-
- st->nents++;
-
- sg_set_page(sg, NULL, length, 0);
- sg_dma_address(sg) = addr;
- sg_dma_len(sg) = length;
- sg = sg_next(sg);
-
- offset += length / I915_GTT_PAGE_SIZE;
- left -= length;
- }
-
- offset += src_stride - width;
-
- left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
-
- if (!left)
- continue;
-
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, left, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = left;
- sg = sg_next(sg);
- }
-
- return sg;
-}
-
-static noinline struct sg_table *
-intel_remap_pages(struct intel_remapped_info *rem_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_remapped_info_size(rem_info);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
- unsigned int gtt_offset = 0;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- unsigned int alignment_pad = 0;
-
- if (rem_info->plane_alignment)
- alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;
-
- sg = remap_pages(obj,
- rem_info->plane[i].offset, alignment_pad,
- rem_info->plane[i].width, rem_info->plane[i].height,
- rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
- st, sg);
-
- gtt_offset += alignment_pad +
- rem_info->plane[i].dst_stride * rem_info->plane[i].height;
- }
-
- i915_sg_trim(st);
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
+ atomic_set(&vm->open, open);
- drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rem_info->plane[0].width,
- rem_info->plane[0].height, size);
-
- return ERR_PTR(ret);
+ return write_domain_objs;
}
-static noinline struct sg_table *
-intel_partial_pages(const struct i915_ggtt_view *view,
- struct drm_i915_gem_object *obj)
-{
- struct sg_table *st;
- struct scatterlist *sg, *iter;
- unsigned int count = view->partial.size;
- unsigned int offset;
- int ret = -ENOMEM;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, count, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
- GEM_BUG_ON(!iter);
-
- sg = st->sgl;
- st->nents = 0;
- do {
- unsigned int len;
-
- len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
- count << PAGE_SHIFT);
- sg_set_page(sg, NULL, len, 0);
- sg_dma_address(sg) =
- sg_dma_address(iter) + (offset << PAGE_SHIFT);
- sg_dma_len(sg) = len;
-
- st->nents++;
- count -= len >> PAGE_SHIFT;
- if (count == 0) {
- sg_mark_end(sg);
- i915_sg_trim(st); /* Drop any unused tail entries. */
-
- return st;
- }
-
- sg = __sg_next(sg);
- iter = __sg_next(iter);
- offset = 0;
- } while (1);
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
- return ERR_PTR(ret);
-}
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma)
+void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
- int ret;
+ bool flush;
- /*
- * The vma->pages are only valid within the lifespan of the borrowed
- * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
- * must be the vma->pages. A simple rule is that vma->pages must only
- * be accessed when the obj->mm.pages are pinned.
- */
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
- switch (vma->ggtt_view.type) {
- default:
- GEM_BUG_ON(vma->ggtt_view.type);
- fallthrough;
- case I915_GGTT_VIEW_NORMAL:
- vma->pages = vma->obj->mm.pages;
- return 0;
+ flush = i915_ggtt_resume_vm(&ggtt->vm);
- case I915_GGTT_VIEW_ROTATED:
- vma->pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
- break;
+ ggtt->invalidate(ggtt);
- case I915_GGTT_VIEW_REMAPPED:
- vma->pages =
- intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
- break;
+ if (flush)
+ wbinvd_on_all_cpus();
- case I915_GGTT_VIEW_PARTIAL:
- vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
- break;
- }
+ if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
+ setup_private_pat(ggtt->vm.gt->uncore);
- ret = 0;
- if (IS_ERR(vma->pages)) {
- ret = PTR_ERR(vma->pages);
- vma->pages = NULL;
- drm_err(&vma->vm->i915->drm,
- "Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
- }
- return ret;
+ intel_ggtt_restore_fences(ggtt);
}
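A detail both new helpers lean on: fetch_and_zero() claims an object's pending write domain exactly once, and i915_ggtt_resume() only pays for wbinvd_on_all_cpus() if some object was actually dirty. A userspace rendition of that accumulate-then-flush decision (fetch_and_zero() reimplemented here with a GNU C statement expression, as the kernel does):

#include <stdbool.h>
#include <stdio.h>

/* Userspace rendition of the kernel's fetch_and_zero() helper. */
#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __v = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__v;					\
})

struct obj { unsigned int write_domain; };

int main(void)
{
	struct obj objs[] = { { 0 }, { 1 /* GTT write pending */ }, { 0 } };
	bool flush = false;
	unsigned int i;

	for (i = 0; i < 3; i++)
		flush |= fetch_and_zero(&objs[i].write_domain);

	/* Only if some object was dirty do we pay for the cache flush,
	 * as i915_ggtt_resume() does before wbinvd_on_all_cpus(). */
	printf(flush ? "flush caches\n" : "no flush needed\n");
	return 0;
}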
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 1cb1948ac959..35d0fcd3a86c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/intel-gtt.h>
+
#include "intel_gt_debugfs.h"
#include "gem/i915_gem_lmem.h"
@@ -23,13 +25,12 @@
#include "shmem_utils.h"
#include "pxp/intel_pxp.h"
-void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
- gt->i915 = i915;
- gt->uncore = &i915->uncore;
-
spin_lock_init(&gt->irq_lock);
+ mutex_init(&gt->tlb_invalidate_lock);
+
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -46,6 +47,12 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_rps_init_early(&gt->rps);
}
+void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+{
+ gt->i915 = i915;
+ gt->uncore = &i915->uncore;
+}
+
int intel_gt_probe_lmem(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -907,3 +914,109 @@ void intel_gt_info_print(const struct intel_gt_info *info,
intel_sseu_dump(&info->sseu, p);
}
+
+struct reg_and_bit {
+ i915_reg_t reg;
+ u32 bit;
+};
+
+static struct reg_and_bit
+get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
+ const i915_reg_t *regs, const unsigned int num)
+{
+ const unsigned int class = engine->class;
+ struct reg_and_bit rb = { };
+
+ if (drm_WARN_ON_ONCE(&engine->i915->drm,
+ class >= num || !regs[class].reg))
+ return rb;
+
+ rb.reg = regs[class];
+ if (gen8 && class == VIDEO_DECODE_CLASS)
+ rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
+ else
+ rb.bit = engine->instance;
+
+ rb.bit = BIT(rb.bit);
+
+ return rb;
+}
+
+void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+{
+ static const i915_reg_t gen8_regs[] = {
+ [RENDER_CLASS] = GEN8_RTCR,
+ [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
+ [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
+ [COPY_ENGINE_CLASS] = GEN8_BTCR,
+ };
+ static const i915_reg_t gen12_regs[] = {
+ [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
+ };
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const i915_reg_t *regs;
+ unsigned int num = 0;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (GRAPHICS_VER(i915) == 12) {
+ regs = gen12_regs;
+ num = ARRAY_SIZE(gen12_regs);
+ } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
+ regs = gen8_regs;
+ num = ARRAY_SIZE(gen8_regs);
+ } else if (GRAPHICS_VER(i915) < 8) {
+ return;
+ }
+
+ if (drm_WARN_ONCE(&i915->drm, !num,
+ "Platform does not implement TLB invalidation!"))
+ return;
+
+ GEM_TRACE("\n");
+
+ assert_rpm_wakelock_held(&i915->runtime_pm);
+
+ mutex_lock(&gt->tlb_invalidate_lock);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ for_each_engine(engine, gt, id) {
+ /*
+ * HW architecture suggests a typical invalidation time of 40us,
+ * with pessimistic cases up to 100us and a recommendation to
+ * cap at 1ms. We go a bit higher just in case.
+ */
+ const unsigned int timeout_us = 100;
+ const unsigned int timeout_ms = 4;
+ struct reg_and_bit rb;
+
+ rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+ if (!i915_mmio_reg_offset(rb.reg))
+ continue;
+
+ intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ if (__intel_wait_for_register_fw(uncore,
+ rb.reg, rb.bit, 0,
+ timeout_us, timeout_ms,
+ NULL))
+ drm_err_ratelimited(&gt->i915->drm,
+ "%s TLB invalidation did not complete in %ums!\n",
+ engine->name, timeout_ms);
+ }
+
+ /*
+ * Use delayed put since a) we mostly expect a flurry of TLB
+ * invalidations so it is good to avoid paying the forcewake cost and
+ * b) it works around a bug in Icelake which cannot cope with too rapid
+ * transitions.
+ */
+ intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
+ mutex_unlock(&gt->tlb_invalidate_lock);
+}
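intel_gt_invalidate_tlbs() above writes each engine's invalidation register and polls it back to zero, capping the wait well above the ~40us the HW architects quote. The poll-with-deadline shape in isolation, as a userspace sketch (the kernel uses __intel_wait_for_register_fw(); the register here is simulated):

#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint32_t mmio_reg;	/* stand-in for a TLB_INV_CR register */

static uint32_t reg_read(void) { return mmio_reg; }
static void reg_write(uint32_t v) { mmio_reg = v; }

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Wait for (reg & mask) == value, bounded by timeout_us. */
static int wait_for_register(uint32_t mask, uint32_t value,
			     unsigned int timeout_us)
{
	long long deadline = now_us() + timeout_us;

	do {
		if ((reg_read() & mask) == value)
			return 0;
		/* real HW clears the bit asynchronously; emulate that
		 * so the demo's second poll observes completion */
		reg_write(0);
	} while (now_us() < deadline);

	return (reg_read() & mask) == value ? 0 : -ETIMEDOUT;
}

int main(void)
{
	uint32_t bit = 1u << 0;		/* engine instance 0 */

	reg_write(bit);			/* kick the invalidation */
	if (wait_for_register(bit, 0, 100))
		fprintf(stderr, "TLB invalidation timed out\n");
	return 0;
}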
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 74e771871a9b..a913fb6ffec3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -35,6 +35,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
}
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
+void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
int intel_gt_probe_lmem(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
@@ -90,4 +91,6 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
+void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index acc49c56a9f3..9db3dcbd917f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -9,11 +9,6 @@
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"
-static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
-{
- return container_of(pool, struct intel_gt, buffer_pool);
-}
-
static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
@@ -141,7 +136,7 @@ static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
enum i915_map_type type)
{
- struct intel_gt *gt = to_gt(pool);
+ struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
struct intel_gt_buffer_pool_node *node;
struct drm_i915_gem_object *obj;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
index e307ceb99031..17e79b735cfe 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
@@ -10,11 +10,7 @@
struct intel_gt;
-#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(__name) \
- static int __name ## _open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, __name ## _show, inode->i_private); \
-} \
+#define __GT_DEBUGFS_ATTRIBUTE_FOPS(__name) \
static const struct file_operations __name ## _fops = { \
.owner = THIS_MODULE, \
.open = __name ## _open, \
@@ -23,6 +19,21 @@ static const struct file_operations __name ## _fops = { \
.release = single_release, \
}
+#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+__GT_DEBUGFS_ATTRIBUTE_FOPS(__name)
+
+#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(__name, __size_vf) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open_size(file, __name ## _show, inode->i_private, \
+ __size_vf(inode->i_private)); \
+} \
+__GT_DEBUGFS_ATTRIBUTE_FOPS(__name)
+
void intel_gt_debugfs_register(struct intel_gt *gt);
struct intel_gt_debugfs_file {
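The refactor above splits the shared file_operations body into __GT_DEBUGFS_ATTRIBUTE_FOPS() so that a sized variant can hand single_open_size() a pre-computed buffer size. Usage would look roughly like the following kernel-style sketch -- not standalone, and the engines_show()/engines_show_size() helpers here are hypothetical:

static int engines_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;

	seq_printf(m, "GT awake: %d\n", atomic_read(&gt->wakeref.count));
	return 0;
}

/* single_open_size() wants a worst-case buffer size up front; the
 * macro passes inode->i_private (the gt) through to this callback. */
static size_t engines_show_size(void *data)
{
	return SZ_4K;			/* hypothetical upper bound */
}

DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(engines, engines_show_size);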
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 795689eb3fc7..c0fa41e4c803 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -86,6 +86,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
intel_rc6_unpark(&gt->rc6);
intel_rps_unpark(&gt->rps);
i915_pmu_gt_unparked(i915);
+ intel_guc_busyness_unpark(gt);
intel_gt_unpark_requests(gt);
runtime_begin(gt);
@@ -104,6 +105,7 @@ static int __gt_park(struct intel_wakeref *wf)
runtime_end(gt);
intel_gt_park_requests(gt);
+ intel_guc_busyness_park(gt);
i915_vma_parked(gt);
i915_pmu_gt_parked(i915);
intel_rps_park(&gt->rps);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 14216cc471b1..f20687796490 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -73,6 +73,8 @@ struct intel_gt {
struct intel_uc uc;
+ struct mutex tlb_invalidate_lock;
+
struct i915_wa_list wa_list;
struct intel_gt_timelines {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index b67f620c3d93..a94be0306464 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -8,6 +8,8 @@
#include <linux/fault-inject.h>
#include <linux/sched/mm.h>
+#include <drm/drm_cache.h>
+
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
@@ -222,19 +224,6 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
INIT_LIST_HEAD(&vm->bound_list);
}
-void clear_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
-
- if (vma->pages != vma->obj->mm.pages) {
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
- memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
-}
-
void *__px_vaddr(struct drm_i915_gem_object *p)
{
enum i915_map_type type;
@@ -274,6 +263,7 @@ static void poison_scratch_page(struct drm_i915_gem_object *scratch)
val = POISON_FREE;
memset(vaddr, val, scratch->base.size);
+ drm_clflush_virt_range(vaddr, scratch->base.size);
}
int setup_scratch_page(struct i915_address_space *vm)
@@ -299,7 +289,7 @@ int setup_scratch_page(struct i915_address_space *vm)
do {
struct drm_i915_gem_object *obj;
- obj = vm->alloc_pt_dma(vm, size);
+ obj = vm->alloc_scratch_dma(vm, size);
if (IS_ERR(obj))
goto skip;
@@ -335,6 +325,18 @@ skip:
if (size == I915_GTT_PAGE_SIZE_4K)
return -ENOMEM;
+ /*
+ * If we need 64K minimum GTT pages for device local-memory,
+ * like on XEHPSDV, then we need to fail the allocation here,
+ * otherwise we can't safely support the insertion of
+ * local-memory pages for this vm, since the HW expects the
+ * correct physical alignment and size when the page-table is
+ * operating in 64K GTT mode, which includes any scratch PTEs,
+ * since userspace can still touch them.
+ */
+ if (HAS_64K_PAGES(vm->i915))
+ return -ENOMEM;
+
size = I915_GTT_PAGE_SIZE_4K;
} while (1);
}
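setup_scratch_page() above tries the largest scratch size first and retries at 4K, except that 64K-page platforms must fail outright rather than mix page sizes within one table. The shrink-and-retry shape in a userspace sketch (allocator and platform flag are stand-ins; flipping has_64k_pages to true reproduces the hard -ENOMEM):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SZ_4K  0x1000u
#define SZ_64K 0x10000u

static bool has_64k_pages;	/* platform capability, assumed false here */

static void *alloc_scratch(size_t size)
{
	/* Pretend big contiguous allocations fail under pressure. */
	return size > SZ_4K ? NULL : malloc(size);
}

static int setup_scratch(size_t want)
{
	size_t size = want;

	do {
		void *page = alloc_scratch(size);

		if (page) {
			printf("scratch at %zu bytes\n", size);
			free(page);
			return 0;
		}
		if (size == SZ_4K)
			return -ENOMEM;
		/* 64K GTT mode cannot mix page sizes in one table, so
		 * shrinking the scratch page is not an option there. */
		if (has_64k_pages)
			return -ENOMEM;
		size = SZ_4K;
	} while (1);
}

int main(void)
{
	return setup_scratch(SZ_64K) ? 1 : 0;
}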
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index bc6750263359..177b42b935a1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -135,6 +135,9 @@ typedef u64 gen8_pte_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
+#define GEN8_PAGE_PRESENT BIT_ULL(0)
+#define GEN8_PAGE_RW BIT_ULL(1)
+
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
@@ -206,9 +209,6 @@ struct i915_vma_ops {
*/
void (*unbind_vma)(struct i915_address_space *vm,
struct i915_vma *vma);
-
- int (*set_pages)(struct i915_vma *vma);
- void (*clear_pages)(struct i915_vma *vma);
};
struct i915_address_space {
@@ -265,6 +265,8 @@ struct i915_address_space {
struct drm_i915_gem_object *
(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
+ struct drm_i915_gem_object *
+ (*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
u64 (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
@@ -544,6 +546,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
+void i915_ggtt_suspend_vm(struct i915_address_space *vm);
+bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
@@ -594,10 +598,6 @@ release_pd_entry(struct i915_page_directory * const pd,
const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
-int ggtt_set_pages(struct i915_vma *vma);
-int ppgtt_set_pages(struct i915_vma *vma);
-void clear_pages(struct i915_vma *vma);
-
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma *vma,
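The header additions above introduce explicit present/RW bits for gen8+ PTEs. For orientation, a sketch of how such bits compose a PTE (illustrative only; the local-memory bit position is an assumption and the real encoders also handle cache levels):

#include <linux/bits.h>

typedef u64 gen8_pte_sketch_t;

static gen8_pte_sketch_t pte_encode_sketch(u64 addr, bool lmem)
{
	gen8_pte_sketch_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;

	if (lmem)
		pte |= BIT_ULL(11);	/* PTE_LM-style bit, hypothetical value */

	return pte;
}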
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 56156cf18c41..b3489599e4de 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1167,6 +1167,11 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
cs = gen12_emit_cmd_buf_wa(ce, cs);
cs = gen12_emit_restore_scratch(ce, cs);
+ /* Wa_16013000631:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(ce->engine->i915))
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
+
return cs;
}
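gen8_emit_pipe_control(), used for the Wa above, emits a six-dword PIPE_CONTROL packet into the command stream. A rough sketch of its shape (simplified; the real helper zeroes all six dwords and supports additional flag words):

/* sketch of a 6-dword PIPE_CONTROL emission; cs must have room for 6 dwords */
static u32 *emit_pipe_control_sketch(u32 *cs, u32 flags, u32 offset)
{
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = flags;		/* e.g. PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE */
	*cs++ = offset;		/* post-sync address, unused for this Wa */
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}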
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index afb1cce9a352..18b44af56969 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -13,7 +13,6 @@
struct insert_pte_data {
u64 offset;
- bool is_lmem;
};
#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
@@ -40,7 +39,7 @@ static void insert_pte(struct i915_address_space *vm,
struct insert_pte_data *d = data;
vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
- d->is_lmem ? PTE_LM : 0);
+ i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
d->offset += PAGE_SIZE;
}
@@ -134,8 +133,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
goto err_vm;
/* Now allow the GPU to rewrite the PTE via its own ppGTT */
- d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]);
- vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d);
+ vm->vm.foreach(&vm->vm, base, d.offset - base, insert_pte, &d);
}
return &vm->vm;
@@ -281,10 +279,10 @@ static int emit_pte(struct i915_request *rq,
GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);
/* Compute the page directory offset for the target address range */
- offset += (u64)rq->engine->instance << 32;
offset >>= 12;
offset *= sizeof(u64);
offset += 2 * CHUNK_SZ;
+ offset += (u64)rq->engine->instance << 32;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -406,7 +404,7 @@ static int emit_copy(struct i915_request *rq, int size)
int
intel_context_migrate_copy(struct intel_context *ce,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *src,
enum i915_cache_level src_cache_level,
bool src_is_lmem,
@@ -433,8 +431,8 @@ intel_context_migrate_copy(struct intel_context *ce,
goto out_ce;
}
- if (await) {
- err = i915_request_await_dma_fence(rq, await);
+ if (deps) {
+ err = i915_request_await_deps(rq, deps);
if (err)
goto out_rq;
@@ -444,7 +442,7 @@ intel_context_migrate_copy(struct intel_context *ce,
goto out_rq;
}
- await = NULL;
+ deps = NULL;
}
/* The PTE updates + copy must not be interrupted. */
@@ -527,7 +525,7 @@ static int emit_clear(struct i915_request *rq, int size, u32 value)
int
intel_context_migrate_clear(struct intel_context *ce,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *sg,
enum i915_cache_level cache_level,
bool is_lmem,
@@ -552,8 +550,8 @@ intel_context_migrate_clear(struct intel_context *ce,
goto out_ce;
}
- if (await) {
- err = i915_request_await_dma_fence(rq, await);
+ if (deps) {
+ err = i915_request_await_deps(rq, deps);
if (err)
goto out_rq;
@@ -563,7 +561,7 @@ intel_context_migrate_clear(struct intel_context *ce,
goto out_rq;
}
- await = NULL;
+ deps = NULL;
}
/* The PTE updates + clear must not be interrupted. */
@@ -601,7 +599,7 @@ out_ce:
int intel_migrate_copy(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *src,
enum i915_cache_level src_cache_level,
bool src_is_lmem,
@@ -626,7 +624,7 @@ int intel_migrate_copy(struct intel_migrate *m,
if (err)
goto out;
- err = intel_context_migrate_copy(ce, await,
+ err = intel_context_migrate_copy(ce, deps,
src, src_cache_level, src_is_lmem,
dst, dst_cache_level, dst_is_lmem,
out);
@@ -640,7 +638,7 @@ out:
int
intel_migrate_clear(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *sg,
enum i915_cache_level cache_level,
bool is_lmem,
@@ -663,7 +661,7 @@ intel_migrate_clear(struct intel_migrate *m,
if (err)
goto out;
- err = intel_context_migrate_clear(ce, await, sg, cache_level,
+ err = intel_context_migrate_clear(ce, deps, sg, cache_level,
is_lmem, value, out);
intel_context_unpin(ce);
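The await parameter swap above changes the contract from "wait on one dma_fence" to "wait on a collection". A sketch of a caller under the new convention (the i915_deps helper signatures here are my reading of the API introduced alongside this change, so treat them as assumptions):

#include <linux/gfp.h>
#include <drm/ttm/ttm_bo_api.h>	/* for struct ttm_operation_ctx */

static int clear_with_deps_sketch(struct intel_context *ce,
				  struct dma_fence *fence,
				  struct scatterlist *sg,
				  enum i915_cache_level level,
				  bool is_lmem,
				  struct i915_request **out)
{
	struct ttm_operation_ctx tctx = {};
	struct i915_deps deps;
	int err;

	i915_deps_init(&deps, GFP_KERNEL);
	err = i915_deps_add_dependency(&deps, fence, &tctx);	/* repeat per fence */
	if (!err)
		err = intel_context_migrate_clear(ce, &deps, sg, level,
						  is_lmem, 0, out);
	i915_deps_fini(&deps);
	return err;
}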
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.h b/drivers/gpu/drm/i915/gt/intel_migrate.h
index 4e18e755a00b..ccc677ec4aa3 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.h
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.h
@@ -11,6 +11,7 @@
#include "intel_migrate_types.h"
struct dma_fence;
+struct i915_deps;
struct i915_request;
struct i915_gem_ww_ctx;
struct intel_gt;
@@ -23,7 +24,7 @@ struct intel_context *intel_migrate_create_context(struct intel_migrate *m);
int intel_migrate_copy(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *src,
enum i915_cache_level src_cache_level,
bool src_is_lmem,
@@ -33,7 +34,7 @@ int intel_migrate_copy(struct intel_migrate *m,
struct i915_request **out);
int intel_context_migrate_copy(struct intel_context *ce,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *src,
enum i915_cache_level src_cache_level,
bool src_is_lmem,
@@ -45,7 +46,7 @@ int intel_context_migrate_copy(struct intel_context *ce,
int
intel_migrate_clear(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *sg,
enum i915_cache_level cache_level,
bool is_lmem,
@@ -53,7 +54,7 @@ intel_migrate_clear(struct intel_migrate *m,
struct i915_request **out);
int
intel_context_migrate_clear(struct intel_context *ce,
- struct dma_fence *await,
+ const struct i915_deps *deps,
struct scatterlist *sg,
enum i915_cache_level cache_level,
bool is_lmem,
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 15f9ada28a7a..9c253ba593c6 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -424,7 +424,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
table->unused_entries_index = I915_MOCS_PTE;
if (IS_DG2(i915)) {
- if (IS_DG2_GT_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax);
table->table = dg2_mocs_table_g10_ax;
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 4396bfd630d8..083b3090c69c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -289,16 +289,6 @@ void i915_vm_free_pt_stash(struct i915_address_space *vm,
}
}
-int ppgtt_set_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(vma->pages);
-
- vma->pages = vma->obj->mm.pages;
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
unsigned long lmem_pt_obj_flags)
{
@@ -315,6 +305,4 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 43093dd2d0c9..c3155ee58689 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -117,10 +117,17 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_EI_MODE(1);
- pg_enable =
- GEN9_RENDER_PG_ENABLE |
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE;
+ /* Wa_16011777198 - Render powergating must remain disabled */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
+ pg_enable =
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
+ else
+ pg_enable =
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
if (GRAPHICS_VER(gt->i915) >= 12) {
for (i = 0; i < I915_MAX_VCS; i++)
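A drop-in alternative for the branch above (a sketch, equivalent in effect): build the full powergate mask once and strip render powergating on the affected steppings, so the shared media bits are not duplicated:

	pg_enable = GEN9_RENDER_PG_ENABLE |
		    GEN9_MEDIA_PG_ENABLE |
		    GEN11_MEDIA_SAMPLER_PG_ENABLE;

	/* Wa_16011777198 - render powergating must remain disabled */
	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
	    IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
		pg_enable &= ~GEN9_RENDER_PG_ENABLE;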
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index afb35d2e5c73..fde2dcb59809 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -66,12 +66,16 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
DMA_ATTR_FORCE_CONTIGUOUS);
}
-static void
+static int
region_lmem_release(struct intel_memory_region *mem)
{
- intel_region_ttm_fini(mem);
+ int ret;
+
+ ret = intel_region_ttm_fini(mem);
io_mapping_fini(&mem->iomap);
release_fake_lmem_bar(mem);
+
+ return ret;
}
static int
@@ -158,7 +162,7 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt)
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
u64 *start, u32 *size)
{
- if (!IS_DG1_GT_STEP(uncore->i915, STEP_A0, STEP_C0))
+ if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
return false;
*start = 0;
@@ -193,6 +197,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_memory_region *mem;
+ resource_size_t min_page_size;
resource_size_t io_start;
resource_size_t lmem_size;
int err;
@@ -207,10 +212,12 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
return ERR_PTR(-ENODEV);
+ min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
+ I915_GTT_PAGE_SIZE_4K;
mem = intel_memory_region_create(i915,
0,
lmem_size,
- I915_GTT_PAGE_SIZE_4K,
+ min_page_size,
io_start,
INTEL_MEMORY_LOCAL,
0,
@@ -231,7 +238,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
return mem;
err_region_put:
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 91200c43951f..7be0002d9d70 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -6,7 +6,7 @@
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
-#include "display/intel_display_types.h"
+#include "display/intel_display.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
@@ -297,13 +297,6 @@ static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- static const u32 hw_engine_mask[] = {
- [RCS0] = GEN6_GRDOM_RENDER,
- [BCS0] = GEN6_GRDOM_BLT,
- [VCS0] = GEN6_GRDOM_MEDIA,
- [VCS1] = GEN8_GRDOM_MEDIA2,
- [VECS0] = GEN6_GRDOM_VECS,
- };
struct intel_engine_cs *engine;
u32 hw_mask;
@@ -314,8 +307,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
hw_mask = 0;
for_each_engine_masked(engine, gt, engine_mask, tmp) {
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
- hw_mask |= hw_engine_mask[engine->id];
+ hw_mask |= engine->reset_domain;
}
}
@@ -492,22 +484,6 @@ static int gen11_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- static const u32 hw_engine_mask[] = {
- [RCS0] = GEN11_GRDOM_RENDER,
- [BCS0] = GEN11_GRDOM_BLT,
- [VCS0] = GEN11_GRDOM_MEDIA,
- [VCS1] = GEN11_GRDOM_MEDIA2,
- [VCS2] = GEN11_GRDOM_MEDIA3,
- [VCS3] = GEN11_GRDOM_MEDIA4,
- [VCS4] = GEN11_GRDOM_MEDIA5,
- [VCS5] = GEN11_GRDOM_MEDIA6,
- [VCS6] = GEN11_GRDOM_MEDIA7,
- [VCS7] = GEN11_GRDOM_MEDIA8,
- [VECS0] = GEN11_GRDOM_VECS,
- [VECS1] = GEN11_GRDOM_VECS2,
- [VECS2] = GEN11_GRDOM_VECS3,
- [VECS3] = GEN11_GRDOM_VECS4,
- };
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
u32 reset_mask, unlock_mask = 0;
@@ -518,8 +494,7 @@ static int gen11_reset_engines(struct intel_gt *gt,
} else {
reset_mask = 0;
for_each_engine_masked(engine, gt, engine_mask, tmp) {
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
- reset_mask |= hw_engine_mask[engine->id];
+ reset_mask |= engine->reset_domain;
ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
if (ret)
goto sfc_unlock;
@@ -1367,20 +1342,27 @@ void intel_gt_handle_error(struct intel_gt *gt,
/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
synchronize_rcu_expedited();
- /* Prevent any other reset-engine attempt. */
- for_each_engine(engine, gt, tmp) {
- while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags))
- wait_on_bit(&gt->reset.flags,
- I915_RESET_ENGINE + engine->id,
- TASK_UNINTERRUPTIBLE);
+ /*
+ * Prevent any other reset-engine attempt. We don't do this for GuC
+ * submission since the GuC owns the per-engine reset, not the i915.
+ */
+ if (!intel_uc_uses_guc_submission(&gt->uc)) {
+ for_each_engine(engine, gt, tmp) {
+ while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags,
+ I915_RESET_ENGINE + engine->id,
+ TASK_UNINTERRUPTIBLE);
+ }
}
intel_gt_reset_global(gt, engine_mask, msg);
- for_each_engine(engine, gt, tmp)
- clear_bit_unlock(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags);
+ if (!intel_uc_uses_guc_submission(&gt->uc)) {
+ for_each_engine(engine, gt, tmp)
+ clear_bit_unlock(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ }
clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
smp_mb__after_atomic();
wake_up_all(&gt->reset.queue);
@@ -1441,6 +1423,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
I915_WEDGED_ON_INIT);
intel_gt_set_wedged(gt);
+ i915_disable_error_state(gt->i915, -ENODEV);
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
/* Wedged on init is non-recoverable */
@@ -1450,6 +1433,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
{
intel_gt_set_wedged(gt);
+ i915_disable_error_state(gt->i915, -ENODEV);
set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
intel_gt_retire_requests(gt); /* cleanup any wedged requests */
}
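Both hunks above replace per-generation lookup arrays with a per-engine reset_domain field, which also covers engines the old fixed tables could not describe. A sketch of how such a field might be derived once at engine setup (abbreviated mapping, hypothetical helper name):

static u32 get_reset_domain_sketch(int graphics_ver, enum intel_engine_id id)
{
	if (graphics_ver >= 11) {
		switch (id) {
		case RCS0:	return GEN11_GRDOM_RENDER;
		case BCS0:	return GEN11_GRDOM_BLT;
		case VCS0:	return GEN11_GRDOM_MEDIA;
		case VECS0:	return GEN11_GRDOM_VECS;
		default:	return 0;	/* real table covers VCS1..7, VECS1..3 */
		}
	}

	switch (id) {
	case RCS0:	return GEN6_GRDOM_RENDER;
	case BCS0:	return GEN6_GRDOM_BLT;
	case VCS0:	return GEN6_GRDOM_MEDIA;
	case VECS0:	return GEN6_GRDOM_VECS;
	default:	return 0;
	}
}

/* at engine init: engine->reset_domain = get_reset_domain_sketch(ver, id); */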
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 586dca1731ce..3e6fac0340ef 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1357,7 +1357,7 @@ retry:
err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
if (!err && gen7_wa_vma)
err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
- if (!err && engine->legacy.ring->vma->obj)
+ if (!err)
err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
if (!err)
err = intel_timeline_pin(timeline, &ww);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 5e275f8dda8c..54e7df788dbf 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -936,8 +936,70 @@ void intel_rps_park(struct intel_rps *rps)
GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}
+u32 intel_rps_get_boost_frequency(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc;
+
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ return slpc->boost_freq;
+ } else {
+ return intel_gpu_freq(rps, rps->boost_freq);
+ }
+}
+
+static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
+{
+ bool boost = false;
+
+ /* Validate against (static) hardware limits */
+ val = intel_freq_opcode(rps, val);
+ if (val < rps->min_freq || val > rps->max_freq)
+ return -EINVAL;
+
+ mutex_lock(&rps->lock);
+ if (val != rps->boost_freq) {
+ rps->boost_freq = val;
+ boost = atomic_read(&rps->num_waiters);
+ }
+ mutex_unlock(&rps->lock);
+ if (boost)
+ schedule_work(&rps->work);
+
+ return 0;
+}
+
+int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq)
+{
+ struct intel_guc_slpc *slpc;
+
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ return intel_guc_slpc_set_boost_freq(slpc, freq);
+ } else {
+ return rps_set_boost_freq(rps, freq);
+ }
+}
+
+void intel_rps_dec_waiters(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc;
+
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ intel_guc_slpc_dec_waiters(slpc);
+ } else {
+ atomic_dec(&rps->num_waiters);
+ }
+}
+
void intel_rps_boost(struct i915_request *rq)
{
+ struct intel_guc_slpc *slpc;
+
if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
return;
@@ -945,6 +1007,16 @@ void intel_rps_boost(struct i915_request *rq)
if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ /* Only the first waiter (old value zero) schedules the boost work */
+ if (!atomic_fetch_inc(&slpc->num_waiters))
+ schedule_work(&slpc->boost_work);
+
+ return;
+ }
+
if (atomic_fetch_inc(&rps->num_waiters))
return;
@@ -2154,6 +2226,65 @@ u32 intel_rps_read_state_cap(struct intel_rps *rps)
return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
}
+static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 state = enable ? GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE;
+
+ /* Allow punit to process software requests */
+ intel_uncore_write(uncore, GEN6_RP_CONTROL, state);
+}
+
+void intel_rps_raise_unslice(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 rp0_unslice_req;
+
+ mutex_lock(&rps->lock);
+
+ if (rps_uses_slpc(rps)) {
+ /* RP limits have not been initialized yet for SLPC path */
+ rp0_unslice_req = ((intel_rps_read_state_cap(rps) >> 0)
+ & 0xff) * GEN9_FREQ_SCALER;
+
+ intel_rps_set_manual(rps, true);
+ intel_uncore_write(uncore, GEN6_RPNSWREQ,
+ ((rp0_unslice_req <<
+ GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
+ GEN9_IGNORE_SLICE_RATIO));
+ intel_rps_set_manual(rps, false);
+ } else {
+ intel_rps_set(rps, rps->rp0_freq);
+ }
+
+ mutex_unlock(&rps->lock);
+}
+
+void intel_rps_lower_unslice(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 rpn_unslice_req;
+
+ mutex_lock(&rps->lock);
+
+ if (rps_uses_slpc(rps)) {
+ /* RP limits have not been initialized yet for SLPC path */
+ rpn_unslice_req = ((intel_rps_read_state_cap(rps) >> 16)
+ & 0xff) * GEN9_FREQ_SCALER;
+
+ intel_rps_set_manual(rps, true);
+ intel_uncore_write(uncore, GEN6_RPNSWREQ,
+ ((rpn_unslice_req <<
+ GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
+ GEN9_IGNORE_SLICE_RATIO));
+ intel_rps_set_manual(rps, false);
+ } else {
+ intel_rps_set(rps, rps->min_freq);
+ }
+
+ mutex_unlock(&rps->lock);
+}
+
/* External interface for intel_ips.ko */
static struct drm_i915_private __rcu *ips_mchdev;
@@ -2230,7 +2361,7 @@ unsigned long i915_read_mch_val(void)
return 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- struct intel_ips *ips = &i915->gt.rps.ips;
+ struct intel_ips *ips = &to_gt(i915)->rps.ips;
spin_lock_irq(&mchdev_lock);
chipset_val = __ips_chipset_val(ips);
@@ -2257,7 +2388,7 @@ bool i915_gpu_raise(void)
if (!i915)
return false;
- rps = &i915->gt.rps;
+ rps = &to_gt(i915)->rps;
spin_lock_irq(&mchdev_lock);
if (rps->max_freq_softlimit < rps->max_freq)
@@ -2284,7 +2415,7 @@ bool i915_gpu_lower(void)
if (!i915)
return false;
- rps = &i915->gt.rps;
+ rps = &to_gt(i915)->rps;
spin_lock_irq(&mchdev_lock);
if (rps->max_freq_softlimit > rps->min_freq)
@@ -2310,7 +2441,7 @@ bool i915_gpu_busy(void)
if (!i915)
return false;
- ret = i915->gt.awake;
+ ret = to_gt(i915)->awake;
drm_dev_put(&i915->drm);
return ret;
@@ -2333,11 +2464,11 @@ bool i915_gpu_turbo_disable(void)
if (!i915)
return false;
- rps = &i915->gt.rps;
+ rps = &to_gt(i915)->rps;
spin_lock_irq(&mchdev_lock);
rps->max_freq_softlimit = rps->min_freq;
- ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq);
+ ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq);
spin_unlock_irq(&mchdev_lock);
drm_dev_put(&i915->drm);
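intel_rps_set_boost_frequency() and its getter give callers a single entry point that internally routes to SLPC or legacy RPS. A sketch of a sysfs-style consumer (hypothetical function name, modelled loosely on the gt sysfs handlers):

static ssize_t boost_freq_store_sketch(struct intel_rps *rps,
				       const char *buf, size_t count)
{
	u32 val;
	int err;

	err = kstrtou32(buf, 0, &val);	/* frequency value from userspace */
	if (err)
		return err;

	err = intel_rps_set_boost_frequency(rps, val);
	return err ?: count;
}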
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 11960d64ca82..c6d76a3d1331 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -23,6 +23,9 @@ void intel_rps_disable(struct intel_rps *rps);
void intel_rps_park(struct intel_rps *rps);
void intel_rps_unpark(struct intel_rps *rps);
void intel_rps_boost(struct i915_request *rq);
+void intel_rps_dec_waiters(struct intel_rps *rps);
+u32 intel_rps_get_boost_frequency(struct intel_rps *rps);
+int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq);
int intel_rps_set(struct intel_rps *rps, u8 val);
void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
@@ -42,6 +45,8 @@ u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
u32 intel_rps_read_punit_req(struct intel_rps *rps);
u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
u32 intel_rps_read_state_cap(struct intel_rps *rps);
+void intel_rps_raise_unslice(struct intel_rps *rps);
+void intel_rps_lower_unslice(struct intel_rps *rps);
void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 2400d6423ba5..ab3277a3d593 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -482,7 +482,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_GT_STEP(i915, STEP_C0, STEP_FOREVER))
+ if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -560,6 +560,22 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/*
* These settings aren't actually workarounds, but general tuning settings that
+ * need to be programmed on dg2 platform.
+ */
+static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
+ REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128,
+ 0, false);
+}
+
+/*
+ * These settings aren't actually workarounds, but general tuning settings that
* need to be programmed on several platforms.
*/
static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
@@ -637,6 +653,42 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}
+static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ dg2_ctx_gt_tuning_init(engine, wal);
+
+ /* Wa_16011186671:dg2_g11 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ wa_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH);
+ wa_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ /* Wa_14010469329:dg2_g10 */
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE);
+
+ /*
+ * Wa_22010465075:dg2_g10
+ * Wa_22010613112:dg2_g10
+ * Wa_14010698770:dg2_g10
+ */
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+ }
+
+ /* Wa_16013271637:dg2 */
+ wa_masked_en(wal, SLICE_COMMON_ECO_CHICKEN1,
+ MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
+
+ /* Wa_22012532006:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);
+}
+
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
@@ -723,7 +775,11 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
goto done;
- if (IS_DG1(i915))
+ if (IS_DG2(i915))
+ dg2_ctx_workarounds_init(engine, wal);
+ else if (IS_XEHPSDV(i915))
+ ; /* noop; none at this time */
+ else if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 12)
gen12_ctx_workarounds_init(engine, wal);
@@ -871,10 +927,51 @@ hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
+gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
+ unsigned int slice, subslice;
+ u32 mcr, mcr_mask;
+
+ GEM_BUG_ON(GRAPHICS_VER(i915) != 9);
+
+ /*
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
+ * Before any MMIO read into slice/subslice specific registers, MCR
+ * packet control register needs to be programmed to point to any
+ * enabled s/ss pair. Otherwise, incorrect values will be returned.
+ * This means each subsequent MMIO read will be forwarded to a
+ * specific s/ss combination, but this is OK since these registers
+ * are consistent across s/ss in almost all cases. On the rare
+ * occasions, such as INSTDONE, where this value is dependent
+ * on s/ss combo, the read should be done with read_subslice_reg.
+ */
+ slice = ffs(sseu->slice_mask) - 1;
+ GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
+ subslice = ffs(intel_sseu_get_subslices(sseu, slice));
+ GEM_BUG_ON(!subslice);
+ subslice--;
+
+ /*
+ * We use GEN8_MCR..() macros to calculate the |mcr| value for
+ * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
+ */
+ mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+
+ drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
+
+ wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
+}
+
+static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
+ /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
+ gen9_wa_init_mcr(i915, wal);
+
/* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
wa_write_or(wal,
@@ -909,7 +1006,7 @@ skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_GT_STEP(gt->i915, STEP_A0, STEP_H0))
+ if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
@@ -921,7 +1018,7 @@ kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
gen9_gt_workarounds_init(gt, wal);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_GT_STEP(gt->i915, 0, STEP_C0))
+ if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -1138,7 +1235,7 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_1607087056:icl,ehl,jsl */
if (IS_ICELAKE(i915) ||
- IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
+ IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1192,19 +1289,19 @@ tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
gen12_gt_workarounds_init(gt, wal);
/* Wa_1409420604:tgl */
- if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal,
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
/* Wa_1607087056:tgl also known as BUG:1409180338 */
- if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
/* Wa_1408615072:tgl[a0] */
- if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
VSUNIT_CLKGATE_DIS_TGL);
}
@@ -1217,7 +1314,7 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
gen12_gt_workarounds_init(gt, wal);
/* Wa_1607087056:dg1 */
- if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1238,7 +1335,179 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ struct drm_i915_private *i915 = gt->i915;
+
+ xehp_init_mcr(gt, wal);
+
+ /* Wa_1409757795:xehpsdv */
+ wa_write_or(wal, SCCGCTL94DC, CG3DDISURB);
+
+ /* Wa_18011725039:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
+ wa_masked_dis(wal, MLTICTXCTL, TDONRENDER);
+ wa_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
+ }
+
+ /* Wa_16011155590:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ TSGUNIT_CLKGATE_DIS);
+
+ /* Wa_14011780169:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
+ wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
+ GAMTLBVDBOX7_CLKGATE_DIS |
+ GAMTLBVDBOX6_CLKGATE_DIS |
+ GAMTLBVDBOX5_CLKGATE_DIS |
+ GAMTLBVDBOX4_CLKGATE_DIS |
+ GAMTLBVDBOX3_CLKGATE_DIS |
+ GAMTLBVDBOX2_CLKGATE_DIS |
+ GAMTLBVDBOX1_CLKGATE_DIS |
+ GAMTLBVDBOX0_CLKGATE_DIS |
+ GAMTLBKCR_CLKGATE_DIS |
+ GAMTLBGUC_CLKGATE_DIS |
+ GAMTLBBLT_CLKGATE_DIS);
+ wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
+ GAMTLBGFXA1_CLKGATE_DIS |
+ GAMTLBCOMPA0_CLKGATE_DIS |
+ GAMTLBCOMPA1_CLKGATE_DIS |
+ GAMTLBCOMPB0_CLKGATE_DIS |
+ GAMTLBCOMPB1_CLKGATE_DIS |
+ GAMTLBCOMPC0_CLKGATE_DIS |
+ GAMTLBCOMPC1_CLKGATE_DIS |
+ GAMTLBCOMPD0_CLKGATE_DIS |
+ GAMTLBCOMPD1_CLKGATE_DIS |
+ GAMTLBMERT_CLKGATE_DIS |
+ GAMTLBVEBOX3_CLKGATE_DIS |
+ GAMTLBVEBOX2_CLKGATE_DIS |
+ GAMTLBVEBOX1_CLKGATE_DIS |
+ GAMTLBVEBOX0_CLKGATE_DIS);
+ }
+
+ /* Wa_14012362059:xehpsdv */
+ wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_16012725990:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);
+
+ /* Wa_14011060649:xehpsdv */
+ wa_14011060649(gt, wal);
+
+ /* Wa_14014368820:xehpsdv */
+ wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
+ GLOBAL_INVALIDATION_MODE);
+}
+
+static void
+dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct intel_engine_cs *engine;
+ int id;
+
xehp_init_mcr(gt, wal);
+
+ /* Wa_14011060649:dg2 */
+ wa_14011060649(gt, wal);
+
+ /*
+ * Although there are per-engine instances of these registers,
+ * they technically exist outside the engine itself and are not
+ * impacted by engine resets. Furthermore, they're part of the
+ * GuC blacklist so trying to treat them as engine workarounds
+ * will result in GuC initialization failure and a wedged GPU.
+ */
+ for_each_engine(engine, gt, id) {
+ if (engine->class != VIDEO_DECODE_CLASS)
+ continue;
+
+ /* Wa_16010515920:dg2_g10 */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
+ wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base),
+ ALNUNIT_CLKGATE_DIS);
+ }
+
+ if (IS_DG2_G10(gt->i915)) {
+ /* Wa_22010523718:dg2 */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ CG3DDISCFEG_CLKGATE_DIS);
+
+ /* Wa_14011006942:dg2 */
+ wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE,
+ DSS_ROUTER_CLKGATE_DIS);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) {
+ /* Wa_14010680813:dg2_g10 */
+ wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS |
+ EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS);
+
+ /* Wa_14010948348:dg2_g10 */
+ wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS);
+
+ /* Wa_14011037102:dg2_g10 */
+ wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS);
+
+ /* Wa_14011371254:dg2_g10 */
+ wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS);
+
+ /* Wa_14011431319:dg2_g10 */
+ wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
+ GAMTLBVDBOX7_CLKGATE_DIS |
+ GAMTLBVDBOX6_CLKGATE_DIS |
+ GAMTLBVDBOX5_CLKGATE_DIS |
+ GAMTLBVDBOX4_CLKGATE_DIS |
+ GAMTLBVDBOX3_CLKGATE_DIS |
+ GAMTLBVDBOX2_CLKGATE_DIS |
+ GAMTLBVDBOX1_CLKGATE_DIS |
+ GAMTLBVDBOX0_CLKGATE_DIS |
+ GAMTLBKCR_CLKGATE_DIS |
+ GAMTLBGUC_CLKGATE_DIS |
+ GAMTLBBLT_CLKGATE_DIS);
+ wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
+ GAMTLBGFXA1_CLKGATE_DIS |
+ GAMTLBCOMPA0_CLKGATE_DIS |
+ GAMTLBCOMPA1_CLKGATE_DIS |
+ GAMTLBCOMPB0_CLKGATE_DIS |
+ GAMTLBCOMPB1_CLKGATE_DIS |
+ GAMTLBCOMPC0_CLKGATE_DIS |
+ GAMTLBCOMPC1_CLKGATE_DIS |
+ GAMTLBCOMPD0_CLKGATE_DIS |
+ GAMTLBCOMPD1_CLKGATE_DIS |
+ GAMTLBMERT_CLKGATE_DIS |
+ GAMTLBVEBOX3_CLKGATE_DIS |
+ GAMTLBVEBOX2_CLKGATE_DIS |
+ GAMTLBVEBOX1_CLKGATE_DIS |
+ GAMTLBVEBOX0_CLKGATE_DIS);
+
+ /* Wa_14010569222:dg2_g10 */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ GAMEDIA_CLKGATE_DIS);
+
+ /* Wa_14011028019:dg2_g10 */
+ wa_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14012362059:dg2 */
+ wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+ }
+
+ /* Wa_1509235366:dg2 */
+ wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
+ GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14014830051:dg2 */
+ wa_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
+
+ /*
+ * The following are not actually "workarounds" but rather
+ * recommended tuning settings documented in the bspec's
+ * performance guide section.
+ */
+ wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS);
}
static void
@@ -1246,7 +1515,9 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
- if (IS_XEHPSDV(i915))
+ if (IS_DG2(i915))
+ dg2_gt_workarounds_init(gt, wal);
+ else if (IS_XEHPSDV(i915))
xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
dg1_gt_workarounds_init(gt, wal);
@@ -1520,7 +1791,7 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_RANGE_4);
}
-static void cml_whitelist_build(struct intel_engine_cs *engine)
+static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
@@ -1528,6 +1799,11 @@ static void cml_whitelist_build(struct intel_engine_cs *engine)
whitelist_reg_ext(w,
RING_CTX_TIMESTAMP(engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
+}
+
+static void cml_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
cfl_whitelist_build(engine);
}
@@ -1536,6 +1812,8 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
+ allow_read_ctx_timestamp(engine);
+
switch (engine->class) {
case RENDER_CLASS:
/* WaAllowUMDToModifyHalfSliceChicken7:icl */
@@ -1571,15 +1849,9 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
/* hucStatus2RegOffset */
whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
- whitelist_reg_ext(w,
- RING_CTX_TIMESTAMP(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
default:
- whitelist_reg_ext(w,
- RING_CTX_TIMESTAMP(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
}
}
@@ -1588,6 +1860,8 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
+ allow_read_ctx_timestamp(engine);
+
switch (engine->class) {
case RENDER_CLASS:
/*
@@ -1604,16 +1878,17 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
- /* Wa_1808121037:tgl */
+ /*
+ * Wa_1808121037:tgl
+ * Wa_14012131227:dg1
+ * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
+ */
whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
/* Wa_1806527549:tgl */
whitelist_reg(w, HIZ_CHICKEN);
break;
default:
- whitelist_reg_ext(w,
- RING_CTX_TIMESTAMP(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
}
}
@@ -1625,13 +1900,46 @@ static void dg1_whitelist_build(struct intel_engine_cs *engine)
tgl_whitelist_build(engine);
/* GEN:BUG:1409280441:dg1 */
- if (IS_DG1_GT_STEP(engine->i915, STEP_A0, STEP_B0) &&
+ if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) &&
(engine->class == RENDER_CLASS ||
engine->class == COPY_ENGINE_CLASS))
whitelist_reg_ext(w, RING_ID(engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
}
+static void xehpsdv_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
+}
+
+static void dg2_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ allow_read_ctx_timestamp(engine);
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /*
+ * Wa_1507100340:dg2_g10
+ *
+ * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+
+ break;
+ default:
+ break;
+ }
+}
+
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -1639,7 +1947,11 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
wa_init_start(w, "whitelist", engine->name);
- if (IS_DG1(i915))
+ if (IS_DG2(i915))
+ dg2_whitelist_build(engine);
+ else if (IS_XEHPSDV(i915))
+ xehpsdv_whitelist_build(engine);
+ else if (IS_DG1(i915))
dg1_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
@@ -1713,13 +2025,119 @@ engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
CMD_CCTL_MOCS_OVERRIDE(mocs, mocs));
}
}
+
+static bool needs_wa_1308578152(struct intel_engine_cs *engine)
+{
+ u64 dss_mask = intel_sseu_get_subslices(&engine->gt->info.sseu, 0);
+
+ return (dss_mask & GENMASK(GEN_DSS_PER_GSLICE - 1, 0)) == 0;
+}
+
static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
- IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14013392000:dg2_g11 */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
+
+ /* Wa_16011620976:dg2_g11 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14012419201:dg2 */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4,
+ GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(engine->i915)) {
+ /*
+ * Wa_22012826095:dg2
+ * Wa_22013059131:dg2
+ */
+ wa_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
+ MAXREQS_PER_BANK,
+ REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
+
+ /* Wa_22013059131:dg2 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0,
+ FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
+ }
+
+ /* Wa_1308578152:dg2_g10 when first gslice is fused off */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) &&
+ needs_wa_1308578152(engine)) {
+ wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
+ GEN12_REPLAY_MODE_GRANULARITY);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(engine->i915)) {
+ /* Wa_22013037850:dg2 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
+ DISABLE_128B_EVICTION_COMMAND_UDW);
+
+ /* Wa_22012856258:dg2 */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN12_DISABLE_READ_SUPPRESSION);
+
+ /*
+ * Wa_22010960976:dg2
+ * Wa_14013347512:dg2
+ */
+ wa_masked_dis(wal, GEN12_HDC_CHICKEN0,
+ LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ /*
+ * Wa_1608949956:dg2_g10
+ * Wa_14010198302:dg2_g10
+ */
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
+
+ /*
+ * Wa_14010918519:dg2_g10
+ *
+ * LSC_CHICKEN_BIT_0 always reads back as 0 in this stepping,
+ * so skip verification.
+ */
+ wa_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
+ FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
+ 0, false);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ /* Wa_22010430635:dg2 */
+ wa_masked_en(wal,
+ GEN9_ROW_CHICKEN4,
+ GEN12_DISABLE_GRF_CLEAR);
+
+ /* Wa_14010648519:dg2 */
+ wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_G11(engine->i915)) {
+ /* Wa_22012654132:dg2 */
+ wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ 0 /* write-only, so skip validation */,
+ true);
+ }
+
+ /* Wa_14013202645:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
+ wa_write_or(wal, RT_CTRL, DIS_NULL_QUERY);
+
+ if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
+ IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
/*
* Wa_1607138336:tgl[a0],dg1[a0]
* Wa_1607063988:tgl[a0],dg1[a0]
@@ -1729,7 +2147,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
}
- if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) {
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
/*
* Wa_1606679103:tgl
* (see also Wa_1606682166:icl)
@@ -1764,7 +2182,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
- IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
+ IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */
wa_masked_en(wal, GEN7_ROW_CHICKEN2,
@@ -1777,8 +2195,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
}
-
- if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
+ if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
* Wa_1607030317:tgl
@@ -2131,7 +2548,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_F0)) {
+ if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
wa_write(wal,
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
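For orientation, all the wa_write_or()/wa_masked_en()/wa_write_clr_set() calls throughout this file append entries to a wa_list that is replayed on init, resume and reset. A sketch of the shape of one entry and its application (simplified; the real struct i915_wa also records a verification read mask):

struct i915_wa_sketch {
	i915_reg_t reg;
	u32 clr;	/* bits to clear */
	u32 set;	/* bits to set */
};

/* applied on init, resume and after engine/GT resets */
static void wa_apply_sketch(struct intel_uncore *uncore,
			    const struct i915_wa_sketch *wa)
{
	u32 val = intel_uncore_read_fw(uncore, wa->reg);

	intel_uncore_write_fw(uncore, wa->reg, (val & ~wa->clr) | wa->set);
}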
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 8b89215afe46..c0637bf799a3 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -17,7 +17,7 @@ static int mock_timeline_pin(struct intel_timeline *tl)
{
int err;
- if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj)))
+ if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj, NULL)))
return -EBUSY;
err = intel_timeline_pin_map(tl);
@@ -35,9 +35,31 @@ static void mock_timeline_unpin(struct intel_timeline *tl)
atomic_dec(&tl->pin_count);
}
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+{
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
- const unsigned long sz = PAGE_SIZE / 2;
+ const unsigned long sz = PAGE_SIZE;
struct intel_ring *ring;
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
@@ -50,15 +72,11 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->vaddr = (void *)(ring + 1);
atomic_set(&ring->pin_count, 1);
- ring->vma = i915_vma_alloc();
- if (!ring->vma) {
+ ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE);
+ if (IS_ERR(ring->vma)) {
kfree(ring);
return NULL;
}
- i915_active_init(&ring->vma->active, NULL, NULL, 0);
- __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma));
- __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags);
- ring->vma->node.size = sz;
intel_ring_update_space(ring);
@@ -67,8 +85,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
static void mock_ring_free(struct intel_ring *ring)
{
- i915_active_fini(&ring->vma->active);
- i915_vma_free(ring->vma);
+ i915_vma_put(ring->vma);
kfree(ring);
}
@@ -125,6 +142,7 @@ static void mock_context_unpin(struct intel_context *ce)
static void mock_context_post_unpin(struct intel_context *ce)
{
+ i915_vma_unpin(ce->ring->vma);
}
static void mock_context_destroy(struct kref *ref)
@@ -169,7 +187,7 @@ static int mock_context_alloc(struct intel_context *ce)
static int mock_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww, void **unused)
{
- return 0;
+ return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
}
static int mock_context_pin(struct intel_context *ce, void *unused)
@@ -327,7 +345,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
struct mock_engine *engine;
GEM_BUG_ON(id >= I915_NUM_ENGINES);
- GEM_BUG_ON(!i915->gt.uncore);
+ GEM_BUG_ON(!to_gt(i915)->uncore);
engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
if (!engine)
@@ -335,8 +353,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* minimal engine setup for requests */
engine->base.i915 = i915;
- engine->base.gt = &i915->gt;
- engine->base.uncore = i915->gt.uncore;
+ engine->base.gt = to_gt(i915);
+ engine->base.uncore = to_gt(i915)->uncore;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
engine->base.mask = BIT(id);
@@ -359,8 +377,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.release = mock_engine_release;
- i915->gt.engine[id] = &engine->base;
- i915->gt.engine_class[0][id] = &engine->base;
+ to_gt(i915)->engine[id] = &engine->base;
+ to_gt(i915)->engine_class[0][id] = &engine->base;
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index fa7b99a671dd..76fbae358072 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -442,7 +442,7 @@ int intel_context_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_context),
SUBTEST(live_remote_context),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.c b/drivers/gpu/drm/i915/gt/selftest_engine.c
index 262764f6d90a..57fea9ea1705 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.c
@@ -12,7 +12,7 @@ int intel_engine_live_selftests(struct drm_i915_private *i915)
live_engine_pm_selftests,
NULL,
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
typeof(*tests) *fn;
for (fn = tests; *fn; fn++) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 64abf5feabfa..1b75f478d1b8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -361,10 +361,10 @@ int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
SUBTEST(perf_mi_noop),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
static int intel_mmio_bases_check(void *arg)
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 6e6e4d747cca..273d440a53e3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -378,13 +378,13 @@ int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
int saved_hangcheck;
int err;
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
saved_hangcheck = i915->params.enable_hangcheck;
i915->params.enable_hangcheck = INT_MAX;
- err = intel_gt_live_subtests(tests, &i915->gt);
+ err = intel_gt_live_subtests(tests, to_gt(i915));
i915->params.enable_hangcheck = saved_hangcheck;
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index 75569666105d..8af261831470 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -214,6 +214,31 @@ static int live_engine_timestamps(void *arg)
return 0;
}
+static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness)
+{
+ ktime_t start, unused, dt;
+
+ if (!intel_engine_uses_guc(engine))
+ return 0;
+
+ /*
+ * In GuC mode of submission, the busyness stats may get updated after
+ * the batch starts running. Poll for a change in busyness and time
+ * out after 10 ms (the check below compares nanoseconds).
+ */
+ start = ktime_get();
+ while (intel_engine_get_busy_time(engine, &unused) == busyness) {
+ dt = ktime_get() - start;
+ if (dt > 10000000) {
+ pr_err("active wait timed out %lld\n", dt);
+ ENGINE_TRACE(engine, "active wait timed out %lld\n", dt);
+ return -ETIME;
+ }
+ }
+
+ return 0;
+}
+
static int live_engine_busy_stats(void *arg)
{
struct intel_gt *gt = arg;
@@ -232,6 +257,7 @@ static int live_engine_busy_stats(void *arg)
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
for_each_engine(engine, gt, id) {
struct i915_request *rq;
+ ktime_t busyness, dummy;
ktime_t de, dt;
ktime_t t[2];
@@ -274,16 +300,23 @@ static int live_engine_busy_stats(void *arg)
}
i915_request_add(rq);
+ busyness = intel_engine_get_busy_time(engine, &dummy);
if (!igt_wait_for_spinner(&spin, rq)) {
intel_gt_set_wedged(engine->gt);
err = -ETIME;
goto end;
}
+ err = __spin_until_busier(engine, busyness);
+ if (err) {
+ GEM_TRACE_DUMP();
+ goto end;
+ }
+
ENGINE_TRACE(engine, "measuring busy time\n");
preempt_disable();
de = intel_engine_get_busy_time(engine, &t[0]);
- udelay(100);
+ mdelay(10);
de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
preempt_enable();
dt = ktime_sub(t[1], t[0]);
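After the measurement above, the test compares the busy-time delta (de) against the wall-clock delta (dt); with a spinner monopolising the engine the two should agree closely. A sketch of such a check (the tolerance and helper name here are assumptions, not the test's exact numbers):

#include <linux/math64.h>

static int check_busyness_sketch(ktime_t de, ktime_t dt)
{
	s64 err_pct = div64_s64(100 * ktime_sub(dt, de), dt);

	return abs(err_pct) <= 5 ? 0 : -EINVAL;	/* assumed 5% tolerance */
}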
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index b367ecfa42de..e10da897e07a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -4502,11 +4502,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_reset),
};
- if (i915->gt.submission_method != INTEL_SUBMISSION_ELSP)
+ if (to_gt(i915)->submission_method != INTEL_SUBMISSION_ELSP)
return 0;
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index b9441217ca3d..8bf62a5826cc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -43,7 +43,7 @@ static void measure_clocks(struct intel_engine_cs *engine,
int i;
for (i = 0; i < 5; i++) {
- preempt_disable();
+ local_irq_disable();
cycles[i] = -ENGINE_READ_FW(engine, RING_TIMESTAMP);
dt[i] = ktime_get();
@@ -51,7 +51,7 @@ static void measure_clocks(struct intel_engine_cs *engine,
dt[i] = ktime_sub(ktime_get(), dt[i]);
cycles[i] += ENGINE_READ_FW(engine, RING_TIMESTAMP);
- preempt_enable();
+ local_irq_enable();
}
/* Use the median of both cycle/dt; close enough */
@@ -193,10 +193,10 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_gt_resume),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
@@ -210,8 +210,8 @@ int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
SUBTEST(live_rc6_ctx_wa),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 7e2d99dd012d..15d63435ec4d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -471,7 +471,8 @@ static int igt_reset_nop_engine(void *arg)
count = 0;
st_engine_heartbeat_disable(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
do {
int i;
@@ -528,7 +529,7 @@ static int igt_reset_nop_engine(void *arg)
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable(engine);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
@@ -582,7 +583,8 @@ static int igt_reset_fail_engine(void *arg)
}
st_engine_heartbeat_disable(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
force_reset_timeout(engine);
err = intel_engine_reset(engine, NULL);
@@ -679,7 +681,7 @@ static int igt_reset_fail_engine(void *arg)
out:
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
skip:
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable(engine);
intel_context_put(ce);
@@ -734,7 +736,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
reset_engine_count = i915_reset_engine_count(global, engine);
st_engine_heartbeat_disable(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
count = 0;
do {
struct i915_request *rq = NULL;
@@ -824,7 +827,7 @@ restore:
if (err)
break;
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable(engine);
pr_info("%s: Completed %lu %s resets\n",
engine->name, count, active ? "active" : "idle");
@@ -1042,7 +1045,8 @@ static int __igt_reset_engines(struct intel_gt *gt,
yield(); /* start all threads before we begin */
st_engine_heartbeat_disable_no_pm(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
do {
struct i915_request *rq = NULL;
struct intel_selftest_saved_policy saved;
@@ -1165,7 +1169,7 @@ restore:
if (err)
break;
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable_no_pm(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
@@ -2014,7 +2018,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
intel_wakeref_t wakeref;
int err;
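
The hangcheck changes replace bare set_bit()/clear_bit() on the per-engine reset flags: test_and_set_bit() inside GEM_BUG_ON() asserts the selftest is not racing another reset owner, and clear_and_wake_up_bit() wakes anyone sleeping on the flag, which a plain clear_bit() would not. A hedged sketch of the pairing this protects, assuming a wait_on_bit()-style sleeper elsewhere in the reset code:

/* Sketch, not from the patch: the waiter/waker pairing. */
static void wait_for_engine_reset(struct intel_gt *gt, unsigned int bit)
{
	/* Sleeps until the bit is cleared and a wake-up is issued. */
	wait_on_bit(&gt->reset.flags, bit, TASK_UNINTERRUPTIBLE);
}

static void finish_engine_reset(struct intel_gt *gt, unsigned int bit)
{
	/* clear_bit() alone could leave wait_on_bit() callers asleep. */
	clear_and_wake_up_bit(bit, &gt->reset.flags);
}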
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index b0977a3b699b..618c905daa19 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1847,5 +1847,5 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 12ef2837c89b..fa4293d2944f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -49,6 +49,7 @@ static int copy(struct intel_migrate *migrate,
if (IS_ERR(src))
return 0;
+ sz = src->base.size;
dst = i915_gem_object_create_internal(i915, sz);
if (IS_ERR(dst))
goto err_free_src;
@@ -441,7 +442,7 @@ int intel_migrate_live_selftests(struct drm_i915_private *i915)
SUBTEST(thread_global_copy),
SUBTEST(thread_global_clear),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (!gt->migrate.context)
return 0;
@@ -464,7 +465,7 @@ create_init_lmem_internal(struct intel_gt *gt, size_t sz, bool try_lmem)
return obj;
}
- i915_gem_object_trylock(obj);
+ i915_gem_object_trylock(obj, NULL);
err = i915_gem_object_pin_pages(obj);
if (err) {
i915_gem_object_unlock(obj);
@@ -657,7 +658,7 @@ int intel_migrate_perf_selftests(struct drm_i915_private *i915)
SUBTEST(perf_clear_blt),
SUBTEST(perf_copy_blt),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
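
The one-line sz = src->base.size fix in copy() matters because the creation helper above this hunk may round the requested size up (lmem has a minimum page size), so the destination must be created with the source's actual size or the two objects diverge. Roughly, with the creation call as it appears earlier in copy() (not shown in the hunk):

/* Illustration only: sizes may diverge after creation. */
src = create_lmem_or_internal(i915, sz);	/* may round sz up */
sz = src->base.size;				/* use the real size */
dst = i915_gem_object_create_internal(i915, sz);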
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index 13d25bf2a94a..c1d861333c44 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -451,5 +451,5 @@ int intel_mocs_live_selftests(struct drm_i915_private *i915)
if (!get_mocs_settings(i915, &table))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 7a50c9f4071b..8a873f6bda7f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -376,7 +376,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_atomic_reset),
SUBTEST(igt_atomic_engine_reset),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (!intel_has_gpu_reset(gt))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 041954408d0f..70f9ac1ec2c7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -291,8 +291,8 @@ int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_ctx_switch_wa),
};
- if (i915->gt.submission_method > INTEL_SUBMISSION_RING)
+ if (to_gt(i915)->submission_method > INTEL_SUBMISSION_RING)
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 9334bad131a2..b768cea5943d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -39,7 +39,7 @@ static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
static int live_slpc_clamp_min(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
@@ -166,7 +166,7 @@ static int live_slpc_clamp_min(void *arg)
static int live_slpc_clamp_max(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct intel_guc_slpc *slpc;
struct intel_rps *rps;
struct intel_engine_cs *engine;
@@ -304,7 +304,7 @@ int intel_slpc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_slpc_clamp_min),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index d0b6a3afcf44..e2eb686a9763 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -159,7 +159,7 @@ static int mock_hwsp_freelist(void *arg)
INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
- state.gt = &i915->gt;
+ state.gt = to_gt(i915);
/*
* Create a bunch of timelines and check that their HWSP do not overlap.
@@ -1416,8 +1416,8 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_rollover_user),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 962e91ba3be4..0287c2573c51 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -1387,8 +1387,8 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_engine_reset_workarounds),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index ba10bd374cee..fe5d7d261797 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -144,6 +144,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
+ INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 31cf9fb48c7e..f9240d4baa69 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -95,6 +95,11 @@ struct intel_guc {
*/
struct ida guc_ids;
/**
+ * @num_guc_ids: Number of guc_ids; a selftest feature allows
+ * reducing this number while testing.
+ */
+ int num_guc_ids;
+ /**
* @guc_ids_bitmap: used to allocate new guc_ids for multi-lrc
*/
unsigned long *guc_ids_bitmap;
@@ -138,6 +143,8 @@ struct intel_guc {
u32 ads_regset_size;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @ads_engine_usage_size: size of engine usage in the ADS */
+ u32 ads_engine_usage_size;
/** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
struct i915_vma *lrc_desc_pool;
@@ -172,6 +179,41 @@ struct intel_guc {
/** @send_mutex: used to serialize the intel_guc_send actions */
struct mutex send_mutex;
+
+ /**
+ * @timestamp: GT timestamp object that stores a copy of the timestamp
+ * and adjusts it for overflow using a worker.
+ */
+ struct {
+ /**
+ * @lock: Lock protecting the below fields and the engine stats.
+ */
+ spinlock_t lock;
+
+ /**
+ * @gt_stamp: 64 bit extended value of the GT timestamp.
+ */
+ u64 gt_stamp;
+
+ /**
+ * @ping_delay: Period for polling the GT timestamp for
+ * overflow.
+ */
+ unsigned long ping_delay;
+
+ /**
+ * @work: Periodic work to adjust GT timestamp, engine and
+ * context usage for overflows.
+ */
+ struct delayed_work work;
+ } timestamp;
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ /**
+ * @number_guc_id_stolen: The number of guc_ids that have been stolen
+ */
+ int number_guc_id_stolen;
+#endif
};
static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 621c893a009f..1a1edae67e4e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -26,6 +26,8 @@
* | guc_policies |
* +---------------------------------------+
* | guc_gt_system_info |
+ * +---------------------------------------+
+ * | guc_engine_usage |
* +---------------------------------------+ <== static
* | guc_mmio_reg[countA] (engine 0.0) |
* | guc_mmio_reg[countB] (engine 0.1) |
@@ -47,6 +49,7 @@ struct __guc_ads_blob {
struct guc_ads ads;
struct guc_policies policies;
struct guc_gt_system_info system_info;
+ struct guc_engine_usage engine_usage;
/* From here on, location is dynamic! Refer to above diagram. */
struct guc_mmio_reg regset[0];
} __packed;
@@ -628,3 +631,21 @@ void intel_guc_ads_reset(struct intel_guc *guc)
guc_ads_private_data_reset(guc);
}
+
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
+{
+ struct __guc_ads_blob *blob = guc->ads_blob;
+ u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma);
+ u32 offset = base + ptr_offset(blob, engine_usage);
+
+ return offset;
+}
+
+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine)
+{
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ struct __guc_ads_blob *blob = guc->ads_blob;
+ u8 guc_class = engine_class_to_guc_class(engine->class);
+
+ return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)];
+}
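
intel_guc_engine_usage_offset() reduces to a GGTT base plus a compile-time struct offset, since engine_usage sits in the static portion of the ADS layout diagrammed above. A rough equivalent, assuming ptr_offset() behaves like offsetof():

/* Sketch: the GGTT address handed to GuC for the usage buffer. */
u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma);
u32 off  = base + offsetof(struct __guc_ads_blob, engine_usage);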
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
index 3d85051d57e4..e74c110facff 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
@@ -6,8 +6,11 @@
#ifndef _INTEL_GUC_ADS_H_
#define _INTEL_GUC_ADS_H_
+#include <linux/types.h>
+
struct intel_guc;
struct drm_printer;
+struct intel_engine_cs;
int intel_guc_ads_create(struct intel_guc *guc);
void intel_guc_ads_destroy(struct intel_guc *guc);
@@ -15,5 +18,7 @@ void intel_guc_ads_init_late(struct intel_guc *guc);
void intel_guc_ads_reset(struct intel_guc *guc);
void intel_guc_ads_print_policy_info(struct intel_guc *guc,
struct drm_printer *p);
+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine);
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index a0cc34be7b56..aa6dd6415202 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -523,6 +523,15 @@ static inline bool ct_deadlocked(struct intel_guc_ct *ct)
CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
ktime_ms_delta(ktime_get(), ct->stall_time),
send->status, recv->status);
+ CT_ERROR(ct, "H2G Space: %u (Bytes)\n",
+ atomic_read(&ct->ctbs.send.space) * 4);
+ CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head);
+ CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail);
+ CT_ERROR(ct, "G2H Space: %u (Bytes)\n",
+ atomic_read(&ct->ctbs.recv.space) * 4);
+ CT_ERROR(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head);
+ CT_ERROR(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail);
+
ct->ctbs.send.broken = true;
}
@@ -582,12 +591,19 @@ static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
{
+ bool h2g = h2g_has_room(ct, h2g_dw);
+ bool g2h = g2h_has_room(ct, g2h_dw);
+
lockdep_assert_held(&ct->ctbs.send.lock);
- if (unlikely(!h2g_has_room(ct, h2g_dw) || !g2h_has_room(ct, g2h_dw))) {
+ if (unlikely(!h2g || !g2h)) {
if (ct->stall_time == KTIME_MAX)
ct->stall_time = ktime_get();
+ /* Be paranoid and kick G2H tasklet to free credits */
+ if (!g2h)
+ tasklet_hi_schedule(&ct->receive_tasklet);
+
if (unlikely(ct_deadlocked(ct)))
return -EPIPE;
else
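
Two things are happening in these intel_guc_ct.c hunks: the new CT_ERROR dump converts the CTB space accounting from dwords to bytes (hence the * 4), and has_room_nb() now evaluates both credit checks up front so that an exhausted G2H side can kick the receive tasklet, since processing pending G2H messages is what returns credits. In sketch form:

/* Sketch only: dwords vs bytes, and the G2H credit reclaim kick. */
u32 h2g_bytes = atomic_read(&ct->ctbs.send.space) * 4;	/* dw -> bytes */

if (!g2h_has_room(ct, g2h_dw))			/* out of G2H credits */
	tasklet_hi_schedule(&ct->receive_tasklet);	/* reclaim them */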
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 196424be0998..31420ce1ce6b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -40,9 +40,8 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
}
}
-/* Copy RSA signature from the fw image to HW for verification */
-static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
- struct intel_uncore *uncore)
+static int guc_xfer_rsa_mmio(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
{
u32 rsa[UOS_RSA_SCRATCH_COUNT];
size_t copied;
@@ -58,6 +57,27 @@ static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
return 0;
}
+static int guc_xfer_rsa_vma(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
+{
+ struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
+
+ intel_uncore_write(uncore, UOS_RSA_SCRATCH(0),
+ intel_guc_ggtt_offset(guc, guc_fw->rsa_data));
+
+ return 0;
+}
+
+/* Copy RSA signature from the fw image to HW for verification */
+static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
+{
+ if (guc_fw->rsa_data)
+ return guc_xfer_rsa_vma(guc_fw, uncore);
+ else
+ return guc_xfer_rsa_mmio(guc_fw, uncore);
+}
+
/*
* Read the GuC status register (GUC_STATUS) and store it in the
* specified location; then return a boolean indicating whether
@@ -142,7 +162,10 @@ int intel_guc_fw_upload(struct intel_guc *guc)
/*
* Note that GuC needs the CSS header plus uKernel code to be copied
* by the DMA engine in one operation, whereas the RSA signature is
- * loaded via MMIO.
+ * loaded separately, either by copying it to the UOS_RSA_SCRATCH
+ * register (if key size <= 256) or through a ggtt-pinned vma (if key
+ * size > 256). The RSA size and therefore the way we provide it to the
+ * HW is fixed for each platform and hard-coded in the bootrom.
*/
ret = guc_xfer_rsa(&guc->fw, uncore);
if (ret)
@@ -164,6 +187,6 @@ int intel_guc_fw_upload(struct intel_guc *guc)
return 0;
out:
- intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_FAIL);
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
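
The MMIO path fits the key into the UOS_RSA_SCRATCH bank; with 64 dword-sized registers (per the "64 RSA registers" comment in the intel_uc_fw.c hunk later in this series) that is 64 x 4 = 256 bytes, exactly the cutover encoded in uc_fw_need_rsa_in_memory() further down. As a one-liner, assuming UOS_RSA_SCRATCH_COUNT == 64:

/* Sketch: keys larger than the scratch bank must live in GGTT memory. */
bool needs_vma = guc_fw->rsa_size > UOS_RSA_SCRATCH_COUNT * sizeof(u32);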
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 722933e26347..7072e30e99f4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -294,6 +294,19 @@ struct guc_ads {
u32 reserved[15];
} __packed;
+/* Engine usage stats */
+struct guc_engine_usage_record {
+ u32 current_context_index;
+ u32 last_switch_in_stamp;
+ u32 reserved0;
+ u32 total_runtime;
+ u32 reserved1[4];
+} __packed;
+
+struct guc_engine_usage {
+ struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+} __packed;
+
/* GuC logging structures */
enum guc_log_buffer_type {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index ac1ee1d5ce10..fe6ab7550a14 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -15,9 +15,12 @@
struct intel_guc;
-#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#if defined(CONFIG_DRM_I915_DEBUG_GUC)
#define CRASH_BUFFER_SIZE SZ_2M
#define DEBUG_BUFFER_SIZE SZ_16M
+#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#define CRASH_BUFFER_SIZE SZ_1M
+#define DEBUG_BUFFER_SIZE SZ_2M
#else
#define CRASH_BUFFER_SIZE SZ_8K
#define DEBUG_BUFFER_SIZE SZ_64K
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
index 46026c2c1722..ddfbe334689f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
@@ -10,28 +10,80 @@
#include "intel_guc.h"
#include "intel_guc_log.h"
#include "intel_guc_log_debugfs.h"
+#include "intel_uc.h"
+
+static u32 obj_to_guc_log_dump_size(struct drm_i915_gem_object *obj)
+{
+ u32 size;
+
+ if (!obj)
+ return PAGE_SIZE;
+
+ /* "0x%08x 0x%08x 0x%08x 0x%08x\n" => 16 bytes -> 44 chars => x2.75 */
+ size = ((obj->base.size * 11) + 3) / 4;
+
+ /* Add padding for final blank line, any extra header info, etc. */
+ size = PAGE_ALIGN(size + PAGE_SIZE);
+
+ return size;
+}
+
+static u32 guc_log_dump_size(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+
+ if (!intel_guc_is_supported(guc))
+ return PAGE_SIZE;
+
+ if (!log->vma)
+ return PAGE_SIZE;
+
+ return obj_to_guc_log_dump_size(log->vma->obj);
+}
static int guc_log_dump_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
+ int ret;
- return intel_guc_log_dump(m->private, &p, false);
+ ret = intel_guc_log_dump(m->private, &p, false);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && seq_has_overflowed(m))
+ pr_warn_once("preallocated size:%zx for %s exceeded\n",
+ m->size, __func__);
+
+ return ret;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(guc_log_dump, guc_log_dump_size);
+
+static u32 guc_load_err_dump_size(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
+
+ if (!intel_guc_is_supported(guc))
+ return PAGE_SIZE;
+
+ return obj_to_guc_log_dump_size(uc->load_err_log);
}
-DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_log_dump);
static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
+ int ret;
+
+ ret = intel_guc_log_dump(m->private, &p, true);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && seq_has_overflowed(m))
+ pr_warn_once("preallocated size:%zx for %s exceeded\n",
+ m->size, __func__);
+
- return intel_guc_log_dump(m->private, &p, true);
+ return ret;
}
-DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(guc_load_err_log_dump, guc_load_err_dump_size);
static int guc_log_level_get(void *data, u64 *val)
{
struct intel_guc_log *log = data;
- if (!intel_guc_is_used(log_to_guc(log)))
+ if (!log->vma)
return -ENODEV;
*val = intel_guc_log_get_level(log);
@@ -43,7 +95,7 @@ static int guc_log_level_set(void *data, u64 val)
{
struct intel_guc_log *log = data;
- if (!intel_guc_is_used(log_to_guc(log)))
+ if (!log->vma)
return -ENODEV;
return intel_guc_log_set_level(log, val);
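
The sizing math in obj_to_guc_log_dump_size() is worth unpacking: every 16 bytes of log render as a 44-character line (four "0x%08x" fields of 10 characters, three spaces, one newline), and 44/16 = 11/4 = 2.75. Worked out:

/* 4*10 chars + 3 spaces + 1 '\n' = 44 chars per 16 log bytes */
size = ((obj->base.size * 11) + 3) / 4;	/* x2.75, rounded up */
size = PAGE_ALIGN(size + PAGE_SIZE);	/* slack for headers etc. */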
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 65a3e7fdb2b2..13b27b8ff74e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -79,29 +79,6 @@ static void slpc_mem_set_disabled(struct slpc_shared_data *data,
slpc_mem_set_param(data, enable_id, 0);
}
-int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
-{
- struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
- u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
- int err;
-
- GEM_BUG_ON(slpc->vma);
-
- err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
- if (unlikely(err)) {
- drm_err(&i915->drm,
- "Failed to allocate SLPC struct (err=%pe)\n",
- ERR_PTR(err));
- return err;
- }
-
- slpc->max_freq_softlimit = 0;
- slpc->min_freq_softlimit = 0;
-
- return err;
-}
-
static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
struct slpc_shared_data *data;
@@ -203,6 +180,86 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc,
return guc_action_slpc_unset_param(guc, id);
}
+static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct intel_guc *guc = slpc_to_guc(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ lockdep_assert_held(&slpc->lock);
+
+ if (!intel_guc_is_ready(guc))
+ return -ENODEV;
+
+ /*
+ * This function differs from intel_guc_slpc_set_min_freq() in that
+ * the softlimit will not be updated
+ * here since this is used to temporarily change min freq,
+ * for example, during a waitboost. Caller is responsible for
+ * checking bounds.
+ */
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ freq);
+ if (ret)
+ drm_err(&i915->drm, "Unable to force min freq to %u: %d",
+ freq, ret);
+ }
+
+ return ret;
+}
+
+static void slpc_boost_work(struct work_struct *work)
+{
+ struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
+
+ /*
+ * Raise min freq to boost. It's possible that this is greater
+ * than the current max, but it will then be limited by RP0.
+ * An error setting the min param is not fatal.
+ */
+ mutex_lock(&slpc->lock);
+ if (atomic_read(&slpc->num_waiters)) {
+ slpc_force_min_freq(slpc, slpc->boost_freq);
+ slpc->num_boosts++;
+ }
+ mutex_unlock(&slpc->lock);
+}
+
+int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
+{
+ struct intel_guc *guc = slpc_to_guc(slpc);
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+ int err;
+
+ GEM_BUG_ON(slpc->vma);
+
+ err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
+ if (unlikely(err)) {
+ drm_err(&i915->drm,
+ "Failed to allocate SLPC struct (err=%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ slpc->max_freq_softlimit = 0;
+ slpc->min_freq_softlimit = 0;
+
+ slpc->boost_freq = 0;
+ atomic_set(&slpc->num_waiters, 0);
+ slpc->num_boosts = 0;
+
+ mutex_init(&slpc->lock);
+ INIT_WORK(&slpc->boost_work, slpc_boost_work);
+
+ return err;
+}
+
static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
switch (state) {
@@ -393,7 +450,11 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
val > slpc->max_freq_softlimit)
return -EINVAL;
+ /* Need a lock now since waitboost can be modifying min as well */
+ mutex_lock(&slpc->lock);
+
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_set_param(slpc,
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
val);
@@ -406,6 +467,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
if (!ret)
slpc->min_freq_softlimit = val;
+ mutex_unlock(&slpc->lock);
+
return ret;
}
@@ -522,6 +585,9 @@ static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
GT_FREQUENCY_MULTIPLIER;
slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
GT_FREQUENCY_MULTIPLIER;
+
+ if (!slpc->boost_freq)
+ slpc->boost_freq = slpc->rp0_freq;
}
/*
@@ -557,7 +623,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
if (unlikely(ret < 0))
return ret;
- intel_guc_pm_intrmsk_enable(&i915->gt);
+ intel_guc_pm_intrmsk_enable(to_gt(i915));
slpc_get_rp_values(slpc);
@@ -588,6 +654,47 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
return 0;
}
+int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
+{
+ int ret = 0;
+
+ if (val < slpc->min_freq || val > slpc->rp0_freq)
+ return -EINVAL;
+
+ mutex_lock(&slpc->lock);
+
+ if (slpc->boost_freq != val) {
+ /* Apply only if there are active waiters */
+ if (atomic_read(&slpc->num_waiters)) {
+ ret = slpc_force_min_freq(slpc, val);
+ if (ret) {
+ ret = -EIO;
+ goto done;
+ }
+ }
+
+ slpc->boost_freq = val;
+ }
+
+done:
+ mutex_unlock(&slpc->lock);
+ return ret;
+}
+
+void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
+{
+ /*
+ * Return min back to the softlimit. This is called during
+ * request retire, so a set_param failure here cannot fail the
+ * retire and is simply ignored.
+ */
+ mutex_lock(&slpc->lock);
+ if (atomic_dec_and_test(&slpc->num_waiters))
+ slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
+ mutex_unlock(&slpc->lock);
+}
+
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -611,6 +718,8 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p
slpc_decode_max_freq(slpc));
drm_printf(p, "\tMin freq: %u MHz\n",
slpc_decode_min_freq(slpc));
+ drm_printf(p, "\twaitboosts: %u\n",
+ slpc->num_boosts);
}
}
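
Pieced together, the waitboost flow added to SLPC is: a stalled waiter bumps num_waiters and queues boost_work; the worker raises the effective min frequency to boost_freq without touching the softlimit; and intel_guc_slpc_dec_waiters() restores the softlimit once the last waiter retires. A condensed sketch with hypothetical caller names (the request-side hooks are outside this diff):

/* Sketch of the boost lifecycle around a stalling request. */
static void request_wait_begin(struct intel_guc_slpc *slpc)
{
	if (atomic_inc_return(&slpc->num_waiters) == 1)
		schedule_work(&slpc->boost_work); /* min -> boost_freq */
}

static void request_retire(struct intel_guc_slpc *slpc)
{
	intel_guc_slpc_dec_waiters(slpc); /* last one restores softlimit */
}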
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index e45054d5b9b4..0caa8fee3c04 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -34,9 +34,12 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc);
void intel_guc_slpc_fini(struct intel_guc_slpc *slpc);
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val);
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val);
+int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val);
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val);
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val);
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p);
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
+void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
+void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
index 41d13527666f..bf5b9a563c09 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -6,6 +6,9 @@
#ifndef _INTEL_GUC_SLPC_TYPES_H_
#define _INTEL_GUC_SLPC_TYPES_H_
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
#include <linux/types.h>
#define SLPC_RESET_TIMEOUT_MS 5
@@ -20,10 +23,20 @@ struct intel_guc_slpc {
u32 min_freq;
u32 rp0_freq;
u32 rp1_freq;
+ u32 boost_freq;
/* frequency softlimits */
u32 min_freq_softlimit;
u32 max_freq_softlimit;
+
+ /*
+ * Protects set/reset of boost freq and the value of num_waiters.
+ */
+ struct mutex lock;
+
+ struct work_struct boost_work;
+ atomic_t num_waiters;
+ u32 num_boosts;
};
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 302e9ff0602c..e7517206af82 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -13,6 +13,7 @@
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
@@ -21,6 +22,7 @@
#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"
+#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -143,7 +145,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
* use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
* multi-lrc.
*/
-#define NUMBER_MULTI_LRC_GUC_ID (GUC_MAX_LRC_DESCRIPTORS / 16)
+#define NUMBER_MULTI_LRC_GUC_ID(guc) \
+ ((guc)->submission_state.num_guc_ids / 16)
/*
* Below is a set of functions which control the GuC scheduling state which
@@ -1038,8 +1041,6 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
spin_unlock(&ce->guc_state.lock);
- GEM_BUG_ON(!do_put && !destroyed);
-
if (pending_enable || destroyed || deregister) {
decr_outstanding_submission_g2h(guc);
if (deregister)
@@ -1077,6 +1078,271 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
+/*
+ * GuC stores busyness stats for each engine at context in/out boundaries. A
+ * context 'in' logs execution start time, 'out' adds in -> out delta to total.
+ * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
+ * GuC.
+ *
+ * __i915_pmu_event_read samples engine busyness. When sampling, if context id
+ * is valid (!= ~0) and start is non-zero, the engine is considered to be
+ * active. For an active engine, total busyness = total + (now - start), where
+ * 'now' is the time at which the busyness is sampled. For an inactive engine,
+ * total busyness = total.
+ *
+ * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
+ *
+ * The start and total values provided by GuC are 32 bits and wrap around in a
+ * few minutes. Since perf pmu provides busyness as 64 bit monotonically
+ * increasing ns values, there is a need for this implementation to account for
+ * overflows and extend the GuC provided values to 64 bits before returning
+ * busyness to the user. In order to do that, a worker runs periodically with
+ * a period of 1/8th the time it takes for the timestamp to wrap (i.e. once
+ * every 27 seconds for a gt clock frequency of 19.2 MHz).
+ */
+
+#define WRAP_TIME_CLKS U32_MAX
+#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
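
To make the comment's numbers concrete: at a 19.2 MHz gt clock, the 32-bit timestamp wraps after U32_MAX / 19.2e6, about 223 seconds, so an eighth of that is roughly 27 seconds. This is exactly how ping_delay is computed later in intel_guc_submission_init():

/* Worked example, assuming gt->clock_frequency == 19200000 (19.2 MHz) */
/* POLL_TIME_CLKS = U32_MAX >> 3 = 536870911 clocks */
unsigned long ping_delay = (536870911 / 19200000 + 1) * HZ;	/* 28 s */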
+
+static void
+__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
+{
+ u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
+ u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
+
+ if (new_start == lower_32_bits(*prev_start))
+ return;
+
+ if (new_start < gt_stamp_last &&
+ (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
+ gt_stamp_hi++;
+
+ if (new_start > gt_stamp_last &&
+ (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
+ gt_stamp_hi--;
+
+ *prev_start = ((u64)gt_stamp_hi << 32) | new_start;
+}
+
+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+{
+ struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
+ struct intel_engine_guc_stats *stats = &engine->stats.guc;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ u32 last_switch = rec->last_switch_in_stamp;
+ u32 ctx_id = rec->current_context_index;
+ u32 total = rec->total_runtime;
+
+ lockdep_assert_held(&guc->timestamp.lock);
+
+ stats->running = ctx_id != ~0U && last_switch;
+ if (stats->running)
+ __extend_last_switch(guc, &stats->start_gt_clk, last_switch);
+
+ /*
+ * Instead of adjusting the total for overflow, just add the
+ * difference from the previous sample to stats->total_gt_clks.
+ */
+ if (total && total != ~0U) {
+ stats->total_gt_clks += (u32)(total - stats->prev_total);
+ stats->prev_total = total;
+ }
+}
+
+static void guc_update_pm_timestamp(struct intel_guc *guc,
+ struct intel_engine_cs *engine,
+ ktime_t *now)
+{
+ u32 gt_stamp_now, gt_stamp_hi;
+
+ lockdep_assert_held(&guc->timestamp.lock);
+
+ gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
+ gt_stamp_now = intel_uncore_read(engine->uncore,
+ RING_TIMESTAMP(engine->mmio_base));
+ *now = ktime_get();
+
+ if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
+ gt_stamp_hi++;
+
+ guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
+}
+
+/*
+ * Unlike the execlist mode of submission, total and active times are in terms
+ * of gt clocks. The *now parameter is retained to return the CPU time at which
+ * the busyness was sampled.
+ */
+static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
+{
+ struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
+ struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
+ struct intel_gt *gt = engine->gt;
+ struct intel_guc *guc = &gt->uc.guc;
+ u64 total, gt_stamp_saved;
+ unsigned long flags;
+ u32 reset_count;
+ bool in_reset;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+ /*
+ * If a reset happened, we risk reading partially updated engine
+ * busyness from GuC, so we just use the driver-stored copy of busyness.
+ * Synchronize with gt reset using reset_count and the
+ * I915_RESET_BACKOFF flag. Note that the reset flow updates the
+ * reset_count after the I915_RESET_BACKOFF flag, so ensure that the
+ * reset_count is usable by checking the flag afterwards.
+ */
+ reset_count = i915_reset_count(gpu_error);
+ in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
+
+ *now = ktime_get();
+
+ /*
+ * The active busyness depends on start_gt_clk and gt_stamp.
+ * gt_stamp is updated by i915 only when gt is awake and the
+ * start_gt_clk is derived from GuC state. To get a consistent
+ * view of activity, we query the GuC state only if gt is awake.
+ */
+ if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
+ stats_saved = *stats;
+ gt_stamp_saved = guc->timestamp.gt_stamp;
+ guc_update_engine_gt_clks(engine);
+ guc_update_pm_timestamp(guc, engine, now);
+ intel_gt_pm_put_async(gt);
+ if (i915_reset_count(gpu_error) != reset_count) {
+ *stats = stats_saved;
+ guc->timestamp.gt_stamp = gt_stamp_saved;
+ }
+ }
+
+ total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
+ if (stats->running) {
+ u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
+
+ total += intel_gt_clock_interval_to_ns(gt, clk);
+ }
+
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+
+ return ns_to_ktime(total);
+}
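
The returned busyness is wall-clock ns, so the accumulated gt clocks must be scaled by the gt clock frequency; intel_gt_clock_interval_to_ns() presumably performs that conversion. For instance, 19,200,000 accumulated clocks at 19.2 MHz report as one second of busyness:

/* Sketch of the scaling, assuming a straight frequency division. */
u64 ns = div_u64((u64)clks * NSEC_PER_SEC, gt->clock_frequency);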
+
+static void __reset_guc_busyness_stats(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+ ktime_t unused;
+
+ cancel_delayed_work_sync(&guc->timestamp.work);
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+ for_each_engine(engine, gt, id) {
+ guc_update_pm_timestamp(guc, engine, &unused);
+ guc_update_engine_gt_clks(engine);
+ engine->stats.guc.prev_total = 0;
+ }
+
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+static void __update_guc_busyness_stats(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+ ktime_t unused;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ for_each_engine(engine, gt, id) {
+ guc_update_pm_timestamp(guc, engine, &unused);
+ guc_update_engine_gt_clks(engine);
+ }
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+static void guc_timestamp_ping(struct work_struct *wrk)
+{
+ struct intel_guc *guc = container_of(wrk, typeof(*guc),
+ timestamp.work.work);
+ struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+ int srcu, ret;
+
+ /*
+ * Synchronize with gt reset to make sure the worker does not
+ * corrupt the engine/guc stats.
+ */
+ ret = intel_gt_reset_trylock(gt, &srcu);
+ if (ret)
+ return;
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ __update_guc_busyness_stats(guc);
+
+ intel_gt_reset_unlock(gt, srcu);
+
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+ guc->timestamp.ping_delay);
+}
+
+static int guc_action_enable_usage_stats(struct intel_guc *guc)
+{
+ u32 offset = intel_guc_engine_usage_offset(guc);
+ u32 action[] = {
+ INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
+ offset,
+ 0,
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static void guc_init_engine_stats(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+ guc->timestamp.ping_delay);
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
+ int ret = guc_action_enable_usage_stats(guc);
+
+ if (ret)
+ drm_err(&gt->i915->drm,
+ "Failed to enable usage stats: %d!\n", ret);
+ }
+}
+
+void intel_guc_busyness_park(struct intel_gt *gt)
+{
+ struct intel_guc *guc = &gt->uc.guc;
+
+ if (!guc_submission_initialized(guc))
+ return;
+
+ cancel_delayed_work(&guc->timestamp.work);
+ __update_guc_busyness_stats(guc);
+}
+
+void intel_guc_busyness_unpark(struct intel_gt *gt)
+{
+ struct intel_guc *guc = &gt->uc.guc;
+
+ if (!guc_submission_initialized(guc))
+ return;
+
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+ guc->timestamp.ping_delay);
+}
+
static inline bool
submission_disabled(struct intel_guc *guc)
{
@@ -1138,6 +1404,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
intel_gt_park_heartbeats(guc_to_gt(guc));
disable_submission(guc);
guc->interrupts.disable(guc);
+ __reset_guc_busyness_stats(guc);
/* Flush IRQ handler */
spin_lock_irq(&guc_to_gt(guc)->irq_lock);
@@ -1484,6 +1751,7 @@ static void destroyed_worker_func(struct work_struct *w);
*/
int intel_guc_submission_init(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
int ret;
if (guc->lrc_desc_pool)
@@ -1508,10 +1776,14 @@ int intel_guc_submission_init(struct intel_guc *guc)
destroyed_worker_func);
guc->submission_state.guc_ids_bitmap =
- bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID, GFP_KERNEL);
+ bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
if (!guc->submission_state.guc_ids_bitmap)
return -ENOMEM;
+ spin_lock_init(&guc->timestamp.lock);
+ INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
+ guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
+
return 0;
}
@@ -1598,13 +1870,13 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
if (intel_context_is_parent(ce))
ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
- NUMBER_MULTI_LRC_GUC_ID,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
order_base_2(ce->parallel.number_children
+ 1));
else
ret = ida_simple_get(&guc->submission_state.guc_ids,
- NUMBER_MULTI_LRC_GUC_ID,
- GUC_MAX_LRC_DESCRIPTORS,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ guc->submission_state.num_guc_ids,
GFP_KERNEL | __GFP_RETRY_MAYFAIL |
__GFP_NOWARN);
if (unlikely(ret < 0))
@@ -1670,6 +1942,10 @@ static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
set_context_guc_id_invalid(cn);
+#ifdef CONFIG_DRM_I915_SELFTEST
+ guc->number_guc_id_stolen++;
+#endif
+
return 0;
} else {
return -EAGAIN;
@@ -2373,7 +2649,6 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
unsigned long flags;
bool disabled;
- lockdep_assert_held(&guc->submission_state.lock);
GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
@@ -2389,7 +2664,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (unlikely(disabled)) {
- __release_guc_id(guc, ce);
+ release_guc_id(guc, ce);
__guc_context_destroy(ce);
return;
}
@@ -2423,36 +2698,48 @@ static void __guc_context_destroy(struct intel_context *ce)
static void guc_flush_destroyed_contexts(struct intel_guc *guc)
{
- struct intel_context *ce, *cn;
+ struct intel_context *ce;
unsigned long flags;
GEM_BUG_ON(!submission_disabled(guc) &&
guc_submission_initialized(guc));
- spin_lock_irqsave(&guc->submission_state.lock, flags);
- list_for_each_entry_safe(ce, cn,
- &guc->submission_state.destroyed_contexts,
- destroyed_link) {
- list_del_init(&ce->destroyed_link);
- __release_guc_id(guc, ce);
+ while (!list_empty(&guc->submission_state.destroyed_contexts)) {
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
+ struct intel_context,
+ destroyed_link);
+ if (ce)
+ list_del_init(&ce->destroyed_link);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ if (!ce)
+ break;
+
+ release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
- spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
static void deregister_destroyed_contexts(struct intel_guc *guc)
{
- struct intel_context *ce, *cn;
+ struct intel_context *ce;
unsigned long flags;
- spin_lock_irqsave(&guc->submission_state.lock, flags);
- list_for_each_entry_safe(ce, cn,
- &guc->submission_state.destroyed_contexts,
- destroyed_link) {
- list_del_init(&ce->destroyed_link);
+ while (!list_empty(&guc->submission_state.destroyed_contexts)) {
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
+ struct intel_context,
+ destroyed_link);
+ if (ce)
+ list_del_init(&ce->destroyed_link);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ if (!ce)
+ break;
+
guc_lrc_desc_unpin(ce);
}
- spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
static void destroyed_worker_func(struct work_struct *w)
@@ -3369,7 +3656,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_flush = gen12_emit_flush_xcs;
}
engine->set_default_submission = guc_set_default_submission;
+ engine->busyness = guc_engine_busyness;
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
@@ -3468,6 +3757,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
void intel_guc_submission_enable(struct intel_guc *guc)
{
guc_init_lrc_mapping(guc);
+ guc_init_engine_stats(guc);
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -3494,6 +3784,7 @@ static bool __guc_submission_selected(struct intel_guc *guc)
void intel_guc_submission_init_early(struct intel_guc *guc)
{
+ guc->submission_state.num_guc_ids = GUC_MAX_LRC_DESCRIPTORS;
guc->submission_supported = __guc_submission_supported(guc);
guc->submission_selected = __guc_submission_selected(guc);
}
@@ -3695,6 +3986,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
struct intel_context *ce;
+ unsigned long flags;
int desc_idx;
if (unlikely(len != 1)) {
@@ -3703,11 +3995,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
}
desc_idx = msg[0];
+
+ /*
+ * The context lookup uses the xarray, and lookups only require the RCU
+ * lock, not the full spinlock. So take the lock explicitly and keep it
+ * until a reference has been taken on the context, to ensure it can't be
+ * destroyed asynchronously before the reset handling is done.
+ */
+ xa_lock_irqsave(&guc->context_lookup, flags);
ce = g2h_context_lookup(guc, desc_idx);
+ if (ce)
+ intel_context_get(ce);
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+
if (unlikely(!ce))
return -EPROTO;
guc_handle_context_reset(guc, ce);
+ intel_context_put(ce);
return 0;
}
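
The reset-message fix is the classic lookup-then-reference idiom: hold the xarray lock only long enough to find the object and pin its lifetime, then drop the lock before the slow reset handling. In generic form, with placeholder types and names:

/* Reference under the lock; use and release outside it. */
xa_lock_irqsave(&xa, flags);
obj = xa_load(&xa, id);
if (obj)
	kref_get(&obj->ref);
xa_unlock_irqrestore(&xa, flags);

if (obj) {
	handle(obj);
	kref_put(&obj->ref, obj_release);
}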
@@ -3728,11 +4033,12 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
struct intel_engine_cs *engine;
+ struct intel_gt *gt = guc_to_gt(guc);
u8 guc_class, instance;
u32 reason;
if (unlikely(len != 3)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ drm_err(&gt->i915->drm, "Invalid length %u", len);
return -EPROTO;
}
@@ -3742,12 +4048,19 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
engine = guc_lookup_engine(guc, guc_class, instance);
if (unlikely(!engine)) {
- drm_err(&guc_to_gt(guc)->i915->drm,
+ drm_err(&gt->i915->drm,
"Invalid engine %d:%d", guc_class, instance);
return -EPROTO;
}
- intel_gt_handle_error(guc_to_gt(guc), engine->mask,
+ /*
+ * This is an unexpected failure of a hardware feature. So, log a real
+ * error message, not just the informational one that comes with the reset.
+ */
+ drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
+ guc_class, instance, engine->name, reason);
+
+ intel_gt_handle_error(gt, engine->mask,
I915_ERROR_CAPTURE,
"GuC failed to reset %s (reason=0x%08x)\n",
engine->name, reason);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index c7ef44fa0c36..5a95a9f0a8e3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m);
+void intel_guc_busyness_park(struct intel_gt *gt);
+void intel_guc_busyness_unpark(struct intel_gt *gt);
bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index ff4b6869b80b..d10b227ac4aa 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -54,65 +54,6 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
-static int intel_huc_rsa_data_create(struct intel_huc *huc)
-{
- struct intel_gt *gt = huc_to_gt(huc);
- struct intel_guc *guc = &gt->uc.guc;
- struct i915_vma *vma;
- size_t copied;
- void *vaddr;
- int err;
-
- err = i915_inject_probe_error(gt->i915, -ENXIO);
- if (err)
- return err;
-
- /*
- * HuC firmware will sit above GUC_GGTT_TOP and will not map
- * through GTT. Unfortunately, this means GuC cannot perform
- * the HuC auth. as the rsa offset now falls within the GuC
- * inaccessible range. We resort to perma-pinning an additional
- * vma within the accessible range that only contains the rsa
- * signature. The GuC can use this extra pinning to perform
- * the authentication since its GGTT offset will be GuC
- * accessible.
- */
- GEM_BUG_ON(huc->fw.rsa_size > PAGE_SIZE);
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
- i915_coherent_map_type(gt->i915,
- vma->obj, true));
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma, 0);
- err = PTR_ERR(vaddr);
- goto unpin_out;
- }
-
- copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
- i915_gem_object_unpin_map(vma->obj);
-
- if (copied < huc->fw.rsa_size) {
- err = -ENOMEM;
- goto unpin_out;
- }
-
- huc->rsa_data = vma;
-
- return 0;
-
-unpin_out:
- i915_vma_unpin_and_release(&vma, 0);
- return err;
-}
-
-static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
-{
- i915_vma_unpin_and_release(&huc->rsa_data, 0);
-}
-
int intel_huc_init(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
@@ -122,21 +63,10 @@ int intel_huc_init(struct intel_huc *huc)
if (err)
goto out;
- /*
- * HuC firmware image is outside GuC accessible range.
- * Copy the RSA signature out of the image into
- * a perma-pinned region set aside for it
- */
- err = intel_huc_rsa_data_create(huc);
- if (err)
- goto out_fini;
-
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);
return 0;
-out_fini:
- intel_uc_fw_fini(&huc->fw);
out:
i915_probe_error(i915, "failed with %d\n", err);
return err;
@@ -147,7 +77,6 @@ void intel_huc_fini(struct intel_huc *huc)
if (!intel_uc_fw_is_loadable(&huc->fw))
return;
- intel_huc_rsa_data_destroy(huc);
intel_uc_fw_fini(&huc->fw);
}
@@ -177,7 +106,7 @@ int intel_huc_auth(struct intel_huc *huc)
goto fail;
ret = intel_guc_auth_huc(guc,
- intel_guc_ggtt_offset(guc, huc->rsa_data));
+ intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
goto fail;
@@ -199,7 +128,7 @@ int intel_huc_auth(struct intel_huc *huc)
fail:
i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
- intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_FAIL);
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index daee43b661d4..ae8c8a6c8cc8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -15,8 +15,6 @@ struct intel_huc {
struct intel_uc_fw fw;
/* HuC-specific additions */
- struct i915_vma *rsa_data;
-
struct {
i915_reg_t reg;
u32 mask;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 2fef3b0bbe95..09ed29df67bc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -8,6 +8,7 @@
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
+#include "gt/intel_rps.h"
#include "intel_uc.h"
#include "i915_drv.h"
@@ -35,7 +36,7 @@ static void uc_expand_default_options(struct intel_uc *uc)
}
/* Intermediate platforms are HuC authentication only */
- if (IS_ALDERLAKE_S(i915)) {
+ if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) {
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
return;
}
@@ -462,6 +463,8 @@ static int __uc_init_hw(struct intel_uc *uc)
else
attempts = 1;
+ intel_rps_raise_unslice(&uc_to_gt(uc)->rps);
+
while (attempts--) {
/*
* Always reset the GuC just before (re)loading, so
@@ -499,6 +502,9 @@ static int __uc_init_hw(struct intel_uc *uc)
ret = intel_guc_slpc_enable(&guc->slpc);
if (ret)
goto err_submission;
+ } else {
+ /* Restore GT back to RPn for non-SLPC path */
+ intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
@@ -529,6 +535,9 @@ err_submission:
err_log_capture:
__uc_capture_load_err_log(uc);
err_out:
+ /* Return GT back to RPn */
+ intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
+
__uc_sanitize(uc);
if (!ret) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 3aa87be4f2e4..a5af05bde6f2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -48,22 +48,39 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
* firmware as TGL.
*/
-#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3), huc_def(tgl, 7, 9, 3)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
- fw_def(DG1, 0, guc_def(dg1, 62, 0, 0), huc_def(dg1, 7, 9, 3)) \
- fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
- fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0), huc_def(ehl, 9, 0, 0)) \
- fw_def(ELKHARTLAKE, 0, guc_def(ehl, 62, 0, 0), huc_def(ehl, 9, 0, 0)) \
- fw_def(ICELAKE, 0, guc_def(icl, 62, 0, 0), huc_def(icl, 9, 0, 0)) \
- fw_def(COMETLAKE, 5, guc_def(cml, 62, 0, 0), huc_def(cml, 4, 0, 0)) \
- fw_def(COMETLAKE, 0, guc_def(kbl, 62, 0, 0), huc_def(kbl, 4, 0, 0)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 62, 0, 0), huc_def(kbl, 4, 0, 0)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 62, 0, 0), huc_def(glk, 4, 0, 0)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 62, 0, 0), huc_def(kbl, 4, 0, 0)) \
- fw_def(BROXTON, 0, guc_def(bxt, 62, 0, 0), huc_def(bxt, 2, 0, 0)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 62, 0, 0), huc_def(skl, 2, 0, 0))
+#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
+ fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0)) \
+ fw_def(DG1, 0, guc_def(dg1, 62, 0, 0)) \
+ fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0)) \
+ fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0)) \
+ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 62, 0, 0)) \
+ fw_def(ICELAKE, 0, guc_def(icl, 62, 0, 0)) \
+ fw_def(COMETLAKE, 5, guc_def(cml, 62, 0, 0)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 62, 0, 0)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 62, 0, 0)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 62, 0, 0)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 62, 0, 0)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 62, 0, 0)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 62, 0, 0))
+
+#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
+ fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
+ fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \
+ fw_def(DG1, 0, huc_def(dg1, 7, 9, 3)) \
+ fw_def(ROCKETLAKE, 0, huc_def(tgl, 7, 9, 3)) \
+ fw_def(TIGERLAKE, 0, huc_def(tgl, 7, 9, 3)) \
+ fw_def(JASPERLAKE, 0, huc_def(ehl, 9, 0, 0)) \
+ fw_def(ELKHARTLAKE, 0, huc_def(ehl, 9, 0, 0)) \
+ fw_def(ICELAKE, 0, huc_def(icl, 9, 0, 0)) \
+ fw_def(COMETLAKE, 5, huc_def(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, huc_def(kbl, 4, 0, 0)) \
+ fw_def(COFFEELAKE, 0, huc_def(kbl, 4, 0, 0)) \
+ fw_def(GEMINILAKE, 0, huc_def(glk, 4, 0, 0)) \
+ fw_def(KABYLAKE, 0, huc_def(kbl, 4, 0, 0)) \
+ fw_def(BROXTON, 0, huc_def(bxt, 2, 0, 0)) \
+ fw_def(SKYLAKE, 0, huc_def(skl, 2, 0, 0))
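
Splitting the combined table into INTEL_GUC_FIRMWARE_DEFS and INTEL_HUC_FIRMWARE_DEFS means each row now carries a single blob. For orientation, one GuC row expanded through MAKE_FW_LIST (defined further down) would look roughly like this, with the path following the __MAKE_UC_FW_PATH scheme below (illustrative, not generated output):

/* Illustrative expansion of fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0)) */
{
	.p = INTEL_TIGERLAKE,
	.rev = 0,
	.blob = { /* path "i915/tgl_guc_62.0.0.bin", wants v62.0 */ },
},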
#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
"i915/" \
@@ -79,11 +96,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
/* All blobs need to be declared via MODULE_FIRMWARE() */
-#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
- MODULE_FIRMWARE(guc_); \
- MODULE_FIRMWARE(huc_);
+#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
+ MODULE_FIRMWARE(uc_);
-INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH)
+INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
+INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
/* The below structs and macros are used to iterate across the list of blobs */
struct __packed uc_fw_blob {
@@ -106,31 +123,47 @@ struct __packed uc_fw_blob {
struct __packed uc_fw_platform_requirement {
enum intel_platform p;
u8 rev; /* first platform rev using this FW */
- const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES];
+ const struct uc_fw_blob blob;
};
-#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \
+#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
.p = INTEL_##platform_, \
.rev = revid_, \
- .blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \
- .blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \
+ .blob = uc_, \
},
+struct fw_blobs_by_type {
+ const struct uc_fw_platform_requirement *blobs;
+ u32 count;
+};
+
static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
- static const struct uc_fw_platform_requirement fw_blobs[] = {
- INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB)
+ static const struct uc_fw_platform_requirement blobs_guc[] = {
+ INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
+ };
+ static const struct uc_fw_platform_requirement blobs_huc[] = {
+ INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
};
+ static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
+ [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
+ [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
+ };
+ static const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
+ u32 fw_count;
u8 rev = INTEL_REVID(i915);
int i;
- for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
+ GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
+ fw_blobs = blobs_all[uc_fw->type].blobs;
+ fw_count = blobs_all[uc_fw->type].count;
+
+ for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
- const struct uc_fw_blob *blob =
- &fw_blobs[i].blobs[uc_fw->type];
+ const struct uc_fw_blob *blob = &fw_blobs[i].blob;
uc_fw->path = blob->path;
uc_fw->major_ver_wanted = blob->major;
uc_fw->minor_ver_wanted = blob->minor;
@@ -140,7 +173,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
/* make sure the list is ordered as expected */
if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
- for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) {
+ for (i = 1; i < fw_count; i++) {
if (fw_blobs[i].p < fw_blobs[i - 1].p)
continue;
@@ -322,13 +355,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
- if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
- drm_warn(&i915->drm, "%s firmware %s: unexpected key size: %u != %u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
- css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
- err = -EPROTO;
- goto fail;
- }
uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
/* At least, it should have header, uCode and RSA. Size of all three. */
@@ -540,10 +566,79 @@ fail:
i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
err);
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return err;
}
+static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
+{
+ /*
+ * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
+ * while it reads it from the 64 RSA registers if it is smaller.
+ * The HuC RSA is always read from memory.
+ */
+ return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
+}
+
+static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct i915_vma *vma;
+ size_t copied;
+ void *vaddr;
+ int err;
+
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
+
+ if (!uc_fw_need_rsa_in_memory(uc_fw))
+ return 0;
+
+ /*
+ * uC firmwares will sit above GUC_GGTT_TOP and will not map through
+ * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
+ * authentication from memory, as the RSA offset now falls within the
+ * GuC inaccessible range. We resort to perma-pinning an additional vma
+ * within the accessible range that only contains the RSA signature.
+ * The GuC HW can use this extra pinning to perform the authentication
+ * since its GGTT offset will be GuC accessible.
+ */
+ GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
+ vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(gt->i915, vma->obj, true));
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma, 0);
+ err = PTR_ERR(vaddr);
+ goto unpin_out;
+ }
+
+ copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
+ i915_gem_object_unpin_map(vma->obj);
+
+ if (copied < uc_fw->rsa_size) {
+ err = -ENOMEM;
+ goto unpin_out;
+ }
+
+ uc_fw->rsa_data = vma;
+
+ return 0;
+
+unpin_out:
+ i915_vma_unpin_and_release(&vma, 0);
+ return err;
+}
+
+static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
+{
+ i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
+}
+
int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
int err;
@@ -558,14 +653,29 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
if (err) {
DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
intel_uc_fw_type_repr(uc_fw->type), err);
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+ goto out;
}
+ err = uc_fw_rsa_data_create(uc_fw);
+ if (err) {
+ DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto out_unpin;
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(uc_fw->obj);
+out:
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
return err;
}
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
+ uc_fw_rsa_data_destroy(uc_fw);
+
if (i915_gem_object_has_pinned_pages(uc_fw->obj))
i915_gem_object_unpin_pages(uc_fw->obj);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 1e00bf65639e..d9d1dc0b4cbb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -32,11 +32,12 @@ struct intel_gt;
* | | MISSING <--/ | \--> ERROR |
* | fetch | V |
* | | AVAILABLE |
- * +------------+- | -+
+ * +------------+- | \ -+
+ * | | | \--> INIT FAIL |
* | init | V |
* | | /------> LOADABLE <----<-----------\ |
* +------------+- \ / \ \ \ -+
- * | | FAIL <--< \--> TRANSFERRED \ |
+ * | | LOAD FAIL <--< \--> TRANSFERRED \ |
* | upload | \ / \ / |
* | | \---------/ \--> RUNNING |
* +------------+---------------------------------------------------+
@@ -50,8 +51,9 @@ enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_INIT_FAIL, /* failed to prepare fw objects for load */
INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
- INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
+ INTEL_UC_FIRMWARE_LOAD_FAIL, /* failed to xfer or init/auth the fw */
INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
};
@@ -84,6 +86,7 @@ struct intel_uc_fw {
* or during a GT reset (mutex guarantees single threaded).
*/
struct i915_vma dummy;
+ struct i915_vma *rsa_data;
/*
* The firmware build process will generate a version header file with major and
@@ -130,10 +133,12 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
return "ERROR";
case INTEL_UC_FIRMWARE_AVAILABLE:
return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_INIT_FAIL:
+ return "INIT FAIL";
case INTEL_UC_FIRMWARE_LOADABLE:
return "LOADABLE";
- case INTEL_UC_FIRMWARE_FAIL:
- return "FAIL";
+ case INTEL_UC_FIRMWARE_LOAD_FAIL:
+ return "LOAD FAIL";
case INTEL_UC_FIRMWARE_TRANSFERRED:
return "TRANSFERRED";
case INTEL_UC_FIRMWARE_RUNNING:
@@ -155,7 +160,8 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
return -ENOENT;
case INTEL_UC_FIRMWARE_ERROR:
return -ENOEXEC;
- case INTEL_UC_FIRMWARE_FAIL:
+ case INTEL_UC_FIRMWARE_INIT_FAIL:
+ case INTEL_UC_FIRMWARE_LOAD_FAIL:
return -EIO;
case INTEL_UC_FIRMWARE_SELECTED:
return -ESTALE;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index fb0e4a7bd8ca..d3327b802b76 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -3,8 +3,21 @@
* Copyright © 2021 Intel Corporation
*/
+#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIMEDOUT;
+
+ return err;
+}
+
static struct i915_request *nop_user_request(struct intel_context *ce,
struct i915_request *from)
{
@@ -110,12 +123,172 @@ err:
return ret;
}
+/*
+ * intel_guc_steal_guc_ids - Test to exhaust all guc_ids and then steal one
+ *
+ * This test creates a spinner which is used to block all subsequent submissions
+ * until it completes. Next, a loop creates a context and a NOP request each
+ * iteration until the guc_ids are exhausted (request creation returns -EAGAIN).
+ * The spinner is ended, unblocking all requests created in the loop. At this
+ * point all guc_ids are exhausted but are available to steal. Try to create
+ * another request, which should successfully steal a guc_id. Wait on the last
+ * request to complete, idle the GPU, verify a guc_id was stolen via a counter,
+ * and exit the test. The test also artificially reduces the number of guc_ids
+ * so that it runs in a timely manner.
+ */
+static int intel_guc_steal_guc_ids(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
+ int ret, sv, context_index = 0;
+ intel_wakeref_t wakeref;
+ struct intel_engine_cs *engine;
+ struct intel_context **ce;
+ struct igt_spinner spin;
+ struct i915_request *spin_rq = NULL, *rq, *last = NULL;
+ int number_guc_id_stolen = guc->number_guc_id_stolen;
+
+ ce = kzalloc(sizeof(*ce) * GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL);
+ if (!ce) {
+ pr_err("Context array allocation failed\n");
+ return -ENOMEM;
+ }
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ engine = intel_selftest_find_any_engine(gt);
+ sv = guc->submission_state.num_guc_ids;
+ guc->submission_state.num_guc_ids = 4096;
+
+ /* Create spinner to block requests in below loop */
+ ce[context_index] = intel_context_create(engine);
+ if (IS_ERR(ce[context_index])) {
+ ret = PTR_ERR(ce[context_index]);
+ ce[context_index] = NULL;
+ pr_err("Failed to create context: %d\n", ret);
+ goto err_wakeref;
+ }
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ pr_err("Failed to create spinner: %d\n", ret);
+ goto err_contexts;
+ }
+ spin_rq = igt_spinner_create_request(&spin, ce[context_index],
+ MI_ARB_CHECK);
+ if (IS_ERR(spin_rq)) {
+ ret = PTR_ERR(spin_rq);
+ pr_err("Failed to create spinner request: %d\n", ret);
+ goto err_contexts;
+ }
+ ret = request_add_spin(spin_rq, &spin);
+ if (ret) {
+ pr_err("Failed to add Spinner request: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ /* Use all guc_ids */
+ while (ret != -EAGAIN) {
+ ce[++context_index] = intel_context_create(engine);
+ if (IS_ERR(ce[context_index])) {
+ ret = PTR_ERR(ce[context_index--]);
+ ce[context_index] = NULL;
+ pr_err("Failed to create context: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ rq = nop_user_request(ce[context_index], spin_rq);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ rq = NULL;
+ if (ret != -EAGAIN) {
+ pr_err("Failed to create request, %d: %d\n",
+ context_index, ret);
+ goto err_spin_rq;
+ }
+ } else {
+ if (last)
+ i915_request_put(last);
+ last = rq;
+ }
+ }
+
+ /* Release blocked requests */
+ igt_spinner_end(&spin);
+ ret = intel_selftest_wait_for_rq(spin_rq);
+ if (ret) {
+ pr_err("Spin request failed to complete: %d\n", ret);
+ i915_request_put(last);
+ goto err_spin_rq;
+ }
+ i915_request_put(spin_rq);
+ igt_spinner_fini(&spin);
+ spin_rq = NULL;
+
+ /* Wait for last request */
+ ret = i915_request_wait(last, 0, HZ * 30);
+ i915_request_put(last);
+ if (ret < 0) {
+ pr_err("Last request failed to complete: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ /* Try to steal guc_id */
+ rq = nop_user_request(ce[context_index], NULL);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ pr_err("Failed to steal guc_id, %d: %d\n", context_index, ret);
+ goto err_spin_rq;
+ }
+
+ /* Wait for request with stolen guc_id */
+ ret = i915_request_wait(rq, 0, HZ);
+ i915_request_put(rq);
+ if (ret < 0) {
+ pr_err("Request with stolen guc_id failed to complete: %d\n",
+ ret);
+ goto err_spin_rq;
+ }
+
+ /* Wait for idle */
+ ret = intel_gt_wait_for_idle(gt, HZ * 30);
+ if (ret < 0) {
+ pr_err("GT failed to idle: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ /* Verify a guc_id was stolen */
+ if (guc->number_guc_id_stolen == number_guc_id_stolen) {
+ pr_err("No guc_id was stolen");
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ }
+
+err_spin_rq:
+ if (spin_rq) {
+ igt_spinner_end(&spin);
+ intel_selftest_wait_for_rq(spin_rq);
+ i915_request_put(spin_rq);
+ igt_spinner_fini(&spin);
+ intel_gt_wait_for_idle(gt, HZ * 30);
+ }
+err_contexts:
+ for (; context_index >= 0 && ce[context_index]; --context_index)
+ intel_context_put(ce[context_index]);
+err_wakeref:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ kfree(ce);
+ guc->submission_state.num_guc_ids = sv;
+
+ return ret;
+}
+
int intel_guc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_scrub_ctbs),
+ SUBTEST(intel_guc_steal_guc_ids),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index 50953c8e8b53..1297ddbf7f88 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -167,7 +167,7 @@ int intel_guc_multi_lrc_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_multi_lrc_basic),
};
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 11a8baba6822..9ec064199364 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -427,7 +427,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
plane->tiled = !!(val & SPRITE_TILED);
color_order = !!(val & SPRITE_RGB_ORDER_RGBX);
- yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >>
+ yuv_order = (val & SPRITE_YUV_ORDER_MASK) >>
_SPRITE_YUV_ORDER_SHIFT;
fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 53d0cb327539..99d1781fa5f0 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -446,17 +446,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
return (e->val64 != 0);
else
- return (e->val64 & _PAGE_PRESENT);
+ return (e->val64 & GEN8_PAGE_PRESENT);
}
static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
- e->val64 &= ~_PAGE_PRESENT;
+ e->val64 &= ~GEN8_PAGE_PRESENT;
}
static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
- e->val64 |= _PAGE_PRESENT;
+ e->val64 |= GEN8_PAGE_PRESENT;
}
static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
@@ -2439,7 +2439,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
/* The entry parameters like present/writeable/cache type are
* set to the same as i915's scratch page tree.
*/
- se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+ se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
if (type == GTT_TYPE_PPGTT_PDE_PT)
se.val64 |= PPAT_CACHED;
@@ -2896,7 +2896,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
for (idx = 0; idx < num_low; idx++) {
pte = mm->ggtt_mm.host_ggtt_aperture[idx];
- if (pte & _PAGE_PRESENT)
+ if (pte & GEN8_PAGE_PRESENT)
write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
}
@@ -2904,7 +2904,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
for (idx = 0; idx < num_hi; idx++) {
pte = mm->ggtt_mm.host_ggtt_hidden[idx];
- if (pte & _PAGE_PRESENT)
+ if (pte & GEN8_PAGE_PRESENT)
write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index cbac409f6c8a..f0b69e4dcb52 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -205,7 +205,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
- gvt->gt = &i915->gt;
+ gvt->gt = to_gt(i915);
i915->gvt = gvt;
init_device_info(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6c804102528b..42a0c9ae0a73 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1386,7 +1386,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
enum intel_engine_id i;
int ret;
- ppgtt = i915_ppgtt_create(&i915->gt, I915_BO_ALLOC_PM_EARLY);
+ ppgtt = i915_ppgtt_create(to_gt(i915), I915_BO_ALLOC_PM_EARLY);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 3103c1e1fd14..ee2b3a375362 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -426,8 +426,9 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
return true;
}
-int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
+int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
+ struct dma_fence *fence = &rq->fence;
struct i915_active_fence *active;
int err;
@@ -436,7 +437,7 @@ int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
if (err)
return err;
- active = active_instance(ref, idx);
+ active = active_instance(ref, i915_request_timeline(rq)->fence_context);
if (!active) {
err = -ENOMEM;
goto out;
@@ -477,29 +478,6 @@ __i915_active_set_fence(struct i915_active *ref,
return prev;
}
-static struct i915_active_fence *
-__active_fence(struct i915_active *ref, u64 idx)
-{
- struct active_node *it;
-
- it = __active_lookup(ref, idx);
- if (unlikely(!it)) { /* Contention with parallel tree builders! */
- spin_lock_irq(&ref->tree_lock);
- it = __active_lookup(ref, idx);
- spin_unlock_irq(&ref->tree_lock);
- }
- GEM_BUG_ON(!it); /* slot must be preallocated */
-
- return &it->base;
-}
-
-struct dma_fence *
-__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
-{
- /* Only valid while active, see i915_active_acquire_for_context() */
- return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
-}
-
struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 5fcdb0e2bc9e..7eb44132183a 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -164,26 +164,11 @@ void __i915_active_init(struct i915_active *ref,
__i915_active_init(ref, active, retire, flags, &__mkey, &__wkey); \
} while (0)
-struct dma_fence *
-__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence);
-int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence);
-
-static inline int
-i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
-{
- return i915_active_ref(ref,
- i915_request_timeline(rq)->fence_context,
- &rq->fence);
-}
+int i915_active_add_request(struct i915_active *ref, struct i915_request *rq);
struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
-static inline bool i915_active_has_exclusive(struct i915_active *ref)
-{
- return rcu_access_pointer(ref->excl.fence);
-}
-
int __i915_active_wait(struct i915_active *ref, int state);
static inline int i915_active_wait(struct i915_active *ref)
{
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index c149f348a972..b02a78ac87db 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -15,8 +15,6 @@
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
-#include "i915_utils.h"
-
struct i915_active_fence {
struct dma_fence __rcu *fence;
struct dma_fence_cb cb;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index fe638b5da7c0..e0e052cdf8b8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -48,7 +48,6 @@
#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_scheduler.h"
-#include "i915_trace.h"
#include "intel_pm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -65,7 +64,8 @@ static int i915_capabilities(struct seq_file *m, void *data)
intel_device_info_print_static(INTEL_INFO(i915), &p);
intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
- intel_gt_info_print(&i915->gt.info, &p);
+ i915_print_iommu_status(i915, &p);
+ intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
@@ -293,7 +293,7 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
gpu = NULL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES);
+ gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
@@ -351,7 +351,7 @@ static const struct file_operations i915_error_state_fops = {
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct drm_printer p = drm_seq_file_printer(m);
intel_gt_pm_frequency_dump(gt, &p);
@@ -439,11 +439,11 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct intel_rps *rps = &to_gt(dev_priv)->rps;
seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
- seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
+ seq_printf(m, "GPU busy? %s\n", yesno(to_gt(dev_priv)->awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -476,7 +476,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_printf(m, "Runtime power status: %s\n",
enableddisabled(!dev_priv->power_domains.init_wakeref));
- seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
+ seq_printf(m, "GPU idle: %s\n", yesno(!to_gt(dev_priv)->awake));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
@@ -508,18 +508,18 @@ static int i915_engine_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
seq_printf(m, "GT awake? %s [%d], %llums\n",
- yesno(i915->gt.awake),
- atomic_read(&i915->gt.wakeref.count),
- ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
+ yesno(to_gt(i915)->awake),
+ atomic_read(&to_gt(i915)->wakeref.count),
+ ktime_to_ms(intel_gt_get_awake_time(to_gt(i915))));
seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
- i915->gt.clock_frequency,
- i915->gt.clock_period_ns);
+ to_gt(i915)->clock_frequency,
+ to_gt(i915)->clock_period_ns);
p = drm_seq_file_printer(m);
for_each_uabi_engine(engine, i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);
+ intel_gt_show_timelines(to_gt(i915), &p, i915_request_show_with_schedule);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -558,14 +558,14 @@ static int i915_wedged_get(void *data, u64 *val)
{
struct drm_i915_private *i915 = data;
- return intel_gt_debugfs_reset_show(&i915->gt, val);
+ return intel_gt_debugfs_reset_show(to_gt(i915), val);
}
static int i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- return intel_gt_debugfs_reset_store(&i915->gt, val);
+ return intel_gt_debugfs_reset_store(to_gt(i915), val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
@@ -581,7 +581,7 @@ i915_perf_noa_delay_set(void *data, u64 val)
* This would lead to infinite waits as we're doing timestamp
* difference on the CS with only 32bits.
*/
- if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
+ if (intel_gt_ns_to_clock_interval(to_gt(i915), val) > U32_MAX)
return -EINVAL;
atomic64_set(&i915->perf.noa_programming_delay, val);
@@ -666,16 +666,18 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
+ unsigned int flags;
int ret;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
- ret = gt_drop_caches(&i915->gt, val);
+ ret = gt_drop_caches(to_gt(i915), val);
if (ret)
return ret;
fs_reclaim_acquire(GFP_KERNEL);
+ flags = memalloc_noreclaim_save();
if (val & DROP_BOUND)
i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
@@ -684,6 +686,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_SHRINK_ALL)
i915_gem_shrink_all(i915);
+ memalloc_noreclaim_restore(flags);
fs_reclaim_release(GFP_KERNEL);
if (val & DROP_RCU)
@@ -702,7 +705,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
return intel_sseu_status(m, gt);
}
@@ -711,14 +714,14 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- return intel_gt_pm_debugfs_forcewake_user_open(&i915->gt);
+ return intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- return intel_gt_pm_debugfs_forcewake_user_release(&i915->gt);
+ return intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
}
static const struct file_operations i915_forcewake_fops = {
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 20424275d41e..783c8676eee2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -40,8 +40,8 @@ static int notify_guc(struct drm_i915_private *i915)
{
int ret = 0;
- if (intel_uc_uses_guc_submission(&i915->gt.uc))
- ret = intel_guc_global_policies_update(&i915->gt.uc.guc);
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ ret = intel_guc_global_policies_update(&to_gt(i915)->uc.guc);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_deps.c b/drivers/gpu/drm/i915/i915_deps.c
new file mode 100644
index 000000000000..999210b37325
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_deps.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/slab.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+
+#include "i915_deps.h"
+
+/**
+ * DOC: Set of utilities to dynamically collect dependencies into a
+ * structure which is fed into the GT migration code.
+ *
+ * Once we can do async unbinding, this is also needed to coalesce
+ * the migration fence with the unbind fences if these are coalesced
+ * post-migration.
+ *
+ * While collecting the individual dependencies, we store the refcounted
+ * struct dma_fence pointers in a realloc-managed pointer array, since
+ * that can be easily fed into a dma_fence_array. Other options are
+ * available, like for example an xarray for similarity with drm/sched.
+ * Can be changed easily if needed.
+ *
+ * A struct i915_deps needs to be initialized using i915_deps_init().
+ * If i915_deps_add_dependency() or i915_deps_add_resv() returns an
+ * error code, it will internally call i915_deps_fini(), which frees
+ * all internal references and allocations.
+ */
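A usage sketch of the flow this block describes — the surrounding function, its parameters and the chosen ttm_operation_ctx values are illustrative, while the i915_deps_*() signatures are the ones this patch declares in i915_deps.h; the caller is assumed to hold the reservation lock, as i915_deps_add_resv() asserts:

static int example_collect_and_wait(struct dma_resv *resv,
				    struct dma_fence *extra)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct i915_deps deps;
	int ret;

	i915_deps_init(&deps, GFP_KERNEL);

	/* On failure these helpers call i915_deps_fini() internally,
	 * so only the success path needs an explicit fini. */
	ret = i915_deps_add_dependency(&deps, extra, &ctx);
	if (ret)
		return ret;

	ret = i915_deps_add_resv(&deps, resv, &ctx); /* resv must be held */
	if (ret)
		return ret;

	ret = i915_deps_sync(&deps, &ctx);
	i915_deps_fini(&deps);
	return ret;
}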
+
+/* Min number of fence pointers in the array when an allocation occurs. */
+#define I915_DEPS_MIN_ALLOC_CHUNK 8U
+
+static void i915_deps_reset_fences(struct i915_deps *deps)
+{
+ if (deps->fences != &deps->single)
+ kfree(deps->fences);
+ deps->num_deps = 0;
+ deps->fences_size = 1;
+ deps->fences = &deps->single;
+}
+
+/**
+ * i915_deps_init - Initialize an i915_deps structure
+ * @deps: Pointer to the i915_deps structure to initialize.
+ * @gfp: The allocation mode for subsequent allocations.
+ */
+void i915_deps_init(struct i915_deps *deps, gfp_t gfp)
+{
+ deps->fences = NULL;
+ deps->gfp = gfp;
+ i915_deps_reset_fences(deps);
+}
+
+/**
+ * i915_deps_fini - Finalize an i915_deps structure
+ * @deps: Pointer to the i915_deps structure to finalize.
+ *
+ * This function drops all fence references taken, conditionally frees and
+ * then resets the fences array.
+ */
+void i915_deps_fini(struct i915_deps *deps)
+{
+ unsigned int i;
+
+ for (i = 0; i < deps->num_deps; ++i)
+ dma_fence_put(deps->fences[i]);
+
+ if (deps->fences != &deps->single)
+ kfree(deps->fences);
+}
+
+static int i915_deps_grow(struct i915_deps *deps, struct dma_fence *fence,
+ const struct ttm_operation_ctx *ctx)
+{
+ int ret;
+
+ if (deps->num_deps >= deps->fences_size) {
+ unsigned int new_size = 2 * deps->fences_size;
+ struct dma_fence **new_fences;
+
+ new_size = max(new_size, I915_DEPS_MIN_ALLOC_CHUNK);
+ new_fences = kmalloc_array(new_size, sizeof(*new_fences), deps->gfp);
+ if (!new_fences)
+ goto sync;
+
+ memcpy(new_fences, deps->fences,
+ deps->fences_size * sizeof(*new_fences));
+ swap(new_fences, deps->fences);
+ if (new_fences != &deps->single)
+ kfree(new_fences);
+ deps->fences_size = new_size;
+ }
+ deps->fences[deps->num_deps++] = dma_fence_get(fence);
+ return 0;
+
+sync:
+ if (ctx->no_wait_gpu && !dma_fence_is_signaled(fence)) {
+ ret = -EBUSY;
+ goto unref;
+ }
+
+ ret = dma_fence_wait(fence, ctx->interruptible);
+ if (ret)
+ goto unref;
+
+ ret = fence->error;
+ if (ret)
+ goto unref;
+
+ return 0;
+
+unref:
+ i915_deps_fini(deps);
+ return ret;
+}
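The sizing rule in i915_deps_grow() above amortizes reallocations: the capacity doubles but never drops below the minimum chunk, which also covers the initial single-fence capacity of 1. A standalone model of only that rule (the dma-fence handling and the synchronous-wait fallback are not reproduced here):

#include <stdio.h>

#define MIN_ALLOC_CHUNK 8U /* mirrors I915_DEPS_MIN_ALLOC_CHUNK */

/* Next capacity once num_deps has reached fences_size. */
static unsigned int next_capacity(unsigned int fences_size)
{
	unsigned int new_size = 2 * fences_size;

	return new_size > MIN_ALLOC_CHUNK ? new_size : MIN_ALLOC_CHUNK;
}

int main(void)
{
	unsigned int cap = 1; /* i915_deps_reset_fences() starts at 1 */
	int i;

	for (i = 0; i < 4; i++) {
		printf("%u -> %u\n", cap, next_capacity(cap));
		cap = next_capacity(cap);
	}
	return 0; /* prints 1 -> 8, 8 -> 16, 16 -> 32, 32 -> 64 */
}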
+
+/**
+ * i915_deps_sync - Wait for all the fences in the dependency collection
+ * @deps: Pointer to the i915_deps structure the fences of which to wait for.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how the waits
+ * should be performed.
+ *
+ * This function waits for fences in the dependency collection. If it
+ * encounters an error during the wait or a fence error, the wait for
+ * further fences is aborted and the error returned.
+ *
+ * Return: Zero if successful, negative error code on error.
+ */
+int i915_deps_sync(const struct i915_deps *deps, const struct ttm_operation_ctx *ctx)
+{
+ struct dma_fence **fences = deps->fences;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < deps->num_deps; ++i, ++fences) {
+ if (ctx->no_wait_gpu && !dma_fence_is_signaled(*fences)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ ret = dma_fence_wait(*fences, ctx->interruptible);
+ if (!ret)
+ ret = (*fences)->error;
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i915_deps_add_dependency - Add a fence to the dependency collection
+ * @deps: Pointer to the i915_deps structure a fence is to be added to.
+ * @fence: The fence to add.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how waits are to
+ * be performed if waiting.
+ *
+ * Adds a fence to the dependency collection, and takes a reference on it.
+ * If the fence context is not zero and there was a later fence from the
+ * same fence context already added, then the fence is not added to the
+ * dependency collection. If the fence context is not zero and there was
+ * an earlier fence already added, then the fence will replace the older
+ * fence from the same context and the reference on the earlier fence will
+ * be dropped.
+ * If there is a failure to allocate memory to accommodate the new fence,
+ * the new fence will instead be waited for and an error may be returned,
+ * depending on the value of @ctx or if there was a fence error. If an
+ * error is returned, the dependency collection will be finalized and all
+ * fence references dropped.
+ *
+ * Return: 0 on success. Negative error code on error.
+ */
+int i915_deps_add_dependency(struct i915_deps *deps,
+ struct dma_fence *fence,
+ const struct ttm_operation_ctx *ctx)
+{
+ unsigned int i;
+ int ret;
+
+ if (!fence)
+ return 0;
+
+ if (dma_fence_is_signaled(fence)) {
+ ret = fence->error;
+ if (ret)
+ i915_deps_fini(deps);
+ return ret;
+ }
+
+ for (i = 0; i < deps->num_deps; ++i) {
+ struct dma_fence *entry = deps->fences[i];
+
+ if (!entry->context || entry->context != fence->context)
+ continue;
+
+ if (dma_fence_is_later(fence, entry)) {
+ dma_fence_put(entry);
+ deps->fences[i] = dma_fence_get(fence);
+ }
+
+ return 0;
+ }
+
+ return i915_deps_grow(deps, fence, ctx);
+}
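The replacement rule in the loop above can be modelled in isolation: fences from the same non-zero context collapse to the latest one, while context 0 never merges. A standalone sketch that reduces fences to {context, seqno} pairs — reference counting and the allocation fallback are omitted, and dma_fence_is_later() is approximated by a plain seqno compare:

#include <stdio.h>

struct fake_fence {
	unsigned long long context; /* 0 means "never merge" */
	unsigned int seqno;	    /* stands in for dma_fence_is_later() */
};

#define MAX_DEPS 16

static struct fake_fence deps[MAX_DEPS];
static unsigned int num_deps;

static void add_dependency(struct fake_fence fence)
{
	unsigned int i;

	for (i = 0; i < num_deps; i++) {
		if (!deps[i].context || deps[i].context != fence.context)
			continue;
		/* A later fence from the same context replaces the older
		 * one; an earlier one is simply dropped. Either way, done. */
		if (fence.seqno > deps[i].seqno)
			deps[i] = fence;
		return;
	}
	deps[num_deps++] = fence;
}

int main(void)
{
	unsigned int i;

	add_dependency((struct fake_fence){ .context = 1, .seqno = 10 });
	add_dependency((struct fake_fence){ .context = 2, .seqno = 5 });
	add_dependency((struct fake_fence){ .context = 1, .seqno = 20 }); /* replaces */
	add_dependency((struct fake_fence){ .context = 1, .seqno = 15 }); /* dropped */
	add_dependency((struct fake_fence){ .context = 0, .seqno = 1 });
	add_dependency((struct fake_fence){ .context = 0, .seqno = 2 }); /* both kept */

	printf("num_deps = %u\n", num_deps); /* 4 */
	for (i = 0; i < num_deps; i++)
		printf("context %llu, seqno %u\n",
		       deps[i].context, deps[i].seqno);
	return 0;
}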
+
+/**
+ * i915_deps_add_resv - Add the fences of a reservation object to a dependency
+ * collection.
+ * @deps: Pointer to the i915_deps structure a fence is to be added to.
+ * @resv: The reservation object, the fences of which to add.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how waits are to
+ * be performed if waiting.
+ *
+ * Calls i915_deps_add_dependency() on the indicated fences of @resv.
+ *
+ * Return: Zero on success. Negative error code on error.
+ */
+int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,
+ const struct ttm_operation_ctx *ctx)
+{
+ struct dma_resv_iter iter;
+ struct dma_fence *fence;
+
+ dma_resv_assert_held(resv);
+ dma_resv_for_each_fence(&iter, resv, true, fence) {
+ int ret = i915_deps_add_dependency(deps, fence, ctx);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_deps.h b/drivers/gpu/drm/i915/i915_deps.h
new file mode 100644
index 000000000000..d76c0106c910
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_deps.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _I915_DEPS_H_
+#define _I915_DEPS_H_
+
+#include <linux/types.h>
+
+struct ttm_operation_ctx;
+struct dma_fence;
+struct dma_resv;
+
+/**
+ * struct i915_deps - Collect dependencies into a single dma-fence
+ * @single: Storage for pointer if the collection is a single fence.
+ * @fences: Allocated array of fence pointers if more than a single fence;
+ * otherwise points to the address of @single.
+ * @num_deps: Current number of dependency fences.
+ * @fences_size: Size of the @fences array in number of pointers.
+ * @gfp: Allocation mode.
+ */
+struct i915_deps {
+ struct dma_fence *single;
+ struct dma_fence **fences;
+ unsigned int num_deps;
+ unsigned int fences_size;
+ gfp_t gfp;
+};
+
+void i915_deps_init(struct i915_deps *deps, gfp_t gfp);
+
+void i915_deps_fini(struct i915_deps *deps);
+
+int i915_deps_add_dependency(struct i915_deps *deps,
+ struct dma_fence *fence,
+ const struct ttm_operation_ctx *ctx);
+
+int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,
+ const struct ttm_operation_ctx *ctx);
+
+int i915_deps_sync(const struct i915_deps *deps,
+ const struct ttm_operation_ctx *ctx);
+#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_driver.c
index b18a250e5d2e..95174938b160 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -29,8 +29,8 @@
#include <linux/acpi.h>
#include <linux/device.h>
-#include <linux/oom.h>
#include <linux/module.h>
+#include <linux/oom.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -48,12 +48,14 @@
#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
-#include "display/intel_dmc.h"
#include "display/intel_display_types.h"
+#include "display/intel_dmc.h"
#include "display/intel_dp.h"
+#include "display/intel_dpt.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
+#include "display/intel_pch_refclk.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
@@ -70,6 +72,7 @@
#include "pxp/intel_pxp_pm.h"
#include "i915_debugfs.h"
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_ioc32.h"
#include "i915_irq.h"
@@ -79,7 +82,6 @@
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
-#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_dram.h"
#include "intel_gvt.h"
@@ -89,7 +91,7 @@
#include "intel_region_ttm.h"
#include "vlv_suspend.h"
-static const struct drm_driver driver;
+static const struct drm_driver i915_drm_driver;
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
@@ -289,7 +291,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
static void sanitize_gpu(struct drm_i915_private *i915)
{
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(&i915->gt, ALL_ENGINES);
+ __intel_gt_reset(to_gt(i915), ALL_ENGINES);
}
/**
@@ -312,8 +314,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_device_info_subplatform_init(dev_priv);
intel_step_init(dev_priv);
+ intel_gt_init_early(to_gt(dev_priv), dev_priv);
intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
- intel_uncore_init_early(&dev_priv->uncore, dev_priv);
+ intel_uncore_init_early(&dev_priv->uncore, to_gt(dev_priv));
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
@@ -322,7 +325,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->sb_lock);
cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
- mutex_init(&dev_priv->av_mutex);
+ mutex_init(&dev_priv->audio.mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
mutex_init(&dev_priv->hdcp_comp_mutex);
@@ -344,7 +347,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_wopcm_init_early(&dev_priv->wopcm);
- intel_gt_init_early(&dev_priv->gt, dev_priv);
+ __intel_gt_init_early(to_gt(dev_priv), dev_priv);
i915_gem_init_early(dev_priv);
@@ -365,7 +368,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release(&dev_priv->gt);
+ intel_gt_driver_late_release(to_gt(dev_priv));
intel_region_ttm_device_fini(dev_priv);
err_ttm:
vlv_suspend_cleanup(dev_priv);
@@ -384,7 +387,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(dev_priv);
i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release(&dev_priv->gt);
+ intel_gt_driver_late_release(to_gt(dev_priv));
intel_region_ttm_device_fini(dev_priv);
vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
@@ -415,15 +418,19 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
- ret = intel_uncore_init_mmio(&dev_priv->uncore);
+ ret = intel_uncore_setup_mmio(&dev_priv->uncore);
if (ret < 0)
goto err_bridge;
+ ret = intel_uncore_init_mmio(&dev_priv->uncore);
+ if (ret)
+ goto err_mmio;
+
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
intel_device_info_runtime_init(dev_priv);
- ret = intel_gt_init_mmio(&dev_priv->gt);
+ ret = intel_gt_init_mmio(to_gt(dev_priv));
if (ret)
goto err_uncore;
@@ -435,6 +442,8 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
err_uncore:
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
+err_mmio:
+ intel_uncore_cleanup_mmio(&dev_priv->uncore);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
@@ -449,6 +458,7 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
+ intel_uncore_cleanup_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
}
@@ -577,9 +587,9 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
- intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
+ intel_gt_init_hw_early(to_gt(dev_priv), &dev_priv->ggtt);
- ret = intel_gt_probe_lmem(&dev_priv->gt);
+ ret = intel_gt_probe_lmem(to_gt(dev_priv));
if (ret)
goto err_mem_regions;
@@ -692,7 +702,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Depends on sysfs having been initialized */
i915_perf_register(dev_priv);
- intel_gt_driver_register(&dev_priv->gt);
+ intel_gt_driver_register(to_gt(dev_priv));
intel_display_driver_register(dev_priv);
@@ -720,7 +730,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_display_driver_unregister(dev_priv);
- intel_gt_driver_unregister(&dev_priv->gt);
+ intel_gt_driver_unregister(to_gt(dev_priv));
i915_perf_unregister(dev_priv);
i915_pmu_unregister(dev_priv);
@@ -731,6 +741,12 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_gem_driver_unregister(dev_priv);
}
+void
+i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
+{
+ drm_printf(p, "iommu: %s\n", enableddisabled(intel_vtd_active(i915)));
+}
+
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
if (drm_debug_enabled(DRM_UT_DRIVER)) {
@@ -746,7 +762,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
- intel_gt_info_print(&dev_priv->gt.info, &p);
+ i915_print_iommu_status(dev_priv, &p);
+ intel_gt_info_print(&to_gt(dev_priv)->info, &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -766,7 +783,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *device_info;
struct drm_i915_private *i915;
- i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
+ i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
struct drm_i915_private, drm);
if (IS_ERR(i915))
return i915;
@@ -807,7 +824,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
- if (!i915->params.nuclear_pageflip && match_info->graphics_ver < 5)
+ if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5)
i915->drm.driver_features &= ~DRIVER_ATOMIC;
/*
@@ -1127,6 +1144,8 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_hw(dev_priv);
+ /* Must be called before GGTT is suspended. */
+ intel_dpt_suspend(dev_priv);
i915_ggtt_suspend(&dev_priv->ggtt);
i915_save_display(dev_priv);
@@ -1183,6 +1202,14 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
goto out;
}
+ /*
+ * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
+ * This should be totally removed when we handle the pci states properly
+ * on runtime PM and on s2idle cases.
+ */
+ if (suspend_to_idle(dev_priv))
+ pci_d3cold_disable(pdev);
+
pci_disable_device(pdev);
/*
* During hibernation on some platforms the BIOS may try to access
@@ -1207,7 +1234,8 @@ out:
return ret;
}
-int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
+int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
+ pm_message_t state)
{
int error;
@@ -1243,6 +1271,8 @@ static int i915_drm_resume(struct drm_device *dev)
drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
i915_ggtt_resume(&dev_priv->ggtt);
+ /* Must be called after GGTT is resumed. */
+ intel_dpt_resume(dev_priv);
intel_dmc_ucode_resume(dev_priv);
@@ -1344,6 +1374,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
pci_set_master(pdev);
+ pci_d3cold_enable(pdev);
+
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
ret = vlv_resume_prepare(dev_priv, false);
@@ -1353,7 +1385,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_resume_early(&dev_priv->uncore);
- intel_gt_check_and_clear_faults(&dev_priv->gt);
+ intel_gt_check_and_clear_faults(to_gt(dev_priv));
intel_display_power_resume_early(dev_priv);
@@ -1364,7 +1396,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
return ret;
}
-int i915_resume_switcheroo(struct drm_i915_private *i915)
+int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
{
int ret;
@@ -1520,6 +1552,7 @@ static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
@@ -1535,7 +1568,7 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_gt_runtime_suspend(&dev_priv->gt);
+ intel_gt_runtime_suspend(to_gt(dev_priv));
intel_runtime_pm_disable_interrupts(dev_priv);
@@ -1551,7 +1584,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_gt_runtime_resume(&dev_priv->gt);
+ intel_gt_runtime_resume(to_gt(dev_priv));
enable_rpm_wakeref_asserts(rpm);
@@ -1565,6 +1598,12 @@ static int intel_runtime_suspend(struct device *kdev)
drm_err(&dev_priv->drm,
"Unclaimed access detected prior to suspending\n");
+ /*
+ * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
+ * This should be totally removed when we handle the pci states properly
+ * on runtime PM and on s2idle cases.
+ */
+ pci_d3cold_disable(pdev);
rpm->suspended = true;
/*
@@ -1603,6 +1642,7 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
@@ -1615,6 +1655,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
rpm->suspended = false;
+ pci_d3cold_enable(pdev);
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
drm_dbg(&dev_priv->drm,
"Unclaimed access during suspend, bios?\n");
@@ -1631,7 +1672,7 @@ static int intel_runtime_resume(struct device *kdev)
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- intel_gt_runtime_resume(&dev_priv->gt);
+ intel_gt_runtime_resume(to_gt(dev_priv));
/*
* On VLV/CHV display interrupts are part of the display
@@ -1777,7 +1818,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
-static const struct drm_driver driver = {
+static const struct drm_driver i915_drm_driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
*/
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
new file mode 100644
index 000000000000..9ef8db4aa0a6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_DRIVER_H__
+#define __I915_DRIVER_H__
+
+#include <linux/pm.h>
+
+struct pci_dev;
+struct pci_device_id;
+struct drm_i915_private;
+
+extern const struct dev_pm_ops i915_pm_ops;
+
+int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+void i915_driver_remove(struct drm_i915_private *i915);
+void i915_driver_shutdown(struct drm_i915_private *i915);
+
+int i915_driver_resume_switcheroo(struct drm_i915_private *i915);
+int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
+
+#endif /* __I915_DRIVER_H__ */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12256218634f..0c70ab08fc0c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -50,7 +50,6 @@
#include <linux/stackdepot.h>
#include <linux/xarray.h>
-#include <drm/intel-gtt.h>
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
@@ -90,6 +89,7 @@
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
+#include "intel_pm_types.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
@@ -117,30 +117,6 @@
struct drm_i915_gem_object;
-enum hpd_pin {
- HPD_NONE = 0,
- HPD_TV = HPD_NONE, /* TV is known to be unreliable */
- HPD_CRT,
- HPD_SDVO_B,
- HPD_SDVO_C,
- HPD_PORT_A,
- HPD_PORT_B,
- HPD_PORT_C,
- HPD_PORT_D,
- HPD_PORT_E,
- HPD_PORT_TC1,
- HPD_PORT_TC2,
- HPD_PORT_TC3,
- HPD_PORT_TC4,
- HPD_PORT_TC5,
- HPD_PORT_TC6,
-
- HPD_NUM_PINS
-};
-
-#define for_each_hpd_pin(__pin) \
- for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
-
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50
@@ -191,8 +167,6 @@ struct i915_hotplug {
I915_GEM_DOMAIN_VERTEX)
struct drm_i915_private;
-struct i915_mm_struct;
-struct i915_mmu_object;
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
@@ -364,15 +338,6 @@ struct intel_color_funcs {
void (*read_luts)(struct intel_crtc_state *crtc_state);
};
-struct intel_audio_funcs {
- void (*audio_codec_enable)(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
- void (*audio_codec_disable)(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state);
-};
-
struct intel_cdclk_funcs {
void (*get_cdclk)(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config);
@@ -411,102 +376,8 @@ struct drm_i915_display_funcs {
void (*commit_modeset_enables)(struct intel_atomic_state *state);
};
-
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-struct intel_fbc {
- /* This is always the inner lock when overlapping with struct_mutex and
- * it's the outer lock when overlapping with stolen_lock. */
- struct mutex lock;
- unsigned int possible_framebuffer_bits;
- unsigned int busy_bits;
- struct intel_crtc *crtc;
-
- struct drm_mm_node compressed_fb;
- struct drm_mm_node compressed_llb;
-
- u8 limit;
-
- bool false_color;
-
- bool active;
- bool activated;
- bool flip_pending;
-
- bool underrun_detected;
- struct work_struct underrun_work;
-
- /*
- * Due to the atomic rules we can't access some structures without the
- * appropriate locking, so we cache information here in order to avoid
- * these problems.
- */
- struct intel_fbc_state_cache {
- struct {
- unsigned int mode_flags;
- u32 hsw_bdw_pixel_rate;
- } crtc;
-
- struct {
- unsigned int rotation;
- int src_w;
- int src_h;
- bool visible;
- /*
- * Display surface base address adjustement for
- * pageflips. Note that on gen4+ this only adjusts up
- * to a tile, offsets within a tile are handled in
- * the hw itself (with the TILEOFF register).
- */
- int adjusted_x;
- int adjusted_y;
-
- u16 pixel_blend_mode;
- } plane;
-
- struct {
- const struct drm_format_info *format;
- unsigned int stride;
- u64 modifier;
- } fb;
-
- unsigned int fence_y_offset;
- u16 interval;
- s8 fence_id;
- bool psr2_active;
- } state_cache;
-
- /*
- * This structure contains everything that's relevant to program the
- * hardware registers. When we want to figure out if we need to disable
- * and re-enable FBC for a new configuration we just check if there's
- * something different in the struct. The genx_fbc_activate functions
- * are supposed to read from it in order to program the registers.
- */
- struct intel_fbc_reg_params {
- struct {
- enum pipe pipe;
- enum i9xx_plane_id i9xx_plane;
- } crtc;
-
- struct {
- const struct drm_format_info *format;
- unsigned int stride;
- u64 modifier;
- } fb;
-
- unsigned int cfb_stride;
- unsigned int cfb_size;
- unsigned int fence_y_offset;
- u16 override_cfb_stride;
- u16 interval;
- s8 fence_id;
- bool plane_visible;
- } params;
-
- const char *no_fbc_reason;
-};
-
/*
* HIGH_RR is the highest eDP panel refresh rate read from EDID
* LOW_RR is the lowest eDP panel refresh rate found from EDID
@@ -543,7 +414,6 @@ struct i915_drrs {
#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
struct intel_fbdev;
-struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
@@ -603,7 +473,7 @@ struct i915_gem_mm {
* List of objects which are pending destruction.
*/
struct llist_head free_list;
- struct work_struct free_work;
+ struct delayed_work free_work;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -738,69 +608,6 @@ struct intel_vbt_data {
struct sdvo_device_mapping sdvo_mappings[2];
};
-enum intel_ddb_partitioning {
- INTEL_DDB_PART_1_2,
- INTEL_DDB_PART_5_6, /* IVB+ */
-};
-
-struct ilk_wm_values {
- u32 wm_pipe[3];
- u32 wm_lp[3];
- u32 wm_lp_spr[3];
- bool enable_fbc_wm;
- enum intel_ddb_partitioning partitioning;
-};
-
-struct g4x_pipe_wm {
- u16 plane[I915_MAX_PLANES];
- u16 fbc;
-};
-
-struct g4x_sr_wm {
- u16 plane;
- u16 cursor;
- u16 fbc;
-};
-
-struct vlv_wm_ddl_values {
- u8 plane[I915_MAX_PLANES];
-};
-
-struct vlv_wm_values {
- struct g4x_pipe_wm pipe[3];
- struct g4x_sr_wm sr;
- struct vlv_wm_ddl_values ddl[3];
- u8 level;
- bool cxsr;
-};
-
-struct g4x_wm_values {
- struct g4x_pipe_wm pipe[2];
- struct g4x_sr_wm sr;
- struct g4x_sr_wm hpll;
- bool cxsr;
- bool hpll_en;
- bool fbc_en;
-};
-
-struct skl_ddb_entry {
- u16 start, end; /* in number of blocks, 'end' is exclusive */
-};
-
-static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
-{
- return entry->end - entry->start;
-}
-
-static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
- const struct skl_ddb_entry *e2)
-{
- if (e1->start == e2->start && e1->end == e2->end)
- return true;
-
- return false;
-}
-
struct i915_frontbuffer_tracking {
spinlock_t lock;
@@ -828,6 +635,30 @@ struct i915_selftest_stash {
struct ida mock_region_instances;
};
+/* intel_audio.c private */
+struct intel_audio_funcs;
+struct intel_audio_private {
+ /* Display internal audio functions */
+ const struct intel_audio_funcs *funcs;
+
+ /* hda/i915 audio component */
+ struct i915_audio_component *component;
+ bool component_registered;
+ /* mutex for audio/video sync */
+ struct mutex mutex;
+ int power_refcount;
+ u32 freq_cntrl;
+
+ /* Used to save the pipe-to-encoder mapping for audio */
+ struct intel_encoder *encoder_map[I915_MAX_PIPES];
+
+ /* necessary resource sharing with HDMI LPE audio driver. */
+ struct {
+ struct platform_device *platdev;
+ int irq;
+ } lpe;
+};
+
struct drm_i915_private {
struct drm_device drm;
@@ -918,7 +749,7 @@ struct drm_i915_private {
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
- struct intel_fbc fbc;
+ struct intel_fbc *fbc;
struct i915_drrs drrs;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
@@ -995,9 +826,6 @@ struct drm_i915_private {
/* Display internal color functions */
const struct intel_color_funcs *color_funcs;
- /* Display internal audio functions */
- const struct intel_audio_funcs *audio_funcs;
-
/* Display CDCLK functions */
const struct intel_cdclk_funcs *cdclk_funcs;
@@ -1016,9 +844,6 @@ struct drm_i915_private {
/* Kernel Modesetting */
- struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
- struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
-
/**
* dpll and cdclk state is protected by connection_mutex
* dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
@@ -1084,17 +909,6 @@ struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
- /* hda/i915 audio component */
- struct i915_audio_component *audio_component;
- bool audio_component_registered;
- /**
- * av_mutex - mutex for audio/video sync
- *
- */
- struct mutex av_mutex;
- int audio_power_refcount;
- u32 audio_freq_cntrl;
-
u32 fdi_rx_config;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
@@ -1191,7 +1005,7 @@ struct drm_i915_private {
struct i915_perf perf;
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
- struct intel_gt gt;
+ struct intel_gt gt0;
struct {
struct i915_gem_contexts {
@@ -1227,14 +1041,7 @@ struct drm_i915_private {
bool ipc_enabled;
- /* Used to save the pipe-to-encoder mapping for audio */
- struct intel_encoder *av_enc_map[I915_MAX_PIPES];
-
- /* necessary resource sharing with HDMI LPE audio driver. */
- struct {
- struct platform_device *platdev;
- int irq;
- } lpe_audio;
+ struct intel_audio_private audio;
struct i915_pmu pmu;
@@ -1270,6 +1077,11 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
return pci_get_drvdata(pdev);
}
+static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
+{
+ return &i915->gt0;
+}
+
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
@@ -1327,15 +1139,15 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
#define IP_VER(ver, rel) ((ver) << 8 | (rel))
-#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics_ver)
-#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics_ver, \
- INTEL_INFO(i915)->graphics_rel)
+#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver)
+#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \
+ INTEL_INFO(i915)->graphics.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
-#define MEDIA_VER(i915) (INTEL_INFO(i915)->media_ver)
-#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media_ver, \
- INTEL_INFO(i915)->media_rel)
+#define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver)
+#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.ver, \
+ INTEL_INFO(i915)->media.rel)
#define IS_MEDIA_VER(i915, from, until) \
(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
@@ -1348,15 +1160,20 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
-#define INTEL_GT_STEP(__i915) (RUNTIME_INFO(__i915)->step.gt_step)
+#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
+#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
#define IS_DISPLAY_STEP(__i915, since, until) \
(drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
-#define IS_GT_STEP(__i915, since, until) \
- (drm_WARN_ON(&(__i915)->drm, INTEL_GT_STEP(__i915) == STEP_NONE), \
- INTEL_GT_STEP(__i915) >= (since) && INTEL_GT_STEP(__i915) < (until))
+#define IS_GRAPHICS_STEP(__i915, since, until) \
+ (drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
+ INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))
+
+#define IS_MEDIA_STEP(__i915, since, until) \
+ (drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
+ INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
@@ -1455,7 +1272,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
-#define IS_CANNONLAKE(dev_priv) 0
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
@@ -1470,6 +1286,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
+#define IS_ADLS_RPLS(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -1530,15 +1348,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_TGL_Y(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_GT_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GT_STEP(p, since, until))
+#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_KBL_GT_STEP(dev_priv, since, until) \
- (IS_KABYLAKE(dev_priv) && IS_GT_STEP(dev_priv, since, until))
+#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \
+ (IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until))
#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
(IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))
-#define IS_JSL_EHL_GT_STEP(p, since, until) \
- (IS_JSL_EHL(p) && IS_GT_STEP(p, since, until))
+#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
+ (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
(IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))
@@ -1546,19 +1364,19 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_TIGERLAKE(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
-#define IS_TGL_UY_GT_STEP(__i915, since, until) \
+#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
((IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
- IS_GT_STEP(__i915, since, until))
+ IS_GRAPHICS_STEP(__i915, since, until))
-#define IS_TGL_GT_STEP(__i915, since, until) \
+#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
(IS_TIGERLAKE(__i915) && !(IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
- IS_GT_STEP(__i915, since, until))
+ IS_GRAPHICS_STEP(__i915, since, until))
#define IS_RKL_DISPLAY_STEP(p, since, until) \
(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
-#define IS_DG1_GT_STEP(p, since, until) \
- (IS_DG1(p) && IS_GT_STEP(p, since, until))
+#define IS_DG1_GRAPHICS_STEP(p, since, until) \
+ (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_DG1_DISPLAY_STEP(p, since, until) \
(IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))
@@ -1566,20 +1384,20 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_ALDERLAKE_S(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
-#define IS_ADLS_GT_STEP(__i915, since, until) \
+#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
(IS_ALDERLAKE_S(__i915) && \
- IS_GT_STEP(__i915, since, until))
+ IS_GRAPHICS_STEP(__i915, since, until))
#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
(IS_ALDERLAKE_P(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
-#define IS_ADLP_GT_STEP(__i915, since, until) \
+#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
(IS_ALDERLAKE_P(__i915) && \
- IS_GT_STEP(__i915, since, until))
+ IS_GRAPHICS_STEP(__i915, since, until))
-#define IS_XEHPSDV_GT_STEP(__i915, since, until) \
- (IS_XEHPSDV(__i915) && IS_GT_STEP(__i915, since, until))
+#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
+ (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
/*
* DG2 hardware steppings are a bit unusual. The hardware design was forked
@@ -1595,11 +1413,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
* and stepping-specific logic will be applied with a general DG2-wide stepping
* number.
*/
-#define IS_DG2_GT_STEP(__i915, variant, since, until) \
+#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \
(IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
- IS_GT_STEP(__i915, since, until))
+ IS_GRAPHICS_STEP(__i915, since, until))
-#define IS_DG2_DISP_STEP(__i915, since, until) \
+#define IS_DG2_DISPLAY_STEP(__i915, since, until) \
(IS_DG2(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
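In other words, each DG2 variant's native stepping is folded into one DG2-wide stepping before the range check runs. A hypothetical mapping table (values invented for illustration) shows the shape:

#include <stdio.h>

enum dg2_variant { DG2_G10, DG2_G11 };
enum step { STEP_A0, STEP_A1, STEP_B0, STEP_C0 };

/*
 * Invented data: fold each variant's native stepping into a common
 * DG2-wide stepping so one IS_DG2_GRAPHICS_STEP() range applies.
 */
static const enum step dg2_common_step[][4] = {
	[DG2_G10] = { STEP_A0, STEP_A1, STEP_B0, STEP_C0 },
	[DG2_G11] = { STEP_B0, STEP_C0 },
};

int main(void)
{
	/* a G11 part at its native first stepping reports B0 overall */
	printf("G11 rev0 -> common step %d\n", dg2_common_step[DG2_G11][0]);
	return 0;
}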
@@ -1696,7 +1514,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PSR_HW_TRACKING(dev_priv) \
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv) (GRAPHICS_VER(dev_priv) >= 12)
-#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
+#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@@ -1714,6 +1532,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_MSLICES(dev_priv) \
(INTEL_INFO(dev_priv)->has_mslices)
+/*
+ * Set this flag when the platform requires 64K GTT page sizes or larger
+ * for device local memory access. It also implies that we require, or at
+ * least support, the compact PT layout for the ppGTT when using the 64K
+ * GTT pages.
+ */
+#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
+
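One practical consequence: on HAS_64K_PAGES() platforms, local-memory GTT allocations must be sized and aligned to 64K rather than 4K. A standalone sketch of the rounding (SZ_* values spelled out; not the driver's actual helper):

#include <stdio.h>

#define SZ_4K  0x1000u
#define SZ_64K 0x10000u

/* round a GTT allocation up to the platform's minimum page size */
static unsigned int gtt_align(unsigned int size, int has_64k_pages)
{
	unsigned int pg = has_64k_pages ? SZ_64K : SZ_4K;

	return (size + pg - 1) & ~(pg - 1);
}

int main(void)
{
	printf("0x%x\n", gtt_align(0x2345, 1)); /* 0x10000 on 64K platforms */
	printf("0x%x\n", gtt_align(0x2345, 0)); /* 0x3000 on 4K platforms */
	return 0;
}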
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
@@ -1727,7 +1553,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
INTEL_INFO(dev_priv)->has_pxp) && \
- VDBOX_MASK(&dev_priv->gt))
+ VDBOX_MASK(to_gt(dev_priv)))
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
@@ -1741,11 +1567,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
-#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
+#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask))
-#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0)
-#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12)
+#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 11)
#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
@@ -1761,26 +1587,27 @@ static inline bool run_as_guest(void)
#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
IS_ALDERLAKE_S(dev_priv))
-static inline bool intel_vtd_active(void)
+static inline bool intel_vtd_active(struct drm_i915_private *i915)
{
-#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped)
+ if (device_iommu_mapped(i915->drm.dev))
return true;
-#endif
/* Running as a guest, we assume the host is enforcing VT-d */
return run_as_guest();
}
+void
+i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p);
+
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
- return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active();
+ return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv);
}
static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
- return IS_BROXTON(i915) && intel_vtd_active();
+ return IS_BROXTON(i915) && intel_vtd_active(i915);
}
static inline bool
@@ -1789,16 +1616,7 @@ intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}
-/* i915_drv.c */
-extern const struct dev_pm_ops i915_pm_ops;
-
-int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-void i915_driver_remove(struct drm_i915_private *i915);
-void i915_driver_shutdown(struct drm_i915_private *i915);
-
-int i915_resume_switcheroo(struct drm_i915_private *i915);
-int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
-
+/* i915_getparam.c */
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1818,7 +1636,8 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
* armed the work again.
*/
while (atomic_read(&i915->mm.free_count)) {
- flush_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->bdev.wq);
rcu_barrier();
}
}
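The loop shape matters more than the flush primitives: each flush may retire a batch whose release re-arms the worker, so draining has to re-check the count rather than flush once. A userspace analogue of that loop:

#include <stdio.h>

/*
 * Illustrative stand-in for the deferred-free worker: each flush
 * retires one batch, and the drain loop keeps going until the
 * count it re-reads reaches zero.
 */
static int free_count = 3;

static void flush_deferred_free(void)
{
	if (free_count)
		free_count--;
}

int main(void)
{
	while (free_count)
		flush_deferred_free();
	printf("drained\n");
	return 0;
}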
@@ -1851,13 +1670,10 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
u64 size, u64 alignment, u64 flags);
-static inline struct i915_vma * __must_check
+struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
- u64 size, u64 alignment, u64 flags)
-{
- return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags);
-}
+ u64 size, u64 alignment, u64 flags);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
@@ -1933,6 +1749,10 @@ int i915_gem_evict_vm(struct i915_address_space *vm);
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
phys_addr_t size);
+struct drm_i915_gem_object *
+__i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
+ const struct drm_i915_gem_object_ops *ops,
+ phys_addr_t size);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -1972,14 +1792,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-/* i915_mm.c */
-int remap_io_mapping(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn, unsigned long size,
- struct io_mapping *iomap);
-int remap_io_sg(struct vm_area_struct *vma,
- unsigned long addr, unsigned long size,
- struct scatterlist *sgl, resource_size_t iobase);
-
static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
if (GRAPHICS_VER(i915) >= 11)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 981e383d1a5d..915bf431f320 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -764,7 +764,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* perspective, requiring manual detiling by the client.
*/
if (!i915_gem_object_has_struct_page(obj) ||
- cpu_write_needs_clflush(obj))
+ i915_gem_cpu_write_needs_clflush(obj))
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
* textures). Fall back to the shmem path in that case.
@@ -877,6 +877,8 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
+ GEM_WARN_ON(!ww);
+
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/*
@@ -936,10 +938,7 @@ new_vma:
return ERR_PTR(ret);
}
- if (ww)
- ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
- else
- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
@@ -959,6 +958,29 @@ new_vma:
return vma;
}
+struct i915_vma * __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ u64 size, u64 alignment, u64 flags)
+{
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *ret;
+ int err;
+
+ for_i915_gem_ww(&ww, err, true) {
+ err = i915_gem_object_lock(obj, &ww);
+ if (err)
+ continue;
+
+ ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
+ alignment, flags);
+ if (IS_ERR(ret))
+ err = PTR_ERR(ret);
+ }
+
+ return err ? ERR_PTR(err) : ret;
+}
+
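The uninlined helper now runs the pin inside a ww transaction and retries on contention. The for_i915_gem_ww() iterator loops while the body reports -EDEADLK; the sketch below mirrors that shape with a plain macro (an assumption about the expansion, not a copy of it):

#include <stdio.h>
#include <errno.h>

/* retry the body while it reports deadlock; any other status ends the loop */
#define for_retry_ww(err) \
	for ((err) = -EDEADLK; (err) == -EDEADLK; )

int main(void)
{
	int err, attempts = 0;

	for_retry_ww(err) {
		attempts++;
		/* simulate: first try deadlocks, second succeeds */
		err = (attempts < 2) ? -EDEADLK : 0;
	}
	printf("attempts=%d err=%d\n", attempts, err);
	return 0;
}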
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -1005,7 +1027,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->ops->adjust_lru(obj);
}
- if (i915_gem_object_has_pages(obj)) {
+ if (i915_gem_object_has_pages(obj) ||
+ i915_gem_object_has_self_managed_shrink_list(obj)) {
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
@@ -1048,7 +1071,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- intel_uc_fetch_firmwares(&dev_priv->gt.uc);
+ intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
intel_wopcm_init(&dev_priv->wopcm);
ret = i915_init_ggtt(dev_priv);
@@ -1068,7 +1091,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
- ret = intel_gt_init(&dev_priv->gt);
+ ret = intel_gt_init(to_gt(dev_priv));
if (ret)
goto err_unlock;
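This hunk is part of a tree-wide switch from `&dev_priv->gt` to a to_gt() accessor, hiding how the primary GT is stored so it can later live behind a pointer on multi-tile parts. A minimal sketch of the accessor pattern (struct names invented):

#include <stdio.h>

/*
 * Call sites stop naming the embedded member, so the primary GT can
 * later become a pointer or array element without touching every caller.
 */
struct fake_gt { int id; };
struct fake_i915 { struct fake_gt gt0; };

static inline struct fake_gt *to_gt(struct fake_i915 *i915)
{
	return &i915->gt0;
}

int main(void)
{
	struct fake_i915 i915 = { .gt0 = { .id = 0 } };

	printf("primary gt id=%d\n", to_gt(&i915)->id);
	return 0;
}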
@@ -1084,7 +1107,7 @@ err_unlock:
i915_gem_drain_workqueue(dev_priv);
if (ret != -EIO)
- intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
+ intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
if (ret == -EIO) {
/*
@@ -1092,10 +1115,10 @@ err_unlock:
* as wedged. But we only want to do this when the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- if (!intel_gt_is_wedged(&dev_priv->gt)) {
+ if (!intel_gt_is_wedged(to_gt(dev_priv))) {
i915_probe_error(dev_priv,
"Failed to initialize GPU, declaring it wedged!\n");
- intel_gt_set_wedged(&dev_priv->gt);
+ intel_gt_set_wedged(to_gt(dev_priv));
}
/* Minimal basic recovery for KMS */
@@ -1126,7 +1149,7 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
i915_gem_suspend_late(dev_priv);
- intel_gt_driver_remove(&dev_priv->gt);
+ intel_gt_driver_remove(to_gt(dev_priv));
dev_priv->uabi_engines = RB_ROOT;
/* Flush any outstanding unpin_work. */
@@ -1137,9 +1160,9 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- intel_gt_driver_release(&dev_priv->gt);
+ intel_gt_driver_release(to_gt(dev_priv));
- intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
+ intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
i915_gem_drain_freed_objects(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 77490cb5ff9c..7f80ad247bc8 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -13,7 +13,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_private *i915 = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
- const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
+ const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
drm_i915_getparam_t *param = data;
int value = 0;
@@ -82,8 +82,8 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915->params.enable_hangcheck &&
- intel_has_gpu_reset(&i915->gt);
- if (value && intel_has_reset_engine(&i915->gt))
+ intel_has_gpu_reset(to_gt(i915));
+ if (value && intel_has_reset_engine(to_gt(i915)))
value = 2;
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
@@ -96,7 +96,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = sseu->min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
- value = intel_huc_check_status(&i915->gt.uc.huc);
+ value = intel_huc_check_status(&to_gt(i915)->uc.huc);
if (value < 0)
return value;
break;
@@ -158,7 +158,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = i915->gt.clock_frequency;
+ value = to_gt(i915)->clock_frequency;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2a2d7643b551..5ae812d60abe 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -48,8 +48,9 @@
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"
+#include "i915_vma_snapshot.h"
-#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
static void __sg_set_buf(struct scatterlist *sg,
@@ -275,16 +276,16 @@ static bool compress_start(struct i915_vma_compress *c)
static void *compress_next_page(struct i915_vma_compress *c,
struct i915_vma_coredump *dst)
{
- void *page;
+ void *page_addr;
+ struct page *page;
- if (dst->page_count >= dst->num_pages)
- return ERR_PTR(-ENOSPC);
-
- page = pool_alloc(&c->pool, ALLOW_FAIL);
- if (!page)
+ page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
+ if (!page_addr)
return ERR_PTR(-ENOMEM);
- return dst->pages[dst->page_count++] = page;
+ page = virt_to_page(page_addr);
+ list_add_tail(&page->lru, &dst->page_list);
+ return page_addr;
}
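The capture buffer no longer preallocates a worst-case pages[] array; each page is linked onto page_list via its lru member as it is produced. A userspace analogue of growing a dump one linked page at a time (plain singly linked list rather than the kernel's list_head):

#include <stdio.h>
#include <stdlib.h>

struct dump_page {
	struct dump_page *next;
	char data[64];
};

struct dump {
	struct dump_page *head, **tail;
};

/* append a page as it is produced; no up-front sizing needed */
static void dump_add(struct dump *d, struct dump_page *p)
{
	p->next = NULL;
	*d->tail = p;
	d->tail = &p->next;
}

int main(void)
{
	struct dump d = { .head = NULL, .tail = &d.head };
	int i;

	for (i = 0; i < 3; i++)
		dump_add(&d, calloc(1, sizeof(struct dump_page)));

	for (struct dump_page *p = d.head, *n; p; p = n) {
		n = p->next;
		free(p);
	}
	printf("done\n");
	return 0;
}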
static int compress_page(struct i915_vma_compress *c,
@@ -397,7 +398,7 @@ static int compress_page(struct i915_vma_compress *c,
if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
- dst->pages[dst->page_count++] = ptr;
+ list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
cond_resched();
return 0;
@@ -504,7 +505,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- const u32 period = m->i915->gt.clock_period_ns;
+ const u32 period = to_gt(m->i915)->clock_period_ns;
err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
@@ -614,7 +615,7 @@ static void print_error_vma(struct drm_i915_error_state_buf *m,
const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
- int page;
+ struct page *page;
if (!vma)
return;
@@ -628,16 +629,17 @@ static void print_error_vma(struct drm_i915_error_state_buf *m,
err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);
err_compression_marker(m);
- for (page = 0; page < vma->page_count; page++) {
+ list_for_each_entry(page, &vma->page_list, lru) {
int i, len;
+ const u32 *addr = page_address(page);
len = PAGE_SIZE;
- if (page == vma->page_count - 1)
+ if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
len -= vma->unused;
len = ascii85_encode_len(len);
for (i = 0; i < len; i++)
- err_puts(m, ascii85_encode(vma->pages[page][i], out));
+ err_puts(m, ascii85_encode(addr[i], out));
}
err_puts(m, "\n");
}
@@ -946,10 +948,12 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
while (vma) {
struct i915_vma_coredump *next = vma->next;
- int page;
+ struct page *page, *n;
- for (page = 0; page < vma->page_count; page++)
- free_page((unsigned long)vma->pages[page]);
+ list_for_each_entry_safe(page, n, &vma->page_list, lru) {
+ list_del_init(&page->lru);
+ __free_page(page);
+ }
kfree(vma);
vma = next;
@@ -1009,25 +1013,21 @@ void __i915_gpu_coredump_free(struct kref *error_ref)
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
- const struct i915_vma *vma,
- const char *name,
+ const struct i915_vma_snapshot *vsnap,
struct i915_vma_compress *compress)
{
struct i915_ggtt *ggtt = gt->ggtt;
const u64 slot = ggtt->error_capture.start;
struct i915_vma_coredump *dst;
- unsigned long num_pages;
struct sgt_iter iter;
int ret;
might_sleep();
- if (!vma || !vma->pages || !compress)
+ if (!vsnap || !vsnap->pages || !compress)
return NULL;
- num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
- num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
+ dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
if (!dst)
return NULL;
@@ -1036,14 +1036,13 @@ i915_vma_coredump_create(const struct intel_gt *gt,
return NULL;
}
- strcpy(dst->name, name);
+ INIT_LIST_HEAD(&dst->page_list);
+ strcpy(dst->name, vsnap->name);
dst->next = NULL;
- dst->gtt_offset = vma->node.start;
- dst->gtt_size = vma->node.size;
- dst->gtt_page_sizes = vma->page_sizes.gtt;
- dst->num_pages = num_pages;
- dst->page_count = 0;
+ dst->gtt_offset = vsnap->gtt_offset;
+ dst->gtt_size = vsnap->gtt_size;
+ dst->gtt_page_sizes = vsnap->page_sizes;
dst->unused = 0;
ret = -EINVAL;
@@ -1051,7 +1050,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
void __iomem *s;
dma_addr_t dma;
- for_each_sgt_daddr(dma, iter, vma->pages) {
+ for_each_sgt_daddr(dma, iter, vsnap->pages) {
mutex_lock(&ggtt->error_mutex);
ggtt->vm.insert_page(&ggtt->vm, dma, slot,
I915_CACHE_NONE, 0);
@@ -1069,11 +1068,11 @@ i915_vma_coredump_create(const struct intel_gt *gt,
if (ret)
break;
}
- } else if (__i915_gem_object_is_lmem(vma->obj)) {
- struct intel_memory_region *mem = vma->obj->mm.region;
+ } else if (vsnap->mr && vsnap->mr->type != INTEL_MEMORY_SYSTEM) {
+ struct intel_memory_region *mem = vsnap->mr;
dma_addr_t dma;
- for_each_sgt_daddr(dma, iter, vma->pages) {
+ for_each_sgt_daddr(dma, iter, vsnap->pages) {
void __iomem *s;
s = io_mapping_map_wc(&mem->iomap,
@@ -1089,7 +1088,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
} else {
struct page *page;
- for_each_sgt_page(page, iter, vma->pages) {
+ for_each_sgt_page(page, iter, vsnap->pages) {
void *s;
drm_clflush_pages(&page, 1);
@@ -1106,8 +1105,13 @@ i915_vma_coredump_create(const struct intel_gt *gt,
}
if (ret || compress_flush(compress, dst)) {
- while (dst->page_count--)
- pool_free(&compress->pool, dst->pages[dst->page_count]);
+ struct page *page, *n;
+
+ list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
+ list_del_init(&page->lru);
+ pool_free(&compress->pool, page_address(page));
+ }
+
kfree(dst);
dst = NULL;
}
@@ -1320,38 +1324,72 @@ static bool record_context(struct i915_gem_context_coredump *e,
struct intel_engine_capture_vma {
struct intel_engine_capture_vma *next;
- struct i915_vma *vma;
+ struct i915_vma_snapshot *vsnap;
char name[16];
+ bool lockdep_cookie;
};
static struct intel_engine_capture_vma *
-capture_vma(struct intel_engine_capture_vma *next,
- struct i915_vma *vma,
- const char *name,
- gfp_t gfp)
+capture_vma_snapshot(struct intel_engine_capture_vma *next,
+ struct i915_vma_snapshot *vsnap,
+ gfp_t gfp)
{
struct intel_engine_capture_vma *c;
- if (!vma)
+ if (!i915_vma_snapshot_present(vsnap))
return next;
c = kmalloc(sizeof(*c), gfp);
if (!c)
return next;
- if (!i915_active_acquire_if_busy(&vma->active)) {
+ if (!i915_vma_snapshot_resource_pin(vsnap, &c->lockdep_cookie)) {
kfree(c);
return next;
}
- strcpy(c->name, name);
- c->vma = vma; /* reference held while active */
+ strcpy(c->name, vsnap->name);
+ c->vsnap = vsnap;
+ i915_vma_snapshot_get(vsnap);
c->next = next;
return c;
}
static struct intel_engine_capture_vma *
+capture_vma(struct intel_engine_capture_vma *next,
+ struct i915_vma *vma,
+ const char *name,
+ gfp_t gfp)
+{
+ struct i915_vma_snapshot *vsnap;
+
+ if (!vma)
+ return next;
+
+ /*
+ * If the vma isn't pinned, then the vma should have been snapshotted
+ * to a struct i915_vma_snapshot at command submission time, not here.
+ */
+ GEM_WARN_ON(!i915_vma_is_pinned(vma));
+ if (!i915_vma_is_pinned(vma))
+ return next;
+
+ vsnap = i915_vma_snapshot_alloc(gfp);
+ if (!vsnap)
+ return next;
+
+ i915_vma_snapshot_init(vsnap, vma, name);
+ next = capture_vma_snapshot(next, vsnap, gfp);
+
+ /* FIXME: Replace on async unbind. */
+ i915_vma_snapshot_put(vsnap);
+
+ return next;
+}
+
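The capture list now holds its own reference on each snapshot and pins the backing resource, so the dump can still read the pages after submission drops its reference. A sketch of that lifecycle with a plain refcount (semantics assumed from the calls above):

#include <stdio.h>
#include <stdlib.h>

struct vma_snapshot { int refs; const char *name; };

static struct vma_snapshot *snap_get(struct vma_snapshot *s)
{
	s->refs++;
	return s;
}

static void snap_put(struct vma_snapshot *s)
{
	if (--s->refs == 0)
		free(s);
}

int main(void)
{
	struct vma_snapshot *s = calloc(1, sizeof(*s));

	s->refs = 1;
	s->name = "batch";
	snap_get(s);	/* capture list takes its own reference */
	snap_put(s);	/* submission reference dropped */
	printf("%s still live, refs=%d\n", s->name, s->refs);
	snap_put(s);	/* capture done, snapshot freed */
	return 0;
}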
+static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
const struct i915_request *rq,
gfp_t gfp)
@@ -1359,7 +1397,7 @@ capture_user(struct intel_engine_capture_vma *capture,
struct i915_capture_list *c;
for (c = rq->capture_list; c; c = c->next)
- capture = capture_vma(capture, c->vma, "user", gfp);
+ capture = capture_vma_snapshot(capture, c->vma_snapshot, gfp);
return capture;
}
@@ -1373,6 +1411,33 @@ static void add_vma(struct intel_engine_coredump *ee,
}
}
+static struct i915_vma_coredump *
+create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
+ const char *name, struct i915_vma_compress *compress)
+{
+ struct i915_vma_coredump *ret;
+ struct i915_vma_snapshot tmp;
+
+ if (!vma)
+ return NULL;
+
+ GEM_WARN_ON(!i915_vma_is_pinned(vma));
+ i915_vma_snapshot_init_onstack(&tmp, vma, name);
+ ret = i915_vma_coredump_create(gt, &tmp, compress);
+ i915_vma_snapshot_put_onstack(&tmp);
+
+ return ret;
+}
+
+static void add_vma_coredump(struct intel_engine_coredump *ee,
+ const struct intel_gt *gt,
+ struct i915_vma *vma,
+ const char *name,
+ struct i915_vma_compress *compress)
+{
+ add_vma(ee, create_vma_coredump(gt, vma, name, compress));
+}
+
struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
{
@@ -1406,7 +1471,7 @@ intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
* as the simplest method to avoid being overwritten
* by userspace.
*/
- vma = capture_vma(vma, rq->batch, "batch", gfp);
+ vma = capture_vma_snapshot(vma, &rq->batch_snapshot, gfp);
vma = capture_user(vma, rq, gfp);
vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
vma = capture_vma(vma, rq->context->state, "HW context", gfp);
@@ -1427,30 +1492,24 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
while (capture) {
struct intel_engine_capture_vma *this = capture;
- struct i915_vma *vma = this->vma;
+ struct i915_vma_snapshot *vsnap = this->vsnap;
add_vma(ee,
i915_vma_coredump_create(engine->gt,
- vma, this->name,
- compress));
+ vsnap, compress));
- i915_active_release(&vma->active);
+ i915_vma_snapshot_resource_unpin(vsnap, this->lockdep_cookie);
+ i915_vma_snapshot_put(vsnap);
capture = this->next;
kfree(this);
}
- add_vma(ee,
- i915_vma_coredump_create(engine->gt,
- engine->status_page.vma,
- "HW Status",
- compress));
+ add_vma_coredump(ee, engine->gt, engine->status_page.vma,
+ "HW Status", compress);
- add_vma(ee,
- i915_vma_coredump_create(engine->gt,
- engine->wa_ctx.vma,
- "WA context",
- compress));
+ add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
+ "WA context", compress);
}
static struct intel_engine_coredump *
@@ -1486,17 +1545,25 @@ capture_engine(struct intel_engine_cs *engine,
}
}
if (rq)
- capture = intel_engine_coredump_add_request(ee, rq,
- ATOMIC_MAYFAIL);
+ rq = i915_request_get_rcu(rq);
+
+ if (!rq)
+ goto no_request_capture;
+
+ capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
if (!capture) {
-no_request_capture:
- kfree(ee);
- return NULL;
+ i915_request_put(rq);
+ goto no_request_capture;
}
intel_engine_coredump_add_vma(ee, capture, compress);
+ i915_request_put(rq);
return ee;
+
+no_request_capture:
+ kfree(ee);
+ return NULL;
}
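The restructured path takes a full reference on the request before using it outside the lookup and drops it on every exit, including the early-out. The same discipline in miniature, with plain counters standing in for i915_request_get_rcu()/i915_request_put():

#include <stdio.h>

struct request { int refs; };

/* only hand out the request if it is still alive */
static struct request *request_get(struct request *rq)
{
	return rq && rq->refs ? (rq->refs++, rq) : NULL;
}

static void request_put(struct request *rq)
{
	rq->refs--;
}

static int capture(struct request *rq)
{
	rq = request_get(rq);
	if (!rq)
		return -1;	/* nothing to capture */

	/* ... safely read rq here ... */

	request_put(rq);	/* drop on every exit path */
	return 0;
}

int main(void)
{
	struct request rq = { .refs = 1 };

	printf("capture=%d refs=%d\n", capture(&rq), rq.refs);
	return 0;
}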
static void
@@ -1550,10 +1617,8 @@ gt_record_uc(struct intel_gt_coredump *gt,
*/
error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
- error_uc->guc_log =
- i915_vma_coredump_create(gt->_gt,
- uc->guc.log.vma, "GuC log buffer",
- compress);
+ error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
+ "GuC log buffer", compress);
return error_uc;
}
@@ -1750,10 +1815,7 @@ static void capture_gen(struct i915_gpu_coredump *error)
error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
error->suspended = i915->runtime_pm.suspended;
- error->iommu = -1;
-#ifdef CONFIG_INTEL_IOMMU
- error->iommu = intel_iommu_gfx_mapped;
-#endif
+ error->iommu = intel_vtd_active(i915);
error->reset_count = i915_reset_count(&i915->gpu_error);
error->suspend_count = i915->suspend_count;
@@ -1784,7 +1846,7 @@ i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
error->time = ktime_get_real();
error->boottime = ktime_get_boottime();
- error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
+ error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
error->capture = jiffies;
capture_gen(error);
@@ -1839,8 +1901,8 @@ void i915_vma_capture_finish(struct intel_gt_coredump *gt,
kfree(compress);
}
-struct i915_gpu_coredump *
-i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+static struct i915_gpu_coredump *
+__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
struct drm_i915_private *i915 = gt->i915;
struct i915_gpu_coredump *error;
@@ -1881,6 +1943,22 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
return error;
}
+struct i915_gpu_coredump *
+i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+{
+ static DEFINE_MUTEX(capture_mutex);
+ int ret = mutex_lock_interruptible(&capture_mutex);
+ struct i915_gpu_coredump *dump;
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ dump = __i915_gpu_coredump(gt, engine_mask);
+ mutex_unlock(&capture_mutex);
+
+ return dump;
+}
+
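The new wrapper serializes all coredump captures behind one function-local mutex and propagates an interrupted lock as ERR_PTR() instead of capturing concurrently. A userspace analogue using pthreads (compile with -lpthread):

#include <stdio.h>
#include <pthread.h>

/* one dump at a time; lock failure is reported, not swallowed */
static pthread_mutex_t capture_mutex = PTHREAD_MUTEX_INITIALIZER;

static int do_coredump(void)
{
	int ret = pthread_mutex_lock(&capture_mutex);

	if (ret)
		return -ret;	/* caller sees the failure */

	/* ... capture device state here ... */

	pthread_mutex_unlock(&capture_mutex);
	return 0;
}

int main(void)
{
	printf("dump=%d\n", do_coredump());
	return 0;
}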
void i915_error_state_store(struct i915_gpu_coredump *error)
{
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index b98d8cdbe4f2..5aedf5129814 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -39,10 +39,8 @@ struct i915_vma_coredump {
u64 gtt_size;
u32 gtt_page_sizes;
- int num_pages;
- int page_count;
int unused;
- u32 *pages[];
+ struct list_head page_list;
};
struct i915_request_coredump {
diff --git a/drivers/gpu/drm/i915/i915_iosf_mbi.h b/drivers/gpu/drm/i915/i915_iosf_mbi.h
new file mode 100644
index 000000000000..8f81b7603d37
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_iosf_mbi.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_IOSF_MBI_H__
+#define __I915_IOSF_MBI_H__
+
+#if IS_ENABLED(CONFIG_IOSF_MBI)
+#include <asm/iosf_mbi.h>
+#else
+
+/* Stubs to compile for all non-x86 archs */
+#define MBI_PMIC_BUS_ACCESS_BEGIN 1
+#define MBI_PMIC_BUS_ACCESS_END 2
+
+struct notifier_block;
+
+static inline void iosf_mbi_punit_acquire(void) {}
+static inline void iosf_mbi_punit_release(void) {}
+static inline void iosf_mbi_assert_punit_acquired(void) {}
+
+static inline
+int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int
+iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline
+int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+#endif
+
+#endif /* __I915_IOSF_MBI_H__ */
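The new header follows the usual stub-header pattern: the real <asm/iosf_mbi.h> API when CONFIG_IOSF_MBI is enabled, otherwise no-op inline stubs so non-x86 builds compile unchanged. The same pattern in miniature (HAVE_WIDGET and widget_acquire() are made up for illustration):

#include <stdio.h>

#if defined(HAVE_WIDGET)
int widget_acquire(void);
#else
/* stub returns success, so shared code needs no #ifdefs */
static inline int widget_acquire(void) { return 0; }
#endif

int main(void)
{
	printf("acquire=%d\n", widget_acquire());
	return 0;
}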
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 77680bca46ee..21f75b069fa8 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -35,6 +35,7 @@
#include <drm/drm_drv.h>
#include "display/intel_de.h"
+#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
@@ -49,7 +50,6 @@
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_trace.h"
#include "intel_pm.h"
/**
@@ -224,7 +224,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
drm_crtc_handle_vblank(&crtc->base);
}
@@ -1040,7 +1040,7 @@ static void ivb_parity_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), l3_parity.error_work);
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
u32 error_status, row, bank, subbank;
char *parity_event[6];
u32 misccpctl;
@@ -1318,7 +1318,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
@@ -1357,7 +1357,7 @@ display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
static void flip_done_handler(struct drm_i915_private *i915,
enum pipe pipe)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
struct drm_crtc_state *crtc_state = crtc->base.state;
struct drm_pending_vblank_event *e = crtc_state->event;
struct drm_device *dev = &i915->drm;
@@ -1718,9 +1718,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
- gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
+ gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
if (pm_iir)
- gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
+ gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -1777,7 +1777,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
+ gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -2108,7 +2108,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
}
if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
- gen5_rps_irq_handler(&dev_priv->gt.rps);
+ gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
@@ -2189,9 +2189,9 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
if (gt_iir) {
raw_reg_write(regs, GTIIR, gt_iir);
if (GRAPHICS_VER(i915) >= 6)
- gen6_gt_irq_handler(&i915->gt, gt_iir);
+ gen6_gt_irq_handler(to_gt(i915), gt_iir);
else
- gen5_gt_irq_handler(&i915->gt, gt_iir);
+ gen5_gt_irq_handler(to_gt(i915), gt_iir);
ret = IRQ_HANDLED;
}
@@ -2209,7 +2209,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
if (pm_iir) {
raw_reg_write(regs, GEN6_PMIIR, pm_iir);
- gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
+ gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
ret = IRQ_HANDLED;
}
}
@@ -2635,7 +2635,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
/* Find, queue (onto bottom-halves), then clear each source */
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
+ gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
@@ -2715,7 +2715,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
void __iomem * const regs = i915->uncore.regs;
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
u32 gu_misc_iir;
@@ -2771,8 +2771,8 @@ static inline void dg1_master_intr_enable(void __iomem * const regs)
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
- struct intel_gt *gt = &i915->gt;
- void __iomem * const regs = i915->uncore.regs;
+ struct intel_gt *gt = to_gt(i915);
+ void __iomem * const regs = gt->uncore->regs;
u32 master_tile_ctl, master_ctl;
u32 gu_misc_iir;
@@ -3016,7 +3016,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
if (IS_CHERRYVIEW(dev_priv))
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
- intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
+ intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
@@ -3075,7 +3075,7 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
}
- gen5_gt_irq_reset(&dev_priv->gt);
+ gen5_gt_irq_reset(to_gt(dev_priv));
ibx_irq_reset(dev_priv);
}
@@ -3085,7 +3085,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
- gen5_gt_irq_reset(&dev_priv->gt);
+ gen5_gt_irq_reset(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3119,7 +3119,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
gen8_master_intr_disable(dev_priv->uncore.regs);
- gen8_gt_irq_reset(&dev_priv->gt);
+ gen8_gt_irq_reset(to_gt(dev_priv));
gen8_display_irq_reset(dev_priv);
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
@@ -3173,11 +3173,12 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = to_gt(dev_priv);
+ struct intel_uncore *uncore = gt->uncore;
gen11_master_intr_disable(dev_priv->uncore.regs);
- gen11_gt_irq_reset(&dev_priv->gt);
+ gen11_gt_irq_reset(gt);
gen11_display_irq_reset(dev_priv);
GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
@@ -3186,11 +3187,12 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = to_gt(dev_priv);
+ struct intel_uncore *uncore = gt->uncore;
dg1_master_intr_disable(dev_priv->uncore.regs);
- gen11_gt_irq_reset(&dev_priv->gt);
+ gen11_gt_irq_reset(gt);
gen11_display_irq_reset(dev_priv);
GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
@@ -3250,7 +3252,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
- gen8_gt_irq_reset(&dev_priv->gt);
+ gen8_gt_irq_reset(to_gt(dev_priv));
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
@@ -3707,7 +3709,7 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
ibx_irq_postinstall(dev_priv);
- gen5_gt_irq_postinstall(&dev_priv->gt);
+ gen5_gt_irq_postinstall(to_gt(dev_priv));
GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
display_mask | extra_mask);
@@ -3744,7 +3746,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- gen5_gt_irq_postinstall(&dev_priv->gt);
+ gen5_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3850,7 +3852,7 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
else if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev_priv);
- gen8_gt_irq_postinstall(&dev_priv->gt);
+ gen8_gt_irq_postinstall(to_gt(dev_priv));
gen8_de_irq_postinstall(dev_priv);
gen8_master_intr_enable(dev_priv->uncore.regs);
@@ -3869,13 +3871,14 @@ static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = to_gt(dev_priv);
+ struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_postinstall(dev_priv);
- gen11_gt_irq_postinstall(&dev_priv->gt);
+ gen11_gt_irq_postinstall(gt);
gen11_de_irq_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
@@ -3886,10 +3889,11 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = to_gt(dev_priv);
+ struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
- gen11_gt_irq_postinstall(&dev_priv->gt);
+ gen11_gt_irq_postinstall(gt);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
@@ -3900,13 +3904,13 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
GEN11_DISPLAY_IRQ_ENABLE);
}
- dg1_master_intr_enable(dev_priv->uncore.regs);
- intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_TILE_INTR);
+ dg1_master_intr_enable(uncore->regs);
+ intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- gen8_gt_irq_postinstall(&dev_priv->gt);
+ gen8_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -4069,7 +4073,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4177,7 +4181,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4322,11 +4326,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[RCS0],
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
iir);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[VCS0],
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
iir >> 25);
if (iir & I915_MASTER_ERROR_INTERRUPT)
@@ -4377,7 +4381,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
- dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
+ to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
if (!HAS_DISPLAY(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 666808cb3a32..7998bc74ab49 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
+#include "i915_mm.h"
struct remap_pfn {
struct mm_struct *mm;
@@ -37,17 +38,6 @@ struct remap_pfn {
resource_size_t iobase;
};
-static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
-{
- struct remap_pfn *r = data;
-
- /* Special PTE are not associated with any struct page */
- set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
- r->pfn++;
-
- return 0;
-}
-
#define use_dma(io) ((io) != -1)
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
@@ -77,6 +67,20 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
return 0;
}
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+
+#if IS_ENABLED(CONFIG_X86)
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ /* Special PTEs are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+ r->pfn++;
+
+ return 0;
+}
+
/**
* remap_io_mapping - remap an IO mapping to userspace
* @vma: user vma to map to
@@ -94,7 +98,6 @@ int remap_io_mapping(struct vm_area_struct *vma,
struct remap_pfn r;
int err;
-#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -111,6 +114,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
return 0;
}
+#endif
/**
* remap_io_sg - remap an IO mapping to userspace
diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h
new file mode 100644
index 000000000000..76f1d53bdf34
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mm.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_MM_H__
+#define __I915_MM_H__
+
+#include <linux/types.h>
+
+struct vm_area_struct;
+struct io_mapping;
+struct scatterlist;
+
+#if IS_ENABLED(CONFIG_X86)
+int remap_io_mapping(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ struct io_mapping *iomap);
+#else
+static inline
+int remap_io_mapping(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ struct io_mapping *iomap)
+{
+ pr_err("Architecture has no %s() and shouldn't be calling this function\n", __func__);
+ WARN_ON_ONCE(1);
+ return 0;
+}
+#endif
+
+int remap_io_sg(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long size,
+ struct scatterlist *sgl, resource_size_t iobase);
+
+#endif /* __I915_MM_H__ */
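Here the real implementation exists only under CONFIG_X86, while the fallback stub warns loudly and fails safe instead of breaking the link. A compact sketch of that gating idea (FAKE_X86 stands in for the real config symbol):

#include <stdio.h>

#ifdef FAKE_X86
static int remap_pages(void) { return 0; /* real work would go here */ }
#else
/* loud, safe fallback: report the misuse instead of failing the build */
static int remap_pages(void)
{
	fprintf(stderr, "%s: unsupported on this architecture\n", __func__);
	return 0;
}
#endif

int main(void)
{
	return remap_pages();
}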
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index ab2295dd4500..f6bcd2f89257 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -4,7 +4,7 @@
* Copyright © 2021 Intel Corporation
*/
-#include <linux/console.h>
+#include <drm/drm_drv.h>
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h"
@@ -24,14 +24,14 @@ static int i915_check_nomodeset(void)
/*
* Enable KMS by default, unless explicitly overridden by
- * either the i915.modeset prarameter or by the
- * vga_text_mode_force boot option.
+ * either the i915.modeset parameter or by the
+ * nomodeset boot option.
*/
if (i915_modparams.modeset == 0)
use_kms = false;
- if (vgacon_text_force() && i915_modparams.modeset == -1)
+ if (drm_firmware_drivers_only() && i915_modparams.modeset == -1)
use_kms = false;
if (!use_kms) {
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index e07f4cfea63a..525ae832aa9a 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -140,6 +140,9 @@ i915_param_named_unsafe(invert_brightness, int, 0400,
i915_param_named(disable_display, bool, 0400,
"Disable display (default: false)");
+i915_param_named(memtest, bool, 0400,
+ "Perform a read/write test of all device memory on module load (default: off)");
+
i915_param_named(mmio_debug, int, 0400,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 8d725b64592d..c9d53ff910a0 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -64,6 +64,7 @@ struct drm_printer;
param(char *, guc_firmware_path, NULL, 0400) \
param(char *, huc_firmware_path, NULL, 0400) \
param(char *, dmc_firmware_path, NULL, 0400) \
+ param(bool, memtest, false, 0400) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
param(int, edp_vswing, 0, 0400) \
param(unsigned int, reset, 3, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 169837de395d..261294df535c 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -22,18 +22,17 @@
*
*/
-#include <linux/vga_switcheroo.h>
-
#include <drm/drm_drv.h>
#include <drm/i915_pciids.h>
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_pci.h"
#define PLATFORM(x) .platform = (x)
#define GEN(x) \
- .graphics_ver = (x), \
- .media_ver = (x), \
+ .graphics.ver = (x), \
+ .media.ver = (x), \
.display.ver = (x)
#define I845_PIPE_OFFSETS \
@@ -145,6 +144,12 @@
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
DRM_COLOR_LUT_EQUAL_CHANNELS, \
}
+#define ICL_COLORS \
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ }
/* Keep in gen based order, and chronological order within a gen */
@@ -157,8 +162,8 @@
#define I830_FEATURES \
GEN(2), \
.is_mobile = 1, \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -178,8 +183,8 @@
#define I845_FEATURES \
GEN(2), \
- .pipe_mask = BIT(PIPE_A), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A), \
+ .display.pipe_mask = BIT(PIPE_A), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -220,8 +225,8 @@ static const struct intel_device_info i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.platform_engine_mask = BIT(RCS0), \
@@ -310,8 +315,8 @@ static const struct intel_device_info pnv_m_info = {
#define GEN4_FEATURES \
GEN(4), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
@@ -363,8 +368,8 @@ static const struct intel_device_info gm45_info = {
#define GEN5_FEATURES \
GEN(5), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
@@ -393,8 +398,8 @@ static const struct intel_device_info ilk_m_info = {
#define GEN6_FEATURES \
GEN(6), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -444,8 +449,8 @@ static const struct intel_device_info snb_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -499,8 +504,8 @@ static const struct intel_device_info ivb_q_info = {
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
- .pipe_mask = 0, /* legal, last one wins */
- .cpu_transcoder_mask = 0,
+ .display.pipe_mask = 0, /* legal, last one wins */
+ .display.cpu_transcoder_mask = 0,
.has_l3_dpf = 1,
};
@@ -508,8 +513,8 @@ static const struct intel_device_info vlv_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_reset_engine = true,
@@ -533,7 +538,7 @@ static const struct intel_device_info vlv_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
@@ -603,8 +608,8 @@ static const struct intel_device_info bdw_gt3_info = {
static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
.display.has_hotplug = 1,
.is_lp = 1,
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
@@ -681,8 +686,8 @@ static const struct intel_device_info skl_gt4_info = {
.dbuf.slice_mask = BIT(DBUF_S1), \
.display.has_hotplug = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.has_64bit_reloc = 1, \
@@ -790,8 +795,8 @@ static const struct intel_device_info cml_gt2_info = {
#define GEN11_FEATURES \
GEN9_FEATURES, \
GEN11_DEFAULT_PAGE_SIZES, \
- .abox_mask = BIT(0), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .display.abox_mask = BIT(0), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.pipe_offsets = { \
@@ -811,7 +816,7 @@ static const struct intel_device_info cml_gt2_info = {
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
GEN(11), \
- .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \
+ ICL_COLORS, \
.dbuf.size = 2048, \
.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
.display.has_dsc = 1, \
@@ -842,9 +847,9 @@ static const struct intel_device_info jsl_info = {
#define GEN12_FEATURES \
GEN11_FEATURES, \
GEN(12), \
- .abox_mask = GENMASK(2, 1), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .display.abox_mask = GENMASK(2, 1), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.pipe_offsets = { \
@@ -866,7 +871,7 @@ static const struct intel_device_info jsl_info = {
TGL_CURSOR_OFFSETS, \
.has_global_mocs = 1, \
.has_pxp = 1, \
- .display.has_dsb = 1
+ .display.has_dsb = 0 /* FIXME: LUT load is broken with DSB */
static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
@@ -879,9 +884,9 @@ static const struct intel_device_info tgl_info = {
static const struct intel_device_info rkl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ROCKETLAKE),
- .abox_mask = BIT(0),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .display.abox_mask = BIT(0),
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C),
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
@@ -899,9 +904,9 @@ static const struct intel_device_info rkl_info = {
static const struct intel_device_info dg1_info = {
GEN12_FEATURES,
DGFX_FEATURES,
- .graphics_rel = 10,
+ .graphics.rel = 10,
PLATFORM(INTEL_DG1),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
@@ -913,7 +918,7 @@ static const struct intel_device_info dg1_info = {
static const struct intel_device_info adl_s_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ALDERLAKE_S),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
.platform_engine_mask =
@@ -930,10 +935,11 @@ static const struct intel_device_info adl_s_info = {
}
#define XE_LPD_FEATURES \
- .abox_mask = GENMASK(1, 0), \
- .color = { .degamma_lut_size = 0, .gamma_lut_size = 0 }, \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
- BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \
+ .display.abox_mask = GENMASK(1, 0), \
+ .color = { .degamma_lut_size = 128, .gamma_lut_size = 1024, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ }, \
.dbuf.size = 4096, \
.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
BIT(DBUF_S4), \
@@ -949,18 +955,22 @@ static const struct intel_device_info adl_s_info = {
.display.has_ipc = 1, \
.display.has_psr = 1, \
.display.ver = 13, \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
[TRANSCODER_D] = PIPE_D_OFFSET, \
+ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
}, \
.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
[TRANSCODER_D] = TRANSCODER_D_OFFSET, \
+ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
XE_LPD_CURSOR_OFFSETS
@@ -968,7 +978,9 @@ static const struct intel_device_info adl_p_info = {
GEN12_FEATURES,
XE_LPD_FEATURES,
PLATFORM(INTEL_ALDERLAKE_P),
- .require_force_probe = 1,
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) |
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
.display.has_cdclk_crawl = 1,
.display.has_modular_fia = 1,
.display.has_psr_hw_tracking = 0,
@@ -986,8 +998,8 @@ static const struct intel_device_info adl_p_info = {
I915_GTT_PAGE_SIZE_2M
#define XE_HP_FEATURES \
- .graphics_ver = 12, \
- .graphics_rel = 50, \
+ .graphics.ver = 12, \
+ .graphics.rel = 50, \
XE_HP_PAGE_SIZES, \
.dma_mask_size = 46, \
.has_64bit_reloc = 1, \
@@ -1005,8 +1017,8 @@ static const struct intel_device_info adl_p_info = {
.ppgtt_type = INTEL_PPGTT_FULL
#define XE_HPM_FEATURES \
- .media_ver = 12, \
- .media_rel = 50
+ .media.ver = 12, \
+ .media.rel = 50
__maybe_unused
static const struct intel_device_info xehpsdv_info = {
@@ -1015,7 +1027,7 @@ static const struct intel_device_info xehpsdv_info = {
DGFX_FEATURES,
PLATFORM(INTEL_XEHPSDV),
.display = { },
- .pipe_mask = 0,
+ .has_64k_pages = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
@@ -1030,14 +1042,17 @@ static const struct intel_device_info dg2_info = {
XE_HPM_FEATURES,
XE_LPD_FEATURES,
DGFX_FEATURES,
- .graphics_rel = 55,
- .media_rel = 55,
+ .graphics.rel = 55,
+ .media.rel = 55,
PLATFORM(INTEL_DG2),
+ .has_64k_pages = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) |
BIT(VCS0) | BIT(VCS2),
.require_force_probe = 1,
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
};
#undef PLATFORM
@@ -1117,6 +1132,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_ADLS_IDS(&adl_s_info),
INTEL_ADLP_IDS(&adl_p_info),
INTEL_DG1_IDS(&dg1_info),
+ INTEL_RPLS_IDS(&adl_s_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -1189,11 +1205,8 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (vga_switcheroo_client_probe_defer(pdev))
+ /* Detect if we need to wait for other drivers early on */
+ if (intel_modeset_probe_defer(pdev))
return -EPROBE_DEFER;
err = i915_driver_probe(pdev, ent);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2f01b8c0284c..e27f3b7cf094 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -4273,26 +4273,6 @@ static struct ctl_table oa_table[] = {
{}
};
-static struct ctl_table i915_root[] = {
- {
- .procname = "i915",
- .maxlen = 0,
- .mode = 0555,
- .child = oa_table,
- },
- {}
-};
-
-static struct ctl_table dev_root[] = {
- {
- .procname = "dev",
- .maxlen = 0,
- .mode = 0555,
- .child = i915_root,
- },
- {}
-};
-
static void oa_init_supported_formats(struct i915_perf *perf)
{
struct drm_i915_private *i915 = perf->i915;
@@ -4443,7 +4423,7 @@ void i915_perf_init(struct drm_i915_private *i915)
mutex_init(&perf->lock);
/* Choose a representative limit */
- oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
+ oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;
mutex_init(&perf->metrics_lock);
idr_init_base(&perf->metrics_idr, 1);
@@ -4488,7 +4468,7 @@ static int destroy_config(int id, void *p, void *data)
int i915_perf_sysctl_register(void)
{
- sysctl_header = register_sysctl_table(dev_root);
+ sysctl_header = register_sysctl("dev/i915", oa_table);
return 0;
}
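/*
 * A minimal sketch of the register_sysctl() pattern the hunk above moves
 * to (hedged: names outside the hunk are assumed): the path argument
 * replaces the hand-built "dev" -> "i915" parent tables, and the kernel
 * creates the intermediate directories on demand.
 *
 *	static struct ctl_table_header *hdr;
 *
 *	hdr = register_sysctl("dev/i915", oa_table);	- leaf table only
 *	...
 *	if (hdr)
 *		unregister_sysctl_table(hdr);		- teardown, as before
 */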
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 0b488d49694c..ea655161793e 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -210,8 +210,8 @@ static void init_rc6(struct i915_pmu *pmu)
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref) {
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915));
pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
pmu->sample[__I915_SAMPLE_RC6].cur;
pmu->sleep_last = ktime_get_raw();
@@ -222,7 +222,7 @@ static void park_rc6(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915));
pmu->sleep_last = ktime_get_raw();
}
@@ -419,7 +419,7 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
struct i915_pmu *pmu = &i915->pmu;
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
unsigned int period_ns;
ktime_t now;
@@ -476,7 +476,7 @@ engine_event_status(struct intel_engine_cs *engine,
static int
config_status(struct drm_i915_private *i915, u64 config)
{
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
switch (config) {
case I915_PMU_ACTUAL_FREQUENCY:
@@ -601,10 +601,10 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
val = READ_ONCE(pmu->irq_count);
break;
case I915_PMU_RC6_RESIDENCY:
- val = get_rc6(&i915->gt);
+ val = get_rc6(to_gt(i915));
break;
case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
- val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt));
+ val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915)));
break;
}
}
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 51b368be0fc4..2dfbc22857a3 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -31,7 +31,7 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
- const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu;
+ const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
int ret;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bcee121bec5a..c32420cb8ed5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -371,6 +371,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VLV_G3DCTL _MMIO(0x9024)
#define VLV_GSCKGCTL _MMIO(0x9028)
+#define FBC_LLC_READ_CTRL _MMIO(0x9044)
+#define FBC_LLC_FULLY_OPEN REG_BIT(30)
+
#define GEN6_MBCTL _MMIO(0x0907c)
#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
@@ -498,6 +501,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOBITS_PPGTT_CACHE64B (3 << 8)
#define ECOBITS_PPGTT_CACHE4B (0 << 8)
+#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54)
+#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
+#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
+
+#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c)
+#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12)
+#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11)
+#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7)
+
+#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28)
+#define FORCE_MISS_FTLB REG_BIT(3)
+
#define GAB_CTL _MMIO(0x24000)
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
@@ -719,6 +734,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN12_OA_TLB_INV_CR _MMIO(0xceec)
+#define GEN12_SQCM _MMIO(0x8724)
+#define EN_32B_ACCESS REG_BIT(30)
+
/* Gen12 OAR unit */
#define GEN12_OAR_OACONTROL _MMIO(0x2960)
#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
@@ -770,6 +788,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define EU_PERF_CNTL5 _MMIO(0xe55c)
#define EU_PERF_CNTL6 _MMIO(0xe65c)
+#define RT_CTRL _MMIO(0xe530)
+#define DIS_NULL_QUERY REG_BIT(10)
+
/*
* OA Boolean state
*/
@@ -2244,6 +2265,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9)
#define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8)
#define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5)
+#define SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL REG_BIT(0)
#define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008)
#define SNPS_PHY_MPLLB_FRACN_EN REG_BIT(31)
@@ -2662,6 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
+#define GUCPMTIMESTAMP _MMIO(0xC3E8)
+
/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4)
@@ -2697,6 +2721,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
+#define GEN8_RTCR _MMIO(0x4260)
+#define GEN8_M1TCR _MMIO(0x4264)
+#define GEN8_M2TCR _MMIO(0x4268)
+#define GEN8_BTCR _MMIO(0x426c)
+#define GEN8_VTCR _MMIO(0x4270)
+
#if 0
#define PRB0_TAIL _MMIO(0x2030)
#define PRB0_HEAD _MMIO(0x2034)
@@ -2772,6 +2802,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10)
#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
+#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18)
+#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
+
#define ERROR_GEN6 _MMIO(0x40a0)
#define GEN7_ERR_INT _MMIO(0x44040)
#define ERR_INT_POISON (1 << 31)
@@ -2792,15 +2825,20 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define FAULT_VA_HIGH_BITS (0xf << 0)
#define FAULT_GTT_SEL (1 << 4)
+#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
+#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc)
+#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0)
+#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4)
+
#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
#define FPGA_DBG _MMIO(0x42300)
-#define FPGA_DBG_RM_NOCLAIM (1 << 31)
+#define FPGA_DBG_RM_NOCLAIM REG_BIT(31)
#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
-#define CLAIM_ER_CLR (1 << 31)
-#define CLAIM_ER_OVERFLOW (1 << 16)
-#define CLAIM_ER_CTR_MASK 0xffff
+#define CLAIM_ER_CLR REG_BIT(31)
+#define CLAIM_ER_OVERFLOW REG_BIT(16)
+#define CLAIM_ER_CTR_MASK REG_GENMASK(15, 0)
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
@@ -2870,6 +2908,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
+#define SCCGCTL94DC _MMIO(0x94dc)
+#define CG3DDISURB REG_BIT(14)
+
+#define MLTICTXCTL _MMIO(0xb170)
+#define TDONRENDER REG_BIT(2)
+
+#define L3SQCREG1_CCS0 _MMIO(0xb200)
+#define FLUSHALLNONCOH REG_BIT(5)
+
/* WaClearTdlStateAckDirtyBits */
#define GEN8_STATE_ACK _MMIO(0x20F0)
#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
@@ -3106,7 +3153,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
-#define FLOAT_BLEND_OPTIMIZATION_ENABLE (1 << 4)
+#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
+#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4)
/* Fuse readout registers for GT */
#define HSW_PAVP_FUSE1 _MMIO(0x911C)
@@ -3307,93 +3355,98 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
#define FBC_CONTROL _MMIO(0x3208)
-#define FBC_CTL_EN REG_BIT(31)
-#define FBC_CTL_PERIODIC REG_BIT(30)
-#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
-#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
-#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
-#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
-#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm */
-#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
-#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
-#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
-#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
+#define FBC_CTL_EN REG_BIT(31)
+#define FBC_CTL_PERIODIC REG_BIT(30)
+#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
+#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
+#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
+#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
+#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */
+#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
+#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
+#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
#define FBC_COMMAND _MMIO(0x320c)
-#define FBC_CMD_COMPRESS (1 << 0)
+#define FBC_CMD_COMPRESS REG_BIT(0)
#define FBC_STATUS _MMIO(0x3210)
-#define FBC_STAT_COMPRESSING (1 << 31)
-#define FBC_STAT_COMPRESSED (1 << 30)
-#define FBC_STAT_MODIFIED (1 << 29)
-#define FBC_STAT_CURRENT_LINE_SHIFT (0)
-#define FBC_CONTROL2 _MMIO(0x3214)
-#define FBC_CTL_FENCE_DBL (0 << 4)
-#define FBC_CTL_IDLE_IMM (0 << 2)
-#define FBC_CTL_IDLE_FULL (1 << 2)
-#define FBC_CTL_IDLE_LINE (2 << 2)
-#define FBC_CTL_IDLE_DEBUG (3 << 2)
-#define FBC_CTL_CPU_FENCE (1 << 1)
-#define FBC_CTL_PLANE(plane) ((plane) << 0)
-#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
-#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
+#define FBC_STAT_COMPRESSING REG_BIT(31)
+#define FBC_STAT_COMPRESSED REG_BIT(30)
+#define FBC_STAT_MODIFIED REG_BIT(29)
+#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0)
+#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */
+#define FBC_CTL_FENCE_DBL REG_BIT(4)
+#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2)
+#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0)
+#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1)
+#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2)
+#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3)
+#define FBC_CTL_CPU_FENCE_EN REG_BIT(1)
+#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0)
+#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane))
+#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */
+#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */
+#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1)
+#define FBC_MOD_NUM_VALID REG_BIT(0)
+#define FBC_TAG(i)		_MMIO(0x3300 + (i) * 4) /* 49 registers */
+#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */
+#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0)
+#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1)
+#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2)
+#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3)
#define FBC_LL_SIZE (1536)
-#define FBC_LLC_READ_CTRL _MMIO(0x9044)
-#define FBC_LLC_FULLY_OPEN (1 << 30)
-
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE _MMIO(0x3200)
+#define ILK_DPFC_CB_BASE _MMIO(0x43200)
#define DPFC_CONTROL _MMIO(0x3208)
-#define DPFC_CTL_EN (1 << 31)
-#define DPFC_CTL_PLANE(plane) ((plane) << 30)
-#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29)
-#define DPFC_CTL_FENCE_EN (1 << 29)
-#define IVB_DPFC_CTL_FENCE_EN (1 << 28)
-#define DPFC_CTL_PERSISTENT_MODE (1 << 25)
-#define DPFC_SR_EN (1 << 10)
-#define DPFC_CTL_LIMIT_1X (0 << 6)
-#define DPFC_CTL_LIMIT_2X (1 << 6)
-#define DPFC_CTL_LIMIT_4X (2 << 6)
+#define ILK_DPFC_CONTROL _MMIO(0x43208)
+#define DPFC_CTL_EN REG_BIT(31)
+#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */
+#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane))
+#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */
+#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */
+#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane))
+#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */
+#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */
+#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */
+#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */
+#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */
+#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6)
+#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0)
+#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1)
+#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2)
+#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence))
#define DPFC_RECOMP_CTL _MMIO(0x320c)
-#define DPFC_RECOMP_STALL_EN (1 << 27)
-#define DPFC_RECOMP_STALL_WM_SHIFT (16)
-#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
-#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
-#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
+#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
+#define DPFC_RECOMP_STALL_EN REG_BIT(27)
+#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16)
+#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0)
#define DPFC_STATUS _MMIO(0x3210)
-#define DPFC_INVAL_SEG_SHIFT (16)
-#define DPFC_INVAL_SEG_MASK (0x07ff0000)
-#define DPFC_COMP_SEG_SHIFT (0)
-#define DPFC_COMP_SEG_MASK (0x000007ff)
+#define ILK_DPFC_STATUS _MMIO(0x43210)
+#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16)
+#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0)
#define DPFC_STATUS2 _MMIO(0x3214)
+#define ILK_DPFC_STATUS2 _MMIO(0x43214)
+#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0)
#define DPFC_FENCE_YOFF _MMIO(0x3218)
-#define DPFC_CHICKEN _MMIO(0x3224)
-#define DPFC_HT_MODIFY (1 << 31)
-
-/* Framebuffer compression for Ironlake */
-#define ILK_DPFC_CB_BASE _MMIO(0x43200)
-#define ILK_DPFC_CONTROL _MMIO(0x43208)
-#define FBC_CTL_FALSE_COLOR (1 << 10)
-/* The bit 28-8 is reserved */
-#define DPFC_RESERVED (0x1FFFFF00)
-#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
-#define ILK_DPFC_STATUS _MMIO(0x43210)
-#define ILK_DPFC_COMP_SEG_MASK 0x7ff
-#define IVB_FBC_STATUS2 _MMIO(0x43214)
-#define IVB_FBC_COMP_SEG_MASK 0x7ff
-#define BDW_FBC_COMP_SEG_MASK 0xfff
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
+#define DPFC_CHICKEN _MMIO(0x3224)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
-#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
-#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14)
-#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
+#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
+#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
+#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
+#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
+
#define GLK_FBC_STRIDE _MMIO(0x43228)
#define FBC_STRIDE_OVERRIDE REG_BIT(15)
#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
+
#define ILK_FBC_RT_BASE _MMIO(0x2128)
-#define ILK_FBC_RT_VALID (1 << 0)
-#define SNB_FBC_FRONT_BUFFER (1 << 1)
+#define ILK_FBC_RT_VALID REG_BIT(0)
+#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
#define ILK_FBCQ_DIS (1 << 22)
@@ -3417,8 +3470,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* The following two registers are of type GTTMMADR
*/
#define SNB_DPFC_CTL_SA _MMIO(0x100100)
-#define SNB_CPU_FENCE_ENABLE (1 << 29)
-#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
+#define SNB_DPFC_FENCE_EN REG_BIT(29)
+#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0)
+#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence))
+#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE _MMIO(0x7020)
@@ -3428,8 +3483,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IPS_ENABLE (1 << 31)
#define MSG_FBC_REND_STATE _MMIO(0x50380)
-#define FBC_REND_NUKE (1 << 2)
-#define FBC_REND_CACHE_CLEAN (1 << 1)
+#define FBC_REND_NUKE REG_BIT(2)
+#define FBC_REND_CACHE_CLEAN REG_BIT(1)
/*
* GPIO regs
@@ -4278,21 +4333,62 @@ enum {
/*
* GEN10 clock gating regs
*/
+
+#define UNSLCGCTL9440 _MMIO(0x9440)
+#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
+#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
+#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
+#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
+
+#define UNSLCGCTL9444 _MMIO(0x9444)
+#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
+#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
+#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
+#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
+#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
+#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
+#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
+#define LTCDD_CLKGATE_DIS REG_BIT(10)
+
#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
#define MSCUNIT_CLKGATE_DIS (1 << 10)
+#define NODEDSS_CLKGATE_DIS REG_BIT(12)
#define L3_CLKGATE_DIS REG_BIT(16)
#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
-#define GWUNIT_CLKGATE_DIS (1 << 16)
+#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28)
+#define GWUNIT_CLKGATE_DIS REG_BIT(16)
#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
+#define SSMCGCTL9530 _MMIO(0x9530)
+#define RTFUNIT_CLKGATE_DIS REG_BIT(18)
+
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
#define VFUNIT_CLKGATE_DIS REG_BIT(20)
+#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
+#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
+#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
#define HSUNIT_CLKGATE_DIS REG_BIT(8)
#define VSUNIT_CLKGATE_DIS REG_BIT(3)
@@ -4309,47 +4405,52 @@ enum {
/* Pipe A CRC regs */
#define _PIPE_CRC_CTL_A 0x60050
-#define PIPE_CRC_ENABLE (1 << 31)
+#define PIPE_CRC_ENABLE REG_BIT(31)
/* skl+ source selection */
-#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28)
-#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28)
-#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28)
-#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28)
-#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28)
-#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28)
-#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28)
-#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28)
+#define PIPE_CRC_SOURCE_MASK_SKL REG_GENMASK(30, 28)
+#define PIPE_CRC_SOURCE_PLANE_1_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 0)
+#define PIPE_CRC_SOURCE_PLANE_2_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 2)
+#define PIPE_CRC_SOURCE_DMUX_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 4)
+#define PIPE_CRC_SOURCE_PLANE_3_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 6)
+#define PIPE_CRC_SOURCE_PLANE_4_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 7)
+#define PIPE_CRC_SOURCE_PLANE_5_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 5)
+#define PIPE_CRC_SOURCE_PLANE_6_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 3)
+#define PIPE_CRC_SOURCE_PLANE_7_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 1)
/* ivb+ source selection */
-#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
-#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
-#define PIPE_CRC_SOURCE_PF_IVB (2 << 29)
+#define PIPE_CRC_SOURCE_MASK_IVB REG_GENMASK(30, 29)
+#define PIPE_CRC_SOURCE_PRIMARY_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 0)
+#define PIPE_CRC_SOURCE_SPRITE_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 1)
+#define PIPE_CRC_SOURCE_PF_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 2)
/* ilk+ source selection */
-#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28)
-#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28)
-#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28)
-/* embedded DP port on the north display block, reserved on ivb */
-#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28)
-#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */
+#define PIPE_CRC_SOURCE_MASK_ILK REG_GENMASK(30, 28)
+#define PIPE_CRC_SOURCE_PRIMARY_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 0)
+#define PIPE_CRC_SOURCE_SPRITE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 1)
+#define PIPE_CRC_SOURCE_PIPE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 2)
+/* embedded DP port on the north display block */
+#define PIPE_CRC_SOURCE_PORT_A_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 4)
+#define PIPE_CRC_SOURCE_FDI_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 5)
/* vlv source selection */
-#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27)
-#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27)
-#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27)
+#define PIPE_CRC_SOURCE_MASK_VLV REG_GENMASK(30, 27)
+#define PIPE_CRC_SOURCE_PIPE_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 0)
+#define PIPE_CRC_SOURCE_HDMIB_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 1)
+#define PIPE_CRC_SOURCE_HDMIC_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 2)
/* with DP port the pipe source is invalid */
-#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27)
-#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27)
-#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27)
+#define PIPE_CRC_SOURCE_DP_D_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 3)
+#define PIPE_CRC_SOURCE_DP_B_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 6)
+#define PIPE_CRC_SOURCE_DP_C_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 7)
/* gen3+ source selection */
-#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28)
-#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28)
-#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28)
+#define PIPE_CRC_SOURCE_MASK_I9XX REG_GENMASK(30, 28)
+#define PIPE_CRC_SOURCE_PIPE_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 0)
+#define PIPE_CRC_SOURCE_SDVOB_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 1)
+#define PIPE_CRC_SOURCE_SDVOC_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 2)
/* with DP/TV port the pipe source is invalid */
-#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28)
-#define PIPE_CRC_SOURCE_TV_PRE (4 << 28)
-#define PIPE_CRC_SOURCE_TV_POST (5 << 28)
-#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28)
-#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28)
+#define PIPE_CRC_SOURCE_DP_D_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 3)
+#define PIPE_CRC_SOURCE_TV_PRE REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 4)
+#define PIPE_CRC_SOURCE_TV_POST REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 5)
+#define PIPE_CRC_SOURCE_DP_B_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 6)
+#define PIPE_CRC_SOURCE_DP_C_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 7)
/* gen2 doesn't have source selection bits */
-#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
+#define PIPE_CRC_INCLUDE_BORDER_I8XX REG_BIT(30)
#define _PIPE_CRC_RES_1_A_IVB 0x60064
#define _PIPE_CRC_RES_2_A_IVB 0x60068
@@ -4698,11 +4799,11 @@ enum {
#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
#define PSR_EVENT_PSR_DISABLE (1 << 0)
-#define _PSR2_STATUS_A 0x60940
-#define _PSR2_STATUS_EDP 0x6f940
-#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
-#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
-#define EDP_PSR2_STATUS_STATE_SHIFT 28
+#define _PSR2_STATUS_A 0x60940
+#define _PSR2_STATUS_EDP 0x6f940
+#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
+#define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28)
+#define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8)
#define _PSR2_SU_STATUS_A 0x60914
#define _PSR2_SU_STATUS_EDP 0x6f914
@@ -4999,9 +5100,9 @@ enum {
#define PORT_DFT2_G4X _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
-#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
-#define PIPE_B_SCRAMBLE_RESET (1 << 1)
-#define PIPE_A_SCRAMBLE_RESET (1 << 0)
+#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */
+#define PIPE_B_SCRAMBLE_RESET REG_BIT(1)
+#define PIPE_A_SCRAMBLE_RESET REG_BIT(0)
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
@@ -6266,55 +6367,55 @@ enum {
#define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26)
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
-#define PIPEB_LINE_COMPARE_INT_EN (1 << 29)
-#define PIPEB_HLINE_INT_EN (1 << 28)
-#define PIPEB_VBLANK_INT_EN (1 << 27)
-#define SPRITED_FLIP_DONE_INT_EN (1 << 26)
-#define SPRITEC_FLIP_DONE_INT_EN (1 << 25)
-#define PLANEB_FLIP_DONE_INT_EN (1 << 24)
-#define PIPE_PSR_INT_EN (1 << 22)
-#define PIPEA_LINE_COMPARE_INT_EN (1 << 21)
-#define PIPEA_HLINE_INT_EN (1 << 20)
-#define PIPEA_VBLANK_INT_EN (1 << 19)
-#define SPRITEB_FLIP_DONE_INT_EN (1 << 18)
-#define SPRITEA_FLIP_DONE_INT_EN (1 << 17)
-#define PLANEA_FLIPDONE_INT_EN (1 << 16)
-#define PIPEC_LINE_COMPARE_INT_EN (1 << 13)
-#define PIPEC_HLINE_INT_EN (1 << 12)
-#define PIPEC_VBLANK_INT_EN (1 << 11)
-#define SPRITEF_FLIPDONE_INT_EN (1 << 10)
-#define SPRITEE_FLIPDONE_INT_EN (1 << 9)
-#define PLANEC_FLIPDONE_INT_EN (1 << 8)
+#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29)
+#define PIPEB_HLINE_INT_EN REG_BIT(28)
+#define PIPEB_VBLANK_INT_EN REG_BIT(27)
+#define SPRITED_FLIP_DONE_INT_EN REG_BIT(26)
+#define SPRITEC_FLIP_DONE_INT_EN REG_BIT(25)
+#define PLANEB_FLIP_DONE_INT_EN REG_BIT(24)
+#define PIPE_PSR_INT_EN REG_BIT(22)
+#define PIPEA_LINE_COMPARE_INT_EN REG_BIT(21)
+#define PIPEA_HLINE_INT_EN REG_BIT(20)
+#define PIPEA_VBLANK_INT_EN REG_BIT(19)
+#define SPRITEB_FLIP_DONE_INT_EN REG_BIT(18)
+#define SPRITEA_FLIP_DONE_INT_EN REG_BIT(17)
+#define PLANEA_FLIPDONE_INT_EN REG_BIT(16)
+#define PIPEC_LINE_COMPARE_INT_EN REG_BIT(13)
+#define PIPEC_HLINE_INT_EN REG_BIT(12)
+#define PIPEC_VBLANK_INT_EN REG_BIT(11)
+#define SPRITEF_FLIPDONE_INT_EN REG_BIT(10)
+#define SPRITEE_FLIPDONE_INT_EN REG_BIT(9)
+#define PLANEC_FLIPDONE_INT_EN REG_BIT(8)
#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define SPRITEF_INVALID_GTT_INT_EN (1 << 27)
-#define SPRITEE_INVALID_GTT_INT_EN (1 << 26)
-#define PLANEC_INVALID_GTT_INT_EN (1 << 25)
-#define CURSORC_INVALID_GTT_INT_EN (1 << 24)
-#define CURSORB_INVALID_GTT_INT_EN (1 << 23)
-#define CURSORA_INVALID_GTT_INT_EN (1 << 22)
-#define SPRITED_INVALID_GTT_INT_EN (1 << 21)
-#define SPRITEC_INVALID_GTT_INT_EN (1 << 20)
-#define PLANEB_INVALID_GTT_INT_EN (1 << 19)
-#define SPRITEB_INVALID_GTT_INT_EN (1 << 18)
-#define SPRITEA_INVALID_GTT_INT_EN (1 << 17)
-#define PLANEA_INVALID_GTT_INT_EN (1 << 16)
-#define DPINVGTT_EN_MASK 0xff0000
-#define DPINVGTT_EN_MASK_CHV 0xfff0000
-#define SPRITEF_INVALID_GTT_STATUS (1 << 11)
-#define SPRITEE_INVALID_GTT_STATUS (1 << 10)
-#define PLANEC_INVALID_GTT_STATUS (1 << 9)
-#define CURSORC_INVALID_GTT_STATUS (1 << 8)
-#define CURSORB_INVALID_GTT_STATUS (1 << 7)
-#define CURSORA_INVALID_GTT_STATUS (1 << 6)
-#define SPRITED_INVALID_GTT_STATUS (1 << 5)
-#define SPRITEC_INVALID_GTT_STATUS (1 << 4)
-#define PLANEB_INVALID_GTT_STATUS (1 << 3)
-#define SPRITEB_INVALID_GTT_STATUS (1 << 2)
-#define SPRITEA_INVALID_GTT_STATUS (1 << 1)
-#define PLANEA_INVALID_GTT_STATUS (1 << 0)
-#define DPINVGTT_STATUS_MASK 0xff
-#define DPINVGTT_STATUS_MASK_CHV 0xfff
+#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
+#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
+#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27)
+#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26)
+#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25)
+#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24)
+#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23)
+#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22)
+#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21)
+#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20)
+#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19)
+#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18)
+#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17)
+#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16)
+#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0)
+#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0)
+#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11)
+#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10)
+#define PLANEC_INVALID_GTT_STATUS REG_BIT(9)
+#define CURSORC_INVALID_GTT_STATUS REG_BIT(8)
+#define CURSORB_INVALID_GTT_STATUS REG_BIT(7)
+#define CURSORA_INVALID_GTT_STATUS REG_BIT(6)
+#define SPRITED_INVALID_GTT_STATUS REG_BIT(5)
+#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4)
+#define PLANEB_INVALID_GTT_STATUS REG_BIT(3)
+#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2)
+#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
+#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
#define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -6877,7 +6978,7 @@ enum {
#define DVS_SOURCE_KEY (1 << 22)
#define DVS_RGB_ORDER_XBGR (1 << 20)
#define DVS_YUV_FORMAT_BT709 (1 << 18)
-#define DVS_YUV_BYTE_ORDER_MASK (3 << 16)
+#define DVS_YUV_ORDER_MASK (3 << 16)
#define DVS_YUV_ORDER_YUYV (0 << 16)
#define DVS_YUV_ORDER_UYVY (1 << 16)
#define DVS_YUV_ORDER_YVYU (2 << 16)
@@ -6956,7 +7057,7 @@ enum {
#define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */
#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19)
#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */
-#define SPRITE_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SPRITE_YUV_ORDER_MASK (3 << 16)
#define SPRITE_YUV_ORDER_YUYV (0 << 16)
#define SPRITE_YUV_ORDER_UYVY (1 << 16)
#define SPRITE_YUV_ORDER_YVYU (2 << 16)
@@ -7041,7 +7142,7 @@ enum {
#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */
#define SP_SOURCE_KEY (1 << 22)
#define SP_YUV_FORMAT_BT709 (1 << 18)
-#define SP_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SP_YUV_ORDER_MASK (3 << 16)
#define SP_YUV_ORDER_YUYV (0 << 16)
#define SP_YUV_ORDER_UYVY (1 << 16)
#define SP_YUV_ORDER_YVYU (2 << 16)
@@ -7182,10 +7283,10 @@ enum {
#define PLANE_CTL_YUV420_Y_PLANE (1 << 19)
#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
-#define PLANE_CTL_YUV422_YUYV (0 << 16)
-#define PLANE_CTL_YUV422_UYVY (1 << 16)
-#define PLANE_CTL_YUV422_YVYU (2 << 16)
-#define PLANE_CTL_YUV422_VYUY (3 << 16)
+#define PLANE_CTL_YUV422_ORDER_YUYV (0 << 16)
+#define PLANE_CTL_YUV422_ORDER_UYVY (1 << 16)
+#define PLANE_CTL_YUV422_ORDER_YVYU (2 << 16)
+#define PLANE_CTL_YUV422_ORDER_VYUY (3 << 16)
#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
#define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */
@@ -7239,10 +7340,10 @@ enum {
#define _PLANE_CUS_CTL_1_A 0x701c8
#define _PLANE_CUS_CTL_2_A 0x702c8
#define PLANE_CUS_ENABLE (1 << 31)
-#define PLANE_CUS_PLANE_4_RKL (0 << 30)
-#define PLANE_CUS_PLANE_5_RKL (1 << 30)
-#define PLANE_CUS_PLANE_6 (0 << 30)
-#define PLANE_CUS_PLANE_7 (1 << 30)
+#define PLANE_CUS_Y_PLANE_4_RKL (0 << 30)
+#define PLANE_CUS_Y_PLANE_5_RKL (1 << 30)
+#define PLANE_CUS_Y_PLANE_6_ICL (0 << 30)
+#define PLANE_CUS_Y_PLANE_7_ICL (1 << 30)
#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19)
#define PLANE_CUS_HPHASE_0 (0 << 16)
#define PLANE_CUS_HPHASE_0_25 (1 << 16)
@@ -7274,12 +7375,12 @@ enum {
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
-#define _PLANE_CC_VAL_1_B 0x711b4
-#define _PLANE_CC_VAL_2_B 0x712b4
-#define _PLANE_CC_VAL_1(pipe) _PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B)
-#define _PLANE_CC_VAL_2(pipe) _PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B)
-#define PLANE_CC_VAL(pipe, plane) \
- _MMIO_PLANE(plane, _PLANE_CC_VAL_1(pipe), _PLANE_CC_VAL_2(pipe))
+#define _PLANE_CC_VAL_1_B 0x711b4
+#define _PLANE_CC_VAL_2_B 0x712b4
+#define _PLANE_CC_VAL_1(pipe, dw) (_PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B) + (dw) * 4)
+#define _PLANE_CC_VAL_2(pipe, dw) (_PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B) + (dw) * 4)
+#define PLANE_CC_VAL(pipe, plane, dw) \
+ _MMIO_PLANE((plane), _PLANE_CC_VAL_1((pipe), (dw)), _PLANE_CC_VAL_2((pipe), (dw)))
/* Input CSC Register Definitions */
#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
@@ -8263,7 +8364,7 @@ enum {
/*
* The below are numbered starting from "S1" on gen11/gen12, but starting
- * with gen13 display, the bspec switches to a 0-based numbering scheme
+ * with display 13, the bspec switches to a 0-based numbering scheme
* (although the addresses stay the same so new S0 = old S1, new S1 = old S2).
* We'll just use the 0-based numbering here for all platforms since it's the
* way things will be named by the hardware team going forward, plus it's more
@@ -8308,9 +8409,10 @@ enum {
#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
-#define ICL_DELAY_PMRSP (1 << 22)
-#define MASK_WAKEMEM (1 << 13)
+#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
+#define ICL_DELAY_PMRSP REG_BIT(22)
+#define DISABLE_FLR_SRC REG_BIT(15)
+#define MASK_WAKEMEM REG_BIT(13)
#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
@@ -8351,6 +8453,9 @@ enum {
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20EC)
+#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0)
+
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
@@ -8374,9 +8479,10 @@ enum {
#define GEN8_ERRDETBCTRL (1 << 9)
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
- #define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
- #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
- #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
+#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
+#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12)
+#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
+#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
#define HIZ_CHICKEN _MMIO(0x7018)
# define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15)
@@ -8430,6 +8536,12 @@ enum {
#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21)
#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
+#define GEN11_L3SQCREG5 _MMIO(0xb158)
+#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
+
+#define XEHP_L3SCQREG7 _MMIO(0xb188)
+#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
+
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
#define ICL_HDC_MODE _MMIO(0xE5F4)
@@ -8440,6 +8552,12 @@ enum {
#define HDC_FORCE_NON_COHERENT (1 << 4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
+#define GEN12_HDC_CHICKEN0 _MMIO(0xE5F0)
+#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
+
+#define SARB_CHICKEN1 _MMIO(0xe90c)
+#define COMP_CKN_IN REG_GENMASK(30, 29)
+
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
/* GEN9 chicken */
@@ -8467,8 +8585,13 @@ enum {
_PIPEB_CHICKEN)
#define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30)
#define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30)
-#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
-#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
+#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15)
+#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12)
+#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7)
+
+#define VFLSKPD _MMIO(0x62a8)
+#define DIS_OVER_FETCH_CACHE REG_BIT(1)
+#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
#define FF_MODE2 _MMIO(0x6604)
#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24)
@@ -9293,6 +9416,9 @@ enum {
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14)
#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
+#define UNSLCGCTL9430 _MMIO(0x9430)
+#define MSQDUNIT_CLKGATE_DIS REG_BIT(3)
+
#define GEN6_GFXPAUSE _MMIO(0xA000)
#define GEN6_RPNSWREQ _MMIO(0xA008)
#define GEN6_TURBO_DISABLE (1 << 31)
@@ -9302,6 +9428,7 @@ enum {
#define GEN6_OFFSET(x) ((x) << 19)
#define GEN6_AGGRESSIVE_TURBO (0 << 15)
#define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23
+#define GEN9_IGNORE_SLICE_RATIO (0 << 0)
#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
#define GEN6_RC_CONTROL _MMIO(0xA090)
@@ -9337,6 +9464,9 @@ enum {
#define GEN6_RP_UP_BUSY_CONT (0x4 << 3)
#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0)
+#define GEN6_RPSWCTL_SHIFT 9
+#define GEN9_RPSWCTL_ENABLE (0x2 << GEN6_RPSWCTL_SHIFT)
+#define GEN9_RPSWCTL_DISABLE (0x0 << GEN6_RPSWCTL_SHIFT)
#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
@@ -9608,24 +9738,39 @@ enum {
#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
-#define FLOW_CONTROL_ENABLE (1 << 15)
-#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8)
-#define STALL_DOP_GATING_DISABLE (1 << 5)
-#define THROTTLE_12_5 (7 << 2)
-#define DISABLE_EARLY_EOT (1 << 1)
+#define FLOW_CONTROL_ENABLE REG_BIT(15)
+#define UGM_BACKUP_MODE REG_BIT(13)
+#define MDQ_ARBITRATION_MODE REG_BIT(12)
+#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8)
+#define STALL_DOP_GATING_DISABLE REG_BIT(5)
+#define THROTTLE_12_5 REG_GENMASK(4, 2)
+#define DISABLE_EARLY_EOT REG_BIT(1)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN12_DISABLE_READ_SUPPRESSION REG_BIT(15)
#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
+#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12)
#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8)
+#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15)
+#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4)
+#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32)
+#define FORCE_SLM_FENCE_SCOPE_TO_TILE REG_BIT(42 - 32)
+#define FORCE_UGM_FENCE_SCOPE_TO_TILE REG_BIT(41 - 32)
+#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32)
+#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32)
+
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1 << 0)
#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
-#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
-#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
-#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
+#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
+#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
+#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -9640,9 +9785,10 @@ enum {
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
-#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1 << 8)
-#define GEN9_ENABLE_YV12_BUGFIX (1 << 4)
-#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
+#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15)
+#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8)
+#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4)
+#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2)
/* Audio */
#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
@@ -9781,6 +9927,10 @@ enum {
#define AUD_PIN_BUF_CTL _MMIO(0x48414)
#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+#define AUD_TS_CDCLK_M _MMIO(0x65ea0)
+#define AUD_TS_CDCLK_M_EN REG_BIT(31)
+#define AUD_TS_CDCLK_N _MMIO(0x65ea4)
+
/* Display Audio Config Reg */
#define AUD_CONFIG_BE _MMIO(0x65ef0)
#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
@@ -10212,8 +10362,6 @@ enum skl_power_gate {
#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
-#define TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) (((val) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT)
-#define TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) ((((val) & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT) - 1)
#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
@@ -10523,6 +10671,14 @@ enum skl_power_gate {
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+/* CDCLK_SQUASH_CTL */
+#define CDCLK_SQUASH_CTL _MMIO(0x46008)
+#define CDCLK_SQUASH_ENABLE REG_BIT(31)
+#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24)
+#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x))
+#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0)
+#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x))
+
/* LCPLL_CTL */
#define LCPLL1_CTL _MMIO(0x46010)
#define LCPLL2_CTL _MMIO(0x46014)
@@ -11021,8 +11177,12 @@ enum skl_power_gate {
_DKL_PHY2_BASE) + \
_DKL_TX_DPCNTL1)
-#define _DKL_TX_DPCNTL2 0x2C8
-#define DKL_TX_DP20BITMODE (1 << 2)
+#define _DKL_TX_DPCNTL2 0x2C8
+#define DKL_TX_DP20BITMODE REG_BIT(2)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK REG_GENMASK(4, 3)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val))
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val))
#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
_DKL_PHY1_BASE, \
_DKL_PHY2_BASE) + \
@@ -12466,11 +12626,19 @@ enum skl_power_gate {
#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
#define PMFLUSHDONE_LNEBLK (1 << 22)
+#define XEHP_L3NODEARBCFG _MMIO(0xb0b4)
+#define XEHP_LNESPARE REG_BIT(19)
+
#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
#define GEN12_GSMBASE _MMIO(0x108100)
#define GEN12_DSMBASE _MMIO(0x1080C0)
+#define XEHP_CLOCK_GATE_DIS _MMIO(0x101014)
+#define SGSI_SIDECLK_DIS REG_BIT(17)
+#define SGGI_DIS REG_BIT(15)
+#define SGR_DIS REG_BIT(13)
+
/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
@@ -12847,4 +13015,7 @@ enum skl_power_gate {
#define CLKGATE_DIS_MISC _MMIO(0x46534)
#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
+#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731C)
+#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+
#endif /* _I915_REG_H_ */
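/*
 * For reference (a sketch, not part of this patch): the conversions above
 * rely on the helpers defined near the top of i915_reg.h, which are
 * roughly
 *
 *	REG_BIT(n)		((u32)BIT(n))
 *	REG_GENMASK(h, l)	((u32)GENMASK(h, l))
 *	REG_FIELD_PREP(mask, v)	((u32)FIELD_PREP(mask, v))
 *
 * plus compile-time range checks. The encodings are bit-identical to the
 * open-coded shifts they replace, e.g. FBC_CTL_IDLE_LINE ==
 * REG_FIELD_PREP(REG_GENMASK(3, 2), 2) == (2 << 2).
 */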
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 89cccefeea63..76cf5ac91e94 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -42,6 +42,7 @@
#include "gt/intel_rps.h"
#include "i915_active.h"
+#include "i915_deps.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_pm.h"
@@ -97,9 +98,9 @@ static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout)
{
- return i915_request_wait(to_request(fence),
- interruptible | I915_WAIT_PRIORITY,
- timeout);
+ return i915_request_wait_timeout(to_request(fence),
+ interruptible | I915_WAIT_PRIORITY,
+ timeout);
}
struct kmem_cache *i915_request_slab_cache(void)
@@ -114,6 +115,10 @@ static void i915_fence_release(struct dma_fence *fence)
GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
rq->guc_prio != GUC_PRIO_FINI);
+ i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
+ if (i915_vma_snapshot_present(&rq->batch_snapshot))
+ i915_vma_snapshot_put_onstack(&rq->batch_snapshot);
+
/*
* The request is put onto a RCU freelist (i.e. the address
* is immediately reused), mark the fences as being freed now.
@@ -187,19 +192,6 @@ void i915_request_notify_execute_cb_imm(struct i915_request *rq)
__notify_execute_cb(rq, irq_work_imm);
}
-static void free_capture_list(struct i915_request *request)
-{
- struct i915_capture_list *capture;
-
- capture = fetch_and_zero(&request->capture_list);
- while (capture) {
- struct i915_capture_list *next = capture->next;
-
- kfree(capture);
- capture = next;
- }
-}
-
static void __i915_request_fill(struct i915_request *rq, u8 val)
{
void *vaddr = rq->ring->vaddr;
@@ -304,6 +296,38 @@ static void __rq_cancel_watchdog(struct i915_request *rq)
i915_request_put(rq);
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+/**
+ * i915_request_free_capture_list - Free a capture list
+ * @capture: Pointer to the first list item or NULL
+ */
+void i915_request_free_capture_list(struct i915_capture_list *capture)
+{
+ while (capture) {
+ struct i915_capture_list *next = capture->next;
+
+ i915_vma_snapshot_put(capture->vma_snapshot);
+ kfree(capture);
+ capture = next;
+ }
+}
+
+#define assert_capture_list_is_null(_rq) GEM_BUG_ON((_rq)->capture_list)
+
+#define clear_capture_list(_rq) ((_rq)->capture_list = NULL)
+
+#else
+
+#define i915_request_free_capture_list(_a) do {} while (0)
+
+#define assert_capture_list_is_null(_a) do {} while (0)
+
+#define clear_capture_list(_rq) do {} while (0)
+
+#endif
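/*
 * Aside (an assumption, not stated in the patch): the disabled variants
 * are macros rather than empty inline functions because a macro never
 * expands its argument, so the call site above,
 *
 *	i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
 *
 * still compiles with CONFIG_DRM_I915_CAPTURE_ERROR=n, where the
 * capture_list field itself is compiled out of struct i915_request.
 */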
+
bool i915_request_retire(struct i915_request *rq)
{
if (!__i915_request_is_complete(rq))
@@ -340,7 +364,7 @@ bool i915_request_retire(struct i915_request *rq)
}
if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
- atomic_dec(&rq->engine->gt->rps.num_waiters);
+ intel_rps_dec_waiters(&rq->engine->gt->rps);
/*
* We only loosely track inflight requests across preemption,
@@ -360,7 +384,6 @@ bool i915_request_retire(struct i915_request *rq)
intel_context_exit(rq->context);
intel_context_unpin(rq->context);
- free_capture_list(rq);
i915_sched_node_fini(&rq->sched);
i915_request_put(rq);
@@ -720,7 +743,7 @@ void i915_request_cancel(struct i915_request *rq, int error)
intel_context_cancel_request(rq->context, rq);
}
-static int __i915_sw_fence_call
+static int
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_request *request =
@@ -756,7 +779,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
-static int __i915_sw_fence_call
+static int
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
@@ -830,11 +853,18 @@ static void __i915_request_ctor(void *arg)
i915_sw_fence_init(&rq->submit, submit_notify);
i915_sw_fence_init(&rq->semaphore, semaphore_notify);
- rq->capture_list = NULL;
+ clear_capture_list(rq);
+ rq->batch_snapshot.present = false;
init_llist_head(&rq->execute_cb);
}
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#define clear_batch_ptr(_rq) ((_rq)->batch = NULL)
+#else
+#define clear_batch_ptr(_a) do {} while (0)
+#endif
+
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
@@ -926,10 +956,11 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
i915_sched_node_reinit(&rq->sched);
/* No zalloc, everything must be cleared after use */
- rq->batch = NULL;
+ clear_batch_ptr(rq);
__rq_init_watchdog(rq);
- GEM_BUG_ON(rq->capture_list);
+ assert_capture_list_is_null(rq);
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
+ GEM_BUG_ON(i915_vma_snapshot_present(&rq->batch_snapshot));
/*
* Reserve space in the ring buffer for all the commands required to
@@ -1514,6 +1545,27 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
}
/**
+ * i915_request_await_deps - set this request to (async) wait upon a struct
+ * i915_deps dma_fence collection
+ * @rq: request we are wishing to use
+ * @deps: The struct i915_deps containing the dependencies.
+ *
+ * Returns 0 if successful, negative error code on error.
+ */
+int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
+{
+ int i, err;
+
+ for (i = 0; i < deps->num_deps; ++i) {
+ err = i915_request_await_dma_fence(rq, deps->fences[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
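/*
 * Usage sketch (hypothetical caller, not in this patch): await a
 * pre-collected dependency snapshot before emitting the request.
 *
 *	err = i915_request_await_deps(rq, deps);
 *	if (err) {
 *		i915_request_set_error_once(rq, err);
 *		goto out_add;
 *	}
 */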
+
+/**
* i915_request_await_object - set this request to (async) wait upon a bo
* @to: request we are wishing to use
* @obj: object which may be in use on another ring.
@@ -1858,23 +1910,27 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
}
/**
- * i915_request_wait - wait until execution of request has finished
+ * i915_request_wait_timeout - wait until execution of request has finished
* @rq: the request to wait upon
* @flags: how to wait
* @timeout: how long to wait in jiffies
*
- * i915_request_wait() waits for the request to be completed, for a
+ * i915_request_wait_timeout() waits for the request to be completed, for a
* maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
* unbounded wait).
*
* Returns the remaining time (in jiffies) if the request completed, which may
- * be zero or -ETIME if the request is unfinished after the timeout expires.
+ * be zero if the request is unfinished after the timeout expires.
+ * If the timeout is 0, it will return 1 if the fence is signaled.
+ *
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
* pending before the request completes.
+ *
+ * NOTE: This function has the same wait semantics as dma-fence.
*/
-long i915_request_wait(struct i915_request *rq,
- unsigned int flags,
- long timeout)
+long i915_request_wait_timeout(struct i915_request *rq,
+ unsigned int flags,
+ long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1884,7 +1940,7 @@ long i915_request_wait(struct i915_request *rq,
GEM_BUG_ON(timeout < 0);
if (dma_fence_is_signaled(&rq->fence))
- return timeout;
+ return timeout ?: 1;
if (!timeout)
return -ETIME;
@@ -1993,6 +2049,39 @@ out:
return timeout;
}
+/**
+ * i915_request_wait - wait until execution of request has finished
+ * @rq: the request to wait upon
+ * @flags: how to wait
+ * @timeout: how long to wait in jiffies
+ *
+ * i915_request_wait() waits for the request to be completed, for a
+ * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
+ * unbounded wait).
+ *
+ * Returns the remaining time (in jiffies) if the request completed, which may
+ * be zero or -ETIME if the request is unfinished after the timeout expires.
+ * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
+ * pending before the request completes.
+ *
+ * NOTE: This function behaves differently from dma-fence wait semantics for
+ * timeout = 0. It returns 0 on success, and -ETIME if not signaled.
+ */
+long i915_request_wait(struct i915_request *rq,
+ unsigned int flags,
+ long timeout)
+{
+ long ret = i915_request_wait_timeout(rq, flags, timeout);
+
+ if (!ret)
+ return -ETIME;
+
+ if (ret > 0 && !timeout)
+ return 0;
+
+ return ret;
+}
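/*
 * Semantics recap (derived only from the two kernel-docs above; T is the
 * requested timeout in jiffies):
 *
 *	                                 signaled	unsignaled, T elapsed
 *	i915_request_wait_timeout(T=0)   1		-ETIME
 *	i915_request_wait_timeout(T>0)   remaining	0
 *	i915_request_wait(T=0)           0		-ETIME
 *	i915_request_wait(T>0)           remaining	-ETIME
 */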
+
static int print_sched_attr(const struct i915_sched_attr *attr,
char *buf, int x, int len)
{
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index dc359242d1ae..170ee78c2858 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -40,19 +40,27 @@
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
+#include "i915_vma_snapshot.h"
#include <uapi/drm/i915_drm.h>
struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
+struct i915_deps;
struct i915_request;
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct i915_capture_list {
+ struct i915_vma_snapshot *vma_snapshot;
struct i915_capture_list *next;
- struct i915_vma *vma;
};
+void i915_request_free_capture_list(struct i915_capture_list *capture);
+#else
+#define i915_request_free_capture_list(_a) do {} while (0)
+#endif
+
#define RQ_TRACE(rq, fmt, ...) do { \
const struct i915_request *rq__ = (rq); \
ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \
@@ -289,10 +297,12 @@ struct i915_request {
/** Preallocate space in the ring for the emitting the request */
u32 reserved_space;
- /** Batch buffer related to this request if any (used for
- * error state dump only).
- */
- struct i915_vma *batch;
+ /** Batch buffer pointer for selftest internal use. */
+ I915_SELFTEST_DECLARE(struct i915_vma *batch);
+
+ struct i915_vma_snapshot batch_snapshot;
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/**
* Additional buffers requested by userspace to be captured upon
* a GPU hang. The vma/obj on this list are protected by their
@@ -300,6 +310,7 @@ struct i915_request {
* on the active_list (of their final request).
*/
struct i915_capture_list *capture_list;
+#endif
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
@@ -401,6 +412,7 @@ int i915_request_await_object(struct i915_request *to,
bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
struct dma_fence *fence);
+int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps);
int i915_request_await_execution(struct i915_request *rq,
struct dma_fence *fence);
@@ -414,6 +426,11 @@ void i915_request_unsubmit(struct i915_request *request);
void i915_request_cancel(struct i915_request *rq, int error);
+long i915_request_wait_timeout(struct i915_request *rq,
+ unsigned int flags,
+ long timeout)
+ __attribute__((nonnull(1)));
+
long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
@@ -642,7 +659,8 @@ i915_request_timeline(const struct i915_request *rq)
{
/* Valid only while the request is being constructed (or retired). */
return rcu_dereference_protected(rq->timeline,
- lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
+ lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
+ test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
}
static inline struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index 4a6712dca838..41f2adb6a583 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -41,8 +41,32 @@ bool i915_sg_trim(struct sg_table *orig_st)
return true;
}
+static void i915_refct_sgt_release(struct kref *ref)
+{
+ struct i915_refct_sgt *rsgt =
+ container_of(ref, typeof(*rsgt), kref);
+
+ sg_free_table(&rsgt->table);
+ kfree(rsgt);
+}
+
+static const struct i915_refct_sgt_ops rsgt_ops = {
+ .release = i915_refct_sgt_release
+};
+
+/**
+ * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops
+ * @rsgt: The struct i915_refct_sgt to initialize.
+ * @size: The size of the underlying memory buffer.
+ */
+void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
+{
+ __i915_refct_sgt_init(rsgt, size, &rsgt_ops);
+}
+
/**
- * i915_sg_from_mm_node - Create an sg_table from a struct drm_mm_node
+ * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct
+ * drm_mm_node
* @node: The drm_mm_node.
* @region_start: An offset to add to the dma addresses of the sg list.
*
@@ -50,25 +74,28 @@ bool i915_sg_trim(struct sg_table *orig_st)
* taking a maximum segment length into account, splitting into segments
* if necessary.
*
- * Return: A pointer to a kmalloced struct sg_table on success, negative
+ * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
* error code cast to an error pointer on failure.
*/
-struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
- u64 region_start)
+struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
+ u64 region_start)
{
const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
u64 segment_pages = max_segment >> PAGE_SHIFT;
u64 block_size, offset, prev_end;
+ struct i915_refct_sgt *rsgt;
struct sg_table *st;
struct scatterlist *sg;
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
+ rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
+ if (!rsgt)
return ERR_PTR(-ENOMEM);
+ i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
+ st = &rsgt->table;
if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
GFP_KERNEL)) {
- kfree(st);
+ i915_refct_sgt_put(rsgt);
return ERR_PTR(-ENOMEM);
}
@@ -104,11 +131,11 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
sg_mark_end(sg);
i915_sg_trim(st);
- return st;
+ return rsgt;
}
/**
- * i915_sg_from_buddy_resource - Create an sg_table from a struct
+ * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct
* i915_buddy_block list
* @res: The struct i915_ttm_buddy_resource.
* @region_start: An offset to add to the dma addresses of the sg list.
@@ -117,11 +144,11 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
* taking a maximum segment length into account, splitting into segments
* if necessary.
*
- * Return: A pointer to a kmalloced struct sg_table on success, negative
+ * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
* error code cast to an error pointer on failure.
*/
-struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res,
- u64 region_start)
+struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
+ u64 region_start)
{
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
const u64 size = res->num_pages << PAGE_SHIFT;
@@ -129,18 +156,21 @@ struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res,
struct i915_buddy_mm *mm = bman_res->mm;
struct list_head *blocks = &bman_res->blocks;
struct i915_buddy_block *block;
+ struct i915_refct_sgt *rsgt;
struct scatterlist *sg;
struct sg_table *st;
resource_size_t prev_end;
GEM_BUG_ON(list_empty(blocks));
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
+ rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
+ if (!rsgt)
return ERR_PTR(-ENOMEM);
+ i915_refct_sgt_init(rsgt, size);
+ st = &rsgt->table;
if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
- kfree(st);
+ i915_refct_sgt_put(rsgt);
return ERR_PTR(-ENOMEM);
}
@@ -181,7 +211,7 @@ struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res,
sg_mark_end(sg);
i915_sg_trim(st);
- return st;
+ return rsgt;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index b8bd5925b03f..12c6a1684081 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -144,10 +144,78 @@ static inline unsigned int i915_sg_segment_size(void)
bool i915_sg_trim(struct sg_table *orig_st);
-struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
- u64 region_start);
+/**
+ * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt
+ */
+struct i915_refct_sgt_ops {
+ /**
+ * release() - Free the memory of the struct i915_refct_sgt
+ * @ref: struct kref that is embedded in the struct i915_refct_sgt
+ */
+ void (*release)(struct kref *ref);
+};
+
+/**
+ * struct i915_refct_sgt - A refcounted scatter-gather table
+ * @kref: struct kref for refcounting
+ * @table: struct sg_table holding the scatter-gather table itself. Note that
+ * @table->sgl = NULL can be used to determine whether a scatter-gather table
+ * is present or not.
+ * @size: The size in bytes of the underlying memory buffer
+ * @ops: The operations structure.
+ */
+struct i915_refct_sgt {
+ struct kref kref;
+ struct sg_table table;
+ size_t size;
+ const struct i915_refct_sgt_ops *ops;
+};
+
+/**
+ * i915_refct_sgt_put - Put a refcounted sg-table
+ * @rsgt: The struct i915_refct_sgt to put.
+ */
+static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
+{
+ if (rsgt)
+ kref_put(&rsgt->kref, rsgt->ops->release);
+}
+
+/**
+ * i915_refct_sgt_get - Get a refcounted sg-table
+ * @rsgt: The struct i915_refct_sgt to get.
+ */
+static inline struct i915_refct_sgt *
+i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
+{
+ kref_get(&rsgt->kref);
+ return rsgt;
+}
+
+/**
+ * __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom
+ * operations structure
+ * @rsgt: The struct i915_refct_sgt to initialize.
+ * @size: Size in bytes of the underlying memory buffer.
+ * @ops: A customized operations structure in case the refcounted sg-list
+ * is embedded into another structure.
+ */
+static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
+ size_t size,
+ const struct i915_refct_sgt_ops *ops)
+{
+ kref_init(&rsgt->kref);
+ rsgt->table.sgl = NULL;
+ rsgt->size = size;
+ rsgt->ops = ops;
+}
+
+void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);
+
+struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
+ u64 region_start);
-struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res,
- u64 region_start);
+struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
+ u64 region_start);
#endif
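
A minimal usage sketch of the refcounted sg-table API declared above (not part of the patch; my_build_rsgt is a hypothetical caller):

	struct i915_refct_sgt *my_build_rsgt(const struct drm_mm_node *node)
	{
		struct i915_refct_sgt *rsgt;

		/* Returns a kref-initialized rsgt, or an ERR_PTR on failure. */
		rsgt = i915_rsgt_from_mm_node(node, 0);
		if (IS_ERR(rsgt))
			return rsgt;

		/* Each additional user takes its own reference... */
		i915_refct_sgt_get(rsgt);

		/* ...and drops it when done; the final put calls ops->release(). */
		i915_refct_sgt_put(rsgt);

		return rsgt;
	}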
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index c589a681da77..2a74a9a1cafe 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -18,7 +18,9 @@
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
+#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
static DEFINE_SPINLOCK(i915_sw_fence_lock);
+#endif
#define WQ_FLAG_BITS \
BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags))
@@ -34,7 +36,7 @@ enum {
static void *i915_sw_fence_debug_hint(void *addr)
{
- return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
+ return (void *)(((struct i915_sw_fence *)addr)->fn);
}
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
@@ -126,10 +128,7 @@ static inline void debug_fence_assert(struct i915_sw_fence *fence)
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
enum i915_sw_fence_notify state)
{
- i915_sw_fence_notify_t fn;
-
- fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
- return fn(fence, state);
+ return fence->fn(fence, state);
}
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
@@ -242,10 +241,13 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
const char *name,
struct lock_class_key *key)
{
- BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);
+ BUG_ON(!fn);
__init_waitqueue_head(&fence->wait, name, key);
- fence->flags = (unsigned long)fn;
+ fence->fn = fn;
+#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
+ fence->flags = 0;
+#endif
i915_sw_fence_reinit(fence);
}
@@ -257,7 +259,6 @@ void i915_sw_fence_reinit(struct i915_sw_fence *fence)
atomic_set(&fence->pending, 1);
fence->error = 0;
- I915_SW_FENCE_BUG_ON(!fence->flags);
I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}
@@ -279,6 +280,7 @@ static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags,
return 0;
}
+#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
const struct i915_sw_fence * const signaler)
{
@@ -322,9 +324,6 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
unsigned long flags;
bool err;
- if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
- return false;
-
spin_lock_irqsave(&i915_sw_fence_lock, flags);
err = __i915_sw_fence_check_if_after(fence, signaler);
__i915_sw_fence_clear_checked_bit(fence);
@@ -332,6 +331,13 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
return err;
}
+#else
+static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+ const struct i915_sw_fence * const signaler)
+{
+ return false;
+}
+#endif
static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
@@ -572,56 +578,25 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
unsigned long timeout,
gfp_t gfp)
{
- struct dma_fence *excl;
+ struct dma_resv_iter cursor;
+ struct dma_fence *f;
int ret = 0, pending;
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- if (write) {
- struct dma_fence **shared;
- unsigned int count, i;
-
- ret = dma_resv_get_fences(resv, &excl, &count, &shared);
- if (ret)
- return ret;
-
- for (i = 0; i < count; i++) {
- if (shared[i]->ops == exclude)
- continue;
-
- pending = i915_sw_fence_await_dma_fence(fence,
- shared[i],
- timeout,
- gfp);
- if (pending < 0) {
- ret = pending;
- break;
- }
-
- ret |= pending;
- }
-
- for (i = 0; i < count; i++)
- dma_fence_put(shared[i]);
- kfree(shared);
- } else {
- excl = dma_resv_get_excl_unlocked(resv);
- }
-
- if (ret >= 0 && excl && excl->ops != exclude) {
- pending = i915_sw_fence_await_dma_fence(fence,
- excl,
- timeout,
+ dma_resv_iter_begin(&cursor, resv, write);
+ dma_resv_for_each_fence_unlocked(&cursor, f) {
+ pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
gfp);
- if (pending < 0)
+ if (pending < 0) {
ret = pending;
- else
- ret |= pending;
- }
-
- dma_fence_put(excl);
+ break;
+ }
+ ret |= pending;
+ }
+ dma_resv_iter_end(&cursor);
return ret;
}
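
The conversion above follows the standard unlocked dma_resv iteration pattern; a condensed sketch, with my_handle_fence standing in for the per-fence work (hypothetical):

	static int my_await_fences(struct dma_resv *resv, bool write)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;
		int ret = 0;

		dma_resv_iter_begin(&cursor, resv, write);
		dma_resv_for_each_fence_unlocked(&cursor, fence) {
			/* The iterator holds a reference on @fence for the body. */
			ret = my_handle_fence(fence);
			if (ret)
				break;
		}
		dma_resv_iter_end(&cursor);

		return ret;
	}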
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 30a863353ee6..a7c603bc1b01 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -17,26 +17,27 @@
struct completion;
struct dma_resv;
+struct i915_sw_fence;
+
+enum i915_sw_fence_notify {
+ FENCE_COMPLETE,
+ FENCE_FREE
+};
+
+typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
+ enum i915_sw_fence_notify state);
struct i915_sw_fence {
wait_queue_head_t wait;
+ i915_sw_fence_notify_t fn;
+#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
unsigned long flags;
+#endif
atomic_t pending;
int error;
};
#define I915_SW_FENCE_CHECKED_BIT 0 /* used internally for DAG checking */
-#define I915_SW_FENCE_PRIVATE_BIT 1 /* available for use by owner */
-#define I915_SW_FENCE_MASK (~3)
-
-enum i915_sw_fence_notify {
- FENCE_COMPLETE,
- FENCE_FREE
-};
-
-typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
- enum i915_sw_fence_notify state);
-#define __i915_sw_fence_call __aligned(4)
void __i915_sw_fence_init(struct i915_sw_fence *fence,
i915_sw_fence_notify_t fn,
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index 5b33ef23d54c..d2e56b387993 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -23,7 +23,7 @@ static void fence_work(struct work_struct *work)
dma_fence_put(&f->dma);
}
-static int __i915_sw_fence_call
+static int
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct dma_fence_work *f = container_of(fence, typeof(*f), chain);
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index de0e224b56ce..23777d500cdf 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -5,6 +5,7 @@
#include <linux/vga_switcheroo.h>
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_switcheroo.h"
@@ -24,12 +25,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(pdev, PCI_D0);
- i915_resume_switcheroo(i915);
+ i915_driver_resume_switcheroo(i915);
i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
} else {
drm_info(&i915->drm, "switched off\n");
i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend_switcheroo(i915, pmm);
+ i915_driver_suspend_switcheroo(i915, pmm);
i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 1804f4142740..fae4d1f4f275 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -52,7 +52,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
u64 res = 0;
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);
+ res = intel_rc6_residency_us(&to_gt(dev_priv)->rc6, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
@@ -260,7 +260,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &i915->gt.rps;
+ struct intel_rps *rps = &to_gt(i915)->rps;
return sysfs_emit(buf, "%d\n", intel_rps_read_actual_frequency(rps));
}
@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &i915->gt.rps;
+ struct intel_rps *rps = &to_gt(i915)->rps;
return sysfs_emit(buf, "%d\n", intel_rps_get_requested_frequency(rps));
}
@@ -277,9 +277,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &i915->gt.rps;
+ struct intel_rps *rps = &to_gt(i915)->rps;
- return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->boost_freq));
+ return sysfs_emit(buf, "%d\n", intel_rps_get_boost_frequency(rps));
}
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -287,8 +287,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
- bool boost = false;
+ struct intel_rps *rps = &to_gt(dev_priv)->rps;
ssize_t ret;
u32 val;
@@ -296,28 +295,16 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- /* Validate against (static) hardware limits */
- val = intel_freq_opcode(rps, val);
- if (val < rps->min_freq || val > rps->max_freq)
- return -EINVAL;
-
- mutex_lock(&rps->lock);
- if (val != rps->boost_freq) {
- rps->boost_freq = val;
- boost = atomic_read(&rps->num_waiters);
- }
- mutex_unlock(&rps->lock);
- if (boost)
- schedule_work(&rps->work);
+ ret = intel_rps_set_boost_frequency(rps, val);
- return count;
+ return ret ?: count;
}
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct intel_rps *rps = &to_gt(dev_priv)->rps;
return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->efficient_freq));
}
@@ -325,7 +312,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
struct intel_rps *rps = &gt->rps;
return sysfs_emit(buf, "%d\n", intel_rps_get_max_frequency(rps));
@@ -336,7 +323,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
struct intel_rps *rps = &gt->rps;
ssize_t ret;
u32 val;
@@ -353,7 +340,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct intel_rps *rps = &gt->rps;
return sysfs_emit(buf, "%d\n", intel_rps_get_min_frequency(rps));
@@ -364,7 +351,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &i915->gt.rps;
+ struct intel_rps *rps = &to_gt(i915)->rps;
ssize_t ret;
u32 val;
@@ -394,7 +381,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct intel_rps *rps = &to_gt(dev_priv)->rps;
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
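
The recurring edit in this file swaps the embedded &i915->gt for the to_gt() accessor; a minimal sketch of the pattern (illustrative only):

	static u32 my_show_min_freq(struct drm_i915_private *i915)
	{
		/* to_gt() returns the primary GT; avoids open-coding &i915->gt. */
		struct intel_rps *rps = &to_gt(i915)->rps;

		return intel_rps_get_min_frequency(rps);
	}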
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8104981a6604..37b5c9e9d260 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -1,4 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_
@@ -8,511 +12,11 @@
#include <drm/drm_drv.h>
-#include "display/intel_crtc.h"
-#include "display/intel_display_types.h"
#include "gt/intel_engine.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM i915
-#define TRACE_INCLUDE_FILE i915_trace
-
-/* watermark/fifo updates */
-
-TRACE_EVENT(intel_pipe_enable,
- TP_PROTO(struct intel_crtc *crtc),
- TP_ARGS(crtc),
-
- TP_STRUCT__entry(
- __array(u32, frame, 3)
- __array(u32, scanline, 3)
- __field(enum pipe, pipe)
- ),
- TP_fast_assign(
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc *it__;
- for_each_intel_crtc(&dev_priv->drm, it__) {
- __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
- __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
- }
- __entry->pipe = crtc->pipe;
- ),
-
- TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
- pipe_name(__entry->pipe),
- __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
- __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
- __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
-);
-
-TRACE_EVENT(intel_pipe_disable,
- TP_PROTO(struct intel_crtc *crtc),
- TP_ARGS(crtc),
-
- TP_STRUCT__entry(
- __array(u32, frame, 3)
- __array(u32, scanline, 3)
- __field(enum pipe, pipe)
- ),
-
- TP_fast_assign(
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc *it__;
- for_each_intel_crtc(&dev_priv->drm, it__) {
- __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
- __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
- }
- __entry->pipe = crtc->pipe;
- ),
-
- TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
- pipe_name(__entry->pipe),
- __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
- __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
- __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
-);
-
-TRACE_EVENT(intel_pipe_crc,
- TP_PROTO(struct intel_crtc *crtc, const u32 *crcs),
- TP_ARGS(crtc, crcs),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- __array(u32, crcs, 5)
- ),
-
- TP_fast_assign(
- __entry->pipe = crtc->pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x",
- pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
- __entry->crcs[0], __entry->crcs[1], __entry->crcs[2],
- __entry->crcs[3], __entry->crcs[4])
-);
-
-TRACE_EVENT(intel_cpu_fifo_underrun,
- TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
- TP_ARGS(dev_priv, pipe),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- ),
-
- TP_fast_assign(
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- __entry->pipe = pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u",
- pipe_name(__entry->pipe),
- __entry->frame, __entry->scanline)
-);
-
-TRACE_EVENT(intel_pch_fifo_underrun,
- TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder),
- TP_ARGS(dev_priv, pch_transcoder),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- ),
-
- TP_fast_assign(
- enum pipe pipe = pch_transcoder;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- __entry->pipe = pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- ),
-
- TP_printk("pch transcoder %c, frame=%u, scanline=%u",
- pipe_name(__entry->pipe),
- __entry->frame, __entry->scanline)
-);
-
-TRACE_EVENT(intel_memory_cxsr,
- TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new),
- TP_ARGS(dev_priv, old, new),
-
- TP_STRUCT__entry(
- __array(u32, frame, 3)
- __array(u32, scanline, 3)
- __field(bool, old)
- __field(bool, new)
- ),
-
- TP_fast_assign(
- struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
- }
- __entry->old = old;
- __entry->new = new;
- ),
-
- TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
- onoff(__entry->old), onoff(__entry->new),
- __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
- __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
- __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
-);
-
-TRACE_EVENT(g4x_wm,
- TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm),
- TP_ARGS(crtc, wm),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- __field(u16, primary)
- __field(u16, sprite)
- __field(u16, cursor)
- __field(u16, sr_plane)
- __field(u16, sr_cursor)
- __field(u16, sr_fbc)
- __field(u16, hpll_plane)
- __field(u16, hpll_cursor)
- __field(u16, hpll_fbc)
- __field(bool, cxsr)
- __field(bool, hpll)
- __field(bool, fbc)
- ),
-
- TP_fast_assign(
- __entry->pipe = crtc->pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
- __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
- __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
- __entry->sr_plane = wm->sr.plane;
- __entry->sr_cursor = wm->sr.cursor;
- __entry->sr_fbc = wm->sr.fbc;
- __entry->hpll_plane = wm->hpll.plane;
- __entry->hpll_cursor = wm->hpll.cursor;
- __entry->hpll_fbc = wm->hpll.fbc;
- __entry->cxsr = wm->cxsr;
- __entry->hpll = wm->hpll_en;
- __entry->fbc = wm->fbc_en;
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
- pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
- __entry->primary, __entry->sprite, __entry->cursor,
- yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc,
- yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc,
- yesno(__entry->fbc))
-);
-
-TRACE_EVENT(vlv_wm,
- TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm),
- TP_ARGS(crtc, wm),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- __field(u32, level)
- __field(u32, cxsr)
- __field(u32, primary)
- __field(u32, sprite0)
- __field(u32, sprite1)
- __field(u32, cursor)
- __field(u32, sr_plane)
- __field(u32, sr_cursor)
- ),
-
- TP_fast_assign(
- __entry->pipe = crtc->pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- __entry->level = wm->level;
- __entry->cxsr = wm->cxsr;
- __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
- __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
- __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1];
- __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
- __entry->sr_plane = wm->sr.plane;
- __entry->sr_cursor = wm->sr.cursor;
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d",
- pipe_name(__entry->pipe), __entry->frame,
- __entry->scanline, __entry->level, __entry->cxsr,
- __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor,
- __entry->sr_plane, __entry->sr_cursor)
-);
-
-TRACE_EVENT(vlv_fifo_size,
- TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size),
- TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- __field(u32, sprite0_start)
- __field(u32, sprite1_start)
- __field(u32, fifo_size)
- ),
-
- TP_fast_assign(
- __entry->pipe = crtc->pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- __entry->sprite0_start = sprite0_start;
- __entry->sprite1_start = sprite1_start;
- __entry->fifo_size = fifo_size;
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d",
- pipe_name(__entry->pipe), __entry->frame,
- __entry->scanline, __entry->sprite0_start,
- __entry->sprite1_start, __entry->fifo_size)
-);
-
-/* plane updates */
-
-TRACE_EVENT(intel_update_plane,
- TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
- TP_ARGS(plane, crtc),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- __array(int, src, 4)
- __array(int, dst, 4)
- __string(name, plane->name)
- ),
-
- TP_fast_assign(
- __assign_str(name, plane->name);
- __entry->pipe = crtc->pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
- memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
- ),
-
- TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
- pipe_name(__entry->pipe), __get_str(name),
- __entry->frame, __entry->scanline,
- DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
- DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
-);
-
-TRACE_EVENT(intel_disable_plane,
- TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
- TP_ARGS(plane, crtc),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- __string(name, plane->name)
- ),
-
- TP_fast_assign(
- __assign_str(name, plane->name);
- __entry->pipe = crtc->pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- ),
-
- TP_printk("pipe %c, plane %s, frame=%u, scanline=%u",
- pipe_name(__entry->pipe), __get_str(name),
- __entry->frame, __entry->scanline)
-);
-
-/* fbc */
-
-TRACE_EVENT(intel_fbc_activate,
- TP_PROTO(struct intel_crtc *crtc),
- TP_ARGS(crtc),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- ),
-
- TP_fast_assign(
- __entry->pipe = crtc->pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u",
- pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
-);
-
-TRACE_EVENT(intel_fbc_deactivate,
- TP_PROTO(struct intel_crtc *crtc),
- TP_ARGS(crtc),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- ),
-
- TP_fast_assign(
- __entry->pipe = crtc->pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u",
- pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
-);
-
-TRACE_EVENT(intel_fbc_nuke,
- TP_PROTO(struct intel_crtc *crtc),
- TP_ARGS(crtc),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- ),
-
- TP_fast_assign(
- __entry->pipe = crtc->pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u",
- pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
-);
-
-/* pipe updates */
-
-TRACE_EVENT(intel_pipe_update_start,
- TP_PROTO(struct intel_crtc *crtc),
- TP_ARGS(crtc),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- __field(u32, min)
- __field(u32, max)
- ),
-
- TP_fast_assign(
- __entry->pipe = crtc->pipe;
- __entry->frame = intel_crtc_get_vblank_counter(crtc);
- __entry->scanline = intel_get_crtc_scanline(crtc);
- __entry->min = crtc->debug.min_vbl;
- __entry->max = crtc->debug.max_vbl;
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
- pipe_name(__entry->pipe), __entry->frame,
- __entry->scanline, __entry->min, __entry->max)
-);
-
-TRACE_EVENT(intel_pipe_update_vblank_evaded,
- TP_PROTO(struct intel_crtc *crtc),
- TP_ARGS(crtc),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- __field(u32, min)
- __field(u32, max)
- ),
-
- TP_fast_assign(
- __entry->pipe = crtc->pipe;
- __entry->frame = crtc->debug.start_vbl_count;
- __entry->scanline = crtc->debug.scanline_start;
- __entry->min = crtc->debug.min_vbl;
- __entry->max = crtc->debug.max_vbl;
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
- pipe_name(__entry->pipe), __entry->frame,
- __entry->scanline, __entry->min, __entry->max)
-);
-
-TRACE_EVENT(intel_pipe_update_end,
- TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
- TP_ARGS(crtc, frame, scanline_end),
-
- TP_STRUCT__entry(
- __field(enum pipe, pipe)
- __field(u32, frame)
- __field(u32, scanline)
- ),
-
- TP_fast_assign(
- __entry->pipe = crtc->pipe;
- __entry->frame = frame;
- __entry->scanline = scanline_end;
- ),
-
- TP_printk("pipe %c, frame=%u, scanline=%u",
- pipe_name(__entry->pipe), __entry->frame,
- __entry->scanline)
-);
-
-/* frontbuffer tracking */
-
-TRACE_EVENT(intel_frontbuffer_invalidate,
- TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin),
- TP_ARGS(frontbuffer_bits, origin),
-
- TP_STRUCT__entry(
- __field(unsigned int, frontbuffer_bits)
- __field(unsigned int, origin)
- ),
-
- TP_fast_assign(
- __entry->frontbuffer_bits = frontbuffer_bits;
- __entry->origin = origin;
- ),
-
- TP_printk("frontbuffer_bits=0x%08x, origin=%u",
- __entry->frontbuffer_bits, __entry->origin)
-);
-
-TRACE_EVENT(intel_frontbuffer_flush,
- TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin),
- TP_ARGS(frontbuffer_bits, origin),
-
- TP_STRUCT__entry(
- __field(unsigned int, frontbuffer_bits)
- __field(unsigned int, origin)
- ),
-
- TP_fast_assign(
- __entry->frontbuffer_bits = frontbuffer_bits;
- __entry->origin = origin;
- ),
-
- TP_printk("frontbuffer_bits=0x%08x, origin=%u",
- __entry->frontbuffer_bits, __entry->origin)
-);
-
/* object tracking */
TRACE_EVENT(i915_gem_object_create,
@@ -1260,5 +764,7 @@ DEFINE_EVENT(i915_context, i915_context_free,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_FILE i915_trace
#include <trace/define_trace.h>
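
After this change i915_trace.h follows the canonical tracepoint-header layout, with TRACE_SYSTEM defined before the multi-read guard and the include-file macros at the tail; condensed, the skeleton looks like this (illustrative only):

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM i915

	#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
	#define _I915_TRACE_H_

	#include <linux/tracepoint.h>
	/* TRACE_EVENT()/DEFINE_EVENT() definitions go here. */

	#endif /* _I915_TRACE_H_ */

	/* This part must be outside protection */
	#undef TRACE_INCLUDE_PATH
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
	#define TRACE_INCLUDE_FILE i915_trace
	#include <trace/define_trace.h>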
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index bef795e265a6..c0d6d5526abe 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -40,12 +40,12 @@
static struct kmem_cache *slab_vmas;
-struct i915_vma *i915_vma_alloc(void)
+static struct i915_vma *i915_vma_alloc(void)
{
return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}
-void i915_vma_free(struct i915_vma *vma)
+static void i915_vma_free(struct i915_vma *vma)
{
return kmem_cache_free(slab_vmas, vma);
}
@@ -109,11 +109,9 @@ vma_create(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOMEM);
kref_init(&vma->ref);
- mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
vma->obj = obj;
- vma->resv = obj->base.resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
@@ -346,7 +344,7 @@ int i915_vma_wait_for_bind(struct i915_vma *vma)
fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
rcu_read_unlock();
if (fence) {
- err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
+ err = dma_fence_wait(fence, true);
dma_fence_put(fence);
}
}
@@ -354,6 +352,28 @@ int i915_vma_wait_for_bind(struct i915_vma *vma)
return err;
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+static int i915_vma_verify_bind_complete(struct i915_vma *vma)
+{
+ struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
+ int err;
+
+ if (!fence)
+ return 0;
+
+ if (dma_fence_is_signaled(fence))
+ err = fence->error;
+ else
+ err = -EBUSY;
+
+ dma_fence_put(fence);
+
+ return err;
+}
+#else
+#define i915_vma_verify_bind_complete(_vma) 0
+#endif
+
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
@@ -373,6 +393,7 @@ int i915_vma_bind(struct i915_vma *vma,
u32 bind_flags;
u32 vma_flags;
+ lockdep_assert_held(&vma->vm->mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > vma->node.size);
@@ -394,7 +415,7 @@ int i915_vma_bind(struct i915_vma *vma,
if (bind_flags == 0)
return 0;
- GEM_BUG_ON(!vma->pages);
+ GEM_BUG_ON(!atomic_read(&vma->pages_count));
trace_i915_vma_bind(vma, bind_flags);
if (work && bind_flags & vma->vm->bind_async_flags) {
@@ -423,14 +444,22 @@ int i915_vma_bind(struct i915_vma *vma,
work->base.dma.error = 0; /* enable the queue_work() */
+ __i915_gem_object_pin_pages(vma->obj);
+ work->pinned = i915_gem_object_get(vma->obj);
+ } else {
if (vma->obj) {
- __i915_gem_object_pin_pages(vma->obj);
- work->pinned = i915_gem_object_get(vma->obj);
+ int ret;
+
+ ret = i915_gem_object_wait_moving_fence(vma->obj, true);
+ if (ret)
+ return ret;
}
- } else {
vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
}
+ if (vma->obj)
+ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
+
atomic_or(bind_flags, &vma->flags);
return 0;
}
@@ -449,6 +478,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
+ GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
ptr = READ_ONCE(vma->iomap);
if (ptr == NULL) {
@@ -667,7 +697,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
color = 0;
- if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
+ if (i915_vm_has_cache_coloring(vma->vm))
color = vma->obj->cache_level;
if (flags & PIN_OFFSET_FIXED) {
@@ -789,40 +819,356 @@ unpinned:
return pinned;
}
-static int vma_get_pages(struct i915_vma *vma)
+static struct scatterlist *
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+ unsigned int width, unsigned int height,
+ unsigned int src_stride, unsigned int dst_stride,
+ struct sg_table *st, struct scatterlist *sg)
{
- int err = 0;
- bool pinned_pages = false;
+ unsigned int column, row;
+ unsigned int src_idx;
- if (atomic_add_unless(&vma->pages_count, 1, 0))
- return 0;
+ for (column = 0; column < width; column++) {
+ unsigned int left;
- if (vma->obj) {
- err = i915_gem_object_pin_pages(vma->obj);
- if (err)
- return err;
- pinned_pages = true;
+ src_idx = src_stride * (height - 1) + column + offset;
+ for (row = 0; row < height; row++) {
+ st->nents++;
+ /*
+ * We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only thing we need are DMA addresses.
+ */
+ sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
+ sg_dma_address(sg) =
+ i915_gem_object_get_dma_address(obj, src_idx);
+ sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
+ sg = sg_next(sg);
+ src_idx -= src_stride;
+ }
+
+ left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles, the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, left, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = left;
+ sg = sg_next(sg);
}
- /* Allocations ahoy! */
- if (mutex_lock_interruptible(&vma->pages_mutex)) {
- err = -EINTR;
- goto unpin;
+ return sg;
+}
+
+static noinline struct sg_table *
+intel_rotate_pages(struct intel_rotation_info *rot_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_rotation_info_size(rot_info);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
+ sg = rotate_pages(obj, rot_info->plane[i].offset,
+ rot_info->plane[i].width, rot_info->plane[i].height,
+ rot_info->plane[i].src_stride,
+ rot_info->plane[i].dst_stride,
+ st, sg);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width,
+ rot_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
+static struct scatterlist *
+remap_pages(struct drm_i915_gem_object *obj,
+ unsigned int offset, unsigned int alignment_pad,
+ unsigned int width, unsigned int height,
+ unsigned int src_stride, unsigned int dst_stride,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ unsigned int row;
+
+ if (!width || !height)
+ return sg;
+
+ if (alignment_pad) {
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles, the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, alignment_pad * 4096, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = alignment_pad * 4096;
+ sg = sg_next(sg);
}
- if (!atomic_read(&vma->pages_count)) {
- err = vma->ops->set_pages(vma);
- if (err)
- goto unlock;
- pinned_pages = false;
+ for (row = 0; row < height; row++) {
+ unsigned int left = width * I915_GTT_PAGE_SIZE;
+
+ while (left) {
+ dma_addr_t addr;
+ unsigned int length;
+
+ /*
+ * We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only thing we need are DMA addresses.
+ */
+
+ addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
+
+ length = min(left, length);
+
+ st->nents++;
+
+ sg_set_page(sg, NULL, length, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = length;
+ sg = sg_next(sg);
+
+ offset += length / I915_GTT_PAGE_SIZE;
+ left -= length;
+ }
+
+ offset += src_stride - width;
+
+ left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles, the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, left, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = left;
+ sg = sg_next(sg);
}
+
+ return sg;
+}
+
+static noinline struct sg_table *
+intel_remap_pages(struct intel_remapped_info *rem_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_remapped_info_size(rem_info);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int gtt_offset = 0;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
+ unsigned int alignment_pad = 0;
+
+ if (rem_info->plane_alignment)
+ alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;
+
+ sg = remap_pages(obj,
+ rem_info->plane[i].offset, alignment_pad,
+ rem_info->plane[i].width, rem_info->plane[i].height,
+ rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
+ st, sg);
+
+ gtt_offset += alignment_pad +
+ rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+ }
+
+ i915_sg_trim(st);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rem_info->plane[0].width,
+ rem_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
+static noinline struct sg_table *
+intel_partial_pages(const struct i915_ggtt_view *view,
+ struct drm_i915_gem_object *obj)
+{
+ struct sg_table *st;
+ struct scatterlist *sg, *iter;
+ unsigned int count = view->partial.size;
+ unsigned int offset;
+ int ret = -ENOMEM;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, count, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
+ GEM_BUG_ON(!iter);
+
+ sg = st->sgl;
+ st->nents = 0;
+ do {
+ unsigned int len;
+
+ len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
+
+ st->nents++;
+ count -= len >> PAGE_SHIFT;
+ if (count == 0) {
+ sg_mark_end(sg);
+ i915_sg_trim(st); /* Drop any unused tail entries. */
+
+ return st;
+ }
+
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ return ERR_PTR(ret);
+}
+
+static int
+__i915_vma_get_pages(struct i915_vma *vma)
+{
+ struct sg_table *pages;
+ int ret;
+
+ /*
+ * The vma->pages are only valid within the lifespan of the borrowed
+ * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+ * must be the vma->pages. A simple rule is that vma->pages must only
+ * be accessed when the obj->mm.pages are pinned.
+ */
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
+ switch (vma->ggtt_view.type) {
+ default:
+ GEM_BUG_ON(vma->ggtt_view.type);
+ fallthrough;
+ case I915_GGTT_VIEW_NORMAL:
+ pages = vma->obj->mm.pages;
+ break;
+
+ case I915_GGTT_VIEW_ROTATED:
+ pages =
+ intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_REMAPPED:
+ pages =
+ intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_PARTIAL:
+ pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
+ break;
+ }
+
+ ret = 0;
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ pages = NULL;
+ drm_err(&vma->vm->i915->drm,
+ "Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
+ }
+
+ vma->pages = pages;
+
+ return ret;
+}
+
+I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
+{
+ int err;
+
+ if (atomic_add_unless(&vma->pages_count, 1, 0))
+ return 0;
+
+ err = i915_gem_object_pin_pages(vma->obj);
+ if (err)
+ return err;
+
+ err = __i915_vma_get_pages(vma);
+ if (err)
+ goto err_unpin;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
atomic_inc(&vma->pages_count);
-unlock:
- mutex_unlock(&vma->pages_mutex);
-unpin:
- if (pinned_pages)
- __i915_gem_object_unpin_pages(vma->obj);
+ return 0;
+
+err_unpin:
+ __i915_gem_object_unpin_pages(vma->obj);
return err;
}
@@ -830,18 +1176,31 @@ unpin:
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
/* We allocate under vma_get_pages, so beware the shrinker */
- mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
+ struct sg_table *pages = READ_ONCE(vma->pages);
+
GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
+
if (atomic_sub_return(count, &vma->pages_count) == 0) {
- vma->ops->clear_pages(vma);
- GEM_BUG_ON(vma->pages);
- if (vma->obj)
- i915_gem_object_unpin_pages(vma->obj);
+ /*
+ * The atomic_sub_return is a read barrier for the READ_ONCE of
+ * vma->pages above.
+ *
+ * READ_ONCE is safe because this is either called from the same
+ * function (i915_vma_pin_ww), or guarded by vma->vm->mutex.
+ *
+ * TODO: We're leaving vma->pages dangling, until vma->obj->resv
+ * lock is required.
+ */
+ if (pages != vma->obj->mm.pages) {
+ sg_free_table(pages);
+ kfree(pages);
+ }
+
+ i915_gem_object_unpin_pages(vma->obj);
}
- mutex_unlock(&vma->pages_mutex);
}
-static void vma_put_pages(struct i915_vma *vma)
+I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{
if (atomic_add_unless(&vma->pages_count, -1, 1))
return;
@@ -867,14 +1226,13 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags)
{
struct i915_vma_work *work = NULL;
+ struct dma_fence *moving = NULL;
intel_wakeref_t wakeref = 0;
unsigned int bound;
int err;
-#ifdef CONFIG_PROVE_LOCKING
- if (debug_locks && !WARN_ON(!ww) && vma->resv)
- assert_vma_held(vma);
-#endif
+ assert_vma_held(vma);
+ GEM_BUG_ON(!ww);
BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
@@ -885,14 +1243,15 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
return 0;
- err = vma_get_pages(vma);
+ err = i915_vma_get_pages(vma);
if (err)
return err;
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
- if (flags & vma->vm->bind_async_flags) {
+ moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
+ if (flags & vma->vm->bind_async_flags || moving) {
/* lock VM */
err = i915_vm_lock_objects(vma->vm, ww);
if (err)
@@ -906,6 +1265,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
work->vm = i915_vm_get(vma->vm);
+ dma_fence_work_chain(&work->base, moving);
+
 /* Allocate enough page directories for the PTEs used */
if (vma->vm->allocate_va_range) {
err = i915_vm_alloc_pt_stash(vma->vm,
@@ -980,7 +1341,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!vma->pages);
err = i915_vma_bind(vma,
- vma->obj ? vma->obj->cache_level : 0,
+ vma->obj->cache_level,
flags, work);
if (err)
goto err_remove;
@@ -1010,7 +1371,11 @@ err_fence:
err_rpm:
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
- vma_put_pages(vma);
+
+ if (moving)
+ dma_fence_put(moving);
+
+ i915_vma_put_pages(vma);
return err;
}
@@ -1025,23 +1390,15 @@ static void flush_idle_contexts(struct intel_gt *gt)
intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
-int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
- u32 align, unsigned int flags)
+static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u32 align, unsigned int flags)
{
struct i915_address_space *vm = vma->vm;
int err;
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-
-#ifdef CONFIG_LOCKDEP
- WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv));
-#endif
-
do {
- if (ww)
- err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
- else
- err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
+ err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
+
if (err != -ENOSPC) {
if (!err) {
err = i915_vma_wait_for_bind(vma);
@@ -1060,6 +1417,30 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
} while (1);
}
+int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u32 align, unsigned int flags)
+{
+ struct i915_gem_ww_ctx _ww;
+ int err;
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
+ if (ww)
+ return __i915_ggtt_pin(vma, ww, align, flags);
+
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(dma_resv_held(vma->obj->base.resv));
+#endif
+
+ for_i915_gem_ww(&_ww, err, true) {
+ err = i915_gem_object_lock(vma->obj, &_ww);
+ if (!err)
+ err = __i915_ggtt_pin(vma, &_ww, align, flags);
+ }
+
+ return err;
+}
+
static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
/*
@@ -1113,6 +1494,7 @@ void i915_vma_reopen(struct i915_vma *vma)
void i915_vma_release(struct kref *ref)
{
struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+ struct drm_i915_gem_object *obj = vma->obj;
if (drm_mm_node_allocated(&vma->node)) {
mutex_lock(&vma->vm->mutex);
@@ -1123,15 +1505,11 @@ void i915_vma_release(struct kref *ref)
}
GEM_BUG_ON(i915_vma_is_active(vma));
- if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- spin_lock(&obj->vma.lock);
- list_del(&vma->obj_link);
- if (!RB_EMPTY_NODE(&vma->obj_node))
- rb_erase(&vma->obj_node, &obj->vma.tree);
- spin_unlock(&obj->vma.lock);
- }
+ spin_lock(&obj->vma.lock);
+ list_del(&vma->obj_link);
+ if (!RB_EMPTY_NODE(&vma->obj_node))
+ rb_erase(&vma->obj_node, &obj->vma.tree);
+ spin_unlock(&obj->vma.lock);
__i915_vma_remove_closed(vma);
i915_vm_put(vma->vm);
@@ -1217,7 +1595,7 @@ __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
return __i915_request_await_exclusive(rq, &vma->active);
}
-int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
+static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
int err;
@@ -1256,19 +1634,19 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
}
if (fence) {
- dma_resv_add_excl_fence(vma->resv, fence);
+ dma_resv_add_excl_fence(vma->obj->base.resv, fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
}
} else {
if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
- err = dma_resv_reserve_shared(vma->resv, 1);
+ err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
if (unlikely(err))
return err;
}
if (fence) {
- dma_resv_add_shared_fence(vma->resv, fence);
+ dma_resv_add_shared_fence(vma->obj->base.resv, fence);
obj->write_domain = 0;
}
}
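
i915_ggtt_pin() above wraps the pin in the ww retry loop when the caller supplies no context; a minimal sketch of that for_i915_gem_ww() pattern (illustrative; my_pin_in_ggtt is hypothetical):

	static int my_pin_in_ggtt(struct i915_vma *vma, u32 align, unsigned int flags)
	{
		struct i915_gem_ww_ctx ww;
		int err;

		/* Retries the whole block on -EDEADLK after backing off. */
		for_i915_gem_ww(&ww, err, true) {
			err = i915_gem_object_lock(vma->obj, &ww);
			if (!err)
				err = i915_ggtt_pin(vma, &ww, align, flags);
		}

		return err;
	}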
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 648dbe744c96..32719431b3df 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,8 +55,6 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
/* do not reserve memory to prevent deadlocks */
#define __EXEC_OBJECT_NO_RESERVE BIT(31)
-int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq);
int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
struct dma_fence *fence,
@@ -234,16 +232,16 @@ static inline void __i915_vma_put(struct i915_vma *vma)
kref_put(&vma->ref, i915_vma_release);
}
-#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)
static inline void i915_vma_lock(struct i915_vma *vma)
{
- dma_resv_lock(vma->resv, NULL);
+ dma_resv_lock(vma->obj->base.resv, NULL);
}
static inline void i915_vma_unlock(struct i915_vma *vma)
{
- dma_resv_unlock(vma->resv);
+ dma_resv_unlock(vma->obj->base.resv);
}
int __must_check
@@ -418,9 +416,6 @@ static inline void i915_vma_clear_scanout(struct i915_vma *vma)
list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
for_each_until(!i915_vma_is_ggtt(V))
-struct i915_vma *i915_vma_alloc(void);
-void i915_vma_free(struct i915_vma *vma);
-
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);
@@ -436,4 +431,7 @@ static inline int i915_vma_sync(struct i915_vma *vma)
void i915_vma_module_exit(void);
int i915_vma_module_init(void);
+I915_SELFTEST_DECLARE(int i915_vma_get_pages(struct i915_vma *vma));
+I915_SELFTEST_DECLARE(void i915_vma_put_pages(struct i915_vma *vma));
+
#endif
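
With vma->resv removed, vma locking resolves to the backing object's reservation lock; a minimal sketch (illustrative only):

	static void my_with_vma_locked(struct i915_vma *vma)
	{
		i915_vma_lock(vma);	/* dma_resv_lock(vma->obj->base.resv, NULL) */
		assert_vma_held(vma);	/* asserts the same object resv is held */
		/* ... add or reserve fences on the reservation object ... */
		i915_vma_unlock(vma);
	}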
diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.c b/drivers/gpu/drm/i915/i915_vma_snapshot.c
new file mode 100644
index 000000000000..2949ceea9884
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_snapshot.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "i915_vma_snapshot.h"
+#include "i915_vma_types.h"
+#include "i915_vma.h"
+
+/**
+ * i915_vma_snapshot_init - Initialize a struct i915_vma_snapshot from
+ * a struct i915_vma.
+ * @vsnap: The i915_vma_snapshot to init.
+ * @vma: A struct i915_vma used to initialize @vsnap.
+ * @name: Name associated with the snapshot. The character pointer needs to
+ * stay alive over the lifetime of the snapshot.
+ */
+void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap,
+ struct i915_vma *vma,
+ const char *name)
+{
+ if (!i915_vma_is_pinned(vma))
+ assert_object_held(vma->obj);
+
+ vsnap->name = name;
+ vsnap->size = vma->size;
+ vsnap->obj_size = vma->obj->base.size;
+ vsnap->gtt_offset = vma->node.start;
+ vsnap->gtt_size = vma->node.size;
+ vsnap->page_sizes = vma->page_sizes.gtt;
+ vsnap->pages = vma->pages;
+ vsnap->pages_rsgt = NULL;
+ vsnap->mr = NULL;
+ if (vma->obj->mm.rsgt)
+ vsnap->pages_rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt);
+ vsnap->mr = vma->obj->mm.region;
+ kref_init(&vsnap->kref);
+ vsnap->vma_resource = &vma->active;
+ vsnap->onstack = false;
+ vsnap->present = true;
+}
+
+/**
+ * i915_vma_snapshot_init_onstack - Initialize a struct i915_vma_snapshot from
+ * a struct i915_vma, but avoid kfreeing it on last put.
+ * @vsnap: The i915_vma_snapshot to init.
+ * @vma: A struct i915_vma used to initialize @vsnap.
+ * @name: Name associated with the snapshot. The character pointer needs to
+ * stay alive over the lifetime of the snapshot.
+ */
+void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap,
+ struct i915_vma *vma,
+ const char *name)
+{
+ i915_vma_snapshot_init(vsnap, vma, name);
+ vsnap->onstack = true;
+}
+
+static void vma_snapshot_release(struct kref *ref)
+{
+ struct i915_vma_snapshot *vsnap =
+ container_of(ref, typeof(*vsnap), kref);
+
+ vsnap->present = false;
+ if (vsnap->pages_rsgt)
+ i915_refct_sgt_put(vsnap->pages_rsgt);
+ if (!vsnap->onstack)
+ kfree(vsnap);
+}
+
+/**
+ * i915_vma_snapshot_put - Put an i915_vma_snapshot pointer reference
+ * @vsnap: The pointer reference
+ */
+void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap)
+{
+ kref_put(&vsnap->kref, vma_snapshot_release);
+}
+
+/**
+ * i915_vma_snapshot_put_onstack - Put an onstack i915_vma_snapshot pointer
+ * reference and verify that the structure is released
+ * @vsnap: The pointer reference
+ *
+ * This function is intended to be paired with an i915_vma_snapshot_init_onstack()
+ * and should be called before exiting the scope that declared or
+ * freeing the structure that embedded @vsnap to verify that all references
+ * have been released.
+ */
+void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap)
+{
+ if (!kref_put(&vsnap->kref, vma_snapshot_release))
+ GEM_BUG_ON(1);
+}
+
+/**
+ * i915_vma_snapshot_resource_pin - Temporarily block the memory the
+ * vma snapshot is pointing to from being released.
+ * @vsnap: The vma snapshot.
+ * @lockdep_cookie: Pointer to bool needed for lockdep support. This needs
+ * to be passed to the paired i915_vma_snapshot_resource_unpin.
+ *
+ * This function will temporarily try to hold up a fence or similar structure
+ * and will therefore enter a fence signaling critical section.
+ *
+ * Return: true if we succeeded in blocking the memory from being released,
+ * false otherwise.
+ */
+bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap,
+ bool *lockdep_cookie)
+{
+ bool pinned = i915_active_acquire_if_busy(vsnap->vma_resource);
+
+ if (pinned)
+ *lockdep_cookie = dma_fence_begin_signalling();
+
+ return pinned;
+}
+
+/**
+ * i915_vma_snapshot_resource_unpin - Unblock vma snapshot memory from
+ * being released.
+ * @vsnap: The vma snapshot.
+ * @lockdep_cookie: Cookie returned from the matching i915_vma_snapshot_resource_pin().
+ *
+ * Might leave a fence signalling critical section and signal a fence.
+ */
+void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap,
+ bool lockdep_cookie)
+{
+ dma_fence_end_signalling(lockdep_cookie);
+
+ return i915_active_release(vsnap->vma_resource);
+}
diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.h b/drivers/gpu/drm/i915/i915_vma_snapshot.h
new file mode 100644
index 000000000000..940581df4622
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_snapshot.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef _I915_VMA_SNAPSHOT_H_
+#define _I915_VMA_SNAPSHOT_H_
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct i915_active;
+struct i915_refct_sgt;
+struct i915_vma;
+struct intel_memory_region;
+struct sg_table;
+
+/**
+ * DOC: Simple utilities for snapshotting GPU vma metadata, later used for
+ * error capture. Vi use a separate header for this to avoid issues due to
+ * recursive header includes.
+ */
+
+/**
+ * struct i915_vma_snapshot - Snapshot of vma metadata.
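+ * @name: Name associated with the snapshot. The string storage is owned by the caller.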
+ * @size: The vma size in bytes.
+ * @obj_size: The size of the underlying object in bytes.
+ * @gtt_offset: The gtt offset the vma is bound to.
+ * @gtt_size: The size in bytes allocated for the vma in the GTT.
+ * @pages: The struct sg_table pointing to the pages bound.
+ * @pages_rsgt: The refcounted sg_table holding the reference for @pages if any.
+ * @mr: The memory region the bound pages belong to, if any.
+ * @kref: Reference for this structure.
+ * @vma_resource: FIXME: A means to keep the unbind fence from signaling.
+ * Temporarily while we have only sync unbinds, and still use the vma
+ * active, we use that. With async unbinding we need a signaling refcount
+ * for the unbind fence.
+ * @page_sizes: The vma GTT page sizes information.
+ * @onstack: Whether the structure shouldn't be freed on final put.
+ * @present: Whether the structure is present and initialized.
+ */
+struct i915_vma_snapshot {
+ const char *name;
+ size_t size;
+ size_t obj_size;
+ size_t gtt_offset;
+ size_t gtt_size;
+ struct sg_table *pages;
+ struct i915_refct_sgt *pages_rsgt;
+ struct intel_memory_region *mr;
+ struct kref kref;
+ struct i915_active *vma_resource;
+ u32 page_sizes;
+ bool onstack:1;
+ bool present:1;
+};
+
+void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap,
+ struct i915_vma *vma,
+ const char *name);
+
+void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap,
+ struct i915_vma *vma,
+ const char *name);
+
+void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap);
+
+void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap);
+
+bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap,
+ bool *lockdep_cookie);
+
+void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap,
+ bool lockdep_cookie);
+
+/**
+ * i915_vma_snapshot_alloc - Allocate a struct i915_vma_snapshot
+ * @gfp: Allocation mode.
+ *
+ * Return: A pointer to a struct i915_vma_snapshot if successful.
+ * NULL otherwise.
+ */
+static inline struct i915_vma_snapshot *i915_vma_snapshot_alloc(gfp_t gfp)
+{
+ return kmalloc(sizeof(struct i915_vma_snapshot), gfp);
+}
+
+/**
+ * i915_vma_snapshot_get - Take a reference on a struct i915_vma_snapshot
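+ * @vsnap: The snapshot to take a reference on.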
+ *
+ * Return: A pointer to a struct i915_vma_snapshot.
+ */
+static inline struct i915_vma_snapshot *
+i915_vma_snapshot_get(struct i915_vma_snapshot *vsnap)
+{
+ kref_get(&vsnap->kref);
+ return vsnap;
+}
+
+/**
+ * i915_vma_snapshot_present - Whether a struct i915_vma_snapshot is
+ * present and initialized.
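+ * @vsnap: The snapshot to check. May be NULL.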
+ *
+ * Return: true if present and initialized; false otherwise.
+ */
+static inline bool
+i915_vma_snapshot_present(const struct i915_vma_snapshot *vsnap)
+{
+ return vsnap && vsnap->present;
+}
+
+#endif
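
A minimal lifecycle sketch for a heap-allocated snapshot, assuming a valid @vma held by the caller (illustrative only):

	struct i915_vma_snapshot *vsnap;

	vsnap = i915_vma_snapshot_alloc(GFP_KERNEL);
	if (vsnap) {
		i915_vma_snapshot_init(vsnap, vma, "batch"); /* takes the first reference */
		/* hand out further references with i915_vma_snapshot_get(vsnap) */
		i915_vma_snapshot_put(vsnap); /* the final put kfrees the struct */
	}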
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 80e93bf00f2e..ca575e129ced 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -97,11 +97,20 @@ enum i915_cache_level;
struct intel_remapped_plane_info {
/* in gtt pages */
- u32 offset;
- u16 width;
- u16 height;
- u16 src_stride;
- u16 dst_stride;
+ u32 offset:31;
+ u32 linear:1;
+ union {
+ /* in gtt pages for !linear */
+ struct {
+ u16 width;
+ u16 height;
+ u16 src_stride;
+ u16 dst_stride;
+ };
+
+ /* in gtt pages for linear */
+ u32 size;
+ };
} __packed;
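
As an illustration of the encoding introduced above (hypothetical values), a linearly mapped plane now carries its length in the union's size member instead of the width/height/stride tuple:

	struct intel_remapped_plane_info info = {
		.offset = 0,	/* in gtt pages */
		.linear = 1,
		.size = 64,	/* in gtt pages, only meaningful for linear */
	};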
struct intel_remapped_info {
@@ -178,7 +187,6 @@ struct i915_vma {
const struct i915_vma_ops *ops;
struct drm_i915_gem_object *obj;
- struct dma_resv *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
@@ -262,7 +270,6 @@ struct i915_vma {
#define I915_VMA_PAGES_BIAS 24
#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
atomic_t pages_count; /* number of active binds to the pages */
- struct mutex pages_mutex; /* protect acquire/release of backing pages */
/**
* Support different GGTT views into the same object.
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 305facedd284..04fd266d70e2 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -83,33 +83,26 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-static const char *iommu_name(void)
-{
- const char *msg = "n/a";
-
-#ifdef CONFIG_INTEL_IOMMU
- msg = enableddisabled(intel_iommu_gfx_mapped);
-#endif
-
- return msg;
-}
-
void intel_device_info_print_static(const struct intel_device_info *info,
struct drm_printer *p)
{
- if (info->graphics_rel)
- drm_printf(p, "graphics version: %u.%02u\n", info->graphics_ver, info->graphics_rel);
+ if (info->graphics.rel)
+ drm_printf(p, "graphics version: %u.%02u\n", info->graphics.ver,
+ info->graphics.rel);
else
- drm_printf(p, "graphics version: %u\n", info->graphics_ver);
+ drm_printf(p, "graphics version: %u\n", info->graphics.ver);
- if (info->media_rel)
- drm_printf(p, "media version: %u.%02u\n", info->media_ver, info->media_rel);
+ if (info->media.rel)
+ drm_printf(p, "media version: %u.%02u\n", info->media.ver, info->media.rel);
else
- drm_printf(p, "media version: %u\n", info->media_ver);
+ drm_printf(p, "media version: %u\n", info->media.ver);
+
+ if (info->display.rel)
+ drm_printf(p, "display version: %u.%02u\n", info->display.ver, info->display.rel);
+ else
+ drm_printf(p, "display version: %u\n", info->display.ver);
- drm_printf(p, "display version: %u\n", info->display.ver);
drm_printf(p, "gt: %d\n", info->gt);
- drm_printf(p, "iommu: %s\n", iommu_name());
drm_printf(p, "memory-regions: %x\n", info->memory_regions);
drm_printf(p, "page-sizes: %x\n", info->page_sizes);
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
@@ -177,6 +170,10 @@ static const u16 subplatform_portf_ids[] = {
INTEL_ICL_PORT_F_IDS(0),
};
+static const u16 subplatform_rpls_ids[] = {
+ INTEL_RPLS_IDS(0),
+};
+
static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
for (; num; num--, p++) {
@@ -213,6 +210,9 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
} else if (find_devid(devid, subplatform_portf_ids,
ARRAY_SIZE(subplatform_portf_ids))) {
mask = BIT(INTEL_SUBPLATFORM_PORTF);
+ } else if (find_devid(devid, subplatform_rpls_ids,
+ ARRAY_SIZE(subplatform_rpls_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_RPL_S);
}
if (IS_TIGERLAKE(i915)) {
@@ -326,33 +326,33 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
drm_info(&dev_priv->drm,
"Display fused off, disabling\n");
- info->pipe_mask = 0;
- info->cpu_transcoder_mask = 0;
+ info->display.pipe_mask = 0;
+ info->display.cpu_transcoder_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
drm_info(&dev_priv->drm, "PipeC fused off\n");
- info->pipe_mask &= ~BIT(PIPE_C);
- info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ info->display.pipe_mask &= ~BIT(PIPE_C);
+ info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
} else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
- info->pipe_mask &= ~BIT(PIPE_A);
- info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
+ info->display.pipe_mask &= ~BIT(PIPE_A);
+ info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
}
if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
- info->pipe_mask &= ~BIT(PIPE_B);
- info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
+ info->display.pipe_mask &= ~BIT(PIPE_B);
+ info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
}
if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
- info->pipe_mask &= ~BIT(PIPE_C);
- info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ info->display.pipe_mask &= ~BIT(PIPE_C);
+ info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
if (DISPLAY_VER(dev_priv) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
- info->pipe_mask &= ~BIT(PIPE_D);
- info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
+ info->display.pipe_mask &= ~BIT(PIPE_D);
+ info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
}
if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
@@ -369,7 +369,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->display.has_dsc = 0;
}
- if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active()) {
+ if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active(dev_priv)) {
drm_info(&dev_priv->drm,
"Disabling ppGTT for VT-d support\n");
info->ppgtt_type = INTEL_PPGTT_NONE;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 8e6f48d1eb7b..78597d382445 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -110,6 +110,9 @@ enum intel_platform {
#define INTEL_SUBPLATFORM_G10 0
#define INTEL_SUBPLATFORM_G11 1
+/* ADL-S */
+#define INTEL_SUBPLATFORM_RPL_S 0
+
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
@@ -123,6 +126,7 @@ enum intel_ppgtt_type {
func(is_dgfx); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
+ func(has_64k_pages); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_global_mocs); \
@@ -166,11 +170,14 @@ enum intel_ppgtt_type {
func(overlay_needs_physical); \
func(supports_tv);
+struct ip_version {
+ u8 ver;
+ u8 rel;
+};
+
struct intel_device_info {
- u8 graphics_ver;
- u8 graphics_rel;
- u8 media_ver;
- u8 media_rel;
+ struct ip_version graphics;
+ struct ip_version media;
intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
@@ -189,17 +196,17 @@ struct intel_device_info {
u8 gt; /* GT number, 0 if undefined */
- u8 pipe_mask;
- u8 cpu_transcoder_mask;
-
- u8 abox_mask;
-
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
struct {
u8 ver;
+ u8 rel;
+
+ u8 pipe_mask;
+ u8 cpu_transcoder_mask;
+ u8 abox_mask;
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 4e70c1a9ef2e..cf6e98962d82 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -109,7 +109,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
}
- if (intel_uc_wants_guc_submission(&dev_priv->gt.uc)) {
+ if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
drm_err(&dev_priv->drm,
"i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index e7f7e6627750..c70d7e286a51 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/prandom.h>
+
#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"
@@ -29,6 +31,110 @@ static const struct {
},
};
+static int __iopagetest(struct intel_memory_region *mem,
+ u8 __iomem *va, int pagesize,
+ u8 value, resource_size_t offset,
+ const void *caller)
+{
+ int byte = prandom_u32_max(pagesize);
+ u8 result[3];
+
+ memset_io(va, value, pagesize); /* or GPF! */
+ wmb();
+
+ result[0] = ioread8(va);
+ result[1] = ioread8(va + byte);
+ result[2] = ioread8(va + pagesize - 1);
+ if (memchr_inv(result, value, sizeof(result))) {
+ dev_err(mem->i915->drm.dev,
+ "Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
+ &mem->region, &mem->io_start, &offset, caller,
+ value, result[0], result[1], result[2]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iopagetest(struct intel_memory_region *mem,
+ resource_size_t offset,
+ const void *caller)
+{
+ const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
+ void __iomem *va;
+ int err;
+ int i;
+
+ va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
+ if (!va) {
+ dev_err(mem->i915->drm.dev,
+ "Failed to ioremap memory region [%pa + %pa] for %ps\n",
+ &mem->io_start, &offset, caller);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
+ if (err)
+ break;
+
+ err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
+ if (err)
+ break;
+ }
+
+ iounmap(va);
+ return err;
+}
+
+static resource_size_t random_page(resource_size_t last)
+{
+ /* Limited to low 44b (16TiB), but should suffice for a spot check */
+ return (resource_size_t)prandom_u32_max(last >> PAGE_SHIFT) << PAGE_SHIFT;
+}
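
For example, on a 16 GiB region with 4 KiB pages, last >> PAGE_SHIFT yields roughly 4.2 million candidate page indices, and the left shift scales the chosen index back to a page-aligned byte offset; the u32 index is what bounds usable offsets to the low 44 bits noted in the comment.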
+
+static int iomemtest(struct intel_memory_region *mem,
+ bool test_all,
+ const void *caller)
+{
+ resource_size_t last = resource_size(&mem->region) - PAGE_SIZE;
+ resource_size_t page;
+ int err;
+
+ /*
+ * Quick test to check read/write access to the iomap (backing store).
+ *
+ * Write a byte, read it back. If the iomapping fails, we expect
+ * a GPF preventing further execution. If the backing store does not
+ * exist, the read back will return garbage. We check a couple of pages,
+ * the first and last of the specified region, to confirm that the backing
+ * store + iomap covers the entire memory region; and we check a random
+ * offset within it as a quick spot check for bad memory.
+ */
+
+ if (test_all) {
+ for (page = 0; page <= last; page += PAGE_SIZE) {
+ err = iopagetest(mem, page, caller);
+ if (err)
+ return err;
+ }
+ } else {
+ err = iopagetest(mem, 0, caller);
+ if (err)
+ return err;
+
+ err = iopagetest(mem, last, caller);
+ if (err)
+ return err;
+
+ err = iopagetest(mem, random_page(last), caller);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance)
@@ -90,6 +196,21 @@ void intel_memory_region_debug(struct intel_memory_region *mr,
&mr->total, &mr->avail);
}
+static int intel_memory_region_memtest(struct intel_memory_region *mem,
+ void *caller)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ int err = 0;
+
+ if (!mem->io_start)
+ return 0;
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
+ err = iomemtest(mem, i915->params.memtest, caller);
+
+ return err;
+}
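
The effect is that CONFIG_DRM_I915_DEBUG_GEM builds always run at least the quick three-page probe on any region with an io_start, while setting the i915.memtest module parameter upgrades that to a sweep of every page in the region.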
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
@@ -126,9 +247,15 @@ intel_memory_region_create(struct drm_i915_private *i915,
goto err_free;
}
- kref_init(&mem->kref);
+ err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
+ if (err)
+ goto err_release;
+
return mem;
+err_release:
+ if (mem->ops->release)
+ mem->ops->release(mem);
err_free:
kfree(mem);
return ERR_PTR(err);
@@ -144,28 +271,17 @@ void intel_memory_region_set_name(struct intel_memory_region *mem,
va_end(ap);
}
-static void __intel_memory_region_destroy(struct kref *kref)
+void intel_memory_region_destroy(struct intel_memory_region *mem)
{
- struct intel_memory_region *mem =
- container_of(kref, typeof(*mem), kref);
+ int ret = 0;
if (mem->ops->release)
- mem->ops->release(mem);
+ ret = mem->ops->release(mem);
+ GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
mutex_destroy(&mem->objects.lock);
- kfree(mem);
-}
-
-struct intel_memory_region *
-intel_memory_region_get(struct intel_memory_region *mem)
-{
- kref_get(&mem->kref);
- return mem;
-}
-
-void intel_memory_region_put(struct intel_memory_region *mem)
-{
- kref_put(&mem->kref, __intel_memory_region_destroy);
+ if (!ret)
+ kfree(mem);
}
/* Global memory region registration -- only slight layer inversions! */
@@ -234,7 +350,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915)
fetch_and_zero(&i915->mm.regions[i]);
if (region)
- intel_memory_region_put(region);
+ intel_memory_region_destroy(region);
}
}
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 3feae3353d33..5625c9c38993 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -6,7 +6,6 @@
#ifndef __INTEL_MEMORY_REGION_H__
#define __INTEL_MEMORY_REGION_H__
-#include <linux/kref.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <linux/io-mapping.h>
@@ -51,7 +50,7 @@ struct intel_memory_region_ops {
unsigned int flags;
int (*init)(struct intel_memory_region *mem);
- void (*release)(struct intel_memory_region *mem);
+ int (*release)(struct intel_memory_region *mem);
int (*init_object)(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
@@ -71,8 +70,6 @@ struct intel_memory_region {
/* For fake LMEM */
struct drm_mm_node fake_mappable;
- struct kref kref;
-
resource_size_t io_start;
resource_size_t min_page_size;
resource_size_t total;
@@ -110,9 +107,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
u16 instance,
const struct intel_memory_region_ops *ops);
-struct intel_memory_region *
-intel_memory_region_get(struct intel_memory_region *mem);
-void intel_memory_region_put(struct intel_memory_region *mem);
+void intel_memory_region_destroy(struct intel_memory_region *mem);
int intel_memory_regions_hw_probe(struct drm_i915_private *i915);
void intel_memory_regions_driver_release(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index d1d4b97b86f5..da8f82c2342f 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -129,6 +129,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
return PCH_JSP;
case INTEL_PCH_ADP_DEVICE_ID_TYPE:
case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
!IS_ALDERLAKE_P(dev_priv));
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 7c0d83d292dc..6bff77521094 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -57,6 +57,7 @@ enum intel_pch {
#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
+#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ecbb3d141632..434b1f8b7fe3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -36,7 +36,9 @@
#include "display/intel_atomic_plane.h"
#include "display/intel_bw.h"
#include "display/intel_de.h"
+#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
+#include "display/intel_fb.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
#include "display/skl_universal_plane.h"
@@ -46,7 +48,6 @@
#include "i915_drv.h"
#include "i915_fixed.h"
#include "i915_irq.h"
-#include "i915_trace.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "vlv_sideband.h"
@@ -97,7 +98,7 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
* "Plane N strech max must be programmed to 11b (x1)
* when Async flips are enabled on that plane."
*/
- if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active())
+ if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active(dev_priv))
intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1);
}
@@ -160,7 +161,7 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
* Display WA #0883: bxt
*/
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
- ILK_DPFC_DISABLE_DUMMY0);
+ DPFC_DISABLE_DUMMY0);
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -988,7 +989,7 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
enum pipe pipe;
for_each_pipe(dev_priv, pipe)
- trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
+ trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
intel_uncore_write(&dev_priv->uncore, DSPFW1,
FW_WM(wm->sr.plane, SR) |
@@ -1020,7 +1021,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
+ trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
(wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
@@ -2335,6 +2336,20 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
#undef FW_WM
+static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
+ enum i9xx_plane_id i9xx_plane)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&i915->drm, plane) {
+ if (plane->id == PLANE_PRIMARY &&
+ plane->i9xx_plane == i9xx_plane)
+ return intel_crtc_for_pipe(i915, plane->pipe);
+ }
+
+ return NULL;
+}
+
static void i9xx_update_wm(struct drm_i915_private *dev_priv)
{
const struct intel_watermark_params *wm_info;
@@ -2356,7 +2371,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
else
fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
- crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
+ crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *pipe_mode =
&crtc->config->hw.pipe_mode;
@@ -2386,7 +2401,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
else
fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
- crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
+ crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *pipe_mode =
&crtc->config->hw.pipe_mode;
@@ -3062,9 +3077,9 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
- ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
- ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
if (!changed)
return;
@@ -3368,13 +3383,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
}
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
- /*
- * FIXME this is racy. FBC might get enabled later.
- * What we should check here is whether FBC can be
- * enabled sometime later.
- */
- if (DISPLAY_VER(dev_priv) == 5 && !merged->fbc_wm_enabled &&
- intel_fbc_is_active(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
+ dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -5094,6 +5104,18 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
+static bool icl_need_wm1_wa(struct drm_i915_private *i915,
+ enum plane_id plane_id)
+{
+ /*
+ * Wa_1408961008:icl, ehl
+ * Wa_14012656716:tgl, adl
+ * Underruns with WM1+ disabled
+ */
+ return DISPLAY_VER(i915) == 11 ||
+ (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
+}
+
static int
skl_allocate_plane_ddb(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@@ -5264,11 +5286,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
total[plane_id], uv_total[plane_id]);
- /*
- * Wa_1408961008:icl, ehl
- * Underruns with WM1+ disabled
- */
- if (DISPLAY_VER(dev_priv) == 11 &&
+ if (icl_need_wm1_wa(dev_priv, plane_id) &&
level == 1 && wm->wm[0].enable) {
wm->wm[level].blocks = wm->wm[0].blocks;
wm->wm[level].lines = wm->wm[0].lines;
@@ -6900,7 +6918,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_crtc *crtc =
- intel_get_crtc_for_pipe(dev_priv, plane->pipe);
+ intel_crtc_for_pipe(dev_priv, plane->pipe);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -7056,7 +7074,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_crtc *crtc =
- intel_get_crtc_for_pipe(dev_priv, plane->pipe);
+ intel_crtc_for_pipe(dev_priv, plane->pipe);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -7434,7 +7452,7 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* Wa_1409120013:icl,ehl */
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
- ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+ DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/*Wa_14010594013:icl, ehl */
intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
@@ -7443,11 +7461,11 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* Wa_1409120013:tgl,rkl,adl-s,dg1 */
+ /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */
if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
- IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv))
+ IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv))
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
- ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+ DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_1409825376:tgl (pre-prod)*/
if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
@@ -7473,11 +7491,34 @@ static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
gen12lp_init_clock_gating(dev_priv);
/* Wa_1409836686:dg1[a0] */
- if (IS_DG1_GT_STEP(dev_priv, STEP_A0, STEP_B0))
+ if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
DPT_GATING_DIS);
}
+static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ /* Wa_22010146351:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
+}
+
+static void dg2_init_clock_gating(struct drm_i915_private *i915)
+{
+ /* Wa_22010954014:dg2_g10 */
+ if (IS_DG2_G10(i915))
+ intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
+ SGSI_SIDECLK_DIS);
+
+ /*
+ * Wa_14010733611:dg2_g10
+ * Wa_22010146351:dg2_g10
+ */
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
+ intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
+ SGR_DIS | SGGI_DIS);
+}
+
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (!HAS_PCH_CNP(dev_priv))
@@ -7509,7 +7550,7 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
* Display WA #0873: cfl
*/
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
- ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+ DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7521,12 +7562,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
FBC_LLC_FULLY_OPEN);
/* WaDisableSDEUnitClockGating:kbl */
- if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0))
+ if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
- if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0))
+ if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
@@ -7542,7 +7583,7 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
* Display WA #0873: kbl
*/
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
- ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+ DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7569,14 +7610,14 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
* Display WA #0873: skl
*/
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
- ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+ DPFC_NUKE_ON_ANY_MODIFICATION);
/*
* WaFbcHighMemBwCorruptionAvoidance:skl
* Display WA #0883: skl
*/
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
- ILK_DPFC_DISABLE_DUMMY0);
+ DPFC_DISABLE_DUMMY0);
}
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7888,6 +7929,8 @@ static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs =
.init_clock_gating = platform##_init_clock_gating, \
}
+CG_FUNCS(dg2);
+CG_FUNCS(xehpsdv);
CG_FUNCS(adlp);
CG_FUNCS(dg1);
CG_FUNCS(gen12lp);
@@ -7924,7 +7967,11 @@ CG_FUNCS(nop);
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_ALDERLAKE_P(dev_priv))
+ if (IS_DG2(dev_priv))
+ dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
+ else if (IS_XEHPSDV(dev_priv))
+ dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
+ else if (IS_ALDERLAKE_P(dev_priv))
dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
else if (IS_DG1(dev_priv))
dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/intel_pm_types.h
new file mode 100644
index 000000000000..211632f58751
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm_types.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_PM_TYPES_H__
+#define __INTEL_PM_TYPES_H__
+
+#include <linux/types.h>
+
+#include "display/intel_display.h"
+
+enum intel_ddb_partitioning {
+ INTEL_DDB_PART_1_2,
+ INTEL_DDB_PART_5_6, /* IVB+ */
+};
+
+struct ilk_wm_values {
+ u32 wm_pipe[3];
+ u32 wm_lp[3];
+ u32 wm_lp_spr[3];
+ bool enable_fbc_wm;
+ enum intel_ddb_partitioning partitioning;
+};
+
+struct g4x_pipe_wm {
+ u16 plane[I915_MAX_PLANES];
+ u16 fbc;
+};
+
+struct g4x_sr_wm {
+ u16 plane;
+ u16 cursor;
+ u16 fbc;
+};
+
+struct vlv_wm_ddl_values {
+ u8 plane[I915_MAX_PLANES];
+};
+
+struct vlv_wm_values {
+ struct g4x_pipe_wm pipe[3];
+ struct g4x_sr_wm sr;
+ struct vlv_wm_ddl_values ddl[3];
+ u8 level;
+ bool cxsr;
+};
+
+struct g4x_wm_values {
+ struct g4x_pipe_wm pipe[2];
+ struct g4x_sr_wm sr;
+ struct g4x_sr_wm hpll;
+ bool cxsr;
+ bool hpll_en;
+ bool fbc_en;
+};
+
+struct skl_ddb_entry {
+ u16 start, end; /* in number of blocks, 'end' is exclusive */
+};
+
+static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+{
+ return entry->end - entry->start;
+}
+
+static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
+ const struct skl_ddb_entry *e2)
+{
+ if (e1->start == e2->start && e1->end == e2->end)
+ return true;
+
+ return false;
+}
+
+#endif /* __INTEL_PM_TYPES_H__ */
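
As a quick illustration of the exclusive-'end' convention (hypothetical values):

	const struct skl_ddb_entry entry = { .start = 8, .end = 24 };
	u16 blocks = skl_ddb_entry_size(&entry); /* 24 - 8 = 16 blocks */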
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 98c7339bf8ba..f2b888c16958 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -104,19 +104,50 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
* memory region, and if it was registered with the TTM device,
* removes that registration.
*/
-void intel_region_ttm_fini(struct intel_memory_region *mem)
+int intel_region_ttm_fini(struct intel_memory_region *mem)
{
- int ret;
+ struct ttm_resource_manager *man = mem->region_private;
+ int ret = -EBUSY;
+ int count;
+
+ /*
+ * Put the region's move fences. This releases requests that
+ * may hold on to contexts and vms that may hold on to buffer
+ * objects placed in this region.
+ */
+ if (man)
+ ttm_resource_manager_cleanup(man);
+
+ /* Flush objects from region. */
+ for (count = 0; count < 10; ++count) {
+ i915_gem_flush_free_objects(mem->i915);
+
+ mutex_lock(&mem->objects.lock);
+ if (list_empty(&mem->objects.list))
+ ret = 0;
+ mutex_unlock(&mem->objects.lock);
+ if (!ret)
+ break;
+
+ msleep(20);
+ flush_delayed_work(&mem->i915->bdev.wq);
+ }
+
+ /* If we leaked objects, don't free the region, or we cause a use-after-free */
+ if (ret || !man)
+ return ret;
ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
intel_region_to_ttm_type(mem));
GEM_WARN_ON(ret);
mem->region_private = NULL;
+
+ return ret;
}
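
Note that the flush loop above bounds the wait: at most ten passes of a 20 ms sleep plus a delayed-work flush, roughly 200 ms, before the function gives up with -EBUSY and leaves the region allocated rather than risking a use-after-free.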
/**
- * intel_region_ttm_resource_to_st - Convert an opaque TTM resource manager resource
- * to an sg_table.
+ * intel_region_ttm_resource_to_rsgt -
+ * Convert an opaque TTM resource manager resource to a refcounted sg_table.
* @mem: The memory region.
* @res: The resource manager resource obtained from the TTM resource manager.
*
@@ -126,17 +157,18 @@ void intel_region_ttm_fini(struct intel_memory_region *mem)
*
* Return: A malloced sg_table on success, an error pointer on failure.
*/
-struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
- struct ttm_resource *res)
+struct i915_refct_sgt *
+intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
+ struct ttm_resource *res)
{
if (mem->is_range_manager) {
struct ttm_range_mgr_node *range_node =
to_ttm_range_mgr_node(res);
- return i915_sg_from_mm_node(&range_node->mm_nodes[0],
- mem->region.start);
+ return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
+ mem->region.start);
} else {
- return i915_sg_from_buddy_resource(res, mem->region.start);
+ return i915_rsgt_from_buddy_resource(res, mem->region.start);
}
}
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h
index 6f44075920f2..fdee5e7bd46c 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.h
+++ b/drivers/gpu/drm/i915/intel_region_ttm.h
@@ -20,10 +20,11 @@ void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv);
int intel_region_ttm_init(struct intel_memory_region *mem);
-void intel_region_ttm_fini(struct intel_memory_region *mem);
+int intel_region_ttm_fini(struct intel_memory_region *mem);
-struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
- struct ttm_resource *res);
+struct i915_refct_sgt *
+intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
+ struct ttm_resource *res);
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
struct ttm_resource *res);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 0d85f3c5c526..53f1ccb78849 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -68,6 +68,9 @@ static noinline depot_stack_handle_t __save_depot_stack(void)
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
spin_lock_init(&rpm->debug.lock);
+
+ if (rpm->available)
+ stack_depot_init();
}
static noinline depot_stack_handle_t
@@ -590,6 +593,9 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
pm_runtime_use_autosuspend(kdev);
}
+ /* Enable by default */
+ pm_runtime_allow(kdev);
+
/*
* The core calls the driver load handler with an RPM reference held.
* We drop that here and will reacquire it during unloading in
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index 6cf967631395..a4b16b9e2e55 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -23,7 +23,8 @@
* use a macro to define these to make it easier to identify the platforms
* where the two steppings can deviate.
*/
-#define COMMON_STEP(x) .gt_step = STEP_##x, .display_step = STEP_##x
+#define COMMON_STEP(x) .graphics_step = STEP_##x, .display_step = STEP_##x, .media_step = STEP_##x
+#define COMMON_GT_MEDIA_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x
static const struct intel_step_info skl_revids[] = {
[0x6] = { COMMON_STEP(G0) },
@@ -33,13 +34,13 @@ static const struct intel_step_info skl_revids[] = {
};
static const struct intel_step_info kbl_revids[] = {
- [1] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
- [2] = { .gt_step = STEP_C0, .display_step = STEP_B0 },
- [3] = { .gt_step = STEP_D0, .display_step = STEP_B0 },
- [4] = { .gt_step = STEP_F0, .display_step = STEP_C0 },
- [5] = { .gt_step = STEP_C0, .display_step = STEP_B1 },
- [6] = { .gt_step = STEP_D1, .display_step = STEP_B1 },
- [7] = { .gt_step = STEP_G0, .display_step = STEP_C0 },
+ [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
+ [2] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 },
+ [3] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_B0 },
+ [4] = { COMMON_GT_MEDIA_STEP(F0), .display_step = STEP_C0 },
+ [5] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B1 },
+ [6] = { COMMON_GT_MEDIA_STEP(D1), .display_step = STEP_B1 },
+ [7] = { COMMON_GT_MEDIA_STEP(G0), .display_step = STEP_C0 },
};
static const struct intel_step_info bxt_revids[] = {
@@ -63,16 +64,16 @@ static const struct intel_step_info jsl_ehl_revids[] = {
};
static const struct intel_step_info tgl_uy_revids[] = {
- [0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
- [1] = { .gt_step = STEP_B0, .display_step = STEP_C0 },
- [2] = { .gt_step = STEP_B1, .display_step = STEP_C0 },
- [3] = { .gt_step = STEP_C0, .display_step = STEP_D0 },
+ [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
+ [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 },
+ [2] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 },
+ [3] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
};
/* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */
static const struct intel_step_info tgl_revids[] = {
- [0] = { .gt_step = STEP_A0, .display_step = STEP_B0 },
- [1] = { .gt_step = STEP_B0, .display_step = STEP_D0 },
+ [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 },
+ [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_D0 },
};
static const struct intel_step_info rkl_revids[] = {
@@ -87,38 +88,38 @@ static const struct intel_step_info dg1_revids[] = {
};
static const struct intel_step_info adls_revids[] = {
- [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
- [0x1] = { .gt_step = STEP_A0, .display_step = STEP_A2 },
- [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
- [0x8] = { .gt_step = STEP_C0, .display_step = STEP_B0 },
- [0xC] = { .gt_step = STEP_D0, .display_step = STEP_C0 },
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
+ [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A2 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 },
+ [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 },
};
static const struct intel_step_info adlp_revids[] = {
- [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
- [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
- [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 },
- [0xC] = { .gt_step = STEP_C0, .display_step = STEP_D0 },
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 },
+ [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
};
static const struct intel_step_info xehpsdv_revids[] = {
- [0x0] = { .gt_step = STEP_A0 },
- [0x1] = { .gt_step = STEP_A1 },
- [0x4] = { .gt_step = STEP_B0 },
- [0x8] = { .gt_step = STEP_C0 },
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0) },
+ [0x1] = { COMMON_GT_MEDIA_STEP(A1) },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0) },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0) },
};
static const struct intel_step_info dg2_g10_revid_step_tbl[] = {
- [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
- [0x1] = { .gt_step = STEP_A1, .display_step = STEP_A0 },
- [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
- [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 },
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
+ [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 },
};
static const struct intel_step_info dg2_g11_revid_step_tbl[] = {
- [0x0] = { .gt_step = STEP_A0, .display_step = STEP_B0 },
- [0x4] = { .gt_step = STEP_B0, .display_step = STEP_C0 },
- [0x5] = { .gt_step = STEP_B1, .display_step = STEP_C0 },
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 },
+ [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 },
};
void intel_step_init(struct drm_i915_private *i915)
@@ -179,7 +180,7 @@ void intel_step_init(struct drm_i915_private *i915)
if (!revids)
return;
- if (revid < size && revids[revid].gt_step != STEP_NONE) {
+ if (revid < size && revids[revid].graphics_step != STEP_NONE) {
step = revids[revid];
} else {
drm_warn(&i915->drm, "Unknown revid 0x%02x\n", revid);
@@ -192,7 +193,7 @@ void intel_step_init(struct drm_i915_private *i915)
* steppings in the array are not monotonically increasing, but
* it's better than defaulting to 0.
*/
- while (revid < size && revids[revid].gt_step == STEP_NONE)
+ while (revid < size && revids[revid].graphics_step == STEP_NONE)
revid++;
if (revid < size) {
@@ -201,12 +202,12 @@ void intel_step_init(struct drm_i915_private *i915)
step = revids[revid];
} else {
drm_dbg(&i915->drm, "Using future steppings\n");
- step.gt_step = STEP_FUTURE;
+ step.graphics_step = STEP_FUTURE;
step.display_step = STEP_FUTURE;
}
}
- if (drm_WARN_ON(&i915->drm, step.gt_step == STEP_NONE))
+ if (drm_WARN_ON(&i915->drm, step.graphics_step == STEP_NONE))
return;
RUNTIME_INFO(i915)->step = step;
diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h
index f6641e2a3c77..d71a99bd5179 100644
--- a/drivers/gpu/drm/i915/intel_step.h
+++ b/drivers/gpu/drm/i915/intel_step.h
@@ -11,8 +11,9 @@
struct drm_i915_private;
struct intel_step_info {
- u8 gt_step;
+ u8 graphics_step;
u8 display_step;
+ u8 media_step;
};
#define STEP_ENUM_VAL(name) STEP_##name,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index e072054adac5..778da3179b3c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -22,11 +22,11 @@
*/
#include <linux/pm_runtime.h>
-#include <asm/iosf_mbi.h>
#include "gt/intel_lrc_reg.h" /* for shadow reg list */
#include "i915_drv.h"
+#include "i915_iosf_mbi.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"
@@ -724,7 +724,8 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
}
static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
- enum forcewake_domains fw_domains)
+ enum forcewake_domains fw_domains,
+ bool delayed)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
@@ -739,7 +740,11 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
continue;
}
- fw_domains_put(uncore, domain->mask);
+ if (delayed &&
+ !(domain->uncore->fw_domains_timer & domain->mask))
+ fw_domain_arm_timer(domain);
+ else
+ fw_domains_put(uncore, domain->mask);
}
}
@@ -760,7 +765,20 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
return;
spin_lock_irqsave(&uncore->lock, irqflags);
- __intel_uncore_forcewake_put(uncore, fw_domains);
+ __intel_uncore_forcewake_put(uncore, fw_domains, false);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
+}
+
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+ enum forcewake_domains fw_domains)
+{
+ unsigned long irqflags;
+
+ if (!uncore->fw_get_funcs)
+ return;
+
+ spin_lock_irqsave(&uncore->lock, irqflags);
+ __intel_uncore_forcewake_put(uncore, fw_domains, true);
spin_unlock_irqrestore(&uncore->lock, irqflags);
}
@@ -802,7 +820,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
if (!uncore->fw_get_funcs)
return;
- __intel_uncore_forcewake_put(uncore, fw_domains);
+ __intel_uncore_forcewake_put(uncore, fw_domains, false);
}
void assert_forcewakes_inactive(struct intel_uncore *uncore)
@@ -2020,7 +2038,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int uncore_mmio_setup(struct intel_uncore *uncore)
+int intel_uncore_setup_mmio(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
@@ -2053,7 +2071,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
return 0;
}
-static void uncore_mmio_cleanup(struct intel_uncore *uncore)
+void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
{
struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);
@@ -2061,12 +2079,13 @@ static void uncore_mmio_cleanup(struct intel_uncore *uncore)
}
void intel_uncore_init_early(struct intel_uncore *uncore,
- struct drm_i915_private *i915)
+ struct intel_gt *gt)
{
spin_lock_init(&uncore->lock);
- uncore->i915 = i915;
- uncore->rpm = &i915->runtime_pm;
- uncore->debug = &i915->mmio_debug;
+ uncore->i915 = gt->i915;
+ uncore->gt = gt;
+ uncore->rpm = &gt->i915->runtime_pm;
+ uncore->debug = &gt->i915->mmio_debug;
}
static void uncore_raw_init(struct intel_uncore *uncore)
@@ -2146,10 +2165,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
struct drm_i915_private *i915 = uncore->i915;
int ret;
- ret = uncore_mmio_setup(uncore);
- if (ret)
- return ret;
-
/*
* The boot firmware initializes local memory and assesses its health.
* If memory training fails, the punit will have been instructed to
@@ -2170,7 +2185,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
} else {
ret = uncore_forcewake_init(uncore);
if (ret)
- goto out_mmio_cleanup;
+ return ret;
}
/* make sure fw funcs are set if and only if we have fw*/
@@ -2192,11 +2207,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
return 0;
-
-out_mmio_cleanup:
- uncore_mmio_cleanup(uncore);
-
- return ret;
}
/*
@@ -2261,8 +2271,6 @@ void intel_uncore_fini_mmio(struct intel_uncore *uncore)
intel_uncore_fw_domains_fini(uncore);
iosf_mbi_punit_release();
}
-
- uncore_mmio_cleanup(uncore);
}
static const struct reg_whitelist {
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 3248e4e2c540..2a15b2b2e2fc 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -129,6 +129,7 @@ struct intel_uncore {
void __iomem *regs;
struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct intel_runtime_pm *rpm;
spinlock_t lock; /** lock is also taken in irq contexts. */
@@ -217,12 +218,14 @@ u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
- struct drm_i915_private *i915);
+ struct intel_gt *gt);
+int intel_uncore_setup_mmio(struct intel_uncore *uncore);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
struct intel_gt *gt);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
+void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct intel_uncore *uncore);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
@@ -243,6 +246,8 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains domains);
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+ enum forcewake_domains domains);
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
enum forcewake_domains fw_domains);
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 5e511bb891f9..f06d21005106 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -220,7 +220,7 @@ static bool __wopcm_regs_locked(struct intel_uncore *uncore,
void intel_wopcm_init(struct intel_wopcm *wopcm)
{
struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
u32 guc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.guc.fw);
u32 huc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.huc.fw);
u32 ctx_rsvd = context_reserved_size(i915);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index e2314ad9546d..15311eaed848 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -44,6 +44,11 @@ struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp)
return container_of(pxp, struct intel_gt, pxp);
}
+bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
+{
+ return pxp->ce;
+}
+
bool intel_pxp_is_active(const struct intel_pxp *pxp)
{
return pxp->arb_is_valid;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
index aa262258d4d4..73847e535cab 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -6,17 +6,15 @@
#ifndef __INTEL_PXP_H__
#define __INTEL_PXP_H__
-#include "intel_pxp_types.h"
+#include <linux/errno.h>
+#include <linux/types.h>
+struct intel_pxp;
struct drm_i915_gem_object;
-static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
-{
- return pxp->ce;
-}
-
#ifdef CONFIG_DRM_I915_PXP
struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp);
+bool intel_pxp_is_enabled(const struct intel_pxp *pxp);
bool intel_pxp_is_active(const struct intel_pxp *pxp);
void intel_pxp_init(struct intel_pxp *pxp);
@@ -48,6 +46,11 @@ static inline int intel_pxp_start(struct intel_pxp *pxp)
return -ENODEV;
}
+static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
+{
+ return false;
+}
+
static inline bool intel_pxp_is_active(const struct intel_pxp *pxp)
{
return false;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index d02732f04757..598840b73dfa 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -3,7 +3,8 @@
* Copyright(c) 2020, Intel Corporation. All rights reserved.
*/
-#include "drm/i915_drm.h"
+#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "intel_pxp.h"
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 49508f31dcb7..4b6f5655fab5 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -4,8 +4,10 @@
*/
#include <linux/component.h>
-#include "drm/i915_pxp_tee_interface.h"
-#include "drm/i915_component.h"
+
+#include <drm/i915_pxp_tee_interface.h>
+#include <drm/i915_component.h>
+
#include "i915_drv.h"
#include "intel_pxp.h"
#include "intel_pxp_session.h"
@@ -14,7 +16,9 @@
static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev)
{
- return &kdev_to_i915(i915_kdev)->gt.pxp;
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+
+ return &to_gt(i915)->pxp;
}
static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
@@ -103,9 +107,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev,
static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
struct device *tee_kdev, void *data)
{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+ intel_wakeref_t wakeref;
- intel_pxp_fini_hw(pxp);
+ with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
+ intel_pxp_fini_hw(pxp);
mutex_lock(&pxp->tee_mutex);
pxp->pxp_component = NULL;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 73ef7d1754e1..7ce5f37ee12e 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -7,9 +7,7 @@
#define __INTEL_PXP_TYPES_H__
#include <linux/completion.h>
-#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 61bf4560d8af..2dac9be1de58 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -254,7 +254,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_barrier),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 152d9ab135b1..b5576888cd78 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -248,7 +248,7 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_ww_ctx),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index f99bb0113726..75b709c26dd3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -442,6 +442,7 @@ static int igt_evict_contexts(void *arg)
/* Overfill the GGTT with context objects and so try to evict one. */
for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
+ struct i915_request *last = NULL;
count = 0;
onstack_fence_init(&fence);
@@ -479,6 +480,9 @@ static int igt_evict_contexts(void *arg)
i915_request_add(rq);
count++;
+ if (last)
+ i915_request_put(last);
+ last = i915_request_get(rq);
err = 0;
} while(1);
onstack_fence_fini(&fence);
@@ -486,6 +490,21 @@ static int igt_evict_contexts(void *arg)
count, engine->name);
if (err)
break;
+ if (last) {
+ if (i915_request_wait(last, 0, HZ) < 0) {
+ err = -EIO;
+ i915_request_put(last);
+ pr_err("Failed waiting for last request (on %s)",
+ engine->name);
+ break;
+ }
+ i915_request_put(last);
+ }
+ err = intel_gt_wait_for_idle(engine->gt, HZ * 3);
+ if (err) {
+ pr_err("Failed to idle GT (on %s)", engine->name);
+ break;
+ }
}
mutex_lock(&ggtt->vm.mutex);
@@ -526,7 +545,7 @@ int i915_gem_evict_mock_selftests(void)
return -ENOMEM;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = i915_subtests(tests, &i915->gt);
+ err = i915_subtests(tests, to_gt(i915));
mock_destroy_device(i915);
return err;
@@ -538,8 +557,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 46f4236039a9..575705c3bce9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -155,7 +155,7 @@ static int igt_ppgtt_alloc(void *arg)
if (!HAS_PPGTT(dev_priv))
return 0;
- ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -1053,7 +1053,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
if (IS_ERR(file))
return PTR_ERR(file);
- ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_free;
@@ -1275,7 +1275,7 @@ static void track_vma_bind(struct i915_vma *vma)
__i915_gem_object_pin_pages(obj);
- GEM_BUG_ON(vma->pages);
+ GEM_BUG_ON(atomic_read(&vma->pages_count));
atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
__i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
@@ -1953,7 +1953,9 @@ static int igt_cs_tlb(void *arg)
goto end;
}
- err = vma->ops->set_pages(vma);
+ i915_gem_object_lock(bbe, NULL);
+ err = i915_vma_get_pages(vma);
+ i915_gem_object_unlock(bbe);
if (err)
goto end;
@@ -1994,7 +1996,7 @@ end_ww:
i915_request_put(rq);
}
- vma->ops->clear_pages(vma);
+ i915_vma_put_pages(vma);
err = context_sync(ce);
if (err) {
@@ -2009,7 +2011,9 @@ end_ww:
goto end;
}
- err = vma->ops->set_pages(vma);
+ i915_gem_object_lock(act, NULL);
+ err = i915_vma_get_pages(vma);
+ i915_gem_object_unlock(act);
if (err)
goto end;
@@ -2047,7 +2051,7 @@ end_ww:
}
end_spin(batch, count - 1);
- vma->ops->clear_pages(vma);
+ i915_vma_put_pages(vma);
err = context_sync(ce);
if (err) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index 9e9a6cb1d9e5..88db2e3d81d0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -424,7 +424,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
return 0;
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
err = alloc_empty_config(&i915->perf);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index d67710d10615..92a859b34190 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -209,6 +209,10 @@ static int igt_request_rewind(void *arg)
int err = -EINVAL;
ctx[0] = mock_context(i915, "A");
+ if (!ctx[0]) {
+ err = -ENOMEM;
+ goto err_ctx_0;
+ }
ce = i915_gem_context_get_engine(ctx[0], RCS0);
GEM_BUG_ON(IS_ERR(ce));
@@ -223,6 +227,10 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
+ if (!ctx[1]) {
+ err = -ENOMEM;
+ goto err_ctx_1;
+ }
ce = i915_gem_context_get_engine(ctx[1], RCS0);
GEM_BUG_ON(IS_ERR(ce));
@@ -261,9 +269,11 @@ err:
i915_request_put(vip);
err_context_1:
mock_context_close(ctx[1]);
+err_ctx_1:
i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
+err_ctx_0:
mock_device_flush(i915);
return err;
}
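The added err_ctx_0/err_ctx_1 labels above restore the usual kernel unwind discipline: each acquisition jumps to a label placed just below the cleanup of everything acquired before it, so an early failure never releases objects that were never created. A generic sketch of the scheme, with hypothetical alloc_a/alloc_b/alloc_c helpers:

	int setup(void)
	{
		struct a *a;
		struct b *b;
		struct c *c;
		int err;

		a = alloc_a();
		if (!a)
			return -ENOMEM;		/* nothing to unwind yet */

		b = alloc_b();
		if (!b) {
			err = -ENOMEM;
			goto err_a;		/* only a exists so far */
		}

		c = alloc_c();
		if (!c) {
			err = -ENOMEM;
			goto err_b;		/* a and b exist */
		}

		return 0;

	err_b:
		free_b(b);			/* teardown in reverse order */
	err_a:
		free_a(a);
		return err;
	}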
@@ -831,7 +841,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(&i915->gt);
+ intel_gt_chipset_flush(to_gt(i915));
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -972,7 +982,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, i915->gt.vm, NULL);
+ vma = i915_vma_instance(obj, to_gt(i915)->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1004,7 +1014,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(&i915->gt);
+ intel_gt_chipset_flush(to_gt(i915));
return vma;
@@ -1690,7 +1700,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_breadcrumbs_smoketest),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_subtests(tests, i915);
@@ -2805,7 +2815,7 @@ static int p_sync0(void *arg)
i915_request_add(rq);
err = 0;
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ if (i915_request_wait(rq, 0, HZ) < 0)
err = -ETIME;
i915_request_put(rq);
if (err)
@@ -2876,7 +2886,7 @@ static int p_sync1(void *arg)
i915_request_add(rq);
err = 0;
- if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ if (prev && i915_request_wait(prev, 0, HZ) < 0)
err = -ETIME;
i915_request_put(prev);
prev = rq;
@@ -3081,7 +3091,7 @@ int i915_request_perf_selftests(struct drm_i915_private *i915)
SUBTEST(perf_parallel_engines),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 484759c9409c..2d6d7bd13c3c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -298,10 +298,10 @@ int __i915_live_setup(void *data)
struct drm_i915_private *i915 = data;
/* The selftests expect an idle system */
- if (intel_gt_pm_wait_for_idle(&i915->gt))
+ if (intel_gt_pm_wait_for_idle(to_gt(i915)))
return -EIO;
- return intel_gt_terminally_wedged(&i915->gt);
+ return intel_gt_terminally_wedged(to_gt(i915));
}
int __i915_live_teardown(int err, void *data)
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index cbf45d85cbff..daa985e5a19b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -28,7 +28,7 @@
#include "../i915_selftest.h"
-static int __i915_sw_fence_call
+static int
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
switch (state) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 1f10fe36619b..5c5809dfe9b2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -691,7 +691,11 @@ static int igt_vma_rotate_remap(void *arg)
}
i915_vma_unpin(vma);
-
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("Unbinding returned %i\n", err);
+ goto out_object;
+ }
cond_resched();
}
}
@@ -848,6 +852,11 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
nvma++;
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("Unbinding returned %i\n", err);
+ goto out_object;
+ }
cond_resched();
}
@@ -882,6 +891,12 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("Unbinding returned %i\n", err);
+ goto out_object;
+ }
+
count = 0;
list_for_each_entry(vma, &obj->vma.list, obj_link)
count++;
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index a6c71fca61aa..b84594601d30 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@
int igt_flush_test(struct drm_i915_private *i915)
{
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
cond_resched();
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 1c721542e277..72b58b66692a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -16,7 +16,7 @@ int igt_live_test_begin(struct igt_live_test *t,
const char *func,
const char *name)
{
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
@@ -57,7 +57,7 @@ int igt_live_test_end(struct igt_live_test *t)
return -EIO;
}
- for_each_engine(engine, &i915->gt, id) {
+ for_each_engine(engine, to_gt(i915), id) {
if (t->reset_engine[id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 9f8590b868a9..a2838c65f8a5 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -36,7 +36,7 @@ void igt_global_reset_unlock(struct intel_gt *gt)
enum intel_engine_id id;
for_each_engine(engine, gt, id)
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
clear_bit(I915_RESET_BACKOFF, &gt->reset.flags);
wake_up_all(&gt->reset.queue);
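The switch to clear_and_wake_up_bit() above matters because reset waiters sleep on those per-engine flag bits; a bare clear_bit() would leave them asleep until some unrelated wakeup arrived. A minimal illustration of how the two sides pair up via the <linux/wait_bit.h> API — gt_flags here is just a stand-in unsigned long:

	/* Waiter side: sleep until bit 0 of the flags word is cleared. */
	wait_on_bit(&gt_flags, 0, TASK_UNINTERRUPTIBLE);

	/* Releaser side: clear_bit() alone would leave the waiter asleep;
	 * clear_and_wake_up_bit() clears the bit with release semantics
	 * and then wakes every task waiting on that bit. */
	clear_and_wake_up_bit(0, &gt_flags);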
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 418caae84759..8255561ff853 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -225,7 +225,7 @@ static int igt_mock_reserve(void *arg)
out_close:
close_objects(mem, &objects);
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
out_free_order:
kfree(order);
return err;
@@ -439,7 +439,7 @@ static int igt_mock_splintered_region(void *arg)
out_close:
close_objects(mem, &objects);
out_put:
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
return err;
}
@@ -507,7 +507,7 @@ static int igt_mock_max_segment(void *arg)
out_close:
close_objects(mem, &objects);
out_put:
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
return err;
}
@@ -1196,7 +1196,7 @@ int intel_memory_region_mock_selftests(void)
err = i915_subtests(tests, mem);
- intel_memory_region_put(mem);
+ intel_memory_region_destroy(mem);
out_unref:
mock_destroy_device(i915);
return err;
@@ -1217,7 +1217,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
return 0;
}
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
@@ -1229,7 +1229,7 @@ int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
SUBTEST(perf_memcpy),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return i915_live_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index bc8128170a99..cdd196783535 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -344,5 +344,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_forcewake_domains),
};
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index 080b90b63d16..bf2752cc1e0b 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -26,7 +26,7 @@
/* Small library of different fence types useful for writing tests */
-static int __i915_sw_fence_call
+static int
nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
return NOTIFY_DONE;
@@ -41,12 +41,12 @@ void __onstack_fence_init(struct i915_sw_fence *fence,
__init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
fence->error = 0;
- fence->flags = (unsigned long)nop_fence_notify;
+ fence->fn = nop_fence_notify;
}
void onstack_fence_fini(struct i915_sw_fence *fence)
{
- if (!fence->flags)
+ if (!fence->fn)
return;
i915_sw_fence_commit(fence);
@@ -89,7 +89,7 @@ struct heap_fence {
};
};
-static int __i915_sw_fence_call
+static int
heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct heap_fence *h = container_of(fence, typeof(*h), fence);
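The __i915_sw_fence_call annotation being dropped throughout these files existed to force callback alignment so the low bits of the function pointer could double as flag storage in one word; storing the callback in a dedicated fn member retires that trick. A sketch of the packing scheme being removed — the mask name and widths are illustrative, not the i915 definitions:

	typedef int (*notify_fn)(void *fence, int state);

	#define NOTIFY_FLAG_MASK 0x3UL	/* low bits, free when fn is 4-byte aligned */

	/* Old scheme: callback pointer and flag bits share one word. */
	static unsigned long pack(notify_fn fn, unsigned long flags)
	{
		/* Only safe if the pointer's low bits are zero, which is
		 * what the alignment annotation guaranteed. */
		return (unsigned long)fn | (flags & NOTIFY_FLAG_MASK);
	}

	static notify_fn unpack_fn(unsigned long word)
	{
		return (notify_fn)(word & ~NOTIFY_FLAG_MASK);
	}

	static unsigned long unpack_flags(unsigned long word)
	{
		return word & NOTIFY_FLAG_MASK;
	}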
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 4f8180146888..8aa7b1d33865 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -45,7 +45,7 @@
void mock_device_flush(struct drm_i915_private *i915)
{
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -64,7 +64,7 @@ static void mock_device_release(struct drm_device *dev)
goto out;
mock_device_flush(i915);
- intel_gt_driver_remove(&i915->gt);
+ intel_gt_driver_remove(to_gt(i915));
i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
@@ -73,7 +73,7 @@ static void mock_device_release(struct drm_device *dev)
destroy_workqueue(i915->wq);
intel_region_ttm_device_fini(i915);
- intel_gt_driver_late_release(&i915->gt);
+ intel_gt_driver_late_release(to_gt(i915));
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -165,7 +165,7 @@ struct drm_i915_private *mock_gem_device(void)
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
- mkwrite_device_info(i915)->graphics_ver = -1;
+ mkwrite_device_info(i915)->graphics.ver = -1;
mkwrite_device_info(i915)->page_sizes =
I915_GTT_PAGE_SIZE_4K |
@@ -175,12 +175,14 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
intel_memory_regions_hw_probe(i915);
- mock_uncore_init(&i915->uncore, i915);
+ spin_lock_init(&i915->gpu_error.lock);
i915_gem_init__mm(i915);
- intel_gt_init_early(&i915->gt, i915);
- atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
- i915->gt.awake = -ENODEV;
+ intel_gt_init_early(to_gt(i915), i915);
+ __intel_gt_init_early(to_gt(i915), i915);
+ mock_uncore_init(&i915->uncore, i915);
+ atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
+ to_gt(i915)->awake = -ENODEV;
ret = intel_region_ttm_device_init(i915);
if (ret)
@@ -193,19 +195,19 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
mock_init_ggtt(i915, &i915->ggtt);
- i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
+ to_gt(i915)->vm = i915_vm_get(&i915->ggtt.vm);
mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
- i915->gt.info.engine_mask = BIT(0);
+ to_gt(i915)->info.engine_mask = BIT(0);
- i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
- if (!i915->gt.engine[RCS0])
+ to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
+ if (!to_gt(i915)->engine[RCS0])
goto err_unlock;
- if (mock_engine_init(i915->gt.engine[RCS0]))
+ if (mock_engine_init(to_gt(i915)->engine[RCS0]))
goto err_context;
- __clear_bit(I915_WEDGED, &i915->gt.reset.flags);
+ __clear_bit(I915_WEDGED, &to_gt(i915)->reset.flags);
intel_engines_driver_register(i915);
i915->do_release = true;
@@ -214,13 +216,13 @@ struct drm_i915_private *mock_gem_device(void)
return i915;
err_context:
- intel_gt_driver_remove(&i915->gt);
+ intel_gt_driver_remove(to_gt(i915));
err_unlock:
destroy_workqueue(i915->wq);
err_drv:
intel_region_ttm_device_fini(i915);
err_ttm:
- intel_gt_driver_late_release(&i915->gt);
+ intel_gt_driver_late_release(to_gt(i915));
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
mock_destroy_device(i915);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index cc047ec594f9..1802baf80a17 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -70,7 +70,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
if (!ppgtt)
return NULL;
- ppgtt->vm.gt = &i915->gt;
+ ppgtt->vm.gt = to_gt(i915);
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.dma = i915->drm.dev;
@@ -78,6 +78,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
ppgtt->vm.clear_range = mock_clear_range;
ppgtt->vm.insert_page = mock_insert_page;
@@ -86,8 +87,6 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt;
ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
return ppgtt;
}
@@ -109,7 +108,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
{
memset(ggtt, 0, sizeof(*ggtt));
- ggtt->vm.gt = &i915->gt;
+ ggtt->vm.gt = to_gt(i915);
ggtt->vm.i915 = i915;
ggtt->vm.is_ggtt = true;
@@ -118,6 +117,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.total = 4096 * PAGE_SIZE;
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.clear_range = mock_clear_range;
ggtt->vm.insert_page = mock_insert_page;
@@ -126,11 +126,9 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt;
ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
- i915->gt.ggtt = ggtt;
+ to_gt(i915)->ggtt = ggtt;
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 75793008c4ef..19bff8afcaaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -15,9 +15,9 @@
static void mock_region_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ i915_refct_sgt_put(obj->mm.rsgt);
+ obj->mm.rsgt = NULL;
intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
- sg_free_table(pages);
- kfree(pages);
}
static int mock_region_get_pages(struct drm_i915_gem_object *obj)
@@ -36,12 +36,14 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
if (IS_ERR(obj->mm.res))
return PTR_ERR(obj->mm.res);
- pages = intel_region_ttm_resource_to_st(obj->mm.region, obj->mm.res);
- if (IS_ERR(pages)) {
- err = PTR_ERR(pages);
+ obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
+ obj->mm.res);
+ if (IS_ERR(obj->mm.rsgt)) {
+ err = PTR_ERR(obj->mm.rsgt);
goto err_free_resource;
}
+ pages = &obj->mm.rsgt->table;
__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
return 0;
@@ -82,13 +84,16 @@ static int mock_object_init(struct intel_memory_region *mem,
return 0;
}
-static void mock_region_fini(struct intel_memory_region *mem)
+static int mock_region_fini(struct intel_memory_region *mem)
{
struct drm_i915_private *i915 = mem->i915;
int instance = mem->instance;
+ int ret;
- intel_region_ttm_fini(mem);
+ ret = intel_region_ttm_fini(mem);
ida_free(&i915->selftest.mock_region_instances, instance);
+
+ return ret;
}
static const struct intel_memory_region_ops mock_region_ops = {
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index ca57e4008701..f2d6be5e1230 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -42,7 +42,7 @@ __nop_read(64)
void mock_uncore_init(struct intel_uncore *uncore,
struct drm_i915_private *i915)
{
- intel_uncore_init_early(uncore, i915);
+ intel_uncore_init_early(uncore, to_gt(i915));
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index 35380738a951..ed2ac5752ac4 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -3,9 +3,8 @@
* Copyright © 2013-2021 Intel Corporation
*/
-#include <asm/iosf_mbi.h>
-
#include "i915_drv.h"
+#include "i915_iosf_mbi.h"
#include "vlv_sideband.h"
/*
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index b5fa0e45a839..bb9738c7c825 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -4,7 +4,7 @@ config DRM_IMX
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_KMS_HELPER
depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on IMX_IPUV3_CORE
help
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
index 2b17a964ff05..7374f1952762 100644
--- a/drivers/gpu/drm/imx/dcss/Kconfig
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -1,7 +1,7 @@
config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
- select DRM_KMS_CMA_HELPER
+ select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
help
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 3b57f8be007c..001f59fb06d5 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -8,7 +8,6 @@ config DRM_INGENIC
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index a5df1c8d34cd..b4943a56be09 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -21,6 +21,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -41,6 +42,8 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#define HWDESC_PALETTE 2
+
struct ingenic_dma_hwdesc {
u32 next;
u32 addr;
@@ -49,9 +52,7 @@ struct ingenic_dma_hwdesc {
} __aligned(16);
struct ingenic_dma_hwdescs {
- struct ingenic_dma_hwdesc hwdesc_f0;
- struct ingenic_dma_hwdesc hwdesc_f1;
- struct ingenic_dma_hwdesc hwdesc_pal;
+ struct ingenic_dma_hwdesc hwdesc[3];
u16 palette[256] __aligned(16);
};
@@ -64,6 +65,11 @@ struct jz_soc_info {
unsigned int num_formats_f0, num_formats_f1;
};
+struct ingenic_drm_private_state {
+ struct drm_private_state base;
+ bool use_palette;
+};
+
struct ingenic_drm {
struct drm_device drm;
/*
@@ -99,8 +105,53 @@ struct ingenic_drm {
struct mutex clk_mutex;
bool update_clk_rate;
struct notifier_block clock_nb;
+
+ struct drm_private_obj private_obj;
};
+struct ingenic_drm_bridge {
+ struct drm_encoder encoder;
+ struct drm_bridge bridge, *next_bridge;
+
+ struct drm_bus_cfg bus_cfg;
+};
+
+static inline struct ingenic_drm_bridge *
+to_ingenic_drm_bridge(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct ingenic_drm_bridge, encoder);
+}
+
+static inline struct ingenic_drm_private_state *
+to_ingenic_drm_priv_state(struct drm_private_state *state)
+{
+ return container_of(state, struct ingenic_drm_private_state, base);
+}
+
+static struct ingenic_drm_private_state *
+ingenic_drm_get_priv_state(struct ingenic_drm *priv, struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &priv->private_obj);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_ingenic_drm_priv_state(priv_state);
+}
+
+static struct ingenic_drm_private_state *
+ingenic_drm_get_new_priv_state(struct ingenic_drm *priv, struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_new_private_obj_state(state, &priv->private_obj);
+ if (!priv_state)
+ return NULL;
+
+ return to_ingenic_drm_priv_state(priv_state);
+}
+
static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -141,6 +192,14 @@ static inline struct ingenic_drm *drm_nb_get_priv(struct notifier_block *nb)
return container_of(nb, struct ingenic_drm, clock_nb);
}
+static inline dma_addr_t dma_hwdesc_addr(const struct ingenic_drm *priv,
+ unsigned int idx)
+{
+ u32 offset = offsetof(struct ingenic_dma_hwdescs, hwdesc[idx]);
+
+ return priv->dma_hwdescs_phys + offset;
+}
+
static int ingenic_drm_update_pixclk(struct notifier_block *nb,
unsigned long action,
void *data)
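The dma_hwdesc_addr() helper introduced above derives each descriptor's bus address as the base of the coherent allocation plus the descriptor's offset inside the containing struct; this works because the hwdesc array and palette were allocated as one contiguous DMA block. A standalone sketch of the arithmetic (userspace C, structure simplified):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct hwdesc { uint32_t next, addr, id, cmd; };

	struct hwdescs {
		struct hwdesc hwdesc[3];	/* f0, f1, palette */
		uint16_t palette[256];
	};

	/* base stands in for the bus address of the whole coherent
	 * allocation; each descriptor's address is base plus its offset.
	 * A variable index inside offsetof() is a GCC/clang extension,
	 * which the driver code relies on as well. */
	static uint64_t hwdesc_addr(uint64_t base, unsigned int idx)
	{
		return base + offsetof(struct hwdescs, hwdesc[idx]);
	}

	int main(void)
	{
		uint64_t base = 0x10000000ULL;
		unsigned int i;

		for (i = 0; i < 3; i++)
			printf("desc %u at %#llx\n", i,
			       (unsigned long long)hwdesc_addr(base, i));
		return 0;
	}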
@@ -163,9 +222,20 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
+ struct ingenic_drm_private_state *priv_state;
+ unsigned int next_id;
+
+ priv_state = ingenic_drm_get_priv_state(priv, state);
+ if (WARN_ON(IS_ERR(priv_state)))
+ return;
regmap_write(priv->map, JZ_REG_LCD_STATE, 0);
+ /* Set addresses of our DMA descriptor chains */
+ next_id = priv_state->use_palette ? HWDESC_PALETTE : 0;
+ regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_addr(priv, next_id));
+ regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_addr(priv, 1));
+
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
JZ_LCD_CTRL_ENABLE | JZ_LCD_CTRL_DISABLE,
JZ_LCD_CTRL_ENABLE);
@@ -369,6 +439,7 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
+ struct ingenic_drm_private_state *priv_state;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
int ret;
@@ -381,6 +452,10 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
+ priv_state = ingenic_drm_get_priv_state(priv, state);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
@@ -399,6 +474,9 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
(new_plane_state->src_h >> 16) != new_plane_state->crtc_h))
return -EINVAL;
+ priv_state->use_palette = new_plane_state->fb &&
+ new_plane_state->fb->format->format == DRM_FORMAT_C8;
+
/*
* Require full modeset if enabling or disabling a plane, or changing
* its position, size or depth.
@@ -558,9 +636,10 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, plane);
+ unsigned int width, height, cpp, next_id, plane_id;
+ struct ingenic_drm_private_state *priv_state;
struct drm_crtc_state *crtc_state;
struct ingenic_dma_hwdesc *hwdesc;
- unsigned int width, height, cpp, offset;
dma_addr_t addr;
u32 fourcc;
@@ -569,32 +648,26 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
drm_fb_cma_sync_non_coherent(&priv->drm, oldstate, newstate);
crtc_state = newstate->crtc->state;
+ plane_id = !!(priv->soc_info->has_osd && plane != &priv->f0);
addr = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0);
width = newstate->src_w >> 16;
height = newstate->src_h >> 16;
cpp = newstate->fb->format->cpp[0];
- if (!priv->soc_info->has_osd || plane == &priv->f0)
- hwdesc = &priv->dma_hwdescs->hwdesc_f0;
- else
- hwdesc = &priv->dma_hwdescs->hwdesc_f1;
+ priv_state = ingenic_drm_get_new_priv_state(priv, state);
+ next_id = (priv_state && priv_state->use_palette) ? HWDESC_PALETTE : plane_id;
+ hwdesc = &priv->dma_hwdescs->hwdesc[plane_id];
hwdesc->addr = addr;
hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4);
+ hwdesc->next = dma_hwdesc_addr(priv, next_id);
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
fourcc = newstate->fb->format->format;
ingenic_drm_plane_config(priv->dev, plane, fourcc);
- if (fourcc == DRM_FORMAT_C8)
- offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_pal);
- else
- offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
-
- priv->dma_hwdescs->hwdesc_f0.next = priv->dma_hwdescs_phys + offset;
-
crtc_state->color_mgmt_changed = fourcc == DRM_FORMAT_C8;
}
@@ -609,11 +682,10 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
{
struct ingenic_drm *priv = drm_device_get_priv(encoder->dev);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
- struct drm_connector *conn = conn_state->connector;
- struct drm_display_info *info = &conn->display_info;
+ struct ingenic_drm_bridge *bridge = to_ingenic_drm_bridge(encoder);
unsigned int cfg, rgbcfg = 0;
- priv->panel_is_sharp = info->bus_flags & DRM_BUS_FLAG_SHARP_SIGNALS;
+ priv->panel_is_sharp = bridge->bus_cfg.flags & DRM_BUS_FLAG_SHARP_SIGNALS;
if (priv->panel_is_sharp) {
cfg = JZ_LCD_CFG_MODE_SPECIAL_TFT_1 | JZ_LCD_CFG_REV_POLARITY;
@@ -626,19 +698,19 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
cfg |= JZ_LCD_CFG_VSYNC_ACTIVE_LOW;
- if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
+ if (bridge->bus_cfg.flags & DRM_BUS_FLAG_DE_LOW)
cfg |= JZ_LCD_CFG_DE_ACTIVE_LOW;
- if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ if (bridge->bus_cfg.flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE;
if (!priv->panel_is_sharp) {
- if (conn->connector_type == DRM_MODE_CONNECTOR_TV) {
+ if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_TV) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
cfg |= JZ_LCD_CFG_MODE_TV_OUT_I;
else
cfg |= JZ_LCD_CFG_MODE_TV_OUT_P;
} else {
- switch (*info->bus_formats) {
+ switch (bridge->bus_cfg.format) {
case MEDIA_BUS_FMT_RGB565_1X16:
cfg |= JZ_LCD_CFG_MODE_GENERIC_16BIT;
break;
@@ -664,20 +736,29 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
regmap_write(priv->map, JZ_REG_LCD_RGBC, rgbcfg);
}
-static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static int ingenic_drm_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder);
+
+ return drm_bridge_attach(bridge->encoder, ib->next_bridge,
+ &ib->bridge, flags);
+}
+
+static int ingenic_drm_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- struct drm_display_info *info = &conn_state->connector->display_info;
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder);
- if (info->num_bus_formats != 1)
- return -EINVAL;
+ ib->bus_cfg = bridge_state->output_bus_cfg;
if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_TV)
return 0;
- switch (*info->bus_formats) {
+ switch (bridge_state->output_bus_cfg.format) {
case MEDIA_BUS_FMT_RGB888_3X8:
case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
/*
@@ -764,6 +845,28 @@ ingenic_drm_gem_create_object(struct drm_device *drm, size_t size)
return &obj->base;
}
+static struct drm_private_state *
+ingenic_drm_duplicate_state(struct drm_private_obj *obj)
+{
+ struct ingenic_drm_private_state *state = to_ingenic_drm_priv_state(obj->state);
+
+ state = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void ingenic_drm_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct ingenic_drm_private_state *priv_state = to_ingenic_drm_priv_state(state);
+
+ kfree(priv_state);
+}
+
DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops);
static const struct drm_driver ingenic_drm_driver_data = {
@@ -819,8 +922,16 @@ static const struct drm_crtc_helper_funcs ingenic_drm_crtc_helper_funcs = {
};
static const struct drm_encoder_helper_funcs ingenic_drm_encoder_helper_funcs = {
- .atomic_mode_set = ingenic_drm_encoder_atomic_mode_set,
- .atomic_check = ingenic_drm_encoder_atomic_check,
+ .atomic_mode_set = ingenic_drm_encoder_atomic_mode_set,
+};
+
+static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = {
+ .attach = ingenic_drm_bridge_attach,
+ .atomic_check = ingenic_drm_bridge_atomic_check,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
};
static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
@@ -834,6 +945,11 @@ static struct drm_mode_config_helper_funcs ingenic_drm_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail,
};
+static const struct drm_private_state_funcs ingenic_drm_private_state_funcs = {
+ .atomic_duplicate_state = ingenic_drm_duplicate_state,
+ .atomic_destroy_state = ingenic_drm_destroy_state,
+};
+
static void ingenic_drm_unbind_all(void *d)
{
struct ingenic_drm *priv = d;
@@ -846,21 +962,57 @@ static void __maybe_unused ingenic_drm_release_rmem(void *d)
of_reserved_mem_device_release(d);
}
+static void ingenic_drm_configure_hwdesc(struct ingenic_drm *priv,
+ unsigned int hwdesc,
+ unsigned int next_hwdesc, u32 id)
+{
+ struct ingenic_dma_hwdesc *desc = &priv->dma_hwdescs->hwdesc[hwdesc];
+
+ desc->next = dma_hwdesc_addr(priv, next_hwdesc);
+ desc->id = id;
+}
+
+static void ingenic_drm_configure_hwdesc_palette(struct ingenic_drm *priv)
+{
+ struct ingenic_dma_hwdesc *desc;
+
+ ingenic_drm_configure_hwdesc(priv, HWDESC_PALETTE, 0, 0xc0);
+
+ desc = &priv->dma_hwdescs->hwdesc[HWDESC_PALETTE];
+ desc->addr = priv->dma_hwdescs_phys
+ + offsetof(struct ingenic_dma_hwdescs, palette);
+ desc->cmd = JZ_LCD_CMD_ENABLE_PAL
+ | (sizeof(priv->dma_hwdescs->palette) / 4);
+}
+
+static void ingenic_drm_configure_hwdesc_plane(struct ingenic_drm *priv,
+ unsigned int plane)
+{
+ ingenic_drm_configure_hwdesc(priv, plane, plane, 0xf0 | plane);
+}
+
+static void ingenic_drm_atomic_private_obj_fini(struct drm_device *drm, void *private_obj)
+{
+ drm_atomic_private_obj_fini(private_obj);
+}
+
static int ingenic_drm_bind(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct ingenic_drm_private_state *private_state;
const struct jz_soc_info *soc_info;
struct ingenic_drm *priv;
struct clk *parent_clk;
struct drm_plane *primary;
struct drm_bridge *bridge;
struct drm_panel *panel;
+ struct drm_connector *connector;
struct drm_encoder *encoder;
+ struct ingenic_drm_bridge *ib;
struct drm_device *drm;
void __iomem *base;
long parent_rate;
unsigned int i, clone_mask = 0;
- dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1;
int ret, irq;
soc_info = of_device_get_match_data(dev);
@@ -942,27 +1094,14 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
if (!priv->dma_hwdescs)
return -ENOMEM;
-
/* Configure DMA hwdesc for foreground0 plane */
- dma_hwdesc_phys_f0 = priv->dma_hwdescs_phys
- + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
- priv->dma_hwdescs->hwdesc_f0.next = dma_hwdesc_phys_f0;
- priv->dma_hwdescs->hwdesc_f0.id = 0xf0;
+ ingenic_drm_configure_hwdesc_plane(priv, 0);
/* Configure DMA hwdesc for foreground1 plane */
- dma_hwdesc_phys_f1 = priv->dma_hwdescs_phys
- + offsetof(struct ingenic_dma_hwdescs, hwdesc_f1);
- priv->dma_hwdescs->hwdesc_f1.next = dma_hwdesc_phys_f1;
- priv->dma_hwdescs->hwdesc_f1.id = 0xf1;
+ ingenic_drm_configure_hwdesc_plane(priv, 1);
/* Configure DMA hwdesc for palette */
- priv->dma_hwdescs->hwdesc_pal.next = priv->dma_hwdescs_phys
- + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
- priv->dma_hwdescs->hwdesc_pal.id = 0xc0;
- priv->dma_hwdescs->hwdesc_pal.addr = priv->dma_hwdescs_phys
- + offsetof(struct ingenic_dma_hwdescs, palette);
- priv->dma_hwdescs->hwdesc_pal.cmd = JZ_LCD_CMD_ENABLE_PAL
- | (sizeof(priv->dma_hwdescs->palette) / 4);
+ ingenic_drm_configure_hwdesc_palette(priv);
primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0;
@@ -1046,20 +1185,36 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
bridge = devm_drm_panel_bridge_add_typed(dev, panel,
DRM_MODE_CONNECTOR_DPI);
- encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DPI, NULL);
- if (IS_ERR(encoder)) {
- ret = PTR_ERR(encoder);
+ ib = drmm_encoder_alloc(drm, struct ingenic_drm_bridge, encoder,
+ NULL, DRM_MODE_ENCODER_DPI, NULL);
+ if (IS_ERR(ib)) {
+ ret = PTR_ERR(ib);
dev_err(dev, "Failed to init encoder: %d\n", ret);
return ret;
}
- encoder->possible_crtcs = 1;
+ encoder = &ib->encoder;
+ encoder->possible_crtcs = drm_crtc_mask(&priv->crtc);
drm_encoder_helper_add(encoder, &ingenic_drm_encoder_helper_funcs);
- ret = drm_bridge_attach(encoder, bridge, NULL, 0);
- if (ret)
+ ib->bridge.funcs = &ingenic_drm_bridge_funcs;
+ ib->next_bridge = bridge;
+
+ ret = drm_bridge_attach(encoder, &ib->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(dev, "Unable to attach bridge\n");
return ret;
+ }
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector)) {
+ dev_err(dev, "Unable to init connector\n");
+ return PTR_ERR(connector);
+ }
+
+ drm_connector_attach_encoder(connector, encoder);
}
drm_for_each_encoder(encoder, drm) {
@@ -1112,10 +1267,6 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
}
}
- /* Set address of our DMA descriptor chain */
- regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_phys_f0);
- regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_phys_f1);
-
/* Enable OSD if available */
if (soc_info->has_osd)
regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN);
@@ -1130,6 +1281,20 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
goto err_devclk_disable;
}
+ private_state = kzalloc(sizeof(*private_state), GFP_KERNEL);
+ if (!private_state) {
+ ret = -ENOMEM;
+ goto err_clk_notifier_unregister;
+ }
+
+ drm_atomic_private_obj_init(drm, &priv->private_obj, &private_state->base,
+ &ingenic_drm_private_state_funcs);
+
+ ret = drmm_add_action_or_reset(drm, ingenic_drm_atomic_private_obj_fini,
+ &priv->private_obj);
+ if (ret)
+ goto err_private_state_free;
+
ret = drm_dev_register(drm, 0);
if (ret) {
dev_err(dev, "Failed to register DRM driver\n");
@@ -1140,6 +1305,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
return 0;
+err_private_state_free:
+ kfree(private_state);
err_clk_notifier_unregister:
clk_notifier_unregister(parent_clk, &priv->clock_nb);
err_devclk_disable:
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index aeb8a757d213..2737fc521e15 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -45,6 +45,12 @@ struct soc_info {
unsigned int weight, unsigned int offset);
};
+struct ingenic_ipu_private_state {
+ struct drm_private_state base;
+
+ unsigned int num_w, num_h, denom_w, denom_h;
+};
+
struct ingenic_ipu {
struct drm_plane plane;
struct drm_device *drm;
@@ -54,12 +60,12 @@ struct ingenic_ipu {
const struct soc_info *soc_info;
bool clk_enabled;
- unsigned int num_w, num_h, denom_w, denom_h;
-
dma_addr_t addr_y, addr_u, addr_v;
struct drm_property *sharpness_prop;
unsigned int sharpness;
+
+ struct drm_private_obj private_obj;
};
/* Signed 15.16 fixed-point math (for bicubic scaling coefficients) */
@@ -73,6 +79,36 @@ static inline struct ingenic_ipu *plane_to_ingenic_ipu(struct drm_plane *plane)
return container_of(plane, struct ingenic_ipu, plane);
}
+static inline struct ingenic_ipu_private_state *
+to_ingenic_ipu_priv_state(struct drm_private_state *state)
+{
+ return container_of(state, struct ingenic_ipu_private_state, base);
+}
+
+static struct ingenic_ipu_private_state *
+ingenic_ipu_get_priv_state(struct ingenic_ipu *priv, struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &priv->private_obj);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_ingenic_ipu_priv_state(priv_state);
+}
+
+static struct ingenic_ipu_private_state *
+ingenic_ipu_get_new_priv_state(struct ingenic_ipu *priv, struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_new_private_obj_state(state, &priv->private_obj);
+ if (!priv_state)
+ return NULL;
+
+ return to_ingenic_ipu_priv_state(priv_state);
+}
+
/*
* Apply conventional cubic convolution kernel. Both parameters
* and return value are 15.16 signed fixed-point.
@@ -293,11 +329,16 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
const struct drm_format_info *finfo;
u32 ctrl, stride = 0, coef_index = 0, format = 0;
bool needs_modeset, upscaling_w, upscaling_h;
+ struct ingenic_ipu_private_state *ipu_state;
int err;
if (!newstate || !newstate->fb)
return;
+ ipu_state = ingenic_ipu_get_new_priv_state(ipu, state);
+ if (WARN_ON(!ipu_state))
+ return;
+
finfo = drm_format_info(newstate->fb->format->format);
if (!ipu->clk_enabled) {
@@ -470,27 +511,27 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
if (ipu->soc_info->has_bicubic)
ctrl |= JZ_IPU_CTRL_ZOOM_SEL;
- upscaling_w = ipu->num_w > ipu->denom_w;
+ upscaling_w = ipu_state->num_w > ipu_state->denom_w;
if (upscaling_w)
ctrl |= JZ_IPU_CTRL_HSCALE;
- if (ipu->num_w != 1 || ipu->denom_w != 1) {
+ if (ipu_state->num_w != 1 || ipu_state->denom_w != 1) {
if (!ipu->soc_info->has_bicubic && !upscaling_w)
- coef_index |= (ipu->denom_w - 1) << 16;
+ coef_index |= (ipu_state->denom_w - 1) << 16;
else
- coef_index |= (ipu->num_w - 1) << 16;
+ coef_index |= (ipu_state->num_w - 1) << 16;
ctrl |= JZ_IPU_CTRL_HRSZ_EN;
}
- upscaling_h = ipu->num_h > ipu->denom_h;
+ upscaling_h = ipu_state->num_h > ipu_state->denom_h;
if (upscaling_h)
ctrl |= JZ_IPU_CTRL_VSCALE;
- if (ipu->num_h != 1 || ipu->denom_h != 1) {
+ if (ipu_state->num_h != 1 || ipu_state->denom_h != 1) {
if (!ipu->soc_info->has_bicubic && !upscaling_h)
- coef_index |= ipu->denom_h - 1;
+ coef_index |= ipu_state->denom_h - 1;
else
- coef_index |= ipu->num_h - 1;
+ coef_index |= ipu_state->num_h - 1;
ctrl |= JZ_IPU_CTRL_VRSZ_EN;
}
@@ -501,13 +542,13 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
/* Set the LUT index register */
regmap_write(ipu->map, JZ_REG_IPU_RSZ_COEF_INDEX, coef_index);
- if (ipu->num_w != 1 || ipu->denom_w != 1)
+ if (ipu_state->num_w != 1 || ipu_state->denom_w != 1)
ingenic_ipu_set_coefs(ipu, JZ_REG_IPU_HRSZ_COEF_LUT,
- ipu->num_w, ipu->denom_w);
+ ipu_state->num_w, ipu_state->denom_w);
- if (ipu->num_h != 1 || ipu->denom_h != 1)
+ if (ipu_state->num_h != 1 || ipu_state->denom_h != 1)
ingenic_ipu_set_coefs(ipu, JZ_REG_IPU_VRSZ_COEF_LUT,
- ipu->num_h, ipu->denom_h);
+ ipu_state->num_h, ipu_state->denom_h);
/* Clear STATUS register */
regmap_write(ipu->map, JZ_REG_IPU_STATUS, 0);
@@ -519,7 +560,8 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
dev_dbg(ipu->dev, "Scaling %ux%u to %ux%u (%u:%u horiz, %u:%u vert)\n",
newstate->src_w >> 16, newstate->src_h >> 16,
newstate->crtc_w, newstate->crtc_h,
- ipu->num_w, ipu->denom_w, ipu->num_h, ipu->denom_h);
+ ipu_state->num_w, ipu_state->denom_w,
+ ipu_state->num_h, ipu_state->denom_h);
}
static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
@@ -533,6 +575,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
struct drm_crtc_state *crtc_state;
+ struct ingenic_ipu_private_state *ipu_state;
if (!crtc)
return 0;
@@ -541,6 +584,10 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
+ ipu_state = ingenic_ipu_get_priv_state(ipu, state);
+ if (IS_ERR(ipu_state))
+ return PTR_ERR(ipu_state);
+
/* Request a full modeset if we are enabling or disabling the IPU. */
if (!old_plane_state->crtc ^ !new_plane_state->crtc)
crtc_state->mode_changed = true;
@@ -593,10 +640,10 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
if (num_h > max_h)
return -EINVAL;
- ipu->num_w = num_w;
- ipu->num_h = num_h;
- ipu->denom_w = denom_w;
- ipu->denom_h = denom_h;
+ ipu_state->num_w = num_w;
+ ipu_state->num_h = num_h;
+ ipu_state->denom_w = denom_w;
+ ipu_state->denom_h = denom_h;
out_check_damage:
if (ingenic_drm_map_noncoherent(ipu->master))
@@ -679,6 +726,33 @@ static const struct drm_plane_funcs ingenic_ipu_plane_funcs = {
.atomic_set_property = ingenic_ipu_plane_atomic_set_property,
};
+static struct drm_private_state *
+ingenic_ipu_duplicate_state(struct drm_private_obj *obj)
+{
+ struct ingenic_ipu_private_state *state = to_ingenic_ipu_priv_state(obj->state);
+
+ state = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void ingenic_ipu_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct ingenic_ipu_private_state *priv_state = to_ingenic_ipu_priv_state(state);
+
+ kfree(priv_state);
+}
+
+static const struct drm_private_state_funcs ingenic_ipu_private_state_funcs = {
+ .atomic_duplicate_state = ingenic_ipu_duplicate_state,
+ .atomic_destroy_state = ingenic_ipu_destroy_state,
+};
+
static irqreturn_t ingenic_ipu_irq_handler(int irq, void *arg)
{
struct ingenic_ipu *ipu = arg;
@@ -717,6 +791,7 @@ static const struct regmap_config ingenic_ipu_regmap_config = {
static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct ingenic_ipu_private_state *private_state;
const struct soc_info *soc_info;
struct drm_device *drm = d;
struct drm_plane *plane;
@@ -810,7 +885,20 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d)
return err;
}
+ private_state = kzalloc(sizeof(*private_state), GFP_KERNEL);
+ if (!private_state) {
+ err = -ENOMEM;
+ goto err_clk_unprepare;
+ }
+
+ drm_atomic_private_obj_init(drm, &ipu->private_obj, &private_state->base,
+ &ingenic_ipu_private_state_funcs);
+
return 0;
+
+err_clk_unprepare:
+ clk_unprepare(ipu->clk);
+ return err;
}
static void ingenic_ipu_unbind(struct device *dev,
@@ -818,6 +906,7 @@ static void ingenic_ipu_unbind(struct device *dev,
{
struct ingenic_ipu *ipu = dev_get_drvdata(dev);
+ drm_atomic_private_obj_fini(&ipu->private_obj);
clk_unprepare(ipu->clk);
}
diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig
index bc4cb5e1cd8a..5fdd43dad507 100644
--- a/drivers/gpu/drm/kmb/Kconfig
+++ b/drivers/gpu/drm/kmb/Kconfig
@@ -3,7 +3,6 @@ config DRM_KMB_DISPLAY
depends on DRM
depends on ARCH_KEEMBAY || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_MIPI_DSI
help
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 961ac6fb5fcf..ed2424350773 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -15,6 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
@@ -176,6 +177,7 @@ static int kmb_setup_mode_config(struct drm_device *drm)
drm->mode_config.min_height = KMB_FB_MIN_HEIGHT;
drm->mode_config.max_width = KMB_FB_MAX_WIDTH;
drm->mode_config.max_height = KMB_FB_MAX_HEIGHT;
+ drm->mode_config.preferred_depth = 24;
drm->mode_config.funcs = &kmb_mode_config_funcs;
ret = kmb_setup_crtc(drm);
@@ -559,6 +561,8 @@ static int kmb_probe(struct platform_device *pdev)
if (ret)
goto err_register;
+ drm_fbdev_generic_setup(&kmb->drm, 0);
+
return 0;
err_register:
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index f74f8048af8f..02cef0cea657 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -358,6 +358,7 @@ int lima_device_init(struct lima_device *ldev)
int err, i;
dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(ldev->dev, UINT_MAX);
err = lima_clk_init(ldev);
if (err)
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 640acc060467..f9a9198ef198 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -127,7 +127,7 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
if (err)
goto out;
} else {
- struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
+ struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);
if (IS_ERR(sgt)) {
err = PTR_ERR(sgt);
@@ -151,7 +151,7 @@ static void lima_gem_free_object(struct drm_gem_object *obj)
if (!list_empty(&bo->va))
dev_err(obj->dev->dev, "lima gem free bo still has va\n");
- drm_gem_shmem_free_object(obj);
+ drm_gem_shmem_free(&bo->base);
}
static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
@@ -179,7 +179,7 @@ static int lima_gem_pin(struct drm_gem_object *obj)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_pin(obj);
+ return drm_gem_shmem_pin(&bo->base);
}
static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
@@ -189,7 +189,7 @@ static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_vmap(obj, map);
+ return drm_gem_shmem_vmap(&bo->base, map);
}
static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
@@ -199,19 +199,19 @@ static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_mmap(obj, vma);
+ return drm_gem_shmem_mmap(&bo->base, vma);
}
static const struct drm_gem_object_funcs lima_gem_funcs = {
.free = lima_gem_free_object,
.open = lima_gem_object_open,
.close = lima_gem_object_close,
- .print_info = drm_gem_shmem_print_info,
+ .print_info = drm_gem_shmem_object_print_info,
.pin = lima_gem_pin,
- .unpin = drm_gem_shmem_unpin,
- .get_sg_table = drm_gem_shmem_get_sg_table,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
.vmap = lima_gem_vmap,
- .vunmap = drm_gem_shmem_vunmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
.mmap = lima_gem_mmap,
};
@@ -221,7 +221,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t siz
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mutex_init(&bo->lock);
INIT_LIST_HEAD(&bo->va);
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 99d5f6f1a882..5612d73f238f 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
} else {
buffer_chunk->size = lima_bo_size(bo);
- ret = drm_gem_shmem_vmap(&bo->base.base, &map);
+ ret = drm_gem_shmem_vmap(&bo->base, &map);
if (ret) {
kvfree(et);
goto out;
@@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
- drm_gem_shmem_vunmap(&bo->base.base, &map);
+ drm_gem_shmem_vunmap(&bo->base, &map);
}
buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig
index 71c689b573c9..d0bf1bc8da3f 100644
--- a/drivers/gpu/drm/mcde/Kconfig
+++ b/drivers/gpu/drm/mcde/Kconfig
@@ -10,7 +10,6 @@ config DRM_MCDE
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 141cb36b9c07..3a53ebc4e172 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -205,9 +205,15 @@ static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = {
.matrix_bits = 10,
};
+static const struct mtk_disp_ccorr_data mt8192_ccorr_driver_data = {
+ .matrix_bits = 11,
+};
+
static const struct of_device_id mtk_disp_ccorr_driver_dt_match[] = {
{ .compatible = "mediatek,mt8183-disp-ccorr",
.data = &mt8183_ccorr_driver_data},
+ { .compatible = "mediatek,mt8192-disp-ccorr",
+ .data = &mt8192_ccorr_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 5326989d5206..2146299e5f52 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -456,6 +456,22 @@ static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = {
.fmt_rgb565_is_0 = true,
};
+static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 10,
+ .layer_nr = 4,
+ .fmt_rgb565_is_0 = true,
+ .smi_id_en = true,
+};
+
+static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 10,
+ .layer_nr = 2,
+ .fmt_rgb565_is_0 = true,
+ .smi_id_en = true,
+};
+
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-ovl",
.data = &mt2701_ovl_driver_data},
@@ -465,6 +481,10 @@ static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
.data = &mt8183_ovl_driver_data},
{ .compatible = "mediatek,mt8183-disp-ovl-2l",
.data = &mt8183_ovl_2l_driver_data},
+ { .compatible = "mediatek,mt8192-disp-ovl",
+ .data = &mt8192_ovl_driver_data},
+ { .compatible = "mediatek,mt8192-disp-ovl-2l",
+ .data = &mt8192_ovl_2l_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 75d7f45579e2..d41a3970b944 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -353,6 +353,10 @@ static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = {
.fifo_size = 5 * SZ_1K,
};
+static const struct mtk_disp_rdma_data mt8192_rdma_driver_data = {
+ .fifo_size = 5 * SZ_1K,
+};
+
static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-rdma",
.data = &mt2701_rdma_driver_data},
@@ -360,6 +364,8 @@ static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
.data = &mt8173_rdma_driver_data},
{ .compatible = "mediatek,mt8183-disp-rdma",
.data = &mt8183_rdma_driver_data},
+ { .compatible = "mediatek,mt8192-disp-rdma",
+ .data = &mt8192_rdma_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index a4e80e499674..d661edf7e0fe 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -4,6 +4,8 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/mailbox_controller.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
@@ -50,8 +52,10 @@ struct mtk_drm_crtc {
bool pending_async_planes;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct cmdq_client *cmdq_client;
+ struct cmdq_client cmdq_client;
+ struct cmdq_pkt cmdq_handle;
u32 cmdq_event;
+ u32 cmdq_vblank_cnt;
#endif
struct device *mmsys_dev;
@@ -104,12 +108,60 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
}
}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
+ size_t size)
+{
+ struct device *dev;
+ dma_addr_t dma_addr;
+
+ pkt->va_base = kzalloc(size, GFP_KERNEL);
+ if (!pkt->va_base) {
+ kfree(pkt);
+ return -ENOMEM;
+ }
+ pkt->buf_size = size;
+ pkt->cl = (void *)client;
+
+ dev = client->chan->mbox->dev;
+ dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+ kfree(pkt->va_base);
+ kfree(pkt);
+ return -ENOMEM;
+ }
+
+ pkt->pa_base = dma_addr;
+
+ return 0;
+}
+
+static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+{
+ struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+ dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ kfree(pkt->va_base);
+ kfree(pkt);
+}
+#endif
+
static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
mtk_mutex_put(mtk_crtc->mutex);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
+ if (mtk_crtc->cmdq_client.chan) {
+ mbox_free_channel(mtk_crtc->cmdq_client.chan);
+ mtk_crtc->cmdq_client.chan = NULL;
+ }
+#endif
drm_crtc_cleanup(crtc);
}
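mtk_drm_cmdq_pkt_create() above follows the standard streaming-DMA pattern: allocate a CPU buffer, map it with dma_map_single(), and always validate the returned handle with dma_mapping_error() before use, unwinding the allocation on failure. A condensed sketch of that pattern, assuming dev and size are in scope:

	void *va = kzalloc(size, GFP_KERNEL);
	dma_addr_t dma;

	if (!va)
		return -ENOMEM;

	dma = dma_map_single(dev, va, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		/* The handle must be checked before use; an invalid
		 * mapping is not guaranteed to be zero. */
		kfree(va);
		return -ENOMEM;
	}

	/* ... point the hardware at (va, dma); when finished: */
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
	kfree(va);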
@@ -222,9 +274,46 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static void ddp_cmdq_cb(struct cmdq_cb_data data)
+static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
- cmdq_pkt_destroy(data.data);
+ struct cmdq_cb_data *data = mssg;
+ struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
+ struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
+ struct mtk_crtc_state *state;
+ unsigned int i;
+
+ if (data->sta < 0)
+ return;
+
+ state = to_mtk_crtc_state(mtk_crtc->base.state);
+
+ state->pending_config = false;
+
+ if (mtk_crtc->pending_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ plane_state->pending.config = false;
+ }
+ mtk_crtc->pending_planes = false;
+ }
+
+ if (mtk_crtc->pending_async_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ plane_state->pending.async_config = false;
+ }
+ mtk_crtc->pending_async_planes = false;
+ }
+
+ mtk_crtc->cmdq_vblank_cnt = 0;
}
#endif
@@ -378,7 +467,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
state->pending_vrefresh, 0,
cmdq_handle);
- state->pending_config = false;
+ if (!cmdq_handle)
+ state->pending_config = false;
}
if (mtk_crtc->pending_planes) {
@@ -398,9 +488,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
- plane_state->pending.config = false;
+ if (!cmdq_handle)
+ plane_state->pending.config = false;
}
- mtk_crtc->pending_planes = false;
+
+ if (!cmdq_handle)
+ mtk_crtc->pending_planes = false;
}
if (mtk_crtc->pending_async_planes) {
@@ -420,9 +513,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
- plane_state->pending.async_config = false;
+ if (!cmdq_handle)
+ plane_state->pending.async_config = false;
}
- mtk_crtc->pending_async_planes = false;
+
+ if (!cmdq_handle)
+ mtk_crtc->pending_async_planes = false;
}
}
@@ -430,7 +526,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct cmdq_pkt *cmdq_handle;
+ struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
struct drm_crtc *crtc = &mtk_crtc->base;
struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -468,14 +564,28 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
mtk_mutex_release(mtk_crtc->mutex);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (mtk_crtc->cmdq_client) {
- mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
- cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+ if (mtk_crtc->cmdq_client.chan) {
+ mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
+ cmdq_handle->cmd_buf_size = 0;
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
cmdq_pkt_finalize(cmdq_handle);
- cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+ dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
+ cmdq_handle->pa_base,
+ cmdq_handle->cmd_buf_size,
+ DMA_TO_DEVICE);
+ /*
+ * The CMDQ command should execute within the next 3 vblanks: one
+ * vblank interrupt may occur before the message is sent
+ * (occasionally) and one more fires after the command completes,
+ * so treat the command as timed out once 3 vblank interrupts have
+ * passed without completion.
+ */
+ mtk_crtc->cmdq_vblank_cnt = 3;
+
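+ /*
+ * The channel was requested with knows_txdone set, so TX completion
+ * is reported right after queueing; the real "command consumed"
+ * signal arrives through the rx_callback (ddp_cmdq_cb) above.
+ */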
+ mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
+ mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
}
#endif
mtk_crtc->config_updating = false;
@@ -489,12 +599,15 @@ static void mtk_crtc_ddp_irq(void *data)
struct mtk_drm_private *priv = crtc->dev->dev_private;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
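+ /*
+ * In the CMDQ path the vblank interrupt doubles as a watchdog: the
+ * counter armed in mtk_drm_crtc_update_config() counts down once per
+ * vblank, and reaching zero means the GCE never reported completion.
+ */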
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
+ mtk_crtc_ddp_config(crtc, NULL);
+ else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
+ DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
+ drm_crtc_index(&mtk_crtc->base));
#else
if (!priv->data->shadow_register)
-#endif
mtk_crtc_ddp_config(crtc, NULL);
-
+#endif
mtk_drm_finish_page_flip(mtk_crtc);
}
@@ -829,16 +942,20 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mutex_init(&mtk_crtc->hw_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- mtk_crtc->cmdq_client =
- cmdq_mbox_create(mtk_crtc->mmsys_dev,
- drm_crtc_index(&mtk_crtc->base));
- if (IS_ERR(mtk_crtc->cmdq_client)) {
+ mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
+ mtk_crtc->cmdq_client.client.tx_block = false;
+ mtk_crtc->cmdq_client.client.knows_txdone = true;
+ mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
+ mtk_crtc->cmdq_client.chan =
+ mbox_request_channel(&mtk_crtc->cmdq_client.client,
+ drm_crtc_index(&mtk_crtc->base));
+ if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
drm_crtc_index(&mtk_crtc->base));
- mtk_crtc->cmdq_client = NULL;
+ mtk_crtc->cmdq_client.chan = NULL;
}
- if (mtk_crtc->cmdq_client) {
+ if (mtk_crtc->cmdq_client.chan) {
ret = of_property_read_u32_index(priv->mutex_node,
"mediatek,gce-events",
drm_crtc_index(&mtk_crtc->base),
@@ -846,8 +963,18 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (ret) {
dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
drm_crtc_index(&mtk_crtc->base));
- cmdq_mbox_destroy(mtk_crtc->cmdq_client);
- mtk_crtc->cmdq_client = NULL;
+ mbox_free_channel(mtk_crtc->cmdq_client.chan);
+ mtk_crtc->cmdq_client.chan = NULL;
+ } else {
+ ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
+ &mtk_crtc->cmdq_handle,
+ PAGE_SIZE);
+ if (ret) {
+ dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
+ drm_crtc_index(&mtk_crtc->base));
+ mbox_free_channel(mtk_crtc->cmdq_client.chan);
+ mtk_crtc->cmdq_client.chan = NULL;
+ }
}
}
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 99cbf44463e4..b4b682bc1991 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -20,45 +20,39 @@
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_crtc.h"
-#define DISP_OD_EN 0x0000
-#define DISP_OD_INTEN 0x0008
-#define DISP_OD_INTSTA 0x000c
-#define DISP_OD_CFG 0x0020
-#define DISP_OD_SIZE 0x0030
-#define DISP_DITHER_5 0x0114
-#define DISP_DITHER_7 0x011c
-#define DISP_DITHER_15 0x013c
-#define DISP_DITHER_16 0x0140
-#define DISP_REG_UFO_START 0x0000
-
-#define DISP_DITHER_EN 0x0000
+#define DISP_REG_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
-#define DISP_DITHER_CFG 0x0020
+#define DISP_REG_DITHER_CFG 0x0020
#define DITHER_RELAY_MODE BIT(0)
#define DITHER_ENGINE_EN BIT(1)
-#define DISP_DITHER_SIZE 0x0030
-
-#define LUT_10BIT_MASK 0x03ff
-
-#define OD_RELAYMODE BIT(0)
-
-#define UFO_BYPASS BIT(2)
-
#define DISP_DITHERING BIT(2)
+#define DISP_REG_DITHER_SIZE 0x0030
+#define DISP_REG_DITHER_5 0x0114
+#define DISP_REG_DITHER_7 0x011c
+#define DISP_REG_DITHER_15 0x013c
#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28)
-#define DITHER_OVFLW_BIT_R(x) (((x) & 0x7) << 24)
#define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20)
-#define DITHER_ADD_RSHIFT_R(x) (((x) & 0x7) << 16)
#define DITHER_NEW_BIT_MODE BIT(0)
+#define DISP_REG_DITHER_16 0x0140
#define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28)
-#define DITHER_OVFLW_BIT_B(x) (((x) & 0x7) << 24)
#define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20)
-#define DITHER_ADD_RSHIFT_B(x) (((x) & 0x7) << 16)
#define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12)
-#define DITHER_OVFLW_BIT_G(x) (((x) & 0x7) << 8)
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
-#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+
+#define DISP_REG_OD_EN 0x0000
+#define DISP_REG_OD_CFG 0x0020
+#define OD_RELAYMODE BIT(0)
+#define DISP_REG_OD_SIZE 0x0030
+
+#define DISP_REG_POSTMASK_EN 0x0000
+#define POSTMASK_EN BIT(0)
+#define DISP_REG_POSTMASK_CFG 0x0020
+#define POSTMASK_RELAY_MODE BIT(0)
+#define DISP_REG_POSTMASK_SIZE 0x0030
+
+#define DISP_REG_UFO_START 0x0000
+#define UFO_BYPASS BIT(2)
struct mtk_ddp_comp_dev {
struct clk *clk;
@@ -134,25 +128,52 @@ void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg,
return;
if (bpc >= MTK_MIN_BPC) {
- mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_5);
- mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_7);
+ mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_5);
+ mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_7);
mtk_ddp_write(cmdq_pkt,
DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
DITHER_NEW_BIT_MODE,
- cmdq_reg, regs, DISP_DITHER_15);
+ cmdq_reg, regs, DISP_REG_DITHER_15);
mtk_ddp_write(cmdq_pkt,
DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
- cmdq_reg, regs, DISP_DITHER_16);
+ cmdq_reg, regs, DISP_REG_DITHER_16);
mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg);
}
}
+static void mtk_dither_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
+ DISP_REG_DITHER_CFG);
+ mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
+ DITHER_ENGINE_EN, cmdq_pkt);
+}
+
+static void mtk_dither_start(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ writel(DITHER_EN, priv->regs + DISP_REG_DITHER_EN);
+}
+
+static void mtk_dither_stop(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ writel_relaxed(0x0, priv->regs + DISP_REG_DITHER_EN);
+}
+
static void mtk_dither_set(struct device *dev, unsigned int bpc,
- unsigned int cfg, struct cmdq_pkt *cmdq_pkt)
+ unsigned int cfg, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
@@ -166,49 +187,49 @@ static void mtk_od_config(struct device *dev, unsigned int w,
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_OD_SIZE);
- mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_OD_CFG);
- mtk_dither_set(dev, bpc, DISP_OD_CFG, cmdq_pkt);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_OD_SIZE);
+ mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_REG_OD_CFG);
+ mtk_dither_set(dev, bpc, DISP_REG_OD_CFG, cmdq_pkt);
}
static void mtk_od_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- writel(1, priv->regs + DISP_OD_EN);
+ writel(1, priv->regs + DISP_REG_OD_EN);
}
-static void mtk_ufoe_start(struct device *dev)
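+/*
+ * POSTMASK is a new pipeline stage on mt8192 (placed between GAMMA and
+ * DITHER in the main path); it is only programmed in relay mode here,
+ * so pixels pass through unmodified.
+ */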
+static void mtk_postmask_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs,
+ DISP_REG_POSTMASK_SIZE);
+ mtk_ddp_write(cmdq_pkt, POSTMASK_RELAY_MODE, &priv->cmdq_reg,
+ priv->regs, DISP_REG_POSTMASK_CFG);
}
-static void mtk_dither_config(struct device *dev, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_postmask_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_DITHER_SIZE);
- mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, DISP_DITHER_CFG);
- mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_DITHER_CFG,
- DITHER_ENGINE_EN, cmdq_pkt);
+ writel(POSTMASK_EN, priv->regs + DISP_REG_POSTMASK_EN);
}
-static void mtk_dither_start(struct device *dev)
+static void mtk_postmask_stop(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- writel(DITHER_EN, priv->regs + DISP_DITHER_EN);
+ writel_relaxed(0x0, priv->regs + DISP_REG_POSTMASK_EN);
}
-static void mtk_dither_stop(struct device *dev)
+static void mtk_ufoe_start(struct device *dev)
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- writel_relaxed(0x0, priv->regs + DISP_DITHER_EN);
+ writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START);
}
static const struct mtk_ddp_comp_funcs ddp_aal = {
@@ -286,6 +307,14 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
.bgclr_in_off = mtk_ovl_bgclr_in_off,
};
+static const struct mtk_ddp_comp_funcs ddp_postmask = {
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
+ .config = mtk_postmask_config,
+ .start = mtk_postmask_start,
+ .stop = mtk_postmask_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_rdma = {
.clk_enable = mtk_rdma_clk_enable,
.clk_disable = mtk_rdma_clk_disable,
@@ -305,22 +334,23 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = {
};
static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
+ [MTK_DISP_AAL] = "aal",
+ [MTK_DISP_BLS] = "bls",
+ [MTK_DISP_CCORR] = "ccorr",
+ [MTK_DISP_COLOR] = "color",
+ [MTK_DISP_DITHER] = "dither",
+ [MTK_DISP_GAMMA] = "gamma",
+ [MTK_DISP_MUTEX] = "mutex",
+ [MTK_DISP_OD] = "od",
[MTK_DISP_OVL] = "ovl",
[MTK_DISP_OVL_2L] = "ovl-2l",
+ [MTK_DISP_POSTMASK] = "postmask",
+ [MTK_DISP_PWM] = "pwm",
[MTK_DISP_RDMA] = "rdma",
- [MTK_DISP_WDMA] = "wdma",
- [MTK_DISP_COLOR] = "color",
- [MTK_DISP_CCORR] = "ccorr",
- [MTK_DISP_AAL] = "aal",
- [MTK_DISP_GAMMA] = "gamma",
- [MTK_DISP_DITHER] = "dither",
[MTK_DISP_UFOE] = "ufoe",
- [MTK_DSI] = "dsi",
+ [MTK_DISP_WDMA] = "wdma",
[MTK_DPI] = "dpi",
- [MTK_DISP_PWM] = "pwm",
- [MTK_DISP_MUTEX] = "mutex",
- [MTK_DISP_OD] = "od",
- [MTK_DISP_BLS] = "bls",
+ [MTK_DSI] = "dsi",
};
struct mtk_ddp_comp_match {
@@ -330,35 +360,38 @@ struct mtk_ddp_comp_match {
};
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
- [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
- [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
- [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
- [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
- [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
- [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
- [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi },
- [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi },
- [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi },
- [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi },
- [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi },
- [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi },
- [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
- [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
- [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
- [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl },
- [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl },
- [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl },
- [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl },
- [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
- [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
- [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
- [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma },
- [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma },
- [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma },
- [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe },
- [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL },
- [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
+ [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
+ [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
+ [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
+ [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
+ [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
+ [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
+ [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi },
+ [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi },
+ [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi },
+ [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi },
+ [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi },
+ [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi },
+ [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
+ [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
+ [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
+ [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl },
+ [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl },
+ [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl },
+ [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl },
+ [DDP_COMPONENT_OVL_2L2] = { MTK_DISP_OVL_2L, 2, &ddp_ovl },
+ [DDP_COMPONENT_POSTMASK0] = { MTK_DISP_POSTMASK, 0, &ddp_postmask },
+ [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
+ [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
+ [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
+ [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma },
+ [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma },
+ [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma },
+ [DDP_COMPONENT_RDMA4] = { MTK_DISP_RDMA, 4, &ddp_rdma },
+ [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe },
+ [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL },
+ [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
static bool mtk_drm_find_comp_in_ddp(struct device *dev,
@@ -475,12 +508,12 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
type == MTK_DISP_CCORR ||
type == MTK_DISP_COLOR ||
type == MTK_DISP_GAMMA ||
- type == MTK_DPI ||
- type == MTK_DSI ||
type == MTK_DISP_OVL ||
type == MTK_DISP_OVL_2L ||
type == MTK_DISP_PWM ||
- type == MTK_DISP_RDMA)
+ type == MTK_DISP_RDMA ||
+ type == MTK_DPI ||
+ type == MTK_DSI)
return 0;
priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index bb914d976cf5..4c6a98662305 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -18,22 +18,23 @@ struct mtk_plane_state;
struct drm_crtc_state;
enum mtk_ddp_comp_type {
- MTK_DISP_OVL,
- MTK_DISP_OVL_2L,
- MTK_DISP_RDMA,
- MTK_DISP_WDMA,
- MTK_DISP_COLOR,
+ MTK_DISP_AAL,
+ MTK_DISP_BLS,
MTK_DISP_CCORR,
+ MTK_DISP_COLOR,
MTK_DISP_DITHER,
- MTK_DISP_AAL,
MTK_DISP_GAMMA,
- MTK_DISP_UFOE,
- MTK_DSI,
- MTK_DPI,
- MTK_DISP_PWM,
MTK_DISP_MUTEX,
MTK_DISP_OD,
- MTK_DISP_BLS,
+ MTK_DISP_OVL,
+ MTK_DISP_OVL_2L,
+ MTK_DISP_POSTMASK,
+ MTK_DISP_PWM,
+ MTK_DISP_RDMA,
+ MTK_DISP_UFOE,
+ MTK_DISP_WDMA,
+ MTK_DPI,
+ MTK_DSI,
MTK_DDP_COMP_TYPE_MAX,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index aec39724ebeb..56ff8c57ef8f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -158,6 +158,25 @@ static const enum mtk_ddp_comp_id mt8183_mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const enum mtk_ddp_comp_id mt8192_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_CCORR,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_POSTMASK0,
+ DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DSI0,
+};
+
+static const enum mtk_ddp_comp_id mt8192_mtk_ddp_ext[] = {
+ DDP_COMPONENT_OVL_2L2,
+ DDP_COMPONENT_RDMA4,
+ DDP_COMPONENT_DPI0,
+};
+
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.main_path = mt2701_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
@@ -202,6 +221,13 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext),
};
+static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
+ .main_path = mt8192_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8192_mtk_ddp_main),
+ .ext_path = mt8192_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt8192_mtk_ddp_ext),
+};
+
static int mtk_drm_kms_init(struct drm_device *drm)
{
struct mtk_drm_private *private = drm->dev_private;
@@ -397,68 +423,36 @@ static const struct component_master_ops mtk_drm_ops = {
};
static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
- { .compatible = "mediatek,mt2701-disp-ovl",
- .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8167-disp-ovl",
- .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8173-disp-ovl",
- .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8183-disp-ovl",
- .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8183-disp-ovl-2l",
- .data = (void *)MTK_DISP_OVL_2L },
- { .compatible = "mediatek,mt2701-disp-rdma",
- .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8167-disp-rdma",
- .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8173-disp-rdma",
- .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8183-disp-rdma",
- .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8173-disp-wdma",
- .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt8167-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8173-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8183-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8192-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8167-disp-ccorr",
.data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt8183-disp-ccorr",
.data = (void *)MTK_DISP_CCORR },
+ { .compatible = "mediatek,mt8192-disp-ccorr",
+ .data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt2701-disp-color",
.data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8167-disp-color",
.data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-color",
.data = (void *)MTK_DISP_COLOR },
- { .compatible = "mediatek,mt8167-disp-aal",
- .data = (void *)MTK_DISP_AAL},
- { .compatible = "mediatek,mt8173-disp-aal",
- .data = (void *)MTK_DISP_AAL},
- { .compatible = "mediatek,mt8183-disp-aal",
- .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8167-disp-dither",
+ .data = (void *)MTK_DISP_DITHER },
+ { .compatible = "mediatek,mt8183-disp-dither",
+ .data = (void *)MTK_DISP_DITHER },
{ .compatible = "mediatek,mt8167-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8173-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8183-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
- { .compatible = "mediatek,mt8167-disp-dither",
- .data = (void *)MTK_DISP_DITHER },
- { .compatible = "mediatek,mt8183-disp-dither",
- .data = (void *)MTK_DISP_DITHER },
- { .compatible = "mediatek,mt8173-disp-ufoe",
- .data = (void *)MTK_DISP_UFOE },
- { .compatible = "mediatek,mt2701-dsi",
- .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8167-dsi",
- .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8173-dsi",
- .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8183-dsi",
- .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt2701-dpi",
- .data = (void *)MTK_DPI },
- { .compatible = "mediatek,mt8173-dpi",
- .data = (void *)MTK_DPI },
- { .compatible = "mediatek,mt8183-dpi",
- .data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt2701-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt2712-disp-mutex",
@@ -469,14 +463,60 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8183-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8192-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8173-disp-od",
+ .data = (void *)MTK_DISP_OD },
+ { .compatible = "mediatek,mt2701-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8167-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8173-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8183-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8192-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8183-disp-ovl-2l",
+ .data = (void *)MTK_DISP_OVL_2L },
+ { .compatible = "mediatek,mt8192-disp-ovl-2l",
+ .data = (void *)MTK_DISP_OVL_2L },
+ { .compatible = "mediatek,mt8192-disp-postmask",
+ .data = (void *)MTK_DISP_POSTMASK },
{ .compatible = "mediatek,mt2701-disp-pwm",
.data = (void *)MTK_DISP_BLS },
{ .compatible = "mediatek,mt8167-disp-pwm",
.data = (void *)MTK_DISP_PWM },
{ .compatible = "mediatek,mt8173-disp-pwm",
.data = (void *)MTK_DISP_PWM },
- { .compatible = "mediatek,mt8173-disp-od",
- .data = (void *)MTK_DISP_OD },
+ { .compatible = "mediatek,mt2701-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8167-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8183-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8192-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-ufoe",
+ .data = (void *)MTK_DISP_UFOE },
+ { .compatible = "mediatek,mt8173-disp-wdma",
+ .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt2701-dpi",
+ .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt8167-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dpi",
+ .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt8183-dpi",
+ .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt2701-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8183-dsi",
+ .data = (void *)MTK_DSI },
{ }
};
@@ -493,6 +533,8 @@ static const struct of_device_id mtk_drm_of_ids[] = {
.data = &mt8173_mmsys_driver_data},
{ .compatible = "mediatek,mt8183-mmsys",
.data = &mt8183_mmsys_driver_data},
+ { .compatible = "mediatek,mt8192-mmsys",
+ .data = &mt8192_mmsys_driver_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_drm_of_ids);
@@ -568,8 +610,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp_type == MTK_DISP_OVL ||
comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_RDMA ||
- comp_type == MTK_DSI ||
- comp_type == MTK_DPI) {
+ comp_type == MTK_DPI ||
+ comp_type == MTK_DSI) {
dev_info(dev, "Adding component match for %pOF\n",
node);
drm_of_component_match_add(dev, &match, compare_of,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 734a1fb052df..075747a6d4aa 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -44,9 +44,10 @@ static void mtk_plane_reset(struct drm_plane *plane)
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return;
- plane->state = &state->base;
}
+ __drm_atomic_helper_plane_reset(plane, &state->base);
+
state->base.plane = plane;
state->pending.format = DRM_FORMAT_RGB565;
}
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 9f9281dd49f8..6c70fc3214af 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -4,11 +4,12 @@ config DRM_MESON
depends on DRM && OF && (ARM || ARM64)
depends on ARCH_MESON || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
+ select DRM_DISPLAY_CONNECTOR
select VIDEOMODE_HELPERS
select REGMAP_MMIO
select MESON_CANVAS
+ select CEC_CORE if CEC_NOTIFIER
config DRM_MESON_DW_HDMI
tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index 28a519cdf66b..3afa31bdc950 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
+meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_encoder_cvbs.o
meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o
meson-drm-y += meson_rdma.o meson_osd_afbcd.o
+meson-drm-y += meson_encoder_hdmi.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 7f41a33592c8..80f1d439841a 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -31,7 +31,8 @@
#include "meson_plane.h"
#include "meson_osd_afbcd.h"
#include "meson_registers.h"
-#include "meson_venc_cvbs.h"
+#include "meson_encoder_cvbs.h"
+#include "meson_encoder_hdmi.h"
#include "meson_viu.h"
#include "meson_vpp.h"
#include "meson_rdma.h"
@@ -306,7 +307,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
/* Encoder Initialization */
- ret = meson_venc_cvbs_create(priv);
+ ret = meson_encoder_cvbs_init(priv);
if (ret)
goto free_drm;
@@ -318,6 +319,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
}
}
+ ret = meson_encoder_hdmi_init(priv);
+ if (ret)
+ goto free_drm;
+
ret = meson_plane_create(priv);
if (ret)
goto free_drm;
@@ -426,46 +431,6 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
-/* Possible connectors nodes to ignore */
-static const struct of_device_id connectors_match[] = {
- { .compatible = "composite-video-connector" },
- { .compatible = "svideo-connector" },
- { .compatible = "hdmi-connector" },
- { .compatible = "dvi-connector" },
- {}
-};
-
-static int meson_probe_remote(struct platform_device *pdev,
- struct component_match **match,
- struct device_node *parent,
- struct device_node *remote)
-{
- struct device_node *ep, *remote_node;
- int count = 1;
-
- /* If node is a connector, return and do not add to match table */
- if (of_match_node(connectors_match, remote))
- return 1;
-
- component_match_add(&pdev->dev, match, compare_of, remote);
-
- for_each_endpoint_of_node(remote, ep) {
- remote_node = of_graph_get_remote_port_parent(ep);
- if (!remote_node ||
- remote_node == parent || /* Ignore parent endpoint */
- !of_device_is_available(remote_node)) {
- of_node_put(remote_node);
- continue;
- }
-
- count += meson_probe_remote(pdev, match, remote, remote_node);
-
- of_node_put(remote_node);
- }
-
- return count;
-}
-
static void meson_drv_shutdown(struct platform_device *pdev)
{
struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
@@ -477,6 +442,13 @@ static void meson_drv_shutdown(struct platform_device *pdev)
drm_atomic_helper_shutdown(priv->drm);
}
+/* Possible connectors nodes to ignore */
+static const struct of_device_id connectors_match[] = {
+ { .compatible = "composite-video-connector" },
+ { .compatible = "svideo-connector" },
+ {}
+};
+
static int meson_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
@@ -491,8 +463,21 @@ static int meson_drv_probe(struct platform_device *pdev)
continue;
}
- count += meson_probe_remote(pdev, &match, np, remote);
+ /* If an analog connector is detected, count it as an output */
+ if (of_match_node(connectors_match, remote)) {
+ ++count;
+ of_node_put(remote);
+ continue;
+ }
+
+ dev_dbg(&pdev->dev, "parent %pOF remote match add %pOF parent %s\n",
+ np, remote, dev_name(&pdev->dev));
+
+ component_match_add(&pdev->dev, &match, compare_of, remote);
+
of_node_put(remote);
+
+ ++count;
}
if (count && !match)
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 0afbd1e70bfc..5cd2b2ebbbd3 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -22,14 +22,11 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
-#include <linux/media-bus-format.h>
#include <linux/videodev2.h>
#include "meson_drv.h"
#include "meson_dw_hdmi.h"
#include "meson_registers.h"
-#include "meson_vclk.h"
-#include "meson_venc.h"
#define DRIVER_NAME "meson-dw-hdmi"
#define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver"
@@ -135,8 +132,6 @@ struct meson_dw_hdmi_data {
};
struct meson_dw_hdmi {
- struct drm_encoder encoder;
- struct drm_bridge bridge;
struct dw_hdmi_plat_data dw_plat_data;
struct meson_drm *priv;
struct device *dev;
@@ -148,12 +143,8 @@ struct meson_dw_hdmi {
struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
- unsigned long output_bus_fmt;
+ struct drm_bridge *bridge;
};
-#define encoder_to_meson_dw_hdmi(x) \
- container_of(x, struct meson_dw_hdmi, encoder)
-#define bridge_to_meson_dw_hdmi(x) \
- container_of(x, struct meson_dw_hdmi, bridge)
static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
const char *compat)
@@ -295,14 +286,14 @@ static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi,
/* Setup PHY bandwidth modes */
static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
- const struct drm_display_mode *mode)
+ const struct drm_display_mode *mode,
+ bool mode_is_420)
{
struct meson_drm *priv = dw_hdmi->priv;
unsigned int pixel_clock = mode->clock;
/* For 420, pixel clock is half unlike venc clock */
- if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
- pixel_clock /= 2;
+ if (mode_is_420)
+ pixel_clock /= 2;
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
@@ -374,68 +365,25 @@ static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
mdelay(2);
}
-static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
- const struct drm_display_mode *mode)
-{
- struct meson_drm *priv = dw_hdmi->priv;
- int vic = drm_match_cea_mode(mode);
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int hdmi_freq;
-
- vclk_freq = mode->clock;
-
- /* For 420, pixel clock is half unlike venc clock */
- if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
- vclk_freq /= 2;
-
- /* TMDS clock is pixel_clock * 10 */
- phy_freq = vclk_freq * 10;
-
- if (!vic) {
- meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
- vclk_freq, vclk_freq, vclk_freq, false);
- return;
- }
-
- /* 480i/576i needs global pixel doubling */
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- vclk_freq *= 2;
-
- venc_freq = vclk_freq;
- hdmi_freq = vclk_freq;
-
- /* VENC double pixels for 1080i, 720p and YUV420 modes */
- if (meson_venc_hdmi_venc_repeat(vic) ||
- dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
- venc_freq *= 2;
-
- vclk_freq = max(venc_freq, hdmi_freq);
-
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- venc_freq /= 2;
-
- DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
- phy_freq, vclk_freq, venc_freq, hdmi_freq,
- priv->venc.hdmi_use_enci);
-
- meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
- venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
-}
-
static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *display,
const struct drm_display_mode *mode)
{
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
+ bool is_hdmi2_sink = display->hdmi.scdc.supported;
struct meson_drm *priv = dw_hdmi->priv;
unsigned int wr_clk =
readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING));
+ bool mode_is_420 = false;
DRM_DEBUG_DRIVER("\"%s\" div%d\n", mode->name,
mode->clock > 340000 ? 40 : 10);
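+ /*
+ * The negotiated bus format is no longer tracked in this driver, so
+ * re-derive whether the mode runs as YUV 4:2:0: 420-only modes always
+ * do, 420-also modes only when the sink is not an HDMI2 sink.
+ */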
+ if (drm_mode_is_420_only(display, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(display, mode)))
+ mode_is_420 = true;
+
/* Enable clocks */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
@@ -457,8 +405,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
/* TMDS pattern setup */
- if (mode->clock > 340000 &&
- dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) {
+ if (mode->clock > 340000 && !mode_is_420) {
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
0);
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
@@ -476,7 +423,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
/* Setup PHY parameters */
- meson_hdmi_phy_setup_mode(dw_hdmi, mode);
+ meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420);
/* Setup PHY */
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
@@ -622,214 +569,15 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected,
hpd_connected);
- drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);
+ drm_helper_hpd_irq_event(dw_hdmi->bridge->dev);
+ drm_bridge_hpd_notify(dw_hdmi->bridge,
+ hpd_connected ? connector_status_connected
+ : connector_status_disconnected);
}
return IRQ_HANDLED;
}
-static enum drm_mode_status
-dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
- const struct drm_display_info *display_info,
- const struct drm_display_mode *mode)
-{
- struct meson_dw_hdmi *dw_hdmi = data;
- struct meson_drm *priv = dw_hdmi->priv;
- bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int hdmi_freq;
- int vic = drm_match_cea_mode(mode);
- enum drm_mode_status status;
-
- DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
-
- /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */
- if (display_info->max_tmds_clock &&
- mode->clock > display_info->max_tmds_clock &&
- !drm_mode_is_420_only(display_info, mode) &&
- !drm_mode_is_420_also(display_info, mode))
- return MODE_BAD;
-
- /* Check against non-VIC supported modes */
- if (!vic) {
- status = meson_venc_hdmi_supported_mode(mode);
- if (status != MODE_OK)
- return status;
-
- return meson_vclk_dmt_supported_freq(priv, mode->clock);
- /* Check against supported VIC modes */
- } else if (!meson_venc_hdmi_supported_vic(vic))
- return MODE_BAD;
-
- vclk_freq = mode->clock;
-
- /* For 420, pixel clock is half unlike venc clock */
- if (drm_mode_is_420_only(display_info, mode) ||
- (!is_hdmi2_sink &&
- drm_mode_is_420_also(display_info, mode)))
- vclk_freq /= 2;
-
- /* TMDS clock is pixel_clock * 10 */
- phy_freq = vclk_freq * 10;
-
- /* 480i/576i needs global pixel doubling */
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- vclk_freq *= 2;
-
- venc_freq = vclk_freq;
- hdmi_freq = vclk_freq;
-
- /* VENC double pixels for 1080i, 720p and YUV420 modes */
- if (meson_venc_hdmi_venc_repeat(vic) ||
- drm_mode_is_420_only(display_info, mode) ||
- (!is_hdmi2_sink &&
- drm_mode_is_420_also(display_info, mode)))
- venc_freq *= 2;
-
- vclk_freq = max(venc_freq, hdmi_freq);
-
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- venc_freq /= 2;
-
- dev_dbg(dw_hdmi->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
- __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
-
- return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
-}
-
-/* Encoder */
-
-static const u32 meson_dw_hdmi_out_bus_fmts[] = {
- MEDIA_BUS_FMT_YUV8_1X24,
- MEDIA_BUS_FMT_UYYVYY8_0_5X24,
-};
-
-static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = {
- .destroy = meson_venc_hdmi_encoder_destroy,
-};
-
-static u32 *
-meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- u32 output_fmt,
- unsigned int *num_input_fmts)
-{
- u32 *input_fmts = NULL;
- int i;
-
- *num_input_fmts = 0;
-
- for (i = 0 ; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts) ; ++i) {
- if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) {
- *num_input_fmts = 1;
- input_fmts = kcalloc(*num_input_fmts,
- sizeof(*input_fmts),
- GFP_KERNEL);
- if (!input_fmts)
- return NULL;
-
- input_fmts[0] = output_fmt;
-
- break;
- }
- }
-
- return input_fmts;
-}
-
-static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
-
- dw_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format;
-
- DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt);
-
- return 0;
-}
-
-static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge)
-{
- struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
- struct meson_drm *priv = dw_hdmi->priv;
-
- DRM_DEBUG_DRIVER("\n");
-
- writel_bits_relaxed(0x3, 0,
- priv->io_base + _REG(VPU_HDMI_SETTING));
-
- writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
- writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
-}
-
-static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge)
-{
- struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
- struct meson_drm *priv = dw_hdmi->priv;
-
- DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
-
- if (priv->venc.hdmi_use_enci)
- writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
- else
- writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
-}
-
-static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
- struct meson_drm *priv = dw_hdmi->priv;
- int vic = drm_match_cea_mode(mode);
- unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
- bool yuv420_mode = false;
-
- DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic);
-
- if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
- ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
- yuv420_mode = true;
- }
-
- /* VENC + VENC-DVI Mode setup */
- meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
-
- /* VCLK Set clock */
- dw_hdmi_set_vclk(dw_hdmi, mode);
-
- if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
- /* Setup YUV420 to HDMI-TX, no 10bit diphering */
- writel_relaxed(2 | (2 << 2),
- priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
- else
- /* Setup YUV444 to HDMI-TX, no 10bit diphering */
- writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
-}
-
-static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = {
- .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
- .atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts,
- .atomic_reset = drm_atomic_helper_bridge_reset,
- .atomic_check = meson_venc_hdmi_encoder_atomic_check,
- .enable = meson_venc_hdmi_encoder_enable,
- .disable = meson_venc_hdmi_encoder_disable,
- .mode_set = meson_venc_hdmi_encoder_mode_set,
-};
-
/* DW HDMI Regmap */
static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
@@ -876,28 +624,6 @@ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
.dwc_write = dw_hdmi_g12a_dwc_write,
};
-static bool meson_hdmi_connector_is_available(struct device *dev)
-{
- struct device_node *ep, *remote;
-
- /* HDMI Connector is on the second port, first endpoint */
- ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, 0);
- if (!ep)
- return false;
-
- /* If the endpoint node exists, consider it enabled */
- remote = of_graph_get_remote_port(ep);
- if (remote) {
- of_node_put(ep);
- return true;
- }
-
- of_node_put(ep);
- of_node_put(remote);
-
- return false;
-}
-
static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
{
struct meson_drm *priv = meson_dw_hdmi->priv;
@@ -976,18 +702,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct meson_drm *priv = drm->dev_private;
struct dw_hdmi_plat_data *dw_plat_data;
- struct drm_bridge *next_bridge;
- struct drm_encoder *encoder;
int irq;
int ret;
DRM_DEBUG_DRIVER("\n");
- if (!meson_hdmi_connector_is_available(dev)) {
- dev_info(drm->dev, "HDMI Output connector not available\n");
- return -ENODEV;
- }
-
match = of_device_get_match_data(&pdev->dev);
if (!match) {
dev_err(&pdev->dev, "failed to get match data\n");
@@ -1003,7 +722,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
meson_dw_hdmi->dev = dev;
meson_dw_hdmi->data = match;
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
- encoder = &meson_dw_hdmi->encoder;
meson_dw_hdmi->hdmi_supply = devm_regulator_get_optional(dev, "hdmi");
if (IS_ERR(meson_dw_hdmi->hdmi_supply)) {
@@ -1074,34 +792,18 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return ret;
}
- /* Encoder */
-
- ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, "meson_hdmi");
- if (ret) {
- dev_err(priv->dev, "Failed to init HDMI encoder\n");
- return ret;
- }
-
- meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs;
- drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0);
-
- encoder->possible_crtcs = BIT(0);
-
meson_dw_hdmi_init(meson_dw_hdmi);
- DRM_DEBUG_DRIVER("encoder initialized\n");
-
/* Bridge / Connector */
dw_plat_data->priv_data = meson_dw_hdmi;
- dw_plat_data->mode_valid = dw_hdmi_mode_valid;
dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops;
dw_plat_data->phy_name = "meson_dw_hdmi_phy";
dw_plat_data->phy_data = meson_dw_hdmi;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
dw_plat_data->ycbcr_420_allowed = true;
dw_plat_data->disable_cec = true;
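+ /* The HDMI connector sits on the second OF graph port (port@1) */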
+ dw_plat_data->output_port = 1;
if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
@@ -1110,15 +812,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
platform_set_drvdata(pdev, meson_dw_hdmi);
- meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev,
- &meson_dw_hdmi->dw_plat_data);
+ meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, &meson_dw_hdmi->dw_plat_data);
if (IS_ERR(meson_dw_hdmi->hdmi))
return PTR_ERR(meson_dw_hdmi->hdmi);
- next_bridge = of_drm_find_bridge(pdev->dev.of_node);
- if (next_bridge)
- drm_bridge_attach(encoder, next_bridge,
- &meson_dw_hdmi->bridge, 0);
+ meson_dw_hdmi->bridge = of_drm_find_bridge(pdev->dev.of_node);
DRM_DEBUG_DRIVER("HDMI controller initialized\n");
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
new file mode 100644
index 000000000000..fd8db97ba8ba
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ *
+ * Written by:
+ * Jasper St. Pierre <jstpierre@mecheye.net>
+ */
+
+#include <linux/export.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "meson_registers.h"
+#include "meson_vclk.h"
+#include "meson_encoder_cvbs.h"
+
+/* HHI VDAC Registers */
+#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
+
+struct meson_encoder_cvbs {
+ struct drm_encoder encoder;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct meson_drm *priv;
+};
+
+#define bridge_to_meson_encoder_cvbs(x) \
+ container_of(x, struct meson_encoder_cvbs, bridge)
+
+/* Supported Modes */
+
+struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
+ { /* PAL */
+ .enci = &meson_cvbs_enci_pal,
+ .mode = {
+ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500,
+ 720, 732, 795, 864, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_INTERLACE),
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
+ },
+ },
+ { /* NTSC */
+ .enci = &meson_cvbs_enci_ntsc,
+ .mode = {
+ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500,
+ 720, 739, 801, 858, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_INTERLACE),
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
+ },
+ },
+};
+
+static const struct meson_cvbs_mode *
+meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
+{
+ int i;
+
+ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+ if (drm_mode_match(req_mode, &meson_mode->mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_CLOCK |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS))
+ return meson_mode;
+ }
+
+ return NULL;
+}
+
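+/*
+ * The CVBS output is now modelled as a drm_bridge: at attach time it
+ * chains to the connector bridge found through the OF graph, and the
+ * actual drm_connector is created by drm_bridge_connector.
+ */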
+static int meson_encoder_cvbs_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct meson_encoder_cvbs *meson_encoder_cvbs =
+ bridge_to_meson_encoder_cvbs(bridge);
+
+ return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge,
+ &meson_encoder_cvbs->bridge, flags);
+}
+
+static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct meson_encoder_cvbs *meson_encoder_cvbs =
+ bridge_to_meson_encoder_cvbs(bridge);
+ struct meson_drm *priv = meson_encoder_cvbs->priv;
+ struct drm_display_mode *mode;
+ int i;
+
+ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+ mode = drm_mode_duplicate(priv->drm, &meson_mode->mode);
+ if (!mode) {
+ dev_err(priv->dev, "Failed to create a new display mode\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+
+static int meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *display_info,
+ const struct drm_display_mode *mode)
+{
+ if (meson_cvbs_get_mode(mode))
+ return MODE_OK;
+
+ return MODE_BAD;
+}
+
+static int meson_encoder_cvbs_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ if (meson_cvbs_get_mode(&crtc_state->mode))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void meson_encoder_cvbs_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct meson_encoder_cvbs *encoder_cvbs = bridge_to_meson_encoder_cvbs(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct meson_drm *priv = encoder_cvbs->priv;
+ const struct meson_cvbs_mode *meson_mode;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ meson_mode = meson_cvbs_get_mode(&crtc_state->adjusted_mode);
+ if (WARN_ON(!meson_mode))
+ return;
+
+ meson_venci_cvbs_mode_set(priv, meson_mode->enci);
+
+ /* Setup 27MHz vclk2 for ENCI and VDAC */
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ true);
+
+ /* VDAC0 source is not from ATV */
+ writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0,
+ priv->io_base + _REG(VENC_VDAC_DACSEL0));
+
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
+ }
+}
+
+static void meson_encoder_cvbs_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct meson_encoder_cvbs *meson_encoder_cvbs =
+ bridge_to_meson_encoder_cvbs(bridge);
+ struct meson_drm *priv = meson_encoder_cvbs->priv;
+
+ /* Disable CVBS VDAC */
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
+ } else {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ }
+}
+
+static const struct drm_bridge_funcs meson_encoder_cvbs_bridge_funcs = {
+ .attach = meson_encoder_cvbs_attach,
+ .mode_valid = meson_encoder_cvbs_mode_valid,
+ .get_modes = meson_encoder_cvbs_get_modes,
+ .atomic_enable = meson_encoder_cvbs_atomic_enable,
+ .atomic_disable = meson_encoder_cvbs_atomic_disable,
+ .atomic_check = meson_encoder_cvbs_atomic_check,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+int meson_encoder_cvbs_init(struct meson_drm *priv)
+{
+ struct drm_device *drm = priv->drm;
+ struct meson_encoder_cvbs *meson_encoder_cvbs;
+ struct drm_connector *connector;
+ struct device_node *remote;
+ int ret;
+
+ meson_encoder_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_encoder_cvbs), GFP_KERNEL);
+ if (!meson_encoder_cvbs)
+ return -ENOMEM;
+
+ /* CVBS Connector Bridge */
+ remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
+ if (!remote) {
+ dev_info(drm->dev, "CVBS Output connector not available\n");
+ return 0;
+ }
+
+ meson_encoder_cvbs->next_bridge = of_drm_find_bridge(remote);
+ if (!meson_encoder_cvbs->next_bridge) {
+ dev_err(priv->dev, "Failed to find CVBS Connector bridge\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* CVBS Encoder Bridge */
+ meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs;
+ meson_encoder_cvbs->bridge.of_node = priv->dev->of_node;
+ meson_encoder_cvbs->bridge.type = DRM_MODE_CONNECTOR_Composite;
+ meson_encoder_cvbs->bridge.ops = DRM_BRIDGE_OP_MODES;
+ meson_encoder_cvbs->bridge.interlace_allowed = true;
+
+ drm_bridge_add(&meson_encoder_cvbs->bridge);
+
+ meson_encoder_cvbs->priv = priv;
+
+ /* Encoder */
+ ret = drm_simple_encoder_init(priv->drm, &meson_encoder_cvbs->encoder,
+ DRM_MODE_ENCODER_TVDAC);
+ if (ret) {
+ dev_err(priv->dev, "Failed to init CVBS encoder: %d\n", ret);
+ return ret;
+ }
+
+ meson_encoder_cvbs->encoder.possible_crtcs = BIT(0);
+
+ /* Attach CVBS Encoder Bridge to Encoder */
+ ret = drm_bridge_attach(&meson_encoder_cvbs->encoder, &meson_encoder_cvbs->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
+ return ret;
+ }
+
+ /* Initialize & attach Bridge Connector */
+ connector = drm_bridge_connector_init(priv->drm, &meson_encoder_cvbs->encoder);
+ if (IS_ERR(connector)) {
+ dev_err(priv->dev, "Unable to create CVBS bridge connector\n");
+ return PTR_ERR(connector);
+ }
+ drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
index ab7f76ba469c..61d9d183ce7f 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.h
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
@@ -24,6 +24,6 @@ struct meson_cvbs_mode {
/* Modes supported by the CVBS output */
extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT];
-int meson_venc_cvbs_create(struct meson_drm *priv);
+int meson_encoder_cvbs_init(struct meson_drm *priv);
#endif /* __MESON_VENC_CVBS_H */
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
new file mode 100644
index 000000000000..5e306de6f485
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <media/cec-notifier.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include "meson_drv.h"
+#include "meson_registers.h"
+#include "meson_vclk.h"
+#include "meson_venc.h"
+#include "meson_encoder_hdmi.h"
+
+struct meson_encoder_hdmi {
+ struct drm_encoder encoder;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct drm_connector *connector;
+ struct meson_drm *priv;
+ unsigned long output_bus_fmt;
+ struct cec_notifier *cec_notifier;
+};
+
+#define bridge_to_meson_encoder_hdmi(x) \
+ container_of(x, struct meson_encoder_hdmi, bridge)
+
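+/*
+ * Like the CVBS output, the HDMI encoder is a drm_bridge chaining to
+ * the next bridge (dw-hdmi) found through the OF graph.
+ */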
+static int meson_encoder_hdmi_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
+
+ return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge,
+ &encoder_hdmi->bridge, flags);
+}
+
+static void meson_encoder_hdmi_detach(struct drm_bridge *bridge)
+{
+ struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
+
+ cec_notifier_conn_unregister(encoder_hdmi->cec_notifier);
+ encoder_hdmi->cec_notifier = NULL;
+}
+
+static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
+ const struct drm_display_mode *mode)
+{
+ struct meson_drm *priv = encoder_hdmi->priv;
+ int vic = drm_match_cea_mode(mode);
+ unsigned int phy_freq;
+ unsigned int vclk_freq;
+ unsigned int venc_freq;
+ unsigned int hdmi_freq;
+
+ vclk_freq = mode->clock;
+
+ /* For YUV420, the pixel clock is half of the VENC clock */
+ if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ vclk_freq /= 2;
+
+ /* TMDS clock is pixel_clock * 10 */
+ phy_freq = vclk_freq * 10;
+
+ if (!vic) {
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
+ vclk_freq, vclk_freq, vclk_freq, false);
+ return;
+ }
+
+ /* 480i/576i needs global pixel doubling */
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ vclk_freq *= 2;
+
+ venc_freq = vclk_freq;
+ hdmi_freq = vclk_freq;
+
+ /* VENC doubles pixels for 1080i, 720p and YUV420 modes */
+ if (meson_venc_hdmi_venc_repeat(vic) ||
+ encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ venc_freq *= 2;
+
+ vclk_freq = max(venc_freq, hdmi_freq);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ venc_freq /= 2;
+
+ dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
+ phy_freq, vclk_freq, venc_freq, hdmi_freq,
+ priv->venc.hdmi_use_enci);
+
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
+ venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
+}
+
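
To make the clock derivation above concrete, a worked example (my numbers, not from the patch): VIC 16 (1080p60) has mode->clock = 148500 kHz, is neither YUV420 nor double-clocked and has no VENC repetition, so phy = 1485000 and vclk = venc = hdmi = 148500. For 1080i60 (a venc-repeat mode at 74250 kHz), venc doubles to 148500 while hdmi stays at 74250, and vclk takes the max. A hedged standalone sketch of the same arithmetic, with yuv420/dblclk/venc_repeat standing in for the bus-format and mode-flag checks in the real code:

#include <linux/types.h>

struct example_hdmi_clks {
	unsigned int phy, vclk, venc, hdmi;	/* kHz, like mode->clock */
};

static struct example_hdmi_clks example_clks(unsigned int pixel_khz,
					     bool yuv420, bool dblclk,
					     bool venc_repeat)
{
	struct example_hdmi_clks c;
	unsigned int vclk = pixel_khz;

	if (yuv420)
		vclk /= 2;		/* YUV420 halves the link pixel rate */

	c.phy = vclk * 10;		/* TMDS clock = pixel clock * 10 */

	if (dblclk)
		vclk *= 2;		/* 480i/576i global pixel doubling */

	c.venc = c.hdmi = vclk;
	if (venc_repeat || yuv420)
		c.venc *= 2;		/* VENC runs at double rate here */

	c.vclk = c.venc > c.hdmi ? c.venc : c.hdmi;

	if (dblclk)
		c.venc /= 2;

	return c;
}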
+static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *display_info,
+ const struct drm_display_mode *mode)
+{
+ struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
+ struct meson_drm *priv = encoder_hdmi->priv;
+ bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
+ unsigned int phy_freq;
+ unsigned int vclk_freq;
+ unsigned int venc_freq;
+ unsigned int hdmi_freq;
+ int vic = drm_match_cea_mode(mode);
+ enum drm_mode_status status;
+
+ dev_dbg(priv->dev, "Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+
+ /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */
+ if (display_info->max_tmds_clock &&
+ mode->clock > display_info->max_tmds_clock &&
+ !drm_mode_is_420_only(display_info, mode) &&
+ !drm_mode_is_420_also(display_info, mode))
+ return MODE_BAD;
+
+ /* Check against non-VIC supported modes */
+ if (!vic) {
+ status = meson_venc_hdmi_supported_mode(mode);
+ if (status != MODE_OK)
+ return status;
+
+ return meson_vclk_dmt_supported_freq(priv, mode->clock);
+ /* Check against supported VIC modes */
+ } else if (!meson_venc_hdmi_supported_vic(vic))
+ return MODE_BAD;
+
+ vclk_freq = mode->clock;
+
+ /* For YUV420, the pixel clock is half of the VENC clock */
+ if (drm_mode_is_420_only(display_info, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(display_info, mode)))
+ vclk_freq /= 2;
+
+ /* TMDS clock is pixel_clock * 10 */
+ phy_freq = vclk_freq * 10;
+
+ /* 480i/576i needs global pixel doubling */
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ vclk_freq *= 2;
+
+ venc_freq = vclk_freq;
+ hdmi_freq = vclk_freq;
+
+ /* VENC doubles pixels for 1080i, 720p and YUV420 modes */
+ if (meson_venc_hdmi_venc_repeat(vic) ||
+ drm_mode_is_420_only(display_info, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(display_info, mode)))
+ venc_freq *= 2;
+
+ vclk_freq = max(venc_freq, hdmi_freq);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ venc_freq /= 2;
+
+ dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
+ __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
+
+ return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
+}
+
+static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
+ struct meson_drm *priv = encoder_hdmi->priv;
+ struct drm_connector_state *conn_state;
+ const struct drm_display_mode *mode;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ bool yuv420_mode = false;
+ int vic;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ mode = &crtc_state->adjusted_mode;
+
+ vic = drm_match_cea_mode(mode);
+
+ dev_dbg(priv->dev, "\"%s\" vic %d\n", mode->name, vic);
+
+ if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
+ ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
+ yuv420_mode = true;
+ }
+
+ /* VENC + VENC-DVI Mode setup */
+ meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
+
+ /* VCLK Set clock */
+ meson_encoder_hdmi_set_vclk(encoder_hdmi, mode);
+
+ if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ /* Set up YUV420 to HDMI-TX, no 10-bit dithering */
+ writel_relaxed(2 | (2 << 2),
+ priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+ else
+ /* Set up YUV444 to HDMI-TX, no 10-bit dithering */
+ writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+
+ dev_dbg(priv->dev, "%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
+
+ if (priv->venc.hdmi_use_enci)
+ writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
+ else
+ writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
+}
+
+static void meson_encoder_hdmi_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
+ struct meson_drm *priv = encoder_hdmi->priv;
+
+ writel_bits_relaxed(0x3, 0,
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+
+ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
+ writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
+}
+
+static const u32 meson_encoder_hdmi_out_bus_fmts[] = {
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+};
+
+static u32 *
+meson_encoder_hdmi_get_inp_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts = NULL;
+ int i;
+
+ *num_input_fmts = 0;
+
+ for (i = 0 ; i < ARRAY_SIZE(meson_encoder_hdmi_out_bus_fmts) ; ++i) {
+ if (output_fmt == meson_encoder_hdmi_out_bus_fmts[i]) {
+ *num_input_fmts = 1;
+ input_fmts = kcalloc(*num_input_fmts,
+ sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+
+ break;
+ }
+ }
+
+ return input_fmts;
+}
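
Note the negotiation contract here: the atomic helpers iterate the output formats proposed downstream and call this hook to learn which input formats can produce each one, and the returned array must be heap-allocated because the core kfree()s it. Since this encoder is format-transparent, the only input format offered is the requested output format itself. A sketch of that pass-through shape (assumption: the example_ helper is mine, not one in the tree):

#include <linux/slab.h>
#include <linux/types.h>

static u32 *example_passthrough_input_fmts(u32 output_fmt,
					   unsigned int *num_input_fmts)
{
	u32 *fmts = kmalloc(sizeof(*fmts), GFP_KERNEL);

	if (!fmts) {
		*num_input_fmts = 0;
		return NULL;
	}

	fmts[0] = output_fmt;	/* pass-through: input == output */
	*num_input_fmts = 1;

	return fmts;		/* caller (DRM core) kfree()s this */
}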
+
+static int meson_encoder_hdmi_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(conn_state->state, conn_state->connector);
+ struct meson_drm *priv = encoder_hdmi->priv;
+
+ encoder_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format;
+
+ dev_dbg(priv->dev, "output_bus_fmt %lx\n", encoder_hdmi->output_bus_fmt);
+
+ if (!drm_connector_atomic_hdr_metadata_equal(old_conn_state, conn_state))
+ crtc_state->mode_changed = true;
+
+ return 0;
+}
+
+static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status)
+{
+ struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
+ struct edid *edid;
+
+ if (!encoder_hdmi->cec_notifier)
+ return;
+
+ if (status == connector_status_connected) {
+ edid = drm_bridge_get_edid(encoder_hdmi->next_bridge, encoder_hdmi->connector);
+ if (!edid)
+ return;
+
+ cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
+ } else
+ cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
+}
+
+static const struct drm_bridge_funcs meson_encoder_hdmi_bridge_funcs = {
+ .attach = meson_encoder_hdmi_attach,
+ .detach = meson_encoder_hdmi_detach,
+ .mode_valid = meson_encoder_hdmi_mode_valid,
+ .hpd_notify = meson_encoder_hdmi_hpd_notify,
+ .atomic_enable = meson_encoder_hdmi_atomic_enable,
+ .atomic_disable = meson_encoder_hdmi_atomic_disable,
+ .atomic_get_input_bus_fmts = meson_encoder_hdmi_get_inp_bus_fmts,
+ .atomic_check = meson_encoder_hdmi_atomic_check,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+int meson_encoder_hdmi_init(struct meson_drm *priv)
+{
+ struct meson_encoder_hdmi *meson_encoder_hdmi;
+ struct platform_device *pdev;
+ struct device_node *remote;
+ int ret;
+
+ meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL);
+ if (!meson_encoder_hdmi)
+ return -ENOMEM;
+
+ /* HDMI Transceiver Bridge */
+ remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0);
+ if (!remote) {
+ dev_err(priv->dev, "HDMI transceiver device is disabled");
+ return 0;
+ }
+
+ meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote);
+ if (!meson_encoder_hdmi->next_bridge) {
+ dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* HDMI Encoder Bridge */
+ meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs;
+ meson_encoder_hdmi->bridge.of_node = priv->dev->of_node;
+ meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ meson_encoder_hdmi->bridge.interlace_allowed = true;
+
+ drm_bridge_add(&meson_encoder_hdmi->bridge);
+
+ meson_encoder_hdmi->priv = priv;
+
+ /* Encoder */
+ ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret) {
+ dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret);
+ return ret;
+ }
+
+ meson_encoder_hdmi->encoder.possible_crtcs = BIT(0);
+
+ /* Attach HDMI Encoder Bridge to Encoder */
+ ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
+ return ret;
+ }
+
+ /* Initialize & attach Bridge Connector */
+ meson_encoder_hdmi->connector = drm_bridge_connector_init(priv->drm,
+ &meson_encoder_hdmi->encoder);
+ if (IS_ERR(meson_encoder_hdmi->connector)) {
+ dev_err(priv->dev, "Unable to create HDMI bridge connector\n");
+ return PTR_ERR(meson_encoder_hdmi->connector);
+ }
+ drm_connector_attach_encoder(meson_encoder_hdmi->connector,
+ &meson_encoder_hdmi->encoder);
+
+ /*
+ * We should now have in place:
+ * encoder->[hdmi encoder bridge]->[dw-hdmi bridge]->[display connector bridge]->[display connector]
+ */
+
+ /*
+ * drm_connector_attach_max_bpc_property() requires the
+ * connector to have a state.
+ */
+ drm_atomic_helper_connector_reset(meson_encoder_hdmi->connector);
+
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ drm_connector_attach_hdr_output_metadata_property(meson_encoder_hdmi->connector);
+
+ drm_connector_attach_max_bpc_property(meson_encoder_hdmi->connector, 8, 8);
+
+ /* Handle this here until handled by drm_bridge_connector_init() */
+ meson_encoder_hdmi->connector->ycbcr_420_allowed = true;
+
+ pdev = of_find_device_by_node(remote);
+ if (pdev) {
+ struct cec_connector_info conn_info;
+ struct cec_notifier *notifier;
+
+ cec_fill_conn_info_from_drm(&conn_info, meson_encoder_hdmi->connector);
+
+ notifier = cec_notifier_conn_register(&pdev->dev, NULL, &conn_info);
+ if (!notifier)
+ return -ENOMEM;
+
+ meson_encoder_hdmi->cec_notifier = notifier;
+ }
+
+ dev_dbg(priv->dev, "HDMI encoder initialized\n");
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
new file mode 100644
index 000000000000..ed19494f0956
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_ENCODER_HDMI_H
+#define __MESON_ENCODER_HDMI_H
+
+int meson_encoder_hdmi_init(struct meson_drm *priv);
+
+#endif /* __MESON_ENCODER_HDMI_H */
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
deleted file mode 100644
index f1747fde1fe0..000000000000
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ /dev/null
@@ -1,293 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 BayLibre, SAS
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
- * Copyright (C) 2014 Endless Mobile
- *
- * Written by:
- * Jasper St. Pierre <jstpierre@mecheye.net>
- */
-
-#include <linux/export.h>
-#include <linux/of_graph.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_device.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_print.h>
-
-#include "meson_registers.h"
-#include "meson_vclk.h"
-#include "meson_venc_cvbs.h"
-
-/* HHI VDAC Registers */
-#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
-#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
-#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
-#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
-
-struct meson_venc_cvbs {
- struct drm_encoder encoder;
- struct drm_connector connector;
- struct meson_drm *priv;
-};
-#define encoder_to_meson_venc_cvbs(x) \
- container_of(x, struct meson_venc_cvbs, encoder)
-
-#define connector_to_meson_venc_cvbs(x) \
- container_of(x, struct meson_venc_cvbs, connector)
-
-/* Supported Modes */
-
-struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
- { /* PAL */
- .enci = &meson_cvbs_enci_pal,
- .mode = {
- DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500,
- 720, 732, 795, 864, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_INTERLACE),
- .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
- },
- },
- { /* NTSC */
- .enci = &meson_cvbs_enci_ntsc,
- .mode = {
- DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500,
- 720, 739, 801, 858, 0, 480, 488, 494, 525, 0,
- DRM_MODE_FLAG_INTERLACE),
- .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
- },
- },
-};
-
-static const struct meson_cvbs_mode *
-meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
-{
- int i;
-
- for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
- struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
-
- if (drm_mode_match(req_mode, &meson_mode->mode,
- DRM_MODE_MATCH_TIMINGS |
- DRM_MODE_MATCH_CLOCK |
- DRM_MODE_MATCH_FLAGS |
- DRM_MODE_MATCH_3D_FLAGS))
- return meson_mode;
- }
-
- return NULL;
-}
-
-/* Connector */
-
-static void meson_cvbs_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
-}
-
-static enum drm_connector_status
-meson_cvbs_connector_detect(struct drm_connector *connector, bool force)
-{
- /* FIXME: Add load-detect or jack-detect if possible */
- return connector_status_connected;
-}
-
-static int meson_cvbs_connector_get_modes(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode;
- int i;
-
- for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
- struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
-
- mode = drm_mode_duplicate(dev, &meson_mode->mode);
- if (!mode) {
- DRM_ERROR("Failed to create a new display mode\n");
- return 0;
- }
-
- drm_mode_probed_add(connector, mode);
- }
-
- return i;
-}
-
-static int meson_cvbs_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* Validate the modes added in get_modes */
- return MODE_OK;
-}
-
-static const struct drm_connector_funcs meson_cvbs_connector_funcs = {
- .detect = meson_cvbs_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = meson_cvbs_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const
-struct drm_connector_helper_funcs meson_cvbs_connector_helper_funcs = {
- .get_modes = meson_cvbs_connector_get_modes,
- .mode_valid = meson_cvbs_connector_mode_valid,
-};
-
-/* Encoder */
-
-static void meson_venc_cvbs_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs meson_venc_cvbs_encoder_funcs = {
- .destroy = meson_venc_cvbs_encoder_destroy,
-};
-
-static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- if (meson_cvbs_get_mode(&crtc_state->mode))
- return 0;
-
- return -EINVAL;
-}
-
-static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
-{
- struct meson_venc_cvbs *meson_venc_cvbs =
- encoder_to_meson_venc_cvbs(encoder);
- struct meson_drm *priv = meson_venc_cvbs->priv;
-
- /* Disable CVBS VDAC */
- if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
- } else {
- regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
- }
-}
-
-static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
-{
- struct meson_venc_cvbs *meson_venc_cvbs =
- encoder_to_meson_venc_cvbs(encoder);
- struct meson_drm *priv = meson_venc_cvbs->priv;
-
- /* VDAC0 source is not from ATV */
- writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0,
- priv->io_base + _REG(VENC_VDAC_DACSEL0));
-
- if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
- regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
- } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
- meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
- regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
- } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
- }
-}
-
-static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode);
- struct meson_venc_cvbs *meson_venc_cvbs =
- encoder_to_meson_venc_cvbs(encoder);
- struct meson_drm *priv = meson_venc_cvbs->priv;
-
- if (meson_mode) {
- meson_venci_cvbs_mode_set(priv, meson_mode->enci);
-
- /* Setup 27MHz vclk2 for ENCI and VDAC */
- meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
- MESON_VCLK_CVBS, MESON_VCLK_CVBS,
- MESON_VCLK_CVBS, MESON_VCLK_CVBS,
- true);
- }
-}
-
-static const struct drm_encoder_helper_funcs
- meson_venc_cvbs_encoder_helper_funcs = {
- .atomic_check = meson_venc_cvbs_encoder_atomic_check,
- .disable = meson_venc_cvbs_encoder_disable,
- .enable = meson_venc_cvbs_encoder_enable,
- .mode_set = meson_venc_cvbs_encoder_mode_set,
-};
-
-static bool meson_venc_cvbs_connector_is_available(struct meson_drm *priv)
-{
- struct device_node *remote;
-
- remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
- if (!remote)
- return false;
-
- of_node_put(remote);
- return true;
-}
-
-int meson_venc_cvbs_create(struct meson_drm *priv)
-{
- struct drm_device *drm = priv->drm;
- struct meson_venc_cvbs *meson_venc_cvbs;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- int ret;
-
- if (!meson_venc_cvbs_connector_is_available(priv)) {
- dev_info(drm->dev, "CVBS Output connector not available\n");
- return 0;
- }
-
- meson_venc_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_venc_cvbs),
- GFP_KERNEL);
- if (!meson_venc_cvbs)
- return -ENOMEM;
-
- meson_venc_cvbs->priv = priv;
- encoder = &meson_venc_cvbs->encoder;
- connector = &meson_venc_cvbs->connector;
-
- /* Connector */
-
- drm_connector_helper_add(connector,
- &meson_cvbs_connector_helper_funcs);
-
- ret = drm_connector_init(drm, connector, &meson_cvbs_connector_funcs,
- DRM_MODE_CONNECTOR_Composite);
- if (ret) {
- dev_err(priv->dev, "Failed to init CVBS connector\n");
- return ret;
- }
-
- connector->interlace_allowed = 1;
-
- /* Encoder */
-
- drm_encoder_helper_add(encoder, &meson_venc_cvbs_encoder_helper_funcs);
-
- ret = drm_encoder_init(drm, encoder, &meson_venc_cvbs_encoder_funcs,
- DRM_MODE_ENCODER_TVDAC, "meson_venc_cvbs");
- if (ret) {
- dev_err(priv->dev, "Failed to init CVBS encoder\n");
- return ret;
- }
-
- encoder->possible_crtcs = BIT(0);
-
- drm_connector_attach_encoder(connector, encoder);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 6b9243713b3c..740108a006ba 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -6,7 +6,6 @@
* Dave Airlie
*/
-#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
@@ -378,7 +377,7 @@ static struct pci_driver mgag200_pci_driver = {
static int __init mgag200_init(void)
{
- if (vgacon_text_force() && mgag200_modeset == -1)
+ if (drm_firmware_drivers_only() && mgag200_modeset == -1)
return -EINVAL;
if (mgag200_modeset == 0)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index fd98e8bbc550..b983541a4c53 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -847,9 +847,11 @@ static void
mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb,
struct drm_rect *clip, const struct dma_buf_map *map)
{
+ void __iomem *dst = mdev->vram;
void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
- drm_fb_memcpy_dstclip(mdev->vram, fb->pitches[0], vmap, fb, clip);
+ dst += drm_fb_clip_offset(fb->pitches[0], fb->format, clip);
+ drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, clip);
/* Always scanout image at VRAM offset 0 */
mgag200_set_startadd(mdev, (u32)0);
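
The drm_fb_clip_offset() call lands the copy at the first damaged pixel rather than at the top of VRAM, so only the dirty rectangle is written over the bus. For a single-plane format it reduces to simple arithmetic, sketched here (assumption: cpp is the bytes-per-pixel of plane 0):

#include <drm/drm_rect.h>

static unsigned int example_clip_offset(unsigned int pitch, unsigned int cpp,
					const struct drm_rect *clip)
{
	/* full rows above the clip, plus pixels left of it on its row */
	return clip->y1 * pitch + clip->x1 * cpp;
}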
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 39197b4beea7..1eae5a9645f4 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -65,6 +65,7 @@ config DRM_MSM_HDMI_HDCP
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
depends on DRM_MSM
+ select RATIONAL
default y
help
Compile in support for DP driver in MSM DRM driver. DP external
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 093454457545..03ab55c37beb 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -19,7 +19,7 @@ msm-y := \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
- hdmi/hdmi_connector.o \
+ hdmi/hdmi_hpd.o \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
@@ -27,12 +27,6 @@ msm-y := \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
hdmi/hdmi_pll_8960.o \
- edp/edp.o \
- edp/edp_aux.o \
- edp/edp_bridge.o \
- edp/edp_connector.o \
- edp/edp_ctrl.o \
- edp/edp_phy.o \
disp/mdp_format.o \
disp/mdp_kms.o \
disp/mdp4/mdp4_crtc.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index bdc989183c64..22e8295a5e2b 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -12,7 +12,6 @@ static bool a2xx_idle(struct msm_gpu *gpu);
static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -23,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 8fb847c174ff..2e481e2692ba 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -30,7 +30,6 @@ static bool a3xx_idle(struct msm_gpu *gpu);
static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -41,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index a96ee79cc5e0..c5524d6e8705 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -24,7 +24,6 @@ static bool a4xx_idle(struct msm_gpu *gpu);
static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -35,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index dd593ec2bc56..6bd397a85834 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -107,7 +107,7 @@ reset_set(void *data, u64 val)
* try to reset an active GPU.
*/
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
@@ -133,7 +133,7 @@ reset_set(void *data, u64 val)
gpu->funcs->recover(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 5e2750eb3810..3d28fcf841a6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -65,7 +65,6 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
struct msm_gem_object *obj;
uint32_t *ptr, dwords;
@@ -76,7 +75,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -126,12 +125,11 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
- priv->lastctx = NULL;
+ gpu->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
return;
}
@@ -166,7 +164,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -441,7 +439,7 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
const struct adreno_five_hwcg_regs *regs;
unsigned int i, sz;
- if (adreno_is_a508(adreno_gpu)) {
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) {
regs = a50x_hwcg;
sz = ARRAY_SIZE(a50x_hwcg);
} else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
@@ -485,7 +483,7 @@ static int a5xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
/* Specify workarounds for various microcode issues */
- if (adreno_is_a530(adreno_gpu)) {
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
/* Workaround for token end syncs
* Force a WFI after every direct-render 3D mode draw and every
* 2D mode 3 draw
@@ -620,8 +618,16 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
+ /*
+ * Adreno 506 has the CPZ retention feature and doesn't require
+ * the zap shader to be resumed
+ */
+ if (adreno_is_a506(adreno_gpu))
+ return 0;
+
ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
if (ret)
DRM_ERROR("%s: zap-shader resume failed: %d\n",
@@ -733,9 +739,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
0x00100000 + adreno_gpu->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
- if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) {
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
- if (adreno_is_a508(adreno_gpu))
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
else
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
@@ -751,7 +758,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
}
- if (adreno_is_a508(adreno_gpu))
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x100 << 11 | 0x100 << 22));
else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
@@ -769,8 +776,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
* Disable the RB sampler datapath DP2 clock gating optimization
* for 1-SP GPUs, as it is enabled by default.
*/
- if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
- adreno_is_a512(adreno_gpu))
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu))
gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
/* Disable UCHE global filter as SP can invalidate/flush independently */
@@ -851,10 +858,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* UCHE */
gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
- if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
- adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
- adreno_is_a530(adreno_gpu))
- gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+ /* SMMU */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
ADRENO_PROTECT_RW(0x10000, 0x8000));
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
@@ -895,8 +900,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
- if (!(adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
- adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu)))
+ if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
a5xx_gpmu_ucode_init(gpu);
ret = a5xx_ucode_init(gpu);
@@ -927,6 +931,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (IS_ERR(a5xx_gpu->shadow))
return PTR_ERR(a5xx_gpu->shadow);
+
+ msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
}
gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
@@ -1254,6 +1260,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
/*
@@ -1263,6 +1270,11 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+ if (priv->disable_err_irq) {
+ status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS |
+ A5XX_RBBM_INT_0_MASK_CP_SW;
+ }
+
/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
if (status & RBBM_ERROR_MASK)
a5xx_rbbm_err_irq(gpu, status);
@@ -1338,7 +1350,7 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
- /* Adreno 508, 509, 510, 512 needs manual RBBM sus/res control */
+ /* Adreno 506, 508, 509, 510, 512 need manual RBBM sus/res control */
if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
/* Halt the sp_input_clk at HM level */
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
@@ -1381,8 +1393,9 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
u32 mask = 0xf;
int i, ret;
- /* A508, A510 have 3 XIN ports in VBIF */
- if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu))
+ /* A506, A508, A510 have 3 XIN ports in VBIF */
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu))
mask = 0x7;
/* Clear the VBIF pipe before shutting down */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 71e52b2b2025..3e325e2a2b1b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1146,7 +1146,7 @@ static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
- size_t size, u64 iova)
+ size_t size, u64 iova, const char *name)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct drm_device *dev = a6xx_gpu->base.base.dev;
@@ -1181,6 +1181,8 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
bo->virt = msm_gem_get_vaddr(bo->obj);
bo->size = size;
+ msm_gem_object_set_name(bo->obj, name);
+
return 0;
}
@@ -1515,7 +1517,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
*/
gmu->dummy.size = SZ_4K;
if (adreno_is_a660_family(adreno_gpu)) {
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
+ 0x60400000, "debug");
if (ret)
goto err_memory;
@@ -1523,42 +1526,46 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
}
/* Allocate memory for the GMU dummy page */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, 0x60000000);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
+ 0x60000000, "dummy");
if (ret)
goto err_memory;
+ /* Note that a650 family also includes a660 family: */
if (adreno_is_a650_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
- SZ_16M - SZ_16K, 0x04000);
+ SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
} else if (adreno_is_a640_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
- SZ_256K - SZ_16K, 0x04000);
+ SZ_256K - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
- SZ_256K - SZ_16K, 0x44000);
+ SZ_256K - SZ_16K, 0x44000, "dcache");
if (ret)
goto err_memory;
} else {
+ BUG_ON(adreno_is_a660_family(adreno_gpu));
+
/* HFI v1, has sptprac */
gmu->legacy = true;
/* Allocate memory for the GMU debug region */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
if (ret)
goto err_memory;
}
/* Allocate memory for the HFI queues */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
if (ret)
goto err_memory;
/* Allocate memory for the GMU log region */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
if (ret)
goto err_memory;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 78aad5216a61..17cfad6424db 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
- if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
+ if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -138,14 +138,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, 0x31);
-
- a6xx_gpu->cur_ctx_seqno = ctx->seqno;
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
@@ -177,7 +174,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -1071,6 +1068,8 @@ static int hw_init(struct msm_gpu *gpu)
if (IS_ERR(a6xx_gpu->shadow))
return PTR_ERR(a6xx_gpu->shadow);
+
+ msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
}
gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
@@ -1081,7 +1080,7 @@ static int hw_init(struct msm_gpu *gpu)
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
- a6xx_gpu->cur_ctx_seqno = 0;
+ gpu->cur_ctx_seqno = 0;
/* Enable the SQE to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -1376,10 +1375,14 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
+ if (priv->disable_err_irq)
+ status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
+
if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
a6xx_fault_detect_irq(gpu);
@@ -1557,6 +1560,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++)
a6xx_gpu->shadow[i] = 0;
+ gpu->suspend_count++;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 8e5527c881b1..86e0a7c3fe6d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -20,16 +20,6 @@ struct a6xx_gpu {
struct msm_ringbuffer *cur_ring;
- /**
- * cur_ctx_seqno:
- *
- * The ctx->seqno value of the context with current pgtables
- * installed. Tracked by seqno rather than pointer value to
- * avoid dangling pointers, and cases where a ctx can be freed
- * and a new one created with the same address.
- */
- int cur_ctx_seqno;
-
struct a6xx_gmu gmu;
struct drm_gem_object *shadow_bo;
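
The kerneldoc deleted above (the field moves up into struct msm_gpu as cur_ctx_seqno) explains why every submit path in this series now compares sequence numbers instead of context pointers: a freed context's address can be recycled for a new context, so a pointer compare can falsely conclude "same context" and skip the ctx-restore. A contrived sketch of the hazard and the fix (illustrative types, not from the tree):

#include <linux/types.h>

struct example_ctx {
	int seqno;	/* monotonically increasing, never reused */
};

static bool example_needs_ctx_restore(const struct example_ctx *ctx,
				      int cur_ctx_seqno)
{
	/*
	 * Comparing ctx pointers is unsafe: kfree() + kmalloc() can hand
	 * a brand-new context the old address (the classic ABA problem).
	 * Seqnos are never recycled, so this compare cannot be fooled.
	 */
	return ctx->seqno != cur_ctx_seqno;
}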
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 6e90209cd543..55f443328d8e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -42,7 +42,15 @@ struct a6xx_gpu_state {
struct a6xx_gpu_state_obj *cx_debugbus;
int nr_cx_debugbus;
+ struct msm_gpu_state_bo *gmu_log;
+ struct msm_gpu_state_bo *gmu_hfi;
+ struct msm_gpu_state_bo *gmu_debug;
+
+ s32 hfi_queue_history[2][HFI_HISTORY_SZ];
+
struct list_head objs;
+
+ bool gpu_initialized;
};
static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
@@ -800,6 +808,45 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
&a6xx_state->gmu_registers[2], false);
}
+static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
+ struct a6xx_gpu_state *a6xx_state, struct a6xx_gmu_bo *bo)
+{
+ struct msm_gpu_state_bo *snapshot;
+
+ snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot));
+ if (!snapshot)
+ return NULL;
+
+ snapshot->iova = bo->iova;
+ snapshot->size = bo->size;
+ snapshot->data = kvzalloc(snapshot->size, GFP_KERNEL);
+ if (!snapshot->data)
+ return NULL;
+
+ memcpy(snapshot->data, bo->virt, bo->size);
+
+ return snapshot;
+}
+
+static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned i, j;
+
+ BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history));
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+ for (j = 0; j < HFI_HISTORY_SZ; j++) {
+ unsigned idx = (j + queue->history_idx) % HFI_HISTORY_SZ;
+ a6xx_state->hfi_queue_history[i][j] = queue->history[idx];
+ }
+ }
+}
+
#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@@ -937,6 +984,12 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
a6xx_get_gmu_registers(gpu, a6xx_state);
+ a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
+ a6xx_state->gmu_hfi = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.hfi);
+ a6xx_state->gmu_debug = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.debug);
+
+ a6xx_snapshot_gmu_hfi_history(gpu, a6xx_state);
+
/* If GX isn't on the rest of the data isn't going to be accessible */
if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return &a6xx_state->base;
@@ -950,7 +1003,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
* write out GPU state, so we need to skip this when the SMMU is
* stalled in response to an iova fault
*/
- if (!stalled && !a6xx_crashdumper_init(gpu, &_dumper)) {
+ if (!stalled && !gpu->needs_hw_init &&
+ !a6xx_crashdumper_init(gpu, &_dumper)) {
dumper = &_dumper;
}
@@ -967,6 +1021,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
if (snapshot_debugbus)
a6xx_get_debugbus(gpu, a6xx_state);
+ a6xx_state->gpu_initialized = !gpu->needs_hw_init;
+
return &a6xx_state->base;
}
@@ -978,6 +1034,12 @@ static void a6xx_gpu_state_destroy(struct kref *kref)
struct a6xx_gpu_state *a6xx_state = container_of(state,
struct a6xx_gpu_state, base);
+ if (a6xx_state->gmu_log)
+ kvfree(a6xx_state->gmu_log->data);
+
+ if (a6xx_state->gmu_hfi)
+ kvfree(a6xx_state->gmu_hfi->data);
+
+ if (a6xx_state->gmu_debug)
+ kvfree(a6xx_state->gmu_debug->data);
+
list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
kfree(obj);
@@ -1189,8 +1251,48 @@ void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
if (IS_ERR_OR_NULL(state))
return;
+ drm_printf(p, "gpu-initialized: %d\n", a6xx_state->gpu_initialized);
+
adreno_show(gpu, state, p);
+ drm_puts(p, "gmu-log:\n");
+ if (a6xx_state->gmu_log) {
+ struct msm_gpu_state_bo *gmu_log = a6xx_state->gmu_log;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_log->iova);
+ drm_printf(p, " size: %zu\n", gmu_log->size);
+ adreno_show_object(p, &gmu_log->data, gmu_log->size,
+ &gmu_log->encoded);
+ }
+
+ drm_puts(p, "gmu-hfi:\n");
+ if (a6xx_state->gmu_hfi) {
+ struct msm_gpu_state_bo *gmu_hfi = a6xx_state->gmu_hfi;
+ unsigned i, j;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_hfi->iova);
+ drm_printf(p, " size: %zu\n", gmu_hfi->size);
+ for (i = 0; i < ARRAY_SIZE(a6xx_state->hfi_queue_history); i++) {
+ drm_printf(p, " queue-history[%u]:", i);
+ for (j = 0; j < HFI_HISTORY_SZ; j++) {
+ drm_printf(p, " %d", a6xx_state->hfi_queue_history[i][j]);
+ }
+ drm_printf(p, "\n");
+ }
+ adreno_show_object(p, &gmu_hfi->data, gmu_hfi->size,
+ &gmu_hfi->encoded);
+ }
+
+ drm_puts(p, "gmu-debug:\n");
+ if (a6xx_state->gmu_debug) {
+ struct msm_gpu_state_bo *gmu_debug = a6xx_state->gmu_debug;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_debug->iova);
+ drm_printf(p, " size: %zu\n", gmu_debug->size);
+ adreno_show_object(p, &gmu_debug->data, gmu_debug->size,
+ &gmu_debug->encoded);
+ }
+
drm_puts(p, "registers:\n");
for (i = 0; i < a6xx_state->nr_registers; i++) {
struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index d4c65bf0a1b7..d73fce5fdf1f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -36,6 +36,8 @@ static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
hdr = queue->data[index];
+ queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
+
/*
* If we are to assume that the GMU firmware is in fact a rational actor
* and is programmed to not send us a larger response than we expect
@@ -75,6 +77,8 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return -ENOSPC;
}
+ queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
+
for (i = 0; i < dwords; i++) {
queue->data[index] = data[i];
index = (index + 1) % header->size;
@@ -600,6 +604,9 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu)
queue->header->read_index = 0;
queue->header->write_index = 0;
+
+ memset(&queue->history, 0xff, sizeof(queue->history));
+ queue->history_idx = 0;
}
}
@@ -612,6 +619,9 @@ static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
queue->data = virt;
atomic_set(&queue->seqnum, 0);
+ memset(&queue->history, 0xff, sizeof(queue->history));
+ queue->history_idx = 0;
+
/* Set up the shared memory header */
header->iova = iova;
header->type = 10 << 8 | id;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 2bd670ca42d6..528110169398 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -33,6 +33,17 @@ struct a6xx_hfi_queue {
spinlock_t lock;
u32 *data;
atomic_t seqnum;
+
+ /*
+ * Tracking for the start index of the last N messages in the
+ * queue, for the benefit of devcore dump / crashdec (since
+ * parsing in the reverse direction to decode the last N
+ * messages is difficult to do and would rely on heuristics
+ * which are not guaranteed to be correct)
+ */
+#define HFI_HISTORY_SZ 8
+ s32 history[HFI_HISTORY_SZ];
+ u8 history_idx;
};
/* This is the outgoing queue to the GMU */
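
The history array is a small write-only ring: every queue read and write records its start index at history_idx++ % HFI_HISTORY_SZ, and the snapshot code unwraps it oldest-first by walking HFI_HISTORY_SZ entries starting at history_idx (the u8 index wraps at 256, which 8 divides evenly, so the modulo stays consistent across wraparound). The same mechanism in isolation (the example_ names are mine):

#define EXAMPLE_HISTORY_SZ 8

struct example_history {
	int slot[EXAMPLE_HISTORY_SZ];
	unsigned char idx;
};

static void example_history_record(struct example_history *h, int index)
{
	h->slot[h->idx++ % EXAMPLE_HISTORY_SZ] = index;
}

static void example_history_unwrap(const struct example_history *h,
				   int out[EXAMPLE_HISTORY_SZ])
{
	unsigned int j;

	/* oldest entry lives at idx, newest at idx - 1 (mod size) */
	for (j = 0; j < EXAMPLE_HISTORY_SZ; j++)
		out[j] = h->slot[(j + h->idx) % EXAMPLE_HISTORY_SZ];
}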
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 2a6ce76656aa..fb261930ad1c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -132,6 +132,24 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
+ .rev = ADRENO_REV(5, 0, 6, ANY_ID),
+ .revn = 506,
+ .name = "A506",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_128K + SZ_8K),
+ /*
+ * Increase inactive period to 250ms to avoid bouncing
+ * the GDSC, which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
+ ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a506_zap.mdt",
+ }, {
.rev = ADRENO_REV(5, 0, 8, ANY_ID),
.revn = 508,
.name = "A508",
@@ -408,9 +426,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return NULL;
}
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
ret = msm_gpu_hw_init(gpu);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
@@ -427,13 +445,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return gpu;
}
-static void set_gpu_pdev(struct drm_device *dev,
- struct platform_device *pdev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- priv->gpu_pdev = pdev;
-}
-
static int find_chipid(struct device *dev, struct adreno_rev *rev)
{
struct device_node *node = dev->of_node;
@@ -482,8 +493,8 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
const struct adreno_info *info;
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct drm_device *drm = priv->dev;
struct msm_gpu *gpu;
int ret;
@@ -492,7 +503,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return ret;
dev->platform_data = &config;
- set_gpu_pdev(drm, to_platform_device(dev));
+ priv->gpu_pdev = to_platform_device(dev);
info = adreno_info(config.rev);
@@ -521,12 +532,13 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
static void adreno_unbind(struct device *dev, struct device *master,
void *data)
{
+ struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_gpu *gpu = dev_to_gpu(dev);
pm_runtime_force_suspend(dev);
gpu->funcs->destroy(gpu);
- set_gpu_pdev(dev_get_drvdata(master), NULL);
+ priv->gpu_pdev = NULL;
}
static const struct component_ops a3xx_ops = {
@@ -596,9 +608,27 @@ static int adreno_resume(struct device *dev)
return gpu->funcs->pm_resume(gpu);
}
+static int active_submits(struct msm_gpu *gpu)
+{
+ int active_submits;
+ mutex_lock(&gpu->active_lock);
+ active_submits = gpu->active_submits;
+ mutex_unlock(&gpu->active_lock);
+ return active_submits;
+}
+
static int adreno_suspend(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
+ int remaining;
+
+ remaining = wait_event_timeout(gpu->retire_event,
+ active_submits(gpu) == 0,
+ msecs_to_jiffies(1000));
+ if (remaining == 0) {
+ dev_err(dev, "Timeout waiting for GPU to suspend\n");
+ return -EBUSY;
+ }
return gpu->funcs->pm_suspend(gpu);
}
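
The new suspend path is a quiesce-then-power-down pattern: sleep on gpu->retire_event (signalled by the retire worker as jobs complete) until the active-submit count drains to zero, and fail suspend with -EBUSY after a one-second timeout rather than yanking power from under in-flight work. The pattern in isolation (illustrative names; the predicate callback is an assumption for the sketch):

#include <linux/jiffies.h>
#include <linux/wait.h>

static int example_quiesce(wait_queue_head_t *retire_event,
			   int (*pending)(void *data), void *data)
{
	long remaining;

	/* woken on every retirement; re-checks the predicate each time */
	remaining = wait_event_timeout(*retire_event, pending(data) == 0,
				       msecs_to_jiffies(1000));

	return remaining ? 0 : -EBUSY;	/* 0 jiffies left: timed out */
}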
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 748665232d29..f33cfa4ef1c8 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -504,6 +504,8 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i, count = 0;
+ WARN_ON(!mutex_is_locked(&gpu->lock));
+
kref_init(&state->ref);
ktime_get_real_ts64(&state->time);
@@ -630,7 +632,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
}
/* len is expected to be in bytes */
-static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+void adreno_show_object(struct drm_printer *p, void **ptr, int len,
bool *encoded)
{
if (!*ptr || !len)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 225c277a6223..cffabe7d33c1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -201,6 +201,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a506(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 506;
+}
+
static inline int adreno_is_a508(struct adreno_gpu *gpu)
{
return gpu->revn == 508;
@@ -306,6 +311,8 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state);
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
+void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded);
/*
* Common helper function to initialize the default address space for arm-smmu
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 967245b8cc02..e7c9fe1a250f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -337,7 +337,8 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
}
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
- struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
+ struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
+ struct dpu_hw_stage_cfg *stage_cfg)
{
struct drm_plane *plane;
struct drm_framebuffer *fb;
@@ -346,7 +347,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_plane_state *pstate = NULL;
struct dpu_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
- struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;
u32 flush_mask;
uint32_t stage_idx, lm_idx;
@@ -422,6 +422,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
+ struct dpu_hw_stage_cfg stage_cfg;
int i;
DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
@@ -435,9 +436,9 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
}
/* initialize stage cfg */
- memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+ memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
- _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
for (i = 0; i < cstate->num_mixers; i++) {
ctl = mixer[i].lm_ctl;
@@ -458,7 +459,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].flush_mask);
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
- &dpu_crtc->stage_cfg);
+ &stage_cfg);
}
}
@@ -923,6 +924,20 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
return &cstate->base;
}
+static void dpu_crtc_atomic_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ int i;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
+ drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
+ if (cstate->mixers[i].hw_dspp)
+ drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
+ }
+}
+
static void dpu_crtc_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1423,15 +1438,16 @@ DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dentry *debugfs_root;
- dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
+ debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
debugfs_create_file("status", 0400,
- dpu_crtc->debugfs_root,
+ debugfs_root,
dpu_crtc, &_dpu_debugfs_status_fops);
debugfs_create_file("state", 0600,
- dpu_crtc->debugfs_root,
+ debugfs_root,
&dpu_crtc->base,
&dpu_crtc_debugfs_state_fops);
@@ -1449,13 +1465,6 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
return _dpu_crtc_init_debugfs(crtc);
}
-static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
- debugfs_remove_recursive(dpu_crtc->debugfs_root);
-}
-
static const struct drm_crtc_funcs dpu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = dpu_crtc_destroy,
@@ -1463,8 +1472,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
.reset = dpu_crtc_reset,
.atomic_duplicate_state = dpu_crtc_duplicate_state,
.atomic_destroy_state = dpu_crtc_destroy_state,
+ .atomic_print_state = dpu_crtc_atomic_print_state,
.late_register = dpu_crtc_late_register,
- .early_unregister = dpu_crtc_early_unregister,
.verify_crc_source = dpu_crtc_verify_crc_source,
.set_crc_source = dpu_crtc_set_crc_source,
.enable_vblank = msm_crtc_enable_vblank,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index ae9546ca1359..b8785c394fcc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -129,8 +129,6 @@ struct dpu_crtc_frame_event {
* @drm_requested_vblank : Whether vblanks have been enabled in the encoder
* @property_info : Opaque structure for generic property support
* @property_defaults : Array of default values for generic property support
- * @stage_cfg : H/w mixer stage configuration
- * @debugfs_root : Parent of debugfs node
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
@@ -161,9 +159,6 @@ struct dpu_crtc {
struct drm_pending_vblank_event *event;
u32 vsync_count;
- struct dpu_hw_stage_cfg stage_cfg;
- struct dentry *debugfs_root;
-
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index e7ee4cfb8461..1e648db439f9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -995,9 +995,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS)
- msm_dp_display_mode_set(dpu_enc->dp, drm_enc, mode, adj_mode);
-
list_for_each_entry(conn_iter, connector_list, head)
if (conn_iter->encoder == drm_enc)
conn = conn_iter;
@@ -1148,10 +1145,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
struct msm_drm_private *priv;
struct drm_display_mode *cur_mode = NULL;
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
dpu_enc = to_dpu_encoder_virt(drm_enc);
mutex_lock(&dpu_enc->enc_lock);
@@ -1177,14 +1170,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
_dpu_encoder_virt_enable_helper(drm_enc);
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
- ret = msm_dp_display_enable(dpu_enc->dp, drm_enc);
- if (ret) {
- DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
- ret);
- goto out;
- }
- }
dpu_enc->enabled = true;
out:
@@ -1197,14 +1182,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct msm_drm_private *priv;
int i = 0;
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- } else if (!drm_enc->dev) {
- DPU_ERROR("invalid dev\n");
- return;
- }
-
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
@@ -1218,11 +1195,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
/* wait for idle */
dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
- if (msm_dp_display_pre_disable(dpu_enc->dp, drm_enc))
- DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
- }
-
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1247,11 +1219,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
- if (msm_dp_display_disable(dpu_enc->dp, drm_enc))
- DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
- }
-
mutex_unlock(&dpu_enc->enc_lock);
}
@@ -2128,11 +2095,8 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
.mode_set = dpu_encoder_virt_mode_set,
.disable = dpu_encoder_virt_disable,
- .enable = dpu_kms_encoder_enable,
+ .enable = dpu_encoder_virt_enable,
.atomic_check = dpu_encoder_virt_atomic_check,
-
- /* This is called by dpu_kms_encoder_enable */
- .commit = dpu_encoder_virt_enable,
};
static const struct drm_encoder_funcs dpu_encoder_funcs = {
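
Two things happen in the encoder hunks above: the DP-specific enable/disable calls move out of the virtual encoder (presumably handled via the DP bridge path elsewhere in this series), and the .commit trampoline is retired. The atomic helpers only invoke these hooks on a valid, bound encoder, which is why the defensive NULL checks could go as well. The resulting helper-table shape, using the names from this hunk:

	static const struct drm_encoder_helper_funcs example_helper_funcs = {
		.mode_set     = dpu_encoder_virt_mode_set,
		/* called directly by the atomic commit helpers;
		 * no dpu_kms_encoder_enable() hop any more */
		.enable       = dpu_encoder_virt_enable,
		.disable      = dpu_encoder_virt_disable,
		.atomic_check = dpu_encoder_virt_atomic_check,
	};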
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 185379b18572..ddd9d89cd456 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -698,17 +698,17 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_irq *irq;
- int i, ret = 0;
+ int i;
if (!p) {
- ret = -EINVAL;
- goto fail;
+ DPU_ERROR("failed to create encoder due to invalid parameter\n");
+ return ERR_PTR(-EINVAL);
}
phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
if (!phys_enc) {
- ret = -ENOMEM;
- goto fail;
+ DPU_ERROR("failed to create encoder due to memory allocation error\n");
+ return ERR_PTR(-ENOMEM);
}
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
@@ -748,11 +748,4 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx);
return phys_enc;
-
-fail:
- DPU_ERROR("failed to create encoder\n");
- if (phys_enc)
- dpu_encoder_phys_vid_destroy(phys_enc);
-
- return ERR_PTR(ret);
}
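
The init-path rewrite above follows the common kernel pattern of returning ERR_PTR() directly while nothing has been allocated yet, instead of funnelling every failure through a shared "fail:" label. A generic sketch of the pattern (struct foo and foo_params are placeholders, not names from this driver):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo *foo_init(const struct foo_params *p)
	{
		struct foo *f;

		if (!p)
			return ERR_PTR(-EINVAL);	/* nothing to unwind yet */

		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			return ERR_PTR(-ENOMEM);	/* ditto */

		/* failures after partial setup would need real unwinding */
		return f;
	}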
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index ce6f32a919e5..aa75991903a6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -45,7 +45,7 @@
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
#define CTL_SC7280_MASK \
- (BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_FETCH_ACTIVE))
+ (BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_FETCH_ACTIVE) | BIT(DPU_CTL_VM_CFG))
#define MERGE_3D_SM8150_MASK (0)
@@ -856,9 +856,9 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
};
static const struct dpu_intf_cfg sc7280_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
- INTF_BLK("intf_5", INTF_5, 0x39000, INTF_EDP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
+ INTF_BLK("intf_5", INTF_5, 0x39000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
};
/*************************************************************
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 4ade44bbd37e..31af04afda7d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -179,13 +179,16 @@ enum {
/**
* CTL sub-blocks
- * @DPU_CTL_SPLIT_DISPLAY CTL supports video mode split display
+ * @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display
+ * @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs)
+ * @DPU_CTL_VM_CFG: CTL config to support multiple VMs
* @DPU_CTL_MAX
*/
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
DPU_CTL_ACTIVE_CFG,
DPU_CTL_FETCH_ACTIVE,
+ DPU_CTL_VM_CFG,
DPU_CTL_MAX
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 64740ddb983e..02da9ecf71f1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -36,6 +36,7 @@
#define MERGE_3D_IDX 23
#define INTF_IDX 31
#define CTL_INVALID_BIT 0xffff
+#define CTL_DEFAULT_GROUP_ID 0xf
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
@@ -498,6 +499,13 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 intf_active = 0;
u32 mode_sel = 0;
+ /* CTL_TOP[31:28] carries group_id to collate CTL paths
+ * per VM. Explicitly disable it until VM support is
+ * added in SW. The power-on reset value is not the disabled state.
+ */
+ if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
+ mode_sel = CTL_DEFAULT_GROUP_ID << 28;
+
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
mode_sel |= BIT(17);
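
With DPU_CTL_VM_CFG present, the intf_cfg_v1 hunk above seeds mode_sel with the default group_id before ORing in the mode bit. Spelled out with the constants from this diff (the final value is simple arithmetic, shown for illustration):

	u32 mode_sel = 0xfu << 28;	/* CTL_DEFAULT_GROUP_ID in CTL_TOP[31:28] */

	if (cmd_mode)			/* cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD */
		mode_sel |= BIT(17);	/* mode_sel == 0xf0020000 */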
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index a98e964c3b6f..355894a3b48c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
struct dpu_hw_pcc_cfg *cfg)
{
- u32 base = ctx->cap->sblk->pcc.base;
+ u32 base;
- if (!ctx || !base) {
+ if (!ctx) {
+ DRM_ERROR("invalid ctx %pK\n", ctx);
+ return;
+ }
+
+ base = ctx->cap->sblk->pcc.base;
+
+ if (!base) {
DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index d2b6dca487e3..a77a5eaa78ad 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -30,6 +30,9 @@
#define MDP_AD4_INTR_STATUS_OFF 0x420
#define MDP_INTF_0_OFF_REV_7xxx 0x34000
#define MDP_INTF_1_OFF_REV_7xxx 0x35000
+#define MDP_INTF_2_OFF_REV_7xxx 0x36000
+#define MDP_INTF_3_OFF_REV_7xxx 0x37000
+#define MDP_INTF_4_OFF_REV_7xxx 0x38000
#define MDP_INTF_5_OFF_REV_7xxx 0x39000
/**
@@ -111,6 +114,21 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
},
{
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ {
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index d50e78c9f148..1ab75cccd145 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -26,6 +26,9 @@ enum dpu_hw_intr_reg {
MDP_AD4_1_INTR,
MDP_INTF0_7xxx_INTR,
MDP_INTF1_7xxx_INTR,
+ MDP_INTF2_7xxx_INTR,
+ MDP_INTF3_7xxx_INTR,
+ MDP_INTF4_7xxx_INTR,
MDP_INTF5_7xxx_INTR,
MDP_INTR_MAX,
};
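
This enum indexes dpu_intr_set[] in dpu_hw_interrupts.c, so the three new INTF entries have to land at matching positions in both tables, as the two hunks above do. A compile-time guard for that invariant could look like this (hypothetical; the file does not currently carry one):

	/* one register triplet per enum value, in the same order */
	static_assert(ARRAY_SIZE(dpu_intr_set) == MDP_INTR_MAX,
		      "dpu_intr_set[] out of sync with enum dpu_hw_intr_reg");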
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index f9460672176a..09cdc3576653 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -8,6 +8,8 @@
#include "dpu_hw_sspp.h"
#include "dpu_kms.h"
+#include <drm/drm_file.h>
+
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
/* DPU_SSPP_SRC */
@@ -75,6 +77,7 @@
#define SSPP_TRAFFIC_SHAPER 0x130
#define SSPP_CDP_CNTL 0x134
#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_CDP_CNTL_REC1 0x13c
#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
#define SSPP_TRAFFIC_SHAPER_REC1 0x158
@@ -413,13 +416,11 @@ static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cfg *sspp,
- struct dpu_hw_pixel_ext *pe,
void *scaler_cfg)
{
u32 idx;
struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
- (void)pe;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
|| !scaler3_cfg)
return;
@@ -539,7 +540,7 @@ static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
}
static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
- struct dpu_csc_cfg *data)
+ const struct dpu_csc_cfg *data)
{
u32 idx;
bool csc10 = false;
@@ -571,19 +572,20 @@ static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color, enum
}
static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg)
+ u32 danger_lut,
+ u32 safe_lut)
{
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
- DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
- DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, danger_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, safe_lut);
}
static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg)
+ u64 creq_lut)
{
u32 idx;
@@ -591,11 +593,11 @@ static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
return;
if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
- DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, creq_lut);
DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
- cfg->creq_lut >> 32);
+ creq_lut >> 32);
} else {
- DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, creq_lut);
}
}
@@ -625,10 +627,12 @@ static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
}
static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_cdp_cfg *cfg)
+ struct dpu_hw_pipe_cdp_cfg *cfg,
+ enum dpu_sspp_multirect_index index)
{
u32 idx;
u32 cdp_cntl = 0;
+ u32 cdp_cntl_offset = 0;
if (!ctx || !cfg)
return;
@@ -636,6 +640,11 @@ static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
+ if (index == DPU_SSPP_RECT_SOLO || index == DPU_SSPP_RECT_0)
+ cdp_cntl_offset = SSPP_CDP_CNTL;
+ else
+ cdp_cntl_offset = SSPP_CDP_CNTL_REC1;
+
if (cfg->enable)
cdp_cntl |= BIT(0);
if (cfg->ubwc_meta_enable)
@@ -645,7 +654,7 @@ static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
cdp_cntl |= BIT(3);
- DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+ DPU_REG_WRITE(&ctx->hw, cdp_cntl_offset, cdp_cntl);
}
static void _setup_layer_ops(struct dpu_hw_pipe *c,
@@ -685,6 +694,71 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
}
+#ifdef CONFIG_DEBUG_FS
+int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct dpu_kms *kms, struct dentry *entry)
+{
+ const struct dpu_sspp_cfg *cfg = hw_pipe->cap;
+ const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
+ struct dentry *debugfs_root;
+ char sspp_name[32];
+
+ snprintf(sspp_name, sizeof(sspp_name), "%d", hw_pipe->idx);
+
+ /* create overall sub-directory for the pipe */
+ debugfs_root =
+ debugfs_create_dir(sspp_name, entry);
+
+ /* don't error check these */
+ debugfs_create_xul("features", 0600,
+ debugfs_root, (unsigned long *)&hw_pipe->cap->features);
+
+ /* add register dump support */
+ dpu_debugfs_create_regset32("src_blk", 0400,
+ debugfs_root,
+ sblk->src_blk.base + cfg->base,
+ sblk->src_blk.len,
+ kms);
+
+ if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED4))
+ dpu_debugfs_create_regset32("scaler_blk", 0400,
+ debugfs_root,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+
+ if (cfg->features & BIT(DPU_SSPP_CSC) ||
+ cfg->features & BIT(DPU_SSPP_CSC_10BIT))
+ dpu_debugfs_create_regset32("csc_blk", 0400,
+ debugfs_root,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+
+ debugfs_create_u32("xin_id",
+ 0400,
+ debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ 0400,
+ debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+ debugfs_create_x32("creq_vblank",
+ 0600,
+ debugfs_root,
+ (u32 *) &sblk->creq_vblank);
+ debugfs_create_x32("danger_vblank",
+ 0600,
+ debugfs_root,
+ (u32 *) &sblk->danger_vblank);
+
+ return 0;
+}
+#endif
+
+
static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
void __iomem *addr,
struct dpu_mdss_cfg *catalog,
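
Among the sspp changes above, setup_cdp becomes multirect-aware: rectangle 1 of a multirect pipe gets its own CDP control register at the new SSPP_CDP_CNTL_REC1 offset. The register selection reduces to:

	/* REC1 has a dedicated CDP control register; SOLO/RECT_0 keep the
	 * original one. Offsets are from the #defines in this hunk. */
	u32 cdp_cntl_offset = (index == DPU_SSPP_RECT_SOLO ||
			       index == DPU_SSPP_RECT_0)
				? SSPP_CDP_CNTL		/* 0x134 */
				: SSPP_CDP_CNTL_REC1;	/* 0x13c */

	DPU_REG_WRITE(&ctx->hw, cdp_cntl_offset, cdp_cntl);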
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index fdfd4b46e2c6..92b071b78fdb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -25,11 +25,17 @@ struct dpu_hw_pipe;
/**
* Define all scaler feature bits in catalog
*/
-#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
- (1UL << DPU_SSPP_SCALER_QSEED2) | \
- (1UL << DPU_SSPP_SCALER_QSEED3) | \
- (1UL << DPU_SSPP_SCALER_QSEED3LITE) | \
- (1UL << DPU_SSPP_SCALER_QSEED4))
+#define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \
+ BIT(DPU_SSPP_SCALER_QSEED2) | \
+ BIT(DPU_SSPP_SCALER_QSEED3) | \
+ BIT(DPU_SSPP_SCALER_QSEED3LITE) | \
+ BIT(DPU_SSPP_SCALER_QSEED4))
+
+/*
+ * Define all CSC feature bits in catalog
+ */
+#define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \
+ BIT(DPU_SSPP_CSC_10BIT))
/**
* Component indices
@@ -166,18 +172,12 @@ struct dpu_hw_pipe_cfg {
/**
* struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
- * @danger_lut: LUT for generate danger level based on fill level
- * @safe_lut: LUT for generate safe level based on fill level
- * @creq_lut: LUT for generate creq level based on fill level
* @creq_vblank: creq value generated to vbif during vertical blanking
* @danger_vblank: danger value generated during vertical blanking
* @vblank_en: enable creq_vblank and danger_vblank during vblank
* @danger_safe_en: enable danger safe generation
*/
struct dpu_hw_pipe_qos_cfg {
- u32 danger_lut;
- u32 safe_lut;
- u64 creq_lut;
u32 creq_vblank;
u32 danger_vblank;
bool vblank_en;
@@ -268,7 +268,7 @@ struct dpu_hw_sspp_ops {
* @ctx: Pointer to pipe context
* @data: Pointer to config structure
*/
- void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+ void (*setup_csc)(struct dpu_hw_pipe *ctx, const struct dpu_csc_cfg *data);
/**
* setup_solidfill - enable/disable colorfill
@@ -302,20 +302,22 @@ struct dpu_hw_sspp_ops {
/**
* setup_danger_safe_lut - setup danger safe LUTs
* @ctx: Pointer to pipe context
- * @cfg: Pointer to pipe QoS configuration
+ * @danger_lut: LUT for generating the danger level based on fill level
+ * @safe_lut: LUT for generating the safe level based on fill level
*
*/
void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg);
+ u32 danger_lut,
+ u32 safe_lut);
/**
* setup_creq_lut - setup CREQ LUT
* @ctx: Pointer to pipe context
- * @cfg: Pointer to pipe QoS configuration
+ * @creq_lut: LUT for generating the creq level based on fill level
*
*/
void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg);
+ u64 creq_lut);
/**
* setup_qos_ctrl - setup QoS control
@@ -338,12 +340,10 @@ struct dpu_hw_sspp_ops {
* setup_scaler - setup scaler
* @ctx: Pointer to pipe context
* @pipe_cfg: Pointer to pipe configuration
- * @pe_cfg: Pointer to pixel extension configuration
* @scaler_cfg: Pointer to scaler configuration
*/
void (*setup_scaler)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cfg *pipe_cfg,
- struct dpu_hw_pixel_ext *pe_cfg,
void *scaler_cfg);
/**
@@ -356,9 +356,11 @@ struct dpu_hw_sspp_ops {
* setup_cdp - setup client driven prefetch
* @ctx: Pointer to pipe context
* @cfg: Pointer to cdp configuration
+ * @index: rectangle index in multirect
*/
void (*setup_cdp)(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_cdp_cfg *cfg);
+ struct dpu_hw_pipe_cdp_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
};
/**
@@ -385,6 +387,7 @@ struct dpu_hw_pipe {
struct dpu_hw_sspp_ops ops;
};
+struct dpu_kms;
/**
* dpu_hw_sspp_init - initializes the sspp hw driver object.
* Should be called once before accessing every pipe.
@@ -404,5 +407,8 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
*/
void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct dpu_kms *kms, struct dentry *entry);
+
#endif /*_DPU_HW_SSPP_H */
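
The new DPU_SSPP_CSC_ANY mask collapses the two CSC feature tests into one; the YUV sanity check in dpu_plane_atomic_check() (later in this diff) then reads:

	if (DPU_FORMAT_IS_YUV(fmt) &&
	    (!(pdpu->pipe_hw->cap->features & DPU_SSPP_SCALER) ||
	     !(pdpu->pipe_hw->cap->features & DPU_SSPP_CSC_ANY))) {
		DPU_DEBUG_PLANE(pdpu, "plane doesn't have scaler/csc for yuv\n");
		return -EINVAL;	/* YUV needs both a scaler and a CSC block */
	}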
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index f94584c982cd..aad85116b0a0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -374,7 +374,7 @@ u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
- struct dpu_csc_cfg *data, bool csc10)
+ const struct dpu_csc_cfg *data, bool csc10)
{
static const u32 matrix_shift = 7;
u32 clamp_shift = csc10 ? 16 : 8;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 6d4911957e33..39134754579e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -322,6 +322,6 @@ u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
- struct dpu_csc_cfg *data, bool csc10);
+ const struct dpu_csc_cfg *data, bool csc10);
#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index a15b26428280..47fe11a84a77 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -21,14 +21,14 @@
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"
-#include "dpu_kms.h"
#include "dpu_core_irq.h"
+#include "dpu_crtc.h"
+#include "dpu_encoder.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
-#include "dpu_vbif.h"
-#include "dpu_encoder.h"
+#include "dpu_kms.h"
#include "dpu_plane.h"
-#include "dpu_crtc.h"
+#include "dpu_vbif.h"
#define CREATE_TRACE_POINTS
#include "dpu_trace.h"
@@ -73,8 +73,8 @@ static int _dpu_danger_signal_status(struct seq_file *s,
&status);
} else {
seq_puts(s, "\nSafe signal status:\n");
- if (kms->hw_mdp->ops.get_danger_status)
- kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+ if (kms->hw_mdp->ops.get_safe_status)
+ kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
&status);
}
pm_runtime_put_sync(&kms->pdev->dev);
@@ -82,7 +82,7 @@ static int _dpu_danger_signal_status(struct seq_file *s,
seq_printf(s, "MDP : 0x%x\n", status.mdp);
for (i = SSPP_VIG0; i < SSPP_MAX; i++)
- seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
+ seq_printf(s, "SSPP%d : 0x%x \n", i - SSPP_VIG0,
status.sspp[i]);
seq_puts(s, "\n");
@@ -101,6 +101,73 @@ static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
+static ssize_t _dpu_plane_danger_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ int len;
+ char buf[40];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, kms->dev) {
+ if (plane->fb && plane->state) {
+ dpu_plane_danger_signal_ctrl(plane, enable);
+ DPU_DEBUG("plane:%d img:%dx%d ",
+ plane->base.id, plane->fb->width,
+ plane->fb->height);
+ DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h);
+ } else {
+ DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+ }
+ }
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ int disable_panic;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
+ if (ret)
+ return ret;
+
+ if (disable_panic) {
+ /* Disable panic signal for all active pipes */
+ DPU_DEBUG("Disabling danger:\n");
+ _dpu_plane_set_danger_state(kms, false);
+ kms->has_danger_ctrl = false;
+ } else {
+ /* Enable panic signal for all active pipes */
+ DPU_DEBUG("Enabling danger:\n");
+ kms->has_danger_ctrl = true;
+ _dpu_plane_set_danger_state(kms, true);
+ }
+
+ return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+ .open = simple_open,
+ .read = _dpu_plane_danger_read,
+ .write = _dpu_plane_danger_write,
+};
+
static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
@@ -110,8 +177,20 @@ static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
dpu_kms, &dpu_debugfs_danger_stats_fops);
debugfs_create_file("safe_status", 0600, entry,
dpu_kms, &dpu_debugfs_safe_stats_fops);
+ debugfs_create_file("disable_danger", 0600, entry,
+ dpu_kms, &dpu_plane_danger_enable);
+
}
+/*
+ * Companion structure for dpu_debugfs_create_regset32.
+ */
+struct dpu_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct dpu_kms *dpu_kms;
+};
+
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
struct dpu_debugfs_regset32 *regset = s->private;
@@ -159,24 +238,23 @@ static const struct file_operations dpu_fops_regset32 = {
.release = single_release,
};
-void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+void dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent,
uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
- if (regset) {
- regset->offset = offset;
- regset->blk_len = length;
- regset->dpu_kms = dpu_kms;
- }
-}
+ struct dpu_debugfs_regset32 *regset;
-void dpu_debugfs_create_regset32(const char *name, umode_t mode,
- void *parent, struct dpu_debugfs_regset32 *regset)
-{
- if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+ if (WARN_ON(!name || !dpu_kms || !length))
+ return;
+
+ regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
return;
/* make sure offset is a multiple of 4 */
- regset->offset = round_down(regset->offset, 4);
+ regset->offset = round_down(offset, 4);
+ regset->blk_len = length;
+ regset->dpu_kms = dpu_kms;
debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
}
@@ -203,6 +281,7 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
dpu_debugfs_danger_init(dpu_kms, entry);
dpu_debugfs_vbif_init(dpu_kms, entry);
dpu_debugfs_core_irq_init(dpu_kms, entry);
+ dpu_debugfs_sspp_init(dpu_kms, entry);
for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
if (priv->dp[i])
@@ -384,28 +463,6 @@ static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
}
}
-/*
- * Override the encoder enable since we need to setup the inline rotator and do
- * some crtc magic before enabling any bridge that might be present.
- */
-void dpu_kms_encoder_enable(struct drm_encoder *encoder)
-{
- const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc;
-
- /* Forward this enable call to the commit hook */
- if (funcs && funcs->commit)
- funcs->commit(encoder);
-
- drm_for_each_crtc(crtc, dev) {
- if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
- continue;
-
- trace_dpu_kms_enc_enable(DRMID(crtc));
- }
-}
-
static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -863,6 +920,11 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);
+ /* dump LM sub-block HW register info */
+ for (i = 0; i < cat->mixer_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
+ dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);
+
msm_disp_snapshot_add_block(disp_state, top->hw.length,
dpu_kms->mmio + top->hw.blk_off, "top");
@@ -1153,9 +1215,9 @@ struct msm_kms *dpu_kms_init(struct drm_device *dev)
static int dpu_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_device *ddev = dev_get_drvdata(master);
+ struct msm_drm_private *priv = dev_get_drvdata(master);
struct platform_device *pdev = to_platform_device(dev);
- struct msm_drm_private *priv = ddev->dev_private;
+ struct drm_device *ddev = priv->dev;
struct dpu_kms *dpu_kms;
struct dss_module_power *mp;
int ret = 0;
@@ -1285,7 +1347,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
pm_runtime_force_resume)
};
-static const struct of_device_id dpu_dt_match[] = {
+const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sdm845-dpu", },
{ .compatible = "qcom,sc7180-dpu", },
{ .compatible = "qcom,sc7280-dpu", },
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 775bcbda860f..2d385b4b7f5e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -160,34 +160,10 @@ struct dpu_global_state
*
* Documentation/filesystems/debugfs.rst
*
- * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
* @dpu_debugfs_create_regset32: Create 32-bit register dump file
- * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
*/
/**
- * Companion structure for dpu_debugfs_create_regset32. Do not initialize the
- * members of this structure explicitly; use dpu_debugfs_setup_regset32 instead.
- */
-struct dpu_debugfs_regset32 {
- uint32_t offset;
- uint32_t blk_len;
- struct dpu_kms *dpu_kms;
-};
-
-/**
- * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
- * This function is meant to initialize dpu_debugfs_regset32 structures for use
- * with dpu_debugfs_create_regset32.
- * @regset: opaque register definition structure
- * @offset: sub-block offset
- * @length: sub-block length, in bytes
- * @dpu_kms: pointer to dpu kms structure
- */
-void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
- uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
-
-/**
* dpu_debugfs_create_regset32 - Create register read back file for debugfs
*
* This function is almost identical to the standard debugfs_create_regset32()
@@ -195,20 +171,16 @@ void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
* names/offsets do not need to be provided. The 'read' function simply outputs
* sequential register values over a specified range.
*
- * Similar to the related debugfs_create_regset32 API, the structure pointed to
- * by regset needs to persist for the lifetime of the created file. The calling
- * code is responsible for initialization/management of this structure.
- *
- * The structure pointed to by regset is meant to be opaque. Please use
- * dpu_debugfs_setup_regset32 to initialize it.
- *
* @name: File name within debugfs
* @mode: File mode within debugfs
* @parent: Parent directory entry within debugfs, can be NULL
- * @regset: Pointer to persistent register block definition
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
*/
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
- void *parent, struct dpu_debugfs_regset32 *regset);
+ void *parent,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
/**
* dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
@@ -235,8 +207,6 @@ void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-void dpu_kms_encoder_enable(struct drm_encoder *encoder);
-
/**
* dpu_kms_get_clk_rate() - get the clock rate
* @dpu_kms: pointer to dpu_kms structure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index b466784d9822..131c1f1a869c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -111,7 +111,7 @@ static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
struct device *dev;
struct irq_domain *domain;
- dev = dpu_mdss->base.dev->dev;
+ dev = dpu_mdss->base.dev;
domain = irq_domain_add_linear(dev->of_node, 32,
&dpu_mdss_irqdomain_ops, dpu_mdss);
@@ -184,16 +184,15 @@ static int dpu_mdss_disable(struct msm_mdss *mdss)
return ret;
}
-static void dpu_mdss_destroy(struct drm_device *dev)
+static void dpu_mdss_destroy(struct msm_mdss *mdss)
{
- struct platform_device *pdev = to_platform_device(dev->dev);
- struct msm_drm_private *priv = dev->dev_private;
- struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ struct platform_device *pdev = to_platform_device(mdss->dev);
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int irq;
- pm_runtime_suspend(dev->dev);
- pm_runtime_disable(dev->dev);
+ pm_runtime_suspend(mdss->dev);
+ pm_runtime_disable(mdss->dev);
_dpu_mdss_irq_domain_fini(dpu_mdss);
irq = platform_get_irq(pdev, 0);
irq_set_chained_handler_and_data(irq, NULL, NULL);
@@ -203,7 +202,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
- priv->mdss = NULL;
}
static const struct msm_mdss_funcs mdss_funcs = {
@@ -212,16 +210,15 @@ static const struct msm_mdss_funcs mdss_funcs = {
.destroy = dpu_mdss_destroy,
};
-int dpu_mdss_init(struct drm_device *dev)
+int dpu_mdss_init(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev->dev);
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct dpu_mdss *dpu_mdss;
struct dss_module_power *mp;
int ret;
int irq;
- dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+ dpu_mdss = devm_kzalloc(&pdev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
if (!dpu_mdss)
return -ENOMEM;
@@ -238,7 +235,7 @@ int dpu_mdss_init(struct drm_device *dev)
goto clk_parse_err;
}
- dpu_mdss->base.dev = dev;
+ dpu_mdss->base.dev = &pdev->dev;
dpu_mdss->base.funcs = &mdss_funcs;
ret = _dpu_mdss_irq_domain_add(dpu_mdss);
@@ -256,7 +253,7 @@ int dpu_mdss_init(struct drm_device *dev)
priv->mdss = &dpu_mdss->base;
- pm_runtime_enable(dev->dev);
+ pm_runtime_enable(&pdev->dev);
return 0;
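
The mdss hunks above invert the lookup chain: dpu_mdss code now starts from the platform device and its driver data instead of reaching through drm_device->dev_private. In sketch form, both ends grounded in this diff:

	/* old: drm_device -> dev_private -> priv->mdss */
	/* new: platform_device -> drvdata -> msm_drm_private -> drm_device */
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

This is what lets dpu_mdss_destroy() take a struct msm_mdss * directly and dpu_mdss_init() take the platform_device.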
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index a3e3b9d1b82e..ca75089c9d61 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -13,7 +13,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
-#include <drm/drm_file.h>
#include <drm/drm_gem_atomic_helper.h>
#include "msm_drv.h"
@@ -90,7 +89,6 @@ enum dpu_plane_qos {
/*
* struct dpu_plane - local dpu plane structure
* @aspace: address space pointer
- * @csc_ptr: Points to dpu_csc_cfg structure to use for current
* @mplane_list: List of multirect planes of the same pipe
* @catalog: Points to dpu catalog structure
* @revalidate: force revalidation of all the plane properties
@@ -101,29 +99,14 @@ struct dpu_plane {
struct mutex lock;
enum dpu_sspp pipe;
- uint32_t features; /* capabilities from catalog */
struct dpu_hw_pipe *pipe_hw;
- struct dpu_hw_pipe_cfg pipe_cfg;
- struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
uint32_t color_fill;
bool is_error;
bool is_rt_pipe;
bool is_virtual;
struct list_head mplane_list;
struct dpu_mdss_cfg *catalog;
-
- struct dpu_csc_cfg *csc_ptr;
-
- const struct dpu_sspp_sub_blks *pipe_sblk;
- char pipe_name[DPU_NAME_SIZE];
-
- /* debugfs related stuff */
- struct dentry *debugfs_root;
- struct dpu_debugfs_regset32 debugfs_src;
- struct dpu_debugfs_regset32 debugfs_scaler;
- struct dpu_debugfs_regset32 debugfs_csc;
- bool debugfs_default_scale;
};
static const uint64_t supported_format_modifiers[] = {
@@ -145,14 +128,15 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
* _dpu_plane_calc_bw - calculate bandwidth required for a plane
* @plane: Pointer to drm plane.
* @fb: Pointer to framebuffer associated with the given plane
+ * @pipe_cfg: Pointer to pipe configuration
* Result: Updates calculated bandwidth in the plane state.
* BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest)
* Prefill BW Equation: line src bytes * line_time
*/
static void _dpu_plane_calc_bw(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ struct dpu_hw_pipe_cfg *pipe_cfg)
{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
const struct dpu_format *fmt = NULL;
@@ -169,9 +153,9 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier);
- src_width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
- src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
- dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ src_width = drm_rect_width(&pipe_cfg->src_rect);
+ src_height = drm_rect_height(&pipe_cfg->src_rect);
+ dst_height = drm_rect_height(&pipe_cfg->dst_rect);
fps = drm_mode_vrefresh(mode);
vbp = mode->vtotal - mode->vsync_end;
vpw = mode->vsync_end - mode->vsync_start;
@@ -202,12 +186,12 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
/**
* _dpu_plane_calc_clk - calculate clock required for a plane
* @plane: Pointer to drm plane.
+ * @pipe_cfg: Pointer to pipe configuration
* Result: Updates calculated clock in the plane state.
* Clock equation: dst_w * v_total * fps * (src_h / dst_h)
*/
-static void _dpu_plane_calc_clk(struct drm_plane *plane)
+static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg *pipe_cfg)
{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
int dst_width, src_height, dst_height, fps;
@@ -215,9 +199,9 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane)
pstate = to_dpu_plane_state(plane->state);
mode = &plane->state->crtc->mode;
- src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
- dst_width = drm_rect_width(&pdpu->pipe_cfg.dst_rect);
- dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ src_height = drm_rect_height(&pipe_cfg->src_rect);
+ dst_width = drm_rect_width(&pipe_cfg->dst_rect);
+ dst_height = drm_rect_height(&pipe_cfg->dst_rect);
fps = drm_mode_vrefresh(mode);
pstate->plane_clk =
@@ -254,14 +238,17 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+ u32 tmp_width;
+
if (!tmp->base.state->visible)
continue;
+ tmp_width = drm_rect_width(&tmp->base.state->src) >> 16;
DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
pdpu->base.base.id, tmp->base.base.id,
src_width,
- drm_rect_width(&tmp->pipe_cfg.src_rect));
+ tmp_width);
src_width = max_t(u32, src_width,
- drm_rect_width(&tmp->pipe_cfg.src_rect));
+ tmp_width);
}
if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
@@ -321,9 +308,10 @@ static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
* _dpu_plane_set_qos_lut - set QoS LUT of the given plane
* @plane: Pointer to drm plane
* @fb: Pointer to framebuffer associated with the given plane
+ * @pipe_cfg: Pointer to pipe configuration
*/
static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb, struct dpu_hw_pipe_cfg *pipe_cfg)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
@@ -337,7 +325,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
fb->format->format,
fb->modifier);
total_fl = _dpu_plane_calc_fill_level(plane, fmt,
- drm_rect_width(&pdpu->pipe_cfg.src_rect));
+ drm_rect_width(&pipe_cfg->src_rect));
if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
@@ -348,8 +336,6 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
qos_lut = _dpu_plane_get_qos_lut(
&pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
- pdpu->pipe_qos_cfg.creq_lut = qos_lut;
-
trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
@@ -359,7 +345,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
fmt ? (char *)&fmt->base.pixel_format : NULL,
pdpu->is_rt_pipe, total_fl, qos_lut);
- pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+ pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, qos_lut);
}
/**
@@ -397,24 +383,21 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
}
}
- pdpu->pipe_qos_cfg.danger_lut = danger_lut;
- pdpu->pipe_qos_cfg.safe_lut = safe_lut;
-
trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
(fmt) ? fmt->fetch_mode : 0,
- pdpu->pipe_qos_cfg.danger_lut,
- pdpu->pipe_qos_cfg.safe_lut);
+ danger_lut,
+ safe_lut);
DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
pdpu->pipe - SSPP_VIG0,
fmt ? (char *)&fmt->base.pixel_format : NULL,
fmt ? fmt->fetch_mode : -1,
- pdpu->pipe_qos_cfg.danger_lut,
- pdpu->pipe_qos_cfg.safe_lut);
+ danger_lut,
+ safe_lut);
pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
- &pdpu->pipe_qos_cfg);
+ danger_lut, safe_lut);
}
/**
@@ -427,47 +410,51 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
bool enable, u32 flags)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+
+ memset(&pipe_qos_cfg, 0, sizeof(pipe_qos_cfg));
if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
- pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
- pdpu->pipe_qos_cfg.danger_vblank =
- pdpu->pipe_sblk->danger_vblank;
- pdpu->pipe_qos_cfg.vblank_en = enable;
+ pipe_qos_cfg.creq_vblank = pdpu->pipe_hw->cap->sblk->creq_vblank;
+ pipe_qos_cfg.danger_vblank =
+ pdpu->pipe_hw->cap->sblk->danger_vblank;
+ pipe_qos_cfg.vblank_en = enable;
}
if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
/* this feature overrules previous VBLANK_CTRL */
- pdpu->pipe_qos_cfg.vblank_en = false;
- pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+ pipe_qos_cfg.vblank_en = false;
+ pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
}
if (flags & DPU_PLANE_QOS_PANIC_CTRL)
- pdpu->pipe_qos_cfg.danger_safe_en = enable;
+ pipe_qos_cfg.danger_safe_en = enable;
if (!pdpu->is_rt_pipe) {
- pdpu->pipe_qos_cfg.vblank_en = false;
- pdpu->pipe_qos_cfg.danger_safe_en = false;
+ pipe_qos_cfg.vblank_en = false;
+ pipe_qos_cfg.danger_safe_en = false;
}
DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
pdpu->pipe - SSPP_VIG0,
- pdpu->pipe_qos_cfg.danger_safe_en,
- pdpu->pipe_qos_cfg.vblank_en,
- pdpu->pipe_qos_cfg.creq_vblank,
- pdpu->pipe_qos_cfg.danger_vblank,
+ pipe_qos_cfg.danger_safe_en,
+ pipe_qos_cfg.vblank_en,
+ pipe_qos_cfg.creq_vblank,
+ pipe_qos_cfg.danger_vblank,
pdpu->is_rt_pipe);
pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
- &pdpu->pipe_qos_cfg);
+ &pipe_qos_cfg);
}
/**
* _dpu_plane_set_ot_limit - set OT limit for the given plane
* @plane: Pointer to drm plane
* @crtc: Pointer to drm crtc
+ * @pipe_cfg: Pointer to pipe configuration
*/
static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc, struct dpu_hw_pipe_cfg *pipe_cfg)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_ot_params ot_params;
@@ -476,8 +463,8 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
- ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
- ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+ ot_params.width = drm_rect_width(&pipe_cfg->src_rect);
+ ot_params.height = drm_rect_height(&pipe_cfg->src_rect);
ot_params.is_wfd = !pdpu->is_rt_pipe;
ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode);
ot_params.vbif_idx = VBIF_RT;
@@ -541,14 +528,12 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
struct dpu_hw_scaler3_cfg *scale_cfg,
+ struct dpu_hw_pixel_ext *pixel_ext,
const struct dpu_format *fmt,
uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
{
uint32_t i;
- memset(scale_cfg, 0, sizeof(*scale_cfg));
- memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
-
scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
@@ -587,9 +572,9 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
}
- pstate->pixel_ext.num_ext_pxls_top[i] =
+ pixel_ext->num_ext_pxls_top[i] =
scale_cfg->src_height[i];
- pstate->pixel_ext.num_ext_pxls_left[i] =
+ pixel_ext->num_ext_pxls_left[i] =
scale_cfg->src_width[i];
}
if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
@@ -606,68 +591,97 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->enable = 1;
}
-static void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
-{
- static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
- {
- /* S15.16 format */
- 0x00012A00, 0x00000000, 0x00019880,
- 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
- 0x00012A00, 0x00020480, 0x00000000,
+static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+};
+
+static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
},
- /* signed bias */
- { 0xfff0, 0xff80, 0xff80,},
- { 0x0, 0x0, 0x0,},
- /* unsigned clamp */
- { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
- { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
- };
- static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
- {
- /* S15.16 format */
- 0x00012A00, 0x00000000, 0x00019880,
- 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
- 0x00012A00, 0x00020480, 0x00000000,
- },
- /* signed bias */
- { 0xffc0, 0xfe00, 0xfe00,},
- { 0x0, 0x0, 0x0,},
- /* unsigned clamp */
- { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
- { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
- };
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+};
+
+static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_plane *pdpu, const struct dpu_format *fmt)
+{
+ const struct dpu_csc_cfg *csc_ptr;
if (!pdpu) {
DPU_ERROR("invalid plane\n");
- return;
+ return NULL;
}
- if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
- pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return NULL;
+
+ if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->pipe_hw->cap->features)
+ csc_ptr = &dpu_csc10_YUV2RGB_601L;
else
- pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+ csc_ptr = &dpu_csc_YUV2RGB_601L;
DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
- pdpu->csc_ptr->csc_mv[0],
- pdpu->csc_ptr->csc_mv[1],
- pdpu->csc_ptr->csc_mv[2]);
+ csc_ptr->csc_mv[0],
+ csc_ptr->csc_mv[1],
+ csc_ptr->csc_mv[2]);
+
+ return csc_ptr;
}
static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
- const struct dpu_format *fmt, bool color_fill)
+ const struct dpu_format *fmt, bool color_fill,
+ struct dpu_hw_pipe_cfg *pipe_cfg)
{
const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
+ struct dpu_hw_scaler3_cfg scaler3_cfg;
+ struct dpu_hw_pixel_ext pixel_ext;
+
+ memset(&scaler3_cfg, 0, sizeof(scaler3_cfg));
+ memset(&pixel_ext, 0, sizeof(pixel_ext));
/* don't chroma subsample if decimating */
/* update scaler. calculate default config for QSEED3 */
_dpu_plane_setup_scaler3(pdpu, pstate,
- drm_rect_width(&pdpu->pipe_cfg.src_rect),
- drm_rect_height(&pdpu->pipe_cfg.src_rect),
- drm_rect_width(&pdpu->pipe_cfg.dst_rect),
- drm_rect_height(&pdpu->pipe_cfg.dst_rect),
- &pstate->scaler3_cfg, fmt,
+ drm_rect_width(&pipe_cfg->src_rect),
+ drm_rect_height(&pipe_cfg->src_rect),
+ drm_rect_width(&pipe_cfg->dst_rect),
+ drm_rect_height(&pipe_cfg->dst_rect),
+ &scaler3_cfg, &pixel_ext, fmt,
info->hsub, info->vsub);
+
+ if (pdpu->pipe_hw->ops.setup_pe)
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pixel_ext);
+
+ /*
+ * When programmed in multirect mode, the scaler block is
+ * bypassed. Still we need to update alpha and bitwidth
+ * ONLY for RECT0.
+ */
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ pipe_cfg,
+ &scaler3_cfg);
}
/**
@@ -683,6 +697,7 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
const struct dpu_format *fmt;
const struct drm_plane *plane = &pdpu->base;
struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+ struct dpu_hw_pipe_cfg pipe_cfg;
DPU_DEBUG_PLANE(pdpu, "\n");
@@ -699,13 +714,14 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
pstate->multirect_index);
/* override scaler/decimation if solid fill */
- pdpu->pipe_cfg.src_rect.x1 = 0;
- pdpu->pipe_cfg.src_rect.y1 = 0;
- pdpu->pipe_cfg.src_rect.x2 =
- drm_rect_width(&pdpu->pipe_cfg.dst_rect);
- pdpu->pipe_cfg.src_rect.y2 =
- drm_rect_height(&pdpu->pipe_cfg.dst_rect);
- _dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+ pipe_cfg.dst_rect = pstate->base.dst;
+
+ pipe_cfg.src_rect.x1 = 0;
+ pipe_cfg.src_rect.y1 = 0;
+ pipe_cfg.src_rect.x2 =
+ drm_rect_width(&pipe_cfg.dst_rect);
+ pipe_cfg.src_rect.y2 =
+ drm_rect_height(&pipe_cfg.dst_rect);
if (pdpu->pipe_hw->ops.setup_format)
pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
@@ -714,18 +730,10 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
if (pdpu->pipe_hw->ops.setup_rects)
pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
- &pdpu->pipe_cfg,
+ &pipe_cfg,
pstate->multirect_index);
- if (pdpu->pipe_hw->ops.setup_pe)
- pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
- &pstate->pixel_ext);
-
- if (pdpu->pipe_hw->ops.setup_scaler &&
- pstate->multirect_index != DPU_SSPP_RECT_1)
- pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
- &pdpu->pipe_cfg, &pstate->pixel_ext,
- &pstate->scaler3_cfg);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, true, &pipe_cfg);
}
return 0;
@@ -964,10 +972,10 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
- min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale);
+ min_scale = FRAC_16_16(1, pdpu->pipe_hw->cap->sblk->maxupscale);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale,
- pdpu->pipe_sblk->maxdwnscale << 16,
+ pdpu->pipe_hw->cap->sblk->maxdwnscale << 16,
true, true);
if (ret) {
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
@@ -993,9 +1001,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
if (DPU_FORMAT_IS_YUV(fmt) &&
- (!(pdpu->features & DPU_SSPP_SCALER) ||
- !(pdpu->features & (BIT(DPU_SSPP_CSC)
- | BIT(DPU_SSPP_CSC_10BIT))))) {
+ (!(pdpu->pipe_hw->cap->features & DPU_SSPP_SCALER) ||
+ !(pdpu->pipe_hw->cap->features & DPU_SSPP_CSC_ANY))) {
DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
@@ -1056,8 +1063,13 @@ void dpu_plane_flush(struct drm_plane *plane)
else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
/* force 100% alpha */
_dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
- else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
- pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+ else if (pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_csc) {
+ const struct dpu_format *fmt = to_dpu_format(msm_framebuffer_format(plane->state->fb));
+ const struct dpu_csc_cfg *csc_ptr = _dpu_plane_get_csc(pdpu, fmt);
+
+ if (csc_ptr)
+ pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, csc_ptr);
+ }
/* flag h/w flush complete */
if (plane->state)
@@ -1091,10 +1103,11 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
bool is_rt_pipe, update_qos_remap;
const struct dpu_format *fmt =
to_dpu_format(msm_framebuffer_format(fb));
+ struct dpu_hw_pipe_cfg pipe_cfg;
- memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+ memset(&pipe_cfg, 0, sizeof(struct dpu_hw_pipe_cfg));
- _dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+ _dpu_plane_set_scanout(plane, pstate, &pipe_cfg, fb);
pstate->pending = true;
@@ -1106,17 +1119,15 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
crtc->base.id, DRM_RECT_ARG(&state->dst),
(char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
- pdpu->pipe_cfg.src_rect = state->src;
+ pipe_cfg.src_rect = state->src;
/* state->src is 16.16, src_rect is not */
- pdpu->pipe_cfg.src_rect.x1 >>= 16;
- pdpu->pipe_cfg.src_rect.x2 >>= 16;
- pdpu->pipe_cfg.src_rect.y1 >>= 16;
- pdpu->pipe_cfg.src_rect.y2 >>= 16;
-
- pdpu->pipe_cfg.dst_rect = state->dst;
+ pipe_cfg.src_rect.x1 >>= 16;
+ pipe_cfg.src_rect.x2 >>= 16;
+ pipe_cfg.src_rect.y1 >>= 16;
+ pipe_cfg.src_rect.y2 >>= 16;
- _dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+ pipe_cfg.dst_rect = state->dst;
/* override for color fill */
if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
@@ -1126,25 +1137,11 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
if (pdpu->pipe_hw->ops.setup_rects) {
pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
- &pdpu->pipe_cfg,
+ &pipe_cfg,
pstate->multirect_index);
}
- if (pdpu->pipe_hw->ops.setup_pe &&
- (pstate->multirect_index != DPU_SSPP_RECT_1))
- pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
- &pstate->pixel_ext);
-
- /**
- * when programmed in multirect mode, scalar block will be
- * bypassed. Still we need to update alpha and bitwidth
- * ONLY for RECT0
- */
- if (pdpu->pipe_hw->ops.setup_scaler &&
- pstate->multirect_index != DPU_SSPP_RECT_1)
- pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
- &pdpu->pipe_cfg, &pstate->pixel_ext,
- &pstate->scaler3_cfg);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, false, &pipe_cfg);
if (pdpu->pipe_hw->ops.setup_multirect)
pdpu->pipe_hw->ops.setup_multirect(
@@ -1173,35 +1170,29 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
pstate->multirect_index);
if (pdpu->pipe_hw->ops.setup_cdp) {
- struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+ struct dpu_hw_pipe_cdp_cfg cdp_cfg;
- memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+ memset(&cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
- cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+ cdp_cfg.enable = pdpu->catalog->perf.cdp_cfg
[DPU_PERF_CDP_USAGE_RT].rd_enable;
- cdp_cfg->ubwc_meta_enable =
+ cdp_cfg.ubwc_meta_enable =
DPU_FORMAT_IS_UBWC(fmt);
- cdp_cfg->tile_amortize_enable =
+ cdp_cfg.tile_amortize_enable =
DPU_FORMAT_IS_UBWC(fmt) ||
DPU_FORMAT_IS_TILE(fmt);
- cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+ cdp_cfg.preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
- pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+ pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, &cdp_cfg, pstate->multirect_index);
}
-
- /* update csc */
- if (DPU_FORMAT_IS_YUV(fmt))
- _dpu_plane_setup_csc(pdpu);
- else
- pdpu->csc_ptr = NULL;
}
- _dpu_plane_set_qos_lut(plane, fb);
+ _dpu_plane_set_qos_lut(plane, fb, &pipe_cfg);
_dpu_plane_set_danger_lut(plane, fb);
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
_dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
- _dpu_plane_set_ot_limit(plane, crtc);
+ _dpu_plane_set_ot_limit(plane, crtc, &pipe_cfg);
}
update_qos_remap = (is_rt_pipe != pdpu->is_rt_pipe) ||
@@ -1215,9 +1206,9 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
_dpu_plane_set_qos_remap(plane);
}
- _dpu_plane_calc_bw(plane, fb);
+ _dpu_plane_calc_bw(plane, fb, &pipe_cfg);
- _dpu_plane_calc_clk(plane);
+ _dpu_plane_calc_clk(plane, &pipe_cfg);
}
static void _dpu_plane_atomic_disable(struct drm_plane *plane)
@@ -1314,6 +1305,46 @@ dpu_plane_duplicate_state(struct drm_plane *plane)
return &pstate->base;
}
+static const char * const multirect_mode_name[] = {
+ [DPU_SSPP_MULTIRECT_NONE] = "none",
+ [DPU_SSPP_MULTIRECT_PARALLEL] = "parallel",
+ [DPU_SSPP_MULTIRECT_TIME_MX] = "time_mx",
+};
+
+static const char * const multirect_index_name[] = {
+ [DPU_SSPP_RECT_SOLO] = "solo",
+ [DPU_SSPP_RECT_0] = "rect_0",
+ [DPU_SSPP_RECT_1] = "rect_1",
+};
+
+static const char *dpu_get_multirect_mode(enum dpu_sspp_multirect_mode mode)
+{
+ if (WARN_ON(mode >= ARRAY_SIZE(multirect_mode_name)))
+ return "unknown";
+
+ return multirect_mode_name[mode];
+}
+
+static const char *dpu_get_multirect_index(enum dpu_sspp_multirect_index index)
+{
+ if (WARN_ON(index >= ARRAY_SIZE(multirect_index_name)))
+ return "unknown";
+
+ return multirect_index_name[index];
+}
+
+static void dpu_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ const struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ const struct dpu_plane *pdpu = to_dpu_plane(state->plane);
+
+ drm_printf(p, "\tstage=%d\n", pstate->stage);
+ drm_printf(p, "\tsspp=%s\n", pdpu->pipe_hw->cap->name);
+ drm_printf(p, "\tmultirect_mode=%s\n", dpu_get_multirect_mode(pstate->multirect_mode));
+ drm_printf(p, "\tmultirect_index=%s\n", dpu_get_multirect_index(pstate->multirect_index));
+}
+
static void dpu_plane_reset(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
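
The string tables just above pair with WARN_ON() bounds checks so that a stray enum value prints "unknown" instead of indexing past the table. The lookup generalizes to a small helper (hypothetical, shown for illustration):

	static const char *dpu_name_or_unknown(unsigned int v,
					       const char * const *tbl,
					       size_t n)
	{
		/* WARN_ON() logs a backtrace and evaluates to the condition,
		 * so this complains and still falls back safely. */
		return WARN_ON(v >= n) ? "unknown" : tbl[v];
	}

Usage would be dpu_name_or_unknown(mode, multirect_mode_name, ARRAY_SIZE(multirect_mode_name)).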
@@ -1343,7 +1374,7 @@ static void dpu_plane_reset(struct drm_plane *plane)
}
#ifdef CONFIG_DEBUG_FS
-static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
@@ -1356,167 +1387,23 @@ static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
-static ssize_t _dpu_plane_danger_read(struct file *file,
- char __user *buff, size_t count, loff_t *ppos)
-{
- struct dpu_kms *kms = file->private_data;
- int len;
- char buf[40];
-
- len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
-
- return simple_read_from_buffer(buff, count, ppos, buf, len);
-}
-
-static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+/* SSPPs live inside dpu_plane private data only. Enumerate them here. */
+void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
struct drm_plane *plane;
+ struct dentry *entry = debugfs_create_dir("sspp", debugfs_root);
- drm_for_each_plane(plane, kms->dev) {
- if (plane->fb && plane->state) {
- dpu_plane_danger_signal_ctrl(plane, enable);
- DPU_DEBUG("plane:%d img:%dx%d ",
- plane->base.id, plane->fb->width,
- plane->fb->height);
- DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
- plane->state->src_x >> 16,
- plane->state->src_y >> 16,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16,
- plane->state->crtc_x, plane->state->crtc_y,
- plane->state->crtc_w, plane->state->crtc_h);
- } else {
- DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
- }
- }
-}
-
-static ssize_t _dpu_plane_danger_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dpu_kms *kms = file->private_data;
- int disable_panic;
- int ret;
-
- ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
- if (ret)
- return ret;
-
- if (disable_panic) {
- /* Disable panic signal for all active pipes */
- DPU_DEBUG("Disabling danger:\n");
- _dpu_plane_set_danger_state(kms, false);
- kms->has_danger_ctrl = false;
- } else {
- /* Enable panic signal for all active pipes */
- DPU_DEBUG("Enabling danger:\n");
- kms->has_danger_ctrl = true;
- _dpu_plane_set_danger_state(kms, true);
- }
-
- return count;
-}
+ if (IS_ERR(entry))
+ return;
-static const struct file_operations dpu_plane_danger_enable = {
- .open = simple_open,
- .read = _dpu_plane_danger_read,
- .write = _dpu_plane_danger_write,
-};
+ drm_for_each_plane(plane, dpu_kms->dev) {
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
-static int _dpu_plane_init_debugfs(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
- struct dpu_kms *kms = _dpu_plane_get_kms(plane);
- const struct dpu_sspp_cfg *cfg = pdpu->pipe_hw->cap;
- const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
-
- /* create overall sub-directory for the pipe */
- pdpu->debugfs_root =
- debugfs_create_dir(pdpu->pipe_name,
- plane->dev->primary->debugfs_root);
-
- /* don't error check these */
- debugfs_create_x32("features", 0600,
- pdpu->debugfs_root, &pdpu->features);
-
- /* add register dump support */
- dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
- sblk->src_blk.base + cfg->base,
- sblk->src_blk.len,
- kms);
- dpu_debugfs_create_regset32("src_blk", 0400,
- pdpu->debugfs_root, &pdpu->debugfs_src);
-
- if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
- dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
- sblk->scaler_blk.base + cfg->base,
- sblk->scaler_blk.len,
- kms);
- dpu_debugfs_create_regset32("scaler_blk", 0400,
- pdpu->debugfs_root,
- &pdpu->debugfs_scaler);
- debugfs_create_bool("default_scaling",
- 0600,
- pdpu->debugfs_root,
- &pdpu->debugfs_default_scale);
- }
-
- if (cfg->features & BIT(DPU_SSPP_CSC) ||
- cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
- dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
- sblk->csc_blk.base + cfg->base,
- sblk->csc_blk.len,
- kms);
- dpu_debugfs_create_regset32("csc_blk", 0400,
- pdpu->debugfs_root, &pdpu->debugfs_csc);
+ _dpu_hw_sspp_init_debugfs(pdpu->pipe_hw, dpu_kms, entry);
}
-
- debugfs_create_u32("xin_id",
- 0400,
- pdpu->debugfs_root,
- (u32 *) &cfg->xin_id);
- debugfs_create_u32("clk_ctrl",
- 0400,
- pdpu->debugfs_root,
- (u32 *) &cfg->clk_ctrl);
- debugfs_create_x32("creq_vblank",
- 0600,
- pdpu->debugfs_root,
- (u32 *) &sblk->creq_vblank);
- debugfs_create_x32("danger_vblank",
- 0600,
- pdpu->debugfs_root,
- (u32 *) &sblk->danger_vblank);
-
- debugfs_create_file("disable_danger",
- 0600,
- pdpu->debugfs_root,
- kms, &dpu_plane_danger_enable);
-
- return 0;
-}
-#else
-static int _dpu_plane_init_debugfs(struct drm_plane *plane)
-{
- return 0;
}
#endif
-static int dpu_plane_late_register(struct drm_plane *plane)
-{
- return _dpu_plane_init_debugfs(plane);
-}
-
-static void dpu_plane_early_unregister(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
-
- debugfs_remove_recursive(pdpu->debugfs_root);
-}
-
static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format, uint64_t modifier)
{
@@ -1541,8 +1428,7 @@ static const struct drm_plane_funcs dpu_plane_funcs = {
.reset = dpu_plane_reset,
.atomic_duplicate_state = dpu_plane_duplicate_state,
.atomic_destroy_state = dpu_plane_destroy_state,
- .late_register = dpu_plane_late_register,
- .early_unregister = dpu_plane_early_unregister,
+ .atomic_print_state = dpu_plane_atomic_print_state,
.format_mod_supported = dpu_plane_format_mod_supported,
};
@@ -1609,21 +1495,13 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
- /* cache features mask for later */
- pdpu->features = pdpu->pipe_hw->cap->features;
- pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
- if (!pdpu->pipe_sblk) {
- DPU_ERROR("[%u]invalid sblk\n", pipe);
- goto clean_sspp;
- }
-
if (pdpu->is_virtual) {
- format_list = pdpu->pipe_sblk->virt_format_list;
- num_formats = pdpu->pipe_sblk->virt_num_formats;
+ format_list = pdpu->pipe_hw->cap->sblk->virt_format_list;
+ num_formats = pdpu->pipe_hw->cap->sblk->virt_num_formats;
}
else {
- format_list = pdpu->pipe_sblk->format_list;
- num_formats = pdpu->pipe_sblk->num_formats;
+ format_list = pdpu->pipe_hw->cap->sblk->format_list;
+ num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
}
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
@@ -1663,12 +1541,9 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
/* success! finalize initialization */
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
- /* save user friendly pipe name for later */
- snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
-
mutex_init(&pdpu->lock);
- DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+ DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", plane->name,
pipe, plane->base.id, master_plane_id);
return plane;
@@ -1676,6 +1551,7 @@ clean_sspp:
if (pdpu && pdpu->pipe_hw)
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
+ list_del(&pdpu->mplane_list);
kfree(pdpu);
return ERR_PTR(ret);
}
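
The new .atomic_print_state hook replaces the per-plane debugfs tree: the DRM
core invokes it whenever atomic state is dumped (for instance via
/sys/kernel/debug/dri/N/state), so the driver only has to print its private
extras. A minimal sketch of the shape, with hypothetical driver names:

	/* sketch only: "example_plane_state" and its fields are made up */
	static void example_plane_atomic_print_state(struct drm_printer *p,
						     const struct drm_plane_state *state)
	{
		const struct example_plane_state *pstate =
			container_of(state, const struct example_plane_state, base);

		/* emit driver-private state; core prints the common fields */
		drm_printf(p, "\tstage=%d\n", pstate->stage);
	}

	static const struct drm_plane_funcs example_plane_funcs = {
		/* ...the usual reset/duplicate/destroy callbacks... */
		.atomic_print_state = example_plane_atomic_print_state,
	};
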
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 34e03ac05f4a..9d51dad5c6a5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -23,9 +23,6 @@
* @multirect_index: index of the rectangle of SSPP
* @multirect_mode: parallel or time multiplex multirect mode
* @pending: whether the current update is still pending
- * @scaler3_cfg: configuration data for scaler3
- * @pixel_ext: configuration data for pixel extensions
- * @cdp_cfg: CDP configuration
* @plane_fetch_bw: calculated BW per plane
* @plane_clk: calculated clk per plane
*/
@@ -38,11 +35,6 @@ struct dpu_plane_state {
uint32_t multirect_mode;
bool pending;
- /* scaler configuration */
- struct dpu_hw_scaler3_cfg scaler3_cfg;
- struct dpu_hw_pixel_ext pixel_ext;
-
- struct dpu_hw_pipe_cdp_cfg cdp_cfg;
u64 plane_fetch_bw;
u64 plane_clk;
};
@@ -134,4 +126,10 @@ void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
int dpu_plane_color_fill(struct drm_plane *plane,
uint32_t color, uint32_t alpha);
+#ifdef CONFIG_DEBUG_FS
+void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable);
+#else
+static inline void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) {}
+#endif
+
#endif /* _DPU_PLANE_H_ */
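
The header ends with the usual idiom for optional debugfs hooks: a real
prototype under CONFIG_DEBUG_FS and an empty static inline otherwise, so
callers need no #ifdef of their own. Generic form (names hypothetical):

	#ifdef CONFIG_DEBUG_FS
	void feature_debugfs_init(struct feature_dev *fdev);
	#else
	static inline void feature_debugfs_init(struct feature_dev *fdev) {}
	#endif
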
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 37bba57675a8..54d74341e690 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -266,10 +266,6 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
);
-DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
- TP_PROTO(uint32_t drm_id),
- TP_ARGS(drm_id)
-);
DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 7b242246d4e7..12a5f81e402b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -370,22 +370,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
switch (intf->type) {
case INTF_eDP:
- if (!priv->edp)
- break;
-
- ctl = mdp5_ctlm_request(ctlm, intf->num);
- if (!ctl) {
- ret = -EINVAL;
- break;
- }
-
- encoder = construct_encoder(mdp5_kms, intf, ctl);
- if (IS_ERR(encoder)) {
- ret = PTR_ERR(encoder);
- break;
- }
-
- ret = msm_edp_modeset_init(priv->edp, dev, encoder);
+ DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
break;
case INTF_HDMI:
if (!priv->hdmi)
@@ -936,7 +921,8 @@ fail:
static int mdp5_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_device *ddev = dev_get_drvdata(master);
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct drm_device *ddev = priv->dev;
struct platform_device *pdev = to_platform_device(dev);
DBG("");
@@ -1031,7 +1017,7 @@ static const struct dev_pm_ops mdp5_pm_ops = {
SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
};
-static const struct of_device_id mdp5_dt_match[] = {
+const struct of_device_id mdp5_dt_match[] = {
{ .compatible = "qcom,mdp5", },
/* to support downstream DT files */
{ .compatible = "qcom,mdss_mdp", },
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 0ea53420bc40..b3f79c2277e9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -16,8 +16,6 @@ struct mdp5_mdss {
void __iomem *mmio, *vbif;
- struct regulator *vdd;
-
struct clk *ahb_clk;
struct clk *axi_clk;
struct clk *vsync_clk;
@@ -114,7 +112,7 @@ static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
- struct device *dev = mdp5_mdss->base.dev->dev;
+ struct device *dev = mdp5_mdss->base.dev;
struct irq_domain *d;
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
@@ -157,7 +155,7 @@ static int mdp5_mdss_disable(struct msm_mdss *mdss)
static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
struct platform_device *pdev =
- to_platform_device(mdp5_mdss->base.dev->dev);
+ to_platform_device(mdp5_mdss->base.dev);
mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(mdp5_mdss->ahb_clk))
@@ -174,10 +172,9 @@ static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
return 0;
}
-static void mdp5_mdss_destroy(struct drm_device *dev)
+static void mdp5_mdss_destroy(struct msm_mdss *mdss)
{
- struct msm_drm_private *priv = dev->dev_private;
- struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
if (!mdp5_mdss)
return;
@@ -185,9 +182,7 @@ static void mdp5_mdss_destroy(struct drm_device *dev)
irq_domain_remove(mdp5_mdss->irqcontroller.domain);
mdp5_mdss->irqcontroller.domain = NULL;
- regulator_disable(mdp5_mdss->vdd);
-
- pm_runtime_disable(dev->dev);
+ pm_runtime_disable(mdss->dev);
}
static const struct msm_mdss_funcs mdss_funcs = {
@@ -196,25 +191,24 @@ static const struct msm_mdss_funcs mdss_funcs = {
.destroy = mdp5_mdss_destroy,
};
-int mdp5_mdss_init(struct drm_device *dev)
+int mdp5_mdss_init(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev->dev);
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct mdp5_mdss *mdp5_mdss;
int ret;
DBG("");
- if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
+ if (!of_device_is_compatible(pdev->dev.of_node, "qcom,mdss"))
return 0;
- mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
+ mdp5_mdss = devm_kzalloc(&pdev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
if (!mdp5_mdss) {
ret = -ENOMEM;
goto fail;
}
- mdp5_mdss->base.dev = dev;
+ mdp5_mdss->base.dev = &pdev->dev;
mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
if (IS_ERR(mdp5_mdss->mmio)) {
@@ -230,45 +224,29 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
- goto fail;
- }
-
- /* Regulator to enable GDSCs in downstream kernels */
- mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
- if (IS_ERR(mdp5_mdss->vdd)) {
- ret = PTR_ERR(mdp5_mdss->vdd);
- goto fail;
- }
-
- ret = regulator_enable(mdp5_mdss->vdd);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
- ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
- ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ ret = devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0),
mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
- goto fail_irq;
+ DRM_DEV_ERROR(&pdev->dev, "failed to init irq: %d\n", ret);
+ goto fail;
}
ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
- goto fail_irq;
+ DRM_DEV_ERROR(&pdev->dev, "failed to init sub-block irqs: %d\n", ret);
+ goto fail;
}
mdp5_mdss->base.funcs = &mdss_funcs;
priv->mdss = &mdp5_mdss->base;
- pm_runtime_enable(dev->dev);
+ pm_runtime_enable(&pdev->dev);
return 0;
-fail_irq:
- regulator_disable(mdp5_mdss->vdd);
fail:
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
index a4a7cb06bc87..e75b97127c0d 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
@@ -28,29 +28,42 @@ static ssize_t __maybe_unused disp_devcoredump_read(char *buffer, loff_t offset,
return count - iter.remain;
}
-static void _msm_disp_snapshot_work(struct kthread_work *work)
+struct msm_disp_state *
+msm_disp_snapshot_state_sync(struct msm_kms *kms)
{
- struct msm_kms *kms = container_of(work, struct msm_kms, dump_work);
struct drm_device *drm_dev = kms->dev;
struct msm_disp_state *disp_state;
- struct drm_printer p;
+
+ WARN_ON(!mutex_is_locked(&kms->dump_mutex));
disp_state = kzalloc(sizeof(struct msm_disp_state), GFP_KERNEL);
if (!disp_state)
- return;
+ return ERR_PTR(-ENOMEM);
disp_state->dev = drm_dev->dev;
disp_state->drm_dev = drm_dev;
INIT_LIST_HEAD(&disp_state->blocks);
- /* Serialize dumping here */
- mutex_lock(&kms->dump_mutex);
-
msm_disp_snapshot_capture_state(disp_state);
+ return disp_state;
+}
+
+static void _msm_disp_snapshot_work(struct kthread_work *work)
+{
+ struct msm_kms *kms = container_of(work, struct msm_kms, dump_work);
+ struct msm_disp_state *disp_state;
+ struct drm_printer p;
+
+ /* Serialize dumping here */
+ mutex_lock(&kms->dump_mutex);
+ disp_state = msm_disp_snapshot_state_sync(kms);
mutex_unlock(&kms->dump_mutex);
+ if (IS_ERR(disp_state))
+ return;
+
if (MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE) {
p = drm_info_printer(disp_state->drm_dev->dev);
msm_disp_state_print(disp_state, &p);
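
msm_disp_snapshot_state_sync() must run under kms->dump_mutex, which the
worker above now takes around the call. A hedged caller sketch, assuming the
snapshot is released through the existing msm_disp_state_free() helper:

	struct msm_disp_state *state;

	mutex_lock(&kms->dump_mutex);
	state = msm_disp_snapshot_state_sync(kms);
	mutex_unlock(&kms->dump_mutex);

	if (IS_ERR(state))
		return PTR_ERR(state);

	/* ...inspect or hand the snapshot to devcoredump... */
	msm_disp_state_free(state);	/* assumed release path */
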
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
index 4c619307612c..b5f452bd7ada 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -39,7 +39,7 @@
* @dev: device pointer
* @drm_dev: drm device pointer
* @atomic_state: atomic state duplicated at the time of the error
- * @timestamp: timestamp at which the coredump was captured
+ * @time: timestamp at which the coredump was captured
*/
struct msm_disp_state {
struct device *dev;
@@ -49,7 +49,7 @@ struct msm_disp_state {
struct drm_atomic_state *atomic_state;
- ktime_t timestamp;
+ struct timespec64 time;
};
/**
@@ -85,6 +85,16 @@ int msm_disp_snapshot_init(struct drm_device *drm_dev);
void msm_disp_snapshot_destroy(struct drm_device *drm_dev);
/**
+ * msm_disp_snapshot_state_sync - synchronously snapshot display state
+ * @kms: the kms object
+ *
+ * Returns the snapshot state, or an ERR_PTR() on failure

+ *
+ * Must be called with &kms->dump_mutex held
+ */
+struct msm_disp_state *msm_disp_snapshot_state_sync(struct msm_kms *kms);
+
+/**
* msm_disp_snapshot_state - trigger to dump the display snapshot
* @drm_dev: handle to drm device
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 2e1acb1bc390..5d2ff6791058 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -5,6 +5,8 @@
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <generated/utsrelease.h>
+
#include "msm_disp_snapshot.h"
static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *base_addr)
@@ -79,10 +81,11 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
}
drm_printf(p, "---\n");
-
+ drm_printf(p, "kernel: " UTS_RELEASE "\n");
drm_printf(p, "module: " KBUILD_MODNAME "\n");
drm_printf(p, "dpu devcoredump\n");
- drm_printf(p, "timestamp %lld\n", ktime_to_ns(state->timestamp));
+ drm_printf(p, "time: %lld.%09ld\n",
+ state->time.tv_sec, state->time.tv_nsec);
list_for_each_entry_safe(block, tmp, &state->blocks, node) {
drm_printf(p, "====================%s================\n", block->name);
@@ -100,7 +103,7 @@ static void msm_disp_capture_atomic_state(struct msm_disp_state *disp_state)
struct drm_device *ddev;
struct drm_modeset_acquire_ctx ctx;
- disp_state->timestamp = ktime_get();
+ ktime_get_real_ts64(&disp_state->time);
ddev = disp_state->drm_dev;
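
The devcoredump header switches from a raw monotonic ktime to wall-clock
time, which is easier to correlate with user reports and other logs. The
capture/print pair in isolation, as a sketch:

	#include <linux/timekeeping.h>

	struct timespec64 ts;

	ktime_get_real_ts64(&ts);	/* wall-clock time, not monotonic */
	pr_info("time: %lld.%09ld\n", (s64)ts.tv_sec, ts.tv_nsec);
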
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 62e75dc8afc6..c724cb0bde9d 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -119,13 +119,13 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
{
u32 config = 0, tbd;
- u8 *dpcd = ctrl->panel->dpcd;
+ const u8 *dpcd = ctrl->panel->dpcd;
/* Default-> LSCLK DIV: 1/4 LCLK */
config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
/* Scrambler reset enable */
- if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP)
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd))
config |= DP_CONFIGURATION_CTRL_ASSR;
tbd = dp_link_get_test_bits_depth(ctrl->link,
@@ -1228,7 +1228,10 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
int *training_step)
{
int ret = 0;
+ const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding = DP_SET_ANSI_8B10B;
+ u8 ssc;
+ u8 assr;
struct dp_link_info link_info = {0};
dp_ctrl_config_ctrl(ctrl);
@@ -1238,9 +1241,21 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
dp_aux_link_configure(ctrl->aux, &link_info);
+
+ if (drm_dp_max_downspread(dpcd)) {
+ ssc = DP_SPREAD_AMP_0_5;
+ drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, &ssc, 1);
+ }
+
drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
&encoding, 1);
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
+ assr = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
+ drm_dp_dpcd_write(ctrl->aux, DP_EDP_CONFIGURATION_SET,
+ &assr, 1);
+ }
+
ret = dp_ctrl_link_train_1(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #1 failed. ret=%d\n", ret);
@@ -1312,9 +1327,11 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
struct dp_io *dp_io = &ctrl->parser->io;
struct phy *phy = dp_io->phy;
struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+ const u8 *dpcd = ctrl->panel->dpcd;
opts_dp->lanes = ctrl->link->link_params.num_lanes;
opts_dp->link_rate = ctrl->link->link_params.rate / 100;
+ opts_dp->ssc = drm_dp_max_downspread(dpcd);
dp_ctrl_set_clock_rate(ctrl, DP_CTRL_PM, "ctrl_link",
ctrl->link->link_params.rate * 1000);
@@ -1406,7 +1423,7 @@ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
{
- u8 *dpcd = ctrl->panel->dpcd;
+ const u8 *dpcd = ctrl->panel->dpcd;
/*
* For better interop experience, used a fixed NVID=0x8000
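
The open-coded DPCD bit tests are replaced by drm_dp_helper accessors, both
of which take the cached receiver-capability block. A sketch under that
assumption (the function name is made up):

	#include <drm/drm_dp_helper.h>

	static void example_configure_link(struct drm_dp_aux *aux,
					   const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	{
		u8 val;

		/* enable 0.5%% downspread only if the sink supports SSC */
		if (drm_dp_max_downspread(dpcd)) {
			val = DP_SPREAD_AMP_0_5;
			drm_dp_dpcd_write(aux, DP_DOWNSPREAD_CTRL, &val, 1);
		}

		/* eDP alternate scrambler reset, again gated on capability */
		if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
			val = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
			drm_dp_dpcd_write(aux, DP_EDP_CONFIGURATION_SET, &val, 1);
		}
	}
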
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index aba8aa47ed76..7cc4d21f2091 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -135,8 +135,18 @@ static const struct msm_dp_config sc7180_dp_cfg = {
.num_descs = 1,
};
+static const struct msm_dp_config sc7280_dp_cfg = {
+ .descs = (const struct msm_dp_desc[]) {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP },
+ },
+ .num_descs = 2,
+};
+
static const struct of_device_id dp_dt_match[] = {
{ .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg },
+ { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_cfg },
+ { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_cfg },
{}
};
@@ -224,13 +234,10 @@ static int dp_display_bind(struct device *dev, struct device *master,
{
int rc = 0;
struct dp_display_private *dp = dev_get_dp_display_private(dev);
- struct msm_drm_private *priv;
- struct drm_device *drm;
-
- drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct drm_device *drm = priv->dev;
dp->dp_display.drm_dev = drm;
- priv = drm->dev_private;
priv->dp[dp->id] = &dp->dp_display;
rc = dp->parser->parse(dp->parser, dp->dp_display.connector_type);
@@ -266,8 +273,7 @@ static void dp_display_unbind(struct device *dev, struct device *master,
void *data)
{
struct dp_display_private *dp = dev_get_dp_display_private(dev);
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
dp_power_client_deinit(dp->power);
dp_aux_unregister(dp->aux);
@@ -410,12 +416,11 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
static int dp_display_usbpd_disconnect_cb(struct device *dev)
{
- int rc = 0;
struct dp_display_private *dp = dev_get_dp_display_private(dev);
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
- return rc;
+ return 0;
}
static void dp_display_handle_video_request(struct dp_display_private *dp)
@@ -522,11 +527,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp->hpd_state = ST_CONNECT_PENDING;
- hpd->hpd_high = 1;
-
ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
if (ret) { /* link train failed */
- hpd->hpd_high = 0;
dp->hpd_state = ST_DISCONNECTED;
if (ret == -ECONNRESET) { /* cable unplugged */
@@ -603,7 +605,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* triggered by irq_hdp with sink_count = 0 */
if (dp->link->sink_count == 0) {
dp_ctrl_off_phy(dp->ctrl);
- hpd->hpd_high = 0;
dp->core_initialized = false;
}
mutex_unlock(&dp->event_mutex);
@@ -627,8 +628,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* disable HPD plug interrupts */
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
- hpd->hpd_high = 0;
-
/*
* We don't need separate work for disconnect as
* connect/attention interrupts are disabled
@@ -693,9 +692,15 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
- ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
- if (ret == -ECONNRESET) { /* cable unplugged */
- dp->core_initialized = false;
+ /*
+ * the DP core (AHB/AUX clocks) must be initialized before
+ * irq_hpd can be handled
+ */
+ if (dp->core_initialized) {
+ ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
+ if (ret == -ECONNRESET) { /* cable unplugged */
+ dp->core_initialized = false;
+ }
}
DRM_DEBUG_DP("hpd_state=%d\n", state);
@@ -707,9 +712,9 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
{
dp_debug_put(dp->debug);
+ dp_audio_put(dp->audio);
dp_panel_put(dp->panel);
dp_aux_put(dp->aux);
- dp_audio_put(dp->audio);
}
static int dp_init_sub_modules(struct dp_display_private *dp)
@@ -1481,6 +1486,18 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
}
priv->connectors[priv->num_connectors++] = dp_display->connector;
+
+ dp_display->bridge = msm_dp_bridge_init(dp_display, dev, encoder);
+ if (IS_ERR(dp_display->bridge)) {
+ ret = PTR_ERR(dp_display->bridge);
+ DRM_DEV_ERROR(dev->dev,
+ "failed to create dp bridge: %d\n", ret);
+ dp_display->bridge = NULL;
+ return ret;
+ }
+
+ priv->bridges[priv->num_bridges++] = dp_display->bridge;
+
return 0;
}
@@ -1584,8 +1601,8 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder)
}
void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
struct dp_display_private *dp_display;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 8e80e3bac394..e3adcd578a90 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -13,6 +13,7 @@
struct msm_dp {
struct drm_device *drm_dev;
struct device *codec_dev;
+ struct drm_bridge *bridge;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge *panel_bridge;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 76856c4ee1d6..d4d360d19eba 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -12,6 +12,14 @@
#include "msm_kms.h"
#include "dp_drm.h"
+
+struct msm_dp_bridge {
+ struct drm_bridge bridge;
+ struct msm_dp *dp_display;
+};
+
+#define to_dp_display(x) container_of((x), struct msm_dp_bridge, bridge)
+
struct dp_connector {
struct drm_connector base;
struct msm_dp *dp_display;
@@ -173,3 +181,70 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
return connector;
}
+
+static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+
+ msm_dp_display_mode_set(dp_display, drm_bridge->encoder, mode, adjusted_mode);
+}
+
+static void dp_bridge_enable(struct drm_bridge *drm_bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+
+ msm_dp_display_enable(dp_display, drm_bridge->encoder);
+}
+
+static void dp_bridge_disable(struct drm_bridge *drm_bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+
+ msm_dp_display_pre_disable(dp_display, drm_bridge->encoder);
+}
+
+static void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+
+ msm_dp_display_disable(dp_display, drm_bridge->encoder);
+}
+
+static const struct drm_bridge_funcs dp_bridge_ops = {
+ .enable = dp_bridge_enable,
+ .disable = dp_bridge_disable,
+ .post_disable = dp_bridge_post_disable,
+ .mode_set = dp_bridge_mode_set,
+};
+
+struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ int rc;
+ struct msm_dp_bridge *dp_bridge;
+ struct drm_bridge *bridge;
+
+ dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL);
+ if (!dp_bridge)
+ return ERR_PTR(-ENOMEM);
+
+ dp_bridge->dp_display = dp_display;
+
+ bridge = &dp_bridge->bridge;
+ bridge->funcs = &dp_bridge_ops;
+ bridge->encoder = encoder;
+
+ rc = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (rc) {
+ DRM_ERROR("failed to attach bridge, rc=%d\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ return bridge;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
index e1c90fa47411..db98a1d431eb 100644
--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
@@ -32,8 +32,6 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
dp_usbpd);
- dp_usbpd->hpd_high = hpd;
-
if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
|| !hpd_priv->dp_cb->disconnect) {
pr_err("hpd dp_cb not initialized\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
index 5bc5bb64680f..8feec5aa5027 100644
--- a/drivers/gpu/drm/msm/dp/dp_hpd.h
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.h
@@ -26,7 +26,6 @@ enum plug_orientation {
* @multi_func: multi-function preferred
* @usb_config_req: request to switch to usb
* @exit_dp_mode: request exit from displayport mode
- * @hpd_high: Hot Plug Detect signal is high.
* @hpd_irq: Change in the status since last message
* @alt_mode_cfg_done: bool to specify alt mode status
* @debug_en: bool to specify debug mode
@@ -39,7 +38,6 @@ struct dp_usbpd {
bool multi_func;
bool usb_config_req;
bool exit_dp_mode;
- bool hpd_high;
bool hpd_irq;
bool alt_mode_cfg_done;
bool debug_en;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index a5bdfc5029de..d4d31e5bda07 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -737,18 +737,25 @@ static int dp_link_parse_sink_count(struct dp_link *dp_link)
return 0;
}
-static void dp_link_parse_sink_status_field(struct dp_link_private *link)
+static int dp_link_parse_sink_status_field(struct dp_link_private *link)
{
int len = 0;
link->prev_sink_count = link->dp_link.sink_count;
- dp_link_parse_sink_count(&link->dp_link);
+ len = dp_link_parse_sink_count(&link->dp_link);
+ if (len < 0) {
+ DRM_ERROR("DP parse sink count failed\n");
+ return len;
+ }
len = drm_dp_dpcd_read_link_status(link->aux,
link->link_status);
- if (len < DP_LINK_STATUS_SIZE)
+ if (len < DP_LINK_STATUS_SIZE) {
DRM_ERROR("DP link status read failed\n");
- dp_link_parse_request(link);
+ return len;
+ }
+
+ return dp_link_parse_request(link);
}
/**
@@ -1023,7 +1030,9 @@ int dp_link_process_request(struct dp_link *dp_link)
dp_link_reset_data(link);
- dp_link_parse_sink_status_field(link);
+ ret = dp_link_parse_sink_status_field(link);
+ if (ret)
+ return ret;
if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 75ae3008b68f..0fe02529b5e7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -40,7 +40,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
of_node_put(phy_node);
- if (!phy_pdev || !msm_dsi->phy) {
+ if (!phy_pdev) {
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+ if (!msm_dsi->phy) {
+ put_device(&phy_pdev->dev);
DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
@@ -110,20 +115,8 @@ destroy_dsi:
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_dsi *msm_dsi;
-
- DBG("");
- msm_dsi = dsi_init(pdev);
- if (IS_ERR(msm_dsi)) {
- /* Don't fail the bind if the dsi port is not connected */
- if (PTR_ERR(msm_dsi) == -ENODEV)
- return 0;
- else
- return PTR_ERR(msm_dsi);
- }
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
priv->dsi[msm_dsi->id] = msm_dsi;
@@ -133,15 +126,10 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
static void dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
- int id = msm_dsi->id;
- if (priv->dsi[id]) {
- dsi_destroy(msm_dsi);
- priv->dsi[id] = NULL;
- }
+ priv->dsi[msm_dsi->id] = NULL;
}
static const struct component_ops dsi_ops = {
@@ -149,15 +137,40 @@ static const struct component_ops dsi_ops = {
.unbind = dsi_unbind,
};
-static int dsi_dev_probe(struct platform_device *pdev)
+int dsi_dev_attach(struct platform_device *pdev)
{
return component_add(&pdev->dev, &dsi_ops);
}
+void dsi_dev_detach(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_dev_probe(struct platform_device *pdev)
+{
+ struct msm_dsi *msm_dsi;
+
+ DBG("");
+ msm_dsi = dsi_init(pdev);
+ if (IS_ERR(msm_dsi)) {
+ /* Don't fail the probe if the dsi port is not connected */
+ if (PTR_ERR(msm_dsi) == -ENODEV)
+ return 0;
+ else
+ return PTR_ERR(msm_dsi);
+ }
+
+ return 0;
+}
+
static int dsi_dev_remove(struct platform_device *pdev)
{
+ struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+
DBG("");
- component_del(&pdev->dev, &dsi_ops);
+ dsi_destroy(msm_dsi);
+
return 0;
}
@@ -215,9 +228,13 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) {
- ret = -EINVAL;
- goto fail;
+ if (msm_dsi_is_bonded_dsi(msm_dsi) &&
+ !msm_dsi_is_master_dsi(msm_dsi)) {
+ /*
+ * Do not return an error here; just skip creating the
+ * encoder/connector for the slave-DSI.
+ */
+ return 0;
}
msm_dsi->encoder = encoder;
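
Splitting the two NULL checks above is not cosmetic: of_find_device_by_node()
returns a referenced device, so the reference must be dropped on the second
failure path. The pattern in general form (the readiness check is
hypothetical):

	struct platform_device *pdev = of_find_device_by_node(node);

	if (!pdev)
		return -EPROBE_DEFER;		/* nothing to release yet */

	if (!driver_bound) {			/* hypothetical condition */
		put_device(&pdev->dev);		/* balance the lookup reference */
		return -EPROBE_DEFER;
	}
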
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 569c8ff062ba..c8dedc95428c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -82,7 +82,6 @@ int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
-bool msm_dsi_manager_validate_current_config(u8 id);
void msm_dsi_manager_tpg_enable(void);
/* msm dsi */
@@ -118,8 +117,10 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host);
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
-int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
+int msm_dsi_host_register(struct mipi_dsi_host *host);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
+ struct msm_dsi_phy *src_phy);
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_phy *src_phy);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
@@ -173,8 +174,6 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc);
-int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
- struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy);
int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy);
void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 0afc3b756f92..6b3ced4aaaf5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1586,6 +1586,10 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
if (ret)
return ret;
+ ret = dsi_dev_attach(msm_host->pdev);
+ if (ret)
+ return ret;
+
DBG("id=%d", msm_host->id);
if (msm_host->dev)
queue_work(msm_host->workqueue, &msm_host->hpd_work);
@@ -1598,6 +1602,8 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ dsi_dev_detach(msm_host->pdev);
+
msm_host->device_node = NULL;
DBG("id=%d", msm_host->id);
@@ -1933,7 +1939,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
return 0;
}
-int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
+int msm_dsi_host_register(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
int ret;
@@ -1947,20 +1953,6 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
return ret;
msm_host->registered = true;
-
- /* If the panel driver has not been probed after host register,
- * we should defer the host's probe.
- * It makes sure panel is connected when fbcon detects
- * connector status and gets the proper display mode to
- * create framebuffer.
- * Don't try to defer if there is nothing connected to the dsi
- * output
- */
- if (check_defer && msm_host->device_node) {
- if (IS_ERR(of_drm_find_panel(msm_host->device_node)))
- if (!of_drm_find_bridge(msm_host->device_node))
- return -EPROBE_DEFER;
- }
}
return 0;
@@ -2028,7 +2020,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
/* TODO: unvote for bus bandwidth */
cfg_hnd->ops->link_clk_disable(msm_host);
- pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ pm_runtime_put(&msm_host->pdev->dev);
}
int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
@@ -2187,57 +2179,12 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
wmb();
}
-int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
+void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
struct msm_dsi_phy *src_phy)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- struct clk *byte_clk_provider, *pixel_clk_provider;
- int ret;
msm_host->cphy_mode = src_phy->cphy_mode;
-
- ret = msm_dsi_phy_get_clk_provider(src_phy,
- &byte_clk_provider, &pixel_clk_provider);
- if (ret) {
- pr_info("%s: can't get provider from pll, don't set parent\n",
- __func__);
- return 0;
- }
-
- ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- if (msm_host->dsi_clk_src) {
- ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
- }
-
- if (msm_host->esc_clk_src) {
- ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
- }
-
-exit:
- return ret;
}
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
@@ -2305,7 +2252,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
*/
/* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
* dsi_link_clk_disable(msm_host);
- * pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ * pm_runtime_put(&msm_host->pdev->dev);
* }
*/
msm_host->enabled = true;
@@ -2397,7 +2344,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
fail_disable_clk:
cfg_hnd->ops->link_clk_disable(msm_host);
- pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ pm_runtime_put(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
unlock_ret:
@@ -2424,7 +2371,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
cfg_hnd->ops->link_clk_disable(msm_host);
- pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ pm_runtime_put(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
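
The pm_runtime_put_autosuspend() calls become plain pm_runtime_put(): the
autosuspend variant only pays off when paired with
pm_runtime_mark_last_busy() and a configured autosuspend delay, which these
call sites do not appear to set up. The balanced form the patch settles on,
sketched:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the usage count */
		return ret;
	}

	/* ...program the hardware... */

	pm_runtime_put(dev);			/* may suspend at once, no delay */
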
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 20c4d650fd80..f19bae475c96 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -74,15 +74,13 @@ static int dsi_mgr_setup_components(int id)
int ret;
if (!IS_BONDED_DSI()) {
- ret = msm_dsi_host_register(msm_dsi->host, true);
+ ret = msm_dsi_host_register(msm_dsi->host);
if (ret)
return ret;
msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
- ret = msm_dsi_host_set_src_pll(msm_dsi->host, msm_dsi->phy);
- } else if (!other_dsi) {
- ret = 0;
- } else {
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ } else if (other_dsi) {
struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi;
struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
@@ -94,10 +92,10 @@ static int dsi_mgr_setup_components(int id)
* because only master DSI device adds the panel to global
* panel list. The panel's device is the master DSI device.
*/
- ret = msm_dsi_host_register(slave_link_dsi->host, false);
+ ret = msm_dsi_host_register(slave_link_dsi->host);
if (ret)
return ret;
- ret = msm_dsi_host_register(master_link_dsi->host, true);
+ ret = msm_dsi_host_register(master_link_dsi->host);
if (ret)
return ret;
@@ -106,13 +104,11 @@ static int dsi_mgr_setup_components(int id)
MSM_DSI_PHY_MASTER);
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
MSM_DSI_PHY_SLAVE);
- ret = msm_dsi_host_set_src_pll(msm_dsi->host, clk_master_dsi->phy);
- if (ret)
- return ret;
- ret = msm_dsi_host_set_src_pll(other_dsi->host, clk_master_dsi->phy);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
}
- return ret;
+ return 0;
}
static int enable_phy(struct msm_dsi *msm_dsi,
@@ -649,23 +645,6 @@ fail:
return ERR_PTR(ret);
}
-bool msm_dsi_manager_validate_current_config(u8 id)
-{
- bool is_bonded_dsi = IS_BONDED_DSI();
-
- /*
- * For bonded DSI, we only have one drm panel. For this
- * use case, we register only one bridge/connector.
- * Skip bridge/connector initialisation if it is
- * slave-DSI for bonded DSI configuration.
- */
- if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) {
- DBG("Skip bridge registration for slave DSI->id: %d\n", id);
- return false;
- }
- return true;
-}
-
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 9842e04b5858..2027b38617ab 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -602,7 +602,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
clk_disable_unprepare(phy->ahb_clk);
- pm_runtime_put_autosuspend(&phy->pdev->dev);
+ pm_runtime_put(&phy->pdev->dev);
}
static const struct of_device_id dsi_phy_dt_match[] = {
@@ -808,12 +808,14 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req,
struct msm_dsi_phy_shared_timings *shared_timings)
{
- struct device *dev = &phy->pdev->dev;
+ struct device *dev;
int ret;
if (!phy || !phy->cfg->ops.enable)
return -EINVAL;
+ dev = &phy->pdev->dev;
+
ret = dsi_phy_enable_resource(phy);
if (ret) {
DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
@@ -892,17 +894,6 @@ bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
return phy->cfg->ops.set_continuous_clock(phy, enable);
}
-int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
- struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
-{
- if (byte_clk_provider)
- *byte_clk_provider = phy->provided_clocks->hws[DSI_BYTE_PLL_CLK]->clk;
- if (pixel_clk_provider)
- *pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;
-
- return 0;
-}
-
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
{
if (phy->cfg->ops.save_pll_state) {
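
The msm_dsi_phy_enable() hunk fixes an order-of-operations bug: when dev is
initialized in its declaration, phy is dereferenced before the NULL check can
run. In miniature:

	/* buggy: the initializer dereferences phy before the check */
	struct device *dev = &phy->pdev->dev;	/* NULL deref if phy == NULL */

	if (!phy)
		return -EINVAL;

The fix declares dev first and assigns it only after the check succeeds,
exactly as in the hunk above.
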
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
deleted file mode 100644
index 106a67473af5..000000000000
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ /dev/null
@@ -1,198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/of_irq.h>
-#include "edp.h"
-
-static irqreturn_t edp_irq(int irq, void *dev_id)
-{
- struct msm_edp *edp = dev_id;
-
- /* Process eDP irq */
- return msm_edp_ctrl_irq(edp->ctrl);
-}
-
-static void edp_destroy(struct platform_device *pdev)
-{
- struct msm_edp *edp = platform_get_drvdata(pdev);
-
- if (!edp)
- return;
-
- if (edp->ctrl) {
- msm_edp_ctrl_destroy(edp->ctrl);
- edp->ctrl = NULL;
- }
-
- platform_set_drvdata(pdev, NULL);
-}
-
-/* construct eDP at bind/probe time, grab all the resources. */
-static struct msm_edp *edp_init(struct platform_device *pdev)
-{
- struct msm_edp *edp = NULL;
- int ret;
-
- if (!pdev) {
- pr_err("no eDP device\n");
- ret = -ENXIO;
- goto fail;
- }
-
- edp = devm_kzalloc(&pdev->dev, sizeof(*edp), GFP_KERNEL);
- if (!edp) {
- ret = -ENOMEM;
- goto fail;
- }
- DBG("eDP probed=%p", edp);
-
- edp->pdev = pdev;
- platform_set_drvdata(pdev, edp);
-
- ret = msm_edp_ctrl_init(edp);
- if (ret)
- goto fail;
-
- return edp;
-
-fail:
- if (edp)
- edp_destroy(pdev);
-
- return ERR_PTR(ret);
-}
-
-static int edp_bind(struct device *dev, struct device *master, void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
- struct msm_edp *edp;
-
- DBG("");
- edp = edp_init(to_platform_device(dev));
- if (IS_ERR(edp))
- return PTR_ERR(edp);
- priv->edp = edp;
-
- return 0;
-}
-
-static void edp_unbind(struct device *dev, struct device *master, void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
-
- DBG("");
- if (priv->edp) {
- edp_destroy(to_platform_device(dev));
- priv->edp = NULL;
- }
-}
-
-static const struct component_ops edp_ops = {
- .bind = edp_bind,
- .unbind = edp_unbind,
-};
-
-static int edp_dev_probe(struct platform_device *pdev)
-{
- DBG("");
- return component_add(&pdev->dev, &edp_ops);
-}
-
-static int edp_dev_remove(struct platform_device *pdev)
-{
- DBG("");
- component_del(&pdev->dev, &edp_ops);
- return 0;
-}
-
-static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdss-edp" },
- {}
-};
-
-static struct platform_driver edp_driver = {
- .probe = edp_dev_probe,
- .remove = edp_dev_remove,
- .driver = {
- .name = "msm_edp",
- .of_match_table = dt_match,
- },
-};
-
-void __init msm_edp_register(void)
-{
- DBG("");
- platform_driver_register(&edp_driver);
-}
-
-void __exit msm_edp_unregister(void)
-{
- DBG("");
- platform_driver_unregister(&edp_driver);
-}
-
-/* Second part of initialization, the drm/kms level modeset_init */
-int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
- struct drm_encoder *encoder)
-{
- struct platform_device *pdev = edp->pdev;
- struct msm_drm_private *priv = dev->dev_private;
- int ret;
-
- edp->encoder = encoder;
- edp->dev = dev;
-
- edp->bridge = msm_edp_bridge_init(edp);
- if (IS_ERR(edp->bridge)) {
- ret = PTR_ERR(edp->bridge);
- DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
- edp->bridge = NULL;
- goto fail;
- }
-
- edp->connector = msm_edp_connector_init(edp);
- if (IS_ERR(edp->connector)) {
- ret = PTR_ERR(edp->connector);
- DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
- edp->connector = NULL;
- goto fail;
- }
-
- edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (edp->irq < 0) {
- ret = edp->irq;
- DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
- goto fail;
- }
-
- ret = devm_request_irq(&pdev->dev, edp->irq,
- edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "edp_isr", edp);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
- edp->irq, ret);
- goto fail;
- }
-
- priv->bridges[priv->num_bridges++] = edp->bridge;
- priv->connectors[priv->num_connectors++] = edp->connector;
-
- return 0;
-
-fail:
- /* bridge/connector are normally destroyed by drm */
- if (edp->bridge) {
- edp_bridge_destroy(edp->bridge);
- edp->bridge = NULL;
- }
- if (edp->connector) {
- edp->connector->funcs->destroy(edp->connector);
- edp->connector = NULL;
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
deleted file mode 100644
index 8590f2ce274d..000000000000
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __EDP_CONNECTOR_H__
-#define __EDP_CONNECTOR_H__
-
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
-
-#include "msm_drv.h"
-
-#define edp_read(offset) msm_readl((offset))
-#define edp_write(offset, data) msm_writel((data), (offset))
-
-struct edp_ctrl;
-struct edp_aux;
-struct edp_phy;
-
-struct msm_edp {
- struct drm_device *dev;
- struct platform_device *pdev;
-
- struct drm_connector *connector;
- struct drm_bridge *bridge;
-
- /* the encoder we are hooked to (outside of eDP block) */
- struct drm_encoder *encoder;
-
- struct edp_ctrl *ctrl;
-
- int irq;
-};
-
-/* eDP bridge */
-struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp);
-void edp_bridge_destroy(struct drm_bridge *bridge);
-
-/* eDP connector */
-struct drm_connector *msm_edp_connector_init(struct msm_edp *edp);
-
-/* AUX */
-void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux);
-void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux);
-irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr);
-void msm_edp_aux_ctrl(struct edp_aux *aux, int enable);
-
-/* Phy */
-bool msm_edp_phy_ready(struct edp_phy *phy);
-void msm_edp_phy_ctrl(struct edp_phy *phy, int enable);
-void msm_edp_phy_vm_pe_init(struct edp_phy *phy);
-void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1);
-void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane);
-void *msm_edp_phy_init(struct device *dev, void __iomem *regbase);
-
-/* Ctrl */
-irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl);
-void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on);
-int msm_edp_ctrl_init(struct msm_edp *edp);
-void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl);
-bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl);
-int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
- struct drm_connector *connector, struct edid **edid);
-int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
- const struct drm_display_mode *mode,
- const struct drm_display_info *info);
-/* @pixel_rate is in kHz */
-bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
- u32 pixel_rate, u32 *pm, u32 *pn);
-
-#endif /* __EDP_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
deleted file mode 100644
index 7907e0f5988f..000000000000
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ /dev/null
@@ -1,388 +0,0 @@
-#ifndef EDP_XML
-#define EDP_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
-
-Copyright (C) 2013-2021 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum edp_color_depth {
- EDP_6BIT = 0,
- EDP_8BIT = 1,
- EDP_10BIT = 2,
- EDP_12BIT = 3,
- EDP_16BIT = 4,
-};
-
-enum edp_component_format {
- EDP_RGB = 0,
- EDP_YUV422 = 1,
- EDP_YUV444 = 2,
-};
-
-#define REG_EDP_MAINLINK_CTRL 0x00000004
-#define EDP_MAINLINK_CTRL_ENABLE 0x00000001
-#define EDP_MAINLINK_CTRL_RESET 0x00000002
-
-#define REG_EDP_STATE_CTRL 0x00000008
-#define EDP_STATE_CTRL_TRAIN_PATTERN_1 0x00000001
-#define EDP_STATE_CTRL_TRAIN_PATTERN_2 0x00000002
-#define EDP_STATE_CTRL_TRAIN_PATTERN_3 0x00000004
-#define EDP_STATE_CTRL_SYMBOL_ERR_RATE_MEAS 0x00000008
-#define EDP_STATE_CTRL_PRBS7 0x00000010
-#define EDP_STATE_CTRL_CUSTOM_80_BIT_PATTERN 0x00000020
-#define EDP_STATE_CTRL_SEND_VIDEO 0x00000040
-#define EDP_STATE_CTRL_PUSH_IDLE 0x00000080
-
-#define REG_EDP_CONFIGURATION_CTRL 0x0000000c
-#define EDP_CONFIGURATION_CTRL_SYNC_CLK 0x00000001
-#define EDP_CONFIGURATION_CTRL_STATIC_MVID 0x00000002
-#define EDP_CONFIGURATION_CTRL_PROGRESSIVE 0x00000004
-#define EDP_CONFIGURATION_CTRL_LANES__MASK 0x00000030
-#define EDP_CONFIGURATION_CTRL_LANES__SHIFT 4
-static inline uint32_t EDP_CONFIGURATION_CTRL_LANES(uint32_t val)
-{
- return ((val) << EDP_CONFIGURATION_CTRL_LANES__SHIFT) & EDP_CONFIGURATION_CTRL_LANES__MASK;
-}
-#define EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING 0x00000040
-#define EDP_CONFIGURATION_CTRL_COLOR__MASK 0x00000100
-#define EDP_CONFIGURATION_CTRL_COLOR__SHIFT 8
-static inline uint32_t EDP_CONFIGURATION_CTRL_COLOR(enum edp_color_depth val)
-{
- return ((val) << EDP_CONFIGURATION_CTRL_COLOR__SHIFT) & EDP_CONFIGURATION_CTRL_COLOR__MASK;
-}
-
-#define REG_EDP_SOFTWARE_MVID 0x00000014
-
-#define REG_EDP_SOFTWARE_NVID 0x00000018
-
-#define REG_EDP_TOTAL_HOR_VER 0x0000001c
-#define EDP_TOTAL_HOR_VER_HORIZ__MASK 0x0000ffff
-#define EDP_TOTAL_HOR_VER_HORIZ__SHIFT 0
-static inline uint32_t EDP_TOTAL_HOR_VER_HORIZ(uint32_t val)
-{
- return ((val) << EDP_TOTAL_HOR_VER_HORIZ__SHIFT) & EDP_TOTAL_HOR_VER_HORIZ__MASK;
-}
-#define EDP_TOTAL_HOR_VER_VERT__MASK 0xffff0000
-#define EDP_TOTAL_HOR_VER_VERT__SHIFT 16
-static inline uint32_t EDP_TOTAL_HOR_VER_VERT(uint32_t val)
-{
- return ((val) << EDP_TOTAL_HOR_VER_VERT__SHIFT) & EDP_TOTAL_HOR_VER_VERT__MASK;
-}
-
-#define REG_EDP_START_HOR_VER_FROM_SYNC 0x00000020
-#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK 0x0000ffff
-#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT 0
-static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_HORIZ(uint32_t val)
-{
- return ((val) << EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK;
-}
-#define EDP_START_HOR_VER_FROM_SYNC_VERT__MASK 0xffff0000
-#define EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT 16
-static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_VERT(uint32_t val)
-{
- return ((val) << EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_VERT__MASK;
-}
-
-#define REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY 0x00000024
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK 0x00007fff
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT 0
-static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(uint32_t val)
-{
- return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK;
-}
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC 0x00008000
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK 0x7fff0000
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT 16
-static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(uint32_t val)
-{
- return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK;
-}
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC 0x80000000
-
-#define REG_EDP_ACTIVE_HOR_VER 0x00000028
-#define EDP_ACTIVE_HOR_VER_HORIZ__MASK 0x0000ffff
-#define EDP_ACTIVE_HOR_VER_HORIZ__SHIFT 0
-static inline uint32_t EDP_ACTIVE_HOR_VER_HORIZ(uint32_t val)
-{
- return ((val) << EDP_ACTIVE_HOR_VER_HORIZ__SHIFT) & EDP_ACTIVE_HOR_VER_HORIZ__MASK;
-}
-#define EDP_ACTIVE_HOR_VER_VERT__MASK 0xffff0000
-#define EDP_ACTIVE_HOR_VER_VERT__SHIFT 16
-static inline uint32_t EDP_ACTIVE_HOR_VER_VERT(uint32_t val)
-{
- return ((val) << EDP_ACTIVE_HOR_VER_VERT__SHIFT) & EDP_ACTIVE_HOR_VER_VERT__MASK;
-}
-
-#define REG_EDP_MISC1_MISC0 0x0000002c
-#define EDP_MISC1_MISC0_MISC0__MASK 0x000000ff
-#define EDP_MISC1_MISC0_MISC0__SHIFT 0
-static inline uint32_t EDP_MISC1_MISC0_MISC0(uint32_t val)
-{
- return ((val) << EDP_MISC1_MISC0_MISC0__SHIFT) & EDP_MISC1_MISC0_MISC0__MASK;
-}
-#define EDP_MISC1_MISC0_SYNC 0x00000001
-#define EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK 0x00000006
-#define EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT 1
-static inline uint32_t EDP_MISC1_MISC0_COMPONENT_FORMAT(enum edp_component_format val)
-{
- return ((val) << EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT) & EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK;
-}
-#define EDP_MISC1_MISC0_CEA 0x00000008
-#define EDP_MISC1_MISC0_BT709_5 0x00000010
-#define EDP_MISC1_MISC0_COLOR__MASK 0x000000e0
-#define EDP_MISC1_MISC0_COLOR__SHIFT 5
-static inline uint32_t EDP_MISC1_MISC0_COLOR(enum edp_color_depth val)
-{
- return ((val) << EDP_MISC1_MISC0_COLOR__SHIFT) & EDP_MISC1_MISC0_COLOR__MASK;
-}
-#define EDP_MISC1_MISC0_MISC1__MASK 0x0000ff00
-#define EDP_MISC1_MISC0_MISC1__SHIFT 8
-static inline uint32_t EDP_MISC1_MISC0_MISC1(uint32_t val)
-{
- return ((val) << EDP_MISC1_MISC0_MISC1__SHIFT) & EDP_MISC1_MISC0_MISC1__MASK;
-}
-#define EDP_MISC1_MISC0_INTERLACED_ODD 0x00000100
-#define EDP_MISC1_MISC0_STEREO__MASK 0x00000600
-#define EDP_MISC1_MISC0_STEREO__SHIFT 9
-static inline uint32_t EDP_MISC1_MISC0_STEREO(uint32_t val)
-{
- return ((val) << EDP_MISC1_MISC0_STEREO__SHIFT) & EDP_MISC1_MISC0_STEREO__MASK;
-}
-
-#define REG_EDP_PHY_CTRL 0x00000074
-#define EDP_PHY_CTRL_SW_RESET_PLL 0x00000001
-#define EDP_PHY_CTRL_SW_RESET 0x00000004
-
-#define REG_EDP_MAINLINK_READY 0x00000084
-#define EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY 0x00000008
-#define EDP_MAINLINK_READY_TRAIN_PATTERN_2_READY 0x00000010
-#define EDP_MAINLINK_READY_TRAIN_PATTERN_3_READY 0x00000020
-
-#define REG_EDP_AUX_CTRL 0x00000300
-#define EDP_AUX_CTRL_ENABLE 0x00000001
-#define EDP_AUX_CTRL_RESET 0x00000002
-
-#define REG_EDP_INTERRUPT_REG_1 0x00000308
-#define EDP_INTERRUPT_REG_1_HPD 0x00000001
-#define EDP_INTERRUPT_REG_1_HPD_ACK 0x00000002
-#define EDP_INTERRUPT_REG_1_HPD_EN 0x00000004
-#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE 0x00000008
-#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_ACK 0x00000010
-#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_EN 0x00000020
-#define EDP_INTERRUPT_REG_1_WRONG_ADDR 0x00000040
-#define EDP_INTERRUPT_REG_1_WRONG_ADDR_ACK 0x00000080
-#define EDP_INTERRUPT_REG_1_WRONG_ADDR_EN 0x00000100
-#define EDP_INTERRUPT_REG_1_TIMEOUT 0x00000200
-#define EDP_INTERRUPT_REG_1_TIMEOUT_ACK 0x00000400
-#define EDP_INTERRUPT_REG_1_TIMEOUT_EN 0x00000800
-#define EDP_INTERRUPT_REG_1_NACK_DEFER 0x00001000
-#define EDP_INTERRUPT_REG_1_NACK_DEFER_ACK 0x00002000
-#define EDP_INTERRUPT_REG_1_NACK_DEFER_EN 0x00004000
-#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT 0x00008000
-#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_ACK 0x00010000
-#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_EN 0x00020000
-#define EDP_INTERRUPT_REG_1_I2C_NACK 0x00040000
-#define EDP_INTERRUPT_REG_1_I2C_NACK_ACK 0x00080000
-#define EDP_INTERRUPT_REG_1_I2C_NACK_EN 0x00100000
-#define EDP_INTERRUPT_REG_1_I2C_DEFER 0x00200000
-#define EDP_INTERRUPT_REG_1_I2C_DEFER_ACK 0x00400000
-#define EDP_INTERRUPT_REG_1_I2C_DEFER_EN 0x00800000
-#define EDP_INTERRUPT_REG_1_PLL_UNLOCK 0x01000000
-#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_ACK 0x02000000
-#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_EN 0x04000000
-#define EDP_INTERRUPT_REG_1_AUX_ERROR 0x08000000
-#define EDP_INTERRUPT_REG_1_AUX_ERROR_ACK 0x10000000
-#define EDP_INTERRUPT_REG_1_AUX_ERROR_EN 0x20000000
-
-#define REG_EDP_INTERRUPT_REG_2 0x0000030c
-#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO 0x00000001
-#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_ACK 0x00000002
-#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_EN 0x00000004
-#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT 0x00000008
-#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_ACK 0x00000010
-#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_EN 0x00000020
-#define EDP_INTERRUPT_REG_2_FRAME_END 0x00000200
-#define EDP_INTERRUPT_REG_2_FRAME_END_ACK 0x00000080
-#define EDP_INTERRUPT_REG_2_FRAME_END_EN 0x00000100
-#define EDP_INTERRUPT_REG_2_CRC_UPDATED 0x00000200
-#define EDP_INTERRUPT_REG_2_CRC_UPDATED_ACK 0x00000400
-#define EDP_INTERRUPT_REG_2_CRC_UPDATED_EN 0x00000800
-
-#define REG_EDP_INTERRUPT_TRANS_NUM 0x00000310
-
-#define REG_EDP_AUX_DATA 0x00000314
-#define EDP_AUX_DATA_READ 0x00000001
-#define EDP_AUX_DATA_DATA__MASK 0x0000ff00
-#define EDP_AUX_DATA_DATA__SHIFT 8
-static inline uint32_t EDP_AUX_DATA_DATA(uint32_t val)
-{
- return ((val) << EDP_AUX_DATA_DATA__SHIFT) & EDP_AUX_DATA_DATA__MASK;
-}
-#define EDP_AUX_DATA_INDEX__MASK 0x00ff0000
-#define EDP_AUX_DATA_INDEX__SHIFT 16
-static inline uint32_t EDP_AUX_DATA_INDEX(uint32_t val)
-{
- return ((val) << EDP_AUX_DATA_INDEX__SHIFT) & EDP_AUX_DATA_INDEX__MASK;
-}
-#define EDP_AUX_DATA_INDEX_WRITE 0x80000000
-
-#define REG_EDP_AUX_TRANS_CTRL 0x00000318
-#define EDP_AUX_TRANS_CTRL_I2C 0x00000100
-#define EDP_AUX_TRANS_CTRL_GO 0x00000200
-
-#define REG_EDP_AUX_STATUS 0x00000324
-
-static inline uint32_t REG_EDP_PHY_LN(uint32_t i0) { return 0x00000400 + 0x40*i0; }
-
-static inline uint32_t REG_EDP_PHY_LN_PD_CTL(uint32_t i0) { return 0x00000404 + 0x40*i0; }
-
-#define REG_EDP_PHY_GLB_VM_CFG0 0x00000510
-
-#define REG_EDP_PHY_GLB_VM_CFG1 0x00000514
-
-#define REG_EDP_PHY_GLB_MISC9 0x00000518
-
-#define REG_EDP_PHY_GLB_CFG 0x00000528
-
-#define REG_EDP_PHY_GLB_PD_CTL 0x0000052c
-
-#define REG_EDP_PHY_GLB_PHY_STATUS 0x00000598
-
-#define REG_EDP_28nm_PHY_PLL_REFCLK_CFG 0x00000000
-
-#define REG_EDP_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004
-
-#define REG_EDP_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008
-
-#define REG_EDP_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c
-
-#define REG_EDP_28nm_PHY_PLL_VREG_CFG 0x00000010
-
-#define REG_EDP_28nm_PHY_PLL_PWRGEN_CFG 0x00000014
-
-#define REG_EDP_28nm_PHY_PLL_DMUX_CFG 0x00000018
-
-#define REG_EDP_28nm_PHY_PLL_AMUX_CFG 0x0000001c
-
-#define REG_EDP_28nm_PHY_PLL_GLB_CFG 0x00000020
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008
-
-#define REG_EDP_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024
-
-#define REG_EDP_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028
-
-#define REG_EDP_28nm_PHY_PLL_LPFR_CFG 0x0000002c
-
-#define REG_EDP_28nm_PHY_PLL_LPFC1_CFG 0x00000030
-
-#define REG_EDP_28nm_PHY_PLL_LPFC2_CFG 0x00000034
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG0 0x00000038
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG1 0x0000003c
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG2 0x00000040
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG3 0x00000044
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG4 0x00000048
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG0 0x0000004c
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG1 0x00000050
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG2 0x00000054
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG3 0x00000058
-
-#define REG_EDP_28nm_PHY_PLL_LKDET_CFG0 0x0000005c
-
-#define REG_EDP_28nm_PHY_PLL_LKDET_CFG1 0x00000060
-
-#define REG_EDP_28nm_PHY_PLL_LKDET_CFG2 0x00000064
-
-#define REG_EDP_28nm_PHY_PLL_TEST_CFG 0x00000068
-#define EDP_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG0 0x0000006c
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG1 0x00000070
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG2 0x00000074
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG3 0x00000078
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG4 0x0000007c
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG5 0x00000080
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG6 0x00000084
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG7 0x00000088
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG8 0x0000008c
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG9 0x00000090
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG10 0x00000094
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG11 0x00000098
-
-#define REG_EDP_28nm_PHY_PLL_EFUSE_CFG 0x0000009c
-
-#define REG_EDP_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
-
-
-#endif /* EDP_XML */
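The register definitions above follow a generated pattern: every multi-bit field gets a __MASK/__SHIFT pair plus an inline helper that shifts a value into place and masks off overflow. For reference, a minimal sketch of how the driver composes them into a single register write (mirroring edp_config_ctrl() later in this diff; the 4-lane, 8 bpc values are arbitrary examples):

    /* 4 lanes (the field stores count - 1), 8 bpc, progressive scan */
    uint32_t cfg = EDP_CONFIGURATION_CTRL_LANES(4 - 1) |
                   EDP_CONFIGURATION_CTRL_COLOR(EDP_8BIT) |
                   EDP_CONFIGURATION_CTRL_PROGRESSIVE |
                   EDP_CONFIGURATION_CTRL_SYNC_CLK;
    edp_write(base + REG_EDP_CONFIGURATION_CTRL, cfg);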
diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c
deleted file mode 100644
index e3d85c622cfb..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_aux.c
+++ /dev/null
@@ -1,265 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "edp.h"
-#include "edp.xml.h"
-
-#define AUX_CMD_FIFO_LEN 144
-#define AUX_CMD_NATIVE_MAX 16
-#define AUX_CMD_I2C_MAX 128
-
-#define EDP_INTR_AUX_I2C_ERR \
- (EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \
- EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \
- EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER)
-#define EDP_INTR_TRANS_STATUS \
- (EDP_INTERRUPT_REG_1_AUX_I2C_DONE | EDP_INTR_AUX_I2C_ERR)
-
-struct edp_aux {
- void __iomem *base;
- bool msg_err;
-
- struct completion msg_comp;
-
- /* Prevent reentry of the message transaction routine. */
- struct mutex msg_mutex;
-
- struct drm_dp_aux drm_aux;
-};
-#define to_edp_aux(x) container_of(x, struct edp_aux, drm_aux)
-
-static int edp_msg_fifo_tx(struct edp_aux *aux, struct drm_dp_aux_msg *msg)
-{
- u32 data[4];
- u32 reg, len;
- /*
- * DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ (0x8 & 0x9) isolates the
- * "native" request bit; DP_AUX_I2C_READ & DP_AUX_NATIVE_READ
- * (0x1 & 0x9) isolates the "read" bit.
- */
- bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
- bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
- u8 *msgdata = msg->buffer;
- int i;
-
- if (read)
- len = 4;
- else
- len = msg->size + 4;
-
- /*
- * cmd fifo only has depth of 144 bytes
- */
- if (len > AUX_CMD_FIFO_LEN)
- return -EINVAL;
-
- /* Pack cmd and write to HW */
- data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
- if (read)
- data[0] |= BIT(4); /* R/W */
-
- data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */
- data[2] = msg->address & 0xff; /* addr[7:0] */
- data[3] = (msg->size - 1) & 0xff; /* len[7:0] */
-
- for (i = 0; i < len; i++) {
- reg = (i < 4) ? data[i] : msgdata[i - 4];
- reg = EDP_AUX_DATA_DATA(reg); /* index = 0, write */
- if (i == 0)
- reg |= EDP_AUX_DATA_INDEX_WRITE;
- edp_write(aux->base + REG_EDP_AUX_DATA, reg);
- }
-
- reg = 0; /* Transaction number is always 1 */
- if (!native) /* i2c */
- reg |= EDP_AUX_TRANS_CTRL_I2C;
-
- reg |= EDP_AUX_TRANS_CTRL_GO;
- edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, reg);
-
- return 0;
-}
-
-static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg)
-{
- u32 data;
- u8 *dp;
- int i;
- u32 len = msg->size;
-
- edp_write(aux->base + REG_EDP_AUX_DATA,
- EDP_AUX_DATA_INDEX_WRITE | EDP_AUX_DATA_READ); /* index = 0 */
-
- dp = msg->buffer;
-
- /* discard first byte */
- data = edp_read(aux->base + REG_EDP_AUX_DATA);
- for (i = 0; i < len; i++) {
- data = edp_read(aux->base + REG_EDP_AUX_DATA);
- dp[i] = (u8)((data >> 8) & 0xff);
- }
-
- return 0;
-}
-
-/*
- * This function does the real work of processing an AUX transaction.
- * It calls msm_edp_aux_ctrl() to reset the AUX channel if the wait
- * times out.
- * The caller who triggers the transaction must ensure msm_edp_aux_ctrl()
- * is not running concurrently in another thread, i.e. start a
- * transaction only when the AUX channel is fully enabled.
- */
-static ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux,
- struct drm_dp_aux_msg *msg)
-{
- struct edp_aux *aux = to_edp_aux(drm_aux);
- ssize_t ret;
- unsigned long time_left;
- bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
- bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
-
- /* Ignore address-only messages */
- if ((msg->size == 0) || (msg->buffer == NULL)) {
- msg->reply = native ?
- DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
- return msg->size;
- }
-
- /* msg sanity check */
- if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) ||
- (msg->size > AUX_CMD_I2C_MAX)) {
- pr_err("%s: invalid msg: size(%zu), request(%x)\n",
- __func__, msg->size, msg->request);
- return -EINVAL;
- }
-
- mutex_lock(&aux->msg_mutex);
-
- aux->msg_err = false;
- reinit_completion(&aux->msg_comp);
-
- ret = edp_msg_fifo_tx(aux, msg);
- if (ret < 0)
- goto unlock_exit;
-
- DBG("wait_for_completion");
- time_left = wait_for_completion_timeout(&aux->msg_comp,
- msecs_to_jiffies(300));
- if (!time_left) {
- /*
- * Clear GO and reset AUX channel
- * to cancel the current transaction.
- */
- edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
- msm_edp_aux_ctrl(aux, 1);
- pr_err("%s: aux timeout,\n", __func__);
- ret = -ETIMEDOUT;
- goto unlock_exit;
- }
- DBG("completion");
-
- if (!aux->msg_err) {
- if (read) {
- ret = edp_msg_fifo_rx(aux, msg);
- if (ret < 0)
- goto unlock_exit;
- }
-
- msg->reply = native ?
- DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
- } else {
- /* Reply defer to retry */
- msg->reply = native ?
- DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
- /*
- * The sleep time in the caller is not long enough to ensure
- * our H/W completes the transaction. Add more defer time here.
- */
- msleep(100);
- }
-
- /* Return requested size for success or retry */
- ret = msg->size;
-
-unlock_exit:
- mutex_unlock(&aux->msg_mutex);
- return ret;
-}
-
-void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux)
-{
- struct device *dev = &edp->pdev->dev;
- struct edp_aux *aux = NULL;
- int ret;
-
- DBG("");
- aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
- if (!aux)
- return NULL;
-
- aux->base = regbase;
- mutex_init(&aux->msg_mutex);
- init_completion(&aux->msg_comp);
-
- aux->drm_aux.name = "msm_edp_aux";
- aux->drm_aux.dev = dev;
- aux->drm_aux.drm_dev = edp->dev;
- aux->drm_aux.transfer = edp_aux_transfer;
- ret = drm_dp_aux_register(&aux->drm_aux);
- if (ret) {
- pr_err("%s: failed to register drm aux: %d\n", __func__, ret);
- mutex_destroy(&aux->msg_mutex);
- return NULL;
- }
-
- if (drm_aux)
- *drm_aux = &aux->drm_aux;
-
- return aux;
-}
-
-void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux)
-{
- if (aux) {
- drm_dp_aux_unregister(&aux->drm_aux);
- mutex_destroy(&aux->msg_mutex);
- }
-}
-
-irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr)
-{
- if (isr & EDP_INTR_TRANS_STATUS) {
- DBG("isr=%x", isr);
- edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
-
- if (isr & EDP_INTR_AUX_I2C_ERR)
- aux->msg_err = true;
- else
- aux->msg_err = false;
-
- complete(&aux->msg_comp);
- }
-
- return IRQ_HANDLED;
-}
-
-void msm_edp_aux_ctrl(struct edp_aux *aux, int enable)
-{
- u32 data;
-
- DBG("enable=%d", enable);
- data = edp_read(aux->base + REG_EDP_AUX_CTRL);
-
- if (enable) {
- data |= EDP_AUX_CTRL_RESET;
- edp_write(aux->base + REG_EDP_AUX_CTRL, data);
- /* Make sure full reset */
- wmb();
- usleep_range(500, 1000);
-
- data &= ~EDP_AUX_CTRL_RESET;
- data |= EDP_AUX_CTRL_ENABLE;
- edp_write(aux->base + REG_EDP_AUX_CTRL, data);
- } else {
- data &= ~EDP_AUX_CTRL_ENABLE;
- edp_write(aux->base + REG_EDP_AUX_CTRL, data);
- }
-}
-
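Once drm_dp_aux_register() has run, the aux channel is consumed through the standard DRM DP helpers rather than by calling edp_aux_transfer() directly. A minimal sketch, assuming drm_aux is the handle returned through msm_edp_aux_init() and the channel is powered (error handling elided):

    u8 rev;

    /* drm_dp_dpcd_readb() lands in edp_aux_transfer() above */
    if (drm_dp_dpcd_readb(drm_aux, DP_DPCD_REV, &rev) == 1)
        DBG("sink DPCD revision %d.%d", rev >> 4, rev & 0xf);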
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
deleted file mode 100644
index c69a37e0c708..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ /dev/null
@@ -1,111 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "edp.h"
-
-struct edp_bridge {
- struct drm_bridge base;
- struct msm_edp *edp;
-};
-#define to_edp_bridge(x) container_of(x, struct edp_bridge, base)
-
-void edp_bridge_destroy(struct drm_bridge *bridge)
-{
-}
-
-static void edp_bridge_pre_enable(struct drm_bridge *bridge)
-{
- struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
- struct msm_edp *edp = edp_bridge->edp;
-
- DBG("");
- msm_edp_ctrl_power(edp->ctrl, true);
-}
-
-static void edp_bridge_enable(struct drm_bridge *bridge)
-{
- DBG("");
-}
-
-static void edp_bridge_disable(struct drm_bridge *bridge)
-{
- DBG("");
-}
-
-static void edp_bridge_post_disable(struct drm_bridge *bridge)
-{
- struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
- struct msm_edp *edp = edp_bridge->edp;
-
- DBG("");
- msm_edp_ctrl_power(edp->ctrl, false);
-}
-
-static void edp_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = bridge->dev;
- struct drm_connector *connector;
- struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
- struct msm_edp *edp = edp_bridge->edp;
-
- DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_encoder *encoder = connector->encoder;
- struct drm_bridge *first_bridge;
-
- if (!connector->encoder)
- continue;
-
- first_bridge = drm_bridge_chain_get_first_bridge(encoder);
- if (bridge == first_bridge) {
- msm_edp_ctrl_timing_cfg(edp->ctrl,
- adjusted_mode, &connector->display_info);
- break;
- }
- }
-}
-
-static const struct drm_bridge_funcs edp_bridge_funcs = {
- .pre_enable = edp_bridge_pre_enable,
- .enable = edp_bridge_enable,
- .disable = edp_bridge_disable,
- .post_disable = edp_bridge_post_disable,
- .mode_set = edp_bridge_mode_set,
-};
-
-/* initialize bridge */
-struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
-{
- struct drm_bridge *bridge = NULL;
- struct edp_bridge *edp_bridge;
- int ret;
-
- edp_bridge = devm_kzalloc(edp->dev->dev,
- sizeof(*edp_bridge), GFP_KERNEL);
- if (!edp_bridge) {
- ret = -ENOMEM;
- goto fail;
- }
-
- edp_bridge->edp = edp;
-
- bridge = &edp_bridge->base;
- bridge->funcs = &edp_bridge_funcs;
-
- ret = drm_bridge_attach(edp->encoder, bridge, NULL, 0);
- if (ret)
- goto fail;
-
- return bridge;
-
-fail:
- if (bridge)
- edp_bridge_destroy(bridge);
-
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
deleted file mode 100644
index 73cb5fd97a5a..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ /dev/null
@@ -1,132 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "drm/drm_edid.h"
-#include "msm_kms.h"
-#include "edp.h"
-
-struct edp_connector {
- struct drm_connector base;
- struct msm_edp *edp;
-};
-#define to_edp_connector(x) container_of(x, struct edp_connector, base)
-
-static enum drm_connector_status edp_connector_detect(
- struct drm_connector *connector, bool force)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
- struct msm_edp *edp = edp_connector->edp;
-
- DBG("");
- return msm_edp_ctrl_panel_connected(edp->ctrl) ?
- connector_status_connected : connector_status_disconnected;
-}
-
-static void edp_connector_destroy(struct drm_connector *connector)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
-
- DBG("");
-
- drm_connector_cleanup(connector);
-
- kfree(edp_connector);
-}
-
-static int edp_connector_get_modes(struct drm_connector *connector)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
- struct msm_edp *edp = edp_connector->edp;
-
- struct edid *drm_edid = NULL;
- int ret = 0;
-
- DBG("");
- ret = msm_edp_ctrl_get_panel_info(edp->ctrl, connector, &drm_edid);
- if (ret)
- return ret;
-
- drm_connector_update_edid_property(connector, drm_edid);
- if (drm_edid)
- ret = drm_add_edid_modes(connector, drm_edid);
-
- return ret;
-}
-
-static int edp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
- struct msm_edp *edp = edp_connector->edp;
- struct msm_drm_private *priv = connector->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- long actual, requested;
-
- requested = 1000 * mode->clock;
- actual = kms->funcs->round_pixclk(kms,
- requested, edp_connector->edp->encoder);
-
- DBG("requested=%ld, actual=%ld", requested, actual);
- if (actual != requested)
- return MODE_CLOCK_RANGE;
-
- if (!msm_edp_ctrl_pixel_clock_valid(
- edp->ctrl, mode->clock, NULL, NULL))
- return MODE_CLOCK_RANGE;
-
- /* Invalidate all modes if color format is not supported */
- if (connector->display_info.bpc > 8)
- return MODE_BAD;
-
- return MODE_OK;
-}
-
-static const struct drm_connector_funcs edp_connector_funcs = {
- .detect = edp_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = edp_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
- .get_modes = edp_connector_get_modes,
- .mode_valid = edp_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
-{
- struct drm_connector *connector = NULL;
- struct edp_connector *edp_connector;
- int ret;
-
- edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL);
- if (!edp_connector)
- return ERR_PTR(-ENOMEM);
-
- edp_connector->edp = edp;
-
- connector = &edp_connector->base;
-
- ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs,
- DRM_MODE_CONNECTOR_eDP);
- if (ret)
- return ERR_PTR(ret);
-
- drm_connector_helper_add(connector, &edp_connector_helper_funcs);
-
- /* We don't support HPD, so only poll status until connected. */
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-
- /* The display driver doesn't support interlace for now. */
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
-
- drm_connector_attach_encoder(connector, edp->encoder);
-
- return connector;
-}
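edp_connector_mode_valid() above applies two gates: the KMS pixel clock must round to exactly the requested rate, and msm_edp_ctrl_pixel_clock_valid() (in edp_ctrl.c below) must find an M/N table entry within 1% of the mode clock. A worked instance of that tolerance check, assuming the FHD table row:

    /* divs[i].rate = 148500 kHz; a hypothetical request of 149000 kHz */
    u32 clk_err = 149000 - 148500;        /* = 500 kHz */
    /* tolerance: 148500 * 1 / 100 = 1485 >= 500, so the rate is accepted */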
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
deleted file mode 100644
index a68a4a1867c1..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ /dev/null
@@ -1,1373 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/gpio/consumer.h>
-#include <linux/regulator/consumer.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_edid.h>
-
-#include "edp.h"
-#include "edp.xml.h"
-
-#define VDDA_UA_ON_LOAD 100000 /* uA units */
-#define VDDA_UA_OFF_LOAD 100 /* uA units */
-
-#define DPCD_LINK_VOLTAGE_MAX 4
-#define DPCD_LINK_PRE_EMPHASIS_MAX 4
-
-#define EDP_LINK_BW_MAX DP_LINK_BW_2_7
-
-/* Link training return value */
-#define EDP_TRAIN_FAIL -1
-#define EDP_TRAIN_SUCCESS 0
-#define EDP_TRAIN_RECONFIG 1
-
-#define EDP_CLK_MASK_AHB BIT(0)
-#define EDP_CLK_MASK_AUX BIT(1)
-#define EDP_CLK_MASK_LINK BIT(2)
-#define EDP_CLK_MASK_PIXEL BIT(3)
-#define EDP_CLK_MASK_MDP_CORE BIT(4)
-#define EDP_CLK_MASK_LINK_CHAN (EDP_CLK_MASK_LINK | EDP_CLK_MASK_PIXEL)
-#define EDP_CLK_MASK_AUX_CHAN \
- (EDP_CLK_MASK_AHB | EDP_CLK_MASK_AUX | EDP_CLK_MASK_MDP_CORE)
-#define EDP_CLK_MASK_ALL (EDP_CLK_MASK_AUX_CHAN | EDP_CLK_MASK_LINK_CHAN)
-
-#define EDP_BACKLIGHT_MAX 255
-
-#define EDP_INTR_STATUS1 \
- (EDP_INTERRUPT_REG_1_HPD | EDP_INTERRUPT_REG_1_AUX_I2C_DONE | \
- EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \
- EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \
- EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER | \
- EDP_INTERRUPT_REG_1_PLL_UNLOCK | EDP_INTERRUPT_REG_1_AUX_ERROR)
-#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2)
-#define EDP_INTR_STATUS2 \
- (EDP_INTERRUPT_REG_2_READY_FOR_VIDEO | \
- EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT | \
- EDP_INTERRUPT_REG_2_FRAME_END | EDP_INTERRUPT_REG_2_CRC_UPDATED)
-#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2)
-
-struct edp_ctrl {
- struct platform_device *pdev;
-
- void __iomem *base;
-
- /* regulators */
- struct regulator *vdda_vreg; /* 1.8 V */
- struct regulator *lvl_vreg;
-
- /* clocks */
- struct clk *aux_clk;
- struct clk *pixel_clk;
- struct clk *ahb_clk;
- struct clk *link_clk;
- struct clk *mdp_core_clk;
-
- /* gpios */
- struct gpio_desc *panel_en_gpio;
- struct gpio_desc *panel_hpd_gpio;
-
- /* completion and mutex */
- struct completion idle_comp;
- struct mutex dev_mutex; /* To protect device power status */
-
- /* work queue */
- struct work_struct on_work;
- struct work_struct off_work;
- struct workqueue_struct *workqueue;
-
- /* Interrupt register lock */
- spinlock_t irq_lock;
-
- bool edp_connected;
- bool power_on;
-
- /* edid raw data */
- struct edid *edid;
-
- struct drm_dp_aux *drm_aux;
-
- /* dpcd raw data */
- u8 dpcd[DP_RECEIVER_CAP_SIZE];
-
- /* Link status */
- u8 link_rate;
- u8 lane_cnt;
- u8 v_level;
- u8 p_level;
-
- /* Timing status */
- u8 interlaced;
- u32 pixel_rate; /* in kHz */
- u32 color_depth;
-
- struct edp_aux *aux;
- struct edp_phy *phy;
-};
-
-struct edp_pixel_clk_div {
- u32 rate; /* in kHz */
- u32 m;
- u32 n;
-};
-
-#define EDP_PIXEL_CLK_NUM 8
-static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = {
- { /* Link clock = 162MHz, source clock = 810MHz */
- {119000, 31, 211}, /* WSXGA+ 1680x1050@60Hz CVT */
- {130250, 32, 199}, /* UXGA 1600x1200@60Hz CVT */
- {148500, 11, 60}, /* FHD 1920x1080@60Hz */
- {154000, 50, 263}, /* WUXGA 1920x1200@60Hz CVT */
- {209250, 31, 120}, /* QXGA 2048x1536@60Hz CVT */
- {268500, 119, 359}, /* WQXGA 2560x1600@60Hz CVT */
- {138530, 33, 193}, /* AUO B116HAN03.0 Panel */
- {141400, 48, 275}, /* AUO B133HTN01.2 Panel */
- },
- { /* Link clock = 270MHz, source clock = 675MHz */
- {119000, 52, 295}, /* WSXGA+ 1680x1050@60Hz CVT */
- {130250, 11, 57}, /* UXGA 1600x1200@60Hz CVT */
- {148500, 11, 50}, /* FHD 1920x1080@60Hz */
- {154000, 47, 206}, /* WUXGA 1920x1200@60Hz CVT */
- {209250, 31, 100}, /* QXGA 2048x1536@60Hz CVT */
- {268500, 107, 269}, /* WQXGA 2560x1600@60Hz CVT */
- {138530, 63, 307}, /* AUO B116HAN03.0 Panel */
- {141400, 53, 253}, /* AUO B133HTN01.2 Panel */
- },
-};
-
-static int edp_clk_init(struct edp_ctrl *ctrl)
-{
- struct platform_device *pdev = ctrl->pdev;
- int ret;
-
- ctrl->aux_clk = msm_clk_get(pdev, "core");
- if (IS_ERR(ctrl->aux_clk)) {
- ret = PTR_ERR(ctrl->aux_clk);
- pr_err("%s: Can't find core clock, %d\n", __func__, ret);
- ctrl->aux_clk = NULL;
- return ret;
- }
-
- ctrl->pixel_clk = msm_clk_get(pdev, "pixel");
- if (IS_ERR(ctrl->pixel_clk)) {
- ret = PTR_ERR(ctrl->pixel_clk);
- pr_err("%s: Can't find pixel clock, %d\n", __func__, ret);
- ctrl->pixel_clk = NULL;
- return ret;
- }
-
- ctrl->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(ctrl->ahb_clk)) {
- ret = PTR_ERR(ctrl->ahb_clk);
- pr_err("%s: Can't find iface clock, %d\n", __func__, ret);
- ctrl->ahb_clk = NULL;
- return ret;
- }
-
- ctrl->link_clk = msm_clk_get(pdev, "link");
- if (IS_ERR(ctrl->link_clk)) {
- ret = PTR_ERR(ctrl->link_clk);
- pr_err("%s: Can't find link clock, %d\n", __func__, ret);
- ctrl->link_clk = NULL;
- return ret;
- }
-
- /* need mdp core clock to receive irq */
- ctrl->mdp_core_clk = msm_clk_get(pdev, "mdp_core");
- if (IS_ERR(ctrl->mdp_core_clk)) {
- ret = PTR_ERR(ctrl->mdp_core_clk);
- pr_err("%s: Can't find mdp_core clock, %d\n", __func__, ret);
- ctrl->mdp_core_clk = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static int edp_clk_enable(struct edp_ctrl *ctrl, u32 clk_mask)
-{
- int ret;
-
- DBG("mask=%x", clk_mask);
- /* ahb_clk should be enabled first */
- if (clk_mask & EDP_CLK_MASK_AHB) {
- ret = clk_prepare_enable(ctrl->ahb_clk);
- if (ret) {
- pr_err("%s: Failed to enable ahb clk\n", __func__);
- goto f0;
- }
- }
- if (clk_mask & EDP_CLK_MASK_AUX) {
- ret = clk_set_rate(ctrl->aux_clk, 19200000);
- if (ret) {
- pr_err("%s: Failed to set rate aux clk\n", __func__);
- goto f1;
- }
- ret = clk_prepare_enable(ctrl->aux_clk);
- if (ret) {
- pr_err("%s: Failed to enable aux clk\n", __func__);
- goto f1;
- }
- }
- /* Need to set rate and enable link_clk prior to pixel_clk */
- if (clk_mask & EDP_CLK_MASK_LINK) {
- DBG("edp->link_clk, set_rate %ld",
- (unsigned long)ctrl->link_rate * 27000000);
- ret = clk_set_rate(ctrl->link_clk,
- (unsigned long)ctrl->link_rate * 27000000);
- if (ret) {
- pr_err("%s: Failed to set rate to link clk\n",
- __func__);
- goto f2;
- }
-
- ret = clk_prepare_enable(ctrl->link_clk);
- if (ret) {
- pr_err("%s: Failed to enable link clk\n", __func__);
- goto f2;
- }
- }
- if (clk_mask & EDP_CLK_MASK_PIXEL) {
- DBG("edp->pixel_clk, set_rate %ld",
- (unsigned long)ctrl->pixel_rate * 1000);
- ret = clk_set_rate(ctrl->pixel_clk,
- (unsigned long)ctrl->pixel_rate * 1000);
- if (ret) {
- pr_err("%s: Failed to set rate to pixel clk\n",
- __func__);
- goto f3;
- }
-
- ret = clk_prepare_enable(ctrl->pixel_clk);
- if (ret) {
- pr_err("%s: Failed to enable pixel clk\n", __func__);
- goto f3;
- }
- }
- if (clk_mask & EDP_CLK_MASK_MDP_CORE) {
- ret = clk_prepare_enable(ctrl->mdp_core_clk);
- if (ret) {
- pr_err("%s: Failed to enable mdp core clk\n", __func__);
- goto f4;
- }
- }
-
- return 0;
-
-f4:
- if (clk_mask & EDP_CLK_MASK_PIXEL)
- clk_disable_unprepare(ctrl->pixel_clk);
-f3:
- if (clk_mask & EDP_CLK_MASK_LINK)
- clk_disable_unprepare(ctrl->link_clk);
-f2:
- if (clk_mask & EDP_CLK_MASK_AUX)
- clk_disable_unprepare(ctrl->aux_clk);
-f1:
- if (clk_mask & EDP_CLK_MASK_AHB)
- clk_disable_unprepare(ctrl->ahb_clk);
-f0:
- return ret;
-}
-
-static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask)
-{
- if (clk_mask & EDP_CLK_MASK_MDP_CORE)
- clk_disable_unprepare(ctrl->mdp_core_clk);
- if (clk_mask & EDP_CLK_MASK_PIXEL)
- clk_disable_unprepare(ctrl->pixel_clk);
- if (clk_mask & EDP_CLK_MASK_LINK)
- clk_disable_unprepare(ctrl->link_clk);
- if (clk_mask & EDP_CLK_MASK_AUX)
- clk_disable_unprepare(ctrl->aux_clk);
- if (clk_mask & EDP_CLK_MASK_AHB)
- clk_disable_unprepare(ctrl->ahb_clk);
-}
-
-static int edp_regulator_init(struct edp_ctrl *ctrl)
-{
- struct device *dev = &ctrl->pdev->dev;
- int ret;
-
- DBG("");
- ctrl->vdda_vreg = devm_regulator_get(dev, "vdda");
- ret = PTR_ERR_OR_ZERO(ctrl->vdda_vreg);
- if (ret) {
- pr_err("%s: Could not get vdda reg, ret = %d\n", __func__,
- ret);
- ctrl->vdda_vreg = NULL;
- return ret;
- }
- ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd");
- ret = PTR_ERR_OR_ZERO(ctrl->lvl_vreg);
- if (ret) {
- pr_err("%s: Could not get lvl-vdd reg, ret = %d\n", __func__,
- ret);
- ctrl->lvl_vreg = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static int edp_regulator_enable(struct edp_ctrl *ctrl)
-{
- int ret;
-
- ret = regulator_set_load(ctrl->vdda_vreg, VDDA_UA_ON_LOAD);
- if (ret < 0) {
- pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
- goto vdda_set_fail;
- }
-
- ret = regulator_enable(ctrl->vdda_vreg);
- if (ret) {
- pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__);
- goto vdda_enable_fail;
- }
-
- ret = regulator_enable(ctrl->lvl_vreg);
- if (ret) {
- pr_err("Failed to enable lvl-vdd reg regulator, %d", ret);
- goto lvl_enable_fail;
- }
-
- DBG("exit");
- return 0;
-
-lvl_enable_fail:
- regulator_disable(ctrl->vdda_vreg);
-vdda_enable_fail:
- regulator_set_load(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
-vdda_set_fail:
- return ret;
-}
-
-static void edp_regulator_disable(struct edp_ctrl *ctrl)
-{
- regulator_disable(ctrl->lvl_vreg);
- regulator_disable(ctrl->vdda_vreg);
- regulator_set_load(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
-}
-
-static int edp_gpio_config(struct edp_ctrl *ctrl)
-{
- struct device *dev = &ctrl->pdev->dev;
- int ret;
-
- ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
- if (IS_ERR(ctrl->panel_hpd_gpio)) {
- ret = PTR_ERR(ctrl->panel_hpd_gpio);
- ctrl->panel_hpd_gpio = NULL;
- pr_err("%s: cannot get panel-hpd-gpios, %d\n", __func__, ret);
- return ret;
- }
-
- ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
- if (IS_ERR(ctrl->panel_en_gpio)) {
- ret = PTR_ERR(ctrl->panel_en_gpio);
- ctrl->panel_en_gpio = NULL;
- pr_err("%s: cannot get panel-en-gpios, %d\n", __func__, ret);
- return ret;
- }
-
- DBG("gpio on");
-
- return 0;
-}
-
-static void edp_ctrl_irq_enable(struct edp_ctrl *ctrl, int enable)
-{
- unsigned long flags;
-
- DBG("%d", enable);
- spin_lock_irqsave(&ctrl->irq_lock, flags);
- if (enable) {
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, EDP_INTR_MASK1);
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, EDP_INTR_MASK2);
- } else {
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, 0x0);
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, 0x0);
- }
- spin_unlock_irqrestore(&ctrl->irq_lock, flags);
- DBG("exit");
-}
-
-static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
-{
- u32 prate;
- u32 lrate;
- u32 bpp;
- u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd);
- u8 lane;
-
- prate = ctrl->pixel_rate;
- bpp = ctrl->color_depth * 3;
-
- /*
- * By default, use the maximum link rate and minimum lane count,
- * so that we can down-shift the rate during link training.
- */
- ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
-
- prate *= bpp;
- prate /= 8; /* in kByte */
-
- lrate = 270000; /* in kHz */
- lrate *= ctrl->link_rate;
- lrate /= 10; /* in kByte, 10 bits --> 8 bits */
-
- for (lane = 1; lane <= max_lane; lane <<= 1) {
- if (lrate >= prate)
- break;
- lrate <<= 1;
- }
-
- ctrl->lane_cnt = lane;
- DBG("rate=%d lane=%d", ctrl->link_rate, ctrl->lane_cnt);
-}
-
-static void edp_config_ctrl(struct edp_ctrl *ctrl)
-{
- u32 data;
- enum edp_color_depth depth;
-
- data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
-
- if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
- data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
-
- depth = EDP_6BIT;
- if (ctrl->color_depth == 8)
- depth = EDP_8BIT;
-
- data |= EDP_CONFIGURATION_CTRL_COLOR(depth);
-
- if (!ctrl->interlaced) /* progressive */
- data |= EDP_CONFIGURATION_CTRL_PROGRESSIVE;
-
- data |= (EDP_CONFIGURATION_CTRL_SYNC_CLK |
- EDP_CONFIGURATION_CTRL_STATIC_MVID);
-
- edp_write(ctrl->base + REG_EDP_CONFIGURATION_CTRL, data);
-}
-
-static void edp_state_ctrl(struct edp_ctrl *ctrl, u32 state)
-{
- edp_write(ctrl->base + REG_EDP_STATE_CTRL, state);
- /* Make sure H/W status is set */
- wmb();
-}
-
-static int edp_lane_set_write(struct edp_ctrl *ctrl,
- u8 voltage_level, u8 pre_emphasis_level)
-{
- int i;
- u8 buf[4];
-
- if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
- voltage_level |= 0x04;
-
- if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
- pre_emphasis_level |= 0x04;
-
- pre_emphasis_level <<= 3;
-
- for (i = 0; i < 4; i++)
- buf[i] = voltage_level | pre_emphasis_level;
-
- DBG("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level);
- if (drm_dp_dpcd_write(ctrl->drm_aux, 0x103, buf, 4) < 4) {
- pr_err("%s: Set sw/pe to panel failed\n", __func__);
- return -ENOLINK;
- }
-
- return 0;
-}
-
-static int edp_train_pattern_set_write(struct edp_ctrl *ctrl, u8 pattern)
-{
- u8 p = pattern;
-
- DBG("pattern=%x", p);
- if (drm_dp_dpcd_write(ctrl->drm_aux,
- DP_TRAINING_PATTERN_SET, &p, 1) < 1) {
- pr_err("%s: Set training pattern to panel failed\n", __func__);
- return -ENOLINK;
- }
-
- return 0;
-}
-
-static void edp_sink_train_set_adjust(struct edp_ctrl *ctrl,
- const u8 *link_status)
-{
- int i;
- u8 max = 0;
- u8 data;
-
- /* use the max level across lanes */
- for (i = 0; i < ctrl->lane_cnt; i++) {
- data = drm_dp_get_adjust_request_voltage(link_status, i);
- DBG("lane=%d req_voltage_swing=0x%x", i, data);
- if (max < data)
- max = data;
- }
-
- ctrl->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
-
- /* use the max level across lanes */
- max = 0;
- for (i = 0; i < ctrl->lane_cnt; i++) {
- data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
- DBG("lane=%d req_pre_emphasis=0x%x", i, data);
- if (max < data)
- max = data;
- }
-
- ctrl->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
- DBG("v_level=%d, p_level=%d", ctrl->v_level, ctrl->p_level);
-}
-
-static void edp_host_train_set(struct edp_ctrl *ctrl, u32 train)
-{
- int cnt = 10;
- u32 data;
- u32 shift = train - 1;
-
- DBG("train=%d", train);
-
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_TRAIN_PATTERN_1 << shift);
- while (--cnt) {
- data = edp_read(ctrl->base + REG_EDP_MAINLINK_READY);
- if (data & (EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY << shift))
- break;
- }
-
- if (cnt == 0)
- pr_err("%s: set link_train=%d failed\n", __func__, train);
-}
-
-static const u8 vm_pre_emphasis[4][4] = {
- {0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
- {0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
- {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
- {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
-};
-
-/* voltage swing, 0.2v and 1.0v are not supported */
-static const u8 vm_voltage_swing[4][4] = {
- {0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4 v */
- {0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
- {0x1A, 0x1E, 0xFF, 0xFF}, /* sw2, 0.8 v */
- {0x1E, 0xFF, 0xFF, 0xFF} /* sw3, 1.2 v, optional */
-};
-
-static int edp_voltage_pre_emphasise_set(struct edp_ctrl *ctrl)
-{
- u32 value0;
- u32 value1;
-
- DBG("v=%d p=%d", ctrl->v_level, ctrl->p_level);
-
- value0 = vm_pre_emphasis[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
- value1 = vm_voltage_swing[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
-
- /* Configure host and panel only if both values are allowed */
- if (value0 != 0xFF && value1 != 0xFF) {
- msm_edp_phy_vm_pe_cfg(ctrl->phy, value0, value1);
- return edp_lane_set_write(ctrl, ctrl->v_level, ctrl->p_level);
- }
-
- return -EINVAL;
-}
-
-static int edp_start_link_train_1(struct edp_ctrl *ctrl)
-{
- u8 link_status[DP_LINK_STATUS_SIZE];
- u8 old_v_level;
- int tries;
- int ret;
- int rlen;
-
- DBG("");
-
- edp_host_train_set(ctrl, DP_TRAINING_PATTERN_1);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- ret = edp_train_pattern_set_write(ctrl,
- DP_TRAINING_PATTERN_1 | DP_RECOVERED_CLOCK_OUT_EN);
- if (ret)
- return ret;
-
- tries = 0;
- old_v_level = ctrl->v_level;
- while (1) {
- drm_dp_link_train_clock_recovery_delay(ctrl->drm_aux, ctrl->dpcd);
-
- rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
- if (rlen < DP_LINK_STATUS_SIZE) {
- pr_err("%s: read link status failed\n", __func__);
- return -ENOLINK;
- }
- if (drm_dp_clock_recovery_ok(link_status, ctrl->lane_cnt)) {
- ret = 0;
- break;
- }
-
- if (ctrl->v_level == DPCD_LINK_VOLTAGE_MAX) {
- ret = -1;
- break;
- }
-
- if (old_v_level == ctrl->v_level) {
- tries++;
- if (tries >= 5) {
- ret = -1;
- break;
- }
- } else {
- tries = 0;
- old_v_level = ctrl->v_level;
- }
-
- edp_sink_train_set_adjust(ctrl, link_status);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int edp_start_link_train_2(struct edp_ctrl *ctrl)
-{
- u8 link_status[DP_LINK_STATUS_SIZE];
- int tries = 0;
- int ret;
- int rlen;
-
- DBG("");
-
- edp_host_train_set(ctrl, DP_TRAINING_PATTERN_2);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
-
- ret = edp_train_pattern_set_write(ctrl,
- DP_TRAINING_PATTERN_2 | DP_RECOVERED_CLOCK_OUT_EN);
- if (ret)
- return ret;
-
- while (1) {
- drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
-
- rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
- if (rlen < DP_LINK_STATUS_SIZE) {
- pr_err("%s: read link status failed\n", __func__);
- return -ENOLINK;
- }
- if (drm_dp_channel_eq_ok(link_status, ctrl->lane_cnt)) {
- ret = 0;
- break;
- }
-
- tries++;
- if (tries > 10) {
- ret = -1;
- break;
- }
-
- edp_sink_train_set_adjust(ctrl, link_status);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
-{
- u32 prate, lrate, bpp;
- u8 rate, lane, max_lane;
- int changed = 0;
-
- rate = ctrl->link_rate;
- lane = ctrl->lane_cnt;
- max_lane = drm_dp_max_lane_count(ctrl->dpcd);
-
- bpp = ctrl->color_depth * 3;
- prate = ctrl->pixel_rate;
- prate *= bpp;
- prate /= 8; /* in kByte */
-
- if (rate > DP_LINK_BW_1_62 && rate <= EDP_LINK_BW_MAX) {
- rate -= 4; /* reduce rate: DP_LINK_BW_2_7 (0x0a) down to DP_LINK_BW_1_62 (0x06) */
- changed++;
- }
-
- if (changed) {
- if (lane >= 1 && lane < max_lane)
- lane <<= 1; /* increase lane */
-
- lrate = 270000; /* in kHz */
- lrate *= rate;
- lrate /= 10; /* kByte, 10 bits --> 8 bits */
- lrate *= lane;
-
- DBG("new lrate=%u prate=%u(kHz) rate=%d lane=%d p=%u b=%d",
- lrate, prate, rate, lane,
- ctrl->pixel_rate,
- bpp);
-
- if (lrate > prate) {
- ctrl->link_rate = rate;
- ctrl->lane_cnt = lane;
- DBG("new rate=%d %d", rate, lane);
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
-{
- int ret;
-
- ret = edp_train_pattern_set_write(ctrl, 0);
-
- drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
-
- return ret;
-}
-
-static int edp_do_link_train(struct edp_ctrl *ctrl)
-{
- u8 values[2];
- int ret;
-
- DBG("");
- /*
- * Write the current link rate and lane count to the panel. They may
- * have been adjusted, so the values can differ from the DPCD caps.
- */
- values[0] = ctrl->lane_cnt;
- values[1] = ctrl->link_rate;
-
- if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
- values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values,
- sizeof(values)) < 0)
- return EDP_TRAIN_FAIL;
-
- ctrl->v_level = 0; /* start from default level */
- ctrl->p_level = 0;
-
- edp_state_ctrl(ctrl, 0);
- if (edp_clear_training_pattern(ctrl))
- return EDP_TRAIN_FAIL;
-
- ret = edp_start_link_train_1(ctrl);
- if (ret < 0) {
- if (edp_link_rate_down_shift(ctrl) == 0) {
- DBG("link reconfig");
- ret = EDP_TRAIN_RECONFIG;
- goto clear;
- } else {
- pr_err("%s: Training 1 failed", __func__);
- ret = EDP_TRAIN_FAIL;
- goto clear;
- }
- }
- DBG("Training 1 completed successfully");
-
- edp_state_ctrl(ctrl, 0);
- if (edp_clear_training_pattern(ctrl))
- return EDP_TRAIN_FAIL;
-
- ret = edp_start_link_train_2(ctrl);
- if (ret < 0) {
- if (edp_link_rate_down_shift(ctrl) == 0) {
- DBG("link reconfig");
- ret = EDP_TRAIN_RECONFIG;
- goto clear;
- } else {
- pr_err("%s: Training 2 failed", __func__);
- ret = EDP_TRAIN_FAIL;
- goto clear;
- }
- }
- DBG("Training 2 completed successfully");
-
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_SEND_VIDEO);
-clear:
- edp_clear_training_pattern(ctrl);
-
- return ret;
-}
-
-static void edp_clock_synchronous(struct edp_ctrl *ctrl, int sync)
-{
- u32 data;
- enum edp_color_depth depth;
-
- data = edp_read(ctrl->base + REG_EDP_MISC1_MISC0);
-
- if (sync)
- data |= EDP_MISC1_MISC0_SYNC;
- else
- data &= ~EDP_MISC1_MISC0_SYNC;
-
- /* only legacy rgb mode supported */
- depth = EDP_6BIT; /* Default */
- if (ctrl->color_depth == 8)
- depth = EDP_8BIT;
- else if (ctrl->color_depth == 10)
- depth = EDP_10BIT;
- else if (ctrl->color_depth == 12)
- depth = EDP_12BIT;
- else if (ctrl->color_depth == 16)
- depth = EDP_16BIT;
-
- data |= EDP_MISC1_MISC0_COLOR(depth);
-
- edp_write(ctrl->base + REG_EDP_MISC1_MISC0, data);
-}
-
-static int edp_sw_mvid_nvid(struct edp_ctrl *ctrl, u32 m, u32 n)
-{
- u32 n_multi, m_multi = 5;
-
- if (ctrl->link_rate == DP_LINK_BW_1_62) {
- n_multi = 1;
- } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
- n_multi = 2;
- } else {
- pr_err("%s: Invalid link rate, %d\n", __func__,
- ctrl->link_rate);
- return -EINVAL;
- }
-
- edp_write(ctrl->base + REG_EDP_SOFTWARE_MVID, m * m_multi);
- edp_write(ctrl->base + REG_EDP_SOFTWARE_NVID, n * n_multi);
-
- return 0;
-}
-
-static void edp_mainlink_ctrl(struct edp_ctrl *ctrl, int enable)
-{
- u32 data = 0;
-
- edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, EDP_MAINLINK_CTRL_RESET);
- /* Make sure fully reset */
- wmb();
- usleep_range(500, 1000);
-
- if (enable)
- data |= EDP_MAINLINK_CTRL_ENABLE;
-
- edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, data);
-}
-
-static void edp_ctrl_phy_aux_enable(struct edp_ctrl *ctrl, int enable)
-{
- if (enable) {
- edp_regulator_enable(ctrl);
- edp_clk_enable(ctrl, EDP_CLK_MASK_AUX_CHAN);
- msm_edp_phy_ctrl(ctrl->phy, 1);
- msm_edp_aux_ctrl(ctrl->aux, 1);
- gpiod_set_value(ctrl->panel_en_gpio, 1);
- } else {
- gpiod_set_value(ctrl->panel_en_gpio, 0);
- msm_edp_aux_ctrl(ctrl->aux, 0);
- msm_edp_phy_ctrl(ctrl->phy, 0);
- edp_clk_disable(ctrl, EDP_CLK_MASK_AUX_CHAN);
- edp_regulator_disable(ctrl);
- }
-}
-
-static void edp_ctrl_link_enable(struct edp_ctrl *ctrl, int enable)
-{
- u32 m, n;
-
- if (enable) {
- /* Enable link channel clocks */
- edp_clk_enable(ctrl, EDP_CLK_MASK_LINK_CHAN);
-
- msm_edp_phy_lane_power_ctrl(ctrl->phy, true, ctrl->lane_cnt);
-
- msm_edp_phy_vm_pe_init(ctrl->phy);
-
- /* Make sure phy is programmed */
- wmb();
- msm_edp_phy_ready(ctrl->phy);
-
- edp_config_ctrl(ctrl);
- msm_edp_ctrl_pixel_clock_valid(ctrl, ctrl->pixel_rate, &m, &n);
- edp_sw_mvid_nvid(ctrl, m, n);
- edp_mainlink_ctrl(ctrl, 1);
- } else {
- edp_mainlink_ctrl(ctrl, 0);
-
- msm_edp_phy_lane_power_ctrl(ctrl->phy, false, 0);
- edp_clk_disable(ctrl, EDP_CLK_MASK_LINK_CHAN);
- }
-}
-
-static int edp_ctrl_training(struct edp_ctrl *ctrl)
-{
- int ret;
-
- /* Do link training only when power is on */
- if (!ctrl->power_on)
- return -EINVAL;
-
-train_start:
- ret = edp_do_link_train(ctrl);
- if (ret == EDP_TRAIN_RECONFIG) {
- /* Re-configure main link */
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_link_enable(ctrl, 0);
- msm_edp_phy_ctrl(ctrl->phy, 0);
-
- /* Make sure link is fully disabled */
- wmb();
- usleep_range(500, 1000);
-
- msm_edp_phy_ctrl(ctrl->phy, 1);
- edp_ctrl_link_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- goto train_start;
- }
-
- return ret;
-}
-
-static void edp_ctrl_on_worker(struct work_struct *work)
-{
- struct edp_ctrl *ctrl = container_of(
- work, struct edp_ctrl, on_work);
- u8 value;
- int ret;
-
- mutex_lock(&ctrl->dev_mutex);
-
- if (ctrl->power_on) {
- DBG("already on");
- goto unlock_ret;
- }
-
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_link_enable(ctrl, 1);
-
- edp_ctrl_irq_enable(ctrl, 1);
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
- ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
- if (ret < 0)
- goto fail;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
- if (ret < 0)
- goto fail;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600)).
- */
- usleep_range(1000, 2000);
- }
-
- ctrl->power_on = true;
-
- /* Start link training */
- ret = edp_ctrl_training(ctrl);
- if (ret != EDP_TRAIN_SUCCESS)
- goto fail;
-
- DBG("DONE");
- goto unlock_ret;
-
-fail:
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_link_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- ctrl->power_on = false;
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
-}
-
-static void edp_ctrl_off_worker(struct work_struct *work)
-{
- struct edp_ctrl *ctrl = container_of(
- work, struct edp_ctrl, off_work);
- unsigned long time_left;
-
- mutex_lock(&ctrl->dev_mutex);
-
- if (!ctrl->power_on) {
- DBG("already off");
- goto unlock_ret;
- }
-
- reinit_completion(&ctrl->idle_comp);
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE);
-
- time_left = wait_for_completion_timeout(&ctrl->idle_comp,
- msecs_to_jiffies(500));
- if (!time_left)
- DBG("%s: idle pattern timedout\n", __func__);
-
- edp_state_ctrl(ctrl, 0);
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
- u8 value;
- int ret;
-
- ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
- if (ret > 0) {
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
- }
- }
-
- edp_ctrl_irq_enable(ctrl, 0);
-
- edp_ctrl_link_enable(ctrl, 0);
-
- edp_ctrl_phy_aux_enable(ctrl, 0);
-
- ctrl->power_on = false;
-
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
-}
-
-irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl)
-{
- u32 isr1, isr2, mask1, mask2;
- u32 ack;
-
- DBG("");
- spin_lock(&ctrl->irq_lock);
- isr1 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_1);
- isr2 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_2);
-
- mask1 = isr1 & EDP_INTR_MASK1;
- mask2 = isr2 & EDP_INTR_MASK2;
-
- isr1 &= ~mask1; /* remove mask bits */
- isr2 &= ~mask2;
-
- DBG("isr=%x mask=%x isr2=%x mask2=%x",
- isr1, mask1, isr2, mask2);
-
- ack = isr1 & EDP_INTR_STATUS1;
- ack <<= 1; /* ack bits */
- ack |= mask1;
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, ack);
-
- ack = isr2 & EDP_INTR_STATUS2;
- ack <<= 1; /* ack bits */
- ack |= mask2;
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, ack);
- spin_unlock(&ctrl->irq_lock);
-
- if (isr1 & EDP_INTERRUPT_REG_1_HPD)
- DBG("edp_hpd");
-
- if (isr2 & EDP_INTERRUPT_REG_2_READY_FOR_VIDEO)
- DBG("edp_video_ready");
-
- if (isr2 & EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT) {
- DBG("idle_patterns_sent");
- complete(&ctrl->idle_comp);
- }
-
- msm_edp_aux_irq(ctrl->aux, isr1);
-
- return IRQ_HANDLED;
-}
-
-void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
-{
- if (on)
- queue_work(ctrl->workqueue, &ctrl->on_work);
- else
- queue_work(ctrl->workqueue, &ctrl->off_work);
-}
-
-int msm_edp_ctrl_init(struct msm_edp *edp)
-{
- struct edp_ctrl *ctrl = NULL;
- struct device *dev;
- int ret;
-
- if (!edp) {
- pr_err("%s: edp is NULL!\n", __func__);
- return -EINVAL;
- }
-
- dev = &edp->pdev->dev;
- ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- edp->ctrl = ctrl;
- ctrl->pdev = edp->pdev;
-
- ctrl->base = msm_ioremap(ctrl->pdev, "edp", "eDP");
- if (IS_ERR(ctrl->base))
- return PTR_ERR(ctrl->base);
-
- /* Get regulators, clocks and GPIOs */
- ret = edp_regulator_init(ctrl);
- if (ret) {
- pr_err("%s: regulator init failed\n", __func__);
- return ret;
- }
- ret = edp_clk_init(ctrl);
- if (ret) {
- pr_err("%s: clk init failed\n", __func__);
- return ret;
- }
- ret = edp_gpio_config(ctrl);
- if (ret) {
- pr_err("%s: failed to configure GPIOs: %d\n", __func__, ret);
- return ret;
- }
-
- /* Init aux and phy */
- ctrl->aux = msm_edp_aux_init(edp, ctrl->base, &ctrl->drm_aux);
- if (!ctrl->aux || !ctrl->drm_aux) {
- pr_err("%s:failed to init aux\n", __func__);
- return -ENOMEM;
- }
-
- ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
- if (!ctrl->phy) {
- pr_err("%s:failed to init phy\n", __func__);
- ret = -ENOMEM;
- goto err_destroy_aux;
- }
-
- spin_lock_init(&ctrl->irq_lock);
- mutex_init(&ctrl->dev_mutex);
- init_completion(&ctrl->idle_comp);
-
- /* setup workqueue */
- ctrl->workqueue = alloc_ordered_workqueue("edp_drm_work", 0);
- INIT_WORK(&ctrl->on_work, edp_ctrl_on_worker);
- INIT_WORK(&ctrl->off_work, edp_ctrl_off_worker);
-
- return 0;
-
-err_destroy_aux:
- msm_edp_aux_destroy(dev, ctrl->aux);
- ctrl->aux = NULL;
- return ret;
-}
-
-void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl)
-{
- if (!ctrl)
- return;
-
- if (ctrl->workqueue) {
- destroy_workqueue(ctrl->workqueue);
- ctrl->workqueue = NULL;
- }
-
- if (ctrl->aux) {
- msm_edp_aux_destroy(&ctrl->pdev->dev, ctrl->aux);
- ctrl->aux = NULL;
- }
-
- kfree(ctrl->edid);
- ctrl->edid = NULL;
-
- mutex_destroy(&ctrl->dev_mutex);
-}
-
-bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl)
-{
- mutex_lock(&ctrl->dev_mutex);
- DBG("connect status = %d", ctrl->edp_connected);
- if (ctrl->edp_connected) {
- mutex_unlock(&ctrl->dev_mutex);
- return true;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- }
-
- if (drm_dp_dpcd_read(ctrl->drm_aux, DP_DPCD_REV, ctrl->dpcd,
- DP_RECEIVER_CAP_SIZE) < DP_RECEIVER_CAP_SIZE) {
- pr_err("%s: AUX channel is NOT ready\n", __func__);
- memset(ctrl->dpcd, 0, DP_RECEIVER_CAP_SIZE);
- } else {
- ctrl->edp_connected = true;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- }
-
- DBG("exit: connect status=%d", ctrl->edp_connected);
-
- mutex_unlock(&ctrl->dev_mutex);
-
- return ctrl->edp_connected;
-}
-
-int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
- struct drm_connector *connector, struct edid **edid)
-{
- mutex_lock(&ctrl->dev_mutex);
-
- if (ctrl->edid) {
- if (edid) {
- DBG("Just return edid buffer");
- *edid = ctrl->edid;
- }
- goto unlock_ret;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- }
-
- /* Initialize link rate as panel max link rate */
- ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
-
- ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
- if (!ctrl->edid) {
- pr_err("%s: edid read fail\n", __func__);
- goto disable_ret;
- }
-
- if (edid)
- *edid = ctrl->edid;
-
-disable_ret:
- if (!ctrl->power_on) {
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- }
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
- return 0;
-}
-
-int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
- const struct drm_display_mode *mode,
- const struct drm_display_info *info)
-{
- u32 hstart_from_sync, vstart_from_sync;
- u32 data;
- int ret = 0;
-
- mutex_lock(&ctrl->dev_mutex);
- /*
- * Keep the color depth, pixel rate and interlace
- * information in the ctrl context.
- */
- ctrl->color_depth = info->bpc;
- ctrl->pixel_rate = mode->clock;
- ctrl->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
-
- /* Fill initial link config based on passed in timing */
- edp_fill_link_cfg(ctrl);
-
- if (edp_clk_enable(ctrl, EDP_CLK_MASK_AHB)) {
- pr_err("%s, fail to prepare enable ahb clk\n", __func__);
- ret = -EINVAL;
- goto unlock_ret;
- }
- edp_clock_synchronous(ctrl, 1);
-
- /* Configure eDP timing to HW */
- edp_write(ctrl->base + REG_EDP_TOTAL_HOR_VER,
- EDP_TOTAL_HOR_VER_HORIZ(mode->htotal) |
- EDP_TOTAL_HOR_VER_VERT(mode->vtotal));
-
- vstart_from_sync = mode->vtotal - mode->vsync_start;
- hstart_from_sync = mode->htotal - mode->hsync_start;
- edp_write(ctrl->base + REG_EDP_START_HOR_VER_FROM_SYNC,
- EDP_START_HOR_VER_FROM_SYNC_HORIZ(hstart_from_sync) |
- EDP_START_HOR_VER_FROM_SYNC_VERT(vstart_from_sync));
-
- data = EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(
- mode->vsync_end - mode->vsync_start);
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(
- mode->hsync_end - mode->hsync_start);
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC;
- edp_write(ctrl->base + REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY, data);
-
- edp_write(ctrl->base + REG_EDP_ACTIVE_HOR_VER,
- EDP_ACTIVE_HOR_VER_HORIZ(mode->hdisplay) |
- EDP_ACTIVE_HOR_VER_VERT(mode->vdisplay));
-
- edp_clk_disable(ctrl, EDP_CLK_MASK_AHB);
-
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
- return ret;
-}
-
-bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
- u32 pixel_rate, u32 *pm, u32 *pn)
-{
- const struct edp_pixel_clk_div *divs;
- u32 err = 1; /* 1% error tolerance */
- u32 clk_err;
- int i;
-
- if (ctrl->link_rate == DP_LINK_BW_1_62) {
- divs = clk_divs[0];
- } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
- divs = clk_divs[1];
- } else {
- pr_err("%s: Invalid link rate,%d\n", __func__, ctrl->link_rate);
- return false;
- }
-
- for (i = 0; i < EDP_PIXEL_CLK_NUM; i++) {
- clk_err = abs(divs[i].rate - pixel_rate);
- if ((divs[i].rate * err / 100) >= clk_err) {
- if (pm)
- *pm = divs[i].m;
- if (pn)
- *pn = divs[i].n;
- return true;
- }
- }
-
- DBG("pixel clock %d(kHz) not supported", pixel_rate);
-
- return false;
-}
-
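To make edp_fill_link_cfg() concrete, a worked example under assumed panel values: a 1920x1080@60 mode (148500 kHz, per the clk_divs table), 8 bpc, and a DPCD max link rate of 0x0a (2.7 Gb/s):

    u32 prate = 148500 * 24 / 8;    /* 445500 kByte/s of video data */
    u32 lrate = 270000 * 0x0a / 10; /* 270000 kByte/s per 2.7G lane */
    /* lane = 1: 270000 <  445500, so double the lane count */
    /* lane = 2: 540000 >= 445500, so lane_cnt = 2 */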
diff --git a/drivers/gpu/drm/msm/edp/edp_phy.c b/drivers/gpu/drm/msm/edp/edp_phy.c
deleted file mode 100644
index fcaf7b7ecdd2..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_phy.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "edp.h"
-#include "edp.xml.h"
-
-#define EDP_MAX_LANE 4
-
-struct edp_phy {
- void __iomem *base;
-};
-
-bool msm_edp_phy_ready(struct edp_phy *phy)
-{
- u32 status;
- int cnt = 100;
-
- while (--cnt) {
- status = edp_read(phy->base +
- REG_EDP_PHY_GLB_PHY_STATUS);
- if (status & 0x01)
- break;
- usleep_range(500, 1000);
- }
-
- if (cnt == 0) {
- pr_err("%s: PHY NOT ready\n", __func__);
- return false;
- } else {
- return true;
- }
-}
-
-void msm_edp_phy_ctrl(struct edp_phy *phy, int enable)
-{
- DBG("enable=%d", enable);
- if (enable) {
- /* Reset */
- edp_write(phy->base + REG_EDP_PHY_CTRL,
- EDP_PHY_CTRL_SW_RESET | EDP_PHY_CTRL_SW_RESET_PLL);
- /* Make sure fully reset */
- wmb();
- usleep_range(500, 1000);
- edp_write(phy->base + REG_EDP_PHY_CTRL, 0x000);
- edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0x3f);
- edp_write(phy->base + REG_EDP_PHY_GLB_CFG, 0x1);
- } else {
- edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0xc0);
- }
-}
-
-/* voltage mode and pre emphasis cfg */
-void msm_edp_phy_vm_pe_init(struct edp_phy *phy)
-{
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, 0x3);
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, 0x64);
- edp_write(phy->base + REG_EDP_PHY_GLB_MISC9, 0x6c);
-}
-
-void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1)
-{
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, v0);
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, v1);
-}
-
-void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane)
-{
- u32 i;
- u32 data;
-
- if (up)
- data = 0; /* power up */
- else
- data = 0x7; /* power down */
-
- for (i = 0; i < max_lane; i++)
- edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data);
-
- /* power down unused lane */
- data = 0x7; /* power down */
- for (i = max_lane; i < EDP_MAX_LANE; i++)
- edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data);
-}
-
-void *msm_edp_phy_init(struct device *dev, void __iomem *regbase)
-{
- struct edp_phy *phy = NULL;
-
- phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
- if (!phy)
- return NULL;
-
- phy->base = regbase;
- return phy;
-}
-
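
Note: the removed msm_edp_phy_ready() above open-codes a bounded poll of a status bit. The same wait could be expressed with the generic helper from <linux/iopoll.h>; a sketch under the assumption that edp_read() is a plain readl() of MMIO, reusing the struct edp_phy removed above (the 100ms bound is illustrative):

	#include <linux/iopoll.h>

	static bool edp_phy_ready_poll(struct edp_phy *phy)
	{
		u32 status;

		/* poll bit 0, sleeping 500-1000us per read, give up after 100ms */
		return readl_poll_timeout(phy->base + REG_EDP_PHY_GLB_PHY_STATUS,
					  status, status & 0x01,
					  500, 100 * 1000) == 0;
	}
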
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 75b64e6ae035..719720709e9e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -8,6 +8,8 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <drm/drm_bridge_connector.h>
+
#include <sound/hdmi-codec.h>
#include "hdmi.h"
@@ -41,7 +43,7 @@ static irqreturn_t msm_hdmi_irq(int irq, void *dev_id)
struct hdmi *hdmi = dev_id;
/* Process HPD: */
- msm_hdmi_connector_irq(hdmi->connector);
+ msm_hdmi_hpd_irq(hdmi->bridge);
/* Process DDC: */
msm_hdmi_i2c_irq(hdmi->i2c);
@@ -95,8 +97,13 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
of_node_put(phy_node);
- if (!phy_pdev || !hdmi->phy) {
+ if (!phy_pdev) {
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
+ return -EPROBE_DEFER;
+ }
+ if (!hdmi->phy) {
DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
+ put_device(&phy_pdev->dev);
return -EPROBE_DEFER;
}
@@ -281,7 +288,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- hdmi->connector = msm_hdmi_connector_init(hdmi);
+ hdmi->connector = drm_bridge_connector_init(hdmi->dev, encoder);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
@@ -289,6 +296,8 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
+ drm_connector_attach_encoder(hdmi->connector, hdmi->encoder);
+
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (hdmi->irq < 0) {
ret = hdmi->irq;
@@ -305,7 +314,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = msm_hdmi_hpd_enable(hdmi->connector);
+ drm_bridge_connector_enable_hpd(hdmi->connector);
+
+ ret = msm_hdmi_hpd_enable(hdmi->bridge);
if (ret < 0) {
DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
goto fail;
@@ -514,8 +525,7 @@ static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
@@ -586,8 +596,8 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
static void msm_hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+
if (priv->hdmi) {
if (priv->hdmi->audio_pdev)
platform_device_unregister(priv->hdmi->audio_pdev);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 82261078c6b1..736f348befb3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -114,6 +114,13 @@ struct hdmi_platform_config {
struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO];
};
+struct hdmi_bridge {
+ struct drm_bridge base;
+ struct hdmi *hdmi;
+ struct work_struct hpd_work;
+};
+#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
+
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
@@ -230,13 +237,11 @@ void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
-/*
- * hdmi connector:
- */
-
-void msm_hdmi_connector_irq(struct drm_connector *connector);
-struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
-int msm_hdmi_hpd_enable(struct drm_connector *connector);
+void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
+enum drm_connector_status msm_hdmi_bridge_detect(
+ struct drm_bridge *bridge);
+int msm_hdmi_hpd_enable(struct drm_bridge *bridge);
+void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index f04eb4a70f0d..68fba4bf7212 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -5,17 +5,16 @@
*/
#include <linux/delay.h>
+#include <drm/drm_bridge_connector.h>
+#include "msm_kms.h"
#include "hdmi.h"
-struct hdmi_bridge {
- struct drm_bridge base;
- struct hdmi *hdmi;
-};
-#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
-
void msm_hdmi_bridge_destroy(struct drm_bridge *bridge)
{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+
+ msm_hdmi_hpd_disable(hdmi_bridge);
}
static void msm_hdmi_power_on(struct drm_bridge *bridge)
@@ -70,7 +69,7 @@ static void power_off(struct drm_bridge *bridge)
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret);
- pm_runtime_put_autosuspend(&hdmi->pdev->dev);
+ pm_runtime_put(&hdmi->pdev->dev);
}
#define AVI_IFRAME_LINE_NUMBER 1
@@ -251,14 +250,76 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
msm_hdmi_audio_update(hdmi);
}
+static struct edid *msm_hdmi_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct edid *edid;
+ uint32_t hdmi_ctrl;
+
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+
+ edid = drm_get_edid(connector, hdmi->i2c);
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
+
+ return edid;
+}
+
+static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct msm_drm_private *priv = bridge->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms,
+ requested, hdmi_bridge->hdmi->encoder);
+
+ /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
+ * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
+ * instead):
+ */
+ if (config->pwr_clk_cnt > 0)
+ actual = clk_round_rate(hdmi->pwr_clks[0], actual);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return 0;
+}
+
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
.pre_enable = msm_hdmi_bridge_pre_enable,
.enable = msm_hdmi_bridge_enable,
.disable = msm_hdmi_bridge_disable,
.post_disable = msm_hdmi_bridge_post_disable,
.mode_set = msm_hdmi_bridge_mode_set,
+ .mode_valid = msm_hdmi_bridge_mode_valid,
+ .get_edid = msm_hdmi_bridge_get_edid,
+ .detect = msm_hdmi_bridge_detect,
};
+static void
+msm_hdmi_hotplug_work(struct work_struct *work)
+{
+ struct hdmi_bridge *hdmi_bridge =
+ container_of(work, struct hdmi_bridge, hpd_work);
+ struct drm_bridge *bridge = &hdmi_bridge->base;
+
+ drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge));
+}
/* initialize bridge */
struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
@@ -275,11 +336,17 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
}
hdmi_bridge->hdmi = hdmi;
+ INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
bridge = &hdmi_bridge->base;
bridge->funcs = &msm_hdmi_bridge_funcs;
+ bridge->ddc = hdmi->i2c;
+ bridge->type = DRM_MODE_CONNECTOR_HDMIA;
+ bridge->ops = DRM_BRIDGE_OP_HPD |
+ DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID;
- ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0);
+ ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index a7f729cdec7b..75605ddac7c4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -11,13 +11,6 @@
#include "msm_kms.h"
#include "hdmi.h"
-struct hdmi_connector {
- struct drm_connector base;
- struct hdmi *hdmi;
- struct work_struct hpd_work;
-};
-#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
-
static void msm_hdmi_phy_reset(struct hdmi *hdmi)
{
unsigned int val;
@@ -139,10 +132,10 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
}
}
-int msm_hdmi_hpd_enable(struct drm_connector *connector)
+int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
uint32_t hpd_ctrl;
@@ -199,9 +192,9 @@ fail:
return ret;
}
-static void hdp_disable(struct hdmi_connector *hdmi_connector)
+void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge)
{
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
int ret;
@@ -212,7 +205,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
msm_hdmi_set_mode(hdmi, false);
enable_hpd_clocks(hdmi, false);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put(dev);
ret = gpio_config(hdmi, false);
if (ret)
@@ -227,19 +220,10 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
dev_warn(dev, "failed to disable hpd regulator: %d\n", ret);
}
-static void
-msm_hdmi_hotplug_work(struct work_struct *work)
-{
- struct hdmi_connector *hdmi_connector =
- container_of(work, struct hdmi_connector, hpd_work);
- struct drm_connector *connector = &hdmi_connector->base;
- drm_helper_hpd_irq_event(connector->dev);
-}
-
-void msm_hdmi_connector_irq(struct drm_connector *connector)
+void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
uint32_t hpd_int_status, hpd_int_ctrl;
/* Process HPD: */
@@ -262,7 +246,7 @@ void msm_hdmi_connector_irq(struct drm_connector *connector)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
- queue_work(hdmi->workq, &hdmi_connector->hpd_work);
+ queue_work(hdmi->workq, &hdmi_bridge->hpd_work);
}
}
@@ -276,7 +260,7 @@ static enum drm_connector_status detect_reg(struct hdmi *hdmi)
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
enable_hpd_clocks(hdmi, false);
- pm_runtime_put_autosuspend(&hdmi->pdev->dev);
+ pm_runtime_put(&hdmi->pdev->dev);
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
connector_status_connected : connector_status_disconnected;
@@ -293,11 +277,11 @@ static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
connector_status_disconnected;
}
-static enum drm_connector_status hdmi_connector_detect(
- struct drm_connector *connector, bool force)
+enum drm_connector_status msm_hdmi_bridge_detect(
+ struct drm_bridge *bridge)
{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
enum drm_connector_status stat_gpio, stat_reg;
@@ -331,115 +315,3 @@ static enum drm_connector_status hdmi_connector_detect(
return stat_gpio;
}
-
-static void hdmi_connector_destroy(struct drm_connector *connector)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-
- hdp_disable(hdmi_connector);
-
- drm_connector_cleanup(connector);
-
- kfree(hdmi_connector);
-}
-
-static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
- struct edid *edid;
- uint32_t hdmi_ctrl;
- int ret = 0;
-
- hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
- hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
-
- edid = drm_get_edid(connector, hdmi->i2c);
-
- hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
-
- hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- drm_connector_update_edid_property(connector, edid);
-
- if (edid) {
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
-
- return ret;
-}
-
-static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- struct hdmi *hdmi = hdmi_connector->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- struct msm_drm_private *priv = connector->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- long actual, requested;
-
- requested = 1000 * mode->clock;
- actual = kms->funcs->round_pixclk(kms,
- requested, hdmi_connector->hdmi->encoder);
-
- /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
- * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
- * instead):
- */
- if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], actual);
-
- DBG("requested=%ld, actual=%ld", requested, actual);
-
- if (actual != requested)
- return MODE_CLOCK_RANGE;
-
- return 0;
-}
-
-static const struct drm_connector_funcs hdmi_connector_funcs = {
- .detect = hdmi_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = hdmi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
- .get_modes = msm_hdmi_connector_get_modes,
- .mode_valid = msm_hdmi_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
-{
- struct drm_connector *connector = NULL;
- struct hdmi_connector *hdmi_connector;
-
- hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
- if (!hdmi_connector)
- return ERR_PTR(-ENOMEM);
-
- hdmi_connector->hdmi = hdmi;
- INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work);
-
- connector = &hdmi_connector->base;
-
- drm_connector_init_with_ddc(hdmi->dev, connector,
- &hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- hdmi->i2c);
- drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs);
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_connector_attach_encoder(connector, hdmi->encoder);
-
- return connector;
-}
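
Note: the connector-to-bridge conversion above follows the standard drm_bridge_connector pattern: the driver's bridge declares which probe operations it supports (HPD, detect, EDID), attaches with DRM_BRIDGE_ATTACH_NO_CONNECTOR, and the core builds a connector from the encoder's bridge chain. A minimal sketch of the registration side, with placeholder names for everything outside the DRM API:

	#include <linux/err.h>
	#include <drm/drm_bridge.h>
	#include <drm/drm_bridge_connector.h>
	#include <drm/drm_connector.h>

	/* placeholder ops table; .detect/.get_edid/.mode_valid would live here */
	static const struct drm_bridge_funcs example_bridge_funcs;

	static int example_bridge_register(struct drm_device *drm,
					   struct drm_encoder *encoder,
					   struct drm_bridge *bridge)
	{
		struct drm_connector *connector;
		int ret;

		bridge->funcs = &example_bridge_funcs;
		bridge->type = DRM_MODE_CONNECTOR_HDMIA;
		bridge->ops = DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT |
			      DRM_BRIDGE_OP_EDID;

		/* attach without creating a connector; the helper does that */
		ret = drm_bridge_attach(encoder, bridge, NULL,
					DRM_BRIDGE_ATTACH_NO_CONNECTOR);
		if (ret)
			return ret;

		connector = drm_bridge_connector_init(drm, encoder);
		if (IS_ERR(connector))
			return PTR_ERR(connector);

		return drm_connector_attach_encoder(connector, encoder);
	}
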
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index dee13fedee3b..0804c31e8962 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -15,6 +15,11 @@
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
+#include "disp/msm_disp_snapshot.h"
+
+/*
+ * GPU Snapshot:
+ */
struct msm_gpu_show_priv {
struct msm_gpu_state *state;
@@ -29,14 +34,14 @@ static int msm_gpu_show(struct seq_file *m, void *arg)
struct msm_gpu *gpu = priv->gpu;
int ret;
- ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ ret = mutex_lock_interruptible(&gpu->lock);
if (ret)
return ret;
drm_printf(&p, "%s Status:\n", gpu->name);
gpu->funcs->show(gpu, show_priv->state, &p);
- mutex_unlock(&show_priv->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return 0;
}
@@ -48,9 +53,9 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
struct msm_drm_private *priv = show_priv->dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
- mutex_lock(&show_priv->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
gpu->funcs->gpu_state_put(show_priv->state);
- mutex_unlock(&show_priv->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
kfree(show_priv);
@@ -72,7 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
if (!show_priv)
return -ENOMEM;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&gpu->lock);
if (ret)
goto free_priv;
@@ -81,7 +86,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
show_priv->state = gpu->funcs->gpu_state_get(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
if (IS_ERR(show_priv->state)) {
ret = PTR_ERR(show_priv->state);
@@ -109,6 +114,73 @@ static const struct file_operations msm_gpu_fops = {
.release = msm_gpu_release,
};
+/*
+ * Display Snapshot:
+ */
+
+static int msm_kms_show(struct seq_file *m, void *arg)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct msm_disp_state *state = m->private;
+
+ msm_disp_state_print(state, &p);
+
+ return 0;
+}
+
+static int msm_kms_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct msm_disp_state *state = m->private;
+
+ msm_disp_state_free(state);
+
+ return single_release(inode, file);
+}
+
+static int msm_kms_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_disp_state *state;
+ int ret;
+
+ if (!priv->kms)
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&priv->kms->dump_mutex);
+ if (ret)
+ return ret;
+
+ state = msm_disp_snapshot_state_sync(priv->kms);
+
+ mutex_unlock(&priv->kms->dump_mutex);
+
+ if (IS_ERR(state)) {
+ return PTR_ERR(state);
+ }
+
+ ret = single_open(file, msm_kms_show, state);
+ if (ret) {
+ msm_disp_state_free(state);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct file_operations msm_kms_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_kms_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = msm_kms_release,
+};
+
+/*
+ * Other debugfs:
+ */
+
static unsigned long last_shrink_freed;
static int
@@ -134,8 +206,10 @@ DEFINE_SIMPLE_ATTRIBUTE(shrink_fops,
"0x%08llx\n");
-static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
+static int msm_gem_show(struct seq_file *m, void *arg)
{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
int ret;
@@ -150,8 +224,10 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
-static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
+static int msm_mm_show(struct seq_file *m, void *arg)
{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
@@ -159,8 +235,10 @@ static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
-static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
+static int msm_fb_show(struct seq_file *m, void *arg)
{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb, *fbdev_fb = NULL;
@@ -183,29 +261,10 @@ static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
-static int show_locked(struct seq_file *m, void *arg)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int (*show)(struct drm_device *dev, struct seq_file *m) =
- node->info_ent->data;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ret = show(dev, m);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
static struct drm_info_list msm_debugfs_list[] = {
- {"gem", show_locked, 0, msm_gem_show},
- { "mm", show_locked, 0, msm_mm_show },
- { "fb", show_locked, 0, msm_fb_show },
+ {"gem", msm_gem_show},
+ { "mm", msm_mm_show },
+ { "fb", msm_fb_show },
};
static int late_init_minor(struct drm_minor *minor)
@@ -252,9 +311,15 @@ void msm_debugfs_init(struct drm_minor *minor)
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
+ debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
+ dev, &msm_kms_fops);
+
debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
&priv->hangcheck_period);
+ debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
+ &priv->disable_err_irq);
+
debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
dev, &shrink_fops);
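
Note: msm_kms_open() above uses the usual seq_file idiom for expensive-to-capture state: grab the snapshot once at open time, hand it to single_open() as the seq_file's private data, and free it in the matching .release. A generic sketch of the same pattern with a placeholder state type:

	#include <linux/fs.h>
	#include <linux/seq_file.h>
	#include <linux/slab.h>

	struct example_state { int value; };	/* hypothetical captured state */

	static int example_show(struct seq_file *m, void *arg)
	{
		struct example_state *state = m->private;

		seq_printf(m, "value: %d\n", state->value);
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		struct example_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
		int ret;

		if (!state)
			return -ENOMEM;

		/* ownership passes to the seq_file on success */
		ret = single_open(file, example_show, state);
		if (ret)
			kfree(state);
		return ret;
	}

	static int example_release(struct inode *inode, struct file *file)
	{
		struct seq_file *m = file->private_data;

		kfree(m->private);
		return single_release(inode, file);
	}
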
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 892c04365239..555666e3f960 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -339,10 +339,9 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
static int msm_drm_uninit(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *ddev = platform_get_drvdata(pdev);
- struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *ddev = priv->dev;
struct msm_kms *kms = priv->kms;
- struct msm_mdss *mdss = priv->mdss;
int i;
/*
@@ -402,14 +401,10 @@ static int msm_drm_uninit(struct device *dev)
component_unbind_all(dev, ddev);
- if (mdss && mdss->funcs)
- mdss->funcs->destroy(ddev);
-
ddev->dev_private = NULL;
drm_dev_put(ddev);
destroy_workqueue(priv->wq);
- kfree(priv);
return 0;
}
@@ -466,7 +461,7 @@ static int msm_init_vram(struct drm_device *dev)
of_node_put(node);
if (ret)
return ret;
- size = r.end - r.start;
+ size = r.end - r.start + 1;
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
/* if we have no IOMMU, then we need to use carveout allocator.
@@ -512,10 +507,9 @@ static int msm_init_vram(struct drm_device *dev)
static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
- struct msm_drm_private *priv;
struct msm_kms *kms;
- struct msm_mdss *mdss;
int ret, i;
ddev = drm_dev_alloc(drv, dev);
@@ -523,34 +517,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
return PTR_ERR(ddev);
}
-
- platform_set_drvdata(pdev, ddev);
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_put_drm_dev;
- }
-
ddev->dev_private = priv;
priv->dev = ddev;
- switch (get_mdp_ver(pdev)) {
- case KMS_MDP5:
- ret = mdp5_mdss_init(ddev);
- break;
- case KMS_DPU:
- ret = dpu_mdss_init(ddev);
- break;
- default:
- ret = 0;
- break;
- }
- if (ret)
- goto err_free_priv;
-
- mdss = priv->mdss;
-
priv->wq = alloc_ordered_workqueue("msm", 0);
priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
@@ -571,12 +540,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ret = msm_init_vram(ddev);
if (ret)
- goto err_destroy_mdss;
+ return ret;
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret)
- goto err_destroy_mdss;
+ return ret;
dma_set_max_seg_size(dev, UINT_MAX);
@@ -682,15 +651,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
err_msm_uninit:
msm_drm_uninit(dev);
return ret;
-err_destroy_mdss:
- if (mdss && mdss->funcs)
- mdss->funcs->destroy(ddev);
-err_free_priv:
- kfree(priv);
-err_put_drm_dev:
- drm_dev_put(ddev);
- platform_set_drvdata(pdev, NULL);
- return ret;
}
/*
@@ -752,14 +712,8 @@ static void context_close(struct msm_file_private *ctx)
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
- if (ctx == priv->lastctx)
- priv->lastctx = NULL;
- mutex_unlock(&dev->struct_mutex);
-
context_close(ctx);
}
@@ -973,7 +927,7 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
struct dma_fence *fence;
int ret;
- if (fence_id > queue->last_fence) {
+ if (fence_after(fence_id, queue->last_fence)) {
DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
fence_id, queue->last_fence);
return -EINVAL;
@@ -1142,8 +1096,7 @@ static const struct drm_driver msm_driver = {
static int __maybe_unused msm_runtime_suspend(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
struct msm_mdss *mdss = priv->mdss;
DBG("");
@@ -1156,8 +1109,7 @@ static int __maybe_unused msm_runtime_suspend(struct device *dev)
static int __maybe_unused msm_runtime_resume(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
struct msm_mdss *mdss = priv->mdss;
DBG("");
@@ -1187,8 +1139,8 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
static int __maybe_unused msm_pm_prepare(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *ddev = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return 0;
@@ -1198,8 +1150,8 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
static void __maybe_unused msm_pm_complete(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *ddev = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return;
@@ -1292,9 +1244,10 @@ static int add_components_mdp(struct device *mdp_dev,
return 0;
}
-static int compare_name_mdp(struct device *dev, void *data)
+static int find_mdp_node(struct device *dev, void *data)
{
- return (strstr(dev_name(dev), "mdp") != NULL);
+ return of_match_node(dpu_dt_match, dev->of_node) ||
+ of_match_node(mdp5_dt_match, dev->of_node);
}
static int add_display_components(struct platform_device *pdev,
@@ -1319,7 +1272,7 @@ static int add_display_components(struct platform_device *pdev,
return ret;
}
- mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
+ mdp_dev = device_find_child(dev, NULL, find_mdp_node);
if (!mdp_dev) {
DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
of_platform_depopulate(dev);
@@ -1397,12 +1350,35 @@ static const struct component_master_ops msm_drm_ops = {
static int msm_pdev_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
+ struct msm_drm_private *priv;
int ret;
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ switch (get_mdp_ver(pdev)) {
+ case KMS_MDP5:
+ ret = mdp5_mdss_init(pdev);
+ break;
+ case KMS_DPU:
+ ret = dpu_mdss_init(pdev);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ if (ret) {
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+ }
+
if (get_mdp_ver(pdev)) {
ret = add_display_components(pdev, &match);
if (ret)
- return ret;
+ goto fail;
}
ret = add_gpu_components(&pdev->dev, &match);
@@ -1424,21 +1400,31 @@ static int msm_pdev_probe(struct platform_device *pdev)
fail:
of_platform_depopulate(&pdev->dev);
+
+ if (priv->mdss && priv->mdss->funcs)
+ priv->mdss->funcs->destroy(priv->mdss);
+
return ret;
}
static int msm_pdev_remove(struct platform_device *pdev)
{
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct msm_mdss *mdss = priv->mdss;
+
component_master_del(&pdev->dev, &msm_drm_ops);
of_platform_depopulate(&pdev->dev);
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(mdss);
+
return 0;
}
static void msm_pdev_shutdown(struct platform_device *pdev)
{
- struct drm_device *drm = platform_get_drvdata(pdev);
- struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *drm = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return;
@@ -1478,7 +1464,6 @@ static int __init msm_drm_register(void)
msm_mdp_register();
msm_dpu_register();
msm_dsi_register();
- msm_edp_register();
msm_hdmi_register();
msm_dp_register();
adreno_register();
@@ -1492,7 +1477,6 @@ static void __exit msm_drm_unregister(void)
msm_dp_unregister();
msm_hdmi_unregister();
adreno_unregister();
- msm_edp_unregister();
msm_dsi_unregister();
msm_mdp_unregister();
msm_dpu_unregister();
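
Note: a recurring change in msm_drv.c above is that the platform device's drvdata now holds the msm_drm_private pointer instead of the drm_device, so driver state exists before the drm_device is allocated and survives its teardown. A sketch of the general probe-time pattern with a placeholder private type:

	#include <linux/platform_device.h>

	struct example_priv { int dummy; };	/* hypothetical driver state */

	static int example_probe(struct platform_device *pdev)
	{
		struct example_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* later callbacks recover it via dev_get_drvdata(&pdev->dev) */
		platform_set_drvdata(pdev, priv);
		return 0;
	}
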
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 69952b239384..d7574e6bd4e4 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -151,12 +151,6 @@ struct msm_drm_private {
*/
struct hdmi *hdmi;
- /* eDP is for mdp5 only, but kms has not been created
- * when edp_bind() and edp_init() are called. Here is the only
- * place to keep the edp instance.
- */
- struct msm_edp *edp;
-
/* DSI is shared by mdp4 and mdp5 */
struct msm_dsi *dsi[2];
@@ -164,7 +158,7 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
- struct msm_file_private *lastctx;
+
/* gpu is only set on open(), but we need this info earlier */
bool is_a2xx;
bool has_cached_coherent;
@@ -246,6 +240,15 @@ struct msm_drm_private {
/* For hang detection, in ms */
unsigned int hangcheck_period;
+
+ /**
+ * disable_err_irq:
+ *
+ * Disable handling of GPU hw error interrupts, to force fallback to
+ * sw hangcheck timer. Written (via debugfs) by igt tests to test
+ * the sw hangcheck mechanism.
+ */
+ bool disable_err_irq;
};
struct msm_format {
@@ -335,14 +338,10 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);
-struct msm_edp;
-void __init msm_edp_register(void);
-void __exit msm_edp_unregister(void);
-int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
- struct drm_encoder *encoder);
-
struct msm_dsi;
#ifdef CONFIG_DRM_MSM_DSI
+int dsi_dev_attach(struct platform_device *pdev);
+void dsi_dev_detach(struct platform_device *pdev);
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
@@ -390,8 +389,12 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder);
int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder);
int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder);
void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode);
+
+struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display,
+ struct drm_device *dev,
+ struct drm_encoder *encoder);
void msm_dp_irq_postinstall(struct msm_dp *dp_display);
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
@@ -428,8 +431,8 @@ static inline int msm_dp_display_pre_disable(struct msm_dp *dp,
}
static inline void msm_dp_display_mode_set(struct msm_dp *dp,
struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 0daaeb54ff6f..4c39ef9dd75d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -81,8 +81,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
bo = msm_framebuffer_bo(fb, 0);
- mutex_lock(&dev->struct_mutex);
-
/*
* NOTE: if we can be guaranteed to be able to map buffer
* in panic (ie. lock-safe, etc) we could avoid pinning the
@@ -91,14 +89,14 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
- goto fail_unlock;
+ goto fail;
}
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
- goto fail_unlock;
+ goto fail;
}
DBG("fbi=%p, dev=%p", fbi, dev);
@@ -115,7 +113,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fbi->screen_base = msm_gem_get_vaddr(bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
- goto fail_unlock;
+ goto fail;
}
fbi->screen_size = bo->size;
fbi->fix.smem_start = paddr;
@@ -124,12 +122,9 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
- mutex_unlock(&dev->struct_mutex);
-
return 0;
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
+fail:
drm_framebuffer_remove(fb);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 4783db528bcc..17ee3822b423 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -60,4 +60,16 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
+static inline bool
+fence_before(uint32_t a, uint32_t b)
+{
+ return (int32_t)(a - b) < 0;
+}
+
+static inline bool
+fence_after(uint32_t a, uint32_t b)
+{
+ return (int32_t)(a - b) > 0;
+}
+
#endif
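
Note: fence_before()/fence_after() compare 32-bit seqnos through a signed cast of the difference, so the ordering remains correct across counter wraparound; the hunks in msm_gpu.c and msm_gpu.h below switch raw <, > comparisons over to them. A self-contained illustration of why the cast matters:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	static bool fence_after(uint32_t a, uint32_t b)
	{
		/* the signed difference gives the right answer while the
		 * two seqnos are less than 2^31 apart */
		return (int32_t)(a - b) > 0;
	}

	int main(void)
	{
		assert(fence_after(2, 1));
		/* naive "2 > 0xffffffff" says no, but 2 was issued after
		 * the counter wrapped */
		assert(fence_after(2, 0xffffffffu));
		assert(!fence_after(0xffffffffu, 2));
		return 0;
	}
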
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 512d55eecbaf..02b9ae65a96a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -866,23 +866,11 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
}
#ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct dma_fence *fence, const char *type,
- struct seq_file *m)
-{
- if (!dma_fence_is_signaled(fence))
- seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- fence->seqno);
-}
-
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
struct msm_gem_stats *stats)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
- struct dma_resv_list *fobj;
- struct dma_fence *fence;
struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
@@ -956,22 +944,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
seq_puts(m, "\n");
}
- rcu_read_lock();
- fobj = dma_resv_shared_list(robj);
- if (fobj) {
- unsigned int i, shared_count = fobj->shared_count;
-
- for (i = 0; i < shared_count; i++) {
- fence = rcu_dereference(fobj->shared[i]);
- describe_fence(fence, "Shared", m);
- }
- }
-
- fence = dma_resv_excl_fence(robj);
- if (fence)
- describe_fence(fence, "Exclusive", m);
- rcu_read_unlock();
-
+ dma_resv_describe(robj, m);
msm_gem_unlock(obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 282628d6b72c..6cfa984dee6a 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -881,7 +881,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* to the underlying fence.
*/
submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
- submit->user_fence, 0, INT_MAX, GFP_KERNEL);
+ submit->user_fence, 1, INT_MAX, GFP_KERNEL);
if (submit->fence_id < 0) {
ret = submit->fence_id;
submit->fence_id = 0;
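
Note: starting idr_alloc_cyclic() at 1 instead of 0 reserves id 0 as a "no fence" sentinel, so the error path just above can reset fence_id to 0 without ever colliding with a live id. A one-line sketch of the allocation:

	#include <linux/gfp.h>
	#include <linux/idr.h>

	/* ids come from [1, INT_MAX), leaving 0 free as the sentinel */
	static int alloc_fence_id(struct idr *fence_idr, void *fence)
	{
		return idr_alloc_cyclic(fence_idr, fence, 1, INT_MAX, GFP_KERNEL);
	}
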
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 2c46cd968ac4..2c1049c0ea14 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -150,7 +150,7 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
{
int ret;
- WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&gpu->lock));
if (!gpu->needs_hw_init)
return 0;
@@ -172,7 +172,7 @@ static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
spin_lock_irqsave(&ring->submit_lock, flags);
list_for_each_entry(submit, &ring->submits, node) {
- if (submit->seqno > fence)
+ if (fence_after(submit->seqno, fence))
break;
msm_update_fence(submit->ring->fctx,
@@ -361,7 +361,7 @@ static void recover_worker(struct kthread_work *work)
char *comm = NULL, *cmd = NULL;
int i;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
@@ -442,7 +442,7 @@ static void recover_worker(struct kthread_work *work)
}
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
}
@@ -450,12 +450,11 @@ static void recover_worker(struct kthread_work *work)
static void fault_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
- struct drm_device *dev = gpu->dev;
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit && submit->fault_dumped)
@@ -490,7 +489,7 @@ resume_smmu:
memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
}
static void hangcheck_timer_reset(struct msm_gpu *gpu)
@@ -510,7 +509,7 @@ static void hangcheck_handler(struct timer_list *t)
if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
ring->hangcheck_fence = fence;
- } else if (fence < ring->seqno) {
+ } else if (fence_before(fence, ring->seqno)) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
@@ -524,7 +523,7 @@ static void hangcheck_handler(struct timer_list *t)
}
/* if still more pending work, reset the hangcheck timer: */
- if (ring->seqno > ring->hangcheck_fence)
+ if (fence_after(ring->seqno, ring->hangcheck_fence))
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -704,6 +703,8 @@ static void retire_submits(struct msm_gpu *gpu)
}
}
}
+
+ wake_up_all(&gpu->retire_event);
}
static void retire_worker(struct kthread_work *work)
@@ -733,7 +734,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned long flags;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&gpu->lock));
pm_runtime_get_sync(&gpu->pdev->dev);
@@ -763,7 +764,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
- priv->lastctx = submit->queue->ctx;
+ gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
hangcheck_timer_reset(gpu);
}
@@ -848,6 +849,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_LIST_HEAD(&gpu->active_list);
mutex_init(&gpu->active_lock);
+ mutex_init(&gpu->lock);
+ init_waitqueue_head(&gpu->retire_event);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
kthread_init_work(&gpu->fault_work, fault_worker);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 48ea2de911f1..92aa1e9196c6 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -88,6 +88,21 @@ struct msm_gpu_devfreq {
struct devfreq *devfreq;
/**
+ * idle_constraint:
+ *
+ * A PM QoS constraint to limit max freq while the GPU is idle.
+ */
+ struct dev_pm_qos_request idle_freq;
+
+ /**
+ * boost_constraint:
+ *
+ * A PM QoS constraint to boost min freq for a period of time
+ * until the boost expires.
+ */
+ struct dev_pm_qos_request boost_freq;
+
+ /**
* busy_cycles:
*
* Used by implementation of gpu->gpu_busy() to track the last
@@ -103,22 +118,19 @@ struct msm_gpu_devfreq {
ktime_t idle_time;
/**
- * idle_freq:
+ * idle_work:
*
- * Shadow frequency used while the GPU is idle. From the PoV of
- * the devfreq governor, we are continuing to sample busyness and
- * adjust frequency while the GPU is idle, but we use this shadow
- * value as the GPU is actually clamped to minimum frequency while
- * it is inactive.
+ * Used to delay clamping to idle freq on active->idle transition.
*/
- unsigned long idle_freq;
+ struct msm_hrtimer_work idle_work;
/**
- * idle_work:
+ * boost_work:
*
- * Used to delay clamping to idle freq on active->idle transition.
+ * Used to reset the boost_constraint after the boost period has
+ * elapsed
*/
- struct msm_hrtimer_work idle_work;
+ struct msm_hrtimer_work boost_work;
};
struct msm_gpu {
@@ -144,6 +156,17 @@ struct msm_gpu {
struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
int nr_rings;
+ /**
+ * cur_ctx_seqno:
+ *
+ * The ctx->seqno value of the last context to submit rendering,
+ * and the one with current pgtables installed (for generations
+ * that support per-context pgtables). Tracked by seqno rather
+ * than pointer value to avoid dangling pointers, and cases where
+ * a ctx can be freed and a new one created with the same address.
+ */
+ int cur_ctx_seqno;
+
/*
* List of GEM active objects on this gpu. Protected by
* msm_drm_private::mm_lock
@@ -151,12 +174,22 @@ struct msm_gpu {
struct list_head active_list;
/**
+ * lock:
+ *
+ * General lock for serializing all the gpu things.
+ *
+ * TODO move to per-ring locking where feasible (ie. submit/retire
+ * path, etc)
+ */
+ struct mutex lock;
+
+ /**
* active_submits:
*
* The number of submitted but not yet retired submits, used to
* determine transitions between active and idle.
*
- * Protected by lock
+ * Protected by active_lock
*/
int active_submits;
@@ -197,6 +230,9 @@ struct msm_gpu {
/* work for handling GPU recovery: */
struct kthread_work recover_work;
+ /** retire_event: notified when submits are retired: */
+ wait_queue_head_t retire_event;
+
/* work for handling active-list retiring: */
struct kthread_work retire_work;
@@ -241,7 +277,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
- if (ring->seqno > ring->memptrs->fence)
+ if (fence_after(ring->seqno, ring->memptrs->fence))
return true;
}
@@ -501,6 +537,7 @@ void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
+void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);
@@ -537,28 +574,28 @@ static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
struct msm_gpu_state *state = NULL;
- mutex_lock(&gpu->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
if (gpu->crashstate) {
kref_get(&gpu->crashstate->ref);
state = gpu->crashstate;
}
- mutex_unlock(&gpu->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return state;
}
static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
- mutex_lock(&gpu->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
if (gpu->crashstate) {
if (gpu->funcs->gpu_state_put(gpu->crashstate))
gpu->crashstate = NULL;
}
- mutex_unlock(&gpu->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
}
/*
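
Note: msm_gpu_crashstate_get()/put() above keep the kref manipulation under gpu->lock, which is the usual way to make "check pointer, then take a reference" race-free against a concurrent final put. The shape of the pattern, with placeholder types:

	#include <linux/kref.h>
	#include <linux/mutex.h>

	struct example_state { struct kref ref; };	/* hypothetical */

	static struct example_state *state_get(struct mutex *lock,
					       struct example_state **slot)
	{
		struct example_state *state = NULL;

		/* the lock keeps *slot from being freed between the NULL
		 * check and kref_get() */
		mutex_lock(lock);
		if (*slot) {
			kref_get(&(*slot)->ref);
			state = *slot;
		}
		mutex_unlock(lock);

		return state;
	}
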
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 384e90c4b2a7..9bf319be11f6 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -9,6 +9,7 @@
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
+#include <linux/units.h>
/*
* Power Management:
@@ -25,17 +26,6 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
* to something that actually is in the opp table:
*/
opp = devfreq_recommended_opp(dev, freq, flags);
-
- /*
- * If the GPU is idle, devfreq is not aware, so just ignore
- * it's requests
- */
- if (gpu->devfreq.idle_freq) {
- gpu->devfreq.idle_freq = *freq;
- dev_pm_opp_put(opp);
- return 0;
- }
-
if (IS_ERR(opp))
return PTR_ERR(opp);
@@ -53,9 +43,6 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
static unsigned long get_freq(struct msm_gpu *gpu)
{
- if (gpu->devfreq.idle_freq)
- return gpu->devfreq.idle_freq;
-
if (gpu->funcs->gpu_get_freq)
return gpu->funcs->gpu_get_freq(gpu);
@@ -93,6 +80,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
.get_cur_freq = msm_devfreq_get_cur_freq,
};
+static void msm_devfreq_boost_work(struct kthread_work *work);
static void msm_devfreq_idle_work(struct kthread_work *work);
void msm_devfreq_init(struct msm_gpu *gpu)
@@ -103,6 +91,12 @@ void msm_devfreq_init(struct msm_gpu *gpu)
if (!gpu->funcs->gpu_busy)
return;
+ dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
+
msm_devfreq_profile.initial_freq = gpu->fast_rate;
/*
@@ -133,13 +127,31 @@ void msm_devfreq_init(struct msm_gpu *gpu)
gpu->cooling = NULL;
}
+ msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
+static void cancel_idle_work(struct msm_gpu_devfreq *df)
+{
+ hrtimer_cancel(&df->idle_work.timer);
+ kthread_cancel_work_sync(&df->idle_work.work);
+}
+
+static void cancel_boost_work(struct msm_gpu_devfreq *df)
+{
+ hrtimer_cancel(&df->boost_work.timer);
+ kthread_cancel_work_sync(&df->boost_work.work);
+}
+
void msm_devfreq_cleanup(struct msm_gpu *gpu)
{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
devfreq_cooling_unregister(gpu->cooling);
+ dev_pm_qos_remove_request(&df->boost_freq);
+ dev_pm_qos_remove_request(&df->idle_freq);
}
void msm_devfreq_resume(struct msm_gpu *gpu)
@@ -152,7 +164,41 @@ void msm_devfreq_resume(struct msm_gpu *gpu)
void msm_devfreq_suspend(struct msm_gpu *gpu)
{
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
+ devfreq_suspend_device(df->devfreq);
+
+ cancel_idle_work(df);
+ cancel_boost_work(df);
+}
+
+static void msm_devfreq_boost_work(struct kthread_work *work)
+{
+ struct msm_gpu_devfreq *df = container_of(work,
+ struct msm_gpu_devfreq, boost_work.work);
+
+ dev_pm_qos_update_request(&df->boost_freq, 0);
+}
+
+void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ uint64_t freq;
+
+ freq = get_freq(gpu);
+ freq *= factor;
+
+ /*
+ * A nice little trap is that PM QoS operates in terms of KHz,
+ * while devfreq operates in terms of Hz:
+ */
+ do_div(freq, HZ_PER_KHZ);
+
+ dev_pm_qos_update_request(&df->boost_freq, freq);
+
+ msm_hrtimer_queue_work(&df->boost_work,
+ ms_to_ktime(msm_devfreq_profile.polling_ms),
+ HRTIMER_MODE_REL);
}
void msm_devfreq_active(struct msm_gpu *gpu)
@@ -160,7 +206,6 @@ void msm_devfreq_active(struct msm_gpu *gpu)
struct msm_gpu_devfreq *df = &gpu->devfreq;
struct devfreq_dev_status status;
unsigned int idle_time;
- unsigned long target_freq = df->idle_freq;
if (!df->devfreq)
return;
@@ -168,13 +213,7 @@ void msm_devfreq_active(struct msm_gpu *gpu)
/*
* Cancel any pending transition to idle frequency:
*/
- hrtimer_cancel(&df->idle_work.timer);
-
- /*
- * Hold devfreq lock to synchronize with get_dev_status()/
- * target() callbacks
- */
- mutex_lock(&df->devfreq->lock);
+ cancel_idle_work(df);
idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
@@ -183,21 +222,18 @@ void msm_devfreq_active(struct msm_gpu *gpu)
* interval, then we won't meet the threshold of busyness for
* the governor to ramp up the freq.. so give some boost
*/
- if (idle_time > msm_devfreq_profile.polling_ms/2) {
- target_freq *= 2;
+ if (idle_time > msm_devfreq_profile.polling_ms) {
+ msm_devfreq_boost(gpu, 2);
}
- df->idle_freq = 0;
-
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+ dev_pm_qos_update_request(&df->idle_freq,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
/*
* Reset the polling interval so we aren't inconsistent
* about freq vs busy/total cycles
*/
msm_devfreq_get_dev_status(&gpu->pdev->dev, &status);
-
- mutex_unlock(&df->devfreq->lock);
}
@@ -206,23 +242,11 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
struct msm_gpu_devfreq *df = container_of(work,
struct msm_gpu_devfreq, idle_work.work);
struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
- unsigned long idle_freq, target_freq = 0;
-
- /*
- * Hold devfreq lock to synchronize with get_dev_status()/
- * target() callbacks
- */
- mutex_lock(&df->devfreq->lock);
-
- idle_freq = get_freq(gpu);
-
- if (gpu->clamp_to_idle)
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
df->idle_time = ktime_get();
- df->idle_freq = idle_freq;
- mutex_unlock(&df->devfreq->lock);
+ if (gpu->clamp_to_idle)
+ dev_pm_qos_update_request(&df->idle_freq, 0);
}
void msm_devfreq_idle(struct msm_gpu *gpu)
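
Note: msm_devfreq_boost() above has to bridge the unit mismatch called out in its comment: devfreq frequencies are in Hz while dev_pm_qos frequency requests are in kHz, hence the do_div(freq, HZ_PER_KHZ) before updating the request. A worked example of the conversion:

	#include <stdint.h>
	#include <stdio.h>

	#define HZ_PER_KHZ 1000ULL

	int main(void)
	{
		uint64_t cur_hz = 300000000ULL;	/* GPU running at 300 MHz */
		uint64_t boosted = cur_hz * 2;	/* boost factor of 2 */

		/* PM QoS takes kHz: 600000000 Hz -> 600000 kHz */
		printf("qos min-freq request: %llu kHz\n",
		       (unsigned long long)(boosted / HZ_PER_KHZ));
		return 0;
	}
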
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 6a42b819abc4..2a4f0526cb98 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -198,19 +198,22 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
struct msm_kms *dpu_kms_init(struct drm_device *dev);
+extern const struct of_device_id dpu_dt_match[];
+extern const struct of_device_id mdp5_dt_match[];
+
struct msm_mdss_funcs {
int (*enable)(struct msm_mdss *mdss);
int (*disable)(struct msm_mdss *mdss);
- void (*destroy)(struct drm_device *dev);
+ void (*destroy)(struct msm_mdss *mdss);
};
struct msm_mdss {
- struct drm_device *dev;
+ struct device *dev;
const struct msm_mdss_funcs *funcs;
};
-int mdp5_mdss_init(struct drm_device *dev);
-int dpu_mdss_init(struct drm_device *dev);
+int mdp5_mdss_init(struct platform_device *dev);
+int dpu_mdss_init(struct platform_device *dev);
#define for_each_crtc_mask(dev, crtc, crtc_mask) \
drm_for_each_crtc(crtc, dev) \
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 3a27153eef08..3d3da79fec2a 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -155,9 +155,12 @@ static int perf_open(struct inode *inode, struct file *file)
struct msm_gpu *gpu = priv->gpu;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
+ if (!gpu)
+ return -ENODEV;
- if (perf->open || !gpu) {
+ mutex_lock(&gpu->lock);
+
+ if (perf->open) {
ret = -EBUSY;
goto out;
}
@@ -171,7 +174,7 @@ static int perf_open(struct inode *inode, struct file *file)
perf->next_jiffies = jiffies + SAMPLE_TIME;
out:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index b55398a34fa4..81432ec07012 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -86,7 +86,7 @@ struct msm_rd_state {
struct msm_gem_submit *submit;
/* fifo access is synchronized on the producer side by
- * struct_mutex held by submit code (otherwise we could
+ * gpu->lock held by submit code (otherwise we could
* end up w/ cmds logged in different order than they
* were executed). And read_lock synchronizes the reads
*/
@@ -181,9 +181,12 @@ static int rd_open(struct inode *inode, struct file *file)
uint32_t gpu_id;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
+ if (!gpu)
+ return -ENODEV;
- if (rd->open || !gpu) {
+ mutex_lock(&gpu->lock);
+
+ if (rd->open) {
ret = -EBUSY;
goto out;
}
@@ -200,7 +203,7 @@ static int rd_open(struct inode *inode, struct file *file)
rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
out:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return ret;
}
@@ -340,11 +343,10 @@ out_unlock:
msm_gem_unlock(&obj->base);
}
-/* called under struct_mutex */
+/* called under gpu->lock */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
{
- struct drm_device *dev = submit->dev;
struct task_struct *task;
char msg[256];
int i, n;
@@ -355,7 +357,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
/* writing into fifo is serialized by caller, and
* rd->read_lock is used to serialize the reads
*/
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&submit->gpu->lock));
if (fmt) {
va_list args;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 652b1dedd7c1..3bbf574c3bdc 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -21,11 +21,11 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
pm_runtime_get_sync(&gpu->pdev->dev);
/* TODO move submit path over to using a per-ring lock.. */
- mutex_lock(&gpu->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
msm_gpu_submit(gpu, submit);
- mutex_unlock(&gpu->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
pm_runtime_put(&gpu->pdev->dev);
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index ee22cd25d3e3..987170e16ebd 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -10,7 +10,7 @@ config DRM_MXSFB
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE
help
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 7739f46470d3..99fee4d8cd31 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -205,7 +205,7 @@ nv04_display_destroy(struct drm_device *dev)
nvif_notify_dtor(&disp->flip);
nouveau_display(dev)->priv = NULL;
- kfree(disp);
+ vfree(disp);
nvif_object_unmap(&drm->client.device.object);
}
@@ -223,7 +223,7 @@ nv04_display_create(struct drm_device *dev)
struct nv04_display *disp;
int i, ret;
- disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ disp = vzalloc(sizeof(*disp));
if (!disp)
return -ENOMEM;
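
Note: the nouveau hunk above swaps kzalloc()/kfree() for vzalloc()/vfree(), presumably because struct nv04_display is large enough that a physically contiguous kmalloc allocation is wasteful or can fail; vmalloc memory only needs to be virtually contiguous. The pattern, sketched with an oversized placeholder struct:

	#include <linux/vmalloc.h>

	struct big { char buf[1 << 20]; };	/* hypothetical 1 MiB struct */

	static struct big *big_alloc(void)
	{
		/* no physically contiguous pages required; freed with vfree() */
		return vzalloc(sizeof(struct big));
	}
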
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 4488e1c061b3..28be2912ff74 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -13,6 +13,7 @@ nouveau-y += dispnv50/corec57d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc907d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc37d.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc57d.o
nouveau-y += dispnv50/dac507d.o
nouveau-y += dispnv50/dac907d.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 5396e3707cc4..e6b0417c325b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -103,12 +103,9 @@ base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
-static bool
+static void
base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
- if (size != 256 && size != 1024)
- return false;
-
if (size == 1024)
asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE;
else
@@ -116,7 +113,6 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
asyw->xlut.i.enable = NV907C_SET_BASE_LUT_LO_ENABLE_ENABLE;
asyw->xlut.i.load = head907d_olut_load;
- return true;
}
static inline u32
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 75876546eac1..53b1e2a569c1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -69,7 +69,7 @@ corec57d = {
.head = &headc57d,
.sor = &sorc37d,
#if IS_ENABLED(CONFIG_DEBUG_FS)
- .crc = &crcc37d,
+ .crc = &crcc57d,
#endif
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index 66f32d965c72..29428e770f14 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -84,7 +84,10 @@ static void nv50_crc_ctx_flip_work(struct kthread_work *base)
struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work);
struct nv50_head *head = container_of(crc, struct nv50_head, crc);
struct drm_crtc *crtc = &head->base.base;
- struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct drm_device *dev = crtc->dev;
+ struct nv50_disp *disp = nv50_disp(dev);
+ const uint64_t start_vbl = drm_crtc_vblank_count(crtc);
+ uint64_t end_vbl;
u8 new_idx = crc->ctx_idx ^ 1;
/*
@@ -92,23 +95,24 @@ static void nv50_crc_ctx_flip_work(struct kthread_work *base)
* try again for the next vblank if we don't grab the lock
*/
if (!mutex_trylock(&disp->mutex)) {
- DRM_DEV_DEBUG_KMS(crtc->dev->dev,
- "Lock contended, delaying CRC ctx flip for head-%d\n",
- head->base.index);
- drm_vblank_work_schedule(work,
- drm_crtc_vblank_count(crtc) + 1,
- true);
+ drm_dbg_kms(dev, "Lock contended, delaying CRC ctx flip for %s\n", crtc->name);
+ drm_vblank_work_schedule(work, start_vbl + 1, true);
return;
}
- DRM_DEV_DEBUG_KMS(crtc->dev->dev,
- "Flipping notifier ctx for head %d (%d -> %d)\n",
- drm_crtc_index(crtc), crc->ctx_idx, new_idx);
+ drm_dbg_kms(dev, "Flipping notifier ctx for %s (%d -> %d)\n",
+ crtc->name, crc->ctx_idx, new_idx);
nv50_crc_program_ctx(head, NULL);
nv50_crc_program_ctx(head, &crc->ctx[new_idx]);
mutex_unlock(&disp->mutex);
+ end_vbl = drm_crtc_vblank_count(crtc);
+ if (unlikely(end_vbl != start_vbl))
+ NV_ERROR(nouveau_drm(dev),
+ "Failed to flip CRC context on %s on time (%llu > %llu)\n",
+ crtc->name, end_vbl, start_vbl);
+
spin_lock_irq(&crc->lock);
crc->ctx_changed = true;
spin_unlock_irq(&crc->lock);
@@ -189,9 +193,9 @@ void nv50_crc_handle_vblank(struct nv50_head *head)
* updates back-to-back without waiting, we'll just be
* optimistic and assume we always miss exactly one frame.
*/
- DRM_DEV_DEBUG_KMS(head->base.base.dev->dev,
- "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n",
- head->base.index, crc->frame);
+ drm_dbg_kms(head->base.base.dev,
+ "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n",
+ head->base.index, crc->frame);
crc->frame++;
nv50_crc_reset_ctx(ctx);
@@ -347,8 +351,6 @@ int nv50_crc_atomic_check_head(struct nv50_head *head,
struct nv50_head_atom *armh)
{
struct nv50_atom *atom = nv50_atom(asyh->state.state);
- struct drm_device *dev = head->base.base.dev;
- struct nv50_disp *disp = nv50_disp(dev);
bool changed = armh->crc.src != asyh->crc.src;
if (!armh->crc.src && !asyh->crc.src) {
@@ -357,30 +359,7 @@ int nv50_crc_atomic_check_head(struct nv50_head *head,
return 0;
}
- /* While we don't care about entry tags, Volta+ hw always needs the
- * controlling wndw channel programmed to a wndw that's owned by our
- * head
- */
- if (asyh->crc.src && disp->disp->object.oclass >= GV100_DISP &&
- !(BIT(asyh->crc.wndw) & asyh->wndw.owned)) {
- if (!asyh->wndw.owned) {
- /* TODO: once we support flexible channel ownership,
- * we should write some code here to handle attempting
- * to "steal" a plane: e.g. take a plane that is
- * currently not-visible and owned by another head,
- * and reassign it to this head. If we fail to do so,
- * we should reject the mode outright as CRC capture
- * then becomes impossible.
- */
- NV_ATOMIC(nouveau_drm(dev),
- "No available wndws for CRC readback\n");
- return -EINVAL;
- }
- asyh->crc.wndw = ffs(asyh->wndw.owned) - 1;
- }
-
- if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed ||
- armh->crc.wndw != asyh->crc.wndw) {
+ if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed) {
asyh->clr.crc = armh->crc.src && armh->state.active;
asyh->set.crc = asyh->crc.src && asyh->state.active;
if (changed)
@@ -467,9 +446,8 @@ void nv50_crc_atomic_set(struct nv50_head *head,
struct nouveau_encoder *outp =
nv50_real_outp(nv50_head_atom_get_encoder(asyh));
- func->set_src(head, outp->or,
- nv50_crc_source_type(outp, asyh->crc.src),
- &crc->ctx[crc->ctx_idx], asyh->crc.wndw);
+ func->set_src(head, outp->or, nv50_crc_source_type(outp, asyh->crc.src),
+ &crc->ctx[crc->ctx_idx]);
}
void nv50_crc_atomic_clr(struct nv50_head *head)
@@ -477,7 +455,7 @@ void nv50_crc_atomic_clr(struct nv50_head *head)
const struct nv50_crc_func *func =
nv50_disp(head->base.base.dev)->core->func->crc;
- func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL, 0);
+ func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL);
}
static inline int
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.h b/drivers/gpu/drm/nouveau/dispnv50/crc.h
index 4fce871b04c8..4823f1fde2dd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.h
@@ -45,13 +45,11 @@ struct nv50_crc_notifier_ctx {
struct nv50_crc_atom {
enum nv50_crc_source src;
- /* Only used for gv100+ */
- u8 wndw : 4;
};
struct nv50_crc_func {
- int (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type,
- struct nv50_crc_notifier_ctx *, u32 wndw);
+ int (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type type,
+ struct nv50_crc_notifier_ctx *ctx);
int (*set_ctx)(struct nv50_head *, struct nv50_crc_notifier_ctx *);
u32 (*get_entry)(struct nv50_head *, struct nv50_crc_notifier_ctx *,
enum nv50_crc_source, int idx);
@@ -95,6 +93,7 @@ void nv50_crc_atomic_clr(struct nv50_head *);
extern const struct nv50_crc_func crc907d;
extern const struct nv50_crc_func crcc37d;
+extern const struct nv50_crc_func crcc57d;
#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
struct nv50_crc {};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
index 0fb0fdb9f119..f9ad641555b7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
@@ -23,9 +23,8 @@ struct crc907d_notifier {
} __packed;
static int
-crc907d_set_src(struct nv50_head *head, int or,
- enum nv50_crc_source_type source,
- struct nv50_crc_notifier_ctx *ctx, u32 wndw)
+crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
+ struct nv50_crc_notifier_ctx *ctx)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
@@ -33,7 +32,8 @@ crc907d_set_src(struct nv50_head *head, int or,
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, WIDE_PIPE_CRC, ENABLE);
int ret;
switch (source) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
index 814e5bd97446..f10f6c484408 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
@@ -2,6 +2,7 @@
#include <drm/drm_crtc.h>
#include "crc.h"
+#include "crcc37d.h"
#include "core.h"
#include "disp.h"
#include "head.h"
@@ -10,38 +11,13 @@
#include <nvhw/class/clc37d.h>
-#define CRCC37D_MAX_ENTRIES 2047
-
-struct crcc37d_notifier {
- u32 status;
-
- /* reserved */
- u32 :32;
- u32 :32;
- u32 :32;
- u32 :32;
- u32 :32;
- u32 :32;
- u32 :32;
-
- struct crcc37d_entry {
- u32 status[2];
- u32 :32; /* reserved */
- u32 compositor_crc;
- u32 rg_crc;
- u32 output_crc[2];
- u32 :32; /* reserved */
- } entries[CRCC37D_MAX_ENTRIES];
-} __packed;
-
static int
-crcc37d_set_src(struct nv50_head *head, int or,
- enum nv50_crc_source_type source,
- struct nv50_crc_notifier_ctx *ctx, u32 wndw)
+crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
+ struct nv50_crc_notifier_ctx *ctx)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
- u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, wndw) |
+ u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, i * 4) |
NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
@@ -75,8 +51,7 @@ crcc37d_set_src(struct nv50_head *head, int or,
return 0;
}
-static int
-crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
+int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
@@ -89,9 +64,8 @@ crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
return 0;
}
-static u32 crcc37d_get_entry(struct nv50_head *head,
- struct nv50_crc_notifier_ctx *ctx,
- enum nv50_crc_source source, int idx)
+u32 crcc37d_get_entry(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx,
+ enum nv50_crc_source source, int idx)
{
struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
struct crcc37d_entry __iomem *entry = &notifier->entries[idx];
@@ -105,8 +79,7 @@ static u32 crcc37d_get_entry(struct nv50_head *head,
return ioread32_native(crc_addr);
}
-static bool crcc37d_ctx_finished(struct nv50_head *head,
- struct nv50_crc_notifier_ctx *ctx)
+bool crcc37d_ctx_finished(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
struct nouveau_drm *drm = nouveau_drm(head->base.base.dev);
struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
@@ -148,7 +121,7 @@ const struct nv50_crc_func crcc37d = {
.set_ctx = crcc37d_set_ctx,
.get_entry = crcc37d_get_entry,
.ctx_finished = crcc37d_ctx_finished,
- .flip_threshold = CRCC37D_MAX_ENTRIES - 30,
+ .flip_threshold = CRCC37D_FLIP_THRESHOLD,
.num_entries = CRCC37D_MAX_ENTRIES,
.notifier_len = sizeof(struct crcc37d_notifier),
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h
new file mode 100644
index 000000000000..5775137b832d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __CRCC37D_H__
+#define __CRCC37D_H__
+
+#include <linux/types.h>
+
+#include "crc.h"
+
+#define CRCC37D_MAX_ENTRIES 2047
+#define CRCC37D_FLIP_THRESHOLD (CRCC37D_MAX_ENTRIES - 30)
+
+struct crcc37d_notifier {
+ u32 status;
+
+ /* reserved */
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+
+ struct crcc37d_entry {
+ u32 status[2];
+ u32:32; /* reserved */
+ u32 compositor_crc;
+ u32 rg_crc;
+ u32 output_crc[2];
+ u32:32; /* reserved */
+ } entries[CRCC37D_MAX_ENTRIES];
+} __packed;
+
+int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx);
+u32 crcc37d_get_entry(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx,
+ enum nv50_crc_source source, int idx);
+bool crcc37d_ctx_finished(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx);
+
+#endif /* !__CRCC37D_H__ */
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
new file mode 100644
index 000000000000..cc0130e3d496
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: MIT
+
+#include "crc.h"
+#include "crcc37d.h"
+#include "core.h"
+#include "disp.h"
+#include "head.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57d.h>
+
+static int crcc57d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 crc_args = NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+ NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
+ NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
+ int ret;
+
+ switch (source) {
+ case NV50_CRC_SOURCE_TYPE_SOR:
+ crc_args |= NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SOR(or));
+ break;
+ case NV50_CRC_SOURCE_TYPE_SF:
+ crc_args |= NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SF);
+ break;
+ default:
+ break;
+ }
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ if (source) {
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CRC_CONTROL(i), crc_args);
+ } else {
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CRC_CONTROL(i), 0);
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
+ }
+
+ return 0;
+}
+
+const struct nv50_crc_func crcc57d = {
+ .set_src = crcc57d_set_src,
+ .set_ctx = crcc37d_set_ctx,
+ .get_entry = crcc37d_get_entry,
+ .ctx_finished = crcc37d_ctx_finished,
+ .flip_threshold = CRCC37D_FLIP_THRESHOLD,
+ .num_entries = CRCC37D_MAX_ENTRIES,
+ .notifier_len = sizeof(struct crcc37d_notifier),
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 54fbd6fe751d..00e19fd959ea 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -98,6 +98,7 @@ static int
curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
+ struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
struct nv50_head *head = nv50_head(asyw->state.crtc);
int ret;
@@ -109,8 +110,20 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
if (ret || !asyh->curs.visible)
return ret;
- if (asyw->image.w != asyw->image.h)
+ if (asyw->state.crtc_w != asyw->state.crtc_h) {
+ NV_ATOMIC(drm, "Plane width/height must be equal for cursors\n");
return -EINVAL;
+ }
+
+ if (asyw->image.w != asyw->state.crtc_w) {
+ NV_ATOMIC(drm, "Plane width must be equal to fb width for cursors (height can be larger though)\n");
+ return -EINVAL;
+ }
+
+ if (asyw->state.src_x || asyw->state.src_y) {
+ NV_ATOMIC(drm, "Cursor planes do not support framebuffer offsets\n");
+ return -EINVAL;
+ }
ret = head->func->curs_layout(head, asyw, asyh);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8e28403ea9b1..ae1f41205520 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -852,6 +852,9 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi,
&nv_connector->base, mode);
if (!ret) {
+ drm_hdmi_avi_infoframe_quant_range(&avi_frame.avi,
+ &nv_connector->base, mode,
+ HDMI_QUANTIZATION_RANGE_FULL);
/* We have an AVI InfoFrame, populate it to the display */
args.pwr.avi_infoframe_length
= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
@@ -1387,12 +1390,11 @@ nv50_mstm_cleanup(struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
- int ret;
NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
- ret = drm_dp_check_act_status(&mstm->mgr);
+ drm_dp_check_act_status(&mstm->mgr);
- ret = drm_dp_update_payload_part2(&mstm->mgr);
+ drm_dp_update_payload_part2(&mstm->mgr);
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
@@ -1411,10 +1413,9 @@ nv50_mstm_prepare(struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
- int ret;
NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
- ret = drm_dp_update_payload_part1(&mstm->mgr, 1);
+ drm_dp_update_payload_part1(&mstm->mgr, 1);
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 72099d1e4816..c3c57be54e1c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -226,10 +226,24 @@ static int
nv50_head_atomic_check_lut(struct nv50_head *head,
struct nv50_head_atom *asyh)
{
- struct nv50_disp *disp = nv50_disp(head->base.base.dev);
- struct drm_property_blob *olut = asyh->state.gamma_lut;
+ struct drm_device *dev = head->base.base.dev;
+ struct drm_crtc *crtc = &head->base.base;
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct drm_property_blob *olut = asyh->state.gamma_lut,
+ *ilut = asyh->state.degamma_lut;
int size;
+ /* Ensure that the ilut is valid */
+ if (ilut) {
+ size = drm_color_lut_size(ilut);
+ if (!head->func->ilut_check(size)) {
+ NV_ATOMIC(drm, "Invalid size %d for degamma on [CRTC:%d:%s]\n",
+ size, crtc->base.id, crtc->name);
+ return -EINVAL;
+ }
+ }
+
/* Determine whether core output LUT should be enabled. */
if (olut) {
/* Check if any window(s) have stolen the core output LUT
@@ -256,7 +270,8 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
}
if (!head->func->olut(head, asyh, size)) {
- DRM_DEBUG_KMS("Invalid olut\n");
+ NV_ATOMIC(drm, "Invalid size %d for gamma on [CRTC:%d:%s]\n",
+ size, crtc->base.id, crtc->name);
return -EINVAL;
}
asyh->olut.handle = disp->core->chan.vram.handle;
@@ -330,8 +345,17 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
struct drm_connector_state *conns;
struct drm_connector *conn;
int i, ret;
+ bool check_lut = asyh->state.color_mgmt_changed ||
+ memcmp(&armh->wndw, &asyh->wndw, sizeof(asyh->wndw));
NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
+
+ if (check_lut) {
+ ret = nv50_head_atomic_check_lut(head, asyh);
+ if (ret)
+ return ret;
+ }
+
if (asyh->state.active) {
for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
if (conns->crtc == crtc) {
@@ -357,14 +381,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
if (asyh->state.mode_changed || asyh->state.connectors_changed)
nv50_head_atomic_check_mode(head, asyh);
- if (asyh->state.color_mgmt_changed ||
- memcmp(&armh->wndw, &asyh->wndw, sizeof(asyh->wndw))) {
- int ret = nv50_head_atomic_check_lut(head, asyh);
- if (ret)
- return ret;
-
+ if (check_lut)
asyh->olut.visible = asyh->olut.handle != 0;
- }
if (asyc) {
if (asyc->set.scaler)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index 0bac6be9ba34..41c8788dfb31 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -29,6 +29,7 @@ struct nv50_head_func {
int (*view)(struct nv50_head *, struct nv50_head_atom *);
int (*mode)(struct nv50_head *, struct nv50_head_atom *);
bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int);
+ bool (*ilut_check)(int size);
bool olut_identity;
int olut_size;
int (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
@@ -71,6 +72,7 @@ extern const struct nv50_head_func head907d;
int head907d_view(struct nv50_head *, struct nv50_head_atom *);
int head907d_mode(struct nv50_head *, struct nv50_head_atom *);
bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int);
+bool head907d_ilut_check(int size);
int head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
int head907d_olut_clr(struct nv50_head *);
int head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 85648d790743..18fe4c1e2d6a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -314,6 +314,11 @@ head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
return true;
}
+bool head907d_ilut_check(int size)
+{
+ return size == 256 || size == 1024;
+}
+
int
head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
@@ -409,6 +414,7 @@ head907d = {
.view = head907d_view,
.mode = head907d_mode,
.olut = head907d_olut,
+ .ilut_check = head907d_ilut_check,
.olut_size = 1024,
.olut_set = head907d_olut_set,
.olut_clr = head907d_olut_clr,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index ea9f8667305e..4ce47b55f72c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -119,6 +119,7 @@ head917d = {
.view = head907d_view,
.mode = head907d_mode,
.olut = head907d_olut,
+ .ilut_check = head907d_ilut_check,
.olut_size = 1024,
.olut_set = head907d_olut_set,
.olut_clr = head907d_olut_clr,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index 63adfeba50e5..a4a3b78ea42c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -285,6 +285,7 @@ headc37d = {
.view = headc37d_view,
.mode = headc37d_mode,
.olut = headc37d_olut,
+ .ilut_check = head907d_ilut_check,
.olut_size = 1024,
.olut_set = headc37d_olut_set,
.olut_clr = headc37d_olut_clr,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index fd51527b56b8..543f08ceaad6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -169,7 +169,7 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-bool
+static bool
headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
if (size != 0 && size != 256 && size != 1024)
@@ -236,6 +236,7 @@ headc57d = {
.view = headc37d_view,
.mode = headc57d_mode,
.olut = headc57d_olut,
+ .ilut_check = head907d_ilut_check,
.olut_identity = true,
.olut_size = 1024,
.olut_set = headc57d_olut_set,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 8d048bacd6f0..133c8736426a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -403,10 +403,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
/* Recalculate LUT state. */
memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
- if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) {
- DRM_DEBUG_KMS("Invalid ilut\n");
- return -EINVAL;
- }
+ wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut));
asyw->xlut.handle = wndw->wndw.vram.handle;
asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
asyw->set.xlut = true;
@@ -539,6 +536,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
struct nouveau_bo *nvbo;
struct nv50_head_atom *asyh;
struct nv50_wndw_ctxdma *ctxdma;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
int ret;
NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
@@ -561,7 +560,13 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.handle[0] = ctxdma->object.handle;
}
- asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
+ dma_resv_iter_begin(&cursor, nvbo->bo.base.resv, false);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ /* TODO: We only use the first writer here */
+ asyw->state.fence = dma_fence_get(fence);
+ break;
+ }
+ dma_resv_iter_end(&cursor);
asyw->image.offset[0] = nvbo->offset;
if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index f4e0c5080034..9c9f2c2a71a5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -64,7 +64,7 @@ struct nv50_wndw_func {
int (*ntfy_clr)(struct nv50_wndw *);
int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
- bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int);
+ void (*ilut)(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyh, int size);
void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *,
const struct drm_color_ctm *);
int (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
@@ -129,7 +129,7 @@ int wndwc37e_update(struct nv50_wndw *, u32 *);
int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
-bool wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int);
+void wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int);
int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
int wndwc57e_ilut_clr(struct nv50_wndw *);
int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 57df997c5ff3..183d2c0e65b6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -82,18 +82,14 @@ wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
-static bool
+static void
wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
- if (size != 256 && size != 1024)
- return false;
-
asyw->xlut.i.size = size == 1024 ? NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 :
NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257;
asyw->xlut.i.range = NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY;
asyw->xlut.i.output_mode = NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE;
asyw->xlut.i.load = head907d_olut_load;
- return true;
}
int
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index abdd3bb658b3..37f6da8b3f2a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -179,11 +179,11 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-bool
+void
wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
- if (size = size ? size : 1024, size != 256 && size != 1024)
- return false;
+ if (!size)
+ size = 1024;
if (size == 256)
asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8;
@@ -193,7 +193,6 @@ wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
asyw->xlut.i.output_mode = NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE;
asyw->xlut.i.load = wndwc57e_ilut_load;
- return true;
}
/****************************************************************
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
index 79aff6ff3138..f972ef1409f4 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
@@ -246,6 +246,9 @@
#define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5
#define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
#define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NV907D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6
+#define NV907D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000)
+#define NV907D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001)
#define NV907D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300)
#define NV907D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
#define NV907D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300)
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h
index d83ac815e06c..d4bad2da3e56 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h
@@ -265,6 +265,75 @@
#define NVC57D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400)
#define NVC57D_HEAD_SET_RASTER_BLANK_START_X 14:0
#define NVC57D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NVC57D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020)
+#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
#define NVC57D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400)
#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0
#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 1cbd71abc80a..ae2f2abc8f5a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -308,7 +308,10 @@ nv50_backlight_init(struct nouveau_backlight *bl,
if (ret < 0)
return ret;
- if (drm_edp_backlight_supported(edp_dpcd)) {
+ /* TODO: Add support for hybrid PWM/DPCD panels */
+ if (drm_edp_backlight_supported(edp_dpcd) &&
+ (edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
+ (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) {
NV_DEBUG(drm, "DPCD backlight controls supported on %s\n",
nv_conn->base.name);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index e8c445eb1100..41b78e9ecd4e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2045,7 +2045,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
- int ret = 0;
/* Reset the BIOS head to 0. */
bios->state.crtchead = 0;
@@ -2058,7 +2057,7 @@ nouveau_run_vbios_init(struct drm_device *dev)
bios->fp.lvds_init_run = false;
}
- return ret;
+ return 0;
}
static bool
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 929de41c281f..2b460835a438 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -306,7 +306,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct drm_framebuffer *fb;
const struct drm_format_info *info;
- unsigned int width, height, i;
+ unsigned int height, i;
uint32_t tile_mode;
uint8_t kind;
int ret;
@@ -343,9 +343,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
info = drm_get_format_info(dev, mode_cmd);
for (i = 0; i < info->num_planes; i++) {
- width = drm_format_info_plane_width(info,
- mode_cmd->width,
- i);
height = drm_format_info_plane_height(info,
mode_cmd->height,
i);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e7efd9ede8e4..561309d447e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <linux/console.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -32,6 +31,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vblank.h>
@@ -1358,7 +1358,7 @@ nouveau_drm_init(void)
nouveau_display_options();
if (nouveau_modeset == -1) {
- if (vgacon_text_force())
+ if (drm_firmware_drivers_only())
nouveau_modeset = 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 0ae416aa76dc..a3a04e0d76ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -339,70 +339,54 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
}
int
-nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
+nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
+ bool exclusive, bool intr)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct dma_fence *fence;
struct dma_resv *resv = nvbo->bo.base.resv;
- struct dma_resv_list *fobj;
- struct nouveau_fence *f;
- int ret = 0, i;
+ int i, ret;
if (!exclusive) {
ret = dma_resv_reserve_shared(resv, 1);
-
if (ret)
return ret;
-
- fobj = NULL;
- } else {
- fobj = dma_resv_shared_list(resv);
}
/* Waiting for the exclusive fence first causes performance regressions
* under some circumstances. So manually wait for the shared ones first.
*/
- for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
- struct nouveau_channel *prev = NULL;
- bool must_wait = true;
-
- fence = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(resv));
-
- f = nouveau_local_fence(fence, chan->drm);
- if (f) {
- rcu_read_lock();
- prev = rcu_dereference(f->channel);
- if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
- must_wait = false;
- rcu_read_unlock();
- }
+ for (i = 0; i < 2; ++i) {
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&cursor, resv, exclusive, fence) {
+ struct nouveau_fence *f;
+
+ if (i == 0 && dma_resv_iter_is_exclusive(&cursor))
+ continue;
+
+ f = nouveau_local_fence(fence, chan->drm);
+ if (f) {
+ struct nouveau_channel *prev;
+ bool must_wait = true;
+
+ rcu_read_lock();
+ prev = rcu_dereference(f->channel);
+ if (prev && (prev == chan ||
+ fctx->sync(f, prev, chan) == 0))
+ must_wait = false;
+ rcu_read_unlock();
+ if (!must_wait)
+ continue;
+ }
- if (must_wait)
ret = dma_fence_wait(fence, intr);
- }
-
- fence = dma_resv_excl_fence(resv);
- if (fence) {
- struct nouveau_channel *prev = NULL;
- bool must_wait = true;
-
- f = nouveau_local_fence(fence, chan->drm);
- if (f) {
- rcu_read_lock();
- prev = rcu_dereference(f->channel);
- if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
- must_wait = false;
- rcu_read_unlock();
+ if (ret)
+ return ret;
}
-
- if (must_wait)
- ret = dma_fence_wait(fence, intr);
-
- return ret;
}
- return ret;
+ return 0;
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index ac671202919e..0c8c55c73b12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -60,7 +60,7 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
return 0;
}
-const struct nvkm_sclass
+static const struct nvkm_sclass
nvkm_uclient_sclass = {
.oclass = NVIF_CLASS_CLIENT,
.minver = 0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index f28894fdede9..113ddc103ac2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -161,8 +161,8 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
if (imem && args->v0.ram_size > 0)
args->v0.ram_user = args->v0.ram_user - imem->reserved;
- strncpy(args->v0.chip, device->chip->name, sizeof(args->v0.chip));
- strncpy(args->v0.name, device->name, sizeof(args->v0.name));
+ snprintf(args->v0.chip, sizeof(args->v0.chip), "%s", device->chip->name);
+ snprintf(args->v0.name, sizeof(args->v0.name), "%s", device->name);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
index e20a48f201f6..448a515057c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
@@ -106,6 +106,8 @@ gv100_disp_core_mthd_head = {
{ 0x20a4, 0x6820a4 },
{ 0x20a8, 0x6820a8 },
{ 0x20ac, 0x6820ac },
+ { 0x2180, 0x682180 },
+ { 0x2184, 0x682184 },
{ 0x218c, 0x68218c },
{ 0x2194, 0x682194 },
{ 0x2198, 0x682198 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index e417044cc347..260b197f81bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -49,7 +49,7 @@ tu102_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
/*XXX: how to wait? can you even wait? */
}
-const struct gk104_fifo_runlist_func
+static const struct gk104_fifo_runlist_func
tu102_fifo_runlist = {
.size = 16,
.cgrp = gv100_fifo_runlist_cgrp,
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 262641a014b0..c91130a6be2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -117,8 +117,12 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon)
int
nvkm_falcon_reset(struct nvkm_falcon *falcon)
{
- nvkm_falcon_disable(falcon);
- return nvkm_falcon_enable(falcon);
+ if (!falcon->func->reset) {
+ nvkm_falcon_disable(falcon);
+ return nvkm_falcon_enable(falcon);
+ }
+
+ return falcon->func->reset(falcon);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index 9de74f41dcd2..142079403864 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -401,7 +401,7 @@ init_table_(struct nvbios_init *init, u16 offset, const char *name)
#define init_macro_table(b) init_table_((b), 0x04, "macro table")
#define init_condition_table(b) init_table_((b), 0x06, "condition table")
#define init_io_condition_table(b) init_table_((b), 0x08, "io condition table")
-#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag conditon table")
+#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag condition table")
#define init_function_table(b) init_table_((b), 0x0c, "function table")
#define init_xlat_table(b) init_table_((b), 0x10, "xlat table");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index fb90d47e1225..a9cdf2411187 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -32,7 +32,6 @@ mcp89_devinit_disable(struct nvkm_devinit *init)
struct nvkm_device *device = init->subdev.device;
u32 r001540 = nvkm_rd32(device, 0x001540);
u32 r00154c = nvkm_rd32(device, 0x00154c);
- u64 disable = 0;
if (!(r001540 & 0x40000000)) {
nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
@@ -48,7 +47,7 @@ mcp89_devinit_disable(struct nvkm_devinit *init)
if (!(r00154c & 0x00000200))
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
- return disable;
+ return 0;
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 24382875fb4f..455e95a89259 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -94,20 +94,13 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
return 0;
}
-static int
+static void
nvkm_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
if (!pmu->func->enabled(pmu))
- return 0;
-
- /* Inhibit interrupts, and wait for idle. */
- nvkm_wr32(device, 0x10a014, 0x0000ffff);
- nvkm_msec(device, 2000,
- if (!nvkm_rd32(device, 0x10a04c))
- break;
- );
+ return;
/* Reset. */
if (pmu->func->reset)
@@ -118,25 +111,37 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu)
if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
break;
);
-
- return 0;
}
static int
nvkm_pmu_preinit(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- return nvkm_pmu_reset(pmu);
+ nvkm_pmu_reset(pmu);
+ return 0;
}
static int
nvkm_pmu_init(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- int ret = nvkm_pmu_reset(pmu);
- if (ret == 0 && pmu->func->init)
- ret = pmu->func->init(pmu);
- return ret;
+ struct nvkm_device *device = pmu->subdev.device;
+
+ if (!pmu->func->init)
+ return 0;
+
+ if (pmu->func->enabled(pmu)) {
+ /* Inhibit interrupts, and wait for idle. */
+ nvkm_wr32(device, 0x10a014, 0x0000ffff);
+ nvkm_msec(device, 2000,
+ if (!nvkm_rd32(device, 0x10a04c))
+ break;
+ );
+
+ nvkm_pmu_reset(pmu);
+ }
+
+ return pmu->func->init(pmu);
}
static void *
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
index 5968c7696596..40439e329aa9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
@@ -23,9 +23,38 @@
*/
#include "priv.h"
+static int
+gm200_pmu_flcn_reset(struct nvkm_falcon *falcon)
+{
+ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+
+ nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff);
+ pmu->func->reset(pmu);
+ return nvkm_falcon_enable(falcon);
+}
+
+const struct nvkm_falcon_func
+gm200_pmu_flcn = {
+ .debug = 0xc08,
+ .fbif = 0xe00,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .reset = gm200_pmu_flcn_reset,
+ .cmdq = { 0x4a0, 0x4b0, 4 },
+ .msgq = { 0x4c8, 0x4cc, 0 },
+};
+
static const struct nvkm_pmu_func
gm200_pmu = {
- .flcn = &gt215_pmu_flcn,
+ .flcn = &gm200_pmu_flcn,
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 148706977eec..e1772211b0a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -211,7 +211,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)
static const struct nvkm_pmu_func
gm20b_pmu = {
- .flcn = &gt215_pmu_flcn,
+ .flcn = &gm200_pmu_flcn,
.enabled = gf100_pmu_enabled,
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 00da1b873ce8..6bf7fc1bd1e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)
static const struct nvkm_pmu_func
gp102_pmu = {
- .flcn = &gt215_pmu_flcn,
+ .flcn = &gm200_pmu_flcn,
.enabled = gp102_pmu_enabled,
.reset = gp102_pmu_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index 461f722656e2..ba1583bb618b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -78,7 +78,7 @@ gp10b_pmu_acr = {
static const struct nvkm_pmu_func
gp10b_pmu = {
- .flcn = &gt215_pmu_flcn,
+ .flcn = &gm200_pmu_flcn,
.enabled = gf100_pmu_enabled,
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index e7860d177353..bcaade758ff7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -44,6 +44,8 @@ void gf100_pmu_reset(struct nvkm_pmu *);
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
+extern const struct nvkm_falcon_func gm200_pmu_flcn;
+
void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64);
void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
int gm20b_pmu_acr_boot(struct nvkm_falcon *);
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index 21e8277ff88f..710b4e0abcf0 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -9,6 +9,7 @@ omapdrm-y := omap_drv.o \
omap_debugfs.o \
omap_crtc.o \
omap_plane.o \
+ omap_overlay.o \
omap_encoder.o \
omap_fb.o \
omap_gem.o \
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 5619420cc2cc..c4de142cc85b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -92,6 +92,8 @@ struct dispc_features {
u8 mgr_height_start;
u16 mgr_width_max;
u16 mgr_height_max;
+ u16 ovl_width_max;
+ u16 ovl_height_max;
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
unsigned int max_downscale;
@@ -1279,8 +1281,8 @@ static u32 dispc_ovl_get_burst_size(struct dispc_device *dispc,
return dispc->feat->burst_size_unit * 8;
}
-static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
- enum omap_plane_id plane, u32 fourcc)
+bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 fourcc)
{
const u32 *modes;
unsigned int i;
@@ -2487,6 +2489,11 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
return 0;
}
+enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane)
+{
+ return dispc->feat->overlay_caps[plane];
+}
+
#define DIV_FRAC(dividend, divisor) \
((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100))
@@ -2599,6 +2606,12 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
return 0;
}
+void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height)
+{
+ *width = dispc->feat->ovl_width_max;
+ *height = dispc->feat->ovl_height_max;
+}
+
static int dispc_ovl_setup_common(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_overlay_caps caps,
@@ -4240,6 +4253,8 @@ static const struct dispc_features omap24xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 66500000,
.max_downscale = 2,
/*
@@ -4278,6 +4293,8 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4313,6 +4330,8 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4348,6 +4367,8 @@ static const struct dispc_features omap36xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4383,6 +4404,8 @@ static const struct dispc_features am43xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4418,6 +4441,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 170000000,
.max_tv_pclk = 185625000,
.max_downscale = 4,
@@ -4457,8 +4482,10 @@ static const struct dispc_features omap54xx_dispc_feats = {
.mgr_height_start = 27,
.mgr_width_max = 4096,
.mgr_height_max = 4096,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 4096,
.max_lcd_pclk = 170000000,
- .max_tv_pclk = 186000000,
+ .max_tv_pclk = 192000000,
.max_downscale = 4,
.max_line_width = 2048,
.min_pcd = 1,
@@ -4725,7 +4752,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
struct dispc_device *dispc;
u32 rev;
int r = 0;
- struct resource *dispc_mem;
struct device_node *np = pdev->dev.of_node;
dispc = kzalloc(sizeof(*dispc), GFP_KERNEL);
@@ -4750,8 +4776,7 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
if (r)
goto err_free;
- dispc_mem = platform_get_resource(dispc->pdev, IORESOURCE_MEM, 0);
- dispc->base = devm_ioremap_resource(&pdev->dev, dispc_mem);
+ dispc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dispc->base)) {
r = PTR_ERR(dispc->base);
goto err_free;
@@ -4844,7 +4869,7 @@ static int dispc_remove(struct platform_device *pdev)
return 0;
}
-static int dispc_runtime_suspend(struct device *dev)
+static __maybe_unused int dispc_runtime_suspend(struct device *dev)
{
struct dispc_device *dispc = dev_get_drvdata(dev);
@@ -4859,7 +4884,7 @@ static int dispc_runtime_suspend(struct device *dev)
return 0;
}
-static int dispc_runtime_resume(struct device *dev)
+static __maybe_unused int dispc_runtime_resume(struct device *dev)
{
struct dispc_device *dispc = dev_get_drvdata(dev);
@@ -4887,8 +4912,7 @@ static int dispc_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops dispc_pm_ops = {
- .runtime_suspend = dispc_runtime_suspend,
- .runtime_resume = dispc_runtime_resume,
+ SET_RUNTIME_PM_OPS(dispc_runtime_suspend, dispc_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 503b5d4bf2c2..a6845856cbce 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4884,7 +4884,6 @@ static int dsi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct dsi_data *dsi;
struct resource *dsi_mem;
- struct resource *res;
unsigned int i;
int r;
@@ -4921,13 +4920,11 @@ static int dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->proto_base))
return PTR_ERR(dsi->proto_base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- dsi->phy_base = devm_ioremap_resource(dev, res);
+ dsi->phy_base = devm_platform_ioremap_resource_byname(pdev, "phy");
if (IS_ERR(dsi->phy_base))
return PTR_ERR(dsi->phy_base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
- dsi->pll_base = devm_ioremap_resource(dev, res);
+ dsi->pll_base = devm_platform_ioremap_resource_byname(pdev, "pll");
if (IS_ERR(dsi->pll_base))
return PTR_ERR(dsi->pll_base);
@@ -5061,7 +5058,7 @@ static int dsi_remove(struct platform_device *pdev)
return 0;
}
-static int dsi_runtime_suspend(struct device *dev)
+static __maybe_unused int dsi_runtime_suspend(struct device *dev)
{
struct dsi_data *dsi = dev_get_drvdata(dev);
@@ -5074,7 +5071,7 @@ static int dsi_runtime_suspend(struct device *dev)
return 0;
}
-static int dsi_runtime_resume(struct device *dev)
+static __maybe_unused int dsi_runtime_resume(struct device *dev)
{
struct dsi_data *dsi = dev_get_drvdata(dev);
@@ -5086,8 +5083,7 @@ static int dsi_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops dsi_pm_ops = {
- .runtime_suspend = dsi_runtime_suspend,
- .runtime_resume = dsi_runtime_resume,
+ SET_RUNTIME_PM_OPS(dsi_runtime_suspend, dsi_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index d6a5862b4dbf..69b3e15b9356 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1424,7 +1424,6 @@ static int dss_probe(struct platform_device *pdev)
const struct soc_device_attribute *soc;
struct dss_component_match_data cmatch;
struct component_match *match = NULL;
- struct resource *dss_mem;
struct dss_device *dss;
int r;
@@ -1452,8 +1451,7 @@ static int dss_probe(struct platform_device *pdev)
dss->feat = of_match_device(dss_of_match, &pdev->dev)->data;
/* Map I/O registers, get and setup clocks. */
- dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dss->base = devm_ioremap_resource(&pdev->dev, dss_mem);
+ dss->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dss->base)) {
r = PTR_ERR(dss->base);
goto err_free_dss;
@@ -1571,7 +1569,7 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
}
-static int dss_runtime_suspend(struct device *dev)
+static __maybe_unused int dss_runtime_suspend(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
@@ -1583,7 +1581,7 @@ static int dss_runtime_suspend(struct device *dev)
return 0;
}
-static int dss_runtime_resume(struct device *dev)
+static __maybe_unused int dss_runtime_resume(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
int r;
@@ -1606,8 +1604,7 @@ static int dss_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops dss_pm_ops = {
- .runtime_suspend = dss_runtime_suspend,
- .runtime_resume = dss_runtime_resume,
+ SET_RUNTIME_PM_OPS(dss_runtime_suspend, dss_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index a547527bb2f3..4ff02fbc0e71 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -397,6 +397,11 @@ int dispc_get_num_mgrs(struct dispc_device *dispc);
const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
enum omap_plane_id plane);
+void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height);
+bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 fourcc);
+enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane);
+
u32 dispc_read_irqstatus(struct dispc_device *dispc);
void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
void dispc_write_irqenable(struct dispc_device *dispc, u32 mask);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index 43592c1cf081..852987e67e40 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* HDMI CEC
*
@@ -10,19 +11,6 @@
* Heavily modified to use the linux CEC framework:
*
* Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/kernel.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h
index 0292337c97cc..1ca5b5ca8a99 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* HDMI header definition for OMAP4 HDMI CEC IP
*
* Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _HDMI4_CEC_H_
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 35faa7f028c4..8720bf4f18fe 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -870,7 +870,6 @@ static const struct soc_device_attribute hdmi4_soc_devices[] = {
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
const struct hdmi4_features *features;
- struct resource *res;
const struct soc_device_attribute *soc;
soc = soc_device_match(hdmi4_soc_devices);
@@ -881,8 +880,7 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
core->cts_swmode = features->cts_swmode;
core->audio_use_mclk = features->audio_use_mclk;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
- core->base = devm_ioremap_resource(&pdev->dev, res);
+ core->base = devm_platform_ioremap_resource_byname(pdev, "core");
if (IS_ERR(core->base))
return PTR_ERR(core->base);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 6cc2ad7a420c..21564c38234f 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -872,10 +872,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
- core->base = devm_ioremap_resource(&pdev->dev, res);
+ core->base = devm_platform_ioremap_resource_byname(pdev, "core");
if (IS_ERR(core->base))
return PTR_ERR(core->base);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index 5dc200f09c3c..060e8f76f2be 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -182,15 +182,12 @@ static const struct hdmi_phy_features omap54xx_phy_feats = {
int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy,
unsigned int version)
{
- struct resource *res;
-
if (version == 4)
phy->features = &omap44xx_phy_feats;
else
phy->features = &omap54xx_phy_feats;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- phy->base = devm_ioremap_resource(&pdev->dev, res);
+ phy->base = devm_platform_ioremap_resource_byname(pdev, "phy");
if (IS_ERR(phy->base))
return PTR_ERR(phy->base);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index 13bf649aba52..eea719243eaf 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -162,13 +162,11 @@ int hdmi_pll_init(struct dss_device *dss, struct platform_device *pdev,
struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
{
int r;
- struct resource *res;
pll->pdev = pdev;
pll->wp = wp;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
- pll->base = devm_ioremap_resource(&pdev->dev, res);
+ pll->base = devm_platform_ioremap_resource_byname(pdev, "pll");
if (IS_ERR(pll->base))
return PTR_ERR(pll->base);
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index e522c17955d0..4480b69ab5a7 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -806,7 +806,6 @@ static const struct soc_device_attribute venc_soc_devices[] = {
static int venc_probe(struct platform_device *pdev)
{
struct venc_device *venc;
- struct resource *venc_mem;
int r;
venc = kzalloc(sizeof(*venc), GFP_KERNEL);
@@ -823,8 +822,7 @@ static int venc_probe(struct platform_device *pdev)
venc->config = &venc_config_pal_trm;
- venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
- venc->base = devm_ioremap_resource(&pdev->dev, venc_mem);
+ venc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(venc->base)) {
r = PTR_ERR(venc->base);
goto err_free;
@@ -881,7 +879,7 @@ static int venc_remove(struct platform_device *pdev)
return 0;
}
-static int venc_runtime_suspend(struct device *dev)
+static __maybe_unused int venc_runtime_suspend(struct device *dev)
{
struct venc_device *venc = dev_get_drvdata(dev);
@@ -891,7 +889,7 @@ static int venc_runtime_suspend(struct device *dev)
return 0;
}
-static int venc_runtime_resume(struct device *dev)
+static __maybe_unused int venc_runtime_resume(struct device *dev)
{
struct venc_device *venc = dev_get_drvdata(dev);
@@ -902,8 +900,7 @@ static int venc_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops venc_pm_ops = {
- .runtime_suspend = venc_runtime_suspend,
- .runtime_resume = venc_runtime_resume,
+ SET_RUNTIME_PM_OPS(venc_runtime_suspend, venc_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index b72c3ffddc9a..b6b52049f753 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -137,7 +137,6 @@ struct dss_pll *dss_video_pll_init(struct dss_device *dss,
const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" };
const char * const clkin_name[] = { "video1_clk", "video2_clk" };
- struct resource *res;
struct dss_video_pll *vpll;
void __iomem *pll_base, *clkctrl_base;
struct clk *clk;
@@ -146,16 +145,13 @@ struct dss_pll *dss_video_pll_init(struct dss_device *dss,
/* PLL CONTROL */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_name[id]);
- pll_base = devm_ioremap_resource(&pdev->dev, res);
+ pll_base = devm_platform_ioremap_resource_byname(pdev, reg_name[id]);
if (IS_ERR(pll_base))
return ERR_CAST(pll_base);
/* CLOCK CONTROL */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- clkctrl_name[id]);
- clkctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ clkctrl_base = devm_platform_ioremap_resource_byname(pdev, clkctrl_name[id]);
if (IS_ERR(clkctrl_base))
return ERR_CAST(clkctrl_base);
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 58a8239d3e69..2288ed56f23d 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <rob@ti.com>
* Andy Gross <andy.gross@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef OMAP_DMM_PRIV_H
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index ed770caf55c2..852e78a5f142 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* DMM IOMMU driver support functions for TI OMAP processors.
*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <rob@ti.com>
* Andy Gross <andy.gross@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/completion.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 2f8918fe06d5..87a32b3cd3b0 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Author: Rob Clark <rob@ti.com>
* Andy Gross <andy.gross@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef OMAP_DMM_TILER_H
#define OMAP_DMM_TILER_H
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index c05d3975cb31..2720a58ccd90 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -117,6 +117,102 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
dispc_runtime_put(priv->dispc);
}
+static int drm_atomic_state_normalized_zpos_cmp(const void *a, const void *b)
+{
+ const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
+ const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
+
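+ /* sort by normalized_zpos; fall back to the plane id for a stable order */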
+ if (sa->normalized_zpos != sb->normalized_zpos)
+ return sa->normalized_zpos - sb->normalized_zpos;
+ else
+ return sa->plane->base.id - sb->plane->base.id;
+}
+
+/*
+ * This replaces drm_atomic_normalize_zpos() to handle the dual overlay case.
+ *
+ * Since both halves need to appear side by side, the zpos is
+ * recalculated when dealing with dual overlay cases so that the
+ * zpos of the other planes stays consistent.
+ */
+static int omap_atomic_update_normalize_zpos(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_state, *new_state;
+ struct drm_plane *plane;
+ int c, i, n, inc;
+ int total_planes = dev->mode_config.num_total_plane;
+ struct drm_plane_state **states;
+ int ret = 0;
+
+ states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
+ if (!states)
+ return -ENOMEM;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, c) {
+ if (old_state->plane_mask == new_state->plane_mask &&
+ !new_state->zpos_changed)
+ continue;
+
+ /* Reset plane increment and index value for every crtc */
+ n = 0;
+
+ /*
+ * The normalization process might create new states for planes
+ * whose normalized_zpos has to be recalculated.
+ */
+ drm_for_each_plane_mask(plane, dev, new_state->plane_mask) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(new_state->state,
+ plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto done;
+ }
+ states[n++] = plane_state;
+ }
+
+ sort(states, n, sizeof(*states),
+ drm_atomic_state_normalized_zpos_cmp, NULL);
+
+ for (i = 0, inc = 0; i < n; i++) {
+ plane = states[i]->plane;
+
+ states[i]->normalized_zpos = i + inc;
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] updated normalized zpos value %d\n",
+ plane->base.id, plane->name,
+ states[i]->normalized_zpos);
+
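+ /* a dual-overlay plane occupies two consecutive zpos slots, so skip one */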
+ if (is_omap_plane_dual_overlay(states[i]))
+ inc++;
+ }
+ new_state->zpos_changed = true;
+ }
+
+done:
+ kfree(states);
+ return ret;
+}
+
+static int omap_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
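+ /* re-normalize zpos after the helper pass to account for dual-overlay planes */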
+ if (dev->mode_config.normalize_zpos) {
+ ret = omap_atomic_update_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
.atomic_commit_tail = omap_atomic_commit_tail,
};
@@ -124,10 +220,86 @@ static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs =
static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = omap_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct omap_global_state *
+omap_get_existing_global_state(struct omap_drm_private *priv)
+{
+ return to_omap_global_state(priv->glob_obj.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state and creates
+ * a new duplicated private object state.
+ */
+struct omap_global_state *__must_check
+omap_get_global_state(struct drm_atomic_state *s)
+{
+ struct omap_drm_private *priv = s->dev->dev_private;
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_private_obj_state(s, &priv->glob_obj);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_omap_global_state(priv_state);
+}
+
+static struct drm_private_state *
+omap_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct omap_global_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void omap_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct omap_global_state *omap_state = to_omap_global_state(state);
+
+ kfree(omap_state);
+}
+
+static const struct drm_private_state_funcs omap_global_state_funcs = {
+ .atomic_duplicate_state = omap_global_duplicate_state,
+ .atomic_destroy_state = omap_global_destroy_state,
+};
+
+static int omap_global_obj_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_global_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(dev, &priv->glob_obj, &state->base,
+ &omap_global_state_funcs);
+ return 0;
+}
+
+static void omap_global_obj_fini(struct omap_drm_private *priv)
+{
+ drm_atomic_private_obj_fini(&priv->glob_obj);
+}
+
static void omap_disconnect_pipelines(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
@@ -231,8 +403,6 @@ static int omap_modeset_init(struct drm_device *dev)
if (!omapdss_stack_is_ready())
return -EPROBE_DEFER;
- drm_mode_config_init(dev);
-
ret = omap_modeset_init_properties(dev);
if (ret < 0)
return ret;
@@ -583,10 +753,20 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_gem_init(ddev);
+ drm_mode_config_init(ddev);
+
+ ret = omap_global_obj_init(ddev);
+ if (ret)
+ goto err_gem_deinit;
+
+ ret = omap_hwoverlays_init(priv);
+ if (ret)
+ goto err_free_priv_obj;
+
ret = omap_modeset_init(ddev);
if (ret) {
dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
- goto err_gem_deinit;
+ goto err_free_overlays;
}
/* Initialize vblank handling, start with all CRTCs disabled. */
@@ -618,7 +798,12 @@ err_cleanup_helpers:
omap_fbdev_fini(ddev);
err_cleanup_modeset:
omap_modeset_fini(ddev);
+err_free_overlays:
+ omap_hwoverlays_destroy(priv);
+err_free_priv_obj:
+ omap_global_obj_fini(priv);
err_gem_deinit:
+ drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
@@ -642,6 +827,9 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_atomic_helper_shutdown(ddev);
omap_modeset_fini(ddev);
+ omap_hwoverlays_destroy(priv);
+ omap_global_obj_fini(priv);
+ drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 591d4c273f02..825960fd3ea9 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -14,6 +14,7 @@
#include "dss/omapdss.h"
#include "dss/dss.h"
+#include <drm/drm_atomic.h>
#include <drm/drm_gem.h>
#include <drm/omap_drm.h>
@@ -24,6 +25,7 @@
#include "omap_gem.h"
#include "omap_irq.h"
#include "omap_plane.h"
+#include "omap_overlay.h"
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* verbose debug */
@@ -40,6 +42,19 @@ struct omap_drm_pipeline {
unsigned int alias_id;
};
+/*
+ * Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+#define to_omap_global_state(x) container_of(x, struct omap_global_state, base)
+
+struct omap_global_state {
+ struct drm_private_state base;
+
+ /* global atomic state of assignment between overlays and planes */
+ struct drm_plane *hwoverlay_to_plane[8];
+};
+
struct omap_drm_private {
struct drm_device *ddev;
struct device *dev;
@@ -57,6 +72,11 @@ struct omap_drm_private {
unsigned int num_planes;
struct drm_plane *planes[8];
+ unsigned int num_ovls;
+ struct omap_hw_overlay *overlays[8];
+
+ struct drm_private_obj glob_obj;
+
struct drm_fb_helper *fbdev;
struct workqueue_struct *wq;
@@ -85,4 +105,8 @@ struct omap_drm_private {
void omap_debugfs_init(struct drm_minor *minor);
+struct omap_global_state * __must_check omap_get_global_state(struct drm_atomic_state *s);
+
+struct omap_global_state *omap_get_existing_global_state(struct omap_drm_private *priv);
+
#endif /* __OMAPDRM_DRV_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 190afc564914..895e66b08a81 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -131,7 +131,9 @@ static u32 drm_rotation_to_tiler(unsigned int drm_rot)
/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
*/
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
- struct drm_plane_state *state, struct omap_overlay_info *info)
+ struct drm_plane_state *state,
+ struct omap_overlay_info *info,
+ struct omap_overlay_info *r_info)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
@@ -218,6 +220,35 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
} else {
info->p_uv_addr = 0;
}
+
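+ /* dual overlay: split the buffer in half horizontally and describe the right half in r_info */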
+ if (r_info) {
+ info->width /= 2;
+ info->out_width /= 2;
+
+ *r_info = *info;
+
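+ /* YUV needs even widths, so rebalance an odd split by one pixel */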
+ if (fb->format->is_yuv) {
+ if (info->width & 1) {
+ info->width++;
+ r_info->width--;
+ }
+
+ if (info->out_width & 1) {
+ info->out_width++;
+ r_info->out_width--;
+ }
+ }
+
+ r_info->pos_x = info->pos_x + info->out_width;
+
+ r_info->paddr = get_linear_addr(fb, format, 0,
+ x + info->width, y);
+ if (fb->format->format == DRM_FORMAT_NV12) {
+ r_info->p_uv_addr =
+ get_linear_addr(fb, format, 1,
+ x + info->width, y);
+ }
+ }
}
/* pin, prepare for scanout: */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index c0e19aed8220..b75f0b5ef1d8 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -26,7 +26,9 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
- struct drm_plane_state *state, struct omap_overlay_info *info);
+ struct drm_plane_state *state,
+ struct omap_overlay_info *info,
+ struct omap_overlay_info *r_info);
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 38af6195d959..b0fa17409b66 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -789,7 +789,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt,
omap_obj->width,
- omap_obj->height, 0);
+ omap_obj->height, PAGE_SIZE);
} else {
block = tiler_reserve_1d(obj->size);
}
@@ -851,6 +851,11 @@ static void omap_gem_unpin_locked(struct drm_gem_object *obj)
return;
if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
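+ /* drop the cached sg_table built by omap_gem_get_sg(); its mapping is going away */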
+ if (omap_obj->sgt) {
+ sg_free_table(omap_obj->sgt);
+ kfree(omap_obj->sgt);
+ omap_obj->sgt = NULL;
+ }
ret = tiler_unpin(omap_obj->block);
if (ret) {
dev_err(obj->dev->dev,
@@ -963,6 +968,78 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
return 0;
}
+struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ dma_addr_t addr;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int count, len, stride, i;
+ int ret;
+
+ ret = omap_gem_pin(obj, &addr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_lock(&omap_obj->lock);
+
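+ /* reuse the cached sg_table if one was already built for this object */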
+ sgt = omap_obj->sgt;
+ if (sgt)
+ goto out;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+
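+ /* tiled: one sg entry per row; rows sit tiler_stride() bytes apart */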
+ len = omap_obj->width << (int)fmt;
+ count = omap_obj->height;
+ stride = tiler_stride(fmt, 0);
+ } else {
+ len = obj->size;
+ count = 1;
+ stride = 0;
+ }
+
+ ret = sg_alloc_table(sgt, count, GFP_KERNEL);
+ if (ret)
+ goto err_free;
+
+ for_each_sg(sgt->sgl, sg, count, i) {
+ sg_set_page(sg, phys_to_page(addr), len, offset_in_page(addr));
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = len;
+
+ addr += stride;
+ }
+
+ omap_obj->sgt = sgt;
+out:
+ mutex_unlock(&omap_obj->lock);
+ return sgt;
+
+err_free:
+ kfree(sgt);
+err_unpin:
+ mutex_unlock(&omap_obj->lock);
+ omap_gem_unpin(obj);
+ return ERR_PTR(ret);
+}
+
+void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ if (WARN_ON(omap_obj->sgt != sgt))
+ return;
+
+ omap_gem_unpin(obj);
+}
+
#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
* Get kernel virtual address for CPU access.. this more or less only
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index eda9b4839c30..19209e319663 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -82,5 +82,7 @@ u32 omap_gem_flags(struct drm_gem_object *obj);
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
int x, int y, dma_addr_t *dma_addr);
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient);
+struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj);
+void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt);
#endif /* __OMAPDRM_GEM_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 809f86cfc540..57af3d97be77 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -23,45 +23,21 @@ static struct sg_table *omap_gem_map_dma_buf(
{
struct drm_gem_object *obj = attachment->dmabuf->priv;
struct sg_table *sg;
- dma_addr_t dma_addr;
- int ret;
-
- sg = kzalloc(sizeof(*sg), GFP_KERNEL);
- if (!sg)
- return ERR_PTR(-ENOMEM);
-
- /* camera, etc, need physically contiguous.. but we need a
- * better way to know this..
- */
- ret = omap_gem_pin(obj, &dma_addr);
- if (ret)
- goto out;
-
- ret = sg_alloc_table(sg, 1, GFP_KERNEL);
- if (ret)
- goto out;
-
- sg_init_table(sg->sgl, 1);
- sg_dma_len(sg->sgl) = obj->size;
- sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
- sg_dma_address(sg->sgl) = dma_addr;
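+ /* omap_gem_get_sg() pins the buffer and builds (or reuses) a cached sg_table */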
+ sg = omap_gem_get_sg(obj);
+ if (IS_ERR(sg))
+ return sg;
/* this must be after omap_gem_pin() to ensure we have pages attached */
omap_gem_dma_sync_buffer(obj, dir);
return sg;
-out:
- kfree(sg);
- return ERR_PTR(ret);
}
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg, enum dma_data_direction dir)
{
struct drm_gem_object *obj = attachment->dmabuf->priv;
- omap_gem_unpin(obj);
- sg_free_table(sg);
- kfree(sg);
+ omap_gem_put_sg(obj, sg);
}
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
@@ -114,7 +90,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.ops = &omap_dmabuf_ops;
- exp_info.size = obj->size;
+ exp_info.size = omap_gem_mmap_size(obj);
exp_info.flags = flags;
exp_info.priv = obj;
diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.c b/drivers/gpu/drm/omapdrm/omap_overlay.c
new file mode 100644
index 000000000000..10730c9b2752
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_overlay.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Benoit Parrot <bparrot@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "omap_dmm_tiler.h"
+#include "omap_drv.h"
+
+/*
+ * overlay funcs
+ */
+static const char * const overlay_id_to_name[] = {
+ [OMAP_DSS_GFX] = "gfx",
+ [OMAP_DSS_VIDEO1] = "vid1",
+ [OMAP_DSS_VIDEO2] = "vid2",
+ [OMAP_DSS_VIDEO3] = "vid3",
+};
+
+/*
+ * Find a free overlay with the required caps and supported fourcc
+ */
+static struct omap_hw_overlay *
+omap_plane_find_free_overlay(struct drm_device *dev, struct drm_plane *hwoverlay_to_plane[],
+ u32 caps, u32 fourcc)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ int i;
+
+ DBG("caps: %x fourcc: %x", caps, fourcc);
+
+ for (i = 0; i < priv->num_ovls; i++) {
+ struct omap_hw_overlay *cur = priv->overlays[i];
+
+ DBG("%d: id: %d cur->caps: %x",
+ cur->idx, cur->id, cur->caps);
+
+ /* skip if already in-use */
+ if (hwoverlay_to_plane[cur->idx])
+ continue;
+
+ /* skip if doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ /* check supported format */
+ if (!dispc_ovl_color_mode_supported(priv->dispc,
+ cur->id, fourcc))
+ continue;
+
+ return cur;
+ }
+
+ DBG("no match");
+ return NULL;
+}
+
+/*
+ * Assign a new overlay to a plane with the required caps and supported fourcc.
+ * If a plane needs a new overlay, the previous one should have been released
+ * with omap_overlay_release().
+ * This should be called from the plane atomic_check() in order to prepare the
+ * next global overlay_map to be enabled when the atomic transaction is valid.
+ */
+int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ u32 caps, u32 fourcc, struct omap_hw_overlay **overlay,
+ struct omap_hw_overlay **r_overlay)
+{
+ /* Get the global state of the current atomic transaction */
+ struct omap_global_state *state = omap_get_global_state(s);
+ struct drm_plane **overlay_map = state->hwoverlay_to_plane;
+ struct omap_hw_overlay *ovl, *r_ovl;
+
+ ovl = omap_plane_find_free_overlay(s->dev, overlay_map, caps, fourcc);
+ if (!ovl)
+ return -ENOMEM;
+
+ overlay_map[ovl->idx] = plane;
+ *overlay = ovl;
+
+ if (r_overlay) {
+ r_ovl = omap_plane_find_free_overlay(s->dev, overlay_map,
+ caps, fourcc);
+ if (!r_ovl) {
+ overlay_map[ovl->idx] = NULL;
+ *overlay = NULL;
+ return -ENOMEM;
+ }
+
+ overlay_map[r_ovl->idx] = plane;
+ *r_overlay = r_ovl;
+ }
+
+ DBG("%s: assign to plane %s caps %x", ovl->name, plane->name, caps);
+
+ if (r_overlay) {
+ DBG("%s: assign to right of plane %s caps %x",
+ r_ovl->name, plane->name, caps);
+ }
+
+ return 0;
+}
+
+/*
+ * Release an overlay from a plane if the plane becomes invisible or if the
+ * plane needs a new overlay because the overlay caps changed.
+ * This should be called from the plane atomic_check() in order to prepare the
+ * next global overlay_map to be enabled when atomic transaction is valid.
+ */
+void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay)
+{
+ /* Get the global state of the current atomic transaction */
+ struct omap_global_state *state = omap_get_global_state(s);
+ struct drm_plane **overlay_map = state->hwoverlay_to_plane;
+
+ if (!overlay)
+ return;
+
+ if (WARN_ON(!overlay_map[overlay->idx]))
+ return;
+
+ DBG("%s: release from plane %s", overlay->name, overlay_map[overlay->idx]->name);
+
+ overlay_map[overlay->idx] = NULL;
+}
+
+/*
+ * Update an overlay state that was attached to a plane before the current atomic state.
+ * This should be called from the plane atomic_update() or atomic_disable(),
+ * where an overlay association to a plane could have changed between the old and current
+ * atomic state.
+ */
+void omap_overlay_update_state(struct omap_drm_private *priv,
+ struct omap_hw_overlay *overlay)
+{
+ struct omap_global_state *state = omap_get_existing_global_state(priv);
+ struct drm_plane **overlay_map = state->hwoverlay_to_plane;
+
+ /* if this overlay is not used anymore, disable it */
+ if (!overlay_map[overlay->idx]) {
+ DBG("%s: disabled", overlay->name);
+
+ /* disable the overlay */
+ dispc_ovl_enable(priv->dispc, overlay->id, false);
+ }
+}
+
+static void omap_overlay_destroy(struct omap_hw_overlay *overlay)
+{
+ kfree(overlay);
+}
+
+static struct omap_hw_overlay *omap_overlay_init(enum omap_plane_id overlay_id,
+ enum omap_overlay_caps caps)
+{
+ struct omap_hw_overlay *overlay;
+
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
+ return ERR_PTR(-ENOMEM);
+
+ overlay->name = overlay_id_to_name[overlay_id];
+ overlay->id = overlay_id;
+ overlay->caps = caps;
+
+ return overlay;
+}
+
+int omap_hwoverlays_init(struct omap_drm_private *priv)
+{
+ static const enum omap_plane_id hw_plane_ids[] = {
+ OMAP_DSS_GFX, OMAP_DSS_VIDEO1,
+ OMAP_DSS_VIDEO2, OMAP_DSS_VIDEO3,
+ };
+ u32 num_overlays = dispc_get_num_ovls(priv->dispc);
+ enum omap_overlay_caps caps;
+ int i, ret;
+
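+ /* idx is the position in priv->overlays[]; id is the DSS hardware plane id */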
+ for (i = 0; i < num_overlays; i++) {
+ struct omap_hw_overlay *overlay;
+
+ caps = dispc_ovl_get_caps(priv->dispc, hw_plane_ids[i]);
+ overlay = omap_overlay_init(hw_plane_ids[i], caps);
+ if (IS_ERR(overlay)) {
+ ret = PTR_ERR(overlay);
+ dev_err(priv->dev, "failed to construct overlay for %s (%d)\n",
+ overlay_id_to_name[i], ret);
+ omap_hwoverlays_destroy(priv);
+ return ret;
+ }
+ overlay->idx = priv->num_ovls;
+ priv->overlays[priv->num_ovls++] = overlay;
+ }
+
+ return 0;
+}
+
+void omap_hwoverlays_destroy(struct omap_drm_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_ovls; i++) {
+ omap_overlay_destroy(priv->overlays[i]);
+ priv->overlays[i] = NULL;
+ }
+
+ priv->num_ovls = 0;
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.h b/drivers/gpu/drm/omapdrm/omap_overlay.h
new file mode 100644
index 000000000000..e36a43f35563
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_overlay.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Benoit Parrot <bparrot@ti.com>
+ */
+
+#ifndef __OMAPDRM_OVERLAY_H__
+#define __OMAPDRM_OVERLAY_H__
+
+#include <linux/types.h>
+
+enum drm_plane_type;
+
+struct drm_device;
+struct drm_mode_object;
+struct drm_plane;
+
+/* Used to associate a HW overlay/plane with a DRM plane */
+struct omap_hw_overlay {
+ unsigned int idx;
+
+ const char *name;
+ enum omap_plane_id id;
+
+ enum omap_overlay_caps caps;
+};
+
+int omap_hwoverlays_init(struct omap_drm_private *priv);
+void omap_hwoverlays_destroy(struct omap_drm_private *priv);
+int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ u32 caps, u32 fourcc, struct omap_hw_overlay **overlay,
+ struct omap_hw_overlay **r_overlay);
+void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay);
+void omap_overlay_update_state(struct omap_drm_private *priv, struct omap_hw_overlay *overlay);
+#endif /* __OMAPDRM_OVERLAY_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 512af976b7e9..b35205c4e979 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_fourcc.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -16,14 +17,30 @@
* plane funcs
*/
+#define to_omap_plane_state(x) container_of(x, struct omap_plane_state, base)
+
+struct omap_plane_state {
+ /* Must be first. */
+ struct drm_plane_state base;
+
+ struct omap_hw_overlay *overlay;
+ struct omap_hw_overlay *r_overlay; /* right overlay */
+};
+
#define to_omap_plane(x) container_of(x, struct omap_plane, base)
struct omap_plane {
struct drm_plane base;
enum omap_plane_id id;
- const char *name;
};
+bool is_omap_plane_dual_overlay(struct drm_plane_state *state)
+{
+ struct omap_plane_state *omap_state = to_omap_plane_state(state);
+
+ return !!omap_state->r_overlay;
+}
+
static int omap_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -46,13 +63,35 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct omap_drm_private *priv = plane->dev->dev_private;
- struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
- struct omap_overlay_info info;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct omap_plane_state *new_omap_state;
+ struct omap_plane_state *old_omap_state;
+ struct omap_overlay_info info, r_info;
+ enum omap_plane_id ovl_id, r_ovl_id;
int ret;
+ bool dual_ovl;
+
+ new_omap_state = to_omap_plane_state(new_state);
+ old_omap_state = to_omap_plane_state(old_state);
- DBG("%s, crtc=%p fb=%p", omap_plane->name, new_state->crtc,
+ dual_ovl = is_omap_plane_dual_overlay(new_state);
+
+ /* Cleanup previously held overlay if needed */
+ if (old_omap_state->overlay)
+ omap_overlay_update_state(priv, old_omap_state->overlay);
+ if (old_omap_state->r_overlay)
+ omap_overlay_update_state(priv, old_omap_state->r_overlay);
+
+ if (!new_omap_state->overlay) {
+ DBG("[PLANE:%d:%s] no overlay attached", plane->base.id, plane->name);
+ return;
+ }
+
+ ovl_id = new_omap_state->overlay->id;
+ DBG("%s, crtc=%p fb=%p", plane->name, new_state->crtc,
new_state->fb);
memset(&info, 0, sizeof(info));
@@ -67,65 +106,155 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
info.color_encoding = new_state->color_encoding;
info.color_range = new_state->color_range;
+ r_info = info;
+
/* update scanout: */
- omap_framebuffer_update_scanout(new_state->fb, new_state, &info);
+ omap_framebuffer_update_scanout(new_state->fb, new_state, &info,
+ dual_ovl ? &r_info : NULL);
- DBG("%dx%d -> %dx%d (%d)", info.width, info.height,
- info.out_width, info.out_height,
- info.screen_width);
+ DBG("%s: %dx%d -> %dx%d (%d)",
+ new_omap_state->overlay->name, info.width, info.height,
+ info.out_width, info.out_height, info.screen_width);
DBG("%d,%d %pad %pad", info.pos_x, info.pos_y,
&info.paddr, &info.p_uv_addr);
+ if (dual_ovl) {
+ r_ovl_id = new_omap_state->r_overlay->id;
+ /*
+ * If the current plane uses 2 hw planes, the very next
+ * zorder is used by the r_overlay, so we just use the
+ * main overlay zorder + 1.
+ */
+ r_info.zorder = info.zorder + 1;
+
+ DBG("%s: %dx%d -> %dx%d (%d)",
+ new_omap_state->r_overlay->name,
+ r_info.width, r_info.height,
+ r_info.out_width, r_info.out_height, r_info.screen_width);
+ DBG("%d,%d %pad %pad", r_info.pos_x, r_info.pos_y,
+ &r_info.paddr, &r_info.p_uv_addr);
+ }
+
/* and finally, update omapdss: */
- ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info,
+ ret = dispc_ovl_setup(priv->dispc, ovl_id, &info,
omap_crtc_timings(new_state->crtc), false,
omap_crtc_channel(new_state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n",
- omap_plane->name);
- dispc_ovl_enable(priv->dispc, omap_plane->id, false);
+ plane->name);
+ dispc_ovl_enable(priv->dispc, ovl_id, false);
return;
}
- dispc_ovl_enable(priv->dispc, omap_plane->id, true);
+ dispc_ovl_enable(priv->dispc, ovl_id, true);
+
+ if (dual_ovl) {
+ ret = dispc_ovl_setup(priv->dispc, r_ovl_id, &r_info,
+ omap_crtc_timings(new_state->crtc), false,
+ omap_crtc_channel(new_state->crtc));
+ if (ret) {
+ dev_err(plane->dev->dev, "Failed to setup plane right-overlay %s\n",
+ plane->name);
+ dispc_ovl_enable(priv->dispc, r_ovl_id, false);
+ dispc_ovl_enable(priv->dispc, ovl_id, false);
+ return;
+ }
+
+ dispc_ovl_enable(priv->dispc, r_ovl_id, true);
+ }
}
static void omap_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct omap_plane_state *new_omap_state;
+ struct omap_plane_state *old_omap_state;
+
+ new_omap_state = to_omap_plane_state(new_state);
+ old_omap_state = to_omap_plane_state(old_state);
+
+ if (!old_omap_state->overlay)
+ return;
new_state->rotation = DRM_MODE_ROTATE_0;
new_state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : omap_plane->id;
- dispc_ovl_enable(priv->dispc, omap_plane->id, false);
+ omap_overlay_update_state(priv, old_omap_state->overlay);
+ new_omap_state->overlay = NULL;
+
+ if (is_omap_plane_dual_overlay(old_state)) {
+ omap_overlay_update_state(priv, old_omap_state->r_overlay);
+ new_omap_state->r_overlay = NULL;
+ }
}
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
static int omap_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct omap_drm_private *priv = plane->dev->dev_private;
+ struct omap_plane_state *omap_state = to_omap_plane_state(new_plane_state);
+ struct omap_global_state *omap_overlay_global_state;
struct drm_crtc_state *crtc_state;
+ bool new_r_hw_overlay = false;
+ bool new_hw_overlay = false;
+ u32 max_width, max_height;
+ struct drm_crtc *crtc;
+ u16 width, height;
+ u32 caps = 0;
+ u32 fourcc;
+ int ret;
- if (!new_plane_state->fb)
- return 0;
+ omap_overlay_global_state = omap_get_global_state(state);
+ if (IS_ERR(omap_overlay_global_state))
+ return PTR_ERR(omap_overlay_global_state);
+
+ dispc_ovl_get_max_size(priv->dispc, &width, &height);
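+ /* plane src coordinates are in 16.16 fixed point; scale the hw limits to match */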
+ max_width = width << 16;
+ max_height = height << 16;
- /* crtc should only be NULL when disabling (i.e., !new_plane_state->fb) */
- if (WARN_ON(!new_plane_state->crtc))
+ crtc = new_plane_state->crtc ? new_plane_state->crtc : plane->state->crtc;
+ if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
/* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state))
return 0;
- if (!crtc_state->enable)
+ /*
+ * Note: these are just sanity checks to filter out totally bad scaling
+ * factors. The real limits must be calculated case by case, and
+ * unfortunately we currently do those checks only at the commit
+ * phase in dispc.
+ */
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ FRAC_16_16(1, 8), FRAC_16_16(8, 1),
+ true, true);
+ if (ret)
+ return ret;
+
+ DBG("%s: visible %d -> %d", plane->name,
+ old_plane_state->visible, new_plane_state->visible);
+
+ if (!new_plane_state->visible) {
+ omap_overlay_release(state, omap_state->overlay);
+ omap_overlay_release(state, omap_state->r_overlay);
+ omap_state->overlay = NULL;
+ omap_state->r_overlay = NULL;
return 0;
+ }
if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0)
return -EINVAL;
@@ -136,10 +265,96 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
+ /* Make sure dimensions are within bounds. */
+ if (new_plane_state->src_h > max_height || new_plane_state->crtc_h > height)
+ return -EINVAL;
+
+ if (new_plane_state->src_w > max_width || new_plane_state->crtc_w > width) {
+ bool is_fourcc_yuv = new_plane_state->fb->format->is_yuv;
+
+ if (is_fourcc_yuv && (((new_plane_state->src_w >> 16) / 2 & 1) ||
+ new_plane_state->crtc_w / 2 & 1)) {
+ /*
+ * When the calculated split overlay width yields an odd
+ * value, we need to adjust the individual widths by
+ * +/- 1. So make sure it fits.
+ */
+ if (new_plane_state->src_w <= ((2 * width - 1) << 16) &&
+ new_plane_state->crtc_w <= (2 * width - 1))
+ new_r_hw_overlay = true;
+ else
+ return -EINVAL;
+ } else {
+ if (new_plane_state->src_w <= (2 * max_width) &&
+ new_plane_state->crtc_w <= (2 * width))
+ new_r_hw_overlay = true;
+ else
+ return -EINVAL;
+ }
+ }
+
if (new_plane_state->rotation != DRM_MODE_ROTATE_0 &&
!omap_framebuffer_supports_rotation(new_plane_state->fb))
return -EINVAL;
+ if ((new_plane_state->src_w >> 16) != new_plane_state->crtc_w ||
+ (new_plane_state->src_h >> 16) != new_plane_state->crtc_h)
+ caps |= OMAP_DSS_OVL_CAP_SCALE;
+
+ fourcc = new_plane_state->fb->format->format;
+
+ /*
+ * (re)allocate hw overlay if we don't have one or
+ * there is a caps mismatch
+ */
+ if (!omap_state->overlay || (caps & ~omap_state->overlay->caps)) {
+ new_hw_overlay = true;
+ } else {
+ /* check supported format */
+ if (!dispc_ovl_color_mode_supported(priv->dispc, omap_state->overlay->id,
+ fourcc))
+ new_hw_overlay = true;
+ }
+
+ /*
+ * check if we need two overlays and only have 1 or
+ * if we had 2 overlays but will only need 1
+ */
+ if ((new_r_hw_overlay && !omap_state->r_overlay) ||
+ (!new_r_hw_overlay && omap_state->r_overlay))
+ new_hw_overlay = true;
+
+ if (new_hw_overlay) {
+ struct omap_hw_overlay *old_ovl = omap_state->overlay;
+ struct omap_hw_overlay *old_r_ovl = omap_state->r_overlay;
+ struct omap_hw_overlay *new_ovl = NULL;
+ struct omap_hw_overlay *new_r_ovl = NULL;
+
+ omap_overlay_release(state, old_ovl);
+ omap_overlay_release(state, old_r_ovl);
+
+ ret = omap_overlay_assign(state, plane, caps, fourcc, &new_ovl,
+ new_r_hw_overlay ? &new_r_ovl : NULL);
+ if (ret) {
+ DBG("%s: failed to assign hw_overlay", plane->name);
+ omap_state->overlay = NULL;
+ omap_state->r_overlay = NULL;
+ return ret;
+ }
+
+ omap_state->overlay = new_ovl;
+ if (new_r_hw_overlay)
+ omap_state->r_overlay = new_r_ovl;
+ else
+ omap_state->r_overlay = NULL;
+ }
+
+ DBG("plane: %s overlay_id: %d", plane->name, omap_state->overlay->id);
+
+ if (omap_state->r_overlay)
+ DBG("plane: %s r_overlay_id: %d", plane->name, omap_state->r_overlay->id);
+
return 0;
}
@@ -155,7 +370,7 @@ static void omap_plane_destroy(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- DBG("%s", omap_plane->name);
+ DBG("%s", plane->name);
drm_plane_cleanup(plane);
@@ -189,11 +404,17 @@ void omap_plane_install_properties(struct drm_plane *plane,
static void omap_plane_reset(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_plane_state *omap_state;
+
+ if (plane->state)
+ drm_atomic_helper_plane_destroy_state(plane, plane->state);
- drm_atomic_helper_plane_reset(plane);
- if (!plane->state)
+ omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
+ if (!omap_state)
return;
+ __drm_atomic_helper_plane_reset(plane, &omap_state->base);
+
/*
* Set the zpos default depending on whether we are a primary or overlay
* plane.
@@ -204,6 +425,47 @@ static void omap_plane_reset(struct drm_plane *plane)
plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE;
}
+static struct drm_plane_state *
+omap_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct omap_plane_state *state, *current_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ current_state = to_omap_plane_state(plane->state);
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+
+ state->overlay = current_state->overlay;
+ state->r_overlay = current_state->r_overlay;
+
+ return &state->base;
+}
+
+static void omap_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct omap_plane_state *omap_state = to_omap_plane_state(state);
+
+ if (omap_state->overlay)
+ drm_printf(p, "\toverlay=%s (caps=0x%x)\n",
+ omap_state->overlay->name,
+ omap_state->overlay->caps);
+ else
+ drm_printf(p, "\toverlay=None\n");
+ if (omap_state->r_overlay)
+ drm_printf(p, "\tr_overlay=%s (caps=0x%x)\n",
+ omap_state->r_overlay->name,
+ omap_state->r_overlay->caps);
+ else
+ drm_printf(p, "\tr_overlay=None\n");
+}
+
static int omap_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
@@ -239,10 +501,11 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.disable_plane = drm_atomic_helper_disable_plane,
.reset = omap_plane_reset,
.destroy = omap_plane_destroy,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_duplicate_state = omap_plane_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_set_property = omap_plane_atomic_set_property,
.atomic_get_property = omap_plane_atomic_get_property,
+ .atomic_print_state = omap_plane_atomic_print_state,
};
static bool omap_plane_supports_yuv(struct drm_plane *plane)
@@ -261,20 +524,6 @@ static bool omap_plane_supports_yuv(struct drm_plane *plane)
return false;
}
-static const char *plane_id_to_name[] = {
- [OMAP_DSS_GFX] = "gfx",
- [OMAP_DSS_VIDEO1] = "vid1",
- [OMAP_DSS_VIDEO2] = "vid2",
- [OMAP_DSS_VIDEO3] = "vid3",
-};
-
-static const enum omap_plane_id plane_idx_to_id[] = {
- OMAP_DSS_GFX,
- OMAP_DSS_VIDEO1,
- OMAP_DSS_VIDEO2,
- OMAP_DSS_VIDEO3,
-};
-
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
int idx, enum drm_plane_type type,
@@ -284,27 +533,25 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
struct drm_plane *plane;
struct omap_plane *omap_plane;
- enum omap_plane_id id;
int ret;
u32 nformats;
const u32 *formats;
- if (WARN_ON(idx >= ARRAY_SIZE(plane_idx_to_id)))
+ if (WARN_ON(idx >= num_planes))
return ERR_PTR(-EINVAL);
- id = plane_idx_to_id[idx];
-
- DBG("%s: type=%d", plane_id_to_name[id], type);
-
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane)
return ERR_PTR(-ENOMEM);
- formats = dispc_ovl_get_color_modes(priv->dispc, id);
+ omap_plane->id = idx;
+
+ DBG("%d: type=%d", omap_plane->id, type);
+ DBG(" crtc_mask: 0x%04x", possible_crtcs);
+
+ formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id);
for (nformats = 0; formats[nformats]; ++nformats)
;
- omap_plane->id = id;
- omap_plane->name = plane_id_to_name[id];
plane = &omap_plane->base;
@@ -334,8 +581,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
return plane;
error:
- dev_err(dev->dev, "%s(): could not create plane: %s\n",
- __func__, plane_id_to_name[id]);
+ dev_err(dev->dev, "%s(): could not create plane: %d\n",
+ __func__, omap_plane->id);
kfree(omap_plane);
return NULL;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.h b/drivers/gpu/drm/omapdrm/omap_plane.h
index 0c28fe8ffa20..a9a33e12722a 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.h
+++ b/drivers/gpu/drm/omapdrm/omap_plane.h
@@ -22,5 +22,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
u32 possible_crtcs);
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
+bool is_omap_plane_dual_overlay(struct drm_plane_state *state);
#endif /* __OMAPDRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index 8338dc665301..fde0208ec01e 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm
*
@@ -6,15 +7,6 @@
* Andy Gross <andy.gross@ti.com>
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- *
*/
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index cfc8d644cedf..434c2861bb40 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -37,6 +37,17 @@ config DRM_PANEL_ASUS_Z00T_TM5P5_NT35596
NT35596 1080x1920 video mode panel as found in some Asus
Zenfone 2 Laser Z00T devices.
+config DRM_PANEL_BOE_BF060Y8M_AJ0
+ tristate "Boe BF060Y8M-AJ0 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Boe BF060Y8M-AJ0
+ 5.99" AMOLED modules. The panel has a 1080x2160 resolution and
+ uses 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host and backlight is controlled through DSI commands.
+
config DRM_PANEL_BOE_HIMAX8279D
tristate "Boe Himax8279d panel"
depends on OF
@@ -141,7 +152,7 @@ config DRM_PANEL_ILITEK_ILI9341
tristate "Ilitek ILI9341 240x320 QVGA panels"
depends on OF && SPI
depends on DRM_KMS_HELPER
- depends on DRM_KMS_CMA_HELPER
+ depends on DRM_GEM_CMA_HELPER
depends on BACKLIGHT_CLASS_DEVICE
select DRM_MIPI_DBI
help
@@ -189,6 +200,15 @@ config DRM_PANEL_JDI_LT070ME05000
The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
24 bit per pixel.
+config DRM_PANEL_JDI_R63452
+ tristate "JDI R63452 Full HD DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the JDI R63452
+ DSI command mode panel as found in Xiaomi Mi 5 devices.
+
config DRM_PANEL_KHADAS_TS050
tristate "Khadas TS050 panel"
depends on OF
@@ -272,6 +292,17 @@ config DRM_PANEL_NOVATEK_NT35510
around the Novatek NT35510 display controller, such as some
Hydis panels.
+config DRM_PANEL_NOVATEK_NT35950
+ tristate "Novatek NT35950 DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Novatek NT35950 display controller, such as some
+ Sharp panels used in Sony Xperia Z5 Premium and XZ Premium
+ mobile phones.
+
config DRM_PANEL_NOVATEK_NT36672A
tristate "Novatek NT36672A DSI panel"
depends on OF
@@ -580,6 +611,16 @@ config DRM_PANEL_SONY_ACX565AKM
Say Y here if you want to enable support for the Sony ACX565AKM
800x600 3.5" panel (found on the Nokia N900).
+config DRM_PANEL_SONY_TULIP_TRULY_NT35521
+ tristate "Sony Tulip Truly NT35521 panel"
+ depends on GPIOLIB && OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Sony Tulip
+ NT35521 1280x720 video mode panel as found on the Sony Xperia M4
+ Aqua phone.
+
config DRM_PANEL_TDO_TL070WSH30
tristate "TDO TL070WSH30 DSI panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index bca4cc1f2715..d99fbbce49d1 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_DRM_PANEL_ABT_Y030XX067A) += panel-abt-y030xx067a.o
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
+obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
@@ -17,6 +18,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
+obj-$(CONFIG_DRM_PANEL_JDI_R63452) += panel-jdi-fhd-r63452.o
obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
@@ -25,6 +27,7 @@ obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
@@ -59,6 +62,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o
obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o
obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index 3d8a9ab47cae..f043b484055b 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -272,16 +272,14 @@ static int y030xx067a_probe(struct spi_device *spi)
return -EINVAL;
priv->supply = devm_regulator_get(dev, "power");
- if (IS_ERR(priv->supply)) {
- dev_err(dev, "Failed to get power supply\n");
- return PTR_ERR(priv->supply);
- }
+ if (IS_ERR(priv->supply))
+ return dev_err_probe(dev, PTR_ERR(priv->supply),
+ "Failed to get power supply\n");
priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(priv->reset_gpio)) {
- dev_err(dev, "Failed to get reset GPIO\n");
- return PTR_ERR(priv->reset_gpio);
- }
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset GPIO\n");
drm_panel_init(&priv->panel, dev, &y030xx067a_funcs,
DRM_MODE_CONNECTOR_DPI);
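The conversion above is one of several in this series: dev_err_probe() returns the error it is given and only prints when that error is not -EPROBE_DEFER (for deferral it records the reason for later inspection instead), so deferred probes stop spamming the log. A minimal sketch of the idiom; my_probe() and the "power" supply name are illustrative, not taken from the driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int my_probe(struct device *dev)
{
        struct regulator *supply;

        supply = devm_regulator_get(dev, "power");
        if (IS_ERR(supply))
                /* Logs (unless -EPROBE_DEFER) and returns the errno */
                return dev_err_probe(dev, PTR_ERR(supply),
                                     "Failed to get power supply\n");

        return 0;
}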
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
new file mode 100644
index 000000000000..ef00cd67dc40
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BOE BF060Y8M-AJ0 5.99" MIPI-DSI OLED Panel on SW43404 DriverIC
+ *
+ * Copyright (c) 2020 AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define DCS_ALLOW_HBM_RANGE 0x0c
+#define DCS_DISALLOW_HBM_RANGE 0x08
+
+enum boe_bf060y8m_aj0_supplies {
+ BF060Y8M_VREG_VCC,
+ BF060Y8M_VREG_VDDIO,
+ BF060Y8M_VREG_VCI,
+ BF060Y8M_VREG_EL_VDD,
+ BF060Y8M_VREG_EL_VSS,
+ BF060Y8M_VREG_MAX
+};
+
+struct boe_bf060y8m_aj0 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data vregs[BF060Y8M_VREG_MAX];
+ struct gpio_desc *reset_gpio;
+ bool prepared;
+};
+
+static inline
+struct boe_bf060y8m_aj0 *to_boe_bf060y8m_aj0(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_bf060y8m_aj0, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe)
+{
+ gpiod_set_value_cansleep(boe->reset_gpio, 0);
+ usleep_range(2000, 3000);
+ gpiod_set_value_cansleep(boe->reset_gpio, 1);
+ usleep_range(15000, 16000);
+ gpiod_set_value_cansleep(boe->reset_gpio, 0);
+ usleep_range(5000, 6000);
+}
+
+static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe)
+{
+ struct mipi_dsi_device *dsi = boe->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
+ dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
+ dsi_dcs_write_seq(dsi, 0xf8,
+ 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(30);
+
+ dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
+ dsi_dcs_write_seq(dsi, 0xc0,
+ 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
+ 0x2a, 0x31, 0x39, 0x20, 0x09);
+ dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
+ 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
+ 0x5c, 0x5c, 0x5c);
+ dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
+
+ msleep(30);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(50);
+
+ return 0;
+}
+
+static int boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
+{
+ struct mipi_dsi_device *dsi = boe->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ /* OFF commands sent in HS mode */
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ usleep_range(1000, 2000);
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
+{
+ struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
+ struct device *dev = &boe->dsi->dev;
+ int ret;
+
+ if (boe->prepared)
+ return 0;
+
+ /*
+ * Enable EL Driving Voltage first - doing that at the beginning
+ * or at the end of the power sequence doesn't matter, so enable
+ * it here to avoid yet another usleep at the end.
+ */
+ ret = regulator_enable(boe->vregs[BF060Y8M_VREG_EL_VDD].consumer);
+ if (ret)
+ return ret;
+ ret = regulator_enable(boe->vregs[BF060Y8M_VREG_EL_VSS].consumer);
+ if (ret)
+ goto err_elvss;
+
+ ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VCC].consumer);
+ if (ret)
+ goto err_vcc;
+ usleep_range(1000, 2000);
+ ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer);
+ if (ret)
+ goto err_vddio;
+ usleep_range(500, 1000);
+ ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VCI].consumer);
+ if (ret)
+ goto err_vci;
+ usleep_range(2000, 3000);
+
+ boe_bf060y8m_aj0_reset(boe);
+
+ ret = boe_bf060y8m_aj0_on(boe);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(boe->reset_gpio, 1);
+ return ret;
+ }
+
+ boe->prepared = true;
+ return 0;
+
+err_vci:
+ regulator_disable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer);
+err_vddio:
+ regulator_disable(boe->vregs[BF060Y8M_VREG_VCC].consumer);
+err_vcc:
+ regulator_disable(boe->vregs[BF060Y8M_VREG_EL_VSS].consumer);
+err_elvss:
+ regulator_disable(boe->vregs[BF060Y8M_VREG_EL_VDD].consumer);
+ return ret;
+}
+
+static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel)
+{
+ struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
+ struct device *dev = &boe->dsi->dev;
+ int ret;
+
+ if (!boe->prepared)
+ return 0;
+
+ ret = boe_bf060y8m_aj0_off(boe);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(boe->reset_gpio, 1);
+ ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
+
+ boe->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode boe_bf060y8m_aj0_mode = {
+ .clock = 165268,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 36,
+ .hsync_end = 1080 + 36 + 24,
+ .htotal = 1080 + 36 + 24 + 96,
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 16,
+ .vsync_end = 2160 + 16 + 1,
+ .vtotal = 2160 + 16 + 1 + 15,
+ .width_mm = 68, /* 68.04 mm */
+ .height_mm = 136, /* 136.08 mm */
+};
+
+static int boe_bf060y8m_aj0_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &boe_bf060y8m_aj0_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs boe_bf060y8m_aj0_panel_funcs = {
+ .prepare = boe_bf060y8m_aj0_prepare,
+ .unprepare = boe_bf060y8m_aj0_unprepare,
+ .get_modes = boe_bf060y8m_aj0_get_modes,
+};
+
+static int boe_bf060y8m_aj0_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int boe_bf060y8m_aj0_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ return brightness & 0xff;
+}
+
+static const struct backlight_ops boe_bf060y8m_aj0_bl_ops = {
+ .update_status = boe_bf060y8m_aj0_bl_update_status,
+ .get_brightness = boe_bf060y8m_aj0_bl_get_brightness,
+};
+
+static struct backlight_device *
+boe_bf060y8m_aj0_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 127,
+ .max_brightness = 255,
+ .scale = BACKLIGHT_SCALE_NON_LINEAR,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &boe_bf060y8m_aj0_bl_ops, &props);
+}
+
+static int boe_bf060y8m_aj0_init_vregs(struct boe_bf060y8m_aj0 *boe,
+ struct device *dev)
+{
+ struct regulator *vreg;
+ int ret;
+
+ boe->vregs[BF060Y8M_VREG_VCC].supply = "vcc";
+ boe->vregs[BF060Y8M_VREG_VDDIO].supply = "vddio";
+ boe->vregs[BF060Y8M_VREG_VCI].supply = "vci";
+ boe->vregs[BF060Y8M_VREG_EL_VDD].supply = "elvdd";
+ boe->vregs[BF060Y8M_VREG_EL_VSS].supply = "elvss";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(boe->vregs),
+ boe->vregs);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ vreg = boe->vregs[BF060Y8M_VREG_VCC].consumer;
+ ret = regulator_is_supported_voltage(vreg, 2700000, 3600000);
+ if (!ret)
+ return ret;
+
+ vreg = boe->vregs[BF060Y8M_VREG_VDDIO].consumer;
+ ret = regulator_is_supported_voltage(vreg, 1620000, 1980000);
+ if (!ret)
+ return ret;
+
+ vreg = boe->vregs[BF060Y8M_VREG_VCI].consumer;
+ ret = regulator_is_supported_voltage(vreg, 2600000, 3600000);
+ if (!ret)
+ return ret;
+
+ vreg = boe->vregs[BF060Y8M_VREG_EL_VDD].consumer;
+ ret = regulator_is_supported_voltage(vreg, 4400000, 4800000);
+ if (!ret)
+ return ret;
+
+ /* ELVSS is negative: -5.00V to -1.40V */
+ vreg = boe->vregs[BF060Y8M_VREG_EL_VSS].consumer;
+ ret = regulator_is_supported_voltage(vreg, 1400000, 5000000);
+ if (!ret)
+ return ret;
+
+ /*
+ * Set the min/max rated current, known only for VCI and VDDIO. If
+ * this fails, just go on gracefully: the step is not guaranteed to
+ * succeed on all regulator HW, so only emit a debug print to inform
+ * the developer.
+ * In any case, these two supplies are also optional, so they may be
+ * backed by a fixed regulator which, at the time of writing, does
+ * not support (fake) current limiting.
+ */
+ vreg = boe->vregs[BF060Y8M_VREG_VDDIO].consumer;
+ ret = regulator_set_current_limit(vreg, 1500, 2500);
+ if (ret)
+ dev_dbg(dev, "Current limit cannot be set on %s: %d\n",
+ boe->vregs[1].supply, ret);
+
+ vreg = boe->vregs[BF060Y8M_VREG_VCI].consumer;
+ ret = regulator_set_current_limit(vreg, 20000, 40000);
+ if (ret)
+ dev_dbg(dev, "Current limit cannot be set on %s: %d\n",
+ boe->vregs[2].supply, ret);
+
+ return 0;
+}
+
+static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct boe_bf060y8m_aj0 *boe;
+ int ret;
+
+ boe = devm_kzalloc(dev, sizeof(*boe), GFP_KERNEL);
+ if (!boe)
+ return -ENOMEM;
+
+ ret = boe_bf060y8m_aj0_init_vregs(boe, dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to initialize supplies.\n");
+
+ boe->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(boe->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(boe->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ boe->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, boe);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi);
+ if (IS_ERR(boe->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(boe->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&boe->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int boe_bf060y8m_aj0_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_bf060y8m_aj0 *boe = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&boe->panel);
+
+ return 0;
+}
+
+static const struct of_device_id boe_bf060y8m_aj0_of_match[] = {
+ { .compatible = "boe,bf060y8m-aj0" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_bf060y8m_aj0_of_match);
+
+static struct mipi_dsi_driver boe_bf060y8m_aj0_driver = {
+ .probe = boe_bf060y8m_aj0_probe,
+ .remove = boe_bf060y8m_aj0_remove,
+ .driver = {
+ .name = "panel-sw43404-boe-fhd-amoled",
+ .of_match_table = boe_bf060y8m_aj0_of_match,
+ },
+};
+module_mipi_dsi_driver(boe_bf060y8m_aj0_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>");
+MODULE_DESCRIPTION("BOE BF060Y8M-AJ0 MIPI-DSI OLED panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 529561b4fbbc..5fcbde789ddb 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -84,8 +84,8 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0x0D, 0x63),
_INIT_DCS_CMD(0x0E, 0x91),
_INIT_DCS_CMD(0x0F, 0x73),
- _INIT_DCS_CMD(0x95, 0xEB),
- _INIT_DCS_CMD(0x96, 0xEB),
+ _INIT_DCS_CMD(0x95, 0xE6),
+ _INIT_DCS_CMD(0x96, 0xF0),
_INIT_DCS_CMD(0x30, 0x11),
_INIT_DCS_CMD(0x6D, 0x66),
_INIT_DCS_CMD(0x75, 0xA2),
@@ -111,18 +111,18 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
- _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+ _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0),
_INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
- _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+ _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0),
_INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
- _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+ _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0),
_INIT_DCS_CMD(0xFF, 0x24),
_INIT_DCS_CMD(0xFB, 0x01),
@@ -225,6 +225,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0x7F, 0x3C),
_INIT_DCS_CMD(0x82, 0x04),
_INIT_DCS_CMD(0x97, 0xC0),
+
_INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
_INIT_DCS_CMD(0x91, 0x44),
_INIT_DCS_CMD(0x92, 0xA9),
@@ -332,12 +333,39 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0x34, 0x78),
_INIT_DCS_CMD(0x35, 0x16),
_INIT_DCS_CMD(0xC8, 0x04),
- _INIT_DCS_CMD(0xC9, 0x80),
+ _INIT_DCS_CMD(0xC9, 0x9E),
_INIT_DCS_CMD(0xCA, 0x4E),
_INIT_DCS_CMD(0xCB, 0x00),
- _INIT_DCS_CMD(0xA9, 0x4C),
- _INIT_DCS_CMD(0xAA, 0x47),
+ _INIT_DCS_CMD(0xA9, 0x49),
+ _INIT_DCS_CMD(0xAA, 0x4B),
+ _INIT_DCS_CMD(0xAB, 0x48),
+ _INIT_DCS_CMD(0xAC, 0x43),
+ _INIT_DCS_CMD(0xAD, 0x40),
+ _INIT_DCS_CMD(0xAE, 0x50),
+ _INIT_DCS_CMD(0xAF, 0x44),
+ _INIT_DCS_CMD(0xB0, 0x54),
+ _INIT_DCS_CMD(0xB1, 0x4E),
+ _INIT_DCS_CMD(0xB2, 0x4D),
+ _INIT_DCS_CMD(0xB3, 0x4C),
+ _INIT_DCS_CMD(0xB4, 0x41),
+ _INIT_DCS_CMD(0xB5, 0x47),
+ _INIT_DCS_CMD(0xB6, 0x53),
+ _INIT_DCS_CMD(0xB7, 0x3E),
+ _INIT_DCS_CMD(0xB8, 0x51),
+ _INIT_DCS_CMD(0xB9, 0x3C),
+ _INIT_DCS_CMD(0xBA, 0x3B),
+ _INIT_DCS_CMD(0xBB, 0x46),
+ _INIT_DCS_CMD(0xBC, 0x45),
+ _INIT_DCS_CMD(0xBD, 0x55),
+ _INIT_DCS_CMD(0xBE, 0x3D),
+ _INIT_DCS_CMD(0xBF, 0x3F),
+ _INIT_DCS_CMD(0xC0, 0x52),
+ _INIT_DCS_CMD(0xC1, 0x4A),
+ _INIT_DCS_CMD(0xC2, 0x39),
+ _INIT_DCS_CMD(0xC3, 0x4F),
+ _INIT_DCS_CMD(0xC4, 0x3A),
+ _INIT_DCS_CMD(0xC5, 0x42),
_INIT_DCS_CMD(0xFF, 0x27),
_INIT_DCS_CMD(0xFB, 0x01),
@@ -419,7 +447,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
{},
};
-static const struct panel_init_cmd inx_init_cmd[] = {
+static const struct panel_init_cmd inx_hj110iz_init_cmd[] = {
_INIT_DCS_CMD(0xFF, 0x20),
_INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x05, 0xD1),
@@ -428,10 +456,10 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0x08, 0x4B),
_INIT_DCS_CMD(0x0E, 0x91),
_INIT_DCS_CMD(0x0F, 0x69),
- _INIT_DCS_CMD(0x95, 0xFF),
- _INIT_DCS_CMD(0x96, 0xFF),
- _INIT_DCS_CMD(0x9D, 0x0A),
- _INIT_DCS_CMD(0x9E, 0x0A),
+ _INIT_DCS_CMD(0x95, 0xF5),
+ _INIT_DCS_CMD(0x96, 0xF5),
+ _INIT_DCS_CMD(0x9D, 0x00),
+ _INIT_DCS_CMD(0x9E, 0x00),
_INIT_DCS_CMD(0x69, 0x98),
_INIT_DCS_CMD(0x75, 0xA2),
_INIT_DCS_CMD(0x77, 0xB3),
@@ -493,17 +521,17 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0x2A, 0x03),
_INIT_DCS_CMD(0x2B, 0x03),
- _INIT_DCS_CMD(0x2F, 0x06),
+ _INIT_DCS_CMD(0x2F, 0x05),
_INIT_DCS_CMD(0x30, 0x32),
_INIT_DCS_CMD(0x31, 0x43),
- _INIT_DCS_CMD(0x33, 0x06),
+ _INIT_DCS_CMD(0x33, 0x05),
_INIT_DCS_CMD(0x34, 0x32),
_INIT_DCS_CMD(0x35, 0x43),
_INIT_DCS_CMD(0x37, 0x44),
_INIT_DCS_CMD(0x38, 0x40),
_INIT_DCS_CMD(0x39, 0x00),
- _INIT_DCS_CMD(0x3A, 0x01),
- _INIT_DCS_CMD(0x3B, 0x48),
+ _INIT_DCS_CMD(0x3A, 0x18),
+ _INIT_DCS_CMD(0x3B, 0x00),
_INIT_DCS_CMD(0x3D, 0x93),
_INIT_DCS_CMD(0xAB, 0x44),
_INIT_DCS_CMD(0xAC, 0x40),
@@ -520,8 +548,8 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0x56, 0x08),
_INIT_DCS_CMD(0x58, 0x21),
_INIT_DCS_CMD(0x59, 0x40),
- _INIT_DCS_CMD(0x5A, 0x09),
- _INIT_DCS_CMD(0x5B, 0x48),
+ _INIT_DCS_CMD(0x5A, 0x00),
+ _INIT_DCS_CMD(0x5B, 0x2C),
_INIT_DCS_CMD(0x5E, 0x00, 0x10),
_INIT_DCS_CMD(0x5F, 0x00),
@@ -558,33 +586,36 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0xEF, 0x01),
_INIT_DCS_CMD(0xF0, 0x7A),
+ _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
_INIT_DCS_CMD(0xFF, 0x25),
_INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x05, 0x00),
+ _INIT_DCS_CMD(0x13, 0x02),
+ _INIT_DCS_CMD(0x14, 0xDF),
_INIT_DCS_CMD(0xF1, 0x10),
_INIT_DCS_CMD(0x1E, 0x00),
- _INIT_DCS_CMD(0x1F, 0x09),
- _INIT_DCS_CMD(0x20, 0x46),
+ _INIT_DCS_CMD(0x1F, 0x00),
+ _INIT_DCS_CMD(0x20, 0x2C),
_INIT_DCS_CMD(0x25, 0x00),
- _INIT_DCS_CMD(0x26, 0x09),
- _INIT_DCS_CMD(0x27, 0x46),
+ _INIT_DCS_CMD(0x26, 0x00),
+ _INIT_DCS_CMD(0x27, 0x2C),
_INIT_DCS_CMD(0x3F, 0x80),
_INIT_DCS_CMD(0x40, 0x00),
_INIT_DCS_CMD(0x43, 0x00),
- _INIT_DCS_CMD(0x44, 0x09),
- _INIT_DCS_CMD(0x45, 0x46),
+ _INIT_DCS_CMD(0x44, 0x18),
+ _INIT_DCS_CMD(0x45, 0x00),
- _INIT_DCS_CMD(0x48, 0x09),
- _INIT_DCS_CMD(0x49, 0x46),
+ _INIT_DCS_CMD(0x48, 0x00),
+ _INIT_DCS_CMD(0x49, 0x2C),
_INIT_DCS_CMD(0x5B, 0x80),
_INIT_DCS_CMD(0x5C, 0x00),
- _INIT_DCS_CMD(0x5D, 0x01),
- _INIT_DCS_CMD(0x5E, 0x46),
- _INIT_DCS_CMD(0x61, 0x01),
- _INIT_DCS_CMD(0x62, 0x46),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0x00),
+ _INIT_DCS_CMD(0x61, 0x00),
+ _INIT_DCS_CMD(0x62, 0x2C),
_INIT_DCS_CMD(0x68, 0x10),
_INIT_DCS_CMD(0xFF, 0x26),
_INIT_DCS_CMD(0xFB, 0x01),
@@ -700,16 +731,22 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0xA3, 0x30),
_INIT_DCS_CMD(0xA4, 0xC0),
_INIT_DCS_CMD(0xE8, 0x00),
+ _INIT_DCS_CMD(0x97, 0x3C),
+ _INIT_DCS_CMD(0x98, 0x02),
+ _INIT_DCS_CMD(0x99, 0x95),
+ _INIT_DCS_CMD(0x9A, 0x06),
+ _INIT_DCS_CMD(0x9B, 0x00),
+ _INIT_DCS_CMD(0x9C, 0x0B),
+ _INIT_DCS_CMD(0x9D, 0x0A),
+ _INIT_DCS_CMD(0x9E, 0x90),
_INIT_DCS_CMD(0xFF, 0xF0),
_INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x3A, 0x08),
_INIT_DCS_CMD(0xFF, 0xD0),
_INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x00, 0x33),
- _INIT_DCS_CMD(0x02, 0x77),
_INIT_DCS_CMD(0x08, 0x01),
_INIT_DCS_CMD(0x09, 0xBF),
- _INIT_DCS_CMD(0x28, 0x30),
_INIT_DCS_CMD(0x2F, 0x33),
_INIT_DCS_CMD(0xFF, 0x23),
_INIT_DCS_CMD(0xFB, 0x01),
@@ -718,6 +755,9 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0xFF, 0x20),
_INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x30, 0x00),
+ _INIT_DCS_CMD(0xFF, 0x24),
+ _INIT_DCS_CMD(0x5C, 0x88),
+ _INIT_DCS_CMD(0x5D, 0x08),
_INIT_DCS_CMD(0xFF, 0x10),
_INIT_DCS_CMD(0xB9, 0x01),
_INIT_DCS_CMD(0xFF, 0x20),
@@ -1312,7 +1352,7 @@ static const struct panel_desc inx_hj110iz_desc = {
| MIPI_DSI_MODE_VIDEO_HSE
| MIPI_DSI_CLOCK_NON_CONTINUOUS
| MIPI_DSI_MODE_VIDEO_BURST,
- .init_cmds = inx_init_cmd,
+ .init_cmds = inx_hj110iz_init_cmd,
};
static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
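The rename from inx_init_cmd to inx_hj110iz_init_cmd ties the table to the inx_hj110iz_desc that consumes it. These panels are table-driven: the descriptor points at an init-command array that is replayed at power-on. A hedged sketch of that pattern, with hypothetical type and helper names (the file's own panel_init_cmd plumbing differs in detail):

#include <linux/types.h>
#include <drm/drm_mipi_dsi.h>

struct init_cmd {
        size_t len;
        const u8 *data;
};

static int send_init_table(struct mipi_dsi_device *dsi,
                           const struct init_cmd *cmds, size_t n)
{
        size_t i;
        int ret;

        for (i = 0; i < n; i++) {
                /* Each table entry becomes one DCS write */
                ret = mipi_dsi_dcs_write_buffer(dsi, cmds[i].data,
                                                cmds[i].len);
                if (ret < 0)
                        return ret;
        }

        return 0;
}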
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index da4a69067e18..b58cb064975f 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -248,7 +248,7 @@ static ssize_t num_dsi_errors_show(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+ return sysfs_emit(buf, "%d\n", errors);
}
static ssize_t hw_revision_show(struct device *dev,
@@ -268,7 +268,7 @@ static ssize_t hw_revision_show(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+ return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3);
}
static DEVICE_ATTR_RO(num_dsi_errors);
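sysfs_emit() is the preferred replacement for snprintf() in sysfs show() callbacks: it encodes the contract that the buffer is a single page-aligned PAGE_SIZE page and warns when handed anything else. A minimal sketch, using a hypothetical attribute rather than one of this driver's:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t revision_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        /* Output is bounded to the one-page sysfs buffer */
        return sysfs_emit(buf, "%02x\n", 0x42);
}
static DEVICE_ATTR_RO(revision);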
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index fc03046de134..176ef0c3cc1d 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -196,10 +196,10 @@ struct edp_panel_entry {
/** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */
u32 panel_id;
- /* @delay: The power sequencing delays needed for this panel. */
+ /** @delay: The power sequencing delays needed for this panel. */
const struct panel_delay *delay;
- /* @name: Name of this panel (for printing to logs). */
+ /** @name: Name of this panel (for printing to logs). */
const char *name;
};
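The hunk only adds a second asterisk, but that is what turns an ordinary comment into kernel-doc: scripts/kernel-doc parses member documentation only from comments opening with /**. The convention, sketched on a hypothetical struct:

#include <linux/types.h>

/**
 * struct example_entry - hypothetical struct, for illustration only
 * @id:   numeric identifier for the entry
 * @name: human-readable name (for printing to logs)
 */
struct example_entry {
        u32 id;
        const char *name;
};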
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index 2a602aee61c3..cb0bb3076099 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -456,16 +456,13 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
ctx->supplies);
- if (ret < 0) {
- dev_err(&dsi->dev, "Couldn't get regulators\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&dsi->dev, ret, "Couldn't get regulators\n");
ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset)) {
- dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
- return PTR_ERR(ctx->reset);
- }
+ if (IS_ERR(ctx->reset))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
+ "Couldn't get our reset GPIO\n");
drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 581661b506f8..a9cd7135cb51 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -200,22 +200,19 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
DRM_MODE_CONNECTOR_DSI);
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
- if (IS_ERR(ctx->dvdd)) {
- dev_err(&dsi->dev, "Couldn't get dvdd regulator\n");
- return PTR_ERR(ctx->dvdd);
- }
+ if (IS_ERR(ctx->dvdd))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->dvdd),
+ "Couldn't get dvdd regulator\n");
ctx->avdd = devm_regulator_get(&dsi->dev, "avdd");
- if (IS_ERR(ctx->avdd)) {
- dev_err(&dsi->dev, "Couldn't get avdd regulator\n");
- return PTR_ERR(ctx->avdd);
- }
+ if (IS_ERR(ctx->avdd))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->avdd),
+ "Couldn't get avdd regulator\n");
ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset)) {
- dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
- return PTR_ERR(ctx->reset);
- }
+ if (IS_ERR(ctx->reset))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
+ "Couldn't get our reset GPIO\n");
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
@@ -227,7 +224,13 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = 4;
- return mipi_dsi_attach(dsi);
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
}
static int feiyang_dsi_remove(struct mipi_dsi_device *dsi)
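This is another recurring fix in the series: if mipi_dsi_attach() failed after drm_panel_add(), the panel stayed on the global panel list. The corrected probe epilogue, sketched with a hypothetical context struct:

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>

struct my_ctx {
        struct drm_panel panel;
};

static int my_probe_tail(struct my_ctx *ctx, struct mipi_dsi_device *dsi)
{
        int ret;

        drm_panel_add(&ctx->panel);

        ret = mipi_dsi_attach(dsi);
        if (ret < 0) {
                /* Undo drm_panel_add() so no stale panel stays listed */
                drm_panel_remove(&ctx->panel);
                return ret;
        }

        return 0;
}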
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 534dd7414d42..ba30d11547ad 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -42,6 +42,7 @@ struct ili9881c_desc {
const struct ili9881c_instr *init;
const size_t init_length;
const struct drm_display_mode *mode;
+ const unsigned long mode_flags;
};
struct ili9881c {
@@ -51,6 +52,8 @@ struct ili9881c {
struct regulator *power;
struct gpio_desc *reset;
+
+ enum drm_panel_orientation orientation;
};
#define ILI9881C_SWITCH_PAGE_INSTR(_page) \
@@ -453,6 +456,213 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */
};
+static const struct ili9881c_instr w552946ab_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x53),
+ ILI9881C_COMMAND_INSTR(0x04, 0x53),
+ ILI9881C_COMMAND_INSTR(0x05, 0x13),
+ ILI9881C_COMMAND_INSTR(0x06, 0x04),
+ ILI9881C_COMMAND_INSTR(0x07, 0x02),
+ ILI9881C_COMMAND_INSTR(0x08, 0x02),
+ ILI9881C_COMMAND_INSTR(0x09, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0A, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0B, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0C, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0D, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0E, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0F, 0x00),
+
+ ILI9881C_COMMAND_INSTR(0x10, 0x00),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x08),
+ ILI9881C_COMMAND_INSTR(0x16, 0x10),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x08),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1A, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1B, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1C, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1D, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1E, 0xC0),
+ ILI9881C_COMMAND_INSTR(0x1F, 0x80),
+
+ ILI9881C_COMMAND_INSTR(0x20, 0x02),
+ ILI9881C_COMMAND_INSTR(0x21, 0x09),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x55),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2A, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2B, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2C, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2D, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2E, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2F, 0x00),
+
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x05),
+ ILI9881C_COMMAND_INSTR(0x36, 0x05),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x39, 0x35),
+ ILI9881C_COMMAND_INSTR(0x3A, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3B, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3C, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3D, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3E, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3F, 0x00),
+
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x88),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x1F),
+
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xaB),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5A, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5B, 0xAB),
+ ILI9881C_COMMAND_INSTR(0x5C, 0xCD),
+ ILI9881C_COMMAND_INSTR(0x5D, 0xEF),
+ ILI9881C_COMMAND_INSTR(0x5E, 0x03),
+ ILI9881C_COMMAND_INSTR(0x5F, 0x14),
+
+ ILI9881C_COMMAND_INSTR(0x60, 0x15),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x65, 0x10),
+ ILI9881C_COMMAND_INSTR(0x66, 0x11),
+ ILI9881C_COMMAND_INSTR(0x67, 0x08),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x6A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6B, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6D, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x02),
+
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x06),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x14),
+ ILI9881C_COMMAND_INSTR(0x76, 0x15),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x7A, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x7B, 0x11),
+ ILI9881C_COMMAND_INSTR(0x7C, 0x10),
+ ILI9881C_COMMAND_INSTR(0x7D, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7E, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7F, 0x0A),
+
+ ILI9881C_COMMAND_INSTR(0x80, 0x02),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x02),
+ ILI9881C_COMMAND_INSTR(0x83, 0x02),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x08),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x00, 0x80),
+ ILI9881C_COMMAND_INSTR(0x70, 0x00),
+ ILI9881C_COMMAND_INSTR(0x71, 0x00),
+ ILI9881C_COMMAND_INSTR(0x66, 0xFE),
+ ILI9881C_COMMAND_INSTR(0x82, 0x15),
+ ILI9881C_COMMAND_INSTR(0x84, 0x15),
+ ILI9881C_COMMAND_INSTR(0x85, 0x15),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x24),
+ ILI9881C_COMMAND_INSTR(0x32, 0xAC),
+ ILI9881C_COMMAND_INSTR(0x8C, 0x80),
+ ILI9881C_COMMAND_INSTR(0x3C, 0xF5),
+ ILI9881C_COMMAND_INSTR(0x88, 0x33),
+
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x53, 0x78),
+ ILI9881C_COMMAND_INSTR(0x50, 0x5B),
+ ILI9881C_COMMAND_INSTR(0x51, 0x5B),
+ ILI9881C_COMMAND_INSTR(0x60, 0x20),
+ ILI9881C_COMMAND_INSTR(0x61, 0x00),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x63, 0x00),
+
+ ILI9881C_COMMAND_INSTR(0xA0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x10),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x13),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x26),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1D),
+ ILI9881C_COMMAND_INSTR(0xA8, 0x67),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xAB, 0x5B),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x26),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x28),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x5C),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x30),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x31),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x2E),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x32),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x00),
+
+ ILI9881C_COMMAND_INSTR(0xC0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x10),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x13),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x26),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1D),
+ ILI9881C_COMMAND_INSTR(0xC8, 0x67),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xCB, 0x5B),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x26),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x28),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x5C),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x30),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x31),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x2E),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x32),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x00),
+ ILI9881C_SWITCH_PAGE_INSTR(0),
+};
+
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -603,6 +813,23 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.height_mm = 217,
};
+static const struct drm_display_mode w552946aba_default_mode = {
+ .clock = 64000,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 40,
+ .hsync_end = 720 + 40 + 10,
+ .htotal = 720 + 40 + 10 + 40,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 22,
+ .vsync_end = 1280 + 22 + 4,
+ .vtotal = 1280 + 22 + 4 + 11,
+
+ .width_mm = 68,
+ .height_mm = 121,
+};
+
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -626,6 +853,8 @@ static int ili9881c_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
+ drm_connector_set_panel_orientation(connector, ctx->orientation);
+
return 1;
}
@@ -653,15 +882,20 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
DRM_MODE_CONNECTOR_DSI);
ctx->power = devm_regulator_get(&dsi->dev, "power");
- if (IS_ERR(ctx->power)) {
- dev_err(&dsi->dev, "Couldn't get our power regulator\n");
- return PTR_ERR(ctx->power);
- }
-
- ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset)) {
- dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
- return PTR_ERR(ctx->reset);
+ if (IS_ERR(ctx->power))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->power),
+ "Couldn't get our power regulator\n");
+
+ ctx->reset = devm_gpiod_get_optional(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset))
+ return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
+ "Couldn't get our reset GPIO\n");
+
+ ret = of_drm_get_panel_orientation(dsi->dev.of_node, &ctx->orientation);
+ if (ret) {
+ dev_err(&dsi->dev, "%pOF: failed to get orientation: %d\n",
+ dsi->dev.of_node, ret);
+ return ret;
}
ret = drm_panel_of_backlight(&ctx->panel);
@@ -670,7 +904,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
drm_panel_add(&ctx->panel);
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ dsi->mode_flags = ctx->desc->mode_flags;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = 4;
@@ -691,17 +925,28 @@ static const struct ili9881c_desc lhr050h41_desc = {
.init = lhr050h41_init,
.init_length = ARRAY_SIZE(lhr050h41_init),
.mode = &lhr050h41_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
};
static const struct ili9881c_desc k101_im2byl02_desc = {
.init = k101_im2byl02_init,
.init_length = ARRAY_SIZE(k101_im2byl02_init),
.mode = &k101_im2byl02_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+};
+
+static const struct ili9881c_desc w552946aba_desc = {
+ .init = w552946ab_init,
+ .init_length = ARRAY_SIZE(w552946ab_init),
+ .mode = &w552946aba_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
};
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
+ { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ }
};
MODULE_DEVICE_TABLE(of, ili9881c_of_match);
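Besides the new w552946aba entry, the ili9881c changes thread panel orientation from the device tree through to the connector. The two calls the diff adds, collected into one sketch (names follow the diff):

        /* probe: read e.g. "rotation = <90>;" from the panel's DT node */
        ret = of_drm_get_panel_orientation(dsi->dev.of_node,
                                           &ctx->orientation);
        if (ret)
                return ret;

        /* get_modes: forward it so compositors can rotate the image */
        drm_connector_set_panel_orientation(connector, ctx->orientation);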
diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
index 34b98f70bd22..c558de3f99be 100644
--- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c
+++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
@@ -198,16 +198,14 @@ static int ej030na_probe(struct spi_device *spi)
return -EINVAL;
priv->supply = devm_regulator_get(dev, "power");
- if (IS_ERR(priv->supply)) {
- dev_err(dev, "Failed to get power supply\n");
- return PTR_ERR(priv->supply);
- }
+ if (IS_ERR(priv->supply))
+ return dev_err_probe(dev, PTR_ERR(priv->supply),
+ "Failed to get power supply\n");
priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(priv->reset_gpio)) {
- dev_err(dev, "Failed to get reset GPIO\n");
- return PTR_ERR(priv->reset_gpio);
- }
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset GPIO\n");
drm_panel_init(&priv->panel, dev, &ej030na_funcs,
DRM_MODE_CONNECTOR_DPI);
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index aea316225391..f194b62e290c 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -484,6 +484,7 @@ static void innolux_panel_del(struct innolux_panel *innolux)
static int innolux_panel_probe(struct mipi_dsi_device *dsi)
{
const struct panel_desc *desc;
+ struct innolux_panel *innolux;
int err;
desc = of_device_get_match_data(&dsi->dev);
@@ -495,7 +496,14 @@ static int innolux_panel_probe(struct mipi_dsi_device *dsi)
if (err < 0)
return err;
- return mipi_dsi_attach(dsi);
+ err = mipi_dsi_attach(dsi);
+ if (err < 0) {
+ innolux = mipi_dsi_get_drvdata(dsi);
+ innolux_panel_del(innolux);
+ return err;
+ }
+
+ return 0;
}
static int innolux_panel_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
new file mode 100644
index 000000000000..31eafbc38ec0
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Raffaele Tranquillini <raffaele.tranquillini@gmail.com>
+ *
+ * Generated using linux-mdss-dsi-panel-driver-generator from Lineage OS device tree:
+ * https://github.com/LineageOS/android_kernel_xiaomi_msm8996/blob/lineage-18.1/arch/arm/boot/dts/qcom/a1-msm8996-mtp.dtsi
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct jdi_fhd_r63452 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ bool prepared;
+};
+
+static inline struct jdi_fhd_r63452 *to_jdi_fhd_r63452(struct drm_panel *panel)
+{
+ return container_of(panel, struct jdi_fhd_r63452, panel);
+}
+
+#define dsi_generic_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+#define dsi_dcs_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static void jdi_fhd_r63452_reset(struct jdi_fhd_r63452 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ dsi_generic_write_seq(dsi, 0xb0, 0x00);
+ dsi_generic_write_seq(dsi, 0xd6, 0x01);
+ dsi_generic_write_seq(dsi, 0xec,
+ 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
+ 0x13, 0x15, 0x68, 0x0b, 0xb5);
+ dsi_generic_write_seq(dsi, 0xb0, 0x03);
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear on: %d\n", ret);
+ return ret;
+ }
+
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set pixel format: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_column_address(dsi, 0x0000, 0x0437);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set column address: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0x0000, 0x077f);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set page address: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x0000);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear scanline: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display brightness: %d\n", ret);
+ return ret;
+ }
+
+ dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00);
+ dsi_dcs_write_seq(dsi, 0x84, 0x00);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(80);
+
+ dsi_generic_write_seq(dsi, 0xb0, 0x04);
+ dsi_dcs_write_seq(dsi, 0x84, 0x00);
+ dsi_generic_write_seq(dsi, 0xc8, 0x11);
+ dsi_generic_write_seq(dsi, 0xb0, 0x03);
+
+ return 0;
+}
+
+static int jdi_fhd_r63452_off(struct jdi_fhd_r63452 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ dsi_generic_write_seq(dsi, 0xb0, 0x00);
+ dsi_generic_write_seq(dsi, 0xd6, 0x01);
+ dsi_generic_write_seq(dsi, 0xec,
+ 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
+ 0x13, 0x15, 0x68, 0x0b, 0x95);
+ dsi_generic_write_seq(dsi, 0xb0, 0x03);
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ usleep_range(2000, 3000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static int jdi_fhd_r63452_prepare(struct drm_panel *panel)
+{
+ struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ jdi_fhd_r63452_reset(ctx);
+
+ ret = jdi_fhd_r63452_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int jdi_fhd_r63452_unprepare(struct drm_panel *panel)
+{
+ struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = jdi_fhd_r63452_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode jdi_fhd_r63452_mode = {
+ .clock = (1080 + 120 + 16 + 40) * (1920 + 4 + 2 + 4) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 120,
+ .hsync_end = 1080 + 120 + 16,
+ .htotal = 1080 + 120 + 16 + 40,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 4,
+ .vsync_end = 1920 + 4 + 2,
+ .vtotal = 1920 + 4 + 2 + 4,
+ .width_mm = 64,
+ .height_mm = 114,
+};
+
+static int jdi_fhd_r63452_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &jdi_fhd_r63452_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs jdi_fhd_r63452_panel_funcs = {
+ .prepare = jdi_fhd_r63452_prepare,
+ .unprepare = jdi_fhd_r63452_unprepare,
+ .get_modes = jdi_fhd_r63452_get_modes,
+};
+
+static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct jdi_fhd_r63452 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int jdi_fhd_r63452_remove(struct mipi_dsi_device *dsi)
+{
+ struct jdi_fhd_r63452 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id jdi_fhd_r63452_of_match[] = {
+ { .compatible = "jdi,fhd-r63452" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, jdi_fhd_r63452_of_match);
+
+static struct mipi_dsi_driver jdi_fhd_r63452_driver = {
+ .probe = jdi_fhd_r63452_probe,
+ .remove = jdi_fhd_r63452_remove,
+ .driver = {
+ .name = "panel-jdi-fhd-r63452",
+ .of_match_table = jdi_fhd_r63452_of_match,
+ },
+};
+module_mipi_dsi_driver(jdi_fhd_r63452_driver);
+
+MODULE_AUTHOR("Raffaele Tranquillini <raffaele.tranquillini@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for JDI FHD R63452 DSI panel, command mode");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 733010b5e4f5..3c86ad262d5e 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -473,7 +473,13 @@ static int jdi_panel_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- return mipi_dsi_attach(dsi);
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ jdi_panel_del(jdi);
+ return ret;
+ }
+
+ return 0;
}
static int jdi_panel_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 86e4213e8bb1..daccb1fd5fda 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -406,7 +406,13 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
if (err < 0)
return err;
- return mipi_dsi_attach(dsi);
+ err = mipi_dsi_attach(dsi);
+ if (err < 0) {
+ kingdisplay_panel_del(kingdisplay);
+ return err;
+ }
+
+ return 0;
}
static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 59a8d99e777d..27a1c9923b09 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -20,6 +20,7 @@
#include <video/videomode.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
struct panel_lvds {
@@ -116,7 +117,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
{
struct device_node *np = lvds->dev->of_node;
struct display_timing timing;
- const char *mapping;
int ret;
ret = of_drm_get_panel_orientation(np, &lvds->orientation);
@@ -149,24 +149,14 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
of_property_read_string(np, "label", &lvds->label);
- ret = of_property_read_string(np, "data-mapping", &mapping);
+ ret = drm_of_lvds_get_data_mapping(np);
if (ret < 0) {
dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
np, "data-mapping");
- return -ENODEV;
+ return ret;
}
- if (!strcmp(mapping, "jeida-18")) {
- lvds->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG;
- } else if (!strcmp(mapping, "jeida-24")) {
- lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
- } else if (!strcmp(mapping, "vesa-24")) {
- lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
- } else {
- dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
- np, "data-mapping");
- return -EINVAL;
- }
+ lvds->bus_format = ret;
lvds->data_mirror = of_property_read_bool(np, "data-mirror");
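The panel-lvds change drops the open-coded jeida-18/jeida-24/vesa-24 string matching in favour of the shared helper from <drm/drm_of.h> (added to the includes above): drm_of_lvds_get_data_mapping() returns a MEDIA_BUS_FMT_* value on success or a negative errno, so the result can be assigned directly. The resulting shape of the parse step:

        /* MEDIA_BUS_FMT_* on success, negative errno if absent/unknown */
        ret = drm_of_lvds_get_data_mapping(np);
        if (ret < 0)
                return ret;

        lvds->bus_format = ret;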
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
new file mode 100644
index 000000000000..288c7fa83ecc
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Novatek NT35950 DriverIC panels driver
+ *
+ * Copyright (c) 2021 AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ */
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define MCS_CMD_MAUCCTR 0xf0 /* Manufacturer command enable */
+#define MCS_PARAM_SCALER_FUNCTION 0x58 /* Scale-up function */
+#define MCS_PARAM_SCALEUP_MODE 0xc9
+ #define MCS_SCALEUP_SIMPLE 0x0
+ #define MCS_SCALEUP_BILINEAR BIT(0)
+ #define MCS_SCALEUP_DUPLICATE (BIT(0) | BIT(4))
+
+/* VESA Display Stream Compression param */
+#define MCS_PARAM_VESA_DSC_ON 0x03
+
+/* Data Compression mode */
+#define MCS_PARAM_DATA_COMPRESSION 0x90
+ #define MCS_DATA_COMPRESSION_NONE 0x00
+ #define MCS_DATA_COMPRESSION_FBC 0x02
+ #define MCS_DATA_COMPRESSION_DSC 0x03
+
+/* Display Output control */
+#define MCS_PARAM_DISP_OUTPUT_CTRL 0xb4
+ #define MCS_DISP_OUT_SRAM_EN BIT(0)
+ #define MCS_DISP_OUT_VIDEO_MODE BIT(4)
+
+/* VESA Display Stream Compression setting */
+#define MCS_PARAM_VESA_DSC_SETTING 0xc0
+
+/* SubPixel Rendering (SPR) */
+#define MCS_PARAM_SPR_EN 0xe3
+#define MCS_PARAM_SPR_MODE 0xef
+ #define MCS_SPR_MODE_YYG_RAINBOW_RGB 0x01
+
+#define NT35950_VREG_MAX 4
+
+struct nt35950 {
+ struct drm_panel panel;
+ struct drm_connector *connector;
+ struct mipi_dsi_device *dsi[2];
+ struct regulator_bulk_data vregs[NT35950_VREG_MAX];
+ struct gpio_desc *reset_gpio;
+ const struct nt35950_panel_desc *desc;
+
+ int cur_mode;
+ u8 last_page;
+ bool prepared;
+};
+
+struct nt35950_panel_mode {
+ const struct drm_display_mode mode;
+
+ bool enable_sram;
+ bool is_video_mode;
+ u8 scaler_on;
+ u8 scaler_mode;
+ u8 compression;
+ u8 spr_en;
+ u8 spr_mode;
+};
+
+struct nt35950_panel_desc {
+ const char *model_name;
+ const struct mipi_dsi_device_info dsi_info;
+ const struct nt35950_panel_mode *mode_data;
+
+ bool is_dual_dsi;
+ u8 num_lanes;
+ u8 num_modes;
+};
+
+static inline struct nt35950 *to_nt35950(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt35950, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static void nt35950_reset(struct nt35950 *nt)
+{
+ gpiod_set_value_cansleep(nt->reset_gpio, 1);
+ usleep_range(12000, 13000);
+ gpiod_set_value_cansleep(nt->reset_gpio, 0);
+ usleep_range(300, 400);
+ gpiod_set_value_cansleep(nt->reset_gpio, 1);
+ usleep_range(12000, 13000);
+}
+
+/*
+ * nt35950_set_cmd2_page - Select manufacturer control (CMD2) page
+ * @nt: Main driver structure
+ * @page: Page number (0-7)
+ *
+ * Return: 0 on success or a negative number on error
+ */
+static int nt35950_set_cmd2_page(struct nt35950 *nt, u8 page)
+{
+ const u8 mauc_cmd2_page[] = { MCS_CMD_MAUCCTR, 0x55, 0xaa, 0x52,
+ 0x08, page };
+ int ret;
+
+ ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], mauc_cmd2_page,
+ ARRAY_SIZE(mauc_cmd2_page));
+ if (ret < 0)
+ return ret;
+
+ nt->last_page = page;
+ return 0;
+}
+
+/*
+ * nt35950_set_data_compression - Set data compression mode
+ * @nt: Main driver structure
+ * @comp_mode: Compression mode
+ *
+ * Return: 0 on success or a negative number on error
+ */
+static int nt35950_set_data_compression(struct nt35950 *nt, u8 comp_mode)
+{
+ u8 cmd_data_compression[] = { MCS_PARAM_DATA_COMPRESSION, comp_mode };
+ u8 cmd_vesa_dsc_on[] = { MCS_PARAM_VESA_DSC_ON, !!comp_mode };
+ u8 cmd_vesa_dsc_setting[] = { MCS_PARAM_VESA_DSC_SETTING, 0x03 };
+ u8 last_page = nt->last_page;
+ int ret;
+
+ /* Set CMD2 Page 0 if we're not there yet */
+ if (last_page != 0) {
+ ret = nt35950_set_cmd2_page(nt, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_data_compression,
+ ARRAY_SIZE(cmd_data_compression));
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_on,
+ ARRAY_SIZE(cmd_vesa_dsc_on));
+ if (ret < 0)
+ return ret;
+
+ /* Set the vesa dsc setting on Page 4 */
+ ret = nt35950_set_cmd2_page(nt, 4);
+ if (ret < 0)
+ return ret;
+
+ /* Display Stream Compression setting, always 0x03 */
+ ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_setting,
+ ARRAY_SIZE(cmd_vesa_dsc_setting));
+ if (ret < 0)
+ return ret;
+
+ /* Get back to the previously set page */
+ return nt35950_set_cmd2_page(nt, last_page);
+}
+
+/*
+ * nt35950_set_scaler - Enable/disable resolution upscaling
+ * @nt: Main driver structure
+ * @scale_up: Scale up function control
+ *
+ * Return: Number of transferred bytes or negative number on error
+ */
+static int nt35950_set_scaler(struct nt35950 *nt, u8 scale_up)
+{
+ u8 cmd_scaler[] = { MCS_PARAM_SCALER_FUNCTION, scale_up };
+
+ return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler,
+ ARRAY_SIZE(cmd_scaler));
+}
+
+/*
+ * nt35950_set_scale_mode - Resolution upscaling mode
+ * @nt: Main driver structure
+ * @mode: Scaler mode (MCS_SCALEUP_*)
+ *
+ * Return: Number of transferred bytes or negative number on error
+ */
+static int nt35950_set_scale_mode(struct nt35950 *nt, u8 mode)
+{
+ u8 cmd_scaler[] = { MCS_PARAM_SCALEUP_MODE, mode };
+
+ return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler,
+ ARRAY_SIZE(cmd_scaler));
+}
+
+/*
+ * nt35950_inject_black_image - Display a completely black image
+ * @nt: Main driver structure
+ *
+ * After IC setup, the attached panel may show random data
+ * due to driver IC behavior changes (resolution, compression,
+ * scaling, etc). This function, called after parameter setup,
+ * makes the driver IC output a completely black image to
+ * the display.
+ * It makes sense to push a black image before sending the sleep-out
+ * and display-on commands.
+ *
+ * Return: Number of transferred bytes or negative number on error
+ */
+static int nt35950_inject_black_image(struct nt35950 *nt)
+{
+ const u8 cmd0_black_img[] = { 0x6f, 0x01 };
+ const u8 cmd1_black_img[] = { 0xf3, 0x10 };
+ u8 cmd_test[] = { 0xff, 0xaa, 0x55, 0xa5, 0x80 };
+ int ret;
+
+ /* Enable test command */
+ ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test));
+ if (ret < 0)
+ return ret;
+
+ /* Send a black image */
+ ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd0_black_img,
+ ARRAY_SIZE(cmd0_black_img));
+ if (ret < 0)
+ return ret;
+ ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd1_black_img,
+ ARRAY_SIZE(cmd1_black_img));
+ if (ret < 0)
+ return ret;
+
+ /* Disable test command */
+ cmd_test[ARRAY_SIZE(cmd_test) - 1] = 0x00;
+ return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test));
+}
+
+/*
+ * nt35950_set_dispout - Set Display Output register parameters
+ * @nt: Main driver structure
+ *
+ * Return: Number of transferred bytes or negative number on error
+ */
+static int nt35950_set_dispout(struct nt35950 *nt)
+{
+ u8 cmd_dispout[] = { MCS_PARAM_DISP_OUTPUT_CTRL, 0x00 };
+ const struct nt35950_panel_mode *mode_data = nt->desc->mode_data;
+
+ if (mode_data[nt->cur_mode].is_video_mode)
+ cmd_dispout[1] |= MCS_DISP_OUT_VIDEO_MODE;
+ if (mode_data[nt->cur_mode].enable_sram)
+ cmd_dispout[1] |= MCS_DISP_OUT_SRAM_EN;
+
+ return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_dispout,
+ ARRAY_SIZE(cmd_dispout));
+}
+
+static int nt35950_get_current_mode(struct nt35950 *nt)
+{
+ struct drm_connector *connector = nt->connector;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ /* Return the default (first) mode if no info available yet */
+ if (!connector->state || !connector->state->crtc)
+ return 0;
+
+ crtc_state = connector->state->crtc->state;
+
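+ /* Pick the panel mode whose timings and clock match the current CRTC mode */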
+ for (i = 0; i < nt->desc->num_modes; i++) {
+ if (drm_mode_match(&crtc_state->mode,
+ &nt->desc->mode_data[i].mode,
+ DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_CLOCK))
+ return i;
+ }
+
+ return 0;
+}
+
+static int nt35950_on(struct nt35950 *nt)
+{
+ const struct nt35950_panel_mode *mode_data = nt->desc->mode_data;
+ struct mipi_dsi_device *dsi = nt->dsi[0];
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ nt->cur_mode = nt35950_get_current_mode(nt);
+ nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
+ nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = nt35950_set_cmd2_page(nt, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = nt35950_set_data_compression(nt, mode_data[nt->cur_mode].compression);
+ if (ret < 0)
+ return ret;
+
+ ret = nt35950_set_scale_mode(nt, mode_data[nt->cur_mode].scaler_mode);
+ if (ret < 0)
+ return ret;
+
+ ret = nt35950_set_scaler(nt, mode_data[nt->cur_mode].scaler_on);
+ if (ret < 0)
+ return ret;
+
+ ret = nt35950_set_dispout(nt);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear on: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear scanline: %d\n", ret);
+ return ret;
+ }
+
+ /* CMD2 Page 1 */
+ ret = nt35950_set_cmd2_page(nt, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Unknown command */
+ dsi_dcs_write_seq(dsi, 0xd4, 0x88, 0x88);
+
+ /* CMD2 Page 7 */
+ ret = nt35950_set_cmd2_page(nt, 7);
+ if (ret < 0)
+ return ret;
+
+ /* Enable SubPixel Rendering */
+ dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_EN, 0x01);
+
+ /* SPR Mode: YYG Rainbow-RGB */
+ dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_MODE, MCS_SPR_MODE_YYG_RAINBOW_RGB);
+
+ /* CMD3 */
+ ret = nt35950_inject_black_image(nt);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0)
+ return ret;
+ msleep(120);
+
+ nt->dsi[0]->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ nt->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int nt35950_off(struct nt35950 *nt)
+{
+ struct device *dev = &nt->dsi[0]->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(nt->dsi[0]);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ goto set_lpm;
+ }
+ usleep_range(10000, 11000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(nt->dsi[0]);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ goto set_lpm;
+ }
+ msleep(150);
+
+set_lpm:
+ nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
+ nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int nt35950_sharp_init_vregs(struct nt35950 *nt, struct device *dev)
+{
+ int ret;
+
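+ /* Sanity-check that the board regulators can hit the panel's voltage windows */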
+ nt->vregs[0].supply = "vddio";
+ nt->vregs[1].supply = "avdd";
+ nt->vregs[2].supply = "avee";
+ nt->vregs[3].supply = "dvdd";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->vregs),
+ nt->vregs);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_is_supported_voltage(nt->vregs[0].consumer,
+ 1750000, 1950000);
+ if (!ret)
+ return -EINVAL;
+ ret = regulator_is_supported_voltage(nt->vregs[1].consumer,
+ 5200000, 5900000);
+ if (!ret)
+ return -EINVAL;
+ /* AVEE is negative: -5.90V to -5.20V */
+ ret = regulator_is_supported_voltage(nt->vregs[2].consumer,
+ 5200000, 5900000);
+ if (!ret)
+ return -EINVAL;
+
+ ret = regulator_is_supported_voltage(nt->vregs[3].consumer,
+ 1300000, 1400000);
+ if (!ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nt35950_prepare(struct drm_panel *panel)
+{
+ struct nt35950 *nt = to_nt35950(panel);
+ struct device *dev = &nt->dsi[0]->dev;
+ int ret;
+
+ if (nt->prepared)
+ return 0;
+
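+ /* Power-up order: vddio, then dvdd, then avdd/avee, with inter-supply delays */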
+ ret = regulator_enable(nt->vregs[0].consumer);
+ if (ret)
+ return ret;
+ usleep_range(2000, 5000);
+
+ ret = regulator_enable(nt->vregs[3].consumer);
+ if (ret)
+ goto end;
+ usleep_range(15000, 18000);
+
+ ret = regulator_enable(nt->vregs[1].consumer);
+ if (ret)
+ goto end;
+
+ ret = regulator_enable(nt->vregs[2].consumer);
+ if (ret)
+ goto end;
+ usleep_range(12000, 13000);
+
+ nt35950_reset(nt);
+
+ ret = nt35950_on(nt);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ goto end;
+ }
+ nt->prepared = true;
+
+end:
+ if (ret < 0) {
+ regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nt35950_unprepare(struct drm_panel *panel)
+{
+ struct nt35950 *nt = to_nt35950(panel);
+ struct device *dev = &nt->dsi[0]->dev;
+ int ret;
+
+ if (!nt->prepared)
+ return 0;
+
+ ret = nt35950_off(nt);
+ if (ret < 0)
+ dev_err(dev, "Failed to deinitialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(nt->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs);
+
+ nt->prepared = false;
+ return 0;
+}
+
+static int nt35950_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct nt35950 *nt = to_nt35950(panel);
+ int i;
+
+ for (i = 0; i < nt->desc->num_modes; i++) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev,
+ &nt->desc->mode_data[i].mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+ if (nt->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.height_mm = nt->desc->mode_data[0].mode.height_mm;
+ connector->display_info.width_mm = nt->desc->mode_data[0].mode.width_mm;
+ nt->connector = connector;
+
+ return nt->desc->num_modes;
+}
+
+static const struct drm_panel_funcs nt35950_panel_funcs = {
+ .prepare = nt35950_prepare,
+ .unprepare = nt35950_unprepare,
+ .get_modes = nt35950_get_modes,
+};
+
+static int nt35950_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct device_node *dsi_r;
+ struct mipi_dsi_host *dsi_r_host;
+ struct nt35950 *nt;
+ const struct mipi_dsi_device_info *info;
+ int i, num_dsis = 1, ret;
+
+ nt = devm_kzalloc(dev, sizeof(*nt), GFP_KERNEL);
+ if (!nt)
+ return -ENOMEM;
+
+ ret = nt35950_sharp_init_vregs(nt, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Regulator init failure.\n");
+
+ nt->desc = of_device_get_match_data(dev);
+ if (!nt->desc)
+ return -ENODEV;
+
+ nt->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(nt->reset_gpio)) {
+ return dev_err_probe(dev, PTR_ERR(nt->reset_gpio),
+ "Failed to get reset gpio\n");
+ }
+
+ /* If the panel is connected to two DSIs, DSI0 drives the left half and DSI1 the right */
+ if (nt->desc->is_dual_dsi) {
+ info = &nt->desc->dsi_info;
+ dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+ if (!dsi_r) {
+ dev_err(dev, "Cannot get secondary DSI node.\n");
+ return -ENODEV;
+ }
+ dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
+ of_node_put(dsi_r);
+ if (!dsi_r_host) {
+ dev_err(dev, "Cannot get secondary DSI host\n");
+ return -EPROBE_DEFER;
+ }
+
+ nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
+ if (!nt->dsi[1]) {
+ dev_err(dev, "Cannot register secondary DSI device\n");
+ return -ENODEV;
+ }
+ num_dsis++;
+ }
+
+ nt->dsi[0] = dsi;
+ mipi_dsi_set_drvdata(dsi, nt);
+
+ drm_panel_init(&nt->panel, dev, &nt35950_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&nt->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&nt->panel);
+
+ for (i = 0; i < num_dsis; i++) {
+ nt->dsi[i]->lanes = nt->desc->num_lanes;
+ nt->dsi[i]->format = MIPI_DSI_FMT_RGB888;
+
+ nt->dsi[i]->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_LPM;
+
+ if (nt->desc->mode_data[0].is_video_mode)
+ nt->dsi[i]->mode_flags |= MIPI_DSI_MODE_VIDEO;
+
+ ret = mipi_dsi_attach(nt->dsi[i]);
+ if (ret < 0) {
+ return dev_err_probe(dev, ret,
+ "Cannot attach to DSI%d host.\n", i);
+ }
+ }
+
+ /* Make sure to set RESX LOW before starting the power-on sequence */
+ gpiod_set_value_cansleep(nt->reset_gpio, 0);
+ return 0;
+}
+
+static int nt35950_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt35950 *nt = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(nt->dsi[0]);
+ if (ret < 0)
+ dev_err(&dsi->dev,
+ "Failed to detach from DSI0 host: %d\n", ret);
+
+ if (nt->dsi[1]) {
+ ret = mipi_dsi_detach(nt->dsi[1]);
+ if (ret < 0)
+ dev_err(&dsi->dev,
+ "Failed to detach from DSI1 host: %d\n", ret);
+ mipi_dsi_device_unregister(nt->dsi[1]);
+ }
+
+ drm_panel_remove(&nt->panel);
+
+ return 0;
+}
+
+static const struct nt35950_panel_mode sharp_ls055d1sx04_modes[] = {
+ {
+ /* 1920x1080 60Hz no compression */
+ .mode = {
+ .clock = 214537,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 400,
+ .hsync_end = 1080 + 400 + 40,
+ .htotal = 1080 + 400 + 40 + 300,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 12,
+ .vsync_end = 1920 + 12 + 2,
+ .vtotal = 1920 + 12 + 2 + 10,
+ .width_mm = 68,
+ .height_mm = 121,
+ },
+ .compression = MCS_DATA_COMPRESSION_NONE,
+ .enable_sram = true,
+ .is_video_mode = false,
+ .scaler_on = 1,
+ .scaler_mode = MCS_SCALEUP_DUPLICATE,
+ },
+ /* TODO: Add 2160x3840 60Hz when DSC is supported */
+};
+
+static const struct nt35950_panel_desc sharp_ls055d1sx04 = {
+ .model_name = "Sharp LS055D1SX04",
+ .dsi_info = {
+ .type = "LS055D1SX04",
+ .channel = 0,
+ .node = NULL,
+ },
+ .mode_data = sharp_ls055d1sx04_modes,
+ .num_modes = ARRAY_SIZE(sharp_ls055d1sx04_modes),
+ .is_dual_dsi = true,
+ .num_lanes = 4,
+};
+
+static const struct of_device_id nt35950_of_match[] = {
+ { .compatible = "sharp,ls055d1sx04", .data = &sharp_ls055d1sx04 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nt35950_of_match);
+
+static struct mipi_dsi_driver nt35950_driver = {
+ .probe = nt35950_probe,
+ .remove = nt35950_remove,
+ .driver = {
+ .name = "panel-novatek-nt35950",
+ .of_match_table = nt35950_of_match,
+ },
+};
+module_mipi_dsi_driver(nt35950_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>");
+MODULE_DESCRIPTION("Novatek NT35950 DriverIC panels driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
index 533cd3934b8b..231f371901e8 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -618,7 +618,7 @@ static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
ret = regulator_set_load(pinfo->supplies[i].consumer,
nt36672a_regulator_enable_loads[i]);
if (ret)
- return dev_err_probe(dev, ret, "failed to set regulator enable loads\n");
+ return dev_err_probe(dev, ret, "failed to set regulator enable loads\n");
}
pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -656,7 +656,13 @@ static int nt36672a_panel_probe(struct mipi_dsi_device *dsi)
if (err < 0)
return err;
- return mipi_dsi_attach(dsi);
+ err = mipi_dsi_attach(dsi);
+ if (err < 0) {
+ drm_panel_remove(&pinfo->base);
+ return err;
+ }
+
+ return 0;
}
static int nt36672a_panel_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index f8151fe3ac9a..d036853db865 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -258,16 +258,13 @@ static int nt39016_probe(struct spi_device *spi)
return -EINVAL;
panel->supply = devm_regulator_get(dev, "power");
- if (IS_ERR(panel->supply)) {
- dev_err(dev, "Failed to get power supply\n");
- return PTR_ERR(panel->supply);
- }
+ if (IS_ERR(panel->supply))
+ return dev_err_probe(dev, PTR_ERR(panel->supply),
+ "Failed to get power supply\n");
panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(panel->reset_gpio)) {
- dev_err(dev, "Failed to get reset GPIO\n");
- return PTR_ERR(panel->reset_gpio);
- }
+ if (IS_ERR(panel->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(panel->reset_gpio), "Failed to get reset GPIO\n");
spi->bits_per_word = 8;
spi->mode = SPI_MODE_3 | SPI_3WIRE;
@@ -287,11 +284,8 @@ static int nt39016_probe(struct spi_device *spi)
DRM_MODE_CONNECTOR_DPI);
err = drm_panel_of_backlight(&panel->drm_panel);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get backlight handle\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to get backlight handle\n");
drm_panel_add(&panel->drm_panel);
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 3c20beeb1781..3991f5d950af 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -241,7 +241,13 @@ static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- return mipi_dsi_attach(dsi);
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ wuxga_nt_panel_del(wuxga_nt);
+ return ret;
+ }
+
+ return 0;
}
static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index a3782830ae3c..1fb579a574d9 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -199,7 +199,13 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = 4;
- return mipi_dsi_attach(dsi);
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
}
static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index ccc8ed6fe3ae..e38262b67ff7 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -452,27 +452,22 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
ctx->supplies[1].supply = "vci";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "failed to get regulators: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset_gpio)) {
- dev_err(dev, "cannot get reset-gpio: %ld\n",
- PTR_ERR(ctx->reset_gpio));
- return PTR_ERR(ctx->reset_gpio);
- }
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "cannot get reset-gpio\n");
drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
&s6e63j0x03_bl_ops, NULL);
- if (IS_ERR(ctx->bl_dev)) {
- dev_err(dev, "failed to register backlight device\n");
- return PTR_ERR(ctx->bl_dev);
- }
+ if (IS_ERR(ctx->bl_dev))
+ return dev_err_probe(dev, PTR_ERR(ctx->bl_dev),
+ "failed to register backlight device\n");
ctx->bl_dev->props.max_brightness = MAX_BRIGHTNESS;
ctx->bl_dev->props.brightness = DEFAULT_BRIGHTNESS;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index ea63799ff2a1..29fde3823212 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -247,6 +247,7 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
return ret;
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 8cb1853574bb..1fb37fda4ba9 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -270,18 +270,14 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
}
ctx->supply = devm_regulator_get(dev, "vddio");
- if (IS_ERR(ctx->supply)) {
- ret = PTR_ERR(ctx->supply);
- dev_err(dev, "Failed to get vddio regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->supply))
+ return dev_err_probe(dev, PTR_ERR(ctx->supply),
+ "Failed to get vddio regulator\n");
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(ctx->reset_gpio)) {
- ret = PTR_ERR(ctx->reset_gpio);
- dev_warn(dev, "Failed to get reset-gpios: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
ctx->dsi = dsi;
mipi_dsi_set_drvdata(dsi, ctx);
@@ -302,6 +298,7 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
return ret;
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index 94992f45113a..a07d0f6c3e69 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -146,22 +146,19 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
lcd->pdev = pdev;
lcd->vdd = devm_regulator_get(&pdev->dev, "envdd");
- if (IS_ERR(lcd->vdd)) {
- dev_err(&pdev->dev, "failed to get regulator\n");
- return PTR_ERR(lcd->vdd);
- }
+ if (IS_ERR(lcd->vdd))
+ return dev_err_probe(&pdev->dev, PTR_ERR(lcd->vdd),
+ "failed to get regulator\n");
lcd->ini_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(lcd->ini_gpio)) {
- dev_err(&pdev->dev, "failed to get enable gpio\n");
- return PTR_ERR(lcd->ini_gpio);
- }
+ if (IS_ERR(lcd->ini_gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(lcd->ini_gpio),
+ "failed to get enable gpio\n");
lcd->resb_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(lcd->resb_gpio)) {
- dev_err(&pdev->dev, "failed to get reset gpio\n");
- return PTR_ERR(lcd->resb_gpio);
- }
+ if (IS_ERR(lcd->resb_gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(lcd->resb_gpio),
+ "failed to get reset gpio\n");
lcd->mo_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 0,
GPIOD_OUT_LOW);
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index b937e24dac8e..25829a0a8e80 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -296,7 +296,13 @@ static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- return mipi_dsi_attach(dsi);
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ sharp_nt_panel_del(sharp_nt);
+ return ret;
+ }
+
+ return 0;
}
static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index eb475a3a774b..9e46db5e359c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2031,6 +2031,31 @@ static const struct panel_desc innolux_g070y2_l01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct drm_display_mode innolux_g070y2_t02_mode = {
+ .clock = 33333,
+ .hdisplay = 800,
+ .hsync_start = 800 + 210,
+ .hsync_end = 800 + 210 + 20,
+ .htotal = 800 + 210 + 20 + 46,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 10,
+ .vtotal = 480 + 22 + 23 + 10,
+};
+
+static const struct panel_desc innolux_g070y2_t02 = {
+ .modes = &innolux_g070y2_t02_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 92,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct display_timing innolux_g101ice_l01_timing = {
.pixelclock = { 60400000, 71100000, 74700000 },
.hactive = { 1280, 1280, 1280 },
@@ -3227,6 +3252,33 @@ static const struct panel_desc starry_kr070pe2t = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing tsd_tst043015cmhx_timing = {
+ .pixelclock = { 5000000, 9000000, 12000000 },
+ .hactive = { 480, 480, 480 },
+ .hfront_porch = { 4, 5, 65 },
+ .hback_porch = { 36, 40, 255 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 272, 272, 272 },
+ .vfront_porch = { 2, 8, 97 },
+ .vback_porch = { 3, 8, 31 },
+ .vsync_len = { 1, 1, 1 },
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static const struct panel_desc tsd_tst043015cmhx = {
+ .timings = &tsd_tst043015cmhx_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 105,
+ .height = 67,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+};
+
static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = {
.clock = 30000,
.hdisplay = 800,
@@ -3473,6 +3525,31 @@ static const struct panel_desc urt_umsh_8596md_parallel = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode vivax_tpc9150_panel_mode = {
+ .clock = 60000,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 160,
+ .hsync_end = 1024 + 160 + 100,
+ .htotal = 1024 + 160 + 100 + 60,
+ .vdisplay = 600,
+ .vsync_start = 600 + 12,
+ .vsync_end = 600 + 12 + 10,
+ .vtotal = 600 + 12 + 10 + 13,
+};
+
+static const struct panel_desc vivax_tpc9150_panel = {
+ .modes = &vivax_tpc9150_panel_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 200,
+ .height = 115,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode vl050_8048nt_c01_mode = {
.clock = 33333,
.hdisplay = 800,
@@ -3738,6 +3815,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,g070y2-l01",
.data = &innolux_g070y2_l01,
}, {
+ .compatible = "innolux,g070y2-t02",
+ .data = &innolux_g070y2_t02,
+ }, {
.compatible = "innolux,g101ice-l01",
.data = &innolux_g101ice_l01
}, {
@@ -3876,6 +3956,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "starry,kr070pe2t",
.data = &starry_kr070pe2t,
}, {
+ .compatible = "team-source-display,tst043015cmhx",
+ .data = &tsd_tst043015cmhx,
+ }, {
.compatible = "tfc,s9700rtwv43tr-01b",
.data = &tfc_s9700rtwv43tr_01b,
}, {
@@ -3921,6 +4004,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "urt,umsh-8596md-20t",
.data = &urt_umsh_8596md_parallel,
}, {
+ .compatible = "vivax,tpc9150-panel",
+ .data = &vivax_tpc9150_panel,
+ }, {
.compatible = "vxt,vl050-8048nt-c01",
.data = &vl050_8048nt_c01,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
new file mode 100644
index 000000000000..69f07b15fca4
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Linaro Limited
+ *
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct truly_nt35521 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *blen_gpio;
+ bool prepared;
+ bool enabled;
+};
+
+static inline
+struct truly_nt35521 *to_truly_nt35521(struct drm_panel *panel)
+{
+ return container_of(panel, struct truly_nt35521, panel);
+}
+
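+/*
+ * Send a generic (non-DCS) write with an inline payload. On a transfer
+ * error this macro returns from the *enclosing* function, so it may only
+ * be used inside helpers that return an int error code.
+ */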
+#define dsi_generic_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static void truly_nt35521_reset(struct truly_nt35521 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(150);
+}
+
+static int truly_nt35521_on(struct truly_nt35521 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
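+ /*
+ * Init sequence taken from the vendor device tree (see the generator
+ * note in the header); the register-level meaning is undocumented.
+ */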
+ dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ dsi_generic_write_seq(dsi, 0xff, 0xaa, 0x55, 0xa5, 0x80);
+ dsi_generic_write_seq(dsi, 0x6f, 0x11, 0x00);
+ dsi_generic_write_seq(dsi, 0xf7, 0x20, 0x00);
+ dsi_generic_write_seq(dsi, 0x6f, 0x01);
+ dsi_generic_write_seq(dsi, 0xb1, 0x21);
+ dsi_generic_write_seq(dsi, 0xbd, 0x01, 0xa0, 0x10, 0x08, 0x01);
+ dsi_generic_write_seq(dsi, 0xb8, 0x01, 0x02, 0x0c, 0x02);
+ dsi_generic_write_seq(dsi, 0xbb, 0x11, 0x11);
+ dsi_generic_write_seq(dsi, 0xbc, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xb6, 0x02);
+ dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01);
+ dsi_generic_write_seq(dsi, 0xb0, 0x09, 0x09);
+ dsi_generic_write_seq(dsi, 0xb1, 0x09, 0x09);
+ dsi_generic_write_seq(dsi, 0xbc, 0x8c, 0x00);
+ dsi_generic_write_seq(dsi, 0xbd, 0x8c, 0x00);
+ dsi_generic_write_seq(dsi, 0xca, 0x00);
+ dsi_generic_write_seq(dsi, 0xc0, 0x04);
+ dsi_generic_write_seq(dsi, 0xbe, 0xb5);
+ dsi_generic_write_seq(dsi, 0xb3, 0x35, 0x35);
+ dsi_generic_write_seq(dsi, 0xb4, 0x25, 0x25);
+ dsi_generic_write_seq(dsi, 0xb9, 0x43, 0x43);
+ dsi_generic_write_seq(dsi, 0xba, 0x24, 0x24);
+ dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02);
+ dsi_generic_write_seq(dsi, 0xee, 0x03);
+ dsi_generic_write_seq(dsi, 0xb0,
+ 0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3,
+ 0x00, 0xce, 0x00, 0xe1, 0x00, 0xf3, 0x01, 0x11);
+ dsi_generic_write_seq(dsi, 0xb1,
+ 0x01, 0x2e, 0x01, 0x5c, 0x01, 0x82, 0x01, 0xc3,
+ 0x01, 0xfe, 0x02, 0x00, 0x02, 0x37, 0x02, 0x77);
+ dsi_generic_write_seq(dsi, 0xb2,
+ 0x02, 0xa1, 0x02, 0xd7, 0x02, 0xfe, 0x03, 0x2c,
+ 0x03, 0x4b, 0x03, 0x63, 0x03, 0x8f, 0x03, 0x90);
+ dsi_generic_write_seq(dsi, 0xb3, 0x03, 0x96, 0x03, 0x98);
+ dsi_generic_write_seq(dsi, 0xb4,
+ 0x00, 0x81, 0x00, 0x8b, 0x00, 0x9c, 0x00, 0xa9,
+ 0x00, 0xb5, 0x00, 0xcb, 0x00, 0xdf, 0x01, 0x02);
+ dsi_generic_write_seq(dsi, 0xb5,
+ 0x01, 0x1f, 0x01, 0x51, 0x01, 0x7a, 0x01, 0xbf,
+ 0x01, 0xfa, 0x01, 0xfc, 0x02, 0x34, 0x02, 0x76);
+ dsi_generic_write_seq(dsi, 0xb6,
+ 0x02, 0x9f, 0x02, 0xd7, 0x02, 0xfc, 0x03, 0x2c,
+ 0x03, 0x4a, 0x03, 0x63, 0x03, 0x8f, 0x03, 0xa2);
+ dsi_generic_write_seq(dsi, 0xb7, 0x03, 0xb8, 0x03, 0xba);
+ dsi_generic_write_seq(dsi, 0xb8,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x2a,
+ 0x00, 0x41, 0x00, 0x67, 0x00, 0x87, 0x00, 0xb9);
+ dsi_generic_write_seq(dsi, 0xb9,
+ 0x00, 0xe2, 0x01, 0x22, 0x01, 0x54, 0x01, 0xa3,
+ 0x01, 0xe6, 0x01, 0xe7, 0x02, 0x24, 0x02, 0x67);
+ dsi_generic_write_seq(dsi, 0xba,
+ 0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31,
+ 0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4);
+ dsi_generic_write_seq(dsi, 0xbb, 0x03, 0xf6, 0x03, 0xf7);
+ dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03);
+ dsi_generic_write_seq(dsi, 0xb0, 0x22, 0x00);
+ dsi_generic_write_seq(dsi, 0xb1, 0x22, 0x00);
+ dsi_generic_write_seq(dsi, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xb3, 0x05, 0x00, 0x60, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xb4, 0x05, 0x00, 0x60, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xb5, 0x05, 0x00, 0x60, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xba, 0x53, 0x00, 0x60, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xbb, 0x53, 0x00, 0x60, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xbc, 0x53, 0x00, 0x60, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xbd, 0x53, 0x00, 0x60, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xc0, 0x00, 0x34, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xc1, 0x00, 0x00, 0x34, 0x00);
+ dsi_generic_write_seq(dsi, 0xc2, 0x00, 0x00, 0x34, 0x00);
+ dsi_generic_write_seq(dsi, 0xc3, 0x00, 0x00, 0x34, 0x00);
+ dsi_generic_write_seq(dsi, 0xc4, 0x60);
+ dsi_generic_write_seq(dsi, 0xc5, 0xc0);
+ dsi_generic_write_seq(dsi, 0xc6, 0x00);
+ dsi_generic_write_seq(dsi, 0xc7, 0x00);
+ dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05);
+ dsi_generic_write_seq(dsi, 0xb0, 0x17, 0x06);
+ dsi_generic_write_seq(dsi, 0xb1, 0x17, 0x06);
+ dsi_generic_write_seq(dsi, 0xb2, 0x17, 0x06);
+ dsi_generic_write_seq(dsi, 0xb3, 0x17, 0x06);
+ dsi_generic_write_seq(dsi, 0xb4, 0x17, 0x06);
+ dsi_generic_write_seq(dsi, 0xb5, 0x17, 0x06);
+ dsi_generic_write_seq(dsi, 0xb6, 0x17, 0x06);
+ dsi_generic_write_seq(dsi, 0xb7, 0x17, 0x06);
+ dsi_generic_write_seq(dsi, 0xb8, 0x00);
+ dsi_generic_write_seq(dsi, 0xb9, 0x00, 0x03);
+ dsi_generic_write_seq(dsi, 0xba, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xbb, 0x02, 0x03);
+ dsi_generic_write_seq(dsi, 0xbc, 0x02, 0x03);
+ dsi_generic_write_seq(dsi, 0xbd, 0x03, 0x03, 0x00, 0x03, 0x03);
+ dsi_generic_write_seq(dsi, 0xc0, 0x0b);
+ dsi_generic_write_seq(dsi, 0xc1, 0x09);
+ dsi_generic_write_seq(dsi, 0xc2, 0xa6);
+ dsi_generic_write_seq(dsi, 0xc3, 0x05);
+ dsi_generic_write_seq(dsi, 0xc4, 0x00);
+ dsi_generic_write_seq(dsi, 0xc5, 0x02);
+ dsi_generic_write_seq(dsi, 0xc6, 0x22);
+ dsi_generic_write_seq(dsi, 0xc7, 0x03);
+ dsi_generic_write_seq(dsi, 0xc8, 0x07, 0x20);
+ dsi_generic_write_seq(dsi, 0xc9, 0x03, 0x20);
+ dsi_generic_write_seq(dsi, 0xca, 0x01, 0x60);
+ dsi_generic_write_seq(dsi, 0xcb, 0x01, 0x60);
+ dsi_generic_write_seq(dsi, 0xcc, 0x00, 0x00, 0x02);
+ dsi_generic_write_seq(dsi, 0xcd, 0x00, 0x00, 0x02);
+ dsi_generic_write_seq(dsi, 0xce, 0x00, 0x00, 0x02);
+ dsi_generic_write_seq(dsi, 0xcf, 0x00, 0x00, 0x02);
+ dsi_generic_write_seq(dsi, 0xd1, 0x00, 0x05, 0x01, 0x07, 0x10);
+ dsi_generic_write_seq(dsi, 0xd2, 0x10, 0x05, 0x05, 0x03, 0x10);
+ dsi_generic_write_seq(dsi, 0xd3, 0x20, 0x00, 0x43, 0x07, 0x10);
+ dsi_generic_write_seq(dsi, 0xd4, 0x30, 0x00, 0x43, 0x07, 0x10);
+ dsi_generic_write_seq(dsi, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xe5, 0x06);
+ dsi_generic_write_seq(dsi, 0xe6, 0x06);
+ dsi_generic_write_seq(dsi, 0xe7, 0x00);
+ dsi_generic_write_seq(dsi, 0xe8, 0x06);
+ dsi_generic_write_seq(dsi, 0xe9, 0x06);
+ dsi_generic_write_seq(dsi, 0xea, 0x06);
+ dsi_generic_write_seq(dsi, 0xeb, 0x00);
+ dsi_generic_write_seq(dsi, 0xec, 0x00);
+ dsi_generic_write_seq(dsi, 0xed, 0x30);
+ dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06);
+ dsi_generic_write_seq(dsi, 0xb0, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xb1, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xb2, 0x2d, 0x2e);
+ dsi_generic_write_seq(dsi, 0xb3, 0x31, 0x34);
+ dsi_generic_write_seq(dsi, 0xb4, 0x29, 0x2a);
+ dsi_generic_write_seq(dsi, 0xb5, 0x12, 0x10);
+ dsi_generic_write_seq(dsi, 0xb6, 0x18, 0x16);
+ dsi_generic_write_seq(dsi, 0xb7, 0x00, 0x02);
+ dsi_generic_write_seq(dsi, 0xb8, 0x08, 0x31);
+ dsi_generic_write_seq(dsi, 0xb9, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xba, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xbb, 0x31, 0x08);
+ dsi_generic_write_seq(dsi, 0xbc, 0x03, 0x01);
+ dsi_generic_write_seq(dsi, 0xbd, 0x17, 0x19);
+ dsi_generic_write_seq(dsi, 0xbe, 0x11, 0x13);
+ dsi_generic_write_seq(dsi, 0xbf, 0x2a, 0x29);
+ dsi_generic_write_seq(dsi, 0xc0, 0x34, 0x31);
+ dsi_generic_write_seq(dsi, 0xc1, 0x2e, 0x2d);
+ dsi_generic_write_seq(dsi, 0xc2, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xc3, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xc4, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xc5, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xc6, 0x2e, 0x2d);
+ dsi_generic_write_seq(dsi, 0xc7, 0x31, 0x34);
+ dsi_generic_write_seq(dsi, 0xc8, 0x29, 0x2a);
+ dsi_generic_write_seq(dsi, 0xc9, 0x17, 0x19);
+ dsi_generic_write_seq(dsi, 0xca, 0x11, 0x13);
+ dsi_generic_write_seq(dsi, 0xcb, 0x03, 0x01);
+ dsi_generic_write_seq(dsi, 0xcc, 0x08, 0x31);
+ dsi_generic_write_seq(dsi, 0xcd, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xce, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xcf, 0x31, 0x08);
+ dsi_generic_write_seq(dsi, 0xd0, 0x00, 0x02);
+ dsi_generic_write_seq(dsi, 0xd1, 0x12, 0x10);
+ dsi_generic_write_seq(dsi, 0xd2, 0x18, 0x16);
+ dsi_generic_write_seq(dsi, 0xd3, 0x2a, 0x29);
+ dsi_generic_write_seq(dsi, 0xd4, 0x34, 0x31);
+ dsi_generic_write_seq(dsi, 0xd5, 0x2d, 0x2e);
+ dsi_generic_write_seq(dsi, 0xd6, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xd7, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xe5, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xe6, 0x31, 0x31);
+ dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xe7, 0x00);
+ dsi_generic_write_seq(dsi, 0x6f, 0x02);
+ dsi_generic_write_seq(dsi, 0xf7, 0x47);
+ dsi_generic_write_seq(dsi, 0x6f, 0x0a);
+ dsi_generic_write_seq(dsi, 0xf7, 0x02);
+ dsi_generic_write_seq(dsi, 0x6f, 0x17);
+ dsi_generic_write_seq(dsi, 0xf4, 0x60);
+ dsi_generic_write_seq(dsi, 0x6f, 0x01);
+ dsi_generic_write_seq(dsi, 0xf9, 0x46);
+ dsi_generic_write_seq(dsi, 0x6f, 0x11);
+ dsi_generic_write_seq(dsi, 0xf3, 0x01);
+ dsi_generic_write_seq(dsi, 0x35, 0x00);
+ dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ dsi_generic_write_seq(dsi, 0xd9, 0x02, 0x03, 0x00);
+ dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ dsi_generic_write_seq(dsi, 0xb1, 0x6c, 0x21);
+ dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, 0x35, 0x00);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ usleep_range(1000, 2000);
+
+ dsi_generic_write_seq(dsi, 0x53, 0x24);
+
+ return 0;
+}
+
+static int truly_nt35521_off(struct truly_nt35521 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(50);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(150);
+
+ return 0;
+}
+
+static int truly_nt35521_prepare(struct drm_panel *panel)
+{
+ struct truly_nt35521 *ctx = to_truly_nt35521(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ truly_nt35521_reset(ctx);
+
+ ret = truly_nt35521_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int truly_nt35521_unprepare(struct drm_panel *panel)
+{
+ struct truly_nt35521 *ctx = to_truly_nt35521(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = truly_nt35521_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static int truly_nt35521_enable(struct drm_panel *panel)
+{
+ struct truly_nt35521 *ctx = to_truly_nt35521(panel);
+
+ if (ctx->enabled)
+ return 0;
+
+ gpiod_set_value_cansleep(ctx->blen_gpio, 1);
+
+ ctx->enabled = true;
+ return 0;
+}
+
+static int truly_nt35521_disable(struct drm_panel *panel)
+{
+ struct truly_nt35521 *ctx = to_truly_nt35521(panel);
+
+ if (!ctx->enabled)
+ return 0;
+
+ gpiod_set_value_cansleep(ctx->blen_gpio, 0);
+
+ ctx->enabled = false;
+ return 0;
+}
+
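+/* 720x1280 @ 60 Hz; the pixel clock (kHz) is derived from the h/v totals below */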
+static const struct drm_display_mode truly_nt35521_mode = {
+ .clock = (720 + 232 + 20 + 112) * (1280 + 18 + 1 + 18) * 60 / 1000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 232,
+ .hsync_end = 720 + 232 + 20,
+ .htotal = 720 + 232 + 20 + 112,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 18,
+ .vsync_end = 1280 + 18 + 1,
+ .vtotal = 1280 + 18 + 1 + 18,
+ .width_mm = 65,
+ .height_mm = 116,
+};
+
+static int truly_nt35521_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &truly_nt35521_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs truly_nt35521_panel_funcs = {
+ .prepare = truly_nt35521_prepare,
+ .unprepare = truly_nt35521_unprepare,
+ .enable = truly_nt35521_enable,
+ .disable = truly_nt35521_disable,
+ .get_modes = truly_nt35521_get_modes,
+};
+
+static int truly_nt35521_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int truly_nt35521_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
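+ /* Only the low byte of the 16-bit DCS brightness value is meaningful here */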
+ return brightness & 0xff;
+}
+
+static const struct backlight_ops truly_nt35521_bl_ops = {
+ .update_status = truly_nt35521_bl_update_status,
+ .get_brightness = truly_nt35521_bl_get_brightness,
+};
+
+static struct backlight_device *
+truly_nt35521_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &truly_nt35521_bl_ops, &props);
+}
+
+static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct truly_nt35521 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "positive5";
+ ctx->supplies[1].supply = "negative5";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->blen_gpio = devm_gpiod_get(dev, "backlight", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->blen_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->blen_gpio),
+ "Failed to get backlight-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &truly_nt35521_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ctx->panel.backlight = truly_nt35521_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int truly_nt35521_remove(struct mipi_dsi_device *dsi)
+{
+ struct truly_nt35521 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id truly_nt35521_of_match[] = {
+ { .compatible = "sony,tulip-truly-nt35521" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, truly_nt35521_of_match);
+
+static struct mipi_dsi_driver truly_nt35521_driver = {
+ .probe = truly_nt35521_probe,
+ .remove = truly_nt35521_remove,
+ .driver = {
+ .name = "panel-truly-nt35521",
+ .of_match_table = truly_nt35521_of_match,
+ },
+};
+module_mipi_dsi_driver(truly_nt35521_driver);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("DRM driver for Sony Tulip Truly NT35521 panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index bacaf1b7fb70..1866cdb8f9c1 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -431,16 +431,14 @@ static int td043mtea1_probe(struct spi_device *spi)
memcpy(lcd->gamma, td043mtea1_def_gamma, sizeof(lcd->gamma));
lcd->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
- if (IS_ERR(lcd->vcc_reg)) {
- dev_err(&spi->dev, "failed to get VCC regulator\n");
- return PTR_ERR(lcd->vcc_reg);
- }
+ if (IS_ERR(lcd->vcc_reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(lcd->vcc_reg),
+ "failed to get VCC regulator\n");
lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(lcd->reset_gpio)) {
- dev_err(&spi->dev, "failed to get reset GPIO\n");
- return PTR_ERR(lcd->reset_gpio);
- }
+ if (IS_ERR(lcd->reset_gpio))
+ return dev_err_probe(&spi->dev, PTR_ERR(lcd->reset_gpio),
+ "failed to get reset GPIO\n");
spi->bits_per_word = 16;
spi->mode = SPI_MODE_0;
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index d17aae8b71d7..8177f5a360fb 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -283,26 +283,19 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
return -ENOMEM;
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset_gpio)) {
- dev_err(dev, "cannot get reset gpio\n");
- return PTR_ERR(ctx->reset_gpio);
- }
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "cannot get reset gpio\n");
ctx->vci = devm_regulator_get(dev, "vci");
- if (IS_ERR(ctx->vci)) {
- ret = PTR_ERR(ctx->vci);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request vci regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->vci))
+ return dev_err_probe(dev, PTR_ERR(ctx->vci),
+ "Failed to request vci regulator\n");
ctx->iovcc = devm_regulator_get(dev, "iovcc");
- if (IS_ERR(ctx->iovcc)) {
- ret = PTR_ERR(ctx->iovcc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->iovcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
+ "Failed to request iovcc regulator\n");
mipi_dsi_set_drvdata(dsi, ctx);
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 82ad9a67f251..96bb5a465627 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -427,7 +427,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
}
}
- args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
+ args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 23377481f4e3..ead65f5fa2bc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -49,7 +49,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
kvfree(bo->sgts);
}
- drm_gem_shmem_free_object(obj);
+ drm_gem_shmem_free(&bo->base);
}
struct panfrost_gem_mapping *
@@ -187,23 +187,25 @@ void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
- if (to_panfrost_bo(obj)->is_heap)
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+
+ if (bo->is_heap)
return -EINVAL;
- return drm_gem_shmem_pin(obj);
+ return drm_gem_shmem_pin(&bo->base);
}
static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.free = panfrost_gem_free_object,
.open = panfrost_gem_open,
.close = panfrost_gem_close,
- .print_info = drm_gem_shmem_print_info,
+ .print_info = drm_gem_shmem_object_print_info,
.pin = panfrost_gem_pin,
- .unpin = drm_gem_shmem_unpin,
- .get_sg_table = drm_gem_shmem_get_sg_table,
- .vmap = drm_gem_shmem_vmap,
- .vunmap = drm_gem_shmem_vunmap,
- .mmap = drm_gem_shmem_mmap,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
};
/**
@@ -221,7 +223,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
- return NULL;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&obj->mappings.list);
mutex_init(&obj->mappings.lock);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 1b9f68d8e9aa..b0142341e223 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -52,7 +52,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
goto unlock_mappings;
panfrost_gem_teardown_mappings_locked(bo);
- drm_gem_shmem_purge_locked(obj);
+ drm_gem_shmem_purge_locked(&bo->base);
ret = true;
mutex_unlock(&shmem->pages_lock);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index f51d3f791a17..39562f2d11a4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -304,7 +304,8 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
struct panfrost_gem_object *bo = mapping->obj;
- struct drm_gem_object *obj = &bo->base.base;
+ struct drm_gem_shmem_object *shmem = &bo->base;
+ struct drm_gem_object *obj = &shmem->base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
int prot = IOMMU_READ | IOMMU_WRITE;
@@ -315,7 +316,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
if (bo->noexec)
prot |= IOMMU_NOEXEC;
- sgt = drm_gem_shmem_get_pages_sgt(obj);
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index e116a4d9b8e5..1d36df5af98d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -105,7 +105,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_close_bo;
}
- ret = drm_gem_shmem_vmap(&bo->base, &map);
+ ret = drm_gem_shmem_vmap(bo, &map);
if (ret)
goto err_put_mapping;
perfcnt->buf = map.vaddr;
@@ -164,7 +164,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
return 0;
err_vunmap:
- drm_gem_shmem_vunmap(&bo->base, &map);
+ drm_gem_shmem_vunmap(bo, &map);
err_put_mapping:
panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
@@ -194,7 +194,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, &map);
+ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base, &map);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index 3aae387a96af..91ee05b01303 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -6,7 +6,6 @@ config DRM_PL111
depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 1f9a59601bb1..6a36b0fd845c 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -57,13 +57,16 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) {
- struct dma_resv_list *fobj;
- int rel;
-
- rcu_read_lock();
- fobj = dma_resv_shared_list(bo->tbo.base.resv);
- rel = fobj ? fobj->shared_count : 0;
- rcu_read_unlock();
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ int rel = 0;
+
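+ /*
+ * The unlocked iterator may restart when the reservation object is
+ * modified concurrently, so reset the count on every restart.
+ */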
+ dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ if (dma_resv_iter_is_restarted(&cursor))
+ rel = 0;
+ ++rel;
+ }
seq_printf(m, "size %ld, pc %d, num releases %d\n",
(unsigned long)bo->tbo.base.size,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index fc47b0deb021..e4b16421500b 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -29,7 +29,6 @@
#include "qxl_drv.h"
-#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
@@ -295,7 +294,7 @@ static struct drm_driver qxl_driver = {
static int __init qxl_init(void)
{
- if (vgacon_text_force() && qxl_modeset == -1)
+ if (drm_firmware_drivers_only() && qxl_modeset == -1)
return -EINVAL;
if (qxl_modeset == 0)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b74cebca1f89..956c72b5aa33 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -31,7 +31,6 @@
#include <linux/compat.h>
-#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
@@ -637,15 +636,11 @@ static struct pci_driver radeon_kms_pci_driver = {
static int __init radeon_module_init(void)
{
- if (vgacon_text_force() && radeon_modeset == -1) {
- DRM_INFO("VGACON disable radeon kernel modesetting.\n");
+ if (drm_firmware_drivers_only() && radeon_modeset == -1)
radeon_modeset = 0;
- }
- if (radeon_modeset == 0) {
- DRM_ERROR("No UMS support in radeon module!\n");
+ if (radeon_modeset == 0)
return -EINVAL;
- }
DRM_INFO("radeon kernel modesetting enabled.\n");
radeon_register_atpx_handler();
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 482fb0ae6cb5..11ad210919c8 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -168,7 +168,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
if (!r) {
acpi_status = radeon_acpi_init(rdev);
if (acpi_status)
- dev_dbg(dev->dev, "Error during ACPI methods call\n");
+ dev_dbg(dev->dev, "Error during ACPI methods call\n");
}
if (radeon_is_px(dev)) {
@@ -648,6 +648,8 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_fpriv *fpriv;
+ struct radeon_vm *vm;
int r;
file_priv->driver_priv = NULL;
@@ -660,48 +662,52 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
- struct radeon_fpriv *fpriv;
- struct radeon_vm *vm;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv)) {
r = -ENOMEM;
- goto out_suspend;
+ goto err_suspend;
}
if (rdev->accel_working) {
vm = &fpriv->vm;
r = radeon_vm_init(rdev, vm);
- if (r) {
- kfree(fpriv);
- goto out_suspend;
- }
+ if (r)
+ goto err_fpriv;
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (r) {
- radeon_vm_fini(rdev, vm);
- kfree(fpriv);
- goto out_suspend;
- }
+ if (r)
+ goto err_vm_fini;
/* map the ib pool buffer read only into
* virtual address space */
vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
rdev->ring_tmp_bo.bo);
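+ /* radeon_vm_bo_add() returns NULL when it cannot allocate the BO/VA mapping */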
+ if (!vm->ib_bo_va) {
+ r = -ENOMEM;
+ goto err_vm_fini;
+ }
+
r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
- if (r) {
- radeon_vm_fini(rdev, vm);
- kfree(fpriv);
- goto out_suspend;
- }
+ if (r)
+ goto err_vm_fini;
}
file_priv->driver_priv = fpriv;
}
-out_suspend:
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ return 0;
+
+err_vm_fini:
+ radeon_vm_fini(rdev, vm);
+err_fpriv:
+ kfree(fpriv);
+
+err_suspend:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 9257b60144c4..b991ba1bcd51 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -91,33 +91,17 @@ int radeon_sync_resv(struct radeon_device *rdev,
struct dma_resv *resv,
bool shared)
{
- struct dma_resv_list *flist;
- struct dma_fence *f;
+ struct dma_resv_iter cursor;
struct radeon_fence *fence;
- unsigned i;
+ struct dma_fence *f;
int r = 0;
- /* always sync to the exclusive fence */
- f = dma_resv_excl_fence(resv);
- fence = f ? to_radeon_fence(f) : NULL;
- if (fence && fence->rdev == rdev)
- radeon_sync_fence(sync, fence);
- else if (f)
- r = dma_fence_wait(f, true);
-
- flist = dma_resv_shared_list(resv);
- if (shared || !flist || r)
- return r;
-
- for (i = 0; i < flist->shared_count; ++i) {
- f = rcu_dereference_protected(flist->shared[i],
- dma_resv_held(resv));
+ dma_resv_for_each_fence(&cursor, resv, shared, f) {
fence = to_radeon_fence(f);
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else
r = dma_fence_wait(f, true);
-
if (r)
break;
}
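
The conversion above replaces the manual walk over the exclusive fence and the shared-fence list with the dma_resv_iter cursor. A minimal sketch of the locked iterator as used here, assuming the caller already holds the reservation lock; passing true as the third argument visits the shared fences as well as the exclusive one:

	struct dma_resv_iter cursor;
	struct dma_fence *f;
	int r = 0;

	dma_resv_for_each_fence(&cursor, resv, true, f) {
		/* f yields the exclusive fence first, then each shared fence */
		r = dma_fence_wait(f, true);
		if (r)
			break;
	}
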
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 2ea86919d953..377f9cdb5b53 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -469,7 +469,6 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
- struct dma_fence *f;
void *ptr;
int i, r;
@@ -479,13 +478,11 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- f = dma_resv_excl_fence(bo->tbo.base.resv);
- if (f) {
- r = radeon_fence_wait((struct radeon_fence *)f, false);
- if (r) {
- DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
- return r;
- }
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (r <= 0) {
+ DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ return r ? r : -ETIME;
}
r = radeon_bo_kmap(bo, &ptr);
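
dma_resv_wait_timeout() folds the old fence lookup and wait into one call. It returns a negative error code, 0 on timeout, or the number of jiffies remaining, which is why the hunk above maps a 0 return to -ETIME. A sketch of the convention, with a hypothetical 100 ms bound:

	long r;

	r = dma_resv_wait_timeout(resv, false /* wait_all */, true /* intr */,
				  msecs_to_jiffies(100));
	if (r < 0)
		return r;	/* error, e.g. -ERESTARTSYS */
	if (r == 0)
		return -ETIME;	/* timed out */
	/* r > 0: the requested fences signalled in time */
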
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 511a942e851d..ca4a36464340 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -513,7 +513,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
* @allocated: allocated a new handle?
*
* Validates the handle and return the found session index or -EINVAL
- * we we don't have another free session index.
+ * if we don't have another free session index.
*/
static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
uint32_t handle, bool *allocated)
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index b47e74421e34..f6e6a6d5d987 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,23 +4,24 @@ config DRM_RCAR_DU
depends on DRM && OF
depends on ARM || ARM64
depends on ARCH_RENESAS || COMPILE_TEST
- imply DRM_RCAR_CMM
- imply DRM_RCAR_LVDS
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
-config DRM_RCAR_CMM
- tristate "R-Car DU Color Management Module (CMM) Support"
- depends on DRM && OF
+config DRM_RCAR_USE_CMM
+ bool "R-Car DU Color Management Module (CMM) Support"
depends on DRM_RCAR_DU
+ default DRM_RCAR_DU
help
Enable support for R-Car Color Management Module (CMM).
+config DRM_RCAR_CMM
+ def_tristate DRM_RCAR_DU
+ depends on DRM_RCAR_USE_CMM
+
config DRM_RCAR_DW_HDMI
tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support"
depends on DRM && OF
@@ -28,15 +29,27 @@ config DRM_RCAR_DW_HDMI
help
Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder.
+config DRM_RCAR_USE_LVDS
+ bool "R-Car DU LVDS Encoder Support"
+ depends on DRM_BRIDGE && OF
+ default DRM_RCAR_DU
+ help
+ Enable support for the R-Car Display Unit embedded LVDS encoders.
+
config DRM_RCAR_LVDS
- tristate "R-Car DU LVDS Encoder Support"
- depends on DRM && DRM_BRIDGE && OF
+ def_tristate DRM_RCAR_DU
+ depends on DRM_RCAR_USE_LVDS
select DRM_KMS_HELPER
select DRM_PANEL
select OF_FLATTREE
select OF_OVERLAY
+
+config DRM_RCAR_MIPI_DSI
+ tristate "R-Car DU MIPI DSI Encoder Support"
+ depends on DRM && DRM_BRIDGE && OF
+ select DRM_MIPI_DSI
help
- Enable support for the R-Car Display Unit embedded LVDS encoders.
+ Enable support for the R-Car Display Unit embedded MIPI DSI encoders.
config DRM_RCAR_VSP
bool "R-Car DU VSP Compositor Support" if ARM
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 4d1187ccc3e5..286bc81b3e7c 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_DRM_RCAR_CMM) += rcar_cmm.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
+obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o
# 'remote-endpoint' is fixed up at run-time
DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 5672830ca184..f361a604337f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -215,6 +215,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
struct rcar_du_device *rcdu = rcrtc->dev;
unsigned long mode_clock = mode->clock * 1000;
+ unsigned int hdse_offset;
u32 dsmr;
u32 escr;
@@ -261,12 +262,13 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
escr = ESCR_DCLKSEL_DCLKIN | div;
- } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
+ } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) ||
+ rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) {
/*
- * Use the LVDS PLL output as the dot clock when outputting to
- * the LVDS encoder on an SoC that supports this clock routing
- * option. We use the clock directly in that case, without any
- * additional divider.
+ * Use the external LVDS or DSI PLL output as the dot clock when
+ * outputting to the LVDS or DSI encoder on an SoC that supports
+ * this clock routing option. We use the clock directly in that
+ * case, without any additional divider.
*/
escr = ESCR_DCLKSEL_DCLKIN;
} else {
@@ -298,10 +300,15 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
| DSMR_DIPM_DISP | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, dsmr);
+ hdse_offset = 19;
+ if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2))
+ hdse_offset += 25;
+
/* Display timings */
- rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
+ rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start -
+ hdse_offset);
rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
- mode->hdisplay - 19);
+ mode->hdisplay - hdse_offset);
rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1);
@@ -836,6 +843,7 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_device *rcdu = rcrtc->dev;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ unsigned int min_sync_porch;
unsigned int vbp;
if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
@@ -843,9 +851,14 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
/*
* The hardware requires a minimum combined horizontal sync and back
- * porch of 20 pixels and a minimum vertical back porch of 3 lines.
+ * porch of 20 pixels (when CMM isn't used) or 45 pixels (when CMM is
+ * used), and a minimum vertical back porch of 3 lines.
*/
- if (mode->htotal - mode->hsync_start < 20)
+ min_sync_porch = 20;
+ if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2))
+ min_sync_porch += 25;
+
+ if (mode->htotal - mode->hsync_start < min_sync_porch)
return MODE_HBLANK_NARROW;
vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
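
As a worked example of the check above, take a hypothetical 1920x1080 mode with hsync_start = 2008 and htotal = 2200: the combined horizontal sync and back porch is 2200 - 2008 = 192 pixels, which clears both the 20-pixel (no CMM) and the 45-pixel (CMM) minimum, so the mode is not rejected as MODE_HBLANK_NARROW.
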
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 5612a9e7a905..5a8131ef81d5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -544,10 +544,12 @@ const char *rcar_du_output_name(enum rcar_du_output output)
static const char * const names[] = {
[RCAR_DU_OUTPUT_DPAD0] = "DPAD0",
[RCAR_DU_OUTPUT_DPAD1] = "DPAD1",
- [RCAR_DU_OUTPUT_LVDS0] = "LVDS0",
- [RCAR_DU_OUTPUT_LVDS1] = "LVDS1",
+ [RCAR_DU_OUTPUT_DSI0] = "DSI0",
+ [RCAR_DU_OUTPUT_DSI1] = "DSI1",
[RCAR_DU_OUTPUT_HDMI0] = "HDMI0",
[RCAR_DU_OUTPUT_HDMI1] = "HDMI1",
+ [RCAR_DU_OUTPUT_LVDS0] = "LVDS0",
+ [RCAR_DU_OUTPUT_LVDS1] = "LVDS1",
[RCAR_DU_OUTPUT_TCON] = "TCON",
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index eacb1f17f747..190dbb7f15dd 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -327,11 +327,11 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
*/
static const struct drm_gem_object_funcs rcar_du_gem_funcs = {
- .free = drm_gem_cma_free_object,
- .print_info = drm_gem_cma_print_info,
- .get_sg_table = drm_gem_cma_get_sg_table,
- .vmap = drm_gem_cma_vmap,
- .mmap = drm_gem_cma_mmap,
+ .free = drm_gem_cma_object_free,
+ .print_info = drm_gem_cma_object_print_info,
+ .get_sg_table = drm_gem_cma_object_get_sg_table,
+ .vmap = drm_gem_cma_object_vmap,
+ .mmap = drm_gem_cma_object_mmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
new file mode 100644
index 000000000000..891bb956fd61
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_mipi_dsi.c -- R-Car MIPI DSI Encoder
+ *
+ * Copyright (C) 2020 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+#include "rcar_mipi_dsi_regs.h"
+
+struct rcar_mipi_dsi {
+ struct device *dev;
+ const struct rcar_mipi_dsi_device_info *info;
+ struct reset_control *rstc;
+
+ struct mipi_dsi_host host;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct drm_connector connector;
+
+ void __iomem *mmio;
+ struct {
+ struct clk *mod;
+ struct clk *pll;
+ struct clk *dsi;
+ } clocks;
+
+ enum mipi_dsi_pixel_format format;
+ unsigned int num_data_lanes;
+ unsigned int lanes;
+};
+
+static inline struct rcar_mipi_dsi *
+bridge_to_rcar_mipi_dsi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct rcar_mipi_dsi, bridge);
+}
+
+static inline struct rcar_mipi_dsi *
+host_to_rcar_mipi_dsi(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct rcar_mipi_dsi, host);
+}
+
+static const u32 phtw[] = {
+ 0x01020114, 0x01600115, /* General testing */
+ 0x01030116, 0x0102011d, /* General testing */
+ 0x011101a4, 0x018601a4, /* 1Gbps testing */
+ 0x014201a0, 0x010001a3, /* 1Gbps testing */
+ 0x0101011f, /* 1Gbps testing */
+};
+
+static const u32 phtw2[] = {
+ 0x010c0130, 0x010c0140, /* General testing */
+ 0x010c0150, 0x010c0180, /* General testing */
+ 0x010c0190,
+ 0x010a0160, 0x010a0170,
+ 0x01800164, 0x01800174, /* 1Gbps testing */
+};
+
+static const u32 hsfreqrange_table[][2] = {
+ { 80000000U, 0x00 }, { 90000000U, 0x10 }, { 100000000U, 0x20 },
+ { 110000000U, 0x30 }, { 120000000U, 0x01 }, { 130000000U, 0x11 },
+ { 140000000U, 0x21 }, { 150000000U, 0x31 }, { 160000000U, 0x02 },
+ { 170000000U, 0x12 }, { 180000000U, 0x22 }, { 190000000U, 0x32 },
+ { 205000000U, 0x03 }, { 220000000U, 0x13 }, { 235000000U, 0x23 },
+ { 250000000U, 0x33 }, { 275000000U, 0x04 }, { 300000000U, 0x14 },
+ { 325000000U, 0x25 }, { 350000000U, 0x35 }, { 400000000U, 0x05 },
+ { 450000000U, 0x16 }, { 500000000U, 0x26 }, { 550000000U, 0x37 },
+ { 600000000U, 0x07 }, { 650000000U, 0x18 }, { 700000000U, 0x28 },
+ { 750000000U, 0x39 }, { 800000000U, 0x09 }, { 850000000U, 0x19 },
+ { 900000000U, 0x29 }, { 950000000U, 0x3a }, { 1000000000U, 0x0a },
+ { 1050000000U, 0x1a }, { 1100000000U, 0x2a }, { 1150000000U, 0x3b },
+ { 1200000000U, 0x0b }, { 1250000000U, 0x1b }, { 1300000000U, 0x2b },
+ { 1350000000U, 0x3c }, { 1400000000U, 0x0c }, { 1450000000U, 0x1c },
+ { 1500000000U, 0x2c }, { 1550000000U, 0x3d }, { 1600000000U, 0x0d },
+ { 1650000000U, 0x1d }, { 1700000000U, 0x2e }, { 1750000000U, 0x3e },
+ { 1800000000U, 0x0e }, { 1850000000U, 0x1e }, { 1900000000U, 0x2f },
+ { 1950000000U, 0x3f }, { 2000000000U, 0x0f }, { 2050000000U, 0x40 },
+ { 2100000000U, 0x41 }, { 2150000000U, 0x42 }, { 2200000000U, 0x43 },
+ { 2250000000U, 0x44 }, { 2300000000U, 0x45 }, { 2350000000U, 0x46 },
+ { 2400000000U, 0x47 }, { 2450000000U, 0x48 }, { 2500000000U, 0x49 },
+ { /* sentinel */ },
+};
+
+struct vco_cntrl_value {
+ u32 min_freq;
+ u32 max_freq;
+ u16 value;
+};
+
+static const struct vco_cntrl_value vco_cntrl_table[] = {
+ { .min_freq = 40000000U, .max_freq = 55000000U, .value = 0x3f },
+ { .min_freq = 52500000U, .max_freq = 80000000U, .value = 0x39 },
+ { .min_freq = 80000000U, .max_freq = 110000000U, .value = 0x2f },
+ { .min_freq = 105000000U, .max_freq = 160000000U, .value = 0x29 },
+ { .min_freq = 160000000U, .max_freq = 220000000U, .value = 0x1f },
+ { .min_freq = 210000000U, .max_freq = 320000000U, .value = 0x19 },
+ { .min_freq = 320000000U, .max_freq = 440000000U, .value = 0x0f },
+ { .min_freq = 420000000U, .max_freq = 660000000U, .value = 0x09 },
+ { .min_freq = 630000000U, .max_freq = 1149000000U, .value = 0x03 },
+ { .min_freq = 1100000000U, .max_freq = 1152000000U, .value = 0x01 },
+ { .min_freq = 1150000000U, .max_freq = 1250000000U, .value = 0x01 },
+ { /* sentinel */ },
+};
+
+static void rcar_mipi_dsi_write(struct rcar_mipi_dsi *dsi, u32 reg, u32 data)
+{
+ iowrite32(data, dsi->mmio + reg);
+}
+
+static u32 rcar_mipi_dsi_read(struct rcar_mipi_dsi *dsi, u32 reg)
+{
+ return ioread32(dsi->mmio + reg);
+}
+
+static void rcar_mipi_dsi_clr(struct rcar_mipi_dsi *dsi, u32 reg, u32 clr)
+{
+ rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) & ~clr);
+}
+
+static void rcar_mipi_dsi_set(struct rcar_mipi_dsi *dsi, u32 reg, u32 set)
+{
+ rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) | set);
+}
+
+static int rcar_mipi_dsi_phtw_test(struct rcar_mipi_dsi *dsi, u32 phtw)
+{
+ u32 status;
+ int ret;
+
+ rcar_mipi_dsi_write(dsi, PHTW, phtw);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & (PHTW_DWEN | PHTW_CWEN)),
+ 2000, 10000, false, dsi, PHTW);
+ if (ret < 0) {
+ dev_err(dsi->dev, "PHY test interface write timeout (0x%08x)\n",
+ phtw);
+ return ret;
+ }
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware Setup
+ */
+
+struct dsi_setup_info {
+ unsigned long fout;
+ u16 vco_cntrl;
+ u16 prop_cntrl;
+ u16 hsfreqrange;
+ u16 div;
+ unsigned int m;
+ unsigned int n;
+};
+
+static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi *dsi,
+ struct clk *clk, unsigned long target,
+ struct dsi_setup_info *setup_info)
+{
+
+ const struct vco_cntrl_value *vco_cntrl;
+ unsigned long fout_target;
+ unsigned long fin, fout;
+ unsigned long hsfreq;
+ unsigned int best_err = -1;
+ unsigned int divider;
+ unsigned int n;
+ unsigned int i;
+ unsigned int err;
+
+ /*
+ * Calculate Fout = dot clock * ColorDepth / (2 * Lane Count)
+ * The range of Fout is [40 - 1250] MHz.
+ */
+ fout_target = target * mipi_dsi_pixel_format_to_bpp(dsi->format)
+ / (2 * dsi->lanes);
+ if (fout_target < 40000000 || fout_target > 1250000000)
+ return;
+
+ /* Find vco_cntrl */
+ for (vco_cntrl = vco_cntrl_table; vco_cntrl->min_freq != 0; vco_cntrl++) {
+ if (fout_target > vco_cntrl->min_freq &&
+ fout_target <= vco_cntrl->max_freq) {
+ setup_info->vco_cntrl = vco_cntrl->value;
+ if (fout_target >= 1150000000)
+ setup_info->prop_cntrl = 0x0c;
+ else
+ setup_info->prop_cntrl = 0x0b;
+ break;
+ }
+ }
+
+ /* Add divider */
+ setup_info->div = (setup_info->vco_cntrl & 0x30) >> 4;
+
+ /* Find hsfreqrange */
+ hsfreq = fout_target * 2;
+ for (i = 0; i < ARRAY_SIZE(hsfreqrange_table); i++) {
+ if (hsfreqrange_table[i][0] >= hsfreq) {
+ setup_info->hsfreqrange = hsfreqrange_table[i][1];
+ break;
+ }
+ }
+
+ /*
+ * Calculate n and m for PLL clock
+ * Per the HW manual, the valid ranges are
+ * n = [3-8] and m = [64-625].
+ */
+ fin = clk_get_rate(clk);
+ divider = 1 << setup_info->div;
+ for (n = 3; n < 9; n++) {
+ unsigned long fpfd;
+ unsigned int m;
+
+ fpfd = fin / n;
+
+ for (m = 64; m < 626; m++) {
+ fout = fpfd * m / divider;
+ err = abs((long)(fout - fout_target) * 10000 /
+ (long)fout_target);
+ if (err < best_err) {
+ setup_info->m = m - 2;
+ setup_info->n = n - 1;
+ setup_info->fout = fout;
+ best_err = err;
+ if (err == 0)
+ goto done;
+ }
+ }
+ }
+
+done:
+ dev_dbg(dsi->dev,
+ "%pC %lu Hz -> Fout %lu Hz (target %lu Hz, error %d.%02u%%), PLL M/N/DIV %u/%u/%u\n",
+ clk, fin, setup_info->fout, fout_target, best_err / 100,
+ best_err % 100, setup_info->m, setup_info->n, setup_info->div);
+ dev_dbg(dsi->dev,
+ "vco_cntrl = 0x%x\tprop_cntrl = 0x%x\thsfreqrange = 0x%x\n",
+ setup_info->vco_cntrl, setup_info->prop_cntrl,
+ setup_info->hsfreqrange);
+}
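
To make the calculation concrete, a hypothetical worked example: a 148.5 MHz dot clock with RGB24 (24 bpp) on 4 lanes gives Fout = 148500000 * 24 / (2 * 4) = 445.5 MHz, inside the [40, 1250] MHz window. Against the tables above, the 420-660 MHz vco_cntrl row yields 0x09, so div = (0x09 & 0x30) >> 4 = 0 and prop_cntrl = 0x0b (below 1150 MHz), while hsfreq = 2 * Fout = 891 MHz selects hsfreqrange 0x29, the first table entry at or above that rate.
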
+
+static void rcar_mipi_dsi_set_display_timing(struct rcar_mipi_dsi *dsi,
+ const struct drm_display_mode *mode)
+{
+ u32 setr;
+ u32 vprmset0r;
+ u32 vprmset1r;
+ u32 vprmset2r;
+ u32 vprmset3r;
+ u32 vprmset4r;
+
+ /* Configuration for Pixel Stream and Packet Header */
+ if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 24)
+ rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB24);
+ else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 18)
+ rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB18);
+ else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 16)
+ rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB16);
+ else {
+ dev_warn(dsi->dev, "unsupported format\n");
+ return;
+ }
+
+ /* Configuration for Blanking sequence and Input Pixel */
+ setr = TXVMSETR_HSABPEN_EN | TXVMSETR_HBPBPEN_EN
+ | TXVMSETR_HFPBPEN_EN | TXVMSETR_SYNSEQ_PULSES
+ | TXVMSETR_PIXWDTH | TXVMSETR_VSTPM;
+ rcar_mipi_dsi_write(dsi, TXVMSETR, setr);
+
+ /* Configuration for Video Parameters */
+ vprmset0r = (mode->flags & DRM_MODE_FLAG_PVSYNC ?
+ TXVMVPRMSET0R_VSPOL_HIG : TXVMVPRMSET0R_VSPOL_LOW)
+ | (mode->flags & DRM_MODE_FLAG_PHSYNC ?
+ TXVMVPRMSET0R_HSPOL_HIG : TXVMVPRMSET0R_HSPOL_LOW)
+ | TXVMVPRMSET0R_CSPC_RGB | TXVMVPRMSET0R_BPP_24;
+
+ vprmset1r = TXVMVPRMSET1R_VACTIVE(mode->vdisplay)
+ | TXVMVPRMSET1R_VSA(mode->vsync_end - mode->vsync_start);
+
+ vprmset2r = TXVMVPRMSET2R_VFP(mode->vsync_start - mode->vdisplay)
+ | TXVMVPRMSET2R_VBP(mode->vtotal - mode->vsync_end);
+
+ vprmset3r = TXVMVPRMSET3R_HACTIVE(mode->hdisplay)
+ | TXVMVPRMSET3R_HSA(mode->hsync_end - mode->hsync_start);
+
+ vprmset4r = TXVMVPRMSET4R_HFP(mode->hsync_start - mode->hdisplay)
+ | TXVMVPRMSET4R_HBP(mode->htotal - mode->hsync_end);
+
+ rcar_mipi_dsi_write(dsi, TXVMVPRMSET0R, vprmset0r);
+ rcar_mipi_dsi_write(dsi, TXVMVPRMSET1R, vprmset1r);
+ rcar_mipi_dsi_write(dsi, TXVMVPRMSET2R, vprmset2r);
+ rcar_mipi_dsi_write(dsi, TXVMVPRMSET3R, vprmset3r);
+ rcar_mipi_dsi_write(dsi, TXVMVPRMSET4R, vprmset4r);
+}
+
+static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
+ const struct drm_display_mode *mode)
+{
+ struct dsi_setup_info setup_info = {};
+ unsigned int timeout;
+ int ret, i;
+ int dsi_format;
+ u32 phy_setup;
+ u32 clockset2, clockset3;
+ u32 ppisetr;
+ u32 vclkset;
+
+ /* Checking valid format */
+ dsi_format = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ if (dsi_format < 0) {
+ dev_warn(dsi->dev, "invalid format\n");
+ return -EINVAL;
+ }
+
+ /* Parameters Calculation */
+ rcar_mipi_dsi_parameters_calc(dsi, dsi->clocks.pll,
+ mode->clock * 1000, &setup_info);
+
+ /* LPCLK enable */
+ rcar_mipi_dsi_set(dsi, LPCLKSET, LPCLKSET_CKEN);
+
+ /* CFGCLK enabled */
+ rcar_mipi_dsi_set(dsi, CFGCLKSET, CFGCLKSET_CKEN);
+
+ rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ);
+ rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+
+ rcar_mipi_dsi_set(dsi, PHTC, PHTC_TESTCLR);
+ rcar_mipi_dsi_clr(dsi, PHTC, PHTC_TESTCLR);
+
+ /* PHY setting */
+ phy_setup = rcar_mipi_dsi_read(dsi, PHYSETUP);
+ phy_setup &= ~PHYSETUP_HSFREQRANGE_MASK;
+ phy_setup |= PHYSETUP_HSFREQRANGE(setup_info.hsfreqrange);
+ rcar_mipi_dsi_write(dsi, PHYSETUP, phy_setup);
+
+ for (i = 0; i < ARRAY_SIZE(phtw); i++) {
+ ret = rcar_mipi_dsi_phtw_test(dsi, phtw[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* PLL Clock Setting */
+ rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
+ rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
+ rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
+
+ clockset2 = CLOCKSET2_M(setup_info.m) | CLOCKSET2_N(setup_info.n)
+ | CLOCKSET2_VCO_CNTRL(setup_info.vco_cntrl);
+ clockset3 = CLOCKSET3_PROP_CNTRL(setup_info.prop_cntrl)
+ | CLOCKSET3_INT_CNTRL(0)
+ | CLOCKSET3_CPBIAS_CNTRL(0x10)
+ | CLOCKSET3_GMP_CNTRL(1);
+ rcar_mipi_dsi_write(dsi, CLOCKSET2, clockset2);
+ rcar_mipi_dsi_write(dsi, CLOCKSET3, clockset3);
+
+ rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
+ rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
+ udelay(10);
+ rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
+
+ ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN;
+ rcar_mipi_dsi_write(dsi, PPISETR, ppisetr);
+
+ rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+ rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_RSTZ);
+ usleep_range(400, 500);
+
+ /* Checking PPI clock status register */
+ for (timeout = 10; timeout > 0; --timeout) {
+ if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
+ (rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
+ (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (!timeout) {
+ dev_err(dsi->dev, "failed to enable PPI clock\n");
+ return -ETIMEDOUT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(phtw2); i++) {
+ ret = rcar_mipi_dsi_phtw_test(dsi, phtw2[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable DOT clock */
+ vclkset = VCLKSET_CKEN;
+ rcar_mipi_dsi_set(dsi, VCLKSET, vclkset);
+
+ if (dsi_format == 24)
+ vclkset |= VCLKSET_BPP_24;
+ else if (dsi_format == 18)
+ vclkset |= VCLKSET_BPP_18;
+ else if (dsi_format == 16)
+ vclkset |= VCLKSET_BPP_16;
+ else {
+ dev_warn(dsi->dev, "unsupported format\n");
+ return -EINVAL;
+ }
+ vclkset |= VCLKSET_COLOR_RGB | VCLKSET_DIV(setup_info.div)
+ | VCLKSET_LANE(dsi->lanes - 1);
+
+ rcar_mipi_dsi_set(dsi, VCLKSET, vclkset);
+
+ /* After setting VCLKSET register, enable VCLKEN */
+ rcar_mipi_dsi_set(dsi, VCLKEN, VCLKEN_CKEN);
+
+ dev_dbg(dsi->dev, "DSI device is started\n");
+
+ return 0;
+}
+
+static void rcar_mipi_dsi_shutdown(struct rcar_mipi_dsi *dsi)
+{
+ rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ);
+ rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+
+ dev_dbg(dsi->dev, "DSI device is shut down\n");
+}
+
+static int rcar_mipi_dsi_clk_enable(struct rcar_mipi_dsi *dsi)
+{
+ int ret;
+
+ reset_control_deassert(dsi->rstc);
+
+ ret = clk_prepare_enable(dsi->clocks.mod);
+ if (ret < 0)
+ goto err_reset;
+
+ ret = clk_prepare_enable(dsi->clocks.dsi);
+ if (ret < 0)
+ goto err_clock;
+
+ return 0;
+
+err_clock:
+ clk_disable_unprepare(dsi->clocks.mod);
+err_reset:
+ reset_control_assert(dsi->rstc);
+ return ret;
+}
+
+static void rcar_mipi_dsi_clk_disable(struct rcar_mipi_dsi *dsi)
+{
+ clk_disable_unprepare(dsi->clocks.dsi);
+ clk_disable_unprepare(dsi->clocks.mod);
+
+ reset_control_assert(dsi->rstc);
+}
+
+static int rcar_mipi_dsi_start_hs_clock(struct rcar_mipi_dsi *dsi)
+{
+ /*
+ * The HW manual says to check that TxDDRClkHS-Q is stable, but it
+ * does not describe how, so the check is skipped for now.
+ */
+ u32 status;
+ int ret;
+
+ /* Start HS clock. */
+ rcar_mipi_dsi_set(dsi, PPICLCR, PPICLCR_TXREQHS);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ status & PPICLSR_TOHS,
+ 2000, 10000, false, dsi, PPICLSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to enable HS clock\n");
+ return ret;
+ }
+
+ rcar_mipi_dsi_set(dsi, PPICLSCR, PPICLSCR_TOHS);
+
+ return 0;
+}
+
+static int rcar_mipi_dsi_start_video(struct rcar_mipi_dsi *dsi)
+{
+ u32 status;
+ int ret;
+
+ /* Wait for the link to be ready. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & (LINKSR_LPBUSY | LINKSR_HSBUSY)),
+ 2000, 10000, false, dsi, LINKSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Link failed to become ready\n");
+ return ret;
+ }
+
+ /* De-assert video FIFO clear. */
+ rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_VFCLR);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ status & TXVMSR_VFRDY,
+ 2000, 10000, false, dsi, TXVMSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Failed to de-assert video FIFO clear\n");
+ return ret;
+ }
+
+ /* Enable transmission in video mode. */
+ rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_EN_VIDEO);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ status & TXVMSR_RDY,
+ 2000, 10000, false, dsi, TXVMSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Failed to enable video transmission\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Bridge
+ */
+
+static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+
+ return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ flags);
+}
+
+static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct drm_atomic_state *state = old_bridge_state->base.state;
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ int ret;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ mode = &drm_atomic_get_new_crtc_state(state, crtc)->adjusted_mode;
+
+ ret = rcar_mipi_dsi_clk_enable(dsi);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to enable DSI clocks\n");
+ return;
+ }
+
+ ret = rcar_mipi_dsi_startup(dsi, mode);
+ if (ret < 0)
+ goto err_dsi_startup;
+
+ rcar_mipi_dsi_set_display_timing(dsi, mode);
+
+ ret = rcar_mipi_dsi_start_hs_clock(dsi);
+ if (ret < 0)
+ goto err_dsi_start_hs;
+
+ rcar_mipi_dsi_start_video(dsi);
+
+ return;
+
+err_dsi_start_hs:
+ rcar_mipi_dsi_shutdown(dsi);
+err_dsi_startup:
+ rcar_mipi_dsi_clk_disable(dsi);
+}
+
+static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+
+ rcar_mipi_dsi_shutdown(dsi);
+ rcar_mipi_dsi_clk_disable(dsi);
+}
+
+static enum drm_mode_status
+rcar_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock > 297000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static const struct drm_bridge_funcs rcar_mipi_dsi_bridge_ops = {
+ .attach = rcar_mipi_dsi_attach,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = rcar_mipi_dsi_atomic_enable,
+ .atomic_disable = rcar_mipi_dsi_atomic_disable,
+ .mode_valid = rcar_mipi_dsi_bridge_mode_valid,
+};
+
+/* -----------------------------------------------------------------------------
+ * Host setting
+ */
+
+static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ int ret;
+
+ if (device->lanes > dsi->num_data_lanes)
+ return -EINVAL;
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+
+ dsi->next_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node,
+ 1, 0);
+ if (IS_ERR(dsi->next_bridge)) {
+ ret = PTR_ERR(dsi->next_bridge);
+ dev_err(dsi->dev, "failed to get next bridge: %d\n", ret);
+ return ret;
+ }
+
+ /* Initialize the DRM bridge. */
+ dsi->bridge.funcs = &rcar_mipi_dsi_bridge_ops;
+ dsi->bridge.of_node = dsi->dev->of_node;
+ drm_bridge_add(&dsi->bridge);
+
+ return 0;
+}
+
+static int rcar_mipi_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+
+ drm_bridge_remove(&dsi->bridge);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = {
+ .attach = rcar_mipi_dsi_host_attach,
+ .detach = rcar_mipi_dsi_host_detach,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove
+ */
+
+static int rcar_mipi_dsi_parse_dt(struct rcar_mipi_dsi *dsi)
+{
+ struct device_node *ep;
+ u32 data_lanes[4];
+ int ret;
+
+ ep = of_graph_get_endpoint_by_regs(dsi->dev->of_node, 1, 0);
+ if (!ep) {
+ dev_dbg(dsi->dev, "unconnected port@1\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_variable_u32_array(ep, "data-lanes", data_lanes,
+ 1, 4);
+ of_node_put(ep);
+
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing or invalid data-lanes property\n");
+ return -ENODEV;
+ }
+
+ dsi->num_data_lanes = ret;
+ return 0;
+}
+
+static struct clk *rcar_mipi_dsi_get_clock(struct rcar_mipi_dsi *dsi,
+ const char *name,
+ bool optional)
+{
+ struct clk *clk;
+
+ clk = devm_clk_get(dsi->dev, name);
+ if (!IS_ERR(clk))
+ return clk;
+
+ if (PTR_ERR(clk) == -ENOENT && optional)
+ return NULL;
+
+ dev_err_probe(dsi->dev, PTR_ERR(clk), "failed to get %s clock\n",
+ name ? name : "module");
+
+ return clk;
+}
+
+static int rcar_mipi_dsi_get_clocks(struct rcar_mipi_dsi *dsi)
+{
+ dsi->clocks.mod = rcar_mipi_dsi_get_clock(dsi, NULL, false);
+ if (IS_ERR(dsi->clocks.mod))
+ return PTR_ERR(dsi->clocks.mod);
+
+ dsi->clocks.pll = rcar_mipi_dsi_get_clock(dsi, "pll", true);
+ if (IS_ERR(dsi->clocks.pll))
+ return PTR_ERR(dsi->clocks.pll);
+
+ dsi->clocks.dsi = rcar_mipi_dsi_get_clock(dsi, "dsi", true);
+ if (IS_ERR(dsi->clocks.dsi))
+ return PTR_ERR(dsi->clocks.dsi);
+
+ if (!dsi->clocks.pll && !dsi->clocks.dsi) {
+ dev_err(dsi->dev, "no input clock (pll, dsi)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rcar_mipi_dsi_probe(struct platform_device *pdev)
+{
+ struct rcar_mipi_dsi *dsi;
+ struct resource *mem;
+ int ret;
+
+ dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+ if (dsi == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dsi);
+
+ dsi->dev = &pdev->dev;
+ dsi->info = of_device_get_match_data(&pdev->dev);
+
+ ret = rcar_mipi_dsi_parse_dt(dsi);
+ if (ret < 0)
+ return ret;
+
+ /* Acquire resources. */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->mmio = devm_ioremap_resource(dsi->dev, mem);
+ if (IS_ERR(dsi->mmio))
+ return PTR_ERR(dsi->mmio);
+
+ ret = rcar_mipi_dsi_get_clocks(dsi);
+ if (ret < 0)
+ return ret;
+
+ dsi->rstc = devm_reset_control_get(dsi->dev, NULL);
+ if (IS_ERR(dsi->rstc)) {
+ dev_err(dsi->dev, "failed to get cpg reset\n");
+ return PTR_ERR(dsi->rstc);
+ }
+
+ /* Initialize the DSI host. */
+ dsi->host.dev = dsi->dev;
+ dsi->host.ops = &rcar_mipi_dsi_host_ops;
+ ret = mipi_dsi_host_register(&dsi->host);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rcar_mipi_dsi_remove(struct platform_device *pdev)
+{
+ struct rcar_mipi_dsi *dsi = platform_get_drvdata(pdev);
+
+ mipi_dsi_host_unregister(&dsi->host);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_mipi_dsi_of_table[] = {
+ { .compatible = "renesas,r8a779a0-dsi-csi2-tx" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, rcar_mipi_dsi_of_table);
+
+static struct platform_driver rcar_mipi_dsi_platform_driver = {
+ .probe = rcar_mipi_dsi_probe,
+ .remove = rcar_mipi_dsi_remove,
+ .driver = {
+ .name = "rcar-mipi-dsi",
+ .of_match_table = rcar_mipi_dsi_of_table,
+ },
+};
+
+module_platform_driver(rcar_mipi_dsi_platform_driver);
+
+MODULE_DESCRIPTION("Renesas R-Car MIPI DSI Encoder Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h
new file mode 100644
index 000000000000..0e7a9274749f
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * rcar_mipi_dsi_regs.h -- R-Car MIPI DSI Interface Registers Definitions
+ *
+ * Copyright (C) 2020 Renesas Electronics Corporation
+ */
+
+#ifndef __RCAR_MIPI_DSI_REGS_H__
+#define __RCAR_MIPI_DSI_REGS_H__
+
+#define LINKSR 0x010
+#define LINKSR_LPBUSY (1 << 1)
+#define LINKSR_HSBUSY (1 << 0)
+
+/*
+ * Video Mode Register
+ */
+#define TXVMSETR 0x180
+#define TXVMSETR_SYNSEQ_PULSES (0 << 16)
+#define TXVMSETR_SYNSEQ_EVENTS (1 << 16)
+#define TXVMSETR_VSTPM (1 << 15)
+#define TXVMSETR_PIXWDTH (1 << 8)
+#define TXVMSETR_VSEN_EN (1 << 4)
+#define TXVMSETR_VSEN_DIS (0 << 4)
+#define TXVMSETR_HFPBPEN_EN (1 << 2)
+#define TXVMSETR_HFPBPEN_DIS (0 << 2)
+#define TXVMSETR_HBPBPEN_EN (1 << 1)
+#define TXVMSETR_HBPBPEN_DIS (0 << 1)
+#define TXVMSETR_HSABPEN_EN (1 << 0)
+#define TXVMSETR_HSABPEN_DIS (0 << 0)
+
+#define TXVMCR 0x190
+#define TXVMCR_VFCLR (1 << 12)
+#define TXVMCR_EN_VIDEO (1 << 0)
+
+#define TXVMSR 0x1a0
+#define TXVMSR_STR (1 << 16)
+#define TXVMSR_VFRDY (1 << 12)
+#define TXVMSR_ACT (1 << 8)
+#define TXVMSR_RDY (1 << 0)
+
+#define TXVMSCR 0x1a4
+#define TXVMSCR_STR (1 << 16)
+
+#define TXVMPSPHSETR 0x1c0
+#define TXVMPSPHSETR_DT_RGB16 (0x0e << 16)
+#define TXVMPSPHSETR_DT_RGB18 (0x1e << 16)
+#define TXVMPSPHSETR_DT_RGB18_LS (0x2e << 16)
+#define TXVMPSPHSETR_DT_RGB24 (0x3e << 16)
+#define TXVMPSPHSETR_DT_YCBCR16 (0x2c << 16)
+
+#define TXVMVPRMSET0R 0x1d0
+#define TXVMVPRMSET0R_HSPOL_HIG (0 << 17)
+#define TXVMVPRMSET0R_HSPOL_LOW (1 << 17)
+#define TXVMVPRMSET0R_VSPOL_HIG (0 << 16)
+#define TXVMVPRMSET0R_VSPOL_LOW (1 << 16)
+#define TXVMVPRMSET0R_CSPC_RGB (0 << 4)
+#define TXVMVPRMSET0R_CSPC_YCbCr (1 << 4)
+#define TXVMVPRMSET0R_BPP_16 (0 << 0)
+#define TXVMVPRMSET0R_BPP_18 (1 << 0)
+#define TXVMVPRMSET0R_BPP_24 (2 << 0)
+
+#define TXVMVPRMSET1R 0x1d4
+#define TXVMVPRMSET1R_VACTIVE(x) (((x) & 0x7fff) << 16)
+#define TXVMVPRMSET1R_VSA(x) (((x) & 0xfff) << 0)
+
+#define TXVMVPRMSET2R 0x1d8
+#define TXVMVPRMSET2R_VFP(x) (((x) & 0x1fff) << 16)
+#define TXVMVPRMSET2R_VBP(x) (((x) & 0x1fff) << 0)
+
+#define TXVMVPRMSET3R 0x1dc
+#define TXVMVPRMSET3R_HACTIVE(x) (((x) & 0x7fff) << 16)
+#define TXVMVPRMSET3R_HSA(x) (((x) & 0xfff) << 0)
+
+#define TXVMVPRMSET4R 0x1e0
+#define TXVMVPRMSET4R_HFP(x) (((x) & 0x1fff) << 16)
+#define TXVMVPRMSET4R_HBP(x) (((x) & 0x1fff) << 0)
+
+/*
+ * PHY-Protocol Interface (PPI) Registers
+ */
+#define PPISETR 0x700
+#define PPISETR_DLEN_0 (0x1 << 0)
+#define PPISETR_DLEN_1 (0x3 << 0)
+#define PPISETR_DLEN_2 (0x7 << 0)
+#define PPISETR_DLEN_3 (0xf << 0)
+#define PPISETR_CLEN (1 << 8)
+
+#define PPICLCR 0x710
+#define PPICLCR_TXREQHS (1 << 8)
+#define PPICLCR_TXULPSEXT (1 << 1)
+#define PPICLCR_TXULPSCLK (1 << 0)
+
+#define PPICLSR 0x720
+#define PPICLSR_HSTOLP (1 << 27)
+#define PPICLSR_TOHS (1 << 26)
+#define PPICLSR_STPST (1 << 0)
+
+#define PPICLSCR 0x724
+#define PPICLSCR_HSTOLP (1 << 27)
+#define PPICLSCR_TOHS (1 << 26)
+
+#define PPIDLSR 0x760
+#define PPIDLSR_STPST (0xf << 0)
+
+/*
+ * Clocks registers
+ */
+#define LPCLKSET 0x1000
+#define LPCLKSET_CKEN (1 << 8)
+#define LPCLKSET_LPCLKDIV(x) (((x) & 0x3f) << 0)
+
+#define CFGCLKSET 0x1004
+#define CFGCLKSET_CKEN (1 << 8)
+#define CFGCLKSET_CFGCLKDIV(x) (((x) & 0x3f) << 0)
+
+#define DOTCLKDIV 0x1008
+#define DOTCLKDIV_CKEN (1 << 8)
+#define DOTCLKDIV_DOTCLKDIV(x) (((x) & 0x3f) << 0)
+
+#define VCLKSET 0x100c
+#define VCLKSET_CKEN (1 << 16)
+#define VCLKSET_COLOR_RGB (0 << 8)
+#define VCLKSET_COLOR_YCC (1 << 8)
+#define VCLKSET_DIV(x) (((x) & 0x3) << 4)
+#define VCLKSET_BPP_16 (0 << 2)
+#define VCLKSET_BPP_18 (1 << 2)
+#define VCLKSET_BPP_18L (2 << 2)
+#define VCLKSET_BPP_24 (3 << 2)
+#define VCLKSET_LANE(x) (((x) & 0x3) << 0)
+
+#define VCLKEN 0x1010
+#define VCLKEN_CKEN (1 << 0)
+
+#define PHYSETUP 0x1014
+#define PHYSETUP_HSFREQRANGE(x) (((x) & 0x7f) << 16)
+#define PHYSETUP_HSFREQRANGE_MASK (0x7f << 16)
+#define PHYSETUP_CFGCLKFREQRANGE(x) (((x) & 0x3f) << 8)
+#define PHYSETUP_SHUTDOWNZ (1 << 1)
+#define PHYSETUP_RSTZ (1 << 0)
+
+#define CLOCKSET1 0x101c
+#define CLOCKSET1_LOCK_PHY (1 << 17)
+#define CLOCKSET1_LOCK (1 << 16)
+#define CLOCKSET1_CLKSEL (1 << 8)
+#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2)
+#define CLOCKSET1_CLKINSEL_DIG (1 << 2)
+#define CLOCKSET1_CLKINSEL_DU (1 << 3)
+#define CLOCKSET1_SHADOW_CLEAR (1 << 1)
+#define CLOCKSET1_UPDATEPLL (1 << 0)
+
+#define CLOCKSET2 0x1020
+#define CLOCKSET2_M(x) (((x) & 0xfff) << 16)
+#define CLOCKSET2_VCO_CNTRL(x) (((x) & 0x3f) << 8)
+#define CLOCKSET2_N(x) (((x) & 0xf) << 0)
+
+#define CLOCKSET3 0x1024
+#define CLOCKSET3_PROP_CNTRL(x) (((x) & 0x3f) << 24)
+#define CLOCKSET3_INT_CNTRL(x) (((x) & 0x3f) << 16)
+#define CLOCKSET3_CPBIAS_CNTRL(x) (((x) & 0x7f) << 8)
+#define CLOCKSET3_GMP_CNTRL(x) (((x) & 0x3) << 0)
+
+#define PHTW 0x1034
+#define PHTW_DWEN (1 << 24)
+#define PHTW_TESTDIN_DATA(x) (((x) & 0xff) << 16)
+#define PHTW_CWEN (1 << 8)
+#define PHTW_TESTDIN_CODE(x) (((x) & 0xff) << 0)
+
+#define PHTC 0x103c
+#define PHTC_TESTCLR (1 << 0)
+
+#endif /* __RCAR_MIPI_DSI_REGS_H__ */
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 17a9e7eb2130..1a56f696558c 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -5,7 +5,6 @@
rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
rockchip_drm_gem.o rockchip_drm_vop.o rockchip_vop_reg.o
-rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index a9acbcc420d0..4ed7a6868197 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -267,6 +267,8 @@ struct dw_mipi_dsi_rockchip {
struct dw_mipi_dsi *dmd;
const struct rockchip_dw_dsi_chip_data *cdata;
struct dw_mipi_dsi_plat_data pdata;
+
+ bool dsi_bound;
};
struct dphy_pll_parameter_map {
@@ -772,10 +774,6 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
if (mux < 0)
return;
- pm_runtime_get_sync(dsi->dev);
- if (dsi->slave)
- pm_runtime_get_sync(dsi->slave->dev);
-
/*
* For the RK3399, the clk of grf must be enabled before writing grf
* register. And for RK3288 or other soc, this grf_clk must be NULL,
@@ -794,20 +792,10 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
clk_disable_unprepare(dsi->grf_clk);
}
-static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
-{
- struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
-
- if (dsi->slave)
- pm_runtime_put(dsi->slave->dev);
- pm_runtime_put(dsi->dev);
-}
-
static const struct drm_encoder_helper_funcs
dw_mipi_dsi_encoder_helper_funcs = {
.atomic_check = dw_mipi_dsi_encoder_atomic_check,
.enable = dw_mipi_dsi_encoder_enable,
- .disable = dw_mipi_dsi_encoder_disable,
};
static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
@@ -937,10 +925,14 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
put_device(second);
}
+ pm_runtime_get_sync(dsi->dev);
+ if (dsi->slave)
+ pm_runtime_get_sync(dsi->slave->dev);
+
ret = clk_prepare_enable(dsi->pllref_clk);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret);
- return ret;
+ goto out_pm_runtime;
}
/*
@@ -952,7 +944,7 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
ret = clk_prepare_enable(dsi->grf_clk);
if (ret) {
DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
- return ret;
+ goto out_pll_clk;
}
dw_mipi_dsi_rockchip_config(dsi);
@@ -964,16 +956,27 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
- return ret;
+ goto out_pll_clk;
}
ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to bind: %d\n", ret);
- return ret;
+ goto out_pll_clk;
}
+ dsi->dsi_bound = true;
+
return 0;
+
+out_pll_clk:
+ clk_disable_unprepare(dsi->pllref_clk);
+out_pm_runtime:
+ pm_runtime_put(dsi->dev);
+ if (dsi->slave)
+ pm_runtime_put(dsi->slave->dev);
+
+ return ret;
}
static void dw_mipi_dsi_rockchip_unbind(struct device *dev,
@@ -985,9 +988,15 @@ static void dw_mipi_dsi_rockchip_unbind(struct device *dev,
if (dsi->is_slave)
return;
+ dsi->dsi_bound = false;
+
dw_mipi_dsi_unbind(dsi->dmd);
clk_disable_unprepare(dsi->pllref_clk);
+
+ pm_runtime_put(dsi->dev);
+ if (dsi->slave)
+ pm_runtime_put(dsi->slave->dev);
}
static const struct component_ops dw_mipi_dsi_rockchip_ops = {
@@ -1275,6 +1284,36 @@ static const struct phy_ops dw_mipi_dsi_dphy_ops = {
.exit = dw_mipi_dsi_dphy_exit,
};
+static int __maybe_unused dw_mipi_dsi_rockchip_resume(struct device *dev)
+{
+ struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * Re-configure DSI state, if we were previously initialized. We need
+ * to do this before rockchip_drm_drv tries to re-enable() any panels.
+ */
+ if (dsi->dsi_bound) {
+ ret = clk_prepare_enable(dsi->grf_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
+ return ret;
+ }
+
+ dw_mipi_dsi_rockchip_config(dsi);
+ if (dsi->slave)
+ dw_mipi_dsi_rockchip_config(dsi->slave);
+
+ clk_disable_unprepare(dsi->grf_clk);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_mipi_dsi_rockchip_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, dw_mipi_dsi_rockchip_resume)
+};
+
static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1396,14 +1435,10 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev,
"Failed to probe dw_mipi_dsi: %d\n", ret);
- goto err_clkdisable;
+ return ret;
}
return 0;
-
-err_clkdisable:
- clk_disable_unprepare(dsi->pllref_clk);
- return ret;
}
static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
@@ -1592,6 +1627,7 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = {
.remove = dw_mipi_dsi_rockchip_remove,
.driver = {
.of_match_table = dw_mipi_dsi_rockchip_dt_ids,
+ .pm = &dw_mipi_dsi_rockchip_pm_ops,
.name = "dw-mipi-dsi-rockchip",
},
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index e4ebe60b3cc1..bec207de4544 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -26,7 +26,6 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
-#include "rockchip_drm_fbdev.h"
#include "rockchip_drm_gem.h"
#define DRIVER_NAME "rockchip"
@@ -159,10 +158,6 @@ static int rockchip_drm_bind(struct device *dev)
drm_mode_config_reset(drm_dev);
- ret = rockchip_drm_fbdev_init(drm_dev);
- if (ret)
- goto err_unbind_all;
-
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm_dev);
@@ -170,10 +165,11 @@ static int rockchip_drm_bind(struct device *dev)
if (ret)
goto err_kms_helper_poll_fini;
+ drm_fbdev_generic_setup(drm_dev, 0);
+
return 0;
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
- rockchip_drm_fbdev_fini(drm_dev);
err_unbind_all:
component_unbind_all(dev, drm_dev);
err_iommu_cleanup:
@@ -189,7 +185,6 @@ static void rockchip_drm_unbind(struct device *dev)
drm_dev_unregister(drm_dev);
- rockchip_drm_fbdev_fini(drm_dev);
drm_kms_helper_poll_fini(drm_dev);
drm_atomic_helper_shutdown(drm_dev);
@@ -199,25 +194,15 @@ static void rockchip_drm_unbind(struct device *dev)
drm_dev_put(drm_dev);
}
-static const struct file_operations rockchip_drm_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = rockchip_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .release = drm_release,
-};
+DEFINE_DRM_GEM_FOPS(rockchip_drm_driver_fops);
static const struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .lastclose = drm_fb_helper_lastclose,
.dumb_create = rockchip_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
- .gem_prime_mmap = rockchip_gem_mmap_buf,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.fops = &rockchip_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index aa0909e8edf9..143a48330f84 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -43,8 +43,6 @@ struct rockchip_crtc_state {
* @mm_lock: protect drm_mm on multi-threads.
*/
struct rockchip_drm_private {
- struct drm_fb_helper fbdev_helper;
- struct drm_gem_object *fbdev_bo;
struct iommu_domain *domain;
struct mutex mm_lock;
struct drm_mm mm;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
deleted file mode 100644
index 2fdc455c4ad7..000000000000
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
- * Author:Mark Yao <mark.yao@rock-chips.com>
- */
-
-#include <drm/drm.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_probe_helper.h>
-
-#include "rockchip_drm_drv.h"
-#include "rockchip_drm_gem.h"
-#include "rockchip_drm_fb.h"
-#include "rockchip_drm_fbdev.h"
-
-#define PREFERRED_BPP 32
-#define to_drm_private(x) \
- container_of(x, struct rockchip_drm_private, fbdev_helper)
-
-static int rockchip_fbdev_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- struct drm_fb_helper *helper = info->par;
- struct rockchip_drm_private *private = to_drm_private(helper);
-
- return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
-}
-
-static const struct fb_ops rockchip_drm_fbdev_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_mmap = rockchip_fbdev_mmap,
- .fb_fillrect = drm_fb_helper_cfb_fillrect,
- .fb_copyarea = drm_fb_helper_cfb_copyarea,
- .fb_imageblit = drm_fb_helper_cfb_imageblit,
-};
-
-static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct rockchip_drm_private *private = to_drm_private(helper);
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- struct drm_device *dev = helper->dev;
- struct rockchip_gem_object *rk_obj;
- struct drm_framebuffer *fb;
- unsigned int bytes_per_pixel;
- unsigned long offset;
- struct fb_info *fbi;
- size_t size;
- int ret;
-
- bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
-
- rk_obj = rockchip_gem_create_object(dev, size, true);
- if (IS_ERR(rk_obj))
- return -ENOMEM;
-
- private->fbdev_bo = &rk_obj->base;
-
- fbi = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(fbi)) {
- DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n");
- ret = PTR_ERR(fbi);
- goto out;
- }
-
- helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
- private->fbdev_bo);
- if (IS_ERR(helper->fb)) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to allocate DRM framebuffer.\n");
- ret = PTR_ERR(helper->fb);
- goto out;
- }
-
- fbi->fbops = &rockchip_drm_fbdev_ops;
-
- fb = helper->fb;
- drm_fb_helper_fill_info(fbi, helper, sizes);
-
- offset = fbi->var.xoffset * bytes_per_pixel;
- offset += fbi->var.yoffset * fb->pitches[0];
-
- dev->mode_config.fb_base = 0;
- fbi->screen_base = rk_obj->kvaddr + offset;
- fbi->screen_size = rk_obj->base.size;
- fbi->fix.smem_len = rk_obj->base.size;
-
- DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n",
- fb->width, fb->height, fb->format->depth,
- rk_obj->kvaddr,
- offset, size);
-
- return 0;
-
-out:
- rockchip_gem_free_object(&rk_obj->base);
- return ret;
-}
-
-static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = {
- .fb_probe = rockchip_drm_fbdev_create,
-};
-
-int rockchip_drm_fbdev_init(struct drm_device *dev)
-{
- struct rockchip_drm_private *private = dev->dev_private;
- struct drm_fb_helper *helper;
- int ret;
-
- if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
- return -EINVAL;
-
- helper = &private->fbdev_helper;
-
- drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
-
- ret = drm_fb_helper_init(dev, helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to initialize drm fb helper - %d.\n",
- ret);
- return ret;
- }
-
- ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to set initial hw config - %d.\n",
- ret);
- goto err_drm_fb_helper_fini;
- }
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(helper);
- return ret;
-}
-
-void rockchip_drm_fbdev_fini(struct drm_device *dev)
-{
- struct rockchip_drm_private *private = dev->dev_private;
- struct drm_fb_helper *helper;
-
- helper = &private->fbdev_helper;
-
- drm_fb_helper_unregister_fbi(helper);
-
- if (helper->fb)
- drm_framebuffer_put(helper->fb);
-
- drm_fb_helper_fini(helper);
-}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
deleted file mode 100644
index 5fb7ac2371a8..000000000000
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
- * Author:Mark Yao <mark.yao@rock-chips.com>
- */
-
-#ifndef _ROCKCHIP_DRM_FBDEV_H
-#define _ROCKCHIP_DRM_FBDEV_H
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int rockchip_drm_fbdev_init(struct drm_device *dev);
-void rockchip_drm_fbdev_fini(struct drm_device *dev);
-#else
-static inline int rockchip_drm_fbdev_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void rockchip_drm_fbdev_fini(struct drm_device *dev)
-{
-}
-#endif
-
-#endif /* _ROCKCHIP_DRM_FBDEV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 7971f57436dd..63eb73b624aa 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -241,11 +241,21 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
/*
+ * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
+ * whole buffer from the start.
+ */
+ vma->vm_pgoff = 0;
+
+ /*
* We allocated a struct page table for rk_obj, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (rk_obj->pages)
ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
else
@@ -257,39 +267,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
return ret;
}
-int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret)
- return ret;
-
- return rockchip_drm_gem_object_mmap(obj, vma);
-}
-
-/* drm driver mmap file operations */
-int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- /*
- * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
- * whole buffer from the start.
- */
- vma->vm_pgoff = 0;
-
- obj = vma->vm_private_data;
-
- return rockchip_drm_gem_object_mmap(obj, vma);
-}
-
static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
drm_gem_object_release(&rk_obj->base);
@@ -301,6 +278,7 @@ static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
.get_sg_table = rockchip_gem_prime_get_sg_table,
.vmap = rockchip_gem_prime_vmap,
.vunmap = rockchip_gem_prime_vunmap,
+ .mmap = rockchip_drm_gem_object_mmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index 5a70a56cd406..47c1861eece0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -34,13 +34,6 @@ rockchip_gem_prime_import_sg_table(struct drm_device *dev,
int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-/* drm driver mmap file operations */
-int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-
-/* mmap a gem object to userspace. */
-int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
bool alloc_kmap);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index a25b98b7f5bd..3e8d9e2d1b67 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -726,7 +726,9 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
spin_unlock(&vop->reg_lock);
- wait_for_completion(&vop->dsp_hold_completion);
+ if (!wait_for_completion_timeout(&vop->dsp_hold_completion,
+ msecs_to_jiffies(200)))
+ WARN(1, "%s: timed out waiting for DSP hold", crtc->name);
vop_dsp_hold_valid_irq_disable(vop);
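
wait_for_completion_timeout() returns 0 if the timeout elapsed and otherwise the number of jiffies left (at least 1), so the bounded wait above can warn instead of blocking forever. A minimal sketch of the pattern, with a hypothetical completion named done:

	unsigned long left;

	left = wait_for_completion_timeout(&done, msecs_to_jiffies(200));
	if (!left)
		pr_warn("timed out waiting for completion\n");
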
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 27e1573af96e..191c56064f19 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -190,6 +190,16 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
}
EXPORT_SYMBOL(drm_sched_entity_flush);
+static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk)
+{
+ struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+
+ drm_sched_fence_finished(job->s_fence);
+ WARN_ON(job->s_fence->parent);
+ job->sched->ops->free_job(job);
+}
+
+
/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct dma_fence_cb *cb)
@@ -197,9 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
- drm_sched_fence_finished(job->s_fence);
- WARN_ON(job->s_fence->parent);
- job->sched->ops->free_job(job);
+ init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work);
+ irq_work_queue(&job->work);
}
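
The change above defers free_job() out of the fence callback, which may run in atomic context, by bouncing it through irq_work. A minimal sketch of that deferral pattern, with a hypothetical my_job type that embeds a struct irq_work:

	static void my_job_free_work(struct irq_work *wrk)
	{
		struct my_job *job = container_of(wrk, struct my_job, work);

		free_my_job(job);	/* heavier teardown is safe here */
	}

	/* from the atomic callback: */
	init_irq_work(&job->work, my_job_free_work);
	irq_work_queue(&job->work);
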
static struct dma_fence *
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index e2a6c82c8252..288b838a904a 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -5,7 +5,6 @@ config DRM_SHMOBILE
depends on ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
Choose this option if you have an SH Mobile chipset.
diff --git a/drivers/gpu/drm/sprd/Kconfig b/drivers/gpu/drm/sprd/Kconfig
new file mode 100644
index 000000000000..3edeaeca0e65
--- /dev/null
+++ b/drivers/gpu/drm/sprd/Kconfig
@@ -0,0 +1,13 @@
+config DRM_SPRD
+ tristate "DRM Support for Unisoc SoCs Platform"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on DRM && OF
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
+ help
+ Choose this option if you have a Unisoc chipset.
+ If M is selected the module will be called sprd_drm.
+
diff --git a/drivers/gpu/drm/sprd/Makefile b/drivers/gpu/drm/sprd/Makefile
new file mode 100644
index 000000000000..e82e6a6f89de
--- /dev/null
+++ b/drivers/gpu/drm/sprd/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+sprd-drm-y := sprd_drm.o \
+ sprd_dpu.o \
+ sprd_dsi.o \
+ megacores_pll.o
+
+obj-$(CONFIG_DRM_SPRD) += sprd-drm.o
\ No newline at end of file
diff --git a/drivers/gpu/drm/sprd/megacores_pll.c b/drivers/gpu/drm/sprd/megacores_pll.c
new file mode 100644
index 000000000000..3091dfdc11e3
--- /dev/null
+++ b/drivers/gpu/drm/sprd/megacores_pll.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#include <asm/div64.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+
+#include "sprd_dsi.h"
+
+#define L 0
+#define H 1
+#define CLK 0
+#define DATA 1
+#define INFINITY 0xffffffff
+#define MIN_OUTPUT_FREQ (100)
+
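+/*
+ * AVERAGE() is the midpoint of a and b, computed without the (a + b) sum
+ * that could overflow; e.g. AVERAGE(3800, 9500) = 3800 + 5700 / 2 = 6650.
+ */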
+#define AVERAGE(a, b) (min(a, b) + abs((b) - (a)) / 2)
+
+/* sharkle */
+#define VCO_BAND_LOW 750
+#define VCO_BAND_MID 1100
+#define VCO_BAND_HIGH 1500
+#define PHY_REF_CLK 26000
+
+static int dphy_calc_pll_param(struct dphy_pll *pll)
+{
+ const u32 khz = 1000;
+ const u32 mhz = 1000000;
+ const unsigned long long factor = 100;
+ unsigned long long tmp;
+ int i;
+
+ pll->potential_fvco = pll->freq / khz;
+ pll->ref_clk = PHY_REF_CLK / khz;
+
+ for (i = 0; i < 4; ++i) {
+ if (pll->potential_fvco >= VCO_BAND_LOW &&
+ pll->potential_fvco <= VCO_BAND_HIGH) {
+ pll->fvco = pll->potential_fvco;
+ pll->out_sel = BIT(i);
+ break;
+ }
+ pll->potential_fvco <<= 1;
+ }
+ if (pll->fvco == 0)
+ return -EINVAL;
+
+ if (pll->fvco >= VCO_BAND_LOW && pll->fvco <= VCO_BAND_MID) {
+ /* vco band control */
+ pll->vco_band = 0x0;
+ /* low pass filter control */
+ pll->lpf_sel = 1;
+ } else if (pll->fvco > VCO_BAND_MID && pll->fvco <= VCO_BAND_HIGH) {
+ pll->vco_band = 0x1;
+ pll->lpf_sel = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ pll->nint = pll->fvco / pll->ref_clk;
+ tmp = pll->fvco * factor * mhz;
+ do_div(tmp, pll->ref_clk);
+ tmp = tmp - pll->nint * factor * mhz;
+ tmp *= BIT(20);
+ do_div(tmp, 100000000);
+ pll->kint = (u32)tmp;
+ pll->refin = 3; /* pre-divider bypass */
+	pll->sdm_en = true; /* use fractional-N PLL */
+ pll->fdk_s = 0x1; /* fraction */
+ pll->cp_s = 0x0;
+ pll->det_delay = 0x1;
+
+ return 0;
+}
+
+static void dphy_set_pll_reg(struct dphy_pll *pll, struct regmap *regmap)
+{
+ u8 reg_val[9] = {0};
+ int i;
+
+ u8 reg_addr[] = {
+ 0x03, 0x04, 0x06, 0x08, 0x09,
+ 0x0a, 0x0b, 0x0e, 0x0f
+ };
+
+ reg_val[0] = 1 | (1 << 1) | (pll->lpf_sel << 2);
+ reg_val[1] = pll->div | (1 << 3) | (pll->cp_s << 5) | (pll->fdk_s << 7);
+ reg_val[2] = pll->nint;
+ reg_val[3] = pll->vco_band | (pll->sdm_en << 1) | (pll->refin << 2);
+ reg_val[4] = pll->kint >> 12;
+ reg_val[5] = pll->kint >> 4;
+	reg_val[6] = pll->out_sel | ((pll->kint << 4) & 0xf0);
+ reg_val[7] = 1 << 4;
+ reg_val[8] = pll->det_delay;
+
+ for (i = 0; i < sizeof(reg_addr); ++i) {
+ regmap_write(regmap, reg_addr[i], reg_val[i]);
+ DRM_DEBUG("%02x: %02x\n", reg_addr[i], reg_val[i]);
+ }
+}
+
+int dphy_pll_config(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ struct regmap *regmap = ctx->regmap;
+ struct dphy_pll *pll = &ctx->pll;
+ int ret;
+
+ pll->freq = dsi->slave->hs_rate;
+
+ /* FREQ = 26M * (NINT + KINT / 2^20) / out_sel */
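+	/*
+	 * Illustrative walk-through (assumed hs_rate of 500000 kHz, not from
+	 * this patch): potential_fvco doubles once to 1000 MHz (out_sel = 2),
+	 * nint = 1000 / 26 = 38 and kint = 483958, giving
+	 * 26 MHz * (38 + 483958 / 2^20) / 2 ~= 500 MHz.
+	 */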
+ ret = dphy_calc_pll_param(pll);
+ if (ret) {
+ drm_err(dsi->drm, "failed to calculate dphy pll parameters\n");
+ return ret;
+ }
+ dphy_set_pll_reg(pll, regmap);
+
+ return 0;
+}
+
+static void dphy_set_timing_reg(struct regmap *regmap, int type, u8 val[])
+{
+ switch (type) {
+ case REQUEST_TIME:
+ regmap_write(regmap, 0x31, val[CLK]);
+ regmap_write(regmap, 0x41, val[DATA]);
+ regmap_write(regmap, 0x51, val[DATA]);
+ regmap_write(regmap, 0x61, val[DATA]);
+ regmap_write(regmap, 0x71, val[DATA]);
+
+ regmap_write(regmap, 0x90, val[CLK]);
+ regmap_write(regmap, 0xa0, val[DATA]);
+ regmap_write(regmap, 0xb0, val[DATA]);
+ regmap_write(regmap, 0xc0, val[DATA]);
+ regmap_write(regmap, 0xd0, val[DATA]);
+ break;
+ case PREPARE_TIME:
+ regmap_write(regmap, 0x32, val[CLK]);
+ regmap_write(regmap, 0x42, val[DATA]);
+ regmap_write(regmap, 0x52, val[DATA]);
+ regmap_write(regmap, 0x62, val[DATA]);
+ regmap_write(regmap, 0x72, val[DATA]);
+
+ regmap_write(regmap, 0x91, val[CLK]);
+ regmap_write(regmap, 0xa1, val[DATA]);
+ regmap_write(regmap, 0xb1, val[DATA]);
+ regmap_write(regmap, 0xc1, val[DATA]);
+ regmap_write(regmap, 0xd1, val[DATA]);
+ break;
+ case ZERO_TIME:
+ regmap_write(regmap, 0x33, val[CLK]);
+ regmap_write(regmap, 0x43, val[DATA]);
+ regmap_write(regmap, 0x53, val[DATA]);
+ regmap_write(regmap, 0x63, val[DATA]);
+ regmap_write(regmap, 0x73, val[DATA]);
+
+ regmap_write(regmap, 0x92, val[CLK]);
+ regmap_write(regmap, 0xa2, val[DATA]);
+ regmap_write(regmap, 0xb2, val[DATA]);
+ regmap_write(regmap, 0xc2, val[DATA]);
+ regmap_write(regmap, 0xd2, val[DATA]);
+ break;
+ case TRAIL_TIME:
+ regmap_write(regmap, 0x34, val[CLK]);
+ regmap_write(regmap, 0x44, val[DATA]);
+ regmap_write(regmap, 0x54, val[DATA]);
+ regmap_write(regmap, 0x64, val[DATA]);
+ regmap_write(regmap, 0x74, val[DATA]);
+
+ regmap_write(regmap, 0x93, val[CLK]);
+ regmap_write(regmap, 0xa3, val[DATA]);
+ regmap_write(regmap, 0xb3, val[DATA]);
+ regmap_write(regmap, 0xc3, val[DATA]);
+ regmap_write(regmap, 0xd3, val[DATA]);
+ break;
+ case EXIT_TIME:
+ regmap_write(regmap, 0x36, val[CLK]);
+ regmap_write(regmap, 0x46, val[DATA]);
+ regmap_write(regmap, 0x56, val[DATA]);
+ regmap_write(regmap, 0x66, val[DATA]);
+ regmap_write(regmap, 0x76, val[DATA]);
+
+ regmap_write(regmap, 0x95, val[CLK]);
+ regmap_write(regmap, 0xA5, val[DATA]);
+ regmap_write(regmap, 0xB5, val[DATA]);
+ regmap_write(regmap, 0xc5, val[DATA]);
+ regmap_write(regmap, 0xd5, val[DATA]);
+ break;
+ case CLKPOST_TIME:
+ regmap_write(regmap, 0x35, val[CLK]);
+ regmap_write(regmap, 0x94, val[CLK]);
+ break;
+
+	/* the following just use the default values */
+ case SETTLE_TIME:
+ fallthrough;
+ case TA_GET:
+ fallthrough;
+ case TA_GO:
+ fallthrough;
+ case TA_SURE:
+ fallthrough;
+ default:
+ break;
+ }
+}
+
+void dphy_timing_config(struct dsi_context *ctx)
+{
+ struct regmap *regmap = ctx->regmap;
+ struct dphy_pll *pll = &ctx->pll;
+ const u32 factor = 2;
+ const u32 scale = 100;
+ u32 t_ui, t_byteck, t_half_byteck;
+ u32 range[2], constant;
+ u8 val[2];
+ u32 tmp = 0;
+
+ /* t_ui: 1 ui, byteck: 8 ui, half byteck: 4 ui */
+ t_ui = 1000 * scale / (pll->freq / 1000);
+ t_byteck = t_ui << 3;
+ t_half_byteck = t_ui << 2;
+ constant = t_ui << 1;
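+
+	/*
+	 * Illustrative numbers (assumed pll->freq of 500000 kHz): t_ui = 200,
+	 * t_byteck = 1600 and t_half_byteck = 800, all in units of 0.01 ns
+	 * because of the x100 scale, i.e. a 2 ns UI and a 16 ns byte clock.
+	 */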
+
+	/* REQUEST_TIME: HS T-LPX: LP-01
+	 * The MIPI spec defines a 50 ns minimum for T-LPX, but it should
+	 * not be set too small, because BTA, LP-10, LP-00 and LP-01 are
+	 * all derived from T-LPX.
+	 */
+ range[L] = 50 * scale;
+ range[H] = INFINITY;
+ val[CLK] = DIV_ROUND_UP(range[L] * (factor << 1), t_byteck) - 2;
+ val[DATA] = val[CLK];
+ dphy_set_timing_reg(regmap, REQUEST_TIME, val);
+
+ /* PREPARE_TIME: HS sequence: LP-00 */
+ range[L] = 38 * scale;
+ range[H] = 95 * scale;
+ tmp = AVERAGE(range[L], range[H]);
+ val[CLK] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1;
+ range[L] = 40 * scale + 4 * t_ui;
+ range[H] = 85 * scale + 6 * t_ui;
+ tmp |= AVERAGE(range[L], range[H]) << 16;
+ val[DATA] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1;
+ dphy_set_timing_reg(regmap, PREPARE_TIME, val);
+
+ /* ZERO_TIME: HS-ZERO */
+ range[L] = 300 * scale;
+ range[H] = INFINITY;
+ val[CLK] = DIV_ROUND_UP(range[L] * factor + (tmp & 0xffff)
+ - 525 * t_byteck / 100, t_byteck) - 2;
+ range[L] = 145 * scale + 10 * t_ui;
+ val[DATA] = DIV_ROUND_UP(range[L] * factor
+ + ((tmp >> 16) & 0xffff) - 525 * t_byteck / 100,
+ t_byteck) - 2;
+ dphy_set_timing_reg(regmap, ZERO_TIME, val);
+
+ /* TRAIL_TIME: HS-TRAIL */
+ range[L] = 60 * scale;
+ range[H] = INFINITY;
+ val[CLK] = DIV_ROUND_UP(range[L] * factor - constant, t_half_byteck);
+ range[L] = max(8 * t_ui, 60 * scale + 4 * t_ui);
+ val[DATA] = DIV_ROUND_UP(range[L] * 3 / 2 - constant, t_half_byteck) - 2;
+ dphy_set_timing_reg(regmap, TRAIL_TIME, val);
+
+ /* EXIT_TIME: */
+ range[L] = 100 * scale;
+ range[H] = INFINITY;
+ val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2;
+ val[DATA] = val[CLK];
+ dphy_set_timing_reg(regmap, EXIT_TIME, val);
+
+ /* CLKPOST_TIME: */
+ range[L] = 60 * scale + 52 * t_ui;
+ range[H] = INFINITY;
+ val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2;
+ val[DATA] = val[CLK];
+ dphy_set_timing_reg(regmap, CLKPOST_TIME, val);
+
+	/* SETTLE_TIME:
+	 * This time is used by the receiver, so the transmitter
+	 * can ignore it.
+	 */
+
+	/* TA_GO:
+	 * The transmitter drives the bridge state (LP-00) before releasing
+	 * control; reg 0x1f default value 0x04 is good.
+	 */
+
+	/* TA_SURE:
+	 * After the LP-10 state and before the bridge state (LP-00);
+	 * reg 0x20 default value 0x01 is good.
+	 */
+
+	/* TA_GET:
+	 * The receiver drives the bridge state (LP-00) before releasing
+	 * control; reg 0x21 default value 0x03 is good.
+	 */
+}
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
new file mode 100644
index 000000000000..06a3414ee43a
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "sprd_drm.h"
+#include "sprd_dpu.h"
+#include "sprd_dsi.h"
+
+/* Global control registers */
+#define REG_DPU_CTRL 0x04
+#define REG_DPU_CFG0 0x08
+#define REG_PANEL_SIZE 0x20
+#define REG_BLEND_SIZE 0x24
+#define REG_BG_COLOR 0x2C
+
+/* Layer0 control registers */
+#define REG_LAY_BASE_ADDR0 0x30
+#define REG_LAY_BASE_ADDR1 0x34
+#define REG_LAY_BASE_ADDR2 0x38
+#define REG_LAY_CTRL 0x40
+#define REG_LAY_SIZE 0x44
+#define REG_LAY_PITCH 0x48
+#define REG_LAY_POS 0x4C
+#define REG_LAY_ALPHA 0x50
+#define REG_LAY_CROP_START 0x5C
+
+/* Interrupt control registers */
+#define REG_DPU_INT_EN 0x1E0
+#define REG_DPU_INT_CLR 0x1E4
+#define REG_DPU_INT_STS 0x1E8
+
+/* DPI control registers */
+#define REG_DPI_CTRL 0x1F0
+#define REG_DPI_H_TIMING 0x1F4
+#define REG_DPI_V_TIMING 0x1F8
+
+/* MMU control registers */
+#define REG_MMU_EN 0x800
+#define REG_MMU_VPN_RANGE 0x80C
+#define REG_MMU_PPN1 0x83C
+#define REG_MMU_RANGE1 0x840
+#define REG_MMU_PPN2 0x844
+#define REG_MMU_RANGE2 0x848
+
+/* Global control bits */
+#define BIT_DPU_RUN BIT(0)
+#define BIT_DPU_STOP BIT(1)
+#define BIT_DPU_REG_UPDATE BIT(2)
+#define BIT_DPU_IF_EDPI BIT(0)
+
+/* Layer control bits */
+#define BIT_DPU_LAY_EN BIT(0)
+#define BIT_DPU_LAY_LAYER_ALPHA (0x01 << 2)
+#define BIT_DPU_LAY_COMBO_ALPHA (0x02 << 2)
+#define BIT_DPU_LAY_FORMAT_YUV422_2PLANE (0x00 << 4)
+#define BIT_DPU_LAY_FORMAT_YUV420_2PLANE (0x01 << 4)
+#define BIT_DPU_LAY_FORMAT_YUV420_3PLANE (0x02 << 4)
+#define BIT_DPU_LAY_FORMAT_ARGB8888 (0x03 << 4)
+#define BIT_DPU_LAY_FORMAT_RGB565 (0x04 << 4)
+#define BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3 (0x00 << 8)
+#define BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0 (0x01 << 8)
+#define BIT_DPU_LAY_NO_SWITCH (0x00 << 10)
+#define BIT_DPU_LAY_RB_OR_UV_SWITCH (0x01 << 10)
+#define BIT_DPU_LAY_MODE_BLEND_NORMAL (0x00 << 16)
+#define BIT_DPU_LAY_MODE_BLEND_PREMULT (0x01 << 16)
+#define BIT_DPU_LAY_ROTATION_0 (0x00 << 20)
+#define BIT_DPU_LAY_ROTATION_90 (0x01 << 20)
+#define BIT_DPU_LAY_ROTATION_180 (0x02 << 20)
+#define BIT_DPU_LAY_ROTATION_270 (0x03 << 20)
+#define BIT_DPU_LAY_ROTATION_0_M (0x04 << 20)
+#define BIT_DPU_LAY_ROTATION_90_M (0x05 << 20)
+#define BIT_DPU_LAY_ROTATION_180_M (0x06 << 20)
+#define BIT_DPU_LAY_ROTATION_270_M (0x07 << 20)
+
+/* Interrupt control & status bits */
+#define BIT_DPU_INT_DONE BIT(0)
+#define BIT_DPU_INT_TE BIT(1)
+#define BIT_DPU_INT_ERR BIT(2)
+#define BIT_DPU_INT_UPDATE_DONE BIT(4)
+#define BIT_DPU_INT_VSYNC BIT(5)
+
+/* DPI control bits */
+#define BIT_DPU_EDPI_TE_EN BIT(8)
+#define BIT_DPU_EDPI_FROM_EXTERNAL_PAD BIT(10)
+#define BIT_DPU_DPI_HALT_EN BIT(16)
+
+static const u32 layer_fmts[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+};
+
+struct sprd_plane {
+ struct drm_plane base;
+};
+
+static int dpu_wait_stop_done(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+ int rc;
+
+ if (ctx->stopped)
+ return 0;
+
+ rc = wait_event_interruptible_timeout(ctx->wait_queue, ctx->evt_stop,
+ msecs_to_jiffies(500));
+ ctx->evt_stop = false;
+
+ ctx->stopped = true;
+
+ if (!rc) {
+		drm_err(dpu->drm, "dpu wait for stop done timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int dpu_wait_update_done(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+ int rc;
+
+ ctx->evt_update = false;
+
+ rc = wait_event_interruptible_timeout(ctx->wait_queue, ctx->evt_update,
+ msecs_to_jiffies(500));
+
+ if (!rc) {
+		drm_err(dpu->drm, "dpu wait for reg update done timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static u32 drm_format_to_dpu(struct drm_framebuffer *fb)
+{
+ u32 format = 0;
+
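+	/*
+	 * The fallthrough chains below accumulate bits: e.g. RGBA8888 picks
+	 * up the byte-swap endian bit, falls into ABGR8888 for the RB
+	 * switch, then into ARGB8888 for the base format bits.
+	 */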
+ switch (fb->format->format) {
+ case DRM_FORMAT_BGRA8888:
+ /* BGRA8888 -> ARGB8888 */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0;
+ format |= BIT_DPU_LAY_FORMAT_ARGB8888;
+ break;
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_RGBA8888:
+ /* RGBA8888 -> ABGR8888 */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0;
+ fallthrough;
+ case DRM_FORMAT_ABGR8888:
+ /* RB switch */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ fallthrough;
+ case DRM_FORMAT_ARGB8888:
+ format |= BIT_DPU_LAY_FORMAT_ARGB8888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ /* RB switch */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ fallthrough;
+ case DRM_FORMAT_XRGB8888:
+ format |= BIT_DPU_LAY_FORMAT_ARGB8888;
+ break;
+ case DRM_FORMAT_BGR565:
+ /* RB switch */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ format |= BIT_DPU_LAY_FORMAT_RGB565;
+ break;
+ case DRM_FORMAT_NV12:
+ /* 2-Lane: Yuv420 */
+ format |= BIT_DPU_LAY_FORMAT_YUV420_2PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3;
+ /* UV endian */
+ format |= BIT_DPU_LAY_NO_SWITCH;
+ break;
+ case DRM_FORMAT_NV21:
+ /* 2-Lane: Yuv420 */
+ format |= BIT_DPU_LAY_FORMAT_YUV420_2PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3;
+ /* UV endian */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ break;
+ case DRM_FORMAT_NV16:
+ /* 2-Lane: Yuv422 */
+ format |= BIT_DPU_LAY_FORMAT_YUV422_2PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0;
+ /* UV endian */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ break;
+ case DRM_FORMAT_NV61:
+ /* 2-Lane: Yuv422 */
+ format |= BIT_DPU_LAY_FORMAT_YUV422_2PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3;
+ /* UV endian */
+ format |= BIT_DPU_LAY_NO_SWITCH;
+ break;
+ case DRM_FORMAT_YUV420:
+ format |= BIT_DPU_LAY_FORMAT_YUV420_3PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3;
+ /* UV endian */
+ format |= BIT_DPU_LAY_NO_SWITCH;
+ break;
+ case DRM_FORMAT_YVU420:
+ format |= BIT_DPU_LAY_FORMAT_YUV420_3PLANE;
+ /* Y endian */
+ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3;
+ /* UV endian */
+ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH;
+ break;
+ default:
+ break;
+ }
+
+ return format;
+}
+
+static u32 drm_rotation_to_dpu(struct drm_plane_state *state)
+{
+ u32 rotation = 0;
+
+ switch (state->rotation) {
+ default:
+ case DRM_MODE_ROTATE_0:
+ rotation = BIT_DPU_LAY_ROTATION_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ rotation = BIT_DPU_LAY_ROTATION_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ rotation = BIT_DPU_LAY_ROTATION_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ rotation = BIT_DPU_LAY_ROTATION_270;
+ break;
+ case DRM_MODE_REFLECT_Y:
+ rotation = BIT_DPU_LAY_ROTATION_180_M;
+ break;
+ case (DRM_MODE_REFLECT_Y | DRM_MODE_ROTATE_90):
+ rotation = BIT_DPU_LAY_ROTATION_90_M;
+ break;
+ case DRM_MODE_REFLECT_X:
+ rotation = BIT_DPU_LAY_ROTATION_0_M;
+ break;
+ case (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90):
+ rotation = BIT_DPU_LAY_ROTATION_270_M;
+ break;
+ }
+
+ return rotation;
+}
+
+static u32 drm_blend_to_dpu(struct drm_plane_state *state)
+{
+ u32 blend = 0;
+
+ switch (state->pixel_blend_mode) {
+ case DRM_MODE_BLEND_COVERAGE:
+ /* alpha mode select - combo alpha */
+ blend |= BIT_DPU_LAY_COMBO_ALPHA;
+ /* Normal mode */
+ blend |= BIT_DPU_LAY_MODE_BLEND_NORMAL;
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ /* alpha mode select - combo alpha */
+ blend |= BIT_DPU_LAY_COMBO_ALPHA;
+ /* Pre-mult mode */
+ blend |= BIT_DPU_LAY_MODE_BLEND_PREMULT;
+ break;
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ default:
+ /* don't do blending, maybe RGBX */
+ /* alpha mode select - layer alpha */
+ blend |= BIT_DPU_LAY_LAYER_ALPHA;
+ break;
+ }
+
+ return blend;
+}
+
+static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_framebuffer *fb = state->fb;
+ u32 addr, size, offset, pitch, blend, format, rotation;
+ u32 src_x = state->src_x >> 16;
+ u32 src_y = state->src_y >> 16;
+ u32 src_w = state->src_w >> 16;
+ u32 src_h = state->src_h >> 16;
+ u32 dst_x = state->crtc_x;
+ u32 dst_y = state->crtc_y;
+ u32 alpha = state->alpha;
+ u32 index = state->zpos;
+ int i;
+
+ offset = (dst_x & 0xffff) | (dst_y << 16);
+ size = (src_w & 0xffff) | (src_h << 16);
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ cma_obj = drm_fb_cma_get_gem_obj(fb, i);
+ addr = cma_obj->paddr + fb->offsets[i];
+
+ if (i == 0)
+ layer_reg_wr(ctx, REG_LAY_BASE_ADDR0, addr, index);
+ else if (i == 1)
+ layer_reg_wr(ctx, REG_LAY_BASE_ADDR1, addr, index);
+ else
+ layer_reg_wr(ctx, REG_LAY_BASE_ADDR2, addr, index);
+ }
+
+ if (fb->format->num_planes == 3) {
+ /* UV pitch is 1/2 of Y pitch */
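+		/*
+		 * Presumably the halving happens via the bit position:
+		 * writing the Y pixel pitch at bit 15 rather than 16 lands
+		 * half its value in the upper 16-bit field.
+		 */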
+ pitch = (fb->pitches[0] / fb->format->cpp[0]) |
+ (fb->pitches[0] / fb->format->cpp[0] << 15);
+ } else {
+ pitch = fb->pitches[0] / fb->format->cpp[0];
+ }
+
+ layer_reg_wr(ctx, REG_LAY_POS, offset, index);
+ layer_reg_wr(ctx, REG_LAY_SIZE, size, index);
+ layer_reg_wr(ctx, REG_LAY_CROP_START,
+ src_y << 16 | src_x, index);
+ layer_reg_wr(ctx, REG_LAY_ALPHA, alpha, index);
+ layer_reg_wr(ctx, REG_LAY_PITCH, pitch, index);
+
+ format = drm_format_to_dpu(fb);
+ blend = drm_blend_to_dpu(state);
+ rotation = drm_rotation_to_dpu(state);
+
+ layer_reg_wr(ctx, REG_LAY_CTRL, BIT_DPU_LAY_EN |
+ format |
+ blend |
+ rotation,
+ index);
+}
+
+static void sprd_dpu_flip(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+
+ /*
+	 * Make sure the dpu is in stop status. The DPU has no shadow
+	 * registers in EDPI mode, so the config registers can only be
+	 * updated on the rising edge of the DPU_RUN bit.
+	 */
+ if (ctx->if_type == SPRD_DPU_IF_EDPI)
+ dpu_wait_stop_done(dpu);
+
+ /* update trigger and wait */
+ if (ctx->if_type == SPRD_DPU_IF_DPI) {
+ if (!ctx->stopped) {
+ dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_REG_UPDATE);
+ dpu_wait_update_done(dpu);
+ }
+
+ dpu_reg_set(ctx, REG_DPU_INT_EN, BIT_DPU_INT_ERR);
+ } else if (ctx->if_type == SPRD_DPU_IF_EDPI) {
+ dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_RUN);
+
+ ctx->stopped = false;
+ }
+}
+
+static void sprd_dpu_init(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+ u32 int_mask = 0;
+
+ writel(0x00, ctx->base + REG_BG_COLOR);
+ writel(0x00, ctx->base + REG_MMU_EN);
+ writel(0x00, ctx->base + REG_MMU_PPN1);
+ writel(0xffff, ctx->base + REG_MMU_RANGE1);
+ writel(0x00, ctx->base + REG_MMU_PPN2);
+ writel(0xffff, ctx->base + REG_MMU_RANGE2);
+ writel(0x1ffff, ctx->base + REG_MMU_VPN_RANGE);
+
+ if (ctx->if_type == SPRD_DPU_IF_DPI) {
+ /* use dpi as interface */
+ dpu_reg_clr(ctx, REG_DPU_CFG0, BIT_DPU_IF_EDPI);
+ /* disable Halt function for SPRD DSI */
+ dpu_reg_clr(ctx, REG_DPI_CTRL, BIT_DPU_DPI_HALT_EN);
+ /* select te from external pad */
+ dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_FROM_EXTERNAL_PAD);
+
+ /* enable dpu update done INT */
+ int_mask |= BIT_DPU_INT_UPDATE_DONE;
+ /* enable dpu done INT */
+ int_mask |= BIT_DPU_INT_DONE;
+ /* enable dpu dpi vsync */
+ int_mask |= BIT_DPU_INT_VSYNC;
+ /* enable dpu TE INT */
+ int_mask |= BIT_DPU_INT_TE;
+ /* enable underflow err INT */
+ int_mask |= BIT_DPU_INT_ERR;
+ } else if (ctx->if_type == SPRD_DPU_IF_EDPI) {
+ /* use edpi as interface */
+ dpu_reg_set(ctx, REG_DPU_CFG0, BIT_DPU_IF_EDPI);
+ /* use external te */
+ dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_FROM_EXTERNAL_PAD);
+ /* enable te */
+ dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_TE_EN);
+
+ /* enable stop done INT */
+ int_mask |= BIT_DPU_INT_DONE;
+ /* enable TE INT */
+ int_mask |= BIT_DPU_INT_TE;
+ }
+
+ writel(int_mask, ctx->base + REG_DPU_INT_EN);
+}
+
+static void sprd_dpu_fini(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+
+ writel(0x00, ctx->base + REG_DPU_INT_EN);
+ writel(0xff, ctx->base + REG_DPU_INT_CLR);
+}
+
+static void sprd_dpi_init(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+ u32 reg_val;
+ u32 size;
+
+ size = (ctx->vm.vactive << 16) | ctx->vm.hactive;
+ writel(size, ctx->base + REG_PANEL_SIZE);
+ writel(size, ctx->base + REG_BLEND_SIZE);
+
+ if (ctx->if_type == SPRD_DPU_IF_DPI) {
+ /* set dpi timing */
+ reg_val = ctx->vm.hsync_len << 0 |
+ ctx->vm.hback_porch << 8 |
+ ctx->vm.hfront_porch << 20;
+ writel(reg_val, ctx->base + REG_DPI_H_TIMING);
+
+ reg_val = ctx->vm.vsync_len << 0 |
+ ctx->vm.vback_porch << 8 |
+ ctx->vm.vfront_porch << 20;
+ writel(reg_val, ctx->base + REG_DPI_V_TIMING);
+ }
+}
+
+void sprd_dpu_run(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+
+ dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_RUN);
+
+ ctx->stopped = false;
+}
+
+void sprd_dpu_stop(struct sprd_dpu *dpu)
+{
+ struct dpu_context *ctx = &dpu->ctx;
+
+ if (ctx->if_type == SPRD_DPU_IF_DPI)
+ dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_STOP);
+
+ dpu_wait_stop_done(dpu);
+}
+
+static int sprd_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc_state *crtc_state;
+ u32 fmt;
+
+ if (!plane_state->fb || !plane_state->crtc)
+ return 0;
+
+ fmt = drm_format_to_dpu(plane_state->fb);
+ if (!fmt)
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+}
+
+static void sprd_plane_atomic_update(struct drm_plane *drm_plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ drm_plane);
+ struct sprd_dpu *dpu = to_sprd_crtc(new_state->crtc);
+
+	/* start configuring the dpu layers */
+ sprd_dpu_layer(dpu, new_state);
+}
+
+static void sprd_plane_atomic_disable(struct drm_plane *drm_plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ drm_plane);
+ struct sprd_dpu *dpu = to_sprd_crtc(old_state->crtc);
+
+ layer_reg_wr(&dpu->ctx, REG_LAY_CTRL, 0x00, old_state->zpos);
+}
+
+static void sprd_plane_create_properties(struct sprd_plane *plane, int index)
+{
+ unsigned int supported_modes = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
+
+ /* create rotation property */
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_MASK |
+ DRM_MODE_REFLECT_MASK);
+
+ /* create alpha property */
+ drm_plane_create_alpha_property(&plane->base);
+
+ /* create blend mode property */
+ drm_plane_create_blend_mode_property(&plane->base, supported_modes);
+
+ /* create zpos property */
+ drm_plane_create_zpos_immutable_property(&plane->base, index);
+}
+
+static const struct drm_plane_helper_funcs sprd_plane_helper_funcs = {
+ .atomic_check = sprd_plane_atomic_check,
+ .atomic_update = sprd_plane_atomic_update,
+ .atomic_disable = sprd_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs sprd_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static struct sprd_plane *sprd_planes_init(struct drm_device *drm)
+{
+ struct sprd_plane *plane, *primary;
+ enum drm_plane_type plane_type;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+
+ plane = drmm_universal_plane_alloc(drm, struct sprd_plane, base,
+ 1, &sprd_plane_funcs,
+ layer_fmts, ARRAY_SIZE(layer_fmts),
+ NULL, plane_type, NULL);
+ if (IS_ERR(plane)) {
+ drm_err(drm, "failed to init drm plane: %d\n", i);
+ return plane;
+ }
+
+ drm_plane_helper_add(&plane->base, &sprd_plane_helper_funcs);
+
+ sprd_plane_create_properties(plane, i);
+
+ if (i == 0)
+ primary = plane;
+ }
+
+ return primary;
+}
+
+static void sprd_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_encoder *encoder;
+ struct sprd_dsi *dsi;
+
+ drm_display_mode_to_videomode(mode, &dpu->ctx.vm);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ dsi = encoder_to_dsi(encoder);
+
+ if (dsi->slave->mode_flags & MIPI_DSI_MODE_VIDEO)
+ dpu->ctx.if_type = SPRD_DPU_IF_DPI;
+ else
+ dpu->ctx.if_type = SPRD_DPU_IF_EDPI;
+ }
+
+ sprd_dpi_init(dpu);
+}
+
+static void sprd_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+
+ sprd_dpu_init(dpu);
+
+ drm_crtc_vblank_on(&dpu->base);
+}
+
+static void sprd_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+ struct drm_device *drm = dpu->base.dev;
+
+ drm_crtc_vblank_off(&dpu->base);
+
+ sprd_dpu_fini(dpu);
+
+ spin_lock_irq(&drm->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&drm->event_lock);
+}
+
+static void sprd_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+ struct drm_device *drm = dpu->base.dev;
+
+ sprd_dpu_flip(dpu);
+
+ spin_lock_irq(&drm->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&drm->event_lock);
+}
+
+static int sprd_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+
+ dpu_reg_set(&dpu->ctx, REG_DPU_INT_EN, BIT_DPU_INT_VSYNC);
+
+ return 0;
+}
+
+static void sprd_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct sprd_dpu *dpu = to_sprd_crtc(crtc);
+
+ dpu_reg_clr(&dpu->ctx, REG_DPU_INT_EN, BIT_DPU_INT_VSYNC);
+}
+
+static const struct drm_crtc_helper_funcs sprd_crtc_helper_funcs = {
+ .mode_set_nofb = sprd_crtc_mode_set_nofb,
+ .atomic_flush = sprd_crtc_atomic_flush,
+ .atomic_enable = sprd_crtc_atomic_enable,
+ .atomic_disable = sprd_crtc_atomic_disable,
+};
+
+static const struct drm_crtc_funcs sprd_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = sprd_crtc_enable_vblank,
+ .disable_vblank = sprd_crtc_disable_vblank,
+};
+
+static struct sprd_dpu *sprd_crtc_init(struct drm_device *drm,
+ struct drm_plane *primary, struct device *dev)
+{
+ struct device_node *port;
+ struct sprd_dpu *dpu;
+
+ dpu = drmm_crtc_alloc_with_planes(drm, struct sprd_dpu, base,
+ primary, NULL,
+ &sprd_crtc_funcs, NULL);
+ if (IS_ERR(dpu)) {
+ drm_err(drm, "failed to init crtc\n");
+ return dpu;
+ }
+ drm_crtc_helper_add(&dpu->base, &sprd_crtc_helper_funcs);
+
+ /*
+ * set crtc port so that drm_of_find_possible_crtcs call works
+ */
+ port = of_graph_get_port_by_id(dev->of_node, 0);
+ if (!port) {
+		drm_err(drm, "failed to find crtc output port for %s\n",
+ dev->of_node->full_name);
+ return ERR_PTR(-EINVAL);
+ }
+ dpu->base.port = port;
+ of_node_put(port);
+
+ return dpu;
+}
+
+static irqreturn_t sprd_dpu_isr(int irq, void *data)
+{
+ struct sprd_dpu *dpu = data;
+ struct dpu_context *ctx = &dpu->ctx;
+ u32 reg_val, int_mask = 0;
+
+ reg_val = readl(ctx->base + REG_DPU_INT_STS);
+
+ /* disable err interrupt */
+ if (reg_val & BIT_DPU_INT_ERR) {
+ int_mask |= BIT_DPU_INT_ERR;
+		drm_warn(dpu->drm, "dpu underflow!\n");
+ }
+
+ /* dpu update done isr */
+ if (reg_val & BIT_DPU_INT_UPDATE_DONE) {
+ ctx->evt_update = true;
+ wake_up_interruptible_all(&ctx->wait_queue);
+ }
+
+ /* dpu stop done isr */
+ if (reg_val & BIT_DPU_INT_DONE) {
+ ctx->evt_stop = true;
+ wake_up_interruptible_all(&ctx->wait_queue);
+ }
+
+ if (reg_val & BIT_DPU_INT_VSYNC)
+ drm_crtc_handle_vblank(&dpu->base);
+
+ writel(reg_val, ctx->base + REG_DPU_INT_CLR);
+ dpu_reg_clr(ctx, REG_DPU_INT_EN, int_mask);
+
+ return IRQ_HANDLED;
+}
+
+static int sprd_dpu_context_init(struct sprd_dpu *dpu,
+ struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_context *ctx = &dpu->ctx;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!ctx->base) {
+ dev_err(dev, "failed to map dpu registers\n");
+ return -EFAULT;
+ }
+
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq < 0) {
+ dev_err(dev, "failed to get dpu irq\n");
+ return ctx->irq;
+ }
+
+	/* disable and clear interrupts before registering the dpu IRQ. */
+ writel(0x00, ctx->base + REG_DPU_INT_EN);
+ writel(0xff, ctx->base + REG_DPU_INT_CLR);
+
+ ret = devm_request_irq(dev, ctx->irq, sprd_dpu_isr,
+ IRQF_TRIGGER_NONE, "DPU", dpu);
+ if (ret) {
+ dev_err(dev, "failed to register dpu irq handler\n");
+ return ret;
+ }
+
+ init_waitqueue_head(&ctx->wait_queue);
+
+ return 0;
+}
+
+static int sprd_dpu_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+ struct sprd_dpu *dpu;
+ struct sprd_plane *plane;
+ int ret;
+
+ plane = sprd_planes_init(drm);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
+
+ dpu = sprd_crtc_init(drm, &plane->base, dev);
+ if (IS_ERR(dpu))
+ return PTR_ERR(dpu);
+
+ dpu->drm = drm;
+ dev_set_drvdata(dev, dpu);
+
+ ret = sprd_dpu_context_init(dpu, dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct component_ops dpu_component_ops = {
+ .bind = sprd_dpu_bind,
+};
+
+static const struct of_device_id dpu_match_table[] = {
+ { .compatible = "sprd,sharkl3-dpu" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, dpu_match_table);
+
+static int sprd_dpu_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dpu_component_ops);
+}
+
+static int sprd_dpu_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dpu_component_ops);
+
+ return 0;
+}
+
+struct platform_driver sprd_dpu_driver = {
+ .probe = sprd_dpu_probe,
+ .remove = sprd_dpu_remove,
+ .driver = {
+ .name = "sprd-dpu-drv",
+ .of_match_table = dpu_match_table,
+ },
+};
+
+MODULE_AUTHOR("Leon He <leon.he@unisoc.com>");
+MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc Display Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.h b/drivers/gpu/drm/sprd/sprd_dpu.h
new file mode 100644
index 000000000000..157a78f24dc1
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dpu.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#ifndef __SPRD_DPU_H__
+#define __SPRD_DPU_H__
+
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <video/videomode.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <uapi/drm/drm_mode.h>
+
+/* DPU Layer registers offset */
+#define DPU_LAY_REG_OFFSET 0x30
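+/*
+ * Per-layer registers repeat at a 0x30 stride; e.g. layer 2's copy of the
+ * 0x40 control register sits at 0x40 + 2 * 0x30 = 0xa0 (illustrative).
+ */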
+
+enum {
+ SPRD_DPU_IF_DPI,
+ SPRD_DPU_IF_EDPI,
+ SPRD_DPU_IF_LIMIT
+};
+
+/**
+ * Sprd DPU context structure
+ *
+ * @base: DPU controller base address
+ * @irq: IRQ number to install the handler for
+ * @if_type: The type of DPI interface, default is DPI mode.
+ * @vm: videomode structure to use for DPU and DPI initialization
+ * @stopped: indicates whether the DPU is stopped
+ * @wait_queue: wait queue used to wait for the DPU shadow register update
+ *		done and DPU stop done interrupt signals
+ * @evt_update: wait queue condition for the DPU shadow register update
+ * @evt_stop: wait queue condition for DPU stop
+ */
+struct dpu_context {
+ void __iomem *base;
+ int irq;
+ u8 if_type;
+ struct videomode vm;
+ bool stopped;
+ wait_queue_head_t wait_queue;
+ bool evt_update;
+ bool evt_stop;
+};
+
+/**
+ * Sprd DPU device structure
+ *
+ * @base: the crtc object
+ * @drm: a pointer to the drm device
+ * @ctx: DPU's implementation specific context object
+ */
+struct sprd_dpu {
+ struct drm_crtc base;
+ struct drm_device *drm;
+ struct dpu_context ctx;
+};
+
+static inline struct sprd_dpu *to_sprd_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct sprd_dpu, base);
+}
+
+static inline void
+dpu_reg_set(struct dpu_context *ctx, u32 offset, u32 set_bits)
+{
+ u32 bits = readl_relaxed(ctx->base + offset);
+
+ writel(bits | set_bits, ctx->base + offset);
+}
+
+static inline void
+dpu_reg_clr(struct dpu_context *ctx, u32 offset, u32 clr_bits)
+{
+ u32 bits = readl_relaxed(ctx->base + offset);
+
+ writel(bits & ~clr_bits, ctx->base + offset);
+}
+
+static inline u32
+layer_reg_rd(struct dpu_context *ctx, u32 offset, int index)
+{
+ u32 layer_offset = offset + index * DPU_LAY_REG_OFFSET;
+
+ return readl(ctx->base + layer_offset);
+}
+
+static inline void
+layer_reg_wr(struct dpu_context *ctx, u32 offset, u32 cfg_bits, int index)
+{
+ u32 layer_offset = offset + index * DPU_LAY_REG_OFFSET;
+
+ writel(cfg_bits, ctx->base + layer_offset);
+}
+
+void sprd_dpu_run(struct sprd_dpu *dpu);
+void sprd_dpu_stop(struct sprd_dpu *dpu);
+
+#endif
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
new file mode 100644
index 000000000000..a077e2d4d721
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "sprd_drm.h"
+
+#define DRIVER_NAME "sprd"
+#define DRIVER_DESC "Spreadtrum SoCs' DRM Driver"
+#define DRIVER_DATE "20200201"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static const struct drm_mode_config_helper_funcs sprd_drm_mode_config_helper = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static const struct drm_mode_config_funcs sprd_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void sprd_drm_mode_config_init(struct drm_device *drm)
+{
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = 8192;
+ drm->mode_config.max_height = 8192;
+ drm->mode_config.allow_fb_modifiers = true;
+
+ drm->mode_config.funcs = &sprd_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &sprd_drm_mode_config_helper;
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(sprd_drm_fops);
+
+static struct drm_driver sprd_drm_drv = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &sprd_drm_fops,
+
+ /* GEM Operations */
+ DRM_GEM_CMA_DRIVER_OPS,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int sprd_drm_bind(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm;
+ struct sprd_drm *sprd;
+ int ret;
+
+ sprd = devm_drm_dev_alloc(dev, &sprd_drm_drv, struct sprd_drm, drm);
+ if (IS_ERR(sprd))
+ return PTR_ERR(sprd);
+
+ drm = &sprd->drm;
+ platform_set_drvdata(pdev, drm);
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ sprd_drm_mode_config_init(drm);
+
+ /* bind and init sub drivers */
+ ret = component_bind_all(drm->dev, drm);
+ if (ret) {
+		drm_err(drm, "failed to bind all components\n");
+ return ret;
+ }
+
+ /* vblank init */
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret) {
+ drm_err(drm, "failed to initialize vblank.\n");
+ goto err_unbind_all;
+ }
+
+ /* reset all the states of crtc/plane/encoder/connector */
+ drm_mode_config_reset(drm);
+
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto err_kms_helper_poll_fini;
+
+ return 0;
+
+err_kms_helper_poll_fini:
+ drm_kms_helper_poll_fini(drm);
+err_unbind_all:
+ component_unbind_all(drm->dev, drm);
+ return ret;
+}
+
+static void sprd_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
+
+ component_unbind_all(drm->dev, drm);
+}
+
+static const struct component_master_ops drm_component_ops = {
+ .bind = sprd_drm_bind,
+ .unbind = sprd_drm_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int sprd_drm_probe(struct platform_device *pdev)
+{
+ return drm_of_component_probe(&pdev->dev, compare_of, &drm_component_ops);
+}
+
+static int sprd_drm_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &drm_component_ops);
+ return 0;
+}
+
+static void sprd_drm_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (!drm) {
+		dev_warn(&pdev->dev, "drm device is not available, no shutdown\n");
+ return;
+ }
+
+ drm_atomic_helper_shutdown(drm);
+}
+
+static const struct of_device_id drm_match_table[] = {
+ { .compatible = "sprd,display-subsystem", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, drm_match_table);
+
+static struct platform_driver sprd_drm_driver = {
+ .probe = sprd_drm_probe,
+ .remove = sprd_drm_remove,
+ .shutdown = sprd_drm_shutdown,
+ .driver = {
+ .name = "sprd-drm-drv",
+ .of_match_table = drm_match_table,
+ },
+};
+
+static struct platform_driver *sprd_drm_drivers[] = {
+ &sprd_drm_driver,
+ &sprd_dpu_driver,
+ &sprd_dsi_driver,
+};
+
+static int __init sprd_drm_init(void)
+{
+ return platform_register_drivers(sprd_drm_drivers,
+ ARRAY_SIZE(sprd_drm_drivers));
+}
+
+static void __exit sprd_drm_exit(void)
+{
+ platform_unregister_drivers(sprd_drm_drivers,
+ ARRAY_SIZE(sprd_drm_drivers));
+}
+
+module_init(sprd_drm_init);
+module_exit(sprd_drm_exit);
+
+MODULE_AUTHOR("Leon He <leon.he@unisoc.com>");
+MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc DRM KMS Master Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/sprd/sprd_drm.h b/drivers/gpu/drm/sprd/sprd_drm.h
new file mode 100644
index 000000000000..95d1b972f333
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_drm.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#ifndef _SPRD_DRM_H_
+#define _SPRD_DRM_H_
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+
+struct sprd_drm {
+ struct drm_device drm;
+};
+
+extern struct platform_driver sprd_dpu_driver;
+extern struct platform_driver sprd_dsi_driver;
+
+#endif /* _SPRD_DRM_H_ */
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
new file mode 100644
index 000000000000..911b3cddc264
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -0,0 +1,1073 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_graph.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+
+#include "sprd_drm.h"
+#include "sprd_dpu.h"
+#include "sprd_dsi.h"
+
+#define SOFT_RESET 0x04
+#define MASK_PROTOCOL_INT 0x0C
+#define MASK_INTERNAL_INT 0x14
+#define DSI_MODE_CFG 0x18
+
+#define VIRTUAL_CHANNEL_ID 0x1C
+#define GEN_RX_VCID GENMASK(1, 0)
+#define VIDEO_PKT_VCID GENMASK(3, 2)
+
+#define DPI_VIDEO_FORMAT 0x20
+#define DPI_VIDEO_MODE_FORMAT GENMASK(5, 0)
+#define LOOSELY18_EN BIT(6)
+
+#define VIDEO_PKT_CONFIG 0x24
+#define VIDEO_PKT_SIZE GENMASK(15, 0)
+#define VIDEO_LINE_CHUNK_NUM GENMASK(31, 16)
+
+#define VIDEO_LINE_HBLK_TIME 0x28
+#define VIDEO_LINE_HBP_TIME GENMASK(15, 0)
+#define VIDEO_LINE_HSA_TIME GENMASK(31, 16)
+
+#define VIDEO_LINE_TIME 0x2C
+
+#define VIDEO_VBLK_LINES 0x30
+#define VFP_LINES GENMASK(9, 0)
+#define VBP_LINES GENMASK(19, 10)
+#define VSA_LINES GENMASK(29, 20)
+
+#define VIDEO_VACTIVE_LINES 0x34
+
+#define VID_MODE_CFG 0x38
+#define VID_MODE_TYPE GENMASK(1, 0)
+#define LP_VSA_EN BIT(8)
+#define LP_VBP_EN BIT(9)
+#define LP_VFP_EN BIT(10)
+#define LP_VACT_EN BIT(11)
+#define LP_HBP_EN BIT(12)
+#define LP_HFP_EN BIT(13)
+#define FRAME_BTA_ACK_EN BIT(14)
+
+#define TIMEOUT_CNT_CLK_CONFIG 0x40
+#define HTX_TO_CONFIG 0x44
+#define LRX_H_TO_CONFIG 0x48
+
+#define TX_ESC_CLK_CONFIG 0x5C
+
+#define CMD_MODE_CFG 0x68
+#define TEAR_FX_EN BIT(0)
+
+#define GEN_HDR 0x6C
+#define GEN_DT GENMASK(5, 0)
+#define GEN_VC GENMASK(7, 6)
+
+#define GEN_PLD_DATA 0x70
+
+#define PHY_CLK_LANE_LP_CTRL 0x74
+#define PHY_CLKLANE_TX_REQ_HS BIT(0)
+#define AUTO_CLKLANE_CTRL_EN BIT(1)
+
+#define PHY_INTERFACE_CTRL 0x78
+#define RF_PHY_SHUTDOWN BIT(0)
+#define RF_PHY_RESET_N BIT(1)
+#define RF_PHY_CLK_EN BIT(2)
+
+#define CMD_MODE_STATUS 0x98
+#define GEN_CMD_RDATA_FIFO_EMPTY BIT(1)
+#define GEN_CMD_WDATA_FIFO_EMPTY BIT(3)
+#define GEN_CMD_CMD_FIFO_EMPTY BIT(5)
+#define GEN_CMD_RDCMD_DONE BIT(7)
+
+#define PHY_STATUS 0x9C
+#define PHY_LOCK BIT(1)
+
+#define PHY_MIN_STOP_TIME 0xA0
+#define PHY_LANE_NUM_CONFIG 0xA4
+
+#define PHY_CLKLANE_TIME_CONFIG 0xA8
+#define PHY_CLKLANE_LP_TO_HS_TIME GENMASK(15, 0)
+#define PHY_CLKLANE_HS_TO_LP_TIME GENMASK(31, 16)
+
+#define PHY_DATALANE_TIME_CONFIG 0xAC
+#define PHY_DATALANE_LP_TO_HS_TIME GENMASK(15, 0)
+#define PHY_DATALANE_HS_TO_LP_TIME GENMASK(31, 16)
+
+#define MAX_READ_TIME 0xB0
+
+#define RX_PKT_CHECK_CONFIG 0xB4
+#define RX_PKT_ECC_EN BIT(0)
+#define RX_PKT_CRC_EN BIT(1)
+
+#define TA_EN 0xB8
+
+#define EOTP_EN 0xBC
+#define TX_EOTP_EN BIT(0)
+#define RX_EOTP_EN BIT(1)
+
+#define VIDEO_NULLPKT_SIZE 0xC0
+#define DCS_WM_PKT_SIZE 0xC4
+
+#define VIDEO_SIG_DELAY_CONFIG 0xD0
+#define VIDEO_SIG_DELAY GENMASK(23, 0)
+
+#define PHY_TST_CTRL0 0xF0
+#define PHY_TESTCLR BIT(0)
+#define PHY_TESTCLK BIT(1)
+
+#define PHY_TST_CTRL1 0xF4
+#define PHY_TESTDIN GENMASK(7, 0)
+#define PHY_TESTDOUT GENMASK(15, 8)
+#define PHY_TESTEN BIT(16)
+
+#define host_to_dsi(host) \
+ container_of(host, struct sprd_dsi, host)
+
+static inline u32
+dsi_reg_rd(struct dsi_context *ctx, u32 offset, u32 mask,
+ u32 shift)
+{
+ return (readl(ctx->base + offset) & mask) >> shift;
+}
+
+static inline void
+dsi_reg_wr(struct dsi_context *ctx, u32 offset, u32 mask,
+ u32 shift, u32 val)
+{
+ u32 ret;
+
+ ret = readl(ctx->base + offset);
+ ret &= ~mask;
+ ret |= (val << shift) & mask;
+ writel(ret, ctx->base + offset);
+}
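+
+/*
+ * dsi_reg_wr() is a masked read-modify-write; e.g. dsi_reg_wr(ctx,
+ * VID_MODE_CFG, VID_MODE_TYPE, 0, mode) rewrites only bits 1:0 and leaves
+ * the rest of VID_MODE_CFG untouched.
+ */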
+
+static inline void
+dsi_reg_up(struct dsi_context *ctx, u32 offset, u32 mask,
+ u32 val)
+{
+ u32 ret = readl(ctx->base + offset);
+
+ writel((ret & ~mask) | (val & mask), ctx->base + offset);
+}
+
+static int regmap_tst_io_write(void *context, u32 reg, u32 val)
+{
+ struct sprd_dsi *dsi = context;
+ struct dsi_context *ctx = &dsi->ctx;
+
+ if (val > 0xff || reg > 0xff)
+ return -EINVAL;
+
+ drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, val);
+
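+	/*
+	 * Test-interface write sequence: with TESTEN high, a TESTCLK pulse
+	 * latches TESTDIN as the register address; with TESTEN low, the
+	 * next pulse latches TESTDIN as the data byte.
+	 */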
+ dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN);
+ dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0);
+ dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0);
+ dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, val);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0);
+
+ return 0;
+}
+
+static int regmap_tst_io_read(void *context, u32 reg, u32 *val)
+{
+ struct sprd_dsi *dsi = context;
+ struct dsi_context *ctx = &dsi->ctx;
+ int ret;
+
+ if (reg > 0xff)
+ return -EINVAL;
+
+ dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN);
+ dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0);
+ dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0);
+
+ udelay(1);
+
+ ret = dsi_reg_rd(ctx, PHY_TST_CTRL1, PHY_TESTDOUT, 8);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, *val);
+ return 0;
+}
+
+static struct regmap_bus regmap_tst_io = {
+ .reg_write = regmap_tst_io_write,
+ .reg_read = regmap_tst_io_read,
+};
+
+static const struct regmap_config byte_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int dphy_wait_pll_locked(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ int i;
+
+ for (i = 0; i < 50000; i++) {
+ if (dsi_reg_rd(ctx, PHY_STATUS, PHY_LOCK, 1))
+ return 0;
+ udelay(3);
+ }
+
+ drm_err(dsi->drm, "dphy pll can not be locked\n");
+ return -ETIMEDOUT;
+}
+
+static int dsi_wait_tx_payload_fifo_empty(struct dsi_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < 5000; i++) {
+ if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_WDATA_FIFO_EMPTY, 3))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int dsi_wait_tx_cmd_fifo_empty(struct dsi_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < 5000; i++) {
+ if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int dsi_wait_rd_resp_completed(struct dsi_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < 10000; i++) {
+ if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDCMD_DONE, 7))
+ return 0;
+ udelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static u16 calc_bytes_per_pixel_x100(int coding)
+{
+ u16 bpp_x100;
+
+ switch (coding) {
+ case COLOR_CODE_16BIT_CONFIG1:
+ case COLOR_CODE_16BIT_CONFIG2:
+ case COLOR_CODE_16BIT_CONFIG3:
+ bpp_x100 = 200;
+ break;
+ case COLOR_CODE_18BIT_CONFIG1:
+ case COLOR_CODE_18BIT_CONFIG2:
+ bpp_x100 = 225;
+ break;
+ case COLOR_CODE_24BIT:
+ bpp_x100 = 300;
+ break;
+ case COLOR_CODE_COMPRESSTION:
+ bpp_x100 = 100;
+ break;
+ case COLOR_CODE_20BIT_YCC422_LOOSELY:
+ bpp_x100 = 250;
+ break;
+ case COLOR_CODE_24BIT_YCC422:
+ bpp_x100 = 300;
+ break;
+ case COLOR_CODE_16BIT_YCC422:
+ bpp_x100 = 200;
+ break;
+ case COLOR_CODE_30BIT:
+ bpp_x100 = 375;
+ break;
+ case COLOR_CODE_36BIT:
+ bpp_x100 = 450;
+ break;
+ case COLOR_CODE_12BIT_YCC420:
+ bpp_x100 = 150;
+ break;
+ default:
+ DRM_ERROR("invalid color coding");
+ bpp_x100 = 0;
+ break;
+ }
+
+ return bpp_x100;
+}
+
+static u8 calc_video_size_step(int coding)
+{
+ switch (coding) {
+ case COLOR_CODE_16BIT_CONFIG1:
+ case COLOR_CODE_16BIT_CONFIG2:
+ case COLOR_CODE_16BIT_CONFIG3:
+ case COLOR_CODE_18BIT_CONFIG1:
+ case COLOR_CODE_18BIT_CONFIG2:
+ case COLOR_CODE_24BIT:
+ case COLOR_CODE_COMPRESSTION:
+		return 1;
+ case COLOR_CODE_20BIT_YCC422_LOOSELY:
+ case COLOR_CODE_24BIT_YCC422:
+ case COLOR_CODE_16BIT_YCC422:
+ case COLOR_CODE_30BIT:
+ case COLOR_CODE_36BIT:
+ case COLOR_CODE_12BIT_YCC420:
+		return 2;
+ default:
+ DRM_ERROR("invalid color coding");
+ return 0;
+ }
+}
+
+static u16 round_video_size(int coding, u16 video_size)
+{
+ switch (coding) {
+ case COLOR_CODE_16BIT_YCC422:
+ case COLOR_CODE_24BIT_YCC422:
+ case COLOR_CODE_20BIT_YCC422_LOOSELY:
+ case COLOR_CODE_12BIT_YCC420:
+ /* round up active H pixels to a multiple of 2 */
+ if ((video_size % 2) != 0)
+ video_size += 1;
+ break;
+ default:
+ break;
+ }
+
+ return video_size;
+}
+
+#define SPRD_MIPI_DSI_FMT_DSC 0xff
+static u32 fmt_to_coding(u32 fmt)
+{
+ switch (fmt) {
+ case MIPI_DSI_FMT_RGB565:
+ return COLOR_CODE_16BIT_CONFIG1;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return COLOR_CODE_18BIT_CONFIG1;
+ case MIPI_DSI_FMT_RGB888:
+ return COLOR_CODE_24BIT;
+ case SPRD_MIPI_DSI_FMT_DSC:
+ return COLOR_CODE_COMPRESSTION;
+ default:
+ DRM_ERROR("Unsupported format (%d)\n", fmt);
+ return COLOR_CODE_24BIT;
+ }
+}
+
+#define ns_to_cycle(ns, byte_clk) \
+ DIV_ROUND_UP((ns) * (byte_clk), 1000000)
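+
+/*
+ * Illustrative (byte_clk assumed in kHz, matching the PLL code): 100 ns at
+ * byte_clk = 62500 kHz is ns_to_cycle(100, 62500) = ceil(6.25) = 7 cycles.
+ */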
+
+static void sprd_dsi_init(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ u32 byte_clk = dsi->slave->hs_rate / 8;
+ u16 data_hs2lp, data_lp2hs, clk_hs2lp, clk_lp2hs;
+ u16 max_rd_time;
+ int div;
+
+ writel(0, ctx->base + SOFT_RESET);
+ writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT);
+ writel(0xffffffff, ctx->base + MASK_INTERNAL_INT);
+ writel(1, ctx->base + DSI_MODE_CFG);
+ dsi_reg_up(ctx, EOTP_EN, RX_EOTP_EN, 0);
+ dsi_reg_up(ctx, EOTP_EN, TX_EOTP_EN, 0);
+ dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_ECC_EN, RX_PKT_ECC_EN);
+ dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_CRC_EN, RX_PKT_CRC_EN);
+ writel(1, ctx->base + TA_EN);
+ dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, VIDEO_PKT_VCID, 0);
+ dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, GEN_RX_VCID, 0);
+
+ div = DIV_ROUND_UP(byte_clk, dsi->slave->lp_rate);
+ writel(div, ctx->base + TX_ESC_CLK_CONFIG);
+
+ max_rd_time = ns_to_cycle(ctx->max_rd_time, byte_clk);
+ writel(max_rd_time, ctx->base + MAX_READ_TIME);
+
+ data_hs2lp = ns_to_cycle(ctx->data_hs2lp, byte_clk);
+ data_lp2hs = ns_to_cycle(ctx->data_lp2hs, byte_clk);
+ clk_hs2lp = ns_to_cycle(ctx->clk_hs2lp, byte_clk);
+ clk_lp2hs = ns_to_cycle(ctx->clk_lp2hs, byte_clk);
+ dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG,
+ PHY_DATALANE_HS_TO_LP_TIME, 16, data_hs2lp);
+ dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG,
+ PHY_DATALANE_LP_TO_HS_TIME, 0, data_lp2hs);
+ dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG,
+ PHY_CLKLANE_HS_TO_LP_TIME, 16, clk_hs2lp);
+ dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG,
+ PHY_CLKLANE_LP_TO_HS_TIME, 0, clk_lp2hs);
+
+ writel(1, ctx->base + SOFT_RESET);
+}
+
+/*
+ * Free up resources and shut down the host controller and PHY
+ */
+static void sprd_dsi_fini(struct dsi_context *ctx)
+{
+ writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT);
+ writel(0xffffffff, ctx->base + MASK_INTERNAL_INT);
+ writel(0, ctx->base + SOFT_RESET);
+}
+
+/*
+ * If not in burst mode, compute the video and null packet sizes as needed,
+ * and configure timers for the data lanes and/or clock lane to return to LP
+ * when the bandwidth is not filled by data.
+ */
+static int sprd_dsi_dpi_video(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ struct videomode *vm = &ctx->vm;
+ u32 byte_clk = dsi->slave->hs_rate / 8;
+ u16 bpp_x100;
+ u16 video_size;
+ u32 ratio_x1000;
+ u16 null_pkt_size = 0;
+ u8 video_size_step;
+ u32 hs_to;
+ u32 total_bytes;
+ u32 bytes_per_chunk;
+ u32 chunks = 0;
+ u32 bytes_left = 0;
+ u32 chunk_overhead;
+ const u8 pkt_header = 6;
+ u8 coding;
+ int div;
+ u16 hline;
+ u16 byte_cycle;
+
+ coding = fmt_to_coding(dsi->slave->format);
+ video_size = round_video_size(coding, vm->hactive);
+ bpp_x100 = calc_bytes_per_pixel_x100(coding);
+ video_size_step = calc_video_size_step(coding);
+ ratio_x1000 = byte_clk * 1000 / (vm->pixelclock / 1000);
+ hline = vm->hactive + vm->hsync_len + vm->hfront_porch +
+ vm->hback_porch;
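+
+	/*
+	 * Illustrative (assumed 62500 kHz byte clock and 74.25 MHz pixel
+	 * clock): ratio_x1000 = 62500 * 1000 / 74250 = 841, i.e. roughly
+	 * 0.84 byte-clock cycles per pixel.
+	 */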
+
+ writel(0, ctx->base + SOFT_RESET);
+	dsi_reg_wr(ctx, VID_MODE_CFG, FRAME_BTA_ACK_EN, 14, ctx->frame_ack_en);
+ dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding);
+ dsi_reg_wr(ctx, VID_MODE_CFG, VID_MODE_TYPE, 0, ctx->burst_mode);
+ byte_cycle = 95 * hline * ratio_x1000 / 100000;
+ dsi_reg_wr(ctx, VIDEO_SIG_DELAY_CONFIG, VIDEO_SIG_DELAY, 0, byte_cycle);
+ byte_cycle = hline * ratio_x1000 / 1000;
+ writel(byte_cycle, ctx->base + VIDEO_LINE_TIME);
+ byte_cycle = vm->hsync_len * ratio_x1000 / 1000;
+ dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HSA_TIME, 16, byte_cycle);
+ byte_cycle = vm->hback_porch * ratio_x1000 / 1000;
+ dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HBP_TIME, 0, byte_cycle);
+ writel(vm->vactive, ctx->base + VIDEO_VACTIVE_LINES);
+ dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VFP_LINES, 0, vm->vfront_porch);
+ dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VBP_LINES, 10, vm->vback_porch);
+ dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VSA_LINES, 20, vm->vsync_len);
+ dsi_reg_up(ctx, VID_MODE_CFG, LP_HBP_EN | LP_HFP_EN | LP_VACT_EN |
+ LP_VFP_EN | LP_VBP_EN | LP_VSA_EN, LP_HBP_EN | LP_HFP_EN |
+ LP_VACT_EN | LP_VFP_EN | LP_VBP_EN | LP_VSA_EN);
+
+ hs_to = (hline * vm->vactive) + (2 * bpp_x100) / 100;
+ for (div = 0x80; (div < hs_to) && (div > 2); div--) {
+ if ((hs_to % div) == 0) {
+ writel(div, ctx->base + TIMEOUT_CNT_CLK_CONFIG);
+ writel(hs_to / div, ctx->base + LRX_H_TO_CONFIG);
+ writel(hs_to / div, ctx->base + HTX_TO_CONFIG);
+ break;
+ }
+ }
+
+ if (ctx->burst_mode == VIDEO_BURST_WITH_SYNC_PULSES) {
+ dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size);
+ writel(0, ctx->base + VIDEO_NULLPKT_SIZE);
+ dsi_reg_up(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 0);
+ } else {
+ /* non burst transmission */
+ null_pkt_size = 0;
+
+ /* bytes to be sent - first as one chunk */
+ bytes_per_chunk = vm->hactive * bpp_x100 / 100 + pkt_header;
+
+ /* hline total bytes from the DPI interface */
+ total_bytes = (vm->hactive + vm->hfront_porch) *
+ ratio_x1000 / dsi->slave->lanes / 1000;
+
+ /* check if the pixels actually fit on the DSI link */
+ if (total_bytes < bytes_per_chunk) {
+ drm_err(dsi->drm, "current resolution can not be set\n");
+ return -EINVAL;
+ }
+
+ chunk_overhead = total_bytes - bytes_per_chunk;
+
+ /* overhead higher than 1 -> enable multi packets */
+ if (chunk_overhead > 1) {
+ /* multi packets */
+ for (video_size = video_size_step;
+ video_size < vm->hactive;
+ video_size += video_size_step) {
+ if (vm->hactive * 1000 / video_size % 1000)
+ continue;
+
+ chunks = vm->hactive / video_size;
+ bytes_per_chunk = bpp_x100 * video_size / 100
+ + pkt_header;
+ if (total_bytes >= (bytes_per_chunk * chunks)) {
+ bytes_left = total_bytes -
+ bytes_per_chunk * chunks;
+ break;
+ }
+ }
+
+ /* prevent overflow (unsigned - unsigned) */
+ if (bytes_left > (pkt_header * chunks)) {
+ null_pkt_size = (bytes_left -
+ pkt_header * chunks) / chunks;
+ /* avoid register overflow */
+ if (null_pkt_size > 1023)
+ null_pkt_size = 1023;
+ }
+
+ } else {
+ /* single packet */
+ chunks = 1;
+
+ /* must be a multiple of 4 except 18 loosely */
+ for (video_size = vm->hactive;
+ (video_size % video_size_step) != 0;
+ video_size++)
+ ;
+ }
+
+ dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size);
+ writel(null_pkt_size, ctx->base + VIDEO_NULLPKT_SIZE);
+ dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 16, chunks);
+ }
+
+ writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT);
+ writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT);
+ writel(1, ctx->base + SOFT_RESET);
+
+ return 0;
+}
+
+static void sprd_dsi_edpi_video(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ const u32 fifo_depth = 1096;
+ const u32 word_length = 4;
+ u32 hactive = ctx->vm.hactive;
+ u32 bpp_x100;
+ u32 max_fifo_len;
+ u8 coding;
+
+ coding = fmt_to_coding(dsi->slave->format);
+ bpp_x100 = calc_bytes_per_pixel_x100(coding);
+ max_fifo_len = word_length * fifo_depth * 100 / bpp_x100;
+
+ writel(0, ctx->base + SOFT_RESET);
+ dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding);
+ dsi_reg_wr(ctx, CMD_MODE_CFG, TEAR_FX_EN, 0, ctx->te_ack_en);
+
+ if (max_fifo_len > hactive)
+ writel(hactive, ctx->base + DCS_WM_PKT_SIZE);
+ else
+ writel(max_fifo_len, ctx->base + DCS_WM_PKT_SIZE);
+
+ writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT);
+ writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT);
+ writel(1, ctx->base + SOFT_RESET);
+}
+
+/*
+ * Send a packet on the generic interface. This function busy-waits for the
+ * buffer to clear, with the delay limited to
+ * (param_length / 4) x DSIH_FIFO_ACTIVE_WAIT register access times.
+ *
+ * Null and blanking packets cannot be sent through this function due to a
+ * controller restriction.
+ */
+static int sprd_dsi_wr_pkt(struct dsi_context *ctx, u8 vc, u8 type,
+ const u8 *param, u16 len)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ u8 wc_lsbyte, wc_msbyte;
+ u32 payload;
+ int i, j, ret;
+
+ if (vc > 3)
+ return -EINVAL;
+
+ /* 1st: for a long packet, the payload must be configured first */
+ ret = dsi_wait_tx_payload_fifo_empty(ctx);
+ if (ret) {
+ drm_err(dsi->drm, "tx payload fifo is not empty\n");
+ return ret;
+ }
+
+ if (len > 2) {
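+ /* pack up to four parameter bytes, LSB first, into each 32-bit FIFO word */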
+ for (i = 0, j = 0; i < len; i += j) {
+ payload = 0;
+ for (j = 0; (j < 4) && ((j + i) < (len)); j++)
+ payload |= param[i + j] << (j * 8);
+
+ writel(payload, ctx->base + GEN_PLD_DATA);
+ }
+ wc_lsbyte = len & 0xff;
+ wc_msbyte = len >> 8;
+ } else {
+ wc_lsbyte = (len > 0) ? param[0] : 0;
+ wc_msbyte = (len > 1) ? param[1] : 0;
+ }
+
+ /* 2nd: set the packet header */
+ ret = dsi_wait_tx_cmd_fifo_empty(ctx);
+ if (ret) {
+ drm_err(dsi->drm, "tx cmd fifo is not empty\n");
+ return ret;
+ }
+
+ writel(type | (vc << 6) | (wc_lsbyte << 8) | (wc_msbyte << 16),
+ ctx->base + GEN_HDR);
+
+ return 0;
+}
+
+/*
+ * Send a READ packet to the peripheral using the generic interface.
+ * This forces command mode and stops video mode, because of the bus
+ * turnaround (BTA) that the read requires.
+ *
+ * This function busy-waits for the buffer to clear; the delay is bounded
+ * by 2 x DSIH_FIFO_ACTIVE_WAIT (waiting for the command buffer, then for
+ * the response).
+ *
+ * Note: this function enables BTA.
+ */
+static int sprd_dsi_rd_pkt(struct dsi_context *ctx, u8 vc, u8 type,
+ u8 msb_byte, u8 lsb_byte,
+ u8 *buffer, u8 bytes_to_read)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ int i, ret;
+ int count = 0;
+ u32 temp;
+
+ if (vc > 3)
+ return -EINVAL;
+
+ /* 1st: send read command to peripheral */
+ ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5);
+ if (!ret)
+ return -EIO;
+
+ writel(type | (vc << 6) | (lsb_byte << 8) | (msb_byte << 16),
+ ctx->base + GEN_HDR);
+
+ /* 2nd: wait for the peripheral response to complete */
+ ret = dsi_wait_rd_resp_completed(ctx);
+ if (ret) {
+ drm_err(dsi->drm, "wait read response time out\n");
+ return ret;
+ }
+
+ /* 3rd: get data from rx payload fifo */
+ ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1);
+ if (ret) {
+ drm_err(dsi->drm, "rx payload fifo empty\n");
+ return -EIO;
+ }
+
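+ /* drain the RX FIFO one 32-bit word at a time, unpacking up to four bytes per word */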
+ for (i = 0; i < 100; i++) {
+ temp = readl(ctx->base + GEN_PLD_DATA);
+
+ if (count < bytes_to_read)
+ buffer[count++] = temp & 0xff;
+ if (count < bytes_to_read)
+ buffer[count++] = (temp >> 8) & 0xff;
+ if (count < bytes_to_read)
+ buffer[count++] = (temp >> 16) & 0xff;
+ if (count < bytes_to_read)
+ buffer[count++] = (temp >> 24) & 0xff;
+
+ ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1);
+ if (ret)
+ return count;
+ }
+
+ return 0;
+}
+
+static void sprd_dsi_set_work_mode(struct dsi_context *ctx, u8 mode)
+{
+ if (mode == DSI_MODE_CMD)
+ writel(1, ctx->base + DSI_MODE_CFG);
+ else
+ writel(0, ctx->base + DSI_MODE_CFG);
+}
+
+static void sprd_dsi_state_reset(struct dsi_context *ctx)
+{
+ writel(0, ctx->base + SOFT_RESET);
+ udelay(100);
+ writel(1, ctx->base + SOFT_RESET);
+}
+
+static int sprd_dphy_init(struct dsi_context *ctx)
+{
+ struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+ int ret;
+
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, 0);
+
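+ /* pulse TESTCLR to reset the PHY test interface before configuring the PLL */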
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, PHY_TESTCLR);
+ dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0);
+
+ dphy_pll_config(ctx);
+ dphy_timing_config(ctx);
+
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, RF_PHY_SHUTDOWN);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N);
+ writel(0x1C, ctx->base + PHY_MIN_STOP_TIME);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN);
+ writel(dsi->slave->lanes - 1, ctx->base + PHY_LANE_NUM_CONFIG);
+
+ ret = dphy_wait_pll_locked(ctx);
+ if (ret) {
+ drm_err(dsi->drm, "dphy initial failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sprd_dphy_fini(struct dsi_context *ctx)
+{
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0);
+ dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N);
+}
+
+static void sprd_dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct sprd_dsi *dsi = encoder_to_dsi(encoder);
+
+ drm_display_mode_to_videomode(adj_mode, &dsi->ctx.vm);
+}
+
+static void sprd_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct sprd_dsi *dsi = encoder_to_dsi(encoder);
+ struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc);
+ struct dsi_context *ctx = &dsi->ctx;
+
+ if (ctx->enabled) {
+ drm_warn(dsi->drm, "dsi is initialized\n");
+ return;
+ }
+
+ sprd_dsi_init(ctx);
+ if (ctx->work_mode == DSI_MODE_VIDEO)
+ sprd_dsi_dpi_video(ctx);
+ else
+ sprd_dsi_edpi_video(ctx);
+
+ sprd_dphy_init(ctx);
+
+ sprd_dsi_set_work_mode(ctx, ctx->work_mode);
+ sprd_dsi_state_reset(ctx);
+
+ if (dsi->slave->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+ dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, AUTO_CLKLANE_CTRL_EN,
+ AUTO_CLKLANE_CTRL_EN);
+ } else {
+ dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN);
+ dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, PHY_CLKLANE_TX_REQ_HS,
+ PHY_CLKLANE_TX_REQ_HS);
+ dphy_wait_pll_locked(ctx);
+ }
+
+ sprd_dpu_run(dpu);
+
+ ctx->enabled = true;
+}
+
+static void sprd_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sprd_dsi *dsi = encoder_to_dsi(encoder);
+ struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc);
+ struct dsi_context *ctx = &dsi->ctx;
+
+ if (!ctx->enabled) {
+ drm_warn(dsi->drm, "dsi isn't initialized\n");
+ return;
+ }
+
+ sprd_dpu_stop(dpu);
+ sprd_dphy_fini(ctx);
+ sprd_dsi_fini(ctx);
+
+ ctx->enabled = false;
+}
+
+static const struct drm_encoder_helper_funcs sprd_encoder_helper_funcs = {
+ .mode_set = sprd_dsi_encoder_mode_set,
+ .enable = sprd_dsi_encoder_enable,
+ .disable = sprd_dsi_encoder_disable
+};
+
+static const struct drm_encoder_funcs sprd_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int sprd_dsi_encoder_init(struct sprd_dsi *dsi,
+ struct device *dev)
+{
+ struct drm_encoder *encoder = &dsi->encoder;
+ u32 crtc_mask;
+ int ret;
+
+ crtc_mask = drm_of_find_possible_crtcs(dsi->drm, dev->of_node);
+ if (!crtc_mask) {
+ drm_err(dsi->drm, "failed to find crtc mask\n");
+ return -EINVAL;
+ }
+
+ drm_dbg(dsi->drm, "found possible crtcs: 0x%08x\n", crtc_mask);
+
+ encoder->possible_crtcs = crtc_mask;
+ ret = drm_encoder_init(dsi->drm, encoder, &sprd_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ if (ret) {
+ drm_err(dsi->drm, "failed to init dsi encoder\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &sprd_encoder_helper_funcs);
+
+ return 0;
+}
+
+static int sprd_dsi_bridge_init(struct sprd_dsi *dsi,
+ struct device *dev)
+{
+ int ret;
+
+ dsi->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+ if (IS_ERR(dsi->panel_bridge))
+ return PTR_ERR(dsi->panel_bridge);
+
+ ret = drm_bridge_attach(&dsi->encoder, dsi->panel_bridge, NULL, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int sprd_dsi_context_init(struct sprd_dsi *dsi,
+ struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dsi_context *ctx = &dsi->ctx;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!ctx->base) {
+ drm_err(dsi->drm, "failed to map dsi host registers\n");
+ return -ENXIO;
+ }
+
+ ctx->regmap = devm_regmap_init(dev, &regmap_tst_io, dsi, &byte_config);
+ if (IS_ERR(ctx->regmap)) {
+ drm_err(dsi->drm, "dphy regmap init failed\n");
+ return PTR_ERR(ctx->regmap);
+ }
+
+ ctx->data_hs2lp = 120;
+ ctx->data_lp2hs = 500;
+ ctx->clk_hs2lp = 4;
+ ctx->clk_lp2hs = 15;
+ ctx->max_rd_time = 6000;
+ ctx->int0_mask = 0xffffffff;
+ ctx->int1_mask = 0xffffffff;
+ ctx->enabled = true;
+
+ return 0;
+}
+
+static int sprd_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+ struct sprd_dsi *dsi = dev_get_drvdata(dev);
+ int ret;
+
+ dsi->drm = drm;
+
+ ret = sprd_dsi_encoder_init(dsi, dev);
+ if (ret)
+ return ret;
+
+ ret = sprd_dsi_bridge_init(dsi, dev);
+ if (ret)
+ return ret;
+
+ ret = sprd_dsi_context_init(dsi, dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void sprd_dsi_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ struct sprd_dsi *dsi = dev_get_drvdata(dev);
+
+ drm_of_panel_bridge_remove(dev->of_node, 1, 0);
+
+ drm_encoder_cleanup(&dsi->encoder);
+}
+
+static const struct component_ops dsi_component_ops = {
+ .bind = sprd_dsi_bind,
+ .unbind = sprd_dsi_unbind,
+};
+
+static int sprd_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *slave)
+{
+ struct sprd_dsi *dsi = host_to_dsi(host);
+ struct dsi_context *ctx = &dsi->ctx;
+
+ dsi->slave = slave;
+
+ if (slave->mode_flags & MIPI_DSI_MODE_VIDEO)
+ ctx->work_mode = DSI_MODE_VIDEO;
+ else
+ ctx->work_mode = DSI_MODE_CMD;
+
+ if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ ctx->burst_mode = VIDEO_BURST_WITH_SYNC_PULSES;
+ else if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_PULSES;
+ else
+ ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_EVENTS;
+
+ return component_add(host->dev, &dsi_component_ops);
+}
+
+static int sprd_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *slave)
+{
+ component_del(host->dev, &dsi_component_ops);
+
+ return 0;
+}
+
+static ssize_t sprd_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct sprd_dsi *dsi = host_to_dsi(host);
+ const u8 *tx_buf = msg->tx_buf;
+
+ if (msg->rx_buf && msg->rx_len) {
+ u8 lsb = (msg->tx_len > 0) ? tx_buf[0] : 0;
+ u8 msb = (msg->tx_len > 1) ? tx_buf[1] : 0;
+
+ return sprd_dsi_rd_pkt(&dsi->ctx, msg->channel, msg->type,
+ msb, lsb, msg->rx_buf, msg->rx_len);
+ }
+
+ if (msg->tx_buf && msg->tx_len)
+ return sprd_dsi_wr_pkt(&dsi->ctx, msg->channel, msg->type,
+ tx_buf, msg->tx_len);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops sprd_dsi_host_ops = {
+ .attach = sprd_dsi_host_attach,
+ .detach = sprd_dsi_host_detach,
+ .transfer = sprd_dsi_host_transfer,
+};
+
+static const struct of_device_id dsi_match_table[] = {
+ { .compatible = "sprd,sharkl3-dsi-host" },
+ { /* sentinel */ },
+};
+
+static int sprd_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sprd_dsi *dsi;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, dsi);
+
+ dsi->host.ops = &sprd_dsi_host_ops;
+ dsi->host.dev = dev;
+
+ return mipi_dsi_host_register(&dsi->host);
+}
+
+static int sprd_dsi_remove(struct platform_device *pdev)
+{
+ struct sprd_dsi *dsi = dev_get_drvdata(&pdev->dev);
+
+ mipi_dsi_host_unregister(&dsi->host);
+
+ return 0;
+}
+
+struct platform_driver sprd_dsi_driver = {
+ .probe = sprd_dsi_probe,
+ .remove = sprd_dsi_remove,
+ .driver = {
+ .name = "sprd-dsi-drv",
+ .of_match_table = dsi_match_table,
+ },
+};
+
+MODULE_AUTHOR("Leon He <leon.he@unisoc.com>");
+MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc MIPI DSI HOST Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.h b/drivers/gpu/drm/sprd/sprd_dsi.h
new file mode 100644
index 000000000000..d858ebb11115
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dsi.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#ifndef __SPRD_DSI_H__
+#define __SPRD_DSI_H__
+
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <video/videomode.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
+#include <drm/drm_panel.h>
+
+#define encoder_to_dsi(encoder) \
+ container_of(encoder, struct sprd_dsi, encoder)
+
+enum dsi_work_mode {
+ DSI_MODE_CMD = 0,
+ DSI_MODE_VIDEO
+};
+
+enum video_burst_mode {
+ VIDEO_NON_BURST_WITH_SYNC_PULSES = 0,
+ VIDEO_NON_BURST_WITH_SYNC_EVENTS,
+ VIDEO_BURST_WITH_SYNC_PULSES
+};
+
+enum dsi_color_coding {
+ COLOR_CODE_16BIT_CONFIG1 = 0,
+ COLOR_CODE_16BIT_CONFIG2,
+ COLOR_CODE_16BIT_CONFIG3,
+ COLOR_CODE_18BIT_CONFIG1,
+ COLOR_CODE_18BIT_CONFIG2,
+ COLOR_CODE_24BIT,
+ COLOR_CODE_20BIT_YCC422_LOOSELY,
+ COLOR_CODE_24BIT_YCC422,
+ COLOR_CODE_16BIT_YCC422,
+ COLOR_CODE_30BIT,
+ COLOR_CODE_36BIT,
+ COLOR_CODE_12BIT_YCC420,
+ COLOR_CODE_COMPRESSTION,
+ COLOR_CODE_MAX
+};
+
+enum pll_timing {
+ NONE,
+ REQUEST_TIME,
+ PREPARE_TIME,
+ SETTLE_TIME,
+ ZERO_TIME,
+ TRAIL_TIME,
+ EXIT_TIME,
+ CLKPOST_TIME,
+ TA_GET,
+ TA_GO,
+ TA_SURE,
+ TA_WAIT,
+};
+
+struct dphy_pll {
+ u8 refin; /* Pre-divider control signal */
+ u8 cp_s; /* 00: SDM_EN=1, 10: SDM_EN=0 */
+ u8 fdk_s; /* PLL mode control: integer or fraction */
+ u8 sdm_en;
+ u8 div;
+ u8 int_n; /* integer N PLL */
+ u32 ref_clk; /* dphy reference clock, unit: MHz */
+ u32 freq; /* panel config, unit: KHz */
+ u32 fvco;
+ u32 potential_fvco;
+ u32 nint; /* sigma delta modulator NINT control */
+ u32 kint; /* sigma delta modulator KINT control */
+ u8 lpf_sel; /* low pass filter control */
+ u8 out_sel; /* post divider control */
+ u8 vco_band; /* vco range */
+ u8 det_delay;
+};
+
+struct dsi_context {
+ void __iomem *base;
+ struct regmap *regmap;
+ struct dphy_pll pll;
+ struct videomode vm;
+ bool enabled;
+
+ u8 work_mode;
+ u8 burst_mode;
+ u32 int0_mask;
+ u32 int1_mask;
+
+ /* maximum time (ns) for data lanes from HS to LP */
+ u16 data_hs2lp;
+ /* maximum time (ns) for data lanes from LP to HS */
+ u16 data_lp2hs;
+ /* maximum time (ns) for clk lanes from HS to LP */
+ u16 clk_hs2lp;
+ /* maximum time (ns) for clk lanes from LP to HS */
+ u16 clk_lp2hs;
+ /* maximum time (ns) for BTA operation - REQUIRED */
+ u16 max_rd_time;
+ /* enable receiving frame ack packets - for video mode */
+ bool frame_ack_en;
+ /* enable receiving tear effect ack packets - for cmd mode */
+ bool te_ack_en;
+};
+
+struct sprd_dsi {
+ struct drm_device *drm;
+ struct mipi_dsi_host host;
+ struct mipi_dsi_device *slave;
+ struct drm_encoder encoder;
+ struct drm_bridge *panel_bridge;
+ struct dsi_context ctx;
+};
+
+int dphy_pll_config(struct dsi_context *ctx);
+void dphy_timing_config(struct dsi_context *ctx);
+
+#endif /* __SPRD_DSI_H__ */
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index d0cfdd36b38f..246a94afbe74 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -5,7 +5,6 @@ config DRM_STI
select RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_PANEL
select FW_LOADER
select SND_SOC_HDMI_CODEC if SND_SOC
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index b7d66915a2be..e0379488cd0d 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -4,7 +4,6 @@ config DRM_STM
depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 8c796de53222..befc5a80222d 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -5,7 +5,6 @@ config DRM_SUN4I
depends on ARCH_SUNXI || COMPILE_TEST
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_PANEL
select REGMAP_MMIO
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 54dd562e294c..b630614b3d72 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -53,7 +53,7 @@ static const struct drm_driver sun4i_drv_driver = {
.minor = 0,
/* GEM Operations */
- DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
};
static int sun4i_drv_bind(struct device *dev)
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index b64d93da651d..5e2b0175df36 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -658,8 +658,10 @@ int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
return -EPROBE_DEFER;
phy = platform_get_drvdata(pdev);
- if (!phy)
+ if (!phy) {
+ put_device(&pdev->dev);
return -EPROBE_DEFER;
+ }
hdmi->phy = phy;
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 1650a448eabd..8cf5aeb9db6c 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -12,6 +12,9 @@ config DRM_TEGRA
select INTERCONNECT
select IOMMU_IOVA
select CEC_CORE if CEC_NOTIFIER
+ select SND_SIMPLE_CARD if SND_SOC_TEGRA20_SPDIF
+ select SND_SOC_HDMI_CODEC if SND_SOC_TEGRA20_SPDIF
+ select SND_AUDIO_GRAPH_CARD if SND_SOC_TEGRA20_SPDIF
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index d801909182cf..df6cc986aeba 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -23,7 +23,8 @@ tegra-drm-y := \
gr2d.o \
gr3d.o \
falcon.o \
- vic.o
+ vic.o \
+ nvdec.o
tegra-drm-y += trace.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index a29d64f87563..eb70eee8992a 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -11,9 +11,12 @@
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>
#include <drm/drm_atomic.h>
@@ -890,11 +893,9 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane,
return 0;
}
-static void tegra_cursor_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
+static void __tegra_cursor_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
struct tegra_drm *tegra = plane->dev->dev_private;
@@ -990,6 +991,14 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
}
+static void tegra_cursor_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+
+ __tegra_cursor_atomic_update(plane, new_state);
+}
+
static void tegra_cursor_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -1009,12 +1018,78 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
}
+static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ int min_scale, max_scale;
+ int err;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ if (!crtc_state->active)
+ return -EINVAL;
+
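+ /* async updates may only move the cursor; the fb, size and CRTC must stay the same */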
+ if (plane->state->crtc != new_state->crtc ||
+ plane->state->src_w != new_state->src_w ||
+ plane->state->src_h != new_state->src_h ||
+ plane->state->crtc_w != new_state->crtc_w ||
+ plane->state->crtc_h != new_state->crtc_h ||
+ plane->state->fb != new_state->fb ||
+ plane->state->fb == NULL)
+ return -EINVAL;
+
+ min_scale = (1 << 16) / 8;
+ max_scale = (8 << 16) / 1;
+
+ err = drm_atomic_helper_check_plane_state(new_state, crtc_state, min_scale, max_scale,
+ true, true);
+ if (err < 0)
+ return err;
+
+ if (new_state->visible != plane->state->visible)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void tegra_cursor_atomic_async_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
+
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+
+ if (new_state->visible) {
+ struct tegra_plane *p = to_tegra_plane(plane);
+ u32 value;
+
+ __tegra_cursor_atomic_update(plane, new_state);
+
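+ /* latch the new state, then request activation; the readbacks flush the posted writes */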
+ value = (WIN_A_ACT_REQ << p->index) << 8 | GENERAL_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ (void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+
+ value = (WIN_A_ACT_REQ << p->index) | GENERAL_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ (void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ }
+}
+
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
.prepare_fb = tegra_plane_prepare_fb,
.cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
.atomic_disable = tegra_cursor_atomic_disable,
+ .atomic_async_check = tegra_cursor_atomic_async_check,
+ .atomic_async_update = tegra_cursor_atomic_async_update,
};
static const uint64_t linear_modifiers[] = {
@@ -1267,9 +1342,9 @@ static struct drm_plane *tegra_dc_add_planes(struct drm_device *drm,
err = PTR_ERR(planes[i]);
while (i--)
- tegra_plane_funcs.destroy(planes[i]);
+ planes[i]->funcs->destroy(planes[i]);
- tegra_plane_funcs.destroy(primary);
+ primary->funcs->destroy(primary);
return ERR_PTR(err);
}
}
@@ -1762,10 +1837,55 @@ int tegra_dc_state_setup_clock(struct tegra_dc *dc,
return 0;
}
-static void tegra_dc_commit_state(struct tegra_dc *dc,
- struct tegra_dc_state *state)
+static void tegra_dc_update_voltage_state(struct tegra_dc *dc,
+ struct tegra_dc_state *state)
+{
+ unsigned long rate, pstate;
+ struct dev_pm_opp *opp;
+ int err;
+
+ if (!dc->has_opp_table)
+ return;
+
+ /* calculate actual pixel clock rate which depends on internal divider */
+ rate = DIV_ROUND_UP(clk_get_rate(dc->clk) * 2, state->div + 2);
+
+ /* find suitable OPP for the rate */
+ opp = dev_pm_opp_find_freq_ceil(dc->dev, &rate);
+
+ /*
+ * Very high resolution modes may result in a clock rate that is
+ * above the characterized maximum. In this case it's okay to fall
+ * back to the characterized maximum.
+ */
+ if (opp == ERR_PTR(-ERANGE))
+ opp = dev_pm_opp_find_freq_floor(dc->dev, &rate);
+
+ if (IS_ERR(opp)) {
+ dev_err(dc->dev, "failed to find OPP for %luHz: %pe\n",
+ rate, opp);
+ return;
+ }
+
+ pstate = dev_pm_opp_get_required_pstate(opp, 0);
+ dev_pm_opp_put(opp);
+
+ /*
+ * The minimum core voltage depends on the pixel clock rate (which
+ * depends on the internal clock divider of the CRTC) and not on the
+ * rate of the display controller clock. This is why we don't use the
+ * dev_pm_opp_set_rate() API and instead control the power domain
+ * directly.
+ */
+ err = dev_pm_genpd_set_performance_state(dc->dev, pstate);
+ if (err)
+ dev_err(dc->dev, "failed to set power domain state to %lu: %d\n",
+ pstate, err);
+}
+
+static void tegra_dc_set_clock_rate(struct tegra_dc *dc,
+ struct tegra_dc_state *state)
{
- u32 value;
int err;
err = clk_set_parent(dc->clk, state->clk);
@@ -1797,10 +1917,7 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
state->div);
DRM_DEBUG_KMS("pclk: %lu\n", state->pclk);
- if (!dc->soc->has_nvdisplay) {
- value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
- tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
- }
+ tegra_dc_update_voltage_state(dc, state);
}
static void tegra_dc_stop(struct tegra_dc *dc)
@@ -1991,6 +2108,13 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
err = host1x_client_suspend(&dc->client);
if (err < 0)
dev_err(dc->dev, "failed to suspend: %d\n", err);
+
+ if (dc->has_opp_table) {
+ err = dev_pm_genpd_set_performance_state(dc->dev, 0);
+ if (err)
+ dev_err(dc->dev,
+ "failed to clear power domain state: %d\n", err);
+ }
}
static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -2002,6 +2126,9 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
u32 value;
int err;
+ /* apply PLL changes */
+ tegra_dc_set_clock_rate(dc, crtc_state);
+
err = host1x_client_resume(&dc->client);
if (err < 0) {
dev_err(dc->dev, "failed to resume: %d\n", err);
@@ -2076,8 +2203,11 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
else
tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
- /* apply PLL and pixel clock changes */
- tegra_dc_commit_state(dc, crtc_state);
+ /* apply pixel clock changes */
+ if (!dc->soc->has_nvdisplay) {
+ value = SHIFT_CLK_DIVIDER(crtc_state->div) | PIXEL_CLK_DIVIDER_PCD1;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+ }
/* program display mode */
tegra_dc_set_timings(dc, mode);
@@ -2107,6 +2237,12 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
tegra_dc_writel(dc, value, DC_COM_RG_UNDERFLOW);
}
+ if (dc->rgb) {
+ /* XXX: parameterize? */
+ value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
+ tegra_dc_writel(dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
+ }
+
tegra_dc_commit(dc);
drm_crtc_vblank_on(crtc);
@@ -2685,6 +2821,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.has_win_b_vfilter_mem_client = true,
.has_win_c_without_vert_filter = true,
.plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = false,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
@@ -2707,6 +2844,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.has_win_b_vfilter_mem_client = true,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = true,
+ .has_pll_d2_out0 = true,
};
static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
@@ -2729,6 +2867,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
.has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = true,
+ .has_pll_d2_out0 = true,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
@@ -2751,6 +2890,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = true,
};
static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
@@ -2773,6 +2913,7 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
.has_win_b_vfilter_mem_client = false,
.has_win_c_without_vert_filter = false,
.plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = true,
};
static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = {
@@ -2823,6 +2964,7 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
.wgrps = tegra186_dc_wgrps,
.num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps),
.plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = false,
};
static const struct tegra_windowgroup_soc tegra194_dc_wgrps[] = {
@@ -2873,6 +3015,7 @@ static const struct tegra_dc_soc_info tegra194_dc_soc_info = {
.wgrps = tegra194_dc_wgrps,
.num_wgrps = ARRAY_SIZE(tegra194_dc_wgrps),
.plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = false,
};
static const struct of_device_id tegra_dc_of_match[] = {
@@ -2973,6 +3116,23 @@ static int tegra_dc_couple(struct tegra_dc *dc)
return 0;
}
+static int tegra_dc_init_opp_table(struct tegra_dc *dc)
+{
+ struct tegra_core_opp_params opp_params = {};
+ int err;
+
+ err = devm_tegra_core_dev_init_opp_table(dc->dev, &opp_params);
+ if (err && err != -ENODEV)
+ return err;
+
+ if (err)
+ dc->has_opp_table = false;
+ else
+ dc->has_opp_table = true;
+
+ return 0;
+}
+
static int tegra_dc_probe(struct platform_device *pdev)
{
u64 dma_mask = dma_get_mask(pdev->dev.parent);
@@ -3038,6 +3198,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
tegra_powergate_power_off(dc->powergate);
}
+ err = tegra_dc_init_opp_table(dc);
+ if (err < 0)
+ return err;
+
dc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dc->regs))
return PTR_ERR(dc->regs);
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 40378308d527..3f91a10ea6c7 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -76,6 +76,7 @@ struct tegra_dc_soc_info {
bool has_win_b_vfilter_mem_client;
bool has_win_c_without_vert_filter;
bool plane_tiled_memory_bandwidth_x2;
+ bool has_pll_d2_out0;
};
struct tegra_dc {
@@ -100,6 +101,8 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
const struct tegra_dc_soc_info *soc;
+
+ bool has_opp_table;
};
static inline struct tegra_dc *
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8d37d6b00562..e9de91a4e7e8 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -10,6 +10,7 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
@@ -21,6 +22,10 @@
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
#include "dc.h"
#include "drm.h"
#include "gem.h"
@@ -116,6 +121,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
static void tegra_drm_context_free(struct tegra_drm_context *context)
{
context->client->ops->close_channel(context);
+ pm_runtime_put(context->client->base.dev);
kfree(context);
}
@@ -427,13 +433,20 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
{
int err;
+ err = pm_runtime_resume_and_get(client->base.dev);
+ if (err)
+ return err;
+
err = client->ops->open_channel(client, context);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put(client->base.dev);
return err;
+ }
err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
if (err < 0) {
client->ops->close_channel(context);
+ pm_runtime_put(client->base.dev);
return err;
}
@@ -936,6 +949,17 @@ int host1x_client_iommu_attach(struct host1x_client *client)
struct iommu_group *group = NULL;
int err;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (client->dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping =
+ to_dma_iommu_mapping(client->dev);
+ arm_iommu_detach_device(client->dev);
+ arm_iommu_release_mapping(mapping);
+
+ domain = iommu_get_domain_for_dev(client->dev);
+ }
+#endif
+
/*
* If the host1x client is already attached to an IOMMU domain that is
* not the shared IOMMU domain, don't try to attach it to a different
@@ -1344,15 +1368,18 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra210-sor", },
{ .compatible = "nvidia,tegra210-sor1", },
{ .compatible = "nvidia,tegra210-vic", },
+ { .compatible = "nvidia,tegra210-nvdec", },
{ .compatible = "nvidia,tegra186-display", },
{ .compatible = "nvidia,tegra186-dc", },
{ .compatible = "nvidia,tegra186-sor", },
{ .compatible = "nvidia,tegra186-sor1", },
{ .compatible = "nvidia,tegra186-vic", },
+ { .compatible = "nvidia,tegra186-nvdec", },
{ .compatible = "nvidia,tegra194-display", },
{ .compatible = "nvidia,tegra194-dc", },
{ .compatible = "nvidia,tegra194-sor", },
{ .compatible = "nvidia,tegra194-vic", },
+ { .compatible = "nvidia,tegra194-nvdec", },
{ /* sentinel */ }
};
@@ -1376,6 +1403,7 @@ static struct platform_driver * const drivers[] = {
&tegra_gr2d_driver,
&tegra_gr3d_driver,
&tegra_vic_driver,
+ &tegra_nvdec_driver,
};
static int __init host1x_drm_init(void)
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 8b28327c931c..fc0a19554eac 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -202,5 +202,6 @@ extern struct platform_driver tegra_sor_driver;
extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
extern struct platform_driver tegra_vic_driver;
+extern struct platform_driver tegra_nvdec_driver;
#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index d38fd7e12b57..fce0e52973c2 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -23,86 +23,97 @@
MODULE_IMPORT_NS(DMA_BUF);
-static void tegra_bo_put(struct host1x_bo *bo)
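+/* count the number of discontiguous DMA chunks in a DMA-mapped scatterlist */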
+static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
- struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ dma_addr_t next = ~(dma_addr_t)0;
+ unsigned int count = 0, i;
+ struct scatterlist *s;
- drm_gem_object_put(&obj->gem);
-}
+ for_each_sg(sgl, s, nents, i) {
+ /* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
+ if (!sg_dma_len(s))
+ continue;
-/* XXX move this into lib/scatterlist.c? */
-static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
- unsigned int nents, gfp_t gfp_mask)
-{
- struct scatterlist *dst;
- unsigned int i;
- int err;
+ if (sg_dma_address(s) != next) {
+ /* discontiguous entry: start a new chunk */
+ next = sg_dma_address(s) + sg_dma_len(s);
+ count++;
+ } else {
+ /* contiguous with the previous entry: extend the current chunk */
+ next += sg_dma_len(s);
+ }
+ }
- err = sg_alloc_table(sgt, nents, gfp_mask);
- if (err < 0)
- return err;
+ return count;
+}
- dst = sgt->sgl;
+static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
+{
+ return sg_dma_count_chunks(sgt->sgl, sgt->nents);
+}
- for (i = 0; i < nents; i++) {
- sg_set_page(dst, sg_page(sg), sg->length, 0);
- dst = sg_next(dst);
- sg = sg_next(sg);
- }
+static void tegra_bo_put(struct host1x_bo *bo)
+{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- return 0;
+ drm_gem_object_put(&obj->gem);
}
-static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
- dma_addr_t *phys)
+static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction direction)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- struct sg_table *sgt;
+ struct drm_gem_object *gem = &obj->gem;
+ struct host1x_bo_mapping *map;
int err;
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&map->ref);
+ map->bo = host1x_bo_get(bo);
+ map->direction = direction;
+ map->dev = dev;
+
/*
- * If we've manually mapped the buffer object through the IOMMU, make
- * sure to return the IOVA address of our mapping.
- *
- * Similarly, for buffers that have been allocated by the DMA API the
- * physical address can be used for devices that are not attached to
- * an IOMMU. For these devices, callers must pass a valid pointer via
- * the @phys argument.
- *
- * Imported buffers were also already mapped at import time, so the
- * existing mapping can be reused.
+ * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
*/
- if (phys) {
- *phys = obj->iova;
- return NULL;
+ if (gem->import_attach) {
+ struct dma_buf *buf = gem->import_attach->dmabuf;
+
+ map->attach = dma_buf_attach(buf, dev);
+ if (IS_ERR(map->attach)) {
+ err = PTR_ERR(map->attach);
+ goto free;
+ }
+
+ map->sgt = dma_buf_map_attachment(map->attach, direction);
+ if (IS_ERR(map->sgt)) {
+ dma_buf_detach(buf, map->attach);
+ err = PTR_ERR(map->sgt);
+ goto free;
+ }
+
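+ /* the chunk count is carried in err to the common exit path below */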
+ err = sgt_dma_count_chunks(map->sgt);
+ map->size = gem->size;
+
+ goto out;
}
/*
* If we don't have a mapping for this buffer yet, return an SG table
* so that host1x can do the mapping for us via the DMA API.
*/
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- return ERR_PTR(-ENOMEM);
+ map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+ if (!map->sgt) {
+ err = -ENOMEM;
+ goto free;
+ }
if (obj->pages) {
/*
* If the buffer object was allocated from the explicit IOMMU
* API code paths, construct an SG table from the pages.
*/
- err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
- 0, obj->gem.size, GFP_KERNEL);
- if (err < 0)
- goto free;
- } else if (obj->sgt) {
- /*
- * If the buffer object already has an SG table but no pages
- * were allocated for it, it means the buffer was imported and
- * the SG table needs to be copied to avoid overwriting any
- * other potential users of the original SG table.
- */
- err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
- obj->sgt->orig_nents, GFP_KERNEL);
+ err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
+ GFP_KERNEL);
if (err < 0)
goto free;
} else {
@@ -111,25 +122,53 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
* not imported, it had to be allocated with the DMA API, so
* the DMA API helper can be used.
*/
- err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
- obj->gem.size);
+ err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
if (err < 0)
goto free;
}
- return sgt;
+ err = dma_map_sgtable(dev, map->sgt, direction, 0);
+ if (err)
+ goto free_sgt;
+
+out:
+ /*
+ * If we've manually mapped the buffer object through the IOMMU, make sure to return the
+ * existing IOVA address of our mapping.
+ */
+ if (!obj->mm) {
+ map->phys = sg_dma_address(map->sgt->sgl);
+ map->chunks = err;
+ } else {
+ map->phys = obj->iova;
+ map->chunks = 1;
+ }
+
+ map->size = gem->size;
+ return map;
+
+free_sgt:
+ sg_free_table(map->sgt);
free:
- kfree(sgt);
+ kfree(map->sgt);
+ kfree(map);
return ERR_PTR(err);
}
-static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
+static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
- if (sgt) {
- sg_free_table(sgt);
- kfree(sgt);
+ if (map->attach) {
+ dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
+ dma_buf_detach(map->attach->dmabuf, map->attach);
+ } else {
+ dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
}
+
+ host1x_bo_put(map->bo);
+ kfree(map);
}
static void *tegra_bo_mmap(struct host1x_bo *bo)
@@ -452,8 +491,18 @@ free:
void tegra_bo_free_object(struct drm_gem_object *gem)
{
struct tegra_drm *tegra = gem->dev->dev_private;
+ struct host1x_bo_mapping *mapping, *tmp;
struct tegra_bo *bo = to_tegra_bo(gem);
+ /* remove all mappings of this buffer object from any caches */
+ list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
+ if (mapping->cache)
+ host1x_bo_unpin(mapping);
+ else
+ dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
+ dev_name(mapping->dev));
+ }
+
if (tegra->domain)
tegra_bo_iommu_unmap(tegra, bo);
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index de288cba3905..e3bb4c99ed39 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -4,14 +4,25 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/common.h>
#include "drm.h"
#include "gem.h"
#include "gr2d.h"
+enum {
+ RST_MC,
+ RST_GR2D,
+ RST_GR2D_MAX,
+};
+
struct gr2d_soc {
unsigned int version;
};
@@ -21,6 +32,9 @@ struct gr2d {
struct host1x_channel *channel;
struct clk *clk;
+ struct reset_control_bulk_data resets[RST_GR2D_MAX];
+ unsigned int nresets;
+
const struct gr2d_soc *soc;
DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
@@ -56,15 +70,22 @@ static int gr2d_init(struct host1x_client *client)
goto free;
}
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 200);
+
err = tegra_drm_register_client(dev->dev_private, drm);
if (err < 0) {
dev_err(client->dev, "failed to register client: %d\n", err);
- goto detach;
+ goto disable_rpm;
}
return 0;
-detach:
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_client_iommu_detach(client);
free:
host1x_syncpt_put(client->syncpts[0]);
@@ -85,10 +106,15 @@ static int gr2d_exit(struct host1x_client *client)
if (err < 0)
return err;
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_client_iommu_detach(client);
host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
+ gr2d->channel = NULL;
+
return 0;
}
@@ -190,6 +216,27 @@ static const u32 gr2d_addr_regs[] = {
GR2D_VA_BASE_ADDR_SB,
};
+static int gr2d_get_resets(struct device *dev, struct gr2d *gr2d)
+{
+ int err;
+
+ gr2d->resets[RST_MC].id = "mc";
+ gr2d->resets[RST_GR2D].id = "2d";
+ gr2d->nresets = RST_GR2D_MAX;
+
+ err = devm_reset_control_bulk_get_optional_exclusive_released(
+ dev, gr2d->nresets, gr2d->resets);
+ if (err) {
+ dev_err(dev, "failed to get reset: %d\n", err);
+ return err;
+ }
+
+ if (WARN_ON(!gr2d->resets[RST_GR2D].rstc))
+ return -ENOENT;
+
+ return 0;
+}
+
static int gr2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -202,6 +249,8 @@ static int gr2d_probe(struct platform_device *pdev)
if (!gr2d)
return -ENOMEM;
+ platform_set_drvdata(pdev, gr2d);
+
gr2d->soc = of_device_get_match_data(dev);
syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
@@ -214,11 +263,9 @@ static int gr2d_probe(struct platform_device *pdev)
return PTR_ERR(gr2d->clk);
}
- err = clk_prepare_enable(gr2d->clk);
- if (err) {
- dev_err(dev, "cannot turn on clock\n");
+ err = gr2d_get_resets(dev, gr2d);
+ if (err)
return err;
- }
INIT_LIST_HEAD(&gr2d->client.base.list);
gr2d->client.base.ops = &gr2d_client_ops;
@@ -231,10 +278,13 @@ static int gr2d_probe(struct platform_device *pdev)
gr2d->client.version = gr2d->soc->version;
gr2d->client.ops = &gr2d_ops;
+ err = devm_tegra_core_dev_init_opp_table_common(dev);
+ if (err)
+ return err;
+
err = host1x_client_register(&gr2d->client.base);
if (err < 0) {
dev_err(dev, "failed to register host1x client: %d\n", err);
- clk_disable_unprepare(gr2d->clk);
return err;
}
@@ -242,8 +292,6 @@ static int gr2d_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
- platform_set_drvdata(pdev, gr2d);
-
return 0;
}
@@ -259,15 +307,100 @@ static int gr2d_remove(struct platform_device *pdev)
return err;
}
+ return 0;
+}
+
+static int __maybe_unused gr2d_runtime_suspend(struct device *dev)
+{
+ struct gr2d *gr2d = dev_get_drvdata(dev);
+ int err;
+
+ host1x_channel_stop(gr2d->channel);
+ reset_control_bulk_release(gr2d->nresets, gr2d->resets);
+
+ /*
+ * The GR2D module shouldn't be reset while the hardware is idling,
+ * otherwise host1x's cmdproc will get stuck trying to access any G2
+ * register after reset. The GR2D module can be either hot-reset or
+ * reset after power-gating of the HEG partition. Hence we only put
+ * the memory client part of the module in reset; the HEG GENPD will
+ * take care of resetting the GR2D module across power-gating.
+ *
+ * On Tegra20 there is no HEG partition, but it's okay to have an
+ * undetermined h/w state since userspace is expected to reprogram
+ * the state on each job submission anyway.
+ */
+ err = reset_control_acquire(gr2d->resets[RST_MC].rstc);
+ if (err) {
+ dev_err(dev, "failed to acquire MC reset: %d\n", err);
+ goto acquire_reset;
+ }
+
+ err = reset_control_assert(gr2d->resets[RST_MC].rstc);
+ reset_control_release(gr2d->resets[RST_MC].rstc);
+ if (err) {
+ dev_err(dev, "failed to assert MC reset: %d\n", err);
+ goto acquire_reset;
+ }
+
clk_disable_unprepare(gr2d->clk);
return 0;
+
+acquire_reset:
+ reset_control_bulk_acquire(gr2d->nresets, gr2d->resets);
+ reset_control_bulk_deassert(gr2d->nresets, gr2d->resets);
+
+ return err;
}
+static int __maybe_unused gr2d_runtime_resume(struct device *dev)
+{
+ struct gr2d *gr2d = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_bulk_acquire(gr2d->nresets, gr2d->resets);
+ if (err) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(gr2d->clk);
+ if (err) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto release_reset;
+ }
+
+ usleep_range(2000, 4000);
+
+ /* the bulk reset array deasserts both the 2D memory client and the 2D module itself */
+ err = reset_control_bulk_deassert(gr2d->nresets, gr2d->resets);
+ if (err) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(gr2d->clk);
+release_reset:
+ reset_control_bulk_release(gr2d->nresets, gr2d->resets);
+
+ return err;
+}
+
+static const struct dev_pm_ops tegra_gr2d_pm = {
+ SET_RUNTIME_PM_OPS(gr2d_runtime_suspend, gr2d_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
struct platform_driver tegra_gr2d_driver = {
.driver = {
.name = "tegra-gr2d",
.of_match_table = gr2d_match,
+ .pm = &tegra_gr2d_pm,
},
.probe = gr2d_probe,
.remove = gr2d_remove,
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 24442ade0da3..a1fd3113ea96 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -5,32 +5,47 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>
#include "drm.h"
#include "gem.h"
#include "gr3d.h"
+enum {
+ RST_MC,
+ RST_GR3D,
+ RST_MC2,
+ RST_GR3D2,
+ RST_GR3D_MAX,
+};
+
struct gr3d_soc {
unsigned int version;
+ unsigned int num_clocks;
+ unsigned int num_resets;
};
struct gr3d {
struct tegra_drm_client client;
struct host1x_channel *channel;
- struct clk *clk_secondary;
- struct clk *clk;
- struct reset_control *rst_secondary;
- struct reset_control *rst;
const struct gr3d_soc *soc;
+ struct clk_bulk_data *clocks;
+ unsigned int nclocks;
+ struct reset_control_bulk_data resets[RST_GR3D_MAX];
+ unsigned int nresets;
DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};
@@ -65,15 +80,22 @@ static int gr3d_init(struct host1x_client *client)
goto free;
}
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 200);
+
err = tegra_drm_register_client(dev->dev_private, drm);
if (err < 0) {
dev_err(client->dev, "failed to register client: %d\n", err);
- goto detach;
+ goto disable_rpm;
}
return 0;
-detach:
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_client_iommu_detach(client);
free:
host1x_syncpt_put(client->syncpts[0]);
@@ -93,10 +115,15 @@ static int gr3d_exit(struct host1x_client *client)
if (err < 0)
return err;
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_client_iommu_detach(client);
host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
+ gr3d->channel = NULL;
+
return 0;
}
@@ -155,14 +182,20 @@ static const struct tegra_drm_client_ops gr3d_ops = {
static const struct gr3d_soc tegra20_gr3d_soc = {
.version = 0x20,
+ .num_clocks = 1,
+ .num_resets = 2,
};
static const struct gr3d_soc tegra30_gr3d_soc = {
.version = 0x30,
+ .num_clocks = 2,
+ .num_resets = 4,
};
static const struct gr3d_soc tegra114_gr3d_soc = {
.version = 0x35,
+ .num_clocks = 1,
+ .num_resets = 2,
};
static const struct of_device_id tegra_gr3d_match[] = {
@@ -278,69 +311,216 @@ static const u32 gr3d_addr_regs[] = {
GR3D_GLOBAL_SAMP23SURFADDR(15),
};
-static int gr3d_probe(struct platform_device *pdev)
+static int gr3d_power_up_legacy_domain(struct device *dev, const char *name,
+ unsigned int id)
{
- struct device_node *np = pdev->dev.of_node;
- struct host1x_syncpt **syncpts;
- struct gr3d *gr3d;
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+ struct reset_control *reset;
+ struct clk *clk;
unsigned int i;
int err;
- gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
- if (!gr3d)
- return -ENOMEM;
-
- gr3d->soc = of_device_get_match_data(&pdev->dev);
+ /*
+ * The Tegra20 device tree doesn't specify a name for the 3D clock,
+ * and there is only one clock on Tegra20. Tegra30+ device trees
+ * always specify names for the clocks.
+ */
+ if (gr3d->nclocks == 1) {
+ if (id == TEGRA_POWERGATE_3D1)
+ return 0;
+
+ clk = gr3d->clocks[0].clk;
+ } else {
+ for (i = 0; i < gr3d->nclocks; i++) {
+ if (WARN_ON(!gr3d->clocks[i].id))
+ continue;
+
+ if (!strcmp(gr3d->clocks[i].id, name)) {
+ clk = gr3d->clocks[i].clk;
+ break;
+ }
+ }
- syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
- if (!syncpts)
- return -ENOMEM;
+ if (WARN_ON(i == gr3d->nclocks))
+ return -EINVAL;
+ }
- gr3d->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(gr3d->clk)) {
- dev_err(&pdev->dev, "cannot get clock\n");
- return PTR_ERR(gr3d->clk);
+ /*
+ * We use an array of resets which includes the MC resets, and an MC
+ * reset must not be asserted while the hardware is gated because
+ * MC flushing fails for gated hardware. Hence for the legacy PD we
+ * request the individual reset separately.
+ */
+ reset = reset_control_get_exclusive_released(dev, name);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ err = reset_control_acquire(reset);
+ if (err) {
+ dev_err(dev, "failed to acquire %s reset: %d\n", name, err);
+ } else {
+ err = tegra_powergate_sequence_power_up(id, clk, reset);
+ reset_control_release(reset);
}
- gr3d->rst = devm_reset_control_get(&pdev->dev, "3d");
- if (IS_ERR(gr3d->rst)) {
- dev_err(&pdev->dev, "cannot get reset\n");
- return PTR_ERR(gr3d->rst);
+ reset_control_put(reset);
+ if (err)
+ return err;
+
+ /*
+ * tegra_powergate_sequence_power_up() leaves the clock enabled,
+ * while GENPD does not. Disable it here to keep the enable count
+ * balanced.
+ */
+ clk_disable_unprepare(clk);
+
+ return 0;
+}
+
+static void gr3d_del_link(void *link)
+{
+ device_link_del(link);
+}
+
+static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
+{
+ static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL };
+ const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
+ struct device **opp_virt_devs, *pd_dev;
+ struct device_link *link;
+ unsigned int i;
+ int err;
+
+ err = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (err < 0) {
+ if (err != -ENOENT)
+ return err;
+
+ /*
+ * Older device trees don't use GENPD, so in this case the power
+ * domains must be toggled manually.
+ */
+ err = gr3d_power_up_legacy_domain(dev, "3d",
+ TEGRA_POWERGATE_3D);
+ if (err)
+ return err;
+
+ err = gr3d_power_up_legacy_domain(dev, "3d2",
+ TEGRA_POWERGATE_3D1);
+ if (err)
+ return err;
+
+ return 0;
}
- if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
- gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
- if (IS_ERR(gr3d->clk_secondary)) {
- dev_err(&pdev->dev, "cannot get secondary clock\n");
- return PTR_ERR(gr3d->clk_secondary);
+ /*
+ * The PM domain core automatically attaches a single power domain,
+ * otherwise it skips attaching completely. We have a single domain
+ * on Tegra20 and two domains on Tegra30+.
+ */
+ if (dev->pm_domain)
+ return 0;
+
+ err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs);
+ if (err)
+ return err;
+
+ for (i = 0; opp_genpd_names[i]; i++) {
+ pd_dev = opp_virt_devs[i];
+ if (!pd_dev) {
+ dev_err(dev, "failed to get %s power domain\n",
+ opp_genpd_names[i]);
+ return -EINVAL;
}
- gr3d->rst_secondary = devm_reset_control_get(&pdev->dev,
- "3d2");
- if (IS_ERR(gr3d->rst_secondary)) {
- dev_err(&pdev->dev, "cannot get secondary reset\n");
- return PTR_ERR(gr3d->rst_secondary);
+ link = device_link_add(dev, pd_dev, link_flags);
+ if (!link) {
+ dev_err(dev, "failed to link to %s\n", dev_name(pd_dev));
+ return -EINVAL;
}
+
+ err = devm_add_action_or_reset(dev, gr3d_del_link, link);
+ if (err)
+ return err;
}
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk,
- gr3d->rst);
+ return 0;
+}
+
+static int gr3d_get_clocks(struct device *dev, struct gr3d *gr3d)
+{
+ int err;
+
+ err = devm_clk_bulk_get_all(dev, &gr3d->clocks);
if (err < 0) {
- dev_err(&pdev->dev, "failed to power up 3D unit\n");
+ dev_err(dev, "failed to get clock: %d\n", err);
return err;
}
+ gr3d->nclocks = err;
- if (gr3d->clk_secondary) {
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
- gr3d->clk_secondary,
- gr3d->rst_secondary);
- if (err < 0) {
- dev_err(&pdev->dev,
- "failed to power up secondary 3D unit\n");
- return err;
- }
+ if (gr3d->nclocks != gr3d->soc->num_clocks) {
+ dev_err(dev, "invalid number of clocks: %u\n", gr3d->nclocks);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int gr3d_get_resets(struct device *dev, struct gr3d *gr3d)
+{
+ int err;
+
+ gr3d->resets[RST_MC].id = "mc";
+ gr3d->resets[RST_MC2].id = "mc2";
+ gr3d->resets[RST_GR3D].id = "3d";
+ gr3d->resets[RST_GR3D2].id = "3d2";
+ gr3d->nresets = gr3d->soc->num_resets;
+
+ err = devm_reset_control_bulk_get_optional_exclusive_released(
+ dev, gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to get reset: %d\n", err);
+ return err;
}
+ if (WARN_ON(!gr3d->resets[RST_GR3D].rstc) ||
+ WARN_ON(!gr3d->resets[RST_GR3D2].rstc && gr3d->nresets == 4))
+ return -ENOENT;
+
+ return 0;
+}
+
+static int gr3d_probe(struct platform_device *pdev)
+{
+ struct host1x_syncpt **syncpts;
+ struct gr3d *gr3d;
+ unsigned int i;
+ int err;
+
+ gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
+ if (!gr3d)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gr3d);
+
+ gr3d->soc = of_device_get_match_data(&pdev->dev);
+
+ syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ err = gr3d_get_clocks(&pdev->dev, gr3d);
+ if (err)
+ return err;
+
+ err = gr3d_get_resets(&pdev->dev, gr3d);
+ if (err)
+ return err;
+
+ err = gr3d_init_power(&pdev->dev, gr3d);
+ if (err)
+ return err;
+
INIT_LIST_HEAD(&gr3d->client.base.list);
gr3d->client.base.ops = &gr3d_client_ops;
gr3d->client.base.dev = &pdev->dev;
@@ -352,6 +532,10 @@ static int gr3d_probe(struct platform_device *pdev)
gr3d->client.version = gr3d->soc->version;
gr3d->client.ops = &gr3d_ops;
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
err = host1x_client_register(&gr3d->client.base);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
@@ -363,8 +547,6 @@ static int gr3d_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
- platform_set_drvdata(pdev, gr3d);
-
return 0;
}
@@ -380,23 +562,80 @@ static int gr3d_remove(struct platform_device *pdev)
return err;
}
- if (gr3d->clk_secondary) {
- reset_control_assert(gr3d->rst_secondary);
- tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
- clk_disable_unprepare(gr3d->clk_secondary);
+ return 0;
+}
+
+static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
+{
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+ int err;
+
+ host1x_channel_stop(gr3d->channel);
+
+ err = reset_control_bulk_assert(gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ usleep_range(10, 20);
+
+ /*
+ * Older device trees don't specify the MC resets, and power-gating
+ * can't be done safely in that case. Hence keep the power ungated
+ * for older DTBs; for newer DTBs, GENPD performs the power-gating.
+ */
+
+ clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
+ reset_control_bulk_release(gr3d->nresets, gr3d->resets);
+
+ return 0;
+}
+
+static int __maybe_unused gr3d_runtime_resume(struct device *dev)
+{
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_bulk_acquire(gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_bulk_prepare_enable(gr3d->nclocks, gr3d->clocks);
+ if (err) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto release_reset;
}
- reset_control_assert(gr3d->rst);
- tegra_powergate_power_off(TEGRA_POWERGATE_3D);
- clk_disable_unprepare(gr3d->clk);
+ err = reset_control_bulk_deassert(gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
return 0;
+
+disable_clk:
+ clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
+release_reset:
+ reset_control_bulk_release(gr3d->nresets, gr3d->resets);
+
+ return err;
}
+static const struct dev_pm_ops tegra_gr3d_pm = {
+ SET_RUNTIME_PM_OPS(gr3d_runtime_suspend, gr3d_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
struct platform_driver tegra_gr3d_driver = {
.driver = {
.name = "tegra-gr3d",
.of_match_table = tegra_gr3d_match,
+ .pm = &tegra_gr3d_pm,
},
.probe = gr3d_probe,
.remove = gr3d_remove,
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index e5d2a4026028..8845af5d325f 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -11,10 +11,14 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
+#include <sound/hdmi-codec.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
@@ -78,6 +82,9 @@ struct tegra_hdmi {
bool dvi;
struct drm_info_list *debugfs_files;
+
+ struct platform_device *audio_pdev;
+ struct mutex audio_lock;
};
static inline struct tegra_hdmi *
@@ -360,6 +367,18 @@ static const struct tmds_config tegra124_tmds_config[] = {
},
};
+static void tegra_hdmi_audio_lock(struct tegra_hdmi *hdmi)
+{
+ mutex_lock(&hdmi->audio_lock);
+ disable_irq(hdmi->irq);
+}
+
+static void tegra_hdmi_audio_unlock(struct tegra_hdmi *hdmi)
+{
+ enable_irq(hdmi->irq);
+ mutex_unlock(&hdmi->audio_lock);
+}
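
These helpers pair a mutex with disable_irq(): the mutex serializes process-context callers, while disable_irq(), which waits for any running handler to finish, shuts out the SCRATCH0 interrupt handler that also reprograms the audio path; unlike a spinlock, this still allows the critical section to sleep. A sketch of a typical caller, modelled on the hw_params callback added further down (rate is an assumed local):

        tegra_hdmi_audio_lock(hdmi);

        hdmi->format.sample_rate = rate; /* shared with the IRQ handler */
        if (hdmi->pixel_clock && !hdmi->dvi)
                err = tegra_hdmi_reconfigure_audio(hdmi);

        tegra_hdmi_audio_unlock(hdmi);
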
+
static int
tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock,
struct tegra_hdmi_audio_config *config)
@@ -829,6 +848,23 @@ static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
}
+static int tegra_hdmi_reconfigure_audio(struct tegra_hdmi *hdmi)
+{
+ int err;
+
+ err = tegra_hdmi_setup_audio(hdmi);
+ if (err < 0) {
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ } else {
+ tegra_hdmi_setup_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio(hdmi);
+ }
+
+ return err;
+}
+
static bool tegra_output_is_hdmi(struct tegra_output *output)
{
struct edid *edid;
@@ -1135,6 +1171,8 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
u32 value;
int err;
+ tegra_hdmi_audio_lock(hdmi);
+
/*
* The following accesses registers of the display controller, so make
* sure it's only executed when the output is attached to one.
@@ -1159,6 +1197,10 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
+ hdmi->pixel_clock = 0;
+
+ tegra_hdmi_audio_unlock(hdmi);
+
err = host1x_client_suspend(&hdmi->client);
if (err < 0)
dev_err(hdmi->dev, "failed to suspend: %d\n", err);
@@ -1182,6 +1224,8 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
return;
}
+ tegra_hdmi_audio_lock(hdmi);
+
/*
* Enable and unmask the HDA codec SCRATCH0 register interrupt. This
* is used for interoperability between the HDA codec driver and the
@@ -1195,7 +1239,7 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
- err = clk_set_rate(hdmi->clk, hdmi->pixel_clock);
+ err = dev_pm_opp_set_rate(hdmi->dev, hdmi->pixel_clock);
if (err < 0) {
dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
err);
@@ -1387,6 +1431,8 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
}
/* TODO: add HDCP support */
+
+ tegra_hdmi_audio_unlock(hdmi);
}
static int
@@ -1416,6 +1462,91 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
.atomic_check = tegra_hdmi_encoder_atomic_check,
};
+static int tegra_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct tegra_hdmi *hdmi = data;
+ int ret = 0;
+
+ tegra_hdmi_audio_lock(hdmi);
+
+ hdmi->format.sample_rate = hparms->sample_rate;
+ hdmi->format.channels = hparms->channels;
+
+ if (hdmi->pixel_clock && !hdmi->dvi)
+ ret = tegra_hdmi_reconfigure_audio(hdmi);
+
+ tegra_hdmi_audio_unlock(hdmi);
+
+ return ret;
+}
+
+static int tegra_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct tegra_hdmi *hdmi = data;
+ int ret;
+
+ ret = host1x_client_resume(&hdmi->client);
+ if (ret < 0)
+ dev_err(hdmi->dev, "failed to resume: %d\n", ret);
+
+ return ret;
+}
+
+static void tegra_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct tegra_hdmi *hdmi = data;
+ int ret;
+
+ tegra_hdmi_audio_lock(hdmi);
+
+ hdmi->format.sample_rate = 0;
+ hdmi->format.channels = 0;
+
+ tegra_hdmi_audio_unlock(hdmi);
+
+ ret = host1x_client_suspend(&hdmi->client);
+ if (ret < 0)
+ dev_err(hdmi->dev, "failed to suspend: %d\n", ret);
+}
+
+static const struct hdmi_codec_ops tegra_hdmi_codec_ops = {
+ .hw_params = tegra_hdmi_hw_params,
+ .audio_startup = tegra_hdmi_audio_startup,
+ .audio_shutdown = tegra_hdmi_audio_shutdown,
+};
+
+static int tegra_hdmi_codec_register(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_codec_pdata codec_data = {};
+
+ if (hdmi->config->has_hda)
+ return 0;
+
+ codec_data.ops = &tegra_hdmi_codec_ops;
+ codec_data.data = hdmi;
+ codec_data.spdif = 1;
+
+ hdmi->audio_pdev = platform_device_register_data(hdmi->dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
+
+ hdmi->format.channels = 2;
+
+ return 0;
+}
+
+static void tegra_hdmi_codec_unregister(struct tegra_hdmi *hdmi)
+{
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
+}
+
static int tegra_hdmi_init(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
@@ -1453,28 +1584,47 @@ static int tegra_hdmi_init(struct host1x_client *client)
if (err < 0) {
dev_err(client->dev, "failed to enable HDMI regulator: %d\n",
err);
- return err;
+ goto output_exit;
}
err = regulator_enable(hdmi->pll);
if (err < 0) {
dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
- return err;
+ goto disable_hdmi;
}
err = regulator_enable(hdmi->vdd);
if (err < 0) {
dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
- return err;
+ goto disable_pll;
+ }
+
+ err = tegra_hdmi_codec_register(hdmi);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to register audio codec: %d\n", err);
+ goto disable_vdd;
}
return 0;
+
+disable_vdd:
+ regulator_disable(hdmi->vdd);
+disable_pll:
+ regulator_disable(hdmi->pll);
+disable_hdmi:
+ regulator_disable(hdmi->hdmi);
+output_exit:
+ tegra_output_exit(&hdmi->output);
+
+ return err;
}
static int tegra_hdmi_exit(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ tegra_hdmi_codec_unregister(hdmi);
+
tegra_output_exit(&hdmi->output);
regulator_disable(hdmi->vdd);
@@ -1599,7 +1749,6 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
{
struct tegra_hdmi *hdmi = data;
u32 value;
- int err;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS);
@@ -1614,16 +1763,7 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
tegra_hda_parse_format(format, &hdmi->format);
-
- err = tegra_hdmi_setup_audio(hdmi);
- if (err < 0) {
- tegra_hdmi_disable_audio_infoframe(hdmi);
- tegra_hdmi_disable_audio(hdmi);
- } else {
- tegra_hdmi_setup_audio_infoframe(hdmi);
- tegra_hdmi_enable_audio_infoframe(hdmi);
- tegra_hdmi_enable_audio(hdmi);
- }
+ tegra_hdmi_reconfigure_audio(hdmi);
} else {
tegra_hdmi_disable_audio_infoframe(hdmi);
tegra_hdmi_disable_audio(hdmi);
@@ -1651,6 +1791,8 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
hdmi->stereo = false;
hdmi->dvi = false;
+ mutex_init(&hdmi->audio_lock);
+
hdmi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hdmi->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
@@ -1732,7 +1874,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, hdmi);
- pm_runtime_enable(&pdev->dev);
+
+ err = devm_pm_runtime_enable(&pdev->dev);
+ if (err)
+ return err;
+
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
INIT_LIST_HEAD(&hdmi->client.list);
hdmi->client.ops = &hdmi_client_ops;
@@ -1753,8 +1902,6 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
int err;
- pm_runtime_disable(&pdev->dev);
-
err = host1x_client_unregister(&hdmi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
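
Taken together, the hdmi.c changes move the pixel clock over to the OPP framework: probe seeds the table with devm_tegra_core_dev_init_opp_table_common(), and the encoder-enable path then calls dev_pm_opp_set_rate() so that the core power rail can be scaled together with the clock. Condensed from the hunks above (error handling trimmed, names as in the patch):

        /* at probe time */
        err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
        if (err)
                return err;

        /* at mode-set time, replacing clk_set_rate(hdmi->clk, ...) */
        err = dev_pm_opp_set_rate(hdmi->dev, hdmi->pixel_clock);
        if (err < 0)
                dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n", err);
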
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 3efa1be07ff8..23c4b2115ed1 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -72,7 +72,6 @@ to_tegra_display_hub_state(struct drm_private_state *priv)
return container_of(priv, struct tegra_display_hub_state, base);
}
-struct tegra_dc;
struct tegra_plane;
int tegra_display_hub_prepare(struct tegra_display_hub *hub);
diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c
new file mode 100644
index 000000000000..79e1e88203cf
--- /dev/null
+++ b/drivers/gpu/drm/tegra/nvdec.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, NVIDIA Corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/pmc.h>
+
+#include "drm.h"
+#include "falcon.h"
+#include "vic.h"
+
+struct nvdec_config {
+ const char *firmware;
+ unsigned int version;
+ bool supports_sid;
+};
+
+struct nvdec {
+ struct falcon falcon;
+
+ void __iomem *regs;
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct device *dev;
+ struct clk *clk;
+
+ /* Platform configuration */
+ const struct nvdec_config *config;
+};
+
+static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
+{
+ return container_of(client, struct nvdec, client);
+}
+
+static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
+ unsigned int offset)
+{
+ writel(value, nvdec->regs + offset);
+}
+
+static int nvdec_boot(struct nvdec *nvdec)
+{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec = dev_iommu_fwspec_get(nvdec->dev);
+#endif
+ int err;
+
+#ifdef CONFIG_IOMMU_API
+ if (nvdec->config->supports_sid && spec) {
+ u32 value;
+
+ value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
+ nvdec_writel(nvdec, value, VIC_TFBIF_TRANSCFG);
+
+ if (spec->num_ids > 0) {
+ value = spec->ids[0] & 0xffff;
+
+ nvdec_writel(nvdec, value, VIC_THI_STREAMID0);
+ nvdec_writel(nvdec, value, VIC_THI_STREAMID1);
+ }
+ }
+#endif
+
+ err = falcon_boot(&nvdec->falcon);
+ if (err < 0)
+ return err;
+
+ err = falcon_wait_idle(&nvdec->falcon);
+ if (err < 0) {
+ dev_err(nvdec->dev, "falcon boot timed out\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int nvdec_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct nvdec *nvdec = to_nvdec(drm);
+ int err;
+
+ err = host1x_client_iommu_attach(client);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(nvdec->dev, "failed to attach to domain: %d\n", err);
+ return err;
+ }
+
+ nvdec->channel = host1x_channel_request(client);
+ if (!nvdec->channel) {
+ err = -ENOMEM;
+ goto detach;
+ }
+
+ client->syncpts[0] = host1x_syncpt_request(client, 0);
+ if (!client->syncpts[0]) {
+ err = -ENOMEM;
+ goto free_channel;
+ }
+
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 500);
+
+ err = tegra_drm_register_client(tegra, drm);
+ if (err < 0)
+ goto disable_rpm;
+
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent host1x device.
+ */
+ client->dev->dma_parms = client->host->dma_parms;
+
+ return 0;
+
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_syncpt_put(client->syncpts[0]);
+free_channel:
+ host1x_channel_put(nvdec->channel);
+detach:
+ host1x_client_iommu_detach(client);
+
+ return err;
+}
+
+static int nvdec_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct nvdec *nvdec = to_nvdec(drm);
+ int err;
+
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_syncpt_put(client->syncpts[0]);
+ host1x_channel_put(nvdec->channel);
+ host1x_client_iommu_detach(client);
+
+ nvdec->channel = NULL;
+
+ if (client->group) {
+ dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys,
+ nvdec->falcon.firmware.size, DMA_TO_DEVICE);
+ tegra_drm_free(tegra, nvdec->falcon.firmware.size,
+ nvdec->falcon.firmware.virt,
+ nvdec->falcon.firmware.iova);
+ } else {
+ dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size,
+ nvdec->falcon.firmware.virt,
+ nvdec->falcon.firmware.iova);
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops nvdec_client_ops = {
+ .init = nvdec_init,
+ .exit = nvdec_exit,
+};
+
+static int nvdec_load_firmware(struct nvdec *nvdec)
+{
+ struct host1x_client *client = &nvdec->client.base;
+ struct tegra_drm *tegra = nvdec->client.drm;
+ dma_addr_t iova;
+ size_t size;
+ void *virt;
+ int err;
+
+ if (nvdec->falcon.firmware.virt)
+ return 0;
+
+ err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware);
+ if (err < 0)
+ return err;
+
+ size = nvdec->falcon.firmware.size;
+
+ if (!client->group) {
+ virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
+
+ err = dma_mapping_error(nvdec->dev, iova);
+ if (err < 0)
+ return err;
+ } else {
+ virt = tegra_drm_alloc(tegra, size, &iova);
+ }
+
+ nvdec->falcon.firmware.virt = virt;
+ nvdec->falcon.firmware.iova = iova;
+
+ err = falcon_load_firmware(&nvdec->falcon);
+ if (err < 0)
+ goto cleanup;
+
+ /*
+ * In this case we have received an IOVA from the shared domain, so we
+ * need to make sure to get the physical address so that the DMA API
+ * knows what memory pages to flush the cache for.
+ */
+ if (client->group) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(nvdec->dev, phys);
+ if (err < 0)
+ goto cleanup;
+
+ nvdec->falcon.firmware.phys = phys;
+ }
+
+ return 0;
+
+cleanup:
+ if (!client->group)
+ dma_free_coherent(nvdec->dev, size, virt, iova);
+ else
+ tegra_drm_free(tegra, size, virt, iova);
+
+ return err;
+}
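
nvdec_load_firmware() takes one of two allocation paths: plain coherent DMA memory when the device is not part of a shared IOMMU group, or an allocation from the shared domain that must additionally be mapped so the DMA API can maintain the CPU caches for it. A condensed sketch of that split, reusing the function's locals (with dev standing for nvdec->dev) and with explicit checks on both branches; the patch itself defers the dma_map_single() until after the firmware image has been loaded:

        if (!client->group) {
                virt = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
                if (!virt)
                        return -ENOMEM;
        } else {
                virt = tegra_drm_alloc(tegra, size, &iova);
                if (IS_ERR(virt))
                        return PTR_ERR(virt);

                /* IOVA comes from the shared domain; map it for cache upkeep */
                phys = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, phys))
                        return -ENOMEM;
        }
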
+
+static __maybe_unused int nvdec_runtime_resume(struct device *dev)
+{
+ struct nvdec *nvdec = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(nvdec->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = nvdec_load_firmware(nvdec);
+ if (err < 0)
+ goto disable;
+
+ err = nvdec_boot(nvdec);
+ if (err < 0)
+ goto disable;
+
+ return 0;
+
+disable:
+ clk_disable_unprepare(nvdec->clk);
+ return err;
+}
+
+static __maybe_unused int nvdec_runtime_suspend(struct device *dev)
+{
+ struct nvdec *nvdec = dev_get_drvdata(dev);
+
+ host1x_channel_stop(nvdec->channel);
+
+ clk_disable_unprepare(nvdec->clk);
+
+ return 0;
+}
+
+static int nvdec_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct nvdec *nvdec = to_nvdec(client);
+
+ context->channel = host1x_channel_get(nvdec->channel);
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nvdec_close_channel(struct tegra_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static const struct tegra_drm_client_ops nvdec_ops = {
+ .open_channel = nvdec_open_channel,
+ .close_channel = nvdec_close_channel,
+ .submit = tegra_drm_submit,
+};
+
+#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"
+
+static const struct nvdec_config nvdec_t210_config = {
+ .firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE,
+ .version = 0x21,
+ .supports_sid = false,
+};
+
+#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin"
+
+static const struct nvdec_config nvdec_t186_config = {
+ .firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE,
+ .version = 0x18,
+ .supports_sid = true,
+};
+
+#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin"
+
+static const struct nvdec_config nvdec_t194_config = {
+ .firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE,
+ .version = 0x19,
+ .supports_sid = true,
+};
+
+static const struct of_device_id tegra_nvdec_of_match[] = {
+ { .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config },
+ { .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config },
+ { .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match);
+
+static int nvdec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct host1x_syncpt **syncpts;
+ struct nvdec *nvdec;
+ u32 host_class;
+ int err;
+
+ /* inherit DMA mask from host1x parent */
+ err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL);
+ if (!nvdec)
+ return -ENOMEM;
+
+ nvdec->config = of_device_get_match_data(dev);
+
+ syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(nvdec->regs))
+ return PTR_ERR(nvdec->regs);
+
+ nvdec->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(nvdec->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(nvdec->clk);
+ }
+
+ err = clk_set_rate(nvdec->clk, ULONG_MAX);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set clock rate\n");
+ return err;
+ }
+
+ err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class);
+ if (err < 0)
+ host_class = HOST1X_CLASS_NVDEC;
+
+ nvdec->falcon.dev = dev;
+ nvdec->falcon.regs = nvdec->regs;
+
+ err = falcon_init(&nvdec->falcon);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, nvdec);
+
+ INIT_LIST_HEAD(&nvdec->client.base.list);
+ nvdec->client.base.ops = &nvdec_client_ops;
+ nvdec->client.base.dev = dev;
+ nvdec->client.base.class = host_class;
+ nvdec->client.base.syncpts = syncpts;
+ nvdec->client.base.num_syncpts = 1;
+ nvdec->dev = dev;
+
+ INIT_LIST_HEAD(&nvdec->client.list);
+ nvdec->client.version = nvdec->config->version;
+ nvdec->client.ops = &nvdec_ops;
+
+ err = host1x_client_register(&nvdec->client.base);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ goto exit_falcon;
+ }
+
+ return 0;
+
+exit_falcon:
+ falcon_exit(&nvdec->falcon);
+
+ return err;
+}
+
+static int nvdec_remove(struct platform_device *pdev)
+{
+ struct nvdec *nvdec = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&nvdec->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ falcon_exit(&nvdec->falcon);
+
+ return 0;
+}
+
+static const struct dev_pm_ops nvdec_pm_ops = {
+ SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+struct platform_driver tegra_nvdec_driver = {
+ .driver = {
+ .name = "tegra-nvdec",
+ .of_match_table = tegra_nvdec_of_match,
+ .pm = &nvdec_pm_ops
+ },
+ .probe = nvdec_probe,
+ .remove = nvdec_remove,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE);
+#endif
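
One probe detail worth noting: clk_set_rate(nvdec->clk, ULONG_MAX), like the identical call added to vic.c below, is the usual idiom for requesting the highest rate the clock tree will grant, since the framework rounds the request down to what the clock and its constraints can actually provide. Illustrative use:

        err = clk_set_rate(clk, ULONG_MAX); /* request is rounded down to the max */
        if (err < 0)
                return err;

        dev_dbg(dev, "engine clock: %lu Hz\n", clk_get_rate(clk));
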
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 16a1cdc28657..321cb1f13da6 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -74,7 +74,7 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
for (i = 0; i < 3; i++) {
copy->iova[i] = DMA_MAPPING_ERROR;
- copy->sgt[i] = NULL;
+ copy->map[i] = NULL;
}
return &copy->base;
@@ -138,55 +138,37 @@ const struct drm_plane_funcs tegra_plane_funcs = {
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
unsigned int i;
int err;
for (i = 0; i < state->base.fb->format->num_planes; i++) {
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
- dma_addr_t phys_addr, *phys;
- struct sg_table *sgt;
+ struct host1x_bo_mapping *map;
- /*
- * If we're not attached to a domain, we already stored the
- * physical address when the buffer was allocated. If we're
- * part of a group that's shared between all display
- * controllers, we've also already mapped the framebuffer
- * through the SMMU. In both cases we can short-circuit the
- * code below and retrieve the stored IOV address.
- */
- if (!domain || dc->client.group)
- phys = &phys_addr;
- else
- phys = NULL;
-
- sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
+ map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
goto unpin;
}
- if (sgt) {
- err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
- if (err)
- goto unpin;
-
+ if (!dc->client.group) {
/*
* The display controller needs contiguous memory, so
* fail if the buffer is discontiguous and we fail to
* map its SG table to a single contiguous chunk of
* I/O virtual memory.
*/
- if (sgt->nents > 1) {
+ if (map->chunks > 1) {
err = -EINVAL;
goto unpin;
}
- state->iova[i] = sg_dma_address(sgt->sgl);
- state->sgt[i] = sgt;
+ state->iova[i] = map->phys;
} else {
- state->iova[i] = phys_addr;
+ state->iova[i] = bo->iova;
}
+
+ state->map[i] = map;
}
return 0;
@@ -195,15 +177,9 @@ unpin:
dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
while (i--) {
- struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
- struct sg_table *sgt = state->sgt[i];
-
- if (sgt)
- dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
-
- host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ host1x_bo_unpin(state->map[i]);
state->iova[i] = DMA_MAPPING_ERROR;
- state->sgt[i] = NULL;
+ state->map[i] = NULL;
}
return err;
@@ -214,15 +190,9 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
unsigned int i;
for (i = 0; i < state->base.fb->format->num_planes; i++) {
- struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
- struct sg_table *sgt = state->sgt[i];
-
- if (sgt)
- dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
-
- host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ host1x_bo_unpin(state->map[i]);
state->iova[i] = DMA_MAPPING_ERROR;
- state->sgt[i] = NULL;
+ state->map[i] = NULL;
}
}
@@ -230,11 +200,14 @@ int tegra_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct tegra_dc *dc = to_tegra_dc(state->crtc);
+ int err;
if (!state->fb)
return 0;
- drm_gem_plane_helper_prepare_fb(plane, state);
+ err = drm_gem_plane_helper_prepare_fb(plane, state);
+ if (err < 0)
+ return err;
return tegra_dc_pin(dc, to_tegra_plane_state(state));
}
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index d9470780c803..dfb20712fbd7 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -43,7 +43,7 @@ struct tegra_plane_legacy_blending_state {
struct tegra_plane_state {
struct drm_plane_state base;
- struct sg_table *sgt[3];
+ struct host1x_bo_mapping *map[3];
dma_addr_t iova[3];
struct tegra_bo_tiling tiling;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 606c78a2b988..ff8fce36d2aa 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -17,6 +17,8 @@ struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
+ struct clk *pll_d_out0;
+ struct clk *pll_d2_out0;
struct clk *clk_parent;
struct clk *clk;
};
@@ -116,13 +118,21 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
DISP_ORDER_RED_BLUE;
tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
- /* XXX: parameterize? */
- value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
- tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
-
tegra_dc_commit(rgb->dc);
}
+static bool tegra_rgb_pll_rate_change_allowed(struct tegra_rgb *rgb)
+{
+ if (!rgb->pll_d2_out0)
+ return false;
+
+ if (!clk_is_match(rgb->clk_parent, rgb->pll_d_out0) &&
+ !clk_is_match(rgb->clk_parent, rgb->pll_d2_out0))
+ return false;
+
+ return true;
+}
+
static int
tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -151,8 +161,17 @@ tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
* and hope that the desired frequency can be matched (or at least
* matched sufficiently close that the panel will still work).
*/
- div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2;
- pclk = 0;
+ if (tegra_rgb_pll_rate_change_allowed(rgb)) {
+ /*
+ * Set display controller clock to x2 of PCLK in order to
+ * produce higher resolution pulse positions.
+ */
+ div = 2;
+ pclk *= 2;
+ } else {
+ div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2;
+ pclk = 0;
+ }
err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent,
pclk, div);
@@ -210,6 +229,22 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
return err;
}
+ rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0");
+ if (IS_ERR(rgb->pll_d_out0)) {
+ err = PTR_ERR(rgb->pll_d_out0);
+ dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_pll_d2_out0) {
+ rgb->pll_d2_out0 = clk_get_sys(NULL, "pll_d2_out0");
+ if (IS_ERR(rgb->pll_d2_out0)) {
+ err = PTR_ERR(rgb->pll_d2_out0);
+ dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err);
+ return err;
+ }
+ }
+
dc->rgb = &rgb->output;
return 0;
@@ -217,9 +252,15 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
int tegra_dc_rgb_remove(struct tegra_dc *dc)
{
+ struct tegra_rgb *rgb;
+
if (!dc->rgb)
return 0;
+ rgb = to_rgb(dc->rgb);
+ clk_put(rgb->pll_d2_out0);
+ clk_put(rgb->pll_d_out0);
+
tegra_output_remove(dc->rgb);
dc->rgb = NULL;
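
Reading the arithmetic in the atomic_check change above: the fallback computes div from a fixed parent rate via div = 2 * parent_rate / pclk - 2, which is the formula pclk = parent_rate / (div / 2 + 1) solved for div; the new branch instead pins div to 2 and raises the PLL to twice the pixel clock, which the same formula maps straight back to pclk. A worked sketch (helper name is illustrative):

/* pclk = parent_rate / (div / 2 + 1)  =>  div = 2 * parent_rate / pclk - 2 */
static unsigned long dc_shift_clk_div(unsigned long parent_rate,
                                      unsigned long pclk)
{
        return (parent_rate * 2) / pclk - 2;
}

/* e.g. a parent at 144 MHz driving a 72 MHz panel: div = 2, i.e. parent / 2 */
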
diff --git a/drivers/gpu/drm/tegra/submit.c b/drivers/gpu/drm/tegra/submit.c
index 776f825df52f..6d6dd8c35475 100644
--- a/drivers/gpu/drm/tegra/submit.c
+++ b/drivers/gpu/drm/tegra/submit.c
@@ -64,33 +64,62 @@ static void gather_bo_put(struct host1x_bo *host_bo)
kref_put(&bo->ref, gather_bo_release);
}
-static struct sg_table *
-gather_bo_pin(struct device *dev, struct host1x_bo *host_bo, dma_addr_t *phys)
+static struct host1x_bo_mapping *
+gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
{
- struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
- struct sg_table *sgt;
+ struct gather_bo *gather = container_of(bo, struct gather_bo, base);
+ struct host1x_bo_mapping *map;
int err;
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
return ERR_PTR(-ENOMEM);
- err = dma_get_sgtable(bo->dev, sgt, bo->gather_data, bo->gather_data_dma,
- bo->gather_data_words * 4);
- if (err) {
- kfree(sgt);
- return ERR_PTR(err);
+ kref_init(&map->ref);
+ map->bo = host1x_bo_get(bo);
+ map->direction = direction;
+ map->dev = dev;
+
+ map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+ if (!map->sgt) {
+ err = -ENOMEM;
+ goto free;
}
- return sgt;
+ err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma,
+ gather->gather_data_words * 4);
+ if (err)
+ goto free_sgt;
+
+ err = dma_map_sgtable(dev, map->sgt, direction, 0);
+ if (err)
+ goto free_sgt;
+
+ map->phys = sg_dma_address(map->sgt->sgl);
+ map->size = gather->gather_data_words * 4;
+ map->chunks = err;
+
+ return map;
+
+free_sgt:
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+free:
+ kfree(map);
+ return ERR_PTR(err);
}
-static void gather_bo_unpin(struct device *dev, struct sg_table *sgt)
+static void gather_bo_unpin(struct host1x_bo_mapping *map)
{
- if (sgt) {
- sg_free_table(sgt);
- kfree(sgt);
- }
+ if (!map)
+ return;
+
+ dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+ host1x_bo_put(map->bo);
+
+ kfree(map);
}
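
The pin side now hands back a refcounted struct host1x_bo_mapping that carries everything unpin needs (owning device, DMA direction, sg table), so callers no longer juggle a separate sg_table and device pointer. As the plane.c and uapi.c hunks below show, usage reduces to a simple pair (sketch; the final cache argument may be NULL when no mapping cache is kept):

        struct host1x_bo_mapping *map;

        map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, NULL);
        if (IS_ERR(map))
                return PTR_ERR(map);

        /* ... use map->phys, map->size, map->chunks ... */

        host1x_bo_unpin(map);
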
static void *gather_bo_mmap(struct host1x_bo *host_bo)
@@ -475,8 +504,8 @@ static void release_job(struct host1x_job *job)
kfree(job_data->used_mappings);
kfree(job_data);
- if (pm_runtime_enabled(client->base.dev))
- pm_runtime_put_autosuspend(client->base.dev);
+ pm_runtime_mark_last_busy(client->base.dev);
+ pm_runtime_put_autosuspend(client->base.dev);
}
int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
@@ -560,12 +589,10 @@ int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
}
/* Boot engine. */
- if (pm_runtime_enabled(context->client->base.dev)) {
- err = pm_runtime_resume_and_get(context->client->base.dev);
- if (err < 0) {
- SUBMIT_ERR(context, "could not power up engine: %d", err);
- goto unpin_job;
- }
+ err = pm_runtime_resume_and_get(context->client->base.dev);
+ if (err < 0) {
+ SUBMIT_ERR(context, "could not power up engine: %d", err);
+ goto unpin_job;
}
job->user_data = job_data;
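
With runtime PM now enabled unconditionally at init time, the submit path can drop its pm_runtime_enabled() guards: every successful pm_runtime_resume_and_get() at submission is balanced in release_job() by a mark-last-busy plus an autosuspending put. The pattern in sketch form:

        err = pm_runtime_resume_and_get(dev); /* power the engine up */
        if (err < 0)
                return err;

        /* ... run the job ... */

        pm_runtime_mark_last_busy(dev);  /* restart the autosuspend timer */
        pm_runtime_put_autosuspend(dev); /* drop the usage count; suspend later */
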
diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c
index 690a339c52ec..9ab9179d2026 100644
--- a/drivers/gpu/drm/tegra/uapi.c
+++ b/drivers/gpu/drm/tegra/uapi.c
@@ -17,11 +17,7 @@ static void tegra_drm_mapping_release(struct kref *ref)
struct tegra_drm_mapping *mapping =
container_of(ref, struct tegra_drm_mapping, ref);
- if (mapping->sgt)
- dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
- DMA_ATTR_SKIP_CPU_SYNC);
-
- host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
+ host1x_bo_unpin(mapping->map);
host1x_bo_put(mapping->bo);
kfree(mapping);
@@ -159,6 +155,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
struct drm_tegra_channel_map *args = data;
struct tegra_drm_mapping *mapping;
struct tegra_drm_context *context;
+ enum dma_data_direction direction;
int err = 0;
if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
@@ -180,68 +177,53 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
kref_init(&mapping->ref);
- mapping->dev = context->client->base.dev;
mapping->bo = tegra_gem_lookup(file, args->handle);
if (!mapping->bo) {
err = -EINVAL;
- goto unlock;
+ goto free;
}
- if (context->client->base.group) {
- /* IOMMU domain managed directly using IOMMU API */
- host1x_bo_pin(mapping->dev, mapping->bo, &mapping->iova);
- } else {
- switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
- case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
- mapping->direction = DMA_BIDIRECTIONAL;
- break;
-
- case DRM_TEGRA_CHANNEL_MAP_WRITE:
- mapping->direction = DMA_FROM_DEVICE;
- break;
-
- case DRM_TEGRA_CHANNEL_MAP_READ:
- mapping->direction = DMA_TO_DEVICE;
- break;
+ switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
+ case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
+ direction = DMA_BIDIRECTIONAL;
+ break;
- default:
- return -EINVAL;
- }
+ case DRM_TEGRA_CHANNEL_MAP_WRITE:
+ direction = DMA_FROM_DEVICE;
+ break;
- mapping->sgt = host1x_bo_pin(mapping->dev, mapping->bo, NULL);
- if (IS_ERR(mapping->sgt)) {
- err = PTR_ERR(mapping->sgt);
- goto put_gem;
- }
+ case DRM_TEGRA_CHANNEL_MAP_READ:
+ direction = DMA_TO_DEVICE;
+ break;
- err = dma_map_sgtable(mapping->dev, mapping->sgt, mapping->direction,
- DMA_ATTR_SKIP_CPU_SYNC);
- if (err)
- goto unpin;
+ default:
+ err = -EINVAL;
+ goto put_gem;
+ }
- mapping->iova = sg_dma_address(mapping->sgt->sgl);
+ mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction, NULL);
+ if (IS_ERR(mapping->map)) {
+ err = PTR_ERR(mapping->map);
+ goto put_gem;
}
+ mapping->iova = mapping->map->phys;
mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
GFP_KERNEL);
if (err < 0)
- goto unmap;
+ goto unpin;
mutex_unlock(&fpriv->lock);
return 0;
-unmap:
- if (mapping->sgt) {
- dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
unpin:
- host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
+ host1x_bo_unpin(mapping->map);
put_gem:
host1x_bo_put(mapping->bo);
+free:
kfree(mapping);
unlock:
mutex_unlock(&fpriv->lock);
diff --git a/drivers/gpu/drm/tegra/uapi.h b/drivers/gpu/drm/tegra/uapi.h
index 12adad770ad3..92ff1e44ff15 100644
--- a/drivers/gpu/drm/tegra/uapi.h
+++ b/drivers/gpu/drm/tegra/uapi.h
@@ -27,10 +27,9 @@ struct tegra_drm_file {
struct tegra_drm_mapping {
struct kref ref;
- struct device *dev;
+ struct host1x_bo_mapping *map;
struct host1x_bo *bo;
- struct sg_table *sgt;
- enum dma_data_direction direction;
+
dma_addr_t iova;
dma_addr_t iova_end;
};
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index c02010ff2b7f..1e342fa3d27b 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
@@ -151,9 +152,13 @@ static int vic_init(struct host1x_client *client)
goto free_channel;
}
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 500);
+
err = tegra_drm_register_client(tegra, drm);
if (err < 0)
- goto free_syncpt;
+ goto disable_rpm;
/*
* Inherit the DMA parameters (such as maximum segment size) from the
@@ -163,7 +168,10 @@ static int vic_init(struct host1x_client *client)
return 0;
-free_syncpt:
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_syncpt_put(client->syncpts[0]);
free_channel:
host1x_channel_put(vic->channel);
@@ -188,10 +196,15 @@ static int vic_exit(struct host1x_client *client)
if (err < 0)
return err;
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
host1x_syncpt_put(client->syncpts[0]);
host1x_channel_put(vic->channel);
host1x_client_iommu_detach(client);
+ vic->channel = NULL;
+
if (client->group) {
dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
vic->falcon.firmware.size, DMA_TO_DEVICE);
@@ -232,12 +245,12 @@ static int vic_load_firmware(struct vic *vic)
if (!client->group) {
virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
-
- err = dma_mapping_error(vic->dev, iova);
- if (err < 0)
- return err;
+ if (!virt)
+ return -ENOMEM;
} else {
virt = tegra_drm_alloc(tegra, size, &iova);
+ if (IS_ERR(virt))
+ return PTR_ERR(virt);
}
vic->falcon.firmware.virt = virt;
@@ -315,6 +328,8 @@ static int vic_runtime_suspend(struct device *dev)
struct vic *vic = dev_get_drvdata(dev);
int err;
+ host1x_channel_stop(vic->channel);
+
err = reset_control_assert(vic->rst);
if (err < 0)
return err;
@@ -330,27 +345,17 @@ static int vic_open_channel(struct tegra_drm_client *client,
struct tegra_drm_context *context)
{
struct vic *vic = to_vic(client);
- int err;
-
- err = pm_runtime_resume_and_get(vic->dev);
- if (err < 0)
- return err;
context->channel = host1x_channel_get(vic->channel);
- if (!context->channel) {
- pm_runtime_put(vic->dev);
+ if (!context->channel)
return -ENOMEM;
- }
return 0;
}
static void vic_close_channel(struct tegra_drm_context *context)
{
- struct vic *vic = to_vic(context->client);
-
host1x_channel_put(context->channel);
- pm_runtime_put(vic->dev);
}
static const struct tegra_drm_client_ops vic_ops = {
@@ -441,6 +446,12 @@ static int vic_probe(struct platform_device *pdev)
return PTR_ERR(vic->clk);
}
+ err = clk_set_rate(vic->clk, ULONG_MAX);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set clock rate\n");
+ return err;
+ }
+
if (!dev->pm_domain) {
vic->rst = devm_reset_control_get(dev, "vic");
if (IS_ERR(vic->rst)) {
@@ -476,17 +487,8 @@ static int vic_probe(struct platform_device *pdev)
goto exit_falcon;
}
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev)) {
- err = vic_runtime_resume(&pdev->dev);
- if (err < 0)
- goto unregister_client;
- }
-
return 0;
-unregister_client:
- host1x_client_unregister(&vic->client.base);
exit_falcon:
falcon_exit(&vic->falcon);
@@ -505,11 +507,6 @@ static int vic_remove(struct platform_device *pdev)
return err;
}
- if (pm_runtime_enabled(&pdev->dev))
- pm_runtime_disable(&pdev->dev);
- else
- vic_runtime_suspend(&pdev->dev);
-
falcon_exit(&vic->falcon);
return 0;
@@ -517,6 +514,8 @@ static int vic_remove(struct platform_device *pdev)
static const struct dev_pm_ops vic_pm_ops = {
SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver tegra_vic_driver = {
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
index f790a5215302..bc4fa59b6fa9 100644
--- a/drivers/gpu/drm/tidss/Kconfig
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -3,7 +3,6 @@ config DRM_TIDSS
depends on DRM && OF
depends on ARM || ARM64 || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
The TI Keystone family SoCs introduced a new generation of
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index d620f35688da..7c784e90e40e 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -88,16 +88,11 @@ static int __maybe_unused tidss_resume(struct device *dev)
return drm_mode_config_helper_resume(&tidss->ddev);
}
-#ifdef CONFIG_PM
-
-static const struct dev_pm_ops tidss_pm_ops = {
- .runtime_suspend = tidss_pm_runtime_suspend,
- .runtime_resume = tidss_pm_runtime_resume,
+static __maybe_unused const struct dev_pm_ops tidss_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume)
+ SET_RUNTIME_PM_OPS(tidss_pm_runtime_suspend, tidss_pm_runtime_resume, NULL)
};
-#endif /* CONFIG_PM */
-
/* DRM device Information */
static void tidss_release(struct drm_device *ddev)
@@ -250,9 +245,7 @@ static struct platform_driver tidss_platform_driver = {
.shutdown = tidss_shutdown,
.driver = {
.name = "tidss",
-#ifdef CONFIG_PM
- .pm = &tidss_pm_ops,
-#endif
+ .pm = pm_ptr(&tidss_pm_ops),
.of_match_table = tidss_of_table,
.suppress_bind_attrs = true,
},
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 9f505a149990..e315591eb36b 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -3,7 +3,6 @@ config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 1ceb93fbdc50..712e0004e96e 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -3,7 +3,7 @@
config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
help
Choose this option if you have an ARC PGU controller.
@@ -71,7 +71,7 @@ config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -80,11 +80,24 @@ config TINYDRM_HX8357D
If M is selected the module will be called hx8357d.
+config TINYDRM_ILI9163
+ tristate "DRM support for ILI9163 display panels"
+ depends on DRM && SPI
+ select BACKLIGHT_CLASS_DEVICE
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DBI
+ help
+ DRM driver for the following Ilitek ILI9163 panels:
+ * NHD-1.8-128160EF 128x160 TFT
+
+ If M is selected the module will be called ili9163.
+
config TINYDRM_ILI9225
tristate "DRM support for ILI9225 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
help
DRM driver for the following Ilitek ILI9225 panels:
@@ -96,7 +109,7 @@ config TINYDRM_ILI9341
tristate "DRM support for ILI9341 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -109,7 +122,7 @@ config TINYDRM_ILI9486
tristate "DRM support for ILI9486 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -123,7 +136,7 @@ config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -134,7 +147,7 @@ config TINYDRM_REPAPER
tristate "DRM support for Pervasive Displays RePaper panels (V231)"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
help
DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021)
@@ -148,7 +161,7 @@ config TINYDRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
help
DRM driver for the following Sitronix ST7586 panels:
@@ -160,7 +173,7 @@ config TINYDRM_ST7735R
tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index e09942895c77..5d5505d40e7b 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
+obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 2ce3bd903b70..fc26a1ce11ee 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <linux/console.h>
#include <linux/pci.h>
#include <drm/drm_aperture.h>
@@ -719,7 +718,7 @@ static struct pci_driver bochs_pci_driver = {
static int __init bochs_init(void)
{
- if (vgacon_text_force() && bochs_modeset == -1)
+ if (drm_firmware_drivers_only() && bochs_modeset == -1)
return -EINVAL;
if (bochs_modeset == 0)
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 4611ec408506..c95d9ff7d600 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -16,7 +16,6 @@
* Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
*/
-#include <linux/console.h>
#include <linux/dma-buf-map.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -317,28 +316,28 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_
struct drm_rect *rect)
{
struct cirrus_device *cirrus = to_cirrus(fb->dev);
+ void __iomem *dst = cirrus->vram;
void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
int idx;
if (!drm_dev_enter(&cirrus->dev, &idx))
return -ENODEV;
- if (cirrus->cpp == fb->format->cpp[0])
- drm_fb_memcpy_dstclip(cirrus->vram, fb->pitches[0],
- vmap, fb, rect);
+ if (cirrus->cpp == fb->format->cpp[0]) {
+ dst += drm_fb_clip_offset(fb->pitches[0], fb->format, rect);
+ drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, rect);
- else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2)
- drm_fb_xrgb8888_to_rgb565_dstclip(cirrus->vram,
- cirrus->pitch,
- vmap, fb, rect, false);
+ } else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2) {
+ dst += drm_fb_clip_offset(cirrus->pitch, fb->format, rect);
+ drm_fb_xrgb8888_to_rgb565_toio(dst, cirrus->pitch, vmap, fb, rect, false);
- else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3)
- drm_fb_xrgb8888_to_rgb888_dstclip(cirrus->vram,
- cirrus->pitch,
- vmap, fb, rect);
+ } else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3) {
+ dst += drm_fb_clip_offset(cirrus->pitch, fb->format, rect);
+ drm_fb_xrgb8888_to_rgb888_toio(dst, cirrus->pitch, vmap, fb, rect);
- else
+ } else {
WARN_ON_ONCE("cpp mismatch");
+ }
drm_dev_exit(idx);
@@ -636,8 +635,9 @@ static struct pci_driver cirrus_pci_driver = {
static int __init cirrus_init(void)
{
- if (vgacon_text_force())
+ if (drm_firmware_drivers_only())
return -EINVAL;
+
return pci_register_driver(&cirrus_pci_driver);
}
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
new file mode 100644
index 000000000000..bcc181351236
--- /dev/null
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modeset_helper.h>
+
+#include <video/mipi_display.h>
+
+#define ILI9163_FRMCTR1 0xb1
+
+#define ILI9163_PWCTRL1 0xc0
+#define ILI9163_PWCTRL2 0xc1
+#define ILI9163_VMCTRL1 0xc5
+#define ILI9163_VMCTRL2 0xc7
+#define ILI9163_PWCTRLA 0xcb
+#define ILI9163_PWCTRLB 0xcf
+
+#define ILI9163_EN3GAM 0xf2
+
+#define ILI9163_MADCTL_BGR BIT(3)
+#define ILI9163_MADCTL_MV BIT(5)
+#define ILI9163_MADCTL_MX BIT(6)
+#define ILI9163_MADCTL_MY BIT(7)
+
+static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
+ u8 addr_mode;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
+ if (ret < 0)
+ goto out_exit;
+ if (ret == 1)
+ goto out_enable;
+
+ /* Gamma */
+ mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, 0x04);
+ mipi_dbi_command(dbi, ILI9163_EN3GAM, 0x00);
+
+ /* Frame Rate */
+ mipi_dbi_command(dbi, ILI9163_FRMCTR1, 0x0a, 0x14);
+
+ /* Power Control */
+ mipi_dbi_command(dbi, ILI9163_PWCTRL1, 0x0a, 0x00);
+ mipi_dbi_command(dbi, ILI9163_PWCTRL2, 0x02);
+
+ /* VCOM */
+ mipi_dbi_command(dbi, ILI9163_VMCTRL1, 0x2f, 0x3e);
+ mipi_dbi_command(dbi, ILI9163_VMCTRL2, 0x40);
+
+ /* Memory Access Control */
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(100);
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+ msleep(100);
+
+out_enable:
+ switch (dbidev->rotation) {
+ default:
+ addr_mode = ILI9163_MADCTL_MX | ILI9163_MADCTL_MY;
+ break;
+ case 90:
+ addr_mode = ILI9163_MADCTL_MX | ILI9163_MADCTL_MV;
+ break;
+ case 180:
+ addr_mode = 0;
+ break;
+ case 270:
+ addr_mode = ILI9163_MADCTL_MY | ILI9163_MADCTL_MV;
+ break;
+ }
+ addr_mode |= ILI9163_MADCTL_BGR;
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
+}
+
+static const struct drm_simple_display_pipe_funcs ili9163_pipe_funcs = {
+ .enable = yx240qv29_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = mipi_dbi_pipe_update,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode yx240qv29_mode = {
+ DRM_SIMPLE_MODE(128, 160, 28, 35),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9163_fops);
+
+static struct drm_driver ili9163_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &ili9163_fops,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "ili9163",
+ .desc = "Ilitek ILI9163",
+ .date = "20210208",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id ili9163_of_match[] = {
+ { .compatible = "newhaven,1.8-128160EF" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9163_of_match);
+
+static const struct spi_device_id ili9163_id[] = {
+ { "nhd-1.8-128160EF", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ili9163_id);
+
+static int ili9163_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
+ struct drm_device *drm;
+ struct mipi_dbi *dbi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ dbidev = devm_drm_dev_alloc(dev, &ili9163_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
+
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
+
+ spi_set_drvdata(spi, drm);
+
+ dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(dbi->reset);
+ }
+
+ dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
+ if (ret)
+ return ret;
+
+ ret = mipi_dbi_dev_init(dbidev, &ili9163_pipe_funcs, &yx240qv29_mode, rotation);
+ if (ret)
+ return ret;
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
+}
+
+static int ili9163_remove(struct spi_device *spi)
+{
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
+
+static void ili9163_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+}
+
+static struct spi_driver ili9163_spi_driver = {
+ .driver = {
+ .name = "ili9163",
+ .of_match_table = ili9163_of_match,
+ },
+ .id_table = ili9163_id,
+ .probe = ili9163_probe,
+ .remove = ili9163_remove,
+ .shutdown = ili9163_shutdown,
+};
+module_spi_driver(ili9163_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9163 DRM driver");
+MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 4d07b21a16e6..97a775c48cea 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -560,7 +560,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
if (ret)
goto out_free;
- drm_fb_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
+ drm_fb_xrgb8888_to_gray8(buf, 0, cma_obj->vaddr, fb, &clip);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 5a6e89825bc2..04146da2d1d8 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -2,6 +2,7 @@
#include <linux/clk.h>
#include <linux/of_clk.h>
+#include <linux/minmax.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -570,8 +571,8 @@ static const uint32_t simpledrm_default_formats[] = {
//DRM_FORMAT_XRGB1555,
//DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB888,
- //DRM_FORMAT_XRGB2101010,
- //DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_ARGB2101010,
};
static const uint64_t simpledrm_format_modifiers[] = {
@@ -641,16 +642,25 @@ simpledrm_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_framebuffer *fb = plane_state->fb;
void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */
struct drm_device *dev = &sdev->dev;
+ void __iomem *dst = sdev->screen_base;
+ struct drm_rect src_clip, dst_clip;
int idx;
if (!fb)
return;
+ drm_rect_fp_to_int(&src_clip, &plane_state->src);
+
+ dst_clip = plane_state->dst;
+ if (!drm_rect_intersect(&dst_clip, &src_clip))
+ return;
+
if (!drm_dev_enter(dev, &idx))
return;
- drm_fb_blit_dstclip(sdev->screen_base, sdev->pitch,
- sdev->format->format, vmap, fb);
+ dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip);
+ drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip);
+
drm_dev_exit(idx);
}
@@ -680,20 +690,25 @@ simpledrm_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */
struct drm_framebuffer *fb = plane_state->fb;
struct drm_device *dev = &sdev->dev;
- struct drm_rect clip;
+ void __iomem *dst = sdev->screen_base;
+ struct drm_rect src_clip, dst_clip;
int idx;
if (!fb)
return;
- if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &clip))
+ if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
+ return;
+
+ dst_clip = plane_state->dst;
+ if (!drm_rect_intersect(&dst_clip, &src_clip))
return;
if (!drm_dev_enter(dev, &idx))
return;
- drm_fb_blit_rect_dstclip(sdev->screen_base, sdev->pitch,
- sdev->format->format, vmap, fb, &clip);
+ dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip);
+ drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip);
drm_dev_exit(idx);
}
@@ -758,6 +773,7 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev)
struct drm_display_mode *mode = &sdev->mode;
struct drm_connector *connector = &sdev->connector;
struct drm_simple_display_pipe *pipe = &sdev->pipe;
+ unsigned long max_width, max_height;
const uint32_t *formats;
size_t nformats;
int ret;
@@ -766,10 +782,13 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev)
if (ret)
return ret;
+ max_width = max_t(unsigned long, mode->hdisplay, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, mode->vdisplay, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
dev->mode_config.min_width = mode->hdisplay;
- dev->mode_config.max_width = mode->hdisplay;
+ dev->mode_config.max_width = max_width;
dev->mode_config.min_height = mode->vdisplay;
- dev->mode_config.max_height = mode->vdisplay;
+ dev->mode_config.max_height = max_height;
dev->mode_config.prefer_shadow_fbdev = true;
dev->mode_config.preferred_depth = sdev->format->cpp[0] * 8;
dev->mode_config.funcs = &simpledrm_mode_config_funcs;
@@ -788,6 +807,8 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev)
if (ret)
return ret;
+ drm_plane_enable_fb_damage_clips(&pipe->plane);
+
drm_mode_config_reset(dev);
return 0;
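
Two of the simpledrm changes work together here: drm_plane_enable_fb_damage_clips() advertises damage-clip support so userspace can flush partial updates, and the update path merges those clips and blits only the intersection of the merged source damage with the plane's destination. A condensed sketch of the consumer side (blit_rect is a hypothetical helper):

        struct drm_rect src_clip, dst_clip;

        if (!drm_atomic_helper_damage_merged(old_state, new_state, &src_clip))
                return; /* nothing changed */

        dst_clip = plane_state->dst;
        if (!drm_rect_intersect(&dst_clip, &src_clip))
                return; /* damage lies outside the visible area */

        blit_rect(sdev, &dst_clip, &src_clip);
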
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index ad0faa8723c2..51b9b9fb3ead 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -73,7 +73,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
if (!buf)
return;
- drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, clip);
+ drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, clip);
src = buf;
for (y = clip->y1; y < clip->y2; y++) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 047adc42d9a0..db3dc7ef5382 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -727,6 +727,8 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
ret = ttm_bo_evict(bo, ctx);
if (locked)
ttm_bo_unreserve(bo);
+ else
+ ttm_bo_move_to_lru_tail_unlocked(bo);
ttm_bo_put(bo);
return ret;
@@ -1084,7 +1086,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
if (timeout == 0)
return -EBUSY;
- dma_resv_add_excl_fence(bo->base.resv, NULL);
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 0037eefe3239..a3ad7c9736ec 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -68,9 +68,11 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
#if defined(__i386__) || defined(__x86_64__)
if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
+#ifndef CONFIG_UML
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
-#endif
+#endif /* CONFIG_UML */
+#endif /* __i386__ || __x86_64__ */
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__mips__)
if (caching == ttm_write_combined)
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 67d68a4a8640..072e0baf2ab4 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -128,15 +128,17 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
};
/**
- * ttm_range_man_init
+ * ttm_range_man_init_nocheck - Initialise a generic range manager for the
+ * selected memory type.
*
* @bdev: ttm device
* @type: memory manager type
* @use_tt: if the memory manager uses tt
* @p_size: size of area to be managed in pages.
*
- * Initialise a generic range manager for the selected memory type.
* The range manager is installed for this device in the type slot.
+ *
+ * Return: %0 on success or a negative error code on failure
*/
int ttm_range_man_init_nocheck(struct ttm_device *bdev,
unsigned type, bool use_tt,
@@ -166,12 +168,13 @@ int ttm_range_man_init_nocheck(struct ttm_device *bdev,
EXPORT_SYMBOL(ttm_range_man_init_nocheck);
/**
- * ttm_range_man_fini
+ * ttm_range_man_fini_nocheck - Remove the generic range manager from a slot
+ * and tear it down.
*
* @bdev: ttm device
* @type: memory manager type
*
- * Remove the generic range manager from a slot and tear it down.
+ * Return: %0 on success or a negative error code on failure
*/
int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
unsigned type)
diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig
index e2d163c74ed6..47a7dbe6c114 100644
--- a/drivers/gpu/drm/tve200/Kconfig
+++ b/drivers/gpu/drm/tve200/Kconfig
@@ -8,7 +8,6 @@ config DRM_TVE200
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 6a8731ab9d7d..6e3113f419f4 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -47,18 +47,18 @@ void v3d_free_object(struct drm_gem_object *obj)
/* GPU execution may have dirtied any pages in the BO. */
bo->base.pages_mark_dirty_on_put = true;
- drm_gem_shmem_free_object(obj);
+ drm_gem_shmem_free(&bo->base);
}
static const struct drm_gem_object_funcs v3d_gem_funcs = {
.free = v3d_free_object,
- .print_info = drm_gem_shmem_print_info,
- .pin = drm_gem_shmem_pin,
- .unpin = drm_gem_shmem_unpin,
- .get_sg_table = drm_gem_shmem_get_sg_table,
- .vmap = drm_gem_shmem_vmap,
- .vunmap = drm_gem_shmem_vunmap,
- .mmap = drm_gem_shmem_mmap,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
};
/* gem_create_object function for allocating a BO struct and doing
@@ -70,11 +70,11 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
struct drm_gem_object *obj;
if (size == 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
- return NULL;
+ return ERR_PTR(-ENOMEM);
obj = &bo->base.base;
obj->funcs = &v3d_gem_funcs;
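
Because v3d_create_object() now reports failure through ERR_PTR() rather than NULL, core callers can propagate the precise errno instead of collapsing every failure. A hedged fragment of what a hypothetical caller looks like under that contract:

/* Hedged fragment: consuming a gem_create_object() hook that returns
 * ERR_PTR(-EINVAL/-ENOMEM) instead of NULL. */
struct drm_gem_object *obj = dev->driver->gem_create_object(dev, size);

if (IS_ERR(obj))
	return PTR_ERR(obj);	/* the original errno survives */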
@@ -95,7 +95,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
/* So far we pin the BO in the MMU for its lifetime, so use
* shmem's helper for getting a lifetime sgt.
*/
- sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
@@ -141,7 +141,7 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
return bo;
free_obj:
- drm_gem_shmem_free_object(&shmem_obj->base);
+ drm_gem_shmem_free(shmem_obj);
return ERR_PTR(ret);
}
@@ -159,7 +159,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
ret = v3d_bo_create_finish(obj);
if (ret) {
- drm_gem_shmem_free_object(obj);
+ drm_gem_shmem_free(&to_v3d_bo(obj)->base);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index e47ae40a865a..c7ed2e1cbab6 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -774,7 +774,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
- v3d_job_free, 0, 0, V3D_CACHE_CLEAN);
+ v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
if (ret)
goto fail;
@@ -1007,7 +1007,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
goto fail;
ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
- v3d_job_free, 0, 0, V3D_CACHE_CLEAN);
+ v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index a6c81af37345..f35d9e44c6b7 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -7,7 +7,6 @@
* Michael Thayer <michael.thayer@oracle.com>,
* Hans de Goede <hdegoede@redhat.com>
*/
-#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vt_kern.h>
@@ -193,10 +192,8 @@ static const struct drm_driver driver = {
static int __init vbox_init(void)
{
-#ifdef CONFIG_VGA_CONSOLE
- if (vgacon_text_force() && vbox_modeset == -1)
+ if (drm_firmware_drivers_only() && vbox_modeset == -1)
return -EINVAL;
-#endif
if (vbox_modeset == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index f28779715ccd..c9e8b3a63c62 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -127,8 +127,8 @@ int vbox_hw_init(struct vbox_private *vbox)
/* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1,
"vboxvideo-accel");
- if (!vbox->guest_pool)
- return -ENOMEM;
+ if (IS_ERR(vbox->guest_pool))
+ return PTR_ERR(vbox->guest_pool);
ret = gen_pool_add_virt(vbox->guest_pool,
(unsigned long)vbox->guest_heap,
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 345a5570a3da..de3424fed2fc 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -6,7 +6,6 @@ config DRM_VC4
depends on SND && SND_SOC
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_PANEL_BRIDGE
select SND_PCM
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index f642bd6e71ff..6d1281a343e9 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -177,7 +177,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL;
}
- drm_gem_cma_free_object(obj);
+ drm_gem_cma_free(&bo->base);
}
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
@@ -391,7 +391,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
- return NULL;
+ return ERR_PTR(-ENOMEM);
bo->madv = VC4_MADV_WILLNEED;
refcount_set(&bo->usecnt, 0);
@@ -720,7 +720,7 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
return -EINVAL;
}
- return drm_gem_cma_mmap(obj, vma);
+ return drm_gem_cma_mmap(&bo->base, vma);
}
static const struct vm_operations_struct vc4_vm_ops = {
@@ -732,8 +732,8 @@ static const struct vm_operations_struct vc4_vm_ops = {
static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
.free = vc4_free_object,
.export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_get_sg_table,
- .vmap = drm_gem_cma_vmap,
+ .get_sg_table = drm_gem_cma_object_get_sg_table,
+ .vmap = drm_gem_cma_object_vmap,
.mmap = vc4_gem_object_mmap,
.vm_ops = &vc4_vm_ops,
};
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 18f5009ce90e..287dbc89ad64 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -32,6 +32,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -42,6 +43,7 @@
#include <drm/drm_vblank.h>
#include "vc4_drv.h"
+#include "vc4_hdmi.h"
#include "vc4_regs.h"
#define HVS_FIFO_LATENCY_PIX 6
@@ -279,27 +281,15 @@ static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc,
* allows drivers to push pixels to more than one encoder from the
* same CRTC.
*/
-static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- struct drm_connector_state *(*get_state)(struct drm_atomic_state *state,
- struct drm_connector *connector))
+struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ struct drm_encoder *encoder;
- drm_connector_list_iter_begin(crtc->dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct drm_connector_state *conn_state = get_state(state, connector);
+ WARN_ON(hweight32(state->encoder_mask) > 1);
- if (!conn_state)
- continue;
-
- if (conn_state->crtc == crtc) {
- drm_connector_list_iter_end(&conn_iter);
- return connector->encoder;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
+ drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask)
+ return encoder;
return NULL;
}
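
With the encoder now derived from the CRTC state's encoder_mask, callers pass whichever crtc_state matches the phase of the commit. A hedged usage sketch mirroring the disable/enable paths later in this patch:

/* Hedged sketch: old state for disable, new state for enable. */
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);
struct drm_encoder *old_encoder = vc4_get_crtc_encoder(crtc, old_state);
struct drm_encoder *new_encoder = vc4_get_crtc_encoder(crtc, new_state);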
@@ -313,12 +303,11 @@ static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc)
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR);
}
-static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_atomic_state *state)
+static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
- drm_atomic_get_new_connector_state);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
@@ -496,8 +485,10 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
enum vc4_encoder_type encoder_type;
const struct vc4_pv_data *pv_data;
struct drm_encoder *encoder;
+ struct vc4_hdmi *vc4_hdmi;
unsigned encoder_sel;
int channel;
+ int ret;
if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
"brcm,bcm2711-pixelvalve2") ||
@@ -525,7 +516,20 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (WARN_ON(!encoder))
return 0;
- return vc4_crtc_disable(crtc, encoder, NULL, channel);
+ vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = vc4_crtc_disable(crtc, encoder, NULL, channel);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
+ if (ret)
+ return ret;
+
+ return 0;
}
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -534,10 +538,12 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
- struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
- drm_atomic_get_old_connector_state);
+ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, old_state);
struct drm_device *dev = crtc->dev;
+ drm_dbg(dev, "Disabling CRTC %s (%u) connected to Encoder %s (%u)",
+ crtc->name, crtc->base.id, encoder->name, encoder->base.id);
+
require_hvs_enabled(dev);
/* Disable vblank irq handling before crtc is disabled. */
@@ -562,12 +568,16 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
- drm_atomic_get_new_connector_state);
+ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, new_state);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ drm_dbg(dev, "Enabling CRTC %s (%u) connected to Encoder %s (%u)",
+ crtc->name, crtc->base.id, encoder->name, encoder->base.id);
+
require_hvs_enabled(dev);
/* Enable vblank irq handling before crtc is started otherwise
@@ -580,7 +590,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
if (vc4_encoder->pre_crtc_configure)
vc4_encoder->pre_crtc_configure(encoder, state);
- vc4_crtc_config_pv(crtc, state);
+ vc4_crtc_config_pv(crtc, encoder, state);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
@@ -649,12 +659,27 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct drm_connector *conn;
struct drm_connector_state *conn_state;
+ struct drm_encoder *encoder;
int ret, i;
ret = vc4_hvs_atomic_check(crtc, state);
if (ret)
return ret;
+ encoder = vc4_get_crtc_encoder(crtc, crtc_state);
+ if (encoder) {
+ const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+
+ if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
+ vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
+ mode->clock * 9 / 10) * 1000;
+ } else {
+ vc4_state->hvs_load = mode->clock * 1000;
+ }
+ }
+
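+ /* Hedged worked example for the load formula above (1080p60 CEA
+ * timings: clock = 148500 kHz, hdisplay = 1920, htotal = 2200,
+ * driving HDMI0):
+ *
+ * 148500 * 1920 / 2200 + 1000 = 130600 kHz
+ * 148500 * 9 / 10 = 133650 kHz
+ *
+ * hvs_load = max(130600, 133650) * 1000 = 133650000, so the HVS
+ * must run at roughly 133.65 MHz to feed this mode. */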
for_each_new_connector_in_state(state, conn, conn_state,
i) {
if (conn_state->crtc != crtc)
@@ -691,14 +716,14 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
- u32 chan = vc4_state->assigned_channel;
+ u32 chan = vc4_crtc->current_hvs_channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock(&vc4_crtc->irq_lock);
if (vc4_crtc->event &&
- (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) ||
- vc4_state->feed_txp)) {
+ (vc4_crtc->current_dlist == HVS_READ(SCALER_DISPLACTX(chan)) ||
+ vc4_crtc->feeds_txp)) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
@@ -711,6 +736,7 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
*/
vc4_hvs_unmask_underrun(dev, chan);
}
+ spin_unlock(&vc4_crtc->irq_lock);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -876,7 +902,6 @@ struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
return NULL;
old_vc4_state = to_vc4_crtc_state(crtc->state);
- vc4_state->feed_txp = old_vc4_state->feed_txp;
vc4_state->margins = old_vc4_state->margins;
vc4_state->assigned_channel = old_vc4_state->assigned_channel;
@@ -937,6 +962,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.mode_valid = vc4_crtc_mode_valid,
.atomic_check = vc4_crtc_atomic_check,
+ .atomic_begin = vc4_hvs_atomic_begin,
.atomic_flush = vc4_hvs_atomic_flush,
.atomic_enable = vc4_crtc_atomic_enable,
.atomic_disable = vc4_crtc_atomic_disable,
@@ -1111,6 +1137,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
return PTR_ERR(primary_plane);
}
+ spin_lock_init(&vc4_crtc->irq_lock);
drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
crtc_funcs, NULL);
drm_crtc_helper_add(crtc, crtc_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 6da22af4ee91..ba2d8ea562af 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -7,6 +7,7 @@
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/platform_device.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -26,8 +27,10 @@ vc4_debugfs_init(struct drm_minor *minor)
struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
struct vc4_debugfs_info_entry *entry;
- debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
- minor->debugfs_root, &vc4->load_tracker_enabled);
+ if (!of_device_is_compatible(vc4->hvs->pdev->dev.of_node,
+ "brcm,bcm2711-vc5"))
+ debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
+ minor->debugfs_root, &vc4->load_tracker_enabled);
list_for_each_entry(entry, &vc4->debugfs_list, link) {
drm_debugfs_create_files(&entry->info, 1,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index ef73e0aaf726..4329e09d357c 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -202,9 +202,6 @@ struct vc4_dev {
int power_refcount;
- /* Set to true when the load tracker is supported. */
- bool load_tracker_available;
-
/* Set to true when the load tracker is active. */
bool load_tracker_enabled;
@@ -495,6 +492,33 @@ struct vc4_crtc {
struct drm_pending_vblank_event *event;
struct debugfs_regset32 regset;
+
+ /**
+ * @feeds_txp: True if the CRTC feeds our writeback controller.
+ */
+ bool feeds_txp;
+
+ /**
+ * @irq_lock: Spinlock protecting the resources shared between
+ * the atomic code and our vblank handler.
+ */
+ spinlock_t irq_lock;
+
+ /**
+ * @current_dlist: Start offset of the display list currently
+ * set in the HVS for that CRTC. Protected by @irq_lock, and
+ * copied in vc4_hvs_update_dlist() for the CRTC interrupt
+ * handler to have access to that value.
+ */
+ unsigned int current_dlist;
+
+ /**
+ * @current_hvs_channel: HVS channel currently assigned to the
+ * CRTC. Protected by @irq_lock, and copied in
+ * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have
+ * access to that value.
+ */
+ unsigned int current_hvs_channel;
};
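
The kerneldoc above describes a publish/sample protocol between the atomic path and the vblank handler. A hedged sketch of the writer side (the helper name is hypothetical; the fields and lock are the ones documented above):

/* Hedged sketch: the atomic path publishes the values the vblank
 * handler will later sample under the same irq_lock. */
static void example_publish(struct vc4_crtc *vc4_crtc,
			    unsigned int dlist, unsigned int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_dlist = dlist;
	vc4_crtc->current_hvs_channel = channel;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}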
static inline struct vc4_crtc *
@@ -517,11 +541,13 @@ vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc)
return container_of(data, struct vc4_pv_data, base);
}
+struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
struct vc4_crtc_state {
struct drm_crtc_state base;
/* Dlist area for this CRTC configuration. */
struct drm_mm_node mm;
- bool feed_txp;
bool txp_armed;
unsigned int assigned_channel;
@@ -532,6 +558,8 @@ struct vc4_crtc_state {
unsigned int bottom;
} margins;
+ unsigned long hvs_load;
+
/* Transitional state below, only valid during atomic commits */
bool update_muxing;
};
@@ -908,6 +936,7 @@ extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index a229da58962a..9300d3354c51 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1262,7 +1262,6 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct vc4_dsi *dsi = host_to_dsi(host);
- int ret;
dsi->lanes = device->lanes;
dsi->channel = device->channel;
@@ -1297,18 +1296,15 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
- ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops);
- if (ret) {
- mipi_dsi_host_unregister(&dsi->dsi_host);
- return ret;
- }
-
- return 0;
+ return component_add(&dsi->pdev->dev, &vc4_dsi_ops);
}
static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
+ struct vc4_dsi *dsi = host_to_dsi(host);
+
+ component_del(&dsi->pdev->dev, &vc4_dsi_ops);
return 0;
}
@@ -1686,9 +1682,7 @@ static int vc4_dsi_dev_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- component_del(&pdev->dev, &vc4_dsi_ops);
mipi_dsi_host_unregister(&dsi->dsi_host);
-
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b284623e2863..053fbaf765ca 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -94,6 +94,7 @@
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
+#define HSM_MIN_CLOCK_FREQ 120000000
#define CEC_CLOCK_FREQ 40000
#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000)
@@ -117,6 +118,10 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_SW_RST);
udelay(1);
HDMI_WRITE(HDMI_M_CTL, 0);
@@ -128,24 +133,36 @@ static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
VC4_HDMI_SW_RESET_FORMAT_DETECT);
HDMI_WRITE(HDMI_SW_RESET_CONTROL, 0);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
+ unsigned long flags;
+
reset_control_reset(vc4_hdmi->reset);
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
HDMI_WRITE(HDMI_DVP_CTL, 0);
HDMI_WRITE(HDMI_CLOCK_STOP,
HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
#ifdef CONFIG_DRM_VC4_HDMI_CEC
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
{
+ unsigned long cec_rate = clk_get_rate(vc4_hdmi->cec_clock);
+ unsigned long flags;
u16 clk_cnt;
u32 value;
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
value = HDMI_READ(HDMI_CEC_CNTRL_1);
value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
@@ -153,27 +170,41 @@ static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
* Set the clock divider: the hsm_clock rate and this divider
* setting will give a 40 kHz CEC clock.
*/
- clk_cnt = clk_get_rate(vc4_hdmi->cec_clock) / CEC_CLOCK_FREQ;
+ clk_cnt = cec_rate / CEC_CLOCK_FREQ;
value |= clk_cnt << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT;
HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
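
A hedged worked example of the divider computed above:

/* With a hypothetical cec_clock rate of 27 MHz,
 * clk_cnt = 27000000 / CEC_CLOCK_FREQ = 27000000 / 40000 = 675,
 * so the counter divides the input down to the 40 kHz CEC tick. */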
#else
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
#endif
+static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder);
+
static enum drm_connector_status
vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
- if (vc4_hdmi->hpd_gpio &&
- gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
- connected = true;
- } else if (drm_probe_ddc(vc4_hdmi->ddc)) {
- connected = true;
- } else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) {
- connected = true;
+ mutex_lock(&vc4_hdmi->mutex);
+
+ WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+
+ if (vc4_hdmi->hpd_gpio) {
+ if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
+ connected = true;
+ } else {
+ unsigned long flags;
+ u32 hotplug;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+ hotplug = HDMI_READ(HDMI_HOTPLUG);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED)
+ connected = true;
}
if (connected) {
@@ -187,10 +218,15 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
}
+ vc4_hdmi_enable_scrambling(&vc4_hdmi->encoder.base.base);
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
+ mutex_unlock(&vc4_hdmi->mutex);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
+ mutex_unlock(&vc4_hdmi->mutex);
return connector_status_disconnected;
}
@@ -207,10 +243,14 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
int ret = 0;
struct edid *edid;
+ mutex_lock(&vc4_hdmi->mutex);
+
edid = drm_get_edid(connector, vc4_hdmi->ddc);
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- if (!edid)
- return -ENODEV;
+ if (!edid) {
+ ret = -ENODEV;
+ goto out;
+ }
vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid);
@@ -230,6 +270,9 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
}
}
+out:
+ mutex_unlock(&vc4_hdmi->mutex);
+
return ret;
}
@@ -364,9 +407,12 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
u32 packet_id = type - 0x80;
+ unsigned long flags;
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (!poll)
return 0;
@@ -386,6 +432,7 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
void __iomem *base = __vc4_hdmi_get_field_base(vc4_hdmi,
ram_packet_start->reg);
uint8_t buffer[VC4_HDMI_PACKET_STRIDE];
+ unsigned long flags;
ssize_t len, i;
int ret;
@@ -403,6 +450,8 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
return;
}
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
for (i = 0; i < len; i += 7) {
writel(buffer[i + 0] << 0 |
buffer[i + 1] << 8 |
@@ -420,6 +469,9 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) | BIT(packet_id));
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
ret = wait_for((HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
if (ret)
@@ -432,11 +484,12 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *cstate = connector->state;
- struct drm_crtc *crtc = encoder->crtc;
- const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
union hdmi_infoframe frame;
int ret;
+ lockdep_assert_held(&vc4_hdmi->mutex);
+
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
connector, mode);
if (ret < 0) {
@@ -488,6 +541,8 @@ static void vc4_hdmi_set_hdr_infoframe(struct drm_encoder *encoder)
struct drm_connector_state *conn_state = connector->state;
union hdmi_infoframe frame;
+ lockdep_assert_held(&vc4_hdmi->mutex);
+
if (!vc4_hdmi->variant->supports_hdr)
return;
@@ -504,6 +559,8 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ lockdep_assert_held(&vc4_hdmi->mutex);
+
vc4_hdmi_set_avi_infoframe(encoder);
vc4_hdmi_set_spd_infoframe(encoder);
/*
@@ -523,6 +580,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_display_info *display = &vc4_hdmi->connector.display_info;
+ lockdep_assert_held(&vc4_hdmi->mutex);
+
if (!vc4_encoder->hdmi_monitor)
return false;
@@ -537,8 +596,11 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ unsigned long flags;
+
+ lockdep_assert_held(&vc4_hdmi->mutex);
if (!vc4_hdmi_supports_scrambling(encoder, mode))
return;
@@ -549,8 +611,12 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, true);
drm_scdc_set_scrambling(vc4_hdmi->ddc, true);
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) |
VC5_HDMI_SCRAMBLER_CTL_ENABLE);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ vc4_hdmi->scdc_enabled = true;
queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work,
msecs_to_jiffies(SCRAMBLING_POLLING_DELAY_MS));
@@ -559,24 +625,22 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_crtc *crtc = encoder->crtc;
+ unsigned long flags;
- /*
- * At boot, encoder->crtc will be NULL. Since we don't know the
- * state of the scrambler and in order to avoid any
- * inconsistency, let's disable it all the time.
- */
- if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
- return;
+ lockdep_assert_held(&vc4_hdmi->mutex);
- if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
+ if (!vc4_hdmi->scdc_enabled)
return;
+ vc4_hdmi->scdc_enabled = false;
+
if (delayed_work_pending(&vc4_hdmi->scrambling_work))
cancel_delayed_work_sync(&vc4_hdmi->scrambling_work);
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) &
~VC5_HDMI_SCRAMBLER_CTL_ENABLE);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
drm_scdc_set_scrambling(vc4_hdmi->ddc, false);
drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, false);
@@ -602,47 +666,73 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ unsigned long flags;
+
+ mutex_lock(&vc4_hdmi->mutex);
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0);
HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_CLRRGB);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
mdelay(1);
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
vc4_hdmi_disable_scrambling(encoder);
+
+ mutex_unlock(&vc4_hdmi->mutex);
}
static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ unsigned long flags;
int ret;
+ mutex_lock(&vc4_hdmi->mutex);
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (vc4_hdmi->variant->phy_disable)
vc4_hdmi->variant->phy_disable(vc4_hdmi);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
if (ret < 0)
DRM_ERROR("Failed to release power domain: %d\n", ret);
+
+ mutex_unlock(&vc4_hdmi->mutex);
}
static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder)
{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+
+ mutex_lock(&vc4_hdmi->mutex);
+ vc4_hdmi->output_enabled = false;
+ mutex_unlock(&vc4_hdmi->mutex);
}
static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
{
+ unsigned long flags;
u32 csc_ctl;
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
VC4_HD_CSC_CTL_ORDER);
@@ -672,14 +762,19 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
/* The RGB order applies even when CSC is disabled. */
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
{
+ unsigned long flags;
u32 csc_ctl;
csc_ctl = 0x07; /* RGB_CONVERT_MODE = custom matrix, || USE_RGB_TO_YCBCR */
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
if (enable) {
/* CEA VICs other than #1 require limited range RGB
* output unless overridden by an AVI infoframe.
@@ -711,6 +806,8 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
}
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
@@ -734,6 +831,9 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
mode->crtc_vsync_end -
interlaced,
VC4_HDMI_VERTB_VBP));
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_HORZA,
(vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) |
@@ -757,6 +857,8 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
@@ -780,10 +882,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
mode->crtc_vsync_end -
interlaced,
VC4_HDMI_VERTB_VBP));
+ unsigned long flags;
unsigned char gcp;
bool gcp_en;
u32 reg;
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021);
HDMI_WRITE(HDMI_HORZA,
(vsync_pos ? VC5_HDMI_HORZA_VPOS : 0) |
@@ -842,13 +947,18 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_GCP_CONFIG, reg);
HDMI_WRITE(HDMI_CLOCK_STOP, 0);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
{
+ unsigned long flags;
u32 drift;
int ret;
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
drift = HDMI_READ(HDMI_FIFO_CTL);
drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK;
@@ -856,12 +966,20 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
HDMI_WRITE(HDMI_FIFO_CTL,
drift | VC4_HDMI_FIFO_CTL_RECENTER);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
usleep_range(1000, 1100);
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
HDMI_WRITE(HDMI_FIFO_CTL,
drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
HDMI_WRITE(HDMI_FIFO_CTL,
drift | VC4_HDMI_FIFO_CTL_RECENTER);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
ret = wait_for(HDMI_READ(HDMI_FIFO_CTL) &
VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1);
WARN_ONCE(ret, "Timeout waiting for "
@@ -891,29 +1009,14 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
vc4_hdmi_encoder_get_connector_state(encoder, state);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- unsigned long bvb_rate, pixel_rate, hsm_rate;
+ struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ unsigned long pixel_rate = vc4_conn_state->pixel_rate;
+ unsigned long bvb_rate, hsm_rate;
+ unsigned long flags;
int ret;
- ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
- if (ret < 0) {
- DRM_ERROR("Failed to retain power domain: %d\n", ret);
- return;
- }
-
- pixel_rate = vc4_conn_state->pixel_rate;
- ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate);
- if (ret) {
- DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
- return;
- }
-
- ret = clk_prepare_enable(vc4_hdmi->pixel_clock);
- if (ret) {
- DRM_ERROR("Failed to turn on pixel clock: %d\n", ret);
- return;
- }
+ mutex_lock(&vc4_hdmi->mutex);
/*
* As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
@@ -935,16 +1038,28 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
if (ret) {
DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
- return;
+ goto out;
}
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+ if (ret < 0) {
+ DRM_ERROR("Failed to retain power domain: %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate);
if (ret) {
- DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->pixel_clock);
- return;
+ DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
+ goto err_put_runtime_pm;
}
+ ret = clk_prepare_enable(vc4_hdmi->pixel_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on pixel clock: %d\n", ret);
+ goto err_put_runtime_pm;
+ }
+
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (pixel_rate > 297000000)
@@ -957,37 +1072,52 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
- clk_disable_unprepare(vc4_hdmi->pixel_clock);
- return;
+ goto err_disable_pixel_clock;
}
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
- clk_disable_unprepare(vc4_hdmi->pixel_clock);
- return;
+ goto err_disable_pixel_clock;
}
if (vc4_hdmi->variant->phy_init)
vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state);
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
HDMI_READ(HDMI_SCHEDULER_CONTROL) |
VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT |
VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
if (vc4_hdmi->variant->set_timings)
vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode);
+
+ mutex_unlock(&vc4_hdmi->mutex);
+
+ return;
+
+err_disable_pixel_clock:
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+err_put_runtime_pm:
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
+out:
+ mutex_unlock(&vc4_hdmi->mutex);
+ return;
}
static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+ unsigned long flags;
+
+ mutex_lock(&vc4_hdmi->mutex);
if (vc4_encoder->hdmi_monitor &&
drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED) {
@@ -1002,19 +1132,28 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
vc4_encoder->limited_rgb_range = false;
}
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ mutex_unlock(&vc4_hdmi->mutex);
}
static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
+ unsigned long flags;
int ret;
+ mutex_lock(&vc4_hdmi->mutex);
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
HDMI_WRITE(HDMI_VID_CTL,
VC4_HD_VID_CTL_ENABLE |
VC4_HD_VID_CTL_CLRRGB |
@@ -1031,6 +1170,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
HDMI_READ(HDMI_SCHEDULER_CONTROL) |
VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
ret = wait_for(HDMI_READ(HDMI_SCHEDULER_CONTROL) &
VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1000);
WARN_ONCE(ret, "Timeout waiting for "
@@ -1043,6 +1184,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
HDMI_READ(HDMI_SCHEDULER_CONTROL) &
~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
ret = wait_for(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) &
VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1000);
WARN_ONCE(ret, "Timeout waiting for "
@@ -1050,6 +1193,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
}
if (vc4_encoder->hdmi_monitor) {
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
WARN_ON(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) &
VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE));
HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
@@ -1059,15 +1204,37 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
VC4_HDMI_RAM_PACKET_ENABLE);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
vc4_hdmi_set_infoframes(encoder);
}
vc4_hdmi_recenter_fifo(vc4_hdmi);
vc4_hdmi_enable_scrambling(encoder);
+
+ mutex_unlock(&vc4_hdmi->mutex);
}
static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+
+ mutex_lock(&vc4_hdmi->mutex);
+ vc4_hdmi->output_enabled = true;
+ mutex_unlock(&vc4_hdmi->mutex);
+}
+
+static void vc4_hdmi_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+
+ mutex_lock(&vc4_hdmi->mutex);
+ memcpy(&vc4_hdmi->saved_adjusted_mode,
+ &crtc_state->adjusted_mode,
+ sizeof(vc4_hdmi->saved_adjusted_mode));
+ mutex_unlock(&vc4_hdmi->mutex);
}
#define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL
@@ -1146,6 +1313,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
.atomic_check = vc4_hdmi_encoder_atomic_check,
+ .atomic_mode_set = vc4_hdmi_encoder_atomic_mode_set,
.mode_valid = vc4_hdmi_encoder_mode_valid,
.disable = vc4_hdmi_encoder_disable,
.enable = vc4_hdmi_encoder_enable,
@@ -1180,6 +1348,7 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
unsigned int samplerate)
{
u32 hsm_clock = clk_get_rate(vc4_hdmi->audio_clock);
+ unsigned long flags;
unsigned long n, m;
rational_best_approximation(hsm_clock, samplerate,
@@ -1189,19 +1358,22 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
VC4_HD_MAI_SMP_M_SHIFT) + 1,
&n, &m);
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_SMP,
VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) |
VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M));
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
{
- struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
- struct drm_crtc *crtc = encoder->crtc;
- const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
u32 n, cts;
u64 tmp;
+ lockdep_assert_held(&vc4_hdmi->mutex);
+ lockdep_assert_held(&vc4_hdmi->hw_lock);
+
n = 128 * samplerate / 1000;
tmp = (u64)(mode->clock * 1000) * n;
do_div(tmp, 128 * samplerate);
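
A hedged worked example of the N/CTS derivation above, assuming a 48 kHz stream on a 148.5 MHz (1080p60) TMDS clock:

/* n   = 128 * 48000 / 1000               = 6144
 * cts = 148500000 * 6144 / (128 * 48000) = 148500
 * which matches the HDMI-recommended N/CTS pair for that rate. */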
@@ -1227,31 +1399,54 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
return snd_soc_card_get_drvdata(card);
}
-static int vc4_hdmi_audio_startup(struct device *dev, void *data)
+static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi)
{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+ lockdep_assert_held(&vc4_hdmi->mutex);
+
+ /*
+ * If the controller is disabled, prevent any ALSA output.
+ */
+ if (!vc4_hdmi->output_enabled)
+ return false;
/*
- * If the HDMI encoder hasn't probed, or the encoder is
- * currently in DVI mode, treat the codec dai as missing.
+ * If the encoder is currently in DVI mode, treat the codec DAI
+ * as missing.
*/
- if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
- VC4_HDMI_RAM_PACKET_ENABLE))
+ if (!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE))
+ return false;
+
+ return true;
+}
+
+static int vc4_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ mutex_lock(&vc4_hdmi->mutex);
+
+ if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
+ mutex_unlock(&vc4_hdmi->mutex);
return -ENODEV;
+ }
vc4_hdmi->audio.streaming = true;
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_CTL,
VC4_HD_MAI_CTL_RESET |
VC4_HD_MAI_CTL_FLUSH |
VC4_HD_MAI_CTL_DLATE |
VC4_HD_MAI_CTL_ERRORE |
VC4_HD_MAI_CTL_ERRORF);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
if (vc4_hdmi->variant->phy_rng_enable)
vc4_hdmi->variant->phy_rng_enable(vc4_hdmi);
+ mutex_unlock(&vc4_hdmi->mutex);
+
return 0;
}
@@ -1259,32 +1454,48 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
{
struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
struct device *dev = &vc4_hdmi->pdev->dev;
+ unsigned long flags;
int ret;
+ lockdep_assert_held(&vc4_hdmi->mutex);
+
vc4_hdmi->audio.streaming = false;
ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO, false);
if (ret)
dev_err(dev, "Failed to stop audio infoframe: %d\n", ret);
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_RESET);
HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_ERRORF);
HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_FLUSH);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ mutex_lock(&vc4_hdmi->mutex);
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_CTL,
VC4_HD_MAI_CTL_DLATE |
VC4_HD_MAI_CTL_ERRORE |
VC4_HD_MAI_CTL_ERRORF);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
if (vc4_hdmi->variant->phy_rng_disable)
vc4_hdmi->variant->phy_rng_disable(vc4_hdmi);
vc4_hdmi->audio.streaming = false;
vc4_hdmi_audio_reset(vc4_hdmi);
+
+ mutex_unlock(&vc4_hdmi->mutex);
}
static int sample_rate_to_mai_fmt(int samplerate)
@@ -1334,6 +1545,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
unsigned int sample_rate = params->sample_rate;
unsigned int channels = params->channels;
+ unsigned long flags;
u32 audio_packet_config, channel_mask;
u32 channel_map;
u32 mai_audio_format;
@@ -1342,14 +1554,22 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
sample_rate, params->sample_width, channels);
+ mutex_lock(&vc4_hdmi->mutex);
+
+ if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
+ mutex_unlock(&vc4_hdmi->mutex);
+ return -EINVAL;
+ }
+
+ vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate);
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_CTL,
VC4_SET_FIELD(channels, VC4_HD_MAI_CTL_CHNUM) |
VC4_HD_MAI_CTL_WHOLSMP |
VC4_HD_MAI_CTL_CHALIGN |
VC4_HD_MAI_CTL_ENABLE);
- vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate);
-
mai_sample_rate = sample_rate_to_mai_fmt(sample_rate);
if (params->iec.status[0] & IEC958_AES0_NONAUDIO &&
params->channels == 8)
@@ -1387,11 +1607,16 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
channel_map = vc4_hdmi->variant->channel_map(vc4_hdmi, channel_mask);
HDMI_WRITE(HDMI_MAI_CHANNEL_MAP, channel_map);
HDMI_WRITE(HDMI_AUDIO_PACKET_CONFIG, audio_packet_config);
+
vc4_hdmi_set_n_cts(vc4_hdmi, sample_rate);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
memcpy(&vc4_hdmi->audio.infoframe, &params->cea, sizeof(params->cea));
vc4_hdmi_set_audio_infoframe(encoder);
+ mutex_unlock(&vc4_hdmi->mutex);
+
return 0;
}
@@ -1434,7 +1659,9 @@ static int vc4_hdmi_audio_get_eld(struct device *dev, void *data,
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
struct drm_connector *connector = &vc4_hdmi->connector;
+ mutex_lock(&vc4_hdmi->mutex);
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&vc4_hdmi->mutex);
return 0;
}
@@ -1656,6 +1883,8 @@ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
struct cec_msg *msg = &vc4_hdmi->cec_rx_msg;
unsigned int i;
+ lockdep_assert_held(&vc4_hdmi->hw_lock);
+
msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >>
VC4_HDMI_CEC_REC_WRD_CNT_SHIFT);
@@ -1674,11 +1903,12 @@ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
}
}
-static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv)
+static irqreturn_t vc4_cec_irq_handler_tx_bare_locked(struct vc4_hdmi *vc4_hdmi)
{
- struct vc4_hdmi *vc4_hdmi = priv;
u32 cntrl1;
+ lockdep_assert_held(&vc4_hdmi->hw_lock);
+
cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
@@ -1687,11 +1917,24 @@ static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv)
return IRQ_WAKE_THREAD;
}
-static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv)
+static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
+ irqreturn_t ret;
+
+ spin_lock(&vc4_hdmi->hw_lock);
+ ret = vc4_cec_irq_handler_tx_bare_locked(vc4_hdmi);
+ spin_unlock(&vc4_hdmi->hw_lock);
+
+ return ret;
+}
+
+static irqreturn_t vc4_cec_irq_handler_rx_bare_locked(struct vc4_hdmi *vc4_hdmi)
+{
u32 cntrl1;
+ lockdep_assert_held(&vc4_hdmi->hw_lock);
+
vc4_hdmi->cec_rx_msg.len = 0;
cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
vc4_cec_read_msg(vc4_hdmi, cntrl1);
@@ -1704,6 +1947,18 @@ static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv)
return IRQ_WAKE_THREAD;
}
+static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv)
+{
+ struct vc4_hdmi *vc4_hdmi = priv;
+ irqreturn_t ret;
+
+ spin_lock(&vc4_hdmi->hw_lock);
+ ret = vc4_cec_irq_handler_rx_bare_locked(vc4_hdmi);
+ spin_unlock(&vc4_hdmi->hw_lock);
+
+ return ret;
+}
+
static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
@@ -1714,69 +1969,142 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
if (!(stat & VC4_HDMI_CPU_CEC))
return IRQ_NONE;
+ spin_lock(&vc4_hdmi->hw_lock);
cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5);
vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT;
if (vc4_hdmi->cec_irq_was_rx)
- ret = vc4_cec_irq_handler_rx_bare(irq, priv);
+ ret = vc4_cec_irq_handler_rx_bare_locked(vc4_hdmi);
else
- ret = vc4_cec_irq_handler_tx_bare(irq, priv);
+ ret = vc4_cec_irq_handler_tx_bare_locked(vc4_hdmi);
HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC);
+ spin_unlock(&vc4_hdmi->hw_lock);
+
return ret;
}
-static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
- u32 val = HDMI_READ(HDMI_CEC_CNTRL_5);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but doing so
+ * results in a reentrancy since cec_s_phys_addr_from_edid() called in
+ * .detect or .get_modes might call .adap_enable, which leads to this
+ * function being called with that mutex held.
+ *
+ * Concurrency is not an issue for the moment since we don't share any
+ * state with KMS, so we can ignore the lock for now, but we need to
+ * keep it in mind if we were to change that assumption.
+ */
+ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
+ val = HDMI_READ(HDMI_CEC_CNTRL_5);
val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) |
((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT);
- if (enable) {
- HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
- VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
- HDMI_WRITE(HDMI_CEC_CNTRL_5, val);
- HDMI_WRITE(HDMI_CEC_CNTRL_2,
- ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) |
- ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) |
- ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) |
- ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) |
- ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT));
- HDMI_WRITE(HDMI_CEC_CNTRL_3,
- ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) |
- ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) |
- ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) |
- ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT));
- HDMI_WRITE(HDMI_CEC_CNTRL_4,
- ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) |
- ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) |
- ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) |
- ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT));
-
- if (!vc4_hdmi->variant->external_irq_controller)
- HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
- } else {
- if (!vc4_hdmi->variant->external_irq_controller)
- HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
- HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
- VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
- }
+ HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
+ VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
+ HDMI_WRITE(HDMI_CEC_CNTRL_5, val);
+ HDMI_WRITE(HDMI_CEC_CNTRL_2,
+ ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) |
+ ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) |
+ ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) |
+ ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) |
+ ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT));
+ HDMI_WRITE(HDMI_CEC_CNTRL_3,
+ ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) |
+ ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) |
+ ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) |
+ ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT));
+ HDMI_WRITE(HDMI_CEC_CNTRL_4,
+ ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) |
+ ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) |
+ ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) |
+ ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT));
+
+ if (!vc4_hdmi->variant->external_irq_controller)
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
return 0;
}
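
A hedged worked example of the tick conversion used throughout the block above:

/* usecs = 1000000 / CEC_CLOCK_FREQ = 25 us per tick, so the 4700 us
 * signal-free time programs as 4700 / 25 = 188 ticks and the 4500 us
 * nominal bit period as 4500 / 25 = 180 ticks. */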
+static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
+{
+ struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ unsigned long flags;
+
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but doing so
+ * results in a reentrancy since cec_s_phys_addr_from_edid() called in
+ * .detect or .get_modes might call .adap_enable, which leads to this
+ * function being called with that mutex held.
+ *
+ * Concurrency is not an issue for the moment since we don't share any
+ * state with KMS, so we can ignore the lock for now, but we need to
+ * keep it in mind if we were to change that assumption.
+ */
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
+ if (!vc4_hdmi->variant->external_irq_controller)
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
+
+ HDMI_WRITE(HDMI_CEC_CNTRL_5, HDMI_READ(HDMI_CEC_CNTRL_5) |
+ VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
+
+ return 0;
+}
+
+static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ if (enable)
+ return vc4_hdmi_cec_enable(adap);
+ else
+ return vc4_hdmi_cec_disable(adap);
+}
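
The NOTE blocks spell out why these CEC callbacks rely on hw_lock instead of the mutex; the call chain they guard against, restated from those comments:

/* Reentrancy chain described in the NOTEs above:
 *   .detect()/.get_modes()              <- hold vc4_hdmi->mutex
 *     -> cec_s_phys_addr_from_edid()
 *       -> .adap_enable() callback      <- would deadlock on the mutex
 * hence register access here is serialized with hw_lock only. */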
+
static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ unsigned long flags;
+
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but doing so
+ * results in a reentrancy since cec_s_phys_addr_from_edid() called in
+ * .detect or .get_modes might call .adap_enable, which leads to this
+ * function being called with that mutex held.
+ *
+ * Concurrency is not an issue for the moment since we don't share any
+ * state with KMS, so we can ignore the lock for now, but we need to
+ * keep it in mind if we were to change that assumption.
+ */
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_CEC_CNTRL_1,
(HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) |
(log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
return 0;
}
@@ -1785,14 +2113,28 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
struct drm_device *dev = vc4_hdmi->connector.dev;
+ unsigned long flags;
u32 val;
unsigned int i;
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but doing so
+ * results in a reentrancy since cec_s_phys_addr_from_edid() called in
+ * .detect or .get_modes might call .adap_enable, which leads to this
+ * function being called with that mutex held.
+ *
+ * Concurrency is not an issue for the moment since we don't share any
+ * state with KMS, so we can ignore the lock for now, but we need to
+ * keep it in mind if we were to change that assumption.
+ */
+
if (msg->len > 16) {
drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len);
return -ENOMEM;
}
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
for (i = 0; i < msg->len; i += 4)
HDMI_WRITE(HDMI_CEC_TX_DATA_1 + (i >> 2),
(msg->msg[i]) |
@@ -1808,6 +2150,9 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
val |= VC4_HDMI_CEC_START_XMIT_BEGIN;
HDMI_WRITE(HDMI_CEC_CNTRL_1, val);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
return 0;
}
@@ -1822,6 +2167,7 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
struct cec_connector_info conn_info;
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
+ unsigned long flags;
u32 value;
int ret;
@@ -1841,10 +2187,12 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector);
cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
value = HDMI_READ(HDMI_CEC_CNTRL_1);
/* Set the logical address to Unregistered */
value |= VC4_HDMI_CEC_ADDR_MASK;
HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
@@ -1863,7 +2211,9 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
if (ret)
goto err_remove_cec_rx_handler;
} else {
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
ret = request_threaded_irq(platform_get_irq(pdev, 0),
vc4_cec_irq_handler,
@@ -2099,6 +2449,27 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
+static int __maybe_unused vc4_hdmi_runtime_suspend(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+
+ return 0;
+}
+
+static int vc4_hdmi_runtime_resume(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2112,6 +2483,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL);
if (!vc4_hdmi)
return -ENOMEM;
+ mutex_init(&vc4_hdmi->mutex);
+ spin_lock_init(&vc4_hdmi->hw_lock);
INIT_DELAYED_WORK(&vc4_hdmi->scrambling_work, vc4_hdmi_scrambling_wq);
dev_set_drvdata(dev, vc4_hdmi);
@@ -2125,6 +2498,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->pdev = pdev;
vc4_hdmi->variant = variant;
+ /*
+ * Since we don't know the state of the controller and its
+ * display (if any), let's assume it's always enabled.
+ * vc4_hdmi_disable_scrambling() will thus run at boot to make
+ * sure it's disabled and avoid any inconsistency.
+ */
+ vc4_hdmi->scdc_enabled = true;
+
ret = variant->init_resources(vc4_hdmi);
if (ret)
return ret;
@@ -2162,6 +2543,31 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_4kp60 = true;
}
+ /*
+ * If we boot without any cable connected to the HDMI connector,
+ * the firmware will skip the HSM initialization and leave it
+ * with a rate of 0, resulting in a bus lockup when we're
+ * accessing the registers, even if the controller is enabled.
+ *
+ * Let's set a sensible default rate at runtime_resume so that we
+ * don't end up in this situation.
+ */
+ ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ);
+ if (ret)
+ goto err_put_ddc;
+
+ /*
+ * We need to have the device powered up at this point to call
+ * our reset hook and for the CEC init.
+ */
+ ret = vc4_hdmi_runtime_resume(dev);
+ if (ret)
+ goto err_put_ddc;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
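The three pm_runtime calls above, together with the pm_runtime_put_sync() added further down in bind, follow the standard idiom for handing an already-powered device over to runtime PM. A condensed, hypothetical sketch of the sequence (error handling trimmed for brevity):

/* Hypothetical condensation of the handoff; standard runtime PM idiom. */
static int example_bind(struct device *dev)
{
	int ret;

	ret = vc4_hdmi_runtime_resume(dev);	/* power up by hand */
	if (ret)
		return ret;

	pm_runtime_get_noresume(dev);	/* hold a reference, no callback */
	pm_runtime_set_active(dev);	/* tell the core it is powered */
	pm_runtime_enable(dev);		/* runtime PM may now manage it */

	/* ... probe work that needs the hardware ... */

	pm_runtime_put_sync(dev);	/* drop the reference; the device
					 * may now runtime-suspend */
	return 0;
}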
@@ -2173,8 +2579,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
}
- pm_runtime_enable(dev);
-
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs);
@@ -2198,6 +2602,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi_debugfs_regs,
vc4_hdmi);
+ pm_runtime_put_sync(dev);
+
return 0;
err_free_cec:
@@ -2208,6 +2614,7 @@ err_destroy_conn:
vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
err_destroy_encoder:
drm_encoder_cleanup(encoder);
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
err_put_ddc:
put_device(&vc4_hdmi->ddc->dev);
@@ -2294,7 +2701,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI0,
.debugfs_name = "hdmi0_regs",
.card_name = "vc4-hdmi-0",
- .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
+ .max_pixel_clock = 600000000,
.registers = vc5_hdmi_hdmi0_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields),
.phy_lane_mapping = {
@@ -2353,11 +2760,18 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
{}
};
+static const struct dev_pm_ops vc4_hdmi_pm_ops = {
+ SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
+ vc4_hdmi_runtime_resume,
+ NULL)
+};
+
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
+ .pm = &vc4_hdmi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 33e9f665ab8e..36c0b082a43b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -178,6 +178,43 @@ struct vc4_hdmi {
struct debugfs_regset32 hdmi_regset;
struct debugfs_regset32 hd_regset;
+
+ /**
+ * @hw_lock: Spinlock protecting device register access.
+ */
+ spinlock_t hw_lock;
+
+ /**
+ * @mutex: Mutex protecting the driver access across multiple
+ * frameworks (KMS, ALSA).
+ *
+ * NOTE: CEC, while supported, is not covered by this mutex since
+ * cec_s_phys_addr_from_edid() might call .adap_enable and lead to a
+ * reentrancy issue between .get_modes (or .detect) and .adap_enable.
+ * Since we don't share any state between the CEC hooks and KMS's, it's
+ * not a big deal. The only trouble might come from updating the CEC
+ * clock divider, which might be affected by a modeset, but CEC should
+ * be resilient to that.
+ */
+ struct mutex mutex;
+
+ /**
+ * @saved_adjusted_mode: Copy of @drm_crtc_state.adjusted_mode
+ * for use by ALSA hooks and interrupt handlers. Protected by @mutex.
+ */
+ struct drm_display_mode saved_adjusted_mode;
+
+ /**
+ * @output_enabled: Is the HDMI controller currently active?
+ * Protected by @mutex.
+ */
+ bool output_enabled;
+
+ /**
+ * @scdc_enabled: Is the HDMI controller currently running with
+ * the scrambler on? Protected by @mutex.
+ */
+ bool scdc_enabled;
};
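Taken together, the two new locks form a small hierarchy: @mutex serializes driver state shared across KMS and ALSA, while @hw_lock guards the actual register accesses and may be taken from interrupt context. A hypothetical helper, not part of the patch, showing the intended nesting order:

/* Illustrative sketch of how the two locks nest on a KMS path. */
static void example_kms_path(struct vc4_hdmi *vc4_hdmi)
{
	unsigned long flags;

	mutex_lock(&vc4_hdmi->mutex);		/* cross-framework state */

	spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
	/* HDMI_READ()/HDMI_WRITE() are only valid inside this section */
	spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);

	mutex_unlock(&vc4_hdmi->mutex);
}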
static inline struct vc4_hdmi *
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
index 36535480f8e2..62148f0dc284 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
@@ -130,31 +130,49 @@
void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
struct vc4_hdmi_connector_state *conn_state)
{
+ unsigned long flags;
+
/* PHY should be in reset, like
* vc4_hdmi_encoder_disable() does.
*/
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16);
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_CTL_0,
HDMI_READ(HDMI_TX_PHY_CTL_0) &
~VC4_HDMI_TX_PHY_RNG_PWRDN);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_CTL_0,
HDMI_READ(HDMI_TX_PHY_CTL_0) |
VC4_HDMI_TX_PHY_RNG_PWRDN);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
static unsigned long long
@@ -336,6 +354,8 @@ phy_get_channel_settings(enum vc4_hdmi_phy_channel chan,
static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
{
+ lockdep_assert_held(&vc4_hdmi->hw_lock);
+
HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x0f);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10));
}
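vc5_hdmi_reset_phy() is only ever called with hw_lock already held, so instead of re-taking the lock it asserts the contract. A minimal sketch of the same pattern for a hypothetical helper:

/* Hypothetical helper following the same contract: document (and let
 * lockdep verify) that the caller already owns hw_lock rather than
 * taking it again. */
static void example_locked_helper(struct vc4_hdmi *vc4_hdmi)
{
	lockdep_assert_held(&vc4_hdmi->hw_lock);

	/* safe to touch the PHY registers here */
}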
@@ -348,10 +368,13 @@ void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
unsigned long long pixel_freq = conn_state->pixel_rate;
unsigned long long vco_freq;
unsigned char word_sel;
+ unsigned long flags;
u8 vco_sel, vco_div;
vco_freq = phy_get_vco_freq(pixel_freq, &vco_sel, &vco_div);
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
vc5_hdmi_reset_phy(vc4_hdmi);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
@@ -501,23 +524,37 @@ void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
HDMI_READ(HDMI_TX_PHY_RESET_CTL) |
VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB |
VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
vc5_hdmi_reset_phy(vc4_hdmi);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) &
~VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) |
VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 19d2fdc446bc..fc971506bd4f 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -1,6 +1,8 @@
#ifndef _VC4_HDMI_REGS_H_
#define _VC4_HDMI_REGS_H_
+#include <linux/pm_runtime.h>
+
#include "vc4_hdmi.h"
#define VC4_HDMI_PACKET_STRIDE 0x24
@@ -412,6 +414,8 @@ static inline u32 vc4_hdmi_read(struct vc4_hdmi *hdmi,
const struct vc4_hdmi_variant *variant = hdmi->variant;
void __iomem *base;
+ WARN_ON(!pm_runtime_active(&hdmi->pdev->dev));
+
if (reg >= variant->num_registers) {
dev_warn(&hdmi->pdev->dev,
"Invalid register ID %u\n", reg);
@@ -438,6 +442,10 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi,
const struct vc4_hdmi_variant *variant = hdmi->variant;
void __iomem *base;
+ lockdep_assert_held(&hdmi->hw_lock);
+
+ WARN_ON(!pm_runtime_active(&hdmi->pdev->dev));
+
if (reg >= variant->num_registers) {
dev_warn(&hdmi->pdev->dev,
"Invalid register ID %u\n", reg);
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index c239045e05d6..604933e20e6a 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -365,17 +365,16 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ unsigned long flags;
if (crtc->state->event) {
- unsigned long flags;
-
crtc->state->event->pipe = drm_crtc_index(crtc);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&dev->event_lock, flags);
- if (!vc4_state->feed_txp || vc4_state->txp_armed) {
+ if (!vc4_crtc->feeds_txp || vc4_state->txp_armed) {
vc4_crtc->event = crtc->state->event;
crtc->state->event = NULL;
}
@@ -388,6 +387,22 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
vc4_state->mm.start);
}
+
+ spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
+ vc4_crtc->current_dlist = vc4_state->mm.start;
+ spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
+}
+
+void vc4_hvs_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
+ vc4_crtc->current_hvs_channel = vc4_state->assigned_channel;
+ spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}
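vc4_hvs_atomic_begin() publishes the assigned channel (and vc4_hvs_update_dlist() the dlist start) under irq_lock so the vblank interrupt handler can sample a consistent pair. A sketch of the consumer side (hypothetical, not the handler's exact code):

/* The vblank handler samples both values under the same irq_lock so it
 * never sees a half-updated pair. */
static void example_vblank(struct vc4_crtc *vc4_crtc)
{
	unsigned long flags;
	unsigned int channel;
	u32 dlist;

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	dlist = vc4_crtc->current_dlist;
	channel = vc4_crtc->current_hvs_channel;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);

	/* ... program the HVS from dlist/channel ... */
}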
void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
@@ -395,10 +410,9 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(new_crtc_state);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- bool oneshot = vc4_state->feed_txp;
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ bool oneshot = vc4_crtc->feeds_txp;
vc4_hvs_update_dlist(crtc);
vc4_hvs_init_channel(vc4, crtc, mode, oneshot);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index b61792d2aa65..24de29bc1cda 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -39,9 +39,11 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
struct vc4_hvs_state {
struct drm_private_state base;
+ unsigned long core_clock_rate;
struct {
unsigned in_use: 1;
+ unsigned long fifo_load;
struct drm_crtc_commit *pending_commit;
} fifo_state[HVS_NUM_CHANNELS];
};
@@ -233,6 +235,7 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
unsigned int i;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
u32 dispctrl;
u32 dsp3_mux;
@@ -253,7 +256,7 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
* TXP IP, and we need to disable the FIFO2 -> pixelvalve1
* route.
*/
- if (vc4_state->feed_txp)
+ if (vc4_crtc->feeds_txp)
dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
else
dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
@@ -338,11 +341,20 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
struct drm_crtc_state *new_crtc_state;
+ struct vc4_hvs_state *new_hvs_state;
struct drm_crtc *crtc;
struct vc4_hvs_state *old_hvs_state;
unsigned int channel;
int i;
+ old_hvs_state = vc4_hvs_get_old_global_state(state);
+ if (WARN_ON(IS_ERR(old_hvs_state)))
+ return;
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ if (WARN_ON(IS_ERR(new_hvs_state)))
+ return;
+
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state;
@@ -353,10 +365,6 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
}
- old_hvs_state = vc4_hvs_get_old_global_state(state);
- if (IS_ERR(old_hvs_state))
- return;
-
for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
struct drm_crtc_commit *commit;
int ret;
@@ -376,9 +384,13 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
old_hvs_state->fifo_state[channel].pending_commit = NULL;
}
- if (vc4->hvs->hvs5)
- clk_set_min_rate(hvs->core_clk, 500000000);
+ if (vc4->hvs->hvs5) {
+ unsigned long core_rate = max_t(unsigned long,
+ 500000000,
+ new_hvs_state->core_clock_rate);
+ clk_set_min_rate(hvs->core_clk, core_rate);
+ }
drm_atomic_helper_commit_modeset_disables(dev, state);
vc4_ctm_commit(vc4, state);
@@ -400,8 +412,12 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
- if (vc4->hvs->hvs5)
- clk_set_min_rate(hvs->core_clk, 0);
+ if (vc4->hvs->hvs5) {
+ drm_dbg(dev, "Running the core clock at %lu Hz\n",
+ new_hvs_state->core_clock_rate);
+
+ clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate);
+ }
}
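The commit path thus raises the core clock floor to at least 500 MHz while the modeset is in flight, then relaxes it to the rate computed at check time. A hypothetical condensation of the two clk_set_min_rate() calls; the placeholder comment stands for the helper calls in between:

static void example_commit_clocks(struct clk *core_clk, unsigned long computed)
{
	unsigned long commit_rate =
		computed > 500000000UL ? computed : 500000000UL;

	clk_set_min_rate(core_clk, commit_rate);	/* before touching HW */

	/* ... modeset disables/enables, plane updates ... */

	clk_set_min_rate(core_clk, computed);		/* relax once done */
}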
static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
@@ -553,9 +569,6 @@ static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
struct drm_plane *plane;
int i;
- if (!vc4->load_tracker_available)
- return 0;
-
priv_state = drm_atomic_get_private_obj_state(state,
&vc4->load_tracker);
if (IS_ERR(priv_state))
@@ -630,9 +643,6 @@ static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (!vc4->load_tracker_available)
- return;
-
drm_atomic_private_obj_fini(&vc4->load_tracker);
}
@@ -640,9 +650,6 @@ static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
struct vc4_load_tracker_state *load_state;
- if (!vc4->load_tracker_available)
- return 0;
-
load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
if (!load_state)
return -ENOMEM;
@@ -667,11 +674,13 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
-
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
+ state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
}
+ state->core_clock_rate = old_state->core_clock_rate;
+
return &state->base;
}
@@ -827,6 +836,76 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
}
static int
+vc4_core_clock_atomic_check(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+ struct vc4_hvs_state *hvs_new_state;
+ struct vc4_load_tracker_state *load_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_crtc *crtc;
+ unsigned int num_outputs;
+ unsigned long pixel_rate;
+ unsigned long cob_rate;
+ unsigned int i;
+
+ priv_state = drm_atomic_get_private_obj_state(state,
+ &vc4->load_tracker);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ load_state = to_vc4_load_tracker_state(priv_state);
+
+ hvs_new_state = vc4_hvs_get_global_state(state);
+ if (IS_ERR(hvs_new_state))
+ return PTR_ERR(hvs_new_state);
+
+ for_each_oldnew_crtc_in_state(state, crtc,
+ old_crtc_state,
+ new_crtc_state,
+ i) {
+ if (old_crtc_state->active) {
+ struct vc4_crtc_state *old_vc4_state =
+ to_vc4_crtc_state(old_crtc_state);
+ unsigned int channel = old_vc4_state->assigned_channel;
+
+ hvs_new_state->fifo_state[channel].fifo_load = 0;
+ }
+
+ if (new_crtc_state->active) {
+ struct vc4_crtc_state *new_vc4_state =
+ to_vc4_crtc_state(new_crtc_state);
+ unsigned int channel = new_vc4_state->assigned_channel;
+
+ hvs_new_state->fifo_state[channel].fifo_load =
+ new_vc4_state->hvs_load;
+ }
+ }
+
+ cob_rate = 0;
+ num_outputs = 0;
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ if (!hvs_new_state->fifo_state[i].in_use)
+ continue;
+
+ num_outputs++;
+ cob_rate += hvs_new_state->fifo_state[i].fifo_load;
+ }
+
+ pixel_rate = load_state->hvs_load;
+ if (num_outputs > 1) {
+ pixel_rate = (pixel_rate * 40) / 100;
+ } else {
+ pixel_rate = (pixel_rate * 60) / 100;
+ }
+
+ hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);
+
+ return 0;
+}
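vc4_core_clock_atomic_check() derives the rate from two inputs: the summed COB FIFO loads of the active channels, and the load tracker's HVS load scaled by 40% when several outputs are active or 60% with a single one. A runnable userspace example of the arithmetic, with made-up load figures:

#include <stdio.h>

int main(void)
{
	/* Two active channels with made-up COB FIFO loads, in Hz. */
	unsigned long fifo_load[2] = { 148500000UL, 74250000UL };
	unsigned long cob_rate = fifo_load[0] + fifo_load[1];
	unsigned long hvs_load = 300000000UL;	/* from the load tracker */
	unsigned int num_outputs = 2;
	unsigned long pixel_rate, core;

	/* 40% of the raw HVS load with several outputs, 60% with one. */
	pixel_rate = num_outputs > 1 ? hvs_load * 40 / 100
				     : hvs_load * 60 / 100;
	core = cob_rate > pixel_rate ? cob_rate : pixel_rate;

	printf("core_clock_rate = %lu Hz\n", core);	/* 222750000 */
	return 0;
}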
+
+static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
int ret;
@@ -843,7 +922,11 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
if (ret)
return ret;
- return vc4_load_tracker_atomic_check(state);
+ ret = vc4_load_tracker_atomic_check(state);
+ if (ret)
+ return ret;
+
+ return vc4_core_clock_atomic_check(state);
}
static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
@@ -864,9 +947,12 @@ int vc4_kms_load(struct drm_device *dev)
"brcm,bcm2711-vc5");
int ret;
+ /*
+ * The limits enforced by the load tracker aren't relevant for
+ * the BCM2711, but the load tracker computations are used for
+ * the core clock rate calculation.
+ */
if (!is_vc5) {
- vc4->load_tracker_available = true;
-
/* Start with the load tracker enabled. Can be
* disabled through the debugfs load_tracker file.
*/
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 19161b6ab27f..920a9eefe426 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -33,6 +33,7 @@ static const struct hvs_format {
u32 hvs; /* HVS_FORMAT_* */
u32 pixel_order;
u32 pixel_order_hvs5;
+ bool hvs5_only;
} hvs_formats[] = {
{
.drm = DRM_FORMAT_XRGB8888,
@@ -128,6 +129,12 @@ static const struct hvs_format {
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
},
+ {
+ .drm = DRM_FORMAT_P030,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_10BIT,
+ .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .hvs5_only = true,
+ },
};
static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
@@ -529,11 +536,6 @@ static void vc4_plane_calc_load(struct drm_plane_state *state)
struct vc4_plane_state *vc4_state;
struct drm_crtc_state *crtc_state;
unsigned int vscale_factor;
- struct vc4_dev *vc4;
-
- vc4 = to_vc4_dev(state->plane->dev);
- if (!vc4->load_tracker_available)
- return;
vc4_state = to_vc4_plane_state(state);
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
@@ -621,6 +623,51 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
return 0;
}
+/*
+ * The colorspace conversion matrices are held in 3 entries in the dlist.
+ * Create an array of them, with entries for both full and limited range,
+ * and for each supported colorspace.
+ */
+static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = {
+ {
+ /* Limited range */
+ {
+ /* BT601 */
+ SCALER_CSC0_ITR_R_601_5,
+ SCALER_CSC1_ITR_R_601_5,
+ SCALER_CSC2_ITR_R_601_5,
+ }, {
+ /* BT709 */
+ SCALER_CSC0_ITR_R_709_3,
+ SCALER_CSC1_ITR_R_709_3,
+ SCALER_CSC2_ITR_R_709_3,
+ }, {
+ /* BT2020 */
+ SCALER_CSC0_ITR_R_2020,
+ SCALER_CSC1_ITR_R_2020,
+ SCALER_CSC2_ITR_R_2020,
+ }
+ }, {
+ /* Full range */
+ {
+ /* JFIF */
+ SCALER_CSC0_JPEG_JFIF,
+ SCALER_CSC1_JPEG_JFIF,
+ SCALER_CSC2_JPEG_JFIF,
+ }, {
+ /* BT709 */
+ SCALER_CSC0_ITR_R_709_3_FR,
+ SCALER_CSC1_ITR_R_709_3_FR,
+ SCALER_CSC2_ITR_R_709_3_FR,
+ }, {
+ /* BT2020 */
+ SCALER_CSC0_ITR_R_2020_FR,
+ SCALER_CSC1_ITR_R_2020_FR,
+ SCALER_CSC2_ITR_R_2020_FR,
+ }
+ }
+};
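The table is indexed later in this patch as colorspace_coeffs[range][encoding], with out-of-range values from userspace clamped to BT601/limited defaults. A self-contained sketch of that lookup, using placeholder coefficient words rather than the real SCALER_CSC* values:

#include <stdio.h>

enum { ENC_BT601, ENC_BT709, ENC_BT2020, ENC_MAX };
enum { RANGE_LIMITED, RANGE_FULL, RANGE_MAX };

/* Placeholder words; the driver uses the SCALER_CSC{0,1,2}_* values. */
static const unsigned int coeffs[RANGE_MAX][ENC_MAX][3] = {
	{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } },		/* limited */
	{ { 11, 12, 13 }, { 14, 15, 16 }, { 17, 18, 19 } },	/* full */
};

int main(void)
{
	int enc = 42, range = -1;	/* bogus userspace-supplied values */
	const unsigned int *ccm;

	if (enc < 0 || enc >= ENC_MAX)
		enc = ENC_BT601;	/* default, as in the dlist code */
	if (range < 0 || range >= RANGE_MAX)
		range = RANGE_LIMITED;

	ccm = coeffs[range][enc];
	printf("%u %u %u\n", ccm[0], ccm[1], ccm[2]);
	return 0;
}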
+
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
@@ -767,47 +814,90 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
case DRM_FORMAT_MOD_BROADCOM_SAND128:
case DRM_FORMAT_MOD_BROADCOM_SAND256: {
uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
- u32 tile_w, tile, x_off, pix_per_tile;
-
- hvs_format = HVS_PIXEL_FORMAT_H264;
-
- switch (base_format_mod) {
- case DRM_FORMAT_MOD_BROADCOM_SAND64:
- tiling = SCALER_CTL0_TILING_64B;
- tile_w = 64;
- break;
- case DRM_FORMAT_MOD_BROADCOM_SAND128:
- tiling = SCALER_CTL0_TILING_128B;
- tile_w = 128;
- break;
- case DRM_FORMAT_MOD_BROADCOM_SAND256:
- tiling = SCALER_CTL0_TILING_256B_OR_T;
- tile_w = 256;
- break;
- default:
- break;
- }
if (param > SCALER_TILE_HEIGHT_MASK) {
- DRM_DEBUG_KMS("SAND height too large (%d)\n", param);
+ DRM_DEBUG_KMS("SAND height too large (%d)\n",
+ param);
return -EINVAL;
}
- pix_per_tile = tile_w / fb->format->cpp[0];
- tile = vc4_state->src_x / pix_per_tile;
- x_off = vc4_state->src_x % pix_per_tile;
+ if (fb->format->format == DRM_FORMAT_P030) {
+ hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT;
+ tiling = SCALER_CTL0_TILING_128B;
+ } else {
+ hvs_format = HVS_PIXEL_FORMAT_H264;
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ tiling = SCALER_CTL0_TILING_64B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tiling = SCALER_CTL0_TILING_128B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tiling = SCALER_CTL0_TILING_256B_OR_T;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
/* Adjust the base pointer to the first pixel to be scanned
* out.
+ *
+ * For P030, y_ptr [31:4] is the 128-bit word for the start pixel,
+ * and y_ptr [3:0] is the pixel (0-11) contained within that 128-bit
+ * word that should be taken as the first pixel.
+ * Ditto for uv_ptr [31:4] vs [3:0]; however, [3:0] contains the
+ * element within the 128-bit word, e.g. for pixel 3 the value
+ * should be 6.
*/
for (i = 0; i < num_planes; i++) {
+ u32 tile_w, tile, x_off, pix_per_tile;
+
+ if (fb->format->format == DRM_FORMAT_P030) {
+ /*
+ * Spec says: bits [31:4] of the given address
+ * should point to the 128-bit word containing
+ * the desired starting pixel, and bits [3:0]
+ * should be between 0 and 11, indicating which
+ * of the 12 pixels in that 128-bit word is the
+ * first pixel to be used.
+ */
+ u32 remaining_pixels = vc4_state->src_x % 96;
+ u32 aligned = remaining_pixels / 12;
+ u32 last_bits = remaining_pixels % 12;
+
+ x_off = aligned * 16 + last_bits;
+ tile_w = 128;
+ pix_per_tile = 96;
+ } else {
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ tile_w = 64;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tile_w = 128;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tile_w = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pix_per_tile = tile_w / fb->format->cpp[0];
+ x_off = (vc4_state->src_x % pix_per_tile) /
+ (i ? h_subsample : 1) *
+ fb->format->cpp[i];
+ }
+
+ tile = vc4_state->src_x / pix_per_tile;
+
vc4_state->offsets[i] += param * tile_w * tile;
vc4_state->offsets[i] += src_y /
(i ? v_subsample : 1) *
tile_w;
- vc4_state->offsets[i] += x_off /
- (i ? h_subsample : 1) *
- fb->format->cpp[i];
+ vc4_state->offsets[i] += x_off & ~(i ? 1 : 0);
}
pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
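A worked example of the P030 address math above, runnable in userspace: a 128-byte SAND column holds 96 pixels as eight 16-byte (128-bit) words of 12 pixels each, and x_off encodes the word in bits [31:4] (16-byte units) and the pixel in bits [3:0]:

#include <stdio.h>

int main(void)
{
	/* Start scanout at pixel column 215 of a P030/SAND128 buffer. */
	unsigned int src_x = 215;
	unsigned int tile = src_x / 96;		/* whole SAND columns */
	unsigned int rem = src_x % 96;		/* pixels into the column */
	unsigned int word = rem / 12;		/* 128-bit word index */
	unsigned int pixel = rem % 12;		/* pixel within the word */
	unsigned int x_off = word * 16 + pixel;	/* [31:4]=word, [3:0]=pixel */

	printf("tile %u, x_off 0x%02x (word %u, pixel %u)\n",
	       tile, x_off, word, pixel);	/* tile 2, x_off 0x1b */
	return 0;
}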
@@ -960,7 +1050,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Pitch word 1/2 */
for (i = 1; i < num_planes; i++) {
- if (hvs_format != HVS_PIXEL_FORMAT_H264) {
+ if (hvs_format != HVS_PIXEL_FORMAT_H264 &&
+ hvs_format != HVS_PIXEL_FORMAT_YCBCR_10BIT) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(fb->pitches[i],
SCALER_SRC_PITCH));
@@ -971,9 +1062,20 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Colorspace conversion words */
if (vc4_state->is_yuv) {
- vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5);
- vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5);
- vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
+ enum drm_color_encoding color_encoding = state->color_encoding;
+ enum drm_color_range color_range = state->color_range;
+ const u32 *ccm;
+
+ if (color_encoding >= DRM_COLOR_ENCODING_MAX)
+ color_encoding = DRM_COLOR_YCBCR_BT601;
+ if (color_range >= DRM_COLOR_RANGE_MAX)
+ color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+
+ ccm = colorspace_coeffs[color_range][color_encoding];
+
+ vc4_dlist_write(vc4_state, ccm[0]);
+ vc4_dlist_write(vc4_state, ccm[1]);
+ vc4_dlist_write(vc4_state, ccm[2]);
}
vc4_state->lbm_offset = 0;
@@ -1320,6 +1422,13 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
default:
return false;
}
+ case DRM_FORMAT_P030:
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ return true;
+ default:
+ return false;
+ }
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_RGBA1010102:
@@ -1352,8 +1461,11 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
+ int num_formats = 0;
int ret = 0;
unsigned i;
+ bool hvs5 = of_device_is_compatible(dev->dev->of_node,
+ "brcm,bcm2711-vc5");
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
DRM_FORMAT_MOD_BROADCOM_SAND128,
@@ -1368,13 +1480,17 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
if (!vc4_plane)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
- formats[i] = hvs_formats[i].drm;
+ for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
+ if (!hvs_formats[i].hvs5_only || hvs5) {
+ formats[num_formats] = hvs_formats[i].drm;
+ num_formats++;
+ }
+ }
plane = &vc4_plane->base;
ret = drm_universal_plane_init(dev, plane, 0,
&vc4_plane_funcs,
- formats, ARRAY_SIZE(formats),
+ formats, num_formats,
modifiers, type, NULL);
if (ret)
return ERR_PTR(ret);
@@ -1388,6 +1504,15 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
+ drm_plane_create_color_properties(plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
return plane;
}
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 489f921ef44d..7538b84a6dca 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -975,7 +975,10 @@ enum hvs_pixel_format {
#define SCALER_CSC0_COEF_CR_OFS_SHIFT 0
#define SCALER_CSC0_ITR_R_601_5 0x00f00000
#define SCALER_CSC0_ITR_R_709_3 0x00f00000
+#define SCALER_CSC0_ITR_R_2020 0x00f00000
#define SCALER_CSC0_JPEG_JFIF 0x00000000
+#define SCALER_CSC0_ITR_R_709_3_FR 0x00000000
+#define SCALER_CSC0_ITR_R_2020_FR 0x00000000
/* S2.8 contribution of Cb to Green */
#define SCALER_CSC1_COEF_CB_GRN_MASK VC4_MASK(31, 22)
@@ -990,8 +993,11 @@ enum hvs_pixel_format {
#define SCALER_CSC1_COEF_CR_BLU_MASK VC4_MASK(1, 0)
#define SCALER_CSC1_COEF_CR_BLU_SHIFT 0
#define SCALER_CSC1_ITR_R_601_5 0xe73304a8
-#define SCALER_CSC1_ITR_R_709_3 0xf2b784a8
-#define SCALER_CSC1_JPEG_JFIF 0xea34a400
+#define SCALER_CSC1_ITR_R_709_3 0xf27784a8
+#define SCALER_CSC1_ITR_R_2020 0xf43594a8
+#define SCALER_CSC1_JPEG_JFIF 0xea349400
+#define SCALER_CSC1_ITR_R_709_3_FR 0xf4388400
+#define SCALER_CSC1_ITR_R_2020_FR 0xf5b6d400
/* S2.8 contribution of Cb to Red */
#define SCALER_CSC2_COEF_CB_RED_MASK VC4_MASK(29, 20)
@@ -1002,9 +1008,12 @@ enum hvs_pixel_format {
/* S2.8 contribution of Cb to Blue */
#define SCALER_CSC2_COEF_CB_BLU_MASK VC4_MASK(19, 10)
#define SCALER_CSC2_COEF_CB_BLU_SHIFT 10
-#define SCALER_CSC2_ITR_R_601_5 0x00066204
-#define SCALER_CSC2_ITR_R_709_3 0x00072a1c
-#define SCALER_CSC2_JPEG_JFIF 0x000599c5
+#define SCALER_CSC2_ITR_R_601_5 0x00066604
+#define SCALER_CSC2_ITR_R_709_3 0x00072e1d
+#define SCALER_CSC2_ITR_R_2020 0x0006b624
+#define SCALER_CSC2_JPEG_JFIF 0x00059dc6
+#define SCALER_CSC2_ITR_R_709_3_FR 0x00064ddb
+#define SCALER_CSC2_ITR_R_2020_FR 0x0005e5e2
#define SCALER_TPZ0_VERT_RECALC BIT(31)
#define SCALER_TPZ0_SCALE_MASK VC4_MASK(28, 8)
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 2fc7f4b5fa09..9809ca3e2945 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -391,7 +391,6 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
int ret;
ret = vc4_hvs_atomic_check(crtc, state);
@@ -399,7 +398,6 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
return ret;
crtc_state->no_vblank = true;
- vc4_state->feed_txp = true;
return 0;
}
@@ -437,6 +435,7 @@ static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs vc4_txp_crtc_helper_funcs = {
.atomic_check = vc4_txp_atomic_check,
+ .atomic_begin = vc4_hvs_atomic_begin,
.atomic_flush = vc4_hvs_atomic_flush,
.atomic_enable = vc4_txp_atomic_enable,
.atomic_disable = vc4_txp_atomic_disable,
@@ -482,6 +481,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
vc4_crtc->pdev = pdev;
vc4_crtc->data = &vc4_txp_crtc_data;
+ vc4_crtc->feeds_txp = true;
txp->pdev = pdev;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index a87eafa89e9f..c5e3e5457737 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -97,7 +97,7 @@ static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, siz
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/*
* vgem doesn't have any begin/end cpu access ioctls, therefore must use
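Returning ERR_PTR(-ENOMEM) instead of NULL matches what callers of the .gem_create_object hook expect: an errno encoded in the pointer via the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. A hypothetical caller-side sketch, not the exact core code:

static int example_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = dev->driver->gem_create_object(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* e.g. -ENOMEM from the fix above */

	/* ... */
	return 0;
}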
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 5072dbb0669a..5f25a8d15464 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -27,7 +27,6 @@
*/
#include <linux/module.h>
-#include <linux/console.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/wait.h>
@@ -104,7 +103,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
struct drm_device *dev;
int ret;
- if (vgacon_text_force() && virtio_gpu_modeset == -1)
+ if (drm_firmware_drivers_only() && virtio_gpu_modeset == -1)
return -EINVAL;
if (virtio_gpu_modeset == 0)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 3607646d3229..c708bab555c6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -774,7 +774,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
goto out_unlock;
}
- if ((vgdev->capset_id_mask & (1 << value)) == 0) {
+ if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
ret = -EINVAL;
goto out_unlock;
}
@@ -819,7 +819,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
if (vfpriv->ring_idx_mask) {
valid_ring_mask = 0;
for (i = 0; i < vfpriv->num_rings; i++)
- valid_ring_mask |= 1 << i;
+ valid_ring_mask |= 1ULL << i;
if (~valid_ring_mask & vfpriv->ring_idx_mask) {
ret = -EINVAL;
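Both hunks fix the same bug class: `1 << value` shifts a 32-bit int, which is undefined behaviour once `value` reaches 31/32, while the masks being tested are 64-bit. A small runnable demonstration of the corrected form:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int value = 40;	/* e.g. a capset id above 31 */
	uint64_t mask = 1ULL << value;	/* well-defined 64-bit shift */

	/* (1 << value) would shift a 32-bit int: undefined behaviour for
	 * value >= 31, and the high 32 bits of a 64-bit capset_id_mask
	 * could never test true. */
	printf("0x%016llx\n", (unsigned long long)mask);
	return 0;
}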
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 21f410901694..3313b92db531 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -279,7 +279,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
flush_work(&vgdev->config_changed_work);
- vgdev->vdev->config->reset(vgdev->vdev);
+ virtio_reset_device(vgdev->vdev);
vgdev->vdev->config->del_vqs(vgdev->vdev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index f648b0e24447..baef2c5f2aaf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -79,10 +79,10 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
sg_free_table(shmem->pages);
kfree(shmem->pages);
shmem->pages = NULL;
- drm_gem_shmem_unpin(&bo->base.base);
+ drm_gem_shmem_unpin(&bo->base);
}
- drm_gem_shmem_free_object(&bo->base.base);
+ drm_gem_shmem_free(&bo->base);
} else if (virtio_gpu_is_vram(bo)) {
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
@@ -116,15 +116,14 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,
-
- .print_info = drm_gem_shmem_print_info,
+ .print_info = drm_gem_shmem_object_print_info,
.export = virtgpu_gem_prime_export,
- .pin = drm_gem_shmem_pin,
- .unpin = drm_gem_shmem_unpin,
- .get_sg_table = drm_gem_shmem_get_sg_table,
- .vmap = drm_gem_shmem_vmap,
- .vunmap = drm_gem_shmem_vunmap,
- .mmap = drm_gem_shmem_mmap,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
};
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
@@ -140,7 +139,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
if (!shmem)
- return NULL;
+ return ERR_PTR(-ENOMEM);
dshmem = &shmem->base.base;
dshmem->base.funcs = &virtio_gpu_shmem_funcs;
@@ -157,7 +156,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
struct scatterlist *sg;
int si, ret;
- ret = drm_gem_shmem_pin(&bo->base.base);
+ ret = drm_gem_shmem_pin(&bo->base);
if (ret < 0)
return -EINVAL;
@@ -167,9 +166,9 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
* dma-ops. This is discouraged for other drivers, but should be fine
* since virtio_gpu doesn't support dma-buf import from other devices.
*/
- shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base);
if (!shmem->pages) {
- drm_gem_shmem_unpin(&bo->base.base);
+ drm_gem_shmem_unpin(&bo->base);
return -EINVAL;
}
@@ -277,6 +276,6 @@ err_put_objs:
err_put_id:
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
- drm_gem_shmem_free_object(&shmem_obj->base);
+ drm_gem_shmem_free(shmem_obj);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index c9ce47c448e0..a4fabe208d9f 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -4,6 +4,7 @@ config DRM_VMWGFX
depends on DRM && PCI && MMU
depends on X86 || ARM64
select DRM_TTM
+ select DRM_TTM_HELPER
select MAPPING_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index bc323f7d4032..eee73b9aa404 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
+vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
@@ -9,9 +9,9 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
- vmwgfx_devcaps.o ttm_object.o ttm_memory.o
+ vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
+ vmwgfx_gem.o
vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
-vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
index 945c84b27e81..d90d940ad3f4 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_cmd.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
index 379ec15c7758..815d0ab0053f 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 1998-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_devcaps.h --
@@ -347,6 +347,10 @@ typedef uint32 SVGA3dDevCapIndex;
#define SVGA3D_DEVCAP_SM5 258
#define SVGA3D_DEVCAP_MULTISAMPLE_8X 259
+#define SVGA3D_DEVCAP_MAX_FORCED_SAMPLE_COUNT 260
+
+#define SVGA3D_DEVCAP_GL43 261
+
#define SVGA3D_DEVCAP_MAX 262
#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
index 5af442dad542..925bf4b93f01 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_dx.h --
@@ -508,11 +508,11 @@ typedef struct SVGA3dCmdDXSetPredication {
#pragma pack(pop)
#pragma pack(push, 1)
-typedef struct MKS3dDXSOState {
+typedef struct SVGA3dDXSOState {
uint32 offset;
uint32 intOffset;
- uint32 vertexCount;
- uint32 dead;
+ uint32 dead1;
+ uint32 dead2;
} SVGA3dDXSOState;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
index 35494a728c7a..6103b41fe92b 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_limits.h --
@@ -82,4 +82,6 @@
#define SVGA3D_MIN_SBX_DATA_SIZE (GBYTES_2_BYTES(1))
#define SVGA3D_MAX_SBX_DATA_SIZE (GBYTES_2_BYTES(4))
+#define SVGA3D_MIN_SBX_DATA_SIZE_DVM (MBYTES_2_BYTES(900))
+#define SVGA3D_MAX_SBX_DATA_SIZE_DVM (MBYTES_2_BYTES(910))
#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
index 988d8509c472..b24b4f55c941 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 1998-2015 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_reg.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
index 70b88ee16cf6..e9219eb380a2 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2012-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga3d_types.h --
@@ -370,7 +370,6 @@ typedef enum SVGA3dSurfaceFormat {
#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30)
#define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31)
-#define SVGA3D_SURFACE_VADECODE SVGA3D_SURFACE_RESERVED1
#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index bf242c21f352..405f20fc26c2 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2007,2020 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga_escape.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index aec17c3c6c29..691f48f77e33 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 2007-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga_overlay.h --
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index b3602557de2e..acabdb550c10 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,6 +1,6 @@
-/**********************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
* Copyright 1998-2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -22,7 +22,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- **********************************************************/
+ */
/*
* svga_reg.h --
@@ -442,6 +442,7 @@ typedef struct {
#define SVGA_CAP2_TRACE_FULL_FB 0x00002000
#define SVGA_CAP2_EXTRA_REGS 0x00004000
#define SVGA_CAP2_LO_STAGING 0x00008000
+#define SVGA_CAP2_VIDEO_BLT 0x00010000
#define SVGA_CAP2_RESERVED 0x80000000
typedef enum {
@@ -450,9 +451,10 @@ typedef enum {
SVGABackdoorCap3dHWVersion = 2,
SVGABackdoorCapDeviceCaps2 = 3,
SVGABackdoorCapDevelCaps = 4,
- SVGABackdoorDevelRenderer = 5,
- SVGABackdoorDevelUsingISB = 6,
- SVGABackdoorCapMax = 7,
+ SVGABackdoorCapDevCaps = 5,
+ SVGABackdoorDevelRenderer = 6,
+ SVGABackdoorDevelUsingISB = 7,
+ SVGABackdoorCapMax = 8,
} SVGABackdoorCapType;
enum {
diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c
deleted file mode 100644
index 7f7fe35fc21d..000000000000
--- a/drivers/gpu/drm/vmwgfx/ttm_memory.c
+++ /dev/null
@@ -1,683 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/ttm/ttm_device.h>
-
-#include "ttm_memory.h"
-
-#define TTM_MEMORY_ALLOC_RETRIES 4
-
-struct ttm_mem_global ttm_mem_glob;
-EXPORT_SYMBOL(ttm_mem_glob);
-
-struct ttm_mem_zone {
- struct kobject kobj;
- struct ttm_mem_global *glob;
- const char *name;
- uint64_t zone_mem;
- uint64_t emer_mem;
- uint64_t max_mem;
- uint64_t swap_limit;
- uint64_t used_mem;
-};
-
-static struct attribute ttm_mem_sys = {
- .name = "zone_memory",
- .mode = S_IRUGO
-};
-static struct attribute ttm_mem_emer = {
- .name = "emergency_memory",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_max = {
- .name = "available_memory",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_swap = {
- .name = "swap_limit",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_used = {
- .name = "used_memory",
- .mode = S_IRUGO
-};
-
-static void ttm_mem_zone_kobj_release(struct kobject *kobj)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
-
- pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
- zone->name, (unsigned long long)zone->used_mem >> 10);
- kfree(zone);
-}
-
-static ssize_t ttm_mem_zone_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
- uint64_t val = 0;
-
- spin_lock(&zone->glob->lock);
- if (attr == &ttm_mem_sys)
- val = zone->zone_mem;
- else if (attr == &ttm_mem_emer)
- val = zone->emer_mem;
- else if (attr == &ttm_mem_max)
- val = zone->max_mem;
- else if (attr == &ttm_mem_swap)
- val = zone->swap_limit;
- else if (attr == &ttm_mem_used)
- val = zone->used_mem;
- spin_unlock(&zone->glob->lock);
-
- return snprintf(buffer, PAGE_SIZE, "%llu\n",
- (unsigned long long) val >> 10);
-}
-
-static void ttm_check_swapping(struct ttm_mem_global *glob);
-
-static ssize_t ttm_mem_zone_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t size)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
- int chars;
- unsigned long val;
- uint64_t val64;
-
- chars = sscanf(buffer, "%lu", &val);
- if (chars == 0)
- return size;
-
- val64 = val;
- val64 <<= 10;
-
- spin_lock(&zone->glob->lock);
- if (val64 > zone->zone_mem)
- val64 = zone->zone_mem;
- if (attr == &ttm_mem_emer) {
- zone->emer_mem = val64;
- if (zone->max_mem > val64)
- zone->max_mem = val64;
- } else if (attr == &ttm_mem_max) {
- zone->max_mem = val64;
- if (zone->emer_mem < val64)
- zone->emer_mem = val64;
- } else if (attr == &ttm_mem_swap)
- zone->swap_limit = val64;
- spin_unlock(&zone->glob->lock);
-
- ttm_check_swapping(zone->glob);
-
- return size;
-}
-
-static struct attribute *ttm_mem_zone_attrs[] = {
- &ttm_mem_sys,
- &ttm_mem_emer,
- &ttm_mem_max,
- &ttm_mem_swap,
- &ttm_mem_used,
- NULL
-};
-
-static const struct sysfs_ops ttm_mem_zone_ops = {
- .show = &ttm_mem_zone_show,
- .store = &ttm_mem_zone_store
-};
-
-static struct kobj_type ttm_mem_zone_kobj_type = {
- .release = &ttm_mem_zone_kobj_release,
- .sysfs_ops = &ttm_mem_zone_ops,
- .default_attrs = ttm_mem_zone_attrs,
-};
-
-static struct attribute ttm_mem_global_lower_mem_limit = {
- .name = "lower_mem_limit",
- .mode = S_IRUGO | S_IWUSR
-};
-
-static ssize_t ttm_mem_global_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
-{
- struct ttm_mem_global *glob =
- container_of(kobj, struct ttm_mem_global, kobj);
- uint64_t val = 0;
-
- spin_lock(&glob->lock);
- val = glob->lower_mem_limit;
- spin_unlock(&glob->lock);
- /* convert from number of pages to KB */
- val <<= (PAGE_SHIFT - 10);
- return snprintf(buffer, PAGE_SIZE, "%llu\n",
- (unsigned long long) val);
-}
-
-static ssize_t ttm_mem_global_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t size)
-{
- int chars;
- uint64_t val64;
- unsigned long val;
- struct ttm_mem_global *glob =
- container_of(kobj, struct ttm_mem_global, kobj);
-
- chars = sscanf(buffer, "%lu", &val);
- if (chars == 0)
- return size;
-
- val64 = val;
- /* convert from KB to number of pages */
- val64 >>= (PAGE_SHIFT - 10);
-
- spin_lock(&glob->lock);
- glob->lower_mem_limit = val64;
- spin_unlock(&glob->lock);
-
- return size;
-}
-
-static struct attribute *ttm_mem_global_attrs[] = {
- &ttm_mem_global_lower_mem_limit,
- NULL
-};
-
-static const struct sysfs_ops ttm_mem_global_ops = {
- .show = &ttm_mem_global_show,
- .store = &ttm_mem_global_store,
-};
-
-static struct kobj_type ttm_mem_glob_kobj_type = {
- .sysfs_ops = &ttm_mem_global_ops,
- .default_attrs = ttm_mem_global_attrs,
-};
-
-static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
- bool from_wq, uint64_t extra)
-{
- unsigned int i;
- struct ttm_mem_zone *zone;
- uint64_t target;
-
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
-
- if (from_wq)
- target = zone->swap_limit;
- else if (capable(CAP_SYS_ADMIN))
- target = zone->emer_mem;
- else
- target = zone->max_mem;
-
- target = (extra > target) ? 0ULL : target;
-
- if (zone->used_mem > target)
- return true;
- }
- return false;
-}
-
-/*
- * At this point we only support a single shrink callback.
- * Extend this if needed, perhaps using a linked list of callbacks.
- * Note that this function is reentrant:
- * many threads may try to swap out at any given time.
- */
-
-static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
- uint64_t extra, struct ttm_operation_ctx *ctx)
-{
- int ret;
-
- spin_lock(&glob->lock);
-
- while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
- spin_unlock(&glob->lock);
- ret = ttm_global_swapout(ctx, GFP_KERNEL);
- spin_lock(&glob->lock);
- if (unlikely(ret <= 0))
- break;
- }
-
- spin_unlock(&glob->lock);
-}
-
-static void ttm_shrink_work(struct work_struct *work)
-{
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
- struct ttm_mem_global *glob =
- container_of(work, struct ttm_mem_global, work);
-
- ttm_shrink(glob, true, 0ULL, &ctx);
-}
-
-static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- uint64_t mem;
- int ret;
-
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram - si->totalhigh;
- mem *= si->mem_unit;
-
- zone->name = "kernel";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_kernel = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-
-#ifdef CONFIG_HIGHMEM
-static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone;
- uint64_t mem;
- int ret;
-
- if (si->totalhigh == 0)
- return 0;
-
- zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram;
- mem *= si->mem_unit;
-
- zone->name = "highmem";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_highmem = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
- zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-#else
-static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- uint64_t mem;
- int ret;
-
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram;
- mem *= si->mem_unit;
-
- /**
- * No special dma32 zone needed.
- */
-
- if (mem <= ((uint64_t) 1ULL << 32)) {
- kfree(zone);
- return 0;
- }
-
- /*
- * Limit max dma32 memory to 4GB for now
- * until we can figure out how big this
- * zone really is.
- */
-
- mem = ((uint64_t) 1ULL << 32);
- zone->name = "dma32";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_dma32 = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-#endif
-
-int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
-{
- struct sysinfo si;
- int ret;
- int i;
- struct ttm_mem_zone *zone;
-
- spin_lock_init(&glob->lock);
- glob->swap_queue = create_singlethread_workqueue("ttm_swap");
- INIT_WORK(&glob->work, ttm_shrink_work);
-
- ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type,
- &dev->kobj, "memory_accounting");
- if (unlikely(ret != 0)) {
- kobject_put(&glob->kobj);
- return ret;
- }
-
- si_meminfo(&si);
-
- spin_lock(&glob->lock);
-	/* set it to 0 by default to keep the original OOM behavior */
- glob->lower_mem_limit = 0;
- spin_unlock(&glob->lock);
-
- ret = ttm_mem_init_kernel_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#ifdef CONFIG_HIGHMEM
- ret = ttm_mem_init_highmem_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#else
- ret = ttm_mem_init_dma32_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#endif
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
- zone->name, (unsigned long long)zone->max_mem >> 10);
- }
- return 0;
-out_no_zone:
- ttm_mem_global_release(glob);
- return ret;
-}
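
A minimal sketch of how a driver would pair init and release (device pointer assumed, error handling shortened, function name hypothetical):

/* Hypothetical caller: ttm_mem_glob is the extern global declared in
 * ttm_memory.h. Note ttm_mem_global_init() cleans up partially
 * created zones itself via the out_no_zone path above. */
static int example_setup(struct device *dev)
{
	int ret = ttm_mem_global_init(&ttm_mem_glob, dev);

	if (ret)
		return ret;
	/* ... driver lifetime ... */
	ttm_mem_global_release(&ttm_mem_glob);
	return 0;
}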
-
-void ttm_mem_global_release(struct ttm_mem_global *glob)
-{
- struct ttm_mem_zone *zone;
- unsigned int i;
-
- destroy_workqueue(glob->swap_queue);
- glob->swap_queue = NULL;
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- kobject_del(&zone->kobj);
- kobject_put(&zone->kobj);
- }
- kobject_del(&glob->kobj);
- kobject_put(&glob->kobj);
- memset(glob, 0, sizeof(*glob));
-}
-
-static void ttm_check_swapping(struct ttm_mem_global *glob)
-{
- bool needs_swapping = false;
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (zone->used_mem > zone->swap_limit) {
- needs_swapping = true;
- break;
- }
- }
-
- spin_unlock(&glob->lock);
-
- if (unlikely(needs_swapping))
- (void)queue_work(glob->swap_queue, &glob->work);
-
-}
-
-static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t amount)
-{
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
- zone->used_mem -= amount;
- }
- spin_unlock(&glob->lock);
-}
-
-void ttm_mem_global_free(struct ttm_mem_global *glob,
- uint64_t amount)
-{
- return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
-}
-EXPORT_SYMBOL(ttm_mem_global_free);
-
-/*
- * Check whether the available memory would drop below the lower memory
- * limit if @num_pages were allocated.
- *
- * Free swap space and available system memory are summed, so an
- * allocation is allowed as long as the combined total stays above
- * @lower_mem_limit, even if either resource alone is low.
- */
-bool
-ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
- uint64_t num_pages,
- struct ttm_operation_ctx *ctx)
-{
- int64_t available;
-
-	/* We allow overcommit during suspend */
- if (ctx->force_alloc)
- return false;
-
- available = get_nr_swap_pages() + si_mem_available();
- available -= num_pages;
- if (available < glob->lower_mem_limit)
- return true;
-
- return false;
-}
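
A numeric sketch of the check above, with all figures assumed for illustration:

/*
 * Assume 262144 pages of free swap, 131072 pages of available RAM
 * and lower_mem_limit == 300000 pages:
 *
 *   available = 262144 + 131072 - num_pages = 393216 - num_pages
 *
 * Allocating 50000 pages leaves 343216 (above the limit): allowed.
 * Allocating 100000 pages leaves 293216 (below the limit): denied.
 */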
-
-static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t amount, bool reserve)
-{
- uint64_t limit;
- int ret = -ENOMEM;
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
-
- limit = (capable(CAP_SYS_ADMIN)) ?
- zone->emer_mem : zone->max_mem;
-
- if (zone->used_mem > limit)
- goto out_unlock;
- }
-
- if (reserve) {
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
- zone->used_mem += amount;
- }
- }
-
- ret = 0;
-out_unlock:
- spin_unlock(&glob->lock);
- ttm_check_swapping(glob);
-
- return ret;
-}
-
-
-static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t memory,
- struct ttm_operation_ctx *ctx)
-{
- int count = TTM_MEMORY_ALLOC_RETRIES;
-
- while (unlikely(ttm_mem_global_reserve(glob,
- single_zone,
- memory, true)
- != 0)) {
- if (ctx->no_wait_gpu)
- return -ENOMEM;
- if (unlikely(count-- == 0))
- return -ENOMEM;
- ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
- }
-
- return 0;
-}
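
The shrink target passed above is the failed request plus a quarter of it plus 16 bytes; a small sketch of the arithmetic (request size assumed):

/*
 * For a failed 4096-byte reservation:
 *
 *   extra = 4096 + (4096 >> 2) + 16 = 5136 bytes
 *
 * ttm_shrink() then swaps while ttm_zones_above_swap_target() still
 * reports a zone over its target; a zone whose target is below
 * extra has the target clamped to zero, so shrinking continues
 * while that zone has any used memory at all.
 */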
-
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- struct ttm_operation_ctx *ctx)
-{
-	/*
-	 * Normal allocations of kernel memory are registered in
-	 * the kernel zone.
-	 */
-
- return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
-}
-EXPORT_SYMBOL(ttm_mem_global_alloc);
-
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size,
- struct ttm_operation_ctx *ctx)
-{
- struct ttm_mem_zone *zone = NULL;
-
-	/*
-	 * Page allocations may be registered in a single zone
-	 * only if highmem or !dma32.
-	 */
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page) && glob->zone_highmem != NULL)
- zone = glob->zone_highmem;
-#else
- if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
- zone = glob->zone_kernel;
-#endif
- return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
-}
-
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
- uint64_t size)
-{
- struct ttm_mem_zone *zone = NULL;
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page) && glob->zone_highmem != NULL)
- zone = glob->zone_highmem;
-#else
- if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
- zone = glob->zone_kernel;
-#endif
- ttm_mem_global_free_zone(glob, zone, size);
-}
-
-size_t ttm_round_pot(size_t size)
-{
- if ((size & (size - 1)) == 0)
- return size;
- else if (size > PAGE_SIZE)
- return PAGE_ALIGN(size);
- else {
- size_t tmp_size = 4;
-
- while (tmp_size < size)
- tmp_size <<= 1;
-
- return tmp_size;
- }
-}
-EXPORT_SYMBOL(ttm_round_pot);
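
The rounding rule above splits at PAGE_SIZE; a few illustrative results, assuming a 4096-byte page:

/*
 * Assuming PAGE_SIZE == 4096:
 *
 *   ttm_round_pot(24)   == 32     next power of two
 *   ttm_round_pot(4096) == 4096   already a power of two
 *   ttm_round_pot(5000) == 8192   PAGE_ALIGN for sizes > PAGE_SIZE
 */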
diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.h b/drivers/gpu/drm/vmwgfx/ttm_memory.h
deleted file mode 100644
index c50dba774485..000000000000
--- a/drivers/gpu/drm/vmwgfx/ttm_memory.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef TTM_MEMORY_H
-#define TTM_MEMORY_H
-
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-#include <linux/bug.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/kobject.h>
-#include <linux/mm.h>
-
-#include <drm/ttm/ttm_bo_api.h>
-
-/**
- * struct ttm_mem_global - Global memory accounting structure.
- *
- * @shrink: A single callback to shrink TTM memory usage. Extend this
- * to a linked list to be able to handle multiple callbacks when needed.
- * @swap_queue: A workqueue to handle shrinking in low memory situations. We
- * need a separate workqueue since it will spend a lot of time waiting
- * for the GPU, and this will otherwise block other workqueue tasks(?)
- * At this point we use only a single-threaded workqueue.
- * @work: The workqueue callback for the shrink queue.
- * @lock: Lock to protect the memory accounting members, that is,
- * essentially the whole structure with some exceptions.
- * @lower_mem_limit: Combined lower limit of free swap space and free
- * system memory; allocations that would drop below it are denied.
- * @zones: Array of pointers to accounting zones.
- * @num_zones: Number of populated entries in the @zones array.
- * @zone_kernel: Pointer to the kernel zone.
- * @zone_highmem: Pointer to the highmem zone if there is one.
- * @zone_dma32: Pointer to the dma32 zone if there is one.
- *
- * Note that this structure is not per device. It should be global for all
- * graphics devices.
- */
-
-#define TTM_MEM_MAX_ZONES 2
-struct ttm_mem_zone;
-extern struct ttm_mem_global {
- struct kobject kobj;
- struct workqueue_struct *swap_queue;
- struct work_struct work;
- spinlock_t lock;
- uint64_t lower_mem_limit;
- struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
- unsigned int num_zones;
- struct ttm_mem_zone *zone_kernel;
-#ifdef CONFIG_HIGHMEM
- struct ttm_mem_zone *zone_highmem;
-#else
- struct ttm_mem_zone *zone_dma32;
-#endif
-} ttm_mem_glob;
-
-int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev);
-void ttm_mem_global_release(struct ttm_mem_global *glob);
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount);
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size,
- struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size);
-size_t ttm_round_pot(size_t size);
-bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages,
- struct ttm_operation_ctx *ctx);
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 899945f54dc7..26a55fef1ab5 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -50,6 +50,7 @@
#include <linux/atomic.h>
#include <linux/module.h>
#include "ttm_object.h"
+#include "vmwgfx_drv.h"
MODULE_IMPORT_NS(DMA_BUF);
@@ -73,7 +74,7 @@ struct ttm_object_file {
struct ttm_object_device *tdev;
spinlock_t lock;
struct list_head ref_list;
- struct drm_open_hash ref_hash[TTM_REF_NUM];
+ struct vmwgfx_open_hash ref_hash;
struct kref refcount;
};
@@ -91,12 +92,10 @@ struct ttm_object_file {
struct ttm_object_device {
spinlock_t object_lock;
- struct drm_open_hash object_hash;
+ struct vmwgfx_open_hash object_hash;
atomic_t object_count;
- struct ttm_mem_global *mem_glob;
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
- size_t dma_buf_size;
struct idr idr;
};
@@ -123,10 +122,9 @@ struct ttm_object_device {
struct ttm_ref_object {
struct rcu_head rcu_head;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
struct list_head head;
struct kref kref;
- enum ttm_ref_type ref_type;
struct ttm_base_object *obj;
struct ttm_object_file *tfile;
};
@@ -162,9 +160,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
struct ttm_base_object *base,
bool shareable,
enum ttm_object_type object_type,
- void (*refcount_release) (struct ttm_base_object **),
- void (*ref_obj_release) (struct ttm_base_object *,
- enum ttm_ref_type ref_type))
+ void (*refcount_release) (struct ttm_base_object **))
{
struct ttm_object_device *tdev = tfile->tdev;
int ret;
@@ -172,7 +168,6 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->shareable = shareable;
base->tfile = ttm_object_file_ref(tfile);
base->refcount_release = refcount_release;
- base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
kref_init(&base->refcount);
idr_preload(GFP_KERNEL);
@@ -184,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return ret;
base->handle = ret;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+ ret = ttm_ref_object_add(tfile, base, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -247,12 +242,12 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
- struct drm_hash_item *hash;
- struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+ struct vmwgfx_hash_item *hash;
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
int ret;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, key, &hash);
+ ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
if (ret) {
rcu_read_unlock();
return NULL;
@@ -267,12 +262,12 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
struct ttm_base_object *base = NULL;
- struct drm_hash_item *hash;
- struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+ struct vmwgfx_hash_item *hash;
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
int ret;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, key, &hash);
+ ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
@@ -299,64 +294,14 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
return base;
}
-/**
- * ttm_ref_object_exists - Check whether a caller has a valid ref object
- * (has opened) a base object.
- *
- * @tfile: Pointer to a struct ttm_object_file identifying the caller.
- * @base: Pointer to a struct base object.
- *
- * Checks whether the caller identified by @tfile has put a valid USAGE
- * reference object on the base object identified by @base.
- */
-bool ttm_ref_object_exists(struct ttm_object_file *tfile,
- struct ttm_base_object *base)
-{
- struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
- struct drm_hash_item *hash;
- struct ttm_ref_object *ref;
-
- rcu_read_lock();
- if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
- goto out_false;
-
- /*
- * Verify that the ref object is really pointing to our base object.
- * Our base object could actually be dead, and the ref object pointing
- * to another base object with the same handle.
- */
- ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
- if (unlikely(base != ref->obj))
- goto out_false;
-
- /*
- * Verify that the ref->obj pointer was actually valid!
- */
- rmb();
- if (unlikely(kref_read(&ref->kref) == 0))
- goto out_false;
-
- rcu_read_unlock();
- return true;
-
- out_false:
- rcu_read_unlock();
- return false;
-}
-
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed,
+ bool *existed,
bool require_existed)
{
- struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
struct ttm_ref_object *ref;
- struct drm_hash_item *hash;
- struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
+ struct vmwgfx_hash_item *hash;
int ret = -EINVAL;
if (base->tfile != tfile && !base->shareable)
@@ -367,7 +312,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
while (ret == -EINVAL) {
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
+ ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash);
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
@@ -381,24 +326,18 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
if (require_existed)
return -EPERM;
- ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
- &ctx);
- if (unlikely(ret != 0))
- return ret;
ref = kmalloc(sizeof(*ref), GFP_KERNEL);
if (unlikely(ref == NULL)) {
- ttm_mem_global_free(mem_glob, sizeof(*ref));
return -ENOMEM;
}
ref->hash.key = base->handle;
ref->obj = base;
ref->tfile = tfile;
- ref->ref_type = ref_type;
kref_init(&ref->kref);
spin_lock(&tfile->lock);
- ret = drm_ht_insert_item_rcu(ht, &ref->hash);
+ ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash);
if (likely(ret == 0)) {
list_add_tail(&ref->head, &tfile->ref_list);
@@ -412,7 +351,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
spin_unlock(&tfile->lock);
BUG_ON(ret != -EINVAL);
- ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree(ref);
}
@@ -424,35 +362,29 @@ ttm_ref_object_release(struct kref *kref)
{
struct ttm_ref_object *ref =
container_of(kref, struct ttm_ref_object, kref);
- struct ttm_base_object *base = ref->obj;
struct ttm_object_file *tfile = ref->tfile;
- struct drm_open_hash *ht;
- struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ struct vmwgfx_open_hash *ht;
- ht = &tfile->ref_hash[ref->ref_type];
- (void)drm_ht_remove_item_rcu(ht, &ref->hash);
+ ht = &tfile->ref_hash;
+ (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
list_del(&ref->head);
spin_unlock(&tfile->lock);
- if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
- base->ref_obj_release(base, ref->ref_type);
-
ttm_base_object_unref(&ref->obj);
- ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree_rcu(ref, rcu_head);
spin_lock(&tfile->lock);
}
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
- unsigned long key, enum ttm_ref_type ref_type)
+ unsigned long key)
{
- struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct vmwgfx_open_hash *ht = &tfile->ref_hash;
struct ttm_ref_object *ref;
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
int ret;
spin_lock(&tfile->lock);
- ret = drm_ht_find_item(ht, key, &hash);
+ ret = vmwgfx_ht_find_item(ht, key, &hash);
if (unlikely(ret != 0)) {
spin_unlock(&tfile->lock);
return -EINVAL;
@@ -467,7 +399,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
struct ttm_ref_object *ref;
struct list_head *list;
- unsigned int i;
struct ttm_object_file *tfile = *p_tfile;
*p_tfile = NULL;
@@ -485,8 +416,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
}
spin_unlock(&tfile->lock);
- for (i = 0; i < TTM_REF_NUM; ++i)
- drm_ht_remove(&tfile->ref_hash[i]);
+ vmwgfx_ht_remove(&tfile->ref_hash);
ttm_object_file_unref(&tfile);
}
@@ -495,8 +425,6 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
unsigned int hash_order)
{
struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
- unsigned int i;
- unsigned int j = 0;
int ret;
if (unlikely(tfile == NULL))
@@ -507,18 +435,13 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
kref_init(&tfile->refcount);
INIT_LIST_HEAD(&tfile->ref_list);
- for (i = 0; i < TTM_REF_NUM; ++i) {
- ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
- if (ret) {
- j = i;
- goto out_err;
- }
- }
+ ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order);
+ if (ret)
+ goto out_err;
return tfile;
out_err:
- for (i = 0; i < j; ++i)
- drm_ht_remove(&tfile->ref_hash[i]);
+ vmwgfx_ht_remove(&tfile->ref_hash);
kfree(tfile);
@@ -526,8 +449,7 @@ out_err:
}
struct ttm_object_device *
-ttm_object_device_init(struct ttm_mem_global *mem_glob,
- unsigned int hash_order,
+ttm_object_device_init(unsigned int hash_order,
const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
@@ -536,19 +458,24 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
if (unlikely(tdev == NULL))
return NULL;
- tdev->mem_glob = mem_glob;
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
- ret = drm_ht_create(&tdev->object_hash, hash_order);
+ ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
if (ret != 0)
goto out_no_object_hash;
- idr_init_base(&tdev->idr, 1);
+ /*
+ * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
+ * a seperate namespace for GEM handles (which are
+	 * a separate namespace for GEM handles (which are
+	 * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctls
+ * easily be able to tell whether the handle refers to a
+ * GEM buffer or a surface.
+ */
+ idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
- tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
- ttm_round_pot(sizeof(struct file));
return tdev;
out_no_object_hash:
@@ -564,7 +491,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
idr_destroy(&tdev->idr);
- drm_ht_remove(&tdev->object_hash);
+ vmwgfx_ht_remove(&tdev->object_hash);
kfree(tdev);
}
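
Because of the idr base chosen above, a handle's numeric range identifies its type. A hedged sketch of how a consumer could tell the two namespaces apart (helper name hypothetical):

/* Hypothetical helper: GEM handles occupy 1..VMWGFX_NUM_MOB, while
 * surface handles are allocated from VMWGFX_NUM_MOB + 1 upwards. */
static inline bool example_handle_is_gem(u32 handle)
{
	return handle >= 1 && handle <= VMWGFX_NUM_MOB;
}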
@@ -633,7 +560,6 @@ static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
if (prime->dma_buf == dma_buf)
prime->dma_buf = NULL;
mutex_unlock(&prime->mutex);
- ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
ttm_base_object_unref(&base);
}
@@ -667,7 +593,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->handle;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+ ret = ttm_ref_object_add(tfile, base, NULL, false);
dma_buf_put(dma_buf);
@@ -715,30 +641,18 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
exp_info.ops = &tdev->ops;
exp_info.size = prime->size;
exp_info.flags = flags;
exp_info.priv = prime;
/*
- * Need to create a new dma_buf, with memory accounting.
+ * Need to create a new dma_buf
*/
- ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
- &ctx);
- if (unlikely(ret != 0)) {
- mutex_unlock(&prime->mutex);
- goto out_unref;
- }
dma_buf = dma_buf_export(&exp_info);
if (IS_ERR(dma_buf)) {
ret = PTR_ERR(dma_buf);
- ttm_mem_global_free(tdev->mem_glob,
- tdev->dma_buf_size);
mutex_unlock(&prime->mutex);
goto out_unref;
}
@@ -773,7 +687,6 @@ out_unref:
* @shareable: See ttm_base_object_init
* @type: See ttm_base_object_init
* @refcount_release: See ttm_base_object_init
- * @ref_obj_release: See ttm_base_object_init
*
* Initializes an object which is compatible with the drm_prime model
* for data sharing between processes and devices.
@@ -781,9 +694,7 @@ out_unref:
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
struct ttm_prime_object *prime, bool shareable,
enum ttm_object_type type,
- void (*refcount_release) (struct ttm_base_object **),
- void (*ref_obj_release) (struct ttm_base_object *,
- enum ttm_ref_type ref_type))
+ void (*refcount_release) (struct ttm_base_object **))
{
mutex_init(&prime->mutex);
prime->size = PAGE_ALIGN(size);
@@ -792,6 +703,5 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
prime->refcount_release = refcount_release;
return ttm_base_object_init(tfile, &prime->base, shareable,
ttm_prime_type,
- ttm_prime_refcount_release,
- ref_obj_release);
+ ttm_prime_refcount_release);
}
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index 49b064f0cb19..4c8700027c6d 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -42,31 +42,7 @@
#include <linux/list.h>
#include <linux/rcupdate.h>
-#include <drm/drm_hashtab.h>
-
-#include "ttm_memory.h"
-
-/**
- * enum ttm_ref_type
- *
- * Describes what type of reference a ref object holds.
- *
- * TTM_REF_USAGE is a simple refcount on a base object.
- *
- * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
- * buffer object.
- *
- * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
- * buffer object.
- *
- */
-
-enum ttm_ref_type {
- TTM_REF_USAGE,
- TTM_REF_SYNCCPU_READ,
- TTM_REF_SYNCCPU_WRITE,
- TTM_REF_NUM
-};
+#include "vmwgfx_hashtab.h"
/**
* enum ttm_object_type
@@ -78,7 +54,6 @@ enum ttm_ref_type {
enum ttm_object_type {
ttm_fence_type,
- ttm_buffer_type,
ttm_lock_type,
ttm_prime_type,
ttm_driver_type0 = 256,
@@ -129,8 +104,6 @@ struct ttm_base_object {
struct ttm_object_file *tfile;
struct kref refcount;
void (*refcount_release) (struct ttm_base_object **base);
- void (*ref_obj_release) (struct ttm_base_object *base,
- enum ttm_ref_type ref_type);
u32 handle;
enum ttm_object_type object_type;
u32 shareable;
@@ -179,11 +152,7 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
bool shareable,
enum ttm_object_type type,
void (*refcount_release) (struct ttm_base_object
- **),
- void (*ref_obj_release) (struct ttm_base_object
- *,
- enum ttm_ref_type
- ref_type));
+ **));
/**
* ttm_base_object_lookup
@@ -247,12 +216,9 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
*/
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed,
+ bool *existed,
bool require_existed);
-extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
- struct ttm_base_object *base);
-
/**
* ttm_ref_object_base_unref
*
@@ -265,8 +231,7 @@ extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
* will be unreferenced.
*/
extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
- unsigned long key,
- enum ttm_ref_type ref_type);
+ unsigned long key);
/**
* ttm_object_file_init - initialize a struct ttm_object file
@@ -297,7 +262,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
/**
* ttm_object device init - initialize a struct ttm_object_device
*
- * @mem_glob: struct ttm_mem_global for memory accounting.
* @hash_order: Order of hash table used to hash the base objects.
* @ops: DMA buf ops for prime objects of this device.
*
@@ -306,8 +270,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
*/
extern struct ttm_object_device *
-ttm_object_device_init(struct ttm_mem_global *mem_glob,
- unsigned int hash_order,
+ttm_object_device_init(unsigned int hash_order,
const struct dma_buf_ops *ops);
/**
@@ -332,10 +295,7 @@ extern int ttm_prime_object_init(struct ttm_object_file *tfile,
bool shareable,
enum ttm_object_type type,
void (*refcount_release)
- (struct ttm_base_object **),
- void (*ref_obj_release)
- (struct ttm_base_object *,
- enum ttm_ref_type ref_type));
+ (struct ttm_base_object **));
static inline enum ttm_object_type
ttm_base_object_type(struct ttm_base_object *base)
@@ -353,13 +313,6 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
#define ttm_prime_object_kfree(__obj, __prime) \
kfree_rcu(__obj, __prime.base.rhead)
-/*
- * Extra memory required by the base object's idr storage, which is allocated
- * separately from the base object itself. We estimate an on-average 128 bytes
- * per idr.
- */
-#define TTM_OBJ_EXTRA_SIZE 128
-
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 6f27d69bad0e..ae2de914eb89 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -354,6 +354,27 @@ void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
}
/**
+ * vmw_binding_cb_offset_update - Update the offset of a cb binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @shader_slot: The shader slot of the binding.
+ * @slot: The slot of the binding.
+ * @offsetInBytes: The new offset of the binding.
+ *
+ * Updates the offset of an existing cb binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
+ u32 shader_slot, u32 slot, u32 offsetInBytes)
+{
+ struct vmw_ctx_bindinfo *loc =
+ vmw_binding_loc(cbs, vmw_ctx_binding_cb, shader_slot, slot);
+ struct vmw_ctx_bindinfo_cb *loc_cb =
+ (struct vmw_ctx_bindinfo_cb *)((u8 *) loc);
+ loc_cb->offset = offsetInBytes;
+}
+
+/**
* vmw_binding_add_uav_index - Add UAV index for tracking.
* @cbs: Pointer to the context binding state tracker.
* @slot: UAV type to which bind this index.
@@ -1070,7 +1091,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
size_t cmd_size, view_id_size;
const struct vmw_resource *ctx = vmw_cbs_context(cbs);
- vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+ vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
@@ -1100,7 +1121,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
size_t cmd_size, view_id_size;
const struct vmw_resource *ctx = vmw_cbs_context(cbs);
- vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+ vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
@@ -1327,8 +1348,7 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
}
/**
- * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
- * memory accounting.
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state.
*
* @dev_priv: Pointer to a device private structure.
*
@@ -1338,20 +1358,9 @@ struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv)
{
struct vmw_ctx_binding_state *cbs;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
- int ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
- &ctx);
- if (ret)
- return ERR_PTR(ret);
cbs = vzalloc(sizeof(*cbs));
if (!cbs) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
return ERR_PTR(-ENOMEM);
}
@@ -1362,17 +1371,13 @@ vmw_binding_state_alloc(struct vmw_private *dev_priv)
}
/**
- * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
- * memory accounting info.
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state.
*
* @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
*/
void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{
- struct vmw_private *dev_priv = cbs->dev_priv;
-
vfree(cbs);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index dcb71fd0bb3b..85b90f7d398d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -200,7 +200,7 @@ struct vmw_dx_shader_bindings {
* @splice_index: The device splice index set by user-space.
*/
struct vmw_ctx_bindinfo_uav {
- struct vmw_ctx_bindinfo_view views[SVGA3D_MAX_UAVIEWS];
+ struct vmw_ctx_bindinfo_view views[SVGA3D_DX11_1_MAX_UAVIEWS];
uint32 index;
};
@@ -217,6 +217,8 @@ struct vmw_ctx_bindinfo_so {
extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *ci,
u32 shader_slot, u32 slot);
+extern void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
+ u32 shader_slot, u32 slot, u32 offsetInBytes);
extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs,
uint32 slot, uint32 splice_index);
extern void
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index fd007f1c1776..31aecc46624b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -33,18 +33,6 @@
/**
- * struct vmw_user_buffer_object - User-space-visible buffer object
- *
- * @prime: The prime object providing user visibility.
- * @vbo: The struct vmw_buffer_object
- */
-struct vmw_user_buffer_object {
- struct ttm_prime_object prime;
- struct vmw_buffer_object vbo;
-};
-
-
-/**
* vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
* vmw_buffer_object.
*
@@ -60,23 +48,6 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
/**
- * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
- * vmw_user_buffer_object.
- *
- * @bo: Pointer to the TTM buffer object.
- * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
- * object.
- */
-static struct vmw_user_buffer_object *
-vmw_user_buffer_object(struct ttm_buffer_object *bo)
-{
- struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
-
- return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
-}
-
-
-/**
* vmw_bo_pin_in_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
@@ -392,39 +363,6 @@ void vmw_bo_unmap(struct vmw_buffer_object *vbo)
/**
- * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @size: The requested buffer size.
- * @user: Whether this is an ordinary dma buffer or a user dma buffer.
- */
-static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
- bool user)
-{
- static size_t struct_size, user_struct_size;
- size_t num_pages = PFN_UP(size);
- size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
-
- if (unlikely(struct_size == 0)) {
- size_t backend_size = ttm_round_pot(vmw_tt_size);
-
- struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_buffer_object));
- user_struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
- TTM_OBJ_EXTRA_SIZE;
- }
-
- if (dev_priv->map_mode == vmw_dma_alloc_coherent)
- page_array_size +=
- ttm_round_pot(num_pages * sizeof(dma_addr_t));
-
- return ((user) ? user_struct_size : struct_size) +
- page_array_size;
-}
-
-
-/**
* vmw_bo_bo_free - vmw buffer object destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
@@ -436,27 +374,10 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
WARN_ON(vmw_bo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo);
- dma_resv_fini(&bo->base._resv);
+ drm_gem_object_release(&bo->base);
kfree(vmw_bo);
}
-
-/**
- * vmw_user_bo_destroy - vmw buffer object destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
- struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
-
- WARN_ON(vbo->dirty);
- WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
- vmw_bo_unmap(vbo);
- ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
/**
* vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
*
@@ -471,33 +392,27 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo)
{
- struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
struct ttm_buffer_object *bo;
- size_t acc_size;
+ struct drm_device *vdev = &dev_priv->drm;
int ret;
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (unlikely(!bo))
return -ENOMEM;
- acc_size = ttm_round_pot(sizeof(*bo));
- acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
- acc_size += ttm_round_pot(sizeof(struct ttm_tt));
-
- ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
- if (unlikely(ret))
- goto error_free;
-
+ size = ALIGN(size, PAGE_SIZE);
- bo->base.size = size;
- dma_resv_init(&bo->base._resv);
- drm_vma_node_reset(&bo->base.vma_node);
+ drm_gem_private_object_init(vdev, &bo->base, size);
ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
- ttm_bo_type_device, placement, 0,
+ ttm_bo_type_kernel, placement, 0,
&ctx, NULL, NULL, NULL);
if (unlikely(ret))
- goto error_account;
+ goto error_free;
ttm_bo_pin(bo);
ttm_bo_unreserve(bo);
@@ -505,14 +420,38 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
return 0;
-error_account:
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
-
error_free:
kfree(bo);
return ret;
}
+int vmw_bo_create(struct vmw_private *vmw,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible, bool pin,
+ void (*bo_free)(struct ttm_buffer_object *bo),
+ struct vmw_buffer_object **p_bo)
+{
+ int ret;
+
+ *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
+ if (unlikely(!*p_bo)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = vmw_bo_init(vmw, *p_bo, size,
+ placement, interruptible, pin,
+ bo_free);
+ if (unlikely(ret != 0))
+ goto out_error;
+
+ return ret;
+out_error:
+ kfree(*p_bo);
+ *p_bo = NULL;
+ return ret;
+}
+
/**
* vmw_bo_init - Initialize a vmw buffer object
*
@@ -533,192 +472,44 @@ int vmw_bo_init(struct vmw_private *dev_priv,
bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo))
{
- struct ttm_operation_ctx ctx = { interruptible, false };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = interruptible,
+ .no_wait_gpu = false
+ };
struct ttm_device *bdev = &dev_priv->bdev;
- size_t acc_size;
+ struct drm_device *vdev = &dev_priv->drm;
int ret;
- bool user = (bo_free == &vmw_user_bo_destroy);
-
- WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
- acc_size = vmw_bo_acc_size(dev_priv, size, user);
+ WARN_ON_ONCE(!bo_free);
memset(vmw_bo, 0, sizeof(*vmw_bo));
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->base.priority = 3;
vmw_bo->res_tree = RB_ROOT;
- ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
- if (unlikely(ret))
- return ret;
-
- vmw_bo->base.base.size = size;
- dma_resv_init(&vmw_bo->base.base._resv);
- drm_vma_node_reset(&vmw_bo->base.base.vma_node);
+ size = ALIGN(size, PAGE_SIZE);
+ drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
+ ttm_bo_type_device,
+ placement,
0, &ctx, NULL, NULL, bo_free);
if (unlikely(ret)) {
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
return ret;
}
if (pin)
ttm_bo_pin(&vmw_bo->base);
ttm_bo_unreserve(&vmw_bo->base);
- return 0;
-}
-
-
-/**
- * vmw_user_bo_release - TTM reference base object release callback for
- * vmw user buffer objects
- *
- * @p_base: The TTM base object pointer about to be unreferenced.
- *
- * Clears the TTM base object pointer and drops the reference the
- * base object has on the underlying struct vmw_buffer_object.
- */
-static void vmw_user_bo_release(struct ttm_base_object **p_base)
-{
- struct vmw_user_buffer_object *vmw_user_bo;
- struct ttm_base_object *base = *p_base;
-
- *p_base = NULL;
-
- if (unlikely(base == NULL))
- return;
-
- vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
- prime.base);
- ttm_bo_put(&vmw_user_bo->vbo.base);
-}
-
-
-/**
- * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
- * for vmw user buffer objects
- *
- * @base: Pointer to the TTM base object
- * @ref_type: Reference type of the reference reaching zero.
- *
- * Called when user-space drops its last synccpu reference on the buffer
- * object, either explicitly or as part of a cleanup file close.
- */
-static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
- enum ttm_ref_type ref_type)
-{
- struct vmw_user_buffer_object *user_bo;
-
- user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
-
- switch (ref_type) {
- case TTM_REF_SYNCCPU_WRITE:
- atomic_dec(&user_bo->vbo.cpu_writers);
- break;
- default:
- WARN_ONCE(true, "Undefined buffer object reference release.\n");
- }
-}
-
-
-/**
- * vmw_user_bo_alloc - Allocate a user buffer object
- *
- * @dev_priv: Pointer to a struct device private.
- * @tfile: Pointer to a struct ttm_object_file on which to register the user
- * object.
- * @size: Size of the buffer object.
- * @shareable: Boolean whether the buffer is shareable with other open files.
- * @handle: Pointer to where the handle value should be assigned.
- * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
- * should be assigned.
- * @p_base: The TTM base object pointer about to be allocated.
- * Return: Zero on success, negative error code on error.
- */
-int vmw_user_bo_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_buffer_object **p_vbo,
- struct ttm_base_object **p_base)
-{
- struct vmw_user_buffer_object *user_bo;
- int ret;
-
- user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
- if (unlikely(!user_bo)) {
- DRM_ERROR("Failed to allocate a buffer.\n");
- return -ENOMEM;
- }
-
- ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
- (dev_priv->has_mob) ?
- &vmw_sys_placement :
- &vmw_vram_sys_placement, true, false,
- &vmw_user_bo_destroy);
- if (unlikely(ret != 0))
- return ret;
-
- ttm_bo_get(&user_bo->vbo.base);
- ret = ttm_prime_object_init(tfile,
- size,
- &user_bo->prime,
- shareable,
- ttm_buffer_type,
- &vmw_user_bo_release,
- &vmw_user_bo_ref_obj_release);
- if (unlikely(ret != 0)) {
- ttm_bo_put(&user_bo->vbo.base);
- goto out_no_base_object;
- }
-
- *p_vbo = &user_bo->vbo;
- if (p_base) {
- *p_base = &user_bo->prime.base;
- kref_get(&(*p_base)->refcount);
- }
- *handle = user_bo->prime.base.handle;
-
-out_no_base_object:
- return ret;
-}
-
-/**
- * vmw_user_bo_verify_access - verify access permissions on this
- * buffer object.
- *
- * @bo: Pointer to the buffer object being accessed
- * @tfile: Identifying the caller.
- */
-int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile)
-{
- struct vmw_user_buffer_object *vmw_user_bo;
-
- if (unlikely(bo->destroy != vmw_user_bo_destroy))
- return -EPERM;
-
- vmw_user_bo = vmw_user_buffer_object(bo);
-
- /* Check that the caller has opened the object. */
- if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
- return 0;
-
- DRM_ERROR("Could not grant buffer access.\n");
- return -EPERM;
+ return 0;
}
-
/**
- * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
* access, idling previous GPU operations on the buffer and optionally
* blocking it for further command submissions.
*
- * @user_bo: Pointer to the buffer object being grabbed for CPU access
- * @tfile: Identifying the caller.
+ * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
* @flags: Flags indicating how the grab should be performed.
* Return: Zero on success, Negative error code on error. In particular,
* -EBUSY will be returned if a dontblock operation is requested and the
@@ -727,13 +518,11 @@ int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
*
* A blocking grab will be automatically released when @tfile is closed.
*/
-static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
- struct ttm_object_file *tfile,
+static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
uint32_t flags)
{
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
- struct ttm_buffer_object *bo = &user_bo->vbo.base;
- bool existed;
+ struct ttm_buffer_object *bo = &vmw_bo->base;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
@@ -755,17 +544,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
ret = ttm_bo_wait(bo, true, nonblock);
if (likely(ret == 0))
- atomic_inc(&user_bo->vbo.cpu_writers);
+ atomic_inc(&vmw_bo->cpu_writers);
ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed, false);
- if (ret != 0 || existed)
- atomic_dec(&user_bo->vbo.cpu_writers);
-
return ret;
}
@@ -773,19 +557,25 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
* vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
* and unblock command submission on the buffer if blocked.
*
+ * @filp: Identifying the caller.
* @handle: Handle identifying the buffer object.
- * @tfile: Identifying the caller.
* @flags: Flags indicating the type of release.
*/
-static int vmw_user_bo_synccpu_release(uint32_t handle,
- struct ttm_object_file *tfile,
- uint32_t flags)
+static int vmw_user_bo_synccpu_release(struct drm_file *filp,
+ uint32_t handle,
+ uint32_t flags)
{
- if (!(flags & drm_vmw_synccpu_allow_cs))
- return ttm_ref_object_base_unref(tfile, handle,
- TTM_REF_SYNCCPU_WRITE);
+ struct vmw_buffer_object *vmw_bo;
+ int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
- return 0;
+ if (!ret) {
+ if (!(flags & drm_vmw_synccpu_allow_cs)) {
+ atomic_dec(&vmw_bo->cpu_writers);
+ }
+ ttm_bo_put(&vmw_bo->base);
+ }
+
+ return ret;
}
@@ -807,9 +597,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_synccpu_arg *arg =
(struct drm_vmw_synccpu_arg *) data;
struct vmw_buffer_object *vbo;
- struct vmw_user_buffer_object *user_bo;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_base_object *buffer_base;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -822,16 +609,12 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
switch (arg->op) {
case drm_vmw_synccpu_grab:
- ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
- &buffer_base);
+ ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
if (unlikely(ret != 0))
return ret;
- user_bo = container_of(vbo, struct vmw_user_buffer_object,
- vbo);
- ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+ ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
- ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) {
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -840,7 +623,8 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
}
break;
case drm_vmw_synccpu_release:
- ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+ ret = vmw_user_bo_synccpu_release(file_priv,
+ arg->handle,
arg->flags);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
@@ -856,50 +640,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
return 0;
}
-
-/**
- * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
- * allocation functionality.
- *
- * @dev: Identifies the drm device.
- * @data: Pointer to the ioctl argument.
- * @file_priv: Identifies the caller.
- * Return: Zero on success, negative error code on error.
- *
- * This function checks the ioctl arguments for validity and allocates a
- * struct vmw_user_buffer_object bo.
- */
-int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- union drm_vmw_alloc_dmabuf_arg *arg =
- (union drm_vmw_alloc_dmabuf_arg *)data;
- struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
- struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_buffer_object *vbo;
- uint32_t handle;
- int ret;
-
- ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- req->size, false, &handle, &vbo,
- NULL);
- if (unlikely(ret != 0))
- goto out_no_bo;
-
- rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
- rep->cur_gmr_id = handle;
- rep->cur_gmr_offset = 0;
-
- vmw_bo_unreference(&vbo);
-
-out_no_bo:
-
- return ret;
-}
-
-
/**
* vmw_bo_unref_ioctl - Generic handle close ioctl.
*
@@ -917,65 +657,48 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_unref_dmabuf_arg *arg =
(struct drm_vmw_unref_dmabuf_arg *)data;
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->handle,
- TTM_REF_USAGE);
+ drm_gem_handle_delete(file_priv, arg->handle);
+ return 0;
}
/**
* vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
*
- * @tfile: The TTM object file the handle is registered with.
+ * @filp: The file the handle is registered with.
* @handle: The user buffer object handle
* @out: Pointer to a where a pointer to the embedded
* struct vmw_buffer_object should be placed.
- * @p_base: Pointer to where a pointer to the TTM base object should be
- * placed, or NULL if no such pointer is required.
* Return: Zero on success, Negative error code on error.
*
- * Both the output base object pointer and the vmw buffer object pointer
- * will be refcounted.
+ * The vmw buffer object pointer will be refcounted.
*/
-int vmw_user_bo_lookup(struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_buffer_object **out,
- struct ttm_base_object **p_base)
+int vmw_user_bo_lookup(struct drm_file *filp,
+ uint32_t handle,
+ struct vmw_buffer_object **out)
{
- struct vmw_user_buffer_object *vmw_user_bo;
- struct ttm_base_object *base;
+ struct drm_gem_object *gobj;
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL)) {
+ gobj = drm_gem_object_lookup(filp, handle);
+ if (!gobj) {
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -ESRCH;
}
- if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
- ttm_base_object_unref(&base);
- DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
- (unsigned long)handle);
- return -EINVAL;
- }
-
- vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
- prime.base);
- ttm_bo_get(&vmw_user_bo->vbo.base);
- if (p_base)
- *p_base = base;
- else
- ttm_base_object_unref(&base);
- *out = &vmw_user_bo->vbo;
+ *out = gem_to_vmw_bo(gobj);
+ ttm_bo_get(&(*out)->base);
+ drm_gem_object_put(gobj);
return 0;
}
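
With the GEM-based lookup above, callers balance the reference with a plain ttm_bo_put(). A minimal sketch of the expected calling pattern (function name hypothetical):

/* Hypothetical caller: vmw_user_bo_lookup() returns with a ttm_bo
 * reference held on success, taken by the ttm_bo_get() above. */
static int example_use_handle(struct drm_file *filp, u32 handle)
{
	struct vmw_buffer_object *vbo;
	int ret = vmw_user_bo_lookup(filp, handle, &vbo);

	if (ret)
		return ret;
	/* ... use vbo ... */
	ttm_bo_put(&vbo->base);
	return 0;
}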
/**
* vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
- * @tfile: The TTM object file the handle is registered with.
+ * @filp: The file the handle is registered with.
* @handle: The user buffer object handle.
*
- * This function looks up a struct vmw_user_bo and returns a pointer to the
+ * This function looks up a GEM-backed buffer object and returns a pointer to the
* struct vmw_buffer_object it derives from without refcounting the pointer.
* The returned pointer is only valid until vmw_user_bo_noref_release() is
* called, and the object pointed to by the returned pointer may be doomed.
@@ -988,52 +711,23 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
* error pointer on failure.
*/
struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
+vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
{
- struct vmw_user_buffer_object *vmw_user_bo;
- struct ttm_base_object *base;
+ struct vmw_buffer_object *vmw_bo;
+ struct ttm_buffer_object *bo;
+ struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
- base = ttm_base_object_noref_lookup(tfile, handle);
- if (!base) {
+ if (!gobj) {
DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return ERR_PTR(-ESRCH);
}
+ vmw_bo = gem_to_vmw_bo(gobj);
+ bo = ttm_bo_get_unless_zero(&vmw_bo->base);
+ vmw_bo = vmw_buffer_object(bo);
+ drm_gem_object_put(gobj);
- if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
- ttm_base_object_noref_release();
- DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
- (unsigned long)handle);
- return ERR_PTR(-EINVAL);
- }
-
- vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
- prime.base);
- return &vmw_user_bo->vbo;
-}
-
-/**
- * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
- *
- * @tfile: The TTM object file to register the handle with.
- * @vbo: The embedded vmw buffer object.
- * @handle: Pointer to where the new handle should be placed.
- * Return: Zero on success, Negative error code on error.
- */
-int vmw_user_bo_reference(struct ttm_object_file *tfile,
- struct vmw_buffer_object *vbo,
- uint32_t *handle)
-{
- struct vmw_user_buffer_object *user_bo;
-
- if (vbo->base.destroy != vmw_user_bo_destroy)
- return -EINVAL;
-
- user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
-
- *handle = user_bo->prime.base.handle;
- return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL, false);
+ return vmw_bo;
}
@@ -1087,68 +781,15 @@ int vmw_dumb_create(struct drm_file *file_priv,
int ret;
args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
+ args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
- ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- args->size, false, &args->handle,
- &vbo, NULL);
- if (unlikely(ret != 0))
- goto out_no_bo;
+ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ args->size, &args->handle,
+ &vbo);
- vmw_bo_unreference(&vbo);
-out_no_bo:
return ret;
}
-
-/**
- * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * @offset: The address space offset returned.
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm dumb_map_offset functionality.
- */
-int vmw_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_buffer_object *out_buf;
- int ret;
-
- ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
- if (ret != 0)
- return -EINVAL;
-
- *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
- vmw_bo_unreference(&out_buf);
- return 0;
-}
-
-
-/**
- * vmw_dumb_destroy - Destroy a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm dumb_destroy functionality.
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
-}
-
-
/**
* vmw_bo_swap_notify - swapout notify callback.
*
@@ -1157,8 +798,7 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
/* Is @bo embedded in a struct vmw_buffer_object? */
- if (bo->destroy != vmw_bo_bo_free &&
- bo->destroy != vmw_user_bo_destroy)
+	if (!vmw_bo_is_vmw_bo(bo))
return;
/* Kill any cached kernel maps before swapout */
@@ -1182,8 +822,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct vmw_buffer_object *vbo;
	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
- if (bo->destroy != vmw_bo_bo_free &&
- bo->destroy != vmw_user_bo_destroy)
+	if (!vmw_bo_is_vmw_bo(bo))
return;
vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -1204,3 +843,22 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
+
+/**
+ * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
+ * @bo: buffer object to be checked
+ *
+ * Uses the destroy function associated with the object to determine if
+ * this is a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &vmw_bo_bo_free ||
+ bo->destroy == &vmw_gem_destroy)
+ return true;
+
+ return false;
+}
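
A short usage sketch of the new predicate (caller shape assumed): only downcast when the destroy callback proves the embedding type, as the notify callbacks above now do.

/* Hypothetical caller mirroring vmw_bo_move_notify(). */
static void example_notify(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vbo;

	if (!vmw_bo_is_vmw_bo(bo))
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);
	/* ... operate on vbo ... */
	(void)vbo;
}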
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 67db472d3493..a3bfbb6c3e14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -145,6 +145,13 @@ struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
(unsigned int) max,
(unsigned int) min,
(unsigned int) fifo->capabilities);
+
+ if (unlikely(min >= max)) {
+ drm_warn(&dev_priv->drm,
+ "FIFO memory is not usable. Driver failed to initialize.");
+ return ERR_PTR(-ENXIO);
+ }
+
return fifo;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 8381750db81b..415774fde796 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -42,7 +42,7 @@
*/
struct vmw_cmdbuf_res {
struct vmw_resource *res;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
struct list_head head;
enum vmw_cmdbuf_res_state state;
struct vmw_cmdbuf_res_manager *man;
@@ -59,7 +59,7 @@ struct vmw_cmdbuf_res {
* @resources and @list are protected by the cmdbuf mutex for now.
*/
struct vmw_cmdbuf_res_manager {
- struct drm_open_hash resources;
+ struct vmwgfx_open_hash resources;
struct list_head list;
struct vmw_private *dev_priv;
};
@@ -81,11 +81,11 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key)
{
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
int ret;
unsigned long key = user_key | (res_type << 24);
- ret = drm_ht_find_item(&man->resources, key, &hash);
+ ret = vmwgfx_ht_find_item(&man->resources, key, &hash);
if (unlikely(ret != 0))
return ERR_PTR(ret);
@@ -105,7 +105,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
struct vmw_cmdbuf_res *entry)
{
list_del(&entry->head);
- WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
+ WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash));
vmw_resource_unreference(&entry->res);
kfree(entry);
}
@@ -167,7 +167,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
vmw_cmdbuf_res_free(entry->man, entry);
break;
case VMW_CMDBUF_RES_DEL:
- ret = drm_ht_insert_item(&entry->man->resources, &entry->hash);
+ ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash);
BUG_ON(ret);
list_move_tail(&entry->head, &entry->man->list);
entry->state = VMW_CMDBUF_RES_COMMITTED;
@@ -206,7 +206,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
return -ENOMEM;
cres->hash.key = user_key | (res_type << 24);
- ret = drm_ht_insert_item(&man->resources, &cres->hash);
+ ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash);
if (unlikely(ret != 0)) {
kfree(cres);
goto out_invalid_key;
@@ -244,10 +244,10 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource **res_p)
{
struct vmw_cmdbuf_res *entry;
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
int ret;
- ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24),
+ ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24),
&hash);
if (likely(ret != 0))
return -EINVAL;
@@ -260,7 +260,7 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
*res_p = NULL;
break;
case VMW_CMDBUF_RES_COMMITTED:
- (void) drm_ht_remove_item(&man->resources, &entry->hash);
+ (void) vmwgfx_ht_remove_item(&man->resources, &entry->hash);
list_del(&entry->head);
entry->state = VMW_CMDBUF_RES_DEL;
list_add_tail(&entry->head, list);
@@ -295,7 +295,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
man->dev_priv = dev_priv;
INIT_LIST_HEAD(&man->list);
- ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+ ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
if (ret == 0)
return man;
@@ -320,26 +320,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
- drm_ht_remove(&man->resources);
+ vmwgfx_ht_remove(&man->resources);
kfree(man);
}
-/**
- * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
- * resource manager
- *
- * Returns the approximate allocation size of a command buffer managed
- * resource manager.
- */
-size_t vmw_cmdbuf_res_man_size(void)
-{
- static size_t res_man_size;
-
- if (unlikely(res_man_size == 0))
- res_man_size =
- ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
- ttm_round_pot(sizeof(struct hlist_head) <<
- VMW_CMDBUF_RES_MAN_HT_ORDER);
-
- return res_man_size;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 4446758b6880..e0f48cd9529b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -60,8 +60,6 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);
-static uint64_t vmw_user_context_size;
-
static const struct vmw_user_resource_conv user_context_conv = {
.object_type = VMW_RES_CONTEXT,
.base_obj_to_res = vmw_user_context_base_to_res,
@@ -686,7 +684,6 @@ static void vmw_user_context_free(struct vmw_resource *res)
{
struct vmw_user_context *ctx =
container_of(res, struct vmw_user_context, res);
- struct vmw_private *dev_priv = res->dev_priv;
if (ctx->cbs)
vmw_binding_state_free(ctx->cbs);
@@ -694,8 +691,6 @@ static void vmw_user_context_free(struct vmw_resource *res)
(void) vmw_context_bind_dx_query(res, NULL);
ttm_base_object_kfree(ctx, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
}
/*
@@ -720,7 +715,7 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->cid);
}
static int vmw_context_define(struct drm_device *dev, void *data,
@@ -732,10 +727,6 @@ static int vmw_context_define(struct drm_device *dev, void *data,
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
if (!has_sm4_context(dev_priv) && dx) {
@@ -743,25 +734,8 @@ static int vmw_context_define(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
- ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
- + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_context_size,
- &ttm_opt_ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for context"
- " creation.\n");
- goto out_ret;
- }
-
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (unlikely(!ctx)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
ret = -ENOMEM;
goto out_ret;
}
@@ -780,7 +754,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
tmp = vmw_resource_reference(&ctx->res);
ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
- &vmw_user_context_base_release, NULL);
+ &vmw_user_context_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 17a98db00017..16f986b6cbea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -407,12 +407,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement,
- true, true, vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
+ true, true, vmw_bo_bo_free, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
return ret;
@@ -546,8 +542,6 @@ static void vmw_hw_cotable_destroy(struct vmw_resource *res)
(void) vmw_cotable_destroy(res);
}
-static size_t cotable_acc_size;
-
/**
* vmw_cotable_free - Cotable resource destructor
*
@@ -555,10 +549,7 @@ static size_t cotable_acc_size;
*/
static void vmw_cotable_free(struct vmw_resource *res)
{
- struct vmw_private *dev_priv = res->dev_priv;
-
kfree(res);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}
/**
@@ -574,21 +565,9 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
u32 type)
{
struct vmw_cotable *vcotbl;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
u32 num_entries;
- if (unlikely(cotable_acc_size == 0))
- cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- cotable_acc_size, &ttm_opt_ctx);
- if (unlikely(ret))
- return ERR_PTR(ret);
-
vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
if (unlikely(!vcotbl)) {
ret = -ENOMEM;
@@ -622,7 +601,6 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
out_no_init:
kfree(vcotbl);
out_no_alloc:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index bfd71c86faa5..fe36efdb7ff5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -25,7 +25,6 @@
*
**************************************************************************/
-#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -35,6 +34,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
@@ -51,9 +51,6 @@
#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
-#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
-
-
/*
* Fully encoded drm commands. Might move to vmw_drm.h
*/
@@ -166,7 +163,7 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
+ DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
DRM_RENDER_ALLOW),
@@ -258,8 +255,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
};
static const struct pci_device_id vmw_pci_id_list[] = {
- { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
- { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) },
{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
@@ -367,6 +364,7 @@ static void vmw_print_sm_type(struct vmw_private *dev_priv)
[VMW_SM_4] = "SM4",
[VMW_SM_4_1] = "SM4_1",
[VMW_SM_5] = "SM_5",
+ [VMW_SM_5_1X] = "SM_5_1X",
[VMW_SM_MAX] = "Invalid"
};
BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1));
@@ -400,13 +398,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
- vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
- if (!vbo)
- return -ENOMEM;
-
- ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
- &vmw_sys_placement, false, true,
- &vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, PAGE_SIZE,
+ &vmw_sys_placement, false, true,
+ &vmw_bo_bo_free, &vbo);
if (unlikely(ret != 0))
return ret;
@@ -707,23 +701,15 @@ static int vmw_dma_masks(struct vmw_private *dev_priv)
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
int ret;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- ret = vmw_thp_init(dev_priv);
-#else
ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
dev_priv->vram_size >> PAGE_SHIFT);
-#endif
ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
return ret;
}
static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- vmw_thp_fini(dev_priv);
-#else
ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
-#endif
}
static int vmw_setup_pci_resources(struct vmw_private *dev,
@@ -987,8 +973,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
goto out_err0;
}
- dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
- &vmw_prime_dmabuf_ops);
+ dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
drm_err(&dev_priv->drm,
@@ -1071,6 +1056,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
"3D will be disabled.\n");
dev_priv->has_mob = false;
}
+ if (vmw_sys_man_init(dev_priv) != 0) {
+ drm_info(&dev_priv->drm,
+ "No MOB page table memory available. "
+ "3D will be disabled.\n");
+ dev_priv->has_mob = false;
+ }
}
if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
@@ -1078,8 +1069,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->sm_type = VMW_SM_4;
}
- vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
-
/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
if (has_sm4_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
@@ -1087,8 +1076,11 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->sm_type = VMW_SM_4_1;
if (has_sm4_1_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
- if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5))
+ if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) {
dev_priv->sm_type = VMW_SM_5;
+ if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43))
+ dev_priv->sm_type = VMW_SM_5_1X;
+ }
}
}
@@ -1121,8 +1113,10 @@ out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
- if (dev_priv->has_mob)
+ if (dev_priv->has_mob) {
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
+ vmw_sys_man_fini(dev_priv);
+ }
if (dev_priv->has_gmr)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_devcaps_destroy(dev_priv);
@@ -1156,7 +1150,7 @@ static void vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb);
if (dev_priv->ctx.res_ht_initialized)
- drm_ht_remove(&dev_priv->ctx.res_ht);
+ vmwgfx_ht_remove(&dev_priv->ctx.res_ht);
vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
vmw_fb_off(dev_priv);
@@ -1172,8 +1166,10 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_release_device_early(dev_priv);
- if (dev_priv->has_mob)
+ if (dev_priv->has_mob) {
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
+ vmw_sys_man_fini(dev_priv);
+ }
vmw_devcaps_destroy(dev_priv);
vmw_vram_manager_fini(dev_priv);
ttm_device_fini(&dev_priv->bdev);
@@ -1388,7 +1384,6 @@ static void vmw_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- ttm_mem_global_release(&ttm_mem_glob);
drm_dev_unregister(dev);
vmw_driver_unload(dev);
}
@@ -1576,7 +1571,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static const struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
@@ -1585,8 +1580,7 @@ static const struct drm_driver driver = {
.postclose = vmw_postclose,
.dumb_create = vmw_dumb_create,
- .dumb_map_offset = vmw_dumb_map_offset,
- .dumb_destroy = vmw_dumb_destroy,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
@@ -1617,41 +1611,43 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
if (ret)
- return ret;
+ goto out_error;
ret = pcim_enable_device(pdev);
if (ret)
- return ret;
+ goto out_error;
vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
struct vmw_private, drm);
- if (IS_ERR(vmw))
- return PTR_ERR(vmw);
+ if (IS_ERR(vmw)) {
+ ret = PTR_ERR(vmw);
+ goto out_error;
+ }
pci_set_drvdata(pdev, &vmw->drm);
- ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
- if (ret)
- return ret;
-
ret = vmw_driver_load(vmw, ent->device);
if (ret)
- return ret;
+ goto out_error;
ret = drm_dev_register(&vmw->drm, 0);
- if (ret) {
- vmw_driver_unload(&vmw->drm);
- return ret;
- }
+ if (ret)
+ goto out_unload;
+
+ vmw_debugfs_gem_init(vmw);
return 0;
+out_unload:
+ vmw_driver_unload(&vmw->drm);
+out_error:
+ return ret;
}
static int __init vmwgfx_init(void)
{
int ret;
- if (vgacon_text_force())
+ if (drm_firmware_drivers_only())
return -EINVAL;
ret = pci_register_driver(&vmw_pci_driver);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 858aff99a3fe..ea3ecdda561d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -34,7 +34,6 @@
#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_hashtab.h>
#include <drm/drm_rect.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -43,6 +42,7 @@
#include "ttm_object.h"
#include "vmwgfx_fence.h"
+#include "vmwgfx_hashtab.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"
@@ -54,16 +54,13 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20210722"
+#define VMWGFX_DRIVER_DATE "20211206"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 19
+#define VMWGFX_DRIVER_MINOR 20
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
-#define VMWGFX_MAX_RELOCATIONS 2048
-#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
#define VMWGFX_PCI_ID_SVGA2 0x0405
#define VMWGFX_PCI_ID_SVGA3 0x0406
@@ -82,8 +79,9 @@
VMWGFX_NUM_GB_SURFACE +\
VMWGFX_NUM_GB_SCREEN_TARGET)
-#define VMW_PL_GMR (TTM_PL_PRIV + 0)
-#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2)
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
@@ -133,7 +131,7 @@ struct vmw_buffer_object {
*/
struct vmw_validate_buffer {
struct ttm_validate_buffer base;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
bool validate_as_mob;
};
@@ -332,7 +330,6 @@ struct vmw_sg_table {
struct page **pages;
const dma_addr_t *addrs;
struct sg_table *sgt;
- unsigned long num_regions;
unsigned long num_pages;
};
@@ -360,6 +357,19 @@ struct vmw_piter {
dma_addr_t (*dma_address)(struct vmw_piter *);
};
+
+struct vmw_ttm_tt {
+ struct ttm_tt dma_ttm;
+ struct vmw_private *dev_priv;
+ int gmr_id;
+ struct vmw_mob *mob;
+ int mem_type;
+ struct sg_table sgt;
+ struct vmw_sg_table vsgt;
+ bool mapped;
+ bool bound;
+};
+
/*
* enum vmw_display_unit_type - Describes the display unit
*/
@@ -406,10 +416,11 @@ struct vmw_ctx_validation_info;
* @ctx: The validation context
*/
struct vmw_sw_context{
- struct drm_open_hash res_ht;
+ struct vmwgfx_open_hash res_ht;
bool res_ht_initialized;
bool kernel;
struct vmw_fpriv *fp;
+ struct drm_file *filp;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct vmw_buffer_object *cur_query_bo;
@@ -473,6 +484,7 @@ enum {
* @VMW_SM_4: Context support upto SM4.
* @VMW_SM_4_1: Context support upto SM4_1.
* @VMW_SM_5: Context support up to SM5.
+ * @VMW_SM_5_1X: Context support for SM5_1 and GL43 extensions.
* @VMW_SM_MAX: Should be the last.
*/
enum vmw_sm_type {
@@ -480,6 +492,7 @@ enum vmw_sm_type {
VMW_SM_4,
VMW_SM_4_1,
VMW_SM_5,
+ VMW_SM_5_1X,
VMW_SM_MAX
};
@@ -627,9 +640,6 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
- /* Validation memory reservation */
- struct vmw_validation_mem vvm;
-
uint32 *devcaps;
/*
@@ -645,6 +655,11 @@ struct vmw_private {
#endif
};
+static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj)
+{
+ return container_of((gobj), struct vmw_buffer_object, base.base);
+}
+
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
return container_of(res, struct vmw_surface, res);
@@ -738,6 +753,24 @@ static inline bool has_sm5_context(const struct vmw_private *dev_priv)
return (dev_priv->sm_type >= VMW_SM_5);
}
+/**
+ * has_gl43_context - Does the device support a GL43 context.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports a GL43 context, false otherwise.
+ */
+static inline bool has_gl43_context(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->sm_type >= VMW_SM_5_1X);
+}
+
+static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv)
+{
+ return (has_gl43_context(dev_priv) ?
+ SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS);
+}
+
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
@@ -767,7 +800,7 @@ extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
+ struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_buffer_object **out_buf);
@@ -833,6 +866,7 @@ static inline void vmw_user_resource_noref_release(void)
/**
* Buffer object helper functions - vmwgfx_bo.c
*/
+extern bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo);
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
struct vmw_buffer_object *bo,
struct ttm_placement *placement,
@@ -857,32 +891,23 @@ extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
unsigned long size,
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo);
+extern int vmw_bo_create(struct vmw_private *dev_priv,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible, bool pin,
+ void (*bo_free)(struct ttm_buffer_object *bo),
+ struct vmw_buffer_object **p_bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo));
-extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile);
-extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_buffer_object **p_dma_buf,
- struct ttm_base_object **p_base);
-extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
- struct vmw_buffer_object *dma_buf,
- uint32_t *handle);
-extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
- uint32_t id, struct vmw_buffer_object **out,
- struct ttm_base_object **base);
+extern int vmw_user_bo_lookup(struct drm_file *filp,
+ uint32_t handle,
+ struct vmw_buffer_object **out);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
@@ -891,16 +916,7 @@ extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
-
-/**
- * vmw_user_bo_noref_release - release a buffer object pointer looked up
- * without reference
- */
-static inline void vmw_user_bo_noref_release(void)
-{
- ttm_base_object_noref_release();
-}
+vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);
/**
* vmw_bo_adjust_prio - Adjust the buffer object eviction priority
@@ -952,6 +968,19 @@ static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
}
/**
+ * GEM related functionality - vmwgfx_gem.c
+ */
+extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ struct drm_file *filp,
+ uint32_t size,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_vbo);
+extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+extern void vmw_gem_destroy(struct ttm_buffer_object *bo);
+extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
+
+/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
*/
@@ -1027,9 +1056,6 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
-extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
- size_t gran);
-
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
@@ -1039,7 +1065,6 @@ extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_nonfixed_placement;
@@ -1115,15 +1140,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp,
int ret,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle,
- int32_t out_fence_fd,
- struct sync_file *sync_file);
+ int32_t out_fence_fd);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
@@ -1218,13 +1242,6 @@ void vmw_kms_lost_device(struct drm_device *dev);
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-
-int vmw_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
-int vmw_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
@@ -1252,6 +1269,12 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
/**
+ * System memory manager
+ */
+int vmw_sys_man_init(struct vmw_private *dev_priv);
+void vmw_sys_man_fini(struct vmw_private *dev_priv);
+
+/**
* Prime - vmwgfx_prime.c
*/
@@ -1322,18 +1345,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vmw_surface_gb_priv_define(struct drm_device *dev,
- uint32_t user_accounting_size,
- SVGA3dSurfaceAllFlags svga3d_flags,
- SVGA3dSurfaceFormat format,
- bool for_scanout,
- uint32_t num_mip_levels,
- uint32_t multisample_count,
- uint32_t array_size,
- struct drm_vmw_size size,
- SVGA3dMSPattern multisample_pattern,
- SVGA3dMSQualityLevel quality_level,
- struct vmw_surface **srf_out);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file_priv);
@@ -1342,7 +1353,6 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
struct drm_file *file_priv);
int vmw_gb_surface_define(struct vmw_private *dev_priv,
- uint32_t user_accounting_size,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out);
@@ -1403,7 +1413,6 @@ void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
-extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
@@ -1551,11 +1560,6 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
-/* Transparent hugepage support - vmwgfx_thp.c */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern int vmw_thp_init(struct vmw_private *dev_priv);
-void vmw_thp_fini(struct vmw_private *dev_priv);
-#endif
/**
* VMW_DEBUG_KMS - Debug output for kernel mode-setting
@@ -1600,11 +1604,6 @@ vmw_bo_reference(struct vmw_buffer_object *buf)
return buf;
}
-static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
-{
- return &ttm_mem_glob;
-}
-
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
atomic_inc(&dev_priv->num_fifo_resources);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5f2ffa9de5c8..dd2ff441068e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1171,14 +1171,13 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
int ret;
vmw_validation_preload_bo(sw_context->ctx);
- vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
- if (IS_ERR(vmw_bo)) {
+ vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
+ if (IS_ERR_OR_NULL(vmw_bo)) {
VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
- return PTR_ERR(vmw_bo);
+ return vmw_bo ? PTR_ERR(vmw_bo) : -EINVAL;
}
-
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
- vmw_user_bo_noref_release();
+ ttm_bo_put(&vmw_bo->base);
if (unlikely(ret != 0))
return ret;
@@ -1226,14 +1225,13 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
int ret;
vmw_validation_preload_bo(sw_context->ctx);
- vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
- if (IS_ERR(vmw_bo)) {
+ vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
+ if (IS_ERR_OR_NULL(vmw_bo)) {
VMW_DEBUG_USER("Could not find or use GMR region.\n");
- return PTR_ERR(vmw_bo);
+ return vmw_bo ? PTR_ERR(vmw_bo) : -EINVAL;
}
-
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
- vmw_user_bo_noref_release();
+ ttm_bo_put(&vmw_bo->base);
if (unlikely(ret != 0))
return ret;
@@ -2166,6 +2164,44 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
}
/**
+ * vmw_cmd_dx_set_constant_buffer_offset - Validate
+ * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
+
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+ u32 shader_slot;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (!ctx_node)
+ return -EINVAL;
+
+ cmd = container_of(header, typeof(*cmd), header);
+ if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+ VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
+ (unsigned int) cmd->body.slot);
+ return -EINVAL;
+ }
+
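+ /*
+ * This assumes the six SVGA_3D_CMD_DX_SET_*_CONSTANT_BUFFER_OFFSET
+ * command ids are contiguous (VS..CS), so the offset from the VS id
+ * selects the shader stage passed to the binding code.
+ */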
+ shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
+ vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
+ cmd->body.slot, cmd->body.offsetInBytes);
+
+ return 0;
+}
+
+/**
* vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
* command
*
@@ -2918,7 +2954,7 @@ static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
if (!has_sm5_context(dev_priv))
return -EINVAL;
- if (num_uav > SVGA3D_MAX_UAVIEWS) {
+ if (num_uav > vmw_max_num_uavs(dev_priv)) {
VMW_DEBUG_USER("Invalid UAV binding.\n");
return -EINVAL;
}
@@ -2950,7 +2986,7 @@ static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
if (!has_sm5_context(dev_priv))
return -EINVAL;
- if (num_uav > SVGA3D_MAX_UAVIEWS) {
+ if (num_uav > vmw_max_num_uavs(dev_priv)) {
VMW_DEBUG_USER("Invalid UAV binding.\n");
return -EINVAL;
}
@@ -3528,6 +3564,24 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
&vmw_cmd_dx_transfer_from_buffer,
true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
+ &vmw_cmd_dx_set_constant_buffer_offset,
+ true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
true, false, true),
@@ -3561,6 +3615,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_define_streamoutput, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
&vmw_cmd_dx_bind_streamoutput, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
+ &vmw_cmd_dx_so_define, true, false, true),
};
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
@@ -3823,17 +3879,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
* Also if copying fails, user-space will be unable to signal the fence object
* so we wait for it immediately, and then unreference the user-space reference.
*/
-void
+int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp, int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj *fence, uint32_t fence_handle,
- int32_t out_fence_fd, struct sync_file *sync_file)
+ int32_t out_fence_fd)
{
struct drm_vmw_fence_rep fence_rep;
if (user_fence_rep == NULL)
- return;
+ return 0;
memset(&fence_rep, 0, sizeof(fence_rep));
@@ -3861,20 +3917,13 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
* handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
- if (sync_file)
- fput(sync_file->file);
-
- if (fence_rep.fd != -1) {
- put_unused_fd(fence_rep.fd);
- fence_rep.fd = -1;
- }
-
- ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
- TTM_REF_USAGE);
+ ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
+
+ return ret ? -EFAULT : 0;
}
/**
@@ -4054,8 +4103,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
- vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
-
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
@@ -4101,6 +4148,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true;
}
+ sw_context->filp = file_priv;
sw_context->fp = vmw_fpriv(file_priv);
INIT_LIST_HEAD(&sw_context->ctx_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
@@ -4117,7 +4165,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_binding_state_reset(sw_context->staged_bindings);
if (!sw_context->res_ht_initialized) {
- ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+ ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
goto out_unlock;
@@ -4212,16 +4260,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
+ }
+ }
+
+ ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+ user_fence_rep, fence, handle, out_fence_fd);
+
+ if (sync_file) {
+ if (ret) {
+ /* usercopy of fence failed, put the file object */
+ fput(sync_file->file);
+ put_unused_fd(out_fence_fd);
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
}
}
- vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle, out_fence_fd,
- sync_file);
-
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
*out_fence = fence;
@@ -4239,7 +4294,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
*/
vmw_validation_unref_lists(&val_ctx);
- return 0;
+ return ret;
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d18c6a56e3dc..8ee34576c7d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -394,22 +394,15 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
struct vmw_buffer_object *vmw_bo;
int ret;
- vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
- if (!vmw_bo) {
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- ret = vmw_bo_init(vmw_priv, vmw_bo, size,
+ ret = vmw_bo_create(vmw_priv, size,
&vmw_sys_placement,
false, false,
- &vmw_bo_bo_free);
+ &vmw_bo_bo_free, &vmw_bo);
if (unlikely(ret != 0))
- goto err_unlock; /* init frees the buffer on failure */
+ return ret;
*out = vmw_bo;
-err_unlock:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 9fe12329a4d5..5001b87aebe8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -37,9 +37,6 @@ struct vmw_fence_manager {
spinlock_t lock;
struct list_head fence_list;
struct work_struct work;
- u32 user_fence_size;
- u32 fence_size;
- u32 event_fence_action_size;
bool fifo_down;
struct list_head cleanup_list;
uint32_t pending_actions[VMW_ACTION_MAX];
@@ -304,11 +301,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
- TTM_OBJ_EXTRA_SIZE;
- fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
- fman->event_fence_action_size =
- ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
fman->ctx = dma_fence_context_alloc(1);
@@ -560,14 +552,8 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
struct vmw_user_fence *ufence =
container_of(fence, struct vmw_user_fence, fence);
- struct vmw_fence_manager *fman = fman_from_fence(fence);
ttm_base_object_kfree(ufence, base);
- /*
- * Free kernel space accounting.
- */
- ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
- fman->user_fence_size);
}
static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
@@ -590,23 +576,8 @@ int vmw_user_fence_create(struct drm_file *file_priv,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_user_fence *ufence;
struct vmw_fence_obj *tmp;
- struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
int ret;
- /*
- * Kernel memory space accounting, since this object may
- * be created by a user-space request.
- */
-
- ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
- &ctx);
- if (unlikely(ret != 0))
- return ret;
-
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
if (unlikely(!ufence)) {
ret = -ENOMEM;
@@ -625,9 +596,10 @@ int vmw_user_fence_create(struct drm_file *file_priv,
* vmw_user_fence_base_release.
*/
tmp = vmw_fence_obj_reference(&ufence->fence);
+
ret = ttm_base_object_init(tfile, &ufence->base, false,
VMW_RES_FENCE,
- &vmw_user_fence_base_release, NULL);
+ &vmw_user_fence_base_release);
if (unlikely(ret != 0)) {
@@ -646,7 +618,6 @@ out_err:
tmp = &ufence->fence;
vmw_fence_obj_unreference(&tmp);
out_no_object:
- ttm_mem_global_free(mem_glob, fman->user_fence_size);
return ret;
}
@@ -831,8 +802,7 @@ out:
*/
if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
- return ttm_ref_object_base_unref(tfile, arg->handle,
- TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->handle);
return ret;
}
@@ -874,8 +844,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_arg *) data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->handle,
- TTM_REF_USAGE);
+ arg->handle);
}
/**
@@ -1121,7 +1090,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
if (user_fence_rep != NULL) {
ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, NULL, false);
+ NULL, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
@@ -1159,12 +1128,12 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
- handle, -1, NULL);
+ handle, -1);
vmw_fence_obj_unreference(&fence);
return 0;
out_no_create:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
new file mode 100644
index 000000000000..ce609e7d758f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2021 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "vmwgfx_drv.h"
+
+#include "drm/drm_prime.h"
+#include "drm/drm_gem_ttm_helper.h"
+
+/**
+ * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the
+ * TTM buffer object.
+ */
+static struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vmw_buffer_object, base);
+}
+
+static void vmw_gem_object_free(struct drm_gem_object *gobj)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
+
+ if (bo)
+ ttm_bo_put(bo);
+}
+
+static int vmw_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+ return 0;
+}
+
+static void vmw_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+}
+
+static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+ struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ vmw_bo_pin_reserved(vbo, do_pin);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ return ret;
+}
+
+
+static int vmw_gem_object_pin(struct drm_gem_object *obj)
+{
+ return vmw_gem_pin_private(obj, true);
+}
+
+static void vmw_gem_object_unpin(struct drm_gem_object *obj)
+{
+ vmw_gem_pin_private(obj, false);
+}
+
+static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
+
+ if (vmw_tt->vsgt.sgt)
+ return vmw_tt->vsgt.sgt;
+
+ return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
+}
+
+
+static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+ .free = vmw_gem_object_free,
+ .open = vmw_gem_object_open,
+ .close = vmw_gem_object_close,
+ .print_info = drm_gem_ttm_print_info,
+ .pin = vmw_gem_object_pin,
+ .unpin = vmw_gem_object_unpin,
+ .get_sg_table = vmw_gem_object_get_sg_table,
+ .vmap = drm_gem_ttm_vmap,
+ .vunmap = drm_gem_ttm_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+};
+
+/**
+ * vmw_gem_destroy - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+void vmw_gem_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
+
+ WARN_ON(vbo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+ vmw_bo_unmap(vbo);
+ drm_gem_object_release(&vbo->base.base);
+ kfree(vbo);
+}
+
+int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ struct drm_file *filp,
+ uint32_t size,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_vbo)
+{
+ int ret;
+
+ ret = vmw_bo_create(dev_priv, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
+ &vmw_vram_sys_placement,
+ true, false, &vmw_gem_destroy, p_vbo);
+
+ if (ret != 0)
+ goto out_no_bo;
+
+ (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+
+ ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&(*p_vbo)->base.base);
+out_no_bo:
+ return ret;
+}
+
+
+int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_alloc_dmabuf_arg *arg =
+ (union drm_vmw_alloc_dmabuf_arg *)data;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+ struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+ struct vmw_buffer_object *vbo;
+ uint32_t handle;
+ int ret;
+
+ ret = vmw_gem_object_create_with_handle(dev_priv, filp,
+ req->size, &handle, &vbo);
+ if (ret)
+ goto out_no_bo;
+
+ rep->handle = handle;
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
+out_no_bo:
+ return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m)
+{
+ const char *placement;
+ const char *type;
+
+ switch (bo->base.resource->mem_type) {
+ case TTM_PL_SYSTEM:
+ placement = " CPU";
+ break;
+ case VMW_PL_GMR:
+ placement = " GMR";
+ break;
+ case VMW_PL_MOB:
+ placement = " MOB";
+ break;
+ case VMW_PL_SYSTEM:
+ placement = "VCPU";
+ break;
+ case TTM_PL_VRAM:
+ placement = "VRAM";
+ break;
+ default:
+ placement = "None";
+ break;
+ }
+
+ switch (bo->base.type) {
+ case ttm_bo_type_device:
+ type = "device";
+ break;
+ case ttm_bo_type_kernel:
+ type = "kernel";
+ break;
+ case ttm_bo_type_sg:
+ type = "sg ";
+ break;
+ default:
+ type = "none ";
+ break;
+ }
+
+ seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
+ id, bo->base.base.size, placement, type);
+ seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
+ bo->base.priority,
+ bo->base.pin_count,
+ kref_read(&bo->base.base.refcount),
+ kref_read(&bo->base.kref));
+ seq_puts(m, "\n");
+}
+
+static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
+{
+ struct vmw_private *vdev = (struct vmw_private *)m->private;
+ struct drm_device *dev = &vdev->drm;
+ struct drm_file *file;
+ int r;
+
+ r = mutex_lock_interruptible(&dev->filelist_mutex);
+ if (r)
+ return r;
+
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ struct task_struct *task;
+ struct drm_gem_object *gobj;
+ int id;
+
+ /*
+ * Although we have a valid reference on file->pid, that does
+ * not guarantee that the task_struct who called get_pid() is
+ * still alive (e.g. get_pid(current) => fork() => exit()).
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+ task = pid_task(file->pid, PIDTYPE_PID);
+ seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+ task ? task->comm : "<unknown>");
+ rcu_read_unlock();
+
+ spin_lock(&file->table_lock);
+ idr_for_each_entry(&file->object_idr, gobj, id) {
+ struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj);
+
+ vmw_bo_print_info(id, bo, m);
+ }
+ spin_unlock(&file->table_lock);
+ }
+
+ mutex_unlock(&dev->filelist_mutex);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(vmw_debugfs_gem_info);
+
+#endif
+
+void vmw_debugfs_gem_init(struct vmw_private *vdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = vdev->drm.primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("vmwgfx_gem_info", 0444, root, vdev,
+ &vmw_debugfs_gem_info_fops);
+#endif
+}
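+
+/*
+ * Usage sketch (illustrative only, not part of this change): a caller that
+ * holds a struct drm_file can create a GEM-backed buffer object and a
+ * userspace handle in one step; the handle keeps the only reference:
+ *
+ *	struct vmw_buffer_object *vbo;
+ *	uint32_t handle;
+ *	int ret;
+ *
+ *	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ *						PAGE_SIZE, &handle, &vbo);
+ *	if (ret)
+ *		return ret;
+ */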
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b2c4af331c9d..ebb4505a31a3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -42,6 +42,7 @@ struct vmwgfx_gmrid_man {
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t used_gmr_pages;
+ uint8_t type;
};
static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
@@ -132,6 +133,18 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
kfree(res);
}
+static void vmw_gmrid_man_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+ struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
+
+ BUG_ON(gman->type != VMW_PL_GMR && gman->type != VMW_PL_MOB);
+
+ drm_printf(printer, "%s's used: %u pages, max: %u pages, %u id's\n",
+ (gman->type == VMW_PL_MOB) ? "Mob" : "GMR",
+ gman->used_gmr_pages, gman->max_gmr_pages, gman->max_gmr_ids);
+}
+
static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
@@ -146,12 +159,12 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
man = &gman->manager;
man->func = &vmw_gmrid_manager_func;
- /* TODO: This is most likely not correct */
man->use_tt = true;
ttm_resource_manager_init(man, 0);
spin_lock_init(&gman->lock);
gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
+ gman->type = type;
switch (type) {
case VMW_PL_GMR:
@@ -190,4 +203,5 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
.alloc = vmw_gmrid_man_get_node,
.free = vmw_gmrid_man_put_node,
+ .debug = vmw_gmrid_man_debug
};
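+
+/*
+ * Illustrative sketch (not part of this change): with the .debug callback
+ * wired up, a hypothetical caller could dump GMR or MOB usage through a
+ * drm_printer:
+ *
+ *	struct drm_printer p = drm_info_printer(dev_priv->drm.dev);
+ *
+ *	ttm_resource_manager_debug(ttm_manager_type(&dev_priv->bdev,
+ *						     VMW_PL_GMR), &p);
+ */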
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
new file mode 100644
index 000000000000..06aebc12774e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Simple open hash tab implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <linux/export.h>
+#include <linux/hash.h>
+#include <linux/mm.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_print.h>
+
+#include "vmwgfx_hashtab.h"
+
+int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order)
+{
+ unsigned int size = 1 << order;
+
+ ht->order = order;
+ ht->table = NULL;
+ if (size <= PAGE_SIZE / sizeof(*ht->table))
+ ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+ else
+ ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
+ if (!ht->table) {
+ DRM_ERROR("Out of memory for hash table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ unsigned int hashed_key;
+ int count = 0;
+
+ hashed_key = hash_long(key, ht->order);
+ DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry(entry, h_list, head)
+ DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+}
+
+static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry(entry, h_list, head) {
+ if (entry->key == key)
+ return &entry->head;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
+
+static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry_rcu(entry, h_list, head) {
+ if (entry->key == key)
+ return &entry->head;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
+
+int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
+{
+ struct vmwgfx_hash_item *entry;
+ struct hlist_head *h_list;
+ struct hlist_node *parent;
+ unsigned int hashed_key;
+ unsigned long key = item->key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ parent = NULL;
+ hlist_for_each_entry(entry, h_list, head) {
+ if (entry->key == key)
+ return -EINVAL;
+ if (entry->key > key)
+ break;
+ parent = &entry->head;
+ }
+ if (parent)
+ hlist_add_behind_rcu(&item->head, parent);
+ else
+ hlist_add_head_rcu(&item->head, h_list);
+ return 0;
+}
+
+/*
+ * Just insert an item and return any "bits" bit key that hasn't been
+ * used before.
+ */
+int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add)
+{
+ int ret;
+ unsigned long mask = (1UL << bits) - 1;
+ unsigned long first, unshifted_key;
+
+ unshifted_key = hash_long(seed, bits);
+ first = unshifted_key;
+ do {
+ item->key = (unshifted_key << shift) + add;
+ ret = vmwgfx_ht_insert_item(ht, item);
+ if (ret)
+ unshifted_key = (unshifted_key + 1) & mask;
+ } while (ret && (unshifted_key != first));
+
+ if (ret) {
+ DRM_ERROR("Available key bit space exhausted\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
+ struct vmwgfx_hash_item **item)
+{
+ struct hlist_node *list;
+
+ list = vmwgfx_ht_find_key_rcu(ht, key);
+ if (!list)
+ return -EINVAL;
+
+ *item = hlist_entry(list, struct vmwgfx_hash_item, head);
+ return 0;
+}
+
+int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key)
+{
+ struct hlist_node *list;
+
+ list = vmwgfx_ht_find_key(ht, key);
+ if (list) {
+ hlist_del_init_rcu(list);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
+{
+ hlist_del_init_rcu(&item->head);
+ return 0;
+}
+
+void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht)
+{
+ if (ht->table) {
+ kvfree(ht->table);
+ ht->table = NULL;
+ }
+}
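+
+/*
+ * Usage sketch (illustrative only, not part of this change): callers embed
+ * a struct vmwgfx_hash_item in their own type, choose a key, and map a
+ * found item back with container_of():
+ *
+ *	struct my_entry {                      (hypothetical user type)
+ *		struct vmwgfx_hash_item hash;
+ *	};
+ *
+ *	struct vmwgfx_hash_item *item;
+ *
+ *	vmwgfx_ht_create(&ht, 12);             (order 12 = 4096 buckets)
+ *	entry->hash.key = key;
+ *	vmwgfx_ht_insert_item(&ht, &entry->hash);
+ *	if (vmwgfx_ht_find_item(&ht, key, &item) == 0)
+ *		entry = container_of(item, struct my_entry, hash);
+ */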
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
new file mode 100644
index 000000000000..a9ce12922e21
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Simple open hash table implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+/*
+ * TODO: Replace this hashtable with Linux' generic implementation
+ * from <linux/hashtable.h>.
+ */
+
+#ifndef VMWGFX_HASHTAB_H
+#define VMWGFX_HASHTAB_H
+
+#include <linux/list.h>
+
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+struct vmwgfx_hash_item {
+ struct hlist_node head;
+ unsigned long key;
+};
+
+struct vmwgfx_open_hash {
+ struct hlist_head *table;
+ u8 order;
+};
+
+int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order);
+int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
+int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add);
+int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
+ struct vmwgfx_hash_item **item);
+
+void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key);
+int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key);
+int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
+void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht);
+
+/*
+ * RCU-safe interface
+ *
+ * Callers must ensure that no two of the hash table manipulation
+ * functions ever run concurrently on the same table.
+ * The lookup function vmwgfx_ht_find_item_rcu may, however, run
+ * concurrently with any of the manipulation functions as long as it is
+ * called from within an RCU read-side critical section.
+ */
+#define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item
+#define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please
+#define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key
+#define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item
+#define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item
+
+#endif
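To make the interface concrete, here is a minimal usage sketch; apart
from the vmwgfx_ht_* calls and drm_hash_entry(), every name in it is
illustrative and not part of this patch:

#include <linux/rcupdate.h>
#include "vmwgfx_hashtab.h"

struct example_obj {
	struct vmwgfx_hash_item hash;
	int payload;
};

static int example_usage(void)
{
	struct vmwgfx_open_hash ht;
	struct example_obj obj = { .hash.key = 42, .payload = 7 };
	struct vmwgfx_hash_item *found;
	int ret;

	ret = vmwgfx_ht_create(&ht, 3);	/* 1 << 3 == 8 buckets */
	if (ret)
		return ret;

	/* Fails with -EINVAL if the key is already present. */
	ret = vmwgfx_ht_insert_item(&ht, &obj.hash);

	/* Lookups may run concurrently with updates under RCU. */
	rcu_read_lock();
	if (!vmwgfx_ht_find_item_rcu(&ht, obj.hash.key, &found))
		ret = drm_hash_entry(found, struct example_obj, hash)->payload;
	rcu_read_unlock();

	vmwgfx_ht_remove_item(&ht, &obj.hash);
	vmwgfx_ht_remove(&ht);
	return ret;
}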
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 28af34ab6ed6..471da2b4c177 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -105,6 +105,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_SM5:
param->value = has_sm5_context(dev_priv);
break;
+ case DRM_VMW_PARAM_GL43:
+ param->value = has_gl43_context(dev_priv);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 74fa41909213..bbd2f4ec08ec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -843,8 +843,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
- if (vfbs->base.user_obj)
- ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs);
}
@@ -989,6 +987,16 @@ out_err1:
* Buffer-object framebuffer code
*/
+static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct vmw_framebuffer_bo *vfbd =
+ vmw_framebuffer_to_vfbd(fb);
+
+ return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
+}
+
static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_bo *vfbd =
@@ -996,8 +1004,6 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
- if (vfbd->base.user_obj)
- ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
@@ -1063,6 +1069,7 @@ static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
}
static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+ .create_handle = vmw_framebuffer_bo_create_handle,
.destroy = vmw_framebuffer_bo_destroy,
.dirty = vmw_framebuffer_bo_dirty_ext,
};
@@ -1188,7 +1195,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
metadata.base_size.depth = 1;
metadata.scanout = true;
- ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out);
+ ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret;
@@ -1251,6 +1258,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
goto out_err1;
}
+ vfbd->base.base.obj[0] = &bo->base.base;
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
@@ -1368,34 +1376,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_buffer_object *bo = NULL;
- struct ttm_base_object *user_obj;
int ret;
- /*
- * Take a reference on the user object of the resource
- * backing the kms fb. This ensures that user-space handle
- * lookups on that resource will always work as long as
- * it's registered with a kms framebuffer. This is important,
- * since vmw_execbuf_process identifies resources in the
- * command stream using user-space handles.
- */
-
- user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
- if (unlikely(user_obj == NULL)) {
- DRM_ERROR("Could not locate requested kms frame buffer.\n");
- return ERR_PTR(-ENOENT);
- }
-
- /**
- * End conditioned code.
- */
-
/* returns either a bo or surface */
- ret = vmw_user_lookup_handle(dev_priv, tfile,
+ ret = vmw_user_lookup_handle(dev_priv, file_priv,
mode_cmd->handles[0],
&surface, &bo);
if (ret)
@@ -1428,10 +1415,8 @@ err_out:
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
- } else
- vfb->user_obj = user_obj;
+ }
return &vfb->base;
}
@@ -2516,7 +2501,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
- handle, -1, NULL);
+ handle, -1);
if (out_fence)
*out_fence = fence;
else
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index bbc809f7bd8a..4d36e8507380 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -219,7 +219,6 @@ struct vmw_framebuffer {
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
bool bo;
- struct ttm_base_object *user_obj;
uint32_t user_handle;
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index f9394207dd3c..0a8cc28d6606 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2012-2021 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -29,12 +29,6 @@
#include "vmwgfx_drv.h"
-/*
- * If we set up the screen target otable, screen objects stop working.
- */
-
-#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
-
#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT64_0
@@ -75,7 +69,7 @@ static const struct vmw_otable pre_dx_tables[] = {
{VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true},
{VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true},
{VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry),
- NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+ NULL, true}
};
static const struct vmw_otable dx_tables[] = {
@@ -84,7 +78,7 @@ static const struct vmw_otable dx_tables[] = {
{VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true},
{VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true},
{VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry),
- NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+ NULL, true},
{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};
@@ -146,9 +140,6 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
if (otable->size <= PAGE_SIZE) {
mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
mob->pt_root_page = vmw_piter_dma_addr(&iter);
- } else if (vsgt->num_regions == 1) {
- mob->pt_level = SVGA3D_MOBFMT_RANGE;
- mob->pt_root_page = vmw_piter_dma_addr(&iter);
} else {
ret = vmw_mob_pt_populate(dev_priv, mob);
if (unlikely(ret != 0))
@@ -413,10 +404,9 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
* @mob: Pointer to the mob the pagetable of which we want to
* populate.
*
- * This function allocates memory to be used for the pagetable, and
- * adjusts TTM memory accounting accordingly. Returns ENOMEM if
- * memory resources aren't sufficient and may cause TTM buffer objects
- * to be swapped out by using the TTM memory accounting function.
+ * This function allocates memory to be used for the pagetable.
+ * Returns -ENOMEM if memory resources aren't sufficient; the
+ * allocation may cause TTM buffer objects to be swapped out.
*/
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
struct vmw_mob *mob)
@@ -624,9 +614,6 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
if (likely(num_data_pages == 1)) {
mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
- } else if (vsgt->num_regions == 1) {
- mob->pt_level = SVGA3D_MOBFMT_RANGE;
- mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
} else if (unlikely(mob->pt_bo == NULL)) {
ret = vmw_mob_pt_populate(dev_priv, mob);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 54c5d16eb3b7..e9f5c89b4ca6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
+ ret = vmw_user_bo_lookup(file_priv, arg->handle, &buf);
if (ret)
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 922317d1acc8..7bc99b1279f7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -57,7 +57,6 @@ enum vmw_bo_dirty_method {
* @ref_count: Reference count for this structure
* @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
- * @size: The accounting size for this struct.
* @bitmap: A bitmap where each bit represents a page. A set bit means a
* dirty page.
*/
@@ -68,7 +67,6 @@ struct vmw_bo_dirty {
unsigned int change_count;
unsigned int ref_count;
unsigned long bitmap_size;
- size_t size;
unsigned long bitmap[];
};
@@ -233,12 +231,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t num_pages = vbo->base.resource->num_pages;
- size_t size, acc_size;
+ size_t size;
int ret;
- static struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
if (dirty) {
dirty->ref_count++;
@@ -246,20 +240,12 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
}
size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
- acc_size = ttm_round_pot(size);
- ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
- if (ret) {
- VMW_DEBUG_USER("Out of graphics memory for buffer object "
- "dirty tracker.\n");
- return ret;
- }
dirty = kvzalloc(size, GFP_KERNEL);
if (!dirty) {
ret = -ENOMEM;
goto out_no_dirty;
}
- dirty->size = acc_size;
dirty->bitmap_size = num_pages;
dirty->start = dirty->bitmap_size;
dirty->end = 0;
@@ -285,7 +271,6 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
return 0;
out_no_dirty:
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
return ret;
}
@@ -304,10 +289,7 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
struct vmw_bo_dirty *dirty = vbo->dirty;
if (dirty && --dirty->ref_count == 0) {
- size_t acc_size = dirty->size;
-
kvfree(dirty);
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
vbo->dirty = NULL;
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index d9552a1efd13..2d72a5ee7c0c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -85,6 +85,5 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8d1e869cc196..708899ba2102 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -320,11 +320,12 @@ vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
 * The pointers pointed to by out_surf and out_buf need to be NULL.
*/
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
+ struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_buffer_object **out_buf)
{
+ struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res;
int ret;
@@ -339,7 +340,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}
*out_surf = NULL;
- ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
+ ret = vmw_user_bo_lookup(filp, handle, out_buf);
return ret;
}
@@ -362,14 +363,10 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- backup = kzalloc(sizeof(*backup), GFP_KERNEL);
- if (unlikely(!backup))
- return -ENOMEM;
-
- ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
- res->func->backup_placement,
- interruptible, false,
- &vmw_bo_bo_free);
+ ret = vmw_bo_create(res->dev_priv, res->backup_size,
+ res->func->backup_placement,
+ interruptible, false,
+ &vmw_bo_bo_free, &backup);
if (unlikely(ret != 0))
goto out_no_bo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index bd157fb21b45..3004c7a719e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -442,19 +442,15 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
vps->bo_size = 0;
}
- vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
- if (!vps->bo)
- return -ENOMEM;
-
vmw_svga_enable(dev_priv);
 /* After we have allocated the backing store we might not be able to
 * resume the overlays; this is preferred to failing the allocation.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_bo_init(dev_priv, vps->bo, size,
- &vmw_vram_placement,
- false, true, &vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, size,
+ &vmw_vram_placement,
+ false, true, &vmw_bo_bo_free, &vps->bo);
vmw_overlay_resume_all(dev_priv);
if (ret) {
 vps->bo = NULL; /* vmw_bo_create frees on error */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index b8dd62529104..108a496b5d18 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -53,10 +53,6 @@ struct vmw_dx_shader {
struct list_head cotable_head;
};
-static uint64_t vmw_user_shader_size;
-static uint64_t vmw_shader_size;
-static size_t vmw_shader_dx_size;
-
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -79,7 +75,6 @@ static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
-static uint64_t vmw_user_shader_size;
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
@@ -563,16 +558,14 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
*
* @res: The shader resource
*
- * Frees the DX shader resource and updates memory accounting.
+ * Frees the DX shader resource.
*/
static void vmw_dx_shader_res_free(struct vmw_resource *res)
{
- struct vmw_private *dev_priv = res->dev_priv;
struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
vmw_resource_unreference(&shader->cotable);
kfree(shader);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
}
/**
@@ -594,30 +587,13 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_dx_shader *shader;
struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (!vmw_shader_dx_size)
- vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
-
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
- &ttm_opt_ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader "
- "creation.\n");
- return ret;
- }
-
shader = kmalloc(sizeof(*shader), GFP_KERNEL);
if (!shader) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
return -ENOMEM;
}
@@ -669,21 +645,15 @@ static void vmw_user_shader_free(struct vmw_resource *res)
{
struct vmw_user_shader *ushader =
container_of(res, struct vmw_user_shader, shader.res);
- struct vmw_private *dev_priv = res->dev_priv;
ttm_base_object_kfree(ushader, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_shader_size);
}
static void vmw_shader_free(struct vmw_resource *res)
{
struct vmw_shader *shader = vmw_res_to_shader(res);
- struct vmw_private *dev_priv = res->dev_priv;
kfree(shader);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_shader_size);
}
/*
@@ -706,8 +676,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_ref_object_base_unref(tfile, arg->handle,
- TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->handle);
}
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
@@ -722,31 +691,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (unlikely(vmw_user_shader_size == 0))
- vmw_user_shader_size =
- ttm_round_pot(sizeof(struct vmw_user_shader)) +
- VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_shader_size,
- &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader "
- "creation.\n");
- goto out;
- }
-
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(!ushader)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_shader_size);
ret = -ENOMEM;
goto out;
}
@@ -769,7 +717,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER,
- &vmw_user_shader_base_release, NULL);
+ &vmw_user_shader_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -793,31 +741,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
{
struct vmw_shader *shader;
struct vmw_resource *res;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (unlikely(vmw_shader_size == 0))
- vmw_shader_size =
- ttm_round_pot(sizeof(struct vmw_shader)) +
- VMW_IDA_ACC_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_shader_size,
- &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader "
- "creation.\n");
- goto out_err;
- }
-
shader = kzalloc(sizeof(*shader), GFP_KERNEL);
if (unlikely(!shader)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_shader_size);
ret = -ENOMEM;
goto out_err;
}
@@ -849,8 +776,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
int ret;
if (buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_bo_lookup(tfile, buffer_handle,
- &buffer, NULL);
+ ret = vmw_user_bo_lookup(file_priv, buffer_handle, &buffer);
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
return ret;
@@ -966,13 +892,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- /* Allocate and pin a DMA buffer */
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (unlikely(!buf))
- return -ENOMEM;
-
- ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement,
- true, true, vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement,
+ true, true, vmw_bo_bo_free, &buf);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 33b69a70cfe3..483ad544ea54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -32,12 +32,10 @@
* struct vmw_user_simple_resource - User-space simple resource struct
*
* @base: The TTM base object implementing user-space visibility.
- * @account_size: How much memory was accounted for this object.
* @simple: The embedded struct vmw_simple_resource.
*/
struct vmw_user_simple_resource {
struct ttm_base_object base;
- size_t account_size;
struct vmw_simple_resource simple;
/*
* Nothing to be placed after @simple, since size of @simple is
@@ -91,18 +89,15 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
*
* @res: The struct vmw_resource member of the simple resource object.
*
- * Frees memory and memory accounting for the object.
+ * Frees memory for the object.
*/
static void vmw_simple_resource_free(struct vmw_resource *res)
{
struct vmw_user_simple_resource *usimple =
container_of(res, struct vmw_user_simple_resource,
simple.res);
- struct vmw_private *dev_priv = res->dev_priv;
- size_t size = usimple->account_size;
ttm_base_object_kfree(usimple, base);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
@@ -149,39 +144,19 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *res;
struct vmw_resource *tmp;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
size_t alloc_size;
- size_t account_size;
int ret;
alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
func->size;
- account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
- TTM_OBJ_EXTRA_SIZE;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
- &ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for %s"
- " creation.\n", func->res_func.type_name);
-
- goto out_ret;
- }
usimple = kzalloc(alloc_size, GFP_KERNEL);
if (!usimple) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- account_size);
ret = -ENOMEM;
goto out_ret;
}
usimple->simple.func = func;
- usimple->account_size = account_size;
res = &usimple->simple.res;
usimple->base.shareable = false;
usimple->base.tfile = NULL;
@@ -197,7 +172,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &usimple->base, false,
func->ttm_res_type,
- &vmw_simple_resource_base_release, NULL);
+ &vmw_simple_resource_base_release);
if (ret) {
vmw_resource_unreference(&tmp);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 9efb4463ce99..4ea32b01efc0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -279,18 +279,15 @@ static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
*
* @res: Pointer to a struct vmw_resource
*
- * Frees memory and memory accounting held by a struct vmw_view.
+ * Frees memory held by the struct vmw_view.
*/
static void vmw_view_res_free(struct vmw_resource *res)
{
struct vmw_view *view = vmw_view(res);
- size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
- struct vmw_private *dev_priv = res->dev_priv;
vmw_resource_unreference(&view->cotable);
vmw_resource_unreference(&view->srf);
kfree_rcu(view, rcu);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
@@ -327,10 +324,6 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_private *dev_priv = ctx->dev_priv;
struct vmw_resource *res;
struct vmw_view *view;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
size_t size;
int ret;
@@ -347,16 +340,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
size = offsetof(struct vmw_view, cmd) + cmd_size;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for view creation\n");
- return ret;
- }
-
view = kmalloc(size, GFP_KERNEL);
if (!view) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
return -ENOMEM;
}
@@ -582,4 +567,8 @@ static void vmw_so_build_asserts(void)
offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineUAView, sid));
+ BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+ offsetof(SVGA3dCmdDXDefineDepthStencilView_v2, sid));
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index f48b84bfeeac..01c701e7466e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -93,7 +93,10 @@ static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
id == SVGA_3D_CMD_DX_DESTROY_UA_VIEW)
return vmw_view_ua;
- if (tmp > (u32)vmw_view_max)
+ if (id == SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2)
+ return vmw_view_ds;
+
+ if (tmp > (u32)vmw_view_ds)
return vmw_view_max;
return (enum vmw_view_type) tmp;
@@ -123,6 +126,7 @@ static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
return vmw_so_ds;
case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
+ case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2:
case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
return vmw_so_rs;
case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index d85310b2608d..ae9a6044d448 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1123,7 +1123,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
}
if (!vps->surf) {
- ret = vmw_gb_surface_define(dev_priv, 0, &metadata,
+ ret = vmw_gb_surface_define(dev_priv, &metadata,
&vps->surf);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
@@ -1872,8 +1872,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
int i, ret;
- /* Do nothing if Screen Target support is turned off */
- if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE || !dev_priv->has_mob)
+ /* Do nothing if there's no support for MOBs */
+ if (!dev_priv->has_mob)
return -ENOSYS;
if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index c8efa4a6c995..2de97419d5c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -60,8 +60,6 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
-static size_t vmw_streamoutput_size;
-
static const struct vmw_res_func vmw_dx_streamoutput_func = {
.res_type = vmw_res_streamoutput,
.needs_backup = true,
@@ -254,12 +252,10 @@ vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
{
- struct vmw_private *dev_priv = res->dev_priv;
struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
vmw_resource_unreference(&so->cotable);
kfree(so);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
}
static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
@@ -284,27 +280,10 @@ int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_dx_streamoutput *so;
struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
- if (!vmw_streamoutput_size)
- vmw_streamoutput_size = ttm_round_pot(sizeof(*so));
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_streamoutput_size, &ttm_opt_ctx);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for streamout.\n");
- return ret;
- }
-
so = kmalloc(sizeof(*so), GFP_KERNEL);
if (!so) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_streamoutput_size);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5d53a5f9d123..00e8e27e4884 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -45,16 +45,12 @@
* @prime: The TTM prime object.
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
- * @size: TTM accounting size for the surface.
* @master: Master of the creating client. Used for security check.
- * @backup_base: The TTM base object of the backup buffer.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
struct vmw_surface srf;
- uint32_t size;
struct drm_master *master;
- struct ttm_base_object *backup_base;
};
/**
@@ -74,13 +70,11 @@ struct vmw_surface_offset {
/**
* struct vmw_surface_dirty - Surface dirty-tracker
* @cache: Cached layout information of the surface.
- * @size: Accounting size for the struct vmw_surface_dirty.
* @num_subres: Number of subresources.
* @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
*/
struct vmw_surface_dirty {
struct vmw_surface_cache cache;
- size_t size;
u32 num_subres;
SVGA3dBox boxes[];
};
@@ -129,9 +123,6 @@ static const struct vmw_user_resource_conv user_surface_conv = {
const struct vmw_user_resource_conv *user_surface_converter =
&user_surface_conv;
-
-static uint64_t vmw_user_surface_size;
-
static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
.needs_backup = false,
@@ -359,7 +350,7 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf,
* vmw_surface.
*
 * Destroys the device surface associated with a struct vmw_surface if
- * any, and adjusts accounting and resource count accordingly.
+ * any, and adjusts resource count accordingly.
*/
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
@@ -666,8 +657,6 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_surface *srf = vmw_res_to_srf(res);
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- struct vmw_private *dev_priv = srf->res.dev_priv;
- uint32_t size = user_srf->size;
WARN_ON_ONCE(res->dirty);
if (user_srf->master)
@@ -676,7 +665,6 @@ static void vmw_user_surface_free(struct vmw_resource *res)
kfree(srf->metadata.sizes);
kfree(srf->snooper.image);
ttm_prime_object_kfree(user_srf, prime);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
@@ -696,8 +684,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
- if (user_srf->backup_base)
- ttm_base_object_unref(&user_srf->backup_base);
vmw_resource_unreference(&res);
}
@@ -715,7 +701,7 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+ return ttm_ref_object_base_unref(tfile, arg->sid);
}
/**
@@ -740,23 +726,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
int i, j;
uint32_t cur_bo_offset;
struct drm_vmw_size *cur_size;
struct vmw_surface_offset *cur_offset;
uint32_t num_sizes;
- uint32_t size;
const SVGA3dSurfaceDesc *desc;
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
@@ -768,10 +745,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
num_sizes == 0)
return -EINVAL;
- size = vmw_user_surface_size +
- ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
- ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
desc = vmw_surface_get_desc(req->format);
if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
@@ -779,18 +752,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface.\n");
- goto out_unlock;
- }
-
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(!user_srf)) {
ret = -ENOMEM;
- goto out_no_user_srf;
+ goto out_unlock;
}
srf = &user_srf->srf;
@@ -805,7 +770,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
memcpy(metadata->mip_levels, req->mip_levels,
sizeof(metadata->mip_levels));
metadata->num_sizes = num_sizes;
- user_srf->size = size;
metadata->sizes =
memdup_user((struct drm_vmw_size __user *)(unsigned long)
req->size_addr,
@@ -883,22 +847,22 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (dev_priv->has_mob && req->shareable) {
uint32_t backup_handle;
- ret = vmw_user_bo_alloc(dev_priv, tfile,
- res->backup_size,
- true,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
+ ret = vmw_gem_object_create_with_handle(dev_priv,
+ file_priv,
+ res->backup_size,
+ &backup_handle,
+ &res->backup);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
+ vmw_bo_reference(res->backup);
}
tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
+ &vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -916,8 +880,6 @@ out_no_offsets:
kfree(metadata->sizes);
out_no_sizes:
ttm_prime_object_kfree(user_srf, prime);
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
return ret;
}
@@ -955,7 +917,6 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
VMW_DEBUG_USER("Referenced object is not a surface.\n");
goto out_bad_resource;
}
-
if (handle_type != DRM_VMW_HANDLE_PRIME) {
bool require_exist = false;
@@ -980,8 +941,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
- require_exist);
+ ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
@@ -995,7 +955,7 @@ out_bad_resource:
ttm_base_object_unref(&base);
out_no_lookup:
if (handle_type == DRM_VMW_HANDLE_PRIME)
- (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ (void) ttm_ref_object_base_unref(tfile, handle);
return ret;
}
@@ -1045,7 +1005,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
srf->metadata.num_sizes);
- ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, base->handle);
ret = -EFAULT;
}
@@ -1459,7 +1419,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
struct vmw_resource *res;
struct vmw_resource *tmp;
int ret = 0;
- uint32_t size;
uint32_t backup_handle = 0;
SVGA3dSurfaceAllFlags svga3d_flags_64 =
SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
@@ -1506,12 +1465,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
return -EINVAL;
}
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
- size = vmw_user_surface_size;
-
metadata.flags = svga3d_flags_64;
metadata.format = req->base.format;
metadata.mip_levels[0] = req->base.mip_levels;
@@ -1526,7 +1479,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vmw_surface_flag_scanout;
/* Define a surface based on the parameters. */
- ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
+ ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
if (ret != 0) {
VMW_DEBUG_USER("Failed to define surface.\n");
return ret;
@@ -1539,9 +1492,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
res = &user_srf->srf.res;
if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
- &res->backup,
- &user_srf->backup_base);
+ ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
+ &res->backup);
if (ret == 0) {
if (res->backup->base.base.size < res->backup_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
@@ -1554,14 +1506,15 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
} else if (req->base.drm_surface_flags &
(drm_vmw_surface_flag_create_buffer |
- drm_vmw_surface_flag_coherent))
- ret = vmw_user_bo_alloc(dev_priv, tfile,
- res->backup_size,
- req->base.drm_surface_flags &
- drm_vmw_surface_flag_shareable,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
+ drm_vmw_surface_flag_coherent)) {
+ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ res->backup_size,
+ &backup_handle,
+ &res->backup);
+ if (ret == 0)
+ vmw_bo_reference(res->backup);
+
+ }
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
@@ -1593,7 +1546,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
req->base.drm_surface_flags &
drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
+ &vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -1613,7 +1566,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->buffer_size = 0;
rep->buffer_handle = SVGA3D_INVALID_ID;
}
-
vmw_resource_unreference(&res);
out_unlock:
@@ -1636,12 +1588,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct vmw_surface_metadata *metadata;
struct ttm_base_object *base;
- uint32_t backup_handle;
+ u32 backup_handle;
int ret;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
@@ -1658,14 +1609,12 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
metadata = &srf->metadata;
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
- ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
+ ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
+ &backup_handle);
mutex_unlock(&dev_priv->cmdbuf_mutex);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a GB surface "
- "backup buffer.\n");
- (void) ttm_ref_object_base_unref(tfile, base->handle,
- TTM_REF_USAGE);
+ if (ret != 0) {
+ drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
+ req->sid);
goto out_bad_resource;
}
@@ -1955,11 +1904,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
u32 num_mip;
u32 num_subres;
u32 num_samples;
- size_t dirty_size, acc_size;
- static struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
+ size_t dirty_size;
int ret;
if (metadata->array_size)
@@ -1973,14 +1918,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
num_subres = num_layers * num_mip;
dirty_size = struct_size(dirty, boxes, num_subres);
- acc_size = ttm_round_pot(dirty_size);
- ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
- acc_size, &ctx);
- if (ret) {
- VMW_DEBUG_USER("Out of graphics memory for surface "
- "dirty tracker.\n");
- return ret;
- }
dirty = kvzalloc(dirty_size, GFP_KERNEL);
if (!dirty) {
@@ -1990,13 +1927,12 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
num_samples = max_t(u32, 1, metadata->multisample_count);
ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
- num_mip, num_layers, num_samples,
- &dirty->cache);
+ num_mip, num_layers, num_samples,
+ &dirty->cache);
if (ret)
goto out_no_cache;
dirty->num_subres = num_subres;
- dirty->size = acc_size;
res->dirty = (struct vmw_resource_dirty *) dirty;
return 0;
@@ -2004,7 +1940,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
out_no_cache:
kvfree(dirty);
out_no_dirty:
- ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
return ret;
}
@@ -2015,10 +1950,8 @@ static void vmw_surface_dirty_free(struct vmw_resource *res)
{
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
- size_t acc_size = dirty->size;
kvfree(dirty);
- ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
res->dirty = NULL;
}
@@ -2051,8 +1984,6 @@ static int vmw_surface_clean(struct vmw_resource *res)
* vmw_gb_surface_define - Define a private GB surface
*
* @dev_priv: Pointer to a device private.
- * @user_accounting_size: Used to track user-space memory usage, set
- * to 0 for kernel mode only memory
* @metadata: Metadata representing the surface to create.
* @user_srf_out: allocated user_srf. Set to NULL on failure.
*
@@ -2062,17 +1993,12 @@ static int vmw_surface_clean(struct vmw_resource *res)
* it available to user mode drivers.
*/
int vmw_gb_surface_define(struct vmw_private *dev_priv,
- uint32_t user_accounting_size,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out)
{
struct vmw_surface_metadata *metadata;
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
u32 sample_count = 1;
u32 num_layers = 1;
int ret;
@@ -2113,22 +2039,13 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
if (req->sizes != NULL)
return -EINVAL;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- user_accounting_size, &ctx);
- if (ret != 0) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface.\n");
- goto out_unlock;
- }
-
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(!user_srf)) {
ret = -ENOMEM;
- goto out_no_user_srf;
+ goto out_unlock;
}
*srf_out = &user_srf->srf;
- user_srf->size = user_accounting_size;
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
@@ -2179,9 +2096,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
return ret;
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
-
out_unlock:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
new file mode 100644
index 000000000000..b0005b03a617
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2021 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "vmwgfx_drv.h"
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_resource.h>
+#include <linux/slab.h>
+
+
+static int vmw_sys_man_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(bo, place, *res);
+ return 0;
+}
+
+static void vmw_sys_man_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ kfree(res);
+}
+
+static const struct ttm_resource_manager_func vmw_sys_manager_func = {
+ .alloc = vmw_sys_man_alloc,
+ .free = vmw_sys_man_free,
+};
+
+int vmw_sys_man_init(struct vmw_private *dev_priv)
+{
+ struct ttm_device *bdev = &dev_priv->bdev;
+ struct ttm_resource_manager *man =
+ kzalloc(sizeof(*man), GFP_KERNEL);
+
+ if (!man)
+ return -ENOMEM;
+
+ man->use_tt = true;
+ man->func = &vmw_sys_manager_func;
+
+ ttm_resource_manager_init(man, 0);
+ ttm_set_driver_manager(bdev, VMW_PL_SYSTEM, man);
+ ttm_resource_manager_set_used(man, true);
+ return 0;
+}
+
+void vmw_sys_man_fini(struct vmw_private *dev_priv)
+{
+ struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev,
+ VMW_PL_SYSTEM);
+
+ ttm_resource_manager_evict_all(&dev_priv->bdev, man);
+
+ ttm_resource_manager_set_used(man, false);
+ ttm_resource_manager_cleanup(man);
+
+ ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, NULL);
+ kfree(man);
+}
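For context, a hedged sketch of how the two entry points pair up over
the device lifetime; the wrappers below are hypothetical, not call
sites from this patch:

static int example_device_init(struct vmw_private *dev_priv)
{
	/* Registers a TT-backed manager for the VMW_PL_SYSTEM domain. */
	return vmw_sys_man_init(dev_priv);
}

static void example_device_fini(struct vmw_private *dev_priv)
{
	/* Evicts all resources, then unregisters and frees the manager. */
	vmw_sys_man_fini(dev_priv);
}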
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
deleted file mode 100644
index 2a3d3468e4e0..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/*
- * Huge page-table-entry support for IO memory.
- *
- * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
- */
-#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_range_manager.h>
-
-/**
- * struct vmw_thp_manager - Range manager implementing huge page alignment
- *
- * @manager: TTM resource manager.
- * @mm: The underlying range manager. Protected by @lock.
- * @lock: Manager lock.
- */
-struct vmw_thp_manager {
- struct ttm_resource_manager manager;
- struct drm_mm mm;
- spinlock_t lock;
-};
-
-static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
-{
- return container_of(man, struct vmw_thp_manager, manager);
-}
-
-static const struct ttm_resource_manager_func vmw_thp_func;
-
-static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
- struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long align_pages,
- const struct ttm_place *place,
- struct ttm_resource *mem,
- unsigned long lpfn,
- enum drm_mm_insert_mode mode)
-{
- if (align_pages >= bo->page_alignment &&
- (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
- return drm_mm_insert_node_in_range(mm, node,
- mem->num_pages,
- align_pages, 0,
- place->fpfn, lpfn, mode);
- }
-
- return -ENOSPC;
-}
-
-static int vmw_thp_get_node(struct ttm_resource_manager *man,
- struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource **res)
-{
- struct vmw_thp_manager *rman = to_thp_manager(man);
- struct drm_mm *mm = &rman->mm;
- struct ttm_range_mgr_node *node;
- unsigned long align_pages;
- unsigned long lpfn;
- enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
- int ret;
-
- node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- ttm_resource_init(bo, place, &node->base);
-
- lpfn = place->lpfn;
- if (!lpfn)
- lpfn = man->size;
-
- mode = DRM_MM_INSERT_BEST;
- if (place->flags & TTM_PL_FLAG_TOPDOWN)
- mode = DRM_MM_INSERT_HIGH;
-
- spin_lock(&rman->lock);
- if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
- align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
- if (node->base.num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
- align_pages, place,
- &node->base, lpfn, mode);
- if (!ret)
- goto found_unlock;
- }
- }
-
- align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
- if (node->base.num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
- align_pages, place, &node->base,
- lpfn, mode);
- if (!ret)
- goto found_unlock;
- }
-
- ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
- node->base.num_pages,
- bo->page_alignment, 0,
- place->fpfn, lpfn, mode);
-found_unlock:
- spin_unlock(&rman->lock);
-
- if (unlikely(ret)) {
- kfree(node);
- } else {
- node->base.start = node->mm_nodes[0].start;
- *res = &node->base;
- }
-
- return ret;
-}
-
-static void vmw_thp_put_node(struct ttm_resource_manager *man,
- struct ttm_resource *res)
-{
- struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
- struct vmw_thp_manager *rman = to_thp_manager(man);
-
- spin_lock(&rman->lock);
- drm_mm_remove_node(&node->mm_nodes[0]);
- spin_unlock(&rman->lock);
-
- kfree(node);
-}
-
-int vmw_thp_init(struct vmw_private *dev_priv)
-{
- struct vmw_thp_manager *rman;
-
- rman = kzalloc(sizeof(*rman), GFP_KERNEL);
- if (!rman)
- return -ENOMEM;
-
- ttm_resource_manager_init(&rman->manager,
- dev_priv->vram_size >> PAGE_SHIFT);
-
- rman->manager.func = &vmw_thp_func;
- drm_mm_init(&rman->mm, 0, rman->manager.size);
- spin_lock_init(&rman->lock);
-
- ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
- ttm_resource_manager_set_used(&rman->manager, true);
- return 0;
-}
-
-void vmw_thp_fini(struct vmw_private *dev_priv)
-{
- struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
- struct vmw_thp_manager *rman = to_thp_manager(man);
- struct drm_mm *mm = &rman->mm;
- int ret;
-
- ttm_resource_manager_set_used(man, false);
-
- ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
- if (ret)
- return;
- spin_lock(&rman->lock);
- drm_mm_clean(mm);
- drm_mm_takedown(mm);
- spin_unlock(&rman->lock);
- ttm_resource_manager_cleanup(man);
- ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
- kfree(rman);
-}
-
-static void vmw_thp_debug(struct ttm_resource_manager *man,
- struct drm_printer *printer)
-{
- struct vmw_thp_manager *rman = to_thp_manager(man);
-
- spin_lock(&rman->lock);
- drm_mm_print(&rman->mm, printer);
- spin_unlock(&rman->lock);
-}
-
-static const struct ttm_resource_manager_func vmw_thp_func = {
- .alloc = vmw_thp_get_node,
- .free = vmw_thp_put_node,
- .debug = vmw_thp_debug
-};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index e899a936a42a..b84ecc6d6611 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -92,6 +92,13 @@ static const struct ttm_place gmr_vram_placement_flags[] = {
}
};
+static const struct ttm_place vmw_sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = VMW_PL_SYSTEM,
+ .flags = 0
+};
+
struct ttm_placement vmw_vram_gmr_placement = {
.num_placement = 2,
.placement = vram_gmr_placement_flags,
@@ -113,28 +120,11 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
-static const struct ttm_place evictable_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_SYSTEM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_MOB,
- .flags = 0
- }
+struct ttm_placement vmw_pt_sys_placement = {
+ .num_placement = 1,
+ .placement = &vmw_sys_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &vmw_sys_placement_flags
};
static const struct ttm_place nonfixed_placement_flags[] = {
@@ -156,13 +146,6 @@ static const struct ttm_place nonfixed_placement_flags[] = {
}
};
-struct ttm_placement vmw_evictable_placement = {
- .num_placement = 4,
- .placement = evictable_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
-};
-
struct ttm_placement vmw_srf_placement = {
.num_placement = 1,
.num_busy_placement = 2,
@@ -184,19 +167,6 @@ struct ttm_placement vmw_nonfixed_placement = {
.busy_placement = &sys_placement_flags
};
-struct vmw_ttm_tt {
- struct ttm_tt dma_ttm;
- struct vmw_private *dev_priv;
- int gmr_id;
- struct vmw_mob *mob;
- int mem_type;
- struct sg_table sgt;
- struct vmw_sg_table vsgt;
- uint64_t sg_alloc_size;
- bool mapped;
- bool bound;
-};
-
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
@@ -317,17 +287,8 @@ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
struct vmw_private *dev_priv = vmw_tt->dev_priv;
- struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
- struct vmw_piter iter;
- dma_addr_t old;
int ret = 0;
- static size_t sgl_size;
- static size_t sgt_size;
if (vmw_tt->mapped)
return 0;
@@ -336,20 +297,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
vsgt->pages = vmw_tt->dma_ttm.pages;
vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
vsgt->addrs = vmw_tt->dma_ttm.dma_address;
- vsgt->sgt = &vmw_tt->sgt;
+ vsgt->sgt = NULL;
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
- if (unlikely(!sgl_size)) {
- sgl_size = ttm_round_pot(sizeof(struct scatterlist));
- sgt_size = ttm_round_pot(sizeof(struct sg_table));
- }
- vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
- ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
- if (unlikely(ret != 0))
- return ret;
-
+ vsgt->sgt = &vmw_tt->sgt;
ret = sg_alloc_table_from_pages_segment(
&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
(unsigned long)vsgt->num_pages << PAGE_SHIFT,
@@ -357,15 +310,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (ret)
goto out_sg_alloc_fail;
- if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
- uint64_t over_alloc =
- sgl_size * (vsgt->num_pages -
- vmw_tt->sgt.orig_nents);
-
- ttm_mem_global_free(glob, over_alloc);
- vmw_tt->sg_alloc_size -= over_alloc;
- }
-
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
goto out_map_fail;
@@ -375,16 +319,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
break;
}
- old = ~((dma_addr_t) 0);
- vmw_tt->vsgt.num_regions = 0;
- for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
- dma_addr_t cur = vmw_piter_dma_addr(&iter);
-
- if (cur != old + PAGE_SIZE)
- vmw_tt->vsgt.num_regions++;
- old = cur;
- }
-
vmw_tt->mapped = true;
return 0;
@@ -392,7 +326,6 @@ out_map_fail:
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
- ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
return ret;
}
@@ -418,8 +351,6 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
vmw_ttm_unmap_from_dma(vmw_tt);
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_tt->sg_alloc_size);
break;
default:
break;
@@ -484,6 +415,9 @@ static int vmw_ttm_bind(struct ttm_device *bdev,
&vmw_be->vsgt, ttm->num_pages,
vmw_be->gmr_id);
break;
+ case VMW_PL_SYSTEM:
+ /* Nothing to be done for a system bind */
+ break;
default:
BUG();
}
@@ -507,6 +441,8 @@ static void vmw_ttm_unbind(struct ttm_device *bdev,
case VMW_PL_MOB:
vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
break;
+ case VMW_PL_SYSTEM:
+ break;
default:
BUG();
}
@@ -534,7 +470,6 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- unsigned int i;
int ret;
/* TODO: maybe completely drop this ? */
@@ -542,22 +477,7 @@ static int vmw_ttm_populate(struct ttm_device *bdev,
return 0;
ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
- if (ret)
- return ret;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
- PAGE_SIZE, ctx);
- if (ret)
- goto error;
- }
- return 0;
-error:
- while (i--)
- ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
- PAGE_SIZE);
- ttm_pool_free(&bdev->pool, ttm);
return ret;
}
@@ -566,7 +486,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
- unsigned int i;
vmw_ttm_unbind(bdev, ttm);
@@ -577,10 +496,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
vmw_ttm_unmap_dma(vmw_tt);
- for (i = 0; i < ttm->num_pages; ++i)
- ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
- PAGE_SIZE);
-
ttm_pool_free(&bdev->pool, ttm);
}
@@ -624,6 +539,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
+ case VMW_PL_SYSTEM:
case VMW_PL_GMR:
case VMW_PL_MOB:
return 0;
@@ -670,6 +586,11 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
(void) ttm_bo_wait(bo, false, false);
}
+static bool vmw_memtype_is_system(uint32_t mem_type)
+{
+ return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
+}
+
static int vmw_move(struct ttm_buffer_object *bo,
bool evict,
struct ttm_operation_ctx *ctx,
@@ -680,7 +601,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
int ret;
- if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
+ if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
if (ret)
return ret;
@@ -689,7 +610,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
vmw_move_notify(bo, bo->resource, new_mem);
if (old_man->use_tt && new_man->use_tt) {
- if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ if (vmw_memtype_is_system(bo->resource->mem_type)) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
@@ -736,7 +657,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
int ret;
ret = vmw_bo_create_kernel(dev_priv, bo_size,
- &vmw_sys_placement,
+ &vmw_pt_sys_placement,
&bo);
if (unlikely(ret != 0))
return ret;
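
For reference, the simplified vmw_ttm_map_dma() above drops the ttm_mem_global accounting and builds the scatter-gather table straight from the TTM page array. A minimal sketch of that call pattern follows; my_map_pages() and the DMA_BIDIRECTIONAL direction are illustrative assumptions, not vmwgfx code:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Sketch only: build and DMA-map an sg_table for already-allocated pages,
 * capping each segment at the device's maximum mapping size. */
static int my_map_pages(struct device *dev, struct page **pages,
			unsigned int num_pages, struct sg_table *sgt)
{
	int ret;

	ret = sg_alloc_table_from_pages_segment(sgt, pages, num_pages, 0,
			(unsigned long)num_pages << PAGE_SHIFT,
			dma_max_mapping_size(dev), GFP_KERNEL);
	if (ret)
		return ret;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		sg_free_table(sgt);

	return ret;
}
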
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 0a4c340252ec..265f7c48d856 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -27,30 +27,44 @@
#include "vmwgfx_drv.h"
-static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev,
- unsigned long offset,
- unsigned long pages)
+static int vmw_bo_vm_lookup(struct ttm_device *bdev,
+ struct drm_file *filp,
+ unsigned long offset,
+ unsigned long pages,
+ struct ttm_buffer_object **p_bo)
{
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
struct drm_device *drm = &dev_priv->drm;
struct drm_vma_offset_node *node;
- struct ttm_buffer_object *bo = NULL;
+ int ret;
+
+ *p_bo = NULL;
drm_vma_offset_lock_lookup(bdev->vma_manager);
node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
if (likely(node)) {
- bo = container_of(node, struct ttm_buffer_object,
+ *p_bo = container_of(node, struct ttm_buffer_object,
base.vma_node);
- bo = ttm_bo_get_unless_zero(bo);
+ *p_bo = ttm_bo_get_unless_zero(*p_bo);
}
drm_vma_offset_unlock_lookup(bdev->vma_manager);
- if (!bo)
+ if (!*p_bo) {
drm_err(drm, "Could not find buffer object to map\n");
+ return -EINVAL;
+ }
+
+ if (!drm_vma_node_is_allowed(node, filp)) {
+ ret = -EACCES;
+ goto out_no_access;
+ }
- return bo;
+ return 0;
+out_no_access:
+ ttm_bo_put(*p_bo);
+ return ret;
}
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -64,7 +78,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
};
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_device *bdev = &dev_priv->bdev;
struct ttm_buffer_object *bo;
int ret;
@@ -72,13 +85,9 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
return -EINVAL;
- bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
- if (unlikely(!bo))
- return -EINVAL;
-
- ret = vmw_user_bo_verify_access(bo, tfile);
+ ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
if (unlikely(ret != 0))
- goto out_unref;
+ return ret;
ret = ttm_bo_mmap_obj(vma, bo);
if (unlikely(ret != 0))
@@ -99,38 +108,3 @@ out_unref:
return ret;
}
-/* struct vmw_validation_mem callback */
-static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
-{
- static struct ttm_operation_ctx ctx = {.interruptible = false,
- .no_wait_gpu = false};
- struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
- return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
-}
-
-/* struct vmw_validation_mem callback */
-static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
-{
- struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
- return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_validation_mem_init_ttm - Interface the validation memory tracker
- * to ttm.
- * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
- * rather than a struct vmw_validation_mem is to make sure assumption in the
- * callbacks that struct vmw_private derives from struct vmw_validation_mem
- * holds true.
- * @gran: The recommended allocation granularity
- */
-void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
-{
- struct vmw_validation_mem *vvm = &dev_priv->vvm;
-
- vvm->reserve_mem = vmw_vmt_reserve;
- vvm->unreserve_mem = vmw_vmt_unreserve;
- vvm->gran = gran;
-}
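
The reworked vmw_bo_vm_lookup() above folds the former vmw_user_bo_verify_access() step into the lookup itself by consulting drm_vma_node_is_allowed(). A condensed sketch of that pattern, assuming the standard drm_vma_offset manager and TTM reference APIs (lookup_checked is a hypothetical name):

#include <drm/drm_vma_manager.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_device.h>

/* Sketch: resolve an mmap offset to a BO and verify the caller may map it. */
static struct ttm_buffer_object *lookup_checked(struct ttm_device *bdev,
						struct drm_file *filp,
						unsigned long offset,
						unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
	if (node)
		bo = ttm_bo_get_unless_zero(container_of(node,
				struct ttm_buffer_object, base.vma_node));
	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (bo && !drm_vma_node_is_allowed(node, filp)) {
		ttm_bo_put(bo);		/* caller was never attached to this BO */
		bo = NULL;
	}

	return bo;
}
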
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
index ebc1d83c34b4..6ad744ae07f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
@@ -117,7 +117,7 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->stream_id, TTM_REF_USAGE);
+ arg->stream_id);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index b09094b50c5d..f46891012be3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -29,6 +29,9 @@
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"
+
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
/**
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
@@ -43,7 +46,7 @@
*/
struct vmw_validation_bo_node {
struct ttm_validate_buffer base;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
unsigned int coherent_count;
u32 as_mob : 1;
u32 cpu_blit : 1;
@@ -72,7 +75,7 @@ struct vmw_validation_bo_node {
*/
struct vmw_validation_res_node {
struct list_head head;
- struct drm_hash_item hash;
+ struct vmwgfx_hash_item hash;
struct vmw_resource *res;
struct vmw_buffer_object *new_backup;
unsigned long new_backup_offset;
@@ -113,13 +116,8 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
struct page *page;
if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
- int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
-
- if (ret)
- return NULL;
-
- ctx->vm_size_left += ctx->vm->gran;
- ctx->total_mem += ctx->vm->gran;
+ ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
+ ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -159,7 +157,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
ctx->mem_size_left = 0;
if (ctx->vm && ctx->total_mem) {
- ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
ctx->total_mem = 0;
ctx->vm_size_left = 0;
}
@@ -184,9 +181,9 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
return NULL;
if (ctx->ht) {
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
- if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
+ if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
bo_node = container_of(hash, typeof(*bo_node), hash);
} else {
struct vmw_validation_bo_node *entry;
@@ -221,9 +218,9 @@ vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
return NULL;
if (ctx->ht) {
- struct drm_hash_item *hash;
+ struct vmwgfx_hash_item *hash;
- if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
+ if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
res_node = container_of(hash, typeof(*res_node), hash);
} else {
struct vmw_validation_res_node *entry;
@@ -280,7 +277,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
if (ctx->ht) {
bo_node->hash.key = (unsigned long) vbo;
- ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
+ ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
if (ret) {
DRM_ERROR("Failed to initialize a buffer "
"validation entry.\n");
@@ -335,7 +332,7 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
if (ctx->ht) {
node->hash.key = (unsigned long) res;
- ret = drm_ht_insert_item(ctx->ht, &node->hash);
+ ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
if (ret) {
DRM_ERROR("Failed to initialize a resource validation "
"entry.\n");
@@ -688,13 +685,13 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
return;
list_for_each_entry(entry, &ctx->bo_list, base.head)
- (void) drm_ht_remove_item(ctx->ht, &entry->hash);
+ (void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);
list_for_each_entry(val, &ctx->resource_list, head)
- (void) drm_ht_remove_item(ctx->ht, &val->hash);
+ (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
list_for_each_entry(val, &ctx->resource_ctx_list, head)
- (void) drm_ht_remove_item(ctx->ht, &val->hash);
+ (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
ctx->ht = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 739906d1b3eb..f21df053882b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -31,29 +31,15 @@
#include <linux/list.h>
#include <linux/ww_mutex.h>
-#include <drm/drm_hashtab.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#include "vmwgfx_hashtab.h"
+
#define VMW_RES_DIRTY_NONE 0
#define VMW_RES_DIRTY_SET BIT(0)
#define VMW_RES_DIRTY_CLEAR BIT(1)
/**
- * struct vmw_validation_mem - Custom interface to provide memory reservations
- * for the validation code.
- * @reserve_mem: Callback to reserve memory
- * @unreserve_mem: Callback to unreserve memory
- * @gran: Reservation granularity. Contains a hint how much memory should
- * be reserved in each call to @reserve_mem(). A slow implementation may want
- * reservation to be done in large batches.
- */
-struct vmw_validation_mem {
- int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
- void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
- size_t gran;
-};
-
-/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
* @resource_list: List head for resource validation metadata
@@ -73,7 +59,7 @@ struct vmw_validation_mem {
* @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
- struct drm_open_hash *ht;
+ struct vmwgfx_open_hash *ht;
struct list_head resource_list;
struct list_head resource_ctx_list;
struct list_head bo_list;
@@ -129,21 +115,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
}
/**
- * vmw_validation_set_val_mem - Register a validation mem object for
- * validation memory reservation
- * @ctx: The validation context
- * @vm: Pointer to a struct vmw_validation_mem
- *
- * Must be set before the first attempt to allocate validation memory.
- */
-static inline void
-vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
- struct vmw_validation_mem *vm)
-{
- ctx->vm = vm;
-}
-
-/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
* @ht: Pointer to a hash table to use for duplicate finding
@@ -151,7 +122,7 @@ vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
* available at validation context declaration time
*/
static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
- struct drm_open_hash *ht)
+ struct vmwgfx_open_hash *ht)
{
ctx->ht = ht;
}
@@ -190,22 +161,6 @@ vmw_validation_bo_fence(struct vmw_validation_context *ctx,
}
/**
- * vmw_validation_context_init - Initialize a validation context
- * @ctx: Pointer to the validation context to initialize
- *
- * This function initializes a validation context with @merge_dups set
- * to false
- */
-static inline void
-vmw_validation_context_init(struct vmw_validation_context *ctx)
-{
- memset(ctx, 0, sizeof(*ctx));
- INIT_LIST_HEAD(&ctx->resource_list);
- INIT_LIST_HEAD(&ctx->resource_ctx_list);
- INIT_LIST_HEAD(&ctx->bo_list);
-}
-
-/**
* vmw_validation_align - Align a validation memory allocation
* @val: The size to be aligned
*
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index bc7605324db3..e63088c2121d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -469,19 +469,7 @@ static void xen_drm_drv_release(struct drm_device *dev)
kfree(drm_info);
}
-static const struct file_operations xen_drm_dev_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .poll = drm_poll,
- .read = drm_read,
- .llseek = no_llseek,
- .mmap = xen_drm_front_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops);
static const struct drm_driver xen_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
@@ -489,7 +477,7 @@ static const struct drm_driver xen_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
- .gem_prime_mmap = xen_drm_front_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.dumb_create = xen_drm_drv_dumb_create,
.fops = &xen_drm_dev_fops,
.name = "xendrm-du",
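
The DEFINE_DRM_GEM_FOPS() switch above replaces the hand-rolled file_operations table. Hedging on the exact field list in this kernel version, the macro expands to roughly the same set of DRM helpers, with drm_gem_mmap() as the .mmap handler so that the per-object ->mmap callback (added in xen_drm_front_gem.c below) is used:

#include <linux/fs.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

/* Approximate expansion of DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops): */
static const struct file_operations xen_drm_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,	/* dispatches to gem_obj->funcs->mmap */
};
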
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index b293c67230ef..dd358ba2bf8e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -57,6 +57,47 @@ static void gem_free_pages_array(struct xen_gem_object *xen_obj)
xen_obj->pages = NULL;
}
+static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
+ struct vm_area_struct *vma)
+{
+ struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+ int ret;
+
+ vma->vm_ops = gem_obj->funcs->vm_ops;
+
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+ * the whole buffer.
+ */
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_pgoff = 0;
+
+ /*
+ * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
+ * all memory which is shared with other entities in the system
+ * (including the hypervisor and other guests) must reside in memory
+ * which is mapped as Normal Inner Write-Back Outer Write-Back
+ * Inner-Shareable.
+ */
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+ /*
+ * The vm_operations_struct.fault handler would be called on CPU
+ * access to the VMA. For GPUs this isn't the case, because the CPU
+ * doesn't touch the memory. Insert the pages now, so both CPU and
+ * GPU are happy.
+ *
+ * FIXME: as all the pages are inserted now, no .fault handler can
+ * ever be called, so don't provide one.
+ */
+ ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+ if (ret < 0)
+ DRM_ERROR("Failed to map pages into vma: %d\n", ret);
+
+ return ret;
+}
+
static const struct vm_operations_struct xen_drm_drv_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -67,6 +108,7 @@ static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
.get_sg_table = xen_drm_front_gem_get_sg_table,
.vmap = xen_drm_front_gem_prime_vmap,
.vunmap = xen_drm_front_gem_prime_vunmap,
+ .mmap = xen_drm_front_gem_object_mmap,
.vm_ops = &xen_drm_drv_vm_ops,
};
@@ -238,58 +280,6 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
return &xen_obj->base;
}
-static int gem_mmap_obj(struct xen_gem_object *xen_obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- /*
- * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
- * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
- * the whole buffer.
- */
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_pgoff = 0;
- /*
- * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
- * all memory which is shared with other entities in the system
- * (including the hypervisor and other guests) must reside in memory
- * which is mapped as Normal Inner Write-Back Outer Write-Back
- * Inner-Shareable.
- */
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-
- /*
- * vm_operations_struct.fault handler will be called if CPU access
- * to VM is here. For GPUs this isn't the case, because CPU
- * doesn't touch the memory. Insert pages now, so both CPU and GPU are
- * happy.
- * FIXME: as we insert all the pages now then no .fault handler must
- * be called, so don't provide one
- */
- ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
- if (ret < 0)
- DRM_ERROR("Failed to map pages into vma: %d\n", ret);
-
- return ret;
-}
-
-int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct xen_gem_object *xen_obj;
- struct drm_gem_object *gem_obj;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret < 0)
- return ret;
-
- gem_obj = vma->vm_private_data;
- xen_obj = to_xen_gem_obj(gem_obj);
- return gem_mmap_obj(xen_obj, vma);
-}
-
int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, struct dma_buf_map *map)
{
struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
@@ -313,17 +303,3 @@ void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
{
vunmap(map->vaddr);
}
-
-int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
- struct vm_area_struct *vma)
-{
- struct xen_gem_object *xen_obj;
- int ret;
-
- ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
- if (ret < 0)
- return ret;
-
- xen_obj = to_xen_gem_obj(gem_obj);
- return gem_mmap_obj(xen_obj, vma);
-}
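
The comments in xen_drm_front_gem_object_mmap() above carry the core reasoning: the object pre-populates the VMA instead of faulting pages in on demand. A stripped-down sketch of that pattern for a hypothetical page-backed GEM object (example_gem_mmap and the pages/num_pages parameters are assumptions for illustration):

#include <linux/mm.h>
#include <drm/drm_gem.h>

/* Sketch: pre-populate a VMA from a page array instead of using .fault. */
static int example_gem_mmap(struct drm_gem_object *obj,
			    struct vm_area_struct *vma,
			    struct page **pages, unsigned long num_pages)
{
	vma->vm_flags &= ~VM_PFNMAP;	/* struct pages, not raw PFNs */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;		/* drop DRM's fake buffer offset */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/* Map everything up front; no .fault handler is needed afterwards. */
	return vm_map_pages(vma, pages, num_pages);
}
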
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
index a4e67d0a149c..eaea470f7001 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h
@@ -15,9 +15,7 @@ struct dma_buf_attachment;
struct dma_buf_map;
struct drm_device;
struct drm_gem_object;
-struct file;
struct sg_table;
-struct vm_area_struct;
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
size_t size);
@@ -33,15 +31,10 @@ struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj);
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj);
-int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-
int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
struct dma_buf_map *map);
void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
struct dma_buf_map *map);
-int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
- struct vm_area_struct *vma);
-
#endif /* __XEN_DRM_FRONT_GEM_H */
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index c3d08269faa9..d8d38d86d5c6 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -7,7 +7,6 @@ config DRM_ZYNQMP_DPSUB
depends on XILINX_ZYNQMP_DPDMA
select DMA_ENGINE
select DRM_GEM_CMA_HELPER
- select DRM_KMS_CMA_HELPER
select DRM_KMS_HELPER
select GENERIC_PHY
help
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index ff2b308d8651..11c409cbc88e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -24,6 +24,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma/xilinx_dpdma.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
@@ -1058,14 +1059,18 @@ static void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt);
/*
- * Set slave_id for each DMA channel to indicate they're part of a
+ * Set pconfig for each DMA channel to indicate they're part of a
* video group.
*/
for (i = 0; i < info->num_planes; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
+ struct xilinx_dpdma_peripheral_config pconfig = {
+ .video_group = true,
+ };
struct dma_slave_config config = {
.direction = DMA_MEM_TO_DEV,
- .slave_id = 1,
+ .peripheral_config = &pconfig,
+ .peripheral_size = sizeof(pconfig),
};
dmaengine_slave_config(dma->chan, &config);
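
The zynqmp_disp hunk above replaces the removed slave_id field with the generic peripheral_config mechanism: the DMA client hands a driver-specific struct to the engine through dma_slave_config. A minimal sketch of that handshake from the client side, using the xilinx_dpdma_peripheral_config type the diff references (example_configure is an illustrative name):

#include <linux/dmaengine.h>
#include <linux/dma/xilinx_dpdma.h>

/* Sketch: pass vendor-specific channel settings via peripheral_config. */
static int example_configure(struct dma_chan *chan)
{
	struct xilinx_dpdma_peripheral_config pconfig = {
		.video_group = true,
	};
	struct dma_slave_config config = {
		.direction = DMA_MEM_TO_DEV,
		/* the engine driver casts this back to its own type */
		.peripheral_config = &pconfig,
		.peripheral_size = sizeof(pconfig),
	};

	return dmaengine_slave_config(chan, &config);
}
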
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index 6dab94adf25e..6815b4db17c1 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -2,6 +2,7 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
+ select DMA_SHARED_BUFFER
select IOMMU_IOVA
help
Driver for the NVIDIA Tegra host1x hardware.
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 218e3718fd68..bdee16a0bb8e 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -5,6 +5,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/seq_file.h>
@@ -742,6 +743,7 @@ EXPORT_SYMBOL(host1x_driver_unregister);
*/
void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
{
+ host1x_bo_cache_init(&client->cache);
INIT_LIST_HEAD(&client->list);
__mutex_init(&client->lock, "host1x client lock", key);
client->usecount = 0;
@@ -761,7 +763,6 @@ EXPORT_SYMBOL(host1x_client_exit);
/**
* __host1x_client_register() - register a host1x client
* @client: host1x client
- * @key: lock class key for the client-specific mutex
*
* Registers a host1x client with each host1x controller instance. Note that
* each client will only match their parent host1x controller and will only be
@@ -830,6 +831,8 @@ int host1x_client_unregister(struct host1x_client *client)
mutex_unlock(&clients_lock);
+ host1x_bo_cache_destroy(&client->cache);
+
return 0;
}
EXPORT_SYMBOL(host1x_client_unregister);
@@ -904,3 +907,78 @@ unlock:
return err;
}
EXPORT_SYMBOL(host1x_client_resume);
+
+struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir,
+ struct host1x_bo_cache *cache)
+{
+ struct host1x_bo_mapping *mapping;
+
+ if (cache) {
+ mutex_lock(&cache->lock);
+
+ list_for_each_entry(mapping, &cache->mappings, entry) {
+ if (mapping->bo == bo && mapping->direction == dir) {
+ kref_get(&mapping->ref);
+ goto unlock;
+ }
+ }
+ }
+
+ mapping = bo->ops->pin(dev, bo, dir);
+ if (IS_ERR(mapping))
+ goto unlock;
+
+ spin_lock(&mapping->bo->lock);
+ list_add_tail(&mapping->list, &bo->mappings);
+ spin_unlock(&mapping->bo->lock);
+
+ if (cache) {
+ INIT_LIST_HEAD(&mapping->entry);
+ mapping->cache = cache;
+
+ list_add_tail(&mapping->entry, &cache->mappings);
+
+ /* bump reference count to track the copy in the cache */
+ kref_get(&mapping->ref);
+ }
+
+unlock:
+ if (cache)
+ mutex_unlock(&cache->lock);
+
+ return mapping;
+}
+EXPORT_SYMBOL(host1x_bo_pin);
+
+static void __host1x_bo_unpin(struct kref *ref)
+{
+ struct host1x_bo_mapping *mapping = to_host1x_bo_mapping(ref);
+
+ /*
+ * When the last reference of the mapping goes away, make sure to remove the mapping from
+ * the cache.
+ */
+ if (mapping->cache)
+ list_del(&mapping->entry);
+
+ spin_lock(&mapping->bo->lock);
+ list_del(&mapping->list);
+ spin_unlock(&mapping->bo->lock);
+
+ mapping->bo->ops->unpin(mapping);
+}
+
+void host1x_bo_unpin(struct host1x_bo_mapping *mapping)
+{
+ struct host1x_bo_cache *cache = mapping->cache;
+
+ if (cache)
+ mutex_lock(&cache->lock);
+
+ kref_put(&mapping->ref, __host1x_bo_unpin);
+
+ if (cache)
+ mutex_unlock(&cache->lock);
+}
+EXPORT_SYMBOL(host1x_bo_unpin);
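
host1x_bo_pin() above implements a small keyed cache: a hit bumps a kref under the cache mutex, a miss creates the mapping and stores one extra reference for the cache's own copy, and the matching unpin path drops references under the same lock. The essential shape, reduced to a hedged sketch with a hypothetical struct mapping:

#include <linux/err.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct mapping {
	struct kref ref;
	struct list_head entry;
	void *key;			/* here: the BO plus DMA direction */
};

/* Sketch: return a cached mapping if one exists, else insert a new one. */
static struct mapping *cache_get(struct mutex *lock, struct list_head *cache,
				 void *key, struct mapping *(*create)(void *))
{
	struct mapping *m;

	mutex_lock(lock);

	list_for_each_entry(m, cache, entry) {
		if (m->key == key) {
			kref_get(&m->ref);	/* hand out a new reference */
			goto unlock;
		}
	}

	m = create(key);
	if (!IS_ERR(m)) {
		list_add_tail(&m->entry, cache);
		kref_get(&m->ref);	/* extra ref held by the cache itself */
	}

unlock:
	mutex_unlock(lock);
	return m;
}
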
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index 4cd212bb570d..2a9a3a8d5931 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -75,6 +75,14 @@ struct host1x_channel *host1x_channel_get_index(struct host1x *host,
return ch;
}
+void host1x_channel_stop(struct host1x_channel *channel)
+{
+ struct host1x *host = dev_get_drvdata(channel->dev->parent);
+
+ host1x_hw_cdma_stop(host, &channel->cdma);
+}
+EXPORT_SYMBOL(host1x_channel_stop);
+
static void release_channel(struct kref *kref)
{
struct host1x_channel *channel =
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 8a14880c61bb..18d9c8d206e3 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -7,6 +7,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -52,6 +53,11 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
{
struct host1x *m = dev_get_drvdata(ch->dev->parent);
struct output *o = data;
+ int err;
+
+ err = pm_runtime_resume_and_get(m->dev);
+ if (err < 0)
+ return err;
mutex_lock(&ch->cdma.lock);
mutex_lock(&debug_lock);
@@ -64,6 +70,8 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
mutex_unlock(&debug_lock);
mutex_unlock(&ch->cdma.lock);
+ pm_runtime_put(m->dev);
+
return 0;
}
@@ -71,9 +79,14 @@ static void show_syncpts(struct host1x *m, struct output *o)
{
struct list_head *pos;
unsigned int i;
+ int err;
host1x_debug_output(o, "---- syncpts ----\n");
+ err = pm_runtime_resume_and_get(m->dev);
+ if (err < 0)
+ return;
+
for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
u32 max = host1x_syncpt_read_max(m->syncpt + i);
u32 min = host1x_syncpt_load(m->syncpt + i);
@@ -101,6 +114,8 @@ static void show_syncpts(struct host1x *m, struct output *o)
base_val);
}
+ pm_runtime_put(m->dev);
+
host1x_debug_output(o, "\n");
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index fbb6447b8659..6994f8c0e02e 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -6,18 +6,26 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <soc/tegra/common.h>
+
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
#include "bus.h"
#include "channel.h"
#include "debug.h"
@@ -132,6 +140,12 @@ static const struct host1x_sid_entry tegra186_sid_table[] = {
.offset = 0x30,
.limit = 0x34
},
+ {
+ /* NVDEC */
+ .base = 0x1b00,
+ .offset = 0x30,
+ .limit = 0x34
+ },
};
static const struct host1x_info host1x06_info = {
@@ -156,6 +170,18 @@ static const struct host1x_sid_entry tegra194_sid_table[] = {
.offset = 0x30,
.limit = 0x34
},
+ {
+ /* NVDEC */
+ .base = 0x1b00,
+ .offset = 0x30,
+ .limit = 0x34
+ },
+ {
+ /* NVDEC1 */
+ .base = 0x1bc0,
+ .offset = 0x30,
+ .limit = 0x34
+ },
};
static const struct host1x_info host1x07_info = {
@@ -190,6 +216,9 @@ static void host1x_setup_sid_table(struct host1x *host)
const struct host1x_info *info = host->info;
unsigned int i;
+ if (!info->has_hypervisor)
+ return;
+
for (i = 0; i < info->num_sid_entries; i++) {
const struct host1x_sid_entry *entry = &info->sid_table[i];
@@ -238,6 +267,17 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
int err;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (host->dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping =
+ to_dma_iommu_mapping(host->dev);
+ arm_iommu_detach_device(host->dev);
+ arm_iommu_release_mapping(mapping);
+
+ domain = iommu_get_domain_for_dev(host->dev);
+ }
+#endif
+
/*
* We may not always want to enable IOMMU support (for example if the
* host1x firewall is already enabled and we don't support addressing
@@ -347,6 +387,27 @@ static void host1x_iommu_exit(struct host1x *host)
}
}
+static int host1x_get_resets(struct host1x *host)
+{
+ int err;
+
+ host->resets[0].id = "mc";
+ host->resets[1].id = "host1x";
+ host->nresets = ARRAY_SIZE(host->resets);
+
+ err = devm_reset_control_bulk_get_optional_exclusive_released(
+ host->dev, host->nresets, host->resets);
+ if (err) {
+ dev_err(host->dev, "failed to get reset: %d\n", err);
+ return err;
+ }
+
+ if (WARN_ON(!host->resets[1].rstc))
+ return -ENOENT;
+
+ return 0;
+}
+
static int host1x_probe(struct platform_device *pdev)
{
struct host1x *host;
@@ -386,6 +447,7 @@ static int host1x_probe(struct platform_device *pdev)
if (syncpt_irq < 0)
return syncpt_irq;
+ host1x_bo_cache_init(&host->cache);
mutex_init(&host->devices_lock);
INIT_LIST_HEAD(&host->devices);
INIT_LIST_HEAD(&host->list);
@@ -423,12 +485,9 @@ static int host1x_probe(struct platform_device *pdev)
return err;
}
- host->rst = devm_reset_control_get(&pdev->dev, "host1x");
- if (IS_ERR(host->rst)) {
- err = PTR_ERR(host->rst);
- dev_err(&pdev->dev, "failed to get reset: %d\n", err);
+ err = host1x_get_resets(host);
+ if (err)
return err;
- }
err = host1x_iommu_init(host);
if (err < 0) {
@@ -443,22 +502,10 @@ static int host1x_probe(struct platform_device *pdev)
goto iommu_exit;
}
- err = clk_prepare_enable(host->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable clock\n");
- goto free_channels;
- }
-
- err = reset_control_deassert(host->rst);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
- goto unprepare_disable;
- }
-
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
- goto reset_assert;
+ goto free_channels;
}
err = host1x_intr_init(host, syncpt_irq);
@@ -467,10 +514,18 @@ static int host1x_probe(struct platform_device *pdev)
goto deinit_syncpt;
}
- host1x_debug_init(host);
+ pm_runtime_enable(&pdev->dev);
+
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ goto pm_disable;
- if (host->info->has_hypervisor)
- host1x_setup_sid_table(host);
+ /* the driver's code isn't ready yet for dynamic runtime PM */
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+ goto pm_disable;
+
+ host1x_debug_init(host);
err = host1x_register(host);
if (err < 0)
@@ -486,13 +541,14 @@ unregister:
host1x_unregister(host);
deinit_debugfs:
host1x_debug_deinit(host);
+
+ pm_runtime_put_sync_suspend(&pdev->dev);
+pm_disable:
+ pm_runtime_disable(&pdev->dev);
+
host1x_intr_deinit(host);
deinit_syncpt:
host1x_syncpt_deinit(host);
-reset_assert:
- reset_control_assert(host->rst);
-unprepare_disable:
- clk_disable_unprepare(host->clk);
free_channels:
host1x_channel_list_free(&host->channel_list);
iommu_exit:
@@ -507,19 +563,94 @@ static int host1x_remove(struct platform_device *pdev)
host1x_unregister(host);
host1x_debug_deinit(host);
+
+ pm_runtime_force_suspend(&pdev->dev);
+
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
- reset_control_assert(host->rst);
- clk_disable_unprepare(host->clk);
host1x_iommu_exit(host);
+ host1x_bo_cache_destroy(&host->cache);
return 0;
}
+static int __maybe_unused host1x_runtime_suspend(struct device *dev)
+{
+ struct host1x *host = dev_get_drvdata(dev);
+ int err;
+
+ host1x_intr_stop(host);
+ host1x_syncpt_save(host);
+
+ err = reset_control_bulk_assert(host->nresets, host->resets);
+ if (err) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ goto resume_host1x;
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(host->clk);
+ reset_control_bulk_release(host->nresets, host->resets);
+
+ return 0;
+
+resume_host1x:
+ host1x_setup_sid_table(host);
+ host1x_syncpt_restore(host);
+ host1x_intr_start(host);
+
+ return err;
+}
+
+static int __maybe_unused host1x_runtime_resume(struct device *dev)
+{
+ struct host1x *host = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_bulk_acquire(host->nresets, host->resets);
+ if (err) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(host->clk);
+ if (err) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto release_reset;
+ }
+
+ err = reset_control_bulk_deassert(host->nresets, host->resets);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ host1x_setup_sid_table(host);
+ host1x_syncpt_restore(host);
+ host1x_intr_start(host);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(host->clk);
+release_reset:
+ reset_control_bulk_release(host->nresets, host->resets);
+
+ return err;
+}
+
+static const struct dev_pm_ops host1x_pm_ops = {
+ SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
+ NULL)
+ /* TODO: add system suspend/resume once the driver is ready for it */
+};
+
static struct platform_driver tegra_host1x_driver = {
.driver = {
.name = "tegra-host1x",
.of_match_table = host1x_of_match,
+ .pm = &host1x_pm_ops,
},
.probe = host1x_probe,
.remove = host1x_remove,
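
host1x_runtime_resume() above acquires resources in reset-acquire, clock-enable, reset-deassert order and unwinds in strict reverse order on failure; host1x_runtime_suspend() mirrors it. The generic goto-unwind shape, as a sketch with hypothetical helpers standing in for the reset_control_bulk_*() and clk_prepare_enable() calls:

struct device;

/* Hypothetical stand-ins for the acquire/enable/deassert steps above. */
int acquire_a(struct device *dev);
int enable_b(struct device *dev);
int start_c(struct device *dev);
void undo_b(struct device *dev);
void undo_a(struct device *dev);

/* Sketch: acquire in order, unwind in exact reverse order on error. */
static int example_resume(struct device *dev)
{
	int err;

	err = acquire_a(dev);
	if (err)
		return err;

	err = enable_b(dev);
	if (err)
		goto release_a;

	err = start_c(dev);
	if (err)
		goto disable_b;

	return 0;

disable_b:
	undo_b(dev);
release_a:
	undo_a(dev);
	return err;
}
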
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index fa6d4bc46e98..ca4b082f0cd4 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -118,7 +118,8 @@ struct host1x {
struct host1x_syncpt_base *bases;
struct device *dev;
struct clk *clk;
- struct reset_control *rst;
+ struct reset_control_bulk_data resets[2];
+ unsigned int nresets;
struct iommu_group *group;
struct iommu_domain *domain;
@@ -149,6 +150,8 @@ struct host1x {
struct list_head list;
struct device_dma_parameters dma_parms;
+
+ struct host1x_bo_cache cache;
};
void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 1999780a7203..6b40e9af1e88 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -159,6 +159,27 @@ static void host1x_channel_set_streamid(struct host1x_channel *channel)
#endif
}
+static void host1x_enable_gather_filter(struct host1x_channel *ch)
+{
+#if HOST1X_HW >= 6
+ struct host1x *host = dev_get_drvdata(ch->dev->parent);
+ u32 val;
+
+ if (!host->hv_regs)
+ return;
+
+ val = host1x_hypervisor_readl(
+ host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
+ val |= BIT(ch->id % 32);
+ host1x_hypervisor_writel(
+ host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
+#elif HOST1X_HW >= 4
+ host1x_ch_writel(ch,
+ HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1),
+ HOST1X_CHANNEL_CHANNELCTRL);
+#endif
+}
+
static int channel_submit(struct host1x_job *job)
{
struct host1x_channel *ch = job->channel;
@@ -190,6 +211,7 @@ static int channel_submit(struct host1x_job *job)
}
host1x_channel_set_streamid(ch);
+ host1x_enable_gather_filter(ch);
/* begin a CDMA submit */
err = host1x_cdma_begin(&ch->cdma, job);
@@ -249,27 +271,6 @@ error:
return err;
}
-static void enable_gather_filter(struct host1x *host,
- struct host1x_channel *ch)
-{
-#if HOST1X_HW >= 6
- u32 val;
-
- if (!host->hv_regs)
- return;
-
- val = host1x_hypervisor_readl(
- host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
- val |= BIT(ch->id % 32);
- host1x_hypervisor_writel(
- host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
-#elif HOST1X_HW >= 4
- host1x_ch_writel(ch,
- HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1),
- HOST1X_CHANNEL_CHANNELCTRL);
-#endif
-}
-
static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
unsigned int index)
{
@@ -278,7 +279,6 @@ static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
#else
ch->regs = dev->regs + index * 0x100;
#endif
- enable_gather_filter(dev, ch);
return 0;
}
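
host1x_enable_gather_filter() above indexes a bank of 32-bit registers by channel: word ch->id / 32, bit ch->id % 32. The arithmetic in isolation, as a sketch (enable_channel_bit and the 4-byte register stride are illustrative assumptions):

#include <linux/bits.h>
#include <linux/io.h>

/* Sketch: one u32 register covers 32 channels; pick word and bit by id. */
static void enable_channel_bit(void __iomem *regs, unsigned int id)
{
	u32 val = readl(regs + (id / 32) * 4);

	val |= BIT(id % 32);		/* bit within the (id / 32)'th word */
	writel(val, regs + (id / 32) * 4);
}
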
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 45b6be927ec4..965ba21818b1 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -297,14 +297,11 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
"host1x_sp_%02u", id);
}
- host1x_intr_start(host);
-
return 0;
}
void host1x_intr_deinit(struct host1x *host)
{
- host1x_intr_stop(host);
}
void host1x_intr_start(struct host1x *host)
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 0eef6df7c89e..5e8c183167b7 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -134,20 +134,20 @@ EXPORT_SYMBOL(host1x_job_add_wait);
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
+ unsigned long mask = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
struct host1x_client *client = job->client;
struct device *dev = client->dev;
struct host1x_job_gather *g;
- struct iommu_domain *domain;
- struct sg_table *sgt;
unsigned int i;
int err;
- domain = iommu_get_domain_for_dev(dev);
job->num_unpins = 0;
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocs[i];
- dma_addr_t phys_addr, *phys;
+ enum dma_data_direction direction;
+ struct host1x_bo_mapping *map;
+ struct host1x_bo *bo;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
if (!reloc->target.bo) {
@@ -155,64 +155,44 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- /*
- * If the client device is not attached to an IOMMU, the
- * physical address of the buffer object can be used.
- *
- * Similarly, when an IOMMU domain is shared between all
- * host1x clients, the IOVA is already available, so no
- * need to map the buffer object again.
- *
- * XXX Note that this isn't always safe to do because it
- * relies on an assumption that no cache maintenance is
- * needed on the buffer objects.
- */
- if (!domain || client->group)
- phys = &phys_addr;
- else
- phys = NULL;
-
- sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
- goto unpin;
- }
+ bo = reloc->target.bo;
- if (sgt) {
- unsigned long mask = HOST1X_RELOC_READ |
- HOST1X_RELOC_WRITE;
- enum dma_data_direction dir;
-
- switch (reloc->flags & mask) {
- case HOST1X_RELOC_READ:
- dir = DMA_TO_DEVICE;
- break;
+ switch (reloc->flags & mask) {
+ case HOST1X_RELOC_READ:
+ direction = DMA_TO_DEVICE;
+ break;
- case HOST1X_RELOC_WRITE:
- dir = DMA_FROM_DEVICE;
- break;
+ case HOST1X_RELOC_WRITE:
+ direction = DMA_FROM_DEVICE;
+ break;
- case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
- dir = DMA_BIDIRECTIONAL;
- break;
+ case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
+ direction = DMA_BIDIRECTIONAL;
+ break;
- default:
- err = -EINVAL;
- goto unpin;
- }
+ default:
+ err = -EINVAL;
+ goto unpin;
+ }
- err = dma_map_sgtable(dev, sgt, dir, 0);
- if (err)
- goto unpin;
+ map = host1x_bo_pin(dev, bo, direction, &client->cache);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto unpin;
+ }
- job->unpins[job->num_unpins].dev = dev;
- job->unpins[job->num_unpins].dir = dir;
- phys_addr = sg_dma_address(sgt->sgl);
+ /*
+ * host1x clients are generally not able to do scatter-gather themselves, so fail
+ * if the buffer is discontiguous and we fail to map its SG table to a single
+ * contiguous chunk of I/O virtual memory.
+ */
+ if (map->chunks > 1) {
+ err = -EINVAL;
+ goto unpin;
}
- job->addr_phys[job->num_unpins] = phys_addr;
- job->unpins[job->num_unpins].bo = reloc->target.bo;
- job->unpins[job->num_unpins].sgt = sgt;
+ job->addr_phys[job->num_unpins] = map->phys;
+ job->unpins[job->num_unpins].map = map;
job->num_unpins++;
}
@@ -224,12 +204,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
return 0;
for (i = 0; i < job->num_cmds; i++) {
+ struct host1x_bo_mapping *map;
size_t gather_size = 0;
struct scatterlist *sg;
- dma_addr_t phys_addr;
unsigned long shift;
struct iova *alloc;
- dma_addr_t *phys;
unsigned int j;
if (job->cmds[i].is_wait)
@@ -243,25 +222,16 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- /**
- * If the host1x is not attached to an IOMMU, there is no need
- * to map the buffer object for the host1x, since the physical
- * address can simply be used.
- */
- if (!iommu_get_domain_for_dev(host->dev))
- phys = &phys_addr;
- else
- phys = NULL;
-
- sgt = host1x_bo_pin(host->dev, g->bo, phys);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
- goto put;
+ map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, &host->cache);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto unpin;
}
if (host->domain) {
- for_each_sgtable_sg(sgt, sg, j)
+ for_each_sgtable_sg(map->sgt, sg, j)
gather_size += sg->length;
+
gather_size = iova_align(&host->iova, gather_size);
shift = iova_shift(&host->iova);
@@ -272,33 +242,23 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto put;
}
- err = iommu_map_sgtable(host->domain,
- iova_dma_addr(&host->iova, alloc),
- sgt, IOMMU_READ);
+ err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc),
+ map->sgt, IOMMU_READ);
if (err == 0) {
__free_iova(&host->iova, alloc);
err = -EINVAL;
goto put;
}
- job->unpins[job->num_unpins].size = gather_size;
- phys_addr = iova_dma_addr(&host->iova, alloc);
- } else if (sgt) {
- err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
- if (err)
- goto put;
-
- job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
- job->unpins[job->num_unpins].dev = host->dev;
- phys_addr = sg_dma_address(sgt->sgl);
+ map->phys = iova_dma_addr(&host->iova, alloc);
+ map->size = gather_size;
}
- job->addr_phys[job->num_unpins] = phys_addr;
- job->gather_addr_phys[i] = phys_addr;
-
- job->unpins[job->num_unpins].bo = g->bo;
- job->unpins[job->num_unpins].sgt = sgt;
+ job->addr_phys[job->num_unpins] = map->phys;
+ job->unpins[job->num_unpins].map = map;
job->num_unpins++;
+
+ job->gather_addr_phys[i] = map->phys;
}
return 0;
@@ -690,22 +650,16 @@ void host1x_job_unpin(struct host1x_job *job)
unsigned int i;
for (i = 0; i < job->num_unpins; i++) {
- struct host1x_job_unpin_data *unpin = &job->unpins[i];
- struct device *dev = unpin->dev ?: host->dev;
- struct sg_table *sgt = unpin->sgt;
-
- if (!job->enable_firewall && unpin->size && host->domain) {
- iommu_unmap(host->domain, job->addr_phys[i],
- unpin->size);
- free_iova(&host->iova,
- iova_pfn(&host->iova, job->addr_phys[i]));
- }
+ struct host1x_bo_mapping *map = job->unpins[i].map;
+ struct host1x_bo *bo = map->bo;
- if (unpin->dev && sgt)
- dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);
+ if (!job->enable_firewall && map->size && host->domain) {
+ iommu_unmap(host->domain, job->addr_phys[i], map->size);
+ free_iova(&host->iova, iova_pfn(&host->iova, job->addr_phys[i]));
+ }
- host1x_bo_unpin(dev, unpin->bo, sgt);
- host1x_bo_put(unpin->bo);
+ host1x_bo_unpin(map);
+ host1x_bo_put(bo);
}
job->num_unpins = 0;
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index b4428c5495c9..dad5a1946693 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -35,11 +35,7 @@ struct host1x_job_cmd {
};
struct host1x_job_unpin_data {
- struct host1x_bo *bo;
- struct sg_table *sgt;
- struct device *dev;
- size_t size;
- enum dma_data_direction dir;
+ struct host1x_bo_mapping *map;
};
/*
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index d198a10848c6..e08e331e46ae 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -143,6 +143,8 @@ void host1x_syncpt_restore(struct host1x *host)
for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
+ host1x_hw_syncpt_enable_protection(host);
+
wmb();
}
@@ -366,9 +368,6 @@ int host1x_syncpt_init(struct host1x *host)
host->syncpt = syncpt;
host->bases = bases;
- host1x_syncpt_restore(host);
- host1x_hw_syncpt_enable_protection(host);
-
/* Allocate sync point to use for clearing waits for expired fences */
host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
if (!host->nop_sp)
diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c
index 15661c7f3633..e89cca015095 100644
--- a/drivers/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -78,7 +78,7 @@ struct es2_cport_in {
* @hd: pointer to our gb_host_device structure
*
* @cport_in: endpoint, urbs and buffer for cport in messages
- * @cport_out_endpoint: endpoint for for cport out messages
+ * @cport_out_endpoint: endpoint for cport out messages
* @cport_out_urb: array of urbs for the CPort out messages
* @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
* not.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a7c78ac96270..f5544157576c 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -558,6 +558,20 @@ config HID_LENOVO
- ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys)
- ThinkPad Compact USB Keyboard with TrackPoint (supports Fn keys)
+config HID_LETSKETCH
+ tristate "Letsketch WP9620N tablets"
+ depends on USB_HID
+ help
+ Driver for the LetSketch / VSON WP9620N drawing tablet. This
+ drawing tablet is also sold under other brand names such as Case U;
+ presumably this driver will work for all of them, but it has only
+ been tested with a LetSketch WP9620N model.
+
+ These tablets also work without a special HID driver, but then only
+ part of the active area works and both the pad and stylus buttons
+ are hardwired to special key combos, e.g. the two stylus buttons
+ send a right mouse click and an "e" key press, respectively.
+
config HID_LOGITECH
tristate "Logitech devices"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 55a6fa3eca5a..6d3e630e81af 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
+obj-$(CONFIG_HID_LETSKETCH) += hid-letsketch.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH) += hid-lg-g15.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 2c9c5faa74a9..24802a4a636e 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -16,24 +16,28 @@
#include <linux/device.h>
#include <linux/hid.h>
+#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/timer.h>
#include "hid-ids.h"
-#define APPLE_RDESC_JIS 0x0001
-#define APPLE_IGNORE_MOUSE 0x0002
-#define APPLE_HAS_FN 0x0004
-/* 0x0008 reserved, was: APPLE_HIDDEV */
-#define APPLE_ISO_TILDE_QUIRK 0x0010
-#define APPLE_MIGHTYMOUSE 0x0020
-#define APPLE_INVERT_HWHEEL 0x0040
-/* 0x0080 reserved, was: APPLE_IGNORE_HIDINPUT */
-#define APPLE_NUMLOCK_EMULATION 0x0100
+#define APPLE_RDESC_JIS BIT(0)
+#define APPLE_IGNORE_MOUSE BIT(1)
+#define APPLE_HAS_FN BIT(2)
+/* BIT(3) reserved, was: APPLE_HIDDEV */
+#define APPLE_ISO_TILDE_QUIRK BIT(4)
+#define APPLE_MIGHTYMOUSE BIT(5)
+#define APPLE_INVERT_HWHEEL BIT(6)
+/* BIT(7) reserved, was: APPLE_IGNORE_HIDINPUT */
+#define APPLE_NUMLOCK_EMULATION BIT(8)
+#define APPLE_RDESC_BATTERY BIT(9)
#define APPLE_FLAG_FKEY 0x01
#define HID_COUNTRY_INTERNATIONAL_ISO 13
+#define APPLE_BATTERY_TIMEOUT_MS 60000
static unsigned int fnmode = 1;
module_param(fnmode, uint, 0644);
@@ -58,10 +62,12 @@ MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
"[0] = as-is, Mac layout, 1 = swapped, PC layout)");
struct apple_sc {
+ struct hid_device *hdev;
unsigned long quirks;
unsigned int fn_on;
unsigned int fn_found;
DECLARE_BITMAP(pressed_numlock, KEY_CNT);
+ struct timer_list battery_timer;
};
struct apple_key_translation {
@@ -70,6 +76,28 @@ struct apple_key_translation {
u8 flags;
};
+static const struct apple_key_translation apple2021_fn_keys[] = {
+ { KEY_BACKSPACE, KEY_DELETE },
+ { KEY_ENTER, KEY_INSERT },
+ { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
+ { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
+ { KEY_F3, KEY_SCALE, APPLE_FLAG_FKEY },
+ { KEY_F4, KEY_SEARCH, APPLE_FLAG_FKEY },
+ { KEY_F5, KEY_MICMUTE, APPLE_FLAG_FKEY },
+ { KEY_F6, KEY_SLEEP, APPLE_FLAG_FKEY },
+ { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
+ { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY },
+ { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY },
+ { KEY_F10, KEY_MUTE, APPLE_FLAG_FKEY },
+ { KEY_F11, KEY_VOLUMEDOWN, APPLE_FLAG_FKEY },
+ { KEY_F12, KEY_VOLUMEUP, APPLE_FLAG_FKEY },
+ { KEY_UP, KEY_PAGEUP },
+ { KEY_DOWN, KEY_PAGEDOWN },
+ { KEY_LEFT, KEY_HOME },
+ { KEY_RIGHT, KEY_END },
+ { }
+};
+
static const struct apple_key_translation macbookair_fn_keys[] = {
{ KEY_BACKSPACE, KEY_DELETE },
{ KEY_ENTER, KEY_INSERT },
@@ -214,7 +242,11 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
if (fnmode) {
- if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
+ if (hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 ||
+ hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 ||
+ hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021)
+ table = apple2021_fn_keys;
+ else if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
table = macbookair_fn_keys;
else if (hid->product < 0x21d || hid->product >= 0x300)
@@ -333,6 +365,43 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
+static int apple_fetch_battery(struct hid_device *hdev)
+{
+#ifdef CONFIG_HID_BATTERY_STRENGTH
+ struct apple_sc *asc = hid_get_drvdata(hdev);
+ struct hid_report_enum *report_enum;
+ struct hid_report *report;
+
+ if (!(asc->quirks & APPLE_RDESC_BATTERY) || !hdev->battery)
+ return -1;
+
+ report_enum = &hdev->report_enum[hdev->battery_report_type];
+ report = report_enum->report_id_hash[hdev->battery_report_id];
+
+ if (!report || report->maxfield < 1)
+ return -1;
+
+ if (hdev->battery_capacity == hdev->battery_max)
+ return -1;
+
+ hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+static void apple_battery_timer_tick(struct timer_list *t)
+{
+ struct apple_sc *asc = from_timer(asc, t, battery_timer);
+ struct hid_device *hdev = asc->hdev;
+
+ if (apple_fetch_battery(hdev) == 0) {
+ mod_timer(&asc->battery_timer,
+ jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS));
+ }
+}
+
/*
* MacBook JIS keyboard has wrong logical maximum
* Magic Keyboard JIS has wrong logical maximum
@@ -354,6 +423,30 @@ static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
"fixing up MacBook JIS keyboard report descriptor\n");
rdesc[53] = rdesc[59] = 0xe7;
}
+
+ /*
+ * Change the usage from:
+ * 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 0
+ * 0x09, 0x0b, // Usage (Vendor Usage 0x0b) 3
+ * To:
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * 0x09, 0x06, // Usage (Keyboard) 2
+ */
+ if ((asc->quirks & APPLE_RDESC_BATTERY) && *rsize == 83 &&
+ rdesc[46] == 0x84 && rdesc[58] == 0x85) {
+ hid_info(hdev,
+ "fixing up Magic Keyboard battery report descriptor\n");
+ *rsize = *rsize - 1;
+ rdesc = kmemdup(rdesc + 1, *rsize, GFP_KERNEL);
+ if (!rdesc)
+ return NULL;
+
+ rdesc[0] = 0x05;
+ rdesc[1] = 0x01;
+ rdesc[2] = 0x09;
+ rdesc[3] = 0x06;
+ }
+
return rdesc;
}
@@ -376,6 +469,9 @@ static void apple_setup_input(struct input_dev *input)
for (trans = apple_iso_keyboard; trans->from; trans++)
set_bit(trans->to, input->keybit);
+ for (trans = apple2021_fn_keys; trans->from; trans++)
+ set_bit(trans->to, input->keybit);
+
if (swap_fn_leftctrl) {
for (trans = swapped_fn_leftctrl_keys; trans->from; trans++)
set_bit(trans->to, input->keybit);
@@ -428,7 +524,7 @@ static int apple_input_configured(struct hid_device *hdev,
if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
- asc->quirks = 0;
+ asc->quirks &= ~APPLE_HAS_FN;
}
return 0;
@@ -447,6 +543,7 @@ static int apple_probe(struct hid_device *hdev,
return -ENOMEM;
}
+ asc->hdev = hdev;
asc->quirks = quirks;
hid_set_drvdata(hdev, asc);
@@ -463,9 +560,23 @@ static int apple_probe(struct hid_device *hdev,
return ret;
}
+ timer_setup(&asc->battery_timer, apple_battery_timer_tick, 0);
+ mod_timer(&asc->battery_timer,
+ jiffies + msecs_to_jiffies(APPLE_BATTERY_TIMEOUT_MS));
+ apple_fetch_battery(hdev);
+
return 0;
}
+static void apple_remove(struct hid_device *hdev)
+{
+ struct apple_sc *asc = hid_get_drvdata(hdev);
+
+ del_timer_sync(&asc->battery_timer);
+
+ hid_hw_stop(hdev);
+}
+
static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE),
.driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL },
@@ -540,11 +651,11 @@ static const struct hid_device_id apple_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2015),
- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2015),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015),
- .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
@@ -640,6 +751,14 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ }
};
@@ -650,6 +769,7 @@ static struct hid_driver apple_driver = {
.id_table = apple_devices,
.report_fixup = apple_report_fixup,
.probe = apple_probe,
+ .remove = apple_remove,
.event = apple_event,
.input_mapping = apple_input_mapping,
.input_mapped = apple_input_mapped,
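
The battery polling added above follows the usual self-rearming timer_list pattern: from_timer() recovers the containing structure, and mod_timer() schedules the next tick only while polling remains useful. A condensed sketch with a hypothetical my_dev (setup mirrors apple_probe(); teardown must use del_timer_sync() as in apple_remove()):

#include <linux/jiffies.h>
#include <linux/timer.h>

#define POLL_MS 60000

struct my_dev {
	struct timer_list poll_timer;
};

int my_poll(struct my_dev *dev);	/* hypothetical work; 0 = keep polling */

static void my_timer_tick(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, poll_timer);

	if (my_poll(dev) == 0)
		mod_timer(&dev->poll_timer,
			  jiffies + msecs_to_jiffies(POLL_MS));
}

static void my_start(struct my_dev *dev)
{
	timer_setup(&dev->poll_timer, my_timer_tick, 0);
	mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(POLL_MS));
}
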
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index dbed2524fd47..f1aed5bbd000 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2126,6 +2126,99 @@ void hid_hw_close(struct hid_device *hdev)
}
EXPORT_SYMBOL_GPL(hid_hw_close);
+/**
+ * hid_hw_request - send report request to device
+ *
+ * @hdev: hid device
+ * @report: report to send
+ * @reqtype: hid request type
+ */
+void hid_hw_request(struct hid_device *hdev,
+ struct hid_report *report, int reqtype)
+{
+ if (hdev->ll_driver->request)
+ return hdev->ll_driver->request(hdev, report, reqtype);
+
+ __hid_request(hdev, report, reqtype);
+}
+EXPORT_SYMBOL_GPL(hid_hw_request);
+
+/**
+ * hid_hw_raw_request - send report request to device
+ *
+ * @hdev: hid device
+ * @reportnum: report ID
+ * @buf: in/out data to transfer
+ * @len: length of buf
+ * @rtype: HID report type
+ * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
+ *
+ * Return: count of data transferred, negative if error
+ *
+ * Same behavior as hid_hw_request, but with raw buffers instead.
+ */
+int hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, unsigned char rtype, int reqtype)
+{
+ if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+ return -EINVAL;
+
+ return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
+ rtype, reqtype);
+}
+EXPORT_SYMBOL_GPL(hid_hw_raw_request);
+
+/**
+ * hid_hw_output_report - send output report to device
+ *
+ * @hdev: hid device
+ * @buf: raw data to transfer
+ * @len: length of buf
+ *
+ * Return: count of data transferred, negative if error
+ */
+int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
+{
+ if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+ return -EINVAL;
+
+ if (hdev->ll_driver->output_report)
+ return hdev->ll_driver->output_report(hdev, buf, len);
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(hid_hw_output_report);
+
+#ifdef CONFIG_PM
+int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
+{
+ if (hdev->driver && hdev->driver->suspend)
+ return hdev->driver->suspend(hdev, state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hid_driver_suspend);
+
+int hid_driver_reset_resume(struct hid_device *hdev)
+{
+ if (hdev->driver && hdev->driver->reset_resume)
+ return hdev->driver->reset_resume(hdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
+
+int hid_driver_resume(struct hid_device *hdev)
+{
+ if (hdev->driver && hdev->driver->resume)
+ return hdev->driver->resume(hdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hid_driver_resume);
+#endif /* CONFIG_PM */
+
struct hid_dynid {
struct list_head list;
struct hid_device_id id;
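
The newly exported hid_hw_raw_request() above rejects NULL buffers and lengths outside 1..HID_MAX_BUFFER_SIZE, then forwards to the transport's raw_request op. A minimal caller sketch (the report ID and length are whatever the target report requires; nothing here is specific to one driver):

	#include <linux/hid.h>
	#include <linux/slab.h>

	static int fetch_feature_sketch(struct hid_device *hdev, u8 report_id,
					size_t len)
	{
		u8 *buf;
		int ret;

		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* On success ret is the number of bytes transferred. */
		ret = hid_hw_raw_request(hdev, report_id, buf, len,
					 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
		if (ret < 0)
			hid_err(hdev, "GET_REPORT failed: %d\n", ret);

		kfree(buf);
		return ret < 0 ? ret : 0;
	}
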
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 7a92e2a04a09..26c31d759914 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -141,8 +141,10 @@ static const struct hid_usage_entry hid_usage_table[] = {
{0, 0x33, "Touch"},
{0, 0x34, "UnTouch"},
{0, 0x35, "Tap"},
+ {0, 0x38, "Transducer Index"},
{0, 0x39, "TabletFunctionKey"},
{0, 0x3a, "ProgramChangeKey"},
+ {0, 0x3B, "Battery Strength"},
{0, 0x3c, "Invert"},
{0, 0x42, "TipSwitch"},
{0, 0x43, "SecondaryTipSwitch"},
@@ -160,7 +162,40 @@ static const struct hid_usage_entry hid_usage_table[] = {
{0, 0x59, "ButtonType"},
{0, 0x5A, "SecondaryBarrelSwitch"},
{0, 0x5B, "TransducerSerialNumber"},
+ {0, 0x5C, "Preferred Color"},
+ {0, 0x5D, "Preferred Color is Locked"},
+ {0, 0x5E, "Preferred Line Width"},
+ {0, 0x5F, "Preferred Line Width is Locked"},
{0, 0x6e, "TransducerSerialNumber2"},
+ {0, 0x70, "Preferred Line Style"},
+ {0, 0x71, "Preferred Line Style is Locked"},
+ {0, 0x72, "Ink"},
+ {0, 0x73, "Pencil"},
+ {0, 0x74, "Highlighter"},
+ {0, 0x75, "Chisel Marker"},
+ {0, 0x76, "Brush"},
+ {0, 0x77, "No Preference"},
+ {0, 0x80, "Digitizer Diagnostic"},
+ {0, 0x81, "Digitizer Error"},
+ {0, 0x82, "Err Normal Status"},
+ {0, 0x83, "Err Transducers Exceeded"},
+ {0, 0x84, "Err Full Trans Features Unavailable"},
+ {0, 0x85, "Err Charge Low"},
+ {0, 0x90, "Transducer Software Info"},
+ {0, 0x91, "Transducer Vendor Id"},
+ {0, 0x92, "Transducer Product Id"},
+ {0, 0x93, "Device Supported Protocols"},
+ {0, 0x94, "Transducer Supported Protocols"},
+ {0, 0x95, "No Protocol"},
+ {0, 0x96, "Wacom AES Protocol"},
+ {0, 0x97, "USI Protocol"},
+ {0, 0x98, "Microsoft Pen Protocol"},
+ {0, 0xA0, "Supported Report Rates"},
+ {0, 0xA1, "Report Rate"},
+ {0, 0xA2, "Transducer Connected"},
+ {0, 0xA3, "Switch Disabled"},
+ {0, 0xA4, "Switch Unimplemented"},
+ {0, 0xA5, "Transducer Switches"},
{ 15, 0, "PhysicalInterfaceDevice" },
{0, 0x00, "Undefined"},
{0, 0x01, "Physical_Interface_Device"},
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 19da07777d62..85975031389b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -175,6 +175,8 @@
#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
#define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
@@ -398,6 +400,7 @@
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
#define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05
+#define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF
#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
@@ -763,6 +766,9 @@
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093 0x6093
+#define USB_VENDOR_ID_LETSKETCH 0x6161
+#define USB_DEVICE_ID_WP9620N 0x4d15
+
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 03f994541981..112901d2d8d2 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -52,6 +52,7 @@ static const struct {
#define map_rel(c) hid_map_usage(hidinput, usage, &bit, &max, EV_REL, (c))
#define map_key(c) hid_map_usage(hidinput, usage, &bit, &max, EV_KEY, (c))
#define map_led(c) hid_map_usage(hidinput, usage, &bit, &max, EV_LED, (c))
+#define map_msc(c) hid_map_usage(hidinput, usage, &bit, &max, EV_MSC, (c))
#define map_abs_clear(c) hid_map_usage_clear(hidinput, usage, &bit, \
&max, EV_ABS, (c))
@@ -329,6 +330,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
@@ -876,10 +879,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x5b: /* TransducerSerialNumber */
case 0x6e: /* TransducerSerialNumber2 */
- usage->type = EV_MSC;
- usage->code = MSC_SERIAL;
- bit = input->mscbit;
- max = MSC_MAX;
+ map_msc(MSC_SERIAL);
break;
default: goto unknown;
@@ -1333,6 +1333,12 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
input = field->hidinput->input;
+ if (usage->type == EV_ABS &&
+ (((*quirks & HID_QUIRK_X_INVERT) && usage->code == ABS_X) ||
+ ((*quirks & HID_QUIRK_Y_INVERT) && usage->code == ABS_Y))) {
+ value = field->logical_maximum - value;
+ }
+
if (usage->hat_min < usage->hat_max || usage->hat_dir) {
int hat_dir = usage->hat_dir;
if (!hat_dir)
@@ -1465,7 +1471,8 @@ void hidinput_report_event(struct hid_device *hid, struct hid_report *report)
}
EXPORT_SYMBOL_GPL(hidinput_report_event);
-int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field)
+static int hidinput_find_field(struct hid_device *hid, unsigned int type,
+ unsigned int code, struct hid_field **field)
{
struct hid_report *report;
int i, j;
@@ -1480,7 +1487,6 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
}
return -1;
}
-EXPORT_SYMBOL_GPL(hidinput_find_field);
struct hid_field *hidinput_get_led_field(struct hid_device *hid)
{
@@ -1743,6 +1749,16 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
case HID_GD_MOUSE:
suffix = "Mouse";
break;
+ case HID_DG_PEN:
+ /*
+ * yes, there is an issue here:
+ * DG_PEN -> "Stylus"
+ * DG_STYLUS -> "Pen"
+ * But changing this now means users with config snippets
+ * will have to change it and the test suite will not be happy.
+ */
+ suffix = "Stylus";
+ break;
case HID_DG_STYLUS:
suffix = "Pen";
break;
diff --git a/drivers/hid/hid-letsketch.c b/drivers/hid/hid-letsketch.c
new file mode 100644
index 000000000000..74d17cf518ba
--- /dev/null
+++ b/drivers/hid/hid-letsketch.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Driver for the LetSketch / VSON WP9620N drawing tablet.
+ * This drawing tablet is also sold under other brand names such as Case U;
+ * presumably this driver will work for all of them. But it has only been
+ * tested with a LetSketch WP9620N model.
+ *
+ * These tablets also work without a special HID driver, but then only part
+ * of the active area works and both the pad and stylus buttons are hardwired
+ * to special key-combos. E.g. the 2 stylus buttons send right mouse clicks
+ * and "e" key presses, respectively.
+ *
+ * This device has 4 USB interfaces:
+ *
+ * Interface 0 EP 0x81 bootclass mouse, rdesc len 18, report id 0x08,
+ * Application(ff00.0001)
+ * This interface sends raw event input reports in a custom format, but only
+ * after doing the special dance from letsketch_probe(). After enabling this
+ * interface the other 3 interfaces are disabled.
+ *
+ * Interface 1 EP 0x82 bootclass mouse, rdesc len 83, report id 0x0a, Tablet
+ * This interface sends absolute events for the pen, including pressure,
+ * but only for some part of the active area due to special "aspect ratio"
+ * correction, and only half of it by default, since it assumes it will be
+ * used with a phone in portrait mode while the tablet is in landscape mode.
+ * Also stylus + pad button events are not reported here.
+ *
+ * Interface 2 EP 0x83 bootclass keybd, rdesc len 64, report id none, Std Kbd
+ * This interface sends various hard-coded key-combos for the pad buttons
+ * and "e" keypresses for the 2nd stylus button
+ *
+ * Interface 3 EP 0x84 bootclass mouse, rdesc len 75, report id 0x01, Std Mouse
+ * This reports right-click mouse-button events for the 1st stylus button
+ */
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+
+#include <asm/unaligned.h>
+
+#include "hid-ids.h"
+
+#define LETSKETCH_RAW_IF 0
+
+#define LETSKETCH_RAW_DATA_LEN 12
+#define LETSKETCH_RAW_REPORT_ID 8
+
+#define LETSKETCH_PAD_BUTTONS 5
+
+#define LETSKETCH_INFO_STR_IDX_BEGIN 0xc8
+#define LETSKETCH_INFO_STR_IDX_END 0xca
+
+#define LETSKETCH_GET_STRING_RETRIES 5
+
+struct letsketch_data {
+ struct hid_device *hdev;
+ struct input_dev *input_tablet;
+ struct input_dev *input_tablet_pad;
+ struct timer_list inrange_timer;
+};
+
+static int letsketch_open(struct input_dev *dev)
+{
+ struct letsketch_data *data = input_get_drvdata(dev);
+
+ return hid_hw_open(data->hdev);
+}
+
+static void letsketch_close(struct input_dev *dev)
+{
+ struct letsketch_data *data = input_get_drvdata(dev);
+
+ hid_hw_close(data->hdev);
+}
+
+static struct input_dev *letsketch_alloc_input_dev(struct letsketch_data *data)
+{
+ struct input_dev *input;
+
+ input = devm_input_allocate_device(&data->hdev->dev);
+ if (!input)
+ return NULL;
+
+ input->id.bustype = data->hdev->bus;
+ input->id.vendor = data->hdev->vendor;
+ input->id.product = data->hdev->product;
+ input->id.version = data->hdev->bus;
+ input->phys = data->hdev->phys;
+ input->uniq = data->hdev->uniq;
+ input->open = letsketch_open;
+ input->close = letsketch_close;
+
+ input_set_drvdata(input, data);
+
+ return input;
+}
+
+static int letsketch_setup_input_tablet(struct letsketch_data *data)
+{
+ struct input_dev *input;
+
+ input = letsketch_alloc_input_dev(data);
+ if (!input)
+ return -ENOMEM;
+
+ input_set_abs_params(input, ABS_X, 0, 50800, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, 31750, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 8192, 0, 0);
+ input_abs_set_res(input, ABS_X, 240);
+ input_abs_set_res(input, ABS_Y, 225);
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_STYLUS);
+ input_set_capability(input, EV_KEY, BTN_STYLUS2);
+
+ /* All known brands selling this tablet use WP9620[N] as the model name */
+ input->name = "WP9620 Tablet";
+
+ data->input_tablet = input;
+
+ return input_register_device(data->input_tablet);
+}
+
+static int letsketch_setup_input_tablet_pad(struct letsketch_data *data)
+{
+ struct input_dev *input;
+ int i;
+
+ input = letsketch_alloc_input_dev(data);
+ if (!input)
+ return -ENOMEM;
+
+ for (i = 0; i < LETSKETCH_PAD_BUTTONS; i++)
+ input_set_capability(input, EV_KEY, BTN_0 + i);
+
+ /*
+ * These are never sent on the pad input_dev, but must be set
+ * on the Pad to make udev / libwacom happy.
+ */
+ input_set_abs_params(input, ABS_X, 0, 1, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, 1, 0, 0);
+ input_set_capability(input, EV_KEY, BTN_STYLUS);
+
+ input->name = "WP9620 Pad";
+
+ data->input_tablet_pad = input;
+
+ return input_register_device(data->input_tablet_pad);
+}
+
+static void letsketch_inrange_timeout(struct timer_list *t)
+{
+ struct letsketch_data *data = from_timer(data, t, inrange_timer);
+ struct input_dev *input = data->input_tablet;
+
+ input_report_key(input, BTN_TOOL_PEN, 0);
+ input_sync(input);
+}
+
+static int letsketch_raw_event(struct hid_device *hdev,
+ struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ struct letsketch_data *data = hid_get_drvdata(hdev);
+ struct input_dev *input;
+ int i;
+
+ if (size != LETSKETCH_RAW_DATA_LEN || raw_data[0] != LETSKETCH_RAW_REPORT_ID)
+ return 0;
+
+ switch (raw_data[1] & 0xf0) {
+ case 0x80: /* Pen data */
+ input = data->input_tablet;
+ input_report_key(input, BTN_TOOL_PEN, 1);
+ input_report_key(input, BTN_TOUCH, raw_data[1] & 0x01);
+ input_report_key(input, BTN_STYLUS, raw_data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, raw_data[1] & 0x04);
+ input_report_abs(input, ABS_X,
+ get_unaligned_le16(raw_data + 2));
+ input_report_abs(input, ABS_Y,
+ get_unaligned_le16(raw_data + 4));
+ input_report_abs(input, ABS_PRESSURE,
+ get_unaligned_le16(raw_data + 6));
+ /*
+ * There is no out-of-range event, so use a timer for this;
+ * while in range we get an event approx. every 8 ms.
+ */
+ mod_timer(&data->inrange_timer, jiffies + msecs_to_jiffies(100));
+ break;
+ case 0xe0: /* Pad data */
+ input = data->input_tablet_pad;
+ for (i = 0; i < LETSKETCH_PAD_BUTTONS; i++)
+ input_report_key(input, BTN_0 + i, raw_data[4] == (i + 1));
+ break;
+ default:
+ hid_warn(data->hdev, "Warning: unknown data header: 0x%02x\n",
+ raw_data[1]);
+ return 0;
+ }
+
+ input_sync(input);
+ return 0;
+}
+
+/*
+ * The tablet's magic handshake to put it in raw mode relies on getting
+ * string descriptors. But the firmware is buggy and does not like it if
+ * we do this too fast. Even if we go slow, sometimes the usb_string() call
+ * fails. Ignore errors and retry it a couple of times if necessary.
+ */
+static int letsketch_get_string(struct usb_device *udev, int index, char *buf, int size)
+{
+ int i, ret;
+
+ for (i = 0; i < LETSKETCH_GET_STRING_RETRIES; i++) {
+ usleep_range(5000, 7000);
+ ret = usb_string(udev, index, buf, size);
+ if (ret > 0)
+ return 0;
+ }
+
+ dev_err(&udev->dev, "Max retries (%d) exceeded reading string descriptor %d\n",
+ LETSKETCH_GET_STRING_RETRIES, index);
+ return ret ? ret : -EIO;
+}
+
+static int letsketch_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct device *dev = &hdev->dev;
+ struct letsketch_data *data;
+ struct usb_interface *intf;
+ struct usb_device *udev;
+ char buf[256];
+ int i, ret;
+
+ if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+ return -ENODEV;
+
+ intf = to_usb_interface(hdev->dev.parent);
+ if (intf->altsetting->desc.bInterfaceNumber != LETSKETCH_RAW_IF)
+ return -ENODEV; /* Ignore the other interfaces */
+
+ udev = interface_to_usbdev(intf);
+
+ /*
+ * Instead of using a set-feature request, or even a custom USB ctrl
+ * message, the tablet needs this elaborate magic reading of USB
+ * string descriptors to kick it into raw mode. This is what the
+ * Windows drivers are seen doing in a USB trace.
+ */
+ for (i = LETSKETCH_INFO_STR_IDX_BEGIN; i <= LETSKETCH_INFO_STR_IDX_END; i++) {
+ ret = letsketch_get_string(udev, i, buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ hid_info(hdev, "Device info: %s\n", buf);
+ }
+
+ for (i = 1; i <= 250; i++) {
+ ret = letsketch_get_string(udev, i, buf, sizeof(buf));
+ if (ret)
+ return ret;
+ }
+
+ ret = letsketch_get_string(udev, 0x64, buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ ret = letsketch_get_string(udev, LETSKETCH_INFO_STR_IDX_BEGIN, buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ /*
+ * The tablet should be in raw mode now; end with a final delay before
+ * doing further IO to the device.
+ */
+ usleep_range(5000, 7000);
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->hdev = hdev;
+ timer_setup(&data->inrange_timer, letsketch_inrange_timeout, 0);
+ hid_set_drvdata(hdev, data);
+
+ ret = letsketch_setup_input_tablet(data);
+ if (ret)
+ return ret;
+
+ ret = letsketch_setup_input_tablet_pad(data);
+ if (ret)
+ return ret;
+
+ return hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+}
+
+static const struct hid_device_id letsketch_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LETSKETCH, USB_DEVICE_ID_WP9620N) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, letsketch_devices);
+
+static struct hid_driver letsketch_driver = {
+ .name = "letsketch",
+ .id_table = letsketch_devices,
+ .probe = letsketch_probe,
+ .raw_event = letsketch_raw_event,
+};
+module_hid_driver(letsketch_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index d7687ce70614..664a624a363d 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -51,12 +51,16 @@ static bool report_undeciphered;
module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
+#define TRACKPAD2_2021_BT_VERSION 0x110
+
#define TRACKPAD_REPORT_ID 0x28
#define TRACKPAD2_USB_REPORT_ID 0x02
#define TRACKPAD2_BT_REPORT_ID 0x31
#define MOUSE_REPORT_ID 0x29
#define MOUSE2_REPORT_ID 0x12
#define DOUBLE_REPORT_ID 0xf7
+#define USB_BATTERY_TIMEOUT_MS 60000
+
/* These definitions are not precise, but they're close enough. (Bits
* 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem
* to be some kind of bit mask -- 0x20 may be a near-field reading,
@@ -140,6 +144,7 @@ struct magicmouse_sc {
struct hid_device *hdev;
struct delayed_work work;
+ struct timer_list battery_timer;
};
static int magicmouse_firm_touch(struct magicmouse_sc *msc)
@@ -538,10 +543,22 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(REL_HWHEEL_HI_RES, input->relbit);
}
} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
- /* setting the device name to ensure the same driver settings
- * get loaded, whether connected through bluetooth or USB
+ /* If the trackpad has been connected to a Mac, the name is
+ * automatically personalized, e.g., "José Expósito's Trackpad".
+ * When connected through Bluetooth, the personalized name is
+ * reported; however, when connected through USB the generic
+ * name is reported.
+ * Set the device name to ensure the same driver settings get
+ * loaded, whether connected through Bluetooth or USB.
*/
- input->name = "Apple Inc. Magic Trackpad 2";
+ if (hdev->vendor == BT_VENDOR_ID_APPLE) {
+ if (input->id.version == TRACKPAD2_2021_BT_VERSION)
+ input->name = "Apple Inc. Magic Trackpad";
+ else
+ input->name = "Apple Inc. Magic Trackpad 2";
+ } else { /* USB_VENDOR_ID_APPLE */
+ input->name = hdev->name;
+ }
__clear_bit(EV_MSC, input->evbit);
__clear_bit(BTN_0, input->keybit);
@@ -738,6 +755,44 @@ static void magicmouse_enable_mt_work(struct work_struct *work)
hid_err(msc->hdev, "unable to request touch data (%d)\n", ret);
}
+static int magicmouse_fetch_battery(struct hid_device *hdev)
+{
+#ifdef CONFIG_HID_BATTERY_STRENGTH
+ struct hid_report_enum *report_enum;
+ struct hid_report *report;
+
+ if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
+ (hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2))
+ return -1;
+
+ report_enum = &hdev->report_enum[hdev->battery_report_type];
+ report = report_enum->report_id_hash[hdev->battery_report_id];
+
+ if (!report || report->maxfield < 1)
+ return -1;
+
+ if (hdev->battery_capacity == hdev->battery_max)
+ return -1;
+
+ hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+static void magicmouse_battery_timer_tick(struct timer_list *t)
+{
+ struct magicmouse_sc *msc = from_timer(msc, t, battery_timer);
+ struct hid_device *hdev = msc->hdev;
+
+ if (magicmouse_fetch_battery(hdev) == 0) {
+ mod_timer(&msc->battery_timer,
+ jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS));
+ }
+}
+
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -745,11 +800,6 @@ static int magicmouse_probe(struct hid_device *hdev,
struct hid_report *report;
int ret;
- if (id->vendor == USB_VENDOR_ID_APPLE &&
- id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
- hdev->type != HID_TYPE_USBMOUSE)
- return -ENODEV;
-
msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
if (msc == NULL) {
hid_err(hdev, "can't alloc magicmouse descriptor\n");
@@ -775,6 +825,16 @@ static int magicmouse_probe(struct hid_device *hdev,
return ret;
}
+ timer_setup(&msc->battery_timer, magicmouse_battery_timer_tick, 0);
+ mod_timer(&msc->battery_timer,
+ jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS));
+ magicmouse_fetch_battery(hdev);
+
+ if (id->vendor == USB_VENDOR_ID_APPLE &&
+ (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && hdev->type != HID_TYPE_USBMOUSE)))
+ return 0;
+
if (!msc->input) {
hid_err(hdev, "magicmouse input not registered\n");
ret = -ENOMEM;
@@ -827,6 +887,7 @@ static int magicmouse_probe(struct hid_device *hdev,
return 0;
err_stop_hw:
+ del_timer_sync(&msc->battery_timer);
hid_hw_stop(hdev);
return ret;
}
@@ -835,17 +896,52 @@ static void magicmouse_remove(struct hid_device *hdev)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
- if (msc)
+ if (msc) {
cancel_delayed_work_sync(&msc->work);
+ del_timer_sync(&msc->battery_timer);
+ }
hid_hw_stop(hdev);
}
+static __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ /*
+ * Change the usage from:
+ * 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 0
+ * 0x09, 0x0b, // Usage (Vendor Usage 0x0b) 3
+ * To:
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * 0x09, 0x02, // Usage (Mouse) 2
+ */
+ if (hdev->vendor == USB_VENDOR_ID_APPLE &&
+ (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
+ *rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
+ hid_info(hdev,
+ "fixing up magicmouse battery report descriptor\n");
+ *rsize = *rsize - 1;
+ rdesc = kmemdup(rdesc + 1, *rsize, GFP_KERNEL);
+ if (!rdesc)
+ return NULL;
+
+ rdesc[0] = 0x05;
+ rdesc[1] = 0x01;
+ rdesc[2] = 0x09;
+ rdesc[3] = 0x02;
+ }
+
+ return rdesc;
+}
+
static const struct hid_device_id magic_mice[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
@@ -861,6 +957,7 @@ static struct hid_driver magicmouse_driver = {
.id_table = magic_mice,
.probe = magicmouse_probe,
.remove = magicmouse_remove,
+ .report_fixup = magicmouse_report_fixup,
.raw_event = magicmouse_raw_event,
.event = magicmouse_event,
.input_mapping = magicmouse_input_mapping,
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 082376a6cb3d..99eabfb4145b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1606,9 +1606,6 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
case HID_DG_STYLUS:
/* force BTN_STYLUS to allow tablet matching in udev */
__set_bit(BTN_STYLUS, hi->input->keybit);
- fallthrough;
- case HID_DG_PEN:
- suffix = "Stylus";
break;
default:
suffix = "UNKNOWN";
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index ee7e504e7279..9af1dc8ae3a2 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -304,6 +304,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLEIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 90acef304536..4040cd98dafe 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -78,7 +78,6 @@ static int tmff_play(struct input_dev *dev, void *data,
struct hid_field *ff_field = tmff->ff_field;
int x, y;
int left, right; /* Rumbling */
- int motor_swap;
switch (effect->type) {
case FF_CONSTANT:
@@ -104,11 +103,8 @@ static int tmff_play(struct input_dev *dev, void *data,
ff_field->logical_maximum);
/* 2-in-1 strong motor is left */
- if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
- motor_swap = left;
- left = right;
- right = motor_swap;
- }
+ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT)
+ swap(left, right);
dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
ff_field->value[0] = left;
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index adff1bd68d9f..3e70f969fb84 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -66,7 +66,7 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev,
__u8 idx, size_t len)
{
int rc;
- struct usb_device *udev = hid_to_usb_dev(hdev);
+ struct usb_device *udev;
__u8 *buf = NULL;
/* Check arguments */
@@ -75,6 +75,8 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev,
goto cleanup;
}
+ udev = hid_to_usb_dev(hdev);
+
buf = kmalloc(len, GFP_KERNEL);
if (buf == NULL) {
rc = -ENOMEM;
@@ -450,7 +452,7 @@ static int uclogic_params_frame_init_v1_buttonpad(
{
int rc;
bool found = false;
- struct usb_device *usb_dev = hid_to_usb_dev(hdev);
+ struct usb_device *usb_dev;
char *str_buf = NULL;
const size_t str_len = 16;
@@ -460,6 +462,8 @@ static int uclogic_params_frame_init_v1_buttonpad(
goto cleanup;
}
+ usb_dev = hid_to_usb_dev(hdev);
+
/*
* Enable generic button mode
*/
@@ -707,9 +711,9 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
struct hid_device *hdev)
{
int rc;
- struct usb_device *udev = hid_to_usb_dev(hdev);
- struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
- __u8 bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber;
+ struct usb_device *udev;
+ struct usb_interface *iface;
+ __u8 bInterfaceNumber;
bool found;
/* The resulting parameters (noop) */
struct uclogic_params p = {0, };
@@ -723,6 +727,10 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
goto cleanup;
}
+ udev = hid_to_usb_dev(hdev);
+ iface = to_usb_interface(hdev->dev.parent);
+ bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber;
+
/* If it's not a pen interface */
if (bInterfaceNumber != 0) {
/* TODO: Consider marking the interface invalid */
@@ -834,10 +842,10 @@ int uclogic_params_init(struct uclogic_params *params,
struct hid_device *hdev)
{
int rc;
- struct usb_device *udev = hid_to_usb_dev(hdev);
- __u8 bNumInterfaces = udev->config->desc.bNumInterfaces;
- struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
- __u8 bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber;
+ struct usb_device *udev;
+ __u8 bNumInterfaces;
+ struct usb_interface *iface;
+ __u8 bInterfaceNumber;
bool found;
/* The resulting parameters (noop) */
struct uclogic_params p = {0, };
@@ -848,6 +856,11 @@ int uclogic_params_init(struct uclogic_params *params,
goto cleanup;
}
+ udev = hid_to_usb_dev(hdev);
+ bNumInterfaces = udev->config->desc.bNumInterfaces;
+ iface = to_usb_interface(hdev->dev.parent);
+ bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber;
+
/*
* Set replacement report descriptor if the original matches the
* specified size. Otherwise keep interface unchanged.
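
All three hid-uclogic-params.c hunks above make the same correction: hid_to_usb_dev() dereferences hdev, so it must not run in the declaration initializers before the argument checks. The pattern, reduced to its essence (function name illustrative):

	static int example_init(struct hid_device *hdev)
	{
		struct usb_device *udev;

		if (hdev == NULL)
			return -EINVAL;

		udev = hid_to_usb_dev(hdev);	/* safe only after the check */

		return 0;	/* ... rest of the setup would go here ... */
	}
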
diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c
index 72957a9f7117..efa6140915f4 100644
--- a/drivers/hid/hid-vivaldi.c
+++ b/drivers/hid/hid-vivaldi.c
@@ -6,16 +6,17 @@
* Author: Sean O'Brien <seobrien@chromium.org>
*/
+#include <linux/device.h>
#include <linux/hid.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sysfs.h>
#define MIN_FN_ROW_KEY 1
#define MAX_FN_ROW_KEY 24
#define HID_VD_FN_ROW_PHYSMAP 0x00000001
#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
-static struct hid_driver hid_vivaldi;
-
struct vivaldi_data {
u32 function_row_physmap[MAX_FN_ROW_KEY - MIN_FN_ROW_KEY + 1];
int max_function_row_key;
@@ -40,7 +41,7 @@ static ssize_t function_row_physmap_show(struct device *dev,
return size;
}
-DEVICE_ATTR_RO(function_row_physmap);
+static DEVICE_ATTR_RO(function_row_physmap);
static struct attribute *sysfs_attrs[] = {
&dev_attr_function_row_physmap.attr,
NULL
@@ -74,10 +75,11 @@ static void vivaldi_feature_mapping(struct hid_device *hdev,
struct hid_usage *usage)
{
struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
+ struct hid_report *report = field->report;
int fn_key;
int ret;
u32 report_len;
- u8 *buf;
+ u8 *report_data, *buf;
if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
(usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
@@ -89,12 +91,24 @@ static void vivaldi_feature_mapping(struct hid_device *hdev,
if (fn_key > drvdata->max_function_row_key)
drvdata->max_function_row_key = fn_key;
- buf = hid_alloc_report_buf(field->report, GFP_KERNEL);
- if (!buf)
+ report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL);
+ if (!report_data)
return;
- report_len = hid_report_len(field->report);
- ret = hid_hw_raw_request(hdev, field->report->id, buf,
+ report_len = hid_report_len(report);
+ if (!report->id) {
+ /*
+ * hid_hw_raw_request() will stuff report ID (which will be 0)
+ * into the first byte of the buffer even for unnumbered
+ * reports, so we need to account for this to avoid getting
+ * -EOVERFLOW in return.
+ * Note that hid_alloc_report_buf() adds 7 bytes to the size
+ * so we can safely say that we have space for an extra byte.
+ */
+ report_len++;
+ }
+
+ ret = hid_hw_raw_request(hdev, report->id, report_data,
report_len, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret < 0) {
@@ -103,7 +117,16 @@ static void vivaldi_feature_mapping(struct hid_device *hdev,
goto out;
}
- ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, buf,
+ if (!report->id) {
+ /*
+ * Undo the damage from hid_hw_raw_request() for unnumbered
+ * reports.
+ */
+ report_data++;
+ report_len--;
+ }
+
+ ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data,
report_len, 0);
if (ret) {
dev_warn(&hdev->dev, "failed to report feature %d\n",
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 79faac87a06f..681614a8302a 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -34,7 +34,7 @@ static int hidraw_major;
static struct cdev hidraw_cdev;
static struct class *hidraw_class;
static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
-static DEFINE_MUTEX(minors_lock);
+static DECLARE_RWSEM(minors_rwsem);
static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
@@ -107,7 +107,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
__u8 *buf;
int ret = 0;
- lockdep_assert_held(&minors_lock);
+ lockdep_assert_held(&minors_rwsem);
if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
ret = -ENODEV;
@@ -160,9 +160,9 @@ out:
static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
ssize_t ret;
- mutex_lock(&minors_lock);
+ down_read(&minors_rwsem);
ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT);
- mutex_unlock(&minors_lock);
+ up_read(&minors_rwsem);
return ret;
}
@@ -182,7 +182,7 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
int ret = 0, len;
unsigned char report_number;
- lockdep_assert_held(&minors_lock);
+ lockdep_assert_held(&minors_rwsem);
if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
ret = -ENODEV;
@@ -272,7 +272,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
goto out;
}
- mutex_lock(&minors_lock);
+ down_read(&minors_rwsem);
if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
err = -ENODEV;
goto out_unlock;
@@ -301,7 +301,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
file->private_data = list;
out_unlock:
- mutex_unlock(&minors_lock);
+ up_read(&minors_rwsem);
out:
if (err < 0)
kfree(list);
@@ -347,7 +347,7 @@ static int hidraw_release(struct inode * inode, struct file * file)
struct hidraw_list *list = file->private_data;
unsigned long flags;
- mutex_lock(&minors_lock);
+ down_write(&minors_rwsem);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
list_del(&list->node);
@@ -356,7 +356,7 @@ static int hidraw_release(struct inode * inode, struct file * file)
drop_ref(hidraw_table[minor], 0);
- mutex_unlock(&minors_lock);
+ up_write(&minors_rwsem);
return 0;
}
@@ -369,7 +369,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
struct hidraw *dev;
void __user *user_arg = (void __user*) arg;
- mutex_lock(&minors_lock);
+ down_read(&minors_rwsem);
dev = hidraw_table[minor];
if (!dev || !dev->exist) {
ret = -ENODEV;
@@ -487,7 +487,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
ret = -ENOTTY;
}
out:
- mutex_unlock(&minors_lock);
+ up_read(&minors_rwsem);
return ret;
}
@@ -546,7 +546,7 @@ int hidraw_connect(struct hid_device *hid)
result = -EINVAL;
- mutex_lock(&minors_lock);
+ down_write(&minors_rwsem);
for (minor = 0; minor < HIDRAW_MAX_DEVICES; minor++) {
if (hidraw_table[minor])
@@ -557,7 +557,7 @@ int hidraw_connect(struct hid_device *hid)
}
if (result) {
- mutex_unlock(&minors_lock);
+ up_write(&minors_rwsem);
kfree(dev);
goto out;
}
@@ -567,7 +567,7 @@ int hidraw_connect(struct hid_device *hid)
if (IS_ERR(dev->dev)) {
hidraw_table[minor] = NULL;
- mutex_unlock(&minors_lock);
+ up_write(&minors_rwsem);
result = PTR_ERR(dev->dev);
kfree(dev);
goto out;
@@ -583,7 +583,7 @@ int hidraw_connect(struct hid_device *hid)
dev->exist = 1;
hid->hidraw = dev;
- mutex_unlock(&minors_lock);
+ up_write(&minors_rwsem);
out:
return result;
@@ -594,11 +594,11 @@ void hidraw_disconnect(struct hid_device *hid)
{
struct hidraw *hidraw = hid->hidraw;
- mutex_lock(&minors_lock);
+ down_write(&minors_rwsem);
drop_ref(hidraw, 1);
- mutex_unlock(&minors_lock);
+ up_write(&minors_rwsem);
}
EXPORT_SYMBOL_GPL(hidraw_disconnect);
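
The mutex-to-rwsem conversion above is a straightforward reader/writer split: paths that only look up hidraw_table[minor] (read, write, ioctl, open, get/send report) take the semaphore shared and can now run concurrently on different nodes, while paths that mutate the table (connect, release, disconnect) take it exclusive. The shape of the pattern:

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(table_rwsem);

	static void reader_path(void)
	{
		down_read(&table_rwsem);	/* many readers in parallel */
		/* look up and use a table entry */
		up_read(&table_rwsem);
	}

	static void writer_path(void)
	{
		down_write(&table_rwsem);	/* excludes readers and writers */
		/* insert or remove a table entry */
		up_write(&table_rwsem);
	}
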
diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
index a6f0257a26de..b96ae15e0ad9 100644
--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c
+++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
@@ -111,7 +111,7 @@ static int i2c_hid_acpi_probe(struct i2c_client *client)
}
return i2c_hid_core_probe(client, &ihid_acpi->ops,
- hid_descriptor_address);
+ hid_descriptor_address, 0);
}
static const struct acpi_device_id i2c_hid_acpi_match[] = {
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 517141138b00..6726567d7297 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -522,9 +522,12 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
- if (test_bit(I2C_HID_STARTED, &ihid->flags))
+ if (test_bit(I2C_HID_STARTED, &ihid->flags)) {
+ pm_wakeup_event(&ihid->client->dev, 0);
+
hid_input_report(ihid->hid, HID_INPUT_REPORT, ihid->inbuf + 2,
ret_size - 2, 1);
+ }
return;
}
@@ -912,7 +915,7 @@ static void i2c_hid_core_shutdown_tail(struct i2c_hid *ihid)
}
int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
- u16 hid_descriptor_address)
+ u16 hid_descriptor_address, u32 quirks)
{
int ret;
struct i2c_hid *ihid;
@@ -1009,6 +1012,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
goto err_mem_free;
}
+ hid->quirks |= quirks;
+
return 0;
err_mem_free:
@@ -1063,11 +1068,9 @@ static int i2c_hid_core_suspend(struct device *dev)
int ret;
int wake_status;
- if (hid->driver && hid->driver->suspend) {
- ret = hid->driver->suspend(hid, PMSG_SUSPEND);
- if (ret < 0)
- return ret;
- }
+ ret = hid_driver_suspend(hid, PMSG_SUSPEND);
+ if (ret < 0)
+ return ret;
/* Save some power */
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
@@ -1125,12 +1128,7 @@ static int i2c_hid_core_resume(struct device *dev)
if (ret)
return ret;
- if (hid->driver && hid->driver->reset_resume) {
- ret = hid->driver->reset_resume(hid);
- return ret;
- }
-
- return 0;
+ return hid_driver_reset_resume(hid);
}
#endif
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
index 52674149a275..b4dad66fa954 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
@@ -150,7 +150,7 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
goodix_i2c_hid_deassert_reset(ihid_goodix, true);
mutex_unlock(&ihid_goodix->regulator_mutex);
- return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001);
+ return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
}
static const struct goodix_i2c_hid_timing_data goodix_gt7375p_timing_data = {
diff --git a/drivers/hid/i2c-hid/i2c-hid-of.c b/drivers/hid/i2c-hid/i2c-hid-of.c
index 4bf7cea92637..97a27a803f58 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/hid.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -71,6 +72,7 @@ static int i2c_hid_of_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct i2c_hid_of *ihid_of;
u16 hid_descriptor_address;
+ u32 quirks = 0;
int ret;
u32 val;
@@ -105,8 +107,14 @@ static int i2c_hid_of_probe(struct i2c_client *client,
if (ret)
return ret;
+ if (device_property_read_bool(dev, "touchscreen-inverted-x"))
+ quirks |= HID_QUIRK_X_INVERT;
+
+ if (device_property_read_bool(dev, "touchscreen-inverted-y"))
+ quirks |= HID_QUIRK_Y_INVERT;
+
return i2c_hid_core_probe(client, &ihid_of->ops,
- hid_descriptor_address);
+ hid_descriptor_address, quirks);
}
static const struct of_device_id i2c_hid_of_match[] = {
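
End to end, the new quirks plumbing works like this: the OF glue maps the touchscreen-inverted-x/y firmware properties to HID_QUIRK_X_INVERT/HID_QUIRK_Y_INVERT, i2c_hid_core_probe() ORs them into hid->quirks, and the hid-input hunk earlier in this series mirrors the axis against its logical maximum at event time. The event-side arithmetic in isolation:

	#include <linux/hid.h>
	#include <linux/input.h>

	static __s32 maybe_invert(unsigned long quirks, unsigned int code,
				  __s32 value, __s32 logical_maximum)
	{
		if (((quirks & HID_QUIRK_X_INVERT) && code == ABS_X) ||
		    ((quirks & HID_QUIRK_Y_INVERT) && code == ABS_Y))
			return logical_maximum - value;	/* mirror within range */

		return value;
	}
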
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
index 05a7827d211a..236cc062d5ef 100644
--- a/drivers/hid/i2c-hid/i2c-hid.h
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -32,7 +32,7 @@ struct i2chid_ops {
};
int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
- u16 hid_descriptor_address);
+ u16 hid_descriptor_address, u32 quirks);
int i2c_hid_core_remove(struct i2c_client *client);
void i2c_hid_core_shutdown(struct i2c_client *client);
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 45e0c7b1c9ec..8ccb246b0114 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -909,7 +909,11 @@ static uint32_t ish_ipc_get_header(struct ishtp_device *dev, int length,
*/
static bool _dma_no_cache_snooping(struct ishtp_device *dev)
{
- return dev->pdev->device == EHL_Ax_DEVICE_ID;
+ return (dev->pdev->device == EHL_Ax_DEVICE_ID ||
+ dev->pdev->device == TGL_LP_DEVICE_ID ||
+ dev->pdev->device == TGL_H_DEVICE_ID ||
+ dev->pdev->device == ADL_S_DEVICE_ID ||
+ dev->pdev->device == ADL_P_DEVICE_ID);
}
static const struct ishtp_hw_ops ish_hw_ops = {
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index 0e1183e96147..e24988586710 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -268,7 +268,8 @@ static int get_firmware_variant(struct ishtp_cl_data *client_data,
}
/**
- * loader_cl_send() Send message from host to firmware
+ * loader_cl_send() - Send message from host to firmware
+ *
* @client_data: Client data instance
* @out_msg: Message buffer to be sent to firmware
* @out_size: Size of out going message
diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
index 5571e74abe91..e46330b2e561 100644
--- a/drivers/hid/surface-hid/surface_hid_core.c
+++ b/drivers/hid/surface-hid/surface_hid_core.c
@@ -204,50 +204,35 @@ static int surface_hid_suspend(struct device *dev)
{
struct surface_hid_device *d = dev_get_drvdata(dev);
- if (d->hid->driver && d->hid->driver->suspend)
- return d->hid->driver->suspend(d->hid, PMSG_SUSPEND);
-
- return 0;
+ return hid_driver_suspend(d->hid, PMSG_SUSPEND);
}
static int surface_hid_resume(struct device *dev)
{
struct surface_hid_device *d = dev_get_drvdata(dev);
- if (d->hid->driver && d->hid->driver->resume)
- return d->hid->driver->resume(d->hid);
-
- return 0;
+ return hid_driver_resume(d->hid);
}
static int surface_hid_freeze(struct device *dev)
{
struct surface_hid_device *d = dev_get_drvdata(dev);
- if (d->hid->driver && d->hid->driver->suspend)
- return d->hid->driver->suspend(d->hid, PMSG_FREEZE);
-
- return 0;
+ return hid_driver_suspend(d->hid, PMSG_FREEZE);
}
static int surface_hid_poweroff(struct device *dev)
{
struct surface_hid_device *d = dev_get_drvdata(dev);
- if (d->hid->driver && d->hid->driver->suspend)
- return d->hid->driver->suspend(d->hid, PMSG_HIBERNATE);
-
- return 0;
+ return hid_driver_suspend(d->hid, PMSG_HIBERNATE);
}
static int surface_hid_restore(struct device *dev)
{
struct surface_hid_device *d = dev_get_drvdata(dev);
- if (d->hid->driver && d->hid->driver->reset_resume)
- return d->hid->driver->reset_resume(d->hid);
-
- return 0;
+ return hid_driver_reset_resume(d->hid);
}
const struct dev_pm_ops surface_hid_pm_ops = {
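
With hid_driver_suspend()/hid_driver_resume()/hid_driver_reset_resume() exported from hid-core, every transport's PM callbacks collapse to one call each; the "is a driver bound, and does it implement this op?" check now lives in one place. A trimmed sketch of a transport using the helpers (the drvdata lookup is illustrative):

	#include <linux/hid.h>
	#include <linux/pm.h>

	static int my_transport_suspend(struct device *dev)
	{
		struct hid_device *hid = dev_get_drvdata(dev);

		return hid_driver_suspend(hid, PMSG_SUSPEND);
	}

	static int my_transport_resume(struct device *dev)
	{
		struct hid_device *hid = dev_get_drvdata(dev);

		return hid_driver_resume(hid);
	}
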
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 8fe3efcb8327..614adb510dbd 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -28,11 +28,22 @@
struct uhid_device {
struct mutex devlock;
+
+ /* This flag tracks whether the HID device is usable for commands from
+ * userspace. The flag is already set before hid_add_device(), which
+ * runs in workqueue context, to allow hid_add_device() to communicate
+ * with userspace.
+ * However, if hid_add_device() fails, the flag is cleared without
+ * holding devlock.
+ * We guarantee that if @running changes from true to false while you're
+ * holding @devlock, it's still fine to access @hid.
+ */
bool running;
__u8 *rd_data;
uint rd_size;
+ /* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */
struct hid_device *hid;
struct uhid_event input_buf;
@@ -63,9 +74,18 @@ static void uhid_device_add_worker(struct work_struct *work)
if (ret) {
hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);
- hid_destroy_device(uhid->hid);
- uhid->hid = NULL;
- uhid->running = false;
+ /* We used to call hid_destroy_device() here, but that's really
+ * messy to get right because we have to coordinate with
+ * concurrent writes from userspace that might be in the middle
+ * of using uhid->hid.
+ * Just leave uhid->hid as-is for now, and clean it up when
+ * userspace tries to close or reinitialize the uhid instance.
+ *
+ * However, we do have to clear the ->running flag and do a
+ * wakeup to make sure userspace knows that the device is gone.
+ */
+ WRITE_ONCE(uhid->running, false);
+ wake_up_interruptible(&uhid->report_wait);
}
}
@@ -174,9 +194,9 @@ static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
spin_unlock_irqrestore(&uhid->qlock, flags);
ret = wait_event_interruptible_timeout(uhid->report_wait,
- !uhid->report_running || !uhid->running,
+ !uhid->report_running || !READ_ONCE(uhid->running),
5 * HZ);
- if (!ret || !uhid->running || uhid->report_running)
+ if (!ret || !READ_ONCE(uhid->running) || uhid->report_running)
ret = -EIO;
else if (ret < 0)
ret = -ERESTARTSYS;
@@ -217,7 +237,7 @@ static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
struct uhid_event *ev;
int ret;
- if (!uhid->running)
+ if (!READ_ONCE(uhid->running))
return -EIO;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -259,7 +279,7 @@ static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
struct uhid_event *ev;
int ret;
- if (!uhid->running || count > UHID_DATA_MAX)
+ if (!READ_ONCE(uhid->running) || count > UHID_DATA_MAX)
return -EIO;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -474,7 +494,7 @@ static int uhid_dev_create2(struct uhid_device *uhid,
void *rd_data;
int ret;
- if (uhid->running)
+ if (uhid->hid)
return -EALREADY;
rd_size = ev->u.create2.rd_size;
@@ -556,15 +576,16 @@ static int uhid_dev_create(struct uhid_device *uhid,
static int uhid_dev_destroy(struct uhid_device *uhid)
{
- if (!uhid->running)
+ if (!uhid->hid)
return -EINVAL;
- uhid->running = false;
+ WRITE_ONCE(uhid->running, false);
wake_up_interruptible(&uhid->report_wait);
cancel_work_sync(&uhid->worker);
hid_destroy_device(uhid->hid);
+ uhid->hid = NULL;
kfree(uhid->rd_data);
return 0;
@@ -572,7 +593,7 @@ static int uhid_dev_destroy(struct uhid_device *uhid)
static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
- if (!uhid->running)
+ if (!READ_ONCE(uhid->running))
return -EINVAL;
hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
@@ -583,7 +604,7 @@ static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
- if (!uhid->running)
+ if (!READ_ONCE(uhid->running))
return -EINVAL;
hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
@@ -595,7 +616,7 @@ static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
static int uhid_dev_get_report_reply(struct uhid_device *uhid,
struct uhid_event *ev)
{
- if (!uhid->running)
+ if (!READ_ONCE(uhid->running))
return -EINVAL;
uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
@@ -605,7 +626,7 @@ static int uhid_dev_get_report_reply(struct uhid_device *uhid,
static int uhid_dev_set_report_reply(struct uhid_device *uhid,
struct uhid_event *ev)
{
- if (!uhid->running)
+ if (!READ_ONCE(uhid->running))
return -EINVAL;
uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
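
The READ_ONCE()/WRITE_ONCE() conversion in uhid is the standard treatment for a flag that one context clears without holding the lock while others poll it: the macros prevent the compiler from tearing, caching, or duplicating the access, though they impose no memory ordering by themselves; here the wake_up after the store is what makes waiters re-check. In miniature:

	#include <linux/compiler.h>
	#include <linux/types.h>

	static bool running;

	static void producer_fail_path(void)
	{
		WRITE_ONCE(running, false);	/* single, untorn store */
		/* then wake any waiters so they re-evaluate the flag */
	}

	static bool consumer_check(void)
	{
		return READ_ONCE(running);	/* fresh, untorn load every call */
	}
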
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 2dcaf31eb9cd..54752c85604b 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1563,8 +1563,8 @@ static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
int status = 0;
hid_restart_io(hid);
- if (driver_suspended && hid->driver && hid->driver->resume)
- status = hid->driver->resume(hid);
+ if (driver_suspended)
+ status = hid_driver_resume(hid);
return status;
}
@@ -1588,11 +1588,9 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
set_bit(HID_SUSPENDED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
- if (hid->driver && hid->driver->suspend) {
- status = hid->driver->suspend(hid, message);
- if (status < 0)
- goto failed;
- }
+ status = hid_driver_suspend(hid, message);
+ if (status < 0)
+ goto failed;
driver_suspended = true;
} else {
usbhid_mark_busy(usbhid);
@@ -1602,8 +1600,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
} else {
/* TODO: resume() might need to handle suspend failure */
- if (hid->driver && hid->driver->suspend)
- status = hid->driver->suspend(hid, message);
+ status = hid_driver_suspend(hid, message);
driver_suspended = true;
spin_lock_irq(&usbhid->lock);
set_bit(HID_SUSPENDED, &usbhid->iofl);
@@ -1644,8 +1641,8 @@ static int hid_reset_resume(struct usb_interface *intf)
int status;
status = hid_post_reset(intf);
- if (status >= 0 && hid->driver && hid->driver->reset_resume) {
- int ret = hid->driver->reset_resume(hid);
+ if (status >= 0) {
+ int ret = hid_driver_reset_resume(hid);
if (ret < 0)
status = ret;
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 2a4cc39962e7..a7176fc0635d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2588,6 +2588,24 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
}
}
+static bool wacom_wac_slot_is_active(struct input_dev *dev, int key)
+{
+ struct input_mt *mt = dev->mt;
+ struct input_mt_slot *s;
+
+ if (!mt)
+ return false;
+
+ for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
+ if (s->key == key &&
+ input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
static void wacom_wac_finger_event(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage, __s32 value)
{
@@ -2638,9 +2656,14 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
}
if (usage->usage_index + 1 == field->report_count) {
- if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
- wacom_wac->hid_data.confidence)
- wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field) {
+ bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input,
+ wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch;
+
+ if (wacom_wac->hid_data.confidence || touch_removed) {
+ wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+ }
+ }
}
}
@@ -2659,6 +2682,10 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
hid_data->confidence = true;
+ hid_data->cc_report = 0;
+ hid_data->cc_index = -1;
+ hid_data->cc_value_index = -1;
+
for (i = 0; i < report->maxfield; i++) {
struct hid_field *field = report->field[i];
int j;
@@ -2692,11 +2719,14 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
hid_data->cc_index >= 0) {
struct hid_field *field = report->field[hid_data->cc_index];
int value = field->value[hid_data->cc_value_index];
- if (value)
+ if (value) {
hid_data->num_expected = value;
+ hid_data->num_received = 0;
+ }
}
else {
hid_data->num_expected = wacom_wac->features.touch_max;
+ hid_data->num_received = 0;
}
}
@@ -2724,6 +2754,7 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
input_sync(input);
wacom_wac->hid_data.num_received = 0;
+ wacom_wac->hid_data.num_expected = 0;
/* keep touch state for pen event */
wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
index ec90713564e3..884066109699 100644
--- a/drivers/hsi/hsi_core.c
+++ b/drivers/hsi/hsi_core.c
@@ -102,6 +102,7 @@ struct hsi_client *hsi_new_client(struct hsi_port *port,
if (device_register(&cl->device) < 0) {
pr_err("hsi: failed to register client: %s\n", info->name);
put_device(&cl->device);
+ goto err;
}
return cl;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2829575fd9b7..60375879612f 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -1554,7 +1554,7 @@ int vmbus_request_offers(void)
struct vmbus_channel_msginfo *msginfo;
int ret;
- msginfo = kmalloc(sizeof(*msginfo) +
+ msginfo = kzalloc(sizeof(*msginfo) +
sizeof(struct vmbus_channel_message_header),
GFP_KERNEL);
if (!msginfo)
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index ca873a3b98db..f2d05bff4245 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1660,6 +1660,13 @@ static int balloon_connect_vsp(struct hv_device *dev)
unsigned long t;
int ret;
+ /*
+ * max_pkt_size should be large enough for one vmbus packet header plus
+ * our receive buffer size. Hyper-V sends messages up to
+ * HV_HYP_PAGE_SIZE bytes long on the balloon channel.
+ */
+ dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
+
ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
balloon_onchannelcallback, dev);
if (ret)
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 7be173a99f27..181d16bbf49d 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -44,10 +44,10 @@ EXPORT_SYMBOL_GPL(hv_vp_index);
u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);
-void __percpu **hyperv_pcpu_input_arg;
+void * __percpu *hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);
-void __percpu **hyperv_pcpu_output_arg;
+void * __percpu *hyperv_pcpu_output_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);
/*
@@ -295,3 +295,14 @@ u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_s
return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);
+
+void __weak *hv_map_memory(void *addr, unsigned long size)
+{
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hv_map_memory);
+
+void __weak hv_unmap_memory(void *addr)
+{
+}
+EXPORT_SYMBOL_GPL(hv_unmap_memory);
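
The two __weak stubs give common code something to link against on every architecture; an arch that can actually remap the buffer simply provides a strong definition of the same signature in its own file and the linker prefers it. Schematically (the arch-side helper is hypothetical):

	/* hv_common.c: weak default, used when no arch override exists. */
	void __weak *hv_map_memory(void *addr, unsigned long size)
	{
		return NULL;			/* "not supported" */
	}

	/* arch/<arch>/...: a strong definition of the same symbol wins. */
	void *hv_map_memory(void *addr, unsigned long size)
	{
		return arch_specific_map(addr, size);	/* hypothetical helper */
	}
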
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 392c1ac4f819..17bf55fe3169 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -33,6 +33,7 @@
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
+#include <linux/dma-map-ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
@@ -1381,7 +1382,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
- add_interrupt_randomness(vmbus_interrupt, 0);
+ add_interrupt_randomness(vmbus_interrupt);
}
static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
@@ -2078,6 +2079,7 @@ struct hv_device *vmbus_device_create(const guid_t *type,
return child_device_obj;
}
+static u64 vmbus_dma_mask = DMA_BIT_MASK(64);
/*
* vmbus_device_register - Register the child device
*/
@@ -2118,6 +2120,8 @@ int vmbus_device_register(struct hv_device *child_device_obj)
}
hv_debug_add_dev_dir(child_device_obj);
+ child_device_obj->device.dma_mask = &vmbus_dma_mask;
+ child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
return 0;
err_kset_unregister:
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 64bd3dfba2c4..8df25f1079ba 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1414,8 +1414,8 @@ config SENSORS_PC87427
will be called pc87427.
config SENSORS_NTC_THERMISTOR
- tristate "NTC thermistor support from Murata"
- depends on !OF || IIO=n || IIO
+ tristate "NTC thermistor support"
+ depends on IIO
depends on THERMAL || !THERMAL_OF
help
This driver supports NTC thermistors sensor reading and its
@@ -1513,6 +1513,16 @@ config SENSORS_NZXT_KRAKEN2
This driver can also be built as a module. If so, the module
will be called nzxt-kraken2.
+config SENSORS_NZXT_SMART2
+ tristate "NZXT RGB & Fan Controller/Smart Device v2"
+ depends on USB_HID
+ help
+ If you say yes here you get support for hardware monitoring for the
+ NZXT RGB & Fan Controller/Smart Device v2.
+
+ This driver can also be built as a module. If so, the module
+ will be called nzxt-smart2.
+
source "drivers/hwmon/occ/Kconfig"
config SENSORS_PCF8591
@@ -1872,6 +1882,18 @@ config SENSORS_INA2XX
This driver can also be built as a module. If so, the module
will be called ina2xx.
+config SENSORS_INA238
+ tristate "Texas Instruments INA238"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the INA238 power monitor
+ chip. This driver supports voltage, current, power and temperature
+ measurements as well as alarm configuration.
+
+ This driver can also be built as a module. If so, the module
+ will be called ina238.
+
config SENSORS_INA3221
tristate "Texas Instruments INA3221 Triple Power Monitor"
depends on I2C
@@ -1939,6 +1961,7 @@ config SENSORS_TMP108
config SENSORS_TMP401
tristate "Texas Instruments TMP401 and compatibles"
depends on I2C
+ select REGMAP
help
If you say yes here you get support for Texas Instruments TMP401,
TMP411, TMP431, TMP432, and TMP435 temperature sensor chips.
@@ -2215,6 +2238,30 @@ config SENSORS_ATK0110
This driver can also be built as a module. If so, the module
will be called asus_atk0110.
+config SENSORS_ASUS_WMI
+ tristate "ASUS WMI X370/X470/B450/X399"
+ depends on ACPI_WMI
+ help
+ If you say yes here you get support for the ACPI hardware monitoring
+ interface found in X370/X470/B450/X399 ASUS motherboards. This driver
+ will provide readings of fans, voltages and temperatures through the system
+ firmware.
+
+ This driver can also be built as a module. If so, the module
+ will be called asus_wmi_sensors.
+
+config SENSORS_ASUS_WMI_EC
+ tristate "ASUS WMI B550/X570"
+ depends on ACPI_WMI
+ help
+ If you say yes here you get support for the ACPI embedded controller
+ hardware monitoring interface found in B550/X570 ASUS motherboards.
+ This driver will provide readings of fans, voltages and temperatures
+ through the system firmware.
+
+ This driver can also be built as a module. If so, the module
+ will be called asus_wmi_sensors_ec.
+
endif # ACPI
endif # HWMON
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index baee6a8d4dd1..185f946d698b 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_HWMON_VID) += hwmon-vid.o
# ACPI drivers
obj-$(CONFIG_SENSORS_ACPI_POWER) += acpi_power_meter.o
obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
+obj-$(CONFIG_SENSORS_ASUS_WMI) += asus_wmi_sensors.o
+obj-$(CONFIG_SENSORS_ASUS_WMI_EC) += asus_wmi_ec_sensors.o
# Native drivers
# asb100, then w83781d go first, as they can override other drivers' addresses.
@@ -90,6 +92,7 @@ obj-$(CONFIG_SENSORS_IBMPOWERNV)+= ibmpowernv.o
obj-$(CONFIG_SENSORS_IIO_HWMON) += iio_hwmon.o
obj-$(CONFIG_SENSORS_INA209) += ina209.o
obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
+obj-$(CONFIG_SENSORS_INA238) += ina238.o
obj-$(CONFIG_SENSORS_INA3221) += ina3221.o
obj-$(CONFIG_SENSORS_INTEL_M10_BMC_HWMON) += intel-m10-bmc-hwmon.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
@@ -157,6 +160,7 @@ obj-$(CONFIG_SENSORS_NPCM7XX) += npcm750-pwm-fan.o
obj-$(CONFIG_SENSORS_NSA320) += nsa320-hwmon.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_NZXT_KRAKEN2) += nzxt-kraken2.o
+obj-$(CONFIG_SENSORS_NZXT_SMART2) += nzxt-smart2.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 38b447c6e8cd..91ecfee243bf 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -324,7 +324,7 @@ static int adm1021_detect(struct i2c_client *client,
{
struct i2c_adapter *adapter = client->adapter;
const char *type_name;
- int conv_rate, status, config, man_id, dev_id;
+ int reg, conv_rate, status, config, man_id, dev_id;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
pr_debug("detect failed, smbus byte data not supported!\n");
@@ -349,9 +349,19 @@ static int adm1021_detect(struct i2c_client *client,
if (man_id < 0 || dev_id < 0)
return -ENODEV;
- if (man_id == 0x4d && dev_id == 0x01)
+ if (man_id == 0x4d && dev_id == 0x01) {
+ /*
+ * dev_id 0x01 matches MAX6680, MAX6695, MAX6696, and possibly
+ * others. Read register which is unsupported on MAX1617 but
+ * exists on all those chips and compare with the dev_id
+ * register. If it matches, it may be a MAX1617A.
+ */
+ reg = i2c_smbus_read_byte_data(client,
+ ADM1023_REG_REM_TEMP_PREC);
+ if (reg != dev_id)
+ return -ENODEV;
type_name = "max1617a";
- else if (man_id == 0x41) {
+ } else if (man_id == 0x41) {
if ((dev_id & 0xF0) == 0x30)
type_name = "adm1023";
else if ((dev_id & 0xF0) == 0x00)
@@ -395,13 +405,18 @@ static int adm1021_detect(struct i2c_client *client,
/*
* LM84 Mfr ID is in a different place,
- * and it has more unused bits.
+ * and it has more unused bits. Registers at 0xfe and 0xff
+ * are undefined and return the most recently read value,
+ * here the value of the configuration register.
*/
if (conv_rate == 0x00
+ && man_id == config && dev_id == config
&& (config & 0x7F) == 0x00
&& (status & 0xAB) == 0x00) {
type_name = "lm84";
} else {
+ if ((config & 0x3f) || (status & 0x03))
+ return -ENODEV;
/* fail if low limits are larger than high limits */
if ((s8)llo > lhi || (s8)rlo > rhi)
return -ENODEV;
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 257ec53ae723..ac841fa3a369 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -242,9 +242,8 @@ static int FAN_TO_REG(int reg, int div)
static int AUTO_TEMP_MAX_TO_REG(int val, int reg, int pwm)
{
int ret;
- int range = val - AUTO_TEMP_MIN_FROM_REG(reg);
+ int range = ((val - AUTO_TEMP_MIN_FROM_REG(reg)) * 10) / (16 - pwm);
- range = ((val - AUTO_TEMP_MIN_FROM_REG(reg))*10)/(16 - pwm);
ret = ((reg & 0xf8) |
(range < 10000 ? 0 :
range < 20000 ? 1 :
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index d519aca4a9d6..fb6d14d213a1 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -662,6 +662,9 @@ static int adt7470_fan_write(struct device *dev, u32 attr, int channel, long val
struct adt7470_data *data = dev_get_drvdata(dev);
int err;
+ if (val <= 0)
+ return -EINVAL;
+
val = FAN_RPM_TO_PERIOD(val);
val = clamp_val(val, 1, 65534);
diff --git a/drivers/hwmon/asus_wmi_ec_sensors.c b/drivers/hwmon/asus_wmi_ec_sensors.c
new file mode 100644
index 000000000000..22a1459305a7
--- /dev/null
+++ b/drivers/hwmon/asus_wmi_ec_sensors.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HWMON driver for ASUS B550/X570 motherboards that publish sensor
+ * values via the embedded controller registers.
+ *
+ * Copyright (C) 2021 Eugene Shalygin <eugene.shalygin@gmail.com>
+ * Copyright (C) 2018-2019 Ed Brindley <kernel@maidavale.org>
+ *
+ * EC provides:
+ * - Chipset temperature
+ * - CPU temperature
+ * - Motherboard temperature
+ * - T_Sensor temperature
+ * - VRM temperature
+ * - Water In temperature
+ * - Water Out temperature
+ * - CPU Optional Fan RPM
+ * - Chipset Fan RPM
+ * - Water Flow Fan RPM
+ * - CPU current
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nls.h>
+#include <linux/units.h>
+#include <linux/wmi.h>
+
+#include <asm/unaligned.h>
+
+#define ASUSWMI_MONITORING_GUID "466747A0-70EC-11DE-8A39-0800200C9A66"
+#define ASUSWMI_METHODID_BLOCK_READ_EC 0x42524543 /* BREC */
+/* From the ASUS DSDT source */
+#define ASUSWMI_BREC_REGISTERS_MAX 16
+#define ASUSWMI_MAX_BUF_LEN 128
+#define SENSOR_LABEL_LEN 16
+
+static u32 hwmon_attributes[hwmon_max] = {
+ [hwmon_chip] = HWMON_C_REGISTER_TZ,
+ [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
+ [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
+ [hwmon_curr] = HWMON_C_INPUT | HWMON_C_LABEL,
+ [hwmon_fan] = HWMON_F_INPUT | HWMON_F_LABEL,
+};
+
+struct asus_wmi_ec_sensor_address {
+ u8 index;
+ u8 bank;
+ u8 size;
+};
+
+#define MAKE_SENSOR_ADDRESS(size_i, bank_i, index_i) { \
+ .size = size_i, \
+ .bank = bank_i, \
+ .index = index_i, \
+}
+
+struct ec_sensor_info {
+ struct asus_wmi_ec_sensor_address addr;
+ char label[SENSOR_LABEL_LEN];
+ enum hwmon_sensor_types type;
+};
+
+#define EC_SENSOR(sensor_label, sensor_type, size, bank, index) { \
+ .addr = MAKE_SENSOR_ADDRESS(size, bank, index), \
+ .label = sensor_label, \
+ .type = sensor_type, \
+}
+
+enum known_ec_sensor {
+ SENSOR_TEMP_CHIPSET,
+ SENSOR_TEMP_CPU,
+ SENSOR_TEMP_MB,
+ SENSOR_TEMP_T_SENSOR,
+ SENSOR_TEMP_VRM,
+ SENSOR_FAN_CPU_OPT,
+ SENSOR_FAN_CHIPSET,
+ SENSOR_FAN_VRM_HS,
+ SENSOR_FAN_WATER_FLOW,
+ SENSOR_CURR_CPU,
+ SENSOR_TEMP_WATER_IN,
+ SENSOR_TEMP_WATER_OUT,
+ SENSOR_MAX
+};
+
+/* All known sensors for ASUS EC controllers */
+static const struct ec_sensor_info known_ec_sensors[] = {
+ [SENSOR_TEMP_CHIPSET] = EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
+ [SENSOR_TEMP_CPU] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b),
+ [SENSOR_TEMP_MB] = EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c),
+ [SENSOR_TEMP_T_SENSOR] = EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
+ [SENSOR_TEMP_VRM] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
+ [SENSOR_FAN_CPU_OPT] = EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+ [SENSOR_FAN_VRM_HS] = EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
+ [SENSOR_FAN_CHIPSET] = EC_SENSOR("Chipset", hwmon_fan, 2, 0x00, 0xb4),
+ [SENSOR_FAN_WATER_FLOW] = EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbc),
+ [SENSOR_CURR_CPU] = EC_SENSOR("CPU", hwmon_curr, 1, 0x00, 0xf4),
+ [SENSOR_TEMP_WATER_IN] = EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
+ [SENSOR_TEMP_WATER_OUT] = EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+};
+
+struct asus_wmi_data {
+ const enum known_ec_sensor known_board_sensors[SENSOR_MAX + 1];
+};
+
+/* boards with EC support */
+static struct asus_wmi_data sensors_board_PW_X570_P = {
+ .known_board_sensors = {
+ SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB, SENSOR_TEMP_VRM,
+ SENSOR_FAN_CHIPSET,
+ SENSOR_MAX
+ },
+};
+
+static struct asus_wmi_data sensors_board_PW_X570_A = {
+ .known_board_sensors = {
+ SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB, SENSOR_TEMP_VRM,
+ SENSOR_FAN_CHIPSET,
+ SENSOR_CURR_CPU,
+ SENSOR_MAX
+ },
+};
+
+static struct asus_wmi_data sensors_board_R_C8H = {
+ .known_board_sensors = {
+ SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
+ SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
+ SENSOR_TEMP_WATER_IN, SENSOR_TEMP_WATER_OUT,
+ SENSOR_FAN_CPU_OPT, SENSOR_FAN_CHIPSET, SENSOR_FAN_WATER_FLOW,
+ SENSOR_CURR_CPU,
+ SENSOR_MAX
+ },
+};
+
+/* Same as Hero but without chipset fan */
+static struct asus_wmi_data sensors_board_R_C8DH = {
+ .known_board_sensors = {
+ SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
+ SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
+ SENSOR_TEMP_WATER_IN, SENSOR_TEMP_WATER_OUT,
+ SENSOR_FAN_CPU_OPT, SENSOR_FAN_WATER_FLOW,
+ SENSOR_CURR_CPU,
+ SENSOR_MAX
+ },
+};
+
+/* Same as Hero but without water */
+static struct asus_wmi_data sensors_board_R_C8F = {
+ .known_board_sensors = {
+ SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
+ SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
+ SENSOR_FAN_CPU_OPT, SENSOR_FAN_CHIPSET,
+ SENSOR_CURR_CPU,
+ SENSOR_MAX
+ },
+};
+
+static struct asus_wmi_data sensors_board_RS_B550_E_G = {
+ .known_board_sensors = {
+ SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
+ SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
+ SENSOR_FAN_CPU_OPT,
+ SENSOR_MAX
+ },
+};
+
+static struct asus_wmi_data sensors_board_RS_B550_I_G = {
+ .known_board_sensors = {
+ SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
+ SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
+ SENSOR_FAN_VRM_HS,
+ SENSOR_CURR_CPU,
+ SENSOR_MAX
+ },
+};
+
+static struct asus_wmi_data sensors_board_RS_X570_E_G = {
+ .known_board_sensors = {
+ SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
+ SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
+ SENSOR_FAN_CHIPSET,
+ SENSOR_CURR_CPU,
+ SENSOR_MAX
+ },
+};
+
+#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, sensors) { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }, \
+ .driver_data = sensors, \
+}
+
+static const struct dmi_system_id asus_wmi_ec_dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO", &sensors_board_PW_X570_P),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE", &sensors_board_PW_X570_A),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO", &sensors_board_R_C8DH),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII FORMULA", &sensors_board_R_C8F),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO", &sensors_board_R_C8H),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING", &sensors_board_RS_B550_E_G),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING", &sensors_board_RS_B550_I_G),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING", &sensors_board_RS_X570_E_G),
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, asus_wmi_ec_dmi_table);
+
+struct ec_sensor {
+ enum known_ec_sensor info_index;
+ long cached_value;
+};
+
+/**
+ * struct asus_wmi_ec_info - sensor info.
+ * @sensors: list of sensors.
+ * @read_arg: UTF-16LE string to pass to BRxx() WMI function.
+ * @read_buffer: decoded output from WMI result.
+ * @nr_sensors: number of board EC sensors.
+ * @nr_registers: number of EC registers to read (a sensor might span more than one register).
+ * @last_updated: in jiffies.
+ */
+struct asus_wmi_ec_info {
+ struct ec_sensor sensors[SENSOR_MAX];
+ char read_arg[(ASUSWMI_BREC_REGISTERS_MAX * 4 + 1) * 2];
+ u8 read_buffer[ASUSWMI_BREC_REGISTERS_MAX];
+ unsigned int nr_sensors;
+ unsigned int nr_registers;
+ unsigned long last_updated;
+};
+
+struct asus_wmi_sensors {
+ struct asus_wmi_ec_info ec;
+ /* lock access to internal cache */
+ struct mutex lock;
+};
+
+static int asus_wmi_ec_fill_board_sensors(struct asus_wmi_ec_info *ec,
+ const enum known_ec_sensor *bsi)
+{
+ struct ec_sensor *s = ec->sensors;
+ int i;
+
+ ec->nr_sensors = 0;
+ ec->nr_registers = 0;
+
+ for (i = 0; bsi[i] != SENSOR_MAX; i++) {
+ s[i].info_index = bsi[i];
+ ec->nr_sensors++;
+ ec->nr_registers += known_ec_sensors[bsi[i]].addr.size;
+ }
+
+ return 0;
+}
+
+/*
+ * The next four functions convert to or from BRxx string argument format.
+ * The format of the string is as follows:
+ * - The string consists of two-byte UTF-16LE characters.
+ * - The value of the first two-byte character in the string is equal to the
+ *   total length of the rest of the string in bytes, i.e. it excludes the
+ *   length prefix itself.
+ * - The rest of the string encodes (bank, index) pairs, where both values
+ *   are one byte long (0x00 to 0xFF).
+ * - Numbers are encoded as UTF-16LE hex values.
+ */
+static int asus_wmi_ec_decode_reply_buffer(const u8 *in, u32 length, u8 *out)
+{
+ char buffer[ASUSWMI_MAX_BUF_LEN * 2];
+ u32 len = min_t(u32, get_unaligned_le16(in), length - 2);
+
+ utf16s_to_utf8s((wchar_t *)(in + 2), len / 2, UTF16_LITTLE_ENDIAN, buffer, sizeof(buffer));
+
+ return hex2bin(out, buffer, len / 4);
+}
+
+static void asus_wmi_ec_encode_registers(const u8 *in, u32 len, char *out)
+{
+ char buffer[ASUSWMI_MAX_BUF_LEN * 2];
+
+ bin2hex(buffer, in, len);
+
+ utf8s_to_utf16s(buffer, len * 2, UTF16_LITTLE_ENDIAN, (wchar_t *)(out + 2), len * 2);
+
+ put_unaligned_le16(len * 4, out);
+}
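+
+/*
+ * Worked example (illustrative only, not used by the driver): encoding a
+ * single register address, bank 0x00 and index 0x3a, with the helper above:
+ *
+ *	u8 regs[] = { 0x00, 0x3a };
+ *
+ *	asus_wmi_ec_encode_registers(regs, sizeof(regs), ec->read_arg);
+ *
+ * bin2hex() turns the two bytes into the four ASCII characters "003a",
+ * utf8s_to_utf16s() widens them to eight bytes of UTF-16LE, and the leading
+ * two-byte length prefix is set to len * 4 = 8. On the reply path,
+ * asus_wmi_ec_decode_reply_buffer() reverses the same transformation.
+ */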
+
+static void asus_wmi_ec_make_block_read_query(struct asus_wmi_ec_info *ec)
+{
+ u8 registers[ASUSWMI_BREC_REGISTERS_MAX * 2];
+ const struct ec_sensor_info *si;
+ int i, j, offset;
+
+ offset = 0;
+ for (i = 0; i < ec->nr_sensors; i++) {
+ si = &known_ec_sensors[ec->sensors[i].info_index];
+ for (j = 0; j < si->addr.size; j++) {
+ registers[offset++] = si->addr.bank;
+ registers[offset++] = si->addr.index + j;
+ }
+ }
+
+ asus_wmi_ec_encode_registers(registers, offset, ec->read_arg);
+}
+
+static int asus_wmi_ec_block_read(u32 method_id, char *query, u8 *out)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer input;
+ union acpi_object *obj;
+ acpi_status status;
+ int ret;
+
+ /* The first byte of the BRxx() argument string has to be the string size. */
+ input.length = query[0] + 2;
+ input.pointer = query;
+ status = wmi_evaluate_method(ASUSWMI_MONITORING_GUID, 0, method_id, &input, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = output.pointer;
+ if (!obj)
+ return -EIO;
+
+ if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 2) {
+ ret = -EIO;
+ goto out_free_obj;
+ }
+
+ ret = asus_wmi_ec_decode_reply_buffer(obj->buffer.pointer, obj->buffer.length, out);
+
+out_free_obj:
+ ACPI_FREE(obj);
+ return ret;
+}
+
+static inline long get_sensor_value(const struct ec_sensor_info *si, u8 *data)
+{
+ switch (si->addr.size) {
+ case 1:
+ return *data;
+ case 2:
+ return get_unaligned_be16(data);
+ case 4:
+ return get_unaligned_be32(data);
+ default:
+ return 0;
+ }
+}
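+
+/*
+ * For example (illustrative only), a two-byte fan tachometer value stored as
+ * 0x02 0x1c in the EC registers is decoded big-endian by get_sensor_value()
+ * above to 0x021c = 540 RPM.
+ */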
+
+static void asus_wmi_ec_update_ec_sensors(struct asus_wmi_ec_info *ec)
+{
+ const struct ec_sensor_info *si;
+ struct ec_sensor *s;
+ u8 i_sensor;
+ u8 *data;
+
+ data = ec->read_buffer;
+ for (i_sensor = 0; i_sensor < ec->nr_sensors; i_sensor++) {
+ s = &ec->sensors[i_sensor];
+ si = &known_ec_sensors[s->info_index];
+ s->cached_value = get_sensor_value(si, data);
+ data += si->addr.size;
+ }
+}
+
+static long asus_wmi_ec_scale_sensor_value(long value, int data_type)
+{
+ switch (data_type) {
+ case hwmon_curr:
+ case hwmon_temp:
+ case hwmon_in:
+ return value * MILLI;
+ default:
+ return value;
+ }
+}
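+
+/*
+ * Example (illustrative only): the EC reports temperatures in degrees
+ * Celsius and currents in Amperes, so a raw value of 45 is scaled to 45000
+ * (millidegrees Celsius or milliamperes) as the hwmon ABI expects, while
+ * fan readings are already in RPM and pass through unscaled.
+ */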
+
+static int asus_wmi_ec_find_sensor_index(const struct asus_wmi_ec_info *ec,
+ enum hwmon_sensor_types type, int channel)
+{
+ int i;
+
+ for (i = 0; i < ec->nr_sensors; i++) {
+ if (known_ec_sensors[ec->sensors[i].info_index].type == type) {
+ if (channel == 0)
+ return i;
+
+ channel--;
+ }
+ }
+ return -EINVAL;
+}
+
+static int asus_wmi_ec_get_cached_value_or_update(struct asus_wmi_sensors *sensor_data,
+ int sensor_index,
+ long *value)
+{
+ struct asus_wmi_ec_info *ec = &sensor_data->ec;
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+
+ if (time_after(jiffies, ec->last_updated + HZ)) {
+ ret = asus_wmi_ec_block_read(ASUSWMI_METHODID_BLOCK_READ_EC,
+ ec->read_arg, ec->read_buffer);
+ if (ret)
+ goto unlock;
+
+ asus_wmi_ec_update_ec_sensors(ec);
+ ec->last_updated = jiffies;
+ }
+
+ *value = ec->sensors[sensor_index].cached_value;
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+
+ return ret;
+}
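+
+/*
+ * Note: sensor values are thus refreshed from the EC at most once per second
+ * (HZ jiffies); within that window, readers are served from the cached
+ * values under sensor_data->lock.
+ */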
+
+/* Now follow the functions that implement the hwmon interface */
+
+static int asus_wmi_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
+ struct asus_wmi_ec_info *ec = &sensor_data->ec;
+ int ret, sidx, info_index;
+ long value = 0;
+
+ sidx = asus_wmi_ec_find_sensor_index(ec, type, channel);
+ if (sidx < 0)
+ return sidx;
+
+ ret = asus_wmi_ec_get_cached_value_or_update(sensor_data, sidx, &value);
+ if (ret)
+ return ret;
+
+ info_index = ec->sensors[sidx].info_index;
+ *val = asus_wmi_ec_scale_sensor_value(value, known_ec_sensors[info_index].type);
+
+ return ret;
+}
+
+static int asus_wmi_ec_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
+ struct asus_wmi_ec_info *ec = &sensor_data->ec;
+ int sensor_index;
+
+ sensor_index = asus_wmi_ec_find_sensor_index(ec, type, channel);
+ *str = known_ec_sensors[ec->sensors[sensor_index].info_index].label;
+
+ return 0;
+}
+
+static umode_t asus_wmi_ec_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct asus_wmi_sensors *sensor_data = drvdata;
+ const struct asus_wmi_ec_info *ec = &sensor_data->ec;
+ int index;
+
+ index = asus_wmi_ec_find_sensor_index(ec, type, channel);
+
+ return index < 0 ? 0 : 0444;
+}
+
+static int asus_wmi_hwmon_add_chan_info(struct hwmon_channel_info *asus_wmi_hwmon_chan,
+ struct device *dev, int num,
+ enum hwmon_sensor_types type, u32 config)
+{
+ u32 *cfg;
+
+ cfg = devm_kcalloc(dev, num + 1, sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ asus_wmi_hwmon_chan->type = type;
+ asus_wmi_hwmon_chan->config = cfg;
+ memset32(cfg, config, num);
+
+ return 0;
+}
+
+static const struct hwmon_ops asus_wmi_ec_hwmon_ops = {
+ .is_visible = asus_wmi_ec_hwmon_is_visible,
+ .read = asus_wmi_ec_hwmon_read,
+ .read_string = asus_wmi_ec_hwmon_read_string,
+};
+
+static struct hwmon_chip_info asus_wmi_ec_chip_info = {
+ .ops = &asus_wmi_ec_hwmon_ops,
+};
+
+static int asus_wmi_ec_configure_sensor_setup(struct device *dev,
+ const enum known_ec_sensor *bsi)
+{
+ struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
+ struct asus_wmi_ec_info *ec = &sensor_data->ec;
+ struct hwmon_channel_info *asus_wmi_hwmon_chan;
+ const struct hwmon_channel_info **asus_wmi_ci;
+ int nr_count[hwmon_max] = {}, nr_types = 0;
+ const struct hwmon_chip_info *chip_info;
+ const struct ec_sensor_info *si;
+ enum hwmon_sensor_types type;
+ struct device *hwdev;
+ int i, ret;
+
+ ret = asus_wmi_ec_fill_board_sensors(ec, bsi);
+ if (ret)
+ return ret;
+
+ if (!sensor_data->ec.nr_sensors)
+ return -ENODEV;
+
+ for (i = 0; i < ec->nr_sensors; i++) {
+ si = &known_ec_sensors[ec->sensors[i].info_index];
+ if (!nr_count[si->type])
+ nr_types++;
+ nr_count[si->type]++;
+ }
+
+ if (nr_count[hwmon_temp]) {
+ nr_count[hwmon_chip]++;
+ nr_types++;
+ }
+
+ /*
+ * If we can get values for all the registers in a single query,
+ * the query will not change from call to call.
+ */
+ asus_wmi_ec_make_block_read_query(ec);
+
+ asus_wmi_hwmon_chan = devm_kcalloc(dev, nr_types, sizeof(*asus_wmi_hwmon_chan),
+ GFP_KERNEL);
+ if (!asus_wmi_hwmon_chan)
+ return -ENOMEM;
+
+ asus_wmi_ci = devm_kcalloc(dev, nr_types + 1, sizeof(*asus_wmi_ci), GFP_KERNEL);
+ if (!asus_wmi_ci)
+ return -ENOMEM;
+
+ asus_wmi_ec_chip_info.info = asus_wmi_ci;
+ chip_info = &asus_wmi_ec_chip_info;
+
+ for (type = 0; type < hwmon_max; type++) {
+ if (!nr_count[type])
+ continue;
+
+ ret = asus_wmi_hwmon_add_chan_info(asus_wmi_hwmon_chan, dev,
+ nr_count[type], type,
+ hwmon_attributes[type]);
+ if (ret)
+ return ret;
+
+ *asus_wmi_ci++ = asus_wmi_hwmon_chan++;
+ }
+
+ dev_dbg(dev, "board has %d EC sensors that span %d registers",
+ ec->nr_sensors, ec->nr_registers);
+
+ hwdev = devm_hwmon_device_register_with_info(dev, "asus_wmi_ec_sensors",
+ sensor_data, chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwdev);
+}
+
+static int asus_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct asus_wmi_sensors *sensor_data;
+ struct asus_wmi_data *board_sensors;
+ const struct dmi_system_id *dmi_id;
+ const enum known_ec_sensor *bsi;
+ struct device *dev = &wdev->dev;
+
+ dmi_id = dmi_first_match(asus_wmi_ec_dmi_table);
+ if (!dmi_id)
+ return -ENODEV;
+
+ board_sensors = dmi_id->driver_data;
+ bsi = board_sensors->known_board_sensors;
+
+ sensor_data = devm_kzalloc(dev, sizeof(*sensor_data), GFP_KERNEL);
+ if (!sensor_data)
+ return -ENOMEM;
+
+ mutex_init(&sensor_data->lock);
+
+ dev_set_drvdata(dev, sensor_data);
+
+ return asus_wmi_ec_configure_sensor_setup(dev, bsi);
+}
+
+static const struct wmi_device_id asus_ec_wmi_id_table[] = {
+ { ASUSWMI_MONITORING_GUID, NULL },
+ { }
+};
+
+static struct wmi_driver asus_sensors_wmi_driver = {
+ .driver = {
+ .name = "asus_wmi_ec_sensors",
+ },
+ .id_table = asus_ec_wmi_id_table,
+ .probe = asus_wmi_probe,
+};
+module_wmi_driver(asus_sensors_wmi_driver);
+
+MODULE_AUTHOR("Ed Brindley <kernel@maidavale.org>");
+MODULE_AUTHOR("Eugene Shalygin <eugene.shalygin@gmail.com>");
+MODULE_DESCRIPTION("Asus WMI Sensors Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/asus_wmi_sensors.c b/drivers/hwmon/asus_wmi_sensors.c
new file mode 100644
index 000000000000..c80eee874b6c
--- /dev/null
+++ b/drivers/hwmon/asus_wmi_sensors.c
@@ -0,0 +1,664 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HWMON driver for ASUS motherboards that provides sensor readouts via WMI
+ * interface present in the UEFI of the X370/X470/B450/X399 Ryzen motherboards.
+ *
+ * Copyright (C) 2018-2019 Ed Brindley <kernel@maidavale.org>
+ *
+ * WMI interface provides:
+ * - CPU Core Voltage,
+ * - CPU SOC Voltage,
+ * - DRAM Voltage,
+ * - VDDP Voltage,
+ * - 1.8V PLL Voltage,
+ * - +12V Voltage,
+ * - +5V Voltage,
+ * - 3VSB Voltage,
+ * - VBAT Voltage,
+ * - AVCC3 Voltage,
+ * - SB 1.05V Voltage,
+ * - CPU Core Voltage,
+ * - CPU SOC Voltage,
+ * - DRAM Voltage,
+ * - CPU Fan RPM,
+ * - Chassis Fan 1 RPM,
+ * - Chassis Fan 2 RPM,
+ * - Chassis Fan 3 RPM,
+ * - HAMP Fan RPM,
+ * - Water Pump RPM,
+ * - CPU OPT RPM,
+ * - Water Flow RPM,
+ * - AIO Pump RPM,
+ * - CPU Temperature,
+ * - CPU Socket Temperature,
+ * - Motherboard Temperature,
+ * - Chipset Temperature,
+ * - Tsensor 1 Temperature,
+ * - CPU VRM Temperature,
+ * - Water In,
+ * - Water Out,
+ * - CPU VRM Output Current.
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/units.h>
+#include <linux/wmi.h>
+
+#define ASUSWMI_MONITORING_GUID "466747A0-70EC-11DE-8A39-0800200C9A66"
+#define ASUSWMI_METHODID_GET_VALUE 0x52574543 /* RWEC */
+#define ASUSWMI_METHODID_UPDATE_BUFFER 0x51574543 /* QWEC */
+#define ASUSWMI_METHODID_GET_INFO 0x50574543 /* PWEC */
+#define ASUSWMI_METHODID_GET_NUMBER 0x50574572 /* PWEr */
+#define ASUSWMI_METHODID_GET_VERSION 0x50574574 /* PWEt */
+
+#define ASUS_WMI_MAX_STR_SIZE 32
+
+#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name) { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }, \
+}
+
+static const struct dmi_system_id asus_wmi_dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X399-A"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI EXTREME"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI HERO"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI HERO (WI-FI AC)"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO (WI-FI)"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-E GAMING"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-F GAMING"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B450-I GAMING"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X399-E GAMING"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X470-F GAMING"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X470-I GAMING"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH EXTREME"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH EXTREME ALPHA"),
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, asus_wmi_dmi_table);
+
+enum asus_wmi_sensor_class {
+ VOLTAGE = 0x0,
+ TEMPERATURE_C = 0x1,
+ FAN_RPM = 0x2,
+ CURRENT = 0x3,
+ WATER_FLOW = 0x4,
+};
+
+enum asus_wmi_location {
+ CPU = 0x0,
+ CPU_SOC = 0x1,
+ DRAM = 0x2,
+ MOTHERBOARD = 0x3,
+ CHIPSET = 0x4,
+ AUX = 0x5,
+ VRM = 0x6,
+ COOLER = 0x7
+};
+
+enum asus_wmi_type {
+ SIGNED_INT = 0x0,
+ UNSIGNED_INT = 0x1,
+ SCALED = 0x3,
+};
+
+enum asus_wmi_source {
+ SIO = 0x1,
+ EC = 0x2
+};
+
+static enum hwmon_sensor_types asus_data_types[] = {
+ [VOLTAGE] = hwmon_in,
+ [TEMPERATURE_C] = hwmon_temp,
+ [FAN_RPM] = hwmon_fan,
+ [CURRENT] = hwmon_curr,
+ [WATER_FLOW] = hwmon_fan,
+};
+
+static u32 hwmon_attributes[hwmon_max] = {
+ [hwmon_chip] = HWMON_C_REGISTER_TZ,
+ [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
+ [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
+ [hwmon_curr] = HWMON_C_INPUT | HWMON_C_LABEL,
+ [hwmon_fan] = HWMON_F_INPUT | HWMON_F_LABEL,
+};
+
+/**
+ * struct asus_wmi_sensor_info - sensor info.
+ * @id: sensor id.
+ * @data_type: sensor class e.g. voltage, temp etc.
+ * @location: sensor location.
+ * @name: sensor name.
+ * @source: sensor source.
+ * @type: sensor type signed, unsigned etc.
+ * @cached_value: cached sensor value.
+ */
+struct asus_wmi_sensor_info {
+ u32 id;
+ int data_type;
+ int location;
+ char name[ASUS_WMI_MAX_STR_SIZE];
+ int source;
+ int type;
+ long cached_value;
+};
+
+struct asus_wmi_wmi_info {
+ unsigned long source_last_updated[3]; /* in jiffies */
+ int sensor_count;
+
+ const struct asus_wmi_sensor_info **info[hwmon_max];
+ struct asus_wmi_sensor_info **info_by_id;
+};
+
+struct asus_wmi_sensors {
+ struct asus_wmi_wmi_info wmi;
+ /* lock access to internal cache */
+ struct mutex lock;
+};
+
+/*
+ * Universal helper for calling a WMI method
+ */
+static int asus_wmi_call_method(u32 method_id, u32 *args, struct acpi_buffer *output)
+{
+ struct acpi_buffer input = {(acpi_size) sizeof(*args), args };
+ acpi_status status;
+
+ status = wmi_evaluate_method(ASUSWMI_MONITORING_GUID, 0,
+ method_id, &input, output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Gets the version of the ASUS sensors interface implemented
+ */
+static int asus_wmi_get_version(u32 *version)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ u32 args[] = {0, 0, 0};
+ union acpi_object *obj;
+ int err;
+
+ err = asus_wmi_call_method(ASUSWMI_METHODID_GET_VERSION, args, &output);
+ if (err)
+ return err;
+
+ obj = output.pointer;
+ if (!obj)
+ return -EIO;
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ err = -EIO;
+ goto out_free_obj;
+ }
+
+ err = 0;
+ *version = obj->integer.value;
+
+out_free_obj:
+ ACPI_FREE(obj);
+ return err;
+}
+
+/*
+ * Gets the number of sensor items
+ */
+static int asus_wmi_get_item_count(u32 *count)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ u32 args[] = {0, 0, 0};
+ union acpi_object *obj;
+ int err;
+
+ err = asus_wmi_call_method(ASUSWMI_METHODID_GET_NUMBER, args, &output);
+ if (err)
+ return err;
+
+ obj = output.pointer;
+ if (!obj)
+ return -EIO;
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ err = -EIO;
+ goto out_free_obj;
+ }
+
+ err = 0;
+ *count = obj->integer.value;
+
+out_free_obj:
+ ACPI_FREE(obj);
+ return err;
+}
+
+static int asus_wmi_hwmon_add_chan_info(struct hwmon_channel_info *asus_wmi_hwmon_chan,
+ struct device *dev, int num,
+ enum hwmon_sensor_types type, u32 config)
+{
+ u32 *cfg;
+
+ cfg = devm_kcalloc(dev, num + 1, sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ asus_wmi_hwmon_chan->type = type;
+ asus_wmi_hwmon_chan->config = cfg;
+ memset32(cfg, config, num);
+
+ return 0;
+}
+
+/*
+ * For a given sensor item, returns its details: name, class (voltage,
+ * temperature, fan speed etc.), location, source and type.
+ */
+static int asus_wmi_sensor_info(int index, struct asus_wmi_sensor_info *s)
+{
+ union acpi_object name_obj, data_type_obj, location_obj, source_obj, type_obj;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ u32 args[] = {index, 0};
+ union acpi_object *obj;
+ int err;
+
+ err = asus_wmi_call_method(ASUSWMI_METHODID_GET_INFO, args, &output);
+ if (err)
+ return err;
+
+ s->id = index;
+
+ obj = output.pointer;
+ if (!obj)
+ return -EIO;
+
+ if (obj->type != ACPI_TYPE_PACKAGE) {
+ err = -EIO;
+ goto out_free_obj;
+ }
+
+ if (obj->package.count != 5) {
+ err = -EIO;
+ goto out_free_obj;
+ }
+
+ name_obj = obj->package.elements[0];
+ if (name_obj.type != ACPI_TYPE_STRING) {
+ err = -EIO;
+ goto out_free_obj;
+ }
+
+ strncpy(s->name, name_obj.string.pointer, sizeof(s->name) - 1);
+
+ data_type_obj = obj->package.elements[1];
+ if (data_type_obj.type != ACPI_TYPE_INTEGER) {
+ err = -EIO;
+ goto out_free_obj;
+ }
+
+ s->data_type = data_type_obj.integer.value;
+
+ location_obj = obj->package.elements[2];
+ if (location_obj.type != ACPI_TYPE_INTEGER) {
+ err = -EIO;
+ goto out_free_obj;
+ }
+
+ s->location = location_obj.integer.value;
+
+ source_obj = obj->package.elements[3];
+ if (source_obj.type != ACPI_TYPE_INTEGER) {
+ err = -EIO;
+ goto out_free_obj;
+ }
+
+ s->source = source_obj.integer.value;
+
+ type_obj = obj->package.elements[4];
+ if (type_obj.type != ACPI_TYPE_INTEGER) {
+ err = -EIO;
+ goto out_free_obj;
+ }
+
+ err = 0;
+ s->type = type_obj.integer.value;
+
+out_free_obj:
+ ACPI_FREE(obj);
+ return err;
+}
+
+static int asus_wmi_update_buffer(int source)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ u32 args[] = {source, 0};
+
+ return asus_wmi_call_method(ASUSWMI_METHODID_UPDATE_BUFFER, args, &output);
+}
+
+static int asus_wmi_get_sensor_value(u8 index, long *value)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ u32 args[] = {index, 0};
+ union acpi_object *obj;
+ int err;
+
+ err = asus_wmi_call_method(ASUSWMI_METHODID_GET_VALUE, args, &output);
+ if (err)
+ return err;
+
+ obj = output.pointer;
+ if (!obj)
+ return -EIO;
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ err = -EIO;
+ goto out_free_obj;
+ }
+
+ err = 0;
+ *value = obj->integer.value;
+
+out_free_obj:
+ ACPI_FREE(obj);
+ return err;
+}
+
+static int asus_wmi_update_values_for_source(u8 source, struct asus_wmi_sensors *sensor_data)
+{
+ struct asus_wmi_sensor_info *sensor;
+ long value = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < sensor_data->wmi.sensor_count; i++) {
+ sensor = sensor_data->wmi.info_by_id[i];
+ if (sensor && sensor->source == source) {
+ ret = asus_wmi_get_sensor_value(sensor->id, &value);
+ if (ret)
+ return ret;
+
+ sensor->cached_value = value;
+ }
+ }
+
+ return 0;
+}
+
+static int asus_wmi_scale_sensor_value(u32 value, int data_type)
+{
+ /* FAN_RPM and WATER_FLOW don't need scaling */
+ switch (data_type) {
+ case VOLTAGE:
+ /* value in microVolts */
+ return DIV_ROUND_CLOSEST(value, KILO);
+ case TEMPERATURE_C:
+ /* value in Celsius */
+ return value * MILLIDEGREE_PER_DEGREE;
+ case CURRENT:
+ /* value in Amperes */
+ return value * MILLI;
+ }
+ return value;
+}
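+
+/*
+ * Example (illustrative only): a VOLTAGE reading of 1350400 uV becomes
+ * DIV_ROUND_CLOSEST(1350400, KILO) = 1350 mV, and a TEMPERATURE_C reading
+ * of 38 becomes 38000 millidegrees Celsius, matching the hwmon ABI units.
+ */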
+
+static int asus_wmi_get_cached_value_or_update(const struct asus_wmi_sensor_info *sensor,
+ struct asus_wmi_sensors *sensor_data,
+ u32 *value)
+{
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+
+ if (time_after(jiffies, sensor_data->wmi.source_last_updated[sensor->source] + HZ)) {
+ ret = asus_wmi_update_buffer(sensor->source);
+ if (ret)
+ goto unlock;
+
+ ret = asus_wmi_update_values_for_source(sensor->source, sensor_data);
+ if (ret)
+ goto unlock;
+
+ sensor_data->wmi.source_last_updated[sensor->source] = jiffies;
+ }
+
+ *value = sensor->cached_value;
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+
+ return ret;
+}
+
+/* Now follow the functions that implement the hwmon interface */
+static int asus_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ const struct asus_wmi_sensor_info *sensor;
+ u32 value = 0;
+ int ret;
+
+ struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
+
+ sensor = *(sensor_data->wmi.info[type] + channel);
+
+ ret = asus_wmi_get_cached_value_or_update(sensor, sensor_data, &value);
+ if (ret)
+ return ret;
+
+ *val = asus_wmi_scale_sensor_value(value, sensor->data_type);
+
+ return ret;
+}
+
+static int asus_wmi_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
+ const struct asus_wmi_sensor_info *sensor;
+
+ sensor = *(sensor_data->wmi.info[type] + channel);
+ *str = sensor->name;
+
+ return 0;
+}
+
+static umode_t asus_wmi_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct asus_wmi_sensors *sensor_data = drvdata;
+ const struct asus_wmi_sensor_info *sensor;
+
+ sensor = *(sensor_data->wmi.info[type] + channel);
+ if (sensor)
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_ops asus_wmi_hwmon_ops = {
+ .is_visible = asus_wmi_hwmon_is_visible,
+ .read = asus_wmi_hwmon_read,
+ .read_string = asus_wmi_hwmon_read_string,
+};
+
+static struct hwmon_chip_info asus_wmi_chip_info = {
+ .ops = &asus_wmi_hwmon_ops,
+ .info = NULL,
+};
+
+static int asus_wmi_configure_sensor_setup(struct device *dev,
+ struct asus_wmi_sensors *sensor_data)
+{
+ const struct hwmon_channel_info **ptr_asus_wmi_ci;
+ struct hwmon_channel_info *asus_wmi_hwmon_chan;
+ int nr_count[hwmon_max] = {}, nr_types = 0;
+ struct asus_wmi_sensor_info *temp_sensor;
+ const struct hwmon_chip_info *chip_info;
+ enum hwmon_sensor_types type;
+ struct device *hwdev;
+ int i, idx;
+ int err;
+
+ temp_sensor = devm_kcalloc(dev, 1, sizeof(*temp_sensor), GFP_KERNEL);
+ if (!temp_sensor)
+ return -ENOMEM;
+
+ for (i = 0; i < sensor_data->wmi.sensor_count; i++) {
+ err = asus_wmi_sensor_info(i, temp_sensor);
+ if (err)
+ return err;
+
+ switch (temp_sensor->data_type) {
+ case TEMPERATURE_C:
+ case VOLTAGE:
+ case CURRENT:
+ case FAN_RPM:
+ case WATER_FLOW:
+ type = asus_data_types[temp_sensor->data_type];
+ if (!nr_count[type])
+ nr_types++;
+ nr_count[type]++;
+ break;
+ }
+ }
+
+	if (nr_count[hwmon_temp]) {
+		nr_count[hwmon_chip]++;
+		nr_types++;
+	}
+
+ asus_wmi_hwmon_chan = devm_kcalloc(dev, nr_types,
+ sizeof(*asus_wmi_hwmon_chan),
+ GFP_KERNEL);
+ if (!asus_wmi_hwmon_chan)
+ return -ENOMEM;
+
+ ptr_asus_wmi_ci = devm_kcalloc(dev, nr_types + 1,
+ sizeof(*ptr_asus_wmi_ci), GFP_KERNEL);
+ if (!ptr_asus_wmi_ci)
+ return -ENOMEM;
+
+ asus_wmi_chip_info.info = ptr_asus_wmi_ci;
+ chip_info = &asus_wmi_chip_info;
+
+ sensor_data->wmi.info_by_id = devm_kcalloc(dev, sensor_data->wmi.sensor_count,
+ sizeof(*sensor_data->wmi.info_by_id),
+ GFP_KERNEL);
+ if (!sensor_data->wmi.info_by_id)
+ return -ENOMEM;
+
+ for (type = 0; type < hwmon_max; type++) {
+ if (!nr_count[type])
+ continue;
+
+ err = asus_wmi_hwmon_add_chan_info(asus_wmi_hwmon_chan, dev,
+ nr_count[type], type,
+ hwmon_attributes[type]);
+ if (err)
+ return err;
+
+ *ptr_asus_wmi_ci++ = asus_wmi_hwmon_chan++;
+
+ sensor_data->wmi.info[type] = devm_kcalloc(dev,
+ nr_count[type],
+ sizeof(*sensor_data->wmi.info),
+ GFP_KERNEL);
+ if (!sensor_data->wmi.info[type])
+ return -ENOMEM;
+ }
+
+ for (i = sensor_data->wmi.sensor_count - 1; i >= 0; i--) {
+ temp_sensor = devm_kzalloc(dev, sizeof(*temp_sensor), GFP_KERNEL);
+ if (!temp_sensor)
+ return -ENOMEM;
+
+ err = asus_wmi_sensor_info(i, temp_sensor);
+ if (err)
+ continue;
+
+ switch (temp_sensor->data_type) {
+ case TEMPERATURE_C:
+ case VOLTAGE:
+ case CURRENT:
+ case FAN_RPM:
+ case WATER_FLOW:
+ type = asus_data_types[temp_sensor->data_type];
+ idx = --nr_count[type];
+ *(sensor_data->wmi.info[type] + idx) = temp_sensor;
+ sensor_data->wmi.info_by_id[i] = temp_sensor;
+ break;
+ }
+ }
+
+ dev_dbg(dev, "board has %d sensors",
+ sensor_data->wmi.sensor_count);
+
+ hwdev = devm_hwmon_device_register_with_info(dev, "asus_wmi_sensors",
+ sensor_data, chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwdev);
+}
+
+static int asus_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct asus_wmi_sensors *sensor_data;
+ struct device *dev = &wdev->dev;
+ u32 version = 0;
+
+ if (!dmi_check_system(asus_wmi_dmi_table))
+ return -ENODEV;
+
+ sensor_data = devm_kzalloc(dev, sizeof(*sensor_data), GFP_KERNEL);
+ if (!sensor_data)
+ return -ENOMEM;
+
+ if (asus_wmi_get_version(&version))
+ return -ENODEV;
+
+ if (asus_wmi_get_item_count(&sensor_data->wmi.sensor_count))
+ return -ENODEV;
+
+ if (sensor_data->wmi.sensor_count <= 0 || version < 2) {
+ dev_info(dev, "version: %u with %d sensors is unsupported\n",
+ version, sensor_data->wmi.sensor_count);
+
+ return -ENODEV;
+ }
+
+ mutex_init(&sensor_data->lock);
+
+ dev_set_drvdata(dev, sensor_data);
+
+ return asus_wmi_configure_sensor_setup(dev, sensor_data);
+}
+
+static const struct wmi_device_id asus_wmi_id_table[] = {
+ { ASUSWMI_MONITORING_GUID, NULL },
+ { }
+};
+
+static struct wmi_driver asus_sensors_wmi_driver = {
+ .driver = {
+ .name = "asus_wmi_sensors",
+ },
+ .id_table = asus_wmi_id_table,
+ .probe = asus_wmi_probe,
+};
+module_wmi_driver(asus_sensors_wmi_driver);
+
+MODULE_AUTHOR("Ed Brindley <kernel@maidavale.org>");
+MODULE_DESCRIPTION("Asus WMI Sensors Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 5596c211f38d..9949eeb79378 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -113,12 +113,12 @@ MODULE_PARM_DESC(fan_max, "Maximum configurable fan speed (default: autodetect)"
struct smm_regs {
unsigned int eax;
- unsigned int ebx __packed;
- unsigned int ecx __packed;
- unsigned int edx __packed;
- unsigned int esi __packed;
- unsigned int edi __packed;
-};
+ unsigned int ebx;
+ unsigned int ecx;
+ unsigned int edx;
+ unsigned int esi;
+ unsigned int edi;
+} __packed;
static const char * const temp_labels[] = {
"CPU",
@@ -449,13 +449,12 @@ static int i8k_get_power_status(void)
* Procfs interface
*/
-static int
-i8k_ioctl_unlocked(struct file *fp, struct dell_smm_data *data, unsigned int cmd, unsigned long arg)
+static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
- int val = 0;
- int speed, err;
- unsigned char buff[16];
+ struct dell_smm_data *data = pde_data(file_inode(fp));
int __user *argp = (int __user *)arg;
+ int speed, err;
+ int val = 0;
if (!argp)
return -EINVAL;
@@ -468,15 +467,19 @@ i8k_ioctl_unlocked(struct file *fp, struct dell_smm_data *data, unsigned int cmd
val = (data->bios_version[0] << 16) |
(data->bios_version[1] << 8) | data->bios_version[2];
- break;
+ if (copy_to_user(argp, &val, sizeof(val)))
+ return -EFAULT;
+
+ return 0;
case I8K_MACHINE_ID:
if (restricted && !capable(CAP_SYS_ADMIN))
return -EPERM;
- strscpy_pad(buff, data->bios_machineid, sizeof(buff));
- break;
+ if (copy_to_user(argp, data->bios_machineid, sizeof(data->bios_machineid)))
+ return -EFAULT;
+ return 0;
case I8K_FN_STATUS:
val = i8k_get_fn_status();
break;
@@ -513,11 +516,13 @@ i8k_ioctl_unlocked(struct file *fp, struct dell_smm_data *data, unsigned int cmd
if (copy_from_user(&speed, argp + 1, sizeof(int)))
return -EFAULT;
+ mutex_lock(&data->i8k_mutex);
err = i8k_set_fan(data, val, speed);
if (err < 0)
- return err;
-
- val = i8k_get_fan_status(data, val);
+ val = err;
+ else
+ val = i8k_get_fan_status(data, val);
+ mutex_unlock(&data->i8k_mutex);
break;
default:
@@ -527,39 +532,12 @@ i8k_ioctl_unlocked(struct file *fp, struct dell_smm_data *data, unsigned int cmd
if (val < 0)
return val;
- switch (cmd) {
- case I8K_BIOS_VERSION:
- if (copy_to_user(argp, &val, 4))
- return -EFAULT;
-
- break;
- case I8K_MACHINE_ID:
- if (copy_to_user(argp, buff, 16))
- return -EFAULT;
-
- break;
- default:
- if (copy_to_user(argp, &val, sizeof(int)))
- return -EFAULT;
-
- break;
- }
+ if (copy_to_user(argp, &val, sizeof(int)))
+ return -EFAULT;
return 0;
}
-static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
- struct dell_smm_data *data = PDE_DATA(file_inode(fp));
- long ret;
-
- mutex_lock(&data->i8k_mutex);
- ret = i8k_ioctl_unlocked(fp, data, cmd, arg);
- mutex_unlock(&data->i8k_mutex);
-
- return ret;
-}
-
/*
* Print the information for /proc/i8k.
*/
@@ -607,7 +585,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
static int i8k_open_fs(struct inode *inode, struct file *file)
{
- return single_open(file, i8k_proc_show, PDE_DATA(inode));
+ return single_open(file, i8k_proc_show, pde_data(inode));
}
static const struct proc_ops i8k_proc_ops = {
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 4673d403759a..938a8b9ec70d 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -49,6 +49,7 @@
#define SIO_F81768D_ID 0x1210 /* Chipset ID */
#define SIO_F81865_ID 0x0704 /* Chipset ID */
#define SIO_F81866_ID 0x1010 /* Chipset ID */
+#define SIO_F81966_ID 0x1502 /* Chipset ID */
#define REGION_LENGTH 8
#define ADDR_REG_OFFSET 5
@@ -2672,6 +2673,7 @@ static int __init f71882fg_find(int sioaddr, struct f71882fg_sio_data *sio_data)
sio_data->type = f81865f;
break;
case SIO_F81866_ID:
+ case SIO_F81966_ID:
sio_data->type = f81866a;
break;
default:
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
new file mode 100644
index 000000000000..50eb9c5e132e
--- /dev/null
+++ b/drivers/hwmon/ina238.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Texas Instruments INA238 power monitor chip
+ * Datasheet: https://www.ti.com/product/ina238
+ *
+ * Copyright (C) 2021 Nathan Rossi <nathan.rossi@digi.com>
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <linux/platform_data/ina2xx.h>
+
+/* INA238 register definitions */
+#define INA238_CONFIG 0x0
+#define INA238_ADC_CONFIG 0x1
+#define INA238_SHUNT_CALIBRATION 0x2
+#define INA238_SHUNT_VOLTAGE 0x4
+#define INA238_BUS_VOLTAGE 0x5
+#define INA238_DIE_TEMP 0x6
+#define INA238_CURRENT 0x7
+#define INA238_POWER 0x8
+#define INA238_DIAG_ALERT 0xb
+#define INA238_SHUNT_OVER_VOLTAGE 0xc
+#define INA238_SHUNT_UNDER_VOLTAGE 0xd
+#define INA238_BUS_OVER_VOLTAGE 0xe
+#define INA238_BUS_UNDER_VOLTAGE 0xf
+#define INA238_TEMP_LIMIT 0x10
+#define INA238_POWER_LIMIT 0x11
+#define INA238_DEVICE_ID 0x3f
+
+#define INA238_CONFIG_ADCRANGE BIT(4)
+
+#define INA238_DIAG_ALERT_TMPOL BIT(7)
+#define INA238_DIAG_ALERT_SHNTOL BIT(6)
+#define INA238_DIAG_ALERT_SHNTUL BIT(5)
+#define INA238_DIAG_ALERT_BUSOL BIT(4)
+#define INA238_DIAG_ALERT_BUSUL BIT(3)
+#define INA238_DIAG_ALERT_POL BIT(2)
+
+#define INA238_REGISTERS 0x11
+
+#define INA238_RSHUNT_DEFAULT 10000 /* uOhm */
+
+/* Default configuration of device on reset. */
+#define INA238_CONFIG_DEFAULT 0
+/* 16 sample averaging, 1052us conversion time, continuous mode */
+#define INA238_ADC_CONFIG_DEFAULT 0xfb6a
+/* Configure alerts to be based on averaged value (SLOWALERT) */
+#define INA238_DIAG_ALERT_DEFAULT 0x2000
+/*
+ * This driver uses a fixed calibration value in order to scale current/power
+ * based on a fixed shunt resistor value. This allows for conversion within the
+ * device to avoid integer limits whilst current/power accuracy is scaled
+ * relative to the shunt resistor value within the driver. This is similar to
+ * how the ina2xx driver handles current/power scaling.
+ *
+ * The end result of this is that increasing shunt values (from a fixed 20 mOhm
+ * shunt) increase the effective current/power accuracy whilst limiting the
+ * range and decreasing shunt values decrease the effective accuracy but
+ * increase the range.
+ *
+ * The value of the Current register is calculated given the following:
+ * Current (A) = (shunt voltage register * 5) * calibration / 81920
+ *
+ * The maximum shunt voltage is 163.835 mV (0x7fff, ADC_RANGE = 0, gain = 4).
+ * With the maximum current value of 0x7fff and a fixed shunt value results in
+ * a calibration value of 16384 (0x4000).
+ *
+ * 0x7fff = (0x7fff * 5) * calibration / 81920
+ * calibration = 0x4000
+ *
+ * Equivalent calibration is applied for the Power register (maximum value for
+ * bus voltage is 102396.875 mV, 0x7fff), where the maximum power that can
+ * occur is ~16776192 uW (register value 0x147a8).
+ *
+ * This scaling means the resulting values for Current and Power registers need
+ * to be scaled by the difference between the fixed shunt resistor and the
+ * actual shunt resistor:
+ *
+ * shunt = 0x4000 / (819.2 * 10^6) / 0.001 = 20000 uOhms (with 1mA/lsb)
+ *
+ * Current (mA) = register value * 20000 / rshunt / 4 * gain
+ * Power (W) = 0.2 * register value * 20000 / rshunt / 4 * gain
+ */
+#define INA238_CALIBRATION_VALUE 16384
+#define INA238_FIXED_SHUNT 20000
+
+#define INA238_SHUNT_VOLTAGE_LSB 5 /* 5 uV/lsb */
+#define INA238_BUS_VOLTAGE_LSB 3125 /* 3.125 mV/lsb */
+#define INA238_DIE_TEMP_LSB 125 /* 125 mC/lsb */
+
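+/*
+ * Worked example (illustrative, assuming the 10000 uOhm default shunt and
+ * the default gain of 4): a CURRENT register reading of 1000 yields
+ *
+ *	1000 * INA238_FIXED_SHUNT * 4 / (10000 * 4) = 2000 mA
+ *
+ * Doubling the shunt value halves the reported current for the same
+ * register reading, as described above.
+ */
+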
+static struct regmap_config ina238_regmap_config = {
+ .max_register = INA238_REGISTERS,
+ .reg_bits = 8,
+ .val_bits = 16,
+};
+
+struct ina238_data {
+ struct i2c_client *client;
+ struct mutex config_lock;
+ struct regmap *regmap;
+ u32 rshunt;
+ int gain;
+};
+
+static int ina238_read_reg24(const struct i2c_client *client, u8 reg, u32 *val)
+{
+ u8 data[3];
+ int err;
+
+ /* 24-bit register read */
+ err = i2c_smbus_read_i2c_block_data(client, reg, 3, data);
+ if (err < 0)
+ return err;
+ if (err != 3)
+ return -EIO;
+ *val = (data[0] << 16) | (data[1] << 8) | data[2];
+
+ return 0;
+}
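+
+/*
+ * The POWER result is the only register wider than 16 bits; regmap is
+ * configured for 16-bit values, so it is read above as a raw 3-byte SMBus
+ * block transfer and assembled big-endian.
+ */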
+
+static int ina238_read_in(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int reg, mask;
+ int regval;
+ int err;
+
+ switch (channel) {
+ case 0:
+ switch (attr) {
+ case hwmon_in_input:
+ reg = INA238_SHUNT_VOLTAGE;
+ break;
+ case hwmon_in_max:
+ reg = INA238_SHUNT_OVER_VOLTAGE;
+ break;
+ case hwmon_in_min:
+ reg = INA238_SHUNT_UNDER_VOLTAGE;
+ break;
+ case hwmon_in_max_alarm:
+ reg = INA238_DIAG_ALERT;
+ mask = INA238_DIAG_ALERT_SHNTOL;
+ break;
+ case hwmon_in_min_alarm:
+ reg = INA238_DIAG_ALERT;
+ mask = INA238_DIAG_ALERT_SHNTUL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case 1:
+ switch (attr) {
+ case hwmon_in_input:
+ reg = INA238_BUS_VOLTAGE;
+ break;
+ case hwmon_in_max:
+ reg = INA238_BUS_OVER_VOLTAGE;
+ break;
+ case hwmon_in_min:
+ reg = INA238_BUS_UNDER_VOLTAGE;
+ break;
+ case hwmon_in_max_alarm:
+ reg = INA238_DIAG_ALERT;
+ mask = INA238_DIAG_ALERT_BUSOL;
+ break;
+ case hwmon_in_min_alarm:
+ reg = INA238_DIAG_ALERT;
+ mask = INA238_DIAG_ALERT_BUSUL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = regmap_read(data->regmap, reg, &regval);
+ if (err < 0)
+ return err;
+
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_max:
+ case hwmon_in_min:
+ /* signed register, value in mV */
+ regval = (s16)regval;
+ if (channel == 0)
+ /* gain of 1 -> LSB / 4 */
+ *val = (regval * INA238_SHUNT_VOLTAGE_LSB) /
+ (1000 * (4 - data->gain + 1));
+ else
+ *val = (regval * INA238_BUS_VOLTAGE_LSB) / 1000;
+ break;
+ case hwmon_in_max_alarm:
+ case hwmon_in_min_alarm:
+ *val = !!(regval & mask);
+ break;
+ }
+
+ return 0;
+}
+
+static int ina238_write_in(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int regval;
+
+ if (attr != hwmon_in_max && attr != hwmon_in_min)
+ return -EOPNOTSUPP;
+
+ /* convert decimal to register value */
+ switch (channel) {
+ case 0:
+ /* signed value, clamp to max range +/-163 mV */
+ regval = clamp_val(val, -163, 163);
+ regval = (regval * 1000 * (4 - data->gain + 1)) /
+ INA238_SHUNT_VOLTAGE_LSB;
+ regval = clamp_val(regval, S16_MIN, S16_MAX);
+
+ switch (attr) {
+ case hwmon_in_max:
+ return regmap_write(data->regmap,
+ INA238_SHUNT_OVER_VOLTAGE, regval);
+ case hwmon_in_min:
+ return regmap_write(data->regmap,
+ INA238_SHUNT_UNDER_VOLTAGE, regval);
+ default:
+ return -EOPNOTSUPP;
+ }
+ case 1:
+ /* signed value, positive values only. Clamp to max 102.396 V */
+ regval = clamp_val(val, 0, 102396);
+ regval = (regval * 1000) / INA238_BUS_VOLTAGE_LSB;
+ regval = clamp_val(regval, 0, S16_MAX);
+
+ switch (attr) {
+ case hwmon_in_max:
+ return regmap_write(data->regmap,
+ INA238_BUS_OVER_VOLTAGE, regval);
+ case hwmon_in_min:
+ return regmap_write(data->regmap,
+ INA238_BUS_UNDER_VOLTAGE, regval);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ina238_read_current(struct device *dev, u32 attr, long *val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int regval;
+ int err;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ err = regmap_read(data->regmap, INA238_CURRENT, &regval);
+ if (err < 0)
+ return err;
+
+		/* Signed register, fixed 1 mA current LSB, result in mA */
+ *val = div_s64((s16)regval * INA238_FIXED_SHUNT * data->gain,
+ data->rshunt * 4);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ina238_read_power(struct device *dev, u32 attr, long *val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ long long power;
+ int regval;
+ int err;
+
+ switch (attr) {
+ case hwmon_power_input:
+ err = ina238_read_reg24(data->client, INA238_POWER, &regval);
+ if (err)
+ return err;
+
+ /* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT *
+ data->gain, 20 * data->rshunt);
+ /* Clamp value to maximum value of long */
+ *val = clamp_val(power, 0, LONG_MAX);
+ break;
+ case hwmon_power_max:
+ err = regmap_read(data->regmap, INA238_POWER_LIMIT, &regval);
+ if (err)
+ return err;
+
+ /*
+		 * Truncated 24-bit compare register; the lower 8 bits are
+		 * truncated. Same conversion to/from uW as the POWER register.
+ */
+ power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT *
+ data->gain, 20 * data->rshunt);
+ /* Clamp value to maximum value of long */
+ *val = clamp_val(power, 0, LONG_MAX);
+ break;
+ case hwmon_power_max_alarm:
+ err = regmap_read(data->regmap, INA238_DIAG_ALERT, &regval);
+ if (err)
+ return err;
+
+ *val = !!(regval & INA238_DIAG_ALERT_POL);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ina238_write_power(struct device *dev, u32 attr, long val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ long regval;
+
+ if (attr != hwmon_power_max)
+ return -EOPNOTSUPP;
+
+ /*
+	 * Unsigned positive values, compared against the 24-bit power register;
+	 * the lower 8 bits are truncated. Same conversion to/from uW as the
+	 * POWER register.
+	 */
+	regval = clamp_val(val, 0, LONG_MAX);
+	regval = div_u64(regval * 20ULL * data->rshunt,
+ 1000ULL * INA238_FIXED_SHUNT * data->gain);
+ regval = clamp_val(regval >> 8, 0, U16_MAX);
+
+ return regmap_write(data->regmap, INA238_POWER_LIMIT, regval);
+}
+
+static int ina238_read_temp(struct device *dev, u32 attr, long *val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int regval;
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = regmap_read(data->regmap, INA238_DIE_TEMP, &regval);
+ if (err)
+ return err;
+
+ /* Signed, bits 15-4 of register, result in mC */
+ *val = ((s16)regval >> 4) * INA238_DIE_TEMP_LSB;
+ break;
+ case hwmon_temp_max:
+ err = regmap_read(data->regmap, INA238_TEMP_LIMIT, &regval);
+ if (err)
+ return err;
+
+ /* Signed, bits 15-4 of register, result in mC */
+ *val = ((s16)regval >> 4) * INA238_DIE_TEMP_LSB;
+ break;
+ case hwmon_temp_max_alarm:
+ err = regmap_read(data->regmap, INA238_DIAG_ALERT, &regval);
+ if (err)
+ return err;
+
+ *val = !!(regval & INA238_DIAG_ALERT_TMPOL);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ina238_write_temp(struct device *dev, u32 attr, long val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int regval;
+
+ if (attr != hwmon_temp_max)
+ return -EOPNOTSUPP;
+
+ /* Signed, bits 15-4 of register */
+ regval = (val / INA238_DIE_TEMP_LSB) << 4;
+ regval = clamp_val(regval, S16_MIN, S16_MAX) & 0xfff0;
+
+ return regmap_write(data->regmap, INA238_TEMP_LIMIT, regval);
+}
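+
+/*
+ * Example (illustrative only): writing an 85000 millidegree limit stores
+ * (85000 / 125) << 4 = 680 << 4 = 0x2a80 in INA238_TEMP_LIMIT; the read
+ * path shifts bits 15-4 back down and multiplies by the 125 mC LSB.
+ */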
+
+static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ return ina238_read_in(dev, attr, channel, val);
+ case hwmon_curr:
+ return ina238_read_current(dev, attr, val);
+ case hwmon_power:
+ return ina238_read_power(dev, attr, val);
+ case hwmon_temp:
+ return ina238_read_temp(dev, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int ina238_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int err;
+
+ mutex_lock(&data->config_lock);
+
+ switch (type) {
+ case hwmon_in:
+ err = ina238_write_in(dev, attr, channel, val);
+ break;
+ case hwmon_power:
+ err = ina238_write_power(dev, attr, val);
+ break;
+ case hwmon_temp:
+ err = ina238_write_temp(dev, attr, val);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&data->config_lock);
+ return err;
+}
+
+static umode_t ina238_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_max_alarm:
+ case hwmon_in_min_alarm:
+ return 0444;
+ case hwmon_in_max:
+ case hwmon_in_min:
+ return 0644;
+ default:
+ return 0;
+ }
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_max_alarm:
+ return 0444;
+ case hwmon_power_max:
+ return 0644;
+ default:
+ return 0;
+ }
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ case hwmon_temp_max:
+ return 0644;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+#define INA238_HWMON_IN_CONFIG (HWMON_I_INPUT | \
+ HWMON_I_MAX | HWMON_I_MAX_ALARM | \
+ HWMON_I_MIN | HWMON_I_MIN_ALARM)
+
+static const struct hwmon_channel_info *ina238_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ /* 0: shunt voltage */
+ INA238_HWMON_IN_CONFIG,
+ /* 1: bus voltage */
+ INA238_HWMON_IN_CONFIG),
+ HWMON_CHANNEL_INFO(curr,
+ /* 0: current through shunt */
+ HWMON_C_INPUT),
+ HWMON_CHANNEL_INFO(power,
+ /* 0: power */
+ HWMON_P_INPUT | HWMON_P_MAX | HWMON_P_MAX_ALARM),
+ HWMON_CHANNEL_INFO(temp,
+ /* 0: die temperature */
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops ina238_hwmon_ops = {
+ .is_visible = ina238_is_visible,
+ .read = ina238_read,
+ .write = ina238_write,
+};
+
+static const struct hwmon_chip_info ina238_chip_info = {
+ .ops = &ina238_hwmon_ops,
+ .info = ina238_info,
+};
+
+static int ina238_probe(struct i2c_client *client)
+{
+ struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct ina238_data *data;
+ int config;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ mutex_init(&data->config_lock);
+
+ data->regmap = devm_regmap_init_i2c(client, &ina238_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ /* load shunt value */
+ data->rshunt = INA238_RSHUNT_DEFAULT;
+ if (device_property_read_u32(dev, "shunt-resistor", &data->rshunt) < 0 && pdata)
+ data->rshunt = pdata->shunt_uohms;
+ if (data->rshunt == 0) {
+		dev_err(dev, "invalid shunt resistor value %u\n", data->rshunt);
+ return -EINVAL;
+ }
+
+ /* load shunt gain value */
+ if (device_property_read_u32(dev, "ti,shunt-gain", &data->gain) < 0)
+ data->gain = 4; /* Default of ADCRANGE = 0 */
+ if (data->gain != 1 && data->gain != 4) {
+ dev_err(dev, "invalid shunt gain value %u\n", data->gain);
+ return -EINVAL;
+ }
+
+ /* Setup CONFIG register */
+ config = INA238_CONFIG_DEFAULT;
+ if (data->gain == 1)
+ config |= INA238_CONFIG_ADCRANGE; /* ADCRANGE = 1 is /1 */
+ ret = regmap_write(data->regmap, INA238_CONFIG, config);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return -ENODEV;
+ }
+
+ /* Setup ADC_CONFIG register */
+ ret = regmap_write(data->regmap, INA238_ADC_CONFIG,
+ INA238_ADC_CONFIG_DEFAULT);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return -ENODEV;
+ }
+
+ /* Setup SHUNT_CALIBRATION register with fixed value */
+ ret = regmap_write(data->regmap, INA238_SHUNT_CALIBRATION,
+ INA238_CALIBRATION_VALUE);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return -ENODEV;
+ }
+
+ /* Setup alert/alarm configuration */
+ ret = regmap_write(data->regmap, INA238_DIAG_ALERT,
+ INA238_DIAG_ALERT_DEFAULT);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
+ &ina238_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_info(dev, "power monitor %s (Rshunt = %u uOhm, gain = %u)\n",
+ client->name, data->rshunt, data->gain);
+
+ return 0;
+}
+
+static const struct i2c_device_id ina238_id[] = {
+ { "ina238", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ina238_id);
+
+static const struct of_device_id __maybe_unused ina238_of_match[] = {
+ { .compatible = "ti,ina238" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ina238_of_match);
+
+static struct i2c_driver ina238_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "ina238",
+ .of_match_table = of_match_ptr(ina238_of_match),
+ },
+ .probe_new = ina238_probe,
+ .id_table = ina238_id,
+};
+
+module_i2c_driver(ina238_driver);
+
+MODULE_AUTHOR("Nathan Rossi <nathan.rossi@digi.com>");
+MODULE_DESCRIPTION("ina238 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 4a03d010ec5a..cb347a6bd8d9 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -137,6 +137,9 @@ static const unsigned short normal_i2c[] = {
#define CAT34TS04_DEVID 0x2200
#define CAT34TS04_DEVID_MASK 0xfff0
+#define N34TS04_DEVID 0x2230
+#define N34TS04_DEVID_MASK 0xfff0
+
/* ST Microelectronics */
#define STTS424_DEVID 0x0101
#define STTS424_DEVID_MASK 0xffff
@@ -181,6 +184,7 @@ static struct jc42_chips jc42_chips[] = {
{ ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK },
{ ONS_MANID, CAT34TS02C_DEVID, CAT34TS02C_DEVID_MASK },
{ ONS_MANID, CAT34TS04_DEVID, CAT34TS04_DEVID_MASK },
+ { ONS_MANID, N34TS04_DEVID, N34TS04_DEVID_MASK },
{ NXP_MANID, SE98_DEVID, SE98_DEVID_MASK },
{ STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK },
{ STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK },
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 3618a924e78e..4e239bd75b1d 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -76,26 +76,6 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define ZEN_CUR_TEMP_SHIFT 21
#define ZEN_CUR_TEMP_RANGE_SEL_MASK BIT(19)
-#define ZEN_SVI_BASE 0x0005A000
-
-/* F17h thermal registers through SMN */
-#define F17H_M01H_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0xc)
-#define F17H_M01H_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10)
-#define F17H_M31H_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0x14)
-#define F17H_M31H_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10)
-
-#define F17H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */
-#define F17H_M01H_CFACTOR_ISOC 250000 /* 0.25A / LSB */
-#define F17H_M31H_CFACTOR_ICORE 1000000 /* 1A / LSB */
-#define F17H_M31H_CFACTOR_ISOC 310000 /* 0.31A / LSB */
-
-/* F19h thermal registers through SMN */
-#define F19H_M01_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0x14)
-#define F19H_M01_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10)
-
-#define F19H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */
-#define F19H_M01H_CFACTOR_ISOC 310000 /* 0.31A / LSB */
-
struct k10temp_data {
struct pci_dev *pdev;
void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
@@ -191,6 +171,10 @@ static const char *k10temp_temp_label[] = {
"Tccd6",
"Tccd7",
"Tccd8",
+ "Tccd9",
+ "Tccd10",
+ "Tccd11",
+ "Tccd12",
};
static int k10temp_read_labels(struct device *dev,
@@ -226,7 +210,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
if (*val < 0)
*val = 0;
break;
- case 2 ... 9: /* Tccd{1-8} */
+ case 2 ... 13: /* Tccd{1-12} */
amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
&regval);
@@ -361,6 +345,10 @@ static const struct hwmon_channel_info *k10temp_info[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
NULL
};
@@ -457,6 +445,11 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data->ccd_offset = 0x300;
k10temp_get_ccd_support(pdev, data, 8);
break;
+ case 0x10 ... 0x1f:
+ case 0xa0 ... 0xaf:
+ data->ccd_offset = 0x300;
+ k10temp_get_ccd_support(pdev, data, 12);
+ break;
}
} else {
data->read_htcreg = read_htcreg_pci;
@@ -497,6 +490,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 74019dff2550..1c9493c70813 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
- .max_convrate = 8,
+ .max_convrate = 7,
},
[lm86] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
@@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 9,
},
[max6646] = {
- .flags = LM90_HAVE_CRIT,
+ .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6654] = {
+ .flags = LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 7,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = {
},
[max6680] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
- | LM90_HAVE_CRIT_ALRM_SWP,
+ | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 7,
},
@@ -848,7 +849,7 @@ static int lm90_update_device(struct device *dev)
* Re-enable ALERT# output if it was originally enabled and
* relevant alarms are all clear
*/
- if (!(data->config_orig & 0x80) &&
+ if ((client->irq || !(data->config_orig & 0x80)) &&
!(data->alarms & data->alert_alarms)) {
if (data->config & 0x80) {
dev_dbg(&client->dev, "Re-enabling ALERT#\n");
@@ -1807,22 +1808,22 @@ static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
if (st & LM90_STATUS_LLOW)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min, 0);
+ hwmon_temp_min_alarm, 0);
if (st & LM90_STATUS_RLOW)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min, 1);
+ hwmon_temp_min_alarm, 1);
if (st2 & MAX6696_STATUS2_R2LOW)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min, 2);
+ hwmon_temp_min_alarm, 2);
if (st & LM90_STATUS_LHIGH)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max, 0);
+ hwmon_temp_max_alarm, 0);
if (st & LM90_STATUS_RHIGH)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max, 1);
+ hwmon_temp_max_alarm, 1);
if (st2 & MAX6696_STATUS2_R2HIGH)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max, 2);
+ hwmon_temp_max_alarm, 2);
return true;
}
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 2a4bed0ab226..7352d2b3c756 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -248,8 +248,7 @@ static int ltc2992_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask
gpio_status = reg;
- gpio_nr = 0;
- for_each_set_bit_from(gpio_nr, mask, LTC2992_GPIO_NR) {
+ for_each_set_bit(gpio_nr, mask, LTC2992_GPIO_NR) {
if (test_bit(LTC2992_GPIO_BIT(gpio_nr), &gpio_status))
set_bit(gpio_nr, bits);
}
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 868243dba1ee..1ba1e3145969 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -93,7 +93,7 @@
#define VM_CH_REQ BIT(21)
#define IP_TMR 0x05
-#define POWER_DELAY_CYCLE_256 0x80
+#define POWER_DELAY_CYCLE_256 0x100
#define POWER_DELAY_CYCLE_64 0x40
#define PVT_POLL_DELAY_US 20
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 57ce8633a725..098d12b9ecda 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1175,7 +1175,7 @@ static inline u8 in_to_reg(u32 val, u8 nr)
struct nct6775_data {
int addr; /* IO base of hw monitor block */
- int sioreg; /* SIO register address */
+ struct nct6775_sio_data *sio_data;
enum kinds kind;
const char *name;
@@ -3154,10 +3154,8 @@ store_speed_tolerance(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- high = fan_from_reg16(data->target_speed[nr],
- data->fan_div[nr]) + val;
- low = fan_from_reg16(data->target_speed[nr],
- data->fan_div[nr]) - val;
+ high = fan_from_reg16(data->target_speed[nr], data->fan_div[nr]) + val;
+ low = fan_from_reg16(data->target_speed[nr], data->fan_div[nr]) - val;
if (low <= 0)
low = 1;
if (high < low)
@@ -3561,7 +3559,7 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nct6775_data *data = dev_get_drvdata(dev);
- struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
+ struct nct6775_sio_data *sio_data = data->sio_data;
int nr = to_sensor_dev_attr(attr)->index - INTRUSION_ALARM_BASE;
unsigned long val;
u8 reg;
@@ -3969,7 +3967,7 @@ static int nct6775_probe(struct platform_device *pdev)
return -ENOMEM;
data->kind = sio_data->kind;
- data->sioreg = sio_data->sioreg;
+ data->sio_data = sio_data;
if (sio_data->access == access_direct) {
data->addr = res->start;
@@ -4995,11 +4993,13 @@ static const char * const asus_wmi_boards[] = {
"ROG CROSSHAIR VIII FORMULA",
"ROG CROSSHAIR VIII HERO",
"ROG CROSSHAIR VIII IMPACT",
+ "ROG STRIX B550-A GAMING",
"ROG STRIX B550-E GAMING",
"ROG STRIX B550-F GAMING",
"ROG STRIX B550-F GAMING (WI-FI)",
"ROG STRIX B550-I GAMING",
"ROG STRIX X570-F GAMING",
+ "ROG STRIX X570-I GAMING",
"ROG STRIX Z390-E GAMING",
"ROG STRIX Z490-I GAMING",
"TUF GAMING B550M-PLUS",
@@ -5038,7 +5038,7 @@ static int __init sensors_nct6775_init(void)
board_name);
if (err >= 0) {
/* if reading chip id via WMI succeeds, use WMI */
- if (!nct6775_asuswmi_read(0, NCT6775_PORT_CHIPID, &tmp)) {
+ if (!nct6775_asuswmi_read(0, NCT6775_PORT_CHIPID, &tmp) && tmp) {
pr_info("Using Asus WMI to access %#x chip.\n", tmp);
access = access_asuswmi;
} else {
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index cf26c44f2b88..414204f5704c 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -9,18 +9,23 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/fixp-arith.h>
-
-#include <linux/platform_data/ntc_thermistor.h>
-
#include <linux/iio/consumer.h>
-
#include <linux/hwmon.h>
+enum ntc_thermistor_type {
+ TYPE_B57330V2103,
+ TYPE_B57891S0103,
+ TYPE_NCPXXWB473,
+ TYPE_NCPXXWF104,
+ TYPE_NCPXXWL333,
+ TYPE_NCPXXXH103,
+};
+
struct ntc_compensation {
int temp_c;
unsigned int ohm;
@@ -40,6 +45,7 @@ enum {
NTC_NCP15XH103,
NTC_NCP18WB473,
NTC_NCP21WB473,
+ NTC_SSG1404001221,
NTC_LAST,
};
@@ -53,6 +59,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
[NTC_NCP15XH103] = { "ncp15xh103", TYPE_NCPXXXH103 },
[NTC_NCP18WB473] = { "ncp18wb473", TYPE_NCPXXWB473 },
[NTC_NCP21WB473] = { "ncp21wb473", TYPE_NCPXXWB473 },
+ [NTC_SSG1404001221] = { "ssg1404-001221", TYPE_NCPXXWB473 },
[NTC_LAST] = { },
};
@@ -313,16 +320,30 @@ static const struct ntc_type ntc_type[] = {
NTC_TYPE(TYPE_NCPXXXH103, ncpXXxh103),
};
+/*
+ * pullup_uv, pullup_ohm, pulldown_ohm, and connect are required.
+ *
+ * How to set up pullup_ohm, pulldown_ohm, and connect is
+ * described in Documentation/hwmon/ntc_thermistor.rst
+ *
+ * pullup_ohm/pulldown_ohm: 0 means infinite / not connected
+ *
+ * chan: iio_channel pointer to communicate with the ADC that the
+ * thermistor uses for conversion of the analog values.
+ */
struct ntc_data {
- struct ntc_thermistor_platform_data *pdata;
const struct ntc_compensation *comp;
int n_comp;
+ unsigned int pullup_uv;
+ unsigned int pullup_ohm;
+ unsigned int pulldown_ohm;
+ enum { NTC_CONNECTED_POSITIVE, NTC_CONNECTED_GROUND } connect;
+ struct iio_channel *chan;
};
-#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
-static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
+static int ntc_adc_iio_read(struct ntc_data *data)
{
- struct iio_channel *channel = pdata->chan;
+ struct iio_channel *channel = data->chan;
int uv, ret;
ret = iio_read_channel_processed_scale(channel, &uv, 1000);
@@ -342,103 +363,13 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
ret = iio_convert_raw_to_processed(channel, raw, &uv, 1000);
if (ret < 0) {
/* Assume 12 bit ADC with vref at pullup_uv */
- uv = (pdata->pullup_uv * (s64)raw) >> 12;
+ uv = (data->pullup_uv * (s64)raw) >> 12;
}
}
return uv;
}
-static const struct of_device_id ntc_match[] = {
- { .compatible = "epcos,b57330v2103",
- .data = &ntc_thermistor_id[NTC_B57330V2103]},
- { .compatible = "epcos,b57891s0103",
- .data = &ntc_thermistor_id[NTC_B57891S0103] },
- { .compatible = "murata,ncp03wb473",
- .data = &ntc_thermistor_id[NTC_NCP03WB473] },
- { .compatible = "murata,ncp03wf104",
- .data = &ntc_thermistor_id[NTC_NCP03WF104] },
- { .compatible = "murata,ncp15wb473",
- .data = &ntc_thermistor_id[NTC_NCP15WB473] },
- { .compatible = "murata,ncp15wl333",
- .data = &ntc_thermistor_id[NTC_NCP15WL333] },
- { .compatible = "murata,ncp15xh103",
- .data = &ntc_thermistor_id[NTC_NCP15XH103] },
- { .compatible = "murata,ncp18wb473",
- .data = &ntc_thermistor_id[NTC_NCP18WB473] },
- { .compatible = "murata,ncp21wb473",
- .data = &ntc_thermistor_id[NTC_NCP21WB473] },
-
- /* Usage of vendor name "ntc" is deprecated */
- { .compatible = "ntc,ncp03wb473",
- .data = &ntc_thermistor_id[NTC_NCP03WB473] },
- { .compatible = "ntc,ncp15wb473",
- .data = &ntc_thermistor_id[NTC_NCP15WB473] },
- { .compatible = "ntc,ncp15wl333",
- .data = &ntc_thermistor_id[NTC_NCP15WL333] },
- { .compatible = "ntc,ncp18wb473",
- .data = &ntc_thermistor_id[NTC_NCP18WB473] },
- { .compatible = "ntc,ncp21wb473",
- .data = &ntc_thermistor_id[NTC_NCP21WB473] },
- { },
-};
-MODULE_DEVICE_TABLE(of, ntc_match);
-
-static struct ntc_thermistor_platform_data *
-ntc_thermistor_parse_dt(struct device *dev)
-{
- struct iio_channel *chan;
- enum iio_chan_type type;
- struct device_node *np = dev->of_node;
- struct ntc_thermistor_platform_data *pdata;
- int ret;
-
- if (!np)
- return NULL;
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- chan = devm_iio_channel_get(dev, NULL);
- if (IS_ERR(chan))
- return ERR_CAST(chan);
-
- ret = iio_get_channel_type(chan, &type);
- if (ret < 0)
- return ERR_PTR(ret);
-
- if (type != IIO_VOLTAGE)
- return ERR_PTR(-EINVAL);
-
- if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
- return ERR_PTR(-ENODEV);
- if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
- return ERR_PTR(-ENODEV);
- if (of_property_read_u32(np, "pulldown-ohm", &pdata->pulldown_ohm))
- return ERR_PTR(-ENODEV);
-
- if (of_find_property(np, "connected-positive", NULL))
- pdata->connect = NTC_CONNECTED_POSITIVE;
- else /* status change should be possible if not always on. */
- pdata->connect = NTC_CONNECTED_GROUND;
-
- pdata->chan = chan;
- pdata->read_uv = ntc_adc_iio_read;
-
- return pdata;
-}
-#else
-static struct ntc_thermistor_platform_data *
-ntc_thermistor_parse_dt(struct device *dev)
-{
- return NULL;
-}
-
-#define ntc_match NULL
-
-#endif
-
static inline u64 div64_u64_safe(u64 dividend, u64 divisor)
{
if (divisor == 0 && dividend == 0)
@@ -450,24 +381,23 @@ static inline u64 div64_u64_safe(u64 dividend, u64 divisor)
static int get_ohm_of_thermistor(struct ntc_data *data, unsigned int uv)
{
- struct ntc_thermistor_platform_data *pdata = data->pdata;
- u32 puv = pdata->pullup_uv;
+ u32 puv = data->pullup_uv;
u64 n, puo, pdo;
- puo = pdata->pullup_ohm;
- pdo = pdata->pulldown_ohm;
+ puo = data->pullup_ohm;
+ pdo = data->pulldown_ohm;
if (uv == 0)
- return (pdata->connect == NTC_CONNECTED_POSITIVE) ?
+ return (data->connect == NTC_CONNECTED_POSITIVE) ?
INT_MAX : 0;
if (uv >= puv)
- return (pdata->connect == NTC_CONNECTED_POSITIVE) ?
+ return (data->connect == NTC_CONNECTED_POSITIVE) ?
0 : INT_MAX;
- if (pdata->connect == NTC_CONNECTED_POSITIVE && puo == 0)
+ if (data->connect == NTC_CONNECTED_POSITIVE && puo == 0)
n = div_u64(pdo * (puv - uv), uv);
- else if (pdata->connect == NTC_CONNECTED_GROUND && pdo == 0)
+ else if (data->connect == NTC_CONNECTED_GROUND && pdo == 0)
n = div_u64(puo * uv, puv - uv);
- else if (pdata->connect == NTC_CONNECTED_POSITIVE)
+ else if (data->connect == NTC_CONNECTED_POSITIVE)
n = div64_u64_safe(pdo * puo * (puv - uv),
puo * uv - pdo * (puv - uv));
else
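For the common ground-connected divider with no extra pulldown resistor, the
branch above solves uv = pullup_uv * R / (pullup_ohm + R) for the thermistor
resistance R, giving R = pullup_ohm * uv / (pullup_uv - uv); the remaining
branches handle the positive-connected and parallel-resistor configurations
analogously.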
@@ -567,16 +497,10 @@ static int ntc_thermistor_get_ohm(struct ntc_data *data)
{
int read_uv;
- if (data->pdata->read_ohm)
- return data->pdata->read_ohm();
-
- if (data->pdata->read_uv) {
- read_uv = data->pdata->read_uv(data->pdata);
- if (read_uv < 0)
- return read_uv;
- return get_ohm_of_thermistor(data, read_uv);
- }
- return -EINVAL;
+ read_uv = ntc_adc_iio_read(data);
+ if (read_uv < 0)
+ return read_uv;
+ return get_ohm_of_thermistor(data, read_uv);
}
static int ntc_read(struct device *dev, enum hwmon_sensor_types type,
@@ -638,58 +562,74 @@ static const struct hwmon_chip_info ntc_chip_info = {
.info = ntc_info,
};
-static int ntc_thermistor_probe(struct platform_device *pdev)
+static int ntc_thermistor_parse_props(struct device *dev,
+ struct ntc_data *data)
{
- struct device *dev = &pdev->dev;
- const struct of_device_id *of_id =
- of_match_device(of_match_ptr(ntc_match), dev);
- const struct platform_device_id *pdev_id;
- struct ntc_thermistor_platform_data *pdata;
- struct device *hwmon_dev;
- struct ntc_data *data;
+ struct iio_channel *chan;
+ enum iio_chan_type type;
+ int ret;
- pdata = ntc_thermistor_parse_dt(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- else if (pdata == NULL)
- pdata = dev_get_platdata(dev);
+ chan = devm_iio_channel_get(dev, NULL);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- if (!pdata) {
- dev_err(dev, "No platform init data supplied.\n");
- return -ENODEV;
- }
+ ret = iio_get_channel_type(chan, &type);
+ if (ret < 0)
+ return ret;
- /* Either one of the two is required. */
- if (!pdata->read_uv && !pdata->read_ohm) {
- dev_err(dev,
- "Both read_uv and read_ohm missing. Need either one of the two.\n");
+ if (type != IIO_VOLTAGE)
return -EINVAL;
- }
- if (pdata->read_uv && pdata->read_ohm) {
- dev_warn(dev,
- "Only one of read_uv and read_ohm is needed; ignoring read_uv.\n");
- pdata->read_uv = NULL;
- }
+ ret = device_property_read_u32(dev, "pullup-uv", &data->pullup_uv);
+ if (ret)
+ return dev_err_probe(dev, ret, "pullup-uv not specified\n");
- if (pdata->read_uv && (pdata->pullup_uv == 0 ||
- (pdata->pullup_ohm == 0 && pdata->connect ==
- NTC_CONNECTED_GROUND) ||
- (pdata->pulldown_ohm == 0 && pdata->connect ==
- NTC_CONNECTED_POSITIVE) ||
- (pdata->connect != NTC_CONNECTED_POSITIVE &&
- pdata->connect != NTC_CONNECTED_GROUND))) {
- dev_err(dev, "Required data to use read_uv not supplied.\n");
- return -EINVAL;
- }
+ ret = device_property_read_u32(dev, "pullup-ohm", &data->pullup_ohm);
+ if (ret)
+ return dev_err_probe(dev, ret, "pullup-ohm not specified\n");
+
+ ret = device_property_read_u32(dev, "pulldown-ohm", &data->pulldown_ohm);
+ if (ret)
+ return dev_err_probe(dev, ret, "pulldown-ohm not specified\n");
+
+ if (device_property_read_bool(dev, "connected-positive"))
+ data->connect = NTC_CONNECTED_POSITIVE;
+ else /* status change should be possible if not always on. */
+ data->connect = NTC_CONNECTED_GROUND;
+
+ data->chan = chan;
+
+ return 0;
+}
- data = devm_kzalloc(dev, sizeof(struct ntc_data), GFP_KERNEL);
+static int ntc_thermistor_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct platform_device_id *pdev_id;
+ struct device *hwmon_dev;
+ struct ntc_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
+ ret = ntc_thermistor_parse_props(dev, data);
+ if (ret)
+ return ret;
+
+ if (data->pullup_uv == 0 ||
+ (data->pullup_ohm == 0 && data->connect ==
+ NTC_CONNECTED_GROUND) ||
+ (data->pulldown_ohm == 0 && data->connect ==
+ NTC_CONNECTED_POSITIVE) ||
+ (data->connect != NTC_CONNECTED_POSITIVE &&
+ data->connect != NTC_CONNECTED_GROUND)) {
+ dev_err(dev, "Required data to use NTC driver not supplied.\n");
+ return -EINVAL;
+ }
- data->pdata = pdata;
+ pdev_id = device_get_match_data(dev);
if (pdev_id->driver_data >= ARRAY_SIZE(ntc_type)) {
dev_err(dev, "Unknown device type: %lu(%s)\n",
@@ -714,10 +654,47 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ntc_match[] = {
+ { .compatible = "epcos,b57330v2103",
+ .data = &ntc_thermistor_id[NTC_B57330V2103]},
+ { .compatible = "epcos,b57891s0103",
+ .data = &ntc_thermistor_id[NTC_B57891S0103] },
+ { .compatible = "murata,ncp03wb473",
+ .data = &ntc_thermistor_id[NTC_NCP03WB473] },
+ { .compatible = "murata,ncp03wf104",
+ .data = &ntc_thermistor_id[NTC_NCP03WF104] },
+ { .compatible = "murata,ncp15wb473",
+ .data = &ntc_thermistor_id[NTC_NCP15WB473] },
+ { .compatible = "murata,ncp15wl333",
+ .data = &ntc_thermistor_id[NTC_NCP15WL333] },
+ { .compatible = "murata,ncp15xh103",
+ .data = &ntc_thermistor_id[NTC_NCP15XH103] },
+ { .compatible = "murata,ncp18wb473",
+ .data = &ntc_thermistor_id[NTC_NCP18WB473] },
+ { .compatible = "murata,ncp21wb473",
+ .data = &ntc_thermistor_id[NTC_NCP21WB473] },
+ { .compatible = "samsung,1404-001221",
+ .data = &ntc_thermistor_id[NTC_SSG1404001221] },
+
+ /* Usage of vendor name "ntc" is deprecated */
+ { .compatible = "ntc,ncp03wb473",
+ .data = &ntc_thermistor_id[NTC_NCP03WB473] },
+ { .compatible = "ntc,ncp15wb473",
+ .data = &ntc_thermistor_id[NTC_NCP15WB473] },
+ { .compatible = "ntc,ncp15wl333",
+ .data = &ntc_thermistor_id[NTC_NCP15WL333] },
+ { .compatible = "ntc,ncp18wb473",
+ .data = &ntc_thermistor_id[NTC_NCP18WB473] },
+ { .compatible = "ntc,ncp21wb473",
+ .data = &ntc_thermistor_id[NTC_NCP21WB473] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ntc_match);
+
static struct platform_driver ntc_thermistor_driver = {
.driver = {
.name = "ntc-thermistor",
- .of_match_table = of_match_ptr(ntc_match),
+ .of_match_table = ntc_match,
},
.probe = ntc_thermistor_probe,
.id_table = ntc_thermistor_id,
diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c
new file mode 100644
index 000000000000..dd892ff5a3e8
--- /dev/null
+++ b/drivers/hwmon/nzxt-smart2.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Reverse-engineered NZXT RGB & Fan Controller/Smart Device v2 driver.
+ *
+ * Copyright (c) 2021 Aleksandr Mezin
+ */
+
+#include <linux/hid.h>
+#include <linux/hwmon.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+/*
+ * The device has only 3 fan channels/connectors. But all HID reports have
+ * space reserved for up to 8 channels.
+ */
+#define FAN_CHANNELS 3
+#define FAN_CHANNELS_MAX 8
+
+#define UPDATE_INTERVAL_DEFAULT_MS 1000
+
+/* These strings match labels on the device exactly */
+static const char *const fan_label[] = {
+ "FAN 1",
+ "FAN 2",
+ "FAN 3",
+};
+
+static const char *const curr_label[] = {
+ "FAN 1 Current",
+ "FAN 2 Current",
+ "FAN 3 Current",
+};
+
+static const char *const in_label[] = {
+ "FAN 1 Voltage",
+ "FAN 2 Voltage",
+ "FAN 3 Voltage",
+};
+
+enum {
+ INPUT_REPORT_ID_FAN_CONFIG = 0x61,
+ INPUT_REPORT_ID_FAN_STATUS = 0x67,
+};
+
+enum {
+ FAN_STATUS_REPORT_SPEED = 0x02,
+ FAN_STATUS_REPORT_VOLTAGE = 0x04,
+};
+
+enum {
+ FAN_TYPE_NONE = 0,
+ FAN_TYPE_DC = 1,
+ FAN_TYPE_PWM = 2,
+};
+
+struct unknown_static_data {
+ /*
+ * Some configuration data? Stays the same after fan speed changes,
+ * changes in fan configuration, reboots and driver reloads.
+ *
+ * The same data in multiple report types.
+ *
+ * Byte 12 seems to be the number of fan channels, but I am not sure.
+ */
+ u8 unknown1[14];
+} __packed;
+
+/*
+ * The device sends this input report in response to "detect fans" command:
+ * a 2-byte output report { 0x60, 0x03 }.
+ */
+struct fan_config_report {
+ /* report_id should be INPUT_REPORT_ID_FAN_CONFIG = 0x61 */
+ u8 report_id;
+ /* Always 0x03 */
+ u8 magic;
+ struct unknown_static_data unknown_data;
+ /* Fan type as detected by the device. See FAN_TYPE_* enum. */
+ u8 fan_type[FAN_CHANNELS_MAX];
+} __packed;
+
+/*
+ * The device sends these reports at a fixed interval (update interval) -
+ * one report with type = FAN_STATUS_REPORT_SPEED, and one report with type =
+ * FAN_STATUS_REPORT_VOLTAGE per update interval.
+ */
+struct fan_status_report {
+ /* report_id should be INPUT_REPORT_ID_FAN_STATUS = 0x67 */
+ u8 report_id;
+ /* FAN_STATUS_REPORT_SPEED = 0x02 or FAN_STATUS_REPORT_VOLTAGE = 0x04 */
+ u8 type;
+ struct unknown_static_data unknown_data;
+ /* Fan type as detected by the device. See FAN_TYPE_* enum. */
+ u8 fan_type[FAN_CHANNELS_MAX];
+
+ union {
+ /* When type == FAN_STATUS_REPORT_SPEED */
+ struct {
+ /*
+ * Fan speed, in RPM. Zero for channels without fans
+ * connected.
+ */
+ __le16 fan_rpm[FAN_CHANNELS_MAX];
+ /*
+ * Fan duty cycle, in percent. Non-zero even for
+ * channels without fans connected.
+ */
+ u8 duty_percent[FAN_CHANNELS_MAX];
+ /*
+ * Exactly the same values as duty_percent[], non-zero
+ * for disconnected fans too.
+ */
+ u8 duty_percent_dup[FAN_CHANNELS_MAX];
+ /* "Case Noise" in db */
+ u8 noise_db;
+ } __packed fan_speed;
+ /* When type == FAN_STATUS_REPORT_VOLTAGE */
+ struct {
+ /*
+ * Voltage, in millivolts. Non-zero even when fan is
+ * not connected.
+ */
+ __le16 fan_in[FAN_CHANNELS_MAX];
+ /*
+ * Current, in milliamperes. Near-zero when
+ * disconnected.
+ */
+ __le16 fan_current[FAN_CHANNELS_MAX];
+ } __packed fan_voltage;
+ } __packed;
+} __packed;
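+
+/*
+ * With the packed layout above, sizeof(struct fan_config_report) is 24
+ * bytes and sizeof(struct fan_status_report) is 57 bytes, so both fit
+ * within the 64-byte reports the device uses.
+ */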
+
+#define OUTPUT_REPORT_SIZE 64
+
+enum {
+ OUTPUT_REPORT_ID_INIT_COMMAND = 0x60,
+ OUTPUT_REPORT_ID_SET_FAN_SPEED = 0x62,
+};
+
+enum {
+ INIT_COMMAND_SET_UPDATE_INTERVAL = 0x02,
+ INIT_COMMAND_DETECT_FANS = 0x03,
+};
+
+/*
+ * This output report sets pwm duty cycle/target fan speed for one or more
+ * channels.
+ */
+struct set_fan_speed_report {
+ /* report_id should be OUTPUT_REPORT_ID_SET_FAN_SPEED = 0x62 */
+ u8 report_id;
+ /* Should be 0x01 */
+ u8 magic;
+ /* To change fan speed on i-th channel, set i-th bit here */
+ u8 channel_bit_mask;
+ /*
+ * Fan duty cycle/target speed in percent. For voltage-controlled fans,
+ * the minimal voltage (duty_percent = 1) is about 9V.
+ * Setting duty_percent to 0 (if the channel is selected in
+ * channel_bit_mask) turns off the fan completely (regardless of the
+ * control mode).
+ */
+ u8 duty_percent[FAN_CHANNELS_MAX];
+} __packed;
+
+struct drvdata {
+ struct hid_device *hid;
+ struct device *hwmon;
+
+ u8 fan_duty_percent[FAN_CHANNELS];
+ u16 fan_rpm[FAN_CHANNELS];
+ bool pwm_status_received;
+
+ u16 fan_in[FAN_CHANNELS];
+ u16 fan_curr[FAN_CHANNELS];
+ bool voltage_status_received;
+
+ u8 fan_type[FAN_CHANNELS];
+ bool fan_config_received;
+
+ /*
+ * wq is used to wait for *_received flags to become true.
+ * All accesses to *_received flags and fan_* arrays are performed with
+ * wq.lock held.
+ */
+ wait_queue_head_t wq;
+ /*
+ * mutex is used to:
+ * 1) Prevent concurrent conflicting changes to update interval and pwm
+ * values (after sending an output hid report, the corresponding field
+ * in drvdata must be updated, and only then new output reports can be
+ * sent).
+ * 2) Synchronize access to output_buffer (well, the buffer is here,
+ * because synchronization is necessary anyway - so why not get rid of
+ * a kmalloc?).
+ */
+ struct mutex mutex;
+ long update_interval;
+ u8 output_buffer[OUTPUT_REPORT_SIZE];
+};
+
+static long scale_pwm_value(long val, long orig_max, long new_max)
+{
+ if (val <= 0)
+ return 0;
+
+ /*
+ * Positive values should not become zero: 0 completely turns off the
+ * fan.
+ */
+ return max(1L, DIV_ROUND_CLOSEST(min(val, orig_max) * new_max, orig_max));
+}
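+
+/*
+ * For the byte-to-percent direction used by set_pwm() below (orig_max =
+ * 255, new_max = 100), val = 1 maps to max(1, DIV_ROUND_CLOSEST(100, 255))
+ * = 1 rather than 0, so the smallest nonzero sysfs pwm value cannot stop
+ * a fan.
+ */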
+
+static void handle_fan_config_report(struct drvdata *drvdata, void *data, int size)
+{
+ struct fan_config_report *report = data;
+ int i;
+
+ if (size < sizeof(struct fan_config_report))
+ return;
+
+ if (report->magic != 0x03)
+ return;
+
+ spin_lock(&drvdata->wq.lock);
+
+ for (i = 0; i < FAN_CHANNELS; i++)
+ drvdata->fan_type[i] = report->fan_type[i];
+
+ drvdata->fan_config_received = true;
+ wake_up_all_locked(&drvdata->wq);
+ spin_unlock(&drvdata->wq.lock);
+}
+
+static void handle_fan_status_report(struct drvdata *drvdata, void *data, int size)
+{
+ struct fan_status_report *report = data;
+ int i;
+
+ if (size < sizeof(struct fan_status_report))
+ return;
+
+ spin_lock(&drvdata->wq.lock);
+
+ /*
+ * The device sends INPUT_REPORT_ID_FAN_CONFIG = 0x61 report in response
+ * to "detect fans" command. Only accept other data after getting 0x61,
+ * to make sure that fan detection is complete. In particular, fan
+ * detection resets pwm values.
+ */
+ if (!drvdata->fan_config_received) {
+ spin_unlock(&drvdata->wq.lock);
+ return;
+ }
+
+ for (i = 0; i < FAN_CHANNELS; i++) {
+ if (drvdata->fan_type[i] == report->fan_type[i])
+ continue;
+
+ /*
+ * This should not happen (if my expectations about the device
+ * are correct).
+ *
+ * Even if userspace sends a fan detect command through
+ * hidraw, the fan config report should arrive first.
+ */
+ hid_warn_once(drvdata->hid,
+ "Fan %d type changed unexpectedly from %d to %d",
+ i, drvdata->fan_type[i], report->fan_type[i]);
+ drvdata->fan_type[i] = report->fan_type[i];
+ }
+
+ switch (report->type) {
+ case FAN_STATUS_REPORT_SPEED:
+ for (i = 0; i < FAN_CHANNELS; i++) {
+ drvdata->fan_rpm[i] =
+ get_unaligned_le16(&report->fan_speed.fan_rpm[i]);
+ drvdata->fan_duty_percent[i] =
+ report->fan_speed.duty_percent[i];
+ }
+
+ drvdata->pwm_status_received = true;
+ wake_up_all_locked(&drvdata->wq);
+ break;
+
+ case FAN_STATUS_REPORT_VOLTAGE:
+ for (i = 0; i < FAN_CHANNELS; i++) {
+ drvdata->fan_in[i] =
+ get_unaligned_le16(&report->fan_voltage.fan_in[i]);
+ drvdata->fan_curr[i] =
+ get_unaligned_le16(&report->fan_voltage.fan_current[i]);
+ }
+
+ drvdata->voltage_status_received = true;
+ wake_up_all_locked(&drvdata->wq);
+ break;
+ }
+
+ spin_unlock(&drvdata->wq.lock);
+}
+
+static umode_t nzxt_smart2_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_enable:
+ return 0644;
+
+ default:
+ return 0444;
+ }
+
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+
+ default:
+ return 0444;
+ }
+
+ default:
+ return 0444;
+ }
+}
+
+static int nzxt_smart2_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct drvdata *drvdata = dev_get_drvdata(dev);
+ int res = -EINVAL;
+
+ if (type == hwmon_chip) {
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ *val = drvdata->update_interval;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ spin_lock_irq(&drvdata->wq.lock);
+
+ switch (type) {
+ case hwmon_pwm:
+ /*
+ * fancontrol:
+ * 1) remembers pwm* values when it starts
+ * 2) needs pwm*_enable to be 1 on controlled fans
+ * So make sure we have correct data before allowing pwm* reads.
+ * Returning errors for pwm or fan speed reads can even cause
+ * fancontrol to shut down. So the wait is unavoidable.
+ */
+ switch (attr) {
+ case hwmon_pwm_enable:
+ res = wait_event_interruptible_locked_irq(drvdata->wq,
+ drvdata->fan_config_received);
+ if (res)
+ goto unlock;
+
+ *val = drvdata->fan_type[channel] != FAN_TYPE_NONE;
+ break;
+
+ case hwmon_pwm_mode:
+ res = wait_event_interruptible_locked_irq(drvdata->wq,
+ drvdata->fan_config_received);
+ if (res)
+ goto unlock;
+
+ *val = drvdata->fan_type[channel] == FAN_TYPE_PWM;
+ break;
+
+ case hwmon_pwm_input:
+ res = wait_event_interruptible_locked_irq(drvdata->wq,
+ drvdata->pwm_status_received);
+ if (res)
+ goto unlock;
+
+ *val = scale_pwm_value(drvdata->fan_duty_percent[channel],
+ 100, 255);
+ break;
+ }
+ break;
+
+ case hwmon_fan:
+ /*
+ * It's not strictly necessary to wait for *_received in the
+ * remaining cases (fancontrol doesn't care about them). But I'm
+ * doing it to have consistent behavior.
+ */
+ if (attr == hwmon_fan_input) {
+ res = wait_event_interruptible_locked_irq(drvdata->wq,
+ drvdata->pwm_status_received);
+ if (res)
+ goto unlock;
+
+ *val = drvdata->fan_rpm[channel];
+ }
+ break;
+
+ case hwmon_in:
+ if (attr == hwmon_in_input) {
+ res = wait_event_interruptible_locked_irq(drvdata->wq,
+ drvdata->voltage_status_received);
+ if (res)
+ goto unlock;
+
+ *val = drvdata->fan_in[channel];
+ }
+ break;
+
+ case hwmon_curr:
+ if (attr == hwmon_curr_input) {
+ res = wait_event_interruptible_locked_irq(drvdata->wq,
+ drvdata->voltage_status_received);
+ if (res)
+ goto unlock;
+
+ *val = drvdata->fan_curr[channel];
+ }
+ break;
+
+ default:
+ break;
+ }
+
+unlock:
+ spin_unlock_irq(&drvdata->wq.lock);
+ return res;
+}
+
+static int send_output_report(struct drvdata *drvdata, const void *data,
+ size_t data_size)
+{
+ int ret;
+
+ if (data_size > sizeof(drvdata->output_buffer))
+ return -EINVAL;
+
+ memcpy(drvdata->output_buffer, data, data_size);
+
+ if (data_size < sizeof(drvdata->output_buffer))
+ memset(drvdata->output_buffer + data_size, 0,
+ sizeof(drvdata->output_buffer) - data_size);
+
+ ret = hid_hw_output_report(drvdata->hid, drvdata->output_buffer,
+ sizeof(drvdata->output_buffer));
+ return ret < 0 ? ret : 0;
+}
+
+static int set_pwm(struct drvdata *drvdata, int channel, long val)
+{
+ int ret;
+ u8 duty_percent = scale_pwm_value(val, 255, 100);
+
+ struct set_fan_speed_report report = {
+ .report_id = OUTPUT_REPORT_ID_SET_FAN_SPEED,
+ .magic = 1,
+ .channel_bit_mask = 1 << channel
+ };
+
+ ret = mutex_lock_interruptible(&drvdata->mutex);
+ if (ret)
+ return ret;
+
+ report.duty_percent[channel] = duty_percent;
+ ret = send_output_report(drvdata, &report, sizeof(report));
+ if (ret)
+ goto unlock;
+
+ /*
+ * pwmconfig and fancontrol scripts expect pwm writes to take effect
+ * immediately (i.e. a read from pwm* sysfs should return the value
+ * written to it). The device seems to always accept pwm values - even
+ * when there is no fan connected - so update pwm status without waiting
+ * for a report, to make pwmconfig and fancontrol happy. Worst case -
+ * if the device didn't accept the new pwm value for some reason (never
+ * seen in practice) - it will be reported incorrectly only until the
+ * next update. This avoids "fan stuck" messages from pwmconfig, and
+ * fancontrol setting fan speed to 100% during shutdown.
+ */
+ spin_lock_bh(&drvdata->wq.lock);
+ drvdata->fan_duty_percent[channel] = duty_percent;
+ spin_unlock_bh(&drvdata->wq.lock);
+
+unlock:
+ mutex_unlock(&drvdata->mutex);
+ return ret;
+}
+
+/*
+ * Workaround for fancontrol/pwmconfig trying to write to pwm*_enable even if it
+ * already is 1 and read-only. Otherwise, fancontrol won't restore pwm on
+ * shutdown properly.
+ */
+static int set_pwm_enable(struct drvdata *drvdata, int channel, long val)
+{
+ long expected_val;
+ int res;
+
+ spin_lock_irq(&drvdata->wq.lock);
+
+ res = wait_event_interruptible_locked_irq(drvdata->wq,
+ drvdata->fan_config_received);
+ if (res) {
+ spin_unlock_irq(&drvdata->wq.lock);
+ return res;
+ }
+
+ expected_val = drvdata->fan_type[channel] != FAN_TYPE_NONE;
+
+ spin_unlock_irq(&drvdata->wq.lock);
+
+ return (val == expected_val) ? 0 : -EOPNOTSUPP;
+}
+
+/*
+ * Control byte | Actual update interval in seconds
+ * 0xff | 65.5
+ * 0xf7 | 63.46
+ * 0x7f | 32.74
+ * 0x3f | 16.36
+ * 0x1f | 8.17
+ * 0x0f | 4.07
+ * 0x07 | 2.02
+ * 0x03 | 1.00
+ * 0x02 | 0.744
+ * 0x01 | 0.488
+ * 0x00 | 0.25
+ */
+static u8 update_interval_to_control_byte(long interval)
+{
+ if (interval <= 250)
+ return 0;
+
+ return clamp_val(1 + DIV_ROUND_CLOSEST(interval - 488, 256), 0, 255);
+}
+
+static long control_byte_to_update_interval(u8 control_byte)
+{
+ if (control_byte == 0)
+ return 250;
+
+ return 488 + (control_byte - 1) * 256;
+}
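+
+/*
+ * Round trip for the 1-second row of the table above:
+ * update_interval_to_control_byte(1000) = 1 + DIV_ROUND_CLOSEST(512, 256)
+ * = 0x03, and control_byte_to_update_interval(0x03) = 488 + 2 * 256 =
+ * 1000 ms.
+ */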
+
+static int set_update_interval(struct drvdata *drvdata, long val)
+{
+ u8 control = update_interval_to_control_byte(val);
+ u8 report[] = {
+ OUTPUT_REPORT_ID_INIT_COMMAND,
+ INIT_COMMAND_SET_UPDATE_INTERVAL,
+ 0x01,
+ 0xe8,
+ control,
+ 0x01,
+ 0xe8,
+ control,
+ };
+ int ret;
+
+ ret = send_output_report(drvdata, report, sizeof(report));
+ if (ret)
+ return ret;
+
+ drvdata->update_interval = control_byte_to_update_interval(control);
+ return 0;
+}
+
+static int init_device(struct drvdata *drvdata, long update_interval)
+{
+ int ret;
+ static const u8 detect_fans_report[] = {
+ OUTPUT_REPORT_ID_INIT_COMMAND,
+ INIT_COMMAND_DETECT_FANS,
+ };
+
+ ret = send_output_report(drvdata, detect_fans_report,
+ sizeof(detect_fans_report));
+ if (ret)
+ return ret;
+
+ return set_update_interval(drvdata, update_interval);
+}
+
+static int nzxt_smart2_hwmon_write(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_enable:
+ return set_pwm_enable(drvdata, channel, val);
+
+ case hwmon_pwm_input:
+ return set_pwm(drvdata, channel, val);
+
+ default:
+ return -EINVAL;
+ }
+
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ ret = mutex_lock_interruptible(&drvdata->mutex);
+ if (ret)
+ return ret;
+
+ ret = set_update_interval(drvdata, val);
+
+ mutex_unlock(&drvdata->mutex);
+ return ret;
+
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nzxt_smart2_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_fan:
+ *str = fan_label[channel];
+ return 0;
+ case hwmon_curr:
+ *str = curr_label[channel];
+ return 0;
+ case hwmon_in:
+ *str = in_label[channel];
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct hwmon_ops nzxt_smart2_hwmon_ops = {
+ .is_visible = nzxt_smart2_hwmon_is_visible,
+ .read = nzxt_smart2_hwmon_read,
+ .read_string = nzxt_smart2_hwmon_read_string,
+ .write = nzxt_smart2_hwmon_write,
+};
+
+static const struct hwmon_channel_info *nzxt_smart2_channel_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_MODE | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_MODE | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_MODE | HWMON_PWM_ENABLE),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL),
+ NULL
+};
+
+static const struct hwmon_chip_info nzxt_smart2_chip_info = {
+ .ops = &nzxt_smart2_hwmon_ops,
+ .info = nzxt_smart2_channel_info,
+};
+
+static int nzxt_smart2_hid_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct drvdata *drvdata = hid_get_drvdata(hdev);
+ u8 report_id = *data;
+
+ switch (report_id) {
+ case INPUT_REPORT_ID_FAN_CONFIG:
+ handle_fan_config_report(drvdata, data, size);
+ break;
+
+ case INPUT_REPORT_ID_FAN_STATUS:
+ handle_fan_status_report(drvdata, data, size);
+ break;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused nzxt_smart2_hid_reset_resume(struct hid_device *hdev)
+{
+ struct drvdata *drvdata = hid_get_drvdata(hdev);
+
+ /*
+ * Userspace is still frozen (so no concurrent sysfs attribute access
+ * is possible), but raw_event can already be called concurrently.
+ */
+ spin_lock_bh(&drvdata->wq.lock);
+ drvdata->fan_config_received = false;
+ drvdata->pwm_status_received = false;
+ drvdata->voltage_status_received = false;
+ spin_unlock_bh(&drvdata->wq.lock);
+
+ return init_device(drvdata, drvdata->update_interval);
+}
+
+static int nzxt_smart2_hid_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct drvdata *drvdata;
+ int ret;
+
+ drvdata = devm_kzalloc(&hdev->dev, sizeof(struct drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->hid = hdev;
+ hid_set_drvdata(hdev, drvdata);
+
+ init_waitqueue_head(&drvdata->wq);
+
+ mutex_init(&drvdata->mutex);
+ devm_add_action(&hdev->dev, (void (*)(void *))mutex_destroy,
+ &drvdata->mutex);
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret)
+ return ret;
+
+ ret = hid_hw_open(hdev);
+ if (ret)
+ goto out_hw_stop;
+
+ hid_device_io_start(hdev);
+
+ init_device(drvdata, UPDATE_INTERVAL_DEFAULT_MS);
+
+ drvdata->hwmon =
+ hwmon_device_register_with_info(&hdev->dev, "nzxtsmart2", drvdata,
+ &nzxt_smart2_chip_info, NULL);
+ if (IS_ERR(drvdata->hwmon)) {
+ ret = PTR_ERR(drvdata->hwmon);
+ goto out_hw_close;
+ }
+
+ return 0;
+
+out_hw_close:
+ hid_hw_close(hdev);
+
+out_hw_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void nzxt_smart2_hid_remove(struct hid_device *hdev)
+{
+ struct drvdata *drvdata = hid_get_drvdata(hdev);
+
+ hwmon_device_unregister(drvdata->hwmon);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id nzxt_smart2_hid_id_table[] = {
+ { HID_USB_DEVICE(0x1e71, 0x2006) }, /* NZXT Smart Device V2 */
+ { HID_USB_DEVICE(0x1e71, 0x200d) }, /* NZXT Smart Device V2 */
+ { HID_USB_DEVICE(0x1e71, 0x2009) }, /* NZXT RGB & Fan Controller */
+ { HID_USB_DEVICE(0x1e71, 0x200e) }, /* NZXT RGB & Fan Controller */
+ { HID_USB_DEVICE(0x1e71, 0x2010) }, /* NZXT RGB & Fan Controller */
+ {},
+};
+
+static struct hid_driver nzxt_smart2_hid_driver = {
+ .name = "nzxt-smart2",
+ .id_table = nzxt_smart2_hid_id_table,
+ .probe = nzxt_smart2_hid_probe,
+ .remove = nzxt_smart2_hid_remove,
+ .raw_event = nzxt_smart2_hid_raw_event,
+#ifdef CONFIG_PM
+ .reset_resume = nzxt_smart2_hid_reset_resume,
+#endif
+};
+
+static int __init nzxt_smart2_init(void)
+{
+ return hid_register_driver(&nzxt_smart2_hid_driver);
+}
+
+static void __exit nzxt_smart2_exit(void)
+{
+ hid_unregister_driver(&nzxt_smart2_hid_driver);
+}
+
+MODULE_DEVICE_TABLE(hid, nzxt_smart2_hid_id_table);
+MODULE_AUTHOR("Aleksandr Mezin <mezin.alexander@gmail.com>");
+MODULE_DESCRIPTION("Driver for NZXT RGB & Fan Controller/Smart Device V2");
+MODULE_LICENSE("GPL");
+
+/*
+ * With module_init()/module_hid_driver() and the driver built into the kernel:
+ *
+ * Driver 'nzxt_smart2' was unable to register with bus_type 'hid' because the
+ * bus was not initialized.
+ */
+late_initcall(nzxt_smart2_init);
+module_exit(nzxt_smart2_exit);
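
As the comments above describe, the device is driven entirely through HID
reports: a 64-byte output report starting { 0x60, 0x03 } triggers fan
detection, and the device answers with a 0x61 fan-config input report whose
fan_type bytes start at offset 16. Below is a minimal userspace sketch of
that handshake, assuming a hypothetical /dev/hidraw0 node bound to the
controller (the protocol is reverse-engineered, as the driver itself notes).

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* { OUTPUT_REPORT_ID_INIT_COMMAND, INIT_COMMAND_DETECT_FANS }, zero-padded */
		unsigned char detect[64] = { 0x60, 0x03 };
		unsigned char reply[64];
		int fd = open("/dev/hidraw0", O_RDWR);	/* hypothetical device node */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* The driver pads every output report to 64 bytes; do the same. */
		if (write(fd, detect, sizeof(detect)) != sizeof(detect))
			perror("write");
		/* The reply's first byte is the 0x61 report id. */
		if (read(fd, reply, sizeof(reply)) >= 19 && reply[0] == 0x61)
			printf("fan types: %d %d %d\n",
			       reply[16], reply[17], reply[18]);
		close(fd);
		return 0;
	}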
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index ffb609cee3a4..41f6cbf96d3b 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -66,6 +66,16 @@ config SENSORS_BPA_RS600
This driver can also be built as a module. If so, the module will
be called bpa-rs600.
+config SENSORS_DELTA_AHE50DC_FAN
+ tristate "Delta AHE-50DC fan control module"
+ help
+ If you say yes here you get hardware monitoring support for
+ the integrated fan control module of the Delta AHE-50DC
+ Open19 power shelf.
+
+ This driver can also be built as a module. If so, the module
+ will be called delta-ahe50dc-fan.
+
config SENSORS_FSP_3Y
tristate "FSP/3Y-Power power supplies"
help
@@ -123,14 +133,20 @@ config SENSORS_IR36021
be called ir36021.
config SENSORS_IR38064
- tristate "Infineon IR38064"
+ tristate "Infineon IR38064 and compatibles"
help
If you say yes here you get hardware monitoring support for Infineon
- IR38064.
+ IR38060, IR38064, IR38164 and IR38263.
This driver can also be built as a module. If so, the module will
be called ir38064.
+config SENSORS_IR38064_REGULATOR
+ bool "Regulator support for IR38064 and compatibles"
+ depends on SENSORS_IR38064 && REGULATOR
+ help
+ Uses the IR38064 or a compatible device as a regulator.
+
config SENSORS_IRPS5401
tristate "Infineon IRPS5401"
help
@@ -276,6 +292,15 @@ config SENSORS_MP2975
This driver can also be built as a module. If so, the module will
be called mp2975.
+config SENSORS_MP5023
+ tristate "MPS MP5023"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP5023.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp5023.
+
config SENSORS_PIM4328
tristate "Flex PIM4328 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 0ed4d596a948..e5935f70c9e0 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_SENSORS_ADM1266) += adm1266.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
obj-$(CONFIG_SENSORS_BEL_PFE) += bel-pfe.o
obj-$(CONFIG_SENSORS_BPA_RS600) += bpa-rs600.o
+obj-$(CONFIG_SENSORS_DELTA_AHE50DC_FAN) += delta-ahe50dc-fan.o
obj-$(CONFIG_SENSORS_FSP_3Y) += fsp-3y.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_DPS920AB) += dps920ab.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_MP2888) += mp2888.o
obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
+obj-$(CONFIG_SENSORS_MP5023) += mp5023.o
obj-$(CONFIG_SENSORS_PM6764TR) += pm6764tr.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_Q54SJ108A2) += q54sj108a2.o
diff --git a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
new file mode 100644
index 000000000000..40dffd9c4cbf
--- /dev/null
+++ b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Delta AHE-50DC power shelf fan control module driver
+ *
+ * Copyright 2021 Zev Weiss <zev@bewilderbeest.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+
+#include "pmbus.h"
+
+#define AHE50DC_PMBUS_READ_TEMP4 0xd0
+
+static int ahe50dc_fan_read_word_data(struct i2c_client *client, int page, int phase, int reg)
+{
+ /* temp1 in (virtual) page 1 is remapped to mfr-specific temp4 */
+ if (page == 1) {
+ if (reg == PMBUS_READ_TEMPERATURE_1)
+ return i2c_smbus_read_word_data(client, AHE50DC_PMBUS_READ_TEMP4);
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * There's a fairly limited set of commands this device actually
+ * supports, so here we block attempts to read anything else (which
+ * return 0xffff and would cause confusion elsewhere).
+ */
+ switch (reg) {
+ case PMBUS_STATUS_WORD:
+ case PMBUS_FAN_COMMAND_1:
+ case PMBUS_FAN_COMMAND_2:
+ case PMBUS_FAN_COMMAND_3:
+ case PMBUS_FAN_COMMAND_4:
+ case PMBUS_STATUS_FAN_12:
+ case PMBUS_STATUS_FAN_34:
+ case PMBUS_READ_VIN:
+ case PMBUS_READ_TEMPERATURE_1:
+ case PMBUS_READ_TEMPERATURE_2:
+ case PMBUS_READ_TEMPERATURE_3:
+ case PMBUS_READ_FAN_SPEED_1:
+ case PMBUS_READ_FAN_SPEED_2:
+ case PMBUS_READ_FAN_SPEED_3:
+ case PMBUS_READ_FAN_SPEED_4:
+ return -ENODATA;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct pmbus_driver_info ahe50dc_fan_info = {
+ .pages = 2,
+ .format[PSC_FAN] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_FAN] = 1,
+ .b[PSC_FAN] = 0,
+ .R[PSC_FAN] = 0,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 1,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3,
+ .func[0] = PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_FAN34 |
+ PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_STATUS_FAN34 | PMBUS_PAGE_VIRTUAL,
+ .func[1] = PMBUS_HAVE_TEMP | PMBUS_PAGE_VIRTUAL,
+ .read_word_data = ahe50dc_fan_read_word_data,
+};
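+
+/*
+ * PMBus direct format decodes as X = (Y * 10^-R - b) / m, so with the
+ * coefficients above the VIN register reads in millivolts (R = 3),
+ * temperatures in tenths of a degree C (R = 1) and fan speeds directly
+ * in RPM (m = 1, R = 0).
+ */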
+
+/*
+ * CAPABILITY returns 0xff, which appears to be this device's way of
+ * indicating it doesn't support something (and if we enable I2C_CLIENT_PEC
+ * on seeing bit 7 set, the device generates bad PECs, so let's not go there).
+ */
+static struct pmbus_platform_data ahe50dc_fan_data = {
+ .flags = PMBUS_NO_CAPABILITY,
+};
+
+static int ahe50dc_fan_probe(struct i2c_client *client)
+{
+ client->dev.platform_data = &ahe50dc_fan_data;
+ return pmbus_do_probe(client, &ahe50dc_fan_info);
+}
+
+static const struct i2c_device_id ahe50dc_fan_id[] = {
+ { "ahe50dc_fan" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ahe50dc_fan_id);
+
+static const struct of_device_id __maybe_unused ahe50dc_fan_of_match[] = {
+ { .compatible = "delta,ahe50dc-fan" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ahe50dc_fan_of_match);
+
+static struct i2c_driver ahe50dc_fan_driver = {
+ .driver = {
+ .name = "ahe50dc_fan",
+ .of_match_table = of_match_ptr(ahe50dc_fan_of_match),
+ },
+ .probe_new = ahe50dc_fan_probe,
+ .id_table = ahe50dc_fan_id,
+};
+module_i2c_driver(ahe50dc_fan_driver);
+
+MODULE_AUTHOR("Zev Weiss <zev@bewilderbeest.net>");
+MODULE_DESCRIPTION("Driver for Delta AHE-50DC power shelf fan control module");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
index 1fb7f1248639..09276e397194 100644
--- a/drivers/hwmon/pmbus/ir38064.c
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -16,8 +16,16 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/driver.h>
#include "pmbus.h"
+#if IS_ENABLED(CONFIG_SENSORS_IR38064_REGULATOR)
+static const struct regulator_desc ir38064_reg_desc[] = {
+ PMBUS_REGULATOR("vout", 0),
+};
+#endif /* CONFIG_SENSORS_IR38064_REGULATOR */
+
static struct pmbus_driver_info ir38064_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = linear,
@@ -33,6 +41,10 @@ static struct pmbus_driver_info ir38064_info = {
| PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
| PMBUS_HAVE_POUT,
+#if IS_ENABLED(CONFIG_SENSORS_IR38064_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = ir38064_reg_desc,
+#endif
};
static int ir38064_probe(struct i2c_client *client)
@@ -41,16 +53,30 @@ static int ir38064_probe(struct i2c_client *client)
}
static const struct i2c_device_id ir38064_id[] = {
+ {"ir38060", 0},
{"ir38064", 0},
+ {"ir38164", 0},
+ {"ir38263", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, ir38064_id);
+static const struct of_device_id __maybe_unused ir38064_of_match[] = {
+ { .compatible = "infineon,ir38060" },
+ { .compatible = "infineon,ir38064" },
+ { .compatible = "infineon,ir38164" },
+ { .compatible = "infineon,ir38263" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, ir38064_of_match);
+
/* This is the driver that will be inserted */
static struct i2c_driver ir38064_driver = {
.driver = {
.name = "ir38064",
+ .of_match_table = of_match_ptr(ir38064_of_match),
},
.probe_new = ir38064_probe,
.id_table = ir38064_id,
@@ -59,6 +85,6 @@ static struct i2c_driver ir38064_driver = {
module_i2c_driver(ir38064_driver);
MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>");
-MODULE_DESCRIPTION("PMBus driver for Infineon IR38064");
+MODULE_DESCRIPTION("PMBus driver for Infineon IR38064 and compatible chips");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/mp5023.c b/drivers/hwmon/pmbus/mp5023.c
new file mode 100644
index 000000000000..791a06c3c54a
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp5023.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MPS MP5023 Hot-Swap Controller
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info mp5023_info = {
+ .pages = 1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+
+ .m[PSC_VOLTAGE_IN] = 32,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .m[PSC_VOLTAGE_OUT] = 32,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .m[PSC_CURRENT_OUT] = 16,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 0,
+ .m[PSC_POWER] = 1,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = 0,
+ .m[PSC_TEMPERATURE] = 2,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 0,
+
+ .func[0] =
+ PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+};
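+
+/*
+ * Given the direct-format coefficients above, one register LSB corresponds
+ * to 1/32 V (31.25 mV) for VIN/VOUT, 1/16 A (62.5 mA) for IOUT, 1 W for
+ * PIN and 0.5 degrees C for temperature.
+ */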
+
+static int mp5023_probe(struct i2c_client *client)
+{
+ return pmbus_do_probe(client, &mp5023_info);
+}
+
+static const struct of_device_id __maybe_unused mp5023_of_match[] = {
+ { .compatible = "mps,mp5023", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, mp5023_of_match);
+
+static struct i2c_driver mp5023_driver = {
+ .driver = {
+ .name = "mp5023",
+ .of_match_table = of_match_ptr(mp5023_of_match),
+ },
+ .probe_new = mp5023_probe,
+};
+
+module_i2c_driver(mp5023_driver);
+
+MODULE_AUTHOR("Howard Chiu <howard.chiu@quantatw.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP5023 HSC");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index 573f53d52912..1650d3b4c26e 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -120,6 +120,8 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
data,
&rpi_chip_info,
NULL);
+ if (IS_ERR(data->hwmon_dev))
+ return PTR_ERR(data->hwmon_dev);
ret = devm_delayed_work_autocancel(dev, &data->get_values_poll_work,
get_values_poll);
@@ -127,10 +129,9 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, data);
- if (!PTR_ERR_OR_ZERO(data->hwmon_dev))
- schedule_delayed_work(&data->get_values_poll_work, 2 * HZ);
+ schedule_delayed_work(&data->get_values_poll_work, 2 * HZ);
- return PTR_ERR_OR_ZERO(data->hwmon_dev);
+ return 0;
}
static struct platform_driver rpi_hwmon_driver = {
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
index 3415d7a0e0fc..c19df3ade48e 100644
--- a/drivers/hwmon/sht4x.c
+++ b/drivers/hwmon/sht4x.c
@@ -281,9 +281,16 @@ static const struct i2c_device_id sht4x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, sht4x_id);
+static const struct of_device_id sht4x_of_match[] = {
+ { .compatible = "sensirion,sht4x" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sht4x_of_match);
+
static struct i2c_driver sht4x_driver = {
.driver = {
.name = "sht4x",
+ .of_match_table = sht4x_of_match,
},
.probe = sht4x_probe,
.id_table = sht4x_id,
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index b31f4964f852..b86d9df7105d 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -18,17 +18,15 @@
* and thus has 16-bit registers for its value and limit instead of 8 bits.
*/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/bitops.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
+#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/sysfs.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
@@ -41,44 +39,19 @@ enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
* reading and writing
*/
#define TMP401_STATUS 0x02
-#define TMP401_CONFIG_READ 0x03
-#define TMP401_CONFIG_WRITE 0x09
-#define TMP401_CONVERSION_RATE_READ 0x04
-#define TMP401_CONVERSION_RATE_WRITE 0x0A
+#define TMP401_CONFIG 0x03
+#define TMP401_CONVERSION_RATE 0x04
#define TMP401_TEMP_CRIT_HYST 0x21
#define TMP401_MANUFACTURER_ID_REG 0xFE
#define TMP401_DEVICE_ID_REG 0xFF
-static const u8 TMP401_TEMP_MSB_READ[7][2] = {
- { 0x00, 0x01 }, /* temp */
- { 0x06, 0x08 }, /* low limit */
- { 0x05, 0x07 }, /* high limit */
- { 0x20, 0x19 }, /* therm (crit) limit */
- { 0x30, 0x34 }, /* lowest */
- { 0x32, 0x36 }, /* highest */
-};
-
-static const u8 TMP401_TEMP_MSB_WRITE[7][2] = {
- { 0, 0 }, /* temp (unused) */
- { 0x0C, 0x0E }, /* low limit */
- { 0x0B, 0x0D }, /* high limit */
- { 0x20, 0x19 }, /* therm (crit) limit */
- { 0x30, 0x34 }, /* lowest */
- { 0x32, 0x36 }, /* highest */
-};
-
-static const u8 TMP432_TEMP_MSB_READ[4][3] = {
+static const u8 TMP401_TEMP_MSB[7][3] = {
{ 0x00, 0x01, 0x23 }, /* temp */
{ 0x06, 0x08, 0x16 }, /* low limit */
{ 0x05, 0x07, 0x15 }, /* high limit */
- { 0x20, 0x19, 0x1A }, /* therm (crit) limit */
-};
-
-static const u8 TMP432_TEMP_MSB_WRITE[4][3] = {
- { 0, 0, 0 }, /* temp - unused */
- { 0x0C, 0x0E, 0x16 }, /* low limit */
- { 0x0B, 0x0D, 0x15 }, /* high limit */
- { 0x20, 0x19, 0x1A }, /* therm (crit) limit */
+ { 0x20, 0x19, 0x1a }, /* therm (crit) limit */
+ { 0x30, 0x34, 0x00 }, /* lowest */
+ { 0x32, 0xf6, 0x00 }, /* highest */
};
/* [0] = fault, [1] = low, [2] = high, [3] = therm/crit */
@@ -131,311 +104,323 @@ MODULE_DEVICE_TABLE(i2c, tmp401_id);
struct tmp401_data {
struct i2c_client *client;
- const struct attribute_group *groups[3];
+ struct regmap *regmap;
struct mutex update_lock;
- bool valid; /* false until following fields are valid */
- unsigned long last_updated; /* in jiffies */
enum chips kind;
- unsigned int update_interval; /* in milliseconds */
+ bool extended_range;
- /* register values */
- u8 status[4];
- u8 config;
- u16 temp[7][3];
- u8 temp_crit_hyst;
+ /* hwmon API configuration data */
+ u32 chip_channel_config[4];
+ struct hwmon_channel_info chip_info;
+ u32 temp_channel_config[4];
+ struct hwmon_channel_info temp_info;
+ const struct hwmon_channel_info *info[3];
+ struct hwmon_chip_info chip;
};
-/*
- * Sysfs attr show / store functions
- */
-
-static int tmp401_register_to_temp(u16 reg, u8 config)
-{
- int temp = reg;
-
- if (config & TMP401_CONFIG_RANGE)
- temp -= 64 * 256;
-
- return DIV_ROUND_CLOSEST(temp * 125, 32);
-}
+/* regmap */
-static u16 tmp401_temp_to_register(long temp, u8 config, int zbits)
+static bool tmp401_regmap_is_volatile(struct device *dev, unsigned int reg)
{
- if (config & TMP401_CONFIG_RANGE) {
- temp = clamp_val(temp, -64000, 191000);
- temp += 64000;
- } else
- temp = clamp_val(temp, 0, 127000);
-
- return DIV_ROUND_CLOSEST(temp * (1 << (8 - zbits)), 1000) << zbits;
-}
-
-static int tmp401_update_device_reg16(struct i2c_client *client,
- struct tmp401_data *data)
-{
- int i, j, val;
- int num_regs = data->kind == tmp411 ? 6 : 4;
- int num_sensors = data->kind == tmp432 ? 3 : 2;
-
- for (i = 0; i < num_sensors; i++) { /* local / r1 / r2 */
- for (j = 0; j < num_regs; j++) { /* temp / low / ... */
- u8 regaddr;
-
- regaddr = data->kind == tmp432 ?
- TMP432_TEMP_MSB_READ[j][i] :
- TMP401_TEMP_MSB_READ[j][i];
- if (j == 3) { /* crit is msb only */
- val = i2c_smbus_read_byte_data(client, regaddr);
- } else {
- val = i2c_smbus_read_word_swapped(client,
- regaddr);
- }
- if (val < 0)
- return val;
-
- data->temp[j][i] = j == 3 ? val << 8 : val;
- }
+ switch (reg) {
+ case 0: /* local temp msb */
+ case 1: /* remote temp msb */
+ case 2: /* status */
+ case 0x10: /* remote temp lsb */
+ case 0x15: /* local temp lsb */
+ case 0x1b: /* status (tmp432) */
+ case 0x23 ... 0x24: /* remote temp 2 msb / lsb */
+ case 0x30 ... 0x37: /* lowest/highest temp; status (tmp432) */
+ return true;
+ default:
+ return false;
}
- return 0;
}
-static struct tmp401_data *tmp401_update_device(struct device *dev)
+static int tmp401_reg_read(void *context, unsigned int reg, unsigned int *val)
{
- struct tmp401_data *data = dev_get_drvdata(dev);
+ struct tmp401_data *data = context;
struct i2c_client *client = data->client;
- struct tmp401_data *ret = data;
- int i, val;
- unsigned long next_update;
-
- mutex_lock(&data->update_lock);
+ int regval;
- next_update = data->last_updated +
- msecs_to_jiffies(data->update_interval);
- if (time_after(jiffies, next_update) || !data->valid) {
- if (data->kind != tmp432) {
- /*
- * The driver uses the TMP432 status format internally.
- * Convert status to TMP432 format for other chips.
- */
- val = i2c_smbus_read_byte_data(client, TMP401_STATUS);
- if (val < 0) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->status[0] =
- (val & TMP401_STATUS_REMOTE_OPEN) >> 1;
- data->status[1] =
- ((val & TMP401_STATUS_REMOTE_LOW) >> 2) |
- ((val & TMP401_STATUS_LOCAL_LOW) >> 5);
- data->status[2] =
- ((val & TMP401_STATUS_REMOTE_HIGH) >> 3) |
- ((val & TMP401_STATUS_LOCAL_HIGH) >> 6);
- data->status[3] = val & (TMP401_STATUS_LOCAL_CRIT
- | TMP401_STATUS_REMOTE_CRIT);
- } else {
- for (i = 0; i < ARRAY_SIZE(data->status); i++) {
- val = i2c_smbus_read_byte_data(client,
- TMP432_STATUS_REG[i]);
- if (val < 0) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->status[i] = val;
- }
- }
-
- val = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ);
- if (val < 0) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->config = val;
- val = tmp401_update_device_reg16(client, data);
- if (val < 0) {
- ret = ERR_PTR(val);
- goto abort;
+ switch (reg) {
+ case 0: /* local temp msb */
+ case 1: /* remote temp msb */
+ case 5: /* local temp high limit msb */
+ case 6: /* local temp low limit msb */
+ case 7: /* remote temp high limit msb */
+ case 8: /* remote temp low limit msb */
+ case 0x15: /* remote temp 2 high limit msb */
+ case 0x16: /* remote temp 2 low limit msb */
+ case 0x23: /* remote temp 2 msb */
+ case 0x30: /* local temp minimum, tmp411 */
+ case 0x32: /* local temp maximum, tmp411 */
+ case 0x34: /* remote temp minimum, tmp411 */
+ case 0xf6: /* remote temp maximum, tmp411 (really 0x36) */
+ /* work around register overlap between TMP411 and TMP432 */
+ if (reg == 0xf6)
+ reg = 0x36;
+ regval = i2c_smbus_read_word_swapped(client, reg);
+ if (regval < 0)
+ return regval;
+ *val = regval;
+ break;
+ case 0x19: /* critical limits, 8-bit registers */
+ case 0x1a:
+ case 0x20:
+ regval = i2c_smbus_read_byte_data(client, reg);
+ if (regval < 0)
+ return regval;
+ *val = regval << 8;
+ break;
+ case 0x1b:
+ case 0x35 ... 0x37:
+ if (data->kind == tmp432) {
+ regval = i2c_smbus_read_byte_data(client, reg);
+ if (regval < 0)
+ return regval;
+ *val = regval;
+ break;
}
- val = i2c_smbus_read_byte_data(client, TMP401_TEMP_CRIT_HYST);
- if (val < 0) {
- ret = ERR_PTR(val);
- goto abort;
+ /* simulate TMP432 status registers */
+ regval = i2c_smbus_read_byte_data(client, TMP401_STATUS);
+ if (regval < 0)
+ return regval;
+ *val = 0;
+ switch (reg) {
+ case 0x1b: /* open / fault */
+ if (regval & TMP401_STATUS_REMOTE_OPEN)
+ *val |= BIT(1);
+ break;
+ case 0x35: /* high limit */
+ if (regval & TMP401_STATUS_LOCAL_HIGH)
+ *val |= BIT(0);
+ if (regval & TMP401_STATUS_REMOTE_HIGH)
+ *val |= BIT(1);
+ break;
+ case 0x36: /* low limit */
+ if (regval & TMP401_STATUS_LOCAL_LOW)
+ *val |= BIT(0);
+ if (regval & TMP401_STATUS_REMOTE_LOW)
+ *val |= BIT(1);
+ break;
+ case 0x37: /* therm / crit limit */
+ if (regval & TMP401_STATUS_LOCAL_CRIT)
+ *val |= BIT(0);
+ if (regval & TMP401_STATUS_REMOTE_CRIT)
+ *val |= BIT(1);
+ break;
}
- data->temp_crit_hyst = val;
-
- data->last_updated = jiffies;
- data->valid = true;
+ break;
+ default:
+ regval = i2c_smbus_read_byte_data(client, reg);
+ if (regval < 0)
+ return regval;
+ *val = regval;
+ break;
}
-
-abort:
- mutex_unlock(&data->update_lock);
- return ret;
+ return 0;
}
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static int tmp401_reg_write(void *context, unsigned int reg, unsigned int val)
{
- int nr = to_sensor_dev_attr_2(devattr)->nr;
- int index = to_sensor_dev_attr_2(devattr)->index;
- struct tmp401_data *data = tmp401_update_device(dev);
-
- if (IS_ERR(data))
- return PTR_ERR(data);
+ struct tmp401_data *data = context;
+ struct i2c_client *client = data->client;
- return sprintf(buf, "%d\n",
- tmp401_register_to_temp(data->temp[nr][index], data->config));
+ switch (reg) {
+ case 0x05: /* local temp high limit msb */
+ case 0x06: /* local temp low limit msb */
+ case 0x07: /* remote temp high limit msb */
+ case 0x08: /* remote temp low limit msb */
+ reg += 6; /* adjust for register write address */
+ fallthrough;
+ case 0x15: /* remote temp 2 high limit msb */
+ case 0x16: /* remote temp 2 low limit msb */
+ return i2c_smbus_write_word_swapped(client, reg, val);
+ case 0x19: /* critical limits, 8-bit registers */
+ case 0x1a:
+ case 0x20:
+ return i2c_smbus_write_byte_data(client, reg, val >> 8);
+ case TMP401_CONVERSION_RATE:
+ case TMP401_CONFIG:
+ reg += 6; /* adjust for register write address */
+ fallthrough;
+ default:
+ return i2c_smbus_write_byte_data(client, reg, val);
+ }
}
-static ssize_t temp_crit_hyst_show(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- int temp, index = to_sensor_dev_attr(devattr)->index;
- struct tmp401_data *data = tmp401_update_device(dev);
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- mutex_lock(&data->update_lock);
- temp = tmp401_register_to_temp(data->temp[3][index], data->config);
- temp -= data->temp_crit_hyst * 1000;
- mutex_unlock(&data->update_lock);
+static const struct regmap_config tmp401_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = tmp401_regmap_is_volatile,
+ .reg_read = tmp401_reg_read,
+ .reg_write = tmp401_reg_write,
+};
- return sprintf(buf, "%d\n", temp);
-}
+/* temperature conversion */
-static ssize_t status_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int tmp401_register_to_temp(u16 reg, bool extended)
{
- int nr = to_sensor_dev_attr_2(devattr)->nr;
- int mask = to_sensor_dev_attr_2(devattr)->index;
- struct tmp401_data *data = tmp401_update_device(dev);
+ int temp = reg;
- if (IS_ERR(data))
- return PTR_ERR(data);
+ if (extended)
+ temp -= 64 * 256;
- return sprintf(buf, "%d\n", !!(data->status[nr] & mask));
+ return DIV_ROUND_CLOSEST(temp * 125, 32);
}
-static ssize_t temp_store(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
+static u16 tmp401_temp_to_register(long temp, bool extended, int zbits)
{
- int nr = to_sensor_dev_attr_2(devattr)->nr;
- int index = to_sensor_dev_attr_2(devattr)->index;
- struct tmp401_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long val;
- u16 reg;
- u8 regaddr;
-
- if (kstrtol(buf, 10, &val))
- return -EINVAL;
-
- reg = tmp401_temp_to_register(val, data->config, nr == 3 ? 8 : 4);
-
- mutex_lock(&data->update_lock);
-
- regaddr = data->kind == tmp432 ? TMP432_TEMP_MSB_WRITE[nr][index]
- : TMP401_TEMP_MSB_WRITE[nr][index];
- if (nr == 3) { /* crit is msb only */
- i2c_smbus_write_byte_data(client, regaddr, reg >> 8);
+ if (extended) {
+ temp = clamp_val(temp, -64000, 191000);
+ temp += 64000;
} else {
- /* Hardware expects big endian data --> use _swapped */
- i2c_smbus_write_word_swapped(client, regaddr, reg);
+ temp = clamp_val(temp, 0, 127000);
}
- data->temp[nr][index] = reg;
-
- mutex_unlock(&data->update_lock);
- return count;
+ return DIV_ROUND_CLOSEST(temp * (1 << (8 - zbits)), 1000) << zbits;
}
-static ssize_t temp_crit_hyst_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int temp, index = to_sensor_dev_attr(devattr)->index;
- struct tmp401_data *data = tmp401_update_device(dev);
- long val;
- u8 reg;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- if (kstrtol(buf, 10, &val))
- return -EINVAL;
-
- if (data->config & TMP401_CONFIG_RANGE)
- val = clamp_val(val, -64000, 191000);
- else
- val = clamp_val(val, 0, 127000);
-
- mutex_lock(&data->update_lock);
- temp = tmp401_register_to_temp(data->temp[3][index], data->config);
- val = clamp_val(val, temp - 255000, temp);
- reg = ((temp - val) + 500) / 1000;
-
- i2c_smbus_write_byte_data(data->client, TMP401_TEMP_CRIT_HYST,
- reg);
+/* hwmon API functions */
- data->temp_crit_hyst = reg;
+static const u8 tmp401_temp_reg_index[] = {
+ [hwmon_temp_input] = 0,
+ [hwmon_temp_min] = 1,
+ [hwmon_temp_max] = 2,
+ [hwmon_temp_crit] = 3,
+ [hwmon_temp_lowest] = 4,
+ [hwmon_temp_highest] = 5,
+};
- mutex_unlock(&data->update_lock);
+static const u8 tmp401_status_reg_index[] = {
+ [hwmon_temp_fault] = 0,
+ [hwmon_temp_min_alarm] = 1,
+ [hwmon_temp_max_alarm] = 2,
+ [hwmon_temp_crit_alarm] = 3,
+};
- return count;
+static int tmp401_temp_read(struct device *dev, u32 attr, int channel, long *val)
+{
+ struct tmp401_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int reg, ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_lowest:
+ case hwmon_temp_highest:
+ reg = TMP401_TEMP_MSB[tmp401_temp_reg_index[attr]][channel];
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+ *val = tmp401_register_to_temp(regval, data->extended_range);
+ break;
+ case hwmon_temp_crit_hyst:
+ mutex_lock(&data->update_lock);
+ reg = TMP401_TEMP_MSB[3][channel];
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret < 0)
+ goto unlock;
+ *val = tmp401_register_to_temp(regval, data->extended_range);
+ ret = regmap_read(regmap, TMP401_TEMP_CRIT_HYST, &regval);
+ if (ret < 0)
+ goto unlock;
+ *val -= regval * 1000;
+unlock:
+ mutex_unlock(&data->update_lock);
+ if (ret < 0)
+ return ret;
+ break;
+ case hwmon_temp_fault:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ reg = TMP432_STATUS_REG[tmp401_status_reg_index[attr]];
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
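+ /* one alarm bit per channel: bit 0 local, bit 1 remote, bit 2 remote2 */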
+ *val = !!(regval & BIT(channel));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-/*
- * Resets the historical measurements of minimum and maximum temperatures.
- * This is done by writing any value to any of the minimum/maximum registers
- * (0x30-0x37).
- */
-static ssize_t reset_temp_history_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static int tmp401_temp_write(struct device *dev, u32 attr, int channel,
+ long val)
{
struct tmp401_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- long val;
-
- if (kstrtol(buf, 10, &val))
- return -EINVAL;
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int reg, ret, temp;
- if (val != 1) {
- dev_err(dev,
- "temp_reset_history value %ld not supported. Use 1 to reset the history!\n",
- val);
- return -EINVAL;
- }
mutex_lock(&data->update_lock);
- i2c_smbus_write_byte_data(client, TMP401_TEMP_MSB_WRITE[5][0], val);
- data->valid = false;
+ switch (attr) {
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ reg = TMP401_TEMP_MSB[tmp401_temp_reg_index[attr]][channel];
+ regval = tmp401_temp_to_register(val, data->extended_range,
+ attr == hwmon_temp_crit ? 8 : 4);
+ ret = regmap_write(regmap, reg, regval);
+ break;
+ case hwmon_temp_crit_hyst:
+ if (data->extended_range)
+ val = clamp_val(val, -64000, 191000);
+ else
+ val = clamp_val(val, 0, 127000);
+
+ reg = TMP401_TEMP_MSB[3][channel];
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret < 0)
+ break;
+ temp = tmp401_register_to_temp(regval, data->extended_range);
+ val = clamp_val(val, temp - 255000, temp);
+ regval = ((temp - val) + 500) / 1000;
+ ret = regmap_write(regmap, TMP401_TEMP_CRIT_HYST, regval);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
mutex_unlock(&data->update_lock);
-
- return count;
+ return ret;
}
-static ssize_t update_interval_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int tmp401_chip_read(struct device *dev, u32 attr, int channel, long *val)
{
struct tmp401_data *data = dev_get_drvdata(dev);
+ u32 regval;
+ int ret;
+
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ ret = regmap_read(data->regmap, TMP401_CONVERSION_RATE, &regval);
+ if (ret < 0)
+ return ret;
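+ /* rate register 0-7 maps to 16000-125 ms, e.g. regval 5 -> 500 ms */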
+ *val = (1 << (7 - regval)) * 125;
+ break;
+ case hwmon_chip_temp_reset_history:
+ *val = 0;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- return sprintf(buf, "%u\n", data->update_interval);
+ return 0;
}
-static ssize_t update_interval_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int tmp401_set_convrate(struct regmap *regmap, long val)
{
- struct tmp401_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- unsigned long val;
- int err, rate;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
+ int rate;
/*
* For valid rates, interval can be calculated as
@@ -447,153 +432,137 @@ static ssize_t update_interval_store(struct device *dev,
*/
val = clamp_val(val, 125, 16000);
rate = 7 - __fls(val * 4 / (125 * 3));
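+ /*
+ * Worked example (illustrative): val = 500 ms gives
+ * 500 * 4 / 375 = 5, __fls(5) = 2, so rate = 7 - 2 = 5, and the
+ * readback interval is (1 << (7 - 5)) * 125 = 500 ms.
+ */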
+ return regmap_write(regmap, TMP401_CONVERSION_RATE, rate);
+}
+
+static int tmp401_chip_write(struct device *dev, u32 attr, int channel, long val)
+{
+ struct tmp401_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ int err;
+
mutex_lock(&data->update_lock);
- i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, rate);
- data->update_interval = (1 << (7 - rate)) * 125;
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ err = tmp401_set_convrate(regmap, val);
+ break;
+ case hwmon_chip_temp_reset_history:
+ if (val != 1) {
+ err = -EINVAL;
+ break;
+ }
+ /*
+ * Reset history by writing any value to any of the
+ * minimum/maximum registers (0x30-0x37).
+ */
+ err = regmap_write(regmap, 0x30, 0);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
mutex_unlock(&data->update_lock);
- return count;
+ return err;
}
-static SENSOR_DEVICE_ATTR_2_RO(temp1_input, temp, 0, 0);
-static SENSOR_DEVICE_ATTR_2_RW(temp1_min, temp, 1, 0);
-static SENSOR_DEVICE_ATTR_2_RW(temp1_max, temp, 2, 0);
-static SENSOR_DEVICE_ATTR_2_RW(temp1_crit, temp, 3, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_crit_hyst, temp_crit_hyst, 0);
-static SENSOR_DEVICE_ATTR_2_RO(temp1_min_alarm, status, 1,
- TMP432_STATUS_LOCAL);
-static SENSOR_DEVICE_ATTR_2_RO(temp1_max_alarm, status, 2,
- TMP432_STATUS_LOCAL);
-static SENSOR_DEVICE_ATTR_2_RO(temp1_crit_alarm, status, 3,
- TMP432_STATUS_LOCAL);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_input, temp, 0, 1);
-static SENSOR_DEVICE_ATTR_2_RW(temp2_min, temp, 1, 1);
-static SENSOR_DEVICE_ATTR_2_RW(temp2_max, temp, 2, 1);
-static SENSOR_DEVICE_ATTR_2_RW(temp2_crit, temp, 3, 1);
-static SENSOR_DEVICE_ATTR_RO(temp2_crit_hyst, temp_crit_hyst, 1);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_fault, status, 0, TMP432_STATUS_REMOTE1);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_min_alarm, status, 1,
- TMP432_STATUS_REMOTE1);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_max_alarm, status, 2,
- TMP432_STATUS_REMOTE1);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_crit_alarm, status, 3,
- TMP432_STATUS_REMOTE1);
-
-static DEVICE_ATTR_RW(update_interval);
-
-static struct attribute *tmp401_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
-
- &dev_attr_update_interval.attr,
-
- NULL
-};
-
-static const struct attribute_group tmp401_group = {
- .attrs = tmp401_attributes,
-};
-
-/*
- * Additional features of the TMP411 chip.
- * The TMP411 stores the minimum and maximum
- * temperature measured since power-on, chip-reset, or
- * minimum and maximum register reset for both the local
- * and remote channels.
- */
-static SENSOR_DEVICE_ATTR_2_RO(temp1_lowest, temp, 4, 0);
-static SENSOR_DEVICE_ATTR_2_RO(temp1_highest, temp, 5, 0);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_lowest, temp, 4, 1);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_highest, temp, 5, 1);
-static SENSOR_DEVICE_ATTR_WO(temp_reset_history, reset_temp_history, 0);
-
-static struct attribute *tmp411_attributes[] = {
- &sensor_dev_attr_temp1_highest.dev_attr.attr,
- &sensor_dev_attr_temp1_lowest.dev_attr.attr,
- &sensor_dev_attr_temp2_highest.dev_attr.attr,
- &sensor_dev_attr_temp2_lowest.dev_attr.attr,
- &sensor_dev_attr_temp_reset_history.dev_attr.attr,
- NULL
-};
+static int tmp401_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return tmp401_chip_read(dev, attr, channel, val);
+ case hwmon_temp:
+ return tmp401_temp_read(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
-static const struct attribute_group tmp411_group = {
- .attrs = tmp411_attributes,
-};
+static int tmp401_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return tmp401_chip_write(dev, attr, channel, val);
+ case hwmon_temp:
+ return tmp401_temp_write(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
-static SENSOR_DEVICE_ATTR_2_RO(temp3_input, temp, 0, 2);
-static SENSOR_DEVICE_ATTR_2_RW(temp3_min, temp, 1, 2);
-static SENSOR_DEVICE_ATTR_2_RW(temp3_max, temp, 2, 2);
-static SENSOR_DEVICE_ATTR_2_RW(temp3_crit, temp, 3, 2);
-static SENSOR_DEVICE_ATTR_RO(temp3_crit_hyst, temp_crit_hyst, 2);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_fault, status, 0, TMP432_STATUS_REMOTE2);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_min_alarm, status, 1,
- TMP432_STATUS_REMOTE2);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_max_alarm, status, 2,
- TMP432_STATUS_REMOTE2);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_crit_alarm, status, 3,
- TMP432_STATUS_REMOTE2);
-
-static struct attribute *tmp432_attributes[] = {
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_min.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp3_crit.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp3_fault.dev_attr.attr,
- &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
-
- NULL
-};
+static umode_t tmp401_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ case hwmon_chip_temp_reset_history:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_fault:
+ case hwmon_temp_lowest:
+ case hwmon_temp_highest:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
-static const struct attribute_group tmp432_group = {
- .attrs = tmp432_attributes,
+static const struct hwmon_ops tmp401_ops = {
+ .is_visible = tmp401_is_visible,
+ .read = tmp401_read,
+ .write = tmp401_write,
};
-/*
- * Begin non sysfs callback code (aka Real code)
- */
+/* chip initialization, detect, probe */
-static int tmp401_init_client(struct tmp401_data *data,
- struct i2c_client *client)
+static int tmp401_init_client(struct tmp401_data *data)
{
- int config, config_orig, status = 0;
+ struct regmap *regmap = data->regmap;
+ u32 config, config_orig;
+ int ret;
- /* Set the conversion rate to 2 Hz */
- i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, 5);
- data->update_interval = 500;
+ /* Set conversion rate to 2 Hz */
+ ret = regmap_write(regmap, TMP401_CONVERSION_RATE, 5);
+ if (ret < 0)
+ return ret;
/* Start conversions (disable shutdown if necessary) */
- config = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ);
- if (config < 0)
- return config;
+ ret = regmap_read(regmap, TMP401_CONFIG, &config);
+ if (ret < 0)
+ return ret;
config_orig = config;
config &= ~TMP401_CONFIG_SHUTDOWN;
+ data->extended_range = !!(config & TMP401_CONFIG_RANGE);
+
if (config != config_orig)
- status = i2c_smbus_write_byte_data(client,
- TMP401_CONFIG_WRITE,
- config);
+ ret = regmap_write(regmap, TMP401_CONFIG, config);
- return status;
+ return ret;
}
static int tmp401_detect(struct i2c_client *client,
@@ -651,11 +620,11 @@ static int tmp401_detect(struct i2c_client *client,
return -ENODEV;
}
- reg = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ);
+ reg = i2c_smbus_read_byte_data(client, TMP401_CONFIG);
if (reg & 0x1b)
return -ENODEV;
- reg = i2c_smbus_read_byte_data(client, TMP401_CONVERSION_RATE_READ);
+ reg = i2c_smbus_read_byte_data(client, TMP401_CONVERSION_RATE);
/* Datasheet says: 0x1-0x6 */
if (reg > 15)
return -ENODEV;
@@ -671,9 +640,10 @@ static int tmp401_probe(struct i2c_client *client)
"TMP401", "TMP411", "TMP431", "TMP432", "TMP435"
};
struct device *dev = &client->dev;
+ struct hwmon_channel_info *info;
struct device *hwmon_dev;
struct tmp401_data *data;
- int groups = 0, status;
+ int status;
data = devm_kzalloc(dev, sizeof(struct tmp401_data), GFP_KERNEL);
if (!data)
@@ -683,24 +653,53 @@ static int tmp401_probe(struct i2c_client *client)
mutex_init(&data->update_lock);
data->kind = i2c_match_id(tmp401_id, client)->driver_data;
- /* Initialize the TMP401 chip */
- status = tmp401_init_client(data, client);
- if (status < 0)
- return status;
+ data->regmap = devm_regmap_init(dev, NULL, data, &tmp401_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
- /* Register sysfs hooks */
- data->groups[groups++] = &tmp401_group;
+ /* initialize configuration data */
+ data->chip.ops = &tmp401_ops;
+ data->chip.info = data->info;
- /* Register additional tmp411 sysfs hooks */
- if (data->kind == tmp411)
- data->groups[groups++] = &tmp411_group;
+ data->info[0] = &data->chip_info;
+ data->info[1] = &data->temp_info;
- /* Register additional tmp432 sysfs hooks */
- if (data->kind == tmp432)
- data->groups[groups++] = &tmp432_group;
+ info = &data->chip_info;
+ info->type = hwmon_chip;
+ info->config = data->chip_channel_config;
+
+ data->chip_channel_config[0] = HWMON_C_UPDATE_INTERVAL;
+
+ info = &data->temp_info;
+ info->type = hwmon_temp;
+ info->config = data->temp_channel_config;
+
+ data->temp_channel_config[0] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM;
+ data->temp_channel_config[1] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT;
+
+ if (data->kind == tmp411) {
+ data->temp_channel_config[0] |= HWMON_T_HIGHEST | HWMON_T_LOWEST;
+ data->temp_channel_config[1] |= HWMON_T_HIGHEST | HWMON_T_LOWEST;
+ data->chip_channel_config[0] |= HWMON_C_TEMP_RESET_HISTORY;
+ }
+
+ if (data->kind == tmp432) {
+ data->temp_channel_config[2] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT;
+ }
+
+ /* Initialize the TMP401 chip */
+ status = tmp401_init_client(data);
+ if (status < 0)
+ return status;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- data, data->groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
+ &data->chip, NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 30aae8642069..5cde837bfd09 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -659,8 +659,10 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
- if (!acpi_id)
- return -EINVAL;
+ if (!acpi_id) {
+ rc = -EINVAL;
+ goto out_mbox_free;
+ }
version = (int)acpi_id->driver_data;
diff --git a/drivers/hwspinlock/stm32_hwspinlock.c b/drivers/hwspinlock/stm32_hwspinlock.c
index 3ad0ce0da4d9..5bd11a7fab65 100644
--- a/drivers/hwspinlock/stm32_hwspinlock.c
+++ b/drivers/hwspinlock/stm32_hwspinlock.c
@@ -54,8 +54,23 @@ static const struct hwspinlock_ops stm32_hwspinlock_ops = {
.relax = stm32_hwspinlock_relax,
};
+static void stm32_hwspinlock_disable_clk(void *data)
+{
+ struct platform_device *pdev = data;
+ struct stm32_hwspinlock *hw = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
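+ /*
+ * Undo the probe-time sequence (pm_runtime_get_noresume/set_active/
+ * enable/put) before gating the clock.
+ */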
+ pm_runtime_get_sync(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+
+ clk_disable_unprepare(hw->clk);
+}
+
static int stm32_hwspinlock_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct stm32_hwspinlock *hw;
void __iomem *io_base;
size_t array_size;
@@ -66,41 +81,43 @@ static int stm32_hwspinlock_probe(struct platform_device *pdev)
return PTR_ERR(io_base);
array_size = STM32_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock);
- hw = devm_kzalloc(&pdev->dev, sizeof(*hw) + array_size, GFP_KERNEL);
+ hw = devm_kzalloc(dev, sizeof(*hw) + array_size, GFP_KERNEL);
if (!hw)
return -ENOMEM;
- hw->clk = devm_clk_get(&pdev->dev, "hsem");
+ hw->clk = devm_clk_get(dev, "hsem");
if (IS_ERR(hw->clk))
return PTR_ERR(hw->clk);
- for (i = 0; i < STM32_MUTEX_NUM_LOCKS; i++)
- hw->bank.lock[i].priv = io_base + i * sizeof(u32);
+ ret = clk_prepare_enable(hw->clk);
+ if (ret) {
+ dev_err(dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
platform_set_drvdata(pdev, hw);
- pm_runtime_enable(&pdev->dev);
- ret = hwspin_lock_register(&hw->bank, &pdev->dev, &stm32_hwspinlock_ops,
- 0, STM32_MUTEX_NUM_LOCKS);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_put(dev);
- if (ret)
- pm_runtime_disable(&pdev->dev);
+ ret = devm_add_action_or_reset(dev, stm32_hwspinlock_disable_clk, pdev);
+ if (ret) {
+ dev_err(dev, "Failed to register action\n");
+ return ret;
+ }
- return ret;
-}
+ for (i = 0; i < STM32_MUTEX_NUM_LOCKS; i++)
+ hw->bank.lock[i].priv = io_base + i * sizeof(u32);
-static int stm32_hwspinlock_remove(struct platform_device *pdev)
-{
- struct stm32_hwspinlock *hw = platform_get_drvdata(pdev);
- int ret;
+ ret = devm_hwspin_lock_register(dev, &hw->bank, &stm32_hwspinlock_ops,
+ 0, STM32_MUTEX_NUM_LOCKS);
- ret = hwspin_lock_unregister(&hw->bank);
if (ret)
- dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
-
- pm_runtime_disable(&pdev->dev);
+ dev_err(dev, "Failed to register hwspinlock\n");
- return 0;
+ return ret;
}
static int __maybe_unused stm32_hwspinlock_runtime_suspend(struct device *dev)
@@ -135,7 +152,6 @@ MODULE_DEVICE_TABLE(of, stm32_hwpinlock_ids);
static struct platform_driver stm32_hwspinlock_driver = {
.probe = stm32_hwspinlock_probe,
- .remove = stm32_hwspinlock_remove,
.driver = {
.name = "stm32_hwspinlock",
.of_match_table = stm32_hwpinlock_ids,
diff --git a/drivers/hwtracing/coresight/coresight-cfg-preload.c b/drivers/hwtracing/coresight/coresight-cfg-preload.c
index 751af3710d56..e237a4edfa09 100644
--- a/drivers/hwtracing/coresight/coresight-cfg-preload.c
+++ b/drivers/hwtracing/coresight/coresight-cfg-preload.c
@@ -24,8 +24,13 @@ static struct cscfg_config_desc *preload_cfgs[] = {
NULL
};
+static struct cscfg_load_owner_info preload_owner = {
+ .type = CSCFG_OWNER_PRELOAD,
+};
+
/* preload called on initialisation */
-int cscfg_preload(void)
+int cscfg_preload(void *owner_handle)
{
- return cscfg_load_config_sets(preload_cfgs, preload_feats);
+ preload_owner.owner_handle = owner_handle;
+ return cscfg_load_config_sets(preload_cfgs, preload_feats, &preload_owner);
}
diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h
index 25eb6c632692..9bd44b940add 100644
--- a/drivers/hwtracing/coresight/coresight-config.h
+++ b/drivers/hwtracing/coresight/coresight-config.h
@@ -97,6 +97,8 @@ struct cscfg_regval_desc {
* @params_desc: array of parameters used.
* @nr_regs: number of registers used.
* @regs_desc: array of registers used.
+ * @load_owner: handle to load owner for dynamic load and unload of features.
+ * @fs_group: reference to configfs group for dynamic unload.
*/
struct cscfg_feature_desc {
const char *name;
@@ -107,6 +109,8 @@ struct cscfg_feature_desc {
struct cscfg_parameter_desc *params_desc;
int nr_regs;
struct cscfg_regval_desc *regs_desc;
+ void *load_owner;
+ struct config_group *fs_group;
};
/**
@@ -128,7 +132,8 @@ struct cscfg_feature_desc {
* @presets: Array of preset values.
* @event_ea: Extended attribute for perf event value
* @active_cnt: ref count for activate on this configuration.
- *
+ * @load_owner: handle to load owner for dynamic load and unload of configs.
+ * @fs_group: reference to configfs group for dynamic unload.
*/
struct cscfg_config_desc {
const char *name;
@@ -141,6 +146,8 @@ struct cscfg_config_desc {
const u64 *presets; /* nr_presets * nr_total_params */
struct dev_ext_attribute *event_ea;
atomic_t active_cnt;
+ void *load_owner;
+ struct config_group *fs_group;
};
/**
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 8a18c71df37a..88653d1c06a4 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -729,7 +729,7 @@ static inline void coresight_put_ref(struct coresight_device *csdev)
* coresight_grab_device - Power up this device and any of the helper
* devices connected to it for trace operation. Since the helper devices
* don't appear on the trace path, they should be handled along with the
- * the master device.
+ * master device.
*/
static int coresight_grab_device(struct coresight_device *csdev)
{
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 86a313857b58..bf18128cf5de 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -722,7 +722,16 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etm4_enable_arg arg = { };
- int ret;
+ unsigned long cfg_hash;
+ int ret, preset;
+
+ /* enable any config activated by configfs */
+ cscfg_config_sysfs_get_active_cfg(&cfg_hash, &preset);
+ if (cfg_hash) {
+ ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset);
+ if (ret)
+ return ret;
+ }
spin_lock(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 58062a5a8238..bb14a3a8a921 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -856,13 +856,11 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
void __iomem *base;
- unsigned long *guaranteed;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct stm_drvdata *drvdata;
struct resource *res = &adev->res;
struct resource ch_res;
- size_t bitmap_size;
struct coresight_desc desc = { 0 };
desc.name = coresight_alloc_device_name(&stm_devs, dev);
@@ -904,12 +902,10 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
else
drvdata->numsp = stm_num_stimulus_port(drvdata);
- bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
-
- guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
- if (!guaranteed)
+ drvdata->chs.guaranteed = devm_bitmap_zalloc(dev, drvdata->numsp,
+ GFP_KERNEL);
+ if (!drvdata->chs.guaranteed)
return -ENOMEM;
- drvdata->chs.guaranteed = guaranteed;
spin_lock_init(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
index c547816b9000..433ede94dd63 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
@@ -6,6 +6,7 @@
#include <linux/configfs.h>
+#include "coresight-config.h"
#include "coresight-syscfg-configfs.h"
/* create a default ci_type. */
@@ -87,9 +88,75 @@ static ssize_t cscfg_cfg_values_show(struct config_item *item, char *page)
}
CONFIGFS_ATTR_RO(cscfg_cfg_, values);
+static ssize_t cscfg_cfg_enable_show(struct config_item *item, char *page)
+{
+ struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
+ struct cscfg_fs_config, group);
+
+ return scnprintf(page, PAGE_SIZE, "%d\n", fs_config->active);
+}
+
+static ssize_t cscfg_cfg_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
+ struct cscfg_fs_config, group);
+ int err;
+ bool val;
+
+ err = kstrtobool(page, &val);
+ if (!err)
+ err = cscfg_config_sysfs_activate(fs_config->config_desc, val);
+ if (!err) {
+ fs_config->active = val;
+ if (val)
+ cscfg_config_sysfs_set_preset(fs_config->preset);
+ }
+ return err ? err : count;
+}
+CONFIGFS_ATTR(cscfg_cfg_, enable);
+
+static ssize_t cscfg_cfg_preset_show(struct config_item *item, char *page)
+{
+ struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
+ struct cscfg_fs_config, group);
+
+ return scnprintf(page, PAGE_SIZE, "%d\n", fs_config->preset);
+}
+
+static ssize_t cscfg_cfg_preset_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
+ struct cscfg_fs_config, group);
+ int preset, err;
+
+ err = kstrtoint(page, 0, &preset);
+ if (!err) {
+ /*
+ * presets start at 1, and go up to max (15),
+ * but the config may provide fewer.
+ */
+ if ((preset < 1) || (preset > fs_config->config_desc->nr_presets))
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ /* set new value */
+ fs_config->preset = preset;
+ /* set on system if active */
+ if (fs_config->active)
+ cscfg_config_sysfs_set_preset(fs_config->preset);
+ }
+ return err ? err : count;
+}
+CONFIGFS_ATTR(cscfg_cfg_, preset);
+
static struct configfs_attribute *cscfg_config_view_attrs[] = {
&cscfg_cfg_attr_description,
&cscfg_cfg_attr_feature_refs,
+ &cscfg_cfg_attr_enable,
+ &cscfg_cfg_attr_preset,
NULL,
};
@@ -334,9 +401,19 @@ int cscfg_configfs_add_config(struct cscfg_config_desc *config_desc)
if (IS_ERR(new_group))
return PTR_ERR(new_group);
err = configfs_register_group(&cscfg_configs_grp, new_group);
+ if (!err)
+ config_desc->fs_group = new_group;
return err;
}
+void cscfg_configfs_del_config(struct cscfg_config_desc *config_desc)
+{
+ if (config_desc->fs_group) {
+ configfs_unregister_group(config_desc->fs_group);
+ config_desc->fs_group = NULL;
+ }
+}
+
static struct config_item_type cscfg_features_type = {
.ct_owner = THIS_MODULE,
};
@@ -358,9 +435,19 @@ int cscfg_configfs_add_feature(struct cscfg_feature_desc *feat_desc)
if (IS_ERR(new_group))
return PTR_ERR(new_group);
err = configfs_register_group(&cscfg_features_grp, new_group);
+ if (!err)
+ feat_desc->fs_group = new_group;
return err;
}
+void cscfg_configfs_del_feature(struct cscfg_feature_desc *feat_desc)
+{
+ if (feat_desc->fs_group) {
+ configfs_unregister_group(feat_desc->fs_group);
+ feat_desc->fs_group = NULL;
+ }
+}
+
int cscfg_configfs_init(struct cscfg_manager *cscfg_mgr)
{
struct configfs_subsystem *subsys;
diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.h b/drivers/hwtracing/coresight/coresight-syscfg-configfs.h
index 7d6ffe35ca4c..373d84d43268 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg-configfs.h
+++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.h
@@ -15,6 +15,8 @@
struct cscfg_fs_config {
struct cscfg_config_desc *config_desc;
struct config_group group;
+ bool active;
+ int preset;
};
/* container for feature view */
@@ -41,5 +43,7 @@ int cscfg_configfs_init(struct cscfg_manager *cscfg_mgr);
void cscfg_configfs_release(struct cscfg_manager *cscfg_mgr);
int cscfg_configfs_add_config(struct cscfg_config_desc *config_desc);
int cscfg_configfs_add_feature(struct cscfg_feature_desc *feat_desc);
+void cscfg_configfs_del_config(struct cscfg_config_desc *config_desc);
+void cscfg_configfs_del_feature(struct cscfg_feature_desc *feat_desc);
#endif /* CORESIGHT_SYSCFG_CONFIGFS_H */
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
index 43054568430f..098fc34c4829 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg.c
@@ -250,6 +250,13 @@ static int cscfg_check_feat_for_cfg(struct cscfg_config_desc *config_desc)
static int cscfg_load_feat(struct cscfg_feature_desc *feat_desc)
{
int err;
+ struct cscfg_feature_desc *feat_desc_exist;
+
+ /* new feature must have unique name */
+ list_for_each_entry(feat_desc_exist, &cscfg_mgr->feat_desc_list, item) {
+ if (!strcmp(feat_desc_exist->name, feat_desc->name))
+ return -EEXIST;
+ }
/* add feature to any matching registered devices */
err = cscfg_add_feat_to_csdevs(feat_desc);
@@ -267,6 +274,13 @@ static int cscfg_load_feat(struct cscfg_feature_desc *feat_desc)
static int cscfg_load_config(struct cscfg_config_desc *config_desc)
{
int err;
+ struct cscfg_config_desc *config_desc_exist;
+
+ /* new configuration must have a unique name */
+ list_for_each_entry(config_desc_exist, &cscfg_mgr->config_desc_list, item) {
+ if (!strcmp(config_desc_exist->name, config_desc->name))
+ return -EEXIST;
+ }
/* validate features are present */
err = cscfg_check_feat_for_cfg(config_desc);
@@ -354,6 +368,92 @@ unlock_exit:
return err;
}
+/*
+ * Conditionally take a reference on the owner to prevent unload.
+ *
+ * Module-loaded configs need to be pinned to prevent premature unload.
+ */
+static int cscfg_owner_get(struct cscfg_load_owner_info *owner_info)
+{
+ if ((owner_info->type == CSCFG_OWNER_MODULE) &&
+ (!try_module_get(owner_info->owner_handle)))
+ return -EINVAL;
+ return 0;
+}
+
+/* conditionally lower ref count on an owner */
+static void cscfg_owner_put(struct cscfg_load_owner_info *owner_info)
+{
+ if (owner_info->type == CSCFG_OWNER_MODULE)
+ module_put(owner_info->owner_handle);
+}
+
+static void cscfg_remove_owned_csdev_configs(struct coresight_device *csdev, void *load_owner)
+{
+ struct cscfg_config_csdev *config_csdev, *tmp;
+
+ if (list_empty(&csdev->config_csdev_list))
+ return;
+
+ list_for_each_entry_safe(config_csdev, tmp, &csdev->config_csdev_list, node) {
+ if (config_csdev->config_desc->load_owner == load_owner)
+ list_del(&config_csdev->node);
+ }
+}
+
+static void cscfg_remove_owned_csdev_features(struct coresight_device *csdev, void *load_owner)
+{
+ struct cscfg_feature_csdev *feat_csdev, *tmp;
+
+ if (list_empty(&csdev->feature_csdev_list))
+ return;
+
+ list_for_each_entry_safe(feat_csdev, tmp, &csdev->feature_csdev_list, node) {
+ if (feat_csdev->feat_desc->load_owner == load_owner)
+ list_del(&feat_csdev->node);
+ }
+}
+
+/*
+ * removal is relatively easy - just remove from all lists, anything that
+ * matches the owner. Memory for the descriptors will be managed by the owner,
+ * memory for the csdev items is devm_ allocated with the individual csdev
+ * devices.
+ */
+static void cscfg_unload_owned_cfgs_feats(void *load_owner)
+{
+ struct cscfg_config_desc *config_desc, *cfg_tmp;
+ struct cscfg_feature_desc *feat_desc, *feat_tmp;
+ struct cscfg_registered_csdev *csdev_item;
+
+ /* remove from each csdev instance feature and config lists */
+ list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) {
+ /*
+ * for each csdev, check the loaded lists and remove if
+ * referenced descriptor is owned
+ */
+ cscfg_remove_owned_csdev_configs(csdev_item->csdev, load_owner);
+ cscfg_remove_owned_csdev_features(csdev_item->csdev, load_owner);
+ }
+
+ /* remove from the config descriptor lists */
+ list_for_each_entry_safe(config_desc, cfg_tmp, &cscfg_mgr->config_desc_list, item) {
+ if (config_desc->load_owner == load_owner) {
+ cscfg_configfs_del_config(config_desc);
+ etm_perf_del_symlink_cscfg(config_desc);
+ list_del(&config_desc->item);
+ }
+ }
+
+ /* remove from the feature descriptor lists */
+ list_for_each_entry_safe(feat_desc, feat_tmp, &cscfg_mgr->feat_desc_list, item) {
+ if (feat_desc->load_owner == load_owner) {
+ cscfg_configfs_del_feature(feat_desc);
+ list_del(&feat_desc->item);
+ }
+ }
+}
+
/**
* cscfg_load_config_sets - API function to load feature and config sets.
*
@@ -361,13 +461,22 @@ unlock_exit:
* descriptors and load into the system.
* Features are loaded first to ensure configuration dependencies can be met.
*
+ * To facilitate dynamic loading and unloading, features and configurations
+ * have a "load_owner", to allow later unload by the same owner. An owner may
+ * be a loadable module or configuration dynamically created via configfs.
+ * As later-loaded configurations can use earlier-loaded features, creating
+ * load dependencies, a load order list is maintained. Unload is strictly in
+ * the reverse of load order.
+ *
* @config_descs: 0 terminated array of configuration descriptors.
* @feat_descs: 0 terminated array of feature descriptors.
+ * @owner_info: Information on the owner of this set.
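+ *
+ * Illustrative usage sketch (a loadable-module caller; names are
+ * hypothetical):
+ *
+ *   static struct cscfg_load_owner_info mod_owner = {
+ *           .type = CSCFG_OWNER_MODULE,
+ *           .owner_handle = THIS_MODULE,
+ *   };
+ *   err = cscfg_load_config_sets(cfg_descs, feat_descs, &mod_owner);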
*/
int cscfg_load_config_sets(struct cscfg_config_desc **config_descs,
- struct cscfg_feature_desc **feat_descs)
+ struct cscfg_feature_desc **feat_descs,
+ struct cscfg_load_owner_info *owner_info)
{
- int err, i = 0;
+ int err = 0, i = 0;
mutex_lock(&cscfg_mutex);
@@ -380,8 +489,10 @@ int cscfg_load_config_sets(struct cscfg_config_desc **config_descs,
if (err) {
pr_err("coresight-syscfg: Failed to load feature %s\n",
feat_descs[i]->name);
+ cscfg_unload_owned_cfgs_feats(owner_info);
goto exit_unlock;
}
+ feat_descs[i]->load_owner = owner_info;
i++;
}
}
@@ -396,18 +507,86 @@ int cscfg_load_config_sets(struct cscfg_config_desc **config_descs,
if (err) {
pr_err("coresight-syscfg: Failed to load configuration %s\n",
config_descs[i]->name);
+ cscfg_unload_owned_cfgs_feats(owner_info);
goto exit_unlock;
}
+ config_descs[i]->load_owner = owner_info;
i++;
}
}
+ /* add the load owner to the load order list */
+ list_add_tail(&owner_info->item, &cscfg_mgr->load_order_list);
+ if (!list_is_singular(&cscfg_mgr->load_order_list)) {
+ /* pin the previous owner: later sets may depend on its features */
+ err = cscfg_owner_get(list_prev_entry(owner_info, item));
+ if (err) {
+ cscfg_unload_owned_cfgs_feats(owner_info);
+ list_del(&owner_info->item);
+ }
+ }
+
exit_unlock:
mutex_unlock(&cscfg_mutex);
return err;
}
EXPORT_SYMBOL_GPL(cscfg_load_config_sets);
+/**
+ * cscfg_unload_config_sets - unload a set of configurations by owner.
+ *
+ * Dynamic unload of configuration and feature sets is done on the basis of
+ * the load owner of that set. Later loaded configurations can depend on
+ * features loaded earlier.
+ *
+ * Therefore, unload is only possible if:
+ * 1) no configurations are active.
+ * 2) the set being unloaded was the last to be loaded to maintain dependencies.
+ *
+ * @owner_info: Information on owner for set being unloaded.
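+ *
+ * Illustrative usage sketch (pairs with the load example above):
+ *
+ *   err = cscfg_unload_config_sets(&mod_owner);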
+ */
+int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info)
+{
+ int err = 0;
+ struct cscfg_load_owner_info *load_list_item = NULL;
+
+ mutex_lock(&cscfg_mutex);
+
+ /* cannot unload if anything is active */
+ if (atomic_read(&cscfg_mgr->sys_active_cnt)) {
+ err = -EBUSY;
+ goto exit_unlock;
+ }
+
+ /* cannot unload if not last loaded in load order */
+ if (!list_empty(&cscfg_mgr->load_order_list)) {
+ load_list_item = list_last_entry(&cscfg_mgr->load_order_list,
+ struct cscfg_load_owner_info, item);
+ if (load_list_item != owner_info)
+ load_list_item = NULL;
+ }
+
+ if (!load_list_item) {
+ err = -EINVAL;
+ goto exit_unlock;
+ }
+
+ /* unload all belonging to load_owner */
+ cscfg_unload_owned_cfgs_feats(owner_info);
+
+ /* remove from load order list */
+ if (!list_is_singular(&cscfg_mgr->load_order_list)) {
+ /* unlock previous item in load order list */
+ cscfg_owner_put(list_prev_entry(owner_info, item));
+ }
+ list_del(&owner_info->item);
+
+exit_unlock:
+ mutex_unlock(&cscfg_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cscfg_unload_config_sets);
+
/* Handle coresight device registration and add configs and features to devices */
/* iterate through config lists and load matching configs to device */
@@ -566,32 +745,26 @@ unlock_exit:
}
EXPORT_SYMBOL_GPL(cscfg_csdev_reset_feats);
-/**
- * cscfg_activate_config - Mark a configuration descriptor as active.
- *
- * This will be seen when csdev devices are enabled in the system.
- * Only activated configurations can be enabled on individual devices.
- * Activation protects the configuration from alteration or removal while
- * active.
- *
- * Selection by hash value - generated from the configuration name when it
- * was loaded and added to the cs_etm/configurations file system for selection
- * by perf.
+/*
+ * This activates a configuration for either perf or sysfs use. Perf can have
+ * multiple active configs, selected per event; sysfs is limited to one.
*
* Increments the configuration descriptor active count and the global active
* count.
*
* @cfg_hash: Hash value of the selected configuration name.
*/
-int cscfg_activate_config(unsigned long cfg_hash)
+static int _cscfg_activate_config(unsigned long cfg_hash)
{
struct cscfg_config_desc *config_desc;
int err = -EINVAL;
- mutex_lock(&cscfg_mutex);
-
list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
+ /* must ensure that the config cannot be unloaded while in use */
+ err = cscfg_owner_get(config_desc->load_owner);
+ if (err)
+ break;
/*
* increment the global active count - control changes to
* active configurations
@@ -609,6 +782,101 @@ int cscfg_activate_config(unsigned long cfg_hash)
break;
}
}
+ return err;
+}
+
+static void _cscfg_deactivate_config(unsigned long cfg_hash)
+{
+ struct cscfg_config_desc *config_desc;
+
+ list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
+ if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
+ atomic_dec(&config_desc->active_cnt);
+ atomic_dec(&cscfg_mgr->sys_active_cnt);
+ cscfg_owner_put(config_desc->load_owner);
+ dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name);
+ break;
+ }
+ }
+}
+
+/*
+ * Called from configfs to set or clear the active configuration used when
+ * trace is controlled from sysfs.
+ */
+int cscfg_config_sysfs_activate(struct cscfg_config_desc *config_desc, bool activate)
+{
+ unsigned long cfg_hash;
+ int err = 0;
+
+ mutex_lock(&cscfg_mutex);
+
+ cfg_hash = (unsigned long)config_desc->event_ea->var;
+
+ if (activate) {
+ /* no config may currently be active when activating a new one */
+ if (cscfg_mgr->sysfs_active_config) {
+ err = -EBUSY;
+ goto exit_unlock;
+ }
+ err = _cscfg_activate_config(cfg_hash);
+ if (!err)
+ cscfg_mgr->sysfs_active_config = cfg_hash;
+ } else {
+ /* disable if matching current value */
+ if (cscfg_mgr->sysfs_active_config == cfg_hash) {
+ _cscfg_deactivate_config(cfg_hash);
+ cscfg_mgr->sysfs_active_config = 0;
+ } else
+ err = -EINVAL;
+ }
+
+exit_unlock:
+ mutex_unlock(&cscfg_mutex);
+ return err;
+}
+
+/* set the sysfs preset value */
+void cscfg_config_sysfs_set_preset(int preset)
+{
+ mutex_lock(&cscfg_mutex);
+ cscfg_mgr->sysfs_active_preset = preset;
+ mutex_unlock(&cscfg_mutex);
+}
+
+/*
+ * Used by a device to get the config and preset selected as active in configfs,
+ * when using sysfs to control trace.
+ */
+void cscfg_config_sysfs_get_active_cfg(unsigned long *cfg_hash, int *preset)
+{
+ mutex_lock(&cscfg_mutex);
+ *preset = cscfg_mgr->sysfs_active_preset;
+ *cfg_hash = cscfg_mgr->sysfs_active_config;
+ mutex_unlock(&cscfg_mutex);
+}
+EXPORT_SYMBOL_GPL(cscfg_config_sysfs_get_active_cfg);
+
+/**
+ * cscfg_activate_config - Mark a configuration descriptor as active.
+ *
+ * This will be seen when csdev devices are enabled in the system.
+ * Only activated configurations can be enabled on individual devices.
+ * Activation protects the configuration from alteration or removal while
+ * active.
+ *
+ * Selection by hash value - generated from the configuration name when it
+ * was loaded and added to the cs_etm/configurations file system for selection
+ * by perf.
+ *
+ * @cfg_hash: Hash value of the selected configuration name.
+ */
+int cscfg_activate_config(unsigned long cfg_hash)
+{
+ int err = 0;
+
+ mutex_lock(&cscfg_mutex);
+ err = _cscfg_activate_config(cfg_hash);
mutex_unlock(&cscfg_mutex);
return err;
@@ -624,18 +892,8 @@ EXPORT_SYMBOL_GPL(cscfg_activate_config);
*/
void cscfg_deactivate_config(unsigned long cfg_hash)
{
- struct cscfg_config_desc *config_desc;
-
mutex_lock(&cscfg_mutex);
-
- list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
- if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
- atomic_dec(&config_desc->active_cnt);
- atomic_dec(&cscfg_mgr->sys_active_cnt);
- dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name);
- break;
- }
- }
+ _cscfg_deactivate_config(cfg_hash);
mutex_unlock(&cscfg_mutex);
}
EXPORT_SYMBOL_GPL(cscfg_deactivate_config);
@@ -827,10 +1085,11 @@ int __init cscfg_init(void)
INIT_LIST_HEAD(&cscfg_mgr->csdev_desc_list);
INIT_LIST_HEAD(&cscfg_mgr->feat_desc_list);
INIT_LIST_HEAD(&cscfg_mgr->config_desc_list);
+ INIT_LIST_HEAD(&cscfg_mgr->load_order_list);
atomic_set(&cscfg_mgr->sys_active_cnt, 0);
/* preload built-in configurations */
- err = cscfg_preload();
+ err = cscfg_preload(THIS_MODULE);
if (err)
goto exit_err;
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.h b/drivers/hwtracing/coresight/coresight-syscfg.h
index 8d018efd6ead..9106ffab4833 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.h
+++ b/drivers/hwtracing/coresight/coresight-syscfg.h
@@ -25,16 +25,22 @@
* @csdev_desc_list: List of coresight devices registered with the configuration manager.
* @feat_desc_list: List of feature descriptors to load into registered devices.
* @config_desc_list: List of system configuration descriptors to load into registered devices.
+ * @load_order_list: Ordered list of owners for dynamically loaded configurations.
* @sys_active_cnt: Total number of active config descriptor references.
* @cfgfs_subsys: configfs subsystem used to manage configurations.
+ * @sysfs_active_config: Active config hash used if CoreSight is controlled from sysfs.
+ * @sysfs_active_preset: Active preset index used if CoreSight is controlled from sysfs.
*/
struct cscfg_manager {
struct device dev;
struct list_head csdev_desc_list;
struct list_head feat_desc_list;
struct list_head config_desc_list;
+ struct list_head load_order_list;
atomic_t sys_active_cnt;
struct configfs_subsystem cfgfs_subsys;
+ u32 sysfs_active_config;
+ int sysfs_active_preset;
};
/* get reference to dev in cscfg_manager */
@@ -56,18 +62,44 @@ struct cscfg_registered_csdev {
struct list_head item;
};
+/* owner types for loading and unloading of config and feature sets */
+enum cscfg_load_owner_type {
+ CSCFG_OWNER_PRELOAD,
+ CSCFG_OWNER_MODULE,
+};
+
+/**
+ * Load item - item to add to the load order list allowing dynamic load and
+ * unload of configurations and features. The caller loading a config
+ * set provides a context handle for unload. The API ensures that
+ * items are unloaded strictly in reverse load order so that
+ * dependencies are respected.
+ *
+ * @item: list entry for load order list.
+ * @type: type of owner - allows interpretation of owner_handle.
+ * @owner_handle: load context - handle for owner of loaded configs.
+ */
+struct cscfg_load_owner_info {
+ struct list_head item;
+ int type;
+ void *owner_handle;
+};
+
/* internal core operations for cscfg */
int __init cscfg_init(void);
void cscfg_exit(void);
-int cscfg_preload(void);
+int cscfg_preload(void *owner_handle);
const struct cscfg_feature_desc *cscfg_get_named_feat_desc(const char *name);
int cscfg_update_feat_param_val(struct cscfg_feature_desc *feat_desc,
int param_idx, u64 value);
-
+int cscfg_config_sysfs_activate(struct cscfg_config_desc *cfg_desc, bool activate);
+void cscfg_config_sysfs_set_preset(int preset);
/* syscfg manager external API */
int cscfg_load_config_sets(struct cscfg_config_desc **cfg_descs,
- struct cscfg_feature_desc **feat_descs);
+ struct cscfg_feature_desc **feat_descs,
+ struct cscfg_load_owner_info *owner_info);
+int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info);
int cscfg_register_csdev(struct coresight_device *csdev, u32 match_flags,
struct cscfg_csdev_feat_ops *ops);
void cscfg_unregister_csdev(struct coresight_device *csdev);
@@ -77,5 +109,6 @@ void cscfg_csdev_reset_feats(struct coresight_device *csdev);
int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
unsigned long cfg_hash, int preset);
void cscfg_csdev_disable_active_config(struct coresight_device *csdev);
+void cscfg_config_sysfs_get_active_cfg(unsigned long *cfg_hash, int *preset);
#endif /* CORESIGHT_SYSCFG_H */
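The load_order_list introduced above exists so that dynamically loaded config
sets can only be removed in reverse load order. A minimal sketch of how an
owner list can enforce that LIFO rule - the helper name and list walk are
illustrative assumptions, not the exact coresight implementation:

/* Sketch: only the most recently loaded owner may unload (LIFO). */
static int cscfg_check_unload_order(struct cscfg_load_owner_info *owner_info)
{
	/* Hypothetical helper; assumes cscfg_mutex is held by the caller. */
	if (list_empty(&cscfg_mgr->load_order_list))
		return -EINVAL;

	/* The tail of the list is the most recent load. */
	if (list_last_entry(&cscfg_mgr->load_order_list,
			    struct cscfg_load_owner_info, item) != owner_info)
		return -EBUSY;	/* out-of-order unload would break dependencies */

	list_del(&owner_info->item);
	return 0;
}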
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index dce392839017..42da31c1ab70 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -617,7 +617,7 @@ config I2C_EXYNOS5
help
High-speed I2C controller on Samsung Exynos5 and newer Samsung SoCs:
Exynos5250, Exynos5260, Exynos5410, Exynos542x, Exynos5800,
- Exynos5433 and Exynos7.
+ Exynos5433, Exynos7, Exynos850 and ExynosAutoV9.
Choose Y here only if you build for such Samsung SoC.
config I2C_GPIO
@@ -963,16 +963,10 @@ config I2C_RK3X
This driver can also be built as a module. If so, the module will
be called i2c-rk3x.
-config HAVE_S3C2410_I2C
- bool
- help
- This will include I2C support for Samsung SoCs. If you want to
- include I2C support for any machine, kindly select this in the
- respective Kconfig file.
-
config I2C_S3C2410
tristate "S3C/Exynos I2C Driver"
- depends on HAVE_S3C2410_I2C || COMPILE_TEST
+ depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || \
+ ARCH_S5PV210 || COMPILE_TEST
help
Say Y here to include support for I2C controller in the
Samsung SoCs (S3C, S5Pv210, Exynos).
@@ -1159,22 +1153,12 @@ config I2C_XILINX
This driver can also be built as a module. If so, the module
will be called xilinx_i2c.
-config I2C_XLR
- tristate "Netlogic XLR I2C support"
- depends on CPU_XLR || COMPILE_TEST
- help
- This driver enables support for the on-chip I2C interface of
- the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-xlr.
-
config I2C_XLP9XX
- tristate "XLP9XX I2C support"
- depends on CPU_XLP || ARCH_THUNDER2 || COMPILE_TEST
+ tristate "Cavium ThunderX2 I2C support"
+ depends on ARCH_THUNDER2 || COMPILE_TEST
help
This driver enables support for the on-chip I2C interface of
- the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
+ the Cavium ThunderX2 processors. (Originally on Netlogic XLP SoCs.)
This driver can also be built as a module. If so, the module will
be called i2c-xlp9xx.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index d85899fef8c7..1d00dce77098 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -119,7 +119,6 @@ obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o
obj-$(CONFIG_I2C_THUNDERX) += i2c-thunderx.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
-obj-$(CONFIG_I2C_XLR) += i2c-xlr.o
obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o
obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 67e8b97c0c95..771e53d3d197 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -16,8 +16,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 37443edbf754..dfc534065595 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -402,7 +402,7 @@ static const struct i2c_adapter_quirks bcm2835_i2c_quirks = {
static int bcm2835_i2c_probe(struct platform_device *pdev)
{
struct bcm2835_i2c_dev *i2c_dev;
- struct resource *mem, *irq;
+ struct resource *mem;
int ret;
struct i2c_adapter *adap;
struct clk *mclk;
@@ -452,12 +452,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
return ret;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "No IRQ resource\n");
- return -ENODEV;
- }
- i2c_dev->irq = irq->start;
+ i2c_dev->irq = platform_get_irq(pdev, 0);
+ if (i2c_dev->irq < 0)
+ return i2c_dev->irq;
ret = request_irq(i2c_dev->irq, bcm2835_i2c_isr, IRQF_SHARED,
dev_name(&pdev->dev), i2c_dev);
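The bcm2835 hunk above follows the preferred modern pattern: platform_get_irq()
resolves the interrupt from OF, ACPI or legacy resources and prints its own
error message, so the caller just propagates the negative code (including
-EPROBE_DEFER). A generic sketch of the idiom, with illustrative names:

static irqreturn_t foo_isr(int irq, void *data)
{
	return IRQ_HANDLED;	/* stub for the sketch */
}

static int foo_probe(struct platform_device *pdev)
{
	int irq;

	/* Logs failures itself; no extra dev_err() needed in the caller. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, foo_isr, IRQF_SHARED,
				dev_name(&pdev->dev), pdev);
}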
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 60a2e750cee9..4b26cba40139 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -191,23 +191,26 @@ struct reset_control;
* @cmd_complete: tx completion indicator
* @clk: input reference clock
* @pclk: clock required to access the registers
+ * @rst: optional reset for the controller
* @slave: represent an I2C slave device
+ * @get_clk_rate_khz: callback to retrieve IP specific bus speed
 * @cmd_err: run time hardware error code
* @msgs: points to an array of messages currently being transferred
* @msgs_num: the number of elements in msgs
- * @msg_write_idx: the element index of the current tx message in the msgs
- * array
+ * @msg_write_idx: the element index of the current tx message in the msgs array
* @tx_buf_len: the length of the current tx buffer
* @tx_buf: the current tx buffer
- * @msg_read_idx: the element index of the current rx message in the msgs
- * array
+ * @msg_read_idx: the element index of the current rx message in the msgs array
* @rx_buf_len: the length of the current rx buffer
* @rx_buf: the current rx buffer
* @msg_err: error status of the current transfer
* @status: i2c master status, one of STATUS_*
* @abort_source: copy of the TX_ABRT_SOURCE register
* @irq: interrupt number for the i2c master
+ * @flags: platform specific flags like type of IO accessors or model
* @adapter: i2c subsystem adapter node
+ * @functionality: I2C_FUNC_* ORed bits to reflect what the controller supports
+ * @master_cfg: configuration for the master device
* @slave_cfg: configuration for the slave device
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
@@ -228,7 +231,9 @@ struct reset_control;
* @disable: function to disable the controller
* @disable_int: function to disable all interrupts
* @init: function to initialize the I2C hardware
+ * @set_sda_hold_time: callback to set the IP specific SDA hold timing
* @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE
+ * @rinfo: I²C GPIO recovery information
* @suspended: set to true if the controller is suspended
*
* HCNT and LCNT parameters can be used if the platform knows more accurate
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 9b08bb5df38d..9177463c2cbb 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -701,7 +701,8 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
if (stat & DW_IC_INTR_ACTIVITY)
regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
- if (stat & DW_IC_INTR_STOP_DET)
+ if ((stat & DW_IC_INTR_STOP_DET) &&
+ ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
if (stat & DW_IC_INTR_START_DET)
regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
@@ -723,6 +724,7 @@ static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev)
if (stat & DW_IC_INTR_TX_ABRT) {
dev->cmd_err |= DW_IC_ERR_TX_ABRT;
dev->status = STATUS_IDLE;
+ dev->rx_outstanding = 0;
/*
* Anytime TX_ABRT is set, the contents of the tx/rx
@@ -745,7 +747,8 @@ static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev)
*/
tx_aborted:
- if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
+ if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
+ (dev->rx_outstanding == 0))
complete(&dev->cmd_complete);
else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
/* Workaround to trigger pending interrupt */
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 0f409a4c2da0..ef4250f8852b 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -38,11 +38,18 @@ enum dw_pci_ctl_id_t {
navi_amd,
};
+/*
+ * This is a legacy structure to describe the hardware counters
+ * to configure signal timings on the bus. For Device Tree platforms
+ * one should use the respective properties and for ACPI there is
+ * a set of ACPI methods that provide these counters. No new
+ * platform should use this structure.
+ */
struct dw_scl_sda_cfg {
- u32 ss_hcnt;
- u32 fs_hcnt;
- u32 ss_lcnt;
- u32 fs_lcnt;
+ u16 ss_hcnt;
+ u16 fs_hcnt;
+ u16 ss_lcnt;
+ u16 fs_lcnt;
u32 sda_hold;
};
@@ -206,8 +213,7 @@ static struct dw_pci_controller dw_pci_controllers[] = {
},
};
-#ifdef CONFIG_PM
-static int i2c_dw_pci_suspend(struct device *dev)
+static int __maybe_unused i2c_dw_pci_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
@@ -217,7 +223,7 @@ static int i2c_dw_pci_suspend(struct device *dev)
return 0;
}
-static int i2c_dw_pci_resume(struct device *dev)
+static int __maybe_unused i2c_dw_pci_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
int ret;
@@ -227,7 +233,6 @@ static int i2c_dw_pci_resume(struct device *dev)
return ret;
}
-#endif
static UNIVERSAL_DEV_PM_OPS(i2c_dw_pm_ops, i2c_dw_pci_suspend,
i2c_dw_pci_resume, NULL);
@@ -241,28 +246,24 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
struct dw_pci_controller *controller;
struct dw_scl_sda_cfg *cfg;
- if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) {
- dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__,
- id->driver_data);
- return -EINVAL;
- }
+ if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers))
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Invalid driver data %ld\n",
+ id->driver_data);
controller = &dw_pci_controllers[id->driver_data];
r = pcim_enable_device(pdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n",
- r);
- return r;
- }
+ if (r)
+ return dev_err_probe(&pdev->dev, r,
+ "Failed to enable I2C PCI device\n");
pci_set_master(pdev);
r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
- if (r) {
- dev_err(&pdev->dev, "I/O memory remapping failed\n");
- return r;
- }
+ if (r)
+ return dev_err_probe(&pdev->dev, r,
+ "I/O memory remapping failed\n");
dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
if (!dev)
@@ -352,9 +353,6 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("i2c_designware-pci");
-
static const struct pci_device_id i2_designware_pci_ids[] = {
/* Medfield */
{ PCI_VDEVICE(INTEL, 0x0817), medfield },
@@ -411,9 +409,10 @@ static struct pci_driver dw_i2c_driver = {
.pm = &i2c_dw_pm_ops,
},
};
-
module_pci_driver(dw_i2c_driver);
+/* Work with hotplug and coldplug */
+MODULE_ALIAS("i2c_designware-pci");
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
MODULE_LICENSE("GPL");
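Replacing the #ifdef CONFIG_PM guard with __maybe_unused, as this file now
does, works because the PM-ops macros compile the callback references away
when power management is disabled; the attribute then silences the resulting
unused-function warning. A condensed sketch of the idiom (names illustrative):

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

/*
 * With CONFIG_PM=n the macro drops the callback pointers, leaving both
 * functions unreferenced; __maybe_unused avoids the warning without an
 * #ifdef block around the whole region.
 */
static UNIVERSAL_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume, NULL);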
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 21113665ddea..2bd81abc86f6 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -293,6 +293,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
DPM_FLAG_MAY_SKIP_RESUME);
}
+ device_enable_async_suspend(&pdev->dev);
+
/* The code below assumes runtime PM to be disabled. */
WARN_ON(pm_runtime_enabled(&pdev->dev));
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 97d4f3ac0abd..b812d1090c0f 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -169,6 +169,7 @@
enum i2c_type_exynos {
I2C_TYPE_EXYNOS5,
I2C_TYPE_EXYNOS7,
+ I2C_TYPE_EXYNOSAUTOV9,
};
struct exynos5_i2c {
@@ -181,7 +182,8 @@ struct exynos5_i2c {
unsigned int irq;
void __iomem *regs;
- struct clk *clk;
+ struct clk *clk; /* operating clock */
+ struct clk *pclk; /* bus clock */
struct device *dev;
int state;
@@ -230,6 +232,11 @@ static const struct exynos_hsi2c_variant exynos7_hsi2c_data = {
.hw = I2C_TYPE_EXYNOS7,
};
+static const struct exynos_hsi2c_variant exynosautov9_hsi2c_data = {
+ .fifo_depth = 64,
+ .hw = I2C_TYPE_EXYNOSAUTOV9,
+};
+
static const struct of_device_id exynos5_i2c_match[] = {
{
.compatible = "samsung,exynos5-hsi2c",
@@ -243,6 +250,9 @@ static const struct of_device_id exynos5_i2c_match[] = {
}, {
.compatible = "samsung,exynos7-hsi2c",
.data = &exynos7_hsi2c_data
+ }, {
+ .compatible = "samsung,exynosautov9-hsi2c",
+ .data = &exynosautov9_hsi2c_data
}, {},
};
MODULE_DEVICE_TABLE(of, exynos5_i2c_match);
@@ -282,6 +292,31 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
int div, clk_cycle, temp;
/*
+ * In case of HSI2C controllers in ExynosAutoV9:
+ *
+ * FSCL = IPCLK / ((CLK_DIV + 1) * 16)
+ * T_SCL_LOW = IPCLK * (CLK_DIV + 1) * (N + M)
+ * [N : number of 0's in the TSCL_H_HS]
+ * [M : number of 0's in the TSCL_L_HS]
+ * T_SCL_HIGH = IPCLK * (CLK_DIV + 1) * (N + M)
+ * [N : number of 1's in the TSCL_H_HS]
+ * [M : number of 1's in the TSCL_L_HS]
+ *
+ * Result of (N + M) is always 8.
+	 * In the general case, we don't need to control timing_s1 and timing_s2.
+ */
+ if (i2c->variant->hw == I2C_TYPE_EXYNOSAUTOV9) {
+ div = ((clkin / (16 * i2c->op_clock)) - 1);
+ i2c_timing_s3 = div << 16;
+ if (hs_timings)
+ writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_HS3);
+ else
+ writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_FS3);
+
+ return 0;
+ }
+
+ /*
* In case of HSI2C controller in Exynos5 series
* FPCLK / FI2C =
* (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + 8 + 2 * FLT_CYCLE
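In the ExynosAutoV9 branch above, only the divider needs programming: since
(N + M) is always 8, FSCL reduces to IPCLK / ((CLK_DIV + 1) * 16), and
timing_s1/timing_s2 can stay untouched in the general case. A worked example
with assumed clock values (not taken from the driver):

/* Assumed: IPCLK = 100 MHz, requested bus speed = 400 kHz. */
unsigned int clkin = 100000000;
unsigned int op_clock = 400000;

/* div = 100000000 / (16 * 400000) - 1 = 15 - 1 = 14 (integer math) */
unsigned int div = clkin / (16 * op_clock) - 1;

/*
 * Achieved FSCL = 100 MHz / ((14 + 1) * 16) ~= 416.7 kHz; integer
 * truncation rounds the divider down, so the result can land slightly
 * above the requested rate.
 */
unsigned int i2c_timing_s3 = div << 16;	/* divider field of TIMING_*S3 */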
@@ -422,7 +457,10 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
writel(int_status, i2c->regs + HSI2C_INT_STATUS);
/* handle interrupt related to the transfer status */
- if (i2c->variant->hw == I2C_TYPE_EXYNOS7) {
+ switch (i2c->variant->hw) {
+ case I2C_TYPE_EXYNOSAUTOV9:
+ fallthrough;
+ case I2C_TYPE_EXYNOS7:
if (int_status & HSI2C_INT_TRANS_DONE) {
i2c->trans_done = 1;
i2c->state = 0;
@@ -443,7 +481,12 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
i2c->state = -ETIMEDOUT;
goto stop;
}
- } else if (int_status & HSI2C_INT_I2C) {
+
+ break;
+ case I2C_TYPE_EXYNOS5:
+ if (!(int_status & HSI2C_INT_I2C))
+ break;
+
trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
if (trans_status & HSI2C_NO_DEV_ACK) {
dev_dbg(i2c->dev, "No ACK from device\n");
@@ -465,6 +508,8 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
i2c->trans_done = 1;
i2c->state = 0;
}
+
+ break;
}
if ((i2c->msg->flags & I2C_M_RD) && (int_status &
@@ -569,13 +614,13 @@ static void exynos5_i2c_bus_check(struct exynos5_i2c *i2c)
{
unsigned long timeout;
- if (i2c->variant->hw != I2C_TYPE_EXYNOS7)
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS5)
return;
/*
- * HSI2C_MASTER_ST_LOSE state in EXYNOS7 variant before transaction
- * indicates that bus is stuck (SDA is low). In such case bus recovery
- * can be performed.
+ * HSI2C_MASTER_ST_LOSE state (in Exynos7 and ExynosAutoV9 variants)
+ * before transaction indicates that bus is stuck (SDA is low).
+ * In such case bus recovery can be performed.
*/
timeout = jiffies + msecs_to_jiffies(100);
for (;;) {
@@ -611,10 +656,10 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
unsigned long flags;
unsigned short trig_lvl;
- if (i2c->variant->hw == I2C_TYPE_EXYNOS7)
- int_en |= HSI2C_INT_I2C_TRANS;
- else
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS5)
int_en |= HSI2C_INT_I2C;
+ else
+ int_en |= HSI2C_INT_I2C_TRANS;
i2c_ctl = readl(i2c->regs + HSI2C_CTL);
i2c_ctl &= ~(HSI2C_TXCHON | HSI2C_RXCHON);
@@ -713,10 +758,14 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
struct exynos5_i2c *i2c = adap->algo_data;
int i, ret;
- ret = clk_enable(i2c->clk);
+ ret = clk_enable(i2c->pclk);
if (ret)
return ret;
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ goto err_pclk;
+
for (i = 0; i < num; ++i) {
ret = exynos5_i2c_xfer_msg(i2c, msgs + i, i + 1 == num);
if (ret)
@@ -724,6 +773,8 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
}
clk_disable(i2c->clk);
+err_pclk:
+ clk_disable(i2c->pclk);
return ret ?: num;
}
@@ -763,10 +814,20 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return -ENOENT;
}
- ret = clk_prepare_enable(i2c->clk);
+ i2c->pclk = devm_clk_get_optional(&pdev->dev, "hsi2c_pclk");
+ if (IS_ERR(i2c->pclk)) {
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
+ "cannot get pclk");
+ }
+
+ ret = clk_prepare_enable(i2c->pclk);
if (ret)
return ret;
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ goto err_pclk;
+
i2c->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(i2c->regs)) {
ret = PTR_ERR(i2c->regs);
@@ -809,11 +870,15 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
clk_disable(i2c->clk);
+ clk_disable(i2c->pclk);
return 0;
err_clk:
clk_disable_unprepare(i2c->clk);
+
+ err_pclk:
+ clk_disable_unprepare(i2c->pclk);
return ret;
}
@@ -824,6 +889,7 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
clk_unprepare(i2c->clk);
+ clk_unprepare(i2c->pclk);
return 0;
}
@@ -835,6 +901,7 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
i2c_mark_adapter_suspended(&i2c->adap);
clk_unprepare(i2c->clk);
+ clk_unprepare(i2c->pclk);
return 0;
}
@@ -844,21 +911,30 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
struct exynos5_i2c *i2c = dev_get_drvdata(dev);
int ret = 0;
- ret = clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->pclk);
if (ret)
return ret;
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ goto err_pclk;
+
ret = exynos5_hsi2c_clock_setup(i2c);
- if (ret) {
- clk_disable_unprepare(i2c->clk);
- return ret;
- }
+ if (ret)
+ goto err_clk;
exynos5_i2c_init(i2c);
clk_disable(i2c->clk);
+ clk_disable(i2c->pclk);
i2c_mark_adapter_resumed(&i2c->adap);
return 0;
+
+err_clk:
+ clk_disable_unprepare(i2c->clk);
+err_pclk:
+ clk_disable_unprepare(i2c->pclk);
+ return ret;
}
#endif
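A single pattern repeats through the exynos5 hunks: the new bus clock (pclk)
is enabled before the operating clock, and every failure path unwinds the two
in reverse order. Stripped to its essentials (names illustrative):

static int foo_clocks_on(struct foo_i2c *i2c)
{
	int ret;

	ret = clk_enable(i2c->pclk);	/* register-access clock first */
	if (ret)
		return ret;

	ret = clk_enable(i2c->clk);	/* then the operating clock */
	if (ret)
		goto err_pclk;

	return 0;

err_pclk:
	clk_disable(i2c->pclk);		/* unwind in reverse order */
	return ret;
}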
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 41446f9cc52d..7428cc6af5cc 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -328,22 +328,14 @@ static int i801_check_pre(struct i801_priv *priv)
status = inb_p(SMBHSTSTS(priv));
if (status & SMBHSTSTS_HOST_BUSY) {
- dev_err(&priv->pci_dev->dev, "SMBus is busy, can't use it!\n");
+ pci_err(priv->pci_dev, "SMBus is busy, can't use it!\n");
return -EBUSY;
}
status &= STATUS_FLAGS;
if (status) {
- dev_dbg(&priv->pci_dev->dev, "Clearing status flags (%02x)\n",
- status);
+ pci_dbg(priv->pci_dev, "Clearing status flags (%02x)\n", status);
outb_p(status, SMBHSTSTS(priv));
- status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
- if (status) {
- dev_err(&priv->pci_dev->dev,
- "Failed clearing status flags (%02x)\n",
- status);
- return -EBUSY;
- }
}
/*
@@ -356,27 +348,14 @@ static int i801_check_pre(struct i801_priv *priv)
if (priv->features & FEATURE_SMBUS_PEC) {
status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE;
if (status) {
- dev_dbg(&priv->pci_dev->dev,
- "Clearing aux status flags (%02x)\n", status);
+ pci_dbg(priv->pci_dev, "Clearing aux status flags (%02x)\n", status);
outb_p(status, SMBAUXSTS(priv));
- status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE;
- if (status) {
- dev_err(&priv->pci_dev->dev,
- "Failed clearing aux status flags (%02x)\n",
- status);
- return -EBUSY;
- }
}
}
return 0;
}
-/*
- * Convert the status register to an error code, and clear it.
- * Note that status only contains the bits we want to clear, not the
- * actual register value.
- */
static int i801_check_post(struct i801_priv *priv, int status)
{
int result = 0;
@@ -401,7 +380,6 @@ static int i801_check_post(struct i801_priv *priv, int status)
!(status & SMBHSTSTS_FAILED))
dev_err(&priv->pci_dev->dev,
"Failed terminating the transaction\n");
- outb_p(STATUS_FLAGS, SMBHSTSTS(priv));
return -ETIMEDOUT;
}
@@ -440,9 +418,6 @@ static int i801_check_post(struct i801_priv *priv, int status)
dev_dbg(&priv->pci_dev->dev, "Lost arbitration\n");
}
- /* Clear status flags except BYTE_DONE, to be cleared by caller */
- outb_p(status, SMBHSTSTS(priv));
-
return result;
}
@@ -523,9 +498,11 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
return -EOPNOTSUPP;
}
+ /* Set block buffer mode */
+ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
+
inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
- /* Use 32-byte buffer to process this transaction */
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
outb_p(len, SMBHSTDAT0(priv));
@@ -760,14 +737,6 @@ exit:
return i801_check_post(priv, status);
}
-static int i801_set_block_buffer_mode(struct i801_priv *priv)
-{
- outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
- if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0)
- return -EIO;
- return 0;
-}
-
/* Block transaction function */
static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *data,
char read_write, int command)
@@ -775,6 +744,11 @@ static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *
int result = 0;
unsigned char hostc;
+ if (read_write == I2C_SMBUS_READ && command == I2C_SMBUS_BLOCK_DATA)
+ data->block[0] = I2C_SMBUS_BLOCK_MAX;
+ else if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
+ return -EPROTO;
+
if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
if (read_write == I2C_SMBUS_WRITE) {
/* set I2C_EN bit in configuration register */
@@ -788,22 +762,11 @@ static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *
}
}
- if (read_write == I2C_SMBUS_WRITE
- || command == I2C_SMBUS_I2C_BLOCK_DATA) {
- if (data->block[0] < 1)
- data->block[0] = 1;
- if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
- data->block[0] = I2C_SMBUS_BLOCK_MAX;
- } else {
- data->block[0] = 32; /* max for SMBus block reads */
- }
-
/* Experience has shown that the block buffer can only be used for
SMBus (not I2C) block transactions, even though the datasheet
doesn't mention this limitation. */
- if ((priv->features & FEATURE_BLOCK_BUFFER)
- && command != I2C_SMBUS_I2C_BLOCK_DATA
- && i801_set_block_buffer_mode(priv) == 0)
+ if ((priv->features & FEATURE_BLOCK_BUFFER) &&
+ command != I2C_SMBUS_I2C_BLOCK_DATA)
result = i801_block_transaction_by_block(priv, data,
read_write,
command);
@@ -951,8 +914,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
}
out:
- /* Unlock the SMBus device for use by BIOS/ACPI */
- outb_p(SMBHSTSTS_INUSE_STS, SMBHSTSTS(priv));
+ /*
+ * Unlock the SMBus device for use by BIOS/ACPI,
+ * and clear status flags if not done already.
+ */
+ outb_p(SMBHSTSTS_INUSE_STS | STATUS_FLAGS, SMBHSTSTS(priv));
pm_runtime_mark_last_busy(&priv->pci_dev->dev);
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
@@ -1009,66 +975,72 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = i801_func,
};
+#define FEATURES_ICH5 (FEATURE_BLOCK_PROC | FEATURE_I2C_BLOCK_READ | \
+ FEATURE_IRQ | FEATURE_SMBUS_PEC | \
+ FEATURE_BLOCK_BUFFER | FEATURE_HOST_NOTIFY)
+#define FEATURES_ICH4 (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER | \
+ FEATURE_HOST_NOTIFY)
+
static const struct pci_device_id i801_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_4) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_16) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_17) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EP80579_1) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EBG_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_N_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS) },
+ { PCI_DEVICE_DATA(INTEL, 82801AA_3, 0) },
+ { PCI_DEVICE_DATA(INTEL, 82801AB_3, 0) },
+ { PCI_DEVICE_DATA(INTEL, 82801BA_2, 0) },
+ { PCI_DEVICE_DATA(INTEL, 82801CA_3, FEATURE_HOST_NOTIFY) },
+ { PCI_DEVICE_DATA(INTEL, 82801DB_3, FEATURES_ICH4) },
+ { PCI_DEVICE_DATA(INTEL, 82801EB_3, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ESB_4, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH6_16, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH7_17, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ESB2_17, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH8_5, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH9_6, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, EP80579_1, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH10_4, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH10_5, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, 5_3400_SERIES_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, COUGARPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF0, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF1, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF2, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, DH89XXCC_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, PANTHERPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, LYNXPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, LYNXPOINT_LP_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, AVOTON_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS0, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS1, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS2, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, COLETOCREEK_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, GEMINILAKE_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_LP_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, BAYTRAIL_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, BRASWELL_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, CDF_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, DNV_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, EBG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, BROXTON_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, LEWISBURG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, LEWISBURG_SSKU_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, KABYLAKE_PCH_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, CANNONLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, CANNONLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ICELAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ICELAKE_N_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, COMETLAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, COMETLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, COMETLAKE_V_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, ELKHART_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, TIGERLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, TIGERLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, JASPER_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_M_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
@@ -1493,15 +1465,14 @@ static inline unsigned int i801_get_adapter_class(struct i801_priv *priv)
}
#endif
-static const struct itco_wdt_platform_data spt_tco_platform_data = {
- .name = "Intel PCH",
- .version = 4,
-};
-
static struct platform_device *
i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
struct resource *tco_res)
{
+ static const struct itco_wdt_platform_data pldata = {
+ .name = "Intel PCH",
+ .version = 4,
+ };
struct resource *res;
unsigned int devfn;
u64 base64_addr;
@@ -1544,22 +1515,20 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
res->flags = IORESOURCE_MEM;
return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
- tco_res, 2, &spt_tco_platform_data,
- sizeof(spt_tco_platform_data));
+ tco_res, 2, &pldata, sizeof(pldata));
}
-static const struct itco_wdt_platform_data cnl_tco_platform_data = {
- .name = "Intel PCH",
- .version = 6,
-};
-
static struct platform_device *
i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev,
struct resource *tco_res)
{
- return platform_device_register_resndata(&pci_dev->dev,
- "iTCO_wdt", -1, tco_res, 1, &cnl_tco_platform_data,
- sizeof(cnl_tco_platform_data));
+ static const struct itco_wdt_platform_data pldata = {
+ .name = "Intel PCH",
+ .version = 6,
+ };
+
+ return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
+ tco_res, 1, &pldata, sizeof(pldata));
}
static void i801_add_tco(struct i801_priv *priv)
@@ -1697,72 +1666,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
mutex_init(&priv->acpi_lock);
priv->pci_dev = dev;
- switch (dev->device) {
- case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
- case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
- case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
- case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
- case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS:
- priv->features |= FEATURE_BLOCK_PROC;
- priv->features |= FEATURE_I2C_BLOCK_READ;
- priv->features |= FEATURE_IRQ;
- priv->features |= FEATURE_SMBUS_PEC;
- priv->features |= FEATURE_BLOCK_BUFFER;
- priv->features |= FEATURE_TCO_SPT;
- priv->features |= FEATURE_HOST_NOTIFY;
- break;
-
- case PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
- case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
- case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
- case PCI_DEVICE_ID_INTEL_ICELAKE_N_SMBUS:
- case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
- case PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS:
- case PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS:
- case PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS:
- case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
- case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
- case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
- case PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS:
- priv->features |= FEATURE_BLOCK_PROC;
- priv->features |= FEATURE_I2C_BLOCK_READ;
- priv->features |= FEATURE_IRQ;
- priv->features |= FEATURE_SMBUS_PEC;
- priv->features |= FEATURE_BLOCK_BUFFER;
- priv->features |= FEATURE_TCO_CNL;
- priv->features |= FEATURE_HOST_NOTIFY;
- break;
-
- case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
- case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1:
- case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2:
- case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0:
- case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1:
- case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2:
- priv->features |= FEATURE_IDF;
- fallthrough;
- default:
- priv->features |= FEATURE_BLOCK_PROC;
- priv->features |= FEATURE_I2C_BLOCK_READ;
- priv->features |= FEATURE_IRQ;
- fallthrough;
- case PCI_DEVICE_ID_INTEL_82801DB_3:
- priv->features |= FEATURE_SMBUS_PEC;
- priv->features |= FEATURE_BLOCK_BUFFER;
- fallthrough;
- case PCI_DEVICE_ID_INTEL_82801CA_3:
- priv->features |= FEATURE_HOST_NOTIFY;
- fallthrough;
- case PCI_DEVICE_ID_INTEL_82801BA_2:
- case PCI_DEVICE_ID_INTEL_82801AB_3:
- case PCI_DEVICE_ID_INTEL_82801AA_3:
- break;
- }
+ priv->features = id->driver_data;
/* Disable features on user request */
for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
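The i801 rework above moves per-device feature selection out of a long
probe-time switch and into the PCI ID table's driver_data. A minimal sketch
of the pattern (IDs and feature bits are illustrative, not the i801 set):

#define FOO_FEAT_IRQ	BIT(0)
#define FOO_FEAT_PEC	BIT(1)

static const struct pci_device_id foo_ids[] = {
	/* PCI_DEVICE_DATA(vendor, device, data) fills .driver_data too */
	{ PCI_DEVICE_DATA(INTEL, 82801AA_3, 0) },
	{ PCI_DEVICE_DATA(INTEL, ICH9_6, FOO_FEAT_IRQ | FOO_FEAT_PEC) },
	{ 0 }
};

static int foo_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	unsigned long features = id->driver_data;	/* table lookup, no switch */

	if (features & FOO_FEAT_IRQ)
		;	/* set up interrupt-driven operation */
	return 0;
}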
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 3576b63a6c03..27f969b3dc07 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -37,6 +37,8 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -51,6 +53,8 @@
/* This will be the driver name the kernel reports */
#define DRIVER_NAME "imx-i2c"
+#define I2C_IMX_CHECK_DELAY 30000 /* Time to check for bus idle, in NS */
+
/*
* Enable DMA if transfer byte size is bigger than this threshold.
 * As the hardware requires, it must be bigger than 4 bytes.
@@ -210,6 +214,10 @@ struct imx_i2c_struct {
struct imx_i2c_dma *dma;
struct i2c_client *slave;
enum i2c_slave_event last_slave_event;
+
+ /* For checking slave events. */
+ spinlock_t slave_lock;
+ struct hrtimer slave_timer;
};
static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
@@ -680,7 +688,7 @@ static void i2c_imx_slave_event(struct imx_i2c_struct *i2c_imx,
static void i2c_imx_slave_finish_op(struct imx_i2c_struct *i2c_imx)
{
- u8 val;
+ u8 val = 0;
while (i2c_imx->last_slave_event != I2C_SLAVE_STOP) {
switch (i2c_imx->last_slave_event) {
@@ -701,10 +709,11 @@ static void i2c_imx_slave_finish_op(struct imx_i2c_struct *i2c_imx)
}
}
-static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
- unsigned int status, unsigned int ctl)
+/* Handles slave events; called from both the ISR and the bus-idle check timer. */
+static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
+ unsigned int status, unsigned int ctl)
{
- u8 value;
+ u8 value = 0;
if (status & I2SR_IAL) { /* Arbitration lost */
i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
@@ -712,6 +721,16 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
return IRQ_HANDLED;
}
+ if (!(status & I2SR_IBB)) {
+ /* No master on the bus, that could mean a stop condition. */
+ i2c_imx_slave_finish_op(i2c_imx);
+ return IRQ_HANDLED;
+ }
+
+ if (!(status & I2SR_ICF))
+ /* Data transfer still in progress, ignore this. */
+ goto out;
+
if (status & I2SR_IAAS) { /* Addressed as a slave */
i2c_imx_slave_finish_op(i2c_imx);
if (status & I2SR_SRW) { /* Master wants to read from us*/
@@ -737,16 +756,9 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
}
} else if (!(ctl & I2CR_MTX)) { /* Receive mode */
- if (status & I2SR_IBB) { /* No STOP signal detected */
- value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
- i2c_imx_slave_event(i2c_imx,
- I2C_SLAVE_WRITE_RECEIVED, &value);
- } else { /* STOP signal is detected */
- dev_dbg(&i2c_imx->adapter.dev,
- "STOP signal detected");
- i2c_imx_slave_event(i2c_imx,
- I2C_SLAVE_STOP, &value);
- }
+ value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_WRITE_RECEIVED, &value);
} else if (!(status & I2SR_RXAK)) { /* Transmit mode received ACK */
ctl |= I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
@@ -755,15 +767,43 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
I2C_SLAVE_READ_PROCESSED, &value);
imx_i2c_write_reg(value, i2c_imx, IMX_I2C_I2DR);
- } else { /* Transmit mode received NAK */
+ } else { /* Transmit mode received NAK, operation is done */
ctl &= ~I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+ i2c_imx_slave_finish_op(i2c_imx);
+ return IRQ_HANDLED;
}
+out:
+ /*
+ * No need to check the return value here. If it returns 0 or
+ * 1, then everything is fine. If it returns -1, then the
+ * timer is running in the handler. This will still work,
+ * though it may be redone (or already have been done) by the
+ * timer function.
+ */
+ hrtimer_try_to_cancel(&i2c_imx->slave_timer);
+ hrtimer_forward_now(&i2c_imx->slave_timer, I2C_IMX_CHECK_DELAY);
+ hrtimer_restart(&i2c_imx->slave_timer);
return IRQ_HANDLED;
}
+static enum hrtimer_restart i2c_imx_slave_timeout(struct hrtimer *t)
+{
+ struct imx_i2c_struct *i2c_imx = container_of(t, struct imx_i2c_struct,
+ slave_timer);
+ unsigned int ctl, status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2c_imx->slave_lock, flags);
+ status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
+ ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ i2c_imx_slave_handle(i2c_imx, status, ctl);
+ spin_unlock_irqrestore(&i2c_imx->slave_lock, flags);
+ return HRTIMER_NORESTART;
+}
+
static void i2c_imx_slave_init(struct imx_i2c_struct *i2c_imx)
{
int temp;
@@ -843,7 +883,9 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
{
struct imx_i2c_struct *i2c_imx = dev_id;
unsigned int ctl, status;
+ unsigned long flags;
+ spin_lock_irqsave(&i2c_imx->slave_lock, flags);
status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
@@ -851,14 +893,20 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
if (i2c_imx->slave) {
if (!(ctl & I2CR_MSTA)) {
- return i2c_imx_slave_isr(i2c_imx, status, ctl);
- } else if (i2c_imx->last_slave_event !=
- I2C_SLAVE_STOP) {
- i2c_imx_slave_finish_op(i2c_imx);
+ irqreturn_t ret;
+
+ ret = i2c_imx_slave_handle(i2c_imx,
+ status, ctl);
+ spin_unlock_irqrestore(&i2c_imx->slave_lock,
+ flags);
+ return ret;
}
+ i2c_imx_slave_finish_op(i2c_imx);
}
+ spin_unlock_irqrestore(&i2c_imx->slave_lock, flags);
return i2c_imx_master_isr(i2c_imx, status);
}
+ spin_unlock_irqrestore(&i2c_imx->slave_lock, flags);
return IRQ_NONE;
}
@@ -1378,6 +1426,10 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (!i2c_imx)
return -ENOMEM;
+ spin_lock_init(&i2c_imx->slave_lock);
+ hrtimer_init(&i2c_imx->slave_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ i2c_imx->slave_timer.function = i2c_imx_slave_timeout;
+
match = device_get_match_data(&pdev->dev);
if (match)
i2c_imx->hwdata = match;
@@ -1491,6 +1543,8 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (ret < 0)
return ret;
+ hrtimer_cancel(&i2c_imx->slave_timer);
+
/* remove adapter */
dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n");
i2c_del_adapter(&i2c_imx->adapter);
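The hrtimer added to the imx driver exists because the hardware raises no
interrupt for a STOP condition in slave mode; instead, the driver re-checks
the bus-busy bit shortly after the last slave event. A sketch of that
rearm-on-event, fire-once shape (simplified from the hunks above):

/* Rearm a one-shot bus-idle check ~30 us after the latest slave event. */
static void foo_rearm_idle_check(struct foo_i2c *i2c)
{
	hrtimer_try_to_cancel(&i2c->slave_timer);	/* harmless if it already ran */
	hrtimer_forward_now(&i2c->slave_timer, ns_to_ktime(30000));
	hrtimer_restart(&i2c->slave_timer);
}

static enum hrtimer_restart foo_idle_check(struct hrtimer *t)
{
	struct foo_i2c *i2c = container_of(t, struct foo_i2c, slave_timer);
	unsigned long flags;

	spin_lock_irqsave(&i2c->slave_lock, flags);	/* shared with the ISR */
	/* re-read the status registers; if the bus went idle, report STOP */
	spin_unlock_irqrestore(&i2c->slave_lock, flags);

	return HRTIMER_NORESTART;	/* one shot; the event path rearms it */
}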
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index db26cc36e13f..6c698c10d3cd 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -119,23 +119,30 @@ static inline void writeccr(struct mpc_i2c *i2c, u32 x)
 /* Sometimes the 9th clock pulse isn't generated, and the slave doesn't
  * release the bus, because it wants to send an ACK.
* Following sequence of enabling/disabling and sending start/stop generates
- * the 9 pulses, so it's all OK.
+ * the 9 pulses, each with a START then ending with STOP, so it's all OK.
*/
static void mpc_i2c_fixup(struct mpc_i2c *i2c)
{
int k;
- u32 delay_val = 1000000 / i2c->real_clk + 1;
-
- if (delay_val < 2)
- delay_val = 2;
+ unsigned long flags;
for (k = 9; k; k--) {
writeccr(i2c, 0);
- writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN);
+ writeb(0, i2c->base + MPC_I2C_SR); /* clear any status bits */
+ writeccr(i2c, CCR_MEN | CCR_MSTA); /* START */
+ readb(i2c->base + MPC_I2C_DR); /* init xfer */
+ udelay(15); /* let it hit the bus */
+ local_irq_save(flags); /* should not be delayed further */
+ writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSTA); /* delay SDA */
readb(i2c->base + MPC_I2C_DR);
- writeccr(i2c, CCR_MEN);
- udelay(delay_val << 1);
+ if (k != 1)
+ udelay(5);
+ local_irq_restore(flags);
}
+ writeccr(i2c, CCR_MEN); /* Initiate STOP */
+ readb(i2c->base + MPC_I2C_DR);
+ udelay(15); /* Let STOP propagate */
+ writeccr(i2c, 0);
}
static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask)
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index fc13511f4562..f71c730f9838 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -367,11 +367,15 @@ static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv)
rcar_i2c_prepare_msg(priv);
}
-static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
+static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate)
{
struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE
? priv->dma_rx : priv->dma_tx;
+ /* only allowed from thread context! */
+ if (terminate)
+ dmaengine_terminate_sync(chan);
+
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
sg_dma_len(&priv->sg), priv->dma_direction);
@@ -386,25 +390,13 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICDMAER, 0);
}
-static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv)
-{
- if (priv->dma_direction == DMA_NONE)
- return;
- else if (priv->dma_direction == DMA_FROM_DEVICE)
- dmaengine_terminate_all(priv->dma_rx);
- else if (priv->dma_direction == DMA_TO_DEVICE)
- dmaengine_terminate_all(priv->dma_tx);
-
- rcar_i2c_dma_unmap(priv);
-}
-
static void rcar_i2c_dma_callback(void *data)
{
struct rcar_i2c_priv *priv = data;
priv->pos += sg_dma_len(&priv->sg);
- rcar_i2c_dma_unmap(priv);
+ rcar_i2c_cleanup_dma(priv, false);
}
static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
@@ -456,7 +448,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc) {
dev_dbg(dev, "dma prep slave sg failed, using PIO\n");
- rcar_i2c_cleanup_dma(priv);
+ rcar_i2c_cleanup_dma(priv, false);
return false;
}
@@ -466,7 +458,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
cookie = dmaengine_submit(txdesc);
if (dma_submit_error(cookie)) {
dev_dbg(dev, "submitting dma failed, using PIO\n");
- rcar_i2c_cleanup_dma(priv);
+ rcar_i2c_cleanup_dma(priv, false);
return false;
}
@@ -846,7 +838,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
/* cleanup DMA if it couldn't complete properly due to an error */
if (priv->dma_direction != DMA_NONE)
- rcar_i2c_cleanup_dma(priv);
+ rcar_i2c_cleanup_dma(priv, true);
if (!time_left) {
rcar_i2c_init(priv);
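Folding the terminate step into the unmap helper behind a flag keeps one
cleanup path while respecting context rules: dmaengine_terminate_sync() can
sleep, so only the transfer thread may pass terminate = true, while the DMA
completion callback passes false. The shape of the helper, distilled:

static void foo_cleanup_dma(struct foo_priv *priv, bool terminate)
{
	struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE
				? priv->dma_rx : priv->dma_tx;

	/* terminate == true is only legal from thread context (may sleep) */
	if (terminate)
		dmaengine_terminate_sync(chan);

	dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
			 sg_dma_len(&priv->sg), priv->dma_direction);
	priv->dma_direction = DMA_NONE;
}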
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 78b84445ee6a..8dfd27dc6149 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -433,12 +433,12 @@ static int riic_i2c_probe(struct platform_device *pdev)
}
for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) {
- res = platform_get_resource(pdev, IORESOURCE_IRQ, riic_irqs[i].res_num);
- if (!res)
- return -ENODEV;
+ ret = platform_get_irq(pdev, riic_irqs[i].res_num);
+ if (ret < 0)
+ return ret;
- ret = devm_request_irq(&pdev->dev, res->start, riic_irqs[i].isr,
- 0, riic_irqs[i].name, riic);
+ ret = devm_request_irq(&pdev->dev, ret, riic_irqs[i].isr,
+ 0, riic_irqs[i].name, riic);
if (ret) {
dev_err(&pdev->dev, "failed to request irq %s\n", riic_irqs[i].name);
return ret;
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 02ddb237f69a..989040a73626 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1338,8 +1338,15 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
goto err_pclk;
}
+ ret = clk_enable(i2c->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Can't enable bus clk: %d\n", ret);
+ goto err_clk_notifier;
+ }
+
clk_rate = clk_get_rate(i2c->clk);
rk3x_i2c_adapt_div(i2c, clk_rate);
+ clk_disable(i2c->clk);
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0)
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index db8fa4186814..72f024a0c363 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -442,34 +442,26 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void sh_mobile_i2c_dma_unmap(struct sh_mobile_i2c_data *pd)
+static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd, bool terminate)
{
struct dma_chan *chan = pd->dma_direction == DMA_FROM_DEVICE
? pd->dma_rx : pd->dma_tx;
+ /* only allowed from thread context! */
+ if (terminate)
+ dmaengine_terminate_sync(chan);
+
dma_unmap_single(chan->device->dev, sg_dma_address(&pd->sg),
pd->msg->len, pd->dma_direction);
pd->dma_direction = DMA_NONE;
}
-static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd)
-{
- if (pd->dma_direction == DMA_NONE)
- return;
- else if (pd->dma_direction == DMA_FROM_DEVICE)
- dmaengine_terminate_sync(pd->dma_rx);
- else if (pd->dma_direction == DMA_TO_DEVICE)
- dmaengine_terminate_sync(pd->dma_tx);
-
- sh_mobile_i2c_dma_unmap(pd);
-}
-
static void sh_mobile_i2c_dma_callback(void *data)
{
struct sh_mobile_i2c_data *pd = data;
- sh_mobile_i2c_dma_unmap(pd);
+ sh_mobile_i2c_cleanup_dma(pd, false);
pd->pos = pd->msg->len;
pd->stop_after_dma = true;
@@ -549,7 +541,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc) {
dev_dbg(pd->dev, "dma prep slave sg failed, using PIO\n");
- sh_mobile_i2c_cleanup_dma(pd);
+ sh_mobile_i2c_cleanup_dma(pd, false);
return;
}
@@ -559,7 +551,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
cookie = dmaengine_submit(txdesc);
if (dma_submit_error(cookie)) {
dev_dbg(pd->dev, "submitting dma failed, using PIO\n");
- sh_mobile_i2c_cleanup_dma(pd);
+ sh_mobile_i2c_cleanup_dma(pd, false);
return;
}
@@ -698,7 +690,7 @@ static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd,
if (!time_left) {
dev_err(pd->dev, "Transfer request timed out\n");
if (pd->dma_direction != DMA_NONE)
- sh_mobile_i2c_cleanup_dma(pd);
+ sh_mobile_i2c_cleanup_dma(pd, true);
err = -ETIMEDOUT;
break;
@@ -838,20 +830,38 @@ static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd)
static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, struct sh_mobile_i2c_data *pd)
{
- struct resource *res;
- resource_size_t n;
+ struct device_node *np = dev_of_node(&dev->dev);
int k = 0, ret;
- while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) {
- for (n = res->start; n <= res->end; n++) {
- ret = devm_request_irq(&dev->dev, n, sh_mobile_i2c_isr,
- 0, dev_name(&dev->dev), pd);
+ if (np) {
+ int irq;
+
+ while ((irq = platform_get_irq_optional(dev, k)) != -ENXIO) {
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(&dev->dev, irq, sh_mobile_i2c_isr,
+ 0, dev_name(&dev->dev), pd);
if (ret) {
- dev_err(&dev->dev, "cannot request IRQ %pa\n", &n);
+ dev_err(&dev->dev, "cannot request IRQ %d\n", irq);
return ret;
}
+ k++;
+ }
+ } else {
+ struct resource *res;
+ resource_size_t n;
+
+ while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) {
+ for (n = res->start; n <= res->end; n++) {
+ ret = devm_request_irq(&dev->dev, n, sh_mobile_i2c_isr,
+ 0, dev_name(&dev->dev), pd);
+ if (ret) {
+ dev_err(&dev->dev, "cannot request IRQ %pa\n", &n);
+ return ret;
+ }
+ }
+ k++;
}
- k++;
}
return k > 0 ? 0 : -ENOENT;
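The DT branch above scans interrupts until platform_get_irq_optional()
returns -ENXIO, which specifically means "no more entries", while any other
negative value (such as -EPROBE_DEFER) is a real error to propagate. The
loop, reduced to its essentials with illustrative names:

static irqreturn_t foo_isr(int irq, void *data)
{
	return IRQ_HANDLED;	/* stub for the sketch */
}

static int foo_hook_irqs(struct platform_device *pdev, void *ctx)
{
	int k = 0, irq, ret;

	/* -ENXIO ends the scan; other negatives abort with an error. */
	while ((irq = platform_get_irq_optional(pdev, k)) != -ENXIO) {
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, foo_isr, 0,
				       dev_name(&pdev->dev), ctx);
		if (ret)
			return ret;
		k++;
	}

	return k > 0 ? 0 : -ENOENT;	/* at least one IRQ is required */
}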
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 66145d2b9b55..6d4aa64b195d 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -828,18 +828,14 @@ static void stm32f7_i2c_smbus_reload(struct stm32f7_i2c_dev *i2c_dev)
writel_relaxed(cr2, i2c_dev->base + STM32F7_I2C_CR2);
}
-static int stm32f7_i2c_release_bus(struct i2c_adapter *i2c_adap)
+static void stm32f7_i2c_release_bus(struct i2c_adapter *i2c_adap)
{
struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
- dev_info(i2c_dev->dev, "Trying to recover bus\n");
-
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_PE);
stm32f7_i2c_hw_config(i2c_dev);
-
- return 0;
}
static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev)
@@ -854,13 +850,7 @@ static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev)
if (!ret)
return 0;
- dev_info(i2c_dev->dev, "bus busy\n");
-
- ret = stm32f7_i2c_release_bus(&i2c_dev->adap);
- if (ret) {
- dev_err(i2c_dev->dev, "Failed to recover the bus (%d)\n", ret);
- return ret;
- }
+ stm32f7_i2c_release_bus(&i2c_dev->adap);
return -EBUSY;
}
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index b3184c422826..03cea102ab76 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -6,6 +6,7 @@
* Author: Colin Cross <ccross@android.com>
*/
+#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -245,7 +246,7 @@ struct tegra_i2c_hw_feature {
* @msg_buf: pointer to current message data
* @msg_buf_remaining: size of unsent data in the message buffer
* @msg_read: indicates that the transfer is a read access
- * @bus_clk_rate: current I2C bus clock rate
+ * @timings: i2c timings information like bus frequency
* @multimaster_mode: indicates that I2C controller is in multi-master mode
* @tx_dma_chan: DMA transmit channel
* @rx_dma_chan: DMA receive channel
@@ -272,7 +273,7 @@ struct tegra_i2c_dev {
unsigned int nclocks;
struct clk *div_clk;
- u32 bus_clk_rate;
+ struct i2c_timings timings;
struct completion msg_complete;
size_t msg_buf_remaining;
@@ -608,6 +609,8 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode;
+ acpi_handle handle = ACPI_HANDLE(i2c_dev->dev);
+ struct i2c_timings *t = &i2c_dev->timings;
int err;
/*
@@ -618,7 +621,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
* emit a noisy warning on error, which won't stay unnoticed and
	 * won't hose the machine entirely.
*/
- err = reset_control_reset(i2c_dev->rst);
+ if (handle)
+ err = acpi_evaluate_object(handle, "_RST", NULL, NULL);
+ else
+ err = reset_control_reset(i2c_dev->rst);
+
WARN_ON_ONCE(err);
if (i2c_dev->is_dvc)
@@ -636,14 +643,14 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (i2c_dev->is_vi)
tegra_i2c_vi_init(i2c_dev);
- switch (i2c_dev->bus_clk_rate) {
+ switch (t->bus_freq_hz) {
case I2C_MAX_STANDARD_MODE_FREQ + 1 ... I2C_MAX_FAST_MODE_PLUS_FREQ:
default:
tlow = i2c_dev->hw->tlow_fast_fastplus_mode;
thigh = i2c_dev->hw->thigh_fast_fastplus_mode;
tsu_thd = i2c_dev->hw->setup_hold_time_fast_fast_plus_mode;
- if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ)
+ if (t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ)
non_hs_mode = i2c_dev->hw->clk_divisor_fast_plus_mode;
else
non_hs_mode = i2c_dev->hw->clk_divisor_fast_mode;
@@ -679,7 +686,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
clk_multiplier = (tlow + thigh + 2) * (non_hs_mode + 1);
err = clk_set_rate(i2c_dev->div_clk,
- i2c_dev->bus_clk_rate * clk_multiplier);
+ t->bus_freq_hz * clk_multiplier);
if (err) {
dev_err(i2c_dev->dev, "failed to set div-clk rate: %d\n", err);
return err;
@@ -718,7 +725,7 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
* before disabling the controller so that the STOP condition has
* been delivered properly.
*/
- udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+ udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->timings.bus_freq_hz));
cnfg = i2c_readl(i2c_dev, I2C_CNFG);
if (cnfg & I2C_CNFG_PACKET_MODE_EN)
@@ -1248,7 +1255,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
* Total bits = 9 bits per byte (including ACK bit) + Start & stop bits
*/
xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC,
- i2c_dev->bus_clk_rate);
+ i2c_dev->timings.bus_freq_hz);
int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
tegra_i2c_unmask_irq(i2c_dev, int_mask);
@@ -1625,14 +1632,10 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
{
struct device_node *np = i2c_dev->dev->of_node;
bool multi_mode;
- int err;
- err = of_property_read_u32(np, "clock-frequency",
- &i2c_dev->bus_clk_rate);
- if (err)
- i2c_dev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ;
+ i2c_parse_fw_timings(i2c_dev->dev, &i2c_dev->timings, true);
- multi_mode = of_property_read_bool(np, "multi-master");
+ multi_mode = device_property_read_bool(i2c_dev->dev, "multi-master");
i2c_dev->multimaster_mode = multi_mode;
if (of_device_is_compatible(np, "nvidia,tegra20-i2c-dvc"))
@@ -1642,10 +1645,26 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
i2c_dev->is_vi = true;
}
+static int tegra_i2c_init_reset(struct tegra_i2c_dev *i2c_dev)
+{
+ if (ACPI_HANDLE(i2c_dev->dev))
+ return 0;
+
+ i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c");
+ if (IS_ERR(i2c_dev->rst))
+ return dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst),
+ "failed to get reset control\n");
+
+ return 0;
+}
+
static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev)
{
int err;
+ if (ACPI_HANDLE(i2c_dev->dev))
+ return 0;
+
i2c_dev->clocks[i2c_dev->nclocks++].id = "div-clk";
if (i2c_dev->hw == &tegra20_i2c_hw || i2c_dev->hw == &tegra30_i2c_hw)
@@ -1720,7 +1739,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
init_completion(&i2c_dev->msg_complete);
init_completion(&i2c_dev->dma_complete);
- i2c_dev->hw = of_device_get_match_data(&pdev->dev);
+ i2c_dev->hw = device_get_match_data(&pdev->dev);
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
@@ -1746,15 +1765,12 @@ static int tegra_i2c_probe(struct platform_device *pdev)
if (err)
return err;
- i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c");
- if (IS_ERR(i2c_dev->rst)) {
- dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst),
- "failed to get reset control\n");
- return PTR_ERR(i2c_dev->rst);
- }
-
tegra_i2c_parse_dt(i2c_dev);
+ err = tegra_i2c_init_reset(i2c_dev);
+ if (err)
+ return err;
+
err = tegra_i2c_init_clocks(i2c_dev);
if (err)
return err;
@@ -1923,12 +1939,21 @@ static const struct dev_pm_ops tegra_i2c_pm = {
NULL)
};
+static const struct acpi_device_id tegra_i2c_acpi_match[] = {
+ {.id = "NVDA0101", .driver_data = (kernel_ulong_t)&tegra210_i2c_hw},
+ {.id = "NVDA0201", .driver_data = (kernel_ulong_t)&tegra186_i2c_hw},
+ {.id = "NVDA0301", .driver_data = (kernel_ulong_t)&tegra194_i2c_hw},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, tegra_i2c_acpi_match);
+
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
.remove = tegra_i2c_remove,
.driver = {
.name = "tegra-i2c",
.of_match_table = tegra_i2c_of_match,
+ .acpi_match_table = tegra_i2c_acpi_match,
.pm = &tegra_i2c_pm,
},
};
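Note the earlier switch from of_device_get_match_data() to device_get_match_data(): that single call returns the per-SoC data from whichever table matched, so one probe path serves both the OF and the new ACPI IDs. A sketch of the pattern, with a hypothetical struct example_hw and hypothetical IDs:

    #include <linux/mod_devicetable.h>
    #include <linux/platform_device.h>
    #include <linux/property.h>

    struct example_hw { unsigned int quirks; };

    static const struct example_hw example_hw_v1;

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example-v1", .data = &example_hw_v1 },
            { }
    };

    static const struct acpi_device_id example_acpi_match[] = {
            { .id = "EXMP0001", .driver_data = (kernel_ulong_t)&example_hw_v1 },
            { }
    };

    static int example_probe(struct platform_device *pdev)
    {
            /* Valid for both DT and ACPI enumeration. */
            const struct example_hw *hw = device_get_match_data(&pdev->dev);

            return hw ? 0 : -ENODEV;
    }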
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 41eb0dcc3204..4b9536f50800 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -165,7 +165,7 @@ err_free:
static void virtio_i2c_del_vqs(struct virtio_device *vdev)
{
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 6d24dc385522..4e3b11c0f732 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -572,12 +572,6 @@ static int xlp9xx_i2c_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id xlp9xx_i2c_of_match[] = {
- { .compatible = "netlogic,xlp980-i2c", },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match);
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
{"BRCM9007", 0},
@@ -592,7 +586,6 @@ static struct platform_driver xlp9xx_i2c_driver = {
.remove = xlp9xx_i2c_remove,
.driver = {
.name = "xlp9xx-i2c",
- .of_match_table = xlp9xx_i2c_of_match,
.acpi_match_table = ACPI_PTR(xlp9xx_i2c_acpi_ids),
},
};
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
deleted file mode 100644
index 9ce20652d494..000000000000
--- a/drivers/i2c/busses/i2c-xlr.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * Copyright 2011, Netlogic Microsystems Inc.
- * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-
-/* XLR I2C REGISTERS */
-#define XLR_I2C_CFG 0x00
-#define XLR_I2C_CLKDIV 0x01
-#define XLR_I2C_DEVADDR 0x02
-#define XLR_I2C_ADDR 0x03
-#define XLR_I2C_DATAOUT 0x04
-#define XLR_I2C_DATAIN 0x05
-#define XLR_I2C_STATUS 0x06
-#define XLR_I2C_STARTXFR 0x07
-#define XLR_I2C_BYTECNT 0x08
-#define XLR_I2C_HDSTATIM 0x09
-
-/* Sigma Designs additional registers */
-#define XLR_I2C_INT_EN 0x09
-#define XLR_I2C_INT_STAT 0x0a
-
-/* XLR I2C REGISTERS FLAGS */
-#define XLR_I2C_BUS_BUSY 0x01
-#define XLR_I2C_SDOEMPTY 0x02
-#define XLR_I2C_RXRDY 0x04
-#define XLR_I2C_ACK_ERR 0x08
-#define XLR_I2C_ARB_STARTERR 0x30
-
-/* Register Values */
-#define XLR_I2C_CFG_ADDR 0xF8
-#define XLR_I2C_CFG_NOADDR 0xFA
-#define XLR_I2C_STARTXFR_ND 0x02 /* No Data */
-#define XLR_I2C_STARTXFR_RD 0x01 /* Read */
-#define XLR_I2C_STARTXFR_WR 0x00 /* Write */
-
-#define XLR_I2C_TIMEOUT 10 /* timeout per byte in msec */
-
-/*
- * On XLR/XLS, we need to use __raw_ IO to read the I2C registers
- * because they are in the big-endian MMIO area on the SoC.
- *
- * The readl/writel implementation on XLR/XLS byteswaps, because
- * those are for its little-endian PCI space (see arch/mips/Kconfig).
- */
-static inline void xlr_i2c_wreg(u32 __iomem *base, unsigned int reg, u32 val)
-{
- __raw_writel(val, base + reg);
-}
-
-static inline u32 xlr_i2c_rdreg(u32 __iomem *base, unsigned int reg)
-{
- return __raw_readl(base + reg);
-}
-
-#define XLR_I2C_FLAG_IRQ 1
-
-struct xlr_i2c_config {
- u32 flags; /* optional feature support */
- u32 status_busy; /* value of STATUS[0] when busy */
- u32 cfg_extra; /* extra CFG bits to set */
-};
-
-struct xlr_i2c_private {
- struct i2c_adapter adap;
- u32 __iomem *iobase;
- int irq;
- int pos;
- struct i2c_msg *msg;
- const struct xlr_i2c_config *cfg;
- wait_queue_head_t wait;
- struct clk *clk;
-};
-
-static int xlr_i2c_busy(struct xlr_i2c_private *priv, u32 status)
-{
- return (status & XLR_I2C_BUS_BUSY) == priv->cfg->status_busy;
-}
-
-static int xlr_i2c_idle(struct xlr_i2c_private *priv)
-{
- return !xlr_i2c_busy(priv, xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS));
-}
-
-static int xlr_i2c_wait(struct xlr_i2c_private *priv, unsigned long timeout)
-{
- int status;
- int t;
-
- t = wait_event_timeout(priv->wait, xlr_i2c_idle(priv),
- msecs_to_jiffies(timeout));
- if (!t)
- return -ETIMEDOUT;
-
- status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS);
-
- return status & XLR_I2C_ACK_ERR ? -EIO : 0;
-}
-
-static void xlr_i2c_tx_irq(struct xlr_i2c_private *priv, u32 status)
-{
- struct i2c_msg *msg = priv->msg;
-
- if (status & XLR_I2C_SDOEMPTY)
- xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT,
- msg->buf[priv->pos++]);
-}
-
-static void xlr_i2c_rx_irq(struct xlr_i2c_private *priv, u32 status)
-{
- struct i2c_msg *msg = priv->msg;
-
- if (status & XLR_I2C_RXRDY)
- msg->buf[priv->pos++] =
- xlr_i2c_rdreg(priv->iobase, XLR_I2C_DATAIN);
-}
-
-static irqreturn_t xlr_i2c_irq(int irq, void *dev_id)
-{
- struct xlr_i2c_private *priv = dev_id;
- struct i2c_msg *msg = priv->msg;
- u32 int_stat, status;
-
- int_stat = xlr_i2c_rdreg(priv->iobase, XLR_I2C_INT_STAT);
- if (!int_stat)
- return IRQ_NONE;
-
- xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_STAT, int_stat);
-
- if (!msg)
- return IRQ_HANDLED;
-
- status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS);
-
- if (priv->pos < msg->len) {
- if (msg->flags & I2C_M_RD)
- xlr_i2c_rx_irq(priv, status);
- else
- xlr_i2c_tx_irq(priv, status);
- }
-
- if (!xlr_i2c_busy(priv, status))
- wake_up(&priv->wait);
-
- return IRQ_HANDLED;
-}
-
-static int xlr_i2c_tx(struct xlr_i2c_private *priv, u16 len,
- u8 *buf, u16 addr)
-{
- struct i2c_adapter *adap = &priv->adap;
- unsigned long timeout, stoptime, checktime;
- u32 i2c_status;
- int pos, timedout;
- u8 offset;
- u32 xfer;
-
- offset = buf[0];
- xlr_i2c_wreg(priv->iobase, XLR_I2C_ADDR, offset);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_DEVADDR, addr);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_CFG,
- XLR_I2C_CFG_ADDR | priv->cfg->cfg_extra);
-
- timeout = msecs_to_jiffies(XLR_I2C_TIMEOUT);
- stoptime = jiffies + timeout;
- timedout = 0;
-
- if (len == 1) {
- xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 1);
- xfer = XLR_I2C_STARTXFR_ND;
- pos = 1;
- } else {
- xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 2);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT, buf[1]);
- xfer = XLR_I2C_STARTXFR_WR;
- pos = 2;
- }
-
- priv->pos = pos;
-
-retry:
- /* retry can only happen on the first byte */
- xlr_i2c_wreg(priv->iobase, XLR_I2C_STARTXFR, xfer);
-
- if (priv->irq > 0)
- return xlr_i2c_wait(priv, XLR_I2C_TIMEOUT * len);
-
- while (!timedout) {
- checktime = jiffies;
- i2c_status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS);
-
- if ((i2c_status & XLR_I2C_SDOEMPTY) && pos < len) {
- xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT, buf[pos++]);
-
- /* reset timeout on successful xmit */
- stoptime = jiffies + timeout;
- }
- timedout = time_after(checktime, stoptime);
-
- if (i2c_status & XLR_I2C_ARB_STARTERR) {
- if (timedout)
- break;
- goto retry;
- }
-
- if (i2c_status & XLR_I2C_ACK_ERR)
- return -EIO;
-
- if (!xlr_i2c_busy(priv, i2c_status) && pos >= len)
- return 0;
- }
- dev_err(&adap->dev, "I2C transmit timeout\n");
- return -ETIMEDOUT;
-}
-
-static int xlr_i2c_rx(struct xlr_i2c_private *priv, u16 len, u8 *buf, u16 addr)
-{
- struct i2c_adapter *adap = &priv->adap;
- u32 i2c_status;
- unsigned long timeout, stoptime, checktime;
- int nbytes, timedout;
-
- xlr_i2c_wreg(priv->iobase, XLR_I2C_CFG,
- XLR_I2C_CFG_NOADDR | priv->cfg->cfg_extra);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 1);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_DEVADDR, addr);
-
- priv->pos = 0;
-
- timeout = msecs_to_jiffies(XLR_I2C_TIMEOUT);
- stoptime = jiffies + timeout;
- timedout = 0;
- nbytes = 0;
-retry:
- xlr_i2c_wreg(priv->iobase, XLR_I2C_STARTXFR, XLR_I2C_STARTXFR_RD);
-
- if (priv->irq > 0)
- return xlr_i2c_wait(priv, XLR_I2C_TIMEOUT * len);
-
- while (!timedout) {
- checktime = jiffies;
- i2c_status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS);
- if (i2c_status & XLR_I2C_RXRDY) {
- if (nbytes >= len)
- return -EIO; /* should not happen */
-
- buf[nbytes++] =
- xlr_i2c_rdreg(priv->iobase, XLR_I2C_DATAIN);
-
- /* reset timeout on successful read */
- stoptime = jiffies + timeout;
- }
-
- timedout = time_after(checktime, stoptime);
- if (i2c_status & XLR_I2C_ARB_STARTERR) {
- if (timedout)
- break;
- goto retry;
- }
-
- if (i2c_status & XLR_I2C_ACK_ERR)
- return -EIO;
-
- if (!xlr_i2c_busy(priv, i2c_status))
- return 0;
- }
-
- dev_err(&adap->dev, "I2C receive timeout\n");
- return -ETIMEDOUT;
-}
-
-static int xlr_i2c_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msgs, int num)
-{
- struct i2c_msg *msg;
- int i;
- int ret = 0;
- struct xlr_i2c_private *priv = i2c_get_adapdata(adap);
-
- ret = clk_enable(priv->clk);
- if (ret)
- return ret;
-
- if (priv->irq)
- xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_EN, 0xf);
-
-
- for (i = 0; ret == 0 && i < num; i++) {
- msg = &msgs[i];
- priv->msg = msg;
- if (msg->flags & I2C_M_RD)
- ret = xlr_i2c_rx(priv, msg->len, &msg->buf[0],
- msg->addr);
- else
- ret = xlr_i2c_tx(priv, msg->len, &msg->buf[0],
- msg->addr);
- }
-
- if (priv->irq)
- xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_EN, 0);
-
- clk_disable(priv->clk);
- priv->msg = NULL;
-
- return (ret != 0) ? ret : num;
-}
-
-static u32 xlr_func(struct i2c_adapter *adap)
-{
- /* Emulate SMBUS over I2C */
- return (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | I2C_FUNC_I2C;
-}
-
-static const struct i2c_algorithm xlr_i2c_algo = {
- .master_xfer = xlr_i2c_xfer,
- .functionality = xlr_func,
-};
-
-static const struct i2c_adapter_quirks xlr_i2c_quirks = {
- .flags = I2C_AQ_NO_ZERO_LEN,
-};
-
-static const struct xlr_i2c_config xlr_i2c_config_default = {
- .status_busy = XLR_I2C_BUS_BUSY,
- .cfg_extra = 0,
-};
-
-static const struct xlr_i2c_config xlr_i2c_config_tangox = {
- .flags = XLR_I2C_FLAG_IRQ,
- .status_busy = 0,
- .cfg_extra = 1 << 8,
-};
-
-static const struct of_device_id xlr_i2c_dt_ids[] = {
- {
- .compatible = "sigma,smp8642-i2c",
- .data = &xlr_i2c_config_tangox,
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids);
-
-static int xlr_i2c_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- struct xlr_i2c_private *priv;
- struct clk *clk;
- unsigned long clk_rate;
- unsigned long clk_div;
- u32 busfreq;
- int irq;
- int ret;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- match = of_match_device(xlr_i2c_dt_ids, &pdev->dev);
- if (match)
- priv->cfg = match->data;
- else
- priv->cfg = &xlr_i2c_config_default;
-
- priv->iobase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->iobase))
- return PTR_ERR(priv->iobase);
-
- irq = platform_get_irq(pdev, 0);
-
- if (irq > 0 && (priv->cfg->flags & XLR_I2C_FLAG_IRQ)) {
- priv->irq = irq;
-
- xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_EN, 0);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_STAT, 0xf);
-
- ret = devm_request_irq(&pdev->dev, priv->irq, xlr_i2c_irq,
- IRQF_SHARED, dev_name(&pdev->dev),
- priv);
- if (ret)
- return ret;
-
- init_waitqueue_head(&priv->wait);
- }
-
- if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &busfreq))
- busfreq = I2C_MAX_STANDARD_MODE_FREQ;
-
- clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(clk)) {
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(clk);
- clk_div = DIV_ROUND_UP(clk_rate, 2 * busfreq);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_CLKDIV, clk_div);
-
- clk_disable(clk);
- priv->clk = clk;
- }
-
- priv->adap.dev.parent = &pdev->dev;
- priv->adap.dev.of_node = pdev->dev.of_node;
- priv->adap.owner = THIS_MODULE;
- priv->adap.algo_data = priv;
- priv->adap.algo = &xlr_i2c_algo;
- priv->adap.quirks = &xlr_i2c_quirks;
- priv->adap.nr = pdev->id;
- priv->adap.class = I2C_CLASS_HWMON;
- snprintf(priv->adap.name, sizeof(priv->adap.name), "xlr-i2c");
-
- i2c_set_adapdata(&priv->adap, priv);
- ret = i2c_add_numbered_adapter(&priv->adap);
- if (ret < 0)
- goto err_unprepare_clk;
-
- platform_set_drvdata(pdev, priv);
- dev_info(&priv->adap.dev, "Added I2C Bus.\n");
- return 0;
-
-err_unprepare_clk:
- clk_unprepare(clk);
- return ret;
-}
-
-static int xlr_i2c_remove(struct platform_device *pdev)
-{
- struct xlr_i2c_private *priv;
-
- priv = platform_get_drvdata(pdev);
- i2c_del_adapter(&priv->adap);
- clk_unprepare(priv->clk);
-
- return 0;
-}
-
-static struct platform_driver xlr_i2c_driver = {
- .probe = xlr_i2c_probe,
- .remove = xlr_i2c_remove,
- .driver = {
- .name = "xlr-i2cbus",
- .of_match_table = xlr_i2c_dt_ids,
- },
-};
-
-module_platform_driver(xlr_i2c_driver);
-
-MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@netlogicmicro.com>");
-MODULE_DESCRIPTION("XLR/XLS SoC I2C Controller driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:xlr-i2cbus");
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 92c1cc07ed46..85ed4c1d4924 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -144,9 +144,12 @@ static int i2c_acpi_do_lookup(struct acpi_device *adev,
struct list_head resource_list;
int ret;
- if (acpi_bus_get_status(adev) || !adev->status.present)
+ if (acpi_bus_get_status(adev))
return -EINVAL;
+ if (!acpi_dev_ready_for_enumeration(adev))
+ return -ENODEV;
+
if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0)
return -ENODEV;
@@ -254,6 +257,13 @@ static void i2c_acpi_register_device(struct i2c_adapter *adapter,
struct acpi_device *adev,
struct i2c_board_info *info)
{
+ /*
+ * Skip registration on boards where the ACPI tables are
+ * known to contain bogus I2C devices.
+ */
+ if (acpi_quirk_skip_i2c_client_enumeration(adev))
+ return;
+
adev->power.flags.ignore_parent = true;
acpi_device_set_enumerated(adev);
@@ -473,8 +483,8 @@ struct notifier_block i2c_acpi_notifier = {
};
/**
- * i2c_acpi_new_device - Create i2c-client for the Nth I2cSerialBus resource
- * @dev: Device owning the ACPI resources to get the client from
+ * i2c_acpi_new_device_by_fwnode - Create i2c-client for the Nth I2cSerialBus resource
+ * @fwnode: fwnode with the ACPI resources to get the client from
* @index: Index of ACPI resource to get
* @info: describes the I2C device; note this is modified (addr gets set)
* Context: can sleep
@@ -490,15 +500,20 @@ struct notifier_block i2c_acpi_notifier = {
* Returns a pointer to the new i2c-client, or error pointer in case of failure.
* Specifically, -EPROBE_DEFER is returned if the adapter is not found.
*/
-struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
- struct i2c_board_info *info)
+struct i2c_client *i2c_acpi_new_device_by_fwnode(struct fwnode_handle *fwnode,
+ int index,
+ struct i2c_board_info *info)
{
- struct acpi_device *adev = ACPI_COMPANION(dev);
struct i2c_acpi_lookup lookup;
struct i2c_adapter *adapter;
+ struct acpi_device *adev;
LIST_HEAD(resource_list);
int ret;
+ adev = to_acpi_device_node(fwnode);
+ if (!adev)
+ return ERR_PTR(-ENODEV);
+
memset(&lookup, 0, sizeof(lookup));
lookup.info = info;
lookup.device_handle = acpi_device_handle(adev);
@@ -520,7 +535,7 @@ struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
return i2c_new_client_device(adapter, info);
}
-EXPORT_SYMBOL_GPL(i2c_acpi_new_device);
+EXPORT_SYMBOL_GPL(i2c_acpi_new_device_by_fwnode);
bool i2c_acpi_waive_d0_probe(struct device *dev)
{
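Callers that still hold a struct device need not change at the call site if the header keeps a thin wrapper around the new fwnode-based entry point; one plausible shape for such a wrapper (an assumption here, not quoted from include/linux/i2c.h) is:

    static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
                                                         int index,
                                                         struct i2c_board_info *info)
    {
            return i2c_acpi_new_device_by_fwnode(dev_fwnode(dev), index, info);
    }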
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 73253e667de1..2c59dd748a49 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -953,6 +953,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->dev.of_node = of_node_get(info->of_node);
client->dev.fwnode = info->fwnode;
+ device_enable_async_suspend(&client->dev);
i2c_dev_set_name(adap, client, info);
if (info->swnode) {
@@ -1482,6 +1483,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
if (res)
goto out_reg;
+ device_enable_async_suspend(&adap->dev);
pm_runtime_no_callbacks(&adap->dev);
pm_suspend_ignore_children(&adap->dev, true);
pm_runtime_enable(&adap->dev);
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index bac415a52b78..73a23e117ebe 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -7,6 +7,7 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/overflow.h>
#include <linux/platform_data/i2c-mux-gpio.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -49,49 +50,11 @@ static int i2c_mux_gpio_deselect(struct i2c_mux_core *muxc, u32 chan)
return 0;
}
-#ifdef CONFIG_ACPI
-
-static int i2c_mux_gpio_get_acpi_adr(struct device *dev,
- struct fwnode_handle *fwdev,
- unsigned int *adr)
-
-{
- unsigned long long adr64;
- acpi_status status;
-
- status = acpi_evaluate_integer(ACPI_HANDLE_FWNODE(fwdev),
- METHOD_NAME__ADR,
- NULL, &adr64);
-
- if (!ACPI_SUCCESS(status)) {
- dev_err(dev, "Cannot get address\n");
- return -EINVAL;
- }
-
- *adr = adr64;
- if (*adr != adr64) {
- dev_err(dev, "Address out of range\n");
- return -ERANGE;
- }
-
- return 0;
-}
-
-#else
-
-static int i2c_mux_gpio_get_acpi_adr(struct device *dev,
- struct fwnode_handle *fwdev,
- unsigned int *adr)
-{
- return -EINVAL;
-}
-
-#endif
-
static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct device_node *np = dev->of_node;
struct device_node *adapter_np;
struct i2c_adapter *adapter = NULL;
@@ -99,7 +62,7 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
unsigned *values;
int rc, i = 0;
- if (is_of_node(dev->fwnode)) {
+ if (is_of_node(fwnode)) {
if (!np)
return -ENODEV;
@@ -111,7 +74,7 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
adapter = of_find_i2c_adapter_by_node(adapter_np);
of_node_put(adapter_np);
- } else if (is_acpi_node(dev->fwnode)) {
+ } else if (is_acpi_node(fwnode)) {
/*
* In ACPI land the mux should be a direct child of the i2c
* bus it muxes.
@@ -141,16 +104,16 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
fwnode_property_read_u32(child, "reg", values + i);
} else if (is_acpi_node(child)) {
- rc = i2c_mux_gpio_get_acpi_adr(dev, child, values + i);
+ rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i);
if (rc)
- return rc;
+ return dev_err_probe(dev, rc, "Cannot get address\n");
}
i++;
}
mux->data.values = values;
- if (fwnode_property_read_u32(dev->fwnode, "idle-state", &mux->data.idle))
+ if (device_property_read_u32(dev, "idle-state", &mux->data.idle))
mux->data.idle = I2C_MUX_GPIO_NO_IDLE;
return 0;
@@ -190,7 +153,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values,
- ngpios * sizeof(*mux->gpios), 0,
+ array_size(ngpios, sizeof(*mux->gpios)), 0,
i2c_mux_gpio_select, NULL);
if (!muxc) {
ret = -ENOMEM;
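array_size() from <linux/overflow.h> is the multiplication counterpart of struct_size(): on overflow it saturates to SIZE_MAX, so the allocator rejects the request instead of handing back an undersized buffer from a wrapped multiply. A sketch of the idiom:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    static unsigned int *alloc_values(size_t n)
    {
            /*
             * n * sizeof(unsigned int), but SIZE_MAX if the product
             * overflows -- kzalloc() then simply fails.
             */
            return kzalloc(array_size(n, sizeof(unsigned int)), GFP_KERNEL);
    }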
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index c3b4c677b442..dfe18dcd008d 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -343,7 +343,8 @@ struct bus_type i3c_bus_type = {
static enum i3c_addr_slot_status
i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
{
- int status, bitpos = addr * 2;
+ unsigned long status;
+ int bitpos = addr * 2;
if (addr > I2C_MAX_ADDR)
return I3C_ADDR_SLOT_RSVD;
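Each address occupies two status bits packed into an array of unsigned longs, so the extracted word must be held in an unsigned long before shifting; narrowing it to int risks discarding the upper bits on 64-bit builds. The lookup, with illustrative field and mask names:

    int bitpos = addr * 2;
    unsigned long status;

    /* BITS_PER_LONG-wide words, two bits of status per address. */
    status = bus->addrslots[bitpos / BITS_PER_LONG];
    status >>= bitpos % BITS_PER_LONG;

    return status & I3C_ADDR_SLOT_STATUS_MASK;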
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 03a368da51b9..51a8608203de 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -793,6 +793,10 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
return -ENOMEM;
pos = dw_i3c_master_get_free_pos(master);
+ if (pos < 0) {
+ dw_i3c_master_free_xfer(xfer);
+ return pos;
+ }
cmd = &xfer->cmds[0];
cmd->cmd_hi = 0x1;
cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) |
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 1b73647cc3b1..8c01123dc4ed 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -662,7 +662,7 @@ static int i3c_hci_init(struct i3c_hci *hci)
/* Make sure our data ordering fits the host's */
regval = reg_read(HC_CONTROL);
- if (IS_ENABLED(CONFIG_BIG_ENDIAN)) {
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
regval |= HC_CONTROL_DATA_BIG_ENDIAN;
reg_write(HC_CONTROL, regval);
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
index 783e551a2c85..97bb49ff5b53 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
@@ -160,9 +160,7 @@ static int hci_dat_v1_get_index(struct i3c_hci *hci, u8 dev_addr)
unsigned int dat_idx;
u32 dat_w0;
- for (dat_idx = find_first_bit(hci->DAT_data, hci->DAT_entries);
- dat_idx < hci->DAT_entries;
- dat_idx = find_next_bit(hci->DAT_data, hci->DAT_entries, dat_idx)) {
+ for_each_set_bit(dat_idx, hci->DAT_data, hci->DAT_entries) {
dat_w0 = dat_w0_read(dat_idx);
if (FIELD_GET(DAT_0_DYNAMIC_ADDRESS, dat_w0) == dev_addr)
return dat_idx;
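for_each_set_bit() from <linux/bitmap.h> folds the hand-rolled find_first_bit()/find_next_bit() pair into one iterator; besides being shorter, it advances past the current bit on each pass (find_next_bit() at bit + 1), which the original loop, restarting at dat_idx itself, did not. Usage:

    #include <linux/bitmap.h>

    unsigned int idx;

    /* Visits every set bit exactly once, in ascending order. */
    for_each_set_bit(idx, bitmap, nbits)
            pr_info("DAT entry %u is in use\n", idx);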
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index af873a9be050..2990ac9eaade 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -223,7 +223,7 @@ static int hci_dma_init(struct i3c_hci *hci)
}
if (nr_rings > XFER_RINGS)
nr_rings = XFER_RINGS;
- rings = kzalloc(sizeof(*rings) + nr_rings * sizeof(*rh), GFP_KERNEL);
+ rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
if (!rings)
return -ENOMEM;
hci->io_data = rings;
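struct_size(p, member, n) computes sizeof(*p) plus n trailing elements of the flexible array member, with overflow checking, replacing the open-coded sizeof(*rings) + nr_rings * sizeof(*rh). A sketch with a hypothetical flexible-array struct:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct ring_header { u32 w0; };

    struct ring_set {
            unsigned int nr;
            struct ring_header headers[];   /* flexible array member */
    };

    static struct ring_set *alloc_rings(unsigned int nr)
    {
            struct ring_set *rs;

            /* sizeof(*rs) + nr * sizeof(rs->headers[0]), overflow-checked. */
            rs = kzalloc(struct_size(rs, headers, nr), GFP_KERNEL);
            if (rs)
                    rs->nr = nr;
            return rs;
    }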
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index 80beb1d5be8f..f109923f6c3f 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -98,7 +98,7 @@ struct hci_xfer {
static inline struct hci_xfer *hci_alloc_xfer(unsigned int n)
{
- return kzalloc(sizeof(struct hci_xfer) * n, GFP_KERNEL);
+ return kcalloc(n, sizeof(struct hci_xfer), GFP_KERNEL);
}
static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n)
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 879e5a64acaf..7550dad64ecf 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -17,7 +17,9 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
/* Master Mode Registers */
#define SVC_I3C_MCONFIG 0x000
@@ -119,6 +121,7 @@
#define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
#define SVC_I3C_MAX_DEVS 32
+#define SVC_I3C_PM_TIMEOUT_MS 1000
/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE 16
@@ -236,6 +239,40 @@ static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
writel(mask, master->regs + SVC_I3C_MINTCLR);
}
+static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
+{
+ /* Clear pending warnings */
+ writel(readl(master->regs + SVC_I3C_MERRWARN),
+ master->regs + SVC_I3C_MERRWARN);
+}
+
+static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
+{
+ /* Flush FIFOs */
+ writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
+ master->regs + SVC_I3C_MDATACTRL);
+}
+
+static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
+{
+ u32 reg;
+
+ /* Set RX and TX trigger levels, flush FIFOs */
+ reg = SVC_I3C_MDATACTRL_FLUSHTB |
+ SVC_I3C_MDATACTRL_FLUSHRB |
+ SVC_I3C_MDATACTRL_UNLOCK_TRIG |
+ SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
+ SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
+ writel(reg, master->regs + SVC_I3C_MDATACTRL);
+}
+
+static void svc_i3c_master_reset(struct svc_i3c_master *master)
+{
+ svc_i3c_master_clear_merrwarn(master);
+ svc_i3c_master_reset_fifo_trigger(master);
+ svc_i3c_master_disable_interrupts(master);
+}
+
static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
@@ -279,12 +316,6 @@ static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
udelay(1);
}
-static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
-{
- writel(readl(master->regs + SVC_I3C_MERRWARN),
- master->regs + SVC_I3C_MERRWARN);
-}
-
static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
struct i3c_dev_desc *dev)
{
@@ -449,13 +480,23 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
struct i3c_device_info info = {};
unsigned long fclk_rate, fclk_period_ns;
unsigned int high_period_ns, od_low_period_ns;
- u32 ppbaud, pplow, odhpp, odbaud, i2cbaud, reg;
+ u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
int ret;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
/* Timings derivation */
fclk_rate = clk_get_rate(master->fclk);
- if (!fclk_rate)
- return -EINVAL;
+ if (!fclk_rate) {
+ ret = -EINVAL;
+ goto rpm_out;
+ }
fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
@@ -479,6 +520,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
switch (bus->mode) {
case I3C_BUS_MODE_PURE:
i2cbaud = 0;
+ odstop = 0;
break;
case I3C_BUS_MODE_MIXED_FAST:
case I3C_BUS_MODE_MIXED_LIMITED:
@@ -487,6 +529,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
* between the high and low period does not really matter.
*/
i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
+ odstop = 1;
break;
case I3C_BUS_MODE_MIXED_SLOW:
/*
@@ -494,15 +537,16 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
* constraints as the FM+ mode.
*/
i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
+ odstop = 1;
break;
default:
- return -EINVAL;
+ goto rpm_out;
}
reg = SVC_I3C_MCONFIG_MASTER_EN |
SVC_I3C_MCONFIG_DISTO(0) |
SVC_I3C_MCONFIG_HKEEP(0) |
- SVC_I3C_MCONFIG_ODSTOP(0) |
+ SVC_I3C_MCONFIG_ODSTOP(odstop) |
SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
SVC_I3C_MCONFIG_PPLOW(pplow) |
SVC_I3C_MCONFIG_ODBAUD(odbaud) |
@@ -514,7 +558,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
/* Master core's registration */
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
- return ret;
+ goto rpm_out;
info.dyn_addr = ret;
@@ -523,21 +567,33 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
ret = i3c_master_set_info(&master->base, &info);
if (ret)
- return ret;
+ goto rpm_out;
- svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+rpm_out:
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
- return 0;
+ return ret;
}
static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return;
+ }
svc_i3c_master_disable_interrupts(master);
/* Disable master */
writel(0, master->regs + SVC_I3C_MCONFIG);
+
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
}
static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
@@ -656,8 +712,10 @@ static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
u32 reg;
for (i = 0; i < len; i++) {
- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
- SVC_I3C_MSTATUS_RXPEND(reg), 0, 1000);
+ ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
+ reg,
+ SVC_I3C_MSTATUS_RXPEND(reg),
+ 0, 1000);
if (ret)
return ret;
@@ -687,10 +745,11 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
* Either one slave will send its ID, or the assignment process
* is done.
*/
- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
- SVC_I3C_MSTATUS_RXPEND(reg) |
- SVC_I3C_MSTATUS_MCTRLDONE(reg),
- 1, 1000);
+ ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
+ reg,
+ SVC_I3C_MSTATUS_RXPEND(reg) |
+ SVC_I3C_MSTATUS_MCTRLDONE(reg),
+ 1, 1000);
if (ret)
return ret;
@@ -744,11 +803,12 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
}
/* Wait for the slave to be ready to receive its address */
- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
- SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
- SVC_I3C_MSTATUS_STATE_DAA(reg) &&
- SVC_I3C_MSTATUS_BETWEEN(reg),
- 0, 1000);
+ ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
+ reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
+ SVC_I3C_MSTATUS_STATE_DAA(reg) &&
+ SVC_I3C_MSTATUS_BETWEEN(reg),
+ 0, 1000);
if (ret)
return ret;
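The three readl_poll_timeout() → readl_poll_timeout_atomic() swaps matter because this DAA path runs under spin_lock_irqsave(): the plain variant sleeps between reads, which is forbidden in atomic context, while the _atomic variant busy-waits with udelay(). Same arguments, drop-in replacement; a sketch with hypothetical register names:

    #include <linux/iopoll.h>

    u32 reg;
    int ret;

    ret = readl_poll_timeout_atomic(base + EXAMPLE_STATUS, reg,
                                    reg & EXAMPLE_DONE, /* exit condition  */
                                    0,                  /* poll delay (us) */
                                    1000);              /* timeout (us)    */
    if (ret)
            return ret;     /* -ETIMEDOUT if the condition never held */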
@@ -832,31 +892,36 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
unsigned int dev_nb;
int ret, i;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
spin_lock_irqsave(&master->xferqueue.lock, flags);
ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
- if (ret)
- goto emit_stop;
+ if (ret) {
+ svc_i3c_master_emit_stop(master);
+ svc_i3c_master_clear_merrwarn(master);
+ goto rpm_out;
+ }
/* Register with the core all devices that participated in DAA */
for (i = 0; i < dev_nb; i++) {
ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
if (ret)
- return ret;
+ goto rpm_out;
}
/* Configure IBI auto-rules */
ret = svc_i3c_update_ibirules(master);
- if (ret) {
+ if (ret)
dev_err(master->dev, "Cannot handle such a list of devices");
- return ret;
- }
- return 0;
-
-emit_stop:
- svc_i3c_master_emit_stop(master);
- svc_i3c_master_clear_merrwarn(master);
+rpm_out:
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -864,27 +929,35 @@ emit_stop:
static int svc_i3c_master_read(struct svc_i3c_master *master,
u8 *in, unsigned int len)
{
- int offset = 0, i, ret;
- u32 mdctrl;
+ int offset = 0, i;
+ u32 mdctrl, mstatus;
+ bool completed = false;
+ unsigned int count;
+ unsigned long start = jiffies;
- while (offset < len) {
- unsigned int count;
+ while (!completed) {
+ mstatus = readl(master->regs + SVC_I3C_MSTATUS);
+ if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
+ completed = true;
- ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
- mdctrl,
- !(mdctrl & SVC_I3C_MDATACTRL_RXEMPTY),
- 0, 1000);
- if (ret)
- return ret;
+ if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
+ dev_dbg(master->dev, "I3C read timeout\n");
+ return -ETIMEDOUT;
+ }
+ mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
+ if (offset + count > len) {
+ dev_err(master->dev, "I3C receive length too long!\n");
+ return -EINVAL;
+ }
for (i = 0; i < count; i++)
in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
offset += count;
}
- return 0;
+ return offset;
}
static int svc_i3c_master_write(struct svc_i3c_master *master,
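The rewritten read loop above changes the contract: instead of draining a fixed length it polls MSTATUS until COMPLETE, bounds-checks each FIFO burst against the buffer, and returns the number of bytes actually read so the caller can propagate a short read_len. The timeout uses the standard jiffies pattern, sketched here:

    unsigned long start = jiffies;

    while (!done) {
            if (time_after(jiffies, start + msecs_to_jiffies(1000)))
                    return -ETIMEDOUT;      /* give up after ~1 s */
            /* ... read status, drain up to 'count' FIFO bytes ... */
    }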
@@ -917,7 +990,7 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
bool rnw, unsigned int xfer_type, u8 addr,
u8 *in, const u8 *out, unsigned int xfer_len,
- unsigned int read_len, bool continued)
+ unsigned int *read_len, bool continued)
{
u32 reg;
int ret;
@@ -927,7 +1000,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
SVC_I3C_MCTRL_IBIRESP_NACK |
SVC_I3C_MCTRL_DIR(rnw) |
SVC_I3C_MCTRL_ADDR(addr) |
- SVC_I3C_MCTRL_RDTERM(read_len),
+ SVC_I3C_MCTRL_RDTERM(*read_len),
master->regs + SVC_I3C_MCTRL);
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
@@ -939,17 +1012,27 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
ret = svc_i3c_master_read(master, in, xfer_len);
else
ret = svc_i3c_master_write(master, out, xfer_len);
- if (ret)
+ if (ret < 0)
goto emit_stop;
+ if (rnw)
+ *read_len = ret;
+
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
if (ret)
goto emit_stop;
- if (!continued)
+ writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
+
+ if (!continued) {
svc_i3c_master_emit_stop(master);
+ /* Wait idle if stop is sent. */
+ readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
+ }
+
return 0;
emit_stop:
@@ -1007,17 +1090,29 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
if (!xfer)
return;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return;
+ }
+
+ svc_i3c_master_clear_merrwarn(master);
+ svc_i3c_master_flush_fifo(master);
+
for (i = 0; i < xfer->ncmds; i++) {
struct svc_i3c_cmd *cmd = &xfer->cmds[i];
ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
cmd->addr, cmd->in, cmd->out,
- cmd->len, cmd->read_len,
+ cmd->len, &cmd->read_len,
cmd->continued);
if (ret)
break;
}
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+
xfer->ret = ret;
complete(&xfer->comp);
@@ -1141,6 +1236,9 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
+ if (cmd->read_len != xfer_len)
+ ccc->dests[0].payload.len = cmd->read_len;
+
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
@@ -1291,6 +1389,16 @@ static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
@@ -1298,8 +1406,17 @@ static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ svc_i3c_master_disable_interrupts(master);
- return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+
+ return ret;
}
static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
@@ -1330,23 +1447,35 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.disable_ibi = svc_i3c_master_disable_ibi,
};
-static void svc_i3c_master_reset(struct svc_i3c_master *master)
+static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
{
- u32 reg;
+ int ret = 0;
- /* Clear pending warnings */
- writel(readl(master->regs + SVC_I3C_MERRWARN),
- master->regs + SVC_I3C_MERRWARN);
+ ret = clk_prepare_enable(master->pclk);
+ if (ret)
+ return ret;
- /* Set RX and TX tigger levels, flush FIFOs */
- reg = SVC_I3C_MDATACTRL_FLUSHTB |
- SVC_I3C_MDATACTRL_FLUSHRB |
- SVC_I3C_MDATACTRL_UNLOCK_TRIG |
- SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
- SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
- writel(reg, master->regs + SVC_I3C_MDATACTRL);
+ ret = clk_prepare_enable(master->fclk);
+ if (ret) {
+ clk_disable_unprepare(master->pclk);
+ return ret;
+ }
- svc_i3c_master_disable_interrupts(master);
+ ret = clk_prepare_enable(master->sclk);
+ if (ret) {
+ clk_disable_unprepare(master->pclk);
+ clk_disable_unprepare(master->fclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
+{
+ clk_disable_unprepare(master->pclk);
+ clk_disable_unprepare(master->fclk);
+ clk_disable_unprepare(master->sclk);
}
static int svc_i3c_master_probe(struct platform_device *pdev)
@@ -1381,26 +1510,16 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
master->dev = dev;
- svc_i3c_master_reset(master);
-
- ret = clk_prepare_enable(master->pclk);
+ ret = svc_i3c_master_prepare_clks(master);
if (ret)
return ret;
- ret = clk_prepare_enable(master->fclk);
- if (ret)
- goto err_disable_pclk;
-
- ret = clk_prepare_enable(master->sclk);
- if (ret)
- goto err_disable_fclk;
-
INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
IRQF_NO_SUSPEND, "svc-i3c-irq", master);
if (ret)
- goto err_disable_sclk;
+ goto err_disable_clks;
master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
@@ -1414,27 +1533,38 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!master->ibi.slots) {
ret = -ENOMEM;
- goto err_disable_sclk;
+ goto err_disable_clks;
}
platform_set_drvdata(pdev, master);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ svc_i3c_master_reset(master);
+
/* Register the master */
ret = i3c_master_register(&master->base, &pdev->dev,
&svc_i3c_master_ops, false);
if (ret)
- goto err_disable_sclk;
+ goto rpm_disable;
- return 0;
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
-err_disable_sclk:
- clk_disable_unprepare(master->sclk);
+ return 0;
-err_disable_fclk:
- clk_disable_unprepare(master->fclk);
+rpm_disable:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
-err_disable_pclk:
- clk_disable_unprepare(master->pclk);
+err_disable_clks:
+ svc_i3c_master_unprepare_clks(master);
return ret;
}
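The probe rework follows the usual runtime-PM bring-up recipe: take a reference and mark the device active while initialization still needs the hardware, enable autosuspend, and only drop the reference once registration has succeeded. A skeleton of the sequence, assuming the clocks are already enabled and with a hypothetical registration step:

    pm_runtime_set_autosuspend_delay(dev, SVC_I3C_PM_TIMEOUT_MS);
    pm_runtime_use_autosuspend(dev);
    pm_runtime_get_noresume(dev);       /* hold a reference during init */
    pm_runtime_set_active(dev);         /* hardware is already powered  */
    pm_runtime_enable(dev);

    ret = register_with_subsystem(dev); /* hypothetical registration */
    if (ret) {
            pm_runtime_dont_use_autosuspend(dev);
            pm_runtime_put_noidle(dev);
            pm_runtime_set_suspended(dev);
            pm_runtime_disable(dev);
            return ret;
    }

    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);    /* idle timer may now fire */
    return 0;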
@@ -1448,17 +1578,45 @@ static int svc_i3c_master_remove(struct platform_device *pdev)
if (ret)
return ret;
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- clk_disable_unprepare(master->sclk);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
+static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
+{
+ struct svc_i3c_master *master = dev_get_drvdata(dev);
+
+ svc_i3c_master_unprepare_clks(master);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
+{
+ struct svc_i3c_master *master = dev_get_drvdata(dev);
+ int ret = 0;
+
+ pinctrl_pm_select_default_state(dev);
+ svc_i3c_master_prepare_clks(master);
+
+ return ret;
+}
+
+static const struct dev_pm_ops svc_i3c_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
+ svc_i3c_runtime_resume, NULL)
+};
+
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
{ .compatible = "silvaco,i3c-master" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
static struct platform_driver svc_i3c_master = {
.probe = svc_i3c_master_probe,
@@ -1466,6 +1624,7 @@ static struct platform_driver svc_i3c_master = {
.driver = {
.name = "silvaco-i3c-master",
.of_match_table = svc_i3c_master_of_match_tbl,
+ .pm = &svc_i3c_pm_ops,
},
};
module_platform_driver(svc_i3c_master);
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 2334ad249b46..b190846c3dc2 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT
source "drivers/iio/accel/Kconfig"
source "drivers/iio/adc/Kconfig"
+source "drivers/iio/addac/Kconfig"
source "drivers/iio/afe/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
source "drivers/iio/cdc/Kconfig"
@@ -77,6 +78,7 @@ source "drivers/iio/chemical/Kconfig"
source "drivers/iio/common/Kconfig"
source "drivers/iio/dac/Kconfig"
source "drivers/iio/dummy/Kconfig"
+source "drivers/iio/filter/Kconfig"
source "drivers/iio/frequency/Kconfig"
source "drivers/iio/gyro/Kconfig"
source "drivers/iio/health/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 65e39bd4f934..3be08cdadd7e 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
obj-y += accel/
obj-y += adc/
+obj-y += addac/
obj-y += afe/
obj-y += amplifiers/
obj-y += buffer/
@@ -24,6 +25,7 @@ obj-y += common/
obj-y += dac/
obj-y += dummy/
obj-y += gyro/
+obj-y += filter/
obj-y += frequency/
obj-y += health/
obj-y += humidity/
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 2edfcb4819b7..d8a454c266d5 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -658,7 +658,7 @@ static const struct iio_chan_spec_ext_info bma023_ext_info[] = {
static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
IIO_ENUM("power_mode", IIO_SHARED_BY_TYPE, &bma180_power_mode_enum),
- IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
+ IIO_ENUM_AVAILABLE("power_mode", IIO_SHARED_BY_TYPE, &bma180_power_mode_enum),
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma180_accel_get_mount_matrix),
{ }
};
@@ -938,7 +938,7 @@ static int bma180_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
data->client = client;
if (client->dev.of_node)
- chip = (enum chip_ids)of_device_get_match_data(dev);
+ chip = (uintptr_t)of_device_get_match_data(dev);
else
chip = id->driver_data;
data->part_info = &bma180_part_info[chip];
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index bc4c626e454d..74024d7ce5ac 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -27,7 +27,6 @@
#define BMA220_CHIP_ID 0xDD
#define BMA220_READ_MASK BIT(7)
#define BMA220_RANGE_MASK GENMASK(1, 0)
-#define BMA220_DATA_SHIFT 2
#define BMA220_SUSPEND_SLEEP 0xFF
#define BMA220_SUSPEND_WAKE 0x00
@@ -45,7 +44,7 @@
.sign = 's', \
.realbits = 6, \
.storagebits = 8, \
- .shift = BMA220_DATA_SHIFT, \
+ .shift = 2, \
.endianness = IIO_CPU, \
}, \
}
@@ -125,7 +124,8 @@ static int bma220_read_raw(struct iio_dev *indio_dev,
ret = bma220_read_reg(data->spi_device, chan->address);
if (ret < 0)
return -EINVAL;
- *val = sign_extend32(ret >> BMA220_DATA_SHIFT, 5);
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
ret = bma220_read_reg(data->spi_device, BMA220_REG_RANGE);
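This is the first of several accelerometer fixes in the series that replace hard-coded shift/width constants with the channel's own scan_type, making the declared layout the single source of truth for the raw-value path. The shared pattern, for a right-aligned register value in ret:

    /*
     * Drop the padding bits, then sign-extend from the field's MSB.
     * For bma220's 6-bit sample in bits [7:2]: shift = 2, realbits = 6.
     */
    *val = sign_extend32(ret >> chan->scan_type.shift,
                         chan->scan_type.realbits - 1);
    return IIO_VAL_INT;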
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index b0678c351e82..e6081dd0a880 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -170,7 +170,7 @@ static const struct {
{1000, 0, 0x0E},
{2000, 0, 0x0F} };
-static const struct {
+static __maybe_unused const struct {
int bw_bits;
int msec;
} bmc150_accel_sample_upd_time[] = { {0x08, 64},
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 24c9387c2968..0fe570316848 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -315,7 +315,7 @@ static const char *const kxtf9_samp_freq_avail =
"25 50 100 200 400 800";
/* Refer to section 4 of the specification */
-static const struct {
+static __maybe_unused const struct {
int odr_bits;
int usec;
} odr_start_up_times[KX_MAX_CHIPS][12] = {
@@ -927,7 +927,8 @@ static int kxcjk1013_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->mutex);
return ret;
}
- *val = sign_extend32(ret >> 4, 11);
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
ret = kxcjk1013_set_power_state(data, false);
}
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index 777c6c384b09..e6739ba74edf 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -134,7 +134,8 @@ static int mma7455_read_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- *val = sign_extend32(le16_to_cpu(data), 9);
+ *val = sign_extend32(le16_to_cpu(data),
+ chan->scan_type.realbits - 1);
return IIO_VAL_INT;
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index cd6cdf2c51b0..24b83ccdb950 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -210,10 +210,16 @@ static int mma7660_probe(struct i2c_client *client,
static int mma7660_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ int ret;
iio_device_unregister(indio_dev);
- return mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY);
+ ret = mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY);
+ if (ret)
+ dev_warn(&client->dev, "Failed to put device in stand-by mode (%pe), ignoring\n",
+ ERR_PTR(ret));
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 09c7f10fefb6..64b82b4503ad 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
{
struct iio_dev *indio_dev = p;
struct mma8452_data *data = iio_priv(indio_dev);
- int ret = IRQ_NONE;
+ irqreturn_t ret = IRQ_NONE;
int src;
src = i2c_smbus_read_byte_data(data->client, MMA8452_INT_SRC);
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index ba3ecb3b57dc..0570ab1cc064 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -917,7 +917,7 @@ static const struct iio_enum mma9553_calibgender_enum = {
static const struct iio_chan_spec_ext_info mma9553_ext_info[] = {
IIO_ENUM("calibgender", IIO_SHARED_BY_TYPE, &mma9553_calibgender_enum),
- IIO_ENUM_AVAILABLE("calibgender", &mma9553_calibgender_enum),
+ IIO_ENUM_AVAILABLE("calibgender", IIO_SHARED_BY_TYPE, &mma9553_calibgender_enum),
{},
};
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index c6b75308148a..43ecacbdc95a 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -534,6 +534,13 @@ static const struct iio_chan_spec sca3000_channels_with_temp[] = {
BIT(IIO_CHAN_INFO_OFFSET),
/* No buffer support */
.scan_index = -1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 9,
+ .storagebits = 16,
+ .shift = 5,
+ .endianness = IIO_BE,
+ },
},
{
.type = IIO_ACCEL,
@@ -730,8 +737,9 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&st->lock);
return ret;
}
- *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
- *val = sign_extend32(*val, 12);
+ *val = sign_extend32(be16_to_cpup((__be16 *)st->rx) >>
+ chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
} else {
/* get the temperature when available */
ret = sca3000_read_data_short(st,
@@ -741,8 +749,9 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&st->lock);
return ret;
}
- *val = ((st->rx[0] & 0x3F) << 3) |
- ((st->rx[1] & 0xE0) >> 5);
+ *val = (be16_to_cpup((__be16 *)st->rx) >>
+ chan->scan_type.shift) &
+ GENMASK(chan->scan_type.realbits - 1, 0);
}
mutex_unlock(&st->lock);
return IIO_VAL_INT;
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 43c621d0f11e..de0cdf8c1f94 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -355,7 +355,7 @@ static int stk8312_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
return ret;
}
- *val = sign_extend32(ret, 7);
+ *val = sign_extend32(ret, chan->scan_type.realbits - 1);
ret = stk8312_set_mode(data,
data->mode & (~STK8312_MODE_ACTIVE));
mutex_unlock(&data->lock);
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index e137a34b5c9a..517c57ed9e94 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -227,7 +227,8 @@ static int stk8ba50_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
return -EINVAL;
}
- *val = sign_extend32(ret >> STK8BA50_DATA_SHIFT, 9);
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
mutex_unlock(&data->lock);
return IIO_VAL_INT;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 3363af15a43f..4fdc8bfbb407 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1146,7 +1146,7 @@ config TI_ADS7950
config TI_ADS8344
tristate "Texas Instruments ADS8344"
- depends on SPI && OF
+ depends on SPI
help
If you say yes here you get support for Texas Instruments ADS8344
ADC chips
@@ -1156,7 +1156,7 @@ config TI_ADS8344
config TI_ADS8688
tristate "Texas Instruments ADS8688"
- depends on SPI && OF
+ depends on SPI
help
If you say yes here you get support for Texas Instruments ADS8684
and ADS8688 ADC chips
@@ -1166,7 +1166,7 @@ config TI_ADS8688
config TI_ADS124S08
tristate "Texas Instruments ADS124S08"
- depends on SPI && OF
+ depends on SPI
help
If you say yes here you get support for Texas Instruments ADS124S08
and ADS124S06 ADC chips
@@ -1288,4 +1288,19 @@ config XILINX_XADC
The driver can also be build as a module. If so, the module will be called
xilinx-xadc.
+config XILINX_AMS
+ tristate "Xilinx AMS driver"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to get support for the Xilinx AMS Ultrascale/Ultrascale+
+ System Monitor. With this you can measure and monitor the voltage and
+ temperature values on the SoC.
+
+ The driver supports voltage and temperature monitoring on Xilinx
+ Ultrascale devices.
+
+ The driver can also be built as a module. If so, the module will be called
+ xilinx-ams.
+
endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index d3f53549720c..4a8f1833993b 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -115,4 +115,5 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_XILINX_AMS) += xilinx-ams.o
obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index e45c600fccc0..bc2cfa5f9592 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -347,7 +347,7 @@ static int ad7124_find_free_config_slot(struct ad7124_state *st)
{
unsigned int free_cfg_slot;
- free_cfg_slot = find_next_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS, 0);
+ free_cfg_slot = find_first_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS);
if (free_cfg_slot == AD7124_MAX_CONFIGS)
return -1;
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 2121a812b0c3..cc990205f306 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -257,7 +257,8 @@ static const struct iio_chan_spec_ext_info ad7192_calibsys_ext_info[] = {
},
IIO_ENUM("sys_calibration_mode", IIO_SEPARATE,
&ad7192_syscalib_mode_enum),
- IIO_ENUM_AVAILABLE("sys_calibration_mode", &ad7192_syscalib_mode_enum),
+ IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
+ &ad7192_syscalib_mode_enum),
{}
};
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index a8ec3efd659e..1d345d66742d 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -159,7 +159,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
*val = (*val >> 2) & 0xfff;
if (chan->scan_type.sign == 's')
- *val = sign_extend32(*val, 11);
+ *val = sign_extend32(*val,
+ chan->scan_type.realbits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 9350ef1f63b5..4f82d7c9acfd 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -62,7 +62,7 @@ struct ad7606_chip_info {
* struct ad7606_state - driver instance specific data
* @dev pointer to kernel device
* @chip_info entry in the table of chips that describes this device
- * @reg regulator info for the the power supply of the device
+ * @reg regulator info for the power supply of the device
* @bops bus operations (SPI or parallel)
* @range voltage range selection, selects which scale to apply
* @oversampling oversampling selection
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 1d652d9b2f5c..cd418bd8bd87 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -467,9 +467,6 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig)
}
EXPORT_SYMBOL_GPL(ad_sd_validate_trigger);
-static const struct iio_trigger_ops ad_sd_trigger_ops = {
-};
-
static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_dev)
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
@@ -486,7 +483,6 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
if (sigma_delta->trig == NULL)
return -ENOMEM;
- sigma_delta->trig->ops = &ad_sd_trigger_ops;
init_completion(&sigma_delta->completion);
sigma_delta->irq_dis = true;
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 92a57cf10fba..854b1f81d807 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1662,10 +1662,9 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
}
}
-static void at91_adc_dma_init(struct platform_device *pdev)
+static void at91_adc_dma_init(struct at91_adc_state *st)
{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct at91_adc_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->indio_dev->dev;
struct dma_slave_config config = {0};
/* we have 2 bytes for each channel */
unsigned int sample_size = st->soc_info.platform->nr_channels * 2;
@@ -1680,9 +1679,9 @@ static void at91_adc_dma_init(struct platform_device *pdev)
if (st->dma_st.dma_chan)
return;
- st->dma_st.dma_chan = dma_request_chan(&pdev->dev, "rx");
+ st->dma_st.dma_chan = dma_request_chan(dev, "rx");
if (IS_ERR(st->dma_st.dma_chan)) {
- dev_info(&pdev->dev, "can't get DMA channel\n");
+ dev_info(dev, "can't get DMA channel\n");
st->dma_st.dma_chan = NULL;
goto dma_exit;
}
@@ -1692,7 +1691,7 @@ static void at91_adc_dma_init(struct platform_device *pdev)
&st->dma_st.rx_dma_buf,
GFP_KERNEL);
if (!st->dma_st.rx_buf) {
- dev_info(&pdev->dev, "can't allocate coherent DMA area\n");
+ dev_info(dev, "can't allocate coherent DMA area\n");
goto dma_chan_disable;
}
@@ -1705,11 +1704,11 @@ static void at91_adc_dma_init(struct platform_device *pdev)
config.dst_maxburst = 1;
if (dmaengine_slave_config(st->dma_st.dma_chan, &config)) {
- dev_info(&pdev->dev, "can't configure DMA slave\n");
+ dev_info(dev, "can't configure DMA slave\n");
goto dma_free_area;
}
- dev_info(&pdev->dev, "using %s for rx DMA transfers\n",
+ dev_info(dev, "using %s for rx DMA transfers\n",
dma_chan_name(st->dma_st.dma_chan));
return;
@@ -1721,13 +1720,12 @@ dma_chan_disable:
dma_release_channel(st->dma_st.dma_chan);
st->dma_st.dma_chan = NULL;
dma_exit:
- dev_info(&pdev->dev, "continuing without DMA support\n");
+ dev_info(dev, "continuing without DMA support\n");
}
-static void at91_adc_dma_disable(struct platform_device *pdev)
+static void at91_adc_dma_disable(struct at91_adc_state *st)
{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct at91_adc_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->indio_dev->dev;
/* we have 2 bytes for each channel */
unsigned int sample_size = st->soc_info.platform->nr_channels * 2;
unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE *
@@ -1745,7 +1743,7 @@ static void at91_adc_dma_disable(struct platform_device *pdev)
dma_release_channel(st->dma_st.dma_chan);
st->dma_st.dma_chan = NULL;
- dev_info(&pdev->dev, "continuing without DMA support\n");
+ dev_info(dev, "continuing without DMA support\n");
}
static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
@@ -1771,9 +1769,9 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
*/
if (val == 1)
- at91_adc_dma_disable(to_platform_device(&indio_dev->dev));
+ at91_adc_dma_disable(st);
else if (val > 1)
- at91_adc_dma_init(to_platform_device(&indio_dev->dev));
+ at91_adc_dma_init(st);
/*
* We can start the DMA only after setting the watermark and
@@ -1781,7 +1779,7 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
*/
ret = at91_adc_buffer_prepare(indio_dev);
if (ret)
- at91_adc_dma_disable(to_platform_device(&indio_dev->dev));
+ at91_adc_dma_disable(st);
return ret;
}
@@ -1828,7 +1826,7 @@ static void at91_adc_hw_init(struct iio_dev *indio_dev)
static ssize_t at91_adc_get_fifo_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", !!st->dma_st.dma_chan);
@@ -1837,7 +1835,7 @@ static ssize_t at91_adc_get_fifo_state(struct device *dev,
static ssize_t at91_adc_get_watermark(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", st->dma_st.watermark);
@@ -2078,7 +2076,7 @@ static int at91_adc_probe(struct platform_device *pdev)
return 0;
dma_disable:
- at91_adc_dma_disable(pdev);
+ at91_adc_dma_disable(st);
per_clk_disable_unprepare:
clk_disable_unprepare(st->per_clk);
vref_disable:
@@ -2095,7 +2093,7 @@ static int at91_adc_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
- at91_adc_dma_disable(pdev);
+ at91_adc_dma_disable(st);
clk_disable_unprepare(st->per_clk);
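
Note: the sysfs callbacks above receive the struct device embedded in the iio_dev itself, so dev_to_iio_dev() (a container_of() wrapper) is the correct accessor; dev_get_drvdata() only happened to work. A minimal sketch with a hypothetical example_show():

#include <linux/iio/iio.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* dev is the iio_dev's own device, not the parent platform device */
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sysfs_emit(buf, "%s\n", indio_dev->name);
}
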
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index df99f1365c39..53bf7d4899d2 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -186,6 +186,8 @@ static const struct iio_chan_spec axp20x_adc_channels[] = {
AXP20X_BATT_CHRG_I_H),
AXP20X_ADC_CHANNEL(AXP20X_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT,
AXP20X_BATT_DISCHRG_I_H),
+ AXP20X_ADC_CHANNEL(AXP20X_TS_IN, "ts_v", IIO_VOLTAGE,
+ AXP20X_TS_IN_H),
};
static const struct iio_chan_spec axp22x_adc_channels[] = {
@@ -203,6 +205,8 @@ static const struct iio_chan_spec axp22x_adc_channels[] = {
AXP20X_BATT_CHRG_I_H),
AXP20X_ADC_CHANNEL(AXP22X_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT,
AXP20X_BATT_DISCHRG_I_H),
+ AXP20X_ADC_CHANNEL(AXP22X_TS_IN, "ts_v", IIO_VOLTAGE,
+ AXP22X_TS_ADC_H),
};
static const struct iio_chan_spec axp813_adc_channels[] = {
@@ -222,6 +226,8 @@ static const struct iio_chan_spec axp813_adc_channels[] = {
AXP20X_BATT_CHRG_I_H),
AXP20X_ADC_CHANNEL(AXP22X_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT,
AXP20X_BATT_DISCHRG_I_H),
+ AXP20X_ADC_CHANNEL(AXP813_TS_IN, "ts_v", IIO_VOLTAGE,
+ AXP288_TS_ADC_H),
};
static int axp20x_adc_raw(struct iio_dev *indio_dev,
@@ -296,11 +302,36 @@ static int axp20x_adc_scale_voltage(int channel, int *val, int *val2)
*val2 = 400000;
return IIO_VAL_INT_PLUS_MICRO;
+ case AXP20X_TS_IN:
+ /* 0.8 mV per LSB */
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
default:
return -EINVAL;
}
}
+static int axp22x_adc_scale_voltage(int channel, int *val, int *val2)
+{
+ switch (channel) {
+ case AXP22X_BATT_V:
+ /* 1.1 mV per LSB */
+ *val = 1;
+ *val2 = 100000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case AXP22X_TS_IN:
+ /* 0.8 mV per LSB */
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ default:
+ return -EINVAL;
+ }
+}
static int axp813_adc_scale_voltage(int channel, int *val, int *val2)
{
switch (channel) {
@@ -314,6 +345,12 @@ static int axp813_adc_scale_voltage(int channel, int *val, int *val2)
*val2 = 100000;
return IIO_VAL_INT_PLUS_MICRO;
+ case AXP813_TS_IN:
+ /* 0.8 mV per LSB */
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
default:
return -EINVAL;
}
@@ -367,12 +404,7 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
{
switch (chan->type) {
case IIO_VOLTAGE:
- if (chan->channel != AXP22X_BATT_V)
- return -EINVAL;
-
- *val = 1;
- *val2 = 100000;
- return IIO_VAL_INT_PLUS_MICRO;
+ return axp22x_adc_scale_voltage(chan->channel, val, val2);
case IIO_CURRENT:
*val = 1;
@@ -476,6 +508,7 @@ static int axp22x_read_raw(struct iio_dev *indio_dev,
{
switch (mask) {
case IIO_CHAN_INFO_OFFSET:
+ /* Offset applies to the PMIC temperature channel only */
*val = -2677;
return IIO_VAL_INT;
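
Note: the 0.8 mV per LSB scales above rely on the IIO_VAL_INT_PLUS_MICRO encoding, where the reported value is val + val2 / 1000000. A minimal sketch with a hypothetical example_ts_scale():

#include <linux/iio/types.h>

static int example_ts_scale(int *val, int *val2)
{
	/* 0 + 800000 / 1000000 = 0.8 (mV per LSB) */
	*val = 0;
	*val2 = 800000;
	return IIO_VAL_INT_PLUS_MICRO;
}
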
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
index d73eac36153f..e911c25d106d 100644
--- a/drivers/iio/adc/envelope-detector.c
+++ b/drivers/iio/adc/envelope-detector.c
@@ -31,14 +31,13 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/iio/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 8b353e26668e..e665e14c6e54 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -350,7 +350,7 @@ static const struct iio_enum hi8435_sensing_mode = {
static const struct iio_chan_spec_ext_info hi8435_ext_info[] = {
IIO_ENUM("sensing_mode", IIO_SEPARATE, &hi8435_sensing_mode),
- IIO_ENUM_AVAILABLE("sensing_mode", &hi8435_sensing_mode),
+ IIO_ENUM_AVAILABLE("sensing_mode", IIO_SHARED_BY_TYPE, &hi8435_sensing_mode),
{},
};
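
Note: IIO_ENUM_AVAILABLE() now takes an explicit sharing argument, so the *_available attribute can be shared by all channels of a type while the control itself stays per-channel. A minimal sketch with a hypothetical example_mode enum:

#include <linux/iio/iio.h>

static const struct iio_enum example_mode;	/* items elided */

static const struct iio_chan_spec_ext_info example_ext_info[] = {
	IIO_ENUM("mode", IIO_SEPARATE, &example_mode),
	IIO_ENUM_AVAILABLE("mode", IIO_SHARED_BY_TYPE, &example_mode),
	{ }
};
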
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 092f8d296527..12f5b8e34c84 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -522,12 +522,11 @@ static int imx7d_adc_probe(struct platform_device *pdev)
imx7d_adc_feature_config(info);
- ret = imx7d_adc_enable(&indio_dev->dev);
+ ret = imx7d_adc_enable(dev);
if (ret)
return ret;
- ret = devm_add_action_or_reset(dev, __imx7d_adc_disable,
- &indio_dev->dev);
+ ret = devm_add_action_or_reset(dev, __imx7d_adc_disable, dev);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index a4b2ff9e0dd5..4f9992a51e64 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -550,7 +550,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
bool val;
int ret;
- ret = strtobool((const char *) buf, &val);
+ ret = strtobool(buf, &val);
if (ret)
return ret;
@@ -842,15 +842,13 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
chip->allow_async_readout);
- task = kthread_create(ina2xx_capture_thread, (void *)indio_dev,
- "%s:%d-%uus", indio_dev->name,
- iio_device_id(indio_dev),
- sampling_us);
+ task = kthread_run(ina2xx_capture_thread, (void *)indio_dev,
+ "%s:%d-%uus", indio_dev->name,
+ iio_device_id(indio_dev),
+ sampling_us);
if (IS_ERR(task))
return PTR_ERR(task);
- get_task_struct(task);
- wake_up_process(task);
chip->task = task;
return 0;
@@ -862,7 +860,6 @@ static int ina2xx_buffer_disable(struct iio_dev *indio_dev)
if (chip->task) {
kthread_stop(chip->task);
- put_task_struct(chip->task);
chip->task = NULL;
}
@@ -974,7 +971,7 @@ static int ina2xx_probe(struct i2c_client *client,
}
if (client->dev.of_node)
- type = (enum ina2xx_ids)of_device_get_match_data(&client->dev);
+ type = (uintptr_t)of_device_get_match_data(&client->dev);
else
type = id->driver_data;
chip->config = &ina2xx_config[type];
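
Note: kthread_run() is exactly kthread_create() followed by wake_up_process(), and the extra task reference can be dropped as long as the thread function only exits once kthread_should_stop() says so, which keeps kthread_stop() safe. A minimal sketch with hypothetical example_* names:

#include <linux/kthread.h>
#include <linux/sched.h>

static int example_fn(void *data)
{
	/* Loop until kthread_stop() is called on us */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *example_start(void)
{
	/* Create and immediately wake the worker thread */
	return kthread_run(example_fn, NULL, "example-worker");
}
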
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index ceefa4d793cf..ae9c9384f23e 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -157,9 +157,6 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(adc->clk),
"error getting clock\n");
- rate = clk_get_rate(adc->clk);
- clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET);
-
adc->vref = devm_regulator_get(&pdev->dev, "vref");
if (IS_ERR(adc->vref))
return dev_err_probe(&pdev->dev, PTR_ERR(adc->vref),
@@ -192,6 +189,9 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ rate = clk_get_rate(adc->clk);
+ clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET);
+
adc->cr_reg = (clkdiv << LPC18XX_ADC_CR_CLKDIV_SHIFT) |
LPC18XX_ADC_CR_PDN;
writel(adc->cr_reg, adc->base + LPC18XX_ADC_CR);
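
Note: the reorder above matters because clk_get_rate() is only meaningful once the clock is prepared and enabled; computing the divider earlier can bake in a stale rate. A minimal sketch with a hypothetical example_setup_clk():

#include <linux/clk.h>
#include <linux/kernel.h>

static int example_setup_clk(struct clk *clk, unsigned long target_hz,
			     unsigned int *clkdiv)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	/* Divider computed from the rate actually in effect */
	*clkdiv = DIV_ROUND_UP(clk_get_rate(clk), target_hz);
	return 0;
}
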
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 052ab23f10b2..01a4275e9c46 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -22,7 +22,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#define DRIVER_NAME "max9611"
@@ -513,11 +514,9 @@ static int max9611_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const char * const shunt_res_prop = "shunt-resistor-micro-ohms";
- const struct device_node *of_node = client->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(max9611_of_table, &client->dev);
struct max9611_dev *max9611;
struct iio_dev *indio_dev;
+ struct device *dev = &client->dev;
unsigned int of_shunt;
int ret;
@@ -528,15 +527,14 @@ static int max9611_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
max9611 = iio_priv(indio_dev);
- max9611->dev = &client->dev;
+ max9611->dev = dev;
max9611->i2c_client = client;
mutex_init(&max9611->lock);
- ret = of_property_read_u32(of_node, shunt_res_prop, &of_shunt);
+ ret = device_property_read_u32(dev, shunt_res_prop, &of_shunt);
if (ret) {
- dev_err(&client->dev,
- "Missing %s property for %pOF node\n",
- shunt_res_prop, of_node);
+ dev_err(dev, "Missing %s property for %pfw node\n",
+ shunt_res_prop, dev_fwnode(dev));
return ret;
}
max9611->shunt_resistor_uohm = of_shunt;
@@ -545,13 +543,13 @@ static int max9611_probe(struct i2c_client *client,
if (ret)
return ret;
- indio_dev->name = of_id->data;
+ indio_dev->name = device_get_match_data(dev);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &indio_info;
indio_dev->channels = max9611_channels;
indio_dev->num_channels = ARRAY_SIZE(max9611_channels);
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
static struct i2c_driver max9611_driver = {
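
Note: the conversion above swaps the OF-only helpers for the fwnode-based property API, which resolves properties for both DT and ACPI enumerated devices. A minimal sketch with a hypothetical example_read_shunt():

#include <linux/device.h>
#include <linux/property.h>

static int example_read_shunt(struct device *dev, u32 *uohms)
{
	/* Works whether dev was enumerated via DT or ACPI */
	return device_property_read_u32(dev, "shunt-resistor-micro-ohms",
					uohms);
}
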
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index e573da5397bb..13535f148c4c 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -10,6 +10,8 @@
#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -200,12 +202,13 @@ static const struct iio_info mcp3911_info = {
.write_raw = mcp3911_write_raw,
};
-static int mcp3911_config(struct mcp3911 *adc, struct device_node *of_node)
+static int mcp3911_config(struct mcp3911 *adc)
{
+ struct device *dev = &adc->spi->dev;
u32 configreg;
int ret;
- of_property_read_u32(of_node, "device-addr", &adc->dev_addr);
+ device_property_read_u32(dev, "device-addr", &adc->dev_addr);
if (adc->dev_addr > 3) {
dev_err(&adc->spi->dev,
"invalid device address (%i). Must be in range 0-3.\n",
@@ -289,7 +292,7 @@ static int mcp3911_probe(struct spi_device *spi)
}
}
- ret = mcp3911_config(adc, spi->dev.of_node);
+ ret = mcp3911_config(adc);
if (ret)
goto clk_disable;
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index a48895046408..727ea6c68049 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -511,8 +511,7 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->model = (enum rcar_gyroadc_model)
- of_device_get_match_data(&pdev->dev);
+ priv->model = (uintptr_t)of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, indio_dev);
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index 32fbf57c362f..9d5be52bd948 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -506,10 +506,8 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no irq resource\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, rzg2l_adc_isr,
0, dev_name(dev), adc);
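
Note: platform_get_irq() already logs an error on failure, so a caller-side dev_err() would just duplicate it. A minimal sketch with a hypothetical example_init_irq():

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int example_init_irq(struct platform_device *pdev,
			    irq_handler_t handler, void *data)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* already logged by the core */

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), data);
}
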
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 8cd258cb2682..897166d9e45c 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -2025,7 +2025,8 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
if (strlen(name) >= STM32_ADC_CH_SZ) {
dev_err(&indio_dev->dev, "Label %s exceeds %d characters\n",
name, STM32_ADC_CH_SZ);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
strncpy(adc->chan_name[val], name, STM32_ADC_CH_SZ);
ret = stm32_adc_populate_int_ch(indio_dev, name, val);
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index fba659bfdb40..d2d405388499 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -256,6 +256,7 @@ static int stmpe_adc_probe(struct platform_device *pdev)
struct stmpe_adc *info;
struct device_node *np;
u32 norequest_mask = 0;
+ unsigned long bits;
int irq_temp, irq_adc;
int num_chan = 0;
int i = 0;
@@ -309,8 +310,8 @@ static int stmpe_adc_probe(struct platform_device *pdev)
of_property_read_u32(np, "st,norequest-mask", &norequest_mask);
- for_each_clear_bit(i, (unsigned long *) &norequest_mask,
- (STMPE_ADC_LAST_NR + 1)) {
+ bits = norequest_mask;
+ for_each_clear_bit(i, &bits, (STMPE_ADC_LAST_NR + 1)) {
stmpe_adc_voltage_chan(&info->stmpe_adc_iio_channels[num_chan], i);
num_chan++;
}
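
Note: the widening above is required because for_each_clear_bit() dereferences a full unsigned long; casting the address of a u32 would read past the variable on 64-bit targets (and yield the wrong bits on big-endian ones). A minimal sketch with a hypothetical example_walk_clear_bits():

#include <linux/bitops.h>
#include <linux/printk.h>

static void example_walk_clear_bits(u32 mask32, unsigned int nbits)
{
	/* Copy into a properly sized object before taking its address */
	unsigned long bits = mask32;
	unsigned int i;

	for_each_clear_bit(i, &bits, nbits)
		pr_info("bit %u is clear\n", i);
}
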
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 16fc608db36a..bd48b073e720 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -156,13 +157,16 @@ static int adc081c_probe(struct i2c_client *client,
{
struct iio_dev *iio;
struct adc081c *adc;
- struct adcxx1c_model *model;
+ const struct adcxx1c_model *model;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
- model = &adcxx1c_models[id->driver_data];
+ if (dev_fwnode(&client->dev))
+ model = device_get_match_data(&client->dev);
+ else
+ model = &adcxx1c_models[id->driver_data];
iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!iio)
@@ -210,10 +214,17 @@ static const struct i2c_device_id adc081c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adc081c_id);
+static const struct acpi_device_id adc081c_acpi_match[] = {
+ /* Used on some AAEON boards */
+ { "ADC081C", (kernel_ulong_t)&adcxx1c_models[ADC081C] },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match);
+
static const struct of_device_id adc081c_of_match[] = {
- { .compatible = "ti,adc081c" },
- { .compatible = "ti,adc101c" },
- { .compatible = "ti,adc121c" },
+ { .compatible = "ti,adc081c", .data = &adcxx1c_models[ADC081C] },
+ { .compatible = "ti,adc101c", .data = &adcxx1c_models[ADC101C] },
+ { .compatible = "ti,adc121c", .data = &adcxx1c_models[ADC121C] },
{ }
};
MODULE_DEVICE_TABLE(of, adc081c_of_match);
@@ -222,6 +233,7 @@ static struct i2c_driver adc081c_driver = {
.driver = {
.name = "adc081c",
.of_match_table = adc081c_of_match,
+ .acpi_match_table = adc081c_acpi_match,
},
.probe = adc081c_probe,
.id_table = adc081c_id,
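
Note: with both OF and ACPI tables carrying a model pointer, device_get_match_data() serves either firmware, and the legacy I2C id table remains as a fallback for board-file instantiation. A minimal sketch with a hypothetical example_get_match():

#include <linux/i2c.h>
#include <linux/property.h>

static const void *example_get_match(struct i2c_client *client,
				     const struct i2c_device_id *id,
				     const void *legacy[])
{
	/* Firmware match data covers both OF .data and ACPI driver_data */
	if (dev_fwnode(&client->dev))
		return device_get_match_data(&client->dev);

	/* No fwnode: fall back to the I2C id table index */
	return legacy[id->driver_data];
}
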
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index fcd5d39dd03e..6eb62b564dae 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/clk.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -239,7 +240,8 @@ static int adc12138_read_raw(struct iio_dev *iio,
if (ret)
return ret;
- *value = sign_extend32(be16_to_cpu(data) >> 3, 12);
+ *value = sign_extend32(be16_to_cpu(data) >> channel->scan_type.shift,
+ channel->scan_type.realbits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
@@ -429,8 +431,8 @@ static int adc12138_probe(struct spi_device *spi)
return -EINVAL;
}
- ret = of_property_read_u32(spi->dev.of_node, "ti,acquisition-time",
- &adc->acquisition_time);
+ ret = device_property_read_u32(&spi->dev, "ti,acquisition-time",
+ &adc->acquisition_time);
if (ret)
adc->acquisition_time = 10;
@@ -516,8 +518,6 @@ static int adc12138_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_OF
-
static const struct of_device_id adc12138_dt_ids[] = {
{ .compatible = "ti,adc12130", },
{ .compatible = "ti,adc12132", },
@@ -526,8 +526,6 @@ static const struct of_device_id adc12138_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, adc12138_dt_ids);
-#endif
-
static const struct spi_device_id adc12138_id[] = {
{ "adc12130", adc12130 },
{ "adc12132", adc12132 },
@@ -539,7 +537,7 @@ MODULE_DEVICE_TABLE(spi, adc12138_id);
static struct spi_driver adc12138_driver = {
.driver = {
.name = "adc12138",
- .of_match_table = of_match_ptr(adc12138_dt_ids),
+ .of_match_table = adc12138_dt_ids,
},
.probe = adc12138_probe,
.remove = adc12138_remove,
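
Note: deriving the shift and sign position from scan_type, as above, replaces the hard-coded constants; for a 13-bit sample left-justified in 16 bits, sign_extend32(x >> 3, 12) is the same operation. A minimal sketch with a hypothetical example_decode():

#include <linux/bitops.h>
#include <linux/types.h>

static int example_decode(u16 raw, unsigned int shift,
			  unsigned int realbits)
{
	/* Drop padding bits, then sign-extend from the MSB of the sample */
	return sign_extend32(raw >> shift, realbits - 1);
}
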
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index b0352e91ac16..068efbce1710 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -464,9 +464,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
mutex_lock(&data->lock);
switch (mask) {
- case IIO_CHAN_INFO_RAW: {
- int shift = chan->scan_type.shift;
-
+ case IIO_CHAN_INFO_RAW:
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
break;
@@ -487,7 +485,8 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
goto release_direct;
}
- *val = sign_extend32(*val >> shift, 15 - shift);
+ *val = sign_extend32(*val >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
ret = ads1015_set_power_state(data, false);
if (ret < 0)
@@ -497,7 +496,6 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
release_direct:
iio_device_release_direct_mode(indio_dev);
break;
- }
case IIO_CHAN_INFO_SCALE:
idx = data->channel_data[chan->address].pga;
*val = ads1015_fullscale_range[idx];
@@ -952,7 +950,7 @@ static int ads1015_probe(struct i2c_client *client,
indio_dev->name = ADS1015_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- chip = (enum chip_ids)device_get_match_data(&client->dev);
+ chip = (uintptr_t)device_get_match_data(&client->dev);
if (chip == ADSXXXX)
chip = id->driver_data;
switch (chip) {
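
Note: the (uintptr_t) cast above avoids clang's -Wvoid-pointer-to-enum-cast; integer match data should round-trip through uintptr_t rather than be cast straight to an enum. A minimal sketch with a hypothetical example_chip_id():

#include <linux/property.h>

static int example_chip_id(struct device *dev)
{
	/* Match data stores a small integer, not a real pointer */
	return (uintptr_t)device_get_match_data(dev);
}
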
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 17d0da5877a9..767b3b634809 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -8,8 +8,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 79c803537dc4..2e24717d7f55 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -281,12 +281,10 @@ static int ads8688_write_reg_range(struct iio_dev *indio_dev,
enum ads8688_range range)
{
unsigned int tmp;
- int ret;
tmp = ADS8688_PROG_REG_RANGE_CH(chan->channel);
- ret = ads8688_prog_write(indio_dev, tmp, range);
- return ret;
+ return ads8688_prog_write(indio_dev, tmp, range);
}
static int ads8688_write_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
new file mode 100644
index 000000000000..8343c5f74121
--- /dev/null
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -0,0 +1,1451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx AMS driver
+ *
+ * Copyright (C) 2021 Xilinx, Inc.
+ *
+ * Manish Narani <mnarani@xilinx.com>
+ * Rajnikant Bhojani <rajnikant.bhojani@xilinx.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+
+/* AMS registers definitions */
+#define AMS_ISR_0 0x010
+#define AMS_ISR_1 0x014
+#define AMS_IER_0 0x020
+#define AMS_IER_1 0x024
+#define AMS_IDR_0 0x028
+#define AMS_IDR_1 0x02C
+#define AMS_PS_CSTS 0x040
+#define AMS_PL_CSTS 0x044
+
+#define AMS_VCC_PSPLL0 0x060
+#define AMS_VCC_PSPLL3 0x06C
+#define AMS_VCCINT 0x078
+#define AMS_VCCBRAM 0x07C
+#define AMS_VCCAUX 0x080
+#define AMS_PSDDRPLL 0x084
+#define AMS_PSINTFPDDR 0x09C
+
+#define AMS_VCC_PSPLL0_CH 48
+#define AMS_VCC_PSPLL3_CH 51
+#define AMS_VCCINT_CH 54
+#define AMS_VCCBRAM_CH 55
+#define AMS_VCCAUX_CH 56
+#define AMS_PSDDRPLL_CH 57
+#define AMS_PSINTFPDDR_CH 63
+
+#define AMS_REG_CONFIG0 0x100
+#define AMS_REG_CONFIG1 0x104
+#define AMS_REG_CONFIG3 0x10C
+#define AMS_REG_CONFIG4 0x110
+#define AMS_REG_SEQ_CH0 0x120
+#define AMS_REG_SEQ_CH1 0x124
+#define AMS_REG_SEQ_CH2 0x118
+
+#define AMS_VUSER0_MASK BIT(0)
+#define AMS_VUSER1_MASK BIT(1)
+#define AMS_VUSER2_MASK BIT(2)
+#define AMS_VUSER3_MASK BIT(3)
+
+#define AMS_TEMP 0x000
+#define AMS_SUPPLY1 0x004
+#define AMS_SUPPLY2 0x008
+#define AMS_VP_VN 0x00C
+#define AMS_VREFP 0x010
+#define AMS_VREFN 0x014
+#define AMS_SUPPLY3 0x018
+#define AMS_SUPPLY4 0x034
+#define AMS_SUPPLY5 0x038
+#define AMS_SUPPLY6 0x03C
+#define AMS_SUPPLY7 0x200
+#define AMS_SUPPLY8 0x204
+#define AMS_SUPPLY9 0x208
+#define AMS_SUPPLY10 0x20C
+#define AMS_VCCAMS 0x210
+#define AMS_TEMP_REMOTE 0x214
+
+#define AMS_REG_VAUX(x) (0x40 + 4 * (x))
+
+#define AMS_PS_RESET_VALUE 0xFFFF
+#define AMS_PL_RESET_VALUE 0xFFFF
+
+#define AMS_CONF0_CHANNEL_NUM_MASK GENMASK(6, 0)
+
+#define AMS_CONF1_SEQ_MASK GENMASK(15, 12)
+#define AMS_CONF1_SEQ_DEFAULT FIELD_PREP(AMS_CONF1_SEQ_MASK, 0)
+#define AMS_CONF1_SEQ_CONTINUOUS FIELD_PREP(AMS_CONF1_SEQ_MASK, 1)
+#define AMS_CONF1_SEQ_SINGLE_CHANNEL FIELD_PREP(AMS_CONF1_SEQ_MASK, 2)
+
+#define AMS_REG_SEQ0_MASK GENMASK(15, 0)
+#define AMS_REG_SEQ2_MASK GENMASK(21, 16)
+#define AMS_REG_SEQ1_MASK GENMASK_ULL(37, 22)
+
+#define AMS_PS_SEQ_MASK GENMASK(21, 0)
+#define AMS_PL_SEQ_MASK GENMASK_ULL(59, 22)
+
+#define AMS_ALARM_TEMP 0x140
+#define AMS_ALARM_SUPPLY1 0x144
+#define AMS_ALARM_SUPPLY2 0x148
+#define AMS_ALARM_SUPPLY3 0x160
+#define AMS_ALARM_SUPPLY4 0x164
+#define AMS_ALARM_SUPPLY5 0x168
+#define AMS_ALARM_SUPPLY6 0x16C
+#define AMS_ALARM_SUPPLY7 0x180
+#define AMS_ALARM_SUPPLY8 0x184
+#define AMS_ALARM_SUPPLY9 0x188
+#define AMS_ALARM_SUPPLY10 0x18C
+#define AMS_ALARM_VCCAMS 0x190
+#define AMS_ALARM_TEMP_REMOTE 0x194
+#define AMS_ALARM_THRESHOLD_OFF_10 0x10
+#define AMS_ALARM_THRESHOLD_OFF_20 0x20
+
+#define AMS_ALARM_THR_DIRECT_MASK BIT(1)
+#define AMS_ALARM_THR_MIN 0x0000
+#define AMS_ALARM_THR_MAX (BIT(16) - 1)
+
+#define AMS_ALARM_MASK GENMASK_ULL(63, 0)
+#define AMS_NO_OF_ALARMS 32
+#define AMS_PL_ALARM_START 16
+#define AMS_PL_ALARM_MASK GENMASK(31, 16)
+#define AMS_ISR0_ALARM_MASK GENMASK(31, 0)
+#define AMS_ISR1_ALARM_MASK (GENMASK(31, 29) | GENMASK(4, 0))
+#define AMS_ISR1_EOC_MASK BIT(3)
+#define AMS_ISR1_INTR_MASK GENMASK_ULL(63, 32)
+#define AMS_ISR0_ALARM_2_TO_0_MASK GENMASK(2, 0)
+#define AMS_ISR0_ALARM_6_TO_3_MASK GENMASK(6, 3)
+#define AMS_ISR0_ALARM_12_TO_7_MASK GENMASK(13, 8)
+#define AMS_CONF1_ALARM_2_TO_0_MASK GENMASK(3, 1)
+#define AMS_CONF1_ALARM_6_TO_3_MASK GENMASK(11, 8)
+#define AMS_CONF1_ALARM_12_TO_7_MASK GENMASK(5, 0)
+#define AMS_REGCFG1_ALARM_MASK \
+ (AMS_CONF1_ALARM_2_TO_0_MASK | AMS_CONF1_ALARM_6_TO_3_MASK | BIT(0))
+#define AMS_REGCFG3_ALARM_MASK AMS_CONF1_ALARM_12_TO_7_MASK
+
+#define AMS_PS_CSTS_PS_READY (BIT(27) | BIT(16))
+#define AMS_PL_CSTS_ACCESS_MASK BIT(1)
+
+#define AMS_PL_MAX_FIXED_CHANNEL 10
+#define AMS_PL_MAX_EXT_CHANNEL 20
+
+#define AMS_INIT_POLL_TIME_US 200
+#define AMS_INIT_TIMEOUT_US 10000
+#define AMS_UNMASK_TIMEOUT_MS 500
+
+/*
+ * The following scale and offset values are derived from
+ * UG580 (v1.7), December 20, 2016
+ */
+#define AMS_SUPPLY_SCALE_1VOLT_mV 1000
+#define AMS_SUPPLY_SCALE_3VOLT_mV 3000
+#define AMS_SUPPLY_SCALE_6VOLT_mV 6000
+#define AMS_SUPPLY_SCALE_DIV_BIT 16
+
+#define AMS_TEMP_SCALE 509314
+#define AMS_TEMP_SCALE_DIV_BIT 16
+#define AMS_TEMP_OFFSET -((280230LL << 16) / 509314)
+
+enum ams_alarm_bit {
+ AMS_ALARM_BIT_TEMP = 0,
+ AMS_ALARM_BIT_SUPPLY1 = 1,
+ AMS_ALARM_BIT_SUPPLY2 = 2,
+ AMS_ALARM_BIT_SUPPLY3 = 3,
+ AMS_ALARM_BIT_SUPPLY4 = 4,
+ AMS_ALARM_BIT_SUPPLY5 = 5,
+ AMS_ALARM_BIT_SUPPLY6 = 6,
+ AMS_ALARM_BIT_RESERVED = 7,
+ AMS_ALARM_BIT_SUPPLY7 = 8,
+ AMS_ALARM_BIT_SUPPLY8 = 9,
+ AMS_ALARM_BIT_SUPPLY9 = 10,
+ AMS_ALARM_BIT_SUPPLY10 = 11,
+ AMS_ALARM_BIT_VCCAMS = 12,
+ AMS_ALARM_BIT_TEMP_REMOTE = 13,
+};
+
+enum ams_seq {
+ AMS_SEQ_VCC_PSPLL = 0,
+ AMS_SEQ_VCC_PSBATT = 1,
+ AMS_SEQ_VCCINT = 2,
+ AMS_SEQ_VCCBRAM = 3,
+ AMS_SEQ_VCCAUX = 4,
+ AMS_SEQ_PSDDRPLL = 5,
+ AMS_SEQ_INTDDR = 6,
+};
+
+enum ams_ps_pl_seq {
+ AMS_SEQ_CALIB = 0,
+ AMS_SEQ_RSVD_1 = 1,
+ AMS_SEQ_RSVD_2 = 2,
+ AMS_SEQ_TEST = 3,
+ AMS_SEQ_RSVD_4 = 4,
+ AMS_SEQ_SUPPLY4 = 5,
+ AMS_SEQ_SUPPLY5 = 6,
+ AMS_SEQ_SUPPLY6 = 7,
+ AMS_SEQ_TEMP = 8,
+ AMS_SEQ_SUPPLY2 = 9,
+ AMS_SEQ_SUPPLY1 = 10,
+ AMS_SEQ_VP_VN = 11,
+ AMS_SEQ_VREFP = 12,
+ AMS_SEQ_VREFN = 13,
+ AMS_SEQ_SUPPLY3 = 14,
+ AMS_SEQ_CURRENT_MON = 15,
+ AMS_SEQ_SUPPLY7 = 16,
+ AMS_SEQ_SUPPLY8 = 17,
+ AMS_SEQ_SUPPLY9 = 18,
+ AMS_SEQ_SUPPLY10 = 19,
+ AMS_SEQ_VCCAMS = 20,
+ AMS_SEQ_TEMP_REMOTE = 21,
+ AMS_SEQ_MAX = 22
+};
+
+#define AMS_PS_SEQ_MAX AMS_SEQ_MAX
+#define AMS_SEQ(x) (AMS_SEQ_MAX + (x))
+#define PS_SEQ(x) (x)
+#define PL_SEQ(x) (AMS_PS_SEQ_MAX + (x))
+#define AMS_CTRL_SEQ_BASE (AMS_PS_SEQ_MAX * 3)
+
+#define AMS_CHAN_TEMP(_scan_index, _addr) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .event_spec = ams_temp_events, \
+ .scan_index = _scan_index, \
+ .num_event_specs = ARRAY_SIZE(ams_temp_events), \
+}
+
+#define AMS_CHAN_VOLTAGE(_scan_index, _addr, _alarm) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .event_spec = (_alarm) ? ams_voltage_events : NULL, \
+ .scan_index = _scan_index, \
+ .num_event_specs = (_alarm) ? ARRAY_SIZE(ams_voltage_events) : 0, \
+}
+
+#define AMS_PS_CHAN_TEMP(_scan_index, _addr) \
+ AMS_CHAN_TEMP(PS_SEQ(_scan_index), _addr)
+#define AMS_PS_CHAN_VOLTAGE(_scan_index, _addr) \
+ AMS_CHAN_VOLTAGE(PS_SEQ(_scan_index), _addr, true)
+
+#define AMS_PL_CHAN_TEMP(_scan_index, _addr) \
+ AMS_CHAN_TEMP(PL_SEQ(_scan_index), _addr)
+#define AMS_PL_CHAN_VOLTAGE(_scan_index, _addr, _alarm) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(_scan_index), _addr, _alarm)
+#define AMS_PL_AUX_CHAN_VOLTAGE(_auxno) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(_auxno)), AMS_REG_VAUX(_auxno), false)
+#define AMS_CTRL_CHAN_VOLTAGE(_scan_index, _addr) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(AMS_SEQ(_scan_index))), _addr, false)
+
+/**
+ * struct ams - Xilinx AMS driver private state
+ * @base: physical base address of device
+ * @ps_base: physical base address of PS device
+ * @pl_base: physical base address of PL device
+ * @clk: clocks associated with the device
+ * @dev: pointer to device struct
+ * @lock: to handle multiple user interaction
+ * @intr_lock: to protect interrupt mask values
+ * @alarm_mask: alarm configuration
+ * @current_masked_alarm: alarms currently masked due to an active condition
+ * @intr_mask: interrupt configuration
+ * @ams_unmask_work: re-enables events once the event condition disappears
+ */
+struct ams {
+ void __iomem *base;
+ void __iomem *ps_base;
+ void __iomem *pl_base;
+ struct clk *clk;
+ struct device *dev;
+ struct mutex lock;
+ spinlock_t intr_lock;
+ unsigned int alarm_mask;
+ unsigned int current_masked_alarm;
+ u64 intr_mask;
+ struct delayed_work ams_unmask_work;
+};
+
+static inline void ams_ps_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val, regval;
+
+ val = readl(ams->ps_base + offset);
+ regval = (val & ~mask) | (data & mask);
+ writel(regval, ams->ps_base + offset);
+}
+
+static inline void ams_pl_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val, regval;
+
+ val = readl(ams->pl_base + offset);
+ regval = (val & ~mask) | (data & mask);
+ writel(regval, ams->pl_base + offset);
+}
+
+static void ams_update_intrmask(struct ams *ams, u64 mask, u64 val)
+{
+ u32 regval;
+
+ ams->intr_mask = (ams->intr_mask & ~mask) | (val & mask);
+
+ regval = ~(ams->intr_mask | ams->current_masked_alarm);
+ writel(regval, ams->base + AMS_IER_0);
+
+ regval = ~(FIELD_GET(AMS_ISR1_INTR_MASK, ams->intr_mask));
+ writel(regval, ams->base + AMS_IER_1);
+
+ regval = ams->intr_mask | ams->current_masked_alarm;
+ writel(regval, ams->base + AMS_IDR_0);
+
+ regval = FIELD_GET(AMS_ISR1_INTR_MASK, ams->intr_mask);
+ writel(regval, ams->base + AMS_IDR_1);
+}
+
+static void ams_disable_all_alarms(struct ams *ams)
+{
+ /* disable PS module alarm */
+ if (ams->ps_base) {
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
+ AMS_REGCFG1_ALARM_MASK);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
+ AMS_REGCFG3_ALARM_MASK);
+ }
+
+ /* disable PL module alarm */
+ if (ams->pl_base) {
+ ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
+ AMS_REGCFG1_ALARM_MASK);
+ ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
+ AMS_REGCFG3_ALARM_MASK);
+ }
+}
+
+static void ams_update_ps_alarm(struct ams *ams, unsigned long alarm_mask)
+{
+ u32 cfg;
+ u32 val;
+
+ val = FIELD_GET(AMS_ISR0_ALARM_2_TO_0_MASK, alarm_mask);
+ cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_2_TO_0_MASK, val));
+
+ val = FIELD_GET(AMS_ISR0_ALARM_6_TO_3_MASK, alarm_mask);
+ cfg &= ~(FIELD_PREP(AMS_CONF1_ALARM_6_TO_3_MASK, val));
+
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, cfg);
+
+ val = FIELD_GET(AMS_ISR0_ALARM_12_TO_7_MASK, alarm_mask);
+ cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_12_TO_7_MASK, val));
+ ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
+}
+
+static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask)
+{
+ unsigned long pl_alarm_mask;
+ u32 cfg;
+ u32 val;
+
+ pl_alarm_mask = FIELD_GET(AMS_PL_ALARM_MASK, alarm_mask);
+
+ val = FIELD_GET(AMS_ISR0_ALARM_2_TO_0_MASK, pl_alarm_mask);
+ cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_2_TO_0_MASK, val));
+
+ val = FIELD_GET(AMS_ISR0_ALARM_6_TO_3_MASK, pl_alarm_mask);
+ cfg &= ~(FIELD_PREP(AMS_CONF1_ALARM_6_TO_3_MASK, val));
+
+ ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, cfg);
+
+ val = FIELD_GET(AMS_ISR0_ALARM_12_TO_7_MASK, pl_alarm_mask);
+ cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_12_TO_7_MASK, val));
+ ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
+}
+
+static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+{
+ unsigned long flags;
+
+ if (ams->ps_base)
+ ams_update_ps_alarm(ams, alarm_mask);
+
+ if (ams->pl_base)
+ ams_update_pl_alarm(ams, alarm_mask);
+
+ spin_lock_irqsave(&ams->intr_lock, flags);
+ ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
+ spin_unlock_irqrestore(&ams->intr_lock, flags);
+}
+
+static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned long long scan_mask;
+ int i;
+ u32 regval;
+
+ /*
+ * Enable the channel sequence. The first 22 bits of scan_mask
+ * represent the PS channels; the remaining bits represent the PL
+ * channels.
+ */
+
+ /* Run calibration of PS & PL as part of the sequence */
+ scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
+ for (i = 0; i < indio_dev->num_channels; i++)
+ scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
+
+ if (ams->ps_base) {
+ /* put sysmon in a soft reset to change the sequence */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+
+ /* configure basic channels */
+ regval = FIELD_GET(AMS_REG_SEQ0_MASK, scan_mask);
+ writel(regval, ams->ps_base + AMS_REG_SEQ_CH0);
+
+ regval = FIELD_GET(AMS_REG_SEQ2_MASK, scan_mask);
+ writel(regval, ams->ps_base + AMS_REG_SEQ_CH2);
+
+ /* set continuous sequence mode */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_CONTINUOUS);
+ }
+
+ if (ams->pl_base) {
+ /* put sysmon in a soft reset to change the sequence */
+ ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+
+ /* configure basic channels */
+ scan_mask = FIELD_GET(AMS_PL_SEQ_MASK, scan_mask);
+
+ regval = FIELD_GET(AMS_REG_SEQ0_MASK, scan_mask);
+ writel(regval, ams->pl_base + AMS_REG_SEQ_CH0);
+
+ regval = FIELD_GET(AMS_REG_SEQ1_MASK, scan_mask);
+ writel(regval, ams->pl_base + AMS_REG_SEQ_CH1);
+
+ regval = FIELD_GET(AMS_REG_SEQ2_MASK, scan_mask);
+ writel(regval, ams->pl_base + AMS_REG_SEQ_CH2);
+
+ /* set continuous sequence mode */
+ ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_CONTINUOUS);
+ }
+}
+
+static int ams_init_device(struct ams *ams)
+{
+ u32 expect = AMS_PS_CSTS_PS_READY;
+ u32 reg, value;
+ int ret;
+
+ /* reset AMS */
+ if (ams->ps_base) {
+ writel(AMS_PS_RESET_VALUE, ams->ps_base + AMS_VP_VN);
+
+ ret = readl_poll_timeout(ams->base + AMS_PS_CSTS, reg, (reg & expect),
+ AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ /* put sysmon in a default state */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+ }
+
+ if (ams->pl_base) {
+ value = readl(ams->base + AMS_PL_CSTS);
+ if (value == 0)
+ return 0;
+
+ writel(AMS_PL_RESET_VALUE, ams->pl_base + AMS_VP_VN);
+
+ /* put sysmon in a default state */
+ ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+ }
+
+ ams_disable_all_alarms(ams);
+
+ /* Disable interrupt */
+ ams_update_intrmask(ams, AMS_ALARM_MASK, AMS_ALARM_MASK);
+
+ /* Clear any pending interrupt */
+ writel(AMS_ISR0_ALARM_MASK, ams->base + AMS_ISR_0);
+ writel(AMS_ISR1_ALARM_MASK, ams->base + AMS_ISR_1);
+
+ return 0;
+}
+
+static int ams_enable_single_channel(struct ams *ams, unsigned int offset)
+{
+ u8 channel_num;
+
+ switch (offset) {
+ case AMS_VCC_PSPLL0:
+ channel_num = AMS_VCC_PSPLL0_CH;
+ break;
+ case AMS_VCC_PSPLL3:
+ channel_num = AMS_VCC_PSPLL3_CH;
+ break;
+ case AMS_VCCINT:
+ channel_num = AMS_VCCINT_CH;
+ break;
+ case AMS_VCCBRAM:
+ channel_num = AMS_VCCBRAM_CH;
+ break;
+ case AMS_VCCAUX:
+ channel_num = AMS_VCCAUX_CH;
+ break;
+ case AMS_PSDDRPLL:
+ channel_num = AMS_PSDDRPLL_CH;
+ break;
+ case AMS_PSINTFPDDR:
+ channel_num = AMS_PSINTFPDDR_CH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set single channel, sequencer off mode */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_SINGLE_CHANNEL);
+
+ /* write the channel number */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG0, AMS_CONF0_CHANNEL_NUM_MASK,
+ channel_num);
+
+ return 0;
+}
+
+static int ams_read_vcc_reg(struct ams *ams, unsigned int offset, u32 *data)
+{
+ u32 expect = AMS_ISR1_EOC_MASK;
+ u32 reg;
+ int ret;
+
+ ret = ams_enable_single_channel(ams, offset);
+ if (ret)
+ return ret;
+
+ ret = readl_poll_timeout(ams->base + AMS_ISR_1, reg, (reg & expect),
+ AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ *data = readl(ams->base + offset);
+
+ return 0;
+}
+
+static int ams_get_ps_scale(int address)
+{
+ int val;
+
+ switch (address) {
+ case AMS_SUPPLY1:
+ case AMS_SUPPLY2:
+ case AMS_SUPPLY3:
+ case AMS_SUPPLY4:
+ case AMS_SUPPLY9:
+ case AMS_SUPPLY10:
+ case AMS_VCCAMS:
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_SUPPLY5:
+ case AMS_SUPPLY6:
+ case AMS_SUPPLY7:
+ case AMS_SUPPLY8:
+ val = AMS_SUPPLY_SCALE_6VOLT_mV;
+ break;
+ default:
+ val = AMS_SUPPLY_SCALE_1VOLT_mV;
+ break;
+ }
+
+ return val;
+}
+
+static int ams_get_pl_scale(struct ams *ams, int address)
+{
+ int val, regval;
+
+ switch (address) {
+ case AMS_SUPPLY1:
+ case AMS_SUPPLY2:
+ case AMS_SUPPLY3:
+ case AMS_SUPPLY4:
+ case AMS_SUPPLY5:
+ case AMS_SUPPLY6:
+ case AMS_VCCAMS:
+ case AMS_VREFP:
+ case AMS_VREFN:
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_SUPPLY7:
+ regval = readl(ams->pl_base + AMS_REG_CONFIG4);
+ if (FIELD_GET(AMS_VUSER0_MASK, regval))
+ val = AMS_SUPPLY_SCALE_6VOLT_mV;
+ else
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_SUPPLY8:
+ regval = readl(ams->pl_base + AMS_REG_CONFIG4);
+ if (FIELD_GET(AMS_VUSER1_MASK, regval))
+ val = AMS_SUPPLY_SCALE_6VOLT_mV;
+ else
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_SUPPLY9:
+ regval = readl(ams->pl_base + AMS_REG_CONFIG4);
+ if (FIELD_GET(AMS_VUSER2_MASK, regval))
+ val = AMS_SUPPLY_SCALE_6VOLT_mV;
+ else
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_SUPPLY10:
+ regval = readl(ams->pl_base + AMS_REG_CONFIG4);
+ if (FIELD_GET(AMS_VUSER3_MASK, regval))
+ val = AMS_SUPPLY_SCALE_6VOLT_mV;
+ else
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_VP_VN:
+ case AMS_REG_VAUX(0) ... AMS_REG_VAUX(15):
+ val = AMS_SUPPLY_SCALE_1VOLT_mV;
+ break;
+ default:
+ val = AMS_SUPPLY_SCALE_1VOLT_mV;
+ break;
+ }
+
+ return val;
+}
+
+static int ams_get_ctrl_scale(int address)
+{
+ int val;
+
+ switch (address) {
+ case AMS_VCC_PSPLL0:
+ case AMS_VCC_PSPLL3:
+ case AMS_VCCINT:
+ case AMS_VCCBRAM:
+ case AMS_VCCAUX:
+ case AMS_PSDDRPLL:
+ case AMS_PSINTFPDDR:
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ default:
+ val = AMS_SUPPLY_SCALE_1VOLT_mV;
+ break;
+ }
+
+ return val;
+}
+
+static int ams_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&ams->lock);
+ if (chan->scan_index >= AMS_CTRL_SEQ_BASE) {
+ ret = ams_read_vcc_reg(ams, chan->address, val);
+ if (ret)
+ goto unlock_mutex;
+ ams_enable_channel_sequence(indio_dev);
+ } else if (chan->scan_index >= AMS_PS_SEQ_MAX)
+ *val = readl(ams->pl_base + chan->address);
+ else
+ *val = readl(ams->ps_base + chan->address);
+
+ ret = IIO_VAL_INT;
+unlock_mutex:
+ mutex_unlock(&ams->lock);
+ return ret;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->scan_index < AMS_PS_SEQ_MAX)
+ *val = ams_get_ps_scale(chan->address);
+ else if (chan->scan_index >= AMS_PS_SEQ_MAX &&
+ chan->scan_index < AMS_CTRL_SEQ_BASE)
+ *val = ams_get_pl_scale(ams, chan->address);
+ else
+ *val = ams_get_ctrl_scale(chan->address);
+
+ *val2 = AMS_SUPPLY_SCALE_DIV_BIT;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ *val = AMS_TEMP_SCALE;
+ *val2 = AMS_TEMP_SCALE_DIV_BIT;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
+ *val = AMS_TEMP_OFFSET;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ams_get_alarm_offset(int scan_index, enum iio_event_direction dir)
+{
+ int offset;
+
+ if (scan_index >= AMS_PS_SEQ_MAX)
+ scan_index -= AMS_PS_SEQ_MAX;
+
+ if (dir == IIO_EV_DIR_FALLING) {
+ if (scan_index < AMS_SEQ_SUPPLY7)
+ offset = AMS_ALARM_THRESHOLD_OFF_10;
+ else
+ offset = AMS_ALARM_THRESHOLD_OFF_20;
+ } else {
+ offset = 0;
+ }
+
+ switch (scan_index) {
+ case AMS_SEQ_TEMP:
+ return AMS_ALARM_TEMP + offset;
+ case AMS_SEQ_SUPPLY1:
+ return AMS_ALARM_SUPPLY1 + offset;
+ case AMS_SEQ_SUPPLY2:
+ return AMS_ALARM_SUPPLY2 + offset;
+ case AMS_SEQ_SUPPLY3:
+ return AMS_ALARM_SUPPLY3 + offset;
+ case AMS_SEQ_SUPPLY4:
+ return AMS_ALARM_SUPPLY4 + offset;
+ case AMS_SEQ_SUPPLY5:
+ return AMS_ALARM_SUPPLY5 + offset;
+ case AMS_SEQ_SUPPLY6:
+ return AMS_ALARM_SUPPLY6 + offset;
+ case AMS_SEQ_SUPPLY7:
+ return AMS_ALARM_SUPPLY7 + offset;
+ case AMS_SEQ_SUPPLY8:
+ return AMS_ALARM_SUPPLY8 + offset;
+ case AMS_SEQ_SUPPLY9:
+ return AMS_ALARM_SUPPLY9 + offset;
+ case AMS_SEQ_SUPPLY10:
+ return AMS_ALARM_SUPPLY10 + offset;
+ case AMS_SEQ_VCCAMS:
+ return AMS_ALARM_VCCAMS + offset;
+ case AMS_SEQ_TEMP_REMOTE:
+ return AMS_ALARM_TEMP_REMOTE + offset;
+ default:
+ return 0;
+ }
+}
+
+static const struct iio_chan_spec *ams_event_to_channel(struct iio_dev *dev,
+ u32 event)
+{
+ int scan_index = 0, i;
+
+ if (event >= AMS_PL_ALARM_START) {
+ event -= AMS_PL_ALARM_START;
+ scan_index = AMS_PS_SEQ_MAX;
+ }
+
+ switch (event) {
+ case AMS_ALARM_BIT_TEMP:
+ scan_index += AMS_SEQ_TEMP;
+ break;
+ case AMS_ALARM_BIT_SUPPLY1:
+ scan_index += AMS_SEQ_SUPPLY1;
+ break;
+ case AMS_ALARM_BIT_SUPPLY2:
+ scan_index += AMS_SEQ_SUPPLY2;
+ break;
+ case AMS_ALARM_BIT_SUPPLY3:
+ scan_index += AMS_SEQ_SUPPLY3;
+ break;
+ case AMS_ALARM_BIT_SUPPLY4:
+ scan_index += AMS_SEQ_SUPPLY4;
+ break;
+ case AMS_ALARM_BIT_SUPPLY5:
+ scan_index += AMS_SEQ_SUPPLY5;
+ break;
+ case AMS_ALARM_BIT_SUPPLY6:
+ scan_index += AMS_SEQ_SUPPLY6;
+ break;
+ case AMS_ALARM_BIT_SUPPLY7:
+ scan_index += AMS_SEQ_SUPPLY7;
+ break;
+ case AMS_ALARM_BIT_SUPPLY8:
+ scan_index += AMS_SEQ_SUPPLY8;
+ break;
+ case AMS_ALARM_BIT_SUPPLY9:
+ scan_index += AMS_SEQ_SUPPLY9;
+ break;
+ case AMS_ALARM_BIT_SUPPLY10:
+ scan_index += AMS_SEQ_SUPPLY10;
+ break;
+ case AMS_ALARM_BIT_VCCAMS:
+ scan_index += AMS_SEQ_VCCAMS;
+ break;
+ case AMS_ALARM_BIT_TEMP_REMOTE:
+ scan_index += AMS_SEQ_TEMP_REMOTE;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < dev->num_channels; i++)
+ if (dev->channels[i].scan_index == scan_index)
+ break;
+
+ return &dev->channels[i];
+}
+
+static int ams_get_alarm_mask(int scan_index)
+{
+ int bit = 0;
+
+ if (scan_index >= AMS_PS_SEQ_MAX) {
+ bit = AMS_PL_ALARM_START;
+ scan_index -= AMS_PS_SEQ_MAX;
+ }
+
+ switch (scan_index) {
+ case AMS_SEQ_TEMP:
+ return BIT(AMS_ALARM_BIT_TEMP + bit);
+ case AMS_SEQ_SUPPLY1:
+ return BIT(AMS_ALARM_BIT_SUPPLY1 + bit);
+ case AMS_SEQ_SUPPLY2:
+ return BIT(AMS_ALARM_BIT_SUPPLY2 + bit);
+ case AMS_SEQ_SUPPLY3:
+ return BIT(AMS_ALARM_BIT_SUPPLY3 + bit);
+ case AMS_SEQ_SUPPLY4:
+ return BIT(AMS_ALARM_BIT_SUPPLY4 + bit);
+ case AMS_SEQ_SUPPLY5:
+ return BIT(AMS_ALARM_BIT_SUPPLY5 + bit);
+ case AMS_SEQ_SUPPLY6:
+ return BIT(AMS_ALARM_BIT_SUPPLY6 + bit);
+ case AMS_SEQ_SUPPLY7:
+ return BIT(AMS_ALARM_BIT_SUPPLY7 + bit);
+ case AMS_SEQ_SUPPLY8:
+ return BIT(AMS_ALARM_BIT_SUPPLY8 + bit);
+ case AMS_SEQ_SUPPLY9:
+ return BIT(AMS_ALARM_BIT_SUPPLY9 + bit);
+ case AMS_SEQ_SUPPLY10:
+ return BIT(AMS_ALARM_BIT_SUPPLY10 + bit);
+ case AMS_SEQ_VCCAMS:
+ return BIT(AMS_ALARM_BIT_VCCAMS + bit);
+ case AMS_SEQ_TEMP_REMOTE:
+ return BIT(AMS_ALARM_BIT_TEMP_REMOTE + bit);
+ default:
+ return 0;
+ }
+}
+
+static int ams_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ams *ams = iio_priv(indio_dev);
+
+ return !!(ams->alarm_mask & ams_get_alarm_mask(chan->scan_index));
+}
+
+static int ams_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int alarm;
+
+ alarm = ams_get_alarm_mask(chan->scan_index);
+
+ mutex_lock(&ams->lock);
+
+ if (state)
+ ams->alarm_mask |= alarm;
+ else
+ ams->alarm_mask &= ~alarm;
+
+ ams_update_alarm(ams, ams->alarm_mask);
+
+ mutex_unlock(&ams->lock);
+
+ return 0;
+}
+
+static int ams_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int offset = ams_get_alarm_offset(chan->scan_index, dir);
+
+ mutex_lock(&ams->lock);
+
+ if (chan->scan_index >= AMS_PS_SEQ_MAX)
+ *val = readl(ams->pl_base + offset);
+ else
+ *val = readl(ams->ps_base + offset);
+
+ mutex_unlock(&ams->lock);
+
+ return IIO_VAL_INT;
+}
+
+static int ams_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int offset;
+
+ mutex_lock(&ams->lock);
+
+ /* Set temperature channel threshold to direct threshold */
+ if (chan->type == IIO_TEMP) {
+ offset = ams_get_alarm_offset(chan->scan_index, IIO_EV_DIR_FALLING);
+
+ if (chan->scan_index >= AMS_PS_SEQ_MAX)
+ ams_pl_update_reg(ams, offset,
+ AMS_ALARM_THR_DIRECT_MASK,
+ AMS_ALARM_THR_DIRECT_MASK);
+ else
+ ams_ps_update_reg(ams, offset,
+ AMS_ALARM_THR_DIRECT_MASK,
+ AMS_ALARM_THR_DIRECT_MASK);
+ }
+
+ offset = ams_get_alarm_offset(chan->scan_index, dir);
+ if (chan->scan_index >= AMS_PS_SEQ_MAX)
+ writel(val, ams->pl_base + offset);
+ else
+ writel(val, ams->ps_base + offset);
+
+ mutex_unlock(&ams->lock);
+
+ return 0;
+}
+
+static void ams_handle_event(struct iio_dev *indio_dev, u32 event)
+{
+ const struct iio_chan_spec *chan;
+
+ chan = ams_event_to_channel(indio_dev, event);
+
+ if (chan->type == IIO_TEMP) {
+ /*
+ * The temperature channel only supports over-temperature
+ * events.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ } else {
+ /*
+ * For other channels we don't know whether it is an upper or
+ * lower threshold event. Userspace will have to check the
+ * channel value if it wants to know.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+ }
+}
+
+static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
+{
+ unsigned int bit;
+
+ for_each_set_bit(bit, &events, AMS_NO_OF_ALARMS)
+ ams_handle_event(indio_dev, bit);
+}
+
+/**
+ * ams_unmask_worker - ams alarm interrupt unmask worker
+ * @work: work to be done
+ *
+ * The ZynqMP threshold interrupts are level sensitive. Since we can't make the
+ * threshold condition go away from within the interrupt handler, we would
+ * re-enter the handler again and again for as long as the condition persists.
+ * To work around this we mask all active threshold interrupts in the
+ * interrupt handler and start a timer. In this timer we poll the interrupt
+ * status and unmask an interrupt only once it has become inactive again.
+ */
+static void ams_unmask_worker(struct work_struct *work)
+{
+ struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
+ unsigned int status, unmask;
+
+ spin_lock_irq(&ams->intr_lock);
+
+ status = readl(ams->base + AMS_ISR_0);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
+
+ /* Clear status of disabled alarm */
+ unmask |= ams->intr_mask;
+
+ ams->current_masked_alarm &= status;
+
+ /* Also clear those which are masked out anyway */
+ ams->current_masked_alarm &= ~ams->intr_mask;
+
+ /* Clear the interrupts before we unmask them */
+ writel(unmask, ams->base + AMS_ISR_0);
+
+ ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+
+ spin_unlock_irq(&ams->intr_lock);
+
+ /* If still pending some alarm re-trigger the timer */
+ if (ams->current_masked_alarm)
+ schedule_delayed_work(&ams->ams_unmask_work,
+ msecs_to_jiffies(AMS_UNMASK_TIMEOUT_MS));
+}
+
+static irqreturn_t ams_irq(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ams *ams = iio_priv(indio_dev);
+ u32 isr0;
+
+ spin_lock(&ams->intr_lock);
+
+ isr0 = readl(ams->base + AMS_ISR_0);
+
+ /* Only process alarms that are not masked */
+ isr0 &= ~((ams->intr_mask & AMS_ISR0_ALARM_MASK) | ams->current_masked_alarm);
+ if (!isr0) {
+ spin_unlock(&ams->intr_lock);
+ return IRQ_NONE;
+ }
+
+ /* Clear interrupt */
+ writel(isr0, ams->base + AMS_ISR_0);
+
+ /* Mask the alarm interrupts until cleared */
+ ams->current_masked_alarm |= isr0;
+ ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+
+ ams_handle_events(indio_dev, isr0);
+
+ schedule_delayed_work(&ams->ams_unmask_work,
+ msecs_to_jiffies(AMS_UNMASK_TIMEOUT_MS));
+
+ spin_unlock(&ams->intr_lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_event_spec ams_temp_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_event_spec ams_voltage_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec ams_ps_channels[] = {
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP),
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS),
+};
+
+static const struct iio_chan_spec ams_pl_channels[] = {
+ AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, true),
+ AMS_PL_AUX_CHAN_VOLTAGE(0),
+ AMS_PL_AUX_CHAN_VOLTAGE(1),
+ AMS_PL_AUX_CHAN_VOLTAGE(2),
+ AMS_PL_AUX_CHAN_VOLTAGE(3),
+ AMS_PL_AUX_CHAN_VOLTAGE(4),
+ AMS_PL_AUX_CHAN_VOLTAGE(5),
+ AMS_PL_AUX_CHAN_VOLTAGE(6),
+ AMS_PL_AUX_CHAN_VOLTAGE(7),
+ AMS_PL_AUX_CHAN_VOLTAGE(8),
+ AMS_PL_AUX_CHAN_VOLTAGE(9),
+ AMS_PL_AUX_CHAN_VOLTAGE(10),
+ AMS_PL_AUX_CHAN_VOLTAGE(11),
+ AMS_PL_AUX_CHAN_VOLTAGE(12),
+ AMS_PL_AUX_CHAN_VOLTAGE(13),
+ AMS_PL_AUX_CHAN_VOLTAGE(14),
+ AMS_PL_AUX_CHAN_VOLTAGE(15),
+};
+
+static const struct iio_chan_spec ams_ctrl_channels[] = {
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR),
+};
+
+static int ams_get_ext_chan(struct fwnode_handle *chan_node,
+ struct iio_chan_spec *channels, int num_channels)
+{
+ struct iio_chan_spec *chan;
+ struct fwnode_handle *child;
+ unsigned int reg, ext_chan;
+ int ret;
+
+ fwnode_for_each_child_node(chan_node, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg > AMS_PL_MAX_EXT_CHANNEL + 30)
+ continue;
+
+ chan = &channels[num_channels];
+ ext_chan = reg + AMS_PL_MAX_FIXED_CHANNEL - 30;
+ memcpy(chan, &ams_pl_channels[ext_chan], sizeof(*channels));
+
+ if (fwnode_property_read_bool(child, "xlnx,bipolar"))
+ chan->scan_type.sign = 's';
+
+ num_channels++;
+ }
+
+ return num_channels;
+}
+
+static void ams_iounmap_ps(void *data)
+{
+ struct ams *ams = data;
+
+ iounmap(ams->ps_base);
+}
+
+static void ams_iounmap_pl(void *data)
+{
+ struct ams *ams = data;
+
+ iounmap(ams->pl_base);
+}
+
+static int ams_init_module(struct iio_dev *indio_dev,
+ struct fwnode_handle *fwnode,
+ struct iio_chan_spec *channels)
+{
+ struct device *dev = indio_dev->dev.parent;
+ struct ams *ams = iio_priv(indio_dev);
+ int num_channels = 0;
+ int ret;
+
+ if (fwnode_property_match_string(fwnode, "compatible",
+ "xlnx,zynqmp-ams-ps") == 0) {
+ ams->ps_base = fwnode_iomap(fwnode, 0);
+ if (!ams->ps_base)
+ return -ENXIO;
+ ret = devm_add_action_or_reset(dev, ams_iounmap_ps, ams);
+ if (ret < 0)
+ return ret;
+
+ /* add PS channels to iio device channels */
+ memcpy(channels, ams_ps_channels, sizeof(ams_ps_channels));
+ } else if (fwnode_property_match_string(fwnode, "compatible",
+ "xlnx,zynqmp-ams-pl") == 0) {
+ ams->pl_base = fwnode_iomap(fwnode, 0);
+ if (!ams->pl_base)
+ return -ENXIO;
+
+ ret = devm_add_action_or_reset(dev, ams_iounmap_pl, ams);
+ if (ret < 0)
+ return ret;
+
+ /* Copy only the first 10 fixed channels */
+ memcpy(channels, ams_pl_channels, AMS_PL_MAX_FIXED_CHANNEL * sizeof(*channels));
+ num_channels += AMS_PL_MAX_FIXED_CHANNEL;
+ num_channels = ams_get_ext_chan(fwnode, channels,
+ num_channels);
+ } else if (fwnode_property_match_string(fwnode, "compatible",
+ "xlnx,zynqmp-ams") == 0) {
+ /* add AMS channels to iio device channels */
+ memcpy(channels, ams_ctrl_channels, sizeof(ams_ctrl_channels));
+ num_channels += ARRAY_SIZE(ams_ctrl_channels);
+ } else {
+ return -EINVAL;
+ }
+
+ return num_channels;
+}
+
+static int ams_parse_firmware(struct iio_dev *indio_dev)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ struct iio_chan_spec *ams_channels, *dev_channels;
+ struct device *dev = indio_dev->dev.parent;
+ struct fwnode_handle *child = NULL;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ size_t ams_size, dev_size;
+ int ret, ch_cnt = 0, i, rising_off, falling_off;
+ unsigned int num_channels = 0;
+
+ ams_size = ARRAY_SIZE(ams_ps_channels) + ARRAY_SIZE(ams_pl_channels) +
+ ARRAY_SIZE(ams_ctrl_channels);
+
+ /* Initialize buffer for channel specification */
+ ams_channels = devm_kcalloc(dev, ams_size, sizeof(*ams_channels), GFP_KERNEL);
+ if (!ams_channels)
+ return -ENOMEM;
+
+ if (fwnode_device_is_available(fwnode)) {
+ ret = ams_init_module(indio_dev, fwnode, ams_channels);
+ if (ret < 0)
+ return ret;
+
+ num_channels += ret;
+ }
+
+ fwnode_for_each_child_node(fwnode, child) {
+ if (fwnode_device_is_available(child)) {
+ ret = ams_init_module(indio_dev, child, ams_channels + num_channels);
+ if (ret < 0) {
+ fwnode_handle_put(child);
+ return ret;
+ }
+
+ num_channels += ret;
+ }
+ }
+
+ for (i = 0; i < num_channels; i++) {
+ ams_channels[i].channel = ch_cnt++;
+
+ if (ams_channels[i].scan_index < AMS_CTRL_SEQ_BASE) {
+ /* set threshold to max and min for each channel */
+ falling_off =
+ ams_get_alarm_offset(ams_channels[i].scan_index,
+ IIO_EV_DIR_FALLING);
+ rising_off =
+ ams_get_alarm_offset(ams_channels[i].scan_index,
+ IIO_EV_DIR_RISING);
+ if (ams_channels[i].scan_index >= AMS_PS_SEQ_MAX) {
+ writel(AMS_ALARM_THR_MIN,
+ ams->pl_base + falling_off);
+ writel(AMS_ALARM_THR_MAX,
+ ams->pl_base + rising_off);
+ } else {
+ writel(AMS_ALARM_THR_MIN,
+ ams->ps_base + falling_off);
+ writel(AMS_ALARM_THR_MAX,
+ ams->ps_base + rising_off);
+ }
+ }
+ }
+
+ dev_size = array_size(sizeof(*dev_channels), num_channels);
+ if (dev_size == SIZE_MAX)
+ return -ENOMEM;
+
+ dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL);
+ if (!dev_channels)
+ return -ENOMEM;
+
+ indio_dev->channels = dev_channels;
+ indio_dev->num_channels = num_channels;
+
+ return 0;
+}
+
+static const struct iio_info iio_ams_info = {
+ .read_raw = &ams_read_raw,
+ .read_event_config = &ams_read_event_config,
+ .write_event_config = &ams_write_event_config,
+ .read_event_value = &ams_read_event_value,
+ .write_event_value = &ams_write_event_value,
+};
+
+static const struct of_device_id ams_of_match_table[] = {
+ { .compatible = "xlnx,zynqmp-ams" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ams_of_match_table);
+
+static void ams_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void ams_cancel_delayed_work(void *data)
+{
+ cancel_delayed_work(data);
+}
+
+static int ams_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct ams *ams;
+ int ret;
+ int irq;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ams));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ams = iio_priv(indio_dev);
+ mutex_init(&ams->lock);
+ spin_lock_init(&ams->intr_lock);
+
+ indio_dev->name = "xilinx-ams";
+
+ indio_dev->info = &iio_ams_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ams->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ams->base))
+ return PTR_ERR(ams->base);
+
+ ams->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ams->clk))
+ return PTR_ERR(ams->clk);
+
+ ret = clk_prepare_enable(ams->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&pdev->dev, ams_clk_disable_unprepare, ams->clk);
+ if (ret < 0)
+ return ret;
+
+ INIT_DELAYED_WORK(&ams->ams_unmask_work, ams_unmask_worker);
+ ret = devm_add_action_or_reset(&pdev->dev, ams_cancel_delayed_work,
+ &ams->ams_unmask_work);
+ if (ret < 0)
+ return ret;
+
+ ret = ams_parse_firmware(indio_dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to parse DT\n");
+
+ ret = ams_init_device(ams);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to initialize AMS\n");
+
+ ams_enable_channel_sequence(indio_dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq",
+ indio_dev);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed to register interrupt\n");
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static int __maybe_unused ams_suspend(struct device *dev)
+{
+ struct ams *ams = iio_priv(dev_get_drvdata(dev));
+
+ clk_disable_unprepare(ams->clk);
+
+ return 0;
+}
+
+static int __maybe_unused ams_resume(struct device *dev)
+{
+ struct ams *ams = iio_priv(dev_get_drvdata(dev));
+
+ return clk_prepare_enable(ams->clk);
+}
+
+static SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume);
+
+static struct platform_driver ams_driver = {
+ .probe = ams_probe,
+ .driver = {
+ .name = "xilinx-ams",
+ .pm = &ams_pm_ops,
+ .of_match_table = ams_of_match_table,
+ },
+};
+module_platform_driver(ams_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xilinx, Inc.");
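The probe path above leans on devm_add_action_or_reset() so that every manually acquired resource (the enabled clock, the delayed work, the PS/PL mappings) is unwound automatically on probe failure or unbind. A minimal sketch of the idiom, with a hypothetical my_probe() and no claim about this driver's internals:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/platform_device.h>

	static void my_clk_disable(void *data)
	{
		/* Runs automatically on probe failure or device unbind. */
		clk_disable_unprepare(data);
	}

	static int my_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		/*
		 * Register the undo action; if registration itself fails,
		 * the callback fires immediately (the "or_reset" part).
		 */
		return devm_add_action_or_reset(&pdev->dev, my_clk_disable, clk);
	}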
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 83bea5ef765d..823c8e5f9809 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -107,6 +107,7 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_AXI_INT_ALARM_MASK 0x3c0f
#define XADC_FLAGS_BUFFERED BIT(0)
+#define XADC_FLAGS_IRQ_OPTIONAL BIT(1)
/*
* The XADC hardware supports a samplerate of up to 1MSPS. Unfortunately it does
@@ -562,7 +563,7 @@ static const struct xadc_ops xadc_7s_axi_ops = {
.get_dclk_rate = xadc_axi_get_dclk,
.update_alarm = xadc_axi_update_alarm,
.interrupt_handler = xadc_axi_interrupt_handler,
- .flags = XADC_FLAGS_BUFFERED,
+ .flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
.type = XADC_TYPE_S7,
};
@@ -573,7 +574,7 @@ static const struct xadc_ops xadc_us_axi_ops = {
.get_dclk_rate = xadc_axi_get_dclk,
.update_alarm = xadc_axi_update_alarm,
.interrupt_handler = xadc_axi_interrupt_handler,
- .flags = XADC_FLAGS_BUFFERED,
+ .flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
.type = XADC_TYPE_US,
};
@@ -943,7 +944,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
*val = 1000;
break;
}
- *val2 = chan->scan_type.realbits;
+ *val2 = bits;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
/* Temp in C = (val * 503.975) / 2**bits - 273.15 */
@@ -1182,7 +1183,7 @@ static const struct of_device_id xadc_of_match_table[] = {
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
- unsigned int *conf)
+ unsigned int *conf, int irq)
{
struct device *dev = indio_dev->dev.parent;
struct xadc *xadc = iio_priv(indio_dev);
@@ -1195,6 +1196,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
u32 ext_mux_chan;
u32 reg;
int ret;
+ int i;
*conf = 0;
@@ -1273,6 +1275,14 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
}
of_node_put(chan_node);
+ /* No IRQ => no events */
+ if (irq <= 0) {
+ for (i = 0; i < num_channels; i++) {
+ channels[i].event_spec = NULL;
+ channels[i].num_event_specs = 0;
+ }
+ }
+
indio_dev->num_channels = num_channels;
indio_dev->channels = devm_krealloc(dev, channels,
sizeof(*channels) * num_channels,
@@ -1307,6 +1317,7 @@ static int xadc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *id;
+ const struct xadc_ops *ops;
struct iio_dev *indio_dev;
unsigned int bipolar_mask;
unsigned int conf0;
@@ -1322,9 +1333,12 @@ static int xadc_probe(struct platform_device *pdev)
if (!id)
return -EINVAL;
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENXIO;
+ ops = id->data;
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0 &&
+ (irq != -ENXIO || !(ops->flags & XADC_FLAGS_IRQ_OPTIONAL)))
+ return irq;
indio_dev = devm_iio_device_alloc(dev, sizeof(*xadc));
if (!indio_dev)
@@ -1345,7 +1359,7 @@ static int xadc_probe(struct platform_device *pdev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &xadc_info;
- ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0);
+ ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0, irq);
if (ret)
return ret;
@@ -1357,14 +1371,16 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
- if (IS_ERR(xadc->convst_trigger))
- return PTR_ERR(xadc->convst_trigger);
+ if (irq > 0) {
+ xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
+ if (IS_ERR(xadc->convst_trigger))
+ return PTR_ERR(xadc->convst_trigger);
- xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
- "samplerate");
- if (IS_ERR(xadc->samplerate_trigger))
- return PTR_ERR(xadc->samplerate_trigger);
+ xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
+ "samplerate");
+ if (IS_ERR(xadc->samplerate_trigger))
+ return PTR_ERR(xadc->samplerate_trigger);
+ }
}
xadc->clk = devm_clk_get(dev, NULL);
@@ -1396,15 +1412,17 @@ static int xadc_probe(struct platform_device *pdev)
}
}
- ret = devm_request_irq(dev, irq, xadc->ops->interrupt_handler, 0,
- dev_name(dev), indio_dev);
- if (ret)
- return ret;
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, xadc->ops->interrupt_handler,
+ 0, dev_name(dev), indio_dev);
+ if (ret)
+ return ret;
- ret = devm_add_action_or_reset(dev, xadc_cancel_delayed_work,
- &xadc->zynq_unmask_work);
- if (ret)
- return ret;
+ ret = devm_add_action_or_reset(dev, xadc_cancel_delayed_work,
+ &xadc->zynq_unmask_work);
+ if (ret)
+ return ret;
+ }
ret = xadc->ops->setup(pdev, indio_dev, irq);
if (ret)
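The hunks above make the interrupt optional for the AXI variants: platform_get_irq_optional() may fail with -ENXIO, and the triggers, event specs and IRQ registration are all gated on a valid line. In isolation the idiom looks like this (a probe-body fragment sketch; my_irq_handler and priv are hypothetical):

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0 && irq != -ENXIO)
		return irq;	/* a real failure, e.g. -EPROBE_DEFER */

	if (irq > 0) {
		ret = devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
				       dev_name(&pdev->dev), priv);
		if (ret)
			return ret;
	}
	/* irq == -ENXIO: no line wired up; skip IRQ-only features */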
diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig
new file mode 100644
index 000000000000..138492362f20
--- /dev/null
+++ b/drivers/iio/addac/Kconfig
@@ -0,0 +1,20 @@
+#
+# ADC DAC drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Analog to digital and digital to analog converters"
+
+config AD74413R
+ tristate "Analog Devices AD74412R/AD74413R driver"
+ depends on GPIOLIB && SPI
+ select REGMAP_SPI
+ select CRC8
+ help
+ Say yes here to build support for Analog Devices AD74412R/AD74413R
+ quad-channel software configurable input/output solution.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad74413r.
+
+endmenu
diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile
new file mode 100644
index 000000000000..cfd4bbe64ad3
--- /dev/null
+++ b/drivers/iio/addac/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for industrial I/O ADDAC drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AD74413R) += ad74413r.o
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
new file mode 100644
index 000000000000..5271073bb74e
--- /dev/null
+++ b/drivers/iio/addac/ad74413r.c
@@ -0,0 +1,1475 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Analog Devices, Inc.
+ * Author: Cosmin Tanislav <cosmin.tanislav@analog.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <linux/crc8.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <dt-bindings/iio/addac/adi,ad74413r.h>
+
+#define AD74413R_CRC_POLYNOMIAL 0x7
+DECLARE_CRC8_TABLE(ad74413r_crc8_table);
+
+#define AD74413R_CHANNEL_MAX 4
+
+#define AD74413R_FRAME_SIZE 4
+
+struct ad74413r_chip_info {
+ const char *name;
+ bool hart_support;
+};
+
+struct ad74413r_channel_config {
+ u32 func;
+ bool gpo_comparator;
+ bool initialized;
+};
+
+struct ad74413r_channels {
+ struct iio_chan_spec *channels;
+ unsigned int num_channels;
+};
+
+struct ad74413r_state {
+ struct ad74413r_channel_config channel_configs[AD74413R_CHANNEL_MAX];
+ unsigned int gpo_gpio_offsets[AD74413R_CHANNEL_MAX];
+ unsigned int comp_gpio_offsets[AD74413R_CHANNEL_MAX];
+ struct gpio_chip gpo_gpiochip;
+ struct gpio_chip comp_gpiochip;
+ struct completion adc_data_completion;
+ unsigned int num_gpo_gpios;
+ unsigned int num_comparator_gpios;
+ u32 sense_resistor_ohms;
+
+ /*
+ * Synchronize consecutive operations when doing a one-shot
+ * conversion and when updating the ADC samples SPI message.
+ */
+ struct mutex lock;
+
+ const struct ad74413r_chip_info *chip_info;
+ struct spi_device *spi;
+ struct regulator *refin_reg;
+ struct regmap *regmap;
+ struct device *dev;
+ struct iio_trigger *trig;
+
+ size_t adc_active_channels;
+ struct spi_message adc_samples_msg;
+ struct spi_transfer adc_samples_xfer[AD74413R_CHANNEL_MAX + 1];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ struct {
+ u8 rx_buf[AD74413R_FRAME_SIZE * AD74413R_CHANNEL_MAX];
+ s64 timestamp;
+ } adc_samples_buf ____cacheline_aligned;
+
+ u8 adc_samples_tx_buf[AD74413R_FRAME_SIZE * AD74413R_CHANNEL_MAX];
+ u8 reg_tx_buf[AD74413R_FRAME_SIZE];
+ u8 reg_rx_buf[AD74413R_FRAME_SIZE];
+};
+
+#define AD74413R_REG_NOP 0x00
+
+#define AD74413R_REG_CH_FUNC_SETUP_X(x) (0x01 + (x))
+#define AD74413R_CH_FUNC_SETUP_MASK GENMASK(3, 0)
+
+#define AD74413R_REG_ADC_CONFIG_X(x) (0x05 + (x))
+#define AD74413R_ADC_CONFIG_RANGE_MASK GENMASK(7, 5)
+#define AD74413R_ADC_CONFIG_REJECTION_MASK GENMASK(4, 3)
+#define AD74413R_ADC_RANGE_10V 0b000
+#define AD74413R_ADC_RANGE_2P5V_EXT_POW 0b001
+#define AD74413R_ADC_RANGE_2P5V_INT_POW 0b010
+#define AD74413R_ADC_RANGE_5V_BI_DIR 0b011
+#define AD74413R_ADC_REJECTION_50_60 0b00
+#define AD74413R_ADC_REJECTION_NONE 0b01
+#define AD74413R_ADC_REJECTION_50_60_HART 0b10
+#define AD74413R_ADC_REJECTION_HART 0b11
+
+#define AD74413R_REG_DIN_CONFIG_X(x) (0x09 + (x))
+#define AD74413R_DIN_DEBOUNCE_MASK GENMASK(4, 0)
+#define AD74413R_DIN_DEBOUNCE_LEN BIT(5)
+
+#define AD74413R_REG_DAC_CODE_X(x) (0x16 + (x))
+#define AD74413R_DAC_CODE_MAX GENMASK(12, 0)
+#define AD74413R_DAC_VOLTAGE_MAX 11000
+
+#define AD74413R_REG_GPO_PAR_DATA 0x0d
+#define AD74413R_REG_GPO_CONFIG_X(x) (0x0e + (x))
+#define AD74413R_GPO_CONFIG_DATA_MASK BIT(3)
+#define AD74413R_GPO_CONFIG_SELECT_MASK GENMASK(2, 0)
+#define AD74413R_GPO_CONFIG_100K_PULL_DOWN 0b000
+#define AD74413R_GPO_CONFIG_LOGIC 0b001
+#define AD74413R_GPO_CONFIG_LOGIC_PARALLEL 0b010
+#define AD74413R_GPO_CONFIG_COMPARATOR 0b011
+#define AD74413R_GPO_CONFIG_HIGH_IMPEDANCE 0b100
+
+#define AD74413R_REG_ADC_CONV_CTRL 0x23
+#define AD74413R_CONV_SEQ_MASK GENMASK(9, 8)
+#define AD74413R_CONV_SEQ_ON 0b00
+#define AD74413R_CONV_SEQ_SINGLE 0b01
+#define AD74413R_CONV_SEQ_CONTINUOUS 0b10
+#define AD74413R_CONV_SEQ_OFF 0b11
+#define AD74413R_CH_EN_MASK(x) BIT(x)
+
+#define AD74413R_REG_DIN_COMP_OUT 0x25
+#define AD74413R_DIN_COMP_OUT_SHIFT_X(x) x
+
+#define AD74413R_REG_ADC_RESULT_X(x) (0x26 + (x))
+#define AD74413R_ADC_RESULT_MAX GENMASK(15, 0)
+
+#define AD74413R_REG_READ_SELECT 0x41
+
+#define AD74413R_REG_CMD_KEY 0x44
+#define AD74413R_CMD_KEY_LDAC 0x953a
+#define AD74413R_CMD_KEY_RESET1 0x15fa
+#define AD74413R_CMD_KEY_RESET2 0xaf51
+
+static const int ad74413r_adc_sampling_rates[] = {
+ 20, 4800,
+};
+
+static const int ad74413r_adc_sampling_rates_hart[] = {
+ 10, 20, 1200, 4800,
+};
+
+static int ad74413r_crc(u8 *buf)
+{
+ return crc8(ad74413r_crc8_table, buf, 3, 0);
+}
+
+static void ad74413r_format_reg_write(u8 reg, u16 val, u8 *buf)
+{
+ buf[0] = reg;
+ put_unaligned_be16(val, &buf[1]);
+ buf[3] = ad74413r_crc(buf);
+}
+
+static int ad74413r_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct ad74413r_state *st = context;
+
+ ad74413r_format_reg_write(reg, val, st->reg_tx_buf);
+
+ return spi_write(st->spi, st->reg_tx_buf, AD74413R_FRAME_SIZE);
+}
+
+static int ad74413r_crc_check(struct ad74413r_state *st, u8 *buf)
+{
+ u8 expected_crc = ad74413r_crc(buf);
+
+ if (buf[3] != expected_crc) {
+ dev_err(st->dev, "Bad CRC %02x for %02x%02x%02x\n",
+ buf[3], buf[0], buf[1], buf[2]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ad74413r_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct ad74413r_state *st = context;
+ struct spi_transfer reg_read_xfer[] = {
+ {
+ .tx_buf = st->reg_tx_buf,
+ .len = AD74413R_FRAME_SIZE,
+ .cs_change = 1,
+ },
+ {
+ .rx_buf = st->reg_rx_buf,
+ .len = AD74413R_FRAME_SIZE,
+ },
+ };
+ int ret;
+
+ ad74413r_format_reg_write(AD74413R_REG_READ_SELECT, reg,
+ st->reg_tx_buf);
+
+ ret = spi_sync_transfer(st->spi, reg_read_xfer,
+ ARRAY_SIZE(reg_read_xfer));
+ if (ret)
+ return ret;
+
+ ret = ad74413r_crc_check(st, st->reg_rx_buf);
+ if (ret)
+ return ret;
+
+ *val = get_unaligned_be16(&st->reg_rx_buf[1]);
+
+ return 0;
+}
+
+static const struct regmap_config ad74413r_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .reg_read = ad74413r_reg_read,
+ .reg_write = ad74413r_reg_write,
+};
+
+static int ad74413r_set_gpo_config(struct ad74413r_state *st,
+ unsigned int offset, u8 mode)
+{
+ return regmap_update_bits(st->regmap, AD74413R_REG_GPO_CONFIG_X(offset),
+ AD74413R_GPO_CONFIG_SELECT_MASK, mode);
+}
+
+static const unsigned int ad74413r_debounce_map[AD74413R_DIN_DEBOUNCE_LEN] = {
+ 0, 13, 18, 24, 32, 42, 56, 75,
+ 100, 130, 180, 240, 320, 420, 560, 750,
+ 1000, 1300, 1800, 2400, 3200, 4200, 5600, 7500,
+ 10000, 13000, 18000, 24000, 32000, 42000, 56000, 75000,
+};
+
+static int ad74413r_set_comp_debounce(struct ad74413r_state *st,
+ unsigned int offset,
+ unsigned int debounce)
+{
+ unsigned int val = AD74413R_DIN_DEBOUNCE_LEN - 1;
+ unsigned int i;
+
+ for (i = 0; i < AD74413R_DIN_DEBOUNCE_LEN; i++)
+ if (debounce <= ad74413r_debounce_map[i]) {
+ val = i;
+ break;
+ }
+
+ return regmap_update_bits(st->regmap,
+ AD74413R_REG_DIN_CONFIG_X(offset),
+ AD74413R_DIN_DEBOUNCE_MASK,
+ val);
+}
+
+static void ad74413r_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned int real_offset = st->gpo_gpio_offsets[offset];
+ int ret;
+
+ ret = ad74413r_set_gpo_config(st, real_offset,
+ AD74413R_GPO_CONFIG_LOGIC);
+ if (ret)
+ return;
+
+ regmap_update_bits(st->regmap, AD74413R_REG_GPO_CONFIG_X(real_offset),
+ AD74413R_GPO_CONFIG_DATA_MASK,
+ val ? AD74413R_GPO_CONFIG_DATA_MASK : 0);
+}
+
+static void ad74413r_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned long real_mask = 0;
+ unsigned long real_bits = 0;
+ unsigned int offset = 0;
+ int ret;
+
+ for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+ unsigned int real_offset = st->gpo_gpio_offsets[offset];
+
+ ret = ad74413r_set_gpo_config(st, real_offset,
+ AD74413R_GPO_CONFIG_LOGIC_PARALLEL);
+ if (ret)
+ return;
+
+ real_mask |= BIT(real_offset);
+ if (*bits & BIT(offset))
+ real_bits |= BIT(real_offset);
+ }
+
+ regmap_update_bits(st->regmap, AD74413R_REG_GPO_PAR_DATA,
+ real_mask, real_bits);
+}
+
+static int ad74413r_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned int real_offset = st->comp_gpio_offsets[offset];
+ unsigned int status;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD74413R_REG_DIN_COMP_OUT, &status);
+ if (ret)
+ return ret;
+
+ status &= BIT(real_offset);
+
+ return status ? 1 : 0;
+}
+
+static int ad74413r_gpio_get_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned int offset = 0;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD74413R_REG_DIN_COMP_OUT, &val);
+ if (ret)
+ return ret;
+
+ for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+ unsigned int real_offset = st->comp_gpio_offsets[offset];
+
+ if (val & BIT(real_offset))
+ *bits |= BIT(offset);
+ }
+
+ return ret;
+}
+
+static int ad74413r_gpio_get_gpo_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int ad74413r_gpio_get_comp_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int ad74413r_gpio_set_gpo_config(struct gpio_chip *chip,
+ unsigned int offset,
+ unsigned long config)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned int real_offset = st->gpo_gpio_offsets[offset];
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return ad74413r_set_gpo_config(st, real_offset,
+ AD74413R_GPO_CONFIG_100K_PULL_DOWN);
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ return ad74413r_set_gpo_config(st, real_offset,
+ AD74413R_GPO_CONFIG_HIGH_IMPEDANCE);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ad74413r_gpio_set_comp_config(struct gpio_chip *chip,
+ unsigned int offset,
+ unsigned long config)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned int real_offset = st->comp_gpio_offsets[offset];
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return ad74413r_set_comp_debounce(st, real_offset,
+ pinconf_to_config_argument(config));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ad74413r_reset(struct ad74413r_state *st)
+{
+ int ret;
+
+ ret = regmap_write(st->regmap, AD74413R_REG_CMD_KEY,
+ AD74413R_CMD_KEY_RESET1);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, AD74413R_REG_CMD_KEY,
+ AD74413R_CMD_KEY_RESET2);
+}
+
+static int ad74413r_set_channel_dac_code(struct ad74413r_state *st,
+ unsigned int channel, int dac_code)
+{
+ struct reg_sequence reg_seq[2] = {
+ { AD74413R_REG_DAC_CODE_X(channel), dac_code },
+ { AD74413R_REG_CMD_KEY, AD74413R_CMD_KEY_LDAC },
+ };
+
+ return regmap_multi_reg_write(st->regmap, reg_seq, 2);
+}
+
+static int ad74413r_set_channel_function(struct ad74413r_state *st,
+ unsigned int channel, u8 func)
+{
+ return regmap_update_bits(st->regmap,
+ AD74413R_REG_CH_FUNC_SETUP_X(channel),
+ AD74413R_CH_FUNC_SETUP_MASK, func);
+}
+
+static int ad74413r_set_adc_conv_seq(struct ad74413r_state *st,
+ unsigned int status)
+{
+ int ret;
+
+ /*
+ * These bits do not clear when a conversion completes.
+ * To enable a subsequent conversion, repeat the write.
+ */
+ ret = regmap_write_bits(st->regmap, AD74413R_REG_ADC_CONV_CTRL,
+ AD74413R_CONV_SEQ_MASK,
+ FIELD_PREP(AD74413R_CONV_SEQ_MASK, status));
+ if (ret)
+ return ret;
+
+ /*
+ * Wait 100us before starting conversions.
+ */
+ usleep_range(100, 120);
+
+ return 0;
+}
+
+static int ad74413r_set_adc_channel_enable(struct ad74413r_state *st,
+ unsigned int channel,
+ bool status)
+{
+ return regmap_update_bits(st->regmap, AD74413R_REG_ADC_CONV_CTRL,
+ AD74413R_CH_EN_MASK(channel),
+ status ? AD74413R_CH_EN_MASK(channel) : 0);
+}
+
+static int ad74413r_get_adc_range(struct ad74413r_state *st,
+ unsigned int channel,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(st->regmap, AD74413R_REG_ADC_CONFIG_X(channel), val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(AD74413R_ADC_CONFIG_RANGE_MASK, *val);
+
+ return 0;
+}
+
+static int ad74413r_get_adc_rejection(struct ad74413r_state *st,
+ unsigned int channel,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(st->regmap, AD74413R_REG_ADC_CONFIG_X(channel), val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(AD74413R_ADC_CONFIG_REJECTION_MASK, *val);
+
+ return 0;
+}
+
+static int ad74413r_set_adc_rejection(struct ad74413r_state *st,
+ unsigned int channel,
+ unsigned int val)
+{
+ return regmap_update_bits(st->regmap,
+ AD74413R_REG_ADC_CONFIG_X(channel),
+ AD74413R_ADC_CONFIG_REJECTION_MASK,
+ FIELD_PREP(AD74413R_ADC_CONFIG_REJECTION_MASK,
+ val));
+}
+
+static int ad74413r_rejection_to_rate(struct ad74413r_state *st,
+ unsigned int rej, int *val)
+{
+ switch (rej) {
+ case AD74413R_ADC_REJECTION_50_60:
+ *val = 20;
+ return 0;
+ case AD74413R_ADC_REJECTION_NONE:
+ *val = 4800;
+ return 0;
+ case AD74413R_ADC_REJECTION_50_60_HART:
+ *val = 10;
+ return 0;
+ case AD74413R_ADC_REJECTION_HART:
+ *val = 1200;
+ return 0;
+ default:
+ dev_err(st->dev, "ADC rejection invalid\n");
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_rate_to_rejection(struct ad74413r_state *st,
+ int rate, unsigned int *val)
+{
+ switch (rate) {
+ case 20:
+ *val = AD74413R_ADC_REJECTION_50_60;
+ return 0;
+ case 4800:
+ *val = AD74413R_ADC_REJECTION_NONE;
+ return 0;
+ case 10:
+ *val = AD74413R_ADC_REJECTION_50_60_HART;
+ return 0;
+ case 1200:
+ *val = AD74413R_ADC_REJECTION_HART;
+ return 0;
+ default:
+ dev_err(st->dev, "ADC rate invalid\n");
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_range_to_voltage_range(struct ad74413r_state *st,
+ unsigned int range, int *val)
+{
+ switch (range) {
+ case AD74413R_ADC_RANGE_10V:
+ *val = 10000;
+ return 0;
+ case AD74413R_ADC_RANGE_2P5V_EXT_POW:
+ case AD74413R_ADC_RANGE_2P5V_INT_POW:
+ *val = 2500;
+ return 0;
+ case AD74413R_ADC_RANGE_5V_BI_DIR:
+ *val = 5000;
+ return 0;
+ default:
+ dev_err(st->dev, "ADC range invalid\n");
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_range_to_voltage_offset(struct ad74413r_state *st,
+ unsigned int range, int *val)
+{
+ switch (range) {
+ case AD74413R_ADC_RANGE_10V:
+ case AD74413R_ADC_RANGE_2P5V_EXT_POW:
+ *val = 0;
+ return 0;
+ case AD74413R_ADC_RANGE_2P5V_INT_POW:
+ case AD74413R_ADC_RANGE_5V_BI_DIR:
+ *val = -2500;
+ return 0;
+ default:
+ dev_err(st->dev, "ADC range invalid\n");
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_range_to_voltage_offset_raw(struct ad74413r_state *st,
+ unsigned int range, int *val)
+{
+ switch (range) {
+ case AD74413R_ADC_RANGE_10V:
+ case AD74413R_ADC_RANGE_2P5V_EXT_POW:
+ *val = 0;
+ return 0;
+ case AD74413R_ADC_RANGE_2P5V_INT_POW:
+ *val = -((int)AD74413R_ADC_RESULT_MAX);
+ return 0;
+ case AD74413R_ADC_RANGE_5V_BI_DIR:
+ *val = -((int)AD74413R_ADC_RESULT_MAX / 2);
+ return 0;
+ default:
+ dev_err(st->dev, "ADC range invalid\n");
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_get_output_voltage_scale(struct ad74413r_state *st,
+ int *val, int *val2)
+{
+ *val = AD74413R_DAC_VOLTAGE_MAX;
+ *val2 = AD74413R_DAC_CODE_MAX;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int ad74413r_get_output_current_scale(struct ad74413r_state *st,
+ int *val, int *val2)
+{
+ *val = regulator_get_voltage(st->refin_reg);
+ *val2 = st->sense_resistor_ohms * AD74413R_DAC_CODE_MAX * 1000;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int ad74413r_get_input_voltage_scale(struct ad74413r_state *st,
+ unsigned int channel,
+ int *val, int *val2)
+{
+ unsigned int range;
+ int ret;
+
+ ret = ad74413r_get_adc_range(st, channel, &range);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_range_to_voltage_range(st, range, val);
+ if (ret)
+ return ret;
+
+ *val2 = AD74413R_ADC_RESULT_MAX;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int ad74413r_get_input_voltage_offset(struct ad74413r_state *st,
+ unsigned int channel, int *val)
+{
+ unsigned int range;
+ int ret;
+
+ ret = ad74413r_get_adc_range(st, channel, &range);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_range_to_voltage_offset_raw(st, range, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+
+static int ad74413r_get_input_current_scale(struct ad74413r_state *st,
+ unsigned int channel, int *val,
+ int *val2)
+{
+ unsigned int range;
+ int ret;
+
+ ret = ad74413r_get_adc_range(st, channel, &range);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_range_to_voltage_range(st, range, val);
+ if (ret)
+ return ret;
+
+ *val2 = AD74413R_ADC_RESULT_MAX * st->sense_resistor_ohms;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int ad74413_get_input_current_offset(struct ad74413r_state *st,
+ unsigned int channel, int *val)
+{
+ unsigned int range;
+ int voltage_range;
+ int voltage_offset;
+ int ret;
+
+ ret = ad74413r_get_adc_range(st, channel, &range);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_range_to_voltage_range(st, range, &voltage_range);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_range_to_voltage_offset(st, range, &voltage_offset);
+ if (ret)
+ return ret;
+
+ *val = voltage_offset * AD74413R_ADC_RESULT_MAX / voltage_range;
+
+ return IIO_VAL_INT;
+}
+
+static int ad74413r_get_adc_rate(struct ad74413r_state *st,
+ unsigned int channel, int *val)
+{
+ unsigned int rejection;
+ int ret;
+
+ ret = ad74413r_get_adc_rejection(st, channel, &rejection);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_rejection_to_rate(st, rejection, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+
+static int ad74413r_set_adc_rate(struct ad74413r_state *st,
+ unsigned int channel, int val)
+{
+ unsigned int rejection;
+ int ret;
+
+ ret = ad74413r_rate_to_rejection(st, val, &rejection);
+ if (ret)
+ return ret;
+
+ return ad74413r_set_adc_rejection(st, channel, rejection);
+}
+
+static irqreturn_t ad74413r_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ u8 *rx_buf = st->adc_samples_buf.rx_buf;
+ unsigned int i;
+ int ret;
+
+ ret = spi_sync(st->spi, &st->adc_samples_msg);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < st->adc_active_channels; i++)
+ ad74413r_crc_check(st, &rx_buf[i * AD74413R_FRAME_SIZE]);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->adc_samples_buf,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ad74413r_adc_data_interrupt(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ if (iio_buffer_enabled(indio_dev))
+ iio_trigger_poll(st->trig);
+ else
+ complete(&st->adc_data_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int _ad74413r_get_single_adc_result(struct ad74413r_state *st,
+ unsigned int channel, int *val)
+{
+ unsigned int uval;
+ int ret;
+
+ reinit_completion(&st->adc_data_completion);
+
+ ret = ad74413r_set_adc_channel_enable(st, channel, true);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_SINGLE);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&st->adc_data_completion,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ return -ETIMEDOUT;
+
+ ret = regmap_read(st->regmap, AD74413R_REG_ADC_RESULT_X(channel),
+ &uval);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_OFF);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_set_adc_channel_enable(st, channel, false);
+ if (ret)
+ return ret;
+
+ *val = uval;
+
+ return IIO_VAL_INT;
+}
+
+static int ad74413r_get_single_adc_result(struct iio_dev *indio_dev,
+ unsigned int channel, int *val)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&st->lock);
+ ret = _ad74413r_get_single_adc_result(st, channel, val);
+ mutex_unlock(&st->lock);
+
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+static void ad74413r_adc_to_resistance_result(int adc_result, int *val)
+{
+ if (adc_result == AD74413R_ADC_RESULT_MAX)
+ adc_result = AD74413R_ADC_RESULT_MAX - 1;
+
+ *val = DIV_ROUND_CLOSEST(adc_result * 2100,
+ AD74413R_ADC_RESULT_MAX - adc_result);
+}
+
+static int ad74413r_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *active_scan_mask)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ struct spi_transfer *xfer = st->adc_samples_xfer;
+ u8 *rx_buf = &st->adc_samples_buf.rx_buf[-1 * AD74413R_FRAME_SIZE];
+ u8 *tx_buf = st->adc_samples_tx_buf;
+ unsigned int channel;
+ int ret = -EINVAL;
+
+ mutex_lock(&st->lock);
+
+ spi_message_init(&st->adc_samples_msg);
+ st->adc_active_channels = 0;
+
+ for_each_clear_bit(channel, active_scan_mask, AD74413R_CHANNEL_MAX) {
+ ret = ad74413r_set_adc_channel_enable(st, channel, false);
+ if (ret)
+ goto out;
+ }
+
+ if (*active_scan_mask == 0)
+ goto out;
+
+ /*
+ * The read select register is used to select which register's value
+ * will be sent by the slave on the next SPI frame.
+ *
+ * Create an SPI message that, on each step, writes to the read select
+ * register to select the ADC result of the next enabled channel, and
+ * reads the ADC result of the previous enabled channel.
+ *
+ * Example:
+ * W: [WCH1] [WCH2] [WCH3] [WCH4] [ ]
+ * R: [ ] [RCH1] [RCH2] [RCH3] [RCH4]
+ */
+
+ for_each_set_bit(channel, active_scan_mask, AD74413R_CHANNEL_MAX) {
+ ret = ad74413r_set_adc_channel_enable(st, channel, true);
+ if (ret)
+ goto out;
+
+ st->adc_active_channels++;
+
+ if (xfer == st->adc_samples_xfer)
+ xfer->rx_buf = NULL;
+ else
+ xfer->rx_buf = rx_buf;
+
+ xfer->tx_buf = tx_buf;
+ xfer->len = AD74413R_FRAME_SIZE;
+ xfer->cs_change = 1;
+
+ ad74413r_format_reg_write(AD74413R_REG_READ_SELECT,
+ AD74413R_REG_ADC_RESULT_X(channel),
+ tx_buf);
+
+ spi_message_add_tail(xfer, &st->adc_samples_msg);
+
+ xfer++;
+ tx_buf += AD74413R_FRAME_SIZE;
+ rx_buf += AD74413R_FRAME_SIZE;
+ }
+
+ xfer->rx_buf = rx_buf;
+ xfer->tx_buf = NULL;
+ xfer->len = AD74413R_FRAME_SIZE;
+ xfer->cs_change = 0;
+
+ spi_message_add_tail(xfer, &st->adc_samples_msg);
+
+out:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad74413r_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ return ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_CONTINUOUS);
+}
+
+static int ad74413r_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ return ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_OFF);
+}
+
+static int ad74413r_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output)
+ return ad74413r_get_output_voltage_scale(st,
+ val, val2);
+ else
+ return ad74413r_get_input_voltage_scale(st,
+ chan->channel, val, val2);
+ case IIO_CURRENT:
+ if (chan->output)
+ return ad74413r_get_output_current_scale(st,
+ val, val2);
+ else
+ return ad74413r_get_input_current_scale(st,
+ chan->channel, val, val2);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ return ad74413r_get_input_voltage_offset(st,
+ chan->channel, val);
+ case IIO_CURRENT:
+ return ad74413_get_input_current_offset(st,
+ chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ if (chan->output)
+ return -EINVAL;
+
+ return ad74413r_get_single_adc_result(indio_dev, chan->channel,
+ val);
+ case IIO_CHAN_INFO_PROCESSED: {
+ int ret;
+
+ ret = ad74413r_get_single_adc_result(indio_dev, chan->channel,
+ val);
+ if (ret)
+ return ret;
+
+ ad74413r_adc_to_resistance_result(*val, val);
+
+ return ret;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ad74413r_get_adc_rate(st, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (!chan->output)
+ return -EINVAL;
+
+ if (val < 0 || val > AD74413R_DAC_CODE_MAX) {
+ dev_err(st->dev, "Invalid DAC code\n");
+ return -EINVAL;
+ }
+
+ return ad74413r_set_channel_dac_code(st, chan->channel, val);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ad74413r_set_adc_rate(st, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (st->chip_info->hart_support) {
+ *vals = ad74413r_adc_sampling_rates_hart;
+ *length = ARRAY_SIZE(ad74413r_adc_sampling_rates_hart);
+ } else {
+ *vals = ad74413r_adc_sampling_rates;
+ *length = ARRAY_SIZE(ad74413r_adc_sampling_rates);
+ }
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_buffer_setup_ops ad74413r_buffer_ops = {
+ .postenable = &ad74413r_buffer_postenable,
+ .predisable = &ad74413r_buffer_predisable,
+};
+
+static const struct iio_trigger_ops ad74413r_trigger_ops = {
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static const struct iio_info ad74413r_info = {
+ .read_raw = &ad74413r_read_raw,
+ .write_raw = &ad74413r_write_raw,
+ .read_avail = &ad74413r_read_avail,
+ .update_scan_mode = &ad74413r_update_scan_mode,
+};
+
+#define AD74413R_DAC_CHANNEL(_type, extra_mask_separate) \
+ { \
+ .type = (_type), \
+ .indexed = 1, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+ | (extra_mask_separate), \
+ }
+
+#define AD74413R_ADC_CHANNEL(_type, extra_mask_separate) \
+ { \
+ .type = (_type), \
+ .indexed = 1, \
+ .output = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+ | BIT(IIO_CHAN_INFO_SAMP_FREQ) \
+ | (extra_mask_separate), \
+ .info_mask_separate_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 32, \
+ .shift = 8, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD74413R_ADC_VOLTAGE_CHANNEL \
+ AD74413R_ADC_CHANNEL(IIO_VOLTAGE, BIT(IIO_CHAN_INFO_SCALE) \
+ | BIT(IIO_CHAN_INFO_OFFSET))
+
+#define AD74413R_ADC_CURRENT_CHANNEL \
+ AD74413R_ADC_CHANNEL(IIO_CURRENT, BIT(IIO_CHAN_INFO_SCALE) \
+ | BIT(IIO_CHAN_INFO_OFFSET))
+
+static struct iio_chan_spec ad74413r_voltage_output_channels[] = {
+ AD74413R_DAC_CHANNEL(IIO_VOLTAGE, BIT(IIO_CHAN_INFO_SCALE)),
+ AD74413R_ADC_CURRENT_CHANNEL,
+};
+
+static struct iio_chan_spec ad74413r_current_output_channels[] = {
+ AD74413R_DAC_CHANNEL(IIO_CURRENT, BIT(IIO_CHAN_INFO_SCALE)),
+ AD74413R_ADC_VOLTAGE_CHANNEL,
+};
+
+static struct iio_chan_spec ad74413r_voltage_input_channels[] = {
+ AD74413R_ADC_VOLTAGE_CHANNEL,
+};
+
+static struct iio_chan_spec ad74413r_current_input_channels[] = {
+ AD74413R_ADC_CURRENT_CHANNEL,
+};
+
+static struct iio_chan_spec ad74413r_resistance_input_channels[] = {
+ AD74413R_ADC_CHANNEL(IIO_RESISTANCE, BIT(IIO_CHAN_INFO_PROCESSED)),
+};
+
+static struct iio_chan_spec ad74413r_digital_input_channels[] = {
+ AD74413R_ADC_VOLTAGE_CHANNEL,
+};
+
+#define _AD74413R_CHANNELS(_channels) \
+ { \
+ .channels = _channels, \
+ .num_channels = ARRAY_SIZE(_channels), \
+ }
+
+#define AD74413R_CHANNELS(name) \
+ _AD74413R_CHANNELS(ad74413r_ ## name ## _channels)
+
+static const struct ad74413r_channels ad74413r_channels_map[] = {
+ [CH_FUNC_HIGH_IMPEDANCE] = AD74413R_CHANNELS(voltage_input),
+ [CH_FUNC_VOLTAGE_OUTPUT] = AD74413R_CHANNELS(voltage_output),
+ [CH_FUNC_CURRENT_OUTPUT] = AD74413R_CHANNELS(current_output),
+ [CH_FUNC_VOLTAGE_INPUT] = AD74413R_CHANNELS(voltage_input),
+ [CH_FUNC_CURRENT_INPUT_EXT_POWER] = AD74413R_CHANNELS(current_input),
+ [CH_FUNC_CURRENT_INPUT_LOOP_POWER] = AD74413R_CHANNELS(current_input),
+ [CH_FUNC_RESISTANCE_INPUT] = AD74413R_CHANNELS(resistance_input),
+ [CH_FUNC_DIGITAL_INPUT_LOGIC] = AD74413R_CHANNELS(digital_input),
+ [CH_FUNC_DIGITAL_INPUT_LOOP_POWER] = AD74413R_CHANNELS(digital_input),
+ [CH_FUNC_CURRENT_INPUT_EXT_POWER_HART] = AD74413R_CHANNELS(current_input),
+ [CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART] = AD74413R_CHANNELS(current_input),
+};
+
+static int ad74413r_parse_channel_config(struct iio_dev *indio_dev,
+ struct fwnode_handle *channel_node)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ struct ad74413r_channel_config *config;
+ u32 index;
+ int ret;
+
+ ret = fwnode_property_read_u32(channel_node, "reg", &index);
+ if (ret) {
+ dev_err(st->dev, "Failed to read channel reg: %d\n", ret);
+ return ret;
+ }
+
+ if (index >= AD74413R_CHANNEL_MAX) {
+ dev_err(st->dev, "Channel index %u is too large\n", index);
+ return -EINVAL;
+ }
+
+ config = &st->channel_configs[index];
+ if (config->initialized) {
+ dev_err(st->dev, "Channel %u already initialized\n", index);
+ return -EINVAL;
+ }
+
+ config->func = CH_FUNC_HIGH_IMPEDANCE;
+ fwnode_property_read_u32(channel_node, "adi,ch-func", &config->func);
+
+ if (config->func < CH_FUNC_MIN || config->func > CH_FUNC_MAX) {
+ dev_err(st->dev, "Invalid channel function %u\n", config->func);
+ return -EINVAL;
+ }
+
+ if (!st->chip_info->hart_support &&
+ (config->func == CH_FUNC_CURRENT_INPUT_EXT_POWER_HART ||
+ config->func == CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART)) {
+ dev_err(st->dev, "Unsupported HART function %u\n", config->func);
+ return -EINVAL;
+ }
+
+ if (config->func == CH_FUNC_DIGITAL_INPUT_LOGIC ||
+ config->func == CH_FUNC_DIGITAL_INPUT_LOOP_POWER)
+ st->num_comparator_gpios++;
+
+ config->gpo_comparator = fwnode_property_read_bool(channel_node,
+ "adi,gpo-comparator");
+
+ if (!config->gpo_comparator)
+ st->num_gpo_gpios++;
+
+ indio_dev->num_channels += ad74413r_channels_map[config->func].num_channels;
+
+ config->initialized = true;
+
+ return 0;
+}
+
+static int ad74413r_parse_channel_configs(struct iio_dev *indio_dev)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ struct fwnode_handle *channel_node = NULL;
+ int ret;
+
+ fwnode_for_each_available_child_node(dev_fwnode(st->dev), channel_node) {
+ ret = ad74413r_parse_channel_config(indio_dev, channel_node);
+ if (ret)
+ goto put_channel_node;
+ }
+
+ return 0;
+
+put_channel_node:
+ fwnode_handle_put(channel_node);
+
+ return ret;
+}
+
+static int ad74413r_setup_channels(struct iio_dev *indio_dev)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ struct ad74413r_channel_config *config;
+ struct iio_chan_spec *channels, *chans;
+ unsigned int i, num_chans, chan_i;
+ int ret;
+
+ channels = devm_kcalloc(st->dev, indio_dev->num_channels,
+ sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ indio_dev->channels = channels;
+
+ for (i = 0; i < AD74413R_CHANNEL_MAX; i++) {
+ config = &st->channel_configs[i];
+ chans = ad74413r_channels_map[config->func].channels;
+ num_chans = ad74413r_channels_map[config->func].num_channels;
+
+ memcpy(channels, chans, num_chans * sizeof(*chans));
+
+ for (chan_i = 0; chan_i < num_chans; chan_i++) {
+ struct iio_chan_spec *chan = &channels[chan_i];
+
+ chan->channel = i;
+ if (chan->output)
+ chan->scan_index = -1;
+ else
+ chan->scan_index = i;
+ }
+
+ ret = ad74413r_set_channel_function(st, i, config->func);
+ if (ret)
+ return ret;
+
+ channels += num_chans;
+ }
+
+ return 0;
+}
+
+static int ad74413r_setup_gpios(struct ad74413r_state *st)
+{
+ struct ad74413r_channel_config *config;
+ unsigned int comp_gpio_i = 0;
+ unsigned int gpo_gpio_i = 0;
+ unsigned int i;
+ u8 gpo_config;
+ int ret;
+
+ for (i = 0; i < AD74413R_CHANNEL_MAX; i++) {
+ config = &st->channel_configs[i];
+
+ if (config->gpo_comparator) {
+ gpo_config = AD74413R_GPO_CONFIG_COMPARATOR;
+ } else {
+ gpo_config = AD74413R_GPO_CONFIG_LOGIC;
+ st->gpo_gpio_offsets[gpo_gpio_i++] = i;
+ }
+
+ if (config->func == CH_FUNC_DIGITAL_INPUT_LOGIC ||
+ config->func == CH_FUNC_DIGITAL_INPUT_LOOP_POWER)
+ st->comp_gpio_offsets[comp_gpio_i++] = i;
+
+ ret = ad74413r_set_gpo_config(st, i, gpo_config);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ad74413r_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
+static int ad74413r_probe(struct spi_device *spi)
+{
+ struct ad74413r_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->spi = spi;
+ st->dev = &spi->dev;
+ st->chip_info = device_get_match_data(&spi->dev);
+ mutex_init(&st->lock);
+ init_completion(&st->adc_data_completion);
+
+ st->regmap = devm_regmap_init(st->dev, NULL, st,
+ &ad74413r_regmap_config);
+ if (IS_ERR(st->regmap))
+ return PTR_ERR(st->regmap);
+
+ st->refin_reg = devm_regulator_get(st->dev, "refin");
+ if (IS_ERR(st->refin_reg))
+ return dev_err_probe(st->dev, PTR_ERR(st->refin_reg),
+ "Failed to get refin regulator\n");
+
+ ret = regulator_enable(st->refin_reg);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(st->dev, ad74413r_regulator_disable,
+ st->refin_reg);
+ if (ret)
+ return ret;
+
+ st->sense_resistor_ohms = 100000000;
+ device_property_read_u32(st->dev, "shunt-resistor-micro-ohms",
+ &st->sense_resistor_ohms);
+ st->sense_resistor_ohms /= 1000000;
+
+ st->trig = devm_iio_trigger_alloc(st->dev, "%s-dev%d",
+ st->chip_info->name, iio_device_id(indio_dev));
+ if (!st->trig)
+ return -ENOMEM;
+
+ st->trig->ops = &ad74413r_trigger_ops;
+ iio_trigger_set_drvdata(st->trig, st);
+
+ ret = devm_iio_trigger_register(st->dev, st->trig);
+ if (ret)
+ return ret;
+
+ indio_dev->name = st->chip_info->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ad74413r_info;
+ indio_dev->trig = iio_trigger_get(st->trig);
+
+ ret = ad74413r_reset(st);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_parse_channel_configs(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_setup_channels(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_setup_gpios(st);
+ if (ret)
+ return ret;
+
+ if (st->num_gpo_gpios) {
+ st->gpo_gpiochip.owner = THIS_MODULE;
+ st->gpo_gpiochip.label = st->chip_info->name;
+ st->gpo_gpiochip.base = -1;
+ st->gpo_gpiochip.ngpio = st->num_gpo_gpios;
+ st->gpo_gpiochip.parent = st->dev;
+ st->gpo_gpiochip.can_sleep = true;
+ st->gpo_gpiochip.set = ad74413r_gpio_set;
+ st->gpo_gpiochip.set_multiple = ad74413r_gpio_set_multiple;
+ st->gpo_gpiochip.set_config = ad74413r_gpio_set_gpo_config;
+ st->gpo_gpiochip.get_direction =
+ ad74413r_gpio_get_gpo_direction;
+
+ ret = devm_gpiochip_add_data(st->dev, &st->gpo_gpiochip, st);
+ if (ret)
+ return ret;
+ }
+
+ if (st->num_comparator_gpios) {
+ st->comp_gpiochip.owner = THIS_MODULE;
+ st->comp_gpiochip.label = st->chip_info->name;
+ st->comp_gpiochip.base = -1;
+ st->comp_gpiochip.ngpio = st->num_comparator_gpios;
+ st->comp_gpiochip.parent = st->dev;
+ st->comp_gpiochip.can_sleep = true;
+ st->comp_gpiochip.get = ad74413r_gpio_get;
+ st->comp_gpiochip.get_multiple = ad74413r_gpio_get_multiple;
+ st->comp_gpiochip.set_config = ad74413r_gpio_set_comp_config;
+ st->comp_gpiochip.get_direction =
+ ad74413r_gpio_get_comp_direction;
+
+ ret = devm_gpiochip_add_data(st->dev, &st->comp_gpiochip, st);
+ if (ret)
+ return ret;
+ }
+
+ ret = ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_OFF);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(st->dev, spi->irq, ad74413r_adc_data_interrupt,
+ 0, st->chip_info->name, indio_dev);
+ if (ret)
+ return dev_err_probe(st->dev, ret, "Failed to request irq\n");
+
+ ret = devm_iio_triggered_buffer_setup(st->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad74413r_trigger_handler,
+ &ad74413r_buffer_ops);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(st->dev, indio_dev);
+}
+
+static int ad74413r_unregister_driver(struct spi_driver *spi)
+{
+ spi_unregister_driver(spi);
+
+ return 0;
+}
+
+static int __init ad74413r_register_driver(struct spi_driver *spi)
+{
+ crc8_populate_msb(ad74413r_crc8_table, AD74413R_CRC_POLYNOMIAL);
+
+ return spi_register_driver(spi);
+}
+
+static const struct ad74413r_chip_info ad74412r_chip_info_data = {
+ .hart_support = false,
+ .name = "ad74412r",
+};
+
+static const struct ad74413r_chip_info ad74413r_chip_info_data = {
+ .hart_support = true,
+ .name = "ad74413r",
+};
+
+static const struct of_device_id ad74413r_dt_id[] = {
+ {
+ .compatible = "adi,ad74412r",
+ .data = &ad74412r_chip_info_data,
+ },
+ {
+ .compatible = "adi,ad74413r",
+ .data = &ad74413r_chip_info_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ad74413r_dt_id);
+
+static struct spi_driver ad74413r_driver = {
+ .driver = {
+ .name = "ad74413r",
+ .of_match_table = ad74413r_dt_id,
+ },
+ .probe = ad74413r_probe,
+};
+
+module_driver(ad74413r_driver,
+ ad74413r_register_driver,
+ ad74413r_unregister_driver);
+
+MODULE_AUTHOR("Cosmin Tanislav <cosmin.tanislav@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD74413R ADDAC");
+MODULE_LICENSE("GPL v2");
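Every SPI frame in the driver above is four bytes: a register address, a 16-bit big-endian payload, and a CRC-8 over the first three bytes with polynomial 0x7, MSB-first and zero initial value, which is what ad74413r_format_reg_write() and ad74413r_crc() implement. A standalone userspace sketch, useful for sanity-checking frames off-target (illustrative only; the register and value chosen are arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t crc8_msb_poly7(const uint8_t *buf, int len)
	{
		uint8_t crc = 0;
		int i, bit;

		for (i = 0; i < len; i++) {
			crc ^= buf[i];
			for (bit = 0; bit < 8; bit++)
				crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : crc << 1;
		}
		return crc;
	}

	int main(void)
	{
		uint8_t frame[4];
		uint8_t reg = 0x23;	/* ADC conversion control */
		uint16_t val = 0x0100;

		frame[0] = reg;
		frame[1] = val >> 8;	/* big-endian, as put_unaligned_be16() */
		frame[2] = val & 0xff;
		frame[3] = crc8_msb_poly7(frame, 3);

		printf("%02x %02x %02x %02x\n",
		       frame[0], frame[1], frame[2], frame[3]);
		return 0;
	}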
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
index 9efa692151f0..16c0a77f6a1c 100644
--- a/drivers/iio/amplifiers/hmc425a.c
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -192,7 +192,7 @@ static int hmc425a_probe(struct platform_device *pdev)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->type = (enum hmc425a_type)of_device_get_match_data(&pdev->dev);
+ st->type = (uintptr_t)of_device_get_match_data(&pdev->dev);
st->chip_info = &hmc425a_chip_info_tbl[st->type];
indio_dev->num_channels = st->chip_info->num_channels;
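Casting the match-data pointer straight to an enum, as the removed line did, trips clang's -Wvoid-pointer-to-enum-cast; going through uintptr_t first keeps the conversion well-defined at every pointer width. A minimal sketch of the pattern (the enum and table entry are hypothetical):

	enum my_type { MY_TYPE_A, MY_TYPE_B };

	static const struct of_device_id my_of_match[] = {
		/* The enum value is smuggled through the void *data field. */
		{ .compatible = "vendor,chip-b", .data = (void *)MY_TYPE_B },
		{ }
	};

	/* In probe: pointer -> uintptr_t -> enum, no conversion warning. */
	enum my_type type = (uintptr_t)of_device_get_match_data(&pdev->dev);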
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 1ac94c4e9792..f8ce26a24c57 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -67,7 +67,7 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
dma_cookie_t cookie;
block->bytes_used = min(block->size, dmaengine_buffer->max_size);
- block->bytes_used = rounddown(block->bytes_used,
+ block->bytes_used = round_down(block->bytes_used,
dmaengine_buffer->align);
desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
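The one-liner above swaps rounddown() for round_down(): the latter reduces to a single mask because it assumes a power-of-two multiple, which a DMA engine's alignment always is, while rounddown() performs a division to cope with arbitrary divisors. The difference, sketched with illustrative values:

	/* round_down(x, y) == x & ~(y - 1); y must be a power of two. */
	size_t a = round_down(200, 64);		/* 200 & ~63 == 192 */

	/* rounddown(x, y) == x - (x % y); any non-zero y works. */
	size_t b = rounddown(200, 60);		/* 200 - 20  == 180 */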
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 9cb99585b6ff..04b44a327614 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -434,9 +434,6 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev)
return 0;
}
-static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
-};
-
static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = {
.postenable = atlas_buffer_postenable,
.predisable = atlas_buffer_predisable,
@@ -645,7 +642,6 @@ static int atlas_probe(struct i2c_client *client,
data->client = client;
data->trig = trig;
data->chip = chip;
- trig->ops = &atlas_interrupt_trigger_ops;
iio_trigger_set_drvdata(trig, indio_dev);
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/iio/chemical/sunrise_co2.c b/drivers/iio/chemical/sunrise_co2.c
index 233bd0f379c9..8440dc0c77cf 100644
--- a/drivers/iio/chemical/sunrise_co2.c
+++ b/drivers/iio/chemical/sunrise_co2.c
@@ -407,24 +407,24 @@ static int sunrise_read_raw(struct iio_dev *iio_dev,
mutex_lock(&sunrise->lock);
ret = sunrise_read_word(sunrise, SUNRISE_CO2_FILTERED_COMP_REG,
&value);
- *val = value;
mutex_unlock(&sunrise->lock);
if (ret)
return ret;
+ *val = value;
return IIO_VAL_INT;
case IIO_TEMP:
mutex_lock(&sunrise->lock);
ret = sunrise_read_word(sunrise, SUNRISE_CHIP_TEMPERATURE_REG,
&value);
- *val = value;
mutex_unlock(&sunrise->lock);
if (ret)
return ret;
+ *val = value;
return IIO_VAL_INT;
default:
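Both hunks above fix the same ordering bug: *val was assigned from the local before the return code was checked, so a failed transfer could hand the caller an uninitialized value. The corrected shape, sketched generically (my_read_word() and the register name are hypothetical):

	static int my_get_value(struct my_sensor *s, int *val)
	{
		u16 value;
		int ret;

		mutex_lock(&s->lock);
		ret = my_read_word(s, MY_VALUE_REG, &value);
		mutex_unlock(&s->lock);
		if (ret)
			return ret;	/* *val is never touched on failure */

		*val = value;
		return IIO_VAL_INT;
	}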
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index 23b22a5f5c1c..e7e1c74a351e 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -242,7 +242,7 @@ static int vz89x_get_resistance_reading(struct vz89x_data *data,
struct iio_chan_spec const *chan,
int *val)
{
- u8 *tmp = (u8 *) &data->buffer[chan->address];
+ u8 *tmp = &data->buffer[chan->address];
switch (chan->scan_type.endianness) {
case IIO_LE:
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index 7cf2bf282cef..d538bf3ab1ef 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -279,6 +279,52 @@ static int scmi_iio_get_odr_val(struct iio_dev *iio_dev, int *val, int *val2)
return 0;
}
+static int scmi_iio_read_channel_data(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *ch, int *val, int *val2)
+{
+ struct scmi_iio_priv *sensor = iio_priv(iio_dev);
+ u32 sensor_config;
+ struct scmi_sensor_reading readings[SCMI_IIO_NUM_OF_AXIS];
+ int err;
+
+ sensor_config = FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
+ SCMI_SENS_CFG_SENSOR_ENABLE);
+ err = sensor->sensor_ops->config_set(
+ sensor->ph, sensor->sensor_info->id, sensor_config);
+ if (err) {
+ dev_err(&iio_dev->dev,
+ "Error in enabling sensor %s err %d",
+ sensor->sensor_info->name, err);
+ return err;
+ }
+
+ err = sensor->sensor_ops->reading_get_timestamped(
+ sensor->ph, sensor->sensor_info->id,
+ sensor->sensor_info->num_axis, readings);
+ if (err) {
+ dev_err(&iio_dev->dev,
+ "Error in reading raw attribute for sensor %s err %d",
+ sensor->sensor_info->name, err);
+ return err;
+ }
+
+ sensor_config = FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
+ SCMI_SENS_CFG_SENSOR_DISABLE);
+ err = sensor->sensor_ops->config_set(
+ sensor->ph, sensor->sensor_info->id, sensor_config);
+ if (err) {
+ dev_err(&iio_dev->dev,
+ "Error in disabling sensor %s err %d",
+ sensor->sensor_info->name, err);
+ return err;
+ }
+
+ *val = lower_32_bits(readings[ch->scan_index].value);
+ *val2 = upper_32_bits(readings[ch->scan_index].value);
+
+ return IIO_VAL_INT_64;
+}
+
static int scmi_iio_read_raw(struct iio_dev *iio_dev,
struct iio_chan_spec const *ch, int *val,
int *val2, long mask)
@@ -300,6 +346,14 @@ static int scmi_iio_read_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SAMP_FREQ:
ret = scmi_iio_get_odr_val(iio_dev, val, val2);
return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(iio_dev);
+ if (ret)
+ return ret;
+
+ ret = scmi_iio_read_channel_data(iio_dev, ch, val, val2);
+ iio_device_release_direct_mode(iio_dev);
+ return ret;
default:
return -EINVAL;
}
@@ -381,7 +435,8 @@ static void scmi_iio_set_data_channel(struct iio_chan_spec *iio_chan,
iio_chan->type = type;
iio_chan->modified = 1;
iio_chan->channel2 = mod;
- iio_chan->info_mask_separate = BIT(IIO_CHAN_INFO_SCALE);
+ iio_chan->info_mask_separate =
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_RAW);
iio_chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ);
iio_chan->info_mask_shared_by_type_available =
BIT(IIO_CHAN_INFO_SAMP_FREQ);
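scmi_iio_read_channel_data() above returns a 64-bit reading through the 32-bit val/val2 pair, low word in val and high word in val2, signalled by IIO_VAL_INT_64. A one-line sketch of how such a pair is reassembled on the consuming side (illustrative):

	/* val = lower_32_bits(reading), val2 = upper_32_bits(reading) */
	s64 reading = ((s64)val2 << 32) | (u32)val;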
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 1de395bda03e..eb452d0c423c 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -638,7 +638,7 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i, len = 0;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
mutex_lock(&indio_dev->mlock);
@@ -660,7 +660,7 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i, len = 0, q, r;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
mutex_lock(&indio_dev->mlock);
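The two hunks above swap dev_get_drvdata() for dev_to_iio_dev(): for an attribute in the IIO device's own sysfs directory, the struct device handed to the callback is the one embedded in struct iio_dev, so the container_of-based accessor is always valid, whereas drvdata is only usable if the driver happens to have stored it on that exact device. A hedged sketch of the pattern (my_priv and the attribute are hypothetical):

	static ssize_t my_val_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		/* dev is &indio_dev->dev here, so container_of is safe. */
		struct iio_dev *indio_dev = dev_to_iio_dev(dev);
		struct my_priv *priv = iio_priv(indio_dev);

		return sysfs_emit(buf, "%d\n", priv->value);
	}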
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 75e1f2b48638..bfcf7568de32 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -6,6 +6,16 @@
menu "Digital to analog converters"
+config AD3552R
+ tristate "Analog Devices AD3552R DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD3552R
+ Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad3552r.
+
config AD5064
tristate "Analog Devices AD5064 and similar multi-channel DAC driver"
depends on (SPI_MASTER && I2C!=m) || I2C
@@ -221,6 +231,17 @@ config AD5791
To compile this driver as a module, choose M here: the
module will be called ad5791.
+config AD7293
+ tristate "Analog Devices AD7293 Power Amplifier Current Controller"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD7293
+ Power Amplifier Current Controller with
+ ADC, DACs, and temperature and current sensors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7293.
+
config AD7303
tristate "Analog Devices AD7303 DAC driver"
depends on SPI
@@ -329,7 +350,6 @@ config MAX517
config MAX5821
tristate "Maxim MAX5821 DAC driver"
depends on I2C
- depends on OF
help
Say yes here to build support for Maxim MAX5821
10 bits DAC.
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 33e16f14902a..01a50131572f 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -4,6 +4,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AD3552R) += ad3552r.o
obj-$(CONFIG_AD5360) += ad5360.o
obj-$(CONFIG_AD5380) += ad5380.o
obj-$(CONFIG_AD5421) += ad5421.o
@@ -25,6 +26,7 @@ obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_AD5686_SPI) += ad5686-spi.o
obj-$(CONFIG_AD5696_I2C) += ad5696-i2c.o
+obj-$(CONFIG_AD7293) += ad7293.o
obj-$(CONFIG_AD7303) += ad7303.o
obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
new file mode 100644
index 000000000000..97f13c0b9631
--- /dev/null
+++ b/drivers/iio/dac/ad3552r.c
@@ -0,0 +1,1138 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD3552R
+ * Digital to Analog converter driver
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+#include <asm/unaligned.h>
+#include <linux/device.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+/* Register addresses */
+/* Primary address space */
+#define AD3552R_REG_ADDR_INTERFACE_CONFIG_A 0x00
+#define AD3552R_MASK_SOFTWARE_RESET (BIT(7) | BIT(0))
+#define AD3552R_MASK_ADDR_ASCENSION BIT(5)
+#define AD3552R_MASK_SDO_ACTIVE BIT(4)
+#define AD3552R_REG_ADDR_INTERFACE_CONFIG_B 0x01
+#define AD3552R_MASK_SINGLE_INST BIT(7)
+#define AD3552R_MASK_SHORT_INSTRUCTION BIT(3)
+#define AD3552R_REG_ADDR_DEVICE_CONFIG 0x02
+#define AD3552R_MASK_DEVICE_STATUS(n) BIT(4 + (n))
+#define AD3552R_MASK_CUSTOM_MODES GENMASK(3, 2)
+#define AD3552R_MASK_OPERATING_MODES GENMASK(1, 0)
+#define AD3552R_REG_ADDR_CHIP_TYPE 0x03
+#define AD3552R_MASK_CLASS GENMASK(7, 0)
+#define AD3552R_REG_ADDR_PRODUCT_ID_L 0x04
+#define AD3552R_REG_ADDR_PRODUCT_ID_H 0x05
+#define AD3552R_REG_ADDR_CHIP_GRADE 0x06
+#define AD3552R_MASK_GRADE GENMASK(7, 4)
+#define AD3552R_MASK_DEVICE_REVISION GENMASK(3, 0)
+#define AD3552R_REG_ADDR_SCRATCH_PAD 0x0A
+#define AD3552R_REG_ADDR_SPI_REVISION 0x0B
+#define AD3552R_REG_ADDR_VENDOR_L 0x0C
+#define AD3552R_REG_ADDR_VENDOR_H 0x0D
+#define AD3552R_REG_ADDR_STREAM_MODE 0x0E
+#define AD3552R_MASK_LENGTH GENMASK(7, 0)
+#define AD3552R_REG_ADDR_TRANSFER_REGISTER 0x0F
+#define AD3552R_MASK_MULTI_IO_MODE GENMASK(7, 6)
+#define AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE BIT(2)
+#define AD3552R_REG_ADDR_INTERFACE_CONFIG_C 0x10
+#define AD3552R_MASK_CRC_ENABLE (GENMASK(7, 6) |\
+ GENMASK(1, 0))
+#define AD3552R_MASK_STRICT_REGISTER_ACCESS BIT(5)
+#define AD3552R_REG_ADDR_INTERFACE_STATUS_A 0x11
+#define AD3552R_MASK_INTERFACE_NOT_READY BIT(7)
+#define AD3552R_MASK_CLOCK_COUNTING_ERROR BIT(5)
+#define AD3552R_MASK_INVALID_OR_NO_CRC BIT(3)
+#define AD3552R_MASK_WRITE_TO_READ_ONLY_REGISTER BIT(2)
+#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS BIT(1)
+#define AD3552R_MASK_REGISTER_ADDRESS_INVALID BIT(0)
+#define AD3552R_REG_ADDR_INTERFACE_CONFIG_D 0x14
+#define AD3552R_MASK_ALERT_ENABLE_PULLUP BIT(6)
+#define AD3552R_MASK_MEM_CRC_EN BIT(4)
+#define AD3552R_MASK_SDO_DRIVE_STRENGTH GENMASK(3, 2)
+#define AD3552R_MASK_DUAL_SPI_SYNCHROUNOUS_EN BIT(1)
+#define AD3552R_MASK_SPI_CONFIG_DDR BIT(0)
+#define AD3552R_REG_ADDR_SH_REFERENCE_CONFIG 0x15
+#define AD3552R_MASK_IDUMP_FAST_MODE BIT(6)
+#define AD3552R_MASK_SAMPLE_HOLD_DIFFERENTIAL_USER_EN BIT(5)
+#define AD3552R_MASK_SAMPLE_HOLD_USER_TRIM GENMASK(4, 3)
+#define AD3552R_MASK_SAMPLE_HOLD_USER_ENABLE BIT(2)
+#define AD3552R_MASK_REFERENCE_VOLTAGE_SEL GENMASK(1, 0)
+#define AD3552R_REG_ADDR_ERR_ALARM_MASK 0x16
+#define AD3552R_MASK_REF_RANGE_ALARM BIT(6)
+#define AD3552R_MASK_CLOCK_COUNT_ERR_ALARM BIT(5)
+#define AD3552R_MASK_MEM_CRC_ERR_ALARM BIT(4)
+#define AD3552R_MASK_SPI_CRC_ERR_ALARM BIT(3)
+#define AD3552R_MASK_WRITE_TO_READ_ONLY_ALARM BIT(2)
+#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS_ALARM BIT(1)
+#define AD3552R_MASK_REGISTER_ADDRESS_INVALID_ALARM BIT(0)
+#define AD3552R_REG_ADDR_ERR_STATUS 0x17
+#define AD3552R_MASK_REF_RANGE_ERR_STATUS BIT(6)
+#define AD3552R_MASK_DUAL_SPI_STREAM_EXCEEDS_DAC_ERR_STATUS BIT(5)
+#define AD3552R_MASK_MEM_CRC_ERR_STATUS BIT(4)
+#define AD3552R_MASK_RESET_STATUS BIT(0)
+#define AD3552R_REG_ADDR_POWERDOWN_CONFIG 0x18
+#define AD3552R_MASK_CH_DAC_POWERDOWN(ch) BIT(4 + (ch))
+#define AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch) BIT(ch)
+#define AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE 0x19
+#define AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch) ((ch) ? GENMASK(7, 4) :\
+ GENMASK(3, 0))
+#define AD3552R_REG_ADDR_CH_OFFSET(ch) (0x1B + (ch) * 2)
+#define AD3552R_MASK_CH_OFFSET_BITS_0_7 GENMASK(7, 0)
+#define AD3552R_REG_ADDR_CH_GAIN(ch) (0x1C + (ch) * 2)
+#define AD3552R_MASK_CH_RANGE_OVERRIDE BIT(7)
+#define AD3552R_MASK_CH_GAIN_SCALING_N GENMASK(6, 5)
+#define AD3552R_MASK_CH_GAIN_SCALING_P GENMASK(4, 3)
+#define AD3552R_MASK_CH_OFFSET_POLARITY BIT(2)
+#define AD3552R_MASK_CH_OFFSET_BIT_8 BIT(0)
+/*
+ * Secondary region
+ * For multibyte registers, specify the highest address because the access is
+ * done in descending order.
+ */
+#define AD3552R_SECONDARY_REGION_START 0x28
+#define AD3552R_REG_ADDR_HW_LDAC_16B 0x28
+#define AD3552R_REG_ADDR_CH_DAC_16B(ch) (0x2C - (1 - ch) * 2)
+#define AD3552R_REG_ADDR_DAC_PAGE_MASK_16B 0x2E
+#define AD3552R_REG_ADDR_CH_SELECT_16B 0x2F
+#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_16B 0x31
+#define AD3552R_REG_ADDR_SW_LDAC_16B 0x32
+#define AD3552R_REG_ADDR_CH_INPUT_16B(ch) (0x36 - (1 - ch) * 2)
+/* 3 bytes registers */
+#define AD3552R_REG_START_24B 0x37
+#define AD3552R_REG_ADDR_HW_LDAC_24B 0x37
+#define AD3552R_REG_ADDR_CH_DAC_24B(ch) (0x3D - (1 - ch) * 3)
+#define AD3552R_REG_ADDR_DAC_PAGE_MASK_24B 0x40
+#define AD3552R_REG_ADDR_CH_SELECT_24B 0x41
+#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B 0x44
+#define AD3552R_REG_ADDR_SW_LDAC_24B 0x45
+#define AD3552R_REG_ADDR_CH_INPUT_24B(ch) (0x4B - (1 - ch) * 3)
+
+/* Useful defines */
+#define AD3552R_NUM_CH 2
+#define AD3552R_MASK_CH(ch) BIT(ch)
+#define AD3552R_MASK_ALL_CH GENMASK(1, 0)
+#define AD3552R_MAX_REG_SIZE 3
+#define AD3552R_READ_BIT BIT(7)
+#define AD3552R_ADDR_MASK GENMASK(6, 0)
+#define AD3552R_MASK_DAC_12B 0xFFF0
+#define AD3552R_DEFAULT_CONFIG_B_VALUE 0x8
+#define AD3552R_SCRATCH_PAD_TEST_VAL1 0x34
+#define AD3552R_SCRATCH_PAD_TEST_VAL2 0xB2
+#define AD3552R_GAIN_SCALE 1000
+#define AD3552R_LDAC_PULSE_US 100
+
+enum ad3552r_ch_vref_select {
+ /* Internal source with Vref I/O floating */
+ AD3552R_INTERNAL_VREF_PIN_FLOATING,
+ /* Internal source with Vref I/O at 2.5V */
+ AD3552R_INTERNAL_VREF_PIN_2P5V,
+ /* External source with Vref I/O as input */
+ AD3552R_EXTERNAL_VREF_PIN_INPUT
+};
+
+enum ad3542r_id {
+ AD3542R_ID = 0x4008,
+ AD3552R_ID = 0x4009,
+};
+
+enum ad3552r_ch_output_range {
+ /* Range from 0 V to 2.5 V. Requires Rfb1x connection */
+ AD3552R_CH_OUTPUT_RANGE_0__2P5V,
+ /* Range from 0 V to 5 V. Requires Rfb1x connection */
+ AD3552R_CH_OUTPUT_RANGE_0__5V,
+ /* Range from 0 V to 10 V. Requires Rfb2x connection */
+ AD3552R_CH_OUTPUT_RANGE_0__10V,
+ /* Range from -5 V to 5 V. Requires Rfb2x connection */
+ AD3552R_CH_OUTPUT_RANGE_NEG_5__5V,
+ /* Range from -10 V to 10 V. Requires Rfb4x connection */
+ AD3552R_CH_OUTPUT_RANGE_NEG_10__10V,
+};
+
+static const s32 ad3552r_ch_ranges[][2] = {
+ [AD3552R_CH_OUTPUT_RANGE_0__2P5V] = {0, 2500},
+ [AD3552R_CH_OUTPUT_RANGE_0__5V] = {0, 5000},
+ [AD3552R_CH_OUTPUT_RANGE_0__10V] = {0, 10000},
+ [AD3552R_CH_OUTPUT_RANGE_NEG_5__5V] = {-5000, 5000},
+ [AD3552R_CH_OUTPUT_RANGE_NEG_10__10V] = {-10000, 10000}
+};
+
+enum ad3542r_ch_output_range {
+ /* Range from 0 V to 2.5 V. Requires Rfb1x connection */
+ AD3542R_CH_OUTPUT_RANGE_0__2P5V,
+ /* Range from 0 V to 3 V. Requires Rfb1x connection */
+ AD3542R_CH_OUTPUT_RANGE_0__3V,
+ /* Range from 0 V to 5 V. Requires Rfb1x connection */
+ AD3542R_CH_OUTPUT_RANGE_0__5V,
+ /* Range from 0 V to 10 V. Requires Rfb2x connection */
+ AD3542R_CH_OUTPUT_RANGE_0__10V,
+ /* Range from -2.5 V to 7.5 V. Requires Rfb2x connection */
+ AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
+ /* Range from -5 V to 5 V. Requires Rfb2x connection */
+ AD3542R_CH_OUTPUT_RANGE_NEG_5__5V,
+};
+
+static const s32 ad3542r_ch_ranges[][2] = {
+ [AD3542R_CH_OUTPUT_RANGE_0__2P5V] = {0, 2500},
+ [AD3542R_CH_OUTPUT_RANGE_0__3V] = {0, 3000},
+ [AD3542R_CH_OUTPUT_RANGE_0__5V] = {0, 5000},
+ [AD3542R_CH_OUTPUT_RANGE_0__10V] = {0, 10000},
+ [AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = {-2500, 7500},
+ [AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = {-5000, 5000}
+};
+
+enum ad3552r_ch_gain_scaling {
+ /* Gain scaling of 1 */
+ AD3552R_CH_GAIN_SCALING_1,
+ /* Gain scaling of 0.5 */
+ AD3552R_CH_GAIN_SCALING_0_5,
+ /* Gain scaling of 0.25 */
+ AD3552R_CH_GAIN_SCALING_0_25,
+ /* Gain scaling of 0.125 */
+ AD3552R_CH_GAIN_SCALING_0_125,
+};
+
+/* Gain * AD3552R_GAIN_SCALE */
+static const s32 gains_scaling_table[] = {
+ [AD3552R_CH_GAIN_SCALING_1] = 1000,
+ [AD3552R_CH_GAIN_SCALING_0_5] = 500,
+ [AD3552R_CH_GAIN_SCALING_0_25] = 250,
+ [AD3552R_CH_GAIN_SCALING_0_125] = 125
+};
+
+enum ad3552r_dev_attributes {
+ /* - Direct register values */
+ /* From 0-3 */
+ AD3552R_SDO_DRIVE_STRENGTH,
+ /*
+ * 0 -> Internal Vref, vref_io pin floating (default)
+ * 1 -> Internal Vref, vref_io driven by internal vref
+ * 2 or 3 -> External Vref
+ */
+ AD3552R_VREF_SELECT,
+	/* Read registers in ascending order if set, descending otherwise */
+ AD3552R_ADDR_ASCENSION,
+};
+
+enum ad3552r_ch_attributes {
+ /* DAC powerdown */
+ AD3552R_CH_DAC_POWERDOWN,
+ /* DAC amplifier powerdown */
+ AD3552R_CH_AMPLIFIER_POWERDOWN,
+ /* Select the output range. Select from enum ad3552r_ch_output_range */
+ AD3552R_CH_OUTPUT_RANGE_SEL,
+ /*
+	 * Override the range selector in order to manually set the output
+	 * voltage range.
+ */
+ AD3552R_CH_RANGE_OVERRIDE,
+ /* Manually set the offset voltage */
+ AD3552R_CH_GAIN_OFFSET,
+ /* Sets the polarity of the offset. */
+ AD3552R_CH_GAIN_OFFSET_POLARITY,
+ /* PDAC gain scaling */
+ AD3552R_CH_GAIN_SCALING_P,
+ /* NDAC gain scaling */
+ AD3552R_CH_GAIN_SCALING_N,
+ /* Rfb value */
+ AD3552R_CH_RFB,
+ /* Channel select. When set allow Input -> DAC and Mask -> DAC */
+ AD3552R_CH_SELECT,
+};
+
+struct ad3552r_ch_data {
+ s32 scale_int;
+ s32 scale_dec;
+ s32 offset_int;
+ s32 offset_dec;
+ s16 gain_offset;
+ u16 rfb;
+ u8 n;
+ u8 p;
+ u8 range;
+ bool range_override;
+};
+
+struct ad3552r_desc {
+	/* Used to lock the SPI bus for atomic operations where needed */
+ struct mutex lock;
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_ldac;
+ struct spi_device *spi;
+ struct ad3552r_ch_data ch_data[AD3552R_NUM_CH];
+ struct iio_chan_spec channels[AD3552R_NUM_CH + 1];
+ unsigned long enabled_ch;
+ unsigned int num_ch;
+ enum ad3542r_id chip_id;
+};
+
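+/* 0 -> reg addr, 1 -> mask, indexed by enum ad3552r_dev_attributes */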
+static const u16 addr_mask_map[][2] = {
+ [AD3552R_ADDR_ASCENSION] = {
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
+ AD3552R_MASK_ADDR_ASCENSION
+ },
+ [AD3552R_SDO_DRIVE_STRENGTH] = {
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+ AD3552R_MASK_SDO_DRIVE_STRENGTH
+ },
+ [AD3552R_VREF_SELECT] = {
+ AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
+ AD3552R_MASK_REFERENCE_VOLTAGE_SEL
+ },
+};
+
+/* 0 -> reg addr, 1->ch0 mask, 2->ch1 mask */
+static const u16 addr_mask_map_ch[][3] = {
+ [AD3552R_CH_DAC_POWERDOWN] = {
+ AD3552R_REG_ADDR_POWERDOWN_CONFIG,
+ AD3552R_MASK_CH_DAC_POWERDOWN(0),
+ AD3552R_MASK_CH_DAC_POWERDOWN(1)
+ },
+ [AD3552R_CH_AMPLIFIER_POWERDOWN] = {
+ AD3552R_REG_ADDR_POWERDOWN_CONFIG,
+ AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(0),
+ AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(1)
+ },
+ [AD3552R_CH_OUTPUT_RANGE_SEL] = {
+ AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE,
+ AD3552R_MASK_CH_OUTPUT_RANGE_SEL(0),
+ AD3552R_MASK_CH_OUTPUT_RANGE_SEL(1)
+ },
+ [AD3552R_CH_SELECT] = {
+ AD3552R_REG_ADDR_CH_SELECT_16B,
+ AD3552R_MASK_CH(0),
+ AD3552R_MASK_CH(1)
+ }
+};
+
+static u8 _ad3552r_reg_len(u8 addr)
+{
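+	/*
+	 * LDAC and channel-select registers are single byte in both
+	 * precision regions; everything below the 16-bit region is 1 byte,
+	 * inside it 2 bytes and in the 24-bit region 3 bytes.
+	 */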
+ switch (addr) {
+ case AD3552R_REG_ADDR_HW_LDAC_16B:
+ case AD3552R_REG_ADDR_CH_SELECT_16B:
+ case AD3552R_REG_ADDR_SW_LDAC_16B:
+ case AD3552R_REG_ADDR_HW_LDAC_24B:
+ case AD3552R_REG_ADDR_CH_SELECT_24B:
+ case AD3552R_REG_ADDR_SW_LDAC_24B:
+ return 1;
+ default:
+ break;
+ }
+
+ if (addr > AD3552R_REG_ADDR_HW_LDAC_24B)
+ return 3;
+ if (addr > AD3552R_REG_ADDR_HW_LDAC_16B)
+ return 2;
+
+ return 1;
+}
+
+/* SPI transfer to device */
+static int ad3552r_transfer(struct ad3552r_desc *dac, u8 addr, u32 len,
+ u8 *data, bool is_read)
+{
+ /* Maximum transfer: Addr (1B) + 2 * (Data Reg (3B)) + SW LDAC(1B) */
+ u8 buf[8];
+
+ buf[0] = addr & AD3552R_ADDR_MASK;
+ buf[0] |= is_read ? AD3552R_READ_BIT : 0;
+ if (is_read)
+ return spi_write_then_read(dac->spi, buf, 1, data, len);
+
+ memcpy(buf + 1, data, len);
+ return spi_write_then_read(dac->spi, buf, len + 1, NULL, 0);
+}
+
+static int ad3552r_write_reg(struct ad3552r_desc *dac, u8 addr, u16 val)
+{
+ u8 reg_len;
+ u8 buf[AD3552R_MAX_REG_SIZE] = { 0 };
+
+ reg_len = _ad3552r_reg_len(addr);
+ if (reg_len == 2)
+		/* Only DAC registers are 2 bytes wide */
+ val &= AD3552R_MASK_DAC_12B;
+ if (reg_len == 1)
+ buf[0] = val & 0xFF;
+ else
+		/* reg_len can be 2 or 3, but the 3rd byte needs to be set to 0 */
+ put_unaligned_be16(val, buf);
+
+ return ad3552r_transfer(dac, addr, reg_len, buf, false);
+}
+
+static int ad3552r_read_reg(struct ad3552r_desc *dac, u8 addr, u16 *val)
+{
+ int err;
+ u8 reg_len, buf[AD3552R_MAX_REG_SIZE] = { 0 };
+
+ reg_len = _ad3552r_reg_len(addr);
+ err = ad3552r_transfer(dac, addr, reg_len, buf, true);
+ if (err)
+ return err;
+
+ if (reg_len == 1)
+ *val = buf[0];
+ else
+		/* reg_len can be 2 or 3, but only the first 2 bytes are relevant */
+ *val = get_unaligned_be16(buf);
+
+ return 0;
+}
+
+static u16 ad3552r_field_prep(u16 val, u16 mask)
+{
+ return (val << __ffs(mask)) & mask;
+}
+
+/* Update field of a register, shift val if needed */
+static int ad3552r_update_reg_field(struct ad3552r_desc *dac, u8 addr, u16 mask,
+ u16 val)
+{
+ int ret;
+ u16 reg;
+
+ ret = ad3552r_read_reg(dac, addr, &reg);
+ if (ret < 0)
+ return ret;
+
+ reg &= ~mask;
+ reg |= ad3552r_field_prep(val, mask);
+
+ return ad3552r_write_reg(dac, addr, reg);
+}
+
+static int ad3552r_set_ch_value(struct ad3552r_desc *dac,
+ enum ad3552r_ch_attributes attr,
+ u8 ch,
+ u16 val)
+{
+ /* Update register related to attributes in chip */
+ return ad3552r_update_reg_field(dac, addr_mask_map_ch[attr][0],
+ addr_mask_map_ch[attr][ch + 1], val);
+}
+
+#define AD3552R_CH_DAC(_idx) ((struct iio_chan_spec) { \
+ .type = IIO_VOLTAGE, \
+ .output = true, \
+ .indexed = true, \
+ .channel = _idx, \
+ .scan_index = _idx, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_ENABLE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+})
+
+static int ad3552r_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct ad3552r_desc *dac = iio_priv(indio_dev);
+ u16 tmp_val;
+ int err;
+ u8 ch = chan->channel;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&dac->lock);
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_CH_DAC_24B(ch),
+ &tmp_val);
+ mutex_unlock(&dac->lock);
+ if (err < 0)
+ return err;
+ *val = tmp_val;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ mutex_lock(&dac->lock);
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_POWERDOWN_CONFIG,
+ &tmp_val);
+ mutex_unlock(&dac->lock);
+ if (err < 0)
+ return err;
+ *val = !((tmp_val & AD3552R_MASK_CH_DAC_POWERDOWN(ch)) >>
+ __ffs(AD3552R_MASK_CH_DAC_POWERDOWN(ch)));
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = dac->ch_data[ch].scale_int;
+ *val2 = dac->ch_data[ch].scale_dec;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = dac->ch_data[ch].offset_int;
+ *val2 = dac->ch_data[ch].offset_dec;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad3552r_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad3552r_desc *dac = iio_priv(indio_dev);
+ int err;
+
+ mutex_lock(&dac->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = ad3552r_write_reg(dac,
+ AD3552R_REG_ADDR_CH_DAC_24B(chan->channel),
+ val);
+ break;
+ case IIO_CHAN_INFO_ENABLE:
+ err = ad3552r_set_ch_value(dac, AD3552R_CH_DAC_POWERDOWN,
+ chan->channel, !val);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ mutex_unlock(&dac->lock);
+
+ return err;
+}
+
+static const struct iio_info ad3552r_iio_info = {
+ .read_raw = ad3552r_read_raw,
+ .write_raw = ad3552r_write_raw
+};
+
+static int32_t ad3552r_trigger_hw_ldac(struct gpio_desc *ldac)
+{
+ gpiod_set_value_cansleep(ldac, 0);
+ usleep_range(AD3552R_LDAC_PULSE_US, AD3552R_LDAC_PULSE_US + 10);
+ gpiod_set_value_cansleep(ldac, 1);
+
+ return 0;
+}
+
+static int ad3552r_write_all_channels(struct ad3552r_desc *dac, u8 *data)
+{
+ int err, len;
+ u8 addr, buff[AD3552R_NUM_CH * AD3552R_MAX_REG_SIZE + 1];
+
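+	/*
+	 * Multibyte access runs in descending address order, so a stream
+	 * starting at CH1's input register covers CH1 and then CH0 in one
+	 * transfer; each 16-bit code is padded with a zero LSB byte.
+	 */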
+ addr = AD3552R_REG_ADDR_CH_INPUT_24B(1);
+ /* CH1 */
+ memcpy(buff, data + 2, 2);
+ buff[2] = 0;
+ /* CH0 */
+ memcpy(buff + 3, data, 2);
+ buff[5] = 0;
+ len = 6;
+ if (!dac->gpio_ldac) {
+ /* Software LDAC */
+ buff[6] = AD3552R_MASK_ALL_CH;
+ ++len;
+ }
+ err = ad3552r_transfer(dac, addr, len, buff, false);
+ if (err)
+ return err;
+
+ if (dac->gpio_ldac)
+ return ad3552r_trigger_hw_ldac(dac->gpio_ldac);
+
+ return 0;
+}
+
+static int ad3552r_write_codes(struct ad3552r_desc *dac, u32 mask, u8 *data)
+{
+ int err;
+ u8 addr, buff[AD3552R_MAX_REG_SIZE];
+
+ if (mask == AD3552R_MASK_ALL_CH) {
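+		/*
+		 * Identical codes for both channels can go through the input
+		 * page-mask register in a single write; different codes need
+		 * the per-channel path.
+		 */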
+ if (memcmp(data, data + 2, 2) != 0)
+ return ad3552r_write_all_channels(dac, data);
+
+ addr = AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B;
+ } else {
+ addr = AD3552R_REG_ADDR_CH_INPUT_24B(__ffs(mask));
+ }
+
+ memcpy(buff, data, 2);
+ buff[2] = 0;
+	err = ad3552r_transfer(dac, addr, 3, buff, false);
+ if (err)
+ return err;
+
+ if (dac->gpio_ldac)
+ return ad3552r_trigger_hw_ldac(dac->gpio_ldac);
+
+ return ad3552r_write_reg(dac, AD3552R_REG_ADDR_SW_LDAC_24B, mask);
+}
+
+static irqreturn_t ad3552r_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct iio_buffer *buf = indio_dev->buffer;
+ struct ad3552r_desc *dac = iio_priv(indio_dev);
+ /* Maximum size of a scan */
+ u8 buff[AD3552R_NUM_CH * AD3552R_MAX_REG_SIZE];
+ int err;
+
+ memset(buff, 0, sizeof(buff));
+ err = iio_pop_from_buffer(buf, buff);
+ if (err)
+ goto end;
+
+ mutex_lock(&dac->lock);
+ ad3552r_write_codes(dac, *indio_dev->active_scan_mask, buff);
+ mutex_unlock(&dac->lock);
+end:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
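+/* Verify SPI communication by writing and reading back two test patterns */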
+static int ad3552r_check_scratch_pad(struct ad3552r_desc *dac)
+{
+ const u16 val1 = AD3552R_SCRATCH_PAD_TEST_VAL1;
+ const u16 val2 = AD3552R_SCRATCH_PAD_TEST_VAL2;
+ u16 val;
+ int err;
+
+ err = ad3552r_write_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, val1);
+ if (err < 0)
+ return err;
+
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, &val);
+ if (err < 0)
+ return err;
+
+ if (val1 != val)
+ return -ENODEV;
+
+ err = ad3552r_write_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, val2);
+ if (err < 0)
+ return err;
+
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, &val);
+ if (err < 0)
+ return err;
+
+ if (val2 != val)
+ return -ENODEV;
+
+ return 0;
+}
+
+struct reg_addr_pool {
+ struct ad3552r_desc *dac;
+ u8 addr;
+};
+
+static int ad3552r_read_reg_wrapper(struct reg_addr_pool *addr)
+{
+ int err;
+ u16 val;
+
+ err = ad3552r_read_reg(addr->dac, addr->addr, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static int ad3552r_reset(struct ad3552r_desc *dac)
+{
+ struct reg_addr_pool addr;
+ int ret;
+	int val;
+
+ dac->gpio_reset = devm_gpiod_get_optional(&dac->spi->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(dac->gpio_reset))
+ return dev_err_probe(&dac->spi->dev, PTR_ERR(dac->gpio_reset),
+ "Error while getting gpio reset");
+
+ if (dac->gpio_reset) {
+ /* Perform hardware reset */
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(dac->gpio_reset, 1);
+ } else {
+ /* Perform software reset if no GPIO provided */
+ ret = ad3552r_update_reg_field(dac,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
+ AD3552R_MASK_SOFTWARE_RESET,
+ AD3552R_MASK_SOFTWARE_RESET);
+ if (ret < 0)
+ return ret;
+	}
+
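+	/*
+	 * Wait for CONFIG_B to report its default value, then for the
+	 * NOT_READY flag to clear, before touching other registers.
+	 */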
+ addr.dac = dac;
+ addr.addr = AD3552R_REG_ADDR_INTERFACE_CONFIG_B;
+ ret = readx_poll_timeout(ad3552r_read_reg_wrapper, &addr, val,
+ val == AD3552R_DEFAULT_CONFIG_B_VALUE ||
+ val < 0,
+ 5000, 50000);
+ if (val < 0)
+ ret = val;
+ if (ret) {
+ dev_err(&dac->spi->dev, "Error while resetting");
+ return ret;
+ }
+
+ ret = readx_poll_timeout(ad3552r_read_reg_wrapper, &addr, val,
+ !(val & AD3552R_MASK_INTERFACE_NOT_READY) ||
+ val < 0,
+ 5000, 50000);
+ if (val < 0)
+ ret = val;
+ if (ret) {
+ dev_err(&dac->spi->dev, "Error while resetting");
+ return ret;
+ }
+
+ return ad3552r_update_reg_field(dac,
+ addr_mask_map[AD3552R_ADDR_ASCENSION][0],
+ addr_mask_map[AD3552R_ADDR_ASCENSION][1],
+ val);
+}
+
+static void ad3552r_get_custom_range(struct ad3552r_desc *dac, s32 i, s32 *v_min,
+ s32 *v_max)
+{
+ s64 vref, tmp, common, offset, gn, gp;
+ /*
+ * From datasheet formula (In Volts):
+ * Vmin = 2.5 + [(GainN + Offset / 1024) * 2.5 * Rfb * 1.03]
+ * Vmax = 2.5 - [(GainP + Offset / 1024) * 2.5 * Rfb * 1.03]
+	 * Calculations are converted to millivolts.
+ */
+ vref = 2500;
+ /* 2.5 * 1.03 * 1000 (To mV) */
+ common = 2575 * dac->ch_data[i].rfb;
+ offset = dac->ch_data[i].gain_offset;
+
+ gn = gains_scaling_table[dac->ch_data[i].n];
+ tmp = (1024 * gn + AD3552R_GAIN_SCALE * offset) * common;
+ tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE);
+ *v_max = vref + tmp;
+
+ gp = gains_scaling_table[dac->ch_data[i].p];
+ tmp = (1024 * gp - AD3552R_GAIN_SCALE * offset) * common;
+ tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE);
+ *v_min = vref - tmp;
+}
+
+static void ad3552r_calc_gain_and_offset(struct ad3552r_desc *dac, s32 ch)
+{
+ s32 idx, v_max, v_min, span, rem;
+ s64 tmp;
+
+ if (dac->ch_data[ch].range_override) {
+ ad3552r_get_custom_range(dac, ch, &v_min, &v_max);
+ } else {
+ /* Normal range */
+ idx = dac->ch_data[ch].range;
+ if (dac->chip_id == AD3542R_ID) {
+ v_min = ad3542r_ch_ranges[idx][0];
+ v_max = ad3542r_ch_ranges[idx][1];
+ } else {
+ v_min = ad3552r_ch_ranges[idx][0];
+ v_max = ad3552r_ch_ranges[idx][1];
+ }
+ }
+
+ /*
+ * From datasheet formula:
+ * Vout = Span * (D / 65536) + Vmin
+ * Converted to scale and offset:
+ * Scale = Span / 65536
+ * Offset = 65536 * Vmin / Span
+ *
+	 * Remainders are in micros in order to be printed as
+ * IIO_VAL_INT_PLUS_MICRO
+ */
+ span = v_max - v_min;
+ dac->ch_data[ch].scale_int = div_s64_rem(span, 65536, &rem);
+ /* Do operations in microvolts */
+ dac->ch_data[ch].scale_dec = DIV_ROUND_CLOSEST((s64)rem * 1000000,
+ 65536);
+
+ dac->ch_data[ch].offset_int = div_s64_rem(v_min * 65536, span, &rem);
+ tmp = (s64)rem * 1000000;
+ dac->ch_data[ch].offset_dec = div_s64(tmp, span);
+}
+
+static int ad3552r_find_range(u16 id, s32 *vals)
+{
+ int i, len;
+ const s32 (*ranges)[2];
+
+ if (id == AD3542R_ID) {
+ len = ARRAY_SIZE(ad3542r_ch_ranges);
+ ranges = ad3542r_ch_ranges;
+ } else {
+ len = ARRAY_SIZE(ad3552r_ch_ranges);
+ ranges = ad3552r_ch_ranges;
+ }
+
+ for (i = 0; i < len; i++)
+ if (vals[0] == ranges[i][0] * 1000 &&
+ vals[1] == ranges[i][1] * 1000)
+ return i;
+
+ return -EINVAL;
+}
+
+static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac,
+ struct fwnode_handle *child,
+ u32 ch)
+{
+ struct device *dev = &dac->spi->dev;
+ struct fwnode_handle *gain_child;
+ u32 val;
+ int err;
+ u8 addr;
+ u16 reg = 0, offset;
+
+ gain_child = fwnode_get_named_child_node(child,
+ "custom-output-range-config");
+	if (!gain_child) {
+		/* fwnode_get_named_child_node() returns NULL when absent */
+		dev_err(dev,
+			"mandatory custom-output-range-config child node missing\n");
+		return -EINVAL;
+	}
+
+ dac->ch_data[ch].range_override = 1;
+ reg |= ad3552r_field_prep(1, AD3552R_MASK_CH_RANGE_OVERRIDE);
+
+ err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-p", &val);
+ if (err) {
+ dev_err(dev, "mandatory adi,gain-scaling-p property missing\n");
+ goto put_child;
+ }
+ reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_P);
+ dac->ch_data[ch].p = val;
+
+ err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-n", &val);
+ if (err) {
+ dev_err(dev, "mandatory adi,gain-scaling-n property missing\n");
+ goto put_child;
+ }
+ reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_N);
+ dac->ch_data[ch].n = val;
+
+ err = fwnode_property_read_u32(gain_child, "adi,rfb-ohms", &val);
+ if (err) {
+ dev_err(dev, "mandatory adi,rfb-ohms property missing\n");
+ goto put_child;
+ }
+ dac->ch_data[ch].rfb = val;
+
+ err = fwnode_property_read_u32(gain_child, "adi,gain-offset", &val);
+ if (err) {
+ dev_err(dev, "mandatory adi,gain-offset property missing\n");
+ goto put_child;
+ }
+ dac->ch_data[ch].gain_offset = val;
+
+ offset = abs((s32)val);
+ reg |= ad3552r_field_prep((offset >> 8), AD3552R_MASK_CH_OFFSET_BIT_8);
+
+ reg |= ad3552r_field_prep((s32)val < 0, AD3552R_MASK_CH_OFFSET_POLARITY);
+	addr = AD3552R_REG_ADDR_CH_GAIN(ch);
+	/* The low offset byte lives in the CH_OFFSET register, not CH_GAIN */
+	err = ad3552r_write_reg(dac, AD3552R_REG_ADDR_CH_OFFSET(ch),
+				offset & AD3552R_MASK_CH_OFFSET_BITS_0_7);
+ if (err) {
+ dev_err(dev, "Error writing register\n");
+ goto put_child;
+ }
+
+ err = ad3552r_write_reg(dac, addr, reg);
+ if (err) {
+ dev_err(dev, "Error writing register\n");
+ goto put_child;
+ }
+
+put_child:
+ fwnode_handle_put(gain_child);
+
+ return err;
+}
+
+static void ad3552r_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
+static int ad3552r_configure_device(struct ad3552r_desc *dac)
+{
+ struct device *dev = &dac->spi->dev;
+ struct fwnode_handle *child;
+ struct regulator *vref;
+ int err, cnt = 0, voltage, delta = 100000;
+ u32 vals[2], val, ch;
+
+ dac->gpio_ldac = devm_gpiod_get_optional(dev, "ldac", GPIOD_OUT_HIGH);
+ if (IS_ERR(dac->gpio_ldac))
+ return dev_err_probe(dev, PTR_ERR(dac->gpio_ldac),
+ "Error getting gpio ldac");
+
+ vref = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(vref)) {
+ if (PTR_ERR(vref) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(vref),
+ "Error getting vref");
+
+ if (device_property_read_bool(dev, "adi,vref-out-en"))
+ val = AD3552R_INTERNAL_VREF_PIN_2P5V;
+ else
+ val = AD3552R_INTERNAL_VREF_PIN_FLOATING;
+ } else {
+ err = regulator_enable(vref);
+ if (err) {
+ dev_err(dev, "Failed to enable external vref supply\n");
+ return err;
+ }
+
+ err = devm_add_action_or_reset(dev, ad3552r_reg_disable, vref);
+ if (err) {
+ regulator_disable(vref);
+ return err;
+ }
+
+ voltage = regulator_get_voltage(vref);
+ if (voltage > 2500000 + delta || voltage < 2500000 - delta) {
+			dev_err(dev, "vref-supply must be 2.5V\n");
+ return -EINVAL;
+ }
+ val = AD3552R_EXTERNAL_VREF_PIN_INPUT;
+ }
+
+ err = ad3552r_update_reg_field(dac,
+ addr_mask_map[AD3552R_VREF_SELECT][0],
+ addr_mask_map[AD3552R_VREF_SELECT][1],
+ val);
+ if (err)
+ return err;
+
+ err = device_property_read_u32(dev, "adi,sdo-drive-strength", &val);
+ if (!err) {
+ if (val > 3) {
+ dev_err(dev, "adi,sdo-drive-strength must be less than 4\n");
+ return -EINVAL;
+ }
+
+ err = ad3552r_update_reg_field(dac,
+ addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][0],
+ addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][1],
+ val);
+ if (err)
+ return err;
+ }
+
+ dac->num_ch = device_get_child_node_count(dev);
+ if (!dac->num_ch) {
+ dev_err(dev, "No channels defined\n");
+ return -ENODEV;
+ }
+
+ device_for_each_child_node(dev, child) {
+ err = fwnode_property_read_u32(child, "reg", &ch);
+ if (err) {
+ dev_err(dev, "mandatory reg property missing\n");
+ goto put_child;
+ }
+ if (ch >= AD3552R_NUM_CH) {
+ dev_err(dev, "reg must be less than %d\n",
+ AD3552R_NUM_CH);
+ err = -EINVAL;
+ goto put_child;
+ }
+
+ if (fwnode_property_present(child, "adi,output-range-microvolt")) {
+ err = fwnode_property_read_u32_array(child,
+ "adi,output-range-microvolt",
+ vals,
+ 2);
+ if (err) {
+ dev_err(dev,
+ "adi,output-range-microvolt property could not be parsed\n");
+ goto put_child;
+ }
+
+ err = ad3552r_find_range(dac->chip_id, vals);
+ if (err < 0) {
+ dev_err(dev,
+ "Invalid adi,output-range-microvolt value\n");
+ goto put_child;
+ }
+ val = err;
+ err = ad3552r_set_ch_value(dac,
+ AD3552R_CH_OUTPUT_RANGE_SEL,
+ ch, val);
+ if (err)
+ goto put_child;
+
+ dac->ch_data[ch].range = val;
+ } else if (dac->chip_id == AD3542R_ID) {
+ dev_err(dev,
+ "adi,output-range-microvolt is required for ad3542r\n");
+ err = -EINVAL;
+ goto put_child;
+ } else {
+ err = ad3552r_configure_custom_gain(dac, child, ch);
+ if (err)
+ goto put_child;
+ }
+
+ ad3552r_calc_gain_and_offset(dac, ch);
+ dac->enabled_ch |= BIT(ch);
+
+ err = ad3552r_set_ch_value(dac, AD3552R_CH_SELECT, ch, 1);
+ if (err < 0)
+ goto put_child;
+
+ dac->channels[cnt] = AD3552R_CH_DAC(ch);
+ ++cnt;
+	}
+
+ /* Disable unused channels */
+ for_each_clear_bit(ch, &dac->enabled_ch, AD3552R_NUM_CH) {
+ err = ad3552r_set_ch_value(dac, AD3552R_CH_AMPLIFIER_POWERDOWN,
+ ch, 1);
+ if (err)
+ return err;
+ }
+
+ dac->num_ch = cnt;
+
+ return 0;
+put_child:
+ fwnode_handle_put(child);
+
+ return err;
+}
+
+static int ad3552r_init(struct ad3552r_desc *dac)
+{
+ int err;
+ u16 val, id;
+
+ err = ad3552r_reset(dac);
+ if (err) {
+ dev_err(&dac->spi->dev, "Reset failed\n");
+ return err;
+ }
+
+ err = ad3552r_check_scratch_pad(dac);
+ if (err) {
+ dev_err(&dac->spi->dev, "Scratch pad test failed\n");
+ return err;
+ }
+
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_PRODUCT_ID_L, &val);
+ if (err) {
+		dev_err(&dac->spi->dev, "Failed to read PRODUCT_ID_L\n");
+ return err;
+ }
+
+ id = val;
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_PRODUCT_ID_H, &val);
+ if (err) {
+		dev_err(&dac->spi->dev, "Failed to read PRODUCT_ID_H\n");
+ return err;
+ }
+
+ id |= val << 8;
+ if (id != dac->chip_id) {
+		dev_err(&dac->spi->dev, "Product ID mismatch\n");
+ return -ENODEV;
+ }
+
+ return ad3552r_configure_device(dac);
+}
+
+static int ad3552r_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct ad3552r_desc *dac;
+ struct iio_dev *indio_dev;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*dac));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ dac = iio_priv(indio_dev);
+ dac->spi = spi;
+ dac->chip_id = id->driver_data;
+
+ mutex_init(&dac->lock);
+
+ err = ad3552r_init(dac);
+ if (err)
+ return err;
+
+ /* Config triggered buffer device */
+ if (dac->chip_id == AD3552R_ID)
+ indio_dev->name = "ad3552r";
+ else
+ indio_dev->name = "ad3542r";
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ad3552r_iio_info;
+ indio_dev->num_channels = dac->num_ch;
+ indio_dev->channels = dac->channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ err = devm_iio_triggered_buffer_setup_ext(&indio_dev->dev, indio_dev, NULL,
+ &ad3552r_trigger_handler,
+ IIO_BUFFER_DIRECTION_OUT,
+ NULL,
+ NULL);
+ if (err)
+ return err;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad3552r_id[] = {
+ { "ad3542r", AD3542R_ID },
+ { "ad3552r", AD3552R_ID },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad3552r_id);
+
+static const struct of_device_id ad3552r_of_match[] = {
+ { .compatible = "adi,ad3542r"},
+ { .compatible = "adi,ad3552r"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad3552r_of_match);
+
+static struct spi_driver ad3552r_driver = {
+ .driver = {
+ .name = "ad3552r",
+ .of_match_table = ad3552r_of_match,
+ },
+ .probe = ad3552r_probe,
+ .id_table = ad3552r_id
+};
+module_spi_driver(ad3552r_driver);
+
+MODULE_AUTHOR("Mihail Chindris <mihail.chindris@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD3552R DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index fd9cac4f6321..27ee2c63c5d4 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -377,7 +377,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5064_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5064_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5064_powerdown_mode_enum),
{ },
};
@@ -389,7 +389,7 @@ static const struct iio_chan_spec_ext_info ltc2617_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ltc2617_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ltc2617_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ltc2617_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 8ca26bb4b62f..e38860a6a9f3 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -249,7 +249,7 @@ static const struct iio_chan_spec_ext_info ad5380_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5380_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5380_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5380_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 3cc5513a6cbf..1c9b54c012a7 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -142,7 +142,7 @@ static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5446_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5446_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5446_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 19cdf9890d02..b631261efa97 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -241,7 +241,7 @@ static const struct iio_chan_spec_ext_info ad5504_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5504_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5504_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5504_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 530529feebb5..3c98941b9f99 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -159,7 +159,7 @@ static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5624r_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5624r_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5624r_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 8f001db775f4..e592a995f404 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -184,7 +184,7 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5686_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5686_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5686_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index cabc38d54085..7a62e6e1d5f1 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -13,10 +13,10 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
-#include <linux/of.h>
+#include <linux/property.h>
+
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/platform_data/ad5755.h>
#define AD5755_NUM_CHANNELS 4
@@ -63,6 +63,101 @@
#define AD5755_SLEW_RATE_SHIFT 3
#define AD5755_SLEW_ENABLE BIT(12)
+enum ad5755_mode {
+ AD5755_MODE_VOLTAGE_0V_5V = 0,
+ AD5755_MODE_VOLTAGE_0V_10V = 1,
+ AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2,
+ AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3,
+ AD5755_MODE_CURRENT_4mA_20mA = 4,
+ AD5755_MODE_CURRENT_0mA_20mA = 5,
+ AD5755_MODE_CURRENT_0mA_24mA = 6,
+};
+
+enum ad5755_dc_dc_phase {
+ AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0,
+ AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1,
+ AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2,
+ AD5755_DC_DC_PHASE_90_DEGREE = 3,
+};
+
+enum ad5755_dc_dc_freq {
+ AD5755_DC_DC_FREQ_250kHZ = 0,
+ AD5755_DC_DC_FREQ_410kHZ = 1,
+ AD5755_DC_DC_FREQ_650kHZ = 2,
+};
+
+enum ad5755_dc_dc_maxv {
+ AD5755_DC_DC_MAXV_23V = 0,
+ AD5755_DC_DC_MAXV_24V5 = 1,
+ AD5755_DC_DC_MAXV_27V = 2,
+ AD5755_DC_DC_MAXV_29V5 = 3,
+};
+
+enum ad5755_slew_rate {
+ AD5755_SLEW_RATE_64k = 0,
+ AD5755_SLEW_RATE_32k = 1,
+ AD5755_SLEW_RATE_16k = 2,
+ AD5755_SLEW_RATE_8k = 3,
+ AD5755_SLEW_RATE_4k = 4,
+ AD5755_SLEW_RATE_2k = 5,
+ AD5755_SLEW_RATE_1k = 6,
+ AD5755_SLEW_RATE_500 = 7,
+ AD5755_SLEW_RATE_250 = 8,
+ AD5755_SLEW_RATE_125 = 9,
+ AD5755_SLEW_RATE_64 = 10,
+ AD5755_SLEW_RATE_32 = 11,
+ AD5755_SLEW_RATE_16 = 12,
+ AD5755_SLEW_RATE_8 = 13,
+ AD5755_SLEW_RATE_4 = 14,
+ AD5755_SLEW_RATE_0_5 = 15,
+};
+
+enum ad5755_slew_step_size {
+ AD5755_SLEW_STEP_SIZE_1 = 0,
+ AD5755_SLEW_STEP_SIZE_2 = 1,
+ AD5755_SLEW_STEP_SIZE_4 = 2,
+ AD5755_SLEW_STEP_SIZE_8 = 3,
+ AD5755_SLEW_STEP_SIZE_16 = 4,
+ AD5755_SLEW_STEP_SIZE_32 = 5,
+ AD5755_SLEW_STEP_SIZE_64 = 6,
+ AD5755_SLEW_STEP_SIZE_128 = 7,
+ AD5755_SLEW_STEP_SIZE_256 = 8,
+};
+
+/**
+ * struct ad5755_platform_data - AD5755 DAC driver platform data
+ * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter
+ *	compensation resistor is used.
+ * @dc_dc_phase: DC-DC converter phase.
+ * @dc_dc_freq: DC-DC converter frequency.
+ * @dc_dc_maxv: DC-DC maximum allowed boost voltage.
+ * @dac: Per DAC instance parameters.
+ * @dac.mode: The mode to be used for the DAC output.
+ * @dac.ext_current_sense_resistor: Whether an external current sense resistor
+ * is used.
+ * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange.
+ * @dac.slew.enable: Whether to enable digital slew.
+ * @dac.slew.rate: Slew rate of the digital slew.
+ * @dac.slew.step_size: Slew step size of the digital slew.
+ **/
+struct ad5755_platform_data {
+ bool ext_dc_dc_compenstation_resistor;
+ enum ad5755_dc_dc_phase dc_dc_phase;
+ enum ad5755_dc_dc_freq dc_dc_freq;
+ enum ad5755_dc_dc_maxv dc_dc_maxv;
+
+ struct {
+ enum ad5755_mode mode;
+ bool ext_current_sense_resistor;
+ bool enable_voltage_overrange;
+ struct {
+ bool enable;
+ enum ad5755_slew_rate rate;
+ enum ad5755_slew_step_size step_size;
+ } slew;
+ } dac[4];
+};
+
/**
* struct ad5755_chip_info - chip specific information
* @channel_template: channel specification
@@ -111,7 +206,6 @@ enum ad5755_type {
ID_AD5737,
};
-#ifdef CONFIG_OF
static const int ad5755_dcdc_freq_table[][2] = {
{ 250000, AD5755_DC_DC_FREQ_250kHZ },
{ 410000, AD5755_DC_DC_FREQ_410kHZ },
@@ -154,7 +248,6 @@ static const int ad5755_slew_step_table[][2] = {
{ 2, AD5755_SLEW_STEP_SIZE_2 },
{ 1, AD5755_SLEW_STEP_SIZE_1 },
};
-#endif
static int ad5755_write_unlocked(struct iio_dev *indio_dev,
unsigned int reg, unsigned int val)
@@ -604,30 +697,29 @@ static const struct ad5755_platform_data ad5755_default_pdata = {
},
};
-#ifdef CONFIG_OF
-static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
+static struct ad5755_platform_data *ad5755_parse_fw(struct device *dev)
{
- struct device_node *np = dev->of_node;
- struct device_node *pp;
+ struct fwnode_handle *pp;
struct ad5755_platform_data *pdata;
unsigned int tmp;
unsigned int tmparray[3];
int devnr, i;
+ if (!dev_fwnode(dev))
+ return NULL;
+
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
pdata->ext_dc_dc_compenstation_resistor =
- of_property_read_bool(np, "adi,ext-dc-dc-compenstation-resistor");
+ device_property_read_bool(dev, "adi,ext-dc-dc-compenstation-resistor");
- if (!of_property_read_u32(np, "adi,dc-dc-phase", &tmp))
- pdata->dc_dc_phase = tmp;
- else
- pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE;
+	pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE;
+	if (!device_property_read_u32(dev, "adi,dc-dc-phase", &tmp))
+		pdata->dc_dc_phase = tmp;
pdata->dc_dc_freq = AD5755_DC_DC_FREQ_410kHZ;
- if (!of_property_read_u32(np, "adi,dc-dc-freq-hz", &tmp)) {
+ if (!device_property_read_u32(dev, "adi,dc-dc-freq-hz", &tmp)) {
for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_freq_table); i++) {
if (tmp == ad5755_dcdc_freq_table[i][0]) {
pdata->dc_dc_freq = ad5755_dcdc_freq_table[i][1];
@@ -641,7 +733,7 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
}
pdata->dc_dc_maxv = AD5755_DC_DC_MAXV_23V;
- if (!of_property_read_u32(np, "adi,dc-dc-max-microvolt", &tmp)) {
+ if (!device_property_read_u32(dev, "adi,dc-dc-max-microvolt", &tmp)) {
for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_maxv_table); i++) {
if (tmp == ad5755_dcdc_maxv_table[i][0]) {
pdata->dc_dc_maxv = ad5755_dcdc_maxv_table[i][1];
@@ -654,25 +746,23 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
}
devnr = 0;
- for_each_child_of_node(np, pp) {
+ device_for_each_child_node(dev, pp) {
if (devnr >= AD5755_NUM_CHANNELS) {
dev_err(dev,
"There are too many channels defined in DT\n");
goto error_out;
}
- if (!of_property_read_u32(pp, "adi,mode", &tmp))
- pdata->dac[devnr].mode = tmp;
- else
- pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA;
+		pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA;
+		if (!fwnode_property_read_u32(pp, "adi,mode", &tmp))
+			pdata->dac[devnr].mode = tmp;
pdata->dac[devnr].ext_current_sense_resistor =
- of_property_read_bool(pp, "adi,ext-current-sense-resistor");
+ fwnode_property_read_bool(pp, "adi,ext-current-sense-resistor");
pdata->dac[devnr].enable_voltage_overrange =
- of_property_read_bool(pp, "adi,enable-voltage-overrange");
+ fwnode_property_read_bool(pp, "adi,enable-voltage-overrange");
- if (!of_property_read_u32_array(pp, "adi,slew", tmparray, 3)) {
+ if (!fwnode_property_read_u32_array(pp, "adi,slew", tmparray, 3)) {
pdata->dac[devnr].slew.enable = tmparray[0];
pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k;
@@ -715,18 +805,11 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
devm_kfree(dev, pdata);
return NULL;
}
-#else
-static
-struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
-{
- return NULL;
-}
-#endif
static int ad5755_probe(struct spi_device *spi)
{
enum ad5755_type type = spi_get_device_id(spi)->driver_data;
- const struct ad5755_platform_data *pdata = dev_get_platdata(&spi->dev);
+ const struct ad5755_platform_data *pdata;
struct iio_dev *indio_dev;
struct ad5755_state *st;
int ret;
@@ -751,13 +834,10 @@ static int ad5755_probe(struct spi_device *spi)
mutex_init(&st->lock);
- if (spi->dev.of_node)
- pdata = ad5755_parse_dt(&spi->dev);
- else
- pdata = spi->dev.platform_data;
+ pdata = ad5755_parse_fw(&spi->dev);
if (!pdata) {
- dev_warn(&spi->dev, "no platform data? using default\n");
+		dev_warn(&spi->dev, "no firmware-provided parameters, using defaults\n");
pdata = &ad5755_default_pdata;
}
diff --git a/drivers/iio/dac/ad5758.c b/drivers/iio/dac/ad5758.c
index 0572ef518101..98771e37a7b5 100644
--- a/drivers/iio/dac/ad5758.c
+++ b/drivers/iio/dac/ad5758.c
@@ -10,9 +10,8 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/property.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/iio/dac/ad5766.c b/drivers/iio/dac/ad5766.c
index b0d220c3a126..43189af2fb1f 100644
--- a/drivers/iio/dac/ad5766.c
+++ b/drivers/iio/dac/ad5766.c
@@ -426,14 +426,6 @@ static ssize_t ad5766_write_ext(struct iio_dev *indio_dev,
.shared = _shared, \
}
-#define IIO_ENUM_AVAILABLE_SHARED(_name, _shared, _e) \
-{ \
- .name = (_name "_available"), \
- .shared = _shared, \
- .read = iio_enum_available_read, \
- .private = (uintptr_t)(_e), \
-}
-
static const struct iio_chan_spec_ext_info ad5766_ext_info[] = {
_AD5766_CHAN_EXT_INFO("dither_enable", AD5766_DITHER_ENABLE,
@@ -443,9 +435,8 @@ static const struct iio_chan_spec_ext_info ad5766_ext_info[] = {
_AD5766_CHAN_EXT_INFO("dither_source", AD5766_DITHER_SOURCE,
IIO_SEPARATE),
IIO_ENUM("dither_scale", IIO_SEPARATE, &ad5766_dither_scale_enum),
- IIO_ENUM_AVAILABLE_SHARED("dither_scale",
- IIO_SEPARATE,
- &ad5766_dither_scale_enum),
+ IIO_ENUM_AVAILABLE("dither_scale", IIO_SEPARATE,
+ &ad5766_dither_scale_enum),
{}
};
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index a0923b76e8b6..7b4579d73d18 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -285,7 +285,7 @@ static const struct iio_chan_spec_ext_info ad5791_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5791_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5791_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5791_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad7293.c b/drivers/iio/dac/ad7293.c
new file mode 100644
index 000000000000..59a38ca4c3c7
--- /dev/null
+++ b/drivers/iio/dac/ad7293.c
@@ -0,0 +1,934 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AD7293 driver
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <asm/unaligned.h>
+
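+/* Bits 17:16 of a register token encode its data length (1 or 2 bytes) */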
+#define AD7293_R1B BIT(16)
+#define AD7293_R2B BIT(17)
+#define AD7293_PAGE_ADDR_MSK GENMASK(15, 8)
+#define AD7293_PAGE(x) FIELD_PREP(AD7293_PAGE_ADDR_MSK, x)
+
+/* AD7293 Register Map Common */
+#define AD7293_REG_NO_OP (AD7293_R1B | AD7293_PAGE(0x0) | 0x0)
+#define AD7293_REG_PAGE_SELECT (AD7293_R1B | AD7293_PAGE(0x0) | 0x1)
+#define AD7293_REG_CONV_CMD (AD7293_R2B | AD7293_PAGE(0x0) | 0x2)
+#define AD7293_REG_RESULT (AD7293_R1B | AD7293_PAGE(0x0) | 0x3)
+#define AD7293_REG_DAC_EN (AD7293_R1B | AD7293_PAGE(0x0) | 0x4)
+#define AD7293_REG_DEVICE_ID (AD7293_R2B | AD7293_PAGE(0x0) | 0xC)
+#define AD7293_REG_SOFT_RESET (AD7293_R2B | AD7293_PAGE(0x0) | 0xF)
+
+/* AD7293 Register Map Page 0x0 */
+#define AD7293_REG_VIN0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x10)
+#define AD7293_REG_VIN1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x11)
+#define AD7293_REG_VIN2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x12)
+#define AD7293_REG_VIN3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x13)
+#define AD7293_REG_TSENSE_INT (AD7293_R2B | AD7293_PAGE(0x0) | 0x20)
+#define AD7293_REG_TSENSE_D0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x21)
+#define AD7293_REG_TSENSE_D1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x22)
+#define AD7293_REG_ISENSE_0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x28)
+#define AD7293_REG_ISENSE_1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x29)
+#define AD7293_REG_ISENSE_2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x2A)
+#define AD7293_REG_ISENSE_3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x2B)
+#define AD7293_REG_UNI_VOUT0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x30)
+#define AD7293_REG_UNI_VOUT1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x31)
+#define AD7293_REG_UNI_VOUT2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x32)
+#define AD7293_REG_UNI_VOUT3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x33)
+#define AD7293_REG_BI_VOUT0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x34)
+#define AD7293_REG_BI_VOUT1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x35)
+#define AD7293_REG_BI_VOUT2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x36)
+#define AD7293_REG_BI_VOUT3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x37)
+
+/* AD7293 Register Map Page 0x2 */
+#define AD7293_REG_DIGITAL_OUT_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x11)
+#define AD7293_REG_DIGITAL_INOUT_FUNC (AD7293_R2B | AD7293_PAGE(0x2) | 0x12)
+#define AD7293_REG_DIGITAL_FUNC_POL (AD7293_R2B | AD7293_PAGE(0x2) | 0x13)
+#define AD7293_REG_GENERAL (AD7293_R2B | AD7293_PAGE(0x2) | 0x14)
+#define AD7293_REG_VINX_RANGE0 (AD7293_R2B | AD7293_PAGE(0x2) | 0x15)
+#define AD7293_REG_VINX_RANGE1 (AD7293_R2B | AD7293_PAGE(0x2) | 0x16)
+#define AD7293_REG_VINX_DIFF_SE (AD7293_R2B | AD7293_PAGE(0x2) | 0x17)
+#define AD7293_REG_VINX_FILTER (AD7293_R2B | AD7293_PAGE(0x2) | 0x18)
+#define AD7293_REG_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x19)
+#define AD7293_REG_CONV_DELAY (AD7293_R2B | AD7293_PAGE(0x2) | 0x1A)
+#define AD7293_REG_TSENSE_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x1B)
+#define AD7293_REG_ISENSE_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x1C)
+#define AD7293_REG_ISENSE_GAIN (AD7293_R2B | AD7293_PAGE(0x2) | 0x1D)
+#define AD7293_REG_DAC_SNOOZE_O (AD7293_R2B | AD7293_PAGE(0x2) | 0x1F)
+#define AD7293_REG_DAC_SNOOZE_1 (AD7293_R2B | AD7293_PAGE(0x2) | 0x20)
+#define AD7293_REG_RSX_MON_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x23)
+#define AD7293_REG_INTEGR_CL (AD7293_R2B | AD7293_PAGE(0x2) | 0x28)
+#define AD7293_REG_PA_ON_CTRL (AD7293_R2B | AD7293_PAGE(0x2) | 0x29)
+#define AD7293_REG_RAMP_TIME_0 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2A)
+#define AD7293_REG_RAMP_TIME_1 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2B)
+#define AD7293_REG_RAMP_TIME_2 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2C)
+#define AD7293_REG_RAMP_TIME_3 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2D)
+#define AD7293_REG_CL_FR_IT (AD7293_R2B | AD7293_PAGE(0x2) | 0x2E)
+#define AD7293_REG_INTX_AVSS_AVDD (AD7293_R2B | AD7293_PAGE(0x2) | 0x2F)
+
+/* AD7293 Register Map Page 0x3 */
+#define AD7293_REG_VINX_SEQ (AD7293_R2B | AD7293_PAGE(0x3) | 0x10)
+#define AD7293_REG_ISENSEX_TSENSEX_SEQ (AD7293_R2B | AD7293_PAGE(0x3) | 0x11)
+#define AD7293_REG_RSX_MON_BI_VOUTX_SEQ (AD7293_R2B | AD7293_PAGE(0x3) | 0x12)
+
+/* AD7293 Register Map Page 0xE */
+#define AD7293_REG_VIN0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x10)
+#define AD7293_REG_VIN1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x11)
+#define AD7293_REG_VIN2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x12)
+#define AD7293_REG_VIN3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x13)
+#define AD7293_REG_TSENSE_INT_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x20)
+#define AD7293_REG_TSENSE_D0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x21)
+#define AD7293_REG_TSENSE_D1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x22)
+#define AD7293_REG_ISENSE0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x28)
+#define AD7293_REG_ISENSE1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x29)
+#define AD7293_REG_ISENSE2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x2A)
+#define AD7293_REG_ISENSE3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x2B)
+#define AD7293_REG_UNI_VOUT0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x30)
+#define AD7293_REG_UNI_VOUT1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x31)
+#define AD7293_REG_UNI_VOUT2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x32)
+#define AD7293_REG_UNI_VOUT3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x33)
+#define AD7293_REG_BI_VOUT0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x34)
+#define AD7293_REG_BI_VOUT1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x35)
+#define AD7293_REG_BI_VOUT2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x36)
+#define AD7293_REG_BI_VOUT3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x37)
+
+/* AD7293 Miscellaneous Definitions */
+#define AD7293_READ BIT(7)
+#define AD7293_TRANSF_LEN_MSK GENMASK(17, 16)
+
+#define AD7293_REG_ADDR_MSK GENMASK(7, 0)
+#define AD7293_REG_VOUT_OFFSET_MSK GENMASK(5, 4)
+#define AD7293_REG_DATA_RAW_MSK GENMASK(15, 4)
+#define AD7293_REG_VINX_RANGE_GET_CH_MSK(x, ch) (((x) >> (ch)) & 0x1)
+#define AD7293_REG_VINX_RANGE_SET_CH_MSK(x, ch) (((x) & 0x1) << (ch))
+#define AD7293_CHIP_ID 0x18
+
+enum ad7293_ch_type {
+ AD7293_ADC_VINX,
+ AD7293_ADC_TSENSE,
+ AD7293_ADC_ISENSE,
+ AD7293_DAC,
+};
+
+enum ad7293_max_offset {
+ AD7293_TSENSE_MIN_OFFSET_CH = 4,
+ AD7293_ISENSE_MIN_OFFSET_CH = 7,
+ AD7293_VOUT_MIN_OFFSET_CH = 11,
+ AD7293_VOUT_MAX_OFFSET_CH = 18,
+};
+
+static const int dac_offset_table[] = {0, 1, 2};
+
+static const int isense_gain_table[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+
+static const int adc_range_table[] = {0, 1, 2, 3};
+
+struct ad7293_state {
+ struct spi_device *spi;
+ /* Protect against concurrent accesses to the device, page selection and data content */
+ struct mutex lock;
+ struct gpio_desc *gpio_reset;
+ struct regulator *reg_avdd;
+ struct regulator *reg_vdrive;
+ u8 page_select;
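+	/* DMA-safe buffer for SPI transfers, isolated in its own cache line */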
+ u8 data[3] ____cacheline_aligned;
+};
+
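+/* Switch the active register page only when it differs from the cached one */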
+static int ad7293_page_select(struct ad7293_state *st, unsigned int reg)
+{
+ int ret;
+
+ if (st->page_select != FIELD_GET(AD7293_PAGE_ADDR_MSK, reg)) {
+ st->data[0] = FIELD_GET(AD7293_REG_ADDR_MSK, AD7293_REG_PAGE_SELECT);
+ st->data[1] = FIELD_GET(AD7293_PAGE_ADDR_MSK, reg);
+
+ ret = spi_write(st->spi, &st->data[0], 2);
+ if (ret)
+ return ret;
+
+ st->page_select = FIELD_GET(AD7293_PAGE_ADDR_MSK, reg);
+ }
+
+ return 0;
+}
+
+static int __ad7293_spi_read(struct ad7293_state *st, unsigned int reg,
+ u16 *val)
+{
+ int ret;
+ unsigned int length;
+ struct spi_transfer t = {0};
+
+ length = FIELD_GET(AD7293_TRANSF_LEN_MSK, reg);
+
+ ret = ad7293_page_select(st, reg);
+ if (ret)
+ return ret;
+
+ st->data[0] = AD7293_READ | FIELD_GET(AD7293_REG_ADDR_MSK, reg);
+ st->data[1] = 0x0;
+ st->data[2] = 0x0;
+
+ t.tx_buf = &st->data[0];
+ t.rx_buf = &st->data[0];
+ t.len = length + 1;
+
+ ret = spi_sync_transfer(st->spi, &t, 1);
+ if (ret)
+ return ret;
+
+ if (length == 1)
+ *val = st->data[1];
+ else
+ *val = get_unaligned_be16(&st->data[1]);
+
+ return 0;
+}
+
+static int ad7293_spi_read(struct ad7293_state *st, unsigned int reg,
+ u16 *val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __ad7293_spi_read(st, reg, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __ad7293_spi_write(struct ad7293_state *st, unsigned int reg,
+ u16 val)
+{
+ int ret;
+ unsigned int length;
+
+ length = FIELD_GET(AD7293_TRANSF_LEN_MSK, reg);
+
+ ret = ad7293_page_select(st, reg);
+ if (ret)
+ return ret;
+
+ st->data[0] = FIELD_GET(AD7293_REG_ADDR_MSK, reg);
+
+ if (length == 1)
+ st->data[1] = val;
+ else
+ put_unaligned_be16(val, &st->data[1]);
+
+ return spi_write(st->spi, &st->data[0], length + 1);
+}
+
+static int ad7293_spi_write(struct ad7293_state *st, unsigned int reg,
+ u16 val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __ad7293_spi_write(st, reg, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __ad7293_spi_update_bits(struct ad7293_state *st, unsigned int reg,
+ u16 mask, u16 val)
+{
+ int ret;
+ u16 data, temp;
+
+ ret = __ad7293_spi_read(st, reg, &data);
+ if (ret)
+ return ret;
+
+ temp = (data & ~mask) | (val & mask);
+
+ return __ad7293_spi_write(st, reg, temp);
+}
+
+static int ad7293_spi_update_bits(struct ad7293_state *st, unsigned int reg,
+ u16 mask, u16 val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __ad7293_spi_update_bits(st, reg, mask, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad7293_adc_get_scale(struct ad7293_state *st, unsigned int ch,
+ u16 *range)
+{
+ int ret;
+ u16 data;
+
+ mutex_lock(&st->lock);
+
+ ret = __ad7293_spi_read(st, AD7293_REG_VINX_RANGE1, &data);
+ if (ret)
+ goto exit;
+
+ *range = AD7293_REG_VINX_RANGE_GET_CH_MSK(data, ch);
+
+ ret = __ad7293_spi_read(st, AD7293_REG_VINX_RANGE0, &data);
+ if (ret)
+ goto exit;
+
+ *range |= AD7293_REG_VINX_RANGE_GET_CH_MSK(data, ch) << 1;
+
+exit:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad7293_adc_set_scale(struct ad7293_state *st, unsigned int ch,
+ u16 range)
+{
+ int ret;
+ unsigned int ch_msk = BIT(ch);
+
+ mutex_lock(&st->lock);
+ ret = __ad7293_spi_update_bits(st, AD7293_REG_VINX_RANGE1, ch_msk,
+ AD7293_REG_VINX_RANGE_SET_CH_MSK(range, ch));
+ if (ret)
+ goto exit;
+
+ ret = __ad7293_spi_update_bits(st, AD7293_REG_VINX_RANGE0, ch_msk,
+ AD7293_REG_VINX_RANGE_SET_CH_MSK((range >> 1), ch));
+
+exit:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
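+/*
+ * Offset registers use a flattened channel index:
+ * 0-3 VINx, 4-6 TSENSE, 7-10 ISENSEx, 11-18 VOUTx.
+ */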
+static int ad7293_get_offset(struct ad7293_state *st, unsigned int ch,
+ u16 *offset)
+{
+ if (ch < AD7293_TSENSE_MIN_OFFSET_CH)
+ return ad7293_spi_read(st, AD7293_REG_VIN0_OFFSET + ch, offset);
+ else if (ch < AD7293_ISENSE_MIN_OFFSET_CH)
+ return ad7293_spi_read(st, AD7293_REG_TSENSE_INT_OFFSET + (ch - 4), offset);
+ else if (ch < AD7293_VOUT_MIN_OFFSET_CH)
+ return ad7293_spi_read(st, AD7293_REG_ISENSE0_OFFSET + (ch - 7), offset);
+ else if (ch <= AD7293_VOUT_MAX_OFFSET_CH)
+ return ad7293_spi_read(st, AD7293_REG_UNI_VOUT0_OFFSET + (ch - 11), offset);
+
+ return -EINVAL;
+}
+
+static int ad7293_set_offset(struct ad7293_state *st, unsigned int ch,
+ u16 offset)
+{
+ if (ch < AD7293_TSENSE_MIN_OFFSET_CH)
+ return ad7293_spi_write(st, AD7293_REG_VIN0_OFFSET + ch,
+ offset);
+ else if (ch < AD7293_ISENSE_MIN_OFFSET_CH)
+ return ad7293_spi_write(st,
+ AD7293_REG_TSENSE_INT_OFFSET +
+ (ch - AD7293_TSENSE_MIN_OFFSET_CH),
+ offset);
+ else if (ch < AD7293_VOUT_MIN_OFFSET_CH)
+ return ad7293_spi_write(st,
+ AD7293_REG_ISENSE0_OFFSET +
+ (ch - AD7293_ISENSE_MIN_OFFSET_CH),
+ offset);
+ else if (ch <= AD7293_VOUT_MAX_OFFSET_CH)
+ return ad7293_spi_update_bits(st,
+ AD7293_REG_UNI_VOUT0_OFFSET +
+ (ch - AD7293_VOUT_MIN_OFFSET_CH),
+ AD7293_REG_VOUT_OFFSET_MSK,
+ FIELD_PREP(AD7293_REG_VOUT_OFFSET_MSK, offset));
+
+ return -EINVAL;
+}
+
+static int ad7293_isense_set_scale(struct ad7293_state *st, unsigned int ch,
+ u16 gain)
+{
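+	/* Each current-sense channel owns a 4-bit gain field in ISENSE_GAIN */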
+ unsigned int ch_msk = (0xf << (4 * ch));
+
+ return ad7293_spi_update_bits(st, AD7293_REG_ISENSE_GAIN, ch_msk,
+ gain << (4 * ch));
+}
+
+static int ad7293_isense_get_scale(struct ad7293_state *st, unsigned int ch,
+ u16 *gain)
+{
+ int ret;
+
+ ret = ad7293_spi_read(st, AD7293_REG_ISENSE_GAIN, gain);
+ if (ret)
+ return ret;
+
+ *gain = (*gain >> (4 * ch)) & 0xf;
+
+ return ret;
+}
+
+static int ad7293_dac_write_raw(struct ad7293_state *st, unsigned int ch,
+ u16 raw)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+
+ ret = __ad7293_spi_update_bits(st, AD7293_REG_DAC_EN, BIT(ch), BIT(ch));
+ if (ret)
+ goto exit;
+
+ ret = __ad7293_spi_write(st, AD7293_REG_UNI_VOUT0 + ch,
+ FIELD_PREP(AD7293_REG_DATA_RAW_MSK, raw));
+
+exit:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad7293_ch_read_raw(struct ad7293_state *st, enum ad7293_ch_type type,
+ unsigned int ch, u16 *raw)
+{
+ int ret;
+ unsigned int reg_wr, reg_rd, data_wr;
+
+ switch (type) {
+ case AD7293_ADC_VINX:
+ reg_wr = AD7293_REG_VINX_SEQ;
+ reg_rd = AD7293_REG_VIN0 + ch;
+ data_wr = BIT(ch);
+
+ break;
+ case AD7293_ADC_TSENSE:
+ reg_wr = AD7293_REG_ISENSEX_TSENSEX_SEQ;
+ reg_rd = AD7293_REG_TSENSE_INT + ch;
+ data_wr = BIT(ch);
+
+ break;
+ case AD7293_ADC_ISENSE:
+ reg_wr = AD7293_REG_ISENSEX_TSENSEX_SEQ;
+ reg_rd = AD7293_REG_ISENSE_0 + ch;
+ data_wr = BIT(ch) << 8;
+
+ break;
+ case AD7293_DAC:
+ reg_rd = AD7293_REG_UNI_VOUT0 + ch;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&st->lock);
+
+ if (type != AD7293_DAC) {
+ if (type == AD7293_ADC_TSENSE) {
+ ret = __ad7293_spi_write(st, AD7293_REG_TSENSE_BG_EN,
+ BIT(ch));
+ if (ret)
+ goto exit;
+
+ usleep_range(9000, 9900);
+ } else if (type == AD7293_ADC_ISENSE) {
+ ret = __ad7293_spi_write(st, AD7293_REG_ISENSE_BG_EN,
+ BIT(ch));
+ if (ret)
+ goto exit;
+
+ usleep_range(2000, 7000);
+ }
+
+ ret = __ad7293_spi_write(st, reg_wr, data_wr);
+ if (ret)
+ goto exit;
+
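+		/* Request a conversion of the selected inputs */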
+ ret = __ad7293_spi_write(st, AD7293_REG_CONV_CMD, 0x82);
+ if (ret)
+ goto exit;
+ }
+
+ ret = __ad7293_spi_read(st, reg_rd, raw);
+
+ *raw = FIELD_GET(AD7293_REG_DATA_RAW_MSK, *raw);
+
+exit:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad7293_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ad7293_state *st = iio_priv(indio_dev);
+ int ret;
+ u16 data;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output)
+ ret = ad7293_ch_read_raw(st, AD7293_DAC,
+ chan->channel, &data);
+ else
+ ret = ad7293_ch_read_raw(st, AD7293_ADC_VINX,
+ chan->channel, &data);
+
+ break;
+ case IIO_CURRENT:
+ ret = ad7293_ch_read_raw(st, AD7293_ADC_ISENSE,
+ chan->channel, &data);
+
+ break;
+ case IIO_TEMP:
+ ret = ad7293_ch_read_raw(st, AD7293_ADC_TSENSE,
+ chan->channel, &data);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = data;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output) {
+ ret = ad7293_get_offset(st,
+ chan->channel + AD7293_VOUT_MIN_OFFSET_CH,
+ &data);
+
+ data = FIELD_GET(AD7293_REG_VOUT_OFFSET_MSK, data);
+ } else {
+ ret = ad7293_get_offset(st, chan->channel, &data);
+ }
+
+ break;
+ case IIO_CURRENT:
+ ret = ad7293_get_offset(st,
+ chan->channel + AD7293_ISENSE_MIN_OFFSET_CH,
+ &data);
+
+ break;
+ case IIO_TEMP:
+ ret = ad7293_get_offset(st,
+ chan->channel + AD7293_TSENSE_MIN_OFFSET_CH,
+ &data);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ return ret;
+
+ *val = data;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ ret = ad7293_adc_get_scale(st, chan->channel, &data);
+ if (ret)
+ return ret;
+
+ *val = data;
+
+ return IIO_VAL_INT;
+ case IIO_CURRENT:
+ ret = ad7293_isense_get_scale(st, chan->channel, &data);
+ if (ret)
+ return ret;
+
+ *val = data;
+
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ *val = 1;
+ *val2 = 8;
+
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7293_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad7293_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (!chan->output)
+ return -EINVAL;
+
+ return ad7293_dac_write_raw(st, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output)
+ return ad7293_set_offset(st,
+ chan->channel +
+ AD7293_VOUT_MIN_OFFSET_CH,
+ val);
+ else
+ return ad7293_set_offset(st, chan->channel, val);
+ case IIO_CURRENT:
+ return ad7293_set_offset(st,
+ chan->channel +
+ AD7293_ISENSE_MIN_OFFSET_CH,
+ val);
+ case IIO_TEMP:
+ return ad7293_set_offset(st,
+ chan->channel +
+ AD7293_TSENSE_MIN_OFFSET_CH,
+ val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ return ad7293_adc_set_scale(st, chan->channel, val);
+ case IIO_CURRENT:
+ return ad7293_isense_set_scale(st, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7293_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int write_val,
+ unsigned int *read_val)
+{
+ struct ad7293_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (read_val) {
+		u16 temp;
+
+		ret = ad7293_spi_read(st, reg, &temp);
+ *read_val = temp;
+ } else {
+ ret = ad7293_spi_write(st, reg, (u16)write_val);
+ }
+
+ return ret;
+}
+
+static int ad7293_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_OFFSET:
+ *vals = dac_offset_table;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(dac_offset_table);
+
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SCALE:
+ *type = IIO_VAL_INT;
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = adc_range_table;
+ *length = ARRAY_SIZE(adc_range_table);
+ return IIO_AVAIL_LIST;
+ case IIO_CURRENT:
+ *vals = isense_gain_table;
+ *length = ARRAY_SIZE(isense_gain_table);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+#define AD7293_CHAN_ADC(_channel) { \
+ .type = IIO_VOLTAGE, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE) \
+}
+
+#define AD7293_CHAN_DAC(_channel) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_OFFSET) \
+}
+
+#define AD7293_CHAN_ISENSE(_channel) { \
+ .type = IIO_CURRENT, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE) \
+}
+
+#define AD7293_CHAN_TEMP(_channel) { \
+ .type = IIO_TEMP, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+}
+
+static const struct iio_chan_spec ad7293_channels[] = {
+ AD7293_CHAN_ADC(0),
+ AD7293_CHAN_ADC(1),
+ AD7293_CHAN_ADC(2),
+ AD7293_CHAN_ADC(3),
+ AD7293_CHAN_ISENSE(0),
+ AD7293_CHAN_ISENSE(1),
+ AD7293_CHAN_ISENSE(2),
+ AD7293_CHAN_ISENSE(3),
+ AD7293_CHAN_TEMP(0),
+ AD7293_CHAN_TEMP(1),
+ AD7293_CHAN_TEMP(2),
+ AD7293_CHAN_DAC(0),
+ AD7293_CHAN_DAC(1),
+ AD7293_CHAN_DAC(2),
+ AD7293_CHAN_DAC(3),
+ AD7293_CHAN_DAC(4),
+ AD7293_CHAN_DAC(5),
+ AD7293_CHAN_DAC(6),
+ AD7293_CHAN_DAC(7)
+};
+
+static int ad7293_soft_reset(struct ad7293_state *st)
+{
+ int ret;
+
+ ret = __ad7293_spi_write(st, AD7293_REG_SOFT_RESET, 0x7293);
+ if (ret)
+ return ret;
+
+ return __ad7293_spi_write(st, AD7293_REG_SOFT_RESET, 0x0000);
+}
+
+static int ad7293_reset(struct ad7293_state *st)
+{
+ if (st->gpio_reset) {
+ gpiod_set_value(st->gpio_reset, 0);
+ usleep_range(100, 1000);
+ gpiod_set_value(st->gpio_reset, 1);
+ usleep_range(100, 1000);
+
+ return 0;
+ }
+
+ /* Perform a software reset */
+ return ad7293_soft_reset(st);
+}
+
+static int ad7293_properties_parse(struct ad7293_state *st)
+{
+ struct spi_device *spi = st->spi;
+
+	st->gpio_reset = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->gpio_reset))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_reset),
+ "failed to get the reset GPIO\n");
+
+ st->reg_avdd = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(st->reg_avdd))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->reg_avdd),
+ "failed to get the AVDD voltage\n");
+
+ st->reg_vdrive = devm_regulator_get(&spi->dev, "vdrive");
+ if (IS_ERR(st->reg_vdrive))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->reg_vdrive),
+ "failed to get the VDRIVE voltage\n");
+
+ return 0;
+}
+
+static void ad7293_reg_disable(void *data)
+{
+ regulator_disable(data);
+}
+
+static int ad7293_init(struct ad7293_state *st)
+{
+ int ret;
+ u16 chip_id;
+ struct spi_device *spi = st->spi;
+
+ ret = ad7293_properties_parse(st);
+ if (ret)
+ return ret;
+
+ ret = ad7293_reset(st);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(st->reg_avdd);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Failed to enable specified AVDD Voltage!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7293_reg_disable,
+ st->reg_avdd);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(st->reg_vdrive);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Failed to enable specified VDRIVE Voltage!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7293_reg_disable,
+ st->reg_vdrive);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(st->reg_avdd);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to read avdd regulator: %d\n", ret);
+ return ret;
+ }
+
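+	/* AVDD must be within the supported 4.5 V to 5.5 V range */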
+ if (ret > 5500000 || ret < 4500000)
+ return -EINVAL;
+
+ ret = regulator_get_voltage(st->reg_vdrive);
+ if (ret < 0) {
+ dev_err(&spi->dev,
+ "Failed to read vdrive regulator: %d\n", ret);
+ return ret;
+ }
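+	/* VDRIVE must be within the supported 1.7 V to 5.5 V range */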
+ if (ret > 5500000 || ret < 1700000)
+ return -EINVAL;
+
+ /* Check Chip ID */
+ ret = __ad7293_spi_read(st, AD7293_REG_DEVICE_ID, &chip_id);
+ if (ret)
+ return ret;
+
+ if (chip_id != AD7293_CHIP_ID) {
+ dev_err(&spi->dev, "Invalid Chip ID.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct iio_info ad7293_info = {
+ .read_raw = ad7293_read_raw,
+ .write_raw = ad7293_write_raw,
+ .read_avail = &ad7293_read_avail,
+ .debugfs_reg_access = &ad7293_reg_access,
+};
+
+static int ad7293_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct ad7293_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ indio_dev->info = &ad7293_info;
+ indio_dev->name = "ad7293";
+ indio_dev->channels = ad7293_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7293_channels);
+
+ st->spi = spi;
+ st->page_select = 0;
+
+ mutex_init(&st->lock);
+
+ ret = ad7293_init(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad7293_id[] = {
+ { "ad7293", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7293_id);
+
+static const struct of_device_id ad7293_of_match[] = {
+ { .compatible = "adi,ad7293" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ad7293_of_match);
+
+static struct spi_driver ad7293_driver = {
+ .driver = {
+ .name = "ad7293",
+ .of_match_table = ad7293_of_match,
+ },
+ .probe = ad7293_probe,
+ .id_table = ad7293_id,
+};
+module_spi_driver(ad7293_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7293");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
index 5d1819448102..83ce9489259c 100644
--- a/drivers/iio/dac/dpot-dac.c
+++ b/drivers/iio/dac/dpot-dac.c
@@ -30,7 +30,7 @@
#include <linux/iio/consumer.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
index 5502e4f62f0d..60467c6f2c6e 100644
--- a/drivers/iio/dac/lpc18xx_dac.c
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -16,9 +16,8 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 7da4710a6408..fce640b7f1c8 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -137,7 +137,7 @@ static const struct iio_chan_spec_ext_info max5821_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &max5821_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &max5821_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &max5821_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 34b14aafb630..842bad57cb88 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -221,8 +221,8 @@ static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE,
&mcp472x_powerdown_mode_enum[MCP4725]),
- IIO_ENUM_AVAILABLE("powerdown_mode",
- &mcp472x_powerdown_mode_enum[MCP4725]),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &mcp472x_powerdown_mode_enum[MCP4725]),
{ },
};
@@ -235,8 +235,8 @@ static const struct iio_chan_spec_ext_info mcp4726_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE,
&mcp472x_powerdown_mode_enum[MCP4726]),
- IIO_ENUM_AVAILABLE("powerdown_mode",
- &mcp472x_powerdown_mode_enum[MCP4726]),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &mcp472x_powerdown_mode_enum[MCP4726]),
{ },
};
@@ -386,7 +386,7 @@ static int mcp4725_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
data->client = client;
if (dev_fwnode(&client->dev))
- data->id = (enum chip_id)device_get_match_data(&client->dev);
+ data->id = (uintptr_t)device_get_match_data(&client->dev);
else
data->id = id->driver_data;
pdata = dev_get_platdata(&client->dev);
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index dd2e306824e7..cd71cc4553a7 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -246,7 +246,7 @@ static const struct iio_chan_spec_ext_info stm32_dac_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &stm32_dac_powerdown_mode_en),
- IIO_ENUM_AVAILABLE("powerdown_mode", &stm32_dac_powerdown_mode_en),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &stm32_dac_powerdown_mode_en),
{},
};
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
index 5c14bfb16521..6beda2193683 100644
--- a/drivers/iio/dac/ti-dac082s085.c
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -160,7 +160,7 @@ static const struct iio_chan_spec_ext_info ti_dac_ext_info[] = {
.shared = IIO_SHARED_BY_TYPE,
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ti_dac_powerdown_mode),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
{ },
};
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index 546a4cf6c5ef..4a3b8d875518 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -212,7 +212,7 @@ static const struct iio_chan_spec_ext_info dac5571_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &dac5571_powerdown_mode),
- IIO_ENUM_AVAILABLE("powerdown_mode", &dac5571_powerdown_mode),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &dac5571_powerdown_mode),
{},
};
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 09218c3029f0..99f275829ec2 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -146,7 +146,7 @@ static const struct iio_chan_spec_ext_info ti_dac_ext_info[] = {
.shared = IIO_SHARED_BY_TYPE,
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ti_dac_powerdown_mode),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
{ },
};
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index 59aa60d4ca37..d81c2b2dad82 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -45,7 +45,6 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
- int len = 0;
u16 *data;
data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
@@ -79,7 +78,6 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
indio_dev->masklength, j);
/* random access read from the 'device' */
data[i] = fakedata[j];
- len += 2;
}
}
diff --git a/drivers/iio/filter/Kconfig b/drivers/iio/filter/Kconfig
new file mode 100644
index 000000000000..3ae35817ad82
--- /dev/null
+++ b/drivers/iio/filter/Kconfig
@@ -0,0 +1,18 @@
+#
+# Filter drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Filters"
+
+config ADMV8818
+ tristate "Analog Devices ADMV8818 High-Pass and Low-Pass Filter"
+ depends on SPI && COMMON_CLK && 64BIT
+ help
+ Say yes here to build support for Analog Devices ADMV8818
+ 2 GHz to 18 GHz, Digitally Tunable, High-Pass and Low-Pass Filter.
+
+ To compile this driver as a module, choose M here: the
+	  module will be called admv8818.
+
+endmenu
diff --git a/drivers/iio/filter/Makefile b/drivers/iio/filter/Makefile
new file mode 100644
index 000000000000..55e228c0dd20
--- /dev/null
+++ b/drivers/iio/filter/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for industrial I/O Filter drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ADMV8818) += admv8818.o
diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c
new file mode 100644
index 000000000000..68de45fe21b4
--- /dev/null
+++ b/drivers/iio/filter/admv8818.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADMV8818 driver
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+/* ADMV8818 Register Map */
+#define ADMV8818_REG_SPI_CONFIG_A 0x0
+#define ADMV8818_REG_SPI_CONFIG_B 0x1
+#define ADMV8818_REG_CHIPTYPE 0x3
+#define ADMV8818_REG_PRODUCT_ID_L 0x4
+#define ADMV8818_REG_PRODUCT_ID_H 0x5
+#define ADMV8818_REG_FAST_LATCH_POINTER 0x10
+#define ADMV8818_REG_FAST_LATCH_STOP 0x11
+#define ADMV8818_REG_FAST_LATCH_START 0x12
+#define ADMV8818_REG_FAST_LATCH_DIRECTION 0x13
+#define ADMV8818_REG_FAST_LATCH_STATE 0x14
+#define ADMV8818_REG_WR0_SW 0x20
+#define ADMV8818_REG_WR0_FILTER 0x21
+#define ADMV8818_REG_WR1_SW 0x22
+#define ADMV8818_REG_WR1_FILTER 0x23
+#define ADMV8818_REG_WR2_SW 0x24
+#define ADMV8818_REG_WR2_FILTER 0x25
+#define ADMV8818_REG_WR3_SW 0x26
+#define ADMV8818_REG_WR3_FILTER 0x27
+#define ADMV8818_REG_WR4_SW 0x28
+#define ADMV8818_REG_WR4_FILTER 0x29
+#define ADMV8818_REG_LUT0_SW 0x100
+#define ADMV8818_REG_LUT0_FILTER 0x101
+#define ADMV8818_REG_LUT127_SW 0x1FE
+#define ADMV8818_REG_LUT127_FILTER 0x1FF
+
+/* ADMV8818_REG_SPI_CONFIG_A Map */
+#define ADMV8818_SOFTRESET_N_MSK BIT(7)
+#define ADMV8818_LSB_FIRST_N_MSK BIT(6)
+#define ADMV8818_ENDIAN_N_MSK BIT(5)
+#define ADMV8818_SDOACTIVE_N_MSK BIT(4)
+#define ADMV8818_SDOACTIVE_MSK BIT(3)
+#define ADMV8818_ENDIAN_MSK BIT(2)
+#define ADMV8818_LSBFIRST_MSK BIT(1)
+#define ADMV8818_SOFTRESET_MSK BIT(0)
+
+/* ADMV8818_REG_SPI_CONFIG_B Map */
+#define ADMV8818_SINGLE_INSTRUCTION_MSK BIT(7)
+#define ADMV8818_CSB_STALL_MSK BIT(6)
+#define ADMV8818_MASTER_SLAVE_RB_MSK BIT(5)
+#define ADMV8818_MASTER_SLAVE_TRANSFER_MSK BIT(0)
+
+/* ADMV8818_REG_WR0_SW Map */
+#define ADMV8818_SW_IN_SET_WR0_MSK BIT(7)
+#define ADMV8818_SW_OUT_SET_WR0_MSK BIT(6)
+#define ADMV8818_SW_IN_WR0_MSK GENMASK(5, 3)
+#define ADMV8818_SW_OUT_WR0_MSK GENMASK(2, 0)
+
+/* ADMV8818_REG_WR0_FILTER Map */
+#define ADMV8818_HPF_WR0_MSK GENMASK(7, 4)
+#define ADMV8818_LPF_WR0_MSK GENMASK(3, 0)
+
+enum {
+ ADMV8818_BW_FREQ,
+ ADMV8818_CENTER_FREQ
+};
+
+enum {
+ ADMV8818_AUTO_MODE,
+ ADMV8818_MANUAL_MODE,
+};
+
+struct admv8818_state {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct clk *clkin;
+ struct notifier_block nb;
+	/* Protect against concurrent accesses to the device and data content */
+ struct mutex lock;
+ unsigned int filter_mode;
+ u64 cf_hz;
+};
+
+static const unsigned long long freq_range_hpf[4][2] = {
+ {1750000000ULL, 3550000000ULL},
+ {3400000000ULL, 7250000000ULL},
+	{6600000000ULL, 12000000000ULL},
+	{12500000000ULL, 19900000000ULL}
+};
+
+static const unsigned long long freq_range_lpf[4][2] = {
+ {2050000000ULL, 3850000000ULL},
+ {3350000000ULL, 7250000000ULL},
+	{7000000000ULL, 13000000000ULL},
+	{12550000000ULL, 18500000000ULL}
+};
+
+static const struct regmap_config admv8818_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = 0x80,
+ .max_register = 0x1FF,
+};
+
+static const char * const admv8818_modes[] = {
+ [0] = "auto",
+ [1] = "manual"
+};
+
+static int __admv8818_hpf_select(struct admv8818_state *st, u64 freq)
+{
+ unsigned int hpf_step = 0, hpf_band = 0, i, j;
+ u64 freq_step;
+ int ret;
+
+ if (freq < freq_range_hpf[0][0])
+ goto hpf_write;
+
+ if (freq > freq_range_hpf[3][1]) {
+ hpf_step = 15;
+ hpf_band = 4;
+
+ goto hpf_write;
+ }
+
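+	/*
+	 * Each band covers 16 HPF steps, so the step size is
+	 * (band_max - band_min) / 15; pick the band, then the largest
+	 * step whose corner frequency is still below the target.
+	 */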
+ for (i = 0; i < 4; i++) {
+ freq_step = div_u64((freq_range_hpf[i][1] -
+ freq_range_hpf[i][0]), 15);
+
+ if (freq > freq_range_hpf[i][0] &&
+ (freq < freq_range_hpf[i][1] + freq_step)) {
+ hpf_band = i + 1;
+
+ for (j = 1; j <= 16; j++) {
+ if (freq < (freq_range_hpf[i][0] + (freq_step * j))) {
+ hpf_step = j - 1;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ /* Close HPF frequency gap between 12 and 12.5 GHz */
+ if (freq >= 12000 * HZ_PER_MHZ && freq <= 12500 * HZ_PER_MHZ) {
+ hpf_band = 3;
+ hpf_step = 15;
+ }
+
+hpf_write:
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_SW,
+ ADMV8818_SW_IN_SET_WR0_MSK |
+ ADMV8818_SW_IN_WR0_MSK,
+ FIELD_PREP(ADMV8818_SW_IN_SET_WR0_MSK, 1) |
+ FIELD_PREP(ADMV8818_SW_IN_WR0_MSK, hpf_band));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER,
+ ADMV8818_HPF_WR0_MSK,
+ FIELD_PREP(ADMV8818_HPF_WR0_MSK, hpf_step));
+}
+
+static int admv8818_hpf_select(struct admv8818_state *st, u64 freq)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv8818_hpf_select(st, freq);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __admv8818_lpf_select(struct admv8818_state *st, u64 freq)
+{
+ unsigned int lpf_step = 0, lpf_band = 0, i, j;
+ u64 freq_step;
+ int ret;
+
+ if (freq > freq_range_lpf[3][1])
+ goto lpf_write;
+
+ if (freq < freq_range_lpf[0][0]) {
+ lpf_band = 1;
+
+ goto lpf_write;
+ }
+
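+	/* Same 16-step quantisation as the HPF path, searching band by band */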
+ for (i = 0; i < 4; i++) {
+ if (freq > freq_range_lpf[i][0] && freq < freq_range_lpf[i][1]) {
+ lpf_band = i + 1;
+ freq_step = div_u64((freq_range_lpf[i][1] - freq_range_lpf[i][0]), 15);
+
+ for (j = 0; j <= 15; j++) {
+ if (freq < (freq_range_lpf[i][0] + (freq_step * j))) {
+ lpf_step = j;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+lpf_write:
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_SW,
+ ADMV8818_SW_OUT_SET_WR0_MSK |
+ ADMV8818_SW_OUT_WR0_MSK,
+ FIELD_PREP(ADMV8818_SW_OUT_SET_WR0_MSK, 1) |
+ FIELD_PREP(ADMV8818_SW_OUT_WR0_MSK, lpf_band));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER,
+ ADMV8818_LPF_WR0_MSK,
+ FIELD_PREP(ADMV8818_LPF_WR0_MSK, lpf_step));
+}
+
+static int admv8818_lpf_select(struct admv8818_state *st, u64 freq)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv8818_lpf_select(st, freq);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int admv8818_rfin_band_select(struct admv8818_state *st)
+{
+ int ret;
+
+ st->cf_hz = clk_get_rate(st->clkin);
+
+ mutex_lock(&st->lock);
+
+ ret = __admv8818_hpf_select(st, st->cf_hz);
+ if (ret)
+ goto exit;
+
+ ret = __admv8818_lpf_select(st, st->cf_hz);
+exit:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static int __admv8818_read_hpf_freq(struct admv8818_state *st, u64 *hpf_freq)
+{
+ unsigned int data, hpf_band, hpf_state;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADMV8818_REG_WR0_SW, &data);
+ if (ret)
+ return ret;
+
+ hpf_band = FIELD_GET(ADMV8818_SW_IN_WR0_MSK, data);
+ if (!hpf_band) {
+ *hpf_freq = 0;
+ return ret;
+ }
+
+ ret = regmap_read(st->regmap, ADMV8818_REG_WR0_FILTER, &data);
+ if (ret)
+ return ret;
+
+ hpf_state = FIELD_GET(ADMV8818_HPF_WR0_MSK, data);
+
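+	/* Corner frequency = band start + (band span / 15) * programmed step */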
+ *hpf_freq = div_u64(freq_range_hpf[hpf_band - 1][1] - freq_range_hpf[hpf_band - 1][0], 15);
+ *hpf_freq = freq_range_hpf[hpf_band - 1][0] + (*hpf_freq * hpf_state);
+
+ return ret;
+}
+
+static int admv8818_read_hpf_freq(struct admv8818_state *st, u64 *hpf_freq)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv8818_read_hpf_freq(st, hpf_freq);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq)
+{
+ unsigned int data, lpf_band, lpf_state;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADMV8818_REG_WR0_SW, &data);
+ if (ret)
+ return ret;
+
+ lpf_band = FIELD_GET(ADMV8818_SW_OUT_WR0_MSK, data);
+ if (!lpf_band) {
+ *lpf_freq = 0;
+ return ret;
+ }
+
+ ret = regmap_read(st->regmap, ADMV8818_REG_WR0_FILTER, &data);
+ if (ret)
+ return ret;
+
+ lpf_state = FIELD_GET(ADMV8818_LPF_WR0_MSK, data);
+
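+	/* Same reconstruction as the HPF path: band start + step size * step index */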
+ *lpf_freq = div_u64(freq_range_lpf[lpf_band - 1][1] - freq_range_lpf[lpf_band - 1][0], 15);
+ *lpf_freq = freq_range_lpf[lpf_band - 1][0] + (*lpf_freq * lpf_state);
+
+ return ret;
+}
+
+static int admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv8818_read_lpf_freq(st, lpf_freq);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int admv8818_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct admv8818_state *st = iio_priv(indio_dev);
+
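+	/* Reassemble the 64-bit frequency from val (lower 32 bits) and val2 (upper 32 bits) */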
+ u64 freq = ((u64)val2 << 32 | (u32)val);
+
+ switch (info) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return admv8818_lpf_select(st, freq);
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ return admv8818_hpf_select(st, freq);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admv8818_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct admv8818_state *st = iio_priv(indio_dev);
+ int ret;
+ u64 freq;
+
+ switch (info) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = admv8818_read_lpf_freq(st, &freq);
+ if (ret)
+ return ret;
+
+ *val = (u32)freq;
+ *val2 = (u32)(freq >> 32);
+
+ return IIO_VAL_INT_64;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ ret = admv8818_read_hpf_freq(st, &freq);
+ if (ret)
+ return ret;
+
+ *val = (u32)freq;
+ *val2 = (u32)(freq >> 32);
+
+ return IIO_VAL_INT_64;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admv8818_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int write_val,
+ unsigned int *read_val)
+{
+ struct admv8818_state *st = iio_priv(indio_dev);
+
+ if (read_val)
+ return regmap_read(st->regmap, reg, read_val);
+ else
+ return regmap_write(st->regmap, reg, write_val);
+}
+
+static int admv8818_get_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct admv8818_state *st = iio_priv(indio_dev);
+
+ return st->filter_mode;
+}
+
+static int admv8818_set_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct admv8818_state *st = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (!st->clkin) {
+ if (mode == ADMV8818_MANUAL_MODE)
+ return 0;
+
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case ADMV8818_AUTO_MODE:
+ if (!st->filter_mode)
+ return 0;
+
+ ret = clk_prepare_enable(st->clkin);
+ if (ret)
+ return ret;
+
+ ret = clk_notifier_register(st->clkin, &st->nb);
+ if (ret) {
+ clk_disable_unprepare(st->clkin);
+
+ return ret;
+ }
+
+ break;
+ case ADMV8818_MANUAL_MODE:
+ if (st->filter_mode)
+ return 0;
+
+ clk_disable_unprepare(st->clkin);
+
+ ret = clk_notifier_unregister(st->clkin, &st->nb);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ st->filter_mode = mode;
+
+ return ret;
+}
+
+static const struct iio_info admv8818_info = {
+ .write_raw = admv8818_write_raw,
+ .read_raw = admv8818_read_raw,
+ .debugfs_reg_access = &admv8818_reg_access,
+};
+
+static const struct iio_enum admv8818_mode_enum = {
+ .items = admv8818_modes,
+ .num_items = ARRAY_SIZE(admv8818_modes),
+ .get = admv8818_get_mode,
+ .set = admv8818_set_mode,
+};
+
+static const struct iio_chan_spec_ext_info admv8818_ext_info[] = {
+ IIO_ENUM("filter_mode", IIO_SHARED_BY_ALL, &admv8818_mode_enum),
+ IIO_ENUM_AVAILABLE("filter_mode", IIO_SHARED_BY_ALL, &admv8818_mode_enum),
+ { },
+};
+
+#define ADMV8818_CHAN(_channel) { \
+ .type = IIO_ALTVOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY) \
+}
+
+#define ADMV8818_CHAN_BW_CF(_channel, _admv8818_ext_info) { \
+ .type = IIO_ALTVOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .ext_info = _admv8818_ext_info, \
+}
+
+static const struct iio_chan_spec admv8818_channels[] = {
+ ADMV8818_CHAN(0),
+ ADMV8818_CHAN_BW_CF(0, admv8818_ext_info),
+};
+
+static int admv8818_freq_change(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct admv8818_state *st = container_of(nb, struct admv8818_state, nb);
+
+ if (action == POST_RATE_CHANGE)
+ return notifier_from_errno(admv8818_rfin_band_select(st));
+
+ return NOTIFY_OK;
+}
+
+static void admv8818_clk_notifier_unreg(void *data)
+{
+ struct admv8818_state *st = data;
+
+ if (st->filter_mode == 0)
+ clk_notifier_unregister(st->clkin, &st->nb);
+}
+
+static void admv8818_clk_disable(void *data)
+{
+ struct admv8818_state *st = data;
+
+ if (st->filter_mode == 0)
+ clk_disable_unprepare(st->clkin);
+}
+
+static int admv8818_init(struct admv8818_state *st)
+{
+ int ret;
+ struct spi_device *spi = st->spi;
+ unsigned int chip_id;
+
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SOFTRESET_N_MSK |
+ ADMV8818_SOFTRESET_MSK,
+ FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) |
+ FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1));
+ if (ret) {
+ dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SDOACTIVE_N_MSK |
+ ADMV8818_SDOACTIVE_MSK,
+ FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) |
+ FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1));
+ if (ret) {
+ dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n");
+ return ret;
+ }
+
+ ret = regmap_read(st->regmap, ADMV8818_REG_CHIPTYPE, &chip_id);
+ if (ret) {
+ dev_err(&spi->dev, "ADMV8818 Chip ID read failed.\n");
+ return ret;
+ }
+
+ if (chip_id != 0x1) {
+ dev_err(&spi->dev, "ADMV8818 Invalid Chip ID.\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_B,
+ ADMV8818_SINGLE_INSTRUCTION_MSK,
+ FIELD_PREP(ADMV8818_SINGLE_INSTRUCTION_MSK, 1));
+ if (ret) {
+ dev_err(&spi->dev, "ADMV8818 Single Instruction failed.\n");
+ return ret;
+ }
+
+ if (st->clkin)
+ return admv8818_rfin_band_select(st);
+ else
+ return 0;
+}
+
+static int admv8818_clk_setup(struct admv8818_state *st)
+{
+ struct spi_device *spi = st->spi;
+ int ret;
+
+ st->clkin = devm_clk_get_optional(&spi->dev, "rf_in");
+ if (IS_ERR(st->clkin))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
+ "failed to get the input clock\n");
+ else if (!st->clkin)
+ return 0;
+
+ ret = clk_prepare_enable(st->clkin);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, admv8818_clk_disable, st);
+ if (ret)
+ return ret;
+
+ st->nb.notifier_call = admv8818_freq_change;
+ ret = clk_notifier_register(st->clkin, &st->nb);
+ if (ret < 0)
+ return ret;
+
+ return devm_add_action_or_reset(&spi->dev, admv8818_clk_notifier_unreg, st);
+}
+
+static int admv8818_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ struct admv8818_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_spi(spi, &admv8818_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ st = iio_priv(indio_dev);
+ st->regmap = regmap;
+
+ indio_dev->info = &admv8818_info;
+ indio_dev->name = "admv8818";
+ indio_dev->channels = admv8818_channels;
+ indio_dev->num_channels = ARRAY_SIZE(admv8818_channels);
+
+ st->spi = spi;
+
+ ret = admv8818_clk_setup(st);
+ if (ret)
+ return ret;
+
+ mutex_init(&st->lock);
+
+ ret = admv8818_init(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id admv8818_id[] = {
+ { "admv8818", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, admv8818_id);
+
+static const struct of_device_id admv8818_of_match[] = {
+ { .compatible = "adi,admv8818" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, admv8818_of_match);
+
+static struct spi_driver admv8818_driver = {
+ .driver = {
+ .name = "admv8818",
+ .of_match_table = admv8818_of_match,
+ },
+ .probe = admv8818_probe,
+ .id_table = admv8818_id,
+};
+module_spi_driver(admv8818_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADMV8818");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
index 2c9e0559e8a4..b44036f843af 100644
--- a/drivers/iio/frequency/Kconfig
+++ b/drivers/iio/frequency/Kconfig
@@ -50,6 +50,16 @@ config ADF4371
To compile this driver as a module, choose M here: the
module will be called adf4371.
+config ADMV1013
+ tristate "Analog Devices ADMV1013 Microwave Upconverter"
+ depends on SPI && COMMON_CLK
+ help
+ Say yes here to build support for Analog Devices ADMV1013
+ 24 GHz to 44 GHz, Wideband, Microwave Upconverter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called admv1013.
+
config ADRF6780
tristate "Analog Devices ADRF6780 Microwave Upconverter"
depends on SPI
diff --git a/drivers/iio/frequency/Makefile b/drivers/iio/frequency/Makefile
index ae3136c79202..ae6899856c99 100644
--- a/drivers/iio/frequency/Makefile
+++ b/drivers/iio/frequency/Makefile
@@ -7,4 +7,5 @@
obj-$(CONFIG_AD9523) += ad9523.o
obj-$(CONFIG_ADF4350) += adf4350.o
obj-$(CONFIG_ADF4371) += adf4371.o
+obj-$(CONFIG_ADMV1013) += admv1013.o
obj-$(CONFIG_ADRF6780) += adrf6780.o
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
new file mode 100644
index 000000000000..6cdeb50143af
--- /dev/null
+++ b/drivers/iio/frequency/admv1013.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADMV1013 driver
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/notifier.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <asm/unaligned.h>
+
+/* ADMV1013 Register Map */
+#define ADMV1013_REG_SPI_CONTROL 0x00
+#define ADMV1013_REG_ALARM 0x01
+#define ADMV1013_REG_ALARM_MASKS 0x02
+#define ADMV1013_REG_ENABLE 0x03
+#define ADMV1013_REG_LO_AMP_I 0x05
+#define ADMV1013_REG_LO_AMP_Q 0x06
+#define ADMV1013_REG_OFFSET_ADJUST_I 0x07
+#define ADMV1013_REG_OFFSET_ADJUST_Q 0x08
+#define ADMV1013_REG_QUAD 0x09
+#define ADMV1013_REG_VVA_TEMP_COMP 0x0A
+
+/* ADMV1013_REG_SPI_CONTROL Map */
+#define ADMV1013_PARITY_EN_MSK BIT(15)
+#define ADMV1013_SPI_SOFT_RESET_MSK BIT(14)
+#define ADMV1013_CHIP_ID_MSK GENMASK(11, 4)
+#define ADMV1013_CHIP_ID 0xA
+#define ADMV1013_REVISION_ID_MSK GENMASK(3, 0)
+
+/* ADMV1013_REG_ALARM Map */
+#define ADMV1013_PARITY_ERROR_MSK BIT(15)
+#define ADMV1013_TOO_FEW_ERRORS_MSK BIT(14)
+#define ADMV1013_TOO_MANY_ERRORS_MSK BIT(13)
+#define ADMV1013_ADDRESS_RANGE_ERROR_MSK BIT(12)
+
+/* ADMV1013_REG_ENABLE Map */
+#define ADMV1013_VGA_PD_MSK BIT(15)
+#define ADMV1013_MIXER_PD_MSK BIT(14)
+#define ADMV1013_QUAD_PD_MSK GENMASK(13, 11)
+#define ADMV1013_BG_PD_MSK BIT(10)
+#define ADMV1013_MIXER_IF_EN_MSK BIT(7)
+#define ADMV1013_DET_EN_MSK BIT(5)
+
+/* ADMV1013_REG_LO_AMP Map */
+#define ADMV1013_LOAMP_PH_ADJ_FINE_MSK GENMASK(13, 7)
+#define ADMV1013_MIXER_VGATE_MSK GENMASK(6, 0)
+
+/* ADMV1013_REG_OFFSET_ADJUST Map */
+#define ADMV1013_MIXER_OFF_ADJ_P_MSK GENMASK(15, 9)
+#define ADMV1013_MIXER_OFF_ADJ_N_MSK GENMASK(8, 2)
+
+/* ADMV1013_REG_QUAD Map */
+#define ADMV1013_QUAD_SE_MODE_MSK GENMASK(9, 6)
+#define ADMV1013_QUAD_FILTERS_MSK GENMASK(3, 0)
+
+/* ADMV1013_REG_VVA_TEMP_COMP Map */
+#define ADMV1013_VVA_TEMP_COMP_MSK GENMASK(15, 0)
+
+/* ADMV1013 Miscellaneous Defines */
+#define ADMV1013_READ BIT(7)
+#define ADMV1013_REG_ADDR_READ_MSK GENMASK(6, 1)
+#define ADMV1013_REG_ADDR_WRITE_MSK GENMASK(22, 17)
+#define ADMV1013_REG_DATA_MSK GENMASK(16, 1)
+
+enum {
+ ADMV1013_IQ_MODE,
+ ADMV1013_IF_MODE
+};
+
+enum {
+ ADMV1013_RFMOD_I_CALIBPHASE,
+ ADMV1013_RFMOD_Q_CALIBPHASE,
+};
+
+enum {
+ ADMV1013_SE_MODE_POS = 6,
+ ADMV1013_SE_MODE_NEG = 9,
+ ADMV1013_SE_MODE_DIFF = 12
+};
+
+struct admv1013_state {
+ struct spi_device *spi;
+ struct clk *clkin;
+ /* Protect against concurrent accesses to the device and to data */
+ struct mutex lock;
+ struct regulator *reg;
+ struct notifier_block nb;
+ unsigned int input_mode;
+ unsigned int quad_se_mode;
+ bool det_en;
+ u8 data[3] ____cacheline_aligned;
+};
+
+static int __admv1013_spi_read(struct admv1013_state *st, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+ struct spi_transfer t = {0};
+
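+	/* 24-bit SPI frame: MSB read flag, register address in bits 22:17, data in bits 16:1 */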
+ st->data[0] = ADMV1013_READ | FIELD_PREP(ADMV1013_REG_ADDR_READ_MSK, reg);
+ st->data[1] = 0x0;
+ st->data[2] = 0x0;
+
+ t.rx_buf = &st->data[0];
+ t.tx_buf = &st->data[0];
+ t.len = 3;
+
+ ret = spi_sync_transfer(st->spi, &t, 1);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(ADMV1013_REG_DATA_MSK, get_unaligned_be24(&st->data[0]));
+
+ return ret;
+}
+
+static int admv1013_spi_read(struct admv1013_state *st, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv1013_spi_read(st, reg, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __admv1013_spi_write(struct admv1013_state *st,
+ unsigned int reg,
+ unsigned int val)
+{
+ put_unaligned_be24(FIELD_PREP(ADMV1013_REG_DATA_MSK, val) |
+ FIELD_PREP(ADMV1013_REG_ADDR_WRITE_MSK, reg), &st->data[0]);
+
+ return spi_write(st->spi, &st->data[0], 3);
+}
+
+static int admv1013_spi_write(struct admv1013_state *st, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv1013_spi_write(st, reg, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __admv1013_spi_update_bits(struct admv1013_state *st, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+ unsigned int data, temp;
+
+ ret = __admv1013_spi_read(st, reg, &data);
+ if (ret)
+ return ret;
+
+ temp = (data & ~mask) | (val & mask);
+
+ return __admv1013_spi_write(st, reg, temp);
+}
+
+static int admv1013_spi_update_bits(struct admv1013_state *st, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv1013_spi_update_bits(st, reg, mask, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int admv1013_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct admv1013_state *st = iio_priv(indio_dev);
+ unsigned int data, addr;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+		switch (chan->channel2) {
+ case IIO_MOD_I:
+ addr = ADMV1013_REG_OFFSET_ADJUST_I;
+ break;
+ case IIO_MOD_Q:
+ addr = ADMV1013_REG_OFFSET_ADJUST_Q;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = admv1013_spi_read(st, addr, &data);
+ if (ret)
+ return ret;
+
+ if (!chan->channel)
+ *val = FIELD_GET(ADMV1013_MIXER_OFF_ADJ_P_MSK, data);
+ else
+ *val = FIELD_GET(ADMV1013_MIXER_OFF_ADJ_N_MSK, data);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admv1013_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct admv1013_state *st = iio_priv(indio_dev);
+ unsigned int addr, data, msk;
+
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->channel2) {
+ case IIO_MOD_I:
+ addr = ADMV1013_REG_OFFSET_ADJUST_I;
+ break;
+ case IIO_MOD_Q:
+ addr = ADMV1013_REG_OFFSET_ADJUST_Q;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!chan->channel) {
+ msk = ADMV1013_MIXER_OFF_ADJ_P_MSK;
+ data = FIELD_PREP(ADMV1013_MIXER_OFF_ADJ_P_MSK, val);
+ } else {
+ msk = ADMV1013_MIXER_OFF_ADJ_N_MSK;
+ data = FIELD_PREP(ADMV1013_MIXER_OFF_ADJ_N_MSK, val);
+ }
+
+ return admv1013_spi_update_bits(st, addr, msk, data);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t admv1013_read(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct admv1013_state *st = iio_priv(indio_dev);
+ unsigned int data, addr;
+ int ret;
+
+ switch ((u32)private) {
+ case ADMV1013_RFMOD_I_CALIBPHASE:
+ addr = ADMV1013_REG_LO_AMP_I;
+ break;
+ case ADMV1013_RFMOD_Q_CALIBPHASE:
+ addr = ADMV1013_REG_LO_AMP_Q;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = admv1013_spi_read(st, addr, &data);
+ if (ret)
+ return ret;
+
+ data = FIELD_GET(ADMV1013_LOAMP_PH_ADJ_FINE_MSK, data);
+
+ return sysfs_emit(buf, "%u\n", data);
+}
+
+static ssize_t admv1013_write(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct admv1013_state *st = iio_priv(indio_dev);
+ unsigned int data;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &data);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(ADMV1013_LOAMP_PH_ADJ_FINE_MSK, data);
+
+ switch ((u32)private) {
+ case ADMV1013_RFMOD_I_CALIBPHASE:
+ ret = admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_I,
+ ADMV1013_LOAMP_PH_ADJ_FINE_MSK,
+ data);
+ if (ret)
+ return ret;
+ break;
+ case ADMV1013_RFMOD_Q_CALIBPHASE:
+ ret = admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_Q,
+ ADMV1013_LOAMP_PH_ADJ_FINE_MSK,
+ data);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret ? ret : len;
+}
+
+static int admv1013_update_quad_filters(struct admv1013_state *st)
+{
+ unsigned int filt_raw;
+ u64 rate = clk_get_rate(st->clkin);
+
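+	/*
+	 * Pick the quadrupler filter band from the LO rate; the ranges
+	 * overlap, so the first match wins.
+	 */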
+ if (rate >= (5400 * HZ_PER_MHZ) && rate <= (7000 * HZ_PER_MHZ))
+ filt_raw = 15;
+ else if (rate >= (5400 * HZ_PER_MHZ) && rate <= (8000 * HZ_PER_MHZ))
+ filt_raw = 10;
+ else if (rate >= (6600 * HZ_PER_MHZ) && rate <= (9200 * HZ_PER_MHZ))
+ filt_raw = 5;
+ else
+ filt_raw = 0;
+
+ return __admv1013_spi_update_bits(st, ADMV1013_REG_QUAD,
+ ADMV1013_QUAD_FILTERS_MSK,
+ FIELD_PREP(ADMV1013_QUAD_FILTERS_MSK, filt_raw));
+}
+
+static int admv1013_update_mixer_vgate(struct admv1013_state *st)
+{
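+	/*
+	 * regulator_get_voltage() reports microvolts; the linear equations
+	 * below map the common-mode voltage to a MIXER_VGATE code for the
+	 * low and high VCM ranges.
+	 */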
+	unsigned int mixer_vgate;
+	int vcm;
+
+	vcm = regulator_get_voltage(st->reg);
+	if (vcm < 0)
+		return vcm;
+
+	if (vcm < 1800000)
+		mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
+	else if (vcm > 1800000 && vcm < 2600000)
+		mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
+	else
+		return -EINVAL;
+
+ return __admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_I,
+ ADMV1013_MIXER_VGATE_MSK,
+ FIELD_PREP(ADMV1013_MIXER_VGATE_MSK, mixer_vgate));
+}
+
+static int admv1013_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int write_val,
+ unsigned int *read_val)
+{
+ struct admv1013_state *st = iio_priv(indio_dev);
+
+ if (read_val)
+ return admv1013_spi_read(st, reg, read_val);
+ else
+ return admv1013_spi_write(st, reg, write_val);
+}
+
+static const struct iio_info admv1013_info = {
+ .read_raw = admv1013_read_raw,
+ .write_raw = admv1013_write_raw,
+ .debugfs_reg_access = &admv1013_reg_access,
+};
+
+static int admv1013_freq_change(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct admv1013_state *st = container_of(nb, struct admv1013_state, nb);
+ int ret;
+
+ if (action == POST_RATE_CHANGE) {
+ mutex_lock(&st->lock);
+ ret = notifier_from_errno(admv1013_update_quad_filters(st));
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ return NOTIFY_OK;
+}
+
+#define _ADMV1013_EXT_INFO(_name, _shared, _ident) { \
+ .name = _name, \
+ .read = admv1013_read, \
+ .write = admv1013_write, \
+ .private = _ident, \
+ .shared = _shared, \
+}
+
+static const struct iio_chan_spec_ext_info admv1013_ext_info[] = {
+ _ADMV1013_EXT_INFO("i_calibphase", IIO_SEPARATE, ADMV1013_RFMOD_I_CALIBPHASE),
+ _ADMV1013_EXT_INFO("q_calibphase", IIO_SEPARATE, ADMV1013_RFMOD_Q_CALIBPHASE),
+ { },
+};
+
+#define ADMV1013_CHAN_PHASE(_channel, _channel2, _admv1013_ext_info) { \
+ .type = IIO_ALTVOLTAGE, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel2 = _channel2, \
+ .channel = _channel, \
+ .differential = 1, \
+ .ext_info = _admv1013_ext_info, \
+ }
+
+#define ADMV1013_CHAN_CALIB(_channel, rf_comp) { \
+ .type = IIO_ALTVOLTAGE, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .channel2 = IIO_MOD_##rf_comp, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ }
+
+static const struct iio_chan_spec admv1013_channels[] = {
+ ADMV1013_CHAN_PHASE(0, 1, admv1013_ext_info),
+ ADMV1013_CHAN_CALIB(0, I),
+ ADMV1013_CHAN_CALIB(0, Q),
+ ADMV1013_CHAN_CALIB(1, I),
+ ADMV1013_CHAN_CALIB(1, Q),
+};
+
+static int admv1013_init(struct admv1013_state *st)
+{
+ int ret;
+ unsigned int data;
+ struct spi_device *spi = st->spi;
+
+ /* Perform a software reset */
+ ret = __admv1013_spi_update_bits(st, ADMV1013_REG_SPI_CONTROL,
+ ADMV1013_SPI_SOFT_RESET_MSK,
+ FIELD_PREP(ADMV1013_SPI_SOFT_RESET_MSK, 1));
+ if (ret)
+ return ret;
+
+ ret = __admv1013_spi_update_bits(st, ADMV1013_REG_SPI_CONTROL,
+ ADMV1013_SPI_SOFT_RESET_MSK,
+ FIELD_PREP(ADMV1013_SPI_SOFT_RESET_MSK, 0));
+ if (ret)
+ return ret;
+
+ ret = __admv1013_spi_read(st, ADMV1013_REG_SPI_CONTROL, &data);
+ if (ret)
+ return ret;
+
+ data = FIELD_GET(ADMV1013_CHIP_ID_MSK, data);
+ if (data != ADMV1013_CHIP_ID) {
+ dev_err(&spi->dev, "Invalid Chip ID.\n");
+ return -EINVAL;
+ }
+
+ ret = __admv1013_spi_write(st, ADMV1013_REG_VVA_TEMP_COMP, 0xE700);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(ADMV1013_QUAD_SE_MODE_MSK, st->quad_se_mode);
+
+ ret = __admv1013_spi_update_bits(st, ADMV1013_REG_QUAD,
+ ADMV1013_QUAD_SE_MODE_MSK, data);
+ if (ret)
+ return ret;
+
+ ret = admv1013_update_mixer_vgate(st);
+ if (ret)
+ return ret;
+
+ ret = admv1013_update_quad_filters(st);
+ if (ret)
+ return ret;
+
+	return __admv1013_spi_update_bits(st, ADMV1013_REG_ENABLE,
+					  ADMV1013_DET_EN_MSK |
+					  ADMV1013_MIXER_IF_EN_MSK,
+					  FIELD_PREP(ADMV1013_DET_EN_MSK, st->det_en) |
+					  FIELD_PREP(ADMV1013_MIXER_IF_EN_MSK, st->input_mode));
+}
+
+static void admv1013_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void admv1013_reg_disable(void *data)
+{
+ regulator_disable(data);
+}
+
+static void admv1013_powerdown(void *data)
+{
+ unsigned int enable_reg, enable_reg_msk;
+
+ /* Disable all components in the Enable Register */
+ enable_reg_msk = ADMV1013_VGA_PD_MSK |
+ ADMV1013_MIXER_PD_MSK |
+ ADMV1013_QUAD_PD_MSK |
+ ADMV1013_BG_PD_MSK |
+ ADMV1013_MIXER_IF_EN_MSK |
+ ADMV1013_DET_EN_MSK;
+
+ enable_reg = FIELD_PREP(ADMV1013_VGA_PD_MSK, 1) |
+ FIELD_PREP(ADMV1013_MIXER_PD_MSK, 1) |
+ FIELD_PREP(ADMV1013_QUAD_PD_MSK, 7) |
+ FIELD_PREP(ADMV1013_BG_PD_MSK, 1) |
+ FIELD_PREP(ADMV1013_MIXER_IF_EN_MSK, 0) |
+ FIELD_PREP(ADMV1013_DET_EN_MSK, 0);
+
+ admv1013_spi_update_bits(data, ADMV1013_REG_ENABLE, enable_reg_msk, enable_reg);
+}
+
+static int admv1013_properties_parse(struct admv1013_state *st)
+{
+ int ret;
+ const char *str;
+ struct spi_device *spi = st->spi;
+
+ st->det_en = device_property_read_bool(&spi->dev, "adi,detector-enable");
+
+ ret = device_property_read_string(&spi->dev, "adi,input-mode", &str);
+	if (ret)
+		str = "iq";
+
+ if (!strcmp(str, "iq"))
+ st->input_mode = ADMV1013_IQ_MODE;
+ else if (!strcmp(str, "if"))
+ st->input_mode = ADMV1013_IF_MODE;
+ else
+ return -EINVAL;
+
+ ret = device_property_read_string(&spi->dev, "adi,quad-se-mode", &str);
+	if (ret)
+		str = "diff";
+
+ if (!strcmp(str, "diff"))
+ st->quad_se_mode = ADMV1013_SE_MODE_DIFF;
+ else if (!strcmp(str, "se-pos"))
+ st->quad_se_mode = ADMV1013_SE_MODE_POS;
+ else if (!strcmp(str, "se-neg"))
+ st->quad_se_mode = ADMV1013_SE_MODE_NEG;
+ else
+ return -EINVAL;
+
+ st->reg = devm_regulator_get(&spi->dev, "vcm");
+ if (IS_ERR(st->reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->reg),
+ "failed to get the common-mode voltage\n");
+
+ st->clkin = devm_clk_get(&spi->dev, "lo_in");
+ if (IS_ERR(st->clkin))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
+ "failed to get the LO input clock\n");
+
+ return 0;
+}
+
+static int admv1013_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct admv1013_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ indio_dev->info = &admv1013_info;
+ indio_dev->name = "admv1013";
+ indio_dev->channels = admv1013_channels;
+ indio_dev->num_channels = ARRAY_SIZE(admv1013_channels);
+
+ st->spi = spi;
+
+ ret = admv1013_properties_parse(st);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified Common-Mode Voltage!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev, admv1013_reg_disable,
+ st->reg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(st->clkin);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, admv1013_clk_disable, st->clkin);
+ if (ret)
+ return ret;
+
+ st->nb.notifier_call = admv1013_freq_change;
+ ret = devm_clk_notifier_register(&spi->dev, st->clkin, &st->nb);
+ if (ret)
+ return ret;
+
+ mutex_init(&st->lock);
+
+ ret = admv1013_init(st);
+ if (ret) {
+ dev_err(&spi->dev, "admv1013 init failed\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev, admv1013_powerdown, st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id admv1013_id[] = {
+ { "admv1013", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, admv1013_id);
+
+static const struct of_device_id admv1013_of_match[] = {
+ { .compatible = "adi,admv1013" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, admv1013_of_match);
+
+static struct spi_driver admv1013_driver = {
+ .driver = {
+ .name = "admv1013",
+ .of_match_table = admv1013_of_match,
+ },
+ .probe = admv1013_probe,
+ .id_table = admv1013_id,
+};
+module_spi_driver(admv1013_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADMV1013");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 97b82f9a8e45..273f16dcaff8 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -345,9 +345,6 @@ err:
return IRQ_HANDLED;
}
-static const struct iio_trigger_ops afe4403_trigger_ops = {
-};
-
#define AFE4403_TIMING_PAIRS \
{ AFE440X_LED2STC, 0x000050 }, \
{ AFE440X_LED2ENDC, 0x0003e7 }, \
@@ -530,8 +527,6 @@ static int afe4403_probe(struct spi_device *spi)
iio_trigger_set_drvdata(afe->trig, indio_dev);
- afe->trig->ops = &afe4403_trigger_ops;
-
ret = iio_trigger_register(afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to register IIO trigger\n");
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 7ef3f5e34de5..aa9311e1e655 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -347,9 +347,6 @@ err:
return IRQ_HANDLED;
}
-static const struct iio_trigger_ops afe4404_trigger_ops = {
-};
-
/* Default timings from data-sheet */
#define AFE4404_TIMING_PAIRS \
{ AFE440X_PRPCOUNT, 39999 }, \
@@ -537,8 +534,6 @@ static int afe4404_probe(struct i2c_client *client,
iio_trigger_set_drvdata(afe->trig, indio_dev);
- afe->trig->ops = &afe4404_trigger_ops;
-
ret = iio_trigger_register(afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to register IIO trigger\n");
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 61e318431de9..501e286702ef 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -16,7 +16,7 @@ struct iio_buffer;
struct iio_chan_spec;
struct iio_dev;
-extern struct device_type iio_device_type;
+extern const struct device_type iio_device_type;
struct iio_dev_buffer_pair {
struct iio_dev *indio_dev;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
index 85b1934cec60..33d9afb1ba91 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
@@ -58,7 +58,7 @@ static int inv_icm42600_probe(struct i2c_client *client)
match = device_get_match_data(&client->dev);
if (!match)
return -EINVAL;
- chip = (enum inv_icm42600_chip)match;
+ chip = (uintptr_t)match;
regmap = devm_regmap_init_i2c(client, &inv_icm42600_regmap_config);
if (IS_ERR(regmap))
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
index 323789697a08..e6305e5fa975 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
@@ -57,7 +57,7 @@ static int inv_icm42600_probe(struct spi_device *spi)
match = device_get_match_data(&spi->dev);
if (!match)
return -EINVAL;
- chip = (enum inv_icm42600_chip)match;
+ chip = (uintptr_t)match;
regmap = devm_regmap_init_spi(spi, &inv_icm42600_regmap_config);
if (IS_ERR(regmap))
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 3ef17e3f50e2..fe03707ec2d3 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -110,7 +110,7 @@ static int inv_mpu_probe(struct i2c_client *client,
match = device_get_match_data(&client->dev);
if (match) {
- chip_type = (enum inv_devices)match;
+ chip_type = (uintptr_t)match;
name = client->name;
} else if (id) {
chip_type = (enum inv_devices)
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index b056f3fe2561..6800356b25fb 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -45,7 +45,7 @@ static int inv_mpu_probe(struct spi_device *spi)
chip_type = (enum inv_devices)spi_id->driver_data;
name = spi_id->name;
} else if ((match = device_get_match_data(&spi->dev))) {
- chip_type = (enum inv_devices)match;
+ chip_type = (uintptr_t)match;
name = dev_name(&spi->dev);
} else {
return -ENODEV;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index f2cbbc756459..727b4b6ac696 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2244,7 +2244,9 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
hub_settings = &hw->settings->shub_settings;
- if (hub_settings->master_en.addr) {
+ if (hub_settings->master_en.addr &&
+ (!dev_fwnode(dev) ||
+ !device_property_read_bool(dev, "st,disable-sensor-hub"))) {
err = st_lsm6dsx_shub_probe(hw, name);
if (err < 0)
return err;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index e180728914c0..94eb9f6cf128 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -1727,8 +1727,7 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
const struct iio_chan_spec *channels;
struct iio_buffer *buffer;
- int unwind_idx;
- int ret, i;
+ int ret, i, idx;
size_t sz;
channels = indio_dev->channels;
@@ -1743,15 +1742,12 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
if (!iio_dev_opaque->attached_buffers_cnt)
return 0;
- for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
- buffer = iio_dev_opaque->attached_buffers[i];
- ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, i);
- if (ret) {
- unwind_idx = i - 1;
+ for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
+ buffer = iio_dev_opaque->attached_buffers[idx];
+ ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
+ if (ret)
goto error_unwind_sysfs_and_mask;
- }
}
- unwind_idx = iio_dev_opaque->attached_buffers_cnt - 1;
sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
@@ -1767,9 +1763,9 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
return 0;
error_unwind_sysfs_and_mask:
- for (; unwind_idx >= 0; unwind_idx--) {
- buffer = iio_dev_opaque->attached_buffers[unwind_idx];
- __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, unwind_idx);
+ while (idx--) {
+ buffer = iio_dev_opaque->attached_buffers[idx];
+ __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
}
return ret;
}
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 463a63d5bf56..409c278a4c2c 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -702,6 +702,9 @@ static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
}
case IIO_VAL_CHAR:
return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
+ case IIO_VAL_INT_64:
+ tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]);
+ return sysfs_emit_at(buf, offset, "%lld", tmp2);
default:
return 0;
}
@@ -1619,7 +1622,7 @@ static void iio_dev_release(struct device *device)
kfree(iio_dev_opaque);
}
-struct device_type iio_device_type = {
+const struct device_type iio_device_type = {
.name = "iio_device",
.release = iio_dev_release,
};
@@ -1653,7 +1656,6 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
indio_dev->dev.type = &iio_device_type;
indio_dev->dev.bus = &iio_bus_type;
device_initialize(&indio_dev->dev);
- iio_device_set_drvdata(indio_dev, (void *)indio_dev);
mutex_init(&indio_dev->mlock);
mutex_init(&iio_dev_opaque->info_exist_lock);
INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 93990ff1dfe3..f504ed351b3e 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -162,6 +162,39 @@ static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
return trig;
}
+static void iio_reenable_work_fn(struct work_struct *work)
+{
+ struct iio_trigger *trig = container_of(work, struct iio_trigger,
+ reenable_work);
+
+ /*
+ * This 'might' occur after the trigger state is set to disabled -
+ * in that case the driver should skip reenabling.
+ */
+ trig->ops->reenable(trig);
+}
+
+/*
+ * In general, reenable callbacks may need to sleep and this path is
+ * not performance sensitive, so just queue up a work item
+ * to reenable the trigger for us.
+ *
+ * Races that can cause this:
+ * 1) A handler occurs entirely in interrupt context, so the
+ * final decrement of the use count happens in this interrupt.
+ * 2) The trigger has been removed, but one last interrupt gets through.
+ *
+ * For (1) we must call reenable, but not in atomic context.
+ * For (2) it should be safe to call reenable, if drivers never blindly
+ * reenable after state is off.
+ */
+static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
+{
+ if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+ trig->ops->reenable)
+ schedule_work(&trig->reenable_work);
+}
+
void iio_trigger_poll(struct iio_trigger *trig)
{
int i;
@@ -173,7 +206,7 @@ void iio_trigger_poll(struct iio_trigger *trig)
if (trig->subirqs[i].enabled)
generic_handle_irq(trig->subirq_base + i);
else
- iio_trigger_notify_done(trig);
+ iio_trigger_notify_done_atomic(trig);
}
}
}
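The general shape of this fix: the final reference may be dropped in interrupt context, where the reenable callback cannot sleep, so the last user schedules a work item and the reenable runs later in process context. A hedged sketch of the pattern outside IIO (my_dev and my_reenable() are illustrative, not part of the patch):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

struct my_dev {
	atomic_t use_count;
	struct work_struct reenable_work;
};

void my_reenable(struct my_dev *dev);	/* may sleep */

static void my_reenable_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  reenable_work);

	my_reenable(dev);	/* safe: we are in process context */
}

static void my_notify_done_atomic(struct my_dev *dev)
{
	/* never sleeps, so callable from hard-irq context */
	if (atomic_dec_and_test(&dev->use_count))
		schedule_work(&dev->reenable_work);
}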
@@ -535,6 +568,7 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent,
trig->dev.type = &iio_trig_type;
trig->dev.bus = &iio_bus_type;
device_initialize(&trig->dev);
+ INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);
mutex_init(&trig->pool_lock);
trig->subirq_base = irq_alloc_descs(-1, 0,
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index 3e7fb16ab1f6..50d34a98839c 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -10,6 +10,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
@@ -18,7 +19,7 @@
#include <linux/init.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
@@ -156,7 +157,6 @@ static int cm3605_probe(struct platform_device *pdev)
struct cm3605 *cm3605;
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
enum iio_chan_type ch_type;
u32 rset;
int irq;
@@ -171,7 +171,7 @@ static int cm3605_probe(struct platform_device *pdev)
cm3605->dev = dev;
cm3605->dir = IIO_EV_DIR_FALLING;
- ret = of_property_read_u32(np, "capella,aset-resistance-ohms", &rset);
+ ret = device_property_read_u32(dev, "capella,aset-resistance-ohms", &rset);
if (ret) {
dev_info(dev, "no RSET specified, assuming 100K\n");
rset = 100000;
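device_property_read_u32() is the firmware-agnostic counterpart of of_property_read_u32(): it resolves the property whether the device was enumerated via DT or ACPI, which is why the of_node local becomes unnecessary. A minimal sketch with a made-up property name:

#include <linux/device.h>
#include <linux/property.h>

static u32 read_u32_or_default(struct device *dev, u32 def)
{
	u32 val;

	/* "vendor,example-prop" is illustrative, not from this patch */
	if (device_property_read_u32(dev, "vendor,example-prop", &val))
		return def;	/* property missing or unreadable */
	return val;
}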
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index d1d9f2d319e4..b820041159f7 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1467,9 +1467,6 @@ static const struct iio_buffer_setup_ops gp2ap020a00f_buffer_setup_ops = {
.predisable = &gp2ap020a00f_buffer_predisable,
};
-static const struct iio_trigger_ops gp2ap020a00f_trigger_ops = {
-};
-
static int gp2ap020a00f_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1550,8 +1547,6 @@ static int gp2ap020a00f_probe(struct i2c_client *client,
goto error_uninit_buffer;
}
- data->trig->ops = &gp2ap020a00f_trigger_ops;
-
init_irq_work(&data->work, gp2ap020a00f_iio_trigger_work);
err = iio_trigger_register(data->trig);
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index b2983b1a9ed1..47d61ec2bb50 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * ltr501.c - Support for Lite-On LTR501 ambient light and proximity sensor
+ * Support for Lite-On LTR501 and similar ambient light and proximity sensors.
*
* Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
*
@@ -98,6 +98,7 @@ enum {
ltr501 = 0,
ltr559,
ltr301,
+ ltr303,
};
struct ltr501_gain {
@@ -165,6 +166,7 @@ struct ltr501_data {
struct regmap_field *reg_ps_rate;
struct regmap_field *reg_als_prst;
struct regmap_field *reg_ps_prst;
+ uint32_t near_level;
};
static const struct ltr501_samp_table ltr501_als_samp_table[] = {
@@ -524,6 +526,25 @@ static int ltr501_write_intr_prst(struct ltr501_data *data,
return -EINVAL;
}
+static ssize_t ltr501_read_near_level(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ltr501_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", data->near_level);
+}
+
+static const struct iio_chan_spec_ext_info ltr501_ext_info[] = {
+ {
+ .name = "nearlevel",
+ .shared = IIO_SEPARATE,
+ .read = ltr501_read_near_level,
+ },
+ { /* sentinel */ }
+};
+
static const struct iio_event_spec ltr501_als_event_spec[] = {
{
.type = IIO_EV_TYPE_THRESH,
@@ -608,6 +629,7 @@ static const struct iio_chan_spec ltr501_channels[] = {
},
.event_spec = ltr501_pxs_event_spec,
.num_event_specs = ARRAY_SIZE(ltr501_pxs_event_spec),
+ .ext_info = ltr501_ext_info,
},
IIO_CHAN_SOFT_TIMESTAMP(3),
};
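Each iio_chan_spec_ext_info entry becomes an extra per-channel sysfs attribute; with .shared = IIO_SEPARATE on the proximity channel, "nearlevel" above surfaces as in_proximity_nearlevel. A hedged sketch of such a callback (the mydrv names are illustrative; sysfs_emit() is the currently preferred way to fill a sysfs buffer, though the patch itself uses sprintf()):

#include <linux/iio/iio.h>
#include <linux/sysfs.h>

struct mydrv_data { u32 near_level; };	/* illustrative driver state */

static ssize_t mydrv_read_near_level(struct iio_dev *indio_dev,
				     uintptr_t priv,
				     const struct iio_chan_spec *chan,
				     char *buf)
{
	struct mydrv_data *data = iio_priv(indio_dev);

	return sysfs_emit(buf, "%u\n", data->near_level);
}

static const struct iio_chan_spec_ext_info mydrv_ext_info[] = {
	{
		.name = "nearlevel",
		.shared = IIO_SEPARATE,
		.read = mydrv_read_near_level,
	},
	{ }	/* sentinel */
};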
@@ -1231,6 +1253,18 @@ static const struct ltr501_chip_info ltr501_chip_info_tbl[] = {
.channels = ltr301_channels,
.no_channels = ARRAY_SIZE(ltr301_channels),
},
+ [ltr303] = {
+ .partid = 0x0A,
+ .als_gain = ltr559_als_gain_tbl,
+ .als_gain_tbl_size = ARRAY_SIZE(ltr559_als_gain_tbl),
+ .als_mode_active = BIT(0),
+ .als_gain_mask = BIT(2) | BIT(3) | BIT(4),
+ .als_gain_shift = 2,
+ .info = &ltr301_info,
+ .info_no_irq = &ltr301_info_no_irq,
+ .channels = ltr301_channels,
+ .no_channels = ARRAY_SIZE(ltr301_channels),
+ },
};
static int ltr501_write_contr(struct ltr501_data *data, u8 als_val, u8 ps_val)
@@ -1518,6 +1552,10 @@ static int ltr501_probe(struct i2c_client *client,
if ((partid >> 4) != data->chip_info->partid)
return -ENODEV;
+ if (device_property_read_u32(&client->dev, "proximity-near-level",
+ &data->near_level))
+ data->near_level = 0;
+
indio_dev->info = data->chip_info->info;
indio_dev->channels = data->chip_info->channels;
indio_dev->num_channels = data->chip_info->no_channels;
@@ -1605,6 +1643,7 @@ static const struct i2c_device_id ltr501_id[] = {
{ "ltr501", ltr501},
{ "ltr559", ltr559},
{ "ltr301", ltr301},
+ { "ltr303", ltr303},
{ }
};
MODULE_DEVICE_TABLE(i2c, ltr501_id);
@@ -1613,6 +1652,7 @@ static const struct of_device_id ltr501_of_match[] = {
{ .compatible = "liteon,ltr501", },
{ .compatible = "liteon,ltr559", },
{ .compatible = "liteon,ltr301", },
+ { .compatible = "liteon,ltr303", },
{}
};
MODULE_DEVICE_TABLE(of, ltr501_of_match);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 6e82dc54a417..55879a20ae52 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -929,7 +929,7 @@ static int ak8975_probe(struct i2c_client *client,
/* id will be NULL when enumerated via ACPI */
match = device_get_match_data(&client->dev);
if (match) {
- chipset = (enum asahi_compass_chipset)(match);
+ chipset = (uintptr_t)match;
name = dev_name(&client->dev);
} else if (id) {
chipset = (enum asahi_compass_chipset)(id->driver_data);
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index f08726bf5ec3..5a730d9bdbb0 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -246,7 +246,7 @@ static const struct iio_enum hmc5843_meas_conf_enum = {
static const struct iio_chan_spec_ext_info hmc5843_ext_info[] = {
IIO_ENUM("meas_conf", IIO_SHARED_BY_TYPE, &hmc5843_meas_conf_enum),
- IIO_ENUM_AVAILABLE("meas_conf", &hmc5843_meas_conf_enum),
+ IIO_ENUM_AVAILABLE("meas_conf", IIO_SHARED_BY_TYPE, &hmc5843_meas_conf_enum),
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, hmc5843_get_mount_matrix),
{ }
};
@@ -260,7 +260,7 @@ static const struct iio_enum hmc5983_meas_conf_enum = {
static const struct iio_chan_spec_ext_info hmc5983_ext_info[] = {
IIO_ENUM("meas_conf", IIO_SHARED_BY_TYPE, &hmc5983_meas_conf_enum),
- IIO_ENUM_AVAILABLE("meas_conf", &hmc5983_meas_conf_enum),
+ IIO_ENUM_AVAILABLE("meas_conf", IIO_SHARED_BY_TYPE, &hmc5983_meas_conf_enum),
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, hmc5843_get_mount_matrix),
{ }
};
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index c96415a1aead..17c62d806218 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -291,7 +291,8 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
goto release;
*val = sign_extend32(
- be16_to_cpu(buffer[chan->scan_index]), 15);
+ be16_to_cpu(buffer[chan->scan_index]),
+ chan->scan_type.realbits - 1);
ret = IIO_VAL_INT;
break;
case IIO_TEMP: /* in 1 C / LSB */
@@ -306,7 +307,8 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
if (ret < 0)
goto release;
- *val = sign_extend32(ret, 7);
+ *val = sign_extend32(ret,
+ chan->scan_type.realbits - 1);
ret = IIO_VAL_INT;
break;
default:
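sign_extend32() takes the bit index of the sign bit, hence realbits - 1: tying it to the channel's scan_type keeps the extension correct if the declared sample width ever changes. For a 12-bit two's-complement sample:

#include <linux/bitops.h>

static int extend_12bit(u32 raw)
{
	/* bit 11 is the sign bit of a 12-bit sample */
	return sign_extend32(raw, 11);	/* 0xFFF -> -1, 0x7FF -> 2047 */
}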
diff --git a/drivers/iio/potentiometer/mcp41010.c b/drivers/iio/potentiometer/mcp41010.c
index 79ccac6d4be0..30a4594d4e11 100644
--- a/drivers/iio/potentiometer/mcp41010.c
+++ b/drivers/iio/potentiometer/mcp41010.c
@@ -21,9 +21,9 @@
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#define MCP41010_MAX_WIPERS 2
@@ -146,7 +146,7 @@ static int mcp41010_probe(struct spi_device *spi)
data = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
data->spi = spi;
- data->cfg = of_device_get_match_data(&spi->dev);
+ data->cfg = device_get_match_data(&spi->dev);
if (!data->cfg)
data->cfg = &mcp41010_cfg[spi_get_device_id(spi)->driver_data];
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index ed30bdaa10ec..fe514f0b5506 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -271,9 +271,6 @@ static int lmp91000_buffer_cb(const void *val, void *private)
return 0;
}
-static const struct iio_trigger_ops lmp91000_trigger_ops = {
-};
-
static int lmp91000_buffer_postenable(struct iio_dev *indio_dev)
{
struct lmp91000_data *data = iio_priv(indio_dev);
@@ -330,7 +327,6 @@ static int lmp91000_probe(struct i2c_client *client,
return -ENOMEM;
}
- data->trig->ops = &lmp91000_trigger_ops;
init_completion(&data->completion);
ret = lmp91000_read_config(data);
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 6b7da40f99c8..bf8167f43c56 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1138,7 +1138,6 @@ int bmp280_common_probe(struct device *dev,
}
EXPORT_SYMBOL(bmp280_common_probe);
-#ifdef CONFIG_PM
static int bmp280_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
@@ -1159,15 +1158,9 @@ static int bmp280_runtime_resume(struct device *dev)
usleep_range(data->start_up_time, data->start_up_time + 100);
return data->chip_info->chip_config(data);
}
-#endif /* CONFIG_PM */
-const struct dev_pm_ops bmp280_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(bmp280_runtime_suspend,
- bmp280_runtime_resume, NULL)
-};
-EXPORT_SYMBOL(bmp280_dev_pm_ops);
+EXPORT_RUNTIME_DEV_PM_OPS(bmp280_dev_pm_ops, bmp280_runtime_suspend,
+ bmp280_runtime_resume, NULL);
MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index 8b03ea15c0d0..35045bd92846 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -58,7 +58,7 @@ static struct i2c_driver bmp280_i2c_driver = {
.driver = {
.name = "bmp280",
.of_match_table = bmp280_of_i2c_match,
- .pm = &bmp280_dev_pm_ops,
+ .pm = pm_ptr(&bmp280_dev_pm_ops),
},
.probe = bmp280_i2c_probe,
.id_table = bmp280_i2c_id,
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 625b86878ad8..41f6cc56d229 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -109,7 +109,7 @@ static struct spi_driver bmp280_spi_driver = {
.driver = {
.name = "bmp280",
.of_match_table = bmp280_of_spi_match,
- .pm = &bmp280_dev_pm_ops,
+ .pm = pm_ptr(&bmp280_dev_pm_ops),
},
.id_table = bmp280_spi_id,
.probe = bmp280_spi_probe,
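A hedged sketch of the #ifdef-free runtime-PM pattern adopted above ("mydrv" is illustrative). As I read the macro, EXPORT_RUNTIME_DEV_PM_OPS() also wires system sleep to pm_runtime_force_{suspend,resume}() (matching the removed open-coded ops) and only exports the symbol when CONFIG_PM is enabled; pm_ptr() then compiles the reference to NULL in !CONFIG_PM builds so the callbacks can be garbage-collected:

#include <linux/pm_runtime.h>

static int mydrv_runtime_suspend(struct device *dev)
{
	return 0;	/* put the hardware into a low-power state here */
}

static int mydrv_runtime_resume(struct device *dev)
{
	return 0;	/* reconfigure the hardware here */
}

EXPORT_RUNTIME_DEV_PM_OPS(mydrv_pm_ops, mydrv_runtime_suspend,
			  mydrv_runtime_resume, NULL);

/* In the bus driver struct: .driver.pm = pm_ptr(&mydrv_pm_ops), */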
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 1eb9e7b29e05..e95b9a5475b4 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -74,7 +74,6 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct mpl3115_data *data = iio_priv(indio_dev);
- __be32 tmp = 0;
int ret;
switch (mask) {
@@ -84,7 +83,9 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
return ret;
switch (chan->type) {
- case IIO_PRESSURE: /* in 0.25 pascal / LSB */
+ case IIO_PRESSURE: { /* in 0.25 pascal / LSB */
+ __be32 tmp = 0;
+
mutex_lock(&data->lock);
ret = mpl3115_request(data);
if (ret < 0) {
@@ -96,10 +97,13 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
if (ret < 0)
break;
- *val = be32_to_cpu(tmp) >> 12;
+ *val = be32_to_cpu(tmp) >> chan->scan_type.shift;
ret = IIO_VAL_INT;
break;
- case IIO_TEMP: /* in 0.0625 celsius / LSB */
+ }
+ case IIO_TEMP: { /* in 0.0625 celsius / LSB */
+ __be16 tmp;
+
mutex_lock(&data->lock);
ret = mpl3115_request(data);
if (ret < 0) {
@@ -111,9 +115,11 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
if (ret < 0)
break;
- *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
+ *val = sign_extend32(be16_to_cpu(tmp) >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
ret = IIO_VAL_INT;
break;
+ }
default:
ret = -EINVAL;
break;
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 86b1c4b1820d..cbc9349c342a 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -50,9 +50,9 @@ struct ms5611_state {
const struct ms5611_osr *pressure_osr;
const struct ms5611_osr *temp_osr;
- int (*reset)(struct device *dev);
- int (*read_prom_word)(struct device *dev, int index, u16 *word);
- int (*read_adc_temp_and_pressure)(struct device *dev,
+ int (*reset)(struct ms5611_state *st);
+ int (*read_prom_word)(struct ms5611_state *st, int index, u16 *word);
+ int (*read_adc_temp_and_pressure)(struct ms5611_state *st,
s32 *temp, s32 *pressure);
struct ms5611_chip_info *chip_info;
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index ee75f08655c9..a4d0b54cde9b 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -85,8 +85,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
struct ms5611_state *st = iio_priv(indio_dev);
for (i = 0; i < MS5611_PROM_WORDS_NB; i++) {
- ret = st->read_prom_word(&indio_dev->dev,
- i, &st->chip_info->prom[i]);
+ ret = st->read_prom_word(st, i, &st->chip_info->prom[i]);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read prom at %d\n", i);
@@ -108,7 +107,7 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev,
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
- ret = st->read_adc_temp_and_pressure(&indio_dev->dev, temp, pressure);
+ ret = st->read_adc_temp_and_pressure(st, temp, pressure);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read temperature and pressure\n");
@@ -196,7 +195,7 @@ static int ms5611_reset(struct iio_dev *indio_dev)
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
- ret = st->reset(&indio_dev->dev);
+ ret = st->reset(st);
if (ret < 0) {
dev_err(&indio_dev->dev, "failed to reset device\n");
return ret;
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 5c82d80f85b6..1047a85527a9 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -20,17 +20,15 @@
#include "ms5611.h"
-static int ms5611_i2c_reset(struct device *dev)
+static int ms5611_i2c_reset(struct ms5611_state *st)
{
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
-
return i2c_smbus_write_byte(st->client, MS5611_RESET);
}
-static int ms5611_i2c_read_prom_word(struct device *dev, int index, u16 *word)
+static int ms5611_i2c_read_prom_word(struct ms5611_state *st, int index,
+ u16 *word)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = i2c_smbus_read_word_swapped(st->client,
MS5611_READ_PROM_WORD + (index << 1));
@@ -57,11 +55,10 @@ static int ms5611_i2c_read_adc(struct ms5611_state *st, s32 *val)
return 0;
}
-static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev,
+static int ms5611_i2c_read_adc_temp_and_pressure(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
const struct ms5611_osr *osr = st->temp_osr;
ret = i2c_smbus_write_byte(st->client, osr->cmd);
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 79bed64c9b68..9fa2dcd71760 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -15,18 +15,17 @@
#include "ms5611.h"
-static int ms5611_spi_reset(struct device *dev)
+static int ms5611_spi_reset(struct ms5611_state *st)
{
u8 cmd = MS5611_RESET;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
return spi_write_then_read(st->client, &cmd, 1, NULL, 0);
}
-static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word)
+static int ms5611_spi_read_prom_word(struct ms5611_state *st, int index,
+ u16 *word)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = spi_w8r16be(st->client, MS5611_READ_PROM_WORD + (index << 1));
if (ret < 0)
@@ -37,11 +36,10 @@ static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word)
return 0;
}
-static int ms5611_spi_read_adc(struct device *dev, s32 *val)
+static int ms5611_spi_read_adc(struct ms5611_state *st, s32 *val)
{
int ret;
u8 buf[3] = { MS5611_READ_ADC };
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = spi_write_then_read(st->client, buf, 1, buf, 3);
if (ret < 0)
@@ -52,11 +50,10 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val)
return 0;
}
-static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
+static int ms5611_spi_read_adc_temp_and_pressure(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
const struct ms5611_osr *osr = st->temp_osr;
/*
@@ -68,7 +65,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
return ret;
usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
- ret = ms5611_spi_read_adc(dev, temp);
+ ret = ms5611_spi_read_adc(st, temp);
if (ret < 0)
return ret;
@@ -78,7 +75,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
return ret;
usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
- return ms5611_spi_read_adc(dev, pressure);
+ return ms5611_spi_read_adc(st, pressure);
}
static int ms5611_spi_probe(struct spi_device *spi)
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 3797a8f54276..51f4f92ae84a 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -133,7 +133,7 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
unsigned long val;
int ret;
- ret = kstrtoul((const char *) buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return -EINVAL;
@@ -238,9 +238,6 @@ err_read:
return IRQ_HANDLED;
}
-static const struct iio_trigger_ops iio_interrupt_trigger_ops = {
-};
-
static void as3935_event_work(struct work_struct *work)
{
struct as3935_state *st;
@@ -417,7 +414,6 @@ static int as3935_probe(struct spi_device *spi)
st->trig = trig;
st->noise_tripped = jiffies - HZ;
iio_trigger_set_drvdata(trig, indio_dev);
- trig->ops = &iio_interrupt_trigger_ops;
ret = devm_iio_trigger_register(dev, trig);
if (ret) {
diff --git a/drivers/iio/test/iio-test-format.c b/drivers/iio/test/iio-test-format.c
index f1e951eddb43..237321436b83 100644
--- a/drivers/iio/test/iio-test-format.c
+++ b/drivers/iio/test/iio-test-format.c
@@ -14,10 +14,13 @@
static void iio_test_iio_format_value_integer(struct kunit *test)
{
- char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ char *buf;
int val;
int ret;
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
val = 42;
ret = iio_format_value(buf, IIO_VAL_INT, 1, &val);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "42\n");
@@ -41,153 +44,219 @@ static void iio_test_iio_format_value_integer(struct kunit *test)
static void iio_test_iio_format_value_fixedpoint(struct kunit *test)
{
- char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
int values[2];
+ char *buf;
int ret;
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
/* positive >= 1 */
values[0] = 1;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000010\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000010 dB\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000000010\n");
/* positive < 1 */
values[0] = 0;
values[1] = 12;
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000012\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000012 dB\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000012\n");
/* negative <= -1 */
values[0] = -1;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000010\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000010 dB\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000000010\n");
/* negative > -1 */
values[0] = 0;
values[1] = -123;
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000123\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000123 dB\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000000123\n");
}
static void iio_test_iio_format_value_fractional(struct kunit *test)
{
- char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
int values[2];
+ char *buf;
int ret;
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
/* positive < 1 */
values[0] = 1;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.100000000\n");
/* positive >= 1 */
values[0] = 100;
values[1] = 3;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "33.333333333\n");
/* negative > -1 */
values[0] = -1;
values[1] = 1000000000;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000000001\n");
/* negative <= -1 */
values[0] = -200;
values[1] = 3;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-66.666666666\n");
/* Zero */
values[0] = 0;
values[1] = -10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000000\n");
}
static void iio_test_iio_format_value_fractional_log2(struct kunit *test)
{
- char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
int values[2];
+ char *buf;
int ret;
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
/* positive < 1 */
values[0] = 123;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.120117187\n");
/* positive >= 1 */
values[0] = 1234567;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1205.631835937\n");
/* negative > -1 */
values[0] = -123;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.120117187\n");
/* negative <= -1 */
values[0] = -1234567;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1205.631835937\n");
/* Zero */
values[0] = 0;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000000\n");
}
static void iio_test_iio_format_value_multiple(struct kunit *test)
{
- char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
int values[] = {1, -2, 3, -4, 5};
+ char *buf;
int ret;
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
ret = iio_format_value(buf, IIO_VAL_INT_MULTIPLE,
ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1 -2 3 -4 5 \n");
}
+static void iio_test_iio_format_value_integer_64(struct kunit *test)
+{
+ int values[2];
+ s64 value;
+ char *buf;
+ int ret;
+
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ value = 24;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "24\n");
+
+ value = -24;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-24\n");
+
+ value = 0;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0\n");
+
+ value = UINT_MAX;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "4294967295\n");
+
+ value = -((s64)UINT_MAX);
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-4294967295\n");
+
+ value = LLONG_MAX;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "9223372036854775807\n");
+
+ value = LLONG_MIN;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-9223372036854775808\n");
+}
+
static struct kunit_case iio_format_test_cases[] = {
KUNIT_CASE(iio_test_iio_format_value_integer),
KUNIT_CASE(iio_test_iio_format_value_fixedpoint),
KUNIT_CASE(iio_test_iio_format_value_fractional),
KUNIT_CASE(iio_test_iio_format_value_fractional_log2),
KUNIT_CASE(iio_test_iio_format_value_multiple),
+ KUNIT_CASE(iio_test_iio_format_value_integer_64),
{}
};
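The recurring change in this test file is the same: never dereference a test allocation before asserting it succeeded. A minimal KUnit case following that pattern (kunit_kmalloc() memory is freed automatically when the test finishes):

#include <kunit/test.h>
#include <linux/slab.h>

static void example_alloc_test(struct kunit *test)
{
	char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);	/* abort test on failure */
	buf[0] = '\0';	/* safe to use from here on */
}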
diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c
index f746c460bf2a..5f49cd105fae 100644
--- a/drivers/iio/trigger/iio-trig-interrupt.c
+++ b/drivers/iio/trigger/iio-trig-interrupt.c
@@ -25,9 +25,6 @@ static irqreturn_t iio_interrupt_trigger_poll(int irq, void *private)
return IRQ_HANDLED;
}
-static const struct iio_trigger_ops iio_interrupt_trigger_ops = {
-};
-
static int iio_interrupt_trigger_probe(struct platform_device *pdev)
{
struct iio_interrupt_trigger_info *trig_info;
@@ -58,7 +55,6 @@ static int iio_interrupt_trigger_probe(struct platform_device *pdev)
}
iio_trigger_set_drvdata(trig, trig_info);
trig_info->irq = irq;
- trig->ops = &iio_interrupt_trigger_ops;
ret = request_irq(irq, iio_interrupt_trigger_poll,
irqflags, trig->name, trig);
if (ret) {
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index e9adfff45b39..2a4b75897910 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -124,9 +124,6 @@ static const struct attribute_group *iio_sysfs_trigger_attr_groups[] = {
NULL
};
-static const struct iio_trigger_ops iio_sysfs_trigger_ops = {
-};
-
static int iio_sysfs_trigger_probe(int id)
{
struct iio_sysfs_trig *t;
@@ -156,7 +153,6 @@ static int iio_sysfs_trigger_probe(int id)
}
t->trig->dev.groups = iio_sysfs_trigger_attr_groups;
- t->trig->ops = &iio_sysfs_trigger_ops;
iio_trigger_set_drvdata(t->trig, t);
t->work = IRQ_WORK_INIT_HARD(iio_sysfs_trigger_work);
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 4353b749ecef..4f9461e1412c 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -696,9 +696,9 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
.write = stm32_count_set_preset
},
IIO_ENUM("enable_mode", IIO_SEPARATE, &stm32_enable_mode_enum),
- IIO_ENUM_AVAILABLE("enable_mode", &stm32_enable_mode_enum),
+ IIO_ENUM_AVAILABLE("enable_mode", IIO_SHARED_BY_TYPE, &stm32_enable_mode_enum),
IIO_ENUM("trigger_mode", IIO_SEPARATE, &stm32_trigger_mode_enum),
- IIO_ENUM_AVAILABLE("trigger_mode", &stm32_trigger_mode_enum),
+ IIO_ENUM_AVAILABLE("trigger_mode", IIO_SHARED_BY_TYPE, &stm32_trigger_mode_enum),
{}
};
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 0c98dd3dee67..f6aa1a964573 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -33,6 +33,7 @@
* SOFTWARE.
*/
+#include <linux/if_vlan.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -955,7 +956,7 @@ int rdma_query_gid(struct ib_device *device, u32 port_num,
{
struct ib_gid_table *table;
unsigned long flags;
- int res = -EINVAL;
+ int res;
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
@@ -963,9 +964,15 @@ int rdma_query_gid(struct ib_device *device, u32 port_num,
table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
- if (index < 0 || index >= table->sz ||
- !is_gid_entry_valid(table->data_vec[index]))
+ if (index < 0 || index >= table->sz) {
+ res = -EINVAL;
goto done;
+ }
+
+ if (!is_gid_entry_valid(table->data_vec[index])) {
+ res = -ENOENT;
+ goto done;
+ }
memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
res = 0;
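With the split above, callers can tell a hole in the GID table (-ENOENT) from a genuinely bad index (-EINVAL) and keep scanning, as the cma.c and device.c hunks below do. A hedged sketch of such a lookup loop:

#include <rdma/ib_verbs.h>
#include <linux/string.h>

static bool gid_present(struct ib_device *device, u32 port,
			u32 tbl_len, const union ib_gid *dgid)
{
	union ib_gid gid;
	u32 i;

	for (i = 0; i < tbl_len; i++) {
		if (rdma_query_gid(device, port, i, &gid))
			continue;	/* e.g. -ENOENT: empty slot */
		if (!memcmp(&gid, dgid, sizeof(gid)))
			return true;
	}
	return false;
}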
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 835ac54d4a24..27a00ce2e101 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -766,6 +766,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
unsigned int p;
u16 pkey, index;
enum ib_port_state port_state;
+ int ret;
int i;
cma_dev = NULL;
@@ -784,9 +785,14 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
continue;
- for (i = 0; !rdma_query_gid(cur_dev->device,
- p, i, &gid);
- i++) {
+
+ for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
+ ++i) {
+ ret = rdma_query_gid(cur_dev->device, p, i,
+ &gid);
+ if (ret)
+ continue;
+
if (!memcmp(&gid, dgid, sizeof(gid))) {
cma_dev = cur_dev;
sgid = gid;
@@ -4033,8 +4039,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
- req.private_data_len = offset + conn_param->private_data_len;
- if (req.private_data_len < conn_param->private_data_len)
+ if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
return -EINVAL;
if (req.private_data_len) {
@@ -4093,8 +4098,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
- req.private_data_len = offset + conn_param->private_data_len;
- if (req.private_data_len < conn_param->private_data_len)
+ if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
return -EINVAL;
if (req.private_data_len) {
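check_add_overflow(a, b, d) stores a + b in *d and returns true if the sum does not fit *d's type. Unlike the removed `result < operand` post-check, it also catches truncation when the destination field is narrower than the operands. A minimal sketch of the guard:

#include <linux/overflow.h>

/* Illustrative: compute total = offset + len into a narrow field. */
static int total_private_len(unsigned int offset, unsigned int len,
			     u8 *total)
{
	if (check_add_overflow(offset, len, total))
		return -EINVAL;	/* sum would overflow or truncate */
	return 0;
}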
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 22a4adda7981..a311df07b1bd 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2461,7 +2461,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
++i) {
ret = rdma_query_gid(device, port, i, &tmp_gid);
if (ret)
- return ret;
+ continue;
+
if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
*port_num = port;
if (index)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index a3f84b50c46a..84c53bd2a52d 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -433,6 +433,7 @@ static struct attribute *port_default_attrs[] = {
&ib_port_attr_link_layer.attr,
NULL
};
+ATTRIBUTE_GROUPS(port_default);
static ssize_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf)
{
@@ -774,7 +775,7 @@ static void ib_port_gid_attr_release(struct kobject *kobj)
static struct kobj_type port_type = {
.release = ib_port_release,
.sysfs_ops = &port_sysfs_ops,
- .default_attrs = port_default_attrs
+ .default_groups = port_default_groups,
};
static struct kobj_type gid_attr_type = {
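ATTRIBUTE_GROUPS(port_default) generates both a port_default_group and the NULL-terminated port_default_groups[] array from port_default_attrs[], which is what .default_groups (the replacement for the removed .default_attrs field) expects. Sketch with illustrative names:

#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	/* &dev_attr_foo.attr, ... */
	NULL,
};
ATTRIBUTE_GROUPS(example);	/* defines example_group and example_groups */

/* usage: .default_groups = example_groups, in the kobj_type */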
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 7a47343d11f9..aead24c1a682 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -227,7 +227,6 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
const struct mmu_interval_notifier_ops *ops)
{
struct ib_umem_odp *umem_odp;
- struct mm_struct *mm;
int ret;
if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
@@ -241,7 +240,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
umem_odp->umem.length = size;
umem_odp->umem.address = addr;
umem_odp->umem.writable = ib_access_writable(access);
- umem_odp->umem.owning_mm = mm = current->mm;
+ umem_odp->umem.owning_mm = current->mm;
umem_odp->notifier.ops = ops;
umem_odp->page_shift = PAGE_SHIFT;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index d1345d76d9b1..6b6393176b3c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1399,7 +1399,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
IB_SIGNAL_REQ_WR;
attr.qp_type = cmd->qp_type;
- attr.create_flags = 0;
attr.cap.max_send_wr = cmd->max_send_wr;
attr.cap.max_recv_wr = cmd->max_recv_wr;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 29cc0d14399a..3224f18a66e5 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -262,13 +262,12 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
u16 index, u16 *pkey)
{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ if (index > 0)
+ return -EINVAL;
- /* Ignore port_num */
+ *pkey = IB_DEFAULT_PKEY_FULL;
- memset(pkey, 0, sizeof(*pkey));
- return bnxt_qplib_get_pkey(&rdev->qplib_res,
- &rdev->qplib_res.pkey_tbl, index, pkey);
+ return 0;
}
int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index b44944fb9b24..3d6834d3d4fb 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -893,7 +893,6 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
qplib_srq);
struct ib_event ib_event;
- int rc = 0;
ib_event.device = &srq->rdev->ibdev;
ib_event.element.srq = &srq->ib_srq;
@@ -907,7 +906,7 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
(*srq->ib_srq.event_handler)(&ib_event,
srq->ib_srq.srq_context);
}
- return rc;
+ return 0;
}
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index ca88849559bf..96e581ced50e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -46,6 +46,7 @@
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
+#include <rdma/ib_mad.h>
#include "roce_hsi.h"
@@ -1232,7 +1233,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_modify_qp req;
struct creq_modify_qp_resp resp;
- u16 cmd_flags = 0, pkey;
+ u16 cmd_flags = 0;
u32 temp32[4];
u32 bmask;
int rc;
@@ -1255,11 +1256,9 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
req.access = qp->access;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
- if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
- qp->pkey_index, &pkey))
- req.pkey = cpu_to_le16(pkey);
- }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
+ req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
+
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
req.qkey = cpu_to_le32(qp->qkey);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 3de854727460..061b2895dd9b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -555,7 +555,7 @@ skip_ctx_setup:
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
- kfree(rcfw->cmdq.cmdq_bitmap);
+ bitmap_free(rcfw->cmdq.cmdq_bitmap);
kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
@@ -572,7 +572,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_sg_info sginfo = {};
struct bnxt_qplib_cmdq_ctx *cmdq;
struct bnxt_qplib_creq_ctx *creq;
- u32 bmap_size = 0;
rcfw->pdev = res->pdev;
cmdq = &rcfw->cmdq;
@@ -613,13 +612,10 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
if (!rcfw->crsqe_tbl)
goto fail;
- bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
- cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
+ cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL);
if (!cmdq->cmdq_bitmap)
goto fail;
- cmdq->bmap_size = bmap_size;
-
/* Allocate one extra to hold the QP1 entries */
rcfw->qp_tbl_size = qp_tbl_sz + 1;
rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
@@ -667,8 +663,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
iounmap(cmdq->cmdq_mbox.reg.bar_reg);
iounmap(creq->creq_db.reg.bar_reg);
- indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size);
- if (indx != cmdq->bmap_size)
+ indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth);
+ if (indx != rcfw->cmdq_depth)
dev_err(&rcfw->pdev->dev,
"disabling RCFW with pending cmd-bit %lx\n", indx);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 82faa4e4cda8..0a3d8e7da3d4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -152,7 +152,6 @@ struct bnxt_qplib_cmdq_ctx {
wait_queue_head_t waitq;
unsigned long flags;
unsigned long *cmdq_bitmap;
- u32 bmap_size;
u32 seq_num;
};
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index bc1ba4b51ba4..126d4f26f75a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -649,31 +649,6 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}
-static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
- if (!pkey_tbl->tbl)
- dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
- else
- kfree(pkey_tbl->tbl);
-
- pkey_tbl->tbl = NULL;
- pkey_tbl->max = 0;
- pkey_tbl->active = 0;
-}
-
-static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl,
- u16 max)
-{
- pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
- if (!pkey_tbl->tbl)
- return -ENOMEM;
-
- pkey_tbl->max = max;
- return 0;
-};
-
/* PDs */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
@@ -843,24 +818,6 @@ unmap_io:
return -ENOMEM;
}
-/* PKEYs */
-static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
- memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
- pkey_tbl->active = 0;
-}
-
-static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
- u16 pkey = 0xFFFF;
-
- memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
-
- /* pkey default = 0xFFFF */
- bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
-}
-
/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats)
@@ -891,21 +848,18 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
- bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}
int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
- bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);
return 0;
}
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
- bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
@@ -924,10 +878,6 @@ int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
if (rc)
goto fail;
- rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
- if (rc)
- goto fail;
-
rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index e1411a2352a7..982e2c96dac2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -185,12 +185,6 @@ struct bnxt_qplib_sgid_tbl {
u8 *vlan;
};
-struct bnxt_qplib_pkey_tbl {
- u16 *tbl;
- u16 max;
- u16 active;
-};
-
struct bnxt_qplib_dpi {
u32 dpi;
void __iomem *dbr;
@@ -258,7 +252,6 @@ struct bnxt_qplib_res {
struct bnxt_qplib_rcfw *rcfw;
struct bnxt_qplib_pd_tbl pd_tbl;
struct bnxt_qplib_sgid_tbl sgid_tbl;
- struct bnxt_qplib_pkey_tbl pkey_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
bool prio;
bool is_vf;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 379e715ebd30..b802981b7171 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -146,17 +146,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_srq = le16_to_cpu(sb->max_srq);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
- attr->max_pkey = le32_to_cpu(sb->max_pkeys);
- /*
- * Some versions of FW reports more than 0xFFFF.
- * Restrict it for now to 0xFFFF to avoid
- * reporting trucated value
- */
- if (attr->max_pkey > 0xFFFF) {
- /* ib_port_attr::pkey_tbl_len is u16 */
- attr->max_pkey = 0xFFFF;
- }
-
+ attr->max_pkey = 1;
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
@@ -414,93 +404,6 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return rc;
}
-/* pkeys */
-int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
- u16 *pkey)
-{
- if (index == 0xFFFF) {
- *pkey = 0xFFFF;
- return 0;
- }
- if (index >= pkey_tbl->max) {
- dev_err(&res->pdev->dev,
- "Index %d exceeded PKEY table max (%d)\n",
- index, pkey_tbl->max);
- return -EINVAL;
- }
- memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey));
- return 0;
-}
-
-int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update)
-{
- int i, rc = 0;
-
- if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "PKEY table not allocated\n");
- return -EINVAL;
- }
-
- /* Do we need a pkey_lock here? */
- if (!pkey_tbl->active) {
- dev_err(&res->pdev->dev, "PKEY table has no active entries\n");
- return -ENOMEM;
- }
- for (i = 0; i < pkey_tbl->max; i++) {
- if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
- break;
- }
- if (i == pkey_tbl->max) {
- dev_err(&res->pdev->dev,
- "PKEY 0x%04x not found in the pkey table\n", *pkey);
- return -ENOMEM;
- }
- memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
- pkey_tbl->active--;
-
- /* unlock */
- return rc;
-}
-
-int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update)
-{
- int i, free_idx, rc = 0;
-
- if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "PKEY table not allocated\n");
- return -EINVAL;
- }
-
- /* Do we need a pkey_lock here? */
- if (pkey_tbl->active == pkey_tbl->max) {
- dev_err(&res->pdev->dev, "PKEY table is full\n");
- return -ENOMEM;
- }
- free_idx = pkey_tbl->max;
- for (i = 0; i < pkey_tbl->max; i++) {
- if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
- return -EALREADY;
- else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max)
- free_idx = i;
- }
- if (free_idx == pkey_tbl->max) {
- dev_err(&res->pdev->dev,
- "PKEY table is FULL but count is not MAX??\n");
- return -ENOMEM;
- }
- /* Add PKEY to the pkey_tbl */
- memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey));
- pkey_tbl->active++;
-
- /* unlock */
- return rc;
-}
-
/* AH */
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index a18f568cb23e..5939e8fc8353 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -255,15 +255,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
-int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
- u16 *pkey);
-int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update);
-int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr, bool vf);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 913f39ee4416..c16017f6e8db 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2471,7 +2471,8 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
skb_get(skb);
rpl = cplhdr(skb);
if (!is_t4(adapter_type)) {
- skb_trim(skb, roundup(sizeof(*rpl5), 16));
+ BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
+ skb_trim(skb, sizeof(*rpl5));
rpl5 = (void *)rpl;
INIT_TP_WR(rpl5, ep->hwtid);
} else {
@@ -2487,7 +2488,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
rpl5 = (void *)rpl;
- memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
+ memset_after(rpl5, 0, iss);
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
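memset_after(obj, v, member) sets every byte of *obj that follows 'member', replacing the error-prone manual offset arithmetic removed above (iss itself is assigned immediately afterwards, so only the tail needs clearing). A hedged sketch with an illustrative struct:

#include <linux/string.h>

struct reply {
	u32 hdr;
	u32 iss;
	u8 pad[8];
};

static void clear_tail(struct reply *r)
{
	memset_after(r, 0, iss);	/* zeroes only 'pad' */
}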
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index 724d23297b35..f64e7e02b129 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -59,7 +59,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
alloc->last = obj + 1;
if (alloc->last >= alloc->max)
alloc->last = 0;
- set_bit(obj, alloc->table);
+ __set_bit(obj, alloc->table);
obj += alloc->start;
} else
obj = -1;
@@ -75,37 +75,32 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
obj -= alloc->start;
spin_lock_irqsave(&alloc->lock, flags);
- clear_bit(obj, alloc->table);
+ __clear_bit(obj, alloc->table);
spin_unlock_irqrestore(&alloc->lock, flags);
}
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
u32 reserved, u32 flags)
{
- int i;
-
alloc->start = start;
alloc->flags = flags;
if (flags & C4IW_ID_TABLE_F_RANDOM)
alloc->last = prandom_u32() % RANDOM_SKIP;
else
alloc->last = 0;
- alloc->max = num;
+ alloc->max = num;
spin_lock_init(&alloc->lock);
- alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
- GFP_KERNEL);
+ alloc->table = bitmap_zalloc(num, GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
- bitmap_zero(alloc->table, num);
if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
- for (i = 0; i < reserved; ++i)
- set_bit(i, alloc->table);
+ bitmap_set(alloc->table, 0, reserved);
return 0;
}
void c4iw_id_table_free(struct c4iw_id_table *alloc)
{
- kfree(alloc->table);
+ bitmap_free(alloc->table);
}
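The atomic set_bit()/clear_bit() calls above were relaxed to __set_bit()/__clear_bit() because every access is already serialized by alloc->lock, making the atomic read-modify-write redundant cost. A hedged sketch of the same locking pattern:

#include <linux/spinlock.h>
#include <linux/bitmap.h>

struct id_alloc {
	spinlock_t lock;
	unsigned long *table;
};

static void id_free(struct id_alloc *a, unsigned int obj)
{
	unsigned long flags;

	spin_lock_irqsave(&a->lock, flags);
	__clear_bit(obj, a->table);	/* lock makes atomicity unnecessary */
	spin_unlock_irqrestore(&a->lock, flags);
}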
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 0c8fd5a85fcb..89f36a3a9af0 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -41,6 +41,7 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
+#include <net/addrconf.h>
#include <linux/io.h>
#include <asm/irq.h>
@@ -264,7 +265,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
return -EINVAL;
dev = to_c4iw_dev(ibdev);
- memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+ dev->rdev.lldi.ports[0]->dev_addr);
props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
props->fw_ver = dev->rdev.lldi.fw_vers;
props->device_cap_flags = dev->device_cap_flags;
@@ -525,8 +527,8 @@ void c4iw_register_device(struct work_struct *work)
struct c4iw_dev *dev = ctx->dev;
pr_debug("c4iw_dev %p\n", dev);
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+ dev->rdev.lldi.ports[0]->dev_addr);
dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
if (fastreg_support)
dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
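addrconf_addr_eui48() expands the 6-byte MAC into the full 8-byte EUI-64 (inserting FF:FE in the middle and flipping the universal/local bit), so the GUID is complete and well-formed, unlike the old 6-byte memcpy that left the last two bytes untouched. Minimal sketch:

#include <net/addrconf.h>
#include <linux/if_ether.h>

static void mac_to_node_guid(__be64 *guid, const u8 mac[ETH_ALEN])
{
	addrconf_addr_eui48((u8 *)guid, mac);
}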
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d20b4ef2c853..ffbd9a89981e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2460,6 +2460,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
memset(attr, 0, sizeof(*attr));
memset(init_attr, 0, sizeof(*init_attr));
attr->qp_state = to_ib_qp_state(qhp->attr.state);
+ attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 5b11c8282744..a71c5a36ceba 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -161,9 +161,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
if (!pq->reqs)
goto pq_reqs_nomem;
- pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
- sizeof(*pq->req_in_use),
- GFP_KERNEL);
+ pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
if (!pq->req_in_use)
goto pq_reqs_no_in_use;
@@ -210,7 +208,7 @@ cq_comps_nomem:
cq_nomem:
kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
- kfree(pq->req_in_use);
+ bitmap_free(pq->req_in_use);
pq_reqs_no_in_use:
kfree(pq->reqs);
pq_reqs_nomem:
@@ -257,7 +255,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
pq->wait,
!atomic_read(&pq->n_reqs));
kfree(pq->reqs);
- kfree(pq->req_in_use);
+ bitmap_free(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
flush_pq_iowait(pq);
kfree(pq);
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 18d10ebf900b..ab3fbba70789 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -5,22 +5,9 @@ config INFINIBAND_HNS
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on (HNS_DSAF && HNS_ENET) || HNS3
help
- This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
- is used in Hisilicon Hip06 and more further ICT SoC based on
- platform device.
+ This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
- To compile HIP06 or HIP08 driver as module, choose M here.
-
-config INFINIBAND_HNS_HIP06
- bool "Hisilicon Hip06 Family RoCE support"
- depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
- depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y)
- help
- RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
- Hip07 SoC. These RoCE engines are platform devices.
-
- To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
- module will be called hns-roce-hw-v1
+ To compile the HIP08 driver as a module, choose M here.
config INFINIBAND_HNS_HIP08
bool "Hisilicon Hip08 Family RoCE support"
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index e105945b94a1..9f04f25d9631 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,11 +9,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
-ifdef CONFIG_INFINIBAND_HNS_HIP06
-hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
-endif
-
ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index cc258edec331..492b122d0521 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
@@ -61,7 +60,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct hns_roce_ah *ah = to_hr_ah(ibah);
int ret = 0;
- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && udata)
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
return -EOPNOTSUPP;
ah->av.port = rdma_ah_get_port_num(ah_attr);
@@ -80,7 +79,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
/* HIP08 needs to record vlan info in Address Vector */
- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
&ah->av.vlan_id, NULL);
if (ret)
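
With the hw v1 code removed, HIP08 (revision 0x21) is the oldest hardware the driver still supports, so guards written as "revision <= PCI_REVISION_ID_HIP08" tighten to equality: the "or older" half of the comparison no longer matches anything. A short sketch of the resulting predicate (av_needs_vlan is an illustrative name, not a driver function):

/* Only HIP08 records vlan info in the Address Vector; HIP09 does not,
 * and revisions below HIP08 are gone from the driver entirely. */
static bool av_needs_vlan(struct hns_roce_dev *hr_dev)
{
	return hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08;
}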
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index d4fa0fd52294..11a78ceae568 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -31,10 +31,9 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/vmalloc.h>
-#include "hns_roce_device.h"
#include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 84f3f2b5f097..4b693d542ace 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -31,7 +31,6 @@
*/
#include <linux/dmapool.h>
-#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
@@ -61,7 +60,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
CMD_POLL_TOKEN, 0);
if (ret) {
dev_err_ratelimited(hr_dev->dev,
- "failed to post mailbox %x in poll mode, ret = %d.\n",
+ "failed to post mailbox 0x%x in poll mode, ret = %d.\n",
op, ret);
return ret;
}
@@ -91,7 +90,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
if (unlikely(token != context->token)) {
dev_err_ratelimited(hr_dev->dev,
- "[cmd] invalid ae token %x,context token is %x!\n",
+ "[cmd] invalid ae token 0x%x, context token is 0x%x.\n",
token, context->token);
return;
}
@@ -130,14 +129,14 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
context->token, 1);
if (ret) {
dev_err_ratelimited(dev,
- "failed to post mailbox %x in event mode, ret = %d.\n",
+ "failed to post mailbox 0x%x in event mode, ret = %d.\n",
op, ret);
goto out;
}
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
- dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n",
+ dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n",
context->token, op);
ret = -EBUSY;
goto out;
@@ -145,7 +144,7 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
ret = context->result;
if (ret)
- dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n",
+ dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n",
context->token, op, ret);
out:
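
The hns_roce_cmd.c hunks are log hygiene: each "%x" gains an explicit "0x" prefix so a hex token such as 1234 cannot be misread as decimal. A standalone demonstration of the ambiguity:

#include <stdio.h>

int main(void)
{
	unsigned int op = 0x1234;

	printf("%x\n", op);	/* prints "1234" - looks decimal */
	printf("0x%x\n", op);	/* prints "0x1234" - base is explicit */
	return 0;
}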
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index b73e55de83ac..465d1f914b6c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -104,208 +104,6 @@
#define hr_reg_read(ptr, field) _hr_reg_read(ptr, field)
-#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
-#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
-
-#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5
-
-#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6
-
-#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10
-#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \
- (((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S)
-
-#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16
-
-#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0
-#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \
- (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24
-#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \
- (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0
-#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \
- (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24
-#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \
- (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S)
-
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \
- (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S)
-
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \
- (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S)
-
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \
- (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S)
-
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \
- (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S)
-
-#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0
-#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \
- (((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \
- (((1UL << 15) - 1) << \
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \
- (((1UL << 4) - 1) << \
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S)
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S)
-
-#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0
-#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S)
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S)
-
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \
- (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S)
-
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \
- (((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \
- (((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31
-
-#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0
-#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \
- (((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S)
-
-#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0
-#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \
- (((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S)
-
-#define ROCEE_MB6_ROCEE_MB_CMD_S 0
-#define ROCEE_MB6_ROCEE_MB_CMD_M \
- (((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S)
-
-#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8
-#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \
- (((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S)
-
-#define ROCEE_MB6_ROCEE_MB_EVENT_S 14
-
-#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15
-
-#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16
-#define ROCEE_MB6_ROCEE_MB_TOKEN_M \
- (((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \
- (((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \
- (((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \
- (((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31
-
-#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0
-#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \
- (((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S)
-
-#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16
-#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \
- (((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \
- (((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \
- (((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17
-
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \
- (((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S)
-
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \
- (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S)
-
-#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0
-#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \
- (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S)
-
-#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16
-#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1
-#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0
-
-#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0
-#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1
-
-#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0
-
-#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0
-#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \
- (((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
-
-#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0
-#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
- (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
-
-#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
-#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
- (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
-
-#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0
-#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \
- (((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S)
-
-#define ROCEE_SDB_CNT_CMP_BITS 16
-
-#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20
-
-#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0
-
/*************ROCEE_REG DEFINITION****************/
#define ROCEE_VENDOR_ID_REG 0x0
#define ROCEE_VENDOR_PART_ID_REG 0x4
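
Every ROCEE_*_S/_M pair deleted above encodes one pattern: a shift constant plus a mask built as (((1UL << width) - 1) << shift). Current kernel style expresses the same field with GENMASK() plus FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>. A sketch using the deleted mailbox-token field as the example (pack_token/unpack_token are illustrative names):

#include <linux/bitfield.h>
#include <linux/bits.h>

/* ROCEE_MB6_ROCEE_MB_TOKEN_S was 16 with a 16-bit-wide mask, i.e.
 * (((1UL << 16) - 1) << 16) == GENMASK(31, 16). */
#define ROCEE_MB6_MB_TOKEN	GENMASK(31, 16)

static u32 pack_token(u32 reg, u16 token)
{
	reg &= ~(u32)ROCEE_MB6_MB_TOKEN;	/* clear the old field */
	return reg | FIELD_PREP(ROCEE_MB6_MB_TOKEN, token);
}

static u16 unpack_token(u32 reg)
{
	return FIELD_GET(ROCEE_MB6_MB_TOKEN, reg);
}

Unlike the hand-rolled macros, FIELD_PREP() also rejects constant values that overflow the mask at compile time.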
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index d763f097599f..55057dcbb2dc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
@@ -406,15 +405,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cqn;
}
- /*
- * For the QP created by kernel space, tptr value should be initialized
- * to zero; For the QP created by user space, it will cause synchronous
- * problems if tptr is set to zero here, so we initialize it in user
- * space.
- */
- if (!udata && hr_cq->tptr_addr)
- *hr_cq->tptr_addr = 0;
-
if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp,
@@ -441,9 +431,6 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- if (hr_dev->hw->destroy_cq)
- hr_dev->hw->destroy_cq(ib_cq, udata);
-
free_cqc(hr_dev, hr_cq);
free_cqn(hr_dev, hr_cq->cqn);
free_cq_db(hr_dev, hr_cq, udata);
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 751470c7a2ce..5c4c0480832b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -4,7 +4,6 @@
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*/
-#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 43e17d61cb63..1e0bae136997 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -36,36 +36,18 @@
#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>
-#define DRV_NAME "hns_roce"
-
#define PCI_REVISION_ID_HIP08 0x21
#define PCI_REVISION_ID_HIP09 0x30
-#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6')
-
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
#define BA_BYTE_LEN 8
-/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
-#define HNS_ROCE_MIN_WQE_NUM 0x20
#define HNS_ROCE_MIN_SRQ_WQE_NUM 1
-/* Hardware specification only for v1 engine */
-#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
-#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
-
-#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20
-#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \
- (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
-#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
-#define HNS_ROCE_MIN_CQE_CNT 16
-
-#define HNS_ROCE_RESERVED_SGE 1
-
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
@@ -102,18 +84,12 @@
#define HNS_ROCE_FRMR_MAX_PA 512
#define PKEY_ID 0xffff
-#define GUID_LEN 8
#define NODE_DESC_SIZE 64
#define DB_REG_OFFSET 0x1000
/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
-#define PAGES_SHIFT_8 8
-#define PAGES_SHIFT_16 16
-#define PAGES_SHIFT_24 24
-#define PAGES_SHIFT_32 32
-
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230
@@ -122,11 +98,6 @@
#define CQ_BANKID_SHIFT 2
-/* The chip implementation of the consumer index is calculated
- * according to twice the actual EQ depth
- */
-#define EQ_DEPTH_COEFF 2
-
enum {
SERV_TYPE_RC,
SERV_TYPE_UC,
@@ -182,6 +153,7 @@ enum {
HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
+ HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
HNS_ROCE_CAP_FLAG_STASH = BIT(17),
};
@@ -227,7 +199,7 @@ struct hns_roce_uar {
enum hns_roce_mmap_type {
HNS_ROCE_MMAP_TYPE_DB = 1,
- HNS_ROCE_MMAP_TYPE_TPTR,
+ HNS_ROCE_MMAP_TYPE_DWQE,
};
struct hns_user_mmap_entry {
@@ -242,7 +214,6 @@ struct hns_roce_ucontext {
struct list_head page_list;
struct mutex page_mutex;
struct hns_user_mmap_entry *db_mmap_entry;
- struct hns_user_mmap_entry *tptr_mmap_entry;
};
struct hns_roce_pd {
@@ -345,19 +316,16 @@ struct hns_roce_mw {
u32 pbl_buf_pg_sz;
};
-/* Only support 4K page size for mr register */
-#define MR_SIZE_4K 0
-
struct hns_roce_mr {
struct ib_mr ibmr;
u64 iova; /* MR's virtual original addr */
u64 size; /* Address range of MR */
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
- u32 access; /* Access permission of MR */
+ u32 access; /* Access permission of MR */
int enabled; /* MR's active status */
- int type; /* MR's register type */
- u32 pbl_hop_num; /* multi-hop number */
+ int type; /* MR's register type */
+ u32 pbl_hop_num; /* multi-hop number */
struct hns_roce_mtr pbl_mtr;
u32 npages;
dma_addr_t *page_list;
@@ -374,17 +342,17 @@ struct hns_roce_wq {
u32 wqe_cnt; /* WQE num */
u32 max_gs;
u32 rsv_sge;
- int offset;
- int wqe_shift; /* WQE size */
+ u32 offset;
+ u32 wqe_shift; /* WQE size */
u32 head;
u32 tail;
void __iomem *db_reg;
};
struct hns_roce_sge {
- unsigned int sge_cnt; /* SGE num */
- int offset;
- int sge_shift; /* SGE size */
+ unsigned int sge_cnt; /* SGE num */
+ u32 offset;
+ u32 sge_shift; /* SGE size */
};
struct hns_roce_buf_list {
@@ -453,7 +421,6 @@ struct hns_roce_cq {
u32 cons_index;
u32 *set_ci_db;
void __iomem *db_reg;
- u16 *tptr_addr;
int arm_sn;
int cqe_size;
unsigned long cqn;
@@ -468,7 +435,7 @@ struct hns_roce_cq {
struct hns_roce_idx_que {
struct hns_roce_mtr mtr;
- int entry_shift;
+ u32 entry_shift;
unsigned long *bitmap;
u32 head;
u32 tail;
@@ -480,7 +447,7 @@ struct hns_roce_srq {
u32 wqe_cnt;
int max_gs;
u32 rsv_sge;
- int wqe_shift;
+ u32 wqe_shift;
u32 cqn;
u32 xrcdn;
void __iomem *db_reg;
@@ -539,10 +506,6 @@ struct hns_roce_srq_table {
struct hns_roce_hem_table table;
};
-struct hns_roce_raq_table {
- struct hns_roce_buf_list *e_raq_buf;
-};
-
struct hns_roce_av {
u8 port;
u8 gid_index;
@@ -627,10 +590,6 @@ struct hns_roce_work {
u32 queue_num;
};
-enum {
- HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5),
-};
-
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_wq rq;
@@ -650,9 +609,7 @@ struct hns_roce_qp {
u8 sl;
u8 resp_depth;
u8 state;
- u32 access_flags;
u32 atomic_rd_en;
- u32 pkey_index;
u32 qkey;
void (*event)(struct hns_roce_qp *qp,
enum hns_roce_event event_type);
@@ -672,9 +629,10 @@ struct hns_roce_qp {
unsigned long flush_flag;
struct hns_roce_work flush_work;
struct hns_roce_rinl_buf rq_inl_buf;
- struct list_head node; /* all qps are on a list */
- struct list_head rq_node; /* all recv qps are on a list */
- struct list_head sq_node; /* all send qps are on a list */
+ struct list_head node; /* all qps are on a list */
+ struct list_head rq_node; /* all recv qps are on a list */
+ struct list_head sq_node; /* all send qps are on a list */
+ struct hns_user_mmap_entry *dwqe_mmap_entry;
};
struct hns_roce_ib_iboe {
@@ -684,11 +642,6 @@ struct hns_roce_ib_iboe {
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
-enum {
- HNS_ROCE_EQ_STAT_INVALID = 0,
- HNS_ROCE_EQ_STAT_VALID = 2,
-};
-
struct hns_roce_ceqe {
__le32 comp;
__le32 rsv[15];
@@ -720,12 +673,9 @@ struct hns_roce_eq {
int type_flag; /* Aeq:1 ceq:0 */
int eqn;
u32 entries;
- u32 log_entries;
int eqe_size;
int irq;
- int log_page_size;
u32 cons_index;
- struct hns_roce_buf_list *buf_list;
int over_ignore;
int coalesce;
int arm_st;
@@ -740,7 +690,6 @@ struct hns_roce_eq {
struct hns_roce_eq_table {
struct hns_roce_eq *eq;
- void __iomem **eqc_base; /* only for hw v1 */
};
enum cong_type {
@@ -767,7 +716,7 @@ struct hns_roce_caps {
u32 reserved_qps;
int num_qpc_timer;
int num_cqc_timer;
- int num_srqs;
+ u32 num_srqs;
u32 max_wqes;
u32 max_srq_wrs;
u32 max_srq_sges;
@@ -781,7 +730,7 @@ struct hns_roce_caps {
u32 min_cqes;
u32 min_wqes;
u32 reserved_cqs;
- int reserved_srqs;
+ u32 reserved_srqs;
int num_aeq_vectors;
int num_comp_vectors;
int num_other_vectors;
@@ -855,7 +804,7 @@ struct hns_roce_caps {
u32 cqc_timer_ba_pg_sz;
u32 cqc_timer_buf_pg_sz;
u32 cqc_timer_hop_num;
- u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
+ u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
u32 srqwqe_ba_pg_sz;
@@ -874,7 +823,7 @@ struct hns_roce_caps {
u32 gmv_hop_num;
u32 sl_num;
u32 llm_buf_pg_sz;
- u32 chunk_sz; /* chunk size in non multihop mode */
+ u32 chunk_sz; /* chunk size in non multihop mode */
u64 flags;
u16 default_ceq_max_cnt;
u16 default_ceq_period;
@@ -897,7 +846,6 @@ enum hns_roce_device_state {
};
struct hns_roce_hw {
- int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
int (*cmq_init)(struct hns_roce_dev *hr_dev);
void (*cmq_exit)(struct hns_roce_dev *hr_dev);
int (*hw_profile)(struct hns_roce_dev *hr_dev);
@@ -909,14 +857,12 @@ struct hns_roce_hw {
int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
unsigned int timeout);
bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
- int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index,
+ int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
const u8 *addr);
- void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
- enum ib_mtu mtu);
int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
- struct hns_roce_mr *mr, unsigned long mtpt_idx);
+ struct hns_roce_mr *mr);
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags,
void *mb_buf);
@@ -936,9 +882,6 @@ struct hns_roce_hw {
enum ib_qp_state new_state);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
- int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
- struct ib_udata *udata);
- int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
@@ -948,13 +891,11 @@ struct hns_roce_hw {
struct hns_roce_dev {
struct ib_device ib_dev;
- struct platform_device *pdev;
struct pci_dev *pci_dev;
struct device *dev;
struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
- spinlock_t bt_cmd_lock;
bool active;
bool is_reset;
bool dis_db;
@@ -1001,8 +942,6 @@ struct hns_roce_dev {
int loop_idc;
u32 sdb_offset;
u32 odb_offset;
- dma_addr_t tptr_dma_addr; /* only for hw v1 */
- u32 tptr_size; /* only for hw v1 */
const struct hns_roce_hw *hw;
void *priv;
struct workqueue_struct *irq_workq;
@@ -1010,6 +949,7 @@ struct hns_roce_dev {
u32 func_num;
u32 is_vf;
u32 cong_algo_tmpl_id;
+ u64 dwqe_page;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1158,7 +1098,7 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
/* hns roce hw need current block and next block addr from mtt */
#define MTT_MIN_COUNT 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
+ u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_buf_attr *buf_attr,
unsigned int page_shift, struct ib_udata *udata,
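
Several offset/shift members in the device.h hunk (wq offset, wqe_shift, sge_shift, entry_shift) move from int to u32. The commit does not spell out why, but a plausible motivation is that these values are never negative and routinely meet unsigned buffer math, where a signed operand silently converts. A standalone demo of that trap (not driver code):

#include <stdio.h>

int main(void)
{
	int offset = -1;		/* stray negative sentinel */
	unsigned int size = 4096;

	/* Usual arithmetic conversions turn -1 into 0xffffffff, so the
	 * "obviously true" bounds check goes the wrong way. */
	if (offset < size)
		printf("in range\n");
	else
		printf("out of range\n");	/* this is what prints */
	return 0;
}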
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index fa15d79eabb3..8917365cc6b8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
deleted file mode 100644
index f4af3992ba95..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ /dev/null
@@ -1,4675 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/platform_device.h>
-#include <linux/acpi.h>
-#include <linux/etherdevice.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <rdma/ib_umem.h>
-#include "hns_roce_common.h"
-#include "hns_roce_device.h"
-#include "hns_roce_cmd.h"
-#include "hns_roce_hem.h"
-#include "hns_roce_hw_v1.h"
-
-/**
- * hns_get_gid_index - Get gid index.
- * @hr_dev: pointer to structure hns_roce_dev.
- * @port: port, value range: 0 ~ MAX
- * @gid_index: gid_index, value range: 0 ~ MAX
- * Description:
- * N ports shared gids, allocation method as follow:
- * GID[0][0], GID[1][0],.....GID[N - 1][0],
- * GID[0][0], GID[1][0],.....GID[N - 1][0],
- * And so on
- */
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index)
-{
- return gid_index * hr_dev->caps.num_ports + port;
-}
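/*
 * Editorial sketch, not part of the deleted file: the kernel-doc and
 * formula above lay the shared GID table out with one slot per port
 * for each logical gid_index.  Worked example with num_ports = 2:
 *
 *   slot 0 = gid_index 0, port 0     slot 1 = gid_index 0, port 1
 *   slot 2 = gid_index 1, port 0     slot 3 = gid_index 1, port 1
 *
 * e.g. hns_get_gid_index(hr_dev, 1, 1) = 1 * 2 + 1 = 3.
 */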
-
-static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
-{
- dseg->lkey = cpu_to_le32(sg->lkey);
- dseg->addr = cpu_to_le64(sg->addr);
- dseg->len = cpu_to_le32(sg->length);
-}
-
-static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
- u32 rkey)
-{
- rseg->raddr = cpu_to_le64(remote_addr);
- rseg->rkey = cpu_to_le32(rkey);
- rseg->len = 0;
-}
-
-static int hns_roce_v1_post_send(struct ib_qp *ibqp,
- const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
- struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
- struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
- struct hns_roce_wqe_data_seg *dseg = NULL;
- struct hns_roce_qp *qp = to_hr_qp(ibqp);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_sq_db sq_db = {};
- int ps_opcode, i;
- unsigned long flags = 0;
- void *wqe = NULL;
- __le32 doorbell[2];
- const u8 *smac;
- int ret = 0;
- int loopback;
- u32 wqe_idx;
- int nreq;
-
- if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
- ibqp->qp_type != IB_QPT_RC)) {
- dev_err(dev, "un-supported QP type\n");
- *bad_wr = NULL;
- return -EOPNOTSUPP;
- }
-
- spin_lock_irqsave(&qp->sq.lock, flags);
-
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
- ret = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
-
- if (unlikely(wr->num_sge > qp->sq.max_gs)) {
- dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
- wr->num_sge, qp->sq.max_gs);
- ret = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- wqe = hns_roce_get_send_wqe(qp, wqe_idx);
- qp->sq.wrid[wqe_idx] = wr->wr_id;
-
- /* Corresponding to the RC and RD type wqe process separately */
- if (ibqp->qp_type == IB_QPT_GSI) {
- ud_sq_wqe = wqe;
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_0_M,
- UD_SEND_WQE_U32_4_DMAC_0_S,
- ah->av.mac[0]);
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_1_M,
- UD_SEND_WQE_U32_4_DMAC_1_S,
- ah->av.mac[1]);
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_2_M,
- UD_SEND_WQE_U32_4_DMAC_2_S,
- ah->av.mac[2]);
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_3_M,
- UD_SEND_WQE_U32_4_DMAC_3_S,
- ah->av.mac[3]);
-
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_DMAC_4_M,
- UD_SEND_WQE_U32_8_DMAC_4_S,
- ah->av.mac[4]);
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_DMAC_5_M,
- UD_SEND_WQE_U32_8_DMAC_5_S,
- ah->av.mac[5]);
-
- smac = (const u8 *)hr_dev->dev_addr[qp->port];
- loopback = ether_addr_equal_unaligned(ah->av.mac,
- smac) ? 1 : 0;
- roce_set_bit(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
- loopback);
-
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
- UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
- HNS_ROCE_WQE_OPCODE_SEND);
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
- UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
- 2);
- roce_set_bit(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
- 1);
-
- ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
- cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
- ((wr->opcode == IB_WR_SEND_WITH_IMM) ?
- cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);
-
- roce_set_field(ud_sq_wqe->u32_16,
- UD_SEND_WQE_U32_16_DEST_QP_M,
- UD_SEND_WQE_U32_16_DEST_QP_S,
- ud_wr(wr)->remote_qpn);
- roce_set_field(ud_sq_wqe->u32_16,
- UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
- UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
- ah->av.stat_rate);
-
- roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_FLOW_LABEL_M,
- UD_SEND_WQE_U32_36_FLOW_LABEL_S,
- ah->av.flowlabel);
- roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_PRIORITY_M,
- UD_SEND_WQE_U32_36_PRIORITY_S,
- ah->av.sl);
- roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_SGID_INDEX_M,
- UD_SEND_WQE_U32_36_SGID_INDEX_S,
- hns_get_gid_index(hr_dev, qp->phy_port,
- ah->av.gid_index));
-
- roce_set_field(ud_sq_wqe->u32_40,
- UD_SEND_WQE_U32_40_HOP_LIMIT_M,
- UD_SEND_WQE_U32_40_HOP_LIMIT_S,
- ah->av.hop_limit);
- roce_set_field(ud_sq_wqe->u32_40,
- UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
- UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
- ah->av.tclass);
-
- memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
-
- ud_sq_wqe->va0_l =
- cpu_to_le32((u32)wr->sg_list[0].addr);
- ud_sq_wqe->va0_h =
- cpu_to_le32((wr->sg_list[0].addr) >> 32);
- ud_sq_wqe->l_key0 =
- cpu_to_le32(wr->sg_list[0].lkey);
-
- ud_sq_wqe->va1_l =
- cpu_to_le32((u32)wr->sg_list[1].addr);
- ud_sq_wqe->va1_h =
- cpu_to_le32((wr->sg_list[1].addr) >> 32);
- ud_sq_wqe->l_key1 =
- cpu_to_le32(wr->sg_list[1].lkey);
- } else if (ibqp->qp_type == IB_QPT_RC) {
- u32 tmp_len = 0;
-
- ctrl = wqe;
- memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
- for (i = 0; i < wr->num_sge; i++)
- tmp_len += wr->sg_list[i].length;
-
- ctrl->msg_length =
- cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);
-
- ctrl->sgl_pa_h = 0;
- ctrl->flag = 0;
-
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ctrl->imm_data = wr->ex.imm_data;
- break;
- case IB_WR_SEND_WITH_INV:
- ctrl->inv_key =
- cpu_to_le32(wr->ex.invalidate_rkey);
- break;
- default:
- ctrl->imm_data = 0;
- break;
- }
-
- /* Ctrl field, ctrl set type: sig, solic, imm, fence */
- /* SO wait for conforming application scenarios */
- ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
- cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
- ((wr->opcode == IB_WR_SEND_WITH_IMM ||
- wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
- cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
- (wr->send_flags & IB_SEND_FENCE ?
- (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
-
- wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
-
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
- set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
- set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- break;
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- case IB_WR_SEND_WITH_IMM:
- ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
- break;
- case IB_WR_LOCAL_INV:
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- case IB_WR_LSO:
- default:
- ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
- break;
- }
- ctrl->flag |= cpu_to_le32(ps_opcode);
- wqe += sizeof(struct hns_roce_wqe_raddr_seg);
-
- dseg = wqe;
- if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
- if (le32_to_cpu(ctrl->msg_length) >
- hr_dev->caps.max_sq_inline) {
- ret = -EINVAL;
- *bad_wr = wr;
- dev_err(dev, "inline len(1-%d)=%d, illegal",
- le32_to_cpu(ctrl->msg_length),
- hr_dev->caps.max_sq_inline);
- goto out;
- }
- for (i = 0; i < wr->num_sge; i++) {
- memcpy(wqe, ((void *) (uintptr_t)
- wr->sg_list[i].addr),
- wr->sg_list[i].length);
- wqe += wr->sg_list[i].length;
- }
- ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
- } else {
- /* sqe num is two */
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg(dseg + i, wr->sg_list + i);
-
- ctrl->flag |= cpu_to_le32(wr->num_sge <<
- HNS_ROCE_WQE_SGE_NUM_BIT);
- }
- }
- }
-
-out:
- /* Set DB return */
- if (likely(nreq)) {
- qp->sq.head += nreq;
-
- roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
- SQ_DOORBELL_U32_4_SQ_HEAD_S,
- (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
- roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
- SQ_DOORBELL_U32_4_SL_S, qp->sl);
- roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
- SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
- roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
- SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
- roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
-
- doorbell[0] = sq_db.u32_4;
- doorbell[1] = sq_db.u32_8;
-
- hns_roce_write64_k(doorbell, qp->sq.db_reg);
- }
-
- spin_unlock_irqrestore(&qp->sq.lock, flags);
-
- return ret;
-}
-
-static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
- const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
- struct hns_roce_wqe_data_seg *scat = NULL;
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_rq_db rq_db = {};
- __le32 doorbell[2] = {0};
- unsigned long flags = 0;
- unsigned int wqe_idx;
- int ret = 0;
- int nreq;
- int i;
- u32 reg_val;
-
- spin_lock_irqsave(&hr_qp->rq.lock, flags);
-
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
- hr_qp->ibqp.recv_cq)) {
- ret = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
-
- if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
- dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
- wr->num_sge, hr_qp->rq.max_gs);
- ret = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
-
- roce_set_field(ctrl->rwqe_byte_12,
- RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
- RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
- wr->num_sge);
-
- scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);
-
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg(scat + i, wr->sg_list + i);
-
- hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
- }
-
-out:
- if (likely(nreq)) {
- hr_qp->rq.head += nreq;
-
- if (ibqp->qp_type == IB_QPT_GSI) {
- __le32 tmp;
-
- /* SW update GSI rq header */
- reg_val = roce_read(to_hr_dev(ibqp->device),
- ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->phy_port);
- tmp = cpu_to_le32(reg_val);
- roce_set_field(tmp,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
- hr_qp->rq.head);
- reg_val = le32_to_cpu(tmp);
- roce_write(to_hr_dev(ibqp->device),
- ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
- } else {
- roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
- RQ_DOORBELL_U32_4_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
- RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
- RQ_DOORBELL_U32_8_CMD_S, 1);
- roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
- 1);
-
- doorbell[0] = rq_db.u32_4;
- doorbell[1] = rq_db.u32_8;
-
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
- }
- }
- spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
-
- return ret;
-}
-
-static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
- int sdb_mode, int odb_mode)
-{
- __le32 tmp;
- u32 val;
-
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- tmp = cpu_to_le32(val);
- roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
- roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
-}
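/*
 * Editorial sketch, not part of the deleted file: every register
 * update in hw v1 follows the read-modify-write shape above.  The
 * cpu_to_le32()/le32_to_cpu() round trip exists only because
 * roce_set_bit()/roce_set_field() operate on __le32 values; on a
 * plain readl()/writel() interface the idiom condenses to a helper
 * like this (rmw_bits is an illustrative name):
 */
static void rmw_bits(void __iomem *reg, u32 mask, bool set)
{
	u32 val = readl(reg);	/* read the current register value */

	if (set)
		val |= mask;	/* set the selected bits */
	else
		val &= ~mask;	/* or clear them */
	writel(val, reg);	/* write the result back */
}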
-
-static int hns_roce_v1_set_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj,
- int step_idx)
-{
- spinlock_t *lock = &hr_dev->bt_cmd_lock;
- struct device *dev = hr_dev->dev;
- struct hns_roce_hem_iter iter;
- void __iomem *bt_cmd;
- __le32 bt_cmd_val[2];
- __le32 bt_cmd_h = 0;
- unsigned long flags;
- __le32 bt_cmd_l;
- int ret = 0;
- u64 bt_ba;
- long end;
-
- /* Find the HEM(Hardware Entry Memory) entry */
- unsigned long i = obj / (table->table_chunk_size / table->obj_size);
-
- switch (table->type) {
- case HEM_TYPE_QPC:
- case HEM_TYPE_MTPT:
- case HEM_TYPE_CQC:
- case HEM_TYPE_SRQC:
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
- break;
- default:
- return ret;
- }
-
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
-
- /* Currently iter only a chunk */
- for (hns_roce_hem_first(table->hem[i], &iter);
- !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
- bt_ba = hns_roce_hem_addr(&iter) >> HNS_HW_PAGE_SHIFT;
-
- spin_lock_irqsave(lock, flags);
-
- bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
- end = HW_SYNC_TIMEOUT_MSECS;
- while (end > 0) {
- if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
- break;
-
- mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
- end -= HW_SYNC_SLEEP_TIME_INTERVAL;
- }
-
- if (end <= 0) {
- dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
- spin_unlock_irqrestore(lock, flags);
- return -EBUSY;
- }
-
- bt_cmd_l = cpu_to_le32(bt_ba);
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
- upper_32_bits(bt_ba));
-
- bt_cmd_val[0] = bt_cmd_l;
- bt_cmd_val[1] = bt_cmd_h;
- hns_roce_write64_k(bt_cmd_val,
- hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
- spin_unlock_irqrestore(lock, flags);
- }
-
- return ret;
-}
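/*
 * Editorial sketch, not part of the deleted file: the loop above is
 * the stock "poll a hardware busy bit under a bounded delay budget"
 * idiom.  Condensed form with assumed constants (50 ms budget, 4 ms
 * poll interval, busy flag in bit 31):
 */
static int wait_hw_idle(void __iomem *reg)
{
	long budget_ms = 50;

	while (budget_ms > 0) {
		if (!(readl(reg) & BIT(31)))
			return 0;	/* hardware went idle in time */
		mdelay(4);
		budget_ms -= 4;
	}
	return -EBUSY;			/* still busy at timeout */
}
/*
 * Mainline code would typically reach for readl_poll_timeout() from
 * <linux/iopoll.h>, which packages exactly this loop.
 */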
-
-static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
- u32 odb_mode)
-{
- __le32 tmp;
- u32 val;
-
- /* Configure SDB/ODB extend mode */
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- tmp = cpu_to_le32(val);
- roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
- roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
-}
-
-static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
- u32 sdb_alful)
-{
- __le32 tmp;
- u32 val;
-
- /* Configure SDB */
- val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
- ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
- roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
- ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
-}
-
-static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
- u32 odb_alful)
-{
- __le32 tmp;
- u32 val;
-
- /* Configure ODB */
- val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
- ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
- roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
- ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
-}
-
-static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
- u32 ext_sdb_alful)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t sdb_dma_addr;
- __le32 tmp;
- u32 val;
-
- /* Configure extend SDB threshold */
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);
-
- /* Configure extend SDB base addr */
- sdb_dma_addr = db->ext_db->sdb_buf_list->map;
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));
-
- /* Configure extend SDB depth */
- val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
- ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
- db->ext_db->esdb_dep);
- /*
- * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
- * using 4K page, and shift more 32 because of
- * calculating the high 32 bit value evaluated to hardware.
- */
- roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
- ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
-
- dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
- dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n",
- ext_sdb_alept, ext_sdb_alful);
-}
-
-static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
- u32 ext_odb_alful)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t odb_dma_addr;
- __le32 tmp;
- u32 val;
-
- /* Configure extend ODB threshold */
- roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
- roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
-
- /* Configure extend ODB base addr */
- odb_dma_addr = db->ext_db->odb_buf_list->map;
- roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));
-
- /* Configure extend ODB depth */
- val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
- ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
- db->ext_db->eodb_dep);
- roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
- ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
- db->ext_db->eodb_dep);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);
-
- dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
- dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
- ext_odb_alept, ext_odb_alful);
-}
-
-static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
- u32 odb_ext_mod)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t sdb_dma_addr;
- dma_addr_t odb_dma_addr;
- int ret = 0;
-
- db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
- if (!db->ext_db)
- return -ENOMEM;
-
- if (sdb_ext_mod) {
- db->ext_db->sdb_buf_list = kmalloc(
- sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
- if (!db->ext_db->sdb_buf_list) {
- ret = -ENOMEM;
- goto ext_sdb_buf_fail_out;
- }
-
- db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
- HNS_ROCE_V1_EXT_SDB_SIZE,
- &sdb_dma_addr, GFP_KERNEL);
- if (!db->ext_db->sdb_buf_list->buf) {
- ret = -ENOMEM;
- goto alloc_sq_db_buf_fail;
- }
- db->ext_db->sdb_buf_list->map = sdb_dma_addr;
-
- db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
- hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
- HNS_ROCE_V1_EXT_SDB_ALFUL);
- } else
- hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
- HNS_ROCE_V1_SDB_ALFUL);
-
- if (odb_ext_mod) {
- db->ext_db->odb_buf_list = kmalloc(
- sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
- if (!db->ext_db->odb_buf_list) {
- ret = -ENOMEM;
- goto ext_odb_buf_fail_out;
- }
-
- db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
- HNS_ROCE_V1_EXT_ODB_SIZE,
- &odb_dma_addr, GFP_KERNEL);
- if (!db->ext_db->odb_buf_list->buf) {
- ret = -ENOMEM;
- goto alloc_otr_db_buf_fail;
- }
- db->ext_db->odb_buf_list->map = odb_dma_addr;
-
- db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
- hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
- HNS_ROCE_V1_EXT_ODB_ALFUL);
- } else
- hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
- HNS_ROCE_V1_ODB_ALFUL);
-
- hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);
-
- return 0;
-
-alloc_otr_db_buf_fail:
- kfree(db->ext_db->odb_buf_list);
-
-ext_odb_buf_fail_out:
- if (sdb_ext_mod) {
- dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
- db->ext_db->sdb_buf_list->buf,
- db->ext_db->sdb_buf_list->map);
- }
-
-alloc_sq_db_buf_fail:
- if (sdb_ext_mod)
- kfree(db->ext_db->sdb_buf_list);
-
-ext_sdb_buf_fail_out:
- kfree(db->ext_db);
- return ret;
-}
-
-static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
- struct ib_pd *pd)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_qp_init_attr init_attr;
- struct ib_qp *qp;
-
- memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
- init_attr.qp_type = IB_QPT_RC;
- init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
- init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;
-
- qp = ib_create_qp(pd, &init_attr);
- if (IS_ERR(qp)) {
- dev_err(dev, "Create loop qp for mr free failed!");
- return NULL;
- }
-
- return to_hr_qp(qp);
-}
-
-static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct hns_roce_caps *caps = &hr_dev->caps;
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_cq_init_attr cq_init_attr;
- struct ib_qp_attr attr = { 0 };
- struct hns_roce_qp *hr_qp;
- struct ib_cq *cq;
- struct ib_pd *pd;
- union ib_gid dgid;
- __be64 subnet_prefix;
- int attr_mask = 0;
- int ret;
- int i, j;
- u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
- u8 phy_port;
- u32 port = 0;
- u8 sl;
-
- /* Reserved cq for loop qp */
- cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
- cq_init_attr.comp_vector = 0;
-
- cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
- if (!cq)
- return -ENOMEM;
-
- ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
- if (ret) {
- dev_err(dev, "Create cq for reserved loop qp failed!");
- goto alloc_cq_failed;
- }
- free_mr->mr_free_cq = to_hr_cq(cq);
- free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
- free_mr->mr_free_cq->ib_cq.uobject = NULL;
- free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
- free_mr->mr_free_cq->ib_cq.event_handler = NULL;
- free_mr->mr_free_cq->ib_cq.cq_context = NULL;
- atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
-
- pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
- if (!pd) {
- ret = -ENOMEM;
- goto alloc_mem_failed;
- }
-
- pd->device = ibdev;
- ret = hns_roce_alloc_pd(pd, NULL);
- if (ret)
- goto alloc_pd_failed;
-
- free_mr->mr_free_pd = to_hr_pd(pd);
- free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
- free_mr->mr_free_pd->ibpd.uobject = NULL;
- free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
- atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
-
- attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
- attr.pkey_index = 0;
- attr.min_rnr_timer = 0;
- /* Disable read ability */
- attr.max_dest_rd_atomic = 0;
- attr.max_rd_atomic = 0;
- /* Use arbitrary values as rq_psn and sq_psn */
- attr.rq_psn = 0x0808;
- attr.sq_psn = 0x0808;
- attr.retry_cnt = 7;
- attr.rnr_retry = 7;
- attr.timeout = 0x12;
- attr.path_mtu = IB_MTU_256;
- attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
- rdma_ah_set_static_rate(&attr.ah_attr, 3);
-
- subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
- phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
- (i % HNS_ROCE_MAX_PORTS);
- sl = i / HNS_ROCE_MAX_PORTS;
-
- for (j = 0; j < caps->num_ports; j++) {
- if (hr_dev->iboe.phy_port[j] == phy_port) {
- queue_en[i] = 1;
- port = j;
- break;
- }
- }
-
- if (!queue_en[i])
- continue;
-
- free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
- if (!free_mr->mr_free_qp[i]) {
- dev_err(dev, "Create loop qp failed!\n");
- ret = -ENOMEM;
- goto create_lp_qp_failed;
- }
- hr_qp = free_mr->mr_free_qp[i];
-
- hr_qp->port = port;
- hr_qp->phy_port = phy_port;
- hr_qp->ibqp.qp_type = IB_QPT_RC;
- hr_qp->ibqp.device = &hr_dev->ib_dev;
- hr_qp->ibqp.uobject = NULL;
- atomic_set(&hr_qp->ibqp.usecnt, 0);
- hr_qp->ibqp.pd = pd;
- hr_qp->ibqp.recv_cq = cq;
- hr_qp->ibqp.send_cq = cq;
-
- rdma_ah_set_port_num(&attr.ah_attr, port + 1);
- rdma_ah_set_sl(&attr.ah_attr, sl);
- attr.port_num = port + 1;
-
- attr.dest_qp_num = hr_qp->qpn;
- memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
- hr_dev->dev_addr[port],
- ETH_ALEN);
-
- memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
- memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
- memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
- dgid.raw[11] = 0xff;
- dgid.raw[12] = 0xfe;
- dgid.raw[8] ^= 2;
- rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
-
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
- IB_QPS_RESET, IB_QPS_INIT);
- if (ret) {
- dev_err(dev, "modify qp failed(%d)!\n", ret);
- goto create_lp_qp_failed;
- }
-
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
- IB_QPS_INIT, IB_QPS_RTR);
- if (ret) {
- dev_err(dev, "modify qp failed(%d)!\n", ret);
- goto create_lp_qp_failed;
- }
-
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
- IB_QPS_RTR, IB_QPS_RTS);
- if (ret) {
- dev_err(dev, "modify qp failed(%d)!\n", ret);
- goto create_lp_qp_failed;
- }
- }
-
- return 0;
-
-create_lp_qp_failed:
- for (i -= 1; i >= 0; i--) {
- hr_qp = free_mr->mr_free_qp[i];
- if (ib_destroy_qp(&hr_qp->ibqp))
- dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
- }
-
- hns_roce_dealloc_pd(pd, NULL);
-
-alloc_pd_failed:
- kfree(pd);
-
-alloc_mem_failed:
- hns_roce_destroy_cq(cq, NULL);
-alloc_cq_failed:
- kfree(cq);
- return ret;
-}
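/*
 * Editorial sketch, not part of the deleted file: the loopback QPs
 * above walk the standard RC bring-up ladder RESET -> INIT -> RTR ->
 * RTS, one modify_qp call per hop.  A portable consumer would express
 * the same ladder through ib_modify_qp() with the attribute mask each
 * state requires; attr is assumed pre-filled as in the function above.
 */
static int bring_up_rc_qp(struct ib_qp *qp, struct ib_qp_attr *attr)
{
	int ret;

	attr->qp_state = IB_QPS_INIT;
	ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
	if (ret)
		return ret;

	attr->qp_state = IB_QPS_RTR;	/* receive side comes alive */
	ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_AV |
			   IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN |
			   IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	if (ret)
		return ret;

	attr->qp_state = IB_QPS_RTS;	/* send side becomes usable */
	return ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_TIMEOUT |
			    IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
			    IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC);
}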
-
-static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_qp *hr_qp;
- int ret;
- int i;
-
- for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
- hr_qp = free_mr->mr_free_qp[i];
- if (!hr_qp)
- continue;
-
- ret = ib_destroy_qp(&hr_qp->ibqp);
- if (ret)
- dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
- i, ret);
- }
-
- hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
- kfree(&free_mr->mr_free_cq->ib_cq);
- hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
- kfree(&free_mr->mr_free_pd->ibpd);
-}
-
-static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- u32 sdb_ext_mod;
- u32 odb_ext_mod;
- u32 sdb_evt_mod;
- u32 odb_evt_mod;
- int ret;
-
- memset(db, 0, sizeof(*db));
-
- /* Default DB mode */
- sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
- odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
- sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
- odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;
-
- db->sdb_ext_mod = sdb_ext_mod;
- db->odb_ext_mod = odb_ext_mod;
-
- /* Init extend DB */
- ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
- if (ret) {
- dev_err(dev, "Failed in extend DB configuration.\n");
- return ret;
- }
-
- hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);
-
- return 0;
-}
-
-static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
-{
- struct hns_roce_recreate_lp_qp_work *lp_qp_work;
- struct hns_roce_dev *hr_dev;
-
- lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
- work);
- hr_dev = to_hr_dev(lp_qp_work->ib_dev);
-
- hns_roce_v1_release_lp_qp(hr_dev);
-
- if (hns_roce_v1_rsv_lp_qp(hr_dev))
- dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n");
-
- if (lp_qp_work->comp_flag)
- complete(lp_qp_work->comp);
-
- kfree(lp_qp_work);
-}
-
-static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
-{
- long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct hns_roce_recreate_lp_qp_work *lp_qp_work;
- struct device *dev = &hr_dev->pdev->dev;
- struct completion comp;
-
- lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
- GFP_KERNEL);
- if (!lp_qp_work)
- return -ENOMEM;
-
- INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
-
- lp_qp_work->ib_dev = &(hr_dev->ib_dev);
- lp_qp_work->comp = &comp;
- lp_qp_work->comp_flag = 1;
-
- init_completion(lp_qp_work->comp);
-
- queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
-
- while (end > 0) {
- if (try_wait_for_completion(&comp))
- return 0;
- msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
- end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
- }
-
- lp_qp_work->comp_flag = 0;
- if (try_wait_for_completion(&comp))
- return 0;
-
- dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n");
- return -ETIMEDOUT;
-}
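/*
 * Editorial sketch, not part of the deleted file: the
 * try_wait_for_completion()/msleep() budget above is a hand-rolled
 * bounded wait; the loop shape lets the caller clear comp_flag before
 * abandoning its on-stack completion.  Where that handshake is not
 * needed, the core primitive does it in one call (20 s budget, as the
 * warning above suggests):
 */
static int wait_for_worker(struct completion *comp)
{
	if (!wait_for_completion_timeout(comp, msecs_to_jiffies(20 * 1000)))
		return -ETIMEDOUT;	/* worker never signalled us */
	return 0;
}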
-
-static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_send_wr send_wr;
- const struct ib_send_wr *bad_wr;
- int ret;
-
- memset(&send_wr, 0, sizeof(send_wr));
- send_wr.next = NULL;
- send_wr.num_sge = 0;
- send_wr.send_flags = 0;
- send_wr.sg_list = NULL;
- send_wr.wr_id = (unsigned long long)&send_wr;
- send_wr.opcode = IB_WR_RDMA_WRITE;
-
- ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
- if (ret) {
- dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
-{
- unsigned long end =
- msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
- struct hns_roce_mr_free_work *mr_work =
- container_of(work, struct hns_roce_mr_free_work, work);
- struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev);
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq;
- struct hns_roce_mr *hr_mr = mr_work->mr;
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
- struct hns_roce_qp *hr_qp;
- int ne = 0;
- int ret;
- int i;
-
- for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
- hr_qp = free_mr->mr_free_qp[i];
- if (!hr_qp)
- continue;
- ne++;
-
- ret = hns_roce_v1_send_lp_wqe(hr_qp);
- if (ret) {
- dev_err(dev,
- "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
- hr_qp->qpn, ret);
- goto free_work;
- }
- }
-
- if (!ne) {
- dev_err(dev, "Reserved loop qp is absent!\n");
- goto free_work;
- }
-
- do {
- ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
- if (ret < 0 && hr_qp) {
- dev_err(dev,
- "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
- hr_qp->qpn, ret, hr_mr->key, ne);
- goto free_work;
- }
- ne -= ret;
- usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
- (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
- } while (ne && time_before_eq(jiffies, end));
-
- if (ne != 0)
- dev_err(dev,
- "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
- hr_mr->key, ne);
-
-free_work:
- if (mr_work->comp_flag)
- complete(mr_work->comp);
- kfree(mr_work);
-}
-
-static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr, struct ib_udata *udata)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_mr_free_work *mr_work;
- unsigned long start = jiffies;
- struct completion comp;
- int ret = 0;
-
- if (mr->enabled) {
- if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
- key_to_hw_index(mr->key) &
- (hr_dev->caps.num_mtpts - 1)))
- dev_warn(dev, "DESTROY_MPT failed!\n");
- }
-
- mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
- if (!mr_work) {
- ret = -ENOMEM;
- goto free_mr;
- }
-
- INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);
-
- mr_work->ib_dev = &(hr_dev->ib_dev);
- mr_work->comp = &comp;
- mr_work->comp_flag = 1;
- mr_work->mr = (void *)mr;
- init_completion(mr_work->comp);
-
- queue_work(free_mr->free_mr_wq, &(mr_work->work));
-
- while (end > 0) {
- if (try_wait_for_completion(&comp))
- goto free_mr;
- msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
- end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
- }
-
- mr_work->comp_flag = 0;
- if (try_wait_for_completion(&comp))
- goto free_mr;
-
- dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
- ret = -ETIMEDOUT;
-
-free_mr:
- dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
- mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
-
- ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)key_to_hw_index(mr->key));
- hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
- kfree(mr);
-
- return ret;
-}
-
-static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
-
- if (db->sdb_ext_mod) {
- dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
- db->ext_db->sdb_buf_list->buf,
- db->ext_db->sdb_buf_list->map);
- kfree(db->ext_db->sdb_buf_list);
- }
-
- if (db->odb_ext_mod) {
- dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
- db->ext_db->odb_buf_list->buf,
- db->ext_db->odb_buf_list->map);
- kfree(db->ext_db->odb_buf_list);
- }
-
- kfree(db->ext_db);
-}
-
-static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_raq_table *raq = &priv->raq_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t addr;
- int raq_shift;
- __le32 tmp;
- u32 val;
- int ret;
-
- raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
- if (!raq->e_raq_buf)
- return -ENOMEM;
-
- raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
- &addr, GFP_KERNEL);
- if (!raq->e_raq_buf->buf) {
- ret = -ENOMEM;
- goto err_dma_alloc_raq;
- }
- raq->e_raq_buf->map = addr;
-
- /* Configure the raq extended base address (48-bit, 4K-aligned) */
- roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);
-
- /* Configure raq_shift */
- raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
- val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
- ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
- /*
- * 44 = 32 + 12: addresses handed to hardware are first shifted
- * right by 12 (4K pages); this field then takes the high 32 bits
- * of that page frame number, hence the total shift of 44.
- */
- roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
- ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
- raq->e_raq_buf->map >> 44);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
- dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);
-
- /* Configure raq threshold */
- val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
- ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
- HNS_ROCE_V1_EXT_RAQ_WF);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
- dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);
-
- /* Enable extend raq */
- val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
- POL_TIME_INTERVAL_VAL);
- roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
- roce_set_field(tmp,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
- 2);
- roce_set_bit(tmp,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
- dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);
-
- /* Enable raq drop */
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- tmp = cpu_to_le32(val);
- roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
- dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);
-
- return 0;
-
-err_dma_alloc_raq:
- kfree(raq->e_raq_buf);
- return ret;
-}
-
-static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_raq_table *raq = &priv->raq_table;
- struct device *dev = &hr_dev->pdev->dev;
-
- dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
- raq->e_raq_buf->map);
- kfree(raq->e_raq_buf);
-}
-
-static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
-{
- __le32 tmp;
- u32 val;
-
- if (enable_flag) {
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- /* Open all ports */
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
- ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
- ALL_PORT_VAL_OPEN);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
- } else {
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- /* Close all ports */
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
- ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
- }
-}
-
-static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct device *dev = &hr_dev->pdev->dev;
- int ret;
-
- priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
- HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
- GFP_KERNEL);
- if (!priv->bt_table.qpc_buf.buf)
- return -ENOMEM;
-
- priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
- HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
- GFP_KERNEL);
- if (!priv->bt_table.mtpt_buf.buf) {
- ret = -ENOMEM;
- goto err_failed_alloc_mtpt_buf;
- }
-
- priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
- HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
- GFP_KERNEL);
- if (!priv->bt_table.cqc_buf.buf) {
- ret = -ENOMEM;
- goto err_failed_alloc_cqc_buf;
- }
-
- return 0;
-
-err_failed_alloc_cqc_buf:
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
-
-err_failed_alloc_mtpt_buf:
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
-
- return ret;
-}
-
-static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct device *dev = &hr_dev->pdev->dev;
-
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
-
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
-
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
-}
-
-static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
- struct device *dev = &hr_dev->pdev->dev;
-
- /*
- * This buffer holds each CQ's tptr (tail pointer), also known as
- * the ci (consumer index). Every CQ uses 2 bytes of it to store
- * its CQE ci on hip06; hardware reads this area to fetch the new
- * ci when the queue is almost full.
- */
- tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
- &tptr_buf->map, GFP_KERNEL);
- if (!tptr_buf->buf)
- return -ENOMEM;
-
- hr_dev->tptr_dma_addr = tptr_buf->map;
- hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
-
- return 0;
-}
-
-static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
- struct device *dev = &hr_dev->pdev->dev;
-
- dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
- tptr_buf->buf, tptr_buf->map);
-}
-
-static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct device *dev = &hr_dev->pdev->dev;
- int ret;
-
- free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
- if (!free_mr->free_mr_wq) {
- dev_err(dev, "Create free mr workqueue failed!\n");
- return -ENOMEM;
- }
-
- ret = hns_roce_v1_rsv_lp_qp(hr_dev);
- if (ret) {
- dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
- destroy_workqueue(free_mr->free_mr_wq);
- }
-
- return ret;
-}
-
-static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
-
- destroy_workqueue(free_mr->free_mr_wq);
-
- hns_roce_v1_release_lp_qp(hr_dev);
-}
-
-/**
- * hns_roce_v1_reset - reset RoCE
- * @hr_dev: RoCE device struct pointer
- * @dereset: true -- drop reset, false -- assert reset
- *
- * Return: 0 on success, negative errno on failure.
- */
-static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
-{
- struct device_node *dsaf_node;
- struct device *dev = &hr_dev->pdev->dev;
- struct device_node *np = dev->of_node;
- struct fwnode_handle *fwnode;
- int ret;
-
- /* check if this is DT/ACPI case */
- if (dev_of_node(dev)) {
- dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
- if (!dsaf_node) {
- dev_err(dev, "could not find dsaf-handle\n");
- return -EINVAL;
- }
- fwnode = &dsaf_node->fwnode;
- } else if (is_acpi_device_node(dev->fwnode)) {
- struct fwnode_reference_args args;
-
- ret = acpi_node_get_property_reference(dev->fwnode,
- "dsaf-handle", 0, &args);
- if (ret) {
- dev_err(dev, "could not find dsaf-handle\n");
- return ret;
- }
- fwnode = args.fwnode;
- } else {
- dev_err(dev, "cannot read data from DT or ACPI\n");
- return -ENXIO;
- }
-
- ret = hns_dsaf_roce_reset(fwnode, false);
- if (ret)
- return ret;
-
- if (dereset) {
- msleep(SLEEP_TIME_INTERVAL);
- ret = hns_dsaf_roce_reset(fwnode, true);
- }
-
- return ret;
-}
-
-static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_caps *caps = &hr_dev->caps;
- int i;
-
- hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
- hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
- hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
- ((u64)roce_read(hr_dev,
- ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
- hr_dev->hw_rev = HNS_ROCE_HW_VER1;
-
- caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
- caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
- caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
- caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
- caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
- caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
- caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
- caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
- caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
- caps->num_uars = HNS_ROCE_V1_UAR_NUM;
- caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
- caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
- caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
- caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
- caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
- caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
- caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
- caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
- caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
- caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
- caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
- caps->qpc_sz = HNS_ROCE_V1_QPC_SIZE;
- caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
- caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
- caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
- caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
- caps->cqe_sz = HNS_ROCE_V1_CQE_SIZE;
- caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
- caps->reserved_lkey = 0;
- caps->reserved_pds = 0;
- caps->reserved_mrws = 1;
- caps->reserved_uars = 0;
- caps->reserved_cqs = 0;
- caps->reserved_qps = 12; /* 2 SQPs per port, 6 ports, 12 in total */
- caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;
-
- for (i = 0; i < caps->num_ports; i++)
- caps->pkey_table_len[i] = 1;
-
- for (i = 0; i < caps->num_ports; i++) {
- /*
- * The six ports share the 16 GIDs of the v1 engine: ports below
- * 16 % 6 == 4 get 16 / 6 + 1 == 3 entries each, the remaining
- * ports get 16 / 6 == 2 entries each.
- */
- if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
- caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
- caps->num_ports;
- else
- caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
- caps->num_ports + 1;
- }
-
- caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
- caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
- caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
- caps->max_mtu = IB_MTU_2048;
-
- return 0;
-}
-
-static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
-{
- int ret;
- u32 val;
- __le32 tmp;
- struct device *dev = &hr_dev->pdev->dev;
-
- /* DMAE user config */
- val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
- ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
- ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
- 1 << PAGES_SHIFT_16);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
-
- val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
- ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
- ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
- 1 << PAGES_SHIFT_16);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val);
-
- ret = hns_roce_db_init(hr_dev);
- if (ret) {
- dev_err(dev, "doorbell init failed!\n");
- return ret;
- }
-
- ret = hns_roce_raq_init(hr_dev);
- if (ret) {
- dev_err(dev, "raq init failed!\n");
- goto error_failed_raq_init;
- }
-
- ret = hns_roce_bt_init(hr_dev);
- if (ret) {
- dev_err(dev, "bt init failed!\n");
- goto error_failed_bt_init;
- }
-
- ret = hns_roce_tptr_init(hr_dev);
- if (ret) {
- dev_err(dev, "tptr init failed!\n");
- goto error_failed_tptr_init;
- }
-
- ret = hns_roce_free_mr_init(hr_dev);
- if (ret) {
- dev_err(dev, "free mr init failed!\n");
- goto error_failed_free_mr_init;
- }
-
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
-
- return 0;
-
-error_failed_free_mr_init:
- hns_roce_tptr_free(hr_dev);
-
-error_failed_tptr_init:
- hns_roce_bt_free(hr_dev);
-
-error_failed_bt_init:
- hns_roce_raq_free(hr_dev);
-
-error_failed_raq_init:
- hns_roce_db_free(hr_dev);
- return ret;
-}
-
-static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
-{
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
- hns_roce_free_mr_free(hr_dev);
- hns_roce_tptr_free(hr_dev);
- hns_roce_bt_free(hr_dev);
- hns_roce_raq_free(hr_dev);
- hns_roce_db_free(hr_dev);
-}
-
-static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
-{
- u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
-
- return (!!(status & (1 << HCR_GO_BIT)));
-}
-
-static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, u32 in_modifier, u8 op_modifier,
- u16 op, u16 token, int event)
-{
- u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
- unsigned long end;
- u32 val = 0;
- __le32 tmp;
-
- end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
- while (hns_roce_v1_cmd_pending(hr_dev)) {
- if (time_after(jiffies, end)) {
- dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
- (int)jiffies, (int)end);
- return -EAGAIN;
- }
- cond_resched();
- }
-
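- /*
- * Hardware owns the mailbox while the go bit is set; it is clear
- * now, so build the command word and hand the mailbox back to
- * hardware with HW_RUN set.
- */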
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
- op);
- roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
- ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
- roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
- roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
- roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
- ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
-
- val = le32_to_cpu(tmp);
- writeq(in_param, hcr + 0);
- writeq(out_param, hcr + 2);
- writel(in_modifier, hcr + 4);
- /* Ensure the mailbox arguments above land before the go bit is set */
- wmb();
-
- writel(val, hcr + 5);
-
- return 0;
-}
-
-static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
- unsigned int timeout)
-{
- u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
- unsigned long end;
- u32 status = 0;
-
- end = msecs_to_jiffies(timeout) + jiffies;
- while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
- cond_resched();
-
- if (hns_roce_v1_cmd_pending(hr_dev)) {
- dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
- return -ETIMEDOUT;
- }
-
- status = le32_to_cpu((__force __le32)
- __raw_readl(hcr + HCR_STATUS_OFFSET));
- if ((status & STATUS_MASK) != 0x1) {
- dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port,
- int gid_index, const union ib_gid *gid,
- const struct ib_gid_attr *attr)
-{
- unsigned long flags;
- u32 *p = NULL;
- u8 gid_idx;
-
- gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
-
- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
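- /* Program the 128-bit GID as four 32-bit words (L/ML/MH/H) */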
- p = (u32 *)&gid->raw[0];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- p = (u32 *)&gid->raw[4];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- p = (u32 *)&gid->raw[8];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- p = (u32 *)&gid->raw[0xc];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-
- return 0;
-}
-
-static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
- const u8 *addr)
-{
- u32 reg_smac_l;
- u16 reg_smac_h;
- __le32 tmp;
- u16 *p_h;
- u32 *p;
- u32 val;
-
- /*
- * When the MAC changes, loopback may fail because the smac no
- * longer equals the dmac, so the reserved loopback QPs have to
- * be released and recreated.
- */
- if (hr_dev->hw->dereg_mr) {
- int ret;
-
- ret = hns_roce_v1_recreate_lp_qp(hr_dev);
- if (ret && ret != -ETIMEDOUT)
- return ret;
- }
-
- p = (u32 *)(&addr[0]);
- reg_smac_l = *p;
- roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
- PHY_PORT_OFFSET * phy_port);
-
- val = roce_read(hr_dev,
- ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
- tmp = cpu_to_le32(val);
- p_h = (u16 *)(&addr[4]);
- reg_smac_h = *p_h;
- roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
- ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
- val);
-
- return 0;
-}
-
-static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
- enum ib_mtu mtu)
-{
- __le32 tmp;
- u32 val;
-
- val = roce_read(hr_dev,
- ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
- ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
- val);
-}
-
-static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
- struct hns_roce_mr *mr,
- unsigned long mtpt_idx)
-{
- u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_v1_mpt_entry *mpt_entry;
- dma_addr_t pbl_ba;
- int count;
- int i;
-
- /* MPT filled into mailbox buf */
- mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
- memset(mpt_entry, 0, sizeof(*mpt_entry));
-
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
- MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
- MPT_BYTE_4_KEY_S, mr->key);
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
- MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
- (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
- MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
- (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
- (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
- (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
- 0);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
-
- roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
- MPT_BYTE_12_PBL_ADDR_H_S, 0);
- roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
- MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
-
- mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
- mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
- mpt_entry->length = cpu_to_le32((u32)mr->size);
-
- roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
- MPT_BYTE_28_PD_S, mr->pd);
- roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
- MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
- roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
- MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
-
- /* DMA MRs have no PBL; the entry is complete at this point */
- if (mr->type == MR_TYPE_DMA)
- return 0;
-
- count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
- ARRAY_SIZE(pages), &pbl_ba);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count);
- return -ENOBUFS;
- }
-
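- /*
- * A v1 MPT inlines up to seven PBL pages (PA0-PA6); each 64-bit
- * page address is scattered across low/high bitfields of varying
- * widths, hence the per-index cases below.
- */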
- /* Register user mr */
- for (i = 0; i < count; i++) {
- switch (i) {
- case 0:
- mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_36,
- MPT_BYTE_36_PA0_H_M,
- MPT_BYTE_36_PA0_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_32));
- break;
- case 1:
- roce_set_field(mpt_entry->mpt_byte_36,
- MPT_BYTE_36_PA1_L_M,
- MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_40,
- MPT_BYTE_40_PA1_H_M,
- MPT_BYTE_40_PA1_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_24));
- break;
- case 2:
- roce_set_field(mpt_entry->mpt_byte_40,
- MPT_BYTE_40_PA2_L_M,
- MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_44,
- MPT_BYTE_44_PA2_H_M,
- MPT_BYTE_44_PA2_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_16));
- break;
- case 3:
- roce_set_field(mpt_entry->mpt_byte_44,
- MPT_BYTE_44_PA3_L_M,
- MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_48,
- MPT_BYTE_48_PA3_H_M,
- MPT_BYTE_48_PA3_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_8));
- break;
- case 4:
- mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_56,
- MPT_BYTE_56_PA4_H_M,
- MPT_BYTE_56_PA4_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_32));
- break;
- case 5:
- roce_set_field(mpt_entry->mpt_byte_56,
- MPT_BYTE_56_PA5_L_M,
- MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_60,
- MPT_BYTE_60_PA5_H_M,
- MPT_BYTE_60_PA5_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_24));
- break;
- case 6:
- roce_set_field(mpt_entry->mpt_byte_60,
- MPT_BYTE_60_PA6_L_M,
- MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_64,
- MPT_BYTE_64_PA6_H_M,
- MPT_BYTE_64_PA6_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_16));
- break;
- default:
- break;
- }
- }
-
- mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba);
- roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
- MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba));
-
- return 0;
-}
-
-static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
-{
- return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);
-}
-
-static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
-{
- struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
-
- /*
- * A CQE is software-owned when its owner bit differs from the
- * wrap bit (MSB) of cons_index.
- */
- return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
- !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
-}
-
-static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
-{
- return get_sw_cqe(hr_cq, hr_cq->cons_index);
-}
-
-static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
-{
- __le32 doorbell[2];
-
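- /*
- * Word 0 carries the consumer index masked to twice the CQ depth,
- * preserving the wrap bit; word 1 carries the HW sync flag, the
- * doorbell command and the CQN.
- */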
- doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
- doorbell[1] = 0;
- roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
-
- hns_roce_write64_k(doorbell, hr_cq->db_reg);
-}
-
-static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
- struct hns_roce_srq *srq)
-{
- struct hns_roce_cqe *cqe, *dest;
- u32 prod_index;
- int nfreed = 0;
- u8 owner_bit;
-
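- /*
- * First pass: scan forward from the consumer index to find the
- * producer index, i.e. the first CQE still owned by hardware.
- */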
- for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
- ++prod_index) {
- if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
- break;
- }
-
- /*
- * Second pass: walk backwards through the CQ, removing CQEs that
- * belong to our QP by sliding the surviving CQEs forward over them.
- */
- while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
- cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
- if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S) &
- HNS_ROCE_CQE_QPN_MASK) == qpn) {
- /* The v1 engine does not support SRQ */
- ++nfreed;
- } else if (nfreed) {
- dest = get_cqe(hr_cq, (prod_index + nfreed) &
- hr_cq->ib_cq.cqe);
- owner_bit = roce_get_bit(dest->cqe_byte_4,
- CQE_BYTE_4_OWNER_S);
- memcpy(dest, cqe, sizeof(*cqe));
- roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
- owner_bit);
- }
- }
-
- if (nfreed) {
- hr_cq->cons_index += nfreed;
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
- }
-}
-
-static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
- struct hns_roce_srq *srq)
-{
- spin_lock_irq(&hr_cq->lock);
- __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
- spin_unlock_irq(&hr_cq->lock);
-}
-
-static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
- struct hns_roce_cq_context *cq_context = mb_buf;
- dma_addr_t tptr_dma_addr;
- int offset;
-
- memset(cq_context, 0, sizeof(*cq_context));
-
- /* Get the tptr for this CQ. */
- offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
- tptr_dma_addr = tptr_buf->map + offset;
- hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
-
- /* Register cq_context members */
- roce_set_field(cq_context->cqc_byte_4,
- CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
- CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
- roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
- CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
-
- cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
-
- roce_set_field(cq_context->cqc_byte_12,
- CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
- CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
- ((u64)dma_handle >> 32));
- roce_set_field(cq_context->cqc_byte_12,
- CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
- CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
- ilog2(hr_cq->cq_depth));
- roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
- CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
-
- cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
-
- roce_set_field(cq_context->cqc_byte_20,
- CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
- CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
- /* The current index is maintained by hardware; just initialize it to 0 */
- roce_set_field(cq_context->cqc_byte_20,
- CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
- CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
- /*
- * 44 = 32 + 12: the tptr address is first shifted right by 12
- * (4K pages); this field takes the high 32 bits of that page
- * frame number, hence the total shift of 44.
- */
- roce_set_field(cq_context->cqc_byte_20,
- CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
- CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
- tptr_dma_addr >> 44);
-
- cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
-
- roce_set_field(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
- CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
- 0);
- /* The initial value of cq's ci is 0 */
- roce_set_field(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
- CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
-}
-
-static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
- enum ib_cq_notify_flags flags)
-{
- struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- u32 notification_flag;
- __le32 doorbell[2] = {};
-
- notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
- IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
- /*
- * IB_CQ_NEXT_COMP: request notification on the next completion;
- * IB_CQ_SOLICITED: request notification on the next solicited
- * completion only.
- */
- doorbell[0] =
- cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
- roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
- hr_cq->cqn | notification_flag);
-
- hns_roce_write64_k(doorbell, hr_cq->db_reg);
-
- return 0;
-}
-
-static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
- struct hns_roce_qp **cur_qp, struct ib_wc *wc)
-{
- int qpn;
- int is_send;
- u16 wqe_ctr;
- u32 status;
- u32 opcode;
- struct hns_roce_cqe *cqe;
- struct hns_roce_qp *hr_qp;
- struct hns_roce_wq *wq;
- struct hns_roce_wqe_ctrl_seg *sq_wqe;
- struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
- struct device *dev = &hr_dev->pdev->dev;
-
- /* Find the CQE at the current consumer index */
- cqe = next_cqe_sw(hr_cq);
- if (!cqe)
- return -EAGAIN;
-
- ++hr_cq->cons_index;
- /* Memory barrier */
- rmb();
- /* 0->SQ, 1->RQ */
- is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
-
- /* The local_qpn in a UD CQE is at most 1, so the real QPN must be derived from the port number */
- if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
- qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
- CQE_BYTE_20_PORT_NUM_S) +
- roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S) *
- HNS_ROCE_MAX_PORTS;
- } else {
- qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S);
- }
-
- if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
- hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
- if (unlikely(!hr_qp)) {
- dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
- hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
- return -EINVAL;
- }
-
- *cur_qp = hr_qp;
- }
-
- wc->qp = &(*cur_qp)->ibqp;
- wc->vendor_err = 0;
-
- status = roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
- CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
- HNS_ROCE_CQE_STATUS_MASK;
- switch (status) {
- case HNS_ROCE_CQE_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
- wc->status = IB_WC_LOC_QP_OP_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
- wc->status = IB_WC_BAD_RESP_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
- wc->status = IB_WC_REM_INV_REQ_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
- wc->status = IB_WC_REM_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
- wc->status = IB_WC_REM_OP_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
- wc->status = IB_WC_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
- wc->status = IB_WC_RNR_RETRY_EXC_ERR;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- /* CQE status error, directly return */
- if (wc->status != IB_WC_SUCCESS)
- return 0;
-
- if (is_send) {
- /* The CQE corresponds to an SQ WQE */
- sq_wqe = hns_roce_get_send_wqe(*cur_qp,
- roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S) &
- ((*cur_qp)->sq.wqe_cnt-1));
- switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
- case HNS_ROCE_WQE_OPCODE_SEND:
- wc->opcode = IB_WC_SEND;
- break;
- case HNS_ROCE_WQE_OPCODE_RDMA_READ:
- wc->opcode = IB_WC_RDMA_READ;
- wc->byte_len = le32_to_cpu(cqe->byte_cnt);
- break;
- case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
- wc->opcode = IB_WC_LOCAL_INV;
- break;
- case HNS_ROCE_WQE_OPCODE_UD_SEND:
- wc->opcode = IB_WC_SEND;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
- wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
- IB_WC_WITH_IMM : 0);
-
- wq = &(*cur_qp)->sq;
- if ((*cur_qp)->sq_signal_bits) {
- /*
- * If sq_signal_bits is set, first move the tail pointer
- * up to the WQE this CQE corresponds to.
- */
- wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S);
- wq->tail += (wqe_ctr - (u16)wq->tail) &
- (wq->wqe_cnt - 1);
- }
- wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
- ++wq->tail;
- } else {
- /* The CQE corresponds to an RQ WQE */
- wc->byte_len = le32_to_cpu(cqe->byte_cnt);
- opcode = roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_OPERATION_TYPE_M,
- CQE_BYTE_4_OPERATION_TYPE_S) &
- HNS_ROCE_CQE_OPCODE_MASK;
- switch (opcode) {
- case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
- wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data =
- cpu_to_be32(le32_to_cpu(cqe->immediate_data));
- break;
- case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
- if (roce_get_bit(cqe->cqe_byte_4,
- CQE_BYTE_4_IMM_INDICATOR_S)) {
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cpu_to_be32(
- le32_to_cpu(cqe->immediate_data));
- } else {
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = 0;
- }
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- /* Update tail pointer, record wr_id */
- wq = &(*cur_qp)->rq;
- wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
- ++wq->tail;
- wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
- CQE_BYTE_20_SL_S);
- wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
- CQE_BYTE_20_REMOTE_QPN_M,
- CQE_BYTE_20_REMOTE_QPN_S);
- wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
- CQE_BYTE_20_GRH_PRESENT_S) ?
- IB_WC_GRH : 0);
- wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
- CQE_BYTE_28_P_KEY_IDX_M,
- CQE_BYTE_28_P_KEY_IDX_S);
- }
-
- return 0;
-}
-
-int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-{
- struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- struct hns_roce_qp *cur_qp = NULL;
- unsigned long flags;
- int npolled;
- int ret;
-
- spin_lock_irqsave(&hr_cq->lock, flags);
-
- for (npolled = 0; npolled < num_entries; ++npolled) {
- ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
- if (ret)
- break;
- }
-
- if (npolled) {
- *hr_cq->tptr_addr = hr_cq->cons_index &
- ((hr_cq->cq_depth << 1) - 1);
-
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
- }
-
- spin_unlock_irqrestore(&hr_cq->lock, flags);
-
- if (ret == 0 || ret == -EAGAIN)
- return npolled;
- else
- return ret;
-}
-
-static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj,
- int step_idx)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct device *dev = &hr_dev->pdev->dev;
- long end = HW_SYNC_TIMEOUT_MSECS;
- __le32 bt_cmd_val[2] = {0};
- unsigned long flags = 0;
- void __iomem *bt_cmd;
- u64 bt_ba = 0;
-
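- /*
- * The BT command takes a 4K-aligned base address: drop the low
- * 12 bits here; the rest is split between BT_CMD_L and the BA_H
- * field of BT_CMD_H below.
- */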
- switch (table->type) {
- case HEM_TYPE_QPC:
- bt_ba = priv->bt_table.qpc_buf.map >> 12;
- break;
- case HEM_TYPE_MTPT:
- bt_ba = priv->bt_table.mtpt_buf.map >> 12;
- break;
- case HEM_TYPE_CQC:
- bt_ba = priv->bt_table.cqc_buf.map >> 12;
- break;
- case HEM_TYPE_SRQC:
- dev_dbg(dev, "HEM_TYPE_SRQC is not supported.\n");
- return -EINVAL;
- default:
- return 0;
- }
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
-
- spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
-
- bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
- while (1) {
- if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
- if (!end) {
- dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
- flags);
- return -EBUSY;
- }
- } else {
- break;
- }
- mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
- end -= HW_SYNC_SLEEP_TIME_INTERVAL;
- }
-
- bt_cmd_val[0] = cpu_to_le32(bt_ba);
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
- hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
-
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
-
- return 0;
-}
-
-static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
- enum hns_roce_qp_state cur_state,
- enum hns_roce_qp_state new_state,
- struct hns_roce_qp_context *context,
- struct hns_roce_qp *hr_qp)
-{
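- /*
- * Table-driven transitions: op[cur_state][new_state] selects the
- * mailbox command for each supported transition; a zero entry
- * marks a transition the v1 engine does not support.
- */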
- static const u16
- op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
- [HNS_ROCE_QP_STATE_RST] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
- },
- [HNS_ROCE_QP_STATE_INIT] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- /* Note: In the v1 engine, HW doesn't support INIT2INIT,
- * so the RST2INIT cmd is used instead.
- */
- [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
- [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
- },
- [HNS_ROCE_QP_STATE_RTR] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
- },
- [HNS_ROCE_QP_STATE_RTS] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
- [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
- },
- [HNS_ROCE_QP_STATE_SQD] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
- [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
- },
- [HNS_ROCE_QP_STATE_ERR] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- }
- };
-
- struct hns_roce_cmd_mailbox *mailbox;
- struct device *dev = &hr_dev->pdev->dev;
- int ret;
-
- if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
- new_state >= HNS_ROCE_QP_NUM_STATE ||
- !op[cur_state][new_state]) {
- dev_err(dev, "[modify_qp] unsupported transition from state %d to %d\n",
- cur_state, new_state);
- return -EINVAL;
- }
-
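- /*
- * The 2RST and 2ERR commands take no QP context, so issue them
- * directly (op_modifier 2) without building a mailbox buffer.
- */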
- if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
- return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
- HNS_ROCE_CMD_2RST_QP,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-
- if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
- return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
- HNS_ROCE_CMD_2ERR_QP,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- memcpy(mailbox->buf, context, sizeof(*context));
-
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
- op[cur_state][new_state],
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- return ret;
-}
-
-static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
- u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
-{
- struct ib_device *ibdev = &hr_dev->ib_dev;
- int count;
-
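- /* Offset 0 of the MTR yields the SQ base; the RQ base lives at rq.offset */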
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
- if (count < 1) {
- ibdev_err(ibdev, "Failed to find SQ ba\n");
- return -ENOBUFS;
- }
-
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba,
- 1, NULL);
- if (!count) {
- ibdev_err(ibdev, "Failed to find RQ ba\n");
- return -ENOBUFS;
- }
-
- return 0;
-}
-
-static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_sqp_context *context;
- dma_addr_t dma_handle = 0;
- u32 __iomem *addr;
- u64 sq_ba = 0;
- u64 rq_ba = 0;
- __le32 tmp;
- u32 reg_val;
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- /* Search QP buf's MTTs */
- if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
- goto out;
-
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- roce_set_field(context->qp1c_bytes_4,
- QP1C_BYTES_4_SQ_WQE_SHIFT_M,
- QP1C_BYTES_4_SQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(context->qp1c_bytes_4,
- QP1C_BYTES_4_RQ_WQE_SHIFT_M,
- QP1C_BYTES_4_RQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
- QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
-
- context->sq_rq_bt_l = cpu_to_le32(dma_handle);
- roce_set_field(context->qp1c_bytes_12,
- QP1C_BYTES_12_SQ_RQ_BT_H_M,
- QP1C_BYTES_12_SQ_RQ_BT_H_S,
- upper_32_bits(dma_handle));
-
- roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
- QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
- roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
- QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
- roce_set_bit(context->qp1c_bytes_16,
- QP1C_BYTES_16_SIGNALING_TYPE_S,
- hr_qp->sq_signal_bits);
- roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
- 1);
- roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
- 1);
- roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
- 0);
-
- roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
- QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
- roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
- QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
-
- context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
-
- roce_set_field(context->qp1c_bytes_28,
- QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
- QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
- upper_32_bits(rq_ba));
- roce_set_field(context->qp1c_bytes_28,
- QP1C_BYTES_28_RQ_CUR_IDX_M,
- QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
-
- roce_set_field(context->qp1c_bytes_32,
- QP1C_BYTES_32_RX_CQ_NUM_M,
- QP1C_BYTES_32_RX_CQ_NUM_S,
- to_hr_cq(ibqp->recv_cq)->cqn);
- roce_set_field(context->qp1c_bytes_32,
- QP1C_BYTES_32_TX_CQ_NUM_M,
- QP1C_BYTES_32_TX_CQ_NUM_S,
- to_hr_cq(ibqp->send_cq)->cqn);
-
- context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
-
- roce_set_field(context->qp1c_bytes_40,
- QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
- QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
- upper_32_bits(sq_ba));
- roce_set_field(context->qp1c_bytes_40,
- QP1C_BYTES_40_SQ_CUR_IDX_M,
- QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
-
- /* Copy context to QP1C register */
- addr = (u32 __iomem *)(hr_dev->reg_base +
- ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context));
-
- writel(le32_to_cpu(context->qp1c_bytes_4), addr);
- writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
- writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
- writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
- writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
- writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
- writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
- writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
- writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
- writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
- }
-
- /* Modify QP1C status */
- reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context));
- tmp = cpu_to_le32(reg_val);
- roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
- ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
- reg_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context), reg_val);
-
- hr_qp->state = new_state;
- if (new_state == IB_QPS_RESET) {
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
- ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
- if (ibqp->send_cq != ibqp->recv_cq)
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
- hr_qp->qpn, NULL);
-
- hr_qp->rq.head = 0;
- hr_qp->rq.tail = 0;
- hr_qp->sq.head = 0;
- hr_qp->sq.tail = 0;
- }
-
- kfree(context);
- return 0;
-
-out:
- kfree(context);
- return -EINVAL;
-}
-
-static bool check_qp_state(enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
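- /* Transitions accepted by the v1 engine; anything not marked true is rejected */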
- static const bool sm[][IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
- [IB_QPS_INIT] = true },
- [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
- [IB_QPS_INIT] = true,
- [IB_QPS_RTR] = true,
- [IB_QPS_ERR] = true },
- [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
- [IB_QPS_RTS] = true,
- [IB_QPS_ERR] = true },
- [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
- [IB_QPS_SQD] = {},
- [IB_QPS_SQE] = {},
- [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
- };
-
- return sm[cur_state][new_state];
-}
-
-static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_qp_context *context;
- const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
- dma_addr_t dma_handle_2 = 0;
- dma_addr_t dma_handle = 0;
- __le32 doorbell[2] = {0};
- u64 *mtts_2 = NULL;
- int ret = -EINVAL;
- const u8 *smac;
- u64 sq_ba = 0;
- u64 rq_ba = 0;
- u32 port;
- u32 port_num;
- u8 *dmac;
-
- if (!check_qp_state(cur_state, new_state)) {
- ibdev_err(ibqp->device,
- "unsupported QP(%u) state transition from %d to %d\n",
- ibqp->qp_num, cur_state, new_state);
- return -EINVAL;
- }
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- /* Search qp buf's mtts */
- if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
- goto out;
-
- /* Search IRRL's mtts */
- mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
- hr_qp->qpn, &dma_handle_2);
- if (mtts_2 == NULL) {
- dev_err(dev, "qp irrl_table find failed\n");
- goto out;
- }
-
- /*
- * Reset to init
- * Mandatory param:
- * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
- * Optional param: NA
- */
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
- to_hr_qp_type(hr_qp->ibqp.qp_type));
-
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
- );
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
- );
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_PD_M,
- QP_CONTEXT_QPC_BYTES_4_PD_S,
- to_hr_pd(ibqp->pd)->pdn);
- hr_qp->access_flags = attr->qp_access_flags;
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
- to_hr_cq(ibqp->send_cq)->cqn);
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
- to_hr_cq(ibqp->recv_cq)->cqn);
-
- if (ibqp->srq)
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
- to_hr_srq(ibqp->srq)->srqn);
-
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
- attr->pkey_index);
- hr_qp->pkey_index = attr->pkey_index;
- roce_set_field(context->qpc_bytes_16,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
- } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
- to_hr_qp_type(hr_qp->ibqp.qp_type));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
- if (attr_mask & IB_QP_ACCESS_FLAGS) {
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
- !!(attr->qp_access_flags &
- IB_ACCESS_REMOTE_READ));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
- !!(attr->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE));
- } else {
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
- !!(hr_qp->access_flags &
- IB_ACCESS_REMOTE_READ));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
- !!(hr_qp->access_flags &
- IB_ACCESS_REMOTE_WRITE));
- }
-
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_PD_M,
- QP_CONTEXT_QPC_BYTES_4_PD_S,
- to_hr_pd(ibqp->pd)->pdn);
-
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
- to_hr_cq(ibqp->send_cq)->cqn);
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
- to_hr_cq(ibqp->recv_cq)->cqn);
-
- if (ibqp->srq)
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
- to_hr_srq(ibqp->srq)->srqn);
- if (attr_mask & IB_QP_PKEY_INDEX)
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
- attr->pkey_index);
- else
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
- hr_qp->pkey_index);
-
- roce_set_field(context->qpc_bytes_16,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
- } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
- if ((attr_mask & IB_QP_ALT_PATH) ||
- (attr_mask & IB_QP_ACCESS_FLAGS) ||
- (attr_mask & IB_QP_PKEY_INDEX) ||
- (attr_mask & IB_QP_QKEY)) {
- dev_err(dev, "INIT2RTR attr_mask error\n");
- goto out;
- }
-
- dmac = (u8 *)attr->ah_attr.roce.dmac;
-
- context->sq_rq_bt_l = cpu_to_le32(dma_handle);
- roce_set_field(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
- QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
- upper_32_bits(dma_handle));
- roce_set_bit(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
- 1);
- roce_set_field(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
- attr->min_rnr_timer);
- context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
- roce_set_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
- QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
- ((u32)(dma_handle_2 >> 32)) &
- QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
- roce_set_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
- QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
- 1);
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
- hr_qp->sq_signal_bits);
-
- port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
- hr_qp->port;
- smac = (const u8 *)hr_dev->dev_addr[port];
- /* Loopback is required when the dmac equals the smac or loop_idc is 1 */
- if (ether_addr_equal_unaligned(dmac, smac) ||
- hr_dev->loop_idc == 0x1)
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
-
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
- rdma_ah_get_ah_flags(&attr->ah_attr));
- roce_set_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
- ilog2((unsigned int)attr->max_dest_rd_atomic));
-
- if (attr_mask & IB_QP_DEST_QPN)
- roce_set_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
- attr->dest_qp_num);
-
- /* Configure GID index */
- port_num = rdma_ah_get_port_num(&attr->ah_attr);
- roce_set_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
- hns_get_gid_index(hr_dev,
- port_num - 1,
- grh->sgid_index));
-
- memcpy(&(context->dmac_l), dmac, 4);
-
- roce_set_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
- *((u16 *)(&dmac[4])));
- roce_set_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
- QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
- rdma_ah_get_static_rate(&attr->ah_attr));
- roce_set_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
- grh->hop_limit);
-
- roce_set_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
- grh->flow_label);
- roce_set_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
- grh->traffic_class);
- roce_set_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_MTU_M,
- QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
-
- memcpy(context->dgid, grh->dgid.raw,
- sizeof(grh->dgid.raw));
-
- dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
- roce_get_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
-
- roce_set_field(context->qpc_bytes_68,
- QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
- QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_set_field(context->qpc_bytes_68,
- QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
-
- context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
-
- roce_set_field(context->qpc_bytes_76,
- QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
- QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
- upper_32_bits(rq_ba));
- roce_set_field(context->qpc_bytes_76,
- QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
- QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
-
- context->rx_rnr_time = 0;
-
- roce_set_field(context->qpc_bytes_84,
- QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
- QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
- attr->rq_psn - 1);
- roce_set_field(context->qpc_bytes_84,
- QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
- QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
-
- roce_set_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
- attr->rq_psn);
- roce_set_bit(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
- roce_set_bit(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
- roce_set_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
- 0);
- roce_set_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
- 0);
-
- context->dma_length = 0;
- context->r_key = 0;
- context->va_l = 0;
- context->va_h = 0;
-
- roce_set_field(context->qpc_bytes_108,
- QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
- QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
- roce_set_bit(context->qpc_bytes_108,
- QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
- roce_set_bit(context->qpc_bytes_108,
- QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
-
- roce_set_field(context->qpc_bytes_112,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
- roce_set_field(context->qpc_bytes_112,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
-
- /* For chip resp ack */
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->phy_port);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S,
- rdma_ah_get_sl(&attr->ah_attr));
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
- } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
-		/* Return an error if any optional attribute is set */
- if ((attr_mask & IB_QP_ALT_PATH) ||
- (attr_mask & IB_QP_ACCESS_FLAGS) ||
- (attr_mask & IB_QP_QKEY) ||
- (attr_mask & IB_QP_PATH_MIG_STATE) ||
- (attr_mask & IB_QP_CUR_STATE) ||
- (attr_mask & IB_QP_MIN_RNR_TIMER)) {
- dev_err(dev, "RTR2RTS attr_mask error\n");
- goto out;
- }
-
- context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
-
- roce_set_field(context->qpc_bytes_120,
- QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
- QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
- upper_32_bits(sq_ba));
-
- roce_set_field(context->qpc_bytes_124,
- QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
- QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
- roce_set_field(context->qpc_bytes_124,
- QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
- QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
-
- roce_set_field(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
- QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
- attr->sq_psn);
- roce_set_bit(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
- roce_set_field(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
- QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
- 0);
- roce_set_bit(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
-
- roce_set_field(context->qpc_bytes_132,
- QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
- QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
- roce_set_field(context->qpc_bytes_132,
- QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
- QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
-
- roce_set_field(context->qpc_bytes_136,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
- attr->sq_psn);
- roce_set_field(context->qpc_bytes_136,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
- attr->sq_psn);
-
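-		/*
-		 * The first-packet PSN is split across two fields: the low
-		 * SQ_PSN_SHIFT bits live in qpc_bytes_136 above, and the
-		 * remaining high bits are programmed here.
-		 */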
- roce_set_field(context->qpc_bytes_140,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
- (attr->sq_psn >> SQ_PSN_SHIFT));
- roce_set_field(context->qpc_bytes_140,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
- roce_set_bit(context->qpc_bytes_140,
- QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
-
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
- QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
- attr->retry_cnt);
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
- attr->rnr_retry);
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_LSN_M,
- QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
-
- context->rnr_retry = 0;
-
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
- QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
- attr->retry_cnt);
- if (attr->timeout < 0x12) {
-			dev_info(dev, "ack timeout value(0x%x) must be at least 0x12.\n",
- attr->timeout);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
- 0x12);
- } else {
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
- attr->timeout);
- }
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
- QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
- attr->rnr_retry);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->phy_port);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S,
- rdma_ah_get_sl(&attr->ah_attr));
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
- ilog2((unsigned int)attr->max_rd_atomic));
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
- context->pkt_use_len = 0;
-
- roce_set_field(context->qpc_bytes_164,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
- roce_set_field(context->qpc_bytes_164,
- QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
- QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
-
- roce_set_field(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
- QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
- attr->sq_psn);
- roce_set_field(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
- QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
- roce_set_field(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
- QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
- roce_set_bit(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
- roce_set_bit(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
- roce_set_bit(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
- context->sge_use_len = 0;
-
- roce_set_field(context->qpc_bytes_176,
- QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
- roce_set_field(context->qpc_bytes_176,
- QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
- 0);
- roce_set_field(context->qpc_bytes_180,
- QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
- roce_set_field(context->qpc_bytes_180,
- QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
- QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
-
- context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
-
- roce_set_field(context->qpc_bytes_188,
- QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
- QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
- upper_32_bits(sq_ba));
- roce_set_bit(context->qpc_bytes_188,
- QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
- roce_set_field(context->qpc_bytes_188,
- QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
- 0);
- }
-
-	/* Every state transition must update the QP state field */
- roce_set_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
-
- /* SW pass context to HW */
- ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state),
- to_hns_roce_state(new_state), context,
- hr_qp);
- if (ret) {
- dev_err(dev, "hns_roce_qp_modify failed\n");
- goto out;
- }
-
-	/*
-	 * The driver uses rst2init instead of init2init, so the hardware
-	 * needs to refresh the RQ head via the doorbell again.
-	 */
- if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
- RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
- roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
- RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
- roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
- RQ_DOORBELL_U32_8_CMD_S, 1);
- roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
-
- if (ibqp->uobject) {
- hr_qp->rq.db_reg = hr_dev->reg_base +
- hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
- }
-
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
- }
-
- hr_qp->state = new_state;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- hr_qp->resp_depth = attr->max_dest_rd_atomic;
- if (attr_mask & IB_QP_PORT) {
- hr_qp->port = attr->port_num - 1;
- hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
- }
-
- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
- ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
- if (ibqp->send_cq != ibqp->recv_cq)
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
- hr_qp->qpn, NULL);
-
- hr_qp->rq.head = 0;
- hr_qp->rq.tail = 0;
- hr_qp->sq.head = 0;
- hr_qp->sq.tail = 0;
- }
-out:
- kfree(context);
- return ret;
-}
-
-static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr, int attr_mask,
- enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
- if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
- return -EOPNOTSUPP;
-
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
- return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
- new_state);
- else
- return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
- new_state);
-}
-
-static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
-{
- switch (state) {
- case HNS_ROCE_QP_STATE_RST:
- return IB_QPS_RESET;
- case HNS_ROCE_QP_STATE_INIT:
- return IB_QPS_INIT;
- case HNS_ROCE_QP_STATE_RTR:
- return IB_QPS_RTR;
- case HNS_ROCE_QP_STATE_RTS:
- return IB_QPS_RTS;
- case HNS_ROCE_QP_STATE_SQD:
- return IB_QPS_SQD;
- case HNS_ROCE_QP_STATE_ERR:
- return IB_QPS_ERR;
- default:
- return IB_QPS_ERR;
- }
-}
-
-static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_qp_context *hr_context)
-{
- struct hns_roce_cmd_mailbox *mailbox;
- int ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
- HNS_ROCE_CMD_QUERY_QP,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
- if (!ret)
- memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
- else
- dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
-
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return ret;
-}
-
-static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_sqp_context context;
- u32 addr;
-
- mutex_lock(&hr_qp->mutex);
-
- if (hr_qp->state == IB_QPS_RESET) {
- qp_attr->qp_state = IB_QPS_RESET;
- goto done;
- }
-
- addr = ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(struct hns_roce_sqp_context);
- context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
- context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
- context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
- context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
- context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
- context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
- context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
- context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
- context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
- context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));
-
- hr_qp->state = roce_get_field(context.qp1c_bytes_4,
- QP1C_BYTES_4_QP_STATE_M,
- QP1C_BYTES_4_QP_STATE_S);
- qp_attr->qp_state = hr_qp->state;
- qp_attr->path_mtu = IB_MTU_256;
- qp_attr->path_mig_state = IB_MIG_ARMED;
- qp_attr->qkey = QKEY_VAL;
- qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- qp_attr->rq_psn = 0;
- qp_attr->sq_psn = 0;
- qp_attr->dest_qp_num = 1;
- qp_attr->qp_access_flags = 6;
-
- qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
- QP1C_BYTES_20_PKEY_IDX_M,
- QP1C_BYTES_20_PKEY_IDX_S);
- qp_attr->port_num = hr_qp->port + 1;
- qp_attr->sq_draining = 0;
- qp_attr->max_rd_atomic = 0;
- qp_attr->max_dest_rd_atomic = 0;
- qp_attr->min_rnr_timer = 0;
- qp_attr->timeout = 0;
- qp_attr->retry_cnt = 0;
- qp_attr->rnr_retry = 0;
- qp_attr->alt_timeout = 0;
-
-done:
- qp_attr->cur_qp_state = qp_attr->qp_state;
- qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
- qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
- qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
- qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
- qp_attr->cap.max_inline_data = 0;
- qp_init_attr->cap = qp_attr->cap;
- qp_init_attr->create_flags = 0;
-
- mutex_unlock(&hr_qp->mutex);
-
- return 0;
-}
-
-static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_qp_context *context;
- int tmp_qp_state;
- int ret = 0;
- int state;
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- memset(qp_attr, 0, sizeof(*qp_attr));
- memset(qp_init_attr, 0, sizeof(*qp_init_attr));
-
- mutex_lock(&hr_qp->mutex);
-
- if (hr_qp->state == IB_QPS_RESET) {
- qp_attr->qp_state = IB_QPS_RESET;
- goto done;
- }
-
- ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
- if (ret) {
- dev_err(dev, "query qpc error\n");
- ret = -EINVAL;
- goto out;
- }
-
- state = roce_get_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
- tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
- if (tmp_qp_state == -1) {
- dev_err(dev, "to_ib_qp_state error\n");
- ret = -EINVAL;
- goto out;
- }
- hr_qp->state = (u8)tmp_qp_state;
- qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
- qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_MTU_M,
- QP_CONTEXT_QPC_BYTES_48_MTU_S);
- qp_attr->path_mig_state = IB_MIG_ARMED;
- qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- if (hr_qp->ibqp.qp_type == IB_QPT_UD)
- qp_attr->qkey = QKEY_VAL;
-
- qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
- qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
- qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
- qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
- ((roce_get_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
- ((roce_get_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
-
- if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
- struct ib_global_route *grh =
- rdma_ah_retrieve_grh(&qp_attr->ah_attr);
-
- rdma_ah_set_sl(&qp_attr->ah_attr,
- roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S));
- rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
- grh->flow_label =
- roce_get_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
- grh->sgid_index =
- roce_get_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
- grh->hop_limit =
- roce_get_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
- grh->traffic_class =
- roce_get_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
-
- memcpy(grh->dgid.raw, context->dgid,
- sizeof(grh->dgid.raw));
- }
-
- qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
- qp_attr->port_num = hr_qp->port + 1;
- qp_attr->sq_draining = 0;
- qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
- qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
- qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
- qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
- qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
- qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
-
-done:
- qp_attr->cur_qp_state = qp_attr->qp_state;
- qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
- qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
-
- if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
- qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
- } else {
- qp_attr->cap.max_send_wr = 0;
- qp_attr->cap.max_send_sge = 0;
- }
-
- qp_init_attr->cap = qp_attr->cap;
-
-out:
- mutex_unlock(&hr_qp->mutex);
- kfree(context);
- return ret;
-}
-
-static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
-
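-	/*
-	 * doorbell_qpn <= 1 identifies the special QP0/QP1 (SMI/GSI),
-	 * whose context lives in the QP1C registers rather than in a
-	 * regular QPC entry.
-	 */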
- return hr_qp->doorbell_qpn <= 1 ?
- hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
- hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
-}
-
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_cq *send_cq, *recv_cq;
- int ret;
-
- ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
- if (ret)
- return ret;
-
- send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
- recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
-
- hns_roce_lock_cqs(send_cq, recv_cq);
- if (!udata) {
- if (recv_cq)
- __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn,
- (hr_qp->ibqp.srq ?
- to_hr_srq(hr_qp->ibqp.srq) :
- NULL));
-
- if (send_cq && send_cq != recv_cq)
- __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
- }
- hns_roce_qp_remove(hr_dev, hr_qp);
- hns_roce_unlock_cqs(send_cq, recv_cq);
-
- hns_roce_qp_destroy(hr_dev, hr_qp, udata);
-
- return 0;
-}
-
-static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
- struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- struct device *dev = &hr_dev->pdev->dev;
- u32 cqe_cnt_ori;
- u32 cqe_cnt_cur;
- int wait_time = 0;
-
-	/*
-	 * Before freeing the cq buffer, we need to ensure that all
-	 * outstanding CQEs have been written by checking the CQE counter.
-	 */
- cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
- while (1) {
- if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
- HNS_ROCE_CQE_WCMD_EMPTY_BIT)
- break;
-
- cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
- if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
- break;
-
- msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
- if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
- dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
- hr_cq->cqn);
- break;
- }
- wait_time++;
- }
- return 0;
-}
-
-static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not)
-{
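-	/*
-	 * The EQ doorbell carries the consumer index in its low 16 bits
-	 * (HNS_ROCE_V1_CONS_IDX_M), with the request-notification flag
-	 * placed just above the index width.
-	 */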
- roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
- (req_not << eq->log_entries), eq->db_reg);
-}
-
-static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe, int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_warn(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_warn(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_warn(dev, "QP %d, WQE shift error\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SL_ERROR:
- dev_warn(dev, "QP %d, SL error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_PORT_ERROR:
- dev_warn(dev, "QP %d, port error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Access Violation Work Queue Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_warn(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_warn(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_warn(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_warn(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_warn(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_warn(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- int phy_port;
- int qpn;
-
- qpn = roce_get_field(aeqe->event.queue_event.num,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
- phy_port = roce_get_field(aeqe->event.queue_event.num,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
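-	/* QP0/QP1 are per-port, so fold the physical port into the QPN */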
- if (qpn <= 1)
- qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
- "QP %d, phy_port %d.\n", qpn, phy_port);
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
- break;
- default:
- break;
- }
-
- hns_roce_qp_event(hr_dev, qpn, event_type);
-}
-
-static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u32 cqn;
-
- cqn = roce_get_field(aeqe->event.queue_event.num,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%x access err.\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
- break;
- default:
- break;
- }
-
- hns_roce_cq_event(hr_dev, cqn, event_type);
-}
-
-static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
- dev_warn(dev, "SDB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
- dev_warn(dev, "SDB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
- dev_warn(dev, "ODB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
- dev_warn(dev, "ODB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
-		dev_warn(dev, "ODB almost empty.\n");
- break;
- default:
- break;
- }
-}
-
-static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE;
-
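-	/*
-	 * The EQ ring is allocated as HNS_ROCE_BA_SIZE chunks, so the byte
-	 * offset selects a chunk (off / HNS_ROCE_BA_SIZE) and then the
-	 * position inside it (off % HNS_ROCE_BA_SIZE).
-	 */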
- return (struct hns_roce_aeqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
-{
- struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
-
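-	/*
-	 * Hardware toggles the owner bit on every pass over the ring, so
-	 * an entry is valid when its owner bit differs from the pass
-	 * parity given by (cons_index & entries).
-	 */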
- return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
- !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
-}
-
-static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_aeqe *aeqe;
- int aeqes_found = 0;
- int event_type;
-
- while ((aeqe = next_aeqe_sw_v1(eq))) {
- /* Make sure we read the AEQ entry after we have checked the
- * ownership bit
- */
- dma_rmb();
-
- dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n",
- aeqe,
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- event_type = roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_warn(dev, "PATH MIG not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- dev_warn(dev, "COMMUNICATION established\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "SQ DRAINED not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "PATH MIG failed\n");
- break;
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
- case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
-			dev_warn(dev, "SRQ is not supported!\n");
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
- dev_warn(dev, "port change.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_MB:
- hns_roce_cmd_event(hr_dev,
- le16_to_cpu(aeqe->event.cmd.token),
- aeqe->event.cmd.status,
- le64_to_cpu(aeqe->event.cmd.out_param
- ));
- break;
- case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
- break;
- default:
- dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
- event_type, eq->eqn, eq->cons_index);
- break;
- }
-
- eq->cons_index++;
- aeqes_found = 1;
-
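-		/*
-		 * cons_index counts across two passes of the ring so that
-		 * its pass bit tracks the ownership parity; wrap it at
-		 * twice the EQ depth.
-		 */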
- if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1)
- eq->cons_index = 0;
- }
-
- set_eq_cons_index_v1(eq, 0);
-
- return aeqes_found;
-}
-
-static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE;
-
- return (struct hns_roce_ceqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
-
- return (!!(roce_get_bit(ceqe->comp,
- HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
- (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
-}
-
-static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe;
- int ceqes_found = 0;
- u32 cqn;
-
- while ((ceqe = next_ceqe_sw_v1(eq))) {
- /* Make sure we read CEQ entry after we have checked the
- * ownership bit
- */
- dma_rmb();
-
- cqn = roce_get_field(ceqe->comp,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
- hns_roce_cq_completion(hr_dev, cqn);
-
- ++eq->cons_index;
- ceqes_found = 1;
-
- if (eq->cons_index >
- EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1)
- eq->cons_index = 0;
- }
-
- set_eq_cons_index_v1(eq, 0);
-
- return ceqes_found;
-}
-
-static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
-{
- struct hns_roce_eq *eq = eq_ptr;
- struct hns_roce_dev *hr_dev = eq->hr_dev;
- int int_work;
-
- if (eq->type_flag == HNS_ROCE_CEQ)
-		/* CEQ IRQ handler; CEQ is a pulse interrupt, no clearing needed */
- int_work = hns_roce_v1_ceq_int(hr_dev, eq);
- else
-		/* AEQ IRQ handler; AEQ is a pulse interrupt, no clearing needed */
- int_work = hns_roce_v1_aeq_int(hr_dev, eq);
-
- return IRQ_RETVAL(int_work);
-}
-
-static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
-{
- struct hns_roce_dev *hr_dev = dev_id;
- struct device *dev = &hr_dev->pdev->dev;
- int int_work = 0;
- u32 caepaemask_val;
- u32 cealmovf_val;
- u32 caepaest_val;
- u32 aeshift_val;
- u32 ceshift_val;
- u32 cemask_val;
- __le32 tmp;
- int i;
-
-	/*
-	 * Abnormal interrupts (AEQ overflow, ECC multi-bit error, CEQ
-	 * overflow) must be cleared explicitly: mask the irq, clear the
-	 * interrupt state, then cancel the mask.
-	 */
- aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
- tmp = cpu_to_le32(aeshift_val);
-
- /* AEQE overflow */
- if (roce_get_bit(tmp,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "AEQ overflow!\n");
-
- /* Set mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- tmp = cpu_to_le32(caepaemask_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- caepaemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
- tmp = cpu_to_le32(caepaest_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
- caepaest_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
-
- /* Clear mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- tmp = cpu_to_le32(caepaemask_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- caepaemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
- }
-
- /* CEQ almost overflow */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(ceshift_val);
-
- if (roce_get_bit(tmp,
- ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
- int_work++;
-
- /* Set mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(cemask_val);
- roce_set_bit(tmp,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- cemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- cealmovf_val = roce_read(hr_dev,
- ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(cealmovf_val);
- roce_set_bit(tmp,
- ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
- 1);
- cealmovf_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET, cealmovf_val);
-
- /* Clear mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(cemask_val);
- roce_set_bit(tmp,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- cemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
- }
- }
-
- /* ECC multi-bit error alarm */
- dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
-
- dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
-
- return IRQ_RETVAL(int_work);
-}
-
-static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
-{
- u32 aemask_val;
- int masken = 0;
- __le32 tmp;
- int i;
-
- /* AEQ INT */
- aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- tmp = cpu_to_le32(aemask_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- masken);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
- aemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
-
- /* CEQ INT */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- /* IRQ mask */
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, masken);
- }
-}
-
-static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
- int i;
-
- if (!eq->buf_list)
- return;
-
- for (i = 0; i < npages; ++i)
- dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
- eq->buf_list[i].buf, eq->buf_list[i].map);
-
- kfree(eq->buf_list);
-}
-
-static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
- int enable_flag)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
- __le32 tmp;
- u32 val;
-
- val = readl(eqc);
- tmp = cpu_to_le32(val);
-
- if (enable_flag)
- roce_set_field(tmp,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_VALID);
- else
- roce_set_field(tmp,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
-
- val = le32_to_cpu(tmp);
- writel(val, eqc);
-}
-
-static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t tmp_dma_addr;
- u32 eqcuridx_val;
- u32 eqconsindx_val;
- u32 eqshift_val;
- __le32 tmp2 = 0;
- __le32 tmp1 = 0;
- __le32 tmp = 0;
- int num_bas;
- int ret;
- int i;
-
- num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
-		dev_err(dev, "eq buf size %d exceeds ba size (%d), bas needed: %d\n",
- (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
- num_bas);
- return -EINVAL;
- }
-
- eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
- if (!eq->buf_list)
- return -ENOMEM;
-
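-	/* Allocate the EQ ring as one DMA-coherent buffer per BA chunk */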
- for (i = 0; i < num_bas; ++i) {
- eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
- &tmp_dma_addr,
- GFP_KERNEL);
- if (!eq->buf_list[i].buf) {
- ret = -ENOMEM;
- goto err_out_free_pages;
- }
-
- eq->buf_list[i].map = tmp_dma_addr;
- }
- eq->cons_index = 0;
- roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
- eq->log_entries);
- eqshift_val = le32_to_cpu(tmp);
- writel(eqshift_val, eqc);
-
- /* Configure eq extended address 12~44bit */
- writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
-
-	/*
-	 * Configure eq extended address bits 45~49.
-	 * 44 = 32 + 12: the address is shifted right by 12 because of the
-	 * 4K page size, and by a further 32 to extract the high 32-bit
-	 * value written to the hardware.
-	 */
- roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
- eq->buf_list[0].map >> 44);
- roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
- eqcuridx_val = le32_to_cpu(tmp1);
- writel(eqcuridx_val, eqc + 8);
-
- /* Configure eq consumer index */
- roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
- eqconsindx_val = le32_to_cpu(tmp2);
- writel(eqconsindx_val, eqc + 0xc);
-
- return 0;
-
-err_out_free_pages:
- for (i -= 1; i >= 0; i--)
- dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
- eq->buf_list[i].map);
-
- kfree(eq->buf_list);
- return ret;
-}
-
-static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_eq *eq;
- int irq_num;
- int eq_num;
- int ret;
- int i, j;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- irq_num = eq_num + hr_dev->caps.num_other_vectors;
-
- eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
- if (!eq_table->eq)
- return -ENOMEM;
-
- eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
- GFP_KERNEL);
- if (!eq_table->eqc_base) {
- ret = -ENOMEM;
- goto err_eqc_base_alloc_fail;
- }
-
- for (i = 0; i < eq_num; i++) {
- eq = &eq_table->eq[i];
- eq->hr_dev = hr_dev;
- eq->eqn = i;
- eq->irq = hr_dev->irq[i];
- eq->log_page_size = PAGE_SHIFT;
-
- if (i < hr_dev->caps.num_comp_vectors) {
- /* CEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_SHIFT_0_REG +
- CEQ_REG_OFFSET * i;
- eq->type_flag = HNS_ROCE_CEQ;
- eq->db_reg = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
- CEQ_REG_OFFSET * i;
- eq->entries = hr_dev->caps.ceqe_depth;
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = HNS_ROCE_CEQE_SIZE;
- } else {
- /* AEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
- eq->type_flag = HNS_ROCE_AEQ;
- eq->db_reg = hr_dev->reg_base +
- ROCEE_CAEP_AEQE_CONS_IDX_REG;
- eq->entries = hr_dev->caps.aeqe_depth;
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = HNS_ROCE_AEQE_SIZE;
- }
- }
-
- /* Disable irq */
- hns_roce_v1_int_mask_enable(hr_dev);
-
- /* Configure ce int interval */
- roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_INTERVAL);
-
- /* Configure ce int burst num */
- roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
-
- for (i = 0; i < eq_num; i++) {
- ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
- if (ret) {
- dev_err(dev, "eq create failed\n");
- goto err_create_eq_fail;
- }
- }
-
- for (j = 0; j < irq_num; j++) {
- if (j < eq_num)
- ret = request_irq(hr_dev->irq[j],
- hns_roce_v1_msix_interrupt_eq, 0,
- hr_dev->irq_names[j],
- &eq_table->eq[j]);
- else
- ret = request_irq(hr_dev->irq[j],
- hns_roce_v1_msix_interrupt_abn, 0,
- hr_dev->irq_names[j], hr_dev);
-
- if (ret) {
- dev_err(dev, "request irq error!\n");
- goto err_request_irq_fail;
- }
- }
-
- for (i = 0; i < eq_num; i++)
- hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
-
- return 0;
-
-err_request_irq_fail:
- for (j -= 1; j >= 0; j--)
- free_irq(hr_dev->irq[j], &eq_table->eq[j]);
-
-err_create_eq_fail:
- for (i -= 1; i >= 0; i--)
- hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
-
- kfree(eq_table->eqc_base);
-
-err_eqc_base_alloc_fail:
- kfree(eq_table->eq);
-
- return ret;
-}
-
-static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- int irq_num;
- int eq_num;
- int i;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- irq_num = eq_num + hr_dev->caps.num_other_vectors;
- for (i = 0; i < eq_num; i++) {
- /* Disable EQ */
- hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
-
- free_irq(hr_dev->irq[i], &eq_table->eq[i]);
-
- hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
- }
- for (i = eq_num; i < irq_num; i++)
- free_irq(hr_dev->irq[i], hr_dev);
-
- kfree(eq_table->eqc_base);
- kfree(eq_table->eq);
-}
-
-static const struct ib_device_ops hns_roce_v1_dev_ops = {
- .destroy_qp = hns_roce_v1_destroy_qp,
- .poll_cq = hns_roce_v1_poll_cq,
- .post_recv = hns_roce_v1_post_recv,
- .post_send = hns_roce_v1_post_send,
- .query_qp = hns_roce_v1_query_qp,
- .req_notify_cq = hns_roce_v1_req_notify_cq,
-};
-
-static const struct hns_roce_hw hns_roce_hw_v1 = {
- .reset = hns_roce_v1_reset,
- .hw_profile = hns_roce_v1_profile,
- .hw_init = hns_roce_v1_init,
- .hw_exit = hns_roce_v1_exit,
- .post_mbox = hns_roce_v1_post_mbox,
- .poll_mbox_done = hns_roce_v1_chk_mbox,
- .set_gid = hns_roce_v1_set_gid,
- .set_mac = hns_roce_v1_set_mac,
- .set_mtu = hns_roce_v1_set_mtu,
- .write_mtpt = hns_roce_v1_write_mtpt,
- .write_cqc = hns_roce_v1_write_cqc,
- .set_hem = hns_roce_v1_set_hem,
- .clear_hem = hns_roce_v1_clear_hem,
- .modify_qp = hns_roce_v1_modify_qp,
- .dereg_mr = hns_roce_v1_dereg_mr,
- .destroy_cq = hns_roce_v1_destroy_cq,
- .init_eq = hns_roce_v1_init_eq_table,
- .cleanup_eq = hns_roce_v1_cleanup_eq_table,
- .hns_roce_dev_ops = &hns_roce_v1_dev_ops,
-};
-
-static const struct of_device_id hns_roce_of_match[] = {
- { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
- {},
-};
-MODULE_DEVICE_TABLE(of, hns_roce_of_match);
-
-static const struct acpi_device_id hns_roce_acpi_match[] = {
- { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
- {},
-};
-MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
-
-static struct
-platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
-{
- struct device *dev;
-
- /* get the 'device' corresponding to the matching 'fwnode' */
- dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
- /* get the platform device */
- return dev ? to_platform_device(dev) : NULL;
-}
-
-static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct platform_device *pdev = NULL;
- struct net_device *netdev = NULL;
- struct device_node *net_node;
- int port_cnt = 0;
- u8 phy_port;
- int ret;
- int i;
-
- /* check if we are compatible with the underlying SoC */
- if (dev_of_node(dev)) {
- const struct of_device_id *of_id;
-
- of_id = of_match_node(hns_roce_of_match, dev->of_node);
- if (!of_id) {
- dev_err(dev, "device is not compatible!\n");
- return -ENXIO;
- }
- hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
- if (!hr_dev->hw) {
- dev_err(dev, "couldn't get H/W specific DT data!\n");
- return -ENXIO;
- }
- } else if (is_acpi_device_node(dev->fwnode)) {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
- if (!acpi_id) {
- dev_err(dev, "device is not compatible!\n");
- return -ENXIO;
- }
- hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
- if (!hr_dev->hw) {
- dev_err(dev, "couldn't get H/W specific ACPI data!\n");
- return -ENXIO;
- }
- } else {
- dev_err(dev, "can't read compatibility data from DT or ACPI\n");
- return -ENXIO;
- }
-
- /* get the mapped register base address */
- hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
- if (IS_ERR(hr_dev->reg_base))
- return PTR_ERR(hr_dev->reg_base);
-
- /* read the node_guid of IB device from the DT or ACPI */
- ret = device_property_read_u8_array(dev, "node-guid",
- (u8 *)&hr_dev->ib_dev.node_guid,
- GUID_LEN);
- if (ret) {
- dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
- return ret;
- }
-
- /* get the RoCE associated ethernet ports or netdevices */
- for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
- if (dev_of_node(dev)) {
- net_node = of_parse_phandle(dev->of_node, "eth-handle",
- i);
- if (!net_node)
- continue;
- pdev = of_find_device_by_node(net_node);
- } else if (is_acpi_device_node(dev->fwnode)) {
- struct fwnode_reference_args args;
-
- ret = acpi_node_get_property_reference(dev->fwnode,
- "eth-handle",
- i, &args);
- if (ret)
- continue;
- pdev = hns_roce_find_pdev(args.fwnode);
- } else {
- dev_err(dev, "cannot read data from DT or ACPI\n");
- return -ENXIO;
- }
-
- if (pdev) {
- netdev = platform_get_drvdata(pdev);
- phy_port = (u8)i;
- if (netdev) {
- hr_dev->iboe.netdevs[port_cnt] = netdev;
- hr_dev->iboe.phy_port[port_cnt] = phy_port;
- } else {
- dev_err(dev, "no netdev found with pdev %s\n",
- pdev->name);
- return -ENODEV;
- }
- port_cnt++;
- }
- }
-
- if (port_cnt == 0) {
- dev_err(dev, "unable to get eth-handle for available ports!\n");
- return -EINVAL;
- }
-
- hr_dev->caps.num_ports = port_cnt;
-
- /* cmd issue mode: 0 is poll, 1 is event */
- hr_dev->cmd_mod = 1;
- hr_dev->loop_idc = 0;
- hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
- hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
-
- /* read the interrupt names from the DT or ACPI */
- ret = device_property_read_string_array(dev, "interrupt-names",
- hr_dev->irq_names,
- HNS_ROCE_V1_MAX_IRQ_NUM);
- if (ret < 0) {
- dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
- return ret;
- }
-
- /* fetch the interrupt numbers */
- for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
- hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
- if (hr_dev->irq[i] <= 0)
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * hns_roce_probe - RoCE driver entry point
- * @pdev: pointer to platform device
- *
- * Return: 0 on success, or a negative errno on failure.
- */
-static int hns_roce_probe(struct platform_device *pdev)
-{
- int ret;
- struct hns_roce_dev *hr_dev;
- struct device *dev = &pdev->dev;
-
- hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
- if (!hr_dev)
- return -ENOMEM;
-
- hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
- if (!hr_dev->priv) {
- ret = -ENOMEM;
- goto error_failed_kzalloc;
- }
-
- hr_dev->pdev = pdev;
- hr_dev->dev = dev;
- platform_set_drvdata(pdev, hr_dev);
-
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
-		dev_err(dev, "No usable DMA addressing mode\n");
- ret = -EIO;
- goto error_failed_get_cfg;
- }
-
- ret = hns_roce_get_cfg(hr_dev);
- if (ret) {
- dev_err(dev, "Get Configuration failed!\n");
- goto error_failed_get_cfg;
- }
-
- ret = hns_roce_init(hr_dev);
- if (ret) {
- dev_err(dev, "RoCE engine init failed!\n");
- goto error_failed_get_cfg;
- }
-
- return 0;
-
-error_failed_get_cfg:
- kfree(hr_dev->priv);
-
-error_failed_kzalloc:
- ib_dealloc_device(&hr_dev->ib_dev);
-
- return ret;
-}
-
-/**
- * hns_roce_remove - remove RoCE device
- * @pdev: pointer to platform device
- */
-static int hns_roce_remove(struct platform_device *pdev)
-{
- struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
-
- hns_roce_exit(hr_dev);
- kfree(hr_dev->priv);
- ib_dealloc_device(&hr_dev->ib_dev);
-
- return 0;
-}
-
-static struct platform_driver hns_roce_driver = {
- .probe = hns_roce_probe,
- .remove = hns_roce_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = hns_roce_of_match,
- .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
- },
-};
-
-module_platform_driver(hns_roce_driver);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
-MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
-MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
-MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
deleted file mode 100644
index 60fdcbae6729..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ /dev/null
@@ -1,1147 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _HNS_ROCE_HW_V1_H
-#define _HNS_ROCE_HW_V1_H
-
-#define CQ_STATE_VALID 2
-
-#define HNS_ROCE_V1_MAX_PD_NUM 0x8000
-#define HNS_ROCE_V1_MAX_CQ_NUM 0x10000
-#define HNS_ROCE_V1_MAX_CQE_NUM 0x8000
-
-#define HNS_ROCE_V1_MAX_QP_NUM 0x40000
-#define HNS_ROCE_V1_MAX_WQE_NUM 0x4000
-
-#define HNS_ROCE_V1_MAX_MTPT_NUM 0x80000
-
-#define HNS_ROCE_V1_MAX_MTT_SEGS 0x100000
-
-#define HNS_ROCE_V1_MAX_QP_INIT_RDMA 128
-#define HNS_ROCE_V1_MAX_QP_DEST_RDMA 128
-
-#define HNS_ROCE_V1_MAX_SQ_DESC_SZ 64
-#define HNS_ROCE_V1_MAX_RQ_DESC_SZ 64
-#define HNS_ROCE_V1_SG_NUM 2
-#define HNS_ROCE_V1_INLINE_SIZE 32
-
-#define HNS_ROCE_V1_UAR_NUM 256
-#define HNS_ROCE_V1_PHY_UAR_NUM 8
-
-#define HNS_ROCE_V1_GID_NUM 16
-#define HNS_ROCE_V1_RESV_QP 8
-
-#define HNS_ROCE_V1_MAX_IRQ_NUM 34
-#define HNS_ROCE_V1_COMP_VEC_NUM 32
-#define HNS_ROCE_V1_AEQE_VEC_NUM 1
-#define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1
-
-#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000
-#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400
-
-#define HNS_ROCE_V1_QPC_SIZE 256
-#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8
-#define HNS_ROCE_V1_CQC_ENTRY_SIZE 64
-#define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64
-#define HNS_ROCE_V1_MTT_ENTRY_SIZE 64
-
-#define HNS_ROCE_V1_CQE_SIZE 32
-#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000
-
-#define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17)
-
-#define HNS_ROCE_V1_EXT_RAQ_WF 8
-#define HNS_ROCE_V1_RAQ_ENTRY 64
-#define HNS_ROCE_V1_RAQ_DEPTH 32768
-#define HNS_ROCE_V1_RAQ_SIZE (HNS_ROCE_V1_RAQ_ENTRY * HNS_ROCE_V1_RAQ_DEPTH)
-
-#define HNS_ROCE_V1_SDB_DEPTH 0x400
-#define HNS_ROCE_V1_ODB_DEPTH 0x400
-
-#define HNS_ROCE_V1_DB_RSVD 0x80
-
-#define HNS_ROCE_V1_SDB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_SDB_ALFUL (HNS_ROCE_V1_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-#define HNS_ROCE_V1_ODB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_ODB_ALFUL (HNS_ROCE_V1_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-
-#define HNS_ROCE_V1_EXT_SDB_DEPTH 0x4000
-#define HNS_ROCE_V1_EXT_ODB_DEPTH 0x4000
-#define HNS_ROCE_V1_EXT_SDB_ENTRY 16
-#define HNS_ROCE_V1_EXT_ODB_ENTRY 16
-#define HNS_ROCE_V1_EXT_SDB_SIZE \
- (HNS_ROCE_V1_EXT_SDB_DEPTH * HNS_ROCE_V1_EXT_SDB_ENTRY)
-#define HNS_ROCE_V1_EXT_ODB_SIZE \
- (HNS_ROCE_V1_EXT_ODB_DEPTH * HNS_ROCE_V1_EXT_ODB_ENTRY)
-
-#define HNS_ROCE_V1_EXT_SDB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_EXT_SDB_ALFUL \
- (HNS_ROCE_V1_EXT_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-#define HNS_ROCE_V1_EXT_ODB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_EXT_ODB_ALFUL \
- (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-
-#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000
-#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000
-#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5
-#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE 20
-
-#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
-
-#define HNS_ROCE_V1_TPTR_ENTRY_SIZE 2
-#define HNS_ROCE_V1_TPTR_BUF_SIZE \
- (HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM)
-
-#define HNS_ROCE_ODB_POLL_MODE 0
-
-#define HNS_ROCE_SDB_NORMAL_MODE 0
-#define HNS_ROCE_SDB_EXTEND_MODE 1
-
-#define HNS_ROCE_ODB_EXTEND_MODE 1
-
-#define KEY_VALID 0x02
-
-#define HNS_ROCE_CQE_QPN_MASK 0x3ffff
-#define HNS_ROCE_CQE_STATUS_MASK 0x1f
-#define HNS_ROCE_CQE_OPCODE_MASK 0xf
-
-#define HNS_ROCE_CQE_SUCCESS 0x00
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR 0x01
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR 0x02
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR 0x03
-#define HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR 0x04
-#define HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR 0x05
-#define HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR 0x06
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR 0x07
-#define HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR 0x08
-#define HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR 0x09
-#define HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR 0x0a
-#define HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR 0x0b
-#define HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR 0x0c
-
-#define QP1C_CFGN_OFFSET 0x28
-#define PHY_PORT_OFFSET 0x8
-#define MTPT_IDX_SHIFT 16
-#define ALL_PORT_VAL_OPEN 0x3f
-#define POL_TIME_INTERVAL_VAL 0x80
-#define SLEEP_TIME_INTERVAL 20
-#define SQ_PSN_SHIFT 8
-#define QKEY_VAL 0x80010000
-#define SDB_INV_CNT_OFFSET 8
-
-#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
-#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
-
-#define HNS_ROCE_INT_MASK_DISABLE 0
-#define HNS_ROCE_INT_MASK_ENABLE 1
-
-#define CEQ_REG_OFFSET 0x18
-
-#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
-
-#define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0)
-
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16)
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16)
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24)
-
-#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0)
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25)
-
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0)
-
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
-
-/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
-enum {
- HNS_ROCE_LWQCE_QPC_ERROR = 1,
- HNS_ROCE_LWQCE_MTU_ERROR,
- HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR,
- HNS_ROCE_LWQCE_WQE_ADDR_ERROR,
- HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR,
- HNS_ROCE_LWQCE_SL_ERROR,
- HNS_ROCE_LWQCE_PORT_ERROR,
-};
-
-/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
-enum {
- HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
- HNS_ROCE_LAVWQE_LENGTH_ERROR,
- HNS_ROCE_LAVWQE_VA_ERROR,
- HNS_ROCE_LAVWQE_PD_ERROR,
- HNS_ROCE_LAVWQE_RW_ACC_ERROR,
- HNS_ROCE_LAVWQE_KEY_STATE_ERROR,
- HNS_ROCE_LAVWQE_MR_OPERATION_ERROR,
-};
-
-/* DOORBELL overflow subtype */
-enum {
- HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
- HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF,
- HNS_ROCE_DB_SUBTYPE_ODB_OVF,
- HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF,
- HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP,
- HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP,
-};
-
-enum {
- /* RQ&SRQ related operations */
- HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
- HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE,
-};
-
-enum {
- HNS_ROCE_PORT_DOWN = 0,
- HNS_ROCE_PORT_UP,
-};
-
-struct hns_roce_cq_context {
- __le32 cqc_byte_4;
- __le32 cq_bt_l;
- __le32 cqc_byte_12;
- __le32 cur_cqe_ba0_l;
- __le32 cqc_byte_20;
- __le32 cqe_tptr_addr_l;
- __le32 cur_cqe_ba1_l;
- __le32 cqc_byte_32;
-};
-
-#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0
-#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M \
- (((1UL << 2) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S)
-
-#define CQ_CONTEXT_CQC_BYTE_4_CQN_S 16
-#define CQ_CONTEXT_CQC_BYTE_4_CQN_M \
- (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQN_S)
-
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S 0
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M \
- (((1UL << 17) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S 20
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M \
- (((1UL << 4) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S)
-
-#define CQ_CONTEXT_CQC_BYTE_12_CEQN_S 24
-#define CQ_CONTEXT_CQC_BYTE_12_CEQN_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_12_CEQN_S)
-
-#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S 0
-#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S 16
-#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M \
- (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S)
-
-#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S 8
-#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S 0
-#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S 9
-
-#define CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S 8
-#define CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S 14
-#define CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S 15
-
-#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S 16
-#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M \
- (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S)
-
-struct hns_roce_cqe {
- __le32 cqe_byte_4;
- union {
- __le32 r_key;
- __le32 immediate_data;
- };
- __le32 byte_cnt;
- __le32 cqe_byte_16;
- __le32 cqe_byte_20;
- __le32 s_mac_l;
- __le32 cqe_byte_28;
- __le32 reserved;
-};
-
-#define CQE_BYTE_4_OWNER_S 7
-#define CQE_BYTE_4_SQ_RQ_FLAG_S 14
-
-#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_S 8
-#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_M \
- (((1UL << 5) - 1) << CQE_BYTE_4_STATUS_OF_THE_OPERATION_S)
-
-#define CQE_BYTE_4_WQE_INDEX_S 16
-#define CQE_BYTE_4_WQE_INDEX_M (((1UL << 14) - 1) << CQE_BYTE_4_WQE_INDEX_S)
-
-#define CQE_BYTE_4_OPERATION_TYPE_S 0
-#define CQE_BYTE_4_OPERATION_TYPE_M \
- (((1UL << 4) - 1) << CQE_BYTE_4_OPERATION_TYPE_S)
-
-#define CQE_BYTE_4_IMM_INDICATOR_S 15
-
-#define CQE_BYTE_16_LOCAL_QPN_S 0
-#define CQE_BYTE_16_LOCAL_QPN_M (((1UL << 24) - 1) << CQE_BYTE_16_LOCAL_QPN_S)
-
-#define CQE_BYTE_20_PORT_NUM_S 26
-#define CQE_BYTE_20_PORT_NUM_M (((1UL << 3) - 1) << CQE_BYTE_20_PORT_NUM_S)
-
-#define CQE_BYTE_20_SL_S 24
-#define CQE_BYTE_20_SL_M (((1UL << 2) - 1) << CQE_BYTE_20_SL_S)
-
-#define CQE_BYTE_20_REMOTE_QPN_S 0
-#define CQE_BYTE_20_REMOTE_QPN_M \
- (((1UL << 24) - 1) << CQE_BYTE_20_REMOTE_QPN_S)
-
-#define CQE_BYTE_20_GRH_PRESENT_S 29
-
-#define CQE_BYTE_28_P_KEY_IDX_S 16
-#define CQE_BYTE_28_P_KEY_IDX_M (((1UL << 16) - 1) << CQE_BYTE_28_P_KEY_IDX_S)
-
-#define CQ_DB_REQ_NOT_SOL 0
-#define CQ_DB_REQ_NOT (1 << 16)
-
-struct hns_roce_v1_mpt_entry {
- __le32 mpt_byte_4;
- __le32 pbl_addr_l;
- __le32 mpt_byte_12;
- __le32 virt_addr_l;
- __le32 virt_addr_h;
- __le32 length;
- __le32 mpt_byte_28;
- __le32 pa0_l;
- __le32 mpt_byte_36;
- __le32 mpt_byte_40;
- __le32 mpt_byte_44;
- __le32 mpt_byte_48;
- __le32 pa4_l;
- __le32 mpt_byte_56;
- __le32 mpt_byte_60;
- __le32 mpt_byte_64;
-};
-
-#define MPT_BYTE_4_KEY_STATE_S 0
-#define MPT_BYTE_4_KEY_STATE_M (((1UL << 2) - 1) << MPT_BYTE_4_KEY_STATE_S)
-
-#define MPT_BYTE_4_KEY_S 8
-#define MPT_BYTE_4_KEY_M (((1UL << 8) - 1) << MPT_BYTE_4_KEY_S)
-
-#define MPT_BYTE_4_PAGE_SIZE_S 16
-#define MPT_BYTE_4_PAGE_SIZE_M (((1UL << 2) - 1) << MPT_BYTE_4_PAGE_SIZE_S)
-
-#define MPT_BYTE_4_MW_TYPE_S 20
-
-#define MPT_BYTE_4_MW_BIND_ENABLE_S 21
-
-#define MPT_BYTE_4_OWN_S 22
-
-#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_S 24
-#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_M \
- (((1UL << 2) - 1) << MPT_BYTE_4_MEMORY_LOCATION_TYPE_S)
-
-#define MPT_BYTE_4_REMOTE_ATOMIC_S 26
-#define MPT_BYTE_4_LOCAL_WRITE_S 27
-#define MPT_BYTE_4_REMOTE_WRITE_S 28
-#define MPT_BYTE_4_REMOTE_READ_S 29
-#define MPT_BYTE_4_REMOTE_INVAL_ENABLE_S 30
-#define MPT_BYTE_4_ADDRESS_TYPE_S 31
-
-#define MPT_BYTE_12_PBL_ADDR_H_S 0
-#define MPT_BYTE_12_PBL_ADDR_H_M \
- (((1UL << 17) - 1) << MPT_BYTE_12_PBL_ADDR_H_S)
-
-#define MPT_BYTE_12_MW_BIND_COUNTER_S 17
-#define MPT_BYTE_12_MW_BIND_COUNTER_M \
- (((1UL << 15) - 1) << MPT_BYTE_12_MW_BIND_COUNTER_S)
-
-#define MPT_BYTE_28_PD_S 0
-#define MPT_BYTE_28_PD_M (((1UL << 16) - 1) << MPT_BYTE_28_PD_S)
-
-#define MPT_BYTE_28_L_KEY_IDX_L_S 16
-#define MPT_BYTE_28_L_KEY_IDX_L_M \
- (((1UL << 16) - 1) << MPT_BYTE_28_L_KEY_IDX_L_S)
-
-#define MPT_BYTE_36_PA0_H_S 0
-#define MPT_BYTE_36_PA0_H_M (((1UL << 5) - 1) << MPT_BYTE_36_PA0_H_S)
-
-#define MPT_BYTE_36_PA1_L_S 8
-#define MPT_BYTE_36_PA1_L_M (((1UL << 24) - 1) << MPT_BYTE_36_PA1_L_S)
-
-#define MPT_BYTE_40_PA1_H_S 0
-#define MPT_BYTE_40_PA1_H_M (((1UL << 13) - 1) << MPT_BYTE_40_PA1_H_S)
-
-#define MPT_BYTE_40_PA2_L_S 16
-#define MPT_BYTE_40_PA2_L_M (((1UL << 16) - 1) << MPT_BYTE_40_PA2_L_S)
-
-#define MPT_BYTE_44_PA2_H_S 0
-#define MPT_BYTE_44_PA2_H_M (((1UL << 21) - 1) << MPT_BYTE_44_PA2_H_S)
-
-#define MPT_BYTE_44_PA3_L_S 24
-#define MPT_BYTE_44_PA3_L_M (((1UL << 8) - 1) << MPT_BYTE_44_PA3_L_S)
-
-#define MPT_BYTE_48_PA3_H_S 0
-#define MPT_BYTE_48_PA3_H_M (((1UL << 29) - 1) << MPT_BYTE_48_PA3_H_S)
-
-#define MPT_BYTE_56_PA4_H_S 0
-#define MPT_BYTE_56_PA4_H_M (((1UL << 5) - 1) << MPT_BYTE_56_PA4_H_S)
-
-#define MPT_BYTE_56_PA5_L_S 8
-#define MPT_BYTE_56_PA5_L_M (((1UL << 24) - 1) << MPT_BYTE_56_PA5_L_S)
-
-#define MPT_BYTE_60_PA5_H_S 0
-#define MPT_BYTE_60_PA5_H_M (((1UL << 13) - 1) << MPT_BYTE_60_PA5_H_S)
-
-#define MPT_BYTE_60_PA6_L_S 16
-#define MPT_BYTE_60_PA6_L_M (((1UL << 16) - 1) << MPT_BYTE_60_PA6_L_S)
-
-#define MPT_BYTE_64_PA6_H_S 0
-#define MPT_BYTE_64_PA6_H_M (((1UL << 21) - 1) << MPT_BYTE_64_PA6_H_S)
-
-#define MPT_BYTE_64_L_KEY_IDX_H_S 24
-#define MPT_BYTE_64_L_KEY_IDX_H_M \
- (((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S)
-
-struct hns_roce_wqe_ctrl_seg {
- __le32 sgl_pa_h;
- __le32 flag;
- union {
- __be32 imm_data;
- __le32 inv_key;
- };
- __le32 msg_length;
-};
-
-struct hns_roce_wqe_data_seg {
- __le64 addr;
- __le32 lkey;
- __le32 len;
-};
-
-struct hns_roce_wqe_raddr_seg {
- __le32 rkey;
- __le32 len; /* reserved */
- __le64 raddr;
-};
-
-struct hns_roce_rq_wqe_ctrl {
- __le32 rwqe_byte_4;
- __le32 rocee_sgl_ba_l;
- __le32 rwqe_byte_12;
- __le32 reserved[5];
-};
-
-#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16
-#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M \
- (((1UL << 6) - 1) << RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S)
-
-#define HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS 10000
-
-#define GID_LEN 16
-
-struct hns_roce_ud_send_wqe {
- __le32 dmac_h;
- __le32 u32_8;
- __le32 immediate_data;
-
- __le32 u32_16;
- union {
- unsigned char dgid[GID_LEN];
- struct {
- __le32 u32_20;
- __le32 u32_24;
- __le32 u32_28;
- __le32 u32_32;
- };
- };
-
- __le32 u32_36;
- __le32 u32_40;
-
- __le32 va0_l;
- __le32 va0_h;
- __le32 l_key0;
-
- __le32 va1_l;
- __le32 va1_h;
- __le32 l_key1;
-};
-
-#define UD_SEND_WQE_U32_4_DMAC_0_S 0
-#define UD_SEND_WQE_U32_4_DMAC_0_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_0_S)
-
-#define UD_SEND_WQE_U32_4_DMAC_1_S 8
-#define UD_SEND_WQE_U32_4_DMAC_1_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_1_S)
-
-#define UD_SEND_WQE_U32_4_DMAC_2_S 16
-#define UD_SEND_WQE_U32_4_DMAC_2_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_2_S)
-
-#define UD_SEND_WQE_U32_4_DMAC_3_S 24
-#define UD_SEND_WQE_U32_4_DMAC_3_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_3_S)
-
-#define UD_SEND_WQE_U32_8_DMAC_4_S 0
-#define UD_SEND_WQE_U32_8_DMAC_4_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_4_S)
-
-#define UD_SEND_WQE_U32_8_DMAC_5_S 8
-#define UD_SEND_WQE_U32_8_DMAC_5_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)
-
-#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22
-
-#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
-#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \
- (((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)
-
-#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S 24
-#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M \
- (((1UL << 6) - 1) << UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S)
-
-#define UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S 31
-
-#define UD_SEND_WQE_U32_16_DEST_QP_S 0
-#define UD_SEND_WQE_U32_16_DEST_QP_M \
- (((1UL << 24) - 1) << UD_SEND_WQE_U32_16_DEST_QP_S)
-
-#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S 24
-#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S)
-
-#define UD_SEND_WQE_U32_36_FLOW_LABEL_S 0
-#define UD_SEND_WQE_U32_36_FLOW_LABEL_M \
- (((1UL << 20) - 1) << UD_SEND_WQE_U32_36_FLOW_LABEL_S)
-
-#define UD_SEND_WQE_U32_36_PRIORITY_S 20
-#define UD_SEND_WQE_U32_36_PRIORITY_M \
- (((1UL << 4) - 1) << UD_SEND_WQE_U32_36_PRIORITY_S)
-
-#define UD_SEND_WQE_U32_36_SGID_INDEX_S 24
-#define UD_SEND_WQE_U32_36_SGID_INDEX_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_36_SGID_INDEX_S)
-
-#define UD_SEND_WQE_U32_40_HOP_LIMIT_S 0
-#define UD_SEND_WQE_U32_40_HOP_LIMIT_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_HOP_LIMIT_S)
-
-#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S 8
-#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S)
-
-struct hns_roce_sqp_context {
- __le32 qp1c_bytes_4;
- __le32 sq_rq_bt_l;
- __le32 qp1c_bytes_12;
- __le32 qp1c_bytes_16;
- __le32 qp1c_bytes_20;
- __le32 cur_rq_wqe_ba_l;
- __le32 qp1c_bytes_28;
- __le32 qp1c_bytes_32;
- __le32 cur_sq_wqe_ba_l;
- __le32 qp1c_bytes_40;
-};
-
-#define QP1C_BYTES_4_QP_STATE_S 0
-#define QP1C_BYTES_4_QP_STATE_M \
- (((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S)
-
-#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
-#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)
-
-#define QP1C_BYTES_4_RQ_WQE_SHIFT_S 12
-#define QP1C_BYTES_4_RQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP1C_BYTES_4_RQ_WQE_SHIFT_S)
-
-#define QP1C_BYTES_4_PD_S 16
-#define QP1C_BYTES_4_PD_M (((1UL << 16) - 1) << QP1C_BYTES_4_PD_S)
-
-#define QP1C_BYTES_12_SQ_RQ_BT_H_S 0
-#define QP1C_BYTES_12_SQ_RQ_BT_H_M \
- (((1UL << 17) - 1) << QP1C_BYTES_12_SQ_RQ_BT_H_S)
-
-#define QP1C_BYTES_16_RQ_HEAD_S 0
-#define QP1C_BYTES_16_RQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_16_RQ_HEAD_S)
-
-#define QP1C_BYTES_16_PORT_NUM_S 16
-#define QP1C_BYTES_16_PORT_NUM_M \
- (((1UL << 3) - 1) << QP1C_BYTES_16_PORT_NUM_S)
-
-#define QP1C_BYTES_16_SIGNALING_TYPE_S 27
-#define QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S 28
-#define QP1C_BYTES_16_RQ_BA_FLG_S 29
-#define QP1C_BYTES_16_SQ_BA_FLG_S 30
-#define QP1C_BYTES_16_QP1_ERR_S 31
-
-#define QP1C_BYTES_20_SQ_HEAD_S 0
-#define QP1C_BYTES_20_SQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_20_SQ_HEAD_S)
-
-#define QP1C_BYTES_20_PKEY_IDX_S 16
-#define QP1C_BYTES_20_PKEY_IDX_M \
- (((1UL << 16) - 1) << QP1C_BYTES_20_PKEY_IDX_S)
-
-#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S 0
-#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S)
-
-#define QP1C_BYTES_28_RQ_CUR_IDX_S 16
-#define QP1C_BYTES_28_RQ_CUR_IDX_M \
- (((1UL << 15) - 1) << QP1C_BYTES_28_RQ_CUR_IDX_S)
-
-#define QP1C_BYTES_32_TX_CQ_NUM_S 0
-#define QP1C_BYTES_32_TX_CQ_NUM_M \
- (((1UL << 16) - 1) << QP1C_BYTES_32_TX_CQ_NUM_S)
-
-#define QP1C_BYTES_32_RX_CQ_NUM_S 16
-#define QP1C_BYTES_32_RX_CQ_NUM_M \
- (((1UL << 16) - 1) << QP1C_BYTES_32_RX_CQ_NUM_S)
-
-#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S 0
-#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S)
-
-#define QP1C_BYTES_40_SQ_CUR_IDX_S 16
-#define QP1C_BYTES_40_SQ_CUR_IDX_M \
- (((1UL << 15) - 1) << QP1C_BYTES_40_SQ_CUR_IDX_S)
-
-#define HNS_ROCE_WQE_INLINE (1UL<<31)
-#define HNS_ROCE_WQE_SE (1UL<<30)
-
-#define HNS_ROCE_WQE_SGE_NUM_BIT 24
-#define HNS_ROCE_WQE_IMM (1UL<<23)
-#define HNS_ROCE_WQE_FENCE (1UL<<21)
-#define HNS_ROCE_WQE_CQ_NOTIFY (1UL<<20)
-
-#define HNS_ROCE_WQE_OPCODE_SEND (0<<16)
-#define HNS_ROCE_WQE_OPCODE_RDMA_READ (1<<16)
-#define HNS_ROCE_WQE_OPCODE_RDMA_WRITE (2<<16)
-#define HNS_ROCE_WQE_OPCODE_LOCAL_INV (4<<16)
-#define HNS_ROCE_WQE_OPCODE_UD_SEND (7<<16)
-#define HNS_ROCE_WQE_OPCODE_MASK (15<<16)
-
-struct hns_roce_qp_context {
- __le32 qpc_bytes_4;
- __le32 qpc_bytes_8;
- __le32 qpc_bytes_12;
- __le32 qpc_bytes_16;
- __le32 sq_rq_bt_l;
- __le32 qpc_bytes_24;
- __le32 irrl_ba_l;
- __le32 qpc_bytes_32;
- __le32 qpc_bytes_36;
- __le32 dmac_l;
- __le32 qpc_bytes_44;
- __le32 qpc_bytes_48;
- u8 dgid[16];
- __le32 qpc_bytes_68;
- __le32 cur_rq_wqe_ba_l;
- __le32 qpc_bytes_76;
- __le32 rx_rnr_time;
- __le32 qpc_bytes_84;
- __le32 qpc_bytes_88;
- union {
- __le32 rx_sge_len;
- __le32 dma_length;
- };
- union {
- __le32 rx_sge_num;
- __le32 rx_send_pktn;
- __le32 r_key;
- };
- __le32 va_l;
- __le32 va_h;
- __le32 qpc_bytes_108;
- __le32 qpc_bytes_112;
- __le32 rx_cur_sq_wqe_ba_l;
- __le32 qpc_bytes_120;
- __le32 qpc_bytes_124;
- __le32 qpc_bytes_128;
- __le32 qpc_bytes_132;
- __le32 qpc_bytes_136;
- __le32 qpc_bytes_140;
- __le32 qpc_bytes_144;
- __le32 qpc_bytes_148;
- union {
- __le32 rnr_retry;
- __le32 ack_time;
- };
- __le32 qpc_bytes_156;
- __le32 pkt_use_len;
- __le32 qpc_bytes_164;
- __le32 qpc_bytes_168;
- union {
- __le32 sge_use_len;
- __le32 pa_use_len;
- };
- __le32 qpc_bytes_176;
- __le32 qpc_bytes_180;
- __le32 tx_cur_sq_wqe_ba_l;
- __le32 qpc_bytes_188;
- __le32 rvd21;
-};
-
-#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0
-#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S 3
-#define QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S 4
-#define QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S 5
-#define QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S 6
-#define QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S 7
-
-#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S 8
-#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S)
-
-#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S 12
-#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S)
-
-#define QP_CONTEXT_QPC_BYTES_4_PD_S 16
-#define QP_CONTEXT_QPC_BYTES_4_PD_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_4_PD_S)
-
-#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S 0
-#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S)
-
-#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S 16
-#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S)
-
-#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S 0
-#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S)
-
-#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_S 0
-#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_16_QP_NUM_S)
-
-#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S 0
-#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M \
- (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S 18
-#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)
-
-#define QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S 23
-
-#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M \
- (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S 18
-#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S)
-
-#define QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S 20
-#define QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S 21
-#define QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S 22
-#define QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S 23
-
-#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S 24
-#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S)
-
-#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_S 0
-#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_36_DEST_QP_S)
-
-#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S 24
-#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_S 0
-#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S 16
-#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S)
-
-#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_S 24
-#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_HOPLMT_S)
-
-#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S 0
-#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M \
- (((1UL << 20) - 1) << QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S)
-
-#define QP_CONTEXT_QPC_BYTES_48_TCLASS_S 20
-#define QP_CONTEXT_QPC_BYTES_48_TCLASS_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_48_TCLASS_S)
-
-#define QP_CONTEXT_QPC_BYTES_48_MTU_S 28
-#define QP_CONTEXT_QPC_BYTES_48_MTU_M \
- (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_48_MTU_S)
-
-#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S 0
-#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S 8
-#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S 24
-#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S 0
-#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S 24
-#define QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S 25
-
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S 26
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M \
- (((1UL << 2) - 1) << \
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S 29
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S)
-
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S 24
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S 25
-
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S 24
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S)
-
-#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S 0
-#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S 16
-#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S)
-
-#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S 0
-#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S 24
-
-#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S 25
-#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S 27
-
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S 24
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S)
-
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S 24
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S)
-
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S 0
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S 16
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S 31
-
-#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_S 0
-#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_144_QP_STATE_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S 0
-#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S 2
-#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S 5
-#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_LSN_S 8
-#define QP_CONTEXT_QPC_BYTES_148_LSN_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_148_LSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S 0
-#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S 3
-#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S 8
-#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S 11
-#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_SL_S 14
-#define QP_CONTEXT_QPC_BYTES_156_SL_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_SL_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S 16
-#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S 24
-#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S)
-
-#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S 24
-#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S 24
-#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S 26
-#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S 28
-#define QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S 29
-#define QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S 30
-
-#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S 0
-#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S 0
-#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S 8
-
-#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
-
-#define STATUS_MASK 0xff
-#define GO_BIT_TIMEOUT_MSECS 10000
-#define HCR_STATUS_OFFSET 0x18
-#define HCR_GO_BIT 15
-
-struct hns_roce_rq_db {
- __le32 u32_4;
- __le32 u32_8;
-};
-
-#define RQ_DOORBELL_U32_4_RQ_HEAD_S 0
-#define RQ_DOORBELL_U32_4_RQ_HEAD_M \
- (((1UL << 15) - 1) << RQ_DOORBELL_U32_4_RQ_HEAD_S)
-
-#define RQ_DOORBELL_U32_8_QPN_S 0
-#define RQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << RQ_DOORBELL_U32_8_QPN_S)
-
-#define RQ_DOORBELL_U32_8_CMD_S 28
-#define RQ_DOORBELL_U32_8_CMD_M (((1UL << 3) - 1) << RQ_DOORBELL_U32_8_CMD_S)
-
-#define RQ_DOORBELL_U32_8_HW_SYNC_S 31
-
-struct hns_roce_sq_db {
- __le32 u32_4;
- __le32 u32_8;
-};
-
-#define SQ_DOORBELL_U32_4_SQ_HEAD_S 0
-#define SQ_DOORBELL_U32_4_SQ_HEAD_M \
- (((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)
-
-#define SQ_DOORBELL_U32_4_SL_S 16
-#define SQ_DOORBELL_U32_4_SL_M \
- (((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S)
-
-#define SQ_DOORBELL_U32_4_PORT_S 18
-#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)
-
-#define SQ_DOORBELL_U32_8_QPN_S 0
-#define SQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << SQ_DOORBELL_U32_8_QPN_S)
-
-#define SQ_DOORBELL_HW_SYNC_S 31
-
-struct hns_roce_ext_db {
- int esdb_dep;
- int eodb_dep;
- struct hns_roce_buf_list *sdb_buf_list;
- struct hns_roce_buf_list *odb_buf_list;
-};
-
-struct hns_roce_db_table {
- int sdb_ext_mod;
- int odb_ext_mod;
- struct hns_roce_ext_db *ext_db;
-};
-
-#define HW_SYNC_SLEEP_TIME_INTERVAL 20
-#define HW_SYNC_TIMEOUT_MSECS (25 * HW_SYNC_SLEEP_TIME_INTERVAL)
-#define BT_CMD_SYNC_SHIFT 31
-#define HNS_ROCE_BA_SIZE (32 * 4096)
-
-struct hns_roce_bt_table {
- struct hns_roce_buf_list qpc_buf;
- struct hns_roce_buf_list mtpt_buf;
- struct hns_roce_buf_list cqc_buf;
-};
-
-struct hns_roce_tptr_table {
- struct hns_roce_buf_list tptr_buf;
-};
-
-struct hns_roce_qp_work {
- struct work_struct work;
- struct ib_device *ib_dev;
- struct hns_roce_qp *qp;
- u32 db_wait_stage;
- u32 sdb_issue_ptr;
- u32 sdb_inv_cnt;
- u32 sche_cnt;
-};
-
-struct hns_roce_mr_free_work {
- struct work_struct work;
- struct ib_device *ib_dev;
- struct completion *comp;
- int comp_flag;
- void *mr;
-};
-
-struct hns_roce_recreate_lp_qp_work {
- struct work_struct work;
- struct ib_device *ib_dev;
- struct completion *comp;
- int comp_flag;
-};
-
-struct hns_roce_free_mr {
- struct workqueue_struct *free_mr_wq;
- struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP];
- struct hns_roce_cq *mr_free_cq;
- struct hns_roce_pd *mr_free_pd;
-};
-
-struct hns_roce_v1_priv {
- struct hns_roce_db_table db_table;
- struct hns_roce_raq_table raq_table;
- struct hns_roce_bt_table bt_table;
- struct hns_roce_tptr_table tptr_table;
- struct hns_roce_free_mr free_mr;
-};
-
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
-
-#endif
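The deletion above removes the HIP06 (hw v1) register layout. For every multi-bit field the header paired an _S shift with an _M mask built as (((1UL << width) - 1) << shift), which is the open-coded form of GENMASK() from <linux/bits.h>. A minimal sketch of the accessor pattern those pairs supported (names here are illustrative, not from the driver):

	#include <linux/bits.h>
	#include <linux/types.h>

	#define EXAMPLE_FIELD_S 16
	#define EXAMPLE_FIELD_M GENMASK(23, 16)	/* == (((1UL << 8) - 1) << 16) */

	/* Write an 8-bit field at bit 16 without disturbing its neighbours. */
	static inline u32 example_field_set(u32 reg, u32 val)
	{
		return (reg & ~EXAMPLE_FIELD_M) |
		       ((val << EXAMPLE_FIELD_S) & EXAMPLE_FIELD_M);
	}

	/* Read the field back out. */
	static inline u32 example_field_get(u32 reg)
	{
		return (reg & EXAMPLE_FIELD_M) >> EXAMPLE_FIELD_S;
	}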
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index eb0defa80d0d..b33e948fd060 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -678,6 +678,7 @@ static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
void *wqe)
{
+#define HNS_ROCE_SL_SHIFT 2
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
/* All kinds of DirectWQE have the same header field layout */
@@ -685,7 +686,8 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
- V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2);
+ V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S,
+ qp->sl >> HNS_ROCE_SL_SHIFT);
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
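The first hw_v2.c hunk replaces a bare shift of 2 with the named HNS_ROCE_SL_SHIFT. The service level is split across two WQE fields, DB_SL_L for the low two bits and DB_SL_H for the rest; a sketch of the split the constant documents (the helper name is illustrative):

	#define HNS_ROCE_SL_SHIFT 2

	/* DB_SL_L takes the low 2 bits of the service level, DB_SL_H the rest. */
	static inline void split_sl(u8 sl, u8 *sl_l, u8 *sl_h)
	{
		*sl_l = sl & ((1U << HNS_ROCE_SL_SHIFT) - 1);
		*sl_h = sl >> HNS_ROCE_SL_SHIFT;
	}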
@@ -1305,14 +1307,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
continue;
dev_err_ratelimited(hr_dev->dev,
- "Cmdq IO error, opcode = %x, return = %x\n",
+ "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
desc->opcode, desc_ret);
ret = -EIO;
}
} else {
/* FW/HW reset or incorrect number of desc */
tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
- dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n",
+ dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
csq->head, tail);
csq->head = tail;
@@ -1571,7 +1573,7 @@ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
struct hns_roce_cmq_desc desc;
int ret;
- if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
hr_dev->func_num = 1;
return 0;
}
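Several hunks in this patch turn "revision < PCI_REVISION_ID_HIP09" into "revision == PCI_REVISION_ID_HIP08". With hw v1 (HIP06) gone, HIP08 is the only revision that still needs the fallback, and the equality form keeps any revision newer than HIP09 on the full-featured path. Schematically (both function names are placeholders):

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		return hip08_compat_path();	/* e.g. assume a single function */
	return hip09_and_later_path();		/* also taken by future revisions */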
@@ -2003,7 +2005,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
- caps->flags |= HNS_ROCE_CAP_FLAG_STASH;
+ caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
+ HNS_ROCE_CAP_FLAG_DIRECT_WQE;
caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
} else {
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
@@ -2144,7 +2147,6 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
- caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
@@ -2161,6 +2163,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
(u32)priv->handle->rinfo.num_vectors - 2);
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
@@ -2181,6 +2184,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
} else {
u32 func_num = max_t(u32, 1, hr_dev->func_num);
+ caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
caps->gid_table_len[0] /= func_num;
@@ -2393,7 +2397,7 @@ static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
struct hns_roce_caps *caps = &hr_dev->caps;
int ret;
- if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
return 0;
ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
@@ -2967,8 +2971,8 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev,
return hns_roce_cmq_send(hr_dev, desc, 2);
}
-static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
- int gid_index, const union ib_gid *gid,
+static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
+ const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
@@ -3063,8 +3067,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
- void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx)
+ void *mb_buf, struct hns_roce_mr *mr)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
int ret;
@@ -4488,14 +4491,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return 0;
}
-static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
-{
- if (!fl)
- fl = rdma_calc_flow_label(lqpn, rqpn);
-
- return rdma_flow_label_to_udp_sport(fl);
-}
-
static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
u32 *dip_idx)
{
@@ -4712,8 +4707,9 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
}
hr_reg_write(context, QPC_UDPSPN,
- is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
- attr->dest_qp_num) : 0);
+ is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num,
+ attr->dest_qp_num) :
+ 0);
hr_reg_clear(qpc_mask, QPC_UDPSPN);
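The driver-local get_udp_sport() deleted above moves to the core as rdma_get_udp_sport(); judging by the removed body, the behaviour is unchanged: derive the RoCEv2 UDP source port from the flow label, computing a flow label from the QP numbers when the caller passed none. A sketch of that logic, mirroring the removed helper:

	static u16 sketch_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
	{
		if (!fl)	/* no flow label given: derive one from the QPNs */
			fl = rdma_calc_flow_label(lqpn, rqpn);

		return rdma_flow_label_to_udp_sport(fl);
	}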
@@ -4739,7 +4735,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
ibdev_err(ibdev,
- "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
+ "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
hr_qp->sl, MAX_SERVICE_LEVEL);
return -EINVAL;
}
@@ -4768,7 +4764,8 @@ static bool check_qp_state(enum ib_qp_state cur_state,
[IB_QPS_ERR] = true },
[IB_QPS_SQD] = {},
[IB_QPS_SQE] = {},
- [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_ERR] = true }
};
return sm[cur_state][new_state];
@@ -5868,7 +5865,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
}
-static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
+static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
{
struct device *dev = hr_dev->dev;
int ret;
@@ -5882,7 +5879,7 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
0, HNS_ROCE_CMD_DESTROY_AEQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
- dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+ dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
}
static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
@@ -6394,7 +6391,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
if (!id)
return 0;
- if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09)
+ if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
return 0;
ret = __hns_roce_hw_v2_init_instance(handle);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 35c61da7ba15..12be85f0986e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -35,26 +35,15 @@
#include <linux/bitops.h>
-#define HNS_ROCE_VF_QPC_BT_NUM 256
-#define HNS_ROCE_VF_SCCC_BT_NUM 64
-#define HNS_ROCE_VF_SRQC_BT_NUM 64
-#define HNS_ROCE_VF_CQC_BT_NUM 64
-#define HNS_ROCE_VF_MPT_BT_NUM 64
-#define HNS_ROCE_VF_SMAC_NUM 32
-#define HNS_ROCE_VF_SL_NUM 8
-#define HNS_ROCE_VF_GMV_BT_NUM 256
-
#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
-#define HNS_ROCE_V2_MAX_SRQ 0x100000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
-#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
@@ -63,13 +52,10 @@
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
-#define HNS_ROCE_V2_MAX_IRQ_NUM 65
-#define HNS_ROCE_V2_COMP_VEC_NUM 63
#define HNS_ROCE_V2_AEQE_VEC_NUM 1
#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
-#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
@@ -81,7 +67,6 @@
#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
-#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48
#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100
#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
#define HNS_ROCE_V2_SRQC_ENTRY_SZ 64
@@ -103,7 +88,6 @@
#define HNS_ROCE_INVALID_LKEY 0x0
#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
-#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
@@ -117,12 +101,14 @@
#define HNS_ROCE_CQE_HOP_NUM 1
#define HNS_ROCE_SRQWQE_HOP_NUM 1
#define HNS_ROCE_PBL_HOP_NUM 2
-#define HNS_ROCE_EQE_HOP_NUM 2
#define HNS_ROCE_IDX_HOP_NUM 1
#define HNS_ROCE_SQWQE_HOP_NUM 2
#define HNS_ROCE_EXT_SGE_HOP_NUM 1
#define HNS_ROCE_RQWQE_HOP_NUM 2
+#define HNS_ROCE_V2_EQE_HOP_NUM 2
+#define HNS_ROCE_V3_EQE_HOP_NUM 1
+
#define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6
#define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2
#define HNS_ROCE_V2_GID_INDEX_NUM 16
@@ -1441,7 +1427,7 @@ struct hns_roce_v2_priv {
struct hns_roce_dip {
u8 dgid[GID_LEN_V2];
u32 dip_idx;
- struct list_head node; /* all dips are on a list */
+ struct list_head node; /* all dips are on a list */
};
/* only for RNR timeout issue of HIP08 */
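The header hunks mirror the hw_v2.c changes above: the single HNS_ROCE_EQE_HOP_NUM splits into per-generation constants, chosen in apply_func_caps() by PCI revision:

	/* As set in apply_func_caps() earlier in this patch: */
	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;	/* 1 hop */
	else
		caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;	/* 2 hops */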
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 4194b626f3c6..f73ba619f375 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
#include <linux/acpi.h>
-#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
@@ -70,7 +69,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
- ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
+ ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);
return ret;
}
@@ -84,7 +83,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
- ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL);
+ ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);
return ret;
}
@@ -152,9 +151,6 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
u8 i;
for (i = 0; i < hr_dev->caps.num_ports; i++) {
- if (hr_dev->hw->set_mtu)
- hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
- hr_dev->caps.max_mtu);
ret = hns_roce_set_mac(hr_dev, i,
hr_dev->iboe.netdevs[i]->dev_addr);
if (ret)
@@ -270,6 +266,9 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
u16 *pkey)
{
+ if (index > 0)
+ return -EINVAL;
+
*pkey = PKEY_ID;
return 0;
@@ -307,9 +306,22 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
entry->address = address;
entry->mmap_type = mmap_type;
- ret = rdma_user_mmap_entry_insert_exact(
- ucontext, &entry->rdma_entry, length,
- mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1);
+ switch (mmap_type) {
+ /* pgoff 0 must be used by DB for compatibility */
+ case HNS_ROCE_MMAP_TYPE_DB:
+ ret = rdma_user_mmap_entry_insert_exact(
+ ucontext, &entry->rdma_entry, length, 0);
+ break;
+ case HNS_ROCE_MMAP_TYPE_DWQE:
+ ret = rdma_user_mmap_entry_insert_range(
+ ucontext, &entry->rdma_entry, length, 1,
+ U32_MAX);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
if (ret) {
kfree(entry);
return NULL;
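The switch statement pins the doorbell entry at page offset 0 for userspace ABI compatibility, while direct-WQE entries may land anywhere in [1, U32_MAX]. Seen from userspace, the difference is roughly this (the fd and response names are illustrative, except dwqe_mmap_key, which the create-QP response carries):

	/* Doorbell page: fixed, well-known offset 0. */
	db = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  cmd_fd, 0);

	/* Direct-WQE page: offset handed back by the kernel. */
	dwqe = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
		    cmd_fd, resp.dwqe_mmap_key);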
@@ -323,18 +335,12 @@ static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
if (context->db_mmap_entry)
rdma_user_mmap_entry_remove(
&context->db_mmap_entry->rdma_entry);
-
- if (context->tptr_mmap_entry)
- rdma_user_mmap_entry_remove(
- &context->tptr_mmap_entry->rdma_entry);
}
static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
{
struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
- struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
u64 address;
- int ret;
address = context->uar.pfn << PAGE_SHIFT;
context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
@@ -342,27 +348,7 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
if (!context->db_mmap_entry)
return -ENOMEM;
- if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
- return 0;
-
- /*
- * FIXME: using io_remap_pfn_range on the dma address returned
- * by dma_alloc_coherent is totally wrong.
- */
- context->tptr_mmap_entry =
- hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
- hr_dev->tptr_size,
- HNS_ROCE_MMAP_TYPE_TPTR);
- if (!context->tptr_mmap_entry) {
- ret = -ENOMEM;
- goto err;
- }
-
return 0;
-
-err:
- hns_roce_dealloc_uar_entry(context);
- return ret;
}
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
@@ -436,10 +422,15 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
entry = to_hns_mmap(rdma_entry);
pfn = entry->address >> PAGE_SHIFT;
- prot = vma->vm_page_prot;
- if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR)
- prot = pgprot_noncached(prot);
+ switch (entry->mmap_type) {
+ case HNS_ROCE_MMAP_TYPE_DB:
+ case HNS_ROCE_MMAP_TYPE_DWQE:
+ prot = pgprot_device(vma->vm_page_prot);
+ break;
+ default:
+ return -EINVAL;
+ }
ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
prot, rdma_entry);
@@ -816,7 +807,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
int ret;
spin_lock_init(&hr_dev->sm_lock);
- spin_lock_init(&hr_dev->bt_cmd_lock);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
@@ -907,20 +897,13 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
struct device *dev = hr_dev->dev;
int ret;
- if (hr_dev->hw->reset) {
- ret = hr_dev->hw->reset(hr_dev, true);
- if (ret) {
- dev_err(dev, "Reset RoCE engine failed!\n");
- return ret;
- }
- }
hr_dev->is_reset = false;
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) {
dev_err(dev, "Init RoCE Command Queue failed!\n");
- goto error_failed_cmq_init;
+ return ret;
}
}
@@ -1003,12 +986,6 @@ error_failed_cmd_init:
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
-error_failed_cmq_init:
- if (hr_dev->hw->reset) {
- if (hr_dev->hw->reset(hr_dev, false))
- dev_err(dev, "Dereset RoCE engine failed!\n");
- }
-
return ret;
}
@@ -1028,8 +1005,6 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
hns_roce_cmd_cleanup(hr_dev);
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
- if (hr_dev->hw->reset)
- hr_dev->hw->reset(hr_dev, false);
}
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 7089ac780291..2ee06b906b60 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
@@ -81,7 +80,7 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
return -ENOMEM;
}
- mr->key = hw_index_to_key(id); /* MR key */
+ mr->key = hw_index_to_key(id); /* MR key */
err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
(unsigned long)id);
@@ -173,8 +172,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
}
if (mr->type != MR_TYPE_FRMR)
- ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
- mtpt_idx);
+ ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
if (ret) {
@@ -363,12 +361,8 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
struct hns_roce_mr *mr = to_hr_mr(ibmr);
int ret = 0;
- if (hr_dev->hw->dereg_mr) {
- ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
- } else {
- hns_roce_mr_free(hr_dev, mr);
- kfree(mr);
- }
+ hns_roce_mr_free(hr_dev, mr);
+ kfree(mr);
return ret;
}
@@ -614,10 +608,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return -ENOBUFS;
for (i = 0; i < count && npage < max_count; i++) {
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- addr = to_hr_hw_page_addr(pages[npage]);
- else
- addr = pages[npage];
+ addr = pages[npage];
mtts[i] = cpu_to_le64(addr);
npage++;
@@ -824,11 +815,11 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
+ u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
int mtt_count, left;
- int start_index;
+ u32 start_index;
int total = 0;
__le64 *mtts;
u32 npage;
@@ -847,10 +838,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
continue;
addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- mtt_buf[total] = to_hr_hw_page_addr(addr);
- else
- mtt_buf[total] = addr;
+ mtt_buf[total] = addr;
total++;
}
@@ -884,10 +872,10 @@ done:
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
struct hns_roce_buf_attr *attr,
struct hns_roce_hem_cfg *cfg,
- unsigned int *buf_page_shift, int unalinged_size)
+ unsigned int *buf_page_shift, u64 unalinged_size)
{
struct hns_roce_buf_region *r;
- int first_region_padding;
+ u64 first_region_padding;
int page_cnt, region_cnt;
unsigned int page_shift;
size_t buf_size;
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 81ffad77ae42..783e71852c50 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/pci.h>
#include "hns_roce_device.h"
@@ -86,7 +85,6 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
- struct resource *res;
int id;
/* Using bitmap to manager UAR index */
@@ -104,18 +102,9 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
else
uar->index = 0;
- if (!dev_is_pci(hr_dev->dev)) {
- res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ida_free(&uar_ida->ida, id);
- dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
- return -EINVAL;
- }
- uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
- } else {
- uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
- >> PAGE_SHIFT);
- }
+ uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+ hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4);
return 0;
}
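Taken together with qp_mmap_entry() in the hns_roce_qp.c hunks below, the address map is: UAR (doorbell) pages come out of PCI BAR 2, and each QP's direct-WQE slot is offset from the BAR 4 base by its QPN:

	uar->pfn = pci_resource_start(hr_dev->pci_dev, 2) >> PAGE_SHIFT;

	hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4);
	address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;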
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9af4509894e6..d78373e10aab 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -32,7 +32,6 @@
*/
#include <linux/pci.h>
-#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
@@ -110,12 +109,11 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
return;
}
- if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
- (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
- event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
+ if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
+ event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
qp->state = IB_QPS_ERR;
flush_cqe(hr_dev, qp);
@@ -219,13 +217,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
int ret;
if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- /* when hw version is v1, the sqpn is allocated */
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- num = HNS_ROCE_MAX_PORTS +
- hr_dev->iboe.phy_port[hr_qp->port];
- else
- num = 1;
-
+ num = 1;
hr_qp->doorbell_qpn = 1;
} else {
mutex_lock(&qp_table->bank_mutex);
@@ -324,11 +316,6 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (!hr_qp->qpn)
return -EINVAL;
- /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- return 0;
-
/* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) {
@@ -379,6 +366,11 @@ err_out:
return ret;
}
+static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
+{
+ rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
+}
+
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct xarray *xa = &hr_dev->qp_table_xa;
@@ -402,11 +394,6 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- return;
-
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
@@ -535,11 +522,6 @@ static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE;
- return;
- }
-
hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);
@@ -780,7 +762,11 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
goto err_inline;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;
+
return 0;
+
err_inline:
free_rq_inline_buf(hr_qp);
@@ -822,6 +808,35 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
hns_roce_qp_has_rq(init_attr));
}
+static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
+ struct hns_roce_dev *hr_dev,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ struct hns_roce_ucontext *uctx =
+ rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ struct rdma_user_mmap_entry *rdma_entry;
+ u64 address;
+
+ address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;
+
+ hr_qp->dwqe_mmap_entry =
+ hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
+ HNS_ROCE_DWQE_SIZE,
+ HNS_ROCE_MMAP_TYPE_DWQE);
+
+ if (!hr_qp->dwqe_mmap_entry) {
+ ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
+ return -ENOMEM;
+ }
+
+ rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
+ resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);
+
+ return 0;
+}
+
static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
@@ -909,10 +924,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
if (udata) {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
+ ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
+ if (ret)
+ return ret;
+ }
+
ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
resp);
if (ret)
- return ret;
+ goto err_remove_qp;
} else {
ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
if (ret)
@@ -920,6 +941,12 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
}
return 0;
+
+err_remove_qp:
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+ qp_user_mmap_entry_remove(hr_qp);
+
+ return ret;
}
static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
@@ -933,6 +960,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+ qp_user_mmap_entry_remove(hr_qp);
} else {
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_qp->rdb);
@@ -1158,7 +1187,7 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
goto out;
break;
case IB_QPT_UD:
- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
is_user)
goto out;
break;
@@ -1391,7 +1420,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
}
}
-static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
+static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
{
return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 7264f8c2f7d5..3141a9c85de5 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include <linux/etherdevice.h>
+
#include "osdep.h"
#include "status.h"
#include "hmc.h"
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index b4c657f5f2f9..89234d04cc65 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -550,7 +550,7 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
struct irdma_sc_dev *dev = &rf->sc_dev;
dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
- irq_set_affinity_hint(msix_vec->irq, NULL);
+ irq_update_affinity_hint(msix_vec->irq, NULL);
free_irq(msix_vec->irq, dev_id);
}
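irq_set_affinity_hint() both stores the hint and applies it as the IRQ's affinity, overriding whatever the administrator or irqbalance configured; irq_update_affinity_hint() only updates the hint exported through /proc/irq/<n>/affinity_hint and leaves the effective affinity alone. The driver's call pattern is otherwise unchanged:

	/* Setup: publish the preferred CPU as a hint only. */
	cpumask_clear(&msix_vec->mask);
	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);

	/* Teardown: clear the hint before freeing the interrupt. */
	irq_update_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);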
@@ -1100,7 +1100,7 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
}
cpumask_clear(&msix_vec->mask);
cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
- irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
+ irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
return IRDMA_ERR_CFG;
@@ -1709,14 +1709,14 @@ clean_msixtbl:
*/
static void irdma_get_used_rsrc(struct irdma_device *iwdev)
{
- iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds,
- iwdev->rf->max_pd, 0);
- iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps,
- iwdev->rf->max_qp, 0);
- iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs,
- iwdev->rf->max_cq, 0);
- iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs,
- iwdev->rf->max_mr, 0);
+ iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
+ iwdev->rf->max_pd);
+ iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
+ iwdev->rf->max_qp);
+ iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
+ iwdev->rf->max_cq);
+ iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
+ iwdev->rf->max_mr);
}
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
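find_next_zero_bit(addr, size, 0) scans from offset 0, which is exactly what find_first_zero_bit(addr, size) states directly (and the latter has a dedicated, slightly cheaper implementation). The conversion is mechanical:

	used = find_next_zero_bit(bitmap, nbits, 0);	/* before */
	used = find_first_zero_bit(bitmap, nbits);	/* after: same result */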
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index d219f64b2c3d..43e962b97d6a 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -198,7 +198,7 @@ static void i40iw_remove(struct auxiliary_device *aux_dev)
aux_dev);
struct i40e_info *cdev_info = i40e_adev->ldev;
- return i40e_client_device_unregister(cdev_info);
+ i40e_client_device_unregister(cdev_info);
}
static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = {
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 51a41359e0b4..9fab29039f1c 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -207,7 +207,7 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
struct iidc_auxiliary_dev,
adev);
struct ice_pf *pf = iidc_adev->pf;
- struct irdma_device *iwdev = dev_get_drvdata(&aux_dev->dev);
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
irdma_ib_unregister_device(iwdev);
ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
@@ -228,7 +228,8 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
rf->msix_count = pf->num_rdma_msix;
rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
rf->default_vsi.vsi_idx = vsi->vsi_num;
- rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
+ rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
rf->rdma_ver = IRDMA_GEN_2;
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
@@ -294,7 +295,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true);
ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
- dev_set_drvdata(&aux_dev->dev, iwdev);
+ auxiliary_set_drvdata(aux_dev, iwdev);
return 0;
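auxiliary_set_drvdata()/auxiliary_get_drvdata() are thin wrappers around the auxiliary device's embedded struct device, so this is a readability change rather than a behavioural one; their definitions amount to the following (a sketch of <linux/auxiliary_bus.h>):

	static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
	{
		return dev_get_drvdata(&auxdev->dev);
	}

	static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev,
						 void *data)
	{
		dev_set_drvdata(&auxdev->dev, data);
	}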
diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
index aa20827dcc9d..d0d4f2b77d34 100644
--- a/drivers/infiniband/hw/irdma/pble.h
+++ b/drivers/infiniband/hw/irdma/pble.h
@@ -69,7 +69,7 @@ struct irdma_add_page_info {
struct irdma_chunk {
struct list_head list;
struct irdma_dma_info dmainfo;
- void *bitmapbuf;
+ unsigned long *bitmapbuf;
u32 sizeofbitmap;
u64 size;
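The kernel bitmap helpers (set_bit(), find_first_zero_bit(), bitmap_zalloc(), and friends) are defined over unsigned long words, so typing bitmapbuf as unsigned long * instead of void * lets the compiler check every call site. Typical usage, as a sketch rather than the driver's actual allocation path:

	unsigned long *bitmapbuf;

	bitmapbuf = bitmap_zalloc(nbits, GFP_KERNEL);
	if (!bitmapbuf)
		return -ENOMEM;

	set_bit(0, bitmapbuf);	/* now type-checked against unsigned long * */
	bitmap_free(bitmapbuf);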
diff --git a/drivers/infiniband/hw/irdma/uda.c b/drivers/infiniband/hw/irdma/uda.c
index f5b1b6150cdc..7a9988ddbd01 100644
--- a/drivers/infiniband/hw/irdma/uda.c
+++ b/drivers/infiniband/hw/irdma/uda.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2016 - 2021 Intel Corporation */
+#include <linux/etherdevice.h>
+
#include "osdep.h"
#include "status.h"
#include "hmc.h"
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 8cd5f9261692..460e757d3fe6 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -21,7 +21,8 @@ static int irdma_query_device(struct ib_device *ibdev,
return -EINVAL;
memset(props, 0, sizeof(*props));
- ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
+ addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+ iwdev->netdev->dev_addr);
props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
irdma_fw_minor_ver(&rf->sc_dev);
props->device_cap_flags = iwdev->device_cap_flags;
@@ -1170,6 +1171,10 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
udp_info->ttl = attr->ah_attr.grh.hop_limit;
udp_info->flow_label = attr->ah_attr.grh.flow_label;
udp_info->tos = attr->ah_attr.grh.traffic_class;
+ udp_info->src_port =
+ rdma_get_udp_sport(udp_info->flow_label,
+ ibqp->qp_num,
+ roce_info->dest_qp);
irdma_qp_rem_qos(&iwqp->sc_qp);
dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
ctx_info->user_pri = rt_tos2priority(udp_info->tos);
@@ -4321,24 +4326,6 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET;
}
-static __be64 irdma_mac_to_guid(struct net_device *ndev)
-{
- const unsigned char *mac = ndev->dev_addr;
- __be64 guid;
- unsigned char *dst = (unsigned char *)&guid;
-
- dst[0] = mac[0] ^ 2;
- dst[1] = mac[1];
- dst[2] = mac[2];
- dst[3] = 0xff;
- dst[4] = 0xfe;
- dst[5] = mac[3];
- dst[6] = mac[4];
- dst[7] = mac[5];
-
- return guid;
-}
-
static const struct ib_device_ops irdma_roce_dev_ops = {
.attach_mcast = irdma_attach_mcast,
.create_ah = irdma_create_ah,
@@ -4408,7 +4395,8 @@ static const struct ib_device_ops irdma_dev_ops = {
static void irdma_init_roce_device(struct irdma_device *iwdev)
{
iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
- iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ iwdev->netdev->dev_addr);
ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
}
@@ -4421,7 +4409,8 @@ static int irdma_init_iw_device(struct irdma_device *iwdev)
struct net_device *netdev = iwdev->netdev;
iwdev->ibdev.node_type = RDMA_NODE_RNIC;
- ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr);
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ netdev->dev_addr);
iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
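[The deleted irdma_mac_to_guid() open-coded the EUI-48 to EUI-64 expansion that addrconf_addr_eui48() performs: copy the MAC around an 0xff/0xfe filler and flip the locally-administered bit. A sketch of the equivalence (mac_to_eui64 is a hypothetical name):

#include <linux/types.h>
#include <net/addrconf.h>

static void mac_to_eui64(u8 eui[8], const u8 mac[6])
{
	/* Produces the same bytes as addrconf_addr_eui48(eui, mac): */
	eui[0] = mac[0] ^ 2;	/* toggle the locally-administered bit */
	eui[1] = mac[1];
	eui[2] = mac[2];
	eui[3] = 0xff;		/* EUI-64 filler */
	eui[4] = 0xfe;
	eui[5] = mac[3];
	eui[6] = mac[4];
	eui[7] = mac[5];
}
]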
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0d2fa3338784..1c3d97229988 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -85,14 +85,6 @@ static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
static struct workqueue_struct *wq;
-static void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static int check_flow_steering_support(struct mlx4_dev *dev)
{
int eth_num_ports = 0;
@@ -471,7 +463,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
@@ -669,7 +661,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -721,7 +713,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port,
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == IB_SPEED_QDR) {
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -848,7 +840,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -870,7 +862,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
}
}
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -917,7 +909,7 @@ static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
in_mad->attr_mod = 0;
@@ -971,7 +963,7 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -1990,7 +1982,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
if (mlx4_is_master(dev->dev))
mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
@@ -2784,10 +2776,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (err)
goto err_counter;
- ibdev->ib_uc_qpns_bitmap =
- kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
- sizeof(long),
- GFP_KERNEL);
+ ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count,
+ GFP_KERNEL);
if (!ibdev->ib_uc_qpns_bitmap)
goto err_steer_qp_release;
@@ -2875,7 +2865,7 @@ err_diag_counters:
mlx4_ib_diag_cleanup(ibdev);
err_steer_free_bitmap:
- kfree(ibdev->ib_uc_qpns_bitmap);
+ bitmap_free(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
@@ -2988,7 +2978,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
- kfree(ibdev->ib_uc_qpns_bitmap);
+ bitmap_free(ibdev->ib_uc_qpns_bitmap);
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
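[bitmap_alloc()/bitmap_zalloc() take a bit count and do the BITS_TO_LONGS(nbits) * sizeof(long) sizing internally, so the kmalloc_array() arithmetic above disappears, and bitmap_free() pairs with them. A minimal sketch with a hypothetical table:

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *table_create(unsigned int nbits)
{
	/* zeroed bitmap of @nbits bits; sizing handled by the helper */
	return bitmap_zalloc(nbits, GFP_KERNEL);
}

static void table_destroy(unsigned long *table)
{
	bitmap_free(table);	/* pairs with bitmap_alloc()/bitmap_zalloc() */
}
]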
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index a190fb581591..08371a80fdc2 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -328,8 +328,11 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
}
wc->vendor_err = cqe->vendor_err_synd;
- if (dump)
+ if (dump) {
+ mlx5_ib_warn(dev, "WC error: %d, Message: %s\n", wc->status,
+ ib_wc_status_msg(wc->status));
dump_cqe(dev, cqe);
+ }
}
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 6398e2f48579..e32111117a5e 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -32,6 +32,7 @@
#include <linux/kref.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index b780185d9dc6..661ed2b44508 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1508,7 +1508,7 @@ _get_flow_table(struct mlx5_ib_dev *dev,
!esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
break;
- case MLX5_FLOW_NAMESPACE_FDB:
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
max_table_size = BIT(
MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
@@ -1517,7 +1517,7 @@ _get_flow_table(struct mlx5_ib_dev *dev,
reformat_l3_tunnel_to_l2) &&
esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- priority = FDB_BYPASS_PATH;
+ priority = fs_matcher->priority;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX:
max_table_size = BIT(
@@ -1546,8 +1546,8 @@ _get_flow_table(struct mlx5_ib_dev *dev,
case MLX5_FLOW_NAMESPACE_EGRESS:
prio = &dev->flow_db->egress_prios[priority];
break;
- case MLX5_FLOW_NAMESPACE_FDB:
- prio = &dev->flow_db->fdb;
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
+ prio = &dev->flow_db->fdb[priority];
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX:
prio = &dev->flow_db->rdma_rx[priority];
@@ -1937,7 +1937,7 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
*namespace = MLX5_FLOW_NAMESPACE_EGRESS;
break;
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
- *namespace = MLX5_FLOW_NAMESPACE_FDB;
+ *namespace = MLX5_FLOW_NAMESPACE_FDB_BYPASS;
break;
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
*namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
@@ -2029,8 +2029,8 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
}
/* Allow only DEVX object, drop as dest for FDB */
- if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx ||
- (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS &&
+ !(dest_devx || (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
return -EINVAL;
/* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
@@ -2050,7 +2050,7 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
if (!is_flow_dest(devx_obj, dest_id, dest_type))
return -EINVAL;
/* Allow only flow table as dest when inserting to FDB or RDMA_RX */
- if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
+ if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS ||
fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
*dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
return -EINVAL;
@@ -2320,7 +2320,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
if (err)
goto end;
- if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+ if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS &&
mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
err = -EINVAL;
goto end;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index ec242a5a17a3..293ed709e5ed 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -291,7 +291,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -318,7 +318,7 @@ static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
if (!in_mad)
return -ENOMEM;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
@@ -405,7 +405,7 @@ int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
@@ -430,7 +430,7 @@ int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
@@ -456,7 +456,7 @@ int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -485,7 +485,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -496,7 +496,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
memcpy(gid->raw, out_mad->data + 8, 8);
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -530,7 +530,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
/* props being zeroed by the caller, avoid zeroing it here */
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -584,6 +584,11 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
props->active_speed = IB_SPEED_HDR;
break;
+ case 8:
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
+ props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP)
+ props->active_speed = IB_SPEED_NDR;
+ break;
}
}
@@ -591,7 +596,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
if (props->active_speed == 4) {
if (dev->port_caps[port - 1].ext_port_cap &
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
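[The identical init_query_mad() copies deleted across these drivers were consolidated into ib_init_query_mad() in the shared rdma headers; as the removed per-driver versions show, the body is four field assignments. A sketch reproducing what the helper does:

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>

/* Mirrors the deleted per-driver init_query_mad() copies; the shared
 * helper is ib_init_query_mad(). */
static void init_query_mad_sketch(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}
]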
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5ec8bd2f0b2f..85f526c861e9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4422,7 +4422,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
}
mutex_unlock(&mlx5_ib_multiport_mutex);
- dev_set_drvdata(&adev->dev, mpi);
+ auxiliary_set_drvdata(adev, mpi);
return 0;
}
@@ -4430,7 +4430,7 @@ static void mlx5r_mp_remove(struct auxiliary_device *adev)
{
struct mlx5_ib_multiport_info *mpi;
- mpi = dev_get_drvdata(&adev->dev);
+ mpi = auxiliary_get_drvdata(adev);
mutex_lock(&mlx5_ib_multiport_mutex);
if (mpi->ibdev)
mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
@@ -4480,7 +4480,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
return ret;
}
- dev_set_drvdata(&adev->dev, dev);
+ auxiliary_set_drvdata(adev, dev);
return 0;
}
@@ -4488,7 +4488,7 @@ static void mlx5r_remove(struct auxiliary_device *adev)
{
struct mlx5_ib_dev *dev;
- dev = dev_get_drvdata(&adev->dev);
+ dev = auxiliary_get_drvdata(adev);
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e636e954f6bf..cbc20e400be0 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -232,6 +232,7 @@ enum {
#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS 2
#define MLX5_IB_NUM_EGRESS_FTS 1
+#define MLX5_IB_NUM_FDB_FTS MLX5_BY_PASS_NUM_REGULAR_PRIOS
struct mlx5_ib_flow_prio {
struct mlx5_flow_table *flow_table;
unsigned int refcount;
@@ -276,7 +277,7 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
- struct mlx5_ib_flow_prio fdb;
+ struct mlx5_ib_flow_prio fdb[MLX5_IB_NUM_FDB_FTS];
struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
@@ -664,9 +665,9 @@ struct mlx5_ib_mr {
/* User MR data */
struct mlx5_cache_ent *cache_ent;
+ /* Everything after cache_ent is zero'd when MR allocated */
struct ib_umem *umem;
- /* This is zero'd when the MR is allocated */
union {
/* Used only while the MR is in the cache */
struct {
@@ -718,7 +719,7 @@ struct mlx5_ib_mr {
/* Zero the fields in the mr that are variant depending on usage */
static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
{
- memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
+ memset_after(mr, 0, cache_ent);
}
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
@@ -1465,14 +1466,6 @@ extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];
-static inline void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static inline int is_qp1(enum ib_qp_type qp_type)
{
return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
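[memset_after(obj, v, member) writes v over everything in *obj that follows member, replacing sizeof/offsetof arithmetic that silently breaks when fields are reordered. A sketch with a hypothetical layout:

#include <linux/string.h>

struct demo {			/* hypothetical layout, for illustration */
	int keep_a;
	int keep_b;		/* last preserved field */
	int wipe_c;
	int wipe_d;
};

static void demo_reset_tail(struct demo *d)
{
	/*
	 * Zeroes wipe_c and wipe_d; equivalent to
	 * memset(&d->wipe_c, 0,
	 *        sizeof(*d) - offsetof(struct demo, wipe_c));
	 */
	memset_after(d, 0, keep_b);
}
]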
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 91eb615b89ee..86842cd580ba 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1541,16 +1541,10 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) {
- .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_IB_NUM_PF_EQE,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
- if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {
- err = -ENOMEM;
- goto err_wq;
- }
eq->core = mlx5_eq_create_generic(dev->mdev, &param);
- free_cpumask_var(param.affinity);
if (IS_ERR(eq->core)) {
err = PTR_ERR(eq->core);
goto err_wq;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index e5abbcfc1d57..29475cf8c7c3 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index aef1d274a14e..9f0f79d02d3c 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -51,7 +51,7 @@ u32 mthca_alloc(struct mthca_alloc *alloc)
}
if (obj < alloc->max) {
- set_bit(obj, alloc->table);
+ __set_bit(obj, alloc->table);
obj |= alloc->top;
} else
obj = -1;
@@ -69,7 +69,7 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj)
spin_lock_irqsave(&alloc->lock, flags);
- clear_bit(obj, alloc->table);
+ __clear_bit(obj, alloc->table);
alloc->last = min(alloc->last, obj);
alloc->top = (alloc->top + alloc->max) & alloc->mask;
@@ -79,8 +79,6 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj)
int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
u32 reserved)
{
- int i;
-
/* num must be a power of 2 */
if (num != 1 << (ffs(num) - 1))
return -EINVAL;
@@ -90,21 +88,18 @@ int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
alloc->max = num;
alloc->mask = mask;
spin_lock_init(&alloc->lock);
- alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
- GFP_KERNEL);
+ alloc->table = bitmap_zalloc(num, GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
- bitmap_zero(alloc->table, num);
- for (i = 0; i < reserved; ++i)
- set_bit(i, alloc->table);
+ bitmap_set(alloc->table, 0, reserved);
return 0;
}
void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
- kfree(alloc->table);
+ bitmap_free(alloc->table);
}
/*
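[set_bit()/clear_bit() are atomic read-modify-write operations; under alloc->lock that atomicity is redundant, so the patch switches to the non-atomic __set_bit()/__clear_bit(), which the spinlock already serializes. A minimal sketch:

#include <linux/bitops.h>
#include <linux/spinlock.h>

static void table_mark(unsigned long *table, unsigned int obj,
		       spinlock_t *lock)
{
	spin_lock(lock);
	__set_bit(obj, table);	/* the lock provides the exclusion */
	spin_unlock(lock);
}
]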
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index ce0e0867e488..192f83fd7c8a 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -101,13 +101,13 @@ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
return -1;
found:
- clear_bit(seg, buddy->bits[o]);
+ __clear_bit(seg, buddy->bits[o]);
--buddy->num_free[o];
while (o > order) {
--o;
seg <<= 1;
- set_bit(seg ^ 1, buddy->bits[o]);
+ __set_bit(seg ^ 1, buddy->bits[o]);
++buddy->num_free[o];
}
@@ -125,13 +125,13 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
spin_lock(&buddy->lock);
while (test_bit(seg ^ 1, buddy->bits[order])) {
- clear_bit(seg ^ 1, buddy->bits[order]);
+ __clear_bit(seg ^ 1, buddy->bits[order]);
--buddy->num_free[order];
seg >>= 1;
++order;
}
- set_bit(seg, buddy->bits[order]);
+ __set_bit(seg, buddy->bits[order]);
++buddy->num_free[order];
spin_unlock(&buddy->lock);
@@ -139,7 +139,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
- int i, s;
+ int i;
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
@@ -152,22 +152,20 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
+ buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i),
+ GFP_KERNEL);
if (!buddy->bits[i])
goto err_out_free;
- bitmap_zero(buddy->bits[i],
- 1 << (buddy->max_order - i));
}
- set_bit(0, buddy->bits[buddy->max_order]);
+ __set_bit(0, buddy->bits[buddy->max_order]);
buddy->num_free[buddy->max_order] = 1;
return 0;
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ bitmap_free(buddy->bits[i]);
err_out:
kfree(buddy->bits);
@@ -181,7 +179,7 @@ static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ bitmap_free(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
@@ -469,8 +467,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
mpt_entry->start = cpu_to_be64(iova);
mpt_entry->length = cpu_to_be64(total_size);
- memset(&mpt_entry->lkey, 0,
- sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
+ memset_startat(mpt_entry, 0, lkey);
if (mr->mtt)
mpt_entry->mtt_seg =
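[memset_startat(obj, v, member) is the companion helper to memset_after(): it writes v from member to the end of *obj, again replacing the fragile offsetof arithmetic shown in the removed line. A sketch with a hypothetical entry layout:

#include <linux/string.h>
#include <linux/types.h>

struct entry {			/* hypothetical layout, for illustration */
	__be64 start;		/* preserved */
	__be32 lkey;		/* first cleared field */
	__be32 flags;
};

static void entry_clear_from_lkey(struct entry *e)
{
	/* Zeroes lkey and flags, i.e. everything from lkey onward. */
	memset_startat(e, 0, lkey);
}
]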
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index ceee23ebc0f2..c46df53f26cf 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -50,14 +50,6 @@
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"
-static void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
@@ -78,7 +70,7 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->fw_ver = mdev->fw_ver;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(mdev, 1, 1,
@@ -140,7 +132,7 @@ static int mthca_query_port(struct ib_device *ibdev,
/* props being zeroed by the caller, avoid zeroing it here */
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -234,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -263,7 +255,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -274,7 +266,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port,
memcpy(gid->raw, out_mad->data + 8, 8);
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -1006,7 +998,7 @@ static int mthca_init_node_data(struct mthca_dev *dev)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mthca_MAD_IFC(dev, 1, 1,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index c51c3f40700e..265a581133dc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1506,7 +1506,6 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
{
int status = -ENOMEM;
- size_t pd_bitmap_size;
struct ocrdma_alloc_pd_range *cmd;
struct ocrdma_alloc_pd_range_rsp *rsp;
@@ -1528,10 +1527,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_dpp_pd = rsp->pd_count;
- pd_bitmap_size =
- BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
- dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
- GFP_KERNEL);
+ dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count,
+ GFP_KERNEL);
}
kfree(cmd);
}
@@ -1547,9 +1544,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_normal_pd = rsp->pd_count;
- pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
- dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
- GFP_KERNEL);
+ dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count,
+ GFP_KERNEL);
}
kfree(cmd);
@@ -1611,8 +1607,8 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
{
ocrdma_mbx_dealloc_pd_range(dev);
- kfree(dev->pd_mgr->pd_norm_bitmap);
- kfree(dev->pd_mgr->pd_dpp_bitmap);
+ bitmap_free(dev->pd_mgr->pd_norm_bitmap);
+ bitmap_free(dev->pd_mgr->pd_dpp_bitmap);
kfree(dev->pd_mgr);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 7abf6cf1e937..5d4b3bc16493 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -62,20 +62,6 @@ MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("Dual BSD/GPL");
-void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
-{
- u8 mac_addr[6];
-
- memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
- guid[0] = mac_addr[0] ^ 2;
- guid[1] = mac_addr[1];
- guid[2] = mac_addr[2];
- guid[3] = 0xff;
- guid[4] = 0xfe;
- guid[5] = mac_addr[3];
- guid[6] = mac_addr[4];
- guid[7] = mac_addr[5];
-}
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
u32 port_num)
{
@@ -203,7 +189,8 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
{
int ret;
- ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
+ addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+ dev->nic_info.mac_addr);
BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC));
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 735123d0e9ec..acf9970ec245 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -41,6 +41,7 @@
*/
#include <linux/dma-mapping.h>
+#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
@@ -74,7 +75,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
memset(attr, 0, sizeof *attr);
memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
- ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ dev->nic_info.mac_addr);
attr->max_mr_size = dev->attr.max_mr_size;
attr->page_size_cap = 0xffff000;
attr->vendor_id = dev->nic_info.pdev->vendor;
@@ -245,13 +247,13 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
u16 pd_bitmap_idx = 0;
- const unsigned long *pd_bitmap;
+ unsigned long *pd_bitmap;
if (dpp_pool) {
pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
dev->pd_mgr->max_dpp_pd);
- __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
+ __set_bit(pd_bitmap_idx, pd_bitmap);
dev->pd_mgr->pd_dpp_count++;
if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
@@ -259,7 +261,7 @@ static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
dev->pd_mgr->max_normal_pd);
- __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
+ __set_bit(pd_bitmap_idx, pd_bitmap);
dev->pd_mgr->pd_norm_count++;
if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
@@ -1844,12 +1846,10 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq,
int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
- int status;
struct ocrdma_srq *srq;
srq = get_ocrdma_srq(ibsrq);
- status = ocrdma_mbx_query_srq(srq, srq_attr);
- return status;
+ return ocrdma_mbx_query_srq(srq, srq_attr);
}
int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
@@ -1960,7 +1960,6 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
const struct ib_send_wr *wr)
{
- int status;
struct ocrdma_sge *sge;
u32 wqe_size = sizeof(*hdr);
@@ -1972,8 +1971,7 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
sge = (struct ocrdma_sge *)(hdr + 1);
}
- status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
- return status;
+ return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
}
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b73d742a520c..f860b7fcef33 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -59,7 +59,6 @@ int ocrdma_query_port(struct ib_device *ibdev, u32 port,
enum rdma_protocol_type
ocrdma_query_protocol(struct ib_device *device, u32 port_num);
-void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 9100009f0a23..a53476653b0d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1931,6 +1931,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* db offset was calculated in copy_qp_uresp, now set in the user q */
if (qedr_qp_has_sq(qp)) {
qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+ qp->sq.max_wr = attrs->cap.max_send_wr;
rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
&qp->usq.db_rec_data->db_data,
DB_REC_WIDTH_32B,
@@ -1941,6 +1942,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
if (qedr_qp_has_rq(qp)) {
qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
&qp->urq.db_rec_data->db_data,
DB_REC_WIDTH_32B,
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 9363bccfc6e7..a8e1c30c370f 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -196,7 +196,7 @@ struct qib_ctxtdata {
pid_t pid;
pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
/* same size as task_struct .comm[], command that opened context */
- char comm[16];
+ char comm[TASK_COMM_LEN];
/* pkeys set by this use of this ctxt */
u16 pkeys[4];
/* so file ops can get at unit */
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 63854f4b6524..aa290928cf96 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1321,7 +1321,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
rcd->tid_pg_list = ptmp;
rcd->pid = current->pid;
init_waitqueue_head(&dd->rcd[ctxt]->wait);
- strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
+ get_task_comm(rcd->comm, current);
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
dd->freectxts--;
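[Sizing comm[] as TASK_COMM_LEN and filling it with get_task_comm() keeps the buffer in lockstep with task_struct::comm: the helper copies under the task lock and build-asserts that the destination is exactly TASK_COMM_LEN bytes, which a bare strlcpy() cannot check. A sketch with a hypothetical context struct:

#include <linux/sched.h>

struct ctx {			/* hypothetical, for illustration */
	char comm[TASK_COMM_LEN];
};

static void ctx_record_opener(struct ctx *c)
{
	/* sizeof(c->comm) == TASK_COMM_LEN is verified at build time */
	get_task_comm(c->comm, current);
}
]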
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a9b83bc13f4a..aea571943768 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3030,7 +3030,7 @@ static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/* Does read/modify/write to appropriate registers to
* set output and direction bits selected by mask.
- * these are in their canonical postions (e.g. lsb of
+ * these are in their canonical positions (e.g. lsb of
* dir will end up in D48 of extctrl on existing chips).
* returns contents of GP Inputs.
*/
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index d1c0bc31869f..80a8dd6c7814 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -3742,7 +3742,7 @@ static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/*
* Does read/modify/write to appropriate registers to
* set output and direction bits selected by mask.
- * these are in their canonical postions (e.g. lsb of
+ * these are in their canonical positions (e.g. lsb of
* dir will end up in D48 of extctrl on existing chips).
* returns contents of GP Inputs.
*/
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ab98b6a3ae1e..ceed302cf6a0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5665,7 +5665,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/*
* Does read/modify/write to appropriate registers to
* set output and direction bits selected by mask.
- * these are in their canonical postions (e.g. lsb of
+ * these are in their canonical positions (e.g. lsb of
* dir will end up in D48 of extctrl on existing chips).
* returns contents of GP Inputs.
*/
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index ef91bff5c23c..0080f0be72fe 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -425,7 +425,7 @@ static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
}
#endif
-static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
+static void qib_copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
u32 length, unsigned flush_wc)
{
u32 extra = 0;
@@ -975,7 +975,7 @@ static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
qib_pio_copy(piobuf, addr, dwords);
goto done;
}
- copy_io(piobuf, ss, len, flush_wc);
+ qib_copy_io(piobuf, ss, len, flush_wc);
done:
if (dd->flags & QIB_USE_SPCL_TRIG) {
u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 586b0e52ba7f..7d868f033bbf 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -243,10 +243,11 @@ static struct attribute *usnic_ib_qpn_default_attrs[] = {
&qpn_attr_summary.attr,
NULL
};
+ATTRIBUTE_GROUPS(usnic_ib_qpn_default);
static struct kobj_type usnic_ib_qpn_type = {
.sysfs_ops = &usnic_ib_qpn_sysfs_ops,
- .default_attrs = usnic_ib_qpn_default_attrs
+ .default_groups = usnic_ib_qpn_default_groups,
};
int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
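[kobj_type is being converted from .default_attrs to .default_groups tree-wide: ATTRIBUTE_GROUPS(usnic_ib_qpn_default) expands the usnic_ib_qpn_default_attrs[] array into a NULL-terminated usnic_ib_qpn_default_groups[] that .default_groups can point at. A self-contained sketch of the same pattern with hypothetical demo_* names:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "demo\n");
}
static struct kobj_attribute demo_attr = __ATTR_RO(demo);

static struct attribute *demo_attrs[] = {
	&demo_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);		/* emits demo_groups[] from demo_attrs[] */

static struct kobj_type demo_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= demo_groups,
};
]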
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 756a83bcff58..5a0e26cd648e 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -442,12 +442,10 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct usnic_ib_pd *pd = to_upd(ibpd);
- void *umem_pd;
- umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
- if (IS_ERR_OR_NULL(umem_pd)) {
- return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM;
- }
+ pd->umem_pd = usnic_uiom_alloc_pd();
+ if (IS_ERR(pd->umem_pd))
+ return PTR_ERR(pd->umem_pd);
return 0;
}
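[usnic_uiom_alloc_pd() returns either a valid pointer or an ERR_PTR() encoding, never NULL, so IS_ERR() is the complete test and the IS_ERR_OR_NULL() dance above only obscured that contract. A sketch of the tightened pattern (struct foo and make_pd() are hypothetical):

#include <linux/err.h>

struct foo {		/* hypothetical, for illustration */
	void *pd;
};

static void *make_pd(void)
{
	/* stand-in allocator: returns a valid pointer or ERR_PTR() */
	return ERR_PTR(-ENOMEM);
}

static int foo_attach_pd(struct foo *f)
{
	f->pd = make_pd();
	if (IS_ERR(f->pd))	/* NULL is not in the contract */
		return PTR_ERR(f->pd);
	return 0;
}
]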
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
index bf51357ea3aa..9a4de962e947 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
@@ -63,12 +63,12 @@ int pvrdma_uar_table_init(struct pvrdma_dev *dev)
tbl->max = num;
tbl->mask = mask;
spin_lock_init(&tbl->lock);
- tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
+ tbl->table = bitmap_zalloc(num, GFP_KERNEL);
if (!tbl->table)
return -ENOMEM;
/* 0th UAR is taken by the device. */
- set_bit(0, tbl->table);
+ __set_bit(0, tbl->table);
return 0;
}
@@ -77,7 +77,7 @@ void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev)
{
struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
- kfree(tbl->table);
+ bitmap_free(tbl->table);
}
int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
@@ -100,7 +100,7 @@ int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
return -ENOMEM;
}
- set_bit(obj, tbl->table);
+ __set_bit(obj, tbl->table);
obj |= tbl->top;
spin_unlock_irqrestore(&tbl->lock, flags);
@@ -120,7 +120,7 @@ void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
obj = uar->index & (tbl->max - 1);
spin_lock_irqsave(&tbl->lock, flags);
- clear_bit(obj, tbl->table);
+ __clear_bit(obj, tbl->table);
tbl->last = min(tbl->last, obj);
tbl->top = (tbl->top + tbl->max) & tbl->mask;
spin_unlock_irqrestore(&tbl->lock, flags);
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
index 1e24673e9318..5395a581f4bb 100644
--- a/drivers/infiniband/sw/rxe/Makefile
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -22,5 +22,4 @@ rdma_rxe-y := \
rxe_mcast.o \
rxe_task.o \
rxe_net.o \
- rxe_sysfs.o \
rxe_hw_counters.o
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 8e0f9c489cab..fab291245366 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -13,8 +13,6 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
-bool rxe_initialized;
-
/* free resources for a rxe device all objects created for this device must
* have been destroyed
*/
@@ -290,7 +288,6 @@ static int __init rxe_module_init(void)
return err;
rdma_link_register(&rxe_link_ops);
- rxe_initialized = true;
pr_info("loaded\n");
return 0;
}
@@ -301,7 +298,6 @@ static void __exit rxe_module_exit(void)
ib_unregister_driver(RDMA_DRIVER_RXE);
rxe_net_exit();
- rxe_initialized = false;
pr_info("unloaded\n");
}
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 1bb3fb618bf5..fb9066e6f5f0 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -39,8 +39,6 @@
#define RXE_ROCE_V2_SPORT (0xc000)
-extern bool rxe_initialized;
-
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index d771ba8449a1..f363fe3fa414 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -458,8 +458,6 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- unsigned long flags;
-
if (wqe->has_rd_atomic) {
wqe->has_rd_atomic = 0;
atomic_inc(&qp->req.rd_atomic);
@@ -472,11 +470,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
/* state_lock used by requester & completer */
- spin_lock_irqsave(&qp->state_lock, flags);
+ spin_lock_bh(&qp->state_lock);
if ((qp->req.state == QP_STATE_DRAIN) &&
(qp->comp.psn == qp->req.psn)) {
qp->req.state = QP_STATE_DRAINED;
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -488,7 +486,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
qp->ibqp.qp_context);
}
} else {
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 6848426c074f..6baaaa34458e 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -42,14 +42,13 @@ err1:
static void rxe_send_complete(struct tasklet_struct *t)
{
struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
- unsigned long flags;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
if (cq->is_dying) {
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
return;
}
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
@@ -106,15 +105,14 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
struct ib_event ev;
- unsigned long flags;
int full;
void *addr;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
ev.element.cq = &cq->ibcq;
@@ -130,7 +128,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
if ((cq->notify == IB_CQ_NEXT_COMP) ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
@@ -143,16 +141,14 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
void rxe_cq_disable(struct rxe_cq *cq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
cq->is_dying = true;
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
}
-void rxe_cq_cleanup(struct rxe_pool_entry *arg)
+void rxe_cq_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
+ struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
if (cq->queue)
rxe_queue_cleanup(cq->queue);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 1ca43b859d80..b1e174afb1d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -37,7 +37,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
void rxe_cq_disable(struct rxe_cq *cq);
-void rxe_cq_cleanup(struct rxe_pool_entry *arg);
+void rxe_cq_cleanup(struct rxe_pool_elem *arg);
/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
@@ -51,7 +51,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *arg);
/* rxe_mmap.c */
struct rxe_mmap_info {
@@ -89,7 +89,7 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-void rxe_mr_cleanup(struct rxe_pool_entry *arg);
+void rxe_mr_cleanup(struct rxe_pool_elem *arg);
/* rxe_mw.c */
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
@@ -97,7 +97,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw);
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
-void rxe_mw_cleanup(struct rxe_pool_entry *arg);
+void rxe_mw_cleanup(struct rxe_pool_elem *arg);
/* rxe_net.c */
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
@@ -131,7 +131,7 @@ void rxe_qp_error(struct rxe_qp *qp);
void rxe_qp_destroy(struct rxe_qp *qp);
-void rxe_qp_cleanup(struct rxe_pool_entry *arg);
+void rxe_qp_cleanup(struct rxe_pool_elem *elem);
static inline int qp_num(struct rxe_qp *qp)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index 1c1d1b53312d..bd1ac88b8700 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -40,12 +40,11 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
int err;
struct rxe_mc_grp *grp;
struct rxe_pool *pool = &rxe->mc_grp_pool;
- unsigned long flags;
if (rxe->attr.max_mcast_qp_attach == 0)
return -EINVAL;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
grp = rxe_pool_get_key_locked(pool, mgid);
if (grp)
@@ -53,13 +52,13 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
grp = create_grp(rxe, pool, mgid);
if (IS_ERR(grp)) {
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
err = PTR_ERR(grp);
return err;
}
done:
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
*grp_p = grp;
return 0;
}
@@ -169,9 +168,9 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
}
}
-void rxe_mc_cleanup(struct rxe_pool_entry *arg)
+void rxe_mc_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
+ struct rxe_mc_grp *grp = container_of(elem, typeof(*grp), elem);
struct rxe_dev *rxe = grp->rxe;
rxe_drop_key(grp);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index bcf717bcf0b3..453ef3c9d535 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -50,7 +50,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
static void rxe_mr_init(int access, struct rxe_mr *mr)
{
- u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1);
+ u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1);
u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
/* set ibmr->l/rkey and also copy into private l/rkey
@@ -697,9 +697,9 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return 0;
}
-void rxe_mr_cleanup(struct rxe_pool_entry *arg)
+void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
+ struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
ib_umem_release(mr->umem);
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 9534a7fe1a98..32dd8c0b8b9e 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -21,7 +21,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
}
rxe_add_index(mw);
- mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
+ mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);
@@ -56,11 +56,10 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
{
struct rxe_mw *mw = to_rmw(ibmw);
struct rxe_pd *pd = to_rpd(ibmw->pd);
- unsigned long flags;
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
rxe_do_dealloc_mw(mw);
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
rxe_drop_ref(mw);
rxe_drop_ref(pd);
@@ -197,7 +196,6 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
- unsigned long flags;
mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
if (unlikely(!mw)) {
@@ -225,7 +223,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
mr = NULL;
}
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
ret = rxe_check_bind_mw(qp, wqe, mw, mr);
if (ret)
@@ -233,7 +231,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
err_drop_mr:
if (mr)
rxe_drop_ref(mr);
@@ -280,7 +278,6 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw)
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- unsigned long flags;
struct rxe_mw *mw;
int ret;
@@ -295,7 +292,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
goto err_drop_ref;
}
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
ret = rxe_check_invalidate_mw(qp, mw);
if (ret)
@@ -303,7 +300,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
rxe_do_invalidate_mw(mw);
err_unlock:
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
err_drop_ref:
rxe_drop_ref(mw);
err:
@@ -333,9 +330,9 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
return mw;
}
-void rxe_mw_cleanup(struct rxe_pool_entry *elem)
+void rxe_mw_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);
+ struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
rxe_drop_index(mw);
}
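[The rxe locks touched in this series are taken only from process and softirq context, never from hard-IRQ context, so spin_lock_bh() suffices: it blocks bottom halves without saving and restoring the interrupt flags, and the on-stack flags variables go away. A minimal sketch:

#include <linux/spinlock.h>

static void state_touch(spinlock_t *lock, int *state)
{
	spin_lock_bh(lock);	/* no flags variable needed */
	*state = 1;
	spin_unlock_bh(lock);
}
]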
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 2cb810cb890a..be72bdbfb4ba 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -22,24 +22,20 @@ static struct rxe_recv_sockets recv_sockets;
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
- int err;
unsigned char ll_addr[ETH_ALEN];
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- err = dev_mc_add(rxe->ndev, ll_addr);
- return err;
+ return dev_mc_add(rxe->ndev, ll_addr);
}
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
- int err;
unsigned char ll_addr[ETH_ALEN];
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- err = dev_mc_del(rxe->ndev, ll_addr);
- return err;
+ return dev_mc_del(rxe->ndev, ll_addr);
}
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
@@ -444,7 +440,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
else
err = rxe_send(skb, pkt);
if (err) {
- rxe->xmit_errors++;
rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 3ef5a10a6efd..df596ba7527d 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -108,8 +108,8 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
[IB_OPCODE_RC_SEND_FIRST] = {
.name = "IB_OPCODE_RC_SEND_FIRST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -117,9 +117,9 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
}
},
[IB_OPCODE_RC_SEND_MIDDLE] = {
- .name = "IB_OPCODE_RC_SEND_MIDDLE]",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .name = "IB_OPCODE_RC_SEND_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -128,8 +128,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_LAST] = {
.name = "IB_OPCODE_RC_SEND_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -138,21 +138,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_SEND_ONLY] = {
.name = "IB_OPCODE_RC_SEND_ONLY",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -161,33 +161,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_FIRST",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -196,8 +196,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -206,69 +206,69 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_ONLY",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_REQUEST] = {
.name = "IB_OPCODE_RC_RDMA_READ_REQUEST",
- .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = {
@@ -282,109 +282,110 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RC_ACKNOWLEDGE",
- .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE",
- .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_ATMACK] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMACK_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_COMPARE_SWAP] = {
.name = "IB_OPCODE_RC_COMPARE_SWAP",
- .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_ATMETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES,
}
},
[IB_OPCODE_RC_FETCH_ADD] = {
.name = "IB_OPCODE_RC_FETCH_ADD",
- .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_ATMETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES,
}
},
[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = {
.name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE",
- .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IETH_BYTES,
}
},
[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = {
.name = "IB_OPCODE_RC_SEND_ONLY_INV",
- .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_END_MASK | RXE_START_MASK,
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_END_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IETH_BYTES,
}
},
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = {
.name = "IB_OPCODE_UC_SEND_FIRST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -393,8 +394,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_MIDDLE] = {
.name = "IB_OPCODE_UC_SEND_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -403,8 +404,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_LAST] = {
.name = "IB_OPCODE_UC_SEND_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -413,21 +414,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_SEND_ONLY] = {
.name = "IB_OPCODE_UC_SEND_ONLY",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -436,33 +437,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_FIRST",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -471,8 +472,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -481,460 +482,460 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_ONLY",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
/* RD */
[IB_OPCODE_RD_SEND_FIRST] = {
.name = "IB_OPCODE_RD_SEND_FIRST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_MIDDLE] = {
.name = "IB_OPCODE_RD_SEND_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_LAST] = {
.name = "IB_OPCODE_RD_SEND_LAST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK
- | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_SEND_ONLY] = {
.name = "IB_OPCODE_RD_SEND_ONLY",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_FIRST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_LAST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_ONLY",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES
- + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES +
+ RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_REQUEST] = {
.name = "IB_OPCODE_RD_RDMA_READ_REQUEST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_REQ_MASK | RXE_READ_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_REQ_MASK | RXE_READ_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK
- | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
- | RXE_ACK_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_ACK_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
- | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RD_ACKNOWLEDGE",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK
- | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK |
+ RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMACK] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_COMPARE_SWAP] = {
.name = "RD_COMPARE_SWAP",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
- | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
+ RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
[RXE_PAYLOAD] = RXE_BTH_BYTES +
- + RXE_ATMETH_BYTES
- + RXE_DETH_BYTES +
- + RXE_RDETH_BYTES,
+ RXE_ATMETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_FETCH_ADD] = {
.name = "IB_OPCODE_RD_FETCH_ADD",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
- | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
+ RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
[RXE_PAYLOAD] = RXE_BTH_BYTES +
- + RXE_ATMETH_BYTES
- + RXE_DETH_BYTES +
- + RXE_RDETH_BYTES,
+ RXE_ATMETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
/* UD */
[IB_OPCODE_UD_SEND_ONLY] = {
.name = "IB_OPCODE_UD_SEND_ONLY",
- .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_DETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_DETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
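
The rxe_opcode.c hunks above are whitespace-only: every "|" and "+" operator moves from the start of a continuation line to the end of the previous one, and the computed masks, lengths and offsets are unchanged. As a reminder of how this table is consumed, here is a hedged sketch in the style of the driver's header helpers (the in-tree accessor is payload_addr() in rxe_hdr.h; treat the exact helper name and the rxe_pkt_info layout as assumptions):

	/* Sketch: each per-opcode offset is relative to the start of the
	 * BTH, so locating a header segment is a single table lookup.
	 */
	static inline void *example_payload_addr(struct rxe_pkt_info *pkt)
	{
		return pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
	}
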
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 2e80bb6aa957..4cb003885e00 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -5,13 +5,14 @@
*/
#include "rxe.h"
-#include "rxe_loc.h"
+
+#define RXE_POOL_ALIGN (16)
static const struct rxe_type_info {
const char *name;
size_t size;
size_t elem_offset;
- void (*cleanup)(struct rxe_pool_entry *obj);
+ void (*cleanup)(struct rxe_pool_elem *obj);
enum rxe_pool_flags flags;
u32 min_index;
u32 max_index;
@@ -21,19 +22,19 @@ static const struct rxe_type_info {
[RXE_TYPE_UC] = {
.name = "rxe-uc",
.size = sizeof(struct rxe_ucontext),
- .elem_offset = offsetof(struct rxe_ucontext, pelem),
+ .elem_offset = offsetof(struct rxe_ucontext, elem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_PD] = {
.name = "rxe-pd",
.size = sizeof(struct rxe_pd),
- .elem_offset = offsetof(struct rxe_pd, pelem),
+ .elem_offset = offsetof(struct rxe_pd, elem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_AH] = {
.name = "rxe-ah",
.size = sizeof(struct rxe_ah),
- .elem_offset = offsetof(struct rxe_ah, pelem),
+ .elem_offset = offsetof(struct rxe_ah, elem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_AH_INDEX,
.max_index = RXE_MAX_AH_INDEX,
@@ -41,7 +42,7 @@ static const struct rxe_type_info {
[RXE_TYPE_SRQ] = {
.name = "rxe-srq",
.size = sizeof(struct rxe_srq),
- .elem_offset = offsetof(struct rxe_srq, pelem),
+ .elem_offset = offsetof(struct rxe_srq, elem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_SRQ_INDEX,
.max_index = RXE_MAX_SRQ_INDEX,
@@ -49,7 +50,7 @@ static const struct rxe_type_info {
[RXE_TYPE_QP] = {
.name = "rxe-qp",
.size = sizeof(struct rxe_qp),
- .elem_offset = offsetof(struct rxe_qp, pelem),
+ .elem_offset = offsetof(struct rxe_qp, elem),
.cleanup = rxe_qp_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_QP_INDEX,
@@ -58,14 +59,14 @@ static const struct rxe_type_info {
[RXE_TYPE_CQ] = {
.name = "rxe-cq",
.size = sizeof(struct rxe_cq),
- .elem_offset = offsetof(struct rxe_cq, pelem),
+ .elem_offset = offsetof(struct rxe_cq, elem),
.flags = RXE_POOL_NO_ALLOC,
.cleanup = rxe_cq_cleanup,
},
[RXE_TYPE_MR] = {
.name = "rxe-mr",
.size = sizeof(struct rxe_mr),
- .elem_offset = offsetof(struct rxe_mr, pelem),
+ .elem_offset = offsetof(struct rxe_mr, elem),
.cleanup = rxe_mr_cleanup,
.flags = RXE_POOL_INDEX,
.min_index = RXE_MIN_MR_INDEX,
@@ -74,7 +75,7 @@ static const struct rxe_type_info {
[RXE_TYPE_MW] = {
.name = "rxe-mw",
.size = sizeof(struct rxe_mw),
- .elem_offset = offsetof(struct rxe_mw, pelem),
+ .elem_offset = offsetof(struct rxe_mw, elem),
.cleanup = rxe_mw_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_MW_INDEX,
@@ -83,7 +84,7 @@ static const struct rxe_type_info {
[RXE_TYPE_MC_GRP] = {
.name = "rxe-mc_grp",
.size = sizeof(struct rxe_mc_grp),
- .elem_offset = offsetof(struct rxe_mc_grp, pelem),
+ .elem_offset = offsetof(struct rxe_mc_grp, elem),
.cleanup = rxe_mc_cleanup,
.flags = RXE_POOL_KEY,
.key_offset = offsetof(struct rxe_mc_grp, mgid),
@@ -92,15 +93,10 @@ static const struct rxe_type_info {
[RXE_TYPE_MC_ELEM] = {
.name = "rxe-mc_elem",
.size = sizeof(struct rxe_mc_elem),
- .elem_offset = offsetof(struct rxe_mc_elem, pelem),
+ .elem_offset = offsetof(struct rxe_mc_elem, elem),
},
};
-static inline const char *pool_name(struct rxe_pool *pool)
-{
- return rxe_type_info[pool->type].name;
-}
-
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
int err = 0;
@@ -130,35 +126,36 @@ int rxe_pool_init(
enum rxe_elem_type type,
unsigned int max_elem)
{
+ const struct rxe_type_info *info = &rxe_type_info[type];
int err = 0;
- size_t size = rxe_type_info[type].size;
memset(pool, 0, sizeof(*pool));
pool->rxe = rxe;
+ pool->name = info->name;
pool->type = type;
pool->max_elem = max_elem;
- pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
- pool->flags = rxe_type_info[type].flags;
- pool->index.tree = RB_ROOT;
- pool->key.tree = RB_ROOT;
- pool->cleanup = rxe_type_info[type].cleanup;
+ pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN);
+ pool->elem_offset = info->elem_offset;
+ pool->flags = info->flags;
+ pool->cleanup = info->cleanup;
atomic_set(&pool->num_elem, 0);
rwlock_init(&pool->pool_lock);
- if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
- err = rxe_pool_init_index(pool,
- rxe_type_info[type].max_index,
- rxe_type_info[type].min_index);
+ if (pool->flags & RXE_POOL_INDEX) {
+ pool->index.tree = RB_ROOT;
+ err = rxe_pool_init_index(pool, info->max_index,
+ info->min_index);
if (err)
goto out;
}
- if (rxe_type_info[type].flags & RXE_POOL_KEY) {
- pool->key.key_offset = rxe_type_info[type].key_offset;
- pool->key.key_size = rxe_type_info[type].key_size;
+ if (pool->flags & RXE_POOL_KEY) {
+ pool->key.tree = RB_ROOT;
+ pool->key.key_offset = info->key_offset;
+ pool->key.key_size = info->key_size;
}
out:
@@ -169,9 +166,10 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
{
if (atomic_read(&pool->num_elem) > 0)
pr_warn("%s pool destroyed with unfree'd elem\n",
- pool_name(pool));
+ pool->name);
- bitmap_free(pool->index.table);
+ if (pool->flags & RXE_POOL_INDEX)
+ bitmap_free(pool->index.table);
}
static u32 alloc_index(struct rxe_pool *pool)
@@ -189,15 +187,15 @@ static u32 alloc_index(struct rxe_pool *pool)
return index + pool->index.min_index;
}
-static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new)
{
struct rb_node **link = &pool->index.tree.rb_node;
struct rb_node *parent = NULL;
- struct rxe_pool_entry *elem;
+ struct rxe_pool_elem *elem;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, index_node);
+ elem = rb_entry(parent, struct rxe_pool_elem, index_node);
if (elem->index == new->index) {
pr_warn("element already exists!\n");
@@ -216,19 +214,20 @@ static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
return 0;
}
-static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new)
{
struct rb_node **link = &pool->key.tree.rb_node;
struct rb_node *parent = NULL;
- struct rxe_pool_entry *elem;
+ struct rxe_pool_elem *elem;
int cmp;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, key_node);
+ elem = rb_entry(parent, struct rxe_pool_elem, key_node);
cmp = memcmp((u8 *)elem + pool->key.key_offset,
- (u8 *)new + pool->key.key_offset, pool->key.key_size);
+ (u8 *)new + pool->key.key_offset,
+ pool->key.key_size);
if (cmp == 0) {
pr_warn("key already exists!\n");
@@ -247,7 +246,7 @@ static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
return 0;
}
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
int err;
@@ -258,37 +257,35 @@ int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
return err;
}
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
int err;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
err = __rxe_add_key_locked(elem, key);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
return err;
}
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
rb_erase(&elem->key_node, &pool->key.tree);
}
-void __rxe_drop_key(struct rxe_pool_entry *elem)
+void __rxe_drop_key(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
__rxe_drop_key_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
}
-int __rxe_add_index_locked(struct rxe_pool_entry *elem)
+int __rxe_add_index_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
int err;
@@ -299,20 +296,19 @@ int __rxe_add_index_locked(struct rxe_pool_entry *elem)
return err;
}
-int __rxe_add_index(struct rxe_pool_entry *elem)
+int __rxe_add_index(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
int err;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
err = __rxe_add_index_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
return err;
}
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
@@ -320,32 +316,31 @@ void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
rb_erase(&elem->index_node, &pool->index.tree);
}
-void __rxe_drop_index(struct rxe_pool_entry *elem)
+void __rxe_drop_index(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
__rxe_drop_index_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
}
void *rxe_alloc_locked(struct rxe_pool *pool)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- obj = kzalloc(info->size, GFP_ATOMIC);
+ obj = kzalloc(pool->elem_size, GFP_ATOMIC);
if (!obj)
goto out_cnt;
- elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+ elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
elem->pool = pool;
+ elem->obj = obj;
kref_init(&elem->ref_cnt);
return obj;
@@ -357,20 +352,20 @@ out_cnt:
void *rxe_alloc(struct rxe_pool *pool)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- obj = kzalloc(info->size, GFP_KERNEL);
+ obj = kzalloc(pool->elem_size, GFP_KERNEL);
if (!obj)
goto out_cnt;
- elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+ elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
elem->pool = pool;
+ elem->obj = obj;
kref_init(&elem->ref_cnt);
return obj;
@@ -380,12 +375,13 @@ out_cnt:
return NULL;
}
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
elem->pool = pool;
+ elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
return 0;
@@ -397,17 +393,16 @@ out_cnt:
void rxe_elem_release(struct kref *kref)
{
- struct rxe_pool_entry *elem =
- container_of(kref, struct rxe_pool_entry, ref_cnt);
+ struct rxe_pool_elem *elem =
+ container_of(kref, struct rxe_pool_elem, ref_cnt);
struct rxe_pool *pool = elem->pool;
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- u8 *obj;
+ void *obj;
if (pool->cleanup)
pool->cleanup(elem);
if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
kfree(obj);
}
@@ -416,15 +411,14 @@ void rxe_elem_release(struct kref *kref)
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
node = pool->index.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, index_node);
+ elem = rb_entry(node, struct rxe_pool_elem, index_node);
if (elem->index > index)
node = node->rb_left;
@@ -436,7 +430,7 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
if (node) {
kref_get(&elem->ref_cnt);
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
} else {
obj = NULL;
}
@@ -446,28 +440,26 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
- u8 *obj;
- unsigned long flags;
+ void *obj;
- read_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_bh(&pool->pool_lock);
obj = rxe_pool_get_index_locked(pool, index);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_bh(&pool->pool_lock);
return obj;
}
void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
int cmp;
node = pool->key.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, key_node);
+ elem = rb_entry(node, struct rxe_pool_elem, key_node);
cmp = memcmp((u8 *)elem + pool->key.key_offset,
key, pool->key.key_size);
@@ -482,7 +474,7 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
if (node) {
kref_get(&elem->ref_cnt);
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
} else {
obj = NULL;
}
@@ -492,12 +484,11 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
- u8 *obj;
- unsigned long flags;
+ void *obj;
- read_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_bh(&pool->pool_lock);
obj = rxe_pool_get_key_locked(pool, key);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_bh(&pool->pool_lock);
return obj;
}
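
Beyond the rxe_pool_entry to rxe_pool_elem rename and the _irqsave to _bh lock conversion, the functional change in rxe_pool.c is the cached back-pointer: the element-to-object offset is now applied once, when the object is allocated or added, instead of on every lookup and release. A condensed sketch of the before/after, taken from the hunks above:

	/* before: every lookup and release re-derived the object address */
	obj = (u8 *)elem - rxe_type_info[pool->type].elem_offset;

	/* after: rxe_alloc()/rxe_alloc_locked() record elem->obj = obj at
	 * allocation time and __rxe_add_to_pool() computes it from
	 * pool->elem_offset, so every later lookup or release is a load:
	 */
	obj = elem->obj;
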
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 8ecd9f870aea..214279310f4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -7,9 +7,6 @@
#ifndef RXE_POOL_H
#define RXE_POOL_H
-#define RXE_POOL_ALIGN (16)
-#define RXE_POOL_CACHE_FLAGS (0)
-
enum rxe_pool_flags {
RXE_POOL_INDEX = BIT(1),
RXE_POOL_KEY = BIT(2),
@@ -30,10 +27,9 @@ enum rxe_elem_type {
RXE_NUM_TYPES, /* keep me last */
};
-struct rxe_pool_entry;
-
-struct rxe_pool_entry {
+struct rxe_pool_elem {
struct rxe_pool *pool;
+ void *obj;
struct kref ref_cnt;
struct list_head list;
@@ -47,14 +43,16 @@ struct rxe_pool_entry {
struct rxe_pool {
struct rxe_dev *rxe;
+ const char *name;
rwlock_t pool_lock; /* protects pool add/del/search */
- size_t elem_size;
- void (*cleanup)(struct rxe_pool_entry *obj);
+ void (*cleanup)(struct rxe_pool_elem *obj);
enum rxe_pool_flags flags;
enum rxe_elem_type type;
unsigned int max_elem;
atomic_t num_elem;
+ size_t elem_size;
+ size_t elem_offset;
/* only used if indexed */
struct {
@@ -89,51 +87,51 @@ void *rxe_alloc_locked(struct rxe_pool *pool);
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
-#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
/* assign an index to an indexed object and insert object into
* pool's rb tree holding and not holding the pool_lock
*/
-int __rxe_add_index_locked(struct rxe_pool_entry *elem);
+int __rxe_add_index_locked(struct rxe_pool_elem *elem);
-#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)
+#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem)
-int __rxe_add_index(struct rxe_pool_entry *elem);
+int __rxe_add_index(struct rxe_pool_elem *elem);
-#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
+#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem)
/* drop an index and remove object from rb tree
* holding and not holding the pool_lock
*/
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem);
-#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem)
+#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem)
-void __rxe_drop_index(struct rxe_pool_entry *elem);
+void __rxe_drop_index(struct rxe_pool_elem *elem);
-#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
+#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem)
/* assign a key to a keyed object and insert object into
* pool's rb tree holding and not holding pool_lock
*/
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key);
-#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
+#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->elem, key)
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key);
-#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
+#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->elem, key)
/* remove elem from rb tree holding and not holding the pool_lock */
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem);
-#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)
+#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->elem)
-void __rxe_drop_key(struct rxe_pool_entry *elem);
+void __rxe_drop_key(struct rxe_pool_elem *elem);
-#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
+#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->elem)
/* lookup an indexed object from index holding and not holding the pool_lock.
* takes a reference on object
@@ -153,9 +151,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
void rxe_elem_release(struct kref *kref);
/* take a reference on an object */
-#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt)
+#define rxe_add_ref(obj) kref_get(&(obj)->elem.ref_cnt)
/* drop a reference on an object */
-#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release)
+#define rxe_drop_ref(obj) kref_put(&(obj)->elem.ref_cnt, rxe_elem_release)
#endif /* RXE_POOL_H */
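
The header rename keeps call sites source-compatible: consumers embed a struct rxe_pool_elem named elem (previously pelem) and keep using the same wrapper macros. A hedged usage sketch, with qp standing in for any pool object:

	rxe_add_index(qp);	/* __rxe_add_index(&qp->elem): assign an index
				 * and insert into the pool's rb-tree */
	rxe_add_ref(qp);	/* kref_get(&qp->elem.ref_cnt) */
	/* ... use the object ... */
	rxe_drop_index(qp);	/* remove from the rb-tree */
	rxe_drop_ref(qp);	/* kref_put(); rxe_elem_release() frees the
				 * object unless the pool is RXE_POOL_NO_ALLOC */
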
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 54b8711321c1..5018b9387694 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -167,7 +167,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->attr.path_mtu = 1;
qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
- qpn = qp->pelem.index;
+ qpn = qp->elem.index;
port = &rxe->port;
switch (init->qp_type) {
@@ -217,8 +217,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
* the port number must be in the Dynamic Ports range
* (0xc000 - 0xffff).
*/
- qp->src_port = RXE_ROCE_V2_SPORT +
- (hash_32_generic(qp_num(qp), 14) & 0x3fff);
+ qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
qp->sq.max_wr = init->cap.max_send_wr;
/* These caps are limited by rxe_qp_chk_cap() done by the caller */
@@ -832,9 +831,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
}
/* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+ struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);
execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}
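
Two small things happen in rxe_qp.c besides the rename: hash_32_generic() gives way to the plain hash_32(), and the QP number is read from qp->elem.index. The port arithmetic is unchanged; assuming RXE_ROCE_V2_SPORT is 0xc000, as the Dynamic Ports comment in the hunk implies, the result always lands in the dynamic range:

	/* hash_32(v, 14) returns a 14-bit value (0x0000..0x3fff), so the
	 * & 0x3fff mask is a belt-and-braces no-op, and adding 0xc000
	 * yields a UDP source port in 0xc000..0xffff.
	 */
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
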
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 6e6e023c1b45..a1b283dd2d4c 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -151,7 +151,6 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
struct rxe_queue *new_q;
unsigned int num_elem = *num_elem_p;
int err;
- unsigned long flags = 0, flags1;
new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
if (!new_q)
@@ -165,17 +164,17 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
goto err1;
}
- spin_lock_irqsave(consumer_lock, flags1);
+ spin_lock_bh(consumer_lock);
if (producer_lock) {
- spin_lock_irqsave(producer_lock, flags);
+ spin_lock_bh(producer_lock);
err = resize_finish(q, new_q, num_elem);
- spin_unlock_irqrestore(producer_lock, flags);
+ spin_unlock_bh(producer_lock);
} else {
err = resize_finish(q, new_q, num_elem);
}
- spin_unlock_irqrestore(consumer_lock, flags1);
+ spin_unlock_bh(consumer_lock);
rxe_queue_cleanup(new_q); /* new/old dep on err */
if (err)
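
The lock conversion in rxe_queue_resize() is the template for the rest of the series: rxe takes these locks only from process context and from tasklets (softirqs), never from hard-IRQ context, so the _bh variants suffice and the saved-flags locals go away. A minimal self-contained sketch of the pattern, with an illustrative lock name:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* illustrative lock */

	static void example_critical_section(void)
	{
		/* spin_lock_irqsave()/irqrestore() also masked hard IRQs and
		 * needed an unsigned long flags local; disabling bottom
		 * halves is enough for process-vs-tasklet races.
		 */
		spin_lock_bh(&example_lock);
		/* touch data shared with a tasklet here */
		spin_unlock_bh(&example_lock);
	}
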
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 0c9d2af15f3d..5eb89052dd66 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -110,7 +110,6 @@ void rnr_nak_timer(struct timer_list *t)
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
struct rxe_send_wqe *wqe;
- unsigned long flags;
struct rxe_queue *q = qp->sq.queue;
unsigned int index = qp->req.wqe_index;
unsigned int cons;
@@ -124,25 +123,23 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
/* check to see if we are drained;
* state_lock used by requester and completer
*/
- spin_lock_irqsave(&qp->state_lock, flags);
+ spin_lock_bh(&qp->state_lock);
do {
if (qp->req.state != QP_STATE_DRAIN) {
/* comp just finished */
- spin_unlock_irqrestore(&qp->state_lock,
- flags);
+ spin_unlock_bh(&qp->state_lock);
break;
}
if (wqe && ((index != cons) ||
(wqe->state != wqe_state_posted))) {
/* comp not done yet */
- spin_unlock_irqrestore(&qp->state_lock,
- flags);
+ spin_unlock_bh(&qp->state_lock);
break;
}
qp->req.state = QP_STATE_DRAINED;
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -372,7 +369,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
int pad = (-payload) & 0x3;
int paylen;
int solicited;
- u16 pkey;
u32 qp_num;
int ack_req;
@@ -404,8 +400,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
(RXE_WRITE_MASK | RXE_IMMDT_MASK));
- pkey = IB_DEFAULT_PKEY_FULL;
-
qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
qp->attr.dest_qp_num;
@@ -414,7 +408,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
if (ack_req)
qp->req.noack_pkts = 0;
- bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
+ bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
ack_req, pkt->psn);
/* init optional headers */
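
In init_req_packet() the pkey local was a write-only constant: rxe effectively supports only the default partition key, so IB_DEFAULT_PKEY_FULL is now passed straight to bth_init() and the dead store disappears, along with the unused flags variable from the drain path above it.
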
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index eb1c4c3b3a78..0c0721f04357 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -83,7 +83,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
srq->ibsrq.event_handler = init->event_handler;
srq->ibsrq.srq_context = init->srq_context;
srq->limit = init->attr.srq_limit;
- srq->srq_num = srq->pelem.index;
+ srq->srq_num = srq->elem.index;
srq->rq.max_wr = init->attr.max_wr;
srq->rq.max_sge = init->attr.max_sge;
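
rxe_srq.c shows the same one-line consequence of the rename that rxe_qp.c and rxe_verbs.c do: objects that publish their pool index as a user-visible number now read it from elem.index.

	/* pattern shared by ah->ah_num, qpn and srq->srq_num in this series */
	srq->srq_num = srq->elem.index;
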
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
deleted file mode 100644
index 666202ddff48..000000000000
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/*
- * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- */
-
-#include "rxe.h"
-#include "rxe_net.h"
-
-/* Copy argument and remove trailing CR. Return the new length. */
-static int sanitize_arg(const char *val, char *intf, int intf_len)
-{
- int len;
-
- if (!val)
- return 0;
-
- /* Remove newline. */
- for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++)
- intf[len] = val[len];
- intf[len] = 0;
-
- if (len == 0 || (val[len] != 0 && val[len] != '\n'))
- return 0;
-
- return len;
-}
-
-static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
-{
- int len;
- int err = 0;
- char intf[32];
- struct net_device *ndev;
- struct rxe_dev *exists;
-
- if (!rxe_initialized) {
- pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n");
- return -EAGAIN;
- }
-
- len = sanitize_arg(val, intf, sizeof(intf));
- if (!len) {
- pr_err("add: invalid interface name\n");
- return -EINVAL;
- }
-
- ndev = dev_get_by_name(&init_net, intf);
- if (!ndev) {
- pr_err("interface %s not found\n", intf);
- return -EINVAL;
- }
-
- if (is_vlan_dev(ndev)) {
- pr_err("rxe creation allowed on top of a real device only\n");
- err = -EPERM;
- goto err;
- }
-
- exists = rxe_get_dev_from_net(ndev);
- if (exists) {
- ib_device_put(&exists->ib_dev);
- pr_err("already configured on %s\n", intf);
- err = -EINVAL;
- goto err;
- }
-
- err = rxe_net_add("rxe%d", ndev);
- if (err) {
- pr_err("failed to add %s\n", intf);
- goto err;
- }
-
-err:
- dev_put(ndev);
- return err;
-}
-
-static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
-{
- int len;
- char intf[32];
- struct ib_device *ib_dev;
-
- len = sanitize_arg(val, intf, sizeof(intf));
- if (!len) {
- pr_err("add: invalid interface name\n");
- return -EINVAL;
- }
-
- if (strncmp("all", intf, len) == 0) {
- pr_info("rxe_sys: remove all");
- ib_unregister_driver(RDMA_DRIVER_RXE);
- return 0;
- }
-
- ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE);
- if (!ib_dev) {
- pr_err("not configured on %s\n", intf);
- return -EINVAL;
- }
-
- ib_unregister_device_and_put(ib_dev);
-
- return 0;
-}
-
-static const struct kernel_param_ops rxe_add_ops = {
- .set = rxe_param_set_add,
-};
-
-static const struct kernel_param_ops rxe_remove_ops = {
- .set = rxe_param_set_remove,
-};
-
-module_param_cb(add, &rxe_add_ops, NULL, 0200);
-MODULE_PARM_DESC(add, "DEPRECATED. Create RXE device over network interface");
-module_param_cb(remove, &rxe_remove_ops, NULL, 0200);
-MODULE_PARM_DESC(remove, "DEPRECATED. Remove RXE device over network interface");
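
Deleting rxe_sysfs.c removes the long-deprecated add/remove module parameters outright; as the deleted error message itself notes, rxe devices are managed through the rdma netlink interface ("rdma link") instead.
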
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6951fdcb31bf..0c4db5bb17d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -32,25 +32,24 @@ void rxe_do_task(struct tasklet_struct *t)
{
int cont;
int ret;
- unsigned long flags;
struct rxe_task *task = from_tasklet(task, t, tasklet);
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_START:
task->state = TASK_STATE_BUSY;
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
break;
case TASK_STATE_BUSY:
task->state = TASK_STATE_ARMED;
fallthrough;
case TASK_STATE_ARMED:
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
return;
default:
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
pr_warn("%s failed with bad state %d\n", __func__, task->state);
return;
}
@@ -59,7 +58,7 @@ void rxe_do_task(struct tasklet_struct *t)
cont = 0;
ret = task->func(task->arg);
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_BUSY:
if (ret)
@@ -81,7 +80,7 @@ void rxe_do_task(struct tasklet_struct *t)
pr_warn("%s failed with bad state %d\n", __func__,
task->state);
}
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
} while (cont);
task->ret = ret;
@@ -106,7 +105,6 @@ int rxe_init_task(void *obj, struct rxe_task *task,
void rxe_cleanup_task(struct rxe_task *task)
{
- unsigned long flags;
bool idle;
/*
@@ -116,9 +114,9 @@ void rxe_cleanup_task(struct rxe_task *task)
task->destroyed = true;
do {
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
idle = (task->state == TASK_STATE_START);
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
} while (!idle);
tasklet_kill(&task->tasklet);
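The rxe_task.c conversion above works because task->state_lock is only ever taken from process context and from the tasklet, which runs in softirq (bottom-half) context; it is never taken from a hard-IRQ handler, so spin_lock_bh() is sufficient and the irqsave/irqrestore flags bookkeeping can go. A minimal sketch of the pattern, with illustrative demo_* names rather than the driver's types:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	struct demo_task {
		struct tasklet_struct tasklet;
		spinlock_t state_lock;
		int state;
	};

	static void demo_do_task(struct tasklet_struct *t)
	{
		struct demo_task *task = from_tasklet(task, t, tasklet);

		/* softirq context: _bh locking serializes against
		 * process-context users without masking hard IRQs */
		spin_lock_bh(&task->state_lock);
		task->state++;
		spin_unlock_bh(&task->state_lock);
	}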
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 0aa0d7e52773..915ad6664321 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -182,7 +182,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
/* create index > 0 */
rxe_add_index(ah);
- ah->ah_num = ah->pelem.index;
+ ah->ah_num = ah->elem.index;
if (uresp) {
/* only if new user provider */
@@ -383,10 +383,9 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
int err = 0;
- unsigned long flags;
struct rxe_srq *srq = to_rsrq(ibsrq);
- spin_lock_irqsave(&srq->rq.producer_lock, flags);
+ spin_lock_bh(&srq->rq.producer_lock);
while (wr) {
err = post_one_recv(&srq->rq, wr);
@@ -395,7 +394,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
wr = wr->next;
}
- spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
+ spin_unlock_bh(&srq->rq.producer_lock);
if (err)
*bad_wr = wr;
@@ -469,6 +468,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (err)
goto err1;
+ if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
+ qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ qp->ibqp.qp_num,
+ qp->attr.dest_qp_num);
+
return 0;
err1:
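The lines added to rxe_modify_qp() above recompute the RoCEv2 UDP source port whenever the address vector changes and a GRH (and thus possibly a flow label) is present, giving ECMP fabrics per-flow entropy. rdma_get_udp_sport() uses the flow label when one is set and otherwise derives one from the QP numbers; roughly, the label-to-port mapping folds the 20-bit flow label into 14 bits and offsets it into the RoCEv2 dynamic port range (an illustrative reimplementation, not the exported helper):

	static u16 demo_flow_label_to_udp_sport(u32 fl)
	{
		u32 fl_low = fl & 0x03fff;
		u32 fl_high = fl & 0xfc000;

		/* fold the top 6 bits into the low 14 bits */
		fl_low ^= fl_high >> 14;

		/* offset into the RoCEv2 source-port range */
		return (u16)(fl_low | 0xC000);
	}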
@@ -634,19 +638,18 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
int err;
struct rxe_sq *sq = &qp->sq;
struct rxe_send_wqe *send_wqe;
- unsigned long flags;
int full;
err = validate_send_wr(qp, ibwr, mask, length);
if (err)
return err;
- spin_lock_irqsave(&qp->sq.sq_lock, flags);
+ spin_lock_bh(&qp->sq.sq_lock);
full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
if (unlikely(full)) {
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ spin_unlock_bh(&qp->sq.sq_lock);
return -ENOMEM;
}
@@ -655,7 +658,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ spin_unlock_bh(&qp->sq.sq_lock);
return 0;
}
@@ -735,7 +738,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int err = 0;
struct rxe_qp *qp = to_rqp(ibqp);
struct rxe_rq *rq = &qp->rq;
- unsigned long flags;
if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
*bad_wr = wr;
@@ -749,7 +751,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
goto err1;
}
- spin_lock_irqsave(&rq->producer_lock, flags);
+ spin_lock_bh(&rq->producer_lock);
while (wr) {
err = post_one_recv(rq, wr);
@@ -760,7 +762,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
wr = wr->next;
}
- spin_unlock_irqrestore(&rq->producer_lock, flags);
+ spin_unlock_bh(&rq->producer_lock);
if (qp->resp.state == QP_STATE_ERROR)
rxe_run_task(&qp->resp.task, 1);
@@ -841,9 +843,8 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int i;
struct rxe_cq *cq = to_rcq(ibcq);
struct rxe_cqe *cqe;
- unsigned long flags;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
for (i = 0; i < num_entries; i++) {
cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
if (!cqe)
@@ -852,7 +853,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
memcpy(wc++, &cqe->ibwc, sizeof(*wc));
queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
}
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
return i;
}
@@ -870,11 +871,10 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct rxe_cq *cq = to_rcq(ibcq);
- unsigned long irq_flags;
int ret = 0;
int empty;
- spin_lock_irqsave(&cq->cq_lock, irq_flags);
+ spin_lock_bh(&cq->cq_lock);
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
@@ -883,7 +883,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
ret = 1;
- spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+ spin_unlock_bh(&cq->cq_lock);
return ret;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 35e041450090..e48969e8d4c8 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -35,17 +35,17 @@ static inline int psn_compare(u32 psn_a, u32 psn_b)
struct rxe_ucontext {
struct ib_ucontext ibuc;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
};
struct rxe_pd {
struct ib_pd ibpd;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
};
struct rxe_ah {
struct ib_ah ibah;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_av av;
bool is_user;
int ah_num;
@@ -60,7 +60,7 @@ struct rxe_cqe {
struct rxe_cq {
struct ib_cq ibcq;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_queue *queue;
spinlock_t cq_lock;
u8 notify;
@@ -95,7 +95,7 @@ struct rxe_rq {
struct rxe_srq {
struct ib_srq ibsrq;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_pd *pd;
struct rxe_rq rq;
u32 srq_num;
@@ -209,7 +209,7 @@ struct rxe_resp_info {
struct rxe_qp {
struct ib_qp ibqp;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct ib_qp_attr attr;
unsigned int valid;
unsigned int mtu;
@@ -309,7 +309,7 @@ static inline int rkey_is_mw(u32 rkey)
}
struct rxe_mr {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct ib_mr ibmr;
struct ib_umem *umem;
@@ -342,7 +342,7 @@ enum rxe_mw_state {
struct rxe_mw {
struct ib_mw ibmw;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
spinlock_t lock;
enum rxe_mw_state state;
struct rxe_qp *qp; /* Type 2 only */
@@ -354,7 +354,7 @@ struct rxe_mw {
};
struct rxe_mc_grp {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
spinlock_t mcg_lock; /* guard group */
struct rxe_dev *rxe;
struct list_head qp_list;
@@ -365,7 +365,7 @@ struct rxe_mc_grp {
};
struct rxe_mc_elem {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct list_head qp_list;
struct list_head grp_list;
struct rxe_qp *qp;
@@ -392,8 +392,6 @@ struct rxe_dev {
struct net_device *ndev;
- int xmit_errors;
-
struct rxe_pool uc_pool;
struct rxe_pool pd_pool;
struct rxe_pool ah_pool;
@@ -484,6 +482,6 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *elem);
#endif /* RXE_VERBS_H */
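The pelem-to-elem rename through rxe_verbs.h (and the ah->elem.index use in rxe_create_ah() above) tracks the rxe_pool_entry to rxe_pool_elem rework: each rxe object embeds a pool element, and generic pool code maps an element back to its containing object. A minimal sketch of the embedding pattern, with illustrative demo_* names:

	#include <linux/kernel.h>

	struct demo_pool_elem {
		u32 index;	/* what the pool's add-index step hands out */
	};

	struct demo_ah {
		struct demo_pool_elem elem;	/* must stay embedded */
		u32 ah_num;
	};

	static struct demo_ah *demo_ah_from_elem(struct demo_pool_elem *elem)
	{
		return container_of(elem, struct demo_ah, elem);
	}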
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 9093e6a80b26..e5c586913d0b 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -98,15 +98,14 @@ static int siw_create_tx_threads(void)
continue;
siw_tx_thread[cpu] =
- kthread_create(siw_run_sq, (unsigned long *)(long)cpu,
- "siw_tx/%d", cpu);
+ kthread_run_on_cpu(siw_run_sq,
+ (unsigned long *)(long)cpu,
+ cpu, "siw_tx/%u");
if (IS_ERR(siw_tx_thread[cpu])) {
siw_tx_thread[cpu] = NULL;
continue;
}
- kthread_bind(siw_tx_thread[cpu], cpu);
- wake_up_process(siw_tx_thread[cpu]);
assigned++;
}
return assigned;
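kthread_run_on_cpu(), new in 5.17 and used above, collapses the three-step sequence it replaces into one call and formats the CPU number into the thread name. The open-coded equivalent that siw previously carried looks roughly like this:

	#include <linux/kthread.h>

	static struct task_struct *demo_start_on_cpu(int (*fn)(void *),
						     void *data,
						     unsigned int cpu)
	{
		struct task_struct *t;

		t = kthread_create(fn, data, "demo/%u", cpu);
		if (IS_ERR(t))
			return t;
		kthread_bind(t, cpu);	/* pin before the first wakeup */
		wake_up_process(t);
		return t;
	}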
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 1b36350601fa..a3dd2cb6d5c9 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -8,6 +8,7 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
+#include <net/addrconf.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
@@ -155,7 +156,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
attr->vendor_id = SIW_VENDOR_ID;
attr->vendor_part_id = sdev->vendor_part_id;
- memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ sdev->netdev->dev_addr);
return 0;
}
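addrconf_addr_eui48(), used above, fills all eight GUID bytes from the six-byte MAC following the modified EUI-64 rules, instead of copying only six of the eight bytes raw. What it computes, approximately:

	#include <linux/string.h>
	#include <linux/types.h>

	static void demo_addr_eui48(u8 *eui, const u8 *mac)
	{
		memcpy(eui, mac, 3);
		eui[3] = 0xFF;		/* insert FF:FE in the middle */
		eui[4] = 0xFE;
		memcpy(eui + 5, mac + 3, 3);
		eui[0] ^= 2;		/* flip the universal/local bit */
	}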
@@ -660,7 +662,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
kbuf += core_sge->length;
core_sge++;
}
- sqe->sge[0].length = bytes > 0 ? bytes : 0;
+ sqe->sge[0].length = max(bytes, 0);
sqe->num_sge = bytes > 0 ? 1 : 0;
return bytes;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 776e46ee95da..07e47021a71f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -113,10 +113,6 @@ bool iser_pi_enable = false;
module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
-int iser_pi_guard;
-module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO);
-MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
-
static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
{
int ret;
@@ -139,9 +135,8 @@ static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
 * Notes: In case of data length errors or iscsi PDU completion failures
 * this routine will signal the iscsi layer of connection failure.
*/
-void
-iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- char *rx_data, int rx_data_len)
+void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *rx_data, int rx_data_len)
{
int rc = 0;
int datalen;
@@ -176,8 +171,7 @@ error:
 * Notes: This routine can't fail; it just assigns the iscsi task
 * hdr and max hdr size.
*/
-static int
-iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
+static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -198,9 +192,8 @@ iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
* state mutex to avoid dereferencing the IB device which
* may have already been terminated.
*/
-int
-iser_initialize_task_headers(struct iscsi_task *task,
- struct iser_tx_desc *tx_desc)
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
{
struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
@@ -237,8 +230,7 @@ iser_initialize_task_headers(struct iscsi_task *task,
* Return: Returns zero on success or -ENOMEM when failing
* to init task headers (dma mapping error).
*/
-static int
-iscsi_iser_task_init(struct iscsi_task *task)
+static int iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
int ret;
@@ -272,8 +264,8 @@ iscsi_iser_task_init(struct iscsi_task *task)
* xmit.
*
**/
-static int
-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
int error = 0;
@@ -290,9 +282,8 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
return error;
}
-static int
-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
- struct iscsi_task *task)
+static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
struct iscsi_data hdr;
@@ -326,8 +317,7 @@ iscsi_iser_task_xmit_unsol_data_exit:
*
* Return: zero on success or escalates $error on failure.
*/
-static int
-iscsi_iser_task_xmit(struct iscsi_task *task)
+static int iscsi_iser_task_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -410,8 +400,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
*
* In addition the error sector is marked.
*/
-static u8
-iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
+static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
{
struct iscsi_iser_task *iser_task = task->dd_data;
enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ?
@@ -460,11 +449,9 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session,
 * -EINVAL in case the end-point doesn't exist anymore or the iser connection
* state is not UP (teardown already started).
*/
-static int
-iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
- struct iscsi_cls_conn *cls_conn,
- uint64_t transport_eph,
- int is_leading)
+static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_eph, int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *iser_conn;
@@ -519,8 +506,7 @@ out:
* from this point iscsi must call conn_stop in session/connection
* teardown so iser transport must wait for it.
*/
-static int
-iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+static int iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *iscsi_conn;
struct iser_conn *iser_conn;
@@ -542,8 +528,7 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
 * handle, so we call it under the iser state lock to protect against
* this kind of race.
*/
-static void
-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+static void iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *iser_conn = conn->dd_data;
@@ -578,8 +563,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*
 * Removes and frees the iscsi host.
*/
-static void
-iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
@@ -588,8 +572,7 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_host_free(shost);
}
-static inline unsigned int
-iser_dif_prot_caps(int prot_caps)
+static inline unsigned int iser_dif_prot_caps(int prot_caps)
{
int ret = 0;
@@ -708,9 +691,8 @@ free_host:
return NULL;
}
-static int
-iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf, int buflen)
+static int iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
{
int value;
@@ -760,8 +742,8 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
*
* Output connection statistics.
*/
-static void
-iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+static void iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
@@ -812,9 +794,9 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
* Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error)
 * on failure.
*/
-static struct iscsi_endpoint *
-iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
- int non_blocking)
+static struct iscsi_endpoint *iscsi_iser_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking)
{
int err;
struct iser_conn *iser_conn;
@@ -857,8 +839,7 @@ failure:
 * or more likely the iser connection state transitioned to TERMINATING or
* DOWN during the wait period.
*/
-static int
-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct iser_conn *iser_conn = ep->dd_data;
int rc;
@@ -893,8 +874,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
* and cleanup or actually call it immediately in case we didn't pass
* iscsi conn bind/start stage, thus it is safe.
*/
-static void
-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
+static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{
struct iser_conn *iser_conn = ep->dd_data;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9f6ac0a09a78..20af46c4e954 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -119,8 +119,6 @@
#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX)
-#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2)
-
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
* to have at max for SCSI command. The tx posting & completion handling code *
@@ -148,8 +146,6 @@
- ISER_MAX_RX_MISC_PDUS) / \
(1 + ISER_INFLIGHT_DATAOUTS))
-#define ISER_SIGNAL_CMD_COUNT 32
-
/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr))
@@ -366,9 +362,6 @@ struct iser_fr_pool {
* @qp: Connection Queue-pair
* @cq: Connection completion queue
* @cq_size: The number of max outstanding completions
- * @post_recv_buf_count: post receive counter
- * @sig_count: send work request signal count
- * @rx_wr: receive work request for batch posts
* @device: reference to iser device
 * @fr_pool: connection fast registration pool
* @pi_support: Indicate device T10-PI support
@@ -379,9 +372,6 @@ struct ib_conn {
struct ib_qp *qp;
struct ib_cq *cq;
u32 cq_size;
- int post_recv_buf_count;
- u8 sig_count;
- struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
struct iser_device *device;
struct iser_fr_pool fr_pool;
bool pi_support;
@@ -397,8 +387,6 @@ struct ib_conn {
* @state: connection logical state
* @qp_max_recv_dtos: maximum number of data outs, corresponds
* to max number of post recvs
- * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
- * @min_posted_rx: (qp_max_recv_dtos >> 2)
* @max_cmds: maximum cmds allowed for this connection
* @name: connection peer portal
 * @release_work: deferred work for release job
@@ -409,7 +397,6 @@ struct ib_conn {
* (state is ISER_CONN_UP)
* @conn_list: entry in ig conn list
* @login_desc: login descriptor
- * @rx_desc_head: head of rx_descs cyclic buffer
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
@@ -422,8 +409,6 @@ struct iser_conn {
struct iscsi_endpoint *ep;
enum iser_conn_state state;
unsigned qp_max_recv_dtos;
- unsigned qp_max_recv_dtos_mask;
- unsigned min_posted_rx;
u16 max_cmds;
char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work;
@@ -433,7 +418,6 @@ struct iser_conn {
struct completion up_completion;
struct list_head conn_list;
struct iser_login_desc login_desc;
- unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
unsigned short scsi_sg_tablesize;
@@ -486,7 +470,6 @@ struct iser_global {
extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
-extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;
@@ -543,9 +526,9 @@ int iser_connect(struct iser_conn *iser_conn,
int non_blocking);
int iser_post_recvl(struct iser_conn *iser_conn);
-int iser_post_recvm(struct iser_conn *iser_conn, int count);
-int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
- bool signal);
+int iser_post_recvm(struct iser_conn *iser_conn,
+ struct iser_rx_desc *rx_desc);
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 27a6f75a9912..2490150d3085 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -95,11 +95,8 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
* task->data[ISER_DIR_OUT].data_len, Protection size
* is stored at task->prot[ISER_DIR_OUT].data_len
*/
-static int
-iser_prepare_write_cmd(struct iscsi_task *task,
- unsigned int imm_sz,
- unsigned int unsol_sz,
- unsigned int edtl)
+static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
+ unsigned int unsol_sz, unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_mem_reg *mem_reg;
@@ -160,8 +157,8 @@ iser_prepare_write_cmd(struct iscsi_task *task,
}
/* creates a new tx descriptor and adds header regd buffer */
-static void iser_create_send_desc(struct iser_conn *iser_conn,
- struct iser_tx_desc *tx_desc)
+static void iser_create_send_desc(struct iser_conn *iser_conn,
+ struct iser_tx_desc *tx_desc)
{
struct iser_device *device = iser_conn->ib_conn.device;
@@ -247,8 +244,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iser_device *device = ib_conn->device;
iser_conn->qp_max_recv_dtos = session->cmds_max;
- iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
- iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
iser_conn->pages_per_mr))
@@ -280,7 +275,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
rx_sg->lkey = device->pd->local_dma_lkey;
}
- iser_conn->rx_desc_head = 0;
return 0;
rx_desc_dma_map_failed:
@@ -322,37 +316,35 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
struct iser_conn *iser_conn = conn->dd_data;
- struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iscsi_session *session = conn->session;
+ int err = 0;
+ int i;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
/* check if this is the last login - going to full feature phase */
if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
- return 0;
-
- /*
- * Check that there is one posted recv buffer
- * (for the last login response).
- */
- WARN_ON(ib_conn->post_recv_buf_count != 1);
+ goto out;
if (session->discovery_sess) {
iser_info("Discovery session, re-using login RX buffer\n");
- return 0;
- } else
- iser_info("Normal session, posting batch of RX %d buffers\n",
- iser_conn->min_posted_rx);
-
- /* Initial post receive buffers */
- if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
- return -ENOMEM;
+ goto out;
+ }
- return 0;
-}
+ iser_info("Normal session, posting batch of RX %d buffers\n",
+ iser_conn->qp_max_recv_dtos - 1);
-static inline bool iser_signal_comp(u8 sig_count)
-{
- return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
+ /*
+ * Initial post receive buffers.
+ * There is one already posted recv buffer (for the last login
+ * response). Therefore, the first recv buffer is skipped here.
+ */
+ for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
+ err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
+ if (err)
+ goto out;
+ }
+out:
+ return err;
}
/**
@@ -360,8 +352,7 @@ static inline bool iser_signal_comp(u8 sig_count)
* @conn: link to matching iscsi connection
* @task: SCSI command task
*/
-int iser_send_command(struct iscsi_conn *conn,
- struct iscsi_task *task)
+int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -371,7 +362,6 @@ int iser_send_command(struct iscsi_conn *conn,
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
- u8 sig_count = ++iser_conn->ib_conn.sig_count;
edtl = ntohl(hdr->data_length);
@@ -418,8 +408,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED;
- err = iser_post_send(&iser_conn->ib_conn, tx_desc,
- iser_signal_comp(sig_count));
+ err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
@@ -434,8 +423,7 @@ send_command_error:
* @task: SCSI command task
* @hdr: pointer to the LLD's iSCSI message header
*/
-int iser_send_data_out(struct iscsi_conn *conn,
- struct iscsi_task *task,
+int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
struct iscsi_data *hdr)
{
struct iser_conn *iser_conn = conn->dd_data;
@@ -487,7 +475,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
itt, buf_offset, data_seg_len);
- err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
+ err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
@@ -497,8 +485,7 @@ send_data_out_error:
return err;
}
-int iser_send_control(struct iscsi_conn *conn,
- struct iscsi_task *task)
+int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -550,7 +537,7 @@ int iser_send_control(struct iscsi_conn *conn,
goto send_control_error;
}
- err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
+ err = iser_post_send(&iser_conn->ib_conn, mdesc);
if (!err)
return 0;
@@ -590,11 +577,14 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
- ib_conn->post_recv_buf_count--;
+ if (iser_conn->iscsi_conn->session->discovery_sess)
+ return;
+
+ /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
+ iser_post_recvm(iser_conn, iser_conn->rx_descs);
}
-static inline int
-iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
+static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
(desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
@@ -607,10 +597,8 @@ iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
return 0;
}
-static int
-iser_check_remote_inv(struct iser_conn *iser_conn,
- struct ib_wc *wc,
- struct iscsi_hdr *hdr)
+static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
+ struct iscsi_hdr *hdr)
{
if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
struct iscsi_task *task;
@@ -657,8 +645,7 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
struct iser_conn *iser_conn = to_iser_conn(ib_conn);
struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
struct iscsi_hdr *hdr;
- int length;
- int outstanding, count, err;
+ int length, err;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "task_rsp");
@@ -687,20 +674,9 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
DMA_FROM_DEVICE);
- /* decrementing conn->post_recv_buf_count only --after-- freeing the *
- * task eliminates the need to worry on tasks which are completed in *
- * parallel to the execution of iser_conn_term. So the code that waits *
- * for the posted rx bufs refcount to become zero handles everything */
- ib_conn->post_recv_buf_count--;
-
- outstanding = ib_conn->post_recv_buf_count;
- if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
- count = min(iser_conn->qp_max_recv_dtos - outstanding,
- iser_conn->min_posted_rx);
- err = iser_post_recvm(iser_conn, count);
- if (err)
- iser_err("posting %d rx bufs err %d\n", count, err);
- }
+ err = iser_post_recvm(iser_conn, desc);
+ if (err)
+ iser_err("posting rx buffer err %d\n", err);
}
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
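The iser_task_rsp() rework above replaces counted batch reposting with a strict one-for-one scheme: every receive completion immediately reposts its own descriptor, so the receive queue never drains and the post_recv_buf_count / rx_desc_head bookkeeping disappears. The shape of the pattern, with hypothetical demo_* types and helpers:

	#include <rdma/ib_verbs.h>

	struct demo_rx_desc {
		struct ib_cqe cqe;
		struct ib_sge rx_sg;
	};

	static void demo_recv_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct demo_conn *conn = cq->cq_context; /* hypothetical */
		struct demo_rx_desc *desc =
			container_of(wc->wr_cqe, struct demo_rx_desc, cqe);

		/* ... consume the received PDU ... */

		/* hand the same buffer back: one completion, one post */
		if (demo_post_recv(conn, desc))	/* hypothetical helper */
			pr_err("rx repost failed\n");
	}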
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 9776b755d848..660982625488 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -44,8 +44,7 @@ void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
iser_err_comp(wc, "memreg");
}
-static struct iser_fr_desc *
-iser_reg_desc_get_fr(struct ib_conn *ib_conn)
+static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc;
@@ -60,9 +59,8 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn)
return desc;
}
-static void
-iser_reg_desc_put_fr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc)
+static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
unsigned long flags;
@@ -73,9 +71,9 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn,
}
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- enum iser_data_dir iser_dir,
- enum dma_data_direction dma_dir)
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir)
{
struct ib_device *dev;
@@ -100,9 +98,8 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}
-static int
-iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
- struct iser_mem_reg *reg)
+static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
+ struct iser_mem_reg *reg)
{
struct scatterlist *sg = mem->sg;
@@ -154,8 +151,8 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
reg->mem_h = NULL;
}
-static void
-iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
+static void iser_set_dif_domain(struct scsi_cmnd *sc,
+ struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
@@ -171,8 +168,8 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
domain->sig.dif.ref_remap = true;
}
-static int
-iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
+static int iser_set_sig_attrs(struct scsi_cmnd *sc,
+ struct ib_sig_attrs *sig_attrs)
{
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_WRITE_INSERT:
@@ -205,8 +202,7 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
return 0;
}
-static inline void
-iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
+static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
*mask = 0;
if (sc->prot_flags & SCSI_PROT_REF_CHECK)
@@ -215,11 +211,8 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
*mask |= IB_SIG_CHECK_GUARD;
}
-static inline void
-iser_inv_rkey(struct ib_send_wr *inv_wr,
- struct ib_mr *mr,
- struct ib_cqe *cqe,
- struct ib_send_wr *next_wr)
+static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
+ struct ib_cqe *cqe, struct ib_send_wr *next_wr)
{
inv_wr->opcode = IB_WR_LOCAL_INV;
inv_wr->wr_cqe = cqe;
@@ -229,12 +222,11 @@ iser_inv_rkey(struct ib_send_wr *inv_wr,
inv_wr->next = next_wr;
}
-static int
-iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_data_buf *sig_mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *sig_reg)
+static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_data_buf *sig_mem,
+ struct iser_reg_resources *rsc,
+ struct iser_mem_reg *sig_reg)
{
struct iser_tx_desc *tx_desc = &iser_task->desc;
struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
@@ -335,12 +327,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
return 0;
}
-static int
-iser_reg_data_sg(struct iscsi_iser_task *task,
- struct iser_data_buf *mem,
- struct iser_fr_desc *desc,
- bool use_dma_key,
- struct iser_mem_reg *reg)
+static int iser_reg_data_sg(struct iscsi_iser_task *task,
+ struct iser_data_buf *mem,
+ struct iser_fr_desc *desc, bool use_dma_key,
+ struct iser_mem_reg *reg)
{
struct iser_device *device = task->iser_conn->ib_conn.device;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b566f7cb7797..8bf87b073d9b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -265,14 +265,14 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
memset(&init_attr, 0, sizeof(init_attr));
init_attr.event_handler = iser_qp_event_callback;
- init_attr.qp_context = (void *)ib_conn;
- init_attr.send_cq = ib_conn->cq;
- init_attr.recv_cq = ib_conn->cq;
- init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
+ init_attr.qp_context = (void *)ib_conn;
+ init_attr.send_cq = ib_conn->cq;
+ init_attr.recv_cq = ib_conn->cq;
+ init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
- init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
- init_attr.qp_type = IB_QPT_RC;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr.qp_type = IB_QPT_RC;
init_attr.cap.max_send_wr = max_send_wr;
if (ib_conn->pi_support)
init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
@@ -283,9 +283,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
goto out_err;
ib_conn->qp = ib_conn->cma_id->qp;
- iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n",
- ib_conn, ib_conn->cma_id,
- ib_conn->cma_id->qp, max_send_wr);
+ iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn,
+ ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr);
return ret;
out_err:
@@ -313,7 +312,7 @@ struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
goto inc_refcnt;
device = kzalloc(sizeof *device, GFP_KERNEL);
- if (device == NULL)
+ if (!device)
goto out;
 /* associate the new iser device with the IB device */
@@ -392,8 +391,7 @@ void iser_release_work(struct work_struct *work)
 * so the cm_id removal is out of here. It is safe to
* be invoked multiple times.
*/
-static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
- bool destroy)
+static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
@@ -401,7 +399,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
iser_info("freeing conn %p cma_id %p qp %p\n",
iser_conn, ib_conn->cma_id, ib_conn->qp);
- if (ib_conn->qp != NULL) {
+ if (ib_conn->qp) {
rdma_destroy_qp(ib_conn->cma_id);
ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
ib_conn->qp = NULL;
@@ -411,7 +409,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
if (iser_conn->rx_descs)
iser_free_rx_descriptors(iser_conn);
- if (device != NULL) {
+ if (device) {
iser_device_try_release(device);
ib_conn->device = NULL;
}
@@ -445,7 +443,7 @@ void iser_conn_release(struct iser_conn *iser_conn)
iser_free_ib_conn_res(iser_conn, true);
mutex_unlock(&iser_conn->state_mutex);
- if (ib_conn->cma_id != NULL) {
+ if (ib_conn->cma_id) {
rdma_destroy_id(ib_conn->cma_id);
ib_conn->cma_id = NULL;
}
@@ -501,13 +499,12 @@ static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *iser_conn;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
iser_conn->state = ISER_CONN_TERMINATING;
}
-static void
-iser_calc_scsi_params(struct iser_conn *iser_conn,
- unsigned int max_sectors)
+static void iser_calc_scsi_params(struct iser_conn *iser_conn,
+ unsigned int max_sectors)
{
struct iser_device *device = iser_conn->ib_conn.device;
struct ib_device_attr *attr = &device->ib_device->attrs;
@@ -545,11 +542,11 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
- struct iser_conn *iser_conn;
- struct ib_conn *ib_conn;
+ struct iser_conn *iser_conn;
+ struct ib_conn *ib_conn;
int ret;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
@@ -593,9 +590,9 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
- int ret;
+ int ret;
struct iser_cm_hdr req_hdr;
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct ib_device *ib_dev = ib_conn->device->ib_device;
@@ -609,9 +606,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
memset(&conn_param, 0, sizeof conn_param);
conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
- conn_param.initiator_depth = 1;
- conn_param.retry_count = 7;
- conn_param.rnr_retry_count = 6;
+ conn_param.initiator_depth = 1;
+ conn_param.retry_count = 7;
+ conn_param.rnr_retry_count = 6;
memset(&req_hdr, 0, sizeof(req_hdr));
req_hdr.flags = ISER_ZBVA_NOT_SUP;
@@ -638,7 +635,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id,
struct ib_qp_attr attr;
struct ib_qp_init_attr init_attr;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
@@ -661,7 +658,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id,
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
if (iser_conn_terminate(iser_conn)) {
if (iser_conn->iscsi_conn)
@@ -675,7 +672,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
bool destroy)
{
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
/*
* We are not guaranteed that we visited disconnected_handler
@@ -687,12 +684,13 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
complete(&iser_conn->ib_completion);
}
-static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+static int iser_cma_handler(struct rdma_cm_id *cma_id,
+ struct rdma_cm_event *event)
{
struct iser_conn *iser_conn;
int ret = 0;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
iser_info("%s (%d): status %d conn %p id %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id->context, cma_id);
@@ -757,7 +755,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
INIT_LIST_HEAD(&iser_conn->conn_list);
mutex_init(&iser_conn->state_mutex);
- ib_conn->post_recv_buf_count = 0;
ib_conn->reg_cqe.done = iser_reg_comp;
}
@@ -765,10 +762,8 @@ void iser_conn_init(struct iser_conn *iser_conn)
* starts the process of connecting to the target
* sleeps until the connection is established or rejected
*/
-int iser_connect(struct iser_conn *iser_conn,
- struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
- int non_blocking)
+int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int non_blocking)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
int err = 0;
@@ -785,8 +780,7 @@ int iser_connect(struct iser_conn *iser_conn,
iser_conn->state = ISER_CONN_PENDING;
ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
- (void *)iser_conn,
- RDMA_PS_TCP, IB_QPT_RC);
+ iser_conn, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ib_conn->cma_id)) {
err = PTR_ERR(ib_conn->cma_id);
iser_err("rdma_create_id failed: %d\n", err);
@@ -829,7 +823,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_login_desc *desc = &iser_conn->login_desc;
struct ib_recv_wr wr;
- int ib_ret;
+ int ret;
desc->sge.addr = desc->rsp_dma;
desc->sge.length = ISER_RX_LOGIN_SIZE;
@@ -841,46 +835,30 @@ int iser_post_recvl(struct iser_conn *iser_conn)
wr.num_sge = 1;
wr.next = NULL;
- ib_conn->post_recv_buf_count++;
- ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL);
- if (ib_ret) {
- iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- ib_conn->post_recv_buf_count--;
- }
+ ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_recv login failed ret=%d\n", ret);
- return ib_ret;
+ return ret;
}
-int iser_post_recvm(struct iser_conn *iser_conn, int count)
+int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- unsigned int my_rx_head = iser_conn->rx_desc_head;
- struct iser_rx_desc *rx_desc;
- struct ib_recv_wr *wr;
- int i, ib_ret;
-
- for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
- rx_desc = &iser_conn->rx_descs[my_rx_head];
- rx_desc->cqe.done = iser_task_rsp;
- wr->wr_cqe = &rx_desc->cqe;
- wr->sg_list = &rx_desc->rx_sg;
- wr->num_sge = 1;
- wr->next = wr + 1;
- my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
- }
+ struct ib_recv_wr wr;
+ int ret;
- wr--;
- wr->next = NULL; /* mark end of work requests list */
+ rx_desc->cqe.done = iser_task_rsp;
+ wr.wr_cqe = &rx_desc->cqe;
+ wr.sg_list = &rx_desc->rx_sg;
+ wr.num_sge = 1;
+ wr.next = NULL;
- ib_conn->post_recv_buf_count += count;
- ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
- if (unlikely(ib_ret)) {
- iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- ib_conn->post_recv_buf_count -= count;
- } else
- iser_conn->rx_desc_head = my_rx_head;
+ ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_recv failed ret=%d\n", ret);
- return ib_ret;
+ return ret;
}
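For contrast with the batch path deleted above: multiple receive WRs are chained through ->next and handed to ib_post_recv() in a single verbs call, with the last element's ->next set to NULL to terminate the list. A minimal sketch of that chaining (descs[] is a hypothetical descriptor array), which the one-buffer-per-call version no longer needs:

	struct ib_recv_wr wrs[4];
	const struct ib_recv_wr *bad_wr;
	int i, ret;

	for (i = 0; i < 4; i++) {
		wrs[i].wr_cqe  = &descs[i].cqe;
		wrs[i].sg_list = &descs[i].rx_sg;
		wrs[i].num_sge = 1;
		wrs[i].next    = (i < 3) ? &wrs[i + 1] : NULL;
	}
	ret = ib_post_recv(qp, wrs, &bad_wr);	/* one call, four WRs */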
@@ -888,16 +866,14 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
* iser_post_send - Initiate a Send DTO operation
* @ib_conn: connection RDMA resources
* @tx_desc: iSER TX descriptor
- * @signal: true to send work request as SIGNALED
*
* Return: 0 on success, -1 on failure
*/
-int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
- bool signal)
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
struct ib_send_wr *wr = &tx_desc->send_wr;
struct ib_send_wr *first_wr;
- int ib_ret;
+ int ret;
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN,
@@ -908,7 +884,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
wr->sg_list = tx_desc->tx_sg;
wr->num_sge = tx_desc->num_sge;
wr->opcode = IB_WR_SEND;
- wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
+ wr->send_flags = IB_SEND_SIGNALED;
if (tx_desc->inv_wr.next)
first_wr = &tx_desc->inv_wr;
@@ -917,12 +893,12 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
else
first_wr = wr;
- ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
- if (unlikely(ib_ret))
+ ret = ib_post_send(ib_conn->qp, first_wr, NULL);
+ if (unlikely(ret))
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
- ib_ret, wr->opcode);
+ ret, wr->opcode);
- return ib_ret;
+ return ret;
}
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
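With iser_signal_comp() gone, every send above carries IB_SEND_SIGNALED, so each send WQE generates a CQE and completion handling can retire tx descriptors directly. The removed heuristic signaled only every Nth send, trading fewer completions for having to bound how many unsignaled WQEs might still occupy the send queue; roughly:

	#define DEMO_SIG_INTERVAL 32	/* was ISER_SIGNAL_CMD_COUNT */

	static inline bool demo_signal_comp(u8 sig_count)
	{
		return (sig_count % DEMO_SIG_INTERVAL) == 0;
	}

	/* at post time:
	 *	wr->send_flags = demo_signal_comp(++conn->sig_count) ?
	 *			 IB_SEND_SIGNALED : 0;
	 */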
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index 76e4352fe3f6..385a19846c24 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -13,8 +13,8 @@
void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt_stats *stats = sess->stats;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_stats *stats = clt_path->stats;
struct rtrs_clt_stats_pcpu *s;
int cpu;
@@ -180,8 +180,8 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt_stats *stats = sess->stats;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_stats *stats = clt_path->stats;
unsigned int len;
len = req->usr_len + req->data_len;
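rtrs keeps these counters in per-CPU copies (struct rtrs_clt_stats_pcpu), so the completion path increments without locks or atomics and a reader sums across CPUs when the value is needed. A generic sketch of the idiom, with illustrative names:

	#include <linux/percpu.h>

	struct demo_stats_pcpu {
		u64 wc_cnt;
	};

	static void demo_update_wc(struct demo_stats_pcpu __percpu *pcpu)
	{
		this_cpu_inc(pcpu->wc_cnt);	/* lock-free fast path */
	}

	static u64 demo_sum_wc(struct demo_stats_pcpu __percpu *pcpu)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu_ptr(pcpu, cpu)->wc_cnt;
		return sum;
	}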
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index 0e69180c3771..b4fa473b7888 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -16,21 +16,21 @@
#define MIN_MAX_RECONN_ATT -1
#define MAX_MAX_RECONN_ATT 9999
-static void rtrs_clt_sess_release(struct kobject *kobj)
+static void rtrs_clt_path_release(struct kobject *kobj)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
- free_sess(sess);
+ free_path(clt_path);
}
static struct kobj_type ktype_sess = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = rtrs_clt_sess_release
+ .release = rtrs_clt_path_release
};
-static void rtrs_clt_sess_stats_release(struct kobject *kobj)
+static void rtrs_clt_path_stats_release(struct kobject *kobj)
{
struct rtrs_clt_stats *stats;
@@ -43,14 +43,15 @@ static void rtrs_clt_sess_stats_release(struct kobject *kobj)
static struct kobj_type ktype_stats = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = rtrs_clt_sess_stats_release,
+ .release = rtrs_clt_path_stats_release,
};
static ssize_t max_reconnect_attempts_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
- struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
return sysfs_emit(page, "%d\n",
rtrs_clt_get_max_reconnect_attempts(clt));
@@ -63,7 +64,8 @@ static ssize_t max_reconnect_attempts_store(struct device *dev,
{
int value;
int ret;
- struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
ret = kstrtoint(buf, 10, &value);
if (ret) {
@@ -90,9 +92,9 @@ static ssize_t mpath_policy_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
- clt = container_of(dev, struct rtrs_clt, dev);
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
switch (clt->mp_policy) {
case MP_POLICY_RR:
@@ -114,12 +116,12 @@ static ssize_t mpath_policy_store(struct device *dev,
const char *buf,
size_t count)
{
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
int value;
int ret;
size_t len = 0;
- clt = container_of(dev, struct rtrs_clt, dev);
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
ret = kstrtoint(buf, 10, &value);
if (!ret && (value == MP_POLICY_RR ||
@@ -169,12 +171,12 @@ static ssize_t add_path_store(struct device *dev,
.src = &srcaddr,
.dst = &dstaddr
};
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
const char *nl;
size_t len;
int err;
- clt = container_of(dev, struct rtrs_clt, dev);
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
nl = strchr(buf, '\n');
if (nl)
@@ -197,10 +199,10 @@ static DEVICE_ATTR_RW(add_path);
static ssize_t rtrs_clt_state_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- if (sess->state == RTRS_CLT_CONNECTED)
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (clt_path->state == RTRS_CLT_CONNECTED)
return sysfs_emit(page, "connected\n");
return sysfs_emit(page, "disconnected\n");
@@ -219,16 +221,16 @@ static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int ret;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
- rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- ret = rtrs_clt_reconnect_from_sysfs(sess);
+ ret = rtrs_clt_reconnect_from_sysfs(clt_path);
if (ret)
return ret;
@@ -249,15 +251,15 @@ static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
- rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- rtrs_clt_close_conns(sess, true);
+ rtrs_clt_close_conns(clt_path, true);
return count;
}
@@ -276,16 +278,16 @@ static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int ret;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
- rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr);
+ ret = rtrs_clt_remove_path_from_sysfs(clt_path, &attr->attr);
if (ret)
return ret;
@@ -333,11 +335,11 @@ static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, typeof(*sess), kobj);
+ clt_path = container_of(kobj, typeof(*clt_path), kobj);
- return sysfs_emit(page, "%u\n", sess->hca_port);
+ return sysfs_emit(page, "%u\n", clt_path->hca_port);
}
static struct kobj_attribute rtrs_clt_hca_port_attr =
@@ -347,11 +349,11 @@ static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
- return sysfs_emit(page, "%s\n", sess->hca_name);
+ return sysfs_emit(page, "%s\n", clt_path->hca_name);
}
static struct kobj_attribute rtrs_clt_hca_name_attr =
@@ -361,12 +363,12 @@ static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
return sysfs_emit(page, "%lld ns\n",
- ktime_to_ns(sess->s.hb_cur_latency));
+ ktime_to_ns(clt_path->s.hb_cur_latency));
}
static struct kobj_attribute rtrs_clt_cur_latency_attr =
@@ -376,11 +378,11 @@ static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int len;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page,
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&clt_path->s.src_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
@@ -393,11 +395,11 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int len;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- len = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, page,
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&clt_path->s.dst_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
@@ -406,7 +408,7 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
static struct kobj_attribute rtrs_clt_dst_addr_attr =
__ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);
-static struct attribute *rtrs_clt_sess_attrs[] = {
+static struct attribute *rtrs_clt_path_attrs[] = {
&rtrs_clt_hca_name_attr.attr,
&rtrs_clt_hca_port_attr.attr,
&rtrs_clt_src_addr_attr.attr,
@@ -419,42 +421,43 @@ static struct attribute *rtrs_clt_sess_attrs[] = {
NULL,
};
-static const struct attribute_group rtrs_clt_sess_attr_group = {
- .attrs = rtrs_clt_sess_attrs,
+static const struct attribute_group rtrs_clt_path_attr_group = {
+ .attrs = rtrs_clt_path_attrs,
};
-int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
char str[NAME_MAX];
int err;
struct rtrs_addr path = {
- .src = &sess->s.src_addr,
- .dst = &sess->s.dst_addr,
+ .src = &clt_path->s.src_addr,
+ .dst = &clt_path->s.dst_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
- err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths,
+ err = kobject_init_and_add(&clt_path->kobj, &ktype_sess,
+ clt->kobj_paths,
"%s", str);
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
- kobject_put(&sess->kobj);
+ kobject_put(&clt_path->kobj);
return err;
}
- err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+ err = sysfs_create_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
if (err) {
pr_err("sysfs_create_group(): %d\n", err);
goto put_kobj;
}
- err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
- &sess->kobj, "stats");
+ err = kobject_init_and_add(&clt_path->stats->kobj_stats, &ktype_stats,
+ &clt_path->kobj, "stats");
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
goto remove_group;
}
- err = sysfs_create_group(&sess->stats->kobj_stats,
+ err = sysfs_create_group(&clt_path->stats->kobj_stats,
&rtrs_clt_stats_attr_group);
if (err) {
pr_err("failed to create stats sysfs group, err: %d\n", err);
@@ -464,25 +467,25 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
return 0;
put_kobj_stats:
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&clt_path->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
remove_group:
- sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+ sysfs_remove_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
put_kobj:
- kobject_del(&sess->kobj);
- kobject_put(&sess->kobj);
+ kobject_del(&clt_path->kobj);
+ kobject_put(&clt_path->kobj);
return err;
}
-void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self)
{
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&clt_path->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
if (sysfs_self)
- sysfs_remove_file_self(&sess->kobj, sysfs_self);
- kobject_del(&sess->kobj);
+ sysfs_remove_file_self(&clt_path->kobj, sysfs_self);
+ kobject_del(&clt_path->kobj);
}
static struct attribute *rtrs_clt_attrs[] = {
@@ -496,12 +499,12 @@ static const struct attribute_group rtrs_clt_attr_group = {
.attrs = rtrs_clt_attrs,
};
-int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt)
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt)
{
return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
}
-void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt)
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt)
{
sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
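One detail worth noting in rtrs_clt_create_path_files() above: once kobject_init_and_add() has run, the object owns a live refcount, so the error paths release it with kobject_put(), which invokes the ktype ->release callback (rtrs_clt_path_release(), freeing the path), rather than freeing the memory directly. The general shape, with illustrative demo_* names:

	#include <linux/kobject.h>
	#include <linux/slab.h>

	struct demo_obj {
		struct kobject kobj;
	};

	static void demo_release(struct kobject *kobj)
	{
		kfree(container_of(kobj, struct demo_obj, kobj));
	}

	static struct kobj_type demo_ktype = {
		.sysfs_ops = &kobj_sysfs_ops,
		.release   = demo_release,
	};

	static int demo_register(struct demo_obj *obj, struct kobject *parent)
	{
		int err;

		err = kobject_init_and_add(&obj->kobj, &demo_ktype, parent,
					   "%s", "demo");
		if (err)
			kobject_put(&obj->kobj); /* never kfree() directly */
		return err;
	}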
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 15c0077dd27e..7c3f98e57889 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -46,21 +46,21 @@ static struct rtrs_rdma_dev_pd dev_pd = {
static struct workqueue_struct *rtrs_wq;
static struct class *rtrs_clt_dev_class;
-static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
+static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
bool connected = false;
rcu_read_lock();
- list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
- connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
+ connected |= READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED;
rcu_read_unlock();
return connected;
}
static struct rtrs_permit *
-__rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
+__rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type)
{
size_t max_depth = clt->queue_depth;
struct rtrs_permit *permit;
@@ -87,7 +87,7 @@ __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
return permit;
}
-static inline void __rtrs_put_permit(struct rtrs_clt *clt,
+static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt,
struct rtrs_permit *permit)
{
clear_bit_unlock(permit->mem_id, clt->permits_map);
@@ -107,7 +107,7 @@ static inline void __rtrs_put_permit(struct rtrs_clt *clt,
* Context:
* Can sleep if @wait == RTRS_PERMIT_WAIT
*/
-struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt,
enum rtrs_clt_con_type con_type,
enum wait_type can_wait)
{
@@ -142,7 +142,8 @@ EXPORT_SYMBOL(rtrs_clt_get_permit);
* Context:
* Does not matter
*/
-void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
+void rtrs_clt_put_permit(struct rtrs_clt_sess *clt,
+ struct rtrs_permit *permit)
{
if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
return;
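The permits_map manipulation above is the standard lockless bitmap tag allocator: find a clear bit, try to claim it atomically, and retry if another CPU won the race; release pairs with clear_bit_unlock() for the required ordering. A sketch under those assumptions:

	#include <linux/bitops.h>

	static int demo_get_tag(unsigned long *map, unsigned int max)
	{
		unsigned int bit;

		do {
			bit = find_first_zero_bit(map, max);
			if (bit >= max)
				return -1;	/* all tags in use */
		} while (test_and_set_bit_lock(bit, map));

		return bit;
	}

	static void demo_put_tag(unsigned long *map, unsigned int bit)
	{
		clear_bit_unlock(bit, map);
	}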
@@ -163,29 +164,29 @@ EXPORT_SYMBOL(rtrs_clt_put_permit);
/**
* rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
- * @sess: client session pointer
+ * @clt_path: client path pointer
* @permit: permit for the allocation of the RDMA buffer
* Note:
* IO connections start from 1.
* Connection 0 is for user messages.
*/
static
-struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
+struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
struct rtrs_permit *permit)
{
int id = 0;
if (permit->con_type == RTRS_IO_CON)
- id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1;
+ id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1;
- return to_clt_con(sess->s.con[id]);
+ return to_clt_con(clt_path->s.con[id]);
}
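A minimal userspace sketch (plain C, invented values; not the kernel code itself) of how a permit's cpu_id is folded onto the IO connections above, assuming connection 0 is reserved for user messages and irq_con_num counts it plus the IRQ-driven IO connections:

#include <stdio.h>

/* Sketch: spread IO permits over connections 1..irq_con_num-1;
 * connection 0 carries only user messages. */
static int permit_to_con_id(unsigned int cpu_id, unsigned int irq_con_num)
{
	return (cpu_id % (irq_con_num - 1)) + 1;
}

int main(void)
{
	unsigned int irq_con_num = 5;	/* assumed: 1 user con + 4 IO cons */
	unsigned int cpu;

	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %u -> con %d\n", cpu,
		       permit_to_con_id(cpu, irq_con_num));
	return 0;
}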
/**
* rtrs_clt_change_state() - change the session state through session state
* machine.
*
- * @sess: client session to change the state of.
+ * @clt_path: client path to change the state of.
* @new_state: state to change to.
*
* returns true if sess's state is changed to the new state, otherwise returns false.
@@ -193,15 +194,15 @@ struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
* Locks:
* state_wq lock must be held.
*/
-static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state new_state)
{
enum rtrs_clt_state old_state;
bool changed = false;
- lockdep_assert_held(&sess->state_wq.lock);
+ lockdep_assert_held(&clt_path->state_wq.lock);
- old_state = sess->state;
+ old_state = clt_path->state;
switch (new_state) {
case RTRS_CLT_CONNECTING:
switch (old_state) {
@@ -275,42 +276,42 @@ static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
break;
}
if (changed) {
- sess->state = new_state;
- wake_up_locked(&sess->state_wq);
+ clt_path->state = new_state;
+ wake_up_locked(&clt_path->state_wq);
}
return changed;
}
-static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state old_state,
enum rtrs_clt_state new_state)
{
bool changed = false;
- spin_lock_irq(&sess->state_wq.lock);
- if (sess->state == old_state)
- changed = rtrs_clt_change_state(sess, new_state);
- spin_unlock_irq(&sess->state_wq.lock);
+ spin_lock_irq(&clt_path->state_wq.lock);
+ if (clt_path->state == old_state)
+ changed = rtrs_clt_change_state(clt_path, new_state);
+ spin_unlock_irq(&clt_path->state_wq.lock);
return changed;
}
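The from/to helper above is a classic guarded transition: take the wait-queue lock, move only if the current state matches, and wake waiters on success. A compressed userspace model with pthreads (illustrative sketch; the kernel version additionally validates the transition against the full state table in rtrs_clt_change_state()):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum clt_state { CLT_CONNECTING, CLT_CONNECTED, CLT_RECONNECTING, CLT_CLOSED };

struct path {
	pthread_mutex_t lock;	/* stands in for state_wq.lock */
	pthread_cond_t wq;	/* stands in for the wait queue */
	enum clt_state state;
};

static bool change_state_from_to(struct path *p, enum clt_state old_state,
				 enum clt_state new_state)
{
	bool changed = false;

	pthread_mutex_lock(&p->lock);
	if (p->state == old_state) {
		p->state = new_state;
		pthread_cond_broadcast(&p->wq);	/* wake_up_locked() analogue */
		changed = true;
	}
	pthread_mutex_unlock(&p->lock);
	return changed;
}

int main(void)
{
	struct path p = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
			  CLT_CONNECTED };

	printf("%d\n", change_state_from_to(&p, CLT_CONNECTED, CLT_RECONNECTING)); /* 1 */
	printf("%d\n", change_state_from_to(&p, CLT_CONNECTED, CLT_CLOSED));	   /* 0 */
	return 0;
}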
static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- if (rtrs_clt_change_state_from_to(sess,
+ if (rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTED,
RTRS_CLT_RECONNECTING)) {
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
unsigned int delay_ms;
/*
* Normal scenario, reconnect if we were successfully connected
*/
delay_ms = clt->reconnect_delay_sec * 1000;
- queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
msecs_to_jiffies(delay_ms +
prandom_u32() % RTRS_RECONNECT_SEED));
} else {
@@ -319,7 +320,7 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
* so notify waiter with error state, waiter is responsible
* for cleaning the rest and reconnect if needed.
*/
- rtrs_clt_change_state_from_to(sess,
+ rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTING,
RTRS_CLT_CONNECTING_ERR);
}
@@ -330,7 +331,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
+ rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -350,7 +351,7 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
+ rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -380,14 +381,14 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
bool notify, bool can_wait)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int err;
if (WARN_ON(!req->in_use))
return;
if (WARN_ON(!req->con))
return;
- sess = to_clt_sess(con->c.sess);
+ clt_path = to_clt_path(con->c.path);
if (req->sg_cnt) {
if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
@@ -417,7 +418,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
refcount_inc(&req->ref);
err = rtrs_inv_rkey(req);
if (err) {
- rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
+ rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
req->mr->rkey, err);
} else if (can_wait) {
wait_for_completion(&req->inv_comp);
@@ -433,21 +434,21 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
if (!refcount_dec_and_test(&req->ref))
return;
}
- ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
if (!refcount_dec_and_test(&req->ref))
return;
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
- atomic_dec(&sess->stats->inflight);
+ atomic_dec(&clt_path->stats->inflight);
req->in_use = false;
req->con = NULL;
if (errno) {
- rtrs_err_rl(con->c.sess, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
- errno, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port, notify);
+ rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
+ errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port, notify);
}
if (notify)
@@ -459,12 +460,12 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
struct rtrs_rbuf *rbuf, u32 off,
u32 imm, struct ib_send_wr *wr)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
enum ib_send_flags flags;
struct ib_sge sge;
if (!req->sg_size) {
- rtrs_wrn(con->c.sess,
+ rtrs_wrn(con->c.path,
"Doing RDMA Write failed, no data supplied\n");
return -EINVAL;
}
@@ -472,16 +473,17 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
/* user data and user message in the first list element */
sge.addr = req->iu->dma_addr;
sge.length = req->sg_size;
- sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
/*
* From time to time we have to post signalled sends,
* or the send queue will fill up and only a QP reset can help.
*/
- flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
+ flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
0 : IB_SEND_SIGNALED;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ req->iu->dma_addr,
req->sg_size, DMA_TO_DEVICE);
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
@@ -489,15 +491,15 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
imm, flags, wr, NULL);
}
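The signalling pattern above generalizes: request a hardware completion only on every Nth work request so the CQ stays quiet, while still reclaiming send-queue slots periodically. A self-contained sketch (plain C, stand-in constant; not the kernel code):

#include <stdio.h>

#define SEND_SIGNALED 0x1	/* stand-in for IB_SEND_SIGNALED */

/* Sketch: every signal_interval-th WR is posted signalled; the rest
 * are unsignalled. Without the periodic completions the send queue
 * would never be reclaimed. */
static int next_send_flags(unsigned int *wr_cnt, unsigned int signal_interval)
{
	return ++(*wr_cnt) % signal_interval ? 0 : SEND_SIGNALED;
}

int main(void)
{
	unsigned int wr_cnt = 0;	/* an atomic counter in the kernel */
	int i;

	for (i = 0; i < 8; i++)
		printf("wr %d: flags=%#x\n", i, next_send_flags(&wr_cnt, 4));
	return 0;
}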
-static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
+static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
s16 errno, bool w_inval)
{
struct rtrs_clt_io_req *req;
- if (WARN_ON(msg_id >= sess->queue_depth))
+ if (WARN_ON(msg_id >= clt_path->queue_depth))
return;
- req = &sess->reqs[msg_id];
+ req = &clt_path->reqs[msg_id];
/* Drop need_inv if server responded with send with invalidation */
req->need_inv &= !w_inval;
complete_rdma_req(req, errno, true, false);
@@ -507,21 +509,21 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
{
struct rtrs_iu *iu;
int err;
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+ WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu,
cqe);
err = rtrs_iu_post_recv(&con->c, iu);
if (err) {
- rtrs_err(con->c.sess, "post iu failed %d\n", err);
+ rtrs_err(con->c.path, "post iu failed %d\n", err);
rtrs_rdma_error_recovery(con);
}
}
static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_msg_rkey_rsp *msg;
u32 imm_type, imm_payload;
bool w_inval = false;
@@ -529,25 +531,26 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
u32 buf_id;
int err;
- WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+ WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
if (wc->byte_len < sizeof(*msg)) {
- rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
+ rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
wc->byte_len);
goto out;
}
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
- rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
+ rtrs_err(clt_path->clt,
+ "rkey response is malformed: type %d\n",
le16_to_cpu(msg->type));
goto out;
}
buf_id = le16_to_cpu(msg->buf_id);
- if (WARN_ON(buf_id >= sess->queue_depth))
+ if (WARN_ON(buf_id >= clt_path->queue_depth))
goto out;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
@@ -560,10 +563,10 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
if (WARN_ON(buf_id != msg_id))
goto out;
- sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
- process_io_rsp(sess, msg_id, err, w_inval);
+ clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
+ process_io_rsp(clt_path, msg_id, err, w_inval);
}
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
return rtrs_clt_recv_done(con, wc);
out:
@@ -600,14 +603,14 @@ static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
u32 imm_type, imm_payload;
bool w_inval = false;
int err;
if (wc->status != IB_WC_SUCCESS) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
- rtrs_err(sess->clt, "RDMA failed: %s\n",
+ rtrs_err(clt_path->clt, "RDMA failed: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -632,21 +635,21 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
- process_io_rsp(sess, msg_id, err, w_inval);
+ process_io_rsp(clt_path, msg_id, err, w_inval);
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
- rtrs_send_hb_ack(&sess->s);
- if (sess->flags & RTRS_MSG_NEW_RKEY_F)
+ rtrs_send_hb_ack(&clt_path->s);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
- sess->s.hb_missed_cnt = 0;
- sess->s.hb_cur_latency =
- ktime_sub(ktime_get(), sess->s.hb_last_sent);
- if (sess->flags & RTRS_MSG_NEW_RKEY_F)
+ clt_path->s.hb_missed_cnt = 0;
+ clt_path->s.hb_cur_latency =
+ ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else {
- rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
+ rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
imm_type);
}
if (w_inval)
@@ -658,7 +661,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
else
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
- rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
+ rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
err);
rtrs_rdma_error_recovery(con);
}
@@ -670,7 +673,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
wc->wc_flags & IB_WC_WITH_IMM));
WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
- if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
return rtrs_clt_recv_done(con, wc);
@@ -685,7 +688,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
break;
default:
- rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
+ rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
return;
}
}
@@ -693,10 +696,10 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
{
int err, i;
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
for (i = 0; i < q_size; i++) {
- if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
struct rtrs_iu *iu = &con->rsp_ius[i];
err = rtrs_iu_post_recv(&con->c, iu);
@@ -710,16 +713,16 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
return 0;
}
-static int post_recv_sess(struct rtrs_clt_sess *sess)
+static int post_recv_path(struct rtrs_clt_path *clt_path)
{
size_t q_size = 0;
int err, cid;
- for (cid = 0; cid < sess->s.con_num; cid++) {
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
if (cid == 0)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
- q_size = sess->queue_depth;
+ q_size = clt_path->queue_depth;
/*
* x2 for RDMA read responses + FR key invalidations,
@@ -727,9 +730,10 @@ static int post_recv_sess(struct rtrs_clt_sess *sess)
*/
q_size *= 2;
- err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
+ err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
if (err) {
- rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
+ rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n",
+ err);
return err;
}
}
@@ -740,8 +744,8 @@ static int post_recv_sess(struct rtrs_clt_sess *sess)
struct path_it {
int i;
struct list_head skip_list;
- struct rtrs_clt *clt;
- struct rtrs_clt_sess *(*next_path)(struct path_it *it);
+ struct rtrs_clt_sess *clt;
+ struct rtrs_clt_path *(*next_path)(struct path_it *it);
};
/**
@@ -773,11 +777,11 @@ struct path_it {
* Locks:
* rcu_read_lock() must be held.
*/
-static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
+static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
{
- struct rtrs_clt_sess __rcu **ppcpu_path;
- struct rtrs_clt_sess *path;
- struct rtrs_clt *clt;
+ struct rtrs_clt_path __rcu **ppcpu_path;
+ struct rtrs_clt_path *path;
+ struct rtrs_clt_sess *clt;
clt = it->clt;
@@ -811,26 +815,26 @@ static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
* Locks:
* rcu_read_lock() must be held.
*/
-static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
+static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it)
{
- struct rtrs_clt_sess *min_path = NULL;
- struct rtrs_clt *clt = it->clt;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *min_path = NULL;
+ struct rtrs_clt_sess *clt = it->clt;
+ struct rtrs_clt_path *clt_path;
int min_inflight = INT_MAX;
int inflight;
- list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))
+ if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
continue;
- inflight = atomic_read(&sess->stats->inflight);
+ inflight = atomic_read(&clt_path->stats->inflight);
if (inflight < min_inflight) {
min_inflight = inflight;
- min_path = sess;
+ min_path = clt_path;
}
}
@@ -862,26 +866,26 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
* Therefore the caller MUST check whether the returned
* path is NULL and, if so, trigger the IO error.
*/
-static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
+static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it)
{
- struct rtrs_clt_sess *min_path = NULL;
- struct rtrs_clt *clt = it->clt;
- struct rtrs_clt_sess *sess;
- ktime_t min_latency = INT_MAX;
+ struct rtrs_clt_path *min_path = NULL;
+ struct rtrs_clt_sess *clt = it->clt;
+ struct rtrs_clt_path *clt_path;
+ ktime_t min_latency = KTIME_MAX;
ktime_t latency;
- list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))
+ if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
continue;
- latency = sess->s.hb_cur_latency;
+ latency = clt_path->s.hb_cur_latency;
if (latency < min_latency) {
min_latency = latency;
- min_path = sess;
+ min_path = clt_path;
}
}
@@ -895,7 +899,7 @@ static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
return min_path;
}
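Both pickers above share one shape: walk the connected paths and keep the minimum of some per-path metric. A runnable userspace sketch of the min-inflight variant (plain C, invented data; the kernel walks an RCU-protected list and also honours the per-CPU skip list, which this sketch omits):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct path {
	const char *name;
	bool connected;
	int inflight;	/* an atomic_t in the kernel */
};

/* Sketch: pick the connected path with the fewest inflight requests. */
static struct path *min_inflight_path(struct path *paths, int n)
{
	struct path *min_path = NULL;
	int min_inflight = INT_MAX;
	int i;

	for (i = 0; i < n; i++) {
		if (!paths[i].connected)
			continue;
		if (paths[i].inflight < min_inflight) {
			min_inflight = paths[i].inflight;
			min_path = &paths[i];
		}
	}
	return min_path;	/* may be NULL: caller must fail the IO then */
}

int main(void)
{
	struct path paths[] = {
		{ "path0", true, 12 },
		{ "path1", false, 0 },
		{ "path2", true, 3 },
	};
	struct path *p = min_inflight_path(paths, 3);

	printf("selected: %s\n", p ? p->name : "(none)");
	return 0;
}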
-static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
+static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
{
INIT_LIST_HEAD(&it->skip_list);
it->clt = clt;
@@ -928,7 +932,7 @@ static inline void path_it_deinit(struct path_it *it)
* the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
* also hold the control message of rtrs.
* @req: an io request holding information about IO.
- * @sess: client session
+ * @clt_path: client path
* @conf: confirmation callback function to notify upper layer.
* @permit: permit for allocation of RDMA remote buffer
* @priv: private pointer
@@ -940,7 +944,7 @@ static inline void path_it_deinit(struct path_it *it)
* @dir: direction of the IO.
*/
static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
- struct rtrs_clt_sess *sess,
+ struct rtrs_clt_path *clt_path,
void (*conf)(void *priv, int errno),
struct rtrs_permit *permit, void *priv,
const struct kvec *vec, size_t usr_len,
@@ -958,13 +962,13 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
req->sg_cnt = sg_cnt;
req->priv = priv;
req->dir = dir;
- req->con = rtrs_permit_to_clt_con(sess, permit);
+ req->con = rtrs_permit_to_clt_con(clt_path, permit);
req->conf = conf;
req->need_inv = false;
req->need_inv_comp = false;
req->inv_errno = 0;
refcount_set(&req->ref, 1);
- req->mp_policy = sess->clt->mp_policy;
+ req->mp_policy = clt_path->clt->mp_policy;
iov_iter_kvec(&iter, READ, vec, 1, usr_len);
len = _copy_from_iter(req->iu->buf, usr_len, &iter);
@@ -974,7 +978,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
}
static struct rtrs_clt_io_req *
-rtrs_clt_get_req(struct rtrs_clt_sess *sess,
+rtrs_clt_get_req(struct rtrs_clt_path *clt_path,
void (*conf)(void *priv, int errno),
struct rtrs_permit *permit, void *priv,
const struct kvec *vec, size_t usr_len,
@@ -983,14 +987,14 @@ rtrs_clt_get_req(struct rtrs_clt_sess *sess,
{
struct rtrs_clt_io_req *req;
- req = &sess->reqs[permit->mem_id];
- rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
+ req = &clt_path->reqs[permit->mem_id];
+ rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len,
sg, sg_cnt, data_len, dir);
return req;
}
static struct rtrs_clt_io_req *
-rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
+rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
struct rtrs_clt_io_req *fail_req)
{
struct rtrs_clt_io_req *req;
@@ -999,8 +1003,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
.iov_len = fail_req->usr_len
};
- req = &alive_sess->reqs[fail_req->permit->mem_id];
- rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
+ req = &alive_path->reqs[fail_req->permit->mem_id];
+ rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit,
fail_req->priv, &vec, fail_req->usr_len,
fail_req->sglist, fail_req->sg_cnt,
fail_req->data_len, fail_req->dir);
@@ -1013,7 +1017,7 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
u32 size, u32 imm, struct ib_send_wr *wr,
struct ib_send_wr *tail)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct ib_sge *sge = req->sge;
enum ib_send_flags flags;
struct scatterlist *sg;
@@ -1033,22 +1037,23 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
for_each_sg(req->sglist, sg, req->sg_cnt, i) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
- sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
}
num_sge = 1 + req->sg_cnt;
}
sge[i].addr = req->iu->dma_addr;
sge[i].length = size;
- sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
/*
* From time to time we have to post signalled sends,
* or the send queue will fill up and only a QP reset can help.
*/
- flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
+ flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
0 : IB_SEND_SIGNALED;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ req->iu->dma_addr,
size, DMA_TO_DEVICE);
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
@@ -1074,8 +1079,8 @@ static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rtrs_msg_rdma_write *msg;
struct rtrs_rbuf *rbuf;
@@ -1088,13 +1093,13 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
- if (tsize > sess->chunk_size) {
+ if (tsize > clt_path->chunk_size) {
rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
- tsize, sess->chunk_size);
+ tsize, clt_path->chunk_size);
return -EMSGSIZE;
}
if (req->sg_cnt) {
- count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
+ count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
if (!count) {
rtrs_wrn(s, "Write request failed, map failed\n");
@@ -1111,7 +1116,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
imm = rtrs_to_io_req_imm(imm);
buf_id = req->permit->mem_id;
req->sg_size = tsize;
- rbuf = &sess->rbufs[buf_id];
+ rbuf = &clt_path->rbufs[buf_id];
if (count) {
ret = rtrs_map_sg_fr(req, count);
@@ -1119,7 +1124,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
rtrs_err_rl(s,
"Write request failed, failed to map fast reg. data, err: %d\n",
ret);
- ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
return ret;
}
@@ -1153,12 +1158,12 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
if (ret) {
rtrs_err_rl(s,
"Write request failed: error=%d path=%s [%s:%u]\n",
- ret, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port);
+ ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
- atomic_dec(&sess->stats->inflight);
+ atomic_dec(&clt_path->stats->inflight);
if (req->sg_cnt)
- ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
@@ -1168,10 +1173,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rtrs_msg_rdma_read *msg;
- struct rtrs_ib_dev *dev = sess->s.dev;
+ struct rtrs_ib_dev *dev = clt_path->s.dev;
struct ib_reg_wr rwr;
struct ib_send_wr *wr = NULL;
@@ -1181,10 +1186,10 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
- if (tsize > sess->chunk_size) {
+ if (tsize > clt_path->chunk_size) {
rtrs_wrn(s,
"Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
- tsize, sess->chunk_size);
+ tsize, clt_path->chunk_size);
return -EMSGSIZE;
}
@@ -1254,15 +1259,15 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
*/
rtrs_clt_update_all_stats(req, READ);
- ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
+ ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
req->data_len, imm, wr);
if (ret) {
rtrs_err_rl(s,
"Read request failed: error=%d path=%s [%s:%u]\n",
- ret, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port);
+ ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
- atomic_dec(&sess->stats->inflight);
+ atomic_dec(&clt_path->stats->inflight);
req->need_inv = false;
if (req->sg_cnt)
ib_dma_unmap_sg(dev->ib_dev, req->sglist,
@@ -1277,21 +1282,21 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
* @clt: clt context
* @fail_req: a failed io request.
*/
-static int rtrs_clt_failover_req(struct rtrs_clt *clt,
+static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt,
struct rtrs_clt_io_req *fail_req)
{
- struct rtrs_clt_sess *alive_sess;
+ struct rtrs_clt_path *alive_path;
struct rtrs_clt_io_req *req;
int err = -ECONNABORTED;
struct path_it it;
rcu_read_lock();
for (path_it_init(&it, clt);
- (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
+ (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
it.i++) {
- if (READ_ONCE(alive_sess->state) != RTRS_CLT_CONNECTED)
+ if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED)
continue;
- req = rtrs_clt_get_copy_req(alive_sess, fail_req);
+ req = rtrs_clt_get_copy_req(alive_path, fail_req);
if (req->dir == DMA_TO_DEVICE)
err = rtrs_clt_write_req(req);
else
@@ -1301,7 +1306,7 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt,
continue;
}
/* Success path */
- rtrs_clt_inc_failover_cnt(alive_sess->stats);
+ rtrs_clt_inc_failover_cnt(alive_path->stats);
break;
}
path_it_deinit(&it);
@@ -1310,16 +1315,16 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt,
return err;
}
-static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
+static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
struct rtrs_clt_io_req *req;
int i, err;
- if (!sess->reqs)
+ if (!clt_path->reqs)
return;
- for (i = 0; i < sess->queue_depth; ++i) {
- req = &sess->reqs[i];
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
if (!req->in_use)
continue;
@@ -1337,38 +1342,39 @@ static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
}
}
-static void free_sess_reqs(struct rtrs_clt_sess *sess)
+static void free_path_reqs(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_io_req *req;
int i;
- if (!sess->reqs)
+ if (!clt_path->reqs)
return;
- for (i = 0; i < sess->queue_depth; ++i) {
- req = &sess->reqs[i];
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
if (req->mr)
ib_dereg_mr(req->mr);
kfree(req->sge);
- rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1);
}
- kfree(sess->reqs);
- sess->reqs = NULL;
+ kfree(clt_path->reqs);
+ clt_path->reqs = NULL;
}
-static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
+static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_io_req *req;
int i, err = -ENOMEM;
- sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
- GFP_KERNEL);
- if (!sess->reqs)
+ clt_path->reqs = kcalloc(clt_path->queue_depth,
+ sizeof(*clt_path->reqs),
+ GFP_KERNEL);
+ if (!clt_path->reqs)
return -ENOMEM;
- for (i = 0; i < sess->queue_depth; ++i) {
- req = &sess->reqs[i];
- req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
- sess->s.dev->ib_dev,
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
+ req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
+ clt_path->s.dev->ib_dev,
DMA_TO_DEVICE,
rtrs_clt_rdma_done);
if (!req->iu)
@@ -1378,13 +1384,14 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
if (!req->sge)
goto out;
- req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
- sess->max_pages_per_mr);
+ req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
+ IB_MR_TYPE_MEM_REG,
+ clt_path->max_pages_per_mr);
if (IS_ERR(req->mr)) {
err = PTR_ERR(req->mr);
req->mr = NULL;
- pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
- sess->max_pages_per_mr);
+ pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n",
+ clt_path->max_pages_per_mr);
goto out;
}
@@ -1394,12 +1401,12 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
return 0;
out:
- free_sess_reqs(sess);
+ free_path_reqs(clt_path);
return err;
}
-static int alloc_permits(struct rtrs_clt *clt)
+static int alloc_permits(struct rtrs_clt_sess *clt)
{
unsigned int chunk_bits;
int err, i;
@@ -1433,7 +1440,7 @@ out_err:
return err;
}
-static void free_permits(struct rtrs_clt *clt)
+static void free_permits(struct rtrs_clt_sess *clt)
{
if (clt->permits_map) {
size_t sz = clt->queue_depth;
@@ -1447,13 +1454,13 @@ static void free_permits(struct rtrs_clt *clt)
clt->permits = NULL;
}
-static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
+static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
{
struct ib_device *ib_dev;
u64 max_pages_per_mr;
int mr_page_shift;
- ib_dev = sess->s.dev->ib_dev;
+ ib_dev = clt_path->s.dev->ib_dev;
/*
* Use the smallest page size supported by the HCA, down to a
@@ -1463,24 +1470,24 @@ static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
max_pages_per_mr = ib_dev->attrs.max_mr_size;
do_div(max_pages_per_mr, (1ull << mr_page_shift));
- sess->max_pages_per_mr =
- min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
+ clt_path->max_pages_per_mr =
+ min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
ib_dev->attrs.max_fast_reg_page_list_len);
- sess->clt->max_segments =
- min(sess->max_pages_per_mr, sess->clt->max_segments);
+ clt_path->clt->max_segments =
+ min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
}
-static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state new_state,
enum rtrs_clt_state *old_state)
{
bool changed;
- spin_lock_irq(&sess->state_wq.lock);
+ spin_lock_irq(&clt_path->state_wq.lock);
if (old_state)
- *old_state = sess->state;
- changed = rtrs_clt_change_state(sess, new_state);
- spin_unlock_irq(&sess->state_wq.lock);
+ *old_state = clt_path->state;
+ changed = rtrs_clt_change_state(clt_path, new_state);
+ spin_unlock_irq(&clt_path->state_wq.lock);
return changed;
}
@@ -1492,9 +1499,9 @@ static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
rtrs_rdma_error_recovery(con);
}
-static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
+static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
{
- rtrs_init_hb(&sess->s, &io_comp_cqe,
+ rtrs_init_hb(&clt_path->s, &io_comp_cqe,
RTRS_HB_INTERVAL_MS,
RTRS_HB_MISSED_MAX,
rtrs_clt_hb_err_handler,
@@ -1504,17 +1511,17 @@ static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
static void rtrs_clt_reconnect_work(struct work_struct *work);
static void rtrs_clt_close_work(struct work_struct *work);
-static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
+static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
const struct rtrs_addr *path,
size_t con_num, u32 nr_poll_queues)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int err = -ENOMEM;
int cpu;
size_t total_con;
- sess = kzalloc(sizeof(*sess), GFP_KERNEL);
- if (!sess)
+ clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL);
+ if (!clt_path)
goto err;
/*
@@ -1522,20 +1529,21 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
* +1: Extra connection for user messages
*/
total_con = con_num + nr_poll_queues + 1;
- sess->s.con = kcalloc(total_con, sizeof(*sess->s.con), GFP_KERNEL);
- if (!sess->s.con)
- goto err_free_sess;
+ clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
+ GFP_KERNEL);
+ if (!clt_path->s.con)
+ goto err_free_path;
- sess->s.con_num = total_con;
- sess->s.irq_con_num = con_num + 1;
+ clt_path->s.con_num = total_con;
+ clt_path->s.irq_con_num = con_num + 1;
- sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
- if (!sess->stats)
+ clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL);
+ if (!clt_path->stats)
goto err_free_con;
- mutex_init(&sess->init_mutex);
- uuid_gen(&sess->s.uuid);
- memcpy(&sess->s.dst_addr, path->dst,
+ mutex_init(&clt_path->init_mutex);
+ uuid_gen(&clt_path->s.uuid);
+ memcpy(&clt_path->s.dst_addr, path->dst,
rdma_addr_size((struct sockaddr *)path->dst));
/*
@@ -1544,53 +1552,54 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
* the sess->src_addr will contain only zeros, which is then fine.
*/
if (path->src)
- memcpy(&sess->s.src_addr, path->src,
+ memcpy(&clt_path->s.src_addr, path->src,
rdma_addr_size((struct sockaddr *)path->src));
- strscpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
- sess->clt = clt;
- sess->max_pages_per_mr = RTRS_MAX_SEGMENTS;
- init_waitqueue_head(&sess->state_wq);
- sess->state = RTRS_CLT_CONNECTING;
- atomic_set(&sess->connected_cnt, 0);
- INIT_WORK(&sess->close_work, rtrs_clt_close_work);
- INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
- rtrs_clt_init_hb(sess);
-
- sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
- if (!sess->mp_skip_entry)
+ strscpy(clt_path->s.sessname, clt->sessname,
+ sizeof(clt_path->s.sessname));
+ clt_path->clt = clt;
+ clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
+ init_waitqueue_head(&clt_path->state_wq);
+ clt_path->state = RTRS_CLT_CONNECTING;
+ atomic_set(&clt_path->connected_cnt, 0);
+ INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
+ INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
+ rtrs_clt_init_hb(clt_path);
+
+ clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
+ if (!clt_path->mp_skip_entry)
goto err_free_stats;
for_each_possible_cpu(cpu)
- INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
+ INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
- err = rtrs_clt_init_stats(sess->stats);
+ err = rtrs_clt_init_stats(clt_path->stats);
if (err)
goto err_free_percpu;
- return sess;
+ return clt_path;
err_free_percpu:
- free_percpu(sess->mp_skip_entry);
+ free_percpu(clt_path->mp_skip_entry);
err_free_stats:
- kfree(sess->stats);
+ kfree(clt_path->stats);
err_free_con:
- kfree(sess->s.con);
-err_free_sess:
- kfree(sess);
+ kfree(clt_path->s.con);
+err_free_path:
+ kfree(clt_path);
err:
return ERR_PTR(err);
}
-void free_sess(struct rtrs_clt_sess *sess)
+void free_path(struct rtrs_clt_path *clt_path)
{
- free_percpu(sess->mp_skip_entry);
- mutex_destroy(&sess->init_mutex);
- kfree(sess->s.con);
- kfree(sess->rbufs);
- kfree(sess);
+ free_percpu(clt_path->mp_skip_entry);
+ mutex_destroy(&clt_path->init_mutex);
+ kfree(clt_path->s.con);
+ kfree(clt_path->rbufs);
+ kfree(clt_path);
}
-static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
+static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
{
struct rtrs_clt_con *con;
@@ -1601,28 +1610,28 @@ static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
/* Map first two connections to the first CPU */
con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
con->c.cid = cid;
- con->c.sess = &sess->s;
+ con->c.path = &clt_path->s;
/* Align with srv, init as 1 */
atomic_set(&con->c.wr_cnt, 1);
mutex_init(&con->con_mutex);
- sess->s.con[cid] = &con->c;
+ clt_path->s.con[cid] = &con->c;
return 0;
}
static void destroy_con(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- sess->s.con[con->c.cid] = NULL;
+ clt_path->s.con[con->c.cid] = NULL;
mutex_destroy(&con->con_mutex);
kfree(con);
}
static int create_con_cq_qp(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
int err, cq_vector;
struct rtrs_msg_rkey_rsp *rsp;
@@ -1631,7 +1640,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
if (con->c.cid == 0) {
max_send_sge = 1;
/* We must be the first here */
- if (WARN_ON(sess->s.dev))
+ if (WARN_ON(clt_path->s.dev))
return -EINVAL;
/*
@@ -1639,16 +1648,16 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
* Be careful not to close user connection before ib dev
* is gracefully put.
*/
- sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
+ clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
&dev_pd);
- if (!sess->s.dev) {
- rtrs_wrn(sess->clt,
+ if (!clt_path->s.dev) {
+ rtrs_wrn(clt_path->clt,
"rtrs_ib_dev_find_get_or_add(): no memory\n");
return -ENOMEM;
}
- sess->s.dev_ref = 1;
- query_fast_reg_mode(sess);
- wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ clt_path->s.dev_ref = 1;
+ query_fast_reg_mode(clt_path);
+ wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
/*
* Two (request + registration) completions for send
* Two for recv if always_invalidate is set on the server
@@ -1665,27 +1674,28 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
* This is always true if user connection (cid == 0) is
* established first.
*/
- if (WARN_ON(!sess->s.dev))
+ if (WARN_ON(!clt_path->s.dev))
return -EINVAL;
- if (WARN_ON(!sess->queue_depth))
+ if (WARN_ON(!clt_path->queue_depth))
return -EINVAL;
- wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
/* Shared between connections */
- sess->s.dev_ref++;
+ clt_path->s.dev_ref++;
max_send_wr = min_t(int, wr_limit,
/* QD * (REQ + RSP + FR REGS or INVS) + drain */
- sess->queue_depth * 3 + 1);
+ clt_path->queue_depth * 3 + 1);
max_recv_wr = min_t(int, wr_limit,
- sess->queue_depth * 3 + 1);
+ clt_path->queue_depth * 3 + 1);
max_send_sge = 2;
}
atomic_set(&con->c.sq_wr_avail, max_send_wr);
cq_num = max_send_wr + max_recv_wr;
/* alloc iu to recv new rkey reply when server reports flags set */
- if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
- GFP_KERNEL, sess->s.dev->ib_dev,
+ GFP_KERNEL,
+ clt_path->s.dev->ib_dev,
DMA_FROM_DEVICE,
rtrs_clt_rdma_done);
if (!con->rsp_ius)
@@ -1693,13 +1703,13 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
con->queue_num = cq_num;
}
cq_num = max_send_wr + max_recv_wr;
- cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
- if (con->c.cid >= sess->s.irq_con_num)
- err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
+ cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
+ if (con->c.cid >= clt_path->s.irq_con_num)
+ err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_DIRECT);
else
- err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
+ err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_SOFTIRQ);
/*
@@ -1711,7 +1721,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
static void destroy_con_cq_qp(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
/*
* Be careful here: destroy_con_cq_qp() can be called even
@@ -1720,13 +1730,14 @@ static void destroy_con_cq_qp(struct rtrs_clt_con *con)
lockdep_assert_held(&con->con_mutex);
rtrs_cq_qp_destroy(&con->c);
if (con->rsp_ius) {
- rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_num);
+ rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
+ con->queue_num);
con->rsp_ius = NULL;
con->queue_num = 0;
}
- if (sess->s.dev_ref && !--sess->s.dev_ref) {
- rtrs_ib_dev_put(sess->s.dev);
- sess->s.dev = NULL;
+ if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) {
+ rtrs_ib_dev_put(clt_path->s.dev);
+ clt_path->s.dev = NULL;
}
}
@@ -1745,7 +1756,7 @@ static void destroy_cm(struct rtrs_clt_con *con)
static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
{
- struct rtrs_sess *s = con->c.sess;
+ struct rtrs_path *s = con->c.path;
int err;
mutex_lock(&con->con_mutex);
@@ -1764,8 +1775,8 @@ static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_sess *clt = clt_path->clt;
struct rtrs_msg_conn_req msg;
struct rdma_conn_param param;
@@ -1782,11 +1793,11 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
.magic = cpu_to_le16(RTRS_MAGIC),
.version = cpu_to_le16(RTRS_PROTO_VER),
.cid = cpu_to_le16(con->c.cid),
- .cid_num = cpu_to_le16(sess->s.con_num),
- .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
+ .cid_num = cpu_to_le16(clt_path->s.con_num),
+ .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt),
};
- msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
- uuid_copy(&msg.sess_uuid, &sess->s.uuid);
+ msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0;
+ uuid_copy(&msg.sess_uuid, &clt_path->s.uuid);
uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
err = rdma_connect_locked(con->c.cm_id, &param);
@@ -1799,8 +1810,8 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
struct rdma_cm_event *ev)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_sess *clt = clt_path->clt;
const struct rtrs_msg_conn_rsp *msg;
u16 version, queue_depth;
int errno;
@@ -1831,31 +1842,32 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
if (con->c.cid == 0) {
queue_depth = le16_to_cpu(msg->queue_depth);
- if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
+ if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) {
rtrs_err(clt, "Error: queue depth changed\n");
/*
* Stop any more reconnection attempts
*/
- sess->reconnect_attempts = -1;
+ clt_path->reconnect_attempts = -1;
rtrs_err(clt,
"Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
return -ECONNRESET;
}
- if (!sess->rbufs) {
- sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
- GFP_KERNEL);
- if (!sess->rbufs)
+ if (!clt_path->rbufs) {
+ clt_path->rbufs = kcalloc(queue_depth,
+ sizeof(*clt_path->rbufs),
+ GFP_KERNEL);
+ if (!clt_path->rbufs)
return -ENOMEM;
}
- sess->queue_depth = queue_depth;
- sess->s.signal_interval = min_not_zero(queue_depth,
+ clt_path->queue_depth = queue_depth;
+ clt_path->s.signal_interval = min_not_zero(queue_depth,
(unsigned short) SERVICE_CON_QUEUE_DEPTH);
- sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
- sess->max_io_size = le32_to_cpu(msg->max_io_size);
- sess->flags = le32_to_cpu(msg->flags);
- sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
+ clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
+ clt_path->max_io_size = le32_to_cpu(msg->max_io_size);
+ clt_path->flags = le32_to_cpu(msg->flags);
+ clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size;
/*
* Global IO size is always a minimum.
@@ -1866,20 +1878,20 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
* connections in parallel, use lock.
*/
mutex_lock(&clt->paths_mutex);
- clt->queue_depth = sess->queue_depth;
- clt->max_io_size = min_not_zero(sess->max_io_size,
+ clt->queue_depth = clt_path->queue_depth;
+ clt->max_io_size = min_not_zero(clt_path->max_io_size,
clt->max_io_size);
mutex_unlock(&clt->paths_mutex);
/*
* Cache the hca_port and hca_name for sysfs
*/
- sess->hca_port = con->c.cm_id->port_num;
- scnprintf(sess->hca_name, sizeof(sess->hca_name),
- sess->s.dev->ib_dev->name);
- sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ clt_path->hca_port = con->c.cm_id->port_num;
+ scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name),
+ clt_path->s.dev->ib_dev->name);
+ clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
/* set for_new_clt, to allow future reconnect on any path */
- sess->for_new_clt = 1;
+ clt_path->for_new_clt = 1;
}
return 0;
@@ -1887,16 +1899,16 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
static inline void flag_success_on_conn(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- atomic_inc(&sess->connected_cnt);
+ atomic_inc(&clt_path->connected_cnt);
con->cm_err = 1;
}
static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
struct rdma_cm_event *ev)
{
- struct rtrs_sess *s = con->c.sess;
+ struct rtrs_path *s = con->c.path;
const struct rtrs_msg_conn_rsp *msg;
const char *rej_msg;
int status, errno;
@@ -1924,23 +1936,23 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
return -ECONNRESET;
}
-void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
+void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
{
- if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL))
- queue_work(rtrs_wq, &sess->close_work);
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
+ queue_work(rtrs_wq, &clt_path->close_work);
if (wait)
- flush_work(&sess->close_work);
+ flush_work(&clt_path->close_work);
}
static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
{
if (con->cm_err == 1) {
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = to_clt_sess(con->c.sess);
- if (atomic_dec_and_test(&sess->connected_cnt))
+ clt_path = to_clt_path(con->c.path);
+ if (atomic_dec_and_test(&clt_path->connected_cnt))
- wake_up(&sess->state_wq);
+ wake_up(&clt_path->state_wq);
}
con->cm_err = cm_err;
}
@@ -1949,8 +1961,8 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
struct rtrs_clt_con *con = cm_id->context;
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
int cm_err = 0;
switch (ev->event) {
@@ -1968,7 +1980,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
* i.e. wake up without state change, but we set cm_err.
*/
flag_success_on_conn(con);
- wake_up(&sess->state_wq);
+ wake_up(&clt_path->state_wq);
return 0;
}
break;
@@ -1997,7 +2009,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
/*
* Device removal is a special case. Queue close and return 0.
*/
- rtrs_clt_close_conns(sess, false);
+ rtrs_clt_close_conns(clt_path, false);
return 0;
default:
rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
@@ -2020,13 +2032,13 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
static int create_cm(struct rtrs_clt_con *con)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rdma_cm_id *cm_id;
int err;
cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
- sess->s.dst_addr.ss_family == AF_IB ?
+ clt_path->s.dst_addr.ss_family == AF_IB ?
RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) {
err = PTR_ERR(cm_id);
@@ -2042,8 +2054,8 @@ static int create_cm(struct rtrs_clt_con *con)
rtrs_err(s, "Set address reuse failed, err: %d\n", err);
goto destroy_cm;
}
- err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
- (struct sockaddr *)&sess->s.dst_addr,
+ err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
+ (struct sockaddr *)&clt_path->s.dst_addr,
RTRS_CONNECT_TIMEOUT_MS);
if (err) {
rtrs_err(s, "Failed to resolve address, err: %d\n", err);
@@ -2055,8 +2067,8 @@ static int create_cm(struct rtrs_clt_con *con)
* or session state was really changed to error by device removal.
*/
err = wait_event_interruptible_timeout(
- sess->state_wq,
- con->cm_err || sess->state != RTRS_CLT_CONNECTING,
+ clt_path->state_wq,
+ con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
if (err == 0 || err == -ERESTARTSYS) {
if (err == 0)
@@ -2068,7 +2080,7 @@ static int create_cm(struct rtrs_clt_con *con)
err = con->cm_err;
goto errr;
}
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
/* Device removal */
err = -ECONNABORTED;
goto errr;
@@ -2087,9 +2099,9 @@ destroy_cm:
return err;
}
-static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
+static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
int up;
/*
@@ -2113,19 +2125,19 @@ static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
mutex_unlock(&clt->paths_ev_mutex);
/* Mark session as established */
- sess->established = true;
- sess->reconnect_attempts = 0;
- sess->stats->reconnects.successful_cnt++;
+ clt_path->established = true;
+ clt_path->reconnect_attempts = 0;
+ clt_path->stats->reconnects.successful_cnt++;
}
-static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
+static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
- if (!sess->established)
+ if (!clt_path->established)
return;
- sess->established = false;
+ clt_path->established = false;
mutex_lock(&clt->paths_ev_mutex);
WARN_ON(!clt->paths_up);
if (--clt->paths_up == 0)
@@ -2133,19 +2145,19 @@ static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
mutex_unlock(&clt->paths_ev_mutex);
}
-static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_con *con;
unsigned int cid;
- WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
+ WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED);
/*
* Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
* exactly in between. Start destroying after it finishes.
*/
- mutex_lock(&sess->init_mutex);
- mutex_unlock(&sess->init_mutex);
+ mutex_lock(&clt_path->init_mutex);
+ mutex_unlock(&clt_path->init_mutex);
/*
* All IO paths must observe !CONNECTED state before we
@@ -2153,7 +2165,7 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
*/
synchronize_rcu();
- rtrs_stop_hb(&sess->s);
+ rtrs_stop_hb(&clt_path->s);
/*
* The order is utterly crucial: first disconnect and complete all
@@ -2162,15 +2174,15 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
* eventually notify upper layer about session disconnection.
*/
- for (cid = 0; cid < sess->s.con_num; cid++) {
- if (!sess->s.con[cid])
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (!clt_path->s.con[cid])
break;
- con = to_clt_con(sess->s.con[cid]);
+ con = to_clt_con(clt_path->s.con[cid]);
stop_cm(con);
}
- fail_all_outstanding_reqs(sess);
- free_sess_reqs(sess);
- rtrs_clt_sess_down(sess);
+ fail_all_outstanding_reqs(clt_path);
+ free_path_reqs(clt_path);
+ rtrs_clt_path_down(clt_path);
/*
* Wait for graceful shutdown, namely when peer side invokes
@@ -2180,13 +2192,14 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
* since CM does not fire anything. That is fine, we are not in
* a hurry.
*/
- wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
+ wait_event_timeout(clt_path->state_wq,
+ !atomic_read(&clt_path->connected_cnt),
msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
- for (cid = 0; cid < sess->s.con_num; cid++) {
- if (!sess->s.con[cid])
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (!clt_path->s.con[cid])
break;
- con = to_clt_con(sess->s.con[cid]);
+ con = to_clt_con(clt_path->s.con[cid]);
mutex_lock(&con->con_mutex);
destroy_con_cq_qp(con);
mutex_unlock(&con->con_mutex);
@@ -2195,26 +2208,26 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
}
}
-static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
- struct rtrs_clt_sess *sess,
- struct rtrs_clt_sess *next)
+static inline bool xchg_paths(struct rtrs_clt_path __rcu **rcu_ppcpu_path,
+ struct rtrs_clt_path *clt_path,
+ struct rtrs_clt_path *next)
{
- struct rtrs_clt_sess **ppcpu_path;
+ struct rtrs_clt_path **ppcpu_path;
/* Call cmpxchg() without sparse warnings */
ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
- return sess == cmpxchg(ppcpu_path, sess, next);
+ return clt_path == cmpxchg(ppcpu_path, clt_path, next);
}
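The cmpxchg() trick above — install @next only if the per-CPU slot still holds the path being removed, so a racing reader that already republished the slot is not overwritten — can be modelled in userspace with C11 atomics (illustrative sketch, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct path { const char *name; };

/* Sketch: succeeds only if *slot still points at @old, like cmpxchg(). */
static bool swap_if_still(struct path *_Atomic *slot, struct path *old,
			  struct path *next)
{
	return atomic_compare_exchange_strong(slot, &old, next);
}

int main(void)
{
	struct path a = { "a" }, b = { "b" };
	struct path *_Atomic slot = &a;

	printf("%d\n", swap_if_still(&slot, &a, &b));	/* 1: replaced */
	printf("%d\n", swap_if_still(&slot, &a, &b));	/* 0: already gone */
	return 0;
}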
-static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
+static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
- struct rtrs_clt_sess *next;
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ struct rtrs_clt_path *next;
bool wait_for_grace = false;
int cpu;
mutex_lock(&clt->paths_mutex);
- list_del_rcu(&sess->s.entry);
+ list_del_rcu(&clt_path->s.entry);
/* Make sure everybody observes path removal. */
synchronize_rcu();
@@ -2255,7 +2268,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
* removed. If @sess is the last element, then @next is NULL.
*/
rcu_read_lock();
- next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
+ next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry,
typeof(*next), s.entry);
rcu_read_unlock();
@@ -2264,11 +2277,11 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
* removed, so change the pointer manually.
*/
for_each_possible_cpu(cpu) {
- struct rtrs_clt_sess __rcu **ppcpu_path;
+ struct rtrs_clt_path __rcu **ppcpu_path;
ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
if (rcu_dereference_protected(*ppcpu_path,
- lockdep_is_held(&clt->paths_mutex)) != sess)
+ lockdep_is_held(&clt->paths_mutex)) != clt_path)
/*
* synchronize_rcu() was called just after deleting
* entry from the list, thus IO code path cannot
@@ -2281,7 +2294,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
* We race with IO code path, which also changes pointer,
* thus we have to be careful not to overwrite it.
*/
- if (xchg_sessions(ppcpu_path, sess, next))
+ if (xchg_paths(ppcpu_path, clt_path, next))
/*
* @ppcpu_path was successfully replaced with @next,
* that means that someone could also pick up the
@@ -2296,29 +2309,29 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
mutex_unlock(&clt->paths_mutex);
}
-static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess)
+static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
mutex_lock(&clt->paths_mutex);
clt->paths_num++;
- list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
mutex_unlock(&clt->paths_mutex);
}
static void rtrs_clt_close_work(struct work_struct *work)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(work, struct rtrs_clt_sess, close_work);
+ clt_path = container_of(work, struct rtrs_clt_path, close_work);
- cancel_delayed_work_sync(&sess->reconnect_dwork);
- rtrs_clt_stop_and_destroy_conns(sess);
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL);
+ cancel_delayed_work_sync(&clt_path->reconnect_dwork);
+ rtrs_clt_stop_and_destroy_conns(clt_path);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
}
-static int init_conns(struct rtrs_clt_sess *sess)
+static int init_conns(struct rtrs_clt_path *clt_path)
{
unsigned int cid;
int err;
@@ -2328,31 +2341,31 @@ static int init_conns(struct rtrs_clt_sess *sess)
* to avoid clashes with previous sessions not yet closed
* on the server side.
*/
- sess->s.recon_cnt++;
+ clt_path->s.recon_cnt++;
/* Establish all RDMA connections */
- for (cid = 0; cid < sess->s.con_num; cid++) {
- err = create_con(sess, cid);
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ err = create_con(clt_path, cid);
if (err)
goto destroy;
- err = create_cm(to_clt_con(sess->s.con[cid]));
+ err = create_cm(to_clt_con(clt_path->s.con[cid]));
if (err) {
- destroy_con(to_clt_con(sess->s.con[cid]));
+ destroy_con(to_clt_con(clt_path->s.con[cid]));
goto destroy;
}
}
- err = alloc_sess_reqs(sess);
+ err = alloc_path_reqs(clt_path);
if (err)
goto destroy;
- rtrs_start_hb(&sess->s);
+ rtrs_start_hb(&clt_path->s);
return 0;
destroy:
while (cid--) {
- struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
+ struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
stop_cm(con);
@@ -2367,7 +2380,7 @@ destroy:
* doing rdma_resolve_addr(), switch to CONNECTION_ERR state
* manually to keep reconnecting.
*/
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
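
init_conns() relies on the classic reverse-unwind idiom: when creating connection @cid fails, everything created before it is destroyed by walking the index back down. A standalone sketch of the pattern, with stub create/destroy functions in place of the driver's:

#include <stdio.h>

#define CON_NUM 8

static int create_con(unsigned int cid)
{
	return cid == 5 ? -1 : 0;	/* simulate a failure at index 5 */
}

static void destroy_con(unsigned int cid)
{
	printf("destroying con %u\n", cid);
}

static int init_conns(void)
{
	unsigned int cid;

	for (cid = 0; cid < CON_NUM; cid++)
		if (create_con(cid))
			goto destroy;
	return 0;

destroy:
	while (cid--)		/* tears down cons cid-1 .. 0 */
		destroy_con(cid);
	return -1;
}
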
@@ -2375,31 +2388,32 @@ destroy:
static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(sess->clt, "Sess info request send failed: %s\n",
+ rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
ib_wc_status_msg(wc->status));
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
return;
}
rtrs_clt_update_wc_stats(con);
}
-static int process_info_rsp(struct rtrs_clt_sess *sess,
+static int process_info_rsp(struct rtrs_clt_path *clt_path,
const struct rtrs_msg_info_rsp *msg)
{
unsigned int sg_cnt, total_len;
int i, sgi;
sg_cnt = le16_to_cpu(msg->sg_cnt);
- if (!sg_cnt || (sess->queue_depth % sg_cnt)) {
- rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
+ if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) {
+ rtrs_err(clt_path->clt,
+ "Incorrect sg_cnt %d, is not multiple\n",
sg_cnt);
return -EINVAL;
}
@@ -2408,15 +2422,15 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
* Check if IB immediate data size is enough to hold the mem_id and
* the offset inside the memory chunk.
*/
- if ((ilog2(sg_cnt - 1) + 1) + (ilog2(sess->chunk_size - 1) + 1) >
+ if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) >
MAX_IMM_PAYL_BITS) {
- rtrs_err(sess->clt,
+ rtrs_err(clt_path->clt,
"RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
- MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
+ MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size);
return -EINVAL;
}
total_len = 0;
- for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
+ for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) {
const struct rtrs_sg_desc *desc = &msg->desc[sgi];
u32 len, rkey;
u64 addr;
@@ -2427,26 +2441,28 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
total_len += len;
- if (!len || (len % sess->chunk_size)) {
- rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
+ if (!len || (len % clt_path->chunk_size)) {
+ rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
+ sgi,
len);
return -EINVAL;
}
- for ( ; len && i < sess->queue_depth; i++) {
- sess->rbufs[i].addr = addr;
- sess->rbufs[i].rkey = rkey;
+ for ( ; len && i < clt_path->queue_depth; i++) {
+ clt_path->rbufs[i].addr = addr;
+ clt_path->rbufs[i].rkey = rkey;
- len -= sess->chunk_size;
- addr += sess->chunk_size;
+ len -= clt_path->chunk_size;
+ addr += clt_path->chunk_size;
}
}
/* Sanity check */
- if (sgi != sg_cnt || i != sess->queue_depth) {
- rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
+ if (sgi != sg_cnt || i != clt_path->queue_depth) {
+ rtrs_err(clt_path->clt,
+ "Incorrect sg vector, not fully mapped\n");
return -EINVAL;
}
- if (total_len != sess->chunk_size * sess->queue_depth) {
- rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
+ if (total_len != clt_path->chunk_size * clt_path->queue_depth) {
+ rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
return -EINVAL;
}
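
The ilog2 check earlier in process_info_rsp() enforces a bit budget: the buffer id and the offset inside a chunk must together fit in the RDMA immediate payload (MAX_IMM_PAYL_BITS, 28 in this driver). A small userspace sketch of the check and the resulting pack/unpack, with made-up sg_cnt and chunk_size values:

#include <stdint.h>
#include <stdio.h>

#define MAX_IMM_PAYL_BITS 28

/* Equivalent of the kernel's ilog2(x - 1) + 1 idiom: the number of
 * bits needed to represent values 0 .. x-1. */
static unsigned int bits_for(uint32_t x_minus_1)
{
	unsigned int b = 0;

	while (x_minus_1 >> b)
		b++;
	return b;
}

int main(void)
{
	uint32_t sg_cnt = 128, chunk_size = 1 << 19;	/* 512 KiB chunks */
	unsigned int id_bits  = bits_for(sg_cnt - 1);	  /* 7 bits */
	unsigned int off_bits = bits_for(chunk_size - 1); /* 19 bits */

	if (id_bits + off_bits > MAX_IMM_PAYL_BITS) {
		fprintf(stderr, "cannot encode %u buffers of size %uB\n",
			sg_cnt, chunk_size);
		return 1;
	}
	/* pack mem_id and offset into one immediate value */
	uint32_t imm = (3u << off_bits) | 4096u;
	printf("imm=0x%x -> mem_id=%u offset=%u\n", imm,
	       imm >> off_bits, imm & ((1u << off_bits) - 1));
	return 0;
}
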
@@ -2456,7 +2472,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_msg_info_rsp *msg;
enum rtrs_clt_state state;
struct rtrs_iu *iu;
@@ -2468,37 +2484,37 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(con->c.cid);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
+ rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
ib_wc_status_msg(wc->status));
goto out;
}
WARN_ON(wc->opcode != IB_WC_RECV);
if (wc->byte_len < sizeof(*msg)) {
- rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
wc->byte_len);
goto out;
}
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
- rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
+ rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
le16_to_cpu(msg->type));
goto out;
}
rx_sz = sizeof(*msg);
rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
if (wc->byte_len < rx_sz) {
- rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
wc->byte_len);
goto out;
}
- err = process_info_rsp(sess, msg);
+ err = process_info_rsp(clt_path, msg);
if (err)
goto out;
- err = post_recv_sess(sess);
+ err = post_recv_path(clt_path);
if (err)
goto out;
@@ -2506,25 +2522,25 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
out:
rtrs_clt_update_wc_stats(con);
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
- rtrs_clt_change_state_get_old(sess, state, NULL);
+ rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
+ rtrs_clt_change_state_get_old(clt_path, state, NULL);
}
-static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
+static int rtrs_send_path_info(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
+ struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
struct rtrs_msg_info_req *msg;
struct rtrs_iu *tx_iu, *rx_iu;
size_t rx_sz;
int err;
rx_sz = sizeof(struct rtrs_msg_info_rsp);
- rx_sz += sizeof(struct rtrs_sg_desc) * sess->queue_depth;
+ rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth;
tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
- sess->s.dev->ib_dev, DMA_TO_DEVICE,
+ clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
rtrs_clt_info_req_done);
- rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev,
DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
if (!tx_iu || !rx_iu) {
err = -ENOMEM;
@@ -2533,33 +2549,34 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
/* Prepare for getting info response */
err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
if (err) {
- rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
+ rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err);
goto out;
}
rx_iu = NULL;
msg = tx_iu->buf;
msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
- memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
+ memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname));
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
tx_iu->size, DMA_TO_DEVICE);
/* Send info request */
err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
if (err) {
- rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
+ rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err);
goto out;
}
tx_iu = NULL;
/* Wait for state change */
- wait_event_interruptible_timeout(sess->state_wq,
- sess->state != RTRS_CLT_CONNECTING,
+ wait_event_interruptible_timeout(clt_path->state_wq,
+ clt_path->state != RTRS_CLT_CONNECTING,
msecs_to_jiffies(
RTRS_CONNECT_TIMEOUT_MS));
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) {
- if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) {
+ if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR)
err = -ECONNRESET;
else
err = -ETIMEDOUT;
@@ -2567,82 +2584,82 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
out:
if (tx_iu)
- rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1);
if (rx_iu)
- rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1);
if (err)
/* If we've never taken async path because of malloc problems */
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
+ rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
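
The handshake in rtrs_send_path_info() is fire-and-wait: post the request, sleep until the path state leaves RTRS_CLT_CONNECTING or the timeout fires, then map the final state to an errno. A userspace model of that wait, with a pthread condition variable standing in for the kernel wait queue:

#include <errno.h>
#include <pthread.h>
#include <time.h>

enum clt_state { CONNECTING, CONNECTED, CONNECTING_ERR };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  state_wq = PTHREAD_COND_INITIALIZER;
static enum clt_state  state = CONNECTING;

static int wait_for_connect(unsigned int timeout_ms)
{
	struct timespec ts;
	int rc = 0, err;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec  += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (state == CONNECTING && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait(&state_wq, &lock, &ts);

	/* same errno mapping as the driver's post-wait check */
	if (state == CONNECTED)
		err = 0;
	else if (state == CONNECTING_ERR)
		err = -ECONNRESET;
	else
		err = -ETIMEDOUT;
	pthread_mutex_unlock(&lock);
	return err;
}
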
/**
- * init_sess() - establishes all session connections and does handshake
- * @sess: client session.
+ * init_path() - establishes all path connections and does handshake
+ * @clt_path: client path.
* In case of error a full close or reconnect procedure should be taken,
* because the async reconnect or close works may already have been started.
*/
-static int init_sess(struct rtrs_clt_sess *sess)
+static int init_path(struct rtrs_clt_path *clt_path)
{
int err;
char str[NAME_MAX];
struct rtrs_addr path = {
- .src = &sess->s.src_addr,
- .dst = &sess->s.dst_addr,
+ .src = &clt_path->s.src_addr,
+ .dst = &clt_path->s.dst_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
- mutex_lock(&sess->init_mutex);
- err = init_conns(sess);
+ mutex_lock(&clt_path->init_mutex);
+ err = init_conns(clt_path);
if (err) {
- rtrs_err(sess->clt,
+ rtrs_err(clt_path->clt,
"init_conns() failed: err=%d path=%s [%s:%u]\n", err,
- str, sess->hca_name, sess->hca_port);
+ str, clt_path->hca_name, clt_path->hca_port);
goto out;
}
- err = rtrs_send_sess_info(sess);
+ err = rtrs_send_path_info(clt_path);
if (err) {
- rtrs_err(
- sess->clt,
- "rtrs_send_sess_info() failed: err=%d path=%s [%s:%u]\n",
- err, str, sess->hca_name, sess->hca_port);
+ rtrs_err(clt_path->clt,
+ "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n",
+ err, str, clt_path->hca_name, clt_path->hca_port);
goto out;
}
- rtrs_clt_sess_up(sess);
+ rtrs_clt_path_up(clt_path);
out:
- mutex_unlock(&sess->init_mutex);
+ mutex_unlock(&clt_path->init_mutex);
return err;
}
static void rtrs_clt_reconnect_work(struct work_struct *work)
{
- struct rtrs_clt_sess *sess;
- struct rtrs_clt *clt;
+ struct rtrs_clt_path *clt_path;
+ struct rtrs_clt_sess *clt;
unsigned int delay_ms;
int err;
- sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
- reconnect_dwork);
- clt = sess->clt;
+ clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
+ reconnect_dwork);
+ clt = clt_path->clt;
- if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
return;
- if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
- /* Close a session completely if max attempts is reached */
- rtrs_clt_close_conns(sess, false);
+ if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
+ /* Close a path completely if max attempts is reached */
+ rtrs_clt_close_conns(clt_path, false);
return;
}
- sess->reconnect_attempts++;
+ clt_path->reconnect_attempts++;
/* Stop everything */
- rtrs_clt_stop_and_destroy_conns(sess);
+ rtrs_clt_stop_and_destroy_conns(clt_path);
msleep(RTRS_RECONNECT_BACKOFF);
- if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) {
- err = init_sess(sess);
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
+ err = init_path(clt_path);
if (err)
goto reconnect_again;
}
@@ -2650,10 +2667,10 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
return;
reconnect_again:
- if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) {
- sess->stats->reconnects.fail_cnt++;
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
+ clt_path->stats->reconnects.fail_cnt++;
delay_ms = clt->reconnect_delay_sec * 1000;
- queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
msecs_to_jiffies(delay_ms +
prandom_u32() %
RTRS_RECONNECT_SEED));
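
The requeue delay above is deliberately jittered: a fixed per-client delay plus a random component below RTRS_RECONNECT_SEED, so paths that failed together do not reconnect in lockstep. A sketch of that computation, with rand() standing in for prandom_u32() and an illustrative seed constant:

#include <stdlib.h>

#define RTRS_RECONNECT_SEED 8192	/* jitter window in ms; illustrative */

static unsigned int next_reconnect_delay_ms(unsigned int reconnect_delay_sec)
{
	unsigned int delay_ms = reconnect_delay_sec * 1000;

	/* base delay plus random jitter, as queued above */
	return delay_ms + (unsigned int)rand() % RTRS_RECONNECT_SEED;
}
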
@@ -2662,19 +2679,20 @@ reconnect_again:
static void rtrs_clt_dev_release(struct device *dev)
{
- struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
kfree(clt);
}
-static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
u16 port, size_t pdu_sz, void *priv,
void (*link_ev)(void *priv,
enum rtrs_clt_link_ev ev),
unsigned int reconnect_delay_sec,
unsigned int max_reconnect_attempts)
{
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
int err;
if (!paths_num || paths_num > MAX_PATHS_NUM)
@@ -2749,7 +2767,7 @@ err:
return ERR_PTR(err);
}
-static void free_clt(struct rtrs_clt *clt)
+static void free_clt(struct rtrs_clt_sess *clt)
{
free_permits(clt);
free_percpu(clt->pcpu_path);
@@ -2760,7 +2778,7 @@ static void free_clt(struct rtrs_clt *clt)
}
/**
- * rtrs_clt_open() - Open a session to an RTRS server
+ * rtrs_clt_open() - Open a path to an RTRS server
* @ops: holds the link event callback and the private pointer.
* @sessname: name of the session
* @paths: Paths to be established defined by their src and dst addresses
@@ -2777,24 +2795,24 @@ static void free_clt(struct rtrs_clt *clt)
*
* Return a valid pointer on success otherwise PTR_ERR.
*/
-struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
- const char *sessname,
+struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *pathname,
const struct rtrs_addr *paths,
size_t paths_num, u16 port,
size_t pdu_sz, u8 reconnect_delay_sec,
s16 max_reconnect_attempts, u32 nr_poll_queues)
{
- struct rtrs_clt_sess *sess, *tmp;
- struct rtrs_clt *clt;
+ struct rtrs_clt_path *clt_path, *tmp;
+ struct rtrs_clt_sess *clt;
int err, i;
- if (strchr(sessname, '/') || strchr(sessname, '.')) {
- pr_err("sessname cannot contain / and .\n");
+ if (strchr(pathname, '/') || strchr(pathname, '.')) {
+ pr_err("pathname cannot contain / and .\n");
err = -EINVAL;
goto out;
}
- clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
+ clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv,
ops->link_ev,
reconnect_delay_sec,
max_reconnect_attempts);
@@ -2803,49 +2821,49 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
goto out;
}
for (i = 0; i < paths_num; i++) {
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
+ clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
nr_poll_queues);
- if (IS_ERR(sess)) {
- err = PTR_ERR(sess);
- goto close_all_sess;
+ if (IS_ERR(clt_path)) {
+ err = PTR_ERR(clt_path);
+ goto close_all_path;
}
if (!i)
- sess->for_new_clt = 1;
- list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ clt_path->for_new_clt = 1;
+ list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
- err = init_sess(sess);
+ err = init_path(clt_path);
if (err) {
- list_del_rcu(&sess->s.entry);
- rtrs_clt_close_conns(sess, true);
- free_percpu(sess->stats->pcpu_stats);
- kfree(sess->stats);
- free_sess(sess);
- goto close_all_sess;
+ list_del_rcu(&clt_path->s.entry);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+ goto close_all_path;
}
- err = rtrs_clt_create_sess_files(sess);
+ err = rtrs_clt_create_path_files(clt_path);
if (err) {
- list_del_rcu(&sess->s.entry);
- rtrs_clt_close_conns(sess, true);
- free_percpu(sess->stats->pcpu_stats);
- kfree(sess->stats);
- free_sess(sess);
- goto close_all_sess;
+ list_del_rcu(&clt_path->s.entry);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+ goto close_all_path;
}
}
err = alloc_permits(clt);
if (err)
- goto close_all_sess;
+ goto close_all_path;
return clt;
-close_all_sess:
- list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
- rtrs_clt_destroy_sess_files(sess, NULL);
- rtrs_clt_close_conns(sess, true);
- kobject_put(&sess->kobj);
+close_all_path:
+ list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_path_files(clt_path, NULL);
+ rtrs_clt_close_conns(clt_path, true);
+ kobject_put(&clt_path->kobj);
}
rtrs_clt_destroy_sysfs_root(clt);
free_clt(clt);
@@ -2856,37 +2874,38 @@ out:
EXPORT_SYMBOL(rtrs_clt_open);
/**
- * rtrs_clt_close() - Close a session
+ * rtrs_clt_close() - Close a path
* @clt: Session handle. Session is freed upon return.
*/
-void rtrs_clt_close(struct rtrs_clt *clt)
+void rtrs_clt_close(struct rtrs_clt_sess *clt)
{
- struct rtrs_clt_sess *sess, *tmp;
+ struct rtrs_clt_path *clt_path, *tmp;
/* Firstly forbid sysfs access */
rtrs_clt_destroy_sysfs_root(clt);
/* Now it is safe to iterate over all paths without locks */
- list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
- rtrs_clt_close_conns(sess, true);
- rtrs_clt_destroy_sess_files(sess, NULL);
- kobject_put(&sess->kobj);
+ list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_close_conns(clt_path, true);
+ rtrs_clt_destroy_path_files(clt_path, NULL);
+ kobject_put(&clt_path->kobj);
}
free_clt(clt);
}
EXPORT_SYMBOL(rtrs_clt_close);
-int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
{
enum rtrs_clt_state old_state;
int err = -EBUSY;
bool changed;
- changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
+ changed = rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_RECONNECTING,
&old_state);
if (changed) {
- sess->reconnect_attempts = 0;
- queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
+ clt_path->reconnect_attempts = 0;
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
}
if (changed || old_state == RTRS_CLT_RECONNECTING) {
/*
@@ -2894,15 +2913,15 @@ int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
* execution, so do the flush if we have queued something
* right now or work is pending.
*/
- flush_delayed_work(&sess->reconnect_dwork);
- err = (READ_ONCE(sess->state) ==
+ flush_delayed_work(&clt_path->reconnect_dwork);
+ err = (READ_ONCE(clt_path->state) ==
RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
}
return err;
}
-int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self)
{
enum rtrs_clt_state old_state;
@@ -2918,27 +2937,27 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
* removing the path.
*/
do {
- rtrs_clt_close_conns(sess, true);
- changed = rtrs_clt_change_state_get_old(sess,
+ rtrs_clt_close_conns(clt_path, true);
+ changed = rtrs_clt_change_state_get_old(clt_path,
RTRS_CLT_DEAD,
&old_state);
} while (!changed && old_state != RTRS_CLT_DEAD);
if (changed) {
- rtrs_clt_remove_path_from_arr(sess);
- rtrs_clt_destroy_sess_files(sess, sysfs_self);
- kobject_put(&sess->kobj);
+ rtrs_clt_remove_path_from_arr(clt_path);
+ rtrs_clt_destroy_path_files(clt_path, sysfs_self);
+ kobject_put(&clt_path->kobj);
}
return 0;
}
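
Removal has to race fairly with the reconnect machinery, hence the loop above: close the connections, attempt the transition to RTRS_CLT_DEAD, and retry until this caller either wins the transition or observes that someone else already reached DEAD. A simplified userspace model, with a single atomic in place of the driver's lock-protected state machine:

#include <stdatomic.h>
#include <stdbool.h>

enum clt_state { CONNECTED, CLOSING, CLOSED, DEAD };

static _Atomic enum clt_state path_state = CONNECTED;

static void close_conns(void)
{
	atomic_store(&path_state, CLOSED);	/* greatly simplified */
}

static bool change_state_get_old(enum clt_state new_state,
				 enum clt_state *old_state)
{
	enum clt_state old = atomic_load(&path_state);

	*old_state = old;
	if (old != CLOSED)	/* DEAD is only reachable from CLOSED */
		return false;
	return atomic_compare_exchange_strong(&path_state, &old, new_state);
}

static void remove_path(void)
{
	enum clt_state old;
	bool changed;

	do {
		close_conns();
		changed = change_state_get_old(DEAD, &old);
	} while (!changed && old != DEAD);
}
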
-void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value)
{
clt->max_reconnect_attempts = (unsigned int)value;
}
-int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt)
{
return (int)clt->max_reconnect_attempts;
}
@@ -2968,12 +2987,12 @@ int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
* On dir=WRITE rtrs client will rdma write data in sg to server side.
*/
int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
- struct rtrs_clt *clt, struct rtrs_permit *permit,
- const struct kvec *vec, size_t nr, size_t data_len,
- struct scatterlist *sg, unsigned int sg_cnt)
+ struct rtrs_clt_sess *clt, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t data_len,
+ struct scatterlist *sg, unsigned int sg_cnt)
{
struct rtrs_clt_io_req *req;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
enum dma_data_direction dma_dir;
int err = -ECONNABORTED, i;
@@ -2995,19 +3014,19 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
rcu_read_lock();
for (path_it_init(&it, clt);
- (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- if (usr_len + hdr_len > sess->max_hdr_size) {
- rtrs_wrn_rl(sess->clt,
+ if (usr_len + hdr_len > clt_path->max_hdr_size) {
+ rtrs_wrn_rl(clt_path->clt,
"%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
dir == READ ? "Read" : "Write",
- usr_len, hdr_len, sess->max_hdr_size);
+ usr_len, hdr_len, clt_path->max_hdr_size);
err = -EMSGSIZE;
break;
}
- req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
+ req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv,
vec, usr_len, sg, sg_cnt, data_len,
dma_dir);
if (dir == READ)
@@ -3028,21 +3047,21 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
}
EXPORT_SYMBOL(rtrs_clt_request);
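
Path selection in rtrs_clt_request() is round-robin: resume after the last path used, skip any that is not CONNECTED, and give up after paths_num attempts. A userspace sketch of that walk, with a plain array in place of the RCU-protected list:

#include <stddef.h>

enum { DISCONNECTED, CONNECTED };

struct clt_path { int state; };

static struct clt_path *next_path_rr(struct clt_path *paths, size_t n,
				     size_t *last)
{
	for (size_t i = 0; i < n; i++) {
		size_t idx = (*last + 1 + i) % n;

		if (paths[idx].state != CONNECTED)
			continue;	/* try the next path */
		*last = idx;
		return &paths[idx];
	}
	return NULL;	/* no connected path: the request fails */
}
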
-int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index)
+int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index)
{
/* If no path, return -1 so the block layer does not retry */
int cnt = -1;
struct rtrs_con *con;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
struct path_it it;
rcu_read_lock();
for (path_it_init(&it, clt);
- (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- con = sess->s.con[index + 1];
+ con = clt_path->s.con[index + 1];
cnt = ib_process_cq_direct(con->cq, -1);
if (cnt)
break;
@@ -3062,7 +3081,7 @@ EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
* 0 on success
* -ECOMM no connection to the server
*/
-int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
+int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr)
{
if (!rtrs_clt_is_connected(clt))
return -ECOMM;
@@ -3077,15 +3096,15 @@ int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
}
EXPORT_SYMBOL(rtrs_clt_query);
-int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
struct rtrs_addr *addr)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int err;
- sess = alloc_sess(clt, addr, nr_cpu_ids, 0);
- if (IS_ERR(sess))
- return PTR_ERR(sess);
+ clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
+ if (IS_ERR(clt_path))
+ return PTR_ERR(clt_path);
mutex_lock(&clt->paths_mutex);
if (clt->paths_num == 0) {
@@ -3094,7 +3113,7 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
* the addition of the first path is like a new session for
* the storage server
*/
- sess->for_new_clt = 1;
+ clt_path->for_new_clt = 1;
}
mutex_unlock(&clt->paths_mutex);
@@ -3104,24 +3123,24 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
* IO will never grab it. Also it is very important to add
* path before init, since init fires LINK_CONNECTED event.
*/
- rtrs_clt_add_path_to_arr(sess);
+ rtrs_clt_add_path_to_arr(clt_path);
- err = init_sess(sess);
+ err = init_path(clt_path);
if (err)
- goto close_sess;
+ goto close_path;
- err = rtrs_clt_create_sess_files(sess);
+ err = rtrs_clt_create_path_files(clt_path);
if (err)
- goto close_sess;
+ goto close_path;
return 0;
-close_sess:
- rtrs_clt_remove_path_from_arr(sess);
- rtrs_clt_close_conns(sess, true);
- free_percpu(sess->stats->pcpu_stats);
- kfree(sess->stats);
- free_sess(sess);
+close_path:
+ rtrs_clt_remove_path_from_arr(clt_path);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
return err;
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index 9afffccff973..d1b18a154ae0 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -124,9 +124,9 @@ struct rtrs_rbuf {
u32 rkey;
};
-struct rtrs_clt_sess {
- struct rtrs_sess s;
- struct rtrs_clt *clt;
+struct rtrs_clt_path {
+ struct rtrs_path s;
+ struct rtrs_clt_sess *clt;
wait_queue_head_t state_wq;
enum rtrs_clt_state state;
atomic_t connected_cnt;
@@ -153,10 +153,10 @@ struct rtrs_clt_sess {
*mp_skip_entry;
};
-struct rtrs_clt {
+struct rtrs_clt_sess {
struct list_head paths_list; /* rcu protected list */
size_t paths_num;
- struct rtrs_clt_sess
+ struct rtrs_clt_path
__rcu * __percpu *pcpu_path;
uuid_t paths_uuid;
int paths_up;
@@ -186,31 +186,32 @@ static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
return container_of(c, struct rtrs_clt_con, c);
}
-static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s)
+static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *s)
{
- return container_of(s, struct rtrs_clt_sess, s);
+ return container_of(s, struct rtrs_clt_path, s);
}
-static inline int permit_size(struct rtrs_clt *clt)
+static inline int permit_size(struct rtrs_clt_sess *clt)
{
return sizeof(struct rtrs_permit) + clt->pdu_sz;
}
-static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx)
+static inline struct rtrs_permit *get_permit(struct rtrs_clt_sess *clt,
+ int idx)
{
return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
}
-int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess);
-void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait);
-int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *path);
+void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait);
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
struct rtrs_addr *addr);
-int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path,
const struct attribute *sysfs_self);
-void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value);
-int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt);
-void free_sess(struct rtrs_clt_sess *sess);
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value);
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt);
+void free_path(struct rtrs_clt_path *clt_path);
/* rtrs-clt-stats.c */
@@ -239,11 +240,11 @@ ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
/* rtrs-clt-sysfs.c */
-int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
-void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt);
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt);
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt);
-int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
-void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path);
+void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self);
#endif /* RTRS_CLT_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 78eac9a4f703..9a1e5c2ae55c 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -90,7 +90,7 @@ struct rtrs_ib_dev {
};
struct rtrs_con {
- struct rtrs_sess *sess;
+ struct rtrs_path *path;
struct ib_qp *qp;
struct ib_cq *cq;
struct rdma_cm_id *cm_id;
@@ -100,7 +100,7 @@ struct rtrs_con {
atomic_t sq_wr_avail;
};
-struct rtrs_sess {
+struct rtrs_path {
struct list_head entry;
struct sockaddr_storage dst_addr;
struct sockaddr_storage src_addr;
@@ -229,11 +229,11 @@ struct rtrs_msg_conn_rsp {
/**
* struct rtrs_msg_info_req
* @type: @RTRS_MSG_INFO_REQ
- * @sessname: Session name chosen by client
+ * @pathname: Path name chosen by client
*/
struct rtrs_msg_info_req {
__le16 type;
- u8 sessname[NAME_MAX];
+ u8 pathname[NAME_MAX];
u8 reserved[15];
};
@@ -313,19 +313,19 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
-int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
enum ib_poll_context poll_ctx);
void rtrs_cq_qp_destroy(struct rtrs_con *con);
-void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
unsigned int interval_ms, unsigned int missed_max,
void (*err_handler)(struct rtrs_con *con),
struct workqueue_struct *wq);
-void rtrs_start_hb(struct rtrs_sess *sess);
-void rtrs_stop_hb(struct rtrs_sess *sess);
-void rtrs_send_hb_ack(struct rtrs_sess *sess);
+void rtrs_start_hb(struct rtrs_path *path);
+void rtrs_stop_hb(struct rtrs_path *path);
+void rtrs_send_hb_ack(struct rtrs_path *path);
void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
struct rtrs_rdma_dev_pd *pool);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 9c43ce5ba1c1..b94ae12c2795 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -15,10 +15,10 @@
static void rtrs_srv_release(struct kobject *kobj)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- kfree(sess);
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ kfree(srv_path);
}
static struct kobj_type ktype = {
@@ -36,24 +36,25 @@ static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_srv_sess *sess;
- struct rtrs_sess *s;
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_path *s;
char str[MAXHOSTNAMELEN];
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- s = &sess->s;
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ s = &srv_path->s;
if (!sysfs_streq(buf, "1")) {
rtrs_err(s, "%s: invalid value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
+ sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, str,
+ sizeof(str));
rtrs_info(s, "disconnect for path %s requested\n", str);
/* first remove sysfs itself to avoid deadlock */
- sysfs_remove_file_self(&sess->kobj, &attr->attr);
- close_sess(sess);
+ sysfs_remove_file_self(&srv_path->kobj, &attr->attr);
+ close_path(srv_path);
return count;
}
@@ -66,11 +67,11 @@ static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
struct rtrs_con *usr_con;
- sess = container_of(kobj, typeof(*sess), kobj);
- usr_con = sess->s.con[0];
+ srv_path = container_of(kobj, typeof(*srv_path), kobj);
+ usr_con = srv_path->s.con[0];
return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num);
}
@@ -82,11 +83,11 @@ static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
- return sysfs_emit(page, "%s\n", sess->s.dev->ib_dev->name);
+ return sysfs_emit(page, "%s\n", srv_path->s.dev->ib_dev->name);
}
static struct kobj_attribute rtrs_srv_hca_name_attr =
@@ -96,11 +97,11 @@ static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int cnt;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr,
page, PAGE_SIZE);
return cnt + sysfs_emit_at(page, cnt, "\n");
}
@@ -112,11 +113,11 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int len;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page,
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&srv_path->s.src_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
@@ -125,7 +126,7 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
static struct kobj_attribute rtrs_srv_dst_addr_attr =
__ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
-static struct attribute *rtrs_srv_sess_attrs[] = {
+static struct attribute *rtrs_srv_path_attrs[] = {
&rtrs_srv_hca_name_attr.attr,
&rtrs_srv_hca_port_attr.attr,
&rtrs_srv_src_addr_attr.attr,
@@ -134,8 +135,8 @@ static struct attribute *rtrs_srv_sess_attrs[] = {
NULL,
};
-static const struct attribute_group rtrs_srv_sess_attr_group = {
- .attrs = rtrs_srv_sess_attrs,
+static const struct attribute_group rtrs_srv_path_attr_group = {
+ .attrs = rtrs_srv_path_attrs,
};
STAT_ATTR(struct rtrs_srv_stats, rdma,
@@ -151,9 +152,9 @@ static const struct attribute_group rtrs_srv_stats_attr_group = {
.attrs = rtrs_srv_stats_attrs,
};
-static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
int err = 0;
mutex_lock(&srv->paths_mutex);
@@ -164,7 +165,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
goto unlock;
}
srv->dev.class = rtrs_dev_class;
- err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
+ err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname);
if (err)
goto unlock;
@@ -196,9 +197,9 @@ unlock:
}
static void
-rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
mutex_lock(&srv->paths_mutex);
if (!--srv->dev_ref) {
@@ -213,7 +214,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
}
}
-static void rtrs_srv_sess_stats_release(struct kobject *kobj)
+static void rtrs_srv_path_stats_release(struct kobject *kobj)
{
struct rtrs_srv_stats *stats;
@@ -224,22 +225,22 @@ static void rtrs_srv_sess_stats_release(struct kobject *kobj)
static struct kobj_type ktype_stats = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = rtrs_srv_sess_stats_release,
+ .release = rtrs_srv_path_stats_release,
};
-static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
+static int rtrs_srv_create_stats_files(struct rtrs_srv_path *srv_path)
{
int err;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_path *s = &srv_path->s;
- err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
- &sess->kobj, "stats");
+ err = kobject_init_and_add(&srv_path->stats->kobj_stats, &ktype_stats,
+ &srv_path->kobj, "stats");
if (err) {
rtrs_err(s, "kobject_init_and_add(): %d\n", err);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
return err;
}
- err = sysfs_create_group(&sess->stats->kobj_stats,
+ err = sysfs_create_group(&srv_path->stats->kobj_stats,
&rtrs_srv_stats_attr_group);
if (err) {
rtrs_err(s, "sysfs_create_group(): %d\n", err);
@@ -249,64 +250,64 @@ static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
return 0;
err:
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&srv_path->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
return err;
}
-int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess)
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
char str[NAME_MAX];
int err;
struct rtrs_addr path = {
- .src = &sess->s.dst_addr,
- .dst = &sess->s.src_addr,
+ .src = &srv_path->s.dst_addr,
+ .dst = &srv_path->s.src_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
- err = rtrs_srv_create_once_sysfs_root_folders(sess);
+ err = rtrs_srv_create_once_sysfs_root_folders(srv_path);
if (err)
return err;
- err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths,
+ err = kobject_init_and_add(&srv_path->kobj, &ktype, srv->kobj_paths,
"%s", str);
if (err) {
rtrs_err(s, "kobject_init_and_add(): %d\n", err);
goto destroy_root;
}
- err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ err = sysfs_create_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
if (err) {
rtrs_err(s, "sysfs_create_group(): %d\n", err);
goto put_kobj;
}
- err = rtrs_srv_create_stats_files(sess);
+ err = rtrs_srv_create_stats_files(srv_path);
if (err)
goto remove_group;
return 0;
remove_group:
- sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
put_kobj:
- kobject_del(&sess->kobj);
+ kobject_del(&srv_path->kobj);
destroy_root:
- kobject_put(&sess->kobj);
- rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ kobject_put(&srv_path->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
return err;
}
-void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
{
- if (sess->kobj.state_in_sysfs) {
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
- sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
- kobject_put(&sess->kobj);
+ if (srv_path->kobj.state_in_sysfs) {
+ kobject_del(&srv_path->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
+ sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+ kobject_put(&srv_path->kobj);
- rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 7df71f8cf149..24024bce2566 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -62,19 +62,19 @@ static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
return container_of(c, struct rtrs_srv_con, c);
}
-static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s)
+static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
{
- return container_of(s, struct rtrs_srv_sess, s);
+ return container_of(s, struct rtrs_srv_path, s);
}
-static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
enum rtrs_srv_state new_state)
{
enum rtrs_srv_state old_state;
bool changed = false;
- spin_lock_irq(&sess->state_lock);
- old_state = sess->state;
+ spin_lock_irq(&srv_path->state_lock);
+ old_state = srv_path->state;
switch (new_state) {
case RTRS_SRV_CONNECTED:
if (old_state == RTRS_SRV_CONNECTING)
@@ -93,8 +93,8 @@ static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
break;
}
if (changed)
- sess->state = new_state;
- spin_unlock_irq(&sess->state_lock);
+ srv_path->state = new_state;
+ spin_unlock_irq(&srv_path->state_lock);
return changed;
}
@@ -106,16 +106,16 @@ static void free_id(struct rtrs_srv_op *id)
kfree(id);
}
-static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess)
+static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
int i;
- if (sess->ops_ids) {
+ if (srv_path->ops_ids) {
for (i = 0; i < srv->queue_depth; i++)
- free_id(sess->ops_ids[i]);
- kfree(sess->ops_ids);
- sess->ops_ids = NULL;
+ free_id(srv_path->ops_ids[i]);
+ kfree(srv_path->ops_ids);
+ srv_path->ops_ids = NULL;
}
}
@@ -127,21 +127,24 @@ static struct ib_cqe io_comp_cqe = {
static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
{
- struct rtrs_srv_sess *sess = container_of(ref, struct rtrs_srv_sess, ids_inflight_ref);
+ struct rtrs_srv_path *srv_path = container_of(ref,
+ struct rtrs_srv_path,
+ ids_inflight_ref);
- percpu_ref_exit(&sess->ids_inflight_ref);
- complete(&sess->complete_done);
+ percpu_ref_exit(&srv_path->ids_inflight_ref);
+ complete(&srv_path->complete_done);
}
-static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
+static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_op *id;
int i, ret;
- sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids),
- GFP_KERNEL);
- if (!sess->ops_ids)
+ srv_path->ops_ids = kcalloc(srv->queue_depth,
+ sizeof(*srv_path->ops_ids),
+ GFP_KERNEL);
+ if (!srv_path->ops_ids)
goto err;
for (i = 0; i < srv->queue_depth; ++i) {
@@ -149,44 +152,44 @@ static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
if (!id)
goto err;
- sess->ops_ids[i] = id;
+ srv_path->ops_ids[i] = id;
}
- ret = percpu_ref_init(&sess->ids_inflight_ref,
+ ret = percpu_ref_init(&srv_path->ids_inflight_ref,
rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
if (ret) {
pr_err("Percpu reference init failed\n");
goto err;
}
- init_completion(&sess->complete_done);
+ init_completion(&srv_path->complete_done);
return 0;
err:
- rtrs_srv_free_ops_ids(sess);
+ rtrs_srv_free_ops_ids(srv_path);
return -ENOMEM;
}
-static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess)
+static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path)
{
- percpu_ref_get(&sess->ids_inflight_ref);
+ percpu_ref_get(&srv_path->ids_inflight_ref);
}
-static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess)
+static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path)
{
- percpu_ref_put(&sess->ids_inflight_ref);
+ percpu_ref_put(&srv_path->ids_inflight_ref);
}
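
This reference pair is how the server bounds teardown: every request takes an in-flight reference, percpu_ref_kill() drops the initial one, and the release callback fires complete_done once the count reaches zero so close can finish. A userspace model with a plain atomic and a condition variable (the kernel's percpu_ref trades this simplicity for per-CPU scalability):

#include <pthread.h>
#include <stdatomic.h>

static atomic_int      inflight = 1;	/* 1 == the path's own reference */
static pthread_mutex_t mu   = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;

static void get_ops_id(void)
{
	atomic_fetch_add(&inflight, 1);
}

static void put_ops_id(void)
{
	if (atomic_fetch_sub(&inflight, 1) == 1) {	/* last reference */
		pthread_mutex_lock(&mu);
		pthread_cond_signal(&done);	/* complete_done analogue */
		pthread_mutex_unlock(&mu);
	}
}

static void wait_inflight_drained(void)
{
	put_ops_id();			/* drop the initial reference */
	pthread_mutex_lock(&mu);
	while (atomic_load(&inflight) > 0)
		pthread_cond_wait(&done, &mu);
	pthread_mutex_unlock(&mu);
}
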
static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "REG MR failed: %s\n",
ib_wc_status_msg(wc->status));
- close_sess(sess);
+ close_path(srv_path);
return;
}
}
@@ -197,9 +200,9 @@ static struct ib_cqe local_reg_cqe = {
static int rdma_write_sg(struct rtrs_srv_op *id)
{
- struct rtrs_sess *s = id->con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
+ struct rtrs_path *s = id->con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id];
struct rtrs_srv_mr *srv_mr;
struct ib_send_wr inv_wr;
struct ib_rdma_wr imm_wr;
@@ -233,7 +236,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
return -EINVAL;
}
- plist->lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
offset += plist->length;
wr->wr.sg_list = plist;
@@ -284,7 +287,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
if (always_invalidate) {
struct rtrs_msg_rkey_rsp *msg;
- srv_mr = &sess->mrs[id->msg_id];
+ srv_mr = &srv_path->mrs[id->msg_id];
rwr.wr.opcode = IB_WR_REG_MR;
rwr.wr.wr_cqe = &local_reg_cqe;
rwr.wr.num_sge = 0;
@@ -300,11 +303,11 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
- list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
imm_wr.wr.sg_list = &list;
imm_wr.wr.num_sge = 1;
imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
@@ -317,7 +320,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
0, need_inval));
imm_wr.wr.wr_cqe = &io_comp_cqe;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr,
offset, DMA_BIDIRECTIONAL);
err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
@@ -341,8 +344,8 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
int errno)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct ib_send_wr inv_wr, *wr = NULL;
struct ib_rdma_wr imm_wr;
struct ib_reg_wr rwr;
@@ -402,7 +405,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;
- srv_mr = &sess->mrs[id->msg_id];
+ srv_mr = &srv_path->mrs[id->msg_id];
rwr.wr.next = &imm_wr.wr;
rwr.wr.opcode = IB_WR_REG_MR;
rwr.wr.wr_cqe = &local_reg_cqe;
@@ -419,11 +422,11 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
- list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
imm_wr.wr.sg_list = &list;
imm_wr.wr.num_sge = 1;
imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
@@ -444,11 +447,11 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
return err;
}
-void close_sess(struct rtrs_srv_sess *sess)
+void close_path(struct rtrs_srv_path *srv_path)
{
- if (rtrs_srv_change_state(sess, RTRS_SRV_CLOSING))
- queue_work(rtrs_wq, &sess->close_work);
- WARN_ON(sess->state != RTRS_SRV_CLOSING);
+ if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
+ queue_work(rtrs_wq, &srv_path->close_work);
+ WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
}
static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
@@ -480,35 +483,35 @@ static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
*/
bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
struct rtrs_srv_con *con;
- struct rtrs_sess *s;
+ struct rtrs_path *s;
int err;
if (WARN_ON(!id))
return true;
con = id->con;
- s = con->c.sess;
- sess = to_srv_sess(s);
+ s = con->c.path;
+ srv_path = to_srv_path(s);
id->status = status;
- if (sess->state != RTRS_SRV_CONNECTED) {
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
- "Sending I/O response failed, session %s is disconnected, sess state %s\n",
- kobject_name(&sess->kobj),
- rtrs_srv_state_str(sess->state));
+ "Sending I/O response failed, server path %s is disconnected, path state %s\n",
+ kobject_name(&srv_path->kobj),
+ rtrs_srv_state_str(srv_path->state));
goto out;
}
if (always_invalidate) {
- struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id];
+ struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id];
ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
}
if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
- rtrs_err(s, "IB send queue full: sess=%s cid=%d\n",
- kobject_name(&sess->kobj),
+ rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n",
+ kobject_name(&srv_path->kobj),
con->c.cid);
atomic_add(1, &con->c.sq_wr_avail);
spin_lock(&con->rsp_wr_wait_lock);
@@ -523,12 +526,12 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
err = rdma_write_sg(id);
if (err) {
- rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err,
- kobject_name(&sess->kobj));
- close_sess(sess);
+ rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err,
+ kobject_name(&srv_path->kobj));
+ close_path(srv_path);
}
out:
- rtrs_srv_put_ops_ids(sess);
+ rtrs_srv_put_ops_ids(srv_path);
return true;
}
EXPORT_SYMBOL(rtrs_srv_resp_rdma);
@@ -538,33 +541,33 @@ EXPORT_SYMBOL(rtrs_srv_resp_rdma);
* @srv: Session pointer
* @priv: The private pointer that is associated with the session.
*/
-void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv)
+void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv)
{
srv->priv = priv;
}
EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
-static void unmap_cont_bufs(struct rtrs_srv_sess *sess)
+static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
{
int i;
- for (i = 0; i < sess->mrs_num; i++) {
+ for (i = 0; i < srv_path->mrs_num; i++) {
struct rtrs_srv_mr *srv_mr;
- srv_mr = &sess->mrs[i];
- rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
+ srv_mr = &srv_path->mrs[i];
+ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
ib_dereg_mr(srv_mr->mr);
- ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl,
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
sg_free_table(&srv_mr->sgt);
}
- kfree(sess->mrs);
+ kfree(srv_path->mrs);
}
-static int map_cont_bufs(struct rtrs_srv_sess *sess)
+static int map_cont_bufs(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *ss = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *ss = &srv_path->s;
int i, mri, err, mrs_num;
unsigned int chunk_bits;
int chunks_per_mr = 1;
@@ -581,19 +584,19 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
mrs_num = srv->queue_depth;
} else {
chunks_per_mr =
- sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
+ srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
}
- sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL);
- if (!sess->mrs)
+ srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL);
+ if (!srv_path->mrs)
return -ENOMEM;
- sess->mrs_num = mrs_num;
+ srv_path->mrs_num = mrs_num;
for (mri = 0; mri < mrs_num; mri++) {
- struct rtrs_srv_mr *srv_mr = &sess->mrs[mri];
+ struct rtrs_srv_mr *srv_mr = &srv_path->mrs[mri];
struct sg_table *sgt = &srv_mr->sgt;
struct scatterlist *s;
struct ib_mr *mr;
@@ -612,13 +615,13 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
sg_set_page(s, srv->chunks[chunks + i],
max_chunk_size, 0);
- nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl,
+ nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
if (nr < sgt->nents) {
err = nr < 0 ? nr : -EINVAL;
goto free_sg;
}
- mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
sgt->nents);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
@@ -634,7 +637,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
if (always_invalidate) {
srv_mr->iu = rtrs_iu_alloc(1,
sizeof(struct rtrs_msg_rkey_rsp),
- GFP_KERNEL, sess->s.dev->ib_dev,
+ GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_TO_DEVICE, rtrs_srv_rdma_done);
if (!srv_mr->iu) {
err = -ENOMEM;
@@ -644,7 +647,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
}
/* Eventually dma addr for each chunk can be cached */
for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
- sess->dma_addr[chunks + i] = sg_dma_address(s);
+ srv_path->dma_addr[chunks + i] = sg_dma_address(s);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
srv_mr->mr = mr;
@@ -652,75 +655,75 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
continue;
err:
while (mri--) {
- srv_mr = &sess->mrs[mri];
+ srv_mr = &srv_path->mrs[mri];
sgt = &srv_mr->sgt;
mr = srv_mr->mr;
- rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
dereg_mr:
ib_dereg_mr(mr);
unmap_sg:
- ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl,
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
free_sg:
sg_free_table(sgt);
}
- kfree(sess->mrs);
+ kfree(srv_path->mrs);
return err;
}
chunk_bits = ilog2(srv->queue_depth - 1) + 1;
- sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
+ srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
return 0;
}
static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
{
- close_sess(to_srv_sess(c->sess));
+ close_path(to_srv_path(c->path));
}
-static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess)
+static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path)
{
- rtrs_init_hb(&sess->s, &io_comp_cqe,
+ rtrs_init_hb(&srv_path->s, &io_comp_cqe,
RTRS_HB_INTERVAL_MS,
RTRS_HB_MISSED_MAX,
rtrs_srv_hb_err_handler,
rtrs_wq);
}
-static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess)
+static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path)
{
- rtrs_start_hb(&sess->s);
+ rtrs_start_hb(&srv_path->s);
}
-static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess)
+static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path)
{
- rtrs_stop_hb(&sess->s);
+ rtrs_stop_hb(&srv_path->s);
}
static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Sess info response send failed: %s\n",
ib_wc_status_msg(wc->status));
- close_sess(sess);
+ close_path(srv_path);
return;
}
WARN_ON(wc->opcode != IB_WC_SEND);
}
-static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
+static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
int up;
@@ -731,18 +734,18 @@ static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
mutex_unlock(&srv->paths_ev_mutex);
/* Mark session as established */
- sess->established = true;
+ srv_path->established = true;
}
-static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
+static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
- if (!sess->established)
+ if (!srv_path->established)
return;
- sess->established = false;
+ srv_path->established = false;
mutex_lock(&srv->paths_ev_mutex);
WARN_ON(!srv->paths_up);
if (--srv->paths_up == 0)
@@ -750,11 +753,11 @@ static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
mutex_unlock(&srv->paths_ev_mutex);
}
-static bool exist_sessname(struct rtrs_srv_ctx *ctx,
- const char *sessname, const uuid_t *path_uuid)
+static bool exist_pathname(struct rtrs_srv_ctx *ctx,
+ const char *pathname, const uuid_t *path_uuid)
{
- struct rtrs_srv *srv;
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_sess *srv;
+ struct rtrs_srv_path *srv_path;
bool found = false;
mutex_lock(&ctx->srv_mutex);
@@ -767,9 +770,9 @@ static bool exist_sessname(struct rtrs_srv_ctx *ctx,
continue;
}
- list_for_each_entry(sess, &srv->paths_list, s.entry) {
- if (strlen(sess->s.sessname) == strlen(sessname) &&
- !strcmp(sess->s.sessname, sessname)) {
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (strlen(srv_path->s.sessname) == strlen(pathname) &&
+ !strcmp(srv_path->s.sessname, pathname)) {
found = true;
break;
}
@@ -782,14 +785,14 @@ static bool exist_sessname(struct rtrs_srv_ctx *ctx,
return found;
}
-static int post_recv_sess(struct rtrs_srv_sess *sess);
+static int post_recv_path(struct rtrs_srv_path *srv_path);
static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
static int process_info_req(struct rtrs_srv_con *con,
struct rtrs_msg_info_req *msg)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct ib_send_wr *reg_wr = NULL;
struct rtrs_msg_info_rsp *rsp;
struct rtrs_iu *tx_iu;
@@ -797,31 +800,32 @@ static int process_info_req(struct rtrs_srv_con *con,
int mri, err;
size_t tx_sz;
- err = post_recv_sess(sess);
+ err = post_recv_path(srv_path);
if (err) {
- rtrs_err(s, "post_recv_sess(), err: %d\n", err);
+ rtrs_err(s, "post_recv_path(), err: %d\n", err);
return err;
}
- if (strchr(msg->sessname, '/') || strchr(msg->sessname, '.')) {
- rtrs_err(s, "sessname cannot contain / and .\n");
+ if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) {
+ rtrs_err(s, "pathname cannot contain / and .\n");
return -EINVAL;
}
- if (exist_sessname(sess->srv->ctx,
- msg->sessname, &sess->srv->paths_uuid)) {
- rtrs_err(s, "sessname is duplicated: %s\n", msg->sessname);
+ if (exist_pathname(srv_path->srv->ctx,
+ msg->pathname, &srv_path->srv->paths_uuid)) {
+ rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname);
return -EPERM;
}
- strscpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));
+ strscpy(srv_path->s.sessname, msg->pathname,
+ sizeof(srv_path->s.sessname));
- rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL);
+ rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL);
if (!rwr)
return -ENOMEM;
tx_sz = sizeof(*rsp);
- tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num;
- tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num;
+ tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
if (!tx_iu) {
err = -ENOMEM;
@@ -830,10 +834,10 @@ static int process_info_req(struct rtrs_srv_con *con,
rsp = tx_iu->buf;
rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
- rsp->sg_cnt = cpu_to_le16(sess->mrs_num);
+ rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num);
- for (mri = 0; mri < sess->mrs_num; mri++) {
- struct ib_mr *mr = sess->mrs[mri].mr;
+ for (mri = 0; mri < srv_path->mrs_num; mri++) {
+ struct ib_mr *mr = srv_path->mrs[mri].mr;
rsp->desc[mri].addr = cpu_to_le64(mr->iova);
rsp->desc[mri].key = cpu_to_le32(mr->rkey);
@@ -854,13 +858,13 @@ static int process_info_req(struct rtrs_srv_con *con,
reg_wr = &rwr[mri].wr;
}
- err = rtrs_srv_create_sess_files(sess);
+ err = rtrs_srv_create_path_files(srv_path);
if (err)
goto iu_free;
- kobject_get(&sess->kobj);
- get_device(&sess->srv->dev);
- rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED);
- rtrs_srv_start_hb(sess);
+ kobject_get(&srv_path->kobj);
+ get_device(&srv_path->srv->dev);
+ rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
+ rtrs_srv_start_hb(srv_path);
/*
* We do not account number of established connections at the current
@@ -868,9 +872,10 @@ static int process_info_req(struct rtrs_srv_con *con,
* all connections are successfully established. Thus, simply notify
* listener with a proper event if we are the first path.
*/
- rtrs_srv_sess_up(sess);
+ rtrs_srv_path_up(srv_path);
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
tx_iu->size, DMA_TO_DEVICE);
/* Send info response */
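The first-path notification mentioned in the comment above is counted just outside this hunk: the session keeps a paths_up counter under paths_ev_mutex, and only the 0 -> 1 transition raises a link event to the upper layer. A minimal sketch of that shape, assuming rtrs-srv.c's own ctx->ops.link_ev hook and event names:

    /* Sketch; assumes rtrs-srv.c's own types and locking. */
    static void demo_first_path_up(struct rtrs_srv_sess *srv)
    {
            struct rtrs_srv_ctx *ctx = srv->ctx;

            mutex_lock(&srv->paths_ev_mutex);
            /* Only the very first established path notifies the ULP. */
            if (++srv->paths_up == 1)
                    ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
            mutex_unlock(&srv->paths_ev_mutex);
    }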
@@ -878,7 +883,7 @@ static int process_info_req(struct rtrs_srv_con *con,
if (err) {
rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
iu_free:
- rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1);
}
rwr_free:
kfree(rwr);
@@ -889,8 +894,8 @@ rwr_free:
static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_msg_info_req *msg;
struct rtrs_iu *iu;
int err;
@@ -910,7 +915,7 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
wc->byte_len);
goto close;
}
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) {
@@ -923,22 +928,22 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
goto close;
out:
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
return;
close:
- close_sess(sess);
+ close_path(srv_path);
goto out;
}
static int post_recv_info_req(struct rtrs_srv_con *con)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_iu *rx_iu;
int err;
rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
- GFP_KERNEL, sess->s.dev->ib_dev,
+ GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_FROM_DEVICE, rtrs_srv_info_req_done);
if (!rx_iu)
return -ENOMEM;
@@ -946,7 +951,7 @@ static int post_recv_info_req(struct rtrs_srv_con *con)
err = rtrs_iu_post_recv(&con->c, rx_iu);
if (err) {
rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
- rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1);
return err;
}
@@ -966,20 +971,20 @@ static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
return 0;
}
-static int post_recv_sess(struct rtrs_srv_sess *sess)
+static int post_recv_path(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
size_t q_size;
int err, cid;
- for (cid = 0; cid < sess->s.con_num; cid++) {
+ for (cid = 0; cid < srv_path->s.con_num; cid++) {
if (cid == 0)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
q_size = srv->queue_depth;
- err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size);
+ err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
if (err) {
rtrs_err(s, "post_recv_io(), err: %d\n", err);
return err;
@@ -993,9 +998,9 @@ static void process_read(struct rtrs_srv_con *con,
struct rtrs_msg_rdma_read *msg,
u32 buf_id, u32 off)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
struct rtrs_srv_op *id;
@@ -1003,10 +1008,10 @@ static void process_read(struct rtrs_srv_con *con,
void *data;
int ret;
- if (sess->state != RTRS_SRV_CONNECTED) {
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Processing read request failed, session is disconnected, sess state %s\n",
- rtrs_srv_state_str(sess->state));
+ rtrs_srv_state_str(srv_path->state));
return;
}
if (msg->sg_cnt != 1 && msg->sg_cnt != 0) {
@@ -1014,9 +1019,9 @@ static void process_read(struct rtrs_srv_con *con,
"Processing read request failed, invalid message\n");
return;
}
- rtrs_srv_get_ops_ids(sess);
- rtrs_srv_update_rdma_stats(sess->stats, off, READ);
- id = sess->ops_ids[buf_id];
+ rtrs_srv_get_ops_ids(srv_path);
+ rtrs_srv_update_rdma_stats(srv_path->stats, off, READ);
+ id = srv_path->ops_ids[buf_id];
id->con = con;
id->dir = READ;
id->msg_id = buf_id;
@@ -1042,18 +1047,18 @@ send_err_msg:
rtrs_err_rl(s,
"Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
buf_id, ret);
- close_sess(sess);
+ close_path(srv_path);
}
- rtrs_srv_put_ops_ids(sess);
+ rtrs_srv_put_ops_ids(srv_path);
}
static void process_write(struct rtrs_srv_con *con,
struct rtrs_msg_rdma_write *req,
u32 buf_id, u32 off)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
struct rtrs_srv_op *id;
@@ -1061,15 +1066,15 @@ static void process_write(struct rtrs_srv_con *con,
void *data;
int ret;
- if (sess->state != RTRS_SRV_CONNECTED) {
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Processing write request failed, session is disconnected, sess state %s\n",
- rtrs_srv_state_str(sess->state));
+ rtrs_srv_state_str(srv_path->state));
return;
}
- rtrs_srv_get_ops_ids(sess);
- rtrs_srv_update_rdma_stats(sess->stats, off, WRITE);
- id = sess->ops_ids[buf_id];
+ rtrs_srv_get_ops_ids(srv_path);
+ rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE);
+ id = srv_path->ops_ids[buf_id];
id->con = con;
id->dir = WRITE;
id->msg_id = buf_id;
@@ -1094,20 +1099,21 @@ send_err_msg:
rtrs_err_rl(s,
"Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
buf_id, ret);
- close_sess(sess);
+ close_path(srv_path);
}
- rtrs_srv_put_ops_ids(sess);
+ rtrs_srv_put_ops_ids(srv_path);
}
static void process_io_req(struct rtrs_srv_con *con, void *msg,
u32 id, u32 off)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_msg_rdma_hdr *hdr;
unsigned int type;
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id],
+ ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev,
+ srv_path->dma_addr[id],
max_chunk_size, DMA_BIDIRECTIONAL);
hdr = msg;
type = le16_to_cpu(hdr->type);
@@ -1129,7 +1135,7 @@ static void process_io_req(struct rtrs_srv_con *con, void *msg,
return;
err:
- close_sess(sess);
+ close_path(srv_path);
}
static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1137,16 +1143,16 @@ static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_srv_mr *mr =
container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
u32 msg_id, off;
void *data;
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
- close_sess(sess);
+ close_path(srv_path);
}
msg_id = mr->msg_id;
off = mr->msg_off;
@@ -1194,9 +1200,9 @@ static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
u32 imm_type, imm_payload;
int err;
@@ -1206,7 +1212,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
"%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
ib_wc_status_msg(wc->status), wc->wr_cqe,
wc->opcode, wc->vendor_err, wc->byte_len);
- close_sess(sess);
+ close_path(srv_path);
}
return;
}
@@ -1222,7 +1228,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
- close_sess(sess);
+ close_path(srv_path);
break;
}
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
@@ -1231,16 +1237,16 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
u32 msg_id, off;
void *data;
- msg_id = imm_payload >> sess->mem_bits;
- off = imm_payload & ((1 << sess->mem_bits) - 1);
+ msg_id = imm_payload >> srv_path->mem_bits;
+ off = imm_payload & ((1 << srv_path->mem_bits) - 1);
if (msg_id >= srv->queue_depth || off >= max_chunk_size) {
rtrs_err(s, "Wrong msg_id %u, off %u\n",
msg_id, off);
- close_sess(sess);
+ close_path(srv_path);
return;
}
if (always_invalidate) {
- struct rtrs_srv_mr *mr = &sess->mrs[msg_id];
+ struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id];
mr->msg_off = off;
mr->msg_id = msg_id;
@@ -1248,7 +1254,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n",
err);
- close_sess(sess);
+ close_path(srv_path);
break;
}
} else {
@@ -1257,10 +1263,10 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
}
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
- rtrs_send_hb_ack(&sess->s);
+ rtrs_send_hb_ack(&srv_path->s);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
- sess->s.hb_missed_cnt = 0;
+ srv_path->s.hb_missed_cnt = 0;
} else {
rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
}
@@ -1284,22 +1290,23 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
}
/**
- * rtrs_srv_get_sess_name() - Get rtrs_srv peer hostname.
+ * rtrs_srv_get_path_name() - Get rtrs_srv peer hostname.
* @srv: Session
- * @sessname: Sessname buffer
+ * @pathname: Pathname buffer
* @len: Length of sessname buffer
*/
-int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
+int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname,
+ size_t len)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int err = -ENOTCONN;
mutex_lock(&srv->paths_mutex);
- list_for_each_entry(sess, &srv->paths_list, s.entry) {
- if (sess->state != RTRS_SRV_CONNECTED)
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (srv_path->state != RTRS_SRV_CONNECTED)
continue;
- strscpy(sessname, sess->s.sessname,
- min_t(size_t, sizeof(sess->s.sessname), len));
+ strscpy(pathname, srv_path->s.sessname,
+ min_t(size_t, sizeof(srv_path->s.sessname), len));
err = 0;
break;
}
@@ -1307,44 +1314,45 @@ int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
return err;
}
-EXPORT_SYMBOL(rtrs_srv_get_sess_name);
+EXPORT_SYMBOL(rtrs_srv_get_path_name);
/**
* rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
* @srv: Session
*/
-int rtrs_srv_get_queue_depth(struct rtrs_srv *srv)
+int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv)
{
return srv->queue_depth;
}
EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
-static int find_next_bit_ring(struct rtrs_srv_sess *sess)
+static int find_next_bit_ring(struct rtrs_srv_path *srv_path)
{
- struct ib_device *ib_dev = sess->s.dev->ib_dev;
+ struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
int v;
- v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask);
+ v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
v = cpumask_first(&cq_affinity_mask);
return v;
}
-static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
+static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path)
{
- sess->cur_cq_vector = find_next_bit_ring(sess);
+ srv_path->cur_cq_vector = find_next_bit_ring(srv_path);
- return sess->cur_cq_vector;
+ return srv_path->cur_cq_vector;
}
static void rtrs_srv_dev_release(struct device *dev)
{
- struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
+ struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess,
+ dev);
kfree(srv);
}
-static void free_srv(struct rtrs_srv *srv)
+static void free_srv(struct rtrs_srv_sess *srv)
{
int i;
@@ -1358,11 +1366,11 @@ static void free_srv(struct rtrs_srv *srv)
put_device(&srv->dev);
}
-static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
const uuid_t *paths_uuid,
bool first_conn)
{
- struct rtrs_srv *srv;
+ struct rtrs_srv_sess *srv;
int i;
mutex_lock(&ctx->srv_mutex);
@@ -1424,7 +1432,7 @@ err_free_srv:
return ERR_PTR(-ENOMEM);
}
-static void put_srv(struct rtrs_srv *srv)
+static void put_srv(struct rtrs_srv_sess *srv)
{
if (refcount_dec_and_test(&srv->refcount)) {
struct rtrs_srv_ctx *ctx = srv->ctx;
@@ -1438,23 +1446,23 @@ static void put_srv(struct rtrs_srv *srv)
}
}
-static void __add_path_to_srv(struct rtrs_srv *srv,
- struct rtrs_srv_sess *sess)
+static void __add_path_to_srv(struct rtrs_srv_sess *srv,
+ struct rtrs_srv_path *srv_path)
{
- list_add_tail(&sess->s.entry, &srv->paths_list);
+ list_add_tail(&srv_path->s.entry, &srv->paths_list);
srv->paths_num++;
WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
}
-static void del_path_from_srv(struct rtrs_srv_sess *sess)
+static void del_path_from_srv(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
if (WARN_ON(!srv))
return;
mutex_lock(&srv->paths_mutex);
- list_del(&sess->s.entry);
+ list_del(&srv_path->s.entry);
WARN_ON(!srv->paths_num);
srv->paths_num--;
mutex_unlock(&srv->paths_mutex);
@@ -1484,47 +1492,47 @@ static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
}
}
-static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
+static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv,
struct rdma_addr *addr)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- list_for_each_entry(sess, &srv->paths_list, s.entry)
- if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr,
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry)
+ if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr,
(struct sockaddr *)&addr->dst_addr) &&
- !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr,
+ !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr,
(struct sockaddr *)&addr->src_addr))
return true;
return false;
}
-static void free_sess(struct rtrs_srv_sess *sess)
+static void free_path(struct rtrs_srv_path *srv_path)
{
- if (sess->kobj.state_in_sysfs) {
- kobject_del(&sess->kobj);
- kobject_put(&sess->kobj);
+ if (srv_path->kobj.state_in_sysfs) {
+ kobject_del(&srv_path->kobj);
+ kobject_put(&srv_path->kobj);
} else {
- kfree(sess->stats);
- kfree(sess);
+ kfree(srv_path->stats);
+ kfree(srv_path);
}
}
static void rtrs_srv_close_work(struct work_struct *work)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
struct rtrs_srv_con *con;
int i;
- sess = container_of(work, typeof(*sess), close_work);
+ srv_path = container_of(work, typeof(*srv_path), close_work);
- rtrs_srv_destroy_sess_files(sess);
- rtrs_srv_stop_hb(sess);
+ rtrs_srv_destroy_path_files(srv_path);
+ rtrs_srv_stop_hb(srv_path);
- for (i = 0; i < sess->s.con_num; i++) {
- if (!sess->s.con[i])
+ for (i = 0; i < srv_path->s.con_num; i++) {
+ if (!srv_path->s.con[i])
continue;
- con = to_srv_con(sess->s.con[i]);
+ con = to_srv_con(srv_path->s.con[i]);
rdma_disconnect(con->c.cm_id);
ib_drain_qp(con->c.qp);
}
@@ -1533,41 +1541,41 @@ static void rtrs_srv_close_work(struct work_struct *work)
* Degrade ref count to the usual model with a single shared
* atomic_t counter
*/
- percpu_ref_kill(&sess->ids_inflight_ref);
+ percpu_ref_kill(&srv_path->ids_inflight_ref);
/* Wait for all completion */
- wait_for_completion(&sess->complete_done);
+ wait_for_completion(&srv_path->complete_done);
/* Notify upper layer if we are the last path */
- rtrs_srv_sess_down(sess);
+ rtrs_srv_path_down(srv_path);
- unmap_cont_bufs(sess);
- rtrs_srv_free_ops_ids(sess);
+ unmap_cont_bufs(srv_path);
+ rtrs_srv_free_ops_ids(srv_path);
- for (i = 0; i < sess->s.con_num; i++) {
- if (!sess->s.con[i])
+ for (i = 0; i < srv_path->s.con_num; i++) {
+ if (!srv_path->s.con[i])
continue;
- con = to_srv_con(sess->s.con[i]);
+ con = to_srv_con(srv_path->s.con[i]);
rtrs_cq_qp_destroy(&con->c);
rdma_destroy_id(con->c.cm_id);
kfree(con);
}
- rtrs_ib_dev_put(sess->s.dev);
+ rtrs_ib_dev_put(srv_path->s.dev);
- del_path_from_srv(sess);
- put_srv(sess->srv);
- sess->srv = NULL;
- rtrs_srv_change_state(sess, RTRS_SRV_CLOSED);
+ del_path_from_srv(srv_path);
+ put_srv(srv_path->srv);
+ srv_path->srv = NULL;
+ rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);
- kfree(sess->dma_addr);
- kfree(sess->s.con);
- free_sess(sess);
+ kfree(srv_path->dma_addr);
+ kfree(srv_path->s.con);
+ free_path(srv_path);
}
-static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess,
+static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path,
struct rdma_cm_id *cm_id)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_msg_conn_rsp msg;
struct rdma_conn_param param;
int err;
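The close path above relies on the usual percpu_ref shutdown idiom: percpu_ref_kill() switches the counter to atomic mode and drops the initial reference, and once every in-flight op has done its put, the ref's release callback completes complete_done. A condensed sketch of the pattern (the helper itself is hypothetical; names are from this file):

    #include <linux/percpu-refcount.h>
    #include <linux/completion.h>

    static void drain_inflight_ops(struct percpu_ref *inflight_ref,
                                   struct completion *complete_done)
    {
            percpu_ref_kill(inflight_ref);  /* no new get_ops_ids() */
            /* Completed when the last rtrs_srv_put_ops_ids() drops it. */
            wait_for_completion(complete_done);
    }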
@@ -1615,25 +1623,25 @@ static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
return errno;
}
-static struct rtrs_srv_sess *
-__find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid)
+static struct rtrs_srv_path *
+__find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- list_for_each_entry(sess, &srv->paths_list, s.entry) {
- if (uuid_equal(&sess->s.uuid, sess_uuid))
- return sess;
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (uuid_equal(&srv_path->s.uuid, sess_uuid))
+ return srv_path;
}
return NULL;
}
-static int create_con(struct rtrs_srv_sess *sess,
+static int create_con(struct rtrs_srv_path *srv_path,
struct rdma_cm_id *cm_id,
unsigned int cid)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
struct rtrs_srv_con *con;
u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
@@ -1648,10 +1656,10 @@ static int create_con(struct rtrs_srv_sess *sess,
spin_lock_init(&con->rsp_wr_wait_lock);
INIT_LIST_HEAD(&con->rsp_wr_wait_list);
con->c.cm_id = cm_id;
- con->c.sess = &sess->s;
+ con->c.path = &srv_path->s;
con->c.cid = cid;
atomic_set(&con->c.wr_cnt, 1);
- wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr;
if (con->c.cid == 0) {
/*
@@ -1684,10 +1692,10 @@ static int create_con(struct rtrs_srv_sess *sess,
}
cq_num = max_send_wr + max_recv_wr;
atomic_set(&con->c.sq_wr_avail, max_send_wr);
- cq_vector = rtrs_srv_get_next_cq_vector(sess);
+ cq_vector = rtrs_srv_get_next_cq_vector(srv_path);
/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
- err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_num,
+ err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num,
max_send_wr, max_recv_wr,
IB_POLL_WORKQUEUE);
if (err) {
@@ -1699,8 +1707,8 @@ static int create_con(struct rtrs_srv_sess *sess,
if (err)
goto free_cqqp;
}
- WARN_ON(sess->s.con[cid]);
- sess->s.con[cid] = &con->c;
+ WARN_ON(srv_path->s.con[cid]);
+ srv_path->s.con[cid] = &con->c;
/*
* Change context from server to current connection. The other
@@ -1719,13 +1727,13 @@ err:
return err;
}
-static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
+static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
struct rdma_cm_id *cm_id,
unsigned int con_num,
unsigned int recon_cnt,
const uuid_t *uuid)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int err = -ENOMEM;
char str[NAME_MAX];
struct rtrs_addr path;
@@ -1739,74 +1747,76 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
pr_err("Path with same addr exists\n");
goto err;
}
- sess = kzalloc(sizeof(*sess), GFP_KERNEL);
- if (!sess)
+ srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL);
+ if (!srv_path)
goto err;
- sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
- if (!sess->stats)
+ srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL);
+ if (!srv_path->stats)
goto err_free_sess;
- sess->stats->sess = sess;
+ srv_path->stats->srv_path = srv_path;
- sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr),
- GFP_KERNEL);
- if (!sess->dma_addr)
+ srv_path->dma_addr = kcalloc(srv->queue_depth,
+ sizeof(*srv_path->dma_addr),
+ GFP_KERNEL);
+ if (!srv_path->dma_addr)
goto err_free_stats;
- sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
- if (!sess->s.con)
+ srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
+ GFP_KERNEL);
+ if (!srv_path->s.con)
goto err_free_dma_addr;
- sess->state = RTRS_SRV_CONNECTING;
- sess->srv = srv;
- sess->cur_cq_vector = -1;
- sess->s.dst_addr = cm_id->route.addr.dst_addr;
- sess->s.src_addr = cm_id->route.addr.src_addr;
+ srv_path->state = RTRS_SRV_CONNECTING;
+ srv_path->srv = srv;
+ srv_path->cur_cq_vector = -1;
+ srv_path->s.dst_addr = cm_id->route.addr.dst_addr;
+ srv_path->s.src_addr = cm_id->route.addr.src_addr;
/* temporary until receiving session-name from client */
- path.src = &sess->s.src_addr;
- path.dst = &sess->s.dst_addr;
+ path.src = &srv_path->s.src_addr;
+ path.dst = &srv_path->s.dst_addr;
rtrs_addr_to_str(&path, str, sizeof(str));
- strscpy(sess->s.sessname, str, sizeof(sess->s.sessname));
-
- sess->s.con_num = con_num;
- sess->s.irq_con_num = con_num;
- sess->s.recon_cnt = recon_cnt;
- uuid_copy(&sess->s.uuid, uuid);
- spin_lock_init(&sess->state_lock);
- INIT_WORK(&sess->close_work, rtrs_srv_close_work);
- rtrs_srv_init_hb(sess);
-
- sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
- if (!sess->s.dev) {
+ strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname));
+
+ srv_path->s.con_num = con_num;
+ srv_path->s.irq_con_num = con_num;
+ srv_path->s.recon_cnt = recon_cnt;
+ uuid_copy(&srv_path->s.uuid, uuid);
+ spin_lock_init(&srv_path->state_lock);
+ INIT_WORK(&srv_path->close_work, rtrs_srv_close_work);
+ rtrs_srv_init_hb(srv_path);
+
+ srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
+ if (!srv_path->s.dev) {
err = -ENOMEM;
goto err_free_con;
}
- err = map_cont_bufs(sess);
+ err = map_cont_bufs(srv_path);
if (err)
goto err_put_dev;
- err = rtrs_srv_alloc_ops_ids(sess);
+ err = rtrs_srv_alloc_ops_ids(srv_path);
if (err)
goto err_unmap_bufs;
- __add_path_to_srv(srv, sess);
+ __add_path_to_srv(srv, srv_path);
- return sess;
+ return srv_path;
err_unmap_bufs:
- unmap_cont_bufs(sess);
+ unmap_cont_bufs(srv_path);
err_put_dev:
- rtrs_ib_dev_put(sess->s.dev);
+ rtrs_ib_dev_put(srv_path->s.dev);
err_free_con:
- kfree(sess->s.con);
+ kfree(srv_path->s.con);
err_free_dma_addr:
- kfree(sess->dma_addr);
+ kfree(srv_path->dma_addr);
err_free_stats:
- kfree(sess->stats);
+ kfree(srv_path->stats);
err_free_sess:
- kfree(sess);
+ kfree(srv_path);
err:
return ERR_PTR(err);
}
@@ -1816,8 +1826,8 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
size_t len)
{
struct rtrs_srv_ctx *ctx = cm_id->context;
- struct rtrs_srv_sess *sess;
- struct rtrs_srv *srv;
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_srv_sess *srv;
u16 version, con_num, cid;
u16 recon_cnt;
@@ -1857,16 +1867,16 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
goto reject_w_err;
}
mutex_lock(&srv->paths_mutex);
- sess = __find_sess(srv, &msg->sess_uuid);
- if (sess) {
- struct rtrs_sess *s = &sess->s;
+ srv_path = __find_path(srv, &msg->sess_uuid);
+ if (srv_path) {
+ struct rtrs_path *s = &srv_path->s;
/* Session already holds a reference */
put_srv(srv);
- if (sess->state != RTRS_SRV_CONNECTING) {
+ if (srv_path->state != RTRS_SRV_CONNECTING) {
rtrs_err(s, "Session in wrong state: %s\n",
- rtrs_srv_state_str(sess->state));
+ rtrs_srv_state_str(srv_path->state));
mutex_unlock(&srv->paths_mutex);
goto reject_w_err;
}
@@ -1886,19 +1896,19 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
goto reject_w_err;
}
} else {
- sess = __alloc_sess(srv, cm_id, con_num, recon_cnt,
+ srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt,
&msg->sess_uuid);
- if (IS_ERR(sess)) {
+ if (IS_ERR(srv_path)) {
mutex_unlock(&srv->paths_mutex);
put_srv(srv);
- err = PTR_ERR(sess);
+ err = PTR_ERR(srv_path);
pr_err("RTRS server session allocation failed: %d\n", err);
goto reject_w_err;
}
}
- err = create_con(sess, cm_id, cid);
+ err = create_con(srv_path, cm_id, cid);
if (err) {
- rtrs_err((&sess->s), "create_con(), error %d\n", err);
+ rtrs_err((&srv_path->s), "create_con(), error %d\n", err);
rtrs_rdma_do_reject(cm_id, err);
/*
* Since session has other connections we follow normal way
@@ -1907,9 +1917,9 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
*/
goto close_and_return_err;
}
- err = rtrs_rdma_do_accept(sess, cm_id);
+ err = rtrs_rdma_do_accept(srv_path, cm_id);
if (err) {
- rtrs_err((&sess->s), "rtrs_rdma_do_accept(), error %d\n", err);
+ rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err);
rtrs_rdma_do_reject(cm_id, err);
/*
* Since current connection was successfully added to the
@@ -1929,7 +1939,7 @@ reject_w_err:
close_and_return_err:
mutex_unlock(&srv->paths_mutex);
- close_sess(sess);
+ close_path(srv_path);
return err;
}
@@ -1937,14 +1947,14 @@ close_and_return_err:
static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
- struct rtrs_srv_sess *sess = NULL;
- struct rtrs_sess *s = NULL;
+ struct rtrs_srv_path *srv_path = NULL;
+ struct rtrs_path *s = NULL;
if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
struct rtrs_con *c = cm_id->context;
- s = c->sess;
- sess = to_srv_sess(s);
+ s = c->path;
+ srv_path = to_srv_path(s);
}
switch (ev->event) {
@@ -1968,7 +1978,7 @@ static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
- close_sess(sess);
+ close_path(srv_path);
break;
default:
pr_err("Ignoring unexpected CM event %s, err %d\n",
@@ -2176,23 +2186,23 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
}
EXPORT_SYMBOL(rtrs_srv_open);
-static void close_sessions(struct rtrs_srv *srv)
+static void close_paths(struct rtrs_srv_sess *srv)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
mutex_lock(&srv->paths_mutex);
- list_for_each_entry(sess, &srv->paths_list, s.entry)
- close_sess(sess);
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry)
+ close_path(srv_path);
mutex_unlock(&srv->paths_mutex);
}
static void close_ctx(struct rtrs_srv_ctx *ctx)
{
- struct rtrs_srv *srv;
+ struct rtrs_srv_sess *srv;
mutex_lock(&ctx->srv_mutex);
list_for_each_entry(srv, &ctx->srv_list, ctx_list)
- close_sessions(srv);
+ close_paths(srv);
mutex_unlock(&ctx->srv_mutex);
flush_workqueue(rtrs_wq);
}
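Throughout rtrs-srv.c the conversion helper changes from to_srv_sess() to to_srv_path(), but the mechanics stay the same: the generic struct is embedded as the first member and container_of() recovers the wrapper. A self-contained userspace sketch of that pattern, with the struct layouts reduced to the essentials:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rtrs_path { int con_num; };          /* generic part */
    struct rtrs_srv_path {
            struct rtrs_path s;                 /* embedded first */
            int state;
    };

    static struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
    {
            return container_of(s, struct rtrs_srv_path, s);
    }

    int main(void)
    {
            struct rtrs_srv_path sp = { .state = 42 };

            printf("%d\n", to_srv_path(&sp.s)->state);  /* prints 42 */
            return 0;
    }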
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 7d403c12faf3..6292e87f6afd 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -37,7 +37,7 @@ struct rtrs_srv_stats_rdma_stats {
struct rtrs_srv_stats {
struct kobject kobj_stats;
struct rtrs_srv_stats_rdma_stats rdma_stats;
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
};
struct rtrs_srv_con {
@@ -71,9 +71,9 @@ struct rtrs_srv_mr {
struct rtrs_iu *iu; /* send buffer for new rkey msg */
};
-struct rtrs_srv_sess {
- struct rtrs_sess s;
- struct rtrs_srv *srv;
+struct rtrs_srv_path {
+ struct rtrs_path s;
+ struct rtrs_srv_sess *srv;
struct work_struct close_work;
enum rtrs_srv_state state;
spinlock_t state_lock;
@@ -90,7 +90,7 @@ struct rtrs_srv_sess {
struct rtrs_srv_stats *stats;
};
-struct rtrs_srv {
+struct rtrs_srv_sess {
struct list_head paths_list;
int paths_up;
struct mutex paths_ev_mutex;
@@ -125,7 +125,7 @@ struct rtrs_srv_ib_ctx {
extern struct class *rtrs_dev_class;
-void close_sess(struct rtrs_srv_sess *sess);
+void close_path(struct rtrs_srv_path *srv_path);
static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
size_t size, int d)
@@ -142,7 +142,7 @@ ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
char *page, size_t len);
/* functions which are implemented in rtrs-srv-sysfs.c */
-int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess);
-void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess);
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path);
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path);
#endif /* RTRS_SRV_H */
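The header makes the new ownership explicit: struct rtrs_srv_sess (the old rtrs_srv) owns paths_list, and each struct rtrs_srv_path (the old rtrs_srv_sess) embeds the generic rtrs_path as its s member. A hypothetical helper showing how the renamed types compose, using only fields that appear elsewhere in this patch:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include "rtrs-srv.h"

    /* Sketch only; not part of the patch. */
    static int count_connected_paths(struct rtrs_srv_sess *srv)
    {
            struct rtrs_srv_path *srv_path;
            int n = 0;

            mutex_lock(&srv->paths_mutex);
            list_for_each_entry(srv_path, &srv->paths_list, s.entry)
                    if (srv_path->state == RTRS_SRV_CONNECTED)
                            n++;
            mutex_unlock(&srv->paths_mutex);

            return n;
    }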
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 37952c8e768c..4da889103a5f 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -69,16 +69,16 @@ EXPORT_SYMBOL_GPL(rtrs_iu_free);
int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
{
- struct rtrs_sess *sess = con->sess;
+ struct rtrs_path *path = con->path;
struct ib_recv_wr wr;
struct ib_sge list;
list.addr = iu->dma_addr;
list.length = iu->size;
- list.lkey = sess->dev->ib_pd->local_dma_lkey;
+ list.lkey = path->dev->ib_pd->local_dma_lkey;
if (list.length == 0) {
- rtrs_wrn(con->sess,
+ rtrs_wrn(con->path,
"Posting receive work request failed, sg list is empty\n");
return -EINVAL;
}
@@ -126,7 +126,7 @@ static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head,
int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
struct ib_send_wr *head)
{
- struct rtrs_sess *sess = con->sess;
+ struct rtrs_path *path = con->path;
struct ib_send_wr wr;
struct ib_sge list;
@@ -135,7 +135,7 @@ int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
list.addr = iu->dma_addr;
list.length = size;
- list.lkey = sess->dev->ib_pd->local_dma_lkey;
+ list.lkey = path->dev->ib_pd->local_dma_lkey;
wr = (struct ib_send_wr) {
.wr_cqe = &iu->cqe,
@@ -188,11 +188,11 @@ static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con,
struct ib_send_wr *head)
{
struct ib_rdma_wr wr;
- struct rtrs_sess *sess = con->sess;
+ struct rtrs_path *path = con->path;
enum ib_send_flags sflags;
atomic_dec_if_positive(&con->sq_wr_avail);
- sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ?
+ sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ?
0 : IB_SEND_SIGNALED;
wr = (struct ib_rdma_wr) {
@@ -211,12 +211,12 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
switch (ev->event) {
case IB_EVENT_COMM_EST:
- rtrs_info(con->sess, "QP event %s (%d) received\n",
+ rtrs_info(con->path, "QP event %s (%d) received\n",
ib_event_msg(ev->event), ev->event);
rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
break;
default:
- rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n",
+ rtrs_info(con->path, "Unhandled QP event %s (%d) received\n",
ib_event_msg(ev->event), ev->event);
break;
}
@@ -224,7 +224,7 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
static bool is_pollqueue(struct rtrs_con *con)
{
- return con->cid >= con->sess->irq_con_num;
+ return con->cid >= con->path->irq_con_num;
}
static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
@@ -240,7 +240,7 @@ static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
if (IS_ERR(cq)) {
- rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
+ rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n",
PTR_ERR(cq));
return PTR_ERR(cq);
}
@@ -271,7 +271,7 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
ret = rdma_create_qp(cm_id, pd, &init_attr);
if (ret) {
- rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret);
+ rtrs_err(con->path, "Creating QP failed, err: %d\n", ret);
return ret;
}
con->qp = cm_id->qp;
@@ -290,7 +290,7 @@ static void destroy_cq(struct rtrs_con *con)
con->cq = NULL;
}
-int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
enum ib_poll_context poll_ctx)
@@ -301,13 +301,13 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
if (err)
return err;
- err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
+ err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr,
max_send_sge);
if (err) {
destroy_cq(con);
return err;
}
- con->sess = sess;
+ con->path = path;
return 0;
}
@@ -323,24 +323,24 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con)
}
EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
-static void schedule_hb(struct rtrs_sess *sess)
+static void schedule_hb(struct rtrs_path *path)
{
- queue_delayed_work(sess->hb_wq, &sess->hb_dwork,
- msecs_to_jiffies(sess->hb_interval_ms));
+ queue_delayed_work(path->hb_wq, &path->hb_dwork,
+ msecs_to_jiffies(path->hb_interval_ms));
}
-void rtrs_send_hb_ack(struct rtrs_sess *sess)
+void rtrs_send_hb_ack(struct rtrs_path *path)
{
- struct rtrs_con *usr_con = sess->con[0];
+ struct rtrs_con *usr_con = path->con[0];
u32 imm;
int err;
imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
- err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
NULL);
if (err) {
- rtrs_err(sess, "send HB ACK failed, errno: %d\n", err);
- sess->hb_err_handler(usr_con);
+ rtrs_err(path, "send HB ACK failed, errno: %d\n", err);
+ path->hb_err_handler(usr_con);
return;
}
}
@@ -349,63 +349,63 @@ EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
static void hb_work(struct work_struct *work)
{
struct rtrs_con *usr_con;
- struct rtrs_sess *sess;
+ struct rtrs_path *path;
u32 imm;
int err;
- sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork);
- usr_con = sess->con[0];
+ path = container_of(to_delayed_work(work), typeof(*path), hb_dwork);
+ usr_con = path->con[0];
- if (sess->hb_missed_cnt > sess->hb_missed_max) {
- rtrs_err(sess, "HB missed max reached.\n");
- sess->hb_err_handler(usr_con);
+ if (path->hb_missed_cnt > path->hb_missed_max) {
+ rtrs_err(path, "HB missed max reached.\n");
+ path->hb_err_handler(usr_con);
return;
}
- if (sess->hb_missed_cnt++) {
+ if (path->hb_missed_cnt++) {
/* Reschedule work without sending hb */
- schedule_hb(sess);
+ schedule_hb(path);
return;
}
- sess->hb_last_sent = ktime_get();
+ path->hb_last_sent = ktime_get();
imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
- err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
NULL);
if (err) {
- rtrs_err(sess, "HB send failed, errno: %d\n", err);
- sess->hb_err_handler(usr_con);
+ rtrs_err(path, "HB send failed, errno: %d\n", err);
+ path->hb_err_handler(usr_con);
return;
}
- schedule_hb(sess);
+ schedule_hb(path);
}
-void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
unsigned int interval_ms, unsigned int missed_max,
void (*err_handler)(struct rtrs_con *con),
struct workqueue_struct *wq)
{
- sess->hb_cqe = cqe;
- sess->hb_interval_ms = interval_ms;
- sess->hb_err_handler = err_handler;
- sess->hb_wq = wq;
- sess->hb_missed_max = missed_max;
- sess->hb_missed_cnt = 0;
- INIT_DELAYED_WORK(&sess->hb_dwork, hb_work);
+ path->hb_cqe = cqe;
+ path->hb_interval_ms = interval_ms;
+ path->hb_err_handler = err_handler;
+ path->hb_wq = wq;
+ path->hb_missed_max = missed_max;
+ path->hb_missed_cnt = 0;
+ INIT_DELAYED_WORK(&path->hb_dwork, hb_work);
}
EXPORT_SYMBOL_GPL(rtrs_init_hb);
-void rtrs_start_hb(struct rtrs_sess *sess)
+void rtrs_start_hb(struct rtrs_path *path)
{
- schedule_hb(sess);
+ schedule_hb(path);
}
EXPORT_SYMBOL_GPL(rtrs_start_hb);
-void rtrs_stop_hb(struct rtrs_sess *sess)
+void rtrs_stop_hb(struct rtrs_path *path)
{
- cancel_delayed_work_sync(&sess->hb_dwork);
- sess->hb_missed_cnt = 0;
+ cancel_delayed_work_sync(&path->hb_dwork);
+ path->hb_missed_cnt = 0;
}
EXPORT_SYMBOL_GPL(rtrs_stop_hb);
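The heartbeat here is a plain delayed-work loop: each run either sends a HB (when the missed counter was just reset by an ACK) or only increments the counter, and the error handler fires once hb_missed_max is exceeded. A stripped-down sketch of the same shape, with the RDMA posting elided and the container struct hypothetical:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct demo_hb {                    /* hypothetical container */
            struct delayed_work dwork;
            struct workqueue_struct *wq;
            unsigned int interval_ms;
            unsigned int missed_cnt, missed_max;
    };

    static void demo_hb_work(struct work_struct *work)
    {
            struct demo_hb *hb = container_of(to_delayed_work(work),
                                              struct demo_hb, dwork);

            if (hb->missed_cnt > hb->missed_max)
                    return;  /* real code: path->hb_err_handler(con) */
            if (hb->missed_cnt++ == 0) {
                    /* Real code posts the HB RDMA write here and
                     * bails out via hb_err_handler() on failure. */
            }
            queue_delayed_work(hb->wq, &hb->dwork,
                               msecs_to_jiffies(hb->interval_ms));
    }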
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index 859c79685daf..5e57a7ccc7fb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -13,9 +13,9 @@
#include <linux/scatterlist.h>
struct rtrs_permit;
-struct rtrs_clt;
+struct rtrs_clt_sess;
struct rtrs_srv_ctx;
-struct rtrs_srv;
+struct rtrs_srv_sess;
struct rtrs_srv_op;
/*
@@ -52,14 +52,14 @@ struct rtrs_clt_ops {
void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev);
};
-struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
- const char *sessname,
+struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *pathname,
const struct rtrs_addr *paths,
size_t path_cnt, u16 port,
size_t pdu_sz, u8 reconnect_delay_sec,
s16 max_reconnect_attempts, u32 nr_poll_queues);
-void rtrs_clt_close(struct rtrs_clt *sess);
+void rtrs_clt_close(struct rtrs_clt_sess *clt);
enum wait_type {
RTRS_PERMIT_NOWAIT = 0,
@@ -77,11 +77,12 @@ enum rtrs_clt_con_type {
RTRS_IO_CON
};
-struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess,
- enum rtrs_clt_con_type con_type,
- enum wait_type wait);
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_con_type con_type,
+ enum wait_type wait);
-void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit);
+void rtrs_clt_put_permit(struct rtrs_clt_sess *sess,
+ struct rtrs_permit *permit);
/**
* rtrs_clt_req_ops - it holds the request confirmation callback
@@ -98,10 +99,10 @@ struct rtrs_clt_req_ops {
};
int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
- struct rtrs_clt *sess, struct rtrs_permit *permit,
+ struct rtrs_clt_sess *sess, struct rtrs_permit *permit,
const struct kvec *vec, size_t nr, size_t len,
struct scatterlist *sg, unsigned int sg_cnt);
-int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index);
+int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index);
/**
* rtrs_attrs - RTRS session attributes
@@ -112,7 +113,7 @@ struct rtrs_attrs {
u32 max_segments;
};
-int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
+int rtrs_clt_query(struct rtrs_clt_sess *sess, struct rtrs_attrs *attr);
/*
* Here goes RTRS server API
@@ -163,7 +164,7 @@ struct rtrs_srv_ops {
* @priv: Private data from user if previously set with
* rtrs_srv_set_sess_priv()
*/
- int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev,
+ int (*link_ev)(struct rtrs_srv_sess *sess, enum rtrs_srv_link_ev ev,
void *priv);
};
@@ -173,11 +174,12 @@ void rtrs_srv_close(struct rtrs_srv_ctx *ctx);
bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno);
-void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv);
+void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *sess, void *priv);
-int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len);
+int rtrs_srv_get_path_name(struct rtrs_srv_sess *sess, char *pathname,
+ size_t len);
-int rtrs_srv_get_queue_depth(struct rtrs_srv *sess);
+int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *sess);
int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
struct rtrs_addr *addr);
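For ULPs the visible change is confined to type names: the opaque client and server handles become rtrs_clt_sess and rtrs_srv_sess. A hypothetical server-side link_ev callback against the renamed API, using event names defined elsewhere in rtrs.h:

    #include <linux/printk.h>
    #include "rtrs.h"

    static int demo_link_ev(struct rtrs_srv_sess *sess,
                            enum rtrs_srv_link_ev ev, void *priv)
    {
            char name[64];

            if (ev == RTRS_SRV_LINK_EV_CONNECTED &&
                !rtrs_srv_get_path_name(sess, name, sizeof(name)))
                    pr_info("path %s up, queue depth %d\n",
                            name, rtrs_srv_get_queue_depth(sess));

            return 0;
    }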
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 1cf5deda06e1..fa8d1a466014 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -67,7 +67,7 @@ static int compat_effect(struct ff_device *ff, struct ff_effect *effect)
effect->type = FF_PERIODIC;
effect->u.periodic.waveform = FF_SINE;
effect->u.periodic.period = 50;
- effect->u.periodic.magnitude = max(magnitude, 0x7fff);
+ effect->u.periodic.magnitude = magnitude;
effect->u.periodic.offset = 0;
effect->u.periodic.phase = 0;
effect->u.periodic.envelope.attack_length = 0;
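The ff-core change is a standalone bug fix: the magnitude computed for the compat periodic effect never exceeds 0x7fff, so max(magnitude, 0x7fff) always evaluated to 0x7fff and every rumble-to-periodic conversion played at full strength. A tiny standalone demonstration:

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            /* magnitude stays in 0..0x7fff, so max() pins the result */
            for (int m = 0; m <= 0x7fff; m += 0x2000)
                    printf("in=%#6x out=%#6x\n", m, max(m, 0x7fff));
            return 0;
    }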
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 8dbf1e69c90a..d75a8b179a8a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -247,7 +247,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
ssize_t error;
int i;
- bits = bitmap_zalloc(n_events, GFP_KERNEL);
+ bits = bitmap_alloc(n_events, GFP_KERNEL);
if (!bits)
return -ENOMEM;
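Switching bitmap_zalloc() to bitmap_alloc() is safe here because the buffer is completely written before it is read: bitmap_parselist(), called right after in this helper, zeroes the bitmap itself, so the extra memset was redundant work. The resulting pattern, sketched:

    #include <linux/bitmap.h>
    #include <linux/gfp.h>

    static ssize_t demo_parse(const char *buf, unsigned int n_events)
    {
            unsigned long *bits;
            ssize_t error;

            bits = bitmap_alloc(n_events, GFP_KERNEL); /* no pre-zero */
            if (!bits)
                    return -ENOMEM;

            /* bitmap_parselist() initializes every bit it covers. */
            error = bitmap_parselist(buf, bits, n_events);

            bitmap_free(bits);
            return error;
    }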
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index e09b1fae42e1..04da7916eb70 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -206,11 +206,8 @@ ATTRIBUTE_GROUPS(axp20x);
static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
{
- struct axp20x_pek *axp20x_pek = pwr;
- struct input_dev *idev = axp20x_pek->input;
-
- if (!idev)
- return IRQ_HANDLED;
+ struct input_dev *idev = pwr;
+ struct axp20x_pek *axp20x_pek = input_get_drvdata(idev);
/*
* The power-button is connected to ground so a falling edge (dbf)
@@ -229,9 +226,22 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
struct platform_device *pdev)
{
+ struct axp20x_dev *axp20x = axp20x_pek->axp20x;
struct input_dev *idev;
int error;
+ axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
+ if (axp20x_pek->irq_dbr < 0)
+ return axp20x_pek->irq_dbr;
+ axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc,
+ axp20x_pek->irq_dbr);
+
+ axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
+ if (axp20x_pek->irq_dbf < 0)
+ return axp20x_pek->irq_dbf;
+ axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc,
+ axp20x_pek->irq_dbf);
+
axp20x_pek->input = devm_input_allocate_device(&pdev->dev);
if (!axp20x_pek->input)
return -ENOMEM;
@@ -246,6 +256,24 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
input_set_drvdata(idev, axp20x_pek);
+ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
+ axp20x_pek_irq, 0,
+ "axp20x-pek-dbr", idev);
+ if (error < 0) {
+ dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
+ axp20x_pek->irq_dbr, error);
+ return error;
+ }
+
+ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
+ axp20x_pek_irq, 0,
+ "axp20x-pek-dbf", idev);
+ if (error < 0) {
+ dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
+ axp20x_pek->irq_dbf, error);
+ return error;
+ }
+
error = input_register_device(idev);
if (error) {
dev_err(&pdev->dev, "Can't register input device: %d\n",
@@ -253,6 +281,8 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
return error;
}
+ device_init_wakeup(&pdev->dev, true);
+
return 0;
}
@@ -293,18 +323,6 @@ static int axp20x_pek_probe(struct platform_device *pdev)
axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
- axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
- if (axp20x_pek->irq_dbr < 0)
- return axp20x_pek->irq_dbr;
- axp20x_pek->irq_dbr = regmap_irq_get_virq(
- axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr);
-
- axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
- if (axp20x_pek->irq_dbf < 0)
- return axp20x_pek->irq_dbf;
- axp20x_pek->irq_dbf = regmap_irq_get_virq(
- axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf);
-
if (axp20x_pek_should_register_input(axp20x_pek)) {
error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
if (error)
@@ -313,26 +331,6 @@ static int axp20x_pek_probe(struct platform_device *pdev)
axp20x_pek->info = (struct axp20x_info *)match->driver_data;
- error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
- axp20x_pek_irq, 0,
- "axp20x-pek-dbr", axp20x_pek);
- if (error < 0) {
- dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
- axp20x_pek->irq_dbr, error);
- return error;
- }
-
- error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
- axp20x_pek_irq, 0,
- "axp20x-pek-dbf", axp20x_pek);
- if (error < 0) {
- dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
- axp20x_pek->irq_dbf, error);
- return error;
- }
-
- device_init_wakeup(&pdev->dev, true);
-
platform_set_drvdata(pdev, axp20x_pek);
return 0;
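The axp20x-pek rework changes the IRQ cookie from the driver's private struct to the input device itself, and requests the IRQs inside the same function that allocates and registers that device, so the handler can no longer observe a half-initialized idev. The handler shape, as a sketch:

    #include <linux/input.h>
    #include <linux/interrupt.h>

    static irqreturn_t demo_pek_irq(int irq, void *pwr)
    {
            struct input_dev *idev = pwr;   /* cookie is now the idev */

            /* The real handler looks up its private data with
             * input_get_drvdata(idev) and distinguishes the falling
             * and rising edge IRQs to report press vs release. */
            input_report_key(idev, KEY_POWER, true);
            input_sync(idev);

            return IRQ_HANDLED;
    }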
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c
index f9b05cf09ff5..2213e06b611d 100644
--- a/drivers/input/misc/palmas-pwrbutton.c
+++ b/drivers/input/misc/palmas-pwrbutton.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -115,8 +116,8 @@ static void palmas_pwron_params_ofinit(struct device *dev,
struct device_node *np;
u32 val;
int i, error;
- u8 lpk_times[] = { 6, 8, 10, 12 };
- int pwr_on_deb_ms[] = { 15, 100, 500, 1000 };
+ static const u8 lpk_times[] = { 6, 8, 10, 12 };
+ static const int pwr_on_deb_ms[] = { 15, 100, 500, 1000 };
memset(config, 0, sizeof(*config));
@@ -192,8 +193,8 @@ static int palmas_pwron_probe(struct platform_device *pdev)
* Setup default hardware shutdown option (long key press)
* and debounce.
*/
- val = config.long_press_time_val << __ffs(PALMAS_LPK_TIME_MASK);
- val |= config.pwron_debounce_val << __ffs(PALMAS_PWRON_DEBOUNCE_MASK);
+ val = FIELD_PREP(PALMAS_LPK_TIME_MASK, config.long_press_time_val) |
+ FIELD_PREP(PALMAS_PWRON_DEBOUNCE_MASK, config.pwron_debounce_val);
error = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
PALMAS_LONG_PRESS_KEY,
PALMAS_LPK_TIME_MASK |
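FIELD_PREP(mask, val) replaces the open-coded "val << __ffs(mask)" and additionally gets compile-time checking that val fits the field. A worked standalone example with assumed mask values (the real PALMAS_* mask layouts may differ):

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace stand-in for the kernel's FIELD_PREP(). */
    #define FIELD_PREP(mask, val) \
            ((((uint32_t)(val)) << __builtin_ctz(mask)) & (mask))

    int main(void)
    {
            uint32_t lpk_mask = 0x0c;   /* assumed: bits 3:2 */
            uint32_t deb_mask = 0x03;   /* assumed: bits 1:0 */
            uint32_t val = FIELD_PREP(lpk_mask, 2) |
                           FIELD_PREP(deb_mask, 1);

            printf("reg value = %#x\n", val);   /* 0x8 | 0x1 = 0x9 */
            return 0;
    }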
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
index 6e0c5f5a2713..221a553f45cd 100644
--- a/drivers/input/mouse/byd.c
+++ b/drivers/input/mouse/byd.c
@@ -191,7 +191,7 @@
/*
* The touchpad generates a mixture of absolute and relative packets, indicated
- * by the the last byte of each packet being set to one of the following:
+ * by the last byte of each packet being set to one of the following:
*/
#define BYD_PACKET_ABSOLUTE 0xf8
#define BYD_PACKET_RELATIVE 0x00
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 17eb8f2aa48d..669a728095b8 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -207,8 +207,8 @@ static void serport_set_type(struct tty_struct *tty, unsigned long type)
* serport_ldisc_ioctl() allows to set the port protocol, and device ID
*/
-static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int serport_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
if (cmd == SPIOCSTYPE) {
unsigned long type;
@@ -226,7 +226,6 @@ static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
#ifdef CONFIG_COMPAT
#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t)
static int serport_ldisc_compat_ioctl(struct tty_struct *tty,
- struct file *file,
unsigned int cmd, unsigned long arg)
{
if (cmd == COMPAT_SPIOCSTYPE) {
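This hunk follows a tree-wide tty change: line-discipline ioctl handlers lose the unused struct file * argument, leaving only the tty, the command, and the argument. Sketch of the new prototype with a hypothetical handler:

    #include <linux/tty.h>
    #include <linux/errno.h>

    static int demo_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd,
                                unsigned long arg)
    {
            /* Unknown commands fall back to the tty core. */
            return -ENOIOCTLCMD;
    }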
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index aaa3c455e01e..a3bfc7a41679 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -297,6 +297,108 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return -ENOMSG;
}
+static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
+{
+ struct device *dev = &ts->client->dev;
+ struct input_dev *input;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return NULL;
+
+ input_alloc_absinfo(input);
+ if (!input->absinfo) {
+ input_free_device(input);
+ return NULL;
+ }
+
+ input->absinfo[ABS_X] = ts->input_dev->absinfo[ABS_MT_POSITION_X];
+ input->absinfo[ABS_Y] = ts->input_dev->absinfo[ABS_MT_POSITION_Y];
+ __set_bit(ABS_X, input->absbit);
+ __set_bit(ABS_Y, input->absbit);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
+
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_STYLUS);
+ input_set_capability(input, EV_KEY, BTN_STYLUS2);
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
+ /*
+ * The resolution of these touchscreens is about 10 units/mm, the actual
+ * resolution does not matter much since we set INPUT_PROP_DIRECT.
+ * Userspace wants something here though, so just set it to 10 units/mm.
+ */
+ input_abs_set_res(input, ABS_X, 10);
+ input_abs_set_res(input, ABS_Y, 10);
+
+ input->name = "Goodix Active Pen";
+ input->phys = "input/pen";
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x0416;
+ if (kstrtou16(ts->id, 10, &input->id.product))
+ input->id.product = 0x1001;
+ input->id.version = ts->version;
+
+ if (input_register_device(input) != 0) {
+ input_free_device(input);
+ return NULL;
+ }
+
+ return input;
+}
+
+static void goodix_ts_report_pen_down(struct goodix_ts_data *ts, u8 *data)
+{
+ int input_x, input_y, input_w;
+ u8 key_value;
+
+ if (!ts->input_pen) {
+ ts->input_pen = goodix_create_pen_input(ts);
+ if (!ts->input_pen)
+ return;
+ }
+
+ if (ts->contact_size == 9) {
+ input_x = get_unaligned_le16(&data[4]);
+ input_y = get_unaligned_le16(&data[6]);
+ input_w = get_unaligned_le16(&data[8]);
+ } else {
+ input_x = get_unaligned_le16(&data[2]);
+ input_y = get_unaligned_le16(&data[4]);
+ input_w = get_unaligned_le16(&data[6]);
+ }
+
+ touchscreen_report_pos(ts->input_pen, &ts->prop, input_x, input_y, false);
+ input_report_abs(ts->input_pen, ABS_PRESSURE, input_w);
+
+ input_report_key(ts->input_pen, BTN_TOUCH, 1);
+ input_report_key(ts->input_pen, BTN_TOOL_PEN, 1);
+
+ if (data[0] & GOODIX_HAVE_KEY) {
+ key_value = data[1 + ts->contact_size];
+ input_report_key(ts->input_pen, BTN_STYLUS, key_value & 0x10);
+ input_report_key(ts->input_pen, BTN_STYLUS2, key_value & 0x20);
+ } else {
+ input_report_key(ts->input_pen, BTN_STYLUS, 0);
+ input_report_key(ts->input_pen, BTN_STYLUS2, 0);
+ }
+
+ input_sync(ts->input_pen);
+}
+
+static void goodix_ts_report_pen_up(struct goodix_ts_data *ts)
+{
+ if (!ts->input_pen)
+ return;
+
+ input_report_key(ts->input_pen, BTN_TOUCH, 0);
+ input_report_key(ts->input_pen, BTN_TOOL_PEN, 0);
+ input_report_key(ts->input_pen, BTN_STYLUS, 0);
+ input_report_key(ts->input_pen, BTN_STYLUS2, 0);
+
+ input_sync(ts->input_pen);
+}
+
static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)
{
int id = coor_data[0] & 0x0F;
@@ -327,6 +429,14 @@ static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data)
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
}
+static void goodix_ts_release_keys(struct goodix_ts_data *ts)
+{
+ int i;
+
+ for (i = 0; i < GOODIX_MAX_KEYS; i++)
+ input_report_key(ts->input_dev, ts->keymap[i], 0);
+}
+
static void goodix_ts_report_key(struct goodix_ts_data *ts, u8 *data)
{
int touch_num;
@@ -341,8 +451,7 @@ static void goodix_ts_report_key(struct goodix_ts_data *ts, u8 *data)
input_report_key(ts->input_dev,
ts->keymap[i], 1);
} else {
- for (i = 0; i < GOODIX_MAX_KEYS; i++)
- input_report_key(ts->input_dev, ts->keymap[i], 0);
+ goodix_ts_release_keys(ts);
}
}
@@ -364,6 +473,15 @@ static void goodix_process_events(struct goodix_ts_data *ts)
if (touch_num < 0)
return;
+ /* The pen being down is always reported as a single touch */
+ if (touch_num == 1 && (point_data[1] & 0x80)) {
+ goodix_ts_report_pen_down(ts, point_data);
+ goodix_ts_release_keys(ts);
+ goto sync; /* Release any previously registered touches */
+ } else {
+ goodix_ts_report_pen_up(ts);
+ }
+
goodix_ts_report_key(ts, point_data);
for (i = 0; i < touch_num; i++)
@@ -374,6 +492,7 @@ static void goodix_process_events(struct goodix_ts_data *ts)
goodix_ts_report_touch_8b(ts,
&point_data[1 + ts->contact_size * i]);
+sync:
input_mt_sync_frame(ts->input_dev);
input_sync(ts->input_dev);
}
@@ -857,7 +976,7 @@ retry_get_irq_gpio:
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)
- dev_dbg(dev, "Failed to get %s GPIO: %d\n",
+ dev_err(dev, "Failed to get %s GPIO: %d\n",
GOODIX_GPIO_INT_NAME, error);
return error;
}
@@ -874,7 +993,7 @@ retry_get_irq_gpio:
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)
- dev_dbg(dev, "Failed to get %s GPIO: %d\n",
+ dev_err(dev, "Failed to get %s GPIO: %d\n",
GOODIX_GPIO_RST_NAME, error);
return error;
}
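The goodix pen support uses a lazily created secondary input device: the firmware flags a pen-down frame by setting bit 7 of the first point's ID byte while reporting exactly one "touch", and only the first such frame allocates and registers the pen input_dev, so pen-less panels never expose one. A condensed dispatch sketch (goodix_ts_report_pen_down() performs the lazy allocation itself; it is shown inline here for clarity):

    /* Sketch; assumes the goodix types and helpers added above. */
    static void demo_dispatch(struct goodix_ts_data *ts,
                              u8 *point_data, int touch_num)
    {
            /* Pen down is always a single "touch" with bit 7 set. */
            if (touch_num == 1 && (point_data[1] & 0x80)) {
                    if (!ts->input_pen)
                            ts->input_pen = goodix_create_pen_input(ts);
                    if (ts->input_pen)
                            goodix_ts_report_pen_down(ts, point_data);
            } else {
                    goodix_ts_report_pen_up(ts);
            }
    }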
diff --git a/drivers/input/touchscreen/goodix.h b/drivers/input/touchscreen/goodix.h
index 02065d1c3263..fa8602e78a64 100644
--- a/drivers/input/touchscreen/goodix.h
+++ b/drivers/input/touchscreen/goodix.h
@@ -76,6 +76,7 @@ struct goodix_chip_data {
struct goodix_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
+ struct input_dev *input_pen;
const struct goodix_chip_data *chip;
const char *firmware_name;
struct touchscreen_properties prop;
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 1ee760bac0cf..3eef8c01090f 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -67,6 +67,7 @@ struct silead_ts_data {
struct i2c_client *client;
struct gpio_desc *gpio_power;
struct input_dev *input;
+ struct input_dev *pen_input;
struct regulator_bulk_data regulators[2];
char fw_name[64];
struct touchscreen_properties prop;
@@ -75,6 +76,13 @@ struct silead_ts_data {
struct input_mt_pos pos[SILEAD_MAX_FINGERS];
int slots[SILEAD_MAX_FINGERS];
int id[SILEAD_MAX_FINGERS];
+ u32 efi_fw_min_max[4];
+ bool efi_fw_min_max_set;
+ bool pen_supported;
+ bool pen_down;
+ u32 pen_x_res;
+ u32 pen_y_res;
+ int pen_up_count;
};
struct silead_fw_data {
@@ -82,6 +90,35 @@ struct silead_fw_data {
u32 val;
};
+static void silead_apply_efi_fw_min_max(struct silead_ts_data *data)
+{
+ struct input_absinfo *absinfo_x = &data->input->absinfo[ABS_MT_POSITION_X];
+ struct input_absinfo *absinfo_y = &data->input->absinfo[ABS_MT_POSITION_Y];
+
+ if (!data->efi_fw_min_max_set)
+ return;
+
+ absinfo_x->minimum = data->efi_fw_min_max[0];
+ absinfo_x->maximum = data->efi_fw_min_max[1];
+ absinfo_y->minimum = data->efi_fw_min_max[2];
+ absinfo_y->maximum = data->efi_fw_min_max[3];
+
+ if (data->prop.invert_x) {
+ absinfo_x->maximum -= absinfo_x->minimum;
+ absinfo_x->minimum = 0;
+ }
+
+ if (data->prop.invert_y) {
+ absinfo_y->maximum -= absinfo_y->minimum;
+ absinfo_y->minimum = 0;
+ }
+
+ if (data->prop.swap_x_y) {
+ swap(absinfo_x->minimum, absinfo_y->minimum);
+ swap(absinfo_x->maximum, absinfo_y->maximum);
+ }
+}
+
static int silead_ts_request_input_dev(struct silead_ts_data *data)
{
struct device *dev = &data->client->dev;
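silead_apply_efi_fw_min_max() rebases the firmware-provided coordinate window, presumably so that the inversion later applied by touchscreen_report_pos() (which reports max - x) stays inside the advertised range. Worked numbers, assuming a firmware X window of 500..3800 with invert-x set:

    #include <stdio.h>

    int main(void)
    {
            int min = 500, max = 3800;      /* assumed EFI fw window */

            /* invert_x fixup as done above: rebase the range to 0 */
            max -= min;                     /* 3300 */
            min = 0;

            printf("x range after fixup: %d..%d\n", min, max);
            return 0;
    }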
@@ -97,6 +134,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
input_set_abs_params(data->input, ABS_MT_POSITION_X, 0, 4095, 0, 0);
input_set_abs_params(data->input, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
touchscreen_parse_properties(data->input, true, &data->prop);
+ silead_apply_efi_fw_min_max(data);
input_mt_init_slots(data->input, data->max_fingers,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
@@ -112,6 +150,40 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
error = input_register_device(data->input);
if (error) {
dev_err(dev, "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int silead_ts_request_pen_input_dev(struct silead_ts_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int error;
+
+ if (!data->pen_supported)
+ return 0;
+
+ data->pen_input = devm_input_allocate_device(dev);
+ if (!data->pen_input)
+ return -ENOMEM;
+
+ input_set_abs_params(data->pen_input, ABS_X, 0, 4095, 0, 0);
+ input_set_abs_params(data->pen_input, ABS_Y, 0, 4095, 0, 0);
+ input_set_capability(data->pen_input, EV_KEY, BTN_TOUCH);
+ input_set_capability(data->pen_input, EV_KEY, BTN_TOOL_PEN);
+ set_bit(INPUT_PROP_DIRECT, data->pen_input->propbit);
+ touchscreen_parse_properties(data->pen_input, false, &data->prop);
+ input_abs_set_res(data->pen_input, ABS_X, data->pen_x_res);
+ input_abs_set_res(data->pen_input, ABS_Y, data->pen_y_res);
+
+ data->pen_input->name = SILEAD_TS_NAME " pen";
+ data->pen_input->phys = "input/pen";
+ data->pen_input->id.bustype = BUS_I2C;
+
+ error = input_register_device(data->pen_input);
+ if (error) {
+ dev_err(dev, "Failed to register pen input device: %d\n", error);
return error;
}
@@ -129,6 +201,45 @@ static void silead_ts_set_power(struct i2c_client *client,
}
}
+static bool silead_ts_handle_pen_data(struct silead_ts_data *data, u8 *buf)
+{
+ u8 *coord = buf + SILEAD_POINT_DATA_LEN;
+ struct input_mt_pos pos;
+
+ if (!data->pen_supported || buf[2] != 0x00 || buf[3] != 0x00)
+ return false;
+
+ if (buf[0] == 0x00 && buf[1] == 0x00 && data->pen_down) {
+ data->pen_up_count++;
+ if (data->pen_up_count == 6) {
+ data->pen_down = false;
+ goto sync;
+ }
+ return true;
+ }
+
+ if (buf[0] == 0x01 && buf[1] == 0x08) {
+ touchscreen_set_mt_pos(&pos, &data->prop,
+ get_unaligned_le16(&coord[SILEAD_POINT_X_OFF]) & 0xfff,
+ get_unaligned_le16(&coord[SILEAD_POINT_Y_OFF]) & 0xfff);
+
+ input_report_abs(data->pen_input, ABS_X, pos.x);
+ input_report_abs(data->pen_input, ABS_Y, pos.y);
+
+ data->pen_up_count = 0;
+ data->pen_down = true;
+ goto sync;
+ }
+
+ return false;
+
+sync:
+ input_report_key(data->pen_input, BTN_TOOL_PEN, data->pen_down);
+ input_report_key(data->pen_input, BTN_TOUCH, data->pen_down);
+ input_sync(data->pen_input);
+ return true;
+}
+
static void silead_ts_read_data(struct i2c_client *client)
{
struct silead_ts_data *data = i2c_get_clientdata(client);
@@ -151,6 +262,9 @@ static void silead_ts_read_data(struct i2c_client *client)
buf[0] = data->max_fingers;
}
+ if (silead_ts_handle_pen_data(data, buf))
+ goto sync; /* Pen is down, release all previous touches */
+
touch_nr = 0;
bufp = buf + SILEAD_POINT_DATA_LEN;
for (i = 0; i < buf[0]; i++, bufp += SILEAD_POINT_DATA_LEN) {
@@ -193,6 +307,7 @@ static void silead_ts_read_data(struct i2c_client *client)
data->pos[i].y, data->id[i], data->slots[i]);
}
+sync:
input_mt_sync_frame(input);
input_report_key(input, KEY_LEFTMETA, softbutton_pressed);
input_sync(input);
@@ -282,17 +397,56 @@ static int silead_ts_load_fw(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct silead_ts_data *data = i2c_get_clientdata(client);
- unsigned int fw_size, i;
- const struct firmware *fw;
+ const struct firmware *fw = NULL;
struct silead_fw_data *fw_data;
+ unsigned int fw_size, i;
int error;
dev_dbg(dev, "Firmware file name: %s", data->fw_name);
- error = firmware_request_platform(&fw, data->fw_name, dev);
+ /*
+ * Unfortunately, at the time of writing this comment, we have been unable to
+ * get permission from Silead, or from device OEMs, to distribute the necessary
+ * Silead firmware files in linux-firmware.
+ *
+ * On a whole bunch of devices the UEFI BIOS code contains a touchscreen driver,
+ * which contains an embedded copy of the firmware. The fw-loader code has a
+ * "platform" fallback mechanism, which together with info on the firmware
+ * from drivers/platform/x86/touchscreen_dmi.c will use the firmware from the
+ * UEFI driver when the firmware is missing from /lib/firmware. This makes the
+ * touchscreen work OOTB without users needing to manually download the firmware.
+ *
+ * The firmware bundled with the original Windows/Android is usually newer than
+ * the firmware in the UEFI driver and it is better calibrated. This better
+ * calibration can lead to significant differences in the reported min/max
+ * coordinates.
+ *
+ * To deal with this we first try to load the firmware without "platform"
+ * fallback. If that fails we retry with "platform" fallback and if that
+ * succeeds we apply an (optional) set of alternative min/max values from the
+ * "silead,efi-fw-min-max" property.
+ */
+ error = firmware_request_nowarn(&fw, data->fw_name, dev);
if (error) {
- dev_err(dev, "Firmware request error %d\n", error);
- return error;
+ error = firmware_request_platform(&fw, data->fw_name, dev);
+ if (error) {
+ dev_err(dev, "Firmware request error %d\n", error);
+ return error;
+ }
+
+ error = device_property_read_u32_array(dev, "silead,efi-fw-min-max",
+ data->efi_fw_min_max,
+ ARRAY_SIZE(data->efi_fw_min_max));
+ if (!error)
+ data->efi_fw_min_max_set = true;
+
+ /* The EFI (platform) embedded fw does not have pen support */
+ if (data->pen_supported) {
+ dev_warn(dev, "Warning: loading '%s' from filesystem failed, using EFI embedded copy.\n",
+ data->fw_name);
+ dev_warn(dev, "Warning: pen support is known to be broken in the EFI embedded fw version\n");
+ data->pen_supported = false;
+ }
}
fw_size = fw->size / sizeof(*fw_data);
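For context, the "silead,efi-fw-min-max" property consumed above would typically be supplied by a board quirk through a software-node property table; a hypothetical sketch (device firmware name and values invented for illustration, not an actual touchscreen_dmi.c entry):

	#include <linux/property.h>

	static const u32 example_efi_fw_min_max[] = { 100, 1900, 80, 1150 };

	static const struct property_entry example_silead_props[] = {
		PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-example.fw"),
		PROPERTY_ENTRY_U32_ARRAY("silead,efi-fw-min-max",
					 example_efi_fw_min_max),
		{ }
	};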
@@ -450,6 +604,10 @@ static void silead_ts_read_props(struct i2c_client *client)
"silead/%s", str);
else
dev_dbg(dev, "Firmware file name read error. Using default.");
+
+ data->pen_supported = device_property_read_bool(dev, "silead,pen-supported");
+ device_property_read_u32(dev, "silead,pen-resolution-x", &data->pen_x_res);
+ device_property_read_u32(dev, "silead,pen-resolution-y", &data->pen_y_res);
}
#ifdef CONFIG_ACPI
@@ -562,6 +720,10 @@ static int silead_ts_probe(struct i2c_client *client,
if (error)
return error;
+ error = silead_ts_request_pen_input_dev(data);
+ if (error)
+ return error;
+
error = devm_request_threaded_irq(dev, client->irq,
NULL, silead_ts_threaded_irq_handler,
IRQF_ONESHOT, client->name, data);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 83e685557a19..f2fb6a9a1a57 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -126,12 +126,13 @@ static int titsc_config_wires(struct titsc *ts_dev)
static void titsc_step_config(struct titsc *ts_dev)
{
unsigned int config;
- int i;
+ int i, n;
int end_step, first_step, tsc_steps;
u32 stepenable;
config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_AVG_16 | ts_dev->bit_xp;
+ STEPCONFIG_AVG_16 | ts_dev->bit_xp |
+ STEPCONFIG_INM_ADCREFM;
switch (ts_dev->wires) {
case 4:
config |= STEPCONFIG_INP(ts_dev->inp_yp) | ts_dev->bit_xn;
@@ -150,9 +151,11 @@ static void titsc_step_config(struct titsc *ts_dev)
first_step = TOTAL_STEPS - tsc_steps;
/* Steps 16 to 16-coordinate_readouts is for X */
end_step = first_step + tsc_steps;
+ n = 0;
for (i = end_step - ts_dev->coordinate_readouts; i < end_step; i++) {
titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
- titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+ titsc_writel(ts_dev, REG_STEPDELAY(i),
+ n++ == 0 ? STEPCONFIG_OPENDLY : 0);
}
config = 0;
@@ -174,9 +177,11 @@ static void titsc_step_config(struct titsc *ts_dev)
/* 1 ... coordinate_readouts is for Y */
end_step = first_step + ts_dev->coordinate_readouts;
+ n = 0;
for (i = first_step; i < end_step; i++) {
titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
- titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+ titsc_writel(ts_dev, REG_STEPDELAY(i),
+ n++ == 0 ? STEPCONFIG_OPENDLY : 0);
}
/* Make CHARGECONFIG same as IDLECONFIG */
@@ -195,7 +200,10 @@ static void titsc_step_config(struct titsc *ts_dev)
STEPCONFIG_OPENDLY);
end_step++;
- config |= STEPCONFIG_INP(ts_dev->inp_yn);
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | ts_dev->bit_yp |
+ ts_dev->bit_xn | STEPCONFIG_INM_ADCREFM |
+ STEPCONFIG_INP(ts_dev->inp_yn);
titsc_writel(ts_dev, REG_STEPCONFIG(end_step), config);
titsc_writel(ts_dev, REG_STEPDELAY(end_step),
STEPCONFIG_OPENDLY);
@@ -310,7 +318,7 @@ static irqreturn_t titsc_irq(int irq, void *dev)
/*
* Calculate pressure using formula
* Resistance(touch) = x plate resistance *
- * x postion/4096 * ((z2 / z1) - 1)
+ * x position/4096 * ((z2 / z1) - 1)
*/
z = z1 - z2;
z *= x;
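A worked instance of the corrected formula in that comment, using hypothetical readings: a 275-ohm X plate with x = 2048, z1 = 400, z2 = 600 gives R = 275 * (2048/4096) * (600/400 - 1) = 68.75 ohms. A standalone integer version of the same arithmetic (values invented, not from the driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned int x = 2048, z1 = 400, z2 = 600, x_plate = 275;
		/* R = x_plate * x/4096 * (z2 - z1)/z1 -- same as (z2/z1 - 1) */
		unsigned int r = (unsigned long long)x_plate * x / 4096
				 * (z2 - z1) / z1;

		printf("Rtouch ~= %u ohm\n", r); /* prints 68 (exact: 68.75) */
		return 0;
	}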
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index e3f2c940ef3d..dfd3b35590c3 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -186,7 +186,6 @@ static irqreturn_t ucb1400_irq(int irqnr, void *devid)
{
struct ucb1400_ts *ucb = devid;
unsigned int x, y, p;
- bool penup;
if (unlikely(irqnr != ucb->irq))
return IRQ_NONE;
@@ -196,8 +195,7 @@ static irqreturn_t ucb1400_irq(int irqnr, void *devid)
/* Start with a small delay before checking pendown state */
msleep(UCB1400_TS_POLL_PERIOD);
- while (!ucb->stopped && !(penup = ucb1400_ts_pen_up(ucb))) {
-
+ while (!ucb->stopped && !ucb1400_ts_pen_up(ucb)) {
ucb1400_adc_enable(ucb->ac97);
x = ucb1400_ts_read_xpos(ucb);
y = ucb1400_ts_read_ypos(ucb);
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index fe4ea6204a4e..141754b2764c 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -24,12 +24,19 @@
#define WACOM_IN_PROXIMITY BIT(5)
/* Registers */
-#define WACOM_CMD_QUERY0 0x04
-#define WACOM_CMD_QUERY1 0x00
-#define WACOM_CMD_QUERY2 0x33
-#define WACOM_CMD_QUERY3 0x02
-#define WACOM_CMD_THROW0 0x05
-#define WACOM_CMD_THROW1 0x00
+#define WACOM_COMMAND_LSB 0x04
+#define WACOM_COMMAND_MSB 0x00
+
+#define WACOM_DATA_LSB 0x05
+#define WACOM_DATA_MSB 0x00
+
+/* Report types */
+#define REPORT_FEATURE 0x30
+
+/* Requests / operations */
+#define OPCODE_GET_REPORT 0x02
+
+#define WACOM_QUERY_REPORT 3
#define WACOM_QUERY_SIZE 19
struct wacom_features {
@@ -50,23 +57,24 @@ struct wacom_i2c {
static int wacom_query_device(struct i2c_client *client,
struct wacom_features *features)
{
- int ret;
- u8 cmd1[] = { WACOM_CMD_QUERY0, WACOM_CMD_QUERY1,
- WACOM_CMD_QUERY2, WACOM_CMD_QUERY3 };
- u8 cmd2[] = { WACOM_CMD_THROW0, WACOM_CMD_THROW1 };
+ u8 get_query_data_cmd[] = {
+ WACOM_COMMAND_LSB,
+ WACOM_COMMAND_MSB,
+ REPORT_FEATURE | WACOM_QUERY_REPORT,
+ OPCODE_GET_REPORT,
+ WACOM_DATA_LSB,
+ WACOM_DATA_MSB,
+ };
u8 data[WACOM_QUERY_SIZE];
+ int ret;
+
struct i2c_msg msgs[] = {
+ /* Request reading of feature ReportID: 3 (Pen Query Data) */
{
.addr = client->addr,
.flags = 0,
- .len = sizeof(cmd1),
- .buf = cmd1,
- },
- {
- .addr = client->addr,
- .flags = 0,
- .len = sizeof(cmd2),
- .buf = cmd2,
+ .len = sizeof(get_query_data_cmd),
+ .buf = get_query_data_cmd,
},
{
.addr = client->addr,
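The rewritten request above is a standard HID-over-I2C GET_REPORT: the command register address (0x0004, little-endian), one byte combining report type and ID (REPORT_FEATURE | WACOM_QUERY_REPORT = 0x30 | 3 = 0x33, the very value the old WACOM_CMD_QUERY2 hardcoded), the GET_REPORT opcode, and the data register address (0x0005) from which the 19-byte report is then read back. A minimal sketch of that byte layout (helper name hypothetical):

	/* Hypothetical helper mirroring the byte layout used above */
	static void build_get_feature_report(unsigned char *cmd,
					     unsigned char report_id)
	{
		cmd[0] = 0x04;              /* command register, LSB */
		cmd[1] = 0x00;              /* command register, MSB */
		cmd[2] = 0x30 | report_id;  /* REPORT_FEATURE | report ID */
		cmd[3] = 0x02;              /* OPCODE_GET_REPORT */
		cmd[4] = 0x05;              /* data register, LSB */
		cmd[5] = 0x00;              /* data register, MSB */
	}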
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 1e70b8d2a8d7..7c82c4f5fa6b 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -252,16 +252,27 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
static int zinitix_init_regulators(struct bt541_ts_data *bt541)
{
- struct i2c_client *client = bt541->client;
+ struct device *dev = &bt541->client->dev;
int error;
- bt541->supplies[0].supply = "vdd";
- bt541->supplies[1].supply = "vddo";
- error = devm_regulator_bulk_get(&client->dev,
+ /*
+ * Some older device trees have erroneous names for the regulators,
+ * so check if "vddo" is present and in that case use those names.
+ * Otherwise use the proper supply names for the component.
+ */
+ if (of_find_property(dev->of_node, "vddo-supply", NULL)) {
+ bt541->supplies[0].supply = "vdd";
+ bt541->supplies[1].supply = "vddo";
+ } else {
+ /* Else use the proper supply names */
+ bt541->supplies[0].supply = "vcca";
+ bt541->supplies[1].supply = "vdd";
+ }
+ error = devm_regulator_bulk_get(dev,
ARRAY_SIZE(bt541->supplies),
bt541->supplies);
if (error < 0) {
- dev_err(&client->dev, "Failed to get regulators: %d\n", error);
+ dev_err(dev, "Failed to get regulators: %d\n", error);
return error;
}
@@ -560,6 +571,7 @@ static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume);
#ifdef CONFIG_OF
static const struct of_device_id zinitix_of_match[] = {
+ { .compatible = "zinitix,bt532" },
{ .compatible = "zinitix,bt541" },
{ }
};
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index daf1e25f6042..91353e651a52 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -35,6 +35,15 @@ config INTERCONNECT_QCOM_MSM8974
This is a driver for the Qualcomm Network-on-Chip on msm8974-based
platforms.
+config INTERCONNECT_QCOM_MSM8996
+ tristate "Qualcomm MSM8996 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8996-based
+ platforms.
+
config INTERCONNECT_QCOM_OSM_L3
tristate "Qualcomm OSM L3 interconnect driver"
depends on INTERCONNECT_QCOM || COMPILE_TEST
@@ -42,6 +51,15 @@ config INTERCONNECT_QCOM_OSM_L3
Say y here to support the Operating State Manager (OSM) interconnect
driver which controls the scaling of L3 caches on Qualcomm SoCs.
+config INTERCONNECT_QCOM_QCM2290
+ tristate "Qualcomm QCM2290 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on qcm2290-based
+ platforms.
+
config INTERCONNECT_QCOM_QCS404
tristate "Qualcomm QCS404 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -146,5 +164,14 @@ config INTERCONNECT_QCOM_SM8350
This is a driver for the Qualcomm Network-on-Chip on SM8350-based
platforms.
+config INTERCONNECT_QCOM_SM8450
+ tristate "Qualcomm SM8450 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on SM8450-based
+ platforms.
+
config INTERCONNECT_QCOM_SMD_RPM
tristate
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 69300b1d48ef..ceae9bb566c6 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -4,7 +4,9 @@ icc-bcm-voter-objs := bcm-voter.o
qnoc-msm8916-objs := msm8916.o
qnoc-msm8939-objs := msm8939.o
qnoc-msm8974-objs := msm8974.o
+qnoc-msm8996-objs := msm8996.o
icc-osm-l3-objs := osm-l3.o
+qnoc-qcm2290-objs := qcm2290.o
qnoc-qcs404-objs := qcs404.o
icc-rpmh-obj := icc-rpmh.o
qnoc-sc7180-objs := sc7180.o
@@ -16,13 +18,16 @@ qnoc-sdx55-objs := sdx55.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
qnoc-sm8350-objs := sm8350.o
+qnoc-sm8450-objs := sm8450.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8939) += qnoc-msm8939.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8996) += qnoc-msm8996.o
obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
+obj-$(CONFIG_INTERCONNECT_QCOM_QCM2290) += qnoc-qcm2290.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
@@ -34,4 +39,5 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8450) += qnoc-sm8450.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index ef7999a08c8b..34125e8f8b60 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -11,12 +11,20 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include "smd-rpm.h"
#include "icc-rpm.h"
+/* QNOC QoS */
+#define QNOC_QOS_MCTL_LOWn_ADDR(n) (0x8 + (n * 0x1000))
+#define QNOC_QOS_MCTL_DFLT_PRIO_MASK 0x70
+#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT 4
+#define QNOC_QOS_MCTL_URGFWD_EN_MASK 0x8
+#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT 3
+
/* BIMC QoS */
#define M_BKE_REG_BASE(n) (0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n) (M_BKE_REG_BASE(n))
@@ -39,6 +47,27 @@
#define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK 0x3
+static int qcom_icc_set_qnoc_qos(struct icc_node *src, u64 max_bw)
+{
+ struct icc_provider *provider = src->provider;
+ struct qcom_icc_provider *qp = to_qcom_provider(provider);
+ struct qcom_icc_node *qn = src->data;
+ struct qcom_icc_qos *qos = &qn->qos;
+ int rc;
+
+ rc = regmap_update_bits(qp->regmap,
+ qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
+ QNOC_QOS_MCTL_DFLT_PRIO_MASK,
+ qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
+ if (rc)
+ return rc;
+
+ return regmap_update_bits(qp->regmap,
+ qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
+ QNOC_QOS_MCTL_URGFWD_EN_MASK,
+ !!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
+}
+
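The register math above is plain mask/shift work; with hypothetical values, areq_prio = 2 becomes 2 << 4 = 0x20 inside the 0x70 priority field (bits 6:4), and urg_fwd_en = true becomes 1 << 3 = 0x8, the urgent-forwarding bit, so the two regmap_update_bits() calls never touch each other's bits. A tiny standalone check of that layout:

	#include <assert.h>

	int main(void)
	{
		unsigned int prio = 2, urg = 1;

		assert(((prio << 4) & ~0x70u) == 0); /* priority fits bits 6:4 */
		assert(((urg << 3) & ~0x08u) == 0);  /* urgent forward is bit 3 */
		assert((0x70u & 0x08u) == 0);        /* fields do not overlap */
		return 0;
	}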
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
struct qcom_icc_qos *qos,
int regnum)
@@ -76,7 +105,7 @@ static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
provider = src->provider;
qp = to_qcom_provider(provider);
- if (qn->qos.qos_mode != -1)
+ if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
mode = qn->qos.qos_mode;
/* QoS Priority: The QoS Health parameters are getting considered
@@ -137,7 +166,7 @@ static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
return 0;
}
- if (qn->qos.qos_mode != -1)
+ if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
mode = qn->qos.qos_mode;
if (mode == NOC_QOS_MODE_FIXED) {
@@ -163,10 +192,14 @@ static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);
- if (qp->is_bimc_node)
+ switch (qp->type) {
+ case QCOM_ICC_BIMC:
return qcom_icc_set_bimc_qos(node, sum_bw);
-
- return qcom_icc_set_noc_qos(node, sum_bw);
+ case QCOM_ICC_QNOC:
+ return qcom_icc_set_qnoc_qos(node, sum_bw);
+ default:
+ return qcom_icc_set_noc_qos(node, sum_bw);
+ }
}
static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
@@ -239,6 +272,7 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
rate = max(sum_bw, max_peak_bw);
do_div(rate, qn->buswidth);
+ rate = min_t(u64, rate, LONG_MAX);
if (qn->rate == rate)
return 0;
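The LONG_MAX clamp added above guards the later handoff of this u64 rate to clk_set_rate(), whose rate argument is an unsigned long; on a 32-bit build an unclamped value would silently truncate. A minimal illustration with an invented rate:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t rate = UINT64_C(1) << 32;       /* > LONG_MAX on 32-bit */
		unsigned long arg = (unsigned long)rate; /* what clk_set_rate() would see */

		/* prints 0 when sizeof(long) == 4; the clamp avoids this */
		printf("%lu\n", arg);
		return 0;
	}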
@@ -307,7 +341,7 @@ int qnoc_probe(struct platform_device *pdev)
qp->bus_clks[i].id = cds[i];
qp->num_clks = cd_num;
- qp->is_bimc_node = desc->is_bimc_node;
+ qp->type = desc->type;
qp->qos_offset = desc->qos_offset;
if (desc->regmap_cfg) {
@@ -315,8 +349,13 @@ int qnoc_probe(struct platform_device *pdev)
void __iomem *mmio;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
+ if (!res) {
+ /* Try parent's regmap */
+ qp->regmap = dev_get_regmap(dev->parent, NULL);
+ if (qp->regmap)
+ goto regmap_done;
return -ENODEV;
+ }
mmio = devm_ioremap_resource(dev, res);
@@ -332,6 +371,7 @@ int qnoc_probe(struct platform_device *pdev)
}
}
+regmap_done:
ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
if (ret)
return ret;
@@ -340,6 +380,12 @@ int qnoc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (desc->has_bus_pd) {
+ ret = dev_pm_domain_attach(dev, true);
+ if (ret)
+ return ret;
+ }
+
provider = &qp->provider;
INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
@@ -377,6 +423,10 @@ int qnoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qp);
+ /* Populate child NoC devices if any */
+ if (of_get_child_count(dev->of_node) > 0)
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+
return 0;
err:
icc_nodes_remove(provider);
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
index f5744de4da19..26dad006034f 100644
--- a/drivers/interconnect/qcom/icc-rpm.h
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -12,19 +12,25 @@
#define to_qcom_provider(_provider) \
container_of(_provider, struct qcom_icc_provider, provider)
+enum qcom_icc_type {
+ QCOM_ICC_NOC,
+ QCOM_ICC_BIMC,
+ QCOM_ICC_QNOC,
+};
+
/**
* struct qcom_icc_provider - Qualcomm specific interconnect provider
* @provider: generic interconnect provider
* @bus_clks: the clk_bulk_data table of bus clocks
* @num_clks: the total number of clk_bulk_data entries
- * @is_bimc_node: indicates whether to use bimc specific setting
+ * @type: the ICC provider type
* @qos_offset: offset to QoS registers
* @regmap: regmap for QoS registers read/write access
*/
struct qcom_icc_provider {
struct icc_provider provider;
int num_clks;
- bool is_bimc_node;
+ enum qcom_icc_type type;
struct regmap *regmap;
unsigned int qos_offset;
struct clk_bulk_data bus_clks[];
@@ -38,6 +44,7 @@ struct qcom_icc_provider {
* @ap_owned: indicates if the node is owned by the AP or by the RPM
* @qos_mode: default qos mode for this node
* @qos_port: qos port number for finding qos registers of this node
+ * @urg_fwd_en: enable urgent forwarding
*/
struct qcom_icc_qos {
u32 areq_prio;
@@ -46,6 +53,7 @@ struct qcom_icc_qos {
bool ap_owned;
int qos_mode;
int qos_port;
+ bool urg_fwd_en;
};
/**
@@ -77,7 +85,8 @@ struct qcom_icc_desc {
size_t num_nodes;
const char * const *clocks;
size_t num_clocks;
- bool is_bimc_node;
+ bool has_bus_pd;
+ enum qcom_icc_type type;
const struct regmap_config *regmap_cfg;
unsigned int qos_offset;
};
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 3eb7936d2cf6..2c8e12549804 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -21,13 +21,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node)
{
size_t i;
struct qcom_icc_node *qn;
+ struct qcom_icc_provider *qp;
qn = node->data;
+ qp = to_qcom_provider(node->provider);
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
qn->sum_avg[i] = 0;
qn->max_peak[i] = 0;
}
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
}
EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
@@ -45,10 +50,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
{
size_t i;
struct qcom_icc_node *qn;
- struct qcom_icc_provider *qp;
qn = node->data;
- qp = to_qcom_provider(node->provider);
if (!tag)
tag = QCOM_ICC_TAG_ALWAYS;
@@ -68,9 +71,6 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
- for (i = 0; i < qn->num_bcms; i++)
- qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
-
return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index e3c995b11357..2f397a7c3322 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -1229,6 +1229,7 @@ static const struct regmap_config msm8916_snoc_regmap_config = {
};
static struct qcom_icc_desc msm8916_snoc = {
+ .type = QCOM_ICC_NOC,
.nodes = msm8916_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
.regmap_cfg = &msm8916_snoc_regmap_config,
@@ -1256,9 +1257,9 @@ static const struct regmap_config msm8916_bimc_regmap_config = {
};
static struct qcom_icc_desc msm8916_bimc = {
+ .type = QCOM_ICC_BIMC,
.nodes = msm8916_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
- .is_bimc_node = true,
.regmap_cfg = &msm8916_bimc_regmap_config,
.qos_offset = 0x8000,
};
@@ -1325,6 +1326,7 @@ static const struct regmap_config msm8916_pcnoc_regmap_config = {
};
static struct qcom_icc_desc msm8916_pcnoc = {
+ .type = QCOM_ICC_NOC,
.nodes = msm8916_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
.regmap_cfg = &msm8916_pcnoc_regmap_config,
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
index 16272a477bd8..d188f3636e4c 100644
--- a/drivers/interconnect/qcom/msm8939.c
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -1282,6 +1282,7 @@ static const struct regmap_config msm8939_snoc_regmap_config = {
};
static struct qcom_icc_desc msm8939_snoc = {
+ .type = QCOM_ICC_NOC,
.nodes = msm8939_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_nodes),
.regmap_cfg = &msm8939_snoc_regmap_config,
@@ -1309,6 +1310,7 @@ static const struct regmap_config msm8939_snoc_mm_regmap_config = {
};
static struct qcom_icc_desc msm8939_snoc_mm = {
+ .type = QCOM_ICC_NOC,
.nodes = msm8939_snoc_mm_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes),
.regmap_cfg = &msm8939_snoc_mm_regmap_config,
@@ -1336,9 +1338,9 @@ static const struct regmap_config msm8939_bimc_regmap_config = {
};
static struct qcom_icc_desc msm8939_bimc = {
+ .type = QCOM_ICC_BIMC,
.nodes = msm8939_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_bimc_nodes),
- .is_bimc_node = true,
.regmap_cfg = &msm8939_bimc_regmap_config,
.qos_offset = 0x8000,
};
@@ -1407,6 +1409,7 @@ static const struct regmap_config msm8939_pcnoc_regmap_config = {
};
static struct qcom_icc_desc msm8939_pcnoc = {
+ .type = QCOM_ICC_NOC,
.nodes = msm8939_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes),
.regmap_cfg = &msm8939_pcnoc_regmap_config,
diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
new file mode 100644
index 000000000000..499e11fbbd2e
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8996.c
@@ -0,0 +1,2110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm MSM8996 Network-on-Chip (NoC) QoS driver
+ *
+ * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/interconnect/qcom,msm8996.h>
+
+#include "icc-rpm.h"
+#include "smd-rpm.h"
+#include "msm8996.h"
+
+static const char * const bus_mm_clocks[] = {
+ "bus",
+ "bus_a",
+ "iface"
+};
+
+static const char * const bus_a0noc_clocks[] = {
+ "aggre0_snoc_axi",
+ "aggre0_cnoc_ahb",
+ "aggre0_noc_mpu_cfg"
+};
+
+static const u16 mas_a0noc_common_links[] = {
+ MSM8996_SLAVE_A0NOC_SNOC
+};
+
+static struct qcom_icc_node mas_pcie_0 = {
+ .name = "mas_pcie_0",
+ .id = MSM8996_MASTER_PCIE_0,
+ .buswidth = 8,
+ .mas_rpm_id = 65,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_a0noc_common_links),
+ .links = mas_a0noc_common_links
+};
+
+static struct qcom_icc_node mas_pcie_1 = {
+ .name = "mas_pcie_1",
+ .id = MSM8996_MASTER_PCIE_1,
+ .buswidth = 8,
+ .mas_rpm_id = 66,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_a0noc_common_links),
+ .links = mas_a0noc_common_links
+};
+
+static struct qcom_icc_node mas_pcie_2 = {
+ .name = "mas_pcie_2",
+ .id = MSM8996_MASTER_PCIE_2,
+ .buswidth = 8,
+ .mas_rpm_id = 119,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_a0noc_common_links),
+ .links = mas_a0noc_common_links
+};
+
+static const u16 mas_a1noc_common_links[] = {
+ MSM8996_SLAVE_A1NOC_SNOC
+};
+
+static struct qcom_icc_node mas_cnoc_a1noc = {
+ .name = "mas_cnoc_a1noc",
+ .id = MSM8996_MASTER_CNOC_A1NOC,
+ .buswidth = 8,
+ .mas_rpm_id = 116,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_a1noc_common_links),
+ .links = mas_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_crypto_c0 = {
+ .name = "mas_crypto_c0",
+ .id = MSM8996_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_a1noc_common_links),
+ .links = mas_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_pnoc_a1noc = {
+ .name = "mas_pnoc_a1noc",
+ .id = MSM8996_MASTER_PNOC_A1NOC,
+ .buswidth = 8,
+ .mas_rpm_id = 117,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = false,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_a1noc_common_links),
+ .links = mas_a1noc_common_links
+};
+
+static const u16 mas_a2noc_common_links[] = {
+ MSM8996_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_usb3 = {
+ .name = "mas_usb3",
+ .id = MSM8996_MASTER_USB3,
+ .buswidth = 8,
+ .mas_rpm_id = 32,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_a2noc_common_links),
+ .links = mas_a2noc_common_links
+};
+
+static struct qcom_icc_node mas_ipa = {
+ .name = "mas_ipa",
+ .id = MSM8996_MASTER_IPA,
+ .buswidth = 8,
+ .mas_rpm_id = 59,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = -1,
+ .num_links = ARRAY_SIZE(mas_a2noc_common_links),
+ .links = mas_a2noc_common_links
+};
+
+static struct qcom_icc_node mas_ufs = {
+ .name = "mas_ufs",
+ .id = MSM8996_MASTER_UFS,
+ .buswidth = 8,
+ .mas_rpm_id = 68,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_a2noc_common_links),
+ .links = mas_a2noc_common_links
+};
+
+static const u16 mas_apps_proc_links[] = {
+ MSM8996_SLAVE_BIMC_SNOC_1,
+ MSM8996_SLAVE_EBI_CH0,
+ MSM8996_SLAVE_BIMC_SNOC_0
+};
+
+static struct qcom_icc_node mas_apps_proc = {
+ .name = "mas_apps_proc",
+ .id = MSM8996_MASTER_AMPSS_M0,
+ .buswidth = 8,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_apps_proc_links),
+ .links = mas_apps_proc_links
+};
+
+static const u16 mas_oxili_common_links[] = {
+ MSM8996_SLAVE_BIMC_SNOC_1,
+ MSM8996_SLAVE_HMSS_L3,
+ MSM8996_SLAVE_EBI_CH0,
+ MSM8996_SLAVE_BIMC_SNOC_0
+};
+
+static struct qcom_icc_node mas_oxili = {
+ .name = "mas_oxili",
+ .id = MSM8996_MASTER_GRAPHICS_3D,
+ .buswidth = 8,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_oxili_common_links),
+ .links = mas_oxili_common_links
+};
+
+static struct qcom_icc_node mas_mnoc_bimc = {
+ .name = "mas_mnoc_bimc",
+ .id = MSM8996_MASTER_MNOC_BIMC,
+ .buswidth = 8,
+ .mas_rpm_id = 2,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_oxili_common_links),
+ .links = mas_oxili_common_links
+};
+
+static const u16 mas_snoc_bimc_links[] = {
+ MSM8996_SLAVE_HMSS_L3,
+ MSM8996_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc = {
+ .name = "mas_snoc_bimc",
+ .id = MSM8996_MASTER_SNOC_BIMC,
+ .buswidth = 8,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = false,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_links),
+ .links = mas_snoc_bimc_links
+};
+
+static const u16 mas_snoc_cnoc_links[] = {
+ MSM8996_SLAVE_CLK_CTL,
+ MSM8996_SLAVE_RBCPR_CX,
+ MSM8996_SLAVE_A2NOC_SMMU_CFG,
+ MSM8996_SLAVE_A0NOC_MPU_CFG,
+ MSM8996_SLAVE_MESSAGE_RAM,
+ MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG,
+ MSM8996_SLAVE_PCIE_0_CFG,
+ MSM8996_SLAVE_TLMM,
+ MSM8996_SLAVE_MPM,
+ MSM8996_SLAVE_A0NOC_SMMU_CFG,
+ MSM8996_SLAVE_EBI1_PHY_CFG,
+ MSM8996_SLAVE_BIMC_CFG,
+ MSM8996_SLAVE_PIMEM_CFG,
+ MSM8996_SLAVE_RBCPR_MX,
+ MSM8996_SLAVE_PRNG,
+ MSM8996_SLAVE_PCIE20_AHB2PHY,
+ MSM8996_SLAVE_A2NOC_MPU_CFG,
+ MSM8996_SLAVE_QDSS_CFG,
+ MSM8996_SLAVE_A2NOC_CFG,
+ MSM8996_SLAVE_A0NOC_CFG,
+ MSM8996_SLAVE_UFS_CFG,
+ MSM8996_SLAVE_CRYPTO_0_CFG,
+ MSM8996_SLAVE_PCIE_1_CFG,
+ MSM8996_SLAVE_SNOC_CFG,
+ MSM8996_SLAVE_SNOC_MPU_CFG,
+ MSM8996_SLAVE_A1NOC_MPU_CFG,
+ MSM8996_SLAVE_A1NOC_SMMU_CFG,
+ MSM8996_SLAVE_PCIE_2_CFG,
+ MSM8996_SLAVE_CNOC_MNOC_CFG,
+ MSM8996_SLAVE_QDSS_RBCPR_APU_CFG,
+ MSM8996_SLAVE_PMIC_ARB,
+ MSM8996_SLAVE_IMEM_CFG,
+ MSM8996_SLAVE_A1NOC_CFG,
+ MSM8996_SLAVE_SSC_CFG,
+ MSM8996_SLAVE_TCSR,
+ MSM8996_SLAVE_LPASS_SMMU_CFG,
+ MSM8996_SLAVE_DCC_CFG
+};
+
+static struct qcom_icc_node mas_snoc_cnoc = {
+ .name = "mas_snoc_cnoc",
+ .id = MSM8996_MASTER_SNOC_CNOC,
+ .buswidth = 8,
+ .mas_rpm_id = 52,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
+ .links = mas_snoc_cnoc_links
+};
+
+static const u16 mas_qdss_dap_links[] = {
+ MSM8996_SLAVE_QDSS_RBCPR_APU_CFG,
+ MSM8996_SLAVE_RBCPR_CX,
+ MSM8996_SLAVE_A2NOC_SMMU_CFG,
+ MSM8996_SLAVE_A0NOC_MPU_CFG,
+ MSM8996_SLAVE_MESSAGE_RAM,
+ MSM8996_SLAVE_PCIE_0_CFG,
+ MSM8996_SLAVE_TLMM,
+ MSM8996_SLAVE_MPM,
+ MSM8996_SLAVE_A0NOC_SMMU_CFG,
+ MSM8996_SLAVE_EBI1_PHY_CFG,
+ MSM8996_SLAVE_BIMC_CFG,
+ MSM8996_SLAVE_PIMEM_CFG,
+ MSM8996_SLAVE_RBCPR_MX,
+ MSM8996_SLAVE_CLK_CTL,
+ MSM8996_SLAVE_PRNG,
+ MSM8996_SLAVE_PCIE20_AHB2PHY,
+ MSM8996_SLAVE_A2NOC_MPU_CFG,
+ MSM8996_SLAVE_QDSS_CFG,
+ MSM8996_SLAVE_A2NOC_CFG,
+ MSM8996_SLAVE_A0NOC_CFG,
+ MSM8996_SLAVE_UFS_CFG,
+ MSM8996_SLAVE_CRYPTO_0_CFG,
+ MSM8996_SLAVE_CNOC_A1NOC,
+ MSM8996_SLAVE_PCIE_1_CFG,
+ MSM8996_SLAVE_SNOC_CFG,
+ MSM8996_SLAVE_SNOC_MPU_CFG,
+ MSM8996_SLAVE_A1NOC_MPU_CFG,
+ MSM8996_SLAVE_A1NOC_SMMU_CFG,
+ MSM8996_SLAVE_PCIE_2_CFG,
+ MSM8996_SLAVE_CNOC_MNOC_CFG,
+ MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG,
+ MSM8996_SLAVE_PMIC_ARB,
+ MSM8996_SLAVE_IMEM_CFG,
+ MSM8996_SLAVE_A1NOC_CFG,
+ MSM8996_SLAVE_SSC_CFG,
+ MSM8996_SLAVE_TCSR,
+ MSM8996_SLAVE_LPASS_SMMU_CFG,
+ MSM8996_SLAVE_DCC_CFG
+};
+
+static struct qcom_icc_node mas_qdss_dap = {
+ .name = "mas_qdss_dap",
+ .id = MSM8996_MASTER_QDSS_DAP,
+ .buswidth = 8,
+ .mas_rpm_id = 49,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_qdss_dap_links),
+ .links = mas_qdss_dap_links
+};
+
+static const u16 mas_cnoc_mnoc_mmss_cfg_links[] = {
+ MSM8996_SLAVE_MMAGIC_CFG,
+ MSM8996_SLAVE_DSA_MPU_CFG,
+ MSM8996_SLAVE_MMSS_CLK_CFG,
+ MSM8996_SLAVE_CAMERA_THROTTLE_CFG,
+ MSM8996_SLAVE_VENUS_CFG,
+ MSM8996_SLAVE_SMMU_VFE_CFG,
+ MSM8996_SLAVE_MISC_CFG,
+ MSM8996_SLAVE_SMMU_CPP_CFG,
+ MSM8996_SLAVE_GRAPHICS_3D_CFG,
+ MSM8996_SLAVE_DISPLAY_THROTTLE_CFG,
+ MSM8996_SLAVE_VENUS_THROTTLE_CFG,
+ MSM8996_SLAVE_CAMERA_CFG,
+ MSM8996_SLAVE_DISPLAY_CFG,
+ MSM8996_SLAVE_CPR_CFG,
+ MSM8996_SLAVE_SMMU_ROTATOR_CFG,
+ MSM8996_SLAVE_DSA_CFG,
+ MSM8996_SLAVE_SMMU_VENUS_CFG,
+ MSM8996_SLAVE_VMEM_CFG,
+ MSM8996_SLAVE_SMMU_JPEG_CFG,
+ MSM8996_SLAVE_SMMU_MDP_CFG,
+ MSM8996_SLAVE_MNOC_MPU_CFG
+};
+
+static struct qcom_icc_node mas_cnoc_mnoc_mmss_cfg = {
+ .name = "mas_cnoc_mnoc_mmss_cfg",
+ .id = MSM8996_MASTER_CNOC_MNOC_MMSS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = 4,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_cnoc_mnoc_mmss_cfg_links),
+ .links = mas_cnoc_mnoc_mmss_cfg_links
+};
+
+static const u16 mas_cnoc_mnoc_cfg_links[] = {
+ MSM8996_SLAVE_SERVICE_MNOC
+};
+
+static struct qcom_icc_node mas_cnoc_mnoc_cfg = {
+ .name = "mas_cnoc_mnoc_cfg",
+ .id = MSM8996_MASTER_CNOC_MNOC_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = 5,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_cnoc_mnoc_cfg_links),
+ .links = mas_cnoc_mnoc_cfg_links
+};
+
+static const u16 mas_mnoc_bimc_common_links[] = {
+ MSM8996_SLAVE_MNOC_BIMC
+};
+
+static struct qcom_icc_node mas_cpp = {
+ .name = "mas_cpp",
+ .id = MSM8996_MASTER_CPP,
+ .buswidth = 32,
+ .mas_rpm_id = 115,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_jpeg = {
+ .name = "mas_jpeg",
+ .id = MSM8996_MASTER_JPEG,
+ .buswidth = 32,
+ .mas_rpm_id = 7,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_mdp_p0 = {
+ .name = "mas_mdp_p0",
+ .id = MSM8996_MASTER_MDP_PORT0,
+ .buswidth = 32,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_mdp_p1 = {
+ .name = "mas_mdp_p1",
+ .id = MSM8996_MASTER_MDP_PORT1,
+ .buswidth = 32,
+ .mas_rpm_id = 61,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_rotator = {
+ .name = "mas_rotator",
+ .id = MSM8996_MASTER_ROTATOR,
+ .buswidth = 32,
+ .mas_rpm_id = 120,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_venus = {
+ .name = "mas_venus",
+ .id = MSM8996_MASTER_VIDEO_P0,
+ .buswidth = 32,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_vfe = {
+ .name = "mas_vfe",
+ .id = MSM8996_MASTER_VFE,
+ .buswidth = 32,
+ .mas_rpm_id = 11,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static const u16 mas_vmem_common_links[] = {
+ MSM8996_SLAVE_VMEM
+};
+
+static struct qcom_icc_node mas_snoc_vmem = {
+ .name = "mas_snoc_vmem",
+ .id = MSM8996_MASTER_SNOC_VMEM,
+ .buswidth = 32,
+ .mas_rpm_id = 114,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_vmem_common_links),
+ .links = mas_vmem_common_links
+};
+
+static struct qcom_icc_node mas_venus_vmem = {
+ .name = "mas_venus_vmem",
+ .id = MSM8996_MASTER_VIDEO_P0_OCMEM,
+ .buswidth = 32,
+ .mas_rpm_id = 121,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_vmem_common_links),
+ .links = mas_vmem_common_links
+};
+
+static const u16 mas_snoc_pnoc_links[] = {
+ MSM8996_SLAVE_BLSP_1,
+ MSM8996_SLAVE_BLSP_2,
+ MSM8996_SLAVE_SDCC_1,
+ MSM8996_SLAVE_SDCC_2,
+ MSM8996_SLAVE_SDCC_4,
+ MSM8996_SLAVE_TSIF,
+ MSM8996_SLAVE_PDM,
+ MSM8996_SLAVE_AHB2PHY
+};
+
+static struct qcom_icc_node mas_snoc_pnoc = {
+ .name = "mas_snoc_pnoc",
+ .id = MSM8996_MASTER_SNOC_PNOC,
+ .buswidth = 8,
+ .mas_rpm_id = 44,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_pnoc_links),
+ .links = mas_snoc_pnoc_links
+};
+
+static const u16 mas_pnoc_a1noc_common_links[] = {
+ MSM8996_SLAVE_PNOC_A1NOC
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .name = "mas_sdcc_1",
+ .id = MSM8996_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .name = "mas_sdcc_2",
+ .id = MSM8996_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_sdcc_4 = {
+ .name = "mas_sdcc_4",
+ .id = MSM8996_MASTER_SDCC_4,
+ .buswidth = 8,
+ .mas_rpm_id = 36,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_usb_hs = {
+ .name = "mas_usb_hs",
+ .id = MSM8996_MASTER_USB_HS,
+ .buswidth = 8,
+ .mas_rpm_id = 42,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = MSM8996_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = 41,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_blsp_2 = {
+ .name = "mas_blsp_2",
+ .id = MSM8996_MASTER_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = 39,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_tsif = {
+ .name = "mas_tsif",
+ .id = MSM8996_MASTER_TSIF,
+ .buswidth = 4,
+ .mas_rpm_id = 37,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static const u16 mas_hmss_links[] = {
+ MSM8996_SLAVE_PIMEM,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_SNOC_BIMC
+};
+
+static struct qcom_icc_node mas_hmss = {
+ .name = "mas_hmss",
+ .id = MSM8996_MASTER_HMSS,
+ .buswidth = 8,
+ .mas_rpm_id = 118,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_hmss_links),
+ .links = mas_hmss_links
+};
+
+static const u16 mas_qdss_common_links[] = {
+ MSM8996_SLAVE_PIMEM,
+ MSM8996_SLAVE_USB3,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_SNOC_BIMC,
+ MSM8996_SLAVE_SNOC_PNOC
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = MSM8996_MASTER_QDSS_BAM,
+ .buswidth = 16,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_qdss_common_links),
+ .links = mas_qdss_common_links
+};
+
+static const u16 mas_snoc_cfg_links[] = {
+ MSM8996_SLAVE_SERVICE_SNOC
+};
+
+static struct qcom_icc_node mas_snoc_cfg = {
+ .name = "mas_snoc_cfg",
+ .id = MSM8996_MASTER_SNOC_CFG,
+ .buswidth = 16,
+ .mas_rpm_id = 20,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_snoc_cfg_links),
+ .links = mas_snoc_cfg_links
+};
+
+static const u16 mas_bimc_snoc_0_links[] = {
+ MSM8996_SLAVE_SNOC_VMEM,
+ MSM8996_SLAVE_USB3,
+ MSM8996_SLAVE_PIMEM,
+ MSM8996_SLAVE_LPASS,
+ MSM8996_SLAVE_APPSS,
+ MSM8996_SLAVE_SNOC_CNOC,
+ MSM8996_SLAVE_SNOC_PNOC,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_QDSS_STM
+};
+
+static struct qcom_icc_node mas_bimc_snoc_0 = {
+ .name = "mas_bimc_snoc_0",
+ .id = MSM8996_MASTER_BIMC_SNOC_0,
+ .buswidth = 16,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_0_links),
+ .links = mas_bimc_snoc_0_links
+};
+
+static const u16 mas_bimc_snoc_1_links[] = {
+ MSM8996_SLAVE_PCIE_2,
+ MSM8996_SLAVE_PCIE_1,
+ MSM8996_SLAVE_PCIE_0
+};
+
+static struct qcom_icc_node mas_bimc_snoc_1 = {
+ .name = "mas_bimc_snoc_1",
+ .id = MSM8996_MASTER_BIMC_SNOC_1,
+ .buswidth = 16,
+ .mas_rpm_id = 109,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_1_links),
+ .links = mas_bimc_snoc_1_links
+};
+
+static const u16 mas_a0noc_snoc_links[] = {
+ MSM8996_SLAVE_SNOC_PNOC,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_APPSS,
+ MSM8996_SLAVE_SNOC_BIMC,
+ MSM8996_SLAVE_PIMEM
+};
+
+static struct qcom_icc_node mas_a0noc_snoc = {
+ .name = "mas_a0noc_snoc",
+ .id = MSM8996_MASTER_A0NOC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = 110,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_a0noc_snoc_links),
+ .links = mas_a0noc_snoc_links
+};
+
+static const u16 mas_a1noc_snoc_links[] = {
+ MSM8996_SLAVE_SNOC_VMEM,
+ MSM8996_SLAVE_USB3,
+ MSM8996_SLAVE_PCIE_0,
+ MSM8996_SLAVE_PIMEM,
+ MSM8996_SLAVE_PCIE_2,
+ MSM8996_SLAVE_LPASS,
+ MSM8996_SLAVE_PCIE_1,
+ MSM8996_SLAVE_APPSS,
+ MSM8996_SLAVE_SNOC_BIMC,
+ MSM8996_SLAVE_SNOC_CNOC,
+ MSM8996_SLAVE_SNOC_PNOC,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_QDSS_STM
+};
+
+static struct qcom_icc_node mas_a1noc_snoc = {
+ .name = "mas_a1noc_snoc",
+ .id = MSM8996_MASTER_A1NOC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = 111,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_a1noc_snoc_links),
+ .links = mas_a1noc_snoc_links
+};
+
+static const u16 mas_a2noc_snoc_links[] = {
+ MSM8996_SLAVE_SNOC_VMEM,
+ MSM8996_SLAVE_USB3,
+ MSM8996_SLAVE_PCIE_1,
+ MSM8996_SLAVE_PIMEM,
+ MSM8996_SLAVE_PCIE_2,
+ MSM8996_SLAVE_QDSS_STM,
+ MSM8996_SLAVE_LPASS,
+ MSM8996_SLAVE_SNOC_BIMC,
+ MSM8996_SLAVE_SNOC_CNOC,
+ MSM8996_SLAVE_SNOC_PNOC,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_PCIE_0
+};
+
+static struct qcom_icc_node mas_a2noc_snoc = {
+ .name = "mas_a2noc_snoc",
+ .id = MSM8996_MASTER_A2NOC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = 112,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_a2noc_snoc_links),
+ .links = mas_a2noc_snoc_links
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = MSM8996_MASTER_QDSS_ETR,
+ .buswidth = 16,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_qdss_common_links),
+ .links = mas_qdss_common_links
+};
+
+static const u16 slv_a0noc_snoc_links[] = {
+ MSM8996_MASTER_A0NOC_SNOC
+};
+
+static struct qcom_icc_node slv_a0noc_snoc = {
+ .name = "slv_a0noc_snoc",
+ .id = MSM8996_SLAVE_A0NOC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 141,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_a0noc_snoc_links),
+ .links = slv_a0noc_snoc_links
+};
+
+static const u16 slv_a1noc_snoc_links[] = {
+ MSM8996_MASTER_A1NOC_SNOC
+};
+
+static struct qcom_icc_node slv_a1noc_snoc = {
+ .name = "slv_a1noc_snoc",
+ .id = MSM8996_SLAVE_A1NOC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 142,
+ .num_links = ARRAY_SIZE(slv_a1noc_snoc_links),
+ .links = slv_a1noc_snoc_links
+};
+
+static const u16 slv_a2noc_snoc_links[] = {
+ MSM8996_MASTER_A2NOC_SNOC
+};
+
+static struct qcom_icc_node slv_a2noc_snoc = {
+ .name = "slv_a2noc_snoc",
+ .id = MSM8996_SLAVE_A2NOC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 143,
+ .num_links = ARRAY_SIZE(slv_a2noc_snoc_links),
+ .links = slv_a2noc_snoc_links
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = MSM8996_SLAVE_EBI_CH0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0
+};
+
+static struct qcom_icc_node slv_hmss_l3 = {
+ .name = "slv_hmss_l3",
+ .id = MSM8996_SLAVE_HMSS_L3,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 160
+};
+
+static const u16 slv_bimc_snoc_0_links[] = {
+ MSM8996_MASTER_BIMC_SNOC_0
+};
+
+static struct qcom_icc_node slv_bimc_snoc_0 = {
+ .name = "slv_bimc_snoc_0",
+ .id = MSM8996_SLAVE_BIMC_SNOC_0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_0_links),
+ .links = slv_bimc_snoc_0_links
+};
+
+static const u16 slv_bimc_snoc_1_links[] = {
+ MSM8996_MASTER_BIMC_SNOC_1
+};
+
+static struct qcom_icc_node slv_bimc_snoc_1 = {
+ .name = "slv_bimc_snoc_1",
+ .id = MSM8996_SLAVE_BIMC_SNOC_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 138,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_1_links),
+ .links = slv_bimc_snoc_1_links
+};
+
+static const u16 slv_cnoc_a1noc_links[] = {
+ MSM8996_MASTER_CNOC_A1NOC
+};
+
+static struct qcom_icc_node slv_cnoc_a1noc = {
+ .name = "slv_cnoc_a1noc",
+ .id = MSM8996_SLAVE_CNOC_A1NOC,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 75,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_cnoc_a1noc_links),
+ .links = slv_cnoc_a1noc_links
+};
+
+static struct qcom_icc_node slv_clk_ctl = {
+ .name = "slv_clk_ctl",
+ .id = MSM8996_SLAVE_CLK_CTL,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 47
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = MSM8996_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50
+};
+
+static struct qcom_icc_node slv_tlmm = {
+ .name = "slv_tlmm",
+ .id = MSM8996_SLAVE_TLMM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 51
+};
+
+static struct qcom_icc_node slv_crypto0_cfg = {
+ .name = "slv_crypto0_cfg",
+ .id = MSM8996_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_mpm = {
+ .name = "slv_mpm",
+ .id = MSM8996_SLAVE_MPM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 62,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pimem_cfg = {
+ .name = "slv_pimem_cfg",
+ .id = MSM8996_SLAVE_PIMEM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 167,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_imem_cfg = {
+ .name = "slv_imem_cfg",
+ .id = MSM8996_SLAVE_IMEM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 54,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = MSM8996_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55
+};
+
+static struct qcom_icc_node slv_bimc_cfg = {
+ .name = "slv_bimc_cfg",
+ .id = MSM8996_SLAVE_BIMC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 56,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = MSM8996_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = MSM8996_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 127,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_dcc_cfg = {
+ .name = "slv_dcc_cfg",
+ .id = MSM8996_SLAVE_DCC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 155,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_rbcpr_mx = {
+ .name = "slv_rbcpr_mx",
+ .id = MSM8996_SLAVE_RBCPR_MX,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 170,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_qdss_cfg = {
+ .name = "slv_qdss_cfg",
+ .id = MSM8996_SLAVE_QDSS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 63,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_rbcpr_cx = {
+ .name = "slv_rbcpr_cx",
+ .id = MSM8996_SLAVE_RBCPR_CX,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 169,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_cpu_apu_cfg = {
+ .name = "slv_cpu_apu_cfg",
+ .id = MSM8996_SLAVE_QDSS_RBCPR_APU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 168,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_cnoc_mnoc_cfg_links[] = {
+ MSM8996_MASTER_CNOC_MNOC_CFG
+};
+
+static struct qcom_icc_node slv_cnoc_mnoc_cfg = {
+ .name = "slv_cnoc_mnoc_cfg",
+ .id = MSM8996_SLAVE_CNOC_MNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 66,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_cnoc_mnoc_cfg_links),
+ .links = slv_cnoc_mnoc_cfg_links
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = MSM8996_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_snoc_mpu_cfg = {
+ .name = "slv_snoc_mpu_cfg",
+ .id = MSM8996_SLAVE_SNOC_MPU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 67,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_ebi1_phy_cfg = {
+ .name = "slv_ebi1_phy_cfg",
+ .id = MSM8996_SLAVE_EBI1_PHY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 73,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a0noc_cfg = {
+ .name = "slv_a0noc_cfg",
+ .id = MSM8996_SLAVE_A0NOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 144,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_1_cfg = {
+ .name = "slv_pcie_1_cfg",
+ .id = MSM8996_SLAVE_PCIE_1_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 89,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_2_cfg = {
+ .name = "slv_pcie_2_cfg",
+ .id = MSM8996_SLAVE_PCIE_2_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 165,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_0_cfg = {
+ .name = "slv_pcie_0_cfg",
+ .id = MSM8996_SLAVE_PCIE_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 88,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie20_ahb2phy = {
+ .name = "slv_pcie20_ahb2phy",
+ .id = MSM8996_SLAVE_PCIE20_AHB2PHY,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 163,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a0noc_mpu_cfg = {
+ .name = "slv_a0noc_mpu_cfg",
+ .id = MSM8996_SLAVE_A0NOC_MPU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 145,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_ufs_cfg = {
+ .name = "slv_ufs_cfg",
+ .id = MSM8996_SLAVE_UFS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 92,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a1noc_cfg = {
+ .name = "slv_a1noc_cfg",
+ .id = MSM8996_SLAVE_A1NOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 147,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a1noc_mpu_cfg = {
+ .name = "slv_a1noc_mpu_cfg",
+ .id = MSM8996_SLAVE_A1NOC_MPU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 148,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a2noc_cfg = {
+ .name = "slv_a2noc_cfg",
+ .id = MSM8996_SLAVE_A2NOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 150,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a2noc_mpu_cfg = {
+ .name = "slv_a2noc_mpu_cfg",
+ .id = MSM8996_SLAVE_A2NOC_MPU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 151,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_ssc_cfg = {
+ .name = "slv_ssc_cfg",
+ .id = MSM8996_SLAVE_SSC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 177,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a0noc_smmu_cfg = {
+ .name = "slv_a0noc_smmu_cfg",
+ .id = MSM8996_SLAVE_A0NOC_SMMU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 146,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a1noc_smmu_cfg = {
+ .name = "slv_a1noc_smmu_cfg",
+ .id = MSM8996_SLAVE_A1NOC_SMMU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 149,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a2noc_smmu_cfg = {
+ .name = "slv_a2noc_smmu_cfg",
+ .id = MSM8996_SLAVE_A2NOC_SMMU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 152,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_lpass_smmu_cfg = {
+ .name = "slv_lpass_smmu_cfg",
+ .id = MSM8996_SLAVE_LPASS_SMMU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 161,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
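+/*
+ * Slaves that bridge to another NoC list the peer bus's master node in
+ * links[], letting interconnect paths cross bus boundaries.
+ */
+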
+static const u16 slv_cnoc_mnoc_mmss_cfg_links[] = {
+ MSM8996_MASTER_CNOC_MNOC_MMSS_CFG
+};
+
+static struct qcom_icc_node slv_cnoc_mnoc_mmss_cfg = {
+ .name = "slv_cnoc_mnoc_mmss_cfg",
+ .id = MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 58,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_cnoc_mnoc_mmss_cfg_links),
+ .links = slv_cnoc_mnoc_mmss_cfg_links
+};
+
+static struct qcom_icc_node slv_mmagic_cfg = {
+ .name = "slv_mmagic_cfg",
+ .id = MSM8996_SLAVE_MMAGIC_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 162,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_cpr_cfg = {
+ .name = "slv_cpr_cfg",
+ .id = MSM8996_SLAVE_CPR_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 6,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_misc_cfg = {
+ .name = "slv_misc_cfg",
+ .id = MSM8996_SLAVE_MISC_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_venus_throttle_cfg = {
+ .name = "slv_venus_throttle_cfg",
+ .id = MSM8996_SLAVE_VENUS_THROTTLE_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 178,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = MSM8996_SLAVE_VENUS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_vmem_cfg = {
+ .name = "slv_vmem_cfg",
+ .id = MSM8996_SLAVE_VMEM_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 180,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_dsa_cfg = {
+ .name = "slv_dsa_cfg",
+ .id = MSM8996_SLAVE_DSA_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 157,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_mnoc_clocks_cfg = {
+ .name = "slv_mnoc_clocks_cfg",
+ .id = MSM8996_SLAVE_MMSS_CLK_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 12,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_dsa_mpu_cfg = {
+ .name = "slv_dsa_mpu_cfg",
+ .id = MSM8996_SLAVE_DSA_MPU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 158,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_mnoc_mpu_cfg = {
+ .name = "slv_mnoc_mpu_cfg",
+ .id = MSM8996_SLAVE_MNOC_MPU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 14,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_display_cfg = {
+ .name = "slv_display_cfg",
+ .id = MSM8996_SLAVE_DISPLAY_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_display_throttle_cfg = {
+ .name = "slv_display_throttle_cfg",
+ .id = MSM8996_SLAVE_DISPLAY_THROTTLE_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 156,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_camera_cfg = {
+ .name = "slv_camera_cfg",
+ .id = MSM8996_SLAVE_CAMERA_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_camera_throttle_cfg = {
+ .name = "slv_camera_throttle_cfg",
+ .id = MSM8996_SLAVE_CAMERA_THROTTLE_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 154,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_oxili_cfg = {
+ .name = "slv_oxili_cfg",
+ .id = MSM8996_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 11,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_mdp_cfg = {
+ .name = "slv_smmu_mdp_cfg",
+ .id = MSM8996_SLAVE_SMMU_MDP_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 173,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_rot_cfg = {
+ .name = "slv_smmu_rot_cfg",
+ .id = MSM8996_SLAVE_SMMU_ROTATOR_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 174,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_venus_cfg = {
+ .name = "slv_smmu_venus_cfg",
+ .id = MSM8996_SLAVE_SMMU_VENUS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 175,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_cpp_cfg = {
+ .name = "slv_smmu_cpp_cfg",
+ .id = MSM8996_SLAVE_SMMU_CPP_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 171,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_jpeg_cfg = {
+ .name = "slv_smmu_jpeg_cfg",
+ .id = MSM8996_SLAVE_SMMU_JPEG_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 172,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_vfe_cfg = {
+ .name = "slv_smmu_vfe_cfg",
+ .id = MSM8996_SLAVE_SMMU_VFE_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 176,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_mnoc_bimc_links[] = {
+ MSM8996_MASTER_MNOC_BIMC
+};
+
+static struct qcom_icc_node slv_mnoc_bimc = {
+ .name = "slv_mnoc_bimc",
+ .id = MSM8996_SLAVE_MNOC_BIMC,
+ .buswidth = 32,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 16,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_mnoc_bimc_links),
+ .links = slv_mnoc_bimc_links
+};
+
+static struct qcom_icc_node slv_vmem = {
+ .name = "slv_vmem",
+ .id = MSM8996_SLAVE_VMEM,
+ .buswidth = 32,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 179,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_srvc_mnoc = {
+ .name = "slv_srvc_mnoc",
+ .id = MSM8996_SLAVE_SERVICE_MNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 17,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_pnoc_a1noc_links[] = {
+ MSM8996_MASTER_PNOC_A1NOC
+};
+
+static struct qcom_icc_node slv_pnoc_a1noc = {
+ .name = "slv_pnoc_a1noc",
+ .id = MSM8996_SLAVE_PNOC_A1NOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 139,
+ .num_links = ARRAY_SIZE(slv_pnoc_a1noc_links),
+ .links = slv_pnoc_a1noc_links
+};
+
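+/*
+ * The peripheral slaves below are not qos.ap_owned, so no QoS registers
+ * are programmed for them from the AP side; only bandwidth votes are
+ * sent, keyed by slv_rpm_id.
+ */
+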
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = MSM8996_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 40
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = MSM8996_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33
+};
+
+static struct qcom_icc_node slv_sdcc_4 = {
+ .name = "slv_sdcc_4",
+ .id = MSM8996_SLAVE_SDCC_4,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 34
+};
+
+static struct qcom_icc_node slv_tsif = {
+ .name = "slv_tsif",
+ .id = MSM8996_SLAVE_TSIF,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 35
+};
+
+static struct qcom_icc_node slv_blsp_2 = {
+ .name = "slv_blsp_2",
+ .id = MSM8996_SLAVE_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 37
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = MSM8996_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = MSM8996_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 39
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = MSM8996_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41
+};
+
+static struct qcom_icc_node slv_ahb2phy = {
+ .name = "slv_ahb2phy",
+ .id = MSM8996_SLAVE_AHB2PHY,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 153,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_hmss = {
+ .name = "slv_hmss",
+ .id = MSM8996_SLAVE_APPSS,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_lpass = {
+ .name = "slv_lpass",
+ .id = MSM8996_SLAVE_LPASS,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 21,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_usb3 = {
+ .name = "slv_usb3",
+ .id = MSM8996_SLAVE_USB3,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 22,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_snoc_bimc_links[] = {
+ MSM8996_MASTER_SNOC_BIMC
+};
+
+static struct qcom_icc_node slv_snoc_bimc = {
+ .name = "slv_snoc_bimc",
+ .id = MSM8996_SLAVE_SNOC_BIMC,
+ .buswidth = 32,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_links),
+ .links = slv_snoc_bimc_links
+};
+
+static const u16 slv_snoc_cnoc_links[] = {
+ MSM8996_MASTER_SNOC_CNOC
+};
+
+static struct qcom_icc_node slv_snoc_cnoc = {
+ .name = "slv_snoc_cnoc",
+ .id = MSM8996_SLAVE_SNOC_CNOC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 25,
+ .num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
+ .links = slv_snoc_cnoc_links
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = MSM8996_SLAVE_OCIMEM,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26
+};
+
+static struct qcom_icc_node slv_pimem = {
+ .name = "slv_pimem",
+ .id = MSM8996_SLAVE_PIMEM,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 166
+};
+
+static const u16 slv_snoc_vmem_links[] = {
+ MSM8996_MASTER_SNOC_VMEM
+};
+
+static struct qcom_icc_node slv_snoc_vmem = {
+ .name = "slv_snoc_vmem",
+ .id = MSM8996_SLAVE_SNOC_VMEM,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 140,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_snoc_vmem_links),
+ .links = slv_snoc_vmem_links
+};
+
+static const u16 slv_snoc_pnoc_links[] = {
+ MSM8996_MASTER_SNOC_PNOC
+};
+
+static struct qcom_icc_node slv_snoc_pnoc = {
+ .name = "slv_snoc_pnoc",
+ .id = MSM8996_SLAVE_SNOC_PNOC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 28,
+ .num_links = ARRAY_SIZE(slv_snoc_pnoc_links),
+ .links = slv_snoc_pnoc_links
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = MSM8996_SLAVE_QDSS_STM,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30
+};
+
+static struct qcom_icc_node slv_pcie_0 = {
+ .name = "slv_pcie_0",
+ .id = MSM8996_SLAVE_PCIE_0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 84,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_1 = {
+ .name = "slv_pcie_1",
+ .id = MSM8996_SLAVE_PCIE_1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 85,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_2 = {
+ .name = "slv_pcie_2",
+ .id = MSM8996_SLAVE_PCIE_2,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 164,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_srvc_snoc = {
+ .name = "slv_srvc_snoc",
+ .id = MSM8996_SLAVE_SERVICE_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 29,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
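+/*
+ * Provider tables: each array below maps the consumer-visible indices
+ * from dt-bindings/interconnect/qcom,msm8996.h onto the node definitions
+ * above, one array (and one qcom_icc_desc) per NoC instance.
+ */
+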
+static struct qcom_icc_node *a0noc_nodes[] = {
+ [MASTER_PCIE_0] = &mas_pcie_0,
+ [MASTER_PCIE_1] = &mas_pcie_1,
+ [MASTER_PCIE_2] = &mas_pcie_2
+};
+
+static const struct regmap_config msm8996_a0noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_a0noc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = a0noc_nodes,
+ .num_nodes = ARRAY_SIZE(a0noc_nodes),
+ .clocks = bus_a0noc_clocks,
+ .num_clocks = ARRAY_SIZE(bus_a0noc_clocks),
+ .has_bus_pd = true,
+ .regmap_cfg = &msm8996_a0noc_regmap_config
+};
+
+static struct qcom_icc_node *a1noc_nodes[] = {
+ [MASTER_CNOC_A1NOC] = &mas_cnoc_a1noc,
+ [MASTER_CRYPTO_CORE0] = &mas_crypto_c0,
+ [MASTER_PNOC_A1NOC] = &mas_pnoc_a1noc
+};
+
+static const struct regmap_config msm8996_a1noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_a1noc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = a1noc_nodes,
+ .num_nodes = ARRAY_SIZE(a1noc_nodes),
+ .regmap_cfg = &msm8996_a1noc_regmap_config
+};
+
+static struct qcom_icc_node *a2noc_nodes[] = {
+ [MASTER_USB3] = &mas_usb3,
+ [MASTER_IPA] = &mas_ipa,
+ [MASTER_UFS] = &mas_ufs
+};
+
+static const struct regmap_config msm8996_a2noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xa000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_a2noc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = a2noc_nodes,
+ .num_nodes = ARRAY_SIZE(a2noc_nodes),
+ .regmap_cfg = &msm8996_a2noc_regmap_config
+};
+
+static struct qcom_icc_node *bimc_nodes[] = {
+ [MASTER_AMPSS_M0] = &mas_apps_proc,
+ [MASTER_GRAPHICS_3D] = &mas_oxili,
+ [MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
+ [MASTER_SNOC_BIMC] = &mas_snoc_bimc,
+ [SLAVE_EBI_CH0] = &slv_ebi,
+ [SLAVE_HMSS_L3] = &slv_hmss_l3,
+ [SLAVE_BIMC_SNOC_0] = &slv_bimc_snoc_0,
+ [SLAVE_BIMC_SNOC_1] = &slv_bimc_snoc_1
+};
+
+static const struct regmap_config msm8996_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x62000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = bimc_nodes,
+ .num_nodes = ARRAY_SIZE(bimc_nodes),
+ .regmap_cfg = &msm8996_bimc_regmap_config
+};
+
+static struct qcom_icc_node *cnoc_nodes[] = {
+ [MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
+ [MASTER_QDSS_DAP] = &mas_qdss_dap,
+ [SLAVE_CNOC_A1NOC] = &slv_cnoc_a1noc,
+ [SLAVE_CLK_CTL] = &slv_clk_ctl,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_TLMM] = &slv_tlmm,
+ [SLAVE_CRYPTO_0_CFG] = &slv_crypto0_cfg,
+ [SLAVE_MPM] = &slv_mpm,
+ [SLAVE_PIMEM_CFG] = &slv_pimem_cfg,
+ [SLAVE_IMEM_CFG] = &slv_imem_cfg,
+ [SLAVE_MESSAGE_RAM] = &slv_message_ram,
+ [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_DCC_CFG] = &slv_dcc_cfg,
+ [SLAVE_RBCPR_MX] = &slv_rbcpr_mx,
+ [SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+ [SLAVE_RBCPR_CX] = &slv_rbcpr_cx,
+ [SLAVE_QDSS_RBCPR_APU] = &slv_cpu_apu_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_SNOC_MPU_CFG] = &slv_snoc_mpu_cfg,
+ [SLAVE_EBI1_PHY_CFG] = &slv_ebi1_phy_cfg,
+ [SLAVE_A0NOC_CFG] = &slv_a0noc_cfg,
+ [SLAVE_PCIE_1_CFG] = &slv_pcie_1_cfg,
+ [SLAVE_PCIE_2_CFG] = &slv_pcie_2_cfg,
+ [SLAVE_PCIE_0_CFG] = &slv_pcie_0_cfg,
+ [SLAVE_PCIE20_AHB2PHY] = &slv_pcie20_ahb2phy,
+ [SLAVE_A0NOC_MPU_CFG] = &slv_a0noc_mpu_cfg,
+ [SLAVE_UFS_CFG] = &slv_ufs_cfg,
+ [SLAVE_A1NOC_CFG] = &slv_a1noc_cfg,
+ [SLAVE_A1NOC_MPU_CFG] = &slv_a1noc_mpu_cfg,
+ [SLAVE_A2NOC_CFG] = &slv_a2noc_cfg,
+ [SLAVE_A2NOC_MPU_CFG] = &slv_a2noc_mpu_cfg,
+ [SLAVE_SSC_CFG] = &slv_ssc_cfg,
+ [SLAVE_A0NOC_SMMU_CFG] = &slv_a0noc_smmu_cfg,
+ [SLAVE_A1NOC_SMMU_CFG] = &slv_a1noc_smmu_cfg,
+ [SLAVE_A2NOC_SMMU_CFG] = &slv_a2noc_smmu_cfg,
+ [SLAVE_LPASS_SMMU_CFG] = &slv_lpass_smmu_cfg,
+ [SLAVE_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg
+};
+
+static const struct regmap_config msm8996_cnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_cnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = cnoc_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_nodes),
+ .regmap_cfg = &msm8996_cnoc_regmap_config
+};
+
+static struct qcom_icc_node *mnoc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg,
+ [MASTER_CPP] = &mas_cpp,
+ [MASTER_JPEG] = &mas_jpeg,
+ [MASTER_MDP_PORT0] = &mas_mdp_p0,
+ [MASTER_MDP_PORT1] = &mas_mdp_p1,
+ [MASTER_ROTATOR] = &mas_rotator,
+ [MASTER_VIDEO_P0] = &mas_venus,
+ [MASTER_VFE] = &mas_vfe,
+ [MASTER_SNOC_VMEM] = &mas_snoc_vmem,
+ [MASTER_VIDEO_P0_OCMEM] = &mas_venus_vmem,
+ [MASTER_CNOC_MNOC_MMSS_CFG] = &mas_cnoc_mnoc_mmss_cfg,
+ [SLAVE_MNOC_BIMC] = &slv_mnoc_bimc,
+ [SLAVE_VMEM] = &slv_vmem,
+ [SLAVE_SERVICE_MNOC] = &slv_srvc_mnoc,
+ [SLAVE_MMAGIC_CFG] = &slv_mmagic_cfg,
+ [SLAVE_CPR_CFG] = &slv_cpr_cfg,
+ [SLAVE_MISC_CFG] = &slv_misc_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg,
+ [SLAVE_VENUS_CFG] = &slv_venus_cfg,
+ [SLAVE_VMEM_CFG] = &slv_vmem_cfg,
+ [SLAVE_DSA_CFG] = &slv_dsa_cfg,
+ [SLAVE_MMSS_CLK_CFG] = &slv_mnoc_clocks_cfg,
+ [SLAVE_DSA_MPU_CFG] = &slv_dsa_mpu_cfg,
+ [SLAVE_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
+ [SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg,
+ [SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+ [SLAVE_CAMERA_THROTTLE_CFG] = &slv_camera_throttle_cfg,
+ [SLAVE_GRAPHICS_3D_CFG] = &slv_oxili_cfg,
+ [SLAVE_SMMU_MDP_CFG] = &slv_smmu_mdp_cfg,
+ [SLAVE_SMMU_ROT_CFG] = &slv_smmu_rot_cfg,
+ [SLAVE_SMMU_VENUS_CFG] = &slv_smmu_venus_cfg,
+ [SLAVE_SMMU_CPP_CFG] = &slv_smmu_cpp_cfg,
+ [SLAVE_SMMU_JPEG_CFG] = &slv_smmu_jpeg_cfg,
+ [SLAVE_SMMU_VFE_CFG] = &slv_smmu_vfe_cfg
+};
+
+static const struct regmap_config msm8996_mnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_mnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = mnoc_nodes,
+ .num_nodes = ARRAY_SIZE(mnoc_nodes),
+ .clocks = bus_mm_clocks,
+ .num_clocks = ARRAY_SIZE(bus_mm_clocks),
+ .regmap_cfg = &msm8996_mnoc_regmap_config
+};
+
+static struct qcom_icc_node *pnoc_nodes[] = {
+ [MASTER_SNOC_PNOC] = &mas_snoc_pnoc,
+ [MASTER_SDCC_1] = &mas_sdcc_1,
+ [MASTER_SDCC_2] = &mas_sdcc_2,
+ [MASTER_SDCC_4] = &mas_sdcc_4,
+ [MASTER_USB_HS] = &mas_usb_hs,
+ [MASTER_BLSP_1] = &mas_blsp_1,
+ [MASTER_BLSP_2] = &mas_blsp_2,
+ [MASTER_TSIF] = &mas_tsif,
+ [SLAVE_PNOC_A1NOC] = &slv_pnoc_a1noc,
+ [SLAVE_USB_HS] = &slv_usb_hs,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_SDCC_4] = &slv_sdcc_4,
+ [SLAVE_TSIF] = &slv_tsif,
+ [SLAVE_BLSP_2] = &slv_blsp_2,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_BLSP_1] = &slv_blsp_1,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_AHB2PHY] = &slv_ahb2phy
+};
+
+static const struct regmap_config msm8996_pnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_pnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = pnoc_nodes,
+ .num_nodes = ARRAY_SIZE(pnoc_nodes),
+ .regmap_cfg = &msm8996_pnoc_regmap_config
+};
+
+static struct qcom_icc_node *snoc_nodes[] = {
+ [MASTER_HMSS] = &mas_hmss,
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_SNOC_CFG] = &mas_snoc_cfg,
+ [MASTER_BIMC_SNOC_0] = &mas_bimc_snoc_0,
+ [MASTER_BIMC_SNOC_1] = &mas_bimc_snoc_1,
+ [MASTER_A0NOC_SNOC] = &mas_a0noc_snoc,
+ [MASTER_A1NOC_SNOC] = &mas_a1noc_snoc,
+ [MASTER_A2NOC_SNOC] = &mas_a2noc_snoc,
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [SLAVE_A0NOC_SNOC] = &slv_a0noc_snoc,
+ [SLAVE_A1NOC_SNOC] = &slv_a1noc_snoc,
+ [SLAVE_A2NOC_SNOC] = &slv_a2noc_snoc,
+ [SLAVE_HMSS] = &slv_hmss,
+ [SLAVE_LPASS] = &slv_lpass,
+ [SLAVE_USB3] = &slv_usb3,
+ [SLAVE_SNOC_BIMC] = &slv_snoc_bimc,
+ [SLAVE_SNOC_CNOC] = &slv_snoc_cnoc,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_PIMEM] = &slv_pimem,
+ [SLAVE_SNOC_VMEM] = &slv_snoc_vmem,
+ [SLAVE_SNOC_PNOC] = &slv_snoc_pnoc,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_PCIE_0] = &slv_pcie_0,
+ [SLAVE_PCIE_1] = &slv_pcie_1,
+ [SLAVE_PCIE_2] = &slv_pcie_2,
+ [SLAVE_SERVICE_SNOC] = &slv_srvc_snoc
+};
+
+static const struct regmap_config msm8996_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_snoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = snoc_nodes,
+ .num_nodes = ARRAY_SIZE(snoc_nodes),
+ .regmap_cfg = &msm8996_snoc_regmap_config
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,msm8996-a0noc", .data = &msm8996_a0noc},
+ { .compatible = "qcom,msm8996-a1noc", .data = &msm8996_a1noc},
+ { .compatible = "qcom,msm8996-a2noc", .data = &msm8996_a2noc},
+ { .compatible = "qcom,msm8996-bimc", .data = &msm8996_bimc},
+ { .compatible = "qcom,msm8996-cnoc", .data = &msm8996_cnoc},
+ { .compatible = "qcom,msm8996-mnoc", .data = &msm8996_mnoc},
+ { .compatible = "qcom,msm8996-pnoc", .data = &msm8996_pnoc},
+ { .compatible = "qcom,msm8996-snoc", .data = &msm8996_snoc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8996",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ }
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("Qualcomm MSM8996 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/msm8996.h b/drivers/interconnect/qcom/msm8996.h
new file mode 100644
index 000000000000..42b54ffcaa7b
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8996.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Qualcomm MSM8996 interconnect IDs
+ *
+ * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_MSM8996_H__
+#define __DRIVERS_INTERCONNECT_QCOM_MSM8996_H__
+
+#define MSM8996_MASTER_PCIE_0 1
+#define MSM8996_MASTER_PCIE_1 2
+#define MSM8996_MASTER_PCIE_2 3
+#define MSM8996_MASTER_CNOC_A1NOC 4
+#define MSM8996_MASTER_CRYPTO_CORE0 5
+#define MSM8996_MASTER_PNOC_A1NOC 6
+#define MSM8996_MASTER_USB3 7
+#define MSM8996_MASTER_IPA 8
+#define MSM8996_MASTER_UFS 9
+#define MSM8996_MASTER_AMPSS_M0 10
+#define MSM8996_MASTER_GRAPHICS_3D 11
+#define MSM8996_MASTER_MNOC_BIMC 12
+#define MSM8996_MASTER_SNOC_BIMC 13
+#define MSM8996_MASTER_SNOC_CNOC 14
+#define MSM8996_MASTER_QDSS_DAP 15
+#define MSM8996_MASTER_CNOC_MNOC_MMSS_CFG 16
+#define MSM8996_MASTER_CNOC_MNOC_CFG 17
+#define MSM8996_MASTER_CPP 18
+#define MSM8996_MASTER_JPEG 19
+#define MSM8996_MASTER_MDP_PORT0 20
+#define MSM8996_MASTER_MDP_PORT1 21
+#define MSM8996_MASTER_ROTATOR 22
+#define MSM8996_MASTER_VIDEO_P0 23
+#define MSM8996_MASTER_VFE 24
+#define MSM8996_MASTER_SNOC_VMEM 25
+#define MSM8996_MASTER_VIDEO_P0_OCMEM 26
+#define MSM8996_MASTER_SNOC_PNOC 27
+#define MSM8996_MASTER_SDCC_1 28
+#define MSM8996_MASTER_SDCC_2 29
+#define MSM8996_MASTER_SDCC_4 30
+#define MSM8996_MASTER_USB_HS 31
+#define MSM8996_MASTER_BLSP_1 32
+#define MSM8996_MASTER_BLSP_2 33
+#define MSM8996_MASTER_TSIF 34
+#define MSM8996_MASTER_HMSS 35
+#define MSM8996_MASTER_QDSS_BAM 36
+#define MSM8996_MASTER_SNOC_CFG 37
+#define MSM8996_MASTER_BIMC_SNOC_0 38
+#define MSM8996_MASTER_BIMC_SNOC_1 39
+#define MSM8996_MASTER_A0NOC_SNOC 40
+#define MSM8996_MASTER_A1NOC_SNOC 41
+#define MSM8996_MASTER_A2NOC_SNOC 42
+#define MSM8996_MASTER_QDSS_ETR 43
+
+#define MSM8996_SLAVE_A0NOC_SNOC 44
+#define MSM8996_SLAVE_A1NOC_SNOC 45
+#define MSM8996_SLAVE_A2NOC_SNOC 46
+#define MSM8996_SLAVE_EBI_CH0 47
+#define MSM8996_SLAVE_HMSS_L3 48
+#define MSM8996_SLAVE_BIMC_SNOC_0 49
+#define MSM8996_SLAVE_BIMC_SNOC_1 50
+#define MSM8996_SLAVE_CNOC_A1NOC 51
+#define MSM8996_SLAVE_CLK_CTL 52
+#define MSM8996_SLAVE_TCSR 53
+#define MSM8996_SLAVE_TLMM 54
+#define MSM8996_SLAVE_CRYPTO_0_CFG 55
+#define MSM8996_SLAVE_MPM 56
+#define MSM8996_SLAVE_PIMEM_CFG 57
+#define MSM8996_SLAVE_IMEM_CFG 58
+#define MSM8996_SLAVE_MESSAGE_RAM 59
+#define MSM8996_SLAVE_BIMC_CFG 60
+#define MSM8996_SLAVE_PMIC_ARB 61
+#define MSM8996_SLAVE_PRNG 62
+#define MSM8996_SLAVE_DCC_CFG 63
+#define MSM8996_SLAVE_RBCPR_MX 64
+#define MSM8996_SLAVE_QDSS_CFG 65
+#define MSM8996_SLAVE_RBCPR_CX 66
+#define MSM8996_SLAVE_QDSS_RBCPR_APU_CFG 67
+#define MSM8996_SLAVE_CNOC_MNOC_CFG 68
+#define MSM8996_SLAVE_SNOC_CFG 69
+#define MSM8996_SLAVE_SNOC_MPU_CFG 70
+#define MSM8996_SLAVE_EBI1_PHY_CFG 71
+#define MSM8996_SLAVE_A0NOC_CFG 72
+#define MSM8996_SLAVE_PCIE_1_CFG 73
+#define MSM8996_SLAVE_PCIE_2_CFG 74
+#define MSM8996_SLAVE_PCIE_0_CFG 75
+#define MSM8996_SLAVE_PCIE20_AHB2PHY 76
+#define MSM8996_SLAVE_A0NOC_MPU_CFG 77
+#define MSM8996_SLAVE_UFS_CFG 78
+#define MSM8996_SLAVE_A1NOC_CFG 79
+#define MSM8996_SLAVE_A1NOC_MPU_CFG 80
+#define MSM8996_SLAVE_A2NOC_CFG 81
+#define MSM8996_SLAVE_A2NOC_MPU_CFG 82
+#define MSM8996_SLAVE_SSC_CFG 83
+#define MSM8996_SLAVE_A0NOC_SMMU_CFG 84
+#define MSM8996_SLAVE_A1NOC_SMMU_CFG 85
+#define MSM8996_SLAVE_A2NOC_SMMU_CFG 86
+#define MSM8996_SLAVE_LPASS_SMMU_CFG 87
+#define MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG 88
+#define MSM8996_SLAVE_MMAGIC_CFG 89
+#define MSM8996_SLAVE_CPR_CFG 90
+#define MSM8996_SLAVE_MISC_CFG 91
+#define MSM8996_SLAVE_VENUS_THROTTLE_CFG 92
+#define MSM8996_SLAVE_VENUS_CFG 93
+#define MSM8996_SLAVE_VMEM_CFG 94
+#define MSM8996_SLAVE_DSA_CFG 95
+#define MSM8996_SLAVE_MMSS_CLK_CFG 96
+#define MSM8996_SLAVE_DSA_MPU_CFG 97
+#define MSM8996_SLAVE_MNOC_MPU_CFG 98
+#define MSM8996_SLAVE_DISPLAY_CFG 99
+#define MSM8996_SLAVE_DISPLAY_THROTTLE_CFG 100
+#define MSM8996_SLAVE_CAMERA_CFG 101
+#define MSM8996_SLAVE_CAMERA_THROTTLE_CFG 102
+#define MSM8996_SLAVE_GRAPHICS_3D_CFG 103
+#define MSM8996_SLAVE_SMMU_MDP_CFG 104
+#define MSM8996_SLAVE_SMMU_ROTATOR_CFG 105
+#define MSM8996_SLAVE_SMMU_VENUS_CFG 106
+#define MSM8996_SLAVE_SMMU_CPP_CFG 107
+#define MSM8996_SLAVE_SMMU_JPEG_CFG 108
+#define MSM8996_SLAVE_SMMU_VFE_CFG 109
+#define MSM8996_SLAVE_MNOC_BIMC 110
+#define MSM8996_SLAVE_VMEM 111
+#define MSM8996_SLAVE_SERVICE_MNOC 112
+#define MSM8996_SLAVE_PNOC_A1NOC 113
+#define MSM8996_SLAVE_USB_HS 114
+#define MSM8996_SLAVE_SDCC_2 115
+#define MSM8996_SLAVE_SDCC_4 116
+#define MSM8996_SLAVE_TSIF 117
+#define MSM8996_SLAVE_BLSP_2 118
+#define MSM8996_SLAVE_SDCC_1 119
+#define MSM8996_SLAVE_BLSP_1 120
+#define MSM8996_SLAVE_PDM 121
+#define MSM8996_SLAVE_AHB2PHY 122
+#define MSM8996_SLAVE_APPSS 123
+#define MSM8996_SLAVE_LPASS 124
+#define MSM8996_SLAVE_USB3 125
+#define MSM8996_SLAVE_SNOC_BIMC 126
+#define MSM8996_SLAVE_SNOC_CNOC 127
+#define MSM8996_SLAVE_OCIMEM 128
+#define MSM8996_SLAVE_PIMEM 129
+#define MSM8996_SLAVE_SNOC_VMEM 130
+#define MSM8996_SLAVE_SNOC_PNOC 131
+#define MSM8996_SLAVE_QDSS_STM 132
+#define MSM8996_SLAVE_PCIE_0 133
+#define MSM8996_SLAVE_PCIE_1 134
+#define MSM8996_SLAVE_PCIE_2 135
+#define MSM8996_SLAVE_SERVICE_SNOC 136
+
+#endif /* __DRIVERS_INTERCONNECT_QCOM_MSM8996_H__ */
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index c7af143980de..eec13099a6a3 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/bitfield.h>
@@ -15,6 +15,7 @@
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include "sc7180.h"
+#include "sc7280.h"
#include "sc8180x.h"
#include "sdm845.h"
#include "sm8150.h"
@@ -114,6 +115,22 @@ static const struct qcom_osm_l3_desc sc7180_icc_osm_l3 = {
.reg_perf_state = OSM_REG_PERF_STATE,
};
+DEFINE_QNODE(sc7280_epss_apps_l3, SC7280_MASTER_EPSS_L3_APPS, 32, SC7280_SLAVE_EPSS_L3);
+DEFINE_QNODE(sc7280_epss_l3, SC7280_SLAVE_EPSS_L3, 32);
+
+static const struct qcom_osm_l3_node *sc7280_epss_l3_nodes[] = {
+ [MASTER_EPSS_L3_APPS] = &sc7280_epss_apps_l3,
+ [SLAVE_EPSS_L3_SHARED] = &sc7280_epss_l3,
+};
+
+static const struct qcom_osm_l3_desc sc7280_icc_epss_l3 = {
+ .nodes = sc7280_epss_l3_nodes,
+ .num_nodes = ARRAY_SIZE(sc7280_epss_l3_nodes),
+ .lut_row_size = EPSS_LUT_ROW_SIZE,
+ .reg_freq_lut = EPSS_REG_FREQ_LUT,
+ .reg_perf_state = EPSS_REG_PERF_STATE,
+};
+
DEFINE_QNODE(sc8180x_osm_apps_l3, SC8180X_MASTER_OSM_L3_APPS, 32, SC8180X_SLAVE_OSM_L3);
DEFINE_QNODE(sc8180x_osm_l3, SC8180X_SLAVE_OSM_L3, 32);
@@ -326,6 +343,7 @@ err:
static const struct of_device_id osm_l3_of_match[] = {
{ .compatible = "qcom,sc7180-osm-l3", .data = &sc7180_icc_osm_l3 },
+ { .compatible = "qcom,sc7280-epss-l3", .data = &sc7280_icc_epss_l3 },
{ .compatible = "qcom,sdm845-osm-l3", .data = &sdm845_icc_osm_l3 },
{ .compatible = "qcom,sm8150-osm-l3", .data = &sm8150_icc_osm_l3 },
{ .compatible = "qcom,sc8180x-osm-l3", .data = &sc8180x_icc_osm_l3 },
diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
new file mode 100644
index 000000000000..74404e0b2080
--- /dev/null
+++ b/drivers/interconnect/qcom/qcm2290.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm QCM2290 Network-on-Chip (NoC) QoS driver
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ *
+ */
+
+#include <dt-bindings/interconnect/qcom,qcm2290.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "icc-rpm.h"
+#include "smd-rpm.h"
+
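+/*
+ * Driver-local node IDs. DT consumers instead use the MASTER_*/SLAVE_*
+ * indices from dt-bindings/interconnect/qcom,qcm2290.h, which index the
+ * provider node arrays further down.
+ */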
+enum {
+ QCM2290_MASTER_APPSS_PROC = 1,
+ QCM2290_MASTER_SNOC_BIMC_RT,
+ QCM2290_MASTER_SNOC_BIMC_NRT,
+ QCM2290_MASTER_SNOC_BIMC,
+ QCM2290_MASTER_TCU_0,
+ QCM2290_MASTER_GFX3D,
+ QCM2290_MASTER_SNOC_CNOC,
+ QCM2290_MASTER_QDSS_DAP,
+ QCM2290_MASTER_CRYPTO_CORE0,
+ QCM2290_MASTER_SNOC_CFG,
+ QCM2290_MASTER_TIC,
+ QCM2290_MASTER_ANOC_SNOC,
+ QCM2290_MASTER_BIMC_SNOC,
+ QCM2290_MASTER_PIMEM,
+ QCM2290_MASTER_QDSS_BAM,
+ QCM2290_MASTER_QUP_0,
+ QCM2290_MASTER_IPA,
+ QCM2290_MASTER_QDSS_ETR,
+ QCM2290_MASTER_SDCC_1,
+ QCM2290_MASTER_SDCC_2,
+ QCM2290_MASTER_QPIC,
+ QCM2290_MASTER_USB3_0,
+ QCM2290_MASTER_QUP_CORE_0,
+ QCM2290_MASTER_CAMNOC_SF,
+ QCM2290_MASTER_VIDEO_P0,
+ QCM2290_MASTER_VIDEO_PROC,
+ QCM2290_MASTER_CAMNOC_HF,
+ QCM2290_MASTER_MDP0,
+
+ QCM2290_SLAVE_EBI1,
+ QCM2290_SLAVE_BIMC_SNOC,
+ QCM2290_SLAVE_BIMC_CFG,
+ QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ QCM2290_SLAVE_CAMERA_CFG,
+ QCM2290_SLAVE_CLK_CTL,
+ QCM2290_SLAVE_CRYPTO_0_CFG,
+ QCM2290_SLAVE_DISPLAY_CFG,
+ QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
+ QCM2290_SLAVE_GPU_CFG,
+ QCM2290_SLAVE_HWKM,
+ QCM2290_SLAVE_IMEM_CFG,
+ QCM2290_SLAVE_IPA_CFG,
+ QCM2290_SLAVE_LPASS,
+ QCM2290_SLAVE_MESSAGE_RAM,
+ QCM2290_SLAVE_PDM,
+ QCM2290_SLAVE_PIMEM_CFG,
+ QCM2290_SLAVE_PKA_WRAPPER,
+ QCM2290_SLAVE_PMIC_ARB,
+ QCM2290_SLAVE_PRNG,
+ QCM2290_SLAVE_QDSS_CFG,
+ QCM2290_SLAVE_QM_CFG,
+ QCM2290_SLAVE_QM_MPU_CFG,
+ QCM2290_SLAVE_QPIC,
+ QCM2290_SLAVE_QUP_0,
+ QCM2290_SLAVE_SDCC_1,
+ QCM2290_SLAVE_SDCC_2,
+ QCM2290_SLAVE_SNOC_CFG,
+ QCM2290_SLAVE_TCSR,
+ QCM2290_SLAVE_USB3,
+ QCM2290_SLAVE_VENUS_CFG,
+ QCM2290_SLAVE_VENUS_THROTTLE_CFG,
+ QCM2290_SLAVE_VSENSE_CTRL_CFG,
+ QCM2290_SLAVE_SERVICE_CNOC,
+ QCM2290_SLAVE_APPSS,
+ QCM2290_SLAVE_SNOC_CNOC,
+ QCM2290_SLAVE_IMEM,
+ QCM2290_SLAVE_PIMEM,
+ QCM2290_SLAVE_SNOC_BIMC,
+ QCM2290_SLAVE_SERVICE_SNOC,
+ QCM2290_SLAVE_QDSS_STM,
+ QCM2290_SLAVE_TCU,
+ QCM2290_SLAVE_ANOC_SNOC,
+ QCM2290_SLAVE_QUP_CORE_0,
+ QCM2290_SLAVE_SNOC_BIMC_NRT,
+ QCM2290_SLAVE_SNOC_BIMC_RT,
+};
+
+/* Master nodes */
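+/*
+ * For AP-owned masters, qos.qos_port selects the NoC QoS port to program.
+ * NOC_QOS_MODE_FIXED nodes are given the listed prio_level/areq_prio,
+ * while NOC_QOS_MODE_BYPASS and NOC_QOS_MODE_INVALID nodes get no fixed
+ * priority from the AP.
+ */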
+static const u16 mas_appss_proc_links[] = {
+ QCM2290_SLAVE_EBI1,
+ QCM2290_SLAVE_BIMC_SNOC,
+};
+
+static struct qcom_icc_node mas_appss_proc = {
+ .id = QCM2290_MASTER_APPSS_PROC,
+ .name = "mas_apps_proc",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 0,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.prio_level = 0,
+ .qos.areq_prio = 0,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_appss_proc_links),
+ .links = mas_appss_proc_links,
+};
+
+static const u16 mas_snoc_bimc_rt_links[] = {
+ QCM2290_SLAVE_EBI1,
+};
+
+static struct qcom_icc_node mas_snoc_bimc_rt = {
+ .id = QCM2290_MASTER_SNOC_BIMC_RT,
+ .name = "mas_snoc_bimc_rt",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 2,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .mas_rpm_id = 163,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_rt_links),
+ .links = mas_snoc_bimc_rt_links,
+};
+
+static const u16 mas_snoc_bimc_nrt_links[] = {
+ QCM2290_SLAVE_EBI1,
+};
+
+static struct qcom_icc_node mas_snoc_bimc_nrt = {
+ .id = QCM2290_MASTER_SNOC_BIMC_NRT,
+ .name = "mas_snoc_bimc_nrt",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 2,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .mas_rpm_id = 163,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_nrt_links),
+ .links = mas_snoc_bimc_nrt_links,
+};
+
+static const u16 mas_snoc_bimc_links[] = {
+ QCM2290_SLAVE_EBI1,
+};
+
+static struct qcom_icc_node mas_snoc_bimc = {
+ .id = QCM2290_MASTER_SNOC_BIMC,
+ .name = "mas_snoc_bimc",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 2,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .mas_rpm_id = 164,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_links),
+ .links = mas_snoc_bimc_links,
+};
+
+static const u16 mas_tcu_0_links[] = {
+ QCM2290_SLAVE_EBI1,
+ QCM2290_SLAVE_BIMC_SNOC,
+};
+
+static struct qcom_icc_node mas_tcu_0 = {
+ .id = QCM2290_MASTER_TCU_0,
+ .name = "mas_tcu_0",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 4,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.prio_level = 6,
+ .qos.areq_prio = 6,
+ .mas_rpm_id = 102,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_tcu_0_links),
+ .links = mas_tcu_0_links,
+};
+
+static const u16 mas_snoc_cnoc_links[] = {
+ QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ QCM2290_SLAVE_SDCC_2,
+ QCM2290_SLAVE_SDCC_1,
+ QCM2290_SLAVE_QM_CFG,
+ QCM2290_SLAVE_BIMC_CFG,
+ QCM2290_SLAVE_USB3,
+ QCM2290_SLAVE_QM_MPU_CFG,
+ QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ QCM2290_SLAVE_QDSS_CFG,
+ QCM2290_SLAVE_PDM,
+ QCM2290_SLAVE_IPA_CFG,
+ QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
+ QCM2290_SLAVE_TCSR,
+ QCM2290_SLAVE_MESSAGE_RAM,
+ QCM2290_SLAVE_PMIC_ARB,
+ QCM2290_SLAVE_LPASS,
+ QCM2290_SLAVE_DISPLAY_CFG,
+ QCM2290_SLAVE_VENUS_CFG,
+ QCM2290_SLAVE_GPU_CFG,
+ QCM2290_SLAVE_IMEM_CFG,
+ QCM2290_SLAVE_SNOC_CFG,
+ QCM2290_SLAVE_SERVICE_CNOC,
+ QCM2290_SLAVE_VENUS_THROTTLE_CFG,
+ QCM2290_SLAVE_PKA_WRAPPER,
+ QCM2290_SLAVE_HWKM,
+ QCM2290_SLAVE_PRNG,
+ QCM2290_SLAVE_VSENSE_CTRL_CFG,
+ QCM2290_SLAVE_CRYPTO_0_CFG,
+ QCM2290_SLAVE_PIMEM_CFG,
+ QCM2290_SLAVE_QUP_0,
+ QCM2290_SLAVE_CAMERA_CFG,
+ QCM2290_SLAVE_CLK_CTL,
+ QCM2290_SLAVE_QPIC,
+};
+
+static struct qcom_icc_node mas_snoc_cnoc = {
+ .id = QCM2290_MASTER_SNOC_CNOC,
+ .name = "mas_snoc_cnoc",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = 52,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
+ .links = mas_snoc_cnoc_links,
+};
+
+static const u16 mas_qdss_dap_links[] = {
+ QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ QCM2290_SLAVE_SDCC_2,
+ QCM2290_SLAVE_SDCC_1,
+ QCM2290_SLAVE_QM_CFG,
+ QCM2290_SLAVE_BIMC_CFG,
+ QCM2290_SLAVE_USB3,
+ QCM2290_SLAVE_QM_MPU_CFG,
+ QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ QCM2290_SLAVE_QDSS_CFG,
+ QCM2290_SLAVE_PDM,
+ QCM2290_SLAVE_IPA_CFG,
+ QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
+ QCM2290_SLAVE_TCSR,
+ QCM2290_SLAVE_MESSAGE_RAM,
+ QCM2290_SLAVE_PMIC_ARB,
+ QCM2290_SLAVE_LPASS,
+ QCM2290_SLAVE_DISPLAY_CFG,
+ QCM2290_SLAVE_VENUS_CFG,
+ QCM2290_SLAVE_GPU_CFG,
+ QCM2290_SLAVE_IMEM_CFG,
+ QCM2290_SLAVE_SNOC_CFG,
+ QCM2290_SLAVE_SERVICE_CNOC,
+ QCM2290_SLAVE_VENUS_THROTTLE_CFG,
+ QCM2290_SLAVE_PKA_WRAPPER,
+ QCM2290_SLAVE_HWKM,
+ QCM2290_SLAVE_PRNG,
+ QCM2290_SLAVE_VSENSE_CTRL_CFG,
+ QCM2290_SLAVE_CRYPTO_0_CFG,
+ QCM2290_SLAVE_PIMEM_CFG,
+ QCM2290_SLAVE_QUP_0,
+ QCM2290_SLAVE_CAMERA_CFG,
+ QCM2290_SLAVE_CLK_CTL,
+ QCM2290_SLAVE_QPIC,
+};
+
+static struct qcom_icc_node mas_qdss_dap = {
+ .id = QCM2290_MASTER_QDSS_DAP,
+ .name = "mas_qdss_dap",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = 49,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qdss_dap_links),
+ .links = mas_qdss_dap_links,
+};
+
+static const u16 mas_crypto_core0_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC
+};
+
+static struct qcom_icc_node mas_crypto_core0 = {
+ .id = QCM2290_MASTER_CRYPTO_CORE0,
+ .name = "mas_crypto_core0",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 22,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_crypto_core0_links),
+ .links = mas_crypto_core0_links,
+};
+
+static const u16 mas_qup_core_0_links[] = {
+ QCM2290_SLAVE_QUP_CORE_0,
+};
+
+static struct qcom_icc_node mas_qup_core_0 = {
+ .id = QCM2290_MASTER_QUP_CORE_0,
+ .name = "mas_qup_core_0",
+ .buswidth = 4,
+ .mas_rpm_id = 170,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qup_core_0_links),
+ .links = mas_qup_core_0_links,
+};
+
+static const u16 mas_camnoc_sf_links[] = {
+ QCM2290_SLAVE_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node mas_camnoc_sf = {
+ .id = QCM2290_MASTER_CAMNOC_SF,
+ .name = "mas_camnoc_sf",
+ .buswidth = 32,
+ .qos.ap_owned = true,
+ .qos.qos_port = 4,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .mas_rpm_id = 172,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_camnoc_sf_links),
+ .links = mas_camnoc_sf_links,
+};
+
+static const u16 mas_camnoc_hf_links[] = {
+ QCM2290_SLAVE_SNOC_BIMC_RT,
+};
+
+static struct qcom_icc_node mas_camnoc_hf = {
+ .id = QCM2290_MASTER_CAMNOC_HF,
+ .name = "mas_camnoc_hf",
+ .buswidth = 32,
+ .qos.ap_owned = true,
+ .qos.qos_port = 10,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .qos.urg_fwd_en = true,
+ .mas_rpm_id = 173,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_camnoc_hf_links),
+ .links = mas_camnoc_hf_links,
+};
+
+static const u16 mas_mdp0_links[] = {
+ QCM2290_SLAVE_SNOC_BIMC_RT,
+};
+
+static struct qcom_icc_node mas_mdp0 = {
+ .id = QCM2290_MASTER_MDP0,
+ .name = "mas_mdp0",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 5,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .qos.urg_fwd_en = true,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_mdp0_links),
+ .links = mas_mdp0_links,
+};
+
+static const u16 mas_video_p0_links[] = {
+ QCM2290_SLAVE_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node mas_video_p0 = {
+ .id = QCM2290_MASTER_VIDEO_P0,
+ .name = "mas_video_p0",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 9,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .qos.urg_fwd_en = true,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_video_p0_links),
+ .links = mas_video_p0_links,
+};
+
+static const u16 mas_video_proc_links[] = {
+ QCM2290_SLAVE_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node mas_video_proc = {
+ .id = QCM2290_MASTER_VIDEO_PROC,
+ .name = "mas_video_proc",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 13,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 4,
+ .mas_rpm_id = 168,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_video_proc_links),
+ .links = mas_video_proc_links,
+};
+
+static const u16 mas_snoc_cfg_links[] = {
+ QCM2290_SLAVE_SERVICE_SNOC,
+};
+
+static struct qcom_icc_node mas_snoc_cfg = {
+ .id = QCM2290_MASTER_SNOC_CFG,
+ .name = "mas_snoc_cfg",
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = 20,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cfg_links),
+ .links = mas_snoc_cfg_links,
+};
+
+static const u16 mas_tic_links[] = {
+ QCM2290_SLAVE_PIMEM,
+ QCM2290_SLAVE_IMEM,
+ QCM2290_SLAVE_APPSS,
+ QCM2290_SLAVE_SNOC_BIMC,
+ QCM2290_SLAVE_SNOC_CNOC,
+ QCM2290_SLAVE_TCU,
+ QCM2290_SLAVE_QDSS_STM,
+};
+
+static struct qcom_icc_node mas_tic = {
+ .id = QCM2290_MASTER_TIC,
+ .name = "mas_tic",
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_port = 8,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 51,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_tic_links),
+ .links = mas_tic_links,
+};
+
+static const u16 mas_anoc_snoc_links[] = {
+ QCM2290_SLAVE_PIMEM,
+ QCM2290_SLAVE_IMEM,
+ QCM2290_SLAVE_APPSS,
+ QCM2290_SLAVE_SNOC_BIMC,
+ QCM2290_SLAVE_SNOC_CNOC,
+ QCM2290_SLAVE_TCU,
+ QCM2290_SLAVE_QDSS_STM,
+};
+
+static struct qcom_icc_node mas_anoc_snoc = {
+ .id = QCM2290_MASTER_ANOC_SNOC,
+ .name = "mas_anoc_snoc",
+ .buswidth = 16,
+ .mas_rpm_id = 110,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_anoc_snoc_links),
+ .links = mas_anoc_snoc_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ QCM2290_SLAVE_PIMEM,
+ QCM2290_SLAVE_IMEM,
+ QCM2290_SLAVE_APPSS,
+ QCM2290_SLAVE_SNOC_CNOC,
+ QCM2290_SLAVE_TCU,
+ QCM2290_SLAVE_QDSS_STM,
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .id = QCM2290_MASTER_BIMC_SNOC,
+ .name = "mas_bimc_snoc",
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_pimem_links[] = {
+ QCM2290_SLAVE_IMEM,
+ QCM2290_SLAVE_SNOC_BIMC,
+};
+
+static struct qcom_icc_node mas_pimem = {
+ .id = QCM2290_MASTER_PIMEM,
+ .name = "mas_pimem",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 20,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 113,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pimem_links),
+ .links = mas_pimem_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .id = QCM2290_MASTER_QDSS_BAM,
+ .name = "mas_qdss_bam",
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_port = 2,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_qup_0_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_qup_0 = {
+ .id = QCM2290_MASTER_QUP_0,
+ .name = "mas_qup_0",
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_port = 0,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 166,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qup_0_links),
+ .links = mas_qup_0_links,
+};
+
+static const u16 mas_ipa_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_ipa = {
+ .id = QCM2290_MASTER_IPA,
+ .name = "mas_ipa",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 3,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 59,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_ipa_links),
+ .links = mas_ipa_links,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .id = QCM2290_MASTER_QDSS_ETR,
+ .name = "mas_qdss_etr",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 12,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 mas_sdcc_1_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .id = QCM2290_MASTER_SDCC_1,
+ .name = "mas_sdcc_1",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 17,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_sdcc_1_links),
+ .links = mas_sdcc_1_links,
+};
+
+static const u16 mas_sdcc_2_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .id = QCM2290_MASTER_SDCC_2,
+ .name = "mas_sdcc_2",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 23,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_sdcc_2_links),
+ .links = mas_sdcc_2_links,
+};
+
+static const u16 mas_qpic_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_qpic = {
+ .id = QCM2290_MASTER_QPIC,
+ .name = "mas_qpic",
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_port = 1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 58,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qpic_links),
+ .links = mas_qpic_links,
+};
+
+static const u16 mas_usb3_0_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_usb3_0 = {
+ .id = QCM2290_MASTER_USB3_0,
+ .name = "mas_usb3_0",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 24,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 32,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb3_0_links),
+ .links = mas_usb3_0_links,
+};
+
+static const u16 mas_gfx3d_links[] = {
+ QCM2290_SLAVE_EBI1,
+};
+
+static struct qcom_icc_node mas_gfx3d = {
+ .id = QCM2290_MASTER_GFX3D,
+ .name = "mas_gfx3d",
+ .buswidth = 32,
+ .qos.ap_owned = true,
+ .qos.qos_port = 1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.prio_level = 0,
+ .qos.areq_prio = 0,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_gfx3d_links),
+ .links = mas_gfx3d_links,
+};
+
+/* Slave nodes */
+static struct qcom_icc_node slv_ebi1 = {
+ .name = "slv_ebi1",
+ .id = QCM2290_SLAVE_EBI1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ QCM2290_MASTER_BIMC_SNOC,
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = QCM2290_SLAVE_BIMC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node slv_bimc_cfg = {
+ .name = "slv_bimc_cfg",
+ .id = QCM2290_SLAVE_BIMC_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 56,
+};
+
+static struct qcom_icc_node slv_camera_nrt_throttle_cfg = {
+ .name = "slv_camera_nrt_throttle_cfg",
+ .id = QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 271,
+};
+
+static struct qcom_icc_node slv_camera_rt_throttle_cfg = {
+ .name = "slv_camera_rt_throttle_cfg",
+ .id = QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 279,
+};
+
+static struct qcom_icc_node slv_camera_cfg = {
+ .name = "slv_camera_cfg",
+ .id = QCM2290_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+};
+
+static struct qcom_icc_node slv_clk_ctl = {
+ .name = "slv_clk_ctl",
+ .id = QCM2290_SLAVE_CLK_CTL,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 47,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = QCM2290_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+};
+
+static struct qcom_icc_node slv_display_cfg = {
+ .name = "slv_display_cfg",
+ .id = QCM2290_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+};
+
+static struct qcom_icc_node slv_display_throttle_cfg = {
+ .name = "slv_display_throttle_cfg",
+ .id = QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 156,
+};
+
+static struct qcom_icc_node slv_gpu_cfg = {
+ .name = "slv_gpu_cfg",
+ .id = QCM2290_SLAVE_GPU_CFG,
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 275,
+};
+
+static struct qcom_icc_node slv_hwkm = {
+ .name = "slv_hwkm",
+ .id = QCM2290_SLAVE_HWKM,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 280,
+};
+
+static struct qcom_icc_node slv_imem_cfg = {
+ .name = "slv_imem_cfg",
+ .id = QCM2290_SLAVE_IMEM_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 54,
+};
+
+static struct qcom_icc_node slv_ipa_cfg = {
+ .name = "slv_ipa_cfg",
+ .id = QCM2290_SLAVE_IPA_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 183,
+};
+
+static struct qcom_icc_node slv_lpass = {
+ .name = "slv_lpass",
+ .id = QCM2290_SLAVE_LPASS,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 21,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = QCM2290_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = QCM2290_SLAVE_PDM,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41,
+};
+
+static struct qcom_icc_node slv_pimem_cfg = {
+ .name = "slv_pimem_cfg",
+ .id = QCM2290_SLAVE_PIMEM_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 167,
+};
+
+static struct qcom_icc_node slv_pka_wrapper = {
+ .name = "slv_pka_wrapper",
+ .id = QCM2290_SLAVE_PKA_WRAPPER,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 281,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = QCM2290_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = QCM2290_SLAVE_PRNG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 44,
+};
+
+static struct qcom_icc_node slv_qdss_cfg = {
+ .name = "slv_qdss_cfg",
+ .id = QCM2290_SLAVE_QDSS_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 63,
+};
+
+static struct qcom_icc_node slv_qm_cfg = {
+ .name = "slv_qm_cfg",
+ .id = QCM2290_SLAVE_QM_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 212,
+};
+
+static struct qcom_icc_node slv_qm_mpu_cfg = {
+ .name = "slv_qm_mpu_cfg",
+ .id = QCM2290_SLAVE_QM_MPU_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 231,
+};
+
+static struct qcom_icc_node slv_qpic = {
+ .name = "slv_qpic",
+ .id = QCM2290_SLAVE_QPIC,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 80,
+};
+
+static struct qcom_icc_node slv_qup_0 = {
+ .name = "slv_qup_0",
+ .id = QCM2290_SLAVE_QUP_0,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 261,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = QCM2290_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = QCM2290_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33,
+};
+
+static const u16 slv_snoc_cfg_links[] = {
+ QCM2290_MASTER_SNOC_CFG,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = QCM2290_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+ .num_links = ARRAY_SIZE(slv_snoc_cfg_links),
+ .links = slv_snoc_cfg_links,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = QCM2290_SLAVE_TCSR,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50,
+};
+
+static struct qcom_icc_node slv_usb3 = {
+ .name = "slv_usb3",
+ .id = QCM2290_SLAVE_USB3,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 22,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = QCM2290_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+};
+
+static struct qcom_icc_node slv_venus_throttle_cfg = {
+ .name = "slv_venus_throttle_cfg",
+ .id = QCM2290_SLAVE_VENUS_THROTTLE_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 178,
+};
+
+static struct qcom_icc_node slv_vsense_ctrl_cfg = {
+ .name = "slv_vsense_ctrl_cfg",
+ .id = QCM2290_SLAVE_VSENSE_CTRL_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 263,
+};
+
+static struct qcom_icc_node slv_service_cnoc = {
+ .name = "slv_service_cnoc",
+ .id = QCM2290_SLAVE_SERVICE_CNOC,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 76,
+};
+
+static struct qcom_icc_node slv_qup_core_0 = {
+ .name = "slv_qup_core_0",
+ .id = QCM2290_SLAVE_QUP_CORE_0,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 264,
+};
+
+static const u16 slv_snoc_bimc_nrt_links[] = {
+ QCM2290_MASTER_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node slv_snoc_bimc_nrt = {
+ .name = "slv_snoc_bimc_nrt",
+ .id = QCM2290_SLAVE_SNOC_BIMC_NRT,
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 259,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_nrt_links),
+ .links = slv_snoc_bimc_nrt_links,
+};
+
+static const u16 slv_snoc_bimc_rt_links[] = {
+ QCM2290_MASTER_SNOC_BIMC_RT,
+};
+
+static struct qcom_icc_node slv_snoc_bimc_rt = {
+ .name = "slv_snoc_bimc_rt",
+ .id = QCM2290_SLAVE_SNOC_BIMC_RT,
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 260,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_rt_links),
+ .links = slv_snoc_bimc_rt_links,
+};
+
+static struct qcom_icc_node slv_appss = {
+ .name = "slv_appss",
+ .id = QCM2290_SLAVE_APPSS,
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+};
+
+static const u16 slv_snoc_cnoc_links[] = {
+ QCM2290_MASTER_SNOC_CNOC,
+};
+
+static struct qcom_icc_node slv_snoc_cnoc = {
+ .name = "slv_snoc_cnoc",
+ .id = QCM2290_SLAVE_SNOC_CNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 25,
+ .num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
+ .links = slv_snoc_cnoc_links,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = QCM2290_SLAVE_IMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static struct qcom_icc_node slv_pimem = {
+ .name = "slv_pimem",
+ .id = QCM2290_SLAVE_PIMEM,
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 166,
+};
+
+static const u16 slv_snoc_bimc_links[] = {
+ QCM2290_MASTER_SNOC_BIMC,
+};
+
+static struct qcom_icc_node slv_snoc_bimc = {
+ .name = "slv_snoc_bimc",
+ .id = QCM2290_SLAVE_SNOC_BIMC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_links),
+ .links = slv_snoc_bimc_links,
+};
+
+static struct qcom_icc_node slv_service_snoc = {
+ .name = "slv_service_snoc",
+ .id = QCM2290_SLAVE_SERVICE_SNOC,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 29,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = QCM2290_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_tcu = {
+ .name = "slv_tcu",
+ .id = QCM2290_SLAVE_TCU,
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 133,
+};
+
+static const u16 slv_anoc_snoc_links[] = {
+ QCM2290_MASTER_ANOC_SNOC,
+};
+
+static struct qcom_icc_node slv_anoc_snoc = {
+ .name = "slv_anoc_snoc",
+ .id = QCM2290_SLAVE_ANOC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 141,
+ .num_links = ARRAY_SIZE(slv_anoc_snoc_links),
+ .links = slv_anoc_snoc_links,
+};
+
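+/*
+ * In the node definitions above, mas_rpm_id/slv_rpm_id are the RPM
+ * bus-scaling IDs (-1 when that direction is not voted through RPM),
+ * while qos.ap_owned marks nodes whose QoS registers the AP programs
+ * directly.
+ */
+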
+/* NoC descriptors */
+static struct qcom_icc_node *qcm2290_bimc_nodes[] = {
+ [MASTER_APPSS_PROC] = &mas_appss_proc,
+ [MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt,
+ [MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt,
+ [MASTER_SNOC_BIMC] = &mas_snoc_bimc,
+ [MASTER_TCU_0] = &mas_tcu_0,
+ [MASTER_GFX3D] = &mas_gfx3d,
+ [SLAVE_EBI1] = &slv_ebi1,
+ [SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config qcm2290_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc qcm2290_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = qcm2290_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_bimc_nodes),
+ .regmap_cfg = &qcm2290_bimc_regmap_config,
+ /* M_REG_BASE() in vendor msm_bus_bimc_adhoc driver */
+ .qos_offset = 0x8000,
+};
+
+static struct qcom_icc_node *qcm2290_cnoc_nodes[] = {
+ [MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
+ [MASTER_QDSS_DAP] = &mas_qdss_dap,
+ [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+ [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &slv_camera_nrt_throttle_cfg,
+ [SLAVE_CAMERA_RT_THROTTLE_CFG] = &slv_camera_rt_throttle_cfg,
+ [SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+ [SLAVE_CLK_CTL] = &slv_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg,
+ [SLAVE_GPU_CFG] = &slv_gpu_cfg,
+ [SLAVE_HWKM] = &slv_hwkm,
+ [SLAVE_IMEM_CFG] = &slv_imem_cfg,
+ [SLAVE_IPA_CFG] = &slv_ipa_cfg,
+ [SLAVE_LPASS] = &slv_lpass,
+ [SLAVE_MESSAGE_RAM] = &slv_message_ram,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_PIMEM_CFG] = &slv_pimem_cfg,
+ [SLAVE_PKA_WRAPPER] = &slv_pka_wrapper,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+ [SLAVE_QM_CFG] = &slv_qm_cfg,
+ [SLAVE_QM_MPU_CFG] = &slv_qm_mpu_cfg,
+ [SLAVE_QPIC] = &slv_qpic,
+ [SLAVE_QUP_0] = &slv_qup_0,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_USB3] = &slv_usb3,
+ [SLAVE_VENUS_CFG] = &slv_venus_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &slv_vsense_ctrl_cfg,
+ [SLAVE_SERVICE_CNOC] = &slv_service_cnoc,
+};
+
+static const struct regmap_config qcm2290_cnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8200,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc qcm2290_cnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = qcm2290_cnoc_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_cnoc_nodes),
+ .regmap_cfg = &qcm2290_cnoc_regmap_config,
+};
+
+static struct qcom_icc_node *qcm2290_snoc_nodes[] = {
+ [MASTER_CRYPTO_CORE0] = &mas_crypto_core0,
+ [MASTER_SNOC_CFG] = &mas_snoc_cfg,
+ [MASTER_TIC] = &mas_tic,
+ [MASTER_ANOC_SNOC] = &mas_anoc_snoc,
+ [MASTER_BIMC_SNOC] = &mas_bimc_snoc,
+ [MASTER_PIMEM] = &mas_pimem,
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_QUP_0] = &mas_qup_0,
+ [MASTER_IPA] = &mas_ipa,
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [MASTER_SDCC_1] = &mas_sdcc_1,
+ [MASTER_SDCC_2] = &mas_sdcc_2,
+ [MASTER_QPIC] = &mas_qpic,
+ [MASTER_USB3_0] = &mas_usb3_0,
+ [SLAVE_APPSS] = &slv_appss,
+ [SLAVE_SNOC_CNOC] = &slv_snoc_cnoc,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_PIMEM] = &slv_pimem,
+ [SLAVE_SNOC_BIMC] = &slv_snoc_bimc,
+ [SLAVE_SERVICE_SNOC] = &slv_service_snoc,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_TCU] = &slv_tcu,
+ [SLAVE_ANOC_SNOC] = &slv_anoc_snoc,
+};
+
+static const struct regmap_config qcm2290_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x60200,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc qcm2290_snoc = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = qcm2290_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_snoc_nodes),
+ .regmap_cfg = &qcm2290_snoc_regmap_config,
+ /* Property 'qcom,base-offset' of the vendor DT fab-sys_noc node */
+ .qos_offset = 0x15000,
+};
+
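+/*
+ * The QUP core bus is a virtual (clock-only) NoC with no MMIO region of
+ * its own, so no regmap is attached. The mmrt/mmnrt virtual NoCs below
+ * do attach the SNOC regmap, since their QoS registers live in the SNOC
+ * address space.
+ */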
+static struct qcom_icc_node *qcm2290_qup_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &mas_qup_core_0,
+ [SLAVE_QUP_CORE_0] = &slv_qup_core_0,
+};
+
+static struct qcom_icc_desc qcm2290_qup_virt = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = qcm2290_qup_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_qup_virt_nodes),
+};
+
+static struct qcom_icc_node *qcm2290_mmnrt_virt_nodes[] = {
+ [MASTER_CAMNOC_SF] = &mas_camnoc_sf,
+ [MASTER_VIDEO_P0] = &mas_video_p0,
+ [MASTER_VIDEO_PROC] = &mas_video_proc,
+ [SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt,
+};
+
+static struct qcom_icc_desc qcm2290_mmnrt_virt = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = qcm2290_mmnrt_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_mmnrt_virt_nodes),
+ .regmap_cfg = &qcm2290_snoc_regmap_config,
+ .qos_offset = 0x15000,
+};
+
+static struct qcom_icc_node *qcm2290_mmrt_virt_nodes[] = {
+ [MASTER_CAMNOC_HF] = &mas_camnoc_hf,
+ [MASTER_MDP0] = &mas_mdp0,
+ [SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt,
+};
+
+static struct qcom_icc_desc qcm2290_mmrt_virt = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = qcm2290_mmrt_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_mmrt_virt_nodes),
+ .regmap_cfg = &qcm2290_snoc_regmap_config,
+ .qos_offset = 0x15000,
+};
+
+static const struct of_device_id qcm2290_noc_of_match[] = {
+ { .compatible = "qcom,qcm2290-bimc", .data = &qcm2290_bimc },
+ { .compatible = "qcom,qcm2290-cnoc", .data = &qcm2290_cnoc },
+ { .compatible = "qcom,qcm2290-snoc", .data = &qcm2290_snoc },
+ { .compatible = "qcom,qcm2290-qup-virt", .data = &qcm2290_qup_virt },
+ { .compatible = "qcom,qcm2290-mmrt-virt", .data = &qcm2290_mmrt_virt },
+ { .compatible = "qcom,qcm2290-mmnrt-virt", .data = &qcm2290_mmnrt_virt },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcm2290_noc_of_match);
+
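+/*
+ * Hypothetical DT node matched by the table above (node name and reg
+ * address are illustrative, not taken from a shipping DT):
+ *
+ *	snoc: interconnect@1880000 {
+ *		compatible = "qcom,qcm2290-snoc";
+ *		reg = <0x01880000 0x60200>;
+ *		#interconnect-cells = <1>;
+ *	};
+ */
+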
+static struct platform_driver qcm2290_noc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-qcm2290",
+ .of_match_table = qcm2290_noc_of_match,
+ },
+};
+module_platform_driver(qcm2290_noc_driver);
+
+MODULE_DESCRIPTION("Qualcomm QCM2290 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sc7280.h b/drivers/interconnect/qcom/sc7280.h
index 175e400305c5..1fb9839b2c14 100644
--- a/drivers/interconnect/qcom/sc7280.h
+++ b/drivers/interconnect/qcom/sc7280.h
@@ -150,5 +150,7 @@
#define SC7280_SLAVE_PCIE_1 139
#define SC7280_SLAVE_QDSS_STM 140
#define SC7280_SLAVE_TCU 141
+#define SC7280_MASTER_EPSS_L3_APPS 142
+#define SC7280_SLAVE_EPSS_L3 143
#endif
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index 471bb88f8828..274a7139fe1a 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -1513,6 +1513,7 @@ static const struct regmap_config sdm660_a2noc_regmap_config = {
};
static struct qcom_icc_desc sdm660_a2noc = {
+ .type = QCOM_ICC_NOC,
.nodes = sdm660_a2noc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes),
.clocks = bus_a2noc_clocks,
@@ -1540,9 +1541,9 @@ static const struct regmap_config sdm660_bimc_regmap_config = {
};
static struct qcom_icc_desc sdm660_bimc = {
+ .type = QCOM_ICC_BIMC,
.nodes = sdm660_bimc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_bimc_nodes),
- .is_bimc_node = true,
.regmap_cfg = &sdm660_bimc_regmap_config,
};
@@ -1594,6 +1595,7 @@ static const struct regmap_config sdm660_cnoc_regmap_config = {
};
static struct qcom_icc_desc sdm660_cnoc = {
+ .type = QCOM_ICC_NOC,
.nodes = sdm660_cnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_cnoc_nodes),
.regmap_cfg = &sdm660_cnoc_regmap_config,
@@ -1614,6 +1616,7 @@ static const struct regmap_config sdm660_gnoc_regmap_config = {
};
static struct qcom_icc_desc sdm660_gnoc = {
+ .type = QCOM_ICC_NOC,
.nodes = sdm660_gnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes),
.regmap_cfg = &sdm660_gnoc_regmap_config,
@@ -1653,6 +1656,7 @@ static const struct regmap_config sdm660_mnoc_regmap_config = {
};
static struct qcom_icc_desc sdm660_mnoc = {
+ .type = QCOM_ICC_NOC,
.nodes = sdm660_mnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes),
.clocks = bus_mm_clocks,
@@ -1689,6 +1693,7 @@ static const struct regmap_config sdm660_snoc_regmap_config = {
};
static struct qcom_icc_desc sdm660_snoc = {
+ .type = QCOM_ICC_NOC,
.nodes = sdm660_snoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_snoc_nodes),
.regmap_cfg = &sdm660_snoc_regmap_config,
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index 2a85f53802b5..745e3c36a61a 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -535,7 +535,6 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8150",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index 8dfb5dea562a..aa707582ea01 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -551,7 +551,6 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
index 3e26a2175b28..c79f93a1ac73 100644
--- a/drivers/interconnect/qcom/sm8350.c
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -531,7 +531,6 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8350",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
new file mode 100644
index 000000000000..8d99ee6421df
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -0,0 +1,1987 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sm8450.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sm8450.h"
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SM8450_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SM8450_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a1noc_cfg = {
+ .name = "qnm_a1noc_cfg",
+ .id = SM8450_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM8450_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM8450_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM8450_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM8450_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SM8450_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SM8450_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a2noc_cfg = {
+ .name = "qnm_a2noc_cfg",
+ .id = SM8450_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM8450_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM8450_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sensorss_q6 = {
+ .name = "qxm_sensorss_q6",
+ .id = SM8450_MASTER_SENSORS_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .id = SM8450_MASTER_SP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .id = SM8450_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .id = SM8450_MASTER_QDSS_ETR_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM8450_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SM8450_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SM8450_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = SM8450_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SM8450_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 51,
+ .links = { SM8450_SLAVE_AHB2PHY_SOUTH, SM8450_SLAVE_AHB2PHY_NORTH,
+ SM8450_SLAVE_AOSS, SM8450_SLAVE_CAMERA_CFG,
+ SM8450_SLAVE_CLK_CTL, SM8450_SLAVE_CDSP_CFG,
+ SM8450_SLAVE_RBCPR_CX_CFG, SM8450_SLAVE_RBCPR_MMCX_CFG,
+ SM8450_SLAVE_RBCPR_MXA_CFG, SM8450_SLAVE_RBCPR_MXC_CFG,
+ SM8450_SLAVE_CRYPTO_0_CFG, SM8450_SLAVE_CX_RDPM,
+ SM8450_SLAVE_DISPLAY_CFG, SM8450_SLAVE_GFX3D_CFG,
+ SM8450_SLAVE_IMEM_CFG, SM8450_SLAVE_IPA_CFG,
+ SM8450_SLAVE_IPC_ROUTER_CFG, SM8450_SLAVE_LPASS,
+ SM8450_SLAVE_CNOC_MSS, SM8450_SLAVE_MX_RDPM,
+ SM8450_SLAVE_PCIE_0_CFG, SM8450_SLAVE_PCIE_1_CFG,
+ SM8450_SLAVE_PDM, SM8450_SLAVE_PIMEM_CFG,
+ SM8450_SLAVE_PRNG, SM8450_SLAVE_QDSS_CFG,
+ SM8450_SLAVE_QSPI_0, SM8450_SLAVE_QUP_0,
+ SM8450_SLAVE_QUP_1, SM8450_SLAVE_QUP_2,
+ SM8450_SLAVE_SDCC_2, SM8450_SLAVE_SDCC_4,
+ SM8450_SLAVE_SPSS_CFG, SM8450_SLAVE_TCSR,
+ SM8450_SLAVE_TLMM, SM8450_SLAVE_TME_CFG,
+ SM8450_SLAVE_UFS_MEM_CFG, SM8450_SLAVE_USB3_0,
+ SM8450_SLAVE_VENUS_CFG, SM8450_SLAVE_VSENSE_CTRL_CFG,
+ SM8450_SLAVE_A1NOC_CFG, SM8450_SLAVE_A2NOC_CFG,
+ SM8450_SLAVE_DDRSS_CFG, SM8450_SLAVE_CNOC_MNOC_CFG,
+ SM8450_SLAVE_PCIE_ANOC_CFG, SM8450_SLAVE_SNOC_CFG,
+ SM8450_SLAVE_IMEM, SM8450_SLAVE_PIMEM,
+ SM8450_SLAVE_SERVICE_CNOC, SM8450_SLAVE_QDSS_STM,
+ SM8450_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SM8450_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_PCIE_0, SM8450_SLAVE_PCIE_1 },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SM8450_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SM8450_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SM8450_MASTER_APPSS_PROC,
+ .channels = 3,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
+ SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SM8450_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mdsp = {
+ .name = "qnm_mdsp",
+ .id = SM8450_MASTER_MSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
+ SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM8450_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM8450_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_nsp_gemnoc = {
+ .name = "qnm_nsp_gemnoc",
+ .id = SM8450_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM8450_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SM8450_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM8450_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
+ SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qhm_config_noc = {
+ .name = "qhm_config_noc",
+ .id = SM8450_MASTER_CNOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 6,
+ .links = { SM8450_SLAVE_LPASS_CORE_CFG, SM8450_SLAVE_LPASS_LPI_CFG,
+ SM8450_SLAVE_LPASS_MPU_CFG, SM8450_SLAVE_LPASS_TOP_CFG,
+ SM8450_SLAVE_SERVICES_LPASS_AML_NOC, SM8450_SLAVE_SERVICE_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qxm_lpass_dsp = {
+ .name = "qxm_lpass_dsp",
+ .id = SM8450_MASTER_LPASS_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 4,
+ .links = { SM8450_SLAVE_LPASS_TOP_CFG, SM8450_SLAVE_LPASS_SNOC,
+ SM8450_SLAVE_SERVICES_LPASS_AML_NOC, SM8450_SLAVE_SERVICE_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM8450_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SM8450_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .id = SM8450_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .id = SM8450_MASTER_CAMNOC_SF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .id = SM8450_MASTER_MDP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_cfg = {
+ .name = "qnm_mnoc_cfg",
+ .id = SM8450_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_rot = {
+ .name = "qnm_rot",
+ .id = SM8450_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+ .name = "qnm_vapss_hcp",
+ .id = SM8450_MASTER_CDSP_HCP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video = {
+ .name = "qnm_video",
+ .id = SM8450_MASTER_VIDEO,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .id = SM8450_MASTER_VIDEO_CV_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+ .name = "qnm_video_cvp",
+ .id = SM8450_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .id = SM8450_MASTER_VIDEO_V_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_nsp_noc_config = {
+ .name = "qhm_nsp_noc_config",
+ .id = SM8450_MASTER_CDSP_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_NSP_NOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+ .name = "qxm_nsp",
+ .id = SM8450_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_pcie_anoc_cfg = {
+ .name = "qnm_pcie_anoc_cfg",
+ .id = SM8450_MASTER_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_PCIE_ANOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SM8450_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SM8450_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qhm_gic = {
+ .name = "qhm_gic",
+ .id = SM8450_MASTER_GIC_AHB,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM8450_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM8450_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_lpass_noc = {
+ .name = "qnm_lpass_noc",
+ .id = SM8450_MASTER_LPASS_ANOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_snoc_cfg = {
+ .name = "qnm_snoc_cfg",
+ .id = SM8450_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SM8450_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM8450_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_GC },
+};
+
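+/*
+ * The *_DISP nodes mirror the normal DDR path; they carry the
+ * display-specific bandwidth votes that are aggregated into the
+ * separate "display" BCM set (bcm_*_disp below).
+ */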
+static struct qcom_icc_node qnm_mnoc_hf_disp = {
+ .name = "qnm_mnoc_hf_disp",
+ .id = SM8450_MASTER_MNOC_HF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf_disp = {
+ .name = "qnm_mnoc_sf_disp",
+ .id = SM8450_MASTER_MNOC_SF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node qnm_pcie_disp = {
+ .name = "qnm_pcie_disp",
+ .id = SM8450_MASTER_ANOC_PCIE_GEM_NOC_DISP,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node llcc_mc_disp = {
+ .name = "llcc_mc_disp",
+ .id = SM8450_MASTER_LLCC_DISP,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_EBI1_DISP },
+};
+
+static struct qcom_icc_node qnm_mdp_disp = {
+ .name = "qnm_mdp_disp",
+ .id = SM8450_MASTER_MDP_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qnm_rot_disp = {
+ .name = "qnm_rot_disp",
+ .id = SM8450_MASTER_ROTATOR_DISP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM8450_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SM8450_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM8450_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SM8450_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SM8450_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SM8450_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = SM8450_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SM8450_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SM8450_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM8450_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM8450_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM8450_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_compute_cfg = {
+ .name = "qhs_compute_cfg",
+ .id = SM8450_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_CDSP_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM8450_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+ .name = "qhs_cpr_mmcx",
+ .id = SM8450_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mxa = {
+ .name = "qhs_cpr_mxa",
+ .id = SM8450_SLAVE_RBCPR_MXA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mxc = {
+ .name = "qhs_cpr_mxc",
+ .id = SM8450_SLAVE_RBCPR_MXC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM8450_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+ .name = "qhs_cx_rdpm",
+ .id = SM8450_SLAVE_CX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM8450_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM8450_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM8450_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM8450_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SM8450_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_cfg = {
+ .name = "qhs_lpass_cfg",
+ .id = SM8450_SLAVE_LPASS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_CNOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SM8450_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+ .name = "qhs_mx_rdpm",
+ .id = SM8450_SLAVE_MX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SM8450_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SM8450_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SM8450_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SM8450_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM8450_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM8450_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SM8450_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SM8450_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SM8450_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SM8450_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM8450_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM8450_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SM8450_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM8450_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SM8450_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .id = SM8450_SLAVE_TME_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM8450_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM8450_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM8450_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM8450_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_a1_noc_cfg = {
+ .name = "qns_a1_noc_cfg",
+ .id = SM8450_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qns_a2_noc_cfg = {
+ .name = "qns_a2_noc_cfg",
+ .id = SM8450_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qns_ddrss_cfg = {
+ .name = "qns_ddrss_cfg",
+ .id = SM8450_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+ /* FIXME: where is link (kept at 0 links until this is known) */
+};
+
+static struct qcom_icc_node qns_mnoc_cfg = {
+ .name = "qns_mnoc_cfg",
+ .id = SM8450_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qns_pcie_anoc_cfg = {
+ .name = "qns_pcie_anoc_cfg",
+ .id = SM8450_SLAVE_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_PCIE_ANOC_CFG },
+};
+
+static struct qcom_icc_node qns_snoc_cfg = {
+ .name = "qns_snoc_cfg",
+ .id = SM8450_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM8450_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SM8450_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SM8450_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SM8450_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SM8450_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM8450_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM8450_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SM8450_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM8450_SLAVE_LLCC,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SM8450_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qhs_lpass_core = {
+ .name = "qhs_lpass_core",
+ .id = SM8450_SLAVE_LPASS_CORE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_lpi = {
+ .name = "qhs_lpass_lpi",
+ .id = SM8450_SLAVE_LPASS_LPI_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_mpu = {
+ .name = "qhs_lpass_mpu",
+ .id = SM8450_SLAVE_LPASS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_top = {
+ .name = "qhs_lpass_top",
+ .id = SM8450_SLAVE_LPASS_TOP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_sysnoc = {
+ .name = "qns_sysnoc",
+ .id = SM8450_SLAVE_LPASS_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_LPASS_ANOC },
+};
+
+static struct qcom_icc_node srvc_niu_aml_noc = {
+ .name = "srvc_niu_aml_noc",
+ .id = SM8450_SLAVE_SERVICES_LPASS_AML_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_niu_lpass_agnoc = {
+ .name = "srvc_niu_lpass_agnoc",
+ .id = SM8450_SLAVE_SERVICE_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM8450_SLAVE_EBI1,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM8450_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SM8450_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM8450_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SM8450_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node service_nsp_noc = {
+ .name = "service_nsp_noc",
+ .id = SM8450_SLAVE_SERVICE_NSP_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SM8450_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_pcie_aggre_noc = {
+ .name = "srvc_pcie_aggre_noc",
+ .id = SM8450_SLAVE_SERVICE_PCIE_ANOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SM8450_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM8450_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SM8450_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_llcc_disp = {
+ .name = "qns_llcc_disp",
+ .id = SM8450_SLAVE_LLCC_DISP,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_LLCC_DISP },
+};
+
+static struct qcom_icc_node ebi_disp = {
+ .name = "ebi_disp",
+ .id = SM8450_SLAVE_EBI1_DISP,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf_disp = {
+ .name = "qns_mem_noc_hf_disp",
+ .id = SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_MASTER_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf_disp = {
+ .name = "qns_mem_noc_sf_disp",
+ .id = SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_MASTER_MNOC_SF_MEM_NOC_DISP },
+};
+
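+/*
+ * Bus Clock Managers: each BCM aggregates the bandwidth requests of the
+ * nodes it lists and turns them into a single RPMh vote. 'keepalive'
+ * keeps a minimum vote in place even when no consumer is requesting
+ * bandwidth.
+ */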
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 55,
+ .nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie,
+ &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_aoss, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_compute_cfg,
+ &qhs_cpr_cx, &qhs_cpr_mmcx,
+ &qhs_cpr_mxa, &qhs_cpr_mxc,
+ &qhs_crypto0_cfg, &qhs_cx_rdpm,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_ipa,
+ &qhs_ipc_router, &qhs_lpass_cfg,
+ &qhs_mss_cfg, &qhs_mx_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pdm, &qhs_pimem_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup0,
+ &qhs_qup1, &qhs_qup2,
+ &qhs_sdc2, &qhs_sdc4,
+ &qhs_spss_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_tme_cfg,
+ &qhs_ufs_mem_cfg, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qns_a1_noc_cfg, &qns_a2_noc_cfg,
+ &qns_ddrss_cfg, &qns_mnoc_cfg,
+ &qns_pcie_anoc_cfg, &qns_snoc_cfg,
+ &qxs_imem, &qxs_pimem,
+ &srvc_cnoc, &xs_pcie_0,
+ &xs_pcie_1, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .num_nodes = 2,
+ .nodes = { &qxm_nsp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .num_nodes = 12,
+ .nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
+ &qnm_camnoc_sf, &qnm_mdp,
+ &qnm_mnoc_cfg, &qnm_rot,
+ &qnm_vapss_hcp, &qnm_video,
+ &qnm_video_cv_cpu, &qnm_video_cvp,
+ &qnm_video_v_cpu, &qns_mem_noc_sf },
+};
+
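+/*
+ * The QUP core BCMs override the default vote_scale (normally set to
+ * 1000 by qcom_icc_bcm_init()), so their votes are not
+ * bandwidth-scaled; together with keepalive this holds the QUP core
+ * clocks at a fixed minimum level.
+ */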
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .num_nodes = 7,
+ .nodes = { &alm_gpu_tcu, &alm_sys_tcu,
+ &qnm_nsp_gemnoc, &qnm_pcie,
+ &qnm_snoc_gc, &qns_gem_noc_cnoc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .num_nodes = 4,
+ .nodes = { &qhm_gic, &qxm_pimem,
+ &xm_gic, &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &qnm_lpass_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_acv_disp = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mc0_disp = {
+ .name = "MC0",
+ .num_nodes = 1,
+ .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm0_disp = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm1_disp = {
+ .name = "MM1",
+ .num_nodes = 3,
+ .nodes = { &qnm_mdp_disp, &qnm_rot_disp,
+ &qns_mem_noc_sf_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh0_disp = {
+ .name = "SH0",
+ .num_nodes = 1,
+ .nodes = { &qns_llcc_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh1_disp = {
+ .name = "SH1",
+ .num_nodes = 1,
+ .nodes = { &qnm_pcie_disp },
+};
+
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static struct qcom_icc_desc sm8450_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_SENSORS_PROC] = &qxm_sensorss_q6,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static struct qcom_icc_desc sm8450_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node *clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static struct qcom_icc_desc sm8450_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+ [SLAVE_RBCPR_MXA_CFG] = &qhs_cpr_mxa,
+ [SLAVE_RBCPR_MXC_CFG] = &qhs_cpr_mxc,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_LPASS] = &qhs_lpass_cfg,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
+ [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
+ [SLAVE_PCIE_ANOC_CFG] = &qns_pcie_anoc_cfg,
+ [SLAVE_SNOC_CFG] = &qns_snoc_cfg,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_desc sm8450_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm *gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+ &bcm_sh0_disp,
+ &bcm_sh1_disp,
+};
+
+static struct qcom_icc_node *gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_MSS_PROC] = &qnm_mdsp,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+ [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
+ [MASTER_MNOC_SF_MEM_NOC_DISP] = &qnm_mnoc_sf_disp,
+ [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
+ [SLAVE_LLCC_DISP] = &qns_llcc_disp,
+};
+
+static struct qcom_icc_desc sm8450_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+ [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
+ [MASTER_LPASS_PROC] = &qxm_lpass_dsp,
+ [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
+ [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
+ [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
+ [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
+ [SLAVE_LPASS_SNOC] = &qns_sysnoc,
+ [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
+ [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
+};
+
+static struct qcom_icc_desc sm8450_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .bcms = lpass_ag_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_acv_disp,
+ &bcm_mc0_disp,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+ [MASTER_LLCC_DISP] = &llcc_mc_disp,
+ [SLAVE_EBI1_DISP] = &ebi_disp,
+};
+
+static struct qcom_icc_desc sm8450_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm0_disp,
+ &bcm_mm1_disp,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
+ [MASTER_ROTATOR] = &qnm_rot,
+ [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+ [MASTER_VIDEO] = &qnm_video,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [MASTER_MDP_DISP] = &qnm_mdp_disp,
+ [MASTER_ROTATOR_DISP] = &qnm_rot_disp,
+ [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
+ [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
+};
+
+static struct qcom_icc_desc sm8450_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node *nsp_noc_nodes[] = {
+ [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
+ [MASTER_CDSP_PROC] = &qxm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+ [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
+};
+
+static struct qcom_icc_desc sm8450_nsp_noc = {
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm *pcie_anoc_bcms[] = {
+ &bcm_sn7,
+};
+
+static struct qcom_icc_node *pcie_anoc_nodes[] = {
+ [MASTER_PCIE_ANOC_CFG] = &qnm_pcie_anoc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
+};
+
+static struct qcom_icc_desc sm8450_pcie_anoc = {
+ .nodes = pcie_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
+ .bcms = pcie_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_GIC_AHB] = &qhm_gic,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_LPASS_ANOC] = &qnm_lpass_noc,
+ [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+};
+
+static struct qcom_icc_desc sm8450_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
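+/*
+ * Generic probe shared by all SM8450 NoCs: look up the per-NoC
+ * descriptor, register an interconnect provider, initialize the BCMs
+ * against the RPMh BCM voter, then create and link one icc_node per
+ * descriptor entry.
+ */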
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate_extended = qcom_icc_xlate_extended;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm8450-aggre1-noc",
+ .data = &sm8450_aggre1_noc},
+ { .compatible = "qcom,sm8450-aggre2-noc",
+ .data = &sm8450_aggre2_noc},
+ { .compatible = "qcom,sm8450-clk-virt",
+ .data = &sm8450_clk_virt},
+ { .compatible = "qcom,sm8450-config-noc",
+ .data = &sm8450_config_noc},
+ { .compatible = "qcom,sm8450-gem-noc",
+ .data = &sm8450_gem_noc},
+ { .compatible = "qcom,sm8450-lpass-ag-noc",
+ .data = &sm8450_lpass_ag_noc},
+ { .compatible = "qcom,sm8450-mc-virt",
+ .data = &sm8450_mc_virt},
+ { .compatible = "qcom,sm8450-mmss-noc",
+ .data = &sm8450_mmss_noc},
+ { .compatible = "qcom,sm8450-nsp-noc",
+ .data = &sm8450_nsp_noc},
+ { .compatible = "qcom,sm8450-pcie-anoc",
+ .data = &sm8450_pcie_anoc},
+ { .compatible = "qcom,sm8450-system-noc",
+ .data = &sm8450_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sm8450",
+ .of_match_table = qnoc_of_match,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("sm8450 NoC driver");
+MODULE_LICENSE("GPL v2");
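Aside: a minimal consumer-side sketch (not part of this patch) of how a client driver might vote on a path this provider exposes. The "memory" path name and the bandwidth figures are illustrative assumptions.

#include <linux/err.h>
#include <linux/interconnect.h>

static int example_vote_bw(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "memory");	/* assumed DT path name */
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Vote 100 MB/s average, 200 MB/s peak along the whole path. */
	ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
	if (ret)
		icc_put(path);		/* release the path on failure */
	return ret;
}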
diff --git a/drivers/interconnect/qcom/sm8450.h b/drivers/interconnect/qcom/sm8450.h
new file mode 100644
index 000000000000..a5790ec6767b
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8450.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SM8450 interconnect IDs
+ *
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8450_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM8450_H
+
+#define SM8450_MASTER_GPU_TCU 0
+#define SM8450_MASTER_SYS_TCU 1
+#define SM8450_MASTER_APPSS_PROC 2
+#define SM8450_MASTER_LLCC 3
+#define SM8450_MASTER_CNOC_LPASS_AG_NOC 4
+#define SM8450_MASTER_GIC_AHB 5
+#define SM8450_MASTER_CDSP_NOC_CFG 6
+#define SM8450_MASTER_QDSS_BAM 7
+#define SM8450_MASTER_QSPI_0 8
+#define SM8450_MASTER_QUP_0 9
+#define SM8450_MASTER_QUP_1 10
+#define SM8450_MASTER_QUP_2 11
+#define SM8450_MASTER_A1NOC_CFG 12
+#define SM8450_MASTER_A2NOC_CFG 13
+#define SM8450_MASTER_A1NOC_SNOC 14
+#define SM8450_MASTER_A2NOC_SNOC 15
+#define SM8450_MASTER_CAMNOC_HF 16
+#define SM8450_MASTER_CAMNOC_ICP 17
+#define SM8450_MASTER_CAMNOC_SF 18
+#define SM8450_MASTER_GEM_NOC_CNOC 19
+#define SM8450_MASTER_GEM_NOC_PCIE_SNOC 20
+#define SM8450_MASTER_GFX3D 21
+#define SM8450_MASTER_LPASS_ANOC 22
+#define SM8450_MASTER_MDP 23
+#define SM8450_MASTER_MDP0 SM8450_MASTER_MDP
+#define SM8450_MASTER_MDP1 SM8450_MASTER_MDP
+#define SM8450_MASTER_MSS_PROC 24
+#define SM8450_MASTER_CNOC_MNOC_CFG 25
+#define SM8450_MASTER_MNOC_HF_MEM_NOC 26
+#define SM8450_MASTER_MNOC_SF_MEM_NOC 27
+#define SM8450_MASTER_COMPUTE_NOC 28
+#define SM8450_MASTER_ANOC_PCIE_GEM_NOC 29
+#define SM8450_MASTER_PCIE_ANOC_CFG 30
+#define SM8450_MASTER_ROTATOR 31
+#define SM8450_MASTER_SNOC_CFG 32
+#define SM8450_MASTER_SNOC_GC_MEM_NOC 33
+#define SM8450_MASTER_SNOC_SF_MEM_NOC 34
+#define SM8450_MASTER_CDSP_HCP 35
+#define SM8450_MASTER_VIDEO 36
+#define SM8450_MASTER_VIDEO_P0 SM8450_MASTER_VIDEO
+#define SM8450_MASTER_VIDEO_P1 SM8450_MASTER_VIDEO
+#define SM8450_MASTER_VIDEO_CV_PROC 37
+#define SM8450_MASTER_VIDEO_PROC 38
+#define SM8450_MASTER_VIDEO_V_PROC 39
+#define SM8450_MASTER_QUP_CORE_0 40
+#define SM8450_MASTER_QUP_CORE_1 41
+#define SM8450_MASTER_QUP_CORE_2 42
+#define SM8450_MASTER_CRYPTO 43
+#define SM8450_MASTER_IPA 44
+#define SM8450_MASTER_LPASS_PROC 45
+#define SM8450_MASTER_CDSP_PROC 46
+#define SM8450_MASTER_PIMEM 47
+#define SM8450_MASTER_SENSORS_PROC 48
+#define SM8450_MASTER_SP 49
+#define SM8450_MASTER_GIC 50
+#define SM8450_MASTER_PCIE_0 51
+#define SM8450_MASTER_PCIE_1 52
+#define SM8450_MASTER_QDSS_ETR 53
+#define SM8450_MASTER_QDSS_ETR_1 54
+#define SM8450_MASTER_SDCC_2 55
+#define SM8450_MASTER_SDCC_4 56
+#define SM8450_MASTER_UFS_MEM 57
+#define SM8450_MASTER_USB3_0 58
+#define SM8450_SLAVE_EBI1 512
+#define SM8450_SLAVE_AHB2PHY_SOUTH 513
+#define SM8450_SLAVE_AHB2PHY_NORTH 514
+#define SM8450_SLAVE_AOSS 515
+#define SM8450_SLAVE_CAMERA_CFG 516
+#define SM8450_SLAVE_CLK_CTL 517
+#define SM8450_SLAVE_CDSP_CFG 518
+#define SM8450_SLAVE_RBCPR_CX_CFG 519
+#define SM8450_SLAVE_RBCPR_MMCX_CFG 520
+#define SM8450_SLAVE_RBCPR_MXA_CFG 521
+#define SM8450_SLAVE_RBCPR_MXC_CFG 522
+#define SM8450_SLAVE_CRYPTO_0_CFG 523
+#define SM8450_SLAVE_CX_RDPM 524
+#define SM8450_SLAVE_DISPLAY_CFG 525
+#define SM8450_SLAVE_GFX3D_CFG 526
+#define SM8450_SLAVE_IMEM_CFG 527
+#define SM8450_SLAVE_IPA_CFG 528
+#define SM8450_SLAVE_IPC_ROUTER_CFG 529
+#define SM8450_SLAVE_LPASS 530
+#define SM8450_SLAVE_LPASS_CORE_CFG 531
+#define SM8450_SLAVE_LPASS_LPI_CFG 532
+#define SM8450_SLAVE_LPASS_MPU_CFG 533
+#define SM8450_SLAVE_LPASS_TOP_CFG 534
+#define SM8450_SLAVE_CNOC_MSS 535
+#define SM8450_SLAVE_MX_RDPM 536
+#define SM8450_SLAVE_PCIE_0_CFG 537
+#define SM8450_SLAVE_PCIE_1_CFG 538
+#define SM8450_SLAVE_PDM 539
+#define SM8450_SLAVE_PIMEM_CFG 540
+#define SM8450_SLAVE_PRNG 541
+#define SM8450_SLAVE_QDSS_CFG 542
+#define SM8450_SLAVE_QSPI_0 543
+#define SM8450_SLAVE_QUP_0 544
+#define SM8450_SLAVE_QUP_1 545
+#define SM8450_SLAVE_QUP_2 546
+#define SM8450_SLAVE_SDCC_2 547
+#define SM8450_SLAVE_SDCC_4 548
+#define SM8450_SLAVE_SPSS_CFG 549
+#define SM8450_SLAVE_TCSR 550
+#define SM8450_SLAVE_TLMM 551
+#define SM8450_SLAVE_TME_CFG 552
+#define SM8450_SLAVE_UFS_MEM_CFG 553
+#define SM8450_SLAVE_USB3_0 554
+#define SM8450_SLAVE_VENUS_CFG 555
+#define SM8450_SLAVE_VSENSE_CTRL_CFG 556
+#define SM8450_SLAVE_A1NOC_CFG 557
+#define SM8450_SLAVE_A1NOC_SNOC 558
+#define SM8450_SLAVE_A2NOC_CFG 559
+#define SM8450_SLAVE_A2NOC_SNOC 560
+#define SM8450_SLAVE_DDRSS_CFG 561
+#define SM8450_SLAVE_GEM_NOC_CNOC 562
+#define SM8450_SLAVE_SNOC_GEM_NOC_GC 563
+#define SM8450_SLAVE_SNOC_GEM_NOC_SF 564
+#define SM8450_SLAVE_LLCC 565
+#define SM8450_SLAVE_MNOC_HF_MEM_NOC 566
+#define SM8450_SLAVE_MNOC_SF_MEM_NOC 567
+#define SM8450_SLAVE_CNOC_MNOC_CFG 568
+#define SM8450_SLAVE_CDSP_MEM_NOC 569
+#define SM8450_SLAVE_MEM_NOC_PCIE_SNOC 570
+#define SM8450_SLAVE_PCIE_ANOC_CFG 571
+#define SM8450_SLAVE_ANOC_PCIE_GEM_NOC 572
+#define SM8450_SLAVE_SNOC_CFG 573
+#define SM8450_SLAVE_LPASS_SNOC 574
+#define SM8450_SLAVE_QUP_CORE_0 575
+#define SM8450_SLAVE_QUP_CORE_1 576
+#define SM8450_SLAVE_QUP_CORE_2 577
+#define SM8450_SLAVE_IMEM 578
+#define SM8450_SLAVE_PIMEM 579
+#define SM8450_SLAVE_SERVICE_NSP_NOC 580
+#define SM8450_SLAVE_SERVICE_A1NOC 581
+#define SM8450_SLAVE_SERVICE_A2NOC 582
+#define SM8450_SLAVE_SERVICE_CNOC 583
+#define SM8450_SLAVE_SERVICE_MNOC 584
+#define SM8450_SLAVE_SERVICES_LPASS_AML_NOC 585
+#define SM8450_SLAVE_SERVICE_LPASS_AG_NOC 586
+#define SM8450_SLAVE_SERVICE_PCIE_ANOC 587
+#define SM8450_SLAVE_SERVICE_SNOC 588
+#define SM8450_SLAVE_PCIE_0 589
+#define SM8450_SLAVE_PCIE_1 590
+#define SM8450_SLAVE_QDSS_STM 591
+#define SM8450_SLAVE_TCU 592
+#define SM8450_MASTER_LLCC_DISP 1000
+#define SM8450_MASTER_MDP_DISP 1001
+#define SM8450_MASTER_MDP0_DISP SM8450_MASTER_MDP_DISP
+#define SM8450_MASTER_MDP1_DISP SM8450_MASTER_MDP_DISP
+#define SM8450_MASTER_MNOC_HF_MEM_NOC_DISP 1002
+#define SM8450_MASTER_MNOC_SF_MEM_NOC_DISP 1003
+#define SM8450_MASTER_ANOC_PCIE_GEM_NOC_DISP 1004
+#define SM8450_MASTER_ROTATOR_DISP 1005
+#define SM8450_SLAVE_EBI1_DISP 1512
+#define SM8450_SLAVE_LLCC_DISP 1513
+#define SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP 1514
+#define SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP 1515
+
+#endif
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 867535eb0ce9..ffc89c4fb120 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -645,8 +645,6 @@ struct amd_iommu {
/* DebugFS Info */
struct dentry *debugfs;
#endif
- /* IRQ notifier for IntCapXT interrupt */
- struct irq_affinity_notify intcapxt_notify;
};
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 1eacd43cb436..dc338acf3338 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -806,16 +806,27 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
u32 status, i;
+ u64 entry;
if (!iommu->ga_log)
return -EINVAL;
- status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-
/* Check if already running */
- if (status & (MMIO_STATUS_GALOG_RUN_MASK))
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
return 0;
+ entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
+ memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
+ &entry, sizeof(entry));
+ entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+ (BIT_ULL(52)-1)) & ~7ULL;
+ memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
+ &entry, sizeof(entry));
+ writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
iommu_feature_enable(iommu, CONTROL_GAINT_EN);
iommu_feature_enable(iommu, CONTROL_GALOG_EN);
@@ -825,7 +836,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
break;
}
- if (i >= LOOP_TIMEOUT)
+ if (WARN_ON(i >= LOOP_TIMEOUT))
return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
return 0;
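Aside: the status poll above is the open-coded form of a pattern the generic iopoll helper also covers. A hypothetical equivalent, with assumed interval and timeout values:

#include <linux/iopoll.h>

static int example_wait_galog_run(void __iomem *mmio_base)
{
	u32 status;

	/* Poll every 10 us, up to 100 ms, for GALOG_RUN to latch. */
	return readl_poll_timeout(mmio_base + MMIO_STATUS_OFFSET, status,
				  status & MMIO_STATUS_GALOG_RUN_MASK,
				  10, 100000);
}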
@@ -834,8 +845,6 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
- u64 entry;
-
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
@@ -849,16 +858,6 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
if (!iommu->ga_log_tail)
goto err_out;
- entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
- memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
- &entry, sizeof(entry));
- entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
- (BIT_ULL(52)-1)) & ~7ULL;
- memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
- &entry, sizeof(entry));
- writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
- writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
-
return 0;
err_out:
free_ga_log(iommu);
@@ -1523,7 +1522,7 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
}
/*
- * This function clues the initialization function for one IOMMU
+ * This function glues the initialization function for one IOMMU
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
*/
@@ -2016,48 +2015,18 @@ union intcapxt {
};
} __attribute__ ((packed));
-/*
- * There isn't really any need to mask/unmask at the irqchip level because
- * the 64-bit INTCAPXT registers can be updated atomically without tearing
- * when the affinity is being updated.
- */
-static void intcapxt_unmask_irq(struct irq_data *data)
-{
-}
-
-static void intcapxt_mask_irq(struct irq_data *data)
-{
-}
static struct irq_chip intcapxt_controller;
static int intcapxt_irqdomain_activate(struct irq_domain *domain,
struct irq_data *irqd, bool reserve)
{
- struct amd_iommu *iommu = irqd->chip_data;
- struct irq_cfg *cfg = irqd_cfg(irqd);
- union intcapxt xt;
-
- xt.capxt = 0ULL;
- xt.dest_mode_logical = apic->dest_mode_logical;
- xt.vector = cfg->vector;
- xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
- xt.destid_24_31 = cfg->dest_apicid >> 24;
-
- /**
- * Current IOMMU implemtation uses the same IRQ for all
- * 3 IOMMU interrupts.
- */
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
return 0;
}
static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
struct irq_data *irqd)
{
- intcapxt_mask_irq(irqd);
}
@@ -2091,6 +2060,38 @@ static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq
irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
+
+static void intcapxt_unmask_irq(struct irq_data *irqd)
+{
+ struct amd_iommu *iommu = irqd->chip_data;
+ struct irq_cfg *cfg = irqd_cfg(irqd);
+ union intcapxt xt;
+
+ xt.capxt = 0ULL;
+ xt.dest_mode_logical = apic->dest_mode_logical;
+ xt.vector = cfg->vector;
+ xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
+ xt.destid_24_31 = cfg->dest_apicid >> 24;
+
+ /**
+ * Current IOMMU implementation uses the same IRQ for all
+ * 3 IOMMU interrupts.
+ */
+ writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+ writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+ writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
+static void intcapxt_mask_irq(struct irq_data *irqd)
+{
+ struct amd_iommu *iommu = irqd->chip_data;
+
+ writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+ writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+ writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
static int intcapxt_set_affinity(struct irq_data *irqd,
const struct cpumask *mask, bool force)
{
@@ -2100,8 +2101,12 @@ static int intcapxt_set_affinity(struct irq_data *irqd,
ret = parent->chip->irq_set_affinity(parent, mask, force);
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
+ return 0;
+}
- return intcapxt_irqdomain_activate(irqd->domain, irqd, false);
+static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
+{
+ return on ? -EOPNOTSUPP : 0;
}
static struct irq_chip intcapxt_controller = {
@@ -2111,7 +2116,8 @@ static struct irq_chip intcapxt_controller = {
.irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_affinity = intcapxt_set_affinity,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .irq_set_wake = intcapxt_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
static const struct irq_domain_ops intcapxt_domain_ops = {
@@ -2173,7 +2179,6 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
return ret;
}
- iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
return 0;
}
@@ -2196,6 +2201,10 @@ static int iommu_init_irq(struct amd_iommu *iommu)
iommu->int_enabled = true;
enable_faults:
+
+ if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
+
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
if (iommu->ppr_log != NULL)
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 182c93a43efd..b1bf4125b0f7 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -74,87 +74,61 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
*
****************************************************************************/
-static void free_page_list(struct page *freelist)
+static void free_pt_page(u64 *pt, struct list_head *freelist)
{
- while (freelist != NULL) {
- unsigned long p = (unsigned long)page_address(freelist);
+ struct page *p = virt_to_page(pt);
- freelist = freelist->freelist;
- free_page(p);
- }
+ list_add_tail(&p->lru, freelist);
}
-static struct page *free_pt_page(unsigned long pt, struct page *freelist)
+static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
{
- struct page *p = virt_to_page((void *)pt);
+ u64 *p;
+ int i;
- p->freelist = freelist;
+ for (i = 0; i < 512; ++i) {
+ /* PTE present? */
+ if (!IOMMU_PTE_PRESENT(pt[i]))
+ continue;
- return p;
-}
+ /* Large PTE? */
+ if (PM_PTE_LEVEL(pt[i]) == 0 ||
+ PM_PTE_LEVEL(pt[i]) == 7)
+ continue;
-#define DEFINE_FREE_PT_FN(LVL, FN) \
-static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \
-{ \
- unsigned long p; \
- u64 *pt; \
- int i; \
- \
- pt = (u64 *)__pt; \
- \
- for (i = 0; i < 512; ++i) { \
- /* PTE present? */ \
- if (!IOMMU_PTE_PRESENT(pt[i])) \
- continue; \
- \
- /* Large PTE? */ \
- if (PM_PTE_LEVEL(pt[i]) == 0 || \
- PM_PTE_LEVEL(pt[i]) == 7) \
- continue; \
- \
- p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
- freelist = FN(p, freelist); \
- } \
- \
- return free_pt_page((unsigned long)pt, freelist); \
-}
+ /*
+ * Free the next level. No need to look at l1 tables here since
+ * they can only contain leaf PTEs; just free them directly.
+ */
+ p = IOMMU_PTE_PAGE(pt[i]);
+ if (lvl > 2)
+ free_pt_lvl(p, freelist, lvl - 1);
+ else
+ free_pt_page(p, freelist);
+ }
-DEFINE_FREE_PT_FN(l2, free_pt_page)
-DEFINE_FREE_PT_FN(l3, free_pt_l2)
-DEFINE_FREE_PT_FN(l4, free_pt_l3)
-DEFINE_FREE_PT_FN(l5, free_pt_l4)
-DEFINE_FREE_PT_FN(l6, free_pt_l5)
+ free_pt_page(pt, freelist);
+}
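Aside: a standalone sketch of the deferred-free pattern this hunk converts to: gather page-table pages on a local list through page->lru and release them only once the IOTLB flush guarantees the hardware can no longer walk them. The function name is illustrative.

#include <linux/mm.h>

static void example_deferred_free(u64 *table)
{
	LIST_HEAD(freelist);

	list_add_tail(&virt_to_page(table)->lru, &freelist);
	/* ... issue and wait for the IOTLB flush here ... */
	put_pages_list(&freelist);	/* frees every page on the list */
}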
-static struct page *free_sub_pt(unsigned long root, int mode,
- struct page *freelist)
+static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
{
switch (mode) {
case PAGE_MODE_NONE:
case PAGE_MODE_7_LEVEL:
break;
case PAGE_MODE_1_LEVEL:
- freelist = free_pt_page(root, freelist);
+ free_pt_page(root, freelist);
break;
case PAGE_MODE_2_LEVEL:
- freelist = free_pt_l2(root, freelist);
- break;
case PAGE_MODE_3_LEVEL:
- freelist = free_pt_l3(root, freelist);
- break;
case PAGE_MODE_4_LEVEL:
- freelist = free_pt_l4(root, freelist);
- break;
case PAGE_MODE_5_LEVEL:
- freelist = free_pt_l5(root, freelist);
- break;
case PAGE_MODE_6_LEVEL:
- freelist = free_pt_l6(root, freelist);
+ free_pt_lvl(root, freelist, mode);
break;
default:
BUG();
}
-
- return freelist;
}
void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
@@ -362,9 +336,9 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
return pte;
}
-static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
{
- unsigned long pt;
+ u64 *pt;
int mode;
while (cmpxchg64(pte, pteval, 0) != pteval) {
@@ -373,12 +347,12 @@ static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
}
if (!IOMMU_PTE_PRESENT(pteval))
- return freelist;
+ return;
- pt = (unsigned long)IOMMU_PTE_PAGE(pteval);
+ pt = IOMMU_PTE_PAGE(pteval);
mode = IOMMU_PTE_MODE(pteval);
- return free_sub_pt(pt, mode, freelist);
+ free_sub_pt(pt, mode, freelist);
}
/*
@@ -392,7 +366,7 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
- struct page *freelist = NULL;
+ LIST_HEAD(freelist);
bool updated = false;
u64 __pte, *pte;
int ret, i, count;
@@ -412,9 +386,9 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
goto out;
for (i = 0; i < count; ++i)
- freelist = free_clear_pte(&pte[i], pte[i], freelist);
+ free_clear_pte(&pte[i], pte[i], &freelist);
- if (freelist != NULL)
+ if (!list_empty(&freelist))
updated = true;
if (count > 1) {
@@ -449,7 +423,7 @@ out:
}
/* Everything flushed out, free pages now */
- free_page_list(freelist);
+ put_pages_list(&freelist);
return ret;
}
@@ -511,8 +485,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
{
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
struct protection_domain *dom;
- struct page *freelist = NULL;
- unsigned long root;
+ LIST_HEAD(freelist);
if (pgtable->mode == PAGE_MODE_NONE)
return;
@@ -529,10 +502,9 @@ static void v1_free_pgtable(struct io_pgtable *iop)
BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
pgtable->mode > PAGE_MODE_6_LEVEL);
- root = (unsigned long)pgtable->root;
- freelist = free_sub_pt(root, pgtable->mode, freelist);
+ free_sub_pt(pgtable->root, pgtable->mode, &freelist);
- free_page_list(freelist);
+ put_pages_list(&freelist);
}
static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index ee66d1f4cb81..a737ba5f727e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -220,7 +220,7 @@ static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
kfree(mn_to_smmu(mn));
}
-static struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
+static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
.invalidate_range = arm_smmu_mm_invalidate_range,
.release = arm_smmu_mm_release,
.free_notifier = arm_smmu_mmu_notifier_free,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index f5848b351b19..6dc6d8b6b368 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3142,7 +3142,7 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
phys_addr_t doorbell;
struct device *dev = msi_desc_to_dev(desc);
struct arm_smmu_device *smmu = dev_get_drvdata(dev);
- phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];
+ phys_addr_t *cfg = arm_smmu_msi_cfg[desc->msi_index];
doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
doorbell &= MSI_CFG0_ADDR_MASK;
@@ -3154,7 +3154,6 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
- struct msi_desc *desc;
int ret, nvec = ARM_SMMU_MAX_MSIS;
struct device *dev = smmu->dev;
@@ -3170,7 +3169,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
if (!(smmu->features & ARM_SMMU_FEAT_MSI))
return;
- if (!dev->msi_domain) {
+ if (!dev->msi.domain) {
dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
return;
}
@@ -3182,21 +3181,9 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
return;
}
- for_each_msi_entry(desc, dev) {
- switch (desc->platform.msi_index) {
- case EVTQ_MSI_INDEX:
- smmu->evtq.q.irq = desc->irq;
- break;
- case GERROR_MSI_INDEX:
- smmu->gerr_irq = desc->irq;
- break;
- case PRIQ_MSI_INDEX:
- smmu->priq.q.irq = desc->irq;
- break;
- default: /* Unknown */
- continue;
- }
- }
+ smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX);
+ smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX);
+ smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
/* Add callback to free MSIs on teardown */
devm_add_action(dev, arm_smmu_free_msis, dev);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 4cb136f07914..cd48590ada30 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -184,7 +184,6 @@
#else
#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1)
#endif
-#define Q_MIN_SZ_SHIFT (PAGE_SHIFT)
/*
* Stream table.
@@ -374,7 +373,7 @@
/* Event queue */
#define EVTQ_ENT_SZ_SHIFT 5
#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
-#define EVTQ_MAX_SZ_SHIFT (Q_MIN_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
+#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
#define EVTQ_0_ID GENMASK_ULL(7, 0)
@@ -400,7 +399,7 @@
/* PRI queue */
#define PRIQ_ENT_SZ_SHIFT 4
#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
-#define PRIQ_MAX_SZ_SHIFT (Q_MIN_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
+#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
#define PRIQ_0_SID GENMASK_ULL(31, 0)
#define PRIQ_0_SSID GENMASK_ULL(51, 32)
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index ca736b065dd0..ba6298c7140e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -51,7 +51,7 @@ static void qcom_adreno_smmu_get_fault_info(const void *cookie,
info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
info->far = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_FAR);
info->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
- info->ttbr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
+ info->ttbr0 = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
}
@@ -415,6 +415,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
{ .compatible = "qcom,sm8350-smmu-500" },
+ { .compatible = "qcom,sm8450-smmu-500" },
{ }
};
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b42e38a0dbe2..d85d54f2b549 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -9,9 +9,12 @@
*/
#include <linux/acpi_iort.h>
+#include <linux/atomic.h>
+#include <linux/crash_dump.h>
#include <linux/device.h>
-#include <linux/dma-map-ops.h>
+#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
+#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
@@ -20,11 +23,10 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
-#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
-#include <linux/crash_dump.h>
-#include <linux/dma-direct.h>
struct iommu_dma_msi_page {
struct list_head list;
@@ -41,7 +43,19 @@ struct iommu_dma_cookie {
enum iommu_dma_cookie_type type;
union {
/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
- struct iova_domain iovad;
+ struct {
+ struct iova_domain iovad;
+
+ struct iova_fq __percpu *fq; /* Flush queue */
+ /* Number of TLB flushes that have been started */
+ atomic64_t fq_flush_start_cnt;
+ /* Number of TLB flushes that have been finished */
+ atomic64_t fq_flush_finish_cnt;
+	/* Timer to regularly empty the flush queues */
+ struct timer_list fq_timer;
+ /* 1 when timer is active, 0 when not */
+ atomic_t fq_timer_on;
+ };
/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
dma_addr_t msi_iova;
};
@@ -64,16 +78,203 @@ static int __init iommu_dma_forcedac_setup(char *str)
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
-static void iommu_dma_entry_dtor(unsigned long data)
+/* Number of entries per flush queue */
+#define IOVA_FQ_SIZE 256
+
+/* Timeout (in ms) after which entries are flushed from the queue */
+#define IOVA_FQ_TIMEOUT 10
+
+/* Flush queue entry for deferred flushing */
+struct iova_fq_entry {
+ unsigned long iova_pfn;
+ unsigned long pages;
+ struct list_head freelist;
+ u64 counter; /* Flush counter when this entry was added */
+};
+
+/* Per-CPU flush queue structure */
+struct iova_fq {
+ struct iova_fq_entry entries[IOVA_FQ_SIZE];
+ unsigned int head, tail;
+ spinlock_t lock;
+};
+
+#define fq_ring_for_each(i, fq) \
+ for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
+
+static inline bool fq_full(struct iova_fq *fq)
+{
+ assert_spin_locked(&fq->lock);
+ return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
+}
+
+static inline unsigned int fq_ring_add(struct iova_fq *fq)
+{
+ unsigned int idx = fq->tail;
+
+ assert_spin_locked(&fq->lock);
+
+ fq->tail = (idx + 1) % IOVA_FQ_SIZE;
+
+ return idx;
+}
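Aside: fq_full() and fq_ring_add() implement the classic one-slot-gap ring: head == tail means empty, (tail + 1) % IOVA_FQ_SIZE == head means full, so with IOVA_FQ_SIZE of 256 at most 255 entries are in flight per CPU. A sketch of the resulting free-space arithmetic (helper name illustrative):

static unsigned int example_ring_space(unsigned int head, unsigned int tail)
{
	/* Free slots, keeping the one-slot gap that encodes "full". */
	return (head + IOVA_FQ_SIZE - tail - 1) % IOVA_FQ_SIZE;
}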
+
+static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
+{
+ u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
+ unsigned int idx;
+
+ assert_spin_locked(&fq->lock);
+
+ fq_ring_for_each(idx, fq) {
+
+ if (fq->entries[idx].counter >= counter)
+ break;
+
+ put_pages_list(&fq->entries[idx].freelist);
+ free_iova_fast(&cookie->iovad,
+ fq->entries[idx].iova_pfn,
+ fq->entries[idx].pages);
+
+ fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
+ }
+}
+
+static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
- struct page *freelist = (struct page *)data;
+ atomic64_inc(&cookie->fq_flush_start_cnt);
+ cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
+ atomic64_inc(&cookie->fq_flush_finish_cnt);
+}
+
+static void fq_flush_timeout(struct timer_list *t)
+{
+ struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
+ int cpu;
+
+ atomic_set(&cookie->fq_timer_on, 0);
+ fq_flush_iotlb(cookie);
+
+ for_each_possible_cpu(cpu) {
+ unsigned long flags;
+ struct iova_fq *fq;
+
+ fq = per_cpu_ptr(cookie->fq, cpu);
+ spin_lock_irqsave(&fq->lock, flags);
+ fq_ring_free(cookie, fq);
+ spin_unlock_irqrestore(&fq->lock, flags);
+ }
+}
+
+static void queue_iova(struct iommu_dma_cookie *cookie,
+ unsigned long pfn, unsigned long pages,
+ struct list_head *freelist)
+{
+ struct iova_fq *fq;
+ unsigned long flags;
+ unsigned int idx;
+
+ /*
+ * Order against the IOMMU driver's pagetable update from unmapping
+ * @pte, to guarantee that fq_flush_iotlb() observes that if called
+ * from a different CPU before we release the lock below. Full barrier
+ * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
+ * written fq state here.
+ */
+ smp_mb();
- while (freelist) {
- unsigned long p = (unsigned long)page_address(freelist);
+ fq = raw_cpu_ptr(cookie->fq);
+ spin_lock_irqsave(&fq->lock, flags);
+
+ /*
+ * First remove all entries from the flush queue that have already been
+ * flushed out on another CPU. This makes the fq_full() check below less
+ * likely to be true.
+ */
+ fq_ring_free(cookie, fq);
- freelist = freelist->freelist;
- free_page(p);
+ if (fq_full(fq)) {
+ fq_flush_iotlb(cookie);
+ fq_ring_free(cookie, fq);
}
+
+ idx = fq_ring_add(fq);
+
+ fq->entries[idx].iova_pfn = pfn;
+ fq->entries[idx].pages = pages;
+ fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
+ list_splice(freelist, &fq->entries[idx].freelist);
+
+ spin_unlock_irqrestore(&fq->lock, flags);
+
+ /* Avoid false sharing as much as possible. */
+ if (!atomic_read(&cookie->fq_timer_on) &&
+ !atomic_xchg(&cookie->fq_timer_on, 1))
+ mod_timer(&cookie->fq_timer,
+ jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+}
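Aside: the timer arming at the end of queue_iova() uses the read-then-xchg idiom: the plain atomic_read() keeps the common already-armed case write-free, avoiding cacheline bouncing between queueing CPUs, and atomic_xchg() picks exactly one winner. A standalone sketch (name illustrative):

#include <linux/atomic.h>

static bool example_try_arm(atomic_t *armed)
{
	if (atomic_read(armed))
		return false;			/* already armed: fast path */
	return atomic_xchg(armed, 1) == 0;	/* true for the single winner */
}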
+
+static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
+{
+ int cpu, idx;
+
+ if (!cookie->fq)
+ return;
+
+ del_timer_sync(&cookie->fq_timer);
+ /* The IOVAs will be torn down separately, so just free our queued pages */
+ for_each_possible_cpu(cpu) {
+ struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);
+
+ fq_ring_for_each(idx, fq)
+ put_pages_list(&fq->entries[idx].freelist);
+ }
+
+ free_percpu(cookie->fq);
+}
+
+/* sysfs updates are serialised by the mutex of the group owning @domain */
+int iommu_dma_init_fq(struct iommu_domain *domain)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_fq __percpu *queue;
+ int i, cpu;
+
+ if (cookie->fq_domain)
+ return 0;
+
+ atomic64_set(&cookie->fq_flush_start_cnt, 0);
+ atomic64_set(&cookie->fq_flush_finish_cnt, 0);
+
+ queue = alloc_percpu(struct iova_fq);
+ if (!queue) {
+ pr_warn("iova flush queue initialization failed\n");
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct iova_fq *fq = per_cpu_ptr(queue, cpu);
+
+ fq->head = 0;
+ fq->tail = 0;
+
+ spin_lock_init(&fq->lock);
+
+ for (i = 0; i < IOVA_FQ_SIZE; i++)
+ INIT_LIST_HEAD(&fq->entries[i].freelist);
+ }
+
+ cookie->fq = queue;
+
+ timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
+ atomic_set(&cookie->fq_timer_on, 0);
+ /*
+ * Prevent incomplete fq state being observable. Pairs with path from
+ * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
+ */
+ smp_wmb();
+ WRITE_ONCE(cookie->fq_domain, domain);
+ return 0;
}
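Aside: iommu_dma_init_fq() publishes the flush-queue state with the usual init, write-barrier, single-store sequence, pairing with the smp_mb() in queue_iova(). A minimal sketch of that pattern, all names illustrative:

struct example_state {
	bool ready;
};

static struct example_state *example_slot;

static void example_publish(struct example_state *s)
{
	s->ready = true;	/* initialise every field first */
	smp_wmb();		/* order the init before the publish store */
	WRITE_ONCE(example_slot, s);	/* single store that readers test */
}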
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
@@ -156,8 +357,10 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!cookie)
return;
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
+ iommu_dma_free_fq(cookie);
put_iova_domain(&cookie->iovad);
+ }
list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
list_del(&msi->list);
@@ -294,17 +497,6 @@ static int iova_reserve_iommu_regions(struct device *dev,
return ret;
}
-static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
-{
- struct iommu_dma_cookie *cookie;
- struct iommu_domain *domain;
-
- cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
- domain = cookie->fq_domain;
-
- domain->ops->flush_iotlb_all(domain);
-}
-
static bool dev_is_untrusted(struct device *dev)
{
return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
@@ -315,30 +507,6 @@ static bool dev_use_swiotlb(struct device *dev)
return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
}
-/* sysfs updates are serialised by the mutex of the group owning @domain */
-int iommu_dma_init_fq(struct iommu_domain *domain)
-{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- int ret;
-
- if (cookie->fq_domain)
- return 0;
-
- ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all,
- iommu_dma_entry_dtor);
- if (ret) {
- pr_warn("iova flush queue initialization failed\n");
- return ret;
- }
- /*
- * Prevent incomplete iovad->fq being observable. Pairs with path from
- * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
- */
- smp_wmb();
- WRITE_ONCE(cookie->fq_domain, domain);
- return 0;
-}
-
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -442,14 +610,6 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
shift = iova_shift(iovad);
iova_len = size >> shift;
- /*
- * Freeing non-power-of-two-sized allocations back into the IOVA caches
- * will come back to bite us badly, so we have to waste a bit of space
- * rounding up anything cacheable to make sure that can't happen. The
- * order of the unadjusted size will still match upon freeing.
- */
- if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
- iova_len = roundup_pow_of_two(iova_len);
dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
@@ -477,9 +637,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
else if (gather && gather->queued)
- queue_iova(iovad, iova_pfn(iovad, iova),
+ queue_iova(cookie, iova_pfn(iovad, iova),
size >> iova_shift(iovad),
- (unsigned long)gather->freelist);
+ &gather->freelist);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index b6a8f3282411..92fea3fbbb11 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -133,11 +133,6 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
-static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
-{
- return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-
static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
@@ -1280,10 +1275,6 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
unsigned long last_pfn,
int retain_level)
{
- BUG_ON(!domain_pfn_supported(domain, start_pfn));
- BUG_ON(!domain_pfn_supported(domain, last_pfn));
- BUG_ON(start_pfn > last_pfn);
-
dma_pte_clear_range(domain, start_pfn, last_pfn);
/* We don't need lock here; nobody else touches the iova range */
@@ -1303,35 +1294,30 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
know the hardware page-walk will no longer touch them.
The 'pte' argument is the *parent* PTE, pointing to the page that is to
be freed. */
-static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
- int level, struct dma_pte *pte,
- struct page *freelist)
+static void dma_pte_list_pagetables(struct dmar_domain *domain,
+ int level, struct dma_pte *pte,
+ struct list_head *freelist)
{
struct page *pg;
pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
- pg->freelist = freelist;
- freelist = pg;
+ list_add_tail(&pg->lru, freelist);
if (level == 1)
- return freelist;
+ return;
pte = page_address(pg);
do {
if (dma_pte_present(pte) && !dma_pte_superpage(pte))
- freelist = dma_pte_list_pagetables(domain, level - 1,
- pte, freelist);
+ dma_pte_list_pagetables(domain, level - 1, pte, freelist);
pte++;
} while (!first_pte_in_page(pte));
-
- return freelist;
}
-static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
- struct dma_pte *pte, unsigned long pfn,
- unsigned long start_pfn,
- unsigned long last_pfn,
- struct page *freelist)
+static void dma_pte_clear_level(struct dmar_domain *domain, int level,
+ struct dma_pte *pte, unsigned long pfn,
+ unsigned long start_pfn, unsigned long last_pfn,
+ struct list_head *freelist)
{
struct dma_pte *first_pte = NULL, *last_pte = NULL;
@@ -1350,7 +1336,7 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
	/* These subordinate page tables are going away entirely. Don't
bother to clear them; we're just going to *free* them. */
if (level > 1 && !dma_pte_superpage(pte))
- freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
+ dma_pte_list_pagetables(domain, level - 1, pte, freelist);
dma_clear_pte(pte);
if (!first_pte)
@@ -1358,10 +1344,10 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
last_pte = pte;
} else if (level > 1) {
/* Recurse down into a level that isn't *entirely* obsolete */
- freelist = dma_pte_clear_level(domain, level - 1,
- phys_to_virt(dma_pte_addr(pte)),
- level_pfn, start_pfn, last_pfn,
- freelist);
+ dma_pte_clear_level(domain, level - 1,
+ phys_to_virt(dma_pte_addr(pte)),
+ level_pfn, start_pfn, last_pfn,
+ freelist);
}
next:
pfn = level_pfn + level_size(level);
@@ -1370,47 +1356,28 @@ next:
if (first_pte)
domain_flush_cache(domain, first_pte,
(void *)++last_pte - (void *)first_pte);
-
- return freelist;
}
/* We can't just free the pages because the IOMMU may still be walking
the page tables, and may have cached the intermediate levels. The
pages can only be freed after the IOTLB flush has been done. */
-static struct page *domain_unmap(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn,
- struct page *freelist)
+static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
+ unsigned long last_pfn, struct list_head *freelist)
{
BUG_ON(!domain_pfn_supported(domain, start_pfn));
BUG_ON(!domain_pfn_supported(domain, last_pfn));
BUG_ON(start_pfn > last_pfn);
/* we don't need lock here; nobody else touches the iova range */
- freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
- domain->pgd, 0, start_pfn, last_pfn,
- freelist);
+ dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
+ domain->pgd, 0, start_pfn, last_pfn, freelist);
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
struct page *pgd_page = virt_to_page(domain->pgd);
- pgd_page->freelist = freelist;
- freelist = pgd_page;
-
+ list_add_tail(&pgd_page->lru, freelist);
domain->pgd = NULL;
}
-
- return freelist;
-}
-
-static void dma_free_pagelist(struct page *freelist)
-{
- struct page *pg;
-
- while ((pg = freelist)) {
- freelist = pg->freelist;
- free_pgtable_page(page_address(pg));
- }
}
/* iommu handling */
@@ -1878,17 +1845,16 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
static int iommu_init_domains(struct intel_iommu *iommu)
{
- u32 ndomains, nlongs;
+ u32 ndomains;
size_t size;
ndomains = cap_ndoms(iommu->cap);
pr_debug("%s: Number of Domains supported <%d>\n",
iommu->name, ndomains);
- nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
- iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
+ iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
if (!iommu->domain_ids)
return -ENOMEM;
@@ -1903,7 +1869,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
if (!iommu->domains || !iommu->domains[0]) {
pr_err("%s: Allocating domain array failed\n",
iommu->name);
- kfree(iommu->domain_ids);
+ bitmap_free(iommu->domain_ids);
kfree(iommu->domains);
iommu->domain_ids = NULL;
iommu->domains = NULL;
@@ -1964,7 +1930,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
for (i = 0; i < elems; i++)
kfree(iommu->domains[i]);
kfree(iommu->domains);
- kfree(iommu->domain_ids);
+ bitmap_free(iommu->domain_ids);
iommu->domains = NULL;
iommu->domain_ids = NULL;
}
@@ -2095,11 +2061,10 @@ static void domain_exit(struct dmar_domain *domain)
domain_remove_dev_info(domain);
if (domain->pgd) {
- struct page *freelist;
+ LIST_HEAD(freelist);
- freelist = domain_unmap(domain, 0,
- DOMAIN_MAX_PFN(domain->gaw), NULL);
- dma_free_pagelist(freelist);
+ domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
+ put_pages_list(&freelist);
}
free_domain_mem(domain);
@@ -2112,10 +2077,10 @@ static void domain_exit(struct dmar_domain *domain)
*/
static inline unsigned long context_get_sm_pds(struct pasid_table *table)
{
- int pds, max_pde;
+ unsigned long pds, max_pde;
max_pde = table->max_pasid >> PASID_PDE_SHIFT;
- pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
+ pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
if (pds < 7)
return 0;
@@ -4192,19 +4157,17 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- struct page *freelist;
+ LIST_HEAD(freelist);
- freelist = domain_unmap(si_domain,
- start_vpfn, last_vpfn,
- NULL);
+ domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
iommu_flush_iotlb_psi(iommu, si_domain,
start_vpfn, mhp->nr_pages,
- !freelist, 0);
+ list_empty(&freelist), 0);
rcu_read_unlock();
- dma_free_pagelist(freelist);
+ put_pages_list(&freelist);
}
break;
}
@@ -5211,8 +5174,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
start_pfn = iova >> VTD_PAGE_SHIFT;
last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
- gather->freelist = domain_unmap(dmar_domain, start_pfn,
- last_pfn, gather->freelist);
+ domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist);
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
@@ -5248,9 +5210,10 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
for_each_domain_iommu(iommu_id, dmar_domain)
iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
- start_pfn, nrpages, !gather->freelist, 0);
+ start_pfn, nrpages,
+ list_empty(&gather->freelist), 0);
- dma_free_pagelist(gather->freelist);
+ put_pages_list(&gather->freelist);
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index bfb6acb651e5..be066c1503d3 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -246,13 +246,17 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
__GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
else if (lvl == 2)
table = kmem_cache_zalloc(data->l2_tables, gfp);
+
+ if (!table)
+ return NULL;
+
phys = virt_to_phys(table);
if (phys != (arm_v7s_iopte)phys) {
/* Doesn't fit in PTE */
dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
}
- if (table && !cfg->coherent_walk) {
+ if (!cfg->coherent_walk) {
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto out_free;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index dd9e47189d0d..94ff319ae8ac 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -315,11 +315,12 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
arm_lpae_iopte *ptep,
arm_lpae_iopte curr,
- struct io_pgtable_cfg *cfg)
+ struct arm_lpae_io_pgtable *data)
{
arm_lpae_iopte old, new;
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
- new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
+ new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
new |= ARM_LPAE_PTE_NSTABLE;
@@ -380,7 +381,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
if (!cptep)
return -ENOMEM;
- pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
+ pte = arm_lpae_install_table(cptep, ptep, 0, data);
if (pte)
__arm_lpae_free_pages(cptep, tblsz, cfg);
} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
@@ -592,7 +593,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
}
- pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
+ pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
if (pte != blk_pte) {
__arm_lpae_free_pages(tablep, tablesz, cfg);
/*
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index dd7863e453a5..8b86406b7162 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -288,11 +288,11 @@ int iommu_probe_device(struct device *dev)
*/
mutex_lock(&group->mutex);
iommu_alloc_default_domain(group, dev);
- mutex_unlock(&group->mutex);
if (group->default_domain) {
ret = __iommu_attach_device(group->default_domain, dev);
if (ret) {
+ mutex_unlock(&group->mutex);
iommu_group_put(group);
goto err_release;
}
@@ -300,6 +300,7 @@ int iommu_probe_device(struct device *dev)
iommu_create_device_direct_mappings(group, dev);
+ mutex_unlock(&group->mutex);
iommu_group_put(group);
if (ops->probe_finalize)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 9e8bc802ac05..b28c9435b898 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -24,8 +24,6 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
-static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(struct timer_list *t);
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
@@ -63,8 +61,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
- iovad->flush_cb = NULL;
- iovad->fq = NULL;
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
@@ -73,62 +69,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
}
EXPORT_SYMBOL_GPL(init_iova_domain);
-static bool has_iova_flush_queue(struct iova_domain *iovad)
-{
- return !!iovad->fq;
-}
-
-static void free_iova_flush_queue(struct iova_domain *iovad)
-{
- if (!has_iova_flush_queue(iovad))
- return;
-
- if (timer_pending(&iovad->fq_timer))
- del_timer(&iovad->fq_timer);
-
- fq_destroy_all_entries(iovad);
-
- free_percpu(iovad->fq);
-
- iovad->fq = NULL;
- iovad->flush_cb = NULL;
- iovad->entry_dtor = NULL;
-}
-
-int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
-{
- struct iova_fq __percpu *queue;
- int cpu;
-
- atomic64_set(&iovad->fq_flush_start_cnt, 0);
- atomic64_set(&iovad->fq_flush_finish_cnt, 0);
-
- queue = alloc_percpu(struct iova_fq);
- if (!queue)
- return -ENOMEM;
-
- iovad->flush_cb = flush_cb;
- iovad->entry_dtor = entry_dtor;
-
- for_each_possible_cpu(cpu) {
- struct iova_fq *fq;
-
- fq = per_cpu_ptr(queue, cpu);
- fq->head = 0;
- fq->tail = 0;
-
- spin_lock_init(&fq->lock);
- }
-
- iovad->fq = queue;
-
- timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
- atomic_set(&iovad->fq_timer_on, 0);
-
- return 0;
-}
-
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
@@ -497,6 +437,15 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long iova_pfn;
struct iova *new_iova;
+ /*
+ * Freeing non-power-of-two-sized allocations back into the IOVA caches
+ * will come back to bite us badly, so we have to waste a bit of space
+ * rounding up anything cacheable to make sure that can't happen. The
+ * order of the unadjusted size will still match upon freeing.
+ */
+ if (size < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+ size = roundup_pow_of_two(size);
+
iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
if (iova_pfn)
return iova_pfn;
@@ -539,144 +488,6 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
}
EXPORT_SYMBOL_GPL(free_iova_fast);
-#define fq_ring_for_each(i, fq) \
- for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
-
-static inline bool fq_full(struct iova_fq *fq)
-{
- assert_spin_locked(&fq->lock);
- return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
-}
-
-static inline unsigned fq_ring_add(struct iova_fq *fq)
-{
- unsigned idx = fq->tail;
-
- assert_spin_locked(&fq->lock);
-
- fq->tail = (idx + 1) % IOVA_FQ_SIZE;
-
- return idx;
-}
-
-static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
-{
- u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
- unsigned idx;
-
- assert_spin_locked(&fq->lock);
-
- fq_ring_for_each(idx, fq) {
-
- if (fq->entries[idx].counter >= counter)
- break;
-
- if (iovad->entry_dtor)
- iovad->entry_dtor(fq->entries[idx].data);
-
- free_iova_fast(iovad,
- fq->entries[idx].iova_pfn,
- fq->entries[idx].pages);
-
- fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
- }
-}
-
-static void iova_domain_flush(struct iova_domain *iovad)
-{
- atomic64_inc(&iovad->fq_flush_start_cnt);
- iovad->flush_cb(iovad);
- atomic64_inc(&iovad->fq_flush_finish_cnt);
-}
-
-static void fq_destroy_all_entries(struct iova_domain *iovad)
-{
- int cpu;
-
- /*
- * This code runs when the iova_domain is being detroyed, so don't
- * bother to free iovas, just call the entry_dtor on all remaining
- * entries.
- */
- if (!iovad->entry_dtor)
- return;
-
- for_each_possible_cpu(cpu) {
- struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
- int idx;
-
- fq_ring_for_each(idx, fq)
- iovad->entry_dtor(fq->entries[idx].data);
- }
-}
-
-static void fq_flush_timeout(struct timer_list *t)
-{
- struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
- int cpu;
-
- atomic_set(&iovad->fq_timer_on, 0);
- iova_domain_flush(iovad);
-
- for_each_possible_cpu(cpu) {
- unsigned long flags;
- struct iova_fq *fq;
-
- fq = per_cpu_ptr(iovad->fq, cpu);
- spin_lock_irqsave(&fq->lock, flags);
- fq_ring_free(iovad, fq);
- spin_unlock_irqrestore(&fq->lock, flags);
- }
-}
-
-void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data)
-{
- struct iova_fq *fq;
- unsigned long flags;
- unsigned idx;
-
- /*
- * Order against the IOMMU driver's pagetable update from unmapping
- * @pte, to guarantee that iova_domain_flush() observes that if called
- * from a different CPU before we release the lock below. Full barrier
- * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
- * written fq state here.
- */
- smp_mb();
-
- fq = raw_cpu_ptr(iovad->fq);
- spin_lock_irqsave(&fq->lock, flags);
-
- /*
- * First remove all entries from the flush queue that have already been
- * flushed out on another CPU. This makes the fq_full() check below less
- * likely to be true.
- */
- fq_ring_free(iovad, fq);
-
- if (fq_full(fq)) {
- iova_domain_flush(iovad);
- fq_ring_free(iovad, fq);
- }
-
- idx = fq_ring_add(fq);
-
- fq->entries[idx].iova_pfn = pfn;
- fq->entries[idx].pages = pages;
- fq->entries[idx].data = data;
- fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
-
- spin_unlock_irqrestore(&fq->lock, flags);
-
- /* Avoid false sharing as much as possible. */
- if (!atomic_read(&iovad->fq_timer_on) &&
- !atomic_xchg(&iovad->fq_timer_on, 1))
- mod_timer(&iovad->fq_timer,
- jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
-}
-
/**
* put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
@@ -688,8 +499,6 @@ void put_iova_domain(struct iova_domain *iovad)
cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
&iovad->cpuhp_dead);
-
- free_iova_flush_queue(iovad);
free_iova_rcaches(iovad);
rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
free_iova_mem(iova);
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 83df387e70a3..50860ebdd087 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -109,7 +109,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
zdev->dma_table = s390_domain->dma_table;
cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- (u64) zdev->dma_table);
+ virt_to_phys(zdev->dma_table));
if (cc) {
rc = -EIO;
goto out_restore;
@@ -205,11 +205,11 @@ static void s390_iommu_release_device(struct device *dev)
}
static int s390_iommu_update_trans(struct s390_domain *s390_domain,
- unsigned long pa, dma_addr_t dma_addr,
+ phys_addr_t pa, dma_addr_t dma_addr,
size_t size, int flags)
{
struct s390_domain_device *domain_device;
- u8 *page_addr = (u8 *) (pa & PAGE_MASK);
+ phys_addr_t page_addr = pa & PAGE_MASK;
dma_addr_t start_dma_addr = dma_addr;
unsigned long irq_flags, nr_pages, i;
unsigned long *entry;
@@ -274,7 +274,7 @@ static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
if (!(prot & IOMMU_WRITE))
flags |= ZPCI_TABLE_PROTECTED;
- rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
+ rc = s390_iommu_update_trans(s390_domain, paddr, iova,
size, flags);
return rc;
@@ -324,7 +324,7 @@ static size_t s390_iommu_unmap(struct iommu_domain *domain,
if (!paddr)
return 0;
- rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
+ rc = s390_iommu_update_trans(s390_domain, paddr, iova,
size, flags);
if (rc)
return 0;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 80930ce04a16..f2aa34f57454 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -71,6 +71,7 @@ struct viommu_domain {
struct rb_root_cached mappings;
unsigned long nr_endpoints;
+ bool bypass;
};
struct viommu_endpoint {
@@ -310,8 +311,8 @@ out_unlock:
*
 * On success, return 0. Otherwise return -ENOMEM.
*/
-static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
- phys_addr_t paddr, size_t size, u32 flags)
+static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
+ phys_addr_t paddr, u32 flags)
{
unsigned long irqflags;
struct viommu_mapping *mapping;
@@ -322,7 +323,7 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
mapping->paddr = paddr;
mapping->iova.start = iova;
- mapping->iova.last = iova + size - 1;
+ mapping->iova.last = end;
mapping->flags = flags;
spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
@@ -337,26 +338,24 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
*
* @vdomain: the domain
* @iova: start of the range
- * @size: size of the range. A size of 0 corresponds to the entire address
- * space.
+ * @end: end of the range
*
- * On success, returns the number of unmapped bytes (>= size)
+ * On success, returns the number of unmapped bytes
*/
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
- unsigned long iova, size_t size)
+ u64 iova, u64 end)
{
size_t unmapped = 0;
unsigned long flags;
- unsigned long last = iova + size - 1;
struct viommu_mapping *mapping = NULL;
struct interval_tree_node *node, *next;
spin_lock_irqsave(&vdomain->mappings_lock, flags);
- next = interval_tree_iter_first(&vdomain->mappings, iova, last);
+ next = interval_tree_iter_first(&vdomain->mappings, iova, end);
while (next) {
node = next;
mapping = container_of(node, struct viommu_mapping, iova);
- next = interval_tree_iter_next(node, iova, last);
+ next = interval_tree_iter_next(node, iova, end);
/* Trying to split a mapping? */
if (mapping->iova.start < iova)
@@ -377,6 +376,55 @@ static size_t viommu_del_mappings(struct viommu_domain *vdomain,
}
/*
+ * Fill the domain with identity mappings, skipping the device's reserved
+ * regions.
+ */
+static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
+ struct viommu_domain *vdomain)
+{
+ int ret;
+ struct iommu_resv_region *resv;
+ u64 iova = vdomain->domain.geometry.aperture_start;
+ u64 limit = vdomain->domain.geometry.aperture_end;
+ u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+ unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);
+
+ iova = ALIGN(iova, granule);
+ limit = ALIGN_DOWN(limit + 1, granule) - 1;
+
+ list_for_each_entry(resv, &vdev->resv_regions, list) {
+ u64 resv_start = ALIGN_DOWN(resv->start, granule);
+ u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;
+
+ if (resv_end < iova || resv_start > limit)
+ /* No overlap */
+ continue;
+
+ if (resv_start > iova) {
+ ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
+ (phys_addr_t)iova, flags);
+ if (ret)
+ goto err_unmap;
+ }
+
+ if (resv_end >= limit)
+ return 0;
+
+ iova = resv_end + 1;
+ }
+
+ ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
+ flags);
+ if (ret)
+ goto err_unmap;
+ return 0;
+
+err_unmap:
+ viommu_del_mappings(vdomain, 0, iova);
+ return ret;
+}
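Aside: a standalone sketch of the carving step viommu_domain_map_identity() performs for one reserved range: identity-map [iova, limit] minus [rs, re], yielding zero, one or two sub-ranges. All names are illustrative:

struct example_range {
	u64 start;
	u64 end;
};

static int example_carve(u64 iova, u64 limit, u64 rs, u64 re,
			 struct example_range out[2])
{
	int n = 0;

	if (re < iova || rs > limit) {		/* no overlap: keep it all */
		out[n++] = (struct example_range){ iova, limit };
		return n;
	}
	if (rs > iova)				/* piece below the hole */
		out[n++] = (struct example_range){ iova, rs - 1 };
	if (re < limit)				/* piece above the hole */
		out[n++] = (struct example_range){ re + 1, limit };
	return n;
}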
+
+/*
* viommu_replay_mappings - re-send MAP requests
*
* When reattaching a domain that was previously detached from all endpoints,
@@ -422,7 +470,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
size_t size;
u64 start64, end64;
phys_addr_t start, end;
- struct iommu_resv_region *region = NULL;
+ struct iommu_resv_region *region = NULL, *next;
unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
start = start64 = le64_to_cpu(mem->start);
@@ -453,7 +501,12 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
if (!region)
return -ENOMEM;
- list_add(&region->list, &vdev->resv_regions);
+ /* Keep the list sorted */
+ list_for_each_entry(next, &vdev->resv_regions, list) {
+ if (next->start > region->start)
+ break;
+ }
+ list_add_tail(&region->list, &next->list);
return 0;
}
@@ -587,7 +640,9 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
struct viommu_domain *vdomain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
return NULL;
vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
@@ -630,6 +685,21 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
vdomain->map_flags = viommu->map_flags;
vdomain->viommu = viommu;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ if (virtio_has_feature(viommu->vdev,
+ VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
+ vdomain->bypass = true;
+ return 0;
+ }
+
+ ret = viommu_domain_map_identity(vdev, vdomain);
+ if (ret) {
+ ida_free(&viommu->domain_ids, vdomain->id);
+ vdomain->viommu = NULL;
+ return -EOPNOTSUPP;
+ }
+ }
+
return 0;
}
@@ -637,8 +707,8 @@ static void viommu_domain_free(struct iommu_domain *domain)
{
struct viommu_domain *vdomain = to_viommu_domain(domain);
- /* Free all remaining mappings (size 2^64) */
- viommu_del_mappings(vdomain, 0, 0);
+ /* Free all remaining mappings */
+ viommu_del_mappings(vdomain, 0, ULLONG_MAX);
if (vdomain->viommu)
ida_free(&vdomain->viommu->domain_ids, vdomain->id);
@@ -673,7 +743,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
/*
* In the virtio-iommu device, when attaching the endpoint to a new
- * domain, it is detached from the old one and, if as as a result the
+ * domain, it is detached from the old one and, if as a result the
* old domain isn't attached to any endpoint, all mappings are removed
* from the old domain and it is freed.
*
@@ -691,6 +761,9 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
.domain = cpu_to_le32(vdomain->id),
};
+ if (vdomain->bypass)
+ req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
+
for (i = 0; i < fwspec->num_ids; i++) {
req.endpoint = cpu_to_le32(fwspec->ids[i]);
@@ -720,6 +793,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
{
int ret;
u32 flags;
+ u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -730,7 +804,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
if (flags & ~vdomain->map_flags)
return -EINVAL;
- ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
+ ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
if (ret)
return ret;
@@ -739,7 +813,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
.domain = cpu_to_le32(vdomain->id),
.virt_start = cpu_to_le64(iova),
.phys_start = cpu_to_le64(paddr),
- .virt_end = cpu_to_le64(iova + size - 1),
+ .virt_end = cpu_to_le64(end),
.flags = cpu_to_le32(flags),
};
@@ -748,7 +822,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
- viommu_del_mappings(vdomain, iova, size);
+ viommu_del_mappings(vdomain, iova, end);
return ret;
}
@@ -761,7 +835,7 @@ static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
- unmapped = viommu_del_mappings(vdomain, iova, size);
+ unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
if (unmapped < size)
return 0;
@@ -1115,7 +1189,7 @@ static void viommu_remove(struct virtio_device *vdev)
iommu_device_unregister(&viommu->iommu);
/* Stop all virtqueues */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
dev_info(&vdev->dev, "device removed\n");
@@ -1132,6 +1206,7 @@ static unsigned int features[] = {
VIRTIO_IOMMU_F_DOMAIN_RANGE,
VIRTIO_IOMMU_F_PROBE,
VIRTIO_IOMMU_F_MMIO,
+ VIRTIO_IOMMU_F_BYPASS_CONFIG,
};
static struct virtio_device_id id_table[] = {
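For clarity, here is a standalone sketch of the hole-punching walk that viommu_domain_map_identity() performs above: plain C with hypothetical types and printf() standing in for the real map operation, not the driver code itself.

/*
 * Standalone sketch of the identity-map walk: map every gap between
 * sorted reserved regions inside the aperture [iova, limit].
 */
#include <stdint.h>
#include <stdio.h>

struct resv { uint64_t start, end; };	/* inclusive, sorted by start */

static void map_identity(uint64_t iova, uint64_t limit,
			 const struct resv *resv, int nr_resv)
{
	for (int i = 0; i < nr_resv; i++) {
		if (resv[i].end < iova || resv[i].start > limit)
			continue;	/* no overlap with the aperture */
		if (resv[i].start > iova)
			printf("map [%#llx, %#llx]\n",
			       (unsigned long long)iova,
			       (unsigned long long)(resv[i].start - 1));
		if (resv[i].end >= limit)
			return;		/* reserved region covers the tail */
		iova = resv[i].end + 1;
	}
	printf("map [%#llx, %#llx]\n",
	       (unsigned long long)iova, (unsigned long long)limit);
}

int main(void)
{
	const struct resv r[] = { { 0x8000, 0xbfff } };

	map_identity(0x0, 0xffff, r, 1);	/* two mappings around the hole */
	return 0;
}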
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 2543ef65825b..38091ebb9403 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -178,7 +178,6 @@ struct aic_irq_chip {
struct irq_domain *hw_domain;
struct irq_domain *ipi_domain;
int nr_hw;
- int ipi_hwirq;
};
static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 9349fc68b81a..b249d4df899e 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -88,7 +88,6 @@ static struct irq_chip gicv2m_msi_irq_chip = {
.irq_mask = gicv2m_mask_msi_irq,
.irq_unmask = gicv2m_unmask_msi_irq,
.irq_eoi = irq_chip_eoi_parent,
- .irq_write_msi_msg = pci_msi_domain_write_msg,
};
static struct msi_domain_info gicv2m_msi_domain_info = {
@@ -405,7 +404,7 @@ err_free_v2m:
return ret;
}
-static struct of_device_id gicv2m_device_id[] = {
+static const struct of_device_id gicv2m_device_id[] = {
{ .compatible = "arm,gic-v2m-frame", },
{},
};
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index ad2810c017ed..93f77a8196da 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -28,7 +28,6 @@ static struct irq_chip its_msi_irq_chip = {
.irq_unmask = its_unmask_msi_irq,
.irq_mask = its_mask_msi_irq,
.irq_eoi = irq_chip_eoi_parent,
- .irq_write_msi_msg = pci_msi_domain_write_msg,
};
static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 0cb584d9815b..9e93ff2b6375 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -46,6 +46,10 @@
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
+#define RD_LOCAL_LPI_ENABLED BIT(0)
+#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
+#define RD_LOCAL_MEMRESERVE_DONE BIT(2)
+
static u32 lpi_id_bits;
/*
@@ -3044,7 +3048,7 @@ static void its_cpu_init_lpis(void)
phys_addr_t paddr;
u64 val, tmp;
- if (gic_data_rdist()->lpi_enabled)
+ if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
return;
val = readl_relaxed(rbase + GICR_CTLR);
@@ -3063,15 +3067,13 @@ static void its_cpu_init_lpis(void)
paddr &= GENMASK_ULL(51, 16);
WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
- its_free_pending_table(gic_data_rdist()->pend_page);
- gic_data_rdist()->pend_page = NULL;
+ gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
goto out;
}
pend_page = gic_data_rdist()->pend_page;
paddr = page_to_phys(pend_page);
- WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
/* set PROPBASE */
val = (gic_rdists->prop_table_pa |
@@ -3158,10 +3160,11 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
out:
- gic_data_rdist()->lpi_enabled = true;
+ gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
smp_processor_id(),
- gic_data_rdist()->pend_page ? "allocated" : "reserved",
+ gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
+ "reserved" : "allocated",
&paddr);
}
@@ -4853,6 +4856,38 @@ static struct syscore_ops its_syscore_ops = {
.resume = its_restore_enable,
};
+static void __init __iomem *its_map_one(struct resource *res, int *err)
+{
+ void __iomem *its_base;
+ u32 val;
+
+ its_base = ioremap(res->start, SZ_64K);
+ if (!its_base) {
+ pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ if (val != 0x30 && val != 0x40) {
+ pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
+ *err = -ENODEV;
+ goto out_unmap;
+ }
+
+ *err = its_force_quiescent(its_base);
+ if (*err) {
+ pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
+ goto out_unmap;
+ }
+
+ return its_base;
+
+out_unmap:
+ iounmap(its_base);
+ return NULL;
+}
+
static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
struct irq_domain *inner_domain;
@@ -4960,29 +4995,14 @@ static int __init its_probe_one(struct resource *res,
{
struct its_node *its;
void __iomem *its_base;
- u32 val, ctlr;
u64 baser, tmp, typer;
struct page *page;
+ u32 ctlr;
int err;
- its_base = ioremap(res->start, SZ_64K);
- if (!its_base) {
- pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
- return -ENOMEM;
- }
-
- val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
- if (val != 0x30 && val != 0x40) {
- pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
- err = -ENODEV;
- goto out_unmap;
- }
-
- err = its_force_quiescent(its_base);
- if (err) {
- pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
- goto out_unmap;
- }
+ its_base = its_map_one(res, &err);
+ if (!its_base)
+ return err;
pr_info("ITS %pR\n", res);
@@ -5138,7 +5158,7 @@ static int redist_disable_lpis(void)
*
* If running with preallocated tables, there is nothing to do.
*/
- if (gic_data_rdist()->lpi_enabled ||
+ if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
(gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
return 0;
@@ -5200,6 +5220,69 @@ int its_cpu_init(void)
return 0;
}
+static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
+{
+ cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
+ gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
+}
+
+static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
+ rdist_memreserve_cpuhp_cleanup_workfn);
+
+static int its_cpu_memreserve_lpi(unsigned int cpu)
+{
+ struct page *pend_page;
+ int ret = 0;
+
+ /* This gets to run exactly once per CPU */
+ if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
+ return 0;
+
+ pend_page = gic_data_rdist()->pend_page;
+ if (WARN_ON(!pend_page)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /*
+ * If the pending table was pre-programmed, free the memory we
+ * preemptively allocated. Otherwise, reserve that memory for
+ * later kexecs.
+ */
+ if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
+ its_free_pending_table(pend_page);
+ gic_data_rdist()->pend_page = NULL;
+ } else {
+ phys_addr_t paddr = page_to_phys(pend_page);
+ WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
+ }
+
+out:
+ /* Last CPU being brought up gets to issue the cleanup */
+ if (!IS_ENABLED(CONFIG_SMP) ||
+ cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
+ schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
+
+ gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
+ return ret;
+}
+
+/* Mark all the BASER registers as invalid before they get reprogrammed */
+static int __init its_reset_one(struct resource *res)
+{
+ void __iomem *its_base;
+ int err, i;
+
+ its_base = its_map_one(res, &err);
+ if (!its_base)
+ return err;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++)
+ gits_write_baser(0, its_base + GITS_BASER + (i << 3));
+
+ iounmap(its_base);
+ return 0;
+}
+
static const struct of_device_id its_device_id[] = {
{ .compatible = "arm,gic-v3-its", },
{},
@@ -5210,6 +5293,26 @@ static int __init its_of_probe(struct device_node *node)
struct device_node *np;
struct resource res;
+ /*
+ * Make sure *all* the ITS are reset before we probe any, as
+ * they may be sharing memory. If any of the ITS fails to
+ * reset, don't even try to go any further, as this could
+ * result in something even worse.
+ */
+ for (np = of_find_matching_node(node, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ int err;
+
+ if (!of_device_is_available(np) ||
+ !of_property_read_bool(np, "msi-controller") ||
+ of_address_to_resource(np, 0, &res))
+ continue;
+
+ err = its_reset_one(&res);
+ if (err)
+ return err;
+ }
+
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
if (!of_device_is_available(np))
@@ -5372,17 +5475,61 @@ dom_err:
return err;
}
+static int __init its_acpi_reset(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_translator *its_entry;
+ struct resource res;
+
+ its_entry = (struct acpi_madt_generic_translator *)header;
+ res = (struct resource) {
+ .start = its_entry->base_address,
+ .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ };
+
+ return its_reset_one(&res);
+}
+
static void __init its_acpi_probe(void)
{
acpi_table_parse_srat_its();
- acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
- gic_acpi_parse_madt_its, 0);
+ /*
+ * Make sure *all* the ITS are reset before we probe any, as
+ * they may be sharing memory. If any of the ITS fails to
+ * reset, don't even try to go any further, as this could
+ * result in something even worse.
+ */
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ its_acpi_reset, 0) > 0)
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ gic_acpi_parse_madt_its, 0);
acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif
+int __init its_lpi_memreserve_init(void)
+{
+ int state;
+
+ if (!efi_enabled(EFI_CONFIG_TABLES))
+ return 0;
+
+ gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
+ state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "irqchip/arm/gicv3/memreserve:online",
+ its_cpu_memreserve_lpi,
+ NULL);
+ if (state < 0)
+ return state;
+
+ gic_rdists->cpuhp_memreserve_state = state;
+
+ return 0;
+}
+
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *parent_domain)
{
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index b84c9c2eccdc..a2163d32f17d 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -171,7 +171,6 @@ static struct irq_chip mbi_msi_irq_chip = {
.irq_unmask = mbi_unmask_msi_irq,
.irq_eoi = irq_chip_eoi_parent,
.irq_compose_msi_msg = mbi_compose_msi_msg,
- .irq_write_msi_msg = pci_msi_domain_write_msg,
};
static struct msi_domain_info mbi_msi_domain_info = {
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index daec3309b014..5e935d97207d 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -920,6 +920,22 @@ static int __gic_update_rdist_properties(struct redist_region *region,
{
u64 typer = gic_read_typer(ptr + GICR_TYPER);
+ /* Boot-time cleanup */
+ if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
+ u64 val;
+
+ /* Deactivate any present vPE */
+ val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
+ if (val & GICR_VPENDBASER_Valid)
+ gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+ ptr + SZ_128K + GICR_VPENDBASER);
+
+ /* Mark the VPE table as invalid */
+ val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
+ val &= ~GICR_VPROPBASER_4_1_VALID;
+ gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
+ }
+
gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
/* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
@@ -1802,6 +1818,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
if (gic_dist_supports_lpis()) {
its_init(handle, &gic_data.rdists, gic_data.domain);
its_cpu_init();
+ its_lpi_memreserve_init();
} else {
if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
gicv2m_init(handle, gic_data.domain);
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 5b5a365dbd5e..b9c22f764b4d 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -26,7 +26,7 @@ struct gpcv2_irqchip_data {
u32 cpu2wakeup;
};
-static struct gpcv2_irqchip_data *imx_gpcv2_instance;
+static struct gpcv2_irqchip_data *imx_gpcv2_instance __ro_after_init;
static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
{
diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
index 34a7d261b710..3363f83bd7e9 100644
--- a/drivers/irqchip/irq-ingenic-tcu.c
+++ b/drivers/irqchip/irq-ingenic-tcu.c
@@ -28,6 +28,7 @@ static void ingenic_tcu_intc_cascade(struct irq_desc *desc)
struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
struct regmap *map = gc->private;
uint32_t irq_reg, irq_mask;
+ unsigned long bits;
unsigned int i;
regmap_read(map, TCU_REG_TFR, &irq_reg);
@@ -36,8 +37,9 @@ static void ingenic_tcu_intc_cascade(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
irq_reg &= ~irq_mask;
+ bits = irq_reg;
- for_each_set_bit(i, (unsigned long *)&irq_reg, 32)
+ for_each_set_bit(i, &bits, 32)
generic_handle_domain_irq(domain, i);
chained_irq_exit(irq_chip, desc);
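The cast removed above was subtly broken: on a 64-bit kernel, passing a u32 address as unsigned long * makes for_each_set_bit() read 8 bytes, picking up 4 bytes of adjacent stack. A minimal standalone illustration of the safe pattern, widening the value before scanning (plain C, not the driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t irq_reg = 0x22;	/* bits 1 and 5 pending */
	unsigned long bits = irq_reg;	/* widen; never cast the pointer */

	for (unsigned int i = 0; i < 32; i++)
		if (bits & (1UL << i))
			printf("handle hwirq %u\n", i);
	return 0;
}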
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 32562b7e681b..e3801c4a77ed 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -241,7 +241,7 @@ static int pch_msi_init(struct device_node *node,
return 0;
err_map:
- kfree(priv->msi_map);
+ bitmap_free(priv->msi_map);
err_priv:
kfree(priv);
return ret;
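For reference, bitmaps allocated with bitmap_zalloc() must be released with bitmap_free() rather than kfree(), which is what the one-line fix above restores. A sketch of the pairing, assuming kernel context and a hypothetical private structure:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct pch_msi_sketch {	/* hypothetical */
	unsigned long *msi_map;
	unsigned int num_irqs;
};

static int sketch_init(struct pch_msi_sketch *priv)
{
	priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL);
	if (!priv->msi_map)
		return -ENOMEM;
	return 0;
}

static void sketch_exit(struct pch_msi_sketch *priv)
{
	bitmap_free(priv->msi_map);	/* pairs with bitmap_zalloc(), not kfree() */
}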
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 12df2162108e..f3faf5c99770 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -207,7 +207,7 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain,
if (err)
return err;
- err = platform_msi_domain_alloc(domain, virq, nr_irqs);
+ err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
if (err)
return err;
@@ -223,7 +223,7 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain,
static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
- platform_msi_domain_free(domain, virq, nr_irqs);
+ platform_msi_device_domain_free(domain, virq, nr_irqs);
}
static const struct irq_domain_ops mbigen_domain_ops = {
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 3e7297fc5948..497da344717c 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -221,7 +221,7 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
icu_irqd->icu_group = msi_data->subset_data->icu_group;
icu_irqd->icu = icu;
- err = platform_msi_domain_alloc(domain, virq, nr_irqs);
+ err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
if (err) {
dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
goto free_irqd;
@@ -245,7 +245,7 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return 0;
free_msi:
- platform_msi_domain_free(domain, virq, nr_irqs);
+ platform_msi_device_domain_free(domain, virq, nr_irqs);
free_irqd:
kfree(icu_irqd);
return err;
@@ -260,7 +260,7 @@ mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
kfree(icu_irqd);
- platform_msi_domain_free(domain, virq, nr_irqs);
+ platform_msi_device_domain_free(domain, virq, nr_irqs);
}
static const struct irq_domain_ops mvebu_icu_domain_ops = {
@@ -314,12 +314,12 @@ static int mvebu_icu_subset_probe(struct platform_device *pdev)
msi_data->subset_data = of_device_get_match_data(dev);
}
- dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_PLATFORM_MSI);
- if (!dev->msi_domain)
+ if (!dev->msi.domain)
return -EPROBE_DEFER;
- msi_parent_dn = irq_domain_get_of_node(dev->msi_domain);
+ msi_parent_dn = irq_domain_get_of_node(dev->msi.domain);
if (!msi_parent_dn)
return -ENODEV;
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
index fd9f275592d2..50a56820c99b 100644
--- a/drivers/irqchip/irq-realtek-rtl.c
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -62,7 +62,7 @@ static struct irq_chip realtek_ictl_irq = {
static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
- irq_set_chip_and_handler(hw, &realtek_ictl_irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq);
return 0;
}
@@ -76,16 +76,20 @@ static void realtek_irq_dispatch(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_domain *domain;
- unsigned int pending;
+ unsigned long pending;
+ unsigned int soc_int;
chained_irq_enter(chip, desc);
pending = readl(REG(RTL_ICTL_GIMR)) & readl(REG(RTL_ICTL_GISR));
+
if (unlikely(!pending)) {
spurious_interrupt();
goto out;
}
+
domain = irq_desc_get_handler_data(desc);
- generic_handle_domain_irq(domain, __ffs(pending));
+ for_each_set_bit(soc_int, &pending, 32)
+ generic_handle_domain_irq(domain, soc_int);
out:
chained_irq_exit(chip, desc);
@@ -95,7 +99,8 @@ out:
* SoC interrupts are cascaded to MIPS CPU interrupts according to the
* interrupt-map in the device tree. Each SoC interrupt gets 4 bits for
* the CPU interrupt in an Interrupt Routing Register. Max 32 SoC interrupts
- * thus go into 4 IRRs.
+ * thus go into 4 IRRs. A routing value of '0' means the interrupt is left
+ * disconnected. Routing values {1..15} connect to output lines {0..14}.
*/
static int __init map_interrupts(struct device_node *node, struct irq_domain *domain)
{
@@ -134,7 +139,7 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
of_node_put(cpu_ictl);
cpu_int = be32_to_cpup(imap + 2);
- if (cpu_int > 7)
+ if (cpu_int > 7 || cpu_int < 2)
return -EINVAL;
if (!(mips_irqs_set & BIT(cpu_int))) {
@@ -143,7 +148,8 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
mips_irqs_set |= BIT(cpu_int);
}
- regs[(soc_int * 4) / 32] |= cpu_int << (soc_int * 4) % 32;
+ /* Use routing values (1..6) for CPU interrupts (2..7) */
+ regs[(soc_int * 4) / 32] |= (cpu_int - 1) << (soc_int * 4) % 32;
imap += 3;
}
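A worked example of the IRR packing above: each SoC interrupt occupies a 4-bit routing field, eight fields per 32-bit register, and CPU interrupt N is stored as routing value N - 1. Standalone C with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t regs[4] = { 0 };
	unsigned int soc_int = 9, cpu_int = 4;

	regs[(soc_int * 4) / 32] |= (cpu_int - 1) << ((soc_int * 4) % 32);

	/* soc_int 9 -> regs[1], bits 7:4 -> routing value 3 (0x30) */
	printf("regs[1] = %#x\n", regs[1]);
	return 0;
}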
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index cb7f60b3b4a9..37f9a4499fdb 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -375,7 +375,6 @@ static int intc_irqpin_probe(struct platform_device *pdev)
struct intc_irqpin_priv *p;
struct intc_irqpin_iomem *i;
struct resource *io[INTC_IRQPIN_REG_NR];
- struct resource *irq;
struct irq_chip *irq_chip;
void (*enable_fn)(struct irq_data *d);
void (*disable_fn)(struct irq_data *d);
@@ -418,12 +417,14 @@ static int intc_irqpin_probe(struct platform_device *pdev)
/* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
for (k = 0; k < INTC_IRQPIN_MAX; k++) {
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
- if (!irq)
+ ret = platform_get_irq_optional(pdev, k);
+ if (ret == -ENXIO)
break;
+ if (ret < 0)
+ goto err0;
p->irq[k].p = p;
- p->irq[k].requested_irq = irq->start;
+ p->irq[k].requested_irq = ret;
}
nirqs = k;
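The loop pattern above relies on platform_get_irq_optional() returning -ENXIO when the IRQ list is exhausted and another negative value on a real failure, which must be propagated rather than ignored. A sketch of that pattern, assuming kernel context and hypothetical names:

#include <linux/errno.h>
#include <linux/platform_device.h>

#define SKETCH_MAX_IRQS 8	/* hypothetical upper bound */

static int sketch_requested_irq[SKETCH_MAX_IRQS];

static int sketch_count_irqs(struct platform_device *pdev)
{
	int k, ret;

	for (k = 0; k < SKETCH_MAX_IRQS; k++) {
		ret = platform_get_irq_optional(pdev, k);
		if (ret == -ENXIO)
			break;		/* end of list, not an error */
		if (ret < 0)
			return ret;	/* a real failure must be propagated */
		sketch_requested_irq[k] = ret;
	}
	return k;			/* number of IRQs found */
}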
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 07a6d8b42b63..909325f88239 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -126,7 +126,6 @@ static int irqc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const char *name = dev_name(dev);
struct irqc_priv *p;
- struct resource *irq;
int ret;
int k;
@@ -142,13 +141,15 @@ static int irqc_probe(struct platform_device *pdev)
/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
for (k = 0; k < IRQC_IRQ_MAX; k++) {
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
- if (!irq)
+ ret = platform_get_irq_optional(pdev, k);
+ if (ret == -ENXIO)
break;
+ if (ret < 0)
+ goto err_runtime_pm_disable;
p->irq[k].p = p;
p->irq[k].hw_irq = k;
- p->irq[k].requested_irq = irq->start;
+ p->irq[k].requested_irq = ret;
}
p->number_of_irqs = k;
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 8eba08db33b2..5fdbb4358dd0 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -595,7 +595,7 @@ static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg,
struct platform_device *pdev = to_platform_device(desc->dev);
arg->desc = desc;
- arg->hwirq = TO_HWIRQ(pdev->id, desc->inta.dev_index);
+ arg->hwirq = TO_HWIRQ(pdev->id, desc->msi_index);
}
static struct msi_domain_ops ti_sci_inta_msi_ops = {
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 1518ba31a80c..7c17a6f643ef 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -149,6 +149,8 @@ static struct spear_shirq spear320_shirq_ras3 = {
.offset = 0,
.nr_irqs = 7,
.mask = ((0x1 << 7) - 1) << 0,
+ .irq_chip = &dummy_irq_chip,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
};
static struct spear_shirq spear320_shirq_ras1 = {
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 7313454e403a..e69c4bf557bf 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -32,7 +32,7 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
-static int showcapimsgs = 0;
+static int showcapimsgs;
static struct workqueue_struct *kcapi_wq;
module_param(showcapimsgs, uint, 0);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ed800f5da7d8..6090e647daee 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -260,13 +260,6 @@ config LEDS_NET48XX
This option enables support for the Soekris net4801 and net4826 error
LED.
-config LEDS_FSG
- tristate "LED Support for the Freecom FSG-3"
- depends on LEDS_CLASS
- depends on MACH_FSG
- help
- This option enables support for the LEDs on the Freecom FSG-3.
-
config LEDS_WRAP
tristate "LED Support for the WRAP series LEDs"
depends on LEDS_CLASS
@@ -879,4 +872,7 @@ source "drivers/leds/flash/Kconfig"
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
+comment "Simple LED drivers"
+source "drivers/leds/simple/Kconfig"
+
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index c636ec069612..e58ecb36360f 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_CPCAP) += leds-cpcap.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
-obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
@@ -105,3 +104,6 @@ obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
# LED Blink
obj-y += blink/
+
+# Simple LED drivers
+obj-y += simple/
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index fd8b7573285a..6f270c0272fb 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -477,7 +477,6 @@ static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
gc->ngpio = priv->gpio.pins;
gc->parent = dev;
gc->owner = THIS_MODULE;
- gc->of_node = dev->of_node;
return devm_gpiochip_add_data(dev, gc, priv);
}
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index b230f3d65eb0..d3eb689b193c 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -48,6 +48,19 @@ config LEDS_MAX77693
multifunction device. It has build in control for two leds in flash
and torch mode.
+config LEDS_MT6360
+ tristate "LED Support for Mediatek MT6360 PMIC"
+ depends on LEDS_CLASS && OF
+ depends on LEDS_CLASS_FLASH || !LEDS_CLASS_FLASH
+ depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR
+ depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+ depends on MFD_MT6360
+ help
+ This option enables support for the dual flash LEDs found on the
+ Mediatek MT6360 PMIC.
+ An independent current source supplies each flash LED, supporting
+ torch and strobe modes.
+
config LEDS_RT4505
tristate "LED support for RT4505 flashlight controller"
depends on I2C && OF
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
index ebea42f9c37e..0acbddc0b91b 100644
--- a/drivers/leds/flash/Makefile
+++ b/drivers/leds/flash/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_MT6360) += leds-mt6360.o
obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c
index f341da1503a4..ed1f20a58bf6 100644
--- a/drivers/leds/flash/leds-ktd2692.c
+++ b/drivers/leds/flash/leds-ktd2692.c
@@ -274,7 +274,7 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
struct device_node *child_node;
int ret;
- if (!dev_of_node(dev))
+ if (!np)
return -ENXIO;
led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
diff --git a/drivers/leds/flash/leds-mt6360.c b/drivers/leds/flash/leds-mt6360.c
new file mode 100644
index 000000000000..e1066a52d2d2
--- /dev/null
+++ b/drivers/leds/flash/leds-mt6360.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/led-class-flash.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <media/v4l2-flash-led-class.h>
+
+enum {
+ MT6360_LED_ISNK1 = 0,
+ MT6360_LED_ISNK2,
+ MT6360_LED_ISNK3,
+ MT6360_LED_ISNKML,
+ MT6360_LED_FLASH1,
+ MT6360_LED_FLASH2,
+ MT6360_MAX_LEDS
+};
+
+#define MT6360_REG_RGBEN 0x380
+#define MT6360_REG_ISNK(_led_no) (0x381 + (_led_no))
+#define MT6360_ISNK_ENMASK(_led_no) BIT(7 - (_led_no))
+#define MT6360_ISNK_MASK GENMASK(4, 0)
+#define MT6360_CHRINDSEL_MASK BIT(3)
+
+/* Virtual definition for multicolor */
+#define MT6360_VIRTUAL_MULTICOLOR (MT6360_MAX_LEDS + 1)
+#define MULTICOLOR_NUM_CHANNELS 3
+
+#define MT6360_REG_FLEDEN 0x37E
+#define MT6360_REG_STRBTO 0x373
+#define MT6360_REG_FLEDBASE(_id) (0x372 + 4 * (_id - MT6360_LED_FLASH1))
+#define MT6360_REG_FLEDISTRB(_id) (MT6360_REG_FLEDBASE(_id) + 2)
+#define MT6360_REG_FLEDITOR(_id) (MT6360_REG_FLEDBASE(_id) + 3)
+#define MT6360_REG_CHGSTAT2 0x3E1
+#define MT6360_REG_FLEDSTAT1 0x3E9
+#define MT6360_ITORCH_MASK GENMASK(4, 0)
+#define MT6360_ISTROBE_MASK GENMASK(6, 0)
+#define MT6360_STRBTO_MASK GENMASK(6, 0)
+#define MT6360_TORCHEN_MASK BIT(3)
+#define MT6360_STROBEN_MASK BIT(2)
+#define MT6360_FLCSEN_MASK(_id) BIT(MT6360_LED_FLASH2 - _id)
+#define MT6360_FLEDCHGVINOVP_MASK BIT(3)
+#define MT6360_FLED1STRBTO_MASK BIT(11)
+#define MT6360_FLED2STRBTO_MASK BIT(10)
+#define MT6360_FLED1STRB_MASK BIT(9)
+#define MT6360_FLED2STRB_MASK BIT(8)
+#define MT6360_FLED1SHORT_MASK BIT(7)
+#define MT6360_FLED2SHORT_MASK BIT(6)
+#define MT6360_FLEDLVF_MASK BIT(3)
+
+#define MT6360_ISNKRGB_STEPUA 2000
+#define MT6360_ISNKRGB_MAXUA 24000
+#define MT6360_ISNKML_STEPUA 5000
+#define MT6360_ISNKML_MAXUA 150000
+
+#define MT6360_ITORCH_MINUA 25000
+#define MT6360_ITORCH_STEPUA 12500
+#define MT6360_ITORCH_MAXUA 400000
+#define MT6360_ISTRB_MINUA 50000
+#define MT6360_ISTRB_STEPUA 12500
+#define MT6360_ISTRB_MAXUA 1500000
+#define MT6360_STRBTO_MINUS 64000
+#define MT6360_STRBTO_STEPUS 32000
+#define MT6360_STRBTO_MAXUS 2432000
+
+#define STATE_OFF 0
+#define STATE_KEEP 1
+#define STATE_ON 2
+
+struct mt6360_led {
+ union {
+ struct led_classdev isnk;
+ struct led_classdev_mc mc;
+ struct led_classdev_flash flash;
+ };
+ struct v4l2_flash *v4l2_flash;
+ struct mt6360_priv *priv;
+ u32 led_no;
+ u32 default_state;
+};
+
+struct mt6360_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ unsigned int fled_strobe_used;
+ unsigned int fled_torch_used;
+ unsigned int leds_active;
+ unsigned int leds_count;
+ struct mt6360_led leds[];
+};
+
+static int mt6360_mc_brightness_set(struct led_classdev *lcdev,
+ enum led_brightness level)
+{
+ struct led_classdev_mc *mccdev = lcdev_to_mccdev(lcdev);
+ struct mt6360_led *led = container_of(mccdev, struct mt6360_led, mc);
+ struct mt6360_priv *priv = led->priv;
+ u32 real_bright, enable_mask = 0, enable = 0;
+ int i, ret;
+
+ mutex_lock(&priv->lock);
+
+ led_mc_calc_color_components(mccdev, level);
+
+ for (i = 0; i < mccdev->num_colors; i++) {
+ struct mc_subled *subled = mccdev->subled_info + i;
+
+ real_bright = min(lcdev->max_brightness, subled->brightness);
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_ISNK(i),
+ MT6360_ISNK_MASK, real_bright);
+ if (ret)
+ goto out;
+
+ enable_mask |= MT6360_ISNK_ENMASK(subled->channel);
+ if (real_bright)
+ enable |= MT6360_ISNK_ENMASK(subled->channel);
+ }
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_RGBEN, enable_mask,
+ enable);
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int mt6360_isnk_brightness_set(struct led_classdev *lcdev,
+ enum led_brightness level)
+{
+ struct mt6360_led *led = container_of(lcdev, struct mt6360_led, isnk);
+ struct mt6360_priv *priv = led->priv;
+ u32 enable_mask = MT6360_ISNK_ENMASK(led->led_no);
+ u32 val = level ? MT6360_ISNK_ENMASK(led->led_no) : 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_ISNK(led->led_no),
+ MT6360_ISNK_MASK, level);
+ if (ret)
+ goto out;
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_RGBEN, enable_mask,
+ val);
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int mt6360_torch_brightness_set(struct led_classdev *lcdev,
+ enum led_brightness level)
+{
+ struct mt6360_led *led =
+ container_of(lcdev, struct mt6360_led, flash.led_cdev);
+ struct mt6360_priv *priv = led->priv;
+ u32 enable_mask = MT6360_TORCHEN_MASK | MT6360_FLCSEN_MASK(led->led_no);
+ u32 val = level ? MT6360_FLCSEN_MASK(led->led_no) : 0;
+ u32 prev = priv->fled_torch_used, curr;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /*
+ * There is only one set of flash control logic; use this flag to
+ * check whether strobe is currently in use.
+ */
+ if (priv->fled_strobe_used) {
+ dev_warn(lcdev->dev, "Please disable strobe first [%d]\n",
+ priv->fled_strobe_used);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (level)
+ curr = prev | BIT(led->led_no);
+ else
+ curr = prev & ~BIT(led->led_no);
+
+ if (curr)
+ val |= MT6360_TORCHEN_MASK;
+
+ if (level) {
+ ret = regmap_update_bits(priv->regmap,
+ MT6360_REG_FLEDITOR(led->led_no),
+ MT6360_ITORCH_MASK, level - 1);
+ if (ret)
+ goto unlock;
+ }
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_FLEDEN, enable_mask,
+ val);
+ if (ret)
+ goto unlock;
+
+ priv->fled_torch_used = curr;
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int mt6360_flash_brightness_set(struct led_classdev_flash *fl_cdev,
+ u32 brightness)
+{
+ /*
+ * Due to the current spike when the flash turns on, let the brightness
+ * be kept by the framework.
+ * This empty function only exists so that the led_classdev_flash
+ * registration ops check passes.
+ */
+ return 0;
+}
+
+static int _mt6360_flash_brightness_set(struct led_classdev_flash *fl_cdev,
+ u32 brightness)
+{
+ struct mt6360_led *led =
+ container_of(fl_cdev, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+ struct led_flash_setting *s = &fl_cdev->brightness;
+ u32 val = (brightness - s->min) / s->step;
+
+ return regmap_update_bits(priv->regmap,
+ MT6360_REG_FLEDISTRB(led->led_no),
+ MT6360_ISTROBE_MASK, val);
+}
+
+static int mt6360_strobe_set(struct led_classdev_flash *fl_cdev, bool state)
+{
+ struct mt6360_led *led =
+ container_of(fl_cdev, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+ struct led_classdev *lcdev = &fl_cdev->led_cdev;
+ struct led_flash_setting *s = &fl_cdev->brightness;
+ u32 enable_mask = MT6360_STROBEN_MASK | MT6360_FLCSEN_MASK(led->led_no);
+ u32 val = state ? MT6360_FLCSEN_MASK(led->led_no) : 0;
+ u32 prev = priv->fled_strobe_used, curr;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /*
+ * There is only one set of flash control logic; use this flag to
+ * check whether torch is currently in use.
+ */
+ if (priv->fled_torch_used) {
+ dev_warn(lcdev->dev, "Please disable torch first [0x%x]\n",
+ priv->fled_torch_used);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (state)
+ curr = prev | BIT(led->led_no);
+ else
+ curr = prev & ~BIT(led->led_no);
+
+ if (curr)
+ val |= MT6360_STROBEN_MASK;
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_FLEDEN, enable_mask,
+ val);
+ if (ret) {
+ dev_err(lcdev->dev, "[%d] control current source %d fail\n",
+ led->led_no, state);
+ goto unlock;
+ }
+
+ /*
+ * If the flash needs to be on, configure the flash current to ramp up
+ * to the requested value.
+ * Otherwise, always fall back to the minimum.
+ */
+ ret = _mt6360_flash_brightness_set(fl_cdev, state ? s->val : s->min);
+ if (ret)
+ goto unlock;
+
+ /*
+ * When the flash turns on/off, the HW ramping up/down time is 5ms/500us,
+ * respectively.
+ */
+ if (!prev && curr)
+ usleep_range(5000, 6000);
+ else if (prev && !curr)
+ udelay(500);
+
+ priv->fled_strobe_used = curr;
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int mt6360_strobe_get(struct led_classdev_flash *fl_cdev, bool *state)
+{
+ struct mt6360_led *led =
+ container_of(fl_cdev, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+
+ mutex_lock(&priv->lock);
+ *state = !!(priv->fled_strobe_used & BIT(led->led_no));
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int mt6360_timeout_set(struct led_classdev_flash *fl_cdev, u32 timeout)
+{
+ struct mt6360_led *led =
+ container_of(fl_cdev, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+ struct led_flash_setting *s = &fl_cdev->timeout;
+ u32 val = (timeout - s->min) / s->step;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_STRBTO,
+ MT6360_STRBTO_MASK, val);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int mt6360_fault_get(struct led_classdev_flash *fl_cdev, u32 *fault)
+{
+ struct mt6360_led *led =
+ container_of(fl_cdev, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+ u16 fled_stat;
+ unsigned int chg_stat, strobe_timeout_mask, fled_short_mask;
+ u32 rfault = 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_read(priv->regmap, MT6360_REG_CHGSTAT2, &chg_stat);
+ if (ret)
+ goto unlock;
+
+ ret = regmap_raw_read(priv->regmap, MT6360_REG_FLEDSTAT1, &fled_stat,
+ sizeof(fled_stat));
+ if (ret)
+ goto unlock;
+
+ if (led->led_no == MT6360_LED_FLASH1) {
+ strobe_timeout_mask = MT6360_FLED1STRBTO_MASK;
+ fled_short_mask = MT6360_FLED1SHORT_MASK;
+ } else {
+ strobe_timeout_mask = MT6360_FLED2STRBTO_MASK;
+ fled_short_mask = MT6360_FLED2SHORT_MASK;
+ }
+
+ if (chg_stat & MT6360_FLEDCHGVINOVP_MASK)
+ rfault |= LED_FAULT_INPUT_VOLTAGE;
+
+ if (fled_stat & strobe_timeout_mask)
+ rfault |= LED_FAULT_TIMEOUT;
+
+ if (fled_stat & fled_short_mask)
+ rfault |= LED_FAULT_SHORT_CIRCUIT;
+
+ if (fled_stat & MT6360_FLEDLVF_MASK)
+ rfault |= LED_FAULT_UNDER_VOLTAGE;
+
+ *fault = rfault;
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct led_flash_ops mt6360_flash_ops = {
+ .flash_brightness_set = mt6360_flash_brightness_set,
+ .strobe_set = mt6360_strobe_set,
+ .strobe_get = mt6360_strobe_get,
+ .timeout_set = mt6360_timeout_set,
+ .fault_get = mt6360_fault_get,
+};
+
+static int mt6360_isnk_init_default_state(struct mt6360_led *led)
+{
+ struct mt6360_priv *priv = led->priv;
+ unsigned int regval;
+ u32 level;
+ int ret;
+
+ ret = regmap_read(priv->regmap, MT6360_REG_ISNK(led->led_no), &regval);
+ if (ret)
+ return ret;
+ level = regval & MT6360_ISNK_MASK;
+
+ ret = regmap_read(priv->regmap, MT6360_REG_RGBEN, &regval);
+ if (ret)
+ return ret;
+
+ if (!(regval & MT6360_ISNK_ENMASK(led->led_no)))
+ level = LED_OFF;
+
+ switch (led->default_state) {
+ case STATE_ON:
+ led->isnk.brightness = led->isnk.max_brightness;
+ break;
+ case STATE_KEEP:
+ led->isnk.brightness = min(level, led->isnk.max_brightness);
+ break;
+ default:
+ led->isnk.brightness = LED_OFF;
+ }
+
+ return mt6360_isnk_brightness_set(&led->isnk, led->isnk.brightness);
+}
+
+static int mt6360_flash_init_default_state(struct mt6360_led *led)
+{
+ struct led_classdev_flash *flash = &led->flash;
+ struct mt6360_priv *priv = led->priv;
+ u32 enable_mask = MT6360_TORCHEN_MASK | MT6360_FLCSEN_MASK(led->led_no);
+ u32 level;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(priv->regmap, MT6360_REG_FLEDITOR(led->led_no),
+ &regval);
+ if (ret)
+ return ret;
+ level = regval & MT6360_ITORCH_MASK;
+
+ ret = regmap_read(priv->regmap, MT6360_REG_FLEDEN, &regval);
+ if (ret)
+ return ret;
+
+ if ((regval & enable_mask) == enable_mask)
+ level += 1;
+ else
+ level = LED_OFF;
+
+ switch (led->default_state) {
+ case STATE_ON:
+ flash->led_cdev.brightness = flash->led_cdev.max_brightness;
+ break;
+ case STATE_KEEP:
+ flash->led_cdev.brightness =
+ min(level, flash->led_cdev.max_brightness);
+ break;
+ default:
+ flash->led_cdev.brightness = LED_OFF;
+ }
+
+ return mt6360_torch_brightness_set(&flash->led_cdev,
+ flash->led_cdev.brightness);
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+static int mt6360_flash_external_strobe_set(struct v4l2_flash *v4l2_flash,
+ bool enable)
+{
+ struct led_classdev_flash *flash = v4l2_flash->fled_cdev;
+ struct mt6360_led *led = container_of(flash, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+ u32 mask = MT6360_FLCSEN_MASK(led->led_no);
+ u32 val = enable ? mask : 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_FLEDEN, mask, val);
+ if (ret)
+ goto unlock;
+
+ if (enable)
+ priv->fled_strobe_used |= BIT(led->led_no);
+ else
+ priv->fled_strobe_used &= ~BIT(led->led_no);
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct v4l2_flash_ops v4l2_flash_ops = {
+ .external_strobe_set = mt6360_flash_external_strobe_set,
+};
+
+static void mt6360_init_v4l2_flash_config(struct mt6360_led *led,
+ struct v4l2_flash_config *config)
+{
+ struct led_classdev *lcdev;
+ struct led_flash_setting *s = &config->intensity;
+
+ lcdev = &led->flash.led_cdev;
+
+ s->min = MT6360_ITORCH_MINUA;
+ s->step = MT6360_ITORCH_STEPUA;
+ s->val = s->max = s->min + (lcdev->max_brightness - 1) * s->step;
+
+ config->has_external_strobe = 1;
+ strscpy(config->dev_name, lcdev->dev->kobj.name,
+ sizeof(config->dev_name));
+
+ config->flash_faults = LED_FAULT_SHORT_CIRCUIT | LED_FAULT_TIMEOUT |
+ LED_FAULT_INPUT_VOLTAGE |
+ LED_FAULT_UNDER_VOLTAGE;
+}
+#else
+static const struct v4l2_flash_ops v4l2_flash_ops;
+static void mt6360_init_v4l2_flash_config(struct mt6360_led *led,
+ struct v4l2_flash_config *config)
+{
+}
+#endif
+
+static int mt6360_led_register(struct device *parent, struct mt6360_led *led,
+ struct led_init_data *init_data)
+{
+ struct mt6360_priv *priv = led->priv;
+ struct v4l2_flash_config v4l2_config = {0};
+ int ret;
+
+ if ((led->led_no == MT6360_LED_ISNK1 ||
+ led->led_no == MT6360_VIRTUAL_MULTICOLOR) &&
+ (priv->leds_active & BIT(MT6360_LED_ISNK1))) {
+ /*
+ * Change isink1 to SW control mode and disconnect it from the
+ * charger state.
+ */
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_RGBEN,
+ MT6360_CHRINDSEL_MASK,
+ MT6360_CHRINDSEL_MASK);
+ if (ret) {
+ dev_err(parent, "Failed to config ISNK1 to SW mode\n");
+ return ret;
+ }
+ }
+
+ switch (led->led_no) {
+ case MT6360_VIRTUAL_MULTICOLOR:
+ ret = mt6360_mc_brightness_set(&led->mc.led_cdev, LED_OFF);
+ if (ret) {
+ dev_err(parent,
+ "Failed to init multicolor brightness\n");
+ return ret;
+ }
+
+ ret = devm_led_classdev_multicolor_register_ext(parent,
+ &led->mc, init_data);
+ if (ret) {
+ dev_err(parent, "Couldn't register multicolor\n");
+ return ret;
+ }
+ break;
+ case MT6360_LED_ISNK1 ... MT6360_LED_ISNKML:
+ ret = mt6360_isnk_init_default_state(led);
+ if (ret) {
+ dev_err(parent, "Failed to init %d isnk state\n",
+ led->led_no);
+ return ret;
+ }
+
+ ret = devm_led_classdev_register_ext(parent, &led->isnk,
+ init_data);
+ if (ret) {
+ dev_err(parent, "Couldn't register isink %d\n",
+ led->led_no);
+ return ret;
+ }
+ break;
+ default:
+ ret = mt6360_flash_init_default_state(led);
+ if (ret) {
+ dev_err(parent, "Failed to init %d flash state\n",
+ led->led_no);
+ return ret;
+ }
+
+ ret = devm_led_classdev_flash_register_ext(parent, &led->flash,
+ init_data);
+ if (ret) {
+ dev_err(parent, "Couldn't register flash %d\n",
+ led->led_no);
+ return ret;
+ }
+
+ mt6360_init_v4l2_flash_config(led, &v4l2_config);
+ led->v4l2_flash = v4l2_flash_init(parent, init_data->fwnode,
+ &led->flash,
+ &v4l2_flash_ops,
+ &v4l2_config);
+ if (IS_ERR(led->v4l2_flash)) {
+ dev_err(parent, "Failed to register %d v4l2 sd\n",
+ led->led_no);
+ return PTR_ERR(led->v4l2_flash);
+ }
+ }
+
+ return 0;
+}
+
+static u32 clamp_align(u32 val, u32 min, u32 max, u32 step)
+{
+ u32 retval;
+
+ retval = clamp_val(val, min, max);
+ if (step > 1)
+ retval = rounddown(retval - min, step) + min;
+
+ return retval;
+}
+
+static int mt6360_init_isnk_properties(struct mt6360_led *led,
+ struct led_init_data *init_data)
+{
+ struct led_classdev *lcdev;
+ struct mt6360_priv *priv = led->priv;
+ struct fwnode_handle *child;
+ u32 step_uA = MT6360_ISNKRGB_STEPUA, max_uA = MT6360_ISNKRGB_MAXUA;
+ u32 val;
+ int num_color = 0, ret;
+
+ if (led->led_no == MT6360_VIRTUAL_MULTICOLOR) {
+ struct mc_subled *sub_led;
+
+ sub_led = devm_kzalloc(priv->dev,
+ sizeof(*sub_led) * MULTICOLOR_NUM_CHANNELS, GFP_KERNEL);
+ if (!sub_led)
+ return -ENOMEM;
+
+ fwnode_for_each_child_node(init_data->fwnode, child) {
+ u32 reg, color;
+
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg > MT6360_LED_ISNK3 ||
+ priv->leds_active & BIT(reg))
+ return -EINVAL;
+
+ ret = fwnode_property_read_u32(child, "color", &color);
+ if (ret) {
+ dev_err(priv->dev,
+ "led %d, no color specified\n",
+ led->led_no);
+ return ret;
+ }
+
+ priv->leds_active |= BIT(reg);
+ sub_led[num_color].color_index = color;
+ sub_led[num_color].channel = reg;
+ num_color++;
+ }
+
+ if (num_color < 2) {
+ dev_err(priv->dev,
+ "Multicolor must include 2 or more led channel\n");
+ return -EINVAL;
+ }
+
+ led->mc.num_colors = num_color;
+ led->mc.subled_info = sub_led;
+
+ lcdev = &led->mc.led_cdev;
+ lcdev->brightness_set_blocking = mt6360_mc_brightness_set;
+ } else {
+ if (led->led_no == MT6360_LED_ISNKML) {
+ step_uA = MT6360_ISNKML_STEPUA;
+ max_uA = MT6360_ISNKML_MAXUA;
+ }
+
+ lcdev = &led->isnk;
+ lcdev->brightness_set_blocking = mt6360_isnk_brightness_set;
+ }
+
+ ret = fwnode_property_read_u32(init_data->fwnode, "led-max-microamp",
+ &val);
+ if (ret) {
+ dev_warn(priv->dev,
+ "Not specified led-max-microamp, config to the minimum\n");
+ val = step_uA;
+ } else
+ val = clamp_align(val, 0, max_uA, step_uA);
+
+ lcdev->max_brightness = val / step_uA;
+
+ fwnode_property_read_string(init_data->fwnode, "linux,default-trigger",
+ &lcdev->default_trigger);
+
+ return 0;
+}
+
+static int mt6360_init_flash_properties(struct mt6360_led *led,
+ struct led_init_data *init_data)
+{
+ struct led_classdev_flash *flash = &led->flash;
+ struct led_classdev *lcdev = &flash->led_cdev;
+ struct mt6360_priv *priv = led->priv;
+ struct led_flash_setting *s;
+ u32 val;
+ int ret;
+
+ ret = fwnode_property_read_u32(init_data->fwnode, "led-max-microamp",
+ &val);
+ if (ret) {
+ dev_warn(priv->dev,
+ "Not specified led-max-microamp, config to the minimum\n");
+ val = MT6360_ITORCH_MINUA;
+ } else
+ val = clamp_align(val, MT6360_ITORCH_MINUA, MT6360_ITORCH_MAXUA,
+ MT6360_ITORCH_STEPUA);
+
+ lcdev->max_brightness =
+ (val - MT6360_ITORCH_MINUA) / MT6360_ITORCH_STEPUA + 1;
+ lcdev->brightness_set_blocking = mt6360_torch_brightness_set;
+ lcdev->flags |= LED_DEV_CAP_FLASH;
+
+ ret = fwnode_property_read_u32(init_data->fwnode, "flash-max-microamp",
+ &val);
+ if (ret) {
+ dev_warn(priv->dev,
+ "Not specified flash-max-microamp, config to the minimum\n");
+ val = MT6360_ISTRB_MINUA;
+ } else
+ val = clamp_align(val, MT6360_ISTRB_MINUA, MT6360_ISTRB_MAXUA,
+ MT6360_ISTRB_STEPUA);
+
+ s = &flash->brightness;
+ s->min = MT6360_ISTRB_MINUA;
+ s->step = MT6360_ISTRB_STEPUA;
+ s->val = s->max = val;
+
+ /*
+ * Always configure the minimum level when off to prevent a flash
+ * current spike.
+ */
+ ret = _mt6360_flash_brightness_set(flash, s->min);
+ if (ret)
+ return ret;
+
+ ret = fwnode_property_read_u32(init_data->fwnode,
+ "flash-max-timeout-us", &val);
+ if (ret) {
+ dev_warn(priv->dev,
+ "Not specified flash-max-timeout-us, config to the minimum\n");
+ val = MT6360_STRBTO_MINUS;
+ } else
+ val = clamp_align(val, MT6360_STRBTO_MINUS, MT6360_STRBTO_MAXUS,
+ MT6360_STRBTO_STEPUS);
+
+ s = &flash->timeout;
+ s->min = MT6360_STRBTO_MINUS;
+ s->step = MT6360_STRBTO_STEPUS;
+ s->val = s->max = val;
+
+ flash->ops = &mt6360_flash_ops;
+
+ return 0;
+}
+
+static int mt6360_init_common_properties(struct mt6360_led *led,
+ struct led_init_data *init_data)
+{
+ const char *const states[] = { "off", "keep", "on" };
+ const char *str;
+ int ret;
+
+ if (!fwnode_property_read_string(init_data->fwnode,
+ "default-state", &str)) {
+ ret = match_string(states, ARRAY_SIZE(states), str);
+ if (ret < 0)
+ ret = STATE_OFF;
+
+ led->default_state = ret;
+ }
+
+ return 0;
+}
+
+static void mt6360_v4l2_flash_release(struct mt6360_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->leds_count; i++) {
+ struct mt6360_led *led = priv->leds + i;
+
+ if (led->v4l2_flash)
+ v4l2_flash_release(led->v4l2_flash);
+ }
+}
+
+static int mt6360_led_probe(struct platform_device *pdev)
+{
+ struct mt6360_priv *priv;
+ struct fwnode_handle *child;
+ size_t count;
+ int i = 0, ret;
+
+ count = device_get_child_node_count(&pdev->dev);
+ if (!count || count > MT6360_MAX_LEDS) {
+ dev_err(&pdev->dev,
+ "No child node or node count over max led number %zu\n",
+ count);
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&pdev->dev,
+ struct_size(priv, leds, count), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->leds_count = count;
+ priv->dev = &pdev->dev;
+ mutex_init(&priv->lock);
+
+ priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!priv->regmap) {
+ dev_err(&pdev->dev, "Failed to get parent regmap\n");
+ return -ENODEV;
+ }
+
+ device_for_each_child_node(&pdev->dev, child) {
+ struct mt6360_led *led = priv->leds + i;
+ struct led_init_data init_data = { .fwnode = child, };
+ u32 reg, led_color;
+
+ ret = fwnode_property_read_u32(child, "color", &led_color);
+ if (ret)
+ goto out_flash_release;
+
+ if (led_color == LED_COLOR_ID_RGB ||
+ led_color == LED_COLOR_ID_MULTI)
+ reg = MT6360_VIRTUAL_MULTICOLOR;
+ else {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret)
+ goto out_flash_release;
+
+ if (reg >= MT6360_MAX_LEDS) {
+ ret = -EINVAL;
+ goto out_flash_release;
+ }
+ }
+
+ if (priv->leds_active & BIT(reg)) {
+ ret = -EINVAL;
+ goto out_flash_release;
+ }
+ priv->leds_active |= BIT(reg);
+
+ led->led_no = reg;
+ led->priv = priv;
+
+ ret = mt6360_init_common_properties(led, &init_data);
+ if (ret)
+ goto out_flash_release;
+
+ if (reg == MT6360_VIRTUAL_MULTICOLOR ||
+ reg <= MT6360_LED_ISNKML)
+ ret = mt6360_init_isnk_properties(led, &init_data);
+ else
+ ret = mt6360_init_flash_properties(led, &init_data);
+
+ if (ret)
+ goto out_flash_release;
+
+ ret = mt6360_led_register(&pdev->dev, led, &init_data);
+ if (ret)
+ goto out_flash_release;
+
+ i++;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ return 0;
+
+out_flash_release:
+ mt6360_v4l2_flash_release(priv);
+ return ret;
+}
+
+static int mt6360_led_remove(struct platform_device *pdev)
+{
+ struct mt6360_priv *priv = platform_get_drvdata(pdev);
+
+ mt6360_v4l2_flash_release(priv);
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused mt6360_led_of_id[] = {
+ { .compatible = "mediatek,mt6360-led", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6360_led_of_id);
+
+static struct platform_driver mt6360_led_driver = {
+ .driver = {
+ .name = "mt6360-led",
+ .of_match_table = mt6360_led_of_id,
+ },
+ .probe = mt6360_led_probe,
+ .remove = mt6360_led_remove,
+};
+module_platform_driver(mt6360_led_driver);
+
+MODULE_AUTHOR("Gene Chen <gene_chen@richtek.com>");
+MODULE_DESCRIPTION("MT6360 LED Driver");
+MODULE_LICENSE("GPL v2");
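The clamp_align() helper in the driver above clamps a value into [min, max] and then rounds it down onto the step grid anchored at min. A standalone check of that arithmetic (plain C re-implementation, not the driver code):

#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_align(uint32_t val, uint32_t min, uint32_t max,
			    uint32_t step)
{
	if (val < min)
		val = min;
	if (val > max)
		val = max;
	if (step > 1)
		val = min + ((val - min) / step) * step;	/* round down to grid */
	return val;
}

int main(void)
{
	/* 30000 uA on a 25000 + n*12500 grid rounds down to 25000 */
	printf("%u\n", clamp_align(30000, 25000, 400000, 12500));
	/* 2000000 uA clamps to the 1500000 max, already on the grid */
	printf("%u\n", clamp_align(2000000, 50000, 1500000, 12500));
	return 0;
}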
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index f4bb02f6e042..6a8ea94834fa 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -375,10 +375,8 @@ int led_classdev_register_ext(struct device *parent,
mutex_unlock(&led_cdev->led_access);
return PTR_ERR(led_cdev->dev);
}
- if (init_data && init_data->fwnode) {
- led_cdev->dev->fwnode = init_data->fwnode;
- led_cdev->dev->of_node = to_of_node(init_data->fwnode);
- }
+ if (init_data && init_data->fwnode)
+ device_set_node(led_cdev->dev, init_data->fwnode);
if (ret)
dev_warn(parent, "Led %s renamed to %s due to name collision",
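device_set_node() assigns dev->fwnode and, for OF firmware nodes, dev->of_node in a single call, which is what replaces the open-coded pair above. A minimal sketch, assuming kernel context:

#include <linux/device.h>
#include <linux/property.h>

static void sketch_adopt_node(struct device *dev,
			      struct fwnode_handle *fwnode)
{
	/* Sets dev->fwnode and dev->of_node together */
	if (fwnode)
		device_set_node(dev, fwnode);
}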
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
deleted file mode 100644
index bc6b420637d6..000000000000
--- a/drivers/leds/leds-fsg.c
+++ /dev/null
@@ -1,193 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * LED Driver for the Freecom FSG-3
- *
- * Copyright (c) 2008 Rod Whitby <rod@whitby.id.au>
- *
- * Author: Rod Whitby <rod@whitby.id.au>
- *
- * Based on leds-spitz.c
- * Copyright 2005-2006 Openedhand Ltd.
- * Author: Richard Purdie <rpurdie@openedhand.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-
-#define FSG_LED_WLAN_BIT 0
-#define FSG_LED_WAN_BIT 1
-#define FSG_LED_SATA_BIT 2
-#define FSG_LED_USB_BIT 4
-#define FSG_LED_RING_BIT 5
-#define FSG_LED_SYNC_BIT 7
-
-static short __iomem *latch_address;
-static unsigned short latch_value;
-
-
-static void fsg_led_wlan_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_WLAN_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_WLAN_BIT);
- *latch_address = latch_value;
- }
-}
-
-static void fsg_led_wan_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_WAN_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_WAN_BIT);
- *latch_address = latch_value;
- }
-}
-
-static void fsg_led_sata_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_SATA_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_SATA_BIT);
- *latch_address = latch_value;
- }
-}
-
-static void fsg_led_usb_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_USB_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_USB_BIT);
- *latch_address = latch_value;
- }
-}
-
-static void fsg_led_sync_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_SYNC_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_SYNC_BIT);
- *latch_address = latch_value;
- }
-}
-
-static void fsg_led_ring_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_RING_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_RING_BIT);
- *latch_address = latch_value;
- }
-}
-
-
-static struct led_classdev fsg_wlan_led = {
- .name = "fsg:blue:wlan",
- .brightness_set = fsg_led_wlan_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev fsg_wan_led = {
- .name = "fsg:blue:wan",
- .brightness_set = fsg_led_wan_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev fsg_sata_led = {
- .name = "fsg:blue:sata",
- .brightness_set = fsg_led_sata_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev fsg_usb_led = {
- .name = "fsg:blue:usb",
- .brightness_set = fsg_led_usb_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev fsg_sync_led = {
- .name = "fsg:blue:sync",
- .brightness_set = fsg_led_sync_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev fsg_ring_led = {
- .name = "fsg:blue:ring",
- .brightness_set = fsg_led_ring_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-
-static int fsg_led_probe(struct platform_device *pdev)
-{
- int ret;
-
- /* Map the LED chip select address space */
- latch_address = (unsigned short *) devm_ioremap(&pdev->dev,
- IXP4XX_EXP_BUS_BASE(2), 512);
- if (!latch_address)
- return -ENOMEM;
-
- latch_value = 0xffff;
- *latch_address = latch_value;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_wlan_led);
- if (ret < 0)
- return ret;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_wan_led);
- if (ret < 0)
- return ret;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_sata_led);
- if (ret < 0)
- return ret;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_usb_led);
- if (ret < 0)
- return ret;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_sync_led);
- if (ret < 0)
- return ret;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_ring_led);
- if (ret < 0)
- return ret;
-
- return ret;
-}
-
-static struct platform_driver fsg_led_driver = {
- .probe = fsg_led_probe,
- .driver = {
- .name = "fsg-led",
- },
-};
-
-module_platform_driver(fsg_led_driver);
-
-MODULE_AUTHOR("Rod Whitby <rod@whitby.id.au>");
-MODULE_DESCRIPTION("Freecom FSG-3 LED driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index 401df1e2e05d..50b195ff96ca 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -266,7 +266,6 @@ struct lp50xx_led {
struct led_classdev_mc mc_cdev;
struct lp50xx *priv;
unsigned long bank_modules;
- int led_intensity[LP50XX_LEDS_PER_MODULE];
u8 ctrl_bank_enabled;
int led_number;
};
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index d1657c46ee2f..9fdfc1b9a1a0 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -439,6 +439,8 @@ int lp55xx_init_device(struct lp55xx_chip *chip)
return -EINVAL;
if (pdata->enable_gpiod) {
+ gpiod_direction_output(pdata->enable_gpiod, 0);
+
gpiod_set_consumer_name(pdata->enable_gpiod, "LP55xx enable");
gpiod_set_value(pdata->enable_gpiod, 0);
usleep_range(1000, 2000); /* Keep enable down at least 1ms */
@@ -694,7 +696,7 @@ struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
of_property_read_u8(np, "clock-mode", &pdata->clock_mode);
pdata->enable_gpiod = devm_gpiod_get_optional(dev, "enable",
- GPIOD_OUT_LOW);
+ GPIOD_ASIS);
if (IS_ERR(pdata->enable_gpiod))
return ERR_CAST(pdata->enable_gpiod);
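
The two lp55xx hunks belong together: the enable GPIO is now requested with GPIOD_ASIS (leave the line untouched at request time) and only forced to be an output in lp55xx_init_device(), right before the reset sequence. A minimal sketch of the pattern, assuming a hypothetical dev and error path:

	struct gpio_desc *enable;

	/* request without reconfiguring the line (GPIOD_ASIS) */
	enable = devm_gpiod_get_optional(dev, "enable", GPIOD_ASIS);
	if (IS_ERR(enable))
		return PTR_ERR(enable);

	if (enable) {
		/* make it an output, driven low, only when ready to reset */
		gpiod_direction_output(enable, 0);
		gpiod_set_value(enable, 1);	/* later: release the chip */
	}
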
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 225b765830bd..1473ced8664c 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -242,9 +242,7 @@ static int choose_times(int msec, int *c1p, int *c2p)
if (diff < 65536) {
int actual;
if (msec & 1) {
- c1 = *c2p;
- *c2p = *c1p;
- *c1p = c1;
+ swap(*c2p, *c1p);
}
actual = time_codes[*c1p] + time_codes[*c2p];
if (*c1p < *c2p)
@@ -643,9 +641,6 @@ static int tca6507_probe_gpios(struct device *dev,
tca->gpio.direction_output = tca6507_gpio_direction_output;
tca->gpio.set = tca6507_gpio_set_value;
tca->gpio.parent = dev;
-#ifdef CONFIG_OF_GPIO
- tca->gpio.of_node = of_node_get(dev_of_node(dev));
-#endif
err = gpiochip_add_data(&tca->gpio, tca);
if (err) {
tca->gpio.ngpio = 0;
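
The choose_times() hunk replaces an open-coded three-assignment exchange with the kernel's swap() macro from <linux/minmax.h>. A runnable userspace stand-in with the same expansion:

#include <stdio.h>

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	int c1 = 3, c2 = 7;

	swap(c1, c2);
	printf("%d %d\n", c1, c2);	/* prints "7 3" */
	return 0;
}
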
diff --git a/drivers/leds/simple/Kconfig b/drivers/leds/simple/Kconfig
new file mode 100644
index 000000000000..9f6a68336659
--- /dev/null
+++ b/drivers/leds/simple/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config LEDS_SIEMENS_SIMATIC_IPC
+ tristate "LED driver for Siemens Simatic IPCs"
+ depends on LEDS_CLASS
+ depends on SIEMENS_SIMATIC_IPC
+ help
+ This option enables support for the LEDs of several Industrial PCs
+ from Siemens.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc-leds.
diff --git a/drivers/leds/simple/Makefile b/drivers/leds/simple/Makefile
new file mode 100644
index 000000000000..8481f1e9e360
--- /dev/null
+++ b/drivers/leds/simple/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds.o
diff --git a/drivers/leds/simple/simatic-ipc-leds.c b/drivers/leds/simple/simatic-ipc-leds.c
new file mode 100644
index 000000000000..ff2c96e73241
--- /dev/null
+++ b/drivers/leds/simple/simatic-ipc-leds.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for LEDs
+ *
+ * Copyright (c) Siemens AG, 2018-2021
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+
+#define SIMATIC_IPC_LED_PORT_BASE 0x404E
+
+struct simatic_ipc_led {
+ unsigned int value; /* mask for io and offset for mem */
+ char *name;
+ struct led_classdev cdev;
+};
+
+static struct simatic_ipc_led simatic_ipc_leds_io[] = {
+ {1 << 15, "green:" LED_FUNCTION_STATUS "-1" },
+ {1 << 7, "yellow:" LED_FUNCTION_STATUS "-1" },
+ {1 << 14, "red:" LED_FUNCTION_STATUS "-2" },
+ {1 << 6, "yellow:" LED_FUNCTION_STATUS "-2" },
+ {1 << 13, "red:" LED_FUNCTION_STATUS "-3" },
+ {1 << 5, "yellow:" LED_FUNCTION_STATUS "-3" },
+ { }
+};
+
+/* the actual start will be discovered with PCI, 0 is a placeholder */
+static struct resource simatic_ipc_led_mem_res = DEFINE_RES_MEM_NAMED(0, SZ_4K, KBUILD_MODNAME);
+
+static void *simatic_ipc_led_memory;
+
+static struct simatic_ipc_led simatic_ipc_leds_mem[] = {
+ {0x500 + 0x1A0, "red:" LED_FUNCTION_STATUS "-1"},
+ {0x500 + 0x1A8, "green:" LED_FUNCTION_STATUS "-1"},
+ {0x500 + 0x1C8, "red:" LED_FUNCTION_STATUS "-2"},
+ {0x500 + 0x1D0, "green:" LED_FUNCTION_STATUS "-2"},
+ {0x500 + 0x1E0, "red:" LED_FUNCTION_STATUS "-3"},
+ {0x500 + 0x198, "green:" LED_FUNCTION_STATUS "-3"},
+ { }
+};
+
+static struct resource simatic_ipc_led_io_res =
+ DEFINE_RES_IO_NAMED(SIMATIC_IPC_LED_PORT_BASE, SZ_2, KBUILD_MODNAME);
+
+static DEFINE_SPINLOCK(reg_lock);
+
+static inline struct simatic_ipc_led *cdev_to_led(struct led_classdev *led_cd)
+{
+ return container_of(led_cd, struct simatic_ipc_led, cdev);
+}
+
+static void simatic_ipc_led_set_io(struct led_classdev *led_cd,
+ enum led_brightness brightness)
+{
+ struct simatic_ipc_led *led = cdev_to_led(led_cd);
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&reg_lock, flags);
+
+ val = inw(SIMATIC_IPC_LED_PORT_BASE);
+ if (brightness == LED_OFF)
+ outw(val | led->value, SIMATIC_IPC_LED_PORT_BASE);
+ else
+ outw(val & ~led->value, SIMATIC_IPC_LED_PORT_BASE);
+
+ spin_unlock_irqrestore(&reg_lock, flags);
+}
+
+static enum led_brightness simatic_ipc_led_get_io(struct led_classdev *led_cd)
+{
+ struct simatic_ipc_led *led = cdev_to_led(led_cd);
+
+ return inw(SIMATIC_IPC_LED_PORT_BASE) & led->value ? LED_OFF : led_cd->max_brightness;
+}
+
+static void simatic_ipc_led_set_mem(struct led_classdev *led_cd,
+ enum led_brightness brightness)
+{
+ struct simatic_ipc_led *led = cdev_to_led(led_cd);
+
+ u32 *p;
+
+ p = simatic_ipc_led_memory + led->value;
+ *p = (*p & ~1) | (brightness == LED_OFF);
+}
+
+static enum led_brightness simatic_ipc_led_get_mem(struct led_classdev *led_cd)
+{
+ struct simatic_ipc_led *led = cdev_to_led(led_cd);
+
+ u32 *p;
+
+ p = simatic_ipc_led_memory + led->value;
+ return (*p & 1) ? LED_OFF : led_cd->max_brightness;
+}
+
+static int simatic_ipc_leds_probe(struct platform_device *pdev)
+{
+ const struct simatic_ipc_platform *plat = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct simatic_ipc_led *ipcled;
+ struct led_classdev *cdev;
+ struct resource *res;
+ int err, type;
+ u32 *p;
+
+ switch (plat->devmode) {
+ case SIMATIC_IPC_DEVICE_227D:
+ case SIMATIC_IPC_DEVICE_427E:
+ res = &simatic_ipc_led_io_res;
+ ipcled = simatic_ipc_leds_io;
+ /* on 227D the two bytes work the other way around */
+ if (plat->devmode == SIMATIC_IPC_DEVICE_227D) {
+ while (ipcled->value) {
+ ipcled->value = swab16(ipcled->value);
+ ipcled++;
+ }
+ ipcled = simatic_ipc_leds_io;
+ }
+ type = IORESOURCE_IO;
+ if (!devm_request_region(dev, res->start, resource_size(res), KBUILD_MODNAME)) {
+ dev_err(dev, "Unable to register IO resource at %pR\n", res);
+ return -EBUSY;
+ }
+ break;
+ case SIMATIC_IPC_DEVICE_127E:
+ res = &simatic_ipc_led_mem_res;
+ ipcled = simatic_ipc_leds_mem;
+ type = IORESOURCE_MEM;
+
+ /* get GPIO base from PCI */
+ res->start = simatic_ipc_get_membase0(PCI_DEVFN(13, 0));
+ if (res->start == 0)
+ return -ENODEV;
+
+ /* do the final address calculation */
+ res->start = res->start + (0xC5 << 16);
+ res->end += res->start;
+
+ simatic_ipc_led_memory = devm_ioremap_resource(dev, res);
+ if (IS_ERR(simatic_ipc_led_memory))
+ return PTR_ERR(simatic_ipc_led_memory);
+
+ /* initialize power/watchdog LED */
+ p = simatic_ipc_led_memory + 0x500 + 0x1D8; /* PM_WDT_OUT */
+ *p = (*p & ~1);
+ p = simatic_ipc_led_memory + 0x500 + 0x1C0; /* PM_BIOS_BOOT_N */
+ *p = (*p | 1);
+
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ while (ipcled->value) {
+ cdev = &ipcled->cdev;
+ if (type == IORESOURCE_MEM) {
+ cdev->brightness_set = simatic_ipc_led_set_mem;
+ cdev->brightness_get = simatic_ipc_led_get_mem;
+ } else {
+ cdev->brightness_set = simatic_ipc_led_set_io;
+ cdev->brightness_get = simatic_ipc_led_get_io;
+ }
+ cdev->max_brightness = LED_ON;
+ cdev->name = ipcled->name;
+
+ err = devm_led_classdev_register(dev, cdev);
+ if (err < 0)
+ return err;
+ ipcled++;
+ }
+
+ return 0;
+}
+
+static struct platform_driver simatic_ipc_led_driver = {
+ .probe = simatic_ipc_leds_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ }
+};
+
+module_platform_driver(simatic_ipc_led_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index 28b8581b44dd..d8c4d5664145 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -239,33 +239,11 @@ static struct ctl_table mac_hid_files[] = {
{ }
};
-/* dir in /proc/sys/dev */
-static struct ctl_table mac_hid_dir[] = {
- {
- .procname = "mac_hid",
- .maxlen = 0,
- .mode = 0555,
- .child = mac_hid_files,
- },
- { }
-};
-
-/* /proc/sys/dev itself, in case that is not there yet */
-static struct ctl_table mac_hid_root_dir[] = {
- {
- .procname = "dev",
- .maxlen = 0,
- .mode = 0555,
- .child = mac_hid_dir,
- },
- { }
-};
-
static struct ctl_table_header *mac_hid_sysctl_header;
static int __init mac_hid_init(void)
{
- mac_hid_sysctl_header = register_sysctl_table(mac_hid_root_dir);
+ mac_hid_sysctl_header = register_sysctl("dev/mac_hid", mac_hid_files);
if (!mac_hid_sysctl_header)
return -ENOMEM;
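
The mac_hid conversion relies on register_sysctl() accepting a slash-separated path, which creates the intermediate "dev" and "mac_hid" directories on demand, so the two hand-rolled ctl_table directory arrays become unnecessary. A hedged sketch of the resulting registration/unregistration pair:

	static struct ctl_table_header *hdr;

	static int __init example_init(void)
	{
		/* creates /proc/sys/dev/mac_hid and hooks the leaf entries */
		hdr = register_sysctl("dev/mac_hid", mac_hid_files);
		if (!hdr)
			return -ENOMEM;
		return 0;
	}

	static void __exit example_exit(void)
	{
		unregister_sysctl_table(hdr);
	}
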
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index eab7e83c11c4..b17660c022eb 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -703,7 +703,7 @@ static const struct mb_ops keylargo_mb_ops = {
* Therefore we do it all by polling the media bay once each tick.
*/
-static struct of_device_id media_bay_match[] =
+static const struct of_device_id media_bay_match[] =
{
{
.name = "media-bay",
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
index 72942002a54a..496c4951ccb1 100644
--- a/drivers/mailbox/apple-mailbox.c
+++ b/drivers/mailbox/apple-mailbox.c
@@ -364,8 +364,8 @@ static const struct apple_mbox_hw apple_mbox_m3_hw = {
};
static const struct of_device_id apple_mbox_of_match[] = {
- { .compatible = "apple,t8103-asc-mailbox", .data = &apple_mbox_asc_hw },
- { .compatible = "apple,t8103-m3-mailbox", .data = &apple_mbox_m3_hw },
+ { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
{}
};
MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index 78073ad1f2f1..22acb51531cb 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1298,7 +1298,7 @@ static int flexrm_startup(struct mbox_chan *chan)
val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
cpumask_set_cpu((ring->num / val) % num_online_cpus(),
&ring->irq_aff_hint);
- ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
+ ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
if (ret) {
dev_err(ring->mbox->dev,
"failed to set IRQ affinity hint for ring%d\n",
@@ -1425,7 +1425,7 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Release IRQ */
if (ring->irq_requested) {
- irq_set_affinity_hint(ring->irq, NULL);
+ irq_update_affinity_hint(ring->irq, NULL);
free_irq(ring->irq, ring);
ring->irq_requested = false;
}
@@ -1484,7 +1484,7 @@ static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
struct device *dev = msi_desc_to_dev(desc);
struct flexrm_mbox *mbox = dev_get_drvdata(dev);
- struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];
+ struct flexrm_ring *ring = &mbox->rings[desc->msi_index];
/* Configure per-Ring MSI registers */
writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
@@ -1497,7 +1497,6 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
int index, ret = 0;
void __iomem *regs;
void __iomem *regs_end;
- struct msi_desc *desc;
struct resource *iomem;
struct flexrm_ring *ring;
struct flexrm_mbox *mbox;
@@ -1608,10 +1607,8 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
goto fail_destroy_cmpl_pool;
/* Save alloced IRQ numbers for each ring */
- for_each_msi_entry(desc, dev) {
- ring = &mbox->rings[desc->platform.msi_index];
- ring->irq = desc->irq;
- }
+ for (index = 0; index < mbox->num_rings; index++)
+ mbox->rings[index].irq = msi_get_virq(dev, index);
/* Check availability of debugfs */
if (!debugfs_initialized())
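
Two independent conversions meet in this file: irq_set_affinity_hint() becomes irq_update_affinity_hint(), which records the hint without forcing an affinity change, and the per-ring IRQ lookup uses msi_get_virq() instead of walking MSI descriptors. A sketch of the lookup, assuming one vector per ring and hypothetical error handling:

	for (index = 0; index < mbox->num_rings; index++) {
		unsigned int virq = msi_get_virq(dev, index);

		if (!virq)
			return -ENOENT;	/* no vector allocated for this index */
		mbox->rings[index].irq = virq;
	}
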
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
index e41bd2f5ea46..ab24e731a782 100644
--- a/drivers/mailbox/hi3660-mailbox.c
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -44,14 +44,13 @@
#define MBOX_MSG_LEN 8
/**
- * Hi3660 mailbox channel information
+ * struct hi3660_chan_info - Hi3660 mailbox channel information
+ * @dst_irq: Interrupt vector for remote processor
+ * @ack_irq: Interrupt vector for local processor
*
* A channel can be used for TX or RX, it can trigger remote
* processor interrupt to notify remote processor and can receive
- * interrupt if has incoming message.
- *
- * @dst_irq: Interrupt vector for remote processor
- * @ack_irq: Interrupt vector for local processor
+ * interrupt if it has an incoming message.
*/
struct hi3660_chan_info {
unsigned int dst_irq;
@@ -59,16 +58,15 @@ struct hi3660_chan_info {
};
/**
- * Hi3660 mailbox controller data
- *
- * Mailbox controller includes 32 channels and can allocate
- * channel for message transferring.
- *
+ * struct hi3660_mbox - Hi3660 mailbox controller data
* @dev: Device to which it is attached
* @base: Base address of the register mapping region
* @chan: Representation of channels in mailbox controller
* @mchan: Representation of channel info
* @controller: Representation of a communication channel controller
+ *
+ * The mailbox controller includes 32 channels and can allocate
+ * a channel for message transfer.
*/
struct hi3660_mbox {
struct device *dev;
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index ffe36a6bef9e..544de2db6453 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -563,8 +563,8 @@ static int imx_mu_probe(struct platform_device *pdev)
size = sizeof(struct imx_sc_rpc_msg_max);
priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
- if (IS_ERR(priv->msg))
- return PTR_ERR(priv->msg);
+ if (!priv->msg)
+ return -ENOMEM;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 0d6e2231a2c7..4e34854d1238 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -232,7 +232,7 @@ static int mpfs_mbox_probe(struct platform_device *pdev)
}
static const struct of_device_id mpfs_mbox_of_match[] = {
- {.compatible = "microchip,polarfire-soc-mailbox", },
+ {.compatible = "microchip,mpfs-mailbox", },
{},
};
MODULE_DEVICE_TABLE(of, mpfs_mbox_of_match);
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index a8845b162dbf..2578e5aaa935 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -573,8 +573,11 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->clocks[alias_id].id = clk_names[alias_id];
cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
if (IS_ERR(cmdq->clocks[alias_id].clk)) {
- dev_err(dev, "failed to get gce clk: %d\n", alias_id);
- return PTR_ERR(cmdq->clocks[alias_id].clk);
+ of_node_put(node);
+ return dev_err_probe(dev,
+ PTR_ERR(cmdq->clocks[alias_id].clk),
+ "failed to get gce clk: %d\n",
+ alias_id);
}
}
}
@@ -582,8 +585,8 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->clocks[alias_id].id = clk_name;
cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(cmdq->clocks[alias_id].clk)) {
- dev_err(dev, "failed to get gce clk\n");
- return PTR_ERR(cmdq->clocks[alias_id].clk);
+ return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
+ "failed to get gce clk\n");
}
}
@@ -658,13 +661,13 @@ static const struct gce_plat gce_plat_v5 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
- .gce_num = 2
+ .gce_num = 1
};
static const struct gce_plat gce_plat_v6 = {
.thread_nr = 24,
.shift = 3,
- .control_by_sw = false,
+ .control_by_sw = true,
.gce_num = 2
};
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 887a3704c12e..ed18936b8ce6 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -241,9 +241,11 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
if (ret)
return IRQ_NONE;
- val &= pchan->cmd_complete.status_mask;
- if (!val)
- return IRQ_NONE;
+ if (val) { /* Ensure GAS exists and value is non-zero */
+ val &= pchan->cmd_complete.status_mask;
+ if (!val)
+ return IRQ_NONE;
+ }
ret = pcc_chan_reg_read(&pchan->error, &val);
if (ret)
@@ -289,7 +291,7 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
pchan = chan_info + subspace_id;
chan = pchan->chan.mchan;
if (IS_ERR(chan) || chan->cl) {
- dev_err(dev, "Channel not found for idx: %d\n", subspace_id);
+ pr_err("Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
dev = chan->mbox->dev;
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index f1d4f4679b17..c5d963222014 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -13,8 +13,6 @@
#include <dt-bindings/mailbox/qcom-ipcc.h>
-#define IPCC_MBOX_MAX_CHAN 48
-
/* IPCC Register offsets */
#define IPCC_REG_SEND_ID 0x0c
#define IPCC_REG_RECV_ID 0x10
@@ -52,9 +50,10 @@ struct qcom_ipcc {
struct device *dev;
void __iomem *base;
struct irq_domain *irq_domain;
- struct mbox_chan chan[IPCC_MBOX_MAX_CHAN];
- struct qcom_ipcc_chan_info mchan[IPCC_MBOX_MAX_CHAN];
+ struct mbox_chan *chans;
+ struct qcom_ipcc_chan_info *mchan;
struct mbox_controller mbox;
+ int num_chans;
int irq;
};
@@ -166,25 +165,37 @@ static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox);
struct qcom_ipcc_chan_info *mchan;
struct mbox_chan *chan;
- unsigned int i;
+ struct device *dev;
+ int chan_id;
+
+ dev = ipcc->dev;
if (ph->args_count != 2)
return ERR_PTR(-EINVAL);
- for (i = 0; i < IPCC_MBOX_MAX_CHAN; i++) {
- chan = &ipcc->chan[i];
- if (!chan->con_priv) {
- mchan = &ipcc->mchan[i];
- mchan->client_id = ph->args[0];
- mchan->signal_id = ph->args[1];
- chan->con_priv = mchan;
- break;
- }
+ for (chan_id = 0; chan_id < mbox->num_chans; chan_id++) {
+ chan = &ipcc->chans[chan_id];
+ mchan = chan->con_priv;
- chan = NULL;
+ if (!mchan)
+ break;
+ else if (mchan->client_id == ph->args[0] &&
+ mchan->signal_id == ph->args[1])
+ return ERR_PTR(-EBUSY);
}
- return chan ?: ERR_PTR(-EBUSY);
+ if (chan_id >= mbox->num_chans)
+ return ERR_PTR(-EBUSY);
+
+ mchan = devm_kzalloc(dev, sizeof(*mchan), GFP_KERNEL);
+ if (!mchan)
+ return ERR_PTR(-ENOMEM);
+
+ mchan->client_id = ph->args[0];
+ mchan->signal_id = ph->args[1];
+ chan->con_priv = mchan;
+
+ return chan;
}
static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
@@ -192,15 +203,49 @@ static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
.shutdown = qcom_ipcc_mbox_shutdown,
};
-static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
+static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
+ struct device_node *controller_dn)
{
+ struct of_phandle_args curr_ph;
+ struct device_node *client_dn;
struct mbox_controller *mbox;
struct device *dev = ipcc->dev;
+ int i, j, ret;
+
+ /*
+ * Find out the number of clients interested in this mailbox
+ * and create channels accordingly.
+ */
+ ipcc->num_chans = 0;
+ for_each_node_with_property(client_dn, "mboxes") {
+ if (!of_device_is_available(client_dn))
+ continue;
+ i = of_count_phandle_with_args(client_dn,
+ "mboxes", "#mbox-cells");
+ for (j = 0; j < i; j++) {
+ ret = of_parse_phandle_with_args(client_dn, "mboxes",
+ "#mbox-cells", j, &curr_ph);
+ of_node_put(curr_ph.np);
+ if (!ret && curr_ph.np == controller_dn) {
+ ipcc->num_chans++;
+ break;
+ }
+ }
+ }
+
+ /* If no clients are found, skip registering as a mbox controller */
+ if (!ipcc->num_chans)
+ return 0;
+
+ ipcc->chans = devm_kcalloc(dev, ipcc->num_chans,
+ sizeof(struct mbox_chan), GFP_KERNEL);
+ if (!ipcc->chans)
+ return -ENOMEM;
mbox = &ipcc->mbox;
mbox->dev = dev;
- mbox->num_chans = IPCC_MBOX_MAX_CHAN;
- mbox->chans = ipcc->chan;
+ mbox->num_chans = ipcc->num_chans;
+ mbox->chans = ipcc->chans;
mbox->ops = &ipcc_mbox_chan_ops;
mbox->of_xlate = qcom_ipcc_mbox_xlate;
mbox->txdone_irq = false;
@@ -212,6 +257,8 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
static int qcom_ipcc_probe(struct platform_device *pdev)
{
struct qcom_ipcc *ipcc;
+ static int id;
+ char *name;
int ret;
ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL);
@@ -228,27 +275,33 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (ipcc->irq < 0)
return ipcc->irq;
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "ipcc_%d", id++);
+ if (!name)
+ return -ENOMEM;
+
ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
&qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
- ret = qcom_ipcc_setup_mbox(ipcc);
+ ret = qcom_ipcc_setup_mbox(ipcc, pdev->dev.of_node);
if (ret)
goto err_mbox;
ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
- IRQF_TRIGGER_HIGH, "ipcc", ipcc);
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, name, ipcc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
- goto err_mbox;
+ goto err_req_irq;
}
- enable_irq_wake(ipcc->irq);
platform_set_drvdata(pdev, ipcc);
return 0;
+err_req_irq:
+ if (ipcc->num_chans)
+ mbox_controller_unregister(&ipcc->mbox);
err_mbox:
irq_domain_remove(ipcc->irq_domain);
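
The allocation strategy above depends on counting, at probe time, how many devicetree consumers reference this controller through their "mboxes" properties. Restated as a standalone helper (a sketch of the hunk's logic, not the driver's exact code):

static int count_mbox_clients(struct device_node *ctrl)
{
	struct of_phandle_args args;
	struct device_node *np;
	int i, n, num = 0;

	for_each_node_with_property(np, "mboxes") {
		if (!of_device_is_available(np))
			continue;
		n = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
		for (i = 0; i < n; i++) {
			if (of_parse_phandle_with_args(np, "mboxes",
						       "#mbox-cells", i, &args))
				continue;
			of_node_put(args.np);
			if (args.np == ctrl) {
				num++;		/* count each client once */
				break;
			}
		}
	}
	return num;
}
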
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index f44079d62b1a..31a0fa914274 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -655,6 +655,7 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
mbox->pdata = pdata;
ret = zynqmp_ipi_mbox_probe(mbox, nc);
if (ret) {
+ of_node_put(nc);
dev_err(dev, "failed to probe subdev.\n");
ret = -EINVAL;
goto free_mbox_dev;
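
The one-line zynqmp fix follows a general rule: for_each_available_child_of_node() holds a reference on the current child, so any early exit from the loop body must drop it explicitly. The pattern, with a hypothetical per-child helper:

	struct device_node *nc;
	int ret;

	for_each_available_child_of_node(dev->of_node, nc) {
		ret = do_probe_child(nc);	/* hypothetical per-child setup */
		if (ret) {
			of_node_put(nc);	/* release the loop's reference */
			return ret;
		}
	}
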
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 7af242de3202..eb4b5e52bd6f 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -121,8 +121,10 @@ struct journal_entry {
#define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
struct journal_sector {
- __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
- __u8 mac[JOURNAL_MAC_PER_SECTOR];
+ struct_group(sectors,
+ __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
+ __u8 mac[JOURNAL_MAC_PER_SECTOR];
+ );
commit_id_t commit_id;
};
@@ -2870,7 +2872,8 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
wraparound_section(ic, &i);
for (j = 0; j < ic->journal_section_sectors; j++) {
struct journal_sector *js = access_journal(ic, i, j);
- memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
+ BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
+ memset(&js->sectors, 0, sizeof(js->sectors));
js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
}
for (j = 0; j < ic->journal_section_entries; j++) {
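
struct_group() wraps the two adjacent journal_sector members so they can be addressed, and bounds-checked by FORTIFY_SOURCE, as a single named region. Roughly what the macro expands to, using shortened hypothetical constants:

/* Sketch of the struct_group() expansion (names abbreviated): */
struct journal_sector_example {
	union {
		struct {
			__u8 entries[SECTOR_DATA - MAC_PER_SECTOR];
			__u8 mac[MAC_PER_SECTOR];
		};
		struct {
			__u8 entries[SECTOR_DATA - MAC_PER_SECTOR];
			__u8 mac[MAC_PER_SECTOR];
		} sectors;
	};
	commit_id_t commit_id;
};

/* memset(&js->sectors, 0, sizeof(js->sectors)) now covers exactly the
 * entries+mac span, which the compiler can verify at build time. */
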
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 66ba16713f69..1b97a11d7151 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -162,71 +162,34 @@ static int linear_iterate_devices(struct dm_target *ti,
return fn(ti, lc->dev, lc->start, ti->len, data);
}
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
-static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+#if IS_ENABLED(CONFIG_FS_DAX)
+static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
{
- long ret;
struct linear_c *lc = ti->private;
- struct block_device *bdev = lc->dev->bdev;
- struct dax_device *dax_dev = lc->dev->dax_dev;
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
- dev_sector = linear_map_sector(ti, sector);
- ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
- if (ret)
- return ret;
- return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
-}
+ sector_t sector = linear_map_sector(ti, *pgoff << PAGE_SECTORS_SHIFT);
-static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- struct linear_c *lc = ti->private;
- struct block_device *bdev = lc->dev->bdev;
- struct dax_device *dax_dev = lc->dev->dax_dev;
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
- dev_sector = linear_map_sector(ti, sector);
- if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
- return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ *pgoff = (get_start_sect(lc->dev->bdev) + sector) >> PAGE_SECTORS_SHIFT;
+ return lc->dev->dax_dev;
}
-static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
- struct linear_c *lc = ti->private;
- struct block_device *bdev = lc->dev->bdev;
- struct dax_device *dax_dev = lc->dev->dax_dev;
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
- dev_sector = linear_map_sector(ti, sector);
- if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
- return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+ struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
+ return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages)
{
- int ret;
- struct linear_c *lc = ti->private;
- struct block_device *bdev = lc->dev->bdev;
- struct dax_device *dax_dev = lc->dev->dax_dev;
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
- dev_sector = linear_map_sector(ti, sector);
- ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
- if (ret)
- return ret;
+ struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
#else
#define linear_dax_direct_access NULL
-#define linear_dax_copy_from_iter NULL
-#define linear_dax_copy_to_iter NULL
#define linear_dax_zero_page_range NULL
#endif
@@ -244,8 +207,6 @@ static struct target_type linear_target = {
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
.direct_access = linear_dax_direct_access,
- .dax_copy_from_iter = linear_dax_copy_from_iter,
- .dax_copy_to_iter = linear_dax_copy_to_iter,
.dax_zero_page_range = linear_dax_zero_page_range,
};
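
All remaining DAX entry points now funnel through linear_dax_pgoff(), which converts a page offset relative to the dm target into a page offset on the whole underlying block device: scale to sectors, apply the linear mapping, add the partition start, scale back. A runnable arithmetic check with made-up offsets:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SECTORS_SHIFT 3	/* 4K pages, 512B sectors */

int main(void)
{
	uint64_t pgoff = 10;		/* page 10 of the dm target */
	uint64_t lc_start = 2048;	/* hypothetical target offset */
	uint64_t part_start = 8192;	/* hypothetical partition start */
	uint64_t sector = (pgoff << PAGE_SECTORS_SHIFT) + lc_start;

	pgoff = (part_start + sector) >> PAGE_SECTORS_SHIFT;
	printf("device pgoff = %llu\n", (unsigned long long)pgoff); /* 1290 */
	return 0;
}
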
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 0b3ef977ceeb..139b09b06eda 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -901,120 +901,34 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
limits->io_min = limits->physical_block_size;
}
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
-static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
- struct iov_iter *i)
+#if IS_ENABLED(CONFIG_FS_DAX)
+static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
+ pgoff_t *pgoff)
{
- struct pending_block *block;
-
- if (!bytes)
- return 0;
-
- block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
- if (!block) {
- DMERR("Error allocating dax pending block");
- return -ENOMEM;
- }
-
- block->data = kzalloc(bytes, GFP_KERNEL);
- if (!block->data) {
- DMERR("Error allocating dax data space");
- kfree(block);
- return -ENOMEM;
- }
-
- /* write data provided via the iterator */
- if (!copy_from_iter(block->data, bytes, i)) {
- DMERR("Error copying dax data");
- kfree(block->data);
- kfree(block);
- return -EIO;
- }
-
- /* rewind the iterator so that the block driver can use it */
- iov_iter_revert(i, bytes);
-
- block->datalen = bytes;
- block->sector = bio_to_dev_sectors(lc, sector);
- block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
-
- atomic_inc(&lc->pending_blocks);
- spin_lock_irq(&lc->blocks_lock);
- list_add_tail(&block->list, &lc->unflushed_blocks);
- spin_unlock_irq(&lc->blocks_lock);
- wake_up_process(lc->log_kthread);
+ struct log_writes_c *lc = ti->private;
- return 0;
+ *pgoff += (get_start_sect(lc->dev->bdev) >> PAGE_SECTORS_SHIFT);
+ return lc->dev->dax_dev;
}
static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn)
{
- struct log_writes_c *lc = ti->private;
- sector_t sector = pgoff * PAGE_SECTORS;
- int ret;
-
- ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages * PAGE_SIZE, &pgoff);
- if (ret)
- return ret;
- return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
-}
-
-static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
- pgoff_t pgoff, void *addr, size_t bytes,
- struct iov_iter *i)
-{
- struct log_writes_c *lc = ti->private;
- sector_t sector = pgoff * PAGE_SECTORS;
- int err;
-
- if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
-
- /* Don't bother doing anything if logging has been disabled */
- if (!lc->logging_enabled)
- goto dax_copy;
-
- err = log_dax(lc, sector, bytes, i);
- if (err) {
- DMWARN("Error %d logging DAX write", err);
- return 0;
- }
-dax_copy:
- return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
-}
-
-static size_t log_writes_dax_copy_to_iter(struct dm_target *ti,
- pgoff_t pgoff, void *addr, size_t bytes,
- struct iov_iter *i)
-{
- struct log_writes_c *lc = ti->private;
- sector_t sector = pgoff * PAGE_SECTORS;
+ struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
- if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
- return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
+ return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages)
{
- int ret;
- struct log_writes_c *lc = ti->private;
- sector_t sector = pgoff * PAGE_SECTORS;
-
- ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages << PAGE_SHIFT,
- &pgoff);
- if (ret)
- return ret;
- return dax_zero_page_range(lc->dev->dax_dev, pgoff,
- nr_pages << PAGE_SHIFT);
+ struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
+
+ return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT);
}
#else
#define log_writes_dax_direct_access NULL
-#define log_writes_dax_copy_from_iter NULL
-#define log_writes_dax_copy_to_iter NULL
#define log_writes_dax_zero_page_range NULL
#endif
@@ -1032,8 +946,6 @@ static struct target_type log_writes_target = {
.iterate_devices = log_writes_iterate_devices,
.io_hints = log_writes_io_hints,
.direct_access = log_writes_dax_direct_access,
- .dax_copy_from_iter = log_writes_dax_copy_from_iter,
- .dax_copy_to_iter = log_writes_dax_copy_to_iter,
.dax_zero_page_range = log_writes_dax_zero_page_range,
};
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 90dc9cc48881..f4719b65e5e3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -550,7 +550,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
return DM_MAPIO_REQUEUE;
}
clone->bio = clone->biotail = NULL;
- clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
*__clone = clone;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 6660b6b53d5b..e566115ec0bb 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -300,91 +300,40 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
-static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+#if IS_ENABLED(CONFIG_FS_DAX)
+static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
{
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
- struct dax_device *dax_dev;
struct block_device *bdev;
+ sector_t dev_sector;
uint32_t stripe;
- long ret;
- stripe_map_sector(sc, sector, &stripe, &dev_sector);
+ stripe_map_sector(sc, *pgoff * PAGE_SECTORS, &stripe, &dev_sector);
dev_sector += sc->stripe[stripe].physical_start;
- dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
- ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
- if (ret)
- return ret;
- return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+ *pgoff = (get_start_sect(bdev) + dev_sector) >> PAGE_SECTORS_SHIFT;
+ return sc->stripe[stripe].dev->dax_dev;
}
-static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
- struct stripe_c *sc = ti->private;
- struct dax_device *dax_dev;
- struct block_device *bdev;
- uint32_t stripe;
-
- stripe_map_sector(sc, sector, &stripe, &dev_sector);
- dev_sector += sc->stripe[stripe].physical_start;
- dax_dev = sc->stripe[stripe].dev->dax_dev;
- bdev = sc->stripe[stripe].dev->bdev;
+ struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
- if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
- return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
-}
-
-static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
- struct stripe_c *sc = ti->private;
- struct dax_device *dax_dev;
- struct block_device *bdev;
- uint32_t stripe;
-
- stripe_map_sector(sc, sector, &stripe, &dev_sector);
- dev_sector += sc->stripe[stripe].physical_start;
- dax_dev = sc->stripe[stripe].dev->dax_dev;
- bdev = sc->stripe[stripe].dev->bdev;
-
- if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
- return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+ return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages)
{
- int ret;
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
- struct stripe_c *sc = ti->private;
- struct dax_device *dax_dev;
- struct block_device *bdev;
- uint32_t stripe;
-
- stripe_map_sector(sc, sector, &stripe, &dev_sector);
- dev_sector += sc->stripe[stripe].physical_start;
- dax_dev = sc->stripe[stripe].dev->dax_dev;
- bdev = sc->stripe[stripe].dev->bdev;
+ struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
- ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
- if (ret)
- return ret;
return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
#else
#define stripe_dax_direct_access NULL
-#define stripe_dax_copy_from_iter NULL
-#define stripe_dax_copy_to_iter NULL
#define stripe_dax_zero_page_range NULL
#endif
@@ -521,8 +470,6 @@ static struct target_type stripe_target = {
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
.direct_access = stripe_dax_direct_access,
- .dax_copy_from_iter = stripe_dax_copy_from_iter,
- .dax_copy_to_iter = stripe_dax_copy_to_iter,
.dax_zero_page_range = stripe_dax_zero_page_range,
};
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index a05fcd50e1b9..e28c92478536 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -112,6 +112,7 @@ static struct attribute *dm_attrs[] = {
&dm_attr_rq_based_seq_io_merge_deadline.attr,
NULL,
};
+ATTRIBUTE_GROUPS(dm);
static const struct sysfs_ops dm_sysfs_ops = {
.show = dm_attr_show,
@@ -120,7 +121,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
- .default_attrs = dm_attrs,
+ .default_groups = dm_groups,
.release = dm_kobject_release,
};
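
The dm-sysfs change is part of the treewide move from kobj_type::default_attrs to default_groups; ATTRIBUTE_GROUPS(dm) generates the group plumbing from the existing dm_attrs array, approximately:

/* Approximate expansion of ATTRIBUTE_GROUPS(dm): */
static const struct attribute_group dm_group = {
	.attrs = dm_attrs,
};
static const struct attribute_group *dm_groups[] = {
	&dm_group,
	NULL,
};
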
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index aa173f5bdc3d..e43096cfe9e2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -806,12 +806,14 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
EXPORT_SYMBOL_GPL(dm_table_set_type);
/* validate the dax capability of the target device span */
-int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- int blocksize = *(int *) data;
+ if (dev->dax_dev)
+ return false;
- return !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+ DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
+ return true;
}
/* Check devices support synchronous DAX */
@@ -821,8 +823,8 @@ static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_de
return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
}
-bool dm_table_supports_dax(struct dm_table *t,
- iterate_devices_callout_fn iterate_fn, int *blocksize)
+static bool dm_table_supports_dax(struct dm_table *t,
+ iterate_devices_callout_fn iterate_fn)
{
struct dm_target *ti;
unsigned i;
@@ -835,7 +837,7 @@ bool dm_table_supports_dax(struct dm_table *t,
return false;
if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, iterate_fn, blocksize))
+ ti->type->iterate_devices(ti, iterate_fn, NULL))
return false;
}
@@ -862,7 +864,6 @@ static int dm_table_determine_type(struct dm_table *t)
struct dm_target *tgt;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
- int page_size = PAGE_SIZE;
if (t->type != DM_TYPE_NONE) {
/* target already set the table's type */
@@ -906,7 +907,7 @@ static int dm_table_determine_type(struct dm_table *t)
verify_bio_based:
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
- if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
+ if (dm_table_supports_dax(t, device_not_dax_capable) ||
(list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
}
@@ -1976,7 +1977,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
bool wc = false, fua = false;
- int page_size = PAGE_SIZE;
int r;
/*
@@ -2010,9 +2010,9 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
- if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
+ if (dm_table_supports_dax(t, device_not_dax_capable)) {
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
- if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
+ if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
set_dax_synchronous(t->md->dax_dev);
}
else
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 4b8991cde223..4f31591d2d25 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -38,7 +38,7 @@
#define BITMAP_GRANULARITY PAGE_SIZE
#endif
-#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
+#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_FS_DAX)
#define DM_WRITECACHE_HAS_PMEM
#endif
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 662742a310cb..dcbd6d201619 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -489,7 +489,7 @@ static void start_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
- io->start_time = bio_start_io_acct(bio);
+ bio_start_io_acct_time(bio, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
@@ -535,7 +535,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
io->md = md;
spin_lock_init(&io->endio_lock);
- start_io_acct(io);
+ io->start_time = jiffies;
return io;
}
@@ -637,7 +637,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
struct mapped_device *md)
{
struct block_device *bdev;
-
+ u64 part_off;
int r;
BUG_ON(td->dm_dev.bdev);
@@ -653,7 +653,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev);
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
return 0;
}
@@ -1027,74 +1027,6 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
return ret;
}
-static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len)
-{
- struct mapped_device *md = dax_get_private(dax_dev);
- struct dm_table *map;
- bool ret = false;
- int srcu_idx;
-
- map = dm_get_live_table(md, &srcu_idx);
- if (!map)
- goto out;
-
- ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
-
-out:
- dm_put_live_table(md, srcu_idx);
-
- return ret;
-}
-
-static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- struct mapped_device *md = dax_get_private(dax_dev);
- sector_t sector = pgoff * PAGE_SECTORS;
- struct dm_target *ti;
- long ret = 0;
- int srcu_idx;
-
- ti = dm_dax_get_live_target(md, sector, &srcu_idx);
-
- if (!ti)
- goto out;
- if (!ti->type->dax_copy_from_iter) {
- ret = copy_from_iter(addr, bytes, i);
- goto out;
- }
- ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
- out:
- dm_put_live_table(md, srcu_idx);
-
- return ret;
-}
-
-static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- struct mapped_device *md = dax_get_private(dax_dev);
- sector_t sector = pgoff * PAGE_SECTORS;
- struct dm_target *ti;
- long ret = 0;
- int srcu_idx;
-
- ti = dm_dax_get_live_target(md, sector, &srcu_idx);
-
- if (!ti)
- goto out;
- if (!ti->type->dax_copy_to_iter) {
- ret = copy_to_iter(addr, bytes, i);
- goto out;
- }
- ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
- out:
- dm_put_live_table(md, srcu_idx);
-
- return ret;
-}
-
static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
{
@@ -1510,9 +1442,6 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
ci->sector = bio->bi_iter.bi_sector;
}
-#define __dm_part_stat_sub(part, field, subnd) \
- (part_stat_get(part, field) -= (subnd))
-
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
@@ -1548,23 +1477,12 @@ static void __split_and_process_bio(struct mapped_device *md,
GFP_NOIO, &md->queue->bio_split);
ci.io->orig_bio = b;
- /*
- * Adjust IO stats for each split, otherwise upon queue
- * reentry there will be redundant IO accounting.
- * NOTE: this is a stop-gap fix, a proper fix involves
- * significant refactoring of DM core's bio splitting
- * (by eliminating DM's splitting and just using bio_split)
- */
- part_stat_lock();
- __dm_part_stat_sub(dm_disk(md)->part0,
- sectors[op_stat_group(bio_op(bio))], ci.sector_count);
- part_stat_unlock();
-
bio_chain(b, bio);
trace_block_split(b, bio->bi_iter.bi_sector);
submit_bio_noacct(bio);
}
}
+ start_io_acct(ci.io);
/* drop the extra reference count */
dm_io_dec_pending(ci.io, errno_to_blk_status(error));
@@ -1683,6 +1601,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
bioset_exit(&md->io_bs);
if (md->dax_dev) {
+ dax_remove_host(md->disk);
kill_dax(md->dax_dev);
put_dax(md->dax_dev);
md->dax_dev = NULL;
@@ -1778,15 +1697,21 @@ static struct mapped_device *alloc_dev(int minor)
md->disk->major = _major;
md->disk->first_minor = minor;
md->disk->minors = 1;
+ md->disk->flags |= GENHD_FL_NO_PART;
md->disk->fops = &dm_blk_dops;
md->disk->queue = md->queue;
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
- if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
- md->dax_dev = alloc_dax(md, md->disk->disk_name,
- &dm_dax_ops, 0);
- if (IS_ERR(md->dax_dev))
+ if (IS_ENABLED(CONFIG_FS_DAX)) {
+ md->dax_dev = alloc_dax(md, &dm_dax_ops);
+ if (IS_ERR(md->dax_dev)) {
+ md->dax_dev = NULL;
+ goto bad;
+ }
+ set_dax_nocache(md->dax_dev);
+ set_dax_nomc(md->dax_dev);
+ if (dax_add_host(md->dax_dev, md->disk))
goto bad;
}
@@ -3040,9 +2965,6 @@ static const struct block_device_operations dm_rq_blk_dops = {
static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access,
- .dax_supported = dm_dax_supported,
- .copy_from_iter = dm_dax_copy_from_iter,
- .copy_to_iter = dm_dax_copy_to_iter,
.zero_page_range = dm_dax_zero_page_range,
};
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 742d9c80efe1..9013dc1a7b00 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -73,10 +73,6 @@ bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
-bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
- int *blocksize);
-int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data);
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 7fbd41e156c9..1c8a06b77c85 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -574,7 +574,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
int ret = 0;
if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
- "node %d received it's own msg\n", le32_to_cpu(msg->slot)))
+ "node %d received its own msg\n", le32_to_cpu(msg->slot)))
return -1;
switch (le32_to_cpu(msg->type)) {
case METADATA_UPDATED:
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 41d6e2383517..5881d05a76eb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -418,6 +418,12 @@ check_suspended:
rcu_read_lock();
if (is_suspended(mddev, bio)) {
DEFINE_WAIT(__wait);
+ /* Bail out if REQ_NOWAIT is set for the bio */
+ if (bio->bi_opf & REQ_NOWAIT) {
+ rcu_read_unlock();
+ bio_wouldblock_error(bio);
+ return;
+ }
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
@@ -3603,6 +3609,7 @@ static struct attribute *rdev_default_attrs[] = {
&rdev_ppl_size.attr,
NULL,
};
+ATTRIBUTE_GROUPS(rdev_default);
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
@@ -3652,7 +3659,7 @@ static const struct sysfs_ops rdev_sysfs_ops = {
static struct kobj_type rdev_ktype = {
.release = rdev_free,
.sysfs_ops = &rdev_sysfs_ops,
- .default_attrs = rdev_default_attrs,
+ .default_groups = rdev_default_groups,
};
int md_rdev_init(struct md_rdev *rdev)
@@ -5708,11 +5715,6 @@ static int md_alloc(dev_t dev, char *name)
mddev->queue = disk->queue;
blk_set_stacking_limits(&mddev->queue->limits);
blk_queue_write_cache(mddev->queue, true, true);
- /* Allow extended partitions. This makes the
- * 'mdp' device redundant, but we can't really
- * remove it now.
- */
- disk->flags |= GENHD_FL_EXT_DEVT;
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
error = add_disk(disk);
@@ -5793,6 +5795,7 @@ int md_run(struct mddev *mddev)
int err;
struct md_rdev *rdev;
struct md_personality *pers;
+ bool nowait = true;
if (list_empty(&mddev->disks))
/* cannot run an array with no devices.. */
@@ -5863,8 +5866,13 @@ int md_run(struct mddev *mddev)
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
+ nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
}
+ /* Set the NOWAIT flags if all underlying devices support it */
+ if (nowait)
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
+
if (!bioset_initialized(&mddev->bio_set)) {
err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (err)
@@ -5875,13 +5883,6 @@ int md_run(struct mddev *mddev)
if (err)
goto exit_bio_set;
}
- if (mddev->level != 1 && mddev->level != 10 &&
- !bioset_initialized(&mddev->io_acct_set)) {
- err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
- offsetof(struct md_io_acct, bio_clone), 0);
- if (err)
- goto exit_sync_set;
- }
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
@@ -6058,9 +6059,6 @@ bitmap_abort:
module_put(pers->owner);
md_bitmap_destroy(mddev);
abort:
- if (mddev->level != 1 && mddev->level != 10)
- bioset_exit(&mddev->io_acct_set);
-exit_sync_set:
bioset_exit(&mddev->sync_set);
exit_bio_set:
bioset_exit(&mddev->bio_set);
@@ -7010,6 +7008,15 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
if (!mddev->thread)
md_update_sb(mddev, 1);
/*
+ * If the new disk does not support REQ_NOWAIT,
+ * disable on the whole MD.
+ */
+ if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
+ pr_info("%s: Disabling nowait because %s does not support nowait\n",
+ mdname(mddev), bdevname(rdev->bdev, b));
+ blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
+ }
+ /*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
*/
@@ -8407,7 +8414,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
spin_lock(&pers_lock);
/* ensure module won't be unloaded */
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
- pr_warn("can't find md-cluster module or get it's reference.\n");
+ pr_warn("can't find md-cluster module or get its reference.\n");
spin_unlock(&pers_lock);
return -ENOENT;
}
@@ -8594,6 +8601,23 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
+int acct_bioset_init(struct mddev *mddev)
+{
+ int err = 0;
+
+ if (!bioset_initialized(&mddev->io_acct_set))
+ err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
+ offsetof(struct md_io_acct, bio_clone), 0);
+ return err;
+}
+EXPORT_SYMBOL_GPL(acct_bioset_init);
+
+void acct_bioset_exit(struct mddev *mddev)
+{
+ bioset_exit(&mddev->io_acct_set);
+}
+EXPORT_SYMBOL_GPL(acct_bioset_exit);
+
static void md_end_io_acct(struct bio *bio)
{
struct md_io_acct *md_io_acct = bio->bi_private;
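
The md.c changes teach MD about REQ_NOWAIT: the array advertises QUEUE_FLAG_NOWAIT only when every member device supports it, and a REQ_NOWAIT bio arriving while the array is suspended fails with bio_wouldblock_error() instead of sleeping. The policy check, condensed into a sketch along the lines of md_run():

	bool nowait = true;
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));

	if (nowait)	/* every member honours REQ_NOWAIT */
		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
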
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 53ea7a6961de..f1bf3625ef4c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -721,6 +721,8 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio, sector_t start, sector_t size);
+int acct_bioset_init(struct mddev *mddev);
+void acct_bioset_exit(struct mddev *mddev);
void md_account_bio(struct mddev *mddev, struct bio **bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index cb670f16e98e..4ead31e0d8ce 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -9,6 +9,9 @@
#include "dm-transaction-manager.h"
#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "btree"
/*
* Removing an entry from a btree
@@ -79,15 +82,23 @@ static void node_shift(struct btree_node *n, int shift)
}
}
-static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
+static int node_copy(struct btree_node *left, struct btree_node *right, int shift)
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t value_size = le32_to_cpu(left->header.value_size);
- BUG_ON(value_size != le32_to_cpu(right->header.value_size));
+ if (value_size != le32_to_cpu(right->header.value_size)) {
+ DMERR("mismatched value size");
+ return -EILSEQ;
+ }
if (shift < 0) {
shift = -shift;
- BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
+
+ if (nr_left + shift > le32_to_cpu(left->header.max_entries)) {
+ DMERR("bad shift");
+ return -EINVAL;
+ }
+
memcpy(key_ptr(left, nr_left),
key_ptr(right, 0),
shift * sizeof(__le64));
@@ -95,7 +106,11 @@ static void node_copy(struct btree_node *left, struct btree_node *right, int shi
value_ptr(right, 0),
shift * value_size);
} else {
- BUG_ON(shift > le32_to_cpu(right->header.max_entries));
+ if (shift > le32_to_cpu(right->header.max_entries)) {
+ DMERR("bad shift");
+ return -EINVAL;
+ }
+
memcpy(key_ptr(right, 0),
key_ptr(left, nr_left - shift),
shift * sizeof(__le64));
@@ -103,6 +118,7 @@ static void node_copy(struct btree_node *left, struct btree_node *right, int shi
value_ptr(left, nr_left - shift),
shift * value_size);
}
+ return 0;
}
/*
@@ -170,35 +186,54 @@ static void exit_child(struct dm_btree_info *info, struct child *c)
dm_tm_unlock(info->tm, c->block);
}
-static void shift(struct btree_node *left, struct btree_node *right, int count)
+static int shift(struct btree_node *left, struct btree_node *right, int count)
{
+ int r;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);
- BUG_ON(max_entries != r_max_entries);
- BUG_ON(nr_left - count > max_entries);
- BUG_ON(nr_right + count > max_entries);
+ if (max_entries != r_max_entries) {
+ DMERR("node max_entries mismatch");
+ return -EILSEQ;
+ }
+
+ if (nr_left - count > max_entries) {
+ DMERR("node shift out of bounds");
+ return -EINVAL;
+ }
+
+ if (nr_right + count > max_entries) {
+ DMERR("node shift out of bounds");
+ return -EINVAL;
+ }
if (!count)
- return;
+ return 0;
if (count > 0) {
node_shift(right, count);
- node_copy(left, right, count);
+ r = node_copy(left, right, count);
+ if (r)
+ return r;
} else {
- node_copy(left, right, count);
+ r = node_copy(left, right, count);
+ if (r)
+ return r;
node_shift(right, count);
}
left->header.nr_entries = cpu_to_le32(nr_left - count);
right->header.nr_entries = cpu_to_le32(nr_right + count);
+
+ return 0;
}
-static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
- struct child *l, struct child *r)
+static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *r)
{
+ int ret;
struct btree_node *left = l->n;
struct btree_node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
@@ -229,9 +264,12 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
* Rebalance.
*/
unsigned target_left = (nr_left + nr_right) / 2;
- shift(left, right, nr_left - target_left);
+ ret = shift(left, right, nr_left - target_left);
+ if (ret)
+ return ret;
*key_ptr(parent, r->index) = right->keys[0];
}
+ return 0;
}
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
@@ -253,12 +291,12 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}
- __rebalance2(info, parent, &left, &right);
+ r = __rebalance2(info, parent, &left, &right);
exit_child(info, &left);
exit_child(info, &right);
- return 0;
+ return r;
}
/*
@@ -266,21 +304,30 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
* in right, then rebalance2. This wastes some cpu, but I want something
* simple atm.
*/
-static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
- struct child *l, struct child *c, struct child *r,
- struct btree_node *left, struct btree_node *center, struct btree_node *right,
- uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+static int delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r,
+ struct btree_node *left, struct btree_node *center, struct btree_node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
unsigned shift = min(max_entries - nr_left, nr_center);
- BUG_ON(nr_left + shift > max_entries);
+ if (nr_left + shift > max_entries) {
+ DMERR("node shift out of bounds");
+ return -EINVAL;
+ }
+
node_copy(left, center, -shift);
left->header.nr_entries = cpu_to_le32(nr_left + shift);
if (shift != nr_center) {
shift = nr_center - shift;
- BUG_ON((nr_right + shift) > max_entries);
+
+ if ((nr_right + shift) > max_entries) {
+ DMERR("node shift out of bounds");
+ return -EINVAL;
+ }
+
node_shift(right, shift);
node_copy(center, right, shift);
right->header.nr_entries = cpu_to_le32(nr_right + shift);
@@ -291,18 +338,18 @@ static void delete_center_node(struct dm_btree_info *info, struct btree_node *pa
r->index--;
dm_tm_dec(info->tm, dm_block_location(c->block));
- __rebalance2(info, parent, l, r);
+ return __rebalance2(info, parent, l, r);
}
/*
* Redistributes entries among 3 sibling nodes.
*/
-static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
- struct child *l, struct child *c, struct child *r,
- struct btree_node *left, struct btree_node *center, struct btree_node *right,
- uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+static int redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r,
+ struct btree_node *left, struct btree_node *center, struct btree_node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
- int s;
+ int s, ret;
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
unsigned total = nr_left + nr_center + nr_right;
unsigned target_right = total / 3;
@@ -317,35 +364,55 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
if (s < 0 && nr_center < -s) {
/* not enough in central node */
- shift(left, center, -nr_center);
+ ret = shift(left, center, -nr_center);
+ if (ret)
+ return ret;
+
s += nr_center;
- shift(left, right, s);
- nr_right += s;
- } else
- shift(left, center, s);
+ ret = shift(left, right, s);
+ if (ret)
+ return ret;
- shift(center, right, target_right - nr_right);
+ nr_right += s;
+ } else {
+ ret = shift(left, center, s);
+ if (ret)
+ return ret;
+ }
+ ret = shift(center, right, target_right - nr_right);
+ if (ret)
+ return ret;
} else {
s = target_right - nr_right;
if (s > 0 && nr_center < s) {
/* not enough in central node */
- shift(center, right, nr_center);
+ ret = shift(center, right, nr_center);
+ if (ret)
+ return ret;
s -= nr_center;
- shift(left, right, s);
+ ret = shift(left, right, s);
+ if (ret)
+ return ret;
nr_left -= s;
- } else
- shift(center, right, s);
+ } else {
+ ret = shift(center, right, s);
+ if (ret)
+ return ret;
+ }
- shift(left, center, nr_left - target_left);
+ ret = shift(left, center, nr_left - target_left);
+ if (ret)
+ return ret;
}
*key_ptr(parent, c->index) = center->keys[0];
*key_ptr(parent, r->index) = right->keys[0];
+ return 0;
}
-static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
- struct child *l, struct child *c, struct child *r)
+static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r)
{
struct btree_node *left = l->n;
struct btree_node *center = c->n;
@@ -357,15 +424,19 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
unsigned threshold = merge_threshold(left) * 4 + 1;
- BUG_ON(left->header.max_entries != center->header.max_entries);
- BUG_ON(center->header.max_entries != right->header.max_entries);
+ if ((left->header.max_entries != center->header.max_entries) ||
+ (center->header.max_entries != right->header.max_entries)) {
+ DMERR("bad btree metadata, max_entries differ");
+ return -EILSEQ;
+ }
+
+ if ((nr_left + nr_center + nr_right) < threshold) {
+ return delete_center_node(info, parent, l, c, r, left, center, right,
+ nr_left, nr_center, nr_right);
+ }
- if ((nr_left + nr_center + nr_right) < threshold)
- delete_center_node(info, parent, l, c, r, left, center, right,
- nr_left, nr_center, nr_right);
- else
- redistribute3(info, parent, l, c, r, left, center, right,
- nr_left, nr_center, nr_right);
+ return redistribute3(info, parent, l, c, r, left, center, right,
+ nr_left, nr_center, nr_right);
}
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
@@ -395,13 +466,13 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}
- __rebalance3(info, parent, &left, &center, &right);
+ r = __rebalance3(info, parent, &left, &center, &right);
exit_child(info, &left);
exit_child(info, &center);
exit_child(info, &right);
- return 0;
+ return r;
}
static int rebalance_children(struct shadow_spine *s,
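The dm-btree-remove.c hunks above all share one shape: helpers that used to BUG_ON() on corrupt metadata now return an errno, and every caller propagates it instead of crashing the machine. A minimal userspace sketch of that pattern — the struct and function names here are illustrative stand-ins, not the real dm-btree API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a btree node header. */
struct node {
	uint32_t nr_entries;
	uint32_t max_entries;
};

/* Was "void shift(...)" with BUG_ON(); now validates and reports. */
static int node_shift_entries(struct node *left, struct node *right, int count)
{
	if (left->max_entries != right->max_entries)
		return -EILSEQ;	/* on-disk metadata is inconsistent */

	if (left->nr_entries - count > left->max_entries ||
	    right->nr_entries + count > right->max_entries)
		return -EINVAL;	/* requested shift is out of bounds */

	left->nr_entries -= count;
	right->nr_entries += count;
	return 0;
}

/* Callers propagate the error instead of halting the kernel. */
static int rebalance(struct node *l, struct node *r)
{
	uint32_t target = (l->nr_entries + r->nr_entries) / 2;
	int ret = node_shift_entries(l, r, (int)(l->nr_entries - target));

	if (ret)
		fprintf(stderr, "rebalance failed: %d\n", ret);
	return ret;
}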
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index f5bd76ed8fe6..e653458888a7 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -15,10 +15,6 @@
#define BTREE_CSUM_XOR 121107
-static int node_check(struct dm_block_validator *v,
- struct dm_block *b,
- size_t block_size);
-
static void node_prepare_for_write(struct dm_block_validator *v,
struct dm_block *b,
size_t block_size)
@@ -40,7 +36,7 @@ static int node_check(struct dm_block_validator *v,
struct node_header *h = &n->header;
size_t value_size;
__le32 csum_disk;
- uint32_t flags;
+ uint32_t flags, nr_entries, max_entries;
if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
@@ -57,15 +53,17 @@ static int node_check(struct dm_block_validator *v,
return -EILSEQ;
}
+ nr_entries = le32_to_cpu(h->nr_entries);
+ max_entries = le32_to_cpu(h->max_entries);
value_size = le32_to_cpu(h->value_size);
if (sizeof(struct node_header) +
- (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
+ (sizeof(__le64) + value_size) * max_entries > block_size) {
DMERR_LIMIT("node_check failed: max_entries too large");
return -EILSEQ;
}
- if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
+ if (nr_entries > max_entries) {
DMERR_LIMIT("node_check failed: too many entries");
return -EILSEQ;
}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 0703ca7a7d9a..5ce64e93aae7 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -81,14 +81,16 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
}
static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
- uint64_t key, void *value)
- __dm_written_to_disk(value)
+ uint64_t key, void *value)
+ __dm_written_to_disk(value)
{
uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
+ uint32_t max_entries = le32_to_cpu(node->header.max_entries);
__le64 key_le = cpu_to_le64(key);
if (index > nr_entries ||
- index >= le32_to_cpu(node->header.max_entries)) {
+ index >= max_entries ||
+ nr_entries >= max_entries) {
DMERR("too many entries in btree node for insert");
__dm_unbless_for_disk(value);
return -ENOMEM;
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 4a6a2a9b4eb4..bfbfa750e016 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -283,6 +283,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
struct disk_index_entry ie_disk;
struct dm_block *blk;
+ if (b >= ll->nr_blocks) {
+ DMERR_LIMIT("metadata block out of bounds");
+ return -EINVAL;
+ }
+
b = do_div(index, ll->entries_per_block);
r = ll->load_ie(ll, index, &ie_disk);
if (r < 0)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 62c8b6adac70..b59a77b31b90 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -356,7 +356,21 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
return array_sectors;
}
-static void raid0_free(struct mddev *mddev, void *priv);
+static void free_conf(struct mddev *mddev, struct r0conf *conf)
+{
+ kfree(conf->strip_zone);
+ kfree(conf->devlist);
+ kfree(conf);
+ mddev->private = NULL;
+}
+
+static void raid0_free(struct mddev *mddev, void *priv)
+{
+ struct r0conf *conf = priv;
+
+ free_conf(mddev, conf);
+ acct_bioset_exit(mddev);
+}
static int raid0_run(struct mddev *mddev)
{
@@ -370,11 +384,16 @@ static int raid0_run(struct mddev *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
+ if (acct_bioset_init(mddev)) {
+ pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
+ return -ENOMEM;
+ }
+
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
ret = create_strip_zones(mddev, &conf);
if (ret < 0)
- return ret;
+ goto exit_acct_set;
mddev->private = conf;
}
conf = mddev->private;
@@ -413,17 +432,16 @@ static int raid0_run(struct mddev *mddev)
dump_zones(mddev);
ret = md_integrity_register(mddev);
+ if (ret)
+ goto free;
return ret;
-}
-static void raid0_free(struct mddev *mddev, void *priv)
-{
- struct r0conf *conf = priv;
-
- kfree(conf->strip_zone);
- kfree(conf->devlist);
- kfree(conf);
+free:
+ free_conf(mddev, conf);
+exit_acct_set:
+ acct_bioset_exit(mddev);
+ return ret;
}
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
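The raid0_run() change above is a textbook kernel goto-unwind: each acquired resource gets a matching label, and a failure jumps to the deepest label that releases everything taken so far, so acct_bioset_init() is always paired with acct_bioset_exit(). A self-contained sketch of that shape, with malloc() standing in for the md allocations:

#include <errno.h>
#include <stdlib.h>

struct ctx {
	void *acct;	/* stands in for the accounting bioset */
	void *conf;	/* stands in for the strip-zone config */
};

static int setup(struct ctx *c, int fail_late)
{
	int ret;

	c->acct = malloc(64);		/* acct_bioset_init() */
	if (!c->acct)
		return -ENOMEM;

	c->conf = malloc(64);		/* create_strip_zones() */
	if (!c->conf) {
		ret = -ENOMEM;
		goto exit_acct;		/* undo only what was taken */
	}

	if (fail_late) {		/* md_integrity_register() failed */
		ret = -EIO;
		goto free_conf;
	}
	return 0;			/* success: caller owns both */

free_conf:
	free(c->conf);
exit_acct:
	free(c->acct);
	return ret;
}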
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 54db34163968..83f9a4f3d82e 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -22,12 +22,6 @@
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
-/* When there are this many requests queue to be written by
- * the raid thread, we become 'congested' to provide back-pressure
- * for writeback.
- */
-static int max_queued_requests = 1024;
-
/* for managing resync I/O pages */
struct resync_pages {
void *raid_bio;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 85505424f7a4..e2d8acb1e988 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -929,8 +929,10 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
wake_up(&conf->wait_barrier);
}
-static void _wait_barrier(struct r1conf *conf, int idx)
+static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
{
+ bool ret = true;
+
/*
* We need to increase conf->nr_pending[idx] very early here,
* then raise_barrier() can be blocked when it waits for
@@ -961,7 +963,7 @@ static void _wait_barrier(struct r1conf *conf, int idx)
*/
if (!READ_ONCE(conf->array_frozen) &&
!atomic_read(&conf->barrier[idx]))
- return;
+ return ret;
/*
* After holding conf->resync_lock, conf->nr_pending[idx]
@@ -979,18 +981,27 @@ static void _wait_barrier(struct r1conf *conf, int idx)
*/
wake_up(&conf->wait_barrier);
/* Wait for the barrier in same barrier unit bucket to drop. */
- wait_event_lock_irq(conf->wait_barrier,
- !conf->array_frozen &&
- !atomic_read(&conf->barrier[idx]),
- conf->resync_lock);
- atomic_inc(&conf->nr_pending[idx]);
+
+ /* Return false when nowait flag is set */
+ if (nowait) {
+ ret = false;
+ } else {
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->array_frozen &&
+ !atomic_read(&conf->barrier[idx]),
+ conf->resync_lock);
+ atomic_inc(&conf->nr_pending[idx]);
+ }
+
atomic_dec(&conf->nr_waiting[idx]);
spin_unlock_irq(&conf->resync_lock);
+ return ret;
}
-static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
+static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
int idx = sector_to_idx(sector_nr);
+ bool ret = true;
/*
* Very similar to _wait_barrier(). The difference is, for read
@@ -1002,7 +1013,7 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
atomic_inc(&conf->nr_pending[idx]);
if (!READ_ONCE(conf->array_frozen))
- return;
+ return ret;
spin_lock_irq(&conf->resync_lock);
atomic_inc(&conf->nr_waiting[idx]);
@@ -1013,19 +1024,28 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
*/
wake_up(&conf->wait_barrier);
/* Wait for array to be unfrozen */
- wait_event_lock_irq(conf->wait_barrier,
- !conf->array_frozen,
- conf->resync_lock);
- atomic_inc(&conf->nr_pending[idx]);
+
+ /* Return false when nowait flag is set */
+ if (nowait) {
+ ret = false;
+ } else {
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->array_frozen,
+ conf->resync_lock);
+ atomic_inc(&conf->nr_pending[idx]);
+ }
+
atomic_dec(&conf->nr_waiting[idx]);
spin_unlock_irq(&conf->resync_lock);
+ return ret;
}
-static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
+static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
int idx = sector_to_idx(sector_nr);
- _wait_barrier(conf, idx);
+ return _wait_barrier(conf, idx, nowait);
}
static void _allow_barrier(struct r1conf *conf, int idx)
@@ -1236,7 +1256,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
* Still need barrier for READ in case that whole
* array is frozen.
*/
- wait_read_barrier(conf, bio->bi_iter.bi_sector);
+ if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
+ bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return;
+ }
if (!r1_bio)
r1_bio = alloc_r1bio(mddev, bio);
@@ -1336,6 +1360,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
bio->bi_iter.bi_sector, bio_end_sector(bio))) {
DEFINE_WAIT(w);
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
for (;;) {
prepare_to_wait(&conf->wait_barrier,
&w, TASK_IDLE);
@@ -1353,17 +1381,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* thread has put up a bar for new requests.
* Continue immediately if no resync is active currently.
*/
- wait_barrier(conf, bio->bi_iter.bi_sector);
+ if (!wait_barrier(conf, bio->bi_iter.bi_sector,
+ bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return;
+ }
r1_bio = alloc_r1bio(mddev, bio);
r1_bio->sectors = max_write_sectors;
- if (conf->pending_count >= max_queued_requests) {
- md_wakeup_thread(mddev->thread);
- raid1_log(mddev, "wait queued");
- wait_event(conf->wait_barrier,
- conf->pending_count < max_queued_requests);
- }
/* first select target devices under rcu_lock and
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
@@ -1458,9 +1484,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
allow_barrier(conf, bio->bi_iter.bi_sector);
+
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf, bio->bi_iter.bi_sector);
+ wait_barrier(conf, bio->bi_iter.bi_sector, false);
goto retry_write;
}
@@ -1688,7 +1719,7 @@ static void close_sync(struct r1conf *conf)
int idx;
for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
- _wait_barrier(conf, idx);
+ _wait_barrier(conf, idx, false);
_allow_barrier(conf, idx);
}
@@ -3410,5 +3441,3 @@ MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");
-
-module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
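The raid1 hunks above all enforce one contract: when the submitter set REQ_NOWAIT, any path that would sleep on a barrier must fail the bio via bio_wouldblock_error() (-EAGAIN) instead of blocking. A rough userspace model of that contract, with simplified types that are not the kernel API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct barrier {
	bool raised;	/* resync/reshape activity in this bucket */
};

/* Returns true if I/O may proceed; false means "would block". */
static bool wait_barrier(struct barrier *b, bool nowait)
{
	while (b->raised) {
		if (nowait)
			return false;	/* caller fails the bio instead */
		/* a real implementation sleeps here until the barrier drops */
		b->raised = false;
	}
	return true;
}

static int submit(struct barrier *b, bool nowait)
{
	if (!wait_barrier(b, nowait)) {
		fprintf(stderr, "bio_wouldblock_error()\n");	/* -> -EAGAIN */
		return -EAGAIN;
	}
	return 0;	/* issue the I/O */
}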
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index dde98f65bd04..2b969f70a31f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -952,8 +952,10 @@ static void lower_barrier(struct r10conf *conf)
wake_up(&conf->wait_barrier);
}
-static void wait_barrier(struct r10conf *conf)
+static bool wait_barrier(struct r10conf *conf, bool nowait)
{
+ bool ret = true;
+
spin_lock_irq(&conf->resync_lock);
if (conf->barrier) {
struct bio_list *bio_list = current->bio_list;
@@ -967,27 +969,35 @@ static void wait_barrier(struct r10conf *conf)
* that queue to get the nr_pending
* count down.
*/
- raid10_log(conf->mddev, "wait barrier");
- wait_event_lock_irq(conf->wait_barrier,
- !conf->barrier ||
- (atomic_read(&conf->nr_pending) &&
- bio_list &&
- (!bio_list_empty(&bio_list[0]) ||
- !bio_list_empty(&bio_list[1]))) ||
- /* move on if recovery thread is
- * blocked by us
- */
- (conf->mddev->thread->tsk == current &&
- test_bit(MD_RECOVERY_RUNNING,
- &conf->mddev->recovery) &&
- conf->nr_queued > 0),
- conf->resync_lock);
+ /* Return false when nowait flag is set */
+ if (nowait) {
+ ret = false;
+ } else {
+ raid10_log(conf->mddev, "wait barrier");
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->barrier ||
+ (atomic_read(&conf->nr_pending) &&
+ bio_list &&
+ (!bio_list_empty(&bio_list[0]) ||
+ !bio_list_empty(&bio_list[1]))) ||
+ /* move on if recovery thread is
+ * blocked by us
+ */
+ (conf->mddev->thread->tsk == current &&
+ test_bit(MD_RECOVERY_RUNNING,
+ &conf->mddev->recovery) &&
+ conf->nr_queued > 0),
+ conf->resync_lock);
+ }
conf->nr_waiting--;
if (!conf->nr_waiting)
wake_up(&conf->wait_barrier);
}
- atomic_inc(&conf->nr_pending);
+ /* Only increment nr_pending when we wait */
+ if (ret)
+ atomic_inc(&conf->nr_pending);
spin_unlock_irq(&conf->resync_lock);
+ return ret;
}
static void allow_barrier(struct r10conf *conf)
@@ -1098,21 +1108,30 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
* currently.
* 2. If IO spans the reshape position. Need to wait for reshape to pass.
*/
-static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
+static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
struct bio *bio, sector_t sectors)
{
- wait_barrier(conf);
+ /* Bail out if REQ_NOWAIT is set for the bio */
+ if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return false;
+ }
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio->bi_iter.bi_sector < conf->reshape_progress &&
bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
- raid10_log(conf->mddev, "wait reshape");
allow_barrier(conf);
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return false;
+ }
+ raid10_log(conf->mddev, "wait reshape");
wait_event(conf->wait_barrier,
conf->reshape_progress <= bio->bi_iter.bi_sector ||
conf->reshape_progress >= bio->bi_iter.bi_sector +
sectors);
- wait_barrier(conf);
+ wait_barrier(conf, false);
}
+ return true;
}
static void raid10_read_request(struct mddev *mddev, struct bio *bio,
@@ -1157,7 +1176,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
rcu_read_unlock();
}
- regular_request_wait(mddev, conf, bio, r10_bio->sectors);
+ if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
+ return;
rdev = read_balance(conf, r10_bio, &max_sectors);
if (!rdev) {
if (err_rdev) {
@@ -1179,7 +1199,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
bio_chain(split, bio);
allow_barrier(conf);
submit_bio_noacct(bio);
- wait_barrier(conf);
+ wait_barrier(conf, false);
bio = split;
r10_bio->master_bio = bio;
r10_bio->sectors = max_sectors;
@@ -1338,7 +1358,7 @@ retry_wait:
raid10_log(conf->mddev, "%s wait rdev %d blocked",
__func__, blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf);
+ wait_barrier(conf, false);
goto retry_wait;
}
}
@@ -1356,6 +1376,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
bio->bi_iter.bi_sector,
bio_end_sector(bio)))) {
DEFINE_WAIT(w);
+ /* Bail out if REQ_NOWAIT is set for the bio */
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
for (;;) {
prepare_to_wait(&conf->wait_barrier,
&w, TASK_IDLE);
@@ -1368,7 +1393,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
}
sectors = r10_bio->sectors;
- regular_request_wait(mddev, conf, bio, sectors);
+ if (!regular_request_wait(mddev, conf, bio, sectors))
+ return;
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
(mddev->reshape_backwards
? (bio->bi_iter.bi_sector < conf->reshape_safe &&
@@ -1380,6 +1406,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(mddev->thread);
+ if (bio->bi_opf & REQ_NOWAIT) {
+ allow_barrier(conf);
+ bio_wouldblock_error(bio);
+ return;
+ }
raid10_log(conf->mddev, "wait reshape metadata");
wait_event(mddev->sb_wait,
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
@@ -1387,12 +1418,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
conf->reshape_safe = mddev->reshape_position;
}
- if (conf->pending_count >= max_queued_requests) {
- md_wakeup_thread(mddev->thread);
- raid10_log(mddev, "wait queued");
- wait_event(conf->wait_barrier,
- conf->pending_count < max_queued_requests);
- }
/* first select target devices under rcu_lock and
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
@@ -1482,7 +1507,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
bio_chain(split, bio);
allow_barrier(conf);
submit_bio_noacct(bio);
- wait_barrier(conf);
+ wait_barrier(conf, false);
bio = split;
r10_bio->master_bio = bio;
}
@@ -1607,7 +1632,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
return -EAGAIN;
- wait_barrier(conf);
+ if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return 0;
+ }
+ wait_barrier(conf, false);
/*
* Check reshape again to avoid reshape happens after checking
@@ -1649,7 +1678,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
allow_barrier(conf);
/* Resend the first split part */
submit_bio_noacct(split);
- wait_barrier(conf);
+ wait_barrier(conf, false);
}
div_u64_rem(bio_end, stripe_size, &remainder);
if (remainder) {
@@ -1660,7 +1689,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
/* Resend the second split part */
submit_bio_noacct(bio);
bio = split;
- wait_barrier(conf);
+ wait_barrier(conf, false);
}
bio_start = bio->bi_iter.bi_sector;
@@ -1816,7 +1845,7 @@ retry_discard:
end_disk_offset += geo->stride;
atomic_inc(&first_r10bio->remaining);
raid_end_discard_bio(r10_bio);
- wait_barrier(conf);
+ wait_barrier(conf, false);
goto retry_discard;
}
@@ -2011,7 +2040,7 @@ static void print_conf(struct r10conf *conf)
static void close_sync(struct r10conf *conf)
{
- wait_barrier(conf);
+ wait_barrier(conf, false);
allow_barrier(conf);
mempool_exit(&conf->r10buf_pool);
@@ -4819,7 +4848,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
if (need_flush ||
time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
/* Need to update reshape_position in metadata */
- wait_barrier(conf);
+ wait_barrier(conf, false);
mddev->reshape_position = conf->reshape_progress;
if (mddev->reshape_backwards)
mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
@@ -5242,5 +5271,3 @@ MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");
-
-module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9c1a5877cf9f..ffe720c73b0a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2215,10 +2215,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
struct r5conf *conf = sh->raid_conf;
int level = conf->level;
struct raid5_percpu *percpu;
- unsigned long cpu;
- cpu = get_cpu();
- percpu = per_cpu_ptr(conf->percpu, cpu);
+ local_lock(&conf->percpu->lock);
+ percpu = this_cpu_ptr(conf->percpu);
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
@@ -2271,13 +2270,14 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
BUG();
}
- if (overlap_clear && !sh->batch_head)
+ if (overlap_clear && !sh->batch_head) {
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
- put_cpu();
+ }
+ local_unlock(&conf->percpu->lock);
}
static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
@@ -5686,6 +5686,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
struct stripe_head *sh;
int stripe_sectors;
+ /* We need to handle this when io_uring supports discard/trim */
+ if (WARN_ON_ONCE(bi->bi_opf & REQ_NOWAIT))
+ return;
+
if (mddev->reshape_position != MaxSector)
/* Skip discard while reshape is happening */
return;
@@ -5819,6 +5823,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
+ /* Bail out if conflicts with reshape and REQ_NOWAIT is set */
+ if ((bi->bi_opf & REQ_NOWAIT) &&
+ (conf->reshape_progress != MaxSector) &&
+ (mddev->reshape_backwards
+ ? (logical_sector > conf->reshape_progress && logical_sector <= conf->reshape_safe)
+ : (logical_sector >= conf->reshape_safe && logical_sector < conf->reshape_progress))) {
+ bio_wouldblock_error(bi);
+ if (rw == WRITE)
+ md_write_end(mddev);
+ return true;
+ }
md_account_bio(mddev, &bi);
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
@@ -7052,6 +7067,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
return -ENOMEM;
}
+ local_lock_init(&percpu->lock);
return 0;
}
@@ -7446,12 +7462,19 @@ static int raid5_run(struct mddev *mddev)
struct md_rdev *rdev;
struct md_rdev *journal_dev = NULL;
sector_t reshape_offset = 0;
- int i;
+ int i, ret = 0;
long long min_offset_diff = 0;
int first = 1;
- if (mddev_init_writes_pending(mddev) < 0)
+ if (acct_bioset_init(mddev)) {
+ pr_err("md/raid456:%s: alloc acct bioset failed.\n", mdname(mddev));
return -ENOMEM;
+ }
+
+ if (mddev_init_writes_pending(mddev) < 0) {
+ ret = -ENOMEM;
+ goto exit_acct_set;
+ }
if (mddev->recovery_cp != MaxSector)
pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
@@ -7482,7 +7505,8 @@ static int raid5_run(struct mddev *mddev)
(mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
if (mddev->reshape_position != MaxSector) {
@@ -7507,13 +7531,15 @@ static int raid5_run(struct mddev *mddev)
if (journal_dev) {
pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
if (mddev->new_level != mddev->level) {
pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
old_disks = mddev->raid_disks - mddev->delta_disks;
/* reshape_position must be on a new-stripe boundary, and one
@@ -7529,7 +7555,8 @@ static int raid5_run(struct mddev *mddev)
if (sector_div(here_new, chunk_sectors * new_data_disks)) {
pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
reshape_offset = here_new * chunk_sectors;
/* here_new is the stripe we will write to */
@@ -7551,7 +7578,8 @@ static int raid5_run(struct mddev *mddev)
else if (mddev->ro == 0) {
pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
} else if (mddev->reshape_backwards
? (here_new * chunk_sectors + min_offset_diff <=
@@ -7561,7 +7589,8 @@ static int raid5_run(struct mddev *mddev)
/* Reading from the same stripe as writing to - bad */
pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
/* OK, we should be able to continue; */
@@ -7585,8 +7614,10 @@ static int raid5_run(struct mddev *mddev)
else
conf = mddev->private;
- if (IS_ERR(conf))
- return PTR_ERR(conf);
+ if (IS_ERR(conf)) {
+ ret = PTR_ERR(conf);
+ goto exit_acct_set;
+ }
if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
if (!journal_dev) {
@@ -7783,7 +7814,10 @@ abort:
free_conf(conf);
mddev->private = NULL;
pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
- return -EIO;
+ ret = -EIO;
+exit_acct_set:
+ acct_bioset_exit(mddev);
+ return ret;
}
static void raid5_free(struct mddev *mddev, void *priv)
@@ -7791,6 +7825,7 @@ static void raid5_free(struct mddev *mddev, void *priv)
struct r5conf *conf = priv;
free_conf(conf);
+ acct_bioset_exit(mddev);
mddev->to_remove = &raid5_attrs_group;
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 5c05acf20e1f..9e8486a9e445 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -4,6 +4,7 @@
#include <linux/raid/xor.h>
#include <linux/dmaengine.h>
+#include <linux/local_lock.h>
/*
*
@@ -640,7 +641,8 @@ struct r5conf {
* lists and performing address
* conversions
*/
- int scribble_obj_size;
+ int scribble_obj_size;
+ local_lock_t lock;
} __percpu *percpu;
int scribble_disks;
int scribble_sectors;
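Together with the raid5.c hunk earlier, this replaces get_cpu()/put_cpu() — which pins preemption off for the whole stripe operation — with a local_lock_t embedded next to the per-CPU data, which PREEMPT_RT can turn into a sleepable per-CPU lock. A userspace analogue of keeping the lock beside the data it guards, using a pthread mutex in place of local_lock_t:

#include <pthread.h>

/* Per-slot scratch state with its lock living next to it, the way
 * raid5 now embeds a local_lock_t inside struct raid5_percpu. */
struct percpu_scratch {
	pthread_mutex_t lock;	/* plays the role of local_lock_t */
	int scribble;		/* per-CPU scratch data */
};

static struct percpu_scratch scratch = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

static void run_ops(struct percpu_scratch *pc)
{
	pthread_mutex_lock(&pc->lock);		/* local_lock(&conf->percpu->lock) */
	pc->scribble++;				/* ... use the scratch buffers ... */
	pthread_mutex_unlock(&pc->lock);	/* local_unlock(...) */
}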
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index b07812657cee..f3f24c63536b 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -141,10 +141,10 @@ config MEDIA_TEST_SUPPORT
prompt "Test drivers" if MEDIA_SUPPORT_FILTER
default y if !MEDIA_SUPPORT_FILTER
help
- Those drivers should not be used on production Kernels, but
- can be useful on debug ones. It enables several dummy drivers
- that simulate a real hardware. Very useful to test userspace
- applications and to validate if the subsystem core is doesn't
+ These drivers should not be used on production kernels, but
+ can be useful on debug ones. This option enables several dummy drivers
+ that simulate real hardware. Very useful to test userspace
+ applications and to validate if the subsystem core doesn't
have regressions.
Say Y if you want to use some virtual test driver.
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index cd9cb354dc2c..2e12331c12a9 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -161,10 +161,10 @@ static void cec_queue_event(struct cec_adapter *adap,
u64 ts = ktime_get_ns();
struct cec_fh *fh;
- mutex_lock(&adap->devnode.lock);
+ mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list)
cec_queue_event_fh(fh, ev, ts);
- mutex_unlock(&adap->devnode.lock);
+ mutex_unlock(&adap->devnode.lock_fhs);
}
/* Notify userspace that the CEC pin changed state at the given time. */
@@ -178,11 +178,12 @@ void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high,
};
struct cec_fh *fh;
- mutex_lock(&adap->devnode.lock);
- list_for_each_entry(fh, &adap->devnode.fhs, list)
+ mutex_lock(&adap->devnode.lock_fhs);
+ list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
- mutex_unlock(&adap->devnode.lock);
+ }
+ mutex_unlock(&adap->devnode.lock_fhs);
}
EXPORT_SYMBOL_GPL(cec_queue_pin_cec_event);
@@ -195,10 +196,10 @@ void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
};
struct cec_fh *fh;
- mutex_lock(&adap->devnode.lock);
+ mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list)
cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
- mutex_unlock(&adap->devnode.lock);
+ mutex_unlock(&adap->devnode.lock_fhs);
}
EXPORT_SYMBOL_GPL(cec_queue_pin_hpd_event);
@@ -211,10 +212,10 @@ void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
};
struct cec_fh *fh;
- mutex_lock(&adap->devnode.lock);
+ mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list)
cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
- mutex_unlock(&adap->devnode.lock);
+ mutex_unlock(&adap->devnode.lock_fhs);
}
EXPORT_SYMBOL_GPL(cec_queue_pin_5v_event);
@@ -286,12 +287,12 @@ static void cec_queue_msg_monitor(struct cec_adapter *adap,
u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
CEC_MODE_MONITOR_ALL;
- mutex_lock(&adap->devnode.lock);
+ mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower >= monitor_mode)
cec_queue_msg_fh(fh, msg);
}
- mutex_unlock(&adap->devnode.lock);
+ mutex_unlock(&adap->devnode.lock_fhs);
}
/*
@@ -302,12 +303,12 @@ static void cec_queue_msg_followers(struct cec_adapter *adap,
{
struct cec_fh *fh;
- mutex_lock(&adap->devnode.lock);
+ mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower == CEC_MODE_FOLLOWER)
cec_queue_msg_fh(fh, msg);
}
- mutex_unlock(&adap->devnode.lock);
+ mutex_unlock(&adap->devnode.lock_fhs);
}
/* Notify userspace of an adapter state change. */
@@ -342,7 +343,7 @@ static void cec_data_completed(struct cec_data *data)
* Without that we would be referring to a closed filehandle.
*/
if (data->fh)
- list_del(&data->xfer_list);
+ list_del_init(&data->xfer_list);
if (data->blocking) {
/*
@@ -898,6 +899,8 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
if (fh)
list_add_tail(&data->xfer_list, &fh->xfer_list);
+ else
+ INIT_LIST_HEAD(&data->xfer_list);
list_add_tail(&data->list, &adap->transmit_queue);
adap->transmit_queue_sz++;
@@ -923,6 +926,10 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
/* The transmit completed (possibly with an error) */
*msg = data->msg;
+ if (WARN_ON(!list_empty(&data->list)))
+ list_del(&data->list);
+ if (WARN_ON(!list_empty(&data->xfer_list)))
+ list_del(&data->xfer_list);
kfree(data);
return 0;
}
@@ -1573,6 +1580,7 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
/* Disabling monitor all mode should always succeed */
if (adap->monitor_all_cnt)
WARN_ON(call_op(adap, adap_monitor_all_enable, false));
+ /* serialize adap_enable */
mutex_lock(&adap->devnode.lock);
if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) {
WARN_ON(adap->ops->adap_enable(adap, false));
@@ -1584,14 +1592,16 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
return;
}
+ /* serialize adap_enable */
mutex_lock(&adap->devnode.lock);
adap->last_initiator = 0xff;
adap->transmit_in_progress = false;
- if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
- adap->ops->adap_enable(adap, true)) {
- mutex_unlock(&adap->devnode.lock);
- return;
+ if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) {
+ if (adap->ops->adap_enable(adap, true)) {
+ mutex_unlock(&adap->devnode.lock);
+ return;
+ }
}
if (adap->monitor_all_cnt &&
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
index 769e6b4cddce..d72ad48c9898 100644
--- a/drivers/media/cec/core/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
@@ -586,6 +586,7 @@ static int cec_open(struct inode *inode, struct file *filp)
return err;
}
+ /* serialize adap_enable */
mutex_lock(&devnode->lock);
if (list_empty(&devnode->fhs) &&
!adap->needs_hpd &&
@@ -624,7 +625,9 @@ static int cec_open(struct inode *inode, struct file *filp)
}
#endif
+ mutex_lock(&devnode->lock_fhs);
list_add(&fh->list, &devnode->fhs);
+ mutex_unlock(&devnode->lock_fhs);
mutex_unlock(&devnode->lock);
return 0;
@@ -653,8 +656,11 @@ static int cec_release(struct inode *inode, struct file *filp)
cec_monitor_all_cnt_dec(adap);
mutex_unlock(&adap->lock);
+ /* serialize adap_enable */
mutex_lock(&devnode->lock);
+ mutex_lock(&devnode->lock_fhs);
list_del(&fh->list);
+ mutex_unlock(&devnode->lock_fhs);
if (cec_is_registered(adap) && list_empty(&devnode->fhs) &&
!adap->needs_hpd && adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
WARN_ON(adap->ops->adap_enable(adap, false));
@@ -669,7 +675,7 @@ static int cec_release(struct inode *inode, struct file *filp)
data->blocking = false;
data->fh = NULL;
- list_del(&data->xfer_list);
+ list_del_init(&data->xfer_list);
}
mutex_unlock(&adap->lock);
while (!list_empty(&fh->msgs)) {
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index 551689d371a7..a3ab6a43fb14 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -106,7 +106,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
/* Part 1: Find a free minor number */
mutex_lock(&cec_devnode_lock);
- minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
+ minor = find_first_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES);
if (minor == CEC_NUM_DEVICES) {
mutex_unlock(&cec_devnode_lock);
pr_err("could not get a free minor\n");
@@ -169,8 +169,10 @@ static void cec_devnode_unregister(struct cec_adapter *adap)
devnode->registered = false;
devnode->unregistered = true;
+ mutex_lock(&devnode->lock_fhs);
list_for_each_entry(fh, &devnode->fhs, list)
wake_up_interruptible(&fh->wait);
+ mutex_unlock(&devnode->lock_fhs);
mutex_unlock(&devnode->lock);
@@ -272,6 +274,7 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
/* adap->devnode initialization */
INIT_LIST_HEAD(&adap->devnode.fhs);
+ mutex_init(&adap->devnode.lock_fhs);
mutex_init(&adap->devnode.lock);
adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name);
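The point of the new devnode.lock_fhs is lock splitting: event delivery only walks the filehandle list, so it takes the narrow list lock and can no longer deadlock against a thread holding devnode.lock across adap_enable(); open/release take both locks in a fixed outer-then-inner order. A minimal pthread sketch of that split, with field and function names invented for illustration:

#include <pthread.h>

struct devnode {
	pthread_mutex_t lock;		/* wide: serializes enable/disable */
	pthread_mutex_t lock_fhs;	/* narrow: guards only the fh list */
	int nfhs;
};

static struct devnode node = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.lock_fhs = PTHREAD_MUTEX_INITIALIZER,
};

/* Event delivery needs only the narrow lock, so it cannot deadlock
 * against a thread holding 'lock' across a slow enable/disable. */
static void queue_event(struct devnode *d)
{
	pthread_mutex_lock(&d->lock_fhs);
	/* walk d's filehandle list and queue the event for each */
	pthread_mutex_unlock(&d->lock_fhs);
}

/* open/release take both locks, always outer-then-inner. */
static void add_fh(struct devnode *d)
{
	pthread_mutex_lock(&d->lock);
	pthread_mutex_lock(&d->lock_fhs);
	d->nfhs++;
	pthread_mutex_unlock(&d->lock_fhs);
	pthread_mutex_unlock(&d->lock);
}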
diff --git a/drivers/media/cec/core/cec-pin-priv.h b/drivers/media/cec/core/cec-pin-priv.h
index fb101f15865c..7bad5a0b7cb7 100644
--- a/drivers/media/cec/core/cec-pin-priv.h
+++ b/drivers/media/cec/core/cec-pin-priv.h
@@ -170,7 +170,6 @@ struct cec_pin {
ktime_t ts;
unsigned int wait_usecs;
u16 la_mask;
- bool enabled;
bool monitor_all;
bool rx_eom;
bool enable_irq_failed;
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index a60b6f03a6a1..21f0f749713e 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -1033,6 +1033,7 @@ static int cec_pin_thread_func(void *_adap)
{
struct cec_adapter *adap = _adap;
struct cec_pin *pin = adap->pin;
+ bool irq_enabled = false;
for (;;) {
wait_event_interruptible(pin->kthread_waitq,
@@ -1060,6 +1061,7 @@ static int cec_pin_thread_func(void *_adap)
ns_to_ktime(pin->work_rx_msg.rx_ts));
msg->len = 0;
}
+
if (pin->work_tx_status) {
unsigned int tx_status = pin->work_tx_status;
@@ -1083,27 +1085,39 @@ static int cec_pin_thread_func(void *_adap)
switch (atomic_xchg(&pin->work_irq_change,
CEC_PIN_IRQ_UNCHANGED)) {
case CEC_PIN_IRQ_DISABLE:
- pin->ops->disable_irq(adap);
+ if (irq_enabled) {
+ pin->ops->disable_irq(adap);
+ irq_enabled = false;
+ }
cec_pin_high(pin);
cec_pin_to_idle(pin);
hrtimer_start(&pin->timer, ns_to_ktime(0),
HRTIMER_MODE_REL);
break;
case CEC_PIN_IRQ_ENABLE:
+ if (irq_enabled)
+ break;
pin->enable_irq_failed = !pin->ops->enable_irq(adap);
if (pin->enable_irq_failed) {
cec_pin_to_idle(pin);
hrtimer_start(&pin->timer, ns_to_ktime(0),
HRTIMER_MODE_REL);
+ } else {
+ irq_enabled = true;
}
break;
default:
break;
}
-
if (kthread_should_stop())
break;
}
+ if (pin->ops->disable_irq && irq_enabled)
+ pin->ops->disable_irq(adap);
+ hrtimer_cancel(&pin->timer);
+ cec_pin_read(pin);
+ cec_pin_to_idle(pin);
+ pin->state = CEC_ST_OFF;
return 0;
}
@@ -1111,7 +1125,6 @@ static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable)
{
struct cec_pin *pin = adap->pin;
- pin->enabled = enable;
if (enable) {
atomic_set(&pin->work_pin_num_events, 0);
pin->work_pin_events_rd = pin->work_pin_events_wr = 0;
@@ -1130,13 +1143,7 @@ static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable)
hrtimer_start(&pin->timer, ns_to_ktime(0),
HRTIMER_MODE_REL);
} else {
- if (pin->ops->disable_irq)
- pin->ops->disable_irq(adap);
- hrtimer_cancel(&pin->timer);
kthread_stop(pin->kthread);
- cec_pin_read(pin);
- cec_pin_to_idle(pin);
- pin->state = CEC_ST_OFF;
}
return 0;
}
@@ -1157,11 +1164,8 @@ void cec_pin_start_timer(struct cec_pin *pin)
if (pin->state != CEC_ST_RX_IRQ)
return;
- atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED);
- pin->ops->disable_irq(pin->adap);
- cec_pin_high(pin);
- cec_pin_to_idle(pin);
- hrtimer_start(&pin->timer, ns_to_ktime(0), HRTIMER_MODE_REL);
+ atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE);
+ wake_up_interruptible(&pin->kthread_waitq);
}
static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts,
diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c
index cbaa61f10d5f..e7a88a2d248c 100644
--- a/drivers/media/common/b2c2/flexcop.c
+++ b/drivers/media/common/b2c2/flexcop.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop.c - main module part
@@ -15,16 +16,6 @@
* Uwe Bugla, uwe.bugla at gmx.de (doing tests, restyling code, writing docu)
* Niklas Peinecke, peinecke at gdv.uni-hannover.de (hardware pid/mac
* filtering)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "flexcop.h"
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index baf5772c52a9..e9a15de6126e 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -487,6 +487,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
if (hdl->error) {
err = hdl->error;
v4l2_ctrl_handler_free(hdl);
+ v4l2_device_unregister(&dev->v4l2_dev);
return err;
}
dev->v4l2_dev.ctrl_handler = hdl;
@@ -495,6 +496,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
if (vv == NULL) {
ERR("out of memory. aborting.\n");
v4l2_ctrl_handler_free(hdl);
+ v4l2_device_unregister(&dev->v4l2_dev);
return -ENOMEM;
}
ext_vv->vid_ops = saa7146_video_ioctl_ops;
@@ -521,7 +523,8 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
ERR("out of memory. aborting.\n");
kfree(vv);
v4l2_ctrl_handler_free(hdl);
- return -1;
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return -ENOMEM;
}
saa7146_video_uops.init(dev,vv);
diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c
index ce879f6f8f82..542dde9d2609 100644
--- a/drivers/media/common/videobuf2/frame_vector.c
+++ b/drivers/media/common/videobuf2/frame_vector.c
@@ -37,6 +37,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
+ int ret_pin_user_pages_fast = 0;
int ret = 0;
int err;
@@ -56,6 +57,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
vec->is_pfns = false;
goto out_unlocked;
}
+ ret_pin_user_pages_fast = ret;
mmap_read_lock(mm);
vec->got_ref = false;
@@ -71,7 +73,18 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
err = follow_pfn(vma, start, &nums[ret]);
if (err) {
- if (ret == 0)
+ if (ret)
+ goto out;
+ // If follow_pfn() returns -EINVAL, then this
+ // is not an IO mapping or a raw PFN mapping.
+ // In that case, return the original error from
+ // pin_user_pages_fast(). Otherwise this
+ // function would return -EINVAL when
+ // pin_user_pages_fast() returned -ENOMEM,
+ // which makes debugging hard.
+ if (err == -EINVAL && ret_pin_user_pages_fast)
+ ret = ret_pin_user_pages_fast;
+ else
ret = err;
goto out;
}
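The frame_vector.c change is about which errno survives: when follow_pfn() fails with a generic -EINVAL but pin_user_pages_fast() already reported something more specific (say -ENOMEM), the earlier error is the one worth returning to the caller. A small sketch of that keep-the-first-meaningful-errno pattern, with invented helper names:

#include <errno.h>

static int pin_pages(void)	{ return -ENOMEM; }	/* first, specific failure */
static int follow_mapping(void)	{ return -EINVAL; }	/* later, generic failure */

static int get_frames(void)
{
	int first_err = pin_pages();

	if (first_err >= 0)
		return first_err;	/* pinning worked, nothing to do */

	int err = follow_mapping();
	if (err == -EINVAL && first_err)
		return first_err;	/* keep the more specific errno */
	return err;
}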
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 556e42ba66e5..7c4096e62173 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -257,7 +257,7 @@ static void *vb2_dc_alloc(struct vb2_buffer *vb,
ret = vb2_dc_alloc_coherent(buf);
if (ret) {
- dev_err(dev, "dma alloc of size %ld failed\n", size);
+ dev_err(dev, "dma alloc of size %lu failed\n", size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
@@ -298,9 +298,9 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
vma->vm_ops->open(vma);
- pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
- __func__, (unsigned long)buf->dma_addr, vma->vm_start,
- buf->size);
+ pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
+ __func__, (unsigned long)buf->dma_addr, vma->vm_start,
+ buf->size);
return 0;
}
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 5d5a48475a54..f6ee678107d3 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
/*
* dmxdev.c - DVB demultiplexer device
*
* Copyright (C) 2000 Ralph Metzler & Marcus Metzler
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#define pr_fmt(fmt) "dmxdev: " fmt
@@ -1413,7 +1403,7 @@ static const struct dvb_device dvbdev_dvr = {
};
int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
{
- int i;
+ int i, ret;
if (dmxdev->demux->open(dmxdev->demux) < 0)
return -EUSERS;
@@ -1432,14 +1422,26 @@ int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
DMXDEV_STATE_FREE);
}
- dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
+ ret = dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
DVB_DEVICE_DEMUX, dmxdev->filternum);
- dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
+ if (ret < 0)
+ goto err_register_dvbdev;
+
+ ret = dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
dmxdev, DVB_DEVICE_DVR, dmxdev->filternum);
+ if (ret < 0)
+ goto err_register_dvr_dvbdev;
dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);
return 0;
+
+err_register_dvr_dvbdev:
+ dvb_unregister_device(dmxdev->dvbdev);
+err_register_dvbdev:
+ vfree(dmxdev->filter);
+ dmxdev->filter = NULL;
+ return ret;
}
EXPORT_SYMBOL(dvb_dmxdev_init);
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 5fde1d38b3e3..83cc32ad7e12 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -1,20 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
/*
* dvb_demux.c - DVB kernel demux API
*
* Copyright (C) 2000-2001 Ralph Metzler <ralph@convergence.de>
* & Marcus Metzler <marcus@convergence.de>
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#define pr_fmt(fmt) "dvb_demux: " fmt
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 258637d762d6..48e735cdbe6b 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -2554,8 +2554,7 @@ static int dvb_frontend_handle_ioctl(struct file *file,
case FE_DISEQC_SEND_BURST:
if (fe->ops.diseqc_send_burst) {
- err = fe->ops.diseqc_send_burst(fe,
- (enum fe_sec_mini_cmd)parg);
+ err = fe->ops.diseqc_send_burst(fe, (long)parg);
fepriv->state = FESTATE_DISEQC;
fepriv->status = 0;
}
@@ -2563,9 +2562,8 @@ static int dvb_frontend_handle_ioctl(struct file *file,
case FE_SET_TONE:
if (fe->ops.set_tone) {
- err = fe->ops.set_tone(fe,
- (enum fe_sec_tone_mode)parg);
- fepriv->tone = (enum fe_sec_tone_mode)parg;
+ fepriv->tone = (long)parg;
+ err = fe->ops.set_tone(fe, fepriv->tone);
fepriv->state = FESTATE_DISEQC;
fepriv->status = 0;
}
@@ -2573,9 +2571,8 @@ static int dvb_frontend_handle_ioctl(struct file *file,
case FE_SET_VOLTAGE:
if (fe->ops.set_voltage) {
- err = fe->ops.set_voltage(fe,
- (enum fe_sec_voltage)parg);
- fepriv->voltage = (enum fe_sec_voltage)parg;
+ fepriv->voltage = (long)parg;
+ err = fe->ops.set_voltage(fe, fepriv->voltage);
fepriv->state = FESTATE_DISEQC;
fepriv->status = 0;
}
@@ -2935,7 +2932,9 @@ int dvb_frontend_suspend(struct dvb_frontend *fe)
else if (fe->ops.tuner_ops.sleep)
ret = fe->ops.tuner_ops.sleep(fe);
- if (fe->ops.sleep)
+ if (fe->ops.suspend)
+ ret = fe->ops.suspend(fe);
+ else if (fe->ops.sleep)
ret = fe->ops.sleep(fe);
return ret;
@@ -2951,7 +2950,9 @@ int dvb_frontend_resume(struct dvb_frontend *fe)
fe->id);
fe->exit = DVB_FE_DEVICE_RESUME;
- if (fe->ops.init)
+ if (fe->ops.resume)
+ ret = fe->ops.resume(fe);
+ else if (fe->ops.init)
ret = fe->ops.init(fe);
if (fe->ops.tuner_ops.resume)
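dvb_frontend_suspend() and dvb_frontend_resume() now prefer the dedicated .suspend/.resume demod ops and fall back to the legacy .sleep/.init only when a driver does not provide them (si2168 below adds exactly such a .resume). The dispatch is a plain ordered fallback, sketched here with a deliberately simplified ops struct:

struct fe_ops {
	int (*suspend)(void);	/* dedicated hook, may be NULL */
	int (*sleep)(void);	/* legacy fallback */
};

/* Prefer the dedicated hook; fall back to the legacy one. */
static int frontend_suspend(const struct fe_ops *ops)
{
	if (ops->suspend)
		return ops->suspend();
	if (ops->sleep)
		return ops->sleep();
	return 0;
}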
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index 959d110407a4..a1bd6d9c9223 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -5,10 +5,6 @@
* Copyright (C) 2015 Samsung Electronics
*
* Author: jh1009.sung@samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
*/
#include <linux/err.h>
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 795d9bfaba5c..675d877a67b2 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -1,20 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
/*
* dvbdev.c
*
* Copyright (C) 2000 Ralph Metzler <ralph@convergence.de>
* & Marcus Metzler <marcus@convergence.de>
* for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#define pr_fmt(fmt) "dvbdev: " fmt
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index 60a9f70275f7..dd55d314bf9a 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -378,7 +378,7 @@ static void cx24113_set_nfr(struct cx24113_state *state, u16 n, s32 f, u8 r)
static int cx24113_set_frequency(struct cx24113_state *state, u32 frequency)
{
- u8 r = 1; /* or 2 */
+ u8 r;
u16 n = 6;
s32 f = 0;
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index bb02354a48b8..d67f2dd997d0 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -4473,8 +4473,10 @@ static struct dvb_frontend *dib8000_init(struct i2c_adapter *i2c_adap, u8 i2c_ad
state->timf_default = cfg->pll->timf;
- if (dib8000_identify(&state->i2c) == 0)
+ if (dib8000_identify(&state->i2c) == 0) {
+ kfree(fe);
goto error;
+ }
dibx000_init_i2c_master(&state->i2c_master, DIB8000, state->i2c.adap, state->i2c.addr);
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 04d92d614279..914ca820c174 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -258,7 +258,7 @@ static int dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 *b, u32
state->i2c_write_buffer[0] |= (1 << 4);
do {
- l = len < chunk_size ? len : chunk_size;
+ l = min(len, chunk_size);
state->msg[1].len = l;
state->msg[1].buf = b;
ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 2) != 2 ? -EREMOTEIO : 0;
@@ -342,7 +342,7 @@ static int dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 *
state->i2c_write_buffer[0] |= (1 << 4);
do {
- l = len < chunk_size ? len : chunk_size;
+ l = min(len, chunk_size);
state->msg[0].len = l + 2;
memcpy(&state->i2c_write_buffer[2], buf, l);
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index a7eb81df88c2..9860cae65f1c 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -914,44 +914,36 @@ static int DownloadMicrocode(struct drxd_state *state,
u32 Address;
u16 nBlocks;
u16 BlockSize;
- u32 offset = 0;
int i, status = 0;
pSrc = (u8 *) pMCImage;
/* We're not using Flags */
/* Flags = (pSrc[0] << 8) | pSrc[1]; */
pSrc += sizeof(u16);
- offset += sizeof(u16);
nBlocks = (pSrc[0] << 8) | pSrc[1];
pSrc += sizeof(u16);
- offset += sizeof(u16);
for (i = 0; i < nBlocks; i++) {
Address = (pSrc[0] << 24) | (pSrc[1] << 16) |
(pSrc[2] << 8) | pSrc[3];
pSrc += sizeof(u32);
- offset += sizeof(u32);
BlockSize = ((pSrc[0] << 8) | pSrc[1]) * sizeof(u16);
pSrc += sizeof(u16);
- offset += sizeof(u16);
/* We're not using Flags */
/* u16 Flags = (pSrc[0] << 8) | pSrc[1]; */
pSrc += sizeof(u16);
- offset += sizeof(u16);
/* We're not using BlockCRC */
/* u16 BlockCRC = (pSrc[0] << 8) | pSrc[1]; */
pSrc += sizeof(u16);
- offset += sizeof(u16);
status = WriteBlock(state, Address, BlockSize,
pSrc, DRX_I2C_CLEARCRC);
if (status < 0)
break;
pSrc += BlockSize;
- offset += BlockSize;
}
return status;
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index d7fc2595f15b..9430295a8175 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -3720,7 +3720,6 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
{
u16 cmd_result = 0;
u16 transmission_params = 0;
- u16 operation_mode = 0;
u32 iqm_rc_rate_ofs = 0;
u32 bandwidth = 0;
u16 param1;
@@ -3759,10 +3758,8 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
/* mode */
switch (state->props.transmission_mode) {
case TRANSMISSION_MODE_AUTO:
- default:
- operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M;
- fallthrough; /* try first guess DRX_FFTMODE_8K */
case TRANSMISSION_MODE_8K:
+ default:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K;
break;
case TRANSMISSION_MODE_2K:
@@ -3773,9 +3770,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
/* guard */
switch (state->props.guard_interval) {
default:
- case GUARD_INTERVAL_AUTO:
- operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M;
- fallthrough; /* try first guess DRX_GUARD_1DIV4 */
+ case GUARD_INTERVAL_AUTO: /* try first guess DRX_GUARD_1DIV4 */
case GUARD_INTERVAL_1_4:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4;
break;
@@ -3794,11 +3789,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
switch (state->props.hierarchy) {
case HIERARCHY_AUTO:
case HIERARCHY_NONE:
- default:
- operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M;
- /* try first guess SC_RA_RAM_OP_PARAM_HIER_NO */
- /* transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */
- fallthrough;
+ default: /* try first guess SC_RA_RAM_OP_PARAM_HIER_NO */
case HIERARCHY_1:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1;
break;
@@ -3814,9 +3805,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
/* modulation */
switch (state->props.modulation) {
case QAM_AUTO:
- default:
- operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M;
- fallthrough; /* try first guess DRX_CONSTELLATION_QAM64 */
+ default: /* try first guess DRX_CONSTELLATION_QAM64 */
case QAM_64:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64;
break;
@@ -3857,9 +3846,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
/* coderate */
switch (state->props.code_rate_HP) {
case FEC_AUTO:
- default:
- operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M;
- fallthrough; /* try first guess DRX_CODERATE_2DIV3 */
+ default: /* try first guess DRX_CODERATE_2DIV3 */
case FEC_2_3:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3;
break;
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 02e8aa11e36e..bce0f42f3d19 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -451,7 +451,7 @@ static int m88ds3103b_select_mclk(struct m88ds3103_dev *dev)
static int m88ds3103b_set_mclk(struct m88ds3103_dev *dev, u32 mclk_khz)
{
- u8 reg11 = 0x0A, reg15, reg16, reg1D, reg1E, reg1F, tmp;
+ u8 reg15, reg16, reg1D, reg1E, reg1F, tmp;
u8 sm, f0 = 0, f1 = 0, f2 = 0, f3 = 0;
u16 pll_div_fb, N;
u32 div;
@@ -480,8 +480,6 @@ static int m88ds3103b_set_mclk(struct m88ds3103_dev *dev, u32 mclk_khz)
div /= mclk_khz;
if (dev->cfg->ts_mode == M88DS3103_TS_SERIAL) {
- reg11 |= 0x02;
-
if (div <= 32) {
N = 2;
@@ -532,8 +530,6 @@ static int m88ds3103b_set_mclk(struct m88ds3103_dev *dev, u32 mclk_khz)
else if ((f3 < 8) && (f3 != 0))
f3 = 8;
} else {
- reg11 &= ~0x02;
-
if (div <= 32) {
N = 2;
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index c1334d7eb442..2563a72e98b7 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -150,7 +150,7 @@ static struct vsb_snr_tab {
{ 0x35b, 235, },
{ 0x353, 230, },
{ 0x349, 225, },
- { 0x340, 320, },
+ { 0x340, 220, },
{ 0x337, 215, },
{ 0x327, 210, },
{ 0x31b, 205, },
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 14b93a7d3358..196e028a6617 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -448,23 +448,10 @@ static int si2168_init(struct dvb_frontend *fe)
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, dev->firmware_name, &client->dev);
if (ret) {
- /* fallback mechanism to handle old name for Si2168 B40 fw */
- if (dev->chip_id == SI2168_CHIP_ID_B40) {
- dev->firmware_name = SI2168_B40_FIRMWARE_FALLBACK;
- ret = request_firmware(&fw, dev->firmware_name,
- &client->dev);
- }
-
- if (ret == 0) {
- dev_notice(&client->dev,
- "please install firmware file '%s'\n",
- SI2168_B40_FIRMWARE);
- } else {
- dev_err(&client->dev,
- "firmware file '%s' not found\n",
- dev->firmware_name);
- goto err_release_firmware;
- }
+ dev_err(&client->dev,
+ "firmware file '%s' not found\n",
+ dev->firmware_name);
+ goto err_release_firmware;
}
dev_info(&client->dev, "downloading firmware from file '%s'\n",
@@ -527,6 +514,7 @@ static int si2168_init(struct dvb_frontend *fe)
goto err;
dev->warm = true;
+ dev->initialized = true;
warm:
/* Init stats here to indicate which stats are supported */
c->cnr.len = 1;
@@ -548,6 +536,26 @@ err:
return ret;
}
+static int si2168_resume(struct dvb_frontend *fe)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct si2168_dev *dev = i2c_get_clientdata(client);
+
+ /*
+ * check whether si2168_init() has been called successfully
+ * outside of a resume cycle. Only call it (and load firmware)
+ * in this case. si2168_init() is only called during resume
+ * once the device has actually been used. Otherwise, leave the
+ * device untouched.
+ */
+ if (dev->initialized) {
+ dev_dbg(&client->dev, "previously initialized, call si2168_init()\n");
+ return si2168_init(fe);
+ }
+ dev_dbg(&client->dev, "not initialized yet, skipping init on resume\n");
+ return 0;
+}
+
static int si2168_sleep(struct dvb_frontend *fe)
{
struct i2c_client *client = fe->demodulator_priv;
@@ -657,6 +665,7 @@ static const struct dvb_frontend_ops si2168_ops = {
.init = si2168_init,
.sleep = si2168_sleep,
+ .resume = si2168_resume,
.set_frontend = si2168_set_frontend,
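The resume hook added above reloads firmware only when the frontend was brought up at least once before the system slept; a device that was never used stays untouched. A minimal sketch of that guard, with hypothetical demod_* names standing in for the si2168 symbols:

#include <media/dvb_frontend.h>

struct demod_state {
	bool initialized;		/* set after the first successful init */
};

static int demod_init(struct dvb_frontend *fe);	/* assumed to exist */

static int demod_resume(struct dvb_frontend *fe)
{
	struct demod_state *st = fe->demodulator_priv;

	/* never initialized before suspend: leave the hardware alone */
	if (!st->initialized)
		return 0;

	/* otherwise re-run the normal init path, firmware load included */
	return demod_init(fe);
}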
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 18bea5222082..3a8976229a4a 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -18,7 +18,6 @@
#define SI2168_A30_FIRMWARE "dvb-demod-si2168-a30-01.fw"
#define SI2168_B40_FIRMWARE "dvb-demod-si2168-b40-01.fw"
#define SI2168_D60_FIRMWARE "dvb-demod-si2168-d60-01.fw"
-#define SI2168_B40_FIRMWARE_FALLBACK "dvb-demod-si2168-02.fw"
/* state struct */
struct si2168_dev {
@@ -37,6 +36,7 @@ struct si2168_dev {
u8 ts_mode;
unsigned int active:1;
unsigned int warm:1;
+ unsigned int initialized:1;
unsigned int ts_clock_inv:1;
unsigned int ts_clock_gapped:1;
unsigned int spectral_inversion:1;
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index e31eb2c5cc4c..001b23588389 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -711,7 +711,7 @@ static int si21xx_set_frontend(struct dvb_frontend *fe)
int i;
bool inband_interferer_div2[ALLOWABLE_FS_COUNT];
bool inband_interferer_div4[ALLOWABLE_FS_COUNT];
- int status;
+ int status = 0;
/* allowable sample rates for ADC in MHz */
int afs[ALLOWABLE_FS_COUNT] = { 200, 192, 193, 194, 195,
@@ -747,8 +747,6 @@ static int si21xx_set_frontend(struct dvb_frontend *fe)
rf_freq = 10 * c->frequency ;
data_rate = c->symbol_rate / 100;
- status = PASS;
-
band_low = (rf_freq - lnb_lo) - ((lnb_uncertanity * 200)
+ (data_rate * 135)) / 200;
@@ -832,6 +830,9 @@ static int si21xx_set_frontend(struct dvb_frontend *fe)
state->fs = sample_rate;/*ADC MHz*/
si21xx_setacquire(fe, c->symbol_rate, c->fec_inner);
+ if (status)
+ return -EREMOTEIO;
+
return 0;
}
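The fix initializes status to zero (the old code pre-set it to PASS and the value was never checked) and maps any error accumulated from the tuning writes to -EREMOTEIO before the success return. The shape of the pattern, with placeholder write calls rather than the driver's actual register sequence:

	int status = 0;

	/* each write ORs its result into status; arguments are placeholders */
	status |= si21_writeregs(state, reg1, buf1, len1);
	status |= si21_writeregs(state, reg2, buf2, len2);

	if (status)
		return -EREMOTEIO;	/* any failed write maps to one errno */

	return 0;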
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index c89a91a3daf4..146e7f2dd3c5 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -140,7 +140,7 @@ static int sp887x_initial_setup (struct dvb_frontend* fe, const struct firmware
u8 buf [BLOCKSIZE + 2];
int i;
int fw_size = fw->size;
- const unsigned char *mem = fw->data;
+ const unsigned char *mem = fw->data + 10;
dprintk("%s\n", __func__);
@@ -148,8 +148,6 @@ static int sp887x_initial_setup (struct dvb_frontend* fe, const struct firmware
if (fw_size < FW_SIZE + 10)
return -ENODEV;
- mem = fw->data + 10;
-
/* soft reset */
sp887x_writereg(state, 0xf1a, 0x000);
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index d541d6613610..698866c4f15a 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -110,7 +110,7 @@ static const struct stb6100_regmask stb6100_template[] = {
/*
* Currently unused. Some boards might need it in the future
*/
-static inline void stb6100_normalise_regs(u8 regs[])
+static __always_unused inline void stb6100_normalise_regs(u8 regs[])
{
int i;
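__always_unused, from <linux/compiler_attributes.h>, expands to __attribute__((__unused__)) and tells the compiler the symbol may legitimately go unreferenced. It silences -Wunused-function for helpers that are deliberately kept, as here and for the adv7604.c and imx290.c accessors further down. A self-contained illustration:

#include <linux/compiler_attributes.h>
#include <linux/types.h>

/* kept for future boards; nothing calls it yet, and that is fine */
static __always_unused inline u16 demo_pack(u8 hi, u8 lo)
{
	return ((u16)hi << 8) | lo;
}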
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 6c2b05fae1c5..95e376f23506 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -1797,11 +1797,7 @@ static u32 stv0367cab_get_mclk(struct dvb_frontend *fe, u32 ExtClk_Hz)
static u32 stv0367cab_get_adc_freq(struct dvb_frontend *fe, u32 ExtClk_Hz)
{
- u32 ADCClk_Hz = ExtClk_Hz;
-
- ADCClk_Hz = stv0367cab_get_mclk(fe, ExtClk_Hz);
-
- return ADCClk_Hz;
+ return stv0367cab_get_mclk(fe, ExtClk_Hz);
}
static enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index d6a5d4ca439a..69c56e24a612 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -469,6 +469,7 @@ config VIDEO_VPX3220
config VIDEO_MAX9286
tristate "Maxim MAX9286 GMSL deserializer support"
depends on I2C && I2C_MUX
+ depends on VIDEO_V4L2
depends on OF_GPIO
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
@@ -1058,6 +1059,17 @@ config VIDEO_OV5675
To compile this driver as a module, choose M here: the
module will be called ov5675.
+config VIDEO_OV5693
+ tristate "OmniVision OV5693 sensor support"
+ depends on I2C && VIDEO_V4L2
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV5693 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov5693.
+
config VIDEO_OV5695
tristate "OmniVision OV5695 sensor support"
depends on I2C && VIDEO_V4L2
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 4d4fe08d7a6a..b01f6cd05ee8 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_VIDEO_OV5647) += ov5647.o
obj-$(CONFIG_VIDEO_OV5648) += ov5648.o
obj-$(CONFIG_VIDEO_OV5670) += ov5670.o
obj-$(CONFIG_VIDEO_OV5675) += ov5675.o
+obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
obj-$(CONFIG_VIDEO_OV5695) += ov5695.o
obj-$(CONFIG_VIDEO_OV6650) += ov6650.o
obj-$(CONFIG_VIDEO_OV7251) += ov7251.o
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 41f4e749a859..8e13cae40ec5 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -270,28 +270,6 @@ static int adv7511_pktmem_rd(struct v4l2_subdev *sd, u8 reg)
return adv_smbus_read_byte_data(state->i2c_pktmem, reg);
}
-static int adv7511_pktmem_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- int ret;
- int i;
-
- for (i = 0; i < 3; i++) {
- ret = i2c_smbus_write_byte_data(state->i2c_pktmem, reg, val);
- if (ret == 0)
- return 0;
- }
- v4l2_err(sd, "%s: i2c write error\n", __func__);
- return ret;
-}
-
-/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
- and then the value-mask (to be OR-ed). */
-static inline void adv7511_pktmem_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
-{
- adv7511_pktmem_wr(sd, reg, (adv7511_pktmem_rd(sd, reg) & clr_mask) | val_mask);
-}
-
static inline bool adv7511_have_hotplug(struct v4l2_subdev *sd)
{
return adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT;
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 44768b59a6ff..a2fa408d2d9f 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -398,14 +398,14 @@ static inline int io_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask,
return io_write(sd, reg, (io_read(sd, reg) & ~mask) | val);
}
-static inline int avlink_read(struct v4l2_subdev *sd, u8 reg)
+static inline int __always_unused avlink_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
return adv76xx_read_check(state, ADV7604_PAGE_AVLINK, reg);
}
-static inline int avlink_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+static inline int __always_unused avlink_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
@@ -439,14 +439,14 @@ static inline int infoframe_read(struct v4l2_subdev *sd, u8 reg)
return adv76xx_read_check(state, ADV76XX_PAGE_INFOFRAME, reg);
}
-static inline int infoframe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+static inline int __always_unused infoframe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
return regmap_write(state->regmap[ADV76XX_PAGE_INFOFRAME], reg, val);
}
-static inline int afe_read(struct v4l2_subdev *sd, u8 reg)
+static inline int __always_unused afe_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
@@ -479,14 +479,14 @@ static inline int rep_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8
return rep_write(sd, reg, (rep_read(sd, reg) & ~mask) | val);
}
-static inline int edid_read(struct v4l2_subdev *sd, u8 reg)
+static inline int __always_unused edid_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
return adv76xx_read_check(state, ADV76XX_PAGE_EDID, reg);
}
-static inline int edid_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+static inline int __always_unused edid_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
@@ -570,7 +570,7 @@ static inline int hdmi_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8
return hdmi_write(sd, reg, (hdmi_read(sd, reg) & ~mask) | val);
}
-static inline int test_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+static inline int __always_unused test_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
@@ -601,14 +601,14 @@ static inline int cp_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 v
return cp_write(sd, reg, (cp_read(sd, reg) & ~mask) | val);
}
-static inline int vdp_read(struct v4l2_subdev *sd, u8 reg)
+static inline int __always_unused vdp_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
return adv76xx_read_check(state, ADV7604_PAGE_VDP, reg);
}
-static inline int vdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+static inline int __always_unused vdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
struct adv76xx_state *state = to_state(sd);
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 7f8acbdf0db4..9d6eed0f8281 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -256,21 +256,11 @@ static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
return &container_of(ctrl->handler, struct adv7842_state, hdl)->sd;
}
-static inline unsigned hblanking(const struct v4l2_bt_timings *t)
-{
- return V4L2_DV_BT_BLANKING_WIDTH(t);
-}
-
static inline unsigned htotal(const struct v4l2_bt_timings *t)
{
return V4L2_DV_BT_FRAME_WIDTH(t);
}
-static inline unsigned vblanking(const struct v4l2_bt_timings *t)
-{
- return V4L2_DV_BT_BLANKING_HEIGHT(t);
-}
-
static inline unsigned vtotal(const struct v4l2_bt_timings *t)
{
return V4L2_DV_BT_FRAME_HEIGHT(t);
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 5363f3bcafe3..9158d3ce45c0 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -2758,13 +2758,13 @@ ident_show(struct device *dev, struct device_attribute *attr, char *buf)
struct ccs_module_info *minfo = &sensor->minfo;
if (minfo->mipi_manufacturer_id)
- return snprintf(buf, PAGE_SIZE, "%4.4x%4.4x%2.2x\n",
- minfo->mipi_manufacturer_id, minfo->model_id,
- minfo->revision_number) + 1;
+ return sysfs_emit(buf, "%4.4x%4.4x%2.2x\n",
+ minfo->mipi_manufacturer_id, minfo->model_id,
+ minfo->revision_number) + 1;
else
- return snprintf(buf, PAGE_SIZE, "%2.2x%4.4x%2.2x\n",
- minfo->smia_manufacturer_id, minfo->model_id,
- minfo->revision_number) + 1;
+ return sysfs_emit(buf, "%2.2x%4.4x%2.2x\n",
+ minfo->smia_manufacturer_id, minfo->model_id,
+ minfo->revision_number) + 1;
}
static DEVICE_ATTR_RO(ident);
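sysfs_emit() is the preferred way to format sysfs show() output: it knows the buffer is exactly PAGE_SIZE, warns if the pointer is not page-aligned, and cannot overrun. A minimal show() callback using it (attribute name and value are hypothetical):

static ssize_t chip_rev_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%4.4x\n", 0x5690);	/* example value */
}
static DEVICE_ATTR_RO(chip_rev);

Note that the conversion above deliberately keeps the driver's historical + 1 on the returned length, so the user-visible byte count is unchanged.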
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index 2cf3e6a1f9e1..9d7d1d149f1a 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -136,19 +136,6 @@ static inline u16 count_to_clock_divider(unsigned int d)
return (u16) d;
}
-static inline u16 ns_to_clock_divider(unsigned int ns)
-{
- return count_to_clock_divider(
- DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ / 1000000 * ns, 1000));
-}
-
-static inline unsigned int clock_divider_to_ns(unsigned int divider)
-{
- /* Period of the Rx or Tx clock in ns */
- return DIV_ROUND_CLOSEST((divider + 1) * 1000,
- CX25840_IR_REFCLK_FREQ / 1000000);
-}
-
static inline u16 carrier_freq_to_clock_divider(unsigned int freq)
{
return count_to_clock_divider(
@@ -160,13 +147,6 @@ static inline unsigned int clock_divider_to_carrier_freq(unsigned int divider)
return DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, (divider + 1) * 16);
}
-static inline u16 freq_to_clock_divider(unsigned int freq,
- unsigned int rollovers)
-{
- return count_to_clock_divider(
- DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, freq * rollovers));
-}
-
static inline unsigned int clock_divider_to_freq(unsigned int divider,
unsigned int rollovers)
{
diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
index c086580efac7..65c6acf3ced9 100644
--- a/drivers/media/i2c/dw9768.c
+++ b/drivers/media/i2c/dw9768.c
@@ -469,6 +469,11 @@ static int dw9768_probe(struct i2c_client *client)
dw9768->sd.entity.function = MEDIA_ENT_F_LENS;
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Attempt to turn off the device to satisfy the privacy LED concerns.
+ */
+ pm_runtime_set_active(dev);
pm_runtime_enable(dev);
if (!pm_runtime_enabled(dev)) {
ret = dw9768_runtime_resume(dev);
@@ -483,6 +488,7 @@ static int dw9768_probe(struct i2c_client *client)
dev_err(dev, "failed to register V4L2 subdev: %d", ret);
goto err_power_off;
}
+ pm_runtime_idle(dev);
return 0;
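The sequence being completed here is the standard probe-time runtime-PM bring-up for a device the I2C core has already powered on under ACPI PM: declare it active, enable runtime PM, and finish with an idle request so the device (and its privacy LED) can power down until first use. In outline:

	pm_runtime_set_active(dev);	/* reflect: already powered by i2c-core */
	pm_runtime_enable(dev);		/* runtime PM takes over from here */

	/* ...register the V4L2 subdevice... */

	pm_runtime_idle(dev);		/* allow suspend now that probe is done */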
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 8db1cbedc1fd..055d1aa8410e 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -495,6 +495,9 @@ struct hi556 {
/* Streaming on/off */
bool streaming;
+
+ /* True if the device has been identified */
+ bool identified;
};
static u64 to_pixel_rate(u32 f_index)
@@ -757,12 +760,41 @@ static void hi556_assign_pad_format(const struct hi556_mode *mode,
fmt->field = V4L2_FIELD_NONE;
}
+static int hi556_identify_module(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ int ret;
+ u32 val;
+
+ if (hi556->identified)
+ return 0;
+
+ ret = hi556_read_reg(hi556, HI556_REG_CHIP_ID,
+ HI556_REG_VALUE_16BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != HI556_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ HI556_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ hi556->identified = true;
+
+ return 0;
+}
+
static int hi556_start_streaming(struct hi556 *hi556)
{
struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
const struct hi556_reg_list *reg_list;
int link_freq_index, ret;
+ ret = hi556_identify_module(hi556);
+ if (ret)
+ return ret;
+
link_freq_index = hi556->cur_mode->link_freq_index;
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
@@ -1001,26 +1033,6 @@ static const struct v4l2_subdev_internal_ops hi556_internal_ops = {
.open = hi556_open,
};
-static int hi556_identify_module(struct hi556 *hi556)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
- int ret;
- u32 val;
-
- ret = hi556_read_reg(hi556, HI556_REG_CHIP_ID,
- HI556_REG_VALUE_16BIT, &val);
- if (ret)
- return ret;
-
- if (val != HI556_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
- HI556_CHIP_ID, val);
- return -ENXIO;
- }
-
- return 0;
-}
-
static int hi556_check_hwcfg(struct device *dev)
{
struct fwnode_handle *ep;
@@ -1106,6 +1118,7 @@ static int hi556_remove(struct i2c_client *client)
static int hi556_probe(struct i2c_client *client)
{
struct hi556 *hi556;
+ bool full_power;
int ret;
ret = hi556_check_hwcfg(&client->dev);
@@ -1120,10 +1133,14 @@ static int hi556_probe(struct i2c_client *client)
return -ENOMEM;
v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops);
- ret = hi556_identify_module(hi556);
- if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
- return ret;
+
+ full_power = acpi_dev_state_d0(&client->dev);
+ if (full_power) {
+ ret = hi556_identify_module(hi556);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
}
mutex_init(&hi556->mutex);
@@ -1152,7 +1169,9 @@ static int hi556_probe(struct i2c_client *client)
goto probe_error_media_entity_cleanup;
}
- pm_runtime_set_active(&client->dev);
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
@@ -1189,6 +1208,7 @@ static struct i2c_driver hi556_i2c_driver = {
},
.probe_new = hi556_probe,
.remove = hi556_remove,
+ .flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
module_i2c_driver(hi556_i2c_driver);
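This hi556 change is one instance of a pattern repeated below for imx208, ov2740, ov5670 and ov5675: the driver declares it can be probed while the sensor is in a low-power ACPI state, touches the hardware at probe time only when acpi_dev_state_d0() reports the device is in D0, and otherwise defers chip identification to the first stream-on through the cached identified flag. Condensed into a hypothetical sensor_* driver (error unwinding omitted):

static int sensor_probe(struct i2c_client *client)
{
	bool full_power = acpi_dev_state_d0(&client->dev);
	int ret;

	if (full_power) {
		/* the chip is powered, so the bus may be touched now */
		ret = sensor_identify_module(sensor);
		if (ret)
			return ret;
	}

	/* ...controls, media entity, async subdev registration... */

	if (full_power)
		pm_runtime_set_active(&client->dev);
	pm_runtime_enable(&client->dev);
	pm_runtime_idle(&client->dev);
	return 0;
}

static struct i2c_driver sensor_driver = {
	.probe_new = sensor_probe,
	.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,	/* may probe outside D0 */
};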
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index 48909faeced4..ad35c3ff3611 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -1656,7 +1656,7 @@ err_reg:
return ret;
}
-static void hi846_power_off(struct hi846 *hi846)
+static int hi846_power_off(struct hi846 *hi846)
{
if (hi846->rst_gpio)
gpiod_set_value_cansleep(hi846->rst_gpio, 1);
@@ -1665,7 +1665,7 @@ static void hi846_power_off(struct hi846 *hi846)
gpiod_set_value_cansleep(hi846->shutdown_gpio, 1);
clk_disable_unprepare(hi846->clock);
- regulator_bulk_disable(HI846_NUM_SUPPLIES, hi846->supplies);
+ return regulator_bulk_disable(HI846_NUM_SUPPLIES, hi846->supplies);
}
static int __maybe_unused hi846_suspend(struct device *dev)
@@ -1677,9 +1677,7 @@ static int __maybe_unused hi846_suspend(struct device *dev)
if (hi846->streaming)
hi846_stop_streaming(hi846);
- hi846_power_off(hi846);
-
- return 0;
+ return hi846_power_off(hi846);
}
static int __maybe_unused hi846_resume(struct device *dev)
@@ -2164,7 +2162,11 @@ static int hi846_remove(struct i2c_client *client)
return 0;
}
-static UNIVERSAL_DEV_PM_OPS(hi846_pm_ops, hi846_suspend, hi846_resume, NULL);
+static const struct dev_pm_ops hi846_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(hi846_suspend, hi846_resume, NULL)
+};
static const struct of_device_id hi846_of_match[] = {
{ .compatible = "hynix,hi846", },
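UNIVERSAL_DEV_PM_OPS wired the same pair of callbacks into both the system-sleep and runtime-PM slots, so a device that was already runtime-suspended could see its power-off path run a second time on system suspend. The replacement routes system sleep through the runtime-PM state machine instead, leaving the driver's own callbacks purely as runtime-PM handlers. The general form:

static int demo_runtime_suspend(struct device *dev);	/* powers the chip off */
static int demo_runtime_resume(struct device *dev);	/* powers it back on */

static const struct dev_pm_ops demo_pm_ops = {
	/* system sleep defers to the runtime-PM state machine */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};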
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index 6f3d9c1b5879..b9516b2f1c15 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -296,6 +296,9 @@ struct imx208 {
/* OTP data */
bool otp_read;
char otp_data[IMX208_OTP_SIZE];
+
+ /* True if the device has been identified */
+ bool identified;
};
static inline struct imx208 *to_imx208(struct v4l2_subdev *_sd)
@@ -619,6 +622,34 @@ static int imx208_set_pad_format(struct v4l2_subdev *sd,
return 0;
}
+static int imx208_identify_module(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ int ret;
+ u32 val;
+
+ if (imx208->identified)
+ return 0;
+
+ ret = imx208_read_reg(imx208, IMX208_REG_CHIP_ID,
+ 2, &val);
+ if (ret) {
+ dev_err(&client->dev, "failed to read chip id %x\n",
+ IMX208_CHIP_ID);
+ return ret;
+ }
+
+ if (val != IMX208_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ IMX208_CHIP_ID, val);
+ return -EIO;
+ }
+
+ imx208->identified = true;
+
+ return 0;
+}
+
/* Start streaming */
static int imx208_start_streaming(struct imx208 *imx208)
{
@@ -626,6 +657,10 @@ static int imx208_start_streaming(struct imx208 *imx208)
const struct imx208_reg_list *reg_list;
int ret, link_freq_index;
+ ret = imx208_identify_module(imx208);
+ if (ret)
+ return ret;
+
/* Setup PLL */
link_freq_index = imx208->cur_mode->link_freq_index;
reg_list = &link_freq_configs[link_freq_index].reg_list;
@@ -752,29 +787,6 @@ error:
}
/* Verify chip ID */
-static int imx208_identify_module(struct imx208 *imx208)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
- int ret;
- u32 val;
-
- ret = imx208_read_reg(imx208, IMX208_REG_CHIP_ID,
- 2, &val);
- if (ret) {
- dev_err(&client->dev, "failed to read chip id %x\n",
- IMX208_CHIP_ID);
- return ret;
- }
-
- if (val != IMX208_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
- IMX208_CHIP_ID, val);
- return -EIO;
- }
-
- return 0;
-}
-
static const struct v4l2_subdev_video_ops imx208_video_ops = {
.s_stream = imx208_set_stream,
};
@@ -813,6 +825,10 @@ static int imx208_read_otp(struct imx208 *imx208)
goto out_unlock;
}
+ ret = imx208_identify_module(imx208);
+ if (ret)
+ goto out_pm_put;
+
/* Write register address */
msgs[0].addr = client->addr;
msgs[0].flags = 0;
@@ -831,6 +847,7 @@ static int imx208_read_otp(struct imx208 *imx208)
ret = 0;
}
+out_pm_put:
pm_runtime_put(&client->dev);
out_unlock:
@@ -961,6 +978,7 @@ static int imx208_probe(struct i2c_client *client)
{
struct imx208 *imx208;
int ret;
+ bool full_power;
u32 val = 0;
device_property_read_u32(&client->dev, "clock-frequency", &val);
@@ -978,11 +996,14 @@ static int imx208_probe(struct i2c_client *client)
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx208->sd, client, &imx208_subdev_ops);
- /* Check module identity */
- ret = imx208_identify_module(imx208);
- if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
- goto error_probe;
+ full_power = acpi_dev_state_d0(&client->dev);
+ if (full_power) {
+ /* Check module identity */
+ ret = imx208_identify_module(imx208);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ goto error_probe;
+ }
}
/* Set default mode to max resolution */
@@ -1017,7 +1038,9 @@ static int imx208_probe(struct i2c_client *client)
goto error_async_subdev;
}
- pm_runtime_set_active(&client->dev);
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
@@ -1077,6 +1100,7 @@ static struct i2c_driver imx208_i2c_driver = {
},
.probe_new = imx208_probe,
.remove = imx208_remove,
+ .flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
module_i2c_driver(imx208_i2c_driver);
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 0dce92872176..2aa15b9c23cc 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -27,6 +27,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
/*
@@ -1367,6 +1368,10 @@ static int imx274_s_frame_interval(struct v4l2_subdev *sd,
int min, max, def;
int ret;
+ ret = pm_runtime_resume_and_get(&imx274->client->dev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&imx274->lock);
ret = imx274_set_frame_interval(imx274, fi->interval);
@@ -1398,6 +1403,7 @@ static int imx274_s_frame_interval(struct v4l2_subdev *sd,
unlock:
mutex_unlock(&imx274->lock);
+ pm_runtime_put(&imx274->client->dev);
return ret;
}
@@ -1457,7 +1463,7 @@ static int imx274_s_stream(struct v4l2_subdev *sd, int on)
goto fail;
/*
- * update frame rate & expsoure. if the last mode is different,
+ * update frame rate & exposure. if the last mode is different,
* HMAX could be changed. As the result, frame rate & exposure
* are changed.
* gain is not affected.
@@ -1494,7 +1500,7 @@ fail:
/*
* imx274_get_frame_length - Function for obtaining current frame length
* @priv: Pointer to device structure
- * @val: Pointer to obainted value
+ * @val: Pointer to obtained value
*
* frame_length = vmax x (svr + 1), in unit of hmax.
*
@@ -1904,7 +1910,21 @@ fail:
return err;
}
+static int imx274_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ /* only supported format in the driver is Raw 10 bits SRGGB */
+ code->code = MEDIA_BUS_FMT_SRGGB10_1X10;
+
+ return 0;
+}
+
static const struct v4l2_subdev_pad_ops imx274_pad_ops = {
+ .enum_mbus_code = imx274_enum_mbus_code,
.get_fmt = imx274_get_fmt,
.set_fmt = imx274_set_fmt,
.get_selection = imx274_get_selection,
@@ -1938,27 +1958,66 @@ static const struct i2c_device_id imx274_id[] = {
};
MODULE_DEVICE_TABLE(i2c, imx274_id);
+static int imx274_fwnode_parse(struct device *dev)
+{
+ struct fwnode_handle *endpoint;
+ /* Only CSI2 is supported */
+ struct v4l2_fwnode_endpoint ep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ int ret;
+
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!endpoint) {
+ dev_err(dev, "Endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(endpoint, &ep);
+ fwnode_handle_put(endpoint);
+ if (ret == -ENXIO) {
+ dev_err(dev, "Unsupported bus type, should be CSI2\n");
+ return ret;
+ } else if (ret) {
+ dev_err(dev, "Parsing endpoint node failed %d\n", ret);
+ return ret;
+ }
+
+ /* Check number of data lanes, only 4 lanes supported */
+ if (ep.bus.mipi_csi2.num_data_lanes != 4) {
+ dev_err(dev, "Invalid data lanes: %d\n",
+ ep.bus.mipi_csi2.num_data_lanes);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int imx274_probe(struct i2c_client *client)
{
struct v4l2_subdev *sd;
struct stimx274 *imx274;
+ struct device *dev = &client->dev;
int ret;
/* initialize imx274 */
- imx274 = devm_kzalloc(&client->dev, sizeof(*imx274), GFP_KERNEL);
+ imx274 = devm_kzalloc(dev, sizeof(*imx274), GFP_KERNEL);
if (!imx274)
return -ENOMEM;
mutex_init(&imx274->lock);
- imx274->inck = devm_clk_get_optional(&client->dev, "inck");
+ ret = imx274_fwnode_parse(dev);
+ if (ret)
+ return ret;
+
+ imx274->inck = devm_clk_get_optional(dev, "inck");
if (IS_ERR(imx274->inck))
return PTR_ERR(imx274->inck);
- ret = imx274_regulators_get(&client->dev, imx274);
+ ret = imx274_regulators_get(dev, imx274);
if (ret) {
- dev_err(&client->dev,
- "Failed to get power regulators, err: %d\n", ret);
+ dev_err(dev, "Failed to get power regulators, err: %d\n", ret);
return ret;
}
@@ -1977,7 +2036,7 @@ static int imx274_probe(struct i2c_client *client)
/* initialize regmap */
imx274->regmap = devm_regmap_init_i2c(client, &imx274_regmap_config);
if (IS_ERR(imx274->regmap)) {
- dev_err(&client->dev,
+ dev_err(dev,
"regmap init failed: %ld\n", PTR_ERR(imx274->regmap));
ret = -ENODEV;
goto err_regmap;
@@ -1994,34 +2053,32 @@ static int imx274_probe(struct i2c_client *client)
sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sd->entity, 1, &imx274->pad);
if (ret < 0) {
- dev_err(&client->dev,
+ dev_err(dev,
"%s : media entity init Failed %d\n", __func__, ret);
goto err_regmap;
}
/* initialize sensor reset gpio */
- imx274->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ imx274->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(imx274->reset_gpio)) {
if (PTR_ERR(imx274->reset_gpio) != -EPROBE_DEFER)
- dev_err(&client->dev, "Reset GPIO not setup in DT");
+ dev_err(dev, "Reset GPIO not setup in DT");
ret = PTR_ERR(imx274->reset_gpio);
goto err_me;
}
/* power on the sensor */
- ret = imx274_power_on(&client->dev);
+ ret = imx274_power_on(dev);
if (ret < 0) {
- dev_err(&client->dev,
- "%s : imx274 power on failed\n", __func__);
+ dev_err(dev, "%s : imx274 power on failed\n", __func__);
goto err_me;
}
/* initialize controls */
ret = v4l2_ctrl_handler_init(&imx274->ctrls.handler, 4);
if (ret < 0) {
- dev_err(&client->dev,
- "%s : ctrl handler init Failed\n", __func__);
+ dev_err(dev, "%s : ctrl handler init Failed\n", __func__);
goto err_power_off;
}
@@ -2064,23 +2121,22 @@ static int imx274_probe(struct i2c_client *client)
/* register subdevice */
ret = v4l2_async_register_subdev(sd);
if (ret < 0) {
- dev_err(&client->dev,
- "%s : v4l2_async_register_subdev failed %d\n",
+ dev_err(dev, "%s : v4l2_async_register_subdev failed %d\n",
__func__, ret);
goto err_ctrls;
}
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
- dev_info(&client->dev, "imx274 : imx274 probe success !\n");
+ dev_info(dev, "imx274 : imx274 probe success !\n");
return 0;
err_ctrls:
v4l2_ctrl_handler_free(&imx274->ctrls.handler);
err_power_off:
- imx274_power_off(&client->dev);
+ imx274_power_off(dev);
err_me:
media_entity_cleanup(&sd->entity);
err_regmap:
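pm_runtime_resume_and_get(), used above to keep the sensor powered across the frame-interval register writes, increments the usage count and resumes the device in one call, and drops the count again by itself on failure, so error paths need no pm_runtime_put(). Canonical use:

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already dropped for us */

	/* ...access the hardware... */

	pm_runtime_put(dev);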
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index bf7a6c37ca5d..99f2a50d39a4 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -363,7 +363,7 @@ static inline struct imx290 *to_imx290(struct v4l2_subdev *_sd)
return container_of(_sd, struct imx290, sd);
}
-static inline int imx290_read_reg(struct imx290 *imx290, u16 addr, u8 *value)
+static inline int __always_unused imx290_read_reg(struct imx290 *imx290, u16 addr, u8 *value)
{
unsigned int regval;
int ret;
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index daa976858e29..a2b5a34de76b 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -2565,6 +2565,6 @@ module_i2c_driver(imx319_i2c_driver);
MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
-MODULE_AUTHOR("Yang, Hyungwoo <hyungwoo.yang@intel.com>");
+MODULE_AUTHOR("Yang, Hyungwoo");
MODULE_DESCRIPTION("Sony imx319 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index cb51c81786bd..3922b9305978 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1851,6 +1851,6 @@ module_i2c_driver(imx355_i2c_driver);
MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
-MODULE_AUTHOR("Yang, Hyungwoo <hyungwoo.yang@intel.com>");
+MODULE_AUTHOR("Yang, Hyungwoo");
MODULE_DESCRIPTION("Sony imx355 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 7c663fd587bb..eb2b8e42335b 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -1055,7 +1055,6 @@ static int max9286_register_gpio(struct max9286_priv *priv)
gpio->label = dev_name(dev);
gpio->parent = dev;
gpio->owner = THIS_MODULE;
- gpio->of_node = dev->of_node;
gpio->ngpio = 2;
gpio->base = -1;
gpio->set = max9286_gpio_set;
@@ -1295,11 +1294,9 @@ static int max9286_probe(struct i2c_client *client)
priv->regulator = devm_regulator_get(&client->dev, "poc");
if (IS_ERR(priv->regulator)) {
- if (PTR_ERR(priv->regulator) != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Unable to get PoC regulator (%ld)\n",
- PTR_ERR(priv->regulator));
ret = PTR_ERR(priv->regulator);
+ dev_err_probe(&client->dev, ret,
+ "Unable to get PoC regulator\n");
goto err_powerdown;
}
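dev_err_probe() folds the -EPROBE_DEFER special case into a single call: it returns the error passed in, logs real failures at error level, and logs deferrals at debug level while recording the reason for /sys/kernel/debug/devices_deferred. When no unwinding is needed it collapses to a one-liner (clock lookup used purely as an illustration):

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk))
		return dev_err_probe(dev, PTR_ERR(priv->clk),
				     "failed to get clock\n");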
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index b4d22f5d9933..d5fe67c763f7 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1818,6 +1818,6 @@ module_i2c_driver(ov13858_i2c_driver);
MODULE_AUTHOR("Kan, Chris <chris.kan@intel.com>");
MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
-MODULE_AUTHOR("Yang, Hyungwoo <hyungwoo.yang@intel.com>");
+MODULE_AUTHOR("Yang, Hyungwoo");
MODULE_DESCRIPTION("Omnivision ov13858 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 934c9d65cb09..bab720c7c1de 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -345,6 +345,9 @@ struct ov2740 {
/* NVM data information */
struct nvm_data *nvm;
+
+ /* True if the device has been identified */
+ bool identified;
};
static inline struct ov2740 *to_ov2740(struct v4l2_subdev *subdev)
@@ -440,6 +443,30 @@ static int ov2740_write_reg_list(struct ov2740 *ov2740,
return 0;
}
+static int ov2740_identify_module(struct ov2740 *ov2740)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
+ int ret;
+ u32 val;
+
+ if (ov2740->identified)
+ return 0;
+
+ ret = ov2740_read_reg(ov2740, OV2740_REG_CHIP_ID, 3, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV2740_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ OV2740_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ ov2740->identified = true;
+
+ return 0;
+}
+
static int ov2740_update_digital_gain(struct ov2740 *ov2740, u32 d_gain)
{
int ret = 0;
@@ -724,6 +751,10 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
int link_freq_index;
int ret = 0;
+ ret = ov2740_identify_module(ov2740);
+ if (ret)
+ return ret;
+
ov2740_load_otp_data(nvm);
link_freq_index = ov2740->cur_mode->link_freq_index;
@@ -956,25 +987,6 @@ static const struct v4l2_subdev_internal_ops ov2740_internal_ops = {
.open = ov2740_open,
};
-static int ov2740_identify_module(struct ov2740 *ov2740)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
- int ret;
- u32 val;
-
- ret = ov2740_read_reg(ov2740, OV2740_REG_CHIP_ID, 3, &val);
- if (ret)
- return ret;
-
- if (val != OV2740_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
- OV2740_CHIP_ID, val);
- return -ENXIO;
- }
-
- return 0;
-}
-
static int ov2740_check_hwcfg(struct device *dev)
{
struct fwnode_handle *ep;
@@ -1137,6 +1149,7 @@ static int ov2740_probe(struct i2c_client *client)
{
struct ov2740 *ov2740;
int ret = 0;
+ bool full_power;
ret = ov2740_check_hwcfg(&client->dev);
if (ret) {
@@ -1149,6 +1162,15 @@ static int ov2740_probe(struct i2c_client *client)
if (!ov2740)
return -ENOMEM;
+ full_power = acpi_dev_state_d0(&client->dev);
+ if (full_power) {
+ ret = ov2740_identify_module(ov2740);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+ }
+
v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
ret = ov2740_identify_module(ov2740);
if (ret) {
@@ -1186,11 +1208,9 @@ static int ov2740_probe(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "register nvmem failed, ret %d\n", ret);
- /*
- * Device is already turned on by i2c-core with ACPI domain PM.
- * Enable runtime PM and turn off the device.
- */
- pm_runtime_set_active(&client->dev);
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
@@ -1225,6 +1245,7 @@ static struct i2c_driver ov2740_i2c_driver = {
},
.probe_new = ov2740_probe,
.remove = ov2740_remove,
+ .flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
module_i2c_driver(ov2740_i2c_driver);
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 251f459ab484..02f75c18e480 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -1833,6 +1833,8 @@ struct ov5670 {
/* Streaming on/off */
bool streaming;
+ /* True if the device has been identified */
+ bool identified;
};
#define to_ov5670(_sd) container_of(_sd, struct ov5670, sd)
@@ -2273,6 +2275,32 @@ static int ov5670_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
return 0;
}
+/* Verify chip ID */
+static int ov5670_identify_module(struct ov5670 *ov5670)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
+ int ret;
+ u32 val;
+
+ if (ov5670->identified)
+ return 0;
+
+ ret = ov5670_read_reg(ov5670, OV5670_REG_CHIP_ID,
+ OV5670_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV5670_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV5670_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ ov5670->identified = true;
+
+ return 0;
+}
+
/* Prepare streaming by writing default values and customized values */
static int ov5670_start_streaming(struct ov5670 *ov5670)
{
@@ -2281,6 +2309,10 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
int link_freq_index;
int ret;
+ ret = ov5670_identify_module(ov5670);
+ if (ret)
+ return ret;
+
/* Get out of from software reset */
ret = ov5670_write_reg(ov5670, OV5670_REG_SOFTWARE_RST,
OV5670_REG_VALUE_08BIT, OV5670_SOFTWARE_RST);
@@ -2400,27 +2432,6 @@ static int __maybe_unused ov5670_resume(struct device *dev)
return 0;
}
-/* Verify chip ID */
-static int ov5670_identify_module(struct ov5670 *ov5670)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
- int ret;
- u32 val;
-
- ret = ov5670_read_reg(ov5670, OV5670_REG_CHIP_ID,
- OV5670_REG_VALUE_24BIT, &val);
- if (ret)
- return ret;
-
- if (val != OV5670_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
- OV5670_CHIP_ID, val);
- return -ENXIO;
- }
-
- return 0;
-}
-
static const struct v4l2_subdev_core_ops ov5670_core_ops = {
.log_status = v4l2_ctrl_subdev_log_status,
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
@@ -2462,6 +2473,7 @@ static int ov5670_probe(struct i2c_client *client)
struct ov5670 *ov5670;
const char *err_msg;
u32 input_clk = 0;
+ bool full_power;
int ret;
device_property_read_u32(&client->dev, "clock-frequency", &input_clk);
@@ -2478,11 +2490,14 @@ static int ov5670_probe(struct i2c_client *client)
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov5670->sd, client, &ov5670_subdev_ops);
- /* Check module identity */
- ret = ov5670_identify_module(ov5670);
- if (ret) {
- err_msg = "ov5670_identify_module() error";
- goto error_print;
+ full_power = acpi_dev_state_d0(&client->dev);
+ if (full_power) {
+ /* Check module identity */
+ ret = ov5670_identify_module(ov5670);
+ if (ret) {
+ err_msg = "ov5670_identify_module() error";
+ goto error_print;
+ }
}
mutex_init(&ov5670->mutex);
@@ -2519,11 +2534,9 @@ static int ov5670_probe(struct i2c_client *client)
ov5670->streaming = false;
- /*
- * Device is already turned on by i2c-core with ACPI domain PM.
- * Enable runtime PM and turn off the device.
- */
- pm_runtime_set_active(&client->dev);
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
@@ -2565,7 +2578,7 @@ static const struct dev_pm_ops ov5670_pm_ops = {
#ifdef CONFIG_ACPI
static const struct acpi_device_id ov5670_acpi_ids[] = {
- {"INT3479"},
+ { "INT3479" },
{ /* sentinel */ }
};
@@ -2580,11 +2593,12 @@ static struct i2c_driver ov5670_i2c_driver = {
},
.probe_new = ov5670_probe,
.remove = ov5670_remove,
+ .flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
module_i2c_driver(ov5670_i2c_driver);
MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
-MODULE_AUTHOR("Yang, Hyungwoo <hyungwoo.yang@intel.com>");
+MODULE_AUTHOR("Yang, Hyungwoo");
MODULE_DESCRIPTION("Omnivision ov5670 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index da5850b7ad07..00925850fa7c 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -493,6 +493,9 @@ struct ov5675 {
/* Streaming on/off */
bool streaming;
+
+ /* True if the device has been identified */
+ bool identified;
};
static u64 to_pixel_rate(u32 f_index)
@@ -808,12 +811,41 @@ static void ov5675_update_pad_format(const struct ov5675_mode *mode,
fmt->field = V4L2_FIELD_NONE;
}
+static int ov5675_identify_module(struct ov5675 *ov5675)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
+ int ret;
+ u32 val;
+
+ if (ov5675->identified)
+ return 0;
+
+ ret = ov5675_read_reg(ov5675, OV5675_REG_CHIP_ID,
+ OV5675_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV5675_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ OV5675_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ ov5675->identified = true;
+
+ return 0;
+}
+
static int ov5675_start_streaming(struct ov5675 *ov5675)
{
struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
const struct ov5675_reg_list *reg_list;
int link_freq_index, ret;
+ ret = ov5675_identify_module(ov5675);
+ if (ret)
+ return ret;
+
link_freq_index = ov5675->cur_mode->link_freq_index;
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov5675_write_reg_list(ov5675, reg_list);
@@ -1048,26 +1080,6 @@ static const struct v4l2_subdev_internal_ops ov5675_internal_ops = {
.open = ov5675_open,
};
-static int ov5675_identify_module(struct ov5675 *ov5675)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
- int ret;
- u32 val;
-
- ret = ov5675_read_reg(ov5675, OV5675_REG_CHIP_ID,
- OV5675_REG_VALUE_24BIT, &val);
- if (ret)
- return ret;
-
- if (val != OV5675_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
- OV5675_CHIP_ID, val);
- return -ENXIO;
- }
-
- return 0;
-}
-
static int ov5675_check_hwcfg(struct device *dev)
{
struct fwnode_handle *ep;
@@ -1154,6 +1166,7 @@ static int ov5675_remove(struct i2c_client *client)
static int ov5675_probe(struct i2c_client *client)
{
struct ov5675 *ov5675;
+ bool full_power;
int ret;
ret = ov5675_check_hwcfg(&client->dev);
@@ -1168,10 +1181,14 @@ static int ov5675_probe(struct i2c_client *client)
return -ENOMEM;
v4l2_i2c_subdev_init(&ov5675->sd, client, &ov5675_subdev_ops);
- ret = ov5675_identify_module(ov5675);
- if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
- return ret;
+
+ full_power = acpi_dev_state_d0(&client->dev);
+ if (full_power) {
+ ret = ov5675_identify_module(ov5675);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
}
mutex_init(&ov5675->mutex);
@@ -1204,7 +1221,10 @@ static int ov5675_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
+
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
@@ -1241,6 +1261,7 @@ static struct i2c_driver ov5675_i2c_driver = {
},
.probe_new = ov5675_probe,
.remove = ov5675_remove,
+ .flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
module_i2c_driver(ov5675_i2c_driver);
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
new file mode 100644
index 000000000000..2784fcf67f3b
--- /dev/null
+++ b/drivers/media/i2c/ov5693.c
@@ -0,0 +1,1537 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * Adapted from the atomisp-ov5693 driver, with contributions from:
+ *
+ * Daniel Scally
+ * Jean-Michel Hautbois
+ * Fabian Wuthrich
+ * Tsuchiya Yuto
+ * Jordan Hand
+ * Jake Day
+ */
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV5693_REG_8BIT(n) ((1 << 16) | (n))
+#define OV5693_REG_16BIT(n) ((2 << 16) | (n))
+#define OV5693_REG_24BIT(n) ((3 << 16) | (n))
+#define OV5693_REG_SIZE_SHIFT 16
+#define OV5693_REG_ADDR_MASK 0xffff
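/*
 * Worked example (illustration only, not part of the driver): the access
 * size is encoded in bits [17:16] and the register address in bits [15:0],
 * so OV5693_REG_24BIT(0x3500) == (3 << 16) | 0x3500 == 0x33500, and the
 * I/O helpers below recover the two fields with:
 *
 *	len  = (0x33500 >> OV5693_REG_SIZE_SHIFT) & 3;	// 3 bytes
 *	addr = 0x33500 & OV5693_REG_ADDR_MASK;		// 0x3500
 */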
+
+/* System Control */
+#define OV5693_SW_RESET_REG OV5693_REG_8BIT(0x0103)
+#define OV5693_SW_STREAM_REG OV5693_REG_8BIT(0x0100)
+#define OV5693_START_STREAMING 0x01
+#define OV5693_STOP_STREAMING 0x00
+#define OV5693_SW_RESET 0x01
+
+#define OV5693_REG_CHIP_ID OV5693_REG_16BIT(0x300a)
+/* Yes, this is right. The datasheet for the OV5693 gives its ID as 0x5690 */
+#define OV5693_CHIP_ID 0x5690
+
+/* Exposure */
+#define OV5693_EXPOSURE_CTRL_REG OV5693_REG_24BIT(0x3500)
+#define OV5693_EXPOSURE_CTRL_MASK GENMASK(19, 4)
+#define OV5693_INTEGRATION_TIME_MARGIN 8
+#define OV5693_EXPOSURE_MIN 1
+#define OV5693_EXPOSURE_STEP 1
+
+/* Analogue Gain */
+#define OV5693_GAIN_CTRL_REG OV5693_REG_16BIT(0x350a)
+#define OV5693_GAIN_CTRL_MASK GENMASK(10, 4)
+#define OV5693_GAIN_MIN 1
+#define OV5693_GAIN_MAX 127
+#define OV5693_GAIN_DEF 8
+#define OV5693_GAIN_STEP 1
+
+/* Digital Gain */
+#define OV5693_MWB_RED_GAIN_REG OV5693_REG_16BIT(0x3400)
+#define OV5693_MWB_GREEN_GAIN_REG OV5693_REG_16BIT(0x3402)
+#define OV5693_MWB_BLUE_GAIN_REG OV5693_REG_16BIT(0x3404)
+#define OV5693_MWB_GAIN_MASK GENMASK(11, 0)
+#define OV5693_MWB_GAIN_MAX 0x0fff
+#define OV5693_DIGITAL_GAIN_MIN 1
+#define OV5693_DIGITAL_GAIN_MAX 4095
+#define OV5693_DIGITAL_GAIN_DEF 1024
+#define OV5693_DIGITAL_GAIN_STEP 1
+
+/* Timing and Format */
+#define OV5693_CROP_START_X_REG OV5693_REG_16BIT(0x3800)
+#define OV5693_CROP_START_Y_REG OV5693_REG_16BIT(0x3802)
+#define OV5693_CROP_END_X_REG OV5693_REG_16BIT(0x3804)
+#define OV5693_CROP_END_Y_REG OV5693_REG_16BIT(0x3806)
+#define OV5693_OUTPUT_SIZE_X_REG OV5693_REG_16BIT(0x3808)
+#define OV5693_OUTPUT_SIZE_Y_REG OV5693_REG_16BIT(0x380a)
+
+#define OV5693_TIMING_HTS_REG OV5693_REG_16BIT(0x380c)
+#define OV5693_FIXED_PPL 2688U
+#define OV5693_TIMING_VTS_REG OV5693_REG_16BIT(0x380e)
+#define OV5693_TIMING_MAX_VTS 0xffff
+#define OV5693_TIMING_MIN_VTS 0x04
+
+#define OV5693_OFFSET_START_X_REG OV5693_REG_16BIT(0x3810)
+#define OV5693_OFFSET_START_Y_REG OV5693_REG_16BIT(0x3812)
+
+#define OV5693_SUB_INC_X_REG OV5693_REG_8BIT(0x3814)
+#define OV5693_SUB_INC_Y_REG OV5693_REG_8BIT(0x3815)
+
+#define OV5693_FORMAT1_REG OV5693_REG_8BIT(0x3820)
+#define OV5693_FORMAT1_FLIP_VERT_ISP_EN BIT(6)
+#define OV5693_FORMAT1_FLIP_VERT_SENSOR_EN BIT(1)
+#define OV5693_FORMAT1_VBIN_EN BIT(0)
+#define OV5693_FORMAT2_REG OV5693_REG_8BIT(0x3821)
+#define OV5693_FORMAT2_HDR_EN BIT(7)
+#define OV5693_FORMAT2_FLIP_HORZ_ISP_EN BIT(2)
+#define OV5693_FORMAT2_FLIP_HORZ_SENSOR_EN BIT(1)
+#define OV5693_FORMAT2_HBIN_EN BIT(0)
+
+#define OV5693_ISP_CTRL2_REG OV5693_REG_8BIT(0x5002)
+#define OV5693_ISP_SCALE_ENABLE BIT(7)
+
+/* Pixel Array */
+#define OV5693_NATIVE_WIDTH 2624
+#define OV5693_NATIVE_HEIGHT 1956
+#define OV5693_NATIVE_START_LEFT 0
+#define OV5693_NATIVE_START_TOP 0
+#define OV5693_ACTIVE_WIDTH 2592
+#define OV5693_ACTIVE_HEIGHT 1944
+#define OV5693_ACTIVE_START_LEFT 16
+#define OV5693_ACTIVE_START_TOP 6
+#define OV5693_MIN_CROP_WIDTH 2
+#define OV5693_MIN_CROP_HEIGHT 2
+
+/* Test Pattern */
+#define OV5693_TEST_PATTERN_REG OV5693_REG_8BIT(0x5e00)
+#define OV5693_TEST_PATTERN_ENABLE BIT(7)
+#define OV5693_TEST_PATTERN_ROLLING BIT(6)
+#define OV5693_TEST_PATTERN_RANDOM 0x01
+#define OV5693_TEST_PATTERN_BARS 0x00
+
+/* System Frequencies */
+#define OV5693_XVCLK_FREQ 19200000
+#define OV5693_LINK_FREQ_419_2MHZ 419200000
+#define OV5693_PIXEL_RATE 167680000
+
+/* Miscellaneous */
+#define OV5693_NUM_SUPPLIES 2
+
+#define to_ov5693_sensor(x) container_of(x, struct ov5693_device, sd)
+
+struct ov5693_reg {
+ u32 reg;
+ u8 val;
+};
+
+struct ov5693_reg_list {
+ u32 num_regs;
+ const struct ov5693_reg *regs;
+};
+
+struct ov5693_device {
+ struct i2c_client *client;
+ struct device *dev;
+
+ /* Protect against concurrent changes to controls */
+ struct mutex lock;
+
+ struct gpio_desc *reset;
+ struct gpio_desc *powerdown;
+ struct regulator_bulk_data supplies[OV5693_NUM_SUPPLIES];
+ struct clk *clk;
+
+ struct ov5693_mode {
+ struct v4l2_rect crop;
+ struct v4l2_mbus_framefmt format;
+ bool binning_x;
+ bool binning_y;
+ unsigned int inc_x_odd;
+ unsigned int inc_y_odd;
+ unsigned int vts;
+ } mode;
+ bool streaming;
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct ov5693_v4l2_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *analogue_gain;
+ struct v4l2_ctrl *digital_gain;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *test_pattern;
+ } ctrls;
+};
+
+static const struct ov5693_reg ov5693_global_regs[] = {
+ {OV5693_REG_8BIT(0x3016), 0xf0},
+ {OV5693_REG_8BIT(0x3017), 0xf0},
+ {OV5693_REG_8BIT(0x3018), 0xf0},
+ {OV5693_REG_8BIT(0x3022), 0x01},
+ {OV5693_REG_8BIT(0x3028), 0x44},
+ {OV5693_REG_8BIT(0x3098), 0x02},
+ {OV5693_REG_8BIT(0x3099), 0x19},
+ {OV5693_REG_8BIT(0x309a), 0x02},
+ {OV5693_REG_8BIT(0x309b), 0x01},
+ {OV5693_REG_8BIT(0x309c), 0x00},
+ {OV5693_REG_8BIT(0x30a0), 0xd2},
+ {OV5693_REG_8BIT(0x30a2), 0x01},
+ {OV5693_REG_8BIT(0x30b2), 0x00},
+ {OV5693_REG_8BIT(0x30b3), 0x83},
+ {OV5693_REG_8BIT(0x30b4), 0x03},
+ {OV5693_REG_8BIT(0x30b5), 0x04},
+ {OV5693_REG_8BIT(0x30b6), 0x01},
+ {OV5693_REG_8BIT(0x3080), 0x01},
+ {OV5693_REG_8BIT(0x3104), 0x21},
+ {OV5693_REG_8BIT(0x3106), 0x00},
+ {OV5693_REG_8BIT(0x3406), 0x01},
+ {OV5693_REG_8BIT(0x3503), 0x07},
+ {OV5693_REG_8BIT(0x350b), 0x40},
+ {OV5693_REG_8BIT(0x3601), 0x0a},
+ {OV5693_REG_8BIT(0x3602), 0x38},
+ {OV5693_REG_8BIT(0x3612), 0x80},
+ {OV5693_REG_8BIT(0x3620), 0x54},
+ {OV5693_REG_8BIT(0x3621), 0xc7},
+ {OV5693_REG_8BIT(0x3622), 0x0f},
+ {OV5693_REG_8BIT(0x3625), 0x10},
+ {OV5693_REG_8BIT(0x3630), 0x55},
+ {OV5693_REG_8BIT(0x3631), 0xf4},
+ {OV5693_REG_8BIT(0x3632), 0x00},
+ {OV5693_REG_8BIT(0x3633), 0x34},
+ {OV5693_REG_8BIT(0x3634), 0x02},
+ {OV5693_REG_8BIT(0x364d), 0x0d},
+ {OV5693_REG_8BIT(0x364f), 0xdd},
+ {OV5693_REG_8BIT(0x3660), 0x04},
+ {OV5693_REG_8BIT(0x3662), 0x10},
+ {OV5693_REG_8BIT(0x3663), 0xf1},
+ {OV5693_REG_8BIT(0x3665), 0x00},
+ {OV5693_REG_8BIT(0x3666), 0x20},
+ {OV5693_REG_8BIT(0x3667), 0x00},
+ {OV5693_REG_8BIT(0x366a), 0x80},
+ {OV5693_REG_8BIT(0x3680), 0xe0},
+ {OV5693_REG_8BIT(0x3681), 0x00},
+ {OV5693_REG_8BIT(0x3700), 0x42},
+ {OV5693_REG_8BIT(0x3701), 0x14},
+ {OV5693_REG_8BIT(0x3702), 0xa0},
+ {OV5693_REG_8BIT(0x3703), 0xd8},
+ {OV5693_REG_8BIT(0x3704), 0x78},
+ {OV5693_REG_8BIT(0x3705), 0x02},
+ {OV5693_REG_8BIT(0x370a), 0x00},
+ {OV5693_REG_8BIT(0x370b), 0x20},
+ {OV5693_REG_8BIT(0x370c), 0x0c},
+ {OV5693_REG_8BIT(0x370d), 0x11},
+ {OV5693_REG_8BIT(0x370e), 0x00},
+ {OV5693_REG_8BIT(0x370f), 0x40},
+ {OV5693_REG_8BIT(0x3710), 0x00},
+ {OV5693_REG_8BIT(0x371a), 0x1c},
+ {OV5693_REG_8BIT(0x371b), 0x05},
+ {OV5693_REG_8BIT(0x371c), 0x01},
+ {OV5693_REG_8BIT(0x371e), 0xa1},
+ {OV5693_REG_8BIT(0x371f), 0x0c},
+ {OV5693_REG_8BIT(0x3721), 0x00},
+ {OV5693_REG_8BIT(0x3724), 0x10},
+ {OV5693_REG_8BIT(0x3726), 0x00},
+ {OV5693_REG_8BIT(0x372a), 0x01},
+ {OV5693_REG_8BIT(0x3730), 0x10},
+ {OV5693_REG_8BIT(0x3738), 0x22},
+ {OV5693_REG_8BIT(0x3739), 0xe5},
+ {OV5693_REG_8BIT(0x373a), 0x50},
+ {OV5693_REG_8BIT(0x373b), 0x02},
+ {OV5693_REG_8BIT(0x373c), 0x41},
+ {OV5693_REG_8BIT(0x373f), 0x02},
+ {OV5693_REG_8BIT(0x3740), 0x42},
+ {OV5693_REG_8BIT(0x3741), 0x02},
+ {OV5693_REG_8BIT(0x3742), 0x18},
+ {OV5693_REG_8BIT(0x3743), 0x01},
+ {OV5693_REG_8BIT(0x3744), 0x02},
+ {OV5693_REG_8BIT(0x3747), 0x10},
+ {OV5693_REG_8BIT(0x374c), 0x04},
+ {OV5693_REG_8BIT(0x3751), 0xf0},
+ {OV5693_REG_8BIT(0x3752), 0x00},
+ {OV5693_REG_8BIT(0x3753), 0x00},
+ {OV5693_REG_8BIT(0x3754), 0xc0},
+ {OV5693_REG_8BIT(0x3755), 0x00},
+ {OV5693_REG_8BIT(0x3756), 0x1a},
+ {OV5693_REG_8BIT(0x3758), 0x00},
+ {OV5693_REG_8BIT(0x3759), 0x0f},
+ {OV5693_REG_8BIT(0x376b), 0x44},
+ {OV5693_REG_8BIT(0x375c), 0x04},
+ {OV5693_REG_8BIT(0x3774), 0x10},
+ {OV5693_REG_8BIT(0x3776), 0x00},
+ {OV5693_REG_8BIT(0x377f), 0x08},
+ {OV5693_REG_8BIT(0x3780), 0x22},
+ {OV5693_REG_8BIT(0x3781), 0x0c},
+ {OV5693_REG_8BIT(0x3784), 0x2c},
+ {OV5693_REG_8BIT(0x3785), 0x1e},
+ {OV5693_REG_8BIT(0x378f), 0xf5},
+ {OV5693_REG_8BIT(0x3791), 0xb0},
+ {OV5693_REG_8BIT(0x3795), 0x00},
+ {OV5693_REG_8BIT(0x3796), 0x64},
+ {OV5693_REG_8BIT(0x3797), 0x11},
+ {OV5693_REG_8BIT(0x3798), 0x30},
+ {OV5693_REG_8BIT(0x3799), 0x41},
+ {OV5693_REG_8BIT(0x379a), 0x07},
+ {OV5693_REG_8BIT(0x379b), 0xb0},
+ {OV5693_REG_8BIT(0x379c), 0x0c},
+ {OV5693_REG_8BIT(0x3a04), 0x06},
+ {OV5693_REG_8BIT(0x3a05), 0x14},
+ {OV5693_REG_8BIT(0x3e07), 0x20},
+ {OV5693_REG_8BIT(0x4000), 0x08},
+ {OV5693_REG_8BIT(0x4001), 0x04},
+ {OV5693_REG_8BIT(0x4004), 0x08},
+ {OV5693_REG_8BIT(0x4006), 0x20},
+ {OV5693_REG_8BIT(0x4008), 0x24},
+ {OV5693_REG_8BIT(0x4009), 0x10},
+ {OV5693_REG_8BIT(0x4058), 0x00},
+ {OV5693_REG_8BIT(0x4101), 0xb2},
+ {OV5693_REG_8BIT(0x4307), 0x31},
+ {OV5693_REG_8BIT(0x4511), 0x05},
+ {OV5693_REG_8BIT(0x4512), 0x01},
+ {OV5693_REG_8BIT(0x481f), 0x30},
+ {OV5693_REG_8BIT(0x4826), 0x2c},
+ {OV5693_REG_8BIT(0x4d02), 0xfd},
+ {OV5693_REG_8BIT(0x4d03), 0xf5},
+ {OV5693_REG_8BIT(0x4d04), 0x0c},
+ {OV5693_REG_8BIT(0x4d05), 0xcc},
+ {OV5693_REG_8BIT(0x4837), 0x0a},
+ {OV5693_REG_8BIT(0x5003), 0x20},
+ {OV5693_REG_8BIT(0x5013), 0x00},
+ {OV5693_REG_8BIT(0x5842), 0x01},
+ {OV5693_REG_8BIT(0x5843), 0x2b},
+ {OV5693_REG_8BIT(0x5844), 0x01},
+ {OV5693_REG_8BIT(0x5845), 0x92},
+ {OV5693_REG_8BIT(0x5846), 0x01},
+ {OV5693_REG_8BIT(0x5847), 0x8f},
+ {OV5693_REG_8BIT(0x5848), 0x01},
+ {OV5693_REG_8BIT(0x5849), 0x0c},
+ {OV5693_REG_8BIT(0x5e10), 0x0c},
+ {OV5693_REG_8BIT(0x3820), 0x00},
+ {OV5693_REG_8BIT(0x3821), 0x1e},
+ {OV5693_REG_8BIT(0x5041), 0x14}
+};
+
+static const struct ov5693_reg_list ov5693_global_setting = {
+ .num_regs = ARRAY_SIZE(ov5693_global_regs),
+ .regs = ov5693_global_regs,
+};
+
+static const struct v4l2_rect ov5693_default_crop = {
+ .left = OV5693_ACTIVE_START_LEFT,
+ .top = OV5693_ACTIVE_START_TOP,
+ .width = OV5693_ACTIVE_WIDTH,
+ .height = OV5693_ACTIVE_HEIGHT,
+};
+
+static const struct v4l2_mbus_framefmt ov5693_default_fmt = {
+ .width = OV5693_ACTIVE_WIDTH,
+ .height = OV5693_ACTIVE_HEIGHT,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV5693_LINK_FREQ_419_2MHZ
+};
+
+static const char * const ov5693_supply_names[] = {
+ "avdd",
+ "dovdd",
+};
+
+static const char * const ov5693_test_pattern_menu[] = {
+ "Disabled",
+ "Random Data",
+ "Colour Bars",
+ "Colour Bars with Rolling Bar"
+};
+
+static const u8 ov5693_test_pattern_bits[] = {
+ 0,
+ OV5693_TEST_PATTERN_ENABLE | OV5693_TEST_PATTERN_RANDOM,
+ OV5693_TEST_PATTERN_ENABLE | OV5693_TEST_PATTERN_BARS,
+ OV5693_TEST_PATTERN_ENABLE | OV5693_TEST_PATTERN_BARS |
+ OV5693_TEST_PATTERN_ROLLING,
+};
+
+/* I2C I/O Operations */
+
+static int ov5693_read_reg(struct ov5693_device *ov5693, u32 addr, u32 *value)
+{
+ struct i2c_client *client = ov5693->client;
+ __be16 reg;
+ u8 val[4];
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = (u8 *)&reg,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .buf = (u8 *)&val,
+ },
+ };
+ unsigned int len = ((addr >> OV5693_REG_SIZE_SHIFT) & 3);
+ unsigned int i;
+ int ret;
+
+ reg = cpu_to_be16(addr & OV5693_REG_ADDR_MASK);
+
+ msg[1].len = len;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to read register 0x%04x: %d\n",
+ addr & OV5693_REG_ADDR_MASK, ret);
+
+ *value = 0;
+ for (i = 0; i < len; ++i) {
+ *value <<= 8;
+ *value |= val[i];
+ }
+
+ return 0;
+}
+
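+/*
+ * ov5693_write_reg() uses a cumulative error pattern: the write is skipped
+ * if *error is already set, so callers can chain several writes and check
+ * for errors just once (see ov5693_digital_gain_configure() for an example).
+ */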
+static void ov5693_write_reg(struct ov5693_device *ov5693, u32 addr, u32 value,
+ int *error)
+{
+ struct i2c_client *client = ov5693->client;
+ struct {
+ __be16 reg;
+ u8 val[4];
+ } __packed buf;
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .buf = (u8 *)&buf,
+ };
+ unsigned int len = ((addr >> OV5693_REG_SIZE_SHIFT) & 3);
+ unsigned int i;
+ int ret;
+
+ if (*error < 0)
+ return;
+
+ buf.reg = cpu_to_be16(addr & OV5693_REG_ADDR_MASK);
+ for (i = 0; i < len; ++i) {
+ buf.val[len - i - 1] = value & 0xff;
+ value >>= 8;
+ }
+
+ msg.len = len + 2;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to write register 0x%04x: %d\n",
+ addr & OV5693_REG_ADDR_MASK, ret);
+ *error = ret;
+ }
+}
+
+static int ov5693_write_reg_array(struct ov5693_device *ov5693,
+ const struct ov5693_reg_list *reglist)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < reglist->num_regs; i++)
+ ov5693_write_reg(ov5693, reglist->regs[i].reg,
+ reglist->regs[i].val, &ret);
+
+ return ret;
+}
+
+static int ov5693_update_bits(struct ov5693_device *ov5693, u32 address,
+ u32 mask, u32 bits)
+{
+ u32 value = 0;
+ int ret;
+
+ ret = ov5693_read_reg(ov5693, address, &value);
+ if (ret)
+ return ret;
+
+ value &= ~mask;
+ value |= bits;
+
+ ov5693_write_reg(ov5693, address, value, &ret);
+
+ return ret;
+}
+
+/* V4L2 Controls Functions */
+
+static int ov5693_flip_vert_configure(struct ov5693_device *ov5693,
+ bool enable)
+{
+ u8 bits = OV5693_FORMAT1_FLIP_VERT_ISP_EN |
+ OV5693_FORMAT1_FLIP_VERT_SENSOR_EN;
+
+	return ov5693_update_bits(ov5693, OV5693_FORMAT1_REG, bits,
+				  enable ? bits : 0);
+}
+
+static int ov5693_flip_horz_configure(struct ov5693_device *ov5693,
+ bool enable)
+{
+ u8 bits = OV5693_FORMAT2_FLIP_HORZ_ISP_EN |
+ OV5693_FORMAT2_FLIP_HORZ_SENSOR_EN;
+
+	return ov5693_update_bits(ov5693, OV5693_FORMAT2_REG, bits,
+				  enable ? bits : 0);
+}
+
+static int ov5693_get_exposure(struct ov5693_device *ov5693, s32 *value)
+{
+ u32 exposure;
+ int ret;
+
+ ret = ov5693_read_reg(ov5693, OV5693_EXPOSURE_CTRL_REG, &exposure);
+ if (ret)
+ return ret;
+
+ /* The lowest 4 bits are unsupported fractional bits */
+ *value = exposure >> 4;
+
+ return 0;
+}
+
+static int ov5693_exposure_configure(struct ov5693_device *ov5693,
+ u32 exposure)
+{
+ int ret = 0;
+
+ exposure = (exposure << 4) & OV5693_EXPOSURE_CTRL_MASK;
+
+ ov5693_write_reg(ov5693, OV5693_EXPOSURE_CTRL_REG, exposure, &ret);
+
+ return ret;
+}
+
+static int ov5693_get_gain(struct ov5693_device *ov5693, u32 *gain)
+{
+ u32 value;
+ int ret;
+
+ ret = ov5693_read_reg(ov5693, OV5693_GAIN_CTRL_REG, &value);
+ if (ret)
+ return ret;
+
+ /* As with exposure, the lowest 4 bits are fractional bits. */
+ *gain = value >> 4;
+
+	return 0;
+}
+
+static int ov5693_digital_gain_configure(struct ov5693_device *ov5693,
+ u32 gain)
+{
+ int ret = 0;
+
+ gain &= OV5693_MWB_GAIN_MASK;
+
+ ov5693_write_reg(ov5693, OV5693_MWB_RED_GAIN_REG, gain, &ret);
+ ov5693_write_reg(ov5693, OV5693_MWB_GREEN_GAIN_REG, gain, &ret);
+ ov5693_write_reg(ov5693, OV5693_MWB_BLUE_GAIN_REG, gain, &ret);
+
+ return ret;
+}
+
+static int ov5693_analog_gain_configure(struct ov5693_device *ov5693, u32 gain)
+{
+ int ret = 0;
+
+ gain = (gain << 4) & OV5693_GAIN_CTRL_MASK;
+
+ ov5693_write_reg(ov5693, OV5693_GAIN_CTRL_REG, gain, &ret);
+
+ return ret;
+}
+
+static int ov5693_vts_configure(struct ov5693_device *ov5693, u32 vblank)
+{
+ u16 vts = ov5693->mode.format.height + vblank;
+ int ret = 0;
+
+ ov5693_write_reg(ov5693, OV5693_TIMING_VTS_REG, vts, &ret);
+
+ return ret;
+}
+
+static int ov5693_test_pattern_configure(struct ov5693_device *ov5693, u32 idx)
+{
+ int ret = 0;
+
+ ov5693_write_reg(ov5693, OV5693_TEST_PATTERN_REG,
+ ov5693_test_pattern_bits[idx], &ret);
+
+ return ret;
+}
+
+static int ov5693_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5693_device *ov5693 =
+ container_of(ctrl->handler, struct ov5693_device, ctrls.handler);
+ int ret = 0;
+
+ /* If VBLANK is altered we need to update exposure to compensate */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ int exposure_max;
+
+ exposure_max = ov5693->mode.format.height + ctrl->val -
+ OV5693_INTEGRATION_TIME_MARGIN;
+ __v4l2_ctrl_modify_range(ov5693->ctrls.exposure,
+ ov5693->ctrls.exposure->minimum,
+ exposure_max,
+ ov5693->ctrls.exposure->step,
+ min(ov5693->ctrls.exposure->val,
+ exposure_max));
+ }
+
+ /* Only apply changes to the controls if the device is powered up */
+ if (!pm_runtime_get_if_in_use(ov5693->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ ret = ov5693_exposure_configure(ov5693, ctrl->val);
+ break;
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov5693_analog_gain_configure(ov5693, ctrl->val);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov5693_digital_gain_configure(ov5693, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ ret = ov5693_flip_horz_configure(ov5693, !!ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = ov5693_flip_vert_configure(ov5693, !!ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = ov5693_vts_configure(ov5693, ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov5693_test_pattern_configure(ov5693, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put(ov5693->dev);
+
+ return ret;
+}
+
+static int ov5693_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5693_device *ov5693 = container_of(ctrl->handler,
+ struct ov5693_device,
+ ctrls.handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ return ov5693_get_exposure(ov5693, &ctrl->val);
+ case V4L2_CID_AUTOGAIN:
+ return ov5693_get_gain(ov5693, &ctrl->val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ctrl_ops ov5693_ctrl_ops = {
+ .s_ctrl = ov5693_s_ctrl,
+ .g_volatile_ctrl = ov5693_g_volatile_ctrl
+};
+
+/* System Control Functions */
+
+static int ov5693_mode_configure(struct ov5693_device *ov5693)
+{
+ const struct ov5693_mode *mode = &ov5693->mode;
+ int ret = 0;
+
+ /* Crop Start X */
+ ov5693_write_reg(ov5693, OV5693_CROP_START_X_REG, mode->crop.left,
+ &ret);
+
+ /* Offset X */
+ ov5693_write_reg(ov5693, OV5693_OFFSET_START_X_REG, 0, &ret);
+
+ /* Output Size X */
+ ov5693_write_reg(ov5693, OV5693_OUTPUT_SIZE_X_REG, mode->format.width,
+ &ret);
+
+ /* Crop End X */
+ ov5693_write_reg(ov5693, OV5693_CROP_END_X_REG,
+ mode->crop.left + mode->crop.width, &ret);
+
+ /* Horizontal Total Size */
+ ov5693_write_reg(ov5693, OV5693_TIMING_HTS_REG, OV5693_FIXED_PPL,
+ &ret);
+
+ /* Crop Start Y */
+ ov5693_write_reg(ov5693, OV5693_CROP_START_Y_REG, mode->crop.top,
+ &ret);
+
+ /* Offset Y */
+ ov5693_write_reg(ov5693, OV5693_OFFSET_START_Y_REG, 0, &ret);
+
+ /* Output Size Y */
+ ov5693_write_reg(ov5693, OV5693_OUTPUT_SIZE_Y_REG, mode->format.height,
+ &ret);
+
+ /* Crop End Y */
+ ov5693_write_reg(ov5693, OV5693_CROP_END_Y_REG,
+ mode->crop.top + mode->crop.height, &ret);
+
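+	/*
+	 * The subsample increment registers hold the odd increment in the
+	 * high nibble and the even increment (fixed at one) in the low
+	 * nibble.
+	 */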
+ /* Subsample X increase */
+ ov5693_write_reg(ov5693, OV5693_SUB_INC_X_REG,
+ ((mode->inc_x_odd << 4) & 0xf0) | 0x01, &ret);
+ /* Subsample Y increase */
+ ov5693_write_reg(ov5693, OV5693_SUB_INC_Y_REG,
+ ((mode->inc_y_odd << 4) & 0xf0) | 0x01, &ret);
+
+ if (ret)
+ return ret;
+
+ /* Binning */
+ ret = ov5693_update_bits(ov5693, OV5693_FORMAT1_REG,
+ OV5693_FORMAT1_VBIN_EN,
+ mode->binning_y ? OV5693_FORMAT1_VBIN_EN : 0);
+ if (ret)
+ return ret;
+
+ ret = ov5693_update_bits(ov5693, OV5693_FORMAT2_REG,
+ OV5693_FORMAT2_HBIN_EN,
+ mode->binning_x ? OV5693_FORMAT2_HBIN_EN : 0);
+
+ return ret;
+}
+
+static int ov5693_enable_streaming(struct ov5693_device *ov5693, bool enable)
+{
+ int ret = 0;
+
+ ov5693_write_reg(ov5693, OV5693_SW_STREAM_REG,
+ enable ? OV5693_START_STREAMING :
+ OV5693_STOP_STREAMING, &ret);
+
+ return ret;
+}
+
+static int ov5693_sw_reset(struct ov5693_device *ov5693)
+{
+ int ret = 0;
+
+ ov5693_write_reg(ov5693, OV5693_SW_RESET_REG, OV5693_SW_RESET, &ret);
+
+ return ret;
+}
+
+static int ov5693_sensor_init(struct ov5693_device *ov5693)
+{
+ int ret;
+
+ ret = ov5693_sw_reset(ov5693);
+ if (ret)
+ return dev_err_probe(ov5693->dev, ret,
+ "software reset error\n");
+
+ ret = ov5693_write_reg_array(ov5693, &ov5693_global_setting);
+ if (ret)
+ return dev_err_probe(ov5693->dev, ret,
+ "global settings error\n");
+
+ ret = ov5693_mode_configure(ov5693);
+ if (ret)
+ return dev_err_probe(ov5693->dev, ret,
+ "mode configure error\n");
+
+ ret = ov5693_enable_streaming(ov5693, false);
+ if (ret)
+ dev_err(ov5693->dev, "stop streaming error\n");
+
+ return ret;
+}
+
+static void ov5693_sensor_powerdown(struct ov5693_device *ov5693)
+{
+ gpiod_set_value_cansleep(ov5693->reset, 1);
+ gpiod_set_value_cansleep(ov5693->powerdown, 1);
+
+ regulator_bulk_disable(OV5693_NUM_SUPPLIES, ov5693->supplies);
+
+ clk_disable_unprepare(ov5693->clk);
+}
+
+static int ov5693_sensor_powerup(struct ov5693_device *ov5693)
+{
+ int ret;
+
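+	/* Assert reset and powerdown before enabling the clock and supplies */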
+ gpiod_set_value_cansleep(ov5693->reset, 1);
+ gpiod_set_value_cansleep(ov5693->powerdown, 1);
+
+ ret = clk_prepare_enable(ov5693->clk);
+ if (ret) {
+ dev_err(ov5693->dev, "Failed to enable clk\n");
+ goto fail_power;
+ }
+
+ ret = regulator_bulk_enable(OV5693_NUM_SUPPLIES, ov5693->supplies);
+ if (ret) {
+ dev_err(ov5693->dev, "Failed to enable regulators\n");
+ goto fail_power;
+ }
+
+ gpiod_set_value_cansleep(ov5693->powerdown, 0);
+ gpiod_set_value_cansleep(ov5693->reset, 0);
+
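+	/* Give the sensor time to come up before the first I2C access */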
+ usleep_range(5000, 7500);
+
+ return 0;
+
+fail_power:
+ ov5693_sensor_powerdown(ov5693);
+ return ret;
+}
+
+static int __maybe_unused ov5693_sensor_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
+
+ ov5693_sensor_powerdown(ov5693);
+
+ return 0;
+}
+
+static int __maybe_unused ov5693_sensor_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
+ int ret;
+
+ mutex_lock(&ov5693->lock);
+
+ ret = ov5693_sensor_powerup(ov5693);
+ if (ret)
+ goto out_unlock;
+
+ ret = ov5693_sensor_init(ov5693);
+ if (ret) {
+ dev_err(dev, "ov5693 sensor init failure\n");
+ goto err_power;
+ }
+
+ goto out_unlock;
+
+err_power:
+ ov5693_sensor_powerdown(ov5693);
+out_unlock:
+ mutex_unlock(&ov5693->lock);
+ return ret;
+}
+
+static int ov5693_detect(struct ov5693_device *ov5693)
+{
+ int ret;
+ u32 id;
+
+ ret = ov5693_read_reg(ov5693, OV5693_REG_CHIP_ID, &id);
+ if (ret)
+ return ret;
+
+ if (id != OV5693_CHIP_ID)
+ return dev_err_probe(ov5693->dev, -ENODEV,
+ "sensor ID mismatch. Found 0x%04x\n", id);
+
+ return 0;
+}
+
+/* V4L2 Framework callbacks */
+
+static unsigned int __ov5693_calc_vts(u32 height)
+{
+	/*
+	 * We need to set a sensible default VTS for whatever format height we
+	 * happen to be given from set_fmt(). This targets the highest frame
+	 * rate that is a multiple of 30fps, and returns an even VTS for it.
+	 */
+
+ unsigned int tgt_fps;
+
+ tgt_fps = rounddown(OV5693_PIXEL_RATE / OV5693_FIXED_PPL / height, 30);
+
+ return ALIGN_DOWN(OV5693_PIXEL_RATE / OV5693_FIXED_PPL / tgt_fps, 2);
+}
+
+static struct v4l2_mbus_framefmt *
+__ov5693_get_pad_format(struct ov5693_device *ov5693,
+ struct v4l2_subdev_state *state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&ov5693->sd, state, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &ov5693->mode.format;
+ default:
+ return NULL;
+ }
+}
+
+static struct v4l2_rect *
+__ov5693_get_pad_crop(struct ov5693_device *ov5693,
+ struct v4l2_subdev_state *state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(&ov5693->sd, state, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &ov5693->mode.crop;
+ }
+
+ return NULL;
+}
+
+static int ov5693_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+	struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
+
+	/* Return the TRY or ACTIVE format, depending on which was requested */
+	mutex_lock(&ov5693->lock);
+	format->format = *__ov5693_get_pad_format(ov5693, state, format->pad,
+						  format->which);
+	mutex_unlock(&ov5693->lock);
+
+	return 0;
+}
+
+static int ov5693_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
+ const struct v4l2_rect *crop;
+ struct v4l2_mbus_framefmt *fmt;
+ unsigned int hratio, vratio;
+ unsigned int width, height;
+ unsigned int hblank;
+ int exposure_max;
+ int ret = 0;
+
+ crop = __ov5693_get_pad_crop(ov5693, state, format->pad, format->which);
+
+ /*
+ * Align to two to simplify the binning calculations below, and clamp
+ * the requested format at the crop rectangle
+ */
+ width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
+ OV5693_MIN_CROP_WIDTH, crop->width);
+ height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
+ OV5693_MIN_CROP_HEIGHT, crop->height);
+
+ /*
+ * We can only support setting either the dimensions of the crop rect
+ * or those dimensions binned (separately) by a factor of two.
+ */
+ hratio = clamp_t(unsigned int,
+ DIV_ROUND_CLOSEST(crop->width, width), 1, 2);
+ vratio = clamp_t(unsigned int,
+ DIV_ROUND_CLOSEST(crop->height, height), 1, 2);
+
+ fmt = __ov5693_get_pad_format(ov5693, state, format->pad,
+ format->which);
+
+ fmt->width = crop->width / hratio;
+ fmt->height = crop->height / vratio;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ format->format = *fmt;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ return ret;
+
+ mutex_lock(&ov5693->lock);
+
+	ov5693->mode.binning_x = hratio > 1;
+	ov5693->mode.inc_x_odd = hratio > 1 ? 3 : 1;
+	ov5693->mode.binning_y = vratio > 1;
+	ov5693->mode.inc_y_odd = vratio > 1 ? 3 : 1;
+
+ ov5693->mode.vts = __ov5693_calc_vts(fmt->height);
+
+ __v4l2_ctrl_modify_range(ov5693->ctrls.vblank,
+ OV5693_TIMING_MIN_VTS,
+ OV5693_TIMING_MAX_VTS - fmt->height,
+ 1, ov5693->mode.vts - fmt->height);
+ __v4l2_ctrl_s_ctrl(ov5693->ctrls.vblank,
+ ov5693->mode.vts - fmt->height);
+
+ hblank = OV5693_FIXED_PPL - fmt->width;
+ __v4l2_ctrl_modify_range(ov5693->ctrls.hblank, hblank, hblank, 1,
+ hblank);
+
+ exposure_max = ov5693->mode.vts - OV5693_INTEGRATION_TIME_MARGIN;
+ __v4l2_ctrl_modify_range(ov5693->ctrls.exposure,
+ ov5693->ctrls.exposure->minimum, exposure_max,
+ ov5693->ctrls.exposure->step,
+ min(ov5693->ctrls.exposure->val,
+ exposure_max));
+
+ mutex_unlock(&ov5693->lock);
+ return ret;
+}
+
+static int ov5693_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ mutex_lock(&ov5693->lock);
+ sel->r = *__ov5693_get_pad_crop(ov5693, state, sel->pad,
+ sel->which);
+ mutex_unlock(&ov5693->lock);
+ break;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = OV5693_NATIVE_WIDTH;
+ sel->r.height = OV5693_NATIVE_HEIGHT;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = OV5693_ACTIVE_START_TOP;
+ sel->r.left = OV5693_ACTIVE_START_LEFT;
+ sel->r.width = OV5693_ACTIVE_WIDTH;
+ sel->r.height = OV5693_ACTIVE_HEIGHT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ov5693_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *__crop;
+ struct v4l2_rect rect;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /*
+ * Clamp the boundaries of the crop rectangle to the size of the sensor
+ * pixel array. Align to multiples of 2 to ensure Bayer pattern isn't
+ * disrupted.
+ */
+ rect.left = clamp(ALIGN(sel->r.left, 2), OV5693_NATIVE_START_LEFT,
+ OV5693_NATIVE_WIDTH);
+ rect.top = clamp(ALIGN(sel->r.top, 2), OV5693_NATIVE_START_TOP,
+ OV5693_NATIVE_HEIGHT);
+ rect.width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
+ OV5693_MIN_CROP_WIDTH, OV5693_NATIVE_WIDTH);
+ rect.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+ OV5693_MIN_CROP_HEIGHT, OV5693_NATIVE_HEIGHT);
+
+ /* Make sure the crop rectangle isn't outside the bounds of the array */
+ rect.width = min_t(unsigned int, rect.width,
+ OV5693_NATIVE_WIDTH - rect.left);
+ rect.height = min_t(unsigned int, rect.height,
+ OV5693_NATIVE_HEIGHT - rect.top);
+
+ __crop = __ov5693_get_pad_crop(ov5693, state, sel->pad, sel->which);
+
+ if (rect.width != __crop->width || rect.height != __crop->height) {
+ /*
+ * Reset the output image size if the crop rectangle size has
+ * been modified.
+ */
+ format = __ov5693_get_pad_format(ov5693, state, sel->pad,
+ sel->which);
+ format->width = rect.width;
+ format->height = rect.height;
+ }
+
+ *__crop = rect;
+ sel->r = rect;
+
+ return 0;
+}
+
+static int ov5693_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
+ int ret;
+
+ if (enable) {
+ ret = pm_runtime_get_sync(ov5693->dev);
+ if (ret < 0)
+ goto err_power_down;
+
+ mutex_lock(&ov5693->lock);
+ ret = __v4l2_ctrl_handler_setup(&ov5693->ctrls.handler);
+ if (ret) {
+ mutex_unlock(&ov5693->lock);
+ goto err_power_down;
+ }
+
+ ret = ov5693_enable_streaming(ov5693, true);
+ mutex_unlock(&ov5693->lock);
+ } else {
+ mutex_lock(&ov5693->lock);
+ ret = ov5693_enable_streaming(ov5693, false);
+ mutex_unlock(&ov5693->lock);
+ }
+ if (ret)
+ goto err_power_down;
+
+ ov5693->streaming = !!enable;
+
+ if (!enable)
+ pm_runtime_put(ov5693->dev);
+
+ return 0;
+err_power_down:
+ pm_runtime_put_noidle(ov5693->dev);
+ return ret;
+}
+
+static int ov5693_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
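+	/* Derive the frame rate from the pixel rate and the current blanking */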
+ unsigned int framesize = OV5693_FIXED_PPL * (ov5693->mode.format.height +
+ ov5693->ctrls.vblank->val);
+ unsigned int fps = DIV_ROUND_CLOSEST(OV5693_PIXEL_RATE, framesize);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = fps;
+
+ return 0;
+}
+
+static int ov5693_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /* Only a single mbus format is supported */
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int ov5693_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
+ struct v4l2_rect *__crop;
+
+ if (fse->index > 1 || fse->code != MEDIA_BUS_FMT_SBGGR10_1X10)
+ return -EINVAL;
+
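+	/* Index 0 is the full crop size; index 1 is that size binned by two */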
+ __crop = __ov5693_get_pad_crop(ov5693, state, fse->pad, fse->which);
+ if (!__crop)
+ return -EINVAL;
+
+ fse->min_width = __crop->width / (fse->index + 1);
+ fse->min_height = __crop->height / (fse->index + 1);
+ fse->max_width = fse->min_width;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov5693_video_ops = {
+ .s_stream = ov5693_s_stream,
+ .g_frame_interval = ov5693_g_frame_interval,
+};
+
+static const struct v4l2_subdev_pad_ops ov5693_pad_ops = {
+ .enum_mbus_code = ov5693_enum_mbus_code,
+ .enum_frame_size = ov5693_enum_frame_size,
+ .get_fmt = ov5693_get_fmt,
+ .set_fmt = ov5693_set_fmt,
+ .get_selection = ov5693_get_selection,
+ .set_selection = ov5693_set_selection,
+};
+
+static const struct v4l2_subdev_ops ov5693_ops = {
+ .video = &ov5693_video_ops,
+ .pad = &ov5693_pad_ops,
+};
+
+/* Sensor and Driver Configuration Functions */
+
+static int ov5693_init_controls(struct ov5693_device *ov5693)
+{
+ const struct v4l2_ctrl_ops *ops = &ov5693_ctrl_ops;
+ struct ov5693_v4l2_ctrls *ctrls = &ov5693->ctrls;
+ struct v4l2_fwnode_device_properties props;
+ int vblank_max, vblank_def;
+ int exposure_max;
+ int hblank;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&ctrls->handler, 12);
+ if (ret)
+ return ret;
+
+ /* link freq */
+ ctrls->link_freq = v4l2_ctrl_new_int_menu(&ctrls->handler,
+ NULL, V4L2_CID_LINK_FREQ,
+ 0, 0, link_freq_menu_items);
+ if (ctrls->link_freq)
+ ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* pixel rate */
+ ctrls->pixel_rate = v4l2_ctrl_new_std(&ctrls->handler, NULL,
+ V4L2_CID_PIXEL_RATE, 0,
+ OV5693_PIXEL_RATE, 1,
+ OV5693_PIXEL_RATE);
+
+ /* Exposure */
+ exposure_max = ov5693->mode.vts - OV5693_INTEGRATION_TIME_MARGIN;
+ ctrls->exposure = v4l2_ctrl_new_std(&ctrls->handler, ops,
+ V4L2_CID_EXPOSURE,
+ OV5693_EXPOSURE_MIN, exposure_max,
+ OV5693_EXPOSURE_STEP, exposure_max);
+
+ /* Gain */
+ ctrls->analogue_gain = v4l2_ctrl_new_std(&ctrls->handler,
+ ops, V4L2_CID_ANALOGUE_GAIN,
+ OV5693_GAIN_MIN,
+ OV5693_GAIN_MAX,
+ OV5693_GAIN_STEP,
+ OV5693_GAIN_DEF);
+
+ ctrls->digital_gain = v4l2_ctrl_new_std(&ctrls->handler, ops,
+ V4L2_CID_DIGITAL_GAIN,
+ OV5693_DIGITAL_GAIN_MIN,
+ OV5693_DIGITAL_GAIN_MAX,
+ OV5693_DIGITAL_GAIN_STEP,
+ OV5693_DIGITAL_GAIN_DEF);
+
+ /* Flip */
+ ctrls->hflip = v4l2_ctrl_new_std(&ctrls->handler, ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ ctrls->vflip = v4l2_ctrl_new_std(&ctrls->handler, ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ hblank = OV5693_FIXED_PPL - ov5693->mode.format.width;
+ ctrls->hblank = v4l2_ctrl_new_std(&ctrls->handler, ops,
+ V4L2_CID_HBLANK, hblank,
+ hblank, 1, hblank);
+
+ if (ctrls->hblank)
+ ctrls->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ vblank_max = OV5693_TIMING_MAX_VTS - ov5693->mode.format.height;
+ vblank_def = ov5693->mode.vts - ov5693->mode.format.height;
+ ctrls->vblank = v4l2_ctrl_new_std(&ctrls->handler, ops,
+ V4L2_CID_VBLANK,
+ OV5693_TIMING_MIN_VTS,
+ vblank_max, 1, vblank_def);
+
+ ctrls->test_pattern = v4l2_ctrl_new_std_menu_items(
+ &ctrls->handler, ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov5693_test_pattern_menu) - 1,
+ 0, 0, ov5693_test_pattern_menu);
+
+ if (ctrls->handler.error) {
+ dev_err(ov5693->dev, "Error initialising v4l2 ctrls\n");
+ ret = ctrls->handler.error;
+ goto err_free_handler;
+ }
+
+ /* set properties from fwnode (e.g. rotation, orientation) */
+ ret = v4l2_fwnode_device_parse(ov5693->dev, &props);
+ if (ret)
+ goto err_free_handler;
+
+ ret = v4l2_ctrl_new_fwnode_properties(&ctrls->handler, ops,
+ &props);
+ if (ret)
+ goto err_free_handler;
+
+ /* Use same lock for controls as for everything else. */
+ ctrls->handler.lock = &ov5693->lock;
+ ov5693->sd.ctrl_handler = &ctrls->handler;
+
+ return 0;
+
+err_free_handler:
+ v4l2_ctrl_handler_free(&ctrls->handler);
+ return ret;
+}
+
+static int ov5693_configure_gpios(struct ov5693_device *ov5693)
+{
+ ov5693->reset = devm_gpiod_get_optional(ov5693->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov5693->reset)) {
+ dev_err(ov5693->dev, "Error fetching reset GPIO\n");
+ return PTR_ERR(ov5693->reset);
+ }
+
+ ov5693->powerdown = devm_gpiod_get_optional(ov5693->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov5693->powerdown)) {
+ dev_err(ov5693->dev, "Error fetching powerdown GPIO\n");
+ return PTR_ERR(ov5693->powerdown);
+ }
+
+ return 0;
+}
+
+static int ov5693_get_regulators(struct ov5693_device *ov5693)
+{
+ unsigned int i;
+
+ for (i = 0; i < OV5693_NUM_SUPPLIES; i++)
+ ov5693->supplies[i].supply = ov5693_supply_names[i];
+
+ return devm_regulator_bulk_get(ov5693->dev, OV5693_NUM_SUPPLIES,
+ ov5693->supplies);
+}
+
+static int ov5693_check_hwcfg(struct ov5693_device *ov5693)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(ov5693->dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct fwnode_handle *endpoint;
+ unsigned int i;
+ int ret;
+
+ endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!endpoint)
+ return -EPROBE_DEFER; /* Could be provided by cio2-bridge */
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg);
+ fwnode_handle_put(endpoint);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+		dev_err(ov5693->dev, "only a 2-lane CSI2 config is supported\n");
+ ret = -EINVAL;
+ goto out_free_bus_cfg;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(ov5693->dev, "no link frequencies defined\n");
+ ret = -EINVAL;
+ goto out_free_bus_cfg;
+ }
+
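+	/* The driver supports a single link frequency; check that it's listed */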
+ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
+ if (bus_cfg.link_frequencies[i] == OV5693_LINK_FREQ_419_2MHZ)
+ break;
+
+ if (i == bus_cfg.nr_of_link_frequencies) {
+		dev_err(ov5693->dev, "supported link freq %llu not found\n",
+ OV5693_LINK_FREQ_419_2MHZ);
+ ret = -EINVAL;
+ goto out_free_bus_cfg;
+ }
+
+out_free_bus_cfg:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov5693_probe(struct i2c_client *client)
+{
+ struct ov5693_device *ov5693;
+ u32 clk_rate;
+ int ret = 0;
+
+ ov5693 = devm_kzalloc(&client->dev, sizeof(*ov5693), GFP_KERNEL);
+ if (!ov5693)
+ return -ENOMEM;
+
+ ov5693->client = client;
+ ov5693->dev = &client->dev;
+
+ ret = ov5693_check_hwcfg(ov5693);
+ if (ret)
+ return ret;
+
+ mutex_init(&ov5693->lock);
+
+ v4l2_i2c_subdev_init(&ov5693->sd, client, &ov5693_ops);
+
+ ov5693->clk = devm_clk_get(&client->dev, "xvclk");
+ if (IS_ERR(ov5693->clk)) {
+ dev_err(&client->dev, "Error getting clock\n");
+ return PTR_ERR(ov5693->clk);
+ }
+
+ clk_rate = clk_get_rate(ov5693->clk);
+ if (clk_rate != OV5693_XVCLK_FREQ)
+ dev_warn(&client->dev, "Found clk freq %u, expected %u\n",
+ clk_rate, OV5693_XVCLK_FREQ);
+
+ ret = ov5693_configure_gpios(ov5693);
+ if (ret)
+ return ret;
+
+ ret = ov5693_get_regulators(ov5693);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Error fetching regulators\n");
+
+ ov5693->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov5693->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ov5693->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ov5693->mode.crop = ov5693_default_crop;
+ ov5693->mode.format = ov5693_default_fmt;
+ ov5693->mode.vts = __ov5693_calc_vts(ov5693->mode.format.height);
+
+ ret = ov5693_init_controls(ov5693);
+ if (ret)
+ return ret;
+
+ ret = media_entity_pads_init(&ov5693->sd.entity, 1, &ov5693->pad);
+ if (ret)
+ goto err_ctrl_handler_free;
+
+	/*
+	 * We need the driver to work even when runtime PM is disabled in the
+	 * kernel, so power up and verify the chip now. If runtime PM is
+	 * disabled this leaves the chip powered, so that streaming will work.
+	 */
+
+ ret = ov5693_sensor_powerup(ov5693);
+ if (ret)
+ goto err_media_entity_cleanup;
+
+ ret = ov5693_detect(ov5693);
+ if (ret)
+ goto err_powerdown;
+
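+	/*
+	 * The sensor is powered up at this point; mark it active and keep a
+	 * runtime PM reference held until the end of probe.
+	 */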
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&ov5693->sd);
+ if (ret) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto err_pm_runtime;
+ }
+
+ pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+
+ return ret;
+
+err_pm_runtime:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+err_powerdown:
+ ov5693_sensor_powerdown(ov5693);
+err_media_entity_cleanup:
+ media_entity_cleanup(&ov5693->sd.entity);
+err_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&ov5693->ctrls.handler);
+
+ return ret;
+}
+
+static int ov5693_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&ov5693->sd.entity);
+ v4l2_ctrl_handler_free(&ov5693->ctrls.handler);
+ mutex_destroy(&ov5693->lock);
+
+ /*
+ * Disable runtime PM. In case runtime PM is disabled in the kernel,
+ * make sure to turn power off manually.
+ */
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ ov5693_sensor_powerdown(ov5693);
+ pm_runtime_set_suspended(&client->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ov5693_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov5693_sensor_suspend, ov5693_sensor_resume, NULL)
+};
+
+static const struct acpi_device_id ov5693_acpi_match[] = {
+ {"INT33BE"},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match);
+
+static struct i2c_driver ov5693_driver = {
+ .driver = {
+ .name = "ov5693",
+ .acpi_match_table = ov5693_acpi_match,
+ .pm = &ov5693_pm_ops,
+ },
+ .probe_new = ov5693_probe,
+ .remove = ov5693_remove,
+};
+module_i2c_driver(ov5693_driver);
+
+MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index c6c6050cda1a..8785764b7a74 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -1445,6 +1445,9 @@ struct ov8856 {
const struct ov8856_lane_cfg *priv_lane;
u8 modes_size;
+
+ /* True if the device has been identified */
+ bool identified;
};
struct ov8856_lane_cfg {
@@ -1685,6 +1688,71 @@ static int ov8856_write_reg_list(struct ov8856 *ov8856,
return 0;
}
+static int ov8856_identify_module(struct ov8856 *ov8856)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+ int ret;
+ u32 val;
+
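+	/* Identification now runs at stream-on time; only probe the hardware once */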
+ if (ov8856->identified)
+ return 0;
+
+ ret = ov8856_read_reg(ov8856, OV8856_REG_CHIP_ID,
+ OV8856_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV8856_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ OV8856_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+ OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
+ if (ret)
+ return ret;
+
+ ret = ov8856_write_reg(ov8856, OV8856_OTP_MODE_CTRL,
+ OV8856_REG_VALUE_08BIT, OV8856_OTP_MODE_AUTO);
+ if (ret) {
+ dev_err(&client->dev, "failed to set otp mode");
+ return ret;
+ }
+
+ ret = ov8856_write_reg(ov8856, OV8856_OTP_LOAD_CTRL,
+ OV8856_REG_VALUE_08BIT,
+ OV8856_OTP_LOAD_CTRL_ENABLE);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable load control");
+ return ret;
+ }
+
+ ret = ov8856_read_reg(ov8856, OV8856_MODULE_REVISION,
+ OV8856_REG_VALUE_08BIT, &val);
+ if (ret) {
+ dev_err(&client->dev, "failed to read module revision");
+ return ret;
+ }
+
+ dev_info(&client->dev, "OV8856 revision %x (%s) at address 0x%02x\n",
+ val,
+ val == OV8856_2A_MODULE ? "2A" :
+ val == OV8856_1B_MODULE ? "1B" : "unknown revision",
+ client->addr);
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+ OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY);
+ if (ret) {
+ dev_err(&client->dev, "failed to exit streaming mode");
+ return ret;
+ }
+
+ ov8856->identified = true;
+
+ return 0;
+}
+
static int ov8856_update_digital_gain(struct ov8856 *ov8856, u32 d_gain)
{
int ret;
@@ -1969,6 +2037,10 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
const struct ov8856_reg_list *reg_list;
int link_freq_index, ret;
+ ret = ov8856_identify_module(ov8856);
+ if (ret)
+ return ret;
+
link_freq_index = ov8856->cur_mode->link_freq_index;
reg_list = &ov8856->priv_lane->link_freq_configs[link_freq_index].reg_list;
@@ -2276,65 +2348,6 @@ static const struct v4l2_subdev_internal_ops ov8856_internal_ops = {
.open = ov8856_open,
};
-static int ov8856_identify_module(struct ov8856 *ov8856)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
- int ret;
- u32 val;
-
- ret = ov8856_read_reg(ov8856, OV8856_REG_CHIP_ID,
- OV8856_REG_VALUE_24BIT, &val);
- if (ret)
- return ret;
-
- if (val != OV8856_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
- OV8856_CHIP_ID, val);
- return -ENXIO;
- }
-
- ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
- OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
- if (ret)
- return ret;
-
- ret = ov8856_write_reg(ov8856, OV8856_OTP_MODE_CTRL,
- OV8856_REG_VALUE_08BIT, OV8856_OTP_MODE_AUTO);
- if (ret) {
- dev_err(&client->dev, "failed to set otp mode");
- return ret;
- }
-
- ret = ov8856_write_reg(ov8856, OV8856_OTP_LOAD_CTRL,
- OV8856_REG_VALUE_08BIT,
- OV8856_OTP_LOAD_CTRL_ENABLE);
- if (ret) {
- dev_err(&client->dev, "failed to enable load control");
- return ret;
- }
-
- ret = ov8856_read_reg(ov8856, OV8856_MODULE_REVISION,
- OV8856_REG_VALUE_08BIT, &val);
- if (ret) {
- dev_err(&client->dev, "failed to read module revision");
- return ret;
- }
-
- dev_info(&client->dev, "OV8856 revision %x (%s) at address 0x%02x\n",
- val,
- val == OV8856_2A_MODULE ? "2A" :
- val == OV8856_1B_MODULE ? "1B" : "unknown revision",
- client->addr);
-
- ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
- OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY);
- if (ret) {
- dev_err(&client->dev, "failed to exit streaming mode");
- return ret;
- }
-
- return 0;
-}
static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
{
@@ -2458,6 +2471,7 @@ static int ov8856_probe(struct i2c_client *client)
{
struct ov8856 *ov8856;
int ret;
+ bool full_power;
ov8856 = devm_kzalloc(&client->dev, sizeof(*ov8856), GFP_KERNEL);
if (!ov8856)
@@ -2472,16 +2486,19 @@ static int ov8856_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);
- ret = __ov8856_power_on(ov8856);
- if (ret) {
- dev_err(&client->dev, "failed to power on\n");
- return ret;
- }
+ full_power = acpi_dev_state_d0(&client->dev);
+ if (full_power) {
+ ret = __ov8856_power_on(ov8856);
+ if (ret) {
+ dev_err(&client->dev, "failed to power on\n");
+ return ret;
+ }
- ret = ov8856_identify_module(ov8856);
- if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
- goto probe_power_off;
+ ret = ov8856_identify_module(ov8856);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ goto probe_power_off;
+ }
}
mutex_init(&ov8856->mutex);
@@ -2511,11 +2528,9 @@ static int ov8856_probe(struct i2c_client *client)
goto probe_error_media_entity_cleanup;
}
- /*
- * Device is already turned on by i2c-core with ACPI domain PM.
- * Enable runtime PM and turn off the device.
- */
- pm_runtime_set_active(&client->dev);
+	/* Mark the device active if it was probed at full power (ACPI D0). */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
@@ -2562,6 +2577,7 @@ static struct i2c_driver ov8856_i2c_driver = {
},
.probe_new = ov8856_probe,
.remove = ov8856_remove,
+ .flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
module_i2c_driver(ov8856_i2c_driver);
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index ce50f3ea87b8..d9d016cfa9ac 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
@@ -20,10 +21,6 @@
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-mediabus.h>
-/* Clock rate */
-
-#define OV8865_EXTCLK_RATE 24000000
-
/* Register definitions */
/* System */
@@ -146,6 +143,7 @@
#define OV8865_EXPOSURE_CTRL_L_REG 0x3502
#define OV8865_EXPOSURE_CTRL_L(v) ((v) & GENMASK(7, 0))
#define OV8865_EXPOSURE_GAIN_MANUAL_REG 0x3503
+#define OV8865_INTEGRATION_TIME_MARGIN 8
#define OV8865_GAIN_CTRL_H_REG 0x3508
#define OV8865_GAIN_CTRL_H(v) (((v) & GENMASK(12, 8)) >> 8)
@@ -186,6 +184,8 @@
#define OV8865_VTS_H(v) (((v) & GENMASK(11, 8)) >> 8)
#define OV8865_VTS_L_REG 0x380f
#define OV8865_VTS_L(v) ((v) & GENMASK(7, 0))
+#define OV8865_TIMING_MAX_VTS 0xffff
+#define OV8865_TIMING_MIN_VTS 0x04
#define OV8865_OFFSET_X_H_REG 0x3810
#define OV8865_OFFSET_X_H(v) (((v) & GENMASK(15, 8)) >> 8)
#define OV8865_OFFSET_X_L_REG 0x3811
@@ -453,6 +453,15 @@
#define OV8865_PRE_CTRL0_PATTERN_COLOR_SQUARES 2
#define OV8865_PRE_CTRL0_PATTERN_BLACK 3
+/* Pixel Array */
+
+#define OV8865_NATIVE_WIDTH 3296
+#define OV8865_NATIVE_HEIGHT 2528
+#define OV8865_ACTIVE_START_TOP 32
+#define OV8865_ACTIVE_START_LEFT 80
+#define OV8865_ACTIVE_WIDTH 3264
+#define OV8865_ACTIVE_HEIGHT 2448
+
/* Macros */
#define ov8865_subdev_sensor(s) \
@@ -566,6 +575,25 @@ struct ov8865_sclk_config {
unsigned int sclk_div;
};
+struct ov8865_pll_configs {
+ const struct ov8865_pll1_config *pll1_config;
+ const struct ov8865_pll2_config *pll2_config_native;
+ const struct ov8865_pll2_config *pll2_config_binning;
+};
+
+/* Clock rate */
+
+enum extclk_rate {
+ OV8865_19_2_MHZ,
+ OV8865_24_MHZ,
+ OV8865_NUM_SUPPORTED_RATES
+};
+
+static const unsigned long supported_extclk_rates[] = {
+ [OV8865_19_2_MHZ] = 19200000,
+ [OV8865_24_MHZ] = 24000000,
+};
+
/*
* General formulas for (array-centered) mode calculation:
* - photo_array_width = 3296
@@ -632,11 +660,7 @@ struct ov8865_mode {
unsigned int blc_anchor_right_start;
unsigned int blc_anchor_right_end;
- struct v4l2_fract frame_interval;
-
- const struct ov8865_pll1_config *pll1_config;
- const struct ov8865_pll2_config *pll2_config;
- const struct ov8865_sclk_config *sclk_config;
+ bool pll2_binning;
const struct ov8865_register_value *register_values;
unsigned int register_values_count;
@@ -652,6 +676,9 @@ struct ov8865_state {
struct ov8865_ctrls {
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *exposure;
struct v4l2_ctrl_handler handler;
};
@@ -664,6 +691,9 @@ struct ov8865_sensor {
struct regulator *avdd;
struct regulator *dvdd;
struct regulator *dovdd;
+
+ unsigned long extclk_rate;
+ const struct ov8865_pll_configs *pll_configs;
struct clk *extclk;
struct v4l2_fwnode_endpoint endpoint;
@@ -679,43 +709,70 @@ struct ov8865_sensor {
/* Static definitions */
/*
- * EXTCLK = 24 MHz
* PHY_SCLK = 720 MHz
* MIPI_PCLK = 90 MHz
*/
-static const struct ov8865_pll1_config ov8865_pll1_config_native = {
- .pll_pre_div_half = 1,
- .pll_pre_div = 0,
- .pll_mul = 30,
- .m_div = 1,
- .mipi_div = 3,
- .pclk_div = 1,
- .sys_pre_div = 1,
- .sys_div = 2,
+
+static const struct ov8865_pll1_config ov8865_pll1_config_native_19_2mhz = {
+ .pll_pre_div_half = 1,
+ .pll_pre_div = 2,
+ .pll_mul = 75,
+ .m_div = 1,
+ .mipi_div = 3,
+ .pclk_div = 1,
+ .sys_pre_div = 1,
+ .sys_div = 2,
+};
+
+static const struct ov8865_pll1_config ov8865_pll1_config_native_24mhz = {
+ .pll_pre_div_half = 1,
+ .pll_pre_div = 0,
+ .pll_mul = 30,
+ .m_div = 1,
+ .mipi_div = 3,
+ .pclk_div = 1,
+ .sys_pre_div = 1,
+ .sys_div = 2,
};
/*
- * EXTCLK = 24 MHz
* DAC_CLK = 360 MHz
* SCLK = 144 MHz
*/
-static const struct ov8865_pll2_config ov8865_pll2_config_native = {
- .pll_pre_div_half = 1,
- .pll_pre_div = 0,
- .pll_mul = 30,
- .dac_div = 2,
- .sys_pre_div = 5,
- .sys_div = 0,
+static const struct ov8865_pll2_config ov8865_pll2_config_native_19_2mhz = {
+ .pll_pre_div_half = 1,
+ .pll_pre_div = 5,
+ .pll_mul = 75,
+ .dac_div = 1,
+ .sys_pre_div = 1,
+ .sys_div = 3,
+};
+
+static const struct ov8865_pll2_config ov8865_pll2_config_native_24mhz = {
+ .pll_pre_div_half = 1,
+ .pll_pre_div = 0,
+ .pll_mul = 30,
+ .dac_div = 2,
+ .sys_pre_div = 5,
+ .sys_div = 0,
};
/*
- * EXTCLK = 24 MHz
* DAC_CLK = 360 MHz
- * SCLK = 80 MHz
+ * SCLK = 72 MHz
*/
-static const struct ov8865_pll2_config ov8865_pll2_config_binning = {
+static const struct ov8865_pll2_config ov8865_pll2_config_binning_19_2mhz = {
+ .pll_pre_div_half = 1,
+ .pll_pre_div = 2,
+ .pll_mul = 75,
+ .dac_div = 2,
+ .sys_pre_div = 10,
+ .sys_div = 0,
+};
+
+static const struct ov8865_pll2_config ov8865_pll2_config_binning_24mhz = {
.pll_pre_div_half = 1,
.pll_pre_div = 0,
.pll_mul = 30,
@@ -724,6 +781,23 @@ static const struct ov8865_pll2_config ov8865_pll2_config_binning = {
.sys_div = 0,
};
+static const struct ov8865_pll_configs ov8865_pll_configs_19_2mhz = {
+ .pll1_config = &ov8865_pll1_config_native_19_2mhz,
+ .pll2_config_native = &ov8865_pll2_config_native_19_2mhz,
+ .pll2_config_binning = &ov8865_pll2_config_binning_19_2mhz,
+};
+
+static const struct ov8865_pll_configs ov8865_pll_configs_24mhz = {
+ .pll1_config = &ov8865_pll1_config_native_24mhz,
+ .pll2_config_native = &ov8865_pll2_config_native_24mhz,
+ .pll2_config_binning = &ov8865_pll2_config_binning_24mhz,
+};
+
+static const struct ov8865_pll_configs *ov8865_pll_configs[] = {
+ &ov8865_pll_configs_19_2mhz,
+ &ov8865_pll_configs_24mhz,
+};
+
static const struct ov8865_sclk_config ov8865_sclk_config_native = {
.sys_sel = 1,
.sclk_sel = 0,
@@ -890,7 +964,7 @@ static const struct ov8865_mode ov8865_modes[] = {
{
/* Horizontal */
.output_size_x = 3264,
- .hts = 1944,
+ .hts = 3888,
/* Vertical */
.output_size_y = 2448,
@@ -929,13 +1003,8 @@ static const struct ov8865_mode ov8865_modes[] = {
.blc_anchor_right_start = 1984,
.blc_anchor_right_end = 2239,
- /* Frame Interval */
- .frame_interval = { 1, 30 },
-
/* PLL */
- .pll1_config = &ov8865_pll1_config_native,
- .pll2_config = &ov8865_pll2_config_native,
- .sclk_config = &ov8865_sclk_config_native,
+ .pll2_binning = false,
/* Registers */
.register_values = ov8865_register_values_native,
@@ -946,11 +1015,11 @@ static const struct ov8865_mode ov8865_modes[] = {
{
/* Horizontal */
.output_size_x = 3264,
- .hts = 2582,
+ .hts = 3888,
/* Vertical */
.output_size_y = 1836,
- .vts = 2002,
+ .vts = 2470,
.size_auto = true,
.size_auto_boundary_x = 8,
@@ -985,13 +1054,8 @@ static const struct ov8865_mode ov8865_modes[] = {
.blc_anchor_right_start = 1984,
.blc_anchor_right_end = 2239,
- /* Frame Interval */
- .frame_interval = { 1, 30 },
-
/* PLL */
- .pll1_config = &ov8865_pll1_config_native,
- .pll2_config = &ov8865_pll2_config_native,
- .sclk_config = &ov8865_sclk_config_native,
+ .pll2_binning = false,
/* Registers */
.register_values = ov8865_register_values_native,
@@ -1045,13 +1109,8 @@ static const struct ov8865_mode ov8865_modes[] = {
.blc_anchor_right_start = 992,
.blc_anchor_right_end = 1119,
- /* Frame Interval */
- .frame_interval = { 1, 30 },
-
/* PLL */
- .pll1_config = &ov8865_pll1_config_native,
- .pll2_config = &ov8865_pll2_config_binning,
- .sclk_config = &ov8865_sclk_config_native,
+ .pll2_binning = true,
/* Registers */
.register_values = ov8865_register_values_binning,
@@ -1111,13 +1170,8 @@ static const struct ov8865_mode ov8865_modes[] = {
.blc_anchor_right_start = 992,
.blc_anchor_right_end = 1119,
- /* Frame Interval */
- .frame_interval = { 1, 90 },
-
/* PLL */
- .pll1_config = &ov8865_pll1_config_native,
- .pll2_config = &ov8865_pll2_config_binning,
- .sclk_config = &ov8865_sclk_config_native,
+ .pll2_binning = true,
/* Registers */
.register_values = ov8865_register_values_binning,
@@ -1512,12 +1566,11 @@ static int ov8865_isp_configure(struct ov8865_sensor *sensor)
static unsigned long ov8865_mode_pll1_rate(struct ov8865_sensor *sensor,
const struct ov8865_mode *mode)
{
- const struct ov8865_pll1_config *config = mode->pll1_config;
- unsigned long extclk_rate;
+ const struct ov8865_pll1_config *config;
unsigned long pll1_rate;
- extclk_rate = clk_get_rate(sensor->extclk);
- pll1_rate = extclk_rate * config->pll_mul / config->pll_pre_div_half;
+ config = sensor->pll_configs->pll1_config;
+ pll1_rate = sensor->extclk_rate * config->pll_mul / config->pll_pre_div_half;
switch (config->pll_pre_div) {
case 0:
@@ -1551,10 +1604,12 @@ static int ov8865_mode_pll1_configure(struct ov8865_sensor *sensor,
const struct ov8865_mode *mode,
u32 mbus_code)
{
- const struct ov8865_pll1_config *config = mode->pll1_config;
+ const struct ov8865_pll1_config *config;
u8 value;
int ret;
+ config = sensor->pll_configs->pll1_config;
+
switch (mbus_code) {
case MEDIA_BUS_FMT_SBGGR10_1X10:
value = OV8865_MIPI_BIT_SEL(10);
@@ -1621,9 +1676,12 @@ static int ov8865_mode_pll1_configure(struct ov8865_sensor *sensor,
static int ov8865_mode_pll2_configure(struct ov8865_sensor *sensor,
const struct ov8865_mode *mode)
{
- const struct ov8865_pll2_config *config = mode->pll2_config;
+ const struct ov8865_pll2_config *config;
int ret;
+ config = mode->pll2_binning ? sensor->pll_configs->pll2_config_binning :
+ sensor->pll_configs->pll2_config_native;
+
ret = ov8865_write(sensor, OV8865_PLL_CTRL12_REG,
OV8865_PLL_CTRL12_PRE_DIV_HALF(config->pll_pre_div_half) |
OV8865_PLL_CTRL12_DAC_DIV(config->dac_div));
@@ -1657,7 +1715,7 @@ static int ov8865_mode_pll2_configure(struct ov8865_sensor *sensor,
static int ov8865_mode_sclk_configure(struct ov8865_sensor *sensor,
const struct ov8865_mode *mode)
{
- const struct ov8865_sclk_config *config = mode->sclk_config;
+ const struct ov8865_sclk_config *config = &ov8865_sclk_config_native;
int ret;
ret = ov8865_write(sensor, OV8865_CLK_SEL0_REG,
@@ -2052,9 +2110,11 @@ static int ov8865_mode_configure(struct ov8865_sensor *sensor,
static unsigned long ov8865_mode_mipi_clk_rate(struct ov8865_sensor *sensor,
const struct ov8865_mode *mode)
{
- const struct ov8865_pll1_config *config = mode->pll1_config;
+ const struct ov8865_pll1_config *config;
unsigned long pll1_rate;
+ config = sensor->pll_configs->pll1_config;
+
pll1_rate = ov8865_mode_pll1_rate(sensor, mode);
return pll1_rate / config->m_div / 2;
@@ -2066,6 +2126,9 @@ static int ov8865_exposure_configure(struct ov8865_sensor *sensor, u32 exposure)
{
int ret;
+ /* The sensor stores exposure in units of 1/16th of a line */
+ exposure *= 16;
+
ret = ov8865_write(sensor, OV8865_EXPOSURE_CTRL_HH_REG,
OV8865_EXPOSURE_CTRL_HH(exposure));
if (ret)
@@ -2082,7 +2145,7 @@ static int ov8865_exposure_configure(struct ov8865_sensor *sensor, u32 exposure)
/* Gain */
-static int ov8865_gain_configure(struct ov8865_sensor *sensor, u32 gain)
+static int ov8865_analog_gain_configure(struct ov8865_sensor *sensor, u32 gain)
{
int ret;
@@ -2157,6 +2220,20 @@ static int ov8865_test_pattern_configure(struct ov8865_sensor *sensor,
ov8865_test_pattern_bits[index]);
}
+/* Blanking */
+
+static int ov8865_vts_configure(struct ov8865_sensor *sensor, u32 vblank)
+{
+ u16 vts = sensor->state.mode->output_size_y + vblank;
+ int ret;
+
+ ret = ov8865_write(sensor, OV8865_VTS_H_REG, OV8865_VTS_H(vts));
+ if (ret)
+ return ret;
+
+ return ov8865_write(sensor, OV8865_VTS_L_REG, OV8865_VTS_L(vts));
+}
+
/* State */
static int ov8865_state_mipi_configure(struct ov8865_sensor *sensor,
@@ -2330,27 +2407,27 @@ static int ov8865_sensor_power(struct ov8865_sensor *sensor, bool on)
if (ret) {
dev_err(sensor->dev,
"failed to enable DOVDD regulator\n");
- goto disable;
+ return ret;
}
ret = regulator_enable(sensor->avdd);
if (ret) {
dev_err(sensor->dev,
"failed to enable AVDD regulator\n");
- goto disable;
+ goto disable_dovdd;
}
ret = regulator_enable(sensor->dvdd);
if (ret) {
dev_err(sensor->dev,
"failed to enable DVDD regulator\n");
- goto disable;
+ goto disable_avdd;
}
ret = clk_prepare_enable(sensor->extclk);
if (ret) {
dev_err(sensor->dev, "failed to enable EXTCLK clock\n");
- goto disable;
+ goto disable_dvdd;
}
gpiod_set_value_cansleep(sensor->reset, 0);
@@ -2359,14 +2436,16 @@ static int ov8865_sensor_power(struct ov8865_sensor *sensor, bool on)
/* Time to enter streaming mode according to power timings. */
usleep_range(10000, 12000);
} else {
-disable:
gpiod_set_value_cansleep(sensor->powerdown, 1);
gpiod_set_value_cansleep(sensor->reset, 1);
clk_disable_unprepare(sensor->extclk);
+disable_dvdd:
regulator_disable(sensor->dvdd);
+disable_avdd:
regulator_disable(sensor->avdd);
+disable_dovdd:
regulator_disable(sensor->dovdd);
}
@@ -2382,6 +2461,20 @@ static int ov8865_s_ctrl(struct v4l2_ctrl *ctrl)
unsigned int index;
int ret;
+ /* If VBLANK is altered we need to update exposure to compensate */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ int exposure_max;
+
+ exposure_max = sensor->state.mode->output_size_y + ctrl->val -
+ OV8865_INTEGRATION_TIME_MARGIN;
+ __v4l2_ctrl_modify_range(sensor->ctrls.exposure,
+ sensor->ctrls.exposure->minimum,
+ exposure_max,
+ sensor->ctrls.exposure->step,
+ min(sensor->ctrls.exposure->val,
+ exposure_max));
+ }
+
/* Wait for the sensor to be on before setting controls. */
if (pm_runtime_suspended(sensor->dev))
return 0;
@@ -2392,8 +2485,8 @@ static int ov8865_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret)
return ret;
break;
- case V4L2_CID_GAIN:
- ret = ov8865_gain_configure(sensor, ctrl->val);
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov8865_analog_gain_configure(sensor, ctrl->val);
if (ret)
return ret;
break;
@@ -2408,6 +2501,8 @@ static int ov8865_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_TEST_PATTERN:
index = (unsigned int)ctrl->val;
return ov8865_test_pattern_configure(sensor, index);
+ case V4L2_CID_VBLANK:
+ return ov8865_vts_configure(sensor, ctrl->val);
default:
return -EINVAL;
}
@@ -2424,6 +2519,10 @@ static int ov8865_ctrls_init(struct ov8865_sensor *sensor)
struct ov8865_ctrls *ctrls = &sensor->ctrls;
struct v4l2_ctrl_handler *handler = &ctrls->handler;
const struct v4l2_ctrl_ops *ops = &ov8865_ctrl_ops;
+ const struct ov8865_mode *mode = &ov8865_modes[0];
+ struct v4l2_fwnode_device_properties props;
+ unsigned int vblank_max, vblank_def;
+ unsigned int hblank;
int ret;
v4l2_ctrl_handler_init(handler, 32);
@@ -2433,12 +2532,13 @@ static int ov8865_ctrls_init(struct ov8865_sensor *sensor)
/* Exposure */
- v4l2_ctrl_new_std(handler, ops, V4L2_CID_EXPOSURE, 16, 1048575, 16,
- 512);
+ ctrls->exposure = v4l2_ctrl_new_std(handler, ops, V4L2_CID_EXPOSURE, 2,
+ 65535, 1, 32);
/* Gain */
- v4l2_ctrl_new_std(handler, ops, V4L2_CID_GAIN, 128, 8191, 128, 128);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_ANALOGUE_GAIN, 128, 2048, 128,
+ 128);
/* White Balance */
@@ -2459,6 +2559,20 @@ static int ov8865_ctrls_init(struct ov8865_sensor *sensor)
ARRAY_SIZE(ov8865_test_pattern_menu) - 1,
0, 0, ov8865_test_pattern_menu);
+ /* Blanking */
+ hblank = mode->hts - mode->output_size_x;
+ ctrls->hblank = v4l2_ctrl_new_std(handler, ops, V4L2_CID_HBLANK, hblank,
+ hblank, 1, hblank);
+
+ if (ctrls->hblank)
+ ctrls->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ vblank_max = OV8865_TIMING_MAX_VTS - mode->output_size_y;
+ vblank_def = mode->vts - mode->output_size_y;
+ ctrls->vblank = v4l2_ctrl_new_std(handler, ops, V4L2_CID_VBLANK,
+ OV8865_TIMING_MIN_VTS, vblank_max, 1,
+ vblank_def);
+
/* MIPI CSI-2 */
ctrls->link_freq =
@@ -2470,6 +2584,15 @@ static int ov8865_ctrls_init(struct ov8865_sensor *sensor)
v4l2_ctrl_new_std(handler, NULL, V4L2_CID_PIXEL_RATE, 1,
INT_MAX, 1, 1);
+ /* set properties from fwnode (e.g. rotation, orientation) */
+ ret = v4l2_fwnode_device_parse(sensor->dev, &props);
+ if (ret)
+ goto error_ctrls;
+
+ ret = v4l2_ctrl_new_fwnode_properties(handler, ops, &props);
+ if (ret)
+ goto error_ctrls;
+
if (handler->error) {
ret = handler->error;
goto error_ctrls;
@@ -2522,11 +2645,18 @@ static int ov8865_g_frame_interval(struct v4l2_subdev *subdev,
{
struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
const struct ov8865_mode *mode;
+ unsigned int framesize;
+ unsigned int fps;
mutex_lock(&sensor->mutex);
mode = sensor->state.mode;
- interval->interval = mode->frame_interval;
+ framesize = mode->hts * (mode->output_size_y +
+ sensor->ctrls.vblank->val);
+ fps = DIV_ROUND_CLOSEST(sensor->ctrls.pixel_rate->val, framesize);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = fps;
mutex_unlock(&sensor->mutex);
@@ -2599,7 +2729,9 @@ static int ov8865_set_fmt(struct v4l2_subdev *subdev,
struct v4l2_mbus_framefmt *mbus_format = &format->format;
const struct ov8865_mode *mode;
u32 mbus_code = 0;
+ unsigned int hblank;
unsigned int index;
+ int exposure_max;
int ret = 0;
mutex_lock(&sensor->mutex);
@@ -2639,6 +2771,21 @@ static int ov8865_set_fmt(struct v4l2_subdev *subdev,
sensor->state.mbus_code != mbus_code)
ret = ov8865_state_configure(sensor, mode, mbus_code);
+ __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV8865_TIMING_MIN_VTS,
+ OV8865_TIMING_MAX_VTS - mode->output_size_y,
+ 1, mode->vts - mode->output_size_y);
+
+ hblank = mode->hts - mode->output_size_x;
+ __v4l2_ctrl_modify_range(sensor->ctrls.hblank, hblank, hblank, 1,
+ hblank);
+
+ exposure_max = mode->vts - OV8865_INTEGRATION_TIME_MARGIN;
+ __v4l2_ctrl_modify_range(sensor->ctrls.exposure,
+ sensor->ctrls.exposure->minimum, exposure_max,
+ sensor->ctrls.exposure->step,
+ min(sensor->ctrls.exposure->val,
+ exposure_max));
+
complete:
mutex_unlock(&sensor->mutex);
@@ -2662,37 +2809,55 @@ static int ov8865_enum_frame_size(struct v4l2_subdev *subdev,
return 0;
}
-static int ov8865_enum_frame_interval(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval_enum *interval_enum)
+static void
+__ov8865_get_pad_crop(struct ov8865_sensor *sensor,
+ struct v4l2_subdev_state *state, unsigned int pad,
+ enum v4l2_subdev_format_whence which, struct v4l2_rect *r)
{
- const struct ov8865_mode *mode = NULL;
- unsigned int mode_index;
- unsigned int interval_index;
-
- if (interval_enum->index > 0)
- return -EINVAL;
- /*
- * Multiple modes with the same dimensions may have different frame
- * intervals, so look up each relevant mode.
- */
- for (mode_index = 0, interval_index = 0;
- mode_index < ARRAY_SIZE(ov8865_modes); mode_index++) {
- mode = &ov8865_modes[mode_index];
-
- if (mode->output_size_x == interval_enum->width &&
- mode->output_size_y == interval_enum->height) {
- if (interval_index == interval_enum->index)
- break;
+ const struct ov8865_mode *mode = sensor->state.mode;
- interval_index++;
- }
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ *r = *v4l2_subdev_get_try_crop(&sensor->subdev, state, pad);
+ break;
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
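+		/* The active crop is always centred on the pixel array */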
+ r->height = mode->output_size_y;
+ r->width = mode->output_size_x;
+ r->top = (OV8865_NATIVE_HEIGHT - mode->output_size_y) / 2;
+ r->left = (OV8865_NATIVE_WIDTH - mode->output_size_x) / 2;
+ break;
}
+}
- if (mode_index == ARRAY_SIZE(ov8865_modes))
- return -EINVAL;
+static int ov8865_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
- interval_enum->interval = mode->frame_interval;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ mutex_lock(&sensor->mutex);
+ __ov8865_get_pad_crop(sensor, state, sel->pad,
+ sel->which, &sel->r);
+ mutex_unlock(&sensor->mutex);
+ break;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = OV8865_NATIVE_WIDTH;
+ sel->r.height = OV8865_NATIVE_HEIGHT;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = OV8865_ACTIVE_START_TOP;
+ sel->r.left = OV8865_ACTIVE_START_LEFT;
+ sel->r.width = OV8865_ACTIVE_WIDTH;
+ sel->r.height = OV8865_ACTIVE_HEIGHT;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -2702,7 +2867,8 @@ static const struct v4l2_subdev_pad_ops ov8865_subdev_pad_ops = {
.get_fmt = ov8865_get_fmt,
.set_fmt = ov8865_set_fmt,
.enum_frame_size = ov8865_enum_frame_size,
- .enum_frame_interval = ov8865_enum_frame_interval,
+ .get_selection = ov8865_get_selection,
+ .set_selection = ov8865_get_selection,
};
static const struct v4l2_subdev_ops ov8865_subdev_ops = {
@@ -2782,7 +2948,8 @@ static int ov8865_probe(struct i2c_client *client)
struct ov8865_sensor *sensor;
struct v4l2_subdev *subdev;
struct media_pad *pad;
- unsigned long rate;
+ unsigned int rate = 0;
+ unsigned int i;
int ret;
sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
@@ -2792,13 +2959,31 @@ static int ov8865_probe(struct i2c_client *client)
sensor->dev = dev;
sensor->i2c_client = client;
+ /* Regulators */
+
+ /* DVDD: digital core */
+ sensor->dvdd = devm_regulator_get(dev, "dvdd");
+ if (IS_ERR(sensor->dvdd))
+ return dev_err_probe(dev, PTR_ERR(sensor->dvdd),
+ "cannot get DVDD regulator\n");
+
+ /* DOVDD: digital I/O */
+ sensor->dovdd = devm_regulator_get(dev, "dovdd");
+ if (IS_ERR(sensor->dovdd))
+ return dev_err_probe(dev, PTR_ERR(sensor->dovdd),
+ "cannot get DOVDD regulator\n");
+
+ /* AVDD: analog */
+ sensor->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(sensor->avdd))
+ return dev_err_probe(dev, PTR_ERR(sensor->avdd),
+ "cannot get AVDD (analog) regulator\n");
+
/* Graph Endpoint */
handle = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
- if (!handle) {
- dev_err(dev, "unable to find endpoint node\n");
- return -EINVAL;
- }
+ if (!handle)
+ return -EPROBE_DEFER;
sensor->endpoint.bus_type = V4L2_MBUS_CSI2_DPHY;
@@ -2824,48 +3009,54 @@ static int ov8865_probe(struct i2c_client *client)
goto error_endpoint;
}
- /* Regulators */
-
- /* DVDD: digital core */
- sensor->dvdd = devm_regulator_get(dev, "dvdd");
- if (IS_ERR(sensor->dvdd)) {
- dev_err(dev, "cannot get DVDD (digital core) regulator\n");
- ret = PTR_ERR(sensor->dvdd);
- goto error_endpoint;
- }
+ /* External Clock */
- /* DOVDD: digital I/O */
- sensor->dovdd = devm_regulator_get(dev, "dovdd");
- if (IS_ERR(sensor->dovdd)) {
- dev_err(dev, "cannot get DOVDD (digital I/O) regulator\n");
- ret = PTR_ERR(sensor->dovdd);
+ sensor->extclk = devm_clk_get(dev, NULL);
+ if (PTR_ERR(sensor->extclk) == -ENOENT) {
+ dev_info(dev, "no external clock found, continuing...\n");
+ sensor->extclk = NULL;
+ } else if (IS_ERR(sensor->extclk)) {
+ dev_err(dev, "failed to get external clock\n");
+ ret = PTR_ERR(sensor->extclk);
goto error_endpoint;
}
- /* AVDD: analog */
- sensor->avdd = devm_regulator_get(dev, "avdd");
- if (IS_ERR(sensor->avdd)) {
- dev_err(dev, "cannot get AVDD (analog) regulator\n");
- ret = PTR_ERR(sensor->avdd);
- goto error_endpoint;
+ /*
+ * We could have either a 24MHz or 19.2MHz clock rate from either dt or
+ * ACPI...but we also need to support the weird IPU3 case which will
+ * have an external clock AND a clock-frequency property. Check for the
+ * clock-frequency property and if found, set that rate if we managed
+ * to acquire a clock. This should cover the ACPI case. If the system
+ * uses devicetree then the configured rate should already be set, so
+ * we can just read it.
+ */
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &rate);
+ if (!ret && sensor->extclk) {
+ ret = clk_set_rate(sensor->extclk, rate);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to set clock rate\n");
+ } else if (ret && !sensor->extclk) {
+ return dev_err_probe(dev, ret, "invalid clock config\n");
}
- /* External Clock */
+ sensor->extclk_rate = rate ? rate : clk_get_rate(sensor->extclk);
- sensor->extclk = devm_clk_get(dev, NULL);
- if (IS_ERR(sensor->extclk)) {
- dev_err(dev, "failed to get external clock\n");
- ret = PTR_ERR(sensor->extclk);
- goto error_endpoint;
+ for (i = 0; i < ARRAY_SIZE(supported_extclk_rates); i++) {
+ if (sensor->extclk_rate == supported_extclk_rates[i])
+ break;
}
- rate = clk_get_rate(sensor->extclk);
- if (rate != OV8865_EXTCLK_RATE) {
- dev_err(dev, "clock rate %lu Hz is unsupported\n", rate);
+ if (i == ARRAY_SIZE(supported_extclk_rates)) {
+ dev_err(dev, "clock rate %lu Hz is unsupported\n",
+ sensor->extclk_rate);
ret = -EINVAL;
goto error_endpoint;
}
+ sensor->pll_configs = ov8865_pll_configs[i];
+
/* Subdev, entity and pad */
subdev = &sensor->subdev;
@@ -2891,14 +3082,16 @@ static int ov8865_probe(struct i2c_client *client)
if (ret)
goto error_mutex;
+ mutex_lock(&sensor->mutex);
ret = ov8865_state_init(sensor);
+ mutex_unlock(&sensor->mutex);
if (ret)
goto error_ctrls;
/* Runtime PM */
- pm_runtime_enable(sensor->dev);
pm_runtime_set_suspended(sensor->dev);
+ pm_runtime_enable(sensor->dev);
/* V4L2 subdev register */
@@ -2946,6 +3139,12 @@ static const struct dev_pm_ops ov8865_pm_ops = {
SET_RUNTIME_PM_OPS(ov8865_suspend, ov8865_resume, NULL)
};
+static const struct acpi_device_id ov8865_acpi_match[] = {
+ {"INT347A"},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ov8865_acpi_match);
+
static const struct of_device_id ov8865_of_match[] = {
{ .compatible = "ovti,ov8865" },
{ }
@@ -2956,6 +3155,7 @@ static struct i2c_driver ov8865_driver = {
.driver = {
.name = "ov8865",
.of_match_table = ov8865_of_match,
+ .acpi_match_table = ov8865_acpi_match,
.pm = &ov8865_pm_ops,
},
.probe_new = ov8865_probe,
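
A note on the clock handling above: ov8865_probe() now reduces clock setup to a table lookup. The effective external clock rate (taken from the "clock-frequency" property or from clk_get_rate()) is matched against supported_extclk_rates[], and the same index selects the PLL parameters via ov8865_pll_configs[i]. A minimal standalone sketch of the pattern follows; the two rate values come from the comment in the hunk, while the table contents and config labels are assumptions, not the driver's actual data.

#include <stdio.h>

/* Assumed tables: the driver's real definitions are outside the quoted hunks. */
static const unsigned long supported_extclk_rates[] = {
	19200000,	/* 19.2 MHz, the ACPI/IPU3 case */
	24000000,	/* 24 MHz, the devicetree case */
};

static const char * const pll_config_names[] = {
	"pll-config-19.2mhz",	/* hypothetical label */
	"pll-config-24mhz",	/* hypothetical label */
};

#define N_RATES (sizeof(supported_extclk_rates) / sizeof(supported_extclk_rates[0]))

int main(void)
{
	unsigned long rate = 19200000;	/* stand-in for sensor->extclk_rate */
	unsigned int i;

	for (i = 0; i < N_RATES; i++)
		if (rate == supported_extclk_rates[i])
			break;

	if (i == N_RATES) {
		fprintf(stderr, "clock rate %lu Hz is unsupported\n", rate);
		return 1;
	}

	/* Mirrors sensor->pll_configs = ov8865_pll_configs[i] in the patch. */
	printf("selected %s\n", pll_config_names[i]);
	return 0;
}
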
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
index c102c6bbc118..7fe61187a2f8 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
@@ -130,16 +130,10 @@ static int s5c73m3_spi_probe(struct spi_device *spi)
return 0;
}
-static int s5c73m3_spi_remove(struct spi_device *spi)
-{
- return 0;
-}
-
int s5c73m3_register_spi_driver(struct s5c73m3 *state)
{
struct spi_driver *spidrv = &state->spidrv;
- spidrv->remove = s5c73m3_spi_remove;
spidrv->probe = s5c73m3_spi_probe;
spidrv->driver.name = S5C73M3_SPI_DRV_NAME;
spidrv->driver.of_match_table = s5c73m3_spi_ids;
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
index f11382afe23b..680fbb3a9340 100644
--- a/drivers/media/mc/mc-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
@@ -217,7 +217,7 @@ int __must_check media_devnode_register(struct media_device *mdev,
/* Part 1: Find a free minor number */
mutex_lock(&media_devnode_lock);
- minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0);
+ minor = find_first_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES);
if (minor == MEDIA_NUM_DEVICES) {
mutex_unlock(&media_devnode_lock);
pr_err("could not get a free minor\n");
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index f40f41977142..b411f9796191 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -14,22 +14,6 @@
#include <media/media-entity.h>
#include <media/media-device.h>
-static inline const char *gobj_type(enum media_gobj_type type)
-{
- switch (type) {
- case MEDIA_GRAPH_ENTITY:
- return "entity";
- case MEDIA_GRAPH_PAD:
- return "pad";
- case MEDIA_GRAPH_LINK:
- return "link";
- case MEDIA_GRAPH_INTF_DEVNODE:
- return "intf-devnode";
- default:
- return "unknown";
- }
-}
-
static inline const char *intf_type(struct media_interface *intf)
{
switch (intf->type) {
@@ -64,12 +48,10 @@ __must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum,
int idx_max)
{
idx_max = ALIGN(idx_max, BITS_PER_LONG);
- ent_enum->bmap = kcalloc(idx_max / BITS_PER_LONG, sizeof(long),
- GFP_KERNEL);
+ ent_enum->bmap = bitmap_zalloc(idx_max, GFP_KERNEL);
if (!ent_enum->bmap)
return -ENOMEM;
- bitmap_zero(ent_enum->bmap, idx_max);
ent_enum->idx_max = idx_max;
return 0;
@@ -78,7 +60,7 @@ EXPORT_SYMBOL_GPL(__media_entity_enum_init);
void media_entity_enum_cleanup(struct media_entity_enum *ent_enum)
{
- kfree(ent_enum->bmap);
+ bitmap_free(ent_enum->bmap);
}
EXPORT_SYMBOL_GPL(media_entity_enum_cleanup);
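
The mc-entity change is a straight substitution: bitmap_zalloc(nbits, gfp) returns a zeroed bitmap sized for nbits bits, folding the old kcalloc() + bitmap_zero() pair into one call, and such an allocation must be released with bitmap_free() rather than kfree(). A minimal sketch of the paired helpers, assuming idx_max is already BITS_PER_LONG-aligned as in __media_entity_enum_init():

#include <linux/bitmap.h>
#include <linux/gfp.h>

static unsigned long *demo_enum_init(unsigned int idx_max)
{
	/* Replaces kcalloc(idx_max / BITS_PER_LONG, sizeof(long), GFP_KERNEL)
	 * followed by bitmap_zero(bmap, idx_max). */
	return bitmap_zalloc(idx_max, GFP_KERNEL);
}

static void demo_enum_cleanup(unsigned long *bmap)
{
	bitmap_free(bmap);	/* always pair with bitmap_zalloc() */
}
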
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 6a4c7cb0ad0f..486c8ec0fa60 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -185,6 +185,8 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
dma_addr_t cur_addr =
fc->read_ibi_reg(fc,dma1_008).dma_0x8.dma_cur_addr << 2;
u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;
+ if (cur_pos > fc_pci->dma[0].size * 2)
+ goto error;
deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
jiffies_to_usecs(jiffies - fc_pci->last_irq),
@@ -225,6 +227,7 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
ret = IRQ_NONE;
}
+error:
spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
return ret;
}
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 0e9df8b35ac6..8cc9bec43688 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -170,14 +170,14 @@ MODULE_VERSION(BTTV_VERSION);
/* ----------------------------------------------------------------------- */
/* sysfs */
-static ssize_t show_card(struct device *cd,
+static ssize_t card_show(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct video_device *vfd = to_video_device(cd);
struct bttv *btv = video_get_drvdata(vfd);
return sprintf(buf, "%d\n", btv ? btv->c.type : UNSET);
}
-static DEVICE_ATTR(card, S_IRUGO, show_card, NULL);
+static DEVICE_ATTR_RO(card);
/* ----------------------------------------------------------------------- */
/* dvb auto-load setup */
diff --git a/drivers/media/pci/cobalt/cobalt-cpld.c b/drivers/media/pci/cobalt/cobalt-cpld.c
index 3d8026483ac3..fad882459d23 100644
--- a/drivers/media/pci/cobalt/cobalt-cpld.c
+++ b/drivers/media/pci/cobalt/cobalt-cpld.c
@@ -236,7 +236,6 @@ bool cobalt_cpld_set_freq(struct cobalt *cobalt, unsigned f_out)
u8 n1, hsdiv;
u8 regs[6];
int found = 0;
- u16 clock_ctrl;
int retries = 3;
for (i = 0; i < ARRAY_SIZE(multipliers); i++) {
@@ -260,9 +259,7 @@ bool cobalt_cpld_set_freq(struct cobalt *cobalt, unsigned f_out)
hsdiv = multipliers[i_best].hsdiv - 4;
rfreq = div_u64(dco << 28, f_xtal);
- clock_ctrl = cpld_read(cobalt, SI570_CLOCK_CTRL);
- clock_ctrl |= S01755_REG_CLOCK_CTRL_BITMAP_CLKHSMA_FPGA_CTRL;
- clock_ctrl |= S01755_REG_CLOCK_CTRL_BITMAP_CLKHSMA_EN;
+ cpld_read(cobalt, SI570_CLOCK_CTRL);
regs[0] = (hsdiv << 5) | (n1 >> 2);
regs[1] = ((n1 & 0x3) << 6) | (rfreq >> 32);
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index 9a82e68303b6..9dc361886284 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -51,12 +51,6 @@ struct snd_cx18_card *to_snd_cx18_card(struct v4l2_device *v4l2_dev)
return to_cx18(v4l2_dev)->alsa;
}
-static inline
-struct snd_cx18_card *p_to_snd_cx18_card(struct v4l2_device **v4l2_dev)
-{
- return container_of(v4l2_dev, struct snd_cx18_card, v4l2_dev);
-}
-
static void snd_cx18_card_free(struct snd_cx18_card *cxsc)
{
if (cxsc == NULL)
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 40c10ca94def..3078a39f0b95 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -337,13 +337,6 @@ static int cx25821_risc_decode(u32 risc)
return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
-static inline int i2c_slave_did_ack(struct i2c_adapter *i2c_adap)
-{
- struct cx25821_i2c *bus = i2c_adap->algo_data;
- struct cx25821_dev *dev = bus->dev;
- return cx_read(bus->reg_stat) & 0x01;
-}
-
static void cx25821_registers_init(struct cx25821_dev *dev)
{
u32 tmp;
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
index 67c467d3c81f..7ccb7b6eaa82 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
@@ -3,6 +3,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/i2c.h>
#include <linux/pci.h>
#include <linux/property.h>
#include <media/v4l2-fwnode.h>
@@ -21,7 +22,9 @@
*/
static const struct cio2_sensor_config cio2_supported_sensors[] = {
/* Omnivision OV5693 */
- CIO2_SENSOR_CONFIG("INT33BE", 0),
+ CIO2_SENSOR_CONFIG("INT33BE", 1, 419200000),
+ /* Omnivision OV8865 */
+ CIO2_SENSOR_CONFIG("INT347A", 1, 360000000),
/* Omnivision OV2680 */
CIO2_SENSOR_CONFIG("OVTI2680", 0),
};
@@ -36,6 +39,18 @@ static const struct cio2_property_names prop_names = {
.link_frequencies = "link-frequencies",
};
+static const char * const cio2_vcm_types[] = {
+ "ad5823",
+ "dw9714",
+ "ad5816",
+ "dw9719",
+ "dw9718",
+ "dw9806b",
+ "wv517s",
+ "lc898122xa",
+ "lc898212axb",
+};
+
static int cio2_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
void *data, u32 size)
{
@@ -132,6 +147,12 @@ static void cio2_bridge_create_fwnode_properties(
sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
sensor->prop_names.orientation,
orientation);
+ if (sensor->ssdb.vcmtype) {
+ sensor->vcm_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_VCM]);
+ sensor->dev_properties[3] =
+ PROPERTY_ENTRY_REF_ARRAY("lens-focus", sensor->vcm_ref);
+ }
sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
sensor->prop_names.bus_type,
@@ -193,6 +214,33 @@ static void cio2_bridge_create_connection_swnodes(struct cio2_bridge *bridge,
sensor->node_names.endpoint,
&nodes[SWNODE_CIO2_PORT],
sensor->cio2_properties);
+ if (sensor->ssdb.vcmtype)
+ nodes[SWNODE_VCM] =
+ NODE_VCM(cio2_vcm_types[sensor->ssdb.vcmtype - 1]);
+}
+
+static void cio2_bridge_instantiate_vcm_i2c_client(struct cio2_sensor *sensor)
+{
+ struct i2c_board_info board_info = { };
+ char name[16];
+
+ if (!sensor->ssdb.vcmtype)
+ return;
+
+ snprintf(name, sizeof(name), "%s-VCM", acpi_dev_name(sensor->adev));
+ board_info.dev_name = name;
+ strscpy(board_info.type, cio2_vcm_types[sensor->ssdb.vcmtype - 1],
+ ARRAY_SIZE(board_info.type));
+ board_info.swnode = &sensor->swnodes[SWNODE_VCM];
+
+ sensor->vcm_i2c_client =
+ i2c_acpi_new_device_by_fwnode(acpi_fwnode_handle(sensor->adev),
+ 1, &board_info);
+ if (IS_ERR(sensor->vcm_i2c_client)) {
+ dev_warn(&sensor->adev->dev, "Error instantiating VCM i2c-client: %ld\n",
+ PTR_ERR(sensor->vcm_i2c_client));
+ sensor->vcm_i2c_client = NULL;
+ }
}
static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
@@ -205,6 +253,7 @@ static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
software_node_unregister_nodes(sensor->swnodes);
ACPI_FREE(sensor->pld);
acpi_dev_put(sensor->adev);
+ i2c_unregister_device(sensor->vcm_i2c_client);
}
}
@@ -237,9 +286,17 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
if (ret)
goto err_put_adev;
+ if (sensor->ssdb.vcmtype > ARRAY_SIZE(cio2_vcm_types)) {
+ dev_warn(&adev->dev, "Unknown VCM type %d\n",
+ sensor->ssdb.vcmtype);
+ sensor->ssdb.vcmtype = 0;
+ }
+
status = acpi_get_physical_device_location(adev->handle, &sensor->pld);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ ret = -ENODEV;
goto err_put_adev;
+ }
if (sensor->ssdb.lanes > CIO2_MAX_LANES) {
dev_err(&adev->dev,
@@ -265,6 +322,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
sensor->adev = acpi_dev_get(adev);
adev->fwnode.secondary = fwnode;
+ cio2_bridge_instantiate_vcm_i2c_client(sensor);
+
dev_info(&cio2->dev, "Found supported sensor %s\n",
acpi_dev_name(adev));
@@ -304,6 +363,40 @@ err_unregister_sensors:
return ret;
}
+/*
+ * The VCM cannot be probed until the PMIC is completely set up. We cannot rely
+ * on -EPROBE_DEFER for this, since the consumer<->supplier relations between
+ * the VCM and regulators/clks are not described in ACPI; instead they are
+ * passed as board-data to the PMIC drivers. Since -EPROBE_DEFER does not work
+ * for the clks/regulators, the VCM i2c-clients must not be instantiated until
+ * the PMIC is fully set up.
+ *
+ * The sensor/VCM ACPI device has an ACPI _DEP on the PMIC; check this using the
+ * acpi_dev_ready_for_enumeration() helper, like the i2c-core-acpi code does
+ * for the sensors.
+ */
+static int cio2_bridge_sensors_are_ready(void)
+{
+ struct acpi_device *adev;
+ bool ready = true;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cio2_supported_sensors); i++) {
+ const struct cio2_sensor_config *cfg =
+ &cio2_supported_sensors[i];
+
+ for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+ if (!adev->status.enabled)
+ continue;
+
+ if (!acpi_dev_ready_for_enumeration(adev))
+ ready = false;
+ }
+ }
+
+ return ready;
+}
+
int cio2_bridge_init(struct pci_dev *cio2)
{
struct device *dev = &cio2->dev;
@@ -312,6 +405,9 @@ int cio2_bridge_init(struct pci_dev *cio2)
unsigned int i;
int ret;
+ if (!cio2_bridge_sensors_are_ready())
+ return -EPROBE_DEFER;
+
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
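
The -EPROBE_DEFER added to cio2_bridge_init() relies on standard driver-core behavior: a probe routine returning this code is put on the deferred list and retried after other drivers bind, so the PCI probe simply runs again once acpi_dev_ready_for_enumeration() reports the PMIC-dependent devices ready. A minimal sketch of the shape, with a hypothetical demo_dependency_ready() standing in for the bridge's readiness check:

#include <linux/errno.h>
#include <linux/platform_device.h>

static bool demo_dependency_ready(void)
{
	return true;	/* hypothetical; think cio2_bridge_sensors_are_ready() */
}

static int demo_probe(struct platform_device *pdev)
{
	/* -EPROBE_DEFER is not a failure: the driver core re-queues the
	 * device and reruns this probe later. */
	if (!demo_dependency_ready())
		return -EPROBE_DEFER;

	return 0;
}
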
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/drivers/media/pci/intel/ipu3/cio2-bridge.h
index 202c7d494f7a..4418cbd08208 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.h
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.h
@@ -8,6 +8,8 @@
#include "ipu3-cio2.h"
+struct i2c_client;
+
#define CIO2_HID "INT343E"
#define CIO2_MAX_LANES 4
#define MAX_NUM_LINK_FREQS 3
@@ -42,12 +44,19 @@
.properties = _PROPS, \
}
+#define NODE_VCM(_TYPE) \
+ (const struct software_node) { \
+ .name = _TYPE, \
+ }
+
enum cio2_sensor_swnodes {
SWNODE_SENSOR_HID,
SWNODE_SENSOR_PORT,
SWNODE_SENSOR_ENDPOINT,
SWNODE_CIO2_PORT,
SWNODE_CIO2_ENDPOINT,
+ /* Must be last because it is optional / maybe empty */
+ SWNODE_VCM,
SWNODE_COUNT
};
@@ -106,8 +115,10 @@ struct cio2_sensor_config {
struct cio2_sensor {
char name[ACPI_ID_LEN];
struct acpi_device *adev;
+ struct i2c_client *vcm_i2c_client;
- struct software_node swnodes[6];
+ /* SWNODE_COUNT + 1 for terminating empty node */
+ struct software_node swnodes[SWNODE_COUNT + 1];
struct cio2_node_names node_names;
struct cio2_sensor_ssdb ssdb;
@@ -115,10 +126,11 @@ struct cio2_sensor {
struct cio2_property_names prop_names;
struct property_entry ep_properties[5];
- struct property_entry dev_properties[4];
+ struct property_entry dev_properties[5];
struct property_entry cio2_properties[3];
struct software_node_ref_args local_ref[1];
struct software_node_ref_args remote_ref[1];
+ struct software_node_ref_args vcm_ref[1];
};
struct cio2_bridge {
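
The "+ 1" sizing works because the swnodes array is registered as a group: software_node_register_nodes() walks entries until it reaches one with a NULL .name, so the extra all-zero element acts as the terminator, and keeping SWNODE_VCM last lets a sensor without a VCM simply leave that slot empty. A short sketch of the convention, with hypothetical node names:

#include <linux/property.h>

enum { DEMO_SENSOR, DEMO_PORT, DEMO_VCM, DEMO_COUNT };

/* DEMO_COUNT + 1: the last element stays zeroed as the group terminator. */
static struct software_node demo_nodes[DEMO_COUNT + 1] = {
	[DEMO_SENSOR] = { .name = "demo-sensor" },	/* hypothetical */
	[DEMO_PORT]   = { .name = "port0" },		/* hypothetical */
	/* [DEMO_VCM] left zeroed when no VCM is fitted */
};

static int demo_register(void)
{
	/* Registration stops at the first entry whose .name is NULL. */
	return software_node_register_nodes(demo_nodes);
}
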
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
index 356ea966cf8d..0e9b0503b62a 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
@@ -1713,11 +1713,6 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
struct cio2_device *cio2;
int r;
- cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
- if (!cio2)
- return -ENOMEM;
- cio2->pci_dev = pci_dev;
-
/*
* On some platforms no connections to sensors are defined in firmware,
* if the device has no endpoints then we can try to build those as
@@ -1735,6 +1730,11 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
return r;
}
+ cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
+ if (!cio2)
+ return -ENOMEM;
+ cio2->pci_dev = pci_dev;
+
r = pcim_enable_device(pci_dev);
if (r) {
dev_err(dev, "failed to enable device (%d)\n", r);
@@ -1966,12 +1966,19 @@ static int __maybe_unused cio2_suspend(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
struct cio2_queue *q = cio2->cur_queue;
+ int r;
dev_dbg(dev, "cio2 suspend\n");
if (!cio2->streaming)
return 0;
/* Stop stream */
+ r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
+ if (r) {
+ dev_err(dev, "failed to stop sensor streaming\n");
+ return r;
+ }
+
cio2_hw_exit(cio2, q);
synchronize_irq(pci_dev->irq);
@@ -2005,8 +2012,16 @@ static int __maybe_unused cio2_resume(struct device *dev)
}
r = cio2_hw_init(cio2, q);
- if (r)
+ if (r) {
dev_err(dev, "fail to init cio2 hw\n");
+ return r;
+ }
+
+ r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
+ if (r) {
+ dev_err(dev, "fail to start sensor streaming\n");
+ cio2_hw_exit(cio2, q);
+ }
return r;
}
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 4cefdb2e4d40..9e13a7128a53 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -48,12 +48,6 @@ struct snd_ivtv_card *to_snd_ivtv_card(struct v4l2_device *v4l2_dev)
return to_ivtv(v4l2_dev)->alsa;
}
-static inline
-struct snd_ivtv_card *p_to_snd_ivtv_card(struct v4l2_device **v4l2_dev)
-{
- return container_of(v4l2_dev, struct snd_ivtv_card, v4l2_dev);
-}
-
static void snd_ivtv_card_free(struct snd_ivtv_card *itvsc)
{
if (itvsc == NULL)
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 2c43ebf83966..00ac94d4ab19 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -42,7 +42,7 @@
/* card parameters */
static int ivtvfb_card_id = -1;
-static int ivtvfb_debug = 0;
+static int ivtvfb_debug;
static bool ivtvfb_force_pat = IS_ENABLED(CONFIG_VIDEO_FB_IVTV_FORCE_PAT);
static bool osd_laced;
static int osd_depth;
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index c0bc86793355..0d51bdf01f43 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -685,12 +685,6 @@ static void pt3_remove(struct pci_dev *pdev)
for (i = PT3_NUM_FE - 1; i >= 0; i--)
pt3_cleanup_adapter(pt3, i);
i2c_del_adapter(&pt3->i2c_adap);
- kfree(pt3->i2c_buf);
- pci_iounmap(pt3->pdev, pt3->regs[0]);
- pci_iounmap(pt3->pdev, pt3->regs[1]);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- kfree(pt3);
}
static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -704,14 +698,14 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_read_config_byte(pdev, PCI_REVISION_ID, &rev) || rev != 1)
return -ENODEV;
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret < 0)
return -ENODEV;
pci_set_master(pdev);
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2), DRV_NAME);
if (ret < 0)
- goto err_disable_device;
+ return ret;
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret == 0)
@@ -722,42 +716,32 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
else {
dev_err(&pdev->dev, "Failed to set DMA mask\n");
- goto err_release_regions;
+ return ret;
}
dev_info(&pdev->dev, "Use 32bit DMA\n");
}
- pt3 = kzalloc(sizeof(*pt3), GFP_KERNEL);
- if (!pt3) {
- ret = -ENOMEM;
- goto err_release_regions;
- }
+ pt3 = devm_kzalloc(&pdev->dev, sizeof(*pt3), GFP_KERNEL);
+ if (!pt3)
+ return -ENOMEM;
pci_set_drvdata(pdev, pt3);
pt3->pdev = pdev;
mutex_init(&pt3->lock);
- pt3->regs[0] = pci_ioremap_bar(pdev, 0);
- pt3->regs[1] = pci_ioremap_bar(pdev, 2);
- if (pt3->regs[0] == NULL || pt3->regs[1] == NULL) {
- dev_err(&pdev->dev, "Failed to ioremap\n");
- ret = -ENOMEM;
- goto err_kfree;
- }
+ pt3->regs[0] = pcim_iomap_table(pdev)[0];
+ pt3->regs[1] = pcim_iomap_table(pdev)[2];
ver = ioread32(pt3->regs[0] + REG_VERSION);
if ((ver >> 16) != 0x0301) {
dev_warn(&pdev->dev, "PT%d, I/F-ver.:%d not supported\n",
ver >> 24, (ver & 0x00ff0000) >> 16);
- ret = -ENODEV;
- goto err_iounmap;
+ return -ENODEV;
}
pt3->num_bufs = clamp_val(num_bufs, MIN_DATA_BUFS, MAX_DATA_BUFS);
- pt3->i2c_buf = kmalloc(sizeof(*pt3->i2c_buf), GFP_KERNEL);
- if (pt3->i2c_buf == NULL) {
- ret = -ENOMEM;
- goto err_iounmap;
- }
+ pt3->i2c_buf = devm_kmalloc(&pdev->dev, sizeof(*pt3->i2c_buf), GFP_KERNEL);
+ if (!pt3->i2c_buf)
+ return -ENOMEM;
i2c = &pt3->i2c_adap;
i2c->owner = THIS_MODULE;
i2c->algo = &pt3_i2c_algo;
@@ -767,7 +751,7 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i2c_set_adapdata(i2c, pt3);
ret = i2c_add_adapter(i2c);
if (ret < 0)
- goto err_i2cbuf;
+ return ret;
for (i = 0; i < PT3_NUM_FE; i++) {
ret = pt3_alloc_adapter(pt3, i);
@@ -799,21 +783,7 @@ err_cleanup_adapters:
while (i >= 0)
pt3_cleanup_adapter(pt3, i--);
i2c_del_adapter(i2c);
-err_i2cbuf:
- kfree(pt3->i2c_buf);
-err_iounmap:
- if (pt3->regs[0])
- pci_iounmap(pdev, pt3->regs[0]);
- if (pt3->regs[1])
- pci_iounmap(pdev, pt3->regs[1]);
-err_kfree:
- kfree(pt3);
-err_release_regions:
- pci_release_regions(pdev);
-err_disable_device:
- pci_disable_device(pdev);
return ret;
-
}
static const struct pci_device_id pt3_id_table[] = {
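
The pt3 rework leans on managed resources: after pcim_enable_device() the device is in managed mode, so the BAR mappings from pcim_iomap_regions(), the devm_kzalloc()/devm_kmalloc() buffers and the enable itself are all torn down automatically on probe failure or unbind, which is what lets the err_iounmap/err_release_regions/err_disable_device unwind labels disappear. A condensed sketch of the managed pattern, using a hypothetical "demo" driver name:

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);	/* no pci_disable_device() needed */
	if (ret)
		return ret;

	/* Request + map BAR 0; released automatically on any failure path. */
	ret = pcim_iomap_regions(pdev, BIT(0), "demo");
	if (ret)
		return ret;

	regs = pcim_iomap_table(pdev)[0];
	(void)regs;	/* ... program the hardware; early returns just work ... */
	return 0;
}
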
diff --git a/drivers/media/pci/saa7134/saa7134-go7007.c b/drivers/media/pci/saa7134/saa7134-go7007.c
index f319edb39c0e..da83893ffee9 100644
--- a/drivers/media/pci/saa7134/saa7134-go7007.c
+++ b/drivers/media/pci/saa7134/saa7134-go7007.c
@@ -56,11 +56,6 @@ struct saa7134_go7007 {
dma_addr_t bottom_dma;
};
-static inline struct saa7134_go7007 *to_state(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct saa7134_go7007, sd);
-}
-
static const struct go7007_board_info board_voyager = {
.flags = 0,
.sensor_flags = GO7007_SENSOR_656 |
@@ -385,7 +380,7 @@ MODULE_FIRMWARE("go7007/go7007tv.bin");
static int saa7134_go7007_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
{
#if 0
- struct saa7134_go7007 *saa = to_state(sd);
+ struct saa7134_go7007 *saa = container_of(sd, struct saa7134_go7007, sd);
struct saa7134_dev *dev = saa->dev;
return saa7134_s_std_internal(dev, NULL, norm);
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index 2214c74bbbf1..3947701cd6c7 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -284,7 +284,12 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
hexium_set_input(hexium, 0);
hexium->cur_input = 0;
- saa7146_vv_init(dev, &vv_data);
+ ret = saa7146_vv_init(dev, &vv_data);
+ if (ret) {
+ i2c_del_adapter(&hexium->i2c_adapter);
+ kfree(hexium);
+ return ret;
+ }
vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index 39d14c179d22..2eb4bee16b71 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -355,10 +355,16 @@ static struct saa7146_ext_vv vv_data;
static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
struct hexium *hexium = (struct hexium *) dev->ext_priv;
+ int ret;
DEB_EE("\n");
- saa7146_vv_init(dev, &vv_data);
+ ret = saa7146_vv_init(dev, &vv_data);
+ if (ret) {
+ pr_err("Error in saa7146_vv_init()\n");
+ return ret;
+ }
+
vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c
index 73fc901ecf3d..7ded8f5b05cb 100644
--- a/drivers/media/pci/saa7146/mxb.c
+++ b/drivers/media/pci/saa7146/mxb.c
@@ -340,7 +340,7 @@ static int mxb_init_done(struct saa7146_dev* dev)
struct tuner_setup tun_setup;
v4l2_std_id std = V4L2_STD_PAL_BG;
- int i = 0, err = 0;
+ int i, err = 0;
/* mute audio on tea6420s */
tea6420_route(mxb, 6);
@@ -349,7 +349,6 @@ static int mxb_init_done(struct saa7146_dev* dev)
saa7111a_call(mxb, video, s_std, std);
/* select tuner-output on saa7111a */
- i = 0;
saa7111a_call(mxb, video, s_routing, SAA7115_COMPOSITE0,
SAA7111_FMT_CCIR, 0);
@@ -683,10 +682,16 @@ static struct saa7146_ext_vv vv_data;
static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
struct mxb *mxb;
+ int ret;
DEB_EE("dev:%p\n", dev);
- saa7146_vv_init(dev, &vv_data);
+ ret = saa7146_vv_init(dev, &vv_data);
+ if (ret) {
+ ERR("Error in saa7146_vv_init()");
+ return ret;
+ }
+
if (mxb_probe(dev)) {
saa7146_vv_release(dev);
return -1;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 0abcad4e84fa..7766cadb73ea 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -391,12 +391,12 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
}
/* Extract values from VOP header - VE_STATUSxx */
-static inline int vop_interlaced(const vop_header *vh)
+static inline __always_unused int vop_interlaced(const vop_header *vh)
{
return (__le32_to_cpu((*vh)[0]) >> 30) & 1;
}
-static inline u8 vop_channel(const vop_header *vh)
+static inline __always_unused u8 vop_channel(const vop_header *vh)
{
return (__le32_to_cpu((*vh)[0]) >> 24) & 0x1F;
}
@@ -411,12 +411,12 @@ static inline u32 vop_mpeg_size(const vop_header *vh)
return __le32_to_cpu((*vh)[0]) & 0xFFFFF;
}
-static inline u8 vop_hsize(const vop_header *vh)
+static inline u8 __always_unused vop_hsize(const vop_header *vh)
{
return (__le32_to_cpu((*vh)[1]) >> 8) & 0xFF;
}
-static inline u8 vop_vsize(const vop_header *vh)
+static inline u8 __always_unused vop_vsize(const vop_header *vh)
{
return __le32_to_cpu((*vh)[1]) & 0xFF;
}
@@ -436,12 +436,12 @@ static inline u32 vop_jpeg_size(const vop_header *vh)
return __le32_to_cpu((*vh)[4]) & 0xFFFFF;
}
-static inline u32 vop_sec(const vop_header *vh)
+static inline u32 __always_unused vop_sec(const vop_header *vh)
{
return __le32_to_cpu((*vh)[5]);
}
-static inline u32 vop_usec(const vop_header *vh)
+static inline __always_unused u32 vop_usec(const vop_header *vh)
{
return __le32_to_cpu((*vh)[6]);
}
diff --git a/drivers/media/pci/tw5864/tw5864-core.c b/drivers/media/pci/tw5864/tw5864-core.c
index 23d3cae54a5d..5cae73e6fb9c 100644
--- a/drivers/media/pci/tw5864/tw5864-core.c
+++ b/drivers/media/pci/tw5864/tw5864-core.c
@@ -333,11 +333,10 @@ static void tw5864_finidev(struct pci_dev *pci_dev)
/* release resources */
iounmap(dev->mmio);
- release_mem_region(pci_resource_start(pci_dev, 0),
- pci_resource_len(pci_dev, 0));
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
v4l2_device_unregister(&dev->v4l2_dev);
- devm_kfree(&pci_dev->dev, dev);
}
static struct pci_driver tw5864_pci_driver = {
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index cf4adc64c953..9fbdba0fd1e7 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -492,6 +492,18 @@ config VIDEO_STI_DELTA_DRIVER
endif # VIDEO_STI_DELTA
+config VIDEO_STM32_DMA2D
+ tristate "STM32 Chrom-Art Accelerator (DMA2D)"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_STM32 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Enables DMA2D hardware support on STM32.
+
+ The STM32 DMA2D is a memory-to-memory engine for pixel conversion
+ and specialized DMA dedicated to image manipulation.
+
config VIDEO_RENESAS_FDP1
tristate "Renesas Fine Display Processor"
depends on VIDEO_DEV && VIDEO_V4L2
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index a148553babfc..19bcbced7382 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel/
obj-$(CONFIG_VIDEO_ATMEL_XISC) += atmel/
obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32/
+obj-$(CONFIG_VIDEO_STM32_DMA2D) += stm32/
obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index c8156da33043..4a3d06c70e34 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -2815,7 +2815,7 @@ static void allegro_buf_queue(struct vb2_buffer *vb)
unsigned int i;
for (i = 0; i < vb->num_planes; i++)
- vb->planes[i].bytesused = 0;
+ vb2_set_plane_payload(vb, i, 0);
vbuf->field = V4L2_FIELD_NONE;
vbuf->sequence = channel->csequence++;
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index cad3f97515ae..7a24daf7165a 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -539,6 +539,10 @@ static void aspeed_video_enable_mode_detect(struct aspeed_video *video)
aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
VE_INTERRUPT_MODE_DETECT);
+ /* Disable mode detect in order to re-trigger */
+ aspeed_video_update(video, VE_SEQ_CTRL,
+ VE_SEQ_CTRL_TRIG_MODE_DET, 0);
+
/* Trigger mode detect */
aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_TRIG_MODE_DET);
}
@@ -591,6 +595,8 @@ static void aspeed_video_irq_res_change(struct aspeed_video *video, ulong delay)
set_bit(VIDEO_RES_CHANGE, &video->flags);
clear_bit(VIDEO_FRAME_INPRG, &video->flags);
+ video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+
aspeed_video_off(video);
aspeed_video_bufs_done(video, VB2_BUF_STATE_ERROR);
@@ -824,10 +830,6 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
return;
}
- /* Disable mode detect in order to re-trigger */
- aspeed_video_update(video, VE_SEQ_CTRL,
- VE_SEQ_CTRL_TRIG_MODE_DET, 0);
-
aspeed_video_check_and_set_polarity(video);
aspeed_video_enable_mode_detect(video);
@@ -1375,7 +1377,6 @@ static void aspeed_video_resolution_work(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct aspeed_video *video = container_of(dwork, struct aspeed_video,
res_work);
- u32 input_status = video->v4l2_input_status;
aspeed_video_on(video);
@@ -1388,8 +1389,7 @@ static void aspeed_video_resolution_work(struct work_struct *work)
aspeed_video_get_resolution(video);
if (video->detected_timings.width != video->active_timings.width ||
- video->detected_timings.height != video->active_timings.height ||
- input_status != video->v4l2_input_status) {
+ video->detected_timings.height != video->active_timings.height) {
static const struct v4l2_event ev = {
.type = V4L2_EVENT_SOURCE_CHANGE,
.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 0e312b0842d7..3cd47ba26357 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -43,7 +43,7 @@
#define CODA_NAME "coda"
#define CODADX6_MAX_INSTANCES 4
-#define CODA_MAX_FORMATS 4
+#define CODA_MAX_FORMATS 5
#define CODA_ISRAM_SIZE (2048 * 2)
@@ -247,6 +247,7 @@ static const struct coda_video_device coda9_jpeg_encoder = {
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
V4L2_PIX_FMT_YUV422P,
+ V4L2_PIX_FMT_GREY,
},
.dst_formats = {
V4L2_PIX_FMT_JPEG,
@@ -626,6 +627,11 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height * 2;
break;
+ case V4L2_PIX_FMT_GREY:
+ /* keep 16 pixel alignment of 8-bit pixel data */
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+ break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_MPEG4:
@@ -1537,11 +1543,13 @@ static void coda_pic_run_work(struct work_struct *work)
if (!wait_for_completion_timeout(&ctx->completion,
msecs_to_jiffies(1000))) {
- dev_err(dev->dev, "CODA PIC_RUN timeout\n");
+ if (ctx->use_bit) {
+ dev_err(dev->dev, "CODA PIC_RUN timeout\n");
- ctx->hold = true;
+ ctx->hold = true;
- coda_hw_reset(ctx);
+ coda_hw_reset(ctx);
+ }
if (ctx->ops->run_timeout)
ctx->ops->run_timeout(ctx);
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
index b11cfbe166dd..a72f4655e5ad 100644
--- a/drivers/media/platform/coda/coda-jpeg.c
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -1127,7 +1127,8 @@ static int coda9_jpeg_prepare_encode(struct coda_ctx *ctx)
coda_write(dev, 0, CODA9_REG_JPEG_GBU_BT_PTR);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_WD_PTR);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBSR);
- coda_write(dev, 0, CODA9_REG_JPEG_BBC_STRM_CTRL);
+ coda_write(dev, BIT(31) | ((end_addr - start_addr - header_len) / 256),
+ CODA9_REG_JPEG_BBC_STRM_CTRL);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_CTRL);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_FF_RPTR);
coda_write(dev, 127, CODA9_REG_JPEG_GBU_BBER);
@@ -1257,6 +1258,23 @@ static void coda9_jpeg_finish_encode(struct coda_ctx *ctx)
coda_hw_reset(ctx);
}
+static void coda9_jpeg_encode_timeout(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ u32 end_addr, wr_ptr;
+
+ /* Handle missing BBC overflow interrupt via timeout */
+ end_addr = coda_read(dev, CODA9_REG_JPEG_BBC_END_ADDR);
+ wr_ptr = coda_read(dev, CODA9_REG_JPEG_BBC_WR_PTR);
+ if (wr_ptr >= end_addr - 256) {
+ v4l2_err(&dev->v4l2_dev, "JPEG too large for capture buffer\n");
+ coda9_jpeg_finish_encode(ctx);
+ return;
+ }
+
+ coda_hw_reset(ctx);
+}
+
static void coda9_jpeg_release(struct coda_ctx *ctx)
{
int i;
@@ -1276,6 +1294,7 @@ const struct coda_context_ops coda9_jpeg_encode_ops = {
.start_streaming = coda9_jpeg_start_encoding,
.prepare_run = coda9_jpeg_prepare_encode,
.finish_run = coda9_jpeg_finish_encode,
+ .run_timeout = coda9_jpeg_encode_timeout,
.release = coda9_jpeg_release,
};
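
The new run_timeout hook tells a real hang apart from a bitstream-buffer overflow whose interrupt never arrived: if the BBC write pointer has advanced into the last 256-byte chunk of the capture buffer, the encode is reported as "JPEG too large" and finished normally instead of resetting the core. The 256-byte margin plausibly corresponds to the /256 granularity programmed into CODA9_REG_JPEG_BBC_STRM_CTRL above, though the patch does not say so. A plain-C sketch of the check with assumed register snapshots:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the coda9_jpeg_encode_timeout() heuristic. */
static bool bbc_overflowed(uint32_t wr_ptr, uint32_t end_addr)
{
	return wr_ptr >= end_addr - 256;
}

int main(void)
{
	/* Assumed snapshot: a 64 KiB bitstream buffer that filled up. */
	uint32_t end_addr = 0x10000, wr_ptr = 0xff40;

	puts(bbc_overflowed(wr_ptr, end_addr) ?
	     "JPEG too large for capture buffer" : "genuine timeout");
	return 0;
}
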
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 6996d4571e36..00643f37b3e6 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -287,7 +287,11 @@ static int vdoa_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "DMA enable failed\n");
+ return ret;
+ }
vdoa = devm_kzalloc(&pdev->dev, sizeof(*vdoa), GFP_KERNEL);
if (!vdoa)
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index 91b571a0ac2c..32f7ef547c82 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -45,7 +45,7 @@ static const struct platform_device_id vpbe_osd_devtype[] = {
MODULE_DEVICE_TABLE(platform, vpbe_osd_devtype);
/* register access routines */
-static inline u32 osd_read(struct osd_state *sd, u32 offset)
+static inline u32 __always_unused osd_read(struct osd_state *sd, u32 offset)
{
struct osd_state *osd = sd;
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index ae92e2c206d0..8fe55374c5a3 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -619,17 +619,6 @@ static void vpif_calculate_offsets(struct channel_obj *ch)
}
/**
- * vpif_get_default_field() - Get default field type based on interface
- * @iface: ptr to vpif interface
- */
-static inline enum v4l2_field vpif_get_default_field(
- struct vpif_interface *iface)
-{
- return (iface->if_type == VPIF_IF_RAW_BAYER) ? V4L2_FIELD_NONE :
- V4L2_FIELD_INTERLACED;
-}
-
-/**
* vpif_config_addr() - function to configure buffer address in vpif
* @ch: channel ptr
* @muxmode: channel mux mode
@@ -1478,7 +1467,6 @@ probe_out:
for (k = 0; k < j; k++) {
/* Get the pointer to the channel object */
ch = vpif_obj.dev[k];
- common = &ch->common[k];
/* Unregister video device */
video_unregister_device(&ch->video_dev);
}
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c
index 723b096fedd1..689ae5e6ac62 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/imx-pxp.c
@@ -211,6 +211,7 @@ struct pxp_ctx {
/* Processing mode */
int mode;
u8 alpha_component;
+ u8 rotation;
enum v4l2_colorspace colorspace;
enum v4l2_xfer_func xfer_func;
@@ -767,14 +768,20 @@ static int pxp_start(struct pxp_ctx *ctx, struct vb2_v4l2_buffer *in_vb,
V4L2_BUF_FLAG_BFRAME |
V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
- /* Rotation disabled, 8x8 block size */
+ /* 8x8 block size */
ctrl = BF_PXP_CTRL_VFLIP0(!!(ctx->mode & MEM2MEM_VFLIP)) |
- BF_PXP_CTRL_HFLIP0(!!(ctx->mode & MEM2MEM_HFLIP));
+ BF_PXP_CTRL_HFLIP0(!!(ctx->mode & MEM2MEM_HFLIP)) |
+ BF_PXP_CTRL_ROTATE0(ctx->rotation);
/* Always write alpha value as V4L2_CID_ALPHA_COMPONENT */
out_ctrl = BF_PXP_OUT_CTRL_ALPHA(ctx->alpha_component) |
BF_PXP_OUT_CTRL_ALPHA_OUTPUT(1) |
pxp_v4l2_pix_fmt_to_out_format(dst_fourcc);
out_buf = p_out;
+
+ if (ctx->rotation == BV_PXP_CTRL_ROTATE0__ROT_90 ||
+ ctx->rotation == BV_PXP_CTRL_ROTATE0__ROT_270)
+ swap(dst_width, dst_height);
+
switch (dst_fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
@@ -1297,6 +1304,21 @@ static int pxp_s_fmt_vid_out(struct file *file, void *priv,
return 0;
}
+static u8 pxp_degrees_to_rot_mode(u32 degrees)
+{
+ switch (degrees) {
+ case 90:
+ return BV_PXP_CTRL_ROTATE0__ROT_90;
+ case 180:
+ return BV_PXP_CTRL_ROTATE0__ROT_180;
+ case 270:
+ return BV_PXP_CTRL_ROTATE0__ROT_270;
+ case 0:
+ default:
+ return BV_PXP_CTRL_ROTATE0__ROT_0;
+ }
+}
+
static int pxp_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct pxp_ctx *ctx =
@@ -1317,6 +1339,10 @@ static int pxp_s_ctrl(struct v4l2_ctrl *ctrl)
ctx->mode &= ~MEM2MEM_VFLIP;
break;
+ case V4L2_CID_ROTATE:
+ ctx->rotation = pxp_degrees_to_rot_mode(ctrl->val);
+ break;
+
case V4L2_CID_ALPHA_COMPONENT:
ctx->alpha_component = ctrl->val;
break;
@@ -1524,6 +1550,7 @@ static int pxp_open(struct file *file)
v4l2_ctrl_handler_init(hdl, 4);
v4l2_ctrl_new_std(hdl, &pxp_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(hdl, &pxp_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &pxp_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
v4l2_ctrl_new_std(hdl, &pxp_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
0, 255, 1, 255);
if (hdl->error) {
@@ -1659,6 +1686,8 @@ static int pxp_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+ spin_lock_init(&dev->irqlock);
+
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, pxp_irq_handler,
IRQF_ONESHOT, dev_name(&pdev->dev), dev);
if (ret < 0) {
@@ -1676,8 +1705,6 @@ static int pxp_probe(struct platform_device *pdev)
goto err_clk;
}
- spin_lock_init(&dev->irqlock);
-
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
goto err_clk;
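
The control registered with v4l2_ctrl_new_std(..., V4L2_CID_ROTATE, 0, 270, 90, 0) accepts only 0, 90, 180 and 270; pxp_degrees_to_rot_mode() maps the degree value onto the PXP CTRL rotate field, and the 90/270 cases also swap the output width and height. A hedged userspace sketch of exercising the new control, assuming /dev/video0 is the PXP mem2mem node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_ROTATE,
		.value = 90,	/* step 90: only 0/90/180/270 are valid */
	};
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl))
		perror("VIDIOC_S_CTRL(V4L2_CID_ROTATE)");
	close(fd);
	return 0;
}
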
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index b61b9d9551af..03dcf8bf705e 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -139,13 +139,6 @@ struct cafe_camera {
*/
#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
-static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
-{
- struct mcam_camera *m = container_of(dev, struct mcam_camera, v4l2_dev);
- return container_of(m, struct cafe_camera, mcam);
-}
-
-
static int cafe_smbus_write_done(struct mcam_camera *mcam)
{
unsigned long flags;
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index af994b9913a6..f332beb06d51 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -1361,10 +1361,8 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
}
jpeg_irq = platform_get_irq(pdev, 0);
- if (jpeg_irq < 0) {
- dev_err(&pdev->dev, "Failed to get jpeg_irq %d.\n", jpeg_irq);
+ if (jpeg_irq < 0)
return jpeg_irq;
- }
ret = devm_request_irq(&pdev->dev, jpeg_irq,
jpeg->variant->irq_handler, 0, pdev->name, jpeg);
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
index 976aa1f4829b..3d38793aaa25 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -148,7 +148,7 @@ static int mtk_mdp_probe(struct platform_device *pdev)
continue;
}
- comp_type = (enum mtk_mdp_comp_type)of_id->data;
+ comp_type = (uintptr_t)of_id->data;
comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (!comp) {
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
index 46783516b84a..e08886a600a3 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -11,6 +11,7 @@
#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
+#define VCODEC_DEC_ALIGNED_64 64
#define VCODEC_CAPABILITY_4K_DISABLED 0x10
#define VCODEC_DEC_4K_CODED_WIDTH 4096U
#define VCODEC_DEC_4K_CODED_HEIGHT 2304U
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index e6e6a8203eeb..40c39e1e596b 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -28,9 +28,6 @@
#define VDEC_IRQ_CLR 0x10
#define VDEC_IRQ_CFG_REG 0xa4
-module_param(mtk_v4l2_dbg_level, int, 0644);
-module_param(mtk_vcodec_dbg, bool, 0644);
-
/* Wake up context wait_queue */
static void wake_up_ctx(struct mtk_vcodec_ctx *ctx)
{
@@ -358,6 +355,8 @@ err_media_reg:
if (dev->vdec_pdata->uses_stateless_api)
v4l2_m2m_unregister_media_controller(dev->m2m_dev_dec);
err_reg_cont:
+ if (dev->vdec_pdata->uses_stateless_api)
+ media_device_cleanup(&dev->mdev_dec);
destroy_workqueue(dev->decode_workqueue);
err_event_workq:
v4l2_m2m_release(dev->m2m_dev_dec);
@@ -390,7 +389,6 @@ static int mtk_vcodec_dec_remove(struct platform_device *pdev)
{
struct mtk_vcodec_dev *dev = platform_get_drvdata(pdev);
- flush_workqueue(dev->decode_workqueue);
destroy_workqueue(dev->decode_workqueue);
if (media_devnode_is_registered(dev->mdev_dec.devnode)) {
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c
index 8f4a1f0a0769..3d9f47555884 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c
@@ -133,7 +133,6 @@ static struct vdec_fb *vdec_get_cap_buffer(struct mtk_vcodec_ctx *ctx,
struct vdec_fb *pfb = &framebuf->frame_buffer;
struct vb2_buffer *dst_buf = &vb2_v4l2->vb2_buf;
- pfb = &framebuf->frame_buffer;
pfb->base_y.va = NULL;
pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
pfb->base_y.size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[0];
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 7457451ebff0..ffb046eec610 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -104,6 +104,13 @@ static int vidioc_venc_s_ctrl(struct v4l2_ctrl *ctrl)
p->gop_size = ctrl->val;
ctx->param_change |= MTK_ENCODE_PARAM_GOP_SIZE;
break;
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ /*
+ * FIXME - what vp8 profiles are actually supported?
+ * The ctrl is added (with only profile 0 supported) for now.
+ */
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_VP8_PROFILE val = %d", ctrl->val);
+ break;
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME");
p->force_intra = 1;
@@ -257,87 +264,82 @@ static struct mtk_q_data *mtk_venc_get_q_data(struct mtk_vcodec_ctx *ctx,
return &ctx->q_data[MTK_Q_DATA_DST];
}
+static void vidioc_try_fmt_cap(struct v4l2_format *f)
+{
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.num_planes = 1;
+ f->fmt.pix_mp.plane_fmt[0].bytesperline = 0;
+ f->fmt.pix_mp.flags = 0;
+}
+
/* V4L2 specification suggests the driver corrects the format struct if any of
* the dimensions is unsupported
*/
-static int vidioc_try_fmt(struct mtk_vcodec_ctx *ctx, struct v4l2_format *f,
- const struct mtk_video_fmt *fmt)
+static int vidioc_try_fmt_out(struct mtk_vcodec_ctx *ctx, struct v4l2_format *f,
+ const struct mtk_video_fmt *fmt)
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ int tmp_w, tmp_h;
+ unsigned int max_width, max_height;
pix_fmt_mp->field = V4L2_FIELD_NONE;
- if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- pix_fmt_mp->num_planes = 1;
- pix_fmt_mp->plane_fmt[0].bytesperline = 0;
- } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- int tmp_w, tmp_h;
- unsigned int max_width, max_height;
-
- if (ctx->dev->enc_capability & MTK_VENC_4K_CAPABILITY_ENABLE) {
- max_width = MTK_VENC_4K_MAX_W;
- max_height = MTK_VENC_4K_MAX_H;
- } else {
- max_width = MTK_VENC_HD_MAX_W;
- max_height = MTK_VENC_HD_MAX_H;
- }
+ if (ctx->dev->enc_capability & MTK_VENC_4K_CAPABILITY_ENABLE) {
+ max_width = MTK_VENC_4K_MAX_W;
+ max_height = MTK_VENC_4K_MAX_H;
+ } else {
+ max_width = MTK_VENC_HD_MAX_W;
+ max_height = MTK_VENC_HD_MAX_H;
+ }
- pix_fmt_mp->height = clamp(pix_fmt_mp->height,
- MTK_VENC_MIN_H,
- max_height);
- pix_fmt_mp->width = clamp(pix_fmt_mp->width,
- MTK_VENC_MIN_W,
- max_width);
+ pix_fmt_mp->height = clamp(pix_fmt_mp->height, MTK_VENC_MIN_H, max_height);
+ pix_fmt_mp->width = clamp(pix_fmt_mp->width, MTK_VENC_MIN_W, max_width);
- /* find next closer width align 16, heign align 32, size align
- * 64 rectangle
- */
- tmp_w = pix_fmt_mp->width;
- tmp_h = pix_fmt_mp->height;
- v4l_bound_align_image(&pix_fmt_mp->width,
- MTK_VENC_MIN_W,
- max_width, 4,
- &pix_fmt_mp->height,
- MTK_VENC_MIN_H,
- max_height, 5, 6);
-
- if (pix_fmt_mp->width < tmp_w &&
- (pix_fmt_mp->width + 16) <= max_width)
- pix_fmt_mp->width += 16;
- if (pix_fmt_mp->height < tmp_h &&
- (pix_fmt_mp->height + 32) <= max_height)
- pix_fmt_mp->height += 32;
-
- mtk_v4l2_debug(0,
- "before resize width=%d, height=%d, after resize width=%d, height=%d, sizeimage=%d %d",
- tmp_w, tmp_h, pix_fmt_mp->width,
- pix_fmt_mp->height,
- pix_fmt_mp->plane_fmt[0].sizeimage,
- pix_fmt_mp->plane_fmt[1].sizeimage);
-
- pix_fmt_mp->num_planes = fmt->num_planes;
- pix_fmt_mp->plane_fmt[0].sizeimage =
- pix_fmt_mp->width * pix_fmt_mp->height +
- ((ALIGN(pix_fmt_mp->width, 16) * 2) * 16);
- pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
-
- if (pix_fmt_mp->num_planes == 2) {
- pix_fmt_mp->plane_fmt[1].sizeimage =
- (pix_fmt_mp->width * pix_fmt_mp->height) / 2 +
- (ALIGN(pix_fmt_mp->width, 16) * 16);
- pix_fmt_mp->plane_fmt[2].sizeimage = 0;
- pix_fmt_mp->plane_fmt[1].bytesperline =
- pix_fmt_mp->width;
- pix_fmt_mp->plane_fmt[2].bytesperline = 0;
- } else if (pix_fmt_mp->num_planes == 3) {
- pix_fmt_mp->plane_fmt[1].sizeimage =
- pix_fmt_mp->plane_fmt[2].sizeimage =
- (pix_fmt_mp->width * pix_fmt_mp->height) / 4 +
- ((ALIGN(pix_fmt_mp->width, 16) / 2) * 16);
- pix_fmt_mp->plane_fmt[1].bytesperline =
- pix_fmt_mp->plane_fmt[2].bytesperline =
- pix_fmt_mp->width / 2;
- }
+ /* find the next closest rectangle: width aligned to 16, height aligned
+ * to 32, size aligned to 64
+ */
+ tmp_w = pix_fmt_mp->width;
+ tmp_h = pix_fmt_mp->height;
+ v4l_bound_align_image(&pix_fmt_mp->width,
+ MTK_VENC_MIN_W,
+ max_width, 4,
+ &pix_fmt_mp->height,
+ MTK_VENC_MIN_H,
+ max_height, 5, 6);
+
+ if (pix_fmt_mp->width < tmp_w && (pix_fmt_mp->width + 16) <= max_width)
+ pix_fmt_mp->width += 16;
+ if (pix_fmt_mp->height < tmp_h && (pix_fmt_mp->height + 32) <= max_height)
+ pix_fmt_mp->height += 32;
+
+ mtk_v4l2_debug(0, "before resize w=%d, h=%d, after resize w=%d, h=%d, sizeimage=%d %d",
+ tmp_w, tmp_h, pix_fmt_mp->width,
+ pix_fmt_mp->height,
+ pix_fmt_mp->plane_fmt[0].sizeimage,
+ pix_fmt_mp->plane_fmt[1].sizeimage);
+
+ pix_fmt_mp->num_planes = fmt->num_planes;
+ pix_fmt_mp->plane_fmt[0].sizeimage =
+ pix_fmt_mp->width * pix_fmt_mp->height +
+ ((ALIGN(pix_fmt_mp->width, 16) * 2) * 16);
+ pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
+
+ if (pix_fmt_mp->num_planes == 2) {
+ pix_fmt_mp->plane_fmt[1].sizeimage =
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 2 +
+ (ALIGN(pix_fmt_mp->width, 16) * 16);
+ pix_fmt_mp->plane_fmt[2].sizeimage = 0;
+ pix_fmt_mp->plane_fmt[1].bytesperline =
+ pix_fmt_mp->width;
+ pix_fmt_mp->plane_fmt[2].bytesperline = 0;
+ } else if (pix_fmt_mp->num_planes == 3) {
+ pix_fmt_mp->plane_fmt[1].sizeimage =
+ pix_fmt_mp->plane_fmt[2].sizeimage =
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 4 +
+ ((ALIGN(pix_fmt_mp->width, 16) / 2) * 16);
+ pix_fmt_mp->plane_fmt[1].bytesperline =
+ pix_fmt_mp->plane_fmt[2].bytesperline =
+ pix_fmt_mp->width / 2;
}
pix_fmt_mp->flags = 0;
@@ -398,7 +400,7 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
struct vb2_queue *vq;
- struct mtk_q_data *q_data;
+ struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
int i, ret;
const struct mtk_video_fmt *fmt;
@@ -413,12 +415,6 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
return -EBUSY;
}
- q_data = mtk_venc_get_q_data(ctx, f->type);
- if (!q_data) {
- mtk_v4l2_err("fail to get q data");
- return -EINVAL;
- }
-
fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
if (!fmt) {
fmt = &ctx->dev->venc_pdata->capture_formats[0];
@@ -426,9 +422,7 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
}
q_data->fmt = fmt;
- ret = vidioc_try_fmt(ctx, f, q_data->fmt);
- if (ret)
- return ret;
+ vidioc_try_fmt_cap(f);
q_data->coded_width = f->fmt.pix_mp.width;
q_data->coded_height = f->fmt.pix_mp.height;
@@ -461,7 +455,7 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
struct vb2_queue *vq;
- struct mtk_q_data *q_data;
+ struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
int ret, i;
const struct mtk_video_fmt *fmt;
@@ -476,19 +470,13 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
return -EBUSY;
}
- q_data = mtk_venc_get_q_data(ctx, f->type);
- if (!q_data) {
- mtk_v4l2_err("fail to get q data");
- return -EINVAL;
- }
-
fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
if (!fmt) {
fmt = &ctx->dev->venc_pdata->output_formats[0];
f->fmt.pix.pixelformat = fmt->fourcc;
}
- ret = vidioc_try_fmt(ctx, f, fmt);
+ ret = vidioc_try_fmt_out(ctx, f, fmt);
if (ret)
return ret;
@@ -521,14 +509,13 @@ static int vidioc_venc_g_fmt(struct file *file, void *priv,
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
struct vb2_queue *vq;
- struct mtk_q_data *q_data;
+ struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
int i;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (!vq)
return -EINVAL;
- q_data = mtk_venc_get_q_data(ctx, f->type);
pix->width = q_data->coded_width;
pix->height = q_data->coded_height;
@@ -566,7 +553,9 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
f->fmt.pix_mp.quantization = ctx->quantization;
f->fmt.pix_mp.xfer_func = ctx->xfer_func;
- return vidioc_try_fmt(ctx, f, fmt);
+ vidioc_try_fmt_cap(f);
+
+ return 0;
}
static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
@@ -588,22 +577,18 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
- return vidioc_try_fmt(ctx, f, fmt);
+ return vidioc_try_fmt_out(ctx, f, fmt);
}
static int vidioc_venc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
- struct mtk_q_data *q_data;
+ struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, s->type);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
- q_data = mtk_venc_get_q_data(ctx, s->type);
- if (!q_data)
- return -EINVAL;
-
switch (s->target) {
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
@@ -629,15 +614,11 @@ static int vidioc_venc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
- struct mtk_q_data *q_data;
+ struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, s->type);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
- q_data = mtk_venc_get_q_data(ctx, s->type);
- if (!q_data)
- return -EINVAL;
-
switch (s->target) {
case V4L2_SEL_TGT_CROP:
/* Only support crop from (0,0) */
@@ -804,11 +785,9 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
struct device *alloc_devs[])
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
- struct mtk_q_data *q_data;
+ struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, vq->type);
unsigned int i;
- q_data = mtk_venc_get_q_data(ctx, vq->type);
-
if (q_data == NULL)
return -EINVAL;
@@ -828,11 +807,9 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
static int vb2ops_venc_buf_prepare(struct vb2_buffer *vb)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct mtk_q_data *q_data;
+ struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, vb->vb2_queue->type);
int i;
- q_data = mtk_venc_get_q_data(ctx, vb->vb2_queue->type);
-
for (i = 0; i < q_data->fmt->num_planes; i++) {
if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
mtk_v4l2_err("data will not fit into plane %d (%lu < %d)",
@@ -959,7 +936,7 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
- dst_buf->vb2_buf.planes[0].bytesused = 0;
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
}
/* STREAMOFF on the CAPTURE queue completes any ongoing flush */
@@ -1068,7 +1045,7 @@ static int mtk_venc_encode_header(void *priv)
NULL, &bs_buf, &enc_result);
if (ret) {
- dst_buf->vb2_buf.planes[0].bytesused = 0;
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
ctx->state = MTK_STATE_ABORT;
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
mtk_v4l2_err("venc_if_encode failed=%d", ret);
@@ -1083,7 +1060,7 @@ static int mtk_venc_encode_header(void *priv)
}
ctx->state = MTK_STATE_HEADER;
- dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, enc_result.bs_size);
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
return 0;
@@ -1232,12 +1209,12 @@ static void mtk_venc_worker(struct work_struct *work)
if (ret) {
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
- dst_buf->vb2_buf.planes[0].bytesused = 0;
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
mtk_v4l2_err("venc_if_encode failed=%d", ret);
} else {
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
- dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, enc_result.bs_size);
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
enc_result.bs_size);
@@ -1395,6 +1372,9 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
h264_max_level,
0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+ v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_0, 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
+
if (handler->error) {
mtk_v4l2_err("Init control handler fail %d",
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index eed67394cf46..aeaecb8d416e 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -23,9 +23,6 @@
#include "mtk_vcodec_util.h"
#include "mtk_vcodec_fw.h"
-module_param(mtk_v4l2_dbg_level, int, S_IRUGO | S_IWUSR);
-module_param(mtk_vcodec_dbg, bool, S_IRUGO | S_IWUSR);
-
static const struct mtk_video_fmt mtk_video_formats_output[] = {
{
.fourcc = V4L2_PIX_FMT_NV12M,
@@ -214,11 +211,11 @@ static int fops_vcodec_release(struct file *file)
mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
mutex_lock(&dev->dev_mutex);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
mtk_vcodec_enc_release(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
list_del_init(&ctx->list);
kfree(ctx);
@@ -461,7 +458,6 @@ static int mtk_vcodec_enc_remove(struct platform_device *pdev)
struct mtk_vcodec_dev *dev = platform_get_drvdata(pdev);
mtk_v4l2_debug_enter();
- flush_workqueue(dev->encode_workqueue);
destroy_workqueue(dev->encode_workqueue);
if (dev->m2m_dev_enc)
v4l2_m2m_release(dev->m2m_dev_enc);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index ac5973b6735f..5bac820a47fc 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -10,16 +10,6 @@
#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_util.h"
-/* For encoder, this will enable logs in venc/*/
-bool mtk_vcodec_dbg;
-EXPORT_SYMBOL(mtk_vcodec_dbg);
-
-/* The log level of v4l2 encoder or decoder driver.
- * That is, files under mtk-vcodec/.
- */
-int mtk_v4l2_dbg_level;
-EXPORT_SYMBOL(mtk_v4l2_dbg_level);
-
void __iomem *mtk_vcodec_get_reg_addr(struct mtk_vcodec_ctx *data,
unsigned int reg_idx)
{
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
index b999d7b84ed1..87c3d6d4bfa7 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
@@ -25,54 +25,29 @@ struct mtk_vcodec_fb {
struct mtk_vcodec_ctx;
struct mtk_vcodec_dev;
-extern int mtk_v4l2_dbg_level;
-extern bool mtk_vcodec_dbg;
-
+#undef pr_fmt
+#define pr_fmt(fmt) "%s(),%d: " fmt, __func__, __LINE__
#define mtk_v4l2_err(fmt, args...) \
- pr_err("[MTK_V4L2][ERROR] %s:%d: " fmt "\n", __func__, __LINE__, \
- ##args)
-
-#define mtk_vcodec_err(h, fmt, args...) \
- pr_err("[MTK_VCODEC][ERROR][%d]: %s() " fmt "\n", \
- ((struct mtk_vcodec_ctx *)h->ctx)->id, __func__, ##args)
+ pr_err("[MTK_V4L2][ERROR] " fmt "\n", ##args)
+#define mtk_vcodec_err(h, fmt, args...) \
+ pr_err("[MTK_VCODEC][ERROR][%d]: " fmt "\n", \
+ ((struct mtk_vcodec_ctx *)(h)->ctx)->id, ##args)
-#if defined(DEBUG)
-#define mtk_v4l2_debug(level, fmt, args...) \
- do { \
- if (mtk_v4l2_dbg_level >= level) \
- pr_info("[MTK_V4L2] level=%d %s(),%d: " fmt "\n",\
- level, __func__, __LINE__, ##args); \
- } while (0)
+#define mtk_v4l2_debug(level, fmt, args...) pr_debug(fmt, ##args)
#define mtk_v4l2_debug_enter() mtk_v4l2_debug(3, "+")
#define mtk_v4l2_debug_leave() mtk_v4l2_debug(3, "-")
-#define mtk_vcodec_debug(h, fmt, args...) \
- do { \
- if (mtk_vcodec_dbg) \
- pr_info("[MTK_VCODEC][%d]: %s() " fmt "\n", \
- ((struct mtk_vcodec_ctx *)h->ctx)->id, \
- __func__, ##args); \
- } while (0)
+#define mtk_vcodec_debug(h, fmt, args...) \
+ pr_debug("[MTK_VCODEC][%d]: " fmt "\n", \
+ ((struct mtk_vcodec_ctx *)(h)->ctx)->id, ##args)
#define mtk_vcodec_debug_enter(h) mtk_vcodec_debug(h, "+")
#define mtk_vcodec_debug_leave(h) mtk_vcodec_debug(h, "-")
-#else
-
-#define mtk_v4l2_debug(level, fmt, args...) {}
-#define mtk_v4l2_debug_enter() {}
-#define mtk_v4l2_debug_leave() {}
-
-#define mtk_vcodec_debug(h, fmt, args...) {}
-#define mtk_vcodec_debug_enter(h) {}
-#define mtk_vcodec_debug_leave(h) {}
-
-#endif
-
void __iomem *mtk_vcodec_get_reg_addr(struct mtk_vcodec_ctx *data,
unsigned int reg_idx);
int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
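
The header rework above retires the driver-private mtk_v4l2_dbg_level and mtk_vcodec_dbg knobs in favour of pr_debug(), so logging is driven by the kernel's dynamic debug facility, while the redefined pr_fmt() stamps each message with function and line. A minimal sketch of the same pattern, assuming CONFIG_DYNAMIC_DEBUG so callsites can be enabled at runtime:

    /* pr_fmt() must be defined before the printk helpers are pulled in. */
    #define pr_fmt(fmt) "%s(),%d: " fmt, __func__, __LINE__

    #include <linux/printk.h>

    static void demo_queue_ctx(int id)    /* hypothetical */
    {
            /*
             * Silent by default; enable per-callsite at runtime with e.g.
             * echo 'func demo_queue_ctx +p' > /sys/kernel/debug/dynamic_debug/control
             */
            pr_debug("ctx %d queued\n", id);
    }
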
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c
index 946c23088308..fada4d146703 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c
@@ -527,7 +527,7 @@ static int alloc_mv_buf(struct vdec_h264_slice_inst *inst,
struct mtk_vcodec_mem *mem = NULL;
unsigned int buf_sz = get_mv_buf_size(pic->buf_w, pic->buf_h);
- mtk_v4l2_debug(3, "size = 0x%lx", buf_sz);
+ mtk_v4l2_debug(3, "size = 0x%x", buf_sz);
for (i = 0; i < H264_MAX_MV_NUM; i++) {
mem = &inst->mv_buf[i];
if (mem->va)
@@ -562,8 +562,8 @@ static void get_pic_info(struct vdec_h264_slice_inst *inst,
{
struct mtk_vcodec_ctx *ctx = inst->ctx;
- ctx->picinfo.buf_w = (ctx->picinfo.pic_w + 15) & 0xFFFFFFF0;
- ctx->picinfo.buf_h = (ctx->picinfo.pic_h + 31) & 0xFFFFFFE0;
+ ctx->picinfo.buf_w = ALIGN(ctx->picinfo.pic_w, VCODEC_DEC_ALIGNED_64);
+ ctx->picinfo.buf_h = ALIGN(ctx->picinfo.pic_h, VCODEC_DEC_ALIGNED_64);
ctx->picinfo.fb_sz[0] = ctx->picinfo.buf_w * ctx->picinfo.buf_h;
ctx->picinfo.fb_sz[1] = ctx->picinfo.fb_sz[0] >> 1;
inst->vsi_ctx.dec.cap_num_planes =
@@ -637,7 +637,7 @@ static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
if (err)
goto error_deinit;
- mtk_vcodec_debug(inst, "struct size = %d,%d,%d,%d\n",
+ mtk_vcodec_debug(inst, "struct size = %zu,%zu,%zu,%zu\n",
sizeof(struct mtk_h264_sps_param),
sizeof(struct mtk_h264_pps_param),
sizeof(struct mtk_h264_dec_slice_param),
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index b6a4f2074fa5..bf03888a824f 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -367,8 +367,7 @@ static int h264_encode_sps(struct venc_h264_inst *inst,
mtk_vcodec_debug_enter(inst);
- ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_SPS, NULL,
- bs_buf, bs_size, NULL);
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_SPS, NULL, bs_buf, NULL);
if (ret)
return ret;
@@ -394,8 +393,7 @@ static int h264_encode_pps(struct venc_h264_inst *inst,
mtk_vcodec_debug_enter(inst);
- ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_PPS, NULL,
- bs_buf, bs_size, NULL);
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_PPS, NULL, bs_buf, NULL);
if (ret)
return ret;
@@ -451,8 +449,7 @@ static int h264_encode_frame(struct venc_h264_inst *inst,
mtk_vcodec_debug(inst, "frm_count = %d,skip_frm_count =%d,frm_type=%d.\n",
frame_info.frm_count, frame_info.skip_frm_count,
frame_info.frm_type);
- ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_FRAME, frm_buf,
- bs_buf, bs_size, &frame_info);
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_FRAME, frm_buf, bs_buf, &frame_info);
if (ret)
return ret;
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index 8267a9c4fd25..6b66957d5192 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -302,8 +302,7 @@ static int vp8_enc_encode_frame(struct venc_vp8_inst *inst,
mtk_vcodec_debug(inst, "->frm_cnt=%d", inst->frm_cnt);
- ret = vpu_enc_encode(&inst->vpu_inst, 0, frm_buf, bs_buf, bs_size,
- NULL);
+ ret = vpu_enc_encode(&inst->vpu_inst, 0, frm_buf, bs_buf, NULL);
if (ret)
return ret;
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
index be6d8790a41e..e7899d8a3e4e 100644
--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
@@ -225,7 +225,6 @@ int vpu_enc_set_param(struct venc_vpu_inst *vpu,
int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
- unsigned int *bs_size,
struct venc_frame_info *frame_info)
{
const bool is_ext = MTK_ENC_CTX_IS_EXT(vpu->ctx);
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.h b/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
index f9be9cab7ff7..f83bc1b3f2bf 100644
--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
@@ -45,7 +45,6 @@ int vpu_enc_set_param(struct venc_vpu_inst *vpu,
int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
- unsigned int *bs_size,
struct venc_frame_info *frame_info);
int vpu_enc_deinit(struct venc_vpu_inst *vpu);
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index 7f1647da0ade..7bd715fc844d 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -964,10 +964,8 @@ static int mtk_vpu_remove(struct platform_device *pdev)
#ifdef CONFIG_DEBUG_FS
debugfs_remove(vpu_debugfs);
#endif
- if (vpu->wdt.wq) {
- flush_workqueue(vpu->wdt.wq);
+ if (vpu->wdt.wq)
destroy_workqueue(vpu->wdt.wq);
- }
vpu_free_ext_mem(vpu, P_FW);
vpu_free_ext_mem(vpu, D_FW);
mutex_destroy(&vpu->vpu_mutex);
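
Both flush_workqueue() removals (here and in mtk_vcodec_enc_drv.c above) lean on destroy_workqueue() draining all pending and in-flight work itself before tearing the queue down, so the explicit flush was redundant. Sketch, with demo_wq standing in for the driver's queue:

    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;    /* hypothetical */

    static void demo_teardown(void)
    {
            /*
             * destroy_workqueue() drains the queue itself; no separate
             * flush_workqueue() call is needed beforehand.
             */
            if (demo_wq)
                    destroy_workqueue(demo_wq);
    }
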
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 6de377ce281d..4c937f3f323e 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -476,7 +476,8 @@ void omap3isp_hist_dma_done(struct isp_device *isp)
}
}
-static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
+static inline void __maybe_unused isp_isr_dbg(struct isp_device *isp,
+ u32 irqstatus)
{
static const char *name[] = {
"CSIA_IRQ",
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index a65cfdfa9637..de7b116d0122 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -21,7 +21,7 @@
#include "ispstat.h"
#define IS_OUT_OF_BOUNDS(value, min, max) \
- (((value) < (min)) || ((value) > (max)))
+ ((((unsigned int)value) < (min)) || (((unsigned int)value) > (max)))
static void h3a_af_setup_regs(struct ispstat *af, void *priv)
{
diff --git a/drivers/media/platform/omap3isp/omap3isp.h b/drivers/media/platform/omap3isp/omap3isp.h
index 214f94c46a9d..4a003c8263ed 100644
--- a/drivers/media/platform/omap3isp/omap3isp.h
+++ b/drivers/media/platform/omap3isp/omap3isp.h
@@ -95,6 +95,8 @@ struct isp_csiphy_lanes_cfg {
* ISP_CCP2_PHY_DATA_CLOCK - Data/clock physical layer
* ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
* @vpclk_div: Video port output clock control
+ * @vp_clk_pol: Video port output clock polarity
+ * @lanecfg: CCP2/CSI2 lane configuration
*/
struct isp_ccp2_cfg {
unsigned int strobe_clk_pol:1;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
index 5c083d70d495..f524af712a84 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
@@ -191,13 +191,6 @@ static u32 vfe_hw_version(struct vfe_device *vfe)
return hw_version;
}
-static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
-{
- u32 bits = readl_relaxed(vfe->base + reg);
-
- writel_relaxed(bits & ~clr_bits, vfe->base + reg);
-}
-
static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index f5fa81896012..877eca125803 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -350,11 +350,11 @@ static int venus_probe(struct platform_device *pdev)
ret = venus_firmware_init(core);
if (ret)
- goto err_runtime_disable;
+ goto err_of_depopulate;
ret = venus_boot(core);
if (ret)
- goto err_runtime_disable;
+ goto err_firmware_deinit;
ret = hfi_core_resume(core, true);
if (ret)
@@ -386,6 +386,10 @@ err_dev_unregister:
v4l2_device_unregister(&core->v4l2_dev);
err_venus_shutdown:
venus_shutdown(core);
+err_firmware_deinit:
+ venus_firmware_deinit(core);
+err_of_depopulate:
+ of_platform_depopulate(dev);
err_runtime_disable:
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
@@ -473,7 +477,8 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
err_video_path:
icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0);
err_cpucfg_path:
- pm_ops->core_power(core, POWER_ON);
+ if (pm_ops->core_power)
+ pm_ops->core_power(core, POWER_ON);
return ret;
}
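
The probe fix gives venus_firmware_init() and venus_boot() their own unwind labels, so a failure after either step now also deinitializes the firmware and depopulates the child devices instead of jumping straight to the runtime-PM cleanup. The general ladder shape, reduced to a sketch with hypothetical step_*_init()/exit() helpers:

    int step_a_init(void);      /* hypothetical helpers */
    void step_a_exit(void);
    int step_b_init(void);
    void step_b_exit(void);
    int step_c_init(void);

    static int demo_probe(void)
    {
            int ret;

            ret = step_a_init();
            if (ret)
                    return ret;

            ret = step_b_init();
            if (ret)
                    goto err_a;

            ret = step_c_init();
            if (ret)
                    goto err_b;     /* unwind in reverse order of setup */

            return 0;

    err_b:
            step_b_exit();
    err_a:
            step_a_exit();
            return ret;
    }
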
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index cedc664ba755..cb48c5ff3dee 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -163,14 +163,12 @@ static u32 load_per_type(struct venus_core *core, u32 session_type)
struct venus_inst *inst = NULL;
u32 mbs_per_sec = 0;
- mutex_lock(&core->lock);
list_for_each_entry(inst, &core->instances, list) {
if (inst->session_type != session_type)
continue;
mbs_per_sec += load_per_instance(inst);
}
- mutex_unlock(&core->lock);
return mbs_per_sec;
}
@@ -219,14 +217,12 @@ static int load_scale_bw(struct venus_core *core)
struct venus_inst *inst = NULL;
u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
- mutex_lock(&core->lock);
list_for_each_entry(inst, &core->instances, list) {
mbs_per_sec = load_per_instance(inst);
mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
total_avg += avg;
total_peak += peak;
}
- mutex_unlock(&core->lock);
/*
* keep minimum bandwidth vote for "video-mem" path,
@@ -253,8 +249,9 @@ static int load_scale_v1(struct venus_inst *inst)
struct device *dev = core->dev;
u32 mbs_per_sec;
unsigned int i;
- int ret;
+ int ret = 0;
+ mutex_lock(&core->lock);
mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
load_per_type(core, VIDC_SESSION_TYPE_DEC);
@@ -279,17 +276,19 @@ set_freq:
if (ret) {
dev_err(dev, "failed to set clock rate %lu (%d)\n",
freq, ret);
- return ret;
+ goto exit;
}
ret = load_scale_bw(core);
if (ret) {
dev_err(dev, "failed to set bandwidth (%d)\n",
ret);
- return ret;
+ goto exit;
}
- return 0;
+exit:
+ mutex_unlock(&core->lock);
+ return ret;
}
static int core_get_v1(struct venus_core *core)
@@ -587,8 +586,8 @@ min_loaded_core(struct venus_inst *inst, u32 *min_coreid, u32 *min_load, bool lo
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
vpp_freq = inst_pos->clk_data.vpp_freq;
else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
- vpp_freq = low_power ? inst_pos->clk_data.vpp_freq :
- inst_pos->clk_data.low_power_freq;
+ vpp_freq = low_power ? inst_pos->clk_data.low_power_freq :
+ inst_pos->clk_data.vpp_freq;
else
continue;
@@ -1116,13 +1115,13 @@ static int load_scale_v4(struct venus_inst *inst)
struct device *dev = core->dev;
unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
unsigned long filled_len = 0;
- int i, ret;
+ int i, ret = 0;
for (i = 0; i < inst->num_input_bufs; i++)
filled_len = max(filled_len, inst->payloads[i]);
if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
- return 0;
+ return ret;
freq = calculate_inst_freq(inst, filled_len);
inst->clk_data.freq = freq;
@@ -1138,7 +1137,6 @@ static int load_scale_v4(struct venus_inst *inst)
freq_core2 += inst->clk_data.freq;
}
}
- mutex_unlock(&core->lock);
freq = max(freq_core1, freq_core2);
@@ -1163,17 +1161,19 @@ set_freq:
if (ret) {
dev_err(dev, "failed to set clock rate %lu (%d)\n",
freq, ret);
- return ret;
+ goto exit;
}
ret = load_scale_bw(core);
if (ret) {
dev_err(dev, "failed to set bandwidth (%d)\n",
ret);
- return ret;
+ goto exit;
}
- return 0;
+exit:
+ mutex_unlock(&core->lock);
+ return ret;
}
static const struct venus_pm_ops pm_ops_v4 = {
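
The locking rework hoists core->lock out of load_per_type() and load_scale_bw() into load_scale_v1()/load_scale_v4(), so summing the instance loads and programming clock and bandwidth happen under one critical section, and the early returns become goto exit so the mutex is released on every path. The resulting shape, sketched with hypothetical sum_loads()/apply_rate()/apply_bw() helpers:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(core_lock);

    int sum_loads(void);        /* hypothetical; caller must hold core_lock */
    int apply_rate(int load);   /* hypothetical */
    int apply_bw(void);         /* hypothetical */

    static int demo_load_scale(void)
    {
            int ret;

            mutex_lock(&core_lock);

            ret = sum_loads();
            if (ret < 0)
                    goto exit;

            ret = apply_rate(ret);
            if (ret)
                    goto exit;

            ret = apply_bw();
    exit:
            mutex_unlock(&core_lock);
            return ret;
    }
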
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 1d92cc8ede8f..0186ae235113 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -349,12 +349,10 @@ out:
static void rvin_group_notifier_cleanup(struct rvin_dev *vin)
{
- mutex_lock(&vin->group->lock);
if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
v4l2_async_nf_unregister(&vin->group->notifier);
v4l2_async_nf_cleanup(&vin->group->notifier);
}
- mutex_unlock(&vin->group->lock);
}
static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port,
@@ -1620,6 +1618,7 @@ static SIMPLE_DEV_PM_OPS(rvin_pm_ops, rvin_suspend, rvin_resume);
static struct platform_driver rcar_vin_driver = {
.driver = {
.name = "rcar-vin",
+ .suppress_bind_attrs = true,
.pm = &rvin_pm_ops,
.of_match_table = rvin_of_id_table,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index 11848d0c4a55..8c939cb3073d 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -542,16 +542,27 @@ static int rcsi2_wait_phy_start(struct rcar_csi2 *priv,
static int rcsi2_set_phypll(struct rcar_csi2 *priv, unsigned int mbps)
{
const struct rcsi2_mbps_reg *hsfreq;
+ const struct rcsi2_mbps_reg *hsfreq_prev = NULL;
- for (hsfreq = priv->info->hsfreqrange; hsfreq->mbps != 0; hsfreq++)
+ if (mbps < priv->info->hsfreqrange->mbps)
+ dev_warn(priv->dev, "%u Mbps less than min PHY speed %u Mbps",
+ mbps, priv->info->hsfreqrange->mbps);
+
+ for (hsfreq = priv->info->hsfreqrange; hsfreq->mbps != 0; hsfreq++) {
if (hsfreq->mbps >= mbps)
break;
+ hsfreq_prev = hsfreq;
+ }
if (!hsfreq->mbps) {
dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps);
return -ERANGE;
}
+ if (hsfreq_prev &&
+ ((mbps - hsfreq_prev->mbps) <= (hsfreq->mbps - mbps)))
+ hsfreq = hsfreq_prev;
+
rcsi2_write(priv, PHYPLL_REG, PHYPLL_HSFREQRANGE(hsfreq->reg));
return 0;
@@ -1097,10 +1108,17 @@ static int rcsi2_phtw_write_mbps(struct rcar_csi2 *priv, unsigned int mbps,
const struct rcsi2_mbps_reg *values, u16 code)
{
const struct rcsi2_mbps_reg *value;
+ const struct rcsi2_mbps_reg *prev_value = NULL;
- for (value = values; value->mbps; value++)
+ for (value = values; value->mbps; value++) {
if (value->mbps >= mbps)
break;
+ prev_value = value;
+ }
+
+ if (prev_value &&
+ ((mbps - prev_value->mbps) <= (value->mbps - mbps)))
+ value = prev_value;
if (!value->mbps) {
dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps);
@@ -1498,6 +1516,7 @@ static struct platform_driver rcar_csi2_pdrv = {
.probe = rcsi2_probe,
.driver = {
.name = "rcar-csi2",
+ .suppress_bind_attrs = true,
.of_match_table = rcar_csi2_of_table,
},
};
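
Both PHY rate lookups previously took the first table entry with mbps greater than or equal to the requested rate; they now remember the previous entry and pick whichever neighbour is numerically closer, and rcsi2_set_phypll() additionally warns when the rate falls below the table's minimum. The selection logic is simple enough to check in a standalone sketch with a made-up table:

    #include <stdio.h>

    struct mbps_reg { unsigned int mbps; unsigned int reg; };

    /* Hypothetical table; like the driver's, terminated by mbps == 0. */
    static const struct mbps_reg tbl[] = {
            { 80, 0x0 }, { 160, 0x1 }, { 320, 0x2 }, { 0, 0 },
    };

    static const struct mbps_reg *pick(unsigned int mbps)
    {
            const struct mbps_reg *v, *prev = NULL;

            for (v = tbl; v->mbps; v++) {
                    if (v->mbps >= mbps)
                            break;
                    prev = v;
            }
            if (!v->mbps)
                    return NULL;    /* above the table: unsupported */
            /* Prefer the closer of the two neighbouring entries. */
            if (prev && mbps - prev->mbps <= v->mbps - mbps)
                    return prev;
            return v;
    }

    int main(void)
    {
            printf("200 Mbps -> %u Mbps entry\n", pick(200)->mbps); /* 160 */
            return 0;
    }
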
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 25ead9333d00..8136bc75e7c4 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -111,6 +111,9 @@
#define VNIE_FIE (1 << 4)
#define VNIE_EFE (1 << 1)
+/* Video n Interrupt Status Register bits */
+#define VNINTS_FIS (1 << 4)
+
/* Video n Data Mode Register bits */
#define VNDMR_A8BIT(n) (((n) & 0xff) << 24)
#define VNDMR_A8BIT_MASK (0xff << 24)
@@ -1005,6 +1008,10 @@ static irqreturn_t rvin_irq(int irq, void *data)
rvin_ack_interrupt(vin);
handled = 1;
+ /* Nothing to do if nothing was captured. */
+ if (!(int_status & VNINTS_FIS))
+ goto done;
+
/* Nothing to do if capture status is 'STOPPED' */
if (vin->state == STOPPED) {
vin_dbg(vin, "IRQ while state stopped\n");
@@ -1371,6 +1378,16 @@ void rvin_stop_streaming(struct rvin_dev *vin)
spin_unlock_irqrestore(&vin->qlock, flags);
+ /* If something went wrong, free buffers with an error. */
+ if (!buffersFreed) {
+ return_unused_buffers(vin, VB2_BUF_STATE_ERROR);
+ for (i = 0; i < HW_BUFFER_NUM; i++) {
+ if (vin->buf_hw[i].buffer)
+ vb2_buffer_done(&vin->buf_hw[i].buffer->vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+ }
+
rvin_set_stream(vin, 0);
/* disable interrupts */
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index a5bfa76fdac6..2e60b9fce03b 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -179,20 +179,27 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
break;
}
- /* HW limit width to a multiple of 32 (2^5) for NV12/16 else 2 (2^1) */
+ /* Hardware limits width alignment based on format. */
switch (pix->pixelformat) {
+ /* Multiple of 32 (2^5) for NV12/16. */
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
walign = 5;
break;
- default:
+ /* Multiple of 2 (2^1) for YUV. */
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
walign = 1;
break;
+ /* No multiple for RGB. */
+ default:
+ walign = 0;
+ break;
}
/* Limit to VIN capabilities */
- v4l_bound_align_image(&pix->width, 2, vin->info->max_width, walign,
- &pix->height, 4, vin->info->max_height, 2, 0);
+ v4l_bound_align_image(&pix->width, 5, vin->info->max_width, walign,
+ &pix->height, 2, vin->info->max_height, 0, 0);
pix->bytesperline = rvin_format_bytesperline(vin, pix);
pix->sizeimage = rvin_format_sizeimage(pix);
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 19de3c19bcca..37ecf489d112 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2287,11 +2287,10 @@ static int fdp1_probe(struct platform_device *pdev)
return PTR_ERR(fdp1->regs);
/* Interrupt service routine registration */
- fdp1->irq = ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot find IRQ\n");
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
return ret;
- }
+ fdp1->irq = ret;
ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
dev_name(&pdev->dev), fdp1);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 50b166c49a03..3f5cfa7eb937 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -462,7 +462,7 @@ static void rkisp1_debug_init(struct rkisp1_device *rkisp1)
{
struct rkisp1_debug *debug = &rkisp1->debug;
- debug->debugfs_dir = debugfs_create_dir(RKISP1_DRIVER_NAME, NULL);
+ debug->debugfs_dir = debugfs_create_dir(dev_name(rkisp1->dev), NULL);
debugfs_create_ulong("data_loss", 0444, debug->debugfs_dir,
&debug->data_loss);
debugfs_create_ulong("outform_size_err", 0444, debug->debugfs_dir,
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 32892ab359ee..6e8ef86566b7 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -291,7 +291,6 @@ static void camif_unregister_media_entities(struct camif_dev *camif)
{
camif_unregister_video_nodes(camif);
camif_unregister_sensor(camif);
- s3c_camif_unregister_subdev(camif);
}
/*
@@ -520,6 +519,7 @@ static int s3c_camif_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
camif_clk_put(camif);
+ s3c_camif_unregister_subdev(camif);
pdata->gpio_put();
return 0;
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index ebdfd24e9cd5..a8d9159d5ed8 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2425,17 +2425,17 @@ static int s5p_jpeg_job_ready(void *priv)
return 1;
}
-static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
+static const struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
.device_run = s5p_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
};
-static struct v4l2_m2m_ops exynos3250_jpeg_m2m_ops = {
+static const struct v4l2_m2m_ops exynos3250_jpeg_m2m_ops = {
.device_run = exynos3250_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
};
-static struct v4l2_m2m_ops exynos4_jpeg_m2m_ops = {
+static const struct v4l2_m2m_ops exynos4_jpeg_m2m_ops = {
.device_run = exynos4_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
};
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 8473a019bb5f..4a5fb1b15455 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -142,7 +142,7 @@ struct s5p_jpeg_variant {
unsigned int hw3250_compat:1;
unsigned int htbl_reinit:1;
unsigned int hw_ex4_compat:1;
- struct v4l2_m2m_ops *m2m_ops;
+ const struct v4l2_m2m_ops *m2m_ops;
irqreturn_t (*jpeg_irq)(int irq, void *priv);
const char *clk_names[JPEG_MAX_CLOCKS];
int num_clocks;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index fc85e4e2d020..f6732f031e96 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1185,7 +1185,6 @@ static int s5p_mfc_configure_common_memory(struct s5p_mfc_dev *mfc_dev)
{
struct device *dev = &mfc_dev->plat_dev->dev;
unsigned long mem_size = SZ_4M;
- unsigned int bitmap_size;
if (IS_ENABLED(CONFIG_DMA_CMA) || exynos_is_iommu_available(dev))
mem_size = SZ_8M;
@@ -1193,16 +1192,14 @@ static int s5p_mfc_configure_common_memory(struct s5p_mfc_dev *mfc_dev)
if (mfc_mem_size)
mem_size = memparse(mfc_mem_size, NULL);
- bitmap_size = BITS_TO_LONGS(mem_size >> PAGE_SHIFT) * sizeof(long);
-
- mfc_dev->mem_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ mfc_dev->mem_bitmap = bitmap_zalloc(mem_size >> PAGE_SHIFT, GFP_KERNEL);
if (!mfc_dev->mem_bitmap)
return -ENOMEM;
mfc_dev->mem_virt = dma_alloc_coherent(dev, mem_size,
&mfc_dev->mem_base, GFP_KERNEL);
if (!mfc_dev->mem_virt) {
- kfree(mfc_dev->mem_bitmap);
+ bitmap_free(mfc_dev->mem_bitmap);
dev_err(dev, "failed to preallocate %ld MiB for the firmware and context buffers\n",
(mem_size / SZ_1M));
return -ENOMEM;
@@ -1241,7 +1238,7 @@ static void s5p_mfc_unconfigure_common_memory(struct s5p_mfc_dev *mfc_dev)
dma_free_coherent(dev, mfc_dev->mem_size, mfc_dev->mem_virt,
mfc_dev->mem_base);
- kfree(mfc_dev->mem_bitmap);
+ bitmap_free(mfc_dev->mem_bitmap);
vb2_dma_contig_clear_max_seg_size(dev);
}
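
bitmap_zalloc() replaces the open-coded BITS_TO_LONGS() size computation and must be paired with bitmap_free() rather than kfree(), keeping the bitmap's allocation layout opaque to the caller. A minimal sketch:

    #include <linux/bitmap.h>
    #include <linux/gfp.h>

    /* One bit per slot; the bitmap comes back zeroed. */
    static unsigned long *demo_alloc_map(unsigned int nbits)
    {
            return bitmap_zalloc(nbits, GFP_KERNEL);
    }

    static void demo_free_map(unsigned long *map)
    {
            bitmap_free(map);       /* pairs with bitmap_zalloc() */
    }
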
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 7d467f2ba072..01ce7b711774 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1394,6 +1394,7 @@ err_filter:
err_pm:
pm_runtime_put(dev);
err_remove:
+ pm_runtime_disable(dev);
bdisp_debugfs_remove(bdisp);
v4l2_device_unregister(&bdisp->v4l2_dev);
err_clk:
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 02dc78bd7fab..7bb1384e4bad 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -925,17 +925,12 @@ static int c8sectpfe_remove(struct platform_device *pdev)
static int configure_channels(struct c8sectpfei *fei)
{
int index = 0, ret;
- struct channel_info *tsin;
struct device_node *child, *np = fei->dev->of_node;
/* iterate round each tsin and configure memdma descriptor and IB hw */
for_each_child_of_node(np, child) {
-
- tsin = fei->channel_data[index];
-
ret = configure_memdma_and_inputblock(fei,
fei->channel_data[index]);
-
if (ret) {
dev_err(fei->dev,
"configure_memdma_and_inputblock failed\n");
@@ -947,10 +942,9 @@ static int configure_channels(struct c8sectpfei *fei)
return 0;
err_unmap:
- for (index = 0; index < fei->tsin_count; index++) {
- tsin = fei->channel_data[index];
- free_input_block(fei, tsin);
- }
+ while (--index >= 0)
+ free_input_block(fei, fei->channel_data[index]);
+
return ret;
}
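
The old error path walked all fei->tsin_count channels, freeing input blocks that configure_memdma_and_inputblock() had never reached; `while (--index >= 0)` unwinds exactly the entries that were set up. The idiom in a standalone sketch with hypothetical setup()/teardown() steps:

    #include <stdio.h>

    #define N 4

    static int setup(int i)     /* hypothetical: fails on the third entry */
    {
            printf("setup %d\n", i);
            return i == 2 ? -1 : 0;
    }

    static void teardown(int i)
    {
            printf("teardown %d\n", i);
    }

    int main(void)
    {
            int index, ret = 0;

            for (index = 0; index < N; index++) {
                    ret = setup(index);
                    if (ret)
                            goto err_unwind;
            }
            return 0;

    err_unwind:
            /* Unwind only what was successfully set up: 1, then 0. */
            while (--index >= 0)
                    teardown(index);
            return ret;
    }
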
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index 15e8f83b1b56..fe4ea2e7f37e 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -385,7 +385,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
- goto err_clk;
+ goto err_disable;
}
/* check IP hardware version */
@@ -403,6 +403,8 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
err_pm:
pm_runtime_put(dev);
+err_disable:
+ pm_runtime_disable(dev);
err_clk:
if (hva->clk)
clk_unprepare(hva->clk);
diff --git a/drivers/media/platform/stm32/Makefile b/drivers/media/platform/stm32/Makefile
index 48b36db2c2e2..896ef98a73ab 100644
--- a/drivers/media/platform/stm32/Makefile
+++ b/drivers/media/platform/stm32/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32-dcmi.o
+stm32-dma2d-objs := dma2d/dma2d.o dma2d/dma2d-hw.o
+obj-$(CONFIG_VIDEO_STM32_DMA2D) += stm32-dma2d.o
diff --git a/drivers/media/platform/stm32/dma2d/dma2d-hw.c b/drivers/media/platform/stm32/dma2d/dma2d-hw.c
new file mode 100644
index 000000000000..ea4cc84d8a39
--- /dev/null
+++ b/drivers/media/platform/stm32/dma2d/dma2d-hw.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ST stm32 Chrom-Art - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2021 Dillon Min
+ * Dillon Min, <dillon.minfei@gmail.com>
+ *
+ * based on s5p-g2d
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ */
+
+#include <linux/io.h>
+
+#include "dma2d.h"
+#include "dma2d-regs.h"
+
+static inline u32 reg_read(void __iomem *base, u32 reg)
+{
+ return readl_relaxed(base + reg);
+}
+
+static inline void reg_write(void __iomem *base, u32 reg, u32 val)
+{
+ writel_relaxed(val, base + reg);
+}
+
+static inline void reg_update_bits(void __iomem *base, u32 reg, u32 mask,
+ u32 val)
+{
+ reg_write(base, reg, (reg_read(base, reg) & ~mask) | val);
+}
+
+void dma2d_start(struct dma2d_dev *d)
+{
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_START, CR_START);
+}
+
+u32 dma2d_get_int(struct dma2d_dev *d)
+{
+ return reg_read(d->regs, DMA2D_ISR_REG);
+}
+
+void dma2d_clear_int(struct dma2d_dev *d)
+{
+ u32 isr_val = reg_read(d->regs, DMA2D_ISR_REG);
+
+ reg_write(d->regs, DMA2D_IFCR_REG, isr_val & 0x003f);
+}
+
+void dma2d_config_common(struct dma2d_dev *d, enum dma2d_op_mode op_mode,
+ u16 width, u16 height)
+{
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_MODE_MASK,
+ op_mode << CR_MODE_SHIFT);
+
+ reg_write(d->regs, DMA2D_NLR_REG, (width << 16) | height);
+}
+
+void dma2d_config_out(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t o_addr)
+{
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_CEIE, CR_CEIE);
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_CTCIE, CR_CTCIE);
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_CAEIE, CR_CAEIE);
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_TCIE, CR_TCIE);
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_TEIE, CR_TEIE);
+
+ if (frm->fmt->cmode >= CM_MODE_ARGB8888 &&
+ frm->fmt->cmode <= CM_MODE_ARGB4444)
+ reg_update_bits(d->regs, DMA2D_OPFCCR_REG, OPFCCR_CM_MASK,
+ frm->fmt->cmode);
+
+ reg_write(d->regs, DMA2D_OMAR_REG, o_addr);
+
+ reg_write(d->regs, DMA2D_OCOLR_REG,
+ (frm->a_rgb[3] << 24) |
+ (frm->a_rgb[2] << 16) |
+ (frm->a_rgb[1] << 8) |
+ frm->a_rgb[0]);
+
+ reg_update_bits(d->regs, DMA2D_OOR_REG, OOR_LO_MASK,
+ frm->line_offset & 0x3fff);
+}
+
+void dma2d_config_fg(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t f_addr)
+{
+ reg_write(d->regs, DMA2D_FGMAR_REG, f_addr);
+ reg_update_bits(d->regs, DMA2D_FGOR_REG, FGOR_LO_MASK,
+ frm->line_offset);
+
+ if (frm->fmt->cmode >= CM_MODE_ARGB8888 &&
+ frm->fmt->cmode <= CM_MODE_A4)
+ reg_update_bits(d->regs, DMA2D_FGPFCCR_REG, FGPFCCR_CM_MASK,
+ frm->fmt->cmode);
+
+ reg_update_bits(d->regs, DMA2D_FGPFCCR_REG, FGPFCCR_AM_MASK,
+ (frm->a_mode << 16) & 0x03);
+
+ reg_update_bits(d->regs, DMA2D_FGPFCCR_REG, FGPFCCR_ALPHA_MASK,
+ frm->a_rgb[3] << 24);
+
+ reg_write(d->regs, DMA2D_FGCOLR_REG,
+ (frm->a_rgb[2] << 16) |
+ (frm->a_rgb[1] << 8) |
+ frm->a_rgb[0]);
+}
+
+void dma2d_config_bg(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t b_addr)
+{
+ reg_write(d->regs, DMA2D_BGMAR_REG, b_addr);
+ reg_update_bits(d->regs, DMA2D_BGOR_REG, BGOR_LO_MASK,
+ frm->line_offset);
+
+ if (frm->fmt->cmode >= CM_MODE_ARGB8888 &&
+ frm->fmt->cmode <= CM_MODE_A4)
+ reg_update_bits(d->regs, DMA2D_BGPFCCR_REG, BGPFCCR_CM_MASK,
+ frm->fmt->cmode);
+
+ reg_update_bits(d->regs, DMA2D_BGPFCCR_REG, BGPFCCR_AM_MASK,
+ (frm->a_mode << 16) & 0x03);
+
+ reg_update_bits(d->regs, DMA2D_BGPFCCR_REG, BGPFCCR_ALPHA_MASK,
+ frm->a_rgb[3] << 24);
+
+ reg_write(d->regs, DMA2D_BGCOLR_REG,
+ (frm->a_rgb[2] << 16) |
+ (frm->a_rgb[1] << 8) |
+ frm->a_rgb[0]);
+}
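
reg_read()/reg_write() wrap readl_relaxed()/writel_relaxed(), and reg_update_bits() layers the usual read-modify-write on top: clear the masked field, then OR in the caller's pre-shifted value. The arithmetic in a standalone sketch, with a plain variable standing in for the register:

    #include <stdio.h>
    #include <stdint.h>

    /* Same shape as the driver helper: val is pre-shifted into the field. */
    static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
    {
            return (reg & ~mask) | val;
    }

    int main(void)
    {
            uint32_t cr = 0x0000ffffu;

            /* Program the MODE field (bits 17:16), leaving the rest intact. */
            cr = update_bits(cr, 0x30000u, 0x2u << 16);
            printf("0x%08x\n", cr);     /* prints 0x0002ffff */
            return 0;
    }
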
diff --git a/drivers/media/platform/stm32/dma2d/dma2d-regs.h b/drivers/media/platform/stm32/dma2d/dma2d-regs.h
new file mode 100644
index 000000000000..6444592d415b
--- /dev/null
+++ b/drivers/media/platform/stm32/dma2d/dma2d-regs.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ST stm32 Chrom-Art - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2021 Dillon Min
+ * Dillon Min, <dillon.minfei@gmail.com>
+ *
+ * based on s5p-g2d
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ */
+
+#ifndef __DMA2D_REGS_H__
+#define __DMA2D_REGS_H__
+
+#define DMA2D_CR_REG 0x0000
+#define CR_MODE_MASK GENMASK(17, 16)
+#define CR_MODE_SHIFT 16
+#define CR_M2M 0x0000
+#define CR_M2M_PFC BIT(16)
+#define CR_M2M_BLEND BIT(17)
+#define CR_R2M (BIT(17) | BIT(16))
+#define CR_CEIE BIT(13)
+#define CR_CTCIE BIT(12)
+#define CR_CAEIE BIT(11)
+#define CR_TWIE BIT(10)
+#define CR_TCIE BIT(9)
+#define CR_TEIE BIT(8)
+#define CR_ABORT BIT(2)
+#define CR_SUSP BIT(1)
+#define CR_START BIT(0)
+
+#define DMA2D_ISR_REG 0x0004
+#define ISR_CEIF BIT(5)
+#define ISR_CTCIF BIT(4)
+#define ISR_CAEIF BIT(3)
+#define ISR_TWIF BIT(2)
+#define ISR_TCIF BIT(1)
+#define ISR_TEIF BIT(0)
+
+#define DMA2D_IFCR_REG 0x0008
+#define IFCR_CCEIF BIT(5)
+#define IFCR_CCTCIF BIT(4)
+#define IFCR_CAECIF BIT(3)
+#define IFCR_CTWIF BIT(2)
+#define IFCR_CTCIF BIT(1)
+#define IFCR_CTEIF BIT(0)
+
+#define DMA2D_FGMAR_REG 0x000c
+#define DMA2D_FGOR_REG 0x0010
+#define FGOR_LO_MASK GENMASK(13, 0)
+
+#define DMA2D_BGMAR_REG 0x0014
+#define DMA2D_BGOR_REG 0x0018
+#define BGOR_LO_MASK GENMASK(13, 0)
+
+#define DMA2D_FGPFCCR_REG 0x001c
+#define FGPFCCR_ALPHA_MASK GENMASK(31, 24)
+#define FGPFCCR_AM_MASK GENMASK(17, 16)
+#define FGPFCCR_CS_MASK GENMASK(15, 8)
+#define FGPFCCR_START BIT(5)
+#define FGPFCCR_CCM_RGB888 BIT(4)
+#define FGPFCCR_CM_MASK GENMASK(3, 0)
+
+#define DMA2D_FGCOLR_REG 0x0020
+#define FGCOLR_REG_MASK GENMASK(23, 16)
+#define FGCOLR_GREEN_MASK GENMASK(15, 8)
+#define FGCOLR_BLUE_MASK GENMASK(7, 0)
+
+#define DMA2D_BGPFCCR_REG 0x0024
+#define BGPFCCR_ALPHA_MASK GENMASK(31, 24)
+#define BGPFCCR_AM_MASK GENMASK(17, 16)
+#define BGPFCCR_CS_MASK GENMASK(15, 8)
+#define BGPFCCR_START BIT(5)
+#define BGPFCCR_CCM_RGB888 BIT(4)
+#define BGPFCCR_CM_MASK GENMASK(3, 0)
+
+#define DMA2D_BGCOLR_REG 0x0028
+#define BGCOLR_REG_MASK GENMASK(23, 16)
+#define BGCOLR_GREEN_MASK GENMASK(15, 8)
+#define BGCOLR_BLUE_MASK GENMASK(7, 0)
+
+#define DMA2D_OPFCCR_REG 0x0034
+#define OPFCCR_CM_MASK GENMASK(2, 0)
+
+#define DMA2D_OCOLR_REG 0x0038
+#define OCOLR_ALPHA_MASK GENMASK(31, 24)
+#define OCOLR_RED_MASK GENMASK(23, 16)
+#define OCOLR_GREEN_MASK GENMASK(15, 8)
+#define OCOLR_BLUE_MASK GENMASK(7, 0)
+
+#define DMA2D_OMAR_REG 0x003c
+
+#define DMA2D_OOR_REG 0x0040
+#define OOR_LO_MASK GENMASK(13, 0)
+
+#define DMA2D_NLR_REG 0x0044
+#define NLR_PL_MASK GENMASK(29, 16)
+#define NLR_NL_MASK GENMASK(15, 0)
+
+/* Hardware limits */
+#define MAX_WIDTH 2592
+#define MAX_HEIGHT 2592
+
+#define DEFAULT_WIDTH 240
+#define DEFAULT_HEIGHT 320
+#define DEFAULT_SIZE 307200
+
+#define CM_MODE_ARGB8888 0x00
+#define CM_MODE_ARGB4444 0x04
+#define CM_MODE_A4 0x0a
+#endif /* __DMA2D_REGS_H__ */
diff --git a/drivers/media/platform/stm32/dma2d/dma2d.c b/drivers/media/platform/stm32/dma2d/dma2d.c
new file mode 100644
index 000000000000..17af90d86898
--- /dev/null
+++ b/drivers/media/platform/stm32/dma2d/dma2d.c
@@ -0,0 +1,739 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * STM32 DMA2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2021 Dillon Min
+ * Dillon Min, <dillon.minfei@gmail.com>
+ *
+ * based on s5p-g2d
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "dma2d.h"
+#include "dma2d-regs.h"
+
+/*
+ * This V4L2 subdev m2m driver enables Chrom-Art Accelerator unit
+ * of STMicroelectronics STM32 SoC series.
+ *
+ * Currently supports r2m, m2m and m2m_pfc.
+ *
+ * - r2m, Filling a part or the whole of a destination image with a specific
+ * color.
+ * - m2m, Copying a part or the whole of a source image into a part or the
+ * whole of a destination.
+ * - m2m_pfc, Copying a part or the whole of a source image into a part or the
+ * whole of a destination image with a pixel format conversion.
+ */
+
+#define fh2ctx(__fh) container_of(__fh, struct dma2d_ctx, fh)
+
+static const struct dma2d_fmt formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .cmode = DMA2D_CMODE_ARGB8888,
+ .depth = 32,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .cmode = DMA2D_CMODE_RGB888,
+ .depth = 24,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .cmode = DMA2D_CMODE_RGB565,
+ .depth = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .cmode = DMA2D_CMODE_ARGB1555,
+ .depth = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .cmode = DMA2D_CMODE_ARGB4444,
+ .depth = 16,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static const struct dma2d_frame def_frame = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .line_offset = 0,
+ .a_rgb = {0x00, 0x00, 0x00, 0xff},
+ .a_mode = DMA2D_ALPHA_MODE_NO_MODIF,
+ .fmt = (struct dma2d_fmt *)&formats[0],
+ .size = DEFAULT_SIZE,
+};
+
+static struct dma2d_fmt *find_fmt(int pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == pixelformat)
+ return (struct dma2d_fmt *)&formats[i];
+ }
+
+ return NULL;
+}
+
+static struct dma2d_frame *get_frame(struct dma2d_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ return V4L2_TYPE_IS_OUTPUT(type) ? &ctx->cap : &ctx->out;
+}
+
+static int dma2d_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct dma2d_ctx *ctx = vb2_get_drv_priv(vq);
+ struct dma2d_frame *f = get_frame(ctx, vq->type);
+
+ if (*nplanes)
+ return sizes[0] < f->size ? -EINVAL : 0;
+
+ sizes[0] = f->size;
+ *nplanes = 1;
+
+ return 0;
+}
+
+static int dma2d_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dma2d_buf_prepare(struct vb2_buffer *vb)
+{
+ struct dma2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct dma2d_frame *f = get_frame(ctx, vb->vb2_queue->type);
+
+ if (vb2_plane_size(vb, 0) < f->size)
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, f->size);
+
+ return 0;
+}
+
+static void dma2d_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dma2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int dma2d_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct dma2d_ctx *ctx = vb2_get_drv_priv(q);
+ struct dma2d_frame *f = get_frame(ctx, q->type);
+
+ f->sequence = 0;
+ return 0;
+}
+
+static void dma2d_stop_streaming(struct vb2_queue *q)
+{
+ struct dma2d_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ return;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops dma2d_qops = {
+ .queue_setup = dma2d_queue_setup,
+ .buf_out_validate = dma2d_buf_out_validate,
+ .buf_prepare = dma2d_buf_prepare,
+ .buf_queue = dma2d_buf_queue,
+ .start_streaming = dma2d_start_streaming,
+ .stop_streaming = dma2d_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct dma2d_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &dma2d_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->mutex;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &dma2d_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->mutex;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int dma2d_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct dma2d_frame *frm;
+ struct dma2d_ctx *ctx = container_of(ctrl->handler, struct dma2d_ctx,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->dev->ctrl_lock, flags);
+ switch (ctrl->id) {
+ case V4L2_CID_COLORFX:
+ if (ctrl->val == V4L2_COLORFX_SET_RGB)
+ ctx->op_mode = DMA2D_MODE_R2M;
+ else if (ctrl->val == V4L2_COLORFX_NONE)
+ ctx->op_mode = DMA2D_MODE_M2M;
+ break;
+ case V4L2_CID_COLORFX_RGB:
+ frm = get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ frm->a_rgb[2] = (ctrl->val >> 16) & 0xff;
+ frm->a_rgb[1] = (ctrl->val >> 8) & 0xff;
+ frm->a_rgb[0] = (ctrl->val >> 0) & 0xff;
+ break;
+ default:
+ spin_unlock_irqrestore(&ctx->dev->ctrl_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ctx->dev->ctrl_lock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops dma2d_ctrl_ops = {
+ .s_ctrl = dma2d_s_ctrl,
+};
+
+static int dma2d_setup_ctrls(struct dma2d_ctx *ctx)
+{
+ struct v4l2_ctrl_handler *handler = &ctx->ctrl_handler;
+
+ v4l2_ctrl_handler_init(handler, 2);
+
+ v4l2_ctrl_new_std_menu(handler, &dma2d_ctrl_ops, V4L2_CID_COLORFX,
+ V4L2_COLORFX_SET_RGB, ~0x10001,
+ V4L2_COLORFX_NONE);
+
+ v4l2_ctrl_new_std(handler, &dma2d_ctrl_ops, V4L2_CID_COLORFX_RGB, 0,
+ 0xffffff, 1, 0);
+
+ return 0;
+}
+
+static int dma2d_open(struct file *file)
+{
+ struct dma2d_dev *dev = video_drvdata(file);
+ struct dma2d_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dev = dev;
+ /* Set default formats */
+ ctx->cap = def_frame;
+ ctx->bg = def_frame;
+ ctx->out = def_frame;
+ ctx->op_mode = DMA2D_MODE_M2M_FPC;
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ if (mutex_lock_interruptible(&dev->mutex)) {
+ kfree(ctx);
+ return -ERESTARTSYS;
+ }
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ mutex_unlock(&dev->mutex);
+ kfree(ctx);
+ return ret;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ dma2d_setup_ctrls(ctx);
+
+ /* Write the default values to the ctx struct */
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static int dma2d_release(struct file *file)
+{
+ struct dma2d_dev *dev = video_drvdata(file);
+ struct dma2d_ctx *ctx = fh2ctx(file->private_data);
+
+ mutex_lock(&dev->mutex);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(&dev->mutex);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ return 0;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DMA2D_NAME, sizeof(cap->driver));
+ strscpy(cap->card, DMA2D_NAME, sizeof(cap->card));
+ strscpy(cap->bus_info, BUS_INFO, sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+{
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].fourcc;
+ return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct dma2d_ctx *ctx = prv;
+ struct vb2_queue *vq;
+ struct dma2d_frame *frm;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ frm = get_frame(ctx, f->type);
+ f->fmt.pix.width = frm->width;
+ f->fmt.pix.height = frm->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = frm->fmt->fourcc;
+ f->fmt.pix.bytesperline = (frm->width * frm->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = frm->size;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quant;
+
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct dma2d_ctx *ctx = prv;
+ struct dma2d_fmt *fmt;
+ enum v4l2_field *field;
+ u32 fourcc = f->fmt.pix.pixelformat;
+
+ fmt = find_fmt(fourcc);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = formats[0].fourcc;
+ fmt = find_fmt(f->fmt.pix.pixelformat);
+ }
+
+ field = &f->fmt.pix.field;
+ if (*field == V4L2_FIELD_ANY)
+ *field = V4L2_FIELD_NONE;
+ else if (*field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->fmt.pix.width > MAX_WIDTH)
+ f->fmt.pix.width = MAX_WIDTH;
+ if (f->fmt.pix.height > MAX_HEIGHT)
+ f->fmt.pix.height = MAX_HEIGHT;
+
+ if (f->fmt.pix.width < 1)
+ f->fmt.pix.width = 1;
+ if (f->fmt.pix.height < 1)
+ f->fmt.pix.height = 1;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && !f->fmt.pix.colorspace) {
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quant;
+ }
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct dma2d_ctx *ctx = prv;
+ struct vb2_queue *vq;
+ struct dma2d_frame *frm;
+ struct dma2d_fmt *fmt;
+ int ret = 0;
+
+ /* Adjust all values according to the hardware capabilities
+ * and chosen format.
+ */
+ ret = vidioc_try_fmt(file, prv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ fmt = find_fmt(f->fmt.pix.pixelformat);
+ if (!fmt)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ ctx->colorspace = f->fmt.pix.colorspace;
+ ctx->xfer_func = f->fmt.pix.xfer_func;
+ ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->quant = f->fmt.pix.quantization;
+ }
+
+ frm = get_frame(ctx, f->type);
+ frm->width = f->fmt.pix.width;
+ frm->height = f->fmt.pix.height;
+ frm->size = f->fmt.pix.sizeimage;
+ /* Reset crop settings */
+ frm->o_width = 0;
+ frm->o_height = 0;
+ frm->c_width = frm->width;
+ frm->c_height = frm->height;
+ frm->right = frm->width;
+ frm->bottom = frm->height;
+ frm->fmt = fmt;
+ frm->line_offset = 0;
+
+ return 0;
+}
+
+static void device_run(void *prv)
+{
+ struct dma2d_ctx *ctx = prv;
+ struct dma2d_dev *dev = ctx->dev;
+ struct dma2d_frame *frm_out, *frm_cap;
+ struct vb2_v4l2_buffer *src, *dst;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->ctrl_lock, flags);
+ dev->curr = ctx;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ if (!dst || !src)
+ goto end;
+
+ frm_cap = get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ frm_out = get_frame(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (!frm_cap || !frm_out)
+ goto end;
+
+ src->sequence = frm_out->sequence++;
+ dst->sequence = frm_cap->sequence++;
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ clk_enable(dev->gate);
+
+ dma2d_config_fg(dev, frm_out,
+ vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
+
+ /* TODO: add M2M_BLEND handler here */
+
+ if (ctx->op_mode != DMA2D_MODE_R2M) {
+ if (frm_out->fmt->fourcc == frm_cap->fmt->fourcc)
+ ctx->op_mode = DMA2D_MODE_M2M;
+ else
+ ctx->op_mode = DMA2D_MODE_M2M_FPC;
+ }
+
+ dma2d_config_out(dev, frm_cap,
+ vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));
+ dma2d_config_common(dev, ctx->op_mode, frm_cap->width, frm_cap->height);
+
+ dma2d_start(dev);
+end:
+ spin_unlock_irqrestore(&dev->ctrl_lock, flags);
+}
+
+static irqreturn_t dma2d_isr(int irq, void *prv)
+{
+ struct dma2d_dev *dev = prv;
+ struct dma2d_ctx *ctx = dev->curr;
+ struct vb2_v4l2_buffer *src, *dst;
+ u32 s = dma2d_get_int(dev);
+
+ dma2d_clear_int(dev);
+ if (s & ISR_TCIF || s == 0) {
+ clk_disable(dev->gate);
+
+ WARN_ON(!ctx);
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ WARN_ON(!dst);
+ WARN_ON(!src);
+
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
+
+ dev->curr = NULL;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct v4l2_file_operations dma2d_fops = {
+ .owner = THIS_MODULE,
+ .open = dma2d_open,
+ .release = dma2d_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = v4l2_m2m_get_unmapped_area,
+#endif
+};
+
+static const struct v4l2_ioctl_ops dma2d_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct video_device dma2d_videodev = {
+ .name = DMA2D_NAME,
+ .fops = &dma2d_fops,
+ .ioctl_ops = &dma2d_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops dma2d_m2m_ops = {
+ .device_run = device_run,
+};
+
+static const struct of_device_id stm32_dma2d_match[];
+
+static int dma2d_probe(struct platform_device *pdev)
+{
+ struct dma2d_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret = 0;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->ctrl_lock);
+ mutex_init(&dev->mutex);
+ atomic_set(&dev->num_inst, 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs))
+ return PTR_ERR(dev->regs);
+
+ dev->gate = clk_get(&pdev->dev, "dma2d");
+ if (IS_ERR(dev->gate)) {
+ dev_err(&pdev->dev, "failed to get dma2d clock gate\n");
+ ret = -ENXIO;
+ return ret;
+ }
+
+ ret = clk_prepare(dev->gate);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare dma2d clock gate\n");
+ goto put_clk_gate;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to find IRQ\n");
+ ret = -ENXIO;
+ goto unprep_clk_gate;
+ }
+
+ dev->irq = res->start;
+
+ ret = devm_request_irq(&pdev->dev, dev->irq, dma2d_isr,
+ 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to install IRQ\n");
+ goto unprep_clk_gate;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ goto unprep_clk_gate;
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_v4l2_dev;
+ }
+
+ *vfd = dma2d_videodev;
+ vfd->lock = &dev->mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+
+ platform_set_drvdata(pdev, dev);
+ dev->m2m_dev = v4l2_m2m_init(&dma2d_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto rel_vdev;
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto free_m2m;
+ }
+
+ video_set_drvdata(vfd, dev);
+ dev->vfd = vfd;
+ v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
+ vfd->num);
+ return 0;
+
+free_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+rel_vdev:
+ video_device_release(vfd);
+unreg_v4l2_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+unprep_clk_gate:
+ clk_unprepare(dev->gate);
+put_clk_gate:
+ clk_put(dev->gate);
+
+ return ret;
+}
+
+static int dma2d_remove(struct platform_device *pdev)
+{
+ struct dma2d_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " DMA2D_NAME);
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+ clk_unprepare(dev->gate);
+ clk_put(dev->gate);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_dma2d_match[] = {
+ {
+ .compatible = "st,stm32-dma2d",
+ .data = NULL,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dma2d_match);
+
+static struct platform_driver dma2d_pdrv = {
+ .probe = dma2d_probe,
+ .remove = dma2d_remove,
+ .driver = {
+ .name = DMA2D_NAME,
+ .of_match_table = stm32_dma2d_match,
+ },
+};
+
+module_platform_driver(dma2d_pdrv);
+
+MODULE_AUTHOR("Dillon Min <dillon.minfei@gmail.com>");
+MODULE_DESCRIPTION("STM32 Chrom-Art Accelerator DMA2D driver");
+MODULE_LICENSE("GPL");
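
From user space the new DMA2D node behaves like any V4L2 mem2mem converter: the OUTPUT queue carries the source image, the CAPTURE queue the converted destination. An abbreviated usage sketch (the device path is an assumption; error handling and the REQBUFS/QBUF/STREAMON sequence are elided for space):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            int fd = open("/dev/video0", O_RDWR);   /* assumed node */
            struct v4l2_format fmt;

            if (fd < 0)
                    return 1;

            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;  /* source side */
            fmt.fmt.pix.width = 240;
            fmt.fmt.pix.height = 320;
            fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB565;
            ioctl(fd, VIDIOC_S_FMT, &fmt);

            fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* destination side */
            fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_ARGB32;
            ioctl(fd, VIDIOC_S_FMT, &fmt);          /* m2m_pfc conversion */

            /* REQBUFS/QBUF/STREAMON on both queues would follow here. */
            return 0;
    }
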
diff --git a/drivers/media/platform/stm32/dma2d/dma2d.h b/drivers/media/platform/stm32/dma2d/dma2d.h
new file mode 100644
index 000000000000..3f03a7ca9ee3
--- /dev/null
+++ b/drivers/media/platform/stm32/dma2d/dma2d.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ST stm32 DMA2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2021 Dillon Min
+ * Dillon Min, <dillon.minfei@gmail.com>
+ *
+ * based on s5p-g2d
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ */
+
+#ifndef __DMA2D_H__
+#define __DMA2D_H__
+
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#define DMA2D_NAME "stm-dma2d"
+#define BUS_INFO "platform:stm-dma2d"
+enum dma2d_op_mode {
+ DMA2D_MODE_M2M,
+ DMA2D_MODE_M2M_FPC,
+ DMA2D_MODE_M2M_BLEND,
+ DMA2D_MODE_R2M
+};
+
+enum dma2d_cmode {
+ /* output pfc cmode from ARGB8888 to ARGB4444 */
+ DMA2D_CMODE_ARGB8888,
+ DMA2D_CMODE_RGB888,
+ DMA2D_CMODE_RGB565,
+ DMA2D_CMODE_ARGB1555,
+ DMA2D_CMODE_ARGB4444,
+ /* bg or fg pfc cmode from L8 to A4 */
+ DMA2D_CMODE_L8,
+ DMA2D_CMODE_AL44,
+ DMA2D_CMODE_AL88,
+ DMA2D_CMODE_L4,
+ DMA2D_CMODE_A8,
+ DMA2D_CMODE_A4
+};
+
+enum dma2d_alpha_mode {
+ DMA2D_ALPHA_MODE_NO_MODIF,
+ DMA2D_ALPHA_MODE_REPLACE,
+ DMA2D_ALPHA_MODE_COMBINE
+};
+
+struct dma2d_fmt {
+ u32 fourcc;
+ int depth;
+ enum dma2d_cmode cmode;
+};
+
+struct dma2d_frame {
+ /* Original dimensions */
+ u32 width;
+ u32 height;
+ /* Crop size */
+ u32 c_width;
+ u32 c_height;
+ /* Offset */
+ u32 o_width;
+ u32 o_height;
+ u32 bottom;
+ u32 right;
+ u16 line_offset;
+ /* Image format */
+ struct dma2d_fmt *fmt;
+ /* [0]: blue
+ * [1]: green
+ * [2]: red
+ * [3]: alpha
+ */
+ u8 a_rgb[4];
+ /*
+ * AM[1:0] of DMA2D_FGPFCCR
+ */
+ enum dma2d_alpha_mode a_mode;
+ u32 size;
+ unsigned int sequence;
+};
+
+struct dma2d_ctx {
+ struct v4l2_fh fh;
+ struct dma2d_dev *dev;
+ struct dma2d_frame cap;
+ struct dma2d_frame out;
+ struct dma2d_frame bg;
+ /* fb_buf always points to the bg address */
+ struct v4l2_framebuffer fb_buf;
+ /*
+ * MODE[17:16] of DMA2D_CR
+ */
+ enum dma2d_op_mode op_mode;
+ struct v4l2_ctrl_handler ctrl_handler;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quant;
+};
+
+struct dma2d_dev {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct video_device *vfd;
+ /* for device open/close etc */
+ struct mutex mutex;
+ /* to avoid conflicts between a running device and user settings
+ * applied at the same time
+ */
+ spinlock_t ctrl_lock;
+ atomic_t num_inst;
+ void __iomem *regs;
+ struct clk *gate;
+ struct dma2d_ctx *curr;
+ int irq;
+};
+
+void dma2d_start(struct dma2d_dev *d);
+u32 dma2d_get_int(struct dma2d_dev *d);
+void dma2d_clear_int(struct dma2d_dev *d);
+void dma2d_config_out(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t o_addr);
+void dma2d_config_fg(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t f_addr);
+void dma2d_config_bg(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t b_addr);
+void dma2d_config_common(struct dma2d_dev *d, enum dma2d_op_mode op_mode,
+ u16 width, u16 height);
+
+#endif /* __DMA2D_H__ */
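The register-field comments above (MODE[17:16] of DMA2D_CR, the 2-bit AM field of DMA2D_FGPFCCR) pin down how these enums reach the hardware. Below is a minimal illustrative sketch of the packing, assuming the field offsets given in the STM32 reference manual; the EX_* macros and helpers are hypothetical, not part of the driver.

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Hypothetical helpers: pack the enums above into their register
 * fields. Offsets assume the STM32 reference manual (CR.MODE at
 * bits 17:16, FGPFCCR.AM at bits 17:16).
 */
#define EX_CR_MODE	GENMASK(17, 16)
#define EX_FGPFCCR_AM	GENMASK(17, 16)

static inline u32 ex_cr_set_mode(u32 cr, enum dma2d_op_mode m)
{
	return (cr & ~EX_CR_MODE) | FIELD_PREP(EX_CR_MODE, m);
}

static inline u32 ex_fgpfccr_set_am(u32 reg, enum dma2d_alpha_mode am)
{
	return (reg & ~EX_FGPFCCR_AM) | FIELD_PREP(EX_FGPFCCR_AM, am);
}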
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index b39a68f83c5f..0bf99e1cd1d8 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -313,12 +313,6 @@ struct si476x_radio {
};
static inline struct si476x_radio *
-v4l2_dev_to_radio(struct v4l2_device *d)
-{
- return container_of(d, struct si476x_radio, v4l2dev);
-}
-
-static inline struct si476x_radio *
v4l2_ctrl_handler_to_radio(struct v4l2_ctrl_handler *d)
{
return container_of(d, struct si476x_radio, ctrl_handler);
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index a972c0705ac7..59b3d77e282d 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -334,7 +334,6 @@ static int si470x_i2c_probe(struct i2c_client *client)
{
struct si470x_device *radio;
int retval = 0;
- unsigned char version_warning = 0;
/* private data allocation and initialization */
radio = devm_kzalloc(&client->dev, sizeof(*radio), GFP_KERNEL);
@@ -368,7 +367,7 @@ static int si470x_i2c_probe(struct i2c_client *client)
if (radio->hdl.error) {
retval = radio->hdl.error;
dev_err(&client->dev, "couldn't register control\n");
- goto err_dev;
+ goto err_all;
}
/* video device initialization */
@@ -410,20 +409,10 @@ static int si470x_i2c_probe(struct i2c_client *client)
radio->registers[DEVICEID], radio->registers[SI_CHIPID]);
if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
dev_warn(&client->dev,
- "This driver is known to work with firmware version %hu,\n",
- RADIO_FW_VERSION);
- dev_warn(&client->dev,
- "but the device has firmware version %hu.\n",
+ "This driver is known to work with firmware version %u, but the device has firmware version %u.\n"
+ "If you have some trouble using this driver, please report to V4L ML at linux-media@vger.kernel.org\n",
+ RADIO_FW_VERSION,
radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE);
- version_warning = 1;
- }
-
- /* give out version warning */
- if (version_warning == 1) {
- dev_warn(&client->dev,
- "If you have some trouble using this driver,\n");
- dev_warn(&client->dev,
- "please report to V4L ML at linux-media@vger.kernel.org\n");
}
/* set initial frequency */
@@ -463,7 +452,6 @@ static int si470x_i2c_probe(struct i2c_client *client)
return 0;
err_all:
v4l2_ctrl_handler_free(&radio->hdl);
-err_dev:
v4l2_device_unregister(&radio->v4l2_dev);
err_initial:
return retval;
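The label change above is the entire fix: once the control handler has been initialised, a later failure must unwind through v4l2_ctrl_handler_free(), so the probe now jumps to err_all instead of the label that skipped it. The kernel's goto-unwind idiom in miniature (a sketch; setup_a/setup_b/undo_a are placeholders, not driver functions):

/* Placeholders standing in for real init steps. */
static int setup_a(void) { return 0; }
static int setup_b(void) { return -EIO; }
static void undo_a(void) { }

static int ex_probe(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto err;		/* nothing to undo yet */

	ret = setup_b();
	if (ret)
		goto err_undo_a;	/* must unwind step A */

	return 0;

err_undo_a:
	undo_a();
err:
	return ret;
}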
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 3f8634a46573..6b2768623c88 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -681,10 +681,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->registers[DEVICEID], radio->registers[SI_CHIPID]);
if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
dev_warn(&intf->dev,
- "This driver is known to work with firmware version %hu,\n",
- RADIO_FW_VERSION);
- dev_warn(&intf->dev,
- "but the device has firmware version %hu.\n",
+ "This driver is known to work with firmware version %u, but the device has firmware version %u.\n",
+ RADIO_FW_VERSION,
radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE);
version_warning = 1;
}
@@ -698,10 +696,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->software_version, radio->hardware_version);
if (radio->hardware_version < RADIO_HW_VERSION) {
dev_warn(&intf->dev,
- "This driver is known to work with hardware version %hu,\n",
- RADIO_HW_VERSION);
- dev_warn(&intf->dev,
- "but the device has hardware version %hu.\n",
+ "This driver is known to work with hardware version %u, but the device has hardware version %u.\n",
+ RADIO_HW_VERSION,
radio->hardware_version);
version_warning = 1;
}
@@ -709,9 +705,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
/* give out version warning */
if (version_warning == 1) {
dev_warn(&intf->dev,
- "If you have some trouble using this driver,\n");
- dev_warn(&intf->dev,
- "please report to V4L ML at linux-media@vger.kernel.org\n");
+ "If you have some trouble using this driver, please report to V4L ML at linux-media@vger.kernel.org\n");
}
/* set led to connect state */
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 9506baf3c4c1..c111af820ae4 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -175,7 +175,7 @@ config IR_ENE
config IR_HIX5HD2
tristate "Hisilicon hix5hd2 IR remote control"
- depends on OF || COMPILE_TEST
+ depends on (OF && HAS_IOMEM) || COMPILE_TEST
help
Say Y here if you want to use hisilicon hix5hd2 remote control.
To compile this driver as a module, choose M here: the module will be
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index effaa5751d6c..3e9988ee785f 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -64,9 +64,11 @@ static void igorplugusb_irdata(struct igorplugusb *ir, unsigned len)
if (start >= len) {
dev_err(ir->dev, "receive overflow invalid: %u", overflow);
} else {
- if (overflow > 0)
+ if (overflow > 0) {
dev_warn(ir->dev, "receive overflow, at least %u lost",
overflow);
+ ir_raw_event_reset(ir->rc);
+ }
do {
rawir.duration = ir->buf_in[i] * 85;
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 84949baf9f6b..f8d080e41f4c 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -262,9 +262,6 @@ static int iguanair_receiver(struct iguanair *ir, bool enable)
ir->packet->header.direction = DIR_OUT;
ir->packet->header.cmd = enable ? CMD_RECEIVER_ON : CMD_RECEIVER_OFF;
- if (enable)
- ir_raw_event_reset(ir->rc);
-
return iguanair_send(ir, sizeof(ir->packet->header));
}
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index a0d9c02a7588..a3b145183260 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -19,6 +19,7 @@
struct ir_rx51 {
struct rc_dev *rcdev;
struct pwm_device *pwm;
+ struct pwm_state state;
struct hrtimer timer;
struct device *dev;
wait_queue_head_t wqueue;
@@ -32,22 +33,20 @@ struct ir_rx51 {
static inline void ir_rx51_on(struct ir_rx51 *ir_rx51)
{
- pwm_enable(ir_rx51->pwm);
+ ir_rx51->state.enabled = true;
+ pwm_apply_state(ir_rx51->pwm, &ir_rx51->state);
}
static inline void ir_rx51_off(struct ir_rx51 *ir_rx51)
{
- pwm_disable(ir_rx51->pwm);
+ ir_rx51->state.enabled = false;
+ pwm_apply_state(ir_rx51->pwm, &ir_rx51->state);
}
static int init_timing_params(struct ir_rx51 *ir_rx51)
{
- struct pwm_device *pwm = ir_rx51->pwm;
- int duty, period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq);
-
- duty = DIV_ROUND_CLOSEST(ir_rx51->duty_cycle * period, 100);
-
- pwm_config(pwm, duty, period);
+ ir_rx51->state.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq);
+ pwm_set_relative_duty_cycle(&ir_rx51->state, ir_rx51->duty_cycle, 100);
return 0;
}
@@ -242,6 +241,7 @@ static int ir_rx51_probe(struct platform_device *dev)
/* Use default, in case userspace does not set the carrier */
ir_rx51.freq = DIV_ROUND_CLOSEST_ULL(pwm_get_period(pwm), NSEC_PER_SEC);
+ pwm_init_state(pwm, &ir_rx51.state);
pwm_put(pwm);
hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
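The ir-rx51 hunks above replace the legacy pwm_config()/pwm_enable()/pwm_disable() calls with the atomic pwm_state API: the driver keeps one struct pwm_state, mutates it, and re-applies the whole thing. A minimal standalone sketch of that pattern (the function name is invented for illustration):

#include <linux/kernel.h>
#include <linux/pwm.h>

/* Program a carrier of 'freq' Hz at 'duty_pct' percent and enable
 * the output in one atomic pwm_apply_state() call.
 */
static int ex_pwm_carrier_on(struct pwm_device *pwm,
			     unsigned int freq, unsigned int duty_pct)
{
	struct pwm_state state;

	pwm_init_state(pwm, &state);
	state.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, freq);
	pwm_set_relative_duty_cycle(&state, duty_pct, 100);
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}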
diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
index c58f2d38a458..51aa55a84bb5 100644
--- a/drivers/media/rc/ir-spi.c
+++ b/drivers/media/rc/ir-spi.c
@@ -152,11 +152,6 @@ static int ir_spi_probe(struct spi_device *spi)
return devm_rc_register_device(&spi->dev, idata->rc);
}
-static int ir_spi_remove(struct spi_device *spi)
-{
- return 0;
-}
-
static const struct of_device_id ir_spi_of_match[] = {
{ .compatible = "ir-spi-led" },
{},
@@ -165,7 +160,6 @@ MODULE_DEVICE_TABLE(of, ir_spi_of_match);
static struct spi_driver ir_spi_driver = {
.probe = ir_spi_probe,
- .remove = ir_spi_remove,
.driver = {
.name = IR_SPI_DRIVER_NAME,
.of_match_table = ir_spi_of_match,
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 7f591ff5269d..c7c5157725f8 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -102,8 +102,6 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev)
spin_lock_irqsave(&dev->lirc_fh_lock, flags);
list_for_each_entry(fh, &dev->lirc_fh, list) {
- if (LIRC_IS_TIMEOUT(sample) && !fh->send_timeout_reports)
- continue;
if (kfifo_put(&fh->rawir, sample))
wake_up_poll(&fh->wait_poll, EPOLLIN | EPOLLRDNORM);
}
@@ -166,7 +164,6 @@ static int lirc_open(struct inode *inode, struct file *file)
fh->send_mode = LIRC_MODE_PULSE;
fh->rc = dev;
- fh->send_timeout_reports = true;
if (dev->driver_type == RC_DRIVER_SCANCODE)
fh->rec_mode = LIRC_MODE_SCANCODE;
@@ -570,8 +567,6 @@ static long lirc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case LIRC_SET_REC_TIMEOUT_REPORTS:
if (dev->driver_type != RC_DRIVER_IR_RAW)
ret = -ENOTTY;
- else
- fh->send_timeout_reports = !!val;
break;
default:
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index d09bee82c04c..2dc810f5a73f 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1430,7 +1430,7 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
*/
ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
- data, USB_CTRL_MSG_SZ, HZ * 3);
+ data, USB_CTRL_MSG_SZ, 3000);
dev_dbg(dev, "set address - ret = %d", ret);
dev_dbg(dev, "set address - data[0] = %d, data[1] = %d",
data[0], data[1]);
@@ -1438,20 +1438,20 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
/* set feature: bit rate 38400 bps */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
- 0xc04e, 0x0000, NULL, 0, HZ * 3);
+ 0xc04e, 0x0000, NULL, 0, 3000);
dev_dbg(dev, "set feature - ret = %d", ret);
/* bRequest 4: set char length to 8 bits */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
4, USB_TYPE_VENDOR,
- 0x0808, 0x0000, NULL, 0, HZ * 3);
+ 0x0808, 0x0000, NULL, 0, 3000);
dev_dbg(dev, "set char length - retB = %d", ret);
/* bRequest 2: set handshaking to use DTR/DSR */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
2, USB_TYPE_VENDOR,
- 0x0000, 0x0100, NULL, 0, HZ * 3);
+ 0x0000, 0x0100, NULL, 0, 3000);
dev_dbg(dev, "set handshake - retC = %d", ret);
/* device resume */
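The timeout edits above are not cosmetic: usb_control_msg() takes its final argument in milliseconds, not jiffies, so "HZ * 3" meant 3 seconds only on CONFIG_HZ=1000 kernels (750 ms at HZ=250). A hedged sketch of the corrected call shape; the wrapper name and request values are illustrative only:

#include <linux/usb.h>

/* Vendor write with an explicit 3000 ms timeout. */
static int ex_vendor_write(struct usb_device *udev, u16 value)
{
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			       USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
			       value, 0, NULL, 0, 3000 /* ms */);
}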
diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c
index 4bc28d2c9cc9..105a9c24f1e3 100644
--- a/drivers/media/rc/pwm-ir-tx.c
+++ b/drivers/media/rc/pwm-ir-tx.c
@@ -53,22 +53,21 @@ static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
{
struct pwm_ir *pwm_ir = dev->priv;
struct pwm_device *pwm = pwm_ir->pwm;
- int i, duty, period;
+ struct pwm_state state;
+ int i;
ktime_t edge;
long delta;
- period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, pwm_ir->carrier);
- duty = DIV_ROUND_CLOSEST(pwm_ir->duty_cycle * period, 100);
+ pwm_init_state(pwm, &state);
- pwm_config(pwm, duty, period);
+ state.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, pwm_ir->carrier);
+ pwm_set_relative_duty_cycle(&state, pwm_ir->duty_cycle, 100);
edge = ktime_get();
for (i = 0; i < count; i++) {
- if (i % 2) // space
- pwm_disable(pwm);
- else
- pwm_enable(pwm);
+ state.enabled = !(i % 2);
+ pwm_apply_state(pwm, &state);
edge = ktime_add_us(edge, txbuf[i]);
delta = ktime_us_delta(edge, ktime_get());
@@ -76,7 +75,8 @@ static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
usleep_range(delta, delta + 10);
}
- pwm_disable(pwm);
+ state.enabled = false;
+ pwm_apply_state(pwm, &state);
return count;
}
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index ac85464864b9..cb22316b3f00 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -404,7 +404,7 @@ static int redrat3_send_cmd(int cmd, struct redrat3_dev *rr3)
udev = rr3->udev;
res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), cmd,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x0000, 0x0000, data, sizeof(u8), HZ * 10);
+ 0x0000, 0x0000, data, sizeof(u8), 10000);
if (res < 0) {
dev_err(rr3->dev, "%s: Error sending rr3 cmd res %d, data %d",
@@ -480,7 +480,7 @@ static u32 redrat3_get_timeout(struct redrat3_dev *rr3)
pipe = usb_rcvctrlpipe(rr3->udev, 0);
ret = usb_control_msg(rr3->udev, pipe, RR3_GET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, HZ * 5);
+ RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, 5000);
if (ret != len)
dev_warn(rr3->dev, "Failed to read timeout from hardware\n");
else {
@@ -510,7 +510,7 @@ static int redrat3_set_timeout(struct rc_dev *rc_dev, unsigned int timeoutus)
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
RR3_IR_IO_SIG_TIMEOUT, 0, timeout, sizeof(*timeout),
- HZ * 25);
+ 25000);
dev_dbg(dev, "set ir parm timeout %d ret 0x%02x\n",
be32_to_cpu(*timeout), ret);
@@ -542,32 +542,32 @@ static void redrat3_reset(struct redrat3_dev *rr3)
*val = 0x01;
rc = usb_control_msg(udev, rxpipe, RR3_RESET,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- RR3_CPUCS_REG_ADDR, 0, val, len, HZ * 25);
+ RR3_CPUCS_REG_ADDR, 0, val, len, 25000);
dev_dbg(dev, "reset returned 0x%02x\n", rc);
*val = length_fuzz;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- RR3_IR_IO_LENGTH_FUZZ, 0, val, len, HZ * 25);
+ RR3_IR_IO_LENGTH_FUZZ, 0, val, len, 25000);
dev_dbg(dev, "set ir parm len fuzz %d rc 0x%02x\n", *val, rc);
*val = (65536 - (minimum_pause * 2000)) / 256;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- RR3_IR_IO_MIN_PAUSE, 0, val, len, HZ * 25);
+ RR3_IR_IO_MIN_PAUSE, 0, val, len, 25000);
dev_dbg(dev, "set ir parm min pause %d rc 0x%02x\n", *val, rc);
*val = periods_measure_carrier;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- RR3_IR_IO_PERIODS_MF, 0, val, len, HZ * 25);
+ RR3_IR_IO_PERIODS_MF, 0, val, len, 25000);
dev_dbg(dev, "set ir parm periods measure carrier %d rc 0x%02x", *val,
rc);
*val = RR3_DRIVER_MAXLENS;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- RR3_IR_IO_MAX_LENGTHS, 0, val, len, HZ * 25);
+ RR3_IR_IO_MAX_LENGTHS, 0, val, len, 25000);
dev_dbg(dev, "set ir parm max lens %d rc 0x%02x\n", *val, rc);
kfree(val);
@@ -585,7 +585,7 @@ static void redrat3_get_firmware_rev(struct redrat3_dev *rr3)
rc = usb_control_msg(rr3->udev, usb_rcvctrlpipe(rr3->udev, 0),
RR3_FW_VERSION,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0, 0, buffer, RR3_FW_VERSION_LEN, HZ * 5);
+ 0, 0, buffer, RR3_FW_VERSION_LEN, 5000);
if (rc >= 0)
dev_info(rr3->dev, "Firmware rev: %s", buffer);
@@ -825,14 +825,14 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
pipe = usb_sndbulkpipe(rr3->udev, rr3->ep_out->bEndpointAddress);
ret = usb_bulk_msg(rr3->udev, pipe, irdata,
- sendbuf_len, &ret_len, 10 * HZ);
+ sendbuf_len, &ret_len, 10000);
dev_dbg(dev, "sent %d bytes, (ret %d)\n", ret_len, ret);
/* now tell the hardware to transmit what we sent it */
pipe = usb_rcvctrlpipe(rr3->udev, 0);
ret = usb_control_msg(rr3->udev, pipe, RR3_TX_SEND_SIGNAL,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0, 0, irdata, 2, HZ * 10);
+ 0, 0, irdata, 2, 10000);
if (ret < 0)
dev_err(dev, "Error: control msg send failed, rc %d\n", ret);
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 1cc5ebb85b6c..16ba85d7c090 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -21,12 +21,10 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/ktime.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>
-#define DRIVER_VERSION "1.61"
#define DRIVER_NAME "streamzap"
#define DRIVER_DESC "Streamzap Remote Control driver"
@@ -67,9 +65,6 @@ struct streamzap_ir {
struct device *dev;
/* usb */
- struct usb_device *usbdev;
- struct usb_interface *interface;
- struct usb_endpoint_descriptor *endpoint;
struct urb *urb_in;
/* buffer & dma */
@@ -79,16 +74,7 @@ struct streamzap_ir {
/* track what state we're in */
enum StreamzapDecoderState decoder_state;
- /* tracks whether we are currently receiving some signal */
- bool idle;
- /* sum of signal lengths received since signal start */
- unsigned long sum;
- /* start time of signal; necessary for gap tracking */
- ktime_t signal_last;
- ktime_t signal_start;
- bool timeout_enabled;
-
- char name[128];
+
char phys[64];
};
@@ -121,37 +107,11 @@ static void sz_push(struct streamzap_ir *sz, struct ir_raw_event rawir)
static void sz_push_full_pulse(struct streamzap_ir *sz,
unsigned char value)
{
- struct ir_raw_event rawir = {};
-
- if (sz->idle) {
- int delta;
-
- sz->signal_last = sz->signal_start;
- sz->signal_start = ktime_get_real();
-
- delta = ktime_us_delta(sz->signal_start, sz->signal_last);
- rawir.pulse = false;
- if (delta > (15 * USEC_PER_SEC)) {
- /* really long time */
- rawir.duration = IR_MAX_DURATION;
- } else {
- rawir.duration = delta;
- rawir.duration -= sz->sum;
- rawir.duration = (rawir.duration > IR_MAX_DURATION) ?
- IR_MAX_DURATION : rawir.duration;
- }
- sz_push(sz, rawir);
+ struct ir_raw_event rawir = {
+ .pulse = true,
+ .duration = value * SZ_RESOLUTION + SZ_RESOLUTION / 2,
+ };
- sz->idle = false;
- sz->sum = 0;
- }
-
- rawir.pulse = true;
- rawir.duration = ((int) value) * SZ_RESOLUTION;
- rawir.duration += SZ_RESOLUTION / 2;
- sz->sum += rawir.duration;
- rawir.duration = (rawir.duration > IR_MAX_DURATION) ?
- IR_MAX_DURATION : rawir.duration;
sz_push(sz, rawir);
}
@@ -164,12 +124,11 @@ static void sz_push_half_pulse(struct streamzap_ir *sz,
static void sz_push_full_space(struct streamzap_ir *sz,
unsigned char value)
{
- struct ir_raw_event rawir = {};
+ struct ir_raw_event rawir = {
+ .pulse = false,
+ .duration = value * SZ_RESOLUTION + SZ_RESOLUTION / 2,
+ };
- rawir.pulse = false;
- rawir.duration = ((int) value) * SZ_RESOLUTION;
- rawir.duration += SZ_RESOLUTION / 2;
- sz->sum += rawir.duration;
sz_push(sz, rawir);
}
@@ -241,11 +200,7 @@ static void streamzap_callback(struct urb *urb)
.pulse = false,
.duration = sz->rdev->timeout
};
- sz->idle = true;
- if (sz->timeout_enabled)
- sz_push(sz, rawir);
- ir_raw_event_handle(sz->rdev);
- ir_raw_event_reset(sz->rdev);
+ sz_push(sz, rawir);
} else {
sz_push_full_space(sz, sz->buf_in[i]);
}
@@ -267,27 +222,23 @@ static void streamzap_callback(struct urb *urb)
usb_submit_urb(urb, GFP_ATOMIC);
}
-static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
+static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz,
+ struct usb_device *usbdev)
{
struct rc_dev *rdev;
struct device *dev = sz->dev;
int ret;
rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
- if (!rdev) {
- dev_err(dev, "remote dev allocation failed\n");
+ if (!rdev)
goto out;
- }
- snprintf(sz->name, sizeof(sz->name), "Streamzap PC Remote Infrared Receiver (%04x:%04x)",
- le16_to_cpu(sz->usbdev->descriptor.idVendor),
- le16_to_cpu(sz->usbdev->descriptor.idProduct));
- usb_make_path(sz->usbdev, sz->phys, sizeof(sz->phys));
+ usb_make_path(usbdev, sz->phys, sizeof(sz->phys));
strlcat(sz->phys, "/input0", sizeof(sz->phys));
- rdev->device_name = sz->name;
+ rdev->device_name = "Streamzap PC Remote Infrared Receiver";
rdev->input_phys = sz->phys;
- usb_to_input_id(sz->usbdev, &rdev->input_id);
+ usb_to_input_id(usbdev, &rdev->input_id);
rdev->dev.parent = dev;
rdev->priv = sz;
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
@@ -319,9 +270,9 @@ static int streamzap_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *endpoint;
struct usb_host_interface *iface_host;
struct streamzap_ir *sz = NULL;
- char buf[63], name[128] = "";
int retval = -ENOMEM;
int pipe, maxp;
@@ -330,9 +281,6 @@ static int streamzap_probe(struct usb_interface *intf,
if (!sz)
return -ENOMEM;
- sz->usbdev = usbdev;
- sz->interface = intf;
-
/* Check to ensure endpoint information matches requirements */
iface_host = intf->cur_altsetting;
@@ -343,22 +291,22 @@ static int streamzap_probe(struct usb_interface *intf,
goto free_sz;
}
- sz->endpoint = &(iface_host->endpoint[0].desc);
- if (!usb_endpoint_dir_in(sz->endpoint)) {
+ endpoint = &iface_host->endpoint[0].desc;
+ if (!usb_endpoint_dir_in(endpoint)) {
dev_err(&intf->dev, "%s: endpoint doesn't match input device 02%02x\n",
- __func__, sz->endpoint->bEndpointAddress);
+ __func__, endpoint->bEndpointAddress);
retval = -ENODEV;
goto free_sz;
}
- if (!usb_endpoint_xfer_int(sz->endpoint)) {
+ if (!usb_endpoint_xfer_int(endpoint)) {
dev_err(&intf->dev, "%s: endpoint attributes don't match xfer 02%02x\n",
- __func__, sz->endpoint->bmAttributes);
+ __func__, endpoint->bmAttributes);
retval = -ENODEV;
goto free_sz;
}
- pipe = usb_rcvintpipe(usbdev, sz->endpoint->bEndpointAddress);
+ pipe = usb_rcvintpipe(usbdev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(usbdev, pipe, usb_pipeout(pipe));
if (maxp == 0) {
@@ -380,25 +328,12 @@ static int streamzap_probe(struct usb_interface *intf,
sz->dev = &intf->dev;
sz->buf_in_len = maxp;
- if (usbdev->descriptor.iManufacturer
- && usb_string(usbdev, usbdev->descriptor.iManufacturer,
- buf, sizeof(buf)) > 0)
- strscpy(name, buf, sizeof(name));
-
- if (usbdev->descriptor.iProduct
- && usb_string(usbdev, usbdev->descriptor.iProduct,
- buf, sizeof(buf)) > 0)
- snprintf(name + strlen(name), sizeof(name) - strlen(name),
- " %s", buf);
-
- sz->rdev = streamzap_init_rc_dev(sz);
+ sz->rdev = streamzap_init_rc_dev(sz, usbdev);
if (!sz->rdev)
goto rc_dev_fail;
- sz->idle = true;
sz->decoder_state = PulseSpace;
/* FIXME: don't yet have a way to set this */
- sz->timeout_enabled = true;
sz->rdev->timeout = SZ_TIMEOUT * SZ_RESOLUTION;
#if 0
/* not yet supported, depends on patches from maxim */
@@ -407,12 +342,9 @@ static int streamzap_probe(struct usb_interface *intf,
sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION;
#endif
- sz->signal_start = ktime_get_real();
-
/* Complete final initialisations */
usb_fill_int_urb(sz->urb_in, usbdev, pipe, sz->buf_in,
- maxp, (usb_complete_t)streamzap_callback,
- sz, sz->endpoint->bInterval);
+ maxp, streamzap_callback, sz, endpoint->bInterval);
sz->urb_in->transfer_dma = sz->dma_in;
sz->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -421,9 +353,6 @@ static int streamzap_probe(struct usb_interface *intf,
if (usb_submit_urb(sz->urb_in, GFP_ATOMIC))
dev_err(sz->dev, "urb submit failed\n");
- dev_info(sz->dev, "Registered %s on usb%d:%d\n", name,
- usbdev->bus->busnum, usbdev->devnum);
-
return 0;
rc_dev_fail:
@@ -456,7 +385,6 @@ static void streamzap_disconnect(struct usb_interface *interface)
if (!sz)
return;
- sz->usbdev = NULL;
rc_unregister_device(sz->rdev);
usb_kill_urb(sz->urb_in);
usb_free_urb(sz->urb_in);
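The simplified push helpers above lean on the Streamzap sample format: each byte counts SZ_RESOLUTION-microsecond ticks, and adding SZ_RESOLUTION / 2 re-centres the quantized duration on the middle of its tick. A standalone sketch (256 is only a placeholder; the real constant lives in the driver):

/* Placeholder for the driver's SZ_RESOLUTION tick size, in us. */
#define EX_RESOLUTION 256

/* One raw byte -> duration in microseconds, rounded to mid-tick. */
static inline unsigned int ex_sz_duration(unsigned char value)
{
	return value * EX_RESOLUTION + EX_RESOLUTION / 2;
}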
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index aed23ca0fa6c..94efb035d21b 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -997,7 +997,6 @@ wbcir_resume(struct pnp_dev *device)
struct wbcir_data *data = pnp_get_drvdata(device);
wbcir_init_hw(data);
- ir_raw_event_reset(data->dev);
enable_irq(data->irq);
led_classdev_resume(&data->led);
diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
index 506f52c1af10..6f2a66bc87fb 100644
--- a/drivers/media/spi/cxd2880-spi.c
+++ b/drivers/media/spi/cxd2880-spi.c
@@ -628,19 +628,8 @@ fail_regulator:
static int
cxd2880_spi_remove(struct spi_device *spi)
{
- struct cxd2880_dvb_spi *dvb_spi;
+ struct cxd2880_dvb_spi *dvb_spi = spi_get_drvdata(spi);
- if (!spi) {
- pr_err("invalid arg\n");
- return -EINVAL;
- }
-
- dvb_spi = spi_get_drvdata(spi);
-
- if (!dvb_spi) {
- pr_err("failed\n");
- return -EINVAL;
- }
dvb_spi->demux.dmx.remove_frontend(&dvb_spi->demux.dmx,
&dvb_spi->dmx_fe);
dvb_dmxdev_release(&dvb_spi->dmxdev);
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index 33f1c893c1b6..be43f7d32df9 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -1443,7 +1443,7 @@ static void vicodec_buf_queue(struct vb2_buffer *vb)
unsigned int i;
for (i = 0; i < vb->num_planes; i++)
- vb->planes[i].bytesused = 0;
+ vb2_set_plane_payload(vb, i, 0);
vbuf->field = V4L2_FIELD_NONE;
vbuf->sequence =
diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
index c11ac8dca73d..a5875380ef40 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
@@ -94,34 +94,28 @@ static void vidtv_psi_update_version_num(struct vidtv_psi_table_header *h)
static u16 vidtv_psi_get_sec_len(struct vidtv_psi_table_header *h)
{
u16 mask;
- u16 ret;
mask = GENMASK(11, 0);
- ret = be16_to_cpu(h->bitfield) & mask;
- return ret;
+ return be16_to_cpu(h->bitfield) & mask;
}
u16 vidtv_psi_get_pat_program_pid(struct vidtv_psi_table_pat_program *p)
{
u16 mask;
- u16 ret;
mask = GENMASK(12, 0);
- ret = be16_to_cpu(p->bitfield) & mask;
- return ret;
+ return be16_to_cpu(p->bitfield) & mask;
}
u16 vidtv_psi_pmt_stream_get_elem_pid(struct vidtv_psi_table_pmt_stream *s)
{
u16 mask;
- u16 ret;
mask = GENMASK(12, 0);
- ret = be16_to_cpu(s->bitfield) & mask;
- return ret;
+ return be16_to_cpu(s->bitfield) & mask;
}
static void vidtv_psi_set_desc_loop_len(__be16 *bitfield, u16 new_len,
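The cleanups above rely on a stock kernel idiom: GENMASK(h, l) builds the mask covering bits h..l, so the 12-bit PSI section length (or a 13-bit PID) falls out of the big-endian bitfield in one expression. A small sketch mirroring the helpers above:

#include <asm/byteorder.h>
#include <linux/bits.h>
#include <linux/types.h>

static inline u16 ex_sec_len(__be16 bitfield)
{
	return be16_to_cpu(bitfield) & GENMASK(11, 0);	/* 0x0fff */
}

static inline u16 ex_pid(__be16 bitfield)
{
	return be16_to_cpu(bitfield) & GENMASK(12, 0);	/* 0x1fff */
}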
diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c
index 78e6fd600d8e..44247049a319 100644
--- a/drivers/media/tuners/msi001.c
+++ b/drivers/media/tuners/msi001.c
@@ -442,6 +442,13 @@ static int msi001_probe(struct spi_device *spi)
V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1);
dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops,
V4L2_CID_RF_TUNER_BANDWIDTH, 200000, 8000000, 1, 200000);
+ if (dev->hdl.error) {
+ ret = dev->hdl.error;
+ dev_err(&spi->dev, "Could not initialize controls\n");
+ /* control init failed, free handler */
+ goto err_ctrl_handler_free;
+ }
+
v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, false);
dev->lna_gain = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops,
V4L2_CID_RF_TUNER_LNA_GAIN, 0, 1, 1, 1);
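The added check works because of how the v4l2 control framework reports errors: each v4l2_ctrl_new_std() records a failure in hdl.error rather than returning it, so one test after the whole batch is enough. A compact sketch of the pattern (the control choices here are arbitrary examples):

#include <media/v4l2-ctrls.h>

static int ex_init_ctrls(struct v4l2_ctrl_handler *hdl,
			 const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_handler_init(hdl, 2);
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 255, 1, 128);
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, 0, 255, 1, 128);
	if (hdl->error) {
		int err = hdl->error;	/* first recorded failure */

		v4l2_ctrl_handler_free(hdl);
		return err;
	}
	return 0;
}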
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index f6e82a8e7d37..ab4c43df9d18 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -3414,9 +3414,8 @@ static u16 MXL_ControlWrite_Group(struct dvb_frontend *fe, u16 controlNum,
u32 value, u16 controlGroup)
{
struct mxl5005s_state *state = fe->tuner_priv;
- u16 i, j, k;
+ u16 i, j;
u32 highLimit;
- u32 ctrlVal;
if (controlGroup == 1) /* Initial Control */ {
@@ -3432,9 +3431,6 @@ static u16 MXL_ControlWrite_Group(struct dvb_frontend *fe, u16 controlNum,
(u8)(state->Init_Ctrl[i].bit[j]),
(u8)((value>>j) & 0x01));
}
- ctrlVal = 0;
- for (k = 0; k < state->Init_Ctrl[i].size; k++)
- ctrlVal += state->Init_Ctrl[i].val[k] * (1 << k);
} else
return -1;
}
@@ -3454,9 +3450,6 @@ static u16 MXL_ControlWrite_Group(struct dvb_frontend *fe, u16 controlNum,
(u8)(state->CH_Ctrl[i].bit[j]),
(u8)((value>>j) & 0x01));
}
- ctrlVal = 0;
- for (k = 0; k < state->CH_Ctrl[i].size; k++)
- ctrlVal += state->CH_Ctrl[i].val[k] * (1 << k);
} else
return -1;
}
@@ -3477,11 +3470,6 @@ static u16 MXL_ControlWrite_Group(struct dvb_frontend *fe, u16 controlNum,
(u8)(state->MXL_Ctrl[i].bit[j]),
(u8)((value>>j) & 0x01));
}
- ctrlVal = 0;
- for (k = 0; k < state->MXL_Ctrl[i].size; k++)
- ctrlVal += state->
- MXL_Ctrl[i].val[k] *
- (1 << k);
} else
return -1;
}
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index aed2f130ec74..b9a7590aeec6 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -326,6 +326,26 @@ static int r820t_xtal_capacitor[][2] = {
{ 0x10, XTAL_HIGH_CAP_0P },
};
+static const char *r820t_chip_enum_to_str(enum r820t_chip chip)
+{
+ switch (chip) {
+ case CHIP_R820T:
+ return "R820T";
+ case CHIP_R620D:
+ return "R620D";
+ case CHIP_R828D:
+ return "R828D";
+ case CHIP_R828:
+ return "R828";
+ case CHIP_R828S:
+ return "R828S";
+ case CHIP_R820C:
+ return "R820C";
+ default:
+ return "<unknown>";
+ }
+}
+
/*
* I2C read/write code and shadow registers logic
*/
@@ -2355,7 +2375,9 @@ struct dvb_frontend *r820t_attach(struct dvb_frontend *fe,
if (rc < 0)
goto err;
- tuner_info("Rafael Micro r820t successfully identified\n");
+ tuner_info(
+ "Rafael Micro r820t successfully identified, chip type: %s\n",
+ r820t_chip_enum_to_str(cfg->rafael_chip));
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index fefb2625f655..47029746b89e 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -76,21 +76,156 @@ err_mutex_unlock:
return ret;
}
-static int si2157_init(struct dvb_frontend *fe)
+static const struct si2157_tuner_info si2157_tuners[] = {
+ { SI2141, false, 0x60, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
+ { SI2141, false, 0x61, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
+ { SI2146, false, 0x11, SI2146_11_FIRMWARE, NULL },
+ { SI2147, false, 0x50, SI2147_50_FIRMWARE, NULL },
+ { SI2148, true, 0x32, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2148, true, 0x33, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2157, false, 0x50, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
+ { SI2158, false, 0x50, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2158, false, 0x51, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2177, false, 0x50, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
+};
+
+static int si2157_load_firmware(struct dvb_frontend *fe,
+ const char *fw_name)
+{
+ struct i2c_client *client = fe->tuner_priv;
+ const struct firmware *fw;
+ int ret, len, remaining;
+ struct si2157_cmd cmd;
+
+	/* request the firmware; this call blocks and may time out */
+ ret = firmware_request_nowarn(&fw, fw_name, &client->dev);
+ if (ret)
+ return ret;
+
+ /* firmware should be n chunks of 17 bytes */
+ if (fw->size % 17 != 0) {
+ dev_err(&client->dev, "firmware file '%s' is invalid\n",
+ fw_name);
+ ret = -EINVAL;
+ goto err_release_firmware;
+ }
+
+ dev_info(&client->dev, "downloading firmware from file '%s'\n",
+ fw_name);
+
+ for (remaining = fw->size; remaining > 0; remaining -= 17) {
+ len = fw->data[fw->size - remaining];
+ if (len > SI2157_ARGLEN) {
+ dev_err(&client->dev, "Bad firmware length\n");
+ ret = -EINVAL;
+ goto err_release_firmware;
+ }
+ memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
+ cmd.wlen = len;
+ cmd.rlen = 1;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret) {
+ dev_err(&client->dev, "firmware download failed %d\n",
+ ret);
+ goto err_release_firmware;
+ }
+ }
+
+err_release_firmware:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int si2157_find_and_load_firmware(struct dvb_frontend *fe)
{
struct i2c_client *client = fe->tuner_priv;
struct si2157_dev *dev = i2c_get_clientdata(client);
+ const char *fw_alt_name = NULL;
+ unsigned char part_id, rom_id;
+ const char *fw_name = NULL;
+ struct si2157_cmd cmd;
+ bool required = true;
+ int ret, i;
+
+ if (dev->dont_load_firmware) {
+ dev_info(&client->dev,
+ "device is buggy, skipping firmware download\n");
+ return 0;
+ }
+
+ /* query chip revision */
+ memcpy(cmd.args, "\x02", 1);
+ cmd.wlen = 1;
+ cmd.rlen = 13;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ return ret;
+
+ part_id = cmd.args[2];
+ rom_id = cmd.args[12];
+
+ for (i = 0; i < ARRAY_SIZE(si2157_tuners); i++) {
+ if (si2157_tuners[i].part_id != part_id)
+ continue;
+ required = si2157_tuners[i].required;
+ fw_alt_name = si2157_tuners[i].fw_alt_name;
+
+ /* Both part and rom ID match */
+ if (si2157_tuners[i].rom_id == rom_id) {
+ fw_name = si2157_tuners[i].fw_name;
+ break;
+ }
+ }
+
+ if (!fw_name && !fw_alt_name) {
+ dev_err(&client->dev,
+ "unknown chip version Si21%d-%c%c%c ROM 0x%02x\n",
+ part_id, cmd.args[1], cmd.args[3], cmd.args[4], rom_id);
+ return -EINVAL;
+ }
+
+ /* Update the part id based on device's report */
+ dev->part_id = part_id;
+
+ dev_info(&client->dev,
+ "found a 'Silicon Labs Si21%d-%c%c%c ROM 0x%02x'\n",
+ part_id, cmd.args[1], cmd.args[3], cmd.args[4], rom_id);
+
+ if (fw_name)
+ ret = si2157_load_firmware(fe, fw_name);
+ else
+ ret = -ENOENT;
+
+ /* Try alternate name, if any */
+ if (ret == -ENOENT && fw_alt_name)
+ ret = si2157_load_firmware(fe, fw_alt_name);
+
+ if (ret == -ENOENT) {
+ if (!required) {
+ dev_info(&client->dev, "Using ROM firmware.\n");
+ return 0;
+ }
+		dev_err(&client->dev, "Can't continue without firmware.\n");
+ } else if (ret < 0) {
+ dev_err(&client->dev, "error %d when loading firmware\n", ret);
+ }
+ return ret;
+}
+
+static int si2157_init(struct dvb_frontend *fe)
+{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, len, remaining;
+ struct i2c_client *client = fe->tuner_priv;
+ struct si2157_dev *dev = i2c_get_clientdata(client);
+ unsigned int xtal_trim;
struct si2157_cmd cmd;
- const struct firmware *fw;
- const char *fw_name;
- unsigned int chip_id, xtal_trim;
+ int ret;
dev_dbg(&client->dev, "\n");
/* Try to get Xtal trim property, to verify tuner still running */
- memcpy(cmd.args, "\x15\x00\x04\x02", 4);
+ memcpy(cmd.args, "\x15\x00\x02\x04", 4);
cmd.wlen = 4;
cmd.rlen = 4;
ret = si2157_cmd_execute(client, &cmd);
@@ -103,10 +238,12 @@ static int si2157_init(struct dvb_frontend *fe)
dev->if_frequency = 0; /* we no longer know current tuner state */
/* power up */
- if (dev->chiptype == SI2157_CHIPTYPE_SI2146) {
+ if (dev->part_id == SI2146) {
+ /* clock_mode = XTAL, clock_freq = 24MHz */
memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
cmd.wlen = 9;
- } else if (dev->chiptype == SI2157_CHIPTYPE_SI2141) {
+ } else if (dev->part_id == SI2141) {
+ /* clock_mode: XTAL, xout enabled */
memcpy(cmd.args, "\xc0\x00\x0d\x0e\x00\x01\x01\x01\x01\x03", 10);
cmd.wlen = 10;
} else {
@@ -115,11 +252,11 @@ static int si2157_init(struct dvb_frontend *fe)
}
cmd.rlen = 1;
ret = si2157_cmd_execute(client, &cmd);
- if (ret && (dev->chiptype != SI2157_CHIPTYPE_SI2141 || ret != -EAGAIN))
+ if (ret && (dev->part_id != SI2141 || ret != -EAGAIN))
goto err;
- /* Si2141 needs a second command before it answers the revision query */
- if (dev->chiptype == SI2157_CHIPTYPE_SI2141) {
+	/* Si2141 needs a wake-up command */
+ if (dev->part_id == SI2141) {
memcpy(cmd.args, "\xc0\x08\x01\x02\x00\x00\x01", 7);
cmd.wlen = 7;
ret = si2157_cmd_execute(client, &cmd);
@@ -127,100 +264,11 @@ static int si2157_init(struct dvb_frontend *fe)
goto err;
}
- if (dev->dont_load_firmware) {
- dev_info(&client->dev, "device is buggy, skipping firmware download\n");
- goto skip_fw_download;
- }
-
- /* query chip revision */
- memcpy(cmd.args, "\x02", 1);
- cmd.wlen = 1;
- cmd.rlen = 13;
- ret = si2157_cmd_execute(client, &cmd);
- if (ret)
+ /* Try to load the firmware */
+ ret = si2157_find_and_load_firmware(fe);
+ if (ret < 0)
goto err;
- chip_id = cmd.args[1] << 24 | cmd.args[2] << 16 | cmd.args[3] << 8 |
- cmd.args[4] << 0;
-
- #define SI2177_A30 ('A' << 24 | 77 << 16 | '3' << 8 | '0' << 0)
- #define SI2158_A20 ('A' << 24 | 58 << 16 | '2' << 8 | '0' << 0)
- #define SI2148_A20 ('A' << 24 | 48 << 16 | '2' << 8 | '0' << 0)
- #define SI2157_A30 ('A' << 24 | 57 << 16 | '3' << 8 | '0' << 0)
- #define SI2147_A30 ('A' << 24 | 47 << 16 | '3' << 8 | '0' << 0)
- #define SI2146_A10 ('A' << 24 | 46 << 16 | '1' << 8 | '0' << 0)
- #define SI2141_A10 ('A' << 24 | 41 << 16 | '1' << 8 | '0' << 0)
-
- switch (chip_id) {
- case SI2158_A20:
- case SI2148_A20:
- fw_name = SI2158_A20_FIRMWARE;
- break;
- case SI2141_A10:
- fw_name = SI2141_A10_FIRMWARE;
- break;
- case SI2177_A30:
- fw_name = SI2157_A30_FIRMWARE;
- break;
- case SI2157_A30:
- case SI2147_A30:
- case SI2146_A10:
- fw_name = NULL;
- break;
- default:
- dev_err(&client->dev, "unknown chip version Si21%d-%c%c%c\n",
- cmd.args[2], cmd.args[1],
- cmd.args[3], cmd.args[4]);
- ret = -EINVAL;
- goto err;
- }
-
- dev_info(&client->dev, "found a 'Silicon Labs Si21%d-%c%c%c'\n",
- cmd.args[2], cmd.args[1], cmd.args[3], cmd.args[4]);
-
- if (fw_name == NULL)
- goto skip_fw_download;
-
- /* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_name, &client->dev);
- if (ret) {
- dev_err(&client->dev, "firmware file '%s' not found\n",
- fw_name);
- goto err;
- }
-
- /* firmware should be n chunks of 17 bytes */
- if (fw->size % 17 != 0) {
- dev_err(&client->dev, "firmware file '%s' is invalid\n",
- fw_name);
- ret = -EINVAL;
- goto err_release_firmware;
- }
-
- dev_info(&client->dev, "downloading firmware from file '%s'\n",
- fw_name);
-
- for (remaining = fw->size; remaining > 0; remaining -= 17) {
- len = fw->data[fw->size - remaining];
- if (len > SI2157_ARGLEN) {
- dev_err(&client->dev, "Bad firmware length\n");
- ret = -EINVAL;
- goto err_release_firmware;
- }
- memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
- cmd.wlen = len;
- cmd.rlen = 1;
- ret = si2157_cmd_execute(client, &cmd);
- if (ret) {
- dev_err(&client->dev, "firmware download failed %d\n",
- ret);
- goto err_release_firmware;
- }
- }
-
- release_firmware(fw);
-
-skip_fw_download:
/* reboot the tuner with new firmware? */
memcpy(cmd.args, "\x01\x01", 2);
cmd.wlen = 2;
@@ -270,8 +318,7 @@ warm:
dev->active = true;
return 0;
-err_release_firmware:
- release_firmware(fw);
+
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
@@ -399,7 +446,8 @@ static int si2157_set_params(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
struct si2157_cmd cmd;
- u8 bandwidth, delivery_system;
+ u8 bw, delivery_system;
+ u32 bandwidth;
u32 if_frequency = 5000000;
dev_dbg(&client->dev,
@@ -411,14 +459,22 @@ static int si2157_set_params(struct dvb_frontend *fe)
goto err;
}
- if (c->bandwidth_hz <= 6000000)
- bandwidth = 0x06;
- else if (c->bandwidth_hz <= 7000000)
- bandwidth = 0x07;
- else if (c->bandwidth_hz <= 8000000)
- bandwidth = 0x08;
- else
- bandwidth = 0x0f;
+ if (SUPPORTS_1700KHz(dev) && c->bandwidth_hz <= 1700000) {
+ bandwidth = 1700000;
+ bw = 9;
+ } else if (c->bandwidth_hz <= 6000000) {
+ bandwidth = 6000000;
+ bw = 6;
+ } else if (SUPPORTS_1700KHz(dev) && c->bandwidth_hz <= 6100000) {
+ bandwidth = 6100000;
+ bw = 10;
+ } else if (c->bandwidth_hz <= 7000000) {
+ bandwidth = 7000000;
+ bw = 7;
+ } else {
+ bandwidth = 8000000;
+ bw = 8;
+ }
switch (c->delivery_system) {
case SYS_ATSC:
@@ -434,15 +490,22 @@ static int si2157_set_params(struct dvb_frontend *fe)
delivery_system = 0x20;
break;
case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
delivery_system = 0x30;
break;
+ case SYS_ISDBT:
+ delivery_system = 0x40;
+ break;
+ case SYS_DTMB:
+ delivery_system = 0x60;
+ break;
default:
ret = -EINVAL;
goto err;
}
memcpy(cmd.args, "\x14\x00\x03\x07\x00\x00", 6);
- cmd.args[4] = delivery_system | bandwidth;
+ cmd.args[4] = delivery_system | bw;
if (dev->inversion)
cmd.args[5] = 0x01;
cmd.wlen = 6;
@@ -451,7 +514,8 @@ static int si2157_set_params(struct dvb_frontend *fe)
if (ret)
goto err;
- if (dev->chiptype == SI2157_CHIPTYPE_SI2146)
+ /* On SI2146, set DTV AGC source to DLIF_AGC_3DB */
+ if (dev->part_id == SI2146)
memcpy(cmd.args, "\x14\x00\x02\x07\x00\x01", 6);
else
memcpy(cmd.args, "\x14\x00\x02\x07\x00\x00", 6);
@@ -518,9 +582,9 @@ static int si2157_set_analog_params(struct dvb_frontend *fe,
u8 color = 0; /* 0=NTSC/PAL, 0x10=SECAM */
u8 invert_analog = 1; /* analog tuner spectrum; 0=normal, 1=inverted */
- if (dev->chiptype != SI2157_CHIPTYPE_SI2157) {
- dev_info(&client->dev, "Analog tuning not supported for chiptype=%u\n",
- dev->chiptype);
+ if (!SUPPORTS_ATV_IF(dev)) {
+ dev_info(&client->dev, "Analog tuning not supported yet for Si21%d\n",
+ dev->part_id);
ret = -EINVAL;
goto err;
}
@@ -832,7 +896,7 @@ static int si2157_probe(struct i2c_client *client,
dev->inversion = cfg->inversion;
dev->dont_load_firmware = cfg->dont_load_firmware;
dev->if_port = cfg->if_port;
- dev->chiptype = (u8)id->driver_data;
+ dev->part_id = (u8)id->driver_data;
dev->if_frequency = 5000000; /* default value of property 0x0706 */
mutex_init(&dev->i2c_mutex);
INIT_DELAYED_WORK(&dev->stat_work, si2157_stat_work);
@@ -875,10 +939,8 @@ static int si2157_probe(struct i2c_client *client,
}
#endif
- dev_info(&client->dev, "Silicon Labs %s successfully attached\n",
- dev->chiptype == SI2157_CHIPTYPE_SI2141 ? "Si2141" :
- dev->chiptype == SI2157_CHIPTYPE_SI2146 ?
- "Si2146" : "Si2147/2148/2157/2158");
+ dev_info(&client->dev, "Silicon Labs Si21%d successfully attached\n",
+ dev->part_id);
return 0;
@@ -911,11 +973,16 @@ static int si2157_remove(struct i2c_client *client)
return 0;
}
+/*
+ * The part_id set here is only used for buggy devices that don't
+ * accept firmware uploads. Non-buggy devices should just use "si2157" for
+ * all SiLabs TER tuners, as the driver auto-detects the part.
+ */
static const struct i2c_device_id si2157_id_table[] = {
- {"si2157", SI2157_CHIPTYPE_SI2157},
- {"si2146", SI2157_CHIPTYPE_SI2146},
- {"si2141", SI2157_CHIPTYPE_SI2141},
- {"si2177", SI2157_CHIPTYPE_SI2177},
+ {"si2157", SI2157},
+ {"si2146", SI2146},
+ {"si2141", SI2141},
+ {"si2177", SI2177},
{}
};
MODULE_DEVICE_TABLE(i2c, si2157_id_table);
@@ -938,3 +1005,13 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(SI2158_A20_FIRMWARE);
MODULE_FIRMWARE(SI2141_A10_FIRMWARE);
MODULE_FIRMWARE(SI2157_A30_FIRMWARE);
+MODULE_FIRMWARE(SI2141_60_FIRMWARE);
+MODULE_FIRMWARE(SI2141_61_FIRMWARE);
+MODULE_FIRMWARE(SI2146_11_FIRMWARE);
+MODULE_FIRMWARE(SI2147_50_FIRMWARE);
+MODULE_FIRMWARE(SI2148_32_FIRMWARE);
+MODULE_FIRMWARE(SI2148_33_FIRMWARE);
+MODULE_FIRMWARE(SI2157_50_FIRMWARE);
+MODULE_FIRMWARE(SI2158_50_FIRMWARE);
+MODULE_FIRMWARE(SI2158_51_FIRMWARE);
+MODULE_FIRMWARE(SI2177_50_FIRMWARE);
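si2157_load_firmware() above depends on the container layout spelled out in its comment: the file is a sequence of fixed 17-byte records, each holding a one-byte payload length followed by up to 16 command bytes (the length byte itself is never sent to the chip). A standalone sketch of a validator for that layout, assuming only the 17-byte record size visible in the code above:

#include <linux/errno.h>
#include <linux/types.h>

static int ex_validate_si_fw(const u8 *data, size_t size)
{
	size_t off;

	if (size % 17)			/* whole records only */
		return -EINVAL;

	for (off = 0; off < size; off += 17) {
		u8 len = data[off];	/* payload length */

		if (len > 16)		/* cannot exceed the record body */
			return -EINVAL;
		/* data[off + 1] .. data[off + len] is one tuner command */
	}
	return 0;
}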
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index ef4796098931..8579e80f7af7 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -26,7 +26,7 @@ struct si2157_dev {
unsigned int active:1;
unsigned int inversion:1;
unsigned int dont_load_firmware:1;
- u8 chiptype;
+ u8 part_id;
u8 if_port;
u32 if_frequency;
u32 bandwidth;
@@ -41,10 +41,22 @@ struct si2157_dev {
};
-#define SI2157_CHIPTYPE_SI2157 0
-#define SI2157_CHIPTYPE_SI2146 1
-#define SI2157_CHIPTYPE_SI2141 2
-#define SI2157_CHIPTYPE_SI2177 3
+enum si2157_part_id {
+ SI2141 = 41,
+ SI2146 = 46,
+ SI2147 = 47,
+ SI2148 = 48,
+ SI2157 = 57,
+ SI2158 = 58,
+ SI2177 = 77,
+};
+
+struct si2157_tuner_info {
+ enum si2157_part_id part_id;
+ unsigned char rom_id;
+ bool required;
+ const char *fw_name, *fw_alt_name;
+};
/* firmware command struct */
#define SI2157_ARGLEN 30
@@ -54,7 +66,29 @@ struct si2157_cmd {
unsigned rlen;
};
+#define SUPPORTS_1700KHz(dev) (((dev)->part_id == SI2141) || \
+ ((dev)->part_id == SI2147) || \
+ ((dev)->part_id == SI2157) || \
+ ((dev)->part_id == SI2177))
+
+#define SUPPORTS_ATV_IF(dev) (((dev)->part_id == SI2157) || \
+ ((dev)->part_id == SI2158))
+
+/* Old firmware namespace */
#define SI2158_A20_FIRMWARE "dvb-tuner-si2158-a20-01.fw"
#define SI2141_A10_FIRMWARE "dvb-tuner-si2141-a10-01.fw"
#define SI2157_A30_FIRMWARE "dvb-tuner-si2157-a30-01.fw"
+
+/* New firmware namespace */
+#define SI2141_60_FIRMWARE "dvb_driver_si2141_rom60.fw"
+#define SI2141_61_FIRMWARE "dvb_driver_si2141_rom61.fw"
+#define SI2146_11_FIRMWARE "dvb_driver_si2146_rom11.fw"
+#define SI2147_50_FIRMWARE "dvb_driver_si2147_rom50.fw"
+#define SI2148_32_FIRMWARE "dvb_driver_si2148_rom32.fw"
+#define SI2148_33_FIRMWARE "dvb_driver_si2148_rom33.fw"
+#define SI2157_50_FIRMWARE "dvb_driver_si2157_rom50.fw"
+#define SI2158_50_FIRMWARE "dvb_driver_si2178_rom50.fw"
+#define SI2158_51_FIRMWARE "dvb_driver_si2158_rom51.fw"
+#define SI2177_50_FIRMWARE "dvb_driver_si2177_rom50.fw"
+
#endif
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index 5e3625e75620..af7d5ea1f77e 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -240,14 +240,10 @@ static int tua9001_remove(struct i2c_client *client)
DVB_FRONTEND_COMPONENT_TUNER,
TUA9001_CMD_CEN, 0);
if (ret)
- goto err_kfree;
+ dev_err(&client->dev, "Tuner disable failed (%pe)\n", ERR_PTR(ret));
}
kfree(dev);
return 0;
-err_kfree:
- kfree(dev);
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
}
static const struct i2c_device_id tua9001_id_table[] = {
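The rewritten error path above also switches to %pe, a printk extension that, given an ERR_PTR(), prints the symbolic errno name (for example "-EIO") instead of a raw number. A sketch of the idiom:

#include <linux/device.h>
#include <linux/err.h>

/* Log a failure with its symbolic errno name. */
static void ex_log_failure(struct device *dev, int ret)
{
	dev_err(dev, "operation failed (%pe)\n", ERR_PTR(ret));
}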
diff --git a/drivers/media/usb/au0828/au0828-i2c.c b/drivers/media/usb/au0828/au0828-i2c.c
index 708f01ab47fa..749f90d73b5b 100644
--- a/drivers/media/usb/au0828/au0828-i2c.c
+++ b/drivers/media/usb/au0828/au0828-i2c.c
@@ -23,13 +23,6 @@ MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
#define I2C_WAIT_DELAY 25
#define I2C_WAIT_RETRY 1000
-static inline int i2c_slave_did_write_ack(struct i2c_adapter *i2c_adap)
-{
- struct au0828_dev *dev = i2c_adap->algo_data;
- return au0828_read(dev, AU0828_I2C_STATUS_201) &
- AU0828_I2C_STATUS_NO_WRITE_ACK ? 0 : 1;
-}
-
static inline int i2c_slave_did_read_ack(struct i2c_adapter *i2c_adap)
{
struct au0828_dev *dev = i2c_adap->algo_data;
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 5d38171b7638..7835bb0f32fc 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -87,7 +87,7 @@ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI,
0,
fc_usb->data,
sizeof(u32),
- B2C2_WAIT_FOR_OPERATION_RDW * HZ);
+ B2C2_WAIT_FOR_OPERATION_RDW);
if (ret != sizeof(u32)) {
err("error while %s dword from %d (%d).", read ? "reading" :
@@ -155,7 +155,7 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
wIndex,
fc_usb->data,
buflen,
- nWaitTime * HZ);
+ nWaitTime);
if (ret != buflen)
ret = -EIO;
@@ -171,7 +171,7 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
return ret;
}
-#define bytes_left_to_read_on_page(paddr,buflen) \
+#define bytes_left_to_read_on_page(paddr, buflen) \
((V8_MEMORY_PAGE_SIZE - (paddr & V8_MEMORY_PAGE_MASK)) > buflen \
? buflen : (V8_MEMORY_PAGE_SIZE - (paddr & V8_MEMORY_PAGE_MASK)))
@@ -179,11 +179,11 @@ static int flexcop_usb_memory_req(struct flexcop_usb *fc_usb,
flexcop_usb_request_t req, flexcop_usb_mem_page_t page_start,
u32 addr, int extended, u8 *buf, u32 len)
{
- int i,ret = 0;
+ int i, ret = 0;
u16 wMax;
u32 pagechunk = 0;
- switch(req) {
+ switch (req) {
case B2C2_USB_READ_V8_MEM:
wMax = USB_MEM_READ_MAX;
break;
@@ -248,13 +248,13 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
/* DKT 020208 - add this to support special case of DiSEqC */
case USB_FUNC_I2C_CHECKWRITE:
pipe = B2C2_USB_CTRL_PIPE_OUT;
- nWaitTime = 2;
+ nWaitTime = 2000;
request_type |= USB_DIR_OUT;
break;
case USB_FUNC_I2C_READ:
case USB_FUNC_I2C_REPEATREAD:
pipe = B2C2_USB_CTRL_PIPE_IN;
- nWaitTime = 2;
+ nWaitTime = 2000;
request_type |= USB_DIR_IN;
break;
default:
@@ -281,7 +281,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
wIndex,
fc_usb->data,
buflen,
- nWaitTime * HZ);
+ nWaitTime);
if (ret != buflen)
ret = -EIO;
@@ -341,8 +341,8 @@ static void flexcop_usb_process_frame(struct flexcop_usb *fc_usb,
b = fc_usb->tmp_buffer;
l = fc_usb->tmp_buffer_length;
} else {
- b=buffer;
- l=buffer_length;
+ b = buffer;
+ l = buffer_length;
}
while (l >= 190) {
@@ -368,7 +368,7 @@ static void flexcop_usb_process_frame(struct flexcop_usb *fc_usb,
}
}
- if (l>0)
+ if (l > 0)
memcpy(fc_usb->tmp_buffer, b, l);
fc_usb->tmp_buffer_length = l;
}
@@ -399,7 +399,7 @@ static void flexcop_usb_urb_complete(struct urb *urb)
urb->iso_frame_desc[i].status = 0;
urb->iso_frame_desc[i].actual_length = 0;
}
- usb_submit_urb(urb,GFP_ATOMIC);
+ usb_submit_urb(urb, GFP_ATOMIC);
}
static int flexcop_usb_stream_control(struct flexcop_device *fc, int onoff)
@@ -413,7 +413,7 @@ static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb)
int i;
for (i = 0; i < B2C2_USB_NUM_ISO_URB; i++)
if (fc_usb->iso_urb[i] != NULL) {
- deb_ts("unlinking/killing urb no. %d\n",i);
+ deb_ts("unlinking/killing urb no. %d\n", i);
usb_kill_urb(fc_usb->iso_urb[i]);
usb_free_urb(fc_usb->iso_urb[i]);
}
@@ -483,7 +483,7 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
err("submitting urb %d failed with %d.", i, ret);
goto urb_error;
}
- deb_ts("submitted urb no. %d.\n",i);
+ deb_ts("submitted urb no. %d.\n", i);
}
/* SRAM */
diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h
index 2f230bf72252..c7cca1a5ee59 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.h
+++ b/drivers/media/usb/b2c2/flexcop-usb.h
@@ -91,13 +91,13 @@ typedef enum {
UTILITY_SRAM_TESTVERIFY = 0x16,
} flexcop_usb_utility_function_t;
-#define B2C2_WAIT_FOR_OPERATION_RW (1*HZ)
-#define B2C2_WAIT_FOR_OPERATION_RDW (3*HZ)
-#define B2C2_WAIT_FOR_OPERATION_WDW (1*HZ)
+#define B2C2_WAIT_FOR_OPERATION_RW 1000
+#define B2C2_WAIT_FOR_OPERATION_RDW 3000
+#define B2C2_WAIT_FOR_OPERATION_WDW 1000
-#define B2C2_WAIT_FOR_OPERATION_V8READ (3*HZ)
-#define B2C2_WAIT_FOR_OPERATION_V8WRITE (3*HZ)
-#define B2C2_WAIT_FOR_OPERATION_V8FLASH (3*HZ)
+#define B2C2_WAIT_FOR_OPERATION_V8READ 3000
+#define B2C2_WAIT_FOR_OPERATION_V8WRITE 3000
+#define B2C2_WAIT_FOR_OPERATION_V8FLASH 3000
typedef enum {
V8_MEMORY_PAGE_DVB_CI = 0x20,
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 76aac06f9fb8..cba03b286473 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -550,7 +550,7 @@ static int write_packet(struct usb_device *udev,
0, /* index */
buf, /* buffer */
size,
- HZ);
+ 1000);
kfree(buf);
return ret;
@@ -582,7 +582,7 @@ static int read_packet(struct usb_device *udev,
0, /* index */
buf, /* buffer */
size,
- HZ);
+ 1000);
if (ret >= 0)
memcpy(registers, buf, size);
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index fe4d886442a4..8a34e6c0d6a6 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -423,6 +423,9 @@ static int lme2510_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
mutex_unlock(&d->i2c_mutex);
+ if (ret)
+ return -EREMOTEIO;
+
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 70219b3e8566..1caabb51ea47 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -583,7 +583,7 @@ out:
int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
struct dib0700_state *st = adap->dev->priv;
- int ret;
+ int ret, adapt_nr;
if ((onoff != 0) && (st->fw_version >= 0x10201)) {
/* for firmware later than 1.20.1,
@@ -610,26 +610,24 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
st->buf[3] = 0x00;
- deb_info("modifying (%d) streaming state for %d\n", onoff, adap->id);
-
- st->channel_state &= ~0x3;
if ((adap->fe_adap[0].stream.props.endpoint != 2)
- && (adap->fe_adap[0].stream.props.endpoint != 3)) {
- deb_info("the endpoint number (%i) is not correct, use the adapter id instead", adap->fe_adap[0].stream.props.endpoint);
- if (onoff)
- st->channel_state |= 1 << (adap->id);
- else
- st->channel_state |= 1 << ~(adap->id);
+ && (adap->fe_adap[0].stream.props.endpoint != 3)) {
+ deb_info("the endpoint number (%i) is not correct, use the adapter id instead\n",
+ adap->fe_adap[0].stream.props.endpoint);
+ adapt_nr = adap->id;
} else {
- if (onoff)
- st->channel_state |= 1 << (adap->fe_adap[0].stream.props.endpoint-2);
- else
- st->channel_state |= 1 << (3-adap->fe_adap[0].stream.props.endpoint);
+ adapt_nr = adap->fe_adap[0].stream.props.endpoint - 2;
}
+ if (onoff)
+ st->channel_state |= 1 << adapt_nr;
+ else
+ st->channel_state &= ~(1 << adapt_nr);
+
st->buf[2] |= st->channel_state;
- deb_info("data for streaming: %x %x\n", st->buf[1], st->buf[2]);
+ deb_info("adapter %d, streaming %s: %*ph\n",
+ adapt_nr, onoff ? "ON" : "OFF", 3, st->buf);
ret = dib0700_ctrl_wr(adap->dev, st->buf, 4);
mutex_unlock(&adap->dev->usb_mutex);
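Beyond the cleanup, the hunk above fixes a real bug: switching a channel off used to OR in "1 << ~(adap->id)", which sets some unrelated bit (the shift count is the bitwise complement of the id) instead of clearing the right one. The conventional set/clear idiom, sketched standalone:

#include <linux/bits.h>
#include <linux/types.h>

/* Set or clear bit 'nr' of a channel bitmap. */
static inline u8 ex_update_channel(u8 state, unsigned int nr, bool onoff)
{
	return onoff ? (state | BIT(nr)) : (state & ~BIT(nr));
}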
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index f0e686b05dc6..ca75ebdc10b3 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -2150,46 +2150,153 @@ static struct dvb_usb_device_properties s6x0_properties = {
}
};
-static const struct dvb_usb_device_description d1100 = {
- "Prof 1100 USB ",
- {&dw2102_table[PROF_1100], NULL},
- {NULL},
-};
+static struct dvb_usb_device_properties p1100_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .size_of_priv = sizeof(struct dw2102_state),
+ .firmware = P1100_FIRMWARE,
+ .no_reconnect = 1,
-static const struct dvb_usb_device_description d660 = {
- "TeVii S660 USB",
- {&dw2102_table[TEVII_S660], NULL},
- {NULL},
-};
+ .i2c_algo = &s6x0_i2c_algo,
+ .rc.core = {
+ .rc_interval = 150,
+ .rc_codes = RC_MAP_TBS_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_PROTO_BIT_NEC,
+ .rc_query = prof_rc_query,
+ },
-static const struct dvb_usb_device_description d480_1 = {
- "TeVii S480.1 USB",
- {&dw2102_table[TEVII_S480_1], NULL},
- {NULL},
+ .generic_bulk_ctrl_endpoint = 0x81,
+ .num_adapters = 1,
+ .download_firmware = dw2102_load_firmware,
+ .read_mac_address = s6x0_read_mac_address,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .frontend_attach = stv0288_frontend_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x82,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+ }
+ }
+ },
+ } },
+ }
+ },
+ .num_device_descs = 1,
+ .devices = {
+ {"Prof 1100 USB ",
+ {&dw2102_table[PROF_1100], NULL},
+ {NULL},
+ },
+ }
};
-static const struct dvb_usb_device_description d480_2 = {
- "TeVii S480.2 USB",
- {&dw2102_table[TEVII_S480_2], NULL},
- {NULL},
-};
+static struct dvb_usb_device_properties s660_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .size_of_priv = sizeof(struct dw2102_state),
+ .firmware = S660_FIRMWARE,
+ .no_reconnect = 1,
-static const struct dvb_usb_device_description d7500 = {
- "Prof 7500 USB DVB-S2",
- {&dw2102_table[PROF_7500], NULL},
- {NULL},
-};
+ .i2c_algo = &s6x0_i2c_algo,
+ .rc.core = {
+ .rc_interval = 150,
+ .rc_codes = RC_MAP_TEVII_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_PROTO_BIT_NEC,
+ .rc_query = dw2102_rc_query,
+ },
-static const struct dvb_usb_device_description d421 = {
- "TeVii S421 PCI",
- {&dw2102_table[TEVII_S421], NULL},
- {NULL},
+ .generic_bulk_ctrl_endpoint = 0x81,
+ .num_adapters = 1,
+ .download_firmware = dw2102_load_firmware,
+ .read_mac_address = s6x0_read_mac_address,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .frontend_attach = ds3000_frontend_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x82,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+ }
+ }
+ },
+ } },
+ }
+ },
+ .num_device_descs = 3,
+ .devices = {
+ {"TeVii S660 USB",
+ {&dw2102_table[TEVII_S660], NULL},
+ {NULL},
+ },
+ {"TeVii S480.1 USB",
+ {&dw2102_table[TEVII_S480_1], NULL},
+ {NULL},
+ },
+ {"TeVii S480.2 USB",
+ {&dw2102_table[TEVII_S480_2], NULL},
+ {NULL},
+ },
+ }
};
-static const struct dvb_usb_device_description d632 = {
- "TeVii S632 USB",
- {&dw2102_table[TEVII_S632], NULL},
- {NULL},
+static struct dvb_usb_device_properties p7500_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .size_of_priv = sizeof(struct dw2102_state),
+ .firmware = P7500_FIRMWARE,
+ .no_reconnect = 1,
+
+ .i2c_algo = &s6x0_i2c_algo,
+ .rc.core = {
+ .rc_interval = 150,
+ .rc_codes = RC_MAP_TBS_NEC,
+ .module_name = "dw2102",
+ .allowed_protos = RC_PROTO_BIT_NEC,
+ .rc_query = prof_rc_query,
+ },
+
+ .generic_bulk_ctrl_endpoint = 0x81,
+ .num_adapters = 1,
+ .download_firmware = dw2102_load_firmware,
+ .read_mac_address = s6x0_read_mac_address,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .frontend_attach = prof_7500_frontend_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x82,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+ }
+ }
+ },
+ } },
+ }
+ },
+ .num_device_descs = 1,
+ .devices = {
+ {"Prof 7500 USB DVB-S2",
+ {&dw2102_table[PROF_7500], NULL},
+ {NULL},
+ },
+ }
};
static struct dvb_usb_device_properties su3000_properties = {
@@ -2273,6 +2380,59 @@ static struct dvb_usb_device_properties su3000_properties = {
}
};
+static struct dvb_usb_device_properties s421_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .size_of_priv = sizeof(struct dw2102_state),
+ .power_ctrl = su3000_power_ctrl,
+ .num_adapters = 1,
+ .identify_state = su3000_identify_state,
+ .i2c_algo = &su3000_i2c_algo,
+
+ .rc.core = {
+ .rc_interval = 150,
+ .rc_codes = RC_MAP_SU3000,
+ .module_name = "dw2102",
+ .allowed_protos = RC_PROTO_BIT_RC5,
+ .rc_query = su3000_rc_query,
+ },
+
+ .read_mac_address = su3000_read_mac_address,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .streaming_ctrl = su3000_streaming_ctrl,
+ .frontend_attach = m88rs2000_frontend_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 8,
+ .endpoint = 0x82,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+ }
+ }
+ }
+ } },
+ }
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "TeVii S421 PCI",
+ { &dw2102_table[TEVII_S421], NULL },
+ { NULL },
+ },
+ { "TeVii S632 USB",
+ { &dw2102_table[TEVII_S632], NULL },
+ { NULL },
+ },
+ }
+};
+
static struct dvb_usb_device_properties t220_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
@@ -2390,101 +2550,33 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
static int dw2102_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- int retval = -ENOMEM;
- struct dvb_usb_device_properties *p1100;
- struct dvb_usb_device_properties *s660;
- struct dvb_usb_device_properties *p7500;
- struct dvb_usb_device_properties *s421;
-
- p1100 = kmemdup(&s6x0_properties,
- sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
- if (!p1100)
- goto err0;
-
- /* copy default structure */
- /* fill only different fields */
- p1100->firmware = P1100_FIRMWARE;
- p1100->devices[0] = d1100;
- p1100->rc.core.rc_query = prof_rc_query;
- p1100->rc.core.rc_codes = RC_MAP_TBS_NEC;
- p1100->adapter->fe[0].frontend_attach = stv0288_frontend_attach;
-
- s660 = kmemdup(&s6x0_properties,
- sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
- if (!s660)
- goto err1;
-
- s660->firmware = S660_FIRMWARE;
- s660->num_device_descs = 3;
- s660->devices[0] = d660;
- s660->devices[1] = d480_1;
- s660->devices[2] = d480_2;
- s660->adapter->fe[0].frontend_attach = ds3000_frontend_attach;
-
- p7500 = kmemdup(&s6x0_properties,
- sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
- if (!p7500)
- goto err2;
-
- p7500->firmware = P7500_FIRMWARE;
- p7500->devices[0] = d7500;
- p7500->rc.core.rc_query = prof_rc_query;
- p7500->rc.core.rc_codes = RC_MAP_TBS_NEC;
- p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach;
-
-
- s421 = kmemdup(&su3000_properties,
- sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
- if (!s421)
- goto err3;
-
- s421->num_device_descs = 2;
- s421->devices[0] = d421;
- s421->devices[1] = d632;
- s421->adapter->fe[0].frontend_attach = m88rs2000_frontend_attach;
-
- if (0 == dvb_usb_device_init(intf, &dw2102_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &dw2104_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &dw3101_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &s6x0_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, p1100,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, s660,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, p7500,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, s421,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &su3000_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &t220_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
- THIS_MODULE, NULL, adapter_nr)) {
-
- /* clean up copied properties */
- kfree(s421);
- kfree(p7500);
- kfree(s660);
- kfree(p1100);
+ if (!(dvb_usb_device_init(intf, &dw2102_properties,
+ THIS_MODULE, NULL, adapter_nr) &&
+ dvb_usb_device_init(intf, &dw2104_properties,
+ THIS_MODULE, NULL, adapter_nr) &&
+ dvb_usb_device_init(intf, &dw3101_properties,
+ THIS_MODULE, NULL, adapter_nr) &&
+ dvb_usb_device_init(intf, &s6x0_properties,
+ THIS_MODULE, NULL, adapter_nr) &&
+ dvb_usb_device_init(intf, &p1100_properties,
+ THIS_MODULE, NULL, adapter_nr) &&
+ dvb_usb_device_init(intf, &s660_properties,
+ THIS_MODULE, NULL, adapter_nr) &&
+ dvb_usb_device_init(intf, &p7500_properties,
+ THIS_MODULE, NULL, adapter_nr) &&
+ dvb_usb_device_init(intf, &s421_properties,
+ THIS_MODULE, NULL, adapter_nr) &&
+ dvb_usb_device_init(intf, &su3000_properties,
+ THIS_MODULE, NULL, adapter_nr) &&
+ dvb_usb_device_init(intf, &t220_properties,
+ THIS_MODULE, NULL, adapter_nr) &&
+ dvb_usb_device_init(intf, &tt_s2_4600_properties,
+ THIS_MODULE, NULL, adapter_nr))) {
return 0;
}
- retval = -ENODEV;
- kfree(s421);
-err3:
- kfree(p7500);
-err2:
- kfree(s660);
-err1:
- kfree(p1100);
-err0:
- return retval;
+ return -ENODEV;
}
static void dw2102_disconnect(struct usb_interface *intf)
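The rewritten probe relies on dvb_usb_device_init() returning 0 on success: the '&&' chain stays truthy only while every candidate property set fails, and the first match short-circuits it to 0, so the negated condition returns success. A minimal sketch of an equivalent, table-driven form (the array and function names here are illustrative, not part of the patch):

	static struct dvb_usb_device_properties *const dw2102_all_props[] = {
		&dw2102_properties, &dw2104_properties, &dw3101_properties,
		&s6x0_properties, &p1100_properties, &s660_properties,
		&p7500_properties, &s421_properties, &su3000_properties,
		&t220_properties, &tt_s2_4600_properties,
	};

	static int dw2102_probe_sketch(struct usb_interface *intf,
				       const struct usb_device_id *id)
	{
		size_t i;

		/* dvb_usb_device_init() returns 0 once a property set matches */
		for (i = 0; i < ARRAY_SIZE(dw2102_all_props); i++)
			if (!dvb_usb_device_init(intf, dw2102_all_props[i],
						 THIS_MODULE, NULL, adapter_nr))
				return 0;

		return -ENODEV;
	}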
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index 4bb5b82599a7..691e05833db1 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -274,6 +274,13 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
/* Should check for ack here, if we knew how. */
}
if (msg[i].flags & I2C_M_RD) {
+ char *read = kmalloc(1, GFP_KERNEL);
+ if (!read) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
for (j = 0; j < msg[i].len; j++) {
/* Last byte of transaction?
* Send STOP, otherwise send ACK. */
@@ -281,9 +288,12 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
if ((ret = m920x_read(d->udev, M9206_I2C, 0x0,
0x20 | stop,
- &msg[i].buf[j], 1)) != 0)
- goto unlock;
+ read, 1)) != 0) {
+ kfree(read);
+ goto unlock;
+ }
+ msg[i].buf[j] = read[0];
}
+
+ kfree(read);
} else {
for (j = 0; j < msg[i].len; j++) {
/* Last byte of transaction? Then send STOP. */
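The one-byte kmalloc() above is needed because USB transfer buffers must be DMA-safe: on-stack storage cannot be handed to the USB core, so the read bounces through heap memory and is copied into msg[i].buf afterwards. The bare pattern, as a sketch with illustrative names:

	u8 *bounce = kmalloc(1, GFP_KERNEL);	/* heap memory is DMA-safe */

	if (!bounce)
		return -ENOMEM;
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR, value, index,
			      bounce, 1, 1000);
	if (ret >= 0)
		dest[0] = bounce[0];	/* copy out before releasing */
	kfree(bounce);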
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index b207f34af5c6..b451ce3cb169 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -3630,8 +3630,10 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
if (dev->is_audio_only) {
retval = em28xx_audio_setup(dev);
- if (retval)
- return -ENODEV;
+ if (retval) {
+ retval = -ENODEV;
+ goto err_deinit_media;
+ }
em28xx_init_extension(dev);
return 0;
@@ -3650,7 +3652,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
dev_err(&dev->intf->dev,
"%s: em28xx_i2c_register bus 0 - error [%d]!\n",
__func__, retval);
- return retval;
+ goto err_deinit_media;
}
/* register i2c bus 1 */
@@ -3666,9 +3668,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
"%s: em28xx_i2c_register bus 1 - error [%d]!\n",
__func__, retval);
- em28xx_i2c_unregister(dev, 0);
-
- return retval;
+ goto err_unreg_i2c;
}
}
@@ -3676,6 +3676,12 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
em28xx_card_setup(dev);
return 0;
+
+err_unreg_i2c:
+ em28xx_i2c_unregister(dev, 0);
+err_deinit_media:
+ em28xx_unregister_media_device(dev);
+ return retval;
}
static int em28xx_duplicate_dev(struct em28xx *dev)
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index acc0bf7dbe2b..c837cc528a33 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -89,7 +89,7 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
mutex_lock(&dev->ctrl_urb_lock);
ret = usb_control_msg(udev, pipe, req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x0000, reg, dev->urb_buf, len, HZ);
+ 0x0000, reg, dev->urb_buf, len, 1000);
if (ret < 0) {
em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x failed with error %i\n",
pipe,
@@ -158,7 +158,7 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
memcpy(dev->urb_buf, buf, len);
ret = usb_control_msg(udev, pipe, req,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x0000, reg, dev->urb_buf, len, HZ);
+ 0x0000, reg, dev->urb_buf, len, 1000);
mutex_unlock(&dev->ctrl_urb_lock);
if (ret < 0) {
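The HZ-to-1000 substitution here (repeated in the pvrusb2, stk1160 and s2255 hunks below) matters because the last usb_control_msg() argument is a timeout in milliseconds, not jiffies: HZ is the tick rate, anywhere from 100 to 1000 depending on CONFIG_HZ, so passing it gave a 0.1 to 1 second timeout that varied with kernel configuration. The literal pins it at one second everywhere; the corrected call shape, for reference:

	/* timeout in milliseconds: 1000 means one second on every CONFIG_HZ */
	ret = usb_control_msg(udev, pipe, req,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0x0000, reg, dev->urb_buf, len, 1000);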
diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
index 6650eab913d8..0c24e2984304 100644
--- a/drivers/media/usb/go7007/go7007-driver.c
+++ b/drivers/media/usb/go7007/go7007-driver.c
@@ -516,7 +516,7 @@ void go7007_parse_video_stream(struct go7007 *go, u8 *buf, int length)
if (vb && vb->vb.vb2_buf.planes[0].bytesused >=
GO7007_BUF_SIZE - 3) {
v4l2_info(&go->v4l2_dev, "dropping oversized frame\n");
- vb->vb.vb2_buf.planes[0].bytesused = 0;
+ vb2_set_plane_payload(&vb->vb.vb2_buf, 0, 0);
vb->frame_offset = 0;
vb->modet_active = 0;
vb = go->active_buf = NULL;
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
index bc4008d5d116..8ef010a87445 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
@@ -339,14 +339,13 @@ int s5k83a_start(struct sd *sd)
/* Create another thread, polling the GPIO ports of the camera to check
if it got rotated. This is how the windows driver does it so we have
to assume that there is no better way of accomplishing this */
- sd->rotation_thread = kthread_create(rotation_thread_function,
- sd, "rotation thread");
+ sd->rotation_thread = kthread_run(rotation_thread_function,
+ sd, "rotation thread");
if (IS_ERR(sd->rotation_thread)) {
err = PTR_ERR(sd->rotation_thread);
sd->rotation_thread = NULL;
return err;
}
- wake_up_process(sd->rotation_thread);
/* Preinit the sensor */
for (i = 0; i < ARRAY_SIZE(start_s5k83a) && !err; i++) {
@@ -408,25 +407,21 @@ static int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val)
static int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
{
- int err;
u8 data[1];
struct sd *sd = (struct sd *) gspca_dev;
data[0] = val;
- err = m5602_write_sensor(sd, S5K83A_BRIGHTNESS, data, 1);
- return err;
+ return m5602_write_sensor(sd, S5K83A_BRIGHTNESS, data, 1);
}
static int s5k83a_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
- int err;
u8 data[2];
struct sd *sd = (struct sd *) gspca_dev;
data[0] = 0;
data[1] = val;
- err = m5602_write_sensor(sd, S5K83A_EXPOSURE, data, 2);
- return err;
+ return m5602_write_sensor(sd, S5K83A_EXPOSURE, data, 2);
}
static int s5k83a_set_flip_real(struct gspca_dev *gspca_dev,
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
index f6005d1296ef..c8102772344b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
@@ -188,8 +188,8 @@ static int pvr2_encoder_cmd(void *ctxt,
}
- LOCK_TAKE(hdw->ctl_lock); while (1) {
-
+ LOCK_TAKE(hdw->ctl_lock);
+ while (1) {
if (!hdw->state_encoder_ok) {
ret = -EIO;
break;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index d38dee1792e4..cd7b118d5929 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -1467,7 +1467,7 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
for (address = 0; address < fwsize; address += 0x800) {
memcpy(fw_ptr, fw_entry->data + address, 0x800);
ret += usb_control_msg(hdw->usb_dev, pipe, 0xa0, 0x40, address,
- 0, fw_ptr, 0x800, HZ);
+ 0, fw_ptr, 0x800, 1000);
}
trace_firmware("Upload done, releasing device's CPU");
@@ -1605,7 +1605,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
((u32 *)fw_ptr)[icnt] = swab32(((u32 *)fw_ptr)[icnt]);
ret |= usb_bulk_msg(hdw->usb_dev, pipe, fw_ptr,bcnt,
- &actual_length, HZ);
+ &actual_length, 1000);
ret |= (actual_length != bcnt);
if (ret) break;
fw_done += bcnt;
@@ -1718,16 +1718,16 @@ int pvr2_hdw_get_streaming(struct pvr2_hdw *hdw)
int pvr2_hdw_set_streaming(struct pvr2_hdw *hdw,int enable_flag)
{
int ret,st;
- LOCK_TAKE(hdw->big_lock); do {
- pvr2_hdw_untrip_unlocked(hdw);
- if ((!enable_flag) != !(hdw->state_pipeline_req)) {
- hdw->state_pipeline_req = enable_flag != 0;
- pvr2_trace(PVR2_TRACE_START_STOP,
- "/*--TRACE_STREAM--*/ %s",
- enable_flag ? "enable" : "disable");
- }
- pvr2_hdw_state_sched(hdw);
- } while (0); LOCK_GIVE(hdw->big_lock);
+ LOCK_TAKE(hdw->big_lock);
+ pvr2_hdw_untrip_unlocked(hdw);
+ if (!enable_flag != !hdw->state_pipeline_req) {
+ hdw->state_pipeline_req = enable_flag != 0;
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*--TRACE_STREAM--*/ %s",
+ enable_flag ? "enable" : "disable");
+ }
+ pvr2_hdw_state_sched(hdw);
+ LOCK_GIVE(hdw->big_lock);
if ((ret = pvr2_hdw_wait(hdw,0)) < 0) return ret;
if (enable_flag) {
while ((st = hdw->master_state) != PVR2_STATE_RUN) {
@@ -3394,7 +3394,8 @@ void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw,
int ret;
u16 address;
unsigned int pipe;
- LOCK_TAKE(hdw->big_lock); do {
+ LOCK_TAKE(hdw->big_lock);
+ do {
if ((hdw->fw_buffer == NULL) == !enable_flag) break;
if (!enable_flag) {
@@ -3438,7 +3439,7 @@ void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw,
0xa0,0xc0,
address,0,
hdw->fw_buffer+address,
- 0x800,HZ);
+ 0x800,1000);
if (ret < 0) break;
}
@@ -3457,8 +3458,8 @@ void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw,
pvr2_trace(PVR2_TRACE_FIRMWARE,
"Done sucking down EEPROM contents");
}
-
- } while (0); LOCK_GIVE(hdw->big_lock);
+ } while (0);
+ LOCK_GIVE(hdw->big_lock);
}
@@ -3473,7 +3474,8 @@ int pvr2_hdw_cpufw_get(struct pvr2_hdw *hdw,unsigned int offs,
char *buf,unsigned int cnt)
{
int ret = -EINVAL;
- LOCK_TAKE(hdw->big_lock); do {
+ LOCK_TAKE(hdw->big_lock);
+ do {
if (!buf) break;
if (!cnt) break;
@@ -3498,7 +3500,8 @@ int pvr2_hdw_cpufw_get(struct pvr2_hdw *hdw,unsigned int offs,
"Read firmware data offs=%d cnt=%d",
offs,cnt);
ret = cnt;
- } while (0); LOCK_GIVE(hdw->big_lock);
+ } while (0);
+ LOCK_GIVE(hdw->big_lock);
return ret;
}
@@ -3977,7 +3980,7 @@ void pvr2_hdw_cpureset_assert(struct pvr2_hdw *hdw,int val)
/* Write the CPUCS register on the 8051. The lsb of the register
is the reset bit; a 1 asserts reset while a 0 clears it. */
pipe = usb_sndctrlpipe(hdw->usb_dev, 0);
- ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,HZ);
+ ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,1000);
if (ret < 0) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"cpureset_assert(%d) error=%d",val,ret);
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 3b0e4ed75d99..acf18e2251a5 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1882,7 +1882,7 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
Value, Index, buf,
- TransferBufferLength, HZ * 5);
+ TransferBufferLength, USB_CTRL_SET_TIMEOUT);
if (r >= 0)
memcpy(TransferBuffer, buf, TransferBufferLength);
@@ -1891,7 +1891,7 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
Value, Index, buf,
- TransferBufferLength, HZ * 5);
+ TransferBufferLength, USB_CTRL_SET_TIMEOUT);
}
kfree(buf);
return r;
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index df4c5dcba39c..fe9c7b3a950e 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -661,10 +661,6 @@ static const struct usb_device_id smsusb_id_table[] = {
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0x5590),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
- { USB_DEVICE(0x187f, 0x0202),
- .driver_info = SMS1XXX_BOARD_SIANO_NICE },
- { USB_DEVICE(0x187f, 0x0301),
- .driver_info = SMS1XXX_BOARD_SIANO_VENICE },
{ USB_DEVICE(0x2040, 0xb900),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0xb910),
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index b4f8bc5db138..4e1698f78818 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -65,7 +65,7 @@ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value)
return -ENOMEM;
ret = usb_control_msg(dev->udev, pipe, 0x00,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x00, reg, buf, sizeof(u8), HZ);
+ 0x00, reg, buf, sizeof(u8), 1000);
if (ret < 0) {
stk1160_err("read failed on reg 0x%x (%d)\n",
reg, ret);
@@ -85,7 +85,7 @@ int stk1160_write_reg(struct stk1160 *dev, u16 reg, u16 value)
ret = usb_control_msg(dev->udev, pipe, 0x01,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, reg, NULL, 0, HZ);
+ value, reg, NULL, 0, 1000);
if (ret < 0) {
stk1160_err("write failed on reg 0x%x (%d)\n",
reg, ret);
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 30bfe9069a1f..b4f6edf968bc 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1638,8 +1638,8 @@ static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity,
struct v4l2_ext_controls *ctrls,
struct uvc_control *uvc_control)
{
- struct uvc_control_mapping *mapping;
- struct uvc_control *ctrl_found;
+ struct uvc_control_mapping *mapping = NULL;
+ struct uvc_control *ctrl_found = NULL;
unsigned int i;
if (!entity)
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 7c007426e082..5f394d4efc21 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -256,7 +256,7 @@ static struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16])
static enum v4l2_colorspace uvc_colorspace(const u8 primaries)
{
static const enum v4l2_colorspace colorprimaries[] = {
- V4L2_COLORSPACE_DEFAULT, /* Unspecified */
+ V4L2_COLORSPACE_SRGB, /* Unspecified */
V4L2_COLORSPACE_SRGB,
V4L2_COLORSPACE_470_SYSTEM_M,
V4L2_COLORSPACE_470_SYSTEM_BG,
@@ -267,7 +267,7 @@ static enum v4l2_colorspace uvc_colorspace(const u8 primaries)
if (primaries < ARRAY_SIZE(colorprimaries))
return colorprimaries[primaries];
- return V4L2_COLORSPACE_DEFAULT; /* Reserved */
+ return V4L2_COLORSPACE_SRGB; /* Reserved */
}
static enum v4l2_xfer_func uvc_xfer_func(const u8 transfer_characteristics)
@@ -769,6 +769,8 @@ static int uvc_parse_format(struct uvc_device *dev,
buflen -= buffer[0];
buffer += buffer[0];
+ } else {
+ format->colorspace = V4L2_COLORSPACE_SRGB;
}
return buffer - start;
@@ -2193,7 +2195,6 @@ int uvc_register_video_device(struct uvc_device *dev,
const struct v4l2_file_operations *fops,
const struct v4l2_ioctl_ops *ioctl_ops)
{
- const char *name;
int ret;
/* Initialize the video buffers queue. */
@@ -2222,20 +2223,16 @@ int uvc_register_video_device(struct uvc_device *dev,
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
default:
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- name = "Video Capture";
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- name = "Video Output";
break;
case V4L2_BUF_TYPE_META_CAPTURE:
vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
- name = "Metadata";
break;
}
- snprintf(vdev->name, sizeof(vdev->name), "%s %u", name,
- stream->header.bTerminalLink);
+ strscpy(vdev->name, dev->name, sizeof(vdev->name));
/*
* Set the driver data before calling video_register_device, otherwise
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index f4e4aff8ddf7..711556d13d03 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -44,8 +44,10 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
if (v4l2_ctrl_get_name(map->id) == NULL) {
map->name = kmemdup(xmap->name, sizeof(xmap->name),
GFP_KERNEL);
- if (!map->name)
- return -ENOMEM;
+ if (!map->name) {
+ ret = -ENOMEM;
+ goto free_map;
+ }
}
memcpy(map->entity, xmap->entity, sizeof(map->entity));
map->selector = xmap->selector;
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 9f37eaf28ce7..1b4cc934109e 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1963,6 +1963,10 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
if (ep == NULL)
return -EIO;
+ /* Reject broken descriptors. */
+ if (usb_endpoint_maxp(&ep->desc) == 0)
+ return -EIO;
+
ret = uvc_init_video_bulk(stream, ep, gfp_flags);
}
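usb_endpoint_maxp() extracts wMaxPacketSize from the endpoint descriptor, and a device reporting 0 would otherwise flow into the bulk URB sizing arithmetic and divide by zero. The guard in isolation, as a sketch:

	unsigned int psize = usb_endpoint_maxp(&ep->desc);

	if (psize == 0)
		return -EIO;	/* malformed descriptor, refuse to stream */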
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 2e5366143b81..143230b3275b 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -189,7 +189,7 @@
/* Maximum status buffer size in bytes of interrupt URB. */
#define UVC_MAX_STATUS_SIZE 16
-#define UVC_CTRL_CONTROL_TIMEOUT 500
+#define UVC_CTRL_CONTROL_TIMEOUT 5000
#define UVC_CTRL_STREAMING_TIMEOUT 5000
/* Maximum allowed number of control mappings per device */
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 02dc1787e953..6ee75c6c820e 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -52,6 +52,10 @@ config V4L2_JPEG_HELPER
config V4L2_H264
tristate
+# Used by drivers that need v4l2-vp9.ko
+config V4L2_VP9
+ tristate
+
# Used by drivers that need v4l2-mem2mem.ko
config V4L2_MEM2MEM_DEV
tristate
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 66a78c556c98..83fac5c746f5 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_VIDEO_TUNER) += tuner.o
obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
obj-$(CONFIG_V4L2_H264) += v4l2-h264.o
+obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o
obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index 70adfc1b9c81..54abe5245dcc 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -283,6 +283,12 @@ static void std_log(const struct v4l2_ctrl *ctrl)
case V4L2_CTRL_TYPE_MPEG2_PICTURE:
pr_cont("MPEG2_PICTURE");
break;
+ case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
+ pr_cont("VP9_COMPRESSED_HDR");
+ break;
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ pr_cont("VP9_FRAME");
+ break;
default:
pr_cont("unknown type %d", ctrl->type);
break;
@@ -317,6 +323,168 @@ static void std_log(const struct v4l2_ctrl *ctrl)
#define zero_reserved(s) \
memset(&(s).reserved, 0, sizeof((s).reserved))
+static int
+validate_vp9_lf_params(struct v4l2_vp9_loop_filter *lf)
+{
+ unsigned int i;
+
+ if (lf->flags & ~(V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED |
+ V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE))
+ return -EINVAL;
+
+ /* Check that all values are in the accepted range. */
+ if (lf->level > GENMASK(5, 0))
+ return -EINVAL;
+
+ if (lf->sharpness > GENMASK(2, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++)
+ if (lf->ref_deltas[i] < -63 || lf->ref_deltas[i] > 63)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++)
+ if (lf->mode_deltas[i] < -63 || lf->mode_deltas[i] > 63)
+ return -EINVAL;
+
+ zero_reserved(*lf);
+ return 0;
+}
+
+static int
+validate_vp9_quant_params(struct v4l2_vp9_quantization *quant)
+{
+ if (quant->delta_q_y_dc < -15 || quant->delta_q_y_dc > 15 ||
+ quant->delta_q_uv_dc < -15 || quant->delta_q_uv_dc > 15 ||
+ quant->delta_q_uv_ac < -15 || quant->delta_q_uv_ac > 15)
+ return -EINVAL;
+
+ zero_reserved(*quant);
+ return 0;
+}
+
+static int
+validate_vp9_seg_params(struct v4l2_vp9_segmentation *seg)
+{
+ unsigned int i, j;
+
+ if (seg->flags & ~(V4L2_VP9_SEGMENTATION_FLAG_ENABLED |
+ V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP |
+ V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE |
+ V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA |
+ V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(seg->feature_enabled); i++) {
+ if (seg->feature_enabled[i] &
+ ~V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(seg->feature_data); i++) {
+ const int range[] = { 255, 63, 3, 0 };
+
+ for (j = 0; j < ARRAY_SIZE(seg->feature_data[i]); j++) {
+ if (seg->feature_data[i][j] < -range[j] ||
+ seg->feature_data[i][j] > range[j])
+ return -EINVAL;
+ }
+ }
+
+ zero_reserved(*seg);
+ return 0;
+}
+
+static int
+validate_vp9_compressed_hdr(struct v4l2_ctrl_vp9_compressed_hdr *hdr)
+{
+ if (hdr->tx_mode > V4L2_VP9_TX_MODE_SELECT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+validate_vp9_frame(struct v4l2_ctrl_vp9_frame *frame)
+{
+ int ret;
+
+ /* Make sure we're not passed invalid flags. */
+ if (frame->flags & ~(V4L2_VP9_FRAME_FLAG_KEY_FRAME |
+ V4L2_VP9_FRAME_FLAG_SHOW_FRAME |
+ V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT |
+ V4L2_VP9_FRAME_FLAG_INTRA_ONLY |
+ V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV |
+ V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX |
+ V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE |
+ V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING |
+ V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING |
+ V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING))
+ return -EINVAL;
+
+ if (frame->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT &&
+ frame->flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX)
+ return -EINVAL;
+
+ if (frame->profile > V4L2_VP9_PROFILE_MAX)
+ return -EINVAL;
+
+ if (frame->reset_frame_context > V4L2_VP9_RESET_FRAME_CTX_ALL)
+ return -EINVAL;
+
+ if (frame->frame_context_idx >= V4L2_VP9_NUM_FRAME_CTX)
+ return -EINVAL;
+
+ /*
+ * Profiles 0 and 1 only support 8-bit depth, profiles 2 and 3 only 10
+ * and 12 bit depths.
+ */
+ if ((frame->profile < 2 && frame->bit_depth != 8) ||
+ (frame->profile >= 2 &&
+ (frame->bit_depth != 10 && frame->bit_depth != 12)))
+ return -EINVAL;
+
+ /* Profile 0 and 2 only accept YUV 4:2:0. */
+ if ((frame->profile == 0 || frame->profile == 2) &&
+ (!(frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) ||
+ !(frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING)))
+ return -EINVAL;
+
+ /* Profile 1 and 3 only accept YUV 4:2:2, 4:4:0 and 4:4:4. */
+ if ((frame->profile == 1 || frame->profile == 3) &&
+ ((frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) &&
+ (frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING)))
+ return -EINVAL;
+
+ if (frame->interpolation_filter > V4L2_VP9_INTERP_FILTER_SWITCHABLE)
+ return -EINVAL;
+
+ /*
+ * According to the spec, tile_cols_log2 shall be less than or equal
+ * to 6.
+ */
+ if (frame->tile_cols_log2 > 6)
+ return -EINVAL;
+
+ if (frame->reference_mode > V4L2_VP9_REFERENCE_MODE_SELECT)
+ return -EINVAL;
+
+ ret = validate_vp9_lf_params(&frame->lf);
+ if (ret)
+ return ret;
+
+ ret = validate_vp9_quant_params(&frame->quant);
+ if (ret)
+ return ret;
+
+ ret = validate_vp9_seg_params(&frame->seg);
+ if (ret)
+ return ret;
+
+ zero_reserved(*frame);
+ return 0;
+}
+
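+/*
+ * For reference, the bit-depth and subsampling checks above encode the
+ * standard VP9 profile matrix:
+ *
+ *   profile 0:  8-bit      4:2:0 only
+ *   profile 1:  8-bit      4:2:2 / 4:4:0 / 4:4:4
+ *   profile 2:  10/12-bit  4:2:0 only
+ *   profile 3:  10/12-bit  4:2:2 / 4:4:0 / 4:4:4
+ */
+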
/*
* Compound controls validation requires setting unused fields/flags to zero
* in order to properly detect unchanged controls with std_equal's memcmp.
@@ -690,6 +858,12 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
break;
+ case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
+ return validate_vp9_compressed_hdr(p);
+
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ return validate_vp9_frame(p);
+
case V4L2_CTRL_TYPE_AREA:
area = p;
if (!area->width || !area->height)
@@ -1255,6 +1429,12 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
elem_size = sizeof(struct v4l2_ctrl_hdr10_mastering_display);
break;
+ case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
+ elem_size = sizeof(struct v4l2_ctrl_vp9_compressed_hdr);
+ break;
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ elem_size = sizeof(struct v4l2_ctrl_vp9_frame);
+ break;
case V4L2_CTRL_TYPE_AREA:
elem_size = sizeof(struct v4l2_area);
break;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
index ebe82b6ba6e6..54ca4e6b820b 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-defs.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -785,6 +785,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
+ case V4L2_CID_COLORFX_RGB: return "Color Effects, RGB";
/*
* Codec controls
@@ -1177,6 +1178,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_STATELESS_MPEG2_SEQUENCE: return "MPEG-2 Sequence Header";
case V4L2_CID_STATELESS_MPEG2_PICTURE: return "MPEG-2 Picture Header";
case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices";
+ case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: return "VP9 Probabilities Updates";
+ case V4L2_CID_STATELESS_VP9_FRAME: return "VP9 Frame Decode Parameters";
/* Colorimetry controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1394,11 +1397,18 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
*min = *max = *step = *def = 0;
break;
case V4L2_CID_BG_COLOR:
+ case V4L2_CID_COLORFX_RGB:
*type = V4L2_CTRL_TYPE_INTEGER;
*step = 1;
*min = 0;
- /* Max is calculated as RGB888 that is 2^24 */
- *max = 0xFFFFFF;
+ /* Max is the largest RGB888 value, i.e. 2^24 - 1 */
+ *max = 0xffffff;
+ break;
+ case V4L2_CID_COLORFX_CBCR:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *step = 1;
+ *min = 0;
+ *max = 0xffff;
break;
case V4L2_CID_FLASH_FAULT:
case V4L2_CID_JPEG_ACTIVE_MARKER:
@@ -1498,6 +1508,12 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS;
break;
+ case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR:
+ *type = V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR;
+ break;
+ case V4L2_CID_STATELESS_VP9_FRAME:
+ *type = V4L2_CTRL_TYPE_VP9_FRAME;
+ break;
case V4L2_CID_UNIT_CELL_SIZE:
*type = V4L2_CTRL_TYPE_AREA;
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 31d0109ce5a8..9ac557b8e146 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1413,6 +1413,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_VP8: descr = "VP8"; break;
case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break;
case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
+ case V4L2_PIX_FMT_VP9_FRAME: descr = "VP9 Frame"; break;
case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break;
case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
@@ -2090,6 +2091,7 @@ static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_streamparm *p = arg;
v4l2_std_id std;
int ret = check_fmt(file, p->type);
@@ -2101,7 +2103,8 @@ static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
- p->parm.capture.readbuffers = 2;
+ if (vfd->device_caps & V4L2_CAP_READWRITE)
+ p->parm.capture.readbuffers = 2;
ret = ops->vidioc_g_std(file, fh, &std);
if (ret == 0)
v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe);
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index e7f4bf5bc8dd..e2654b422334 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -966,6 +966,27 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
+#ifndef CONFIG_MMU
+unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct v4l2_fh *fh = file->private_data;
+ unsigned long offset = pgoff << PAGE_SHIFT;
+ struct vb2_queue *vq;
+
+ if (offset < DST_QUEUE_OFF_BASE) {
+ vq = v4l2_m2m_get_src_vq(fh->m2m_ctx);
+ } else {
+ vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx);
+ pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ }
+
+ return vb2_get_unmapped_area(vq, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area);
+#endif
+
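On no-MMU kernels mmap() offsets are not translated through page tables, so the helper above routes the request to the source or destination queue based on the DST_QUEUE_OFF_BASE offset split and defers to vb2_get_unmapped_area(). A driver opts in through its file operations; a minimal sketch, with the structure name being illustrative:

	static const struct v4l2_file_operations my_m2m_fops = {
		.owner		   = THIS_MODULE,
		.poll		   = v4l2_m2m_fop_poll,
		.unlocked_ioctl	   = video_ioctl2,
		.mmap		   = v4l2_m2m_fop_mmap,
		/* compiled in only on !CONFIG_MMU kernels: */
		.get_unmapped_area = v4l2_m2m_get_unmapped_area,
	};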
#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
diff --git a/drivers/media/v4l2-core/v4l2-vp9.c b/drivers/media/v4l2-core/v4l2-vp9.c
new file mode 100644
index 000000000000..859589f1fd35
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-vp9.c
@@ -0,0 +1,1850 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 VP9 helpers.
+ *
+ * Copyright (C) 2021 Collabora, Ltd.
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+ */
+
+#include <linux/module.h>
+
+#include <media/v4l2-vp9.h>
+
+const u8 v4l2_vp9_kf_y_mode_prob[10][10][9] = {
+ {
+ /* above = dc */
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, /*left = dc */
+ { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, /*left = v */
+ { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, /*left = h */
+ { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, /*left = d45 */
+ { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, /*left = d135*/
+ { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, /*left = d117*/
+ { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, /*left = d153*/
+ { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, /*left = d207*/
+ { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, /*left = d63 */
+ { 59, 67, 44, 140, 161, 202, 78, 67, 119 }, /*left = tm */
+ }, { /* above = v */
+ { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, /*left = dc */
+ { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, /*left = v */
+ { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, /*left = h */
+ { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, /*left = d45 */
+ { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, /*left = d135*/
+ { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, /*left = d117*/
+ { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, /*left = d153*/
+ { 52, 30, 74, 113, 130, 175, 51, 64, 58 }, /*left = d207*/
+ { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, /*left = d63 */
+ { 36, 61, 116, 114, 128, 162, 80, 125, 82 }, /*left = tm */
+ }, { /* above = h */
+ { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, /*left = dc */
+ { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, /*left = v */
+ { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, /*left = h */
+ { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, /*left = d45 */
+ { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, /*left = d135*/
+ { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, /*left = d117*/
+ { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, /*left = d153*/
+ { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, /*left = d207*/
+ { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, /*left = d63 */
+ { 38, 72, 19, 168, 203, 212, 50, 50, 107 }, /*left = tm */
+ }, { /* above = d45 */
+ { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, /*left = dc */
+ { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, /*left = v */
+ { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, /*left = h */
+ { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, /*left = d45 */
+ { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, /*left = d135*/
+ { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, /*left = d117*/
+ { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, /*left = d153*/
+ { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, /*left = d207*/
+ { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, /*left = d63 */
+ { 47, 47, 43, 114, 137, 181, 100, 99, 95 }, /*left = tm */
+ }, { /* above = d135 */
+ { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, /*left = dc */
+ { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, /*left = v */
+ { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, /*left = h */
+ { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, /*left = d45 */
+ { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, /*left = d135*/
+ { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, /*left = d117*/
+ { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, /*left = d153*/
+ { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, /*left = d207*/
+ { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, /*left = d63 */
+ { 47, 54, 34, 146, 108, 203, 72, 103, 151 }, /*left = tm */
+ }, { /* above = d117 */
+ { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, /*left = dc */
+ { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, /*left = v */
+ { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, /*left = h */
+ { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, /*left = d45 */
+ { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, /*left = d135*/
+ { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, /*left = d117*/
+ { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, /*left = d153*/
+ { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, /*left = d207*/
+ { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, /*left = d63 */
+ { 38, 44, 51, 136, 74, 162, 57, 97, 121 }, /*left = tm */
+ }, { /* above = d153 */
+ { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, /*left = dc */
+ { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, /*left = v */
+ { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, /*left = h */
+ { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, /*left = d45 */
+ { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, /*left = d135*/
+ { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, /*left = d117*/
+ { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, /*left = d153*/
+ { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, /*left = d207*/
+ { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, /*left = d63 */
+ { 37, 49, 25, 129, 168, 164, 41, 54, 148 }, /*left = tm */
+ }, { /* above = d207 */
+ { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, /*left = dc */
+ { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, /*left = v */
+ { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, /*left = h */
+ { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, /*left = d45 */
+ { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, /*left = d135*/
+ { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, /*left = d117*/
+ { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, /*left = d153*/
+ { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, /*left = d207*/
+ { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, /*left = d63 */
+ { 40, 61, 26, 126, 152, 206, 61, 59, 93 }, /*left = tm */
+ }, { /* above = d63 */
+ { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, /*left = dc */
+ { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, /*left = v */
+ { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, /*left = h */
+ { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, /*left = d45 */
+ { 48, 31, 27, 114, 63, 183, 82, 116, 56 }, /*left = d135*/
+ { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, /*left = d117*/
+ { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, /*left = d153*/
+ { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, /*left = d207*/
+ { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, /*left = d63 */
+ { 36, 38, 48, 92, 122, 165, 88, 137, 91 }, /*left = tm */
+ }, { /* above = tm */
+ { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, /*left = dc */
+ { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, /*left = v */
+ { 39, 38, 21, 184, 227, 206, 42, 32, 64 }, /*left = h */
+ { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, /*left = d45 */
+ { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, /*left = d135*/
+ { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, /*left = d117*/
+ { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, /*left = d153*/
+ { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, /*left = d207*/
+ { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, /*left = d63 */
+ { 43, 81, 53, 140, 169, 204, 68, 84, 72 }, /*left = tm */
+ }
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_kf_y_mode_prob);
+
+const u8 v4l2_vp9_kf_partition_probs[16][3] = {
+ /* 8x8 -> 4x4 */
+ { 158, 97, 94 }, /* a/l both not split */
+ { 93, 24, 99 }, /* a split, l not split */
+ { 85, 119, 44 }, /* l split, a not split */
+ { 62, 59, 67 }, /* a/l both split */
+ /* 16x16 -> 8x8 */
+ { 149, 53, 53 }, /* a/l both not split */
+ { 94, 20, 48 }, /* a split, l not split */
+ { 83, 53, 24 }, /* l split, a not split */
+ { 52, 18, 18 }, /* a/l both split */
+ /* 32x32 -> 16x16 */
+ { 150, 40, 39 }, /* a/l both not split */
+ { 78, 12, 26 }, /* a split, l not split */
+ { 67, 33, 11 }, /* l split, a not split */
+ { 24, 7, 5 }, /* a/l both split */
+ /* 64x64 -> 32x32 */
+ { 174, 35, 49 }, /* a/l both not split */
+ { 68, 11, 27 }, /* a split, l not split */
+ { 57, 15, 9 }, /* l split, a not split */
+ { 12, 3, 3 }, /* a/l both split */
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_kf_partition_probs);
+
+const u8 v4l2_vp9_kf_uv_mode_prob[10][9] = {
+ { 144, 11, 54, 157, 195, 130, 46, 58, 108 }, /* y = dc */
+ { 118, 15, 123, 148, 131, 101, 44, 93, 131 }, /* y = v */
+ { 113, 12, 23, 188, 226, 142, 26, 32, 125 }, /* y = h */
+ { 120, 11, 50, 123, 163, 135, 64, 77, 103 }, /* y = d45 */
+ { 113, 9, 36, 155, 111, 157, 32, 44, 161 }, /* y = d135 */
+ { 116, 9, 55, 176, 76, 96, 37, 61, 149 }, /* y = d117 */
+ { 115, 9, 28, 141, 161, 167, 21, 25, 193 }, /* y = d153 */
+ { 120, 12, 32, 145, 195, 142, 32, 38, 86 }, /* y = d207 */
+ { 116, 12, 64, 120, 140, 125, 49, 115, 121 }, /* y = d63 */
+ { 102, 19, 66, 162, 182, 122, 35, 59, 128 } /* y = tm */
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_kf_uv_mode_prob);
+
+const struct v4l2_vp9_frame_context v4l2_vp9_default_probs = {
+ .tx8 = {
+ { 100 },
+ { 66 },
+ },
+ .tx16 = {
+ { 20, 152 },
+ { 15, 101 },
+ },
+ .tx32 = {
+ { 3, 136, 37 },
+ { 5, 52, 13 },
+ },
+ .coef = {
+ { /* tx = 4x4 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 195, 29, 183 },
+ { 84, 49, 136 },
+ { 8, 42, 71 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 31, 107, 169 },
+ { 35, 99, 159 },
+ { 17, 82, 140 },
+ { 8, 66, 114 },
+ { 2, 44, 76 },
+ { 1, 19, 32 },
+ },
+ { /* Coeff Band 2 */
+ { 40, 132, 201 },
+ { 29, 114, 187 },
+ { 13, 91, 157 },
+ { 7, 75, 127 },
+ { 3, 58, 95 },
+ { 1, 28, 47 },
+ },
+ { /* Coeff Band 3 */
+ { 69, 142, 221 },
+ { 42, 122, 201 },
+ { 15, 91, 159 },
+ { 6, 67, 121 },
+ { 1, 42, 77 },
+ { 1, 17, 31 },
+ },
+ { /* Coeff Band 4 */
+ { 102, 148, 228 },
+ { 67, 117, 204 },
+ { 17, 82, 154 },
+ { 6, 59, 114 },
+ { 2, 39, 75 },
+ { 1, 15, 29 },
+ },
+ { /* Coeff Band 5 */
+ { 156, 57, 233 },
+ { 119, 57, 212 },
+ { 58, 48, 163 },
+ { 29, 40, 124 },
+ { 12, 30, 81 },
+ { 3, 12, 31 }
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 191, 107, 226 },
+ { 124, 117, 204 },
+ { 25, 99, 155 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 29, 148, 210 },
+ { 37, 126, 194 },
+ { 8, 93, 157 },
+ { 2, 68, 118 },
+ { 1, 39, 69 },
+ { 1, 17, 33 },
+ },
+ { /* Coeff Band 2 */
+ { 41, 151, 213 },
+ { 27, 123, 193 },
+ { 3, 82, 144 },
+ { 1, 58, 105 },
+ { 1, 32, 60 },
+ { 1, 13, 26 },
+ },
+ { /* Coeff Band 3 */
+ { 59, 159, 220 },
+ { 23, 126, 198 },
+ { 4, 88, 151 },
+ { 1, 66, 114 },
+ { 1, 38, 71 },
+ { 1, 18, 34 },
+ },
+ { /* Coeff Band 4 */
+ { 114, 136, 232 },
+ { 51, 114, 207 },
+ { 11, 83, 155 },
+ { 3, 56, 105 },
+ { 1, 33, 65 },
+ { 1, 17, 34 },
+ },
+ { /* Coeff Band 5 */
+ { 149, 65, 234 },
+ { 121, 57, 215 },
+ { 61, 49, 166 },
+ { 28, 36, 114 },
+ { 12, 25, 76 },
+ { 3, 16, 42 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 214, 49, 220 },
+ { 132, 63, 188 },
+ { 42, 65, 137 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 85, 137, 221 },
+ { 104, 131, 216 },
+ { 49, 111, 192 },
+ { 21, 87, 155 },
+ { 2, 49, 87 },
+ { 1, 16, 28 },
+ },
+ { /* Coeff Band 2 */
+ { 89, 163, 230 },
+ { 90, 137, 220 },
+ { 29, 100, 183 },
+ { 10, 70, 135 },
+ { 2, 42, 81 },
+ { 1, 17, 33 },
+ },
+ { /* Coeff Band 3 */
+ { 108, 167, 237 },
+ { 55, 133, 222 },
+ { 15, 97, 179 },
+ { 4, 72, 135 },
+ { 1, 45, 85 },
+ { 1, 19, 38 },
+ },
+ { /* Coeff Band 4 */
+ { 124, 146, 240 },
+ { 66, 124, 224 },
+ { 17, 88, 175 },
+ { 4, 58, 122 },
+ { 1, 36, 75 },
+ { 1, 18, 37 },
+ },
+ { /* Coeff Band 5 */
+ { 141, 79, 241 },
+ { 126, 70, 227 },
+ { 66, 58, 182 },
+ { 30, 44, 136 },
+ { 12, 34, 96 },
+ { 2, 20, 47 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 229, 99, 249 },
+ { 143, 111, 235 },
+ { 46, 109, 192 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 82, 158, 236 },
+ { 94, 146, 224 },
+ { 25, 117, 191 },
+ { 9, 87, 149 },
+ { 3, 56, 99 },
+ { 1, 33, 57 },
+ },
+ { /* Coeff Band 2 */
+ { 83, 167, 237 },
+ { 68, 145, 222 },
+ { 10, 103, 177 },
+ { 2, 72, 131 },
+ { 1, 41, 79 },
+ { 1, 20, 39 },
+ },
+ { /* Coeff Band 3 */
+ { 99, 167, 239 },
+ { 47, 141, 224 },
+ { 10, 104, 178 },
+ { 2, 73, 133 },
+ { 1, 44, 85 },
+ { 1, 22, 47 },
+ },
+ { /* Coeff Band 4 */
+ { 127, 145, 243 },
+ { 71, 129, 228 },
+ { 17, 93, 177 },
+ { 3, 61, 124 },
+ { 1, 41, 84 },
+ { 1, 21, 52 },
+ },
+ { /* Coeff Band 5 */
+ { 157, 78, 244 },
+ { 140, 72, 231 },
+ { 69, 58, 184 },
+ { 31, 44, 137 },
+ { 14, 38, 105 },
+ { 8, 23, 61 },
+ },
+ },
+ },
+ },
+ { /* tx = 8x8 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 125, 34, 187 },
+ { 52, 41, 133 },
+ { 6, 31, 56 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 37, 109, 153 },
+ { 51, 102, 147 },
+ { 23, 87, 128 },
+ { 8, 67, 101 },
+ { 1, 41, 63 },
+ { 1, 19, 29 },
+ },
+ { /* Coeff Band 2 */
+ { 31, 154, 185 },
+ { 17, 127, 175 },
+ { 6, 96, 145 },
+ { 2, 73, 114 },
+ { 1, 51, 82 },
+ { 1, 28, 45 },
+ },
+ { /* Coeff Band 3 */
+ { 23, 163, 200 },
+ { 10, 131, 185 },
+ { 2, 93, 148 },
+ { 1, 67, 111 },
+ { 1, 41, 69 },
+ { 1, 14, 24 },
+ },
+ { /* Coeff Band 4 */
+ { 29, 176, 217 },
+ { 12, 145, 201 },
+ { 3, 101, 156 },
+ { 1, 69, 111 },
+ { 1, 39, 63 },
+ { 1, 14, 23 },
+ },
+ { /* Coeff Band 5 */
+ { 57, 192, 233 },
+ { 25, 154, 215 },
+ { 6, 109, 167 },
+ { 3, 78, 118 },
+ { 1, 48, 69 },
+ { 1, 21, 29 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 202, 105, 245 },
+ { 108, 106, 216 },
+ { 18, 90, 144 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 33, 172, 219 },
+ { 64, 149, 206 },
+ { 14, 117, 177 },
+ { 5, 90, 141 },
+ { 2, 61, 95 },
+ { 1, 37, 57 },
+ },
+ { /* Coeff Band 2 */
+ { 33, 179, 220 },
+ { 11, 140, 198 },
+ { 1, 89, 148 },
+ { 1, 60, 104 },
+ { 1, 33, 57 },
+ { 1, 12, 21 },
+ },
+ { /* Coeff Band 3 */
+ { 30, 181, 221 },
+ { 8, 141, 198 },
+ { 1, 87, 145 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 20 },
+ },
+ { /* Coeff Band 4 */
+ { 32, 186, 224 },
+ { 7, 142, 198 },
+ { 1, 86, 143 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 22 },
+ },
+ { /* Coeff Band 5 */
+ { 57, 192, 227 },
+ { 20, 143, 204 },
+ { 3, 96, 154 },
+ { 1, 68, 112 },
+ { 1, 42, 69 },
+ { 1, 19, 32 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 212, 35, 215 },
+ { 113, 47, 169 },
+ { 29, 48, 105 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 74, 129, 203 },
+ { 106, 120, 203 },
+ { 49, 107, 178 },
+ { 19, 84, 144 },
+ { 4, 50, 84 },
+ { 1, 15, 25 },
+ },
+ { /* Coeff Band 2 */
+ { 71, 172, 217 },
+ { 44, 141, 209 },
+ { 15, 102, 173 },
+ { 6, 76, 133 },
+ { 2, 51, 89 },
+ { 1, 24, 42 },
+ },
+ { /* Coeff Band 3 */
+ { 64, 185, 231 },
+ { 31, 148, 216 },
+ { 8, 103, 175 },
+ { 3, 74, 131 },
+ { 1, 46, 81 },
+ { 1, 18, 30 },
+ },
+ { /* Coeff Band 4 */
+ { 65, 196, 235 },
+ { 25, 157, 221 },
+ { 5, 105, 174 },
+ { 1, 67, 120 },
+ { 1, 38, 69 },
+ { 1, 15, 30 },
+ },
+ { /* Coeff Band 5 */
+ { 65, 204, 238 },
+ { 30, 156, 224 },
+ { 7, 107, 177 },
+ { 2, 70, 124 },
+ { 1, 42, 73 },
+ { 1, 18, 34 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 225, 86, 251 },
+ { 144, 104, 235 },
+ { 42, 99, 181 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 85, 175, 239 },
+ { 112, 165, 229 },
+ { 29, 136, 200 },
+ { 12, 103, 162 },
+ { 6, 77, 123 },
+ { 2, 53, 84 },
+ },
+ { /* Coeff Band 2 */
+ { 75, 183, 239 },
+ { 30, 155, 221 },
+ { 3, 106, 171 },
+ { 1, 74, 128 },
+ { 1, 44, 76 },
+ { 1, 17, 28 },
+ },
+ { /* Coeff Band 3 */
+ { 73, 185, 240 },
+ { 27, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 75, 127 },
+ { 1, 42, 73 },
+ { 1, 17, 29 },
+ },
+ { /* Coeff Band 4 */
+ { 62, 190, 238 },
+ { 21, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 72, 122 },
+ { 1, 40, 71 },
+ { 1, 18, 32 },
+ },
+ { /* Coeff Band 5 */
+ { 61, 199, 240 },
+ { 27, 161, 226 },
+ { 4, 113, 180 },
+ { 1, 76, 129 },
+ { 1, 46, 80 },
+ { 1, 23, 41 },
+ },
+ },
+ },
+ },
+ { /* tx = 16x16 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 27, 153 },
+ { 5, 30, 95 },
+ { 1, 16, 30 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 50, 75, 127 },
+ { 57, 75, 124 },
+ { 27, 67, 108 },
+ { 10, 54, 86 },
+ { 1, 33, 52 },
+ { 1, 12, 18 },
+ },
+ { /* Coeff Band 2 */
+ { 43, 125, 151 },
+ { 26, 108, 148 },
+ { 7, 83, 122 },
+ { 2, 59, 89 },
+ { 1, 38, 60 },
+ { 1, 17, 27 },
+ },
+ { /* Coeff Band 3 */
+ { 23, 144, 163 },
+ { 13, 112, 154 },
+ { 2, 75, 117 },
+ { 1, 50, 81 },
+ { 1, 31, 51 },
+ { 1, 14, 23 },
+ },
+ { /* Coeff Band 4 */
+ { 18, 162, 185 },
+ { 6, 123, 171 },
+ { 1, 78, 125 },
+ { 1, 51, 86 },
+ { 1, 31, 54 },
+ { 1, 14, 23 },
+ },
+ { /* Coeff Band 5 */
+ { 15, 199, 227 },
+ { 3, 150, 204 },
+ { 1, 91, 146 },
+ { 1, 55, 95 },
+ { 1, 30, 53 },
+ { 1, 11, 20 },
+ }
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 19, 55, 240 },
+ { 19, 59, 196 },
+ { 3, 52, 105 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 41, 166, 207 },
+ { 104, 153, 199 },
+ { 31, 123, 181 },
+ { 14, 101, 152 },
+ { 5, 72, 106 },
+ { 1, 36, 52 },
+ },
+ { /* Coeff Band 2 */
+ { 35, 176, 211 },
+ { 12, 131, 190 },
+ { 2, 88, 144 },
+ { 1, 60, 101 },
+ { 1, 36, 60 },
+ { 1, 16, 28 },
+ },
+ { /* Coeff Band 3 */
+ { 28, 183, 213 },
+ { 8, 134, 191 },
+ { 1, 86, 142 },
+ { 1, 56, 96 },
+ { 1, 30, 53 },
+ { 1, 12, 20 },
+ },
+ { /* Coeff Band 4 */
+ { 20, 190, 215 },
+ { 4, 135, 192 },
+ { 1, 84, 139 },
+ { 1, 53, 91 },
+ { 1, 28, 49 },
+ { 1, 11, 20 },
+ },
+ { /* Coeff Band 5 */
+ { 13, 196, 216 },
+ { 2, 137, 192 },
+ { 1, 86, 143 },
+ { 1, 57, 99 },
+ { 1, 32, 56 },
+ { 1, 13, 24 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 211, 29, 217 },
+ { 96, 47, 156 },
+ { 22, 43, 87 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 78, 120, 193 },
+ { 111, 116, 186 },
+ { 46, 102, 164 },
+ { 15, 80, 128 },
+ { 2, 49, 76 },
+ { 1, 18, 28 },
+ },
+ { /* Coeff Band 2 */
+ { 71, 161, 203 },
+ { 42, 132, 192 },
+ { 10, 98, 150 },
+ { 3, 69, 109 },
+ { 1, 44, 70 },
+ { 1, 18, 29 },
+ },
+ { /* Coeff Band 3 */
+ { 57, 186, 211 },
+ { 30, 140, 196 },
+ { 4, 93, 146 },
+ { 1, 62, 102 },
+ { 1, 38, 65 },
+ { 1, 16, 27 },
+ },
+ { /* Coeff Band 4 */
+ { 47, 199, 217 },
+ { 14, 145, 196 },
+ { 1, 88, 142 },
+ { 1, 57, 98 },
+ { 1, 36, 62 },
+ { 1, 15, 26 },
+ },
+ { /* Coeff Band 5 */
+ { 26, 219, 229 },
+ { 5, 155, 207 },
+ { 1, 94, 151 },
+ { 1, 60, 104 },
+ { 1, 36, 62 },
+ { 1, 16, 28 },
+ }
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 233, 29, 248 },
+ { 146, 47, 220 },
+ { 43, 52, 140 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 100, 163, 232 },
+ { 179, 161, 222 },
+ { 63, 142, 204 },
+ { 37, 113, 174 },
+ { 26, 89, 137 },
+ { 18, 68, 97 },
+ },
+ { /* Coeff Band 2 */
+ { 85, 181, 230 },
+ { 32, 146, 209 },
+ { 7, 100, 164 },
+ { 3, 71, 121 },
+ { 1, 45, 77 },
+ { 1, 18, 30 },
+ },
+ { /* Coeff Band 3 */
+ { 65, 187, 230 },
+ { 20, 148, 207 },
+ { 2, 97, 159 },
+ { 1, 68, 116 },
+ { 1, 40, 70 },
+ { 1, 14, 29 },
+ },
+ { /* Coeff Band 4 */
+ { 40, 194, 227 },
+ { 8, 147, 204 },
+ { 1, 94, 155 },
+ { 1, 65, 112 },
+ { 1, 39, 66 },
+ { 1, 14, 26 },
+ },
+ { /* Coeff Band 5 */
+ { 16, 208, 228 },
+ { 3, 151, 207 },
+ { 1, 98, 160 },
+ { 1, 67, 117 },
+ { 1, 41, 74 },
+ { 1, 17, 31 },
+ },
+ },
+ },
+ },
+ { /* tx = 32x32 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 17, 38, 140 },
+ { 7, 34, 80 },
+ { 1, 17, 29 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 37, 75, 128 },
+ { 41, 76, 128 },
+ { 26, 66, 116 },
+ { 12, 52, 94 },
+ { 2, 32, 55 },
+ { 1, 10, 16 },
+ },
+ { /* Coeff Band 2 */
+ { 50, 127, 154 },
+ { 37, 109, 152 },
+ { 16, 82, 121 },
+ { 5, 59, 85 },
+ { 1, 35, 54 },
+ { 1, 13, 20 },
+ },
+ { /* Coeff Band 3 */
+ { 40, 142, 167 },
+ { 17, 110, 157 },
+ { 2, 71, 112 },
+ { 1, 44, 72 },
+ { 1, 27, 45 },
+ { 1, 11, 17 },
+ },
+ { /* Coeff Band 4 */
+ { 30, 175, 188 },
+ { 9, 124, 169 },
+ { 1, 74, 116 },
+ { 1, 48, 78 },
+ { 1, 30, 49 },
+ { 1, 11, 18 },
+ },
+ { /* Coeff Band 5 */
+ { 10, 222, 223 },
+ { 2, 150, 194 },
+ { 1, 83, 128 },
+ { 1, 48, 79 },
+ { 1, 27, 45 },
+ { 1, 11, 17 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 36, 41, 235 },
+ { 29, 36, 193 },
+ { 10, 27, 111 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 85, 165, 222 },
+ { 177, 162, 215 },
+ { 110, 135, 195 },
+ { 57, 113, 168 },
+ { 23, 83, 120 },
+ { 10, 49, 61 },
+ },
+ { /* Coeff Band 2 */
+ { 85, 190, 223 },
+ { 36, 139, 200 },
+ { 5, 90, 146 },
+ { 1, 60, 103 },
+ { 1, 38, 65 },
+ { 1, 18, 30 },
+ },
+ { /* Coeff Band 3 */
+ { 72, 202, 223 },
+ { 23, 141, 199 },
+ { 2, 86, 140 },
+ { 1, 56, 97 },
+ { 1, 36, 61 },
+ { 1, 16, 27 },
+ },
+ { /* Coeff Band 4 */
+ { 55, 218, 225 },
+ { 13, 145, 200 },
+ { 1, 86, 141 },
+ { 1, 57, 99 },
+ { 1, 35, 61 },
+ { 1, 13, 22 },
+ },
+ { /* Coeff Band 5 */
+ { 15, 235, 212 },
+ { 1, 132, 184 },
+ { 1, 84, 139 },
+ { 1, 57, 97 },
+ { 1, 34, 56 },
+ { 1, 14, 23 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 181, 21, 201 },
+ { 61, 37, 123 },
+ { 10, 38, 71 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 47, 106, 172 },
+ { 95, 104, 173 },
+ { 42, 93, 159 },
+ { 18, 77, 131 },
+ { 4, 50, 81 },
+ { 1, 17, 23 },
+ },
+ { /* Coeff Band 2 */
+ { 62, 147, 199 },
+ { 44, 130, 189 },
+ { 28, 102, 154 },
+ { 18, 75, 115 },
+ { 2, 44, 65 },
+ { 1, 12, 19 },
+ },
+ { /* Coeff Band 3 */
+ { 55, 153, 210 },
+ { 24, 130, 194 },
+ { 3, 93, 146 },
+ { 1, 61, 97 },
+ { 1, 31, 50 },
+ { 1, 10, 16 },
+ },
+ { /* Coeff Band 4 */
+ { 49, 186, 223 },
+ { 17, 148, 204 },
+ { 1, 96, 142 },
+ { 1, 53, 83 },
+ { 1, 26, 44 },
+ { 1, 11, 17 },
+ },
+ { /* Coeff Band 5 */
+ { 13, 217, 212 },
+ { 2, 136, 180 },
+ { 1, 78, 124 },
+ { 1, 50, 83 },
+ { 1, 29, 49 },
+ { 1, 14, 23 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 197, 13, 247 },
+ { 82, 17, 222 },
+ { 25, 17, 162 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 126, 186, 247 },
+ { 234, 191, 243 },
+ { 176, 177, 234 },
+ { 104, 158, 220 },
+ { 66, 128, 186 },
+ { 55, 90, 137 },
+ },
+ { /* Coeff Band 2 */
+ { 111, 197, 242 },
+ { 46, 158, 219 },
+ { 9, 104, 171 },
+ { 2, 65, 125 },
+ { 1, 44, 80 },
+ { 1, 17, 91 },
+ },
+ { /* Coeff Band 3 */
+ { 104, 208, 245 },
+ { 39, 168, 224 },
+ { 3, 109, 162 },
+ { 1, 79, 124 },
+ { 1, 50, 102 },
+ { 1, 43, 102 },
+ },
+ { /* Coeff Band 4 */
+ { 84, 220, 246 },
+ { 31, 177, 231 },
+ { 2, 115, 180 },
+ { 1, 79, 134 },
+ { 1, 55, 77 },
+ { 1, 60, 79 },
+ },
+ { /* Coeff Band 5 */
+ { 43, 243, 240 },
+ { 8, 180, 217 },
+ { 1, 115, 166 },
+ { 1, 84, 121 },
+ { 1, 51, 67 },
+ { 1, 16, 6 },
+ },
+ },
+ },
+ },
+ },
+
+ .skip = { 192, 128, 64 },
+ .inter_mode = {
+ { 2, 173, 34 },
+ { 7, 145, 85 },
+ { 7, 166, 63 },
+ { 7, 94, 66 },
+ { 8, 64, 46 },
+ { 17, 81, 31 },
+ { 25, 29, 30 },
+ },
+ .interp_filter = {
+ { 235, 162 },
+ { 36, 255 },
+ { 34, 3 },
+ { 149, 144 },
+ },
+ .is_inter = { 9, 102, 187, 225 },
+ .comp_mode = { 239, 183, 119, 96, 41 },
+ .single_ref = {
+ { 33, 16 },
+ { 77, 74 },
+ { 142, 142 },
+ { 172, 170 },
+ { 238, 247 },
+ },
+ .comp_ref = { 50, 126, 123, 221, 226 },
+ .y_mode = {
+ { 65, 32, 18, 144, 162, 194, 41, 51, 98 },
+ { 132, 68, 18, 165, 217, 196, 45, 40, 78 },
+ { 173, 80, 19, 176, 240, 193, 64, 35, 46 },
+ { 221, 135, 38, 194, 248, 121, 96, 85, 29 },
+ },
+ .uv_mode = {
+ { 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */,
+ { 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */,
+ { 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */,
+ { 97, 5, 44, 131, 176, 139, 48, 68, 97 } /* y = d45 */,
+ { 83, 5, 42, 156, 111, 152, 26, 49, 152 } /* y = d135 */,
+ { 80, 5, 58, 178, 74, 83, 33, 62, 145 } /* y = d117 */,
+ { 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */,
+ { 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d207 */,
+ { 77, 7, 64, 116, 132, 122, 37, 126, 120 } /* y = d63 */,
+ { 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */
+ },
+ .partition = {
+ /* 8x8 -> 4x4 */
+ { 199, 122, 141 } /* a/l both not split */,
+ { 147, 63, 159 } /* a split, l not split */,
+ { 148, 133, 118 } /* l split, a not split */,
+ { 121, 104, 114 } /* a/l both split */,
+ /* 16x16 -> 8x8 */
+ { 174, 73, 87 } /* a/l both not split */,
+ { 92, 41, 83 } /* a split, l not split */,
+ { 82, 99, 50 } /* l split, a not split */,
+ { 53, 39, 39 } /* a/l both split */,
+ /* 32x32 -> 16x16 */
+ { 177, 58, 59 } /* a/l both not split */,
+ { 68, 26, 63 } /* a split, l not split */,
+ { 52, 79, 25 } /* l split, a not split */,
+ { 17, 14, 12 } /* a/l both split */,
+ /* 64x64 -> 32x32 */
+ { 222, 34, 30 } /* a/l both not split */,
+ { 72, 16, 44 } /* a split, l not split */,
+ { 58, 32, 12 } /* l split, a not split */,
+ { 10, 7, 6 } /* a/l both split */,
+ },
+
+ .mv = {
+ .joint = { 32, 64, 96 },
+ .sign = { 128, 128 },
+ .classes = {
+ { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 },
+ { 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 },
+ },
+ .class0_bit = { 216, 208 },
+ .bits = {
+ { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 },
+ { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 },
+ },
+ .class0_fr = {
+ {
+ { 128, 128, 64 },
+ { 96, 112, 64 },
+ },
+ {
+ { 128, 128, 64 },
+ { 96, 112, 64 },
+ },
+ },
+ .fr = {
+ { 64, 96, 64 },
+ { 64, 96, 64 },
+ },
+ .class0_hp = { 160, 160 },
+ .hp = { 128, 128 },
+ },
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_default_probs);
+
+static u32 fastdiv(u32 dividend, u16 divisor)
+{
+#define DIV_INV(d) ((u32)(((1ULL << 32) + ((d) - 1)) / (d)))
+#define DIVS_INV(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) \
+ DIV_INV(d0), DIV_INV(d1), DIV_INV(d2), DIV_INV(d3), \
+ DIV_INV(d4), DIV_INV(d5), DIV_INV(d6), DIV_INV(d7), \
+ DIV_INV(d8), DIV_INV(d9)
+
+ static const u32 inv[] = {
+ DIV_INV(2), DIV_INV(3), DIV_INV(4), DIV_INV(5),
+ DIV_INV(6), DIV_INV(7), DIV_INV(8), DIV_INV(9),
+ DIVS_INV(10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
+ DIVS_INV(20, 21, 22, 23, 24, 25, 26, 27, 28, 29),
+ DIVS_INV(30, 31, 32, 33, 34, 35, 36, 37, 38, 39),
+ DIVS_INV(40, 41, 42, 43, 44, 45, 46, 47, 48, 49),
+ DIVS_INV(50, 51, 52, 53, 54, 55, 56, 57, 58, 59),
+ DIVS_INV(60, 61, 62, 63, 64, 65, 66, 67, 68, 69),
+ DIVS_INV(70, 71, 72, 73, 74, 75, 76, 77, 78, 79),
+ DIVS_INV(80, 81, 82, 83, 84, 85, 86, 87, 88, 89),
+ DIVS_INV(90, 91, 92, 93, 94, 95, 96, 97, 98, 99),
+ DIVS_INV(100, 101, 102, 103, 104, 105, 106, 107, 108, 109),
+ DIVS_INV(110, 111, 112, 113, 114, 115, 116, 117, 118, 119),
+ DIVS_INV(120, 121, 122, 123, 124, 125, 126, 127, 128, 129),
+ DIVS_INV(130, 131, 132, 133, 134, 135, 136, 137, 138, 139),
+ DIVS_INV(140, 141, 142, 143, 144, 145, 146, 147, 148, 149),
+ DIVS_INV(150, 151, 152, 153, 154, 155, 156, 157, 158, 159),
+ DIVS_INV(160, 161, 162, 163, 164, 165, 166, 167, 168, 169),
+ DIVS_INV(170, 171, 172, 173, 174, 175, 176, 177, 178, 179),
+ DIVS_INV(180, 181, 182, 183, 184, 185, 186, 187, 188, 189),
+ DIVS_INV(190, 191, 192, 193, 194, 195, 196, 197, 198, 199),
+ DIVS_INV(200, 201, 202, 203, 204, 205, 206, 207, 208, 209),
+ DIVS_INV(210, 211, 212, 213, 214, 215, 216, 217, 218, 219),
+ DIVS_INV(220, 221, 222, 223, 224, 225, 226, 227, 228, 229),
+ DIVS_INV(230, 231, 232, 233, 234, 235, 236, 237, 238, 239),
+ DIVS_INV(240, 241, 242, 243, 244, 245, 246, 247, 248, 249),
+ DIV_INV(250), DIV_INV(251), DIV_INV(252), DIV_INV(253),
+ DIV_INV(254), DIV_INV(255), DIV_INV(256),
+ };
+
+ if (divisor == 0)
+ return 0;
+ else if (divisor == 1)
+ return dividend;
+
+ if (WARN_ON(divisor - 2 >= ARRAY_SIZE(inv)))
+ return dividend;
+
+ return ((u64)dividend * inv[divisor - 2]) >> 32;
+}
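+
+/*
+ * Worked example (illustrative only): DIV_INV(7) is ceil(2^32 / 7) =
+ * 0x24924925, so fastdiv(100, 7) evaluates ((u64)100 * 0x24924925) >> 32,
+ * which is 14 = 100 / 7. Rounding the reciprocal up in DIV_INV() keeps the
+ * final >> 32 truncation from undershooting for the small dividends that
+ * merge_prob() below feeds into this helper.
+ */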
+
+/* 6.3.6 inv_recenter_nonneg(v, m) */
+static int inv_recenter_nonneg(int v, int m)
+{
+ if (v > 2 * m)
+ return v;
+
+ if (v & 1)
+ return m - ((v + 1) >> 1);
+
+ return m + (v >> 1);
+}
+
+/*
+ * part of 6.3.5 inv_remap_prob(deltaProb, prob)
+ * delta = inv_map_table[deltaProb] done by userspace
+ */
+static int update_prob(int delta, int prob)
+{
+ if (!delta)
+ return prob;
+
+ return prob <= 128 ?
+ 1 + inv_recenter_nonneg(delta, prob - 1) :
+ 255 - inv_recenter_nonneg(delta, 255 - prob);
+}
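+
+/*
+ * Worked example (illustrative only): with prob = 30 and a
+ * userspace-mapped delta of 3, update_prob() returns
+ * 1 + inv_recenter_nonneg(3, 29) = 1 + (29 - 2) = 28, while a delta of 4
+ * returns 1 + (29 + 2) = 32; odd deltas recenter the probability
+ * downwards and even deltas upwards around the old value.
+ */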
+
+/* Counterpart to 6.3.2 tx_mode_probs() */
+static void update_tx_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) {
+ u8 *p8x8 = probs->tx8[i];
+ u8 *p16x16 = probs->tx16[i];
+ u8 *p32x32 = probs->tx32[i];
+ const u8 *d8x8 = deltas->tx8[i];
+ const u8 *d16x16 = deltas->tx16[i];
+ const u8 *d32x32 = deltas->tx32[i];
+
+ p8x8[0] = update_prob(d8x8[0], p8x8[0]);
+ p16x16[0] = update_prob(d16x16[0], p16x16[0]);
+ p16x16[1] = update_prob(d16x16[1], p16x16[1]);
+ p32x32[0] = update_prob(d32x32[0], p32x32[0]);
+ p32x32[1] = update_prob(d32x32[1], p32x32[1]);
+ p32x32[2] = update_prob(d32x32[2], p32x32[2]);
+ }
+}
+
+#define BAND_6(band) ((band) == 0 ? 3 : 6)
+
+static void update_coeff(const u8 deltas[6][6][3], u8 probs[6][6][3])
+{
+ int l, m, n;
+
+ for (l = 0; l < 6; l++)
+ for (m = 0; m < BAND_6(l); m++) {
+ u8 *p = probs[l][m];
+ const u8 *d = deltas[l][m];
+
+ for (n = 0; n < 3; n++)
+ p[n] = update_prob(d[n], p[n]);
+ }
+}
+
+/* Counterpart to 6.3.7 read_coef_probs() */
+static void update_coef_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ int i, j, k;
+
+ for (i = 0; i < ARRAY_SIZE(probs->coef); i++) {
+ for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
+ for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
+ update_coeff(deltas->coef[i][j][k], probs->coef[i][j][k]);
+
+ if (deltas->tx_mode == i)
+ break;
+ }
+}
+
+/* Counterpart to 6.3.8 read_skip_prob() */
+static void update_skip_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->skip); i++)
+ probs->skip[i] = update_prob(deltas->skip[i], probs->skip[i]);
+}
+
+/* Counterpart to 6.3.9 read_inter_mode_probs() */
+static void update_inter_mode_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) {
+ u8 *p = probs->inter_mode[i];
+ const u8 *d = deltas->inter_mode[i];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ p[2] = update_prob(d[2], p[2]);
+ }
+}
+
+/* Counterpart to 6.3.10 read_interp_filter_probs() */
+static void update_interp_filter_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) {
+ u8 *p = probs->interp_filter[i];
+ const u8 *d = deltas->interp_filter[i];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ }
+}
+
+/* Counterpart to 6.3.11 read_is_inter_probs() */
+static void update_is_inter_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++)
+ probs->is_inter[i] = update_prob(deltas->is_inter[i], probs->is_inter[i]);
+}
+
+/* 6.3.12 frame_reference_mode() done entirely in userspace */
+
+/* Counterpart to 6.3.13 frame_reference_mode_probs() */
+static void
+update_frame_reference_mode_probs(unsigned int reference_mode,
+ struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ if (reference_mode == V4L2_VP9_REFERENCE_MODE_SELECT)
+ for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++)
+ probs->comp_mode[i] = update_prob(deltas->comp_mode[i],
+ probs->comp_mode[i]);
+
+ if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE)
+ for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) {
+ u8 *p = probs->single_ref[i];
+ const u8 *d = deltas->single_ref[i];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ }
+
+ if (reference_mode != V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE)
+ for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++)
+ probs->comp_ref[i] = update_prob(deltas->comp_ref[i], probs->comp_ref[i]);
+}
+
+/* Counterpart to 6.3.14 read_y_mode_probs() */
+static void update_y_mode_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++)
+ for (j = 0; j < ARRAY_SIZE(probs->y_mode[0]); ++j)
+ probs->y_mode[i][j] =
+ update_prob(deltas->y_mode[i][j], probs->y_mode[i][j]);
+}
+
+/* Counterpart to 6.3.15 read_partition_probs() */
+static void update_partition_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i, j;
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++) {
+ u8 *p = probs->partition[i * 4 + j];
+ const u8 *d = deltas->partition[i * 4 + j];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ p[2] = update_prob(d[2], p[2]);
+ }
+}
+
+static inline int update_mv_prob(int delta, int prob)
+{
+ if (!delta)
+ return prob;
+
+ return delta;
+}
+
+/* Counterpart to 6.3.16 mv_probs() */
+static void update_mv_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ u8 *p = probs->mv.joint;
+ const u8 *d = deltas->mv.joint;
+ unsigned int i, j;
+
+ p[0] = update_mv_prob(d[0], p[0]);
+ p[1] = update_mv_prob(d[1], p[1]);
+ p[2] = update_mv_prob(d[2], p[2]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) {
+ p = probs->mv.sign;
+ d = deltas->mv.sign;
+ p[i] = update_mv_prob(d[i], p[i]);
+
+ p = probs->mv.classes[i];
+ d = deltas->mv.classes[i];
+ for (j = 0; j < ARRAY_SIZE(probs->mv.classes[0]); j++)
+ p[j] = update_mv_prob(d[j], p[j]);
+
+ p = probs->mv.class0_bit;
+ d = deltas->mv.class0_bit;
+ p[i] = update_mv_prob(d[i], p[i]);
+
+ p = probs->mv.bits[i];
+ d = deltas->mv.bits[i];
+ for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++)
+ p[j] = update_mv_prob(d[j], p[j]);
+
+ for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) {
+ p = probs->mv.class0_fr[i][j];
+ d = deltas->mv.class0_fr[i][j];
+
+ p[0] = update_mv_prob(d[0], p[0]);
+ p[1] = update_mv_prob(d[1], p[1]);
+ p[2] = update_mv_prob(d[2], p[2]);
+ }
+
+ p = probs->mv.fr[i];
+ d = deltas->mv.fr[i];
+ for (j = 0; j < ARRAY_SIZE(probs->mv.fr[i]); j++)
+ p[j] = update_mv_prob(d[j], p[j]);
+
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV) {
+ p = probs->mv.class0_hp;
+ d = deltas->mv.class0_hp;
+ p[i] = update_mv_prob(d[i], p[i]);
+
+ p = probs->mv.hp;
+ d = deltas->mv.hp;
+ p[i] = update_mv_prob(d[i], p[i]);
+ }
+ }
+}
+
+/* Counterpart to 6.3 compressed_header(), but parsing has been done in userspace. */
+void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ if (deltas->tx_mode == V4L2_VP9_TX_MODE_SELECT)
+ update_tx_probs(probs, deltas);
+
+ update_coef_probs(probs, deltas, dec_params);
+
+ update_skip_probs(probs, deltas);
+
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY)
+ return;
+
+ update_inter_mode_probs(probs, deltas);
+
+ if (dec_params->interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE)
+ update_interp_filter_probs(probs, deltas);
+
+ update_is_inter_probs(probs, deltas);
+
+ update_frame_reference_mode_probs(dec_params->reference_mode, probs, deltas);
+
+ update_y_mode_probs(probs, deltas);
+
+ update_partition_probs(probs, deltas);
+
+ update_mv_probs(probs, deltas, dec_params);
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_fw_update_probs);
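+
+/*
+ * A minimal usage sketch (hypothetical driver code; "ctx" and its members
+ * are made up for illustration): a stateless decoder driver would apply
+ * the compressed-header deltas to the frame context selected by the
+ * uncompressed header before programming the hardware:
+ *
+ *	u8 fctx_idx = dec_params->frame_context_idx;
+ *
+ *	v4l2_vp9_fw_update_probs(&ctx->frame_context[fctx_idx],
+ *				 deltas, dec_params);
+ */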
+
+u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct v4l2_vp9_frame_context *frame_context)
+{
+ int i;
+
+ u8 fctx_idx = dec_params->frame_context_idx;
+
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) {
+ /*
+ * setup_past_independence()
+ * We do nothing here. Instead of storing default probs in some intermediate
+ * location and then copying from that location to appropriate contexts
+ * in save_probs() below, we skip that step and save default probs directly
+ * to appropriate contexts.
+ */
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT ||
+ dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_ALL)
+ for (i = 0; i < 4; ++i)
+ /* save_probs(i) */
+ memcpy(&frame_context[i], &v4l2_vp9_default_probs,
+ sizeof(v4l2_vp9_default_probs));
+ else if (dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_SPEC)
+ /* save_probs(fctx_idx) */
+ memcpy(&frame_context[fctx_idx], &v4l2_vp9_default_probs,
+ sizeof(v4l2_vp9_default_probs));
+ fctx_idx = 0;
+ }
+
+ return fctx_idx;
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_reset_frame_ctx);
+
+/* 8.4.1 Merge prob process */
+static u8 merge_prob(u8 pre_prob, u32 ct0, u32 ct1, u16 count_sat, u32 max_update_factor)
+{
+ u32 den, prob, count, factor;
+
+ den = ct0 + ct1;
+ if (!den) {
+ /*
+ * prob = 128, count = 0, update_factor = 0
+ * Round2's argument: pre_prob * 256
+ * (pre_prob * 256 + 128) >> 8 == pre_prob
+ */
+ return pre_prob;
+ }
+
+ prob = clamp(((ct0 << 8) + (den >> 1)) / den, (u32)1, (u32)255);
+ count = min_t(u32, den, count_sat);
+ factor = fastdiv(max_update_factor * count, count_sat);
+
+ /*
+ * Round2(pre_prob * (256 - factor) + prob * factor, 8)
+ * Round2(pre_prob * 256 + (prob - pre_prob) * factor, 8)
+ * (pre_prob * 256 >> 8) + (((prob - pre_prob) * factor + 128) >> 8)
+ */
+ return pre_prob + (((prob - pre_prob) * factor + 128) >> 8);
+}
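+
+/*
+ * Worked example (illustrative only): merge_prob(128, 10, 30, 20, 128)
+ * computes den = 40, prob = clamp((10 * 256 + 20) / 40, 1, 255) = 64,
+ * count = 20 and factor = fastdiv(128 * 20, 20) = 128, returning
+ * Round2(128 * 256 + (64 - 128) * 128, 8) = (32768 - 8192 + 128) >> 8 = 96;
+ * with saturated counts the stored probability moves halfway towards the
+ * empirically observed one.
+ */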
+
+static inline u8 noncoef_merge_prob(u8 pre_prob, u32 ct0, u32 ct1)
+{
+ return merge_prob(pre_prob, ct0, ct1, 20, 128);
+}
+
+/* 8.4.2 Merge probs process */
+/*
+ * merge_probs() is a recursive function in the spec. We avoid recursion in the kernel.
+ * That said, the "tree" parameter of merge_probs() controls how deep the recursion goes.
+ * It turns out that in all cases the recursive calls boil down to a short-ish series
+ * of merge_prob() invocations (note no "s").
+ *
+ * Variant A
+ * ---------
+ * merge_probs(small_token_tree, 2):
+ * merge_prob(p[1], c[0], c[1] + c[2])
+ * merge_prob(p[2], c[1], c[2])
+ *
+ * Variant B
+ * ---------
+ * merge_probs(binary_tree, 0) or
+ * merge_probs(tx_size_8_tree, 0):
+ * merge_prob(p[0], c[0], c[1])
+ *
+ * Variant C
+ * ---------
+ * merge_probs(inter_mode_tree, 0):
+ * merge_prob(p[0], c[2], c[1] + c[0] + c[3])
+ * merge_prob(p[1], c[0], c[1] + c[3])
+ * merge_prob(p[2], c[1], c[3])
+ *
+ * Variant D
+ * ---------
+ * merge_probs(intra_mode_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + ... + c[9])
+ * merge_prob(p[1], c[9], c[1] + ... + c[8])
+ * merge_prob(p[2], c[1], c[2] + ... + c[8])
+ * merge_prob(p[3], c[2] + c[4] + c[5], c[3] + c[8] + c[6] + c[7])
+ * merge_prob(p[4], c[2], c[4] + c[5])
+ * merge_prob(p[5], c[4], c[5])
+ * merge_prob(p[6], c[3], c[8] + c[6] + c[7])
+ * merge_prob(p[7], c[8], c[6] + c[7])
+ * merge_prob(p[8], c[6], c[7])
+ *
+ * Variant E
+ * ---------
+ * merge_probs(partition_tree, 0) or
+ * merge_probs(tx_size_32_tree, 0) or
+ * merge_probs(mv_joint_tree, 0) or
+ * merge_probs(mv_fr_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + c[2] + c[3])
+ * merge_prob(p[1], c[1], c[2] + c[3])
+ * merge_prob(p[2], c[2], c[3])
+ *
+ * Variant F
+ * ---------
+ * merge_probs(interp_filter_tree, 0) or
+ * merge_probs(tx_size_16_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + c[2])
+ * merge_prob(p[1], c[1], c[2])
+ *
+ * Variant G
+ * ---------
+ * merge_probs(mv_class_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + ... + c[10])
+ * merge_prob(p[1], c[1], c[2] + ... + c[10])
+ * merge_prob(p[2], c[2] + c[3], c[4] + ... + c[10])
+ * merge_prob(p[3], c[2], c[3])
+ * merge_prob(p[4], c[4] + c[5], c[6] + ... + c[10])
+ * merge_prob(p[5], c[4], c[5])
+ * merge_prob(p[6], c[6], c[7] + ... + c[10])
+ * merge_prob(p[7], c[7] + c[8], c[9] + c[10])
+ * merge_prob(p[8], c[7], c[8])
+ * merge_prob(p[9], c[9], c[10])
+ */
+
+static inline void merge_probs_variant_a(u8 *p, const u32 *c, u16 count_sat, u32 update_factor)
+{
+ p[1] = merge_prob(p[1], c[0], c[1] + c[2], count_sat, update_factor);
+ p[2] = merge_prob(p[2], c[1], c[2], count_sat, update_factor);
+}
+
+static inline void merge_probs_variant_b(u8 *p, const u32 *c, u16 count_sat, u32 update_factor)
+{
+ p[0] = merge_prob(p[0], c[0], c[1], count_sat, update_factor);
+}
+
+static inline void merge_probs_variant_c(u8 *p, const u32 *c)
+{
+ p[0] = noncoef_merge_prob(p[0], c[2], c[1] + c[0] + c[3]);
+ p[1] = noncoef_merge_prob(p[1], c[0], c[1] + c[3]);
+ p[2] = noncoef_merge_prob(p[2], c[1], c[3]);
+}
+
+static void merge_probs_variant_d(u8 *p, const u32 *c)
+{
+ u32 sum = 0, s2;
+
+ sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
+
+ p[0] = noncoef_merge_prob(p[0], c[0], sum);
+ sum -= c[9];
+ p[1] = noncoef_merge_prob(p[1], c[9], sum);
+ sum -= c[1];
+ p[2] = noncoef_merge_prob(p[2], c[1], sum);
+ s2 = c[2] + c[4] + c[5];
+ sum -= s2;
+ p[3] = noncoef_merge_prob(p[3], s2, sum);
+ s2 -= c[2];
+ p[4] = noncoef_merge_prob(p[4], c[2], s2);
+ p[5] = noncoef_merge_prob(p[5], c[4], c[5]);
+ sum -= c[3];
+ p[6] = noncoef_merge_prob(p[6], c[3], sum);
+ sum -= c[8];
+ p[7] = noncoef_merge_prob(p[7], c[8], sum);
+ p[8] = noncoef_merge_prob(p[8], c[6], c[7]);
+}
+
+static inline void merge_probs_variant_e(u8 *p, const u32 *c)
+{
+ p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2] + c[3]);
+ p[1] = noncoef_merge_prob(p[1], c[1], c[2] + c[3]);
+ p[2] = noncoef_merge_prob(p[2], c[2], c[3]);
+}
+
+static inline void merge_probs_variant_f(u8 *p, const u32 *c)
+{
+ p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2]);
+ p[1] = noncoef_merge_prob(p[1], c[1], c[2]);
+}
+
+static void merge_probs_variant_g(u8 *p, const u32 *c)
+{
+ u32 sum;
+
+ sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
+ p[0] = noncoef_merge_prob(p[0], c[0], sum);
+ sum -= c[1];
+ p[1] = noncoef_merge_prob(p[1], c[1], sum);
+ sum -= c[2] + c[3];
+ p[2] = noncoef_merge_prob(p[2], c[2] + c[3], sum);
+ p[3] = noncoef_merge_prob(p[3], c[2], c[3]);
+ sum -= c[4] + c[5];
+ p[4] = noncoef_merge_prob(p[4], c[4] + c[5], sum);
+ p[5] = noncoef_merge_prob(p[5], c[4], c[5]);
+ sum -= c[6];
+ p[6] = noncoef_merge_prob(p[6], c[6], sum);
+ p[7] = noncoef_merge_prob(p[7], c[7] + c[8], c[9] + c[10]);
+ p[8] = noncoef_merge_prob(p[8], c[7], c[8]);
+ p[9] = noncoef_merge_prob(p[9], c[9], c[10]);
+}
+
+/* 8.4.3 Coefficient probability adaptation process */
+static inline void adapt_probs_variant_a_coef(u8 *p, const u32 *c, u32 update_factor)
+{
+ merge_probs_variant_a(p, c, 24, update_factor);
+}
+
+static inline void adapt_probs_variant_b_coef(u8 *p, const u32 *c, u32 update_factor)
+{
+ merge_probs_variant_b(p, c, 24, update_factor);
+}
+
+static void _adapt_coeff(unsigned int i, unsigned int j, unsigned int k,
+ struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_vp9_frame_symbol_counts *counts,
+ u32 uf)
+{
+ s32 l, m;
+
+ for (l = 0; l < ARRAY_SIZE(probs->coef[0][0][0]); l++) {
+ for (m = 0; m < BAND_6(l); m++) {
+ u8 *p = probs->coef[i][j][k][l][m];
+ const u32 counts_more_coefs[2] = {
+ *counts->eob[i][j][k][l][m][1],
+ *counts->eob[i][j][k][l][m][0] - *counts->eob[i][j][k][l][m][1],
+ };
+
+ adapt_probs_variant_a_coef(p, *counts->coeff[i][j][k][l][m], uf);
+ adapt_probs_variant_b_coef(p, counts_more_coefs, uf);
+ }
+ }
+}
+
+static void _adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_vp9_frame_symbol_counts *counts,
+ unsigned int uf)
+{
+ unsigned int i, j, k;
+
+ for (i = 0; i < ARRAY_SIZE(probs->coef); i++)
+ for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
+ for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
+ _adapt_coeff(i, j, k, probs, counts, uf);
+}
+
+void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
+ struct v4l2_vp9_frame_symbol_counts *counts,
+ bool use_128,
+ bool frame_is_intra)
+{
+ if (frame_is_intra) {
+ _adapt_coef_probs(probs, counts, 112);
+ } else {
+ if (use_128)
+ _adapt_coef_probs(probs, counts, 128);
+ else
+ _adapt_coef_probs(probs, counts, 112);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_coef_probs);
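+
+/*
+ * A minimal usage sketch (hypothetical driver code; "ctx" and its members
+ * are made up for illustration): once per-frame symbol counts have been
+ * collected from the hardware, a driver refreshes the active context:
+ *
+ *	v4l2_vp9_adapt_coef_probs(&ctx->frame_context[ctx->fctx_idx],
+ *				  &ctx->counts, ctx->use_128,
+ *				  frame_is_intra);
+ */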
+
+/* 8.4.4 Non coefficient probability adaptation process, adapt_probs() */
+static inline void adapt_probs_variant_b(u8 *p, const u32 *c)
+{
+ merge_probs_variant_b(p, c, 20, 128);
+}
+
+static inline void adapt_probs_variant_c(u8 *p, const u32 *c)
+{
+ merge_probs_variant_c(p, c);
+}
+
+static inline void adapt_probs_variant_d(u8 *p, const u32 *c)
+{
+ merge_probs_variant_d(p, c);
+}
+
+static inline void adapt_probs_variant_e(u8 *p, const u32 *c)
+{
+ merge_probs_variant_e(p, c);
+}
+
+static inline void adapt_probs_variant_f(u8 *p, const u32 *c)
+{
+ merge_probs_variant_f(p, c);
+}
+
+static inline void adapt_probs_variant_g(u8 *p, const u32 *c)
+{
+ merge_probs_variant_g(p, c);
+}
+
+/* 8.4.4 Non coefficient probability adaptation process, adapt_prob() */
+static inline u8 adapt_prob(u8 prob, const u32 counts[2])
+{
+ return noncoef_merge_prob(prob, counts[0], counts[1]);
+}
+
+/* 8.4.4 Non coefficient probability adaptation process */
+void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs,
+ struct v4l2_vp9_frame_symbol_counts *counts,
+ u8 reference_mode, u8 interpolation_filter, u8 tx_mode,
+ u32 flags)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++)
+ probs->is_inter[i] = adapt_prob(probs->is_inter[i], (*counts->intra_inter)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++)
+ probs->comp_mode[i] = adapt_prob(probs->comp_mode[i], (*counts->comp)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++)
+ probs->comp_ref[i] = adapt_prob(probs->comp_ref[i], (*counts->comp_ref)[i]);
+
+ if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE)
+ for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++)
+ for (j = 0; j < ARRAY_SIZE(probs->single_ref[0]); j++)
+ probs->single_ref[i][j] = adapt_prob(probs->single_ref[i][j],
+ (*counts->single_ref)[i][j]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++)
+ adapt_probs_variant_c(probs->inter_mode[i], (*counts->mv_mode)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++)
+ adapt_probs_variant_d(probs->y_mode[i], (*counts->y_mode)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->uv_mode); i++)
+ adapt_probs_variant_d(probs->uv_mode[i], (*counts->uv_mode)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->partition); i++)
+ adapt_probs_variant_e(probs->partition[i], (*counts->partition)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->skip); i++)
+ probs->skip[i] = adapt_prob(probs->skip[i], (*counts->skip)[i]);
+
+ if (interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE)
+ for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++)
+ adapt_probs_variant_f(probs->interp_filter[i], (*counts->filter)[i]);
+
+ if (tx_mode == V4L2_VP9_TX_MODE_SELECT)
+ for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) {
+ adapt_probs_variant_b(probs->tx8[i], (*counts->tx8p)[i]);
+ adapt_probs_variant_f(probs->tx16[i], (*counts->tx16p)[i]);
+ adapt_probs_variant_e(probs->tx32[i], (*counts->tx32p)[i]);
+ }
+
+ adapt_probs_variant_e(probs->mv.joint, *counts->mv_joint);
+
+ for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) {
+ probs->mv.sign[i] = adapt_prob(probs->mv.sign[i], (*counts->sign)[i]);
+
+ adapt_probs_variant_g(probs->mv.classes[i], (*counts->classes)[i]);
+
+ probs->mv.class0_bit[i] = adapt_prob(probs->mv.class0_bit[i], (*counts->class0)[i]);
+
+ for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++)
+ probs->mv.bits[i][j] = adapt_prob(probs->mv.bits[i][j],
+ (*counts->bits)[i][j]);
+
+ for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++)
+ adapt_probs_variant_e(probs->mv.class0_fr[i][j],
+ (*counts->class0_fp)[i][j]);
+
+ adapt_probs_variant_e(probs->mv.fr[i], (*counts->fp)[i]);
+
+ if (!(flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV))
+ continue;
+
+ probs->mv.class0_hp[i] = adapt_prob(probs->mv.class0_hp[i],
+ (*counts->class0_hp)[i]);
+
+ probs->mv.hp[i] = adapt_prob(probs->mv.hp[i], (*counts->hp)[i]);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_noncoef_probs);
+
+bool
+v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled,
+ unsigned int feature,
+ unsigned int segid)
+{
+ u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature);
+
+ return !!(feature_enabled[segid] & mask);
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_seg_feat_enabled);
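+
+/*
+ * Usage example (illustrative; "seg" would typically point at the
+ * segmentation parameters of a struct v4l2_ctrl_vp9_frame, and
+ * handle_skip_segment() is made up): checking whether the SKIP feature
+ * is enabled for segment "segid" looks like:
+ *
+ *	if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled,
+ *				      V4L2_VP9_SEG_LVL_SKIP, segid))
+ *		handle_skip_segment();
+ */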
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("V4L2 VP9 Helpers");
+MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@collabora.com>");
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index be0858bff4d3..ed11887c1b7c 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -237,6 +237,7 @@ struct gpmc_device {
struct omap3_gpmc_regs context;
int nirqs;
unsigned int is_suspended:1;
+ struct resource *data;
};
static struct irq_domain *gpmc_irq_domain;
@@ -1456,12 +1457,18 @@ static void gpmc_mem_exit(void)
}
}
-static void gpmc_mem_init(void)
+static void gpmc_mem_init(struct gpmc_device *gpmc)
{
int cs;
- gpmc_mem_root.start = GPMC_MEM_START;
- gpmc_mem_root.end = GPMC_MEM_END;
+ if (!gpmc->data) {
+ /* All legacy devices have the same data IO window */
+ gpmc_mem_root.start = GPMC_MEM_START;
+ gpmc_mem_root.end = GPMC_MEM_END;
+ } else {
+ gpmc_mem_root.start = gpmc->data->start;
+ gpmc_mem_root.end = gpmc->data->end;
+ }
 /* Reserve all regions that have been set up by the bootloader */
for (cs = 0; cs < gpmc_cs_num; cs++) {
@@ -1888,6 +1895,7 @@ static const struct of_device_id gpmc_dt_ids[] = {
{ .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */
{ .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */
{ .compatible = "ti,am3352-gpmc" }, /* am335x devices */
+ { .compatible = "ti,am64-gpmc" },
{ }
};
@@ -2175,7 +2183,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
}
}
- if (of_device_is_compatible(child, "ti,omap2-nand")) {
+ if (of_match_node(omap_nand_ids, child)) {
/* NAND specific setup */
val = 8;
of_property_read_u32(child, "nand-bus-width", &val);
@@ -2502,21 +2510,29 @@ static int gpmc_probe(struct platform_device *pdev)
gpmc->dev = &pdev->dev;
platform_set_drvdata(pdev, gpmc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
- gpmc_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(gpmc_base))
- return PTR_ERR(gpmc_base);
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
if (!res) {
- dev_err(&pdev->dev, "Failed to get resource: irq\n");
- return -ENOENT;
+ /* legacy DT */
+ gpmc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpmc_base))
+ return PTR_ERR(gpmc_base);
+ } else {
+ gpmc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpmc_base))
+ return PTR_ERR(gpmc_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "data");
+ if (!res) {
+ dev_err(&pdev->dev, "couldn't get data reg resource\n");
+ return -ENOENT;
+ }
+
+ gpmc->data = res;
}
- gpmc->irq = res->start;
+ gpmc->irq = platform_get_irq(pdev, 0);
+ if (gpmc->irq < 0)
+ return gpmc->irq;
gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(gpmc_l3_clk)) {
@@ -2562,7 +2578,7 @@ static int gpmc_probe(struct platform_device *pdev)
dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
GPMC_REVISION_MINOR(l));
- gpmc_mem_init();
+ gpmc_mem_init(gpmc);
rc = gpmc_gpio_init(gpmc);
if (rc)
goto gpio_init_failed;
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 7435baad0007..e4cc64f56019 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -19,19 +20,17 @@
#define RPCIF_CMNCR 0x0000 /* R/W */
#define RPCIF_CMNCR_MD BIT(31)
-#define RPCIF_CMNCR_SFDE BIT(24) /* undocumented but must be set */
#define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22)
#define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20)
#define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18)
#define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16)
-#define RPCIF_CMNCR_MOIIO_HIZ (RPCIF_CMNCR_MOIIO0(3) | \
- RPCIF_CMNCR_MOIIO1(3) | \
- RPCIF_CMNCR_MOIIO2(3) | RPCIF_CMNCR_MOIIO3(3))
-#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* undocumented */
-#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* undocumented */
+#define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \
+ RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val))
+#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */
+#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */
#define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8)
-#define RPCIF_CMNCR_IOFV_HIZ (RPCIF_CMNCR_IO0FV(3) | RPCIF_CMNCR_IO2FV(3) | \
- RPCIF_CMNCR_IO3FV(3))
+#define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \
+ RPCIF_CMNCR_IO3FV(val))
#define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0)
#define RPCIF_SSLDR 0x0004 /* R/W */
@@ -126,6 +125,9 @@
#define RPCIF_SMDRENR_OPDRE BIT(4)
#define RPCIF_SMDRENR_SPIDRE BIT(0)
+#define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
+#define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
+
#define RPCIF_PHYCNT 0x007C /* R/W */
#define RPCIF_PHYCNT_CAL BIT(31)
#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22)
@@ -133,10 +135,12 @@
#define RPCIF_PHYCNT_OCT BIT(20)
#define RPCIF_PHYCNT_DDRCAL BIT(19)
#define RPCIF_PHYCNT_HS BIT(18)
-#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15)
+#define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */
+#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15) /* valid for R-Car and RZ/G2{E,H,M,N} */
#define RPCIF_PHYCNT_WBUF2 BIT(4)
#define RPCIF_PHYCNT_WBUF BIT(2)
#define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0)
+#define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0)
#define RPCIF_PHYOFFSET1 0x0080 /* R/W */
#define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28)
@@ -147,8 +151,6 @@
#define RPCIF_PHYINT 0x0088 /* R/W */
#define RPCIF_PHYINT_WPVAL BIT(1)
-#define RPCIF_DIRMAP_SIZE 0x4000000
-
static const struct regmap_range rpcif_volatile_ranges[] = {
regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1),
regmap_reg_range(RPCIF_SMWDR0, RPCIF_SMWDR1),
@@ -243,50 +245,74 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rpc->dirmap))
- rpc->dirmap = NULL;
+ return PTR_ERR(rpc->dirmap);
rpc->size = resource_size(res);
+ rpc->type = (uintptr_t)of_device_get_match_data(dev);
rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
return PTR_ERR_OR_ZERO(rpc->rstc);
}
EXPORT_SYMBOL(rpcif_sw_init);
-void rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
+static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc)
+{
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000);
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000022);
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000024);
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CKSEL(3),
+ RPCIF_PHYCNT_CKSEL(3));
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00000030);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032);
+}
+
+int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
{
u32 dummy;
pm_runtime_get_sync(rpc->dev);
- /*
- * NOTE: The 0x260 are undocumented bits, but they must be set.
- * RPCIF_PHYCNT_STRTIM is strobe timing adjustment bits,
- * 0x0 : the delay is biggest,
- * 0x1 : the delay is 2nd biggest,
- * On H3 ES1.x, the value should be 0, while on others,
- * the value should be 7.
- */
- regmap_write(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_STRTIM(7) |
- RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0) | 0x260);
-
- /*
- * NOTE: The 0x1511144 are undocumented bits, but they must be set
- * for RPCIF_PHYOFFSET1.
- * The 0x31 are undocumented bits, but they must be set
- * for RPCIF_PHYOFFSET2.
- */
- regmap_write(rpc->regmap, RPCIF_PHYOFFSET1, 0x1511144 |
- RPCIF_PHYOFFSET1_DDRTMG(3));
- regmap_write(rpc->regmap, RPCIF_PHYOFFSET2, 0x31 |
- RPCIF_PHYOFFSET2_OCTTMG(4));
+ if (rpc->type == RPCIF_RZ_G2L) {
+ int ret;
+
+ ret = reset_control_reset(rpc->rstc);
+ if (ret)
+ return ret;
+ usleep_range(200, 300);
+ rpcif_rzg2l_timing_adjust_sdr(rpc);
+ }
+
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_PHYMEM_MASK,
+ RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0));
+
+ if (rpc->type == RPCIF_RCAR_GEN3)
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
+ RPCIF_PHYCNT_STRTIM(7), RPCIF_PHYCNT_STRTIM(7));
+
+ regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET1, RPCIF_PHYOFFSET1_DDRTMG(3),
+ RPCIF_PHYOFFSET1_DDRTMG(3));
+ regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET2, RPCIF_PHYOFFSET2_OCTTMG(7),
+ RPCIF_PHYOFFSET2_OCTTMG(4));
if (hyperflash)
regmap_update_bits(rpc->regmap, RPCIF_PHYINT,
RPCIF_PHYINT_WPVAL, 0);
- regmap_write(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_SFDE |
- RPCIF_CMNCR_MOIIO_HIZ | RPCIF_CMNCR_IOFV_HIZ |
- RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
+ if (rpc->type == RPCIF_RCAR_GEN3)
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
+ RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_BSZ(3),
+ RPCIF_CMNCR_MOIIO(3) |
+ RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
+ else
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
+ RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) |
+ RPCIF_CMNCR_BSZ(3),
+ RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(2) |
+ RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
+
/* Set RCF after BSZ update */
regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF);
/* Dummy read according to spec */
@@ -297,6 +323,8 @@ void rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
pm_runtime_put(rpc->dev);
rpc->bus_size = hyperflash ? 2 : 1;
+
+ return 0;
}
EXPORT_SYMBOL(rpcif_hw_init);
@@ -588,8 +616,8 @@ static void memcpy_fromio_readw(void *to,
ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
{
- loff_t from = offs & (RPCIF_DIRMAP_SIZE - 1);
- size_t size = RPCIF_DIRMAP_SIZE - from;
+ loff_t from = offs & (rpc->size - 1);
+ size_t size = rpc->size - from;
if (len > size)
len = size;
@@ -659,7 +687,8 @@ static int rpcif_remove(struct platform_device *pdev)
}
static const struct of_device_id rpcif_of_match[] = {
- { .compatible = "renesas,rcar-gen3-rpc-if", },
+ { .compatible = "renesas,rcar-gen3-rpc-if", .data = (void *)RPCIF_RCAR_GEN3 },
+ { .compatible = "renesas,rzg2l-rpc-if", .data = (void *)RPCIF_RZ_G2L },
{},
};
MODULE_DEVICE_TABLE(of, rpcif_of_match);
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index b94d5e4fdc23..e90adfa57950 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -300,8 +300,8 @@ mpt_is_discovery_complete(MPT_ADAPTER *ioc)
if (!hdr.ExtPageLength)
goto out;
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
@@ -316,8 +316,8 @@ mpt_is_discovery_complete(MPT_ADAPTER *ioc)
rc = 1;
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return rc;
}
@@ -1274,8 +1274,6 @@ mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
static int
mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
{
- int r = 0;
-
/* return if in use */
if (CHIPREG_READ32(&ioc->chip->Doorbell)
& MPI_DOORBELL_ACTIVE)
@@ -1289,9 +1287,9 @@ mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int slee
(access_control_value<<12)));
/* Wait for IOC to clear Doorbell Status bit */
- if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
+ if (WaitForDoorbellAck(ioc, 5, sleepFlag) < 0)
return -2;
- }else
+ else
return 0;
}
@@ -1663,16 +1661,14 @@ mpt_mapresources(MPT_ADAPTER *ioc)
const uint64_t required_mask = dma_get_required_mask
(&pdev->dev);
if (required_mask > DMA_BIT_MASK(32)
- && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- && !pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(64))) {
+ && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))
+ && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
ioc->dma_mask = DMA_BIT_MASK(64);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
ioc->name));
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))
+ && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
ioc->dma_mask = DMA_BIT_MASK(32);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
@@ -1683,9 +1679,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
goto out_pci_release_region;
}
} else {
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))
+ && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
ioc->dma_mask = DMA_BIT_MASK(32);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
@@ -2771,9 +2766,9 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
if (ioc->spi_data.pIocPg4 != NULL) {
sz = ioc->spi_data.IocPg4Sz;
- pci_free_consistent(ioc->pcidev, sz,
- ioc->spi_data.pIocPg4,
- ioc->spi_data.IocPg4_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz,
+ ioc->spi_data.pIocPg4,
+ ioc->spi_data.IocPg4_dma);
ioc->spi_data.pIocPg4 = NULL;
ioc->alloc_total -= sz;
}
@@ -3517,7 +3512,8 @@ mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
rc = 0;
goto out;
}
- ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma);
+ ioc->cached_fw = dma_alloc_coherent(&ioc->pcidev->dev, size,
+ &ioc->cached_fw_dma, GFP_ATOMIC);
if (!ioc->cached_fw) {
printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n",
ioc->name);
@@ -3550,7 +3546,8 @@ mpt_free_fw_memory(MPT_ADAPTER *ioc)
sz = ioc->facts.FWImageSize;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
- pci_free_consistent(ioc->pcidev, sz, ioc->cached_fw, ioc->cached_fw_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->cached_fw,
+ ioc->cached_fw_dma);
ioc->alloc_total -= sz;
ioc->cached_fw = NULL;
}
@@ -4449,9 +4446,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
*/
if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
ioc->dma_mask > DMA_BIT_MASK(35)) {
- if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(ioc->pcidev,
- DMA_BIT_MASK(32))) {
+ if (!dma_set_mask(&ioc->pcidev->dev, DMA_BIT_MASK(32))
+ && !dma_set_coherent_mask(&ioc->pcidev->dev, DMA_BIT_MASK(32))) {
dma_mask = DMA_BIT_MASK(35);
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"setting 35 bit addressing for "
@@ -4459,10 +4455,10 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->name));
} else {
 /* Resetting DMA mask to 64 bit */
- pci_set_dma_mask(ioc->pcidev,
- DMA_BIT_MASK(64));
- pci_set_consistent_dma_mask(ioc->pcidev,
- DMA_BIT_MASK(64));
+ dma_set_mask(&ioc->pcidev->dev,
+ DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&ioc->pcidev->dev,
+ DMA_BIT_MASK(64));
printk(MYIOC_s_ERR_FMT
"failed setting 35 bit addressing for "
@@ -4597,8 +4593,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
alloc_dma += ioc->reply_sz;
}
- if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
- ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
+ if (dma_mask == DMA_BIT_MASK(35) && !dma_set_mask(&ioc->pcidev->dev,
+ ioc->dma_mask) && !dma_set_coherent_mask(&ioc->pcidev->dev,
ioc->dma_mask))
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"restoring 64 bit addressing\n", ioc->name));
@@ -4622,8 +4618,8 @@ out_fail:
ioc->sense_buf_pool = NULL;
}
- if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
- DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
+ if (dma_mask == DMA_BIT_MASK(35) && !dma_set_mask(&ioc->pcidev->dev,
+ DMA_BIT_MASK(64)) && !dma_set_coherent_mask(&ioc->pcidev->dev,
DMA_BIT_MASK(64)))
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"restoring 64 bit addressing\n", ioc->name));
@@ -4970,7 +4966,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
if (hdr.PageLength > 0) {
data_sz = hdr.PageLength * 4;
- ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+ ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page0_dma, GFP_KERNEL);
rc = -ENOMEM;
if (ppage0_alloc) {
memset((u8 *)ppage0_alloc, 0, data_sz);
@@ -4984,7 +4981,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
}
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz,
+ (u8 *)ppage0_alloc, page0_dma);
/* FIXME!
* Normalize endianness of structure data,
@@ -5016,7 +5014,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
- ppage1_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
+ ppage1_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page1_dma, GFP_KERNEL);
if (ppage1_alloc) {
memset((u8 *)ppage1_alloc, 0, data_sz);
cfg.physAddr = page1_dma;
@@ -5028,7 +5027,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz);
}
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz,
+ (u8 *)ppage1_alloc, page1_dma);
/* FIXME!
* Normalize endianness of structure data,
@@ -5317,7 +5317,8 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
/* Read the config page */
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
- ppage_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ ppage_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page_dma, GFP_KERNEL);
if (ppage_alloc) {
memset((u8 *)ppage_alloc, 0, data_sz);
cfg.physAddr = page_dma;
@@ -5327,7 +5328,8 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
if ((rc = mpt_config(ioc, &cfg)) == 0)
ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion);
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz,
+ (u8 *)ppage_alloc, page_dma);
}
return rc;
@@ -5402,7 +5404,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
return -EFAULT;
if (header.PageLength > 0) {
- pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
+ pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
+ header.PageLength * 4, &buf_dma,
+ GFP_KERNEL);
if (pbuf) {
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.physAddr = buf_dma;
@@ -5458,7 +5462,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
}
}
if (pbuf) {
- pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
+ dma_free_coherent(&ioc->pcidev->dev,
+ header.PageLength * 4, pbuf,
+ buf_dma);
}
}
}
@@ -5480,7 +5486,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
if (header.PageLength > 0) {
/* Allocate memory and read SCSI Port Page 2
*/
- pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
+ pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
+ header.PageLength * 4, &buf_dma,
+ GFP_KERNEL);
if (pbuf) {
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM;
cfg.physAddr = buf_dma;
@@ -5545,7 +5553,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
}
}
- pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
+ dma_free_coherent(&ioc->pcidev->dev,
+ header.PageLength * 4, pbuf,
+ buf_dma);
}
}
@@ -5661,8 +5671,8 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
if (!hdr.PageLength)
goto out;
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
@@ -5709,8 +5719,8 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
}
/**
@@ -5754,8 +5764,8 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
rc = -ENOMEM;
@@ -5778,8 +5788,8 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
return rc;
}
@@ -5821,8 +5831,8 @@ mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
rc = 0;
@@ -5842,8 +5852,8 @@ mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
return rc;
}
@@ -5893,8 +5903,8 @@ mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
rc = -ENOMEM;
@@ -5931,8 +5941,8 @@ mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
return rc;
}
@@ -5988,7 +5998,8 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
return -EFAULT;
iocpage2sz = header.PageLength * 4;
- pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma);
+ pIoc2 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage2sz, &ioc2_dma,
+ GFP_KERNEL);
if (!pIoc2)
return -ENOMEM;
@@ -6013,7 +6024,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
pIoc2->RaidVolume[i].VolumeID);
out:
- pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma);
+ dma_free_coherent(&ioc->pcidev->dev, iocpage2sz, pIoc2, ioc2_dma);
return rc;
}
@@ -6055,7 +6066,8 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
/* Read Header good, alloc memory
*/
iocpage3sz = header.PageLength * 4;
- pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma);
+ pIoc3 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage3sz, &ioc3_dma,
+ GFP_KERNEL);
if (!pIoc3)
return 0;
@@ -6072,7 +6084,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
}
}
- pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma);
+ dma_free_coherent(&ioc->pcidev->dev, iocpage3sz, pIoc3, ioc3_dma);
return 0;
}
@@ -6106,7 +6118,8 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) {
iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */
- pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
+ pIoc4 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage4sz,
+ &ioc4_dma, GFP_KERNEL);
if (!pIoc4)
return;
ioc->alloc_total += iocpage4sz;
@@ -6124,7 +6137,8 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
ioc->spi_data.IocPg4_dma = ioc4_dma;
ioc->spi_data.IocPg4Sz = iocpage4sz;
} else {
- pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
+ dma_free_coherent(&ioc->pcidev->dev, iocpage4sz, pIoc4,
+ ioc4_dma);
ioc->spi_data.pIocPg4 = NULL;
ioc->alloc_total -= iocpage4sz;
}
@@ -6161,7 +6175,8 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
/* Read Header good, alloc memory
*/
iocpage1sz = header.PageLength * 4;
- pIoc1 = pci_alloc_consistent(ioc->pcidev, iocpage1sz, &ioc1_dma);
+ pIoc1 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage1sz, &ioc1_dma,
+ GFP_KERNEL);
if (!pIoc1)
return;
@@ -6212,7 +6227,7 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
}
}
- pci_free_consistent(ioc->pcidev, iocpage1sz, pIoc1, ioc1_dma);
+ dma_free_coherent(&ioc->pcidev->dev, iocpage1sz, pIoc1, ioc1_dma);
return;
}
@@ -6241,7 +6256,8 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
goto out;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
- pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
+ pbuf = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &buf_dma, GFP_KERNEL);
if (!pbuf)
goto out;
@@ -6257,7 +6273,8 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
out:
if (pbuf)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, pbuf,
+ buf_dma);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
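
The mptbase.c hunks above (and the mptctl.c/mptlan.c hunks that follow) are a
mechanical conversion from the deprecated pci-dma-compat wrappers to the
generic DMA API. As a rough sketch of why the replacement is equivalent, the
legacy wrapper was essentially:

	/* approximate shape of the old include/linux/pci-dma-compat.h helper */
	static inline void *pci_alloc_consistent(struct pci_dev *hwdev,
						 size_t size,
						 dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(&hwdev->dev, size, dma_handle,
					  GFP_ATOMIC);
	}

Calling dma_alloc_coherent() directly lets sleepable paths pass GFP_KERNEL,
as most of these hunks do, while mpt_alloc_fw_memory() keeps GFP_ATOMIC.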
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index ae433c150b37..03c8fb1795c2 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1041,14 +1041,15 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
* copying the data in this array into the correct place in the
* request and chain buffers.
*/
- sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma);
+ sglbuf = dma_alloc_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES,
+ sglbuf_dma, GFP_KERNEL);
if (sglbuf == NULL)
goto free_and_fail;
if (sgdir & 0x04000000)
- dir = PCI_DMA_TODEVICE;
+ dir = DMA_TO_DEVICE;
else
- dir = PCI_DMA_FROMDEVICE;
+ dir = DMA_FROM_DEVICE;
/* At start:
* sgl = sglbuf = point to beginning of sg buffer
@@ -1062,9 +1063,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
while (bytes_allocd < bytes) {
this_alloc = min(alloc_sz, bytes-bytes_allocd);
buflist[buflist_ent].len = this_alloc;
- buflist[buflist_ent].kptr = pci_alloc_consistent(ioc->pcidev,
- this_alloc,
- &pa);
+ buflist[buflist_ent].kptr = dma_alloc_coherent(&ioc->pcidev->dev,
+ this_alloc,
+ &pa, GFP_KERNEL);
if (buflist[buflist_ent].kptr == NULL) {
alloc_sz = alloc_sz / 2;
if (alloc_sz == 0) {
@@ -1080,8 +1081,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
bytes_allocd += this_alloc;
sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
- dma_addr = pci_map_single(ioc->pcidev,
- buflist[buflist_ent].kptr, this_alloc, dir);
+ dma_addr = dma_map_single(&ioc->pcidev->dev,
+ buflist[buflist_ent].kptr,
+ this_alloc, dir);
sgl->Address = dma_addr;
fragcnt++;
@@ -1140,9 +1142,11 @@ free_and_fail:
kptr = buflist[i].kptr;
len = buflist[i].len;
- pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+ dma_free_coherent(&ioc->pcidev->dev, len, kptr,
+ dma_addr);
}
- pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf, *sglbuf_dma);
+ dma_free_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, sglbuf,
+ *sglbuf_dma);
}
kfree(buflist);
return NULL;
@@ -1162,9 +1166,9 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
int n = 0;
if (sg->FlagsLength & 0x04000000)
- dir = PCI_DMA_TODEVICE;
+ dir = DMA_TO_DEVICE;
else
- dir = PCI_DMA_FROMDEVICE;
+ dir = DMA_FROM_DEVICE;
nib = (sg->FlagsLength & 0xF0000000) >> 28;
while (! (nib & 0x4)) { /* eob */
@@ -1179,8 +1183,10 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
dma_addr = sg->Address;
kptr = bl->kptr;
len = bl->len;
- pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
- pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+ dma_unmap_single(&ioc->pcidev->dev, dma_addr, len,
+ dir);
+ dma_free_coherent(&ioc->pcidev->dev, len, kptr,
+ dma_addr);
n++;
}
sg++;
@@ -1197,12 +1203,12 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
dma_addr = sg->Address;
kptr = bl->kptr;
len = bl->len;
- pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
- pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+ dma_unmap_single(&ioc->pcidev->dev, dma_addr, len, dir);
+ dma_free_coherent(&ioc->pcidev->dev, len, kptr, dma_addr);
n++;
}
- pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma);
+ dma_free_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, sgl, sgl_dma);
kfree(buflist);
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: Free'd 1 SGL buf + %d kbufs!\n",
ioc->name, n));
@@ -2100,8 +2106,9 @@ mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __u
}
flagsLength |= karg.dataOutSize;
bufOut.len = karg.dataOutSize;
- bufOut.kptr = pci_alloc_consistent(
- ioc->pcidev, bufOut.len, &dma_addr_out);
+ bufOut.kptr = dma_alloc_coherent(&ioc->pcidev->dev,
+ bufOut.len,
+ &dma_addr_out, GFP_KERNEL);
if (bufOut.kptr == NULL) {
rc = -ENOMEM;
@@ -2134,8 +2141,9 @@ mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __u
flagsLength |= karg.dataInSize;
bufIn.len = karg.dataInSize;
- bufIn.kptr = pci_alloc_consistent(ioc->pcidev,
- bufIn.len, &dma_addr_in);
+ bufIn.kptr = dma_alloc_coherent(&ioc->pcidev->dev,
+ bufIn.len,
+ &dma_addr_in, GFP_KERNEL);
if (bufIn.kptr == NULL) {
rc = -ENOMEM;
@@ -2283,13 +2291,13 @@ done_free_mem:
/* Free the allocated memory.
*/
if (bufOut.kptr != NULL) {
- pci_free_consistent(ioc->pcidev,
- bufOut.len, (void *) bufOut.kptr, dma_addr_out);
+ dma_free_coherent(&ioc->pcidev->dev, bufOut.len,
+ (void *)bufOut.kptr, dma_addr_out);
}
if (bufIn.kptr != NULL) {
- pci_free_consistent(ioc->pcidev,
- bufIn.len, (void *) bufIn.kptr, dma_addr_in);
+ dma_free_coherent(&ioc->pcidev->dev, bufIn.len,
+ (void *)bufIn.kptr, dma_addr_in);
}
/* mf is null if command issued successfully
@@ -2395,7 +2403,9 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
/* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
- pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
+ pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
+ hdr.PageLength * 4,
+ &buf_dma, GFP_KERNEL);
if (pbuf) {
cfg.physAddr = buf_dma;
if (mpt_config(ioc, &cfg) == 0) {
@@ -2405,7 +2415,9 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
pdata->BoardTracerNumber, 24);
}
}
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
+ dma_free_coherent(&ioc->pcidev->dev,
+ hdr.PageLength * 4, pbuf,
+ buf_dma);
pbuf = NULL;
}
}
@@ -2470,7 +2482,7 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
else
IstwiRWRequest->DeviceAddr = 0xB0;
- pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
+ pbuf = dma_alloc_coherent(&ioc->pcidev->dev, 4, &buf_dma, GFP_KERNEL);
if (!pbuf)
goto out;
ioc->add_sge((char *)&IstwiRWRequest->SGL,
@@ -2519,7 +2531,7 @@ retry_wait:
SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
if (pbuf)
- pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
+ dma_free_coherent(&ioc->pcidev->dev, 4, pbuf, buf_dma);
/* Copy the data from kernel memory to user memory
*/
@@ -2585,7 +2597,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
/* Get the data transfer speeds
*/
data_sz = ioc->spi_data.sdp0length * 4;
- pg0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ pg0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz, &page_dma,
+ GFP_KERNEL);
if (pg0_alloc) {
hdr.PageVersion = ioc->spi_data.sdp0version;
hdr.PageLength = data_sz;
@@ -2623,7 +2636,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
}
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz, (u8 *)pg0_alloc,
+ page_dma);
}
/* Set defaults
@@ -2649,7 +2663,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
/* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
- pg3_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ pg3_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page_dma, GFP_KERNEL);
if (pg3_alloc) {
cfg.physAddr = page_dma;
cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
@@ -2658,7 +2673,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount);
karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount);
}
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz,
+ (u8 *)pg3_alloc, page_dma);
}
}
hd = shost_priv(ioc->sh);
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 117fa4ebf6d7..142eb5d5d9df 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -516,9 +516,9 @@ mpt_lan_close(struct net_device *dev)
if (priv->RcvCtl[i].skb != NULL) {
/**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/ "is still out\n", i));
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
- priv->RcvCtl[i].len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[i].dma,
+ priv->RcvCtl[i].len, DMA_FROM_DEVICE);
dev_kfree_skb(priv->RcvCtl[i].skb);
}
}
@@ -528,9 +528,9 @@ mpt_lan_close(struct net_device *dev)
for (i = 0; i < priv->tx_max_out; i++) {
if (priv->SendCtl[i].skb != NULL) {
- pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
- priv->SendCtl[i].len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev,
+ priv->SendCtl[i].dma,
+ priv->SendCtl[i].len, DMA_TO_DEVICE);
dev_kfree_skb(priv->SendCtl[i].skb);
}
}
@@ -582,8 +582,8 @@ mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
- priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma,
+ priv->SendCtl[ctx].len, DMA_TO_DEVICE);
dev_kfree_skb_irq(sent);
spin_lock_irqsave(&priv->txfidx_lock, flags);
@@ -648,8 +648,9 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
- priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev,
+ priv->SendCtl[ctx].dma,
+ priv->SendCtl[ctx].len, DMA_TO_DEVICE);
dev_kfree_skb_irq(sent);
priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
@@ -720,8 +721,8 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
skb_reset_mac_header(skb);
skb_pull(skb, 12);
- dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
priv->SendCtl[ctx].skb = skb;
priv->SendCtl[ctx].dma = dma;
@@ -868,13 +869,17 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
return -ENOMEM;
}
- pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
- pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
goto out;
}
@@ -882,8 +887,8 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
priv->RcvCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
out:
spin_lock_irqsave(&priv->rxfidx_lock, flags);
@@ -927,8 +932,8 @@ mpt_lan_receive_post_free(struct net_device *dev,
// dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
priv->RcvCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1028,16 +1033,16 @@ mpt_lan_receive_post_reply(struct net_device *dev,
// IOC_AND_NETDEV_NAMES_s_s(dev),
// i, l));
- pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
- pci_dma_sync_single_for_device(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
szrem -= l;
@@ -1056,17 +1061,17 @@ mpt_lan_receive_post_reply(struct net_device *dev,
return -ENOMEM;
}
- pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
- pci_dma_sync_single_for_device(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1077,8 +1082,8 @@ mpt_lan_receive_post_reply(struct net_device *dev,
priv->RcvCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
priv->RcvCtl[ctx].dma = 0;
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1199,10 +1204,10 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
skb = priv->RcvCtl[ctx].skb;
if (skb && (priv->RcvCtl[ctx].len != len)) {
- pci_unmap_single(mpt_dev->pcidev,
+ dma_unmap_single(&mpt_dev->pcidev->dev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb(priv->RcvCtl[ctx].skb);
skb = priv->RcvCtl[ctx].skb = NULL;
}
@@ -1218,8 +1223,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
break;
}
- dma = pci_map_single(mpt_dev->pcidev, skb->data,
- len, PCI_DMA_FROMDEVICE);
+ dma = dma_map_single(&mpt_dev->pcidev->dev,
+ skb->data, len,
+ DMA_FROM_DEVICE);
priv->RcvCtl[ctx].skb = skb;
priv->RcvCtl[ctx].dma = dma;
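
/*
 * The streaming-DMA half of the same conversion, sketched with an
 * illustrative helper (not in the driver), assuming an skb mapped for
 * transmit: pci_map_single()/PCI_DMA_TODEVICE become
 * dma_map_single()/DMA_TO_DEVICE, and new code would normally also
 * check the handle with dma_mapping_error() before handing it to
 * hardware.
 */
static dma_addr_t mpt_lan_example_map(struct pci_dev *pdev,
				      struct sk_buff *skb)
{
	dma_addr_t dma = dma_map_single(&pdev->dev, skb->data, skb->len,
					DMA_TO_DEVICE);

	if (dma_mapping_error(&pdev->dev, dma))
		return 0; /* sketch convention: 0 means mapping failed */

	return dma;
}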
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 091b45024d34..4acd8f9a48e1 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -702,8 +702,8 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
if (!hdr.PageLength)
goto out;
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
@@ -769,8 +769,8 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
}
/**
@@ -1399,8 +1399,8 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -1426,8 +1426,8 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
enclosure->sep_channel = buffer->SEPBus;
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2058,8 +2058,8 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
if (!hdr.ExtPageLength)
return -ENXIO;
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -2081,8 +2081,8 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
le32_to_cpu(buffer->PhyResetProblemCount);
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
return error;
}
@@ -2301,7 +2301,7 @@ static void mptsas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
<< MPI_SGE_FLAGS_SHIFT;
if (!dma_map_sg(&ioc->pcidev->dev, job->request_payload.sg_list,
- 1, PCI_DMA_BIDIRECTIONAL))
+ 1, DMA_BIDIRECTIONAL))
goto put_mf;
flagsLength |= (sg_dma_len(job->request_payload.sg_list) - 4);
@@ -2318,7 +2318,7 @@ static void mptsas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
if (!dma_map_sg(&ioc->pcidev->dev, job->reply_payload.sg_list,
- 1, PCI_DMA_BIDIRECTIONAL))
+ 1, DMA_BIDIRECTIONAL))
goto unmap_out;
flagsLength |= sg_dma_len(job->reply_payload.sg_list) + 4;
ioc->add_sge(psge, flagsLength,
@@ -2356,10 +2356,10 @@ static void mptsas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
unmap_in:
dma_unmap_sg(&ioc->pcidev->dev, job->reply_payload.sg_list, 1,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
unmap_out:
dma_unmap_sg(&ioc->pcidev->dev, job->request_payload.sg_list, 1,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
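
/*
 * The scatterlist variant follows the same renaming, sketched here for
 * the single-entry lists used by the SMP handler above:
 * PCI_DMA_BIDIRECTIONAL becomes DMA_BIDIRECTIONAL, and dma_map_sg()
 * returns the number of mapped entries (0 on failure), which is what
 * the callers test.
 */
static int mptsas_example_map_sg(struct pci_dev *pdev,
				 struct scatterlist *sgl)
{
	return dma_map_sg(&pdev->dev, sgl, 1, DMA_BIDIRECTIONAL);
}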
@@ -2412,8 +2412,8 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2452,8 +2452,8 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
}
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2487,8 +2487,8 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2509,8 +2509,8 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2551,8 +2551,8 @@ mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2573,8 +2573,8 @@ mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2614,8 +2614,8 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2654,8 +2654,8 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
device_info->flags = le16_to_cpu(buffer->Flags);
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2697,8 +2697,8 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2737,8 +2737,8 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
}
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2777,8 +2777,8 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2810,8 +2810,8 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2896,7 +2896,8 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
- data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pcidev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
if (!data_out) {
printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -2987,7 +2988,8 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
}
out_free:
if (data_out_dma)
- pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, data_out,
+ data_out_dma);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
@@ -4271,8 +4273,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
if (!hdr.PageLength)
goto out;
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
@@ -4318,8 +4320,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
}
/*
* Work queue thread to handle SAS hotplug events
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 3fb480818599..ba0b3eb131f1 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -634,7 +634,7 @@ config INTEL_SOC_PMIC_CHTWC
config INTEL_SOC_PMIC_CHTDC_TI
tristate "Support for Intel Cherry Trail Dollar Cove TI PMIC"
depends on GPIOLIB
- depends on I2C
+ depends on I2C=y && I2C_DESIGNWARE_PLATFORM=y
depends on ACPI
depends on X86
select MFD_CORE
@@ -644,6 +644,10 @@ config INTEL_SOC_PMIC_CHTDC_TI
Select this option for supporting Dollar Cove (TI version) PMIC
device that is found on some Intel Cherry Trail systems.
+ This option is a bool as it provides an ACPI OpRegion which must be
+ available before any devices using it are probed. This option also
+ needs the designware-i2c driver to be builtin for the same reason.
+
config INTEL_SOC_PMIC_MRFLD
tristate "Support for Intel Merrifield Basin Cove PMIC"
depends on GPIOLIB
@@ -692,16 +696,6 @@ config MFD_INTEL_PMC_BXT
Register and P-unit access. In addition this creates devices
for iTCO watchdog and telemetry that are part of the PMC.
-config MFD_INTEL_PMT
- tristate "Intel Platform Monitoring Technology (PMT) support"
- depends on X86 && PCI
- select MFD_CORE
- help
- The Intel Platform Monitoring Technology (PMT) is an interface that
- provides access to hardware monitor registers. This driver supports
- Telemetry, Watcher, and Crashlog PMT capabilities/devices for
- platforms starting from Tiger Lake.
-
config MFD_IPAQ_MICRO
bool "Atmel Micro ASIC (iPAQ h3100/h3600/h3700) Support"
depends on SA1100_H3100 || SA1100_H3600
@@ -1945,23 +1939,6 @@ config MFD_ROHM_BD718XX
NXP i.MX8. It contains 8 BUCK outputs and 7 LDOs, voltage monitoring
and emergency shut down as well as 32,768KHz clock output.
-config MFD_ROHM_BD70528
- tristate "ROHM BD70528 Power Management IC"
- depends on I2C=y
- depends on OF
- select REGMAP_I2C
- select REGMAP_IRQ
- select MFD_CORE
- help
- Select this option to get support for the ROHM BD70528 Power
- Management IC. BD71837 is general purpose single-chip power
- management IC for battery-powered portable devices. It contains
- 3 ultra-low current consumption buck converters, 3 LDOs and 2 LED
- drivers. Also included are 4 GPIOs, a real-time clock (RTC), a 32kHz
- crystal oscillator, high-accuracy VREF for use with an external ADC,
- 10 bits SAR ADC for battery temperature monitor and 1S battery
- charger.
-
config MFD_ROHM_BD71828
tristate "ROHM BD71828 and BD71815 Power Management IC"
depends on I2C=y
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 0b1b629aef3e..df1ecc4a4c95 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -211,7 +211,6 @@ obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o
obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o
obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o
obj-$(CONFIG_MFD_INTEL_PMC_BXT) += intel_pmc_bxt.o
-obj-$(CONFIG_MFD_INTEL_PMT) += intel_pmt.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
obj-$(CONFIG_MFD_NTXEC) += ntxec.o
@@ -257,7 +256,6 @@ obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o
obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
-obj-$(CONFIG_MFD_ROHM_BD70528) += rohm-bd70528.o
obj-$(CONFIG_MFD_ROHM_BD71828) += rohm-bd71828.o
obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
obj-$(CONFIG_MFD_ROHM_BD957XMUF) += rohm-bd9576.o
diff --git a/drivers/mfd/atmel-flexcom.c b/drivers/mfd/atmel-flexcom.c
index d2f5c073fdf3..559eb4d352b6 100644
--- a/drivers/mfd/atmel-flexcom.c
+++ b/drivers/mfd/atmel-flexcom.c
@@ -87,8 +87,7 @@ static const struct of_device_id atmel_flexcom_of_match[] = {
};
MODULE_DEVICE_TABLE(of, atmel_flexcom_of_match);
-#ifdef CONFIG_PM_SLEEP
-static int atmel_flexcom_resume(struct device *dev)
+static int __maybe_unused atmel_flexcom_resume_noirq(struct device *dev)
{
struct atmel_flexcom *ddata = dev_get_drvdata(dev);
int err;
@@ -105,16 +104,16 @@ static int atmel_flexcom_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(atmel_flexcom_pm_ops, NULL,
- atmel_flexcom_resume);
+static const struct dev_pm_ops atmel_flexcom_pm_ops = {
+ .resume_noirq = atmel_flexcom_resume_noirq,
+};
static struct platform_driver atmel_flexcom_driver = {
.probe = atmel_flexcom_probe,
.driver = {
.name = "atmel_flexcom",
- .pm = &atmel_flexcom_pm_ops,
+ .pm = pm_ptr(&atmel_flexcom_pm_ops),
.of_match_table = atmel_flexcom_of_match,
},
};
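
/*
 * A condensed sketch of the pattern adopted above, with illustrative
 * names: instead of guarding the callback with #ifdef CONFIG_PM_SLEEP,
 * it is marked __maybe_unused and the ops structure is wired up through
 * pm_ptr(), which evaluates to NULL when CONFIG_PM is disabled so the
 * compiler can discard the dead code.
 */
static int __maybe_unused example_resume_noirq(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	.resume_noirq = example_resume_noirq,
};

/* and in the driver definition: .pm = pm_ptr(&example_pm_ops) */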
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 01f8e10dfa55..2774b2cbaea6 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -556,6 +556,7 @@ static const struct regmap_range da9062_aa_writeable_ranges[] = {
regmap_reg_range(DA9062AA_VBUCK3_B, DA9062AA_VBUCK3_B),
regmap_reg_range(DA9062AA_VLDO1_B, DA9062AA_VLDO4_B),
regmap_reg_range(DA9062AA_BBAT_CONT, DA9062AA_BBAT_CONT),
+ regmap_reg_range(DA9062AA_CONFIG_J, DA9062AA_CONFIG_J),
regmap_reg_range(DA9062AA_GP_ID_0, DA9062AA_GP_ID_19),
};
@@ -674,6 +675,17 @@ static int da9062_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ /* If SMBus is not available and only I2C is possible, enter I2C mode */
+ if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) {
+ dev_info(chip->dev, "Entering I2C mode!\n");
+ ret = regmap_clear_bits(chip->regmap, DA9062AA_CONFIG_J,
+ DA9062AA_TWOWIRE_TO_MASK);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to set Two-Wire Bus Mode.\n");
+ return ret;
+ }
+ }
+
ret = da9062_clear_fault_log(chip);
if (ret < 0)
dev_warn(chip->dev, "Cannot clear fault log\n");
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 3f1d976eb67c..f2ea6540a01e 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -136,6 +136,7 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev)
{
struct intel_lpss_platform_info *info;
const struct acpi_device_id *id;
+ int ret;
id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev);
if (!id)
@@ -149,10 +150,14 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev)
info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
info->irq = platform_get_irq(pdev, 0);
+ ret = intel_lpss_probe(&pdev->dev, info);
+ if (ret)
+ return ret;
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- return intel_lpss_probe(&pdev->dev, info);
+ return 0;
}
static int intel_lpss_acpi_remove(struct platform_device *pdev)
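
/*
 * The reordering above follows the usual runtime-PM rule, sketched with
 * a hypothetical example_hw_init() helper: the device must be fully set
 * up before pm_runtime_set_active()/pm_runtime_enable() are called,
 * otherwise a runtime-PM callback could run against a half-initialized
 * device.
 */
static int example_probe(struct platform_device *pdev)
{
	int ret = example_hw_init(&pdev->dev); /* hypothetical helper */

	if (ret)
		return ret;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
}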
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index a872b4485eac..5513fae6be92 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -17,6 +17,15 @@
#include "intel-lpss.h"
+/* Some DSDTs have an unused GEXP ACPI device conflicting with I2C4 resources */
+static const struct pci_device_id ignore_resource_conflicts_ids[] = {
+ /* Microsoft Surface Go (version 1) I2C4 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1182), },
+ /* Microsoft Surface Go 2 I2C4 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1237), },
+ { }
+};
+
static int intel_lpss_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -35,6 +44,9 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
info->mem = &pdev->resource[0];
info->irq = pdev->irq;
+ if (pci_match_id(ignore_resource_conflicts_ids, pdev))
+ info->ignore_resource_conflicts = true;
+
pdev->d3cold_delay = 0;
/* Probably it is enough to set this for iDMA capable devices only */
@@ -254,7 +266,7 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info },
/* ICL-N */
- { PCI_VDEVICE(INTEL, 0x38a8), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x38a8), (kernel_ulong_t)&spt_uart_info },
/* TGL-H */
{ PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info },
@@ -359,7 +371,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* LKF */
{ PCI_VDEVICE(INTEL, 0x98a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x98a9), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x98aa), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x98c5), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x98c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x98c7), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x98e8), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x98e9), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x98ea), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x98eb), (kernel_ulong_t)&bxt_i2c_info },
/* SPT-LP */
{ PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info },
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 0e15afc39f54..cfbee2cfba6b 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -401,6 +401,7 @@ int intel_lpss_probe(struct device *dev,
return ret;
lpss->cell->swnode = info->swnode;
+ lpss->cell->ignore_resource_conflicts = info->ignore_resource_conflicts;
intel_lpss_init_dev(lpss);
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index 22dbc4aed793..062ce95b68b9 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -19,6 +19,7 @@ struct software_node;
struct intel_lpss_platform_info {
struct resource *mem;
+ bool ignore_resource_conflicts;
int irq;
unsigned long clk_rate;
const char *clk_con_id;
diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c
deleted file mode 100644
index dd7eb614c28e..000000000000
--- a/drivers/mfd/intel_pmt.c
+++ /dev/null
@@ -1,261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel Platform Monitoring Technology PMT driver
- *
- * Copyright (c) 2020, Intel Corporation.
- * All Rights Reserved.
- *
- * Author: David E. Box <david.e.box@linux.intel.com>
- */
-
-#include <linux/bits.h>
-#include <linux/kernel.h>
-#include <linux/mfd/core.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/types.h>
-
-/* Intel DVSEC capability vendor space offsets */
-#define INTEL_DVSEC_ENTRIES 0xA
-#define INTEL_DVSEC_SIZE 0xB
-#define INTEL_DVSEC_TABLE 0xC
-#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
-#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
-#define INTEL_DVSEC_ENTRY_SIZE 4
-
-/* PMT capabilities */
-#define DVSEC_INTEL_ID_TELEMETRY 2
-#define DVSEC_INTEL_ID_WATCHER 3
-#define DVSEC_INTEL_ID_CRASHLOG 4
-
-struct intel_dvsec_header {
- u16 length;
- u16 id;
- u8 num_entries;
- u8 entry_size;
- u8 tbir;
- u32 offset;
-};
-
-enum pmt_quirks {
- /* Watcher capability not supported */
- PMT_QUIRK_NO_WATCHER = BIT(0),
-
- /* Crashlog capability not supported */
- PMT_QUIRK_NO_CRASHLOG = BIT(1),
-
- /* Use shift instead of mask to read discovery table offset */
- PMT_QUIRK_TABLE_SHIFT = BIT(2),
-
- /* DVSEC not present (provided in driver data) */
- PMT_QUIRK_NO_DVSEC = BIT(3),
-};
-
-struct pmt_platform_info {
- unsigned long quirks;
- struct intel_dvsec_header **capabilities;
-};
-
-static const struct pmt_platform_info tgl_info = {
- .quirks = PMT_QUIRK_NO_WATCHER | PMT_QUIRK_NO_CRASHLOG |
- PMT_QUIRK_TABLE_SHIFT,
-};
-
-/* DG1 Platform with DVSEC quirk */
-static struct intel_dvsec_header dg1_telemetry = {
- .length = 0x10,
- .id = 2,
- .num_entries = 1,
- .entry_size = 3,
- .tbir = 0,
- .offset = 0x466000,
-};
-
-static struct intel_dvsec_header *dg1_capabilities[] = {
- &dg1_telemetry,
- NULL
-};
-
-static const struct pmt_platform_info dg1_info = {
- .quirks = PMT_QUIRK_NO_DVSEC,
- .capabilities = dg1_capabilities,
-};
-
-static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
- unsigned long quirks)
-{
- struct device *dev = &pdev->dev;
- struct resource *res, *tmp;
- struct mfd_cell *cell;
- const char *name;
- int count = header->num_entries;
- int size = header->entry_size;
- int id = header->id;
- int i;
-
- switch (id) {
- case DVSEC_INTEL_ID_TELEMETRY:
- name = "pmt_telemetry";
- break;
- case DVSEC_INTEL_ID_WATCHER:
- if (quirks & PMT_QUIRK_NO_WATCHER) {
- dev_info(dev, "Watcher not supported\n");
- return -EINVAL;
- }
- name = "pmt_watcher";
- break;
- case DVSEC_INTEL_ID_CRASHLOG:
- if (quirks & PMT_QUIRK_NO_CRASHLOG) {
- dev_info(dev, "Crashlog not supported\n");
- return -EINVAL;
- }
- name = "pmt_crashlog";
- break;
- default:
- return -EINVAL;
- }
-
- if (!header->num_entries || !header->entry_size) {
- dev_err(dev, "Invalid count or size for %s header\n", name);
- return -EINVAL;
- }
-
- cell = devm_kzalloc(dev, sizeof(*cell), GFP_KERNEL);
- if (!cell)
- return -ENOMEM;
-
- res = devm_kcalloc(dev, count, sizeof(*res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- if (quirks & PMT_QUIRK_TABLE_SHIFT)
- header->offset >>= 3;
-
- /*
- * The PMT DVSEC contains the starting offset and count for a block of
- * discovery tables, each providing access to monitoring facilities for
- * a section of the device. Create a resource list of these tables to
- * provide to the driver.
- */
- for (i = 0, tmp = res; i < count; i++, tmp++) {
- tmp->start = pdev->resource[header->tbir].start +
- header->offset + i * (size << 2);
- tmp->end = tmp->start + (size << 2) - 1;
- tmp->flags = IORESOURCE_MEM;
- }
-
- cell->resources = res;
- cell->num_resources = count;
- cell->name = name;
-
- return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cell, 1, NULL, 0,
- NULL);
-}
-
-static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct pmt_platform_info *info;
- unsigned long quirks = 0;
- bool found_devices = false;
- int ret, pos = 0;
-
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
- info = (struct pmt_platform_info *)id->driver_data;
-
- if (info)
- quirks = info->quirks;
-
- if (info && (info->quirks & PMT_QUIRK_NO_DVSEC)) {
- struct intel_dvsec_header **header;
-
- header = info->capabilities;
- while (*header) {
- ret = pmt_add_dev(pdev, *header, quirks);
- if (ret)
- dev_warn(&pdev->dev,
- "Failed to add device for DVSEC id %d\n",
- (*header)->id);
- else
- found_devices = true;
-
- ++header;
- }
- } else {
- do {
- struct intel_dvsec_header header;
- u32 table;
- u16 vid;
-
- pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
- if (!pos)
- break;
-
- pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vid);
- if (vid != PCI_VENDOR_ID_INTEL)
- continue;
-
- pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2,
- &header.id);
- pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES,
- &header.num_entries);
- pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE,
- &header.entry_size);
- pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE,
- &table);
-
- header.tbir = INTEL_DVSEC_TABLE_BAR(table);
- header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
-
- ret = pmt_add_dev(pdev, &header, quirks);
- if (ret)
- continue;
-
- found_devices = true;
- } while (true);
- }
-
- if (!found_devices)
- return -ENODEV;
-
- pm_runtime_put(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-}
-
-static void pmt_pci_remove(struct pci_dev *pdev)
-{
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-}
-
-#define PCI_DEVICE_ID_INTEL_PMT_ADL 0x467d
-#define PCI_DEVICE_ID_INTEL_PMT_DG1 0x490e
-#define PCI_DEVICE_ID_INTEL_PMT_OOBMSM 0x09a7
-#define PCI_DEVICE_ID_INTEL_PMT_TGL 0x9a0d
-static const struct pci_device_id pmt_pci_ids[] = {
- { PCI_DEVICE_DATA(INTEL, PMT_ADL, &tgl_info) },
- { PCI_DEVICE_DATA(INTEL, PMT_DG1, &dg1_info) },
- { PCI_DEVICE_DATA(INTEL, PMT_OOBMSM, NULL) },
- { PCI_DEVICE_DATA(INTEL, PMT_TGL, &tgl_info) },
- { }
-};
-MODULE_DEVICE_TABLE(pci, pmt_pci_ids);
-
-static struct pci_driver pmt_pci_driver = {
- .name = "intel-pmt",
- .id_table = pmt_pci_ids,
- .probe = pmt_pci_probe,
- .remove = pmt_pci_remove,
-};
-module_pci_driver(pmt_pci_driver);
-
-MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
-MODULE_DESCRIPTION("Intel Platform Monitoring Technology PMT driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index ddd64f9e3341..47cb7f00dfcf 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -14,15 +14,12 @@
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/platform_data/x86/soc.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include "intel_soc_pmic_core.h"
-/* Crystal Cove PMIC shares same ACPI ID between different platforms */
-#define BYT_CRC_HRV 2
-#define CHT_CRC_HRV 3
-
/* PWM consumed by the Intel GFX */
static struct pwm_lookup crc_pwm_lookup[] = {
PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
@@ -34,31 +31,12 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
struct device *dev = &i2c->dev;
struct intel_soc_pmic_config *config;
struct intel_soc_pmic *pmic;
- unsigned long long hrv;
- acpi_status status;
int ret;
- /*
- * There are 2 different Crystal Cove PMICs a Bay Trail and Cherry
- * Trail version, use _HRV to differentiate between the 2.
- */
- status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "Failed to get PMIC hardware revision\n");
- return -ENODEV;
- }
-
- switch (hrv) {
- case BYT_CRC_HRV:
+ if (soc_intel_is_byt())
config = &intel_soc_pmic_config_byt_crc;
- break;
- case CHT_CRC_HRV:
+ else
config = &intel_soc_pmic_config_cht_crc;
- break;
- default:
- dev_warn(dev, "Unknown hardware rev %llu, assuming BYT\n", hrv);
- config = &intel_soc_pmic_config_byt_crc;
- }
pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
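
/*
 * For context: soc_intel_is_byt() from <linux/platform_data/x86/soc.h>
 * identifies the SoC by x86 CPU model instead of evaluating the ACPI
 * _HRV object, which is what allows the _HRV-based switch (and its
 * failure paths) to be dropped above.
 */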
diff --git a/drivers/mfd/rohm-bd70528.c b/drivers/mfd/rohm-bd70528.c
deleted file mode 100644
index 5c44d3b77b3e..000000000000
--- a/drivers/mfd/rohm-bd70528.c
+++ /dev/null
@@ -1,314 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-//
-// Copyright (C) 2019 ROHM Semiconductors
-//
-// ROHM BD70528 PMIC driver
-
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/rohm-bd70528.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-#include <linux/types.h>
-
-#define BD70528_NUM_OF_GPIOS 4
-
-static const struct resource rtc_irqs[] = {
- DEFINE_RES_IRQ_NAMED(BD70528_INT_RTC_ALARM, "bd70528-rtc-alm"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_ELPS_TIM, "bd70528-elapsed-timer"),
-};
-
-static const struct resource charger_irqs[] = {
- DEFINE_RES_IRQ_NAMED(BD70528_INT_BAT_OV_RES, "bd70528-bat-ov-res"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_BAT_OV_DET, "bd70528-bat-ov-det"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_DBAT_DET, "bd70528-bat-dead"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_BATTSD_COLD_RES, "bd70528-bat-warmed"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_BATTSD_COLD_DET, "bd70528-bat-cold"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_BATTSD_HOT_RES, "bd70528-bat-cooled"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_BATTSD_HOT_DET, "bd70528-bat-hot"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_CHG_TSD, "bd70528-chg-tshd"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_BAT_RMV, "bd70528-bat-removed"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_BAT_DET, "bd70528-bat-detected"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_DCIN2_OV_RES, "bd70528-dcin2-ov-res"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_DCIN2_OV_DET, "bd70528-dcin2-ov-det"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_DCIN2_RMV, "bd70528-dcin2-removed"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_DCIN2_DET, "bd70528-dcin2-detected"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_DCIN1_RMV, "bd70528-dcin1-removed"),
- DEFINE_RES_IRQ_NAMED(BD70528_INT_DCIN1_DET, "bd70528-dcin1-detected"),
-};
-
-static struct mfd_cell bd70528_mfd_cells[] = {
- { .name = "bd70528-pmic", },
- { .name = "bd70528-gpio", },
- /*
- * We use BD71837 driver to drive the clock block. Only differences to
- * BD70528 clock gate are the register address and mask.
- */
- { .name = "bd70528-clk", },
- { .name = "bd70528-wdt", },
- {
- .name = "bd70528-power",
- .resources = charger_irqs,
- .num_resources = ARRAY_SIZE(charger_irqs),
- }, {
- .name = "bd70528-rtc",
- .resources = rtc_irqs,
- .num_resources = ARRAY_SIZE(rtc_irqs),
- },
-};
-
-static const struct regmap_range volatile_ranges[] = {
- {
- .range_min = BD70528_REG_INT_MAIN,
- .range_max = BD70528_REG_INT_OP_FAIL,
- }, {
- .range_min = BD70528_REG_RTC_COUNT_H,
- .range_max = BD70528_REG_RTC_ALM_REPEAT,
- }, {
- /*
- * WDT control reg is special. Magic values must be written to
- * it in order to change the control. Should not be cached.
- */
- .range_min = BD70528_REG_WDT_CTRL,
- .range_max = BD70528_REG_WDT_CTRL,
- }, {
- /*
- * BD70528 also contains a few other registers which require
- * magic sequences to be written in order to update the value.
- * At least SHIPMODE, HWRESET, WARMRESET, and STANDBY
- */
- .range_min = BD70528_REG_SHIPMODE,
- .range_max = BD70528_REG_STANDBY,
- },
-};
-
-static const struct regmap_access_table volatile_regs = {
- .yes_ranges = &volatile_ranges[0],
- .n_yes_ranges = ARRAY_SIZE(volatile_ranges),
-};
-
-static struct regmap_config bd70528_regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .volatile_table = &volatile_regs,
- .max_register = BD70528_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
-};
-
-/*
- * Mapping of main IRQ register bits to sub-IRQ register offsets so that we can
- * access correct sub-IRQ registers based on bits that are set in main IRQ
- * register.
- */
-
-static unsigned int bit0_offsets[] = {0}; /* Shutdown */
-static unsigned int bit1_offsets[] = {1}; /* Power failure */
-static unsigned int bit2_offsets[] = {2}; /* VR FAULT */
-static unsigned int bit3_offsets[] = {3}; /* PMU interrupts */
-static unsigned int bit4_offsets[] = {4, 5}; /* Charger 1 and Charger 2 */
-static unsigned int bit5_offsets[] = {6}; /* RTC */
-static unsigned int bit6_offsets[] = {7}; /* GPIO */
-static unsigned int bit7_offsets[] = {8}; /* Invalid operation */
-
-static struct regmap_irq_sub_irq_map bd70528_sub_irq_offsets[] = {
- REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
- REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
- REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
- REGMAP_IRQ_MAIN_REG_OFFSET(bit3_offsets),
- REGMAP_IRQ_MAIN_REG_OFFSET(bit4_offsets),
- REGMAP_IRQ_MAIN_REG_OFFSET(bit5_offsets),
- REGMAP_IRQ_MAIN_REG_OFFSET(bit6_offsets),
- REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
-};
-
-static struct regmap_irq bd70528_irqs[] = {
- REGMAP_IRQ_REG(BD70528_INT_LONGPUSH, 0, BD70528_INT_LONGPUSH_MASK),
- REGMAP_IRQ_REG(BD70528_INT_WDT, 0, BD70528_INT_WDT_MASK),
- REGMAP_IRQ_REG(BD70528_INT_HWRESET, 0, BD70528_INT_HWRESET_MASK),
- REGMAP_IRQ_REG(BD70528_INT_RSTB_FAULT, 0, BD70528_INT_RSTB_FAULT_MASK),
- REGMAP_IRQ_REG(BD70528_INT_VBAT_UVLO, 0, BD70528_INT_VBAT_UVLO_MASK),
- REGMAP_IRQ_REG(BD70528_INT_TSD, 0, BD70528_INT_TSD_MASK),
- REGMAP_IRQ_REG(BD70528_INT_RSTIN, 0, BD70528_INT_RSTIN_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK1_FAULT, 1,
- BD70528_INT_BUCK1_FAULT_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK2_FAULT, 1,
- BD70528_INT_BUCK2_FAULT_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK3_FAULT, 1,
- BD70528_INT_BUCK3_FAULT_MASK),
- REGMAP_IRQ_REG(BD70528_INT_LDO1_FAULT, 1, BD70528_INT_LDO1_FAULT_MASK),
- REGMAP_IRQ_REG(BD70528_INT_LDO2_FAULT, 1, BD70528_INT_LDO2_FAULT_MASK),
- REGMAP_IRQ_REG(BD70528_INT_LDO3_FAULT, 1, BD70528_INT_LDO3_FAULT_MASK),
- REGMAP_IRQ_REG(BD70528_INT_LED1_FAULT, 1, BD70528_INT_LED1_FAULT_MASK),
- REGMAP_IRQ_REG(BD70528_INT_LED2_FAULT, 1, BD70528_INT_LED2_FAULT_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK1_OCP, 2, BD70528_INT_BUCK1_OCP_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK2_OCP, 2, BD70528_INT_BUCK2_OCP_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK3_OCP, 2, BD70528_INT_BUCK3_OCP_MASK),
- REGMAP_IRQ_REG(BD70528_INT_LED1_OCP, 2, BD70528_INT_LED1_OCP_MASK),
- REGMAP_IRQ_REG(BD70528_INT_LED2_OCP, 2, BD70528_INT_LED2_OCP_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK1_FULLON, 2,
- BD70528_INT_BUCK1_FULLON_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK2_FULLON, 2,
- BD70528_INT_BUCK2_FULLON_MASK),
- REGMAP_IRQ_REG(BD70528_INT_SHORTPUSH, 3, BD70528_INT_SHORTPUSH_MASK),
- REGMAP_IRQ_REG(BD70528_INT_AUTO_WAKEUP, 3,
- BD70528_INT_AUTO_WAKEUP_MASK),
- REGMAP_IRQ_REG(BD70528_INT_STATE_CHANGE, 3,
- BD70528_INT_STATE_CHANGE_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BAT_OV_RES, 4, BD70528_INT_BAT_OV_RES_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BAT_OV_DET, 4, BD70528_INT_BAT_OV_DET_MASK),
- REGMAP_IRQ_REG(BD70528_INT_DBAT_DET, 4, BD70528_INT_DBAT_DET_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BATTSD_COLD_RES, 4,
- BD70528_INT_BATTSD_COLD_RES_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BATTSD_COLD_DET, 4,
- BD70528_INT_BATTSD_COLD_DET_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BATTSD_HOT_RES, 4,
- BD70528_INT_BATTSD_HOT_RES_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BATTSD_HOT_DET, 4,
- BD70528_INT_BATTSD_HOT_DET_MASK),
- REGMAP_IRQ_REG(BD70528_INT_CHG_TSD, 4, BD70528_INT_CHG_TSD_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BAT_RMV, 5, BD70528_INT_BAT_RMV_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BAT_DET, 5, BD70528_INT_BAT_DET_MASK),
- REGMAP_IRQ_REG(BD70528_INT_DCIN2_OV_RES, 5,
- BD70528_INT_DCIN2_OV_RES_MASK),
- REGMAP_IRQ_REG(BD70528_INT_DCIN2_OV_DET, 5,
- BD70528_INT_DCIN2_OV_DET_MASK),
- REGMAP_IRQ_REG(BD70528_INT_DCIN2_RMV, 5, BD70528_INT_DCIN2_RMV_MASK),
- REGMAP_IRQ_REG(BD70528_INT_DCIN2_DET, 5, BD70528_INT_DCIN2_DET_MASK),
- REGMAP_IRQ_REG(BD70528_INT_DCIN1_RMV, 5, BD70528_INT_DCIN1_RMV_MASK),
- REGMAP_IRQ_REG(BD70528_INT_DCIN1_DET, 5, BD70528_INT_DCIN1_DET_MASK),
- REGMAP_IRQ_REG(BD70528_INT_RTC_ALARM, 6, BD70528_INT_RTC_ALARM_MASK),
- REGMAP_IRQ_REG(BD70528_INT_ELPS_TIM, 6, BD70528_INT_ELPS_TIM_MASK),
- REGMAP_IRQ_REG(BD70528_INT_GPIO0, 7, BD70528_INT_GPIO0_MASK),
- REGMAP_IRQ_REG(BD70528_INT_GPIO1, 7, BD70528_INT_GPIO1_MASK),
- REGMAP_IRQ_REG(BD70528_INT_GPIO2, 7, BD70528_INT_GPIO2_MASK),
- REGMAP_IRQ_REG(BD70528_INT_GPIO3, 7, BD70528_INT_GPIO3_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK1_DVS_OPFAIL, 8,
- BD70528_INT_BUCK1_DVS_OPFAIL_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK2_DVS_OPFAIL, 8,
- BD70528_INT_BUCK2_DVS_OPFAIL_MASK),
- REGMAP_IRQ_REG(BD70528_INT_BUCK3_DVS_OPFAIL, 8,
- BD70528_INT_BUCK3_DVS_OPFAIL_MASK),
- REGMAP_IRQ_REG(BD70528_INT_LED1_VOLT_OPFAIL, 8,
- BD70528_INT_LED1_VOLT_OPFAIL_MASK),
- REGMAP_IRQ_REG(BD70528_INT_LED2_VOLT_OPFAIL, 8,
- BD70528_INT_LED2_VOLT_OPFAIL_MASK),
-};
-
-static struct regmap_irq_chip bd70528_irq_chip = {
- .name = "bd70528_irq",
- .main_status = BD70528_REG_INT_MAIN,
- .irqs = &bd70528_irqs[0],
- .num_irqs = ARRAY_SIZE(bd70528_irqs),
- .status_base = BD70528_REG_INT_SHDN,
- .mask_base = BD70528_REG_INT_SHDN_MASK,
- .ack_base = BD70528_REG_INT_SHDN,
- .type_base = BD70528_REG_GPIO1_IN,
- .init_ack_masked = true,
- .num_regs = 9,
- .num_main_regs = 1,
- .num_type_reg = 4,
- .sub_reg_offsets = &bd70528_sub_irq_offsets[0],
- .num_main_status_bits = 8,
- .irq_reg_stride = 1,
-};
-
-static int bd70528_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
-{
- struct bd70528_data *bd70528;
- struct regmap_irq_chip_data *irq_data;
- int ret, i;
-
- if (!i2c->irq) {
- dev_err(&i2c->dev, "No IRQ configured\n");
- return -EINVAL;
- }
-
- bd70528 = devm_kzalloc(&i2c->dev, sizeof(*bd70528), GFP_KERNEL);
- if (!bd70528)
- return -ENOMEM;
-
- mutex_init(&bd70528->rtc_timer_lock);
-
- dev_set_drvdata(&i2c->dev, &bd70528->chip);
-
- bd70528->chip.regmap = devm_regmap_init_i2c(i2c, &bd70528_regmap);
- if (IS_ERR(bd70528->chip.regmap)) {
- dev_err(&i2c->dev, "Failed to initialize Regmap\n");
- return PTR_ERR(bd70528->chip.regmap);
- }
-
- /*
- * Disallow type setting for all IRQs by default as most of them do not
- * support setting type.
- */
- for (i = 0; i < ARRAY_SIZE(bd70528_irqs); i++)
- bd70528_irqs[i].type.types_supported = 0;
-
- /* Set IRQ typesetting information for GPIO pins 0 - 3 */
- for (i = 0; i < BD70528_NUM_OF_GPIOS; i++) {
- struct regmap_irq_type *type;
-
- type = &bd70528_irqs[BD70528_INT_GPIO0 + i].type;
- type->type_reg_offset = 2 * i;
- type->type_rising_val = 0x20;
- type->type_falling_val = 0x10;
- type->type_level_high_val = 0x40;
- type->type_level_low_val = 0x50;
- type->types_supported = (IRQ_TYPE_EDGE_BOTH |
- IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW);
- }
-
- ret = devm_regmap_add_irq_chip(&i2c->dev, bd70528->chip.regmap,
- i2c->irq, IRQF_ONESHOT, 0,
- &bd70528_irq_chip, &irq_data);
- if (ret) {
- dev_err(&i2c->dev, "Failed to add IRQ chip\n");
- return ret;
- }
- dev_dbg(&i2c->dev, "Registered %d IRQs for chip\n",
- bd70528_irq_chip.num_irqs);
-
- /*
- * BD70528 IRQ controller is not touching the main mask register.
- * So enable the GPIO block interrupts at main level. We can just leave
- * them enabled as the IRQ controller should disable IRQs from
- * sub-registers when IRQ is disabled or freed.
- */
- ret = regmap_update_bits(bd70528->chip.regmap,
- BD70528_REG_INT_MAIN_MASK,
- BD70528_INT_GPIO_MASK, 0);
-
- ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO,
- bd70528_mfd_cells,
- ARRAY_SIZE(bd70528_mfd_cells), NULL, 0,
- regmap_irq_get_domain(irq_data));
- if (ret)
- dev_err(&i2c->dev, "Failed to create subdevices\n");
-
- return ret;
-}
-
-static const struct of_device_id bd70528_of_match[] = {
- { .compatible = "rohm,bd70528", },
- { },
-};
-MODULE_DEVICE_TABLE(of, bd70528_of_match);
-
-static struct i2c_driver bd70528_drv = {
- .driver = {
- .name = "rohm-bd70528",
- .of_match_table = bd70528_of_match,
- },
- .probe = &bd70528_i2c_probe,
-};
-
-module_i2c_driver(bd70528_drv);
-
-MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("ROHM BD70528 Power Management IC driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index e928df95e316..aeb9ea55f97d 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1361,7 +1361,7 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata,
pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
if (of_node_name_eq(child, "stmpe_gpio")) {
pdata->blocks |= STMPE_BLOCK_GPIO;
} else if (of_node_name_eq(child, "stmpe_keypad")) {
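
/*
 * The one-line change above in brief: for_each_child_of_node() visits
 * every child, while for_each_available_child_of_node() skips nodes
 * whose devicetree status is not "okay" (e.g. status = "disabled"), so
 * disabled STMPE sub-blocks are no longer registered.
 */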
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 6e105cca27d4..67e2707af4bc 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -436,15 +436,6 @@ static void tps65910_power_off(void)
tps65910 = dev_get_drvdata(&tps65910_i2c_client->dev);
- /*
- * The PWR_OFF bit needs to be set separately, before transitioning
- * to the OFF state. It enables the "sequential" power-off mode on
- * TPS65911, it's a NO-OP on TPS65910.
- */
- if (regmap_set_bits(tps65910->regmap, TPS65910_DEVCTRL,
- DEVCTRL_PWR_OFF_MASK) < 0)
- return;
-
regmap_update_bits(tps65910->regmap, TPS65910_DEVCTRL,
DEVCTRL_DEV_OFF_MASK | DEVCTRL_DEV_ON_MASK,
DEVCTRL_DEV_OFF_MASK);
@@ -504,6 +495,19 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
tps65910_sleepinit(tps65910, pmic_plat_data);
if (pmic_plat_data->pm_off && !pm_power_off) {
+ /*
+ * The PWR_OFF bit needs to be set separately, before
+ * transitioning to the OFF state. It enables the "sequential"
+ * power-off mode on TPS65911, it's a NO-OP on TPS65910.
+ */
+ ret = regmap_set_bits(tps65910->regmap, TPS65910_DEVCTRL,
+ DEVCTRL_PWR_OFF_MASK);
+ if (ret) {
+ dev_err(&i2c->dev, "failed to set power-off mode: %d\n",
+ ret);
+ return ret;
+ }
+
tps65910_i2c_client = i2c;
pm_power_off = tps65910_power_off;
}
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 51aecafdcbdf..5efc4151bf58 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -6,6 +6,7 @@
config CXL_BASE
bool
select PPC_COPRO_BASE
+ select PPC_64S_HASH_MMU
config CXL
tristate "Support for IBM Coherent Accelerators (CXL)"
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index c173a5e88c91..315c43f17dd3 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -570,6 +570,7 @@ static struct attribute *afu_cr_attrs[] = {
&class_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(afu_cr);
static void release_afu_config_record(struct kobject *kobj)
{
@@ -581,7 +582,7 @@ static void release_afu_config_record(struct kobject *kobj)
static struct kobj_type afu_config_record_type = {
.sysfs_ops = &kobj_sysfs_ops,
.release = release_afu_config_record,
- .default_attrs = afu_cr_attrs,
+ .default_groups = afu_cr_groups,
};
static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
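
/*
 * A generic sketch of the conversion above, with illustrative "foo"
 * names: ATTRIBUTE_GROUPS(foo) takes the NULL-terminated foo_attrs[]
 * array and generates both foo_group and the NULL-terminated
 * foo_groups[] that kobj_type now expects in .default_groups, replacing
 * the removed .default_attrs field.
 */
static struct attribute *foo_attrs[] = {
	/* &some_attribute.attr, ... */
	NULL,
};
ATTRIBUTE_GROUPS(foo);

static struct kobj_type foo_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= foo_groups,
};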
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 49ab656e8a96..633e1cf08d6e 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -68,11 +68,6 @@
* which won't work on pure SMBus systems.
*/
-struct at24_client {
- struct i2c_client *client;
- struct regmap *regmap;
-};
-
struct at24_data {
/*
* Lock protects against activities from other Linux tasks,
@@ -94,9 +89,10 @@ struct at24_data {
/*
* Some chips tie up multiple I2C addresses; dummy devices reserve
- * them for us, and we'll use them with SMBus calls.
+ * them for us.
*/
- struct at24_client client[];
+ u8 bank_addr_shift;
+ struct regmap *client_regmaps[];
};
/*
@@ -123,6 +119,7 @@ MODULE_PARM_DESC(at24_write_timeout, "Time (in ms) to try writes (default 25)");
struct at24_chip_data {
u32 byte_len;
u8 flags;
+ u8 bank_addr_shift;
void (*read_post)(unsigned int off, char *buf, size_t count);
};
@@ -137,6 +134,12 @@ struct at24_chip_data {
.read_post = _read_post, \
}
+#define AT24_CHIP_DATA_BS(_name, _len, _flags, _bank_addr_shift) \
+ static const struct at24_chip_data _name = { \
+ .byte_len = _len, .flags = _flags, \
+ .bank_addr_shift = _bank_addr_shift \
+ }
+
static void at24_read_post_vaio(unsigned int off, char *buf, size_t count)
{
int i;
@@ -197,6 +200,7 @@ AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16);
+AT24_CHIP_DATA_BS(at24_data_24c1025, 1048576 / 8, AT24_FLAG_ADDR16, 2);
AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16);
/* identical to 24c08 ? */
AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0);
@@ -225,6 +229,7 @@ static const struct i2c_device_id at24_ids[] = {
{ "24c256", (kernel_ulong_t)&at24_data_24c256 },
{ "24c512", (kernel_ulong_t)&at24_data_24c512 },
{ "24c1024", (kernel_ulong_t)&at24_data_24c1024 },
+ { "24c1025", (kernel_ulong_t)&at24_data_24c1025 },
{ "24c2048", (kernel_ulong_t)&at24_data_24c2048 },
{ "at24", 0 },
{ /* END OF LIST */ }
@@ -254,6 +259,7 @@ static const struct of_device_id at24_of_match[] = {
{ .compatible = "atmel,24c256", .data = &at24_data_24c256 },
{ .compatible = "atmel,24c512", .data = &at24_data_24c512 },
{ .compatible = "atmel,24c1024", .data = &at24_data_24c1024 },
+ { .compatible = "atmel,24c1025", .data = &at24_data_24c1025 },
{ .compatible = "atmel,24c2048", .data = &at24_data_24c2048 },
{ /* END OF LIST */ },
};
@@ -275,8 +281,8 @@ MODULE_DEVICE_TABLE(acpi, at24_acpi_ids);
* set the byte address; on a multi-master board, another master
* may have changed the chip's "current" address pointer.
*/
-static struct at24_client *at24_translate_offset(struct at24_data *at24,
- unsigned int *offset)
+static struct regmap *at24_translate_offset(struct at24_data *at24,
+ unsigned int *offset)
{
unsigned int i;
@@ -288,12 +294,12 @@ static struct at24_client *at24_translate_offset(struct at24_data *at24,
*offset &= 0xff;
}
- return &at24->client[i];
+ return at24->client_regmaps[i];
}
static struct device *at24_base_client_dev(struct at24_data *at24)
{
- return &at24->client[0].client->dev;
+ return regmap_get_device(at24->client_regmaps[0]);
}
static size_t at24_adjust_read_count(struct at24_data *at24,
@@ -324,14 +330,10 @@ static ssize_t at24_regmap_read(struct at24_data *at24, char *buf,
unsigned int offset, size_t count)
{
unsigned long timeout, read_time;
- struct at24_client *at24_client;
- struct i2c_client *client;
struct regmap *regmap;
int ret;
- at24_client = at24_translate_offset(at24, &offset);
- regmap = at24_client->regmap;
- client = at24_client->client;
+ regmap = at24_translate_offset(at24, &offset);
count = at24_adjust_read_count(at24, offset, count);
/* adjust offset for mac and serial read ops */
@@ -346,7 +348,7 @@ static ssize_t at24_regmap_read(struct at24_data *at24, char *buf,
read_time = jiffies;
ret = regmap_bulk_read(regmap, offset, buf, count);
- dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
+ dev_dbg(regmap_get_device(regmap), "read %zu@%d --> %d (%ld)\n",
count, offset, ret, jiffies);
if (!ret)
return count;
@@ -387,14 +389,10 @@ static ssize_t at24_regmap_write(struct at24_data *at24, const char *buf,
unsigned int offset, size_t count)
{
unsigned long timeout, write_time;
- struct at24_client *at24_client;
- struct i2c_client *client;
struct regmap *regmap;
int ret;
- at24_client = at24_translate_offset(at24, &offset);
- regmap = at24_client->regmap;
- client = at24_client->client;
+ regmap = at24_translate_offset(at24, &offset);
count = at24_adjust_write_count(at24, offset, count);
timeout = jiffies + msecs_to_jiffies(at24_write_timeout);
@@ -406,7 +404,7 @@ static ssize_t at24_regmap_write(struct at24_data *at24, const char *buf,
write_time = jiffies;
ret = regmap_bulk_write(regmap, offset, buf, count);
- dev_dbg(&client->dev, "write %zu@%d --> %d (%ld)\n",
+ dev_dbg(regmap_get_device(regmap), "write %zu@%d --> %d (%ld)\n",
count, offset, ret, jiffies);
if (!ret)
return count;
@@ -538,17 +536,16 @@ static const struct at24_chip_data *at24_get_chip_data(struct device *dev)
}
static int at24_make_dummy_client(struct at24_data *at24, unsigned int index,
+ struct i2c_client *base_client,
struct regmap_config *regmap_config)
{
- struct i2c_client *base_client, *dummy_client;
+ struct i2c_client *dummy_client;
struct regmap *regmap;
- struct device *dev;
-
- base_client = at24->client[0].client;
- dev = &base_client->dev;
- dummy_client = devm_i2c_new_dummy_device(dev, base_client->adapter,
- base_client->addr + index);
+ dummy_client = devm_i2c_new_dummy_device(&base_client->dev,
+ base_client->adapter,
+ base_client->addr +
+ (index << at24->bank_addr_shift));
if (IS_ERR(dummy_client))
return PTR_ERR(dummy_client);
@@ -556,8 +553,7 @@ static int at24_make_dummy_client(struct at24_data *at24, unsigned int index,
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- at24->client[index].client = dummy_client;
- at24->client[index].regmap = regmap;
+ at24->client_regmaps[index] = regmap;
return 0;
}
@@ -680,7 +676,7 @@ static int at24_probe(struct i2c_client *client)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- at24 = devm_kzalloc(dev, struct_size(at24, client, num_addresses),
+ at24 = devm_kzalloc(dev, struct_size(at24, client_regmaps, num_addresses),
GFP_KERNEL);
if (!at24)
return -ENOMEM;
@@ -690,10 +686,10 @@ static int at24_probe(struct i2c_client *client)
at24->page_size = page_size;
at24->flags = flags;
at24->read_post = cdata->read_post;
+ at24->bank_addr_shift = cdata->bank_addr_shift;
at24->num_addresses = num_addresses;
at24->offset_adj = at24_get_offset_adj(flags, byte_len);
- at24->client[0].client = client;
- at24->client[0].regmap = regmap;
+ at24->client_regmaps[0] = regmap;
at24->vcc_reg = devm_regulator_get(dev, "vcc");
if (IS_ERR(at24->vcc_reg))
@@ -709,7 +705,7 @@ static int at24_probe(struct i2c_client *client)
/* use dummy devices for multiple-address chips */
for (i = 1; i < num_addresses; i++) {
- err = at24_make_dummy_client(at24, i, &regmap_config);
+ err = at24_make_dummy_client(at24, i, client, &regmap_config);
if (err)
return err;
}
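
/*
 * Worked example of the new bank_addr_shift handling, assuming a
 * 24c1025 at base address 0x50: that chip selects its second 64 KiB
 * bank through the A2 position of the I2C address, so
 * bank_addr_shift = 2 places the dummy client at
 * 0x50 + (1 << 2) = 0x54, while a 24c1024 (shift 0) keeps the legacy
 * 0x50/0x51 layout.
 */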
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index b38978a3b3ff..bee727ed98db 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -1,28 +1,27 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * at25.c -- support most SPI EEPROMs, such as Atmel AT25 models
- * and Cypress FRAMs FM25 models
+ * Driver for most of the SPI EEPROMs, such as Atmel AT25 models
+ * and Cypress FRAMs FM25 models.
*
* Copyright (C) 2006 David Brownell
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
#include <linux/sched.h>
+#include <linux/slab.h>
-#include <linux/nvmem-provider.h>
-#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
-#include <linux/property.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/math.h>
+#include <linux/spi/spi.h>
+
+#include <linux/nvmem-provider.h>
/*
- * NOTE: this is an *EEPROM* driver. The vagaries of product naming
+ * NOTE: this is an *EEPROM* driver. The vagaries of product naming
* mean that some AT25 products are EEPROMs, and others are FLASH.
* Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
* not this one!
@@ -33,9 +32,9 @@
#define FM25_SN_LEN 8 /* serial number length */
struct at25_data {
+ struct spi_eeprom chip;
struct spi_device *spi;
struct mutex lock;
- struct spi_eeprom chip;
unsigned addrlen;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
@@ -58,13 +57,14 @@ struct at25_data {
#define AT25_SR_BP1 0x08
#define AT25_SR_WPEN 0x80 /* writeprotect enable */
-#define AT25_INSTR_BIT3 0x08 /* Additional address bit in instr */
+#define AT25_INSTR_BIT3 0x08 /* additional address bit in instr */
#define FM25_ID_LEN 9 /* ID length */
#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
-/* Specs often allow 5 msec for a page write, sometimes 20 msec;
+/*
+ * Specs often allow 5ms for a page write, sometimes 20ms;
* it's important to recover from write timeouts.
*/
#define EE_TIMEOUT 25
@@ -96,7 +96,7 @@ static int at25_ee_read(void *priv, unsigned int offset,
instr = AT25_READ;
if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
- if (offset >= (1U << (at25->addrlen * 8)))
+ if (offset >= BIT(at25->addrlen * 8))
instr |= AT25_INSTR_BIT3;
*cp++ = instr;
@@ -109,7 +109,7 @@ static int at25_ee_read(void *priv, unsigned int offset,
*cp++ = offset >> 8;
fallthrough;
case 1:
- case 0: /* can't happen: for better codegen */
+ case 0: /* can't happen: for better code generation */
*cp++ = offset >> 0;
}
@@ -126,11 +126,12 @@ static int at25_ee_read(void *priv, unsigned int offset,
mutex_lock(&at25->lock);
- /* Read it all at once.
+ /*
+ * Read it all at once.
*
* REVISIT that's potentially a problem with large chips, if
* other devices on the bus need to be accessed regularly or
- * this chip is clocked very slowly
+ * this chip is clocked very slowly.
*/
status = spi_sync(at25->spi, &m);
dev_dbg(&at25->spi->dev, "read %zu bytes at %d --> %zd\n",
@@ -140,9 +141,7 @@ static int at25_ee_read(void *priv, unsigned int offset,
return status;
}
-/*
- * read extra registers as ID or serial number
- */
+/* Read extra registers as ID or serial number */
static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command,
int len)
{
@@ -208,7 +207,8 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
if (!bounce)
return -ENOMEM;
- /* For write, rollover is within the page ... so we write at
+ /*
+ * For write, rollover is within the page ... so we write at
* most one page, then manually roll over to the next page.
*/
mutex_lock(&at25->lock);
@@ -229,7 +229,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
instr = AT25_WRITE;
if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
- if (offset >= (1U << (at25->addrlen * 8)))
+ if (offset >= BIT(at25->addrlen * 8))
instr |= AT25_INSTR_BIT3;
*cp++ = instr;
@@ -242,7 +242,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
*cp++ = offset >> 8;
fallthrough;
case 1:
- case 0: /* can't happen: for better codegen */
+ case 0: /* can't happen: for better code generation */
*cp++ = offset >> 0;
}
@@ -258,8 +258,9 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
if (status < 0)
break;
- /* REVISIT this should detect (or prevent) failed writes
- * to readonly sections of the EEPROM...
+ /*
+ * REVISIT this should detect (or prevent) failed writes
+ * to read-only sections of the EEPROM...
*/
/* Wait for non-busy status */
@@ -306,34 +307,37 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
{
u32 val;
+ int err;
- memset(chip, 0, sizeof(*chip));
strncpy(chip->name, "at25", sizeof(chip->name));
- if (device_property_read_u32(dev, "size", &val) == 0 ||
- device_property_read_u32(dev, "at25,byte-len", &val) == 0) {
- chip->byte_len = val;
- } else {
+ err = device_property_read_u32(dev, "size", &val);
+ if (err)
+ err = device_property_read_u32(dev, "at25,byte-len", &val);
+ if (err) {
dev_err(dev, "Error: missing \"size\" property\n");
- return -ENODEV;
+ return err;
}
+ chip->byte_len = val;
- if (device_property_read_u32(dev, "pagesize", &val) == 0 ||
- device_property_read_u32(dev, "at25,page-size", &val) == 0) {
- chip->page_size = val;
- } else {
+ err = device_property_read_u32(dev, "pagesize", &val);
+ if (err)
+ err = device_property_read_u32(dev, "at25,page-size", &val);
+ if (err) {
dev_err(dev, "Error: missing \"pagesize\" property\n");
- return -ENODEV;
+ return err;
}
+ chip->page_size = val;
- if (device_property_read_u32(dev, "at25,addr-mode", &val) == 0) {
+ err = device_property_read_u32(dev, "address-width", &val);
+ if (err) {
+ err = device_property_read_u32(dev, "at25,addr-mode", &val);
+ if (err) {
+ dev_err(dev, "Error: missing \"address-width\" property\n");
+ return err;
+ }
chip->flags = (u16)val;
} else {
- if (device_property_read_u32(dev, "address-width", &val)) {
- dev_err(dev,
- "Error: missing \"address-width\" property\n");
- return -ENODEV;
- }
switch (val) {
case 9:
chip->flags |= EE_INSTR_BIT3_IS_ADDR;
@@ -359,16 +363,54 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
return 0;
}
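
Each property lookup above tries the generic name first and falls back to the legacy "at25,"-prefixed one. The repeated pattern could be factored into a helper; a sketch under that assumption (the helper name is hypothetical and not part of this commit):

#include <linux/device.h>
#include <linux/property.h>

static int at25_read_u32_with_fallback(struct device *dev, const char *name,
				       const char *legacy, u32 *val)
{
	/* Prefer the generic property, fall back to the legacy one */
	int err = device_property_read_u32(dev, name, val);

	if (err)
		err = device_property_read_u32(dev, legacy, val);
	if (err)
		dev_err(dev, "Error: missing \"%s\" property\n", name);

	return err;
}
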
+static int at25_fram_to_chip(struct device *dev, struct spi_eeprom *chip)
+{
+ struct at25_data *at25 = container_of(chip, struct at25_data, chip);
+ u8 sernum[FM25_SN_LEN];
+ u8 id[FM25_ID_LEN];
+ int i;
+
+ strncpy(chip->name, "fm25", sizeof(chip->name));
+
+ /* Get ID of chip */
+ fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
+ if (id[6] != 0xc2) {
+ dev_err(dev, "Error: no Cypress FRAM (id %02x)\n", id[6]);
+ return -ENODEV;
+ }
+ /* Set size found in ID */
+ if (id[7] < 0x21 || id[7] > 0x26) {
+ dev_err(dev, "Error: unsupported size (id %02x)\n", id[7]);
+ return -ENODEV;
+ }
+
+ chip->byte_len = BIT(id[7] - 0x21 + 4) * 1024;
+ if (chip->byte_len > 64 * 1024)
+ chip->flags |= EE_ADDR3;
+ else
+ chip->flags |= EE_ADDR2;
+
+ if (id[8]) {
+ fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN);
+ /* Swap byte order */
+ for (i = 0; i < FM25_SN_LEN; i++)
+ at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i];
+ }
+
+ chip->page_size = PAGE_SIZE;
+ return 0;
+}
+
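
The density decoding above maps ID byte 7 (0x21..0x26) to 16 KiB..512 KiB and derives 2- or 3-byte addressing from the result. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* id[7] = 0x21 -> 16 KiB ... 0x26 -> 512 KiB */
	for (unsigned int id7 = 0x21; id7 <= 0x26; id7++) {
		unsigned long bytes = (1UL << (id7 - 0x21 + 4)) * 1024;

		printf("id[7]=%#x -> %lu KiB, %s addressing\n",
		       id7, bytes / 1024,
		       bytes > 64 * 1024 ? "3-byte" : "2-byte");
	}
	return 0;
}
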
static const struct of_device_id at25_of_match[] = {
- { .compatible = "atmel,at25",},
- { .compatible = "cypress,fm25",},
+ { .compatible = "atmel,at25" },
+ { .compatible = "cypress,fm25" },
{ }
};
MODULE_DEVICE_TABLE(of, at25_of_match);
static const struct spi_device_id at25_spi_ids[] = {
- { .name = "at25",},
- { .name = "fm25",},
+ { .name = "at25" },
+ { .name = "fm25" },
{ }
};
MODULE_DEVICE_TABLE(spi, at25_spi_ids);
@@ -378,31 +420,18 @@ static int at25_probe(struct spi_device *spi)
struct at25_data *at25 = NULL;
int err;
int sr;
- u8 id[FM25_ID_LEN];
- u8 sernum[FM25_SN_LEN];
- int i;
- const struct of_device_id *match;
- bool is_fram = 0;
-
- match = of_match_device(of_match_ptr(at25_of_match), &spi->dev);
- if (match && !strcmp(match->compatible, "cypress,fm25"))
- is_fram = 1;
-
- at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
- if (!at25)
- return -ENOMEM;
-
- /* Chip description */
- if (spi->dev.platform_data) {
- memcpy(&at25->chip, spi->dev.platform_data, sizeof(at25->chip));
- } else if (!is_fram) {
- err = at25_fw_to_chip(&spi->dev, &at25->chip);
- if (err)
- return err;
- }
-
- /* Ping the chip ... the status register is pretty portable,
- * unlike probing manufacturer IDs. We do expect that system
+ struct spi_eeprom *pdata;
+ bool is_fram;
+
+ err = device_property_match_string(&spi->dev, "compatible", "cypress,fm25");
+ if (err >= 0)
+ is_fram = true;
+ else
+ is_fram = false;
+
+ /*
+ * Ping the chip ... the status register is pretty portable,
+ * unlike probing manufacturer IDs. We do expect that system
* firmware didn't write it in the past few milliseconds!
*/
sr = spi_w8r8(spi, AT25_RDSR);
@@ -411,39 +440,25 @@ static int at25_probe(struct spi_device *spi)
return -ENXIO;
}
+ at25 = devm_kzalloc(&spi->dev, sizeof(*at25), GFP_KERNEL);
+ if (!at25)
+ return -ENOMEM;
+
mutex_init(&at25->lock);
at25->spi = spi;
spi_set_drvdata(spi, at25);
- if (is_fram) {
- /* Get ID of chip */
- fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
- if (id[6] != 0xc2) {
- dev_err(&spi->dev,
- "Error: no Cypress FRAM (id %02x)\n", id[6]);
- return -ENODEV;
- }
- /* set size found in ID */
- if (id[7] < 0x21 || id[7] > 0x26) {
- dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]);
- return -ENODEV;
- }
- at25->chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
-
- if (at25->chip.byte_len > 64 * 1024)
- at25->chip.flags |= EE_ADDR3;
+ /* Chip description */
+ pdata = dev_get_platdata(&spi->dev);
+ if (pdata) {
+ at25->chip = *pdata;
+ } else {
+ if (is_fram)
+ err = at25_fram_to_chip(&spi->dev, &at25->chip);
else
- at25->chip.flags |= EE_ADDR2;
-
- if (id[8]) {
- fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN);
- /* swap byte order */
- for (i = 0; i < FM25_SN_LEN; i++)
- at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i];
- }
-
- at25->chip.page_size = PAGE_SIZE;
- strncpy(at25->chip.name, "fm25", sizeof(at25->chip.name));
+ err = at25_fw_to_chip(&spi->dev, &at25->chip);
+ if (err)
+ return err;
}
/* For now we only support 8/16/24 bit addressing */
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 8132a84698d5..3c0ae07a2d80 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -57,7 +57,7 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
}
va_block->start = virt_addr;
- va_block->end = virt_addr + page_size;
+ va_block->end = virt_addr + page_size - 1;
va_block->size = page_size;
list_add_tail(&va_block->node, &cb->va_block_list);
}
@@ -80,13 +80,13 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
offset += va_block->size;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);
+ rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
mutex_unlock(&ctx->mmu_lock);
cb->is_mmu_mapped = true;
- return 0;
+ return rc;
err_va_umap:
list_for_each_entry(va_block, &cb->va_block_list, node) {
@@ -97,7 +97,7 @@ err_va_umap:
offset -= va_block->size;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+ rc = hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
mutex_unlock(&ctx->mmu_lock);
@@ -126,7 +126,7 @@ static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
"Failed to unmap CB's va 0x%llx\n",
va_block->start);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+ hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
mutex_unlock(&ctx->mmu_lock);
@@ -250,8 +250,7 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
* Can't use generic function to check this because of special case
* where we create a CB as part of the reset process
*/
- if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
- (ctx_id != HL_KERNEL_ASID_ID))) {
+ if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
dev_warn_ratelimited(hdev->dev,
"Device is disabled or in reset. Can't create new CBs\n");
rc = -EBUSY;
@@ -380,8 +379,9 @@ int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
}
static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u64 cb_handle, u32 *usage_cnt)
+ u64 cb_handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
+ struct hl_vm_va_block *va_block;
struct hl_cb *cb;
u32 handle;
int rc = 0;
@@ -402,7 +402,18 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
goto out;
}
- *usage_cnt = atomic_read(&cb->cs_cnt);
+ if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
+ va_block = list_first_entry(&cb->va_block_list, struct hl_vm_va_block, node);
+ if (va_block) {
+ *device_va = va_block->start;
+ } else {
+ dev_err(hdev->dev, "CB is not mapped to the device's MMU\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ } else {
+ *usage_cnt = atomic_read(&cb->cs_cnt);
+ }
out:
spin_unlock(&mgr->cb_lock);
@@ -414,7 +425,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
union hl_cb_args *args = data;
struct hl_device *hdev = hpriv->hdev;
enum hl_device_status status;
- u64 handle = 0;
+ u64 handle = 0, device_va;
u32 usage_cnt = 0;
int rc;
@@ -450,13 +461,20 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
case HL_CB_OP_INFO:
rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
- &usage_cnt);
- memset(args, 0, sizeof(*args));
- args->out.usage_cnt = usage_cnt;
+ args->in.flags,
+ &usage_cnt,
+ &device_va);
+
+ memset(&args->out, 0, sizeof(args->out));
+
+ if (args->in.flags & HL_CB_FLAGS_GET_DEVICE_VA)
+ args->out.device_va = device_va;
+ else
+ args->out.usage_cnt = usage_cnt;
break;
default:
- rc = -ENOTTY;
+ rc = -EINVAL;
break;
}
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 4c8000fd246c..0a4ef13d9ac4 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -533,8 +533,8 @@ static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
mcs_compl->stream_master_qid_map)) {
/* extract the timestamp only of first completed CS */
if (!mcs_compl->timestamp)
- mcs_compl->timestamp =
- ktime_to_ns(fence->timestamp);
+ mcs_compl->timestamp = ktime_to_ns(fence->timestamp);
+
complete_all(&mcs_compl->completion);
/*
@@ -733,6 +733,14 @@ static void cs_timedout(struct work_struct *work)
hdev = cs->ctx->hdev;
+ /* Save only the first CS timeout parameters */
+ rc = atomic_cmpxchg(&hdev->last_error.cs_write_disable, 0, 1);
+ if (!rc) {
+ hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
+ hdev->last_error.cs_timeout_timestamp = ktime_get();
+ hdev->last_error.cs_timeout_seq = cs->sequence;
+ }
+
switch (cs->type) {
case CS_TYPE_SIGNAL:
dev_err(hdev->dev,
@@ -767,9 +775,9 @@ static void cs_timedout(struct work_struct *work)
if (likely(!skip_reset_on_timeout)) {
if (hdev->reset_on_lockup)
- hl_device_reset(hdev, HL_RESET_TDR);
+ hl_device_reset(hdev, HL_DRV_RESET_TDR);
else
- hdev->needs_reset = true;
+ hdev->reset_info.needs_reset = true;
}
}
@@ -806,7 +814,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
cs->timeout_jiffies = timeout;
cs->skip_reset_on_timeout =
- hdev->skip_reset_on_timeout ||
+ hdev->reset_info.skip_reset_on_timeout ||
!!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
cs->submission_time_jiffies = jiffies;
INIT_LIST_HEAD(&cs->job_list);
@@ -1131,9 +1139,6 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
enum hl_cs_type cs_type;
if (!hl_device_operational(hdev, &status)) {
- dev_warn_ratelimited(hdev->dev,
- "Device is %s. Can't submit new CS\n",
- hdev->status[status]);
return -EBUSY;
}
@@ -1262,7 +1267,8 @@ static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
u32 num_chunks, u64 *cs_seq, u32 flags,
- u32 encaps_signals_handle, u32 timeout)
+ u32 encaps_signals_handle, u32 timeout,
+ u16 *signal_initial_sob_count)
{
bool staged_mid, int_queues_only = true;
struct hl_device *hdev = hpriv->hdev;
@@ -1429,6 +1435,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
goto free_cs_object;
}
+ *signal_initial_sob_count = cs->initial_sob_count;
+
rc = HL_CS_STATUS_SUCCESS;
goto put_cs;
@@ -1457,6 +1465,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
int rc = 0, do_ctx_switch;
void __user *chunks;
u32 num_chunks, tmp;
+ u16 sob_count;
int ret;
do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
@@ -1497,7 +1506,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
rc = 0;
} else {
rc = cs_ioctl_default(hpriv, chunks, num_chunks,
- cs_seq, 0, 0, hdev->timeout_jiffies);
+ cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
}
mutex_unlock(&hpriv->restore_phase_mutex);
@@ -1813,6 +1822,9 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
}
handle->count = count;
+
+ hl_ctx_get(hdev, hpriv->ctx);
+ handle->ctx = hpriv->ctx;
mgr = &hpriv->ctx->sig_mgr;
spin_lock(&mgr->lock);
@@ -1822,7 +1834,7 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
if (hdl_id < 0) {
dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
rc = -EINVAL;
- goto out;
+ goto put_ctx;
}
handle->id = hdl_id;
@@ -1875,7 +1887,10 @@ remove_idr:
idr_remove(&mgr->handles, hdl_id);
spin_unlock(&mgr->lock);
+put_ctx:
+ hl_ctx_put(handle->ctx);
kfree(handle);
+
out:
return rc;
}
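
The new hl_ctx_get()/hl_ctx_put() pair follows the usual refcount discipline: take a reference when the pointer is stored in the handle, and drop it on every path that discards the handle. A generic sketch of the rule (some_setup_fails() is a hypothetical stand-in for the IDR allocation above):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref refcount;
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, refcount));
}

/* Hypothetical stand-in for a setup step that may fail */
static bool some_setup_fails(void)
{
	return false;
}

static int store_handle(struct obj *o, struct obj **slot)
{
	kref_get(&o->refcount);		/* pairs with every put below */
	*slot = o;

	if (some_setup_fails()) {
		kref_put(&o->refcount, obj_release);	/* error path */
		*slot = NULL;
		return -EINVAL;
	}
	return 0;
}
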
@@ -1935,6 +1950,7 @@ static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
/* Release the id and free allocated memory of the handle */
idr_remove(&mgr->handles, handle_id);
+ hl_ctx_put(encaps_sig_hdl->ctx);
kfree(encaps_sig_hdl);
} else {
rc = -EINVAL;
@@ -1948,7 +1964,8 @@ out:
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
void __user *chunks, u32 num_chunks,
- u64 *cs_seq, u32 flags, u32 timeout)
+ u64 *cs_seq, u32 flags, u32 timeout,
+ u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count)
{
struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
bool handle_found = false, is_wait_cs = false,
@@ -2180,6 +2197,9 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
goto free_cs_object;
}
+ *signal_sob_addr_offset = cs->sob_addr_offset;
+ *signal_initial_sob_count = cs->initial_sob_count;
+
rc = HL_CS_STATUS_SUCCESS;
if (is_wait_cs)
wait_cs_submitted = true;
@@ -2210,6 +2230,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
void __user *chunks;
u32 num_chunks, flags, timeout,
signals_count = 0, sob_addr = 0, handle_id = 0;
+ u16 sob_initial_count = 0;
int rc;
rc = hl_cs_sanity_checks(hpriv, args);
@@ -2240,7 +2261,8 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
case CS_TYPE_WAIT:
case CS_TYPE_COLLECTIVE_WAIT:
rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
- &cs_seq, args->in.cs_flags, timeout);
+ &cs_seq, args->in.cs_flags, timeout,
+ &sob_addr, &sob_initial_count);
break;
case CS_RESERVE_SIGNALS:
rc = cs_ioctl_reserve_signals(hpriv,
@@ -2256,20 +2278,33 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
args->in.cs_flags,
args->in.encaps_sig_handle_id,
- timeout);
+ timeout, &sob_initial_count);
break;
}
out:
if (rc != -EAGAIN) {
memset(args, 0, sizeof(*args));
- if (cs_type == CS_RESERVE_SIGNALS) {
+ switch (cs_type) {
+ case CS_RESERVE_SIGNALS:
args->out.handle_id = handle_id;
args->out.sob_base_addr_offset = sob_addr;
args->out.count = signals_count;
- } else {
+ break;
+ case CS_TYPE_SIGNAL:
+ args->out.sob_base_addr_offset = sob_addr;
+ args->out.sob_count_before_submission = sob_initial_count;
+ args->out.seq = cs_seq;
+ break;
+ case CS_TYPE_DEFAULT:
+ args->out.sob_count_before_submission = sob_initial_count;
args->out.seq = cs_seq;
+ break;
+ default:
+ args->out.seq = cs_seq;
+ break;
}
+
args->out.status = rc;
}
@@ -2334,16 +2369,18 @@ static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence
* hl_cs_poll_fences - iterate CS fences to check for CS completion
*
* @mcs_data: multi-CS internal data
+ * @mcs_compl: multi-CS completion structure
*
* @return 0 on success, otherwise non 0 error code
*
* The function iterates on all CS sequence in the list and set bit in
* completion_bitmap for each completed CS.
- * while iterating, the function can extracts the stream map to be later
- * used by the waiting function.
- * this function shall be called after taking context ref
+ * While iterating, the function sets the stream map of each fence in the fence
+ * array in the completion QID stream map to be used by CSs to perform
+ * completion to the multi-CS context.
+ * This function shall be called after taking context ref
*/
-static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
+static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl)
{
struct hl_fence **fence_ptr = mcs_data->fence_arr;
struct hl_device *hdev = mcs_data->ctx->hdev;
@@ -2360,6 +2397,15 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
return rc;
/*
+	 * Re-initialize the completion here to handle two possible cases:
+	 * 1. A CS completes the multi-CS prior to clearing the completion,
+	 *    in which case the fence iteration is guaranteed to catch it.
+	 * 2. The completion occurs after the re-init, in which case we
+	 *    wake up immediately in wait_for_completion.
+	 */
+ reinit_completion(&mcs_compl->completion);
+
+ /*
* set to maximum time to verify timestamp is valid: if at the end
* this value is maintained- no timestamp was updated
*/
@@ -2370,6 +2416,21 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
struct hl_fence *fence = *fence_ptr;
/*
+	 * To prevent the case where we wait until timeout even though a CS
+	 * associated with the multi-CS actually completed, we do things in
+	 * the following order:
+	 * 1. For each fence, set its QID map in the multi-CS completion QID
+	 *    map. This way any CS can, potentially, complete the multi-CS
+	 *    for the specific QID (note that once the completion is
+	 *    initialized, calling complete* and then waiting on it will
+	 *    return at once).
+	 * 2. Only after allowing multi-CS completion for the specific QID do
+	 *    we check whether the specific CS already completed (so the wait
+	 *    is skipped). If it has not, the completing CS is guaranteed to
+	 *    wake up the completion.
+	 */
+ if (fence)
+ mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;
+
+ /*
* function won't sleep as it is called with timeout 0 (i.e.
* poll the fence)
*/
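
A minimal sketch of the race-free ordering the comment above describes, reduced to a bare completion (the ready() predicate is a hypothetical stand-in for polling the fence):

#include <linux/completion.h>
#include <linux/types.h>

static void race_free_wait(struct completion *done, bool (*ready)(void))
{
	reinit_completion(done);	/* 1. arm the completion first */

	/* 2. from this point on, the event side may complete_all(done) */

	if (ready())			/* 3. poll the current state last */
		return;

	/*
	 * If the event fired between steps 2 and 3, complete_all() has
	 * already run, so this returns at once - no wakeup can be lost.
	 */
	wait_for_completion(done);
}
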
@@ -2384,9 +2445,7 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
switch (status) {
case CS_WAIT_STATUS_BUSY:
- /* CS did not finished, keep waiting on its QID*/
- mcs_data->stream_master_qid_map |=
- fence->stream_master_qid_map;
+			/* CS did not finish; the QID to wait on is already stored */
break;
case CS_WAIT_STATUS_COMPLETED:
/*
@@ -2394,9 +2453,19 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
* returns to user indicating CS completed before it finished
* all of its mcs handling, to avoid race the next time the
* user waits for mcs.
+		 * Note: when reaching this case, fence is definitely not NULL,
+		 * but the NULL check was added to satisfy static analysis.
*/
- if (!fence->mcs_handling_done)
+ if (fence && !fence->mcs_handling_done) {
+			/*
+			 * In case the multi-CS is completed but the MCS handling
+			 * is not done, we "complete" the multi-CS to prevent it
+			 * from waiting until timeout; the "multi-CS handling done"
+			 * check gets another chance at the next iteration.
+			 */
+ complete_all(&mcs_compl->completion);
break;
+ }
mcs_data->completion_bitmap |= BIT(i);
/*
@@ -2456,6 +2525,21 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
return rc;
}
+static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs)
+{
+ if (usecs <= U32_MAX)
+ return usecs_to_jiffies(usecs);
+
+	/*
+	 * If the value, once converted to nanoseconds, would not fit in
+	 * 64 bits, use the largest 64-bit value.
+	 */
+ if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC)))
+ return nsecs_to_jiffies(U64_MAX);
+
+ return nsecs_to_jiffies(usecs * NSEC_PER_USEC);
+}
+
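
The helper clamps before multiplying because a 64-bit microsecond count times NSEC_PER_USEC can wrap. A standalone demonstration of the same guard:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

static uint64_t usecs_to_nsecs_clamped(uint64_t usecs)
{
	/* Clamp instead of letting usecs * 1000 wrap around */
	if (usecs >= UINT64_MAX / NSEC_PER_USEC)
		return UINT64_MAX;

	return usecs * NSEC_PER_USEC;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)usecs_to_nsecs_clamped(5));
	printf("%llu\n",
	       (unsigned long long)usecs_to_nsecs_clamped(UINT64_MAX));
	return 0;
}
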
/*
* hl_wait_multi_cs_completion_init - init completion structure
*
@@ -2469,9 +2553,7 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
* the function gets the first available completion (by marking it "used")
* and initialize its values.
*/
-static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
- struct hl_device *hdev,
- u8 stream_master_bitmap)
+static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev)
{
struct multi_cs_completion *mcs_compl;
int i;
@@ -2483,8 +2565,11 @@ static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
if (!mcs_compl->used) {
mcs_compl->used = 1;
mcs_compl->timestamp = 0;
- mcs_compl->stream_master_qid_map = stream_master_bitmap;
- reinit_completion(&mcs_compl->completion);
+			/*
+			 * Init the QID map to 0 to avoid completion by CSs. The actual
+			 * QID map of the multi-CS CSs will be set incrementally later.
+			 */
+ mcs_compl->stream_master_qid_map = 0;
spin_unlock(&mcs_compl->lock);
break;
}
@@ -2492,8 +2577,7 @@ static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
}
if (i == MULTI_CS_MAX_USER_CTX) {
- dev_err(hdev->dev,
- "no available multi-CS completion structure\n");
+ dev_err(hdev->dev, "no available multi-CS completion structure\n");
return ERR_PTR(-ENOMEM);
}
return mcs_compl;
@@ -2524,27 +2608,18 @@ static void hl_wait_multi_cs_completion_fini(
*
* @return 0 on success, otherwise non 0 error code
*/
-static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data)
+static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
+ struct multi_cs_completion *mcs_compl)
{
- struct hl_device *hdev = mcs_data->ctx->hdev;
- struct multi_cs_completion *mcs_compl;
long completion_rc;
- mcs_compl = hl_wait_multi_cs_completion_init(hdev,
- mcs_data->stream_master_qid_map);
- if (IS_ERR(mcs_compl))
- return PTR_ERR(mcs_compl);
-
- completion_rc = wait_for_completion_interruptible_timeout(
- &mcs_compl->completion,
- usecs_to_jiffies(mcs_data->timeout_us));
+ completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion,
+ mcs_data->timeout_jiffies);
/* update timestamp */
if (completion_rc > 0)
mcs_data->timestamp = mcs_compl->timestamp;
- hl_wait_multi_cs_completion_fini(mcs_compl);
-
mcs_data->wait_status = completion_rc;
return 0;
@@ -2577,6 +2652,7 @@ void hl_multi_cs_completion_init(struct hl_device *hdev)
*/
static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
+ struct multi_cs_completion *mcs_compl;
struct hl_device *hdev = hpriv->hdev;
struct multi_cs_data mcs_data = {0};
union hl_wait_cs_args *args = data;
@@ -2631,9 +2707,17 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
hl_ctx_get(hdev, ctx);
+ /* wait (with timeout) for the first CS to be completed */
+ mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
+ mcs_compl = hl_wait_multi_cs_completion_init(hdev);
+ if (IS_ERR(mcs_compl)) {
+ rc = PTR_ERR(mcs_compl);
+ goto put_ctx;
+ }
+
/* poll all CS fences, extract timestamp */
mcs_data.update_ts = true;
- rc = hl_cs_poll_fences(&mcs_data);
+ rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
/*
* skip wait for CS completion when one of the below is true:
* - an error on the poll function
@@ -2641,34 +2725,39 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
* - the user called ioctl with timeout 0
*/
if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
- goto put_ctx;
+ goto completion_fini;
- /* wait (with timeout) for the first CS to be completed */
- mcs_data.timeout_us = args->in.timeout_us;
- rc = hl_wait_multi_cs_completion(&mcs_data);
- if (rc)
- goto put_ctx;
+ while (true) {
+ rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl);
+ if (rc || (mcs_data.wait_status == 0))
+ break;
- if (mcs_data.wait_status > 0) {
/*
* poll fences once again to update the CS map.
* no timestamp should be updated this time.
*/
mcs_data.update_ts = false;
- rc = hl_cs_poll_fences(&mcs_data);
+ rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
+
+ if (mcs_data.completion_bitmap)
+ break;
/*
* if hl_wait_multi_cs_completion returned before timeout (i.e.
- * it got a completion) we expect to see at least one CS
- * completed after the poll function.
+		 * it got a completion) it was either completed by a CS in the
+		 * multi-CS list (in which case the indication is a non-empty
+		 * completion_bitmap) or by a CS submitted to one of the shared
+		 * stream masters but not in the multi-CS list (in which case we
+		 * should wait again, but modify the timeout and set the timestamp
+		 * to zero to let a CS related to the current multi-CS set a new,
+		 * relevant timestamp)
*/
- if (!mcs_data.completion_bitmap) {
- dev_warn_ratelimited(hdev->dev,
- "Multi-CS got completion on wait but no CS completed\n");
- rc = -EFAULT;
- }
+ mcs_data.timeout_jiffies = mcs_data.wait_status;
+ mcs_compl->timestamp = 0;
}
+completion_fini:
+ hl_wait_multi_cs_completion_fini(mcs_compl);
+
put_ctx:
hl_ctx_put(ctx);
kfree(fence_arr);
@@ -2699,7 +2788,7 @@ free_seq_arr:
}
/* update if some CS was gone */
- if (mcs_data.timestamp)
+ if (!mcs_data.timestamp)
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
} else {
args->out.status = HL_WAIT_CS_STATUS_BUSY;
@@ -2766,37 +2855,129 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
}
static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
- u32 timeout_us, u64 user_address,
- u64 target_value, u16 interrupt_offset,
- enum hl_cs_wait_status *status,
+ struct hl_cb_mgr *cb_mgr, u64 timeout_us,
+ u64 cq_counters_handle, u64 cq_counters_offset,
+ u64 target_value, struct hl_user_interrupt *interrupt,
+ u32 *status,
u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
- struct hl_user_interrupt *interrupt;
unsigned long timeout, flags;
- u64 completion_value;
long completion_rc;
+ struct hl_cb *cb;
int rc = 0;
+ u32 handle;
- if (timeout_us == U32_MAX)
- timeout = timeout_us;
- else
- timeout = usecs_to_jiffies(timeout_us);
+ timeout = hl_usecs64_to_jiffies(timeout_us);
hl_ctx_get(hdev, ctx);
- pend = kmalloc(sizeof(*pend), GFP_KERNEL);
+ cq_counters_handle >>= PAGE_SHIFT;
+ handle = (u32) cq_counters_handle;
+
+ cb = hl_cb_get(hdev, cb_mgr, handle);
+ if (!cb) {
+ hl_ctx_put(ctx);
+ return -EINVAL;
+ }
+
+ pend = kzalloc(sizeof(*pend), GFP_KERNEL);
if (!pend) {
+ hl_cb_put(cb);
hl_ctx_put(ctx);
return -ENOMEM;
}
hl_fence_init(&pend->fence, ULONG_MAX);
- if (interrupt_offset == HL_COMMON_USER_INTERRUPT_ID)
- interrupt = &hdev->common_user_interrupt;
- else
- interrupt = &hdev->user_interrupt[interrupt_offset];
+ pend->cq_kernel_addr = (u64 *) cb->kernel_address + cq_counters_offset;
+ pend->cq_target_value = target_value;
+
+	/* We check the completion value, as the interrupt could have been
+	 * received before we added the node to the wait list.
+	 */
+ if (*pend->cq_kernel_addr >= target_value) {
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
+		/* There was no interrupt; we assume the completion occurred just now. */
+ pend->fence.timestamp = ktime_get();
+ }
+
+ if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
+ goto set_timestamp;
+
+ /* Add pending user interrupt to relevant list for the interrupt
+ * handler to monitor
+ */
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+ /* Wait for interrupt handler to signal completion */
+ completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
+ timeout);
+ if (completion_rc > 0) {
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
+ } else {
+ if (completion_rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for interrupt ID %d\n",
+ interrupt->interrupt_id);
+ rc = -EINTR;
+ *status = HL_WAIT_CS_STATUS_ABORTED;
+ } else {
+ if (pend->fence.error == -EIO) {
+ dev_err_ratelimited(hdev->dev,
+					"interrupt-based wait ioctl aborted (error %d) due to an initiated reset cycle\n",
+ pend->fence.error);
+ rc = -EIO;
+ *status = HL_WAIT_CS_STATUS_ABORTED;
+ } else {
+				dev_err_ratelimited(hdev->dev, "Waiting for interrupt ID %d timed out\n",
+ interrupt->interrupt_id);
+ rc = -ETIMEDOUT;
+ }
+ *status = HL_WAIT_CS_STATUS_BUSY;
+ }
+ }
+
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ list_del(&pend->wait_list_node);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+set_timestamp:
+ *timestamp = ktime_to_ns(pend->fence.timestamp);
+
+ kfree(pend);
+ hl_cb_put(cb);
+ hl_ctx_put(ctx);
+
+ return rc;
+}
+
+static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
+ u64 timeout_us, u64 user_address,
+ u64 target_value, struct hl_user_interrupt *interrupt,
+ u32 *status,
+ u64 *timestamp)
+{
+ struct hl_user_pending_interrupt *pend;
+ unsigned long timeout, flags;
+ u64 completion_value;
+ long completion_rc;
+ int rc = 0;
+
+ timeout = hl_usecs64_to_jiffies(timeout_us);
+
+ hl_ctx_get(hdev, ctx);
+
+ pend = kzalloc(sizeof(*pend), GFP_KERNEL);
+ if (!pend) {
+ hl_ctx_put(ctx);
+ return -ENOMEM;
+ }
+
+ hl_fence_init(&pend->fence, ULONG_MAX);
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
@@ -2815,13 +2996,14 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
}
if (completion_value >= target_value) {
- *status = CS_WAIT_STATUS_COMPLETED;
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
/* There was no interrupt, we assume the completion is now. */
pend->fence.timestamp = ktime_get();
- } else
- *status = CS_WAIT_STATUS_BUSY;
+ } else {
+ *status = HL_WAIT_CS_STATUS_BUSY;
+ }
- if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
+ if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
goto remove_pending_user_interrupt;
wait_again:
@@ -2850,7 +3032,13 @@ wait_again:
}
if (completion_value >= target_value) {
- *status = CS_WAIT_STATUS_COMPLETED;
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
+ } else if (pend->fence.error) {
+ dev_err_ratelimited(hdev->dev,
+			"interrupt-based wait ioctl aborted (error %d) due to an initiated reset cycle\n",
+ pend->fence.error);
+ /* set the command completion status as ABORTED */
+ *status = HL_WAIT_CS_STATUS_ABORTED;
} else {
timeout = completion_rc;
goto wait_again;
@@ -2861,7 +3049,7 @@ wait_again:
interrupt->interrupt_id);
rc = -EINTR;
} else {
- *status = CS_WAIT_STATUS_BUSY;
+ *status = HL_WAIT_CS_STATUS_BUSY;
}
remove_pending_user_interrupt:
@@ -2879,11 +3067,12 @@ remove_pending_user_interrupt:
static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
- u16 interrupt_id, interrupt_offset, first_interrupt, last_interrupt;
+ u16 interrupt_id, first_interrupt, last_interrupt;
struct hl_device *hdev = hpriv->hdev;
struct asic_fixed_properties *prop;
+ struct hl_user_interrupt *interrupt;
union hl_wait_cs_args *args = data;
- enum hl_cs_wait_status status;
+ u32 status = HL_WAIT_CS_STATUS_BUSY;
u64 timestamp;
int rc;
@@ -2894,8 +3083,7 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return -EPERM;
}
- interrupt_id =
- FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
+ interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
first_interrupt = prop->first_available_user_msix_interrupt;
last_interrupt = prop->first_available_user_msix_interrupt +
@@ -2908,15 +3096,21 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
}
if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
- interrupt_offset = HL_COMMON_USER_INTERRUPT_ID;
+ interrupt = &hdev->common_user_interrupt;
else
- interrupt_offset = interrupt_id - first_interrupt;
+ interrupt = &hdev->user_interrupt[interrupt_id - first_interrupt];
- rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
+ if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
+ rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->cb_mgr,
+ args->in.interrupt_timeout_us, args->in.cq_counters_handle,
+ args->in.cq_counters_offset,
+ args->in.target, interrupt, &status,
+ &timestamp);
+ else
+ rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
args->in.interrupt_timeout_us, args->in.addr,
- args->in.target, interrupt_offset, &status,
+ args->in.target, interrupt, &status,
&timestamp);
-
if (rc) {
if (rc != -EINTR)
dev_err_ratelimited(hdev->dev,
@@ -2926,22 +3120,13 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
}
memset(args, 0, sizeof(*args));
+ args->out.status = status;
if (timestamp) {
args->out.timestamp_nsec = timestamp;
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
}
- switch (status) {
- case CS_WAIT_STATUS_COMPLETED:
- args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
- break;
- case CS_WAIT_STATUS_BUSY:
- default:
- args->out.status = HL_WAIT_CS_STATUS_BUSY;
- break;
- }
-
return 0;
}
@@ -2955,7 +3140,7 @@ int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
* user interrupt
*/
if (!hl_device_operational(hpriv->hdev, NULL))
- return -EPERM;
+ return -EBUSY;
if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
rc = hl_interrupt_wait_ioctl(hpriv, data);
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index d0aaccd4df2c..c6360e33bce8 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -13,13 +13,13 @@ void hl_encaps_handle_do_release(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
- struct hl_ctx *ctx = handle->hdev->compute_ctx;
- struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;
+ struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;
spin_lock(&mgr->lock);
idr_remove(&mgr->handles, handle->id);
spin_unlock(&mgr->lock);
+ hl_ctx_put(handle->ctx);
kfree(handle);
}
@@ -27,8 +27,7 @@ static void hl_encaps_handle_do_release_sob(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
- struct hl_ctx *ctx = handle->hdev->compute_ctx;
- struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;
+ struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;
/* if we're here, then there was a signals reservation but cs with
* encaps signals wasn't submitted, so need to put refcount
@@ -40,6 +39,7 @@ static void hl_encaps_handle_do_release_sob(struct kref *ref)
idr_remove(&mgr->handles, handle->id);
spin_unlock(&mgr->lock);
+ hl_ctx_put(handle->ctx);
kfree(handle);
}
@@ -97,11 +97,9 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
/* The engines are stopped as there is no executing CS, but the
* Coresight might be still working by accessing addresses
* related to the stopped engines. Hence stop it explicitly.
- * Stop only if this is the compute context, as there can be
- * only one compute context
*/
- if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
- hl_device_set_debug_mode(hdev, false);
+ if (hdev->in_debug)
+ hl_device_set_debug_mode(hdev, ctx, false);
hdev->asic_funcs->ctx_fini(ctx);
hl_cb_va_pool_fini(ctx);
@@ -167,7 +165,7 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
hpriv->ctx = ctx;
/* TODO: remove the following line for multiple process support */
- hdev->compute_ctx = ctx;
+ hdev->is_compute_ctx_active = true;
return 0;
@@ -274,6 +272,27 @@ int hl_ctx_put(struct hl_ctx *ctx)
return kref_put(&ctx->refcount, hl_ctx_do_release);
}
+struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
+{
+ struct hl_ctx *ctx = NULL;
+ struct hl_fpriv *hpriv;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+		/* There can only be a single user that has the compute device
+		 * open, so exit immediately once we find it
+ */
+ ctx = hpriv->ctx;
+ hl_ctx_get(hdev, ctx);
+ break;
+ }
+
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ return ctx;
+}
+
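
Every successful hl_get_compute_ctx() call must be balanced by hl_ctx_put(); a sketch of the expected call pattern:

static void use_compute_ctx(struct hl_device *hdev)
{
	struct hl_ctx *ctx = hl_get_compute_ctx(hdev);

	if (!ctx)
		return;		/* no user has the compute device open */

	/* ... use ctx while holding the reference taken above ... */

	hl_ctx_put(ctx);	/* balance the get inside the helper */
}
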
/*
* hl_ctx_get_fence_locked - get CS fence under CS lock
*
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 1f2a3dc6c4e2..fc084ee5106e 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -15,19 +15,25 @@
#define MMU_ADDR_BUF_SIZE 40
#define MMU_ASID_BUF_SIZE 10
#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
+#define I2C_MAX_TRANSACTION_LEN 8
static struct dentry *hl_debug_root;
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
- u8 i2c_reg, long *val)
+ u8 i2c_reg, u8 i2c_len, u64 *val)
{
struct cpucp_packet pkt;
- u64 result;
int rc;
if (!hl_device_operational(hdev, NULL))
return -EBUSY;
+ if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
+		dev_err(hdev->dev, "I2C transaction length %u exceeds maximum of %u\n",
+ i2c_len, I2C_MAX_TRANSACTION_LEN);
+ return -EINVAL;
+ }
+
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
@@ -35,12 +41,10 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
pkt.i2c_reg = i2c_reg;
+ pkt.i2c_len = i2c_len;
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- 0, &result);
-
- *val = (long) result;
-
+ 0, val);
if (rc)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
@@ -48,7 +52,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
}
static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
- u8 i2c_reg, u32 val)
+ u8 i2c_reg, u8 i2c_len, u64 val)
{
struct cpucp_packet pkt;
int rc;
@@ -56,6 +60,12 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
if (!hl_device_operational(hdev, NULL))
return -EBUSY;
+ if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
+		dev_err(hdev->dev, "I2C transaction length %u exceeds maximum of %u\n",
+ i2c_len, I2C_MAX_TRANSACTION_LEN);
+ return -EINVAL;
+ }
+
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
@@ -63,6 +73,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
pkt.i2c_reg = i2c_reg;
+ pkt.i2c_len = i2c_len;
pkt.value = cpu_to_le64(val);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -235,6 +246,8 @@ static int vm_show(struct seq_file *s, void *data)
struct hl_vm_hash_node *hnode;
struct hl_userptr *userptr;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ struct hl_va_range *va_range;
+ struct hl_vm_va_block *va_block;
enum vm_type *vm_type;
bool once = true;
u64 j;
@@ -314,6 +327,25 @@ static int vm_show(struct seq_file *s, void *data)
spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+ ctx = hl_get_compute_ctx(dev_entry->hdev);
+ if (ctx) {
+ seq_puts(s, "\nVA ranges:\n\n");
+ for (i = HL_VA_RANGE_TYPE_HOST ; i < HL_VA_RANGE_TYPE_MAX ; ++i) {
+ va_range = ctx->va_range[i];
+ seq_printf(s, " va_range %d\n", i);
+ seq_puts(s, "---------------------\n");
+ mutex_lock(&va_range->lock);
+ list_for_each_entry(va_block, &va_range->list, node) {
+ seq_printf(s, "%#16llx - %#16llx (%#llx)\n",
+ va_block->start, va_block->end,
+ va_block->size);
+ }
+ mutex_unlock(&va_range->lock);
+ seq_puts(s, "\n");
+ }
+ hl_ctx_put(ctx);
+ }
+
if (!once)
seq_puts(s, "\n");
@@ -407,7 +439,7 @@ static int mmu_show(struct seq_file *s, void *data)
if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
ctx = hdev->kernel_ctx;
else
- ctx = hdev->compute_ctx;
+ ctx = hl_get_compute_ctx(hdev);
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
@@ -495,7 +527,7 @@ static int engines_show(struct seq_file *s, void *data)
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev,
"Can't check device idle during reset\n");
return 0;
@@ -560,7 +592,7 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
u64 *phys_addr)
{
struct hl_vm_phys_pg_pack *phys_pg_pack;
- struct hl_ctx *ctx = hdev->compute_ctx;
+ struct hl_ctx *ctx;
struct hl_vm_hash_node *hnode;
u64 end_address, range_size;
struct hl_userptr *userptr;
@@ -568,6 +600,8 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
bool valid = false;
int i, rc = 0;
+ ctx = hl_get_compute_ctx(hdev);
+
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
return -EINVAL;
@@ -624,7 +658,7 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
ssize_t rc;
u32 val;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
return 0;
}
@@ -660,7 +694,7 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
u32 value;
ssize_t rc;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
return 0;
}
@@ -697,7 +731,7 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf,
ssize_t rc;
u64 val;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
return 0;
}
@@ -733,7 +767,7 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
u64 value;
ssize_t rc;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
return 0;
}
@@ -768,7 +802,7 @@ static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
ssize_t rc;
u32 size;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
return 0;
}
@@ -874,22 +908,22 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char tmp_buf[32];
- long val;
+ u64 val;
ssize_t rc;
if (*ppos)
return 0;
rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
- entry->i2c_reg, &val);
+ entry->i2c_reg, entry->i2c_len, &val);
if (rc) {
dev_err(hdev->dev,
- "Failed to read from I2C bus %d, addr %d, reg %d\n",
- entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
+ "Failed to read from I2C bus %d, addr %d, reg %d, len %d\n",
+ entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
return rc;
}
- sprintf(tmp_buf, "0x%02lx\n", val);
+ sprintf(tmp_buf, "%#02llx\n", val);
rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
strlen(tmp_buf));
@@ -901,19 +935,19 @@ static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- u32 value;
+ u64 value;
ssize_t rc;
- rc = kstrtouint_from_user(buf, count, 16, &value);
+ rc = kstrtou64_from_user(buf, count, 16, &value);
if (rc)
return rc;
rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
- entry->i2c_reg, value);
+ entry->i2c_reg, entry->i2c_len, value);
if (rc) {
dev_err(hdev->dev,
- "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
- value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
+ "Failed to write %#02llx to I2C bus %d, addr %d, reg %d, len %d\n",
+ value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
return rc;
}
@@ -1043,7 +1077,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
u64 value;
ssize_t rc;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev,
"Can't change clock gating during reset\n");
return 0;
@@ -1085,7 +1119,7 @@ static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
u32 value;
ssize_t rc;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev,
"Can't change stop on error during reset\n");
return 0;
@@ -1396,6 +1430,11 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry->root,
&dev_entry->i2c_reg);
+ debugfs_create_u8("i2c_len",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_len);
+
debugfs_create_file("i2c_data",
0644,
dev_entry->root,
@@ -1458,7 +1497,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
debugfs_create_x8("skip_reset_on_timeout",
0644,
dev_entry->root,
- &hdev->skip_reset_on_timeout);
+ &hdev->reset_info.skip_reset_on_timeout);
debugfs_create_file("state_dump",
0600,
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 2022e5d7b3ad..733338ab6f1d 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -17,9 +17,9 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (atomic_read(&hdev->in_reset))
+ if (hdev->reset_info.in_reset)
status = HL_DEVICE_STATUS_IN_RESET;
- else if (hdev->needs_reset)
+ else if (hdev->reset_info.needs_reset)
status = HL_DEVICE_STATUS_NEEDS_RESET;
else if (hdev->disabled)
status = HL_DEVICE_STATUS_MALFUNCTION;
@@ -95,14 +95,14 @@ static void hpriv_release(struct kref *ref)
if ((hdev->reset_if_device_not_idle && !device_is_idle)
|| hdev->reset_upon_device_release)
- hl_device_reset(hdev, HL_RESET_DEVICE_RELEASE);
+ hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
- /* Now we can mark the compute_ctx as empty. Even if a reset is running in a different
+ /* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
* thread, we don't care because the in_reset is marked so if a user will try to open
- * the device it will fail on that, even if compute_ctx is NULL.
+	 * the device it will fail on that, even if is_compute_ctx_active is false.
*/
mutex_lock(&hdev->fpriv_list_lock);
- hdev->compute_ctx = NULL;
+ hdev->is_compute_ctx_active = false;
mutex_unlock(&hdev->fpriv_list_lock);
kfree(hpriv);
@@ -169,9 +169,9 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
goto out;
}
- mutex_lock(&hdev->fpriv_list_lock);
+ mutex_lock(&hdev->fpriv_ctrl_list_lock);
list_del(&hpriv->dev_node);
- mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_ctrl_list_lock);
out:
put_pid(hpriv->taskpid);
@@ -324,16 +324,12 @@ put_devices:
static void device_hard_reset_pending(struct work_struct *work)
{
struct hl_device_reset_work *device_reset_work =
- container_of(work, struct hl_device_reset_work,
- reset_work.work);
+ container_of(work, struct hl_device_reset_work, reset_work.work);
struct hl_device *hdev = device_reset_work->hdev;
u32 flags;
int rc;
- flags = HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD;
-
- if (device_reset_work->fw_reset)
- flags |= HL_RESET_FW;
+ flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;
rc = hl_device_reset(hdev, flags);
if ((rc == -EBUSY) && !hdev->device_fini_pending) {
@@ -452,9 +448,12 @@ static int device_early_init(struct hl_device *hdev)
mutex_init(&hdev->debug_lock);
INIT_LIST_HEAD(&hdev->cs_mirror_list);
spin_lock_init(&hdev->cs_mirror_lock);
+ spin_lock_init(&hdev->reset_info.lock);
INIT_LIST_HEAD(&hdev->fpriv_list);
+ INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
mutex_init(&hdev->fpriv_list_lock);
- atomic_set(&hdev->in_reset, 0);
+ mutex_init(&hdev->fpriv_ctrl_list_lock);
+ mutex_init(&hdev->clk_throttling.lock);
return 0;
@@ -494,6 +493,9 @@ static void device_early_fini(struct hl_device *hdev)
mutex_destroy(&hdev->send_cpu_message_lock);
mutex_destroy(&hdev->fpriv_list_lock);
+ mutex_destroy(&hdev->fpriv_ctrl_list_lock);
+
+ mutex_destroy(&hdev->clk_throttling.lock);
hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
@@ -513,22 +515,6 @@ static void device_early_fini(struct hl_device *hdev)
hdev->asic_funcs->early_fini(hdev);
}
-static void set_freq_to_low_job(struct work_struct *work)
-{
- struct hl_device *hdev = container_of(work, struct hl_device,
- work_freq.work);
-
- mutex_lock(&hdev->fpriv_list_lock);
-
- if (!hdev->compute_ctx)
- hl_device_set_frequency(hdev, PLL_LOW);
-
- mutex_unlock(&hdev->fpriv_list_lock);
-
- schedule_delayed_work(&hdev->work_freq,
- usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
-}
-
static void hl_device_heartbeat(struct work_struct *work)
{
struct hl_device *hdev = container_of(work, struct hl_device,
@@ -540,8 +526,10 @@ static void hl_device_heartbeat(struct work_struct *work)
if (!hdev->asic_funcs->send_heartbeat(hdev))
goto reschedule;
- dev_err(hdev->dev, "Device heartbeat failed!\n");
- hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_HEARTBEAT);
+ if (hl_device_operational(hdev, NULL))
+ dev_err(hdev->dev, "Device heartbeat failed!\n");
+
+ hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
return;
@@ -552,12 +540,12 @@ reschedule:
* If control reached here, then at least one heartbeat work has been
* scheduled since last reset/init cycle.
* So if the device is not already in reset cycle, reset the flag
- * prev_reset_trigger as no reset occurred with HL_RESET_FW_FATAL_ERR
+ * prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR
* status for at least one heartbeat. From this point driver restarts
* tracking future consecutive fatal errors.
*/
- if (!(atomic_read(&hdev->in_reset)))
- hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
+ if (!hdev->reset_info.in_reset)
+ hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
schedule_delayed_work(&hdev->work_heartbeat,
usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
@@ -586,18 +574,6 @@ static int device_late_init(struct hl_device *hdev)
hdev->high_pll = hdev->asic_prop.high_pll;
- /* force setting to low frequency */
- hdev->curr_pll_profile = PLL_LOW;
-
- if (hdev->pm_mng_profile == PM_AUTO)
- hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
- else
- hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);
-
- INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
- schedule_delayed_work(&hdev->work_freq,
- usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
-
if (hdev->heartbeat) {
INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
schedule_delayed_work(&hdev->work_heartbeat,
@@ -620,7 +596,6 @@ static void device_late_fini(struct hl_device *hdev)
if (!hdev->late_init_done)
return;
- cancel_delayed_work_sync(&hdev->work_freq);
if (hdev->heartbeat)
cancel_delayed_work_sync(&hdev->work_heartbeat);
@@ -650,36 +625,7 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
return 0;
}
-/*
- * hl_device_set_frequency - set the frequency of the device
- *
- * @hdev: pointer to habanalabs device structure
- * @freq: the new frequency value
- *
- * Change the frequency if needed. This function has no protection against
- * concurrency, therefore it is assumed that the calling function has protected
- * itself against the case of calling this function from multiple threads with
- * different values
- *
- * Returns 0 if no change was done, otherwise returns 1
- */
-int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
-{
- if ((hdev->pm_mng_profile == PM_MANUAL) ||
- (hdev->curr_pll_profile == freq))
- return 0;
-
- dev_dbg(hdev->dev, "Changing device frequency to %s\n",
- freq == PLL_HIGH ? "high" : "low");
-
- hdev->asic_funcs->set_pll_profile(hdev, freq);
-
- hdev->curr_pll_profile = freq;
-
- return 1;
-}
-
-int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
+int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
{
int rc = 0;
@@ -693,12 +639,12 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
goto out;
}
- if (!hdev->hard_reset_pending)
- hdev->asic_funcs->halt_coresight(hdev);
+ if (!hdev->reset_info.hard_reset_pending)
+ hdev->asic_funcs->halt_coresight(hdev, ctx);
hdev->in_debug = 0;
- if (!hdev->hard_reset_pending)
+ if (!hdev->reset_info.hard_reset_pending)
hdev->asic_funcs->set_clock_gating(hdev);
goto out;
@@ -735,6 +681,8 @@ static void take_release_locks(struct hl_device *hdev)
/* Flush anyone that is inside device open */
mutex_lock(&hdev->fpriv_list_lock);
mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_lock(&hdev->fpriv_ctrl_list_lock);
+ mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset)
@@ -774,11 +722,14 @@ int hl_device_suspend(struct hl_device *hdev)
pci_save_state(hdev->pdev);
/* Block future CS/VM/JOB completion operations */
- rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
- if (rc) {
+ spin_lock(&hdev->reset_info.lock);
+ if (hdev->reset_info.in_reset) {
+ spin_unlock(&hdev->reset_info.lock);
dev_err(hdev->dev, "Can't suspend while in reset\n");
return -EIO;
}
+ hdev->reset_info.in_reset = 1;
+ spin_unlock(&hdev->reset_info.lock);
/* This blocks all other stuff that is not blocked by in_reset */
hdev->disabled = true;
@@ -828,10 +779,12 @@ int hl_device_resume(struct hl_device *hdev)
}
- hdev->disabled = false;
- atomic_set(&hdev->in_reset, 0);
+ /* 'in_reset' was set to true during suspend, now we must clear it in order
+ * for hard reset to be performed
+ */
+ hdev->reset_info.in_reset = 0;
- rc = hl_device_reset(hdev, HL_RESET_HARD);
+ rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
if (rc) {
dev_err(hdev->dev, "Failed to reset device during resume\n");
goto disable_device;
@@ -846,17 +799,21 @@ disable_device:
return rc;
}
-static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
+static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
{
- struct hl_fpriv *hpriv;
struct task_struct *task = NULL;
+ struct list_head *fd_list;
+ struct hl_fpriv *hpriv;
+ struct mutex *fd_lock;
u32 pending_cnt;
+ fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
+ fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
/* Giving time for user to close FD, and for processes that are inside
* hl_device_open to finish
*/
- if (!list_empty(&hdev->fpriv_list))
+ if (!list_empty(fd_list))
ssleep(1);
if (timeout) {
@@ -872,12 +829,12 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
}
}
- mutex_lock(&hdev->fpriv_list_lock);
+ mutex_lock(fd_lock);
/* This section must be protected because we are dereferencing
* pointers that are freed if the process exits
*/
- list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+ list_for_each_entry(hpriv, fd_list, dev_node) {
task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
if (task) {
dev_info(hdev->dev, "Killing user process pid=%d\n",
@@ -889,12 +846,12 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
} else {
dev_warn(hdev->dev,
"Can't get task struct for PID so giving up on killing process\n");
- mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_unlock(fd_lock);
return -ETIME;
}
}
- mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_unlock(fd_lock);
/*
* We killed the open users, but that doesn't mean they are closed.
@@ -906,7 +863,7 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
*/
wait_for_processes:
- while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
+ while ((!list_empty(fd_list)) && (pending_cnt)) {
dev_dbg(hdev->dev,
"Waiting for all unmap operations to finish before hard reset\n");
@@ -916,7 +873,7 @@ wait_for_processes:
}
/* All processes exited successfully */
- if (list_empty(&hdev->fpriv_list))
+ if (list_empty(fd_list))
return 0;
/* Give up waiting for processes to exit */
@@ -928,14 +885,19 @@ wait_for_processes:
return -EBUSY;
}
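Condensed, the function above is a kill-then-poll loop: signal every process on the chosen fd_list, then poll the list until it drains or the trial budget runs out. A rough sketch of that control flow (the sleep length and pending_cnt bookkeeping are illustrative):

/* sketch: wait for fd_list to drain within a bounded number of trials */
while (!list_empty(fd_list) && pending_cnt) {
	ssleep(1);	/* give killed processes time to tear down */
	pending_cnt--;
}
return list_empty(fd_list) ? 0 : -EBUSY;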
-static void device_disable_open_processes(struct hl_device *hdev)
+static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
{
+ struct list_head *fd_list;
struct hl_fpriv *hpriv;
+ struct mutex *fd_lock;
- mutex_lock(&hdev->fpriv_list_lock);
- list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
+ fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
+ fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
+
+ mutex_lock(fd_lock);
+ list_for_each_entry(hpriv, fd_list, dev_node)
hpriv->hdev = NULL;
- mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_unlock(fd_lock);
}
static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
@@ -948,17 +910,17 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
* ('in_reset' guarantees it), so 'reset_cause' will keep
* holding its first recorded reason!
*/
- if (flags & HL_RESET_HEARTBEAT) {
- hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
- cur_reset_trigger = HL_RESET_HEARTBEAT;
- } else if (flags & HL_RESET_TDR) {
- hdev->curr_reset_cause = HL_RESET_CAUSE_TDR;
- cur_reset_trigger = HL_RESET_TDR;
- } else if (flags & HL_RESET_FW_FATAL_ERR) {
- hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
- cur_reset_trigger = HL_RESET_FW_FATAL_ERR;
+ if (flags & HL_DRV_RESET_HEARTBEAT) {
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
+ cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
+ } else if (flags & HL_DRV_RESET_TDR) {
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
+ cur_reset_trigger = HL_DRV_RESET_TDR;
+ } else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
} else {
- hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
}
/*
@@ -966,11 +928,11 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
* is set and if this reset is due to a fatal FW error
* device is set to an unstable state.
*/
- if (hdev->prev_reset_trigger != cur_reset_trigger) {
- hdev->prev_reset_trigger = cur_reset_trigger;
- hdev->reset_trigger_repeated = 0;
+ if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
+ hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
+ hdev->reset_info.reset_trigger_repeated = 0;
} else {
- hdev->reset_trigger_repeated = 1;
+ hdev->reset_info.reset_trigger_repeated = 1;
}
/* If reset is due to heartbeat, device CPU is not responsive in
@@ -979,8 +941,8 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
* If F/W is performing the reset, no need to send it a message to disable
* PCI access
*/
- if ((flags & HL_RESET_HARD) &&
- !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) {
+ if ((flags & HL_DRV_RESET_HARD) &&
+ !(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
/* Disable PCI access from device F/W so it won't send
* us additional interrupts. We disable MSI/MSI-X at
* the halt_engines function and we can't have the F/W
@@ -1015,34 +977,39 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
*/
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
+ bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
+ reset_upon_device_release = false, schedule_hard_reset = false;
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
- bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false;
+ struct hl_ctx *ctx;
int i, rc;
if (!hdev->init_done) {
- dev_err(hdev->dev,
- "Can't reset before initialization is done\n");
+ dev_err(hdev->dev, "Can't reset before initialization is done\n");
return 0;
}
- hard_reset = !!(flags & HL_RESET_HARD);
- from_hard_reset_thread = !!(flags & HL_RESET_FROM_RESET_THREAD);
- fw_reset = !!(flags & HL_RESET_FW);
+ hard_reset = !!(flags & HL_DRV_RESET_HARD);
+ from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
+ fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
- if (!hard_reset && !hdev->supports_soft_reset) {
+ if (!hard_reset && !hdev->asic_prop.supports_soft_reset) {
hard_instead_soft = true;
hard_reset = true;
}
- if (hdev->reset_upon_device_release &&
- (flags & HL_RESET_DEVICE_RELEASE)) {
- dev_dbg(hdev->dev,
- "Perform %s-reset upon device release\n",
- hard_reset ? "hard" : "soft");
+ if (hdev->reset_upon_device_release && (flags & HL_DRV_RESET_DEV_RELEASE)) {
+ if (hard_reset) {
+ dev_crit(hdev->dev,
+ "Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
+ return -EINVAL;
+ }
+
+ reset_upon_device_release = true;
+
goto do_reset;
}
- if (!hard_reset && !hdev->allow_inference_soft_reset) {
+ if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
hard_instead_soft = true;
hard_reset = true;
}
@@ -1062,12 +1029,22 @@ do_reset:
*/
if (!from_hard_reset_thread) {
/* Block future CS/VM/JOB completion operations */
- rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
- if (rc)
+ spin_lock(&hdev->reset_info.lock);
+ if (hdev->reset_info.in_reset) {
+ /* We only allow scheduling of a hard reset during soft reset */
+ if (hard_reset && hdev->reset_info.is_in_soft_reset)
+ hdev->reset_info.hard_reset_schedule_flags = flags;
+ spin_unlock(&hdev->reset_info.lock);
return 0;
+ }
+ hdev->reset_info.in_reset = 1;
+ spin_unlock(&hdev->reset_info.lock);
handle_reset_trigger(hdev, flags);
+ /* This still allows the completion of some KDMA ops */
+ hdev->reset_info.is_in_soft_reset = !hard_reset;
+
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
@@ -1075,21 +1052,19 @@ do_reset:
if (hard_reset)
dev_info(hdev->dev, "Going to reset device\n");
- else if (flags & HL_RESET_DEVICE_RELEASE)
- dev_info(hdev->dev,
- "Going to reset device after it was released by user\n");
+ else if (reset_upon_device_release)
+ dev_info(hdev->dev, "Going to reset device after release by user\n");
else
- dev_info(hdev->dev,
- "Going to reset compute engines of inference device\n");
+ dev_info(hdev->dev, "Going to reset engines of inference device\n");
}
again:
if ((hard_reset) && (!from_hard_reset_thread)) {
- hdev->hard_reset_pending = true;
+ hdev->reset_info.hard_reset_pending = true;
hdev->process_kill_trial_cnt = 0;
- hdev->device_reset_work.fw_reset = fw_reset;
+ hdev->device_reset_work.flags = flags;
/*
* Because the reset function can't run from heartbeat work,
@@ -1109,7 +1084,7 @@ kill_processes:
* process can't really exit until all its CSs are done, which
* is what we do in cs rollback
*/
- rc = device_kill_open_processes(hdev, 0);
+ rc = device_kill_open_processes(hdev, 0, false);
if (rc == -EBUSY) {
if (hdev->device_fini_pending) {
@@ -1138,7 +1113,7 @@ kill_processes:
hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
if (hard_reset) {
- hdev->fw_loader.linux_loaded = false;
+ hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
@@ -1154,24 +1129,23 @@ kill_processes:
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_reset(hdev, &hdev->completion_queue[i]);
- mutex_lock(&hdev->fpriv_list_lock);
-
/* Make sure the context switch phase will run again */
- if (hdev->compute_ctx) {
- atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1);
- hdev->compute_ctx->thread_ctx_switch_wait_token = 0;
+ ctx = hl_get_compute_ctx(hdev);
+ if (ctx) {
+ atomic_set(&ctx->thread_ctx_switch_token, 1);
+ ctx->thread_ctx_switch_wait_token = 0;
+ hl_ctx_put(ctx);
}
- mutex_unlock(&hdev->fpriv_list_lock);
-
/* Finished tear-down, starting to re-initialize */
if (hard_reset) {
hdev->device_cpu_disabled = false;
- hdev->hard_reset_pending = false;
+ hdev->reset_info.hard_reset_pending = false;
- if (hdev->reset_trigger_repeated &&
- (hdev->prev_reset_trigger == HL_RESET_FW_FATAL_ERR)) {
+ if (hdev->reset_info.reset_trigger_repeated &&
+ (hdev->reset_info.prev_reset_trigger ==
+ HL_DRV_RESET_FW_FATAL_ERR)) {
/* if there are 2 back-to-back resets from FW,
* ensure the driver puts the device in an unusable state
*/
@@ -1204,7 +1178,7 @@ kill_processes:
goto out_err;
}
- hdev->compute_ctx = NULL;
+ hdev->is_compute_ctx_active = false;
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
@@ -1225,16 +1199,14 @@ kill_processes:
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
- dev_err(hdev->dev,
- "failed to initialize the H/W after reset\n");
+ dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
goto out_err;
}
/* If device is not idle fail the reset process */
if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
- dev_err(hdev->dev,
- "device is not idle (mask 0x%llx_%llx) after reset\n",
+ dev_err(hdev->dev, "device is not idle (mask 0x%llx_%llx) after reset\n",
idle_mask[1], idle_mask[0]);
rc = -EIO;
goto out_err;
@@ -1243,43 +1215,56 @@ kill_processes:
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed to detect if device is alive after reset\n");
+ dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");
goto out_err;
}
if (hard_reset) {
rc = device_late_init(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed late init after hard reset\n");
+ dev_err(hdev->dev, "Failed late init after hard reset\n");
goto out_err;
}
rc = hl_vm_init(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed to init memory module after hard reset\n");
+ dev_err(hdev->dev, "Failed to init memory module after hard reset\n");
goto out_err;
}
hl_set_max_power(hdev);
} else {
- rc = hdev->asic_funcs->soft_reset_late_init(hdev);
+ rc = hdev->asic_funcs->non_hard_reset_late_init(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed late init after soft reset\n");
+ if (reset_upon_device_release)
+ dev_err(hdev->dev,
+ "Failed late init in reset after device release\n");
+ else
+ dev_err(hdev->dev, "Failed late init after soft reset\n");
goto out_err;
}
}
- atomic_set(&hdev->in_reset, 0);
- hdev->needs_reset = false;
+ spin_lock(&hdev->reset_info.lock);
+ hdev->reset_info.is_in_soft_reset = false;
+
+ /* Schedule a hard reset only if requested and if not already in hard reset.
+ * We keep 'in_reset' set, so no other reset can start while the hard
+ * reset is pending
+ */
+ if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)
+ schedule_hard_reset = true;
+ else
+ hdev->reset_info.in_reset = 0;
+
+ spin_unlock(&hdev->reset_info.lock);
+
+ hdev->reset_info.needs_reset = false;
dev_notice(hdev->dev, "Successfully finished resetting the device\n");
if (hard_reset) {
- hdev->hard_reset_cnt++;
+ hdev->reset_info.hard_reset_cnt++;
/* After reset is done, we are ready to receive events from
* the F/W. We can't do it before because we will ignore events
@@ -1287,28 +1272,41 @@ kill_processes:
* the device will be operational although it shouldn't be
*/
hdev->asic_funcs->enable_events_from_fw(hdev);
- } else {
- hdev->soft_reset_cnt++;
+ } else if (!reset_upon_device_release) {
+ hdev->reset_info.soft_reset_cnt++;
+ }
+
+ if (schedule_hard_reset) {
+ dev_info(hdev->dev, "Performing hard reset scheduled during soft reset\n");
+ flags = hdev->reset_info.hard_reset_schedule_flags;
+ hdev->reset_info.hard_reset_schedule_flags = 0;
+ hdev->disabled = true;
+ hard_reset = true;
+ handle_reset_trigger(hdev, flags);
+ goto again;
}
return 0;
out_err:
hdev->disabled = true;
+ hdev->reset_info.is_in_soft_reset = false;
if (hard_reset) {
- dev_err(hdev->dev,
- "Failed to reset! Device is NOT usable\n");
- hdev->hard_reset_cnt++;
+ dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
+ hdev->reset_info.hard_reset_cnt++;
+ } else if (reset_upon_device_release) {
+ dev_err(hdev->dev, "Failed to reset device after user release\n");
+ hard_reset = true;
+ goto again;
} else {
- dev_err(hdev->dev,
- "Failed to do soft-reset, trying hard reset\n");
- hdev->soft_reset_cnt++;
+ dev_err(hdev->dev, "Failed to do soft-reset\n");
+ hdev->reset_info.soft_reset_cnt++;
hard_reset = true;
goto again;
}
- atomic_set(&hdev->in_reset, 0);
+ hdev->reset_info.in_reset = 0;
return rc;
}
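A note on how the two halves of the scheduling logic meet: a hard-reset request that arrives while a soft reset is in flight is parked in reset_info.hard_reset_schedule_flags (entry path above), and the soft-reset exit path replays it through 'goto again' while deliberately leaving in_reset set, so no third party can slip in between the two resets. From a caller's point of view the contract is simply (an illustrative call, not taken verbatim from this patch):

/* If a soft reset is mid-flight, this returns 0 immediately and the
 * request is replayed once the soft reset completes.
 */
rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
if (rc)
	dev_err(hdev->dev, "hard reset request failed: %d\n", rc);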
@@ -1455,7 +1453,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto mmu_fini;
}
- hdev->compute_ctx = NULL;
+ hdev->is_compute_ctx_active = false;
hdev->asic_funcs->state_dump_init(hdev);
@@ -1619,6 +1617,7 @@ out_disabled:
*/
void hl_device_fini(struct hl_device *hdev)
{
+ bool device_in_reset;
ktime_t timeout;
u64 reset_sec;
int i, rc;
@@ -1642,10 +1641,22 @@ void hl_device_fini(struct hl_device *hdev)
*/
timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
- rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
- while (rc) {
+
+ spin_lock(&hdev->reset_info.lock);
+ device_in_reset = !!hdev->reset_info.in_reset;
+ if (!device_in_reset)
+ hdev->reset_info.in_reset = 1;
+ spin_unlock(&hdev->reset_info.lock);
+
+ while (device_in_reset) {
usleep_range(50, 200);
- rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+
+ spin_lock(&hdev->reset_info.lock);
+ device_in_reset = !!hdev->reset_info.in_reset;
+ if (!device_in_reset)
+ hdev->reset_info.in_reset = 1;
+ spin_unlock(&hdev->reset_info.lock);
+
if (ktime_compare(ktime_get(), timeout) > 0) {
dev_crit(hdev->dev,
"Failed to remove device because reset function did not finish\n");
@@ -1667,7 +1678,7 @@ void hl_device_fini(struct hl_device *hdev)
take_release_locks(hdev);
- hdev->hard_reset_pending = true;
+ hdev->reset_info.hard_reset_pending = true;
hl_hwmon_fini(hdev);
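The loop above repeats a locked test-and-set until it either wins the in_reset flag or the ktime deadline expires. Factored into a helper, the shape would be roughly (a sketch; the patch keeps it open-coded):

static int hl_grab_reset_or_timeout(struct hl_device *hdev, ktime_t timeout)
{
	for (;;) {
		bool busy;

		spin_lock(&hdev->reset_info.lock);
		busy = !!hdev->reset_info.in_reset;
		if (!busy)
			hdev->reset_info.in_reset = 1;
		spin_unlock(&hdev->reset_info.lock);

		if (!busy)
			return 0;
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;
		usleep_range(50, 200);
	}
}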
@@ -1681,10 +1692,16 @@ void hl_device_fini(struct hl_device *hdev)
"Waiting for all processes to exit (timeout of %u seconds)",
HL_PENDING_RESET_LONG_SEC);
- rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC);
+ rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC, false);
if (rc) {
dev_crit(hdev->dev, "Failed to kill all open processes\n");
- device_disable_open_processes(hdev);
+ device_disable_open_processes(hdev, false);
+ }
+
+ rc = device_kill_open_processes(hdev, 0, true);
+ if (rc) {
+ dev_crit(hdev->dev, "Failed to kill all control device open processes\n");
+ device_disable_open_processes(hdev, true);
}
hl_cb_pool_fini(hdev);
@@ -1692,7 +1709,7 @@ void hl_device_fini(struct hl_device *hdev)
/* Reset the H/W. It will be in idle state after this returns */
hdev->asic_funcs->hw_fini(hdev, true, false);
- hdev->fw_loader.linux_loaded = false;
+ hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 4e68fb9d2a6b..6775c5c3166b 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -15,8 +15,6 @@
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
-#define FW_CPU_STATUS_POLL_INTERVAL_USEC 10000
-
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
@@ -214,7 +212,8 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
- u32 tmp, expected_ack_val;
+ struct hl_bd *sent_bd;
+ u32 tmp, expected_ack_val, pi;
int rc = 0;
pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
@@ -239,6 +238,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
/* set fence to a non valid value */
pkt->fence = cpu_to_le32(UINT_MAX);
+ pi = queue->pi;
/*
* The CPU queue is a synchronous queue with an effective depth of
@@ -248,7 +248,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
* Which means that we don't need to lock the access to the entire H/W
* queues module when submitting a JOB to the CPU queue.
*/
- hl_hw_queue_submit_bd(hdev, queue, 0, len, pkt_dma_addr);
+ hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
expected_ack_val = queue->pi;
@@ -280,6 +280,14 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
*result = le64_to_cpu(pkt->result);
}
+ /* Scrub previous buffer descriptor 'ctl' field which contains the
+ * previous PI value written during packet submission.
+ * We must do this or else F/W can read an old value upon queue wraparound.
+ */
+ sent_bd = queue->kernel_address;
+ sent_bd += hl_pi_2_offset(pi);
+ sent_bd->ctl = cpu_to_le32(UINT_MAX);
+
out:
mutex_unlock(&hdev->send_cpu_message_lock);
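The scrub added above exists because the CPU queue is circular: after a wraparound, the firmware could otherwise read the stale PI that was written into the same BD slot on the previous lap. Assuming hl_pi_2_offset() maps a PI to its slot offset (its definition is outside this diff), the slot lifecycle is roughly:

/* sketch of one BD slot's lifecycle on the CPU queue */
u32 pi = queue->pi;				/* capture before submit */

hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
/* ... poll the fence until the F/W acknowledges the packet ... */

sent_bd = queue->kernel_address;
sent_bd += hl_pi_2_offset(pi);			/* back to the consumed slot */
sent_bd->ctl = cpu_to_le32(UINT_MAX);		/* poison the stale PI */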
@@ -445,15 +453,6 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
err_exists = true;
}
- if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
- dev_warn(hdev->dev,
- "Device boot warning - Skipped DRAM initialization\n");
- /* This is a warning so we don't want it to disable the
- * device
- */
- err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
- }
-
if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
if (hdev->bmc_enable) {
dev_err(hdev->dev,
@@ -497,15 +496,6 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
err_exists = true;
}
- if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
- dev_warn(hdev->dev,
- "Device boot warning - Failed to load preboot primary image\n");
- /* This is a warning so we don't want it to disable the
- * device as we have a secondary preboot image
- */
- err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
- }
-
if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) {
dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
err_exists = true;
@@ -525,6 +515,34 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
+ /* All warnings should go here in order not to reach the unknown error validation */
+ if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
+ dev_warn(hdev->dev,
+ "Device boot warning - Skipped DRAM initialization\n");
+ /* This is a warning so we don't want it to disable the
+ * device
+ */
+ err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
+ dev_warn(hdev->dev,
+ "Device boot warning - Failed to load preboot primary image\n");
+ /* This is a warning so we don't want it to disable the
+ * device as we have a secondary preboot image
+ */
+ err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_TPM_FAIL) {
+ dev_warn(hdev->dev,
+ "Device boot warning - TPM failure\n");
+ /* This is a warning so we don't want it to disable the
+ * device
+ */
+ err_val &= ~CPU_BOOT_ERR0_TPM_FAIL;
+ }
+
if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
dev_err(hdev->dev,
"Device boot error - unknown ERR0 error 0x%08x\n", err_val);
@@ -961,6 +979,7 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.type = cpu_to_le16(CPUCP_POWER_INPUT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
@@ -974,6 +993,92 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
return rc;
}
+int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
+ struct cpucp_hbm_row_info *info)
+{
+ struct cpucp_hbm_row_info *cpucp_repl_rows_info_cpu_addr;
+ dma_addr_t cpucp_repl_rows_info_dma_addr;
+ struct cpucp_packet pkt = {};
+ u64 result;
+ int rc;
+
+ cpucp_repl_rows_info_cpu_addr =
+ hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+ sizeof(struct cpucp_hbm_row_info),
+ &cpucp_repl_rows_info_dma_addr);
+ if (!cpucp_repl_rows_info_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
+ return -ENOMEM;
+ }
+
+ memset(cpucp_repl_rows_info_cpu_addr, 0, sizeof(struct cpucp_hbm_row_info));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(cpucp_repl_rows_info_dma_addr);
+ pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_hbm_row_info));
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
+ goto out;
+ }
+
+ memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
+
+out:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
+ sizeof(struct cpucp_hbm_row_info),
+ cpucp_repl_rows_info_cpu_addr);
+
+ return rc;
+}
+
+int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
+{
+ struct cpucp_packet pkt;
+ u64 result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_PENDING_ROWS_STATUS << CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
+ goto out;
+ }
+
+ *pend_rows_num = (u32) result;
+out:
+ return rc;
+}
+
+int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid)
+{
+ struct cpucp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_ENGINE_CORE_ASID_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = cpu_to_le64(asid);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, NULL);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed on ASID configuration request for engine core, error %d\n",
+ rc);
+
+ return rc;
+}
+
void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
{
struct static_fw_load_mgr *static_loader =
@@ -1028,7 +1133,7 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
switch (status) {
case CPU_BOOT_STATUS_NA:
dev_err(hdev->dev,
- "Device boot progress - BTL did NOT run\n");
+ "Device boot progress - BTL/ROM did NOT run\n");
break;
case CPU_BOOT_STATUS_IN_WFE:
dev_err(hdev->dev,
@@ -1101,9 +1206,8 @@ static int hl_fw_read_preboot_caps(struct hl_device *hdev,
(status == CPU_BOOT_STATUS_DRAM_RDY) ||
(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
- (status == CPU_BOOT_STATUS_SRAM_AVAIL) ||
(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
timeout);
if (rc) {
@@ -1250,8 +1354,7 @@ static void hl_fw_preboot_update_state(struct hl_device *hdev)
* 3. FW application - a. Fetch fw application security status
* b. Check whether hard reset is done by fw app
*/
- prop->hard_reset_done_by_fw =
- !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
+ prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
cpu_boot_dev_sts0);
@@ -1287,11 +1390,7 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
{
int rc;
- /* pldm was added for cases in which we use preboot on pldm and want
- * to load boot fit, but we can't wait for preboot because it runs
- * very slowly
- */
- if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU) || hdev->pldm)
+ if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
return 0;
/*
@@ -1437,7 +1536,7 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
status,
FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
timeout);
if (rc) {
@@ -1703,6 +1802,9 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
return rc;
}
+ /* here we can mark the descriptor as valid as the content has been validated */
+ fw_loader->dynamic_loader.fw_desc_valid = true;
+
return 0;
}
@@ -1759,7 +1861,13 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
return rc;
}
- /* extract address copy the descriptor from */
+ /*
+ * Extract the address to copy the descriptor from.
+ * In addition, as the descriptor value is going to be overridden by new
+ * data, we mark it as invalid.
+ * It will be marked as valid again once validated.
+ */
+ fw_loader->dynamic_loader.fw_desc_valid = false;
src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
response->ram_offset;
memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
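fw_desc_valid implements a small validity window: the flag is dropped just before the descriptor is overwritten and only restored by hl_fw_dynamic_validate_descriptor() on success, so any error path in between (including the protocol_err handler further down, which now consults the flag before calling fw_read_errors()) never trusts a half-written descriptor. In outline (argument lists abbreviated):

fw_loader->dynamic_loader.fw_desc_valid = false;	/* about to overwrite */
memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
rc = hl_fw_dynamic_validate_descriptor(hdev, /* ... */);
if (rc)
	return rc;	/* descriptor stays marked invalid for error paths */
/* the validator sets fw_desc_valid = true on success (see above) */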
@@ -1920,17 +2028,15 @@ static void hl_fw_boot_fit_update_state(struct hl_device *hdev,
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- /* Clear reset status since we need to read it again from boot CPU */
- prop->hard_reset_done_by_fw = false;
+ hdev->fw_loader.fw_comp_loaded |= FW_TYPE_BOOT_CPU;
/* Read boot_cpu status bits */
if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) {
prop->fw_bootfit_cpu_boot_dev_sts0 =
RREG32(cpu_boot_dev_sts0_reg);
- if (prop->fw_bootfit_cpu_boot_dev_sts0 &
- CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- prop->hard_reset_done_by_fw = true;
+ prop->hard_reset_done_by_fw = !!(prop->fw_bootfit_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n",
prop->fw_bootfit_cpu_boot_dev_sts0);
@@ -2055,14 +2161,21 @@ static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
dyn_loader = &fw_loader->dynamic_loader;
- /* Make sure CPU boot-loader is running */
+ /*
+ * Make sure the CPU boot-loader is running.
+ * Note that CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux,
+ * yet there is a debug scenario in which we load u-boot (without Linux),
+ * which at a later stage is relocated to DRAM. In this case we expect
+ * u-boot to set CPU_BOOT_STATUS_SRAM_AVAIL, so we add it to the
+ * poll flags
+ */
rc = hl_poll_timeout(
hdev,
le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
status,
- (status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
- (status == CPU_BOOT_STATUS_READY_TO_BOOT),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ (status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ hdev->fw_poll_interval_usec,
dyn_loader->wait_for_bl_timeout);
if (rc) {
dev_err(hdev->dev, "failed to wait for boot\n");
@@ -2082,14 +2195,14 @@ static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
dyn_loader = &fw_loader->dynamic_loader;
- /* Make sure CPU boot-loader is running */
+ /* Make sure CPU linux is running */
rc = hl_poll_timeout(
hdev,
le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
status,
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
fw_loader->cpu_timeout);
if (rc) {
dev_err(hdev->dev, "failed to wait for Linux\n");
@@ -2121,18 +2234,14 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- hdev->fw_loader.linux_loaded = true;
-
- /* Clear reset status since we need to read again from app */
- prop->hard_reset_done_by_fw = false;
+ hdev->fw_loader.fw_comp_loaded |= FW_TYPE_LINUX;
/* Read FW application security bits */
if (prop->fw_cpu_boot_dev_sts0_valid) {
prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
- if (prop->fw_app_cpu_boot_dev_sts0 &
- CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- prop->hard_reset_done_by_fw = true;
+ prop->hard_reset_done_by_fw = !!(prop->fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
if (prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN)
@@ -2247,6 +2356,9 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
dev_info(hdev->dev,
"Loading firmware to device, may take some time...\n");
+ /* initialize FW descriptor as invalid */
+ fw_loader->dynamic_loader.fw_desc_valid = false;
+
/*
* In this stage, "cpu_dyn_regs" contains only LKD's hard coded values!
* It will be updated from FW after hl_fw_dynamic_request_descriptor().
@@ -2259,14 +2371,14 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
if (rc)
goto protocol_err;
- if (hdev->curr_reset_cause) {
+ if (hdev->reset_info.curr_reset_cause) {
rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
- HL_COMMS_RESET_CAUSE_TYPE, &hdev->curr_reset_cause);
+ HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause);
if (rc)
goto protocol_err;
/* Clear current reset cause */
- hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
}
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
@@ -2288,6 +2400,15 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
goto protocol_err;
}
+ /*
+ * When testing FW load (without Linux) on PLDM, we don't want to
+ * wait until the boot fit is active, as that may take several hours.
+ * Instead, we load the boot fit and let it do all initializations in
+ * the background.
+ */
+ if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
+ return 0;
+
rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
if (rc)
goto protocol_err;
@@ -2333,7 +2454,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
return 0;
protocol_err:
- fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
+ if (fw_loader->dynamic_loader.fw_desc_valid)
+ fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
le32_to_cpu(dyn_regs->cpu_boot_err1),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
@@ -2380,7 +2502,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
cpu_boot_status_reg,
status,
status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
fw_loader->boot_fit_timeout);
if (rc) {
@@ -2403,7 +2525,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
cpu_msg_status_reg,
status,
status == CPU_MSG_OK,
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
fw_loader->boot_fit_timeout);
if (rc) {
@@ -2416,7 +2538,14 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
WREG32(msg_to_cpu_reg, KMD_MSG_NA);
}
- /* Make sure CPU boot-loader is running */
+ /*
+ * Make sure the CPU boot-loader is running.
+ * Note that CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux,
+ * yet there is a debug scenario in which we load u-boot (without Linux),
+ * which at a later stage is relocated to DRAM. In this case we expect
+ * u-boot to set CPU_BOOT_STATUS_SRAM_AVAIL, so we add it to the
+ * poll flags
+ */
rc = hl_poll_timeout(
hdev,
cpu_boot_status_reg,
@@ -2425,7 +2554,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
cpu_timeout);
dev_dbg(hdev->dev, "uboot status = %d\n", status);
@@ -2474,7 +2603,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
cpu_timeout);
if (rc) {
@@ -2494,7 +2623,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
cpu_timeout);
/* Clear message */
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index a2002cbf794b..cb710fd478b6 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -61,6 +61,8 @@
#define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
#define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
+#define HL_FW_STATUS_POLL_INTERVAL_USEC 10000 /* 10ms */
+
#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
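HL_FW_STATUS_POLL_INTERVAL_USEC replaces the per-file FW_CPU_STATUS_POLL_INTERVAL_USEC constant deleted from firmware_if.c; the live value now travels in hdev->fw_poll_interval_usec so that slow setups (simulator, PLDM) can stretch it. Presumably the device-init path seeds it like this (the actual assignment is outside the hunks shown here):

/* assumed init site: default poll interval, overridable per platform */
hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;

/* every boot-status wait then shares it, e.g.: */
rc = hl_poll_timeout(hdev, cpu_boot_status_reg, status,
		     status == CPU_BOOT_STATUS_SRAM_AVAIL,
		     hdev->fw_poll_interval_usec, cpu_timeout);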
@@ -117,37 +119,37 @@ enum hl_mmu_page_table_location {
/*
* Reset Flags
*
- * - HL_RESET_HARD
+ * - HL_DRV_RESET_HARD
* If set do hard reset to all engines. If not set reset just
* compute/DMA engines.
*
- * - HL_RESET_FROM_RESET_THREAD
+ * - HL_DRV_RESET_FROM_RESET_THR
* Set if the caller is the hard-reset thread
*
- * - HL_RESET_HEARTBEAT
+ * - HL_DRV_RESET_HEARTBEAT
* Set if reset is due to heartbeat
*
- * - HL_RESET_TDR
+ * - HL_DRV_RESET_TDR
* Set if reset is due to TDR
*
- * - HL_RESET_DEVICE_RELEASE
+ * - HL_DRV_RESET_DEV_RELEASE
* Set if reset is due to device release
*
- * - HL_RESET_FW
+ * - HL_DRV_RESET_BYPASS_REQ_TO_FW
* F/W will perform the reset. No need to ask it to reset the device. This is relevant
* only when running with secured f/w
*
- * - HL_RESET_FW_FATAL_ERR
+ * - HL_DRV_RESET_FW_FATAL_ERR
* Set if reset is due to a fatal error from FW
*/
-#define HL_RESET_HARD (1 << 0)
-#define HL_RESET_FROM_RESET_THREAD (1 << 1)
-#define HL_RESET_HEARTBEAT (1 << 2)
-#define HL_RESET_TDR (1 << 3)
-#define HL_RESET_DEVICE_RELEASE (1 << 4)
-#define HL_RESET_FW (1 << 5)
-#define HL_RESET_FW_FATAL_ERR (1 << 6)
+#define HL_DRV_RESET_HARD (1 << 0)
+#define HL_DRV_RESET_FROM_RESET_THR (1 << 1)
+#define HL_DRV_RESET_HEARTBEAT (1 << 2)
+#define HL_DRV_RESET_TDR (1 << 3)
+#define HL_DRV_RESET_DEV_RELEASE (1 << 4)
+#define HL_DRV_RESET_BYPASS_REQ_TO_FW (1 << 5)
+#define HL_DRV_RESET_FW_FATAL_ERR (1 << 6)
#define HL_MAX_SOBS_PER_MONITOR 8
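Since the HL_DRV_RESET_* values are plain bits, callers compose them with OR; two representative invocations, sketched rather than quoted from specific call sites:

/* illustrative calls; see hl_device_reset() for the full semantics */
hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);	/* reset on device release */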
@@ -219,6 +221,7 @@ enum hl_fw_component {
/**
* enum hl_fw_types - F/W types present in the system
+ * @FW_TYPE_NONE: no FW component indication
* @FW_TYPE_LINUX: Linux image for device CPU
* @FW_TYPE_BOOT_CPU: Boot image for device CPU
* @FW_TYPE_PREBOOT_CPU: Indicates pre-loaded CPUs are present in the system
@@ -226,6 +229,7 @@ enum hl_fw_component {
* @FW_TYPE_ALL_TYPES: Mask for all types
*/
enum hl_fw_types {
+ FW_TYPE_NONE = 0x0,
FW_TYPE_LINUX = 0x1,
FW_TYPE_BOOT_CPU = 0x2,
FW_TYPE_PREBOOT_CPU = 0x4,
@@ -353,6 +357,21 @@ enum vm_type {
};
/**
+ * enum mmu_op_flags - mmu operation relevant information.
+ * @MMU_OP_USERPTR: operation on user memory (host resident).
+ * @MMU_OP_PHYS_PACK: operation on DRAM (device resident).
+ * @MMU_OP_CLEAR_MEMCACHE: operation has to clear memcache.
+ * @MMU_OP_SKIP_LOW_CACHE_INV: operation is allowed to skip parts of cache invalidation.
+ */
+enum mmu_op_flags {
+ MMU_OP_USERPTR = 0x1,
+ MMU_OP_PHYS_PACK = 0x2,
+ MMU_OP_CLEAR_MEMCACHE = 0x4,
+ MMU_OP_SKIP_LOW_CACHE_INV = 0x8,
+};
+
+
+/**
* enum hl_device_hw_state - H/W device state. use this to understand whether
* to do reset before hw_init or not
* @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
@@ -382,6 +401,7 @@ enum hl_device_hw_state {
* @hop3_mask: mask to get the PTE address in hop 3.
* @hop4_mask: mask to get the PTE address in hop 4.
* @hop5_mask: mask to get the PTE address in hop 5.
+ * @last_mask: mask to get the bit indicating this is the last hop.
* @page_size: default page size used to allocate memory.
* @num_hops: The amount of hops supported by the translation table.
* @host_resident: Should the MMU page table reside in host memory or in the
@@ -402,6 +422,7 @@ struct hl_mmu_properties {
u64 hop3_mask;
u64 hop4_mask;
u64 hop5_mask;
+ u64 last_mask;
u32 page_size;
u32 num_hops;
u8 host_resident;
@@ -524,6 +545,15 @@ struct hl_hints_range {
* @dynamic_fw_load: is dynamic FW load is supported.
* @gic_interrupts_enable: true if FW is not blocking GIC controller,
* false otherwise.
+ * @use_get_power_for_reset_history: To support backward compatibility for Goya
+ * and Gaudi
+ * @supports_soft_reset: is soft reset supported.
+ * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
+ * initiated by user or TDR. This is only true
+ * in inference ASICs, as there is no real-world
+ * use-case of doing soft-reset in training (due
+ * to the fact that training runs on multiple
+ * devices)
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -604,6 +634,9 @@ struct asic_fixed_properties {
u8 iatu_done_by_fw;
u8 dynamic_fw_load;
u8 gic_interrupts_enable;
+ u8 use_get_power_for_reset_history;
+ u8 supports_soft_reset;
+ u8 allow_inference_soft_reset;
};
/**
@@ -852,10 +885,15 @@ struct hl_user_interrupt {
* pending on an interrupt
* @wait_list_node: node in the list of user threads pending on an interrupt
* @fence: hl fence object for interrupt completion
+ * @cq_target_value: CQ target value
+ * @cq_kernel_addr: CQ kernel address, to be used in the cq interrupt
+ * handler for target value comparison
*/
struct hl_user_pending_interrupt {
struct list_head wait_list_node;
struct hl_fence fence;
+ u64 cq_target_value;
+ u64 *cq_kernel_addr;
};
/**
@@ -1010,6 +1048,7 @@ struct fw_response {
* @image_region: region to copy the FW image to
* @fw_image_size: size of FW image to load
* @wait_for_bl_timeout: timeout for waiting for boot loader to respond
+ * @fw_desc_valid: true if FW descriptor has been validated and hence the data can be used
*/
struct dynamic_fw_load_mgr {
struct fw_response response;
@@ -1017,6 +1056,7 @@ struct dynamic_fw_load_mgr {
struct pci_mem_region *image_region;
size_t fw_image_size;
u32 wait_for_bl_timeout;
+ bool fw_desc_valid;
};
/**
@@ -1042,7 +1082,8 @@ struct fw_image_props {
* @skip_bmc: should BMC be skipped
* @sram_bar_id: SRAM bar ID
* @dram_bar_id: DRAM bar ID
- * @linux_loaded: true if linux was loaded so far
+ * @fw_comp_loaded: bitmask of loaded FW components. A set bit means the
+ * component is loaded; values are set according to enum hl_fw_types.
*/
struct fw_load_mgr {
union {
@@ -1056,7 +1097,7 @@ struct fw_load_mgr {
u8 skip_bmc;
u8 sram_bar_id;
u8 dram_bar_id;
- u8 linux_loaded;
+ u8 fw_comp_loaded;
};
/**
@@ -1128,7 +1169,7 @@ struct fw_load_mgr {
* @disable_clock_gating: disable clock gating completely
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
- * @soft_reset_late_init: perform certain actions needed after soft reset.
+ * @non_hard_reset_late_init: perform certain actions needed after a reset which is not hard-reset
* @hw_queues_lock: acquire H/W queues lock.
* @hw_queues_unlock: release H/W queues lock.
* @get_pci_id: retrieve PCI ID.
@@ -1261,10 +1302,10 @@ struct hl_asic_funcs {
int (*send_heartbeat)(struct hl_device *hdev);
void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
- int (*debug_coresight)(struct hl_device *hdev, void *data);
+ int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
u8 mask_len, struct seq_file *s);
- int (*soft_reset_late_init)(struct hl_device *hdev);
+ int (*non_hard_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
u32 (*get_pci_id)(struct hl_device *hdev);
@@ -1276,7 +1317,7 @@ struct hl_asic_funcs {
int (*init_iatu)(struct hl_device *hdev);
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
- void (*halt_coresight)(struct hl_device *hdev);
+ void (*halt_coresight)(struct hl_device *hdev, struct hl_ctx *ctx);
int (*ctx_init)(struct hl_ctx *ctx);
void (*ctx_fini)(struct hl_ctx *ctx);
int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
@@ -1518,6 +1559,9 @@ struct hl_userptr {
* @submission_time_jiffies: submission time of the cs
* @type: CS_TYPE_*.
* @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
+ * @sob_addr_offset: sob offset from the configuration base address.
+ * @initial_sob_count: count of completed signals in SOB before current submission of signal or
+ * cs with encaps signals.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
* @timedout : true if CS was timedout.
@@ -1553,6 +1597,8 @@ struct hl_cs {
u64 submission_time_jiffies;
enum hl_cs_type type;
u32 encaps_sig_hdl_id;
+ u32 sob_addr_offset;
+ u16 initial_sob_count;
u8 submitted;
u8 completed;
u8 timedout;
@@ -1792,7 +1838,6 @@ struct hl_debug_params {
* @dev_node: node in the device list of file private data
* @refcount: number of related contexts.
* @restore_phase_mutex: lock for context switch and restore phase.
- * @is_control: true for control device, false otherwise
*/
struct hl_fpriv {
struct hl_device *hdev;
@@ -1805,7 +1850,6 @@ struct hl_fpriv {
struct list_head dev_node;
struct kref refcount;
struct mutex restore_phase_mutex;
- u8 is_control;
};
@@ -1864,6 +1908,7 @@ struct hl_debugfs_entry {
* @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
+ * @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
*/
struct hl_dbg_device_entry {
struct dentry *root;
@@ -1892,6 +1937,7 @@ struct hl_dbg_device_entry {
u8 i2c_bus;
u8 i2c_addr;
u8 i2c_reg;
+ u8 i2c_len;
};
/**
@@ -2180,13 +2226,13 @@ struct hwmon_chip_info;
* @wq: work queue for device reset procedure.
* @reset_work: reset work to be done.
* @hdev: habanalabs device structure.
- * @fw_reset: whether f/w will do the reset without us sending them a message to do it.
+ * @flags: reset flags.
*/
struct hl_device_reset_work {
struct workqueue_struct *wq;
struct delayed_work reset_work;
struct hl_device *hdev;
- bool fw_reset;
+ u32 flags;
};
/**
@@ -2328,12 +2374,10 @@ struct multi_cs_completion {
* @ctx: pointer to the context structure
* @fence_arr: array of fences of all CSs
* @seq_arr: array of CS sequence numbers
- * @timeout_us: timeout in usec for waiting for CS to complete
+ * @timeout_jiffies: timeout in jiffies for waiting for CS to complete
* @timestamp: timestamp of first completed CS
* @wait_status: wait for CS status
* @completion_bitmap: bitmap of completed CSs (1- completed, otherwise 0)
- * @stream_master_qid_map: bitmap of all stream master QIDs on which the
- * multi-CS is waiting
* @arr_len: fence_arr and seq_arr array length
* @gone_cs: indication of gone CS (1- there was gone CS, otherwise 0)
* @update_ts: update timestamp. 1- update the timestamp, otherwise 0.
@@ -2342,17 +2386,114 @@ struct multi_cs_data {
struct hl_ctx *ctx;
struct hl_fence **fence_arr;
u64 *seq_arr;
- s64 timeout_us;
+ s64 timeout_jiffies;
s64 timestamp;
long wait_status;
u32 completion_bitmap;
- u32 stream_master_qid_map;
u8 arr_len;
u8 gone_cs;
u8 update_ts;
};
/**
+ * struct hl_clk_throttle_timestamp - current/last clock throttling timestamp
+ * @start: timestamp taken when 'start' event is received in driver
+ * @end: timestamp taken when 'end' event is received in driver
+ */
+struct hl_clk_throttle_timestamp {
+ ktime_t start;
+ ktime_t end;
+};
+
+/**
+ * struct hl_clk_throttle - keeps current/last clock throttling timestamps
+ * @timestamp: timestamp taken by driver and firmware, index 0 refers to POWER
+ * index 1 refers to THERMAL
+ * @lock: protects this structure as it can be accessed from both event queue
+ * context and info_ioctl context
+ * @current_reason: bitmask represents the current clk throttling reasons
+ * @aggregated_reason: bitmask represents aggregated clk throttling reasons since driver load
+ */
+struct hl_clk_throttle {
+ struct hl_clk_throttle_timestamp timestamp[HL_CLK_THROTTLE_TYPE_MAX];
+ struct mutex lock;
+ u32 current_reason;
+ u32 aggregated_reason;
+};
+
+/**
+ * struct last_error_session_info - info about last session in which CS timeout or
+ * razwi error occurred.
+ * @open_dev_timestamp: device open timestamp.
+ * @cs_timeout_timestamp: CS timeout timestamp.
+ * @razwi_timestamp: razwi timestamp.
+ * @cs_write_disable: if set, writing to the CS parameters in the structure is disabled so the
+ * first (root cause) CS timeout will not be overwritten.
+ * @razwi_write_disable: if set, writing to the razwi parameters in the structure is disabled so
+ * the first (root cause) razwi will not be overwritten.
+ * @cs_timeout_seq: CS timeout sequence number.
+ * @razwi_addr: address that caused razwi.
+ * @razwi_engine_id_1: engine id of the razwi initiator; if it was initiated by an engine that
+ * does not have an engine id, it will be set to U16_MAX.
+ * @razwi_engine_id_2: second engine id of the razwi initiator. A razwi may have 2 possible
+ * initiator engines, one of which caused it. In that case, this field holds
+ * the second possible engine id; otherwise it is set to U16_MAX.
+ * @razwi_non_engine_initiator: set when the razwi initiator does not have an engine id.
+ * @razwi_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
+ */
+struct last_error_session_info {
+ ktime_t open_dev_timestamp;
+ ktime_t cs_timeout_timestamp;
+ ktime_t razwi_timestamp;
+ atomic_t cs_write_disable;
+ atomic_t razwi_write_disable;
+ u64 cs_timeout_seq;
+ u64 razwi_addr;
+ u16 razwi_engine_id_1;
+ u16 razwi_engine_id_2;
+ u8 razwi_non_engine_initiator;
+ u8 razwi_type;
+};
+
+/**
+ * struct hl_reset_info - holds current device reset information.
+ * @lock: lock to protect critical reset flows.
+ * @soft_reset_cnt: number of soft reset since the driver was loaded.
+ * @hard_reset_cnt: number of hard reset since the driver was loaded.
+ * @hard_reset_schedule_flags: when a hard reset is scheduled to run after the current soft
+ * reset, the hard reset flags are held here.
+ * @in_reset: is device in reset flow.
+ * @is_in_soft_reset: Device is currently in soft reset process.
+ * @needs_reset: true if reset_on_lockup is false and device should be reset
+ * due to lockup.
+ * @hard_reset_pending: is there a hard reset work pending.
+ * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
+ * triggered, and cleared after it is shared with preboot.
+ * @prev_reset_trigger: saves the previous trigger which caused a reset, overridden
+ * with a new value on next reset
+ * @reset_trigger_repeated: set if device reset is triggered more than once with
+ * same cause.
+ * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
+ * complete instead.
+ */
+struct hl_reset_info {
+ spinlock_t lock;
+ u32 soft_reset_cnt;
+ u32 hard_reset_cnt;
+ u32 hard_reset_schedule_flags;
+ u8 in_reset;
+ u8 is_in_soft_reset;
+ u8 needs_reset;
+ u8 hard_reset_pending;
+
+ u8 curr_reset_cause;
+ u8 prev_reset_trigger;
+ u8 reset_trigger_repeated;
+
+ u8 skip_reset_on_timeout;
+};
+
+/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
* @pcie_bar_phys: array of available PCIe bars physical addresses.
@@ -2363,7 +2504,6 @@ struct multi_cs_data {
* @cdev_ctrl: char device for control operations only (INFO IOCTL)
* @dev: related kernel basic device structure.
* @dev_ctrl: related kernel device structure for the control device
- * @work_freq: delayed work to lower device frequency if possible.
* @work_heartbeat: delayed work for CPU-CP is-alive check.
* @device_reset_work: delayed work which performs hard reset
* @asic_name: ASIC specific name.
@@ -2398,7 +2538,6 @@ struct multi_cs_data {
* @asic_specific: ASIC specific information to use only from ASIC files.
* @vm: virtual memory manager for MMU.
* @hwmon_dev: H/W monitor device.
- * @pm_mng_profile: current power management profile.
* @hl_chip_info: ASIC's sensors information.
* @device_status_description: device status description.
* @hl_debugfs: device's debugfs manager.
@@ -2410,8 +2549,10 @@ struct multi_cs_data {
* @internal_cb_va_base: internal cb pool mmu virtual address base
* @fpriv_list: list of file private data structures. Each structure is created
* when a user opens the device
+ * @fpriv_ctrl_list: list of file private data structures. Each structure is created
+ * when a user opens the control device
* @fpriv_list_lock: protects the fpriv_list
- * @compute_ctx: current compute context executing.
+ * @fpriv_ctrl_list_lock: protects the fpriv_ctrl_list
* @aggregated_cs_counters: aggregated cs counters among all contexts
* @mmu_priv: device-specific MMU data.
* @mmu_func: device-related MMU functions.
@@ -2419,6 +2560,10 @@ struct multi_cs_data {
* @pci_mem_region: array of memory regions in the PCI
* @state_dump_specs: constants and dictionaries needed to dump system state.
* @multi_cs_completion: array of multi-CS completion.
+ * @clk_throttling: holds information about current/previous clock throttling events
+ * @reset_info: holds current device reset information.
+ * @last_error: holds information about last session in which CS timeout or razwi error occurred.
+ * @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -2434,20 +2579,17 @@ struct multi_cs_data {
* device initialization. Mainly used to debug and
* workaround firmware bugs
* @dram_pci_bar_start: start bus address of PCIe bar towards DRAM.
+ * @last_successful_open_ktime: timestamp (ktime) of the last successful device open.
* @last_successful_open_jif: timestamp (jiffies) of the last successful
* device open.
* @last_open_session_duration_jif: duration (jiffies) of the last device open
* session.
* @open_counter: number of successful device open operations.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
+ * @fw_poll_interval_usec: FW status poll interval in usec.
* @card_type: Various ASICs have several card types. This indicates the card
* type of the current device.
* @major: habanalabs kernel driver major.
* @high_pll: high PLL profile frequency.
- * @soft_reset_cnt: number of soft reset since the driver was loaded.
- * @hard_reset_cnt: number of hard reset since the driver was loaded.
- * @clk_throttling_reason: bitmask represents the current clk throttling reasons
* @id: device minor.
* @id_control: minor of the control device
* @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
@@ -2455,7 +2597,6 @@ struct multi_cs_data {
* @disabled: is device disabled.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
- * @hard_reset_pending: is there a hard reset work pending.
* @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
* otherwise.
@@ -2467,8 +2608,9 @@ struct multi_cs_data {
* @init_done: is the initialization of the device done.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
- * @in_debug: is device under debug. This, together with fpriv_list, enforces
- * that only a single user is configuring the debug infrastructure.
+ * @in_debug: whether the device is in a state where the profiling/tracing infrastructure
+ * can be used. This indication is needed because in some ASICs we need to do
+ * specific operations to enable that infrastructure.
* @power9_64bit_dma_enable: true to enable 64-bit DMA mask support. Relevant
* only to POWER9 machines.
* @cdev_sysfs_created: were char devices and sysfs nodes created.
@@ -2477,34 +2619,18 @@ struct multi_cs_data {
* @sync_stream_queue_idx: helper index for sync stream queues initialization.
* @collective_mon_idx: helper index for collective initialization
* @supports_coresight: is CoreSight supported.
- * @supports_soft_reset: is soft reset supported.
- * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
- * initiated by user or TDR. This is only true
- * in inference ASICs, as there is no real-world
- * use-case of doing soft-reset in training (due
- * to the fact that training runs on multiple
- * devices)
* @supports_cb_mapping: is mapping a CB to the device's MMU supported.
- * @needs_reset: true if reset_on_lockup is false and device should be reset
- * due to lockup.
* @process_kill_trial_cnt: number of trials reset thread tried killing
* user processes
* @device_fini_pending: true if device_fini was called and might be
* waiting for the reset thread to finish
* @supports_staged_submission: true if staged submissions are supported
- * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
- * triggered, and cleared after it is shared with preboot.
- * @prev_reset_trigger: saves the previous trigger which caused a reset, overidden
- * with a new value on next reset
- * @reset_trigger_repeated: set if device reset is triggered more than once with
- * same cause.
- * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
- * complete instead.
* @device_cpu_is_halted: Flag to indicate whether the device CPU was already
* halted. We can't halt it again because the COMMS
* protocol will throw an error. Relevant only for
* cases where Linux was not loaded to device CPU
* @supports_wait_for_multi_cs: true if wait for multi CS is supported
+ * @is_compute_ctx_active: Whether there is an active compute context executing.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -2515,7 +2641,6 @@ struct hl_device {
struct cdev cdev_ctrl;
struct device *dev;
struct device *dev_ctrl;
- struct delayed_work work_freq;
struct delayed_work work_heartbeat;
struct hl_device_reset_work device_reset_work;
char asic_name[HL_STR_MAX];
@@ -2546,7 +2671,6 @@ struct hl_device {
void *asic_specific;
struct hl_vm vm;
struct device *hwmon_dev;
- enum hl_pm_mng_profile pm_mng_profile;
struct hwmon_chip_info *hl_chip_info;
struct hl_dbg_device_entry hl_debugfs;
@@ -2560,9 +2684,9 @@ struct hl_device {
u64 internal_cb_va_base;
struct list_head fpriv_list;
+ struct list_head fpriv_ctrl_list;
struct mutex fpriv_list_lock;
-
- struct hl_ctx *compute_ctx;
+ struct mutex fpriv_ctrl_list_lock;
struct hl_cs_counters_atomic aggregated_cs_counters;
@@ -2577,6 +2701,11 @@ struct hl_device {
struct multi_cs_completion multi_cs_completion[
MULTI_CS_MAX_USER_CTX];
+ struct hl_clk_throttle clk_throttling;
+ struct last_error_session_info last_error;
+
+ struct hl_reset_info reset_info;
+
u32 *stream_master_qid_arr;
atomic64_t dram_used_mem;
u64 timeout_jiffies;
@@ -2587,21 +2716,17 @@ struct hl_device {
u64 last_successful_open_jif;
u64 last_open_session_duration_jif;
u64 open_counter;
- atomic_t in_reset;
- enum hl_pll_frequency curr_pll_profile;
+ u64 fw_poll_interval_usec;
+ ktime_t last_successful_open_ktime;
enum cpucp_card_types card_type;
u32 major;
u32 high_pll;
- u32 soft_reset_cnt;
- u32 hard_reset_cnt;
- u32 clk_throttling_reason;
u16 id;
u16 id_control;
u16 cpu_pci_msb_addr;
u8 disabled;
u8 late_init_done;
u8 hwmon_initialized;
- u8 hard_reset_pending;
u8 heartbeat;
u8 reset_on_lockup;
u8 dram_default_page_mapping;
@@ -2618,20 +2743,14 @@ struct hl_device {
u8 sync_stream_queue_idx;
u8 collective_mon_idx;
u8 supports_coresight;
- u8 supports_soft_reset;
- u8 allow_inference_soft_reset;
u8 supports_cb_mapping;
- u8 needs_reset;
u8 process_kill_trial_cnt;
u8 device_fini_pending;
u8 supports_staged_submission;
- u8 curr_reset_cause;
- u8 prev_reset_trigger;
- u8 reset_trigger_repeated;
- u8 skip_reset_on_timeout;
u8 device_cpu_is_halted;
u8 supports_wait_for_multi_cs;
u8 stream_master_qid_arr_size;
+ u8 is_compute_ctx_active;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -2659,6 +2778,7 @@ struct hl_device {
* wait cs are used to wait of the reserved encaps signals.
* @hdev: pointer to habanalabs device structure.
* @hw_sob: pointer to H/W SOB used in the reservation.
+ * @ctx: pointer to the user's context data structure
* @cs_seq: staged cs sequence which contains encapsulated signals
* @id: idr handler id to be used to fetch the handler info
* @q_idx: stream queue index
@@ -2669,6 +2789,7 @@ struct hl_cs_encaps_sig_handle {
struct kref refcount;
struct hl_device *hdev;
struct hl_hw_sob *hw_sob;
+ struct hl_ctx *ctx;
u64 cs_seq;
u32 id;
u32 q_idx;
@@ -2757,21 +2878,9 @@ static inline bool hl_mem_area_inside_range(u64 address, u64 size,
static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
u64 range_start_address, u64 range_end_address)
{
- u64 end_address = address + size;
+ u64 end_address = address + size - 1;
- if ((address >= range_start_address) &&
- (address < range_end_address))
- return true;
-
- if ((end_address >= range_start_address) &&
- (end_address < range_end_address))
- return true;
-
- if ((address < range_start_address) &&
- (end_address >= range_end_address))
- return true;
-
- return false;
+ return ((address <= range_end_address) && (range_start_address <= end_address));
}
int hl_device_open(struct inode *inode, struct file *filp);
@@ -2779,10 +2888,7 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_operational(struct hl_device *hdev,
enum hl_device_status *status);
enum hl_device_status hl_device_status(struct hl_device *hdev);
-int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
-int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
- enum hl_asic_type asic_type, int minor);
-void destroy_hdev(struct hl_device *hdev);
+int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable);
int hl_hw_queues_create(struct hl_device *hdev);
void hl_hw_queues_destroy(struct hl_device *hdev);
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
@@ -2821,6 +2927,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
void hl_ctx_do_release(struct kref *ref);
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_put(struct hl_ctx *ctx);
+struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev);
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
struct hl_fence **fence, u32 arr_len);
@@ -2834,7 +2941,6 @@ int hl_device_resume(struct hl_device *hdev);
int hl_device_reset(struct hl_device *hdev, u32 flags);
void hl_hpriv_get(struct hl_fpriv *hpriv);
int hl_hpriv_put(struct hl_fpriv *hpriv);
-int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
int hl_build_hwmon_channel_info(struct hl_device *hdev,
@@ -2915,6 +3021,9 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
u64 phys_addr, u32 size);
int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size);
+int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags);
+int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
+ u32 flags, u32 asid, u64 va, u64 size);
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
int hl_mmu_if_set_funcs(struct hl_device *hdev);
@@ -2969,6 +3078,10 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
enum comms_cmd cmd, unsigned int size,
bool wait_ok, u32 timeout);
+int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
+ struct cpucp_hbm_row_info *info);
+int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num);
+int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid);
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3]);
int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 949d1b5c5c41..690b763c7a95 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -153,15 +153,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
goto out_err;
}
- if (hdev->in_debug) {
- dev_err_ratelimited(hdev->dev,
- "Can't open %s because it is being debugged by another user\n",
- dev_name(hdev->dev));
- rc = -EPERM;
- goto out_err;
- }
-
- if (hdev->compute_ctx) {
+ if (hdev->is_compute_ctx_active) {
dev_dbg_ratelimited(hdev->dev,
"Can't open %s because another user is working on it\n",
dev_name(hdev->dev));
@@ -175,20 +167,17 @@ int hl_device_open(struct inode *inode, struct file *filp)
goto out_err;
}
- /* Device is IDLE at this point so it is legal to change PLLs.
- * There is no need to check anything because if the PLL is
- * already HIGH, the set function will return without doing
- * anything
- */
- hl_device_set_frequency(hdev, PLL_HIGH);
-
list_add(&hpriv->dev_node, &hdev->fpriv_list);
mutex_unlock(&hdev->fpriv_list_lock);
hl_debugfs_add_file(hpriv);
+ atomic_set(&hdev->last_error.cs_write_disable, 0);
+ atomic_set(&hdev->last_error.razwi_write_disable, 0);
+
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
+ hdev->last_successful_open_ktime = ktime_get();
return 0;
@@ -231,12 +220,11 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
hpriv->hdev = hdev;
filp->private_data = hpriv;
hpriv->filp = filp;
- hpriv->is_control = true;
nonseekable_open(inode, filp);
hpriv->taskpid = find_get_pid(current->pid);
- mutex_lock(&hdev->fpriv_list_lock);
+ mutex_lock(&hdev->fpriv_ctrl_list_lock);
if (!hl_device_operational(hdev, NULL)) {
dev_err_ratelimited(hdev->dev_ctrl,
@@ -246,13 +234,13 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
goto out_err;
}
- list_add(&hpriv->dev_node, &hdev->fpriv_list);
- mutex_unlock(&hdev->fpriv_list_lock);
+ list_add(&hpriv->dev_node, &hdev->fpriv_ctrl_list);
+ mutex_unlock(&hdev->fpriv_ctrl_list_lock);
return 0;
out_err:
- mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_ctrl_list_lock);
filp->private_data = NULL;
put_pid(hpriv->taskpid);
@@ -263,6 +251,7 @@ out_err:
static void set_driver_behavior_per_device(struct hl_device *hdev)
{
+ hdev->pldm = 0;
hdev->fw_components = FW_TYPE_ALL_TYPES;
hdev->cpu_queues_enable = 1;
hdev->heartbeat = 1;
@@ -279,23 +268,53 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->axi_drain = 0;
}
-/*
+static void copy_kernel_module_params_to_device(struct hl_device *hdev)
+{
+ hdev->major = hl_major;
+ hdev->memory_scrub = memory_scrub;
+ hdev->reset_on_lockup = reset_on_lockup;
+ hdev->boot_error_status_mask = boot_error_status_mask;
+
+ if (timeout_locked)
+ hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
+ else
+ hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+}
+
+static int fixup_device_params(struct hl_device *hdev)
+{
+ hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
+
+ hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
+
+ hdev->stop_on_err = true;
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
+
+ /* Enable only after the initialization of the device */
+ hdev->disabled = true;
+
+ /* Set default DMA mask to 32 bits */
+ hdev->dma_mask = 32;
+
+ return 0;
+}
+
+/**
* create_hdev - create habanalabs device instance
*
* @dev: will hold the pointer to the new habanalabs device structure
* @pdev: pointer to the pci device
- * @asic_type: in case of simulator device, which device is it
- * @minor: in case of simulator device, the minor of the device
*
* Allocate memory for habanalabs device and initialize basic fields
* Identify the ASIC type
* Allocate ID (minor) for the device (only for real devices)
*/
-int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
- enum hl_asic_type asic_type, int minor)
+static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
{
+ int main_id, ctrl_id = 0, rc = 0;
struct hl_device *hdev;
- int rc, main_id, ctrl_id = 0;
*dev = NULL;
@@ -303,69 +322,39 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
if (!hdev)
return -ENOMEM;
- /* First, we must find out which ASIC are we handling. This is needed
- * to configure the behavior of the driver (kernel parameters)
- */
- if (pdev) {
- hdev->asic_type = get_asic_type(pdev->device);
- if (hdev->asic_type == ASIC_INVALID) {
- dev_err(&pdev->dev, "Unsupported ASIC\n");
- rc = -ENODEV;
- goto free_hdev;
- }
- } else {
- hdev->asic_type = asic_type;
- }
-
- if (pdev)
- hdev->asic_prop.fw_security_enabled =
- is_asic_secured(hdev->asic_type);
- else
- hdev->asic_prop.fw_security_enabled = false;
+ /* can be NULL in case of simulator device */
+ hdev->pdev = pdev;
/* Assign status description string */
- strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL],
- "operational", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET],
- "in reset", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
- "disabled", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET],
- "needs reset", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL], "operational", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET], "in reset", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION], "disabled", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], "needs reset", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
"in device creation", HL_STR_MAX);
- hdev->major = hl_major;
- hdev->reset_on_lockup = reset_on_lockup;
- hdev->memory_scrub = memory_scrub;
- hdev->boot_error_status_mask = boot_error_status_mask;
- hdev->stop_on_err = true;
+	/* First, we must find out which ASIC we are handling. This is needed
+ * to configure the behavior of the driver (kernel parameters)
+ */
+ hdev->asic_type = get_asic_type(pdev->device);
+ if (hdev->asic_type == ASIC_INVALID) {
+ dev_err(&pdev->dev, "Unsupported ASIC\n");
+ rc = -ENODEV;
+ goto free_hdev;
+ }
- hdev->pldm = 0;
+ copy_kernel_module_params_to_device(hdev);
set_driver_behavior_per_device(hdev);
- hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
- hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
-
- if (timeout_locked)
- hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
- else
- hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
-
- hdev->disabled = true;
- hdev->pdev = pdev; /* can be NULL in case of simulator device */
-
- /* Set default DMA mask to 32 bits */
- hdev->dma_mask = 32;
+ fixup_device_params(hdev);
mutex_lock(&hl_devs_idr_lock);
/* Always save 2 numbers, 1 for main device and 1 for control.
* They must be consecutive
*/
- main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
- GFP_KERNEL);
+ main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS, GFP_KERNEL);
if (main_id >= 0)
ctrl_id = idr_alloc(&hl_devs_idr, hdev, main_id + 1,
@@ -405,7 +394,7 @@ free_hdev:
* @dev: pointer to the habanalabs device structure
*
*/
-void destroy_hdev(struct hl_device *hdev)
+static void destroy_hdev(struct hl_device *hdev)
{
/* Remove device from the device list */
mutex_lock(&hl_devs_idr_lock);
@@ -444,7 +433,7 @@ static int hl_pmops_resume(struct device *dev)
return hl_device_resume(hdev);
}
-/*
+/**
* hl_pci_probe - probe PCI habanalabs devices
*
* @pdev: pointer to pci device
@@ -454,8 +443,7 @@ static int hl_pmops_resume(struct device *dev)
* Create a new habanalabs device and initialize it according to the
* device's type
*/
-static int hl_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int hl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hl_device *hdev;
int rc;
@@ -464,7 +452,7 @@ static int hl_pci_probe(struct pci_dev *pdev,
" device found [%04x:%04x] (rev %x)\n",
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
- rc = create_hdev(&hdev, pdev, ASIC_INVALID, -1);
+ rc = create_hdev(&hdev, pdev);
if (rc)
return rc;
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 86c3257d9ae1..3ba3a8ffda3e 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -158,7 +158,7 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}
-static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
+static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
{
struct hl_debug_params *params;
void *input = NULL, *output = NULL;
@@ -200,7 +200,7 @@ static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
params->output_size = args->output_size;
}
- rc = hdev->asic_funcs->debug_coresight(hdev, params);
+ rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
if (rc) {
dev_err(hdev->dev,
"debug coresight operation failed %d\n", rc);
@@ -269,8 +269,8 @@ static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
- reset_count.soft_reset_cnt = hdev->soft_reset_cnt;
+ reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
+ reset_count.soft_reset_cnt = hdev->reset_info.soft_reset_cnt;
return copy_to_user(out, &reset_count,
min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
@@ -313,15 +313,38 @@ static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
struct hl_info_clk_throttle clk_throttle = {0};
+ ktime_t end_time, zero_time = ktime_set(0, 0);
u32 max_size = args->return_size;
- void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int i;
if ((!max_size) || (!out))
return -EINVAL;
- clk_throttle.clk_throttling_reason = hdev->clk_throttling_reason;
+ mutex_lock(&hdev->clk_throttling.lock);
+
+ clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;
+
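+	/* For each throttling type that was ever asserted, report when it
+	 * started and for how long; a window that is still open (end == 0)
+	 * is measured against the current time.
+	 */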
+ for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
+ if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
+ continue;
+
+ clk_throttle.clk_throttling_timestamp_us[i] =
+ ktime_to_us(hdev->clk_throttling.timestamp[i].start);
+
+ if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
+ end_time = hdev->clk_throttling.timestamp[i].end;
+ else
+ end_time = ktime_get();
+
+ clk_throttle.clk_throttling_duration_ns[i] =
+ ktime_to_ns(ktime_sub(end_time,
+ hdev->clk_throttling.timestamp[i].start));
+
+ }
+ mutex_unlock(&hdev->clk_throttling.lock);
return copy_to_user(out, &clk_throttle,
min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
@@ -480,6 +503,94 @@ static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
}
+static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ u32 pend_rows_num = 0;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &pend_rows_num,
+ min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
+}
+
+static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ struct cpucp_hbm_row_info info = {0};
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hl_fw_dram_replaced_row_get(hdev, &info);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
+static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_info_last_err_open_dev_time info = {0};
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ info.timestamp = ktime_to_ns(hdev->last_error.open_dev_timestamp);
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
+static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_info_cs_timeout_event info = {0};
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ info.seq = hdev->last_error.cs_timeout_seq;
+ info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout_timestamp);
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
+static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ struct hl_info_razwi_event info = {0};
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ info.timestamp = ktime_to_ns(hdev->last_error.razwi_timestamp);
+ info.addr = hdev->last_error.razwi_addr;
+ info.engine_id_1 = hdev->last_error.razwi_engine_id_1;
+ info.engine_id_2 = hdev->last_error.razwi_engine_id_2;
+ info.no_engine_id = hdev->last_error.razwi_non_engine_initiator;
+ info.error_type = hdev->last_error.razwi_type;
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -503,6 +614,33 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_RESET_COUNT:
return get_reset_count(hdev, args);
+ case HL_INFO_HW_EVENTS:
+ return hw_events_info(hdev, false, args);
+
+ case HL_INFO_HW_EVENTS_AGGREGATE:
+ return hw_events_info(hdev, true, args);
+
+ case HL_INFO_CS_COUNTERS:
+ return cs_counters_info(hpriv, args);
+
+ case HL_INFO_CLK_THROTTLE_REASON:
+ return clk_throttle_info(hpriv, args);
+
+ case HL_INFO_SYNC_MANAGER:
+ return sync_manager_info(hpriv, args);
+
+ case HL_INFO_OPEN_STATS:
+ return open_stats_info(hpriv, args);
+
+ case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
+ return last_err_open_dev_info(hpriv, args);
+
+ case HL_INFO_CS_TIMEOUT_EVENT:
+ return cs_timeout_info(hpriv, args);
+
+ case HL_INFO_RAZWI_EVENT:
+ return razwi_info(hpriv, args);
+
default:
break;
}
@@ -515,10 +653,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
}
switch (args->op) {
- case HL_INFO_HW_EVENTS:
- rc = hw_events_info(hdev, false, args);
- break;
-
case HL_INFO_DRAM_USAGE:
rc = dram_usage_info(hpriv, args);
break;
@@ -531,10 +665,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
rc = device_utilization(hdev, args);
break;
- case HL_INFO_HW_EVENTS_AGGREGATE:
- rc = hw_events_info(hdev, true, args);
- break;
-
case HL_INFO_CLK_RATE:
rc = get_clk_rate(hdev, args);
break;
@@ -542,18 +672,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_TIME_SYNC:
return time_sync_info(hdev, args);
- case HL_INFO_CS_COUNTERS:
- return cs_counters_info(hpriv, args);
-
case HL_INFO_PCI_COUNTERS:
return pci_counters_info(hpriv, args);
- case HL_INFO_CLK_THROTTLE_REASON:
- return clk_throttle_info(hpriv, args);
-
- case HL_INFO_SYNC_MANAGER:
- return sync_manager_info(hpriv, args);
-
case HL_INFO_TOTAL_ENERGY:
return total_energy_consumption_info(hpriv, args);
@@ -563,12 +684,16 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_POWER:
return power_info(hpriv, args);
- case HL_INFO_OPEN_STATS:
- return open_stats_info(hpriv, args);
+
+ case HL_INFO_DRAM_REPLACED_ROWS:
+ return dram_replaced_rows_info(hpriv, args);
+
+ case HL_INFO_DRAM_PENDING_ROWS:
+ return dram_pending_rows_info(hpriv, args);
default:
dev_err(dev, "Invalid request %d\n", args->op);
- rc = -ENOTTY;
+ rc = -EINVAL;
break;
}
@@ -613,16 +738,17 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
"Rejecting debug configuration request because device not in debug mode\n");
return -EFAULT;
}
- args->input_size =
- min(args->input_size, hl_debug_struct_size[args->op]);
- rc = debug_coresight(hdev, args);
+ args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
+ rc = debug_coresight(hdev, hpriv->ctx, args);
break;
+
case HL_DEBUG_OP_SET_MODE:
- rc = hl_device_set_debug_mode(hdev, (bool) args->enable);
+ rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
break;
+
default:
dev_err(hdev->dev, "Invalid request %d\n", args->op);
- rc = -ENOTTY;
+ rc = -EINVAL;
break;
}
@@ -649,7 +775,6 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
const struct hl_ioctl_desc *ioctl, struct device *dev)
{
struct hl_fpriv *hpriv = filep->private_data;
- struct hl_device *hdev = hpriv->hdev;
unsigned int nr = _IOC_NR(cmd);
char stack_kdata[128] = {0};
char *kdata = NULL;
@@ -658,12 +783,6 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
u32 hl_size;
int retcode;
- if (hdev->hard_reset_pending) {
- dev_crit_ratelimited(dev,
- "Device HARD reset pending! Please close FD\n");
- return -ENODEV;
- }
-
/* Do not trust userspace, use our own definition */
func = ioctl->func;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 0743319b10c7..6103e479e855 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -429,6 +429,9 @@ static int init_signal_cs(struct hl_device *hdev,
rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,
false);
+ job->cs->sob_addr_offset = hw_sob->sob_addr;
+ job->cs->initial_sob_count = prop->next_sob_val - 1;
+
return rc;
}
@@ -571,7 +574,7 @@ static int encaps_sig_first_staged_cs_handler
struct hl_encaps_signals_mgr *mgr;
int rc = 0;
- mgr = &hdev->compute_ctx->sig_mgr;
+ mgr = &cs->ctx->sig_mgr;
spin_lock(&mgr->lock);
encaps_sig_hdl = idr_find(&mgr->handles, cs->encaps_sig_hdl_id);
diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index e33f65be8a00..57f5d2c48330 100644
--- a/drivers/misc/habanalabs/common/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
@@ -10,17 +10,148 @@
#include <linux/pci.h>
#include <linux/hwmon.h>
-#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
+#define HWMON_NR_SENSOR_TYPES (hwmon_max)
-int hl_build_hwmon_channel_info(struct hl_device *hdev,
- struct cpucp_sensor *sensors_arr)
+#ifdef _HAS_HWMON_HWMON_T_ENABLE
+
+static u32 fixup_flags_legacy_fw(struct hl_device *hdev, enum hwmon_sensor_types type,
+ u32 cpucp_flags)
{
- u32 counts[HWMON_NR_SENSOR_TYPES] = {0};
- u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL};
+ u32 flags;
+
+ switch (type) {
+ case hwmon_temp:
+ flags = (cpucp_flags << 1) | HWMON_T_ENABLE;
+ break;
+
+ case hwmon_in:
+ flags = (cpucp_flags << 1) | HWMON_I_ENABLE;
+ break;
+
+ case hwmon_curr:
+ flags = (cpucp_flags << 1) | HWMON_C_ENABLE;
+ break;
+
+ case hwmon_fan:
+ flags = (cpucp_flags << 1) | HWMON_F_ENABLE;
+ break;
+
+ case hwmon_power:
+ flags = (cpucp_flags << 1) | HWMON_P_ENABLE;
+ break;
+
+ case hwmon_pwm:
+ /* enable bit was here from day 1, so no need to adjust */
+ flags = cpucp_flags;
+ break;
+
+ default:
+ dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type);
+ flags = cpucp_flags;
+ break;
+ }
+
+ return flags;
+}
+
+static u32 fixup_attr_legacy_fw(u32 attr)
+{
+ return (attr - 1);
+}
+
+#else
+
+static u32 fixup_flags_legacy_fw(struct hl_device *hdev, enum hwmon_sensor_types type,
+ u32 cpucp_flags)
+{
+ return cpucp_flags;
+}
+
+static u32 fixup_attr_legacy_fw(u32 attr)
+{
+ return attr;
+}
+
+#endif /* !_HAS_HWMON_HWMON_T_ENABLE */
+
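+/*
+ * Worked example of the legacy fixup above (bit positions illustrative of
+ * the pre-5.6 hwmon enum): a legacy f/w that reports BIT(0) for the temp
+ * input attribute is remapped to (BIT(0) << 1) | HWMON_T_ENABLE, since
+ * the kernel inserted a new "enable" attribute at position 0 and shifted
+ * every other attribute up by one. Symmetrically, fixup_attr_legacy_fw()
+ * decrements attr values coming from the hwmon core before they are sent
+ * to such f/w.
+ */
+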
+static u32 adjust_hwmon_flags(struct hl_device *hdev, enum hwmon_sensor_types type, u32 cpucp_flags)
+{
+ u32 flags, cpucp_input_val;
+ bool use_cpucp_enum;
+
+ use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
+
+	/* If the f/w is using its own enum, we need to check whether the property values are
+	 * aligned. If not, we need to adjust the values to the new format used in the
+	 * kernel since 5.6 (enum values were incremented by 1 by adding a new enable value).
+ */
+ if (use_cpucp_enum) {
+ switch (type) {
+ case hwmon_temp:
+ cpucp_input_val = cpucp_temp_input;
+ if (cpucp_input_val == hwmon_temp_input)
+ flags = cpucp_flags;
+ else
+ flags = (cpucp_flags << 1) | HWMON_T_ENABLE;
+ break;
+
+ case hwmon_in:
+ cpucp_input_val = cpucp_in_input;
+ if (cpucp_input_val == hwmon_in_input)
+ flags = cpucp_flags;
+ else
+ flags = (cpucp_flags << 1) | HWMON_I_ENABLE;
+ break;
+
+ case hwmon_curr:
+ cpucp_input_val = cpucp_curr_input;
+ if (cpucp_input_val == hwmon_curr_input)
+ flags = cpucp_flags;
+ else
+ flags = (cpucp_flags << 1) | HWMON_C_ENABLE;
+ break;
+
+ case hwmon_fan:
+ cpucp_input_val = cpucp_fan_input;
+ if (cpucp_input_val == hwmon_fan_input)
+ flags = cpucp_flags;
+ else
+ flags = (cpucp_flags << 1) | HWMON_F_ENABLE;
+ break;
+
+ case hwmon_pwm:
+ /* enable bit was here from day 1, so no need to adjust */
+ flags = cpucp_flags;
+ break;
+
+ case hwmon_power:
+ cpucp_input_val = CPUCP_POWER_INPUT;
+ if (cpucp_input_val == hwmon_power_input)
+ flags = cpucp_flags;
+ else
+ flags = (cpucp_flags << 1) | HWMON_P_ENABLE;
+ break;
+
+ default:
+ dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type);
+ flags = cpucp_flags;
+ break;
+ }
+ } else {
+ flags = fixup_flags_legacy_fw(hdev, type, cpucp_flags);
+ }
+
+ return flags;
+}
+
+int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor *sensors_arr)
+{
+ u32 num_sensors_for_type, flags, num_active_sensor_types = 0, arr_size = 0, *curr_arr;
u32 sensors_by_type_next_index[HWMON_NR_SENSOR_TYPES] = {0};
+ u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL};
struct hwmon_channel_info **channels_info;
- u32 num_sensors_for_type, num_active_sensor_types = 0,
- arr_size = 0, *curr_arr;
+ u32 counts[HWMON_NR_SENSOR_TYPES] = {0};
enum hwmon_sensor_types type;
int rc, i, j;
@@ -31,8 +162,7 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
break;
if (type >= HWMON_NR_SENSOR_TYPES) {
- dev_err(hdev->dev,
- "Got wrong sensor type %d from device\n", type);
+ dev_err(hdev->dev, "Got wrong sensor type %d from device\n", type);
return -EINVAL;
}
@@ -45,8 +175,9 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
continue;
num_sensors_for_type = counts[i] + 1;
- curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr),
- GFP_KERNEL);
+ dev_dbg(hdev->dev, "num_sensors_for_type %d = %d\n", i, num_sensors_for_type);
+
+ curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr), GFP_KERNEL);
if (!curr_arr) {
rc = -ENOMEM;
goto sensors_type_err;
@@ -59,20 +190,18 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
for (i = 0 ; i < arr_size ; i++) {
type = le32_to_cpu(sensors_arr[i].type);
curr_arr = sensors_by_type[type];
- curr_arr[sensors_by_type_next_index[type]++] =
- le32_to_cpu(sensors_arr[i].flags);
+ flags = adjust_hwmon_flags(hdev, type, le32_to_cpu(sensors_arr[i].flags));
+ curr_arr[sensors_by_type_next_index[type]++] = flags;
}
- channels_info = kcalloc(num_active_sensor_types + 1,
- sizeof(*channels_info), GFP_KERNEL);
+ channels_info = kcalloc(num_active_sensor_types + 1, sizeof(*channels_info), GFP_KERNEL);
if (!channels_info) {
rc = -ENOMEM;
goto channels_info_array_err;
}
for (i = 0 ; i < num_active_sensor_types ; i++) {
- channels_info[i] = kzalloc(sizeof(*channels_info[i]),
- GFP_KERNEL);
+ channels_info[i] = kzalloc(sizeof(*channels_info[i]), GFP_KERNEL);
if (!channels_info[i]) {
rc = -ENOMEM;
goto channel_info_err;
@@ -88,18 +217,19 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
j++;
}
- hdev->hl_chip_info->info =
- (const struct hwmon_channel_info **)channels_info;
+ hdev->hl_chip_info->info = (const struct hwmon_channel_info **)channels_info;
return 0;
channel_info_err:
- for (i = 0 ; i < num_active_sensor_types ; i++)
+ for (i = 0 ; i < num_active_sensor_types ; i++) {
if (channels_info[i]) {
kfree(channels_info[i]->config);
kfree(channels_info[i]);
}
+ }
kfree(channels_info);
+
channels_info_array_err:
sensors_type_err:
for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++)
@@ -112,14 +242,16 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
struct hl_device *hdev = dev_get_drvdata(dev);
- int rc;
+ bool use_cpucp_enum;
u32 cpucp_attr;
- bool use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
- CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
+ int rc;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
+ use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
+
switch (type) {
case hwmon_temp:
switch (attr) {
@@ -151,7 +283,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_temperature(hdev, channel, cpucp_attr, val);
else
- rc = hl_get_temperature(hdev, channel, attr, val);
+ rc = hl_get_temperature(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_in:
switch (attr) {
@@ -174,7 +306,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_voltage(hdev, channel, cpucp_attr, val);
else
- rc = hl_get_voltage(hdev, channel, attr, val);
+ rc = hl_get_voltage(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_curr:
switch (attr) {
@@ -197,7 +329,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_current(hdev, channel, cpucp_attr, val);
else
- rc = hl_get_current(hdev, channel, attr, val);
+ rc = hl_get_current(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_fan:
switch (attr) {
@@ -217,7 +349,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_fan_speed(hdev, channel, cpucp_attr, val);
else
- rc = hl_get_fan_speed(hdev, channel, attr, val);
+ rc = hl_get_fan_speed(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_pwm:
switch (attr) {
@@ -234,6 +366,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_pwm_info(hdev, channel, cpucp_attr, val);
else
+ /* no need for fixup as pwm was aligned from day 1 */
rc = hl_get_pwm_info(hdev, channel, attr, val);
break;
case hwmon_power:
@@ -251,7 +384,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_power(hdev, channel, cpucp_attr, val);
else
- rc = hl_get_power(hdev, channel, attr, val);
+ rc = hl_get_power(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
default:
return -EINVAL;
@@ -286,7 +419,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
hl_set_temperature(hdev, channel, cpucp_attr, val);
else
- hl_set_temperature(hdev, channel, attr, val);
+ hl_set_temperature(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_pwm:
switch (attr) {
@@ -303,6 +436,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
hl_set_pwm_info(hdev, channel, cpucp_attr, val);
else
+ /* no need for fixup as pwm was aligned from day 1 */
hl_set_pwm_info(hdev, channel, attr, val);
break;
case hwmon_in:
@@ -317,7 +451,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
hl_set_voltage(hdev, channel, cpucp_attr, val);
else
- hl_set_voltage(hdev, channel, attr, val);
+ hl_set_voltage(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_curr:
switch (attr) {
@@ -331,7 +465,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
hl_set_current(hdev, channel, cpucp_attr, val);
else
- hl_set_current(hdev, channel, attr, val);
+ hl_set_current(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_power:
switch (attr) {
@@ -345,7 +479,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
hl_set_power(hdev, channel, cpucp_attr, val);
else
- hl_set_power(hdev, channel, attr, val);
+ hl_set_power(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
default:
return -EINVAL;
@@ -444,6 +578,9 @@ int hl_get_temperature(struct hl_device *hdev,
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
+ dev_dbg(hdev->dev, "get temp, ctl 0x%x, sensor %d, type %d\n",
+ pkt.ctl, pkt.sensor_index, pkt.type);
+
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
@@ -677,12 +814,18 @@ int hl_set_power(struct hl_device *hdev,
int sensor_index, u32 attr, long value)
{
struct cpucp_packet pkt;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
+ if (prop->use_get_power_for_reset_history)
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
+ else
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_SET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index 96d82b682674..1b6bdc900c26 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -145,8 +145,12 @@ static void handle_user_cq(struct hl_device *hdev,
spin_lock(&user_cq->wait_list_lock);
list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node) {
- pend->fence.timestamp = now;
- complete_all(&pend->fence.completion);
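+		/* Wake a waiter only once its completion-queue counter has
+		 * reached the target value; a pend entry without a kernel
+		 * address has no counter to poll and is completed as before.
+		 */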
+ if ((pend->cq_kernel_addr &&
+ *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
+ !pend->cq_kernel_addr) {
+ pend->fence.timestamp = now;
+ complete_all(&pend->fence.completion);
+ }
}
spin_unlock(&user_cq->wait_list_lock);
}
@@ -245,10 +249,8 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
*/
dma_rmb();
- if (hdev->disabled) {
- dev_warn(hdev->dev,
- "Device disabled but received IRQ %d for EQ\n",
- irq);
+ if (hdev->disabled && !hdev->reset_info.is_in_soft_reset) {
+ dev_warn(hdev->dev, "Device disabled but received an EQ event\n");
goto skip_irq;
}
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 9bd626a00de3..c1eefaebacb6 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -316,7 +316,7 @@ static int free_phys_pg_pack(struct hl_device *hdev,
}
if (rc && !hdev->disabled)
- hl_device_reset(hdev, HL_RESET_HARD);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
end:
kvfree(phys_pg_pack->pages);
@@ -477,7 +477,7 @@ static int add_va_block_locked(struct hl_device *hdev,
struct list_head *va_list, u64 start, u64 end)
{
struct hl_vm_va_block *va_block, *res = NULL;
- u64 size = end - start;
+ u64 size = end - start + 1;
print_va_list_locked(hdev, va_list);
@@ -518,7 +518,7 @@ static int add_va_block_locked(struct hl_device *hdev,
/**
* add_va_block() - wrapper for add_va_block_locked.
* @hdev: pointer to the habanalabs device structure.
- * @va_list: pointer to the virtual addresses block list.
+ * @va_range: pointer to the virtual addresses range object.
* @start: start virtual address.
* @end: end virtual address.
*
@@ -538,8 +538,11 @@ static inline int add_va_block(struct hl_device *hdev,
}
/**
- * is_hint_crossing_range() - check if hint address crossing specified reserved
- * range.
+ * is_hint_crossing_range() - check if a hint address crosses the specified reserved range.
+ * @range_type: virtual space range type.
+ * @start_addr: start virtual address.
+ * @size: block size.
+ * @prop: asic properties structure to retrieve reserved ranges from.
*/
static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
u64 start_addr, u32 size, struct asic_fixed_properties *prop) {
@@ -644,7 +647,7 @@ static u64 get_va_block(struct hl_device *hdev,
continue;
}
- valid_size = va_block->end - valid_start;
+ valid_size = va_block->end - valid_start + 1;
if (valid_size < size)
continue;
@@ -707,7 +710,7 @@ static u64 get_va_block(struct hl_device *hdev,
if (new_va_block->size > size) {
new_va_block->start += size;
- new_va_block->size = new_va_block->end - new_va_block->start;
+ new_va_block->size = new_va_block->end - new_va_block->start + 1;
} else {
list_del(&new_va_block->node);
kfree(new_va_block);
@@ -749,6 +752,7 @@ u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
/**
* hl_get_va_range_type() - get va_range type for the given address and size.
+ * @ctx: context to fetch va_range from.
* @address: the start address of the area we want to validate.
* @size: the size in bytes of the area we want to validate.
* @type: returned va_range type.
@@ -776,8 +780,8 @@ static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
* hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
* @hdev: pointer to the habanalabs device structure
* @ctx: pointer to the context structure.
- * @start: start virtual address.
- * @end: end virtual address.
+ * @start_addr: start virtual address.
+ * @size: number of bytes to unreserve.
*
* This function does the following:
* - Takes the list lock and calls add_va_block_locked.
@@ -1201,17 +1205,13 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto map_err;
}
- rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, false,
- *vm_type, ctx->asid, ret_vaddr, phys_pg_pack->total_size);
+ rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
+ ctx->asid, ret_vaddr, phys_pg_pack->total_size);
mutex_unlock(&ctx->mmu_lock);
- if (rc) {
- dev_err(hdev->dev,
- "mapping handle %u failed due to MMU cache invalidation\n",
- handle);
+ if (rc)
goto map_err;
- }
ret_vaddr += phys_pg_pack->offset;
@@ -1349,9 +1349,8 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
* at the loop end rather than for each iteration
*/
if (!ctx_free)
- rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, true,
- *vm_type, ctx->asid, vaddr,
- phys_pg_pack->total_size);
+ rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
+ phys_pg_pack->total_size);
mutex_unlock(&ctx->mmu_lock);
@@ -1364,11 +1363,6 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
if (!ctx_free) {
int tmp_rc;
- if (rc)
- dev_err(hdev->dev,
- "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
- vaddr);
-
tmp_rc = add_va_block(hdev, va_range, vaddr,
vaddr + phys_pg_pack->total_size - 1);
if (tmp_rc) {
@@ -2037,7 +2031,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
- rc = -ENOTTY;
+ rc = -EINVAL;
break;
}
@@ -2162,7 +2156,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
- rc = -ENOTTY;
+ rc = -EINVAL;
break;
}
@@ -2339,6 +2333,8 @@ void hl_userptr_delete_list(struct hl_device *hdev,
/**
* hl_userptr_is_pinned() - returns whether the given userptr is pinned.
* @hdev: pointer to the habanalabs device structure.
+ * @addr: user address to check.
+ * @size: user block size to check.
* @userptr_list: pointer to the list to clear.
* @userptr: pointer to userptr to check.
*
@@ -2361,9 +2357,10 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
/**
* va_range_init() - initialize virtual addresses range.
* @hdev: pointer to the habanalabs device structure.
- * @va_range: pointer to the range to initialize.
+ * @va_ranges: pointer to va_ranges array.
* @start: range start address.
* @end: range end address.
+ * @page_size: page size for this va_range.
*
* This function does the following:
* - Initializes the virtual addresses list of the given range with the given
@@ -2388,8 +2385,14 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
start += PAGE_SIZE;
}
- if (end & (PAGE_SIZE - 1))
- end &= PAGE_MASK;
+ /*
+ * The end of the range is inclusive, hence we need to align it
+ * to the end of the last full page in the range. For example if
+ * end = 0x3ff5 with page size 0x1000, we need to align it to
+	 * 0x2fff. The remaining 0xff6 bytes do not form a full page.
+ */
+ if ((end + 1) & (PAGE_SIZE - 1))
+ end = ((end + 1) & PAGE_MASK) - 1;
}
if (start >= end) {
@@ -2414,7 +2417,7 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
/**
* va_range_fini() - clear a virtual addresses range.
* @hdev: pointer to the habanalabs structure.
- * va_range: pointer to virtual addresses rang.e
+ * @va_range: pointer to virtual addresses range.
*
* This function does the following:
* - Frees the virtual addresses block list and its lock.
@@ -2434,12 +2437,15 @@ static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
* @ctx: pointer to the habanalabs context structure.
* @host_range_start: host virtual addresses range start.
* @host_range_end: host virtual addresses range end.
+ * @host_page_size: host page size.
* @host_huge_range_start: host virtual addresses range start for memory
* allocated with huge pages.
* @host_huge_range_end: host virtual addresses range end for memory allocated
* with huge pages.
+ * @host_huge_page_size: host huge page size.
* @dram_range_start: dram virtual addresses range start.
* @dram_range_end: dram virtual addresses range end.
+ * @dram_page_size: dram page size.
*
* This function initializes the following:
* - MMU for context.
@@ -2564,14 +2570,14 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
return 0;
dram_range_start = prop->dmmu.start_addr;
- dram_range_end = prop->dmmu.end_addr;
+ dram_range_end = prop->dmmu.end_addr - 1;
dram_page_size = prop->dram_page_size ?
prop->dram_page_size : prop->dmmu.page_size;
host_range_start = prop->pmmu.start_addr;
- host_range_end = prop->pmmu.end_addr;
+ host_range_end = prop->pmmu.end_addr - 1;
host_page_size = prop->pmmu.page_size;
host_huge_range_start = prop->pmmu_huge.start_addr;
- host_huge_range_end = prop->pmmu_huge.end_addr;
+ host_huge_range_end = prop->pmmu_huge.end_addr - 1;
host_huge_page_size = prop->pmmu_huge.page_size;
return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
@@ -2618,7 +2624,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
* Clearly something went wrong on hard reset so no point in printing
* another side effect error
*/
- if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
+ if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash))
dev_dbg(hdev->dev,
"user released device without removing its memory mappings\n");
@@ -2633,8 +2639,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
mutex_lock(&ctx->mmu_lock);
/* invalidate the cache once after the unmapping loop */
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
+ hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
+ hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
mutex_unlock(&ctx->mmu_lock);
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index aa96917f62e5..9153a1f55175 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -637,3 +637,28 @@ u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr)
{
return addr;
}
+
+int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
+{
+ int rc;
+
+ rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
+ if (rc)
+ dev_err_ratelimited(hdev->dev, "MMU cache invalidation failed\n");
+
+ return rc;
+}
+
+int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
+ u32 flags, u32 asid, u64 va, u64 size)
+{
+ int rc;
+
+ rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, is_hard, flags,
+ asid, va, size);
+ if (rc)
+ dev_err_ratelimited(hdev->dev, "MMU cache range invalidation failed\n");
+
+ return rc;
+}
+
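+/*
+ * Typical call site (cf. the memory.c hunks above): callers take
+ * ctx->mmu_lock, call hl_mmu_invalidate_cache_range(hdev, is_hard, flags,
+ * ctx->asid, va, size) and drop the lock; the ratelimited error print now
+ * lives here, so call sites no longer log the failure themselves.
+ */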
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index 0f536f79dd9c..6134b6ae7615 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -269,7 +269,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
num_of_hop3 = prop->dram_size_for_default_page_mapping;
do_div(num_of_hop3, prop->dram_page_size);
- do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+ do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
/* add hop1 and hop2 */
total_hops = num_of_hop3 + 2;
@@ -330,7 +330,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
- for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
write_final_pte(ctx, hop3_pte_addr, pte_val);
get_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
@@ -369,7 +369,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
num_of_hop3 = prop->dram_size_for_default_page_mapping;
do_div(num_of_hop3, prop->dram_page_size);
- do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+ do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
hop0_addr = get_hop0_addr(ctx);
/* add hop1 and hop2 */
@@ -379,7 +379,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
- for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
clear_pte(ctx, hop3_pte_addr);
put_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
@@ -573,7 +573,7 @@ static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
- is_huge = curr_pte & LAST_MASK;
+ is_huge = curr_pte & mmu_prop->last_mask;
if (is_dram_addr && !is_huge) {
dev_err(hdev->dev,
@@ -597,7 +597,7 @@ static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- HOP_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
PAGE_PRESENT_MASK;
if (curr_pte == default_pte) {
dev_err(hdev->dev,
@@ -729,7 +729,7 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- HOP_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
PAGE_PRESENT_MASK;
if (curr_pte != default_pte) {
@@ -769,7 +769,7 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
goto err;
}
- curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
+ curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
| PAGE_PRESENT_MASK;
if (is_huge)
@@ -930,7 +930,7 @@ static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
return -EFAULT;
- if (hops->hop_info[i].hop_pte_val & LAST_MASK)
+ if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
break;
}
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 42c1769ad25d..45c715325e2a 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -139,7 +139,7 @@ static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "0x%08x\n",
- hdev->asic_prop.cpucp_info.cpld_version);
+ le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_version));
}
static ssize_t cpucp_kernel_ver_show(struct device *dev,
@@ -163,8 +163,13 @@ static ssize_t infineon_ver_show(struct device *dev,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "0x%04x\n",
- hdev->asic_prop.cpucp_info.infineon_version);
+ if (hdev->asic_prop.cpucp_info.infineon_second_stage_version)
+ return sprintf(buf, "%#04x %#04x\n",
+ le32_to_cpu(hdev->asic_prop.cpucp_info.infineon_version),
+ le32_to_cpu(hdev->asic_prop.cpucp_info.infineon_second_stage_version));
+ else
+ return sprintf(buf, "%#04x\n",
+ le32_to_cpu(hdev->asic_prop.cpucp_info.infineon_version));
}
static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr,
@@ -206,7 +211,7 @@ static ssize_t soft_reset_store(struct device *dev,
goto out;
}
- if (!hdev->allow_inference_soft_reset) {
+ if (!hdev->asic_prop.allow_inference_soft_reset) {
dev_err(hdev->dev, "Device does not support inference soft-reset\n");
goto out;
}
@@ -236,7 +241,7 @@ static ssize_t hard_reset_store(struct device *dev,
dev_warn(hdev->dev, "Hard-Reset requested through sysfs\n");
- hl_device_reset(hdev, HL_RESET_HARD);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
out:
return count;
@@ -298,7 +303,7 @@ static ssize_t soft_reset_cnt_show(struct device *dev,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->soft_reset_cnt);
+ return sprintf(buf, "%d\n", hdev->reset_info.soft_reset_cnt);
}
static ssize_t hard_reset_cnt_show(struct device *dev,
@@ -306,7 +311,7 @@ static ssize_t hard_reset_cnt_show(struct device *dev,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->hard_reset_cnt);
+ return sprintf(buf, "%d\n", hdev->reset_info.hard_reset_cnt);
}
static ssize_t max_power_show(struct device *dev, struct device_attribute *attr,
@@ -419,8 +424,6 @@ static struct attribute *hl_dev_attrs[] = {
&dev_attr_max_power.attr,
&dev_attr_pci_addr.attr,
&dev_attr_preboot_btl_ver.attr,
- &dev_attr_soft_reset.attr,
- &dev_attr_soft_reset_cnt.attr,
&dev_attr_status.attr,
&dev_attr_thermal_ver.attr,
&dev_attr_uboot_ver.attr,
@@ -445,15 +448,25 @@ static const struct attribute_group *hl_dev_attr_groups[] = {
NULL,
};
+static struct attribute *hl_dev_inference_attrs[] = {
+ &dev_attr_soft_reset.attr,
+ &dev_attr_soft_reset_cnt.attr,
+ NULL,
+};
+
+static struct attribute_group hl_dev_inference_attr_group = {
+ .attrs = hl_dev_inference_attrs,
+};
+
+static const struct attribute_group *hl_dev_inference_attr_groups[] = {
+ &hl_dev_inference_attr_group,
+ NULL,
+};
+
int hl_sysfs_init(struct hl_device *hdev)
{
int rc;
- if (hdev->asic_type == ASIC_GOYA)
- hdev->pm_mng_profile = PM_AUTO;
- else
- hdev->pm_mng_profile = PM_MANUAL;
-
hdev->max_power = hdev->asic_prop.max_power_default;
hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group);
@@ -465,10 +478,25 @@ int hl_sysfs_init(struct hl_device *hdev)
return rc;
}
+ if (!hdev->asic_prop.allow_inference_soft_reset)
+ return 0;
+
+ rc = device_add_groups(hdev->dev, hl_dev_inference_attr_groups);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add groups to device, error %d\n", rc);
+ return rc;
+ }
+
return 0;
}
void hl_sysfs_fini(struct hl_device *hdev)
{
device_remove_groups(hdev->dev, hl_dev_attr_groups);
+
+ if (!hdev->asic_prop.allow_inference_soft_reset)
+ return;
+
+ device_remove_groups(hdev->dev, hl_dev_inference_attr_groups);
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 825737dfe381..013c6da2e3ca 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2020 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -593,26 +593,27 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
- prop->mmu_hop_table_size = HOP_TABLE_SIZE;
- prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
+ prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
prop->dram_supports_virtual_memory = false;
- prop->pmmu.hop0_shift = HOP0_SHIFT;
- prop->pmmu.hop1_shift = HOP1_SHIFT;
- prop->pmmu.hop2_shift = HOP2_SHIFT;
- prop->pmmu.hop3_shift = HOP3_SHIFT;
- prop->pmmu.hop4_shift = HOP4_SHIFT;
- prop->pmmu.hop0_mask = HOP0_MASK;
- prop->pmmu.hop1_mask = HOP1_MASK;
- prop->pmmu.hop2_mask = HOP2_MASK;
- prop->pmmu.hop3_mask = HOP3_MASK;
- prop->pmmu.hop4_mask = HOP4_MASK;
+ prop->pmmu.hop0_shift = MMU_V1_1_HOP0_SHIFT;
+ prop->pmmu.hop1_shift = MMU_V1_1_HOP1_SHIFT;
+ prop->pmmu.hop2_shift = MMU_V1_1_HOP2_SHIFT;
+ prop->pmmu.hop3_shift = MMU_V1_1_HOP3_SHIFT;
+ prop->pmmu.hop4_shift = MMU_V1_1_HOP4_SHIFT;
+ prop->pmmu.hop0_mask = MMU_V1_1_HOP0_MASK;
+ prop->pmmu.hop1_mask = MMU_V1_1_HOP1_MASK;
+ prop->pmmu.hop2_mask = MMU_V1_1_HOP2_MASK;
+ prop->pmmu.hop3_mask = MMU_V1_1_HOP3_MASK;
+ prop->pmmu.hop4_mask = MMU_V1_1_HOP4_MASK;
prop->pmmu.start_addr = VA_HOST_SPACE_START;
prop->pmmu.end_addr =
(VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
prop->pmmu.page_size = PAGE_SIZE_4KB;
prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
+ prop->pmmu.last_mask = LAST_MASK;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
@@ -664,6 +665,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->clk_pll_index = HL_GAUDI_MME_PLL;
prop->max_freq_value = GAUDI_MAX_CLK_FREQ;
+ prop->use_get_power_for_reset_history = true;
+
return 0;
}
@@ -878,6 +881,11 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
int rc;
if (hdev->asic_prop.fw_security_enabled) {
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr);
if (rc)
@@ -1273,6 +1281,7 @@ static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
container_of(cs->signal_fence, struct hl_cs_compl, base_fence);
struct hl_cs_compl *cs_cmpl =
container_of(cs->fence, struct hl_cs_compl, base_fence);
+ struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
struct gaudi_collective_properties *cprop;
u32 stream, queue_id, sob_group_offset;
struct gaudi_device *gaudi;
@@ -1285,10 +1294,16 @@ static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
gaudi = hdev->asic_specific;
cprop = &gaudi->collective_props;
- /* In encaps signals case the SOB info will be retrieved from
- * the handle in gaudi_collective_slave_init_job.
- */
- if (!cs->encaps_signals) {
+ if (cs->encaps_signals) {
+ cs_cmpl->hw_sob = handle->hw_sob;
+		/* At this checkpoint we only need the hw_sob pointer
+		 * for the completion check before starting to go over
+		 * the jobs of the master/slaves; the sob_value will be
+		 * taken later on in gaudi_collective_slave_init_job,
+		 * depending on each job's wait offset value.
+ */
+ cs_cmpl->sob_val = 0;
+ } else {
/* copy the SOB id and value of the signal CS */
cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
@@ -1621,6 +1636,8 @@ static int gaudi_late_init(struct hl_device *hdev)
*/
gaudi_mmu_prepare(hdev, 1);
+ hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);
+
return 0;
disable_pci_access:
@@ -4006,7 +4023,7 @@ static void gaudi_init_firmware_loader(struct hl_device *hdev)
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
/* fill common fields */
- fw_loader->linux_loaded = false;
+ fw_loader->fw_comp_loaded = FW_TYPE_NONE;
fw_loader->boot_fit_img.image_name = GAUDI_BOOT_FIT_FILE;
fw_loader->linux_img.image_name = GAUDI_LINUX_FW_FILE;
fw_loader->cpu_timeout = GAUDI_CPU_TIMEOUT_USEC;
@@ -4289,13 +4306,31 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
* via the GIC. Otherwise, we need to use COMMS or the MSG_TO_CPU
* registers in case of old F/Ws
*/
- if (hdev->fw_loader.linux_loaded) {
+ if (hdev->fw_loader.fw_comp_loaded & FW_TYPE_LINUX) {
irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_host_halt_irq);
WREG32(irq_handler_offset,
gaudi_irq_map_table[GAUDI_EVENT_HALT_MACHINE].cpu_id);
+
+		/* This is a hail-mary attempt to revive the card on the small chance that the
+		 * f/w has experienced a watchdog event, which caused it to return to preboot.
+		 * In that case, triggering reset through GIC won't help. We need to trigger the
+		 * reset as if Linux wasn't loaded.
+		 *
+		 * We do it only if the reset cause was HB, because that would be the indication
+		 * of such an event.
+		 *
+		 * In case the watchdog hasn't expired but we still got HB, this won't do any
+		 * damage.
+ */
+ if (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT) {
+ if (hdev->asic_prop.hard_reset_done_by_fw)
+ hl_fw_ask_hard_reset_without_linux(hdev);
+ else
+ hl_fw_ask_halt_machine_without_linux(hdev);
+ }
} else {
if (hdev->asic_prop.hard_reset_done_by_fw)
hl_fw_ask_hard_reset_without_linux(hdev);
@@ -6412,6 +6447,7 @@ static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
{
u32 dma_core_sts0, err_cause, cfg1, size_left, pos, size_to_dma;
struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 qm_glbl_sts0, qm_cgm_sts;
u64 dma_offset, qm_offset;
dma_addr_t dma_addr;
void *kernel_addr;
@@ -6436,14 +6472,20 @@ static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
dma_offset = dma_id * DMA_CORE_OFFSET;
qm_offset = dma_id * DMA_QMAN_OFFSET;
dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
- is_eng_idle = IS_DMA_IDLE(dma_core_sts0);
+ qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + qm_offset);
+ qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + qm_offset);
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
+ IS_DMA_IDLE(dma_core_sts0);
if (!is_eng_idle) {
dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
dma_offset = dma_id * DMA_CORE_OFFSET;
qm_offset = dma_id * DMA_QMAN_OFFSET;
dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
- is_eng_idle = IS_DMA_IDLE(dma_core_sts0);
+ qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + qm_offset);
+ qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + qm_offset);
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
+ IS_DMA_IDLE(dma_core_sts0);
if (!is_eng_idle) {
dev_err_ratelimited(hdev->dev,
@@ -6522,7 +6564,7 @@ static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- if (hdev->hard_reset_pending)
+ if (hdev->reset_info.hard_reset_pending)
return U64_MAX;
return readq(hdev->pcie_bar[HBM_BAR_ID] +
@@ -6533,7 +6575,7 @@ static void gaudi_write_pte(struct hl_device *hdev, u64 addr, u64 val)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- if (hdev->hard_reset_pending)
+ if (hdev->reset_info.hard_reset_pending)
return;
writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
@@ -6935,8 +6977,9 @@ event_not_supported:
snprintf(desc, size, "N/A");
}
-static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev,
- u32 x_y, bool is_write)
+static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, u32 x_y,
+ bool is_write, s32 *engine_id_1,
+ s32 *engine_id_2)
{
u32 dma_id[2], dma_offset, err_cause[2], mask, i;
@@ -6976,44 +7019,64 @@ static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev,
switch (x_y) {
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
- if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_0;
return "DMA0";
- else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_2;
return "DMA2";
- else
+ } else {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_0;
+ *engine_id_2 = GAUDI_ENGINE_ID_DMA_2;
return "DMA0 or DMA2";
+ }
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
- if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_1;
return "DMA1";
- else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_3;
return "DMA3";
- else
+ } else {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_1;
+ *engine_id_2 = GAUDI_ENGINE_ID_DMA_3;
return "DMA1 or DMA3";
+ }
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
- if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_4;
return "DMA4";
- else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_6;
return "DMA6";
- else
+ } else {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_4;
+ *engine_id_2 = GAUDI_ENGINE_ID_DMA_6;
return "DMA4 or DMA6";
+ }
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
- if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_5;
return "DMA5";
- else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_7;
return "DMA7";
- else
+ } else {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_5;
+ *engine_id_2 = GAUDI_ENGINE_ID_DMA_7;
return "DMA5 or DMA7";
+ }
}
unknown_initiator:
return "unknown initiator";
}
-static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
- bool is_write)
+static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool is_write,
+ u32 *engine_id_1, u32 *engine_id_2)
{
u32 val, x_y, axi_id;
@@ -7026,24 +7089,35 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
switch (x_y) {
case RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0:
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_0;
return "TPC0";
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ }
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_NIC_0;
return "NIC0";
+ }
break;
case RAZWI_INITIATOR_ID_X_Y_TPC1:
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_1;
return "TPC1";
case RAZWI_INITIATOR_ID_X_Y_MME0_0:
case RAZWI_INITIATOR_ID_X_Y_MME0_1:
+ *engine_id_1 = GAUDI_ENGINE_ID_MME_0;
return "MME0";
case RAZWI_INITIATOR_ID_X_Y_MME1_0:
case RAZWI_INITIATOR_ID_X_Y_MME1_1:
+ *engine_id_1 = GAUDI_ENGINE_ID_MME_1;
return "MME1";
case RAZWI_INITIATOR_ID_X_Y_TPC2:
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_2;
return "TPC2";
case RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC:
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_3;
return "TPC3";
+ }
+ /* PCI, CPU and PSOC do not have an engine id */
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PCI))
return "PCI";
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_CPU))
@@ -7059,32 +7133,49 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
- return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write);
+ return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write,
+ engine_id_1, engine_id_2);
case RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2:
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_4;
return "TPC4";
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ }
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_NIC_1;
return "NIC1";
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT))
+ }
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_NIC_2;
return "NIC2";
+ }
break;
case RAZWI_INITIATOR_ID_X_Y_TPC5:
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_5;
return "TPC5";
case RAZWI_INITIATOR_ID_X_Y_MME2_0:
case RAZWI_INITIATOR_ID_X_Y_MME2_1:
+ *engine_id_1 = GAUDI_ENGINE_ID_MME_2;
return "MME2";
case RAZWI_INITIATOR_ID_X_Y_MME3_0:
case RAZWI_INITIATOR_ID_X_Y_MME3_1:
+ *engine_id_1 = GAUDI_ENGINE_ID_MME_3;
return "MME3";
case RAZWI_INITIATOR_ID_X_Y_TPC6:
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_6;
return "TPC6";
case RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5:
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_7;
return "TPC7";
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ }
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_NIC_4;
return "NIC4";
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT))
+ }
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_NIC_5;
return "NIC5";
+ }
break;
default:
break;
@@ -7101,27 +7192,28 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
return "unknown initiator";
}
-static void gaudi_print_razwi_info(struct hl_device *hdev)
+static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_id_1,
+ u32 *engine_id_2)
{
if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) {
dev_err_ratelimited(hdev->dev,
"RAZWI event caused by illegal write of %s\n",
- gaudi_get_razwi_initiator_name(hdev, true));
+ gaudi_get_razwi_initiator_name(hdev, true, engine_id_1, engine_id_2));
WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0);
}
if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) {
dev_err_ratelimited(hdev->dev,
"RAZWI event caused by illegal read of %s\n",
- gaudi_get_razwi_initiator_name(hdev, false));
+ gaudi_get_razwi_initiator_name(hdev, false, engine_id_1, engine_id_2));
WREG32(mmMMU_UP_RAZWI_READ_VLD, 0);
}
}
-static void gaudi_print_mmu_error_info(struct hl_device *hdev)
+static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u8 *type)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- u64 addr;
u32 val;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
@@ -7129,24 +7221,24 @@ static void gaudi_print_mmu_error_info(struct hl_device *hdev)
val = RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE);
if (val & MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
- addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
- addr <<= 32;
- addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);
+ *addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
+ *addr <<= 32;
+ *addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);
- dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
- addr);
+ dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr);
+ *type = HL_RAZWI_PAGE_FAULT;
WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0);
}
val = RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE);
if (val & MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK) {
- addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK;
- addr <<= 32;
- addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);
+ *addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK;
+ *addr <<= 32;
+ *addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);
- dev_err_ratelimited(hdev->dev,
- "MMU access error on va 0x%llx\n", addr);
+ dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr);
+ *type = HL_RAZWI_MMU_ACCESS_ERROR;
WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0);
}
@@ -7665,15 +7757,46 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
bool razwi)
{
+ u32 engine_id_1, engine_id_2;
char desc[64] = "";
+ u64 razwi_addr = 0;
+ u8 razwi_type;
+ int rc;
+
+ /*
+ * Init the engine ids as not valid by default; they get a valid value only
+ * if the razwi was initiated by an engine that has an engine id.
+ * Init the razwi type to its default; it is changed only if the razwi was
+ * caused by a page fault or an MMU access error.
+ */
+ engine_id_1 = U16_MAX;
+ engine_id_2 = U16_MAX;
+ razwi_type = U8_MAX;
gaudi_get_event_desc(event_type, desc, sizeof(desc));
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
if (razwi) {
- gaudi_print_razwi_info(hdev);
- gaudi_print_mmu_error_info(hdev);
+ gaudi_print_and_get_razwi_info(hdev, &engine_id_1, &engine_id_2);
+ gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);
+
+ /* In case it's the first razwi, save its parameters */
+ rc = atomic_cmpxchg(&hdev->last_error.razwi_write_disable, 0, 1);
+ if (!rc) {
+ hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
+ hdev->last_error.razwi_timestamp = ktime_get();
+ hdev->last_error.razwi_addr = razwi_addr;
+ hdev->last_error.razwi_engine_id_1 = engine_id_1;
+ hdev->last_error.razwi_engine_id_2 = engine_id_2;
+ /*
+ * If the first engine id holds a non-valid value, the razwi
+ * initiator does not have an engine id.
+ */
+ hdev->last_error.razwi_non_engine_initiator = (engine_id_1 == U16_MAX);
+ hdev->last_error.razwi_type = razwi_type;
+ }
}
}
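The block above uses atomic_cmpxchg() so that only the first razwi after a reset gets its parameters recorded; later events are dropped until razwi_write_disable is cleared again. A minimal sketch of this first-writer-wins pattern follows; the struct and names are hypothetical, not the driver's actual last_error fields:

#include <linux/atomic.h>
#include <linux/ktime.h>

/* Hypothetical mirror of the last-error bookkeeping; not the driver's struct. */
struct first_error_record {
	atomic_t write_disable;	/* 0 = slot free, 1 = an error was captured */
	ktime_t timestamp;
	u64 addr;
};

static void capture_first_error(struct first_error_record *rec, u64 addr)
{
	/* Only the caller that flips 0 -> 1 records its parameters; later
	 * errors are dropped until the flag is reset (e.g. on device reset).
	 */
	if (atomic_cmpxchg(&rec->write_disable, 0, 1) == 0) {
		rec->timestamp = ktime_get();
		rec->addr = addr;
	}
}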
@@ -7696,14 +7819,10 @@ static void gaudi_print_fw_alive_info(struct hl_device *hdev,
fw_alive->thread_id, fw_alive->uptime_seconds);
}
-static int gaudi_soft_reset_late_init(struct hl_device *hdev)
+static int gaudi_non_hard_reset_late_init(struct hl_device *hdev)
{
- struct gaudi_device *gaudi = hdev->asic_specific;
-
- /* Unmask all IRQs since some could have been received
- * during the soft reset
- */
- return hl_fw_unmask_irq_arr(hdev, gaudi->events, sizeof(gaudi->events));
+ /* GAUDI doesn't support any reset except hard-reset */
+ return -EPERM;
}
static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
@@ -7897,27 +8016,39 @@ static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type)
static void gaudi_print_clk_change_info(struct hl_device *hdev,
u16 event_type)
{
+ ktime_t zero_time = ktime_set(0, 0);
+
+ mutex_lock(&hdev->clk_throttling.lock);
+
switch (event_type) {
case GAUDI_EVENT_FIX_POWER_ENV_S:
- hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to power consumption\n");
break;
case GAUDI_EVENT_FIX_POWER_ENV_E:
- hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Power envelop is safe, back to optimal clock\n");
break;
case GAUDI_EVENT_FIX_THERMAL_ENV_S:
- hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to overheating\n");
break;
case GAUDI_EVENT_FIX_THERMAL_ENV_E:
- hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Thermal envelop is safe, back to optimal clock\n");
break;
@@ -7927,6 +8058,8 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev,
event_type);
break;
}
+
+ mutex_unlock(&hdev->clk_throttling.lock);
}
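gaudi_print_clk_change_info now keeps, per throttling type, a start timestamp when throttling begins and an end timestamp when it clears, all under clk_throttling.lock. A hedged sketch of the same start/end interval bookkeeping, with illustrative types and names:

#include <linux/ktime.h>
#include <linux/mutex.h>

struct throttle_window {
	ktime_t start;
	ktime_t end;		/* zero while the event is still active */
};

static void throttle_begin(struct mutex *lock, struct throttle_window *w)
{
	mutex_lock(lock);
	w->start = ktime_get();
	w->end = ktime_set(0, 0);	/* open interval: end not yet known */
	mutex_unlock(lock);
}

static void throttle_end(struct mutex *lock, struct throttle_window *w)
{
	mutex_lock(lock);
	w->end = ktime_get();		/* close the interval */
	mutex_unlock(lock);
}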
static void gaudi_handle_eqe(struct hl_device *hdev,
@@ -7975,7 +8108,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
- fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
+ fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_GIC500:
@@ -7983,7 +8116,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
gaudi_print_irq_info(hdev, event_type, false);
- fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
+ fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_0:
@@ -7994,7 +8127,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
- fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
+ fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_1:
@@ -8177,9 +8310,11 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
reset_device:
if (hdev->asic_prop.fw_security_enabled)
- hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW | fw_fatal_err_flag);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD
+ | HL_DRV_RESET_BYPASS_REQ_TO_FW
+ | fw_fatal_err_flag);
else if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, HL_RESET_HARD | fw_fatal_err_flag);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD | fw_fatal_err_flag);
else
hl_fw_unmask_irq(hdev, event_type);
}
@@ -8206,7 +8341,7 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
int rc;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
- hdev->hard_reset_pending)
+ hdev->reset_info.hard_reset_pending)
return 0;
if (hdev->pldm)
@@ -8229,12 +8364,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
WREG32(mmSTLB_INV_SET, 0);
- if (rc) {
- dev_err_ratelimited(hdev->dev,
- "MMU cache invalidation timeout\n");
- hl_device_reset(hdev, HL_RESET_HARD);
- }
-
return rc;
}
@@ -8662,7 +8791,7 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
mutex_unlock(&ctx->mmu_lock);
if (rc)
@@ -8697,7 +8826,7 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
HOST_SPACE_INTERNAL_CB_SZ);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
mutex_unlock(&ctx->mmu_lock);
gen_pool_destroy(hdev->internal_cb_pool);
@@ -9458,7 +9587,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.disable_clock_gating = gaudi_disable_clock_gating,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
- .soft_reset_late_init = gaudi_soft_reset_late_init,
+ .non_hard_reset_late_init = gaudi_non_hard_reset_late_init,
.hw_queues_lock = gaudi_hw_queues_lock,
.hw_queues_unlock = gaudi_hw_queues_unlock,
.get_pci_id = gaudi_get_pci_id,
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index f325e36a71e6..8ac16a9b7d15 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -357,8 +357,8 @@ void gaudi_init_security(struct hl_device *hdev);
void gaudi_ack_protection_bits_errors(struct hl_device *hdev);
void gaudi_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
-int gaudi_debug_coresight(struct hl_device *hdev, void *data);
-void gaudi_halt_coresight(struct hl_device *hdev);
+int gaudi_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
+void gaudi_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx);
void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid);
#endif /* GAUDIP_H_ */
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 5349c1be13f9..08108f5fed67 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -848,7 +848,7 @@ static int gaudi_config_spmu(struct hl_device *hdev,
return 0;
}
-int gaudi_debug_coresight(struct hl_device *hdev, void *data)
+int gaudi_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data)
{
struct hl_debug_params *params = data;
int rc = 0;
@@ -887,7 +887,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
return rc;
}
-void gaudi_halt_coresight(struct hl_device *hdev)
+void gaudi_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_debug_params params = {};
int i, rc;
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 5536e8c27bd5..fbcc7bbf44b3 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -410,25 +410,26 @@ int goya_set_fixed_properties(struct hl_device *hdev)
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
- prop->mmu_hop_table_size = HOP_TABLE_SIZE;
- prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
+ prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
prop->dram_supports_virtual_memory = true;
- prop->dmmu.hop0_shift = HOP0_SHIFT;
- prop->dmmu.hop1_shift = HOP1_SHIFT;
- prop->dmmu.hop2_shift = HOP2_SHIFT;
- prop->dmmu.hop3_shift = HOP3_SHIFT;
- prop->dmmu.hop4_shift = HOP4_SHIFT;
- prop->dmmu.hop0_mask = HOP0_MASK;
- prop->dmmu.hop1_mask = HOP1_MASK;
- prop->dmmu.hop2_mask = HOP2_MASK;
- prop->dmmu.hop3_mask = HOP3_MASK;
- prop->dmmu.hop4_mask = HOP4_MASK;
+ prop->dmmu.hop0_shift = MMU_V1_0_HOP0_SHIFT;
+ prop->dmmu.hop1_shift = MMU_V1_0_HOP1_SHIFT;
+ prop->dmmu.hop2_shift = MMU_V1_0_HOP2_SHIFT;
+ prop->dmmu.hop3_shift = MMU_V1_0_HOP3_SHIFT;
+ prop->dmmu.hop4_shift = MMU_V1_0_HOP4_SHIFT;
+ prop->dmmu.hop0_mask = MMU_V1_0_HOP0_MASK;
+ prop->dmmu.hop1_mask = MMU_V1_0_HOP1_MASK;
+ prop->dmmu.hop2_mask = MMU_V1_0_HOP2_MASK;
+ prop->dmmu.hop3_mask = MMU_V1_0_HOP3_MASK;
+ prop->dmmu.hop4_mask = MMU_V1_0_HOP4_MASK;
prop->dmmu.start_addr = VA_DDR_SPACE_START;
prop->dmmu.end_addr = VA_DDR_SPACE_END;
prop->dmmu.page_size = PAGE_SIZE_2MB;
prop->dmmu.num_hops = MMU_ARCH_5_HOPS;
+ prop->dmmu.last_mask = LAST_MASK;
/* shifts and masks are the same in PMMU and DMMU */
memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
@@ -436,6 +437,7 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->pmmu.end_addr = VA_HOST_SPACE_END;
prop->pmmu.page_size = PAGE_SIZE_4KB;
prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
+ prop->pmmu.last_mask = LAST_MASK;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
@@ -473,6 +475,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->clk_pll_index = HL_GOYA_MME_PLL;
+ prop->use_get_power_for_reset_history = true;
+
return 0;
}
@@ -735,6 +739,11 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
int rc;
if (hdev->asic_prop.fw_security_enabled) {
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
+ return;
+
rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
pll_freq_arr);
@@ -778,9 +787,59 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
prop->psoc_pci_pll_div_factor = div_fctr;
}
+/*
+ * goya_set_frequency - set the frequency of the device
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @freq: the new frequency value
+ *
+ * Change the frequency if needed. This function has no protection against
+ * concurrency, so it is assumed that the caller has protected itself against
+ * invoking this function from multiple threads with different values.
+ *
+ * Returns 0 if no change was done, otherwise returns 1
+ */
+int goya_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ if ((goya->pm_mng_profile == PM_MANUAL) ||
+ (goya->curr_pll_profile == freq))
+ return 0;
+
+ dev_dbg(hdev->dev, "Changing device frequency to %s\n",
+ freq == PLL_HIGH ? "high" : "low");
+
+ goya_set_pll_profile(hdev, freq);
+
+ goya->curr_pll_profile = freq;
+
+ return 1;
+}
+
+static void goya_set_freq_to_low_job(struct work_struct *work)
+{
+ struct goya_work_freq *goya_work = container_of(work,
+ struct goya_work_freq,
+ work_freq.work);
+ struct hl_device *hdev = goya_work->hdev;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ if (!hdev->is_compute_ctx_active)
+ goya_set_frequency(hdev, PLL_LOW);
+
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ schedule_delayed_work(&goya_work->work_freq,
+ usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+}
+
int goya_late_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
int rc;
goya_fetch_psoc_frequency(hdev);
@@ -829,6 +888,16 @@ int goya_late_init(struct hl_device *hdev)
return rc;
}
+ /* force setting to low frequency */
+ goya->curr_pll_profile = PLL_LOW;
+
+ goya->pm_mng_profile = PM_AUTO;
+
+ hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
+
+ schedule_delayed_work(&goya->goya_work->work_freq,
+ usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+
return 0;
}
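goya_set_freq_to_low_job together with the scheduling in goya_late_init forms a self-rearming delayed work item: every HL_PLL_LOW_JOB_FREQ_USEC it checks whether a compute context is active and, if not, drops the PLL to the low profile. A minimal sketch of the self-rearming pattern, with illustrative names and an assumed period:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define POLL_PERIOD_MS	10000	/* assumed; the driver uses HL_PLL_LOW_JOB_FREQ_USEC */

static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
	/* ... check idleness and lower the clocks here ... */

	/* re-arm so the check keeps repeating while the device is up */
	schedule_delayed_work(&poll_work, msecs_to_jiffies(POLL_PERIOD_MS));
}

static void poll_start(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	schedule_delayed_work(&poll_work, msecs_to_jiffies(POLL_PERIOD_MS));
}

static void poll_stop(void)
{
	/* cancel_delayed_work_sync() also waits for a running instance,
	 * matching the cancel_delayed_work_sync() teardown in goya_late_fini().
	 */
	cancel_delayed_work_sync(&poll_work);
}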
@@ -842,8 +911,11 @@ int goya_late_init(struct hl_device *hdev)
void goya_late_fini(struct hl_device *hdev)
{
const struct hwmon_channel_info **channel_info_arr;
+ struct goya_device *goya = hdev->asic_specific;
int i = 0;
+ cancel_delayed_work_sync(&goya->goya_work->work_freq);
+
if (!hdev->hl_chip_info->info)
return;
@@ -961,12 +1033,21 @@ static int goya_sw_init(struct hl_device *hdev)
spin_lock_init(&goya->hw_queues_lock);
hdev->supports_coresight = true;
- hdev->supports_soft_reset = true;
- hdev->allow_inference_soft_reset = true;
+ hdev->asic_prop.supports_soft_reset = true;
+ hdev->asic_prop.allow_inference_soft_reset = true;
hdev->supports_wait_for_multi_cs = false;
hdev->asic_funcs->set_pci_memory_regions(hdev);
+ goya->goya_work = kmalloc(sizeof(struct goya_work_freq), GFP_KERNEL);
+ if (!goya->goya_work) {
+ rc = -ENOMEM;
+ goto free_cpu_accessible_dma_pool;
+ }
+
+ goya->goya_work->hdev = hdev;
+ INIT_DELAYED_WORK(&goya->goya_work->work_freq, goya_set_freq_to_low_job);
+
return 0;
free_cpu_accessible_dma_pool:
@@ -1003,6 +1084,7 @@ static int goya_sw_fini(struct hl_device *hdev)
dma_pool_destroy(hdev->dma_pool);
+ kfree(goya->goya_work);
kfree(goya);
return 0;
@@ -2502,7 +2584,7 @@ static void goya_init_firmware_loader(struct hl_device *hdev)
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
/* fill common fields */
- fw_loader->linux_loaded = false;
+ fw_loader->fw_comp_loaded = FW_TYPE_NONE;
fw_loader->boot_fit_img.image_name = GOYA_BOOT_FIT_FILE;
fw_loader->linux_img.image_name = GOYA_LINUX_FW_FILE;
fw_loader->cpu_timeout = GOYA_CPU_TIMEOUT_USEC;
@@ -2619,7 +2701,7 @@ int goya_mmu_init(struct hl_device *hdev)
(~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
- VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
+ MMU_OP_USERPTR | MMU_OP_PHYS_PACK);
WREG32(mmMMU_MMU_ENABLE, 1);
WREG32(mmMMU_SPI_MASK, 0xF);
@@ -4395,7 +4477,7 @@ static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
- if (hdev->hard_reset_pending)
+ if (hdev->reset_info.hard_reset_pending)
return U64_MAX;
return readq(hdev->pcie_bar[DDR_BAR_ID] +
@@ -4406,7 +4488,7 @@ static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
{
struct goya_device *goya = hdev->asic_specific;
- if (hdev->hard_reset_pending)
+ if (hdev->reset_info.hard_reset_pending)
return;
writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
@@ -4731,7 +4813,7 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
return rc;
}
-static int goya_soft_reset_late_init(struct hl_device *hdev)
+static int goya_non_hard_reset_late_init(struct hl_device *hdev)
{
/*
* Unmask all IRQs since some could have been received
@@ -4764,24 +4846,39 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
{
+ ktime_t zero_time = ktime_set(0, 0);
+
+ mutex_lock(&hdev->clk_throttling.lock);
+
switch (event_type) {
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
- hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to power consumption\n");
break;
+
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
- hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Power envelop is safe, back to optimal clock\n");
break;
+
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
- hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to overheating\n");
break;
+
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
- hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Thermal envelop is safe, back to optimal clock\n");
break;
@@ -4791,6 +4888,8 @@ static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
event_type);
break;
}
+
+ mutex_unlock(&hdev->clk_throttling.lock);
}
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
@@ -4834,14 +4933,14 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
goya_print_irq_info(hdev, event_type, false);
if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, (HL_RESET_HARD |
- HL_RESET_FW_FATAL_ERR));
+ hl_device_reset(hdev, (HL_DRV_RESET_HARD |
+ HL_DRV_RESET_FW_FATAL_ERR));
break;
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
goya_print_irq_info(hdev, event_type, false);
if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, HL_RESET_HARD);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
break;
case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
@@ -4901,7 +5000,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
goya_print_irq_info(hdev, event_type, false);
goya_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, HL_RESET_HARD);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
else
hl_fw_unmask_irq(hdev, event_type);
break;
@@ -5209,7 +5308,7 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
int rc;
if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
- hdev->hard_reset_pending)
+ hdev->reset_info.hard_reset_pending)
return 0;
/* no need in L1 only invalidation in Goya */
@@ -5232,12 +5331,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
1000,
timeout_usec);
- if (rc) {
- dev_err_ratelimited(hdev->dev,
- "MMU cache invalidation timeout\n");
- hl_device_reset(hdev, HL_RESET_HARD);
- }
-
return rc;
}
@@ -5645,7 +5738,7 @@ static const struct hl_asic_funcs goya_funcs = {
.disable_clock_gating = goya_disable_clock_gating,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
- .soft_reset_late_init = goya_soft_reset_late_init,
+ .non_hard_reset_late_init = goya_non_hard_reset_late_init,
.hw_queues_lock = goya_hw_queues_lock,
.hw_queues_unlock = goya_hw_queues_unlock,
.get_pci_id = goya_get_pci_id,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 97add7b04f82..3740fd25bf84 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -153,9 +153,15 @@
#define HW_CAP_GOLDEN 0x00000400
#define HW_CAP_TPC 0x00000800
+struct goya_work_freq {
+ struct hl_device *hdev;
+ struct delayed_work work_freq;
+};
+
struct goya_device {
/* TODO: remove hw_queues_lock after moving to scheduler code */
spinlock_t hw_queues_lock;
+ struct goya_work_freq *goya_work;
u64 mme_clk;
u64 tpc_clk;
@@ -166,6 +172,9 @@ struct goya_device {
u32 events_stat_aggregate[GOYA_ASYNC_EVENT_ID_SIZE];
u32 hw_cap_initialized;
u8 device_cpu_mmu_mappings_done;
+
+ enum hl_pll_frequency curr_pll_profile;
+ enum hl_pm_mng_profile pm_mng_profile;
};
int goya_set_fixed_properties(struct hl_device *hdev);
@@ -211,8 +220,8 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
void goya_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
int goya_cpucp_info_get(struct hl_device *hdev);
-int goya_debug_coresight(struct hl_device *hdev, void *data);
-void goya_halt_coresight(struct hl_device *hdev);
+int goya_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
+void goya_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx);
int goya_suspend(struct hl_device *hdev);
int goya_resume(struct hl_device *hdev);
@@ -237,5 +246,6 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx);
u64 goya_get_device_time(struct hl_device *hdev);
+int goya_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index c55c100fdd24..2c5133cfae65 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -652,7 +652,7 @@ static int goya_config_spmu(struct hl_device *hdev,
return 0;
}
-int goya_debug_coresight(struct hl_device *hdev, void *data)
+int goya_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data)
{
struct hl_debug_params *params = data;
int rc = 0;
@@ -691,7 +691,7 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
return rc;
}
-void goya_halt_coresight(struct hl_device *hdev)
+void goya_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_debug_params params = {};
int i, rc;
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 59b2624ff81a..76b47749affe 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -62,7 +62,7 @@ static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- if (hdev->pm_mng_profile == PM_AUTO) {
+ if (goya->pm_mng_profile == PM_AUTO) {
count = -EPERM;
goto fail;
}
@@ -111,7 +111,7 @@ static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- if (hdev->pm_mng_profile == PM_AUTO) {
+ if (goya->pm_mng_profile == PM_AUTO) {
count = -EPERM;
goto fail;
}
@@ -160,7 +160,7 @@ static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- if (hdev->pm_mng_profile == PM_AUTO) {
+ if (goya->pm_mng_profile == PM_AUTO) {
count = -EPERM;
goto fail;
}
@@ -234,13 +234,14 @@ static ssize_t pm_mng_profile_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
+ struct goya_device *goya = hdev->asic_specific;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
return sprintf(buf, "%s\n",
- (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
- (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
+ (goya->pm_mng_profile == PM_AUTO) ? "auto" :
+ (goya->pm_mng_profile == PM_MANUAL) ? "manual" :
"unknown");
}
@@ -248,6 +249,7 @@ static ssize_t pm_mng_profile_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
+ struct goya_device *goya = hdev->asic_specific;
if (!hl_device_operational(hdev, NULL)) {
count = -ENODEV;
@@ -256,7 +258,7 @@ static ssize_t pm_mng_profile_store(struct device *dev,
mutex_lock(&hdev->fpriv_list_lock);
- if (hdev->compute_ctx) {
+ if (hdev->is_compute_ctx_active) {
dev_err(hdev->dev,
"Can't change PM profile while compute context is opened on the device\n");
count = -EPERM;
@@ -265,26 +267,27 @@ static ssize_t pm_mng_profile_store(struct device *dev,
if (strncmp("auto", buf, strlen("auto")) == 0) {
/* Make sure we are in LOW PLL when changing modes */
- if (hdev->pm_mng_profile == PM_MANUAL) {
- hdev->curr_pll_profile = PLL_HIGH;
- hdev->pm_mng_profile = PM_AUTO;
- hl_device_set_frequency(hdev, PLL_LOW);
+ if (goya->pm_mng_profile == PM_MANUAL) {
+ goya->curr_pll_profile = PLL_HIGH;
+ goya->pm_mng_profile = PM_AUTO;
+ goya_set_frequency(hdev, PLL_LOW);
}
} else if (strncmp("manual", buf, strlen("manual")) == 0) {
- if (hdev->pm_mng_profile == PM_AUTO) {
+ if (goya->pm_mng_profile == PM_AUTO) {
/* Must release the lock because the work thread also
* takes this lock. But before we release it, set
* the mode to manual so nothing will change if a user
* suddenly opens the device
*/
- hdev->pm_mng_profile = PM_MANUAL;
+ goya->pm_mng_profile = PM_MANUAL;
mutex_unlock(&hdev->fpriv_list_lock);
/* Flush the current work so we can return to the user
* knowing that he is the only one changing frequencies
*/
- flush_delayed_work(&hdev->work_freq);
+ if (goya->goya_work)
+ flush_delayed_work(&goya->goya_work->work_freq);
return count;
}
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index ae13231fda94..737c39f33f05 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2020 HabanaLabs, Ltd.
+ * Copyright 2020-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -376,6 +376,19 @@ enum pq_init_status {
* and QMANs. The f/w will return a bitmask where each bit represents
* a different engine or QMAN according to enum cpucp_idle_mask.
* The bit will be 1 if the engine is NOT idle.
+ *
+ * CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET -
+ * Fetch data on all HBM rows that were replaced or are pending
+ * replacement.
+ *
+ * CPUCP_PACKET_HBM_PENDING_ROWS_STATUS -
+ * Fetch the status of HBM rows that are pending replacement and
+ * need a reboot to be replaced.
+ *
+ * CPUCP_PACKET_POWER_SET -
+ * Reset the device's power history to 0.
+ *
+ * CPUCP_PACKET_ENGINE_CORE_ASID_SET -
+ * Packet to perform engine core ASID configuration
*/
enum cpucp_packet_id {
@@ -421,6 +434,11 @@ enum cpucp_packet_id {
CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */
CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */
CPUCP_PACKET_IS_IDLE_CHECK, /* internal */
+ CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET,/* internal */
+ CPUCP_PACKET_HBM_PENDING_ROWS_STATUS, /* internal */
+ CPUCP_PACKET_POWER_SET, /* internal */
+ CPUCP_PACKET_RESERVED, /* not used */
+ CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
};
#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -480,7 +498,14 @@ struct cpucp_packet {
__u8 i2c_bus;
__u8 i2c_addr;
__u8 i2c_reg;
- __u8 pad; /* unused */
+ /*
+ * In legacy implementations, i2c_len was not present;
+ * the byte was unused and just added as pad.
+ * So if i2c_len is 0, it is treated as legacy
+ * and reads/writes 1 byte; if i2c_len is specified,
+ * it is treated as the new multibyte r/w support.
+ */
+ __u8 i2c_len;
};
struct {/* For PLL info fetch */
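Since i2c_len reuses a byte that the legacy interface left as pad, the value 0 must keep meaning a single-byte access. A consumer of the packet could normalize the length with a small helper like the following; the helper itself is hypothetical:

#include <linux/types.h>

/* Effective transfer length of an i2c r/w packet: a zero in the
 * (formerly pad) i2c_len field denotes a legacy 1-byte access.
 */
static inline u32 i2c_effective_len(u8 i2c_len)
{
	return i2c_len ? i2c_len : 1;
}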
@@ -688,6 +713,7 @@ struct eq_generic_event {
#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC)
#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64)
#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64)
+#define CPUCP_HBM_ROW_REPLACE_MAX 32
struct cpucp_sensor {
__le32 type;
@@ -740,6 +766,7 @@ struct cpucp_security_info {
* @fuse_version: silicon production FUSE information.
* @thermal_version: thermald S/W version.
* @cpucp_version: CpuCP S/W version.
+ * @infineon_second_stage_version: Infineon 2nd stage DC-DC version.
* @dram_size: available DRAM size.
* @card_name: card name that will be displayed in HWMON subsystem on the host
* @sec_info: security information
@@ -749,6 +776,10 @@ struct cpucp_security_info {
* @dram_binning_mask: DRAM binning mask, 1 bit per dram instance
* (0 = functional 1 = binned)
* @memory_repair_flag: eFuse flag indicating memory repair
+ * @edma_binning_mask: EDMA binning mask, 1 bit per EDMA instance
+ * (0 = functional 1 = binned)
+ * @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance
+ * (0 = functional 1 = binned)
*/
struct cpucp_info {
struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
@@ -761,7 +792,7 @@ struct cpucp_info {
__u8 fuse_version[VERSION_MAX_LEN];
__u8 thermal_version[VERSION_MAX_LEN];
__u8 cpucp_version[VERSION_MAX_LEN];
- __le32 reserved2;
+ __le32 infineon_second_stage_version;
__le64 dram_size;
char card_name[CARD_NAME_MAX_LEN];
__le64 reserved3;
@@ -769,7 +800,9 @@ struct cpucp_info {
__u8 reserved5;
__u8 dram_binning_mask;
__u8 memory_repair_flag;
- __u8 pad[5];
+ __u8 edma_binning_mask;
+ __u8 xbar_binning_mask;
+ __u8 pad[3];
struct cpucp_security_info sec_info;
__le32 reserved6;
__u8 pll_map[PLL_MAP_LEN];
@@ -833,4 +866,25 @@ struct cpucp_nic_status {
__le32 high_ber_cnt;
};
+enum cpucp_hbm_row_replace_cause {
+ REPLACE_CAUSE_DOUBLE_ECC_ERR,
+ REPLACE_CAUSE_MULTI_SINGLE_ECC_ERR,
+};
+
+struct cpucp_hbm_row_info {
+ __u8 hbm_idx;
+ __u8 pc;
+ __u8 sid;
+ __u8 bank_idx;
+ __le16 row_addr;
+ __u8 replaced_row_cause; /* enum cpucp_hbm_row_replace_cause */
+ __u8 pad;
+};
+
+struct cpucp_hbm_row_replaced_rows_info {
+ __le16 num_replaced_rows;
+ __u8 pad[6];
+ struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
+};
+
#endif /* CPUCP_IF_H */
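The reply for CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET is a fixed-size array plus an explicit little-endian count, so a consumer should clamp num_replaced_rows to CPUCP_HBM_ROW_REPLACE_MAX before walking it. A hedged sketch of such a walk, assuming the struct definitions above; the dump helper is not part of the driver:

#include <linux/device.h>
#include <linux/kernel.h>

static void dump_replaced_rows(struct device *dev,
			       struct cpucp_hbm_row_replaced_rows_info *info)
{
	u16 i, n = min_t(u16, le16_to_cpu(info->num_replaced_rows),
			 CPUCP_HBM_ROW_REPLACE_MAX);

	for (i = 0; i < n; i++) {
		struct cpucp_hbm_row_info *row = &info->replaced_rows[i];

		dev_dbg(dev, "hbm %u pc %u sid %u bank %u row 0x%x cause %u\n",
			row->hbm_idx, row->pc, row->sid, row->bank_idx,
			le16_to_cpu(row->row_addr), row->replaced_row_cause);
	}
}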
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index 2626df6ef3ef..135e21d6edc9 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -32,6 +32,7 @@ enum cpu_boot_err {
CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13,
CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18,
CPU_BOOT_ERR_BINNING_FAIL = 19,
+ CPU_BOOT_ERR_TPM_FAIL = 20,
CPU_BOOT_ERR_ENABLED = 31,
CPU_BOOT_ERR_SCND_EN = 63,
CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
@@ -108,6 +109,8 @@ enum cpu_boot_err {
* malfunctioning components might still be
* in use.
*
+ * CPU_BOOT_ERR0_TPM_FAIL TPM verification flow failed.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -130,6 +133,7 @@ enum cpu_boot_err {
#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL)
#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR)
#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
+#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL)
#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index dedf20e8f956..758f246627f8 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -16,27 +16,18 @@
#define PAGE_PRESENT_MASK 0x0000000000001ull
#define SWAP_OUT_MASK 0x0000000000004ull
#define LAST_MASK 0x0000000000800ull
-#define HOP0_MASK 0x3000000000000ull
-#define HOP1_MASK 0x0FF8000000000ull
-#define HOP2_MASK 0x0007FC0000000ull
-#define HOP3_MASK 0x000003FE00000ull
-#define HOP4_MASK 0x00000001FF000ull
#define FLAGS_MASK 0x0000000000FFFull
-#define HOP0_SHIFT 48
-#define HOP1_SHIFT 39
-#define HOP2_SHIFT 30
-#define HOP3_SHIFT 21
-#define HOP4_SHIFT 12
-
#define MMU_ARCH_5_HOPS 5
#define HOP_PHYS_ADDR_MASK (~FLAGS_MASK)
#define HL_PTE_SIZE sizeof(u64)
-#define HOP_TABLE_SIZE PAGE_SIZE_4KB
-#define PTE_ENTRIES_IN_HOP (HOP_TABLE_SIZE / HL_PTE_SIZE)
-#define HOP0_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE * MAX_ASID)
+
+/* definitions for HOP with 512 PTE entries */
+#define HOP_PTE_ENTRIES_512 512
+#define HOP_TABLE_SIZE_512_PTE (HOP_PTE_ENTRIES_512 * HL_PTE_SIZE)
+#define HOP0_512_PTE_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE_512_PTE * MAX_ASID)
#define MMU_HOP0_PA43_12_SHIFT 12
#define MMU_HOP0_PA49_44_SHIFT (12 + 32)
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
index 8539dd041f2c..86511002e367 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
@@ -8,8 +8,20 @@
#ifndef INCLUDE_MMU_V1_0_H_
#define INCLUDE_MMU_V1_0_H_
-#define MMU_HOP0_PA43_12 0x490004
-#define MMU_HOP0_PA49_44 0x490008
-#define MMU_ASID_BUSY 0x490000
+#define MMU_V1_0_HOP0_MASK 0x3000000000000ull
+#define MMU_V1_0_HOP1_MASK 0x0FF8000000000ull
+#define MMU_V1_0_HOP2_MASK 0x0007FC0000000ull
+#define MMU_V1_0_HOP3_MASK 0x000003FE00000ull
+#define MMU_V1_0_HOP4_MASK 0x00000001FF000ull
+
+#define MMU_V1_0_HOP0_SHIFT 48
+#define MMU_V1_0_HOP1_SHIFT 39
+#define MMU_V1_0_HOP2_SHIFT 30
+#define MMU_V1_0_HOP3_SHIFT 21
+#define MMU_V1_0_HOP4_SHIFT 12
+
+#define MMU_HOP0_PA43_12 0x490004
+#define MMU_HOP0_PA49_44 0x490008
+#define MMU_ASID_BUSY 0x490000
#endif /* INCLUDE_MMU_V1_0_H_ */
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
index b2a9570583ac..9c727a5d47b4 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
@@ -8,9 +8,21 @@
#ifndef INCLUDE_MMU_V1_1_H_
#define INCLUDE_MMU_V1_1_H_
-#define MMU_ASID 0xC12004
-#define MMU_HOP0_PA43_12 0xC12008
-#define MMU_HOP0_PA49_44 0xC1200C
-#define MMU_BUSY 0xC12000
+#define MMU_V1_1_HOP0_MASK 0x3000000000000ull
+#define MMU_V1_1_HOP1_MASK 0x0FF8000000000ull
+#define MMU_V1_1_HOP2_MASK 0x0007FC0000000ull
+#define MMU_V1_1_HOP3_MASK 0x000003FE00000ull
+#define MMU_V1_1_HOP4_MASK 0x00000001FF000ull
+
+#define MMU_V1_1_HOP0_SHIFT 48
+#define MMU_V1_1_HOP1_SHIFT 39
+#define MMU_V1_1_HOP2_SHIFT 30
+#define MMU_V1_1_HOP3_SHIFT 21
+#define MMU_V1_1_HOP4_SHIFT 12
+
+#define MMU_ASID 0xC12004
+#define MMU_HOP0_PA43_12 0xC12008
+#define MMU_HOP0_PA49_44 0xC1200C
+#define MMU_BUSY 0xC12000
#endif /* INCLUDE_MMU_V1_1_H_ */
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 0f54730c7ed5..98828030b5a4 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -76,12 +76,12 @@ static void firmware_load(const struct firmware *fw, void *context)
if (fw == NULL) {
dev_err(&spi->dev, "Cannot load firmware, aborting\n");
- return;
+ goto out;
}
if (fw->size == 0) {
dev_err(&spi->dev, "Error: Firmware size is 0!\n");
- return;
+ goto out;
}
/* Fill dummy data (24 stuffing bits for commands) */
@@ -103,7 +103,7 @@ static void firmware_load(const struct firmware *fw, void *context)
dev_err(&spi->dev,
"Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
jedec_id);
- return;
+ goto out;
}
dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
@@ -116,7 +116,7 @@ static void firmware_load(const struct firmware *fw, void *context)
buffer = kzalloc(fw->size + 8, GFP_KERNEL);
if (!buffer) {
dev_err(&spi->dev, "Error: Can't allocate memory!\n");
- return;
+ goto out;
}
/*
@@ -155,7 +155,7 @@ static void firmware_load(const struct firmware *fw, void *context)
"Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
status);
kfree(buffer);
- return;
+ goto out;
}
dev_info(&spi->dev, "Configuring the FPGA...\n");
@@ -181,7 +181,7 @@ static void firmware_load(const struct firmware *fw, void *context)
release_firmware(fw);
kfree(buffer);
-
+out:
complete(&data->fw_loaded);
}
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index aa12097668d3..2e0aa74ac185 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -11,7 +11,7 @@ lkdtm-$(CONFIG_LKDTM) += usercopy.o
lkdtm-$(CONFIG_LKDTM) += stackleak.o
lkdtm-$(CONFIG_LKDTM) += cfi.o
lkdtm-$(CONFIG_LKDTM) += fortify.o
-lkdtm-$(CONFIG_PPC_BOOK3S_64) += powerpc.o
+lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
KASAN_SANITIZE_rodata.o := n
KASAN_SANITIZE_stackleak.o := n
@@ -20,7 +20,7 @@ CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
- --rename-section .noinstr.text=.rodata,alloc,readonly,load
+ --rename-section .noinstr.text=.rodata,alloc,readonly,load,contents
targets += rodata.o rodata_objcopy.o
$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
$(call if_changed,objcopy)
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index f4cb94a9aa9c..f21854ac5cc2 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -41,20 +41,22 @@ static DEFINE_SPINLOCK(lock_me_up);
* Make sure compiler does not optimize this function or stack frame away:
* - function marked noinline
* - stack variables are marked volatile
- * - stack variables are written (memset()) and read (pr_info())
- * - function has external effects (pr_info())
- * */
+ * - stack variables are written (memset()) and read (buf[..] passed as arg)
+ * - function may have external effects (memzero_explicit())
+ * - no tail recursion possible
+ */
static int noinline recursive_loop(int remaining)
{
volatile char buf[REC_STACK_SIZE];
+ volatile int ret;
memset((void *)buf, remaining & 0xFF, sizeof(buf));
- pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
- recur_count);
if (!remaining)
- return 0;
+ ret = 0;
else
- return recursive_loop(remaining - 1);
+ ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
+ memzero_explicit((void *)buf, sizeof(buf));
+ return ret;
}
/* If the depth is negative, use the default, otherwise keep parameter. */
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 609d9ee2acc0..f69b964b9952 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -182,7 +182,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(FORTIFIED_SUBOBJECT),
CRASHTYPE(FORTIFIED_STRSCPY),
CRASHTYPE(DOUBLE_FAULT),
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
CRASHTYPE(PPC_SLB_MULTIHIT),
#endif
};
@@ -212,7 +212,11 @@ module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
-/* For test debug reporting. */
+/*
+ * For test debug reporting when CI systems provide terse summaries.
+ * TODO: Remove this once reasonable reporting exists in most CI systems:
+ * https://lore.kernel.org/lkml/CAHk-=wiFvfkoFixTapvvyPMN9pq5G-+Dys2eSyBa1vzDGAO5+A@mail.gmail.com
+ */
char *lkdtm_kernel_info;
/* Return the crashtype number or NULL if the name is invalid */
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 44bac4ad687c..46aa3554e97b 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -643,6 +643,64 @@ static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
kfree(cl_vtag);
}
+void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
+{
+ struct mei_device *bus;
+ struct mei_cl *cl;
+ int ret;
+
+ if (!cldev || !buffer_id || !size)
+ return ERR_PTR(-EINVAL);
+
+ if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
+ dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
+ MEI_FW_PAGE_SIZE);
+ return ERR_PTR(-EINVAL);
+ }
+
+ cl = cldev->cl;
+ bus = cldev->bus;
+
+ mutex_lock(&bus->device_lock);
+ if (cl->state == MEI_FILE_UNINITIALIZED) {
+ ret = mei_cl_link(cl);
+ if (ret)
+ goto out;
+ /* update pointers */
+ cl->cldev = cldev;
+ }
+
+ ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
+out:
+ mutex_unlock(&bus->device_lock);
+ if (ret)
+ return ERR_PTR(ret);
+ return cl->dma.vaddr;
+}
+EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
+
+int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
+{
+ struct mei_device *bus;
+ struct mei_cl *cl;
+ int ret;
+
+ if (!cldev)
+ return -EINVAL;
+
+ cl = cldev->cl;
+ bus = cldev->bus;
+
+ mutex_lock(&bus->device_lock);
+ ret = mei_cl_dma_unmap(cl, NULL);
+
+ mei_cl_flush_queues(cl, NULL);
+ mei_cl_unlink(cl);
+ mutex_unlock(&bus->device_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
+
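From a mei client driver's point of view, the two new exports pair up: mei_cldev_dma_map() returns the kernel virtual address of the shared buffer (or an ERR_PTR), and mei_cldev_dma_unmap() releases it. A hedged usage sketch; the buffer id and size are made up, and the size must be a multiple of the firmware page size (4096):

#include <linux/mei_cl_bus.h>
#include <linux/err.h>

static int example_setup_dma(struct mei_cl_device *cldev)
{
	void *vaddr;

	/* buffer id 1 and a single 4 KiB page are arbitrary example values */
	vaddr = mei_cldev_dma_map(cldev, 1, 4096);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... exchange data with the firmware through vaddr ... */

	return mei_cldev_dma_unmap(cldev);
}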
/**
* mei_cldev_enable - enable me client device
* create connection with me client
@@ -753,9 +811,11 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
dev_err(bus->dev, "Could not disconnect from the ME client\n");
out:
- /* Flush queues and remove any pending read */
- mei_cl_flush_queues(cl, NULL);
- mei_cl_unlink(cl);
+ /* Flush queues and remove any pending read unless we have mapped DMA */
+ if (!cl->dma_mapped) {
+ mei_cl_flush_queues(cl, NULL);
+ mei_cl_unlink(cl);
+ }
mutex_unlock(&bus->device_lock);
return err;
@@ -1052,6 +1112,7 @@ static void mei_cl_bus_dev_release(struct device *dev)
if (!cldev)
return;
+ mei_cl_flush_queues(cldev->cl, NULL);
mei_me_cl_put(cldev->me_cl);
mei_dev_bus_put(cldev->bus);
mei_cl_unlink(cldev->cl);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 96f4e59c32a5..06734670a732 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -700,6 +700,9 @@ int mei_cl_unlink(struct mei_cl *cl)
cl_dbg(dev, cl, "unlink client");
+ if (cl->state == MEI_FILE_UNINITIALIZED)
+ return 0;
+
if (dev->open_handle_count > 0)
dev->open_handle_count--;
@@ -2327,6 +2330,8 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
+ cl->status = 0;
+
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->dma_mapped || cl->status,
@@ -2404,6 +2409,8 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
+ cl->status = 0;
+
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
!cl->dma_mapped || cl->status,
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index be41843df75b..cebcca6d6d3e 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -672,10 +672,14 @@ static void mei_hbm_cl_dma_map_res(struct mei_device *dev,
if (!cl)
return;
- dev_dbg(dev->dev, "cl dma map result = %d\n", res->status);
- cl->status = res->status;
- if (!cl->status)
+ if (res->status) {
+ dev_err(dev->dev, "cl dma map failed %d\n", res->status);
+ cl->status = -EFAULT;
+ } else {
+ dev_dbg(dev->dev, "cl dma map succeeded\n");
cl->dma_mapped = 1;
+ cl->status = 0;
+ }
wake_up(&cl->wait);
}
@@ -698,10 +702,14 @@ static void mei_hbm_cl_dma_unmap_res(struct mei_device *dev,
if (!cl)
return;
- dev_dbg(dev->dev, "cl dma unmap result = %d\n", res->status);
- cl->status = res->status;
- if (!cl->status)
+ if (res->status) {
+ dev_err(dev->dev, "cl dma unmap failed %d\n", res->status);
+ cl->status = -EFAULT;
+ } else {
+ dev_dbg(dev->dev, "cl dma unmap succeeded\n");
cl->dma_mapped = 0;
+ cl->status = 0;
+ }
wake_up(&cl->wait);
}
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index a4e854b9b9e6..00652c137cc7 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -994,11 +994,7 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
hhisr &= ~IPC_HHIER_SEC;
}
- generated = generated ||
- (hisr & HISR_INT_STS_MSK) ||
- (ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);
-
- if (generated && do_ack) {
+ if (do_ack) {
/* Save the interrupt causes */
hw->intr_cause |= hisr & HISR_INT_STS_MSK;
if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index dfd60c916da0..b46077b17114 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -23,6 +23,11 @@
#define MEI_HBM_TIMEOUT 1 /* 1 second */
/*
+ * FW page size for DMA allocations
+ */
+#define MEI_FW_PAGE_SIZE 4096UL
+
+/*
* MEI Version
*/
#define HBM_MINOR_VERSION 2
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 5c8cb679b997..f79076c67256 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -24,6 +24,7 @@ const char *mei_dev_state_str(int state)
MEI_DEV_STATE(ENABLED);
MEI_DEV_STATE(RESETTING);
MEI_DEV_STATE(DISABLED);
+ MEI_DEV_STATE(POWERING_DOWN);
MEI_DEV_STATE(POWER_DOWN);
MEI_DEV_STATE(POWER_UP);
default:
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index e70525eedaae..d881f5e40ad9 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -74,7 +74,6 @@ static long afu_ioctl_attach(struct ocxl_context *ctx,
{
struct ocxl_ioctl_attach arg;
u64 amr = 0;
- int rc;
pr_debug("%s for context %d\n", __func__, ctx->pasid);
@@ -86,8 +85,7 @@ static long afu_ioctl_attach(struct ocxl_context *ctx,
return -EINVAL;
amr = arg.amr & mfspr(SPRN_UAMOR);
- rc = ocxl_context_attach(ctx, amr, current->mm);
- return rc;
+ return ocxl_context_attach(ctx, amr, current->mm);
}
static long afu_ioctl_get_metadata(struct ocxl_context *ctx,
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 2ed7e3aaff3a..8f786a225dcf 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -865,7 +865,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
goto err_release_irq;
}
misc_device->parent = &pdev->dev;
- misc_device->fops = &pci_endpoint_test_fops,
+ misc_device->fops = &pci_endpoint_test_fops;
err = misc_register(misc_device);
if (err) {
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 4c26b19f5154..f0e7f02605eb 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -371,6 +371,7 @@ static const struct of_device_id sram_dt_ids[] = {
{ .compatible = "atmel,sama5d2-securam", .data = &atmel_securam_config },
{ .compatible = "nvidia,tegra186-sysram", .data = &tegra_sysram_config },
{ .compatible = "nvidia,tegra194-sysram", .data = &tegra_sysram_config },
+ { .compatible = "nvidia,tegra234-sysram", .data = &tegra_sysram_config },
{}
};
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 488eeb2811ae..281c54003edc 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -289,7 +289,7 @@ static ssize_t api_show(struct device *dev,
{
struct uacce_device *uacce = to_uacce_device(dev);
- return sprintf(buf, "%s\n", uacce->api_ver);
+ return sysfs_emit(buf, "%s\n", uacce->api_ver);
}
static ssize_t flags_show(struct device *dev,
@@ -297,7 +297,7 @@ static ssize_t flags_show(struct device *dev,
{
struct uacce_device *uacce = to_uacce_device(dev);
- return sprintf(buf, "%u\n", uacce->flags);
+ return sysfs_emit(buf, "%u\n", uacce->flags);
}
static ssize_t available_instances_show(struct device *dev,
@@ -309,7 +309,7 @@ static ssize_t available_instances_show(struct device *dev,
if (!uacce->ops->get_available_instances)
return -ENODEV;
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
uacce->ops->get_available_instances(uacce));
}
@@ -318,7 +318,7 @@ static ssize_t algorithms_show(struct device *dev,
{
struct uacce_device *uacce = to_uacce_device(dev);
- return sprintf(buf, "%s\n", uacce->algs);
+ return sysfs_emit(buf, "%s\n", uacce->algs);
}
static ssize_t region_mmio_size_show(struct device *dev,
@@ -326,7 +326,7 @@ static ssize_t region_mmio_size_show(struct device *dev,
{
struct uacce_device *uacce = to_uacce_device(dev);
- return sprintf(buf, "%lu\n",
+ return sysfs_emit(buf, "%lu\n",
uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}
@@ -335,7 +335,7 @@ static ssize_t region_dus_size_show(struct device *dev,
{
struct uacce_device *uacce = to_uacce_device(dev);
- return sprintf(buf, "%lu\n",
+ return sysfs_emit(buf, "%lu\n",
uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}
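The sysfs_emit() conversions are more than cosmetic: unlike sprintf(), sysfs_emit() clamps output to PAGE_SIZE and, in-kernel, also warns if the buffer is not the page-aligned sysfs buffer. A userspace model of the bounded-write contract (PAGE_SIZE and emit() below are illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096

static int emit(char *buf, const char *val)
{
	/* snprintf() bounds the write the way sysfs_emit() does in-kernel */
	return snprintf(buf, PAGE_SIZE, "%s\n", val);
}

int main(void)
{
	char page[PAGE_SIZE];

	printf("%d bytes\n", emit(page, "1.0")); /* 4 bytes */
	return 0;
}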
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index c0b5e339d5a1..6cf3e21c7604 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -687,10 +687,8 @@ int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
}
spin_unlock(&context->lock);
- if (found) {
- synchronize_rcu();
- kfree(notifier);
- }
+ if (found)
+ kvfree_rcu(notifier);
vmci_ctx_put(context);
diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
index e3436abf39f4..2100297c94ad 100644
--- a/drivers/misc/vmw_vmci/vmci_event.c
+++ b/drivers/misc/vmw_vmci/vmci_event.c
@@ -209,8 +209,7 @@ int vmci_event_unsubscribe(u32 sub_id)
if (!s)
return VMCI_ERROR_NOT_FOUND;
- synchronize_rcu();
- kfree(s);
+ kvfree_rcu(s);
return VMCI_SUCCESS;
}
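Both vmw_vmci hunks replace an open-coded synchronize_rcu() + kfree() pair with kvfree_rcu(). A module-style sketch of the single-argument form, which synchronizes internally and may therefore sleep, so it is only valid in process context (all names illustrative):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct notifier_rec {
	int id;
};

static int __init demo_init(void)
{
	struct notifier_rec *rec = kzalloc(sizeof(*rec), GFP_KERNEL);

	if (!rec)
		return -ENOMEM;
	/* was: synchronize_rcu(); kfree(rec); */
	kvfree_rcu(rec);
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");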
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 90e1bcd03b46..4e61b28a002f 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -264,7 +264,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
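All blk_execute_rq() call sites in this file are adjusted to the new two-argument form, which drops the gendisk parameter and takes at_head as a bool. A hedged sketch of the driver-op pattern (run_drv_op() and its queue are illustrative):

#include <linux/blk-mq.h>
#include <linux/err.h>

static int run_drv_op(struct request_queue *q)
{
	struct request *req;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	blk_execute_rq(req, false);	/* false: queue at the tail, then wait */
	blk_mq_free_request(req);
	return 0;
}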
@@ -657,7 +657,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
blk_mq_free_request(req);
@@ -726,7 +726,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
/* copy to user if data and response */
@@ -1837,7 +1837,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
/* Reset if the card is in a bad state */
if (!mmc_host_is_spi(mq->card->host) &&
err && mmc_blk_reset(md, card->host, type)) {
- pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
+ pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
mqrq->retries = MMC_NO_RETRIES;
return;
}
@@ -1908,8 +1908,8 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
cb_data.card = card;
cb_data.status = 0;
- err = __mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, &mmc_blk_busy_cb,
- &cb_data);
+ err = __mmc_poll_for_busy(card->host, MMC_BLK_TIMEOUT_MS,
+ &mmc_blk_busy_cb, &cb_data);
/*
* Do not assume data transferred correctly if there are any error bits
@@ -2051,7 +2051,8 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
mmc_put_card(mq->card, &mq->ctx);
}
-static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+ bool can_sleep)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
@@ -2063,10 +2064,14 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
* Block layer timeouts race with completions which means the normal
* completion path cannot be used during recovery.
*/
- if (mq->in_recovery)
+ if (mq->in_recovery) {
mmc_blk_mq_complete_rq(mq, req);
- else if (likely(!blk_should_fake_timeout(req->q)))
- blk_mq_complete_request(req);
+ } else if (likely(!blk_should_fake_timeout(req->q))) {
+ if (can_sleep)
+ blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
+ else
+ blk_mq_complete_request(req);
+ }
mmc_blk_mq_dec_in_flight(mq, req);
}
@@ -2087,7 +2092,7 @@ void mmc_blk_mq_recovery(struct mmc_queue *mq)
mmc_blk_urgent_bkops(mq, mqrq);
- mmc_blk_mq_post_req(mq, req);
+ mmc_blk_mq_post_req(mq, req, true);
}
static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
@@ -2106,7 +2111,7 @@ static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
if (prev_req)
*prev_req = mq->complete_req;
else
- mmc_blk_mq_post_req(mq, mq->complete_req);
+ mmc_blk_mq_post_req(mq, mq->complete_req, true);
mq->complete_req = NULL;
@@ -2178,7 +2183,8 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
mq->rw_wait = false;
wake_up(&mq->wait);
- mmc_blk_mq_post_req(mq, req);
+ /* calling context unknown, may be atomic: cannot complete in place */
+ mmc_blk_mq_post_req(mq, req, false);
}
static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
@@ -2238,7 +2244,7 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
err = mmc_start_request(host, &mqrq->brq.mrq);
if (prev_req)
- mmc_blk_mq_post_req(mq, prev_req);
+ mmc_blk_mq_post_req(mq, prev_req, true);
if (err)
mq->rw_wait = false;
@@ -2395,10 +2401,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->private_data = md;
md->parent = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
- md->disk->flags = GENHD_FL_EXT_DEVT;
if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
- md->disk->flags |= GENHD_FL_NO_PART_SCAN
- | GENHD_FL_SUPPRESS_PARTITION_INFO;
+ md->disk->flags |= GENHD_FL_NO_PART;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -2739,7 +2743,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
*val = ret;
@@ -2778,7 +2782,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
err = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
if (err) {
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index f6b7a9c5bbff..096ae624be9a 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -53,16 +53,6 @@ static struct attribute *mmc_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(mmc_dev);
-/*
- * This currently matches any MMC driver to any MMC card - drivers
- * themselves make the decision whether to drive this card in their
- * probe method.
- */
-static int mmc_bus_match(struct device *dev, struct device_driver *drv)
-{
- return 1;
-}
-
static int
mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -226,7 +216,6 @@ static const struct dev_pm_ops mmc_bus_pm_ops = {
static struct bus_type mmc_bus_type = {
.name = "mmc",
.dev_groups = mmc_dev_groups,
- .match = mmc_bus_match,
.uevent = mmc_bus_uevent,
.probe = mmc_bus_probe,
.remove = mmc_bus_remove,
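Deleting mmc_bus_match() is safe because the driver core treats a missing bus .match hook as "match everything", which is exactly what the callback hard-coded. The check is essentially this (paraphrased from the driver core internals):

#include <linux/device.h>

static inline int driver_match_device(struct device_driver *drv,
				      struct device *dev)
{
	return drv->bus->match ? drv->bus->match(dev, drv) : 1;
}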
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 7bd392d55cfa..99045e138ba4 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -59,6 +59,9 @@ struct mmc_fixup {
/* for MMC cards */
unsigned int ext_csd_rev;
+ /* Match against functions declared in device tree */
+ const char *of_compatible;
+
void (*vendor_fixup)(struct mmc_card *card, int data);
int data;
};
@@ -119,6 +122,21 @@ struct mmc_fixup {
_vendor, _device, \
_fixup, _data, EXT_CSD_REV_ANY) \
+#define SDIO_FIXUP_COMPATIBLE(_compatible, _fixup, _data) \
+ { \
+ .name = CID_NAME_ANY, \
+ .manfid = CID_MANFID_ANY, \
+ .oemid = CID_OEMID_ANY, \
+ .rev_start = 0, \
+ .rev_end = -1ull, \
+ .cis_vendor = SDIO_ANY_ID, \
+ .cis_device = SDIO_ANY_ID, \
+ .vendor_fixup = (_fixup), \
+ .data = (_data), \
+ .ext_csd_rev = EXT_CSD_REV_ANY, \
+ .of_compatible = _compatible, \
+ }
+
#define cid_rev(hwrev, fwrev, year, month) \
(((u64) hwrev) << 40 | \
((u64) fwrev) << 32 | \
@@ -150,6 +168,24 @@ static inline void __maybe_unused add_limit_rate_quirk(struct mmc_card *card,
card->quirk_max_rate = data;
}
+static inline void __maybe_unused wl1251_quirk(struct mmc_card *card,
+ int data)
+{
+ /*
+ * We have TI wl1251 attached to this mmc. Pass this
+ * information to the SDIO core because it can't be
+ * probed by normal methods.
+ */
+
+ dev_info(card->host->parent, "found wl1251\n");
+ card->quirks |= MMC_QUIRK_NONSTD_SDIO;
+ card->cccr.wide_bus = 1;
+ card->cis.vendor = 0x104c;
+ card->cis.device = 0x9066;
+ card->cis.blksize = 512;
+ card->cis.max_dtr = 24000000;
+}
+
/*
* Quirk add/remove for MMC products.
*/
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index b1c1716dacf0..bbbbcaf70a59 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1962,7 +1962,7 @@ static int mmc_sleep(struct mmc_host *host)
goto out_release;
}
- err = __mmc_poll_for_busy(card, timeout_ms, &mmc_sleep_busy_cb, host);
+ err = __mmc_poll_for_busy(host, timeout_ms, &mmc_sleep_busy_cb, host);
out_release:
mmc_retune_release(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 0c54858e89c0..d63d1c735335 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -58,6 +58,12 @@ struct mmc_busy_data {
enum mmc_busy_cmd busy_cmd;
};
+struct mmc_op_cond_busy_data {
+ struct mmc_host *host;
+ u32 ocr;
+ struct mmc_command *cmd;
+};
+
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
int err;
@@ -173,43 +179,62 @@ int mmc_go_idle(struct mmc_host *host)
return err;
}
+static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
+{
+ struct mmc_op_cond_busy_data *data = cb_data;
+ struct mmc_host *host = data->host;
+ struct mmc_command *cmd = data->cmd;
+ u32 ocr = data->ocr;
+ int err = 0;
+
+ err = mmc_wait_for_cmd(host, cmd, 0);
+ if (err)
+ return err;
+
+ if (mmc_host_is_spi(host)) {
+ if (!(cmd->resp[0] & R1_SPI_IDLE)) {
+ *busy = false;
+ return 0;
+ }
+ } else {
+ if (cmd->resp[0] & MMC_CARD_BUSY) {
+ *busy = false;
+ return 0;
+ }
+ }
+
+ *busy = true;
+
+ /*
+ * According to eMMC specification v5.1 section 6.4.3, we
+ * should issue CMD1 repeatedly in the idle state until
+ * the eMMC is ready. Otherwise some eMMC devices seem to enter
+ * the inactive mode after mmc_init_card() issued CMD0 when
+ * the eMMC device is busy.
+ */
+ if (!ocr && !mmc_host_is_spi(host))
+ cmd->arg = cmd->resp[0] | BIT(30);
+
+ return 0;
+}
+
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {};
- int i, err = 0;
+ int err = 0;
+ struct mmc_op_cond_busy_data cb_data = {
+ .host = host,
+ .ocr = ocr,
+ .cmd = &cmd
+ };
cmd.opcode = MMC_SEND_OP_COND;
cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
- for (i = 100; i; i--) {
- err = mmc_wait_for_cmd(host, &cmd, 0);
- if (err)
- break;
-
- /* wait until reset completes */
- if (mmc_host_is_spi(host)) {
- if (!(cmd.resp[0] & R1_SPI_IDLE))
- break;
- } else {
- if (cmd.resp[0] & MMC_CARD_BUSY)
- break;
- }
-
- err = -ETIMEDOUT;
-
- mmc_delay(10);
-
- /*
- * According to eMMC specification v5.1 section 6.4.3, we
- * should issue CMD1 repeatedly in the idle state until
- * the eMMC is ready. Otherwise some eMMC devices seem to enter
- * the inactive mode after mmc_init_card() issued CMD0 when
- * the eMMC device is busy.
- */
- if (!ocr && !mmc_host_is_spi(host))
- cmd.arg = cmd.resp[0] | BIT(30);
- }
+ err = __mmc_poll_for_busy(host, 1000, &__mmc_send_op_cond_cb, &cb_data);
+ if (err)
+ return err;
if (rocr && !mmc_host_is_spi(host))
*rocr = cmd.resp[0];
@@ -470,11 +495,10 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
return 0;
}
-int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
int (*busy_cb)(void *cb_data, bool *busy),
void *cb_data)
{
- struct mmc_host *host = card->host;
int err;
unsigned long timeout;
unsigned int udelay = 32, udelay_max = 32768;
@@ -515,13 +539,14 @@ EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
+ struct mmc_host *host = card->host;
struct mmc_busy_data cb_data;
cb_data.card = card;
cb_data.retry_crc_err = retry_crc_err;
cb_data.busy_cmd = busy_cmd;
- return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
+ return __mmc_poll_for_busy(host, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
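After this change __mmc_poll_for_busy() is host-based and drives any caller-supplied "still busy?" callback, with the core owning the delay, backoff and timeout_ms budget; the old open-coded CMD1 loop becomes one such callback. A sketch of the callback shape (the status bit tested here is illustrative):

#include <linux/types.h>
#include <linux/bits.h>

static int demo_busy_cb(void *cb_data, bool *busy)
{
	u32 *status = cb_data;

	/* a real callback re-issues its command here and reads back status */
	*busy = !(*status & BIT(31));
	return 0;
}

/* caller: err = __mmc_poll_for_busy(host, timeout_ms, &demo_busy_cb, &status); */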
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index e5e94567a9a9..9c813b851d0b 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -41,7 +41,7 @@ int mmc_can_ext_csd(struct mmc_card *card);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms);
-int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
int (*busy_cb)(void *cb_data, bool *busy),
void *cb_data);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index ea4d3670560e..988467fbb621 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -54,7 +54,7 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc,
reset_gpios->info, values);
- kfree(values);
+ bitmap_free(values);
}
}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index b15c034b42fb..c69b2d9df6f1 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -234,7 +234,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
enum mmc_issue_type issue_type;
enum mmc_issued issued;
bool get_card, cqe_retune_ok;
- int ret;
+ blk_status_t ret;
if (mmc_card_removed(mq->card)) {
req->rq_flags |= RQF_QUIET;
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index d68e6e513a4f..20f568727277 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -10,6 +10,7 @@
*
*/
+#include <linux/of.h>
#include <linux/mmc/sdio_ids.h>
#include "card.h"
@@ -145,6 +146,27 @@
END_FIXUP
};
+static const struct mmc_fixup __maybe_unused sdio_card_init_methods[] = {
+ SDIO_FIXUP_COMPATIBLE("ti,wl1251", wl1251_quirk, 0),
+
+ END_FIXUP
+};
+
+static inline bool mmc_fixup_of_compatible_match(struct mmc_card *card,
+ const char *compatible)
+{
+ struct device_node *np;
+
+ for_each_child_of_node(mmc_dev(card->host)->of_node, np) {
+ if (of_device_is_compatible(np, compatible)) {
+ of_node_put(np); /* for_each_child_of_node() holds a reference */
+ return true;
+ }
+ }
+
+ return false;
+}
+
static inline void mmc_fixup_device(struct mmc_card *card,
const struct mmc_fixup *table)
{
@@ -152,22 +172,32 @@ static inline void mmc_fixup_device(struct mmc_card *card,
u64 rev = cid_rev_card(card);
for (f = table; f->vendor_fixup; f++) {
- if ((f->manfid == CID_MANFID_ANY ||
- f->manfid == card->cid.manfid) &&
- (f->oemid == CID_OEMID_ANY ||
- f->oemid == card->cid.oemid) &&
- (f->name == CID_NAME_ANY ||
- !strncmp(f->name, card->cid.prod_name,
- sizeof(card->cid.prod_name))) &&
- (f->cis_vendor == card->cis.vendor ||
- f->cis_vendor == (u16) SDIO_ANY_ID) &&
- (f->cis_device == card->cis.device ||
- f->cis_device == (u16) SDIO_ANY_ID) &&
- (f->ext_csd_rev == EXT_CSD_REV_ANY ||
- f->ext_csd_rev == card->ext_csd.rev) &&
- rev >= f->rev_start && rev <= f->rev_end) {
- dev_dbg(&card->dev, "calling %ps\n", f->vendor_fixup);
- f->vendor_fixup(card, f->data);
- }
+ if (f->manfid != CID_MANFID_ANY &&
+ f->manfid != card->cid.manfid)
+ continue;
+ if (f->oemid != CID_OEMID_ANY &&
+ f->oemid != card->cid.oemid)
+ continue;
+ if (f->name != CID_NAME_ANY &&
+ strncmp(f->name, card->cid.prod_name,
+ sizeof(card->cid.prod_name)))
+ continue;
+ if (f->cis_vendor != (u16)SDIO_ANY_ID &&
+ f->cis_vendor != card->cis.vendor)
+ continue;
+ if (f->cis_device != (u16)SDIO_ANY_ID &&
+ f->cis_device != card->cis.device)
+ continue;
+ if (f->ext_csd_rev != EXT_CSD_REV_ANY &&
+ f->ext_csd_rev != card->ext_csd.rev)
+ continue;
+ if (rev < f->rev_start || rev > f->rev_end)
+ continue;
+ if (f->of_compatible &&
+ !mmc_fixup_of_compatible_match(card, f->of_compatible))
+ continue;
+
+ dev_dbg(&card->dev, "calling %ps\n", f->vendor_fixup);
+ f->vendor_fixup(card, f->data);
}
}
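The new of_compatible criterion is matched by scanning the host's child nodes. for_each_child_of_node() takes a reference on each node it visits, hence the of_node_put() on the early-true path above; a standalone sketch of the same walk:

#include <linux/of.h>

static bool parent_has_compatible_child(struct device_node *parent,
					const char *compatible)
{
	struct device_node *np;

	for_each_child_of_node(parent, np) {
		if (of_device_is_compatible(np, compatible)) {
			of_node_put(np);	/* drop the loop's reference */
			return true;
		}
	}

	return false;
}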
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c9db24e16af1..45f578793980 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1666,7 +1666,7 @@ static int sd_poweroff_notify(struct mmc_card *card)
cb_data.card = card;
cb_data.reg_buf = reg_buf;
- err = __mmc_poll_for_busy(card, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
+ err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
&sd_busy_poweroff_notify_cb, &cb_data);
out:
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 68edf7a615be..41164748723d 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -707,6 +707,9 @@ try_again:
*/
if (host->ops->init_card)
host->ops->init_card(host, card);
+ mmc_fixup_device(card, sdio_card_init_methods);
+
+ card->ocr = ocr_card;
/*
* If the host and card support UHS-I mode request the card
@@ -820,7 +823,7 @@ try_again:
goto mismatch;
}
}
- card->ocr = ocr_card;
+
mmc_fixup_device(card, sdio_fixup_methods);
if (card->type == MMC_TYPE_SD_COMBO) {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 5af8494c31b5..52b0b27a6839 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -966,6 +966,7 @@ config MMC_REALTEK_USB
config MMC_SUNXI
tristate "Allwinner sunxi SD/MMC Host Controller support"
depends on ARCH_SUNXI || COMPILE_TEST
+ depends on SUNXI_CCU
help
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 0acc237843f7..a9a0837153d8 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -969,8 +969,10 @@ static int au1xmmc_probe(struct platform_device *pdev)
}
host->irq = platform_get_irq(pdev, 0);
- if (host->irq < 0)
+ if (host->irq < 0) {
+ ret = host->irq;
goto out3;
+ }
mmc->ops = &au1xmmc_ops;
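This au1xmmc hunk, like the meson-mx hunks later in the series, fixes the same probe-path bug: platform_get_irq() reports failure as a negative errno (it never returns 0), and that value must become the probe's return code instead of being silently dropped. A sketch of the idiom (names illustrative):

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */

	/* ... request the irq and finish probing ... */
	return 0;
}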
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 8c2361e66277..463b707d9e99 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1293,14 +1293,12 @@ static int bcm2835_add_host(struct bcm2835_host *host)
host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- host->dma_cfg_tx.slave_id = 13; /* DREQ channel */
host->dma_cfg_tx.direction = DMA_MEM_TO_DEV;
host->dma_cfg_tx.src_addr = 0;
host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA;
host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- host->dma_cfg_rx.slave_id = 13; /* DREQ channel */
host->dma_cfg_rx.direction = DMA_DEV_TO_MEM;
host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA;
host->dma_cfg_rx.dst_addr = 0;
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index c2dd29ef45c6..ca5be4445ae0 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -28,6 +28,7 @@ enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS5420_SMU,
DW_MCI_TYPE_EXYNOS7,
DW_MCI_TYPE_EXYNOS7_SMU,
+ DW_MCI_TYPE_ARTPEC8,
};
/* Exynos implementation specific driver private data */
@@ -69,6 +70,9 @@ static struct dw_mci_exynos_compatible {
}, {
.compatible = "samsung,exynos7-dw-mshc-smu",
.ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU,
+ }, {
+ .compatible = "axis,artpec8-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_ARTPEC8,
},
};
@@ -81,7 +85,8 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
return EXYNOS4210_FIXED_CIU_CLK_DIV;
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1;
else
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
@@ -122,6 +127,11 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
}
+ if (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) {
+ /* ARTPEC-8 uses the extended data read timeout (TMOUT) layout */
+ host->quirks |= DW_MMC_QUIRK_EXTENDED_TMOUT;
+ }
+
host->bus_hz /= (priv->ciu_div + 1);
return 0;
@@ -133,7 +143,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
u32 clksel;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
@@ -141,7 +152,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
clksel = (clksel & ~SDMMC_CLKSEL_TIMING_MASK) | timing;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
@@ -210,14 +222,16 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
return ret;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
if (clksel & SDMMC_CLKSEL_WAKEUP_INT) {
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
@@ -238,7 +252,8 @@ static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
* Registers related to HS400 cannot be
* configured on these controllers
*/
- if (priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420) {
+ if ((priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420) ||
+ (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)) {
if (timing == MMC_TIMING_MMC_HS400)
dev_warn(host->dev,
"cannot configure HS400, unsupported chipset\n");
@@ -394,7 +409,8 @@ static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
struct dw_mci_exynos_priv_data *priv = host->priv;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
else
return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
@@ -406,13 +422,15 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
struct dw_mci_exynos_priv_data *priv = host->priv;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
@@ -425,7 +443,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
u8 sample;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
@@ -434,7 +453,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
@@ -524,17 +544,66 @@ static int dw_mci_exynos_prepare_hs400_tuning(struct dw_mci *host,
return 0;
}
+static void dw_mci_exynos_set_data_timeout(struct dw_mci *host,
+ unsigned int timeout_ns)
+{
+ u32 clk_div, tmout;
+ u64 tmp;
+ unsigned int tmp2;
+
+ clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
+ if (clk_div == 0)
+ clk_div = 1;
+
+ tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
+ tmp = DIV_ROUND_UP_ULL(tmp, clk_div);
+
+ /* TMOUT[7:0] (RESPONSE_TIMEOUT) */
+ tmout = 0xFF; /* Set maximum */
+
+ /*
+ * Extended HW timer (max = 0x6FFFFF2):
+ * ((TMOUT[10:8] - 1) * 0xFFFFFF + TMOUT[31:11] * 8)
+ */
+ if (!tmp || tmp > 0x6FFFFF2) {
+ tmout |= (0xFFFFFF << 8);
+ } else {
+ /* TMOUT[10:8] */
+ tmp2 = (((unsigned int)tmp / 0xFFFFFF) + 1) & 0x7;
+ tmout |= tmp2 << 8;
+
+ /* TMOUT[31:11] */
+ tmp = tmp - ((tmp2 - 1) * 0xFFFFFF);
+ tmout |= (tmp & 0xFFFFF8) << 8;
+ }
+
+ mci_writel(host, TMOUT, tmout);
+ dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
+ timeout_ns, tmout >> 8);
+}
+
+static u32 dw_mci_exynos_get_drto_clks(struct dw_mci *host)
+{
+ u32 drto_clks;
+
+ drto_clks = mci_readl(host, TMOUT) >> 8;
+
+ return (((drto_clks & 0x7) - 1) * 0xFFFFFF) + ((drto_clks & 0xFFFFF8));
+}
+
/* Common capabilities of Exynos4/Exynos5 SoC */
static unsigned long exynos_dwmmc_caps[4] = {
- MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
+ MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA,
+ 0,
+ 0,
+ 0,
};
static const struct dw_mci_drv_data exynos_drv_data = {
.caps = exynos_dwmmc_caps,
.num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
+ .common_caps = MMC_CAP_CMD23,
.init = dw_mci_exynos_priv_init,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
@@ -542,6 +610,16 @@ static const struct dw_mci_drv_data exynos_drv_data = {
.prepare_hs400_tuning = dw_mci_exynos_prepare_hs400_tuning,
};
+static const struct dw_mci_drv_data artpec_drv_data = {
+ .common_caps = MMC_CAP_CMD23,
+ .init = dw_mci_exynos_priv_init,
+ .set_ios = dw_mci_exynos_set_ios,
+ .parse_dt = dw_mci_exynos_parse_dt,
+ .execute_tuning = dw_mci_exynos_execute_tuning,
+ .set_data_timeout = dw_mci_exynos_set_data_timeout,
+ .get_drto_clks = dw_mci_exynos_get_drto_clks,
+};
+
static const struct of_device_id dw_mci_exynos_match[] = {
{ .compatible = "samsung,exynos4412-dw-mshc",
.data = &exynos_drv_data, },
@@ -555,6 +633,8 @@ static const struct of_device_id dw_mci_exynos_match[] = {
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos7-dw-mshc-smu",
.data = &exynos_drv_data, },
+ { .compatible = "axis,artpec8-dw-mshc",
+ .data = &artpec_drv_data, },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
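The ARTPEC-8 encode/decode pair above can be checked in isolation: TMOUT[10:8] counts 0xFFFFFF-clock chunks (biased by one) and TMOUT[31:11] carries the remainder at 8-clock granularity. A runnable userspace round trip mirroring the two functions:

#include <stdio.h>

static unsigned int encode(unsigned long long clks)
{
	unsigned int tmout = 0xFF;	/* TMOUT[7:0]: response timeout, max */
	unsigned int chunks;

	if (!clks || clks > 0x6FFFFF2)
		return tmout | (0xFFFFFFu << 8);
	chunks = ((unsigned int)(clks / 0xFFFFFF) + 1) & 0x7;	/* TMOUT[10:8] */
	tmout |= chunks << 8;
	clks -= (unsigned long long)(chunks - 1) * 0xFFFFFF;
	return tmout | (((unsigned int)clks & 0xFFFFF8u) << 8);	/* TMOUT[31:11] */
}

static unsigned int decode(unsigned int tmout)
{
	unsigned int drto = tmout >> 8;

	return ((drto & 0x7) - 1) * 0xFFFFFF + (drto & 0xFFFFF8);
}

int main(void)
{
	/* decode(encode(x)) returns x rounded down to 8-clock granularity */
	printf("%u\n", decode(encode(0x2000000ULL)));	/* prints 33554430 */
	return 0;
}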
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index 39794f93826f..e9437ef8ef19 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -23,12 +23,6 @@ struct hi3798cv200_priv {
struct clk *drive_clk;
};
-static unsigned long dw_mci_hi3798cv200_caps[] = {
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23
-};
-
static void dw_mci_hi3798cv200_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct hi3798cv200_priv *priv = host->priv;
@@ -166,8 +160,7 @@ disable_sample_clk:
}
static const struct dw_mci_drv_data hi3798cv200_data = {
- .caps = dw_mci_hi3798cv200_caps,
- .num_caps = ARRAY_SIZE(dw_mci_hi3798cv200_caps),
+ .common_caps = MMC_CAP_CMD23,
.init = dw_mci_hi3798cv200_init,
.set_ios = dw_mci_hi3798cv200_set_ios,
.execute_tuning = dw_mci_hi3798cv200_execute_tuning,
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index d36991acd6df..95d0ec0f5f3a 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -300,21 +300,12 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
return 0;
}
-/* Common capabilities of RK3288 SoC */
-static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
- MMC_CAP_CMD23,
-};
-
static const struct dw_mci_drv_data rk2928_drv_data = {
.init = dw_mci_rockchip_init,
};
static const struct dw_mci_drv_data rk3288_drv_data = {
- .caps = dw_mci_rk3288_dwmmc_caps,
- .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
+ .common_caps = MMC_CAP_CMD23,
.set_ios = dw_mci_rk3288_set_ios,
.execute_tuning = dw_mci_rk3288_execute_tuning,
.parse_dt = dw_mci_rk3288_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index d977f34f6b55..42bf8a2287ba 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -335,7 +335,8 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
cmdr == MMC_WRITE_BLOCK ||
cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
cmdr == MMC_SEND_TUNING_BLOCK ||
- cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
+ cmdr == MMC_SEND_TUNING_BLOCK_HS200 ||
+ cmdr == MMC_GEN_CMD) {
stop->opcode = MMC_STOP_TRANSMISSION;
stop->arg = 0;
stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
@@ -1283,6 +1284,37 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
+static void dw_mci_set_data_timeout(struct dw_mci *host,
+ unsigned int timeout_ns)
+{
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ u32 clk_div, tmout;
+ u64 tmp;
+
+ if (drv_data && drv_data->set_data_timeout)
+ return drv_data->set_data_timeout(host, timeout_ns);
+
+ clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
+ if (clk_div == 0)
+ clk_div = 1;
+
+ tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
+ tmp = DIV_ROUND_UP_ULL(tmp, clk_div);
+
+ /* TMOUT[7:0] (RESPONSE_TIMEOUT) */
+ tmout = 0xFF; /* Set maximum */
+
+ /* TMOUT[31:8] (DATA_TIMEOUT) */
+ if (!tmp || tmp > 0xFFFFFF)
+ tmout |= (0xFFFFFF << 8);
+ else
+ tmout |= (tmp & 0xFFFFFF) << 8;
+
+ mci_writel(host, TMOUT, tmout);
+ dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
+ timeout_ns, tmout >> 8);
+}
+
static void __dw_mci_start_request(struct dw_mci *host,
struct dw_mci_slot *slot,
struct mmc_command *cmd)
@@ -1303,7 +1335,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
data = cmd->data;
if (data) {
- mci_writel(host, TMOUT, 0xFFFFFFFF);
+ dw_mci_set_data_timeout(host, data->timeout_ns);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
}
@@ -1967,12 +1999,16 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
static void dw_mci_set_drto(struct dw_mci *host)
{
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
unsigned int drto_clks;
unsigned int drto_div;
unsigned int drto_ms;
unsigned long irqflags;
- drto_clks = mci_readl(host, TMOUT) >> 8;
+ if (drv_data && drv_data->get_drto_clks)
+ drto_clks = drv_data->get_drto_clks(host);
+ else
+ drto_clks = mci_readl(host, TMOUT) >> 8;
drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
if (drto_div == 0)
drto_div = 1;
@@ -1980,6 +2016,8 @@ static void dw_mci_set_drto(struct dw_mci *host)
drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
host->bus_hz);
+ dev_dbg(host->dev, "drto_ms: %u\n", drto_ms);
+
/* add a bit spare time */
drto_ms += 10;
@@ -2724,11 +2762,20 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
spin_lock(&host->irq_lock);
+ if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
+ del_timer(&host->dto_timer);
+
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
host->data_status = pending;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+ if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
+ /* In case of error, we cannot expect a DTO */
+ set_bit(EVENT_DATA_COMPLETE,
+ &host->pending_events);
+
tasklet_schedule(&host->tasklet);
spin_unlock(&host->irq_lock);
@@ -2828,6 +2875,9 @@ static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
if (host->pdata->pm_caps)
mmc->pm_caps = host->pdata->pm_caps;
+ if (drv_data)
+ mmc->caps |= drv_data->common_caps;
+
if (host->dev->of_node) {
ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
if (ctrl_id < 0)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index ce05d81477d9..7f1e38621d13 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -118,6 +118,7 @@ struct dw_mci_dma_slave {
* @part_buf: Simple buffer for partial fifo reads/writes.
* @push_data: Pointer to FIFO push function.
* @pull_data: Pointer to FIFO pull function.
+ * @quirks: Set of quirks that apply to specific versions of the IP.
* @vqmmc_enabled: Status of vqmmc, should be true or false.
* @irq_flags: The flags to be passed to request_irq.
* @irq: The irq value to be passed to request_irq.
@@ -223,6 +224,7 @@ struct dw_mci {
void (*push_data)(struct dw_mci *host, void *buf, int cnt);
void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
+ u32 quirks;
bool vqmmc_enabled;
unsigned long irq_flags; /* IRQ flags */
int irq;
@@ -274,6 +276,9 @@ struct dw_mci_board {
struct dma_pdata *data;
};
+/* Support for longer data read timeout */
+#define DW_MMC_QUIRK_EXTENDED_TMOUT BIT(0)
+
#define DW_MMC_240A 0x240a
#define DW_MMC_280A 0x280a
@@ -550,10 +555,14 @@ struct dw_mci_slot {
* dw_mci driver data - dw-mshc implementation specific driver data.
* @caps: mmc subsystem specified capabilities of the controller(s).
* @num_caps: number of capabilities specified by @caps.
+ * @common_caps: mmc subsystem specified capabilities applicable to all of
+ * the controllers.
* @init: early implementation specific initialization.
* @set_ios: handle bus specific extensions.
* @parse_dt: parse implementation specific device tree properties.
* @execute_tuning: implementation specific tuning procedure.
+ * @set_data_timeout: implementation specific routine to program the data timeout.
+ * @get_drto_clks: implementation specific cycle count for data read timeout.
*
* Provide controller implementation specific extensions. The usage of this
* data structure is fully optional and usage of each member in this structure
@@ -562,6 +571,7 @@ struct dw_mci_slot {
struct dw_mci_drv_data {
unsigned long *caps;
u32 num_caps;
+ u32 common_caps;
int (*init)(struct dw_mci *host);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
@@ -570,5 +580,8 @@ struct dw_mci_drv_data {
struct mmc_ios *ios);
int (*switch_voltage)(struct mmc_host *mmc,
struct mmc_ios *ios);
+ void (*set_data_timeout)(struct dw_mci *host,
+ unsigned int timeout_ns);
+ u32 (*get_drto_clks)(struct dw_mci *host);
};
#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 80a2c270d502..7ab1b38a7be5 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -217,11 +217,23 @@ static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
return;
dma_release_channel(host->dma_tx);
- dma_release_channel(host->dma_rx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
}
static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
+ struct device *dev = mmc_dev(host->mmc);
+
+ host->dma_tx = dma_request_chan(dev, "tx-rx");
+ if (!IS_ERR(host->dma_tx))
+ return 0;
+
+ if (PTR_ERR(host->dma_tx) != -ENODEV) {
+ dev_err(dev, "Failed to get dma tx-rx channel\n");
+ return PTR_ERR(host->dma_tx);
+ }
+
host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
if (IS_ERR(host->dma_tx)) {
dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
@@ -241,7 +253,10 @@ static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
struct mmc_data *data)
{
- return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
+ if ((data->flags & MMC_DATA_READ) && host->dma_rx)
+ return host->dma_rx;
+ else
+ return host->dma_tx;
}
static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
@@ -1103,18 +1118,18 @@ static int jz4740_mmc_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused jz4740_mmc_suspend(struct device *dev)
+static int jz4740_mmc_suspend(struct device *dev)
{
return pinctrl_pm_select_sleep_state(dev);
}
-static int __maybe_unused jz4740_mmc_resume(struct device *dev)
+static int jz4740_mmc_resume(struct device *dev)
{
return pinctrl_select_default_state(dev);
}
-static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
- jz4740_mmc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
+ jz4740_mmc_resume);
static struct platform_driver jz4740_mmc_driver = {
.probe = jz4740_mmc_probe,
@@ -1123,7 +1138,7 @@ static struct platform_driver jz4740_mmc_driver = {
.name = "jz4740-mmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(jz4740_mmc_of_match),
- .pm = pm_ptr(&jz4740_mmc_pm_ops),
+ .pm = pm_sleep_ptr(&jz4740_mmc_pm_ops),
},
};
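The jz4740 conversion shows the 5.17 PM idiom: DEFINE_SIMPLE_DEV_PM_OPS() paired with pm_sleep_ptr() lets the compiler discard the ops and callbacks when CONFIG_PM_SLEEP is off, so the __maybe_unused annotations can go. A minimal sketch (all names illustrative):

#include <linux/pm.h>
#include <linux/platform_device.h>

static int demo_suspend(struct device *dev) { return 0; }
static int demo_resume(struct device *dev)  { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.pm = pm_sleep_ptr(&demo_pm_ops),
	},
};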
diff --git a/drivers/mmc/host/meson-mx-sdhc-clkc.c b/drivers/mmc/host/meson-mx-sdhc-clkc.c
index e1f29b279123..19200b7079a6 100644
--- a/drivers/mmc/host/meson-mx-sdhc-clkc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-clkc.c
@@ -12,8 +12,6 @@
#include "meson-mx-sdhc.h"
-#define MESON_SDHC_NUM_BUILTIN_CLKS 6
-
struct meson_mx_sdhc_clkc {
struct clk_mux src_sel;
struct clk_divider div;
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index 8fdd0bbbfa21..28aa78aa08f3 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -854,6 +854,11 @@ static int meson_mx_sdhc_probe(struct platform_device *pdev)
goto err_disable_pclk;
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_disable_pclk;
+ }
+
ret = devm_request_threaded_irq(dev, irq, meson_mx_sdhc_irq,
meson_mx_sdhc_irq_thread, IRQF_ONESHOT,
NULL, host);
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index d4a48916bfb6..3a19a05ef55a 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -662,6 +662,11 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto error_free_mmc;
+ }
+
ret = devm_request_threaded_irq(host->controller_dev, irq,
meson_mx_mmc_irq,
meson_mx_mmc_irq_thread, IRQF_ONESHOT,
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index b431cdd27353..a576181e9db0 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -547,7 +547,7 @@ mmc_spi_command_send(struct mmc_spi_host *host,
static void
mmc_spi_setup_data_message(
struct mmc_spi_host *host,
- int multiple,
+ bool multiple,
enum dma_data_direction direction)
{
struct spi_transfer *t;
@@ -859,14 +859,14 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
struct spi_device *spi = host->spi;
struct device *dma_dev = host->dma_dev;
struct spi_transfer *t;
- enum dma_data_direction direction;
+ enum dma_data_direction direction = mmc_get_dma_dir(data);
struct scatterlist *sg;
unsigned n_sg;
- int multiple = (data->blocks > 1);
+ bool multiple = (data->blocks > 1);
+ const char *write_or_read = (direction == DMA_TO_DEVICE) ? "write" : "read";
u32 clock_rate;
unsigned long timeout;
- direction = mmc_get_dma_dir(data);
mmc_spi_setup_data_message(host, multiple, direction);
t = &host->t;
@@ -921,9 +921,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
while (length) {
t->len = min(length, blk_size);
- dev_dbg(&host->spi->dev, " %s block, %d bytes\n",
- (direction == DMA_TO_DEVICE) ? "write" : "read",
- t->len);
+ dev_dbg(&spi->dev, " %s block, %d bytes\n", write_or_read, t->len);
if (direction == DMA_TO_DEVICE)
status = mmc_spi_writeblock(host, t, timeout);
@@ -948,9 +946,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
if (status < 0) {
data->error = status;
- dev_dbg(&spi->dev, "%s status %d\n",
- (direction == DMA_TO_DEVICE) ? "write" : "read",
- status);
+ dev_dbg(&spi->dev, "%s status %d\n", write_or_read, status);
break;
}
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index c9cacd4d5b22..45b8608c935c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -280,7 +280,7 @@ static struct variant_data variant_stm32_sdmmc = {
static struct variant_data variant_stm32_sdmmcv2 = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
- .f_max = 208000000,
+ .f_max = 267000000,
.stm32_clkdiv = true,
.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
@@ -2435,6 +2435,11 @@ static const struct amba_id mmci_ids[] = {
.mask = 0xf0ffffff,
.data = &variant_stm32_sdmmcv2,
},
+ {
+ .id = 0x20253180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmcv2,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index a75d3dd34d18..9c13f2c31365 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -241,11 +241,12 @@ static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
/*
* SDMMC_FBCK is selected when an external Delay Block is needed
- * with SDR104.
+ * with SDR104 or HS200.
*/
if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50) {
clk |= MCI_STM32_CLK_BUSSPEED;
- if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) {
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104 ||
+ host->mmc->ios.timing == MMC_TIMING_MMC_HS200) {
clk &= ~MCI_STM32_CLK_SEL_MSK;
clk |= MCI_STM32_CLK_SELFBCK;
}
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 632775217d35..65037e1d7723 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -5,6 +5,7 @@
*/
#include <linux/module.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -98,226 +99,226 @@
/*--------------------------------------------------------------------------*/
/* MSDC_CFG mask */
-#define MSDC_CFG_MODE (0x1 << 0) /* RW */
-#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */
-#define MSDC_CFG_RST (0x1 << 2) /* RW */
-#define MSDC_CFG_PIO (0x1 << 3) /* RW */
-#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */
-#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */
-#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */
-#define MSDC_CFG_CKSTB (0x1 << 7) /* R */
-#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
-#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
-#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
-#define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */
-#define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */
-#define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */
+#define MSDC_CFG_MODE BIT(0) /* RW */
+#define MSDC_CFG_CKPDN BIT(1) /* RW */
+#define MSDC_CFG_RST BIT(2) /* RW */
+#define MSDC_CFG_PIO BIT(3) /* RW */
+#define MSDC_CFG_CKDRVEN BIT(4) /* RW */
+#define MSDC_CFG_BV18SDT BIT(5) /* RW */
+#define MSDC_CFG_BV18PSS BIT(6) /* R */
+#define MSDC_CFG_CKSTB BIT(7) /* R */
+#define MSDC_CFG_CKDIV GENMASK(15, 8) /* RW */
+#define MSDC_CFG_CKMOD GENMASK(17, 16) /* RW */
+#define MSDC_CFG_HS400_CK_MODE BIT(18) /* RW */
+#define MSDC_CFG_HS400_CK_MODE_EXTRA BIT(22) /* RW */
+#define MSDC_CFG_CKDIV_EXTRA GENMASK(19, 8) /* RW */
+#define MSDC_CFG_CKMOD_EXTRA GENMASK(21, 20) /* RW */
/* MSDC_IOCON mask */
-#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
-#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */
-#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */
-#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */
-#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */
-#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */
-#define MSDC_IOCON_W_DSPL (0x1 << 8) /* RW */
-#define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */
-#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */
-#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */
-#define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */
-#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */
-#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */
-#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */
-#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */
-#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */
+#define MSDC_IOCON_SDR104CKS BIT(0) /* RW */
+#define MSDC_IOCON_RSPL BIT(1) /* RW */
+#define MSDC_IOCON_DSPL BIT(2) /* RW */
+#define MSDC_IOCON_DDLSEL BIT(3) /* RW */
+#define MSDC_IOCON_DDR50CKD BIT(4) /* RW */
+#define MSDC_IOCON_DSPLSEL BIT(5) /* RW */
+#define MSDC_IOCON_W_DSPL BIT(8) /* RW */
+#define MSDC_IOCON_D0SPL BIT(16) /* RW */
+#define MSDC_IOCON_D1SPL BIT(17) /* RW */
+#define MSDC_IOCON_D2SPL BIT(18) /* RW */
+#define MSDC_IOCON_D3SPL BIT(19) /* RW */
+#define MSDC_IOCON_D4SPL BIT(20) /* RW */
+#define MSDC_IOCON_D5SPL BIT(21) /* RW */
+#define MSDC_IOCON_D6SPL BIT(22) /* RW */
+#define MSDC_IOCON_D7SPL BIT(23) /* RW */
+#define MSDC_IOCON_RISCSZ GENMASK(25, 24) /* RW */
/* MSDC_PS mask */
-#define MSDC_PS_CDEN (0x1 << 0) /* RW */
-#define MSDC_PS_CDSTS (0x1 << 1) /* R */
-#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
-#define MSDC_PS_DAT (0xff << 16) /* R */
-#define MSDC_PS_DATA1 (0x1 << 17) /* R */
-#define MSDC_PS_CMD (0x1 << 24) /* R */
-#define MSDC_PS_WP (0x1 << 31) /* R */
+#define MSDC_PS_CDEN BIT(0) /* RW */
+#define MSDC_PS_CDSTS BIT(1) /* R */
+#define MSDC_PS_CDDEBOUNCE GENMASK(15, 12) /* RW */
+#define MSDC_PS_DAT GENMASK(23, 16) /* R */
+#define MSDC_PS_DATA1 BIT(17) /* R */
+#define MSDC_PS_CMD BIT(24) /* R */
+#define MSDC_PS_WP BIT(31) /* R */
/* MSDC_INT mask */
-#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */
-#define MSDC_INT_CDSC (0x1 << 1) /* W1C */
-#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */
-#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */
-#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */
-#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */
-#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */
-#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */
-#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */
-#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */
-#define MSDC_INT_CSTA (0x1 << 11) /* R */
-#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */
-#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */
-#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */
-#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */
-#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */
-#define MSDC_INT_DMA_BDCSERR (0x1 << 17) /* W1C */
-#define MSDC_INT_DMA_GPDCSERR (0x1 << 18) /* W1C */
-#define MSDC_INT_DMA_PROTECT (0x1 << 19) /* W1C */
-#define MSDC_INT_CMDQ (0x1 << 28) /* W1C */
+#define MSDC_INT_MMCIRQ BIT(0) /* W1C */
+#define MSDC_INT_CDSC BIT(1) /* W1C */
+#define MSDC_INT_ACMDRDY BIT(3) /* W1C */
+#define MSDC_INT_ACMDTMO BIT(4) /* W1C */
+#define MSDC_INT_ACMDCRCERR BIT(5) /* W1C */
+#define MSDC_INT_DMAQ_EMPTY BIT(6) /* W1C */
+#define MSDC_INT_SDIOIRQ BIT(7) /* W1C */
+#define MSDC_INT_CMDRDY BIT(8) /* W1C */
+#define MSDC_INT_CMDTMO BIT(9) /* W1C */
+#define MSDC_INT_RSPCRCERR BIT(10) /* W1C */
+#define MSDC_INT_CSTA BIT(11) /* R */
+#define MSDC_INT_XFER_COMPL BIT(12) /* W1C */
+#define MSDC_INT_DXFER_DONE BIT(13) /* W1C */
+#define MSDC_INT_DATTMO BIT(14) /* W1C */
+#define MSDC_INT_DATCRCERR BIT(15) /* W1C */
+#define MSDC_INT_ACMD19_DONE BIT(16) /* W1C */
+#define MSDC_INT_DMA_BDCSERR BIT(17) /* W1C */
+#define MSDC_INT_DMA_GPDCSERR BIT(18) /* W1C */
+#define MSDC_INT_DMA_PROTECT BIT(19) /* W1C */
+#define MSDC_INT_CMDQ BIT(28) /* W1C */
/* MSDC_INTEN mask */
-#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */
-#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */
-#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */
-#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */
-#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */
-#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */
-#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */
-#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */
-#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */
-#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */
-#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */
-#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */
-#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */
-#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */
-#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */
-#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */
-#define MSDC_INTEN_DMA_BDCSERR (0x1 << 17) /* RW */
-#define MSDC_INTEN_DMA_GPDCSERR (0x1 << 18) /* RW */
-#define MSDC_INTEN_DMA_PROTECT (0x1 << 19) /* RW */
+#define MSDC_INTEN_MMCIRQ BIT(0) /* RW */
+#define MSDC_INTEN_CDSC BIT(1) /* RW */
+#define MSDC_INTEN_ACMDRDY BIT(3) /* RW */
+#define MSDC_INTEN_ACMDTMO BIT(4) /* RW */
+#define MSDC_INTEN_ACMDCRCERR BIT(5) /* RW */
+#define MSDC_INTEN_DMAQ_EMPTY BIT(6) /* RW */
+#define MSDC_INTEN_SDIOIRQ BIT(7) /* RW */
+#define MSDC_INTEN_CMDRDY BIT(8) /* RW */
+#define MSDC_INTEN_CMDTMO BIT(9) /* RW */
+#define MSDC_INTEN_RSPCRCERR BIT(10) /* RW */
+#define MSDC_INTEN_CSTA BIT(11) /* RW */
+#define MSDC_INTEN_XFER_COMPL BIT(12) /* RW */
+#define MSDC_INTEN_DXFER_DONE BIT(13) /* RW */
+#define MSDC_INTEN_DATTMO BIT(14) /* RW */
+#define MSDC_INTEN_DATCRCERR BIT(15) /* RW */
+#define MSDC_INTEN_ACMD19_DONE BIT(16) /* RW */
+#define MSDC_INTEN_DMA_BDCSERR BIT(17) /* RW */
+#define MSDC_INTEN_DMA_GPDCSERR BIT(18) /* RW */
+#define MSDC_INTEN_DMA_PROTECT BIT(19) /* RW */
/* MSDC_FIFOCS mask */
-#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */
-#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */
-#define MSDC_FIFOCS_CLR (0x1 << 31) /* RW */
+#define MSDC_FIFOCS_RXCNT GENMASK(7, 0) /* R */
+#define MSDC_FIFOCS_TXCNT GENMASK(23, 16) /* R */
+#define MSDC_FIFOCS_CLR BIT(31) /* RW */
/* SDC_CFG mask */
-#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */
-#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */
-#define SDC_CFG_WRDTOC (0x1fff << 2) /* RW */
-#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */
-#define SDC_CFG_SDIO (0x1 << 19) /* RW */
-#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */
-#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */
-#define SDC_CFG_DTOC (0xff << 24) /* RW */
+#define SDC_CFG_SDIOINTWKUP BIT(0) /* RW */
+#define SDC_CFG_INSWKUP BIT(1) /* RW */
+#define SDC_CFG_WRDTOC GENMASK(14, 2) /* RW */
+#define SDC_CFG_BUSWIDTH GENMASK(17, 16) /* RW */
+#define SDC_CFG_SDIO BIT(19) /* RW */
+#define SDC_CFG_SDIOIDE BIT(20) /* RW */
+#define SDC_CFG_INTATGAP BIT(21) /* RW */
+#define SDC_CFG_DTOC GENMASK(31, 24) /* RW */
/* SDC_STS mask */
-#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */
-#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
-#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
+#define SDC_STS_SDCBUSY BIT(0) /* RW */
+#define SDC_STS_CMDBUSY BIT(1) /* RW */
+#define SDC_STS_SWR_COMPL BIT(31) /* RW */
-#define SDC_DAT1_IRQ_TRIGGER (0x1 << 19) /* RW */
+#define SDC_DAT1_IRQ_TRIGGER BIT(19) /* RW */
/* SDC_ADV_CFG0 mask */
-#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */
+#define SDC_RX_ENHANCE_EN BIT(20) /* RW */
/* DMA_SA_H4BIT mask */
-#define DMA_ADDR_HIGH_4BIT (0xf << 0) /* RW */
+#define DMA_ADDR_HIGH_4BIT GENMASK(3, 0) /* RW */
/* MSDC_DMA_CTRL mask */
-#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
-#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
-#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */
-#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */
-#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */
-#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */
+#define MSDC_DMA_CTRL_START BIT(0) /* W */
+#define MSDC_DMA_CTRL_STOP BIT(1) /* W */
+#define MSDC_DMA_CTRL_RESUME BIT(2) /* W */
+#define MSDC_DMA_CTRL_MODE BIT(8) /* RW */
+#define MSDC_DMA_CTRL_LASTBUF BIT(10) /* RW */
+#define MSDC_DMA_CTRL_BRUSTSZ GENMASK(14, 12) /* RW */
/* MSDC_DMA_CFG mask */
-#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */
-#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */
-#define MSDC_DMA_CFG_AHBHPROT2 (0x2 << 8) /* RW */
-#define MSDC_DMA_CFG_ACTIVEEN (0x2 << 12) /* RW */
-#define MSDC_DMA_CFG_CS12B16B (0x1 << 16) /* RW */
+#define MSDC_DMA_CFG_STS BIT(0) /* R */
+#define MSDC_DMA_CFG_DECSEN BIT(1) /* RW */
+#define MSDC_DMA_CFG_AHBHPROT2 BIT(9) /* RW */
+#define MSDC_DMA_CFG_ACTIVEEN BIT(13) /* RW */
+#define MSDC_DMA_CFG_CS12B16B BIT(16) /* RW */
/* MSDC_PATCH_BIT mask */
-#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */
-#define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7)
-#define MSDC_CKGEN_MSDC_DLY_SEL (0x1f << 10)
-#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */
-#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */
-#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */
-#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */
-#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */
-#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */
-#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */
-#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
-#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
-
-#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */
-#define MSDC_PB1_BUSY_CHECK_SEL (0x1 << 7) /* RW */
-#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
-
-#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
-#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */
-#define MSDC_PB2_SUPPORT_64G (0x1 << 1) /* RW */
-#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */
-#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */
-#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */
-
-#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */
-#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
-#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
-#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */
-#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
-#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */
-#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
-#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */
-
-#define PAD_DS_TUNE_DLY_SEL (0x1 << 0) /* RW */
-#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
-#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
-#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
-
-#define PAD_CMD_TUNE_RX_DLY3 (0x1f << 1) /* RW */
+#define MSDC_PATCH_BIT_ODDSUPP BIT(1) /* RW */
+#define MSDC_INT_DAT_LATCH_CK_SEL GENMASK(9, 7)
+#define MSDC_CKGEN_MSDC_DLY_SEL GENMASK(14, 10)
+#define MSDC_PATCH_BIT_IODSSEL BIT(16) /* RW */
+#define MSDC_PATCH_BIT_IOINTSEL BIT(17) /* RW */
+#define MSDC_PATCH_BIT_BUSYDLY GENMASK(21, 18) /* RW */
+#define MSDC_PATCH_BIT_WDOD GENMASK(25, 22) /* RW */
+#define MSDC_PATCH_BIT_IDRTSEL BIT(26) /* RW */
+#define MSDC_PATCH_BIT_CMDFSEL BIT(27) /* RW */
+#define MSDC_PATCH_BIT_INTDLSEL BIT(28) /* RW */
+#define MSDC_PATCH_BIT_SPCPUSH BIT(29) /* RW */
+#define MSDC_PATCH_BIT_DECRCTMO BIT(30) /* RW */
+
+#define MSDC_PATCH_BIT1_CMDTA GENMASK(5, 3) /* RW */
+#define MSDC_PB1_BUSY_CHECK_SEL BIT(7) /* RW */
+#define MSDC_PATCH_BIT1_STOP_DLY GENMASK(11, 8) /* RW */
+
+#define MSDC_PATCH_BIT2_CFGRESP BIT(15) /* RW */
+#define MSDC_PATCH_BIT2_CFGCRCSTS BIT(28) /* RW */
+#define MSDC_PB2_SUPPORT_64G BIT(1) /* RW */
+#define MSDC_PB2_RESPWAIT GENMASK(3, 2) /* RW */
+#define MSDC_PB2_RESPSTSENSEL GENMASK(18, 16) /* RW */
+#define MSDC_PB2_CRCSTSENSEL GENMASK(31, 29) /* RW */
+
+#define MSDC_PAD_TUNE_DATWRDLY GENMASK(4, 0) /* RW */
+#define MSDC_PAD_TUNE_DATRRDLY GENMASK(12, 8) /* RW */
+#define MSDC_PAD_TUNE_CMDRDLY GENMASK(20, 16) /* RW */
+#define MSDC_PAD_TUNE_CMDRRDLY GENMASK(26, 22) /* RW */
+#define MSDC_PAD_TUNE_CLKTDLY GENMASK(31, 27) /* RW */
+#define MSDC_PAD_TUNE_RXDLYSEL BIT(15) /* RW */
+#define MSDC_PAD_TUNE_RD_SEL BIT(13) /* RW */
+#define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */
+
+#define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */
+#define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */
+#define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */
+#define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */
+
+#define PAD_CMD_TUNE_RX_DLY3 GENMASK(5, 1) /* RW */
/* EMMC51_CFG0 mask */
-#define CMDQ_RDAT_CNT (0x3ff << 12) /* RW */
+#define CMDQ_RDAT_CNT GENMASK(21, 12) /* RW */
-#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */
-#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
-#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
-#define EMMC50_CFG_CMD_RESP_SEL (0x1 << 9) /* RW */
+#define EMMC50_CFG_PADCMD_LATCHCK BIT(0) /* RW */
+#define EMMC50_CFG_CRCSTS_EDGE BIT(3) /* RW */
+#define EMMC50_CFG_CFCSTS_SEL BIT(4) /* RW */
+#define EMMC50_CFG_CMD_RESP_SEL BIT(9) /* RW */
/* EMMC50_CFG1 mask */
-#define EMMC50_CFG1_DS_CFG (0x1 << 28) /* RW */
+#define EMMC50_CFG1_DS_CFG BIT(28) /* RW */
-#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */
+#define EMMC50_CFG3_OUTS_WR GENMASK(4, 0) /* RW */
-#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */
-#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */
+#define SDC_FIFO_CFG_WRVALIDSEL BIT(24) /* RW */
+#define SDC_FIFO_CFG_RDVALIDSEL BIT(25) /* RW */
/* CQHCI_SETTING */
-#define CQHCI_RD_CMD_WND_SEL (0x1 << 14) /* RW */
-#define CQHCI_WR_CMD_WND_SEL (0x1 << 15) /* RW */
+#define CQHCI_RD_CMD_WND_SEL BIT(14) /* RW */
+#define CQHCI_WR_CMD_WND_SEL BIT(15) /* RW */
/* EMMC_TOP_CONTROL mask */
-#define PAD_RXDLY_SEL (0x1 << 0) /* RW */
-#define DELAY_EN (0x1 << 1) /* RW */
-#define PAD_DAT_RD_RXDLY2 (0x1f << 2) /* RW */
-#define PAD_DAT_RD_RXDLY (0x1f << 7) /* RW */
-#define PAD_DAT_RD_RXDLY2_SEL (0x1 << 12) /* RW */
-#define PAD_DAT_RD_RXDLY_SEL (0x1 << 13) /* RW */
-#define DATA_K_VALUE_SEL (0x1 << 14) /* RW */
-#define SDC_RX_ENH_EN (0x1 << 15) /* TW */
+#define PAD_RXDLY_SEL BIT(0) /* RW */
+#define DELAY_EN BIT(1) /* RW */
+#define PAD_DAT_RD_RXDLY2 GENMASK(6, 2) /* RW */
+#define PAD_DAT_RD_RXDLY GENMASK(11, 7) /* RW */
+#define PAD_DAT_RD_RXDLY2_SEL BIT(12) /* RW */
+#define PAD_DAT_RD_RXDLY_SEL BIT(13) /* RW */
+#define DATA_K_VALUE_SEL BIT(14) /* RW */
+#define SDC_RX_ENH_EN BIT(15) /* RW */
/* EMMC_TOP_CMD mask */
-#define PAD_CMD_RXDLY2 (0x1f << 0) /* RW */
-#define PAD_CMD_RXDLY (0x1f << 5) /* RW */
-#define PAD_CMD_RD_RXDLY2_SEL (0x1 << 10) /* RW */
-#define PAD_CMD_RD_RXDLY_SEL (0x1 << 11) /* RW */
-#define PAD_CMD_TX_DLY (0x1f << 12) /* RW */
+#define PAD_CMD_RXDLY2 GENMASK(4, 0) /* RW */
+#define PAD_CMD_RXDLY GENMASK(9, 5) /* RW */
+#define PAD_CMD_RD_RXDLY2_SEL BIT(10) /* RW */
+#define PAD_CMD_RD_RXDLY_SEL BIT(11) /* RW */
+#define PAD_CMD_TX_DLY GENMASK(16, 12) /* RW */
/* EMMC50_PAD_DS_TUNE mask */
-#define PAD_DS_DLY_SEL (0x1 << 16) /* RW */
-#define PAD_DS_DLY1 (0x1f << 10) /* RW */
-#define PAD_DS_DLY3 (0x1f << 0) /* RW */
+#define PAD_DS_DLY_SEL BIT(16) /* RW */
+#define PAD_DS_DLY1 GENMASK(14, 10) /* RW */
+#define PAD_DS_DLY3 GENMASK(4, 0) /* RW */
-#define REQ_CMD_EIO (0x1 << 0)
-#define REQ_CMD_TMO (0x1 << 1)
-#define REQ_DAT_ERR (0x1 << 2)
-#define REQ_STOP_EIO (0x1 << 3)
-#define REQ_STOP_TMO (0x1 << 4)
-#define REQ_CMD_BUSY (0x1 << 5)
+#define REQ_CMD_EIO BIT(0)
+#define REQ_CMD_TMO BIT(1)
+#define REQ_DAT_ERR BIT(2)
+#define REQ_STOP_EIO BIT(3)
+#define REQ_STOP_TMO BIT(4)
+#define REQ_CMD_BUSY BIT(5)
-#define MSDC_PREPARE_FLAG (0x1 << 0)
-#define MSDC_ASYNC_FLAG (0x1 << 1)
-#define MSDC_MMAP_FLAG (0x1 << 2)
+#define MSDC_PREPARE_FLAG BIT(0)
+#define MSDC_ASYNC_FLAG BIT(1)
+#define MSDC_MMAP_FLAG BIT(2)
#define MTK_MMC_AUTOSUSPEND_DELAY 50
#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */
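All of the mask conversions above are mechanical: BIT(n) expands to 1UL << n and GENMASK(h, l) to the mask with bits l through h set, so pairs that look like bit-number changes, e.g. (0x2 << 8) becoming BIT(9), are exact equivalences (shifting the two-bit value 0x2 left by 8 sets bit 9). A standalone check, with the two helpers re-defined locally for illustration; the kernel versions live in <linux/bits.h>:

	#include <assert.h>
	#include <stdio.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))
	#define BIT(n)		(1UL << (n))
	#define GENMASK(h, l) \
		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

	int main(void)
	{
		assert((0x2UL << 8) == BIT(9));		/* MSDC_DMA_CFG_AHBHPROT2 */
		assert((0x2UL << 12) == BIT(13));	/* MSDC_DMA_CFG_ACTIVEEN */
		assert((0x1fUL << 10) == GENMASK(14, 10)); /* MSDC_CKGEN_MSDC_DLY_SEL */
		assert((0x7UL << 7) == GENMASK(9, 7));	/* MSDC_INT_DAT_LATCH_CK_SEL */
		puts("all converted masks identical");
		return 0;
	}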
@@ -331,17 +332,17 @@
/*--------------------------------------------------------------------------*/
struct mt_gpdma_desc {
u32 gpd_info;
-#define GPDMA_DESC_HWO (0x1 << 0)
-#define GPDMA_DESC_BDP (0x1 << 1)
-#define GPDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */
-#define GPDMA_DESC_INT (0x1 << 16)
-#define GPDMA_DESC_NEXT_H4 (0xf << 24)
-#define GPDMA_DESC_PTR_H4 (0xf << 28)
+#define GPDMA_DESC_HWO BIT(0)
+#define GPDMA_DESC_BDP BIT(1)
+#define GPDMA_DESC_CHECKSUM GENMASK(15, 8)
+#define GPDMA_DESC_INT BIT(16)
+#define GPDMA_DESC_NEXT_H4 GENMASK(27, 24)
+#define GPDMA_DESC_PTR_H4 GENMASK(31, 28)
u32 next;
u32 ptr;
u32 gpd_data_len;
-#define GPDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */
-#define GPDMA_DESC_EXTLEN (0xff << 16) /* bit16 ~ bit23 */
+#define GPDMA_DESC_BUFLEN GENMASK(15, 0)
+#define GPDMA_DESC_EXTLEN GENMASK(23, 16)
u32 arg;
u32 blknum;
u32 cmd;
@@ -349,17 +350,17 @@ struct mt_gpdma_desc {
struct mt_bdma_desc {
u32 bd_info;
-#define BDMA_DESC_EOL (0x1 << 0)
-#define BDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */
-#define BDMA_DESC_BLKPAD (0x1 << 17)
-#define BDMA_DESC_DWPAD (0x1 << 18)
-#define BDMA_DESC_NEXT_H4 (0xf << 24)
-#define BDMA_DESC_PTR_H4 (0xf << 28)
+#define BDMA_DESC_EOL BIT(0)
+#define BDMA_DESC_CHECKSUM GENMASK(15, 8)
+#define BDMA_DESC_BLKPAD BIT(17)
+#define BDMA_DESC_DWPAD BIT(18)
+#define BDMA_DESC_NEXT_H4 GENMASK(27, 24)
+#define BDMA_DESC_PTR_H4 GENMASK(31, 28)
u32 next;
u32 ptr;
u32 bd_data_len;
-#define BDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */
-#define BDMA_DESC_BUFLEN_EXT (0xffffff) /* bit0 ~ bit23 */
+#define BDMA_DESC_BUFLEN GENMASK(15, 0)
+#define BDMA_DESC_BUFLEN_EXT GENMASK(23, 0)
};
struct msdc_dma {
@@ -636,12 +637,11 @@ static void msdc_reset_hw(struct msdc_host *host)
u32 val;
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
- while (readl(host->base + MSDC_CFG) & MSDC_CFG_RST)
- cpu_relax();
+ readl_poll_timeout(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
- while (readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR)
- cpu_relax();
+ readl_poll_timeout(host->base + MSDC_FIFOCS, val,
+ !(val & MSDC_FIFOCS_CLR), 0, 0);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
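The open-coded cpu_relax() loops become readl_poll_timeout() calls; with both sleep_us and timeout_us zero the macro spins forever without sleeping, which preserves the old behaviour, while later hunks pass 1 and 20000 for a bounded 20 ms wait (and use the _atomic variant where the caller may not sleep). A minimal userspace model of the contract, illustrative only; the names here are hypothetical, and real iopoll accounts time in microseconds rather than iterations:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>

	typedef uint32_t (*read_fn_t)(void *ctx);
	typedef bool (*cond_fn_t)(uint32_t val);

	/* Returns 0 once cond holds, -ETIMEDOUT after the poll budget;
	 * a zero budget means poll indefinitely, as in the hunk above. */
	static int poll_register(read_fn_t read_reg, void *ctx, cond_fn_t cond,
				 uint64_t max_polls, uint32_t *out)
	{
		uint64_t polls = 0;

		for (;;) {
			*out = read_reg(ctx);
			if (cond(*out))
				return 0;
			if (max_polls && ++polls >= max_polls)
				return -ETIMEDOUT;
		}
	}

	static uint32_t fake_read(void *ctx) { return --*(uint32_t *)ctx; }
	static bool rst_cleared(uint32_t v) { return !(v & 1); }

	int main(void)
	{
		uint32_t reg = 5, val;
		return poll_register(fake_read, &reg, rst_cleared, 0, &val);
	}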
@@ -725,7 +725,7 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL);
dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE);
- dma_ctrl |= (MSDC_BURST_64B << 12 | 1 << 8);
+ dma_ctrl |= (MSDC_BURST_64B << 12 | BIT(8));
writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL);
if (host->dev_comp->support_64g)
sdr_set_field(host->base + DMA_SA_H4BIT, DMA_ADDR_HIGH_4BIT,
@@ -769,7 +769,7 @@ static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks)
do_div(timeout, clk_ns);
timeout += clks;
/* in 1048576 sclk cycle unit */
- timeout = DIV_ROUND_UP(timeout, (0x1 << 20));
+ timeout = DIV_ROUND_UP(timeout, BIT(20));
if (host->dev_comp->clk_div_bits == 8)
sdr_get_field(host->base + MSDC_CFG,
MSDC_CFG_CKMOD, &mode);
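The timeout counter is programmed in units of 2^20 source-clock cycles, which is what BIT(20) now states explicitly where the old code had the magic 0x1 << 20. Quick arithmetic check (userspace, illustrative):

	#include <assert.h>

	#define BIT(n)		(1UL << (n))
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		assert(BIT(20) == 1048576);
		/* e.g. a 3,000,000-cycle budget rounds up to 3 units */
		assert(DIV_ROUND_UP(3000000UL, BIT(20)) == 3);
		return 0;
	}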
@@ -814,8 +814,9 @@ static void msdc_gate_clock(struct msdc_host *host)
clk_disable_unprepare(host->h_clk);
}
-static void msdc_ungate_clock(struct msdc_host *host)
+static int msdc_ungate_clock(struct msdc_host *host)
{
+ u32 val;
int ret;
clk_prepare_enable(host->h_clk);
@@ -825,11 +826,11 @@ static void msdc_ungate_clock(struct msdc_host *host)
ret = clk_bulk_prepare_enable(MSDC_NR_CLOCKS, host->bulk_clks);
if (ret) {
dev_err(host->dev, "Cannot enable pclk/axi/ahb clock gates\n");
- return;
+ return ret;
}
- while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
- cpu_relax();
+ return readl_poll_timeout(host->base + MSDC_CFG, val,
+ (val & MSDC_CFG_CKSTB), 1, 20000);
}
static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
@@ -840,6 +841,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
u32 div;
u32 sclk;
u32 tune_reg = host->dev_comp->pad_tune_reg;
+ u32 val;
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
@@ -899,14 +901,8 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
}
}
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
- /*
- * As src_clk/HCLK use the same bit to gate/ungate,
- * So if want to only gate src_clk, need gate its parent(mux).
- */
- if (host->src_clk_cg)
- clk_disable_unprepare(host->src_clk_cg);
- else
- clk_disable_unprepare(clk_get_parent(host->src_clk));
+
+ clk_disable_unprepare(host->src_clk_cg);
if (host->dev_comp->clk_div_bits == 8)
sdr_set_field(host->base + MSDC_CFG,
MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
@@ -915,13 +911,9 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sdr_set_field(host->base + MSDC_CFG,
MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
(mode << 12) | div);
- if (host->src_clk_cg)
- clk_prepare_enable(host->src_clk_cg);
- else
- clk_prepare_enable(clk_get_parent(host->src_clk));
- while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
- cpu_relax();
+ clk_prepare_enable(host->src_clk_cg);
+ readl_poll_timeout(host->base + MSDC_CFG, val, (val & MSDC_CFG_CKSTB), 0, 0);
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
mmc->actual_clock = sclk;
host->mclk = hz;
@@ -1013,15 +1005,15 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) ||
opcode == MMC_STOP_TRANSMISSION)
- rawcmd |= (0x1 << 14);
+ rawcmd |= BIT(14);
else if (opcode == SD_SWITCH_VOLTAGE)
- rawcmd |= (0x1 << 30);
+ rawcmd |= BIT(30);
else if (opcode == SD_APP_SEND_SCR ||
opcode == SD_APP_SEND_NUM_WR_BLKS ||
(opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
(opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
(opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC))
- rawcmd |= (0x1 << 11);
+ rawcmd |= BIT(11);
if (cmd->data) {
struct mmc_data *data = cmd->data;
@@ -1029,16 +1021,16 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
if (mmc_op_multi(opcode)) {
if (mmc_card_mmc(mmc->card) && mrq->sbc &&
!(mrq->sbc->arg & 0xFFFF0000))
- rawcmd |= 0x2 << 28; /* AutoCMD23 */
+ rawcmd |= BIT(29); /* AutoCMD23 */
}
rawcmd |= ((data->blksz & 0xFFF) << 16);
if (data->flags & MMC_DATA_WRITE)
- rawcmd |= (0x1 << 13);
+ rawcmd |= BIT(13);
if (data->blocks > 1)
- rawcmd |= (0x2 << 11);
+ rawcmd |= BIT(12);
else
- rawcmd |= (0x1 << 11);
+ rawcmd |= BIT(11);
/* Always use dma mode */
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO);
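As the surrounding code shows, bits 12:11 of the raw command word encode the block mode (0x1 single, 0x2 multi), bit 13 the write direction, and the auto-command enable sits in the 29:28 pair, so 0x2 << 11 is exactly BIT(12) and 0x2 << 28 exactly BIT(29). Equivalence check (userspace, illustrative):

	#include <assert.h>

	#define BIT(n) (1UL << (n))

	int main(void)
	{
		assert((0x1UL << 11) == BIT(11));	/* single-block */
		assert((0x2UL << 11) == BIT(12));	/* multi-block */
		assert((0x2UL << 28) == BIT(29));	/* AutoCMD23 */
		return 0;
	}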
@@ -1231,13 +1223,13 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
static inline bool msdc_cmd_is_ready(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
- /* The max busy time we can endure is 20ms */
- unsigned long tmo = jiffies + msecs_to_jiffies(20);
+ u32 val;
+ int ret;
- while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) &&
- time_before(jiffies, tmo))
- cpu_relax();
- if (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) {
+ /* The max busy time we can endure is 20ms */
+ ret = readl_poll_timeout_atomic(host->base + SDC_STS, val,
+ !(val & SDC_STS_CMDBUSY), 1, 20000);
+ if (ret) {
dev_err(host->dev, "CMD bus busy detected\n");
host->error |= REQ_CMD_BUSY;
msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
@@ -1245,12 +1237,10 @@ static inline bool msdc_cmd_is_ready(struct msdc_host *host,
}
if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) {
- tmo = jiffies + msecs_to_jiffies(20);
/* R1B or with data, should check SDCBUSY */
- while ((readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) &&
- time_before(jiffies, tmo))
- cpu_relax();
- if (readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) {
+ ret = readl_poll_timeout_atomic(host->base + SDC_STS, val,
+ !(val & SDC_STS_SDCBUSY), 1, 20000);
+ if (ret) {
dev_err(host->dev, "Controller busy detected\n");
host->error |= REQ_CMD_BUSY;
msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
@@ -1376,6 +1366,8 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
(MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO
| MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR
| MSDC_INT_DMA_PROTECT);
+ u32 val;
+ int ret;
spin_lock_irqsave(&host->lock, flags);
done = !host->data;
@@ -1392,8 +1384,14 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
readl(host->base + MSDC_DMA_CFG));
sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
1);
- while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
- cpu_relax();
+
+ ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val,
+ !(val & MSDC_DMA_CFG_STS), 1, 20000);
+ if (ret) {
+ dev_dbg(host->dev, "DMA stop timed out\n");
+ return false;
+ }
+
sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
dev_dbg(host->dev, "DMA stop\n");
@@ -1631,6 +1629,7 @@ static void msdc_init_hw(struct msdc_host *host)
{
u32 val;
u32 tune_reg = host->dev_comp->pad_tune_reg;
+ struct mmc_host *mmc = mmc_from_priv(host);
if (host->reset) {
reset_control_assert(host->reset);
@@ -1685,7 +1684,7 @@ static void msdc_init_hw(struct msdc_host *host)
}
if (host->dev_comp->busy_check)
- sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7));
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT1, BIT(7));
if (host->dev_comp->async_fifo) {
sdr_set_field(host->base + MSDC_PATCH_BIT2,
@@ -1736,14 +1735,18 @@ static void msdc_init_hw(struct msdc_host *host)
MSDC_PAD_TUNE_RXDLYSEL);
}
- /* Configure to enable SDIO mode.
- * it's must otherwise sdio cmd5 failed
- */
- sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
+ if (mmc->caps2 & MMC_CAP2_NO_SDIO) {
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
+ sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
+ sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
+ } else {
+ /* Configure to enable SDIO mode, otherwise SDIO CMD5 fails */
+ sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
- /* Config SDIO device detect interrupt function */
- sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
- sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
+ /* Config SDIO device detect interrupt function */
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+ sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
+ }
/* Configure to default data timeout */
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
@@ -1865,7 +1868,7 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static u32 test_delay_bit(u32 delay, u32 bit)
{
bit %= PAD_DELAY_MAX;
- return delay & (1 << bit);
+ return delay & BIT(bit);
}
static int get_delay_len(u32 delay, u32 start_bit)
@@ -1970,9 +1973,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
- rise_delay |= (1 << i);
+ rise_delay |= BIT(i);
} else {
- rise_delay &= ~(1 << i);
+ rise_delay &= ~BIT(i);
break;
}
}
@@ -1994,9 +1997,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
- fall_delay |= (1 << i);
+ fall_delay |= BIT(i);
} else {
- fall_delay &= ~(1 << i);
+ fall_delay &= ~BIT(i);
break;
}
}
@@ -2024,7 +2027,7 @@ skip_fall:
MSDC_PAD_TUNE_CMDRRDLY, i);
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err)
- internal_delay |= (1 << i);
+ internal_delay |= BIT(i);
}
dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
internal_delay_phase = get_best_delay(host, internal_delay);
@@ -2069,9 +2072,9 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
- cmd_delay |= (1 << i);
+ cmd_delay |= BIT(i);
} else {
- cmd_delay &= ~(1 << i);
+ cmd_delay &= ~BIT(i);
break;
}
}
@@ -2101,7 +2104,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- rise_delay |= (1 << i);
+ rise_delay |= BIT(i);
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
@@ -2115,7 +2118,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- fall_delay |= (1 << i);
+ fall_delay |= BIT(i);
}
final_fall_delay = get_best_delay(host, fall_delay);
@@ -2159,7 +2162,7 @@ static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- rise_delay |= (1 << i);
+ rise_delay |= BIT(i);
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
@@ -2175,7 +2178,7 @@ static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- fall_delay |= (1 << i);
+ fall_delay |= BIT(i);
}
final_fall_delay = get_best_delay(host, fall_delay);
@@ -2292,7 +2295,7 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
PAD_DS_TUNE_DLY1, i);
ret = mmc_get_ext_csd(card, &ext_csd);
if (!ret) {
- result_dly1 |= (1 << i);
+ result_dly1 |= BIT(i);
kfree(ext_csd);
}
}
@@ -2516,7 +2519,20 @@ static int msdc_of_clock_parse(struct platform_device *pdev,
/*source clock control gate is optional clock*/
host->src_clk_cg = devm_clk_get_optional(&pdev->dev, "source_cg");
if (IS_ERR(host->src_clk_cg))
- host->src_clk_cg = NULL;
+ return PTR_ERR(host->src_clk_cg);
+
+ /*
+ * Fallback for legacy device trees: src_clk and HCLK share the
+ * same gate bit but are parented to different muxes. To gate only
+ * the source clock (required during a clock mode switch to avoid
+ * hw hangs), we must gate its parent, which newer DTs describe as
+ * a separate clock.
+ */
+ if (!host->src_clk_cg) {
+ host->src_clk_cg = clk_get_parent(host->src_clk);
+ if (IS_ERR(host->src_clk_cg))
+ return PTR_ERR(host->src_clk_cg);
+ }
host->sys_clk_cg = devm_clk_get_optional(&pdev->dev, "sys_cg");
if (IS_ERR(host->sys_clk_cg))
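devm_clk_get_optional() returns NULL rather than an error pointer when the DT simply omits the clock, so the !host->src_clk_cg test selects the legacy fallback while genuine failures, including -EPROBE_DEFER, still propagate. The same pattern as a standalone sketch (hypothetical helper name; devm_clk_get_optional() and clk_get_parent() are the real kernel APIs):

	#include <linux/clk.h>

	static struct clk *optional_clk_or_parent(struct device *dev,
						  const char *id,
						  struct clk *base)
	{
		struct clk *clk = devm_clk_get_optional(dev, id);

		if (IS_ERR(clk))
			return clk;	/* real error: propagate */
		if (!clk)		/* old DT: gate via the parent mux */
			clk = clk_get_parent(base);
		return clk;
	}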
@@ -2674,7 +2690,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
platform_set_drvdata(pdev, mmc);
- msdc_ungate_clock(host);
+ ret = msdc_ungate_clock(host);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot ungate clocks!\n");
+ goto release_mem;
+ }
msdc_init_hw(host);
if (mmc->caps2 & MMC_CAP2_CQE) {
@@ -2833,8 +2853,12 @@ static int __maybe_unused msdc_runtime_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
+ int ret;
+
+ ret = msdc_ungate_clock(host);
+ if (ret)
+ return ret;
- msdc_ungate_clock(host);
msdc_restore_reg(host);
return 0;
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 2fe6fcdbb1b3..40b6878bea6c 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1183,7 +1183,6 @@ static int mxcmci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int mxcmci_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -1210,9 +1209,8 @@ static int mxcmci_resume(struct device *dev)
return ret;
}
-#endif
-static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
static struct platform_driver mxcmci_driver = {
.probe = mxcmci_probe,
@@ -1220,7 +1218,7 @@ static struct platform_driver mxcmci_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &mxcmci_pm_ops,
+ .pm = pm_sleep_ptr(&mxcmci_pm_ops),
.of_match_table = mxcmci_of_match,
}
};
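DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() replaces the #ifdef CONFIG_PM_SLEEP guards: the callbacks stay visible to the compiler in all configurations and are simply discarded as unreferenced when sleep support is off. The resulting shape, as a hedged sketch with hypothetical names:

	#include <linux/platform_device.h>
	#include <linux/pm.h>

	static int foo_suspend(struct device *dev) { return 0; /* ... */ }
	static int foo_resume(struct device *dev)  { return 0; /* ... */ }

	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			/* NULL when CONFIG_PM_SLEEP=n, so no dead code */
			.pm	= pm_sleep_ptr(&foo_pm_ops),
		},
	};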
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 9dafcbf969d9..fca30add563e 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1499,41 +1499,6 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
omap_hsmmc_set_bus_mode(host);
}
-static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
-{
- struct omap_hsmmc_host *host = mmc_priv(mmc);
-
- if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
- struct device_node *np = mmc_dev(mmc)->of_node;
-
- /*
- * REVISIT: should be moved to sdio core and made more
- * general e.g. by expanding the DT bindings of child nodes
- * to provide a mechanism to provide this information:
- * Documentation/devicetree/bindings/mmc/mmc-card.yaml
- */
-
- np = of_get_compatible_child(np, "ti,wl1251");
- if (np) {
- /*
- * We have TI wl1251 attached to MMC3. Pass this
- * information to the SDIO core because it can't be
- * probed by normal methods.
- */
-
- dev_info(host->dev, "found wl1251\n");
- card->quirks |= MMC_QUIRK_NONSTD_SDIO;
- card->cccr.wide_bus = 1;
- card->cis.vendor = 0x104c;
- card->cis.device = 0x9066;
- card->cis.blksize = 512;
- card->cis.max_dtr = 24000000;
- card->ocr = 0x80;
- of_node_put(np);
- }
- }
-}
-
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
@@ -1660,7 +1625,6 @@ static struct mmc_host_ops omap_hsmmc_ops = {
.set_ios = omap_hsmmc_set_ios,
.get_cd = mmc_gpio_get_cd,
.get_ro = mmc_gpio_get_ro,
- .init_card = omap_hsmmc_init_card,
.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
};
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 0c45e82ff0de..66d308e73e17 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -18,6 +18,8 @@ struct renesas_sdhi_scc {
u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */
};
+#define SDHI_FLAG_NEED_CLKH_FALLBACK BIT(0)
+
struct renesas_sdhi_of_data {
unsigned long tmio_flags;
u32 tmio_ocr_mask;
@@ -31,6 +33,7 @@ struct renesas_sdhi_of_data {
int taps_num;
unsigned int max_blk_count;
unsigned short max_segs;
+ unsigned long sdhi_flags;
};
#define SDHI_CALIB_TABLE_MAX 32
@@ -57,6 +60,7 @@ struct tmio_mmc_dma {
struct renesas_sdhi {
struct clk *clk;
+ struct clk *clkh;
struct clk *clk_cd;
struct tmio_mmc_data mmc_data;
struct tmio_mmc_dma dma_priv;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index f5b2684ad805..2797a9c0f17d 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -127,10 +127,12 @@ static int renesas_sdhi_clk_enable(struct tmio_mmc_host *host)
}
static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
- unsigned int new_clock)
+ unsigned int wanted_clock)
{
struct renesas_sdhi *priv = host_to_priv(host);
+ struct clk *ref_clk = priv->clk;
unsigned int freq, diff, best_freq = 0, diff_min = ~0;
+ unsigned int new_clock, clkh_shift = 0;
int i;
/*
@@ -141,6 +143,16 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2) || mmc_doing_tune(host->mmc))
return clk_get_rate(priv->clk);
+ if (priv->clkh) {
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
+ bool need_slow_clkh = (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
+ (host->mmc->ios.timing == MMC_TIMING_MMC_HS400);
+ clkh_shift = use_4tap && need_slow_clkh ? 1 : 2;
+ ref_clk = priv->clkh;
+ }
+
+ new_clock = wanted_clock << clkh_shift;
+
/*
* We want the bus clock to be as close as possible to, but no
* greater than, new_clock. As we can divide by 1 << i for
@@ -148,11 +160,10 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
* possible, but no greater than, new_clock << i.
*/
for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) {
- freq = clk_round_rate(priv->clk, new_clock << i);
+ freq = clk_round_rate(ref_clk, new_clock << i);
if (freq > (new_clock << i)) {
/* Too fast; look for a slightly slower option */
- freq = clk_round_rate(priv->clk,
- (new_clock << i) / 4 * 3);
+ freq = clk_round_rate(ref_clk, (new_clock << i) / 4 * 3);
if (freq > (new_clock << i))
continue;
}
@@ -164,7 +175,10 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
}
}
- clk_set_rate(priv->clk, best_freq);
+ clk_set_rate(ref_clk, best_freq);
+
+ if (priv->clkh)
+ clk_set_rate(priv->clk, best_freq >> clkh_shift);
return clk_get_rate(priv->clk);
}
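When the SDHnH (clkh) clock exists, the divider between it and the core clock is fixed at 1 << clkh_shift, so the parent is asked for wanted_clock << clkh_shift and the core clock is then set to best_freq >> clkh_shift. Worked numbers (userspace, illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long wanted = 200000000;	/* 200 MHz request */

		/* 4-tap SoC at SDR104/HS400: shift 1 -> clkh ~400 MHz */
		printf("clkh %lu -> clk %lu\n", wanted << 1, (wanted << 1) >> 1);
		/* all other cases: shift 2 -> clkh ~800 MHz */
		printf("clkh %lu -> clk %lu\n", wanted << 2, (wanted << 2) >> 2);
		return 0;
	}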
@@ -628,7 +642,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
* is at least SH_MOBILE_SDHI_MIN_TAP_ROW probes long then use the
* center index as the tap, otherwise bail out.
*/
- bitmap_for_each_set_region(bitmap, rs, re, 0, taps_size) {
+ for_each_set_bitrange(rs, re, bitmap, taps_size) {
if (re - rs > tap_cnt) {
tap_end = re;
tap_start = rs;
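for_each_set_bitrange(rs, re, bitmap, size) yields each maximal run of set bits as a half-open range [rs, re), the same semantics as the removed bitmap_for_each_set_region() with reordered arguments. Userspace model of the iteration (illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long map = 0x76;	/* bits 1,2 and 4,5,6 set */
		unsigned int rs = 0, re, size = 8;

		while (rs < size) {
			while (rs < size && !(map & (1UL << rs)))
				rs++;		/* skip to start of a run */
			if (rs == size)
				break;
			re = rs;
			while (re < size && (map & (1UL << re)))
				re++;		/* advance past the run */
			printf("[%u, %u)\n", rs, re);	/* [1,3) then [4,7) */
			rs = re;
		}
		return 0;
	}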
@@ -904,11 +918,12 @@ int renesas_sdhi_probe(struct platform_device *pdev,
dma_priv = &priv->dma_priv;
priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), "cannot get clock");
+
+ priv->clkh = devm_clk_get_optional(&pdev->dev, "clkh");
+ if (IS_ERR(priv->clkh))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clkh), "cannot get clkh");
/*
* Some controllers provide a 2nd clock just to run the internal card
@@ -921,9 +936,9 @@ int renesas_sdhi_probe(struct platform_device *pdev,
* to the card detect circuit. That leaves us with if separate clocks
* are presented, we must treat them both as virtually 1 clock.
*/
- priv->clk_cd = devm_clk_get(&pdev->dev, "cd");
+ priv->clk_cd = devm_clk_get_optional(&pdev->dev, "cd");
if (IS_ERR(priv->clk_cd))
- priv->clk_cd = NULL;
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cd), "cannot get cd clock");
priv->pinctrl = devm_pinctrl_get(&pdev->dev);
if (!IS_ERR(priv->pinctrl)) {
@@ -947,6 +962,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
mmc_data->max_segs = of_data->max_segs;
dma_priv->dma_buswidth = of_data->dma_buswidth;
host->bus_shift = of_data->bus_shift;
+ /* Fallback for old DTs */
+ if (!priv->clkh && of_data->sdhi_flags & SDHI_FLAG_NEED_CLKH_FALLBACK)
+ priv->clkh = clk_get_parent(clk_get_parent(priv->clk));
+
}
host->write16_hook = renesas_sdhi_write16_hook;
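dev_err_probe() folds the log-and-return idiom into one call and, unlike the removed dev_err(), is silent for -EPROBE_DEFER while recording the deferral reason for debugfs. Roughly what one of the calls above expands to (sketch):

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		int err = PTR_ERR(priv->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "cannot get clock: %d\n", err);
		/* dev_err_probe() also stores the reason for deferrals */
		return err;
	}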
@@ -1044,7 +1063,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
MMC_CAP2_HS400_1_8V))) {
const struct renesas_sdhi_scc *taps = of_data->taps;
- bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
+ bool use_4tap = quirks && quirks->hs400_4taps;
bool hit = false;
for (i = 0; i < of_data->taps_num; i++) {
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 7660f7ea74dd..9d2c600fd4ce 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -125,6 +125,22 @@ static const struct renesas_sdhi_of_data of_data_rcar_gen3 = {
/* DMAC can handle 32bit blk count but only 1 segment */
.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
.max_segs = 1,
+ .sdhi_flags = SDHI_FLAG_NEED_CLKH_FALLBACK,
+};
+
+static const struct renesas_sdhi_of_data of_data_rcar_gen3_no_fallback = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+ MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
+ .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
+ .bus_shift = 2,
+ .scc_offset = 0x1000,
+ .taps = rcar_gen3_scc_taps,
+ .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
+ /* DMAC can handle 32bit blk count but only 1 segment */
+ .max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
+ .max_segs = 1,
};
static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
@@ -214,6 +230,10 @@ static const struct renesas_sdhi_of_data_with_quirks of_r8a77965_compatible = {
.quirks = &sdhi_quirks_r8a77965,
};
+static const struct renesas_sdhi_of_data_with_quirks of_r8a77970_compatible = {
+ .of_data = &of_data_rcar_gen3_no_fallback,
+};
+
static const struct renesas_sdhi_of_data_with_quirks of_r8a77980_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_nohs400,
@@ -235,6 +255,7 @@ static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a77961", .data = &of_r8a77961_compatible, },
{ .compatible = "renesas,sdhi-r8a77965", .data = &of_r8a77965_compatible, },
+ { .compatible = "renesas,sdhi-r8a77970", .data = &of_r8a77970_compatible, },
{ .compatible = "renesas,sdhi-r8a77980", .data = &of_r8a77980_compatible, },
{ .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index f1ef0d28b0dd..c0350e9c03f3 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -31,10 +31,8 @@
#include <linux/mmc/slot-gpio.h>
#ifdef CONFIG_X86
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
+#include <linux/platform_data/x86/soc.h>
#include <asm/iosf_mbi.h>
-#include <linux/pci.h>
#endif
#include "sdhci.h"
@@ -240,26 +238,6 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
#ifdef CONFIG_X86
-static bool sdhci_acpi_byt(void)
-{
- static const struct x86_cpu_id byt[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
- {}
- };
-
- return x86_match_cpu(byt);
-}
-
-static bool sdhci_acpi_cht(void)
-{
- static const struct x86_cpu_id cht[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
- {}
- };
-
- return x86_match_cpu(cht);
-}
-
#define BYT_IOSF_SCCEP 0x63
#define BYT_IOSF_OCP_NETCTRL0 0x1078
#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
@@ -268,7 +246,7 @@ static void sdhci_acpi_byt_setting(struct device *dev)
{
u32 val = 0;
- if (!sdhci_acpi_byt())
+ if (!soc_intel_is_byt())
return;
if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
@@ -293,7 +271,7 @@ static void sdhci_acpi_byt_setting(struct device *dev)
static bool sdhci_acpi_byt_defer(struct device *dev)
{
- if (!sdhci_acpi_byt())
+ if (!soc_intel_is_byt())
return false;
if (!iosf_mbi_available())
@@ -304,43 +282,6 @@ static bool sdhci_acpi_byt_defer(struct device *dev)
return false;
}
-static bool sdhci_acpi_cht_pci_wifi(unsigned int vendor, unsigned int device,
- unsigned int slot, unsigned int parent_slot)
-{
- struct pci_dev *dev, *parent, *from = NULL;
-
- while (1) {
- dev = pci_get_device(vendor, device, from);
- pci_dev_put(from);
- if (!dev)
- break;
- parent = pci_upstream_bridge(dev);
- if (ACPI_COMPANION(&dev->dev) && PCI_SLOT(dev->devfn) == slot &&
- parent && PCI_SLOT(parent->devfn) == parent_slot &&
- !pci_upstream_bridge(parent)) {
- pci_dev_put(dev);
- return true;
- }
- from = dev;
- }
-
- return false;
-}
-
-/*
- * GPDwin uses PCI wifi which conflicts with SDIO's use of
- * acpi_device_fix_up_power() on child device nodes. Identifying GPDwin is
- * problematic, but since SDIO is only used for wifi, the presence of the PCI
- * wifi card in the expected slot with an ACPI companion node, is used to
- * indicate that acpi_device_fix_up_power() should be avoided.
- */
-static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
-{
- return sdhci_acpi_cht() &&
- acpi_dev_hid_uid_match(adev, "80860F14", "2") &&
- sdhci_acpi_cht_pci_wifi(0x14e4, 0x43ec, 0, 28);
-}
-
#else
static inline void sdhci_acpi_byt_setting(struct device *dev)
@@ -352,11 +293,6 @@ static inline bool sdhci_acpi_byt_defer(struct device *dev)
return false;
}
-static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
-{
- return false;
-}
-
#endif
static int bxt_get_cd(struct mmc_host *mmc)
@@ -861,11 +797,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
- if (!sdhci_acpi_no_fixup_child_power(device)) {
- list_for_each_entry(child, &device->children, node)
- if (child->status.present && child->status.enabled)
- acpi_device_fix_up_power(child);
- }
+ list_for_each_entry(child, &device->children, node)
+ if (child->status.present && child->status.enabled)
+ acpi_device_fix_up_power(child);
if (sdhci_acpi_byt_defer(dev))
return -EPROBE_DEFER;
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 764ee1b761d9..55981b0f0b10 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -305,6 +305,9 @@ static struct esdhc_soc_data usdhc_imx7ulp_data = {
| ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
+static struct esdhc_soc_data usdhc_imxrt1050_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_HS200 | ESDHC_FLAG_ERR004536,
+};
static struct esdhc_soc_data usdhc_imx8qxp_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
@@ -355,6 +358,7 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
{ .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, },
+ { .compatible = "fsl,imxrt1050-usdhc", .data = &usdhc_imxrt1050_data, },
{ .compatible = "nxp,s32g2-usdhc", .data = &usdhc_s32g2_data, },
{ /* sentinel */ }
};
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 6f9877546830..ed53276f6ad9 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1866,6 +1866,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 4fd99c1e82ba..97035d77c18c 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/mmc/mmc.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include "sdhci.h"
#include "sdhci-pci.h"
#include "cqhci.h"
@@ -116,6 +117,8 @@
#define PCI_GLI_9755_PECONF 0x44
#define PCI_GLI_9755_LFCLK GENMASK(14, 12)
#define PCI_GLI_9755_DMACLK BIT(29)
+#define PCI_GLI_9755_INVERT_CD BIT(30)
+#define PCI_GLI_9755_INVERT_WP BIT(31)
#define PCI_GLI_9755_CFG2 0x48
#define PCI_GLI_9755_CFG2_L1DLY GENMASK(28, 24)
@@ -570,6 +573,14 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
gl9755_wt_on(pdev);
pci_read_config_dword(pdev, PCI_GLI_9755_PECONF, &value);
+ /*
+ * Apple ARM64 platforms using these chips may have
+ * inverted CD/WP detection.
+ */
+ if (of_property_read_bool(pdev->dev.of_node, "cd-inverted"))
+ value |= PCI_GLI_9755_INVERT_CD;
+ if (of_property_read_bool(pdev->dev.of_node, "wp-inverted"))
+ value |= PCI_GLI_9755_INVERT_WP;
value &= ~PCI_GLI_9755_LFCLK;
value &= ~PCI_GLI_9755_DMACLK;
pci_write_config_dword(pdev, PCI_GLI_9755_PECONF, value);
@@ -891,7 +902,28 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
return 0;
}
+#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
+
+static u16 sdhci_gli_readw(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + (reg & ~3));
+ u16 word;
+
+ word = (val >> REG_OFFSET_IN_BITS(reg)) & 0xffff;
+ return word;
+}
+
+static u8 sdhci_gli_readb(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + (reg & ~3));
+ u8 byte = (val >> REG_OFFSET_IN_BITS(reg)) & 0xff;
+
+ return byte;
+}
+
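REG_OFFSET_IN_BITS() turns the register's byte offset within its aligned 32-bit word into a shift of 0, 8, 16 or 24, letting the 8- and 16-bit accessors be synthesized from a single aligned readl(); presumably these GLI controllers mishandle narrow MMIO reads. Check of the shift arithmetic (userspace, illustrative):

	#include <assert.h>

	#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)

	int main(void)
	{
		assert(REG_OFFSET_IN_BITS(0x30) == 0);	/* byte 0 of the word */
		assert(REG_OFFSET_IN_BITS(0x31) == 8);
		assert(REG_OFFSET_IN_BITS(0x32) == 16);
		assert(REG_OFFSET_IN_BITS(0x33) == 24);
		return 0;
	}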
static const struct sdhci_ops sdhci_gl9755_ops = {
+ .read_w = sdhci_gli_readw,
+ .read_b = sdhci_gli_readb,
.set_clock = sdhci_gl9755_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
@@ -911,6 +943,8 @@ const struct sdhci_pci_fixes sdhci_gl9755 = {
};
static const struct sdhci_ops sdhci_gl9750_ops = {
+ .read_w = sdhci_gli_readw,
+ .read_b = sdhci_gli_readb,
.read_l = sdhci_gl9750_readl,
.set_clock = sdhci_gl9750_set_clock,
.enable_dma = sdhci_pci_enable_dma,
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index f045c1ee4667..92c20cb8074a 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -12,6 +12,7 @@
#include <linux/mmc/mmc.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
+#include <linux/bitfield.h>
#include "sdhci.h"
#include "sdhci-pci.h"
@@ -43,12 +44,16 @@
#define O2_SD_CAP_REG0 0x334
#define O2_SD_UHS1_CAP_SETTING 0x33C
#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_OUTPUT_CLK_SOURCE_SWITCH 0x354
#define O2_SD_UHS2_L1_CTRL 0x35C
#define O2_SD_FUNC_REG3 0x3E0
#define O2_SD_FUNC_REG4 0x3E4
#define O2_SD_LED_ENABLE BIT(6)
#define O2_SD_FREG0_LEDOFF BIT(13)
+#define O2_SD_SEL_DLL BIT(16)
#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
+#define O2_SD_PHASE_MASK GENMASK(23, 20)
+#define O2_SD_FIX_PHASE FIELD_PREP(O2_SD_PHASE_MASK, 0x9)
#define O2_SD_VENDOR_SETTING 0x110
#define O2_SD_VENDOR_SETTING2 0x1C8
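FIELD_PREP(mask, val) shifts val into position at the mask's lowest set bit, so O2_SD_FIX_PHASE places the phase value 0x9 into bits 23:20. Check with locally re-defined helpers (illustrative; the kernel versions in <linux/bitfield.h> add compile-time type checking):

	#include <assert.h>

	#define GENMASK(h, l) \
		(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))
	#define FIELD_PREP(mask, val) \
		(((unsigned long)(val) << __builtin_ctzl(mask)) & (mask))

	int main(void)
	{
		assert(GENMASK(23, 20) == 0x00f00000UL);
		assert(FIELD_PREP(GENMASK(23, 20), 0x9) == 0x00900000UL);
		return 0;
	}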
@@ -301,9 +306,13 @@ static int sdhci_o2_dll_recovery(struct sdhci_host *host)
static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct sdhci_pci_chip *chip = slot->chip;
int current_bus_width = 0;
u32 scratch32 = 0;
u16 scratch = 0;
+ u8 scratch_8 = 0;
+ u32 reg_val;
/*
* This handler only implements the eMMC tuning that is specific to
@@ -322,6 +331,32 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
scratch |= O2_SD_PWR_FORCE_L0;
sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
+ /* Stop clk */
+ reg_val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ reg_val &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, reg_val, SDHCI_CLOCK_CONTROL);
+
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+
+ /* Set pcr 0x354[16] to choose dll clock, and set the default phase */
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &reg_val);
+ reg_val &= ~(O2_SD_SEL_DLL | O2_SD_PHASE_MASK);
+ reg_val |= (O2_SD_SEL_DLL | O2_SD_FIX_PHASE);
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, reg_val);
+
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+
+ /* Start clk */
+ reg_val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ reg_val |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, reg_val, SDHCI_CLOCK_CONTROL);
+
/* wait DLL lock, timeout value 5ms */
if (readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host,
scratch32, (scratch32 & O2_DLL_LOCK_STATUS), 1, 5000))
@@ -533,23 +568,32 @@ static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 0)
return;
- if ((host->timing == MMC_TIMING_UHS_SDR104) && (clock == 200000000)) {
- pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
-
- scratch &= 0x7f;
- pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ if ((host->timing == MMC_TIMING_UHS_SDR104) && (clock == 200000000)) {
pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32);
if ((scratch_32 & 0xFFFF0000) != 0x2c280000)
o2_pci_set_baseclk(chip, 0x2c280000);
+ } else {
+ pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32);
- pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
-
- scratch |= 0x80;
- pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ if ((scratch_32 & 0xFFFF0000) != 0x25100000)
+ o2_pci_set_baseclk(chip, 0x25100000);
}
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
+ scratch_32 &= ~(O2_SD_SEL_DLL | O2_SD_PHASE_MASK);
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
+
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
sdhci_o2_enable_clk(host, clk);
}
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 5e3193278ff9..3661a224fb04 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -59,6 +59,7 @@
#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8
#define PCI_DEVICE_ID_INTEL_LKF_EMMC 0x98c4
#define PCI_DEVICE_ID_INTEL_LKF_SD 0x98f8
+#define PCI_DEVICE_ID_INTEL_ADL_EMMC 0x54c4
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 9762ffab2e23..35ebba067e87 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -15,6 +15,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/mmc/card.h>
@@ -24,6 +26,8 @@
#include <linux/gpio/consumer.h>
#include <linux/ktime.h>
+#include <soc/tegra/common.h>
+
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -743,7 +747,9 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ struct device *dev = mmc_dev(host->mmc);
unsigned long host_clk;
+ int err;
if (!clock)
return sdhci_set_clock(host, clock);
@@ -761,7 +767,12 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
* from clk_get_rate() is used.
*/
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
- clk_set_rate(pltfm_host->clk, host_clk);
+
+ err = dev_pm_opp_set_rate(dev, host_clk);
+ if (err)
+ dev_err(dev, "failed to set clk rate to %luHz: %d\n",
+ host_clk, err);
+
tegra_host->curr_clk_rate = host_clk;
if (tegra_host->ddr_signaling)
host->max_clk = host_clk;
@@ -1714,7 +1725,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
"failed to get clock\n");
goto err_clk_get;
}
- clk_prepare_enable(clk);
pltfm_host->clk = clk;
tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
@@ -1725,15 +1735,24 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
goto err_rst_get;
}
- rc = reset_control_assert(tegra_host->rst);
+ rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (rc)
goto err_rst_get;
+ pm_runtime_enable(&pdev->dev);
+ rc = pm_runtime_resume_and_get(&pdev->dev);
+ if (rc)
+ goto err_pm_get;
+
+ rc = reset_control_assert(tegra_host->rst);
+ if (rc)
+ goto err_rst_assert;
+
usleep_range(2000, 4000);
rc = reset_control_deassert(tegra_host->rst);
if (rc)
- goto err_rst_get;
+ goto err_rst_assert;
usleep_range(2000, 4000);
@@ -1745,8 +1764,11 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
err_add_host:
reset_control_assert(tegra_host->rst);
+err_rst_assert:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+err_pm_get:
+ pm_runtime_disable(&pdev->dev);
err_rst_get:
- clk_disable_unprepare(pltfm_host->clk);
err_clk_get:
clk_disable_unprepare(tegra_host->tmclk);
err_power_req:
@@ -1765,19 +1787,38 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
reset_control_assert(tegra_host->rst);
usleep_range(2000, 4000);
- clk_disable_unprepare(pltfm_host->clk);
- clk_disable_unprepare(tegra_host->tmclk);
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
+
+ clk_disable_unprepare(tegra_host->tmclk);
sdhci_pltfm_free(pdev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
+static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ clk_disable_unprepare(pltfm_host->clk);
+
+ return 0;
+}
+
+static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_prepare_enable(pltfm_host->clk);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_tegra_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
int ret;
if (host->mmc->caps2 & MMC_CAP2_CQE) {
@@ -1792,17 +1833,22 @@ static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
return ret;
}
- clk_disable_unprepare(pltfm_host->clk);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ sdhci_resume_host(host);
+ cqhci_resume(host->mmc);
+ return ret;
+ }
+
return 0;
}
-static int __maybe_unused sdhci_tegra_resume(struct device *dev)
+static int sdhci_tegra_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
int ret;
- ret = clk_prepare_enable(pltfm_host->clk);
+ ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
@@ -1821,13 +1867,16 @@ static int __maybe_unused sdhci_tegra_resume(struct device *dev)
suspend_host:
sdhci_suspend_host(host);
disable_clk:
- clk_disable_unprepare(pltfm_host->clk);
+ pm_runtime_force_suspend(dev);
return ret;
}
#endif
-static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
- sdhci_tegra_resume);
+static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
+};
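Clock handling now lives solely in the runtime PM callbacks; the sleep hooks reach them through pm_runtime_force_suspend()/pm_runtime_force_resume(). A driver with no extra sleep-time work could even wire those helpers in directly (sketch, hypothetical name):

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
				   NULL)
		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					pm_runtime_force_resume)
	};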
static struct platform_driver sdhci_tegra_driver = {
.driver = {
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index e2affa52ef46..a5850d83908b 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -960,14 +960,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_OFF:
tmio_mmc_power_off(host);
/* For R-Car Gen2+, we need to reset SDHI specific SCC */
- if (host->pdata->flags & TMIO_MMC_MIN_RCAR2) {
- host->reset(host);
-
- if (host->native_hotplug)
- tmio_mmc_enable_mmc_irqs(host,
- TMIO_STAT_CARD_REMOVE |
- TMIO_STAT_CARD_INSERT);
- }
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ tmio_mmc_reset(host);
host->set_clock(host, 0);
break;
@@ -1175,6 +1169,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
if (mmc_can_gpio_cd(mmc))
_host->ops.get_cd = mmc_gpio_get_cd;
+ /* must be set before tmio_mmc_reset() */
_host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
!mmc_card_is_removable(mmc));
@@ -1295,10 +1290,6 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
if (host->clk_cache)
host->set_clock(host, host->clk_cache);
- if (host->native_hotplug)
- tmio_mmc_enable_mmc_irqs(host,
- TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
-
tmio_mmc_enable_dma(host, true);
return 0;
diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c
index acabb7715b42..73258b24fea7 100644
--- a/drivers/most/most_usb.c
+++ b/drivers/most/most_usb.c
@@ -831,7 +831,7 @@ static ssize_t value_show(struct device *dev, struct device_attribute *attr,
int err;
if (sysfs_streq(name, "arb_address"))
- return snprintf(buf, PAGE_SIZE, "%04x\n", dci_obj->reg_addr);
+ return sysfs_emit(buf, "%04x\n", dci_obj->reg_addr);
if (sysfs_streq(name, "arb_value"))
reg_addr = dci_obj->reg_addr;
@@ -843,7 +843,7 @@ static ssize_t value_show(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- return snprintf(buf, PAGE_SIZE, "%04x\n", val);
+ return sysfs_emit(buf, "%04x\n", val);
}
static ssize_t value_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index e5bd3c2bc3b2..4d4f97841016 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -61,8 +61,8 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
struct cfi_private cfi;
struct cfi_private *retcfi;
unsigned long *chip_map;
- int i, j, mapsize;
int max_chips;
+ int i, j;
memset(&cfi, 0, sizeof(cfi));
@@ -111,8 +111,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
max_chips = 1;
}
- mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG);
- chip_map = kzalloc(mapsize, GFP_KERNEL);
+ chip_map = bitmap_zalloc(max_chips, GFP_KERNEL);
if (!chip_map) {
kfree(cfi.cfiq);
return NULL;
@@ -139,7 +138,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
if (!retcfi) {
kfree(cfi.cfiq);
- kfree(chip_map);
+ bitmap_free(chip_map);
return NULL;
}
@@ -157,7 +156,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
}
}
- kfree(chip_map);
+ bitmap_free(chip_map);
return retcfi;
}
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index 77c872fd3d83..a8b31bddf14b 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -213,7 +213,9 @@ static int mchp23k256_remove(struct spi_device *spi)
{
struct mchp23k256_flash *flash = spi_get_drvdata(spi);
- return mtd_device_unregister(&flash->mtd);
+ WARN_ON(mtd_device_unregister(&flash->mtd));
+
+ return 0;
}
static const struct of_device_id mchp23k256_of_table[] = {
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index 99400d0fb8c1..231a10790196 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -345,7 +345,9 @@ static int mchp48l640_remove(struct spi_device *spi)
{
struct mchp48l640_flash *flash = spi_get_drvdata(spi);
- return mtd_device_unregister(&flash->mtd);
+ WARN_ON(mtd_device_unregister(&flash->mtd));
+
+ return 0;
}
static const struct of_device_id mchp48l640_of_table[] = {
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 2b317ed6c103..734878abaa23 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -919,14 +919,14 @@ static int dataflash_probe(struct spi_device *spi)
static int dataflash_remove(struct spi_device *spi)
{
struct dataflash *flash = spi_get_drvdata(spi);
- int status;
dev_dbg(&spi->dev, "remove\n");
- status = mtd_device_unregister(&flash->mtd);
- if (status == 0)
- kfree(flash);
- return status;
+ WARN_ON(mtd_device_unregister(&flash->mtd));
+
+ kfree(flash);
+
+ return 0;
}
static struct spi_driver dataflash_driver = {
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index b81c3f0b85f9..7f124c1bfa40 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -402,7 +402,9 @@ static int sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = spi_get_drvdata(spi);
- return mtd_device_unregister(&flash->mtd);
+ WARN_ON(mtd_device_unregister(&flash->mtd));
+
+ return 0;
}
static struct spi_driver sst25l_driver = {
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
index ecb050ba95cd..6e08ec1d4f09 100644
--- a/drivers/mtd/hyperbus/rpc-if.c
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -124,13 +124,17 @@ static int rpcif_hb_probe(struct platform_device *pdev)
if (!hyperbus)
return -ENOMEM;
- rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent);
+ error = rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent);
+ if (error)
+ return error;
platform_set_drvdata(pdev, hyperbus);
rpcif_enable_rpm(&hyperbus->rpc);
- rpcif_hw_init(&hyperbus->rpc, true);
+ error = rpcif_hw_init(&hyperbus->rpc, true);
+ if (error)
+ return error;
hyperbus->hbdev.map.size = hyperbus->rpc.size;
hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
@@ -150,9 +154,9 @@ static int rpcif_hb_remove(struct platform_device *pdev)
{
struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
int error = hyperbus_unregister_device(&hyperbus->hbdev);
- struct rpcif *rpc = dev_get_drvdata(pdev->dev.parent);
- rpcif_disable_rpm(rpc);
+ rpcif_disable_rpm(&hyperbus->rpc);
+
return error;
}
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 4945caa88345..6a099bbcd8be 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -357,12 +357,6 @@ config MTD_INTEL_VR_NOR
Map driver for a NOR flash bank located on the Expansion Bus of the
Intel Vermilion Range chipset.
-config MTD_RBTX4939
- tristate "Map driver for RBTX4939 board"
- depends on TOSHIBA_RBTX4939 && MTD_CFI && MTD_COMPLEX_MAPPINGS
- help
- Map driver for NOR flash chips on RBTX4939 board.
-
config MTD_PLATRAM
tristate "Map driver for platform device RAM (mtd-ram)"
select MTD_RAM
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 11fea9c8d561..2240b100f66a 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -42,6 +42,5 @@ obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
-obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
deleted file mode 100644
index 39c86c0b0ec1..000000000000
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ /dev/null
@@ -1,133 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * rbtx4939-flash (based on physmap.c)
- *
- * This is a simplified physmap driver with map_init callback function.
- *
- * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <asm/txx9/rbtx4939.h>
-
-struct rbtx4939_flash_info {
- struct mtd_info *mtd;
- struct map_info map;
-};
-
-static int rbtx4939_flash_remove(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info;
-
- info = platform_get_drvdata(dev);
- if (!info)
- return 0;
-
- if (info->mtd) {
- mtd_device_unregister(info->mtd);
- map_destroy(info->mtd);
- }
- return 0;
-}
-
-static const char * const rom_probe_types[] = {
- "cfi_probe", "jedec_probe", NULL };
-
-static int rbtx4939_flash_probe(struct platform_device *dev)
-{
- struct rbtx4939_flash_data *pdata;
- struct rbtx4939_flash_info *info;
- struct resource *res;
- const char * const *probe_type;
- int err = 0;
- unsigned long size;
-
- pdata = dev_get_platdata(&dev->dev);
- if (!pdata)
- return -ENODEV;
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- info = devm_kzalloc(&dev->dev, sizeof(struct rbtx4939_flash_info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- platform_set_drvdata(dev, info);
-
- size = resource_size(res);
- pr_notice("rbtx4939 platform flash device: %pR\n", res);
-
- if (!devm_request_mem_region(&dev->dev, res->start, size,
- dev_name(&dev->dev)))
- return -EBUSY;
-
- info->map.name = dev_name(&dev->dev);
- info->map.phys = res->start;
- info->map.size = size;
- info->map.bankwidth = pdata->width;
-
- info->map.virt = devm_ioremap(&dev->dev, info->map.phys, size);
- if (!info->map.virt)
- return -EBUSY;
-
- if (pdata->map_init)
- (*pdata->map_init)(&info->map);
- else
- simple_map_init(&info->map);
-
- probe_type = rom_probe_types;
- for (; !info->mtd && *probe_type; probe_type++)
- info->mtd = do_map_probe(*probe_type, &info->map);
- if (!info->mtd) {
- dev_err(&dev->dev, "map_probe failed\n");
- err = -ENXIO;
- goto err_out;
- }
- info->mtd->dev.parent = &dev->dev;
- err = mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
-
- if (err)
- goto err_out;
- return 0;
-
-err_out:
- rbtx4939_flash_remove(dev);
- return err;
-}
-
-#ifdef CONFIG_PM
-static void rbtx4939_flash_shutdown(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (mtd_suspend(info->mtd) == 0)
- mtd_resume(info->mtd);
-}
-#else
-#define rbtx4939_flash_shutdown NULL
-#endif
-
-static struct platform_driver rbtx4939_flash_driver = {
- .probe = rbtx4939_flash_probe,
- .remove = rbtx4939_flash_remove,
- .shutdown = rbtx4939_flash_shutdown,
- .driver = {
- .name = "rbtx4939-flash",
- },
-};
-
-module_platform_driver(rbtx4939_flash_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("RBTX4939 MTD map driver");
-MODULE_ALIAS("platform:rbtx4939-flash");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 4eaba6f4ec68..243f28a3206b 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -46,23 +46,19 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
struct request *req)
{
+ struct req_iterator iter;
+ struct bio_vec bvec;
unsigned long block, nsect;
char *buf;
block = blk_rq_pos(req) << 9 >> tr->blkshift;
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
- if (req_op(req) == REQ_OP_FLUSH) {
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
if (tr->flush(dev))
return BLK_STS_IOERR;
return BLK_STS_OK;
- }
-
- if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
- get_capacity(req->rq_disk))
- return BLK_STS_IOERR;
-
- switch (req_op(req)) {
case REQ_OP_DISCARD:
if (tr->discard(dev, block, nsect))
return BLK_STS_IOERR;
@@ -76,13 +72,17 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
}
}
kunmap(bio_page(req->bio));
- rq_flush_dcache_pages(req);
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
return BLK_STS_OK;
case REQ_OP_WRITE:
if (!tr->writesect)
return BLK_STS_IOERR;
- rq_flush_dcache_pages(req);
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
+
buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
if (tr->writesect(dev, block, buf)) {
@@ -346,7 +346,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->minors = 1 << tr->part_bits;
gd->fops = &mtd_block_ops;
- if (tr->part_bits)
+ if (tr->part_bits) {
if (new->devnum < 26)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%c", tr->name, 'a' + new->devnum);
@@ -355,9 +355,11 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
"%s%c%c", tr->name,
'a' - 1 + new->devnum / 26,
'a' + new->devnum % 26);
- else
+ } else {
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
+ gd->flags |= GENHD_FL_NO_PART;
+ }
set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);
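Two block-layer notes on the hunks above: rq_flush_dcache_pages() was removed from the block core in this cycle, which is why the driver now open-codes the rq_for_each_segment()/flush_dcache_page() loop, and GENHD_FL_NO_PART tells the block layer not to scan for partitions on translators that declare no partition bits (tr->part_bits == 0).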
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 155e991d9d75..d0f9c4b0285c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -573,14 +573,32 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
}
}
+static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
+ struct mtd_oob_ops *ops)
+{
+ uint32_t start_page, end_page;
+ u32 oob_per_page;
+
+ if (ops->len == 0 || ops->ooblen == 0)
+ return;
+
+ start_page = mtd_div_by_ws(start, mtd);
+ end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
+ oob_per_page = mtd_oobavail(mtd, ops);
+
+ ops->ooblen = min_t(size_t, ops->ooblen,
+ (end_page - start_page + 1) * oob_per_page);
+}
+
static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
struct mtd_info *master = mtd_get_master(mtd);
struct mtd_write_req req;
- struct mtd_oob_ops ops = {};
const void __user *usr_data, *usr_oob;
- int ret;
+ uint8_t *datbuf = NULL, *oobbuf = NULL;
+ size_t datbuf_len, oobbuf_len;
+ int ret = 0;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -590,33 +608,79 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
if (!master->_write_oob)
return -EOPNOTSUPP;
- ops.mode = req.mode;
- ops.len = (size_t)req.len;
- ops.ooblen = (size_t)req.ooblen;
- ops.ooboffs = 0;
-
- if (usr_data) {
- ops.datbuf = memdup_user(usr_data, ops.len);
- if (IS_ERR(ops.datbuf))
- return PTR_ERR(ops.datbuf);
- } else {
- ops.datbuf = NULL;
+
+ if (!usr_data)
+ req.len = 0;
+
+ if (!usr_oob)
+ req.ooblen = 0;
+
+ if (req.start + req.len > mtd->size)
+ return -EINVAL;
+
+ datbuf_len = min_t(size_t, req.len, mtd->erasesize);
+ if (datbuf_len > 0) {
+ datbuf = kmalloc(datbuf_len, GFP_KERNEL);
+ if (!datbuf)
+ return -ENOMEM;
}
- if (usr_oob) {
- ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
- if (IS_ERR(ops.oobbuf)) {
- kfree(ops.datbuf);
- return PTR_ERR(ops.oobbuf);
+ oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
+ if (oobbuf_len > 0) {
+ oobbuf = kmalloc(oobbuf_len, GFP_KERNEL);
+ if (!oobbuf) {
+ kfree(datbuf);
+ return -ENOMEM;
}
- } else {
- ops.oobbuf = NULL;
}
- ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
+ while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
+ struct mtd_oob_ops ops = {
+ .mode = req.mode,
+ .len = min_t(size_t, req.len, datbuf_len),
+ .ooblen = min_t(size_t, req.ooblen, oobbuf_len),
+ .datbuf = datbuf,
+ .oobbuf = oobbuf,
+ };
- kfree(ops.datbuf);
- kfree(ops.oobbuf);
+ /*
+ * Shorten non-page-aligned, eraseblock-sized writes so that
+ * the write ends on an eraseblock boundary. This is necessary
+ * for adjust_oob_length() to properly handle non-page-aligned
+ * writes.
+ */
+ if (ops.len == mtd->erasesize)
+ ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
+
+ /*
+ * For writes which are not OOB-only, adjust the amount of OOB
+ * data written according to the number of data pages written.
+ * This is necessary to prevent OOB data from being skipped
+ * over in data+OOB writes requiring multiple mtd_write_oob()
+ * calls to be completed.
+ */
+ adjust_oob_length(mtd, req.start, &ops);
+
+ if (copy_from_user(datbuf, usr_data, ops.len) ||
+ copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mtd_write_oob(mtd, req.start, &ops);
+ if (ret)
+ break;
+
+ req.start += ops.retlen;
+ req.len -= ops.retlen;
+ usr_data += ops.retlen;
+
+ req.ooblen -= ops.oobretlen;
+ usr_oob += ops.oobretlen;
+ }
+
+ kfree(datbuf);
+ kfree(oobbuf);
return ret;
}
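To make the clamping in adjust_oob_length() concrete, here is a small self-contained model of the computation with hypothetical geometry (2 KiB pages, 64 bytes of available OOB per page); the names and values are illustrative, not kernel API:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t writesize = 2048, oobavail = 64;
		uint64_t start = 1024;     /* non-page-aligned start */
		size_t len = 4096, ooblen = 512;

		uint32_t start_page = start / writesize;            /* 0 */
		uint32_t end_page = (start + len - 1) / writesize;  /* 2 */
		size_t max_oob = (end_page - start_page + 1) * oobavail;

		/* three pages are touched, so at most 3 * 64 OOB bytes fit */
		if (ooblen > max_oob)
			ooblen = max_oob;
		printf("clamped ooblen = %zu\n", ooblen); /* 192 */
		return 0;
	}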
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9186268d361b..70f492dce158 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -747,6 +747,9 @@ int del_mtd_device(struct mtd_info *mtd)
device_unregister(&mtd->dev);
+ /* Clear dev so mtd can be safely re-registered later if desired */
+ memset(&mtd->dev, 0, sizeof(mtd->dev));
+
idr_remove(&mtd_idr, mtd->index);
of_node_put(mtd_get_of_node(mtd));
@@ -825,8 +828,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
/* OTP nvmem will be registered on the physical device */
config.dev = mtd->dev.parent;
- /* just reuse the compatible as name */
- config.name = compatible;
+ config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
config.id = NVMEM_DEVID_NONE;
config.owner = THIS_MODULE;
config.type = NVMEM_TYPE_OTP;
@@ -842,6 +844,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
nvmem = NULL;
of_node_put(np);
+ kfree(config.name);
return nvmem;
}
@@ -1018,8 +1021,10 @@ int mtd_device_unregister(struct mtd_info *master)
{
int err;
- if (master->_reboot)
+ if (master->_reboot) {
unregister_reboot_notifier(&master->reboot_notifier);
+ memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
+ }
if (master->otp_user_nvmem)
nvmem_unregister(master->otp_user_nvmem);
@@ -2365,6 +2370,14 @@ static struct backing_dev_info * __init mtd_bdi_init(const char *name)
return ret ? ERR_PTR(ret) : bdi;
}
+char *mtd_expert_analysis_warning =
+ "Bad block checks have been entirely disabled.\n"
+ "This is only reserved for post-mortem forensics and debug purposes.\n"
+ "Never enable this mode if you do not know what you are doing!\n";
+EXPORT_SYMBOL_GPL(mtd_expert_analysis_warning);
+bool mtd_expert_analysis_mode;
+EXPORT_SYMBOL_GPL(mtd_expert_analysis_mode);
+
static struct proc_dir_entry *proc_mtd;
static int __init init_mtd(void)
@@ -2388,6 +2401,8 @@ static int __init init_mtd(void)
goto out_procfs;
dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
+ debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
+ &mtd_expert_analysis_mode);
return 0;
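Once debugfs is mounted, the new knob appears as /sys/kernel/debug/mtd/expert_analysis_mode; debugfs_create_bool() accepts the usual boolean spellings (Y/N, 1/0). While it is set, every bad-block check guarded by WARN_ONCE(mtd_expert_analysis_mode, ...) in the nand hunks below short-circuits and reports the block as good.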
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 04af12b66110..357661b62c94 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -312,7 +312,7 @@ static int __mtd_del_partition(struct mtd_info *mtd)
if (err)
return err;
- list_del(&child->part.node);
+ list_del(&mtd->part.node);
free_partition(mtd);
return 0;
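For context on the one-liner above: child is the cursor of the child-deletion loop that precedes this hunk, so once that loop terminates it no longer points at the partition being removed; unlinking mtd itself keeps the parent's partition list intact.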
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index 5e13a03d2b32..416947f28b67 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -21,6 +21,9 @@
*/
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
+ if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning))
+ return false;
+
if (nanddev_bbt_is_initialized(nand)) {
unsigned int entry;
int status;
diff --git a/drivers/mtd/nand/onenand/onenand_bbt.c b/drivers/mtd/nand/onenand/onenand_bbt.c
index def89f108007..b17315f8e1d4 100644
--- a/drivers/mtd/nand/onenand/onenand_bbt.c
+++ b/drivers/mtd/nand/onenand/onenand_bbt.c
@@ -60,7 +60,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
int i, j, numblocks, len, scanlen;
int startblock;
loff_t from;
- size_t readlen, ooblen;
+ size_t readlen;
struct mtd_oob_ops ops;
int rgn;
@@ -69,7 +69,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
len = 2;
	/* We only need to read a few bytes from the OOB area */
- scanlen = ooblen = 0;
+ scanlen = 0;
readlen = bd->len;
/* chip == -1 case only */
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 0a45d3c6c15b..20408b7db540 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -40,8 +40,9 @@ config MTD_NAND_AMS_DELTA
config MTD_NAND_OMAP2
tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
- depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on HAS_IOMEM
+ select OMAP_GPMC if ARCH_K3
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
@@ -308,7 +309,7 @@ config MTD_NAND_DAVINCI
config MTD_NAND_TXX9NDFMC
tristate "TXx9 NAND controller"
- depends on SOC_TX4938 || SOC_TX4939 || COMPILE_TEST
+ depends on SOC_TX4938 || COMPILE_TEST
depends on HAS_IOMEM
help
This enables the NAND flash controller on the TXx9 SoCs.
@@ -461,6 +462,13 @@ config MTD_NAND_PL35X
Enables support for PrimeCell SMC PL351 and PL353 NAND
controller found on Zynq7000.
+config MTD_NAND_RENESAS
+ tristate "Renesas R-Car Gen3 & RZ/N1 NAND controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Enables support for the NAND controller found on Renesas R-Car
+ Gen3 and RZ/N1 SoC families.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 2f97958c3a33..88a566513c56 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o
obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o
obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
+obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 118da9944e3b..45fec8c192ab 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -371,77 +371,6 @@ correct:
return corrected;
}
-/**
- * nand_read_page_hwecc_oob_first - hw ecc, read oob first
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- *
- * Hardware ECC for large page chips, require OOB to be read first. For this
- * ECC mode, the write_page method is re-used from ECC_HW. These methods
- * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
- * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
- * the data area, by overwriting the NAND manufacturer bad block markings.
- */
-static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip,
- uint8_t *buf,
- int oob_required, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i, eccsize = chip->ecc.size, ret;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *ecc_code = chip->ecc.code_buf;
- uint8_t *ecc_calc = chip->ecc.calc_buf;
- unsigned int max_bitflips = 0;
-
- /* Read the OOB area first */
- ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
- if (ret)
- return ret;
-
- ret = nand_read_page_op(chip, page, 0, NULL, 0);
- if (ret)
- return ret;
-
- ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
-
- chip->ecc.hwctl(chip, NAND_ECC_READ);
-
- ret = nand_read_data_op(chip, p, eccsize, false, false);
- if (ret)
- return ret;
-
- chip->ecc.calculate(chip, p, &ecc_calc[i]);
-
- stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
- if (stat == -EBADMSG &&
- (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
- /* check for empty pages with bitflips */
- stat = nand_check_erased_ecc_chunk(p, eccsize,
- &ecc_code[i],
- eccbytes, NULL, 0,
- chip->ecc.strength);
- }
-
- if (stat < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += stat;
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
- }
- }
- return max_bitflips;
-}
-
/*----------------------------------------------------------------------*/
/* An ECC layout for using 4-bit ECC with small-page flash, storing
@@ -651,7 +580,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
} else if (chunks == 4 || chunks == 8) {
mtd_set_ooblayout(mtd,
nand_get_large_page_ooblayout());
- chip->ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
+ chip->ecc.read_page = nand_read_page_hwecc_oob_first;
} else {
return -EIO;
}
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 10cc71829dcb..1b64c5a5140d 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -713,14 +713,32 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
(use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
}
-static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
+static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
struct gpmi_nfc_hardware_timing *hw = &this->hw;
struct resources *r = &this->resources;
void __iomem *gpmi_regs = r->gpmi_regs;
unsigned int dll_wait_time_us;
+ int ret;
+
+ /* Clock dividers do NOT guarantee a clean clock signal on their
+ * outputs while the divide factor is being changed on i.MX6Q/UL/SX.
+ * On i.MX7/8, all clock dividers do provide this guarantee.
+ */
+ if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
+ clk_disable_unprepare(r->clock[0]);
- clk_set_rate(r->clock[0], hw->clk_rate);
+ ret = clk_set_rate(r->clock[0], hw->clk_rate);
+ if (ret) {
+ dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
+ return ret;
+ }
+
+ if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
+ ret = clk_prepare_enable(r->clock[0]);
+ if (ret)
+ return ret;
+ }
writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
@@ -739,6 +757,8 @@ static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
/* Wait for the DLL to settle. */
udelay(dll_wait_time_us);
+
+ return 0;
}
static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
@@ -971,16 +991,13 @@ static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
struct platform_device *pdev = this->pdev;
const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
- struct resource *r;
int err;
- r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
- if (!r) {
- dev_err(this->dev, "Can't get resource for %s\n", res_name);
- return -ENODEV;
- }
+ err = platform_get_irq_byname(pdev, res_name);
+ if (err < 0)
+ return err;
- err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
+ err = devm_request_irq(this->dev, err, irq_h, 0, res_name, this);
if (err)
dev_err(this->dev, "error requesting BCH IRQ\n");
@@ -1032,15 +1049,6 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
r->clock[i] = clk;
}
- if (GPMI_IS_MX6(this))
- /*
- * Set the default value for the gpmi clock.
- *
- * If you want to use the ONFI nand which is in the
- * Synchronous Mode, you should change the clock as you need.
- */
- clk_set_rate(r->clock[0], 22000000);
-
return 0;
err_clock:
@@ -1425,7 +1433,6 @@ static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
- int ret;
dev_dbg(this->dev, "ecc write page.\n");
@@ -1445,9 +1452,7 @@ static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
this->auxiliary_virt);
}
- ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
-
- return ret;
+ return nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
}
/*
@@ -2278,7 +2283,9 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
*/
if (this->hw.must_apply_timings) {
this->hw.must_apply_timings = false;
- gpmi_nfc_apply_timings(this);
+ ret = gpmi_nfc_apply_timings(this);
+ if (ret)
+ return ret;
}
dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
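The timing fix above reduces to a gate, set rate, ungate sequence on the affected i.MX6 variants. In isolation (illustrative clk handle, error paths trimmed):

	clk_disable_unprepare(clk);            /* park the divider while it is reprogrammed */
	ret = clk_set_rate(clk, rate);         /* glitch-free now that the output is gated */
	if (!ret)
		ret = clk_prepare_enable(clk); /* resume with the new rate */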
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index 0e9d426fe4f2..b18861bdcdc8 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -32,6 +32,7 @@ struct jz_soc_info {
unsigned long addr_offset;
unsigned long cmd_offset;
const struct mtd_ooblayout_ops *oob_layout;
+ bool oob_first;
};
struct ingenic_nand_cs {
@@ -240,6 +241,9 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
+ if (nfc->soc_info->oob_first)
+ chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+
/* For legacy reasons we use a different layout on the qi,lb60 board. */
if (of_machine_is_compatible("qi,lb60"))
mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
@@ -534,6 +538,7 @@ static const struct jz_soc_info jz4740_soc_info = {
.data_offset = 0x00000000,
.cmd_offset = 0x00008000,
.addr_offset = 0x00010000,
+ .oob_first = true,
};
static const struct jz_soc_info jz4725b_soc_info = {
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index cb293c50acb8..5b9271b9c326 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -291,7 +291,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd)
/* Control chips select signal on ADS5121 board */
static void ads5121_select_chip(struct nand_chip *nand, int chip)
{
- struct mtd_info *mtd = nand_to_mtd(nand);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
u8 v;
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index a130320de412..e7b2ba016d8c 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -321,6 +321,9 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
if (nand_region_is_secured(chip, ofs, mtd->erasesize))
return -EIO;
+ if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning))
+ return 0;
+
if (chip->legacy.block_bad)
return chip->legacy.block_bad(chip, ofs);
@@ -3161,6 +3164,73 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
}
/**
+ * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
+ * data read from OOB area
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Hardware ECC for large page chips, which requires the ECC data to be
+ * extracted from the OOB before the actual data is read.
+ */
+int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ /* Read the OOB area first */
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ /* Move read cursor to start of page */
+ ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ &ecc_code[i],
+ eccbytes, NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
+
+/**
* nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
* @chip: nand chip info structure
* @buf: buffer to store read data
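With the OOB-first read path hoisted into nand_base.c and exported, a controller driver opts in from its ->attach_chip() hook, exactly as the davinci and ingenic changes in this series do. An illustrative excerpt (the function name is made up):

	static int foo_nand_attach_chip(struct nand_chip *chip)
	{
		chip->ecc.read_page = nand_read_page_hwecc_oob_first;
		return 0;
	}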
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index b7ad030225f8..ab630af3a309 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -1455,6 +1455,9 @@ int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
(unsigned int)offs, block, res);
+ if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning))
+ return 0;
+
switch (res) {
case BBT_BLOCK_GOOD:
return 0;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index b26d4947af02..f0bbbe401e76 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -19,7 +19,7 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
-#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -148,7 +148,6 @@ struct omap_nand_info {
int gpmc_cs;
bool dev_ready;
enum nand_io xfer_type;
- int devsize;
enum omap_ecc ecc_opt;
struct device_node *elm_of_node;
@@ -164,6 +163,7 @@ struct omap_nand_info {
u_char *buf;
int buf_len;
/* Interface to GPMC */
+ void __iomem *fifo;
struct gpmc_nand_regs reg;
struct gpmc_nand_ops *ops;
bool flash_bbt;
@@ -175,6 +175,11 @@ struct omap_nand_info {
unsigned int nsteps_per_eccpg;
unsigned int eccpg_size;
unsigned int eccpg_bytes;
+ void (*data_in)(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit);
+ void (*data_out)(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit);
};
static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
@@ -182,6 +187,13 @@ static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
}
+static void omap_nand_data_in(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit);
+
+static void omap_nand_data_out(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit);
+
/**
* omap_prefetch_enable - configures and starts prefetch transfer
* @cs: cs (chip select) number
@@ -241,169 +253,70 @@ static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
}
/**
- * omap_hwcontrol - hardware specific access to control-lines
- * @chip: NAND chip object
- * @cmd: command to device
- * @ctrl:
- * NAND_NCE: bit 0 -> don't care
- * NAND_CLE: bit 1 -> Command Latch
- * NAND_ALE: bit 2 -> Address Latch
- *
- * NOTE: boards may use different bits for these!!
+ * omap_nand_data_in_pref - NAND data in using prefetch engine
*/
-static void omap_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
+static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
-
- if (cmd != NAND_CMD_NONE) {
- if (ctrl & NAND_CLE)
- writeb(cmd, info->reg.gpmc_nand_command);
-
- else if (ctrl & NAND_ALE)
- writeb(cmd, info->reg.gpmc_nand_address);
-
- else /* NAND_NCE */
- writeb(cmd, info->reg.gpmc_nand_data);
- }
-}
-
-/**
- * omap_read_buf8 - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- */
-static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
-{
- struct nand_chip *nand = mtd_to_nand(mtd);
-
- ioread8_rep(nand->legacy.IO_ADDR_R, buf, len);
-}
-
-/**
- * omap_write_buf8 - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct omap_nand_info *info = mtd_to_omap(mtd);
- u_char *p = (u_char *)buf;
- bool status;
-
- while (len--) {
- iowrite8(*p++, info->nand.legacy.IO_ADDR_W);
- /* wait until buffer is available for write */
- do {
- status = info->ops->nand_writebuffer_empty();
- } while (!status);
- }
-}
-
-/**
- * omap_read_buf16 - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- */
-static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
-{
- struct nand_chip *nand = mtd_to_nand(mtd);
-
- ioread16_rep(nand->legacy.IO_ADDR_R, buf, len / 2);
-}
-
-/**
- * omap_write_buf16 - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
-{
- struct omap_nand_info *info = mtd_to_omap(mtd);
- u16 *p = (u16 *) buf;
- bool status;
- /* FIXME try bursts of writesw() or DMA ... */
- len >>= 1;
-
- while (len--) {
- iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
- /* wait until buffer is available for write */
- do {
- status = info->ops->nand_writebuffer_empty();
- } while (!status);
- }
-}
-
-/**
- * omap_read_buf_pref - read data from NAND controller into buffer
- * @chip: NAND chip object
- * @buf: buffer to store date
- * @len: number of bytes to read
- */
-static void omap_read_buf_pref(struct nand_chip *chip, u_char *buf, int len)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
uint32_t r_count = 0;
int ret = 0;
u32 *p = (u32 *)buf;
+ unsigned int pref_len;
- /* take care of subpage reads */
- if (len % 4) {
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, buf, len % 4);
- else
- omap_read_buf8(mtd, buf, len % 4);
- p = (u32 *) (buf + len % 4);
- len -= len % 4;
+ if (force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
+ return;
}
+ /* read 32-bit words using prefetch and remaining bytes normally */
+
/* configure and start prefetch transfer */
+ pref_len = len - (len & 3);
ret = omap_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, pref_len, 0x0, info);
if (ret) {
- /* PFPW engine is busy, use cpu copy method */
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, (u_char *)p, len);
- else
- omap_read_buf8(mtd, (u_char *)p, len);
+ /* prefetch engine is busy, use CPU copy method */
+ omap_nand_data_in(chip, buf, len, false);
} else {
do {
r_count = readl(info->reg.gpmc_prefetch_status);
r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
r_count = r_count >> 2;
- ioread32_rep(info->nand.legacy.IO_ADDR_R, p, r_count);
+ ioread32_rep(info->fifo, p, r_count);
p += r_count;
- len -= r_count << 2;
- } while (len);
- /* disable and stop the PFPW engine */
+ pref_len -= r_count << 2;
+ } while (pref_len);
+ /* disable and stop the Prefetch engine */
omap_prefetch_reset(info->gpmc_cs, info);
+ /* fetch any remaining bytes */
+ if (len & 3)
+ omap_nand_data_in(chip, p, len & 3, false);
}
}
/**
- * omap_write_buf_pref - write buffer to NAND controller
- * @chip: NAND chip object
- * @buf: data buffer
- * @len: number of bytes to write
+ * omap_nand_data_out_pref - NAND data out using Write Posting engine
*/
-static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
- int len)
+static void omap_nand_data_out_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
uint32_t w_count = 0;
int i = 0, ret = 0;
u16 *p = (u16 *)buf;
unsigned long tim, limit;
u32 val;
+ if (force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
+ return;
+ }
+
/* take care of subpage writes */
if (len % 2 != 0) {
- writeb(*buf, info->nand.legacy.IO_ADDR_W);
+ writeb(*(u8 *)buf, info->fifo);
p = (u16 *)(buf + 1);
len--;
}
@@ -412,18 +325,15 @@ static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
if (ret) {
- /* PFPW engine is busy, use cpu copy method */
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_write_buf16(mtd, (u_char *)p, len);
- else
- omap_write_buf8(mtd, (u_char *)p, len);
+ /* write posting engine is busy, use CPU copy method */
+ omap_nand_data_out(chip, buf, len, false);
} else {
while (len) {
w_count = readl(info->reg.gpmc_prefetch_status);
w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
- iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
+ iowrite16(*p++, info->fifo);
}
		/* wait for the data to be flushed out before resetting the prefetch */
tim = 0;
@@ -451,15 +361,16 @@ static void omap_nand_dma_callback(void *data)
/*
* omap_nand_dma_transfer: configure and start dma transfer
- * @mtd: MTD device structure
+ * @chip: nand chip structure
* @addr: virtual address in RAM of source/destination
* @len: number of data bytes to be transferred
* @is_write: flag for read/write operation
*/
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
- unsigned int len, int is_write)
+static inline int omap_nand_dma_transfer(struct nand_chip *chip,
+ const void *addr, unsigned int len,
+ int is_write)
{
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct dma_async_tx_descriptor *tx;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
@@ -521,49 +432,51 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
out_copy_unmap:
dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
- if (info->nand.options & NAND_BUSWIDTH_16)
- is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
- : omap_write_buf16(mtd, (u_char *) addr, len);
- else
- is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
- : omap_write_buf8(mtd, (u_char *) addr, len);
+ is_write == 0 ? omap_nand_data_in(chip, (void *)addr, len, false)
+ : omap_nand_data_out(chip, addr, len, false);
+
return 0;
}
/**
- * omap_read_buf_dma_pref - read data from NAND controller into buffer
- * @chip: NAND chip object
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
*/
-static void omap_read_buf_dma_pref(struct nand_chip *chip, u_char *buf,
- int len)
+static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ if (force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
+ return;
+ }
+
if (len <= mtd->oobsize)
- omap_read_buf_pref(chip, buf, len);
+ omap_nand_data_in_pref(chip, buf, len, false);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, buf, len, 0x0);
+ omap_nand_dma_transfer(chip, buf, len, 0x0);
}
/**
- * omap_write_buf_dma_pref - write buffer to NAND controller
- * @chip: NAND chip object
- * @buf: data buffer
- * @len: number of bytes to write
+ * omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
*/
-static void omap_write_buf_dma_pref(struct nand_chip *chip, const u_char *buf,
- int len)
+static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ if (force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
+ return;
+ }
+
if (len <= mtd->oobsize)
- omap_write_buf_pref(chip, buf, len);
+ omap_nand_data_out_pref(chip, buf, len, false);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, (u_char *)buf, len, 0x1);
+ omap_nand_dma_transfer(chip, buf, len, 0x1);
}
/*
@@ -587,13 +500,13 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
bytes = info->buf_len;
else if (!info->buf_len)
bytes = 0;
- iowrite32_rep(info->nand.legacy.IO_ADDR_W, (u32 *)info->buf,
+ iowrite32_rep(info->fifo, (u32 *)info->buf,
bytes >> 2);
info->buf = info->buf + bytes;
info->buf_len -= bytes;
} else {
- ioread32_rep(info->nand.legacy.IO_ADDR_R, (u32 *)info->buf,
+ ioread32_rep(info->fifo, (u32 *)info->buf,
bytes >> 2);
info->buf = info->buf + bytes;
@@ -613,20 +526,17 @@ done:
}
/*
- * omap_read_buf_irq_pref - read data from NAND controller into buffer
- * @chip: NAND chip object
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * omap_nand_data_in_irq_pref - NAND data in using Prefetch and IRQ
*/
-static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
- int len)
+static void omap_nand_data_in_irq_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct mtd_info *mtd = nand_to_mtd(&info->nand);
int ret = 0;
- if (len <= mtd->oobsize) {
- omap_read_buf_pref(chip, buf, len);
+ if (len <= mtd->oobsize || force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
return;
}
@@ -637,9 +547,11 @@ static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
/* configure and start prefetch transfer */
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
- if (ret)
+ if (ret) {
/* PFPW engine is busy, use cpu copy method */
- goto out_copy;
+ omap_nand_data_in(chip, buf, len, false);
+ return;
+ }
info->buf_len = len;
@@ -652,31 +564,23 @@ static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
return;
-
-out_copy:
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, buf, len);
- else
- omap_read_buf8(mtd, buf, len);
}
/*
- * omap_write_buf_irq_pref - write buffer to NAND controller
- * @chip: NAND chip object
- * @buf: data buffer
- * @len: number of bytes to write
+ * omap_nand_data_out_irq_pref - NAND out using write posting and IRQ
*/
-static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
- int len)
+static void omap_nand_data_out_irq_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct mtd_info *mtd = nand_to_mtd(&info->nand);
int ret = 0;
unsigned long tim, limit;
u32 val;
- if (len <= mtd->oobsize) {
- omap_write_buf_pref(chip, buf, len);
+ if (len <= mtd->oobsize || force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
return;
}
@@ -687,9 +591,11 @@ static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
/* configure and start prefetch transfer : size=24 */
ret = omap_prefetch_enable(info->gpmc_cs,
(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
- if (ret)
+ if (ret) {
/* PFPW engine is busy, use cpu copy method */
- goto out_copy;
+ omap_nand_data_out(chip, buf, len, false);
+ return;
+ }
info->buf_len = len;
@@ -711,12 +617,6 @@ static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
return;
-
-out_copy:
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_write_buf16(mtd, buf, len);
- else
- omap_write_buf8(mtd, buf, len);
}
/**
@@ -982,50 +882,6 @@ static void omap_enable_hwecc(struct nand_chip *chip, int mode)
}
/**
- * omap_wait - wait until the command is done
- * @this: NAND Chip structure
- *
- * Wait function is called during Program and erase operations and
- * the way it is called from MTD layer, we should wait till the NAND
- * chip is ready after the programming/erase operation has completed.
- *
- * Erase can take up to 400ms and program up to 20ms according to
- * general NAND and SmartMedia specs
- */
-static int omap_wait(struct nand_chip *this)
-{
- struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
- unsigned long timeo = jiffies;
- int status;
-
- timeo += msecs_to_jiffies(400);
-
- writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
- while (time_before(jiffies, timeo)) {
- status = readb(info->reg.gpmc_nand_data);
- if (status & NAND_STATUS_READY)
- break;
- cond_resched();
- }
-
- status = readb(info->reg.gpmc_nand_data);
- return status;
-}
-
-/**
- * omap_dev_ready - checks the NAND Ready GPIO line
- * @chip: NAND chip object
- *
- * Returns true if ready and false if busy.
- */
-static int omap_dev_ready(struct nand_chip *chip)
-{
- struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
-
- return gpiod_get_value(info->ready_gpiod);
-}
-
-/**
* omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
* @chip: NAND chip object
* @mode: Read/Write mode
@@ -1543,8 +1399,8 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
- chip->legacy.write_buf(chip, buf + (eccpg * info->eccpg_size),
- info->eccpg_size);
+ info->data_out(chip, buf + (eccpg * info->eccpg_size),
+ info->eccpg_size, false);
/* Update ecc vector from GPMC result registers */
ret = omap_calculate_ecc_bch_multi(mtd,
@@ -1562,7 +1418,7 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
}
/* Write ecc vector to OOB area */
- chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+ info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
@@ -1607,8 +1463,8 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
- chip->legacy.write_buf(chip, buf + (eccpg * info->eccpg_size),
- info->eccpg_size);
+ info->data_out(chip, buf + (eccpg * info->eccpg_size),
+ info->eccpg_size, false);
for (step = 0; step < info->nsteps_per_eccpg; step++) {
unsigned int base_step = eccpg * info->nsteps_per_eccpg;
@@ -1641,7 +1497,7 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
}
/* write OOB buffer to NAND device */
- chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+ info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
@@ -1984,8 +1840,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
/* Re-populate low-level callbacks based on xfer modes */
switch (info->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
- chip->legacy.read_buf = omap_read_buf_pref;
- chip->legacy.write_buf = omap_write_buf_pref;
+ info->data_in = omap_nand_data_in_pref;
+ info->data_out = omap_nand_data_out_pref;
break;
case NAND_OMAP_POLLED:
@@ -2017,8 +1873,9 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
err);
return err;
}
- chip->legacy.read_buf = omap_read_buf_dma_pref;
- chip->legacy.write_buf = omap_write_buf_dma_pref;
+
+ info->data_in = omap_nand_data_in_dma_pref;
+ info->data_out = omap_nand_data_out_dma_pref;
}
break;
@@ -2049,9 +1906,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
return err;
}
- chip->legacy.read_buf = omap_read_buf_irq_pref;
- chip->legacy.write_buf = omap_write_buf_irq_pref;
-
+ info->data_in = omap_nand_data_in_irq_pref;
+ info->data_out = omap_nand_data_out_irq_pref;
break;
default:
@@ -2217,8 +2073,105 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
return 0;
}
+static void omap_nand_data_in(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ ioread8_rep(info->fifo, buf, len);
+ else if (alignment & 3)
+ ioread16_rep(info->fifo, buf, len >> 1);
+ else
+ ioread32_rep(info->fifo, buf, len >> 2);
+}
+
+static void omap_nand_data_out(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ iowrite8_rep(info->fifo, buf, len);
+ else if (alignment & 3)
+ iowrite16_rep(info->fifo, buf, len >> 1);
+ else
+ iowrite32_rep(info->fifo, buf, len >> 2);
+}
+
+static int omap_nand_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ unsigned int i;
+ int ret;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ iowrite8(instr->ctx.cmd.opcode,
+ info->reg.gpmc_nand_command);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ iowrite8(instr->ctx.addr.addrs[i],
+ info->reg.gpmc_nand_address);
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ info->data_in(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ info->data_out(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = info->ready_gpiod ?
+ nand_gpio_waitrdy(chip, info->ready_gpiod, instr->ctx.waitrdy.timeout_ms) :
+ nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return 0;
+}
+
+static int omap_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ unsigned int i;
+
+ if (check_only)
+ return 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ int ret;
+
+ ret = omap_nand_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct nand_controller_ops omap_nand_controller_ops = {
.attach_chip = omap_nand_attach_chip,
+ .exec_op = omap_nand_exec_op,
};
/* Shared among all NAND instances to synchronize access to the ECC Engine */
@@ -2233,6 +2186,7 @@ static int omap_nand_probe(struct platform_device *pdev)
int err;
struct resource *res;
struct device *dev = &pdev->dev;
+ void __iomem *vaddr;
info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
GFP_KERNEL);
@@ -2266,10 +2220,11 @@ static int omap_nand_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand_chip->legacy.IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(nand_chip->legacy.IO_ADDR_R))
- return PTR_ERR(nand_chip->legacy.IO_ADDR_R);
+ vaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ info->fifo = vaddr;
info->phys_base = res->start;
if (!omap_gpmc_controller_initialized) {
@@ -2280,9 +2235,6 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->controller = &omap_gpmc_controller;
- nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
- nand_chip->legacy.cmd_ctrl = omap_hwcontrol;
-
info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
GPIOD_IN);
if (IS_ERR(info->ready_gpiod)) {
@@ -2290,26 +2242,12 @@ static int omap_nand_probe(struct platform_device *pdev)
return PTR_ERR(info->ready_gpiod);
}
- /*
- * If RDY/BSY line is connected to OMAP then use the omap ready
- * function and the generic nand_wait function which reads the status
- * register after monitoring the RDY/BSY line. Otherwise use a standard
- * chip delay which is slightly more than tR (AC Timing) of the NAND
- * device and read status register until you get a failure or success
- */
- if (info->ready_gpiod) {
- nand_chip->legacy.dev_ready = omap_dev_ready;
- nand_chip->legacy.chip_delay = 0;
- } else {
- nand_chip->legacy.waitfunc = omap_wait;
- nand_chip->legacy.chip_delay = 50;
- }
-
if (info->flash_bbt)
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
- /* scan NAND device connected to chip controller */
- nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
+ /* default operations */
+ info->data_in = omap_nand_data_in;
+ info->data_out = omap_nand_data_out;
err = nand_scan(nand_chip, 1);
if (err)
@@ -2352,10 +2290,7 @@ static int omap_nand_remove(struct platform_device *pdev)
return ret;
}
-static const struct of_device_id omap_nand_ids[] = {
- { .compatible = "ti,omap2-nand", },
- {},
-};
+/* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
MODULE_DEVICE_TABLE(of, omap_nand_ids);
static struct platform_driver omap_nand_driver = {
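After this conversion the core no longer drives the controller through legacy cmd_ctrl/read_buf hooks; every access arrives as a nand_operation dispatched to omap_nand_exec_op(). As a sketch, a chip reset built with the generic rawnand macros would look like this (the timeout value is illustrative):

	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_RESET, 0),
		NAND_OP_WAIT_RDY(250, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

	/* handed to chip->controller->ops->exec_op() by the core */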
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 8bab753211e9..db105d9b560c 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -384,8 +384,8 @@ static irqreturn_t elm_isr(int this_irq, void *dev_id)
static int elm_probe(struct platform_device *pdev)
{
int ret = 0;
- struct resource *irq;
struct elm_info *info;
+ int irq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -393,20 +393,18 @@ static int elm_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "no irq resource defined\n");
- return -ENODEV;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
info->elm_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->elm_base))
return PTR_ERR(info->elm_base);
- ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
- pdev->name, info);
+ ret = devm_request_irq(&pdev->dev, irq, elm_isr, 0,
+ pdev->name, info);
if (ret) {
- dev_err(&pdev->dev, "failure requesting %pr\n", irq);
+ dev_err(&pdev->dev, "failure requesting %d\n", irq);
return ret;
}
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 04e6f7b26706..7c6efa3b6255 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/dma/qcom_adm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
@@ -952,6 +953,7 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
struct dma_async_tx_descriptor *dma_desc;
struct scatterlist *sgl;
struct dma_slave_config slave_conf;
+ struct qcom_adm_peripheral_config periph_conf = {};
enum dma_transfer_direction dir_eng;
int ret;
@@ -983,11 +985,19 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
if (read) {
slave_conf.src_maxburst = 16;
slave_conf.src_addr = nandc->base_dma + reg_off;
- slave_conf.slave_id = nandc->data_crci;
+ if (nandc->data_crci) {
+ periph_conf.crci = nandc->data_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
} else {
slave_conf.dst_maxburst = 16;
slave_conf.dst_addr = nandc->base_dma + reg_off;
- slave_conf.slave_id = nandc->cmd_crci;
+ if (nandc->cmd_crci) {
+ periph_conf.crci = nandc->cmd_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
}
ret = dmaengine_slave_config(nandc->chan, &slave_conf);
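The generic slave_id field is on its way out of dma_slave_config, so ADM consumers now hand the CRCI over through a peripheral-specific structure instead, as above. The pattern in isolation (illustrative names):

	struct qcom_adm_peripheral_config periph_conf = { .crci = crci };
	struct dma_slave_config slave_conf = {
		.direction = DMA_DEV_TO_MEM,
		.src_maxburst = 16,
		.peripheral_config = &periph_conf,
		.peripheral_size = sizeof(periph_conf),
	};

	ret = dmaengine_slave_config(chan, &slave_conf);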
diff --git a/drivers/mtd/nand/raw/renesas-nand-controller.c b/drivers/mtd/nand/raw/renesas-nand-controller.c
new file mode 100644
index 000000000000..428e08362956
--- /dev/null
+++ b/drivers/mtd/nand/raw/renesas-nand-controller.c
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Evatronix/Renesas R-Car Gen3, RZ/N1D, RZ/N1S, RZ/N1L NAND controller driver
+ *
+ * Copyright (C) 2021 Schneider Electric
+ * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define COMMAND_REG 0x00
+#define COMMAND_SEQ(x) FIELD_PREP(GENMASK(5, 0), (x))
+#define COMMAND_SEQ_10 COMMAND_SEQ(0x2A)
+#define COMMAND_SEQ_12 COMMAND_SEQ(0x0C)
+#define COMMAND_SEQ_18 COMMAND_SEQ(0x32)
+#define COMMAND_SEQ_19 COMMAND_SEQ(0x13)
+#define COMMAND_SEQ_GEN_IN COMMAND_SEQ_18
+#define COMMAND_SEQ_GEN_OUT COMMAND_SEQ_19
+#define COMMAND_SEQ_READ_PAGE COMMAND_SEQ_10
+#define COMMAND_SEQ_WRITE_PAGE COMMAND_SEQ_12
+#define COMMAND_INPUT_SEL_AHBS 0
+#define COMMAND_INPUT_SEL_DMA BIT(6)
+#define COMMAND_FIFO_SEL 0
+#define COMMAND_DATA_SEL BIT(7)
+#define COMMAND_0(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define COMMAND_1(x) FIELD_PREP(GENMASK(23, 16), (x))
+#define COMMAND_2(x) FIELD_PREP(GENMASK(31, 24), (x))
+
+#define CONTROL_REG 0x04
+#define CONTROL_CHECK_RB_LINE 0
+#define CONTROL_ECC_BLOCK_SIZE(x) FIELD_PREP(GENMASK(2, 1), (x))
+#define CONTROL_ECC_BLOCK_SIZE_256 CONTROL_ECC_BLOCK_SIZE(0)
+#define CONTROL_ECC_BLOCK_SIZE_512 CONTROL_ECC_BLOCK_SIZE(1)
+#define CONTROL_ECC_BLOCK_SIZE_1024 CONTROL_ECC_BLOCK_SIZE(2)
+#define CONTROL_INT_EN BIT(4)
+#define CONTROL_ECC_EN BIT(5)
+#define CONTROL_BLOCK_SIZE(x) FIELD_PREP(GENMASK(7, 6), (x))
+#define CONTROL_BLOCK_SIZE_32P CONTROL_BLOCK_SIZE(0)
+#define CONTROL_BLOCK_SIZE_64P CONTROL_BLOCK_SIZE(1)
+#define CONTROL_BLOCK_SIZE_128P CONTROL_BLOCK_SIZE(2)
+#define CONTROL_BLOCK_SIZE_256P CONTROL_BLOCK_SIZE(3)
+
+#define STATUS_REG 0x8
+#define MEM_RDY(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
+#define CTRL_RDY(reg) (FIELD_GET(BIT(8), (reg)) == 0)
+
+#define ECC_CTRL_REG 0x18
+#define ECC_CTRL_CAP(x) FIELD_PREP(GENMASK(2, 0), (x))
+#define ECC_CTRL_CAP_2B ECC_CTRL_CAP(0)
+#define ECC_CTRL_CAP_4B ECC_CTRL_CAP(1)
+#define ECC_CTRL_CAP_8B ECC_CTRL_CAP(2)
+#define ECC_CTRL_CAP_16B ECC_CTRL_CAP(3)
+#define ECC_CTRL_CAP_24B ECC_CTRL_CAP(4)
+#define ECC_CTRL_CAP_32B ECC_CTRL_CAP(5)
+#define ECC_CTRL_ERR_THRESHOLD(x) FIELD_PREP(GENMASK(13, 8), (x))
+
+#define INT_MASK_REG 0x10
+#define INT_STATUS_REG 0x14
+#define INT_CMD_END BIT(1)
+#define INT_DMA_END BIT(3)
+#define INT_MEM_RDY(cs) FIELD_PREP(GENMASK(11, 8), BIT(cs))
+#define INT_DMA_ENDED BIT(3)
+#define MEM_IS_RDY(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
+#define DMA_HAS_ENDED(reg) FIELD_GET(BIT(3), (reg))
+
+#define ECC_OFFSET_REG 0x1C
+#define ECC_OFFSET(x) FIELD_PREP(GENMASK(15, 0), (x))
+
+#define ECC_STAT_REG 0x20
+#define ECC_STAT_CORRECTABLE(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
+#define ECC_STAT_UNCORRECTABLE(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
+
+#define ADDR0_COL_REG 0x24
+#define ADDR0_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
+
+#define ADDR0_ROW_REG 0x28
+#define ADDR0_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
+
+#define ADDR1_COL_REG 0x2C
+#define ADDR1_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
+
+#define ADDR1_ROW_REG 0x30
+#define ADDR1_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
+
+#define FIFO_DATA_REG 0x38
+
+#define DATA_REG 0x3C
+
+#define DATA_REG_SIZE_REG 0x40
+
+#define DMA_ADDR_LOW_REG 0x64
+
+#define DMA_ADDR_HIGH_REG 0x68
+
+#define DMA_CNT_REG 0x6C
+
+#define DMA_CTRL_REG 0x70
+#define DMA_CTRL_INCREMENT_BURST_4 0
+#define DMA_CTRL_REGISTER_MANAGED_MODE 0
+#define DMA_CTRL_START BIT(7)
+
+#define MEM_CTRL_REG 0x80
+#define MEM_CTRL_CS(cs) FIELD_PREP(GENMASK(1, 0), (cs))
+#define MEM_CTRL_DIS_WP(cs) FIELD_PREP(GENMASK(11, 8), BIT((cs)))
+
+#define DATA_SIZE_REG 0x84
+#define DATA_SIZE(x) FIELD_PREP(GENMASK(14, 0), (x))
+
+#define TIMINGS_ASYN_REG 0x88
+#define TIMINGS_ASYN_TRWP(x) FIELD_PREP(GENMASK(3, 0), max((x), 1U) - 1)
+#define TIMINGS_ASYN_TRWH(x) FIELD_PREP(GENMASK(7, 4), max((x), 1U) - 1)
+
+#define TIM_SEQ0_REG 0x90
+#define TIM_SEQ0_TCCS(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_SEQ0_TADL(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_SEQ0_TRHW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_SEQ0_TWHR(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define TIM_SEQ1_REG 0x94
+#define TIM_SEQ1_TWB(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_SEQ1_TRR(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_SEQ1_TWW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+
+#define TIM_GEN_SEQ0_REG 0x98
+#define TIM_GEN_SEQ0_D0(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_GEN_SEQ0_D1(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_GEN_SEQ0_D2(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_GEN_SEQ0_D3(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define TIM_GEN_SEQ1_REG 0x9c
+#define TIM_GEN_SEQ1_D4(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_GEN_SEQ1_D5(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_GEN_SEQ1_D6(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_GEN_SEQ1_D7(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define TIM_GEN_SEQ2_REG 0xA0
+#define TIM_GEN_SEQ2_D8(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+#define TIM_GEN_SEQ2_D9(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
+#define TIM_GEN_SEQ2_D10(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
+#define TIM_GEN_SEQ2_D11(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
+
+#define FIFO_INIT_REG 0xB4
+#define FIFO_INIT BIT(0)
+
+#define FIFO_STATE_REG 0xB4
+#define FIFO_STATE_R_EMPTY(reg) FIELD_GET(BIT(0), (reg))
+#define FIFO_STATE_W_FULL(reg) FIELD_GET(BIT(1), (reg))
+#define FIFO_STATE_C_EMPTY(reg) FIELD_GET(BIT(2), (reg))
+#define FIFO_STATE_R_FULL(reg) FIELD_GET(BIT(6), (reg))
+#define FIFO_STATE_W_EMPTY(reg) FIELD_GET(BIT(7), (reg))
+
+#define GEN_SEQ_CTRL_REG 0xB8
+#define GEN_SEQ_CMD0_EN BIT(0)
+#define GEN_SEQ_CMD1_EN BIT(1)
+#define GEN_SEQ_CMD2_EN BIT(2)
+#define GEN_SEQ_CMD3_EN BIT(3)
+#define GEN_SEQ_COL_A0(x) FIELD_PREP(GENMASK(5, 4), min((x), 2U))
+#define GEN_SEQ_COL_A1(x) FIELD_PREP(GENMASK(7, 6), min((x), 2U))
+#define GEN_SEQ_ROW_A0(x) FIELD_PREP(GENMASK(9, 8), min((x), 3U))
+#define GEN_SEQ_ROW_A1(x) FIELD_PREP(GENMASK(11, 10), min((x), 3U))
+#define GEN_SEQ_DATA_EN BIT(12)
+#define GEN_SEQ_DELAY_EN(x) FIELD_PREP(GENMASK(14, 13), (x))
+#define GEN_SEQ_DELAY0_EN GEN_SEQ_DELAY_EN(1)
+#define GEN_SEQ_DELAY1_EN GEN_SEQ_DELAY_EN(2)
+#define GEN_SEQ_IMD_SEQ BIT(15)
+#define GEN_SEQ_COMMAND_3(x) FIELD_PREP(GENMASK(26, 16), (x))
+
+#define DMA_TLVL_REG 0x114
+#define DMA_TLVL(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define DMA_TLVL_MAX DMA_TLVL(0xFF)
+
+#define TIM_GEN_SEQ3_REG 0x134
+#define TIM_GEN_SEQ3_D12(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
+
+#define ECC_CNT_REG 0x14C
+#define ECC_CNT(cs, reg) FIELD_GET(GENMASK(5, 0), (reg) >> ((cs) * 8))
+
+#define RNANDC_CS_NUM 4
+
+#define TO_CYCLES64(ps, period_ns) ((unsigned int)DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
+ period_ns))
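+/*
+ * Example: with a 100 MHz controller clock (period_ns = 10), a 400000 ps
+ * (400 ns) delay converts to TO_CYCLES64(400000, 10) = 40 cycles.
+ */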
+
+struct rnand_chip_sel {
+ unsigned int cs;
+};
+
+struct rnand_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ int selected_die;
+ u32 ctrl;
+ unsigned int nsels;
+ u32 control;
+ u32 ecc_ctrl;
+ u32 timings_asyn;
+ u32 tim_seq0;
+ u32 tim_seq1;
+ u32 tim_gen_seq0;
+ u32 tim_gen_seq1;
+ u32 tim_gen_seq2;
+ u32 tim_gen_seq3;
+ struct rnand_chip_sel sels[];
+};
+
+struct rnandc {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *hclk;
+ struct clk *eclk;
+ unsigned long assigned_cs;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ struct completion complete;
+ bool use_polling;
+ u8 *buf;
+ unsigned int buf_sz;
+};
+
+struct rnandc_op {
+ u32 command;
+ u32 addr0_col;
+ u32 addr0_row;
+ u32 addr1_col;
+ u32 addr1_row;
+ u32 data_size;
+ u32 ecc_offset;
+ u32 gen_seq_ctrl;
+ u8 *buf;
+ bool read;
+ unsigned int len;
+};
+
+static inline struct rnandc *to_rnandc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct rnandc, controller);
+}
+
+static inline struct rnand_chip *to_rnand(struct nand_chip *chip)
+{
+ return container_of(chip, struct rnand_chip, chip);
+}
+
+static inline unsigned int to_rnandc_cs(struct rnand_chip *nand)
+{
+ return nand->sels[nand->selected_die].cs;
+}
+
+static void rnandc_dis_correction(struct rnandc *rnandc)
+{
+ u32 control;
+
+ control = readl_relaxed(rnandc->regs + CONTROL_REG);
+ control &= ~CONTROL_ECC_EN;
+ writel_relaxed(control, rnandc->regs + CONTROL_REG);
+}
+
+static void rnandc_en_correction(struct rnandc *rnandc)
+{
+ u32 control;
+
+ control = readl_relaxed(rnandc->regs + CONTROL_REG);
+ control |= CONTROL_ECC_EN;
+ writel_relaxed(control, rnandc->regs + CONTROL_REG);
+}
+
+static void rnandc_clear_status(struct rnandc *rnandc)
+{
+ writel_relaxed(0, rnandc->regs + INT_STATUS_REG);
+ writel_relaxed(0, rnandc->regs + ECC_STAT_REG);
+ writel_relaxed(0, rnandc->regs + ECC_CNT_REG);
+}
+
+static void rnandc_dis_interrupts(struct rnandc *rnandc)
+{
+ writel_relaxed(0, rnandc->regs + INT_MASK_REG);
+}
+
+static void rnandc_en_interrupts(struct rnandc *rnandc, u32 val)
+{
+ if (!rnandc->use_polling)
+ writel_relaxed(val, rnandc->regs + INT_MASK_REG);
+}
+
+static void rnandc_clear_fifo(struct rnandc *rnandc)
+{
+ writel_relaxed(FIFO_INIT, rnandc->regs + FIFO_INIT_REG);
+}
+
+static void rnandc_select_target(struct nand_chip *chip, int die_nr)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ unsigned int cs = rnand->sels[die_nr].cs;
+
+ if (chip == rnandc->selected_chip && die_nr == rnand->selected_die)
+ return;
+
+ rnandc_clear_status(rnandc);
+ writel_relaxed(MEM_CTRL_CS(cs) | MEM_CTRL_DIS_WP(cs), rnandc->regs + MEM_CTRL_REG);
+ writel_relaxed(rnand->control, rnandc->regs + CONTROL_REG);
+ writel_relaxed(rnand->ecc_ctrl, rnandc->regs + ECC_CTRL_REG);
+ writel_relaxed(rnand->timings_asyn, rnandc->regs + TIMINGS_ASYN_REG);
+ writel_relaxed(rnand->tim_seq0, rnandc->regs + TIM_SEQ0_REG);
+ writel_relaxed(rnand->tim_seq1, rnandc->regs + TIM_SEQ1_REG);
+ writel_relaxed(rnand->tim_gen_seq0, rnandc->regs + TIM_GEN_SEQ0_REG);
+ writel_relaxed(rnand->tim_gen_seq1, rnandc->regs + TIM_GEN_SEQ1_REG);
+ writel_relaxed(rnand->tim_gen_seq2, rnandc->regs + TIM_GEN_SEQ2_REG);
+ writel_relaxed(rnand->tim_gen_seq3, rnandc->regs + TIM_GEN_SEQ3_REG);
+
+ rnandc->selected_chip = chip;
+ rnand->selected_die = die_nr;
+}
+
+static void rnandc_trigger_op(struct rnandc *rnandc, struct rnandc_op *rop)
+{
+ writel_relaxed(rop->addr0_col, rnandc->regs + ADDR0_COL_REG);
+ writel_relaxed(rop->addr0_row, rnandc->regs + ADDR0_ROW_REG);
+ writel_relaxed(rop->addr1_col, rnandc->regs + ADDR1_COL_REG);
+ writel_relaxed(rop->addr1_row, rnandc->regs + ADDR1_ROW_REG);
+ writel_relaxed(rop->ecc_offset, rnandc->regs + ECC_OFFSET_REG);
+ writel_relaxed(rop->gen_seq_ctrl, rnandc->regs + GEN_SEQ_CTRL_REG);
+ writel_relaxed(DATA_SIZE(rop->len), rnandc->regs + DATA_SIZE_REG);
+ writel_relaxed(rop->command, rnandc->regs + COMMAND_REG);
+}
+
+static void rnandc_trigger_dma(struct rnandc *rnandc)
+{
+ writel_relaxed(DMA_CTRL_INCREMENT_BURST_4 |
+ DMA_CTRL_REGISTER_MANAGED_MODE |
+ DMA_CTRL_START, rnandc->regs + DMA_CTRL_REG);
+}
+
+static irqreturn_t rnandc_irq_handler(int irq, void *private)
+{
+ struct rnandc *rnandc = private;
+
+ rnandc_dis_interrupts(rnandc);
+ complete(&rnandc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int rnandc_wait_end_of_op(struct rnandc *rnandc,
+ struct nand_chip *chip)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(rnandc->regs + STATUS_REG, status,
+ MEM_RDY(cs, status) && CTRL_RDY(status),
+ 1, 100000);
+ if (ret)
+ dev_err(rnandc->dev, "Operation timed out, status: 0x%08x\n",
+ status);
+
+ return ret;
+}
+
+static int rnandc_wait_end_of_io(struct rnandc *rnandc,
+ struct nand_chip *chip)
+{
+ int timeout_ms = 1000;
+ int ret;
+
+ if (rnandc->use_polling) {
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ u32 status;
+
+ ret = readl_poll_timeout(rnandc->regs + INT_STATUS_REG, status,
+ MEM_IS_RDY(cs, status) &&
+ DMA_HAS_ENDED(status),
+ 0, timeout_ms * 1000);
+ } else {
+ ret = wait_for_completion_timeout(&rnandc->complete,
+ msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_READ0) |
+ COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_READ_PAGE,
+ .addr0_row = page,
+ .len = mtd->writesize,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + 2),
+ };
+ unsigned int max_bitflips = 0;
+ dma_addr_t dma_addr;
+ u32 ecc_stat;
+ int bf, ret, i;
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ reinit_completion(&rnandc->complete);
+ rnandc_en_interrupts(rnandc, INT_DMA_ENDED);
+ rnandc_en_correction(rnandc);
+
+ /* Configure DMA */
+ dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
+ DMA_FROM_DEVICE);
+ writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
+ writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
+ writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
+
+ rnandc_trigger_op(rnandc, &rop);
+ rnandc_trigger_dma(rnandc);
+
+ ret = rnandc_wait_end_of_io(rnandc, chip);
+ dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_FROM_DEVICE);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+ dev_err(rnandc->dev, "Read page operation never ending\n");
+ return ret;
+ }
+
+ ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
+
+ if (oob_required || ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
+ for (i = 0; i < chip->ecc.steps; i++) {
+ unsigned int off = i * chip->ecc.size;
+ unsigned int eccoff = i * chip->ecc.bytes;
+
+ bf = nand_check_erased_ecc_chunk(rnandc->buf + off,
+ chip->ecc.size,
+ chip->oob_poi + 2 + eccoff,
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+ }
+ }
+ } else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
+ bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
+ /*
+ * The number of bitflips is an approximation given the fact
+ * that this controller does not provide per-chunk details but
+ * only gives statistics on the entire page.
+ */
+ mtd->ecc_stats.corrected += bf;
+ }
+
+ memcpy(buf, rnandc->buf, mtd->writesize);
+
+ return 0;
+}
+
+static int rnandc_read_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
+ u32 req_len, u8 *bufpoi, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ unsigned int page_off = round_down(req_offset, chip->ecc.size);
+ unsigned int real_len = round_up(req_offset + req_len - page_off,
+ chip->ecc.size);
+ unsigned int start_chunk = page_off / chip->ecc.size;
+ unsigned int nchunks = real_len / chip->ecc.size;
+ unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
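+ /*
+ * Illustrative example (hypothetical request): with 512-byte ECC chunks
+ * and 7 ECC bytes per chunk, reading 100 bytes at offset 700 yields
+ * page_off = 512, real_len = 512, start_chunk = 1, nchunks = 1 and
+ * ecc_off = 2 + 7 = 9.
+ */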
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_READ0) |
+ COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_READ_PAGE,
+ .addr0_row = page,
+ .addr0_col = page_off,
+ .len = real_len,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
+ };
+ unsigned int max_bitflips = 0, i;
+ u32 ecc_stat;
+ int bf, ret;
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ rnandc_en_correction(rnandc);
+ rnandc_trigger_op(rnandc, &rop);
+
+ while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ ioread32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
+ real_len / 4);
+
+ if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
+ dev_err(rnandc->dev, "Clearing residual data in the read FIFO\n");
+ rnandc_clear_fifo(rnandc);
+ }
+
+ ret = rnandc_wait_end_of_op(rnandc, chip);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+ dev_err(rnandc->dev, "Read subpage operation never ending\n");
+ return ret;
+ }
+
+ ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
+
+ if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+
+ for (i = start_chunk; i < start_chunk + nchunks; i++) {
+ unsigned int dataoff = i * chip->ecc.size;
+ unsigned int eccoff = 2 + (i * chip->ecc.bytes);
+
+ bf = nand_check_erased_ecc_chunk(bufpoi + dataoff,
+ chip->ecc.size,
+ chip->oob_poi + eccoff,
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+ }
+ }
+ } else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
+ bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
+ /*
+ * The number of bitflips is an approximation given the fact
+ * that this controller does not provide per-chunk details but
+ * only gives statistics on the entire page.
+ */
+ mtd->ecc_stats.corrected += bf;
+ }
+
+ return 0;
+}
+
+static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnand_chip *rnand = to_rnand(chip);
+ unsigned int cs = to_rnandc_cs(rnand);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_SEQIN) |
+ COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_WRITE_PAGE,
+ .addr0_row = page,
+ .len = mtd->writesize,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + 2),
+ };
+ dma_addr_t dma_addr;
+ int ret;
+
+ memcpy(rnandc->buf, buf, mtd->writesize);
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ reinit_completion(&rnandc->complete);
+ rnandc_en_interrupts(rnandc, INT_MEM_RDY(cs));
+ rnandc_en_correction(rnandc);
+
+ /* Configure DMA */
+ dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
+ DMA_TO_DEVICE);
+ writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
+ writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
+ writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
+
+ rnandc_trigger_op(rnandc, &rop);
+ rnandc_trigger_dma(rnandc);
+
+ ret = rnandc_wait_end_of_io(rnandc, chip);
+ dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_TO_DEVICE);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+ dev_err(rnandc->dev, "Write page operation never ending\n");
+ return ret;
+ }
+
+ if (!oob_required)
+ return 0;
+
+ return nand_change_write_column_op(chip, mtd->writesize, chip->oob_poi,
+ mtd->oobsize, false);
+}
+
+static int rnandc_write_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
+ u32 req_len, const u8 *bufpoi,
+ int oob_required, int page)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int page_off = round_down(req_offset, chip->ecc.size);
+ unsigned int real_len = round_up(req_offset + req_len - page_off,
+ chip->ecc.size);
+ unsigned int start_chunk = page_off / chip->ecc.size;
+ unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_SEQIN) |
+ COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
+ COMMAND_SEQ_WRITE_PAGE,
+ .addr0_row = page,
+ .addr0_col = page_off,
+ .len = real_len,
+ .ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
+ };
+ int ret;
+
+ /* Prepare controller */
+ rnandc_select_target(chip, chip->cur_cs);
+ rnandc_clear_status(rnandc);
+ rnandc_en_correction(rnandc);
+ rnandc_trigger_op(rnandc, &rop);
+
+ while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ iowrite32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
+ real_len / 4);
+
+ while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ ret = rnandc_wait_end_of_op(rnandc, chip);
+ rnandc_dis_correction(rnandc);
+ if (ret) {
+ dev_err(rnandc->dev, "Write subpage operation never ending\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This controller is simple enough that it does not need the parser provided
+ * by the core; instead, every situation is handled here.
+ */
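+/*
+ * Illustrative mapping (hypothetical operation): a raw page read described
+ * as CMD(0x00), five address cycles, CMD(0x30), WAITRDY, DATA_IN becomes
+ * COMMAND_0(0x00) with GEN_SEQ_CMD0_EN, two column and three row bytes in
+ * ADDR0 with GEN_SEQ_COL_A0(2)/GEN_SEQ_ROW_A0(3), the second opcode in
+ * GEN_SEQ_COMMAND_3(0x30) with GEN_SEQ_CMD3_EN, GEN_SEQ_DELAY0_EN for the
+ * ready/busy wait, GEN_SEQ_DATA_EN for the data phase, and finally the
+ * generic COMMAND_SEQ_GEN_IN sequence since data flows in.
+ */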
+static int rnandc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ const struct nand_op_instr *instr = NULL;
+ struct rnandc_op rop = {
+ .command = COMMAND_INPUT_SEL_AHBS,
+ .gen_seq_ctrl = GEN_SEQ_IMD_SEQ,
+ };
+ unsigned int cmd_phase = 0, addr_phase = 0, data_phase = 0,
+ delay_phase = 0;
+ unsigned int op_id, col_addrs, row_addrs, naddrs, remainder, words, i;
+ const u8 *addrs;
+ u32 last_bytes;
+ int ret;
+
+ if (!check_only)
+ rnandc_select_target(chip, op->cs);
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ nand_op_trace(" ", instr);
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ switch (cmd_phase++) {
+ case 0:
+ rop.command |= COMMAND_0(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD0_EN;
+ break;
+ case 1:
+ rop.gen_seq_ctrl |= GEN_SEQ_COMMAND_3(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD3_EN;
+ if (addr_phase == 0)
+ addr_phase = 1;
+ break;
+ case 2:
+ rop.command |= COMMAND_2(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD2_EN;
+ if (addr_phase <= 1)
+ addr_phase = 2;
+ break;
+ case 3:
+ rop.command |= COMMAND_1(instr->ctx.cmd.opcode);
+ rop.gen_seq_ctrl |= GEN_SEQ_CMD1_EN;
+ if (addr_phase <= 1)
+ addr_phase = 2;
+ if (delay_phase == 0)
+ delay_phase = 1;
+ if (data_phase == 0)
+ data_phase = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ addrs = instr->ctx.addr.addrs;
+ naddrs = instr->ctx.addr.naddrs;
+ if (naddrs > 5)
+ return -EOPNOTSUPP;
+
+ col_addrs = min(2U, naddrs);
+ row_addrs = naddrs > 2 ? naddrs - col_addrs : 0;
+
+ switch (addr_phase++) {
+ case 0:
+ for (i = 0; i < col_addrs; i++)
+ rop.addr0_col |= addrs[i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_COL_A0(col_addrs);
+
+ for (i = 0; i < row_addrs; i++)
+ rop.addr0_row |= addrs[2 + i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_ROW_A0(row_addrs);
+
+ if (cmd_phase == 0)
+ cmd_phase = 1;
+ break;
+ case 1:
+ for (i = 0; i < col_addrs; i++)
+ rop.addr1_col |= addrs[i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_COL_A1(col_addrs);
+
+ for (i = 0; i < row_addrs; i++)
+ rop.addr1_row |= addrs[2 + i] << (i * 8);
+ rop.gen_seq_ctrl |= GEN_SEQ_ROW_A1(row_addrs);
+
+ if (cmd_phase <= 1)
+ cmd_phase = 2;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ rop.read = true;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ rop.gen_seq_ctrl |= GEN_SEQ_DATA_EN;
+ rop.buf = instr->ctx.data.buf.in;
+ rop.len = instr->ctx.data.len;
+ rop.command |= COMMAND_FIFO_SEL;
+
+ switch (data_phase++) {
+ case 0:
+ if (cmd_phase <= 2)
+ cmd_phase = 3;
+ if (addr_phase <= 1)
+ addr_phase = 2;
+ if (delay_phase == 0)
+ delay_phase = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ switch (delay_phase++) {
+ case 0:
+ rop.gen_seq_ctrl |= GEN_SEQ_DELAY0_EN;
+
+ if (cmd_phase <= 2)
+ cmd_phase = 3;
+ break;
+ case 1:
+ rop.gen_seq_ctrl |= GEN_SEQ_DELAY1_EN;
+
+ if (cmd_phase <= 3)
+ cmd_phase = 4;
+ if (data_phase == 0)
+ data_phase = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ }
+ }
+
+ /*
+ * Sequence 19 is generic and dedicated to write operations.
+ * Sequence 18 is also generic and works for all other operations.
+ */
+ if (rop.buf && !rop.read)
+ rop.command |= COMMAND_SEQ_GEN_OUT;
+ else
+ rop.command |= COMMAND_SEQ_GEN_IN;
+
+ if (check_only)
+ return 0;
+
+ rnandc_trigger_op(rnandc, &rop);
+
+ words = rop.len / sizeof(u32);
+ remainder = rop.len % sizeof(u32);
+ if (rop.buf && rop.read) {
+ while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ ioread32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf, words);
+ if (remainder) {
+ last_bytes = readl_relaxed(rnandc->regs + FIFO_DATA_REG);
+ memcpy(rop.buf + (words * sizeof(u32)), &last_bytes,
+ remainder);
+ }
+
+ if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
+ dev_warn(rnandc->dev,
+ "Clearing residual data in the read FIFO\n");
+ rnandc_clear_fifo(rnandc);
+ }
+ } else if (rop.len && !rop.read) {
+ while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+
+ iowrite32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf, words);
+
+ if (remainder) {
+ last_bytes = 0;
+ memcpy(&last_bytes, rop.buf + (words * sizeof(u32)), remainder);
+ writel_relaxed(last_bytes, rnandc->regs + FIFO_DATA_REG);
+ }
+
+ while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
+ cpu_relax();
+ }
+
+ ret = rnandc_wait_end_of_op(rnandc, chip);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rnandc_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ unsigned int period_ns = 1000000000 / clk_get_rate(rnandc->eclk);
+ const struct nand_sdr_timings *sdr;
+ unsigned int cyc, cle, ale, bef_dly, ca_to_data;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (sdr->tRP_min != sdr->tWP_min || sdr->tREH_min != sdr->tWH_min) {
+ dev_err(rnandc->dev, "Read and write hold times must be identical\n");
+ return -EINVAL;
+ }
+
+ if (chipnr < 0)
+ return 0;
+
+ rnand->timings_asyn =
+ TIMINGS_ASYN_TRWP(TO_CYCLES64(sdr->tRP_min, period_ns)) |
+ TIMINGS_ASYN_TRWH(TO_CYCLES64(sdr->tREH_min, period_ns));
+ rnand->tim_seq0 =
+ TIM_SEQ0_TCCS(TO_CYCLES64(sdr->tCCS_min, period_ns)) |
+ TIM_SEQ0_TADL(TO_CYCLES64(sdr->tADL_min, period_ns)) |
+ TIM_SEQ0_TRHW(TO_CYCLES64(sdr->tRHW_min, period_ns)) |
+ TIM_SEQ0_TWHR(TO_CYCLES64(sdr->tWHR_min, period_ns));
+ rnand->tim_seq1 =
+ TIM_SEQ1_TWB(TO_CYCLES64(sdr->tWB_max, period_ns)) |
+ TIM_SEQ1_TRR(TO_CYCLES64(sdr->tRR_min, period_ns)) |
+ TIM_SEQ1_TWW(TO_CYCLES64(sdr->tWW_min, period_ns));
+
+ cyc = sdr->tDS_min + sdr->tDH_min;
+ cle = sdr->tCLH_min + sdr->tCLS_min;
+ ale = sdr->tALH_min + sdr->tALS_min;
+ bef_dly = sdr->tWB_max - sdr->tDH_min;
+ ca_to_data = sdr->tWHR_min + sdr->tREA_max - sdr->tDH_min;
+
+ /*
+ * D0 = CMD -> ADDR = tCLH + tCLS - 1 cycle
+ * D1 = CMD -> CMD = tCLH + tCLS - 1 cycle
+ * D2 = CMD -> DLY = tWB - tDH
+ * D3 = CMD -> DATA = tWHR + tREA - tDH
+ */
+ rnand->tim_gen_seq0 =
+ TIM_GEN_SEQ0_D0(TO_CYCLES64(cle - cyc, period_ns)) |
+ TIM_GEN_SEQ0_D1(TO_CYCLES64(cle - cyc, period_ns)) |
+ TIM_GEN_SEQ0_D2(TO_CYCLES64(bef_dly, period_ns)) |
+ TIM_GEN_SEQ0_D3(TO_CYCLES64(ca_to_data, period_ns));
+
+ /*
+ * D4 = ADDR -> CMD = tALH + tALS - 1 cycle
+ * D5 = ADDR -> ADDR = tALH + tALS - 1 cycle
+ * D6 = ADDR -> DLY = tWB - tDH
+ * D7 = ADDR -> DATA = tWHR + tREA - tDH
+ */
+ rnand->tim_gen_seq1 =
+ TIM_GEN_SEQ1_D4(TO_CYCLES64(ale - cyc, period_ns)) |
+ TIM_GEN_SEQ1_D5(TO_CYCLES64(ale - cyc, period_ns)) |
+ TIM_GEN_SEQ1_D6(TO_CYCLES64(bef_dly, period_ns)) |
+ TIM_GEN_SEQ1_D7(TO_CYCLES64(ca_to_data, period_ns));
+
+ /*
+ * D8 = DLY -> DATA = tRR + tREA
+ * D9 = DLY -> CMD = tRR
+ * D10 = DATA -> CMD = tCLH + tCLS - 1 cycle
+ * D11 = DATA -> DLY = tWB - tDH
+ */
+ rnand->tim_gen_seq2 =
+ TIM_GEN_SEQ2_D8(TO_CYCLES64(sdr->tRR_min + sdr->tREA_max, period_ns)) |
+ TIM_GEN_SEQ2_D9(TO_CYCLES64(sdr->tRR_min, period_ns)) |
+ TIM_GEN_SEQ2_D10(TO_CYCLES64(cle - cyc, period_ns)) |
+ TIM_GEN_SEQ2_D11(TO_CYCLES64(bef_dly, period_ns));
+
+ /* D12 = DATA -> END = tCLH - tDH */
+ rnand->tim_gen_seq3 =
+ TIM_GEN_SEQ3_D12(TO_CYCLES64(sdr->tCLH_min - sdr->tDH_min, period_ns));
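+
+ /*
+ * Illustrative numbers (hypothetical timings): with tCLS = 10 ns,
+ * tCLH = 5 ns, tDS = 7 ns and tDH = 5 ns, D0 above is cle - cyc = 3 ns;
+ * on a 12 ns bus clock TO_CYCLES64() yields 1 cycle, encoded as 0 since
+ * the generic sequence delay fields store "cycles minus one".
+ */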
+
+ return 0;
+}
+
+static int rnandc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = eccbytes;
+
+ return 0;
+}
+
+static int rnandc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2 + eccbytes;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops rnandc_ooblayout_ops = {
+ .ecc = rnandc_ooblayout_ecc,
+ .free = rnandc_ooblayout_free,
+};
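+
+/*
+ * Illustrative layout (hypothetical geometry): on a 2048+64 page with
+ * 512-byte steps and 7 ECC bytes per step (padded to 8), the layout above
+ * yields a 32-byte ECC region at offset 2 and a free region spanning
+ * bytes 34..63.
+ */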
+
+static int rnandc_hw_ecc_controller_init(struct nand_chip *chip)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+
+ if (mtd->writesize > SZ_16K) {
+ dev_err(rnandc->dev, "Unsupported page size\n");
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.size) {
+ case SZ_256:
+ rnand->control |= CONTROL_ECC_BLOCK_SIZE_256;
+ break;
+ case SZ_512:
+ rnand->control |= CONTROL_ECC_BLOCK_SIZE_512;
+ break;
+ case SZ_1K:
+ rnand->control |= CONTROL_ECC_BLOCK_SIZE_1024;
+ break;
+ default:
+ dev_err(rnandc->dev, "Unsupported ECC chunk size\n");
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.strength) {
+ case 2:
+ chip->ecc.bytes = 4;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_2B;
+ break;
+ case 4:
+ chip->ecc.bytes = 7;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_4B;
+ break;
+ case 8:
+ chip->ecc.bytes = 14;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_8B;
+ break;
+ case 16:
+ chip->ecc.bytes = 28;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_16B;
+ break;
+ case 24:
+ chip->ecc.bytes = 42;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_24B;
+ break;
+ case 32:
+ chip->ecc.bytes = 56;
+ rnand->ecc_ctrl |= ECC_CTRL_CAP_32B;
+ break;
+ default:
+ dev_err(rnandc->dev, "Unsupported ECC strength\n");
+ return -EINVAL;
+ }
+
+ rnand->ecc_ctrl |= ECC_CTRL_ERR_THRESHOLD(chip->ecc.strength);
+
+ mtd_set_ooblayout(mtd, &rnandc_ooblayout_ops);
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ chip->ecc.read_page = rnandc_read_page_hw_ecc;
+ chip->ecc.read_subpage = rnandc_read_subpage_hw_ecc;
+ chip->ecc.write_page = rnandc_write_page_hw_ecc;
+ chip->ecc.write_subpage = rnandc_write_subpage_hw_ecc;
+
+ return 0;
+}
+
+static int rnandc_ecc_init(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ int ret;
+
+ if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+ (!ecc->size || !ecc->strength)) {
+ if (requirements->step_size && requirements->strength) {
+ ecc->size = requirements->step_size;
+ ecc->strength = requirements->strength;
+ } else {
+ dev_err(rnandc->dev, "No minimum ECC strength\n");
+ return -EINVAL;
+ }
+ }
+
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = rnandc_hw_ecc_controller_init(chip);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rnandc_attach_chip(struct nand_chip *chip)
+{
+ struct rnand_chip *rnand = to_rnand(chip);
+ struct rnandc *rnandc = to_rnandc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg = nanddev_get_memorg(&chip->base);
+ int ret;
+
+ /* Do not store BBT bits in the OOB section as it is not protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (mtd->writesize <= 512) {
+ dev_err(rnandc->dev, "Small page devices not supported\n");
+ return -EINVAL;
+ }
+
+ rnand->control |= CONTROL_CHECK_RB_LINE | CONTROL_INT_EN;
+
+ switch (memorg->pages_per_eraseblock) {
+ case 32:
+ rnand->control |= CONTROL_BLOCK_SIZE_32P;
+ break;
+ case 64:
+ rnand->control |= CONTROL_BLOCK_SIZE_64P;
+ break;
+ case 128:
+ rnand->control |= CONTROL_BLOCK_SIZE_128P;
+ break;
+ case 256:
+ rnand->control |= CONTROL_BLOCK_SIZE_256P;
+ break;
+ default:
+ dev_err(rnandc->dev, "Unsupported memory organization\n");
+ return -EINVAL;
+ }
+
+ chip->options |= NAND_SUBPAGE_READ;
+
+ ret = rnandc_ecc_init(chip);
+ if (ret) {
+ dev_err(rnandc->dev, "ECC initialization failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Force an update of the configuration registers */
+ rnand->selected_die = -1;
+
+ return 0;
+}
+
+static const struct nand_controller_ops rnandc_ops = {
+ .attach_chip = rnandc_attach_chip,
+ .exec_op = rnandc_exec_op,
+ .setup_interface = rnandc_setup_interface,
+};
+
+static int rnandc_alloc_dma_buf(struct rnandc *rnandc,
+ struct mtd_info *new_mtd)
+{
+ unsigned int max_len = new_mtd->writesize + new_mtd->oobsize;
+ struct rnand_chip *entry, *temp;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+
+ list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
+ chip = &entry->chip;
+ mtd = nand_to_mtd(chip);
+ max_len = max(max_len, mtd->writesize + mtd->oobsize);
+ }
+
+ if (rnandc->buf && rnandc->buf_sz < max_len) {
+ devm_kfree(rnandc->dev, rnandc->buf);
+ rnandc->buf = NULL;
+ }
+
+ if (!rnandc->buf) {
+ rnandc->buf_sz = max_len;
+ rnandc->buf = devm_kmalloc(rnandc->dev, max_len,
+ GFP_KERNEL | GFP_DMA);
+ if (!rnandc->buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int rnandc_chip_init(struct rnandc *rnandc, struct device_node *np)
+{
+ struct rnand_chip *rnand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ ret = (nsels < 0) ? nsels : -EINVAL;
+ dev_err(rnandc->dev, "Invalid reg property (%d)\n", ret);
+ return ret;
+ }
+
+ /* Alloc the driver's NAND chip structure */
+ rnand = devm_kzalloc(rnandc->dev, struct_size(rnand, sels, nsels),
+ GFP_KERNEL);
+ if (!rnand)
+ return -ENOMEM;
+
+ rnand->nsels = nsels;
+ rnand->selected_die = -1;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(rnandc->dev, "Incomplete reg property (%d)\n", ret);
+ return ret;
+ }
+
+ if (cs >= RNANDC_CS_NUM) {
+ dev_err(rnandc->dev, "Invalid reg property (%d)\n", cs);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &rnandc->assigned_cs)) {
+ dev_err(rnandc->dev, "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ /*
+ * No need to check for RB or WP properties, there is a 1:1
+ * mandatory mapping with the CS.
+ */
+ rnand->sels[i].cs = cs;
+ }
+
+ chip = &rnand->chip;
+ chip->controller = &rnandc->controller;
+ nand_set_flash_node(chip, np);
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = rnandc->dev;
+ if (!mtd->name) {
+ dev_err(rnandc->dev, "Missing MTD label\n");
+ return -EINVAL;
+ }
+
+ ret = nand_scan(chip, rnand->nsels);
+ if (ret) {
+ dev_err(rnandc->dev, "Failed to scan the NAND chip (%d)\n", ret);
+ return ret;
+ }
+
+ ret = rnandc_alloc_dma_buf(rnandc, mtd);
+ if (ret)
+ goto cleanup_nand;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(rnandc->dev, "Failed to register MTD device (%d)\n", ret);
+ goto cleanup_nand;
+ }
+
+ list_add_tail(&rnand->node, &rnandc->chips);
+
+ return 0;
+
+cleanup_nand:
+ nand_cleanup(chip);
+
+ return ret;
+}
+
+static void rnandc_chips_cleanup(struct rnandc *rnandc)
+{
+ struct rnand_chip *entry, *temp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
+ chip = &entry->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&entry->node);
+ }
+}
+
+static int rnandc_chips_init(struct rnandc *rnandc)
+{
+ struct device_node *np;
+ int ret;
+
+ for_each_child_of_node(rnandc->dev->of_node, np) {
+ ret = rnandc_chip_init(rnandc, np);
+ if (ret) {
+ of_node_put(np);
+ goto cleanup_chips;
+ }
+ }
+
+ return 0;
+
+cleanup_chips:
+ rnandc_chips_cleanup(rnandc);
+
+ return ret;
+}
+
+static int rnandc_probe(struct platform_device *pdev)
+{
+ struct rnandc *rnandc;
+ int irq, ret;
+
+ rnandc = devm_kzalloc(&pdev->dev, sizeof(*rnandc), GFP_KERNEL);
+ if (!rnandc)
+ return -ENOMEM;
+
+ rnandc->dev = &pdev->dev;
+ nand_controller_init(&rnandc->controller);
+ rnandc->controller.ops = &rnandc_ops;
+ INIT_LIST_HEAD(&rnandc->chips);
+ init_completion(&rnandc->complete);
+
+ rnandc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rnandc->regs))
+ return PTR_ERR(rnandc->regs);
+
+ /* APB clock */
+ rnandc->hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(rnandc->hclk))
+ return PTR_ERR(rnandc->hclk);
+
+ /* External NAND bus clock */
+ rnandc->eclk = devm_clk_get(&pdev->dev, "eclk");
+ if (IS_ERR(rnandc->eclk))
+ return PTR_ERR(rnandc->eclk);
+
+ ret = clk_prepare_enable(rnandc->hclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(rnandc->eclk);
+ if (ret)
+ goto disable_hclk;
+
+ rnandc_dis_interrupts(rnandc);
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq == -EPROBE_DEFER) {
+ ret = irq;
+ goto disable_eclk;
+ } else if (irq < 0) {
+ dev_info(&pdev->dev, "No IRQ found, fallback to polling\n");
+ rnandc->use_polling = true;
+ } else {
+ ret = devm_request_irq(&pdev->dev, irq, rnandc_irq_handler, 0,
+ "renesas-nand-controller", rnandc);
+ if (ret < 0)
+ goto disable_eclk;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto disable_eclk;
+
+ rnandc_clear_fifo(rnandc);
+
+ platform_set_drvdata(pdev, rnandc);
+
+ ret = rnandc_chips_init(rnandc);
+ if (ret)
+ goto disable_eclk;
+
+ return 0;
+
+disable_eclk:
+ clk_disable_unprepare(rnandc->eclk);
+disable_hclk:
+ clk_disable_unprepare(rnandc->hclk);
+
+ return ret;
+}
+
+static int rnandc_remove(struct platform_device *pdev)
+{
+ struct rnandc *rnandc = platform_get_drvdata(pdev);
+
+ rnandc_chips_cleanup(rnandc);
+
+ clk_disable_unprepare(rnandc->eclk);
+ clk_disable_unprepare(rnandc->hclk);
+
+ return 0;
+}
+
+static const struct of_device_id rnandc_id_table[] = {
+ { .compatible = "renesas,rcar-gen3-nandc" },
+ { .compatible = "renesas,rzn1-nandc" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, rnandc_id_table);
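+
+/*
+ * Illustrative device tree usage (a sketch under assumed bindings; the unit
+ * address, clock phandles and interrupt specifier are placeholders):
+ *
+ * nand-controller@40102000 {
+ * compatible = "renesas,rzn1-nandc";
+ * reg = <0x40102000 0x2000>;
+ * interrupts = <0 58 4>;
+ * clocks = <&sysctrl 1>, <&sysctrl 2>;
+ * clock-names = "hclk", "eclk";
+ * #address-cells = <1>;
+ * #size-cells = <0>;
+ *
+ * nand@0 {
+ * reg = <0>;
+ * label = "nand";
+ * };
+ * };
+ */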
+
+static struct platform_driver rnandc_driver = {
+ .driver = {
+ .name = "renesas-nandc",
+ .of_match_table = of_match_ptr(rnandc_id_table),
+ },
+ .probe = rnandc_probe,
+ .remove = rnandc_remove,
+};
+module_platform_driver(rnandc_driver);
+
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 NAND controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 32431bbe69b8..b36e5260ae27 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -17,8 +17,11 @@
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
+
#define COMMAND 0x00
#define COMMAND_GO BIT(31)
#define COMMAND_CLE BIT(30)
@@ -1151,6 +1154,7 @@ static int tegra_nand_probe(struct platform_device *pdev)
return -ENOMEM;
ctrl->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctrl);
nand_controller_init(&ctrl->controller);
ctrl->controller.ops = &tegra_nand_controller_ops;
@@ -1166,14 +1170,23 @@ static int tegra_nand_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->clk))
return PTR_ERR(ctrl->clk);
- err = clk_prepare_enable(ctrl->clk);
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
+ /*
+ * This driver doesn't support active power management yet,
+ * so we will simply keep device resumed.
+ */
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_resume_and_get(&pdev->dev);
if (err)
return err;
err = reset_control_reset(rst);
if (err) {
dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
- goto err_disable_clk;
+ goto err_put_pm;
}
writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
@@ -1188,21 +1201,20 @@ static int tegra_nand_probe(struct platform_device *pdev)
dev_name(&pdev->dev), ctrl);
if (err) {
dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
- goto err_disable_clk;
+ goto err_put_pm;
}
writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
err = tegra_nand_chips_init(ctrl->dev, ctrl);
if (err)
- goto err_disable_clk;
-
- platform_set_drvdata(pdev, ctrl);
+ goto err_put_pm;
return 0;
-err_disable_clk:
- clk_disable_unprepare(ctrl->clk);
+err_put_pm:
+ pm_runtime_put_sync_suspend(ctrl->dev);
+ pm_runtime_force_suspend(ctrl->dev);
return err;
}
@@ -1219,11 +1231,40 @@ static int tegra_nand_remove(struct platform_device *pdev)
nand_cleanup(chip);
+ pm_runtime_put_sync_suspend(ctrl->dev);
+ pm_runtime_force_suspend(ctrl->dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_nand_runtime_resume(struct device *dev)
+{
+ struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(ctrl->clk);
+ if (err) {
+ dev_err(dev, "Failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev)
+{
+ struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
+
clk_disable_unprepare(ctrl->clk);
return 0;
}
+static const struct dev_pm_ops tegra_nand_pm = {
+ SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume,
+ NULL)
+};
+
static const struct of_device_id tegra_nand_of_match[] = {
{ .compatible = "nvidia,tegra20-nand" },
{ /* sentinel */ }
@@ -1234,6 +1275,7 @@ static struct platform_driver tegra_nand_driver = {
.driver = {
.name = "tegra-nand",
.of_match_table = tegra_nand_of_match,
+ .pm = &tegra_nand_pm,
},
.probe = tegra_nand_probe,
.remove = tegra_nand_remove,
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index 1fea5cab492c..d6d889ce8876 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -48,13 +48,13 @@ static const struct spi_nor_locking_ops atmel_at25fs_locking_ops = {
.is_locked = atmel_at25fs_is_locked,
};
-static void atmel_at25fs_default_init(struct spi_nor *nor)
+static void atmel_at25fs_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &atmel_at25fs_locking_ops;
}
static const struct spi_nor_fixups atmel_at25fs_fixups = {
- .default_init = atmel_at25fs_default_init,
+ .late_init = atmel_at25fs_late_init,
};
/**
@@ -146,50 +146,59 @@ static const struct spi_nor_locking_ops atmel_global_protection_ops = {
.is_locked = atmel_is_global_protected,
};
-static void atmel_global_protection_default_init(struct spi_nor *nor)
+static void atmel_global_protection_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &atmel_global_protection_ops;
}
static const struct spi_nor_fixups atmel_global_protection_fixups = {
- .default_init = atmel_global_protection_default_init,
+ .late_init = atmel_global_protection_late_init,
};
static const struct flash_info atmel_parts[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK)
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_at25fs_fixups },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK)
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K)
.fixups = &atmel_at25fs_fixups },
-
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
- { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
- { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
-
- { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-
- { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
- .fixups = &atmel_global_protection_fixups },
-
- { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_global_protection_fixups },
+ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_global_protection_fixups },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_global_protection_fixups },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_global_protection_fixups },
+ { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_global_protection_fixups },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_global_protection_fixups },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &atmel_global_protection_fixups },
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_atmel = {
diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c
index 011b83e99e95..ae4d67e01bb3 100644
--- a/drivers/mtd/spi-nor/catalyst.c
+++ b/drivers/mtd/spi-nor/catalyst.c
@@ -10,16 +10,11 @@
static const struct flash_info catalyst_parts[] = {
/* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO(16, 8, 16, 1,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c03", CAT25_INFO(32, 8, 16, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c09", CAT25_INFO(128, 8, 32, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c17", CAT25_INFO(256, 8, 32, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c11", CAT25_INFO(16, 8, 16, 1) },
+ { "cat25c03", CAT25_INFO(32, 8, 16, 2) },
+ { "cat25c09", CAT25_INFO(128, 8, 32, 2) },
+ { "cat25c17", CAT25_INFO(256, 8, 32, 2) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
};
const struct spi_nor_manufacturer spi_nor_catalyst = {
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index cc08bd707378..04ea180118e3 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1952,6 +1952,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t page_offset, page_remain, i;
ssize_t ret;
+ u32 page_size = nor->params->page_size;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
@@ -1968,16 +1969,15 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
* calculated with an AND operation. On the other cases we
* need to do a modulus operation (more expensive).
*/
- if (is_power_of_2(nor->page_size)) {
- page_offset = addr & (nor->page_size - 1);
+ if (is_power_of_2(page_size)) {
+ page_offset = addr & (page_size - 1);
} else {
uint64_t aux = addr;
- page_offset = do_div(aux, nor->page_size);
+ page_offset = do_div(aux, page_size);
}
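+	/* e.g. a 256-byte page gives addr & 255; a 528-byte page needs do_div() */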
/* the size of data remaining on the first page */
- page_remain = min_t(size_t,
- nor->page_size - page_offset, len - i);
+ page_remain = min_t(size_t, page_size - page_offset, len - i);
addr = spi_nor_convert_addr(nor, addr);
@@ -2115,7 +2115,7 @@ static int spi_nor_spimem_check_op(struct spi_nor *nor,
*/
op->addr.nbytes = 4;
if (!spi_mem_supports_op(nor->spimem, op)) {
- if (nor->mtd.size > SZ_16M)
+ if (nor->params->size > SZ_16M)
return -EOPNOTSUPP;
/* If flash size <= 16MB, 3 address bytes are sufficient */
@@ -2141,7 +2141,7 @@ static int spi_nor_spimem_check_readop(struct spi_nor *nor,
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
SPI_MEM_OP_ADDR(3, 0, 0),
SPI_MEM_OP_DUMMY(1, 0),
- SPI_MEM_OP_DATA_IN(1, NULL, 0));
+ SPI_MEM_OP_DATA_IN(2, NULL, 0));
spi_nor_spimem_setup_op(nor, &op, read->proto);
@@ -2167,7 +2167,7 @@ static int spi_nor_spimem_check_pp(struct spi_nor *nor,
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
SPI_MEM_OP_ADDR(3, 0, 0),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, NULL, 0));
+ SPI_MEM_OP_DATA_OUT(2, NULL, 0));
spi_nor_spimem_setup_op(nor, &op, pp->proto);
@@ -2484,13 +2484,61 @@ static int spi_nor_default_setup(struct spi_nor *nor,
return 0;
}
+static int spi_nor_set_addr_width(struct spi_nor *nor)
+{
+ if (nor->addr_width) {
+ /* already configured from SFDP */
+ } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
+ /*
+ * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
+ * in this protocol an odd address width cannot be used because
+ * then the address phase would only span a cycle and a half.
+ * Half a cycle would be left over. We would then have to start
+ * the dummy phase in the middle of a cycle and so too the data
+ * phase, and we will end the transaction with half a cycle left
+ * over.
+ *
+ * Force all 8D-8D-8D flashes to use an address width of 4 to
+ * avoid this situation.
+ */
+ nor->addr_width = 4;
+ } else if (nor->info->addr_width) {
+ nor->addr_width = nor->info->addr_width;
+ } else {
+ nor->addr_width = 3;
+ }
+
+ if (nor->addr_width == 3 && nor->params->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ }
+
+ if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+ dev_dbg(nor->dev, "address width is too large: %u\n",
+ nor->addr_width);
+ return -EINVAL;
+ }
+
+ /* Set 4byte opcodes when possible. */
+ if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+
+ return 0;
+}
+
static int spi_nor_setup(struct spi_nor *nor,
const struct spi_nor_hwcaps *hwcaps)
{
- if (!nor->params->setup)
- return 0;
+ int ret;
- return nor->params->setup(nor, hwcaps);
+ if (nor->params->setup) {
+ ret = nor->params->setup(nor, hwcaps);
+ if (ret)
+ return ret;
+ }
+
+ return spi_nor_set_addr_width(nor);
}
/**
@@ -2509,107 +2557,50 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
}
/**
- * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
- * based on JESD216 SFDP standard.
- * @nor: pointer to a 'struct spi_nor'.
- *
- * The method has a roll-back mechanism: in case the SFDP parsing fails, the
- * legacy flash parameters and settings will be restored.
- */
-static void spi_nor_sfdp_init_params(struct spi_nor *nor)
-{
- struct spi_nor_flash_parameter sfdp_params;
-
- memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
-
- if (spi_nor_parse_sfdp(nor)) {
- memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
- nor->addr_width = 0;
- nor->flags &= ~SNOR_F_4B_OPCODES;
- }
-}
-
-/**
- * spi_nor_info_init_params() - Initialize the flash's parameters and settings
- * based on nor->info data.
+ * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
+ * settings based on nor->info->no_sfdp_flags. This method should be called
+ * only by flashes that do not define SFDP tables. If the flash supports SFDP
+ * but the information is wrong and the settings from this function cannot be
+ * retrieved by parsing SFDP, one should instead use the fixup hooks to update
+ * the wrong bits.
* @nor: pointer to a 'struct spi_nor'.
*/
-static void spi_nor_info_init_params(struct spi_nor *nor)
+static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *map = &params->erase_map;
- const struct flash_info *info = nor->info;
- struct device_node *np = spi_nor_get_flash_node(nor);
+ const u8 no_sfdp_flags = nor->info->no_sfdp_flags;
u8 i, erase_mask;
- /* Initialize default flash parameters and settings. */
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
- params->setup = spi_nor_default_setup;
- params->otp.org = &info->otp_org;
-
- /* Default to 16-bit Write Status (01h) Command */
- nor->flags |= SNOR_F_HAS_16BIT_SR;
-
- /* Set SPI NOR sizes. */
- params->writesize = 1;
- params->size = (u64)info->sector_size * info->n_sectors;
- params->page_size = info->page_size;
-
- if (!(info->flags & SPI_NOR_NO_FR)) {
- /* Default to Fast Read for DT and non-DT platform devices. */
- params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
-
- /* Mask out Fast Read if not requested at DT instantiation. */
- if (np && !of_property_read_bool(np, "m25p,fast-read"))
- params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
- }
-
- /* (Fast) Read settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_READ;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
- 0, 0, SPINOR_OP_READ,
- SNOR_PROTO_1_1_1);
-
- if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
- 0, 8, SPINOR_OP_READ_FAST,
- SNOR_PROTO_1_1_1);
-
- if (info->flags & SPI_NOR_DUAL_READ) {
+ if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
0, 8, SPINOR_OP_READ_1_1_2,
SNOR_PROTO_1_1_2);
}
- if (info->flags & SPI_NOR_QUAD_READ) {
+ if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
0, 8, SPINOR_OP_READ_1_1_4,
SNOR_PROTO_1_1_4);
}
- if (info->flags & SPI_NOR_OCTAL_READ) {
+ if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
0, 8, SPINOR_OP_READ_1_1_8,
SNOR_PROTO_1_1_8);
}
- if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
+ if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
0, 20, SPINOR_OP_READ_FAST,
SNOR_PROTO_8_8_8_DTR);
}
- /* Page Program settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_PP;
- spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
- SPINOR_OP_PP, SNOR_PROTO_1_1_1);
-
- if (info->flags & SPI_NOR_OCTAL_DTR_PP) {
+ if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
/*
* Since xSPI Page Program opcode is backward compatible with
@@ -2625,52 +2616,111 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
*/
erase_mask = 0;
i = 0;
- if (info->flags & SECT_4K_PMC) {
+ if (no_sfdp_flags & SECT_4K_PMC) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K_PMC);
i++;
- } else if (info->flags & SECT_4K) {
+ } else if (no_sfdp_flags & SECT_4K) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K);
i++;
}
erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
+ spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size,
SPINOR_OP_SE);
spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
/**
- * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
- * after SFDP has been parsed (is also called for SPI NORs that do not
- * support RDSFDP).
+ * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
+ * in the JESD216 SFDP standard and thus cannot be retrieved when parsing SFDP.
* @nor: pointer to a 'struct spi_nor'
- *
- * Typically used to tweak various parameters that could not be extracted by
- * other means (i.e. when information provided by the SFDP/flash_info tables
- * are incomplete or wrong).
*/
-static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+static void spi_nor_init_flags(struct spi_nor *nor)
{
- if (nor->manufacturer && nor->manufacturer->fixups &&
- nor->manufacturer->fixups->post_sfdp)
- nor->manufacturer->fixups->post_sfdp(nor);
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ const u16 flags = nor->info->flags;
+
+ if (of_property_read_bool(np, "broken-flash-reset"))
+ nor->flags |= SNOR_F_BROKEN_RESET;
- if (nor->info->fixups && nor->info->fixups->post_sfdp)
- nor->info->fixups->post_sfdp(nor);
+ if (flags & SPI_NOR_SWP_IS_VOLATILE)
+ nor->flags |= SNOR_F_SWP_IS_VOLATILE;
+
+ if (flags & SPI_NOR_HAS_LOCK)
+ nor->flags |= SNOR_F_HAS_LOCK;
+
+ if (flags & SPI_NOR_HAS_TB) {
+ nor->flags |= SNOR_F_HAS_SR_TB;
+ if (flags & SPI_NOR_TB_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
+ }
+
+ if (flags & SPI_NOR_4BIT_BP) {
+ nor->flags |= SNOR_F_HAS_4BIT_BP;
+ if (flags & SPI_NOR_BP3_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
+ }
+
+ if (flags & NO_CHIP_ERASE)
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+ if (flags & USE_CLSR)
+ nor->flags |= SNOR_F_USE_CLSR;
+
+ if (flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
+
+ /*
+ * Make sure the XSR_RDY flag is set before calling
+ * spi_nor_wait_till_ready(). Xilinx S3AN flashes share their
+ * manufacturer ID (MFR) with Atmel SPI NOR flashes.
+ */
+ if (flags & SPI_NOR_XSR_RDY)
+ nor->flags |= SNOR_F_READY_XSR_RDY;
+}
+
+/**
+ * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that cannot
+ * be discovered by SFDP for this particular flash because the SFDP table that
+ * indicates this support is not defined in the flash. If the table for this
+ * support is defined but has wrong values, use a post_sfdp() hook instead to
+ * set the equivalent SNOR_F flag.
+ * @nor: pointer to a 'struct spi_nor'
+ */
+static void spi_nor_init_fixup_flags(struct spi_nor *nor)
+{
+ const u8 fixup_flags = nor->info->fixup_flags;
+
+ if (fixup_flags & SPI_NOR_4B_OPCODES)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
+ nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
}
/**
* spi_nor_late_init_params() - Late initialization of default flash parameters.
* @nor: pointer to a 'struct spi_nor'
*
- * Used to set default flash parameters and settings when the ->default_init()
- * hook or the SFDP parser let voids.
+ * Used to initialize flash parameters that are not declared in the JESD216
+ * SFDP standard, or where SFDP tables are not defined at all.
+ * Will replace the spi_nor_manufacturer_init_params() method.
*/
static void spi_nor_late_init_params(struct spi_nor *nor)
{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->late_init)
+ nor->manufacturer->fixups->late_init(nor);
+
+ if (nor->info->fixups && nor->info->fixups->late_init)
+ nor->info->fixups->late_init(nor);
+
+ spi_nor_init_flags(nor);
+ spi_nor_init_fixup_flags(nor);
+
/*
* NOR protection support. When locking_ops are not provided, we pick
* the default ones.
@@ -2680,6 +2730,99 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
}
/**
+ * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
+ * parameters and settings based on JESD216 SFDP standard.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The method has a roll-back mechanism: in case the SFDP parsing fails, the
+ * legacy flash parameters and settings will be restored.
+ */
+static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter sfdp_params;
+
+ memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
+
+ if (spi_nor_parse_sfdp(nor)) {
+ memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
+ nor->addr_width = 0;
+ nor->flags &= ~SNOR_F_4B_OPCODES;
+ }
+}
+
+/**
+ * spi_nor_init_params_deprecated() - Deprecated way of initializing flash
+ * parameters and settings.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The method assumes that the flash doesn't support SFDP, so it initializes
+ * the flash parameters in spi_nor_no_sfdp_init_params(), which can later be
+ * overwritten when parsing SFDP, if supported.
+ */
+static void spi_nor_init_params_deprecated(struct spi_nor *nor)
+{
+ spi_nor_no_sfdp_init_params(nor);
+
+ spi_nor_manufacturer_init_params(nor);
+
+ if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ |
+ SPI_NOR_OCTAL_READ |
+ SPI_NOR_OCTAL_DTR_READ))
+ spi_nor_sfdp_init_params_deprecated(nor);
+}
+
+/**
+ * spi_nor_init_default_params() - Default initialization of flash parameters
+ * and settings. Done for all flashes, regardless of whether they define SFDP
+ * tables or not.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_init_default_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ const struct flash_info *info = nor->info;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
+ params->setup = spi_nor_default_setup;
+ params->otp.org = &info->otp_org;
+
+ /* Default to 16-bit Write Status (01h) Command */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ /* Set SPI NOR sizes. */
+ params->writesize = 1;
+ params->size = (u64)info->sector_size * info->n_sectors;
+ params->page_size = info->page_size;
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ /* Default to Fast Read for DT and non-DT platform devices. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+
+ /* Mask out Fast Read if not requested at DT instantiation. */
+ if (np && !of_property_read_bool(np, "m25p,fast-read"))
+ params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+ }
+
+ /* (Fast) Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+ /* Page Program settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+}
+
+/**
* spi_nor_init_params() - Initialize the flash's parameters and settings.
* @nor: pointer to a 'struct spi_nor'.
*
@@ -2699,39 +2842,44 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
* which can be overwritten by:
* 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
* should be more accurate than the above.
- * spi_nor_sfdp_init_params()
+ * spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
*
* Please note that there is a ->post_bfpt() fixup hook that can overwrite
* the flash parameters and settings immediately after parsing the Basic
* Flash Parameter Table.
+ * spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
+ * It is used to tweak various flash parameters when information provided
+ * by the SFDP tables is wrong.
*
* which can be overwritten by:
- * 4/ Post SFDP flash parameters initialization. Used to tweak various
- * parameters that could not be extracted by other means (i.e. when
- * information provided by the SFDP/flash_info tables are incomplete or
- * wrong).
- * spi_nor_post_sfdp_fixups()
- *
- * 5/ Late default flash parameters initialization, used when the
- * ->default_init() hook or the SFDP parser do not set specific params.
+ * 4/ Late flash parameters initialization, used to initialize flash
+ * parameters that are not declared in the JESD216 SFDP standard, or where SFDP
+ * tables are not defined at all.
* spi_nor_late_init_params()
+ *
+ * Return: 0 on success, -errno otherwise.
*/
static int spi_nor_init_params(struct spi_nor *nor)
{
+ int ret;
+
nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
if (!nor->params)
return -ENOMEM;
- spi_nor_info_init_params(nor);
-
- spi_nor_manufacturer_init_params(nor);
+ spi_nor_init_default_params(nor);
- if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) &&
- !(nor->info->flags & SPI_NOR_SKIP_SFDP))
- spi_nor_sfdp_init_params(nor);
-
- spi_nor_post_sfdp_fixups(nor);
+ if (nor->info->parse_sfdp) {
+ ret = spi_nor_parse_sfdp(nor);
+ if (ret) {
+ dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
+ return ret;
+ }
+ } else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
+ spi_nor_no_sfdp_init_params(nor);
+ } else {
+ spi_nor_init_params_deprecated(nor);
+ }
spi_nor_late_init_params(nor);
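
As a sketch of step 4 above: with this patch a manufacturer driver can hook the
new ->late_init instead of ->default_init for parameters that SFDP cannot
provide. The names below are invented for illustration; the spansion
conversion later in this patch is a real instance.

	/* Hypothetical ->late_init fixup -- a sketch, not part of this patch. */
	static void example_late_init(struct spi_nor *nor)
	{
		/* Runs after SFDP parsing and the post_sfdp fixups. */
		nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
	}

	static const struct spi_nor_fixups example_fixups = {
		.late_init = example_late_init,
	};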
@@ -2978,59 +3126,6 @@ static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
return NULL;
}
-static int spi_nor_set_addr_width(struct spi_nor *nor)
-{
- if (nor->addr_width) {
- /* already configured from SFDP */
- } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
- /*
- * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
- * in this protocol an odd address width cannot be used because
- * then the address phase would only span a cycle and a half.
- * Half a cycle would be left over. We would then have to start
- * the dummy phase in the middle of a cycle and so too the data
- * phase, and we will end the transaction with half a cycle left
- * over.
- *
- * Force all 8D-8D-8D flashes to use an address width of 4 to
- * avoid this situation.
- */
- nor->addr_width = 4;
- } else if (nor->info->addr_width) {
- nor->addr_width = nor->info->addr_width;
- } else {
- nor->addr_width = 3;
- }
-
- if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
- }
-
- if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_dbg(nor->dev, "address width is too large: %u\n",
- nor->addr_width);
- return -EINVAL;
- }
-
- /* Set 4byte opcodes when possible. */
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
- !(nor->flags & SNOR_F_HAS_4BAIT))
- spi_nor_set_4byte_opcodes(nor);
-
- return 0;
-}
-
-static void spi_nor_debugfs_init(struct spi_nor *nor,
- const struct flash_info *info)
-{
- struct mtd_info *mtd = &nor->mtd;
-
- mtd->dbg.partname = info->name;
- mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
- info->id_len, info->id);
-}
-
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
const char *name)
{
@@ -3071,13 +3166,41 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
return info;
}
+static void spi_nor_set_mtd_info(struct spi_nor *nor)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ struct device *dev = nor->dev;
+
+ spi_nor_set_mtd_locking_ops(nor);
+ spi_nor_set_mtd_otp_ops(nor);
+
+ mtd->dev.parent = dev;
+ if (!mtd->name)
+ mtd->name = dev_name(dev);
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
+ if (nor->info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+ mtd->writesize = nor->params->writesize;
+ mtd->writebufsize = nor->params->page_size;
+ mtd->size = nor->params->size;
+ mtd->_erase = spi_nor_erase;
+ mtd->_read = spi_nor_read;
+ /* Might be already set by some SST flashes. */
+ if (!mtd->_write)
+ mtd->_write = spi_nor_write;
+ mtd->_suspend = spi_nor_suspend;
+ mtd->_resume = spi_nor_resume;
+ mtd->_get_device = spi_nor_get_device;
+ mtd->_put_device = spi_nor_put_device;
+}
+
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
const struct flash_info *info;
struct device *dev = nor->dev;
struct mtd_info *mtd = &nor->mtd;
- struct device_node *np = spi_nor_get_flash_node(nor);
int ret;
int i;
@@ -3094,7 +3217,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
* We need the bounce buffer early to read/write registers when going
* through the spi-mem layer (buffers have to be DMA-able).
* For spi-mem drivers, we'll reallocate a new buffer if
- * nor->page_size turns out to be greater than PAGE_SIZE (which
+ * nor->params->page_size turns out to be greater than PAGE_SIZE (which
* shouldn't happen before long since NOR pages are usually less
* than 1KB) after spi_nor_scan() returns.
*/
@@ -3110,102 +3233,31 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
nor->info = info;
- spi_nor_debugfs_init(nor, info);
-
mutex_init(&nor->lock);
- /*
- * Make sure the XSR_RDY flag is set before calling
- * spi_nor_wait_till_ready(). Xilinx S3AN share MFR
- * with Atmel SPI NOR.
- */
- if (info->flags & SPI_NOR_XSR_RDY)
- nor->flags |= SNOR_F_READY_XSR_RDY;
-
- if (info->flags & SPI_NOR_HAS_LOCK)
- nor->flags |= SNOR_F_HAS_LOCK;
-
- mtd->_write = spi_nor_write;
-
/* Init flash parameters based on flash_info struct and SFDP */
ret = spi_nor_init_params(nor);
if (ret)
return ret;
- if (!mtd->name)
- mtd->name = dev_name(dev);
- mtd->priv = nor;
- mtd->type = MTD_NORFLASH;
- mtd->writesize = nor->params->writesize;
- mtd->flags = MTD_CAP_NORFLASH;
- mtd->size = nor->params->size;
- mtd->_erase = spi_nor_erase;
- mtd->_read = spi_nor_read;
- mtd->_suspend = spi_nor_suspend;
- mtd->_resume = spi_nor_resume;
- mtd->_get_device = spi_nor_get_device;
- mtd->_put_device = spi_nor_put_device;
-
- if (info->flags & USE_FSR)
- nor->flags |= SNOR_F_USE_FSR;
- if (info->flags & SPI_NOR_HAS_TB) {
- nor->flags |= SNOR_F_HAS_SR_TB;
- if (info->flags & SPI_NOR_TB_SR_BIT6)
- nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
- }
-
- if (info->flags & NO_CHIP_ERASE)
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
- if (info->flags & USE_CLSR)
- nor->flags |= SNOR_F_USE_CLSR;
- if (info->flags & SPI_NOR_SWP_IS_VOLATILE)
- nor->flags |= SNOR_F_SWP_IS_VOLATILE;
-
- if (info->flags & SPI_NOR_4BIT_BP) {
- nor->flags |= SNOR_F_HAS_4BIT_BP;
- if (info->flags & SPI_NOR_BP3_SR_BIT6)
- nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
- }
-
- if (info->flags & SPI_NOR_NO_ERASE)
- mtd->flags |= MTD_NO_ERASE;
-
- mtd->dev.parent = dev;
- nor->page_size = nor->params->page_size;
- mtd->writebufsize = nor->page_size;
-
- if (of_property_read_bool(np, "broken-flash-reset"))
- nor->flags |= SNOR_F_BROKEN_RESET;
-
/*
* Configure the SPI memory:
* - select op codes for (Fast) Read, Page Program and Sector Erase.
* - set the number of dummy cycles (mode cycles + wait states).
* - set the SPI protocols for register and memory accesses.
+ * - set the address width.
*/
ret = spi_nor_setup(nor, hwcaps);
if (ret)
return ret;
- if (info->flags & SPI_NOR_4B_OPCODES)
- nor->flags |= SNOR_F_4B_OPCODES;
-
- if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE)
- nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
-
- ret = spi_nor_set_addr_width(nor);
- if (ret)
- return ret;
-
- spi_nor_register_locking_ops(nor);
-
/* Send all the required SPI flash commands to initialize device */
ret = spi_nor_init(nor);
if (ret)
return ret;
- /* Configure OTP parameters and ops */
- spi_nor_otp_init(nor);
+ /* No mtd_info fields should be used up to this point. */
+ spi_nor_set_mtd_info(nor);
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
@@ -3238,7 +3290,7 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor)
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(0, NULL, 0)),
.offset = 0,
- .length = nor->mtd.size,
+ .length = nor->params->size,
};
struct spi_mem_op *op = &info.op_tmpl;
@@ -3269,7 +3321,7 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
.offset = 0,
- .length = nor->mtd.size,
+ .length = nor->params->size,
};
struct spi_mem_op *op = &info.op_tmpl;
@@ -3341,8 +3393,8 @@ static int spi_nor_probe(struct spi_mem *spimem)
* and add this logic so that if anyone ever adds support for such
* a NOR we don't end up with buffer overflows.
*/
- if (nor->page_size > PAGE_SIZE) {
- nor->bouncebuf_size = nor->page_size;
+ if (nor->params->page_size > PAGE_SIZE) {
+ nor->bouncebuf_size = nor->params->page_size;
devm_kfree(nor->dev, nor->bouncebuf);
nor->bouncebuf = devm_kmalloc(nor->dev,
nor->bouncebuf_size,
@@ -3389,8 +3441,8 @@ static void spi_nor_shutdown(struct spi_mem *spimem)
* encourage new users to add support to the spi-nor library, and simply bind
* against a generic string here (e.g., "jedec,spi-nor").
*
- * Many flash names are kept here in this list (as well as in spi-nor.c) to
- * keep them available as module aliases for existing platforms.
+ * Many flash names are kept in this list to keep them available as
+ * module aliases for existing platforms.
*/
static const struct spi_device_id spi_nor_dev_ids[] = {
/*
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 3348e1dd1445..2afb610853a9 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -250,7 +250,7 @@ struct spi_nor_otp {
* higher index in the array, the higher priority.
* @erase_map: the erase map parsed from the SFDP Sector Map Parameter
* Table.
- * @otp_info: describes the OTP regions.
+ * @otp: SPI NOR OTP info.
* @octal_dtr_enable: enables SPI NOR octal DTR mode.
* @quad_enable: enables SPI NOR quad mode.
* @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
@@ -262,7 +262,6 @@ struct spi_nor_otp {
* e.g. different opcodes, specific address calculation,
* page size, etc.
* @locking_ops: SPI NOR locking methods.
- * @otp: SPI NOR OTP methods.
*/
struct spi_nor_flash_parameter {
u64 size;
@@ -298,6 +297,9 @@ struct spi_nor_flash_parameter {
* parameters that could not be extracted by other means (i.e.
* when information provided by the SFDP/flash_info tables are
* incomplete or wrong).
+ * @late_init: used to initialize flash parameters that are not declared in the
+ *	      JESD216 SFDP standard, or where SFDP tables are not defined at
+ *	      all. Will replace the default_init() hook.
+ * Will replace the default_init() hook.
*
* Those hooks can be used to tweak the SPI NOR configuration when the SFDP
* table is broken or not available.
@@ -308,89 +310,121 @@ struct spi_nor_fixups {
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt);
void (*post_sfdp)(struct spi_nor *nor);
+ void (*late_init)(struct spi_nor *nor);
};
+/**
+ * struct flash_info - SPI NOR flash_info entry.
+ * @name: the name of the flash.
+ * @id: the flash's ID bytes. The first three bytes are the
+ *              JEDEC ID. JEDEC ID zero means "no ID" (mostly older chips).
+ * @id_len: the number of bytes of ID.
+ * @sector_size: the size listed here is what works with SPINOR_OP_SE, which
+ * isn't necessarily called a "sector" by the vendor.
+ * @n_sectors: the number of sectors.
+ * @page_size: the flash's page size.
+ * @addr_width: the flash's address width.
+ *
+ * @parse_sfdp: true when flash supports SFDP tables. The false value has no
+ * meaning. If one wants to skip the SFDP tables, one should
+ * instead use the SPI_NOR_SKIP_SFDP sfdp_flag.
+ * @flags: flags that indicate support that is not defined by the
+ * JESD216 standard in its SFDP tables. Flag meanings:
+ * SPI_NOR_HAS_LOCK: flash supports lock/unlock via SR
+ * SPI_NOR_HAS_TB: flash SR has Top/Bottom (TB) protect bit. Must be
+ * used with SPI_NOR_HAS_LOCK.
+ * SPI_NOR_TB_SR_BIT6: Top/Bottom (TB) is bit 6 of status register.
+ * Must be used with SPI_NOR_HAS_TB.
+ * SPI_NOR_4BIT_BP: flash SR has 4 bit fields (BP0-3) for block
+ * protection.
+ * SPI_NOR_BP3_SR_BIT6: BP3 is bit 6 of status register. Must be used with
+ * SPI_NOR_4BIT_BP.
+ * SPI_NOR_SWP_IS_VOLATILE: flash has volatile software write protection bits.
+ *                            Usually these will power up in a write-protected
+ * state.
+ * SPI_NOR_NO_ERASE: no erase command needed.
+ * NO_CHIP_ERASE: chip does not support chip erase.
+ * SPI_NOR_NO_FR: can't do fastread.
+ * USE_CLSR: use CLSR command.
+ *   USE_FSR:                use the flag status register.
+ * SPI_NOR_XSR_RDY: S3AN flashes have specific opcode to read the
+ * status register.
+ *
+ * @no_sfdp_flags: flags that indicate support that can be discovered via SFDP.
+ * Used when SFDP tables are not defined in the flash. These
+ * flags are used together with the SPI_NOR_SKIP_SFDP flag.
+ * SPI_NOR_SKIP_SFDP: skip parsing of SFDP tables.
+ * SECT_4K: SPINOR_OP_BE_4K works uniformly.
+ * SECT_4K_PMC: SPINOR_OP_BE_4K_PMC works uniformly.
+ * SPI_NOR_DUAL_READ: flash supports Dual Read.
+ * SPI_NOR_QUAD_READ: flash supports Quad Read.
+ * SPI_NOR_OCTAL_READ: flash supports Octal Read.
+ * SPI_NOR_OCTAL_DTR_READ: flash supports octal DTR Read.
+ * SPI_NOR_OCTAL_DTR_PP: flash supports Octal DTR Page Program.
+ *
+ * @fixup_flags: flags that indicate support that can be discovered via SFDP
+ *                 ideally, but cannot be discovered for this particular flash
+ * because the SFDP table that indicates this support is not
+ * defined by the flash. In case the table for this support is
+ * defined but has wrong values, one should instead use a
+ * post_sfdp() hook to set the SNOR_F equivalent flag.
+ *
+ * SPI_NOR_4B_OPCODES: use dedicated 4byte address op codes to support
+ * memory size above 128Mib.
+ * SPI_NOR_IO_MODE_EN_VOLATILE: flash enables the best available I/O mode
+ * via a volatile bit.
+ * @mfr_flags: manufacturer private flags. Used in the manufacturer fixup
+ * hooks to differentiate support between flashes of the same
+ * manufacturer.
+ * @otp_org: flash's OTP organization.
+ * @fixups: part specific fixup hooks.
+ */
struct flash_info {
- char *name;
-
- /*
- * This array stores the ID bytes.
- * The first three bytes are the JEDIC ID.
- * JEDEC ID zero means "no ID" (mostly older chips).
- */
- u8 id[SPI_NOR_MAX_ID_LEN];
- u8 id_len;
-
- /* The size listed here is what works with SPINOR_OP_SE, which isn't
- * necessarily called a "sector" by the vendor.
- */
- unsigned sector_size;
- u16 n_sectors;
-
- u16 page_size;
- u16 addr_width;
-
- u32 flags;
-#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
-#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
-#define SST_WRITE BIT(2) /* use SST byte programming */
-#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
-#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
-#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
-#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
-#define USE_FSR BIT(7) /* use flag status register */
-#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
-#define SPI_NOR_HAS_TB BIT(9) /*
- * Flash SR has Top/Bottom (TB) protect
- * bit. Must be used with
- * SPI_NOR_HAS_LOCK.
- */
-#define SPI_NOR_XSR_RDY BIT(10) /*
- * S3AN flashes have specific opcode to
- * read the status register.
- */
-#define SPI_NOR_4B_OPCODES BIT(11) /*
- * Use dedicated 4byte address op codes
- * to support memory size above 128Mib.
- */
-#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
-#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
-#define USE_CLSR BIT(14) /* use CLSR command */
-#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
-#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
- * Top/Bottom (TB) is bit 6 of
- * status register. Must be used with
- * SPI_NOR_HAS_TB.
- */
-#define SPI_NOR_4BIT_BP BIT(17) /*
- * Flash SR has 4 bit fields (BP0-3)
- * for block protection.
- */
-#define SPI_NOR_BP3_SR_BIT6 BIT(18) /*
- * BP3 is bit 6 of status register.
- * Must be used with SPI_NOR_4BIT_BP.
- */
-#define SPI_NOR_OCTAL_DTR_READ BIT(19) /* Flash supports octal DTR Read. */
-#define SPI_NOR_OCTAL_DTR_PP BIT(20) /* Flash supports Octal DTR Page Program */
-#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(21) /*
- * Flash enables the best
- * available I/O mode via a
- * volatile bit.
- */
-#define SPI_NOR_SWP_IS_VOLATILE BIT(22) /*
- * Flash has volatile software write
- * protection bits. Usually these will
- * power-up in a write-protected state.
- */
+ char *name;
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+ unsigned sector_size;
+ u16 n_sectors;
+ u16 page_size;
+ u16 addr_width;
+
+ bool parse_sfdp;
+ u16 flags;
+#define SPI_NOR_HAS_LOCK BIT(0)
+#define SPI_NOR_HAS_TB BIT(1)
+#define SPI_NOR_TB_SR_BIT6 BIT(2)
+#define SPI_NOR_4BIT_BP BIT(3)
+#define SPI_NOR_BP3_SR_BIT6 BIT(4)
+#define SPI_NOR_SWP_IS_VOLATILE BIT(5)
+#define SPI_NOR_NO_ERASE BIT(6)
+#define NO_CHIP_ERASE BIT(7)
+#define SPI_NOR_NO_FR BIT(8)
+#define USE_CLSR BIT(9)
+#define USE_FSR BIT(10)
+#define SPI_NOR_XSR_RDY BIT(11)
+
+ u8 no_sfdp_flags;
+#define SPI_NOR_SKIP_SFDP BIT(0)
+#define SECT_4K BIT(1)
+#define SECT_4K_PMC BIT(2)
+#define SPI_NOR_DUAL_READ BIT(3)
+#define SPI_NOR_QUAD_READ BIT(4)
+#define SPI_NOR_OCTAL_READ BIT(5)
+#define SPI_NOR_OCTAL_DTR_READ BIT(6)
+#define SPI_NOR_OCTAL_DTR_PP BIT(7)
+
+ u8 fixup_flags;
+#define SPI_NOR_4B_OPCODES BIT(0)
+#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(1)
+
+ u8 mfr_flags;
const struct spi_nor_otp_organization otp_org;
-
- /* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
};
/* Used when the "_ext_id" is two bytes at most */
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors) \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
@@ -402,9 +436,8 @@ struct flash_info {
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .flags = (_flags),
-#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors) \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
@@ -417,14 +450,13 @@ struct flash_info {
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .flags = (_flags),
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.addr_width = (_addr_width), \
- .flags = (_flags),
+ .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \
#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
.id = { \
@@ -447,6 +479,21 @@ struct flash_info {
.n_regions = (_n_regions), \
},
+#define PARSE_SFDP \
+ .parse_sfdp = true, \
+
+#define FLAGS(_flags) \
+ .flags = (_flags), \
+
+#define NO_SFDP_FLAGS(_no_sfdp_flags) \
+ .no_sfdp_flags = (_no_sfdp_flags), \
+
+#define FIXUP_FLAGS(_fixup_flags) \
+ .fixup_flags = (_fixup_flags), \
+
+#define MFR_FLAGS(_mfr_flags) \
+ .mfr_flags = (_mfr_flags), \
+
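
Taken together, the new macros compose per entry; a hypothetical set of
entries (part names and JEDEC IDs invented for illustration) shows which
initialization path each one selects:

	/* Sketch only -- not part of this patch. */
	static const struct flash_info example_parts[] = {
		/* Mandates SFDP: spi_nor_parse_sfdp() runs and must succeed. */
		{ "ex25q64a", INFO(0x123456, 0, 64 * 1024, 128)
			PARSE_SFDP },
		/* Skips SFDP explicitly: spi_nor_no_sfdp_init_params() runs. */
		{ "ex25q64b", INFO(0x123457, 0, 64 * 1024, 128)
			FLAGS(SPI_NOR_HAS_LOCK)
			NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SECT_4K) },
		/* Legacy entry: spi_nor_init_params_deprecated() probes SFDP. */
		{ "ex25q64c", INFO(0x123458, 0, 64 * 1024, 128)
			NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ)
			FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
	};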
/**
* struct spi_nor_manufacturer - SPI NOR manufacturer object
* @name: manufacturer name
@@ -549,12 +596,12 @@ int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
void spi_nor_init_default_locking_ops(struct spi_nor *nor);
void spi_nor_try_unlock_all(struct spi_nor *nor);
-void spi_nor_register_locking_ops(struct spi_nor *nor);
-void spi_nor_otp_init(struct spi_nor *nor);
+void spi_nor_set_mtd_locking_ops(struct spi_nor *nor);
+void spi_nor_set_mtd_otp_ops(struct spi_nor *nor);
-static struct spi_nor __maybe_unused *mtd_to_spi_nor(struct mtd_info *mtd)
+static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
{
- return mtd->priv;
+ return container_of(mtd, struct spi_nor, mtd);
}
#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c
index ddb8e3650835..4f3ee6331f37 100644
--- a/drivers/mtd/spi-nor/eon.c
+++ b/drivers/mtd/spi-nor/eon.c
@@ -10,21 +10,24 @@
static const struct flash_info eon_parts[] = {
/* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
- { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
- { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
- { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
- { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
- { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64) },
+ { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_eon = {
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
index cfc9218c1053..ace1da221566 100644
--- a/drivers/mtd/spi-nor/esmt.c
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -10,12 +10,15 @@
static const struct flash_info esmt_parts[] = {
/* ESMT */
- { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_HAS_LOCK) },
- { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_esmt = {
diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
index 04a177a32283..f6c6fb36a428 100644
--- a/drivers/mtd/spi-nor/everspin.c
+++ b/drivers/mtd/spi-nor/everspin.c
@@ -10,14 +10,10 @@
static const struct flash_info everspin_parts[] = {
/* Everspin */
- { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3,
- SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2) },
+ { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3) },
+ { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3) },
};
const struct spi_nor_manufacturer spi_nor_everspin = {
diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c
index e385d93e756c..5fa8f04f2e35 100644
--- a/drivers/mtd/spi-nor/fujitsu.c
+++ b/drivers/mtd/spi-nor/fujitsu.c
@@ -10,7 +10,8 @@
static const struct flash_info fujitsu_parts[] = {
/* Fujitsu */
- { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
+ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1)
+ FLAGS(SPI_NOR_NO_ERASE) },
};
const struct spi_nor_manufacturer spi_nor_fujitsu = {
diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
index 447d84bb2128..0807d0263808 100644
--- a/drivers/mtd/spi-nor/gigadevice.c
+++ b/drivers/mtd/spi-nor/gigadevice.c
@@ -19,36 +19,43 @@ static void gd25q256_default_init(struct spi_nor *nor)
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
}
-static struct spi_nor_fixups gd25q256_fixups = {
+static const struct spi_nor_fixups gd25q256_fixups = {
.default_init = gd25q256_default_init,
};
static const struct flash_info gigadevice_parts[] = {
- { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK |
- SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
+ { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512)
+ PARSE_SFDP
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &gd25q256_fixups },
};
diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c
index 8ece9cceb3cf..d64e114e9fb4 100644
--- a/drivers/mtd/spi-nor/intel.c
+++ b/drivers/mtd/spi-nor/intel.c
@@ -10,12 +10,12 @@
static const struct flash_info intel_parts[] = {
/* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32,
- SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64,
- SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128,
- SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
};
const struct spi_nor_manufacturer spi_nor_intel = {
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index 1e5bb5408b68..23629b919ade 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -25,44 +25,48 @@ is25lp256_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups is25lp256_fixups = {
+static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
static const struct flash_info issi_parts[] = {
/* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
- { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512)
+ PARSE_SFDP
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &is25lp256_fixups },
- { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
+ { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &is25lp256_fixups },
/* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
- { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2)
+ NO_SFDP_FLAGS(SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
};
static void issi_default_init(struct spi_nor *nor)
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 27498ed0cc0d..97dba1ae7fb1 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -28,65 +28,78 @@ mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups mx25l25635_fixups = {
+static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
};
static const struct flash_info macronix_parts[] = {
/* Macronix */
- { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
- { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
- { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
- { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
- { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, SECT_4K |
- SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP) },
- { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
- { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256) },
+ { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &mx25l25635_fixups },
- { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_4B_OPCODES) },
- { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512) },
+ { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048,
- SPI_NOR_QUAD_READ) },
- { "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096,
- SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048)
+ NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
+ { "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
};
static void macronix_default_init(struct spi_nor *nor)
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index f3d19b716b7b..bb95b1aabf74 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -13,6 +13,7 @@
#define SPINOR_OP_MT_WR_ANY_REG 0x81 /* Write volatile register */
#define SPINOR_REG_MT_CFR0V 0x00 /* For setting octal DTR mode */
#define SPINOR_REG_MT_CFR1V 0x01 /* For setting dummy cycles */
+#define SPINOR_REG_MT_CFR1V_DEF 0x1f /* Default dummy cycles */
#define SPINOR_MT_OCT_DTR 0xe7 /* Enable Octal DTR. */
#define SPINOR_MT_EXSPI 0xff /* Enable Extended SPI (default) */
@@ -48,17 +49,28 @@ static int spi_nor_micron_octal_dtr_enable(struct spi_nor *nor, bool enable)
if (ret)
return ret;
- if (enable)
- *buf = SPINOR_MT_OCT_DTR;
- else
- *buf = SPINOR_MT_EXSPI;
+ if (enable) {
+ buf[0] = SPINOR_MT_OCT_DTR;
+ } else {
+ /*
+ * The register is 1-byte wide, but 1-byte transactions are not
+ * allowed in 8D-8D-8D mode. The next register is the dummy
+ * cycle configuration register. Since the transaction needs to
+ * be at least 2 bytes wide, set the next register to its
+ * default value. This also makes sense because the value was
+ * changed when enabling 8D-8D-8D mode, it should be reset when
+ * disabling.
+ */
+ buf[0] = SPINOR_MT_EXSPI;
+ buf[1] = SPINOR_REG_MT_CFR1V_DEF;
+ }
op = (struct spi_mem_op)
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
SPI_MEM_OP_ADDR(enable ? 3 : 4,
SPINOR_REG_MT_CFR0V, 1),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, buf, 1));
+ SPI_MEM_OP_DATA_OUT(enable ? 1 : 2, buf, 1));
if (!enable)
spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
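
For clarity, the disable path above now issues a single two-byte register
write starting at CFR0V; a sketch of the resulting operation, using only
definitions from this file:

	u8 buf[2] = { SPINOR_MT_EXSPI, SPINOR_REG_MT_CFR1V_DEF };
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
			   SPI_MEM_OP_ADDR(4, SPINOR_REG_MT_CFR0V, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(2, buf, 1));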
@@ -113,116 +125,128 @@ static void mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
nor->params->quad_enable = NULL;
}
-static struct spi_nor_fixups mt35xu512aba_fixups = {
+static const struct spi_nor_fixups mt35xu512aba_fixups = {
.default_init = mt35xu512aba_default_init,
.post_sfdp = mt35xu512aba_post_sfdp_fixup,
};
static const struct flash_info micron_parts[] = {
- { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES | SPI_NOR_OCTAL_DTR_READ |
- SPI_NOR_OCTAL_DTR_PP |
- SPI_NOR_IO_MODE_EN_VOLATILE)
- .fixups = &mt35xu512aba_fixups},
- { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES) },
+ { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512)
+ FLAGS(USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ |
+ SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE)
+ .fixups = &mt35xu512aba_fixups},
+ { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048)
+ FLAGS(USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
};
static const struct flash_info st_parts[] = {
- { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64,
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6 | USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6 | USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512)
+ FLAGS(USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512)
+ FLAGS(USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64,
+ { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512)
+ FLAGS(USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512)
+ FLAGS(USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024)
+ FLAGS(USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6 | USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024)
+ FLAGS(USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6 | USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+ SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE | USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048)
+ FLAGS(NO_CHIP_ERASE | USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096)
+ FLAGS(NO_CHIP_ERASE | USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
+ { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096)
+ FLAGS(NO_CHIP_ERASE | USE_FSR)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
- { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
- USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
- { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6 |
- NO_CHIP_ERASE) },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
- { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
- { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
-
- { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
- { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
- { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
- { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
- { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
- { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
- { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
- { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
- { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
-
- { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
- { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
- { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
- { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
- { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
- { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
- { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
- { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
- { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
-
- { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
- { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
- { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
-
- { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
- { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
- { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
-
- { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
- { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
- { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
+
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128) },
+ { "m25px80", INFO(0x207114, 0, 64 * 1024, 16) },
};
/**
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index 983e40b19134..fa63d8571218 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -480,7 +480,7 @@ out:
return ret;
}
-void spi_nor_otp_init(struct spi_nor *nor)
+void spi_nor_set_mtd_otp_ops(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index c500c2118a5d..a5211543d30d 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -1229,6 +1229,25 @@ out:
}
/**
+ * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
+ * after SFDP has been parsed. Called only for flashes that define JESD216 SFDP
+ * tables.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Used to tweak various flash parameters when the information provided by the
+ * SFDP tables is wrong.
+ */
+static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_sfdp)
+ nor->manufacturer->fixups->post_sfdp(nor);
+
+ if (nor->info->fixups && nor->info->fixups->post_sfdp)
+ nor->info->fixups->post_sfdp(nor);
+}
+
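
As a reference for the ordering enforced here (manufacturer hook first, then
part-specific hook), a minimal part-level fixup could look like the following
sketch (names invented; mt35xu512aba_post_sfdp_fixup earlier in this patch is
a real instance):

	/* Hypothetical ->post_sfdp fixup -- a sketch, not part of this patch. */
	static void example_post_sfdp_fixup(struct spi_nor *nor)
	{
		/* Correct a capability the flash's SFDP tables misreport. */
		nor->params->hwcaps.mask &= ~SNOR_HWCAPS_READ_1_1_4;
	}

	static const struct spi_nor_fixups example_fixups = {
		.post_sfdp = example_post_sfdp_fixup,
	};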
+/**
* spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
*
@@ -1408,6 +1427,7 @@ int spi_nor_parse_sfdp(struct spi_nor *nor)
}
}
+ spi_nor_post_sfdp_fixups(nor);
exit:
kfree(param_headers);
return err;
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index ee82dcd75310..534196b1d3e7 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -65,10 +65,18 @@ static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable)
if (ret)
return ret;
- if (enable)
- *buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN;
- else
- *buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS;
+ if (enable) {
+ buf[0] = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN;
+ } else {
+ /*
+ * The register is 1-byte wide, but 1-byte transactions are not
+ * allowed in 8D-8D-8D mode. Since there is no register at the
+ * next location, just initialize the value to 0 and let the
+ * transaction go on.
+ */
+ buf[0] = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS;
+ buf[1] = 0;
+ }
op = (struct spi_mem_op)
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
@@ -76,7 +84,7 @@ static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable)
SPINOR_REG_CYPRESS_CFR5V,
1),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, buf, 1));
+ SPI_MEM_OP_DATA_OUT(enable ? 1 : 2, buf, 1));
if (!enable)
spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
@@ -168,7 +176,7 @@ static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups s28hs512t_fixups = {
+static const struct spi_nor_fixups s28hs512t_fixups = {
.default_init = s28hs512t_default_init,
.post_sfdp = s28hs512t_post_sfdp_fixup,
.post_bfpt = s28hs512t_post_bfpt_fixup,
@@ -190,7 +198,7 @@ s25fs_s_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups s25fs_s_fixups = {
+static const struct spi_nor_fixups s25fs_s_fixups = {
.post_bfpt = s25fs_s_post_bfpt_fixups,
};
@@ -198,85 +206,95 @@ static const struct flash_info spansion_parts[] = {
/* Spansion/Cypress -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | USE_CLSR) },
- { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
- .fixups = &s25fs_s_fixups, },
- { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
- .fixups = &s25fs_s_fixups, },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
- { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
- { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
- { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1,
- SPI_NOR_NO_ERASE) },
- { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256,
- SECT_4K | SPI_NOR_OCTAL_DTR_READ |
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64)
+ FLAGS(USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256)
+ FLAGS(USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128)
+ FLAGS(USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512)
+ FLAGS(USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256)
+ FLAGS(USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &s25fs_s_fixups, },
+ { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128)
+ FLAGS(USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512)
+ FLAGS(USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256)
+ FLAGS(USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &s25fs_s_fixups, },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64)
+ FLAGS(USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256)
+ FLAGS(USE_CLSR)
+ NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128) },
+ { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
+ FLAGS(SPI_NOR_NO_ERASE) },
+ { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_DTR_READ |
SPI_NOR_OCTAL_DTR_PP)
- .fixups = &s28hs512t_fixups,
+ .fixups = &s28hs512t_fixups,
},
};
-static void spansion_post_sfdp_fixups(struct spi_nor *nor)
+static void spansion_late_init(struct spi_nor *nor)
{
if (nor->params->size <= SZ_16M)
return;
@@ -288,7 +306,7 @@ static void spansion_post_sfdp_fixups(struct spi_nor *nor)
}
static const struct spi_nor_fixups spansion_fixups = {
- .post_sfdp = spansion_post_sfdp_fixups,
+ .late_init = spansion_late_init,
};
const struct spi_nor_manufacturer spi_nor_spansion = {
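
The rename above is part of a wider switch, repeated for sst, winbond and xilinx below, from the .post_sfdp hook to a new .late_init hook. A sketch of the hook ordering, assuming the struct spi_nor_fixups layout this series converges on (the authoritative definition is in drivers/mtd/spi-nor/core.h; post_bfpt's SFDP arguments are elided here):

struct spi_nor;

/* Illustrative only - see core.h for the real struct spi_nor_fixups. */
struct fixups_sketch {
	void (*default_init)(struct spi_nor *nor);	/* before SFDP parsing */
	int  (*post_bfpt)(struct spi_nor *nor);		/* per parsed BFPT table */
	void (*post_sfdp)(struct spi_nor *nor);		/* after SFDP parsing */
	void (*late_init)(struct spi_nor *nor);		/* last of all, with or
							 * without SFDP data */
};

Hooks that merely install function pointers or final overrides (spansion's >16 MiB handling, sst26vf's locking ops, SST's byte-program write, xilinx's setup op) migrate to late_init, which presumably guarantees they apply regardless of whether SFDP tables were found or how parsing went.
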
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 980f4c09c91d..30183e9189b9 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -8,6 +8,9 @@
#include "core.h"
+/* SST flash_info mfr_flag. Used to specify SST byte programming. */
+#define SST_WRITE BIT(0)
+
#define SST26VF_CR_BPNV BIT(3)
static int sst26vf_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
@@ -46,48 +49,71 @@ static const struct spi_nor_locking_ops sst26vf_locking_ops = {
.is_locked = sst26vf_is_locked,
};
-static void sst26vf_default_init(struct spi_nor *nor)
+static void sst26vf_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &sst26vf_locking_ops;
}
static const struct spi_nor_fixups sst26vf_fixups = {
- .default_init = sst26vf_default_init,
+ .late_init = sst26vf_late_init,
};
static const struct flash_info sst_parts[] = {
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_4BIT_BP | SPI_NOR_HAS_LOCK |
- SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK) },
- { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16,
- SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
- { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ |
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP |
+ SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K)
+ MFR_FLAGS(SST_WRITE) },
+ { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
- { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &sst26vf_fixups },
};
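
Both tables above (and every flash_info table below) switch from packing everything into INFO()'s last argument to dedicated initializer macros. A rough sketch of what they presumably expand to - the real definitions live in drivers/mtd/spi-nor/core.h, and the field names here are only illustrative:

/* Illustrative expansion only; check core.h for the real macros. */
#define FLAGS(_flags)		.flags = (_flags),
#define NO_SFDP_FLAGS(_flags)	.no_sfdp_flags = (_flags),
#define FIXUP_FLAGS(_flags)	.fixup_flags = (_flags),
#define MFR_FLAGS(_flags)	.mfr_flags = (_flags),
#define PARSE_SFDP		.parse_sfdp = true,

The split separates generic capabilities (FLAGS) from parameters that only matter when no SFDP tables exist (NO_SFDP_FLAGS), corrections applied on top of parsed SFDP data (FIXUP_FLAGS, e.g. SPI_NOR_4B_OPCODES), and manufacturer-private bits such as SST_WRITE, which moves out of the shared flags word into info->mfr_flags. PARSE_SFDP, used for w25q256jvm further below, marks parts fully described by their SFDP tables.
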
@@ -177,14 +203,14 @@ out:
return ret;
}
-static void sst_post_sfdp_fixups(struct spi_nor *nor)
+static void sst_late_init(struct spi_nor *nor)
{
- if (nor->info->flags & SST_WRITE)
+ if (nor->info->mfr_flags & SST_WRITE)
nor->mtd._write = sst_write;
}
static const struct spi_nor_fixups sst_fixups = {
- .post_sfdp = sst_post_sfdp_fixups,
+ .late_init = sst_late_init,
};
const struct spi_nor_manufacturer spi_nor_sst = {
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index 8594bcbb7dbe..1f178313ba8f 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -414,7 +414,7 @@ void spi_nor_try_unlock_all(struct spi_nor *nor)
dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
}
-void spi_nor_register_locking_ops(struct spi_nor *nor)
+void spi_nor_set_mtd_locking_ops(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 96573f61caf5..675f32c136b3 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -28,80 +28,105 @@ w25q256_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
-static struct spi_nor_fixups w25q256_fixups = {
+static const struct spi_nor_fixups w25q256_fixups = {
.post_bfpt = w25q256_post_bfpt_fixups,
};
static const struct flash_info winbond_parts[] = {
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
- { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
- { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
- { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
- { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
- SPI_NOR_HAS_TB) },
- { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- OTP_INFO(256, 3, 0x1000, 0x1000)
- },
-
- { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- OTP_INFO(256, 3, 0x1000, 0x1000) },
- { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
- { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &w25q256_fixups },
- { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
- { "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ OTP_INFO(256, 3, 0x1000, 0x1000) },
+ { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ OTP_INFO(256, 3, 0x1000, 0x1000) },
+ { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &w25q256_fixups },
+ { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512)
+ PARSE_SFDP },
+ { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ |
+ SPI_NOR_DUAL_READ) },
+ { "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
};
/**
@@ -147,12 +172,17 @@ static const struct spi_nor_otp_ops winbond_otp_ops = {
static void winbond_default_init(struct spi_nor *nor)
{
nor->params->set_4byte_addr_mode = winbond_set_4byte_addr_mode;
+}
+
+static void winbond_late_init(struct spi_nor *nor)
+{
if (nor->params->otp.org->n_regions)
nor->params->otp.ops = &winbond_otp_ops;
}
static const struct spi_nor_fixups winbond_fixups = {
.default_init = winbond_default_init,
+ .late_init = winbond_late_init,
};
const struct spi_nor_manufacturer spi_nor_winbond = {
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
index 1138bdbf4199..580562bc1e45 100644
--- a/drivers/mtd/spi-nor/xilinx.c
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -28,11 +28,12 @@ static const struct flash_info xilinx_parts[] = {
*/
static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
{
+ u32 page_size = nor->params->page_size;
u32 offset, page;
- offset = addr % nor->page_size;
- page = addr / nor->page_size;
- page <<= (nor->page_size > 512) ? 10 : 9;
+ offset = addr % page_size;
+ page = addr / page_size;
+ page <<= (page_size > 512) ? 10 : 9;
return page | offset;
}
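
s3an_convert_addr() maps a linear address into the S3AN "default addressing" scheme, where each page sits in a power-of-two slot wider than its actual size. A self-contained user-space sketch of the same arithmetic, assuming the 264-byte-page variant:

#include <stdint.h>
#include <stdio.h>

/* Same math as s3an_convert_addr() above for a 264-byte page:
 * pages of 512 bytes or less get a 9-bit in-page offset field. */
static uint32_t s3an_addr_264(uint32_t addr)
{
	uint32_t page_size = 264;
	uint32_t offset = addr % page_size;
	uint32_t page = addr / page_size;

	return (page << 9) | offset;
}

int main(void)
{
	/* byte 300 is page 1, offset 36 -> (1 << 9) | 36 = 0x224 */
	printf("0x%x\n", s3an_addr_264(300));
	return 0;
}
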
@@ -40,6 +41,7 @@ static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
static int xilinx_nor_setup(struct spi_nor *nor,
const struct spi_nor_hwcaps *hwcaps)
{
+ u32 page_size;
int ret;
ret = spi_nor_xread_sr(nor, nor->bouncebuf);
@@ -64,10 +66,11 @@ static int xilinx_nor_setup(struct spi_nor *nor,
*/
if (nor->bouncebuf[0] & XSR_PAGESIZE) {
/* Flash in Power of 2 mode */
- nor->page_size = (nor->page_size == 264) ? 256 : 512;
- nor->mtd.writebufsize = nor->page_size;
- nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
- nor->mtd.erasesize = 8 * nor->page_size;
+ page_size = (nor->params->page_size == 264) ? 256 : 512;
+ nor->params->page_size = page_size;
+ nor->mtd.writebufsize = page_size;
+ nor->params->size = 8 * page_size * nor->info->n_sectors;
+ nor->mtd.erasesize = 8 * page_size;
} else {
/* Flash in Default addressing mode */
nor->params->convert_addr = s3an_convert_addr;
@@ -77,13 +80,13 @@ static int xilinx_nor_setup(struct spi_nor *nor,
return 0;
}
-static void xilinx_post_sfdp_fixups(struct spi_nor *nor)
+static void xilinx_late_init(struct spi_nor *nor)
{
nor->params->setup = xilinx_nor_setup;
}
static const struct spi_nor_fixups xilinx_fixups = {
- .post_sfdp = xilinx_post_sfdp_fixups,
+ .late_init = xilinx_late_init,
};
const struct spi_nor_manufacturer spi_nor_xilinx = {
diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c
index 2c7773b68993..2992af03cb0a 100644
--- a/drivers/mtd/spi-nor/xmc.c
+++ b/drivers/mtd/spi-nor/xmc.c
@@ -10,10 +10,12 @@
static const struct flash_info xmc_parts[] = {
/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
- { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256)
+ NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
};
const struct spi_nor_manufacturer spi_nor_xmc = {
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 062e6c2c45f5..a78fdf3b30f7 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -294,6 +294,8 @@ static void ubiblock_do_work(struct work_struct *work)
int ret;
struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
struct request *req = blk_mq_rq_from_pdu(pdu);
+ struct req_iterator iter;
+ struct bio_vec bvec;
blk_mq_start_request(req);
@@ -305,7 +307,9 @@ static void ubiblock_do_work(struct work_struct *work)
blk_rq_map_sg(req->q, req, pdu->usgl.sg);
ret = ubiblock_read(pdu);
- rq_flush_dcache_pages(req);
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
blk_mq_end_request(req, errno_to_blk_status(ret));
}
@@ -426,6 +430,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
ret = -ENODEV;
goto out_cleanup_disk;
}
+ gd->flags |= GENHD_FL_NO_PART;
gd->private_data = dev;
sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
set_capacity(gd, disk_capacity);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 3c0c8eca4d51..31d427ee191a 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -562,7 +562,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
}
/**
- * dbg_debug_exit_dev - free all debugfs files corresponding to device @ubi
+ * ubi_debugfs_exit_dev - free all debugfs files corresponding to device @ubi
* @ubi: UBI device description object
*/
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6cccc3dc00bc..b2a4f998c180 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -81,7 +81,6 @@ config WIREGUARD
select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
- select CRYPTO_LIB_BLAKE2S
select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index b732ee9a50ef..f1a36d7e2151 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -11,6 +11,7 @@
#include <linux/net.h>
#include <linux/igmp.h>
#include <linux/workqueue.h>
+#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/udp.h>
@@ -1106,7 +1107,7 @@ static bool amt_send_membership_query(struct amt_dev *amt,
rt = ip_route_output_key(amt->net, &fl4);
if (IS_ERR(rt)) {
netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
- return -1;
+ return true;
}
amtmq = skb_push(skb, sizeof(*amtmq));
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 5566daefbff4..d558535390f9 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -23,6 +23,7 @@
* of the GNU General Public License, incorporated herein by reference.
*/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index edffc3489a12..ba587e5fc24f 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -38,6 +38,13 @@ struct bareudp_net {
struct list_head bareudp_list;
};
+struct bareudp_conf {
+ __be16 ethertype;
+ __be16 port;
+ u16 sport_min;
+ bool multi_proto_mode;
+};
+
/* Pseudo network device */
struct bareudp_dev {
struct net *net; /* netns for packet i/o */
@@ -602,7 +609,8 @@ static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
}
static int bareudp_configure(struct net *net, struct net_device *dev,
- struct bareudp_conf *conf)
+ struct bareudp_conf *conf,
+ struct netlink_ext_ack *extack)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
struct bareudp_dev *t, *bareudp = netdev_priv(dev);
@@ -611,13 +619,17 @@ static int bareudp_configure(struct net *net, struct net_device *dev,
bareudp->net = net;
bareudp->dev = dev;
t = bareudp_find_dev(bn, conf);
- if (t)
+ if (t) {
+ NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists");
return -EBUSY;
+ }
if (conf->multi_proto_mode &&
(conf->ethertype != htons(ETH_P_MPLS_UC) &&
- conf->ethertype != htons(ETH_P_IP)))
+ conf->ethertype != htons(ETH_P_IP))) {
+ NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)");
return -EINVAL;
+ }
bareudp->port = conf->port;
bareudp->ethertype = conf->ethertype;
@@ -664,7 +676,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- err = bareudp_configure(net, dev, &conf);
+ err = bareudp_configure(net, dev, &conf, extack);
if (err)
return err;
@@ -721,40 +733,6 @@ static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
.fill_info = bareudp_fill_info,
};
-struct net_device *bareudp_dev_create(struct net *net, const char *name,
- u8 name_assign_type,
- struct bareudp_conf *conf)
-{
- struct nlattr *tb[IFLA_MAX + 1];
- struct net_device *dev;
- int err;
-
- memset(tb, 0, sizeof(tb));
- dev = rtnl_create_link(net, name, name_assign_type,
- &bareudp_link_ops, tb, NULL);
- if (IS_ERR(dev))
- return dev;
-
- err = bareudp_configure(net, dev, conf);
- if (err) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
- err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
- if (err)
- goto err;
-
- err = rtnl_configure_link(dev, NULL);
- if (err < 0)
- goto err;
-
- return dev;
-err:
- bareudp_dellink(dev, NULL);
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(bareudp_dev_create);
-
static __net_init int bareudp_init_net(struct net *net)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ff8da720a33a..238b56d77c36 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
+#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
@@ -71,6 +72,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
+#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
@@ -1096,9 +1098,6 @@ static bool bond_should_notify_peers(struct bonding *bond)
slave = rcu_dereference(bond->curr_active_slave);
rcu_read_unlock();
- netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
- slave ? slave->dev->name : "NULL");
-
if (!slave || !bond->send_peer_notif ||
bond->send_peer_notif %
max(1, bond->params.peer_notif_delay) != 0 ||
@@ -1106,6 +1105,9 @@ static bool bond_should_notify_peers(struct bonding *bond)
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
+ netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
+ slave ? slave->dev->name : "NULL");
+
return true;
}
@@ -1460,7 +1462,7 @@ done:
bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
bond_dev->mpls_features = mpls_features;
- bond_dev->gso_max_segs = gso_max_segs;
+ netif_set_gso_max_segs(bond_dev, gso_max_segs);
netif_set_gso_max_size(bond_dev, gso_max_size);
bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -3129,8 +3131,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!bond_time_in_interval(bond, trans_start, 2) ||
- !bond_time_in_interval(bond, slave->last_rx, 2)) {
+ if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
slave_state_changed = 1;
@@ -3224,7 +3226,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
/* Backup slave is down if:
* - No current_arp_slave AND
- * - more than 3*delta since last receive AND
+ * - more than (missed_max+1)*delta since last receive AND
* - the bond has an IP address
*
* Note: a non-null current_arp_slave indicates
@@ -3236,20 +3238,20 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
if (!bond_is_active_slave(slave) &&
!rcu_access_pointer(bond->current_arp_slave) &&
- !bond_time_in_interval(bond, last_rx, 3)) {
+ !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
/* Active slave is down if:
- * - more than 2*delta since transmitting OR
- * - (more than 2*delta since receive AND
+ * - more than missed_max*delta since transmitting OR
+ * - (more than missed_max*delta since receive AND
* the bond has an IP address)
*/
trans_start = dev_trans_start(slave->dev);
if (bond_is_active_slave(slave) &&
- (!bond_time_in_interval(bond, trans_start, 2) ||
- !bond_time_in_interval(bond, last_rx, 2))) {
+ (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
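
Both hunks above replace the hard-coded 2x/3x arp_interval multipliers with the new arp_missed_max parameter (default 2, so out-of-the-box behaviour is unchanged; the netlink, sysfs and procfs plumbing follows below). A back-of-the-envelope model of the resulting detection latency - not kernel code:

/* Detection-time model implied by the hunks above: an active slave is
 * declared down after missed_max ARP intervals without traffic, a
 * backup slave after missed_max + 1 (one extra interval of grace). */
static unsigned long bond_arp_down_ms(unsigned long arp_interval_ms,
				      unsigned int missed_max,
				      int is_backup)
{
	unsigned int mult = is_backup ? missed_max + 1 : missed_max;

	return arp_interval_ms * mult;
}

/* arp_interval=1000, missed_max=2: active slave down after ~2 s,
 * backup after ~3 s - exactly the old hard-coded constants. */
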
@@ -3872,8 +3874,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
skb->l4_hash)
return skb->hash;
- return __bond_xmit_hash(bond, skb, skb->head, skb->protocol,
- skb->mac_header, skb->network_header,
+ return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
+ skb_mac_offset(skb), skb_network_offset(skb),
skb_headlen(skb));
}
@@ -4091,7 +4093,11 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
{
struct bonding *bond = netdev_priv(bond_dev);
struct mii_ioctl_data *mii = NULL;
- int res;
+ const struct net_device_ops *ops;
+ struct net_device *real_dev;
+ struct hwtstamp_config cfg;
+ struct ifreq ifrr;
+ int res = 0;
netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
@@ -4117,7 +4123,40 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
mii->val_out = BMSR_LSTATUS;
}
- return 0;
+ break;
+ case SIOCSHWTSTAMP:
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
+ return -EOPNOTSUPP;
+
+ fallthrough;
+ case SIOCGHWTSTAMP:
+ real_dev = bond_option_active_slave_get_rcu(bond);
+ if (!real_dev)
+ return -EOPNOTSUPP;
+
+ strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ ifrr.ifr_ifru = ifr->ifr_ifru;
+
+ ops = real_dev->netdev_ops;
+ if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) {
+ res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
+ if (res)
+ return res;
+
+ ifr->ifr_ifru = ifrr.ifr_ifru;
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* Set the HWTSTAMP_FLAG_BONDED_PHC_INDEX flag to notify user space */
+ cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
+ -EFAULT : 0;
+ }
+ fallthrough;
default:
res = -EOPNOTSUPP;
}
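
With the two new ioctl cases above, hardware-timestamping requests on a bond are forwarded to the active slave's ndo_eth_ioctl(), and the reply is flagged so user space knows the configuration is tied to whichever slave is currently active. A hedged user-space sketch - it assumes HWTSTAMP_FLAG_BONDED_PHC_INDEX is exported through <linux/net_tstamp.h>, as this series intends:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* the GET path is forwarded to the active slave */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0 &&
	    (cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
		printf("tstamp config comes from the active slave (rx_filter=%d)\n",
		       cfg.rx_filter);

	close(fd);
	return 0;
}

Note the asymmetry in the kernel code: a SIOCSHWTSTAMP caller must set HWTSTAMP_FLAG_BONDED_PHC_INDEX up front to opt in, while SIOCGHWTSTAMP always forwards and the kernel sets the flag on the way back.
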
@@ -4843,25 +4882,39 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave = NULL;
struct list_head *iter;
+ bool xmit_suc = false;
+ bool skb_used = false;
bond_for_each_slave_rcu(bond, slave, iter) {
- if (bond_is_last_slave(bond, slave))
- break;
- if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
- struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+ struct sk_buff *skb2;
+
+ if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
+ continue;
+ if (bond_is_last_slave(bond, slave)) {
+ skb2 = skb;
+ skb_used = true;
+ } else {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
bond_dev->name, __func__);
continue;
}
- bond_dev_queue_xmit(bond, skb2, slave->dev);
}
+
+ if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
+ xmit_suc = true;
}
- if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
- return bond_dev_queue_xmit(bond, skb, slave->dev);
- return bond_tx_drop(bond_dev, skb);
+ if (!skb_used)
+ dev_kfree_skb_any(skb);
+
+ if (xmit_suc)
+ return NETDEV_TX_OK;
+
+ atomic_long_inc(&bond_dev->tx_dropped);
+ return NET_XMIT_DROP;
}
/*------------------------- Device initialization ---------------------------*/
@@ -5319,10 +5372,38 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
BOND_ABI_VERSION);
}
+static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
+ struct ethtool_ts_info *info)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ const struct ethtool_ops *ops;
+ struct net_device *real_dev;
+ struct phy_device *phydev;
+
+ real_dev = bond_option_active_slave_get_rcu(bond);
+ if (real_dev) {
+ ops = real_dev->ethtool_ops;
+ phydev = real_dev->phydev;
+
+ if (phy_has_tsinfo(phydev)) {
+ return phy_ts_info(phydev, info);
+ } else if (ops->get_ts_info) {
+ return ops->get_ts_info(real_dev, info);
+ }
+ }
+
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+
+ return 0;
+}
+
static const struct ethtool_ops bond_ethtool_ops = {
.get_drvinfo = bond_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = bond_ethtool_get_link_ksettings,
+ .get_ts_info = bond_ethtool_get_ts_info,
};
static const struct net_device_ops bond_netdev_ops = {
@@ -5822,6 +5903,7 @@ static int bond_check_params(struct bond_params *params)
params->arp_interval = arp_interval;
params->arp_validate = arp_validate_value;
params->arp_all_targets = arp_all_targets_value;
+ params->missed_max = 2;
params->updelay = updelay;
params->downdelay = downdelay;
params->peer_notif_delay = 0;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 5d54e11d18fa..1007bf6d385d 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -110,6 +110,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
[IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -453,6 +454,15 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
+ if (data[IFLA_BOND_MISSED_MAX]) {
+ int missed_max = nla_get_u8(data[IFLA_BOND_MISSED_MAX]);
+
+ bond_opt_initval(&newval, missed_max);
+ err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -515,6 +525,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */
0;
}
@@ -650,6 +661,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.tlb_dynamic_lb))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_MISSED_MAX,
+ bond->params.missed_max))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index b93337b5a721..2e8484a91a0e 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -78,6 +78,8 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_missed_max_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
@@ -213,6 +215,13 @@ static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_missed_max_tbl[] = {
+ { "minval", 1, BOND_VALFLAG_MIN},
+ { "maxval", 255, BOND_VALFLAG_MAX},
+ { "default", 2, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0},
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -270,6 +279,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_intmax_tbl,
.set = bond_option_arp_interval_set
},
+ [BOND_OPT_MISSED_MAX] = {
+ .id = BOND_OPT_MISSED_MAX,
+ .name = "arp_missed_max",
+ .desc = "Maximum number of missed ARP interval",
+ .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB),
+ .values = bond_missed_max_tbl,
+ .set = bond_option_missed_max_set
+ },
[BOND_OPT_ARP_TARGETS] = {
.id = BOND_OPT_ARP_TARGETS,
.name = "arp_ip_target",
@@ -1186,6 +1204,16 @@ static int bond_option_arp_all_targets_set(struct bonding *bond,
return 0;
}
+static int bond_option_missed_max_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_dbg(bond->dev, "Setting missed max to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.missed_max = newval->value;
+
+ return 0;
+}
+
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index f3e3bfd72556..46b150e6289e 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -11,7 +11,7 @@
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
loff_t off = 0;
@@ -30,7 +30,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
bool found = false;
@@ -57,7 +57,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
static void bond_info_show_master(struct seq_file *seq)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
const struct bond_opt_value *optval;
struct slave *curr, *primary;
int i;
@@ -115,6 +115,8 @@ static void bond_info_show_master(struct seq_file *seq)
seq_printf(seq, "ARP Polling Interval (ms): %d\n",
bond->params.arp_interval);
+ seq_printf(seq, "ARP Missed Max: %u\n",
+ bond->params.missed_max);
seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
@@ -173,7 +175,7 @@ static void bond_info_show_master(struct seq_file *seq)
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index c48b77167fab..9b5a5df23d21 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -303,6 +303,18 @@ static ssize_t bonding_show_arp_targets(struct device *d,
static DEVICE_ATTR(arp_ip_target, 0644,
bonding_show_arp_targets, bonding_sysfs_store_option);
+/* Show the arp missed max. */
+static ssize_t bonding_show_missed_max(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ return sprintf(buf, "%u\n", bond->params.missed_max);
+}
+static DEVICE_ATTR(arp_missed_max, 0644,
+ bonding_show_missed_max, bonding_sysfs_store_option);
+
/* Show the up and down delays. */
static ssize_t bonding_show_downdelay(struct device *d,
struct device_attribute *attr,
@@ -779,6 +791,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_ad_actor_sys_prio.attr,
&dev_attr_ad_actor_system.attr,
&dev_attr_ad_user_port_key.attr,
+ &dev_attr_arp_missed_max.attr,
NULL,
};
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 91230894692d..444ef6a342f6 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -754,7 +754,7 @@ static void cfv_remove(struct virtio_device *vdev)
debugfs_remove_recursive(cfv->debugfs);
vringh_kiov_cleanup(&cfv->ctx.riov);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->vringh_config->del_vrhs(cfv->vdev);
cfv->vr_rx = NULL;
vdev->config->del_vqs(cfv->vdev);
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index a2b4463d8480..1e660afcb61b 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -16,7 +16,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
-obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 3aea32c9b108..a00655ccda02 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -448,7 +448,6 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
@@ -480,8 +479,6 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This triggers transmission */
at91_write(priv, AT91_MCR(mb), reg_mcr);
- stats->tx_bytes += cf->len;
-
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
@@ -553,8 +550,6 @@ static void at91_rx_overflow_err(struct net_device *dev)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
}
@@ -619,7 +614,9 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
at91_read_mb(dev, mb, cf);
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
+
netif_receive_skb(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
@@ -779,8 +776,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
at91_poll_err_frame(dev, cf, reg_sr);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -854,7 +849,10 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
if (likely(reg_msr & AT91_MSR_MRDY &&
~reg_msr & AT91_MSR_MABT)) {
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL);
+ dev->stats.tx_bytes +=
+ can_get_echo_skb(dev,
+ mb - get_mb_tx_first(priv),
+ NULL);
dev->stats.tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
}
@@ -1037,8 +1035,6 @@ static void at91_irq_err(struct net_device *dev)
at91_irq_err_state(dev, cf, new_state);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += cf->len;
netif_rx(skb);
priv->can.state = new_state;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 08b6efa7a1a7..bd2f6dc01194 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -211,7 +211,6 @@ struct c_can_priv {
struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */
void (*raminit)(const struct c_can_priv *priv, bool enable);
u32 comm_rcv_high;
- u32 dlc[];
};
struct net_device *alloc_c_can_dev(int msg_obj_num);
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
index 377c7d2e7612..6655146294fc 100644
--- a/drivers/net/can/c_can/c_can_ethtool.c
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -20,7 +20,9 @@ static void c_can_get_drvinfo(struct net_device *netdev,
}
static void c_can_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct c_can_priv *priv = netdev_priv(netdev);
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index 52671d1ea17d..faa217f26771 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -403,10 +403,10 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
frame->data[i + 1] = data >> 8;
}
}
- }
+ stats->rx_bytes += frame->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += frame->len;
netif_receive_skb(skb);
return 0;
@@ -477,7 +477,6 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
* transmit as we might race against do_tx().
*/
c_can_setup_tx_object(dev, IF_TX, frame, idx);
- priv->dlc[idx] = frame->len;
can_put_echo_skb(skb, dev, idx, 0);
obj = idx + priv->msg_obj_tx_first;
c_can_object_put(dev, IF_TX, obj, cmd);
@@ -742,8 +741,7 @@ static void c_can_do_tx(struct net_device *dev)
* NAPI. We are not transmitting.
*/
c_can_inval_tx_object(dev, IF_NAPI, obj);
- can_get_echo_skb(dev, idx, NULL);
- bytes += priv->dlc[idx];
+ bytes += can_get_echo_skb(dev, idx, NULL);
pkts++;
}
@@ -920,7 +918,6 @@ static int c_can_handle_state_change(struct net_device *dev,
unsigned int reg_err_counter;
unsigned int rx_err_passive;
struct c_can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -996,8 +993,6 @@ static int c_can_handle_state_change(struct net_device *dev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -1064,8 +1059,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
}
@@ -1232,8 +1225,7 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
struct c_can_priv *priv;
int msg_obj_tx_num = msg_obj_num / 2;
- dev = alloc_candev(struct_size(priv, dlc, msg_obj_tx_num),
- msg_obj_tx_num);
+ dev = alloc_candev(sizeof(*priv), msg_obj_tx_num);
if (!dev)
return NULL;
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index f8a130f594e2..bb7224cfc6ab 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -489,17 +489,17 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
cf->len = can_cc_dlc2len((config & 0xf0) >> 4);
for (i = 0; i < cf->len; i++)
cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
- }
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
static int cc770_err(struct net_device *dev, u8 status)
{
struct cc770_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
u8 lec;
@@ -571,8 +571,6 @@ static int cc770_err(struct net_device *dev, u8 status)
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -666,7 +664,6 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
struct cc770_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
unsigned int mo = obj2msgobj(o);
- struct can_frame *cf;
u8 ctrl1;
ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
@@ -698,12 +695,9 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
return;
}
- cf = (struct can_frame *)priv->tx_skb->data;
- stats->tx_bytes += cf->len;
- stats->tx_packets++;
-
can_put_echo_skb(priv->tx_skb, dev, 0, 0);
- can_get_echo_skb(dev, 0, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
+ stats->tx_packets++;
priv->tx_skb = NULL;
netif_wake_queue(dev);
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 0509625c3082..d5fca3bfaf9a 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -4,6 +4,7 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
+#include <linux/units.h>
#include <linux/can/dev.h>
#ifdef CONFIG_CAN_CALC_BITTIMING
@@ -81,9 +82,9 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
if (bt->sample_point) {
sample_point_nominal = bt->sample_point;
} else {
- if (bt->bitrate > 800 * CAN_KBPS)
+ if (bt->bitrate > 800 * KILO /* BPS */)
sample_point_nominal = 750;
- else if (bt->bitrate > 500 * CAN_KBPS)
+ else if (bt->bitrate > 500 * KILO /* BPS */)
sample_point_nominal = 800;
else
sample_point_nominal = 875;
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index e3d840b81357..c192f25f9695 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -136,7 +136,6 @@ EXPORT_SYMBOL_GPL(can_change_state);
static void can_restart(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
struct can_frame *cf;
int err;
@@ -155,9 +154,6 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
netif_rx_ni(skb);
restart:
@@ -300,6 +296,7 @@ EXPORT_SYMBOL_GPL(free_candev);
int can_change_mtu(struct net_device *dev, int new_mtu)
{
struct can_priv *priv = netdev_priv(dev);
+ u32 ctrlmode_static = can_get_static_ctrlmode(priv);
/* Do not allow changing the MTU while running */
if (dev->flags & IFF_UP)
@@ -309,7 +306,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
switch (new_mtu) {
case CAN_MTU:
/* 'CANFD-only' controllers can not switch to CAN_MTU */
- if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+ if (ctrlmode_static & CAN_CTRLMODE_FD)
return -EINVAL;
priv->ctrlmode &= ~CAN_CTRLMODE_FD;
@@ -318,7 +315,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
case CANFD_MTU:
/* check for potential CANFD ability */
if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
- !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+ !(ctrlmode_static & CAN_CTRLMODE_FD))
return -EINVAL;
priv->ctrlmode |= CAN_CTRLMODE_FD;
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 95cca4e5251f..7633d98e3912 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -21,6 +21,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
[IFLA_CAN_TDC] = { .type = NLA_NESTED },
+ [IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
};
static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
@@ -211,7 +212,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = priv->ctrlmode_static;
+ ctrlstatic = can_get_static_ctrlmode(priv);
maskedflags = cm->flags & cm->mask;
/* check whether provided bits are allowed to be passed */
@@ -383,6 +384,12 @@ static size_t can_tdc_get_size(const struct net_device *dev)
return size;
}
+static size_t can_ctrlmode_ext_get_size(void)
+{
+ return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */
+ nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */
+}
+
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -415,6 +422,7 @@ static size_t can_get_size(const struct net_device *dev)
priv->data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
+ size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
return size;
}
@@ -472,6 +480,25 @@ err_cancel:
return -EMSGSIZE;
}
+static int can_ctrlmode_ext_fill_info(struct sk_buff *skb,
+ const struct can_priv *priv)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, IFLA_CAN_CTRLMODE_EXT);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, IFLA_CAN_CTRLMODE_SUPPORTED,
+ priv->ctrlmode_supported)) {
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+}
+
static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -531,7 +558,9 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
sizeof(priv->bitrate_max),
&priv->bitrate_max)) ||
- (can_tdc_fill_info(skb, dev))
+ can_tdc_fill_info(skb, dev) ||
+
+ can_ctrlmode_ext_fill_info(skb, priv)
)
return -EMSGSIZE;
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 37b0cc65237b..7f80d8e1e750 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -54,8 +54,11 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
struct can_frame *cf = (struct can_frame *)skb->data;
work_done++;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_ERR_FLAG)) {
+ stats->rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
+ }
netif_receive_skb(skb);
}
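
The rx-offload change above is one instance of an accounting rule applied throughout this series (compare the at91, c_can and cc770 hunks): error frames no longer bump rx statistics at all, and RTR frames count as packets but not bytes, since they carry no payload. On the tx side the same series drops per-driver length caches (c_can's dlc[]) in favour of can_get_echo_skb()'s return value. A condensed, illustrative restatement of the rx rule:

#include <linux/can.h>
#include <linux/netdevice.h>

/* Illustrative only - this is the rule the drivers above now follow. */
static void can_rx_account(struct net_device_stats *stats,
			   const struct can_frame *cf)
{
	if (cf->can_id & CAN_ERR_FLAG)
		return;				/* error frames aren't traffic */

	stats->rx_packets++;
	if (!(cf->can_id & CAN_RTR_FLAG))	/* RTR frames carry no payload */
		stats->rx_bytes += cf->len;
}
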
diff --git a/drivers/net/can/flexcan/Makefile b/drivers/net/can/flexcan/Makefile
new file mode 100644
index 000000000000..89d5695c902e
--- /dev/null
+++ b/drivers/net/can/flexcan/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+
+flexcan-objs :=
+flexcan-objs += flexcan-core.o
+flexcan-objs += flexcan-ethtool.o
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan/flexcan-core.c
index 12b60ad95b02..74d7fcbfd065 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -15,7 +15,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
-#include <linux/can/rx-offload.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware/imx/sci.h>
@@ -33,6 +32,8 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include "flexcan.h"
+
#define DRV_NAME "flexcan"
/* 8 for RX fifo and 2 error handling */
@@ -173,9 +174,9 @@
/* FLEXCAN interrupt flag register (IFLAG) bits */
/* Errata ERR005829 step7: Reserve first valid MB */
-#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
-#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
+#define FLEXCAN_TX_MB_RESERVED_RX_FIFO 8
+#define FLEXCAN_TX_MB_RESERVED_RX_MAILBOX 0
+#define FLEXCAN_RX_MB_RX_MAILBOX_FIRST (FLEXCAN_TX_MB_RESERVED_RX_MAILBOX + 1)
#define FLEXCAN_IFLAG_MB(x) BIT_ULL(x)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
@@ -206,53 +207,6 @@
#define FLEXCAN_TIMEOUT_US (250)
-/* FLEXCAN hardware feature flags
- *
- * Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB
- * Filter? connected? Passive detection ption in MB Supported?
- * MCF5441X FlexCAN2 ? no yes no no yes no 16
- * MX25 FlexCAN2 03.00.00.00 no no no no no no 64
- * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64
- * MX35 FlexCAN2 03.00.00.00 no no no no no no 64
- * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64
- * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64
- * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64
- * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64
- * VF610 FlexCAN3 ? no yes no yes yes? no 64
- * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64
- * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64
- *
- * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
- */
-
-/* [TR]WRN_INT not connected */
-#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1)
- /* Disable RX FIFO Global mask */
-#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2)
-/* Enable EACEN and RRS bit in ctrl2 */
-#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3)
-/* Disable non-correctable errors interrupt and freeze mode */
-#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4)
-/* Use timestamp based offloading */
-#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5)
-/* No interrupt for error passive */
-#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6)
-/* default to BE register access */
-#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7)
-/* Setup stop mode with GPR to support wakeup */
-#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8)
-/* Support CAN-FD mode */
-#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
-/* support memory detection and correction */
-#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
-/* Setup stop mode with SCU firmware to support wakeup */
-#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
-/* Setup 3 separate interrupts, main, boff and err */
-#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12)
-/* Setup 16 mailboxes */
-#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
-
/* Structure of the message buffer */
struct flexcan_mb {
u32 can_ctrl;
@@ -339,106 +293,81 @@ struct flexcan_regs {
static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
-struct flexcan_devtype_data {
- u32 quirks; /* quirks needed for different IP cores */
-};
-
-struct flexcan_stop_mode {
- struct regmap *gpr;
- u8 req_gpr;
- u8 req_bit;
-};
-
-struct flexcan_priv {
- struct can_priv can;
- struct can_rx_offload offload;
- struct device *dev;
-
- struct flexcan_regs __iomem *regs;
- struct flexcan_mb __iomem *tx_mb;
- struct flexcan_mb __iomem *tx_mb_reserved;
- u8 tx_mb_idx;
- u8 mb_count;
- u8 mb_size;
- u8 clk_src; /* clock source of CAN Protocol Engine */
- u8 scu_idx;
-
- u64 rx_mask;
- u64 tx_mask;
- u32 reg_ctrl_default;
-
- struct clk *clk_ipg;
- struct clk *clk_per;
- const struct flexcan_devtype_data *devtype_data;
- struct regulator *reg_xceiver;
- struct flexcan_stop_mode stm;
-
- int irq_boff;
- int irq_err;
-
- /* IPC handle when setup stop mode by System Controller firmware(scfw) */
- struct imx_sc_ipc *sc_ipc_handle;
-
- /* Read and Write APIs */
- u32 (*read)(void __iomem *addr);
- void (*write)(u32 val, void __iomem *addr);
-};
-
static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16,
+ FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN,
+ FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
- .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR,
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW,
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
};
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
- FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC,
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC,
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD |
- FLEXCAN_QUIRK_SUPPORT_ECC,
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -600,7 +529,7 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
priv->write(reg_mcr, &regs->mcr);
/* enable stop request */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
ret = flexcan_stop_mode_enable_scfw(priv, true);
if (ret < 0)
return ret;
@@ -619,7 +548,7 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
int ret;
/* remove stop request */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
ret = flexcan_stop_mode_enable_scfw(priv, false);
if (ret < 0)
return ret;
@@ -1022,7 +951,7 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
mb = flexcan_get_mb(priv, n);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
u32 code;
do {
@@ -1087,7 +1016,7 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
}
mark_as_read:
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
flexcan_write64(priv, FLEXCAN_IFLAG_MB(n), &regs->iflag1);
else
priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
@@ -1113,7 +1042,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
enum can_state last_state = priv->can.state;
/* reception interrupt */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
u64 reg_iflag_rx;
int ret;
@@ -1173,7 +1102,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* state change interrupt or broken error state quirk fix is enabled */
if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
- (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ (priv->devtype_data.quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
flexcan_irq_state(dev, reg_esr);
@@ -1195,11 +1124,11 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
* (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
*/
if ((last_state != priv->can.state) &&
- (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
+ (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
- if (priv->devtype_data->quirks &
+ if (priv->devtype_data.quirks &
FLEXCAN_QUIRK_BROKEN_WERR_STATE)
flexcan_error_irq_enable(priv);
else
@@ -1423,26 +1352,26 @@ static int flexcan_rx_offload_setup(struct net_device *dev)
else
priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_MB_16)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_MB_16)
priv->mb_count = 16;
else
priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
(sizeof(priv->regs->mb[1]) / priv->mb_size);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
priv->tx_mb_reserved =
- flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP);
+ flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_MAILBOX);
else
priv->tx_mb_reserved =
- flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO);
+ flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_FIFO);
priv->tx_mb_idx = priv->mb_count - 1;
priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx);
priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
priv->offload.mailbox_read = flexcan_mailbox_read;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
+ priv->offload.mb_first = FLEXCAN_RX_MB_RX_MAILBOX_FIRST;
priv->offload.mb_last = priv->mb_count - 2;
priv->rx_mask = GENMASK_ULL(priv->offload.mb_last,
@@ -1506,7 +1435,7 @@ static int flexcan_chip_start(struct net_device *dev)
if (err)
goto out_chip_disable;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_ECC)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_ECC)
flexcan_ram_init(dev);
flexcan_set_bittiming(dev);
@@ -1532,10 +1461,10 @@ static int flexcan_chip_start(struct net_device *dev)
/* MCR
*
* FIFO:
- * - disable for timestamp mode
+ * - disable for mailbox mode
* - enable for FIFO mode
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
reg_mcr &= ~FLEXCAN_MCR_FEN;
else
reg_mcr |= FLEXCAN_MCR_FEN;
@@ -1586,7 +1515,7 @@ static int flexcan_chip_start(struct net_device *dev)
* on most Flexcan cores, too. Otherwise we don't get
* any error warning or passive interrupts.
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
else
@@ -1599,7 +1528,7 @@ static int flexcan_chip_start(struct net_device *dev)
netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
priv->write(reg_ctrl, &regs->ctrl);
- if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
+ if ((priv->devtype_data.quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
reg_ctrl2 = priv->read(&regs->ctrl2);
reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
priv->write(reg_ctrl2, &regs->ctrl2);
@@ -1631,7 +1560,7 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(reg_fdctrl, &regs->fdctrl);
}
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
mb = flexcan_get_mb(priv, i);
priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
@@ -1639,7 +1568,7 @@ static int flexcan_chip_start(struct net_device *dev)
}
} else {
/* clear and invalidate unused mailboxes first */
- for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) {
+ for (i = FLEXCAN_TX_MB_RESERVED_RX_FIFO; i < priv->mb_count; i++) {
mb = flexcan_get_mb(priv, i);
priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
&mb->can_ctrl);
@@ -1659,7 +1588,7 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(0x0, &regs->rx14mask);
priv->write(0x0, &regs->rx15mask);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
priv->write(0x0, &regs->rxfgmask);
/* clear acceptance filters */
@@ -1673,7 +1602,7 @@ static int flexcan_chip_start(struct net_device *dev)
* This also works around errata e5295 which generates false
* positive memory errors and put the device in freeze mode.
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
/* Follow the protocol as described in "Detection
* and Correction of Memory Errors" to write to
* MECR register (step 1 - 5)
@@ -1799,7 +1728,7 @@ static int flexcan_open(struct net_device *dev)
if (err)
goto out_can_rx_offload_disable;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
err = request_irq(priv->irq_boff,
flexcan_irq, IRQF_SHARED, dev->name, dev);
if (err)
@@ -1845,7 +1774,7 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
flexcan_chip_interrupts_disable(dev);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
free_irq(priv->irq_err, dev);
free_irq(priv->irq_boff, dev);
}
@@ -2051,9 +1980,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
priv = netdev_priv(dev);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW)
ret = flexcan_setup_stop_mode_scfw(pdev);
- else if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
+ else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
ret = flexcan_setup_stop_mode_gpr(pdev);
else
/* return 0 directly if doesn't support stop mode feature */
@@ -2164,8 +2093,25 @@ static int flexcan_probe(struct platform_device *pdev)
return -ENODEV;
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
- !(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) {
- dev_err(&pdev->dev, "CAN-FD mode doesn't work with FIFO mode!\n");
+ !((devtype_data->quirks &
+ (FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SUPPPORT_RX_FIFO)) ==
+ (FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR))) {
+ dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n");
+ return -EINVAL;
+ }
+
+ if ((devtype_data->quirks &
+ (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) ==
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR) {
+ dev_err(&pdev->dev,
+ "Quirks (0x%08x) inconsistent: RX_MAILBOX_RX supported but not RX_MAILBOX\n",
+ devtype_data->quirks);
return -EINVAL;
}
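
The probe-time check above uses a masked-compare idiom: CAN-FD capable cores must run the RX path in mailbox mode (USE_RX_MAILBOX plus both mailbox-support quirks) while the RX-FIFO support bit must be clear, all verified in a single equality test. A minimal standalone sketch of that idiom, with the quirk values copied from the patch (the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QUIRK_USE_RX_MAILBOX          (UINT32_C(1) << 5)
#define QUIRK_SUPPORT_FD              (UINT32_C(1) << 9)
#define QUIRK_SUPPPORT_RX_MAILBOX     (UINT32_C(1) << 14)
#define QUIRK_SUPPPORT_RX_MAILBOX_RTR (UINT32_C(1) << 15)
#define QUIRK_SUPPPORT_RX_FIFO        (UINT32_C(1) << 16)

/* FD needs full RX-mailbox support and must not be paired with RX-FIFO:
 * one masked compare tests "these bits set, that bit clear" at once.
 */
static bool fd_mode_ok(uint32_t quirks)
{
    const uint32_t mask = QUIRK_USE_RX_MAILBOX |
                          QUIRK_SUPPPORT_RX_MAILBOX |
                          QUIRK_SUPPPORT_RX_MAILBOX_RTR |
                          QUIRK_SUPPPORT_RX_FIFO;
    const uint32_t need = QUIRK_USE_RX_MAILBOX |
                          QUIRK_SUPPPORT_RX_MAILBOX |
                          QUIRK_SUPPPORT_RX_MAILBOX_RTR;

    if (!(quirks & QUIRK_SUPPORT_FD))
        return true;        /* no FD, nothing to validate */

    return (quirks & mask) == need;
}

int main(void)
{
    uint32_t imx8qm_like = QUIRK_SUPPORT_FD | QUIRK_USE_RX_MAILBOX |
                           QUIRK_SUPPPORT_RX_MAILBOX |
                           QUIRK_SUPPPORT_RX_MAILBOX_RTR;

    printf("%d %d\n", fd_mode_ok(imx8qm_like),
           fd_mode_ok(QUIRK_SUPPORT_FD));    /* 1 0 */
    return 0;
}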
@@ -2177,13 +2123,15 @@ static int flexcan_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &flexcan_netdev_ops;
+ flexcan_set_ethtool_ops(dev);
dev->irq = irq;
dev->flags |= IFF_ECHO;
priv = netdev_priv(dev);
+ priv->devtype_data = *devtype_data;
if (of_property_read_bool(pdev->dev.of_node, "big-endian") ||
- devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
priv->read = flexcan_read_be;
priv->write = flexcan_write_be;
} else {
@@ -2202,10 +2150,9 @@ static int flexcan_probe(struct platform_device *pdev)
priv->clk_ipg = clk_ipg;
priv->clk_per = clk_per;
priv->clk_src = clk_src;
- priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
- if (devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
priv->irq_boff = platform_get_irq(pdev, 1);
if (priv->irq_boff <= 0) {
err = -ENODEV;
@@ -2218,7 +2165,7 @@ static int flexcan_probe(struct platform_device *pdev)
}
}
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
priv->can.bittiming_const = &flexcan_fd_bittiming_const;
diff --git a/drivers/net/can/flexcan/flexcan-ethtool.c b/drivers/net/can/flexcan/flexcan-ethtool.c
new file mode 100644
index 000000000000..3ae535577700
--- /dev/null
+++ b/drivers/net/can/flexcan/flexcan-ethtool.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ * Copyright (c) 2022 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ */
+
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "flexcan.h"
+
+static const char flexcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define FLEXCAN_PRIV_FLAGS_RX_RTR BIT(0)
+ "rx-rtr",
+};
+
+static void
+flexcan_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *ext_ack)
+{
+ const struct flexcan_priv *priv = netdev_priv(ndev);
+
+ ring->rx_max_pending = priv->mb_count;
+ ring->tx_max_pending = priv->mb_count;
+
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
+ ring->rx_pending = priv->offload.mb_last -
+ priv->offload.mb_first + 1;
+ else
+ ring->rx_pending = 6; /* RX-FIFO depth is fixed */
+
+ /* the driver currently supports only one TX buffer */
+ ring->tx_pending = 1;
+}
+
+static void
+flexcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, flexcan_priv_flags_strings,
+ sizeof(flexcan_priv_flags_strings));
+ }
+}
+
+static u32 flexcan_get_priv_flags(struct net_device *ndev)
+{
+ const struct flexcan_priv *priv = netdev_priv(ndev);
+ u32 priv_flags = 0;
+
+ if (flexcan_active_rx_rtr(priv))
+ priv_flags |= FLEXCAN_PRIV_FLAGS_RX_RTR;
+
+ return priv_flags;
+}
+
+static int flexcan_set_priv_flags(struct net_device *ndev, u32 priv_flags)
+{
+ struct flexcan_priv *priv = netdev_priv(ndev);
+ u32 quirks = priv->devtype_data.quirks;
+
+ if (priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) {
+ if (flexcan_supports_rx_mailbox_rtr(priv))
+ quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ else if (flexcan_supports_rx_fifo(priv))
+ quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ else
+ quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ } else {
+ if (flexcan_supports_rx_mailbox(priv))
+ quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ else
+ quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ }
+
+ if (quirks != priv->devtype_data.quirks && netif_running(ndev))
+ return -EBUSY;
+
+ priv->devtype_data.quirks = quirks;
+
+ if (!(priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) &&
+ !flexcan_active_rx_rtr(priv))
+ netdev_info(ndev,
+ "Activating RX mailbox mode, cannot receive RTR frames.\n");
+
+ return 0;
+}
+
+static int flexcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(flexcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ethtool_ops flexcan_ethtool_ops = {
+ .get_ringparam = flexcan_get_ringparam,
+ .get_strings = flexcan_get_strings,
+ .get_priv_flags = flexcan_get_priv_flags,
+ .set_priv_flags = flexcan_set_priv_flags,
+ .get_sset_count = flexcan_get_sset_count,
+};
+
+void flexcan_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &flexcan_ethtool_ops;
+}
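
The new flexcan-ethtool.c exposes a single private flag, "rx-rtr", normally toggled with "ethtool --set-priv-flags can0 rx-rtr on". For reference, a hedged userspace sketch that reads the flags through the long-standing SIOCETHTOOL/ETHTOOL_GPFLAGS ioctl; the interface name is an assumption:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct ethtool_value ev = { .cmd = ETHTOOL_GPFLAGS };
    struct ifreq ifr;
    int fd;

    fd = socket(AF_INET, SOCK_DGRAM, 0);
    if (fd < 0)
        return 1;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1); /* assumed device name */
    ifr.ifr_data = (void *)&ev;

    if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
        printf("priv flags: 0x%08x (bit 0 = rx-rtr)\n", ev.data);

    close(fd);
    return 0;
}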
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
new file mode 100644
index 000000000000..23fc09a7e10f
--- /dev/null
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * flexcan.h - FLEXCAN CAN controller driver
+ *
+ * Copyright (c) 2005-2006 Varma Electronics Oy
+ * Copyright (c) 2009 Sascha Hauer, Pengutronix
+ * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+ *
+ */
+
+#ifndef _FLEXCAN_H
+#define _FLEXCAN_H
+
+#include <linux/can/rx-offload.h>
+
+/* FLEXCAN hardware feature flags
+ *
+ * Below is some version info we got:
+ * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB
+ * Filter? connected? Passive detection ption in MB Supported?
+ * MCF5441X FlexCAN2 ? no yes no no no no 16
+ * MX25 FlexCAN2 03.00.00.00 no no no no no no 64
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64
+ * MX35 FlexCAN2 03.00.00.00 no no no no no no 64
+ * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64
+ * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64
+ * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64
+ * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64
+ * VF610 FlexCAN3 ? no yes no yes yes? no 64
+ * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64
+ * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64
+ *
+ * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
+ */
+
+/* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1)
+/* Disable RX FIFO Global mask */
+#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2)
+/* Enable EACEN and RRS bit in ctrl2 */
+#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3)
+/* Disable non-correctable errors interrupt and freeze mode */
+#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4)
+/* Use mailboxes (not FIFO) for RX path */
+#define FLEXCAN_QUIRK_USE_RX_MAILBOX BIT(5)
+/* No interrupt for error passive */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6)
+/* default to BE register access */
+#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7)
+/* Setup stop mode with GPR to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8)
+/* Support CAN-FD mode */
+#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
+/* Support memory error detection and correction (ECC) */
+#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
+/* Setup stop mode with SCU firmware to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
+/* Setup 3 separate interrupts, main, boff and err */
+#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12)
+/* Setup 16 mailboxes */
+#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
+/* Device supports RX via mailboxes */
+#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX BIT(14)
+/* Device supports RTR reception via mailboxes */
+#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR BIT(15)
+/* Device supports RX via FIFO */
+#define FLEXCAN_QUIRK_SUPPPORT_RX_FIFO BIT(16)
+
+struct flexcan_devtype_data {
+ u32 quirks; /* quirks needed for different IP cores */
+};
+
+struct flexcan_stop_mode {
+ struct regmap *gpr;
+ u8 req_gpr;
+ u8 req_bit;
+};
+
+struct flexcan_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct device *dev;
+
+ struct flexcan_regs __iomem *regs;
+ struct flexcan_mb __iomem *tx_mb;
+ struct flexcan_mb __iomem *tx_mb_reserved;
+ u8 tx_mb_idx;
+ u8 mb_count;
+ u8 mb_size;
+ u8 clk_src; /* clock source of CAN Protocol Engine */
+ u8 scu_idx;
+
+ u64 rx_mask;
+ u64 tx_mask;
+ u32 reg_ctrl_default;
+
+ struct clk *clk_ipg;
+ struct clk *clk_per;
+ struct flexcan_devtype_data devtype_data;
+ struct regulator *reg_xceiver;
+ struct flexcan_stop_mode stm;
+
+ int irq_boff;
+ int irq_err;
+
+ /* IPC handle used when stop mode is set up by System Controller firmware (scfw) */
+ struct imx_sc_ipc *sc_ipc_handle;
+
+ /* Read and Write APIs */
+ u32 (*read)(void __iomem *addr);
+ void (*write)(u32 val, void __iomem *addr);
+};
+
+void flexcan_set_ethtool_ops(struct net_device *dev);
+
+static inline bool
+flexcan_supports_rx_mailbox(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX;
+}
+
+static inline bool
+flexcan_supports_rx_mailbox_rtr(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ return (quirks & (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) ==
+ (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR);
+}
+
+static inline bool
+flexcan_supports_rx_fifo(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_FIFO;
+}
+
+static inline bool
+flexcan_active_rx_rtr(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ if (quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
+ if (quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)
+ return true;
+ } else {
+ /* RX-FIFO is always RTR capable */
+ return true;
+ }
+
+ return false;
+}
+
+#endif /* _FLEXCAN_H */
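
The inline helpers above reduce to a small truth table: RX-FIFO mode can always receive RTR frames, while RX-mailbox mode can only do so on cores with the RX_MAILBOX_RTR quirk. A standalone sketch of that decision, assuming only the two quirk bits it needs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QUIRK_USE_RX_MAILBOX          (UINT32_C(1) << 5)
#define QUIRK_SUPPPORT_RX_MAILBOX_RTR (UINT32_C(1) << 15)

/* mirrors flexcan_active_rx_rtr() above */
static bool active_rx_rtr(uint32_t quirks)
{
    if (quirks & QUIRK_USE_RX_MAILBOX)
        return quirks & QUIRK_SUPPPORT_RX_MAILBOX_RTR;

    return true;    /* RX-FIFO is always RTR capable */
}

int main(void)
{
    printf("%d %d %d\n",
           active_rx_rtr(0),                        /* FIFO mode: 1 */
           active_rx_rtr(QUIRK_USE_RX_MAILBOX),     /* 0 */
           active_rx_rtr(QUIRK_USE_RX_MAILBOX |
                         QUIRK_SUPPPORT_RX_MAILBOX_RTR)); /* 1 */
    return 0;
}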
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 78e27940b2af..d0c5a7a60daf 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -255,7 +255,6 @@ struct grcan_priv {
struct grcan_dma dma;
struct sk_buff **echo_skb; /* We allocate this on our own */
- u8 *txdlc; /* Length of queued frames */
/* The echo skb pointer, pointing into echo_skb and indicating which
* frames can be echoed back. See the "Notes on the tx cyclic buffer
@@ -515,9 +514,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
if (echo) {
/* Normal echo of messages */
stats->tx_packets++;
- stats->tx_bytes += priv->txdlc[i];
- priv->txdlc[i] = 0;
- can_get_echo_skb(dev, i, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, i, NULL);
} else {
/* For cleanup of untransmitted messages */
can_free_echo_skb(dev, i, NULL);
@@ -1062,16 +1059,10 @@ static int grcan_open(struct net_device *dev)
priv->can.echo_skb_max = dma->tx.size;
priv->can.echo_skb = priv->echo_skb;
- priv->txdlc = kcalloc(dma->tx.size, sizeof(*priv->txdlc), GFP_KERNEL);
- if (!priv->txdlc) {
- err = -ENOMEM;
- goto exit_free_echo_skb;
- }
-
/* Get can device up */
err = open_candev(dev);
if (err)
- goto exit_free_txdlc;
+ goto exit_free_echo_skb;
err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED,
dev->name, dev);
@@ -1093,8 +1084,6 @@ static int grcan_open(struct net_device *dev)
exit_close_candev:
close_candev(dev);
-exit_free_txdlc:
- kfree(priv->txdlc);
exit_free_echo_skb:
kfree(priv->echo_skb);
exit_free_dma_buffers:
@@ -1129,7 +1118,6 @@ static int grcan_close(struct net_device *dev)
priv->can.echo_skb_max = 0;
priv->can.echo_skb = NULL;
kfree(priv->echo_skb);
- kfree(priv->txdlc);
return 0;
}
@@ -1211,11 +1199,11 @@ static int grcan_receive(struct net_device *dev, int budget)
shift = GRCAN_MSG_DATA_SHIFT(i);
cf->data[i] = (u8)(slot[j] >> shift);
}
- }
- /* Update statistics and read pointer */
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_receive_skb(skb);
rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
@@ -1447,7 +1435,6 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
* can_put_echo_skb would be an error unless other measures are
* taken.
*/
- priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */
can_put_echo_skb(skb, dev, slotindex, 0);
/* Make sure everything is written before allowing hardware to
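
The grcan change above is the first instance of a pattern repeated throughout this series: drivers drop their private per-slot length arrays (txdlc, tx_len, tx_dlc) and instead use the length returned by can_get_echo_skb(). A kernel-context sketch of the resulting TX-complete handler; the function name and index handling are illustrative, not grcan's actual code:

#include <linux/can/dev.h>
#include <linux/netdevice.h>

static void my_tx_done(struct net_device *ndev, unsigned int idx)
{
    struct net_device_stats *stats = &ndev->stats;

    /* can_get_echo_skb() returns the echoed frame's length (0 for RTR
     * frames), so no driver-private length bookkeeping is needed.
     */
    stats->tx_bytes += can_get_echo_skb(ndev, idx, NULL);
    stats->tx_packets++;
}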
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 5bb957a26bc6..b0a3473f211d 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -309,15 +309,15 @@ static void ifi_canfd_read_fifo(struct net_device *ndev)
*(u32 *)(cf->data + i) =
readl(priv->base + IFI_CANFD_RXFIFO_DATA + i);
}
+
+ stats->rx_bytes += cf->len;
}
+ stats->rx_packets++;
/* Remove the packet from FIFO */
writel(IFI_CANFD_RXSTCMD_REMOVE_MSG, priv->base + IFI_CANFD_RXSTCMD);
writel(rx_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
netif_receive_skb(skb);
}
@@ -430,8 +430,6 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
priv->base + IFI_CANFD_INTERRUPT);
writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -456,7 +454,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
enum can_state new_state)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -522,8 +519,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 32006dbf5abd..808c105cf8f7 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1285,7 +1285,7 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
{
struct sk_buff *skb = skb_dequeue(&mod->echoq);
struct can_frame *cf;
- u8 dlc;
+ u8 dlc = 0;
/* this should never trigger unless there is a driver bug */
if (!skb) {
@@ -1294,7 +1294,8 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
}
cf = (struct can_frame *)skb->data;
- dlc = cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ dlc = cf->len;
/* check flag whether this packet has to be looped back */
if (skb->pkt_type != PACKET_LOOPBACK) {
@@ -1421,7 +1422,8 @@ static int ican3_recv_skb(struct ican3_dev *mod)
/* update statistics, receive the skb */
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
err_noalloc:
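
The janz-ican3 hunks show the second recurring theme of this series: RTR frames carry a DLC but no payload, so rx_bytes and tx_bytes must not include cf->len for them. A standalone sketch of the accounting rule, using the real CAN_RTR_FLAG value from linux/can.h:

#include <stdint.h>
#include <stdio.h>

#define CAN_RTR_FLAG 0x40000000U    /* matches linux/can.h */

struct frame {
    uint32_t can_id;
    uint8_t len;
};

/* bytes to add to rx_bytes/tx_bytes for one frame */
static unsigned int stats_len(const struct frame *cf)
{
    return (cf->can_id & CAN_RTR_FLAG) ? 0 : cf->len;
}

int main(void)
{
    struct frame data = { .can_id = 0x123, .len = 8 };
    struct frame rtr = { .can_id = 0x123 | CAN_RTR_FLAG, .len = 8 };

    printf("%u %u\n", stats_len(&data), stats_len(&rtr)); /* 8 0 */
    return 0;
}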
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index eb74cdf26b88..017f2d36ffc3 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -1185,20 +1185,21 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
- if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
+ if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp =
ns_to_ktime(div_u64(p->timestamp * 1000,
pcie->freq_to_ticks_div));
- stats->rx_bytes += cf->len;
- stats->rx_packets++;
-
return netif_rx(skb);
}
@@ -1310,9 +1311,6 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
netif_rx(skb);
return 0;
}
@@ -1510,8 +1508,6 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
if (skb) {
cf->can_id |= CAN_ERR_BUSERROR;
- stats->rx_bytes += cf->len;
- stats->rx_packets++;
netif_rx(skb);
} else {
stats->rx_dropped++;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index c2a8421e7845..1a4b56f6fa8c 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -336,6 +336,9 @@ m_can_fifo_read(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}
@@ -346,6 +349,9 @@ m_can_fifo_write(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}
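
The new val_count checks above keep zero-length requests away from the read_fifo/write_fifo callbacks (a classical-CAN RTR frame, for instance, has no payload words to move, and some bus backends reject empty transfers). A trivial standalone sketch of the guard-clause idiom, with transfer() standing in for the SPI backend:

#include <stddef.h>
#include <stdio.h>

/* stand-in for an SPI/regmap backend that may reject empty transfers */
static int transfer(const void *buf, size_t n)
{
    (void)buf;
    return n ? 0 : -22;    /* pretend: -EINVAL on zero length */
}

static int fifo_io(const void *buf, size_t val_count)
{
    if (val_count == 0)
        return 0;    /* nothing to move, skip the bus access */

    return transfer(buf, val_count);
}

int main(void)
{
    printf("%d %d\n", fifo_io("x", 1), fifo_io("x", 0)); /* 0 0 */
    return 0;
}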
@@ -518,14 +524,14 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
cf->data, DIV_ROUND_UP(cf->len, 4));
if (err)
goto out_free_skb;
+
+ stats->rx_bytes += cf->len;
}
+ stats->rx_packets++;
/* acknowledge rx fifo 0 */
m_can_write(cdev, M_CAN_RXF0A, fgi);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);
m_can_receive_skb(cdev, skb, timestamp);
@@ -647,9 +653,6 @@ static int m_can_handle_lec_err(struct net_device *dev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -706,7 +709,6 @@ static int m_can_handle_state_change(struct net_device *dev,
enum can_state new_state)
{
struct m_can_classdev *cdev = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -771,9 +773,6 @@ static int m_can_handle_state_change(struct net_device *dev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -1463,7 +1462,7 @@ static bool m_can_niso_supported(struct m_can_classdev *cdev)
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
- int m_can_version;
+ int m_can_version, err;
m_can_version = m_can_check_core_release(cdev);
/* return if unsupported version */
@@ -1493,7 +1492,9 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
switch (cdev->version) {
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
- can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ if (err)
+ return err;
cdev->can.bittiming_const = cdev->bit_timing ?
cdev->bit_timing : &m_can_bittiming_const_30X;
@@ -1503,7 +1504,9 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
- can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ if (err)
+ return err;
cdev->can.bittiming_const = cdev->bit_timing ?
cdev->bit_timing : &m_can_bittiming_const_31X;
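
can_set_static_ctrlmode() can fail, e.g. when the requested mode conflicts with modes already configured, so both m_can call sites now propagate its return value instead of ignoring it. A kernel-context sketch of the pattern; the wrapper name is illustrative:

#include <linux/can/dev.h>

static int my_setup(struct net_device *dev)
{
    int err;

    err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
    if (err)
        return err;    /* abort setup instead of ignoring the failure */

    return 0;
}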
diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
index ca80dbaf7a3f..26e212b8ca7a 100644
--- a/drivers/net/can/m_can/tcan4x5x-regmap.c
+++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
@@ -12,7 +12,7 @@
#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)
-#define TCAN4X5X_MAX_REGISTER 0x8ffc
+#define TCAN4X5X_MAX_REGISTER 0x87fc
static int tcan4x5x_regmap_gather_write(void *context,
const void *reg, size_t reg_len,
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index fa32e418eb29..5b5802fac772 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -401,13 +401,15 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
continue;
}
- if (canrflg & MSCAN_RXF)
+ if (canrflg & MSCAN_RXF) {
mscan_get_rx_frame(dev, frame);
- else if (canrflg & MSCAN_ERR_IF)
+ stats->rx_packets++;
+ if (!(frame->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += frame->len;
+ } else if (canrflg & MSCAN_ERR_IF) {
mscan_get_err_frame(dev, frame, canrflg);
+ }
- stats->rx_packets++;
- stats->rx_bytes += frame->len;
work_done++;
netif_receive_skb(skb);
}
@@ -446,9 +448,9 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
continue;
out_8(&regs->cantbsel, mask);
- stats->tx_bytes += in_8(&regs->tx.dlr);
+ stats->tx_bytes += can_get_echo_skb(dev, entry->id,
+ NULL);
stats->tx_packets++;
- can_get_echo_skb(dev, entry->id, NULL);
priv->tx_active &= ~mask;
list_del(pos);
}
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 964c8a09226a..888bef03de09 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -561,9 +561,6 @@ static void pch_can_error(struct net_device *ndev, u32 status)
priv->can.state = state;
netif_receive_skb(skb);
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
}
static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
@@ -680,22 +677,23 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
cf->can_id = id;
}
- if (id2 & PCH_ID2_DIR)
- cf->can_id |= CAN_RTR_FLAG;
-
cf->len = can_cc_dlc2len((ioread32(&priv->regs->
ifregs[0].mcont)) & 0xF);
- for (i = 0; i < cf->len; i += 2) {
- data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
- cf->data[i] = data_reg;
- cf->data[i + 1] = data_reg >> 8;
- }
+ if (id2 & PCH_ID2_DIR) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ for (i = 0; i < cf->len; i += 2) {
+ data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
+ cf->data[i] = data_reg;
+ cf->data[i + 1] = data_reg >> 8;
+ }
- rcv_pkts++;
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
+ rcv_pkts++;
quota--;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
pch_fifo_thresh(priv, obj_num);
@@ -709,16 +707,13 @@ static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
{
struct pch_can_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &(priv->ndev->stats);
- u32 dlc;
- can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1, NULL);
+ stats->tx_bytes += can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1,
+ NULL);
+ stats->tx_packets++;
iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
&priv->regs->ifregs[1].cmask);
pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
- dlc = can_cc_dlc2len(ioread32(&priv->regs->ifregs[1].mcont) &
- PCH_IF_MCONT_DLC);
- stats->tx_bytes += dlc;
- stats->tx_packets++;
if (int_stat == PCH_TX_OBJ_END)
netif_wake_queue(ndev);
}
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index d08718e98e11..b2dea360813d 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -266,10 +266,9 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
- can_get_echo_skb(priv->ndev, msg->client, NULL);
/* count bytes of the echo instead of skb */
- stats->tx_bytes += cf_len;
+ stats->tx_bytes += can_get_echo_skb(priv->ndev, msg->client, NULL);
stats->tx_packets++;
/* restart tx queue (a slot is free) */
@@ -310,12 +309,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
if (rx_msg_flags & PUCAN_MSG_EXT_ID)
cf->can_id |= CAN_EFF_FLAG;
- if (rx_msg_flags & PUCAN_MSG_RTR)
+ if (rx_msg_flags & PUCAN_MSG_RTR) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, msg->d, cf->len);
- stats->rx_bytes += cf->len;
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
@@ -409,8 +409,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
return -ENOMEM;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
return 0;
@@ -438,8 +436,6 @@ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
cf->data[6] = priv->bec.txerr;
cf->data[7] = priv->bec.rxerr;
- stats->rx_bytes += cf->len;
- stats->rx_packets++;
netif_rx(skb);
return 0;
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 8999ec9455ec..33e37395379d 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -94,7 +94,6 @@ struct rcar_can_priv {
struct rcar_can_regs __iomem *regs;
struct clk *clk;
struct clk *can_clk;
- u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
u32 tx_head;
u32 tx_tail;
u8 clock_select;
@@ -223,7 +222,6 @@ static void tx_failure_cleanup(struct net_device *ndev)
static void rcar_can_error(struct net_device *ndev)
{
struct rcar_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
u8 eifr, txerr = 0, rxerr = 0;
@@ -362,11 +360,8 @@ static void rcar_can_error(struct net_device *ndev)
}
}
- if (skb) {
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (skb)
netif_rx(skb);
- }
}
static void rcar_can_tx_done(struct net_device *ndev)
@@ -383,10 +378,11 @@ static void rcar_can_tx_done(struct net_device *ndev)
if (priv->tx_head - priv->tx_tail <= unsent)
break;
stats->tx_packets++;
- stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
- RCAR_CAN_FIFO_DEPTH];
- priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
- can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH, NULL);
+ stats->tx_bytes +=
+ can_get_echo_skb(ndev,
+ priv->tx_tail % RCAR_CAN_FIFO_DEPTH,
+ NULL);
+
priv->tx_tail++;
netif_wake_queue(ndev);
}
@@ -616,7 +612,6 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
- priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len;
can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0);
priv->tx_head++;
/* Start Tx: write 0xff to the TFPCR register to increment
@@ -666,12 +661,13 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
for (dlc = 0; dlc < cf->len; dlc++)
cf->data[dlc] =
readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
+
+ stats->rx_bytes += cf->len;
}
+ stats->rx_packets++;
can_led_event(priv->ndev, CAN_LED_EVENT_RX);
- stats->rx_bytes += cf->len;
- stats->rx_packets++;
netif_receive_skb(skb);
}
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index ff9d0f5ae0dd..b7dc1c32875f 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -502,7 +502,6 @@ struct rcar_canfd_channel {
struct rcar_canfd_global *gpriv; /* Controller reference */
void __iomem *base; /* Register base address */
struct napi_struct napi;
- u8 tx_len[RCANFD_FIFO_DEPTH]; /* For net stats */
u32 tx_head; /* Incremented on xmit */
u32 tx_tail; /* Incremented on xmit done */
u32 channel; /* Channel number */
@@ -1033,8 +1032,6 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
/* Clear channel error interrupts that are handled */
rcar_canfd_write(priv->base, RCANFD_CERFL(ch),
RCANFD_CERFL_ERR(~cerfl));
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1051,9 +1048,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
sent = priv->tx_tail % RCANFD_FIFO_DEPTH;
stats->tx_packets++;
- stats->tx_bytes += priv->tx_len[sent];
- priv->tx_len[sent] = 0;
- can_get_echo_skb(ndev, sent, NULL);
+ stats->tx_bytes += can_get_echo_skb(ndev, sent, NULL);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_tail++;
@@ -1174,8 +1169,6 @@ static void rcar_canfd_state_change(struct net_device *ndev,
rx_state = txerr <= rxerr ? state : 0;
can_change_state(ndev, cf, tx_state, rx_state);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
}
@@ -1465,7 +1458,6 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
}
- priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0);
spin_lock_irqsave(&priv->tx_lock, flags);
@@ -1554,7 +1546,8 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
can_led_event(priv->ndev, CAN_LED_EVENT_RX);
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
stats->rx_packets++;
netif_receive_skb(skb);
}
@@ -1640,8 +1633,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH);
if (!ndev) {
dev_err(&pdev->dev, "alloc_candev() failed\n");
- err = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
priv = netdev_priv(ndev);
@@ -1706,7 +1698,9 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
&rcar_canfd_data_bittiming_const;
/* Controller starts in CAN FD only mode */
- can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ if (err)
+ goto fail;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
} else {
/* Controller starts in Classical CAN only mode */
@@ -1735,8 +1729,8 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
fail_candev:
netif_napi_del(&priv->napi);
- free_candev(ndev);
fail:
+ free_candev(ndev);
return err;
}
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 3fad54646746..966316479485 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -372,15 +372,16 @@ static void sja1000_rx(struct net_device *dev)
} else {
for (i = 0; i < cf->len; i++)
cf->data[i] = priv->read_reg(priv, dreg++);
+
+ stats->rx_bytes += cf->len;
}
+ stats->rx_packets++;
cf->can_id = id;
/* release receive buffer */
sja1000_write_cmdreg(priv, CMD_RRB);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
@@ -487,8 +488,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
can_bus_off(dev);
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -528,10 +527,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
can_free_echo_skb(dev, 0, NULL);
} else {
/* transmission complete */
- stats->tx_bytes +=
- priv->read_reg(priv, SJA1000_FI) & 0xf;
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
- can_get_echo_skb(dev, 0, NULL);
}
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index d7c2ec529b8f..f9ec7bd8dfac 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -17,7 +17,6 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include "sja1000.h"
@@ -234,13 +233,15 @@ static int sp_probe(struct platform_device *pdev)
if (!addr)
return -ENOMEM;
- if (of)
- irq = irq_of_parse_and_map(of, 0);
- else
+ if (of) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ } else {
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-
- if (!irq && !res_irq)
- return -ENODEV;
+ if (!res_irq)
+ return -ENODEV;
+ }
of_id = of_match_device(sp_of_table, &pdev->dev);
if (of_id && of_id->data) {
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 9a4ebda30510..27783fbf011f 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -218,7 +218,9 @@ static void slc_bump(struct slcan *sl)
skb_put_data(skb, &cf, sizeof(struct can_frame));
sl->dev->stats.rx_packets++;
- sl->dev->stats.rx_bytes += cf.len;
+ if (!(cf.can_id & CAN_RTR_FLAG))
+ sl->dev->stats.rx_bytes += cf.len;
+
netif_rx_ni(skb);
}
@@ -288,6 +290,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
if (!(cf->can_id & CAN_RTR_FLAG)) {
for (i = 0; i < cf->len; i++)
pos = hex_byte_pack_upper(pos, cf->data[i]);
+
+ sl->dev->stats.tx_bytes += cf->len;
}
*pos++ = '\r';
@@ -304,7 +308,6 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
sl->xleft = (pos - sl->xbuff) - actual;
sl->xhead = sl->xbuff + actual;
- sl->dev->stats.tx_bytes += cf->len;
}
/* Write out any remaining transmit buffer. Scheduled when tty is writable */
@@ -670,8 +673,8 @@ static void slcan_hangup(struct tty_struct *tty)
}
/* Perform I/O control on an active SLCAN channel. */
-static int slcan_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct slcan *sl = (struct slcan *) tty->disc_data;
unsigned int tmp;
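
The slcan hunks move the tx_bytes accounting into the non-RTR branch of the ASCII encoder, since an 'r' frame serializes no data bytes. A standalone sketch of the framing plus the RTR-aware accounting, assuming the standard-ID slcan format ('t'/'r', three hex ID digits, a DLC digit, hex data, CR):

#include <stdint.h>
#include <stdio.h>

#define CAN_RTR_FLAG 0x40000000U

static unsigned int slc_encode(char *buf, uint32_t id, uint8_t len,
                               const uint8_t *data, unsigned int *tx_bytes)
{
    char *pos = buf;
    int i;

    pos += sprintf(pos, "%c%03X%d",
                   (id & CAN_RTR_FLAG) ? 'r' : 't',
                   (unsigned int)(id & 0x7ffU), len);

    if (!(id & CAN_RTR_FLAG)) {    /* RTR frames carry no data */
        for (i = 0; i < len; i++)
            pos += sprintf(pos, "%02X", data[i]);
        *tx_bytes += len;    /* count payload bytes only */
    }
    *pos++ = '\r';    /* CR terminates the frame on the wire */
    *pos = '\0';

    return pos - buf;
}

int main(void)
{
    unsigned int tx_bytes = 0;
    uint8_t d[2] = { 0xde, 0xad };
    char buf[32];

    slc_encode(buf, 0x123, 2, d, &tx_bytes);
    printf("%s %u\n", buf, tx_bytes);    /* t1232DEAD 2 */
    return 0;
}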
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index 2e93ee792373..e5c939b63fa6 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -293,7 +293,7 @@ static int softingcs_probe(struct pcmcia_device *pcmcia)
return 0;
platform_failed:
- kfree(dev);
+ platform_device_put(pdev);
mem_failed:
pcmcia_bad:
pcmcia_failed:
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 7e1536877993..32286f861a19 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -565,18 +565,19 @@ int softing_startstop(struct net_device *dev, int up)
if (ret < 0)
goto failed;
}
- /* enable_error_frame */
- /*
+
+ /* enable_error_frame
+ *
* Error reporting is switched off at the moment since
* the receiving of them is not yet 100% verified
* This should be enabled sooner or later
- *
- if (error_reporting) {
+ */
+ if (0 && error_reporting) {
ret = softing_fct_cmd(card, 51, "enable_error_frame");
if (ret < 0)
goto failed;
}
- */
+
/* initialize interface */
iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index cfc1325aad10..d74e895bddf7 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -282,7 +282,10 @@ static int softing_handle_1(struct softing *card)
skb = priv->can.echo_skb[priv->tx.echo_get];
if (skb)
skb->tstamp = ktime;
- can_get_echo_skb(netdev, priv->tx.echo_get, NULL);
+ ++netdev->stats.tx_packets;
+ netdev->stats.tx_bytes +=
+ can_get_echo_skb(netdev, priv->tx.echo_get,
+ NULL);
++priv->tx.echo_get;
if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
priv->tx.echo_get = 0;
@@ -290,9 +293,6 @@ static int softing_handle_1(struct softing *card)
--priv->tx.pending;
if (card->tx.pending)
--card->tx.pending;
- ++netdev->stats.tx_packets;
- if (!(msg.can_id & CAN_RTR_FLAG))
- netdev->stats.tx_bytes += msg.len;
} else {
int ret;
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 89d9c986a229..cfcc14fe3e42 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -25,11 +25,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
@@ -153,7 +153,6 @@ struct hi3110_priv {
u8 *spi_rx_buf;
struct sk_buff *tx_skb;
- int tx_len;
struct workqueue_struct *wq;
struct work_struct tx_work;
@@ -166,6 +165,8 @@ struct hi3110_priv {
#define HI3110_AFTER_SUSPEND_POWER 4
#define HI3110_AFTER_SUSPEND_RESTART 8
int restart_tx;
+ bool tx_busy;
+
struct regulator *power;
struct regulator *transceiver;
struct clk *clk;
@@ -175,13 +176,13 @@ static void hi3110_clean(struct net_device *net)
{
struct hi3110_priv *priv = netdev_priv(net);
- if (priv->tx_skb || priv->tx_len)
+ if (priv->tx_skb || priv->tx_busy)
net->stats.tx_errors++;
dev_kfree_skb(priv->tx_skb);
- if (priv->tx_len)
+ if (priv->tx_busy)
can_free_echo_skb(priv->net, 0, NULL);
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
}
/* Note about handling of error return of hi3110_spi_trans: accessing
@@ -343,14 +344,15 @@ static void hi3110_hw_rx(struct spi_device *spi)
/* Data length */
frame->len = can_cc_dlc2len(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F);
- if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR)
+ if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR) {
frame->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF,
frame->len);
+ priv->net->stats.rx_bytes += frame->len;
+ }
priv->net->stats.rx_packets++;
- priv->net->stats.rx_bytes += frame->len;
can_led_event(priv->net, CAN_LED_EVENT_RX);
@@ -368,7 +370,7 @@ static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb,
struct hi3110_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- if (priv->tx_skb || priv->tx_len) {
+ if (priv->tx_skb || priv->tx_busy) {
dev_err(&spi->dev, "hard_xmit called while tx busy\n");
return NETDEV_TX_BUSY;
}
@@ -585,7 +587,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws)
} else {
frame = (struct can_frame *)priv->tx_skb->data;
hi3110_hw_tx(spi, frame);
- priv->tx_len = 1 + frame->len;
+ priv->tx_busy = true;
can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
@@ -720,14 +722,11 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
}
}
- if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
+ if (priv->tx_busy && statf & HI3110_STAT_TXMTY) {
net->stats.tx_packets++;
- net->stats.tx_bytes += priv->tx_len - 1;
+ net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL);
can_led_event(net, CAN_LED_EVENT_TX);
- if (priv->tx_len) {
- can_get_echo_skb(net, 0, NULL);
- priv->tx_len = 0;
- }
+ priv->tx_busy = false;
netif_wake_queue(net);
}
@@ -754,7 +753,7 @@ static int hi3110_open(struct net_device *net)
priv->force_quit = 0;
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist,
flags, DEVICE_NAME, priv);
@@ -828,19 +827,25 @@ MODULE_DEVICE_TABLE(spi, hi3110_id_table);
static int hi3110_can_probe(struct spi_device *spi)
{
- const struct of_device_id *of_id = of_match_device(hi3110_of_match,
- &spi->dev);
+ struct device *dev = &spi->dev;
struct net_device *net;
struct hi3110_priv *priv;
+ const void *match;
struct clk *clk;
- int freq, ret;
+ u32 freq;
+ int ret;
+
+ clk = devm_clk_get_optional(&spi->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "no CAN clock source defined\n");
- clk = devm_clk_get(&spi->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&spi->dev, "no CAN clock source defined\n");
- return PTR_ERR(clk);
+ if (clk) {
+ freq = clk_get_rate(clk);
+ } else {
+ ret = device_property_read_u32(dev, "clock-frequency", &freq);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clock-frequency!\n");
}
- freq = clk_get_rate(clk);
/* Sanity check */
if (freq > 40000000)
@@ -851,11 +856,9 @@ static int hi3110_can_probe(struct spi_device *spi)
if (!net)
return -ENOMEM;
- if (!IS_ERR(clk)) {
- ret = clk_prepare_enable(clk);
- if (ret)
- goto out_free;
- }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_free;
net->netdev_ops = &hi3110_netdev_ops;
net->flags |= IFF_ECHO;
@@ -870,8 +873,9 @@ static int hi3110_can_probe(struct spi_device *spi)
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING;
- if (of_id)
- priv->model = (enum hi3110_model)(uintptr_t)of_id->data;
+ match = device_get_match_data(dev);
+ if (match)
+ priv->model = (enum hi3110_model)(uintptr_t)match;
else
priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
@@ -918,9 +922,7 @@ static int hi3110_can_probe(struct spi_device *spi)
ret = hi3110_hw_probe(spi);
if (ret) {
- if (ret == -ENODEV)
- dev_err(&spi->dev, "Cannot initialize %x. Wrong wiring?\n",
- priv->model);
+ dev_err_probe(dev, ret, "Cannot initialize %x. Wrong wiring?\n", priv->model);
goto error_probe;
}
hi3110_hw_sleep(spi);
@@ -938,14 +940,12 @@ static int hi3110_can_probe(struct spi_device *spi)
hi3110_power_enable(priv->power, 0);
out_clk:
- if (!IS_ERR(clk))
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(clk);
out_free:
free_candev(net);
- dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
- return ret;
+ return dev_err_probe(dev, ret, "Probe failed\n");
}
static int hi3110_can_remove(struct spi_device *spi)
@@ -957,8 +957,7 @@ static int hi3110_can_remove(struct spi_device *spi)
hi3110_power_enable(priv->power, 0);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
free_candev(net);
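
The hi311x probe rework switches to devm_clk_get_optional(): a missing clock is no longer an error (the call returns NULL rather than an ERR_PTR), and the frequency then falls back to a "clock-frequency" property. A kernel-context sketch of that lookup order; the wrapper name is illustrative:

#include <linux/clk.h>
#include <linux/property.h>

static int get_can_clock(struct device *dev, u32 *freq)
{
    struct clk *clk;

    clk = devm_clk_get_optional(dev, NULL);
    if (IS_ERR(clk))
        return PTR_ERR(clk);    /* a real error, e.g. -EPROBE_DEFER */

    if (clk) {
        *freq = clk_get_rate(clk);
        return 0;
    }

    /* no clock phandle: fall back to a fixed frequency property */
    return device_property_read_u32(dev, "clock-frequency", freq);
}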
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 0579ab74f728..025e07cb7439 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -237,7 +237,6 @@ struct mcp251x_priv {
u8 *spi_rx_buf;
struct sk_buff *tx_skb;
- int tx_len;
struct workqueue_struct *wq;
struct work_struct tx_work;
@@ -250,6 +249,8 @@ struct mcp251x_priv {
#define AFTER_SUSPEND_POWER 4
#define AFTER_SUSPEND_RESTART 8
int restart_tx;
+ bool tx_busy;
+
struct regulator *power;
struct regulator *transceiver;
struct clk *clk;
@@ -272,13 +273,13 @@ static void mcp251x_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
- if (priv->tx_skb || priv->tx_len)
+ if (priv->tx_skb || priv->tx_busy)
net->stats.tx_errors++;
dev_kfree_skb(priv->tx_skb);
- if (priv->tx_len)
+ if (priv->tx_busy)
can_free_echo_skb(priv->net, 0, NULL);
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
}
/* Note about handling of error return of mcp251x_spi_trans: accessing
@@ -600,9 +601,6 @@ static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
gpio->names = mcp251x_gpio_names;
gpio->can_sleep = true;
-#ifdef CONFIG_OF_GPIO
- gpio->of_node = priv->spi->dev.of_node;
-#endif
return devm_gpiochip_add_data(&priv->spi->dev, gpio, priv);
}
@@ -733,10 +731,12 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
}
/* Data length */
frame->len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
- memcpy(frame->data, buf + RXBDAT_OFF, frame->len);
+ if (!(frame->can_id & CAN_RTR_FLAG)) {
+ memcpy(frame->data, buf + RXBDAT_OFF, frame->len);
+ priv->net->stats.rx_bytes += frame->len;
+ }
priv->net->stats.rx_packets++;
- priv->net->stats.rx_bytes += frame->len;
can_led_event(priv->net, CAN_LED_EVENT_RX);
@@ -786,7 +786,7 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- if (priv->tx_skb || priv->tx_len) {
+ if (priv->tx_skb || priv->tx_busy) {
dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
return NETDEV_TX_BUSY;
}
@@ -1011,7 +1011,7 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
if (frame->len > CAN_FRAME_MAX_DATA_LEN)
frame->len = CAN_FRAME_MAX_DATA_LEN;
mcp251x_hw_tx(spi, frame, 0);
- priv->tx_len = 1 + frame->len;
+ priv->tx_busy = true;
can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
@@ -1177,12 +1177,12 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
break;
if (intf & CANINTF_TX) {
- net->stats.tx_packets++;
- net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
- if (priv->tx_len) {
- can_get_echo_skb(net, 0, NULL);
- priv->tx_len = 0;
+ if (priv->tx_busy) {
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += can_get_echo_skb(net, 0,
+ NULL);
+ priv->tx_busy = false;
}
netif_wake_queue(net);
}
@@ -1209,7 +1209,7 @@ static int mcp251x_open(struct net_device *net)
priv->force_quit = 0;
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
if (!dev_fwnode(&spi->dev))
flags = IRQF_TRIGGER_FALLING;
diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile
index 3cba3b9447ea..a83d685d64e0 100644
--- a/drivers/net/can/spi/mcp251xfd/Makefile
+++ b/drivers/net/can/spi/mcp251xfd/Makefile
@@ -3,9 +3,14 @@
obj-$(CONFIG_CAN_MCP251XFD) += mcp251xfd.o
mcp251xfd-objs :=
+mcp251xfd-objs += mcp251xfd-chip-fifo.o
mcp251xfd-objs += mcp251xfd-core.o
mcp251xfd-objs += mcp251xfd-crc16.o
mcp251xfd-objs += mcp251xfd-regmap.o
+mcp251xfd-objs += mcp251xfd-ring.o
+mcp251xfd-objs += mcp251xfd-rx.o
+mcp251xfd-objs += mcp251xfd-tef.o
mcp251xfd-objs += mcp251xfd-timestamp.o
+mcp251xfd-objs += mcp251xfd-tx.o
mcp251xfd-$(CONFIG_DEV_COREDUMP) += mcp251xfd-dump.o
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
new file mode 100644
index 000000000000..2f9a623d381d
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static int
+mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u32 fifo_con;
+
+ /* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
+ *
+	 * FIFOs hit by an RX MAB overflow with RXOVIE enabled will
+	 * generate an RXOVIF; use this to properly detect RX MAB
+ * overflows.
+ */
+ fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
+ ring->obj_num - 1) |
+ MCP251XFD_REG_FIFOCON_RXTSEN |
+ MCP251XFD_REG_FIFOCON_RXOVIE |
+ MCP251XFD_REG_FIFOCON_TFNRFNIE;
+
+ if (mcp251xfd_is_fd_mode(priv))
+ fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_64);
+ else
+ fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_8);
+
+ return regmap_write(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
+}
+
+static int
+mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u32 fltcon;
+
+ fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
+ MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
+
+ return regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_FLTCON(ring->nr >> 2),
+ MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
+ fltcon);
+}
+
+int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u32 val;
+ int err, n;
+
+ /* TEF */
+ val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP251XFD_REG_TEFCON_TEFTSEN |
+ MCP251XFD_REG_TEFCON_TEFOVIE |
+ MCP251XFD_REG_TEFCON_TEFNEIE;
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
+ if (err)
+ return err;
+
+ /* FIFO 1 - TX */
+ val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP251XFD_REG_FIFOCON_TXEN |
+ MCP251XFD_REG_FIFOCON_TXATIE;
+
+ if (mcp251xfd_is_fd_mode(priv))
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_64);
+ else
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_8);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
+ MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
+ else
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
+ MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
+
+ err = regmap_write(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
+ val);
+ if (err)
+ return err;
+
+ /* RX FIFOs */
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
+ err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
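
The FIFOCON, TEFCON and FLTCON values in the new chip-fifo.c are assembled with FIELD_PREP() and read back with FIELD_GET() from <linux/bitfield.h>, which shift a value into (or out of) the bit positions described by a mask. A simplified standalone sketch of the idea; the mask below is illustrative, not a real MCP251xFD register field, and the kernel macros are more elaborate:

#include <stdint.h>
#include <stdio.h>

/* simplified FIELD_PREP/FIELD_GET for masks known at compile time */
#define DEMO_FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define DEMO_FIELD_GET(mask, reg) \
	(((uint32_t)(reg) & (mask)) >> __builtin_ctz(mask))

#define DEMO_FSIZE_MASK 0x1f000000u	/* hypothetical 5-bit FIFO size field */

int main(void)
{
	uint32_t reg = DEMO_FIELD_PREP(DEMO_FSIZE_MASK, 7);	/* obj_num - 1 */

	printf("reg=0x%08x fsize=%u\n",
	       reg, DEMO_FIELD_GET(DEMO_FSIZE_MASK, reg));
	return 0;
}
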
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index e16dc482f327..b5986df6eca0 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -20,8 +20,6 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
-#include <asm/unaligned.h>
-
#include "mcp251xfd.h"
#define DEVICE_NAME "mcp251xfd"
@@ -180,330 +178,6 @@ static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
return 0;
}
-static inline u8
-mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
- union mcp251xfd_write_reg_buf *write_reg_buf,
- const u16 reg, const u32 mask, const u32 val)
-{
- u8 first_byte, last_byte, len;
- u8 *data;
- __le32 val_le32;
-
- first_byte = mcp251xfd_first_byte_set(mask);
- last_byte = mcp251xfd_last_byte_set(mask);
- len = last_byte - first_byte + 1;
-
- data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
- val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
- memcpy(data, &val_le32, len);
-
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
- u16 crc;
-
- mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
- len);
- /* CRC */
- len += sizeof(write_reg_buf->crc.cmd);
- crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
- put_unaligned_be16(crc, (void *)write_reg_buf + len);
-
- /* Total length */
- len += sizeof(write_reg_buf->crc.crc);
- } else {
- len += sizeof(write_reg_buf->nocrc.cmd);
- }
-
- return len;
-}
-
-static inline int
-mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
- u8 *tef_tail)
-{
- u32 tef_ua;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
- if (err)
- return err;
-
- *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
-
- return 0;
-}
-
-static inline int
-mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
- u8 *tx_tail)
-{
- u32 fifo_sta;
- int err;
-
- err = regmap_read(priv->map_reg,
- MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
- &fifo_sta);
- if (err)
- return err;
-
- *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
-
- return 0;
-}
-
-static inline int
-mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- u8 *rx_head)
-{
- u32 fifo_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
- &fifo_sta);
- if (err)
- return err;
-
- *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
-
- return 0;
-}
-
-static inline int
-mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- u8 *rx_tail)
-{
- u32 fifo_ua;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
- &fifo_ua);
- if (err)
- return err;
-
- fifo_ua -= ring->base - MCP251XFD_RAM_START;
- *rx_tail = fifo_ua / ring->obj_size;
-
- return 0;
-}
-
-static void
-mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_tx_ring *ring,
- struct mcp251xfd_tx_obj *tx_obj,
- const u8 rts_buf_len,
- const u8 n)
-{
- struct spi_transfer *xfer;
- u16 addr;
-
- /* FIFO load */
- addr = mcp251xfd_get_tx_obj_addr(ring, n);
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
- mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
- addr);
- else
- mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
- addr);
-
- xfer = &tx_obj->xfer[0];
- xfer->tx_buf = &tx_obj->buf;
- xfer->len = 0; /* actual len is assigned on the fly */
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
-
- /* FIFO request to send */
- xfer = &tx_obj->xfer[1];
- xfer->tx_buf = &ring->rts_buf;
- xfer->len = rts_buf_len;
-
- /* SPI message */
- spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
- ARRAY_SIZE(tx_obj->xfer));
-}
-
-static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_tef_ring *tef_ring;
- struct mcp251xfd_tx_ring *tx_ring;
- struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
- struct mcp251xfd_tx_obj *tx_obj;
- struct spi_transfer *xfer;
- u32 val;
- u16 addr;
- u8 len;
- int i, j;
-
- netdev_reset_queue(priv->ndev);
-
- /* TEF */
- tef_ring = priv->tef;
- tef_ring->head = 0;
- tef_ring->tail = 0;
-
- /* FIFO increment TEF tail pointer */
- addr = MCP251XFD_REG_TEFCON;
- val = MCP251XFD_REG_TEFCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
- addr, val, val);
-
- for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
- xfer = &tef_ring->uinc_xfer[j];
- xfer->tx_buf = &tef_ring->uinc_buf;
- xfer->len = len;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- }
-
- /* "cs_change == 1" on the last transfer results in an active
- * chip select after the complete SPI message. This causes the
- * controller to interpret the next register access as
- * data. Set "cs_change" of the last transfer to "0" to
- * properly deactivate the chip select at the end of the
- * message.
- */
- xfer->cs_change = 0;
-
- /* TX */
- tx_ring = priv->tx;
- tx_ring->head = 0;
- tx_ring->tail = 0;
- tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);
-
- /* FIFO request to send */
- addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
- val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
- addr, val, val);
-
- mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
- mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
-
- /* RX */
- mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
- rx_ring->head = 0;
- rx_ring->tail = 0;
- rx_ring->nr = i;
- rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);
-
- if (!prev_rx_ring)
- rx_ring->base =
- mcp251xfd_get_tx_obj_addr(tx_ring,
- tx_ring->obj_num);
- else
- rx_ring->base = prev_rx_ring->base +
- prev_rx_ring->obj_size *
- prev_rx_ring->obj_num;
-
- prev_rx_ring = rx_ring;
-
- /* FIFO increment RX tail pointer */
- addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
- val = MCP251XFD_REG_FIFOCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
- addr, val, val);
-
- for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
- xfer = &rx_ring->uinc_xfer[j];
- xfer->tx_buf = &rx_ring->uinc_buf;
- xfer->len = len;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- }
-
- /* "cs_change == 1" on the last transfer results in an
- * active chip select after the complete SPI
- * message. This causes the controller to interpret
- * the next register access as data. Set "cs_change"
- * of the last transfer to "0" to properly deactivate
- * the chip select at the end of the message.
- */
- xfer->cs_change = 0;
- }
-}
-
-static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
-{
- int i;
-
- for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
- kfree(priv->rx[i]);
- priv->rx[i] = NULL;
- }
-}
-
-static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_tx_ring *tx_ring;
- struct mcp251xfd_rx_ring *rx_ring;
- int tef_obj_size, tx_obj_size, rx_obj_size;
- int tx_obj_num;
- int ram_free, i;
-
- tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
- /* listen-only mode works like FD mode */
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
- tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
- rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
- } else {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
- tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
- rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
- }
-
- tx_ring = priv->tx;
- tx_ring->obj_num = tx_obj_num;
- tx_ring->obj_size = tx_obj_size;
-
- ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
- (tef_obj_size + tx_obj_size);
-
- for (i = 0;
- i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
- i++) {
- int rx_obj_num;
-
- rx_obj_num = ram_free / rx_obj_size;
- rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
- MCP251XFD_RX_OBJ_NUM_MAX);
-
- rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
- GFP_KERNEL);
- if (!rx_ring) {
- mcp251xfd_ring_free(priv);
- return -ENOMEM;
- }
- rx_ring->obj_num = rx_obj_num;
- rx_ring->obj_size = rx_obj_size;
- priv->rx[i] = rx_ring;
-
- ram_free -= rx_ring->obj_num * rx_ring->obj_size;
- }
- priv->rx_ring_num = i;
-
- netdev_dbg(priv->ndev,
- "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
- tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
- tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
-
- mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
- netdev_dbg(priv->ndev,
- "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
- i, rx_ring->obj_num, rx_ring->obj_size,
- rx_ring->obj_size * rx_ring->obj_num);
- }
-
- netdev_dbg(priv->ndev,
- "FIFO setup: free: %d bytes\n",
- ram_free);
-
- return 0;
-}
-
static inline int
mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
{
@@ -838,108 +512,6 @@ static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
-static int
-mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring)
-{
- u32 fifo_con;
-
- /* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
- *
- * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
- * generate a RXOVIF, use this to properly detect RX MAB
- * overflows.
- */
- fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
- ring->obj_num - 1) |
- MCP251XFD_REG_FIFOCON_RXTSEN |
- MCP251XFD_REG_FIFOCON_RXOVIE |
- MCP251XFD_REG_FIFOCON_TFNRFNIE;
-
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
- fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_64);
- else
- fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_8);
-
- return regmap_write(priv->map_reg,
- MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
-}
-
-static int
-mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring)
-{
- u32 fltcon;
-
- fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
- MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
-
- return regmap_update_bits(priv->map_reg,
- MCP251XFD_REG_FLTCON(ring->nr >> 2),
- MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
- fltcon);
-}
-
-static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- const struct mcp251xfd_rx_ring *rx_ring;
- u32 val;
- int err, n;
-
- /* TEF */
- val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
- tx_ring->obj_num - 1) |
- MCP251XFD_REG_TEFCON_TEFTSEN |
- MCP251XFD_REG_TEFCON_TEFOVIE |
- MCP251XFD_REG_TEFCON_TEFNEIE;
-
- err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
- if (err)
- return err;
-
- /* FIFO 1 - TX */
- val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
- tx_ring->obj_num - 1) |
- MCP251XFD_REG_FIFOCON_TXEN |
- MCP251XFD_REG_FIFOCON_TXATIE;
-
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_64);
- else
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_8);
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
- MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
- else
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
- MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
-
- err = regmap_write(priv->map_reg,
- MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
- val);
- if (err)
- return err;
-
- /* RX FIFOs */
- mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
- err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
- if (err)
- return err;
-
- err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
{
struct mcp251xfd_ecc *ecc = &priv->ecc;
@@ -968,18 +540,10 @@ static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
return err;
}
-static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_ecc *ecc = &priv->ecc;
-
- ecc->ecc_stat = 0;
-}
-
static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
{
u8 mode;
-
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK;
else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -1186,433 +750,6 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
return __mcp251xfd_get_berr_counter(ndev, bec);
}
-static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
-{
- u8 tef_tail_chip, tef_tail;
- int err;
-
- if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
- return 0;
-
- err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
- if (err)
- return err;
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- if (tef_tail_chip != tef_tail) {
- netdev_err(priv->ndev,
- "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
- tef_tail_chip, tef_tail);
- return -EILSEQ;
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring)
-{
- u8 rx_tail_chip, rx_tail;
- int err;
-
- if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
- return 0;
-
- err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
- if (err)
- return err;
-
- rx_tail = mcp251xfd_get_rx_tail(ring);
- if (rx_tail_chip != rx_tail) {
- netdev_err(priv->ndev,
- "RX tail of chip (%d) and ours (%d) inconsistent.\n",
- rx_tail_chip, rx_tail);
- return -EILSEQ;
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- u32 tef_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
- if (err)
- return err;
-
- if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
- netdev_err(priv->ndev,
- "Transmit Event FIFO buffer overflow.\n");
- return -ENOBUFS;
- }
-
- netdev_info(priv->ndev,
- "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
- tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
- "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
- "not empty" : "empty",
- seq, priv->tef->tail, priv->tef->head, tx_ring->head);
-
- /* The Sequence Number in the TEF doesn't match our tef_tail. */
- return -EAGAIN;
-}
-
-static int
-mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
- const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
- unsigned int *frame_len_ptr)
-{
- struct net_device_stats *stats = &priv->ndev->stats;
- struct sk_buff *skb;
- u32 seq, seq_masked, tef_tail_masked, tef_tail;
-
- seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
- hw_tef_obj->flags);
-
- /* Use the MCP2517FD mask on the MCP2518FD, too. We only
-	 * compare 7 bits; this should be enough to detect
-	 * not-yet-completed, i.e. old TEF objects.
- */
- seq_masked = seq &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- tef_tail_masked = priv->tef->tail &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- if (seq_masked != tef_tail_masked)
- return mcp251xfd_handle_tefif_recover(priv, seq);
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- skb = priv->can.echo_skb[tef_tail];
- if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
- stats->tx_bytes +=
- can_rx_offload_get_echo_skb(&priv->offload,
- tef_tail, hw_tef_obj->ts,
- frame_len_ptr);
- stats->tx_packets++;
- priv->tef->tail++;
-
- return 0;
-}
-
-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- unsigned int new_head;
- u8 chip_tx_tail;
- int err;
-
- err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
- if (err)
- return err;
-
-	/* chip_tx_tail is the next TX object sent by the HW.
- * The new TEF head must be >= the old head, ...
- */
- new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
- if (new_head <= priv->tef->head)
- new_head += tx_ring->obj_num;
-
- /* ... but it cannot exceed the TX head. */
- priv->tef->head = min(new_head, tx_ring->head);
-
- return mcp251xfd_check_tef_tail(priv);
-}
-
-static inline int
-mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_hw_tef_obj *hw_tef_obj,
- const u8 offset, const u8 len)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- const int val_bytes = regmap_get_val_bytes(priv->map_rx);
-
- if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
- (offset > tx_ring->obj_num ||
- len > tx_ring->obj_num ||
- offset + len > tx_ring->obj_num)) {
- netdev_err(priv->ndev,
- "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
- tx_ring->obj_num, offset, len);
- return -ERANGE;
- }
-
- return regmap_bulk_read(priv->map_rx,
- mcp251xfd_get_tef_obj_addr(offset),
- hw_tef_obj,
- sizeof(*hw_tef_obj) / val_bytes * len);
-}
-
-static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
- unsigned int total_frame_len = 0;
- u8 tef_tail, len, l;
- int err, i;
-
- err = mcp251xfd_tef_ring_update(priv);
- if (err)
- return err;
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- len = mcp251xfd_get_tef_len(priv);
- l = mcp251xfd_get_tef_linear_len(priv);
- err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
- if (err)
- return err;
-
- if (l < len) {
- err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
- if (err)
- return err;
- }
-
- for (i = 0; i < len; i++) {
- unsigned int frame_len = 0;
-
- err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
- /* -EAGAIN means the Sequence Number in the TEF
- * doesn't match our tef_tail. This can happen if we
- * read the TEF objects too early. Leave loop let the
- * interrupt handler call us again.
- */
- if (err == -EAGAIN)
- goto out_netif_wake_queue;
- if (err)
- return err;
-
- total_frame_len += frame_len;
- }
-
- out_netif_wake_queue:
-	len = i;	/* number of handled good TEFs */
- if (len) {
- struct mcp251xfd_tef_ring *ring = priv->tef;
- struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- int offset;
-
- /* Increment the TEF FIFO tail pointer 'len' times in
- * a single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
- if (err)
- return err;
-
- tx_ring->tail += len;
- netdev_completed_queue(priv->ndev, len, total_frame_len);
-
- err = mcp251xfd_check_tef_tail(priv);
- if (err)
- return err;
- }
-
- mcp251xfd_ecc_tefif_successful(priv);
-
- if (mcp251xfd_get_tx_free(priv->tx)) {
- /* Make sure that anybody stopping the queue after
- * this sees the new tx_ring->tail.
- */
- smp_mb();
- netif_wake_queue(priv->ndev);
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
-{
- u32 new_head;
- u8 chip_rx_head;
- int err;
-
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
- if (err)
- return err;
-
-	/* chip_rx_head is the next RX object filled by the HW.
- * The new RX head must be >= the old head.
- */
- new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
- if (new_head <= ring->head)
- new_head += ring->obj_num;
-
- ring->head = new_head;
-
- return mcp251xfd_check_rx_tail(priv, ring);
-}
-
-static void
-mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
- struct sk_buff *skb)
-{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- u8 dlc;
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
- u32 sid, eid;
-
- eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
- sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
-
- cfd->can_id = CAN_EFF_FLAG |
- FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
- FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
- } else {
- cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
- hw_rx_obj->id);
- }
-
- dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);
-
- /* CANFD */
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
- cfd->flags |= CANFD_ESI;
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
- cfd->flags |= CANFD_BRS;
-
- cfd->len = can_fd_dlc2len(dlc);
- } else {
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
- cfd->can_id |= CAN_RTR_FLAG;
-
- can_frame_set_cc_len((struct can_frame *)cfd, dlc,
- priv->can.ctrlmode);
- }
-
- if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
- memcpy(cfd->data, hw_rx_obj->data, cfd->len);
-
- mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
-}
-
-static int
-mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring,
- const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
-{
- struct net_device_stats *stats = &priv->ndev->stats;
- struct sk_buff *skb;
- struct canfd_frame *cfd;
- int err;
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
- skb = alloc_canfd_skb(priv->ndev, &cfd);
- else
- skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
-
- if (!skb) {
- stats->rx_dropped++;
- return 0;
- }
-
- mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
- if (err)
- stats->rx_fifo_errors++;
-
- return 0;
-}
-
-static inline int
-mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
- const u8 offset, const u8 len)
-{
- const int val_bytes = regmap_get_val_bytes(priv->map_rx);
- int err;
-
- err = regmap_bulk_read(priv->map_rx,
- mcp251xfd_get_rx_obj_addr(ring, offset),
- hw_rx_obj,
- len * ring->obj_size / val_bytes);
-
- return err;
-}
-
-static int
-mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
-{
- struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
- u8 rx_tail, len;
- int err, i;
-
- err = mcp251xfd_rx_ring_update(priv, ring);
- if (err)
- return err;
-
- while ((len = mcp251xfd_get_rx_linear_len(ring))) {
- int offset;
-
- rx_tail = mcp251xfd_get_rx_tail(ring);
-
- err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
- rx_tail, len);
- if (err)
- return err;
-
- for (i = 0; i < len; i++) {
- err = mcp251xfd_handle_rxif_one(priv, ring,
- (void *)hw_rx_obj +
- i * ring->obj_size);
- if (err)
- return err;
- }
-
- /* Increment the RX FIFO tail pointer 'len' times in a
- * single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
- if (err)
- return err;
-
- ring->tail += len;
- }
-
- return 0;
-}
-
-static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_rx_ring *ring;
- int err, n;
-
- mcp251xfd_for_each_rx_ring(priv, ring, n) {
- err = mcp251xfd_handle_rxif_ring(priv, ring);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static struct sk_buff *
mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
struct can_frame **cf, u32 *timestamp)
@@ -1653,12 +790,15 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
/* If SERRIF is active, there was a RX MAB overflow. */
if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
- netdev_info(priv->ndev,
- "RX-%d: MAB overflow detected.\n",
- ring->nr);
+ if (net_ratelimit())
+ netdev_dbg(priv->ndev,
+ "RX-%d: MAB overflow detected.\n",
+ ring->nr);
} else {
- netdev_info(priv->ndev,
- "RX-%d: FIFO overflow.\n", ring->nr);
+ if (net_ratelimit())
+ netdev_dbg(priv->ndev,
+ "RX-%d: FIFO overflow.\n",
+ ring->nr);
}
err = regmap_update_bits(priv->map_reg,
@@ -2311,212 +1451,23 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
return handled;
}
-static inline struct
-mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
-{
- u8 tx_head;
-
- tx_head = mcp251xfd_get_tx_head(tx_ring);
-
- return &tx_ring->obj[tx_head];
-}
-
-static void
-mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_tx_obj *tx_obj,
- const struct sk_buff *skb,
- unsigned int seq)
-{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
- union mcp251xfd_tx_obj_load_buf *load_buf;
- u8 dlc;
- u32 id, flags;
- int len_sanitized = 0, len;
-
- if (cfd->can_id & CAN_EFF_FLAG) {
- u32 sid, eid;
-
- sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
- eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);
-
- id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
- FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);
-
- flags = MCP251XFD_OBJ_FLAGS_IDE;
- } else {
- id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
- flags = 0;
- }
-
- /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
- * harm, only the lower 7 bits will be transferred into the
- * TEF object.
- */
- flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);
-
- if (cfd->can_id & CAN_RTR_FLAG)
- flags |= MCP251XFD_OBJ_FLAGS_RTR;
- else
- len_sanitized = canfd_sanitize_len(cfd->len);
-
- /* CANFD */
- if (can_is_canfd_skb(skb)) {
- if (cfd->flags & CANFD_ESI)
- flags |= MCP251XFD_OBJ_FLAGS_ESI;
-
- flags |= MCP251XFD_OBJ_FLAGS_FDF;
-
- if (cfd->flags & CANFD_BRS)
- flags |= MCP251XFD_OBJ_FLAGS_BRS;
-
- dlc = can_fd_len2dlc(cfd->len);
- } else {
- dlc = can_get_cc_dlc((struct can_frame *)cfd,
- priv->can.ctrlmode);
- }
-
- flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);
-
- load_buf = &tx_obj->buf;
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
- hw_tx_obj = &load_buf->crc.hw_tx_obj;
- else
- hw_tx_obj = &load_buf->nocrc.hw_tx_obj;
-
- put_unaligned_le32(id, &hw_tx_obj->id);
- put_unaligned_le32(flags, &hw_tx_obj->flags);
-
- /* Copy data */
- memcpy(hw_tx_obj->data, cfd->data, cfd->len);
-
- /* Clear unused data at end of CAN frame */
- if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
- int pad_len;
-
- pad_len = len_sanitized - cfd->len;
- if (pad_len)
- memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
- }
-
- /* Number of bytes to be written into the RAM of the controller */
- len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
- if (MCP251XFD_SANITIZE_CAN)
- len += round_up(len_sanitized, sizeof(u32));
- else
- len += round_up(cfd->len, sizeof(u32));
-
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
- u16 crc;
-
- mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
- len);
- /* CRC */
- len += sizeof(load_buf->crc.cmd);
- crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
- put_unaligned_be16(crc, (void *)load_buf + len);
-
- /* Total length */
- len += sizeof(load_buf->crc.crc);
- } else {
- len += sizeof(load_buf->nocrc.cmd);
- }
-
- tx_obj->xfer[0].len = len;
-}
-
-static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_tx_obj *tx_obj)
-{
- return spi_async(priv->spi, &tx_obj->msg);
-}
-
-static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_tx_ring *tx_ring)
-{
- if (mcp251xfd_get_tx_free(tx_ring) > 0)
- return false;
-
- netif_stop_queue(priv->ndev);
-
- /* Memory barrier before checking tx_free (head and tail) */
- smp_mb();
-
- if (mcp251xfd_get_tx_free(tx_ring) == 0) {
- netdev_dbg(priv->ndev,
- "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
- tx_ring->head, tx_ring->tail,
- tx_ring->head - tx_ring->tail);
-
- return true;
- }
-
- netif_start_queue(priv->ndev);
-
- return false;
-}
-
-static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
-{
- struct mcp251xfd_priv *priv = netdev_priv(ndev);
- struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- struct mcp251xfd_tx_obj *tx_obj;
- unsigned int frame_len;
- u8 tx_head;
- int err;
-
- if (can_dropped_invalid_skb(ndev, skb))
- return NETDEV_TX_OK;
-
- if (mcp251xfd_tx_busy(priv, tx_ring))
- return NETDEV_TX_BUSY;
-
- tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
- mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
-
- /* Stop queue if we occupy the complete TX FIFO */
- tx_head = mcp251xfd_get_tx_head(tx_ring);
- tx_ring->head++;
- if (mcp251xfd_get_tx_free(tx_ring) == 0)
- netif_stop_queue(ndev);
-
- frame_len = can_skb_get_frame_len(skb);
- err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
- if (!err)
- netdev_sent_queue(priv->ndev, frame_len);
-
- err = mcp251xfd_tx_obj_write(priv, tx_obj);
- if (err)
- goto out_err;
-
- return NETDEV_TX_OK;
-
- out_err:
- netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
-
- return NETDEV_TX_OK;
-}
-
static int mcp251xfd_open(struct net_device *ndev)
{
struct mcp251xfd_priv *priv = netdev_priv(ndev);
const struct spi_device *spi = priv->spi;
int err;
- err = pm_runtime_get_sync(ndev->dev.parent);
- if (err < 0) {
- pm_runtime_put_noidle(ndev->dev.parent);
+ err = open_candev(ndev);
+ if (err)
return err;
- }
- err = open_candev(ndev);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err)
- goto out_pm_runtime_put;
+ goto out_close_candev;
err = mcp251xfd_ring_alloc(priv);
if (err)
- goto out_close_candev;
+ goto out_pm_runtime_put;
err = mcp251xfd_transceiver_enable(priv);
if (err)
@@ -2552,11 +1503,11 @@ static int mcp251xfd_open(struct net_device *ndev)
mcp251xfd_transceiver_disable(priv);
out_mcp251xfd_ring_free:
mcp251xfd_ring_free(priv);
- out_close_candev:
- close_candev(ndev);
out_pm_runtime_put:
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
pm_runtime_put(ndev->dev.parent);
+ out_close_candev:
+ close_candev(ndev);
return err;
}
@@ -2625,7 +1576,7 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
if (!mcp251xfd_is_251X(priv) &&
priv->devtype_data.model != devtype_data->model) {
netdev_info(ndev,
- "Detected %s, but firmware specifies a %s. Fixing up.",
+ "Detected %s, but firmware specifies a %s. Fixing up.\n",
__mcp251xfd_get_model_str(devtype_data->model),
mcp251xfd_get_model_str(priv));
}
@@ -2662,7 +1613,7 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
return 0;
netdev_info(priv->ndev,
- "RX_INT active after softreset, disabling RX_INT support.");
+ "RX_INT active after softreset, disabling RX_INT support.\n");
devm_gpiod_put(&priv->spi->dev, priv->rx_int);
priv->rx_int = NULL;
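
The mcp251xfd_open() hunk above switches to pm_runtime_resume_and_get(), which (unlike the pm_runtime_get_sync()/pm_runtime_put_noidle() pair it replaces) drops its usage-counter reference itself on failure, and reorders setup so the error labels unwind strictly in reverse acquisition order. A standalone sketch of the same goto-unwind pattern, with hypothetical acquire/release pairs standing in for open_candev(), the runtime-PM reference and ring allocation:

#include <stdio.h>

static int acquire_a(void) { puts("A acquired"); return 0; }
static void release_a(void) { puts("A released"); }
static int acquire_b(void) { puts("B acquired"); return 0; }
static void release_b(void) { puts("B released"); }
static int acquire_c(void) { puts("C failed"); return -1; }

static int demo_open(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto out_release_a;

	err = acquire_c();
	if (err)
		goto out_release_b;

	return 0;

	/* unwind in exact reverse order of acquisition */
 out_release_b:
	release_b();
 out_release_a:
	release_a();
	return err;
}

int main(void)
{
	return demo_open() ? 1 : 0;
}
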
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 297491516a26..7b120c716228 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -250,7 +250,6 @@ mcp251xfd_regmap_crc_read_check_crc(const struct mcp251xfd_map_buf_crc * const b
return 0;
}
-
static int
mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv,
struct spi_message *msg, unsigned int data_len)
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
new file mode 100644
index 000000000000..92f9e9b01289
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <asm/unaligned.h>
+
+#include "mcp251xfd.h"
+
+static inline u8
+mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
+ union mcp251xfd_write_reg_buf *write_reg_buf,
+ const u16 reg, const u32 mask, const u32 val)
+{
+ u8 first_byte, last_byte, len;
+ u8 *data;
+ __le32 val_le32;
+
+ first_byte = mcp251xfd_first_byte_set(mask);
+ last_byte = mcp251xfd_last_byte_set(mask);
+ len = last_byte - first_byte + 1;
+
+ data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
+ val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
+ memcpy(data, &val_le32, len);
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
+ u16 crc;
+
+ mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(write_reg_buf->crc.cmd);
+ crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->crc.crc);
+ } else {
+ len += sizeof(write_reg_buf->nocrc.cmd);
+ }
+
+ return len;
+}
+
+static void
+mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_tx_ring *ring,
+ struct mcp251xfd_tx_obj *tx_obj,
+ const u8 rts_buf_len,
+ const u8 n)
+{
+ struct spi_transfer *xfer;
+ u16 addr;
+
+ /* FIFO load */
+ addr = mcp251xfd_get_tx_obj_addr(ring, n);
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
+ mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
+ addr);
+ else
+ mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
+ addr);
+
+ xfer = &tx_obj->xfer[0];
+ xfer->tx_buf = &tx_obj->buf;
+ xfer->len = 0; /* actual len is assigned on the fly */
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ /* FIFO request to send */
+ xfer = &tx_obj->xfer[1];
+ xfer->tx_buf = &ring->rts_buf;
+ xfer->len = rts_buf_len;
+
+ /* SPI message */
+ spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
+ ARRAY_SIZE(tx_obj->xfer));
+}
+
+void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_tef_ring *tef_ring;
+ struct mcp251xfd_tx_ring *tx_ring;
+ struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
+ struct mcp251xfd_tx_obj *tx_obj;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i, j;
+
+ netdev_reset_queue(priv->ndev);
+
+ /* TEF */
+ tef_ring = priv->tef;
+ tef_ring->head = 0;
+ tef_ring->tail = 0;
+
+ /* FIFO increment TEF tail pointer */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
+ addr, val, val);
+
+ for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
+ xfer = &tef_ring->uinc_xfer[j];
+ xfer->tx_buf = &tef_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
+
+ /* "cs_change == 1" on the last transfer results in an active
+ * chip select after the complete SPI message. This causes the
+ * controller to interpret the next register access as
+ * data. Set "cs_change" of the last transfer to "0" to
+ * properly deactivate the chip select at the end of the
+ * message.
+ */
+ xfer->cs_change = 0;
+
+ /* TX */
+ tx_ring = priv->tx;
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+ tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);
+
+ /* FIFO request to send */
+ addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
+ val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
+ addr, val, val);
+
+ mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
+ mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
+
+ /* RX */
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+ rx_ring->nr = i;
+ rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);
+
+ if (!prev_rx_ring)
+ rx_ring->base =
+ mcp251xfd_get_tx_obj_addr(tx_ring,
+ tx_ring->obj_num);
+ else
+ rx_ring->base = prev_rx_ring->base +
+ prev_rx_ring->obj_size *
+ prev_rx_ring->obj_num;
+
+ prev_rx_ring = rx_ring;
+
+ /* FIFO increment RX tail pointer */
+ addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
+ val = MCP251XFD_REG_FIFOCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
+ addr, val, val);
+
+ for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
+ xfer = &rx_ring->uinc_xfer[j];
+ xfer->tx_buf = &rx_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
+
+ /* "cs_change == 1" on the last transfer results in an
+ * active chip select after the complete SPI
+ * message. This causes the controller to interpret
+ * the next register access as data. Set "cs_change"
+ * of the last transfer to "0" to properly deactivate
+ * the chip select at the end of the message.
+ */
+ xfer->cs_change = 0;
+ }
+}
+
+void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
+ kfree(priv->rx[i]);
+ priv->rx[i] = NULL;
+ }
+}
+
+int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_tx_ring *tx_ring;
+ struct mcp251xfd_rx_ring *rx_ring;
+ int tef_obj_size, tx_obj_size, rx_obj_size;
+ int tx_obj_num;
+ int ram_free, i;
+
+ tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
+ if (mcp251xfd_is_fd_mode(priv)) {
+ tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
+ tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
+ rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
+ } else {
+ tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
+ tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
+ rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
+ }
+
+ tx_ring = priv->tx;
+ tx_ring->obj_num = tx_obj_num;
+ tx_ring->obj_size = tx_obj_size;
+
+ ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
+ (tef_obj_size + tx_obj_size);
+
+ for (i = 0;
+ i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
+ i++) {
+ int rx_obj_num;
+
+ rx_obj_num = ram_free / rx_obj_size;
+ rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
+ MCP251XFD_RX_OBJ_NUM_MAX);
+
+ rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
+ GFP_KERNEL);
+ if (!rx_ring) {
+ mcp251xfd_ring_free(priv);
+ return -ENOMEM;
+ }
+ rx_ring->obj_num = rx_obj_num;
+ rx_ring->obj_size = rx_obj_size;
+ priv->rx[i] = rx_ring;
+
+ ram_free -= rx_ring->obj_num * rx_ring->obj_size;
+ }
+ priv->rx_ring_num = i;
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
+ tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
+ tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
+ i, rx_ring->obj_num, rx_ring->obj_size,
+ rx_ring->obj_size * rx_ring->obj_num);
+ }
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: free: %d bytes\n",
+ ram_free);
+
+ return 0;
+}
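
mcp251xfd_ring_alloc() above carves the controller's message RAM into TEF and TX objects first, then fills the remainder with RX rings whose object counts are rounded down to a power of two (via fls()) and capped at MCP251XFD_RX_OBJ_NUM_MAX. A standalone sketch of that partitioning arithmetic; the sizes below are illustrative, and the driver additionally bounds the loop by ARRAY_SIZE(priv->rx):

#include <stdio.h>

/* fls(): 1-based index of the highest set bit, 0 for x == 0 */
static int demo_fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int ram_free = 1536;	/* illustrative: RAM left after TEF + TX */
	int rx_obj_size = 76;	/* illustrative RX object size */
	int cap = 32;		/* stands in for MCP251XFD_RX_OBJ_NUM_MAX */

	while (ram_free >= rx_obj_size) {
		int n = ram_free / rx_obj_size;

		/* round down to a power of two, then apply the cap */
		n = min_int(1 << (demo_fls(n) - 1), cap);
		printf("ring: %2d objects = %4d bytes\n", n, n * rx_obj_size);
		ram_free -= n * rx_obj_size;
	}
	printf("free: %d bytes\n", ram_free);
	return 0;
}
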
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
new file mode 100644
index 000000000000..63f2526464b3
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline int
+mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_head)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_tail)
+{
+ u32 fifo_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
+ &fifo_ua);
+ if (err)
+ return err;
+
+ fifo_ua -= ring->base - MCP251XFD_RAM_START;
+ *rx_tail = fifo_ua / ring->obj_size;
+
+ return 0;
+}
+
+static int
+mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u8 rx_tail_chip, rx_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
+ if (err)
+ return err;
+
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+ if (rx_tail_chip != rx_tail) {
+ netdev_err(priv->ndev,
+ "RX tail of chip (%d) and ours (%d) inconsistent.\n",
+ rx_tail_chip, rx_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+{
+ u32 new_head;
+ u8 chip_rx_head;
+ int err;
+
+ err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
+ if (err)
+ return err;
+
+	/* chip_rx_head is the next RX object filled by the HW.
+ * The new RX head must be >= the old head.
+ */
+ new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
+ if (new_head <= ring->head)
+ new_head += ring->obj_num;
+
+ ring->head = new_head;
+
+ return mcp251xfd_check_rx_tail(priv, ring);
+}
+
+static void
+mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
+ struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ u8 dlc;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
+ u32 sid, eid;
+
+ eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
+ sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
+
+ cfd->can_id = CAN_EFF_FLAG |
+ FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
+ FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
+ } else {
+ cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
+ hw_rx_obj->id);
+ }
+
+ dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);
+
+ /* CANFD */
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
+ cfd->flags |= CANFD_ESI;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
+ cfd->flags |= CANFD_BRS;
+
+ cfd->len = can_fd_dlc2len(dlc);
+ } else {
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
+ cfd->can_id |= CAN_RTR_FLAG;
+
+ can_frame_set_cc_len((struct can_frame *)cfd, dlc,
+ priv->can.ctrlmode);
+ }
+
+ if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
+ memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+
+ mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
+}
+
+static int
+mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring,
+ const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ struct canfd_frame *cfd;
+ int err;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ else
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
+ const u8 offset, const u8 len)
+{
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
+ int err;
+
+ err = regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_rx_obj_addr(ring, offset),
+ hw_rx_obj,
+ len * ring->obj_size / val_bytes);
+
+ return err;
+}
+
+static int
+mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+{
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
+ u8 rx_tail, len;
+ int err, i;
+
+ err = mcp251xfd_rx_ring_update(priv, ring);
+ if (err)
+ return err;
+
+ while ((len = mcp251xfd_get_rx_linear_len(ring))) {
+ int offset;
+
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+
+ err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
+ rx_tail, len);
+ if (err)
+ return err;
+
+ for (i = 0; i < len; i++) {
+ err = mcp251xfd_handle_rxif_one(priv, ring,
+ (void *)hw_rx_obj +
+ i * ring->obj_size);
+ if (err)
+ return err;
+ }
+
+ /* Increment the RX FIFO tail pointer 'len' times in a
+ * single SPI message.
+ *
+ * Note:
+ * Calculate offset, so that the SPI transfer ends on
+ * the last message of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
+ */
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ ring->tail += len;
+ }
+
+ return 0;
+}
+
+int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_rx_ring *ring;
+ int err, n;
+
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ err = mcp251xfd_handle_rxif_ring(priv, ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
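
The RX path above recovers the payload length from the 4-bit DLC with can_fd_dlc2len() for CAN FD frames, and delegates the classic-CAN raw DLC handling to can_frame_set_cc_len(). For CAN FD the DLC is no longer linear beyond 8; a standalone sketch of the ISO 11898-1 mapping that can_fd_dlc2len() implements:

#include <stdio.h>

/* CAN FD DLC -> payload length (ISO 11898-1); the kernel's
 * can_fd_dlc2len() uses the same table. */
static const unsigned char dlc2len[] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 48, 64
};

int main(void)
{
	for (unsigned int dlc = 0; dlc < 16; dlc++)
		printf("dlc %2u -> %2u bytes\n", dlc, dlc2len[dlc]);
	return 0;
}
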
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
new file mode 100644
index 000000000000..406166005b99
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline int
+mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tef_tail)
+{
+ u32 tef_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
+ if (err)
+ return err;
+
+ *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
+
+ return 0;
+}
+
+static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
+{
+ u8 tef_tail_chip, tef_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ if (tef_tail_chip != tef_tail) {
+ netdev_err(priv->ndev,
+ "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
+ tef_tail_chip, tef_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ u32 tef_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
+ if (err)
+ return err;
+
+ if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
+ netdev_err(priv->ndev,
+ "Transmit Event FIFO buffer overflow.\n");
+ return -ENOBUFS;
+ }
+
+ netdev_info(priv->ndev,
+ "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
+ tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
+ "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
+ "not empty" : "empty",
+ seq, priv->tef->tail, priv->tef->head, tx_ring->head);
+
+ /* The Sequence Number in the TEF doesn't match our tef_tail. */
+ return -EAGAIN;
+}
+
+static int
+mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ unsigned int *frame_len_ptr)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ u32 seq, seq_masked, tef_tail_masked, tef_tail;
+
+ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
+ hw_tef_obj->flags);
+
+ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+	 * compare 7 bits; this should be enough to detect
+	 * not-yet-completed, i.e. old TEF objects.
+ */
+ seq_masked = seq &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ tef_tail_masked = priv->tef->tail &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ if (seq_masked != tef_tail_masked)
+ return mcp251xfd_handle_tefif_recover(priv, seq);
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ skb = priv->can.echo_skb[tef_tail];
+ if (skb)
+ mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload,
+ tef_tail, hw_tef_obj->ts,
+ frame_len_ptr);
+ stats->tx_packets++;
+ priv->tef->tail++;
+
+ return 0;
+}
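
mcp251xfd_handle_tefif_one() above detects stale TEF objects by comparing sequence numbers under the narrower MCP2517FD mask, i.e. modulo the bits both chip variants are guaranteed to store. A standalone sketch of that modulo comparison, assuming the 7-bit field width the comment above states (the mask constant here is illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define DEMO_SEQ_MASK 0x7fu	/* 7 SEQ bits common to both chip variants */

/* Compare sequence numbers modulo 128 to spot stale
 * (not-yet-completed) TEF objects. */
static int seq_matches(uint32_t seq, uint32_t tef_tail)
{
	return (seq & DEMO_SEQ_MASK) == (tef_tail & DEMO_SEQ_MASK);
}

int main(void)
{
	printf("%d\n", seq_matches(130, 2));	/* 130 mod 128 == 2: match */
	printf("%d\n", seq_matches(5, 2));	/* stale object: mismatch */
	return 0;
}
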
+
+static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ unsigned int new_head;
+ u8 chip_tx_tail;
+ int err;
+
+ err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ if (err)
+ return err;
+
+	/* chip_tx_tail is the next TX object sent by the HW.
+ * The new TEF head must be >= the old head, ...
+ */
+ new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
+ if (new_head <= priv->tef->head)
+ new_head += tx_ring->obj_num;
+
+ /* ... but it cannot exceed the TX head. */
+ priv->tef->head = min(new_head, tx_ring->head);
+
+ return mcp251xfd_check_tef_tail(priv);
+}
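
mcp251xfd_tef_ring_update() above reconstructs the driver's free-running TEF head from the chip's modulo-obj_num tail index: round the old head down to a ring boundary, add the chip index, and add one full ring if the result did not advance (the driver then clamps the result to the TX head, omitted here). A standalone sketch of that wraparound arithmetic, assuming a power-of-two ring size:

#include <stdio.h>

#define DEMO_OBJ_NUM 8u	/* stands in for tx_ring->obj_num (power of two) */

/* round down to a multiple of n (n a power of two), like round_down() */
static unsigned int demo_round_down(unsigned int x, unsigned int n)
{
	return x & ~(n - 1);
}

/* Reconstruct the free-running head from the chip's modulo index. */
static unsigned int tef_new_head(unsigned int old_head, unsigned int chip_tail)
{
	unsigned int new_head;

	new_head = demo_round_down(old_head, DEMO_OBJ_NUM) + chip_tail;
	if (new_head <= old_head)	/* chip index wrapped around */
		new_head += DEMO_OBJ_NUM;

	return new_head;
}

int main(void)
{
	printf("%u\n", tef_new_head(13, 2));	/* wrapped: 8 + 2 + 8 = 18 */
	printf("%u\n", tef_new_head(13, 6));	/* no wrap: 8 + 6 = 14 */
	return 0;
}
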
+
+static inline int
+mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ const u8 offset, const u8 len)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ (offset > tx_ring->obj_num ||
+ len > tx_ring->obj_num ||
+ offset + len > tx_ring->obj_num)) {
+ netdev_err(priv->ndev,
+ "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n",
+ tx_ring->obj_num, offset, len);
+ return -ERANGE;
+ }
+
+ return regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_tef_obj_addr(offset),
+ hw_tef_obj,
+ sizeof(*hw_tef_obj) / val_bytes * len);
+}
+
+static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+
+ ecc->ecc_stat = 0;
+}
+
+int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ unsigned int total_frame_len = 0;
+ u8 tef_tail, len, l;
+ int err, i;
+
+ err = mcp251xfd_tef_ring_update(priv);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ len = mcp251xfd_get_tef_len(priv);
+ l = mcp251xfd_get_tef_linear_len(priv);
+ err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
+ if (err)
+ return err;
+
+ if (l < len) {
+ err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < len; i++) {
+ unsigned int frame_len = 0;
+
+ err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
+ /* -EAGAIN means the Sequence Number in the TEF
+ * doesn't match our tef_tail. This can happen if we
+		 * read the TEF objects too early. Leave the loop and let the
+		 * interrupt handler call us again.
+ */
+ if (err == -EAGAIN)
+ goto out_netif_wake_queue;
+ if (err)
+ return err;
+
+ total_frame_len += frame_len;
+ }
+
+ out_netif_wake_queue:
+	len = i;	/* number of handled good TEFs */
+ if (len) {
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int offset;
+
+ /* Increment the TEF FIFO tail pointer 'len' times in
+ * a single SPI message.
+ *
+ * Note:
+ * Calculate offset, so that the SPI transfer ends on
+ * the last message of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
+ */
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ tx_ring->tail += len;
+ netdev_completed_queue(priv->ndev, len, total_frame_len);
+
+ err = mcp251xfd_check_tef_tail(priv);
+ if (err)
+ return err;
+ }
+
+ mcp251xfd_ecc_tefif_successful(priv);
+
+ if (mcp251xfd_get_tx_free(priv->tx)) {
+ /* Make sure that anybody stopping the queue after
+ * this sees the new tx_ring->tail.
+ */
+ smp_mb();
+ netif_wake_queue(priv->ndev);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
new file mode 100644
index 000000000000..ffb6c36b7d9b
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline struct
+mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
+{
+ u8 tx_head;
+
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+
+ return &tx_ring->obj[tx_head];
+}
+
+static void
+mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj,
+ const struct sk_buff *skb,
+ unsigned int seq)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
+ union mcp251xfd_tx_obj_load_buf *load_buf;
+ u8 dlc;
+ u32 id, flags;
+ int len_sanitized = 0, len;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ u32 sid, eid;
+
+ sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
+ eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);
+
+ id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
+ FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);
+
+ flags = MCP251XFD_OBJ_FLAGS_IDE;
+ } else {
+ id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
+ flags = 0;
+ }
+
+ /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
+ * harm, only the lower 7 bits will be transferred into the
+ * TEF object.
+ */
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ flags |= MCP251XFD_OBJ_FLAGS_RTR;
+ else
+ len_sanitized = canfd_sanitize_len(cfd->len);
+
+ /* CANFD */
+ if (can_is_canfd_skb(skb)) {
+ if (cfd->flags & CANFD_ESI)
+ flags |= MCP251XFD_OBJ_FLAGS_ESI;
+
+ flags |= MCP251XFD_OBJ_FLAGS_FDF;
+
+ if (cfd->flags & CANFD_BRS)
+ flags |= MCP251XFD_OBJ_FLAGS_BRS;
+
+ dlc = can_fd_len2dlc(cfd->len);
+ } else {
+ dlc = can_get_cc_dlc((struct can_frame *)cfd,
+ priv->can.ctrlmode);
+ }
+
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);
+
+ load_buf = &tx_obj->buf;
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
+ hw_tx_obj = &load_buf->crc.hw_tx_obj;
+ else
+ hw_tx_obj = &load_buf->nocrc.hw_tx_obj;
+
+ put_unaligned_le32(id, &hw_tx_obj->id);
+ put_unaligned_le32(flags, &hw_tx_obj->flags);
+
+ /* Copy data */
+ memcpy(hw_tx_obj->data, cfd->data, cfd->len);
+
+ /* Clear unused data at end of CAN frame */
+ if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
+ int pad_len;
+
+ pad_len = len_sanitized - cfd->len;
+ if (pad_len)
+ memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
+ }
+
+ /* Number of bytes to be written into the RAM of the controller */
+ len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
+ if (MCP251XFD_SANITIZE_CAN)
+ len += round_up(len_sanitized, sizeof(u32));
+ else
+ len += round_up(cfd->len, sizeof(u32));
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
+ u16 crc;
+
+ mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(load_buf->crc.cmd);
+ crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
+ put_unaligned_be16(crc, (void *)load_buf + len);
+
+ /* Total length */
+ len += sizeof(load_buf->crc.crc);
+ } else {
+ len += sizeof(load_buf->nocrc.cmd);
+ }
+
+ tx_obj->xfer[0].len = len;
+}
+
+static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj)
+{
+ return spi_async(priv->spi, &tx_obj->msg);
+}
+
+static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring)
+{
+ if (mcp251xfd_get_tx_free(tx_ring) > 0)
+ return false;
+
+ netif_stop_queue(priv->ndev);
+
+ /* Memory barrier before checking tx_free (head and tail) */
+ smp_mb();
+
+ if (mcp251xfd_get_tx_free(tx_ring) == 0) {
+ netdev_dbg(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+ tx_ring->head, tx_ring->tail,
+ tx_ring->head - tx_ring->tail);
+
+ return true;
+ }
+
+ netif_start_queue(priv->ndev);
+
+ return false;
+}
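
The stop-then-recheck dance in mcp251xfd_tx_busy() pairs with the smp_mb() before netif_wake_queue() in the TEF handler: stopping the queue first and re-reading the free count after a full barrier closes the race where a completion frees space between the two checks. A rough userspace model of the idiom using C11 atomics in place of smp_mb() and the netif queue calls; all names are illustrative, not the driver's:

/* Minimal sketch of the stop/recheck idiom in mcp251xfd_tx_busy(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_OBJS 8u

static atomic_uint head, tail;
static atomic_bool queue_stopped;

static unsigned int tx_free(void)
{
	return RING_OBJS - (atomic_load(&head) - atomic_load(&tail));
}

static bool tx_busy(void)
{
	if (tx_free() > 0)
		return false;

	/* Looks full: stop the queue first ... */
	atomic_store(&queue_stopped, true);

	/* ... then re-check with a full barrier in between, so a
	 * completion that freed space after the first check (and will
	 * itself see queue_stopped and wake the queue) is not missed.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	if (tx_free() == 0)
		return true;

	atomic_store(&queue_stopped, false);
	return false;
}

int main(void)
{
	atomic_store(&head, RING_OBJS);	/* simulate a full ring */
	printf("busy=%d\n", tx_busy());
	atomic_fetch_add(&tail, 1);	/* simulate one completion */
	printf("busy=%d\n", tx_busy());
	return 0;
}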
+
+netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct mcp251xfd_tx_obj *tx_obj;
+ unsigned int frame_len;
+ u8 tx_head;
+ int err;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (mcp251xfd_tx_busy(priv, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
+ mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
+
+ /* Stop queue if we occupy the complete TX FIFO */
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ tx_ring->head++;
+ if (mcp251xfd_get_tx_free(tx_ring) == 0)
+ netif_stop_queue(ndev);
+
+ frame_len = can_skb_get_frame_len(skb);
+ err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
+ if (!err)
+ netdev_sent_queue(priv->ndev, frame_len);
+
+ err = mcp251xfd_tx_obj_write(priv, tx_obj);
+ if (err)
+ goto out_err;
+
+ return NETDEV_TX_OK;
+
+ out_err:
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+
+ return NETDEV_TX_OK;
+}
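
When the MCP251XFD_QUIRK_CRC_TX quirk is set, the load buffer built in mcp251xfd_tx_obj_from_skb() carries the SPI command, length and payload, followed by a CRC16 stored big-endian. A standalone sketch of that framing; CRC-16/CCITT is used here purely as a stand-in, the controller's actual polynomial lives in mcp251xfd_crc16_compute():

/* Standalone sketch of the CRC framing: compute a CRC16 over
 * command + payload and append it big-endian, as the driver does
 * with put_unaligned_be16().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t crc16_ccitt(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0xffff;

	while (len--) {
		crc ^= (uint16_t)*buf++ << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	}
	return crc;
}

int main(void)
{
	uint8_t frame[64] = "demo-cmd+payload";
	size_t len = strlen((char *)frame);
	uint16_t crc = crc16_ccitt(frame, len);

	/* put_unaligned_be16() equivalent: most significant byte first */
	frame[len] = crc >> 8;
	frame[len + 1] = crc & 0xff;

	printf("crc=0x%04x total_len=%zu\n", crc, len + 2);
	return 0;
}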
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 0f322dabaf65..f551c900803e 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -10,6 +10,7 @@
#ifndef _MCP251XFD_H
#define _MCP251XFD_H
+#include <linux/bitfield.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
@@ -625,6 +626,12 @@ MCP251XFD_IS(2517);
MCP251XFD_IS(2518);
MCP251XFD_IS(251X);
+static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv)
+{
+ /* listen-only mode works like FD mode */
+ return priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD);
+}
+
static inline u8 mcp251xfd_first_byte_set(u32 mask)
{
return (mask & 0x0000ffff) ?
@@ -761,6 +768,24 @@ mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n)
return ring->base + ring->obj_size * n;
}
+static inline int
+mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tx_tail)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg,
+ MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
static inline u8 mcp251xfd_get_tef_head(const struct mcp251xfd_priv *priv)
{
return priv->tef->head & (priv->tx->obj_num - 1);
@@ -849,15 +874,24 @@ mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
(n) < (priv)->rx_ring_num; \
(n)++, (ring) = *((priv)->rx + (n)))
-int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
+int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv);
u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
const void *data, size_t data_size);
u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
+int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
+void mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
+void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
+int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
+int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
+int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
struct sk_buff *skb, u32 timestamp);
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev);
+
#if IS_ENABLED(CONFIG_DEV_COREDUMP)
void mcp251xfd_dump(const struct mcp251xfd_priv *priv);
#else
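
The TX-object packing above leans heavily on FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, which derive a field's shift from the lowest set bit of a contiguous mask. A standalone model of the two macros for contiguous masks; the demo masks are illustrative, not the MCP251xFD register layout:

/* Standalone model of FIELD_PREP/FIELD_GET for contiguous masks. */
#include <stdint.h>
#include <stdio.h>

/* Lowest set bit of the mask gives the field's shift. */
#define FIELD_SHIFT(mask)	(__builtin_ctz(mask))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> FIELD_SHIFT(mask))

#define DEMO_SID_MASK 0x000007ffu	/* bits  0..10 */
#define DEMO_EID_MASK 0x1ffff800u	/* bits 11..28 */

int main(void)
{
	uint32_t id = FIELD_PREP(DEMO_SID_MASK, 0x123) |
		      FIELD_PREP(DEMO_EID_MASK, 0x2abcd);

	printf("id=0x%08x sid=0x%03x eid=0x%05x\n", id,
	       FIELD_GET(DEMO_SID_MASK, id),
	       FIELD_GET(DEMO_EID_MASK, id));
	return 0;
}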
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 54aa7c25c4de..25d6d81ab4f4 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -61,6 +61,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#define DRV_NAME "sun4i_can"
@@ -200,10 +201,20 @@
#define SUN4I_CAN_MAX_IRQ 20
#define SUN4I_MODE_MAX_RETRIES 100
+/**
+ * struct sun4ican_quirks - Differences between SoC variants.
+ *
+ * @has_reset: SoC requires the reset line to be deasserted.
+ */
+struct sun4ican_quirks {
+ bool has_reset;
+};
+
struct sun4ican_priv {
struct can_priv can;
void __iomem *base;
struct clk *clk;
+ struct reset_control *reset;
spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
};
@@ -490,18 +501,20 @@ static void sun4i_can_rx(struct net_device *dev)
}
/* remote frame ? */
- if (fi & SUN4I_MSG_RTR_FLAG)
+ if (fi & SUN4I_MSG_RTR_FLAG) {
id |= CAN_RTR_FLAG;
- else
+ } else {
for (i = 0; i < cf->len; i++)
cf->data[i] = readl(priv->base + dreg + i * 4);
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+
cf->can_id = id;
sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
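
This hunk is the first of many in the series applying the same statistics convention: an RTR frame counts as a received packet but contributes no data bytes, and (in later hunks) error frames stop being counted as received traffic altogether. A standalone model of the rule with simplified stand-in structs:

/* Standalone model of the statistics convention this series applies
 * across drivers. The frame/stats structs are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RTR_FLAG 0x40000000u
#define DEMO_ERR_FLAG 0x20000000u

struct demo_frame {
	uint32_t can_id;
	uint8_t len;
};

struct demo_stats {
	unsigned long rx_packets, rx_bytes;
};

static void count_rx(struct demo_stats *stats, const struct demo_frame *cf)
{
	if (cf->can_id & DEMO_ERR_FLAG)
		return;	/* error frames are not real traffic */

	stats->rx_packets++;
	if (!(cf->can_id & DEMO_RTR_FLAG))
		stats->rx_bytes += cf->len;	/* RTR frames carry no data */
}

int main(void)
{
	struct demo_stats stats = { 0 };
	struct demo_frame data = { .can_id = 0x123, .len = 8 };
	struct demo_frame rtr = { .can_id = 0x123 | DEMO_RTR_FLAG, .len = 8 };
	struct demo_frame err = { .can_id = DEMO_ERR_FLAG, .len = 8 };

	count_rx(&stats, &data);
	count_rx(&stats, &rtr);
	count_rx(&stats, &err);
	printf("packets=%lu bytes=%lu\n", stats.rx_packets, stats.rx_bytes);
	return 0;
}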
@@ -622,13 +635,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
can_bus_off(dev);
}
- if (likely(skb)) {
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (likely(skb))
netif_rx(skb);
- } else {
+ else
return -ENOMEM;
- }
return 0;
}
@@ -651,11 +661,8 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
if (isrc & SUN4I_INT_TBUF_VLD) {
/* transmission complete interrupt */
- stats->tx_bytes +=
- readl(priv->base +
- SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf;
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
- can_get_echo_skb(dev, 0, NULL);
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
@@ -702,6 +709,13 @@ static int sun4ican_open(struct net_device *dev)
goto exit_irq;
}
+	/* deassert the software reset */
+ err = reset_control_deassert(priv->reset);
+ if (err) {
+ netdev_err(dev, "could not deassert CAN reset\n");
+ goto exit_soft_reset;
+ }
+
/* turn on clocking for CAN peripheral block */
err = clk_prepare_enable(priv->clk);
if (err) {
@@ -723,6 +737,8 @@ static int sun4ican_open(struct net_device *dev)
exit_can_start:
clk_disable_unprepare(priv->clk);
exit_clock:
+ reset_control_assert(priv->reset);
+exit_soft_reset:
free_irq(dev->irq, dev);
exit_irq:
close_candev(dev);
@@ -736,6 +752,7 @@ static int sun4ican_close(struct net_device *dev)
netif_stop_queue(dev);
sun4i_can_stop(dev);
clk_disable_unprepare(priv->clk);
+ reset_control_assert(priv->reset);
free_irq(dev->irq, dev);
close_candev(dev);
@@ -750,9 +767,27 @@ static const struct net_device_ops sun4ican_netdev_ops = {
.ndo_start_xmit = sun4ican_start_xmit,
};
+static const struct sun4ican_quirks sun4ican_quirks_a10 = {
+ .has_reset = false,
+};
+
+static const struct sun4ican_quirks sun4ican_quirks_r40 = {
+ .has_reset = true,
+};
+
static const struct of_device_id sun4ican_of_match[] = {
- {.compatible = "allwinner,sun4i-a10-can"},
- {},
+ {
+ .compatible = "allwinner,sun4i-a10-can",
+ .data = &sun4ican_quirks_a10
+ }, {
+ .compatible = "allwinner,sun7i-a20-can",
+ .data = &sun4ican_quirks_a10
+ }, {
+ .compatible = "allwinner,sun8i-r40-can",
+ .data = &sun4ican_quirks_r40
+ }, {
+ /* sentinel */
+ },
};
MODULE_DEVICE_TABLE(of, sun4ican_of_match);
@@ -771,10 +806,28 @@ static int sun4ican_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct clk *clk;
+ struct reset_control *reset = NULL;
void __iomem *addr;
int err, irq;
struct net_device *dev;
struct sun4ican_priv *priv;
+ const struct sun4ican_quirks *quirks;
+
+ quirks = of_device_get_match_data(&pdev->dev);
+ if (!quirks) {
+ dev_err(&pdev->dev, "failed to determine the quirks to use\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ if (quirks->has_reset) {
+ reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(reset)) {
+ dev_err(&pdev->dev, "unable to request reset\n");
+ err = PTR_ERR(reset);
+ goto exit;
+ }
+ }
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
@@ -818,6 +871,7 @@ static int sun4ican_probe(struct platform_device *pdev)
CAN_CTRLMODE_3_SAMPLES;
priv->base = addr;
priv->clk = clk;
+ priv->reset = reset;
spin_lock_init(&priv->cmdreg_lock);
platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 353062ead98f..ff31b993ab17 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -859,7 +859,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
struct net_device *ndev = (struct net_device *)0;
struct ti_hecc_priv *priv;
struct device_node *np = pdev->dev.of_node;
- struct resource *irq;
struct regulator *reg_xceiver;
int err = -ENODEV;
@@ -904,9 +903,9 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_candev;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "No irq resource\n");
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ err = ndev->irq;
goto probe_exit_candev;
}
@@ -920,7 +919,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
spin_lock_init(&priv->mbx_lock);
- ndev->irq = irq->start;
ndev->flags |= IFF_ECHO;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 2b5302e72435..7bedceffdfa3 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -230,7 +230,6 @@ struct ems_tx_urb_context {
struct ems_usb *dev;
u32 echo_index;
- u8 dlc;
};
struct ems_usb {
@@ -320,10 +319,11 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
} else {
for (i = 0; i < cf->len; i++)
cf->data[i] = msg->msg.can_msg.msg[i];
- }
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
@@ -397,8 +397,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
stats->rx_errors++;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -518,9 +516,8 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
/* transmission complete interrupt */
netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += context->dlc;
-
- can_get_echo_skb(netdev, context->echo_index, NULL);
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index,
+ NULL);
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -806,7 +803,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
context->dev = dev;
context->echo_index = i;
- context->dlc = cf->len;
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
size, ems_usb_write_bulk_callback, context);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index c6068a251fbe..286daaaea0b8 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -183,7 +183,6 @@ struct esd_usb2_net_priv;
struct esd_tx_urb_context {
struct esd_usb2_net_priv *priv;
u32 echo_index;
- int len; /* CAN payload length */
};
struct esd_usb2 {
@@ -293,8 +292,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
}
@@ -334,10 +331,11 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
} else {
for (i = 0; i < cf->len; i++)
cf->data[i] = msg->msg.rx.data[i];
- }
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
@@ -358,8 +356,8 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
if (!msg->msg.txdone.status) {
stats->tx_packets++;
- stats->tx_bytes += context->len;
- can_get_echo_skb(netdev, context->echo_index, NULL);
+ stats->tx_bytes += can_get_echo_skb(netdev, context->echo_index,
+ NULL);
} else {
stats->tx_errors++;
can_free_echo_skb(netdev, context->echo_index, NULL);
@@ -784,7 +782,6 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
context->priv = priv;
context->echo_index = i;
- context->len = cf->len;
/* hnd must not be 0 - MSB is stripped in txdone handling */
msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c
index 14e360c9f2c9..1bcdcece5ec7 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.c
+++ b/drivers/net/can/usb/etas_es58x/es581_4.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
#include "es58x_core.h"
@@ -469,8 +470,8 @@ const struct es58x_parameters es581_4_param = {
.bittiming_const = &es581_4_bittiming_const,
.data_bittiming_const = NULL,
.tdc_const = NULL,
- .bitrate_max = 1 * CAN_MBPS,
- .clock = {.freq = 50 * CAN_MHZ},
+ .bitrate_max = 1 * MEGA /* BPS */,
+ .clock = {.freq = 50 * MEGA /* Hz */},
.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC,
.tx_start_of_frame = 0xAFAF,
.rx_start_of_frame = 0xFAFA,
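
The conversion above replaces the CAN-private CAN_MBPS/CAN_MHZ helpers with the generic MEGA constant from <linux/units.h>, keeping the unit in a trailing comment. A trivial userspace model; the MEGA value here mirrors the kernel's definition:

/* Userspace model of the <linux/units.h> conversion above. */
#include <stdio.h>

#define MEGA 1000000UL

int main(void)
{
	unsigned long bitrate_max = 1 * MEGA;	/* BPS */
	unsigned long clock_freq = 50 * MEGA;	/* Hz */

	printf("bitrate_max=%lu bit/s clock=%lu Hz\n",
	       bitrate_max, clock_freq);
	return 0;
}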
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 24627ab14626..2ed2370a3166 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -849,13 +849,6 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
break;
}
- /* driver/net/can/dev.c:can_restart() takes in account error
- * messages in the RX stats. Doing the same here for
- * consistency.
- */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += CAN_ERR_DLC;
-
if (cf) {
if (cf->data[1])
cf->can_id |= CAN_ERR_CRTL;
@@ -2094,6 +2087,7 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
netdev->netdev_ops = &es58x_netdev_ops;
netdev->flags |= IFF_ECHO; /* We support local echo */
+ netdev->dev_port = channel_idx;
ret = register_candev(netdev);
if (ret)
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index 4f0cae29f4d8..ec87126e1a7d 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -12,6 +12,7 @@
*/
#include <linux/kernel.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
#include "es58x_core.h"
@@ -522,8 +523,8 @@ const struct es58x_parameters es58x_fd_param = {
* Mbps work in an optimal environment but are not recommended
* for production environment.
*/
- .bitrate_max = 8 * CAN_MBPS,
- .clock = {.freq = 80 * CAN_MHZ},
+ .bitrate_max = 8 * MEGA /* BPS */,
+ .clock = {.freq = 80 * MEGA /* Hz */},
.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO,
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 1b400de00f51..b487e3fe770a 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -321,7 +321,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
/* device reports out of range channel id */
if (hf->channel >= GS_MAX_INTF)
- goto resubmit_urb;
+ goto device_detach;
dev = usbcan->canch[hf->channel];
@@ -357,9 +357,6 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += hf->can_dlc;
-
txc = gs_get_tx_context(dev, hf->echo_id);
/* bad devices send bad echo_ids. */
@@ -370,7 +367,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- can_get_echo_skb(netdev, hf->echo_id, NULL);
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, hf->echo_id,
+ NULL);
gs_free_tx_context(txc);
@@ -406,6 +405,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
+ device_detach:
for (rc = 0; rc < GS_MAX_INTF; rc++) {
if (usbcan->canch[rc])
netif_device_detach(usbcan->canch[rc]->netdev);
@@ -507,6 +507,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
hf->echo_id = idx;
hf->channel = dev->channel;
+ hf->flags = 0;
+ hf->reserved = 0;
cf = (struct can_frame *)skb->data;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 390b6bde883c..3a49257f9fa6 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -77,7 +77,6 @@ struct kvaser_usb_dev_card_data {
struct kvaser_usb_tx_urb_context {
struct kvaser_usb_net_priv *priv;
u32 echo_index;
- int dlc;
};
struct kvaser_usb {
@@ -162,8 +161,8 @@ struct kvaser_usb_dev_ops {
void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf,
int len);
void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid);
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid);
};
struct kvaser_usb_dev_cfg {
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 0cc0fc866a2a..c4b4d3d0a387 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -279,8 +279,6 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -567,7 +565,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
goto freeurb;
}
- buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
+ buf = dev->ops->dev_frame_to_cmd(priv, skb, &cmd_len,
context->echo_index);
if (!buf) {
stats->tx_dropped++;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index dcee8dc828ec..a26823c5b62a 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <linux/usb.h>
#include <linux/can.h>
@@ -295,6 +296,7 @@ struct kvaser_cmd {
#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1)
#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4)
#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5)
+#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6)
/* CAN frame flags. Used in ext_rx_can and ext_tx_can */
#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12)
#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13)
@@ -869,7 +871,6 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
struct net_device *netdev = priv->netdev;
struct can_frame *cf;
struct sk_buff *skb;
- struct net_device_stats *stats;
enum can_state new_state, old_state;
old_state = priv->can.state;
@@ -919,9 +920,6 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
cf->data[6] = bec->txerr;
cf->data[7] = bec->rxerr;
- stats = &netdev->stats;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1074,8 +1072,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
priv->bec.txerr = bec.txerr;
@@ -1109,8 +1105,6 @@ static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv,
}
stats->tx_errors++;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1120,7 +1114,9 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_tx_urb_context *context;
struct kvaser_usb_net_priv *priv;
unsigned long irq_flags;
+ unsigned int len;
bool one_shot_fail = false;
+ bool is_err_frame = false;
u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
@@ -1139,24 +1135,28 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
kvaser_usb_hydra_one_shot_fail(priv, cmd_ext);
one_shot_fail = true;
}
+
+ is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK &&
+ flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME;
}
context = &priv->tx_contexts[transid % dev->max_tx_urbs];
- if (!one_shot_fail) {
- struct net_device_stats *stats = &priv->netdev->stats;
-
- stats->tx_packets++;
- stats->tx_bytes += can_fd_dlc2len(context->dlc);
- }
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
- can_get_echo_skb(priv->netdev, context->echo_index, NULL);
+ len = can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags);
+
+ if (!one_shot_fail && !is_err_frame) {
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ }
}
static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
@@ -1208,13 +1208,15 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
cf->len = can_cc_dlc2len(cmd->rx_can.dlc);
- if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, cmd->rx_can.data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
@@ -1286,13 +1288,15 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
cf->len = can_cc_dlc2len(dlc);
}
- if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len);
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
@@ -1371,8 +1375,8 @@ static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev,
static void *
kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd_ext *cmd;
@@ -1384,8 +1388,6 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
u32 kcan_id;
u32 kcan_header;
- *frame_len = nbr_of_bytes;
-
cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1451,8 +1453,8 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
static void *
kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
@@ -1460,8 +1462,6 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
u32 flags;
u32 id;
- *frame_len = cf->len;
-
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1495,7 +1495,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
cmd->tx_can.id = cpu_to_le32(id);
cmd->tx_can.flags = flags;
- memcpy(cmd->tx_can.data, cf->data, *frame_len);
+ memcpy(cmd->tx_can.data, cf->data, cf->len);
return cmd;
}
@@ -2003,17 +2003,17 @@ static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev,
static void *
kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
void *buf;
if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD)
- buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len,
- cmd_len, transid);
+ buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, cmd_len,
+ transid);
else
- buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len,
- cmd_len, transid);
+ buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, cmd_len,
+ transid);
return buf;
}
@@ -2040,7 +2040,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
.clock = {
- .freq = 80000000,
+ .freq = 80 * MEGA /* Hz */,
},
.timestamp_freq = 80,
.bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
@@ -2049,7 +2049,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
.clock = {
- .freq = 24000000,
+ .freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
@@ -2057,7 +2057,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = {
.clock = {
- .freq = 80000000,
+ .freq = 80 * MEGA /* Hz */,
},
.timestamp_freq = 24,
.bittiming_const = &kvaser_usb_hydra_rt_bittiming_c,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index f7af1bf5ab46..c805b999c543 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -19,6 +19,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <linux/usb.h>
#include <linux/can.h>
@@ -356,7 +357,7 @@ static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
.clock = {
- .freq = 8000000,
+ .freq = 8 * MEGA /* Hz */,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
@@ -364,7 +365,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
.clock = {
- .freq = 16000000,
+ .freq = 16 * MEGA /* Hz */,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
@@ -372,7 +373,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
.clock = {
- .freq = 24000000,
+ .freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
@@ -380,7 +381,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
.clock = {
- .freq = 32000000,
+ .freq = 32 * MEGA /* Hz */,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
@@ -388,16 +389,14 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
u8 *cmd_tx_can_flags = NULL; /* GCC */
struct can_frame *cf = (struct can_frame *)skb->data;
- *frame_len = cf->len;
-
cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
if (cmd) {
cmd->u.tx_can.tid = transid & 0xff;
@@ -641,8 +640,6 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
if (skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
} else {
netdev_err(priv->netdev,
@@ -655,12 +652,11 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
- stats->tx_packets++;
- stats->tx_bytes += context->dlc;
-
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- can_get_echo_skb(priv->netdev, context->echo_index, NULL);
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(priv->netdev,
+ context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
@@ -843,8 +839,6 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
cf->data[6] = es->txerr;
cf->data[7] = es->rxerr;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1071,7 +1065,8 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
}
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index a1a154c08b7f..77bddff86252 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -64,7 +64,6 @@
struct mcba_usb_ctx {
struct mcba_priv *priv;
u32 ndx;
- u8 dlc;
bool can;
};
@@ -184,13 +183,10 @@ static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv,
ctx = &priv->tx_context[i];
ctx->ndx = i;
- if (cf) {
+ if (cf)
ctx->can = true;
- ctx->dlc = cf->len;
- } else {
+ else
ctx->can = false;
- ctx->dlc = 0;
- }
atomic_dec(&priv->free_ctx_cnt);
break;
@@ -236,10 +232,10 @@ static void mcba_usb_write_bulk_callback(struct urb *urb)
return;
netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += ctx->dlc;
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx,
+ NULL);
can_led_event(netdev, CAN_LED_EVENT_TX);
- can_get_echo_skb(netdev, ctx->ndx, NULL);
}
if (urb->status)
@@ -450,15 +446,16 @@ static void mcba_usb_process_can(struct mcba_priv *priv,
cf->can_id = (sid & 0xffe0) >> 5;
}
- if (msg->dlc & MCBA_DLC_RTR_MASK)
- cf->can_id |= CAN_RTR_FLAG;
-
cf->len = can_cc_dlc2len(msg->dlc & MCBA_DLC_MASK);
- memcpy(cf->data, msg->data, cf->len);
+ if (msg->dlc & MCBA_DLC_RTR_MASK) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ memcpy(cf->data, msg->data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
can_led_event(priv->netdev, CAN_LED_EVENT_RX);
netif_rx(skb);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 876218752766..17dc178f555b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -8,6 +8,7 @@
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
+#include <asm/unaligned.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
#include <linux/module.h>
@@ -520,8 +521,6 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
&hwts->hwtstamp);
}
- mc->netdev->stats.rx_packets++;
- mc->netdev->stats.rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -678,15 +677,16 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
/* Ignore next byte (client private id) if SRR bit is set */
if (can_id_flags & PCAN_USB_TX_SRR)
mc->ptr++;
+
+ /* update statistics */
+ mc->netdev->stats.rx_bytes += cf->len;
}
+ mc->netdev->stats.rx_packets++;
/* convert timestamp into kernel time */
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp);
- /* update statistics */
- mc->netdev->stats.rx_packets++;
- mc->netdev->stats.rx_bytes += cf->len;
/* push the skb */
netif_rx(skb);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 6107fef9f4a0..b850ff8fe4bd 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -291,6 +291,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
struct peak_tx_urb_context *context = urb->context;
struct peak_usb_device *dev;
struct net_device *netdev;
+ int tx_bytes;
BUG_ON(!context);
@@ -305,10 +306,6 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
/* check tx status */
switch (urb->status) {
case 0:
- /* transmission complete */
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += context->data_len;
-
/* prevent tx timeout */
netif_trans_update(netdev);
break;
@@ -327,12 +324,17 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
}
/* should always release echo skb and corresponding context */
- can_get_echo_skb(netdev, context->echo_index, NULL);
+ tx_bytes = can_get_echo_skb(netdev, context->echo_index, NULL);
context->echo_index = PCAN_USB_MAX_TX_URBS;
- /* do wakeup tx queue in case of success only */
- if (!urb->status)
+ if (!urb->status) {
+ /* transmission complete */
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += tx_bytes;
+
+		/* wake the tx queue only on success */
netif_wake_queue(netdev);
+ }
}
/*
@@ -344,7 +346,6 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
struct peak_usb_device *dev = netdev_priv(netdev);
struct peak_tx_urb_context *context = NULL;
struct net_device_stats *stats = &netdev->stats;
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct urb *urb;
u8 *obuf;
int i, err;
@@ -378,9 +379,6 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
context->echo_index = i;
- /* Note: this works with CANFD frames too */
- context->data_len = cfd->len;
-
usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, context->echo_index, 0);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index daa19f57e742..f60af573a2e0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -99,7 +99,6 @@ struct peak_time_ref {
struct peak_tx_urb_context {
struct peak_usb_device *dev;
u32 echo_index;
- u8 data_len;
struct urb *urb;
};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 6bd12549f101..65487ec33566 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -507,13 +507,13 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
if (rx_msg_flags & PUCAN_MSG_EXT_ID)
cfd->can_id |= CAN_EFF_FLAG;
- if (rx_msg_flags & PUCAN_MSG_RTR)
+ if (rx_msg_flags & PUCAN_MSG_RTR) {
cfd->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cfd->data, rm->d, cfd->len);
-
+ netdev->stats.rx_bytes += cfd->len;
+ }
netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += cfd->len;
peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low),
le32_to_cpu(rm->ts_high));
@@ -577,9 +577,6 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += cf->len;
-
peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low),
le32_to_cpu(sm->ts_high));
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 858ab22708fc..ebe087f258e3 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -536,17 +536,19 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
if (rx->flags & PCAN_USBPRO_EXT)
can_frame->can_id |= CAN_EFF_FLAG;
- if (rx->flags & PCAN_USBPRO_RTR)
+ if (rx->flags & PCAN_USBPRO_RTR) {
can_frame->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(can_frame->data, rx->data, can_frame->len);
+ netdev->stats.rx_bytes += can_frame->len;
+ }
+ netdev->stats.rx_packets++;
+
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32),
&hwts->hwtstamp);
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += can_frame->len;
netif_rx(skb);
return 0;
@@ -660,8 +662,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp);
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += can_frame->len;
netif_rx(skb);
return 0;
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 1679cbe45ded..c7c41d1fd038 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -259,7 +259,6 @@ struct ucan_priv;
/* Context Information for transmission URBs */
struct ucan_urb_context {
struct ucan_priv *up;
- u8 dlc;
bool allocated;
};
@@ -621,8 +620,11 @@ static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m)
memcpy(cf->data, m->msg.can_msg.data, cf->len);
/* don't count error frames as real packets */
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_ERR_FLAG)) {
+ stats->rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
+ }
/* pass it to Linux */
netif_rx(skb);
@@ -634,7 +636,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
{
unsigned long flags;
u16 count, i;
- u8 echo_index, dlc;
+ u8 echo_index;
u16 len = le16_to_cpu(m->len);
struct ucan_urb_context *context;
@@ -658,7 +660,6 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
/* gather information from the context */
context = &up->context_array[echo_index];
- dlc = READ_ONCE(context->dlc);
/* Release context and restart queue if necessary.
* Also check if the context was allocated
@@ -671,8 +672,8 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
UCAN_TX_COMPLETE_SUCCESS) {
/* update statistics */
up->netdev->stats.tx_packets++;
- up->netdev->stats.tx_bytes += dlc;
- can_get_echo_skb(up->netdev, echo_index, NULL);
+ up->netdev->stats.tx_bytes +=
+ can_get_echo_skb(up->netdev, echo_index, NULL);
} else {
up->netdev->stats.tx_dropped++;
can_free_echo_skb(up->netdev, echo_index, NULL);
@@ -1086,8 +1087,6 @@ static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
}
m->len = cpu_to_le16(mlen);
- context->dlc = cf->len;
-
m->subtype = echo_index;
/* build the urb */
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d1b83bd1b3cb..431af1ec1e3c 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -114,15 +114,12 @@ struct usb_8dev_tx_urb_context {
struct usb_8dev_priv *priv;
u32 echo_index;
- u8 dlc;
};
/* Structure to hold all of our device specific stuff */
struct usb_8dev_priv {
struct can_priv can; /* must be the first member */
- struct sk_buff *echo_skb[MAX_TX_URBS];
-
struct usb_device *udev;
struct net_device *netdev;
@@ -449,8 +446,6 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -476,13 +471,14 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
if (msg->flags & USB_8DEV_EXTID)
cf->can_id |= CAN_EFF_FLAG;
- if (msg->flags & USB_8DEV_RTR)
+ if (msg->flags & USB_8DEV_RTR) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, msg->data, cf->len);
-
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
can_led_event(priv->netdev, CAN_LED_EVENT_RX);
@@ -584,9 +580,7 @@ static void usb_8dev_write_bulk_callback(struct urb *urb)
urb->status);
netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += context->dlc;
-
- can_get_echo_skb(netdev, context->echo_index, NULL);
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL);
can_led_event(netdev, CAN_LED_EVENT_TX);
@@ -657,7 +651,6 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
context->priv = priv;
context->echo_index = i;
- context->dlc = cf->len;
usb_fill_bulk_urb(urb, priv->udev,
usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX),
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 067705e2850b..c42f18845b02 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -87,13 +87,14 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
- int loop;
+ int loop, len;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
+ len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
stats->tx_packets++;
- stats->tx_bytes += cfd->len;
+ stats->tx_bytes += len;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -105,7 +106,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += cfd->len;
+ stats->rx_bytes += len;
}
consume_skb(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 8861a7d875e7..47ccc15a3486 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -62,7 +62,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- len = cfd->len;
+ len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index e2b15d29d15e..1674b561c9a2 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -787,10 +787,11 @@ static int xcan_rx(struct net_device *ndev, int frame_base)
*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
if (cf->len > 4)
*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
- }
- stats->rx_bytes += cf->len;
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
+
netif_receive_skb(skb);
return 1;
@@ -871,8 +872,11 @@ static int xcanfd_rx(struct net_device *ndev, int frame_base)
*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
}
}
- stats->rx_bytes += cf->len;
+
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
stats->rx_packets++;
+
netif_receive_skb(skb);
return 1;
@@ -965,13 +969,8 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
- if (skb) {
- struct net_device_stats *stats = &ndev->stats;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (skb)
netif_rx(skb);
- }
}
}
@@ -1095,8 +1094,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (skb) {
skb_cf->can_id |= cf.can_id;
memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
- stats->rx_packets++;
- stats->rx_bytes += CAN_ERR_DLC;
netif_rx(skb);
}
}
@@ -1761,7 +1758,12 @@ static int xcan_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
/* Get IRQ for the device */
- ndev->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_free;
+
+ ndev->irq = ret;
+
ndev->flags |= IFF_ECHO; /* We support local echo */
platform_set_drvdata(pdev, ndev);
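
Both the ti_hecc and xilinx_can hunks switch from fetching an IRQ resource to platform_get_irq(), which returns either a valid IRQ number or a negative errno that can be propagated directly. A condensed sketch of the pattern in a hypothetical probe context, not a complete driver:

static int demo_probe(struct platform_device *pdev,
		      struct net_device *ndev)
{
	int ret;

	/* platform_get_irq() logs on failure and returns a negative
	 * errno, so the result can be forwarded as-is.
	 */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;

	ndev->irq = ret;
	return 0;
}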
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index af4761968733..3867f3d4545f 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1860,7 +1860,8 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_mdb_del);
-int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
+int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ bool *tx_fwd_offload)
{
struct b53_device *dev = ds->priv;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
@@ -1887,7 +1888,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
b53_for_each_port(dev, i) {
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Add this local port to the remote port VLAN control
@@ -1911,7 +1912,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
}
EXPORT_SYMBOL(b53_br_join);
-void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
+void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
@@ -1923,7 +1924,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
b53_for_each_port(dev, i) {
/* Don't touch the remaining ports */
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
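
The bridge hunks here track the DSA cross-chip rework: port_bridge_join()/leave() now receive a struct dsa_bridge by value, and membership is tested with dsa_port_offloads_bridge() instead of comparing bridge_dev pointers. A minimal sketch of the new shape, mirroring the b53 usage above; the port loop and body are placeholders:

static int demo_br_join(struct dsa_switch *ds, int port,
			struct dsa_bridge bridge, bool *tx_fwd_offload)
{
	int i;

	for (i = 0; i < ds->num_ports; i++) {
		/* Only touch ports that offload this same bridge. */
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		/* ... update port isolation / VLAN masks here ... */
	}

	return 0;
}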
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 579da74ada64..b41dc8ac2ca8 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -324,8 +324,9 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
-int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
-void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
+int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ bool *tx_fwd_offload);
+void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
int b53_br_flags_pre(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 13aa43b5cffd..33499fcd8848 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -62,6 +62,38 @@ static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
return REG_SWITCH_STATUS;
}
+static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
+{
+ switch (port) {
+ case 0:
+ return REG_LED_0_CNTRL;
+ case 1:
+ return REG_LED_1_CNTRL;
+ case 2:
+ return REG_LED_2_CNTRL;
+ }
+
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ switch (port) {
+ case 3:
+ return REG_LED_3_CNTRL;
+ case 7:
+ return REG_LED_4_CNTRL;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ONCE(1, "Unsupported port %d\n", port);
+
+ /* RO fallback reg */
+ return REG_SWITCH_STATUS;
+}
+
/* Return the number of active ports, not counting the IMP (CPU) port */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
@@ -187,9 +219,14 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
/* Use PHY-driven LED signaling */
if (!enable) {
- reg = reg_readl(priv, REG_LED_CNTRL(0));
- reg |= SPDLNK_SRC_SEL;
- reg_writel(priv, reg, REG_LED_CNTRL(0));
+ u16 led_ctrl = bcm_sf2_reg_led_base(priv, 0);
+
+ if (priv->type == BCM7278_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID) {
+ reg = reg_led_readl(priv, led_ctrl, 0);
+ reg |= LED_CNTRL_SPDLNK_SRC_SEL;
+ reg_led_writel(priv, reg, led_ctrl, 0);
+ }
}
}
@@ -1232,9 +1269,14 @@ static const u16 bcm_sf2_4908_reg_offsets[] = {
[REG_SPHY_CNTRL] = 0x24,
[REG_CROSSBAR] = 0xc8,
[REG_RGMII_11_CNTRL] = 0x014c,
- [REG_LED_0_CNTRL] = 0x40,
- [REG_LED_1_CNTRL] = 0x4c,
- [REG_LED_2_CNTRL] = 0x58,
+ [REG_LED_0_CNTRL] = 0x40,
+ [REG_LED_1_CNTRL] = 0x4c,
+ [REG_LED_2_CNTRL] = 0x58,
+ [REG_LED_3_CNTRL] = 0x64,
+ [REG_LED_4_CNTRL] = 0x88,
+ [REG_LED_5_CNTRL] = 0xa0,
+ [REG_LED_AGGREGATE_CTRL] = 0xb8,
+
};
static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 0d48402068d3..00afc94ce522 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -210,6 +210,16 @@ SF2_IO_MACRO(acb);
SWITCH_INTR_L2(0);
SWITCH_INTR_L2(1);
+static inline u32 reg_led_readl(struct bcm_sf2_priv *priv, u16 off, u16 reg)
+{
+ return readl_relaxed(priv->reg + priv->reg_offsets[off] + reg);
+}
+
+static inline void reg_led_writel(struct bcm_sf2_priv *priv, u32 val, u16 off, u16 reg)
+{
+ writel_relaxed(val, priv->reg + priv->reg_offsets[off] + reg);
+}
+
/* RXNFC */
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs);
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 7bffc80f241f..da0dedbd6555 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -25,6 +25,10 @@ enum bcm_sf2_reg_offs {
REG_LED_0_CNTRL,
REG_LED_1_CNTRL,
REG_LED_2_CNTRL,
+ REG_LED_3_CNTRL,
+ REG_LED_4_CNTRL,
+ REG_LED_5_CNTRL,
+ REG_LED_AGGREGATE_CTRL,
REG_SWITCH_REG_MAX,
};
@@ -56,6 +60,63 @@ enum bcm_sf2_reg_offs {
#define CROSSBAR_BCM4908_EXT_GPHY4 1
#define CROSSBAR_BCM4908_EXT_RGMII 2
+/* Relative to REG_LED_*_CNTRL (BCM7278, BCM7445) */
+#define LED_CNTRL_NO_LINK_ENCODE_SHIFT 0
+#define LED_CNTRL_M10_ENCODE_SHIFT 2
+#define LED_CNTRL_M100_ENCODE_SHIFT 4
+#define LED_CNTRL_M1000_ENCODE_SHIFT 6
+#define LED_CNTRL_SEL_NO_LINK_ENCODE_SHIFT 8
+#define LED_CNTRL_SEL_10M_ENCODE_SHIFT 10
+#define LED_CNTRL_SEL_100M_ENCODE_SHIFT 12
+#define LED_CNTRL_SEL_1000M_ENCODE_SHIFT 14
+#define LED_CNTRL_RX_DV_EN (1 << 16)
+#define LED_CNTRL_TX_EN_EN (1 << 17)
+#define LED_CNTRL_SPDLNK_LED0_ACT_SEL_SHIFT 18
+#define LED_CNTRL_SPDLNK_LED1_ACT_SEL_SHIFT 20
+#define LED_CNTRL_ACT_LED_ACT_SEL_SHIFT 22
+#define LED_CNTRL_SPDLNK_SRC_SEL (1 << 24)
+#define LED_CNTRL_SPDLNK_LED0_ACT_POL_SEL (1 << 25)
+#define LED_CNTRL_SPDLNK_LED1_ACT_POL_SEL (1 << 26)
+#define LED_CNTRL_ACT_LED_POL_SEL (1 << 27)
+#define LED_CNTRL_MASK 0x3
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_CTRL 0x0
+#define LED_CTRL_RX_ACT_EN 0x00000001
+#define LED_CTRL_TX_ACT_EN 0x00000002
+#define LED_CTRL_SPDLNK_LED0_ACT_SEL 0x00000004
+#define LED_CTRL_SPDLNK_LED1_ACT_SEL 0x00000008
+#define LED_CTRL_SPDLNK_LED2_ACT_SEL 0x00000010
+#define LED_CTRL_ACT_LED_ACT_SEL 0x00000020
+#define LED_CTRL_SPDLNK_LED0_ACT_POL_SEL 0x00000040
+#define LED_CTRL_SPDLNK_LED1_ACT_POL_SEL 0x00000080
+#define LED_CTRL_SPDLNK_LED2_ACT_POL_SEL 0x00000100
+#define LED_CTRL_ACT_LED_POL_SEL 0x00000200
+#define LED_CTRL_LED_SPD_OVRD 0x00001c00
+#define LED_CTRL_LNK_STATUS_OVRD 0x00002000
+#define LED_CTRL_SPD_OVRD_EN 0x00004000
+#define LED_CTRL_LNK_OVRD_EN 0x00008000
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_LINK_SPEED_ENC_SEL 0x4
+#define LED_LINK_SPEED_ENC_SEL_NO_LINK_SHIFT 0
+#define LED_LINK_SPEED_ENC_SEL_10M_SHIFT 3
+#define LED_LINK_SPEED_ENC_SEL_100M_SHIFT 6
+#define LED_LINK_SPEED_ENC_SEL_1000M_SHIFT 9
+#define LED_LINK_SPEED_ENC_SEL_2500M_SHIFT 12
+#define LED_LINK_SPEED_ENC_SEL_10G_SHIFT 15
+#define LED_LINK_SPEED_ENC_SEL_MASK 0x7
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_LINK_SPEED_ENC 0x8
+#define LED_LINK_SPEED_ENC_NO_LINK_SHIFT 0
+#define LED_LINK_SPEED_ENC_M10_SHIFT 3
+#define LED_LINK_SPEED_ENC_M100_SHIFT 6
+#define LED_LINK_SPEED_ENC_M1000_SHIFT 9
+#define LED_LINK_SPEED_ENC_M2500_SHIFT 12
+#define LED_LINK_SPEED_ENC_M10G_SHIFT 15
+#define LED_LINK_SPEED_ENC_MASK 0x7
+
/* Relative to REG_RGMII_CNTRL */
#define RGMII_MODE_EN (1 << 0)
#define ID_MODE_DIS (1 << 1)
@@ -73,10 +134,6 @@ enum bcm_sf2_reg_offs {
#define LPI_COUNT_SHIFT 9
#define LPI_COUNT_MASK 0x3F
-#define REG_LED_CNTRL(x) (REG_LED_0_CNTRL + (x))
-
-#define SPDLNK_SRC_SEL (1 << 24)
-
/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
#define INTRL2_CPU_STATUS 0x00
#define INTRL2_CPU_SET 0x04
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index e638e3eea911..33daaf10c488 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -167,19 +167,20 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
}
static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
- __func__, port, bridge->name);
+ __func__, port, bridge.dev->name);
return 0;
}
static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
- __func__, port, bridge->name);
+ __func__, port, bridge.dev->name);
}
static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 4e0b53d94b52..726f267cb228 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -674,7 +674,8 @@ static int hellcreek_bridge_flags(struct dsa_switch *ds, int port,
}
static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload)
{
struct hellcreek *hellcreek = ds->priv;
@@ -691,7 +692,7 @@ static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
}
static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct hellcreek *hellcreek = ds->priv;
@@ -710,8 +711,9 @@ static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
u16 meta = 0;
dev_dbg(hellcreek->dev, "Add static FDB entry: MAC=%pM, MASK=0x%02x, "
- "OBT=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac, entry->portmask,
- entry->is_obt, entry->reprio_en, entry->reprio_tc);
+ "OBT=%d, PASS_BLOCKED=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac,
+ entry->portmask, entry->is_obt, entry->pass_blocked,
+ entry->reprio_en, entry->reprio_tc);
/* Add mac address */
hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH);
@@ -722,6 +724,8 @@ static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT;
if (entry->is_obt)
meta |= HR_FDBWRM0_OBT;
+ if (entry->pass_blocked)
+ meta |= HR_FDBWRM0_PASS_BLOCKED;
if (entry->reprio_en) {
meta |= HR_FDBWRM0_REPRIO_EN;
meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT;
@@ -1049,7 +1053,7 @@ static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
{
- static struct hellcreek_fdb_entry ptp = {
+ static struct hellcreek_fdb_entry l2_ptp = {
/* MAC: 01-1B-19-00-00-00 */
.mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
@@ -1060,24 +1064,94 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry p2p = {
+ static struct hellcreek_fdb_entry udp4_ptp = {
+ /* MAC: 01-00-5E-00-01-81 */
+ .mac = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry udp6_ptp = {
+ /* MAC: 33-33-00-00-01-81 */
+ .mac = { 0x33, 0x33, 0x00, 0x00, 0x01, 0x81 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry l2_p2p = {
/* MAC: 01-80-C2-00-00-0E */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
.portmask = 0x03, /* Management ports */
.age = 0,
.is_obt = 0,
- .pass_blocked = 0,
+ .pass_blocked = 1,
.is_static = 1,
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
+ static struct hellcreek_fdb_entry udp4_p2p = {
+ /* MAC: 01-00-5E-00-00-6B */
+ .mac = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x6b },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry udp6_p2p = {
+ /* MAC: 33-33-00-00-00-6B */
+ .mac = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x6b },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry stp = {
+ /* MAC: 01-80-C2-00-00-00 */
+ .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
int ret;
mutex_lock(&hellcreek->reg_lock);
- ret = __hellcreek_fdb_add(hellcreek, &ptp);
+ ret = __hellcreek_fdb_add(hellcreek, &l2_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp4_ptp);
if (ret)
goto out;
- ret = __hellcreek_fdb_add(hellcreek, &p2p);
+ ret = __hellcreek_fdb_add(hellcreek, &udp6_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &l2_p2p);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp4_p2p);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp6_p2p);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &stp);
out:
mutex_unlock(&hellcreek->reg_lock);
@@ -1384,14 +1458,19 @@ static void hellcreek_teardown(struct dsa_switch *ds)
dsa_devlink_resources_unregister(ds);
}
-static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void hellcreek_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct hellcreek *hellcreek = ds->priv;
- dev_dbg(hellcreek->dev, "Phylink validate for port %d\n", port);
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
+
+ /* Include GMII - the hardware does not support this interface
+ * mode, but it's the default interface mode for phylib, so we
+ * need it for compatibility with existing DT.
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
/* The MAC settings are a hardware configuration option and cannot be
* changed at run time or by strapping. Therefore the attached PHYs
@@ -1399,12 +1478,9 @@ static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
* by the hardware.
*/
if (hellcreek->pdata->is_100_mbits)
- phylink_set(mask, 100baseT_Full);
+ config->mac_capabilities = MAC_100FD;
else
- phylink_set(mask, 1000baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ config->mac_capabilities = MAC_1000FD;
}
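/* Editor's sketch (not from this patch): the phylink_validate ->
 * phylink_get_caps conversion replaces per-call ethtool link-mode
 * masking with a one-shot declaration: the driver marks the PHY
 * interface modes the port accepts and the MAC's speed/duplex/pause
 * capabilities, and phylink derives the legal link modes itself.
 * Schematically ("foo" is hypothetical):
 */
static void foo_phylink_get_caps(struct dsa_switch *ds, int port,
				 struct phylink_config *config)
{
	/* Interface modes the port can physically use ... */
	__set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);

	/* ... and what the MAC can do over them. */
	config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;
}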
static int
@@ -1755,7 +1831,7 @@ static const struct dsa_switch_ops hellcreek_ds_ops = {
.get_strings = hellcreek_get_strings,
.get_tag_protocol = hellcreek_get_tag_protocol,
.get_ts_info = hellcreek_get_ts_info,
- .phylink_validate = hellcreek_phylink_validate,
+ .phylink_get_caps = hellcreek_phylink_get_caps,
.port_bridge_flags = hellcreek_bridge_flags,
.port_bridge_join = hellcreek_port_bridge_join,
.port_bridge_leave = hellcreek_port_bridge_leave,
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index 40b41c794dfa..b3bc948d6145 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -52,10 +52,6 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
*/
clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
- /* Reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_ON:
tx_tstamp_enable = true;
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 89f920289ae2..d55784d19fa4 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1103,12 +1103,13 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
}
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(port %d)\n", __func__, port);
- if (dsa_to_port(ds, 1)->bridge_dev == dsa_to_port(ds, 2)->bridge_dev) {
+ if (dsa_port_bridge_same(dsa_to_port(ds, 1), dsa_to_port(ds, 2))) {
lan9303_bridge_ports(chip);
chip->is_bridged = true; /* unleash stp_state_set() */
}
@@ -1117,7 +1118,7 @@ static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
}
static void lan9303_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct lan9303 *chip = ds->priv;
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 7056d98d8177..46ed953e787e 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -759,7 +759,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
/* Do not allow changing the VLAN filtering options while in bridge */
@@ -1146,16 +1146,18 @@ static int gswip_vlan_remove(struct gswip_priv *priv,
}
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload)
{
+ struct net_device *br = bridge.dev;
struct gswip_priv *priv = ds->priv;
int err;
/* When the bridge uses VLAN filtering, we have to configure
 * VLAN-specific bridges. No bridge is configured here.
*/
- if (!br_vlan_enabled(bridge)) {
- err = gswip_vlan_add_unaware(priv, bridge, port);
+ if (!br_vlan_enabled(br)) {
+ err = gswip_vlan_add_unaware(priv, br, port);
if (err)
return err;
priv->port_vlan_filter &= ~BIT(port);
@@ -1166,8 +1168,9 @@ static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
}
static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
+ struct net_device *br = bridge.dev;
struct gswip_priv *priv = ds->priv;
gswip_add_single_port_br(priv, port, true);
@@ -1175,16 +1178,16 @@ static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
/* When the bridge uses VLAN filtering, we have to configure
 * VLAN-specific bridges. No bridge is configured here.
*/
- if (!br_vlan_enabled(bridge))
- gswip_vlan_remove(priv, bridge, port, 0, true, false);
+ if (!br_vlan_enabled(br))
+ gswip_vlan_remove(priv, br, port, 0, true, false);
}
static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
unsigned int max_ports = priv->hw_info->max_ports;
int pos = max_ports;
int i, idx = -1;
@@ -1229,8 +1232,8 @@ static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
int err;
@@ -1254,8 +1257,8 @@ static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
/* We have to receive all packets on the CPU port and should not
@@ -1340,8 +1343,8 @@ static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
static int gswip_port_fdb(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid, bool add)
{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
struct gswip_pce_table_entry mac_bridge = {0,};
unsigned int cpu_port = priv->hw_info->cpu_port;
int fid = -1;
@@ -1438,114 +1441,70 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
-static void gswip_phylink_set_capab(unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- /* With the exclusion of MII, Reverse MII and Reduced MII, we
- * support Gigabit, including Half duplex
- */
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_RMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
switch (port) {
case 0:
case 1:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_RMII)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
break;
+
case 2:
case 3:
case 4:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
+
case 5:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
}
- gswip_phylink_set_capab(supported, state);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
- phy_modes(state->interface), port);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
}
-static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
switch (port) {
case 0:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_RMII)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
break;
+
case 1:
case 2:
case 3:
case 4:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
+
case 5:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_RMII)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
}
- gswip_phylink_set_capab(supported, state);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
- phy_modes(state->interface), port);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
}
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
@@ -1827,7 +1786,7 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
- .phylink_validate = gswip_xrx200_phylink_validate,
+ .phylink_get_caps = gswip_xrx200_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
.phylink_mac_link_up = gswip_phylink_mac_link_up,
@@ -1851,7 +1810,7 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
- .phylink_validate = gswip_xrx300_phylink_validate,
+ .phylink_get_caps = gswip_xrx300_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
.phylink_mac_link_up = gswip_phylink_mac_link_up,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 013e9c02be71..991b9c6b6ce7 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
+#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 8a04302018dc..55dbda04ea62 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -43,7 +43,7 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
continue;
if (port == i)
continue;
- if (!dp->bridge_dev || dp->bridge_dev != other_dp->bridge_dev)
+ if (!dsa_port_bridge_same(dp, other_dp))
continue;
if (other_p->stp_state == BR_STATE_FORWARDING &&
@@ -192,7 +192,8 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload)
{
/* port_stp_state_set() will be called afterwards to put the port in
 * the appropriate state, so there is no need to do anything.
@@ -203,7 +204,7 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
/* port_stp_state_set() will be called afterwards to put the port in
 * the forwarding state, so there is no need to do anything.
@@ -301,7 +302,6 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
struct alu_struct alu;
int index;
- int ret = 0;
for (index = 0; index < dev->num_statics; index++) {
if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
@@ -323,7 +323,7 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port,
dev->dev_ops->w_sta_mac_table(dev, index, &alu);
exit:
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 54b456bc8972..df8ae59c8525 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -155,9 +155,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br);
+ struct dsa_bridge bridge, bool *tx_fwd_offload);
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br);
+ struct dsa_bridge bridge);
void ksz_port_fast_age(struct dsa_switch *ds, int port);
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 9890672a206d..b82512e5b33b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1186,29 +1186,33 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge, bool *tx_fwd_offload)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
u32 port_bitmap = BIT(MT7530_CPU_PORT);
- int i;
+ struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ int other_port = other_dp->index;
+
+ if (dp == other_dp)
+ continue;
+
/* Add this port to the port matrix of the other ports in the
 * same bridge. If the port is disabled, the port matrix is kept
 * and not set up until the port becomes enabled.
*/
- if (dsa_is_user_port(ds, i) && i != port) {
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
- continue;
- if (priv->ports[i].enable)
- mt7530_set(priv, MT7530_PCR_P(i),
- PCR_MATRIX(BIT(port)));
- priv->ports[i].pm |= PCR_MATRIX(BIT(port));
+ if (!dsa_port_offloads_bridge(other_dp, &bridge))
+ continue;
- port_bitmap |= BIT(i);
- }
+ if (priv->ports[other_port].enable)
+ mt7530_set(priv, MT7530_PCR_P(other_port),
+ PCR_MATRIX(BIT(port)));
+ priv->ports[other_port].pm |= PCR_MATRIX(BIT(port));
+
+ port_bitmap |= BIT(other_port);
}
/* Add all the other ports to this port matrix. */
@@ -1236,7 +1240,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
/* This is called after .port_bridge_leave when leaving a VLAN-aware
* bridge. Don't set standalone ports to fallback mode.
*/
- if (dsa_to_port(ds, port)->bridge_dev)
+ if (dsa_port_bridge_dev_get(dsa_to_port(ds, port)))
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
@@ -1299,26 +1303,30 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
struct mt7530_priv *priv = ds->priv;
- int i;
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ int other_port = other_dp->index;
+
+ if (dp == other_dp)
+ continue;
+
/* Remove this port from the port matrix of the other ports
 * in the same bridge. If the port is disabled, the port matrix
 * is kept and not set up until the port becomes enabled.
*/
- if (dsa_is_user_port(ds, i) && i != port) {
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
- continue;
- if (priv->ports[i].enable)
- mt7530_clear(priv, MT7530_PCR_P(i),
- PCR_MATRIX(BIT(port)));
- priv->ports[i].pm &= ~PCR_MATRIX(BIT(port));
- }
+ if (!dsa_port_offloads_bridge(other_dp, &bridge))
+ continue;
+
+ if (priv->ports[other_port].enable)
+ mt7530_clear(priv, MT7530_PCR_P(other_port),
+ PCR_MATRIX(BIT(port)));
+ priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port));
}
/* Set the cpu port to be the only one in the port matrix of
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index cd8462d1e27c..58ca684d73f7 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1241,8 +1241,7 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
{
struct dsa_switch *ds = chip->ds;
struct dsa_switch_tree *dst = ds->dst;
- struct net_device *br;
- struct dsa_port *dp;
+ struct dsa_port *dp, *other_dp;
bool found = false;
u16 pvlan;
@@ -1251,11 +1250,9 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
list_for_each_entry(dp, &dst->ports, list) {
if (dp->ds->index == dev && dp->index == port) {
/* dp might be a DSA link or a user port, so it
- * might or might not have a bridge_dev
- * pointer. Use the "found" variable for both
- * cases.
+ * might or might not have a bridge.
+ * Use the "found" variable for both cases.
*/
- br = dp->bridge_dev;
found = true;
break;
}
@@ -1263,13 +1260,14 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
/* dev is a virtual bridge */
} else {
list_for_each_entry(dp, &dst->ports, list) {
- if (dp->bridge_num < 0)
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+
+ if (!bridge_num)
continue;
- if (dp->bridge_num + 1 + dst->last_switch != dev)
+ if (bridge_num + dst->last_switch != dev)
continue;
- br = dp->bridge_dev;
found = true;
break;
}
@@ -1288,12 +1286,11 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
/* Frames from user ports can egress any local DSA links and CPU ports,
* as well as any local member of their bridge group.
*/
- list_for_each_entry(dp, &dst->ports, list)
- if (dp->ds == ds &&
- (dp->type == DSA_PORT_TYPE_CPU ||
- dp->type == DSA_PORT_TYPE_DSA ||
- (br && dp->bridge_dev == br)))
- pvlan |= BIT(dp->index);
+ dsa_switch_for_each_port(other_dp, ds)
+ if (other_dp->type == DSA_PORT_TYPE_CPU ||
+ other_dp->type == DSA_PORT_TYPE_DSA ||
+ dsa_port_bridge_same(dp, other_dp))
+ pvlan |= BIT(other_dp->index);
return pvlan;
}
@@ -1660,12 +1657,13 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid)
{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry vlan;
- int i, err;
+ int err;
/* DSA and CPU ports have to be members of multiple vlans */
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
return 0;
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
@@ -1675,27 +1673,22 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (!vlan.valid)
return 0;
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
- continue;
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *other_br;
- if (!dsa_to_port(ds, i)->slave)
- continue;
-
- if (vlan.member[i] ==
+ if (vlan.member[other_dp->index] ==
MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
- if (dsa_to_port(ds, i)->bridge_dev ==
- dsa_to_port(ds, port)->bridge_dev)
+ if (dsa_port_bridge_same(dp, other_dp))
break; /* same bridge, check next VLAN */
- if (!dsa_to_port(ds, i)->bridge_dev)
+ other_br = dsa_port_bridge_dev_get(other_dp);
+ if (!other_br)
continue;
dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
- port, vlan.vid, i,
- netdev_name(dsa_to_port(ds, i)->bridge_dev));
+ port, vlan.vid, other_dp->index, netdev_name(other_br));
return -EOPNOTSUPP;
}
@@ -1705,13 +1698,14 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_port *dp = dsa_to_port(chip->ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct mv88e6xxx_port *p = &chip->ports[port];
u16 pvid = MV88E6XXX_VID_STANDALONE;
bool drop_untagged = false;
int err;
- if (dp->bridge_dev) {
- if (br_vlan_enabled(dp->bridge_dev)) {
+ if (br) {
+ if (br_vlan_enabled(br)) {
pvid = p->bridge_pvid.vid;
drop_untagged = !p->bridge_pvid.valid;
} else {
@@ -2429,7 +2423,7 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct dsa_switch *ds = chip->ds;
struct dsa_switch_tree *dst = ds->dst;
@@ -2437,7 +2431,7 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
int err;
list_for_each_entry(dp, &dst->ports, list) {
- if (dp->bridge_dev == br) {
+ if (dsa_port_offloads_bridge(dp, &bridge)) {
if (dp->ds == ds) {
/* This is a local bridge group member,
* remap its Port VLAN Map.
@@ -2460,15 +2454,29 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
return 0;
}
+/* Treat the software bridge as a virtual single-port switch behind the
+ * CPU and map in the PVT. The first dst->last_switch elements are
+ * taken by physical switches, so start from beyond that range.
+ */
+static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
+ unsigned int bridge_num)
+{
+ u8 dev = bridge_num + ds->dst->last_switch;
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ return mv88e6xxx_pvt_map(chip, dev, 0);
+}
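/* Editor's note (worked example, not from this patch): bridge numbering
 * changed alongside this helper — dsa_port_bridge_num_get() returns 0
 * for "no bridge" and counts real bridges from 1, where the old
 * dp->bridge_num counted from 0. That is why the "+ 1" from the removed
 * copy of this function (further down in this diff) is gone: with
 * dst->last_switch == 2, the first offloaded bridge (num == 1) maps to
 * PVT device 1 + 2 == 3, exactly what the old code computed as
 * 0 + 2 + 1.
 */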
+
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_bridge_map(chip, br);
+ err = mv88e6xxx_bridge_map(chip, bridge);
if (err)
goto unlock;
@@ -2476,6 +2484,14 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
if (err)
goto unlock;
+ if (mv88e6xxx_has_pvt(chip)) {
+ err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
+ if (err)
+ goto unlock;
+
+ *tx_fwd_offload = true;
+ }
+
unlock:
mv88e6xxx_reg_unlock(chip);
@@ -2483,14 +2499,18 @@ unlock:
}
static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_bridge_map(chip, br) ||
+ if (bridge.tx_fwd_offload &&
+ mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
+ dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
+
+ if (mv88e6xxx_bridge_map(chip, bridge) ||
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
@@ -2505,7 +2525,7 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
int tree_index, int sw_index,
- int port, struct net_device *br)
+ int port, struct dsa_bridge bridge)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2515,6 +2535,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_pvt_map(chip, sw_index, port);
+ err = err ? : mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
mv88e6xxx_reg_unlock(chip);
return err;
@@ -2522,7 +2543,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
int tree_index, int sw_index,
- int port, struct net_device *br)
+ int port, struct dsa_bridge bridge)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -2530,49 +2551,12 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
return;
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_pvt_map(chip, sw_index, port))
+ if (mv88e6xxx_pvt_map(chip, sw_index, port) ||
+ mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
mv88e6xxx_reg_unlock(chip);
}
-/* Treat the software bridge as a virtual single-port switch behind the
- * CPU and map in the PVT. First dst->last_switch elements are taken by
- * physical switches, so start from beyond that range.
- */
-static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
- int bridge_num)
-{
- u8 dev = bridge_num + ds->dst->last_switch + 1;
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_pvt_map(chip, dev, 0);
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static int mv88e6xxx_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
- struct net_device *br,
- int bridge_num)
-{
- return mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
-}
-
-static void mv88e6xxx_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
- struct net_device *br,
- int bridge_num)
-{
- int err;
-
- err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
- if (err) {
- dev_err(ds->dev, "failed to remap cross-chip Port VLAN: %pe\n",
- ERR_PTR(err));
- }
-}
-
static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->reset)
@@ -3199,8 +3183,8 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
* time.
*/
if (mv88e6xxx_has_pvt(chip))
- ds->num_fwd_offloading_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
- ds->dst->last_switch - 1;
+ ds->max_num_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
+ ds->dst->last_switch - 1;
mv88e6xxx_reg_lock(chip);
@@ -6292,8 +6276,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
.crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
.crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
- .port_bridge_tx_fwd_offload = mv88e6xxx_bridge_tx_fwd_offload,
- .port_bridge_tx_fwd_unoffload = mv88e6xxx_bridge_tx_fwd_unoffload,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 8f74ffc7a279..389f8a6ec0ab 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -100,10 +100,6 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
*/
clear_bit_unlock(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tstamp_enable = false;
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 9948544ba1c4..220b0b027b55 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -21,6 +21,7 @@ config NET_DSA_MSCC_SEVILLE
depends on NET_VENDOR_MICROSEMI
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
+ select MDIO_MSCC_MIIM
select MSCC_OCELOT_SWITCH_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index f1a05e7dc818..9957772201d5 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -21,7 +21,6 @@
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
-#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"
@@ -240,24 +239,32 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
*/
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot->ports[port]->is_dsa_8021q_cpu = true;
ocelot->npi = -1;
/* Overwrite PGID_CPU with the non-tagging port */
ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot->ports[port]->is_dsa_8021q_cpu = false;
/* Restore PGID_CPU */
ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
PGID_CPU);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
@@ -632,6 +639,17 @@ static int felix_set_ageing_time(struct dsa_switch *ds,
return 0;
}
+static void felix_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+ int err;
+
+ err = ocelot_mact_flush(ocelot, port);
+ if (err)
+ dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
+ port, ERR_PTR(err));
+}
+
static int felix_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -701,21 +719,21 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port,
}
static int felix_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge, bool *tx_fwd_offload)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_join(ocelot, port, br);
+ ocelot_port_bridge_join(ocelot, port, bridge.dev);
return 0;
}
static void felix_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_leave(ocelot, port, br);
+ ocelot_port_bridge_leave(ocelot, port, bridge.dev);
}
static int felix_lag_join(struct dsa_switch *ds, int port,
@@ -823,8 +841,8 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_port *dp = dsa_to_port(ds, port);
- if (felix->pcs[port])
- phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
+ if (felix->pcs && felix->pcs[port])
+ phylink_set_pcs(dp->pl, felix->pcs[port]);
}
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -992,6 +1010,10 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->num_stats = felix->info->num_stats;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
+ ocelot->vcap_pol.base = felix->info->vcap_pol_base;
+ ocelot->vcap_pol.max = felix->info->vcap_pol_max;
+ ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
+ ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
ocelot->ops = felix->info->ops;
ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
@@ -1019,7 +1041,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
res.start += felix->switch_base;
res.end += felix->switch_base;
- target = ocelot_regmap_init(ocelot, &res);
+ target = felix->info->init_regmap(ocelot, &res);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map device memory space\n");
@@ -1056,7 +1078,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
res.start += felix->switch_base;
res.end += felix->switch_base;
- target = ocelot_regmap_init(ocelot, &res);
+ target = felix->info->init_regmap(ocelot, &res);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map memory space for port %d\n",
@@ -1143,38 +1165,22 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
kfree(xmit_work);
}
-static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
+static int felix_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- struct felix_port *felix_port;
+ struct ocelot_8021q_tagger_data *tagger_data;
- if (!dsa_port_is_user(dp))
+ switch (proto) {
+ case DSA_TAG_PROTO_OCELOT_8021Q:
+ tagger_data = ocelot_8021q_tagger_data(ds);
+ tagger_data->xmit_work_fn = felix_port_deferred_xmit;
return 0;
-
- felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
- if (!felix_port)
- return -ENOMEM;
-
- felix_port->xmit_worker = felix->xmit_worker;
- felix_port->xmit_work_fn = felix_port_deferred_xmit;
-
- dp->priv = felix_port;
-
- return 0;
-}
-
-static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
-{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct felix_port *felix_port = dp->priv;
-
- if (!felix_port)
- return;
-
- dp->priv = NULL;
- kfree(felix_port);
+ case DSA_TAG_PROTO_OCELOT:
+ case DSA_TAG_PROTO_SEVILLE:
+ return 0;
+ default:
+ return -EPROTONOSUPPORT;
+ }
}
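/* Editor's sketch (not from this patch): connect_tag_protocol gives the
 * switch driver a hook to hand callbacks to the tagging driver, instead
 * of the per-port dp->priv allocations deleted below. On the tagger
 * side, the ocelot-8021q tagger is assumed to consume xmit_work_fn
 * roughly like this; "foo" and the worker parameter are illustrative:
 */
static void foo_tagger_defer_xmit(struct dsa_switch *ds,
				  struct kthread_work *xmit_work,
				  struct kthread_worker *xmit_worker)
{
	struct ocelot_8021q_tagger_data *td = ocelot_8021q_tagger_data(ds);

	/* xmit_work_fn was filled in at connect time with
	 * felix_port_deferred_xmit() above; the tagger only has to
	 * initialise and queue the work.
	 */
	kthread_init_work(xmit_work, td->xmit_work_fn);
	kthread_queue_work(xmit_worker, xmit_work);
}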
/* Hardware initialization done here so that we can allocate structures with
@@ -1205,12 +1211,6 @@ static int felix_setup(struct dsa_switch *ds)
}
}
- felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
- if (IS_ERR(felix->xmit_worker)) {
- err = PTR_ERR(felix->xmit_worker);
- goto out_deinit_timestamp;
- }
-
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
@@ -1221,14 +1221,6 @@ static int felix_setup(struct dsa_switch *ds)
* bits of vlan tag.
*/
felix_port_qos_map_init(ocelot, port);
-
- err = felix_port_setup_tagger_data(ds, port);
- if (err) {
- dev_err(ds->dev,
- "port %d failed to set up tagger data: %pe\n",
- port, ERR_PTR(err));
- goto out_deinit_ports;
- }
}
err = ocelot_devlink_sb_register(ocelot);
@@ -1256,13 +1248,9 @@ out_deinit_ports:
if (dsa_is_unused_port(ds, port))
continue;
- felix_port_teardown_tagger_data(ds, port);
ocelot_deinit_port(ocelot, port);
}
- kthread_destroy_worker(felix->xmit_worker);
-
-out_deinit_timestamp:
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
@@ -1291,12 +1279,9 @@ static void felix_teardown(struct dsa_switch *ds)
if (dsa_is_unused_port(ds, port))
continue;
- felix_port_teardown_tagger_data(ds, port);
ocelot_deinit_port(ocelot, port);
}
- kthread_destroy_worker(felix->xmit_worker);
-
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
@@ -1636,6 +1621,7 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
+ .connect_tag_protocol = felix_connect_tag_protocol,
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
@@ -1647,6 +1633,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.phylink_mac_config = felix_phylink_mac_config,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index be3e42e135c0..9395ac119d33 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -21,8 +21,10 @@ struct felix_info {
int num_ports;
int num_tx_queues;
struct vcap_props *vcap;
- int switch_pci_bar;
- int imdio_pci_bar;
+ u16 vcap_pol_base;
+ u16 vcap_pol_max;
+ u16 vcap_pol_base2;
+ u16 vcap_pol_max2;
const struct ptp_clock_info *ptp_caps;
/* Some Ocelot switches are integrated into the SoC without the
@@ -48,6 +50,8 @@ struct felix_info {
enum tc_setup_type type, void *type_data);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
u32 speed);
+ struct regmap *(*init_regmap)(struct ocelot *ocelot,
+ struct resource *res);
};
extern const struct dsa_switch_ops felix_switch_ops;
@@ -58,7 +62,7 @@ struct felix {
const struct felix_info *info;
struct ocelot ocelot;
struct mii_bus *imdio;
- struct lynx_pcs **pcs;
+ struct phylink_pcs **pcs;
resource_size_t switch_base;
resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 45c5ec7a83ea..bf8d38239e7e 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -5,8 +5,10 @@
#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
+#include <net/tc_act/tc_gate.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/ocelot.h>
#include <linux/pcs-lynx.h>
@@ -17,6 +19,10 @@
#include "felix.h"
#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_VCAP_POLICER_BASE 63
+#define VSC9959_VCAP_POLICER_MAX 383
+#define VSC9959_SWITCH_PCI_BAR 4
+#define VSC9959_IMDIO_PCI_BAR 0
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
@@ -292,7 +298,7 @@ static const u32 vsc9959_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
+ REG(SYS_CNT, 0x000000),
REG(SYS_PTP_STATUS, 0x000f14),
REG(SYS_PTP_TXSTAMP, 0x000f18),
REG(SYS_PTP_NXT, 0x000f1c),
@@ -1020,15 +1026,6 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
*maxuse = val & GENMASK(11, 0);
}
-static const struct ocelot_ops vsc9959_ops = {
- .reset = vsc9959_reset,
- .wm_enc = vsc9959_wm_enc,
- .wm_dec = vsc9959_wm_dec,
- .wm_stat = vsc9959_wm_stat,
- .port_to_netdev = felix_port_to_netdev,
- .netdev_to_port = felix_netdev_to_port,
-};
-
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
@@ -1042,7 +1039,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct lynx_pcs *),
+ sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
@@ -1091,8 +1088,8 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct mdio_device *pcs;
- struct lynx_pcs *lynx;
+ struct phylink_pcs *phylink_pcs;
+ struct mdio_device *mdio_device;
if (dsa_is_unused_port(felix->ds, port))
continue;
@@ -1100,17 +1097,17 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- pcs = mdio_device_create(felix->imdio, port);
- if (IS_ERR(pcs))
+ mdio_device = mdio_device_create(felix->imdio, port);
+ if (IS_ERR(mdio_device))
continue;
- lynx = lynx_pcs_create(pcs);
- if (!lynx) {
- mdio_device_free(pcs);
+ phylink_pcs = lynx_pcs_create(mdio_device);
+ if (!phylink_pcs) {
+ mdio_device_free(mdio_device);
continue;
}
- felix->pcs[port] = lynx;
+ felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
}
@@ -1124,13 +1121,15 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct lynx_pcs *pcs = felix->pcs[port];
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
+ struct mdio_device *mdio_device;
- if (!pcs)
+ if (!phylink_pcs)
continue;
- mdio_device_free(pcs->mdio);
- lynx_pcs_destroy(pcs);
+ mdio_device = lynx_get_mdio_device(phylink_pcs);
+ mdio_device_free(mdio_device);
+ lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
}
@@ -1344,6 +1343,877 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
}
}
+#define VSC9959_PSFP_SFID_MAX 175
+#define VSC9959_PSFP_GATE_ID_MAX 183
+#define VSC9959_PSFP_POLICER_BASE 63
+#define VSC9959_PSFP_POLICER_MAX 383
+#define VSC9959_PSFP_GATE_LIST_NUM 4
+#define VSC9959_PSFP_GATE_CYCLETIME_MIN 5000
+
+struct felix_stream {
+ struct list_head list;
+ unsigned long id;
+ bool dummy;
+ int ports;
+ int port;
+ u8 dmac[ETH_ALEN];
+ u16 vid;
+ s8 prio;
+ u8 sfid_valid;
+ u8 ssid_valid;
+ u32 sfid;
+ u32 ssid;
+};
+
+struct felix_stream_filter {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+ u8 enable;
+ int portmask;
+ u8 sg_valid;
+ u32 sgid;
+ u8 fm_valid;
+ u32 fmid;
+ u8 prio_valid;
+ u8 prio;
+ u32 maxsdu;
+};
+
+struct felix_stream_filter_counters {
+ u32 match;
+ u32 not_pass_gate;
+ u32 not_pass_sdu;
+ u32 red;
+};
+
+struct felix_stream_gate {
+ u32 index;
+ u8 enable;
+ u8 ipv_valid;
+ u8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletime_ext;
+ u32 num_entries;
+ struct action_gate_entry entries[];
+};
+
+struct felix_stream_gate_entry {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+};
+
+static int vsc9959_stream_identify(struct flow_cls_offload *f,
+ struct felix_stream *stream)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(stream->dmac, match.key->dst);
+ if (!is_zero_ether_addr(match.mask->src))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority)
+ stream->prio = match.key->vlan_priority;
+ else
+ stream->prio = -1;
+
+ if (!match.mask->vlan_id)
+ return -EOPNOTSUPP;
+ stream->vid = match.key->vlan_id;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ stream->id = f->cookie;
+
+ return 0;
+}
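/* Editor's usage sketch (not from this patch): the helper above only
 * accepts flower matches on destination MAC plus a mandatory VLAN ID,
 * with the PCP optional, so a user-space request being offloaded here
 * would look roughly like (interface and values illustrative):
 *
 *   tc filter add dev swp0 ingress protocol 802.1q flower skip_sw \
 *           dst_mac 01:23:45:67:89:ab vlan_id 100 vlan_prio 3 \
 *           action gate ... action police ...
 */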
+
+static int vsc9959_mact_stream_set(struct ocelot *ocelot,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ enum macaccess_entry_type type;
+ int ret, sfid, ssid;
+ u32 vid, dst_idx;
+ u8 mac[ETH_ALEN];
+
+ ether_addr_copy(mac, stream->dmac);
+ vid = stream->vid;
+
+ /* Stream identification doesn't support adding a stream with a
+ * non-existent MAC, i.e. one that has not been learned into the
+ * MAC table.
+ */
+ ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type);
+ if (ret) {
+ if (extack)
+ NL_SET_ERR_MSG_MOD(extack, "Stream is not learned in MAC table");
+ return -EOPNOTSUPP;
+ }
+
+ if ((stream->sfid_valid || stream->ssid_valid) &&
+ type == ENTRYTYPE_NORMAL)
+ type = ENTRYTYPE_LOCKED;
+
+ sfid = stream->sfid_valid ? stream->sfid : -1;
+ ssid = stream->ssid_valid ? stream->ssid : -1;
+
+ ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type,
+ sfid, ssid);
+
+ return ret;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_lookup(struct list_head *stream_list,
+ struct felix_stream *stream)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (ether_addr_equal(tmp->dmac, stream->dmac) &&
+ tmp->vid == stream->vid)
+ return tmp;
+
+ return NULL;
+}
+
+static int vsc9959_stream_table_add(struct ocelot *ocelot,
+ struct list_head *stream_list,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ struct felix_stream *stream_entry;
+ int ret;
+
+ stream_entry = kmemdup(stream, sizeof(*stream_entry), GFP_KERNEL);
+ if (!stream_entry)
+ return -ENOMEM;
+
+ if (!stream->dummy) {
+ ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack);
+ if (ret) {
+ kfree(stream_entry);
+ return ret;
+ }
+ }
+
+ list_add_tail(&stream_entry->list, stream_list);
+
+ return 0;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_get(struct list_head *stream_list, unsigned long id)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (tmp->id == id)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_stream_table_del(struct ocelot *ocelot,
+ struct felix_stream *stream)
+{
+ if (!stream->dummy)
+ vsc9959_mact_stream_set(ocelot, stream, NULL);
+
+ list_del(&stream->list);
+ kfree(stream);
+}
+
+static u32 vsc9959_sfi_access_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_TABLES_SFIDACCESS);
+}
+
+static int vsc9959_psfp_sfi_set(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ u32 val;
+
+ if (sfi->index > VSC9959_PSFP_SFID_MAX)
+ return -EINVAL;
+
+ if (!sfi->enable) {
+ ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE);
+ ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+ }
+
+ if (sfi->sgid > VSC9959_PSFP_GATE_ID_MAX ||
+ sfi->fmid > VSC9959_PSFP_POLICER_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot,
+ (sfi->sg_valid ? ANA_TABLES_SFIDTIDX_SGID_VALID : 0) |
+ ANA_TABLES_SFIDTIDX_SGID(sfi->sgid) |
+ (sfi->fm_valid ? ANA_TABLES_SFIDTIDX_POL_ENA : 0) |
+ ANA_TABLES_SFIDTIDX_POL_IDX(sfi->fmid) |
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ (sfi->prio_valid ? ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) |
+ ANA_TABLES_SFIDACCESS_IGR_PRIO(sfi->prio) |
+ ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(sfi->maxsdu) |
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfidmask_set(struct ocelot *ocelot, u32 sfid, int ports)
+{
+ u32 val;
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid),
+ ANA_TABLES_SFIDTIDX_SFID_INDEX_M,
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_SFID_MASK_IGR_PORT_MASK(ports) |
+ ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA,
+ ANA_TABLES_SFID_MASK);
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M,
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct list_head *pos)
+{
+ struct felix_stream_filter *sfi_entry;
+ int ret;
+
+ sfi_entry = kmemdup(sfi, sizeof(*sfi_entry), GFP_KERNEL);
+ if (!sfi_entry)
+ return -ENOMEM;
+
+ refcount_set(&sfi_entry->refcount, 1);
+
+ ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry);
+ if (ret) {
+ kfree(sfi_entry);
+ return ret;
+ }
+
+ vsc9959_psfp_sfidmask_set(ocelot, sfi->index, sfi->portmask);
+
+ list_add(&sfi_entry->list, pos);
+
+ return 0;
+}
+
+static int vsc9959_psfp_sfi_table_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct list_head *pos, *q, *last;
+ struct felix_stream_filter *tmp;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+ if (sfi->sg_valid == tmp->sg_valid &&
+ sfi->fm_valid == tmp->fm_valid &&
+ sfi->portmask == tmp->portmask &&
+ tmp->sgid == sfi->sgid &&
+ tmp->fmid == sfi->fmid) {
+ sfi->index = tmp->index;
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+ /* Keep the list sorted by increasing index. */
+ if (tmp->index == insert) {
+ last = pos;
+ insert++;
+ }
+ }
+ sfi->index = insert;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+}
+
+static int vsc9959_psfp_sfi_table_add2(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct felix_stream_filter *sfi2)
+{
+ struct felix_stream_filter *tmp;
+ struct list_head *pos, *q, *last;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+ int ret;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+ /* Keep the list sorted by increasing index. */
+ if (tmp->index >= insert + 2)
+ break;
+
+ insert = tmp->index + 1;
+ last = pos;
+ }
+ sfi->index = insert;
+
+ ret = vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+ if (ret)
+ return ret;
+
+ sfi2->index = insert + 1;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi2, last->next);
+}
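/* Editor's note (worked example, not from this patch): the walk above
 * looks for the first gap of at least two consecutive free indices in
 * the sorted SFI list. E.g. with existing indices {0, 1, 4}: insert
 * advances to 1 after index 0 and to 2 after index 1; then 4 >= 2 + 2
 * stops the scan, so the pair lands on indices 2 and 3 and is linked
 * in after entry 1, keeping the list ordered.
 */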
+
+static struct felix_stream_filter *
+vsc9959_psfp_sfi_table_get(struct list_head *sfi_list, u32 index)
+{
+ struct felix_stream_filter *tmp;
+
+ list_for_each_entry(tmp, sfi_list, list)
+ if (tmp->index == index)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index)
+{
+ struct felix_stream_filter *tmp, *n;
+ struct ocelot_psfp_list *psfp;
+ u8 z;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sfi_list, list)
+ if (tmp->index == index) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ tmp->enable = 0;
+ vsc9959_psfp_sfi_set(ocelot, tmp);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry,
+ struct felix_stream_gate *sgi)
+{
+ sgi->index = entry->hw_index;
+ sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1;
+ sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0;
+ sgi->basetime = entry->gate.basetime;
+ sgi->cycletime = entry->gate.cycletime;
+ sgi->num_entries = entry->gate.num_entries;
+ sgi->enable = 1;
+
+ memcpy(sgi->entries, entry->gate.entries,
+ entry->gate.num_entries * sizeof(struct action_gate_entry));
+}
+
+static u32 vsc9959_sgi_cfg_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL);
+}
+
+static int vsc9959_psfp_sgi_set(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct action_gate_entry *e;
+ struct timespec64 base_ts;
+ u32 interval_sum = 0;
+ u32 val;
+ int i;
+
+ if (sgi->index > VSC9959_PSFP_GATE_ID_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot, ANA_SG_ACCESS_CTRL_SGID(sgi->index),
+ ANA_SG_ACCESS_CTRL);
+
+ if (!sgi->enable) {
+ ocelot_rmw(ocelot, ANA_SG_CONFIG_REG_3_INIT_GATE_STATE,
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE,
+ ANA_SG_CONFIG_REG_3);
+
+ return 0;
+ }
+
+ if (sgi->cycletime < VSC9959_PSFP_GATE_CYCLETIME_MIN ||
+ sgi->cycletime > NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (sgi->num_entries > VSC9959_PSFP_GATE_LIST_NUM)
+ return -EINVAL;
+
+ vsc9959_new_base_time(ocelot, sgi->basetime, sgi->cycletime, &base_ts);
+ ocelot_write(ocelot, base_ts.tv_nsec, ANA_SG_CONFIG_REG_1);
+ val = lower_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot, val, ANA_SG_CONFIG_REG_2);
+
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot,
+ (sgi->ipv_valid ? ANA_SG_CONFIG_REG_3_IPV_VALID : 0) |
+ ANA_SG_CONFIG_REG_3_INIT_IPV(sgi->init_ipv) |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE |
+ ANA_SG_CONFIG_REG_3_LIST_LENGTH(sgi->num_entries) |
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val),
+ ANA_SG_CONFIG_REG_3);
+
+ ocelot_write(ocelot, sgi->cycletime, ANA_SG_CONFIG_REG_4);
+
+ e = sgi->entries;
+ for (i = 0; i < sgi->num_entries; i++) {
+ u32 ips = (e[i].ipv < 0) ? 0 : (e[i].ipv + 8);
+
+ ocelot_write_rix(ocelot, ANA_SG_GCL_GS_CONFIG_IPS(ips) |
+ (e[i].gate_state ?
+ ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0),
+ ANA_SG_GCL_GS_CONFIG, i);
+
+ interval_sum += e[i].interval;
+ ocelot_write_rix(ocelot, interval_sum, ANA_SG_GCL_TI_CONFIG, i);
+ }
+
+ ocelot_rmw(ocelot, ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL);
+
+ return readx_poll_timeout(vsc9959_sgi_cfg_status, ocelot, val,
+ (!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sgi_table_add(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct felix_stream_gate_entry *tmp;
+ struct ocelot_psfp_list *psfp;
+ int ret;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry(tmp, &psfp->sgi_list, list)
+ if (tmp->index == sgi->index) {
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = vsc9959_psfp_sgi_set(ocelot, sgi);
+ if (ret) {
+ kfree(tmp);
+ return ret;
+ }
+
+ tmp->index = sgi->index;
+ refcount_set(&tmp->refcount, 1);
+ list_add_tail(&tmp->list, &psfp->sgi_list);
+
+ return 0;
+}
+
+static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
+ u32 index)
+{
+ struct felix_stream_gate_entry *tmp, *n;
+ struct felix_stream_gate sgi = {0};
+ struct ocelot_psfp_list *psfp;
+ u8 z;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sgi_list, list)
+ if (tmp->index == index) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ sgi.index = index;
+ sgi.enable = 0;
+ vsc9959_psfp_sgi_set(ocelot, &sgi);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
+ struct felix_stream_filter_counters *counters)
+{
+ ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ counters->match = ocelot_read_gix(ocelot, SYS_CNT, 0x200);
+ counters->not_pass_gate = ocelot_read_gix(ocelot, SYS_CNT, 0x201);
+ counters->not_pass_sdu = ocelot_read_gix(ocelot, SYS_CNT, 0x202);
+ counters->red = ocelot_read_gix(ocelot, SYS_CNT, 0x203);
+
+ /* Clear the PSFP counters. */
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(index) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+}
+
+static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct felix_stream_filter old_sfi, *sfi_entry;
+ struct felix_stream_filter sfi = {0};
+ const struct flow_action_entry *a;
+ struct felix_stream *stream_entry;
+ struct felix_stream stream = {0};
+ struct felix_stream_gate *sgi;
+ struct ocelot_psfp_list *psfp;
+ struct ocelot_policer pol;
+ int ret, i, size;
+ u64 rate, burst;
+ u32 index;
+
+ psfp = &ocelot->psfp;
+
+ ret = vsc9959_stream_identify(f, &stream);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "Only can match on VID, PCP, and dest MAC");
+ return ret;
+ }
+
+ flow_action_for_each(i, a, &f->rule->action) {
+ switch (a->id) {
+ case FLOW_ACTION_GATE:
+ size = struct_size(sgi, entries, a->gate.num_entries);
+ sgi = kzalloc(size, GFP_KERNEL);
+ if (!sgi) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ vsc9959_psfp_parse_gate(a, sgi);
+ ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
+ if (ret) {
+ kfree(sgi);
+ goto err;
+ }
+ sfi.sg_valid = 1;
+ sfi.sgid = sgi->index;
+ kfree(sgi);
+ break;
+ case FLOW_ACTION_POLICE:
+ index = a->hw_index + VSC9959_PSFP_POLICER_BASE;
+ if (index > VSC9959_PSFP_POLICER_MAX) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ rate = a->police.rate_bytes_ps;
+ burst = rate * PSCHED_NS2TICKS(a->police.burst);
+ pol = (struct ocelot_policer) {
+ .burst = div_u64(burst, PSCHED_TICKS_PER_SEC),
+ .rate = div_u64(rate, 1000) * 8,
+ };
+ ret = ocelot_vcap_policer_add(ocelot, index, &pol);
+ if (ret)
+ goto err;
+
+ sfi.fm_valid = 1;
+ sfi.fmid = index;
+ sfi.maxsdu = a->police.mtu;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ stream.ports = BIT(port);
+ stream.port = port;
+
+ sfi.portmask = stream.ports;
+ sfi.prio_valid = (stream.prio < 0 ? 0 : 1);
+ sfi.prio = (sfi.prio_valid ? stream.prio : 0);
+ sfi.enable = 1;
+
+ /* Check if the stream already exists. */
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &stream);
+ if (stream_entry) {
+ if (stream_entry->ports & BIT(port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is added on this port");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ if (stream_entry->ports != BIT(stream_entry->port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is added on two ports");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ stream_entry->ports |= BIT(port);
+ stream.ports = stream_entry->ports;
+
+ sfi_entry = vsc9959_psfp_sfi_table_get(&psfp->sfi_list,
+ stream_entry->sfid);
+ memcpy(&old_sfi, sfi_entry, sizeof(old_sfi));
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream_entry->sfid);
+
+ old_sfi.portmask = stream_entry->ports;
+ sfi.portmask = stream.ports;
+
+ if (stream_entry->port > port) {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &sfi,
+ &old_sfi);
+ stream_entry->dummy = true;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &old_sfi,
+ &sfi);
+ stream.dummy = true;
+ }
+ if (ret)
+ goto err;
+
+ stream_entry->sfid = old_sfi.index;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add(ocelot, &sfi);
+ if (ret)
+ goto err;
+ }
+
+ stream.sfid = sfi.index;
+ stream.sfid_valid = 1;
+ ret = vsc9959_stream_table_add(ocelot, &psfp->stream_list,
+ &stream, extack);
+ if (ret) {
+ vsc9959_psfp_sfi_table_del(ocelot, stream.sfid);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (sfi.sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi.sgid);
+
+ if (sfi.fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi.fmid);
+
+ return ret;
+}
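
The FLOW_ACTION_POLICE branch above converts tc's policer parameters into the ocelot policer's units: the rate goes from bytes per second to kbit/s, and the burst from a time window back into bytes (rate × time). A standalone sketch of that arithmetic, with illustrative numbers (the driver itself computes the burst via psched ticks; plain nanoseconds give the same result):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Illustrative inputs: a 100 Mbit/s policer, 10 us burst window */
	uint64_t rate_bytes_ps = 100000000ULL / 8;	/* bytes per second */
	uint64_t burst_ns = 10000;			/* burst time in ns */

	/* rate: bytes/s -> kbit/s, as the ocelot policer expects */
	uint64_t rate_kbps = rate_bytes_ps / 1000 * 8;

	/* burst: rate * time = bytes, which is what the hardware wants */
	uint64_t burst_bytes = rate_bytes_ps * burst_ns / NSEC_PER_SEC;

	printf("rate=%llu kbps, burst=%llu bytes\n",
	       (unsigned long long)rate_kbps,
	       (unsigned long long)burst_bytes);
	return 0;
}

With these inputs the policer is programmed with 100000 kbps and a 125-byte burst allowance.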
+
+static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
+ struct flow_cls_offload *f)
+{
+ struct felix_stream *stream, tmp, *stream_entry;
+ struct felix_stream_filter *sfi;
+ struct ocelot_psfp_list *psfp;
+
+ psfp = &ocelot->psfp;
+
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOMEM;
+
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -ENOMEM;
+
+ if (sfi->sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
+
+ if (sfi->fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi->fmid);
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream->sfid);
+
+ memcpy(&tmp, stream, sizeof(tmp));
+
+ stream->sfid_valid = 0;
+ vsc9959_stream_table_del(ocelot, stream);
+
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &tmp);
+ if (stream_entry) {
+ stream_entry->ports = BIT(stream_entry->port);
+ if (stream_entry->dummy) {
+ stream_entry->dummy = false;
+ vsc9959_mact_stream_set(ocelot, stream_entry, NULL);
+ }
+ vsc9959_psfp_sfidmask_set(ocelot, stream_entry->sfid,
+ stream_entry->ports);
+ }
+
+ return 0;
+}
+
+static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
+ struct flow_cls_offload *f,
+ struct flow_stats *stats)
+{
+ struct felix_stream_filter_counters counters;
+ struct ocelot_psfp_list *psfp;
+ struct felix_stream *stream;
+
+ psfp = &ocelot->psfp;
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOMEM;
+
+ vsc9959_psfp_counters_get(ocelot, stream->sfid, &counters);
+
+ stats->pkts = counters.match;
+ stats->drops = counters.not_pass_gate + counters.not_pass_sdu +
+ counters.red;
+
+ return 0;
+}
+
+static void vsc9959_psfp_init(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+
+ INIT_LIST_HEAD(&psfp->stream_list);
+ INIT_LIST_HEAD(&psfp->sfi_list);
+ INIT_LIST_HEAD(&psfp->sgi_list);
+}
+
+/* When using cut-through forwarding and the egress port runs at a higher data
+ * rate than the ingress port, the packet currently under transmission would
+ * suffer an underrun since it would be transmitted faster than it is received.
+ * The Felix switch implementation of cut-through forwarding does not check in
+ * hardware whether this condition is satisfied or not, so we must restrict the
+ * list of ports that have cut-through forwarding enabled on egress to only be
+ * the ports operating at the lowest link speed within their respective
+ * forwarding domain.
+ */
+static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_switch *ds = felix->ds;
+ int port, other_port;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int min_speed = ocelot_port->speed;
+ unsigned long mask = 0;
+ u32 tmp, val = 0;
+
+ /* Disable cut-through on ports that are down */
+ if (ocelot_port->speed <= 0)
+ goto set;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* Ocelot switches forward from the NPI port towards
+ * any port, regardless of whether it is in the NPI
+ * port's forwarding domain or not.
+ */
+ mask = dsa_user_ports(ds);
+ } else {
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port);
+ mask &= ~BIT(port);
+ if (ocelot->npi >= 0)
+ mask |= BIT(ocelot->npi);
+ else
+ mask |= ocelot_get_dsa_8021q_cpu_mask(ocelot);
+ }
+
+ /* Calculate the minimum link speed, among the ports that are
+ * up, of this source port's forwarding domain.
+ */
+ for_each_set_bit(other_port, &mask, ocelot->num_phys_ports) {
+ struct ocelot_port *other_ocelot_port;
+
+ other_ocelot_port = ocelot->ports[other_port];
+ if (other_ocelot_port->speed <= 0)
+ continue;
+
+ if (min_speed > other_ocelot_port->speed)
+ min_speed = other_ocelot_port->speed;
+ }
+
+ /* Enable cut-through forwarding for all traffic classes. */
+ if (ocelot_port->speed == min_speed)
+ val = GENMASK(7, 0);
+
+set:
+ tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
+ if (tmp == val)
+ continue;
+
+ dev_dbg(ocelot->dev,
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n",
+ port, mask, ocelot_port->speed, min_speed,
+ val ? "enabling" : "disabling");
+
+ ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
+ }
+}
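
The rule this loop implements — enable cut-through only on the slowest active port of each forwarding domain — reduces to taking a minimum over a bitmask. A simplified standalone sketch of that core idea (speeds and the forwarding mask are made up; the real function also special-cases the CPU/NPI ports and excludes the source port from its own mask):

#include <stdio.h>

#define NUM_PORTS 6

int main(void)
{
	/* Hypothetical link speeds in Mbps; 0 means the port is down */
	int speed[NUM_PORTS] = { 1000, 100, 0, 2500, 1000, 100 };
	unsigned long fwd_mask = 0x33;	/* example forwarding domain */
	int port;

	for (port = 0; port < NUM_PORTS; port++) {
		int min_speed = speed[port], other;

		if (speed[port] <= 0 || !(fwd_mask & (1UL << port)))
			continue;

		for (other = 0; other < NUM_PORTS; other++) {
			if (!(fwd_mask & (1UL << other)) || speed[other] <= 0)
				continue;
			if (speed[other] < min_speed)
				min_speed = speed[other];
		}

		printf("port %d: cut-through %s\n", port,
		       speed[port] == min_speed ? "on" : "off");
	}
	return 0;
}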
+
+static const struct ocelot_ops vsc9959_ops = {
+ .reset = vsc9959_reset,
+ .wm_enc = vsc9959_wm_enc,
+ .wm_dec = vsc9959_wm_dec,
+ .wm_stat = vsc9959_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+ .psfp_init = vsc9959_psfp_init,
+ .psfp_filter_add = vsc9959_psfp_filter_add,
+ .psfp_filter_del = vsc9959_psfp_filter_del,
+ .psfp_stats_get = vsc9959_psfp_stats_get,
+ .cut_through_fwd = vsc9959_cut_through_fwd,
+};
+
static const struct felix_info felix_info_vsc9959 = {
.target_io_res = vsc9959_target_io_res,
.port_io_res = vsc9959_port_io_res,
@@ -1354,11 +2224,13 @@ static const struct felix_info felix_info_vsc9959 = {
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.vcap = vsc9959_vcap_props,
+ .vcap_pol_base = VSC9959_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9959_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = 0,
+ .vcap_pol_max2 = 0,
.num_mact_rows = 2048,
.num_ports = 6,
.num_tx_queues = OCELOT_NUM_TC,
- .switch_pci_bar = 4,
- .imdio_pci_bar = 0,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
@@ -1367,6 +2239,7 @@ static const struct felix_info felix_info_vsc9959 = {
.prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
+ .init_regmap = ocelot_regmap_init,
};
static irqreturn_t felix_irq_handler(int irq, void *data)
@@ -1417,10 +2290,8 @@ static int felix_pci_probe(struct pci_dev *pdev,
ocelot->dev = &pdev->dev;
ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
- felix->switch_base = pci_resource_start(pdev,
- felix->info->switch_pci_bar);
- felix->imdio_base = pci_resource_start(pdev,
- felix->info->imdio_pci_bar);
+ felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
+ felix->imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
pci_set_master(pdev);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 92eae63150ea..8c1c9da61602 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -6,18 +6,18 @@
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
+#include <linux/mdio/mdio-mscc-miim.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/pcs-lynx.h>
#include <linux/dsa/ocelot.h>
#include <linux/iopoll.h>
#include "felix.h"
-#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
-#define MSCC_MIIM_CMD_OPR_READ BIT(2)
-#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
-#define MSCC_MIIM_CMD_REGAD_SHIFT 20
-#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
-#define MSCC_MIIM_CMD_VLD BIT(31)
+#define VSC9953_VCAP_POLICER_BASE 11
+#define VSC9953_VCAP_POLICER_MAX 31
+#define VSC9953_VCAP_POLICER_BASE2 120
+#define VSC9953_VCAP_POLICER_MAX2 161
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
@@ -857,7 +857,6 @@ static struct vcap_props vsc9953_vcap_props[] = {
#define VSC9953_INIT_TIMEOUT 50000
#define VSC9953_GCB_RST_SLEEP 100
#define VSC9953_SYS_RAMINIT_SLEEP 80
-#define VCS9953_MII_TIMEOUT 10000
static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot)
{
@@ -877,82 +876,6 @@ static int vsc9953_sys_ram_init_status(struct ocelot *ocelot)
return val;
}
-static int vsc9953_gcb_miim_pending_status(struct ocelot *ocelot)
-{
- int val;
-
- ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_PENDING, &val);
-
- return val;
-}
-
-static int vsc9953_gcb_miim_busy_status(struct ocelot *ocelot)
-{
- int val;
-
- ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_BUSY, &val);
-
- return val;
-}
-
-static int vsc9953_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
-{
- struct ocelot *ocelot = bus->priv;
- int err, cmd, val;
-
- /* Wait while MIIM controller becomes idle */
- err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot,
- val, !val, 10, VCS9953_MII_TIMEOUT);
- if (err) {
- dev_err(ocelot->dev, "MDIO write: pending timeout\n");
- goto out;
- }
-
- cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
- (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) |
- (value << MSCC_MIIM_CMD_WRDATA_SHIFT) |
- MSCC_MIIM_CMD_OPR_WRITE;
-
- ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD);
-
-out:
- return err;
-}
-
-static int vsc9953_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
-{
- struct ocelot *ocelot = bus->priv;
- int err, cmd, val;
-
- /* Wait until MIIM controller becomes idle */
- err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot,
- val, !val, 10, VCS9953_MII_TIMEOUT);
- if (err) {
- dev_err(ocelot->dev, "MDIO read: pending timeout\n");
- goto out;
- }
-
- /* Write the MIIM COMMAND register */
- cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
- (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ;
-
- ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD);
-
- /* Wait while read operation via the MIIM controller is in progress */
- err = readx_poll_timeout(vsc9953_gcb_miim_busy_status, ocelot,
- val, !val, 10, VCS9953_MII_TIMEOUT);
- if (err) {
- dev_err(ocelot->dev, "MDIO read: busy timeout\n");
- goto out;
- }
-
- val = ocelot_read(ocelot, GCB_MIIM_MII_DATA);
-
- err = val & 0xFFFF;
-out:
- return err;
-}
/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
* MEM_INIT is in SYS:SYSTEM:RESET_CFG
@@ -1089,26 +1012,24 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct phy_device *),
+ sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
return -ENOMEM;
}
- bus = devm_mdiobus_alloc(dev);
- if (!bus)
- return -ENOMEM;
+ rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus",
+ ocelot->targets[GCB],
+ ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK]);
- bus->name = "VSC9953 internal MDIO bus";
- bus->read = vsc9953_mdio_read;
- bus->write = vsc9953_mdio_write;
- bus->parent = dev;
- bus->priv = ocelot;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+ if (rc) {
+ dev_err(dev, "failed to setup MDIO bus\n");
+ return rc;
+ }
/* Needed in order to initialize the bus mutex lock */
- rc = mdiobus_register(bus);
+ rc = of_mdiobus_register(bus, NULL);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
return rc;
@@ -1118,9 +1039,9 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct phylink_pcs *phylink_pcs;
+ struct mdio_device *mdio_device;
int addr = port + 4;
- struct mdio_device *pcs;
- struct lynx_pcs *lynx;
if (dsa_is_unused_port(felix->ds, port))
continue;
@@ -1128,17 +1049,17 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- pcs = mdio_device_create(felix->imdio, addr);
- if (IS_ERR(pcs))
+ mdio_device = mdio_device_create(felix->imdio, addr);
+ if (IS_ERR(mdio_device))
continue;
- lynx = lynx_pcs_create(pcs);
- if (!lynx) {
- mdio_device_free(pcs);
+ phylink_pcs = lynx_pcs_create(mdio_device);
+ if (!phylink_pcs) {
+ mdio_device_free(mdio_device);
continue;
}
- felix->pcs[port] = lynx;
+ felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
}
@@ -1152,13 +1073,15 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct lynx_pcs *pcs = felix->pcs[port];
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
+ struct mdio_device *mdio_device;
- if (!pcs)
+ if (!phylink_pcs)
continue;
- mdio_device_free(pcs->mdio);
- lynx_pcs_destroy(pcs);
+ mdio_device = lynx_get_mdio_device(phylink_pcs);
+ mdio_device_free(mdio_device);
+ lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
}
@@ -1172,6 +1095,10 @@ static const struct felix_info seville_info_vsc9953 = {
.stats_layout = vsc9953_stats_layout,
.num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
+ .vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
+ .vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
.num_mact_rows = 2048,
.num_ports = 10,
.num_tx_queues = OCELOT_NUM_TC,
@@ -1179,6 +1106,7 @@ static const struct felix_info seville_info_vsc9953 = {
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
.prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
+ .init_regmap = ocelot_regmap_init,
};
static int seville_probe(struct platform_device *pdev)
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 147ca39531a3..039694518788 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -9,6 +9,8 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
#include <net/dsa.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
@@ -68,6 +70,8 @@ static const struct qca8k_mib_desc ar8327_mib[] = {
MIB_DESC(1, 0x9c, "TxExcDefer"),
MIB_DESC(1, 0xa0, "TxDefer"),
MIB_DESC(1, 0xa4, "TxLateCol"),
+ MIB_DESC(1, 0xa8, "RXUnicast"),
+ MIB_DESC(1, 0xac, "TXUnicast"),
};
/* The 32bit switch registers are accessed indirectly. To achieve this we need
@@ -151,6 +155,25 @@ qca8k_set_page(struct mii_bus *bus, u16 page)
static int
qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
+ return regmap_read(priv->regmap, reg, val);
+}
+
+static int
+qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_write(priv->regmap, reg, val);
+}
+
+static int
+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ return regmap_update_bits(priv->regmap, reg, mask, write_val);
+}
+
+static int
+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
@@ -171,8 +194,9 @@ exit:
}
static int
-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
@@ -193,8 +217,9 @@ exit:
}
static int
-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
@@ -222,34 +247,6 @@ exit:
return ret;
}
-static int
-qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return qca8k_rmw(priv, reg, 0, val);
-}
-
-static int
-qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return qca8k_rmw(priv, reg, val, 0);
-}
-
-static int
-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
-
- return qca8k_read(priv, reg, val);
-}
-
-static int
-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
-
- return qca8k_write(priv, reg, val);
-}
-
static const struct regmap_range qca8k_readable_ranges[] = {
regmap_reg_range(0x0000, 0x00e4), /* Global control */
regmap_reg_range(0x0100, 0x0168), /* EEE control */
@@ -281,26 +278,19 @@ static struct regmap_config qca8k_regmap_config = {
.max_register = 0x16ac, /* end MIB - Port6 range */
.reg_read = qca8k_regmap_read,
.reg_write = qca8k_regmap_write,
+ .reg_update_bits = qca8k_regmap_update_bits,
.rd_table = &qca8k_readable_table,
+ .disable_locking = true, /* Locking is handled by qca8k read/write */
+ .cache_type = REGCACHE_NONE, /* Explicitly disable caching */
};
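
With reg_read/reg_write/reg_update_bits callbacks in place, the regmap core supplies locking and read-modify-write on top of whatever transport the driver provides (here, paged MDIO access). A standalone model of that callback/update_bits split — the regmap internals are far more involved; this only illustrates the dispatch pattern, and all names below are hypothetical:

#include <stdio.h>
#include <stdint.h>

/* A register-access "ops" table: the core calls through it, the
 * backend decides how the bits actually move. */
struct reg_ops {
	int (*read)(void *ctx, uint32_t reg, uint32_t *val);
	int (*write)(void *ctx, uint32_t reg, uint32_t val);
};

static int update_bits(const struct reg_ops *ops, void *ctx,
		       uint32_t reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp;
	int ret = ops->read(ctx, reg, &tmp);

	if (ret)
		return ret;
	tmp = (tmp & ~mask) | (val & mask);	/* read-modify-write */
	return ops->write(ctx, reg, tmp);
}

static uint32_t fake_regs[16];

static int fake_read(void *ctx, uint32_t reg, uint32_t *val)
{ (void)ctx; *val = fake_regs[reg & 15]; return 0; }

static int fake_write(void *ctx, uint32_t reg, uint32_t val)
{ (void)ctx; fake_regs[reg & 15] = val; return 0; }

int main(void)
{
	const struct reg_ops ops = { fake_read, fake_write };

	update_bits(&ops, NULL, 3, 0xf0, 0x50);
	printf("reg3=0x%02x\n", fake_regs[3]);
	return 0;
}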
static int
qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
- int ret, ret1;
u32 val;
- ret = read_poll_timeout(qca8k_read, ret1, !(val & mask),
- 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- priv, reg, &val);
-
- /* Check if qca8k_read has failed for a different reason
- * before returning -ETIMEDOUT
- */
- if (ret < 0 && ret1 < 0)
- return ret1;
-
- return ret;
+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
}
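
regmap_read_poll_timeout() folds the read, condition check, delay, and timeout handling that the old qca8k_busy_wait() open-coded into a single macro. The open-coded equivalent looks roughly like the loop below — a userspace sketch with a fake register read, not the kernel macro itself:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Stand-in for a register read; the BUSY bit clears after a few calls */
static int fake_read(uint32_t *val)
{
	static int calls;

	*val = (++calls < 3) ? 0x80000000u : 0;
	return 0;
}

static int poll_until_clear(uint32_t mask, int max_tries)
{
	uint32_t val;
	int i, ret;

	for (i = 0; i < max_tries; i++) {
		ret = fake_read(&val);
		if (ret)
			return ret;	/* propagate read errors */
		if (!(val & mask))
			return 0;	/* condition met */
		/* a real implementation would sleep/delay here */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("poll result: %d\n", poll_until_clear(0x80000000u, 10));
	return 0;
}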
static int
@@ -319,18 +309,18 @@ qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
}
/* vid - 83:72 */
- fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
/* aging - 67:64 */
- fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
/* portmask - 54:48 */
- fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
/* mac - 47:0 */
- fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
- fdb->mac[1] = reg[1] & 0xff;
- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
- fdb->mac[5] = reg[0] & 0xff;
+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
return 0;
}
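
The conversions above replace hand-written shift/mask pairs with FIELD_PREP(), which packs a value into a GENMASK()-defined field, and FIELD_GET(), which extracts it back. A portable sketch of what those helpers compute — simplified, without the compile-time checks the kernel macros add, and only valid for masks below bit 31:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's GENMASK/FIELD_* helpers */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define LSB(mask)	((mask) & ~((mask) - 1))	/* lowest set bit */
#define FIELD_PREP(mask, v)	(((uint32_t)(v) * LSB(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) / LSB(mask))

#define ATU_VID_MASK	GENMASK(19, 8)

int main(void)
{
	uint32_t reg = FIELD_PREP(ATU_VID_MASK, 100);	/* pack VID 100 */

	printf("reg=0x%08x vid=%u\n", reg, FIELD_GET(ATU_VID_MASK, reg));
	return 0;
}

The payoff is that the field position lives in one GENMASK() definition instead of a separate shift and mask constant that can drift apart.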
@@ -343,18 +333,18 @@ qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
int i;
/* vid - 83:72 */
- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
/* aging - 67:64 */
- reg[2] |= aging & QCA8K_ATU_STATUS_M;
+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
/* portmask - 54:48 */
- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
/* mac - 47:0 */
- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
- reg[1] |= mac[1];
- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
- reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
- reg[0] |= mac[5];
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
/* load the array into the ARL table */
for (i = 0; i < 3; i++)
@@ -372,7 +362,7 @@ qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
reg |= cmd;
if (port >= 0) {
reg |= QCA8K_ATU_FUNC_PORT_EN;
- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
}
/* Write the function register triggering the table access */
@@ -446,6 +436,81 @@ qca8k_fdb_flush(struct qca8k_priv *priv)
}
static int
+qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule exists. Delete it first */
+ if (!fdb.aging) {
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+ }
+
+ /* Add port to fdb portmask */
+ fdb.port_mask |= port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int
+qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule doesn't exist, so there is nothing to delete */
+ if (!fdb.aging) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+
+ /* This port is the only one in the rule. Don't re-insert it */
+ if (fdb.port_mask == port_mask)
+ goto exit;
+
+ /* Remove port from port mask */
+ fdb.port_mask &= ~port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int
qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
{
u32 reg;
@@ -454,7 +519,7 @@ qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
/* Set the command and VLAN index */
reg = QCA8K_VTU_FUNC1_BUSY;
reg |= cmd;
- reg |= vid << QCA8K_VTU_FUNC1_VID_S;
+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
/* Write the function register triggering the table access */
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
@@ -500,13 +565,11 @@ qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
if (ret < 0)
goto out;
reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
if (untagged)
- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
else
- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
if (ret)
@@ -534,15 +597,13 @@ qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
if (ret < 0)
goto out;
- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
/* Check if we're the last member to be removed */
del = true;
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
if ((reg & mask) != mask) {
del = false;
@@ -571,7 +632,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
int ret;
mutex_lock(&priv->reg_mutex);
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
if (ret)
goto exit;
@@ -579,7 +640,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
if (ret)
goto exit;
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
if (ret)
goto exit;
@@ -600,9 +661,9 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
mask |= QCA8K_PORT_STATUS_LINK_AUTO;
if (enable)
- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
else
- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}
static u32
@@ -864,8 +925,8 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
* a dt-overlay and driver reload changed the configuration
*/
- return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_EN);
+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_EN);
}
/* Check if the devicetree declare the port:phy mapping */
@@ -983,7 +1044,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
u32 delay;
/* We have 2 CPU ports. Check them */
- for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) {
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
/* Skip every other port */
if (port != 0 && port != 6)
continue;
@@ -1014,7 +1075,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
mode == PHY_INTERFACE_MODE_RGMII_TXID)
delay = 1;
- if (delay > QCA8K_MAX_DELAY) {
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
@@ -1030,7 +1091,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
mode == PHY_INTERFACE_MODE_RGMII_RXID)
delay = 2;
- if (delay > QCA8K_MAX_DELAY) {
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
@@ -1089,14 +1150,6 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
- mutex_init(&priv->reg_mutex);
-
- /* Start by setting up the register mapping */
- priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
- &qca8k_regmap_config);
- if (IS_ERR(priv->regmap))
- dev_warn(priv->dev, "regmap initialization failed");
-
ret = qca8k_setup_mdio_bus(priv);
if (ret)
return ret;
@@ -1110,16 +1163,16 @@ qca8k_setup(struct dsa_switch *ds)
return ret;
/* Make sure MAC06 is disabled */
- ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL,
- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
if (ret) {
dev_err(priv->dev, "failed disabling MAC06 exchange");
return ret;
}
/* Enable CPU Port */
- ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
if (ret) {
dev_err(priv->dev, "failed enabling CPU port");
return ret;
@@ -1141,8 +1194,8 @@ qca8k_setup(struct dsa_switch *ds)
/* Enable QCA header mode on all cpu ports */
if (dsa_is_cpu_port(ds, i)) {
ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
if (ret) {
dev_err(priv->dev, "failed enabling QCA header mode");
return ret;
@@ -1159,10 +1212,10 @@ qca8k_setup(struct dsa_switch *ds)
* for igmp, unknown, multicast and broadcast packet
*/
ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
if (ret)
return ret;
@@ -1180,8 +1233,6 @@ qca8k_setup(struct dsa_switch *ds)
/* Individual user ports get connected to CPU port only */
if (dsa_is_user_port(ds, i)) {
- int shift = 16 * (i % 2);
-
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER,
BIT(cpu_port));
@@ -1189,8 +1240,8 @@ qca8k_setup(struct dsa_switch *ds)
return ret;
/* Enable ARP Auto-learning by default */
- ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_LEARN);
+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
if (ret)
return ret;
@@ -1198,8 +1249,8 @@ qca8k_setup(struct dsa_switch *ds)
* default egress vid
*/
ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
- 0xfff << shift,
- QCA8K_PORT_VID_DEF << shift);
+ QCA8K_EGREES_VLAN_PORT_MASK(i),
+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
if (ret)
return ret;
@@ -1246,7 +1297,7 @@ qca8k_setup(struct dsa_switch *ds)
QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
QCA8K_PORT_HOL_CTRL1_WRED_EN;
qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
- QCA8K_PORT_HOL_CTRL1_ING_BUF |
+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
QCA8K_PORT_HOL_CTRL1_WRED_EN,
@@ -1269,8 +1320,8 @@ qca8k_setup(struct dsa_switch *ds)
mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
- QCA8K_GLOBAL_FC_GOL_XON_THRES_S |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S,
+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
mask);
}
@@ -1285,6 +1336,13 @@ qca8k_setup(struct dsa_switch *ds)
/* We don't have interrupts for link changes, so we need to poll */
ds->pcs_poll = true;
+ /* Set the min and max ageing values supported */
+ ds->ageing_time_min = 7000;
+ ds->ageing_time_max = 458745000;
+
+ /* Set max number of LAGs supported */
+ ds->num_lag_ids = QCA8K_NUM_LAGS;
+
return 0;
}
@@ -1631,12 +1689,16 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
static void
qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{
+ const struct qca8k_match_data *match_data;
+ struct qca8k_priv *priv = ds->priv;
int i;
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
+ match_data = of_device_get_match_data(priv->dev);
+
+ for (i = 0; i < match_data->mib_count; i++)
strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
ETH_GSTRING_LEN);
}
@@ -1646,12 +1708,15 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ const struct qca8k_match_data *match_data;
const struct qca8k_mib_desc *mib;
u32 reg, i, val;
u32 hi = 0;
int ret;
- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
+ match_data = of_device_get_match_data(priv->dev);
+
+ for (i = 0; i < match_data->mib_count; i++) {
mib = &ar8327_mib[i];
reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
@@ -1674,10 +1739,15 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
static int
qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
+ const struct qca8k_match_data *match_data;
+ struct qca8k_priv *priv = ds->priv;
+
if (sset != ETH_SS_STATS)
return 0;
- return ARRAY_SIZE(ar8327_mib);
+ match_data = of_device_get_match_data(priv->dev);
+
+ return match_data->mib_count;
}
static int
@@ -1740,8 +1810,9 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
}
-static int
-qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
+static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int port_mask, cpu_port;
@@ -1753,14 +1824,14 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
if (dsa_is_cpu_port(ds, i))
continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Add this port to the portvlan mask of the other ports
* in the bridge
*/
- ret = qca8k_reg_set(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
if (ret)
return ret;
if (i != port)
@@ -1774,8 +1845,8 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
return ret;
}
-static void
-qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
+static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int cpu_port, i;
@@ -1785,14 +1856,14 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
if (dsa_is_cpu_port(ds, i))
continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Remove this port from the portvlan mask of the other ports
* in the bridge
*/
- qca8k_reg_clear(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
+ regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
}
/* Set the cpu port to be the only one in the portvlan mask of
@@ -1802,6 +1873,36 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}
+static void
+qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct qca8k_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ u32 val;
+
+ /* The AGE_TIME register is programmed in 7 s steps */
+ val = secs / 7;
+
+ /* Round a result of 0 up to 1 so that learning does
+ * NOT get disabled
+ */
+ if (!val)
+ val = 1;
+
+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
+ QCA8K_ATU_AGE_TIME(val));
+}
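
The conversion is simple but worth seeing end to end: milliseconds become seconds, seconds become 7-second hardware units, and a result of zero is rounded up to one so learning stays enabled. A quick standalone check of that arithmetic against the min/max advertised in qca8k_setup():

#include <stdio.h>

/* msecs -> AGE_TIME register units (7 s steps, floor of 1) */
static unsigned int age_time_units(unsigned int msecs)
{
	unsigned int val = (msecs / 1000) / 7;

	return val ? val : 1;
}

int main(void)
{
	/* 7000 ms is the advertised minimum, 458745000 ms the maximum */
	printf("%u %u %u\n", age_time_units(7000),
	       age_time_units(300000), age_time_units(458745000));
	return 0;
}

The maximum maps to 65535, exactly filling the 16-bit QCA8K_ATU_AGE_TIME_MASK field.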
+
static int
qca8k_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
@@ -1908,6 +2009,121 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int
+qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
+}
+
+static int
+qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+}
+
+static int
+qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int monitor_port, ret;
+ u32 reg, val;
+
+ /* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* QCA83xx can have only one port set to mirror mode.
+ * Check that the correct port is requested and return error otherwise.
+ * When no mirror port is set, the value is set to 0xF.
+ */
+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+ /* Set the monitor port */
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
+ mirror->to_local_port);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ return ret;
+
+ if (ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_update_bits(priv->regmap, reg, val, val);
+ if (ret)
+ return ret;
+
+ /* Track mirror port for tx and rx to decide when the
+ * mirror port has to be disabled.
+ */
+ if (ingress)
+ priv->mirror_rx |= BIT(port);
+ else
+ priv->mirror_tx |= BIT(port);
+
+ return 0;
+}
+
+static void
+qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg, val;
+ int ret;
+
+ if (mirror->ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_clear_bits(priv->regmap, reg, val);
+ if (ret)
+ goto err;
+
+ if (mirror->ingress)
+ priv->mirror_rx &= ~BIT(port);
+ else
+ priv->mirror_tx &= ~BIT(port);
+
+ /* No port is mirrored anymore. Disable the mirror port */
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ goto err;
+ }
+
+ return;
+
+err:
+ dev_err(priv->dev, "Failed to del mirror port from %d", port);
+}
+
+static int
qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
@@ -1916,11 +2132,11 @@ qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
if (vlan_filtering) {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
} else {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
}
@@ -1944,10 +2160,9 @@ qca8k_port_vlan_add(struct dsa_switch *ds, int port,
}
if (pvid) {
- int shift = 16 * (port % 2);
-
ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- 0xfff << shift, vlan->vid << shift);
+ QCA8K_EGREES_VLAN_PORT_MASK(port),
+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
if (ret)
return ret;
@@ -1996,12 +2211,185 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
return DSA_TAG_PROTO_QCA;
}
+static bool
+qca8k_lag_can_offload(struct dsa_switch *ds,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct dsa_port *dp;
+ int id, members = 0;
+
+ id = dsa_lag_id(ds->dst, lag);
+ if (id < 0 || id >= ds->num_lag_ids)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > QCA8K_NUM_PORTS_FOR_LAG)
+ return false;
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return false;
+
+ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+ info->hash_type != NETDEV_LAG_HASH_L23)
+ return false;
+
+ return true;
+}
+
+static int
+qca8k_lag_setup_hash(struct dsa_switch *ds,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct qca8k_priv *priv = ds->priv;
+ bool unique_lag = true;
+ u32 hash = 0;
+ int i, id;
+
+ id = dsa_lag_id(ds->dst, lag);
+
+ switch (info->hash_type) {
+ case NETDEV_LAG_HASH_L23:
+ hash |= QCA8K_TRUNK_HASH_SIP_EN;
+ hash |= QCA8K_TRUNK_HASH_DIP_EN;
+ fallthrough;
+ case NETDEV_LAG_HASH_L2:
+ hash |= QCA8K_TRUNK_HASH_SA_EN;
+ hash |= QCA8K_TRUNK_HASH_DA_EN;
+ break;
+ default: /* We should NEVER reach this */
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are the only configured LAG */
+ dsa_lags_foreach_id(i, ds->dst)
+ if (i != id && dsa_lag_dev(ds->dst, i)) {
+ unique_lag = false;
+ break;
+ }
+
+ /* The hash mode is global. Make sure the same hash mode
+ * is set for all 4 possible LAGs.
+ * If we are the only configured LAG, we can set whatever
+ * hash mode we want.
+ * To change the hash mode, all LAGs must first be removed
+ * and re-created with the new mode.
+ */
+ if (unique_lag) {
+ priv->lag_hash_mode = hash;
+ } else if (priv->lag_hash_mode != hash) {
+ netdev_err(lag, "Error: Mismatched Hash Mode across different lag is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
+ QCA8K_TRUNK_HASH_MASK, hash);
+}
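
The hash configuration is just an OR of per-field enable bits, with L23 a superset of L2 (hence the fallthrough above). A compact standalone sketch of that mapping, using stand-in constants mirroring the QCA8K_TRUNK_HASH_* bits:

#include <stdio.h>
#include <stdint.h>

#define HASH_SIP_EN	(1u << 3)	/* source IP */
#define HASH_DIP_EN	(1u << 2)	/* destination IP */
#define HASH_SA_EN	(1u << 1)	/* source MAC */
#define HASH_DA_EN	(1u << 0)	/* destination MAC */

enum lag_hash { HASH_L2, HASH_L23 };

static uint32_t lag_hash_bits(enum lag_hash type)
{
	uint32_t hash = HASH_SA_EN | HASH_DA_EN;	/* L2: MAC addresses */

	if (type == HASH_L23)				/* L23 adds the IPs */
		hash |= HASH_SIP_EN | HASH_DIP_EN;
	return hash;
}

int main(void)
{
	printf("L2=0x%x L23=0x%x\n", lag_hash_bits(HASH_L2),
	       lag_hash_bits(HASH_L23));
	return 0;
}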
+
+static int
+qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
+ struct net_device *lag, bool delete)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret, id, i;
+ u32 val;
+
+ id = dsa_lag_id(ds->dst, lag);
+
+ /* Read current port member */
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* Shift val to the correct trunk */
+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
+ if (delete)
+ val &= ~BIT(port);
+ else
+ val |= BIT(port);
+
+ /* Update port member. With empty portmap disable trunk */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
+ QCA8K_REG_GOL_TRUNK_MEMBER(id) |
+ QCA8K_REG_GOL_TRUNK_EN(id),
+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
+
+ /* Search for an empty member if adding, or for the port if deleting */
+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
+ if (ret)
+ return ret;
+
+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
+
+ if (delete) {
+ /* If the member is flagged as disabled, assume this
+ * slot is empty
+ */
+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
+ if (val != port)
+ continue;
+ } else {
+ /* If the member is flagged as enabled, assume this
+ * slot is already taken
+ */
+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+ }
+
+ /* We have found the member to add/remove */
+ break;
+ }
+
+ /* Set port in the correct port mask or disable port if in delete mode */
+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
+}
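
Each trunk owns four 4-bit member slots packed two trunks per 32-bit register, so a slot's bit offset is the per-member shift plus the per-trunk half-register shift — exactly what the "complex shift" macros in qca8k.h spell out. A standalone check of that addressing (register numbers mirror the 0x704/0x708 layout in the header):

#include <stdio.h>

/* 4-bit member slots: 4 per trunk, 2 trunks per 32-bit register */
static int mem_id_shift(int trunk, int member)
{
	return member * 4 + (trunk % 2) * 16;
}

int main(void)
{
	int trunk, member;

	for (trunk = 0; trunk < 4; trunk++)
		for (member = 0; member < 4; member++)
			printf("trunk %d member %d -> reg 0x%x bits [%d:%d]\n",
			       trunk, member, 0x704 + (trunk / 2) * 4,
			       mem_id_shift(trunk, member) + 3,
			       mem_id_shift(trunk, member));
	return 0;
}

In each slot, bit 3 is the enable flag and bits 2:0 hold the port number, matching the EN/PORT sub-masks defined below.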
+
+static int
+qca8k_port_lag_join(struct dsa_switch *ds, int port,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ int ret;
+
+ if (!qca8k_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ ret = qca8k_lag_setup_hash(ds, lag, info);
+ if (ret)
+ return ret;
+
+ return qca8k_lag_refresh_portmap(ds, port, lag, false);
+}
+
+static int
+qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct net_device *lag)
+{
+ return qca8k_lag_refresh_portmap(ds, port, lag, true);
+}
+
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
.get_strings = qca8k_get_strings,
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
+ .set_ageing_time = qca8k_set_ageing_time,
.get_mac_eee = qca8k_get_mac_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
@@ -2011,9 +2399,14 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_stp_state_set = qca8k_port_stp_state_set,
.port_bridge_join = qca8k_port_bridge_join,
.port_bridge_leave = qca8k_port_bridge_leave,
+ .port_fast_age = qca8k_port_fast_age,
.port_fdb_add = qca8k_port_fdb_add,
.port_fdb_del = qca8k_port_fdb_del,
.port_fdb_dump = qca8k_port_fdb_dump,
+ .port_mdb_add = qca8k_port_mdb_add,
+ .port_mdb_del = qca8k_port_mdb_del,
+ .port_mirror_add = qca8k_port_mirror_add,
+ .port_mirror_del = qca8k_port_mirror_del,
.port_vlan_filtering = qca8k_port_vlan_filtering,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
@@ -2023,6 +2416,8 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.phylink_mac_link_down = qca8k_phylink_mac_link_down,
.phylink_mac_link_up = qca8k_phylink_mac_link_up,
.get_phy_flags = qca8k_get_phy_flags,
+ .port_lag_join = qca8k_port_lag_join,
+ .port_lag_leave = qca8k_port_lag_leave,
};
static int qca8k_read_switch_id(struct qca8k_priv *priv)
@@ -2041,7 +2436,7 @@ static int qca8k_read_switch_id(struct qca8k_priv *priv)
if (ret < 0)
return -ENODEV;
- id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK);
+ id = QCA8K_MASK_CTRL_DEVICE_ID(val);
if (id != data->id) {
dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
return -ENODEV;
@@ -2050,7 +2445,7 @@ static int qca8k_read_switch_id(struct qca8k_priv *priv)
priv->switch_id = id;
/* Save revision to communicate to the internal PHY driver */
- priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK);
+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
return 0;
}
@@ -2085,6 +2480,14 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
gpiod_set_value_cansleep(priv->reset_gpio, 0);
}
+ /* Start by setting up the register mapping */
+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
+ &qca8k_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(priv->dev, "regmap initialization failed");
+ return PTR_ERR(priv->regmap);
+ }
+
/* Check the detected switch id */
ret = qca8k_read_switch_id(priv);
if (ret)
@@ -2173,14 +2576,17 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
static const struct qca8k_match_data qca8327 = {
.id = QCA8K_ID_QCA8327,
.reduced_package = true,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
};
static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
};
static const struct qca8k_match_data qca833x = {
.id = QCA8K_ID_QCA8337,
+ .mib_count = QCA8K_QCA833X_MIB_COUNT,
};
static const struct of_device_id qca8k_of_match[] = {
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 128b8cf85e08..ab4a417b25a9 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -15,12 +15,17 @@
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
#define QCA8K_MAX_MTU 9000
+#define QCA8K_NUM_LAGS 4
+#define QCA8K_NUM_PORTS_FOR_LAG 4
#define PHY_ID_QCA8327 0x004dd034
#define QCA8K_ID_QCA8327 0x12
#define PHY_ID_QCA8337 0x004dd036
#define QCA8K_ID_QCA8337 0x13
+#define QCA8K_QCA832X_MIB_COUNT 39
+#define QCA8K_QCA833X_MIB_COUNT 41
+
#define QCA8K_BUSY_WAIT_TIMEOUT 2000
#define QCA8K_NUM_FDB_RECORDS 2048
@@ -30,9 +35,9 @@
/* Global control registers */
#define QCA8K_REG_MASK_CTRL 0x000
#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
-#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0)
+#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x)
#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
-#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
+#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x)
#define QCA8K_REG_PORT0_PAD_CTRL 0x004
#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
@@ -41,12 +46,11 @@
#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
-#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
-#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
-#define QCA8K_MAX_DELAY 3
#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
#define QCA8K_REG_PWS 0x010
#define QCA8K_PWS_POWER_ON_SEL BIT(31)
@@ -68,10 +72,12 @@
#define QCA8K_MDIO_MASTER_READ BIT(27)
#define QCA8K_MDIO_MASTER_WRITE 0
#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
-#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
-#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
-#define QCA8K_MDIO_MASTER_DATA(x) (x)
+#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21)
+#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x)
+#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16)
+#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x)
#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
+#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
#define QCA8K_MDIO_MASTER_MAX_PORTS 5
#define QCA8K_MDIO_MASTER_MAX_REG 32
#define QCA8K_GOL_MAC_ADDR0 0x60
@@ -93,9 +99,7 @@
#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
-#define QCA8K_PORT_HDR_CTRL_RX_S 2
#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
-#define QCA8K_PORT_HDR_CTRL_TX_S 0
#define QCA8K_PORT_HDR_CTRL_ALL 2
#define QCA8K_PORT_HDR_CTRL_MGMT 1
#define QCA8K_PORT_HDR_CTRL_NONE 0
@@ -105,10 +109,11 @@
#define QCA8K_SGMII_EN_TX BIT(3)
#define QCA8K_SGMII_EN_SD BIT(4)
#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
-#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23))
-#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22)
-#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
-#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
+#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22)
+#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x)
+#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0)
+#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1)
+#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2)
/* MAC_PWR_SEL registers */
#define QCA8K_REG_MAC_PWR_SEL 0x0e4
@@ -119,102 +124,152 @@
#define QCA8K_REG_EEE_CTRL 0x100
#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
+/* TRUNK_HASH_EN registers */
+#define QCA8K_TRUNK_HASH_EN_CTRL 0x270
+#define QCA8K_TRUNK_HASH_SIP_EN BIT(3)
+#define QCA8K_TRUNK_HASH_DIP_EN BIT(2)
+#define QCA8K_TRUNK_HASH_SA_EN BIT(1)
+#define QCA8K_TRUNK_HASH_DA_EN BIT(0)
+#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0)
+
/* ACL registers */
#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
-#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
-#define QCA8K_PORT_VLAN_SVID(x) x
+#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
+#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x)
+#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0)
+#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x)
#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
/* Lookup registers */
#define QCA8K_REG_ATU_DATA0 0x600
-#define QCA8K_ATU_ADDR2_S 24
-#define QCA8K_ATU_ADDR3_S 16
-#define QCA8K_ATU_ADDR4_S 8
+#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
+#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
+#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8)
+#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0)
#define QCA8K_REG_ATU_DATA1 0x604
-#define QCA8K_ATU_PORT_M 0x7f
-#define QCA8K_ATU_PORT_S 16
-#define QCA8K_ATU_ADDR0_S 8
+#define QCA8K_ATU_PORT_MASK GENMASK(22, 16)
+#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8)
+#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0)
#define QCA8K_REG_ATU_DATA2 0x608
-#define QCA8K_ATU_VID_M 0xfff
-#define QCA8K_ATU_VID_S 8
-#define QCA8K_ATU_STATUS_M 0xf
+#define QCA8K_ATU_VID_MASK GENMASK(19, 8)
+#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0)
#define QCA8K_ATU_STATUS_STATIC 0xf
#define QCA8K_REG_ATU_FUNC 0x60c
#define QCA8K_ATU_FUNC_BUSY BIT(31)
#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
#define QCA8K_ATU_FUNC_FULL BIT(12)
-#define QCA8K_ATU_FUNC_PORT_M 0xf
-#define QCA8K_ATU_FUNC_PORT_S 8
+#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8)
#define QCA8K_REG_VTU_FUNC0 0x610
#define QCA8K_VTU_FUNC0_VALID BIT(20)
#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
-#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
-#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3
-#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0
-#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1
-#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2
-#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3
+/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4)
+ * It does contain VLAN_MODE for each port [5:4] for port0,
+ * [7:6] for port1 ... [17:16] for port6. Use virtual port
+ * define to handle this.
+ */
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2)
+#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
#define QCA8K_REG_VTU_FUNC1 0x614
#define QCA8K_VTU_FUNC1_BUSY BIT(31)
-#define QCA8K_VTU_FUNC1_VID_S 16
+#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
#define QCA8K_VTU_FUNC1_FULL BIT(4)
+#define QCA8K_REG_ATU_CTRL 0x618
+#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
+#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
+#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4)
#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
-#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
-#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
-#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
-#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8)
+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0)
#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3)
#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
-#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16)
-#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x)
+#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0)
+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1)
+#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2)
+#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
+#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
+
+#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
+/* Up to 4 trunks, one byte of configuration each:
+ * bits [6:0] are the port member bitmap,
+ * bit 7 enables the trunk
+ */
+#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8)
+#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7)
+#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0)
+#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */
+#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16)
+#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4)
+/* Complex shift: FIRST shift for port THEN shift for trunk */
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16)
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x)
#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x)
#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0)
+#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x)
#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
/* Pkt edit registers */
+#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2))
+#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
+#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
/* L3 registers */
@@ -244,6 +299,7 @@ enum qca8k_fdb_cmd {
QCA8K_FDB_FLUSH = 1,
QCA8K_FDB_LOAD = 2,
QCA8K_FDB_PURGE = 3,
+ QCA8K_FDB_FLUSH_PORT = 5,
QCA8K_FDB_NEXT = 6,
QCA8K_FDB_SEARCH = 7,
};
@@ -264,6 +320,7 @@ struct ar8xxx_port_status {
struct qca8k_match_data {
u8 id;
bool reduced_package;
+ u8 mib_count;
};
enum {
@@ -282,6 +339,9 @@ struct qca8k_ports_config {
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
+ u8 mirror_rx;
+ u8 mirror_tx;
+ u8 lag_hash_mode;
bool legacy_phy_port_mapping;
struct qca8k_ports_config ports_config;
struct regmap *regmap;
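
The conversion above replaces the open-coded _S shift constants with self-describing masks. A minimal sketch of the resulting style, assuming only <linux/bitfield.h>; the helper names are hypothetical:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Compose an ATU_DATA1 value: FIELD_PREP() shifts each argument into
 * place, so no separate shift constants are needed.
 */
static u32 example_atu_data1(u8 ports, u8 addr0, u8 addr1)
{
	return FIELD_PREP(QCA8K_ATU_PORT_MASK, ports) |
	       FIELD_PREP(QCA8K_ATU_ADDR0_MASK, addr0) |
	       FIELD_PREP(QCA8K_ATU_ADDR1_MASK, addr1);
}

/* FIELD_GET() is the symmetric read side. */
static u8 example_atu_data1_ports(u32 reg)
{
	return FIELD_GET(QCA8K_ATU_PORT_MASK, reg);
}
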
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index c66ebd0ee217..aae46ada8d83 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -456,7 +456,7 @@ static int realtek_smi_probe(struct platform_device *pdev)
smi->ds->ops = var->ds_ops;
ret = dsa_register_switch(smi->ds);
if (ret) {
- dev_err(dev, "unable to register switch ret = %d\n", ret);
+ dev_err_probe(dev, ret, "unable to register switch\n");
return ret;
}
return 0;
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c
index 078ca4cd7160..3b729544798b 100644
--- a/drivers/net/dsa/rtl8365mb.c
+++ b/drivers/net/dsa/rtl8365mb.c
@@ -277,7 +277,7 @@
(RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
-/* MSTP port state registers - indexed by tree instancrSTI (tree ine */
+/* MSTP port state registers - indexed by tree instance */
#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
(RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
@@ -767,7 +767,8 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
* 0 = no delay, 1 = 2 ns delay
* RX delay:
* 0 = no delay, 7 = maximum delay
- * No units are specified, but there are a total of 8 steps.
+ * Each step is approximately 0.3 ns, so the maximum delay is about
+ * 2.1 ns.
*
* The vendor driver also states that this must be configured *before*
* forcing the external interface into a particular mode, which is done
@@ -778,10 +779,6 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
* specified. We ignore the detail of the RGMII interface mode
* (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
* property.
- *
- * For the RX delay, we assume that a register value of 4 corresponds to
- * 2 ns. But this is just an educated guess, so ignore all other values
- * to avoid too much confusion.
*/
if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
val = val / 1000; /* convert to ns */
@@ -794,13 +791,13 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
}
if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
- val = val / 1000; /* convert to ns */
+ val = DIV_ROUND_CLOSEST(val, 300); /* convert to 0.3 ns step */
- if (val == 0 || val == 2)
- rx_delay = val * 2;
+ if (val <= 7)
+ rx_delay = val;
else
dev_warn(smi->dev,
- "EXT port RX delay must be 0 to 2 ns\n");
+ "EXT port RX delay must be 0 to 2.1 ns\n");
}
ret = regmap_update_bits(
@@ -903,7 +900,8 @@ static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
{
if (dsa_is_user_port(ds, port) &&
(interface == PHY_INTERFACE_MODE_NA ||
- interface == PHY_INTERFACE_MODE_INTERNAL))
+ interface == PHY_INTERFACE_MODE_INTERNAL ||
+ interface == PHY_INTERFACE_MODE_GMII))
/* Internal PHY */
return true;
else if (dsa_is_cpu_port(ds, port) &&
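
For reference, a sketch of the rx-internal-delay-ps conversion above, pulled out of driver context; it assumes the ~300 ps step size stated in the comment, with valid register values 0..7 (0 to ~2.1 ns):

#include <linux/errno.h>
#include <linux/math.h>

/* Hypothetical helper: map a device-tree picosecond value onto the
 * 0..7 delay-step register field, rounding to the nearest step.
 */
static int example_ps_to_rx_delay_step(u32 ps)
{
	u32 step = DIV_ROUND_CLOSEST(ps, 300);	/* ~0.3 ns per step */

	return step <= 7 ? step : -EINVAL;	/* 2100 ps is the ceiling */
}
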
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index 03deacd83e61..ecc19bd5115f 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -1186,7 +1186,8 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
static int
rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload)
{
struct realtek_smi *smi = ds->priv;
unsigned int port_bitmap = 0;
@@ -1198,7 +1199,7 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
if (i == port)
continue;
/* Not on this bridge */
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Join this port to each other port on the bridge */
ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
@@ -1218,7 +1219,7 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
static void
rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
struct realtek_smi *smi = ds->priv;
unsigned int port_bitmap = 0;
@@ -1230,7 +1231,7 @@ rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
if (i == port)
continue;
/* Not on this bridge */
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Remove this port from any other port on the bridge */
ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 21dba16af097..9ba2ec2b966d 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -249,6 +249,7 @@ struct sja1105_private {
bool fixed_link[SJA1105_MAX_NUM_PORTS];
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
+ unsigned long hwts_tx_en;
const struct sja1105_info *info;
size_t max_xfer_len;
struct spi_device *spidev;
@@ -256,11 +257,13 @@ struct sja1105_private {
u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS];
struct sja1105_flow_block flow_block;
- struct sja1105_port ports[SJA1105_MAX_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ /* PTP two-step TX timestamp ID, and its serialization lock */
+ spinlock_t ts_id_lock;
+ u8 ts_id;
/* Serializes access to the dynamic config interface */
struct mutex dynamic_config_lock;
struct devlink_region **regions;
@@ -269,7 +272,6 @@ struct sja1105_private {
struct mii_bus *mdio_base_tx;
struct mii_bus *mdio_pcs;
struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS];
- struct sja1105_tagger_data tagger_data;
struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
};
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 72b9b39b0989..7dcdd784aea4 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -379,7 +379,7 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
vl_rule = true;
rc = sja1105_vl_gate(priv, port, extack, cookie,
- &key, act->gate.index,
+ &key, act->hw_index,
act->gate.prio,
act->gate.basetime,
act->gate.cycletime,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index c343effe2e96..b513713be610 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -118,13 +118,14 @@ static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
{
struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct sja1105_private *priv = ds->priv;
struct sja1105_vlan_lookup_entry *vlan;
bool drop_untagged = false;
int match, rc;
u16 pvid;
- if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
+ if (br && br_vlan_enabled(br))
pvid = priv->bridge_pvid[port];
else
pvid = priv->tag_8021q_pvid[port];
@@ -1979,7 +1980,7 @@ static int sja1105_manage_flood_domains(struct sja1105_private *priv)
}
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
- struct net_device *br, bool member)
+ struct dsa_bridge bridge, bool member)
{
struct sja1105_l2_forwarding_entry *l2_fwd;
struct sja1105_private *priv = ds->priv;
@@ -2004,7 +2005,7 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port,
*/
if (i == port)
continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
sja1105_port_allow_traffic(l2_fwd, i, port, member);
sja1105_port_allow_traffic(l2_fwd, port, i, member);
@@ -2073,15 +2074,31 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
}
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload)
{
- return sja1105_bridge_member(ds, port, br, true);
+ int rc;
+
+ rc = sja1105_bridge_member(ds, port, bridge, true);
+ if (rc)
+ return rc;
+
+ rc = dsa_tag_8021q_bridge_tx_fwd_offload(ds, port, bridge);
+ if (rc) {
+ sja1105_bridge_member(ds, port, bridge, false);
+ return rc;
+ }
+
+ *tx_fwd_offload = true;
+
+ return 0;
}
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
- sja1105_bridge_member(ds, port, br, false);
+ dsa_tag_8021q_bridge_tx_fwd_unoffload(ds, port, bridge);
+ sja1105_bridge_member(ds, port, bridge, false);
}
#define BYTES_PER_KBIT (1000LL / 8)
@@ -2587,8 +2604,9 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
if (netif_is_bridge_master(upper)) {
list_for_each_entry(dp, &dst->ports, list) {
- if (dp->bridge_dev && dp->bridge_dev != upper &&
- br_vlan_enabled(dp->bridge_dev)) {
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br && br != upper && br_vlan_enabled(br)) {
NL_SET_ERR_MSG_MOD(extack,
"Only one VLAN-aware bridge is supported");
return -EBUSY;
@@ -2599,18 +2617,6 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
return 0;
}
-static void sja1105_port_disable(struct dsa_switch *ds, int port)
-{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
-
- if (!dsa_is_user_port(ds, port))
- return;
-
- kthread_cancel_work_sync(&sp->xmit_work);
- skb_queue_purge(&sp->xmit_queue);
-}
-
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
struct sk_buff *skb, bool takets)
{
@@ -2669,10 +2675,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
return NETDEV_TX_OK;
}
-#define work_to_port(work) \
- container_of((work), struct sja1105_port, xmit_work)
-#define tagger_to_sja1105(t) \
- container_of((t), struct sja1105_private, tagger_data)
+#define work_to_xmit_work(w) \
+ container_of((w), struct sja1105_deferred_xmit_work, work)
/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable lock on the bus)
@@ -2680,25 +2684,41 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
*/
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
- struct sja1105_port *sp = work_to_port(work);
- struct sja1105_tagger_data *tagger_data = sp->data;
- struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
- int port = sp - priv->ports;
- struct sk_buff *skb;
+ struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct sk_buff *clone, *skb = xmit_work->skb;
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct sja1105_private *priv = ds->priv;
+ int port = xmit_work->dp->index;
- while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
- struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
+ clone = SJA1105_SKB_CB(skb)->clone;
- mutex_lock(&priv->mgmt_lock);
+ mutex_lock(&priv->mgmt_lock);
- sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
+ sja1105_mgmt_xmit(ds, port, 0, skb, !!clone);
- /* The clone, if there, was made by dsa_skb_tx_timestamp */
- if (clone)
- sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
+ /* The clone, if there, was made by dsa_skb_tx_timestamp */
+ if (clone)
+ sja1105_ptp_txtstamp_skb(ds, port, clone);
- mutex_unlock(&priv->mgmt_lock);
- }
+ mutex_unlock(&priv->mgmt_lock);
+
+ kfree(xmit_work);
+}
+
+static int sja1105_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data;
+
+ if (proto != priv->info->tag_proto)
+ return -EPROTONOSUPPORT;
+
+ tagger_data = sja1105_tagger_data(ds);
+ tagger_data->xmit_work_fn = sja1105_port_deferred_xmit;
+ tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp;
+
+ return 0;
}
/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
@@ -3001,58 +3021,6 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
return 0;
}
-static void sja1105_teardown_ports(struct sja1105_private *priv)
-{
- struct dsa_switch *ds = priv->ds;
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- struct sja1105_port *sp = &priv->ports[port];
-
- if (sp->xmit_worker)
- kthread_destroy_worker(sp->xmit_worker);
- }
-}
-
-static int sja1105_setup_ports(struct sja1105_private *priv)
-{
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
- struct dsa_switch *ds = priv->ds;
- int port, rc;
-
- /* Connections between dsa_port and sja1105_port */
- for (port = 0; port < ds->num_ports; port++) {
- struct sja1105_port *sp = &priv->ports[port];
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct kthread_worker *worker;
- struct net_device *slave;
-
- if (!dsa_port_is_user(dp))
- continue;
-
- dp->priv = sp;
- sp->data = tagger_data;
- slave = dp->slave;
- kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
- worker = kthread_create_worker(0, "%s_xmit", slave->name);
- if (IS_ERR(worker)) {
- rc = PTR_ERR(worker);
- dev_err(ds->dev,
- "failed to create deferred xmit thread: %d\n",
- rc);
- goto out_destroy_workers;
- }
- sp->xmit_worker = worker;
- skb_queue_head_init(&sp->xmit_queue);
- }
-
- return 0;
-
-out_destroy_workers:
- sja1105_teardown_ports(priv);
- return rc;
-}
-
/* The programming model for the SJA1105 switch is "all-at-once" via static
* configuration tables. Some of these can be dynamically modified at runtime,
* but not the xMII mode parameters table.
@@ -3098,10 +3066,6 @@ static int sja1105_setup(struct dsa_switch *ds)
}
}
- rc = sja1105_setup_ports(priv);
- if (rc)
- goto out_static_config_free;
-
sja1105_tas_setup(ds);
sja1105_flower_setup(ds);
@@ -3139,7 +3103,7 @@ static int sja1105_setup(struct dsa_switch *ds)
ds->vlan_filtering_is_global = true;
ds->untag_bridge_pvid = true;
/* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
- ds->num_fwd_offloading_bridges = 7;
+ ds->max_num_bridges = 7;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SJA1105_NUM_TC;
@@ -3158,7 +3122,6 @@ out_ptp_clock_unregister:
out_flower_teardown:
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
- sja1105_teardown_ports(priv);
out_static_config_free:
sja1105_static_config_free(&priv->static_config);
@@ -3178,12 +3141,12 @@ static void sja1105_teardown(struct dsa_switch *ds)
sja1105_ptp_clock_unregister(ds);
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
- sja1105_teardown_ports(priv);
sja1105_static_config_free(&priv->static_config);
}
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
+ .connect_tag_protocol = sja1105_connect_tag_protocol,
.setup = sja1105_setup,
.teardown = sja1105_teardown,
.set_ageing_time = sja1105_set_ageing_time,
@@ -3197,7 +3160,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
.get_ts_info = sja1105_get_ts_info,
- .port_disable = sja1105_port_disable,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
@@ -3228,8 +3190,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add,
.tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del,
.port_prechangeupper = sja1105_prechangeupper,
- .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload,
- .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload,
};
static const struct of_device_id sja1105_dt_ids[];
@@ -3367,6 +3327,7 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
+ spin_lock_init(&priv->ts_id_lock);
rc = sja1105_parse_dt(priv);
if (rc < 0) {
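
Every bridge_dev comparison converted above lands on the same membership test; a condensed sketch of the new struct dsa_bridge pattern (the loop body is a placeholder):

#include <net/dsa.h>

/* Visit every other port that offloads the same bridge. */
static void example_for_each_bridge_member(struct dsa_switch *ds, int port,
					   struct dsa_bridge bridge)
{
	int i;

	for (i = 0; i < ds->num_ports; i++) {
		if (i == port)
			continue;
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;
		/* ... program forwarding between 'port' and 'i' ... */
	}
}
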
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 54396992a919..be3068a935af 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -58,13 +58,12 @@ enum sja1105_ptp_clk_mode {
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
-/* Must be called only with priv->tagger_data.state bit
- * SJA1105_HWTS_RX_EN cleared
+/* Must be called only while the RX timestamping state of the tagger
+ * is turned off
*/
static int sja1105_change_rxtstamping(struct sja1105_private *priv,
bool on)
{
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
@@ -74,13 +73,8 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
general_params->send_meta1 = on;
general_params->send_meta0 = on;
- /* Initialize the meta state machine to a known state */
- if (priv->tagger_data.stampable_skb) {
- kfree_skb(priv->tagger_data.stampable_skb);
- priv->tagger_data.stampable_skb = NULL;
- }
ptp_cancel_worker_sync(ptp_data->clock);
- skb_queue_purge(&tagger_data->skb_txtstamp_queue);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
@@ -88,6 +82,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
+ struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
struct sja1105_private *priv = ds->priv;
struct hwtstamp_config config;
bool rx_on;
@@ -98,10 +93,10 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->ports[port].hwts_tx_en = false;
+ priv->hwts_tx_en &= ~BIT(port);
break;
case HWTSTAMP_TX_ON:
- priv->ports[port].hwts_tx_en = true;
+ priv->hwts_tx_en |= BIT(port);
break;
default:
return -ERANGE;
@@ -116,8 +111,8 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
break;
}
- if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
- clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+ if (rx_on != tagger_data->rxtstamp_get_state(ds)) {
+ tagger_data->rxtstamp_set_state(ds, false);
rc = sja1105_change_rxtstamping(priv, rx_on);
if (rc < 0) {
@@ -126,7 +121,7 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
return rc;
}
if (rx_on)
- set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+ tagger_data->rxtstamp_set_state(ds, true);
}
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
@@ -136,15 +131,16 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
+ struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
struct sja1105_private *priv = ds->priv;
struct hwtstamp_config config;
config.flags = 0;
- if (priv->ports[port].hwts_tx_en)
+ if (priv->hwts_tx_en & BIT(port))
config.tx_type = HWTSTAMP_TX_ON;
else
config.tx_type = HWTSTAMP_TX_OFF;
- if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+ if (tagger_data->rxtstamp_get_state(ds))
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
config.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -417,10 +413,11 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
+ struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- if (!test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+ if (!tagger_data->rxtstamp_get_state(ds))
return false;
/* We need to read the full PTP clock to reconstruct the Rx
@@ -453,6 +450,39 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
return priv->info->rxtstamp(ds, port, skb);
}
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shwt = {0};
+
+ /* We don't care about RX timestamps on the CPU port */
+ if (dir == SJA1110_META_TSTAMP_RX)
+ return;
+
+ spin_lock(&ptp_data->skb_txtstamp_queue.lock);
+
+ skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
+ if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ __skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
+ skb_match = skb;
+
+ break;
+ }
+
+ spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
+
+ if (WARN_ON(!skb_match))
+ return;
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+ skb_complete_tx_timestamp(skb_match, &shwt);
+}
+
/* In addition to cloning the skb which is done by the common
* sja1105_port_txtstamp, we need to generate a timestamp ID and save the
* packet to the TX timestamping queue.
@@ -461,22 +491,22 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
u8 ts_id;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- spin_lock(&sp->data->meta_lock);
+ spin_lock(&priv->ts_id_lock);
- ts_id = sp->data->ts_id;
+ ts_id = priv->ts_id;
/* Deal automatically with 8-bit wraparound */
- sp->data->ts_id++;
+ priv->ts_id++;
SJA1105_SKB_CB(clone)->ts_id = ts_id;
- spin_unlock(&sp->data->meta_lock);
+ spin_unlock(&priv->ts_id_lock);
- skb_queue_tail(&sp->data->skb_txtstamp_queue, clone);
+ skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
}
/* Called from dsa_skb_tx_timestamp. This callback is just to clone
@@ -486,10 +516,9 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
struct sk_buff *clone;
- if (!sp->hwts_tx_en)
+ if (!(priv->hwts_tx_en & BIT(port)))
return;
clone = skb_clone_sk(skb);
@@ -896,7 +925,6 @@ static struct ptp_pin_desc sja1105_ptp_pin = {
int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
ptp_data->caps = (struct ptp_clock_info) {
@@ -919,8 +947,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
/* Only used on SJA1105 */
skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
/* Only used on SJA1110 */
- skb_queue_head_init(&tagger_data->skb_txtstamp_queue);
- spin_lock_init(&tagger_data->meta_lock);
+ skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
if (IS_ERR_OR_NULL(ptp_data->clock))
@@ -937,7 +964,6 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
if (IS_ERR_OR_NULL(ptp_data->clock))
@@ -945,7 +971,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
del_timer_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
- skb_queue_purge(&tagger_data->skb_txtstamp_queue);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 3ae6b9fdd492..416461ee95d2 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -8,6 +8,21 @@
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS 8
+
+static inline s64 ns_to_sja1105_ticks(s64 ns)
+{
+ return ns / SJA1105_TICK_NS;
+}
+
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
+{
+ return ticks * SJA1105_TICK_NS;
+}
+
/* Calculate the first base_time in the future that satisfies this
* relationship:
*
@@ -62,6 +77,10 @@ struct sja1105_ptp_data {
struct timer_list extts_timer;
/* Used only on SJA1105 to reconstruct partial timestamps */
struct sk_buff_head skb_rxtstamp_queue;
+ /* Used on SJA1110 where meta frames are generated only for
+ * 2-step TX timestamps
+ */
+ struct sk_buff_head skb_txtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
@@ -112,6 +131,9 @@ bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
+
#else
struct sja1105_ptp_cmd;
@@ -178,6 +200,8 @@ static inline int sja1105_ptp_commit(struct dsa_switch *ds,
#define sja1110_rxtstamp NULL
#define sja1110_txtstamp NULL
+#define sja1110_process_meta_tstamp NULL
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _SJA1105_PTP_H */
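
Use of the new tick helpers is a one-liner; a sketch of turning a raw 8 ns-tick hardware timestamp into the ktime_t the timestamping core expects, as sja1110_process_meta_tstamp() does above:

#include <linux/ktime.h>

/* 125 MHz clock: 1 tick = 8 ns, so 125000000 ticks make one second. */
static ktime_t example_ticks_to_ktime(u64 ticks)
{
	return ns_to_ktime(sja1105_ticks_to_ns(ticks));
}
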
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index a4b1447ff055..ae55167ce0a6 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1122,9 +1122,6 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
vsc->gc.ngpio = 4;
vsc->gc.owner = THIS_MODULE;
vsc->gc.parent = vsc->dev;
-#if IS_ENABLED(CONFIG_OF_GPIO)
- vsc->gc.of_node = vsc->dev->of_node;
-#endif
vsc->gc.base = -1;
vsc->gc.get = vsc73xx_gpio_get;
vsc->gc.set = vsc73xx_gpio_set;
@@ -1216,12 +1213,10 @@ int vsc73xx_probe(struct vsc73xx *vsc)
}
EXPORT_SYMBOL(vsc73xx_probe);
-int vsc73xx_remove(struct vsc73xx *vsc)
+void vsc73xx_remove(struct vsc73xx *vsc)
{
dsa_unregister_switch(vsc->ds);
gpiod_set_value(vsc->reset, 1);
-
- return 0;
}
EXPORT_SYMBOL(vsc73xx_remove);
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 30b951504e65..30b1f0a36566 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -26,5 +26,5 @@ struct vsc73xx_ops {
int vsc73xx_is_addr_valid(u8 block, u8 subblock);
int vsc73xx_probe(struct vsc73xx *vsc);
-int vsc73xx_remove(struct vsc73xx *vsc);
+void vsc73xx_remove(struct vsc73xx *vsc);
void vsc73xx_shutdown(struct vsc73xx *vsc);
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 910fcb3b252b..0730352cdd57 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -5,6 +5,7 @@
*/
#include <net/dsa.h>
+#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
@@ -501,7 +502,7 @@ static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
}
static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
- struct net_device *bridge, bool join)
+ struct dsa_bridge bridge, bool join)
{
unsigned int i, cpu_mask = 0, mask = 0;
struct xrs700x *priv = ds->priv;
@@ -513,14 +514,14 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
cpu_mask |= BIT(i);
- if (dsa_to_port(ds, i)->bridge_dev == bridge)
+ if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
mask |= BIT(i);
}
for (i = 0; i < ds->num_ports; i++) {
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* 1 = Disable forwarding to the port */
@@ -540,13 +541,13 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
}
static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge, bool *tx_fwd_offload)
{
return xrs700x_bridge_common(ds, port, bridge, true);
}
static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
xrs700x_bridge_common(ds, port, bridge, false);
}
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 8ef34901c2d8..1111d1f33865 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -225,7 +225,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
list_del(&slave->list);
queue->num_slaves--;
slave->dev->flags &= ~IFF_SLAVE;
- dev_put(slave->dev);
+ dev_put_track(slave->dev, &slave->dev_tracker);
kfree(slave);
}
@@ -399,7 +399,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
if (duplicate_slave)
eql_kill_one_slave(queue, duplicate_slave);
- dev_hold(slave->dev);
+ dev_hold_track(slave->dev, &slave->dev_tracker, GFP_ATOMIC);
list_add(&slave->list, &queue->all_slaves);
queue->num_slaves++;
slave->dev->flags |= IFF_SLAVE;
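
The _track variants pair every reference with a netdevice_tracker so a leaked reference can be attributed to its holder. A minimal sketch, assuming a caller that embeds the tracker next to the pointer the way slave_t does above:

#include <linux/netdevice.h>

struct example_holder {
	struct net_device *dev;
	netdevice_tracker dev_tracker;	/* pairs each hold with its put */
};

static void example_take(struct example_holder *h, struct net_device *dev)
{
	h->dev = dev;
	dev_hold_track(dev, &h->dev_tracker, GFP_ATOMIC);
}

static void example_release(struct example_holder *h)
{
	dev_put_track(h->dev, &h->dev_tracker);
	h->dev = NULL;
}
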
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 05e15b6e5e2c..8aec5d9fbfef 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1138,7 +1138,9 @@ typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static void
-typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
ering->rx_max_pending = RXENT_ENTRIES;
ering->tx_max_pending = TXLO_ENTRIES - 1;
@@ -2276,6 +2278,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *dev;
struct typhoon *tp;
int card_id = (int) ent->driver_data;
+ u8 addr[ETH_ALEN] __aligned(4);
void __iomem *ioaddr;
void *shared;
dma_addr_t shared_dma;
@@ -2407,8 +2410,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error_out_reset;
}
- *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
- *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ *(__be16 *)&addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
+ *(__be32 *)&addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
err_msg = "Could not obtain valid ethernet address, aborting";
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index bd22a534b1c0..e7b879123bb1 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -655,6 +655,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
struct ei_device *ei_local;
struct net_device *dev;
struct etherh_priv *eh;
+ u8 addr[ETH_ALEN];
int ret;
ret = ecard_request_resources(ec);
@@ -724,12 +725,13 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
spin_lock_init(&ei_local->page_lock);
if (ec->cid.product == PROD_ANT_ETHERM) {
- etherm_addr(dev->dev_addr);
+ etherm_addr(addr);
ei_local->reg_offset = etherm_regoffsets;
} else {
- etherh_addr(dev->dev_addr, ec);
+ etherh_addr(addr, ec);
ei_local->reg_offset = etherh_regoffsets;
}
+ eth_hw_addr_set(dev, addr);
ei_local->name = dev->name;
ei_local->word16 = 1;
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 941754ea78ec..1df7601af86a 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -116,6 +116,7 @@ static int hydra_init(struct zorro_dev *z)
unsigned long ioaddr = board+HYDRA_NIC_BASE;
const char name[] = "NE2000";
int start_page, stop_page;
+ u8 macaddr[ETH_ALEN];
int j;
int err;
@@ -129,7 +130,8 @@ static int hydra_init(struct zorro_dev *z)
return -ENOMEM;
for (j = 0; j < ETH_ALEN; j++)
- dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+ macaddr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+ eth_hw_addr_set(dev, macaddr);
/* We must set the 8390 for word mode. */
z_writeb(0x4b, ioaddr + NE_EN0_DCFG);
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 91b04abfd687..7fb819b9b89a 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -292,6 +292,7 @@ static bool mac8390_rsrc_init(struct net_device *dev,
struct nubus_dirent ent;
int offset;
volatile unsigned short *i;
+ u8 addr[ETH_ALEN];
dev->irq = SLOT2IRQ(board->slot);
/* This is getting to be a habit */
@@ -314,7 +315,8 @@ static bool mac8390_rsrc_init(struct net_device *dev,
return false;
}
- nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+ nubus_get_rsrc_mem(addr, &ent, 6);
+ eth_hw_addr_set(dev, addr);
if (useresources[cardtype] == 1) {
nubus_rewinddir(&dir);
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 0890fa493f70..6e62c37c9400 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -204,6 +204,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
{
int i, retval;
int checksum = 0;
+ u8 macaddr[ETH_ALEN];
const char *model_name;
unsigned char eeprom_irq = 0;
static unsigned version_printed;
@@ -239,7 +240,8 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
+ macaddr[i] = inb(ioaddr + 8 + i);
+ eth_hw_addr_set(dev, macaddr);
netdev_info(dev, "%s at %#3x, %pM", model_name,
ioaddr, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 263a942d81fa..5b00c452bede 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -168,6 +168,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
int checksum = 0;
int ancient = 0; /* An old card without config registers. */
int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
+ u8 addr[ETH_ALEN];
const char *model_name;
static unsigned version_printed;
struct ei_device *ei_local = netdev_priv(dev);
@@ -191,7 +192,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
netdev_info(dev, version);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
+ addr[i] = inb(ioaddr + 8 + i);
+ eth_hw_addr_set(dev, addr);
netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 4601b38f532a..db3ec4768159 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -73,6 +73,7 @@ config DNET
source "drivers/net/ethernet/dec/Kconfig"
source "drivers/net/ethernet/dlink/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
+source "drivers/net/ethernet/engleder/Kconfig"
source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
@@ -182,6 +183,7 @@ source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/toshiba/Kconfig"
source "drivers/net/ethernet/tundra/Kconfig"
+source "drivers/net/ethernet/vertexcom/Kconfig"
source "drivers/net/ethernet/via/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index fdd8c6c17451..8a87c1083d1d 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
+obj-$(CONFIG_NET_VENDOR_ENGLEDER) += engleder/
obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
@@ -92,6 +93,7 @@ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
+obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/
obj-$(CONFIG_NET_VENDOR_VIA) += via/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index f4edc616388c..537e6a85e18d 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3914,10 +3914,9 @@ static int et131x_pci_setup(struct pci_dev *pdev,
pci_set_master(pdev);
/* Check the DMA addressing support of this device */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
dev_err(&pdev->dev, "No usable DMA addressing method\n");
- rc = -EIO;
goto err_release_res;
}
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 800ee022388f..621ce742ad21 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/soc/sunxi/sunxi_sram.h>
+#include <linux/dmaengine.h>
#include "sun4i-emac.h"
@@ -76,7 +77,6 @@ struct emac_board_info {
void __iomem *membase;
u32 msg_enable;
struct net_device *ndev;
- struct sk_buff *skb_last;
u16 tx_fifo_stat;
int emacrx_completed_flag;
@@ -87,6 +87,16 @@ struct emac_board_info {
unsigned int duplex;
phy_interface_t phy_interface;
+ struct dma_chan *rx_chan;
+ phys_addr_t emac_rx_fifo;
+};
+
+struct emac_dma_req {
+ struct emac_board_info *db;
+ struct dma_async_tx_descriptor *desc;
+ struct sk_buff *skb;
+ dma_addr_t rxbuf;
+ int count;
};
static void emac_update_speed(struct net_device *dev)
@@ -96,9 +106,9 @@ static void emac_update_speed(struct net_device *dev)
/* set EMAC SPEED, depends on PHY */
reg_val = readl(db->membase + EMAC_MAC_SUPP_REG);
- reg_val &= ~(0x1 << 8);
+ reg_val &= ~EMAC_MAC_SUPP_100M;
if (db->speed == SPEED_100)
- reg_val |= 1 << 8;
+ reg_val |= EMAC_MAC_SUPP_100M;
writel(reg_val, db->membase + EMAC_MAC_SUPP_REG);
}
@@ -206,6 +216,117 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
readsl(reg, data, round_up(count, 4) / 4);
}
+static struct emac_dma_req *
+emac_alloc_dma_req(struct emac_board_info *db,
+ struct dma_async_tx_descriptor *desc, struct sk_buff *skb,
+ dma_addr_t rxbuf, int count)
+{
+ struct emac_dma_req *req;
+
+ req = kzalloc(sizeof(struct emac_dma_req), GFP_ATOMIC);
+ if (!req)
+ return NULL;
+
+ req->db = db;
+ req->desc = desc;
+ req->skb = skb;
+ req->rxbuf = rxbuf;
+ req->count = count;
+ return req;
+}
+
+static void emac_free_dma_req(struct emac_dma_req *req)
+{
+ kfree(req);
+}
+
+static void emac_dma_done_callback(void *arg)
+{
+ struct emac_dma_req *req = arg;
+ struct emac_board_info *db = req->db;
+ struct sk_buff *skb = req->skb;
+ struct net_device *dev = db->ndev;
+ int rxlen = req->count;
+ u32 reg_val;
+
+ dma_unmap_single(db->dev, req->rxbuf, rxlen, DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_bytes += rxlen;
+ /* Pass to upper layer */
+ dev->stats.rx_packets++;
+
+	/* re-enable CPU receive */
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val &= ~EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+
+	/* re-enable interrupt */
+ reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+ reg_val |= EMAC_INT_CTL_RX_EN;
+ writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+
+ db->emacrx_completed_flag = 1;
+ emac_free_dma_req(req);
+}
+
+static int emac_dma_inblk_32bit(struct emac_board_info *db,
+ struct sk_buff *skb, void *rdptr, int count)
+{
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ dma_addr_t rxbuf;
+ struct emac_dma_req *req;
+ int ret = 0;
+
+ rxbuf = dma_map_single(db->dev, rdptr, count, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(db->dev, rxbuf);
+ if (ret) {
+ dev_err(db->dev, "dma mapping error.\n");
+ return ret;
+ }
+
+ desc = dmaengine_prep_slave_single(db->rx_chan, rxbuf, count,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(db->dev, "prepare slave single failed\n");
+ ret = -ENOMEM;
+ goto prepare_err;
+ }
+
+ req = emac_alloc_dma_req(db, desc, skb, rxbuf, count);
+ if (!req) {
+ dev_err(db->dev, "alloc emac dma req error.\n");
+ ret = -ENOMEM;
+ goto alloc_req_err;
+ }
+
+ desc->callback_param = req;
+ desc->callback = emac_dma_done_callback;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(db->dev, "dma submit error.\n");
+ goto submit_err;
+ }
+
+ dma_async_issue_pending(db->rx_chan);
+ return ret;
+
+submit_err:
+ emac_free_dma_req(req);
+
+alloc_req_err:
+ dmaengine_desc_free(desc);
+
+prepare_err:
+ dma_unmap_single(db->dev, rxbuf, count, DMA_FROM_DEVICE);
+ return ret;
+}
+
/* ethtool ops */
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -308,7 +429,7 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* initialize EMAC */
/* flush RX FIFO */
reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val |= 0x8;
+ reg_val |= EMAC_RX_CTL_FLUSH_FIFO;
writel(reg_val, db->membase + EMAC_RX_CTL_REG);
udelay(1);
@@ -320,8 +441,8 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* set MII clock */
reg_val = readl(db->membase + EMAC_MAC_MCFG_REG);
- reg_val &= (~(0xf << 2));
- reg_val |= (0xD << 2);
+ reg_val &= ~EMAC_MAC_MCFG_MII_CLKD_MASK;
+ reg_val |= EMAC_MAC_MCFG_MII_CLKD_72;
writel(reg_val, db->membase + EMAC_MAC_MCFG_REG);
/* clear RX counter */
@@ -385,7 +506,7 @@ static void emac_init_device(struct net_device *dev)
/* enable RX/TX0/RX Hlevel interrupt */
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
spin_unlock_irqrestore(&db->lock, flags);
@@ -499,7 +620,6 @@ static void emac_rx(struct net_device *dev)
struct sk_buff *skb;
u8 *rdptr;
bool good_packet;
- static int rxlen_last;
unsigned int reg_val;
u32 rxhdr, rxstatus, rxcount, rxlen;
@@ -514,26 +634,12 @@ static void emac_rx(struct net_device *dev)
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RXCount: %x\n", rxcount);
- if ((db->skb_last != NULL) && (rxlen_last > 0)) {
- dev->stats.rx_bytes += rxlen_last;
-
- /* Pass to upper layer */
- db->skb_last->protocol = eth_type_trans(db->skb_last,
- dev);
- netif_rx(db->skb_last);
- dev->stats.rx_packets++;
- db->skb_last = NULL;
- rxlen_last = 0;
-
- reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val &= ~EMAC_RX_CTL_DMA_EN;
- writel(reg_val, db->membase + EMAC_RX_CTL_REG);
- }
-
if (!rxcount) {
db->emacrx_completed_flag = 1;
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
/* had one stuck? */
@@ -565,7 +671,9 @@ static void emac_rx(struct net_device *dev)
writel(reg_val | EMAC_CTL_RX_EN,
db->membase + EMAC_CTL_REG);
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
db->emacrx_completed_flag = 1;
@@ -623,6 +731,19 @@ static void emac_rx(struct net_device *dev)
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RxLen %x\n", rxlen);
+ if (rxlen >= dev->mtu && db->rx_chan) {
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val |= EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+ if (!emac_dma_inblk_32bit(db, skb, rdptr, rxlen))
+ break;
+
+	/* re-enable CPU receive, then try to receive via emac_inblk_32bit */
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val &= ~EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+ }
+
emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
rdptr, rxlen);
dev->stats.rx_bytes += rxlen;
@@ -666,18 +787,23 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
}
/* Transmit Interrupt check */
- if (int_status & (0x01 | 0x02))
+ if (int_status & EMAC_INT_STA_TX_COMPLETE)
emac_tx_done(dev, db, int_status);
- if (int_status & (0x04 | 0x08))
+ if (int_status & EMAC_INT_STA_TX_ABRT)
netdev_info(dev, " ab : %x\n", int_status);
/* Re-enable interrupt mask */
if (db->emacrx_completed_flag == 1) {
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
+ writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+ } else {
+ reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
}
+
spin_unlock(&db->lock);
return IRQ_HANDLED;
@@ -782,6 +908,58 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
+static int emac_configure_dma(struct emac_board_info *db)
+{
+ struct platform_device *pdev = db->pdev;
+ struct net_device *ndev = db->ndev;
+ struct dma_slave_config conf = {};
+ struct resource *regs;
+ int err = 0;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ netdev_err(ndev, "get io resource from device failed.\n");
+ err = -ENOMEM;
+ goto out_clear_chan;
+ }
+
+ netdev_info(ndev, "get io resource from device: %pa, size = %u\n",
+ &regs->start, (unsigned int)resource_size(regs));
+ db->emac_rx_fifo = regs->start + EMAC_RX_IO_DATA_REG;
+
+ db->rx_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(db->rx_chan)) {
+ netdev_err(ndev,
+ "failed to request dma channel. dma is disabled\n");
+ err = PTR_ERR(db->rx_chan);
+ goto out_clear_chan;
+ }
+
+ conf.direction = DMA_DEV_TO_MEM;
+ conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ conf.src_addr = db->emac_rx_fifo;
+ conf.dst_maxburst = 4;
+ conf.src_maxburst = 4;
+ conf.device_fc = false;
+
+ err = dmaengine_slave_config(db->rx_chan, &conf);
+ if (err) {
+ netdev_err(ndev, "config dma slave failed\n");
+ err = -EINVAL;
+ goto out_slave_configure_err;
+ }
+
+ return err;
+
+out_slave_configure_err:
+ dma_release_channel(db->rx_chan);
+
+out_clear_chan:
+ db->rx_chan = NULL;
+ return err;
+}
+
/* Search EMAC board, allocate space and register it
*/
static int emac_probe(struct platform_device *pdev)
@@ -824,6 +1002,9 @@ static int emac_probe(struct platform_device *pdev)
goto out_iounmap;
}
+ if (emac_configure_dma(db))
+ netdev_info(ndev, "configure dma failed. disable dma.\n");
+
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
@@ -891,6 +1072,7 @@ out_clk_disable_unprepare:
clk_disable_unprepare(db->clk);
out_dispose_mapping:
irq_dispose_mapping(ndev->irq);
+ dma_release_channel(db->rx_chan);
out_iounmap:
iounmap(db->membase);
out:
@@ -906,6 +1088,11 @@ static int emac_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_board_info *db = netdev_priv(ndev);
+ if (db->rx_chan) {
+ dmaengine_terminate_all(db->rx_chan);
+ dma_release_channel(db->rx_chan);
+ }
+
unregister_netdev(ndev);
sunxi_sram_release(&pdev->dev);
clk_disable_unprepare(db->clk);
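
The DMA setup above is a standard dmaengine slave configuration for draining a fixed MMIO FIFO; a minimal sketch of that piece in isolation (channel request and error unwinding elided):

#include <linux/dmaengine.h>

/* Hypothetical helper: the source address stays constant because the
 * device exposes its RX FIFO at a single register address.
 */
static int example_config_fifo_chan(struct dma_chan *chan, phys_addr_t fifo)
{
	struct dma_slave_config conf = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
	};

	return dmaengine_slave_config(chan, &conf);
}
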
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h
index 38c72d9ec600..90bd9ad77607 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.h
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.h
@@ -38,6 +38,7 @@
#define EMAC_RX_CTL_REG (0x3c)
#define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1)
#define EMAC_RX_CTL_DMA_EN (1 << 2)
+#define EMAC_RX_CTL_FLUSH_FIFO (1 << 3)
#define EMAC_RX_CTL_PASS_ALL_EN (1 << 4)
#define EMAC_RX_CTL_PASS_CTL_EN (1 << 5)
#define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6)
@@ -61,7 +62,21 @@
#define EMAC_RX_IO_DATA_STATUS_OK (1 << 7)
#define EMAC_RX_FBC_REG (0x50)
#define EMAC_INT_CTL_REG (0x54)
+#define EMAC_INT_CTL_RX_EN (1 << 8)
+#define EMAC_INT_CTL_TX0_EN (1)
+#define EMAC_INT_CTL_TX1_EN (1 << 1)
+#define EMAC_INT_CTL_TX_EN (EMAC_INT_CTL_TX0_EN | EMAC_INT_CTL_TX1_EN)
+#define EMAC_INT_CTL_TX0_ABRT_EN (0x1 << 2)
+#define EMAC_INT_CTL_TX1_ABRT_EN (0x1 << 3)
+#define EMAC_INT_CTL_TX_ABRT_EN (EMAC_INT_CTL_TX0_ABRT_EN | EMAC_INT_CTL_TX1_ABRT_EN)
#define EMAC_INT_STA_REG (0x58)
+#define EMAC_INT_STA_TX0_COMPLETE (0x1)
+#define EMAC_INT_STA_TX1_COMPLETE (0x1 << 1)
+#define EMAC_INT_STA_TX_COMPLETE (EMAC_INT_STA_TX0_COMPLETE | EMAC_INT_STA_TX1_COMPLETE)
+#define EMAC_INT_STA_TX0_ABRT (0x1 << 2)
+#define EMAC_INT_STA_TX1_ABRT (0x1 << 3)
+#define EMAC_INT_STA_TX_ABRT (EMAC_INT_STA_TX0_ABRT | EMAC_INT_STA_TX1_ABRT)
+#define EMAC_INT_STA_RX_COMPLETE (0x1 << 8)
#define EMAC_MAC_CTL0_REG (0x5c)
#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN (1 << 2)
#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN (1 << 3)
@@ -87,8 +102,11 @@
#define EMAC_MAC_CLRT_RM (0x0f)
#define EMAC_MAC_MAXF_REG (0x70)
#define EMAC_MAC_SUPP_REG (0x74)
+#define EMAC_MAC_SUPP_100M (0x1 << 8)
#define EMAC_MAC_TEST_REG (0x78)
#define EMAC_MAC_MCFG_REG (0x7c)
+#define EMAC_MAC_MCFG_MII_CLKD_MASK (0xff << 2)
+#define EMAC_MAC_MCFG_MII_CLKD_72 (0x0d << 2)
#define EMAC_MAC_A0_REG (0x98)
#define EMAC_MAC_A1_REG (0x9c)
#define EMAC_MAC_A2_REG (0xa0)
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 732da15a3827..22fe98555b24 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -589,8 +589,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
}
ap->name = dev->name;
- if (ap->pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pdev, dev);
@@ -1130,11 +1129,7 @@ static int ace_init(struct net_device *dev)
/*
* Configure DMA attributes.
*/
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- ap->pci_using_dac = 1;
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- ap->pci_using_dac = 0;
- } else {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
ecode = -ENODEV;
goto init_error;
}
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index 265fa601a258..ca5ce0cbbad1 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -692,7 +692,6 @@ struct ace_private
__attribute__ ((aligned (SMP_CACHE_BYTES)));
u32 last_tx, last_std_rx, last_mini_rx;
#endif
- int pci_using_dac;
u8 firmware_major;
u8 firmware_minor;
u8 firmware_fix;
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index f5ec35fa4c63..466ad9470d1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -48,6 +48,11 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
+/* device capabilities */
+enum ena_admin_aq_caps_id {
+ ENA_ADMIN_ENI_STATS = 0,
+};
+
enum ena_admin_placement_policy_type {
/* descriptors and headers are in host memory */
ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
@@ -455,7 +460,10 @@ struct ena_admin_device_attr_feature_desc {
*/
u32 supported_features;
- u32 reserved3;
+ /* bitmap of ena_admin_aq_caps_id, which represents device
+ * capabilities.
+ */
+ u32 capabilities;
/* Indicates how many bits are used for physical address access. */
u32 phys_addr_width;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index ab413fc1f68e..8c8b4c88c7de 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1971,6 +1971,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
sizeof(get_resp.u.dev_attr));
ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
+ ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
rc = ena_com_get_feature(ena_dev, &get_resp,
@@ -2223,6 +2224,13 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
struct ena_com_stats_ctx ctx;
int ret;
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ netdev_err(ena_dev->net_device,
+ "Capability %d isn't supported\n",
+ ENA_ADMIN_ENI_STATS);
+ return -EOPNOTSUPP;
+ }
+
memset(&ctx, 0x0, sizeof(ctx));
ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
if (likely(ret == 0))
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 73b03ce59412..3c5081d9d25d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -314,6 +314,7 @@ struct ena_com_dev {
struct ena_rss rss;
u32 supported_features;
+ u32 capabilities;
u32 dma_addr_bits;
struct ena_host_attribute host_attr;
@@ -967,6 +968,18 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d
ena_dev->adaptive_coalescing = false;
}
+/* ena_com_get_cap - query whether the device supports a capability.
+ * @ena_dev: ENA communication layer struct
+ * @cap_id: enum value representing the capability
+ *
+ * @return - true if the capability is supported, false otherwise
+ */
+static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_caps_id cap_id)
+{
+ return !!(ena_dev->capabilities & BIT(cap_id));
+}
+
/* ena_com_update_intr_reg - Prepare interrupt register
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
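
The new ena_com_get_cap() helper treats the capabilities word as a plain bitmap indexed by enum ena_admin_aq_caps_id; callers such as ena_com_get_eni_stats() bail out with -EOPNOTSUPP when the bit is clear. A minimal user-space sketch of the same pattern (the struct and enum here are simplified stand-ins, not the real ena_com types):

#include <stdbool.h>
#include <stdint.h>

enum caps_id { CAP_ENI_STATS = 0 };

struct dev_caps {
	uint32_t capabilities;   /* filled from the device-attributes response */
};

static bool dev_has_cap(const struct dev_caps *dev, enum caps_id cap)
{
	return !!(dev->capabilities & (1u << cap));  /* BIT(cap_id) in-kernel */
}
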
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 13e745cf3781..39242c5a1729 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -82,7 +82,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(csum_good),
ENA_STAT_RX_ENTRY(refil_partial),
- ENA_STAT_RX_ENTRY(bad_csum),
+ ENA_STAT_RX_ENTRY(csum_bad),
ENA_STAT_RX_ENTRY(page_alloc_fail),
ENA_STAT_RX_ENTRY(skb_alloc_fail),
ENA_STAT_RX_ENTRY(dma_mapping_err),
@@ -110,8 +110,7 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
-#define ENA_STATS_ARRAY_ENI(adapter) \
- (ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported)
+#define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings)
static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp)
@@ -213,8 +212,9 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *dev = adapter->ena_dev;
- ena_get_stats(adapter, data, adapter->eni_stats_supported);
+ ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
}
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
@@ -226,7 +226,9 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter)
static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
- return ENA_STATS_ARRAY_ENI(adapter);
+ bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS);
+
+ return ENA_STATS_ARRAY_ENI(adapter) * supported;
}
int ena_get_sset_count(struct net_device *netdev, int sset)
@@ -316,10 +318,11 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
u8 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *dev = adapter->ena_dev;
switch (sset) {
case ETH_SS_STATS:
- ena_get_strings(adapter, data, adapter->eni_stats_supported);
+ ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
break;
}
}
@@ -465,7 +468,9 @@ static void ena_get_drvinfo(struct net_device *dev,
}
static void ena_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
@@ -476,7 +481,9 @@ static void ena_get_ringparam(struct net_device *netdev,
}
static int ena_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
u32 new_tx_size, new_rx_size;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c72f0c7ff4aa..53080fd143dc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -103,7 +103,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
+ ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
netif_err(adapter, tx_err, dev, "Transmit time out\n");
@@ -166,11 +166,9 @@ static int ena_xmit_common(struct net_device *dev,
"Failed to prepare tx bufs\n");
ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
&ring->syncp);
- if (rc != -ENOMEM) {
- adapter->reset_reason =
- ENA_REGS_RESET_DRIVER_INVALID_STATE;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
- }
+ if (rc != -ENOMEM)
+ ena_reset_device(adapter,
+ ENA_REGS_RESET_DRIVER_INVALID_STATE);
return rc;
}
@@ -434,7 +432,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
xdp_stat = &rx_ring->rx_stats.xdp_pass;
break;
default:
- bpf_warn_invalid_xdp_action(verdict);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_invalid;
}
@@ -1269,20 +1267,18 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
netif_err(ring->adapter,
tx_done,
ring->netdev,
- "tx_info doesn't have valid %s",
- is_xdp ? "xdp frame" : "skb");
+ "tx_info doesn't have valid %s. qid %u req_id %u",
+ is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
else
netif_err(ring->adapter,
tx_done,
ring->netdev,
- "Invalid req_id: %hu\n",
- req_id);
+ "Invalid req_id %u in qid %u\n",
+ req_id, ring->qid);
ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
+ ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
- /* Trigger device reset */
- ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
- set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
return -EFAULT;
}
@@ -1445,10 +1441,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
netif_err(adapter, rx_err, rx_ring->netdev,
"Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
- /* Make sure reset reason is set before triggering the reset */
- smp_mb__before_atomic();
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
return NULL;
}
@@ -1558,7 +1551,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l3_csum_err))) {
/* ipv4 checksum error */
skb->ip_summed = CHECKSUM_NONE;
- ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
&rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX IPv4 header checksum error\n");
@@ -1570,7 +1563,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
if (unlikely(ena_rx_ctx->l4_csum_err)) {
/* TCP/UDP checksum error */
- ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
&rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX L4 checksum error\n");
@@ -1781,15 +1774,12 @@ error:
if (rc == -ENOSPC) {
ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
&rx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+ ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
&rx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
}
-
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-
return 0;
}
@@ -3253,11 +3243,11 @@ err:
int ena_update_hw_stats(struct ena_adapter *adapter)
{
- int rc = 0;
+ int rc;
rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
if (rc) {
- dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
+ netdev_err(adapter->netdev, "Failed to get ENI stats\n");
return rc;
}
@@ -3654,8 +3644,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
adapter->last_keep_alive_jiffies = jiffies;
- dev_err(&pdev->dev, "Device reset completed successfully\n");
-
return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
@@ -3685,6 +3673,8 @@ static void ena_fw_reset_device(struct work_struct *work)
if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
ena_destroy_device(adapter, false);
ena_restore_device(adapter);
+
+ dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
}
rtnl_unlock();
@@ -3707,9 +3697,8 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
netif_err(adapter, rx_err, adapter->netdev,
"Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
rx_ring->qid);
- adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
- smp_mb__before_atomic();
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
return -EIO;
}
@@ -3746,9 +3735,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
netif_err(adapter, tx_err, adapter->netdev,
"Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
tx_ring->qid);
- adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
- smp_mb__before_atomic();
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
return -EIO;
}
@@ -3774,9 +3761,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
"The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
missed_tx,
adapter->missing_tx_completion_threshold);
- adapter->reset_reason =
- ENA_REGS_RESET_MISS_TX_CMPL;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
rc = -EIO;
}
@@ -3897,8 +3882,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
"Keep alive watchdog timeout.\n");
ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
&adapter->syncp);
- adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
}
}
@@ -3909,8 +3893,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
"ENA admin queue is not in running state!\n");
ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
&adapter->syncp);
- adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
}
}
@@ -4110,7 +4093,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
- if (unlikely(rc && (rc != -EOPNOTSUPP))) {
+ if (unlikely(rc)) {
dev_err(dev, "Cannot fill indirect table\n");
goto err_fill_indir;
}
@@ -4146,10 +4129,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
}
-static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
+static void ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
- struct ena_com_dev *ena_dev = ctx->ena_dev;
+ struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
u32 max_tx_queue_size;
@@ -4157,7 +4141,7 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
- &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
+ &get_feat_ctx->max_queue_ext.max_queue_ext;
max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
max_queue_ext->max_rx_sq_depth);
max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
@@ -4169,13 +4153,13 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_tx_queue_size = min_t(u32, max_tx_queue_size,
max_queue_ext->max_tx_sq_depth);
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_rx_descs);
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_rx_descs);
} else {
struct ena_admin_queue_feature_desc *max_queues =
- &ctx->get_feat_ctx->max_queues;
+ &get_feat_ctx->max_queues;
max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
max_queues->max_sq_depth);
max_tx_queue_size = max_queues->max_cq_depth;
@@ -4187,10 +4171,10 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_tx_queue_size = min_t(u32, max_tx_queue_size,
max_queues->max_sq_depth);
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_rx_descs);
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_rx_descs);
}
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
@@ -4204,12 +4188,10 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
tx_queue_size = rounddown_pow_of_two(tx_queue_size);
rx_queue_size = rounddown_pow_of_two(rx_queue_size);
- ctx->max_tx_queue_size = max_tx_queue_size;
- ctx->max_rx_queue_size = max_rx_queue_size;
- ctx->tx_queue_size = tx_queue_size;
- ctx->rx_queue_size = rx_queue_size;
-
- return 0;
+ adapter->max_tx_ring_size = max_tx_queue_size;
+ adapter->max_rx_ring_size = max_rx_queue_size;
+ adapter->requested_tx_ring_size = tx_queue_size;
+ adapter->requested_rx_ring_size = rx_queue_size;
}
/* ena_probe - Device Initialization Routine
@@ -4224,7 +4206,6 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_calc_queue_size_ctx calc_queue_ctx = {};
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
@@ -4309,10 +4290,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy;
}
- calc_queue_ctx.ena_dev = ena_dev;
- calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
- calc_queue_ctx.pdev = pdev;
-
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity
*/
@@ -4320,8 +4297,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- rc = ena_calc_io_queue_size(&calc_queue_ctx);
- if (rc || !max_num_io_queues) {
+ ena_calc_io_queue_size(adapter, &get_feat_ctx);
+ if (unlikely(!max_num_io_queues)) {
rc = -EFAULT;
goto err_device_destroy;
}
@@ -4330,13 +4307,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
- adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
- adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
- adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
- adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
- adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
- adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
-
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
adapter->last_monitored_tx_qid = 0;
@@ -4387,11 +4357,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_config_debug_area(adapter);
- if (!ena_update_hw_stats(adapter))
- adapter->eni_stats_supported = true;
- else
- adapter->eni_stats_supported = false;
-
memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0c39fc2fa345..1bdce99bf688 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <uapi/linux/bpf.h>
#include "ena_com.h"
#include "ena_eth_com.h"
@@ -139,18 +140,6 @@ struct ena_napi {
struct dim dim;
};
-struct ena_calc_queue_size_ctx {
- struct ena_com_dev_get_features_ctx *get_feat_ctx;
- struct ena_com_dev *ena_dev;
- struct pci_dev *pdev;
- u32 tx_queue_size;
- u32 rx_queue_size;
- u32 max_tx_queue_size;
- u32 max_rx_queue_size;
- u16 max_tx_sgl_size;
- u16 max_rx_sgl_size;
-};
-
struct ena_tx_buffer {
struct sk_buff *skb;
/* num of ena desc for this specific skb
@@ -215,7 +204,7 @@ struct ena_stats_rx {
u64 rx_copybreak_pkt;
u64 csum_good;
u64 refil_partial;
- u64 bad_csum;
+ u64 csum_bad;
u64 page_alloc_fail;
u64 skb_alloc_fail;
u64 dma_mapping_err;
@@ -378,7 +367,6 @@ struct ena_adapter {
struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats;
struct ena_admin_eni_stats eni_stats;
- bool eni_stats_supported;
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
@@ -406,6 +394,15 @@ int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
int ena_get_sset_count(struct net_device *netdev, int sset);
+static inline void ena_reset_device(struct ena_adapter *adapter,
+ enum ena_regs_reset_reason_types reset_reason)
+{
+ adapter->reset_reason = reset_reason;
+ /* Make sure reset reason is set before triggering the reset */
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+}
+
enum ena_xdp_errors_t {
ENA_XDP_ALLOWED = 0,
ENA_XDP_CURRENT_MTU_TOO_LARGE,
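
The ena_reset_device() helper added above centralizes a pattern repeated throughout this patch: store the reset reason first, then raise the trigger bit, with smp_mb__before_atomic() ordering the plain store before the set_bit(). A rough user-space analogy using C11 release semantics (an illustrative sketch, not the kernel primitives):

#include <stdatomic.h>

static int reset_reason;            /* plain shared variable */
static atomic_int trigger_reset;    /* flag observed by the reset worker */

static void request_reset(int reason)
{
	reset_reason = reason;      /* must be visible before the flag ... */
	atomic_store_explicit(&trigger_reset, 1,
			      memory_order_release); /* ... so publish with release */
}
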
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 2f808dbc8b0e..3a351d3396bf 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -680,6 +680,7 @@ static int a2065_init_one(struct zorro_dev *z,
unsigned long base_addr = board + A2065_LANCE;
unsigned long mem_start = board + A2065_RAM;
struct resource *r1, *r2;
+ u8 addr[ETH_ALEN];
u32 serial;
int err;
@@ -706,17 +707,18 @@ static int a2065_init_one(struct zorro_dev *z,
r2->name = dev->name;
serial = be32_to_cpu(z->rom.er_SerialNumber);
- dev->dev_addr[0] = 0x00;
+ addr[0] = 0x00;
if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
- dev->dev_addr[1] = 0x80;
- dev->dev_addr[2] = 0x10;
+ addr[1] = 0x80;
+ addr[2] = 0x10;
} else { /* Ameristar */
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x9f;
+ addr[1] = 0x00;
+ addr[2] = 0x9f;
}
- dev->dev_addr[3] = (serial >> 16) & 0xff;
- dev->dev_addr[4] = (serial >> 8) & 0xff;
- dev->dev_addr[5] = serial & 0xff;
+ addr[3] = (serial >> 16) & 0xff;
+ addr[4] = (serial >> 8) & 0xff;
+ addr[5] = serial & 0xff;
+ eth_hw_addr_set(dev, addr);
dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
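
This hunk shows the conversion pattern that repeats across the amd/ and apple/ drivers below: the address is assembled in a local u8 addr[ETH_ALEN] buffer and installed with eth_hw_addr_set(), since netdev->dev_addr is no longer writable directly. A condensed sketch of the a2065-style serial-number case (the helper name is illustrative):

#include <linux/etherdevice.h>

static void example_mac_from_serial(struct net_device *dev, u32 serial)
{
	u8 addr[ETH_ALEN] = { 0x00, 0x80, 0x10 };  /* example OUI prefix */

	addr[3] = (serial >> 16) & 0xff;
	addr[4] = (serial >> 8) & 0xff;
	addr[5] = serial & 0xff;
	eth_hw_addr_set(dev, addr);                /* copies into dev->dev_addr */
}
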
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 5e0f645f5bde..4ea7b9f3c424 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -441,11 +441,11 @@ static int ariadne_open(struct net_device *dev)
/* Set the Ethernet Hardware Address */
lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[0];
lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[1];
lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[2];
/* Set the Init Block Mode */
lance->RAP = CSR15; /* Mode Register */
@@ -717,6 +717,7 @@ static int ariadne_init_one(struct zorro_dev *z,
unsigned long mem_start = board + ARIADNE_RAM;
struct resource *r1, *r2;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
u32 serial;
int err;
@@ -740,12 +741,13 @@ static int ariadne_init_one(struct zorro_dev *z,
r2->name = dev->name;
serial = be32_to_cpu(z->rom.er_SerialNumber);
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x60;
- dev->dev_addr[2] = 0x30;
- dev->dev_addr[3] = (serial >> 16) & 0xff;
- dev->dev_addr[4] = (serial >> 8) & 0xff;
- dev->dev_addr[5] = serial & 0xff;
+ addr[0] = 0x00;
+ addr[1] = 0x60;
+ addr[2] = 0x30;
+ addr[3] = (serial >> 16) & 0xff;
+ addr[4] = (serial >> 8) & 0xff;
+ addr[5] = serial & 0xff;
+ eth_hw_addr_set(dev, addr);
dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 9c7d9690d00c..27869164c6e6 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -471,6 +471,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
int i;
static int did_version;
unsigned short save1, save2;
+ u8 addr[ETH_ALEN];
PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
(long)memaddr, (long)ioaddr ));
@@ -585,14 +586,16 @@ static unsigned long __init lance_probe1( struct net_device *dev,
eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
case NEW_RIEBL:
- lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ lp->memcpy_f(addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
break;
case PAM_CARD:
i = IO->eeprom;
for( i = 0; i < 6; ++i )
- dev->dev_addr[i] =
+ addr[i] =
((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
((((unsigned short *)MEM)[i*2+1] & 0x0f));
+ eth_hw_addr_set(dev, addr);
i = IO->mem;
break;
}
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 493b0cefcc2a..ec8df05e7bf6 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1032,6 +1032,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ u8 addr[ETH_ALEN];
const char *desc;
if (dec_lance_debug && version_printed++ == 0)
@@ -1228,7 +1229,8 @@ static int dec_lance_probe(struct device *bdev, const int type)
break;
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = esar[i * 4];
+ addr[i] = esar[i * 4];
+ eth_hw_addr_set(dev, addr);
printk("%s: %s, addr = %pM, irq = %d\n",
name, desc, dev->dev_addr, dev->irq);
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 6784f8748638..055fda11c572 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -129,6 +129,7 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
{
unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
struct hplance_private *lp;
+ u8 addr[ETH_ALEN];
int i;
/* reset the board */
@@ -144,9 +145,10 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
/* The NVRAM holds our ethernet address, one nibble per byte,
* at bytes NVRAMOFF+1,3,5,7,9...
*/
- dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
| (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
}
+ eth_hw_addr_set(dev, addr);
lp = netdev_priv(dev);
lp->lance.name = d->name;
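
Per the comment in the hunk above, each address byte lives in the hplance NVRAM as two nibbles, high at offset i*4+1 and low at i*4+3. A one-function sketch of that decode (hypothetical helper, plain bytes rather than the in_8() iomem accessor):

#include <linux/types.h>

static u8 nvram_mac_byte(const u8 *nv, int i)
{
	/* high nibble at i*4+1, low nibble at i*4+3, per the layout above */
	return ((nv[i * 4 + 1] & 0xf) << 4) | (nv[i * 4 + 3] & 0xf);
}
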
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 945bf1d87507..462016666752 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -480,6 +480,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
unsigned long flags;
int err = -ENOMEM;
void __iomem *bios;
+ u8 addr[ETH_ALEN];
/* First we look for special cases.
Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
@@ -541,7 +542,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
/* There is a 16 byte station address PROM at the base address.
The first six bytes are the station address. */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
+ addr[i] = inb(ioaddr + i);
+ eth_hw_addr_set(dev, addr);
printk("%pM", dev->dev_addr);
dev->base_addr = ioaddr;
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index da97fccea9ea..410c7b67eba4 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -74,6 +74,7 @@ static struct net_device * __init mvme147lance_probe(void)
static int called;
static const char name[] = "MVME147 LANCE";
struct m147lance_private *lp;
+ u8 macaddr[ETH_ALEN];
u_long *addr;
u_long address;
int err;
@@ -93,15 +94,16 @@ static struct net_device * __init mvme147lance_probe(void)
addr = (u_long *)ETHERNET_ADDRESS;
address = *addr;
- dev->dev_addr[0] = 0x08;
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x3e;
+ macaddr[0] = 0x08;
+ macaddr[1] = 0x00;
+ macaddr[2] = 0x3e;
address = address >> 8;
- dev->dev_addr[5] = address&0xff;
+ macaddr[5] = address&0xff;
address = address >> 8;
- dev->dev_addr[4] = address&0xff;
+ macaddr[4] = address&0xff;
address = address >> 8;
- dev->dev_addr[3] = address&0xff;
+ macaddr[3] = address&0xff;
+ eth_hw_addr_set(dev, macaddr);
printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
dev->name, dev->base_addr, MVME147_LANCE_IRQ,
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 032e8922b482..8ba579b89b75 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -251,7 +251,7 @@ static void ni65_recv_intr(struct net_device *dev,int);
static void ni65_xmit_intr(struct net_device *dev,int);
static int ni65_open(struct net_device *dev);
static int ni65_lance_reinit(struct net_device *dev);
-static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
+static void ni65_init_lance(struct priv *p,const unsigned char*,int,int);
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
struct net_device *dev);
static void ni65_timeout(struct net_device *dev, unsigned int txqueue);
@@ -418,6 +418,7 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
{
int i,j;
struct priv *p;
+ u8 addr[ETH_ALEN];
unsigned long flags;
dev->irq = irq;
@@ -444,7 +445,8 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
return -ENODEV;
for(j=0;j<6;j++)
- dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+ addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+ eth_hw_addr_set(dev, addr);
if( (j=ni65_alloc_buffer(dev)) < 0) {
release_region(ioaddr, cards[i].total_size);
@@ -566,7 +568,7 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
/*
* set lance register and trigger init
*/
-static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
+static void ni65_init_lance(struct priv *p,const unsigned char *daddr,int filter,int mode)
{
int i;
u32 pib;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f5c50ff377ff..c20c369c7eb8 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -860,7 +860,9 @@ static int pcnet32_nway_reset(struct net_device *dev)
}
static void pcnet32_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pcnet32_private *lp = netdev_priv(dev);
@@ -871,7 +873,9 @@ static void pcnet32_get_ringparam(struct net_device *dev,
}
static int pcnet32_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 533b8519ec35..466273b22f0a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -898,6 +898,8 @@
#define PCS_V2_WINDOW_SELECT 0x9064
#define PCS_V2_RV_WINDOW_DEF 0x1060
#define PCS_V2_RV_WINDOW_SELECT 0x1064
+#define PCS_V2_YC_WINDOW_DEF 0x18060
+#define PCS_V2_YC_WINDOW_SELECT 0x18064
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
@@ -1030,8 +1032,8 @@
#define XP_PROP_0_PORT_ID_WIDTH 8
#define XP_PROP_0_PORT_MODE_INDEX 8
#define XP_PROP_0_PORT_MODE_WIDTH 4
-#define XP_PROP_0_PORT_SPEEDS_INDEX 23
-#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 22
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 5
#define XP_PROP_1_MAX_RX_DMA_INDEX 24
#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 30d24d19f40d..492ac383f16d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1508,9 +1508,6 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags)
- return -EINVAL;
-
mac_tscr = 0;
switch (config.tx_type) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 94879cf8b420..6ceb1cdf6eba 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -619,8 +619,11 @@ static int xgbe_get_module_eeprom(struct net_device *netdev,
return pdata->phy_if.module_eeprom(pdata, eeprom, data);
}
-static void xgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+static void
+xgbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -631,7 +634,9 @@ static void xgbe_get_ringparam(struct net_device *netdev,
}
static int xgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int rx, tx;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 90cb55eb5466..efdcf484a510 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -278,6 +278,13 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
+ (rdev->device == 0x14b5)) {
+ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+ /* Yellow Carp devices do not need the CDR workaround */
+ pdata->vdata->an_cdr_workaround = 0;
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -460,7 +467,7 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
-static const struct xgbe_version_data xgbe_v2a = {
+static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
.mmc_64bit = 1,
@@ -475,7 +482,7 @@ static const struct xgbe_version_data xgbe_v2a = {
.an_cdr_workaround = 1,
};
-static const struct xgbe_version_data xgbe_v2b = {
+static struct xgbe_version_data xgbe_v2b = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
.mmc_64bit = 1,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 213769054391..2156600641b6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -124,10 +124,10 @@
#include "xgbe.h"
#include "xgbe-common.h"
-#define XGBE_PHY_PORT_SPEED_100 BIT(0)
-#define XGBE_PHY_PORT_SPEED_1000 BIT(1)
-#define XGBE_PHY_PORT_SPEED_2500 BIT(2)
-#define XGBE_PHY_PORT_SPEED_10000 BIT(3)
+#define XGBE_PHY_PORT_SPEED_100 BIT(1)
+#define XGBE_PHY_PORT_SPEED_1000 BIT(2)
+#define XGBE_PHY_PORT_SPEED_2500 BIT(3)
+#define XGBE_PHY_PORT_SPEED_10000 BIT(4)
#define XGBE_MUTEX_RELEASE 0x80000000
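
The INDEX/WIDTH change above widens the port-speeds property field from 4 bits at bit 23 to 5 bits at bit 22, which is why each XGBE_PHY_PORT_SPEED_* flag moves up one bit position. A sketch of the generic field extraction these macros drive (illustrative, in the style of the driver's XP_GET_BITS usage):

#include <stdint.h>

static uint32_t xp_get_field(uint32_t reg, unsigned int index,
			     unsigned int width)
{
	return (reg >> index) & ((1u << width) - 1);
}

/* e.g. the 10G bit is now bit 4 of the 5-bit field starting at bit 22:
 *   xp_get_field(xp_prop0, 22, 5) & (1u << 4)
 */
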
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 220dc42af31a..ff2d099aab21 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -869,7 +869,7 @@ static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
for (i = 0; i < pdata->txq_cnt; i++) {
txq = netdev_get_tx_queue(ndev, i);
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
netif_tx_start_queue(txq);
}
}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 9a650d1c1bdd..4d2ba30c2fbd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1237,6 +1237,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct bmac_data *bp;
const unsigned char *prop_addr;
unsigned char addr[6];
+ u8 macaddr[6];
struct net_device *dev;
int is_bmac_plus = ((int)match->data) != 0;
@@ -1284,7 +1285,9 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j)
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
+
+ eth_hw_addr_set(dev, macaddr);
/* Enable chip without interrupts for now */
bmac_enable_and_reset_chip(dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 4b80e3a52a19..6f8c91eb1263 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -90,7 +90,7 @@ static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* If we can't get a skbuff when we need it, we use this area for DMA.
@@ -112,6 +112,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct net_device *dev;
struct mace_data *mp;
const unsigned char *addr;
+ u8 macaddr[ETH_ALEN];
int j, rev, rc = -EBUSY;
if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
@@ -167,8 +168,9 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j) {
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
}
+ eth_hw_addr_set(dev, macaddr);
mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
in_8(&mp->mace->chipid_lo);
@@ -369,11 +371,12 @@ static void mace_reset(struct net_device *dev)
out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -385,7 +388,10 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- out_8(&mb->padr, dev->dev_addr[i] = p[i]);
+ out_8(&mb->padr, macaddr[i] = p[i]);
+
+ eth_hw_addr_set(dev, macaddr);
+
if (mp->chipid != BROKEN_ADDRCHG_REV)
out_8(&mb->iac, 0);
}
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 95d3061c61be..8fcaf1639920 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -92,7 +92,7 @@ static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* Load a receive DMA channel with a base address and ring length
@@ -197,6 +197,7 @@ static int mace_probe(struct platform_device *pdev)
unsigned char *addr;
struct net_device *dev;
unsigned char checksum = 0;
+ u8 macaddr[ETH_ALEN];
int err;
dev = alloc_etherdev(PRIV_BYTES);
@@ -229,8 +230,9 @@ static int mace_probe(struct platform_device *pdev)
for (j = 0; j < 6; ++j) {
u8 v = bitrev8(addr[j<<4]);
checksum ^= v;
- dev->dev_addr[j] = v;
+ macaddr[j] = v;
}
+ eth_hw_addr_set(dev, macaddr);
for (; j < 8; ++j) {
checksum ^= bitrev8(addr[j<<4]);
}
@@ -315,11 +317,12 @@ static void mace_reset(struct net_device *dev)
* Load the address on a mace controller.
*/
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -331,7 +334,8 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- mb->padr = dev->dev_addr[i] = p[i];
+ mb->padr = macaddr[i] = p[i];
+ eth_hw_addr_set(dev, macaddr);
if (mp->chipid != BROKEN_ADDRCHG_REV)
mb->iac = 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a9ef0544e30f..a418238f6309 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -812,7 +812,9 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
}
static void aq_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
@@ -827,7 +829,9 @@ static void aq_get_ringparam(struct net_device *ndev,
}
static int aq_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
const struct aq_hw_caps_s *hw_caps;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 1bc4d33a0ce5..30a573db02bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -826,7 +826,6 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int hweight = 0;
int err = 0;
- int i;
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
@@ -837,8 +836,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
- hweight += hweight_long(aq_nic->active_vlans[i]);
+ hweight = bitmap_weight(aq_nic->active_vlans, VLAN_N_VID);
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
@@ -871,7 +869,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int err = 0;
- memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ bitmap_zero(aq_nic->active_vlans, VLAN_N_VID);
aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
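
The aq_filters change replaces an open-coded per-long popcount with bitmap_weight(), and a memset() with bitmap_zero(); both helpers take the bit count rather than a byte size. A sketch of the equivalence:

#include <linux/bitmap.h>

static int count_active_vlans(const unsigned long *active_vlans)
{
	/* Same result as summing hweight_long() over each long, but the
	 * intent (and the VLAN_N_VID bit count) is explicit. */
	return bitmap_weight(active_vlans, 4096);   /* 4096 == VLAN_N_VID */
}
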
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e22935ce9573..e65ce7199dac 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -231,9 +231,6 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
struct hwtstamp_config *config)
{
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index e230d8d0ff73..e7a9f9863258 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -144,12 +144,13 @@ static void ax88796c_set_mac_addr(struct net_device *ndev)
static void ax88796c_load_mac_addr(struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u8 addr[ETH_ALEN];
u16 temp;
lockdep_assert_held(&ax_local->spi_lock);
/* Try the device tree first */
- if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) &&
+ if (!platform_get_ethdev_address(&ax_local->spi->dev, ndev) &&
is_valid_ether_addr(ndev->dev_addr)) {
if (netif_msg_probe(ax_local))
dev_info(&ax_local->spi->dev,
@@ -159,18 +160,19 @@ static void ax88796c_load_mac_addr(struct net_device *ndev)
/* Read the MAC address from AX88796C */
temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
- ndev->dev_addr[5] = (u8)temp;
- ndev->dev_addr[4] = (u8)(temp >> 8);
+ addr[5] = (u8)temp;
+ addr[4] = (u8)(temp >> 8);
temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
- ndev->dev_addr[3] = (u8)temp;
- ndev->dev_addr[2] = (u8)(temp >> 8);
+ addr[3] = (u8)temp;
+ addr[2] = (u8)(temp >> 8);
temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
- ndev->dev_addr[1] = (u8)temp;
- ndev->dev_addr[0] = (u8)(temp >> 8);
+ addr[1] = (u8)temp;
+ addr[0] = (u8)(temp >> 8);
- if (is_valid_ether_addr(ndev->dev_addr)) {
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(ndev, addr);
if (netif_msg_probe(ax_local))
dev_info(&ax_local->spi->dev,
"MAC address read from ASIX chip\n");
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 4579ddf9c427..ec167af0e3b2 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -766,7 +766,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
unsigned long timestamp;
u32 rx_sm, tx_sm, rx_fd;
- timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
+ timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
if (likely(time_before(jiffies, timestamp + HZ / 10)))
return false;
@@ -1024,83 +1024,6 @@ static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}
-static void ag71xx_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- break;
- case PHY_INTERFACE_MODE_MII:
- if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
- ag71xx_is(ag, AR9340) ||
- ag71xx_is(ag, QCA9530) ||
- (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_GMII:
- if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
- (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
- (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_SGMII:
- if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_RMII:
- if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_RGMII:
- if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
- (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
- break;
- goto unsupported;
- default:
- goto unsupported;
- }
-
- phylink_set(mask, MII);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Autoneg);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_RGMII ||
- state->interface == PHY_INTERFACE_MODE_GMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-unsupported:
- linkmode_zero(supported);
-}
-
-static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- state->link = 0;
-}
-
-static void ag71xx_mac_an_restart(struct phylink_config *config)
-{
- /* Not Supported */
-}
-
static void ag71xx_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -1163,9 +1086,7 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
- .validate = ag71xx_mac_validate,
- .mac_pcs_get_state = ag71xx_mac_pcs_get_state,
- .mac_an_restart = ag71xx_mac_an_restart,
+ .validate = phylink_generic_validate,
.mac_config = ag71xx_mac_config,
.mac_link_down = ag71xx_mac_link_down,
.mac_link_up = ag71xx_mac_link_up,
@@ -1177,6 +1098,34 @@ static int ag71xx_phylink_setup(struct ag71xx *ag)
ag->phylink_config.dev = &ag->ndev->dev;
ag->phylink_config.type = PHYLINK_NETDEV;
+ ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
+ ag71xx_is(ag, AR9340) ||
+ ag71xx_is(ag, QCA9530) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ag->phylink_config.supported_interfaces);
+
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ag->phylink_config.supported_interfaces);
+
+ if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ag->phylink_config.supported_interfaces);
+
+ if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ag->phylink_config.supported_interfaces);
+
+ if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ ag->phylink_config.supported_interfaces);
phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
ag->phy_if_mode, &ag71xx_phylink_mac_ops);
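
The ag71xx conversion drops its hand-written .validate in favor of phylink_generic_validate(): the driver now only declares what the MAC can do (mac_capabilities) and which interface modes are wired up per SoC/MAC index (supported_interfaces), and phylink derives the allowed link modes itself. A minimal sketch of that setup pattern (names are illustrative):

#include <linux/phylink.h>

static void example_fill_phylink_config(struct phylink_config *cfg)
{
	cfg->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				MAC_10 | MAC_100 | MAC_1000FD;
	__set_bit(PHY_INTERFACE_MODE_RGMII, cfg->supported_interfaces);
	/* phylink_generic_validate() computes the link modes from these. */
}
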
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b4c9e805e981..6a969969d221 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3438,7 +3438,9 @@ static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
}
static void atl1_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
@@ -3451,7 +3453,9 @@ static void atl1_get_ringparam(struct net_device *netdev,
}
static int atl1_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 969591bbc066..e5857e88c207 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1961,7 +1961,9 @@ static int b44_set_link_ksettings(struct net_device *dev,
}
static void b44_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct b44 *bp = netdev_priv(dev);
@@ -1972,7 +1974,9 @@ static void b44_get_ringparam(struct net_device *dev,
}
static int b44_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct b44 *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index b07cb9bc5f2d..4a2622b05ee1 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -635,7 +635,6 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
struct bcm4908_enet_dma_ring_bd *buf_desc;
struct bcm4908_enet_dma_ring_slot *slot;
struct device *dev = enet->dev;
- unsigned int bytes = 0;
int handled = 0;
while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
@@ -646,7 +645,6 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
dev_kfree_skb(slot->skb);
- bytes += slot->len;
if (++tx_ring->read_idx == tx_ring->length)
tx_ring->read_idx = 0;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a568994a03a6..b04e423c446a 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1497,8 +1497,11 @@ static int bcm_enet_set_link_ksettings(struct net_device *dev,
}
}
-static void bcm_enet_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+bcm_enet_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
@@ -1512,7 +1515,9 @@ static void bcm_enet_get_ringparam(struct net_device *dev,
}
static int bcm_enet_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
int was_running;
@@ -2579,8 +2584,11 @@ static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
}
}
-static void bcm_enetsw_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+bcm_enetsw_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
@@ -2595,8 +2603,11 @@ static void bcm_enetsw_get_ringparam(struct net_device *dev,
ering->tx_pending = priv->tx_ring_size;
}
-static int bcm_enetsw_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static int
+bcm_enetsw_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
int was_running;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index babc955ba64e..e20aafeb4ca9 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -7318,7 +7318,9 @@ static int bnx2_set_coalesce(struct net_device *dev,
}
static void
-bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2 *bp = netdev_priv(dev);
@@ -7389,7 +7391,9 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
}
static int
-bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2 *bp = netdev_priv(dev);
int rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2b06d78baa08..a19dd6797070 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1850,6 +1850,14 @@ struct bnx2x {
/* Vxlan/Geneve related information */
u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX];
+
+#define FW_CAP_INVALIDATE_VF_FP_HSI BIT(0)
+ u32 fw_cap;
+
+ u32 fw_major;
+ u32 fw_minor;
+ u32 fw_rev;
+ u32 fw_eng;
};
/* Tx queues may be less or equal to Rx queues */
@@ -2525,5 +2533,6 @@ void bnx2x_register_phc(struct bnx2x *bp);
* Meant for implicit re-load flows.
*/
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-
+int bnx2x_init_firmware(struct bnx2x *bp);
+void bnx2x_release_firmware(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e8e8c2d593c5..8d36ebbf08e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
+#include <net/gro.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
@@ -2364,10 +2365,8 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
/* build my FW version dword */
- u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
- (BCM_5710_FW_MINOR_VERSION << 8) +
- (BCM_5710_FW_REVISION_VERSION << 16) +
- (BCM_5710_FW_ENGINEERING_VERSION << 24);
+ u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) +
+ (bp->fw_rev << 16) + (bp->fw_eng << 24);
/* read loaded FW from chip */
u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 472a3a478038..0e319ac7799f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1914,7 +1914,9 @@ static int bnx2x_set_coalesce(struct net_device *dev,
}
static void bnx2x_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1938,7 +1940,9 @@ static void bnx2x_get_ringparam(struct net_device *dev,
}
static int bnx2x_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 3f8435208bf4..a84d015da5df 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -241,6 +241,8 @@
IRO[221].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
+#define XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(fid) \
+ (IRO[386].base + ((fid) * IRO[386].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
/* eth hsi version */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 622fadc50316..611efee75834 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 15
+#define BCM_5710_FW_REVISION_VERSION 21
+#define BCM_5710_FW_REVISION_VERSION_V15 15
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index aec666e97683..774c1f1a57c3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -74,9 +74,19 @@
__stringify(BCM_5710_FW_MINOR_VERSION) "." \
__stringify(BCM_5710_FW_REVISION_VERSION) "." \
__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
+#define FW_FILE_VERSION_V15 \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -747,9 +757,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
CHIP_IS_E1(bp) ? "everest1" :
CHIP_IS_E1H(bp) ? "everest1h" :
CHIP_IS_E2(bp) ? "everest2" : "everest3",
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION);
+ bp->fw_major, bp->fw_minor, bp->fw_rev);
return rc;
}
@@ -12308,6 +12316,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bnx2x_read_fwinfo(bp);
+ if (IS_PF(bp)) {
+ rc = bnx2x_init_firmware(bp);
+
+ if (rc) {
+ bnx2x_free_mem_bp(bp);
+ return rc;
+ }
+ }
+
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
@@ -12320,6 +12337,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
rc = bnx2x_prev_unload(bp);
if (rc) {
+ bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
return rc;
}
@@ -13026,19 +13044,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_features_check = bnx2x_features_check,
};
-static int bnx2x_set_coherency_mask(struct bnx2x *bp)
-{
- struct device *dev = &bp->pdev->dev;
-
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
- dev_err(dev, "System does not support DMA, aborting\n");
- return -EIO;
- }
-
- return 0;
-}
-
static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
{
if (bp->flags & AER_ENABLED) {
@@ -13116,9 +13121,11 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
goto err_out_release;
}
- rc = bnx2x_set_coherency_mask(bp);
- if (rc)
+ rc = dma_set_mask_and_coherent(&bp->pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&bp->pdev->dev, "System does not support DMA, aborting\n");
goto err_out_release;
+ }
dev->mem_start = pci_resource_start(pdev, 0);
dev->base_addr = dev->mem_start;
@@ -13317,16 +13324,11 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Check FW version */
offset = be32_to_cpu(fw_hdr->fw_version.offset);
fw_ver = firmware->data + offset;
- if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
- (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
- (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
- (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+ if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor ||
+ fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) {
BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
- fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION,
- BCM_5710_FW_ENGINEERING_VERSION);
+ fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+ bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng);
return -EINVAL;
}
@@ -13404,34 +13406,51 @@ do { \
(u8 *)bp->arr, len); \
} while (0)
-static int bnx2x_init_firmware(struct bnx2x *bp)
+int bnx2x_init_firmware(struct bnx2x *bp)
{
- const char *fw_file_name;
+ const char *fw_file_name, *fw_file_name_v15;
struct bnx2x_fw_file_hdr *fw_hdr;
int rc;
if (bp->firmware)
return 0;
- if (CHIP_IS_E1(bp))
+ if (CHIP_IS_E1(bp)) {
fw_file_name = FW_FILE_NAME_E1;
- else if (CHIP_IS_E1H(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1_V15;
+ } else if (CHIP_IS_E1H(bp)) {
fw_file_name = FW_FILE_NAME_E1H;
- else if (!CHIP_IS_E1x(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1H_V15;
+ } else if (!CHIP_IS_E1x(bp)) {
fw_file_name = FW_FILE_NAME_E2;
- else {
+ fw_file_name_v15 = FW_FILE_NAME_E2_V15;
+ } else {
BNX2X_ERR("Unsupported chip revision\n");
return -EINVAL;
}
+
BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
if (rc) {
- BNX2X_ERR("Can't load firmware file %s\n",
- fw_file_name);
- goto request_firmware_exit;
+ BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15);
+
+ /* try to load prev version */
+ rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev);
+
+ if (rc)
+ goto request_firmware_exit;
+
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15;
+ } else {
+ bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI;
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION;
}
+ bp->fw_major = BCM_5710_FW_MAJOR_VERSION;
+ bp->fw_minor = BCM_5710_FW_MINOR_VERSION;
+ bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION;
+
rc = bnx2x_check_firmware(bp);
if (rc) {
BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
@@ -13487,7 +13506,7 @@ request_firmware_exit:
return rc;
}
-static void bnx2x_release_firmware(struct bnx2x *bp)
+void bnx2x_release_firmware(struct bnx2x *bp)
{
kfree(bp->init_ops_offsets);
kfree(bp->init_ops);
@@ -14004,6 +14023,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
init_one_freemem:
+ bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
init_one_exit:
@@ -15356,11 +15376,6 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
config.tx_type, config.rx_filter);
- if (config.flags) {
- BNX2X_ERR("config.flags is reserved for future use\n");
- return -EINVAL;
- }
-
bp->hwtstamp_ioctl_called = true;
bp->tx_type = config.tx_type;
bp->rx_filter = config.rx_filter;
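
The firmware handling added to bnx2x_init_firmware() in the hunks above reduces to: request the current blob, fall back to the previous release when it is absent, and record which one actually loaded so later version checks compare against reality. A condensed sketch, with hypothetical foo names and revision constants standing in for the driver's own:

	#include <linux/bits.h>
	#include <linux/firmware.h>
	#include <linux/pci.h>

	#define FOO_FW_REV_NEW	21	/* hypothetical revision numbers */
	#define FOO_FW_REV_OLD	15
	#define FOO_FW_CAP_NEW	BIT(0)	/* feature only the new blob has */

	struct foo {
		struct pci_dev *pdev;
		const struct firmware *fw;
		u32 fw_cap;
		u32 fw_rev;
	};

	static int foo_load_fw(struct foo *bp, const char *new_name,
			       const char *old_name)
	{
		int rc;

		rc = request_firmware(&bp->fw, new_name, &bp->pdev->dev);
		if (rc) {
			/* New blob absent: retry with the previous release. */
			rc = request_firmware(&bp->fw, old_name, &bp->pdev->dev);
			if (rc)
				return rc;	/* neither blob is available */
			bp->fw_rev = FOO_FW_REV_OLD;
		} else {
			bp->fw_cap |= FOO_FW_CAP_NEW;
			bp->fw_rev = FOO_FW_REV_NEW;
		}
		return 0;
	}
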
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 74a8931ce1d1..11d15cd03600 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -758,9 +758,18 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
+ u16 abs_fid;
+
+ abs_fid = FW_VF_HANDLE(abs_vfid);
+
/* set the VF-PF association in the FW */
- storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
- storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+ storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp));
+ storm_memset_func_en(bp, abs_fid, 1);
+
+ /* Invalidate fp_hsi version for vfs */
+ if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI)
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0);
 /* clear vf errors */
bnx2x_vf_semi_clear_err(bp, abs_vfid);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8c2cf5519787..2dac704dc346 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -586,7 +586,7 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
-static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
+static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr,
u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 0b193edb73b8..2bb133ae61c3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -849,7 +849,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
memcpy(old, new, sizeof(struct nig_stats));
- memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+ BUILD_BUG_ON(sizeof(estats->shared) != sizeof(pstats->mac_stx[1]));
+ memcpy(&(estats->shared), &(pstats->mac_stx[1]),
sizeof(struct mac_stx));
estats->brb_drop_hi = pstats->brb_drop_hi;
estats->brb_drop_lo = pstats->brb_drop_lo;
@@ -1634,9 +1635,9 @@ void bnx2x_stats_init(struct bnx2x *bp)
REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
if (!CHIP_IS_E3(bp)) {
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+ &(bp->port.old_nig_stats.egress_mac_pkt0), 2);
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+ &(bp->port.old_nig_stats.egress_mac_pkt1), 2);
}
/* Prepare statistics ramrod data */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index d55e63692cf3..ae93c078707b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -36,10 +36,14 @@ struct nig_stats {
u32 pbf_octets;
u32 pbf_packet;
u32 safc_inp;
- u32 egress_mac_pkt0_lo;
- u32 egress_mac_pkt0_hi;
- u32 egress_mac_pkt1_lo;
- u32 egress_mac_pkt1_hi;
+ struct_group(egress_mac_pkt0,
+ u32 egress_mac_pkt0_lo;
+ u32 egress_mac_pkt0_hi;
+ );
+ struct_group(egress_mac_pkt1,
+ u32 egress_mac_pkt1_lo;
+ u32 egress_mac_pkt1_hi;
+ );
};
enum bnx2x_stats_event {
@@ -83,6 +87,7 @@ struct bnx2x_eth_stats {
u32 no_buff_discard_hi;
u32 no_buff_discard_lo;
+ struct_group(shared,
u32 rx_stat_ifhcinbadoctets_hi;
u32 rx_stat_ifhcinbadoctets_lo;
u32 tx_stat_ifhcoutbadoctets_hi;
@@ -159,6 +164,7 @@ struct bnx2x_eth_stats {
u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
u32 tx_stat_bmac_ufl_hi;
u32 tx_stat_bmac_ufl_lo;
+ );
u32 pause_frames_received_hi;
u32 pause_frames_received_lo;
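
struct_group(), used in the two stats structures above, wraps a run of adjacent members in a named, sized sub-object, so a single memcpy() spanning those members has a destination the compiler can bounds-check under FORTIFY_SOURCE. A self-contained sketch with made-up fields:

	#include <linux/stddef.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct foo_stats {
		u32 rx_ok;
		struct_group(shared,	/* named run of adjacent members */
			u32 rx_err;
			u32 tx_err;
		);
		u32 tx_ok;
	};

	static void foo_copy_shared(struct foo_stats *dst,
				    const struct foo_stats *src)
	{
		/* One copy that covers exactly the grouped members, with a
		 * destination size the fortified memcpy() can verify. */
		memcpy(&dst->shared, &src->shared, sizeof(dst->shared));
	}
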
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c04ea83188e2..4f94136a011a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -37,6 +37,7 @@
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
+#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
@@ -740,13 +741,16 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
return page;
}
-static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
gfp_t gfp)
{
u8 *data;
struct pci_dev *pdev = bp->pdev;
- data = kmalloc(bp->rx_buf_size, gfp);
+ if (gfp == GFP_ATOMIC)
+ data = napi_alloc_frag(bp->rx_buf_size);
+ else
+ data = netdev_alloc_frag(bp->rx_buf_size);
if (!data)
return NULL;
@@ -755,7 +759,7 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, *mapping)) {
- kfree(data);
+ skb_free_frag(data);
data = NULL;
}
return data;
@@ -778,7 +782,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + bp->rx_offset;
} else {
- u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+ u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
if (!data)
return -ENOMEM;
@@ -1020,11 +1024,11 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return NULL;
}
- skb = build_skb(data, 0);
+ skb = build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (!skb) {
- kfree(data);
+ skb_free_frag(data);
return NULL;
}
@@ -1612,7 +1616,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
u8 *new_data;
dma_addr_t new_mapping;
- new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats.rx.rx_oom_discards += 1;
@@ -1623,13 +1627,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->data_ptr = new_data + bp->rx_offset;
tpa_info->mapping = new_mapping;
- skb = build_skb(data, 0);
+ skb = build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
if (!skb) {
- kfree(data);
+ skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats.rx.rx_oom_discards += 1;
return NULL;
@@ -2043,13 +2047,22 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
{
- switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
+ u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
+
+ switch (err_type) {
case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
+ netdev_warn(bp->dev, "Pause Storm detected!\n");
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
+ netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
+ break;
default:
- netdev_err(bp->dev, "FW reported unknown error type\n");
+ netdev_err(bp->dev, "FW reported unknown error type %u\n",
+ err_type);
break;
}
}
@@ -2073,6 +2086,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
u32 data1 = le32_to_cpu(cmpl->event_data1);
u32 data2 = le32_to_cpu(cmpl->event_data2);
+ netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
+ event_id, data1, data2);
+
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
@@ -2419,7 +2435,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (event & BNXT_REDIRECT_EVENT)
- xdp_do_flush_map();
+ xdp_do_flush();
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -2622,6 +2638,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
{
struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_cp_ring_info *cpr_rx;
u32 raw_cons = cpr->cp_raw_cons;
struct bnxt *bp = bnapi->bp;
struct nqe_cn *nqcmp;
@@ -2649,7 +2666,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
if (napi_complete_done(napi, work_done))
BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
cpr->cp_raw_cons);
- return work_done;
+ goto poll_done;
}
/* The valid test of the entry must be done first before
@@ -2675,6 +2692,17 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
cpr->cp_raw_cons = raw_cons;
BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
}
+poll_done:
+ cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
+ if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
+ struct dim_sample dim_sample = {};
+
+ dim_update_sample(cpr->event_ctr,
+ cpr_rx->rx_packets,
+ cpr_rx->rx_bytes,
+ &dim_sample);
+ net_dim(&cpr->dim, dim_sample);
+ }
return work_done;
}
@@ -2774,7 +2802,7 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
tpa_info->data = NULL;
- kfree(data);
+ skb_free_frag(data);
}
skip_rx_tpa_free:
@@ -2800,7 +2828,7 @@ skip_rx_tpa_free:
dma_unmap_single_attrs(&pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- kfree(data);
+ skb_free_frag(data);
}
}
@@ -3504,7 +3532,7 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
u8 *data;
for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
+ data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -6474,8 +6502,8 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u16 val, tmr, max, flags = hw_coal->flags;
u32 cmpl_params = coal_cap->cmpl_params;
- u16 val, tmr, max, flags = 0;
max = hw_coal->bufs_per_record * 128;
if (hw_coal->budget)
@@ -6518,8 +6546,6 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
}
- if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
- flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
@@ -7668,19 +7694,6 @@ static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
BNXT_FW_HEALTH_WIN_MAP_OFF);
}
-bool bnxt_is_fw_healthy(struct bnxt *bp)
-{
- if (bp->fw_health && bp->fw_health->status_reliable) {
- u32 fw_status;
-
- fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
- return false;
- }
-
- return true;
-}
-
static void bnxt_inv_fw_health_reg(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -8008,6 +8021,12 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+ bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+ if (!bp->hwrm_cmd_max_timeout)
+ bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
+ else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
+ netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
+ bp->hwrm_cmd_max_timeout / 1000);
if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -8611,7 +8630,10 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
/* Filter for default vnic 0 */
rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
if (rc) {
- netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ if (BNXT_VF(bp) && rc == -ENODEV)
+ netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
+ else
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
goto err_out;
}
vnic->uc_filter_count = 1;
@@ -9404,6 +9426,10 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
rc = hwrm_req_send(bp, req);
if (rc) {
hwrm_req_drop(bp, req);
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
+ rc = 0;
+ }
return rc;
}
@@ -10802,12 +10828,21 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
if (rc) {
- netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
- rc);
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
+ else
+ netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
+ rc = 0;
+ } else {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ }
vnic->uc_filter_count = i;
return rc;
}
}
+ if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
skip_uc:
if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
@@ -11372,6 +11407,11 @@ static void bnxt_timer(struct timer_list *t)
}
}
+ if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
+ set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+
if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
netif_carrier_ok(dev)) {
set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
@@ -11875,7 +11915,13 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
static void bnxt_init_dflt_coal(struct bnxt *bp)
{
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
struct bnxt_coal *coal;
+ u16 flags = 0;
+
+ if (coal_cap->cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
 /* Tick values in microseconds.
* 1 coal_buf x bufs_per_record = 1 completion record.
@@ -11888,6 +11934,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->idle_thresh = 50;
coal->bufs_per_record = 2;
coal->budget = 64; /* NAPI budget */
+ coal->flags = flags;
coal = &bp->tx_coal;
coal->coal_ticks = 28;
@@ -11895,6 +11942,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->coal_ticks_irq = 2;
coal->coal_bufs_irq = 2;
coal->bufs_per_record = 1;
+ coal->flags = flags;
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
@@ -12381,8 +12429,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
- bnxt_init_dflt_coal(bp);
-
timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
@@ -13072,7 +13118,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
rc = __bnxt_reserve_rings(bp);
- if (rc)
+ if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
if (sh)
@@ -13081,7 +13127,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
/* Rings may have been trimmed, re-reserve the trimmed rings. */
if (bnxt_need_reserve_rings(bp)) {
rc = __bnxt_reserve_rings(bp);
- if (rc)
+ if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
}
@@ -13107,7 +13153,10 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
bnxt_clear_int_mode(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
- netdev_err(bp->dev, "Not enough rings available.\n");
+ if (BNXT_VF(bp) && rc == -ENODEV)
+ netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+ else
+ netdev_err(bp->dev, "Not enough rings available.\n");
goto init_dflt_ring_err;
}
rc = bnxt_init_int_mode(bp);
@@ -13395,13 +13444,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_ring_params(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
- netdev_err(bp->dev, "Not enough rings available.\n");
- rc = -ENOMEM;
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+ } else {
+ netdev_err(bp->dev, "Not enough rings available.\n");
+ rc = -ENOMEM;
+ }
goto init_err_pci_clean;
}
bnxt_fw_init_one_p3(bp);
+ bnxt_init_dflt_coal(bp);
+
if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
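
The bnxt RX buffer rework above swaps kmalloc()/kfree() for the page-frag allocators and tells build_skb() the real buffer size so the skb layer frees the frag correctly. A sketch of the pattern, with hypothetical helpers:

	#include <linux/skbuff.h>

	/* Allocate an RX buffer from the page-frag caches instead of
	 * kmalloc(); napi_alloc_frag() is only safe in softirq context,
	 * so fall back to netdev_alloc_frag() for GFP_KERNEL callers. */
	static void *foo_alloc_rx_frag(unsigned int buf_size, gfp_t gfp)
	{
		if (gfp == GFP_ATOMIC)
			return napi_alloc_frag(buf_size);
		return netdev_alloc_frag(buf_size);
	}

	/* Passing the true buffer size (not 0) tells build_skb() the
	 * memory is a page frag, so it is released with skb_free_frag()
	 * rather than kfree(). */
	static struct sk_buff *foo_build_rx_skb(void *data, unsigned int buf_size)
	{
		struct sk_buff *skb = build_skb(data, buf_size);

		if (!skb)
			skb_free_frag(data);	/* error path owns the frag */
		return skb;
	}
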
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 4c9507d82fd0..440dfeb4948b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -847,6 +847,7 @@ struct bnxt_coal {
u16 idle_thresh;
u8 bufs_per_record;
u8 budget;
+ u16 flags;
};
struct bnxt_tpa_info {
@@ -1915,6 +1916,7 @@ struct bnxt {
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_STATE_NAPI_DISABLED 9
+#define BNXT_STATE_L2_FILTER_RETRY 10
#define BNXT_STATE_FW_ACTIVATE 11
#define BNXT_STATE_RECOVER 12
#define BNXT_STATE_FW_NON_FATAL_COND 13
@@ -1985,7 +1987,8 @@ struct bnxt {
u16 hwrm_max_req_len;
u16 hwrm_max_ext_req_len;
- int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_max_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
#define FW_VER_STR_LEN 32
@@ -2302,7 +2305,6 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
-bool bnxt_is_fw_healthy(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index d3cb2f21946d..c06789882036 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -32,7 +32,7 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
return -ENOMEM;
}
- hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
+ hwrm_req_timeout(bp, msg, bp->hwrm_cmd_max_timeout);
cmn_resp = hwrm_req_hold(bp, msg);
resp = cmn_resp;
@@ -125,7 +125,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
if (rc)
return rc;
- hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 951c4c569a9b..4da31b1b84f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
#include <net/devlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8188d55722e4..003330e8cd58 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -31,9 +31,6 @@
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"
-#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
-#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
-#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
static u32 bnxt_get_msglevel(struct net_device *dev)
{
@@ -68,6 +65,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
+ if (hw_coal->flags &
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
+ kernel_coal->use_cqe_mode_rx = true;
hw_coal = &bp->tx_coal;
mult = hw_coal->bufs_per_record;
@@ -75,6 +75,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
+ if (hw_coal->flags &
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
+ kernel_coal->use_cqe_mode_tx = true;
coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
@@ -101,12 +104,22 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
}
+ if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
+ !(bp->coal_cap.cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
+ return -EOPNOTSUPP;
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
hw_coal->coal_ticks = coal->rx_coalesce_usecs;
hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
+ hw_coal->flags &=
+ ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (kernel_coal->use_cqe_mode_rx)
+ hw_coal->flags |=
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
hw_coal = &bp->tx_coal;
mult = hw_coal->bufs_per_record;
@@ -114,6 +127,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
+ hw_coal->flags &=
+ ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (kernel_coal->use_cqe_mode_tx)
+ hw_coal->flags |=
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
u32 stats_ticks = coal->stats_block_coalesce_usecs;
@@ -775,7 +793,9 @@ skip_tpa_stats:
}
static void bnxt_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
@@ -794,7 +814,9 @@ static void bnxt_get_ringparam(struct net_device *dev,
}
static int bnxt_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
@@ -2169,7 +2191,7 @@ static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
req->host_src_addr = cpu_to_le64(dma_handle);
}
- hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT);
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->dir_type = cpu_to_le16(dir_type);
req->dir_ordinal = cpu_to_le16(dir_ordinal);
req->dir_ext = cpu_to_le16(dir_ext);
@@ -2515,8 +2537,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
return rc;
}
- hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT);
- hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT);
+ hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
+ hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);
hwrm_req_hold(bp, modify);
modify->host_src_addr = cpu_to_le64(dma_handle);
@@ -3917,7 +3939,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
- ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_USE_CQE,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
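
The CQE coalescing support above threads a single mode bit through the driver: advertise ETHTOOL_COALESCE_USE_CQE in supported_coalesce_params, reject the mode when the device cannot do it, and mirror the request into driver state otherwise. A sketch with a hypothetical foo_priv:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	struct foo_priv {
		bool has_cqe_mode;	/* device capability, hypothetical */
		bool rx_cqe_mode;
		bool tx_cqe_mode;
	};

	static int foo_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *coal,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(dev);

		/* The ethtool core already filtered the request against
		 * supported_coalesce_params; this check catches devices
		 * where the capability depends on runtime state. */
		if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
		    !priv->has_cqe_mode)
			return -EOPNOTSUPP;

		priv->rx_cqe_mode = kernel_coal->use_cqe_mode_rx;
		priv->tx_cqe_mode = kernel_coal->use_cqe_mode_tx;
		return 0;
	}
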
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index bb7327b82d0b..566c9487ef55 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -359,6 +359,8 @@ static int __hwrm_to_stderr(u32 hwrm_err)
return -EAGAIN;
case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
return -EOPNOTSUPP;
+ case HWRM_ERR_CODE_PF_UNAVAILABLE:
+ return -ENODEV;
default:
return -EIO;
}
@@ -416,6 +418,44 @@ hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}
+static void hwrm_req_dbg(struct bnxt *bp, struct input *req)
+{
+ u32 ring = le16_to_cpu(req->cmpl_ring);
+ u32 type = le16_to_cpu(req->req_type);
+ u32 tgt = le16_to_cpu(req->target_id);
+ u32 seq = le16_to_cpu(req->seq_id);
+ char opt[32] = "\n";
+
+ if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING))
+ snprintf(opt, 16, " ring %d\n", ring);
+
+ if (unlikely(tgt != BNXT_HWRM_TARGET))
+ snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
+
+ netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
+ type, seq, opt);
+}
+
+#define hwrm_err(bp, ctx, fmt, ...) \
+ do { \
+ if ((ctx)->flags & BNXT_HWRM_CTX_SILENT) \
+ netdev_dbg((bp)->dev, fmt, __VA_ARGS__); \
+ else \
+ netdev_err((bp)->dev, fmt, __VA_ARGS__); \
+ } while (0)
+
+static bool hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status)
+{
+ if (req_type == HWRM_VER_GET)
+ return false;
+
+ if (!bp->fw_health || !bp->fw_health->status_reliable)
+ return false;
+
+ *fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status);
+}
+
static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
@@ -427,8 +467,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
unsigned int i, timeout, tmo_count;
u32 *data = (u32 *)ctx->req;
u32 msg_len = ctx->req_len;
+ u32 req_type, sts;
int rc = -EBUSY;
- u32 req_type;
u16 len = 0;
u8 *valid;
@@ -436,8 +476,11 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
memset(ctx->resp, 0, PAGE_SIZE);
req_type = le16_to_cpu(ctx->req->req_type);
- if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET)
+ if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) {
+ netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
+ req_type);
goto exit;
+ }
if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
msg_len > bp->hwrm_max_ext_req_len) {
@@ -490,13 +533,15 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
/* Ring channel doorbell */
writel(1, bp->bar0 + doorbell_offset);
+ hwrm_req_dbg(bp, ctx->req);
+
if (!pci_is_enabled(bp->pdev)) {
rc = -ENODEV;
goto exit;
}
/* Limit timeout to an upper limit */
- timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT);
+ timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT);
/* convert timeout to usec */
timeout *= 1000;
@@ -523,17 +568,19 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
} else {
- if (HWRM_WAIT_MUST_ABORT(bp, ctx))
- break;
+ if (hwrm_wait_must_abort(bp, req_type, &sts)) {
+ hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n",
+ req_type, sts);
+ goto exit;
+ }
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
}
}
if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
- le16_to_cpu(ctx->req->req_type));
+ hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n",
+ req_type);
goto exit;
}
len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
@@ -565,7 +612,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
if (resp_seq != seen_out_of_seq) {
netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
le16_to_cpu(resp_seq),
- le16_to_cpu(ctx->req->req_type),
+ req_type,
le16_to_cpu(ctx->req->seq_id));
seen_out_of_seq = resp_seq;
}
@@ -576,20 +623,22 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
} else {
- if (HWRM_WAIT_MUST_ABORT(bp, ctx))
- goto timeout_abort;
+ if (hwrm_wait_must_abort(bp, req_type, &sts)) {
+ hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n",
+ req_type,
+ le16_to_cpu(ctx->req->seq_id),
+ len, sts);
+ goto exit;
+ }
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
}
}
if (i >= tmo_count) {
-timeout_abort:
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
- hwrm_total_timeout(i),
- le16_to_cpu(ctx->req->req_type),
- le16_to_cpu(ctx->req->seq_id), len);
+ hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ hwrm_total_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len);
goto exit;
}
@@ -604,12 +653,9 @@ timeout_abort:
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
- hwrm_total_timeout(i),
- le16_to_cpu(ctx->req->req_type),
- le16_to_cpu(ctx->req->seq_id), len,
- *valid);
+ hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ hwrm_total_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len, *valid);
goto exit;
}
}
@@ -620,11 +666,12 @@ timeout_abort:
*/
*valid = 0;
rc = le16_to_cpu(ctx->resp->error_code);
- if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) {
- netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- le16_to_cpu(ctx->resp->req_type),
- le16_to_cpu(ctx->resp->seq_id), rc);
- }
+ if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
+ netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+ req_type, token->seq_id, rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index 4d17f0d5363b..d52bd2d63aec 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -58,11 +58,10 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
-#define HWRM_CMD_MAX_TIMEOUT 40000
+#define HWRM_CMD_MAX_TIMEOUT 40000U
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
-#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
#define BNXT_HWRM_TARGET 0xffff
#define BNXT_HWRM_NO_CMPL_RING -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
@@ -83,10 +82,6 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define HWRM_MIN_TIMEOUT 25
#define HWRM_MAX_TIMEOUT 40
-#define HWRM_WAIT_MUST_ABORT(bp, ctx) \
- (le16_to_cpu((ctx)->req->req_type) != HWRM_VER_GET && \
- !bnxt_is_fw_healthy(bp))
-
static inline unsigned int hwrm_total_timeout(unsigned int n)
{
return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT :
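
The timeout clamp in __hwrm_send() above leans on the GNU a ?: b shorthand, which reads "a, unless a is zero, then b": prefer the device-reported maximum and fall back to the compile-time cap when firmware never reported one. A tiny sketch with a hypothetical cap:

	#include <linux/minmax.h>

	#define FOO_CMD_MAX_TIMEOUT	40000U	/* hypothetical hard cap, ms */

	static unsigned int foo_clamp_timeout(unsigned int requested,
					      unsigned int dev_max)
	{
		/* dev_max ?: FOO_CMD_MAX_TIMEOUT picks dev_max when it is
		 * nonzero, the hard cap otherwise. */
		return min(requested, dev_max ?: FOO_CMD_MAX_TIMEOUT);
	}
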
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 8388be119f9a..48520967746f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -417,9 +417,6 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
- if (stmpconf.flags)
- return -EINVAL;
-
if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
stmpconf.tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 1471b6130a2b..d8afcf8d6b30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1962,7 +1962,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, v
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
- if (!bnxt_is_netdev_indr_offload(netdev))
+ if (!netdev || !bnxt_is_netdev_indr_offload(netdev))
return -EOPNOTSUPP;
switch (type) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c8083df5e0ab..52fad0fdeacf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -195,7 +195,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_REDIRECT_EVENT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(bp->dev, xdp_prog, act);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 226f4403cfed..87f1056e29ff 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -4020,10 +4020,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
- err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
- dev->name, priv);
- if (!err)
- device_set_wakeup_capable(&pdev->dev, 1);
+ if (priv->wol_irq > 0) {
+ err = devm_request_irq(&pdev->dev, priv->wol_irq,
+ bcmgenet_wol_isr, 0, dev->name, priv);
+ if (!err)
+ device_set_wakeup_capable(&pdev->dev, 1);
+ }
/* Set the needed headroom to account for any possible
* features enabling/disabling at runtime
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index f38f40eb966e..a1a38456c9a3 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2183,9 +2183,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
ea_reg >>= 8;
}
- for (i = 0; i < 6; i++) {
- dev->dev_addr[i] = eaddr[i];
- }
+ eth_hw_addr_set(dev, eaddr);
/*
* Initialize context (get pointers to registers and stuff), then
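
The sb1250 change above reflects netdev->dev_addr becoming const in this kernel: writes must go through a helper so the core can keep its address tracking consistent, which also collapses the old byte-by-byte loop. A sketch:

	#include <linux/etherdevice.h>

	static void foo_set_hw_mac(struct net_device *dev,
				   const u8 addr[ETH_ALEN])
	{
		/* eth_hw_addr_set() is the sanctioned way to store a MAC
		 * read from hardware into the (now const) dev_addr. */
		eth_hw_addr_set(dev, addr);
	}
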
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 85ca3909859d..c28f8cc00d1c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12390,7 +12390,10 @@ static int tg3_nway_reset(struct net_device *dev)
return r;
}
-static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static void tg3_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
@@ -12411,7 +12414,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
ering->tx_pending = tp->napi[0].tx_pending;
}
-static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static int tg3_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
@@ -13800,9 +13806,6 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
- if (stmpconf.flags)
- return -EINVAL;
-
if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
stmpconf.tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
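
The config.flags check removed from several hwtstamp handlers in this series (bnx2x, bnxt, tg3, liquidio, octeon, macb) moved into the core ioctl path, which now validates the flags before the driver ever sees the request. What remains driver-side looks roughly like this sketch, with hypothetical foo names:

	#include <linux/net_tstamp.h>
	#include <linux/netdevice.h>
	#include <linux/uaccess.h>

	static int foo_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
	{
		struct hwtstamp_config cfg;

		if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
			return -EFAULT;

		/* cfg.flags is checked by the core before this runs, so
		 * only the fields the hardware cares about are validated
		 * here. */
		if (cfg.tx_type != HWTSTAMP_TX_ON && cfg.tx_type != HWTSTAMP_TX_OFF)
			return -ERANGE;

		/* ...program the hardware... */

		return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
	}
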
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index bbdc829c3524..f1d2c4cd5da2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3421,7 +3421,7 @@ static const struct net_device_ops bnad_netdev_ops = {
};
static void
-bnad_netdev_init(struct bnad *bnad, bool using_dac)
+bnad_netdev_init(struct bnad *bnad)
{
struct net_device *netdev = bnad->netdev;
@@ -3434,10 +3434,8 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
-
- if (using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HIGHDMA;
netdev->mem_start = bnad->mmio_start;
netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
@@ -3544,8 +3542,7 @@ bnad_lock_uninit(struct bnad *bnad)
/* PCI Initialization */
static int
-bnad_pci_init(struct bnad *bnad,
- struct pci_dev *pdev, bool *using_dac)
+bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev)
{
int err;
@@ -3555,14 +3552,9 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- *using_dac = true;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- goto release_regions;
- *using_dac = false;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ goto release_regions;
pci_set_master(pdev);
return 0;
@@ -3585,7 +3577,6 @@ static int
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
- bool using_dac;
int err;
struct bnad *bnad;
struct bna *bna;
@@ -3615,13 +3606,8 @@ bnad_pci_probe(struct pci_dev *pdev,
bnad->id = atomic_inc_return(&bna_id) - 1;
mutex_lock(&bnad->conf_mutex);
- /*
- * PCI initialization
- * Output : using_dac = 1 for 64 bit DMA
- * = 0 for 32 bit DMA
- */
- using_dac = false;
- err = bnad_pci_init(bnad, pdev, &using_dac);
+ /* PCI initialization */
+ err = bnad_pci_init(bnad, pdev);
if (err)
goto unlock_mutex;
@@ -3634,7 +3620,7 @@ bnad_pci_probe(struct pci_dev *pdev,
goto pci_uninit;
/* Initialize netdev structure, set up ethtool ops */
- bnad_netdev_init(bnad, using_dac);
+ bnad_netdev_init(bnad);
/* Set link to down state */
netif_carrier_off(netdev);
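
The DMA mask simplification in bnad (and in bnx2x above) leans on a DMA-API guarantee: a 64-bit mask request succeeds on any platform that can do DMA at all, with the core restricting it internally on 32-bit-only systems, so the old try-64-then-32 fallback is dead code. A sketch of the probe-time call:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int foo_setup_dma(struct pci_dev *pdev)
	{
		int err;

		/* One call covers both streaming and coherent masks. */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (err)
			dev_err(&pdev->dev, "no usable DMA configuration\n");
		return err;
	}
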
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 391b85f25141..8aca768571b2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -235,13 +235,18 @@ static int
bnad_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
-
- supported = SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_10000baseT_Full;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full);
cmd->base.autoneg = AUTONEG_DISABLE;
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
cmd->base.port = PORT_FIBRE;
cmd->base.phy_address = 0;
@@ -253,11 +258,6 @@ bnad_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
return 0;
}
@@ -405,7 +405,9 @@ static int bnad_set_coalesce(struct net_device *netdev,
static void
bnad_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct bnad *bnad = netdev_priv(netdev);
@@ -418,7 +420,9 @@ bnad_get_ringparam(struct net_device *netdev,
static int
bnad_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
int i, current_err, err = 0;
struct bnad *bnad = netdev_priv(netdev);
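
The bnad ethtool change above drops the legacy u32 conversion in favour of the link-mode bitmap helpers, which scale past 32 modes and let drivers report media-specific modes (CR/SR/LR) instead of the catch-all 10000baseT. A sketch of the helper-based style:

	#include <linux/ethtool.h>

	static int foo_get_link_ksettings(struct net_device *dev,
					  struct ethtool_link_ksettings *cmd)
	{
		/* The bitmap helpers replace the 32-bit SUPPORTED_xxx and
		 * ADVERTISED_xxx words, which ran out of room years ago. */
		ethtool_link_ksettings_zero_link_mode(cmd, supported);
		ethtool_link_ksettings_zero_link_mode(cmd, advertising);
		ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full);
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);

		cmd->base.port = PORT_FIBRE;
		cmd->base.autoneg = AUTONEG_DISABLE;
		return 0;
	}
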
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5620b97b3482..9ddbee7de72b 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1271,7 +1271,8 @@ struct macb {
struct mii_bus *mii_bus;
struct phylink *phylink;
struct phylink_config phylink_config;
- struct phylink_pcs phylink_pcs;
+ struct phylink_pcs phylink_usx_pcs;
+ struct phylink_pcs phylink_sgmii_pcs;
u32 caps;
unsigned int dma_burst_length;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ffce528aa00e..a363da928e8b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -506,79 +506,11 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
-static void macb_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct macb *bp = netdev_priv(ndev);
-
- /* We only support MII, RMII, GMII, RGMII & SGMII. */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_10GBASER &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- linkmode_zero(supported);
- return;
- }
-
- if (!macb_is_gem(bp) &&
- (state->interface == PHY_INTERFACE_MODE_GMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- linkmode_zero(supported);
- return;
- }
-
- if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
- !(bp->caps & MACB_CAPS_HIGH_SPEED &&
- bp->caps & MACB_CAPS_PCS)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Asym_Pause);
-
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_10GBASER)) {
- phylink_set_10g_modes(mask);
- phylink_set(mask, 10000baseKR_Full);
- if (state->interface != PHY_INTERFACE_MODE_NA)
- goto out;
- }
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_GMII ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
-
- if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
- phylink_set(mask, 1000baseT_Half);
- }
-out:
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed,
int duplex)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
u32 config;
config = gem_readl(bp, USX_CONTROL);
@@ -592,7 +524,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
u32 val;
state->speed = SPEED_10000;
@@ -612,7 +544,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
GEM_BIT(SIGNAL_OK));
@@ -795,28 +727,23 @@ static void macb_mac_link_up(struct phylink_config *config,
netif_tx_wake_all_queues(ndev);
}
-static int macb_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
+static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct net_device *ndev = to_net_dev(config->dev);
struct macb *bp = netdev_priv(ndev);
if (interface == PHY_INTERFACE_MODE_10GBASER)
- bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops;
+ return &bp->phylink_usx_pcs;
else if (interface == PHY_INTERFACE_MODE_SGMII)
- bp->phylink_pcs.ops = &macb_phylink_pcs_ops;
+ return &bp->phylink_sgmii_pcs;
else
- bp->phylink_pcs.ops = NULL;
-
- if (bp->phylink_pcs.ops)
- phylink_set_pcs(bp->phylink, &bp->phylink_pcs);
-
- return 0;
+ return NULL;
}
static const struct phylink_mac_ops macb_phylink_ops = {
- .validate = macb_validate,
- .mac_prepare = macb_mac_prepare,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = macb_mac_select_pcs,
.mac_config = macb_mac_config,
.mac_link_down = macb_mac_link_down,
.mac_link_up = macb_mac_link_up,
@@ -874,6 +801,9 @@ static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
+ bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
+
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
@@ -882,6 +812,35 @@ static int macb_mii_probe(struct net_device *dev)
bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
}
+ bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ bp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ bp->phylink_config.supported_interfaces);
+
+ /* Determine what modes are supported */
+ if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
+ bp->phylink_config.mac_capabilities |= MAC_1000FD;
+ if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
+ bp->phylink_config.mac_capabilities |= MAC_1000HD;
+
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ bp->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_PCS)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_HIGH_SPEED) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ bp->phylink_config.supported_interfaces);
+ bp->phylink_config.mac_capabilities |= MAC_10000FD;
+ }
+ }
+
bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
bp->phy_interface, &macb_phylink_ops);
if (IS_ERR(bp->phylink)) {
@@ -3101,7 +3060,9 @@ static int macb_set_link_ksettings(struct net_device *netdev,
}
static void macb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
@@ -3113,7 +3074,9 @@ static void macb_get_ringparam(struct net_device *netdev,
}
static int macb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
u32 new_rx_size, new_tx_size;
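
The macb phylink conversion above is the new idiom in miniature: capabilities and supported interfaces are declared once on phylink_config, phylink_generic_validate() derives the link modes from them, and mac_select_pcs() returns the PCS for a given interface instead of mutating pointers in mac_prepare(). A sketch with a hypothetical foo_priv:

	#include <linux/netdevice.h>
	#include <linux/phylink.h>

	struct foo_priv {
		struct phylink_pcs sgmii_pcs;	/* hypothetical embedded PCS */
	};

	/* Return the PCS matching the negotiated interface; NULL means the
	 * MAC needs no PCS for this mode. phylink keeps the returned
	 * pointer current across interface changes. */
	static struct phylink_pcs *foo_select_pcs(struct phylink_config *config,
						  phy_interface_t interface)
	{
		struct foo_priv *priv = netdev_priv(to_net_dev(config->dev));

		if (interface == PHY_INTERFACE_MODE_SGMII)
			return &priv->sgmii_pcs;
		return NULL;
	}

	static const struct phylink_mac_ops foo_phylink_ops = {
		.validate	= phylink_generic_validate,
		.mac_select_pcs	= foo_select_pcs,
		/* .mac_config / .mac_link_up / .mac_link_down as usual */
	};
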
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 095c5a2144a7..fb6b27f46b15 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -464,10 +464,6 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
sizeof(*tstamp_config)))
return -EFAULT;
- /* reserved for future extensions */
- if (tstamp_config->flags)
- return -EINVAL;
-
switch (tstamp_config->tx_type) {
case HWTSTAMP_TX_OFF:
break;
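This hunk, and the matching ones in the liquidio, octeon_mgmt and nicvf files below, drop the per-driver "reserved for future extensions" test because hwtstamp_config.flags is now validated once in the networking core before the driver's handler runs. The resulting driver shape, sketched for a hypothetical handler (assuming that core-side flags validation):

	static int foo_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
	{
		struct hwtstamp_config config;

		if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
			return -EFAULT;

		/* no config.flags check here any more; the core rejects
		 * unknown flags before calling the driver
		 */
		switch (config.tx_type) {
		case HWTSTAMP_TX_OFF:
		case HWTSTAMP_TX_ON:
			break;
		default:
			return -ERANGE;
		}

		/* ... program the hardware, then echo the config back ... */
		return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		       -EFAULT : 0;
	}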
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 2b9747867d4c..2c10ae3f7fc1 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -947,7 +947,9 @@ static int lio_set_phys_id(struct net_device *netdev,
static void
lio_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -1252,8 +1254,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
return 0;
}
-static int lio_ethtool_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static int
+lio_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
u32 rx_count, tx_count, rx_count_old, tx_count_old;
struct lio *lio = GET_LIO(netdev);
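Every ->get_ringparam()/->set_ringparam() implementation touched by this series gains two parameters: a struct kernel_ethtool_ringparam for fields that exist only in the ethtool netlink interface, and a struct netlink_ext_ack for error reporting. Drivers that need neither simply ignore them; drivers that want precise error messages can attach one to the extack. A sketch (FOO_MAX_RING and foo-specific resizing are assumed names, not from the patch):

	static int foo_set_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ring,
				     struct kernel_ethtool_ringparam *kernel_ring,
				     struct netlink_ext_ack *extack)
	{
		if (ring->rx_pending > FOO_MAX_RING ||
		    ring->tx_pending > FOO_MAX_RING) {
			NL_SET_ERR_MSG_MOD(extack, "requested ring size too large");
			return -EINVAL;
		}

		/* ... reallocate the rings ... */
		return 0;
	}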
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 12eee2bc7f5c..ba28aa444e5a 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2114,9 +2114,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
return -EFAULT;
- if (conf.flags)
- return -EINVAL;
-
switch (conf.tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index c607756b731f..568f211d91cc 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1254,9 +1254,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
return -EFAULT;
- if (conf.flags)
- return -EINVAL;
-
switch (conf.tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4e39d712e121..103591dcea1c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -548,7 +548,7 @@ struct octeon_mgmt_cam_state {
};
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
- unsigned char *addr)
+ const unsigned char *addr)
{
int i;
@@ -702,9 +702,6 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags) /* reserved for future extensions */
- return -EINVAL;
-
/* Check the status of hardware for timestamps */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
/* Get the current state of the PTP clock */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 7f2882109b16..5a9fad61e9ea 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -467,7 +467,9 @@ static int nicvf_get_coalesce(struct net_device *netdev,
}
static void nicvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
@@ -479,7 +481,9 @@ static void nicvf_get_ringparam(struct net_device *netdev,
}
static int nicvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index bb45d5df2856..a04aa206fddc 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -590,7 +590,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
return true;
default:
- bpf_warn_invalid_xdp_action(action);
+ bpf_warn_invalid_xdp_action(nic->netdev, prog, action);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(nic->netdev, prog, action);
@@ -1917,10 +1917,6 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
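bpf_warn_invalid_xdp_action() now takes the net_device and the bpf_prog in addition to the action, so the one-line warning can identify which program on which device returned the bogus verdict. The surrounding switch keeps its usual shape, as in the nicvf hunk above:

	switch (action) {
	case XDP_PASS:
	case XDP_TX:
		break;
	default:
		bpf_warn_invalid_xdp_action(dev, prog, action);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, prog, action);
		fallthrough;
	case XDP_DROP:
		/* drop the frame */
		break;
	}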
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 50bbe79fb93d..4367edbdd579 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -10,6 +10,7 @@
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>
+#include <uapi/linux/bpf.h>
#include "nic_reg.h"
#include "nic.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 609820e214a3..f4054d2553ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -710,7 +710,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct adapter *adapter = dev->ml_priv;
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
@@ -724,7 +726,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct adapter *adapter = dev->ml_priv;
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
@@ -940,11 +944,11 @@ static const struct net_device_ops cxgb_netdev_ops = {
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int i, err, pci_using_dac = 0;
unsigned long mmio_start, mmio_len;
const struct board_info *bi;
struct adapter *adapter = NULL;
struct port_info *pi;
+ int i, err;
err = pci_enable_device(pdev);
if (err)
@@ -957,17 +961,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_pdev;
}
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
-
- if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
- pci_name(pdev));
- err = -ENODEV;
- goto out_disable_pdev;
- }
-
- } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
goto out_disable_pdev;
}
@@ -1039,10 +1034,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_RXCSUM;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_RXCSUM | NETIF_F_LLTX;
+ NETIF_F_RXCSUM | NETIF_F_LLTX | NETIF_F_HIGHDMA;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
if (vlan_tso_capable(adapter)) {
netdev->features |=
NETIF_F_HW_VLAN_CTAG_TX |
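The DMA-mask rework in this and the later Chelsio, Emulex and Cisco probe functions relies on the modern DMA API contract: dma_set_mask_and_coherent() with a 64-bit mask only fails when the device cannot do DMA at all, so the old "try 64-bit, fall back to 32-bit, remember pci_using_dac" dance is dead code and NETIF_F_HIGHDMA can be set unconditionally. The probe-time sequence collapses to:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_pdev;
	}

	/* ... later, when setting up the net_device ... */
	netdev->features |= NETIF_F_HIGHDMA;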
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index cda01f22c71c..12e76fd0ae91 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1359,7 +1359,7 @@ static void restart_sched(struct tasklet_struct *t)
* @fl: the free list that contains the packet buffer
* @len: the packet length
*
- * Process an ingress ethernet pakcet and deliver it to the stack.
+ * Process an ingress ethernet packet and deliver it to the stack.
*/
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index bfffcaeee624..63521312cb90 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1948,7 +1948,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -1964,7 +1966,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = q->txq_size[0];
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -3200,7 +3204,7 @@ static void cxgb3_init_iscsi_mac(struct net_device *dev)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int i, err, pci_using_dac = 0;
+ int i, err;
resource_size_t mmio_start, mmio_len;
const struct adapter_info *ai;
struct adapter *adapter = NULL;
@@ -3227,9 +3231,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto out_release_regions;
}
@@ -3305,8 +3308,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= netdev->hw_features |
NETIF_F_HW_VLAN_CTAG_TX;
netdev->vlan_features |= netdev->features & VLAN_FEAT;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->netdev_ops = &cxgb_netdev_ops;
netdev->ethtool_ops = &cxgb_ethtool_ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index c3afec1041f8..62dfbdd33365 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -126,8 +126,10 @@ struct rsp_desc { /* response queue descriptor */
struct rss_header rss_hdr;
__be32 flags;
__be32 len_cq;
- u8 imm_data[47];
- u8 intr_gen;
+ struct_group(immediate,
+ u8 imm_data[47];
+ u8 intr_gen;
+ );
};
/*
@@ -925,7 +927,8 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
if (skb) {
__skb_put(skb, IMMED_PKT_SIZE);
- skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
+ BUILD_BUG_ON(IMMED_PKT_SIZE != sizeof(resp->immediate));
+ skb_copy_to_linear_data(skb, &resp->immediate, IMMED_PKT_SIZE);
}
return skb;
}
@@ -1953,7 +1956,7 @@ static int ofld_poll(struct napi_struct *napi, int budget)
* @rx_gather: a gather list of packets if we are building a bundle
* @gather_idx: index of the next available slot in the bundle
*
- * Process an ingress offload pakcet and add it to the offload ingress
+ * Process an ingress offload packet and add it to the offload ingress
* queue. Returns the index of the next available slot in the bundle.
*/
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
@@ -2079,7 +2082,7 @@ static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
* @pad: padding
* @lro: large receive offload
*
- * Process an ingress ethernet pakcet and deliver it to the stack.
+ * Process an ingress ethernet packet and deliver it to the stack.
* The padding is 2 if the packet was delivered in an Rx buffer and 0
* if it was immediate data in a response.
*/
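struct_group() (from <linux/stddef.h>) wraps adjacent members in an anonymous struct inside a union, giving them a collective name without changing the memory layout. FORTIFY_SOURCE-aware copies then see one 48-byte destination object instead of a write that appears to overflow imm_data[47] into intr_gen. A condensed version of the pattern used above (struct rsp_desc_like and copy_immediate are illustrative names):

	#include <linux/stddef.h>

	struct rsp_desc_like {
		__be32 len_cq;
		struct_group(immediate,
			u8 imm_data[47];
			u8 intr_gen;
		);
	};

	static void copy_immediate(struct sk_buff *skb,
				   const struct rsp_desc_like *resp)
	{
		BUILD_BUG_ON(sizeof(resp->immediate) != 48);
		skb_copy_to_linear_data(skb, &resp->immediate,
					sizeof(resp->immediate));
	}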
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 129352bbe114..6c790af92170 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -890,7 +890,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
@@ -906,7 +908,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
int i;
const struct port_info *pi = netdev_priv(dev);
@@ -1989,6 +1993,15 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
return 0;
}
+static bool cxgb4_fw_mod_type_info_available(unsigned int fw_mod_type)
+{
+ /* Read port module EEPROM as long as it is plugged-in and
+ * safe to read.
+ */
+ return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+ fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
static int cxgb4_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -1997,7 +2010,7 @@ static int cxgb4_get_module_info(struct net_device *dev,
struct adapter *adapter = pi->adapter;
int ret;
- if (!t4_is_inserted_mod_type(pi->mod_type))
+ if (!cxgb4_fw_mod_type_info_available(pi->mod_type))
return -EINVAL;
switch (pi->port_type) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index dde1cf51d0ab..0c78c0db8937 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6608,7 +6608,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int adap_idx = 1;
int s_qpp, qpp, num_seg;
struct port_info *pi;
- bool highdma = false;
enum chip_type chip;
void __iomem *regs;
int func, chip_ver;
@@ -6687,14 +6686,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- highdma = true;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_free_adapter;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto out_free_adapter;
}
pci_enable_pcie_error_reporting(pdev);
@@ -6823,7 +6818,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_TC | NETIF_F_NTUPLE;
+ NETIF_F_HW_TC | NETIF_F_NTUPLE | NETIF_F_HIGHDMA;
if (chip_ver > CHELSIO_T5) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM |
@@ -6841,8 +6836,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
}
- if (highdma)
- netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fa5b596ff23a..f889f404305c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1842,8 +1842,10 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* (including the VLAN tag) into the header so we reject anything
* smaller than that ...
*/
- fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) + sizeof(wr->vlantci);
+ BUILD_BUG_ON(sizeof(wr->firmware) !=
+ (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) + sizeof(wr->vlantci)));
+ fw_hdr_copy_len = sizeof(wr->firmware);
ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
if (ret)
goto out_free;
@@ -1924,7 +1926,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/* If this is a Large Send Offload packet we'll put in an LSO CPL
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fed5f93bf620..26433a62d7f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
__be32 opt2;
__be64 opt0;
__be32 iss;
- __be32 rsvd;
+ __be32 rsvd[3];
};
struct cpl_act_open_req {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0a326c054707..2419459a0b85 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -794,10 +794,12 @@ struct fw_eth_tx_pkt_vm_wr {
__be32 op_immdlen;
__be32 equiq_to_len16;
__be32 r3[2];
- u8 ethmacdst[6];
- u8 ethmacsrc[6];
- __be16 ethtype;
- __be16 vlantci;
+ struct_group(firmware,
+ u8 ethmacdst[ETH_ALEN];
+ u8 ethmacsrc[ETH_ALEN];
+ __be16 ethtype;
+ __be16 vlantci;
+ );
};
#define FW_CMD_MAX_TIMEOUT 10000
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ae9cca768d74..7de3800437c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1591,7 +1591,9 @@ static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
* first Queue Set.
*/
static void cxgb4vf_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
@@ -1614,7 +1616,9 @@ static void cxgb4vf_get_ringparam(struct net_device *dev,
* device -- after vetting them of course!
*/
static int cxgb4vf_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2895,7 +2899,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct port_info *pi;
unsigned int pmask;
- int pci_using_dac;
int err, pidx;
/*
@@ -2916,19 +2919,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
}
/*
- * Set up our DMA mask: try for 64-bit address masking first and
- * fall back to 32-bit if we can't get 64 bits ...
+ * Set up our DMA mask
*/
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err == 0) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err != 0) {
- dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto err_release_regions;
- }
- pci_using_dac = 0;
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_release_regions;
}
/*
@@ -3074,9 +3070,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0295b2406646..43b2ceb6aa32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1167,10 +1167,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
- const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
- sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) +
- sizeof(wr->vlantci));
+ const size_t fw_hdr_copy_len = sizeof(wr->firmware);
/*
* The chip minimum packet length is 10 octets but the firmware
@@ -1267,7 +1264,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/*
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index d04a6c163445..da8d10475a08 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/ipv6.h>
+#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/ip6_route.h>
@@ -99,7 +100,7 @@ cxgb_find_route(struct cxgb4_lld_info *lldi,
rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
peer_port, local_port, IPPROTO_TCP,
- tos, 0);
+ tos & ~INET_ECN_MASK, 0);
if (IS_ERR(rt))
return NULL;
n = dst_neigh_lookup(&rt->dst, &peer_ip);
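The cxgb_find_route() fix masks the ECN bits out of the TOS value before the route lookup: the low two bits of the TOS byte are the ECN field, not part of the DSCP, and leaving them set can make ip_route_output_ports() select a different route than the one the actual ECN-marked traffic will take. The pattern is simply:

	#include <net/inet_ecn.h>

	/* route on DSCP only; INET_ECN_MASK (0x3) covers the ECN bits */
	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos & ~INET_ECN_MASK, 0);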
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 84251b85fc93..21a70b1f0ac5 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -242,12 +242,15 @@ static int mac89x0_device_probe(struct platform_device *pdev)
pr_info("No EEPROM, giving up now.\n");
goto out1;
} else {
+ u8 addr[ETH_ALEN];
+
for (i = 0; i < ETH_ALEN; i += 2) {
/* Big-endian (why??!) */
unsigned short s = readreg(dev, PP_IA + i);
- dev->dev_addr[i] = s >> 8;
- dev->dev_addr[i+1] = s & 0xff;
+ addr[i] = s >> 8;
+ addr[i+1] = s & 0xff;
}
+ eth_hw_addr_set(dev, addr);
}
dev->irq = SLOT2IRQ(slot);
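With netdev->dev_addr on its way to becoming const, drivers may no longer write the address bytes in place; they assemble the MAC in a local buffer and install it atomically with eth_hw_addr_set(), as the mac89x0 hunk does. Generic shape (read_mac_word() stands in for the device-specific register read):

	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i += 2) {
		u16 word = read_mac_word(dev, i);

		addr[i] = word >> 8;
		addr[i + 1] = word & 0xff;
	}
	eth_hw_addr_set(dev, addr);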
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index c67a16a48d62..52aaf1bb5205 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -304,7 +304,7 @@ static inline bool enic_is_notify_intr(struct enic *enic, int intr)
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
- if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
+ if (unlikely(dma_mapping_error(&enic->pdev->dev, dma_addr))) {
net_warn_ratelimited("%s: PCI dma mapping failed!\n",
enic->netdev->name);
enic->gen_stats.dma_map_error++;
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 6ded4d9fa32a..6c11f9d62526 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -177,7 +177,9 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
}
static void enic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
@@ -189,7 +191,9 @@ static void enic_get_ringparam(struct net_device *netdev,
}
static int enic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index aacf141986d5..1c81b161de52 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -150,10 +150,10 @@ static void enic_set_affinity_hint(struct enic *enic)
!cpumask_available(enic->msix[i].affinity_mask) ||
cpumask_empty(enic->msix[i].affinity_mask))
continue;
- err = irq_set_affinity_hint(enic->msix_entry[i].vector,
- enic->msix[i].affinity_mask);
+ err = irq_update_affinity_hint(enic->msix_entry[i].vector,
+ enic->msix[i].affinity_mask);
if (err)
- netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+ netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
err);
}
@@ -173,7 +173,7 @@ static void enic_unset_affinity_hint(struct enic *enic)
int i;
for (i = 0; i < enic->intr_count; i++)
- irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+ irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
}
static int enic_udp_tunnel_set_port(struct net_device *netdev,
@@ -2718,26 +2718,14 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* fail to 32-bit.
*/
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(47));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_release_regions;
}
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 32);
- goto err_out_release_regions;
- }
} else {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(47));
- if (err) {
- dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 47);
- goto err_out_release_regions;
- }
using_dac = 1;
}
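The enic (and later be2net) interrupt changes switch from irq_set_affinity_hint(), which both stores the hint and forces the IRQ affinity, to irq_update_affinity_hint(), which only records the hint for user space (e.g. irqbalance) without overriding the current affinity; drivers that really want both use irq_set_affinity_and_hint(). Usage is a drop-in replacement:

	/* publish the preferred CPU mask without forcing it */
	err = irq_update_affinity_hint(vector, affinity_mask);
	if (err)
		netdev_warn(netdev, "irq_update_affinity_hint failed, err %d\n",
			    err);

	/* on teardown, clear the hint again */
	irq_update_affinity_hint(vector, NULL);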
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 941f175fb911..c78b99a497df 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -305,21 +305,21 @@ static void gmac_speed_set(struct net_device *netdev)
switch (phydev->speed) {
case 1000:
status.bits.speed = GMAC_SPEED_1000;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n",
phydev_name(phydev));
break;
case 100:
status.bits.speed = GMAC_SPEED_100;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n",
phydev_name(phydev));
break;
case 10:
status.bits.speed = GMAC_SPEED_10;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n",
phydev_name(phydev));
@@ -389,6 +389,9 @@ static int gmac_setup_phy(struct net_device *netdev)
status.bits.mii_rmii = GMAC_PHY_GMII;
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
netdev_dbg(netdev,
"RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
@@ -2105,7 +2108,9 @@ static void gmac_get_pauseparam(struct net_device *netdev,
}
static void gmac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
@@ -2123,7 +2128,9 @@ static void gmac_get_ringparam(struct net_device *netdev,
}
static int gmac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
int err = 0;
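The gemini fix replaces exact comparisons against PHY_INTERFACE_MODE_RGMII with phy_interface_mode_is_rgmii(), which per its phy.h definition also matches the RGMII_ID/RGMII_RXID/RGMII_TXID internal-delay variants, so boards that encode their clock delays in the interface mode get the right MII/RGMII register setting too:

	/* true for RGMII and all three internal-delay variants */
	if (phy_interface_mode_is_rgmii(phydev->interface))
		status.bits.mii_rmii = GMAC_PHY_RGMII_1000;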
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f9955308b93d..dfa784339781 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -683,7 +683,9 @@ static int be_get_link_ksettings(struct net_device *netdev,
}
static void be_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct be_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d51f24c9e1b8..d0c262f2695a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3491,7 +3491,7 @@ static int be_msix_register(struct be_adapter *adapter)
if (status)
goto err_msix;
- irq_set_affinity_hint(vec, eqo->affinity_mask);
+ irq_update_affinity_hint(vec, eqo->affinity_mask);
}
return 0;
@@ -3552,7 +3552,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
/* MSIx */
for_all_evt_queues(adapter, eqo, i) {
vec = be_msix_vec_get(adapter, eqo);
- irq_set_affinity_hint(vec, NULL);
+ irq_update_affinity_hint(vec, NULL);
free_irq(vec, eqo);
}
@@ -5194,7 +5194,8 @@ static void be_netdev_init(struct net_device *netdev)
netdev->hw_features |= NETIF_F_RXHASH;
netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -5840,14 +5841,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
SET_NETDEV_DEV(netdev, &pdev->dev);
status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!status) {
- netdev->features |= NETIF_F_HIGHDMA;
- } else {
- status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (status) {
- dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
- goto free_netdev;
- }
+ if (status) {
+ dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
+ goto free_netdev;
}
status = pci_enable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig
new file mode 100644
index 000000000000..f4e2b1102d8f
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Engleder network device configuration
+#
+
+config NET_VENDOR_ENGLEDER
+ bool "Engleder devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Engleder devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ENGLEDER
+
+config TSNEP
+ tristate "TSN endpoint support"
+ depends on HAS_IOMEM && HAS_DMA
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select PHYLIB
+ help
+ Support for the Engleder TSN endpoint Ethernet MAC IP Core.
+
+ To compile this driver as a module, choose M here. The module will be
+ called tsnep.
+
+config TSNEP_SELFTESTS
+ bool "TSN endpoint self test support"
+ default n
+ depends on TSNEP
+ help
+ This enables self test support within the TSN endpoint driver.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_ENGLEDER
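Based on the Kconfig above, one plausible .config fragment for building the new driver as a module with its self tests enabled (the vendor guard defaults to y already):

	CONFIG_NET_VENDOR_ENGLEDER=y
	CONFIG_TSNEP=m
	CONFIG_TSNEP_SELFTESTS=y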
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
new file mode 100644
index 000000000000..cce2191cb889
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Engleder Ethernet drivers
+#
+
+obj-$(CONFIG_TSNEP) += tsnep.o
+
+tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
+ $(tsnep-y)
+tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
new file mode 100644
index 000000000000..23bbece6b7de
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#ifndef _TSNEP_H
+#define _TSNEP_H
+
+#include "tsnep_hw.h"
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/miscdevice.h>
+
+#define TSNEP "tsnep"
+
+#define TSNEP_RING_SIZE 256
+#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
+#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
+
+#define TSNEP_QUEUES 1
+
+struct tsnep_gcl {
+ void __iomem *addr;
+
+ u64 base_time;
+ u64 cycle_time;
+ u64 cycle_time_extension;
+
+ struct tsnep_gcl_operation operation[TSNEP_GCL_COUNT];
+ int count;
+
+ u64 change_limit;
+
+ u64 start_time;
+ bool change;
+};
+
+struct tsnep_tx_entry {
+ struct tsnep_tx_desc *desc;
+ struct tsnep_tx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+ bool owner_user_flag;
+
+ u32 properties;
+
+ struct sk_buff *skb;
+ size_t len;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+};
+
+struct tsnep_tx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ /* TX ring lock */
+ spinlock_t lock;
+ struct tsnep_tx_entry entry[TSNEP_RING_SIZE];
+ int write;
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+};
+
+struct tsnep_rx_entry {
+ struct tsnep_rx_desc *desc;
+ struct tsnep_rx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+
+ u32 properties;
+
+ struct sk_buff *skb;
+ size_t len;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+};
+
+struct tsnep_rx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ struct tsnep_rx_entry entry[TSNEP_RING_SIZE];
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+ u32 multicast;
+};
+
+struct tsnep_queue {
+ struct tsnep_adapter *adapter;
+
+ struct tsnep_tx *tx;
+ struct tsnep_rx *rx;
+
+ struct napi_struct napi;
+
+ u32 irq_mask;
+};
+
+struct tsnep_adapter {
+ struct net_device *netdev;
+ u8 mac_address[ETH_ALEN];
+ struct mii_bus *mdiobus;
+ bool suppress_preamble;
+ phy_interface_t phy_mode;
+ struct phy_device *phydev;
+ int msg_enable;
+
+ struct platform_device *pdev;
+ struct device *dmadev;
+ void __iomem *addr;
+ int irq;
+
+ bool gate_control;
+ /* gate control lock */
+ struct mutex gate_control_lock;
+ bool gate_control_active;
+ struct tsnep_gcl gcl[2];
+ int next_gcl;
+
+ struct hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ /* ptp clock lock */
+ spinlock_t ptp_lock;
+
+ int num_tx_queues;
+ struct tsnep_tx tx[TSNEP_MAX_QUEUES];
+ int num_rx_queues;
+ struct tsnep_rx rx[TSNEP_MAX_QUEUES];
+
+ int num_queues;
+ struct tsnep_queue queue[TSNEP_MAX_QUEUES];
+};
+
+extern const struct ethtool_ops tsnep_ethtool_ops;
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter);
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter);
+int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+
+int tsnep_tc_init(struct tsnep_adapter *adapter);
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+
+#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
+int tsnep_ethtool_get_test_count(void);
+void tsnep_ethtool_get_test_strings(u8 *data);
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data);
+#else
+static inline int tsnep_ethtool_get_test_count(void)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ /* not enabled */
+}
+
+static inline void tsnep_ethtool_self_test(struct net_device *dev,
+ struct ethtool_test *eth_test,
+ u64 *data)
+{
+ /* not enabled */
+}
+#endif /* CONFIG_TSNEP_SELFTESTS */
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time);
+
+#endif /* _TSNEP_H */
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
new file mode 100644
index 000000000000..e6760dc68ddd
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+static const char tsnep_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bytes",
+ "rx_dropped",
+ "rx_multicast",
+ "rx_phy_errors",
+ "rx_forwarded_phy_errors",
+ "rx_invalid_frame_errors",
+ "tx_packets",
+ "tx_bytes",
+ "tx_dropped",
+};
+
+struct tsnep_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_phy_errors;
+ u64 rx_forwarded_phy_errors;
+ u64 rx_invalid_frame_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_STATS_COUNT (sizeof(struct tsnep_stats) / sizeof(u64))
+
+static const char tsnep_rx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_%d_packets",
+ "rx_%d_bytes",
+ "rx_%d_dropped",
+ "rx_%d_multicast",
+ "rx_%d_no_descriptor_errors",
+ "rx_%d_buffer_too_small_errors",
+ "rx_%d_fifo_overflow_errors",
+ "rx_%d_invalid_frame_errors",
+};
+
+struct tsnep_rx_queue_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_no_descriptor_errors;
+ u64 rx_buffer_too_small_errors;
+ u64 rx_fifo_overflow_errors;
+ u64 rx_invalid_frame_errors;
+};
+
+#define TSNEP_RX_QUEUE_STATS_COUNT (sizeof(struct tsnep_rx_queue_stats) / \
+ sizeof(u64))
+
+static const char tsnep_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "tx_%d_packets",
+ "tx_%d_bytes",
+ "tx_%d_dropped",
+};
+
+struct tsnep_tx_queue_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_TX_QUEUE_STATS_COUNT (sizeof(struct tsnep_tx_queue_stats) / \
+ sizeof(u64))
+
+static void tsnep_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ strscpy(drvinfo->driver, TSNEP, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(&adapter->pdev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static int tsnep_ethtool_get_regs_len(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int len;
+ int num_additional_queues;
+
+ len = TSNEP_MAC_SIZE;
+
+ /* first queue pair is within TSNEP_MAC_SIZE, only queues additional to
+ * the first queue pair extend the register length by TSNEP_QUEUE_SIZE
+ */
+ num_additional_queues =
+ max(adapter->num_tx_queues, adapter->num_rx_queues) - 1;
+ len += TSNEP_QUEUE_SIZE * num_additional_queues;
+
+ return len;
+}
+
+static void tsnep_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ regs->version = 1;
+
+ memcpy_fromio(p, adapter->addr, regs->len);
+}
+
+static u32 tsnep_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void tsnep_ethtool_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = data;
+}
+
+static void tsnep_ethtool_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, tsnep_stats_strings, sizeof(tsnep_stats_strings));
+ data += sizeof(tsnep_stats_strings);
+
+ for (i = 0; i < rx_count; i++) {
+ for (j = 0; j < TSNEP_RX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_rx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ for (j = 0; j < TSNEP_TX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_tx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ case ETH_SS_TEST:
+ tsnep_ethtool_get_test_strings(data);
+ break;
+ }
+}
+
+static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ struct tsnep_stats tsnep_stats;
+ struct tsnep_rx_queue_stats tsnep_rx_queue_stats;
+ struct tsnep_tx_queue_stats tsnep_tx_queue_stats;
+ u32 reg;
+ int i;
+
+ memset(&tsnep_stats, 0, sizeof(tsnep_stats));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ tsnep_stats.rx_packets += adapter->rx[i].packets;
+ tsnep_stats.rx_bytes += adapter->rx[i].bytes;
+ tsnep_stats.rx_dropped += adapter->rx[i].dropped;
+ tsnep_stats.rx_multicast += adapter->rx[i].multicast;
+ }
+ reg = ioread32(adapter->addr + ECM_STAT);
+ tsnep_stats.rx_phy_errors =
+ (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ tsnep_stats.rx_forwarded_phy_errors =
+ (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ tsnep_stats.rx_invalid_frame_errors =
+ (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tsnep_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_stats.tx_dropped += adapter->tx[i].dropped;
+ }
+ memcpy(data, &tsnep_stats, sizeof(tsnep_stats));
+ data += TSNEP_STATS_COUNT;
+
+ for (i = 0; i < rx_count; i++) {
+ memset(&tsnep_rx_queue_stats, 0, sizeof(tsnep_rx_queue_stats));
+ tsnep_rx_queue_stats.rx_packets = adapter->rx[i].packets;
+ tsnep_rx_queue_stats.rx_bytes = adapter->rx[i].bytes;
+ tsnep_rx_queue_stats.rx_dropped = adapter->rx[i].dropped;
+ tsnep_rx_queue_stats.rx_multicast = adapter->rx[i].multicast;
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ tsnep_rx_queue_stats.rx_no_descriptor_errors =
+ (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ tsnep_rx_queue_stats.rx_buffer_too_small_errors =
+ (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ tsnep_rx_queue_stats.rx_fifo_overflow_errors =
+ (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ tsnep_rx_queue_stats.rx_invalid_frame_errors =
+ (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ memcpy(data, &tsnep_rx_queue_stats,
+ sizeof(tsnep_rx_queue_stats));
+ data += TSNEP_RX_QUEUE_STATS_COUNT;
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ memset(&tsnep_tx_queue_stats, 0, sizeof(tsnep_tx_queue_stats));
+ tsnep_tx_queue_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_tx_queue_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_tx_queue_stats.tx_dropped += adapter->tx[i].dropped;
+ memcpy(data, &tsnep_tx_queue_stats,
+ sizeof(tsnep_tx_queue_stats));
+ data += TSNEP_TX_QUEUE_STATS_COUNT;
+ }
+}
+
+static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count;
+ int tx_count;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ rx_count = adapter->num_rx_queues;
+ tx_count = adapter->num_tx_queues;
+ return TSNEP_STATS_COUNT +
+ TSNEP_RX_QUEUE_STATS_COUNT * rx_count +
+ TSNEP_TX_QUEUE_STATS_COUNT * tx_count;
+ case ETH_SS_TEST:
+ return tsnep_ethtool_get_test_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+const struct ethtool_ops tsnep_ethtool_ops = {
+ .get_drvinfo = tsnep_ethtool_get_drvinfo,
+ .get_regs_len = tsnep_ethtool_get_regs_len,
+ .get_regs = tsnep_ethtool_get_regs,
+ .get_msglevel = tsnep_ethtool_get_msglevel,
+ .set_msglevel = tsnep_ethtool_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .self_test = tsnep_ethtool_self_test,
+ .get_strings = tsnep_ethtool_get_strings,
+ .get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
+ .get_sset_count = tsnep_ethtool_get_sset_count,
+ .get_ts_info = tsnep_ethtool_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
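The ethtool stats plumbing above keeps three things in lock-step by construction: the string tables, the plain-u64 stats structs, and the counts derived as sizeof(struct)/sizeof(u64). get_sset_count() and the "data +=" strides in get_ethtool_stats() all use the same *_COUNT values, and the "%d" placeholders in the per-queue string templates are expanded per queue index with snprintf(). A compile-time guard one could add to make the pairing explicit (a sketch, not part of the patch):

	static_assert(ARRAY_SIZE(tsnep_stats_strings) == TSNEP_STATS_COUNT);
	static_assert(ARRAY_SIZE(tsnep_rx_queue_stats_strings) ==
		      TSNEP_RX_QUEUE_STATS_COUNT);
	static_assert(ARRAY_SIZE(tsnep_tx_queue_stats_strings) ==
		      TSNEP_TX_QUEUE_STATS_COUNT);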
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
new file mode 100644
index 000000000000..71cc8577d640
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* Hardware definition of TSNEP and EtherCAT MAC device */
+
+#ifndef _TSNEP_HW_H
+#define _TSNEP_HW_H
+
+#include <linux/types.h>
+
+/* type */
+#define ECM_TYPE 0x0000
+#define ECM_REVISION_MASK 0x000000FF
+#define ECM_REVISION_SHIFT 0
+#define ECM_VERSION_MASK 0x0000FF00
+#define ECM_VERSION_SHIFT 8
+#define ECM_QUEUE_COUNT_MASK 0x00070000
+#define ECM_QUEUE_COUNT_SHIFT 16
+#define ECM_GATE_CONTROL 0x02000000
+
+/* system time */
+#define ECM_SYSTEM_TIME_LOW 0x0008
+#define ECM_SYSTEM_TIME_HIGH 0x000C
+
+/* clock */
+#define ECM_CLOCK_RATE 0x0010
+#define ECM_CLOCK_RATE_OFFSET_MASK 0x7FFFFFFF
+#define ECM_CLOCK_RATE_OFFSET_SIGN 0x80000000
+
+/* interrupt */
+#define ECM_INT_ENABLE 0x0018
+#define ECM_INT_ACTIVE 0x001C
+#define ECM_INT_ACKNOWLEDGE 0x001C
+#define ECM_INT_LINK 0x00000020
+#define ECM_INT_TX_0 0x00000100
+#define ECM_INT_RX_0 0x00000200
+#define ECM_INT_ALL 0x7FFFFFFF
+#define ECM_INT_DISABLE 0x80000000
+
+/* reset */
+#define ECM_RESET 0x0020
+#define ECM_RESET_COMMON 0x00000001
+#define ECM_RESET_CHANNEL 0x00000100
+#define ECM_RESET_TXRX 0x00010000
+
+/* control and status */
+#define ECM_STATUS 0x0080
+#define ECM_LINK_MODE_OFF 0x01000000
+#define ECM_LINK_MODE_100 0x02000000
+#define ECM_LINK_MODE_1000 0x04000000
+#define ECM_NO_LINK 0x01000000
+#define ECM_LINK_MODE_MASK 0x06000000
+
+/* management data */
+#define ECM_MD_CONTROL 0x0084
+#define ECM_MD_STATUS 0x0084
+#define ECM_MD_PREAMBLE 0x00000001
+#define ECM_MD_READ 0x00000004
+#define ECM_MD_WRITE 0x00000002
+#define ECM_MD_ADDR_MASK 0x000000F8
+#define ECM_MD_ADDR_SHIFT 3
+#define ECM_MD_PHY_ADDR_MASK 0x00001F00
+#define ECM_MD_PHY_ADDR_SHIFT 8
+#define ECM_MD_BUSY 0x00000001
+#define ECM_MD_DATA_MASK 0xFFFF0000
+#define ECM_MD_DATA_SHIFT 16
+
+/* statistic */
+#define ECM_STAT 0x00B0
+#define ECM_STAT_RX_ERR_MASK 0x000000FF
+#define ECM_STAT_RX_ERR_SHIFT 0
+#define ECM_STAT_INV_FRM_MASK 0x0000FF00
+#define ECM_STAT_INV_FRM_SHIFT 8
+#define ECM_STAT_FWD_RX_ERR_MASK 0x00FF0000
+#define ECM_STAT_FWD_RX_ERR_SHIFT 16
+
+/* tsnep */
+#define TSNEP_MAC_SIZE 0x4000
+#define TSNEP_QUEUE_SIZE 0x1000
+#define TSNEP_QUEUE(n) ({ typeof(n) __n = (n); \
+ (__n) == 0 ? \
+ 0 : \
+ TSNEP_MAC_SIZE + TSNEP_QUEUE_SIZE * ((__n) - 1); })
+#define TSNEP_MAX_QUEUES 8
+#define TSNEP_MAX_FRAME_SIZE (2 * 1024) /* hardware actually supports 16k */
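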
+#define TSNEP_DESC_SIZE 256
+#define TSNEP_DESC_OFFSET 128
+
+/* tsnep register */
+#define TSNEP_INFO 0x0100
+#define TSNEP_INFO_RX_ASSIGN 0x00010000
+#define TSNEP_INFO_TX_TIME 0x00020000
+#define TSNEP_CONTROL 0x0108
+#define TSNEP_CONTROL_TX_RESET 0x00000001
+#define TSNEP_CONTROL_TX_ENABLE 0x00000002
+#define TSNEP_CONTROL_TX_DMA_ERROR 0x00000010
+#define TSNEP_CONTROL_TX_DESC_ERROR 0x00000020
+#define TSNEP_CONTROL_RX_RESET 0x00000100
+#define TSNEP_CONTROL_RX_ENABLE 0x00000200
+#define TSNEP_CONTROL_RX_DISABLE 0x00000400
+#define TSNEP_CONTROL_RX_DMA_ERROR 0x00001000
+#define TSNEP_CONTROL_RX_DESC_ERROR 0x00002000
+#define TSNEP_TX_DESC_ADDR_LOW 0x0140
+#define TSNEP_TX_DESC_ADDR_HIGH 0x0144
+#define TSNEP_RX_DESC_ADDR_LOW 0x0180
+#define TSNEP_RX_DESC_ADDR_HIGH 0x0184
+#define TSNEP_RESET_OWNER_COUNTER 0x01
+#define TSNEP_RX_STATISTIC 0x0190
+#define TSNEP_RX_STATISTIC_NO_DESC_MASK 0x000000FF
+#define TSNEP_RX_STATISTIC_NO_DESC_SHIFT 0
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK 0x0000FF00
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT 8
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK 0x00FF0000
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT 16
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_MASK 0xFF000000
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT 24
+#define TSNEP_RX_STATISTIC_NO_DESC 0x0190
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192
+#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193
+#define TSNEP_RX_ASSIGN 0x01A0
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_ACTIVE 0x00000001
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_MASK 0xFFFF0000
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_SHIFT 16
+#define TSNEP_MAC_ADDRESS_LOW 0x0800
+#define TSNEP_MAC_ADDRESS_HIGH 0x0804
+#define TSNEP_RX_FILTER 0x0806
+#define TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS 0x0001
+#define TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS 0x0002
+#define TSNEP_GC 0x0808
+#define TSNEP_GC_ENABLE_A 0x00000002
+#define TSNEP_GC_ENABLE_B 0x00000004
+#define TSNEP_GC_DISABLE 0x00000008
+#define TSNEP_GC_ENABLE_TIMEOUT 0x00000010
+#define TSNEP_GC_ACTIVE_A 0x00000002
+#define TSNEP_GC_ACTIVE_B 0x00000004
+#define TSNEP_GC_CHANGE_AB 0x00000008
+#define TSNEP_GC_TIMEOUT_ACTIVE 0x00000010
+#define TSNEP_GC_TIMEOUT_SIGNAL 0x00000020
+#define TSNEP_GC_LIST_ERROR 0x00000080
+#define TSNEP_GC_OPEN 0x00FF0000
+#define TSNEP_GC_OPEN_SHIFT 16
+#define TSNEP_GC_NEXT_OPEN 0xFF000000
+#define TSNEP_GC_NEXT_OPEN_SHIFT 24
+#define TSNEP_GC_TIMEOUT 131072
+#define TSNEP_GC_TIME 0x080C
+#define TSNEP_GC_CHANGE 0x0810
+#define TSNEP_GCL_A 0x2000
+#define TSNEP_GCL_B 0x2800
+#define TSNEP_GCL_SIZE SZ_2K
+
+/* tsnep gate control list operation */
+struct tsnep_gcl_operation {
+ u32 properties;
+ u32 interval;
+};
+
+#define TSNEP_GCL_COUNT (TSNEP_GCL_SIZE / sizeof(struct tsnep_gcl_operation))
+#define TSNEP_GCL_MASK 0x000000FF
+#define TSNEP_GCL_INSERT 0x20000000
+#define TSNEP_GCL_CHANGE 0x40000000
+#define TSNEP_GCL_LAST 0x80000000
+#define TSNEP_GCL_MIN_INTERVAL 32
+
+/* tsnep TX/RX descriptor */
+#define TSNEP_DESC_SIZE 256
+#define TSNEP_DESC_SIZE_DATA_AFTER 2048
+#define TSNEP_DESC_OFFSET 128
+#define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000
+#define TSNEP_DESC_OWNER_COUNTER_SHIFT 30
+#define TSNEP_DESC_LENGTH_MASK 0x00003FFF
+#define TSNEP_DESC_INTERRUPT_FLAG 0x00040000
+#define TSNEP_DESC_EXTENDED_WRITEBACK_FLAG 0x00080000
+#define TSNEP_DESC_NO_LINK_FLAG 0x01000000
+
+/* tsnep TX descriptor */
+struct tsnep_tx_desc {
+ __le32 properties;
+ __le32 more_properties;
+ __le32 reserved[2];
+ __le64 next;
+ __le64 tx;
+};
+
+#define TSNEP_TX_DESC_OWNER_MASK 0xE0000000
+#define TSNEP_TX_DESC_OWNER_USER_FLAG 0x20000000
+#define TSNEP_TX_DESC_LAST_FRAGMENT_FLAG 0x00010000
+#define TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG 0x00020000
+
+/* tsnep TX descriptor writeback */
+struct tsnep_tx_desc_wb {
+ __le32 properties;
+ __le32 reserved1[3];
+ __le64 timestamp;
+ __le32 dma_delay;
+ __le32 reserved2;
+};
+
+#define TSNEP_TX_DESC_UNDERRUN_ERROR_FLAG 0x00010000
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_MASK 0x0000FFFC
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_SHIFT 2
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_MASK 0xFFFC0000
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_SHIFT 18
+#define TSNEP_TX_DESC_DMA_DELAY_NS 64
+
+/* tsnep RX descriptor */
+struct tsnep_rx_desc {
+ __le32 properties;
+ __le32 reserved[3];
+ __le64 next;
+ __le64 rx;
+};
+
+#define TSNEP_RX_DESC_BUFFER_SIZE_MASK 0x00003FFC
+
+/* tsnep RX descriptor writeback */
+struct tsnep_rx_desc_wb {
+ __le32 properties;
+ __le32 reserved[7];
+};
+
+/* tsnep RX inline meta */
+struct tsnep_rx_inline {
+ __le64 reserved;
+ __le64 timestamp;
+};
+
+#define TSNEP_RX_INLINE_METADATA_SIZE (sizeof(struct tsnep_rx_inline))
+
+#endif /* _TSNEP_HW_H */
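TSNEP_QUEUE() is a GNU statement expression that maps a queue index to its register window: queue 0 lives inside the base MAC block, while every further queue pair gets its own TSNEP_QUEUE_SIZE window after TSNEP_MAC_SIZE. With the constants above:

	TSNEP_QUEUE(0) == 0x0000	/* inside the MAC block */
	TSNEP_QUEUE(1) == 0x4000	/* TSNEP_MAC_SIZE */
	TSNEP_QUEUE(2) == 0x5000	/* + TSNEP_QUEUE_SIZE */

	/* typical use, as in the ethtool stats code earlier: */
	reg = ioread32(adapter->addr + TSNEP_QUEUE(i) + TSNEP_RX_STATISTIC);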
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
new file mode 100644
index 000000000000..904f3304727e
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -0,0 +1,1272 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* TSN endpoint Ethernet MAC driver
+ *
+ * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
+ * communication. It is designed for endpoints within TSN (Time Sensitive
+ * Networking) networks; e.g., for PLCs in the industrial automation case.
+ *
+ * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
+ * by the driver.
+ *
+ * More information can be found here:
+ * - www.embedded-experts.at/tsn
+ * - www.engleder-embedded.com
+ */
+
+#include "tsnep.h"
+#include "tsnep_hw.h"
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/iopoll.h>
+
+#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
+ TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
+#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
+#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
+#else
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
+#endif
+#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
+
+static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ mask |= ECM_INT_DISABLE;
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static irqreturn_t tsnep_irq(int irq, void *arg)
+{
+ struct tsnep_adapter *adapter = arg;
+ u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);
+
+ /* acknowledge interrupt */
+ if (active != 0)
+ iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
+
+ /* handle link interrupt */
+ if ((active & ECM_INT_LINK) != 0) {
+ if (adapter->netdev->phydev)
+ phy_mac_interrupt(adapter->netdev->phydev);
+ }
+
+ /* handle TX/RX queue 0 interrupt */
+ if ((active & adapter->queue[0].irq_mask) != 0) {
+ if (adapter->netdev) {
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ napi_schedule(&adapter->queue[0].napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ md = ECM_MD_READ;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
+}
+
+static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ md = ECM_MD_WRITE;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
+static void tsnep_phy_link_status_change(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 mode;
+
+ if (phydev->link) {
+ switch (phydev->speed) {
+ case SPEED_100:
+ mode = ECM_LINK_MODE_100;
+ break;
+ case SPEED_1000:
+ mode = ECM_LINK_MODE_1000;
+ break;
+ default:
+ mode = ECM_LINK_MODE_OFF;
+ break;
+ }
+ iowrite32(mode, adapter->addr + ECM_STATUS);
+ }
+
+ phy_print_status(netdev->phydev);
+}
+
+static int tsnep_phy_open(struct tsnep_adapter *adapter)
+{
+ struct phy_device *phydev;
+ struct ethtool_eee ethtool_eee;
+ int retval;
+
+ retval = phy_connect_direct(adapter->netdev, adapter->phydev,
+ tsnep_phy_link_status_change,
+ adapter->phy_mode);
+ if (retval)
+ return retval;
+ phydev = adapter->netdev->phydev;
+
+ /* MAC supports only 100Mbps|1000Mbps full duplex
+ * SPE (Single Pair Ethernet) is also an option but not implemented yet
+ */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ /* disable EEE autoneg, EEE not supported by TSNEP */
+ memset(&ethtool_eee, 0, sizeof(ethtool_eee));
+ phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
+
+ adapter->phydev->irq = PHY_MAC_INTERRUPT;
+ phy_start(adapter->phydev);
+
+ return 0;
+}
+
+static void tsnep_phy_close(struct tsnep_adapter *adapter)
+{
+ phy_stop(adapter->netdev->phydev);
+ phy_disconnect(adapter->netdev->phydev);
+ adapter->netdev->phydev = NULL;
+}
+
+static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ int i;
+
+ memset(tx->entry, 0, sizeof(tx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (tx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
+ tx->page_dma[i]);
+ tx->page[i] = NULL;
+ tx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_tx_ring_init(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct tsnep_tx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ tx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
+ GFP_KERNEL);
+ if (!tx->page[i]) {
+ retval = -ENOMEM;
+ goto alloc_failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_tx_desc_wb *)
+ (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_tx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ }
+ }
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &tx->entry[i];
+ next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+ }
+
+ return 0;
+
+alloc_failed:
+ tsnep_tx_ring_cleanup(tx);
+ return retval;
+}
+
+static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last)
+{
+ struct tsnep_tx_entry *entry = &tx->entry[index];
+
+ entry->properties = 0;
+ if (entry->skb) {
+ entry->properties =
+ skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+ if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
+
+ /* toggle user flag to prevent false acknowledge
+ *
+ * Only the first fragment is acknowledged. For all other
+ * fragments no acknowledge is done and the last written owner
+ * counter stays in the writeback descriptor. Therefore, it is
+ * possible that the last written owner counter is identical to
+ * the new incremented owner counter and a false acknowledge is
+ * detected before the real acknowledge has been done by
+ * hardware.
+ *
+ * The user flag is used to prevent this situation. The user
+ * flag is copied to the writeback descriptor by the hardware
+ * and is used as additional acknowledge data. By toggling the
+ * user flag only for the first fragment (which is
+ * acknowledged), it is guaranteed that the last acknowledge
+ * done for this descriptor has used a different user flag and
+ * cannot be detected as false acknowledge.
+ */
+ entry->owner_user_flag = !entry->owner_user_flag;
+ }
+ if (last)
+ entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
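+ /* the owner counter cycles through the values 1 to 3 and is advanced
+ * once per pass over the ring; the trigger index moves back by one
+ * entry on each pass
+ */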
+ if (index == tx->increment_owner_counter) {
+ tx->owner_counter++;
+ if (tx->owner_counter == 4)
+ tx->owner_counter = 1;
+ tx->increment_owner_counter--;
+ if (tx->increment_owner_counter < 0)
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+ if (entry->owner_user_flag)
+ entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
+ entry->desc->more_properties =
+ __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
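+/* one ring entry is always kept unused to distinguish a full ring from an
+ * empty one
+ */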
+static int tsnep_tx_desc_available(struct tsnep_tx *tx)
+{
+ if (tx->read <= tx->write)
+ return TSNEP_RING_SIZE - tx->write + tx->read - 1;
+ else
+ return tx->read - tx->write - 1;
+}
+
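+/* map skb head and all page fragments for DMA; one ring entry per fragment */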
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ unsigned int len;
+ dma_addr_t dma;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
+
+ if (i == 0) {
+ len = skb_headlen(skb);
+ dma = dma_map_single(dmadev, skb->data, len,
+ DMA_TO_DEVICE);
+ } else {
+ len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
+ dma = skb_frag_dma_map(dmadev,
+ &skb_shinfo(skb)->frags[i - 1],
+ 0, len, DMA_TO_DEVICE);
+ }
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+
+ entry->len = len;
+ dma_unmap_addr_set(entry, dma, dma);
+
+ entry->desc->tx = __cpu_to_le64(dma);
+ }
+
+ return 0;
+}
+
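+/* unmap count ring entries starting at the current read position */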
+static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE];
+
+ if (entry->len) {
+ if (i == 0)
+ dma_unmap_single(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ entry->len = 0;
+ }
+ }
+}
+
+static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ struct tsnep_tx *tx)
+{
+ unsigned long flags;
+ int count = 1;
+ struct tsnep_tx_entry *entry;
+ int i;
+ int retval;
+
+ if (skb_shinfo(skb)->nr_frags > 0)
+ count += skb_shinfo(skb)->nr_frags;
+
+ spin_lock_irqsave(&tx->lock, flags);
+
+ if (tsnep_tx_desc_available(tx) < count) {
+ /* ring full; should never happen, because the queue is stopped
+ * below whenever it gets full
+ */
+ netif_stop_queue(tx->adapter->netdev);
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = &tx->entry[tx->write];
+ entry->skb = skb;
+
+ retval = tsnep_tx_map(skb, tx, count);
+ if (retval != 0) {
+ tsnep_tx_unmap(tx, count);
+ dev_kfree_skb_any(entry->skb);
+ entry->skb = NULL;
+
+ tx->dropped++;
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ netdev_err(tx->adapter->netdev, "TX DMA map failed\n");
+
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE,
+ i == (count - 1));
+ tx->write = (tx->write + count) % TSNEP_RING_SIZE;
+
+ skb_tx_timestamp(skb);
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
+ /* ring can get full with next frame */
+ netif_stop_queue(tx->adapter->netdev);
+ }
+
+ tx->packets++;
+ tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN;
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
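+/* process completed TX descriptors; returns false if the local cleanup budget
+ * was exhausted
+ */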
+static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+{
+ unsigned long flags;
+ int budget = 128;
+ struct tsnep_tx_entry *entry;
+ int count;
+
+ spin_lock_irqsave(&tx->lock, flags);
+
+ do {
+ if (tx->read == tx->write)
+ break;
+
+ entry = &tx->entry[tx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_TX_DESC_OWNER_MASK) !=
+ (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+ break;
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ count = 1;
+ if (skb_shinfo(entry->skb)->nr_frags > 0)
+ count += skb_shinfo(entry->skb)->nr_frags;
+
+ tsnep_tx_unmap(tx, count);
+
+ if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ (__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 timestamp =
+ __le64_to_cpu(entry->desc_wb->timestamp);
+
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(timestamp);
+
+ skb_tstamp_tx(entry->skb, &hwtstamps);
+ }
+
+ napi_consume_skb(entry->skb, budget);
+ entry->skb = NULL;
+
+ tx->read = (tx->read + count) % TSNEP_RING_SIZE;
+
+ budget--;
+ } while (likely(budget));
+
+ if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
+ netif_queue_stopped(tx->adapter->netdev)) {
+ netif_wake_queue(tx->adapter->netdev);
+ }
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return (budget != 0);
+}
+
+static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
+ struct tsnep_tx *tx)
+{
+ dma_addr_t dma;
+ int retval;
+
+ memset(tx, 0, sizeof(*tx));
+ tx->adapter = adapter;
+ tx->addr = addr;
+
+ retval = tsnep_tx_ring_init(tx);
+ if (retval)
+ return retval;
+
+ dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
+ tx->owner_counter = 1;
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+
+ spin_lock_init(&tx->lock);
+
+ return 0;
+}
+
+static void tsnep_tx_close(struct tsnep_tx *tx)
+{
+ u32 val;
+
+ readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
+ 1000000);
+
+ tsnep_tx_ring_cleanup(tx);
+}
+
+static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ int i;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ if (dma_unmap_addr(entry, dma))
+ dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_FROM_DEVICE);
+ if (entry->skb)
+ dev_kfree_skb(entry->skb);
+ }
+
+ memset(rx->entry, 0, sizeof(rx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (rx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
+ rx->page_dma[i]);
+ rx->page[i] = NULL;
+ rx->page_dma[i] = 0;
+ }
+ }
+}
+
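+/* allocate a new RX skb and map it for DMA; on success the buffer pointer of
+ * the descriptor is updated
+ */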
+static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
+ struct tsnep_rx_entry *entry)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+
+ skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
+ GFP_ATOMIC | GFP_DMA);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, RX_SKB_RESERVE);
+
+ dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dmadev, dma)) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ entry->skb = skb;
+ entry->len = RX_SKB_LENGTH;
+ dma_unmap_addr_set(entry, dma, dma);
+ entry->desc->rx = __cpu_to_le64(dma);
+
+ return 0;
+}
+
+static int tsnep_rx_ring_init(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ struct tsnep_rx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ rx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
+ GFP_KERNEL);
+ if (!rx->page[i]) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_rx_desc_wb *)
+ (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_rx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ }
+ }
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+
+ retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ if (retval)
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ tsnep_rx_ring_cleanup(rx);
+ return retval;
+}
+
+static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+
+ /* RX_SKB_LENGTH is a multiple of 4 */
+ entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+ if (index == rx->increment_owner_counter) {
+ rx->owner_counter++;
+ if (rx->owner_counter == 4)
+ rx->owner_counter = 1;
+ rx->increment_owner_counter--;
+ if (rx->increment_owner_counter < 0)
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
+static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
+ int budget)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ int done = 0;
+ struct tsnep_rx_entry *entry;
+ struct sk_buff *skb;
+ size_t len;
+ dma_addr_t dma;
+ int length;
+ bool enable = false;
+ int retval;
+
+ while (likely(done < budget)) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) !=
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ break;
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ skb = entry->skb;
+ len = dma_unmap_len(entry, len);
+ dma = dma_unmap_addr(entry, dma);
+
+ /* forward skb only if allocation is successful, otherwise
+ * skb is reused and frame dropped
+ */
+ retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ if (!retval) {
+ dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);
+
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ skb_put(skb, length - ETH_FCS_LEN);
+ if (rx->adapter->hwtstamp_config.rx_filter ==
+ HWTSTAMP_FILTER_ALL) {
+ struct skb_shared_hwtstamps *hwtstamps =
+ skb_hwtstamps(skb);
+ struct tsnep_rx_inline *rx_inline =
+ (struct tsnep_rx_inline *)skb->data;
+ u64 timestamp =
+ __le64_to_cpu(rx_inline->timestamp);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(timestamp);
+ }
+ skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
+ skb->protocol = eth_type_trans(skb,
+ rx->adapter->netdev);
+
+ rx->packets++;
+ rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
+
+ napi_gro_receive(napi, skb);
+ done++;
+ } else {
+ rx->dropped++;
+ }
+
+ tsnep_rx_activate(rx, rx->read);
+
+ enable = true;
+
+ rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+ }
+
+ if (enable) {
+ /* descriptor properties shall be valid before hardware is
+ * notified
+ */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+ }
+
+ return done;
+}
+
+static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
+ struct tsnep_rx *rx)
+{
+ dma_addr_t dma;
+ int i;
+ int retval;
+
+ memset(rx, 0, sizeof(*rx));
+ rx->adapter = adapter;
+ rx->addr = addr;
+
+ retval = tsnep_rx_ring_init(rx);
+ if (retval)
+ return retval;
+
+ dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
+ rx->owner_counter = 1;
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++)
+ tsnep_rx_activate(rx, i);
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+
+ return 0;
+}
+
+static void tsnep_rx_close(struct tsnep_rx *rx)
+{
+ u32 val;
+
+ iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
+ readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
+ 1000000);
+
+ tsnep_rx_ring_cleanup(rx);
+}
+
+static int tsnep_poll(struct napi_struct *napi, int budget)
+{
+ struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
+ napi);
+ bool complete = true;
+ int done = 0;
+
+ if (queue->tx)
+ complete = tsnep_tx_poll(queue->tx, budget);
+
+ if (queue->rx) {
+ done = tsnep_rx_poll(queue->rx, napi, budget);
+ if (done >= budget)
+ complete = false;
+ }
+
+ /* if all work not completed, return budget and keep polling */
+ if (!complete)
+ return budget;
+
+ if (likely(napi_complete_done(napi, done)))
+ tsnep_enable_irq(queue->adapter, queue->irq_mask);
+
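+ /* return less than budget to signal that polling is complete */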
+ return min(done, budget - 1);
+}
+
+static int tsnep_netdev_open(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+ void __iomem *addr;
+ int tx_queue_index = 0;
+ int rx_queue_index = 0;
+ int retval;
+
+ retval = tsnep_phy_open(adapter);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ adapter->queue[i].adapter = adapter;
+ if (adapter->queue[i].tx) {
+ addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
+ retval = tsnep_tx_open(adapter, addr,
+ adapter->queue[i].tx);
+ if (retval)
+ goto failed;
+ tx_queue_index++;
+ }
+ if (adapter->queue[i].rx) {
+ addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
+ retval = tsnep_rx_open(adapter, addr,
+ adapter->queue[i].rx);
+ if (retval)
+ goto failed;
+ rx_queue_index++;
+ }
+ }
+
+ retval = netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_queues);
+ if (retval)
+ goto failed;
+ retval = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ if (retval)
+ goto failed;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
+ tsnep_poll, 64);
+ napi_enable(&adapter->queue[i].napi);
+
+ tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
+ }
+
+ return 0;
+
+failed:
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+ tsnep_phy_close(adapter);
+ return retval;
+}
+
+static int tsnep_netdev_close(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
+
+ napi_disable(&adapter->queue[i].napi);
+ netif_napi_del(&adapter->queue[i].napi);
+
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+
+ tsnep_phy_close(adapter);
+
+ return 0;
+}
+
+static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+
+ if (queue_mapping >= adapter->num_tx_queues)
+ queue_mapping = 0;
+
+ return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
+}
+
+static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
+ return tsnep_ptp_ioctl(netdev, ifr, cmd);
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void tsnep_netdev_set_multicast(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u16 rx_filter = 0;
+
+ /* configured MAC address and broadcasts are never filtered */
+ if (netdev->flags & IFF_PROMISC) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
+ } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ }
+ iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
+}
+
+static void tsnep_netdev_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u32 reg;
+ u32 val;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ stats->tx_packets += adapter->tx[i].packets;
+ stats->tx_bytes += adapter->tx[i].bytes;
+ stats->tx_dropped += adapter->tx[i].dropped;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ stats->rx_packets += adapter->rx[i].packets;
+ stats->rx_bytes += adapter->rx[i].bytes;
+ stats->rx_dropped += adapter->rx[i].dropped;
+ stats->multicast += adapter->rx[i].multicast;
+
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_fifo_errors += val;
+ val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_frame_errors += val;
+ }
+
+ reg = ioread32(adapter->addr + ECM_STAT);
+ val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+ val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_crc_errors += val;
+ val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+}
+
+static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
+{
+ iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ iowrite16(*(u16 *)(addr + sizeof(u32)),
+ adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+
+ ether_addr_copy(adapter->mac_address, addr);
+ netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
+ addr);
+}
+
+static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *sock_addr = addr;
+ int retval;
+
+ retval = eth_prepare_mac_addr_change(netdev, sock_addr);
+ if (retval)
+ return retval;
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
+ tsnep_mac_set_address(adapter, sock_addr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops tsnep_netdev_ops = {
+ .ndo_open = tsnep_netdev_open,
+ .ndo_stop = tsnep_netdev_close,
+ .ndo_start_xmit = tsnep_netdev_xmit_frame,
+ .ndo_eth_ioctl = tsnep_netdev_ioctl,
+ .ndo_set_rx_mode = tsnep_netdev_set_multicast,
+
+ .ndo_get_stats64 = tsnep_netdev_get_stats64,
+ .ndo_set_mac_address = tsnep_netdev_set_mac_address,
+ .ndo_setup_tc = tsnep_tc_setup,
+};
+
+static int tsnep_mac_init(struct tsnep_adapter *adapter)
+{
+ int retval;
+
+ /* initialize RX filtering; the configured MAC address and broadcasts
+ * are never filtered
+ */
+ iowrite16(0, adapter->addr + TSNEP_RX_FILTER);
+
+ /* try to get MAC address in the following order:
+ * - device tree
+ * - valid MAC address already set
+ * - MAC address register if valid
+ * - random MAC address
+ */
+ retval = of_get_mac_address(adapter->pdev->dev.of_node,
+ adapter->mac_address);
+ if (retval == -EPROBE_DEFER)
+ return retval;
+ if (retval && !is_valid_ether_addr(adapter->mac_address)) {
+ *(u32 *)adapter->mac_address =
+ ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ *(u16 *)(adapter->mac_address + sizeof(u32)) =
+ ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+ if (!is_valid_ether_addr(adapter->mac_address))
+ eth_random_addr(adapter->mac_address);
+ }
+
+ tsnep_mac_set_address(adapter, adapter->mac_address);
+ eth_hw_addr_set(adapter->netdev, adapter->mac_address);
+
+ return 0;
+}
+
+static int tsnep_mdio_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *np = adapter->pdev->dev.of_node;
+ int retval;
+
+ if (np) {
+ np = of_get_child_by_name(np, "mdio");
+ if (!np)
+ return 0;
+
+ adapter->suppress_preamble =
+ of_property_read_bool(np, "suppress-preamble");
+ }
+
+ adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
+ if (!adapter->mdiobus) {
+ retval = -ENOMEM;
+
+ goto out;
+ }
+
+ adapter->mdiobus->priv = (void *)adapter;
+ adapter->mdiobus->parent = &adapter->pdev->dev;
+ adapter->mdiobus->read = tsnep_mdiobus_read;
+ adapter->mdiobus->write = tsnep_mdiobus_write;
+ adapter->mdiobus->name = TSNEP "-mdiobus";
+ snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
+ adapter->pdev->name);
+
+ /* do not scan broadcast address */
+ adapter->mdiobus->phy_mask = 0x0000001;
+
+ retval = of_mdiobus_register(adapter->mdiobus, np);
+
+out:
+ if (np)
+ of_node_put(np);
+
+ return retval;
+}
+
+static int tsnep_phy_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *phy_node;
+ int retval;
+
+ retval = of_get_phy_mode(adapter->pdev->dev.of_node,
+ &adapter->phy_mode);
+ if (retval)
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+
+ phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
+ 0);
+ adapter->phydev = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!adapter->phydev && adapter->mdiobus)
+ adapter->phydev = phy_find_first(adapter->mdiobus);
+ if (!adapter->phydev)
+ return -EIO;
+
+ return 0;
+}
+
+static int tsnep_probe(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter;
+ struct net_device *netdev;
+ struct resource *io;
+ u32 type;
+ int revision;
+ int version;
+ int retval;
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct tsnep_adapter),
+ TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
+ if (!netdev)
+ return -ENODEV;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ adapter = netdev_priv(netdev);
+ platform_set_drvdata(pdev, adapter);
+ adapter->pdev = pdev;
+ adapter->dmadev = &pdev->dev;
+ adapter->netdev = netdev;
+ adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
+
+ mutex_init(&adapter->gate_control_lock);
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adapter->addr = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(adapter->addr))
+ return PTR_ERR(adapter->addr);
+ adapter->irq = platform_get_irq(pdev, 0);
+ if (adapter->irq < 0)
+ return adapter->irq;
+ netdev->mem_start = io->start;
+ netdev->mem_end = io->end;
+ netdev->irq = adapter->irq;
+
+ type = ioread32(adapter->addr + ECM_TYPE);
+ revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
+ version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
+ adapter->gate_control = type & ECM_GATE_CONTROL;
+
+ adapter->num_tx_queues = TSNEP_QUEUES;
+ adapter->num_rx_queues = TSNEP_QUEUES;
+ adapter->num_queues = TSNEP_QUEUES;
+ adapter->queue[0].tx = &adapter->tx[0];
+ adapter->queue[0].rx = &adapter->rx[0];
+ adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+ retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
+ 0, TSNEP, adapter);
+ if (retval != 0) {
+ dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
+ adapter->irq);
+ return retval;
+ }
+ tsnep_enable_irq(adapter, ECM_INT_LINK);
+
+ retval = tsnep_mac_init(adapter);
+ if (retval)
+ goto mac_init_failed;
+
+ retval = tsnep_mdio_init(adapter);
+ if (retval)
+ goto mdio_init_failed;
+
+ retval = tsnep_phy_init(adapter);
+ if (retval)
+ goto phy_init_failed;
+
+ retval = tsnep_ptp_init(adapter);
+ if (retval)
+ goto ptp_init_failed;
+
+ retval = tsnep_tc_init(adapter);
+ if (retval)
+ goto tc_init_failed;
+
+ netdev->netdev_ops = &tsnep_netdev_ops;
+ netdev->ethtool_ops = &tsnep_ethtool_ops;
+ netdev->features = NETIF_F_SG;
+ netdev->hw_features = netdev->features;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ retval = register_netdev(netdev);
+ if (retval)
+ goto register_failed;
+
+ dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
+ revision);
+ if (adapter->gate_control)
+ dev_info(&adapter->pdev->dev, "gate control detected\n");
+
+ return 0;
+
+register_failed:
+ tsnep_tc_cleanup(adapter);
+tc_init_failed:
+ tsnep_ptp_cleanup(adapter);
+ptp_init_failed:
+phy_init_failed:
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+mdio_init_failed:
+mac_init_failed:
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+ return retval;
+}
+
+static int tsnep_remove(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
+
+ unregister_netdev(adapter->netdev);
+
+ tsnep_tc_cleanup(adapter);
+
+ tsnep_ptp_cleanup(adapter);
+
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+
+ return 0;
+}
+
+static const struct of_device_id tsnep_of_match[] = {
+ { .compatible = "engleder,tsnep", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tsnep_of_match);
+
+static struct platform_driver tsnep_driver = {
+ .driver = {
+ .name = TSNEP,
+ .of_match_table = of_match_ptr(tsnep_of_match),
+ },
+ .probe = tsnep_probe,
+ .remove = tsnep_remove,
+};
+module_platform_driver(tsnep_driver);
+
+MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
+MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c
new file mode 100644
index 000000000000..eaad453d487e
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ptp.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time)
+{
+ u32 high_before;
+ u32 low;
+ u32 high;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ *time = (((u64)high) << 32) | ((u64)low);
+}
+
+int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!ifr)
+ return -EINVAL;
+
+ if (cmd == SIOCSHWTSTAMP) {
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&adapter->hwtstamp_config, &config,
+ sizeof(adapter->hwtstamp_config));
+ }
+
+ if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int tsnep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ bool negative = false;
+ u64 rate_offset;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ negative = true;
+ }
+
+ /* convert from 16 bit to 32 bit binary fractional, divide by 1000000 to
+ * eliminate ppm, multiply with 8 to compensate 8ns clock cycle time,
+ * simplify calculation because 15625 * 8 = 1000000 / 8
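+ * i.e. rate_offset = (scaled_ppm << 13) / 15625 = scaled_ppm * 8 * 2^16 / 10^6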
+ */
+ rate_offset = scaled_ppm;
+ rate_offset <<= 16 - 3;
+ rate_offset = div_u64(rate_offset, 15625);
+
+ rate_offset &= ECM_CLOCK_RATE_OFFSET_MASK;
+ if (negative)
+ rate_offset |= ECM_CLOCK_RATE_OFFSET_SIGN;
+ iowrite32(rate_offset & 0xFFFFFFFF, adapter->addr + ECM_CLOCK_RATE);
+
+ return 0;
+}
+
+static int tsnep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ tsnep_get_system_time(adapter, &system_time);
+
+ system_time += delta;
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+static int tsnep_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u32 high_before;
+ u32 low;
+ u32 high;
+ u64 system_time;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ ptp_read_system_prets(sts);
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ ptp_read_system_postts(sts);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ system_time = (((u64)high) << 32) | ((u64)low);
+
+ *ts = ns_to_timespec64(system_time);
+
+ return 0;
+}
+
+static int tsnep_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter)
+{
+ int retval = 0;
+
+ adapter->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ snprintf(adapter->ptp_clock_info.name, 16, "%s", TSNEP);
+ adapter->ptp_clock_info.owner = THIS_MODULE;
+ /* at most 2^-1 ns adjustment per clock cycle at 8 ns clock cycle time;
+ * stay slightly below that, because only the bits below 2^-1 ns are
+ * supported
+ */
+ adapter->ptp_clock_info.max_adj = (500000000 / 8 - 1);
+ adapter->ptp_clock_info.adjfine = tsnep_ptp_adjfine;
+ adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime;
+ adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64;
+ adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64;
+
+ spin_lock_init(&adapter->ptp_lock);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ netdev_err(adapter->netdev, "ptp_clock_register failed\n");
+
+ retval = PTR_ERR(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ } else if (adapter->ptp_clock) {
+ netdev_info(adapter->netdev, "PHC added\n");
+ }
+
+ return retval;
+}
+
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter)
+{
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ netdev_info(adapter->netdev, "PHC removed\n");
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_selftests.c b/drivers/net/ethernet/engleder/tsnep_selftests.c
new file mode 100644
index 000000000000..1581d6b22232
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_selftests.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+enum tsnep_test {
+ TSNEP_TEST_ENABLE = 0,
+ TSNEP_TEST_TAPRIO,
+ TSNEP_TEST_TAPRIO_CHANGE,
+ TSNEP_TEST_TAPRIO_EXTENSION,
+};
+
+static const char tsnep_test_strings[][ETH_GSTRING_LEN] = {
+ "Enable timeout (offline)",
+ "TAPRIO (offline)",
+ "TAPRIO change (offline)",
+ "TAPRIO extension (offline)",
+};
+
+#define TSNEP_TEST_COUNT (sizeof(tsnep_test_strings) / ETH_GSTRING_LEN)
+
+static bool enable_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_ACTIVE))
+ return false;
+
+ return true;
+}
+
+static bool gc_timeout_signaled(struct tsnep_adapter *adapter)
+{
+ if (ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_SIGNAL)
+ return true;
+
+ return false;
+}
+
+static bool ack_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_TIMEOUT_ACTIVE | TSNEP_GC_TIMEOUT_SIGNAL))
+ return false;
+ return true;
+}
+
+static bool enable_gc(struct tsnep_adapter *adapter, bool a)
+{
+ u8 enable;
+ u8 active;
+
+ if (a) {
+ enable = TSNEP_GC_ENABLE_A;
+ active = TSNEP_GC_ACTIVE_A;
+ } else {
+ enable = TSNEP_GC_ENABLE_B;
+ active = TSNEP_GC_ACTIVE_B;
+ }
+
+ iowrite8(enable, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & active))
+ return false;
+
+ return true;
+}
+
+static bool disable_gc(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_ACTIVE_A | TSNEP_GC_ACTIVE_B))
+ return false;
+
+ return true;
+}
+
+static bool gc_delayed_enable(struct tsnep_adapter *adapter, bool a, int delay)
+{
+ u64 before, after;
+ u32 time;
+ bool enabled;
+
+ if (!disable_gc(adapter))
+ return false;
+
+ before = ktime_get_ns();
+
+ if (!enable_gc_timeout(adapter))
+ return false;
+
+ /* with a start time after the timeout, the timeout guarantees that
+ * enable is blocked if it happens too late
+ */
+ time = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ time += TSNEP_GC_TIMEOUT;
+ iowrite32(time, adapter->addr + TSNEP_GC_TIME);
+
+ ndelay(delay);
+
+ enabled = enable_gc(adapter, a);
+ after = ktime_get_ns();
+
+ if (delay > TSNEP_GC_TIMEOUT) {
+ /* timeout must have blocked enable */
+ if (enabled)
+ return false;
+ } else if ((after - before) < TSNEP_GC_TIMEOUT * 14 / 16) {
+ /* timeout must not have blocked enable */
+ if (!enabled)
+ return false;
+ }
+
+ if (enabled) {
+ if (gc_timeout_signaled(adapter))
+ return false;
+ } else {
+ if (!gc_timeout_signaled(adapter))
+ return false;
+ if (!ack_gc_timeout(adapter))
+ return false;
+ }
+
+ if (!disable_gc(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_gc_enable(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_A + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_A + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, true, i))
+ return false;
+ }
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_B + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_B + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, false, i))
+ return false;
+ }
+
+ return true;
+}
+
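+/* move base time to the first cycle start at least ms milliseconds in the
+ * future
+ */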
+static void delay_base_time(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ u64 system_time;
+ u64 base_time = ktime_to_ns(qopt->base_time);
+ u64 n;
+
+ tsnep_get_system_time(adapter, &system_time);
+ system_time += ms * 1000000;
+ n = div64_u64(system_time - base_time, qopt->cycle_time);
+
+ qopt->base_time = ktime_add_ns(qopt->base_time,
+ (n + 1) * qopt->cycle_time);
+}
+
+static void get_gate_state(struct tsnep_adapter *adapter, u32 *gc, u32 *gc_time,
+ u64 *system_time)
+{
+ u32 time_high_before;
+ u32 time_low;
+ u32 time_high;
+ u32 gc_time_before;
+
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ do {
+ time_low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ *gc = ioread32(adapter->addr + TSNEP_GC);
+
+ gc_time_before = *gc_time;
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ time_high_before = time_high;
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while ((time_high != time_high_before) ||
+ (*gc_time != gc_time_before));
+
+ *system_time = (((u64)time_high) << 32) | ((u64)time_low);
+}
+
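+/* return the index of the currently active gate operation; *next is set to
+ * the start time of the following operation
+ */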
+static int get_operation(struct tsnep_gcl *gcl, u64 system_time, u64 *next)
+{
+ u64 n = div64_u64(system_time - gcl->base_time, gcl->cycle_time);
+ u64 cycle_start = gcl->base_time + gcl->cycle_time * n;
+ int i;
+
+ *next = cycle_start;
+ for (i = 0; i < gcl->count; i++) {
+ *next += gcl->operation[i].interval;
+ if (*next > system_time)
+ break;
+ }
+
+ return i;
+}
+
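+/* compare the gate control state of the hardware with the state expected from
+ * the software copy of the gate control lists
+ */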
+static bool check_gate(struct tsnep_adapter *adapter)
+{
+ u32 gc_time;
+ u32 gc;
+ u64 system_time;
+ struct tsnep_gcl *curr;
+ struct tsnep_gcl *prev;
+ u64 next_time;
+ u8 gate_open;
+ u8 next_gate_open;
+
+ get_gate_state(adapter, &gc, &gc_time, &system_time);
+
+ if (gc & TSNEP_GC_ACTIVE_A) {
+ curr = &adapter->gcl[0];
+ prev = &adapter->gcl[1];
+ } else if (gc & TSNEP_GC_ACTIVE_B) {
+ curr = &adapter->gcl[1];
+ prev = &adapter->gcl[0];
+ } else {
+ return false;
+ }
+ if (curr->start_time <= system_time) {
+ /* GCL is already active */
+ int index;
+
+ index = get_operation(curr, system_time, &next_time);
+ gate_open = curr->operation[index].properties & TSNEP_GCL_MASK;
+ if (index == curr->count - 1)
+ index = 0;
+ else
+ index++;
+ next_gate_open =
+ curr->operation[index].properties & TSNEP_GCL_MASK;
+ } else if (curr->change) {
+ /* operation of previous GCL is active */
+ int index;
+ u64 start_before;
+ u64 n;
+
+ index = get_operation(prev, system_time, &next_time);
+ next_time = curr->start_time;
+ start_before = prev->base_time;
+ n = div64_u64(curr->start_time - start_before,
+ prev->cycle_time);
+ start_before += n * prev->cycle_time;
+ if (curr->start_time == start_before)
+ start_before -= prev->cycle_time;
+ if (((start_before + prev->cycle_time_extension) >=
+ curr->start_time) &&
+ (curr->start_time - prev->cycle_time_extension <=
+ system_time)) {
+ /* extend */
+ index = prev->count - 1;
+ }
+ gate_open = prev->operation[index].properties & TSNEP_GCL_MASK;
+ next_gate_open =
+ curr->operation[0].properties & TSNEP_GCL_MASK;
+ } else {
+ /* GCL is waiting for start */
+ next_time = curr->start_time;
+ gate_open = 0xFF;
+ next_gate_open = curr->operation[0].properties & TSNEP_GCL_MASK;
+ }
+
+ if (gc_time != (next_time & 0xFFFFFFFF)) {
+ dev_err(&adapter->pdev->dev, "gate control time 0x%x!=0x%llx\n",
+ gc_time, next_time);
+ return false;
+ }
+ if (((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT) != gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT),
+ gate_open);
+ return false;
+ }
+ if (((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT) !=
+ next_gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control next open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT),
+ next_gate_open);
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gate_duration(struct tsnep_adapter *adapter, s64 ms)
+{
+ ktime_t start = ktime_get();
+
+ do {
+ if (!check_gate(adapter))
+ return false;
+ } while (ktime_ms_delta(ktime_get(), start) < ms);
+
+ return true;
+}
+
+static bool enable_check_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ int retval;
+
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, qopt);
+ if (retval)
+ return false;
+
+ if (!check_gate_duration(adapter, ms))
+ return false;
+
+ return true;
+}
+
+static bool disable_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload qopt;
+ int retval;
+
+ memset(&qopt, 0, sizeof(qopt));
+ qopt.enable = 0;
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, &qopt);
+ if (retval)
+ return false;
+
+ return true;
+}
+
+static bool run_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ if (!enable_check_taprio(adapter, qopt, ms))
+ return false;
+
+ if (!disable_taprio(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x02;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x03;
+ qopt->entries[1].interval = 800000;
+ qopt->entries[2].gate_mask = 0x07;
+ qopt->entries[2].interval = 240000;
+ qopt->entries[3].gate_mask = 0x01;
+ qopt->entries[3].interval = 80000;
+ qopt->entries[4].gate_mask = 0x04;
+ qopt->entries[4].interval = 70000;
+ qopt->entries[5].gate_mask = 0x06;
+ qopt->entries[5].interval = 60000;
+ qopt->entries[6].gate_mask = 0x0F;
+ qopt->entries[6].interval = 50000;
+ qopt->num_entries = 7;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 411854;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x17;
+ qopt->entries[0].interval = 23842;
+ qopt->entries[1].gate_mask = 0x16;
+ qopt->entries[1].interval = 13482;
+ qopt->entries[2].gate_mask = 0x15;
+ qopt->entries[2].interval = 49428;
+ qopt->entries[3].gate_mask = 0x14;
+ qopt->entries[3].interval = 38189;
+ qopt->entries[4].gate_mask = 0x13;
+ qopt->entries[4].interval = 92321;
+ qopt->entries[5].gate_mask = 0x12;
+ qopt->entries[5].interval = 71239;
+ qopt->entries[6].gate_mask = 0x11;
+ qopt->entries[6].interval = 69932;
+ qopt->entries[7].gate_mask = 0x10;
+ qopt->entries[7].interval = 53421;
+ qopt->num_entries = 8;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ delay_base_time(adapter, qopt, 12);
+ qopt->cycle_time = 125000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x27;
+ qopt->entries[0].interval = 15000;
+ qopt->entries[1].gate_mask = 0x26;
+ qopt->entries[1].interval = 15000;
+ qopt->entries[2].gate_mask = 0x25;
+ qopt->entries[2].interval = 12500;
+ qopt->entries[3].gate_mask = 0x24;
+ qopt->entries[3].interval = 17500;
+ qopt->entries[4].gate_mask = 0x23;
+ qopt->entries[4].interval = 10000;
+ qopt->entries[5].gate_mask = 0x22;
+ qopt->entries[5].interval = 11000;
+ qopt->entries[6].gate_mask = 0x21;
+ qopt->entries[6].interval = 9000;
+ qopt->entries[7].gate_mask = 0x20;
+ qopt->entries[7].interval = 10000;
+ qopt->entries[8].gate_mask = 0x20;
+ qopt->entries[8].interval = 12500;
+ qopt->entries[9].gate_mask = 0x20;
+ qopt->entries[9].interval = 12500;
+ qopt->num_entries = 10;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_change(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x30;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x31;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to identical */
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ delay_base_time(adapter, qopt, 17);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to same cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x42;
+ qopt->entries[1].gate_mask = 0x43;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x54;
+ qopt->entries[0].interval = 33333;
+ qopt->entries[1].gate_mask = 0x55;
+ qopt->entries[1].interval = 66667;
+ delay_base_time(adapter, qopt, 23);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x66;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x67;
+ qopt->entries[1].interval = 25000;
+ qopt->entries[2].gate_mask = 0x68;
+ qopt->entries[2].interval = 25000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to multiple of cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 200000;
+ qopt->entries[0].gate_mask = 0x79;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x7A;
+ qopt->entries[1].interval = 150000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->entries[0].gate_mask = 0x7B;
+ qopt->entries[0].interval = 125000;
+ qopt->entries[1].gate_mask = 0x7C;
+ qopt->entries[1].interval = 250000;
+ qopt->entries[2].gate_mask = 0x7D;
+ qopt->entries[2].interval = 375000;
+ qopt->entries[3].gate_mask = 0x7E;
+ qopt->entries[3].interval = 250000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 333333;
+ qopt->entries[0].gate_mask = 0x8F;
+ qopt->entries[0].interval = 166666;
+ qopt->entries[1].gate_mask = 0x80;
+ qopt->entries[1].interval = 166667;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 62500;
+ qopt->entries[0].gate_mask = 0x81;
+ qopt->entries[0].interval = 31250;
+ qopt->entries[1].gate_mask = 0x82;
+ qopt->entries[1].interval = 15625;
+ qopt->entries[2].gate_mask = 0x83;
+ qopt->entries[2].interval = 15625;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->entries[0].gate_mask = 0x84;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0x85;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0x86;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0x87;
+ qopt->entries[3].interval = 100000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1700000;
+ qopt->entries[0].gate_mask = 0x88;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x89;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0x8A;
+ qopt->entries[2].interval = 600000;
+ qopt->entries[3].gate_mask = 0x8B;
+ qopt->entries[3].interval = 100000;
+ qopt->entries[4].gate_mask = 0x8C;
+ qopt->entries[4].interval = 500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 6);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_extension(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 50000;
+ qopt->entries[0].gate_mask = 0x90;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x91;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase */
+ qopt->base_time = ktime_set(0, 50000);
+ qopt->entries[0].gate_mask = 0x92;
+ qopt->entries[0].interval = 33000;
+ qopt->entries[1].gate_mask = 0x93;
+ qopt->entries[1].interval = 67000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x94;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x95;
+ qopt->entries[1].interval = 600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 700000);
+ qopt->cycle_time = 2000000;
+ qopt->cycle_time_extension = 1900000;
+ qopt->entries[0].gate_mask = 0x96;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x97;
+ qopt->entries[1].interval = 1600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x98;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x99;
+ qopt->entries[1].interval = 600000;
+ qopt->entries[2].gate_mask = 0x9A;
+ qopt->entries[2].interval = 500000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 100000);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 300000;
+ qopt->entries[0].gate_mask = 0x9B;
+ qopt->entries[0].interval = 150000;
+ qopt->entries[1].gate_mask = 0x9C;
+ qopt->entries[1].interval = 350000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0xAD;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0xAE;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0xAF;
+ qopt->entries[2].interval = 300000;
+ qopt->num_entries = 3;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->cycle_time_extension = 100000;
+ qopt->entries[0].gate_mask = 0xA0;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0xA1;
+ qopt->entries[1].interval = 200000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 499999;
+ qopt->entries[0].gate_mask = 0xB2;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0xB3;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0xB4;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0xB5;
+ qopt->entries[3].interval = 200000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 6000000;
+ qopt->cycle_time_extension = 5999999;
+ qopt->entries[0].gate_mask = 0xC6;
+ qopt->entries[0].interval = 1000000;
+ qopt->entries[1].gate_mask = 0xC7;
+ qopt->entries[1].interval = 1000000;
+ qopt->entries[2].gate_mask = 0xC8;
+ qopt->entries[2].interval = 1000000;
+ qopt->entries[3].gate_mask = 0xC9;
+ qopt->entries[3].interval = 1500000;
+ qopt->entries[4].gate_mask = 0xCA;
+ qopt->entries[4].interval = 1500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+int tsnep_ethtool_get_test_count(void)
+{
+ return TSNEP_TEST_COUNT;
+}
+
+void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ memcpy(data, tsnep_test_strings, sizeof(tsnep_test_strings));
+}
+
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ eth_test->len = TSNEP_TEST_COUNT;
+
+ if (eth_test->flags != ETH_TEST_FL_OFFLINE) {
+ /* no tests are done online */
+ data[TSNEP_TEST_ENABLE] = 0;
+ data[TSNEP_TEST_TAPRIO] = 0;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+
+ return;
+ }
+
+ if (tsnep_test_gc_enable(adapter)) {
+ data[TSNEP_TEST_ENABLE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_ENABLE] = 1;
+ }
+
+ if (tsnep_test_taprio(adapter)) {
+ data[TSNEP_TEST_TAPRIO] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO] = 1;
+ }
+
+ if (tsnep_test_taprio_change(adapter)) {
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 1;
+ }
+
+ if (tsnep_test_taprio_extension(adapter)) {
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 1;
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c
new file mode 100644
index 000000000000..c4c6e1357317
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_tc.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+/* reserve one operation at the end for the additional operation at list change */
+#define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1)
+
+static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u64 cycle_time;
+
+ if (!qopt->cycle_time)
+ return -ERANGE;
+ if (qopt->num_entries > TSNEP_MAX_GCL_NUM)
+ return -EINVAL;
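+	/* intervals of all entries must sum up exactly to the cycle time */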
+ cycle_time = 0;
+ for (i = 0; i < qopt->num_entries; i++) {
+ if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+ if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK)
+ return -EINVAL;
+ if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL)
+ return -EINVAL;
+ cycle_time += qopt->entries[i].interval;
+ }
+ if (qopt->cycle_time != cycle_time)
+ return -EINVAL;
+ if (qopt->cycle_time_extension >= qopt->cycle_time)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index,
+ u32 properties, u32 interval, bool flush)
+{
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties = properties;
+ gcl->operation[index].interval = interval;
+
+ iowrite32(properties, addr);
+ iowrite32(interval, addr + sizeof(u32));
+
+ if (flush) {
+ /* flush write with read access */
+ ioread32(addr);
+ }
+}
+
+static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index)
+{
+ u64 duration;
+ int count;
+
+ /* change needs to be triggered one or two operations before start of
+ * new gate control list
+ * - change is triggered at start of operation (minimum one operation)
+ * - operation with adjusted interval is inserted on demand to exactly
+ * meet the start of the new gate control list (optional)
+ *
+	 * additionally, properties are read directly after the start of the
+	 * previous operation
+	 *
+	 * therefore, three operations need to be considered for the limit
+ */
+ duration = 0;
+ count = 3;
+ while (count) {
+ duration += gcl->operation[index].interval;
+
+ index--;
+ if (index < 0)
+ index = gcl->count - 1;
+
+ count--;
+ }
+
+ return duration;
+}
+
+static void tsnep_write_gcl(struct tsnep_gcl *gcl,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u32 properties;
+ u64 extend;
+ u64 cut;
+
+ gcl->base_time = ktime_to_ns(qopt->base_time);
+ gcl->cycle_time = qopt->cycle_time;
+ gcl->cycle_time_extension = qopt->cycle_time_extension;
+
+ for (i = 0; i < qopt->num_entries; i++) {
+ properties = qopt->entries[i].gate_mask;
+ if (i == (qopt->num_entries - 1))
+ properties |= TSNEP_GCL_LAST;
+
+ tsnep_write_gcl_operation(gcl, i, properties,
+ qopt->entries[i].interval, true);
+ }
+ gcl->count = qopt->num_entries;
+
+ /* calculate change limit; i.e., the time needed between enable and
+ * start of new gate control list
+ */
+
+ /* case 1: extend cycle time for change
+ * - change duration of last operation
+ * - cycle time extension
+ */
+ extend = tsnep_change_duration(gcl, gcl->count - 1);
+ extend += gcl->cycle_time_extension;
+
+ /* case 2: cut cycle time for change
+ * - maximum change duration
+ */
+ cut = 0;
+ for (i = 0; i < gcl->count; i++)
+ cut = max(cut, tsnep_change_duration(gcl, i));
+
+ /* use maximum, because the actual case (extend or cut) can be
+ * determined only after limit is known (chicken-and-egg problem)
+ */
+ gcl->change_limit = max(extend, cut);
+}
+
+static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
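+	/* add whole cycles until the cycle start is strictly after the limit */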
+ if (start <= limit) {
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += (n + 1) * gcl->cycle_time;
+ }
+
+ return start;
+}
+
+static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
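+	/* find the last cycle start strictly before the limit */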
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += n * gcl->cycle_time;
+ if (start == limit)
+ start -= gcl->cycle_time;
+
+ return start;
+}
+
+static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change,
+ bool insert)
+{
+ /* previous operation triggers change and properties are evaluated at
+ * start of operation
+ */
+ if (index == 0)
+ index = gcl->count - 1;
+ else
+ index = index - 1;
+ change -= gcl->operation[index].interval;
+
+ /* optionally change to new list with additional operation in between */
+ if (insert) {
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties |= TSNEP_GCL_INSERT;
+ iowrite32(gcl->operation[index].properties, addr);
+ }
+
+ return change;
+}
+
+static void tsnep_clean_gcl(struct tsnep_gcl *gcl)
+{
+ int i;
+ u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK;
+ void __iomem *addr;
+
+ /* search for insert operation and reset properties */
+ for (i = 0; i < gcl->count; i++) {
+ if (gcl->operation[i].properties & ~mask) {
+ addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * i;
+
+ gcl->operation[i].properties &= mask;
+ iowrite32(gcl->operation[i].properties, addr);
+
+ break;
+ }
+ }
+}
+
+static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref,
+ u64 change, u32 interval)
+{
+ u32 properties;
+
+ properties = gcl->operation[ref].properties & TSNEP_GCL_MASK;
+ /* change to new list directly after inserted operation */
+ properties |= TSNEP_GCL_CHANGE;
+
+	/* the last operation of the list is reserved for the insert operation */
+ tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties,
+ interval, false);
+
+ return tsnep_set_gcl_change(gcl, ref, change, true);
+}
+
+static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension)
+{
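+	/* insert a copy of the last operation with an extended interval, so
+	 * that the old cycle ends exactly at the start of the new list
+	 */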
+ int ref = gcl->count - 1;
+ u32 interval = gcl->operation[ref].interval + extension;
+
+ start -= gcl->operation[ref].interval;
+
+ return tsnep_insert_gcl_operation(gcl, ref, start, interval);
+}
+
+static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time)
+{
+ u64 sum = 0;
+ int i;
+
+	/* find the operation which shall be cut */
+ for (i = 0; i < gcl->count; i++) {
+ u64 sum_tmp = sum + gcl->operation[i].interval;
+ u64 interval;
+
+ /* sum up operations as long as cycle time is not exceeded */
+ if (sum_tmp > cycle_time)
+ break;
+
+ /* remaining interval must be big enough for hardware */
+ interval = cycle_time - sum_tmp;
+ if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL)
+ break;
+
+ sum = sum_tmp;
+ }
+ if (sum == cycle_time) {
+ /* no need to cut operation itself or whole cycle
+ * => change exactly at operation
+ */
+ return tsnep_set_gcl_change(gcl, i, start + sum, false);
+ }
+ return tsnep_insert_gcl_operation(gcl, i, start + sum,
+ cycle_time - sum);
+}
+
+static int tsnep_enable_gcl(struct tsnep_adapter *adapter,
+ struct tsnep_gcl *gcl, struct tsnep_gcl *curr)
+{
+ u64 system_time;
+ u64 timeout;
+ u64 limit;
+
+	/* estimate the timeout limit after timeout enable; the actual timeout
+	 * limit in hardware will be earlier than the estimate, so we are on
+	 * the safe side
+ */
+ tsnep_get_system_time(adapter, &system_time);
+ timeout = system_time + TSNEP_GC_TIMEOUT;
+
+ if (curr)
+ limit = timeout + curr->change_limit;
+ else
+ limit = timeout;
+
+ gcl->start_time = tsnep_gcl_start_after(gcl, limit);
+
+	/* gate control time register is only 32 bits => start time must be in
+	 * the near future (the driver does not support the far future)
+ */
+ if ((gcl->start_time - system_time) >= U32_MAX)
+ return -EAGAIN;
+
+ if (curr) {
+ /* change gate control list */
+ u64 last;
+ u64 change;
+
+ last = tsnep_gcl_start_before(curr, gcl->start_time);
+ if ((last + curr->cycle_time) == gcl->start_time)
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+ else if (((gcl->start_time - last) <=
+ curr->cycle_time_extension) ||
+ ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL))
+ change = tsnep_extend_gcl(curr, last,
+ gcl->start_time - last);
+ else
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+
+ WARN_ON(change <= timeout);
+ gcl->change = true;
+ iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE);
+ } else {
+ /* start gate control list */
+ WARN_ON(gcl->start_time <= timeout);
+ gcl->change = false;
+ iowrite32(gcl->start_time & 0xFFFFFFFF,
+ adapter->addr + TSNEP_GC_TIME);
+ }
+
+ return 0;
+}
+
+static int tsnep_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tsnep_gcl *gcl;
+ struct tsnep_gcl *curr;
+ int retval;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ if (!qopt->enable) {
+ /* disable gate control if active */
+ mutex_lock(&adapter->gate_control_lock);
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+ }
+
+ retval = tsnep_validate_gcl(qopt);
+ if (retval)
+ return retval;
+
+ mutex_lock(&adapter->gate_control_lock);
+
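+	/* write the new gate control list to the currently unused list A or B */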
+ gcl = &adapter->gcl[adapter->next_gcl];
+ tsnep_write_gcl(gcl, qopt);
+
+ /* select current gate control list if active */
+ if (adapter->gate_control_active) {
+ if (adapter->next_gcl == 0)
+ curr = &adapter->gcl[1];
+ else
+ curr = &adapter->gcl[0];
+ } else {
+ curr = NULL;
+ }
+
+ for (;;) {
+		/* start timeout which discards a late enable, this helps to
+		 * ensure that start/change times are in the future at enable
+ */
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+
+ retval = tsnep_enable_gcl(adapter, gcl, curr);
+ if (retval) {
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return retval;
+ }
+
+ /* enable gate control list */
+ if (adapter->next_gcl == 0)
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+ else
+ iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC);
+
+ /* done if timeout did not happen */
+ if (!(ioread32(adapter->addr + TSNEP_GC) &
+ TSNEP_GC_TIMEOUT_SIGNAL))
+ break;
+
+ /* timeout is acknowledged with any enable */
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+
+ if (curr)
+ tsnep_clean_gcl(curr);
+
+ /* retry because of timeout */
+ }
+
+ adapter->gate_control_active = true;
+
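+	/* use the other list for the next change */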
+ if (adapter->next_gcl == 0)
+ adapter->next_gcl = 1;
+ else
+ adapter->next_gcl = 0;
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+}
+
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return tsnep_taprio(adapter, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int tsnep_tc_init(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return 0;
+
+ /* open all gates */
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC);
+
+ adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A;
+ adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B;
+
+ return 0;
+}
+
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return;
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index b1c8ffea6ad2..437c5acfe222 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -945,7 +945,9 @@ static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
}
static void ethoc_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ethoc *priv = netdev_priv(dev);
@@ -961,7 +963,9 @@ static void ethoc_get_ringparam(struct net_device *dev,
}
static int ethoc_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ethoc *priv = netdev_priv(dev);
@@ -1074,14 +1078,11 @@ static int ethoc_probe(struct platform_device *pdev)
/* obtain device IRQ number */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot obtain IRQ\n");
- ret = -ENXIO;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto free;
- }
- netdev->irq = res->start;
+ netdev->irq = ret;
/* setup driver-private data */
priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 97c5d70de76e..691605c15265 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1178,8 +1178,11 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
-static void ftgmac100_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static void
+ftgmac100_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1190,8 +1193,11 @@ static void ftgmac100_get_ringparam(struct net_device *netdev,
ering->tx_pending = priv->tx_q_entries;
}
-static int ftgmac100_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static int
+ftgmac100_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct ftgmac100 *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6b2927d863e2..c78883c3a2c8 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2325,7 +2325,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
txq = netdev_get_tx_queue(net_dev, queue_mapping);
/* LLTX requires us to do our own update of trans_start */
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
@@ -2531,7 +2531,7 @@ static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
/* Bump the trans_start */
txq = netdev_get_tx_queue(net_dev, smp_processor_id());
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
if (err) {
@@ -2623,7 +2623,7 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
}
break;
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 8e643567abce..e985ae008a97 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -374,7 +374,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
break;
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
@@ -4246,7 +4246,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
}
irq = ls_dev->irqs[0];
- err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
NULL, dpni_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(&ls_dev->dev), &ls_dev->dev);
@@ -4273,7 +4273,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
return 0;
free_irq:
- devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
+ devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
free_mc_irq:
fsl_mc_free_irqs(ls_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index ef8f0a055024..623d113b6581 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -2,6 +2,7 @@
/* Copyright 2019 NXP */
#include <linux/acpi.h>
+#include <linux/pcs-lynx.h>
#include <linux/property.h>
#include "dpaa2-eth.h"
@@ -40,7 +41,7 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
u16 dpmac_id)
{
- struct fwnode_handle *fwnode, *parent, *child = NULL;
+ struct fwnode_handle *fwnode, *parent = NULL, *child = NULL;
struct device_node *dpmacs = NULL;
int err;
u32 id;
@@ -53,8 +54,17 @@ static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
parent = of_fwnode_handle(dpmacs);
} else if (is_acpi_node(fwnode)) {
parent = fwnode;
+ } else {
+		/* The root dprc device didn't yet get to finalize its probe,
+ * thus the fwnode field is not yet set. Defer probe if we are
+ * facing this situation.
+ */
+ return ERR_PTR(-EPROBE_DEFER);
}
+ if (!parent)
+ return NULL;
+
fwnode_for_each_child_node(parent, child) {
err = -EINVAL;
if (is_acpi_device_node(child))
@@ -90,88 +100,6 @@ static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
return err;
}
-static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
- phy_interface_t interface)
-{
- switch (interface) {
- /* We can switch between SGMII and 1000BASE-X at runtime with
- * pcs-lynx
- */
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- if (mac->pcs &&
- (mac->if_mode == PHY_INTERFACE_MODE_SGMII ||
- mac->if_mode == PHY_INTERFACE_MODE_1000BASEX))
- return false;
- return interface != mac->if_mode;
-
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- return (interface != mac->if_mode);
- default:
- return true;
- }
-}
-
-static void dpaa2_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- dpaa2_mac_phy_mode_mismatch(mac, state->interface)) {
- goto empty_set;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_USXGMII:
- phylink_set_10g_modes(mask);
- if (state->interface == PHY_INTERFACE_MODE_10GBASER)
- break;
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 2500baseT_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 1000baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- break;
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10baseT_Full);
- break;
- default:
- goto empty_set;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-empty_set:
- linkmode_zero(supported);
-}
-
static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -243,7 +171,7 @@ static void dpaa2_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
- .validate = dpaa2_mac_validate,
+ .validate = phylink_generic_validate,
.mac_config = dpaa2_mac_config,
.mac_link_up = dpaa2_mac_link_up,
.mac_link_down = dpaa2_mac_link_down,
@@ -286,11 +214,13 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
{
- struct lynx_pcs *pcs = mac->pcs;
+ struct phylink_pcs *phylink_pcs = mac->pcs;
- if (pcs) {
- struct device *dev = &pcs->mdio->dev;
- lynx_pcs_destroy(pcs);
+ if (phylink_pcs) {
+ struct mdio_device *mdio = lynx_get_mdio_device(phylink_pcs);
+ struct device *dev = &mdio->dev;
+
+ lynx_pcs_destroy(phylink_pcs);
put_device(dev);
mac->pcs = NULL;
}
@@ -336,9 +266,34 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
return err;
}
+ memset(&mac->phylink_config, 0, sizeof(mac->phylink_config));
mac->phylink_config.dev = &net_dev->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+
+	/* We support the current interface mode and, if we have a PCS,
+	 * similar interface modes that do not require the PLLs to be
+ * reconfigured.
+ */
+ __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+ if (mac->pcs) {
+ switch (mac->if_mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ break;
+
+ default:
+ break;
+ }
+ }
+
phylink = phylink_create(&mac->phylink_config,
dpmac_node, mac->if_mode,
&dpaa2_mac_phylink_ops);
@@ -349,7 +304,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
mac->phylink = phylink;
if (mac->pcs)
- phylink_set_pcs(mac->phylink, &mac->pcs->pcs);
+ phylink_set_pcs(mac->phylink, mac->pcs);
err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
if (err) {
@@ -381,6 +336,7 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
{
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
struct net_device *net_dev = mac->net_dev;
+ struct fwnode_handle *fw_node;
int err;
err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
@@ -400,7 +356,13 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
/* Find the device node representing the MAC device and link the device
* behind the associated netdev to it.
*/
- mac->fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ if (IS_ERR(fw_node)) {
+ err = PTR_ERR(fw_node);
+ goto err_close_dpmac;
+ }
+
+ mac->fw_node = fw_node;
net_dev->dev.of_node = to_of_node(mac->fw_node);
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 7842cbb2207a..1331a8477fe4 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -7,7 +7,6 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phylink.h>
-#include <linux/pcs-lynx.h>
#include "dpmac.h"
#include "dpmac-cmd.h"
@@ -23,7 +22,7 @@ struct dpaa2_mac {
struct phylink *phylink;
phy_interface_t if_mode;
enum dpmac_link_type if_link_type;
- struct lynx_pcs *pcs;
+ struct phylink_pcs *pcs;
struct fwnode_handle *fw_node;
};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 32b5faa87bb8..5f5f8c53c4a0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -129,7 +129,6 @@ static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
- struct fsl_mc_device_irq *irq;
struct ptp_qoriq *ptp_qoriq;
struct device_node *node;
void __iomem *base;
@@ -177,8 +176,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
goto err_unmap;
}
- irq = mc_dev->irqs[0];
- ptp_qoriq->irq = irq->msi_desc->irq;
+ ptp_qoriq->irq = mc_dev->irqs[0]->virq;
err = request_threaded_irq(ptp_qoriq->irq, NULL,
dpaa2_ptp_irq_handler_thread,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index d039457928b0..9a561072aa4a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -394,7 +394,8 @@ static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
ppriv_local = ethsw->ports[i];
- ppriv_local->vlans[vid] = 0;
+ if (ppriv_local)
+ ppriv_local->vlans[vid] = 0;
}
return 0;
@@ -1553,8 +1554,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
- err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
- NULL,
+ err = devm_request_threaded_irq(dev, irq->virq, NULL,
dpaa2_switch_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(dev), dev);
@@ -1580,7 +1580,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
return 0;
free_devm_irq:
- devm_free_irq(dev, irq->msi_desc->irq, dev);
+ devm_free_irq(dev, irq->virq, dev);
free_irq:
fsl_mc_free_irqs(sw_dev);
return err;
@@ -1896,9 +1896,11 @@ static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid
/* Delete VLAN from switch if it is no longer configured on
* any port
*/
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
- if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ if (ethsw->ports[i] &&
+ ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
return 0; /* Found a port member in VID */
+ }
ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 504e12554079..d6930a797c6c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1547,7 +1547,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
switch (xdp_act) {
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
@@ -2897,12 +2897,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, name);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 910b9f722504..fa5b4f885b17 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -562,7 +562,9 @@ static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
}
static void enetc_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 0e87c7043b77..ed16a5ac9ad0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -8,6 +8,7 @@
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/pcs-lynx.h>
#include "enetc_ierb.h"
#include "enetc_pf.h"
@@ -828,8 +829,8 @@ static int enetc_imdio_create(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
struct enetc_mdio_priv *mdio_priv;
- struct lynx_pcs *pcs_lynx;
- struct mdio_device *pcs;
+ struct phylink_pcs *phylink_pcs;
+ struct mdio_device *mdio_device;
struct mii_bus *bus;
int err;
@@ -853,23 +854,23 @@ static int enetc_imdio_create(struct enetc_pf *pf)
goto free_mdio_bus;
}
- pcs = mdio_device_create(bus, 0);
- if (IS_ERR(pcs)) {
- err = PTR_ERR(pcs);
- dev_err(dev, "cannot create pcs (%d)\n", err);
+ mdio_device = mdio_device_create(bus, 0);
+ if (IS_ERR(mdio_device)) {
+ err = PTR_ERR(mdio_device);
+ dev_err(dev, "cannot create mdio device (%d)\n", err);
goto unregister_mdiobus;
}
- pcs_lynx = lynx_pcs_create(pcs);
- if (!pcs_lynx) {
- mdio_device_free(pcs);
+ phylink_pcs = lynx_pcs_create(mdio_device);
+ if (!phylink_pcs) {
+ mdio_device_free(mdio_device);
err = -ENOMEM;
dev_err(dev, "cannot create lynx pcs (%d)\n", err);
goto unregister_mdiobus;
}
pf->imdio = bus;
- pf->pcs = pcs_lynx;
+ pf->pcs = phylink_pcs;
return 0;
@@ -882,8 +883,11 @@ free_mdio_bus:
static void enetc_imdio_remove(struct enetc_pf *pf)
{
+ struct mdio_device *mdio_device;
+
if (pf->pcs) {
- mdio_device_free(pf->pcs->mdio);
+ mdio_device = lynx_get_mdio_device(pf->pcs);
+ mdio_device_free(mdio_device);
lynx_pcs_destroy(pf->pcs);
}
if (pf->imdio) {
@@ -930,45 +934,6 @@ static void enetc_mdiobus_destroy(struct enetc_pf *pf)
enetc_imdio_remove(pf);
}
-static void enetc_pl_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_2500BASEX &&
- state->interface != PHY_INTERFACE_MODE_USXGMII &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_USXGMII) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void enetc_pl_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -980,7 +945,7 @@ static void enetc_pl_mac_config(struct phylink_config *config,
priv = netdev_priv(pf->si->ndev);
if (pf->pcs)
- phylink_set_pcs(priv->phylink, &pf->pcs->pcs);
+ phylink_set_pcs(priv->phylink, pf->pcs);
}
static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
@@ -1096,7 +1061,7 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
- .validate = enetc_pl_mac_validate,
+ .validate = phylink_generic_validate,
.mac_config = enetc_pl_mac_config,
.mac_link_up = enetc_pl_mac_link_up,
.mac_link_down = enetc_pl_mac_link_down,
@@ -1111,6 +1076,18 @@ static int enetc_phylink_create(struct enetc_ndev_priv *priv,
pf->phylink_config.dev = &priv->ndev->dev;
pf->phylink_config.type = PHYLINK_NETDEV;
+ pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII,
+ pf->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
pf->if_mode, &enetc_mac_phylink_ops);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index 263946c51e37..c26bd66e4597 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -2,7 +2,7 @@
/* Copyright 2017-2019 NXP */
#include "enetc.h"
-#include <linux/pcs-lynx.h>
+#include <linux/phylink.h>
#define ENETC_PF_NUM_RINGS 8
@@ -46,7 +46,7 @@ struct enetc_pf {
struct mii_bus *mdio; /* saved for cleanup */
struct mii_bus *imdio;
- struct lynx_pcs *pcs;
+ struct phylink_pcs *pcs;
phy_interface_t if_mode;
struct phylink_config phylink_config;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 36b4f51dd297..17c097cef7d4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -42,15 +42,10 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
return dev_err_probe(&pdev->dev, err, "device enable failed\n");
- /* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 0536d2c76fbc..3555c12edb45 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -1182,7 +1182,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
/* parsing gate action */
- if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) {
+ if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
err = -ENOSPC;
goto free_filter;
@@ -1202,7 +1202,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
refcount_set(&sgi->refcount, 1);
- sgi->index = entryg->gate.index;
+ sgi->index = entryg->hw_index;
sgi->init_ipv = entryg->gate.prio;
sgi->basetime = entryg->gate.basetime;
sgi->cycletime = entryg->gate.cycletime;
@@ -1244,7 +1244,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
refcount_set(&fmi->refcount, 1);
fmi->cir = entryp->police.rate_bytes_ps;
fmi->cbs = entryp->police.burst;
- fmi->index = entryp->police.index;
+ fmi->index = entryp->hw_index;
filter->flags |= ENETC_PSFP_FLAGS_FMI;
filter->fmi_index = fmi->index;
sfi->meter_id = fmi->index;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b1f7f2a6130..796133de527e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1185,6 +1185,21 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
}
}
+static void fec_irqs_disable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+}
+
+static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+ writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+}
+
static void
fec_stop(struct net_device *ndev)
{
@@ -1211,15 +1226,13 @@ fec_stop(struct net_device *ndev)
writel(1, fep->hwp + FEC_ECNTRL);
udelay(10);
}
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
} else {
- writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
val = readl(fep->hwp + FEC_ECNTRL);
val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
- fec_enet_stop_mode(fep, true);
}
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
@@ -2877,15 +2890,10 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return -EINVAL;
device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
- if (device_may_wakeup(&ndev->dev)) {
+ if (device_may_wakeup(&ndev->dev))
fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
- if (fep->wake_irq > 0)
- enable_irq_wake(fep->wake_irq);
- } else {
+ else
fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
- if (fep->wake_irq > 0)
- disable_irq_wake(fep->wake_irq);
- }
return 0;
}
@@ -3558,7 +3566,7 @@ static int fec_enet_init(struct net_device *ndev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
- ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(ndev, FEC_MAX_TSO_SEGS);
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
@@ -4057,9 +4065,19 @@ static int __maybe_unused fec_suspend(struct device *dev)
netif_device_detach(ndev);
netif_tx_unlock_bh(ndev);
fec_stop(ndev);
- fec_enet_clk_enable(ndev, false);
- if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+ if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+ fec_irqs_disable(ndev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ } else {
+ fec_irqs_disable_except_wakeup(ndev);
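+		/* dedicated wake IRQ: disable normal handling and arm it as
+		 * wakeup source
+		 */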
+ if (fep->wake_irq > 0) {
+ disable_irq(fep->wake_irq);
+ enable_irq_wake(fep->wake_irq);
+ }
+ fec_enet_stop_mode(fep, true);
+ }
+ /* It's safe to disable clocks since interrupts are masked */
+ fec_enet_clk_enable(ndev, false);
}
rtnl_unlock();
@@ -4097,6 +4115,10 @@ static int __maybe_unused fec_resume(struct device *dev)
}
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
fec_enet_stop_mode(fep, false);
+ if (fep->wake_irq) {
+ disable_irq_wake(fep->wake_irq);
+ enable_irq(fep->wake_irq);
+ }
val = readl(fep->hwp + FEC_ECNTRL);
val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index bbbde9f701c2..be0bd4b44926 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -99,13 +99,13 @@ static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
netif_wake_queue(dev);
}
-static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
+static void mpc52xx_fec_set_paddr(struct net_device *dev, const u8 *mac)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
- out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
- out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+ out_be32(&fec->paddr1, *(const u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(const u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
@@ -893,13 +893,15 @@ static int mpc52xx_fec_probe(struct platform_device *op)
rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
+ u8 addr[ETH_ALEN] __aligned(4);
/*
* If the MAC address is not provided via DT then read
* it back from the controller regs
*/
- *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ *(u32 *)(&addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&addr[4]) = in_be32(&fec->paddr2) >> 16;
+ eth_hw_addr_set(ndev, addr);
}
/*
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index d71eac7e1924..af99017a5453 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -473,10 +473,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
fep->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ce0a121580f6..8f0db61cb1f6 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2727,7 +2727,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman = kzalloc(sizeof(*fman), GFP_KERNEL);
if (!fman)
- return NULL;
+ return ERR_PTR(-ENOMEM);
fm_node = of_node_get(of_dev->dev.of_node);
@@ -2740,26 +2740,21 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dts_params.id = (u8)val;
/* Get the FM interrupt */
- res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
- __func__);
+ err = platform_get_irq(of_dev, 0);
+ if (err < 0)
goto fman_node_put;
- }
- irq = res->start;
+ irq = err;
/* Get the FM error interrupt */
- res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
- if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
- __func__);
+ err = platform_get_irq(of_dev, 1);
+ if (err < 0)
goto fman_node_put;
- }
- fman->dts_params.err_irq = res->start;
+ fman->dts_params.err_irq = err;
/* Get the FM address */
res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
if (!res) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
__func__);
goto fman_node_put;
@@ -2770,6 +2765,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
__func__, fman->dts_params.id);
goto fman_node_put;
@@ -2777,6 +2773,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
clk_rate = clk_get_rate(clk);
if (!clk_rate) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
__func__, fman->dts_params.id);
goto fman_node_put;
@@ -2797,6 +2794,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the MURAM base address and size */
muram_node = of_find_matching_node(fm_node, fman_muram_match);
if (!muram_node) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
__func__);
goto fman_free;
@@ -2836,6 +2834,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
devm_request_mem_region(&of_dev->dev, phys_base_addr,
mem_size, "fman");
if (!fman->dts_params.res) {
+ err = -EBUSY;
dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
__func__);
goto fman_free;
@@ -2844,6 +2843,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dts_params.base_addr =
devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
if (!fman->dts_params.base_addr) {
+ err = -ENOMEM;
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
@@ -2868,7 +2868,7 @@ fman_node_put:
of_node_put(fm_node);
fman_free:
kfree(fman);
- return NULL;
+ return ERR_PTR(err);
}
static int fman_probe(struct platform_device *of_dev)
@@ -2880,8 +2880,8 @@ static int fman_probe(struct platform_device *of_dev)
dev = &of_dev->dev;
fman = read_dts_node(of_dev);
- if (!fman)
- return -EIO;
+ if (IS_ERR(fman))
+ return PTR_ERR(fman);
err = fman_config(fman);
if (err) {
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index d9fc5c456bf3..39ae965cd4f6 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -94,14 +94,17 @@ static void mac_exception(void *handle, enum fman_mac_exceptions ex)
__func__, ex);
}
-static void set_fman_mac_params(struct mac_device *mac_dev,
- struct fman_mac_params *params)
+static int set_fman_mac_params(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct mac_priv_s *priv = mac_dev->priv;
params->base_addr = (typeof(params->base_addr))
devm_ioremap(priv->dev, mac_dev->res->start,
resource_size(mac_dev->res));
+ if (!params->base_addr)
+ return -ENOMEM;
+
memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
params->max_speed = priv->max_speed;
params->phy_if = mac_dev->phy_if;
@@ -112,6 +115,8 @@ static void set_fman_mac_params(struct mac_device *mac_dev,
params->event_cb = mac_exception;
params->dev_id = mac_dev;
params->internal_phy_node = priv->internal_phy_node;
+
+ return 0;
}
static int tgec_initialization(struct mac_device *mac_dev)
@@ -123,7 +128,9 @@ static int tgec_initialization(struct mac_device *mac_dev)
priv = mac_dev->priv;
- set_fman_mac_params(mac_dev, &params);
+ err = set_fman_mac_params(mac_dev, &params);
+ if (err)
+ goto _return;
mac_dev->fman_mac = tgec_config(&params);
if (!mac_dev->fman_mac) {
@@ -169,7 +176,9 @@ static int dtsec_initialization(struct mac_device *mac_dev)
priv = mac_dev->priv;
- set_fman_mac_params(mac_dev, &params);
+ err = set_fman_mac_params(mac_dev, &params);
+ if (err)
+ goto _return;
mac_dev->fman_mac = dtsec_config(&params);
if (!mac_dev->fman_mac) {
@@ -218,7 +227,9 @@ static int memac_initialization(struct mac_device *mac_dev)
priv = mac_dev->priv;
- set_fman_mac_params(mac_dev, &params);
+ err = set_fman_mac_params(mac_dev, &params);
+ if (err)
+ goto _return;
if (priv->max_speed == SPEED_10000)
params.phy_if = PHY_INTERFACE_MODE_XGMII;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index acab58fd3db3..206b7a35eaf5 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2076,10 +2076,6 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 7b32ed29bf4c..ff756265d58f 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -372,7 +372,9 @@ static int gfar_scoalesce(struct net_device *dev,
* rx, rx_mini, and rx_jumbo rings are the same size, as mini and
* jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
- struct ethtool_ringparam *rvals)
+ struct ethtool_ringparam *rvals,
+ struct kernel_ethtool_ringparam *kernel_rvals,
+ struct netlink_ext_ack *extack)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -399,7 +401,9 @@ static void gfar_gringparam(struct net_device *dev,
* necessary so that we don't mess things up while we're in motion.
*/
static int gfar_sringparam(struct net_device *dev,
- struct ethtool_ringparam *rvals)
+ struct ethtool_ringparam *rvals,
+ struct kernel_ethtool_ringparam *kernel_rvals,
+ struct netlink_ext_ack *extack)
{
struct gfar_private *priv = netdev_priv(dev);
int err = 0, i;
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 14c08a868190..69b2b98b1525 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -207,7 +207,9 @@ uec_get_regs(struct net_device *netdev,
static void
uec_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
@@ -226,7 +228,9 @@ uec_get_ringparam(struct net_device *netdev,
static int
uec_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 5b8b9bcf41a2..266e562bd67a 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -51,6 +51,7 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
bool is_little_endian;
+ bool has_a009885;
bool has_a011043;
};
@@ -186,10 +187,10 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ unsigned long flags;
uint16_t dev_addr;
uint32_t mdio_stat;
uint32_t mdio_ctl;
- uint16_t value;
int ret;
bool endian = priv->is_little_endian;
@@ -221,12 +222,18 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
}
+ if (priv->has_a009885)
+ /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
+ * must read back the data register within 16 MDC cycles.
+ */
+ local_irq_save(flags);
+
/* Initiate the read */
xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
ret = xgmac_wait_until_done(&bus->dev, regs, endian);
if (ret)
- return ret;
+ goto irq_restore;
/* Return all Fs if nothing was there */
if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
@@ -234,13 +241,17 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
- return 0xffff;
+ ret = 0xffff;
+ } else {
+ ret = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", ret);
}
- value = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
- dev_dbg(&bus->dev, "read %04x\n", value);
+irq_restore:
+ if (priv->has_a009885)
+ local_irq_restore(flags);
- return value;
+ return ret;
}
static int xgmac_mdio_probe(struct platform_device *pdev)
@@ -287,6 +298,8 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = device_property_read_bool(&pdev->dev,
"little-endian");
+ priv->has_a009885 = device_property_read_bool(&pdev->dev,
+ "fsl,erratum-a009885");
priv->has_a011043 = device_property_read_bool(&pdev->dev,
"fsl,erratum-a011043");
@@ -318,9 +331,10 @@ err_ioremap:
static int xgmac_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct mdio_fsl_priv *priv = bus->priv;
mdiobus_unregister(bus);
- iounmap(bus->priv);
+ iounmap(priv->mdio_base);
mdiobus_free(bus);
return 0;
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index b719f72281c4..160735484465 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -229,6 +229,7 @@ struct gve_rx_ring {
/* A TX desc ring entry */
union gve_tx_desc {
struct gve_tx_pkt_desc pkt; /* first desc for a packet */
+ struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};
@@ -441,13 +442,13 @@ struct gve_tx_ring {
* associated with that irq.
*/
struct gve_notify_block {
- __be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
+ __be32 *irq_db_index; /* pointer to idx into Bar2 */
char name[IFNAMSIZ + 16]; /* name registered with the kernel */
struct napi_struct napi; /* kernel napi struct for this block */
struct gve_priv *priv;
struct gve_tx_ring *tx; /* tx rings on this block */
struct gve_rx_ring *rx; /* rx rings on this block */
-} ____cacheline_aligned;
+};
/* Tracks allowed and current queue settings */
struct gve_queue_config {
@@ -466,6 +467,10 @@ struct gve_options_dqo_rda {
u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};
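+/* irq doorbell index into Bar2, set by the device; one per notify block */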
+struct gve_irq_db {
+ __be32 index;
+} ____cacheline_aligned;
+
struct gve_ptype {
u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
@@ -492,7 +497,8 @@ struct gve_priv {
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
struct gve_queue_page_list *qpls; /* array of num qpls */
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
- dma_addr_t ntfy_block_bus;
+ struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
+ dma_addr_t irq_db_indices_bus;
struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
char mgmt_msix_name[IFNAMSIZ + 16];
u32 mgmt_msix_idx;
@@ -551,6 +557,8 @@ struct gve_priv {
u32 page_alloc_fail; /* count of page alloc fails */
u32 dma_mapping_error; /* count of dma mapping errors */
u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
+ u32 suspend_cnt; /* count of times suspended */
+ u32 resume_cnt; /* count of times resumed */
struct workqueue_struct *gve_wq;
struct work_struct service_task;
struct work_struct stats_report_task;
@@ -567,6 +575,7 @@ struct gve_priv {
/* Gvnic device link speed from hypervisor. */
u64 link_speed;
+ bool up_before_suspend; /* True if dev was up before suspend */
struct gve_options_dqo_rda options_dqo_rda;
struct gve_ptype_lut *ptype_lut_dqo;
@@ -575,6 +584,10 @@ struct gve_priv {
int data_buffer_size_dqo;
enum gve_queue_format queue_format;
+
+ /* Interrupt coalescing settings */
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
};
enum gve_service_task_flags_bit {
@@ -733,7 +746,7 @@ static inline void gve_clear_report_stats(struct gve_priv *priv)
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
struct gve_notify_block *block)
{
- return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
+ return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
/* Returns the index into ntfy_blocks of the given tx ring's block
@@ -830,7 +843,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction);
+ enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
/* tx handling */
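[Editorial note] The gve.h hunks above move the device-written doorbell index out of struct gve_notify_block into a dedicated, cacheline-aligned struct gve_irq_db array; each notify block now holds only a pointer into that array, so just the small index array has to live in DMA-coherent memory while the blocks themselves can come from plain kvzalloc() (see the gve_main.c hunks below). A minimal userspace sketch of the indirection, using simplified stand-ins for the kernel structs:

/* Minimal userspace model of the irq_db_index indirection above;
 * irq_db / notify_block are simplified stand-ins for the kernel
 * structs, not the driver code itself.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct irq_db { uint32_t index; };                /* device-written entry */
struct notify_block { uint32_t *irq_db_index; };  /* host-only bookkeeping */

int main(void)
{
	enum { NBLKS = 4 };
	struct irq_db *dbs = calloc(NBLKS, sizeof(*dbs));         /* the DMA array */
	struct notify_block *blks = calloc(NBLKS, sizeof(*blks)); /* kvzalloc'd */
	int i;

	if (!dbs || !blks)
		return 1;
	for (i = 0; i < NBLKS; i++)
		blks[i].irq_db_index = &dbs[i].index; /* mirrors gve_alloc_notify_blocks() */

	dbs[2].index = 7; /* pretend the device assigned doorbell slot 7 */
	printf("block 2 doorbell slot = %u\n", *blks[2].irq_db_index);
	free(blks);
	free(dbs);
	return 0;
}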
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 326b56b49216..2ad7f57f7e5b 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -462,7 +462,7 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
.num_counters = cpu_to_be32(num_counters),
.irq_db_addr = cpu_to_be64(db_array_bus_addr),
.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
- .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
+ .irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
.ntfy_blk_msix_base_idx =
cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
.queue_format = priv->queue_format,
diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h
index 4d225a18d8ce..f4ae9e19b844 100644
--- a/drivers/net/ethernet/google/gve/gve_desc.h
+++ b/drivers/net/ethernet/google/gve/gve_desc.h
@@ -33,6 +33,14 @@ struct gve_tx_pkt_desc {
__be64 seg_addr; /* Base address (see note) of this segment */
} __packed;
+struct gve_tx_mtd_desc {
+ u8 type_flags; /* type is lower 4 bits, subtype upper */
+ u8 path_state; /* state is lower 4 bits, hash type upper */
+ __be16 reserved0;
+ __be32 path_hash;
+ __be64 reserved1;
+} __packed;
+
struct gve_tx_seg_desc {
u8 type_flags; /* type is lower 4 bits, flags upper */
u8 l3_offset; /* TSO: 2 byte units to start of IPH */
@@ -46,6 +54,7 @@ struct gve_tx_seg_desc {
#define GVE_TXD_STD (0x0 << 4) /* Std with Host Address */
#define GVE_TXD_TSO (0x1 << 4) /* TSO with Host Address */
#define GVE_TXD_SEG (0x2 << 4) /* Seg with Host Address */
+#define GVE_TXD_MTD (0x3 << 4) /* Metadata */
/* GVE Transmit Descriptor Flags for Std Pkts */
#define GVE_TXF_L4CSUM BIT(0) /* Need csum offload */
@@ -54,6 +63,17 @@ struct gve_tx_seg_desc {
/* GVE Transmit Descriptor Flags for TSO Segs */
#define GVE_TXSF_IPV6 BIT(1) /* IPv6 TSO */
+/* GVE Transmit Descriptor Options for MTD Segs */
+#define GVE_MTD_SUBTYPE_PATH 0
+
+#define GVE_MTD_PATH_STATE_DEFAULT 0
+#define GVE_MTD_PATH_STATE_TIMEOUT 1
+#define GVE_MTD_PATH_STATE_CONGESTION 2
+#define GVE_MTD_PATH_STATE_RETRANSMIT 3
+
+#define GVE_MTD_PATH_HASH_NONE (0x0 << 4)
+#define GVE_MTD_PATH_HASH_L4 (0x1 << 4)
+
/* GVE Receive Packet Descriptor */
/* The start of an ethernet packet comes 2 bytes into the rx buffer.
* gVNIC adds this padding so that both the DMA and the L3/4 protocol header
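[Editorial note] gve_tx_fill_mtd_desc() later in this diff builds the new metadata descriptor by OR-ing the macros defined above. A standalone check of the resulting byte values, assuming nothing beyond the constants visible in this hunk:

/* Standalone check of the MTD descriptor byte packing above. */
#include <stdint.h>
#include <stdio.h>

#define GVE_TXD_MTD			(0x3 << 4) /* from gve_desc.h above */
#define GVE_MTD_SUBTYPE_PATH		0
#define GVE_MTD_PATH_STATE_DEFAULT	0
#define GVE_MTD_PATH_HASH_L4		(0x1 << 4)

int main(void)
{
	uint8_t type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
	uint8_t path_state = GVE_MTD_PATH_STATE_DEFAULT | GVE_MTD_PATH_HASH_L4;

	printf("type_flags=0x%02x path_state=0x%02x\n", type_flags, path_state);
	return 0; /* prints type_flags=0x30 path_state=0x10 */
}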
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index 836042364124..1eb4d5fd8561 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -18,6 +18,7 @@
#define GVE_TX_IRQ_RATELIMIT_US_DQO 50
#define GVE_RX_IRQ_RATELIMIT_US_DQO 20
+#define GVE_MAX_ITR_INTERVAL_DQO (GVE_ITR_INTERVAL_DQO_MASK * 2)
/* Timeout in seconds to wait for a reinjection completion after receiving
* its corresponding miss completion.
@@ -54,17 +55,17 @@ gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
}
/* Builds register value to write to DQO IRQ doorbell to enable with specified
- * ratelimit.
+ * ITR interval.
*/
-static inline u32 gve_set_itr_ratelimit_dqo(u32 ratelimit_us)
+static inline u32 gve_setup_itr_interval_dqo(u32 interval_us)
{
u32 result = GVE_ITR_ENABLE_BIT_DQO;
/* Interval has 2us granularity. */
- ratelimit_us >>= 1;
+ interval_us >>= 1;
- ratelimit_us &= GVE_ITR_INTERVAL_DQO_MASK;
- result |= (ratelimit_us << GVE_ITR_INTERVAL_DQO_SHIFT);
+ interval_us &= GVE_ITR_INTERVAL_DQO_MASK;
+ result |= (interval_us << GVE_ITR_INTERVAL_DQO_SHIFT);
return result;
}
@@ -73,9 +74,20 @@ static inline void
gve_write_irq_doorbell_dqo(const struct gve_priv *priv,
const struct gve_notify_block *block, u32 val)
{
- u32 index = be32_to_cpu(block->irq_db_index);
+ u32 index = be32_to_cpu(*block->irq_db_index);
iowrite32(val, &priv->db_bar2[index]);
}
+/* Sets interrupt throttling interval and enables interrupt
+ * by writing to IRQ doorbell.
+ */
+static inline void
+gve_set_itr_coalesce_usecs_dqo(struct gve_priv *priv,
+ struct gve_notify_block *block,
+ u32 usecs)
+{
+ gve_write_irq_doorbell_dqo(priv, block,
+ gve_setup_itr_interval_dqo(usecs));
+}
#endif /* _GVE_DQO_H_ */
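[Editorial note] The rename above reflects that the doorbell register encodes an interrupt-throttling interval rather than a rate limit: the value is the enable bit plus the interval in 2us units, masked to the field width and shifted into place. Because the field stores usecs/2, the largest programmable interval is GVE_ITR_INTERVAL_DQO_MASK * 2, which is exactly the bound GVE_MAX_ITR_INTERVAL_DQO that gve_set_coalesce() checks below. A small sketch of the encoding; the mask and shift values here are illustrative stand-ins, since the real ones live in gve.h outside this diff:

#include <stdint.h>
#include <stdio.h>

#define ITR_ENABLE_BIT     0x1u   /* assumed stand-in for GVE_ITR_ENABLE_BIT_DQO */
#define ITR_INTERVAL_MASK  0xFFFu /* assumed stand-in for GVE_ITR_INTERVAL_DQO_MASK */
#define ITR_INTERVAL_SHIFT 5      /* assumed stand-in for GVE_ITR_INTERVAL_DQO_SHIFT */

static uint32_t setup_itr_interval(uint32_t interval_us)
{
	uint32_t result = ITR_ENABLE_BIT;

	interval_us >>= 1;                /* hardware counts in 2us units */
	interval_us &= ITR_INTERVAL_MASK; /* clamp to the field width */
	return result | (interval_us << ITR_INTERVAL_SHIFT);
}

int main(void)
{
	/* 50us (the old TX ratelimit default) becomes 25 hardware units */
	printf("reg = 0x%x\n", setup_itr_interval(50));
	return 0;
}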
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index c8df47a97fa4..50b384910c83 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -8,6 +8,7 @@
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
+#include "gve_dqo.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -42,7 +43,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
};
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
- "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
+ "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
@@ -50,7 +51,7 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
};
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
- "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
+ "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
"tx_dma_mapping_error[%u]",
};
@@ -139,10 +140,11 @@ static void
gve_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
- tmp_rx_desc_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes;
+ u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
+ tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
+ tmp_tx_pkts, tmp_tx_bytes;
u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
- rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes;
+ rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
int stats_idx, base_stats_idx, max_stats_idx;
struct stats *report_stats;
int *rx_qid_to_stats_idx;
@@ -191,7 +193,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
}
}
- for (tx_pkts = 0, tx_bytes = 0, ring = 0;
+ for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
ring < priv->tx_cfg.num_queues; ring++) {
if (priv->tx) {
do {
@@ -203,6 +205,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
start));
tx_pkts += tmp_tx_pkts;
tx_bytes += tmp_tx_bytes;
+ tx_dropped += priv->tx[ring].dropped_pkt;
}
}
@@ -214,9 +217,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* total rx dropped packets */
data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
rx_desc_err_dropped_pkt;
- /* Skip tx_dropped */
- i++;
-
+ data[i++] = tx_dropped;
data[i++] = priv->tx_timeo_cnt;
data[i++] = rx_skb_alloc_fail;
data[i++] = rx_buf_alloc_fail;
@@ -255,6 +256,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->fill_cnt;
data[i++] = rx->cnt;
+ data[i++] = rx->fill_cnt - rx->cnt;
do {
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
@@ -318,12 +320,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (gve_is_gqi(priv)) {
data[i++] = tx->req;
data[i++] = tx->done;
+ data[i++] = tx->req - tx->done;
} else {
/* DQO doesn't currently support
* posted/completed descriptor counts.
*/
data[i++] = 0;
data[i++] = 0;
+ data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
}
do {
start =
@@ -419,7 +423,9 @@ static int gve_set_channels(struct net_device *netdev,
}
static void gve_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *cmd)
+ struct ethtool_ringparam *cmd,
+ struct kernel_ethtool_ringparam *kernel_cmd,
+ struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
@@ -535,7 +541,65 @@ static int gve_get_link_ksettings(struct net_device *netdev,
return err;
}
+static int gve_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_ec,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (gve_is_gqi(priv))
+ return -EOPNOTSUPP;
+ ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
+ ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int gve_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_ec,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u32 tx_usecs_orig = priv->tx_coalesce_usecs;
+ u32 rx_usecs_orig = priv->rx_coalesce_usecs;
+ int idx;
+
+ if (gve_is_gqi(priv))
+ return -EOPNOTSUPP;
+
+ if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
+ ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
+ return -EINVAL;
+ priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
+ priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
+
+ if (tx_usecs_orig != priv->tx_coalesce_usecs) {
+ for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->tx_coalesce_usecs);
+ }
+ }
+
+ if (rx_usecs_orig != priv->rx_coalesce_usecs) {
+ for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->rx_coalesce_usecs);
+ }
+ }
+
+ return 0;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
@@ -545,6 +609,8 @@ const struct ethtool_ops gve_ethtool_ops = {
.set_channels = gve_set_channels,
.get_channels = gve_get_channels,
.get_link = ethtool_op_get_link,
+ .get_coalesce = gve_get_coalesce,
+ .set_coalesce = gve_set_coalesce,
.get_ringparam = gve_get_ringparam,
.reset = gve_user_reset,
.get_tunable = gve_get_tunable,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 59b66f679e46..54e51c8221b8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -334,15 +334,23 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
goto abort_with_msix_enabled;
}
- priv->ntfy_blocks =
+ priv->irq_db_indices =
dma_alloc_coherent(&priv->pdev->dev,
priv->num_ntfy_blks *
- sizeof(*priv->ntfy_blocks),
- &priv->ntfy_block_bus, GFP_KERNEL);
- if (!priv->ntfy_blocks) {
+ sizeof(*priv->irq_db_indices),
+ &priv->irq_db_indices_bus, GFP_KERNEL);
+ if (!priv->irq_db_indices) {
err = -ENOMEM;
goto abort_with_mgmt_vector;
}
+
+ priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
+ sizeof(*priv->ntfy_blocks), GFP_KERNEL);
+ if (!priv->ntfy_blocks) {
+ err = -ENOMEM;
+ goto abort_with_irq_db_indices;
+ }
+
/* Setup the other blocks - the first n-1 vectors */
for (i = 0; i < priv->num_ntfy_blks; i++) {
struct gve_notify_block *block = &priv->ntfy_blocks[i];
@@ -361,6 +369,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
}
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
get_cpu_mask(i % active_cpus));
+ block->irq_db_index = &priv->irq_db_indices[i].index;
}
return 0;
abort_with_some_ntfy_blocks:
@@ -372,10 +381,13 @@ abort_with_some_ntfy_blocks:
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
}
- dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
- sizeof(*priv->ntfy_blocks),
- priv->ntfy_blocks, priv->ntfy_block_bus);
+ kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
+abort_with_irq_db_indices:
+ dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
+ sizeof(*priv->irq_db_indices),
+ priv->irq_db_indices, priv->irq_db_indices_bus);
+ priv->irq_db_indices = NULL;
abort_with_mgmt_vector:
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
@@ -403,10 +415,12 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
free_irq(priv->msix_vectors[msix_idx].vector, block);
}
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
- dma_free_coherent(&priv->pdev->dev,
- priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
- priv->ntfy_blocks, priv->ntfy_block_bus);
+ kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
+ dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
+ sizeof(*priv->irq_db_indices),
+ priv->irq_db_indices, priv->irq_db_indices_bus);
+ priv->irq_db_indices = NULL;
pci_disable_msix(priv->pdev);
kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
@@ -428,7 +442,7 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_adminq_configure_device_resources(priv,
priv->counter_array_bus,
priv->num_event_counters,
- priv->ntfy_block_bus,
+ priv->irq_db_indices_bus,
priv->num_ntfy_blks);
if (unlikely(err)) {
dev_err(&priv->pdev->dev,
@@ -752,9 +766,9 @@ static void gve_free_rings(struct gve_priv *priv)
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, gfp_t gfp_flags)
{
- *page = alloc_page(GFP_KERNEL);
+ *page = alloc_page(gfp_flags);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -797,7 +811,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
- gve_qpl_dma_dir(priv, id));
+ gve_qpl_dma_dir(priv, id), GFP_KERNEL);
/* caller handles clean up */
if (err)
return -ENOMEM;
@@ -817,8 +831,7 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
-static void gve_free_queue_page_list(struct gve_priv *priv,
- int id)
+static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
{
struct gve_queue_page_list *qpl = &priv->qpls[id];
int i;
@@ -1100,9 +1113,8 @@ static void gve_turnup(struct gve_priv *priv)
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
} else {
- u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);
-
- gve_write_irq_doorbell_dqo(priv, block, val);
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->tx_coalesce_usecs);
}
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
@@ -1113,9 +1125,8 @@ static void gve_turnup(struct gve_priv *priv)
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
} else {
- u32 val = gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO);
-
- gve_write_irq_doorbell_dqo(priv, block, val);
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->rx_coalesce_usecs);
}
}
@@ -1412,6 +1423,11 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
+ if (!gve_is_gqi(priv)) {
+ priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
+ priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
+ }
+
setup_device:
err = gve_setup_device_resources(priv);
if (!err)
@@ -1663,6 +1679,58 @@ static void gve_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static void gve_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct gve_priv *priv = netdev_priv(netdev);
+ bool was_up = netif_carrier_ok(priv->dev);
+
+ rtnl_lock();
+ if (was_up && gve_close(priv->dev)) {
+ /* If the dev was up, attempt to close; if close fails, reset */
+ gve_reset_and_teardown(priv, was_up);
+ } else {
+ /* If the dev wasn't up or close worked, finish tearing down */
+ gve_teardown_priv_resources(priv);
+ }
+ rtnl_unlock();
+}
+
+#ifdef CONFIG_PM
+static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct gve_priv *priv = netdev_priv(netdev);
+ bool was_up = netif_carrier_ok(priv->dev);
+
+ priv->suspend_cnt++;
+ rtnl_lock();
+ if (was_up && gve_close(priv->dev)) {
+ /* If the dev was up, attempt to close; if close fails, reset */
+ gve_reset_and_teardown(priv, was_up);
+ } else {
+ /* If the dev wasn't up or close worked, finish tearing down */
+ gve_teardown_priv_resources(priv);
+ }
+ priv->up_before_suspend = was_up;
+ rtnl_unlock();
+ return 0;
+}
+
+static int gve_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err;
+
+ priv->resume_cnt++;
+ rtnl_lock();
+ err = gve_reset_recovery(priv, priv->up_before_suspend);
+ rtnl_unlock();
+ return err;
+}
+#endif /* CONFIG_PM */
+
static const struct pci_device_id gve_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
{ }
@@ -1673,6 +1741,11 @@ static struct pci_driver gvnic_driver = {
.id_table = gve_id_table,
.probe = gve_probe,
.remove = gve_remove,
+ .shutdown = gve_shutdown,
+#ifdef CONFIG_PM
+ .suspend = gve_suspend,
+ .resume = gve_resume,
+#endif
};
module_pci_driver(gvnic_driver);
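[Editorial note] gve_shutdown() and gve_suspend() above share the same close-or-teardown branch verbatim. Purely as an illustration of that shared logic (not a change the patch makes), it could be factored into a helper in the driver's own idiom; gve_close(), gve_reset_and_teardown() and gve_teardown_priv_resources() are the real functions used above, while the helper name is invented:

/* Hypothetical refactor sketch, not part of the patch: the common
 * "close if up, else just tear down" branch shared by gve_shutdown()
 * and gve_suspend() above.
 */
static void gve_close_or_teardown(struct gve_priv *priv, bool was_up)
{
	if (was_up && gve_close(priv->dev)) {
		/* If the dev was up, attempt to close; if close fails, reset */
		gve_reset_and_teardown(priv, was_up);
	} else {
		/* If the dev wasn't up or close worked, finish tearing down */
		gve_teardown_priv_resources(priv);
	}
}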
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 3d04b5aff331..2068199445bd 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -86,7 +86,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
dma_addr_t dma;
int err;
- err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE);
+ err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
+ GFP_ATOMIC);
if (err)
return err;
@@ -639,8 +640,6 @@ bool gve_rx_work_pending(struct gve_rx_ring *rx)
desc = rx->desc.desc_ring + next_idx;
flags_seq = desc->flags_seq;
- /* Make sure we have synchronized the seq no with the device */
- smp_rmb();
return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index beb8bb079023..8c939628e2d8 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
int err;
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE);
+ &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
if (err)
return err;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index a9cb241fedf4..4888bf05fbed 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -296,11 +296,14 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
return bytes;
}
-/* The most descriptors we could need is MAX_SKB_FRAGS + 3 : 1 for each skb frag,
- * +1 for the skb linear portion, +1 for when tcp hdr needs to be in separate descriptor,
- * and +1 if the payload wraps to the beginning of the FIFO.
+/* The most descriptors we could need is MAX_SKB_FRAGS + 4:
+ * 1 for each skb frag
+ * 1 for the skb linear portion
+ * 1 for when tcp hdr needs to be in separate descriptor
+ * 1 if the payload wraps to the beginning of the FIFO
+ * 1 for metadata descriptor
*/
-#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 3)
+#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 4)
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
if (info->skb) {
@@ -395,6 +398,19 @@ static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}
+static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
+ struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));
+
+ mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
+ mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
+ GVE_MTD_PATH_HASH_L4;
+ mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
+ mtd_desc->mtd.reserved0 = 0;
+ mtd_desc->mtd.reserved1 = 0;
+}
+
static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
struct sk_buff *skb, bool is_gso,
u16 len, u64 addr)
@@ -426,6 +442,7 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
union gve_tx_desc *pkt_desc, *seg_desc;
struct gve_tx_buffer_state *info;
+ int mtd_desc_nr = !!skb->l4_hash;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
int payload_iov = 2;
@@ -457,7 +474,7 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
&info->iov[payload_iov]);
gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen,
+ 1 + mtd_desc_nr + payload_nfrags, hlen,
info->iov[hdr_nfrags - 1].iov_offset);
skb_copy_bits(skb, 0,
@@ -468,8 +485,13 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
info->iov[hdr_nfrags - 1].iov_len);
copy_offset = hlen;
+ if (mtd_desc_nr) {
+ next_idx = (tx->req + 1) & tx->mask;
+ gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
+ }
+
for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
- next_idx = (tx->req + 1 + i - payload_iov) & tx->mask;
+ next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
seg_desc = &tx->desc[next_idx];
gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
@@ -485,16 +507,17 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
copy_offset += info->iov[i].iov_len;
}
- return 1 + payload_nfrags;
+ return 1 + mtd_desc_nr + payload_nfrags;
}
static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
struct sk_buff *skb)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
- int hlen, payload_nfrags, l4_hdr_offset;
- union gve_tx_desc *pkt_desc, *seg_desc;
+ int hlen, num_descriptors, l4_hdr_offset;
+ union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
struct gve_tx_buffer_state *info;
+ int mtd_desc_nr = !!skb->l4_hash;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
u64 addr;
@@ -523,23 +546,30 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
dma_unmap_len_set(info, len, len);
dma_unmap_addr_set(info, dma, addr);
- payload_nfrags = shinfo->nr_frags;
+ num_descriptors = 1 + shinfo->nr_frags;
+ if (hlen < len)
+ num_descriptors++;
+ if (mtd_desc_nr)
+ num_descriptors++;
+
+ gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
+ num_descriptors, hlen, addr);
+
+ if (mtd_desc_nr) {
+ idx = (idx + 1) & tx->mask;
+ mtd_desc = &tx->desc[idx];
+ gve_tx_fill_mtd_desc(mtd_desc, skb);
+ }
+
if (hlen < len) {
/* For gso the rest of the linear portion of the skb needs to
* be in its own descriptor.
*/
- payload_nfrags++;
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen, addr);
-
len -= hlen;
addr += hlen;
- idx = (tx->req + 1) & tx->mask;
+ idx = (idx + 1) & tx->mask;
seg_desc = &tx->desc[idx];
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
- } else {
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen, addr);
}
for (i = 0; i < shinfo->nr_frags; i++) {
@@ -560,11 +590,14 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
}
- return 1 + payload_nfrags;
+ return num_descriptors;
unmap_drop:
- i += (payload_nfrags == shinfo->nr_frags ? 1 : 2);
+ i += num_descriptors - shinfo->nr_frags;
while (i--) {
+ /* Skip metadata descriptor, if set */
+ if (i == 1 && mtd_desc_nr == 1)
+ continue;
idx--;
gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
}
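[Editorial note] With the optional metadata descriptor, the worst case per packet in gve_tx_add_skb_no_copy() above is one packet descriptor, one per fragment, one extra segment descriptor when the linear data is split (hlen < len), and one MTD descriptor when the skb carries an L4 hash, hence the MAX_SKB_FRAGS + 4 bound. The same arithmetic, reduced to a runnable sketch:

/* Runnable sketch of the descriptor-count logic in
 * gve_tx_add_skb_no_copy() above.
 */
#include <stdbool.h>
#include <stdio.h>

static int num_tx_descs(int nr_frags, bool has_l4_hash, int hlen, int len)
{
	int n = 1 + nr_frags;   /* packet descriptor + one per frag */

	if (hlen < len)
		n++;            /* linear tail needs its own seg descriptor */
	if (has_l4_hash)
		n++;            /* optional metadata (MTD) descriptor */
	return n;
}

int main(void)
{
	/* GSO skb with 3 frags, a hashed flow and split linear data */
	printf("descs = %d\n", num_tx_descs(3, true, 54, 200)); /* -> 6 */
	return 0;
}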
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index ab7390225942..d7a27c244d48 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -663,9 +663,13 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
* hns_get_ringparam - get ring parameter
* @net_dev: net device
* @param: ethtool parameter
+ * @kernel_param: ethtool external parameter
+ * @extack: netlink extended ACK report struct
*/
static void hns_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 7aa2fac76c5e..6efea4662858 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -4,9 +4,9 @@
#
ccflags-y += -I$(srctree)/$(src)
-
-obj-$(CONFIG_HNS3) += hns3pf/
-obj-$(CONFIG_HNS3) += hns3vf/
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3vf
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common
obj-$(CONFIG_HNS3) += hnae3.o
@@ -14,3 +14,16 @@ obj-$(CONFIG_HNS3_ENET) += hns3.o
hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+
+hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \
+ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+
+obj-$(CONFIG_HNS3_HCLGE) += hclge.o
+hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \
+ hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
+ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+
+
+hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index c2bd2584201f..b668df6193be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -80,6 +80,9 @@ enum hclge_mbx_tbl_cfg_subcode {
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
+#define HCLGE_RESET_SCHED_TIMEOUT (3 * HZ)
+#define HCLGE_MBX_SCHED_TIMEOUT (HZ / 2)
+
struct hclge_ring_chain_param {
u8 ring_type;
u8 tqp_index;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 63f5abcc6bf4..9298fbecb31a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -861,6 +861,20 @@ struct hnae3_handle {
#define hnae3_get_bit(origin, shift) \
hnae3_get_field(origin, 0x1 << (shift), shift)
+#define HNAE3_FORMAT_MAC_ADDR_LEN 18
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_0 0
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_4 4
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_5 5
+
+static inline void hnae3_format_mac_addr(char *format_mac_addr,
+ const u8 *mac_addr)
+{
+ snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x",
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_0],
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_4],
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_5]);
+}
+
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
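[Editorial note] hnae3_format_mac_addr() above prints a partially masked MAC address (only bytes 0, 4 and 5 are shown), so logs no longer expose the full address. A standalone userspace equivalent of the same format string:

/* Userspace equivalent of the masking done by hnae3_format_mac_addr(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
	char buf[18]; /* matches HNAE3_FORMAT_MAC_ADDR_LEN */

	/* middle bytes are masked, mirroring the kernel helper */
	snprintf(buf, sizeof(buf), "%02x:**:**:**:%02x:%02x",
		 mac[0], mac[4], mac[5]);
	printf("%s\n", buf); /* 00:**:**:**:4d:5e */
	return 0;
}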
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
new file mode 100644
index 000000000000..c15ca710dabb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
+ struct hclge_comm_cmq_ring *ring)
+{
+ dma_addr_t dma = ring->desc_dma_addr;
+ u32 reg_val;
+
+ if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ lower_32_bits(dma));
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
+ reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ lower_32_bits(dma));
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
+{
+ hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
+ hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
+}
+
+void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
+{
+ desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
+}
+
+static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
+ bool is_pf)
+{
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+ if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
+ }
+}
+
+void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
+ enum hclge_opcode_type opcode,
+ bool is_read)
+{
+ memset((void *)desc, 0, sizeof(struct hclge_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
+
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
+}
+
+int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, bool en)
+{
+ struct hclge_comm_firmware_compat_cmd *req;
+ struct hclge_desc desc;
+ u32 compat = 0;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
+
+ if (en) {
+ req = (struct hclge_comm_firmware_compat_cmd *)desc.data;
+
+ hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
+ if (hclge_comm_dev_phy_imp_supported(ae_dev))
+ hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
+
+ req->compat = cpu_to_le32(compat);
+ }
+
+ return hclge_comm_cmd_send(hw, &desc, 1);
+}
+
+void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclge_desc);
+
+ if (!ring->desc)
+ return;
+
+ dma_free_coherent(&ring->pdev->dev, size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+}
+
+static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclge_desc);
+
+ ring->desc = dma_alloc_coherent(&ring->pdev->dev,
+ size, &ring->desc_dma_addr, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static __le32 hclge_comm_build_api_caps(void)
+{
+ u32 api_caps = 0;
+
+ hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return cpu_to_le32(api_caps);
+}
+
+static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
+ {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+ {HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
+ {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+ {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+ {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+ {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+ {HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
+ {HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
+ {HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
+ {HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
+ {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
+ {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
+ {HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
+ {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
+ HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
+ {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+};
+
+static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
+ {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+ {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+ {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+ {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+ {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+ {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
+ {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
+ {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+};
+
+static void
+hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
+ struct hclge_comm_query_version_cmd *cmd)
+{
+ const struct hclge_comm_caps_bit_map *caps_map =
+ is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
+ u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
+ ARRAY_SIZE(hclge_vf_cmd_caps);
+ u32 caps, i;
+
+ caps = __le32_to_cpu(cmd->caps[0]);
+ for (i = 0; i < size; i++)
+ if (hnae3_get_bit(caps, caps_map[i].imp_bit))
+ set_bit(caps_map[i].local_bit, ae_dev->caps);
+}
+
+int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
+{
+ struct hclge_comm_cmq_ring *ring =
+ (ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
+ &hw->cmq.crq;
+ int ret;
+
+ ring->ring_type = ring_type;
+
+ ret = hclge_comm_alloc_cmd_desc(ring);
+ if (ret)
+ dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
+ (ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
+ ret);
+
+ return ret;
+}
+
+int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf)
+{
+ struct hclge_comm_query_version_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
+ resp = (struct hclge_comm_query_version_cmd *)desc.data;
+ resp->api_caps = hclge_comm_build_api_caps();
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ *fw_version = le32_to_cpu(resp->firmware);
+
+ ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+ HNAE3_PCI_REVISION_BIT_SIZE;
+ ae_dev->dev_version |= ae_dev->pdev->revision;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ hclge_comm_set_default_capability(ae_dev, is_pf);
+
+ hclge_comm_parse_capability(ae_dev, is_pf, resp);
+
+ return ret;
+}
+
+static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT,
+ HCLGE_OPC_STATS_MAC,
+ HCLGE_OPC_STATS_MAC_ALL,
+ HCLGE_OPC_QUERY_32_BIT_REG,
+ HCLGE_OPC_QUERY_64_BIT_REG,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ HCLGE_QUERY_ALL_ERR_INFO };
+
+static bool hclge_comm_is_special_opcode(u16 opcode)
+{
+ /* these commands have several descriptors,
+ * and use the first one to store the opcode and return value
+ */
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
+ if (spec_opcode[i] == opcode)
+ return true;
+
+ return false;
+}
+
+static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
+
+static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc, int num)
+{
+ struct hclge_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+}
+
+static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
+ int head)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
+
+static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
+ int clean;
+ u32 head;
+
+ head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+ rmb(); /* Make sure head is ready before touching any data */
+
+ if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
+ head, csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "IMP firmware watchdog reset soon expected!\n");
+ return -EIO;
+ }
+
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
+ return clean;
+}
+
+static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
+{
+ u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+ return head == hw->cmq.csq.next_to_use;
+}
+
+static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
+ bool *is_completed)
+{
+ u32 timeout = 0;
+
+ do {
+ if (hclge_comm_cmd_csq_done(hw)) {
+ *is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+}
+
+static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
+{
+ struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
+ { HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
+ { HCLGE_COMM_CMD_NO_AUTH, -EPERM },
+ { HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
+ { HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
+ { HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
+ { HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
+ { HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
+ { HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
+ { HCLGE_COMM_CMD_TIMEOUT, -ETIME },
+ { HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
+ { HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
+ { HCLGE_COMM_CMD_INVALID, -EBADR },
+ };
+ u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclge_comm_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
+static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc, int num,
+ int ntc)
+{
+ u16 opcode, desc_ret;
+ int handle;
+
+ opcode = le16_to_cpu(desc[0].opcode);
+ for (handle = 0; handle < num; handle++) {
+ desc[handle] = hw->cmq.csq.desc[ntc];
+ ntc++;
+ if (ntc >= hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ if (likely(!hclge_comm_is_special_opcode(opcode)))
+ desc_ret = le16_to_cpu(desc[num - 1].retval);
+ else
+ desc_ret = le16_to_cpu(desc[0].retval);
+
+ hw->cmq.last_status = desc_ret;
+
+ return hclge_comm_cmd_convert_err_code(desc_ret);
+}
+
+static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int num, int ntc)
+{
+ bool is_completed = false;
+ int handle, ret;
+
+ /* If the command is sync, wait for the firmware to write back;
+ * if multiple descriptors are to be sent, use the first one to check.
+ */
+ if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ hclge_comm_wait_for_resp(hw, &is_completed);
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclge_comm_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
+}
+
+/**
+ * hclge_comm_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send routine for the command queue; it posts
+ * the descriptors to the queue, waits for completion and cleans
+ * the queue.
+ **/
+int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num)
+{
+ struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
+ int ret;
+ int ntc;
+
+ spin_lock_bh(&hw->cmq.csq.lock);
+
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
+ /* If the CMDQ ring is full, SW HEAD and HW HEAD may differ,
+ * so the SW HEAD pointer csq->next_to_clean needs updating.
+ */
+ csq->next_to_clean =
+ hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ /* Record the location of desc in the ring for this time,
+ * which will be used by hardware for the write-back.
+ */
+ ntc = hw->cmq.csq.next_to_use;
+
+ hclge_comm_cmd_copy_desc(hw, desc, num);
+
+ /* Write to hardware */
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ hw->cmq.csq.next_to_use);
+
+ ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);
+
+ spin_unlock_bh(&hw->cmq.csq.lock);
+
+ return ret;
+}
+
+static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
+{
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
+}
+
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+
+ hclge_comm_firmware_compat_config(ae_dev, hw, false);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+ /* wait to ensure that the firmware completes any leftover
+ * commands.
+ */
+ msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
+ spin_lock_bh(&cmdq->csq.lock);
+ spin_lock(&cmdq->crq.lock);
+ hclge_comm_cmd_uninit_regs(hw);
+ spin_unlock(&cmdq->crq.lock);
+ spin_unlock_bh(&cmdq->csq.lock);
+
+ hclge_comm_free_cmd_desc(&cmdq->csq);
+ hclge_comm_free_cmd_desc(&cmdq->crq);
+}
+
+int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+ int ret;
+
+ /* Setup the lock for command queue */
+ spin_lock_init(&cmdq->csq.lock);
+ spin_lock_init(&cmdq->crq.lock);
+
+ cmdq->csq.pdev = pdev;
+ cmdq->crq.pdev = pdev;
+
+ /* Setup the queue entries for the cmd queue */
+ cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+ cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+
+ /* Setup Tx write back timeout */
+ cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;
+
+ /* Setup queue rings */
+ ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
+ if (ret) {
+ dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
+ if (ret) {
+ dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
+ goto err_csq;
+ }
+
+ return 0;
+err_csq:
+ hclge_comm_free_cmd_desc(&hw->cmq.csq);
+ return ret;
+}
+
+int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf,
+ unsigned long reset_pending)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+ int ret;
+
+ spin_lock_bh(&cmdq->csq.lock);
+ spin_lock(&cmdq->crq.lock);
+
+ cmdq->csq.next_to_clean = 0;
+ cmdq->csq.next_to_use = 0;
+ cmdq->crq.next_to_clean = 0;
+ cmdq->crq.next_to_use = 0;
+
+ hclge_comm_cmd_init_regs(hw);
+
+ spin_unlock(&cmdq->crq.lock);
+ spin_unlock_bh(&cmdq->csq.lock);
+
+ clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+ /* Check if a new reset is pending, because a higher level
+ * reset may happen while a lower level reset is being processed.
+ */
+ if (reset_pending) {
+ ret = -EBUSY;
+ goto err_cmd_init;
+ }
+
+ /* get version and device capabilities */
+ ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
+ fw_version, is_pf);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "failed to query version and capabilities, ret = %d\n",
+ ret);
+ goto err_cmd_init;
+ }
+
+ dev_info(&ae_dev->pdev->dev,
+ "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return 0;
+
+ /* Ask the firmware to enable some features; the driver can work
+ * without them.
+ */
+ ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
+ if (ret)
+ dev_warn(&ae_dev->pdev->dev,
+ "Firmware compatible features not enabled(%d).\n",
+ ret);
+ return 0;
+
+err_cmd_init:
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+ return ret;
+}
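[Editorial note] hclge_comm_ring_space() and hclge_comm_cmd_csq_clean() above use the usual circular-buffer arithmetic: occupancy is (ntu - ntc + desc_num) % desc_num, and one slot is always kept free so a full ring can be told apart from an empty one. A runnable sketch of that calculation:

/* Runnable sketch of the CSQ ring-space arithmetic above. */
#include <stdio.h>

static int ring_space(int ntc, int ntu, int desc_num)
{
	int used = (ntu - ntc + desc_num) % desc_num;

	return desc_num - used - 1; /* one slot kept free to tell full from empty */
}

int main(void)
{
	/* 1024-entry CSQ with next_to_clean at 1020 and next_to_use at 10 */
	printf("space = %d\n", ring_space(1020, 10, 1024)); /* -> 1009 */
	return 0;
}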
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
new file mode 100644
index 000000000000..876650eddac4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_CMD_H
+#define __HCLGE_COMM_CMD_H
+#include <linux/types.h>
+
+#include "hnae3.h"
+
+#define HCLGE_COMM_CMD_FLAG_IN BIT(0)
+#define HCLGE_COMM_CMD_FLAG_NEXT BIT(2)
+#define HCLGE_COMM_CMD_FLAG_WR BIT(3)
+#define HCLGE_COMM_CMD_FLAG_NO_INTR BIT(4)
+
+#define HCLGE_COMM_SEND_SYNC(flag) \
+ ((flag) & HCLGE_COMM_CMD_FLAG_NO_INTR)
+
+#define HCLGE_COMM_LINK_EVENT_REPORT_EN_B 0
+#define HCLGE_COMM_NCSI_ERROR_REPORT_EN_B 1
+#define HCLGE_COMM_PHY_IMP_EN_B 2
+#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3
+#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4
+
+#define hclge_comm_dev_phy_imp_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps)
+
+#define HCLGE_COMM_TYPE_CRQ 0
+#define HCLGE_COMM_TYPE_CSQ 1
+
+#define HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME 200
+
+/* bar registers for cmdq */
+#define HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGE_COMM_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGE_COMM_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGE_COMM_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG 0x2701C
+#define HCLGE_COMM_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGE_COMM_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGE_COMM_NIC_CRQ_HEAD_REG 0x27028
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_COMM_VECTOR0_CMDQ_SRC_REG 0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGE_COMM_VECTOR0_CMDQ_STATE_REG 0x27104
+#define HCLGE_COMM_CMDQ_INTR_EN_REG 0x27108
+#define HCLGE_COMM_CMDQ_INTR_GEN_REG 0x2710C
+#define HCLGE_COMM_CMDQ_INTR_STS_REG 0x27104
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGE_COMM_NIC_SW_RST_RDY_B 16
+#define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
+#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
+#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000
+
+enum hclge_opcode_type {
+ /* Generic commands */
+ HCLGE_OPC_QUERY_FW_VER = 0x0001,
+ HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
+ HCLGE_OPC_GBL_RST_STATUS = 0x0021,
+ HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
+ HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
+ HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
+ HCLGE_OPC_GET_CFG_PARAM = 0x0025,
+ HCLGE_OPC_PF_RST_DONE = 0x0026,
+ HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
+
+ HCLGE_OPC_STATS_64_BIT = 0x0030,
+ HCLGE_OPC_STATS_32_BIT = 0x0031,
+ HCLGE_OPC_STATS_MAC = 0x0032,
+ HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
+ HCLGE_OPC_STATS_MAC_ALL = 0x0034,
+
+ HCLGE_OPC_QUERY_REG_NUM = 0x0040,
+ HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
+ HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
+ HCLGE_OPC_DFX_BD_NUM = 0x0043,
+ HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
+ HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
+ HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
+ HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
+ HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
+ HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
+ HCLGE_OPC_DFX_NCSI_REG = 0x004A,
+ HCLGE_OPC_DFX_RTC_REG = 0x004B,
+ HCLGE_OPC_DFX_PPP_REG = 0x004C,
+ HCLGE_OPC_DFX_RCB_REG = 0x004D,
+ HCLGE_OPC_DFX_TQP_REG = 0x004E,
+ HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
+
+ HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
+
+ /* MAC command */
+ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
+ HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
+ HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
+ HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
+ HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
+ HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
+ HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
+ HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
+ HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
+ HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
+
+ /* PTP commands */
+ HCLGE_OPC_PTP_INT_EN = 0x0501,
+ HCLGE_OPC_PTP_MODE_CFG = 0x0507,
+
+ /* PFC/Pause commands */
+ HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
+ HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
+ HCLGE_OPC_CFG_MAC_PARA = 0x0703,
+ HCLGE_OPC_CFG_PFC_PARA = 0x0704,
+ HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
+ HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
+ HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
+ HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
+ HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
+ HCLGE_OPC_QOS_MAP = 0x070A,
+
+ /* ETS/scheduler commands */
+ HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
+ HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
+ HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
+ HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
+ HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
+ HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
+ HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
+ HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
+ HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
+ HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
+ HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
+ HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
+ HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
+ HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
+ HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
+ HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
+ HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_TM_NODES = 0x0816,
+ HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
+ HCLGE_OPC_QSET_DFX_STS = 0x0844,
+ HCLGE_OPC_PRI_DFX_STS = 0x0845,
+ HCLGE_OPC_PG_DFX_STS = 0x0846,
+ HCLGE_OPC_PORT_DFX_STS = 0x0847,
+ HCLGE_OPC_SCH_NQ_CNT = 0x0848,
+ HCLGE_OPC_SCH_RQ_CNT = 0x0849,
+ HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
+ HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
+ HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
+
+ /* Packet buffer allocate commands */
+ HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
+ HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
+ HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
+ HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
+ HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
+ HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
+
+ /* TQP management command */
+ HCLGE_OPC_SET_TQP_MAP = 0x0A01,
+
+ /* TQP commands */
+ HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
+ HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
+ HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
+ HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
+ HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
+ HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
+ HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
+ HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
+ HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
+ HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
+
+ /* PPU commands */
+ HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
+
+ /* TSO command */
+ HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
+
+ /* RSS commands */
+ HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HCLGE_OPC_RSS_TC_MODE = 0x0D08,
+ HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
+
+ /* Promiscuous mode command */
+ HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
+
+ /* Vlan offload commands */
+ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+ HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
+ /* Interrupts commands */
+ HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
+ HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
+
+ /* MAC commands */
+ HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
+ HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
+ HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
+ HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
+ HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
+ HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
+ HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+
+ /* MAC VLAN commands */
+ HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
+
+ /* VLAN commands */
+ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
+ HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
+ HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+ HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
+
+ /* Flow Director commands */
+ HCLGE_OPC_FD_MODE_CTRL = 0x1200,
+ HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
+ HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
+ HCLGE_OPC_FD_TCAM_OP = 0x1203,
+ HCLGE_OPC_FD_AD_OP = 0x1204,
+ HCLGE_OPC_FD_CNT_OP = 0x1205,
+ HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
+ HCLGE_OPC_FD_QB_CTRL = 0x1210,
+ HCLGE_OPC_FD_QB_AD_OP = 0x1211,
+
+ /* MDIO command */
+ HCLGE_OPC_MDIO_CONFIG = 0x1900,
+
+ /* QCN commands */
+ HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
+ HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
+ HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
+ HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
+ HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
+ HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
+ HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
+ HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
+
+ /* Mailbox command */
+ HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
+ HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
+
+ /* Led command */
+ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+
+ /* clear hardware resource command */
+ HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
+
+ /* NCL config command */
+ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+
+ /* IMP stats command */
+ HCLGE_OPC_IMP_STATS_BD = 0x7012,
+ HCLGE_OPC_IMP_STATS_INFO = 0x7013,
+ HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
+
+ /* SFP command */
+ HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
+ HCLGE_OPC_GET_SFP_EXIST = 0x7101,
+ HCLGE_OPC_GET_SFP_INFO = 0x7104,
+
+ /* Error INT commands */
+ HCLGE_MAC_COMMON_INT_EN = 0x030E,
+ HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
+ HCLGE_SSU_ECC_INT_CMD = 0x0989,
+ HCLGE_SSU_COMMON_INT_CMD = 0x098C,
+ HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
+ HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
+ HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
+ HCLGE_COMMON_ECC_INT_CFG = 0x1505,
+ HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
+ HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+ HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
+ HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
+ HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
+ HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
+ HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
+ HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
+ HCLGE_IGU_COMMON_INT_EN = 0x1806,
+ HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
+ HCLGE_PPP_CMD0_INT_CMD = 0x2100,
+ HCLGE_PPP_CMD1_INT_CMD = 0x2101,
+ HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
+ HCLGE_NCSI_INT_EN = 0x2401,
+
+ /* ROH MAC commands */
+ HCLGE_OPC_MAC_ADDR_CHECK = 0x9004,
+
+ /* PHY command */
+ HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
+ HCLGE_OPC_PHY_REG = 0x7026,
+
+ /* Query link diagnosis info command */
+ HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
+};
+
+enum hclge_comm_cmd_return_status {
+ HCLGE_COMM_CMD_EXEC_SUCCESS = 0,
+ HCLGE_COMM_CMD_NO_AUTH = 1,
+ HCLGE_COMM_CMD_NOT_SUPPORTED = 2,
+ HCLGE_COMM_CMD_QUEUE_FULL = 3,
+ HCLGE_COMM_CMD_NEXT_ERR = 4,
+ HCLGE_COMM_CMD_UNEXE_ERR = 5,
+ HCLGE_COMM_CMD_PARA_ERR = 6,
+ HCLGE_COMM_CMD_RESULT_ERR = 7,
+ HCLGE_COMM_CMD_TIMEOUT = 8,
+ HCLGE_COMM_CMD_HILINK_ERR = 9,
+ HCLGE_COMM_CMD_QUEUE_ILLEGAL = 10,
+ HCLGE_COMM_CMD_INVALID = 11,
+};
+
+enum HCLGE_COMM_CAP_BITS {
+ HCLGE_COMM_CAP_UDP_GSO_B,
+ HCLGE_COMM_CAP_QB_B,
+ HCLGE_COMM_CAP_FD_FORWARD_TC_B,
+ HCLGE_COMM_CAP_PTP_B,
+ HCLGE_COMM_CAP_INT_QL_B,
+ HCLGE_COMM_CAP_HW_TX_CSUM_B,
+ HCLGE_COMM_CAP_TX_PUSH_B,
+ HCLGE_COMM_CAP_PHY_IMP_B,
+ HCLGE_COMM_CAP_TQP_TXRX_INDEP_B,
+ HCLGE_COMM_CAP_HW_PAD_B,
+ HCLGE_COMM_CAP_STASH_B,
+ HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B,
+ HCLGE_COMM_CAP_RAS_IMP_B = 12,
+ HCLGE_COMM_CAP_FEC_B = 13,
+ HCLGE_COMM_CAP_PAUSE_B = 14,
+ HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
+ HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
+};
+
+enum HCLGE_COMM_API_CAP_BITS {
+ HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B,
+};
+
+/* capability bit mapping between the IMP firmware and the local driver */
+struct hclge_comm_caps_bit_map {
+ u16 imp_bit;
+ u16 local_bit;
+};
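
The table above feeds a firmware-to-driver capability translation. A minimal sketch of how such a map can be walked (hypothetical helper and caller-owned bitmaps, not part of this patch):

	/* Hypothetical sketch: for every {imp_bit, local_bit} pair, test the
	 * firmware bit in the __le32 caps words and mirror it into the
	 * driver's local capability bitmap.
	 */
	static void caps_translate(const __le32 *fw_caps,
				   const struct hclge_comm_caps_bit_map *map,
				   int n, unsigned long *local_caps)
	{
		int i;

		for (i = 0; i < n; i++)
			if (le32_to_cpu(fw_caps[map[i].imp_bit / 32]) &
			    BIT(map[i].imp_bit % 32))
				set_bit(map[i].local_bit, local_caps);
	}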
+
+struct hclge_comm_firmware_compat_cmd {
+ __le32 compat;
+ u8 rsv[20];
+};
+
+enum hclge_comm_cmd_state {
+ HCLGE_COMM_STATE_CMD_DISABLE,
+};
+
+struct hclge_comm_errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+#define HCLGE_COMM_QUERY_CAP_LENGTH 3
+struct hclge_comm_query_version_cmd {
+ __le32 firmware;
+ __le32 hardware;
+ __le32 api_caps;
+ __le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */
+};
+
+#define HCLGE_DESC_DATA_LEN 6
+struct hclge_desc {
+ __le16 opcode;
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ __le32 data[HCLGE_DESC_DATA_LEN];
+};
+
+struct hclge_comm_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hclge_desc *desc;
+ struct pci_dev *pdev;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 ring_type; /* cmq ring type */
+ spinlock_t lock; /* Command queue lock */
+};
+
+enum hclge_comm_cmd_status {
+ HCLGE_COMM_STATUS_SUCCESS = 0,
+ HCLGE_COMM_ERR_CSQ_FULL = -1,
+ HCLGE_COMM_ERR_CSQ_TIMEOUT = -2,
+ HCLGE_COMM_ERR_CSQ_ERROR = -3,
+};
+
+struct hclge_comm_cmq {
+ struct hclge_comm_cmq_ring csq;
+ struct hclge_comm_cmq_ring crq;
+ u16 tx_timeout;
+ enum hclge_comm_cmd_status last_status;
+};
+
+struct hclge_comm_hw {
+ void __iomem *io_base;
+ void __iomem *mem_base;
+ struct hclge_comm_cmq cmq;
+ unsigned long comm_state;
+};
+
+static inline void hclge_comm_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+static inline u32 hclge_comm_read_reg(u8 __iomem *base, u32 reg)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define hclge_comm_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->io_base, reg, value)
+#define hclge_comm_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->io_base, reg)
+
+void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw);
+int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf);
+int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type);
+int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num);
+void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
+int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, bool en);
+void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring);
+void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
+ enum hclge_opcode_type opcode,
+ bool is_read);
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw);
+int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
+int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf,
+ unsigned long reset_pending);
+
+#endif
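
Taken together, the declarations above imply a setup order: allocate the command queue rings, run the full init (firmware version and capability negotiation), then send descriptors. A hedged sketch of that order, error handling elided, with ret, fw_version, pdev, hw and ae_dev assumed to come from the caller's probe path:

	struct hclge_desc desc;

	ret = hclge_comm_cmd_queue_init(pdev, hw);	/* allocate CSQ/CRQ rings */
	ret = hclge_comm_cmd_init(ae_dev, hw, &fw_version, true /* is_pf */, 0);

	/* one-descriptor read query using an opcode from the enum above */
	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_comm_cmd_send(hw, &desc, 1);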
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
new file mode 100644
index 000000000000..e23729ac3bb8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+#include <linux/skbuff.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+#include "hclge_comm_rss.h"
+
+static const u8 hclge_comm_hash_key[] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
+static void
+hclge_comm_init_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_tuple_cfg *rss_tuple_cfg)
+{
+ rss_tuple_cfg->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
+ rss_tuple_cfg->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_sctp_en =
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
+ rss_tuple_cfg->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+}
+
+int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+ u16 rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size;
+ int rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
+ u16 *rss_ind_tbl;
+
+ if (nic->flags & HNAE3_SUPPORT_VF)
+ rss_cfg->rss_size = nic->kinfo.rss_size;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
+
+ hclge_comm_init_rss_tuple(ae_dev, &rss_cfg->rss_tuple_sets);
+
+ rss_cfg->rss_algo = rss_algo;
+
+ rss_ind_tbl = devm_kcalloc(&ae_dev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ rss_cfg->rss_indirection_tbl = rss_ind_tbl;
+ memcpy(rss_cfg->rss_hash_key, hclge_comm_hash_key,
+ HCLGE_COMM_RSS_KEY_SIZE);
+
+ hclge_comm_rss_indir_init_cfg(ae_dev, rss_cfg);
+
+ return 0;
+}
+
+void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size)
+{
+ u16 roundup_size;
+ u32 i;
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
+ }
+}
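
The tc_size written to hardware is the log2 of rss_size rounded up to a power of two; a worked instance of the arithmetic (illustrative value only):

	u16 rss_size = 6;					/* illustrative */
	u16 tc_size = ilog2(roundup_pow_of_two(rss_size));	/* == 3 */
	/* each valid TC then spans 2^3 = 8 RSS entries, offset by
	 * rss_size * i when TC i is present in hw_tc_map
	 */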
+
+int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size)
+{
+ struct hclge_comm_rss_tc_mode_cmd *req;
+ struct hclge_desc desc;
+ unsigned int i;
+ int ret;
+
+ req = (struct hclge_comm_rss_tc_mode_cmd *)desc.data;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
+ u16 mode = 0;
+
+ hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae3_set_field(mode, HCLGE_COMM_RSS_TC_SIZE_M,
+ HCLGE_COMM_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_SIZE_MSB_B,
+ tc_size[i] >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET &
+ 0x1);
+ hnae3_set_field(mode, HCLGE_COMM_RSS_TC_OFFSET_M,
+ HCLGE_COMM_RSS_TC_OFFSET_S, tc_offset[i]);
+
+ req->rss_tc_mode[i] = cpu_to_le16(mode);
+ }
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to set rss tc mode, ret = %d.\n", ret);
+
+ return ret;
+}
+
+int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
+ struct hclge_comm_hw *hw, const u8 *key,
+ const u8 hfunc)
+{
+ u8 hash_algo;
+ int ret;
+
+ ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo);
+ if (ret)
+ return ret;
+
+	/* Set the RSS Hash Key if specified by the user */
+ if (key) {
+ ret = hclge_comm_set_rss_algo_key(hw, hash_algo, key);
+ if (ret)
+ return ret;
+
+		/* Update the shadow RSS key with the user specified key */
+ memcpy(rss_cfg->rss_hash_key, key, HCLGE_COMM_RSS_KEY_SIZE);
+ } else {
+ ret = hclge_comm_set_rss_algo_key(hw, hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
+ }
+ rss_cfg->rss_algo = hash_algo;
+
+ return 0;
+}
+
+int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_comm_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
+ false);
+
+ ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, ae_dev, req);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to init rss tuple cmd, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to set rss tuple, ret = %d.\n", ret);
+ return ret;
+ }
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ return 0;
+}
+
+u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
+{
+ return HCLGE_COMM_RSS_KEY_SIZE;
+}
+
+void hclge_comm_get_rss_type(struct hnae3_handle *nic,
+ struct hclge_comm_rss_tuple_cfg *rss_tuple_sets)
+{
+ if (rss_tuple_sets->ipv4_tcp_en ||
+ rss_tuple_sets->ipv4_udp_en ||
+ rss_tuple_sets->ipv4_sctp_en ||
+ rss_tuple_sets->ipv6_tcp_en ||
+ rss_tuple_sets->ipv6_udp_en ||
+ rss_tuple_sets->ipv6_sctp_en)
+ nic->kinfo.rss_type = PKT_HASH_TYPE_L4;
+ else if (rss_tuple_sets->ipv4_fragment_en ||
+ rss_tuple_sets->ipv6_fragment_en)
+ nic->kinfo.rss_type = PKT_HASH_TYPE_L3;
+ else
+ nic->kinfo.rss_type = PKT_HASH_TYPE_NONE;
+}
+
+int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
+ const u8 hfunc, u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = rss_cfg->rss_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+	u16 i;
+
+	/* initialize the RSS indirection table */
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+}
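
The shadow table is filled round-robin over the active queues; a worked example with hypothetical sizes:

	/* e.g. with rss_ind_tbl_size = 8 and rss_size = 3 the loop yields
	 *   rss_indirection_tbl = { 0, 1, 2, 0, 1, 2, 0, 1 }
	 * so hash buckets map evenly onto queues 0..2.
	 */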
+
+int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
+ u8 *tuple_sets)
+{
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ *tuple_sets = HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
+ u16 qid, u32 j)
+{
+ u8 rss_msb_oft;
+ u8 rss_msb_val;
+
+ rss_msb_oft =
+ j * HCLGE_COMM_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
+ rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) <<
+ (j * HCLGE_COMM_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
+ req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
+}
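
Only the low 8 bits of a queue id fit rss_qid_l, so the helper above parks the queue id's bit 8 in rss_qid_h, two bit positions per entry. A worked instance:

	/* j = 5, qid = 300 (0b1_0010_1100):
	 *   rss_msb_oft = 5 * 2 / 8                     = 1  (rss_qid_h[1])
	 *   rss_msb_val = (300 >> 8 & 0x1) << (5 * 2 % 8) = 1 << 2
	 * so bit 2 of rss_qid_h[1] records qid bit 8 for entry 5.
	 */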
+
+int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, const u16 *indir)
+{
+ struct hclge_comm_rss_ind_tbl_cmd *req;
+ struct hclge_desc desc;
+ u16 rss_cfg_tbl_num;
+ int ret;
+ u16 qid;
+ u16 i;
+ u32 j;
+
+ req = (struct hclge_comm_rss_ind_tbl_cmd *)desc.data;
+ rss_cfg_tbl_num = ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGE_COMM_RSS_CFG_TBL_SIZE;
+
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
+ hclge_comm_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_RSS_INDIR_TABLE,
+ false);
+
+ req->start_table_index =
+ cpu_to_le16(i * HCLGE_COMM_RSS_CFG_TBL_SIZE);
+ req->rss_set_bitmap =
+ cpu_to_le16(HCLGE_COMM_RSS_SET_BITMAP_MSK);
+ for (j = 0; j < HCLGE_COMM_RSS_CFG_TBL_SIZE; j++) {
+ qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j];
+ req->rss_qid_l[j] = qid & 0xff;
+ hclge_comm_append_rss_msb_info(req, qid, j);
+ }
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure rss table, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
+ struct hclge_comm_hw *hw, bool is_pf,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+ struct hclge_comm_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
+ false);
+
+ req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ if (is_pf)
+ hclge_comm_get_rss_type(nic, &rss_cfg->rss_tuple_sets);
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure rss input, ret = %d.\n", ret);
+ return ret;
+}
+
+void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
+ u8 *hfunc)
+{
+ /* Get hash algorithm */
+ if (hfunc) {
+ switch (rss_cfg->rss_algo) {
+ case HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGE_COMM_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
+ }
+
+ /* Get the RSS Key required by the user */
+ if (key)
+ memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
+}
+
+void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
+ u32 *indir, u16 rss_ind_tbl_size)
+{
+ u16 i;
+
+ if (!indir)
+ return;
+
+ for (i = 0; i < rss_ind_tbl_size; i++)
+ indir[i] = rss_cfg->rss_indirection_tbl[i];
+}
+
+int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
+ const u8 *key)
+{
+ struct hclge_comm_rss_config_cmd *req;
+ unsigned int key_offset = 0;
+ struct hclge_desc desc;
+ int key_counts;
+ int key_size;
+ int ret;
+
+ key_counts = HCLGE_COMM_RSS_KEY_SIZE;
+ req = (struct hclge_comm_rss_config_cmd *)desc.data;
+
+ while (key_counts) {
+ hclge_comm_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_RSS_GENERIC_CONFIG,
+ false);
+
+ req->hash_config |= (hfunc & HCLGE_COMM_RSS_HASH_ALGO_MASK);
+ req->hash_config |=
+ (key_offset << HCLGE_COMM_RSS_HASH_KEY_OFFSET_B);
+
+ key_size = min(HCLGE_COMM_RSS_HASH_KEY_NUM, key_counts);
+ memcpy(req->hash_key,
+ key + key_offset * HCLGE_COMM_RSS_HASH_KEY_NUM,
+ key_size);
+
+ key_counts -= key_size;
+ key_offset++;
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure RSS key, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
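
A descriptor carries at most HCLGE_COMM_RSS_HASH_KEY_NUM (16) key bytes, so the 40-byte key goes out in three commands; the loop above iterates as follows:

	/* key_counts = 40:
	 *   pass 0: key_offset = 0, key_size = 16  (key bytes  0..15)
	 *   pass 1: key_offset = 1, key_size = 16  (key bytes 16..31)
	 *   pass 2: key_offset = 2, key_size =  8  (key bytes 32..39)
	 */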
+
+static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGE_COMM_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGE_COMM_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGE_COMM_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGE_COMM_V_TAG_BIT;
+
+ return hash_sets;
+}
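
This translates the ethtool RXH_* selector into the device tuple bits from hclge_comm_rss.h; for example, a TCP flow hashed on both addresses and both port halves:

	/* nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3
	 * yields HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT |
	 *        HCLGE_COMM_S_PORT_BIT | HCLGE_COMM_D_PORT_BIT,
	 * with HCLGE_COMM_V_TAG_BIT added only for SCTP flow types.
	 */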
+
+int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_input_tuple_cmd *req)
+{
+ u8 tuple_sets;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ tuple_sets = hclge_comm_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if (ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
+
+ if (tuple_sets & HCLGE_COMM_D_PORT_BIT)
+ tuple_data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGE_COMM_S_PORT_BIT)
+ tuple_data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGE_COMM_D_IP_BIT)
+ tuple_data |= RXH_IP_DST;
+ if (tuple_sets & HCLGE_COMM_S_IP_BIT)
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
new file mode 100644
index 000000000000..aa1d7a6ff4ca
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_RSS_H
+#define __HCLGE_COMM_RSS_H
+#include <linux/types.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+#define HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGE_COMM_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGE_COMM_RSS_HASH_ALGO_SYMMETRIC 2
+
+#define HCLGE_COMM_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+
+#define HCLGE_COMM_D_PORT_BIT BIT(0)
+#define HCLGE_COMM_S_PORT_BIT BIT(1)
+#define HCLGE_COMM_D_IP_BIT BIT(2)
+#define HCLGE_COMM_S_IP_BIT BIT(3)
+#define HCLGE_COMM_V_TAG_BIT BIT(4)
+#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_COMM_D_IP_BIT | HCLGE_COMM_S_IP_BIT | HCLGE_COMM_V_TAG_BIT)
+#define HCLGE_COMM_MAX_TC_NUM 8
+
+#define HCLGE_COMM_RSS_TC_OFFSET_S 0
+#define HCLGE_COMM_RSS_TC_OFFSET_M GENMASK(10, 0)
+#define HCLGE_COMM_RSS_TC_SIZE_MSB_B 11
+#define HCLGE_COMM_RSS_TC_SIZE_S 12
+#define HCLGE_COMM_RSS_TC_SIZE_M GENMASK(14, 12)
+#define HCLGE_COMM_RSS_TC_VALID_B 15
+#define HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET 3
+
+struct hclge_comm_rss_tuple_cfg {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+};
+
+#define HCLGE_COMM_RSS_KEY_SIZE 40
+#define HCLGE_COMM_RSS_CFG_TBL_SIZE 16
+#define HCLGE_COMM_RSS_CFG_TBL_BW_H 2U
+#define HCLGE_COMM_RSS_CFG_TBL_BW_L 8U
+#define HCLGE_COMM_RSS_CFG_TBL_SIZE_H 4
+#define HCLGE_COMM_RSS_SET_BITMAP_MSK GENMASK(15, 0)
+#define HCLGE_COMM_RSS_HASH_ALGO_MASK GENMASK(3, 0)
+#define HCLGE_COMM_RSS_HASH_KEY_OFFSET_B 4
+
+#define HCLGE_COMM_RSS_HASH_KEY_NUM 16
+struct hclge_comm_rss_config_cmd {
+ u8 hash_config;
+ u8 rsv[7];
+ u8 hash_key[HCLGE_COMM_RSS_HASH_KEY_NUM];
+};
+
+struct hclge_comm_rss_cfg {
+ u8 rss_hash_key[HCLGE_COMM_RSS_KEY_SIZE]; /* user configured hash keys */
+
+ /* shadow table */
+ u16 *rss_indirection_tbl;
+ u32 rss_algo;
+
+ struct hclge_comm_rss_tuple_cfg rss_tuple_sets;
+ u32 rss_size;
+};
+
+struct hclge_comm_rss_input_tuple_cmd {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+ u8 rsv[16];
+};
+
+struct hclge_comm_rss_ind_tbl_cmd {
+ __le16 start_table_index;
+ __le16 rss_set_bitmap;
+ u8 rss_qid_h[HCLGE_COMM_RSS_CFG_TBL_SIZE_H];
+ u8 rss_qid_l[HCLGE_COMM_RSS_CFG_TBL_SIZE];
+};
+
+struct hclge_comm_rss_tc_mode_cmd {
+ __le16 rss_tc_mode[HCLGE_COMM_MAX_TC_NUM];
+ u8 rsv[8];
+};
+
+u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle);
+void hclge_comm_get_rss_type(struct hnae3_handle *nic,
+ struct hclge_comm_rss_tuple_cfg *rss_tuple_sets);
+void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg);
+int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
+ u8 *tuple_sets);
+int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
+ const u8 hfunc, u8 *hash_algo);
+void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
+ u8 *hfunc);
+void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
+				  u32 *indir, u16 rss_ind_tbl_size);
+int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
+ const u8 *key);
+int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_input_tuple_cmd *req);
+u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
+int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
+ struct hclge_comm_hw *hw, bool is_pf,
+ struct hclge_comm_rss_cfg *rss_cfg);
+int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, const u16 *indir);
+int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg);
+void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size);
+int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size);
+int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
+ struct hclge_comm_hw *hw, const u8 *key,
+ const u8 hfunc);
+int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
new file mode 100644
index 000000000000..0c60f41fca8a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#include <linux/err.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+#include "hclge_comm_tqp_stats.h"
+
+u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ u64 *buff = data;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+ }
+
+ return buff;
+}
+
+int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+
+ return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE;
+}
+
+u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u8 *buff = data;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_comm_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_comm_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ return buff;
+}
+
+int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+ struct hclge_comm_hw *hw)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ struct hclge_desc desc;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_RX_STATS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index);
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+				"failed to get tqp stat, ret = %d, rx = %u.\n",
+ ret, i);
+ return ret;
+ }
+ tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+				"failed to get tqp stat, ret = %d, tx = %u.\n",
+ ret, i);
+ return ret;
+ }
+ tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+ }
+
+ return 0;
+}
+
+void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ struct hnae3_queue *queue;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ queue = kinfo->tqp[i];
+ tqp = container_of(queue, struct hclge_comm_tqp, q);
+ memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+ }
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
new file mode 100644
index 000000000000..a46350162ee8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_TQP_STATS_H
+#define __HCLGE_COMM_TQP_STATS_H
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include "hnae3.h"
+
+/* each tqp has two queues: TX and RX */
+#define HCLGE_COMM_QUEUE_PAIR_SIZE 2
+
+/* TQP stats */
+struct hclge_comm_tqp_stats {
+	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclge_comm_tqp {
+ /* copy of device pointer from pci_dev,
+	 * used when performing DMA mapping
+ */
+ struct device *dev;
+ struct hnae3_queue q;
+ struct hclge_comm_tqp_stats tqp_stats;
+ u16 index; /* Global index in a NIC controller */
+
+ bool alloced;
+};
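
The helpers declared below never take a struct hclge_comm_tqp directly; as in hclge_comm_tqp_stats.c above, they recover it from the embedded hnae3_queue:

	/* q points at the hnae3_queue member embedded above; container_of
	 * walks back to the enclosing struct hclge_comm_tqp.
	 */
	struct hclge_comm_tqp *tqp = container_of(q, struct hclge_comm_tqp, q);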
+
+u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data);
+int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle);
+u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data);
+void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle);
+int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+ struct hclge_comm_hw *hw);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index c381f8af67f0..f726a5b70f9e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1083,7 +1083,7 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
sprintf(result[j++], "%u", index);
sprintf(result[j++], "%u",
READ_ONCE(ring->page_pool->pages_state_hold_cnt));
- sprintf(result[j++], "%u",
+ sprintf(result[j++], "%d",
atomic_read(&ring->page_pool->pages_state_release_cnt));
sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
sprintf(result[j++], "%u", ring->page_pool->p.order);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index bd8801065e02..83aa1450ab9f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -4,6 +4,8 @@
#ifndef __HNS3_DEBUGFS_H
#define __HNS3_DEBUGFS_H
+#include "hnae3.h"
+
#define HNS3_DBG_READ_LEN 65536
#define HNS3_DBG_READ_LEN_128KB 0x20000
#define HNS3_DBG_READ_LEN_1MB 0x100000
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 9ccebbaa0d69..babc5d7a3b52 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -17,6 +17,7 @@
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gre.h>
+#include <net/gro.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
@@ -53,10 +54,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");
-static unsigned int tx_spare_buf_size;
-module_param(tx_spare_buf_size, uint, 0400);
-MODULE_PARM_DESC(tx_spare_buf_size, "Size used to allocate tx spare buffer");
-
static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
@@ -1005,9 +1002,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
return false;
if (ALIGN(len, dma_get_cache_alignment()) > space) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_spare_full++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_spare_full);
return false;
}
@@ -1024,9 +1019,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
return false;
if (space < HNS3_MAX_SGL_SIZE) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_spare_full++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_spare_full);
return false;
}
@@ -1041,8 +1034,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
dma_addr_t dma;
int order;
- alloc_size = tx_spare_buf_size ? tx_spare_buf_size :
- ring->tqp->handle->kinfo.tx_spare_buf_size;
+ alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
if (!alloc_size)
return;
@@ -1306,7 +1298,7 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
if (!(!skb->encapsulation &&
(l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
l4.udp->dest == htons(GENEVE_UDP_PORT) ||
- l4.udp->dest == htons(4790))))
+ l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT))))
return false;
return true;
@@ -1359,44 +1351,9 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
HNS3_TUN_NVGRE);
}
-static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
- u8 il4_proto, u32 *type_cs_vlan_tso,
- u32 *ol_type_vlan_len_msec)
+static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3,
+ u32 *type_cs_vlan_tso)
{
- unsigned char *l2_hdr = skb->data;
- u32 l4_proto = ol4_proto;
- union l4_hdr_info l4;
- union l3_hdr_info l3;
- u32 l2_len, l3_len;
-
- l4.hdr = skb_transport_header(skb);
- l3.hdr = skb_network_header(skb);
-
- /* handle encapsulation skb */
- if (skb->encapsulation) {
- /* If this is a not UDP/GRE encapsulation skb */
- if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
- /* drop the skb tunnel packet if hardware don't support,
- * because hardware can't calculate csum when TSO.
- */
- if (skb_is_gso(skb))
- return -EDOM;
-
- /* the stack computes the IP header already,
- * driver calculate l4 checksum when not TSO.
- */
- return skb_checksum_help(skb);
- }
-
- hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
-
- /* switch to inner header */
- l2_hdr = skb_inner_mac_header(skb);
- l3.hdr = skb_inner_network_header(skb);
- l4.hdr = skb_inner_transport_header(skb);
- l4_proto = il4_proto;
- }
-
if (l3.v4->version == 4) {
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
HNS3_L3T_IPV4);
@@ -1410,15 +1367,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
HNS3_L3T_IPV6);
}
+}
- /* compute inner(/normal) L2 header size, defined in 2 Bytes */
- l2_len = l3.hdr - l2_hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
-
- /* compute inner(/normal) L3 header size, defined in 4 Bytes */
- l3_len = l4.hdr - l3.hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
-
+static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4,
+ u32 l4_proto, u32 *type_cs_vlan_tso)
+{
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -1464,6 +1417,57 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
return 0;
}
+static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u8 il4_proto, u32 *type_cs_vlan_tso,
+ u32 *ol_type_vlan_len_msec)
+{
+ unsigned char *l2_hdr = skb->data;
+ u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
+ union l3_hdr_info l3;
+ u32 l2_len, l3_len;
+
+ l4.hdr = skb_transport_header(skb);
+ l3.hdr = skb_network_header(skb);
+
+ /* handle encapsulation skb */
+ if (skb->encapsulation) {
+		/* If this is not a UDP/GRE encapsulation skb */
+ if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
+			/* drop the skb tunnel packet if hardware doesn't support it,
+			 * because hardware can't calculate the csum when doing TSO.
+ */
+ if (skb_is_gso(skb))
+ return -EDOM;
+
+			/* the stack computes the IP header already,
+			 * the driver calculates the l4 checksum when not doing TSO.
+ */
+ return skb_checksum_help(skb);
+ }
+
+ hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
+
+ /* switch to inner header */
+ l2_hdr = skb_inner_mac_header(skb);
+ l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = il4_proto;
+ }
+
+ hns3_set_l3_type(skb, l3, type_cs_vlan_tso);
+
+ /* compute inner(/normal) L2 header size, defined in 2 Bytes */
+ l2_len = l3.hdr - l2_hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+ /* compute inner(/normal) L3 header size, defined in 4 Bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso);
+}
+
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
struct sk_buff *skb)
{
@@ -1540,92 +1544,122 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
return true;
}
-static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
- struct sk_buff *skb, struct hns3_desc *desc,
- struct hns3_desc_cb *desc_cb)
+struct hns3_desc_param {
+ u32 paylen_ol4cs;
+ u32 ol_type_vlan_len_msec;
+ u32 type_cs_vlan_tso;
+ u16 mss_hw_csum;
+ u16 inner_vtag;
+ u16 out_vtag;
+};
+
+static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
+{
+ pa->paylen_ol4cs = skb->len;
+ pa->ol_type_vlan_len_msec = 0;
+ pa->type_cs_vlan_tso = 0;
+ pa->mss_hw_csum = 0;
+ pa->inner_vtag = 0;
+ pa->out_vtag = 0;
+}
+
+static int hns3_handle_vlan_info(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_param *param)
{
- u32 ol_type_vlan_len_msec = 0;
- u32 paylen_ol4cs = skb->len;
- u32 type_cs_vlan_tso = 0;
- u16 mss_hw_csum = 0;
- u16 inner_vtag = 0;
- u16 out_vtag = 0;
int ret;
ret = hns3_handle_vtags(ring, skb);
if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_vlan_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_vlan_err);
return ret;
} else if (ret == HNS3_INNER_VLAN_TAG) {
- inner_vtag = skb_vlan_tag_get(skb);
- inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ param->inner_vtag = skb_vlan_tag_get(skb);
+ param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
VLAN_PRIO_MASK;
- hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+ hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
} else if (ret == HNS3_OUTER_VLAN_TAG) {
- out_vtag = skb_vlan_tag_get(skb);
- out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ param->out_vtag = skb_vlan_tag_get(skb);
+ param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
VLAN_PRIO_MASK;
- hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+ hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
1);
}
+ return 0;
+}
- desc_cb->send_bytes = skb->len;
+static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_cb *desc_cb,
+ struct hns3_desc_param *param)
+{
+ u8 ol4_proto, il4_proto;
+ int ret;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 ol4_proto, il4_proto;
-
- if (hns3_check_hw_tx_csum(skb)) {
- /* set checksum start and offset, defined in 2 Bytes */
- hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
- skb_checksum_start_offset(skb) >> 1);
- hns3_set_field(ol_type_vlan_len_msec,
- HNS3_TXD_CSUM_OFFSET_S,
- skb->csum_offset >> 1);
- mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
- goto out_hw_tx_csum;
- }
+ if (hns3_check_hw_tx_csum(skb)) {
+ /* set checksum start and offset, defined in 2 Bytes */
+ hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
+ skb_checksum_start_offset(skb) >> 1);
+ hns3_set_field(param->ol_type_vlan_len_msec,
+ HNS3_TXD_CSUM_OFFSET_S,
+ skb->csum_offset >> 1);
+ param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
+ return 0;
+ }
- skb_reset_mac_len(skb);
+ skb_reset_mac_len(skb);
- ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_l4_proto_err++;
- u64_stats_update_end(&ring->syncp);
- return ret;
- }
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_l4_proto_err);
+ return ret;
+ }
- ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_l2l3l4_err++;
- u64_stats_update_end(&ring->syncp);
- return ret;
- }
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &param->type_cs_vlan_tso,
+ &param->ol_type_vlan_len_msec);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_l2l3l4_err);
+ return ret;
+ }
+
+ ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum,
+ &param->type_cs_vlan_tso, &desc_cb->send_bytes);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_tso_err);
+ return ret;
+ }
+ return 0;
+}
+
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, struct hns3_desc *desc,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc_param param;
+ int ret;
+
+ hns3_init_desc_data(skb, &param);
+ ret = hns3_handle_vlan_info(ring, skb, &param);
+ if (unlikely(ret < 0))
+ return ret;
+
+ desc_cb->send_bytes = skb->len;
- ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
- &type_cs_vlan_tso, &desc_cb->send_bytes);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_tso_err++;
- u64_stats_update_end(&ring->syncp);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param);
+ if (ret)
return ret;
- }
}
-out_hw_tx_csum:
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
- cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
- desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
- desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
- desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+ cpu_to_le32(param.ol_type_vlan_len_msec);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
+ desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
+ desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
+ desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);
return 0;
}
@@ -1705,9 +1739,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
}
if (unlikely(dma_mapping_error(dev, dma))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -1853,9 +1885,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
* recursion level of over HNS3_MAX_RECURSION_LEVEL.
*/
if (bd_num == UINT_MAX) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.over_max_recursion++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, over_max_recursion);
return -ENOMEM;
}
@@ -1864,16 +1894,12 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
(!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.hw_limitation++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, hw_limitation);
return -ENOMEM;
}
if (__skb_linearize(skb)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -1903,9 +1929,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_count(skb->len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_copy++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_copy);
}
out:
@@ -1925,9 +1949,7 @@ out:
return bd_num;
}
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_busy++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_busy);
return -EBUSY;
}
@@ -2012,9 +2034,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
ring->pending_buf += num;
if (!doorbell) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_more++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_more);
return;
}
@@ -2064,9 +2084,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
ret = skb_copy_bits(skb, 0, buf, size);
if (unlikely(ret < 0)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.copy_bits_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, copy_bits_err);
return ret;
}
@@ -2089,9 +2107,8 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
dma_sync_single_for_device(ring_to_dev(ring), dma, size,
DMA_TO_DEVICE);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_bounce++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_bounce);
+
return bd_num;
}
@@ -2121,9 +2138,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
if (unlikely(nents < 0)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.skb2sgl_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, skb2sgl_err);
return -ENOMEM;
}
@@ -2132,9 +2147,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
DMA_TO_DEVICE);
if (unlikely(!sgt->nents)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.map_sg_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, map_sg_err);
return -ENOMEM;
}
@@ -2146,10 +2159,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
for (i = 0; i < sgt->nents; i++)
bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
sg_dma_len(sgt->sgl + i));
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_sgl++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_sgl);
return bd_num;
}
@@ -2174,23 +2184,45 @@ out:
return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
}
+static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_cb *desc_cb,
+ int next_to_use_head)
+{
+ int ret;
+
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
+ desc_cb);
+ if (unlikely(ret < 0))
+ goto fill_err;
+
+	/* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
+	 * zero, which is unlikely, and 'ret > 0' is the number of tx
+	 * descriptors that need to be notified to the hw.
+ */
+ ret = hns3_handle_desc_filling(ring, skb);
+ if (likely(ret > 0))
+ return ret;
+
+fill_err:
+ hns3_clear_desc(ring, next_to_use_head);
+ return ret;
+}
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct netdev_queue *dev_queue;
- int pre_ntu, next_to_use_head;
+ int pre_ntu, ret;
bool doorbell;
- int ret;
/* Hardware can only handle short frames above 32 bytes */
if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return NETDEV_TX_OK;
}
@@ -2209,20 +2241,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out_err_tx_ok;
}
- next_to_use_head = ring->next_to_use;
-
- ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
- desc_cb);
- if (unlikely(ret < 0))
- goto fill_err;
-
- /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
- * zero, which is unlikely, and 'ret > 0' means how many tx desc
- * need to be notified to the hw.
- */
- ret = hns3_handle_desc_filling(ring, skb);
+ ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
if (unlikely(ret <= 0))
- goto fill_err;
+ goto out_err_tx_ok;
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
@@ -2244,9 +2265,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
-fill_err:
- hns3_clear_desc(ring, next_to_use_head);
-
out_err_tx_ok:
dev_kfree_skb_any(skb);
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
@@ -2255,6 +2273,8 @@ out_err_tx_ok:
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
+ char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN];
+ char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hnae3_handle *h = hns3_get_handle(netdev);
struct sockaddr *mac_addr = p;
int ret;
@@ -2263,8 +2283,9 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
- netdev_info(netdev, "already using mac address %pM\n",
- mac_addr->sa_data);
+ hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
+ netdev_info(netdev, "already using mac address %s\n",
+ format_mac_addr_sa);
return 0;
}
@@ -2273,8 +2294,10 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
*/
if (!hns3_is_phys_func(h->pdev) &&
!is_zero_ether_addr(netdev->perm_addr)) {
- netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
- netdev->perm_addr, mac_addr->sa_data);
+ hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr);
+ hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
+		netdev_err(netdev, "has permanent MAC %s, user MAC %s not allowed\n",
+ format_mac_addr_perm, format_mac_addr_sa);
return -EPERM;
}
@@ -2382,90 +2405,89 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
return features;
}
+static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
+ struct hns3_enet_ring *ring, bool is_tx)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ if (is_tx) {
+ stats->tx_bytes += ring->stats.tx_bytes;
+ stats->tx_packets += ring->stats.tx_pkts;
+ stats->tx_dropped += ring->stats.sw_err_cnt;
+ stats->tx_dropped += ring->stats.tx_vlan_err;
+ stats->tx_dropped += ring->stats.tx_l4_proto_err;
+ stats->tx_dropped += ring->stats.tx_l2l3l4_err;
+ stats->tx_dropped += ring->stats.tx_tso_err;
+ stats->tx_dropped += ring->stats.over_max_recursion;
+ stats->tx_dropped += ring->stats.hw_limitation;
+ stats->tx_dropped += ring->stats.copy_bits_err;
+ stats->tx_dropped += ring->stats.skb2sgl_err;
+ stats->tx_dropped += ring->stats.map_sg_err;
+ stats->tx_errors += ring->stats.sw_err_cnt;
+ stats->tx_errors += ring->stats.tx_vlan_err;
+ stats->tx_errors += ring->stats.tx_l4_proto_err;
+ stats->tx_errors += ring->stats.tx_l2l3l4_err;
+ stats->tx_errors += ring->stats.tx_tso_err;
+ stats->tx_errors += ring->stats.over_max_recursion;
+ stats->tx_errors += ring->stats.hw_limitation;
+ stats->tx_errors += ring->stats.copy_bits_err;
+ stats->tx_errors += ring->stats.skb2sgl_err;
+ stats->tx_errors += ring->stats.map_sg_err;
+ } else {
+ stats->rx_bytes += ring->stats.rx_bytes;
+ stats->rx_packets += ring->stats.rx_pkts;
+ stats->rx_dropped += ring->stats.l2_err;
+ stats->rx_errors += ring->stats.l2_err;
+ stats->rx_errors += ring->stats.l3l4_csum_err;
+ stats->rx_crc_errors += ring->stats.l2_err;
+ stats->multicast += ring->stats.rx_multicast;
+ stats->rx_length_errors += ring->stats.err_pkt_len;
+ }
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+}
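
The begin/retry pair is the standard u64_stats seqcount read loop: on 32-bit machines a writer can tear a 64-bit counter, so the reader retries until the sequence is stable. The bare pattern, as a generic sketch (not driver code):

	unsigned int start;
	u64 pkts;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		pkts = ring->stats.tx_pkts;	/* could be torn without this loop */
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));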
+
static void hns3_nic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
struct hnae3_handle *handle = priv->ae_handle;
+ struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
- u64 rx_length_errors = 0;
- u64 rx_crc_errors = 0;
- u64 rx_multicast = 0;
- unsigned int start;
- u64 tx_errors = 0;
- u64 rx_errors = 0;
unsigned int idx;
- u64 tx_bytes = 0;
- u64 rx_bytes = 0;
- u64 tx_pkts = 0;
- u64 rx_pkts = 0;
- u64 tx_drop = 0;
- u64 rx_drop = 0;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
handle->ae_algo->ops->update_stats(handle, &netdev->stats);
+ memset(&ring_total_stats, 0, sizeof(ring_total_stats));
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
ring = &priv->ring[idx];
- do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- tx_bytes += ring->stats.tx_bytes;
- tx_pkts += ring->stats.tx_pkts;
- tx_drop += ring->stats.sw_err_cnt;
- tx_drop += ring->stats.tx_vlan_err;
- tx_drop += ring->stats.tx_l4_proto_err;
- tx_drop += ring->stats.tx_l2l3l4_err;
- tx_drop += ring->stats.tx_tso_err;
- tx_drop += ring->stats.over_max_recursion;
- tx_drop += ring->stats.hw_limitation;
- tx_drop += ring->stats.copy_bits_err;
- tx_drop += ring->stats.skb2sgl_err;
- tx_drop += ring->stats.map_sg_err;
- tx_errors += ring->stats.sw_err_cnt;
- tx_errors += ring->stats.tx_vlan_err;
- tx_errors += ring->stats.tx_l4_proto_err;
- tx_errors += ring->stats.tx_l2l3l4_err;
- tx_errors += ring->stats.tx_tso_err;
- tx_errors += ring->stats.over_max_recursion;
- tx_errors += ring->stats.hw_limitation;
- tx_errors += ring->stats.copy_bits_err;
- tx_errors += ring->stats.skb2sgl_err;
- tx_errors += ring->stats.map_sg_err;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ hns3_fetch_stats(&ring_total_stats, ring, true);
/* fetch the rx stats */
ring = &priv->ring[idx + queue_num];
- do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- rx_bytes += ring->stats.rx_bytes;
- rx_pkts += ring->stats.rx_pkts;
- rx_drop += ring->stats.l2_err;
- rx_errors += ring->stats.l2_err;
- rx_errors += ring->stats.l3l4_csum_err;
- rx_crc_errors += ring->stats.l2_err;
- rx_multicast += ring->stats.rx_multicast;
- rx_length_errors += ring->stats.err_pkt_len;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
- }
-
- stats->tx_bytes = tx_bytes;
- stats->tx_packets = tx_pkts;
- stats->rx_bytes = rx_bytes;
- stats->rx_packets = rx_pkts;
-
- stats->rx_errors = rx_errors;
- stats->multicast = rx_multicast;
- stats->rx_length_errors = rx_length_errors;
- stats->rx_crc_errors = rx_crc_errors;
+ hns3_fetch_stats(&ring_total_stats, ring, false);
+ }
+
+ stats->tx_bytes = ring_total_stats.tx_bytes;
+ stats->tx_packets = ring_total_stats.tx_packets;
+ stats->rx_bytes = ring_total_stats.rx_bytes;
+ stats->rx_packets = ring_total_stats.rx_packets;
+
+ stats->rx_errors = ring_total_stats.rx_errors;
+ stats->multicast = ring_total_stats.multicast;
+ stats->rx_length_errors = ring_total_stats.rx_length_errors;
+ stats->rx_crc_errors = ring_total_stats.rx_crc_errors;
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
- stats->tx_errors = tx_errors;
- stats->rx_dropped = rx_drop;
- stats->tx_dropped = tx_drop;
+ stats->tx_errors = ring_total_stats.tx_errors;
+ stats->rx_dropped = ring_total_stats.rx_dropped;
+ stats->tx_dropped = ring_total_stats.tx_dropped;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -2658,18 +2680,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
-static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+static int hns3_get_timeout_queue(struct net_device *ndev)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hns3_enet_ring *tx_ring;
- struct napi_struct *napi;
- int timeout_queue = 0;
- int hw_head, hw_tail;
- int fbd_num, fbd_oft;
- int ebd_num, ebd_oft;
- int bd_num, bd_err;
- int ring_en, tc;
int i;
/* Find the stopped queue the same way the stack does */
@@ -2678,11 +2690,17 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
unsigned long trans_start;
q = netdev_get_tx_queue(ndev, i);
- trans_start = q->trans_start;
+ trans_start = READ_ONCE(q->trans_start);
if (netif_xmit_stopped(q) &&
time_after(jiffies,
(trans_start + ndev->watchdog_timeo))) {
- timeout_queue = i;
+#ifdef CONFIG_BQL
+ struct dql *dql = &q->dql;
+
+ netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n",
+ dql->last_obj_cnt, dql->num_queued,
+ dql->adj_limit, dql->num_completed);
+#endif
netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
q->state,
jiffies_to_msecs(jiffies - trans_start));
@@ -2690,17 +2708,15 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
}
}
- if (i == ndev->num_tx_queues) {
- netdev_info(ndev,
- "no netdev TX timeout queue found, timeout count: %llu\n",
- priv->tx_timeout_count);
- return false;
- }
-
- priv->tx_timeout_count++;
+ return i;
+}
- tx_ring = &priv->ring[timeout_queue];
- napi = &tx_ring->tqp_vector->napi;
+static void hns3_dump_queue_stats(struct net_device *ndev,
+ struct hns3_enet_ring *tx_ring,
+ int timeout_queue)
+{
+ struct napi_struct *napi = &tx_ring->tqp_vector->napi;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
netdev_info(ndev,
"tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
@@ -2716,6 +2732,48 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
"seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+}
+
+static void hns3_dump_queue_reg(struct net_device *ndev,
+ struct hns3_enet_ring *tx_ring)
+{
+ netdev_info(ndev,
+ "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG),
+ readl(tx_ring->tqp_vector->mask_addr));
+ netdev_info(ndev,
+ "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG),
+ hns3_tqp_read_reg(tx_ring,
+ HNS3_RING_TX_RING_EBD_OFFSET_REG));
+}
+
+static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hns3_enet_ring *tx_ring;
+ int timeout_queue;
+
+ timeout_queue = hns3_get_timeout_queue(ndev);
+ if (timeout_queue >= ndev->num_tx_queues) {
+ netdev_info(ndev,
+ "no netdev TX timeout queue found, timeout count: %llu\n",
+ priv->tx_timeout_count);
+ return false;
+ }
+
+ priv->tx_timeout_count++;
+
+ tx_ring = &priv->ring[timeout_queue];
+ hns3_dump_queue_stats(ndev, tx_ring, timeout_queue);
/* When mac received many pause frames continuous, it's unable to send
* packets, which may cause tx timeout
@@ -2728,32 +2786,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
}
- hw_head = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_HEAD_REG);
- hw_tail = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_TAIL_REG);
- fbd_num = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_FBDNUM_REG);
- fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_OFFSET_REG);
- ebd_num = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_EBDNUM_REG);
- ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_EBD_OFFSET_REG);
- bd_num = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_NUM_REG);
- bd_err = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_ERR_REG);
- ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
- tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
-
- netdev_info(ndev,
- "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
- bd_num, hw_head, hw_tail, bd_err,
- readl(tx_ring->tqp_vector->mask_addr));
- netdev_info(ndev,
- "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
- ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
+ hns3_dump_queue_reg(ndev, tx_ring);
return true;
}
@@ -2836,14 +2869,16 @@ static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
if (!h->ae_algo->ops->set_vf_mac)
return -EOPNOTSUPP;
if (is_multicast_ether_addr(mac)) {
+ hnae3_format_mac_addr(format_mac_addr, mac);
netdev_err(netdev,
- "Invalid MAC:%pM specified. Could not set MAC\n",
- mac);
+ "Invalid MAC:%s specified. Could not set MAC\n",
+ format_mac_addr);
return -EINVAL;
}
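hnae3_format_mac_addr() is introduced in hnae3.h (outside this diff) so that complete MAC addresses no longer land in the kernel log. A sketch of the helper, assuming the masked format used upstream, with only bytes 0, 4 and 5 visible (e.g. "a4:**:**:**:9d:10"):

    #define HNAE3_FORMAT_MAC_ADDR_LEN	18	/* assumed: "xx:**:**:**:xx:xx" + NUL */

    static inline void hnae3_format_mac_addr(char *format_mac_addr,
    					     const u8 *mac_addr)
    {
    	/* keep the first and last bytes, mask the middle of the address */
    	snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN,
    		 "%02x:**:**:**:%02x:%02x",
    		 mac_addr[0], mac_addr[4], mac_addr[5]);
    }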
@@ -3497,17 +3532,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
for (i = 0; i < cleand_count; i++) {
desc_cb = &ring->desc_cb[ring->next_to_use];
if (desc_cb->reuse_flag) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.reuse_pg_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, reuse_pg_cnt);
hns3_reuse_buffer(ring, ring->next_to_use);
} else {
ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
if (ret) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
@@ -3519,9 +3550,7 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.non_reuse_pg++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, non_reuse_pg);
}
ring_ptr_move_fw(ring, next_to_use);
@@ -3536,6 +3565,34 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
return page_count(cb->priv) == cb->pagecnt_bias;
}
+static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
+ struct hns3_enet_ring *ring,
+ int pull_len,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ u32 frag_offset = desc_cb->page_offset + pull_len;
+ int size = le16_to_cpu(desc->rx.size);
+ u32 frag_size = size - pull_len;
+ void *frag = napi_alloc_frag(frag_size);
+
+ if (unlikely(!frag)) {
+ hns3_ring_stats_update(ring, frag_alloc_err);
+
+ hns3_rl_err(ring_to_netdev(ring),
+ "failed to allocate rx frag\n");
+ return -ENOMEM;
+ }
+
+ desc_cb->reuse_flag = 1;
+ memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+ skb_add_rx_frag(skb, i, virt_to_page(frag),
+ offset_in_page(frag), frag_size, frag_size);
+
+ hns3_ring_stats_update(ring, frag_alloc);
+ return 0;
+}
+
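Factoring the copybreak path into its own helper makes the trade-off easier to see: for payloads up to ring->rx_copybreak, one memcpy into a small NAPI frag is cheaper than donating the whole DMA page to the skb. The resulting bookkeeping, summarized (names from this driver; sketch only):

    /* After hns3_handle_rx_copybreak() returns 0:
     *   - the skb carries a private copy in a freshly allocated frag;
     *   - desc_cb->reuse_flag == 1, so hns3_nic_alloc_rx_buffers() will
     *     recycle the same DMA page via hns3_reuse_buffer() instead of
     *     allocating and mapping a new one.
     */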
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -3545,6 +3602,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
int size = le16_to_cpu(desc->rx.size);
u32 truesize = hns3_buf_size(ring);
u32 frag_size = size - pull_len;
+ int ret = 0;
bool reused;
if (ring->page_pool) {
@@ -3579,27 +3637,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
desc_cb->page_offset = 0;
desc_cb->reuse_flag = 1;
} else if (frag_size <= ring->rx_copybreak) {
- void *frag = napi_alloc_frag(frag_size);
-
- if (unlikely(!frag)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.frag_alloc_err++;
- u64_stats_update_end(&ring->syncp);
-
- hns3_rl_err(ring_to_netdev(ring),
- "failed to allocate rx frag\n");
+ ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
+ if (ret)
goto out;
- }
-
- desc_cb->reuse_flag = 1;
- memcpy(frag, desc_cb->buf + frag_offset, frag_size);
- skb_add_rx_frag(skb, i, virt_to_page(frag),
- offset_in_page(frag), frag_size, frag_size);
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.frag_alloc++;
- u64_stats_update_end(&ring->syncp);
- return;
}
out:
@@ -3682,9 +3722,7 @@ static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
return false;
- u64_stats_update_begin(&ring->syncp);
- ring->stats.csum_complete++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, csum_complete);
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)csum);
@@ -3758,9 +3796,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
BIT(HNS3_RXD_OL3E_B) |
BIT(HNS3_RXD_OL4E_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.l3l4_csum_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, l3l4_csum_err);
return;
}
@@ -3851,10 +3887,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
skb = ring->skb;
if (unlikely(!skb)) {
hns3_rl_err(netdev, "alloc rx skb fail\n");
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -3885,9 +3918,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (ring->page_pool)
skb_mark_for_recycle(skb);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.seg_pkt_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, seg_pkt_cnt);
ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
__skb_put(skb, ring->pull_len);
@@ -4015,6 +4046,39 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
skb_set_hash(skb, rss_hash, rss_type);
}
+static void hns3_handle_rx_ts_info(struct net_device *netdev,
+ struct hns3_desc *desc, struct sk_buff *skb,
+ u32 bd_base_info)
+{
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 nsec = le32_to_cpu(desc->ts_nsec);
+ u32 sec = le32_to_cpu(desc->ts_sec);
+
+ if (h->ae_algo->ops->get_rx_hwts)
+ h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
+ }
+}
+
+static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, struct sk_buff *skb,
+ u32 l234info)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+
+	/* Based on the hw strategy, the offloaded tag is stored in
+	 * ot_vlan_tag in the two-layer tag case, and in vlan_tag in
+	 * the one-layer tag case.
+	 */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+}
+
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
struct net_device *netdev = ring_to_netdev(ring);
@@ -4037,26 +4101,9 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ol_info = le32_to_cpu(desc->rx.ol_info);
csum = le16_to_cpu(desc->csum);
- if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
- struct hnae3_handle *h = hns3_get_handle(netdev);
- u32 nsec = le32_to_cpu(desc->ts_nsec);
- u32 sec = le32_to_cpu(desc->ts_sec);
-
- if (h->ae_algo->ops->get_rx_hwts)
- h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
- }
-
- /* Based on hw strategy, the tag offloaded will be stored at
- * ot_vlan_tag in two layer tag case, and stored at vlan_tag
- * in one layer tag case.
- */
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- u16 vlan_tag;
+ hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info);
- if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- vlan_tag);
- }
+ hns3_handle_rx_vlan_tag(ring, desc, skb, l234info);
if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
BIT(HNS3_RXD_L2E_B))))) {
@@ -4079,9 +4126,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ret = hns3_set_gro_and_checksum(ring, skb, l234info,
bd_base_info, ol_info, csum);
if (unlikely(ret)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.rx_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, rx_err_cnt);
return ret;
}
@@ -4297,87 +4342,70 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
return rx_pkt_total;
}
-static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
- struct hnae3_ring_chain_node *head)
+static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hnae3_ring_chain_node **head,
+ bool is_tx)
{
+ u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
+ u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
+ struct hnae3_ring_chain_node *cur_chain = *head;
struct pci_dev *pdev = tqp_vector->handle->pdev;
- struct hnae3_ring_chain_node *cur_chain = head;
struct hnae3_ring_chain_node *chain;
- struct hns3_enet_ring *tx_ring;
- struct hns3_enet_ring *rx_ring;
-
- tx_ring = tqp_vector->tx_group.ring;
- if (tx_ring) {
- cur_chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
-
- cur_chain->next = NULL;
-
- while (tx_ring->next) {
- tx_ring = tx_ring->next;
-
- chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
- GFP_KERNEL);
- if (!chain)
- goto err_free_chain;
-
- cur_chain->next = chain;
- chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae3_set_field(chain->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- HNAE3_RING_GL_TX);
-
- cur_chain = chain;
- }
- }
+ struct hns3_enet_ring *ring;
- rx_ring = tqp_vector->rx_group.ring;
- if (!tx_ring && rx_ring) {
- cur_chain->next = NULL;
- cur_chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;
- rx_ring = rx_ring->next;
+ if (cur_chain) {
+ while (cur_chain->next)
+ cur_chain = cur_chain->next;
}
- while (rx_ring) {
+ while (ring) {
chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
if (!chain)
- goto err_free_chain;
-
- cur_chain->next = chain;
- chain->tqp_index = rx_ring->tqp->tqp_index;
+ return -ENOMEM;
+ if (cur_chain)
+ cur_chain->next = chain;
+ else
+ *head = chain;
+ chain->tqp_index = ring->tqp->tqp_index;
hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ bit_value);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, field_value);
cur_chain = chain;
- rx_ring = rx_ring->next;
+ ring = ring->next;
}
return 0;
+}
+
+static struct hnae3_ring_chain_node *
+hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ struct pci_dev *pdev = tqp_vector->handle->pdev;
+ struct hnae3_ring_chain_node *cur_chain = NULL;
+ struct hnae3_ring_chain_node *chain;
+
+ if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
+ goto err_free_chain;
+
+ if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
+ goto err_free_chain;
+
+ return cur_chain;
err_free_chain:
- cur_chain = head->next;
while (cur_chain) {
chain = cur_chain->next;
devm_kfree(&pdev->dev, cur_chain);
cur_chain = chain;
}
- head->next = NULL;
- return -ENOMEM;
+ return NULL;
}
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
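This refactor changes the chain from a caller-provided stack head node to a fully heap-allocated list returned by pointer; every node, including the head, now comes from devm_kzalloc(). The ownership contract the later hunks rely on, sketched from the call sites below:

    /* Sketch of a consumer, mirroring hns3_nic_init_vector_data():
     * the caller owns the whole list and frees it after the map call.
     */
    chain = hns3_get_vector_ring_chain(tqp_vector);
    if (!chain)
    	return -ENOMEM;
    ret = h->ae_algo->ops->map_ring_to_vector(h, tqp_vector->vector_irq, chain);
    hns3_free_vector_ring_chain(tqp_vector, chain);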
@@ -4386,7 +4414,7 @@ static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
struct pci_dev *pdev = tqp_vector->handle->pdev;
struct hnae3_ring_chain_node *chain_tmp, *chain;
- chain = head->next;
+ chain = head;
while (chain) {
chain_tmp = chain->next;
@@ -4501,7 +4529,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
}
for (i = 0; i < priv->vector_num; i++) {
- struct hnae3_ring_chain_node vector_ring_chain;
+ struct hnae3_ring_chain_node *vector_ring_chain;
tqp_vector = &priv->tqp_vector[i];
@@ -4511,15 +4539,16 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector->tx_group.total_packets = 0;
tqp_vector->handle = h;
- ret = hns3_get_vector_ring_chain(tqp_vector,
- &vector_ring_chain);
- if (ret)
+ vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+ if (!vector_ring_chain) {
+ ret = -ENOMEM;
goto map_ring_fail;
+ }
ret = h->ae_algo->ops->map_ring_to_vector(h,
- tqp_vector->vector_irq, &vector_ring_chain);
+ tqp_vector->vector_irq, vector_ring_chain);
- hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+ hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
if (ret)
goto map_ring_fail;
@@ -4618,7 +4647,7 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
- struct hnae3_ring_chain_node vector_ring_chain;
+ struct hnae3_ring_chain_node *vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
int i;
@@ -4633,13 +4662,14 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
* chain between vector and ring, we should go on to deal with
* the remaining options.
*/
- if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
+ vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+ if (!vector_ring_chain)
dev_warn(priv->dev, "failed to get ring chain\n");
h->ae_algo->ops->unmap_ring_from_vector(h,
- tqp_vector->vector_irq, &vector_ring_chain);
+ tqp_vector->vector_irq, vector_ring_chain);
- hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+ hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
hns3_clear_ring_group(&tqp_vector->rx_group);
hns3_clear_ring_group(&tqp_vector->tx_group);
@@ -4934,6 +4964,7 @@ static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
static int hns3_init_mac_addr(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
int ret = 0;
@@ -4944,8 +4975,9 @@ static int hns3_init_mac_addr(struct net_device *netdev)
/* Check if the MAC address is valid, if not get a random one */
if (!is_valid_ether_addr(mac_addr_temp)) {
eth_hw_addr_random(netdev);
- dev_warn(priv->dev, "using random MAC address %pM\n",
- netdev->dev_addr);
+ hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
+ dev_warn(priv->dev, "using random MAC address %s\n",
+ format_mac_addr);
} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
eth_hw_addr_set(netdev, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
@@ -4997,8 +5029,10 @@ static void hns3_client_stop(struct hnae3_handle *handle)
static void hns3_info_show(struct hns3_nic_priv *priv)
{
struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
- dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
+ hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr);
+ dev_info(priv->dev, "MAC address: %s\n", format_mac_addr);
dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
@@ -5287,9 +5321,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
if (ret) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
/* if alloc new buffer fail, exit directly
* and reclear in up flow.
*/
@@ -5531,8 +5563,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}
-static int hns3_reset_notify(struct hnae3_handle *handle,
- enum hnae3_reset_notify_type type)
+int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type)
{
int ret = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 1715c98d906d..a05a0c7423ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -10,6 +10,9 @@
#include "hnae3.h"
+struct iphdr;
+struct ipv6hdr;
+
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
@@ -621,6 +624,11 @@ static inline int ring_space(struct hns3_enet_ring *ring)
(begin - end)) - 1;
}
+static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg)
+{
+ return readl_relaxed(ring->tqp->io_base + reg);
+}
+
static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
return readl(base + reg);
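hns3_tqp_read_reg() condenses the readl_relaxed() boilerplate that hns3_dump_queue_reg() earlier in this diff now uses; the two forms are equivalent:

    /* before */
    hw_head = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
    /* after */
    hw_head = hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG);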
@@ -655,6 +663,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hns3_buf_size(_ring) ((_ring)->buf_size)
+#define hns3_ring_stats_update(ring, cnt) do { \
+ typeof(ring) (tmp) = (ring); \
+ u64_stats_update_begin(&(tmp)->syncp); \
+ ((tmp)->stats.cnt)++; \
+ u64_stats_update_end(&(tmp)->syncp); \
+} while (0)
+
static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
{
#if (PAGE_SIZE < 8192)
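The new stats macro folds the three-line u64_stats pattern used throughout hns3_enet.c into one call; the typeof() temporary evaluates the ring argument exactly once. What a call site expands to, modulo parentheses:

    hns3_ring_stats_update(ring, sw_err_cnt);
    /* ...expands to (tmp takes the type of ring, struct hns3_enet_ring *): */
    do {
    	struct hns3_enet_ring *tmp = ring;
    	u64_stats_update_begin(&tmp->syncp);
    	tmp->stats.sw_err_cnt++;
    	u64_stats_update_end(&tmp->syncp);
    } while (0);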
@@ -705,6 +720,8 @@ void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
u32 ql_value);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
+int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c9b4568d7a8d..c06c39ece80d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -643,11 +643,13 @@ static u32 hns3_get_link(struct net_device *netdev)
}
static void hns3_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int queue_num = h->kinfo.num_tqps;
+ int rx_queue_index = h->kinfo.num_tqps;
if (hns3_nic_resetting(netdev)) {
netdev_err(netdev, "dev resetting!");
@@ -658,7 +660,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->rx_max_pending = HNS3_RING_MAX_PENDING;
param->tx_pending = priv->ring[0].desc_num;
- param->rx_pending = priv->ring[queue_num].desc_num;
+ param->rx_pending = priv->ring[rx_queue_index].desc_num;
+ kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
}
static void hns3_get_pauseparam(struct net_device *netdev,
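The rename from queue_num to rx_queue_index leans on the ring array layout this driver uses: priv->ring[] packs all TX rings first, then all RX rings, so index h->kinfo.num_tqps is the first RX ring (the same layout hns3_change_rx_buf_len() below indexes with i + h->kinfo.num_tqps). Sketched:

    /* priv->ring[0 .. num_tqps-1]            TX rings
     * priv->ring[num_tqps .. 2*num_tqps-1]   RX rings
     */
    struct hns3_enet_ring *rx_ring = &priv->ring[i + h->kinfo.num_tqps]; /* RX ring of queue i */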
@@ -1064,14 +1067,23 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
}
static int hns3_check_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param)
{
+#define RX_BUF_LEN_2K 2048
+#define RX_BUF_LEN_4K 4096
if (hns3_nic_resetting(ndev))
return -EBUSY;
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
+ if (kernel_param->rx_buf_len != RX_BUF_LEN_2K &&
+ kernel_param->rx_buf_len != RX_BUF_LEN_4K) {
+		netdev_err(ndev, "Rx buf len only supports 2048 and 4096\n");
+ return -EINVAL;
+ }
+
if (param->tx_pending > HNS3_RING_MAX_PENDING ||
param->tx_pending < HNS3_RING_MIN_PENDING ||
param->rx_pending > HNS3_RING_MAX_PENDING ||
@@ -1084,8 +1096,26 @@ static int hns3_check_ringparam(struct net_device *ndev,
return 0;
}
+static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ h->kinfo.rx_buf_len = rx_buf_len;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ h->kinfo.tqp[i]->buf_size = rx_buf_len;
+ priv->ring[i + h->kinfo.num_tqps].buf_size = rx_buf_len;
+ }
+
+ return 0;
+}
+
static int hns3_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
@@ -1094,9 +1124,10 @@ static int hns3_set_ringparam(struct net_device *ndev,
u32 old_tx_desc_num, new_tx_desc_num;
u32 old_rx_desc_num, new_rx_desc_num;
u16 queue_num = h->kinfo.num_tqps;
+ u32 old_rx_buf_len;
int ret, i;
- ret = hns3_check_ringparam(ndev, param);
+ ret = hns3_check_ringparam(ndev, param, kernel_param);
if (ret)
return ret;
@@ -1105,8 +1136,10 @@ static int hns3_set_ringparam(struct net_device *ndev,
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
old_tx_desc_num = priv->ring[0].desc_num;
old_rx_desc_num = priv->ring[queue_num].desc_num;
+ old_rx_buf_len = priv->ring[queue_num].buf_size;
if (old_tx_desc_num == new_tx_desc_num &&
- old_rx_desc_num == new_rx_desc_num)
+ old_rx_desc_num == new_rx_desc_num &&
+ kernel_param->rx_buf_len == old_rx_buf_len)
return 0;
tmp_rings = hns3_backup_ringparam(priv);
@@ -1117,19 +1150,22 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
+		    "Changing Tx/Rx ring depth from %u/%u to %u/%u, changing rx buffer len from %d to %d\n",
old_tx_desc_num, old_rx_desc_num,
- new_tx_desc_num, new_rx_desc_num);
+ new_tx_desc_num, new_rx_desc_num,
+ old_rx_buf_len, kernel_param->rx_buf_len);
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
+ hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len);
ret = hns3_init_all_ring(priv);
if (ret) {
- netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n",
+		netdev_err(ndev, "failed to set ringparam, reverting to old value (%d)\n",
ret);
+ hns3_change_rx_buf_len(ndev, old_rx_buf_len);
hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
old_rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
@@ -1699,6 +1735,7 @@ static int hns3_get_tunable(struct net_device *netdev,
void *data)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
int ret = 0;
switch (tuna->id) {
@@ -1709,6 +1746,9 @@ static int hns3_get_tunable(struct net_device *netdev,
case ETHTOOL_RX_COPYBREAK:
*(u32 *)data = priv->rx_copybreak;
break;
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ *(u32 *)data = h->kinfo.tx_spare_buf_size;
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1717,11 +1757,43 @@ static int hns3_get_tunable(struct net_device *netdev,
return ret;
}
+static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
+ u32 data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int ret;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ h->kinfo.tx_spare_buf_size = data;
+
+ ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UP_CLIENT);
+ if (ret)
+ hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+
+ return ret;
+}
+
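hns3_set_tx_spare_buf_size() applies the new size by walking the client through the full reset hand-shake, and a failed HNAE3_UP_CLIENT is unwound with another HNAE3_UNINIT_CLIENT so the revert path in hns3_set_tunable() starts from a clean state. The sequence, condensed (error handling omitted; the claim that the spare buffer is re-allocated during the INIT stage is an assumption from the ordering here):

    h->kinfo.tx_spare_buf_size = data;       /* consumed during INIT */
    hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
    hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
    hns3_reset_notify(h, HNAE3_INIT_CLIENT);  /* presumably re-allocates the spare buffer */
    hns3_reset_notify(h, HNAE3_UP_CLIENT);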
static int hns3_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u32 old_tx_spare_buf_size, new_tx_spare_buf_size;
struct hnae3_handle *h = priv->ae_handle;
int i, ret = 0;
@@ -1740,6 +1812,26 @@ static int hns3_set_tunable(struct net_device *netdev,
priv->ring[i].rx_copybreak = priv->rx_copybreak;
break;
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
+ new_tx_spare_buf_size = *(u32 *)data;
+ ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
+ if (ret) {
+ int ret1;
+
+ netdev_warn(netdev,
+				    "failed to change tx spare buf size, reverting to old value\n");
+ ret1 = hns3_set_tx_spare_buf_size(netdev,
+ old_tx_spare_buf_size);
+ if (ret1) {
+ netdev_err(netdev,
+					   "failed to revert to old tx spare buf size\n");
+ return ret1;
+ }
+
+ return ret;
+ }
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1755,6 +1847,8 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_COALESCE_MAX_FRAMES | \
ETHTOOL_COALESCE_USE_CQE)
+#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN
+
static int hns3_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
@@ -1833,6 +1927,7 @@ static int hns3_get_link_ext_state(struct net_device *netdev,
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .supported_ring_params = HNS3_ETHTOOL_RING,
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
@@ -1864,6 +1959,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
static const struct ethtool_ops hns3_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .supported_ring_params = HNS3_ETHTOOL_RING,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
deleted file mode 100644
index d1bf5c4c0abb..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0+
-#
-# Makefile for the HISILICON network device drivers.
-#
-
-ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
-ccflags-y += -I $(srctree)/$(src)
-
-obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o hclge_devlink.o
-
-hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
deleted file mode 100644
index c5d5466810bb..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ /dev/null
@@ -1,591 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-// Copyright (c) 2016-2017 Hisilicon Limited.
-
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/dma-direction.h>
-#include "hclge_cmd.h"
-#include "hnae3.h"
-#include "hclge_main.h"
-
-#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
-
-static int hclge_ring_space(struct hclge_cmq_ring *ring)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
- int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
-
- return ring->desc_num - used - 1;
-}
-
-static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
-
- if (ntu > ntc)
- return head >= ntc && head <= ntu;
-
- return head >= ntc || head <= ntu;
-}
-
-static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclge_desc);
-
- ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
- &ring->desc_dma_addr, GFP_KERNEL);
- if (!ring->desc)
- return -ENOMEM;
-
- return 0;
-}
-
-static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclge_desc);
-
- if (ring->desc) {
- dma_free_coherent(cmq_ring_to_dev(ring), size,
- ring->desc, ring->desc_dma_addr);
- ring->desc = NULL;
- }
-}
-
-static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
-{
- struct hclge_hw *hw = &hdev->hw;
- struct hclge_cmq_ring *ring =
- (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
- int ret;
-
- ring->ring_type = ring_type;
- ring->dev = hdev;
-
- ret = hclge_alloc_cmd_desc(ring);
- if (ret) {
- dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
- (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
- return ret;
- }
-
- return 0;
-}
-
-void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
-{
- desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
-}
-
-void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
- enum hclge_opcode_type opcode, bool is_read)
-{
- memset((void *)desc, 0, sizeof(struct hclge_desc));
- desc->opcode = cpu_to_le16(opcode);
- desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
-
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
-}
-
-static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
-{
- dma_addr_t dma = ring->desc_dma_addr;
- struct hclge_dev *hdev = ring->dev;
- struct hclge_hw *hw = &hdev->hw;
- u32 reg_val;
-
- if (ring->ring_type == HCLGE_TYPE_CSQ) {
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
- lower_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
- upper_32_bits(dma));
- reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
- reg_val &= HCLGE_NIC_SW_RST_RDY;
- reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
- } else {
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
- lower_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
- upper_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
- ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
- }
-}
-
-static void hclge_cmd_init_regs(struct hclge_hw *hw)
-{
- hclge_cmd_config_regs(&hw->cmq.csq);
- hclge_cmd_config_regs(&hw->cmq.crq);
-}
-
-static int hclge_cmd_csq_clean(struct hclge_hw *hw)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- struct hclge_cmq_ring *csq = &hw->cmq.csq;
- u32 head;
- int clean;
-
- head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- rmb(); /* Make sure head is ready before touch any data */
-
- if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
- dev_warn(&hdev->pdev->dev,
- "Disabling any further commands to IMP firmware\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- dev_warn(&hdev->pdev->dev,
- "IMP firmware watchdog reset soon expected!\n");
- return -EIO;
- }
-
- clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
- csq->next_to_clean = head;
- return clean;
-}
-
-static int hclge_cmd_csq_done(struct hclge_hw *hw)
-{
- u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- return head == hw->cmq.csq.next_to_use;
-}
-
-static bool hclge_is_special_opcode(u16 opcode)
-{
- /* these commands have several descriptors,
- * and use the first one to save opcode and return value
- */
- static const u16 spec_opcode[] = {
- HCLGE_OPC_STATS_64_BIT,
- HCLGE_OPC_STATS_32_BIT,
- HCLGE_OPC_STATS_MAC,
- HCLGE_OPC_STATS_MAC_ALL,
- HCLGE_OPC_QUERY_32_BIT_REG,
- HCLGE_OPC_QUERY_64_BIT_REG,
- HCLGE_QUERY_CLEAR_MPF_RAS_INT,
- HCLGE_QUERY_CLEAR_PF_RAS_INT,
- HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
- HCLGE_QUERY_ALL_ERR_INFO
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
- if (spec_opcode[i] == opcode)
- return true;
- }
-
- return false;
-}
-
-struct errcode {
- u32 imp_errcode;
- int common_errno;
-};
-
-static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
- int num)
-{
- struct hclge_desc *desc_to_use;
- int handle = 0;
-
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
-}
-
-static int hclge_cmd_convert_err_code(u16 desc_ret)
-{
- struct errcode hclge_cmd_errcode[] = {
- {HCLGE_CMD_EXEC_SUCCESS, 0},
- {HCLGE_CMD_NO_AUTH, -EPERM},
- {HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
- {HCLGE_CMD_QUEUE_FULL, -EXFULL},
- {HCLGE_CMD_NEXT_ERR, -ENOSR},
- {HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
- {HCLGE_CMD_PARA_ERR, -EINVAL},
- {HCLGE_CMD_RESULT_ERR, -ERANGE},
- {HCLGE_CMD_TIMEOUT, -ETIME},
- {HCLGE_CMD_HILINK_ERR, -ENOLINK},
- {HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
- {HCLGE_CMD_INVALID, -EBADR},
- };
- u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
- u32 i;
-
- for (i = 0; i < errcode_count; i++)
- if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
- return hclge_cmd_errcode[i].common_errno;
-
- return -EIO;
-}
-
-static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
- int num, int ntc)
-{
- u16 opcode, desc_ret;
- int handle;
-
- opcode = le16_to_cpu(desc[0].opcode);
- for (handle = 0; handle < num; handle++) {
- desc[handle] = hw->cmq.csq.desc[ntc];
- ntc++;
- if (ntc >= hw->cmq.csq.desc_num)
- ntc = 0;
- }
- if (likely(!hclge_is_special_opcode(opcode)))
- desc_ret = le16_to_cpu(desc[num - 1].retval);
- else
- desc_ret = le16_to_cpu(desc[0].retval);
-
- hw->cmq.last_status = desc_ret;
-
- return hclge_cmd_convert_err_code(desc_ret);
-}
-
-static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
- int num, int ntc)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- bool is_completed = false;
- u32 timeout = 0;
- int handle, ret;
-
- /**
- * If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclge_cmd_csq_done(hw)) {
- is_completed = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!is_completed)
- ret = -EBADE;
- else
- ret = hclge_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclge_cmd_csq_clean(hw);
- if (handle < 0)
- ret = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
- return ret;
-}
-
-/**
- * hclge_cmd_send - send command to command queue
- * @hw: pointer to the hw struct
- * @desc: prefilled descriptor for describing the command
- * @num : the number of descriptors to be sent
- *
- * This is the main send command for command queue, it
- * sends the queue, cleans the queue, etc
- **/
-int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- struct hclge_cmq_ring *csq = &hw->cmq.csq;
- int ret;
- int ntc;
-
- spin_lock_bh(&hw->cmq.csq.lock);
-
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- if (num > hclge_ring_space(&hw->cmq.csq)) {
- /* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
- * need update the SW HEAD pointer csq->next_to_clean
- */
- csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- /**
- * Record the location of desc in the ring for this time
- * which will be use for hardware to write back
- */
- ntc = hw->cmq.csq.next_to_use;
-
- hclge_cmd_copy_desc(hw, desc, num);
-
- /* Write to hardware */
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
-
- ret = hclge_cmd_check_result(hw, desc, num, ntc);
-
- spin_unlock_bh(&hw->cmq.csq.lock);
-
- return ret;
-}
-
-static void hclge_set_default_capability(struct hclge_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
- set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
- }
-}
-
-static const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = {
- {HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
- {HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
- {HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
- {HCLGE_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
- {HCLGE_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
- {HCLGE_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
- {HCLGE_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
- {HCLGE_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
- {HCLGE_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
- {HCLGE_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
- {HCLGE_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
- {HCLGE_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
- {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
- {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
-};
-
-static void hclge_parse_capability(struct hclge_dev *hdev,
- struct hclge_query_version_cmd *cmd)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- u32 caps, i;
-
- caps = __le32_to_cpu(cmd->caps[0]);
- for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++)
- if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit))
- set_bit(hclge_cmd_caps_bit_map0[i].local_bit,
- ae_dev->caps);
-}
-
-static __le32 hclge_build_api_caps(void)
-{
- u32 api_caps = 0;
-
- hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1);
-
- return cpu_to_le32(api_caps);
-}
-
-static enum hclge_cmd_status
-hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hclge_query_version_cmd *resp;
- struct hclge_desc desc;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
- resp = (struct hclge_query_version_cmd *)desc.data;
- resp->api_caps = hclge_build_api_caps();
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- return ret;
-
- hdev->fw_version = le32_to_cpu(resp->firmware);
-
- ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
- HNAE3_PCI_REVISION_BIT_SIZE;
- ae_dev->dev_version |= hdev->pdev->revision;
-
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
- hclge_set_default_capability(hdev);
-
- hclge_parse_capability(hdev, resp);
-
- return ret;
-}
-
-int hclge_cmd_queue_init(struct hclge_dev *hdev)
-{
- int ret;
-
- /* Setup the lock for command queue */
- spin_lock_init(&hdev->hw.cmq.csq.lock);
- spin_lock_init(&hdev->hw.cmq.crq.lock);
-
- /* Setup the queue entries for use cmd queue */
- hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
- hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
-
- /* Setup Tx write back timeout */
- hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;
-
- /* Setup queue rings */
- ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CSQ ring setup error %d\n", ret);
- return ret;
- }
-
- ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CRQ ring setup error %d\n", ret);
- goto err_csq;
- }
-
- return 0;
-err_csq:
- hclge_free_cmd_desc(&hdev->hw.cmq.csq);
- return ret;
-}
-
-static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
-{
- struct hclge_firmware_compat_cmd *req;
- struct hclge_desc desc;
- u32 compat = 0;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
-
- if (en) {
- req = (struct hclge_firmware_compat_cmd *)desc.data;
-
- hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
- if (hnae3_dev_phy_imp_supported(hdev))
- hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_MAC_STATS_EXT_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_SYNC_RX_RING_HEAD_EN_B, 1);
-
- req->compat = cpu_to_le32(compat);
- }
-
- return hclge_cmd_send(&hdev->hw, &desc, 1);
-}
-
-int hclge_cmd_init(struct hclge_dev *hdev)
-{
- int ret;
-
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
-
- hdev->hw.cmq.csq.next_to_clean = 0;
- hdev->hw.cmq.csq.next_to_use = 0;
- hdev->hw.cmq.crq.next_to_clean = 0;
- hdev->hw.cmq.crq.next_to_use = 0;
-
- hclge_cmd_init_regs(&hdev->hw);
-
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
-
- /* Check if there is new reset pending, because the higher level
- * reset may happen when lower level reset is being processed.
- */
- if ((hclge_is_reset_pending(hdev))) {
- dev_err(&hdev->pdev->dev,
- "failed to init cmd since reset %#lx pending\n",
- hdev->reset_pending);
- ret = -EBUSY;
- goto err_cmd_init;
- }
-
- /* get version and device capabilities */
- ret = hclge_cmd_query_version_and_capability(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to query version and capabilities, ret = %d\n",
- ret);
- goto err_cmd_init;
- }
-
- dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
- HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
- HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
- HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
- HNAE3_FW_VERSION_BYTE0_SHIFT));
-
- /* ask the firmware to enable some features, driver can work without
- * it.
- */
- ret = hclge_firmware_compat_config(hdev, true);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "Firmware compatible features not enabled(%d).\n",
- ret);
-
- return 0;
-
-err_cmd_init:
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
-
- return ret;
-}
-
-static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
-{
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
-}
-
-void hclge_cmd_uninit(struct hclge_dev *hdev)
-{
- hclge_firmware_compat_config(hdev, false);
-
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- /* wait to ensure that the firmware completes the possible left
- * over commands.
- */
- msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
- hclge_cmd_uninit_regs(&hdev->hw);
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- hclge_free_cmd_desc(&hdev->hw.cmq.csq);
- hclge_free_cmd_desc(&hdev->hw.cmq.crq);
-}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d24e59028798..f9d89511eb32 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -7,323 +7,21 @@
#include <linux/io.h>
#include <linux/etherdevice.h>
#include "hnae3.h"
-
-#define HCLGE_CMDQ_TX_TIMEOUT 30000
-#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200
-#define HCLGE_DESC_DATA_LEN 6
+#include "hclge_comm_cmd.h"
struct hclge_dev;
-struct hclge_desc {
- __le16 opcode;
#define HCLGE_CMDQ_RX_INVLD_B 0
#define HCLGE_CMDQ_RX_OUTVLD_B 1
- __le16 flag;
- __le16 retval;
- __le16 rsv;
- __le32 data[HCLGE_DESC_DATA_LEN];
-};
-
-struct hclge_cmq_ring {
- dma_addr_t desc_dma_addr;
- struct hclge_desc *desc;
- struct hclge_dev *dev;
- u32 head;
- u32 tail;
-
- u16 buf_size;
- u16 desc_num;
- int next_to_use;
- int next_to_clean;
- u8 ring_type; /* cmq ring type */
- spinlock_t lock; /* Command queue lock */
-};
-
-enum hclge_cmd_return_status {
- HCLGE_CMD_EXEC_SUCCESS = 0,
- HCLGE_CMD_NO_AUTH = 1,
- HCLGE_CMD_NOT_SUPPORTED = 2,
- HCLGE_CMD_QUEUE_FULL = 3,
- HCLGE_CMD_NEXT_ERR = 4,
- HCLGE_CMD_UNEXE_ERR = 5,
- HCLGE_CMD_PARA_ERR = 6,
- HCLGE_CMD_RESULT_ERR = 7,
- HCLGE_CMD_TIMEOUT = 8,
- HCLGE_CMD_HILINK_ERR = 9,
- HCLGE_CMD_QUEUE_ILLEGAL = 10,
- HCLGE_CMD_INVALID = 11,
-};
-
-enum hclge_cmd_status {
- HCLGE_STATUS_SUCCESS = 0,
- HCLGE_ERR_CSQ_FULL = -1,
- HCLGE_ERR_CSQ_TIMEOUT = -2,
- HCLGE_ERR_CSQ_ERROR = -3,
-};
-
struct hclge_misc_vector {
u8 __iomem *addr;
int vector_irq;
char name[HNAE3_INT_NAME_LEN];
};
-struct hclge_cmq {
- struct hclge_cmq_ring csq;
- struct hclge_cmq_ring crq;
- u16 tx_timeout;
- enum hclge_cmd_status last_status;
-};
-
-#define HCLGE_CMD_FLAG_IN BIT(0)
-#define HCLGE_CMD_FLAG_OUT BIT(1)
-#define HCLGE_CMD_FLAG_NEXT BIT(2)
-#define HCLGE_CMD_FLAG_WR BIT(3)
-#define HCLGE_CMD_FLAG_NO_INTR BIT(4)
-#define HCLGE_CMD_FLAG_ERR_INTR BIT(5)
-
-enum hclge_opcode_type {
- /* Generic commands */
- HCLGE_OPC_QUERY_FW_VER = 0x0001,
- HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
- HCLGE_OPC_GBL_RST_STATUS = 0x0021,
- HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
- HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
- HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
- HCLGE_OPC_GET_CFG_PARAM = 0x0025,
- HCLGE_OPC_PF_RST_DONE = 0x0026,
- HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
-
- HCLGE_OPC_STATS_64_BIT = 0x0030,
- HCLGE_OPC_STATS_32_BIT = 0x0031,
- HCLGE_OPC_STATS_MAC = 0x0032,
- HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
- HCLGE_OPC_STATS_MAC_ALL = 0x0034,
-
- HCLGE_OPC_QUERY_REG_NUM = 0x0040,
- HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
- HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
- HCLGE_OPC_DFX_BD_NUM = 0x0043,
- HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
- HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
- HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
- HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
- HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
- HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
- HCLGE_OPC_DFX_NCSI_REG = 0x004A,
- HCLGE_OPC_DFX_RTC_REG = 0x004B,
- HCLGE_OPC_DFX_PPP_REG = 0x004C,
- HCLGE_OPC_DFX_RCB_REG = 0x004D,
- HCLGE_OPC_DFX_TQP_REG = 0x004E,
- HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
-
- HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
-
- /* MAC command */
- HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
- HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
- HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
- HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
- HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
- HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
- HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
- HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
- HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
- HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
-
- /* PTP commands */
- HCLGE_OPC_PTP_INT_EN = 0x0501,
- HCLGE_OPC_PTP_MODE_CFG = 0x0507,
-
- /* PFC/Pause commands */
- HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
- HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
- HCLGE_OPC_CFG_MAC_PARA = 0x0703,
- HCLGE_OPC_CFG_PFC_PARA = 0x0704,
- HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
- HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
- HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
- HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
- HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
- HCLGE_OPC_QOS_MAP = 0x070A,
-
- /* ETS/scheduler commands */
- HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
- HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
- HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
- HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
- HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
- HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
- HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
- HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
- HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
- HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
- HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
- HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
- HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
- HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
- HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
- HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
- HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
- HCLGE_OPC_TM_NODES = 0x0816,
- HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
- HCLGE_OPC_QSET_DFX_STS = 0x0844,
- HCLGE_OPC_PRI_DFX_STS = 0x0845,
- HCLGE_OPC_PG_DFX_STS = 0x0846,
- HCLGE_OPC_PORT_DFX_STS = 0x0847,
- HCLGE_OPC_SCH_NQ_CNT = 0x0848,
- HCLGE_OPC_SCH_RQ_CNT = 0x0849,
- HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
- HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
- HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
-
- /* Packet buffer allocate commands */
- HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
- HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
- HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
- HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
- HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
- HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
-
- /* TQP management command */
- HCLGE_OPC_SET_TQP_MAP = 0x0A01,
-
- /* TQP commands */
- HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
- HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
- HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
- HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
- HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
- HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
- HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
- HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
- HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
- HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
- HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
-
- /* PPU commands */
- HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
-
- /* TSO command */
- HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
- HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
-
- /* RSS commands */
- HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
- HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
- HCLGE_OPC_RSS_TC_MODE = 0x0D08,
- HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
-
- /* Promisuous mode command */
- HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
-
- /* Vlan offload commands */
- HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
- HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
-
- /* Interrupts commands */
- HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
- HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
-
- /* MAC commands */
- HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
- HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
- HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
- HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
- HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
- HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
- HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
-
- /* MAC VLAN commands */
- HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
-
- /* VLAN commands */
- HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
- HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
- HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
- HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
-
- /* Flow Director commands */
- HCLGE_OPC_FD_MODE_CTRL = 0x1200,
- HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
- HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
- HCLGE_OPC_FD_TCAM_OP = 0x1203,
- HCLGE_OPC_FD_AD_OP = 0x1204,
- HCLGE_OPC_FD_CNT_OP = 0x1205,
- HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
-
- /* MDIO command */
- HCLGE_OPC_MDIO_CONFIG = 0x1900,
-
- /* QCN commands */
- HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
- HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
- HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
- HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
- HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
- HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
- HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
- HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
-
- /* Mailbox command */
- HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
-
- /* Led command */
- HCLGE_OPC_LED_STATUS_CFG = 0xB000,
-
- /* clear hardware resource command */
- HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
-
- /* NCL config command */
- HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
-
- /* IMP stats command */
- HCLGE_OPC_IMP_STATS_BD = 0x7012,
- HCLGE_OPC_IMP_STATS_INFO = 0x7013,
- HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
-
- /* SFP command */
- HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
- HCLGE_OPC_GET_SFP_EXIST = 0x7101,
- HCLGE_OPC_GET_SFP_INFO = 0x7104,
-
- /* Error INT commands */
- HCLGE_MAC_COMMON_INT_EN = 0x030E,
- HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
- HCLGE_SSU_ECC_INT_CMD = 0x0989,
- HCLGE_SSU_COMMON_INT_CMD = 0x098C,
- HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
- HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
- HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
- HCLGE_COMMON_ECC_INT_CFG = 0x1505,
- HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
- HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
- HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
- HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
- HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
- HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
- HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
- HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
- HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
- HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
- HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
- HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
- HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
- HCLGE_IGU_COMMON_INT_EN = 0x1806,
- HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
- HCLGE_PPP_CMD0_INT_CMD = 0x2100,
- HCLGE_PPP_CMD1_INT_CMD = 0x2101,
- HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
- HCLGE_NCSI_INT_EN = 0x2401,
-
- /* PHY command */
- HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
- HCLGE_OPC_PHY_REG = 0x7026,
-
- /* Query link diagnosis info command */
- HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
-};
+#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \
+ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
#define HCLGE_TQP_REG_OFFSET 0x80000
#define HCLGE_TQP_REG_SIZE 0x200
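The command-queue plumbing (descriptor layout, opcodes, CSQ/CRQ rings) moves into the shared hclge_comm module; the macro above keeps existing PF call sites compiling unchanged. Both spellings are equivalent after this patch (assuming the shared helper and opcode definitions introduced in hclge_comm_cmd.h elsewhere in this series):

    struct hclge_desc desc;

    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);      /* old name, now a macro */
    hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true); /* shared helper */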
@@ -391,38 +89,6 @@ struct hclge_rx_priv_buff_cmd {
u8 rsv[6];
};
-enum HCLGE_CAP_BITS {
- HCLGE_CAP_UDP_GSO_B,
- HCLGE_CAP_QB_B,
- HCLGE_CAP_FD_FORWARD_TC_B,
- HCLGE_CAP_PTP_B,
- HCLGE_CAP_INT_QL_B,
- HCLGE_CAP_HW_TX_CSUM_B,
- HCLGE_CAP_TX_PUSH_B,
- HCLGE_CAP_PHY_IMP_B,
- HCLGE_CAP_TQP_TXRX_INDEP_B,
- HCLGE_CAP_HW_PAD_B,
- HCLGE_CAP_STASH_B,
- HCLGE_CAP_UDP_TUNNEL_CSUM_B,
- HCLGE_CAP_RAS_IMP_B = 12,
- HCLGE_CAP_FEC_B = 13,
- HCLGE_CAP_PAUSE_B = 14,
- HCLGE_CAP_RXD_ADV_LAYOUT_B = 15,
- HCLGE_CAP_PORT_VLAN_BYPASS_B = 17,
-};
-
-enum HCLGE_API_CAP_BITS {
- HCLGE_API_CAP_FLEX_RSS_TBL_B,
-};
-
-#define HCLGE_QUERY_CAP_LENGTH 3
-struct hclge_query_version_cmd {
- __le32 firmware;
- __le32 hardware;
- __le32 api_caps;
- __le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
-};
-
#define HCLGE_RX_PRIV_EN_B 15
#define HCLGE_TC_NUM_ONE_DESC 4
struct hclge_priv_wl {
@@ -571,38 +237,10 @@ struct hclge_vf_num_cmd {
};
#define HCLGE_RSS_DEFAULT_OUTPORT_B 4
-#define HCLGE_RSS_HASH_KEY_OFFSET_B 4
-#define HCLGE_RSS_HASH_KEY_NUM 16
-struct hclge_rss_config_cmd {
- u8 hash_config;
- u8 rsv[7];
- u8 hash_key[HCLGE_RSS_HASH_KEY_NUM];
-};
-
-struct hclge_rss_input_tuple_cmd {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
- u8 rsv[16];
-};
-#define HCLGE_RSS_CFG_TBL_SIZE 16
#define HCLGE_RSS_CFG_TBL_SIZE_H 4
-#define HCLGE_RSS_CFG_TBL_BW_H 2U
#define HCLGE_RSS_CFG_TBL_BW_L 8U
-struct hclge_rss_indirection_table_cmd {
- __le16 start_table_index;
- __le16 rss_set_bitmap;
- u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H];
- u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE];
-};
-
#define HCLGE_RSS_TC_OFFSET_S 0
#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGE_RSS_TC_SIZE_MSB_B 11
@@ -610,10 +248,6 @@ struct hclge_rss_indirection_table_cmd {
#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3
#define HCLGE_RSS_TC_VALID_B 15
-struct hclge_rss_tc_mode_cmd {
- __le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
- u8 rsv[8];
-};
#define HCLGE_LINK_STATUS_UP_B 0
#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
@@ -1015,16 +649,6 @@ struct hclge_common_lb_cmd {
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */
-#define HCLGE_TYPE_CRQ 0
-#define HCLGE_TYPE_CSQ 1
-
-/* this bit indicates that the driver is ready for hardware reset */
-#define HCLGE_NIC_SW_RST_RDY_B 16
-#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B)
-
-#define HCLGE_NIC_CMQ_DESC_NUM 1024
-#define HCLGE_NIC_CMQ_DESC_NUM_S 3
-
#define HCLGE_LED_LOCATE_STATE_S 0
#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
@@ -1147,16 +771,6 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
u8 rsv[4];
};
-#define HCLGE_LINK_EVENT_REPORT_EN_B 0
-#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
-#define HCLGE_PHY_IMP_EN_B 2
-#define HCLGE_MAC_STATS_EXT_EN_B 3
-#define HCLGE_SYNC_RX_RING_HEAD_EN_B 4
-struct hclge_firmware_compat_cmd {
- __le32 compat;
- u8 rsv[20];
-};
-
#define HCLGE_SFP_INFO_CMD_NUM 6
#define HCLGE_SFP_INFO_BD0_LEN 20
#define HCLGE_SFP_INFO_BDX_LEN 24
@@ -1239,44 +853,10 @@ struct hclge_phy_reg_cmd {
u8 rsv1[18];
};
-/* capabilities bits map between imp firmware and local driver */
-struct hclge_caps_bit_map {
- u16 imp_bit;
- u16 local_bit;
-};
-
-int hclge_cmd_init(struct hclge_dev *hdev);
-static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
-{
- writel(value, base + reg);
-}
-
-#define hclge_write_dev(a, reg, value) \
- hclge_write_reg((a)->io_base, reg, value)
-#define hclge_read_dev(a, reg) \
- hclge_read_reg((a)->io_base, reg)
-
-static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg)
-{
- u8 __iomem *reg_addr = READ_ONCE(base);
-
- return readl(reg_addr + reg);
-}
-
-#define HCLGE_SEND_SYNC(flag) \
- ((flag) & HCLGE_CMD_FLAG_NO_INTR)
-
struct hclge_hw;
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
-void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
- enum hclge_opcode_type opcode, bool is_read);
-void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
-
-enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
- struct hclge_desc *desc);
-enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
- struct hclge_desc *desc);
-
-void hclge_cmd_uninit(struct hclge_dev *hdev);
-int hclge_cmd_queue_init(struct hclge_dev *hdev);
+enum hclge_comm_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
+ struct hclge_desc *desc);
+enum hclge_comm_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
+ struct hclge_desc *desc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 375ebf105a9a..69b8673436ca 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -203,7 +203,7 @@ static int hclge_map_update(struct hclge_dev *hdev)
if (ret)
return ret;
- hclge_rss_indir_init_cfg(hdev);
+ hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
return hclge_rss_init_hw(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 4e0a8c2f7c05..9b870e79c290 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -77,6 +77,10 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
+/* Make sure that: len(name) + interval >= maxlen(item data) + 2.
+ * For example, for name = "pkt_num" (len: 7), the item data is a u32
+ * printed as "%u" (maxlen: 10), so the interval must be at least 5.
+ */
static void hclge_dbg_fill_content(char *content, u16 len,
const struct hclge_dbg_item *items,
const char **result, u16 size)
@@ -99,7 +103,7 @@ static void hclge_dbg_fill_content(char *content, u16 len,
static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
{
if (id)
- sprintf(buf, "vf%u", id - 1);
+ sprintf(buf, "vf%u", id - 1U);
else
sprintf(buf, "pf");
@@ -146,7 +150,7 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
desc->data[0] = cpu_to_le32(index);
for (i = 1; i < bd_num; i++) {
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc++;
hclge_cmd_setup_basic_desc(desc, cmd, true);
}
@@ -258,12 +262,29 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return 0;
}
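+/* bit offset and name of each MAC enable status bit in txrx_pad_fcs_loop_en */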
+static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
+ {HCLGE_MAC_TX_EN_B, "mac_trans_en"},
+ {HCLGE_MAC_RX_EN_B, "mac_rcv_en"},
+ {HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
+ {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
+ {HCLGE_MAC_1588_TX_B, "1588_trans_en"},
+ {HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
+ {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"},
+ {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
+ {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"},
+ {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
+ {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
+ {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
+ {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
+ {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
+};
+
static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
int len, int *pos)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
- u32 loop_en;
+ u32 loop_en, i, offset;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
@@ -278,39 +299,12 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_mac_mode_cmd *)desc.data;
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- *pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
- *pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
- *pos += scnprintf(buf + *pos, len - *pos,
- "mac_rx_oversize_truncate_en: %#x\n",
- hnae3_get_bit(loop_en,
- HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
- *pos += scnprintf(buf + *pos, len - *pos,
- "mac_tx_under_min_err_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
- *pos += scnprintf(buf + *pos, len - *pos,
- "mac_tx_oversize_truncate_en: %#x\n",
- hnae3_get_bit(loop_en,
- HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
+ offset = hclge_dbg_mac_en_status[i].offset;
+ *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
+ hclge_dbg_mac_en_status[i].message,
+ hnae3_get_bit(loop_en, offset));
+ }
return 0;
}
@@ -788,7 +782,6 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
-
if (!data_str)
return -ENOMEM;
@@ -1273,7 +1266,7 @@ static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) {
@@ -1309,7 +1302,7 @@ static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) {
@@ -1454,9 +1447,9 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
u32 *req;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
@@ -1614,8 +1607,19 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
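+/* register offset and description of each reset-related status register dumped below */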
+static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
+ {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
+ {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"},
+ {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"},
+ {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
+ {HCLGE_GLOBAL_RESET_REG, "hardware reset status"},
+ {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
+ {HCLGE_FUN_RST_ING, "function reset status"}
+};
+
int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
{
+ u32 i, offset;
int pos = 0;
pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
@@ -1634,22 +1638,14 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
hdev->rst_stats.reset_cnt);
pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
hdev->rst_stats.reset_fail_cnt);
- pos += scnprintf(buf + pos, len - pos,
- "vector0 interrupt enable status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
- pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
- pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
- pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n",
- hclge_read_dev(&hdev->hw,
- HCLGE_RAS_PF_OTHER_INT_STS_REG));
- pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
- pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
- pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
+ offset = hclge_dbg_rst_info[i].offset;
+ pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
+ hclge_dbg_rst_info[i].message,
+ hclge_read_dev(&hdev->hw, offset));
+ }
+
pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
hdev->state);
@@ -1771,7 +1767,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
- char *buf, int *len, int *pos)
+ char *buf, int len, int *pos)
{
#define HCLGE_CMD_DATA_NUM 6
@@ -1783,7 +1779,7 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
if (i == 0 && j == 0)
continue;
- *pos += scnprintf(buf + *pos, *len - *pos,
+ *pos += scnprintf(buf + *pos, len - *pos,
"0x%04x | 0x%08x\n", offset,
le32_to_cpu(desc[i].data[j]));
@@ -1821,7 +1817,7 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- hclge_ncl_config_data_print(desc, &index, buf, &len, &pos);
+ hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
}
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index c526591a7240..724052928b88 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -94,6 +94,11 @@ struct hclge_dbg_func {
char *buf, int len);
};
+struct hclge_dbg_status_dfx_info {
+ u32 offset;
+ char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
@@ -321,10 +326,10 @@ static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{true, "IGU_RX_OUT_UDP0_PKT"},
{true, "IGU_RX_IN_UDP0_PKT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
+ {true, "IGU_MC_CAR_DROP_PKT_L"},
+ {true, "IGU_MC_CAR_DROP_PKT_H"},
+ {true, "IGU_BC_CAR_DROP_PKT_L"},
+ {true, "IGU_BC_CAR_DROP_PKT_H"},
{false, "Reserved"},
{true, "IGU_RX_OVERSIZE_PKT_L"},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 20e628c2bd44..42a9e73d8588 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1399,7 +1399,7 @@ static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
if (en) {
@@ -1498,7 +1498,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
/* configure PPP error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
@@ -1633,7 +1633,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
/* configure PPU error interrupts */
if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (en) {
desc[0].data[0] =
@@ -1718,7 +1718,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure SSU ecc error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
if (en) {
desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
@@ -1740,7 +1740,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure SSU common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
if (en) {
@@ -1963,7 +1963,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
&ae_dev->hw_err_reset_req);
/* clear all main PF RAS errors */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
@@ -2036,7 +2036,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
}
/* clear all PF RAS errors */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
@@ -2087,8 +2087,8 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
true);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
if (ret) {
@@ -2119,7 +2119,7 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
- HCLGE_CMD_FLAG_NEXT);
+ HCLGE_COMM_CMD_FLAG_NEXT);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
@@ -2235,7 +2235,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
/* clear error status */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
if (ret) {
dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
@@ -2405,7 +2405,8 @@ static int hclge_clear_hw_msix_error(struct hclge_dev *hdev,
else
desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT);
- desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+ desc[0].flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
return hclge_cmd_send(&hdev->hw, &desc[0], bd_num);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c2a58101144e..24f7afacae02 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -24,6 +24,7 @@
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
+#include "hclge_comm_cmd.h"
#define HCLGE_NAME "hclge"
@@ -90,20 +91,20 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
- HCLGE_NIC_CSQ_BASEADDR_H_REG,
- HCLGE_NIC_CSQ_DEPTH_REG,
- HCLGE_NIC_CSQ_TAIL_REG,
- HCLGE_NIC_CSQ_HEAD_REG,
- HCLGE_NIC_CRQ_BASEADDR_L_REG,
- HCLGE_NIC_CRQ_BASEADDR_H_REG,
- HCLGE_NIC_CRQ_DEPTH_REG,
- HCLGE_NIC_CRQ_TAIL_REG,
- HCLGE_NIC_CRQ_HEAD_REG,
- HCLGE_VECTOR0_CMDQ_SRC_REG,
- HCLGE_CMDQ_INTR_STS_REG,
- HCLGE_CMDQ_INTR_EN_REG,
- HCLGE_CMDQ_INTR_GEN_REG};
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_CMDQ_INTR_STS_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
HCLGE_PF_OTHER_INT_REG,
@@ -370,14 +371,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static const u8 hclge_hash_key[] = {
- 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
- 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
- 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
- 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
- 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
-};
-
static const u32 hclge_dfx_bd_offset_list[] = {
HCLGE_DFX_BIOS_BD_OFFSET,
HCLGE_DFX_SSU_0_BD_OFFSET,
@@ -478,6 +471,20 @@ static const struct key_info tuple_key_info[] = {
offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
+/**
+ * hclge_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send command for the command queue: it sends
+ * the descriptors, cleans the queue, etc.
+ */
+int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
+{
+ return hclge_comm_cmd_send(&hw->hw, desc, num);
+}
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -604,111 +611,6 @@ int hclge_mac_update_stats(struct hclge_dev *hdev)
return hclge_mac_update_stats_defective(hdev);
}
-static int hclge_tqps_update_stats(struct hnae3_handle *handle)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct hnae3_queue *queue;
- struct hclge_desc desc[1];
- struct hclge_tqp *tqp;
- int ret, i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- /* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
- true);
-
- desc[0].data[0] = cpu_to_le32(tqp->index);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
- return ret;
- }
- tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[1]);
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- /* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_TX_STATS,
- true);
-
- desc[0].data[0] = cpu_to_le32(tqp->index);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
- return ret;
- }
- tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[1]);
- }
-
- return 0;
-}
-
-static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_tqp *tqp;
- u64 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
- }
-
- return buff;
-}
-
-static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-
- /* each tqp has TX & RX two queues */
- return kinfo->num_tqps * (2);
-}
-
-static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
- struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
- tqp->index);
- buff = buff + ETH_GSTRING_LEN;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
- tqp->index);
- buff = buff + ETH_GSTRING_LEN;
- }
-
- return buff;
-}
-
static int hclge_comm_get_count(struct hclge_dev *hdev,
const struct hclge_comm_stats_str strs[],
u32 size)
@@ -769,7 +671,7 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
handle = &hdev->vport[0].nic;
if (handle->client) {
- status = hclge_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status) {
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -799,7 +701,7 @@ static void hclge_update_stats(struct hnae3_handle *handle,
"Update MAC stats fail, status = %d.\n",
status);
- status = hclge_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status)
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -848,7 +750,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
} else if (stringset == ETH_SS_STATS) {
count = hclge_comm_get_count(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string)) +
- hclge_tqps_get_sset_count(handle, stringset);
+ hclge_comm_tqps_get_sset_count(handle);
}
return count;
@@ -866,7 +768,7 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
size = ARRAY_SIZE(g_mac_stats_string);
p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
size, p);
- p = hclge_tqps_get_strings(handle, p);
+ p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
@@ -900,7 +802,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_comm_get_stats(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
- p = hclge_tqps_get_stats(handle, p);
+ p = hclge_comm_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
@@ -1480,7 +1382,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
- ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
@@ -1520,7 +1422,7 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
- dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
+ dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
if (!dev_specs->max_qset_num)
@@ -1567,7 +1469,7 @@ static int hclge_query_dev_specs(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
true);
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
@@ -1613,12 +1515,39 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
+static void hclge_init_tc_config(struct hclge_dev *hdev)
+{
+ unsigned int i;
+
+ if (hdev->tc_max > HNAE3_MAX_TC ||
+ hdev->tc_max < 1) {
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
+ hdev->tc_max);
+ hdev->tc_max = 1;
+ }
+
+ /* Dev does not support DCB */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ hdev->tc_max = 1;
+ hdev->pfc_max = 0;
+ } else {
+ hdev->pfc_max = hdev->tc_max;
+ }
+
+ hdev->tm_info.num_tc = 1;
+
+	/* Currently, non-contiguous TCs are not supported */
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
+
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+}
+
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
- unsigned int i;
int node, ret;
ret = hclge_get_cfg(hdev, &cfg);
@@ -1662,29 +1591,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
- if ((hdev->tc_max > HNAE3_MAX_TC) ||
- (hdev->tc_max < 1)) {
- dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
- hdev->tc_max);
- hdev->tc_max = 1;
- }
-
- /* Dev does not support DCB */
- if (!hnae3_dev_dcb_supported(hdev)) {
- hdev->tc_max = 1;
- hdev->pfc_max = 0;
- } else {
- hdev->pfc_max = hdev->tc_max;
- }
-
- hdev->tm_info.num_tc = 1;
-
- /* Currently not support uncontiuous tc */
- for (i = 0; i < hdev->tm_info.num_tc; i++)
- hnae3_set_bit(hdev->hw_tc_map, i, 1);
-
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
-
+ hclge_init_tc_config(hdev);
hclge_init_kdump_kernel_config(hdev);
/* Set the affinity based on numa node */
@@ -1736,11 +1643,11 @@ static int hclge_config_gro(struct hclge_dev *hdev)
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
- struct hclge_tqp *tqp;
+ struct hclge_comm_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
- sizeof(struct hclge_tqp), GFP_KERNEL);
+ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
@@ -1759,11 +1666,11 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
* HCLGE_TQP_MAX_SIZE_DEV_V2
*/
if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGE_TQP_REG_OFFSET +
i * HCLGE_TQP_REG_SIZE;
else
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGE_TQP_REG_OFFSET +
HCLGE_TQP_EXT_REG_OFFSET +
(i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
@@ -1864,8 +1771,8 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
kinfo = &nic->kinfo;
for (i = 0; i < vport->alloc_tqps; i++) {
- struct hclge_tqp *q =
- container_of(kinfo->tqp[i], struct hclge_tqp, q);
+ struct hclge_comm_tqp *q =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
bool is_pf;
int ret;
@@ -1885,7 +1792,7 @@ static int hclge_map_tqp(struct hclge_dev *hdev)
u16 i, num_vport;
num_vport = hdev->num_req_vfs + 1;
- for (i = 0; i < num_vport; i++) {
+ for (i = 0; i < num_vport; i++) {
int ret;
ret = hclge_map_tqp_to_vport(hdev, vport);
@@ -1907,7 +1814,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
nic->numa_node_mask = hdev->numa_node_mask;
- nic->kinfo.io_base = hdev->hw.io_base;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
hdev->num_tx_desc, hdev->num_rx_desc);
@@ -2416,9 +2323,9 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
 /* The first descriptor sets the NEXT bit to 1 */
if (i == 0)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
@@ -2461,9 +2368,9 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
 /* The first descriptor sets the NEXT bit to 1 */
if (i == 0)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
@@ -2592,8 +2499,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
roce->rinfo.base_vector = hdev->num_nic_msi;
roce->rinfo.netdev = nic->kinfo.netdev;
- roce->rinfo.roce_io_base = hdev->hw.io_base;
- roce->rinfo.roce_mem_base = hdev->hw.mem_base;
+ roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
@@ -2653,11 +2560,38 @@ static u8 hclge_check_speed_dup(u8 duplex, int speed)
return duplex;
}
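+/* map driver MAC speed values to the corresponding firmware speed codes */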
+static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
+ {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
+ {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
+ {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
+ {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
+ {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
+ {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
+ {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
+ {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
+ {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
+};
+
+static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
+{
+ u16 i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
+ if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
+ *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
u8 duplex)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
+ u32 speed_fw;
int ret;
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
@@ -2667,48 +2601,14 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
if (duplex)
hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
- switch (speed) {
- case HCLGE_MAC_SPEED_10M:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
- break;
- case HCLGE_MAC_SPEED_100M:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
- break;
- case HCLGE_MAC_SPEED_1G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
- break;
- case HCLGE_MAC_SPEED_10G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
- break;
- case HCLGE_MAC_SPEED_25G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
- break;
- case HCLGE_MAC_SPEED_40G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
- break;
- case HCLGE_MAC_SPEED_50G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
- break;
- case HCLGE_MAC_SPEED_100G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
- break;
- case HCLGE_MAC_SPEED_200G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
- break;
- default:
+ ret = hclge_convert_to_fw_speed(speed, &speed_fw);
+ if (ret) {
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
- return -EINVAL;
+ return ret;
}
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
+ speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
@@ -2933,16 +2833,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_mbx_scheduled = jiffies;
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_rst_scheduled = jiffies;
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
@@ -3237,7 +3141,7 @@ static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
true);
@@ -3294,7 +3198,7 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
false);
@@ -3501,7 +3405,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3509,7 +3413,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
@@ -3643,7 +3547,7 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
- vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
+ vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
hdev->vector_status[0] = 0;
hdev->num_msi_left -= 1;
@@ -3827,10 +3731,17 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
static void hclge_mailbox_service_task(struct hclge_dev *hdev)
{
if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
- test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+ test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "mbx service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
+ smp_processor_id());
+
hclge_mbx_handler(hdev);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
@@ -3865,7 +3776,7 @@ static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
return;
}
msleep(HCLGE_PF_RESET_SYNC_TIME);
- hclge_cmd_reuse_desc(&desc, true);
+ hclge_comm_cmd_reuse_desc(&desc, true);
} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
@@ -4022,13 +3933,13 @@ static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
u32 reg_val;
- reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
if (enable)
- reg_val |= HCLGE_NIC_SW_RST_RDY;
+ reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
else
- reg_val &= ~HCLGE_NIC_SW_RST_RDY;
+ reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
}
static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
@@ -4065,9 +3976,9 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
 /* After performing pf reset, it is not necessary to do the
* mailbox handling or send any command to firmware, because
* any mailbox handling or command to firmware is only valid
- * after hclge_cmd_init is called.
+ * after hclge_comm_cmd_init is called.
*/
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
@@ -4480,6 +4391,13 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_rst_scheduled +
+ HCLGE_RESET_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "reset service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
+ smp_processor_id());
+
down(&hdev->reset_sem);
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
@@ -4614,11 +4532,11 @@ static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
 /* need an extended offset to configure vectors >= 64 */
if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
- vector_info->io_addr = hdev->hw.io_base +
+ vector_info->io_addr = hdev->hw.hw.io_base +
HCLGE_VECTOR_REG_BASE +
(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
else
- vector_info->io_addr = hdev->hw.io_base +
+ vector_info->io_addr = hdev->hw.hw.io_base +
HCLGE_VECTOR_EXT_REG_BASE +
(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
HCLGE_VECTOR_REG_OFFSET_H +
@@ -4688,334 +4606,43 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
return 0;
}
-static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
-{
- return HCLGE_RSS_KEY_SIZE;
-}
-
-static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
- const u8 hfunc, const u8 *key)
-{
- struct hclge_rss_config_cmd *req;
- unsigned int key_offset = 0;
- struct hclge_desc desc;
- int key_counts;
- int key_size;
- int ret;
-
- key_counts = HCLGE_RSS_KEY_SIZE;
- req = (struct hclge_rss_config_cmd *)desc.data;
-
- while (key_counts) {
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
- false);
-
- req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
- req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
-
- key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
- memcpy(req->hash_key,
- key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
-
- key_counts -= key_size;
- key_offset++;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure RSS config fail, status = %d\n",
- ret);
- return ret;
- }
- }
- return 0;
-}
-
-static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
-{
- struct hclge_rss_indirection_table_cmd *req;
- struct hclge_desc desc;
- int rss_cfg_tbl_num;
- u8 rss_msb_oft;
- u8 rss_msb_val;
- int ret;
- u16 qid;
- int i;
- u32 j;
-
- req = (struct hclge_rss_indirection_table_cmd *)desc.data;
- rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
- HCLGE_RSS_CFG_TBL_SIZE;
-
- for (i = 0; i < rss_cfg_tbl_num; i++) {
- hclge_cmd_setup_basic_desc
- (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
-
- req->start_table_index =
- cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
- req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
- for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
- qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
- req->rss_qid_l[j] = qid & 0xff;
- rss_msb_oft =
- j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
- rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
- (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
- req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
- }
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure rss indir table fail,status = %d\n",
- ret);
- return ret;
- }
- }
- return 0;
-}
-
-static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
- u16 *tc_size, u16 *tc_offset)
-{
- struct hclge_rss_tc_mode_cmd *req;
- struct hclge_desc desc;
- int ret;
- int i;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
- req = (struct hclge_rss_tc_mode_cmd *)desc.data;
-
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- u16 mode = 0;
-
- hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
- hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
- HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
- tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
- hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
- HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
-
- req->rss_tc_mode[i] = cpu_to_le16(mode);
- }
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss tc mode fail, status = %d\n", ret);
-
- return ret;
-}
-
-static void hclge_get_rss_type(struct hclge_vport *vport)
-{
- if (vport->rss_tuple_sets.ipv4_tcp_en ||
- vport->rss_tuple_sets.ipv4_udp_en ||
- vport->rss_tuple_sets.ipv4_sctp_en ||
- vport->rss_tuple_sets.ipv6_tcp_en ||
- vport->rss_tuple_sets.ipv6_udp_en ||
- vport->rss_tuple_sets.ipv6_sctp_en)
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
- else if (vport->rss_tuple_sets.ipv4_fragment_en ||
- vport->rss_tuple_sets.ipv6_fragment_en)
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
- else
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
-}
-
-static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
-{
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-
- /* Get the tuple cfg from pf */
- req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
- hclge_get_rss_type(&hdev->vport[0]);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss input fail, status = %d\n", ret);
- return ret;
-}
-
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
- int i;
+ struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
- /* Get hash algorithm */
- if (hfunc) {
- switch (vport->rss_algo) {
- case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
- *hfunc = ETH_RSS_HASH_TOP;
- break;
- case HCLGE_RSS_HASH_ALGO_SIMPLE:
- *hfunc = ETH_RSS_HASH_XOR;
- break;
- default:
- *hfunc = ETH_RSS_HASH_UNKNOWN;
- break;
- }
- }
-
- /* Get the RSS Key required by the user */
- if (key)
- memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
+ hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
- /* Get indirect table */
- if (indir)
- for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
- indir[i] = vport->rss_indirection_tbl[i];
+ hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+ ae_dev->dev_specs.rss_ind_tbl_size);
return 0;
}
-static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
- u8 *hash_algo)
-{
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- return 0;
- case ETH_RSS_HASH_XOR:
- *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
- return 0;
- case ETH_RSS_HASH_NO_CHANGE:
- *hash_algo = vport->rss_algo;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u8 hash_algo;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
- ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+ ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
if (ret) {
dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
return ret;
}
- /* Set the RSS Hash Key if specififed by the user */
- if (key) {
- ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
- if (ret)
- return ret;
-
- /* Update the shadow RSS key with user specified qids */
- memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
- } else {
- ret = hclge_set_rss_algo_key(hdev, hash_algo,
- vport->rss_hash_key);
- if (ret)
- return ret;
- }
- vport->rss_algo = hash_algo;
-
/* Update the shadow RSS table with user specified qids */
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
- vport->rss_indirection_tbl[i] = indir[i];
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
- return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
-}
-
-static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
-{
- u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
-
- if (nfc->data & RXH_L4_B_2_3)
- hash_sets |= HCLGE_D_PORT_BIT;
- else
- hash_sets &= ~HCLGE_D_PORT_BIT;
-
- if (nfc->data & RXH_IP_SRC)
- hash_sets |= HCLGE_S_IP_BIT;
- else
- hash_sets &= ~HCLGE_S_IP_BIT;
-
- if (nfc->data & RXH_IP_DST)
- hash_sets |= HCLGE_D_IP_BIT;
- else
- hash_sets &= ~HCLGE_D_IP_BIT;
-
- if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
- hash_sets |= HCLGE_V_TAG_BIT;
-
- return hash_sets;
-}
-
-static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
- struct ethtool_rxnfc *nfc,
- struct hclge_rss_input_tuple_cmd *req)
-{
- struct hclge_dev *hdev = vport->back;
- u8 tuple_sets;
-
- req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
-
- tuple_sets = hclge_get_rss_hash_bits(nfc);
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- req->ipv4_tcp_en = tuple_sets;
- break;
- case TCP_V6_FLOW:
- req->ipv6_tcp_en = tuple_sets;
- break;
- case UDP_V4_FLOW:
- req->ipv4_udp_en = tuple_sets;
- break;
- case UDP_V6_FLOW:
- req->ipv6_udp_en = tuple_sets;
- break;
- case SCTP_V4_FLOW:
- req->ipv4_sctp_en = tuple_sets;
- break;
- case SCTP_V6_FLOW:
- if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
- (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
- return -EINVAL;
-
- req->ipv6_sctp_en = tuple_sets;
- break;
- case IPV4_FLOW:
- req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- break;
- case IPV6_FLOW:
- req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
@@ -5023,92 +4650,20 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
int ret;
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
-
- ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to init rss tuple cmd, ret = %d\n", ret);
- return ret;
- }
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->rss_cfg, nfc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Set rss tuple fail, status = %d\n", ret);
+ "failed to set rss tuple, ret = %d.\n", ret);
return ret;
}
- vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
- vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
- vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
- vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
- vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
- vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
- vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
- vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
- hclge_get_rss_type(vport);
- return 0;
-}
-
-static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
- u8 *tuple_sets)
-{
- switch (flow_type) {
- case TCP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
- break;
- case UDP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
- break;
- case TCP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
- break;
- case UDP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
- break;
- case SCTP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
- break;
- case SCTP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
- break;
- case IPV4_FLOW:
- case IPV6_FLOW:
- *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
- break;
- default:
- return -EINVAL;
- }
-
+ hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);
return 0;
}
-static u64 hclge_convert_rss_tuple(u8 tuple_sets)
-{
- u64 tuple_data = 0;
-
- if (tuple_sets & HCLGE_D_PORT_BIT)
- tuple_data |= RXH_L4_B_2_3;
- if (tuple_sets & HCLGE_S_PORT_BIT)
- tuple_data |= RXH_L4_B_0_1;
- if (tuple_sets & HCLGE_D_IP_BIT)
- tuple_data |= RXH_IP_DST;
- if (tuple_sets & HCLGE_S_IP_BIT)
- tuple_data |= RXH_IP_SRC;
-
- return tuple_data;
-}
-
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
struct ethtool_rxnfc *nfc)
{
@@ -5118,11 +4673,12 @@ static int hclge_get_rss_tuple(struct hnae3_handle *handle,
nfc->data = 0;
- ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
+ ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
+ &tuple_sets);
if (ret || !tuple_sets)
return ret;
- nfc->data = hclge_convert_rss_tuple(tuple_sets);
+ nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -5175,78 +4731,35 @@ static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
tc_offset[i] = tc_info->tqp_offset[i];
}
- return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
- struct hclge_vport *vport = hdev->vport;
- u16 *rss_indir = vport[0].rss_indirection_tbl;
- u8 *key = vport[0].rss_hash_key;
- u8 hfunc = vport[0].rss_algo;
+ u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
+ u8 *key = hdev->rss_cfg.rss_hash_key;
+ u8 hfunc = hdev->rss_cfg.rss_algo;
int ret;
- ret = hclge_set_rss_indir_table(hdev, rss_indir);
+ ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_indir);
if (ret)
return ret;
- ret = hclge_set_rss_algo_key(hdev, hfunc, key);
+ ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
if (ret)
return ret;
- ret = hclge_set_rss_input_tuple(hdev);
+ ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic,
+ &hdev->hw.hw, true,
+ &hdev->rss_cfg);
if (ret)
return ret;
return hclge_init_rss_tc_mode(hdev);
}
-void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
-{
- struct hclge_vport *vport = &hdev->vport[0];
- int i;
-
- for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
- vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
-}
-
-static int hclge_rss_init_cfg(struct hclge_dev *hdev)
-{
- u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
- int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- struct hclge_vport *vport = &hdev->vport[0];
- u16 *rss_ind_tbl;
-
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
- rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
-
- vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_sctp_en =
- hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
- HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
- HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-
- vport->rss_algo = rss_algo;
-
- rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
- sizeof(*rss_ind_tbl), GFP_KERNEL);
- if (!rss_ind_tbl)
- return -ENOMEM;
-
- vport->rss_indirection_tbl = rss_ind_tbl;
- memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
-
- hclge_rss_indir_init_cfg(hdev);
-
- return 0;
-}
-
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
struct hnae3_ring_chain_node *ring_chain)
@@ -5256,7 +4769,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_desc desc;
struct hclge_ctrl_vector_chain_cmd *req =
(struct hclge_ctrl_vector_chain_cmd *)desc.data;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
int i;
@@ -5886,9 +5399,9 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
int ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
@@ -6790,7 +6303,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
"Error: vf id (%u) should be less than %u\n",
- vf - 1, hdev->num_req_vfs);
+ vf - 1U, hdev->num_req_vfs);
return -EINVAL;
}
@@ -6800,7 +6313,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
"Error: queue id (%u) > max tqp num (%u)\n",
- ring, tqps - 1);
+ ring, tqps - 1U);
return -EINVAL;
}
@@ -7161,6 +6674,37 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
}
}
+static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
+ u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
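+	/* fd_rule_list is kept sorted by location, so stop once we pass it */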
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location == location)
+ return rule;
+ else if (rule->location > location)
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else {
+ u64 vf_id;
+
+ fs->ring_cookie = rule->queue_id;
+ vf_id = rule->vf_id;
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fs->ring_cookie |= vf_id;
+ }
+}
+
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -7168,7 +6712,6 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_fd_rule *rule = NULL;
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- struct hlist_node *node2;
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
@@ -7177,14 +6720,9 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
spin_lock_bh(&hdev->fd_rule_lock);
- hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
- if (rule->location >= fs->location)
- break;
- }
-
- if (!rule || fs->location != rule->location) {
+ rule = hclge_get_fd_rule(hdev, fs->location);
+ if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOENT;
}
@@ -7222,16 +6760,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
hclge_fd_get_ext_info(fs, rule);
- if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
- fs->ring_cookie = RX_CLS_FLOW_DISC;
- } else {
- u64 vf_id;
-
- fs->ring_cookie = rule->queue_id;
- vf_id = rule->vf_id;
- vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
- fs->ring_cookie |= vf_id;
- }
+ hclge_fd_get_ring_cookie(fs, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
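
The hclge_get_fd_rule() helper added above only works because fd_rule_list is kept sorted by location, which is what lets the scan stop early instead of walking the whole list. A minimal standalone sketch of that early-exit lookup (illustrative singly linked list, not the driver's hlist):

/* Standalone sketch: early-exit lookup in a list kept sorted by
 * 'location', mirroring hclge_get_fd_rule(). Illustrative types only.
 */
#include <stddef.h>
#include <stdio.h>

struct fd_rule {
        unsigned short location;
        struct fd_rule *next;
};

static struct fd_rule *find_rule(struct fd_rule *head, unsigned short location)
{
        for (struct fd_rule *rule = head; rule; rule = rule->next) {
                if (rule->location == location)
                        return rule;    /* exact match */
                if (rule->location > location)
                        return NULL;    /* sorted: no match further on */
        }
        return NULL;
}

int main(void)
{
        struct fd_rule c = { 9, NULL }, b = { 4, &c }, a = { 1, &b };

        printf("found=%d\n", find_rule(&a, 4) != NULL);  /* found=1 */
        printf("found=%d\n", find_rule(&a, 5) != NULL);  /* found=0 */
        return 0;
}
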
@@ -7776,7 +7305,7 @@ static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}
static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
@@ -7866,7 +7395,7 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
}
/* modify and write new config parameter */
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
req->switch_param = (req->switch_param & param_mask) | switch_param;
req->param_mask = param_mask;
@@ -7960,7 +7489,7 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
/* 3 Config mac work mode with loopback flag
* and its original configure parameters
*/
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
@@ -7968,16 +7497,13 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
- enum hnae3_loop loop_mode)
+static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
{
-#define HCLGE_COMMON_LB_RETRY_MS 10
-#define HCLGE_COMMON_LB_RETRY_NUM 100
-
struct hclge_common_lb_cmd *req;
struct hclge_desc desc;
- int ret, i = 0;
u8 loop_mode_b;
+ int ret;
req = (struct hclge_common_lb_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
@@ -7994,23 +7520,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
break;
default:
dev_err(&hdev->pdev->dev,
- "unsupported common loopback mode %d\n", loop_mode);
+ "unsupported loopback mode %d\n", loop_mode);
return -ENOTSUPP;
}
- if (en) {
+ req->mask = loop_mode_b;
+ if (en)
req->enable = loop_mode_b;
- req->mask = loop_mode_b;
- } else {
- req->mask = loop_mode_b;
- }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
- "common loopback set fail, ret = %d\n", ret);
- return ret;
- }
+ "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
+ loop_mode, ret);
+
+ return ret;
+}
+
+static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_COMMON_LB_RETRY_MS 10
+#define HCLGE_COMMON_LB_RETRY_NUM 100
+
+ struct hclge_common_lb_cmd *req;
+ struct hclge_desc desc;
+ u32 i = 0;
+ int ret;
+
+ req = (struct hclge_common_lb_cmd *)desc.data;
do {
msleep(HCLGE_COMMON_LB_RETRY_MS);
@@ -8019,20 +7556,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "common loopback get, ret = %d\n", ret);
+ "failed to get loopback done status, ret = %d\n",
+ ret);
return ret;
}
} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
!(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
- dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
+ dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
return -EBUSY;
} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
- dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
+ dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
return -EIO;
}
- return ret;
+
+ return 0;
+}
+
+static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
+ if (ret)
+ return ret;
+
+ return hclge_cfg_common_loopback_wait(hdev);
}
static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
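
The loopback rework splits one long routine into a command-send step and a bounded polling wait, so each half has a single job and its own error reporting. A rough standalone model of that shape, with do_send()/poll_done() standing in for the firmware commands (all names here are illustrative):

/* Standalone sketch: send step plus bounded polling wait, as in the
 * hclge_cfg_common_loopback() split above. Not driver code.
 */
#include <stdio.h>

#define RETRY_NUM 100

static int do_send(void) { return 0; }                    /* 0 on success */
static int poll_done(int attempt) { return attempt >= 3; } /* fake done flag */

static int cfg_wait(void)
{
        for (int i = 0; i < RETRY_NUM; i++)
                if (poll_done(i))
                        return 0;       /* hardware reported completion */
        return -1;                      /* timed out, like -EBUSY above */
}

static int cfg_loopback(void)
{
        int ret = do_send();

        return ret ? ret : cfg_wait();
}

int main(void) { printf("ret=%d\n", cfg_loopback()); return 0; }
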
@@ -8213,22 +7764,6 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev)
HNAE3_LOOP_PARALLEL_SERDES);
}
-static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hnae3_knic_private_info *kinfo;
- struct hnae3_queue *queue;
- struct hclge_tqp *tqp;
- int i;
-
- kinfo = &vport->nic.kinfo;
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
- }
-}
-
static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT 100000
@@ -8270,7 +7805,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
hdev->hw.mac.link = 0;
/* reset tqp stats */
- hclge_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclge_mac_start_phy(hdev);
@@ -8308,7 +7843,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_mac_stop_phy(hdev);
/* reset tqp stats */
- hclge_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclge_update_link_status(hdev);
}
@@ -8511,14 +8046,14 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
if (is_mc) {
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
memcpy(desc[0].data,
req,
sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_MAC_VLAN_ADD,
true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2],
HCLGE_OPC_MAC_VLAN_ADD,
true);
@@ -8568,12 +8103,12 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
resp_code,
HCLGE_MAC_VLAN_ADD);
} else {
- hclge_cmd_reuse_desc(&mc_desc[0], false);
- mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_reuse_desc(&mc_desc[1], false);
- mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_reuse_desc(&mc_desc[2], false);
- mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
+ mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
+ mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
+ mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
memcpy(mc_desc[0].data, req,
sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
@@ -8743,6 +8278,7 @@ int hclge_update_mac_list(struct hclge_vport *vport,
enum HCLGE_MAC_ADDR_TYPE mac_type,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_node *mac_node;
struct list_head *list;
@@ -8767,9 +8303,10 @@ int hclge_update_mac_list(struct hclge_vport *vport,
/* if this address is never added, unnecessary to delete */
if (state == HCLGE_MAC_TO_DEL) {
spin_unlock_bh(&vport->mac_list_lock);
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "failed to delete address %pM from mac list\n",
- addr);
+ "failed to delete address %s from mac list\n",
+ format_mac_addr);
return -ENOENT;
}
@@ -8802,6 +8339,7 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle,
int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc;
@@ -8812,9 +8350,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
- addr, is_zero_ether_addr(addr),
+ "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
+ format_mac_addr, is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr));
return -EINVAL;
@@ -8871,6 +8410,7 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
int ret;
@@ -8879,8 +8419,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
- dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
- addr);
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -8911,6 +8452,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
int hclge_add_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
@@ -8919,9 +8461,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Add mc mac err! invalid mac:%pM.\n",
- addr);
+ "Add mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
memset(&req, 0, sizeof(req));
@@ -8973,16 +8516,18 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc[3];
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_dbg(&hdev->pdev->dev,
- "Remove mc mac err! invalid mac:%pM.\n",
- addr);
+ "Remove mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -9422,16 +8967,18 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
u8 *mac_addr)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
return -EINVAL;
+ hnae3_format_mac_addr(format_mac_addr, mac_addr);
if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
dev_info(&hdev->pdev->dev,
- "Specified MAC(=%pM) is same as before, no change committed!\n",
- mac_addr);
+ "Specified MAC(=%s) is same as before, no change committed!\n",
+ format_mac_addr);
return 0;
}
@@ -9439,13 +8986,13 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
dev_info(&hdev->pdev->dev,
- "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
- vf, mac_addr);
+ "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
+ vf, format_mac_addr);
return hclge_inform_reset_assert_to_vf(vport);
}
- dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
- vf, mac_addr);
+ dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
+ vf, format_mac_addr);
return 0;
}
@@ -9549,6 +9096,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
{
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
unsigned char *old_addr = NULL;
int ret;
@@ -9557,9 +9105,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
if (is_zero_ether_addr(new_addr) ||
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "change uc mac err! invalid mac: %pM.\n",
- new_addr);
+ "change uc mac err! invalid mac: %s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -9577,9 +9126,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
spin_lock_bh(&vport->mac_list_lock);
ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
if (ret) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "failed to change the mac addr:%pM, ret = %d\n",
- new_addr, ret);
+ "failed to change the mac addr:%s, ret = %d\n",
+ format_mac_addr, ret);
spin_unlock_bh(&vport->mac_list_lock);
if (!is_first)
@@ -9677,20 +9227,20 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to get vlan filter config, ret = %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
return ret;
}
/* modify and write new config parameter */
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
req->vlan_fe = filter_en ?
(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
- ret);
+ dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
+ vf_id, ret);
return ret;
}
@@ -9809,7 +9359,7 @@ static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
vf_byte_off = vfid / 8;
vf_byte_val = 1 << (vfid % 8);
@@ -9936,6 +9486,32 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
return ret;
}
+static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
+ u16 vlan_id, bool is_kill)
+{
+ /* vlan 0 may be added twice when 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return false;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Add port vlan failed, vport %u is already in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ return true;
+}
+
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
u16 vport_id, u16 vlan_id,
bool is_kill)
@@ -9957,26 +9533,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return ret;
}
- /* vlan 0 may be added twice when 8021q module is enabled */
- if (!is_kill && !vlan_id &&
- test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
return 0;
- if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Add port vlan failed, vport %u is already in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
- if (is_kill &&
- !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Delete port vlan failed, vport %u is not in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
vport_num++;
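
hclge_need_update_port_vlan() concentrates the bitmap bookkeeping: a duplicate add or a delete of a non-member is now warned about but treated as a no-op rather than a hard error. A standalone sketch of the test-and-set/test-and-clear pattern (non-atomic stand-ins for the kernel bit helpers; the vlan-0 special case is omitted):

/* Standalone sketch: per-VLAN vport membership in a bitmap, refusing
 * duplicate add/remove as hclge_need_update_port_vlan() does.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool test_and_set(uint64_t *map, unsigned int bit)
{
        bool old = *map & (1ULL << bit);
        *map |= 1ULL << bit;
        return old;
}

static bool test_and_clear(uint64_t *map, unsigned int bit)
{
        bool old = *map & (1ULL << bit);
        *map &= ~(1ULL << bit);
        return old;
}

/* Returns true when the hardware table actually needs an update. */
static bool need_update(uint64_t *vlan_map, unsigned int vport, bool kill)
{
        if (!kill && test_and_set(vlan_map, vport))
                return false;   /* already a member: nothing to add */
        if (kill && !test_and_clear(vlan_map, vport))
                return false;   /* not a member: nothing to delete */
        return true;
}

int main(void)
{
        uint64_t vlan10 = 0;

        printf("%d\n", need_update(&vlan10, 3, false)); /* 1: first add */
        printf("%d\n", need_update(&vlan10, 3, false)); /* 0: duplicate */
        printf("%d\n", need_update(&vlan10, 3, true));  /* 1: delete */
        return 0;
}
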
@@ -10168,67 +9727,80 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
return status;
}
-static int hclge_init_vlan_config(struct hclge_dev *hdev)
+static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
-#define HCLGE_DEF_VLAN_TYPE 0x8100
-
- struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_vport *vport;
int ret;
int i;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- /* for revision 0x21, vf vlan filter is per function */
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- ret = hclge_set_vlan_filter_ctrl(hdev,
- HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS,
- true,
- vport->vport_id);
- if (ret)
- return ret;
- vport->cur_vlan_fltr_en = true;
- }
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true, 0);
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true,
- 0);
- if (ret)
- return ret;
- } else {
+ /* for revision 0x21, vf vlan filter is per function */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS_V1_B,
- true, 0);
+ HCLGE_FILTER_FE_EGRESS, true,
+ vport->vport_id);
if (ret)
return ret;
+ vport->cur_vlan_fltr_en = true;
}
- hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true, 0);
+}
- ret = hclge_set_vlan_protocol_type(hdev);
- if (ret)
- return ret;
+static int hclge_init_vlan_type(struct hclge_dev *hdev)
+{
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- u16 vlan_tag;
- u8 qos;
+ return hclge_set_vlan_protocol_type(hdev);
+}
+static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
+{
+ struct hclge_port_base_vlan_config *cfg;
+ struct hclge_vport *vport;
+ int ret;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ cfg = &vport->port_base_vlan_cfg;
- ret = hclge_vlan_offload_cfg(vport,
- vport->port_base_vlan_cfg.state,
- vlan_tag, qos);
+ ret = hclge_vlan_offload_cfg(vport, cfg->state,
+ cfg->vlan_info.vlan_tag,
+ cfg->vlan_info.qos);
if (ret)
return ret;
}
+ return 0;
+}
+
+static int hclge_init_vlan_config(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ int ret;
+
+ ret = hclge_init_vlan_filter(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vlan_type(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vport_vlan_offload(hdev);
+ if (ret)
+ return ret;
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
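
Part of this cleanup replaces the private HCLGE_DEF_VLAN_TYPE define (0x8100) with the standard ETH_P_8021Q ethertype, which has the same value, so no behaviour changes. A tiny standalone check (ETH_P_8021Q is defined locally so the sketch builds without kernel headers):

/* Standalone check: ETH_P_8021Q carries the same 0x8100 TPID that the
 * dropped HCLGE_DEF_VLAN_TYPE macro hard-coded.
 */
#include <assert.h>
#include <stdio.h>

#define ETH_P_8021Q 0x8100      /* same value as in <linux/if_ether.h> */

int main(void)
{
        assert(ETH_P_8021Q == 0x8100);
        printf("802.1Q TPID: 0x%04x\n", ETH_P_8021Q);
        return 0;
}
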
@@ -10485,12 +10057,41 @@ static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
return false;
}
+static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
+ vport->vport_id, new_info->vlan_tag,
+ false);
+ if (ret)
+ return ret;
+
+ /* remove old VLAN tag */
+ if (old_info->vlan_tag == 0)
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ true, 0);
+ else
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ old_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vport%u port base vlan %u, ret = %d.\n",
+ vport->vport_id, old_info->vlan_tag, ret);
+
+ return ret;
+}
+
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info)
{
struct hnae3_handle *nic = &vport->nic;
struct hclge_vlan_info *old_vlan_info;
- struct hclge_dev *hdev = vport->back;
int ret;
old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
@@ -10503,38 +10104,12 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
goto out;
- if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
- /* add new VLAN tag */
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(vlan_info->vlan_proto),
- vport->vport_id,
- vlan_info->vlan_tag,
- false);
- if (ret)
- return ret;
-
- /* remove old VLAN tag */
- if (old_vlan_info->vlan_tag == 0)
- ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
- true, 0);
- else
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(ETH_P_8021Q),
- vport->vport_id,
- old_vlan_info->vlan_tag,
- true);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to clear vport%u port base vlan %u, ret = %d.\n",
- vport->vport_id, old_vlan_info->vlan_tag, ret);
- return ret;
- }
-
- goto out;
- }
-
- ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
- old_vlan_info);
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
+ ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
+ old_vlan_info);
+ else
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
if (ret)
return ret;
@@ -10881,11 +10456,11 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
+ struct hclge_comm_tqp *tqp;
struct hnae3_queue *queue;
- struct hclge_tqp *tqp;
queue = handle->kinfo.tqp[queue_id];
- tqp = container_of(queue, struct hclge_tqp, q);
+ tqp = container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
@@ -11442,10 +11017,11 @@ static int hclge_dev_mem_map(struct hclge_dev *hdev)
if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
return 0;
- hw->mem_base = devm_ioremap_wc(&pdev->dev,
- pci_resource_start(pdev, HCLGE_MEM_BAR),
- pci_resource_len(pdev, HCLGE_MEM_BAR));
- if (!hw->mem_base) {
+ hw->hw.mem_base =
+ devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGE_MEM_BAR),
+ pci_resource_len(pdev, HCLGE_MEM_BAR));
+ if (!hw->hw.mem_base) {
dev_err(&pdev->dev, "failed to map device memory\n");
return -EFAULT;
}
@@ -11484,8 +11060,8 @@ static int hclge_pci_init(struct hclge_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->io_base = pcim_iomap(pdev, 2, 0);
- if (!hw->io_base) {
+ hw->hw.io_base = pcim_iomap(pdev, 2, 0);
+ if (!hw->hw.io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
ret = -ENOMEM;
goto err_clr_master;
@@ -11500,7 +11076,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
return 0;
err_unmap_io_base:
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
err_clr_master:
pci_clear_master(pdev);
pci_release_regions(pdev);
@@ -11514,10 +11090,10 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- if (hdev->hw.mem_base)
- devm_iounmap(&pdev->dev, hdev->hw.mem_base);
+ if (hdev->hw.hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
pci_clear_master(pdev);
pci_release_mem_regions(pdev);
@@ -11556,29 +11132,25 @@ static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
int retry_cnt = 0;
int ret;
-retry:
- down(&hdev->reset_sem);
- set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- hdev->reset_type = rst_type;
- ret = hclge_reset_prepare(hdev);
- if (ret || hdev->reset_pending) {
- dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
- ret);
- if (hdev->reset_pending ||
- retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
- dev_err(&hdev->pdev->dev,
- "reset_pending:0x%lx, retry_cnt:%d\n",
- hdev->reset_pending, retry_cnt);
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
- msleep(HCLGE_RESET_RETRY_WAIT_MS);
- goto retry;
- }
+ while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = rst_type;
+ ret = hclge_reset_prepare(hdev);
+ if (!ret && !hdev->reset_pending)
+ break;
+
+ dev_err(&hdev->pdev->dev,
+ "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+ ret, hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_RESET_RETRY_WAIT_MS);
}
/* disable misc vector before reset done */
hclge_enable_vector(&hdev->misc_vector, false);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
if (hdev->reset_type == HNAE3_FLR_RESET)
hdev->rst_stats.flr_rst_cnt++;
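
The reset-prepare rework above turns a goto-based retry into a bounded while loop, which makes the retry budget explicit. A standalone sketch of the shape (prepare() stands in for hclge_reset_prepare(); the real code also takes the reset semaphore and sleeps between attempts):

/* Standalone sketch: bounded retry loop replacing a goto-retry pattern,
 * as in hclge_reset_prepare_general() above. Illustrative values.
 */
#include <stdio.h>

#define RETRY_CNT 3

static int prepare(int attempt) { return attempt < 2 ? -1 : 0; }

static int prepare_with_retry(void)
{
        int ret = -1;

        for (int retry = 0; retry < RETRY_CNT; retry++) {
                ret = prepare(retry);
                if (!ret)
                        break;          /* prepared: leave the loop */
                fprintf(stderr, "prepare failed, retry %d\n", retry + 1);
                /* the driver also releases locks and sleeps here */
        }
        return ret;
}

int main(void) { printf("ret=%d\n", prepare_with_retry()); return 0; }
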
@@ -11683,12 +11255,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_pci_uninit;
/* Firmware command queue initialize */
- ret = hclge_cmd_queue_init(hdev);
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_devlink_uninit;
/* Firmware command initialize */
- ret = hclge_cmd_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
if (ret)
goto err_cmd_uninit;
@@ -11776,7 +11349,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- ret = hclge_rss_init_cfg(hdev);
+ ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
+ &hdev->rss_cfg);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_mdiobus_unreg;
@@ -11861,11 +11435,11 @@ err_msi_irq_uninit:
err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
- hclge_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
hclge_devlink_uninit(hdev);
err_pci_uninit:
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -12112,7 +11686,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_reset_umv_space(hdev);
}
- ret = hclge_cmd_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
if (ret) {
dev_err(&pdev->dev, "Cmd queue init failed\n");
return ret;
@@ -12252,7 +11827,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_config_nic_hw_error(hdev, false);
hclge_config_rocee_ras_interrupt(hdev, false);
- hclge_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclge_misc_irq_uninit(hdev);
hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -12288,19 +11863,43 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
*max_rss_size = hdev->pf_rss_size_max;
}
+static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ struct hclge_dev *hdev = vport->back;
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ unsigned int i;
+
+ roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = vport->nic.kinfo.rss_size * i;
+ }
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
+}
+
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
- u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
- u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
u16 cur_rss_size = kinfo->rss_size;
u16 cur_tqps = kinfo->num_tqps;
- u16 tc_valid[HCLGE_MAX_TC_NUM];
- u16 roundup_size;
u32 *rss_indir;
unsigned int i;
int ret;
@@ -12313,20 +11912,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
return ret;
}
- roundup_size = roundup_pow_of_two(kinfo->rss_size);
- roundup_size = ilog2(roundup_size);
- /* Set the RSS TC mode according to the new RSS size */
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- tc_valid[i] = 0;
-
- if (!(hdev->hw_tc_map & BIT(i)))
- continue;
-
- tc_valid[i] = 1;
- tc_size[i] = roundup_size;
- tc_offset[i] = kinfo->rss_size * i;
- }
- ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ ret = hclge_set_rss_tc_mode_cfg(handle);
if (ret)
return ret;
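
hclge_set_rss_tc_mode_cfg() programs each TC with a size field that is log2 of the RSS size rounded up to a power of two, and an offset that steps by rss_size per TC. A standalone sketch of that computation, with portable stand-ins for the kernel's roundup_pow_of_two()/ilog2():

/* Standalone sketch: per-TC RSS size/offset computation as in
 * hclge_set_rss_tc_mode_cfg(). Example values, not hardware encoding.
 */
#include <stdio.h>

static unsigned int roundup_pow2(unsigned int v)
{
        unsigned int p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

static unsigned int ilog2_u(unsigned int v)
{
        unsigned int l = 0;

        while (v >>= 1)
                l++;
        return l;
}

int main(void)
{
        unsigned int rss_size = 12;     /* queues per TC, example value */
        unsigned int tc_size = ilog2_u(roundup_pow2(rss_size));

        for (unsigned int tc = 0; tc < 4; tc++)
                printf("tc%u: size=%u offset=%u\n",
                       tc, tc_size, rss_size * tc);    /* size=4 (2^4=16) */
        return 0;
}
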
@@ -12508,7 +12094,7 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
true);
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
/* initialize the last command BD */
@@ -12552,7 +12138,7 @@ static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
hclge_cmd_setup_basic_desc(desc, cmd, true);
for (i = 0; i < bd_num - 1; i++) {
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc++;
hclge_cmd_setup_basic_desc(desc, cmd, true);
}
@@ -12985,7 +12571,7 @@ static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
/* bd0~bd4 need next flag */
if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
/* setup bd0, this bd contains offset and read length. */
@@ -13095,7 +12681,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.check_port_speed = hclge_check_port_speed,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
- .get_rss_key_size = hclge_get_rss_key_size,
+ .get_rss_key_size = hclge_comm_get_rss_key_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,
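
Several hunks above only rename HCLGE_CMD_FLAG_NEXT to HCLGE_COMM_CMD_FLAG_NEXT; the underlying convention is that a multi-BD command sets the NEXT flag on every descriptor except the last, so firmware knows where the chain ends. A standalone sketch of that chaining rule (the flag value here is illustrative, not the hardware encoding):

/* Standalone sketch: chaining buffer descriptors with a NEXT flag on
 * all but the last BD, as the multi-descriptor commands above do.
 */
#include <stdint.h>
#include <stdio.h>

#define CMD_FLAG_NEXT 0x0004    /* illustrative value */

struct desc {
        uint16_t opcode;
        uint16_t flag;
};

static void chain_descs(struct desc *desc, int num, uint16_t opcode)
{
        for (int i = 0; i < num; i++) {
                desc[i].opcode = opcode;
                desc[i].flag = (i < num - 1) ? CMD_FLAG_NEXT : 0;
        }
}

int main(void)
{
        struct desc d[3];

        chain_descs(d, 3, 0x0B03);
        for (int i = 0; i < 3; i++)
                printf("bd%d flag=0x%04x\n", i, d[i].flag);
        return 0;
}
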
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ebba603483a0..adfb26e79262 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -13,6 +13,8 @@
#include "hclge_cmd.h"
#include "hclge_ptp.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
+#include "hclge_comm_tqp_stats.h"
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
@@ -38,20 +40,7 @@
#define HCLGE_VECTOR_REG_OFFSET_H 0x1000
#define HCLGE_VECTOR_VF_OFFSET 0x100000
-#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701C
-#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
-
-#define HCLGE_CMDQ_INTR_STS_REG 0x27104
-#define HCLGE_CMDQ_INTR_EN_REG 0x27108
-#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
/* bar registers for common func */
#define HCLGE_GRO_EN_REG 0x28000
@@ -93,22 +82,6 @@
#define HCLGE_TQP_INTR_RL_REG 0x20900
#define HCLGE_RSS_IND_TBL_SIZE 512
-#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
-#define HCLGE_RSS_KEY_SIZE 40
-#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
-#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
-#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
-#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
-
-#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
-#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
-#define HCLGE_D_PORT_BIT BIT(0)
-#define HCLGE_S_PORT_BIT BIT(1)
-#define HCLGE_D_IP_BIT BIT(2)
-#define HCLGE_S_IP_BIT BIT(3)
-#define HCLGE_V_TAG_BIT BIT(4)
-#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
- (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
@@ -228,7 +201,6 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_ERR_SERVICE_SCHED,
HCLGE_STATE_STATISTICS_UPDATING,
- HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING,
HCLGE_STATE_RST_FAIL,
HCLGE_STATE_FD_TBL_CHANGED,
@@ -294,31 +266,9 @@ struct hclge_mac {
};
struct hclge_hw {
- void __iomem *io_base;
- void __iomem *mem_base;
+ struct hclge_comm_hw hw;
struct hclge_mac mac;
int num_vec;
- struct hclge_cmq cmq;
-};
-
-/* TQP stats */
-struct hlcge_tqp_stats {
- /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
- u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
- /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */

- u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
-};
-
-struct hclge_tqp {
- /* copy of device pointer from pci_dev,
- * used when perform DMA mapping
- */
- struct device *dev;
- struct hnae3_queue q;
- struct hlcge_tqp_stats tqp_stats;
- u16 index; /* Global index in a NIC controller */
-
- bool alloced;
};
enum hclge_fc_mode {
@@ -641,6 +591,11 @@ struct key_info {
#define MAX_FD_FILTER_NUM 4096
#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
+#define hclge_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->hw.io_base, reg)
+#define hclge_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->hw.io_base, reg, value)
+
enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_RULE_NONE,
HCLGE_FD_ARFS_ACTIVE,
@@ -920,7 +875,7 @@ struct hclge_dev {
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
- struct hclge_tqp *htqp;
+ struct hclge_comm_tqp *htqp;
struct hclge_vport *vport;
struct dentry *hclge_dbgfs;
@@ -955,6 +910,8 @@ struct hclge_dev {
u16 hclge_fd_rule_num;
unsigned long serv_processed_cnt;
unsigned long last_serv_processed;
+ unsigned long last_rst_scheduled;
+ unsigned long last_mbx_scheduled;
unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
@@ -977,6 +934,7 @@ struct hclge_dev {
cpumask_t affinity_mask;
struct hclge_ptp *ptp;
struct devlink *devlink;
+ struct hclge_comm_rss_cfg rss_cfg;
};
/* VPort level vlan tag configuration for TX direction */
@@ -1003,17 +961,6 @@ struct hclge_rx_vtag_cfg {
bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */
};
-struct hclge_rss_tuple_cfg {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
-};
-
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
@@ -1047,15 +994,6 @@ struct hclge_vf_info {
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
- u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
- /* User configured lookup table entries */
- u16 *rss_indirection_tbl;
- int rss_algo; /* User configured hash algorithm */
- /* User configured rss tuple sets */
- struct hclge_rss_tuple_cfg rss_tuple_sets;
-
- u16 alloc_rss_size;
-
u16 qs_offset;
u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
@@ -1093,6 +1031,11 @@ struct hclge_speed_bit_map {
u32 speed_bit;
};
+struct hclge_mac_speed_map {
+ u32 speed_drv; /* speed defined in driver */
+ u32 speed_fw; /* speed defined in firmware */
+};
+
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -1111,7 +1054,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
- struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
+ struct hclge_comm_tqp *tqp =
+ container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
@@ -1129,7 +1073,6 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
-void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 65d78ee4d65a..6799d16de34b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -4,6 +4,7 @@
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
#define CREATE_TRACE_POINTS
#include "hclge_trace.h"
@@ -33,7 +34,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc;
u16 resp;
@@ -90,7 +91,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc;
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
@@ -181,7 +182,7 @@ static int hclge_get_ring_chain_from_mbx(
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
req->msg.param[i].tqp_index,
- vport->nic.kinfo.rss_size - 1);
+ vport->nic.kinfo.rss_size - 1U);
return -EINVAL;
}
}
@@ -612,15 +613,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
{
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
+ struct hclge_comm_rss_cfg *rss_cfg;
u8 index;
index = mbx_req->msg.data[0];
+ rss_cfg = &hdev->rss_cfg;
/* Check the query index of rss_hash_key from VF, make sure it is
* no more than the size of rss_hash_key.
*/
if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
- sizeof(vport[0].rss_hash_key)) {
+ sizeof(rss_cfg->rss_hash_key)) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
@@ -628,7 +631,7 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
}
memcpy(resp_msg->data,
- &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
+ &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
}
@@ -661,9 +664,9 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
- u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
+ u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->cmq.crq.next_to_use;
+ return tail == hw->hw.cmq.crq.next_to_use;
}
static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
@@ -694,7 +697,7 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport,
void hclge_mbx_handler(struct hclge_dev *hdev)
{
- struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
@@ -705,7 +708,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
dev_warn(&hdev->pdev->dev,
"command queue needs re-initializing\n");
return;
@@ -848,6 +852,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
resp_msg.status = ret;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ req->mbx_src_vfid,
+ req->msg.code,
+ req->msg.subcode);
+
hclge_gen_resp_to_vf(vport, req, &resp_msg);
}
@@ -859,5 +871,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
}
/* Write back CMDQ_RQ header pointer, M7 needs this pointer */
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
}
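
hclge_get_rss_key() serves the 40-byte hash key to the VF in 8-byte mailbox chunks and rejects any index whose chunk would run past the key, which is the check the rework repoints at the shared rss_cfg. A standalone sketch with the same sizes:

/* Standalone sketch: bounds-checked chunked copy of the RSS hash key,
 * matching the index check in hclge_get_rss_key() above.
 */
#include <stdio.h>
#include <string.h>

#define KEY_SIZE 40     /* HCLGE_RSS_KEY_SIZE */
#define CHUNK    8      /* HCLGE_RSS_MBX_RESP_LEN */

static int copy_key_chunk(const unsigned char *key, unsigned int index,
                          unsigned char *out)
{
        if ((index + 1) * CHUNK > KEY_SIZE)
                return -1;      /* index would read past the key */
        memcpy(out, key + index * CHUNK, CHUNK);
        return 0;
}

int main(void)
{
        unsigned char key[KEY_SIZE] = { 0 }, out[CHUNK];

        printf("idx4=%d idx5=%d\n",
               copy_key_chunk(key, 4, out),     /* last valid chunk */
               copy_key_chunk(key, 5, out));    /* rejected */
        return 0;
}
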
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 1231c34f0949..63d2be4349e3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -47,7 +47,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
@@ -85,7 +85,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index fd0e20190b90..4200d0b6d931 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -4,6 +4,10 @@
#ifndef __HCLGE_MDIO_H
#define __HCLGE_MDIO_H
+#include "hnae3.h"
+
+struct hclge_dev;
+
int hclge_mac_mdio_config(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index befa9bcc2f2f..a40b1583f114 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -464,7 +464,7 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
}
spin_lock_init(&ptp->lock);
- ptp->io_base = hdev->hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
hdev->ptp = ptp;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 7a9b77de632a..bbee74cd8404 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -8,6 +8,9 @@
#include <linux/net_tstamp.h>
#include <linux/types.h>
+struct hclge_dev;
+struct ifreq;
+
#define HCLGE_PTP_REG_OFFSET 0x29000
#define HCLGE_PTP_TX_TS_SEQID_REG 0x0
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 429652a8cde1..089f4444b7e3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -678,8 +678,8 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
hclge_tm_update_kinfo_rss_size(vport);
kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
vport->dwrr = 100; /* 100 percent as init */
- vport->alloc_rss_size = kinfo->rss_size;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
/* when enable mqprio, the tc_info has been updated. */
if (kinfo->tc_info.mqprio_active)
@@ -916,38 +916,63 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
return 0;
}
-static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
+ u16 i, k;
int ret;
- u32 i, k;
- if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
- /* Cfg qs -> pri mapping, one by one mapping */
- for (k = 0; k < hdev->num_alloc_vport; k++) {
- struct hnae3_knic_private_info *kinfo =
- &vport[k].nic.kinfo;
-
- for (i = 0; i < kinfo->tc_info.num_tc; i++) {
- ret = hclge_tm_qs_to_pri_map_cfg(
- hdev, vport[k].qs_offset + i, i);
- if (ret)
- return ret;
- }
+ /* Cfg qs -> pri mapping, one by one mapping */
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+ ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+ vport[k].qs_offset + i,
+ i);
+ if (ret)
+ return ret;
}
- } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
- /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
- for (k = 0; k < hdev->num_alloc_vport; k++)
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- ret = hclge_tm_qs_to_pri_map_cfg(
- hdev, vport[k].qs_offset + i, k);
- if (ret)
- return ret;
- }
- } else {
- return -EINVAL;
}
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u16 i, k;
+ int ret;
+
+ /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
+ for (k = 0; k < hdev->num_alloc_vport; k++)
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+ vport[k].qs_offset + i,
+ k);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u32 i;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
+ ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
+ else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
+ ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
+ else
+ return -EINVAL;
+
+ if (ret)
+ return ret;
+
/* Cfg q -> qs mapping */
for (i = 0; i < hdev->num_alloc_vport; i++) {
ret = hclge_vport_q_to_qs_map(hdev, vport);
@@ -1274,6 +1299,27 @@ static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
return 0;
}
+static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u16 i;
+
+ ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_tm_qs_schd_mode_cfg(hdev,
+ vport[i].qs_offset + pri_id,
+ HCLGE_SCH_MODE_DWRR);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -1304,21 +1350,13 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
int ret;
- u8 i, k;
+ u8 i;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
+ ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
if (ret)
return ret;
-
- for (k = 0; k < hdev->num_alloc_vport; k++) {
- ret = hclge_tm_qs_schd_mode_cfg(
- hdev, vport[k].qs_offset + i,
- HCLGE_SCH_MODE_DWRR);
- if (ret)
- return ret;
- }
}
} else {
for (i = 0; i < hdev->num_alloc_vport; i++) {
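
The TM rework splits hclge_tm_pri_q_qs_cfg() by policy: TC-based scheduling maps a vport's queue set i to priority i, while vnet-based scheduling maps all of a vport's queue sets to that vport's index. A standalone sketch of the two mappings (illustrative arrays, not the firmware command):

/* Standalone sketch: the two qs->pri mapping policies now handled by
 * separate helpers in hclge_tm.c above.
 */
#include <stdio.h>

#define MAX_TC 8

static void map_tc_base(int num_vport, int num_tc, int pri[][MAX_TC])
{
        for (int k = 0; k < num_vport; k++)
                for (int i = 0; i < num_tc; i++)
                        pri[k][i] = i;          /* qs i -> pri i */
}

static void map_vnet_base(int num_vport, int pri[][MAX_TC])
{
        for (int k = 0; k < num_vport; k++)
                for (int i = 0; i < MAX_TC; i++)
                        pri[k][i] = k;          /* qs i -> pri = vport */
}

int main(void)
{
        int pri[2][MAX_TC];

        map_tc_base(2, 4, pri);
        printf("tc-base   vport1 qs2 -> pri %d\n", pri[1][2]); /* 2 */
        map_vnet_base(2, pri);
        printf("vnet-base vport1 qs2 -> pri %d\n", pri[1][2]); /* 1 */
        return 0;
}
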
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 1db7f40b4525..619cc30a2dfc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -6,6 +6,12 @@
#include <linux/types.h>
+#include "hnae3.h"
+
+struct hclge_dev;
+struct hclge_vport;
+enum hclge_opcode_type;
+
/* MAC Pause */
#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0)
#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
deleted file mode 100644
index 51ff7d86ee90..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0+
-#
-# Makefile for the HISILICON network device drivers.
-#
-
-ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
-ccflags-y += -I $(srctree)/$(src)
-
-obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
-hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o
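
The deleted hclgevf_cmd.c below (its logic now lives in the shared hclge_comm command code) computes free command-queue slots with the classic circular-ring formula, keeping one slot unused so head == tail unambiguously means empty. A standalone sketch of that computation:

/* Standalone sketch: circular-ring free-space math, as in the removed
 * hclgevf_ring_space(). One slot stays unused by design.
 */
#include <stdio.h>

static int ring_space(int ntu, int ntc, int desc_num)
{
        int used = (ntu - ntc + desc_num) % desc_num;

        return desc_num - used - 1;
}

int main(void)
{
        printf("%d\n", ring_space(0, 0, 1024));  /* empty: 1023 free */
        printf("%d\n", ring_space(5, 2, 1024));  /* 3 used: 1020 free */
        printf("%d\n", ring_space(2, 5, 1024));  /* wrapped: 2 free */
        return 0;
}
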
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
deleted file mode 100644
index e605c2c5bcce..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ /dev/null
@@ -1,556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-// Copyright (c) 2016-2017 Hisilicon Limited.
-
-#include <linux/device.h>
-#include <linux/dma-direction.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include "hclgevf_cmd.h"
-#include "hclgevf_main.h"
-#include "hnae3.h"
-
-#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
-
-static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
-{
- int ntc = ring->next_to_clean;
- int ntu = ring->next_to_use;
- int used;
-
- used = (ntu - ntc + ring->desc_num) % ring->desc_num;
-
- return ring->desc_num - used - 1;
-}
-
-static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
- int head)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
-
- if (ntu > ntc)
- return head >= ntc && head <= ntu;
-
- return head >= ntc || head <= ntu;
-}
-
-static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
-{
- struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
- struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- int clean;
- u32 head;
-
- head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
- rmb(); /* Make sure head is ready before touch any data */
-
- if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
- dev_warn(&hdev->pdev->dev,
- "Disabling any further commands to IMP firmware\n");
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- return -EIO;
- }
-
- clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
- csq->next_to_clean = head;
- return clean;
-}
-
-static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
-{
- u32 head;
-
- head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
-
- return head == hw->cmq.csq.next_to_use;
-}
-
-static bool hclgevf_is_special_opcode(u16 opcode)
-{
- const u16 spec_opcode[] = {0x30, 0x31, 0x32};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
- if (spec_opcode[i] == opcode)
- return true;
- }
-
- return false;
-}
-
-static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
-{
- struct hclgevf_dev *hdev = ring->dev;
- struct hclgevf_hw *hw = &hdev->hw;
- u32 reg_val;
-
- if (ring->flag == HCLGEVF_TYPE_CSQ) {
- reg_val = lower_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = upper_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
- reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
- reg_val &= HCLGEVF_NIC_SW_RST_RDY;
- reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- } else {
- reg_val = lower_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = upper_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
- }
-}
-
-static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
-{
- hclgevf_cmd_config_regs(&hw->cmq.csq);
- hclgevf_cmd_config_regs(&hw->cmq.crq);
-}
-
-static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclgevf_desc);
-
- ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
- &ring->desc_dma_addr, GFP_KERNEL);
- if (!ring->desc)
- return -ENOMEM;
-
- return 0;
-}
-
-static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclgevf_desc);
-
- if (ring->desc) {
- dma_free_coherent(cmq_ring_to_dev(ring), size,
- ring->desc, ring->desc_dma_addr);
- ring->desc = NULL;
- }
-}
-
-static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
-{
- struct hclgevf_hw *hw = &hdev->hw;
- struct hclgevf_cmq_ring *ring =
- (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
- int ret;
-
- ring->dev = hdev;
- ring->flag = ring_type;
-
- /* allocate CSQ/CRQ descriptor */
- ret = hclgevf_alloc_cmd_desc(ring);
- if (ret)
- dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
- (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
-
- return ret;
-}
-
-void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
- enum hclgevf_opcode_type opcode, bool is_read)
-{
- memset(desc, 0, sizeof(struct hclgevf_desc));
- desc->opcode = cpu_to_le16(opcode);
- desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
- HCLGEVF_CMD_FLAG_IN);
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
-}
-
-struct vf_errcode {
- u32 imp_errcode;
- int common_errno;
-};
-
-static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw,
- struct hclgevf_desc *desc, int num)
-{
- struct hclgevf_desc *desc_to_use;
- int handle = 0;
-
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
-}
-
-static int hclgevf_cmd_convert_err_code(u16 desc_ret)
-{
- struct vf_errcode hclgevf_cmd_errcode[] = {
- {HCLGEVF_CMD_EXEC_SUCCESS, 0},
- {HCLGEVF_CMD_NO_AUTH, -EPERM},
- {HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
- {HCLGEVF_CMD_QUEUE_FULL, -EXFULL},
- {HCLGEVF_CMD_NEXT_ERR, -ENOSR},
- {HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK},
- {HCLGEVF_CMD_PARA_ERR, -EINVAL},
- {HCLGEVF_CMD_RESULT_ERR, -ERANGE},
- {HCLGEVF_CMD_TIMEOUT, -ETIME},
- {HCLGEVF_CMD_HILINK_ERR, -ENOLINK},
- {HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO},
- {HCLGEVF_CMD_INVALID, -EBADR},
- };
- u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode);
- u32 i;
-
- for (i = 0; i < errcode_count; i++)
- if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret)
- return hclgevf_cmd_errcode[i].common_errno;
-
- return -EIO;
-}
-
-static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw,
- struct hclgevf_desc *desc, int num, int ntc)
-{
- u16 opcode, desc_ret;
- int handle;
-
- opcode = le16_to_cpu(desc[0].opcode);
- for (handle = 0; handle < num; handle++) {
- /* Get the result of hardware write back */
- desc[handle] = hw->cmq.csq.desc[ntc];
- ntc++;
- if (ntc == hw->cmq.csq.desc_num)
- ntc = 0;
- }
- if (likely(!hclgevf_is_special_opcode(opcode)))
- desc_ret = le16_to_cpu(desc[num - 1].retval);
- else
- desc_ret = le16_to_cpu(desc[0].retval);
- hw->cmq.last_status = desc_ret;
-
- return hclgevf_cmd_convert_err_code(desc_ret);
-}
-
-static int hclgevf_cmd_check_result(struct hclgevf_hw *hw,
- struct hclgevf_desc *desc, int num, int ntc)
-{
- struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
- bool is_completed = false;
- u32 timeout = 0;
- int handle, ret;
-
- /* If the command is sync, wait for the firmware to write back;
- * if multiple descriptors are to be sent, use the first one to check
- */
- if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclgevf_cmd_csq_done(hw)) {
- is_completed = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!is_completed)
- ret = -EBADE;
- else
- ret = hclgevf_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclgevf_cmd_csq_clean(hw);
- if (handle < 0)
- ret = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
- return ret;
-}
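
The completion wait is a bounded busy-wait: poll once per microsecond, give up after cmq.tx_timeout iterations and take the -EBADE path. The pattern on its own, with usleep() standing in for udelay():

#include <stdbool.h>
#include <unistd.h>

/* Poll a completion predicate for at most 'timeout' microseconds. */
static bool poll_until(bool (*done)(void *), void *ctx, unsigned int timeout)
{
        unsigned int elapsed = 0;

        while (elapsed < timeout) {
                if (done(ctx))
                        return true;
                usleep(1);              /* userspace stand-in for udelay(1) */
                elapsed++;
        }
        return false;                   /* expired: the -EBADE path above */
}

static bool always_done(void *ctx)
{
        (void)ctx;
        return true;
}

int main(void)
{
        return poll_until(always_done, 0, 30000) ? 0 : 1;
}
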
-
-/* hclgevf_cmd_send - send command to command queue
- * @hw: pointer to the hw struct
- * @desc: prefilled descriptor for describing the command
- * @num : the number of descriptors to be sent
- *
- * This is the main send routine for the command queue; it
- * posts the descriptors to the queue, cleans the queue, etc.
- */
-int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
-{
- struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
- struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- int ret;
- int ntc;
-
- spin_lock_bh(&hw->cmq.csq.lock);
-
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- if (num > hclgevf_ring_space(&hw->cmq.csq)) {
- /* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
- * so the SW HEAD pointer csq->next_to_clean needs updating
- */
- csq->next_to_clean = hclgevf_read_dev(hw,
- HCLGEVF_NIC_CSQ_HEAD_REG);
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- /* Record the location of desc in the ring for this time
- * which will be used for hardware to write back
- */
- ntc = hw->cmq.csq.next_to_use;
-
- hclgevf_cmd_copy_desc(hw, desc, num);
-
- /* Write to hardware */
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
- hw->cmq.csq.next_to_use);
-
- ret = hclgevf_cmd_check_result(hw, desc, num, ntc);
-
- spin_unlock_bh(&hw->cmq.csq.lock);
-
- return ret;
-}
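
hclgevf_ring_space(), used in the full-queue check above, is not part of this hunk; a common formulation for a single-producer, single-consumer ring, offered here as an assumption rather than the driver's exact code, keeps one slot unused so that a full ring is distinguishable from an empty one:

#include <stdio.h>

/* next_to_use is the producer (tail) index, next_to_clean the
 * consumer (head) index; one slot is reserved. */
static int ring_space(int next_to_use, int next_to_clean, int desc_num)
{
        int used = (next_to_use - next_to_clean + desc_num) % desc_num;

        return desc_num - used - 1;
}

int main(void)
{
        printf("%d\n", ring_space(5, 2, 8));    /* 8 - 3 - 1 = 4 */
        return 0;
}
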
-
-static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
-}
-
-static const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = {
- {HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
- {HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
- {HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
- {HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
- {HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
- {HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
-};
-
-static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
- struct hclgevf_query_version_cmd *cmd)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- u32 caps, i;
-
- caps = __le32_to_cpu(cmd->caps[0]);
- for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++)
- if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit))
- set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit,
- ae_dev->caps);
-}
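
Capability parsing walks a translation table and mirrors each firmware bit that is set into the driver-local bitmap. A self-contained sketch (the bit positions in cap_map are placeholders, not the real HCLGEVF/HNAE3 values):

#include <stdint.h>
#include <stdio.h>

struct caps_bit_map { uint16_t fw_bit; uint16_t drv_bit; };

static const struct caps_bit_map cap_map[] = {
        { 0, 3 },       /* e.g. UDP GSO */
        { 4, 7 },       /* e.g. INT QL */
        { 15, 9 },      /* e.g. RXD advanced layout */
};

/* Translate a firmware capability word into the driver bitmap. */
static void parse_caps(uint32_t fw_caps, uint32_t *drv_caps)
{
        unsigned int i;

        for (i = 0; i < sizeof(cap_map) / sizeof(cap_map[0]); i++)
                if (fw_caps & (1u << cap_map[i].fw_bit))
                        *drv_caps |= 1u << cap_map[i].drv_bit;
}

int main(void)
{
        uint32_t drv = 0;

        parse_caps((1u << 0) | (1u << 15), &drv);
        printf("0x%x\n", drv);  /* bits 3 and 9 set: 0x208 */
        return 0;
}
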
-
-static __le32 hclgevf_build_api_caps(void)
-{
- u32 api_caps = 0;
-
- hnae3_set_bit(api_caps, HCLGEVF_API_CAP_FLEX_RSS_TBL_B, 1);
-
- return cpu_to_le32(api_caps);
-}
-
-static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hclgevf_query_version_cmd *resp;
- struct hclgevf_desc desc;
- int status;
-
- resp = (struct hclgevf_query_version_cmd *)desc.data;
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
- resp->api_caps = hclgevf_build_api_caps();
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- return status;
-
- hdev->fw_version = le32_to_cpu(resp->firmware);
-
- ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
- HNAE3_PCI_REVISION_BIT_SIZE;
- ae_dev->dev_version |= hdev->pdev->revision;
-
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
- hclgevf_set_default_capability(hdev);
-
- hclgevf_parse_capability(hdev, resp);
-
- return status;
-}
-
-int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
-{
- int ret;
-
- /* Setup the lock for command queue */
- spin_lock_init(&hdev->hw.cmq.csq.lock);
- spin_lock_init(&hdev->hw.cmq.crq.lock);
-
- hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
- hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
-
- ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CSQ ring setup error %d\n", ret);
- return ret;
- }
-
- ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CRQ ring setup error %d\n", ret);
- goto err_csq;
- }
-
- return 0;
-err_csq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
- return ret;
-}
-
-static int hclgevf_firmware_compat_config(struct hclgevf_dev *hdev, bool en)
-{
- struct hclgevf_firmware_compat_cmd *req;
- struct hclgevf_desc desc;
- u32 compat = 0;
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_IMP_COMPAT_CFG, false);
-
- if (en) {
- req = (struct hclgevf_firmware_compat_cmd *)desc.data;
-
- hnae3_set_bit(compat, HCLGEVF_SYNC_RX_RING_HEAD_EN_B, 1);
-
- req->compat = cpu_to_le32(compat);
- }
-
- return hclgevf_cmd_send(&hdev->hw, &desc, 1);
-}
-
-int hclgevf_cmd_init(struct hclgevf_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- int ret;
-
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
-
- /* initialize the mailbox's async RX queue pointers */
- hdev->arq.hdev = hdev;
- hdev->arq.head = 0;
- hdev->arq.tail = 0;
- atomic_set(&hdev->arq.count, 0);
- hdev->hw.cmq.csq.next_to_clean = 0;
- hdev->hw.cmq.csq.next_to_use = 0;
- hdev->hw.cmq.crq.next_to_clean = 0;
- hdev->hw.cmq.crq.next_to_use = 0;
-
- hclgevf_cmd_init_regs(&hdev->hw);
-
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-
- /* Check if there is new reset pending, because the higher level
- * reset may happen when lower level reset is being processed.
- */
- if (hclgevf_is_reset_pending(hdev)) {
- ret = -EBUSY;
- goto err_cmd_init;
- }
-
- /* get version and device capabilities */
- ret = hclgevf_cmd_query_version_and_capability(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to query version and capabilities, ret = %d\n", ret);
- goto err_cmd_init;
- }
-
- dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
- HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
- HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
- HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
- HNAE3_FW_VERSION_BYTE0_SHIFT));
-
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
- /* ask the firmware to enable some features, the driver can work
- * without them.
- */
- ret = hclgevf_firmware_compat_config(hdev, true);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "Firmware compatible features not enabled(%d).\n",
- ret);
- }
-
- return 0;
-
-err_cmd_init:
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-
- return ret;
-}
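
The version banner in hclgevf_cmd_init() is assembled with hnae3_get_field(), which is assumed here to be the usual mask-and-shift field extractor. A minimal model with an illustrative 8-bits-per-component layout:

#include <stdint.h>
#include <stdio.h>

/* Mask selects the bits, shift right-aligns them. */
#define GET_FIELD(origin, mask, shift) (((origin) & (mask)) >> (shift))

int main(void)
{
        uint32_t fw_version = 0x01020304;

        printf("%u.%u.%u.%u\n",
               GET_FIELD(fw_version, 0xff000000u, 24),
               GET_FIELD(fw_version, 0x00ff0000u, 16),
               GET_FIELD(fw_version, 0x0000ff00u, 8),
               GET_FIELD(fw_version, 0x000000ffu, 0));  /* prints 1.2.3.4 */
        return 0;
}
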
-
-static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
-{
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
-}
-
-void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
-{
- hclgevf_firmware_compat_config(hdev, false);
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- /* wait to ensure that the firmware completes any possible
- * leftover commands.
- */
- msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
- hclgevf_cmd_uninit_regs(&hdev->hw);
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
- hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index edc9e154061a..537b887fa0a2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -6,9 +6,8 @@
#include <linux/io.h>
#include <linux/types.h>
#include "hnae3.h"
+#include "hclge_comm_cmd.h"
-#define HCLGEVF_CMDQ_TX_TIMEOUT 30000
-#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200
#define HCLGEVF_CMDQ_RX_INVLD_B 0
#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
@@ -16,107 +15,6 @@ struct hclgevf_hw;
struct hclgevf_dev;
#define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4
-struct hclgevf_firmware_compat_cmd {
- __le32 compat;
- u8 rsv[20];
-};
-
-struct hclgevf_desc {
- __le16 opcode;
- __le16 flag;
- __le16 retval;
- __le16 rsv;
- __le32 data[6];
-};
-
-struct hclgevf_desc_cb {
- dma_addr_t dma;
- void *va;
- u32 length;
-};
-
-struct hclgevf_cmq_ring {
- dma_addr_t desc_dma_addr;
- struct hclgevf_desc *desc;
- struct hclgevf_desc_cb *desc_cb;
- struct hclgevf_dev *dev;
- u32 head;
- u32 tail;
-
- u16 buf_size;
- u16 desc_num;
- int next_to_use;
- int next_to_clean;
- u8 flag;
- spinlock_t lock; /* Command queue lock */
-};
-
-enum hclgevf_cmd_return_status {
- HCLGEVF_CMD_EXEC_SUCCESS = 0,
- HCLGEVF_CMD_NO_AUTH = 1,
- HCLGEVF_CMD_NOT_SUPPORTED = 2,
- HCLGEVF_CMD_QUEUE_FULL = 3,
- HCLGEVF_CMD_NEXT_ERR = 4,
- HCLGEVF_CMD_UNEXE_ERR = 5,
- HCLGEVF_CMD_PARA_ERR = 6,
- HCLGEVF_CMD_RESULT_ERR = 7,
- HCLGEVF_CMD_TIMEOUT = 8,
- HCLGEVF_CMD_HILINK_ERR = 9,
- HCLGEVF_CMD_QUEUE_ILLEGAL = 10,
- HCLGEVF_CMD_INVALID = 11,
-};
-
-enum hclgevf_cmd_status {
- HCLGEVF_STATUS_SUCCESS = 0,
- HCLGEVF_ERR_CSQ_FULL = -1,
- HCLGEVF_ERR_CSQ_TIMEOUT = -2,
- HCLGEVF_ERR_CSQ_ERROR = -3
-};
-
-struct hclgevf_cmq {
- struct hclgevf_cmq_ring csq;
- struct hclgevf_cmq_ring crq;
- u16 tx_timeout; /* Tx timeout */
- enum hclgevf_cmd_status last_status;
-};
-
-#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
-#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
-#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
-#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
-#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
-#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5
-
-#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
-#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
-#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
-#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
-#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
-#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
-
-enum hclgevf_opcode_type {
- /* Generic command */
- HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
- HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
- HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050,
-
- /* TQP command */
- HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
- HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
- HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
- /* GRO command */
- HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
- /* RSS cmd */
- HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
- HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
- HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
- HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
- /* Mailbox cmd */
- HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
-
- /* IMP stats command */
- HCLGEVF_OPC_IMP_COMPAT_CFG = 0x701A,
-};
#define HCLGEVF_TQP_REG_OFFSET 0x80000
#define HCLGEVF_TQP_REG_SIZE 0x200
@@ -156,34 +54,6 @@ struct hclgevf_ctrl_vector_chain {
u8 resv;
};
-enum HCLGEVF_CAP_BITS {
- HCLGEVF_CAP_UDP_GSO_B,
- HCLGEVF_CAP_QB_B,
- HCLGEVF_CAP_FD_FORWARD_TC_B,
- HCLGEVF_CAP_PTP_B,
- HCLGEVF_CAP_INT_QL_B,
- HCLGEVF_CAP_HW_TX_CSUM_B,
- HCLGEVF_CAP_TX_PUSH_B,
- HCLGEVF_CAP_PHY_IMP_B,
- HCLGEVF_CAP_TQP_TXRX_INDEP_B,
- HCLGEVF_CAP_HW_PAD_B,
- HCLGEVF_CAP_STASH_B,
- HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
- HCLGEVF_CAP_RXD_ADV_LAYOUT_B = 15,
-};
-
-enum HCLGEVF_API_CAP_BITS {
- HCLGEVF_API_CAP_FLEX_RSS_TBL_B,
-};
-
-#define HCLGEVF_QUERY_CAP_LENGTH 3
-struct hclgevf_query_version_cmd {
- __le32 firmware;
- __le32 hardware;
- __le32 api_caps;
- __le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
-};
-
#define HCLGEVF_MSIX_OFT_ROCEE_S 0
#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
#define HCLGEVF_VEC_NUM_S 0
@@ -203,50 +73,6 @@ struct hclgevf_cfg_gro_status_cmd {
u8 rsv[23];
};
-#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
-#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
-#define HCLGEVF_RSS_HASH_KEY_NUM 16
-struct hclgevf_rss_config_cmd {
- u8 hash_config;
- u8 rsv[7];
- u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
-};
-
-struct hclgevf_rss_input_tuple_cmd {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
- u8 rsv[16];
-};
-
-#define HCLGEVF_RSS_CFG_TBL_SIZE 16
-
-struct hclgevf_rss_indirection_table_cmd {
- __le16 start_table_index;
- __le16 rss_set_bitmap;
- u8 rsv[4];
- u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
-};
-
-#define HCLGEVF_RSS_TC_OFFSET_S 0
-#define HCLGEVF_RSS_TC_OFFSET_M GENMASK(10, 0)
-#define HCLGEVF_RSS_TC_SIZE_MSB_B 11
-#define HCLGEVF_RSS_TC_SIZE_S 12
-#define HCLGEVF_RSS_TC_SIZE_M GENMASK(14, 12)
-#define HCLGEVF_RSS_TC_VALID_B 15
-#define HCLGEVF_MAX_TC_NUM 8
-#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET 3
-
-struct hclgevf_rss_tc_mode_cmd {
- __le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
- u8 rsv[8];
-};
-
#define HCLGEVF_LINK_STS_B 0
#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
struct hclgevf_link_status_cmd {
@@ -273,9 +99,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
u8 rsv[14];
};
-#define HCLGEVF_TYPE_CRQ 0
-#define HCLGEVF_TYPE_CSQ 1
-
/* this bit indicates that the driver is ready for hardware reset */
#define HCLGEVF_NIC_SW_RST_RDY_B 16
#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
@@ -285,6 +108,9 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
+#define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \
+ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
+
struct hclgevf_dev_specs_0_cmd {
__le32 rsv0;
__le32 mac_entry_num;
@@ -305,38 +131,6 @@ struct hclgevf_dev_specs_1_cmd {
u8 rsv1[18];
};
-/* capabilities bits map between imp firmware and local driver */
-struct hclgevf_caps_bit_map {
- u16 imp_bit;
- u16 local_bit;
-};
-
-static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
-{
- writel(value, base + reg);
-}
-
-static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
-{
- u8 __iomem *reg_addr = READ_ONCE(base);
-
- return readl(reg_addr + reg);
-}
-
-#define hclgevf_write_dev(a, reg, value) \
- hclgevf_write_reg((a)->io_base, reg, value)
-#define hclgevf_read_dev(a, reg) \
- hclgevf_read_reg((a)->io_base, reg)
-
-#define HCLGEVF_SEND_SYNC(flag) \
- ((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
-
-int hclgevf_cmd_init(struct hclgevf_dev *hdev);
-void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
-int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
-
-int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
-void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
- enum hclgevf_opcode_type opcode,
- bool is_read);
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num);
+void hclgevf_arq_init(struct hclgevf_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 41afaeea881b..21442a9bb996 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -9,6 +9,7 @@
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
+#include "hclge_comm_rss.h"
#define HCLGEVF_NAME "hclgevf"
@@ -30,30 +31,22 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
{0, }
};
-static const u8 hclgevf_hash_key[] = {
- 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
- 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
- 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
- 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
- 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
-};
-
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG,
- HCLGEVF_NIC_CSQ_BASEADDR_H_REG,
- HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CSQ_TAIL_REG,
- HCLGEVF_NIC_CSQ_HEAD_REG,
- HCLGEVF_NIC_CRQ_BASEADDR_L_REG,
- HCLGEVF_NIC_CRQ_BASEADDR_H_REG,
- HCLGEVF_NIC_CRQ_DEPTH_REG,
- HCLGEVF_NIC_CRQ_TAIL_REG,
- HCLGEVF_NIC_CRQ_HEAD_REG,
- HCLGEVF_VECTOR0_CMDQ_SRC_REG,
- HCLGEVF_VECTOR0_CMDQ_STATE_REG,
- HCLGEVF_CMDQ_INTR_EN_REG,
- HCLGEVF_CMDQ_INTR_GEN_REG};
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
HCLGEVF_RST_ING,
@@ -92,109 +85,40 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
HCLGEVF_TQP_INTR_GL2_REG,
HCLGEVF_TQP_INTR_RL_REG};
-static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num : the number of descriptors to be sent
+ *
+ * This is the main send routine for the command queue; it
+ * posts the descriptors to the queue, cleans the queue, etc.
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
- if (!handle->client)
- return container_of(handle, struct hclgevf_dev, nic);
- else if (handle->client->type == HNAE3_CLIENT_ROCE)
- return container_of(handle, struct hclgevf_dev, roce);
- else
- return container_of(handle, struct hclgevf_dev, nic);
+ return hclge_comm_cmd_send(&hw->hw, desc, num);
}
-static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_desc desc;
- struct hclgevf_tqp *tqp;
- int status;
- int i;
+ struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_QUERY_RX_STATUS,
- true);
-
- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- status, i);
- return status;
- }
- tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc.data[1]);
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
- true);
-
- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- status, i);
- return status;
- }
- tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc.data[1]);
- }
-
- return 0;
+ spin_lock(&cmdq->crq.lock);
+ /* initialize the mailbox's async RX queue pointers */
+ hdev->arq.hdev = hdev;
+ hdev->arq.head = 0;
+ hdev->arq.tail = 0;
+ atomic_set(&hdev->arq.count, 0);
+ spin_unlock(&cmdq->crq.lock);
}
-static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclgevf_tqp *tqp;
- u64 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
- }
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
- }
-
- return buff;
-}
-
-static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-
- return kinfo->num_tqps * 2;
-}
-
-static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
- tqp->index);
- buff += ETH_GSTRING_LEN;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
- tqp->index);
- buff += ETH_GSTRING_LEN;
- }
-
- return buff;
+ if (!handle->client)
+ return container_of(handle, struct hclgevf_dev, nic);
+ else if (handle->client->type == HNAE3_CLIENT_ROCE)
+ return container_of(handle, struct hclgevf_dev, roce);
+ else
+ return container_of(handle, struct hclgevf_dev, nic);
}
static void hclgevf_update_stats(struct hnae3_handle *handle,
@@ -203,7 +127,7 @@ static void hclgevf_update_stats(struct hnae3_handle *handle,
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int status;
- status = hclgevf_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status)
dev_err(&hdev->pdev->dev,
"VF update of TQPS stats fail, status = %d.\n",
@@ -215,7 +139,7 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
if (strset == ETH_SS_TEST)
return -EOPNOTSUPP;
else if (strset == ETH_SS_STATS)
- return hclgevf_tqps_get_sset_count(handle, strset);
+ return hclge_comm_tqps_get_sset_count(handle);
return 0;
}
@@ -226,12 +150,12 @@ static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
u8 *p = (char *)data;
if (strset == ETH_SS_STATS)
- p = hclgevf_tqps_get_strings(handle, p);
+ p = hclge_comm_tqps_get_strings(handle, p);
}
static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
- hclgevf_tqps_get_stats(handle, data);
+ hclge_comm_tqps_get_stats(handle, data);
}
static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
@@ -397,11 +321,11 @@ static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
- struct hclgevf_tqp *tqp;
+ struct hclge_comm_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
- sizeof(struct hclgevf_tqp), GFP_KERNEL);
+ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
@@ -420,11 +344,11 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
* HCLGEVF_TQP_MAX_SIZE_DEV_V2.
*/
if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGEVF_TQP_REG_OFFSET +
i * HCLGEVF_TQP_REG_SIZE;
else
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGEVF_TQP_REG_OFFSET +
HCLGEVF_TQP_EXT_REG_OFFSET +
(i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
@@ -448,7 +372,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
kinfo->num_tx_desc = hdev->num_tx_desc;
kinfo->num_rx_desc = hdev->num_rx_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
- for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
num_tc++;
@@ -539,7 +463,7 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->pdev = hdev->pdev;
nic->numa_node_mask = hdev->numa_node_mask;
nic->flags |= HNAE3_SUPPORT_VF;
- nic->kinfo.io_base = hdev->hw.io_base;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclgevf_knic_setup(hdev);
if (ret)
@@ -576,7 +500,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
vector->vector = pci_irq_vector(hdev->pdev, i);
- vector->io_addr = hdev->hw.io_base +
+ vector->io_addr = hdev->hw.hw.io_base +
HCLGEVF_VECTOR_REG_BASE +
(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
hdev->vector_status[i] = 0;
@@ -606,137 +530,11 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
return -EINVAL;
}
-static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
- const u8 hfunc, const u8 *key)
-{
- struct hclgevf_rss_config_cmd *req;
- unsigned int key_offset = 0;
- struct hclgevf_desc desc;
- int key_counts;
- int key_size;
- int ret;
-
- key_counts = HCLGEVF_RSS_KEY_SIZE;
- req = (struct hclgevf_rss_config_cmd *)desc.data;
-
- while (key_counts) {
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_RSS_GENERIC_CONFIG,
- false);
-
- req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
- req->hash_config |=
- (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
-
- key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
- memcpy(req->hash_key,
- key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
-
- key_counts -= key_size;
- key_offset++;
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure RSS config fail, status = %d\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
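
The removed hclgevf_set_rss_algo_key() pushes the 40-byte hash key to hardware in 16-byte pieces, tagging each command with its chunk offset. The chunking on its own, with send_chunk() as a stand-in for the command-queue send:

#include <stdio.h>

#define KEY_SIZE 40
#define CHUNK    16

static void send_chunk(unsigned int offset, const unsigned char *buf, int len)
{
        (void)buf;                      /* a real send would copy buf out */
        printf("chunk %u: %d bytes\n", offset, len);
}

static void set_rss_key(const unsigned char key[KEY_SIZE])
{
        unsigned int key_offset = 0;
        int remaining = KEY_SIZE;

        while (remaining > 0) {
                int len = remaining < CHUNK ? remaining : CHUNK;

                send_chunk(key_offset, key + key_offset * CHUNK, len);
                remaining -= len;
                key_offset++;
        }
}

int main(void)
{
        unsigned char key[KEY_SIZE] = { 0 };

        set_rss_key(key);               /* chunks of 16, 16 and 8 bytes */
        return 0;
}
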
-
-static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
-{
- return HCLGEVF_RSS_KEY_SIZE;
-}
-
-static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
-{
- const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
- struct hclgevf_rss_indirection_table_cmd *req;
- struct hclgevf_desc desc;
- int rss_cfg_tbl_num;
- int status;
- int i, j;
-
- req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
- rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
- HCLGEVF_RSS_CFG_TBL_SIZE;
-
- for (i = 0; i < rss_cfg_tbl_num; i++) {
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
- false);
- req->start_table_index =
- cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
- req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
- for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
- req->rss_result[j] =
- indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
-
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "VF failed(=%d) to set RSS indirection table\n",
- status);
- return status;
- }
- }
-
- return 0;
-}
-
-static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
-{
- struct hclgevf_rss_tc_mode_cmd *req;
- u16 tc_offset[HCLGEVF_MAX_TC_NUM];
- u16 tc_valid[HCLGEVF_MAX_TC_NUM];
- u16 tc_size[HCLGEVF_MAX_TC_NUM];
- struct hclgevf_desc desc;
- u16 roundup_size;
- unsigned int i;
- int status;
-
- req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
-
- roundup_size = roundup_pow_of_two(rss_size);
- roundup_size = ilog2(roundup_size);
-
- for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- tc_valid[i] = 1;
- tc_size[i] = roundup_size;
- tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
- }
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
- for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- u16 mode = 0;
-
- hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
- (tc_valid[i] & 0x1));
- hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
- HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
- tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
- 0x1);
- hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
- HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
-
- req->rss_tc_mode[i] = cpu_to_le16(mode);
- }
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- dev_err(&hdev->pdev->dev,
- "VF failed(=%d) to set rss tc mode\n", status);
-
- return status;
-}
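
The removed hclgevf_set_rss_tc_mode() rounds rss_size up to a power of two, takes its log2 and packs it with the queue offset and a valid bit into one 16-bit word per TC, following the HCLGEVF_RSS_TC_* layout (offset in bits 10:0, size MSB in bit 11, size in bits 14:12, valid in bit 15). A compact model:

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_roundup(unsigned int v)
{
        unsigned int bits = 0;

        while ((1u << bits) < v)
                bits++;
        return bits;
}

static uint16_t pack_tc_mode(uint16_t offset, unsigned int size_log2, int valid)
{
        uint16_t mode = 0;

        mode |= offset & 0x7ff;                          /* bits 10:0  */
        mode |= (uint16_t)((size_log2 >> 3) & 0x1) << 11;/* bit 11     */
        mode |= (uint16_t)(size_log2 & 0x7) << 12;       /* bits 14:12 */
        mode |= (uint16_t)(valid ? 1 : 0) << 15;         /* bit 15     */
        return mode;
}

int main(void)
{
        printf("0x%04x\n", pack_tc_mode(0, ilog2_roundup(16), 1)); /* 0xc000 */
        return 0;
}
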
-
/* for revision 0x20, vf shared the same rss config with pf */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN 8
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
struct hclge_vf_to_pf_msg send_msg;
u16 msg_num, hash_key_index;
@@ -744,7 +542,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
int ret;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
- msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
+ msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
HCLGEVF_RSS_MBX_RESP_LEN;
for (index = 0; index < msg_num; index++) {
send_msg.data[0] = index;
@@ -761,7 +559,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
if (index == msg_num - 1)
memcpy(&rss_cfg->rss_hash_key[hash_key_index],
&resp_msg[0],
- HCLGEVF_RSS_KEY_SIZE - hash_key_index);
+ HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
else
memcpy(&rss_cfg->rss_hash_key[hash_key_index],
&resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
@@ -774,29 +572,11 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
u8 *hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int i, ret;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int ret;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- /* Get hash algorithm */
- if (hfunc) {
- switch (rss_cfg->hash_algo) {
- case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
- *hfunc = ETH_RSS_HASH_TOP;
- break;
- case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
- *hfunc = ETH_RSS_HASH_XOR;
- break;
- default:
- *hfunc = ETH_RSS_HASH_UNKNOWN;
- break;
- }
- }
-
- /* Get the RSS Key required by the user */
- if (key)
- memcpy(key, rss_cfg->rss_hash_key,
- HCLGEVF_RSS_KEY_SIZE);
+ hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
} else {
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
@@ -805,67 +585,28 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
if (ret)
return ret;
memcpy(key, rss_cfg->rss_hash_key,
- HCLGEVF_RSS_KEY_SIZE);
+ HCLGE_COMM_RSS_KEY_SIZE);
}
}
- if (indir)
- for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
- indir[i] = rss_cfg->rss_indirection_tbl[i];
+ hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+ hdev->ae_dev->dev_specs.rss_ind_tbl_size);
return 0;
}
-static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
- u8 *hash_algo)
-{
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- return 0;
- case ETH_RSS_HASH_XOR:
- *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
- return 0;
- case ETH_RSS_HASH_NO_CHANGE:
- *hash_algo = hdev->rss_cfg.hash_algo;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- u8 hash_algo;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+ ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
+ hfunc);
if (ret)
return ret;
-
- /* Set the RSS Hash Key if specified by the user */
- if (key) {
- ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "invalid hfunc type %u\n", hfunc);
- return ret;
- }
-
- /* Update the shadow RSS key with user specified qids */
- memcpy(rss_cfg->rss_hash_key, key,
- HCLGEVF_RSS_KEY_SIZE);
- } else {
- ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
- rss_cfg->rss_hash_key);
- if (ret)
- return ret;
- }
- rss_cfg->hash_algo = hash_algo;
}
/* update the shadow RSS table with user specified qids */
@@ -873,179 +614,26 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
rss_cfg->rss_indirection_tbl[i] = indir[i];
/* update the hardware */
- return hclgevf_set_rss_indir_table(hdev);
-}
-
-static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
-{
- u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
-
- if (nfc->data & RXH_L4_B_2_3)
- hash_sets |= HCLGEVF_D_PORT_BIT;
- else
- hash_sets &= ~HCLGEVF_D_PORT_BIT;
-
- if (nfc->data & RXH_IP_SRC)
- hash_sets |= HCLGEVF_S_IP_BIT;
- else
- hash_sets &= ~HCLGEVF_S_IP_BIT;
-
- if (nfc->data & RXH_IP_DST)
- hash_sets |= HCLGEVF_D_IP_BIT;
- else
- hash_sets &= ~HCLGEVF_D_IP_BIT;
-
- if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
- hash_sets |= HCLGEVF_V_TAG_BIT;
-
- return hash_sets;
-}
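
hclgevf_get_rss_hash_bits() condenses the ethtool RXH_* selectors into the device's tuple bitmap. The same mapping in miniature; the RXH_* and *_BIT values below are local placeholders rather than the kernel's real constants, and the SCTP verification-tag case is left out:

#include <stdint.h>

#define RXH_IP_SRC   (1u << 0)
#define RXH_IP_DST   (1u << 1)
#define RXH_L4_B_0_1 (1u << 2)          /* source port */
#define RXH_L4_B_2_3 (1u << 3)          /* destination port */

#define S_IP_BIT   (1u << 0)
#define D_IP_BIT   (1u << 1)
#define S_PORT_BIT (1u << 2)
#define D_PORT_BIT (1u << 3)

static uint8_t rss_hash_bits(uint32_t data)
{
        uint8_t sets = 0;

        if (data & RXH_IP_SRC)
                sets |= S_IP_BIT;
        if (data & RXH_IP_DST)
                sets |= D_IP_BIT;
        if (data & RXH_L4_B_0_1)
                sets |= S_PORT_BIT;
        if (data & RXH_L4_B_2_3)
                sets |= D_PORT_BIT;
        return sets;
}

int main(void)
{
        return rss_hash_bits(RXH_IP_SRC | RXH_L4_B_2_3) ==
               (S_IP_BIT | D_PORT_BIT) ? 0 : 1;
}
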
-
-static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc,
- struct hclgevf_rss_input_tuple_cmd *req)
-{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- u8 tuple_sets;
-
- req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
-
- tuple_sets = hclgevf_get_rss_hash_bits(nfc);
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- req->ipv4_tcp_en = tuple_sets;
- break;
- case TCP_V6_FLOW:
- req->ipv6_tcp_en = tuple_sets;
- break;
- case UDP_V4_FLOW:
- req->ipv4_udp_en = tuple_sets;
- break;
- case UDP_V6_FLOW:
- req->ipv6_udp_en = tuple_sets;
- break;
- case SCTP_V4_FLOW:
- req->ipv4_sctp_en = tuple_sets;
- break;
- case SCTP_V6_FLOW:
- if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
- (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
- return -EINVAL;
-
- req->ipv6_sctp_en = tuple_sets;
- break;
- case IPV4_FLOW:
- req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- break;
- case IPV6_FLOW:
- req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
struct ethtool_rxnfc *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- struct hclgevf_rss_input_tuple_cmd *req;
- struct hclgevf_desc desc;
int ret;
if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
- if (nfc->data &
- ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
-
- ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to init rss tuple cmd, ret = %d\n", ret);
- return ret;
- }
-
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->rss_cfg, nfc);
+ if (ret)
dev_err(&hdev->pdev->dev,
- "Set rss tuple fail, status = %d\n", ret);
- return ret;
- }
+ "failed to set rss tuple, ret = %d.\n", ret);
- rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
- rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
- rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
- rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
- rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
- rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
- rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
- rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
- return 0;
-}
-
-static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
- int flow_type, u8 *tuple_sets)
-{
- switch (flow_type) {
- case TCP_V4_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
- break;
- case UDP_V4_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
- break;
- case TCP_V6_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
- break;
- case UDP_V6_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
- break;
- case SCTP_V4_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
- break;
- case SCTP_V6_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
- break;
- case IPV4_FLOW:
- case IPV6_FLOW:
- *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
-{
- u64 tuple_data = 0;
-
- if (tuple_sets & HCLGEVF_D_PORT_BIT)
- tuple_data |= RXH_L4_B_2_3;
- if (tuple_sets & HCLGEVF_S_PORT_BIT)
- tuple_data |= RXH_L4_B_0_1;
- if (tuple_sets & HCLGEVF_D_IP_BIT)
- tuple_data |= RXH_IP_DST;
- if (tuple_sets & HCLGEVF_S_IP_BIT)
- tuple_data |= RXH_IP_SRC;
-
- return tuple_data;
+ return ret;
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
@@ -1060,47 +648,20 @@ static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
nfc->data = 0;
- ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
- &tuple_sets);
+ ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
+ &tuple_sets);
if (ret || !tuple_sets)
return ret;
- nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
+ nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
return 0;
}
-static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
- struct hclgevf_rss_cfg *rss_cfg)
-{
- struct hclgevf_rss_input_tuple_cmd *req;
- struct hclgevf_desc desc;
- int ret;
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
-
- req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
-
- req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
-
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss input fail, status = %d\n", ret);
- return ret;
-}
-
static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
return rss_cfg->rss_size;
}
@@ -1273,12 +834,11 @@ static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
u16 stream_id, bool enable)
{
struct hclgevf_cfg_com_tqp_queue_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
- false);
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
if (enable)
@@ -1302,18 +862,6 @@ static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
return 0;
}
-static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclgevf_tqp *tqp;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
- }
-}
-
static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
struct hclge_vf_to_pf_msg send_msg;
@@ -1514,15 +1062,18 @@ static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
struct list_head *list,
enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclgevf_mac_addr_node *mac_node, *tmp;
int ret;
list_for_each_entry_safe(mac_node, tmp, list, node) {
ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
if (ret) {
+ hnae3_format_mac_addr(format_mac_addr,
+ mac_node->mac_addr);
dev_err(&hdev->pdev->dev,
- "failed to configure mac %pM, state = %d, ret = %d\n",
- mac_node->mac_addr, mac_node->state, ret);
+ "failed to configure mac %s, state = %d, ret = %d\n",
+ format_mac_addr, mac_node->state, ret);
return;
}
if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
@@ -1859,13 +1410,13 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
int ret;
if (hdev->reset_type == HNAE3_VF_RESET)
- ret = readl_poll_timeout(hdev->hw.io_base +
+ ret = readl_poll_timeout(hdev->hw.hw.io_base +
HCLGEVF_VF_RST_ING, val,
!(val & HCLGEVF_VF_RST_ING_BIT),
HCLGEVF_RESET_WAIT_US,
HCLGEVF_RESET_WAIT_TIMEOUT_US);
else
- ret = readl_poll_timeout(hdev->hw.io_base +
+ ret = readl_poll_timeout(hdev->hw.hw.io_base +
HCLGEVF_RST_ING, val,
!(val & HCLGEVF_RST_ING_BITS),
HCLGEVF_RESET_WAIT_US,
@@ -1891,13 +1442,13 @@ static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
u32 reg_val;
- reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
if (enable)
reg_val |= HCLGEVF_NIC_SW_RST_RDY;
else
reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
reg_val);
}
@@ -1948,7 +1499,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
hdev->rst_stats.vf_func_rst_cnt++;
}
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
/* inform hardware that preparatory work is done */
msleep(HCLGEVF_RESET_SYNC_TIME);
hclgevf_reset_handshake(hdev, true);
@@ -1977,9 +1528,9 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
- hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
+ hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
- hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG));
+ hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
@@ -2163,24 +1714,20 @@ static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
int retry_cnt = 0;
int ret;
-retry:
- down(&hdev->reset_sem);
- set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- hdev->reset_type = rst_type;
- ret = hclgevf_reset_prepare(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
- ret);
- if (hdev->reset_pending ||
- retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
- dev_err(&hdev->pdev->dev,
- "reset_pending:0x%lx, retry_cnt:%d\n",
- hdev->reset_pending, retry_cnt);
- clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
- msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
- goto retry;
- }
+ while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = rst_type;
+ ret = hclgevf_reset_prepare(hdev);
+ if (!ret && !hdev->reset_pending)
+ break;
+
+ dev_err(&hdev->pdev->dev,
+ "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+ ret, hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
}
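
The rework trades the old goto-based retry for a counted loop that releases the semaphore and sleeps between attempts. The control-flow pattern in isolation (prepare() and wait_ms() are stand-ins for hclgevf_reset_prepare() and msleep()):

#include <stdbool.h>

#define RESET_RETRY_CNT 5

static bool prepare_with_retry(bool (*prepare)(void),
                               void (*wait_ms)(unsigned int))
{
        int retry_cnt = 0;

        while (retry_cnt++ < RESET_RETRY_CNT) {
                if (prepare())
                        return true;    /* prepared, no reset pending */
                wait_ms(500);           /* back off before retrying */
        }
        return false;                   /* gave up after RESET_RETRY_CNT tries */
}

static bool prep_ok(void) { return true; }
static void no_wait(unsigned int ms) { (void)ms; }

int main(void)
{
        return prepare_with_retry(prep_ok, no_wait) ? 0 : 1;
}
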
/* disable misc vector before reset done */
@@ -2220,7 +1767,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
vector->vector_irq = pci_irq_vector(hdev->pdev,
HCLGEVF_MISC_VECTOR_NUM);
- vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+ vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
/* vector status always valid for Vector 0 */
hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
@@ -2341,7 +1888,7 @@ static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
struct hclge_vf_to_pf_msg send_msg;
int ret;
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
@@ -2378,7 +1925,7 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
}
if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
- hclgevf_tqps_update_stats(handle);
+ hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
/* VF does not need to request link status when this bit is set, because
* PF will push its link status to VFs when link status changed.
@@ -2419,7 +1966,7 @@ static void hclgevf_service_task(struct work_struct *work)
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
- hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
}
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
@@ -2429,14 +1976,14 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
/* fetch the events from their corresponding regs */
cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
- HCLGEVF_VECTOR0_CMDQ_STATE_REG);
+ HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
hdev->rst_stats.vf_rst_cnt++;
/* set up VF hardware reset status, its PF will clear
@@ -2496,8 +2043,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
@@ -2560,8 +2106,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
roce->rinfo.base_vector = hdev->roce_base_msix_offset;
roce->rinfo.netdev = nic->kinfo.netdev;
- roce->rinfo.roce_io_base = hdev->hw.io_base;
- roce->rinfo.roce_mem_base = hdev->hw.mem_base;
+ roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
@@ -2573,13 +2119,13 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
static int hclgevf_config_gro(struct hclgevf_dev *hdev)
{
struct hclgevf_cfg_gro_status_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
int ret;
if (!hnae3_dev_gro_supported(hdev))
return 0;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
false);
req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
@@ -2593,71 +2139,37 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev)
return ret;
}
-static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
-{
- u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- struct hclgevf_rss_tuple_cfg *tuple_sets;
- u32 i;
-
- rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
- tuple_sets = &rss_cfg->rss_tuple_sets;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- u8 *rss_ind_tbl;
-
- rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
-
- rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
- sizeof(*rss_ind_tbl), GFP_KERNEL);
- if (!rss_ind_tbl)
- return -ENOMEM;
-
- rss_cfg->rss_indirection_tbl = rss_ind_tbl;
- memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
- HCLGEVF_RSS_KEY_SIZE);
-
- tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
- tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_sctp_en =
- hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
- HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
- HCLGEVF_RSS_INPUT_TUPLE_SCTP;
- tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- }
-
- /* Initialize RSS indirect table */
- for (i = 0; i < rss_ind_tbl_size; i++)
- rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
-
- return 0;
-}
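
The default indirection table built by the removed hclgevf_rss_init_cfg() simply strides entry i onto queue i % rss_size, spreading the table evenly across the active queues. As a runnable fragment:

#include <stdio.h>

static void init_indir_tbl(unsigned char *tbl, unsigned int tbl_size,
                           unsigned int rss_size)
{
        unsigned int i;

        for (i = 0; i < tbl_size; i++)
                tbl[i] = (unsigned char)(i % rss_size);
}

int main(void)
{
        unsigned char tbl[8];

        init_indir_tbl(tbl, 8, 4);
        printf("%u %u %u\n", tbl[0], tbl[3], tbl[4]);   /* 0 3 0 */
        return 0;
}
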
-
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
int ret;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
- rss_cfg->rss_hash_key);
+ ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
+ rss_cfg->rss_algo,
+ rss_cfg->rss_hash_key);
if (ret)
return ret;
- ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
+ ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw,
+ false, rss_cfg);
if (ret)
return ret;
}
- ret = hclgevf_set_rss_indir_table(hdev);
+ ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
if (ret)
return ret;
- return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
+ hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
+ tc_offset, tc_valid, tc_size);
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
+ tc_valid, tc_size);
}
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
@@ -2711,7 +2223,7 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
- hclgevf_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
@@ -2729,7 +2241,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
if (hdev->reset_type != HNAE3_VF_RESET)
hclgevf_reset_tqp(handle);
- hclgevf_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0);
}
@@ -3043,11 +2555,11 @@ static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
return 0;
- hw->mem_base = devm_ioremap_wc(&pdev->dev,
- pci_resource_start(pdev,
- HCLGEVF_MEM_BAR),
- pci_resource_len(pdev, HCLGEVF_MEM_BAR));
- if (!hw->mem_base) {
+ hw->hw.mem_base =
+ devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGEVF_MEM_BAR),
+ pci_resource_len(pdev, HCLGEVF_MEM_BAR));
+ if (!hw->hw.mem_base) {
dev_err(&pdev->dev, "failed to map device memory\n");
return -EFAULT;
}
@@ -3081,9 +2593,8 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->hdev = hdev;
- hw->io_base = pci_iomap(pdev, 2, 0);
- if (!hw->io_base) {
+ hw->hw.io_base = pci_iomap(pdev, 2, 0);
+ if (!hw->hw.io_base) {
dev_err(&pdev->dev, "can't map configuration register space\n");
ret = -ENOMEM;
goto err_clr_master;
@@ -3096,7 +2607,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
return 0;
err_unmap_io_base:
- pci_iounmap(pdev, hdev->hw.io_base);
+ pci_iounmap(pdev, hdev->hw.hw.io_base);
err_clr_master:
pci_clear_master(pdev);
pci_release_regions(pdev);
@@ -3110,10 +2621,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- if (hdev->hw.mem_base)
- devm_iounmap(&pdev->dev, hdev->hw.mem_base);
+ if (hdev->hw.hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
- pci_iounmap(pdev, hdev->hw.io_base);
+ pci_iounmap(pdev, hdev->hw.hw.io_base);
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -3122,10 +2633,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
struct hclgevf_query_res_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
int ret;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3179,13 +2690,13 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
ae_dev->dev_specs.max_non_tso_bd_num =
HCLGEVF_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
- ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
- struct hclgevf_desc *desc)
+ struct hclge_desc *desc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclgevf_dev_specs_0_cmd *req0;
@@ -3212,7 +2723,7 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
- dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+ dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
@@ -3221,7 +2732,7 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
{
- struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
+ struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
int ret;
int i;
@@ -3235,11 +2746,10 @@ static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclgevf_cmd_setup_basic_desc(&desc[i],
- HCLGEVF_OPC_QUERY_DEV_SPECS, true);
- desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
+ HCLGE_OPC_QUERY_DEV_SPECS, true);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
- hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
- true);
+ hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
if (ret)
@@ -3318,7 +2828,10 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
- ret = hclgevf_cmd_init(hdev);
+ hclgevf_arq_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->fw_version, false,
+ hdev->reset_pending);
if (ret) {
dev_err(&pdev->dev, "cmd failed %d\n", ret);
return ret;
@@ -3364,11 +2877,14 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
goto err_devlink_init;
- ret = hclgevf_cmd_queue_init(hdev);
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_cmd_queue_init;
- ret = hclgevf_cmd_init(hdev);
+ hclgevf_arq_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->fw_version, false,
+ hdev->reset_pending);
if (ret)
goto err_cmd_init;
@@ -3421,7 +2937,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
/* Initialize RSS for this VF */
- ret = hclgevf_rss_init_cfg(hdev);
+ ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
+ &hdev->rss_cfg);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_config;
@@ -3468,7 +2985,7 @@ err_misc_irq_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
err_cmd_init:
- hclgevf_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_cmd_queue_init:
hclgevf_devlink_uninit(hdev);
err_devlink_init:
@@ -3492,7 +3009,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_uninit_msi(hdev);
}
- hclgevf_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclgevf_devlink_uninit(hdev);
hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
@@ -3595,6 +3112,9 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
u16 cur_rss_size = kinfo->rss_size;
u16 cur_tqps = kinfo->num_tqps;
u32 *rss_indir;
@@ -3603,7 +3123,10 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
hclgevf_update_rss_size(handle, new_tqps_num);
- ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
+ hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map,
+ tc_offset, tc_valid, tc_size);
+ ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
+ tc_valid, tc_size);
if (ret)
return ret;
@@ -3704,7 +3227,7 @@ static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}
static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
@@ -3862,7 +3385,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
.get_sset_count = hclgevf_get_sset_count,
- .get_rss_key_size = hclgevf_get_rss_key_size,
+ .get_rss_key_size = hclge_comm_get_rss_key_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
.get_rss_tuple = hclgevf_get_rss_tuple,
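The hunks above all apply one refactoring: per-VF command-queue and RSS code moves into shared hclge_comm_* helpers used by both the PF (hclge) and VF (hclgevf) drivers, with the VF's private structures embedding the common state (note hdev->hw.hw and the shared HCLGE_COMM_STATE_CMD_DISABLE bit). A minimal sketch of the embedding pattern, with illustrative names rather than the real hns3 API:

	#include <linux/bitops.h>
	#include <linux/io.h>

	struct comm_hw {
		void __iomem *io_base;		/* shared BAR mapping */
		unsigned long comm_state;	/* shared CMD_DISABLE bit, etc. */
	};

	struct vf_hw {
		struct comm_hw hw;		/* common core, once duplicated */
		int num_vec;			/* still VF-specific */
	};

	static bool vf_cmdq_disabled(struct vf_hw *vf)
	{
		/* VF code now tests the shared state word instead of a
		 * private HCLGEVF_STATE_CMD_DISABLE flag. */
		return test_bit(0, &vf->hw.comm_state);
	}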
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index f6f736c0091c..502ca1ce1a90 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -10,6 +10,8 @@
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
+#include "hclge_comm_tqp_stats.h"
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
@@ -32,21 +34,6 @@
#define HCLGEVF_VECTOR_REG_OFFSET 0x4
#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
-/* bar registers for cmdq */
-#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
-#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C
-#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
-
-#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
-#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
-
/* bar registers for common func */
#define HCLGEVF_GRO_EN_REG 0x28000
#define HCLGEVF_RXD_ADV_LAYOUT_EN_REG 0x28008
@@ -86,10 +73,6 @@
#define HCLGEVF_TQP_INTR_GL2_REG 0x20300
#define HCLGEVF_TQP_INTR_RL_REG 0x20900
-/* Vector0 interrupt CMDQ event source register(RW) */
-#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
-/* Vector0 interrupt CMDQ event status register(RO) */
-#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
@@ -112,27 +95,16 @@
#define HCLGEVF_WAIT_RESET_DONE 100
#define HCLGEVF_RSS_IND_TBL_SIZE 512
-#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
-#define HCLGEVF_RSS_KEY_SIZE 40
-#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
-#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
-#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
-#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
-
-#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
-#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
-#define HCLGEVF_D_PORT_BIT BIT(0)
-#define HCLGEVF_S_PORT_BIT BIT(1)
-#define HCLGEVF_D_IP_BIT BIT(2)
-#define HCLGEVF_S_IP_BIT BIT(3)
-#define HCLGEVF_V_TAG_BIT BIT(4)
-#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
- (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_MAC_MAX_FRAME 9728
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
+#define hclgevf_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->hw.io_base, reg)
+#define hclgevf_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->hw.io_base, reg, value)
+
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
HCLGEVF_VECTOR0_EVENT_MBX,
@@ -154,7 +126,6 @@ enum hclgevf_states {
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
- HCLGEVF_STATE_CMD_DISABLE,
HCLGEVF_STATE_LINK_UPDATING,
HCLGEVF_STATE_PROMISC_CHANGED,
HCLGEVF_STATE_RST_FAIL,
@@ -173,29 +144,9 @@ struct hclgevf_mac {
};
struct hclgevf_hw {
- void __iomem *io_base;
- void __iomem *mem_base;
+ struct hclge_comm_hw hw;
int num_vec;
- struct hclgevf_cmq cmq;
struct hclgevf_mac mac;
- void *hdev; /* hchgevf device it is part of */
-};
-
-/* TQP stats */
-struct hlcgevf_tqp_stats {
- /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
- u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
- /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
- u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
-};
-
-struct hclgevf_tqp {
- struct device *dev; /* device for DMA mapping */
- struct hnae3_queue q;
- struct hlcgevf_tqp_stats tqp_stats;
- u16 index; /* global index in a NIC controller */
-
- bool alloced;
};
struct hclgevf_cfg {
@@ -208,27 +159,6 @@ struct hclgevf_cfg {
u32 numa_node_map;
};
-struct hclgevf_rss_tuple_cfg {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
-};
-
-struct hclgevf_rss_cfg {
- u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
- u32 hash_algo;
- u32 rss_size;
- u8 hw_tc_map;
- /* shadow table */
- u8 *rss_indirection_tbl;
- struct hclgevf_rss_tuple_cfg rss_tuple_sets;
-};
-
struct hclgevf_misc_vector {
u8 __iomem *addr;
int vector_irq;
@@ -273,7 +203,7 @@ struct hclgevf_dev {
struct hnae3_ae_dev *ae_dev;
struct hclgevf_hw hw;
struct hclgevf_misc_vector misc_vector;
- struct hclgevf_rss_cfg rss_cfg;
+ struct hclge_comm_rss_cfg rss_cfg;
unsigned long state;
unsigned long flr_state;
unsigned long default_reset_request;
@@ -324,7 +254,7 @@ struct hclgevf_dev {
struct delayed_work service_task;
- struct hclgevf_tqp *htqp;
+ struct hclge_comm_tqp *htqp;
struct hnae3_handle nic;
struct hnae3_handle roce;
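The hclgevf_read_dev()/hclgevf_write_dev() macros above now forward to the shared hclge_comm_read_reg()/hclge_comm_write_reg() helpers. Such accessors are in all likelihood thin readl()/writel() wrappers over the mapped BAR; a hedged sketch of the shape they take (not the actual common-module source):

	#include <linux/io.h>

	static inline u32 comm_read_reg(u8 __iomem *base, u32 reg)
	{
		return readl(base + reg);	/* 32-bit MMIO read at offset */
	}

	static inline void comm_write_reg(u8 __iomem *base, u32 reg, u32 value)
	{
		writel(value, base + reg);	/* 32-bit MMIO write at offset */
	}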
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index c5ac6ecf36e1..d5e0a3f762f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -53,7 +53,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state))
return -EIO;
usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
@@ -97,7 +98,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
u8 *resp_data, u16 resp_len)
{
struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
int status;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
@@ -151,9 +152,9 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
- u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG);
+ u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->cmq.crq.next_to_use;
+ return tail == hw->hw.cmq.crq.next_to_use;
}
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
@@ -212,14 +213,15 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
{
struct hclge_mbx_pf_to_vf_cmd *req;
- struct hclgevf_cmq_ring *crq;
- struct hclgevf_desc *desc;
+ struct hclge_comm_cmq_ring *crq;
+ struct hclge_desc *desc;
u16 flag;
- crq = &hdev->hw.cmq.crq;
+ crq = &hdev->hw.hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
dev_info(&hdev->pdev->dev, "vf crq need init\n");
return;
}
@@ -269,7 +271,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
crq->next_to_use);
}
@@ -296,7 +298,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
/* process all the async queue messages */
while (tail != hdev->arq.head) {
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
dev_info(&hdev->pdev->dev,
"vf crq need init in async\n");
return;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index a85667078b72..93192f58ac88 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -547,7 +547,9 @@ static void hinic_get_drvinfo(struct net_device *netdev,
}
static void hinic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
@@ -580,7 +582,9 @@ static int check_ringparam_valid(struct hinic_dev *nic_dev,
}
static int hinic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
u16 new_sq_depth, new_rq_depth;
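This signature change repeats across every driver touched below (hinic, emac, ibmvnic, e100, e1000, e1000e, fm10k): the ethtool core now passes a struct kernel_ethtool_ringparam for extended, netlink-only ring fields and a struct netlink_ext_ack for textual error reporting, and drivers that need neither simply accept the extra arguments. A sketch of the new shape, with hypothetical driver names:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	#define FOO_MAX_RX_DEPTH 4096		/* hypothetical limit */

	struct foo_priv { u32 rx_depth; };	/* hypothetical private data */

	static void foo_get_ringparam(struct net_device *netdev,
				      struct ethtool_ringparam *ring,
				      struct kernel_ethtool_ringparam *kernel_ring,
				      struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(netdev);

		ring->rx_max_pending = FOO_MAX_RX_DEPTH;
		ring->rx_pending = priv->rx_depth;
		/* kernel_ring carries extended fields such as rx_buf_len;
		 * extack lets a set handler attach an error string via
		 * NL_SET_ERR_MSG_MOD(). Both may go unused. */
	}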
@@ -1205,8 +1209,6 @@ static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
return HINIC_RSS_INDIR_SIZE;
}
-#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))
-
#define HINIC_FUNC_STAT(_stat_item) { \
.name = #_stat_item, \
.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
@@ -1374,7 +1376,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
break;
hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
- for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) {
p = (char *)&txq_stats +
hinic_tx_queue_stats[j].offset;
data[i] = (hinic_tx_queue_stats[j].size ==
@@ -1387,7 +1389,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
break;
hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
- for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) {
p = (char *)&rxq_stats +
hinic_rx_queue_stats[j].offset;
data[i] = (hinic_rx_queue_stats[j].size ==
@@ -1411,7 +1413,7 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
netif_err(nic_dev, drv, netdev,
"Failed to get vport stats from firmware\n");
- for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) {
p = (char *)&vport_stats + hinic_function_stats[j].offset;
data[i] = (hinic_function_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
@@ -1420,8 +1422,8 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
if (!port_stats) {
memset(&data[i], 0,
- ARRAY_LEN(hinic_port_stats) * sizeof(*data));
- i += ARRAY_LEN(hinic_port_stats);
+ ARRAY_SIZE(hinic_port_stats) * sizeof(*data));
+ i += ARRAY_SIZE(hinic_port_stats);
goto get_drv_stats;
}
@@ -1430,7 +1432,7 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
netif_err(nic_dev, drv, netdev,
"Failed to get port stats from firmware\n");
- for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) {
p = (char *)port_stats + hinic_port_stats[j].offset;
data[i] = (hinic_port_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
@@ -1449,14 +1451,14 @@ static int hinic_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_TEST:
- return ARRAY_LEN(hinic_test_strings);
+ return ARRAY_SIZE(hinic_test_strings);
case ETH_SS_STATS:
q_num = nic_dev->num_qps;
- count = ARRAY_LEN(hinic_function_stats) +
- (ARRAY_LEN(hinic_tx_queue_stats) +
- ARRAY_LEN(hinic_rx_queue_stats)) * q_num;
+ count = ARRAY_SIZE(hinic_function_stats) +
+ (ARRAY_SIZE(hinic_tx_queue_stats) +
+ ARRAY_SIZE(hinic_rx_queue_stats)) * q_num;
- count += ARRAY_LEN(hinic_port_stats);
+ count += ARRAY_SIZE(hinic_port_stats);
return count;
default:
@@ -1476,27 +1478,27 @@ static void hinic_get_strings(struct net_device *netdev,
memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
return;
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
memcpy(p, hinic_function_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
memcpy(p, hinic_port_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
sprintf(p, hinic_tx_queue_stats[j].name, i);
p += ETH_GSTRING_LEN;
}
}
for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
sprintf(p, hinic_rx_queue_stats[j].name, i);
p += ETH_GSTRING_LEN;
}
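Dropping the driver-local ARRAY_LEN() for the kernel's ARRAY_SIZE() is more than cosmetic: ARRAY_SIZE() (<linux/kernel.h>) embeds a __must_be_array() check, so a pointer that has decayed from an array is a build error, whereas the removed cast-heavy macro would silently compute nonsense. For example:

	#include <linux/kernel.h>

	struct foo_stat { const char *name; };	/* hypothetical stats entry */

	static const struct foo_stat foo_stats[] = {
		{ .name = "rx_ok" },
		{ .name = "tx_ok" },
	};

	static size_t foo_stat_count(void)
	{
		/* Compiles only because foo_stats is a true array; passing
		 * a decayed pointer here fails the __must_be_array() check. */
		return ARRAY_SIZE(foo_stats);
	}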
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index 06586173add7..998717f02136 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -814,7 +814,6 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
{
struct hinic_hwif *hwif = attr->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t cell_ctxt_size;
chain->hwif = hwif;
chain->chain_type = attr->chain_type;
@@ -826,8 +825,8 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
sema_init(&chain->sem, 1);
- cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
- chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL);
+ chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells,
+ sizeof(*chain->cell_ctxt), GFP_KERNEL);
if (!chain->cell_ctxt)
return -ENOMEM;
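The same conversion repeats through the remaining hinic hunks: devm_kcalloc(dev, n, size, flags) zeroes the allocation like devm_kzalloc() but checks the n * size multiplication for overflow, which lets each call site drop its open-coded size arithmetic and the local size_t holding it. The resulting idiom, sketched generically:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct cell { u32 state; };	/* hypothetical element type */

	static int alloc_cells(struct device *dev, struct cell **out, int num)
	{
		/* Overflow-checked, zeroed, and freed automatically when the
		 * device is unbound (devres-managed). */
		*out = devm_kcalloc(dev, num, sizeof(**out), GFP_KERNEL);
		if (!*out)
			return -ENOMEM;
		return 0;
	}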
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 307a6d4af993..a627237f694b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -796,11 +796,10 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
struct hinic_cmdq_ctxt *cmdq_ctxts;
struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- size_t cmdq_ctxts_size;
int err;
- cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
- cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
+ cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdq_ctxts), GFP_KERNEL);
if (!cmdq_ctxts)
return -ENOMEM;
@@ -884,7 +883,6 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
struct pci_dev *pdev = hwif->pdev;
struct hinic_hwdev *hwdev;
- size_t saved_wqs_size;
u16 max_wqe_size;
int err;
@@ -895,8 +893,8 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
if (!cmdqs->cmdq_buf_pool)
return -ENOMEM;
- saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
- cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
+ cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdqs->saved_wqs), GFP_KERNEL);
if (!cmdqs->saved_wqs) {
err = -ENOMEM;
goto err_saved_wqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 657a15447bd0..2127a48749a8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -162,7 +162,6 @@ static int init_msix(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
int nr_irqs, num_aeqs, num_ceqs;
- size_t msix_entries_size;
int i, err;
num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
@@ -171,8 +170,8 @@ static int init_msix(struct hinic_hwdev *hwdev)
if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
- msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries);
- hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size,
+ hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs,
+ sizeof(*hwdev->msix_entries),
GFP_KERNEL);
if (!hwdev->msix_entries)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index d3fc05a07fdb..045c47786a04 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -631,16 +631,15 @@ static int alloc_eq_pages(struct hinic_eq *eq)
struct hinic_hwif *hwif = eq->hwif;
struct pci_dev *pdev = hwif->pdev;
u32 init_val, addr, val;
- size_t addr_size;
int err, pg;
- addr_size = eq->num_pages * sizeof(*eq->dma_addr);
- eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->dma_addr), GFP_KERNEL);
if (!eq->dma_addr)
return -ENOMEM;
- addr_size = eq->num_pages * sizeof(*eq->virt_addr);
- eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->virt_addr), GFP_KERNEL);
if (!eq->virt_addr) {
err = -ENOMEM;
goto err_virt_addr_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index a6e43d686293..c4a0ba6e183a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -375,31 +375,30 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
{
struct hinic_hwif *hwif = func_to_io->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t qps_size, wq_size, db_size;
void *ci_addr_base;
int i, j, err;
- qps_size = num_qps * sizeof(*func_to_io->qps);
- func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
+ func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->qps), GFP_KERNEL);
if (!func_to_io->qps)
return -ENOMEM;
- wq_size = num_qps * sizeof(*func_to_io->sq_wq);
- func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
+ func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->sq_wq), GFP_KERNEL);
if (!func_to_io->sq_wq) {
err = -ENOMEM;
goto err_sq_wq;
}
- wq_size = num_qps * sizeof(*func_to_io->rq_wq);
- func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
+ func_to_io->rq_wq = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->rq_wq), GFP_KERNEL);
if (!func_to_io->rq_wq) {
err = -ENOMEM;
goto err_rq_wq;
}
- db_size = num_qps * sizeof(*func_to_io->sq_db);
- func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
+ func_to_io->sq_db = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->sq_db), GFP_KERNEL);
if (!func_to_io->sq_db) {
err = -ENOMEM;
goto err_sq_db;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 7f0f1aa3cedd..2d9b06d7caad 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -193,20 +193,20 @@ static int alloc_page_arrays(struct hinic_wqs *wqs)
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wqs->num_pages * sizeof(*wqs->page_paddr);
- wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_paddr), GFP_KERNEL);
if (!wqs->page_paddr)
return -ENOMEM;
- size = wqs->num_pages * sizeof(*wqs->page_vaddr);
- wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_vaddr), GFP_KERNEL);
if (!wqs->page_vaddr)
goto err_page_vaddr;
- size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
- wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->shadow_page_vaddr),
+ GFP_KERNEL);
if (!wqs->shadow_page_vaddr)
goto err_page_shadow_vaddr;
@@ -379,15 +379,14 @@ static int alloc_wqes_shadow(struct hinic_wq *wq)
{
struct hinic_hwif *hwif = wq->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wq->num_q_pages * wq->max_wqe_size;
- wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ wq->max_wqe_size, GFP_KERNEL);
if (!wq->shadow_wqe)
return -ENOMEM;
- size = wq->num_q_pages * sizeof(wq->prod_idx);
- wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ sizeof(wq->prod_idx), GFP_KERNEL);
if (!wq->shadow_idx)
goto err_shadow_idx;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index f9a766b8ac43..05329292d940 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -144,13 +144,12 @@ static int create_txqs(struct hinic_dev *nic_dev)
{
int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t txq_size;
if (nic_dev->txqs)
return -EINVAL;
- txq_size = num_txqs * sizeof(*nic_dev->txqs);
- nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
+ nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs,
+ sizeof(*nic_dev->txqs), GFP_KERNEL);
if (!nic_dev->txqs)
return -ENOMEM;
@@ -241,13 +240,12 @@ static int create_rxqs(struct hinic_dev *nic_dev)
{
int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t rxq_size;
if (nic_dev->rxqs)
return -EINVAL;
- rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
- nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
+ nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs,
+ sizeof(*nic_dev->rxqs), GFP_KERNEL);
if (!nic_dev->rxqs)
return -ENOMEM;
@@ -1394,12 +1392,8 @@ static int hinic_probe(struct pci_dev *pdev,
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "Failed to set DMA mask\n");
- goto err_dma_mask;
- }
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto err_dma_mask;
}
err = nic_dev_init(pdev);
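The removed 32-bit fallback was dead code: the DMA API guarantees that setting a 64-bit mask never fails on a platform where a 32-bit mask would succeed, so the probe can fail outright on the first error. The whole sequence reduces to:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int foo_init_dma(struct pci_dev *pdev)
	{
		/* A 64-bit mask is never rejected where a 32-bit mask would
		 * be accepted, so no stepped fallback is needed. */
		int err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

		if (err)
			dev_err(&pdev->dev, "Failed to set DMA mask\n");
		return err;
	}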
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index fed3b6bc0d76..b33ed4d92b71 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -548,7 +548,7 @@ static int rx_request_irq(struct hinic_rxq *rxq)
goto err_req_irq;
cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
- err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+ err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
if (err)
goto err_irq_affinity;
@@ -565,7 +565,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
{
struct hinic_rq *rq = rxq->rq;
- irq_set_affinity_hint(rq->irq, NULL);
+ irq_update_affinity_hint(rq->irq, NULL);
free_irq(rq->irq, rxq);
rx_del_napi(rxq);
}
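irq_set_affinity_hint() was misleadingly named: despite the name it both applied the mask and stored it as a hint for user-space irqbalance. The replacement API makes the two effects explicit: irq_set_affinity_and_hint() applies and publishes the mask, while irq_update_affinity_hint() touches only the hint, which is why the teardown path clears it with NULL. A sketch of the pairing:

	#include <linux/cpumask.h>
	#include <linux/interrupt.h>

	static int foo_rx_irq_setup(unsigned int irq, int cpu, struct cpumask *mask)
	{
		cpumask_clear(mask);
		cpumask_set_cpu(cpu, mask);
		/* Applies the affinity *and* records it as the hint */
		return irq_set_affinity_and_hint(irq, mask);
	}

	static void foo_rx_irq_teardown(unsigned int irq, void *dev_id)
	{
		/* Clears only the hint; no affinity change is forced */
		irq_update_affinity_hint(irq, NULL);
		free_irq(irq, dev_id);
	}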
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c5bdb0d374ef..8d59babbf476 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -4,6 +4,7 @@
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*/
+#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
@@ -862,7 +863,6 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
int err, irqname_len;
- size_t sges_size;
txq->netdev = netdev;
txq->sq = sq;
@@ -871,13 +871,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
- sges_size = txq->max_sges * sizeof(*txq->sges);
- txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->sges), GFP_KERNEL);
if (!txq->sges)
return -ENOMEM;
- sges_size = txq->max_sges * sizeof(*txq->free_sges);
- txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->free_sges), GFP_KERNEL);
if (!txq->free_sges) {
err = -ENOMEM;
goto err_alloc_free_sges;
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index b482f6f633bd..3ee89ae496d0 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1178,7 +1178,8 @@ found:
DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
for (i = 0; i < 6; i++)
- DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
+ DEB(DEB_PROBE,printk(" %2.2X", eth_addr[i]));
+ eth_hw_addr_set(dev, eth_addr);
DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index c612ef526d16..3e7d7c4bafdc 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -986,6 +986,7 @@ static int
ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct net_device *dev;
+ u8 addr[ETH_ALEN];
int i, ret = 0;
ether1_banner();
@@ -1015,7 +1016,8 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ eth_hw_addr_set(dev, addr);
if (ether1_init_2(dev)) {
ret = -ENODEV;
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index 48e001881c75..0af70094aba3 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -147,6 +147,7 @@ lan_init_chip(struct parisc_device *dev)
struct net_device *netdevice;
struct i596_private *lp;
int retval = -ENOMEM;
+ u8 addr[ETH_ALEN];
int i;
if (!dev->irq) {
@@ -167,13 +168,14 @@ lan_init_chip(struct parisc_device *dev)
netdevice->base_addr = dev->hpa.start;
netdevice->irq = dev->irq;
- if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) {
+ if (pdc_lan_station_id(addr, netdevice->base_addr)) {
for (i = 0; i < 6; i++) {
- netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
+ addr[i] = gsc_readb(LAN_PROM_ADDR + i);
}
printk(KERN_INFO
"%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
}
+ eth_hw_addr_set(netdevice, addr);
lp = netdev_priv(netdevice);
lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;
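These i825xx conversions follow from the effort to make netdev->dev_addr const: drivers may no longer write the bytes in place, so each probe path gathers the address into a local buffer and installs it with eth_hw_addr_set(). The pattern, with a hypothetical PROM reader:

	#include <linux/etherdevice.h>

	static u8 foo_prom_readb(int i)
	{
		return 0;	/* stand-in for the device's PROM access */
	}

	static void foo_read_mac(struct net_device *dev)
	{
		u8 addr[ETH_ALEN];
		int i;

		for (i = 0; i < ETH_ALEN; i++)
			addr[i] = foo_prom_readb(i);

		eth_hw_addr_set(dev, addr);	/* copies into dev->dev_addr */
	}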
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 27937c5d7956..daec9ce04531 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -117,9 +117,10 @@ static int sni_82596_probe(struct platform_device *dev)
netdevice->dev_addr[5] = readb(eth_addr + 0x06);
iounmap(eth_addr);
- if (!netdevice->irq) {
+ if (netdevice->irq < 0) {
printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
__FILE__, netdevice->base_addr);
+ retval = netdevice->irq;
goto probe_failed;
}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 6b3fc8823c54..fbea9f7efe8c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2137,8 +2137,11 @@ emac_ethtool_set_link_ksettings(struct net_device *ndev,
return 0;
}
-static void emac_ethtool_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *rp)
+static void
+emac_ethtool_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 45ba40cf4d07..22fb0d109a68 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1890,6 +1890,7 @@ static struct attribute *veth_pool_attrs[] = {
&veth_size_attr,
NULL,
};
+ATTRIBUTE_GROUPS(veth_pool);
static const struct sysfs_ops veth_pool_ops = {
.show = veth_pool_show,
@@ -1899,7 +1900,7 @@ static const struct sysfs_ops veth_pool_ops = {
static struct kobj_type ktype_veth_pool = {
.release = NULL,
.sysfs_ops = &veth_pool_ops,
- .default_attrs = veth_pool_attrs,
+ .default_groups = veth_pool_groups,
};
static int ibmveth_resume(struct device *dev)
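struct kobj_type's .default_attrs field is on its way out in favor of .default_groups; ATTRIBUTE_GROUPS(veth_pool) generates veth_pool_group and veth_pool_groups[] from the existing veth_pool_attrs[] array, so the ktype only needs its field swapped. The generic shape (attribute and ops names hypothetical):

	#include <linux/kobject.h>
	#include <linux/sysfs.h>

	static struct attribute demo_size_attr = {
		.name = "size", .mode = 0444,
	};

	static struct attribute *demo_attrs[] = {
		&demo_size_attr,
		NULL,			/* must stay NULL-terminated */
	};
	ATTRIBUTE_GROUPS(demo);		/* emits demo_group and demo_groups[] */

	static const struct sysfs_ops demo_sysfs_ops;	/* show/store elided */

	static struct kobj_type demo_ktype = {
		.sysfs_ops	= &demo_sysfs_ops,
		.default_groups	= demo_groups,	/* replaces .default_attrs */
	};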
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0bb3911dd014..bda7a2a9d211 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -308,7 +308,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map LTB, rc = %d\n",
adapter->fw_done_rc);
- rc = -1;
+ rc = -EIO;
goto out;
}
rc = 0;
@@ -540,13 +540,15 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
dma_addr_t stok;
+ int rc;
stok = dma_map_single(dev, &adapter->stats,
sizeof(struct ibmvnic_statistics),
DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, stok)) {
- dev_err(dev, "Couldn't map stats buffer\n");
- return -1;
+ rc = dma_mapping_error(dev, stok);
+ if (rc) {
+ dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
+ return rc;
}
adapter->stats_token = stok;
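A bare return -1 reaches user space as -EPERM once callers propagate rc, which is why the ibmvnic changes below switch every failure path to a specific errno (-ENOMEM, -EIO, -ETIMEDOUT, -EAGAIN, ...). They also lean on dma_mapping_error() returning an errno (-ENOMEM) rather than a boolean, so the result can be logged and returned as-is:

	#include <linux/dma-mapping.h>

	static int foo_map_stats(struct device *dev, void *buf, size_t len,
				 dma_addr_t *out)
	{
		int rc;

		*out = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		rc = dma_mapping_error(dev, *out);	/* 0 or -ENOMEM */
		if (rc)
			dev_err(dev, "Couldn't map buffer, rc = %d\n", rc);
		return rc;	/* a real errno, never a bare -1 */
	}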
@@ -655,7 +657,7 @@ static int init_rx_pools(struct net_device *netdev)
u64 num_pools;
u64 pool_size; /* # of buffers in one pool */
u64 buff_size;
- int i, j;
+ int i, j, rc;
pool_size = adapter->req_rx_add_entries_per_subcrq;
num_pools = adapter->req_rx_queues;
@@ -674,7 +676,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!adapter->rx_pool) {
dev_err(dev, "Failed to allocate rx pools\n");
- return -1;
+ return -ENOMEM;
}
/* Set num_active_rx_pools early. If we fail below after partial
@@ -697,6 +699,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->free_map) {
dev_err(dev, "Couldn't alloc free_map %d\n", i);
+ rc = -ENOMEM;
goto out_release;
}
@@ -705,6 +708,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->rx_buff) {
dev_err(dev, "Couldn't alloc rx buffers\n");
+ rc = -ENOMEM;
goto out_release;
}
}
@@ -718,8 +722,9 @@ update_ltb:
dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
i, rx_pool->size, rx_pool->buff_size);
- if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size))
+ rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+ rx_pool->size * rx_pool->buff_size);
+ if (rc)
goto out;
for (j = 0; j < rx_pool->size; ++j) {
@@ -756,7 +761,7 @@ out:
/* We failed to allocate one or more LTBs or map them on the VIOS.
* Hold onto the pools and any LTBs that we did allocate/map.
*/
- return -1;
+ return rc;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
@@ -817,13 +822,13 @@ static int init_one_tx_pool(struct net_device *netdev,
sizeof(struct ibmvnic_tx_buff),
GFP_KERNEL);
if (!tx_pool->tx_buff)
- return -1;
+ return -ENOMEM;
tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map) {
kfree(tx_pool->tx_buff);
tx_pool->tx_buff = NULL;
- return -1;
+ return -ENOMEM;
}
for (i = 0; i < pool_size; i++)
@@ -914,7 +919,7 @@ static int init_tx_pools(struct net_device *netdev)
adapter->tx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
- return -1;
+ return -ENOMEM;
adapter->tso_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
@@ -924,7 +929,7 @@ static int init_tx_pools(struct net_device *netdev)
if (!adapter->tso_pool) {
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
- return -1;
+ return -ENOMEM;
}
/* Set num_active_tx_pools early. If we fail below after partial
@@ -1113,7 +1118,7 @@ static int ibmvnic_login(struct net_device *netdev)
retry = false;
if (retry_count > retries) {
netdev_warn(netdev, "Login attempts exceeded\n");
- return -1;
+ return -EACCES;
}
adapter->init_done_rc = 0;
@@ -1154,25 +1159,26 @@ static int ibmvnic_login(struct net_device *netdev)
timeout)) {
netdev_warn(netdev,
"Capabilities query timed out\n");
- return -1;
+ return -ETIMEDOUT;
}
rc = init_sub_crqs(adapter);
if (rc) {
netdev_warn(netdev,
"SCRQ initialization failed\n");
- return -1;
+ return rc;
}
rc = init_sub_crq_irqs(adapter);
if (rc) {
netdev_warn(netdev,
"SCRQ irq initialization failed\n");
- return -1;
+ return rc;
}
} else if (adapter->init_done_rc) {
- netdev_warn(netdev, "Adapter login failed\n");
- return -1;
+ netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
+ adapter->init_done_rc);
+ return -EIO;
}
} while (retry);
@@ -1231,7 +1237,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
netdev_err(netdev, "timeout setting link state\n");
- return -1;
+ return -ETIMEDOUT;
}
if (adapter->init_done_rc == PARTIALSUCCESS) {
@@ -2042,7 +2048,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_packets++;
tx_bytes += skb->len;
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
@@ -2288,7 +2294,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* If someone else changed the adapter state
* when we dropped the rtnl, fail the reset
*/
- rc = -1;
+ rc = -EAGAIN;
goto out;
}
adapter->state = VNIC_CLOSED;
@@ -2330,10 +2336,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
}
rc = ibmvnic_reset_init(adapter, true);
- if (rc) {
- rc = IBMVNIC_INIT_FAILED;
+ if (rc)
goto out;
- }
/* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here.
@@ -2598,6 +2602,7 @@ static void __ibmvnic_reset(struct work_struct *work)
struct ibmvnic_rwi *rwi;
unsigned long flags;
u32 reset_state;
+ int num_fails = 0;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
@@ -2651,11 +2656,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rc = do_hard_reset(adapter, rwi, reset_state);
rtnl_unlock();
}
- if (rc) {
- /* give backing device time to settle down */
+ if (rc)
+ num_fails++;
+ else
+ num_fails = 0;
+
+ /* If auto-priority-failover is enabled we can get
+ * back to back failovers during resets, resulting
+ * in at least two failed resets (from high-priority
+ * backing device to low-priority one and then back)
+ * If resets continue to fail beyond that, give the
+ * adapter some time to settle down before retrying.
+ */
+ if (num_fails >= 3) {
netdev_dbg(adapter->netdev,
- "[S:%s] Hard reset failed, waiting 60 secs\n",
- adapter_state_to_string(adapter->state));
+ "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
+ adapter_state_to_string(adapter->state),
+ num_fails);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
@@ -3072,7 +3089,9 @@ static u32 ibmvnic_get_link(struct net_device *netdev)
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -3092,7 +3111,9 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
}
static int ibmvnic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int ret;
@@ -3759,7 +3780,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
if (!allqueues)
- return -1;
+ return -ENOMEM;
for (i = 0; i < total_queues; i++) {
allqueues[i] = init_sub_crq_queue(adapter);
@@ -3828,7 +3849,7 @@ tx_failed:
for (i = 0; i < registered_queues; i++)
release_sub_crq_queue(adapter, allqueues[i], 1);
kfree(allqueues);
- return -1;
+ return -ENOMEM;
}
static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
@@ -3836,11 +3857,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
int max_entries;
+ int cap_reqs;
+
+ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
+ * the PROMISC flag). Initialize this count upfront. When the tasklet
+ * receives a response to all of these, it will send the next protocol
+ * message (QUERY_IP_OFFLOAD).
+ */
+ if (!(adapter->netdev->flags & IFF_PROMISC) ||
+ adapter->promisc_supported)
+ cap_reqs = 7;
+ else
+ cap_reqs = 6;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
+
if (adapter->min_tx_entries_per_subcrq > entries_page ||
adapter->min_rx_add_entries_per_subcrq > entries_page) {
dev_err(dev, "Fatal, invalid entries per sub-crq\n");
@@ -3901,44 +3936,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+ } else {
+ atomic_add(cap_reqs, &adapter->running_cap_crqs);
}
-
memset(&crq, 0, sizeof(crq));
crq.request_capability.first = IBMVNIC_CRQ_CMD;
crq.request_capability.cmd = REQUEST_CAPABILITY;
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_tx_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
@@ -3946,16 +3982,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(1);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(0);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
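This rework closes a counting race: incrementing running_cap_crqs once per send could interleave with responses decrementing it, so the counter could hit zero while requests were still being issued, firing the next protocol step early. Setting the full expected count before the first send and decrementing a local shadow per message avoids that, and the trailing WARN_ON catches any drift between the promised and actual counts; send_query_cap() below gets the identical treatment with 25 messages. The pattern in miniature (names hypothetical):

	#include <linux/atomic.h>
	#include <linux/bug.h>

	#define NUM_CAP_MSGS 7	/* assumed message count */

	struct foo_adapter { atomic_t running_cap_crqs; };

	static void foo_send_cap(struct foo_adapter *ad, int i)
	{
		/* stand-in for building and sending one CRQ */
	}

	static void foo_send_caps(struct foo_adapter *ad)
	{
		int cap_reqs = NUM_CAP_MSGS;
		int i;

		/* Publish the full count before the first send so an early
		 * response cannot observe a partially-built counter. */
		atomic_set(&ad->running_cap_crqs, cap_reqs);

		for (i = 0; i < NUM_CAP_MSGS; i++) {
			foo_send_cap(ad, i);
			cap_reqs--;
		}

		WARN_ON(cap_reqs != 0);	/* comment and code disagree */
	}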
static int pending_scrq(struct ibmvnic_adapter *adapter,
@@ -4187,7 +4228,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
if (!adapter->tx_scrq || !adapter->rx_scrq) {
netdev_err(adapter->netdev,
"RX or TX queues are not allocated, device login failed\n");
- return -1;
+ return -ENOMEM;
}
release_login_buffer(adapter);
@@ -4307,7 +4348,7 @@ buf_map_failed:
kfree(login_buffer);
adapter->login_buf = NULL;
buf_alloc_failed:
- return -1;
+ return -ENOMEM;
}
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
@@ -4349,118 +4390,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter)
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
+ int cap_reqs;
+
+ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
+ * upfront. When the tasklet receives a response to all of these, it
+ * can send out the next protocol message (REQUEST_CAPABILITY).
+ */
+ cap_reqs = 25;
+
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
- atomic_set(&adapter->running_cap_crqs, 0);
memset(&crq, 0, sizeof(crq));
crq.query_capability.first = IBMVNIC_CRQ_CMD;
crq.query_capability.cmd = QUERY_CAPABILITY;
crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
- atomic_inc(&adapter->running_cap_crqs);
+
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
@@ -4764,6 +4819,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
char *name;
atomic_dec(&adapter->running_cap_crqs);
+ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
+ atomic_read(&adapter->running_cap_crqs));
switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
case REQ_TX_QUEUES:
req_value = &adapter->req_tx_queues;
@@ -4827,10 +4884,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
}
/* Done receiving requested capabilities, query IP offload support */
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_query_ip_offload(adapter);
- }
}
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
@@ -5128,10 +5183,8 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
}
out:
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_request_cap(adapter, 0);
- }
}
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
@@ -5427,33 +5480,21 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
struct ibmvnic_crq_queue *queue = &adapter->crq;
union ibmvnic_crq *crq;
unsigned long flags;
- bool done = false;
spin_lock_irqsave(&queue->lock, flags);
- while (!done) {
- /* Pull all the valid messages off the CRQ */
- while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
- /* This barrier makes sure ibmvnic_next_crq()'s
- * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
- * before ibmvnic_handle_crq()'s
- * switch(gen_crq->first) and switch(gen_crq->cmd).
- */
- dma_rmb();
- ibmvnic_handle_crq(crq, adapter);
- crq->generic.first = 0;
- }
- /* remain in tasklet until all
- * capabilities responses are received
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
*/
- if (!adapter->wait_capability)
- done = true;
+ dma_rmb();
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
}
- /* if capabilities CRQ's were sent in this tasklet, the following
- * tasklet must wait until all responses are received
- */
- if (atomic_read(&adapter->running_cap_crqs) != 0)
- adapter->wait_capability = true;
+
spin_unlock_irqrestore(&queue->lock, flags);
}
@@ -5628,7 +5669,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
- return -1;
+ return -ETIMEDOUT;
}
if (adapter->init_done_rc) {
@@ -5639,7 +5680,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (adapter->from_passive_init) {
adapter->state = VNIC_OPEN;
adapter->from_passive_init = false;
- return -1;
+ return -EINVAL;
}
if (reset &&
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index b8e42f67d897..4a7a56ff74ce 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -18,8 +18,6 @@
#define IBMVNIC_NAME "ibmvnic"
#define IBMVNIC_DRIVER_VERSION "1.0.1"
#define IBMVNIC_INVALID_MAP -1
-#define IBMVNIC_STATS_TIMEOUT 1
-#define IBMVNIC_INIT_FAILED 2
#define IBMVNIC_OPEN_FAILED 3
/* basic structures plus 100 2k buffers */
@@ -921,7 +919,6 @@ struct ibmvnic_adapter {
int login_rsp_buf_sz;
atomic_t running_cap_crqs;
- bool wait_capability;
struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0b274d8fa45b..3facb55b7161 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -327,6 +327,16 @@ config ICE_SWITCHDEV
If unsure, say N.
+config ICE_HWTS
+ bool "Support HW cross-timestamp on platforms with PTM support"
+ default y
+ depends on ICE && X86
+ help
+ Say Y to enable hardware supported cross-timestamping on platforms
+ with PCIe PTM support. The cross-timestamp is available through
+ the PTP clock driver precise cross-timestamp ioctl
+ (PTP_SYS_OFFSET_PRECISE).
+
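For reference, user space reaches the cross-timestamp this option enables through the PTP character device ioctl named in the help text; a hedged sketch (clock index /dev/ptp0 assumed):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_sys_offset_precise off = { 0 };
		int fd = open("/dev/ptp0", O_RDONLY);

		if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off) < 0) {
			perror("PTP_SYS_OFFSET_PRECISE");
			return 1;
		}
		printf("device %lld.%09u <-> realtime %lld.%09u\n",
		       (long long)off.device.sec, off.device.nsec,
		       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
		return 0;
	}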
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 0bf3d47bb90d..4a8013f20152 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2557,7 +2557,9 @@ static int e100_set_eeprom(struct net_device *netdev,
}
static void e100_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nic *nic = netdev_priv(netdev);
struct param_range *rfds = &nic->params.rfds;
@@ -2570,7 +2572,9 @@ static void e100_get_ringparam(struct net_device *netdev,
}
static int e100_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nic *nic = netdev_priv(netdev);
struct param_range *rfds = &nic->params.rfds;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 0a57172dfcbc..32803b0cf1e8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -539,7 +539,9 @@ static void e1000_get_drvinfo(struct net_device *netdev,
}
static void e1000_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -556,7 +558,9 @@ static void e1000_get_ringparam(struct net_device *netdev,
}
static int e1000_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 669060a2e6aa..3f5feb55cfba 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1953,7 +1953,8 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
- struct e1000_tx_buffer *buffer_info)
+ struct e1000_tx_buffer *buffer_info,
+ int budget)
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
@@ -1966,7 +1967,7 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
buffer_info->dma = 0;
}
if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
+ napi_consume_skb(buffer_info->skb, budget);
buffer_info->skb = NULL;
}
buffer_info->time_stamp = 0;
@@ -1990,7 +1991,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
for (i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
- e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
}
netdev_reset_queue(adapter->netdev);
@@ -2958,7 +2959,7 @@ dma_error:
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
- e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
}
return 0;
@@ -3856,7 +3857,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
}
}
- e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info,
+ 64);
tx_desc->upper.data = 0;
if (unlikely(++i == tx_ring->count))
@@ -4382,7 +4384,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (!skb) {
unsigned int frag_len = e1000_frag_len(adapter);
- skb = build_skb(data - E1000_HEADROOM, frag_len);
+ skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
if (!skb) {
adapter->alloc_rx_buff_failed++;
break;
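These e1000 changes adopt the NAPI-aware skb helpers: napi_build_skb() builds the skb head using the per-CPU NAPI cache, and napi_consume_skb(skb, budget) can recycle a freed skb into that cache when called from a poll routine. A budget of 0 means "not in NAPI context" and degrades to a context-safe free, which is why the ring-teardown and DMA-error paths pass 0 while the TX clean path passes its 64-packet budget:

	#include <linux/skbuff.h>

	static void foo_free_tx_skb(struct sk_buff *skb, int budget)
	{
		/* budget > 0 only when invoked from a NAPI poll routine;
		 * with 0, napi_consume_skb() falls back to
		 * dev_consume_skb_any(). */
		napi_consume_skb(skb, budget);
	}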
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 8515e00d1b40..b80ae9a82224 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -655,7 +655,9 @@ static void e1000_get_drvinfo(struct net_device *netdev,
}
static void e1000_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -666,7 +668,9 @@ static void e1000_get_ringparam(struct net_device *netdev,
}
static int e1000_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_ring *temp_tx = NULL, *temp_rx = NULL;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 44e2dc8328a2..635a95927e93 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3614,10 +3614,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return -EINVAL;
- /* flags reserved for future extensions - must be zero */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 0d37f011d0ce..d53369e30040 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -502,7 +502,9 @@ static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
}
static void fm10k_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
@@ -517,7 +519,9 @@ static void fm10k_get_ringparam(struct net_device *netdev,
}
static int fm10k_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 21eff0895a7a..f6d56867f857 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -143,7 +143,7 @@ s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id,
* @vlan: location of buffer to store VLAN
*
* This function pulls the MAC address back out of the attribute and will
- * place it in the array pointed by by mac_addr. It will return success
+ * place it in the array pointed by mac_addr. It will return success
* if provided with valid pointers.
**/
s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4d939af0a626..2e02cc68cd3f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -174,7 +174,6 @@ enum i40e_interrupt_policy {
struct i40e_lump_tracking {
u16 num_entries;
- u16 search_hint;
u16 list[0];
#define I40E_PILE_VALID_BIT 0x8000
#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
@@ -848,12 +847,12 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
- u32 tx_restart;
- u32 tx_busy;
+ u64 tx_restart;
+ u64 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
- u32 rx_buf_failed;
- u32 rx_page_failed;
+ u64 rx_buf_failed;
+ u64 rx_page_failed;
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 593912b17609..7abef88801fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -769,21 +769,22 @@ static bool i40e_asq_done(struct i40e_hw *hw)
}
/**
- * i40e_asq_send_command - send command to Admin Queue
+ * i40e_asq_send_command_atomic - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
* @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
-i40e_status i40e_asq_send_command(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
+i40e_status
+i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
{
i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
@@ -910,7 +911,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- udelay(50);
+
+ if (is_atomic_context)
+ udelay(50);
+ else
+ usleep_range(40, 60);
+
total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -967,6 +973,15 @@ asq_send_command_error:
return status;
}
+i40e_status
+i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
+ cmd_details, false);
+}
+
/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
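i40e_asq_send_command_atomic() keeps the udelay() busy-wait only for callers that genuinely cannot sleep; everyone else now goes through usleep_range(), which yields the CPU via hrtimers, and the old entry point becomes a wrapper passing is_atomic_context = false. The underlying idiom, sketched with a caller-supplied completion test so the snippet stays self-contained:

#include <linux/delay.h>
#include <linux/errno.h>

/* Sketch: poll for completion, picking the delay primitive by context.
 * udelay() spins; usleep_range() sleeps and must not be called with
 * spinlocks held or interrupts disabled.
 */
static int ex_poll_done(bool (*done)(void *ctx), void *ctx,
                        unsigned int timeout_us, bool is_atomic_context)
{
    unsigned int total_delay = 0;

    do {
        if (done(ctx))
            return 0;

        if (is_atomic_context)
            udelay(50);
        else
            usleep_range(40, 60);

        total_delay += 50;      /* nominal step, as in the patch */
    } while (total_delay < timeout_us);

    return -ETIMEDOUT;
}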
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 140b677f114d..60f9e0a6aaca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,8 +11,8 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0009
-#define I40E_FW_API_VERSION_MINOR_X710 0x0009
+#define I40E_FW_API_VERSION_MINOR_X722 0x000C
+#define I40E_FW_API_VERSION_MINOR_X710 0x000F
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index b4d3fed0d2f2..9ddeb015eb7e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -154,8 +154,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
return "I40E_ERR_INVALID_MAC_ADDR";
case I40E_ERR_DEVICE_NOT_SUPPORTED:
return "I40E_ERR_DEVICE_NOT_SUPPORTED";
- case I40E_ERR_MASTER_REQUESTS_PENDING:
- return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_PRIMARY_REQUESTS_PENDING:
+ return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
case I40E_ERR_INVALID_LINK_SETTINGS:
return "I40E_ERR_INVALID_LINK_SETTINGS";
case I40E_ERR_AUTONEG_NOT_COMPLETE:
@@ -2073,7 +2073,8 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
return status;
}
@@ -2114,7 +2115,8 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
return status;
}
@@ -4136,7 +4138,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings)
{
u32 fcoe_cntx_size, fcoe_filt_size;
- u32 pe_cntx_size, pe_filt_size;
u32 fcoe_fmax;
u32 val;
@@ -4180,8 +4181,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
case I40E_HASH_FILTER_SIZE_256K:
case I40E_HASH_FILTER_SIZE_512K:
case I40E_HASH_FILTER_SIZE_1M:
- pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
- pe_filt_size <<= (u32)settings->pe_filt_num;
break;
default:
return I40E_ERR_PARAM;
@@ -4198,8 +4197,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
case I40E_DMA_CNTX_SIZE_64K:
case I40E_DMA_CNTX_SIZE_128K:
case I40E_DMA_CNTX_SIZE_256K:
- pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
- pe_cntx_size <<= (u32)settings->pe_cntx_num;
break;
default:
return I40E_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 2c1b1da1220e..1e57cc8c47d7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
(unsigned long int)vsi->net_stats_offsets.rx_compressed,
(unsigned long int)vsi->net_stats_offsets.tx_compressed);
dev_info(&pf->pdev->dev,
- " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
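The VSI counters changed above grow from u32 to u64 so long-running, busy interfaces cannot wrap them, and this debugfs dump switches its format specifiers from %d to %llu to match. In the kernel, u64 is always unsigned long long, so %llu is correct on every architecture without a cast. A trivial sketch:

#include <linux/types.h>
#include <linux/device.h>

struct ex_vsi_stats {           /* illustrative subset */
    u64 tx_restart;
    u64 tx_busy;
};

static void ex_dump_stats(struct device *dev, const struct ex_vsi_stats *s)
{
    /* u64 pairs with %llu; mixing u64 with %d is a format-string bug */
    dev_info(dev, "tx_restart = %llu, tx_busy = %llu\n",
             s->tx_restart, s->tx_busy);
}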
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 513ba6974355..091f36adbbe1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1916,7 +1916,9 @@ static void i40e_get_drvinfo(struct net_device *netdev,
}
static void i40e_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -1944,7 +1946,9 @@ static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
}
static int i40e_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
struct i40e_netdev_priv *np = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 61afc220fc6c..f70c478dafdb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -196,10 +196,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
* @id: an owner id to stick on the items assigned
*
* Returns the base item index of the lump, or negative for error
- *
- * The search_hint trick and lack of advanced fit-finding only work
- * because we're highly likely to have all the same size lump requests.
- * Linear search time and any fragmentation should be minimal.
**/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
u16 needed, u16 id)
@@ -214,8 +210,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
return -EINVAL;
}
- /* start the linear search with an imperfect hint */
- i = pile->search_hint;
+ /* Allocate last queue in the pile for FDIR VSI queue
+ * so it doesn't fragment the qp_pile
+ */
+ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
+ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
+ pile->num_entries - 1);
+ return -ENOMEM;
+ }
+ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
+ return pile->num_entries - 1;
+ }
+
+ i = 0;
while (i < pile->num_entries) {
/* skip already allocated entries */
if (pile->list[i] & I40E_PILE_VALID_BIT) {
@@ -234,7 +243,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
for (j = 0; j < needed; j++)
pile->list[i+j] = id | I40E_PILE_VALID_BIT;
ret = i;
- pile->search_hint = i + j;
break;
}
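With search_hint removed, i40e_get_lump() is a plain first-fit scan from index 0, and the FDIR VSI's single queue is pinned to the very last slot so it can never fragment the middle of the pile. The core first-fit logic, reduced to a self-contained sketch (I40E-style valid bit, illustrative names):

#include <linux/types.h>
#include <linux/errno.h>

#define EX_PILE_VALID_BIT   0x8000

/* Sketch: allocate 'needed' contiguous entries from a u16 pile; each
 * taken entry stores the owner id plus a valid bit.
 */
static int ex_get_lump(u16 *list, u16 num_entries, u16 needed, u16 id)
{
    u16 i = 0, j;

    if (!needed || needed > num_entries)
        return -EINVAL;

    while (i < num_entries) {
        if (list[i] & EX_PILE_VALID_BIT) {
            i++;                            /* skip allocated entries */
            continue;
        }
        /* measure the free run starting here */
        for (j = 0; (i + j) < num_entries && j < needed; j++)
            if (list[i + j] & EX_PILE_VALID_BIT)
                break;
        if (j == needed) {                  /* hole is big enough */
            for (j = 0; j < needed; j++)
                list[i + j] = id | EX_PILE_VALID_BIT;
            return i;
        }
        i += j + 1;                         /* jump past the short hole */
    }
    return -ENOMEM;
}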
@@ -257,7 +265,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
int valid_id = (id | I40E_PILE_VALID_BIT);
int count = 0;
- int i;
+ u16 i;
if (!pile || index >= pile->num_entries)
return -EINVAL;
@@ -269,8 +277,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
count++;
}
- if (count && index < pile->search_hint)
- pile->search_hint = index;
return count;
}
@@ -772,9 +778,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
- u32 tx_restart, tx_busy;
+ u64 tx_restart, tx_busy;
struct i40e_ring *p;
- u32 rx_page, rx_buf;
+ u64 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
@@ -3915,10 +3921,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
*
* get_cpu_mask returns a static constant mask with
* a permanent lifetime so it's ok to pass to
- * irq_set_affinity_hint without making a copy.
+ * irq_update_affinity_hint without making a copy.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
vsi->irqs_ready = true;
@@ -3929,7 +3935,7 @@ free_queue_irqs:
vector--;
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -4750,7 +4756,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
/* clear the affinity notifier in the IRQ descriptor */
irq_set_affinity_notifier(irq_num, NULL);
/* remove our suggested affinity mask for this IRQ */
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
free_irq(irq_num, vsi->q_vectors[i]);
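irq_set_affinity_hint() both records the hint and forcibly applies it as the IRQ's affinity; irq_update_affinity_hint(), used throughout this series, only records the hint for consumers such as irqbalance and leaves the effective affinity untouched. The conversion is one-for-one, including the NULL calls on teardown. The spread-hints idiom, sketched:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Sketch: suggest (but do not force) one CPU per queue vector.
 * get_cpu_mask() returns a static mask with permanent lifetime, so
 * it can be handed to irq_update_affinity_hint() without a copy.
 */
static void ex_spread_irq_hints(const unsigned int *irqs, unsigned int nvecs)
{
    unsigned int v, cpu;

    for (v = 0; v < nvecs; v++) {
        cpu = cpumask_local_spread(v, -1);  /* -1: no NUMA preference */
        irq_update_affinity_hint(irqs[v], get_cpu_mask(cpu));
    }
}

static void ex_clear_irq_hints(const unsigned int *irqs, unsigned int nvecs)
{
    unsigned int v;

    for (v = 0; v < nvecs; v++)
        irq_update_affinity_hint(irqs[v], NULL);    /* drop the hint */
}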
@@ -10574,15 +10580,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
i40e_get_oem_version(&pf->hw);
- if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
- hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
- /* The following delay is necessary for 4.33 firmware and older
- * to recover after EMP reset. 200 ms should suffice but we
- * put here 300 ms to be sure that FW is ready to operate
- * after reset.
- */
- mdelay(300);
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
+ /* The following delay is necessary for firmware update. */
+ mdelay(1000);
}
/* re-verify the eeprom if we just had an EMP reset */
@@ -11792,7 +11792,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
return -ENOMEM;
pf->irq_pile->num_entries = vectors;
- pf->irq_pile->search_hint = 0;
/* track first vector for misc interrupts, ignore return */
(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
@@ -12595,7 +12594,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
goto sw_init_done;
}
pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
- pf->qp_pile->search_hint = 0;
pf->tx_timeout_recovery_level = 1;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index aaea297640e0..9241b6005ad3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -22,11 +22,15 @@ void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
-i40e_status i40e_asq_send_command(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 09b1d5aed1c9..61e5789d78db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1205,10 +1205,6 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work);
- /* Reserved for future extensions. */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
pf->ptp_tx = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 8d0588a27a05..1908eed4fa5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -413,6 +413,9 @@
#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 77be0702d07c..db3714a65dc7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -18,7 +18,7 @@ enum i40e_status_code {
I40E_ERR_ADAPTER_STOPPED = -9,
I40E_ERR_INVALID_MAC_ADDR = -10,
I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
- I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_PRIMARY_REQUESTS_PENDING = -12,
I40E_ERR_INVALID_LINK_SETTINGS = -13,
I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
I40E_ERR_RESET_FAILED = -15,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 10a83e5385c7..66cc79500c10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2204,7 +2204,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -2322,7 +2322,7 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
result = I40E_XDP_REDIR;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
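bpf_warn_invalid_xdp_action() now takes the netdev and the attached program, so the rate-limited warning can say which device and which BPF program returned the unknown verdict. It sits in the default arm of the usual verdict switch; a reduced, self-contained sketch of that switch:

#include <linux/filter.h>
#include <net/xdp.h>
#include <trace/events/xdp.h>

/* Sketch: standard XDP verdict handling in a driver Rx path. */
static u32 ex_run_xdp(struct net_device *netdev, struct bpf_prog *xdp_prog,
                      struct xdp_buff *xdp)
{
    u32 act = bpf_prog_run_xdp(xdp_prog, xdp);

    switch (act) {
    case XDP_PASS:
    case XDP_TX:
    case XDP_REDIRECT:
        return act;             /* handled by the caller */
    default:
        /* identifies netdev and prog in the one-line warning */
        bpf_warn_invalid_xdp_action(netdev, xdp_prog, act);
        fallthrough;
    case XDP_ABORTED:
        trace_xdp_exception(netdev, xdp_prog, act);
        fallthrough;
    case XDP_DROP:
        return XDP_DROP;
    }
}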
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 048f1678ab8a..dfdb6e786461 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1377,6 +1377,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
}
/**
+ * i40e_sync_vfr_reset
+ * @hw: pointer to hw struct
+ * @vf_id: VF identifier
+ *
+ * Before triggering a hardware reset, make sure no other process has
+ * reserved the hardware for a reset operation. This check is done by
+ * examining the status of the RSTAT1 register used to signal the reset.
+ **/
+static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
+ reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (reg)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EAGAIN;
+}
+
+/**
* i40e_trigger_vf_reset
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
@@ -1390,9 +1416,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg, reg_idx, bit_idx;
+ bool vf_active;
+ u32 radq;
/* warn the VF */
- clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
* in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
@@ -1406,7 +1434,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* just need to clean up, so don't hit the VFRTRIG register.
*/
if (!flr) {
- /* reset VF using VPGEN_VFRTRIG reg */
+ /* Sync VFR reset before triggering the next one */
+ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (vf_active && !radq)
+ /* wait for the VF driver to finish its reset */
+ if (i40e_sync_vfr_reset(hw, vf->vf_id))
+ dev_info(&pf->pdev->dev,
+ "Reset VF %d never finished\n",
+ vf->vf_id);
+
+ /* Reset the VF using the VPGEN_VFRTRIG reg. This also sets the
+ * in-progress state in the RSTAT1 register.
+ */
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
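i40e_sync_vfr_reset() is a bounded polling loop: up to I40E_VFR_WAIT_COUNT reads of the VF's admin-queue interrupt-enable bit, sleeping 100-200 us between attempts, returning -EAGAIN on timeout so the caller can merely log and continue. The generic shape of the idiom, sketched against a memory-mapped status register (register layout is illustrative):

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define EX_RESET_WAIT_COUNT 100
#define EX_RESET_DONE_BIT   BIT(30)    /* placeholder status bit */

/* Sketch: wait for hardware to signal reset completion from process
 * context; total worst-case wait is roughly 10-20 ms.
 */
static int ex_sync_reset(void __iomem *status_reg)
{
    int i;

    for (i = 0; i < EX_RESET_WAIT_COUNT; i++) {
        if (readl(status_reg) & EX_RESET_DONE_BIT)
            return 0;
        usleep_range(100, 200);
    }
    return -EAGAIN;     /* caller decides whether this is fatal */
}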
@@ -2618,6 +2658,59 @@ error_param:
}
/**
+ * i40e_check_enough_queue - find big enough queue number
+ * @vf: pointer to the VF info
+ * @needed: the number of items needed
+ *
+ * Returns the base item index of the queue, or negative for error
+ **/
+static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
+{
+ unsigned int i, cur_queues, more, pool_size;
+ struct i40e_lump_tracking *pile;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ cur_queues = vsi->alloc_queue_pairs;
+
+ /* if the currently allocated queues are already enough */
+ if (cur_queues >= needed)
+ return vsi->base_queue;
+
+ pile = pf->qp_pile;
+ if (cur_queues > 0) {
+ /* if some queues are already allocated, just check whether
+ * there are enough free queues available directly behind
+ * the allocated block.
+ */
+ more = needed - cur_queues;
+ for (i = vsi->base_queue + cur_queues;
+ i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT)
+ break;
+
+ if (more-- == 1)
+ /* there is enough */
+ return vsi->base_queue;
+ }
+ }
+
+ pool_size = 0;
+ for (i = 0; i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ pool_size = 0;
+ continue;
+ }
+ if (needed <= ++pool_size)
+ /* there is enough */
+ return i;
+ }
+
+ return -ENOMEM;
+}
+
+/**
* i40e_vc_request_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2651,6 +2744,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
req_pairs - cur_pairs,
pf->queues_left);
vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but there is not enough for it.\n",
+ vf->vf_id,
+ req_pairs - cur_pairs);
+ vfres->num_queue_pairs = cur_pairs;
} else {
/* successful request */
vf->num_req_queues = req_pairs;
@@ -2704,12 +2803,21 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+#define I40E_MAX_MACVLAN_PER_HW 3072
+#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
+ (num_ports))
/* If the VF is not trusted restrict the number of MAC/VLAN it can program
* MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
*/
#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 16
+#define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \
+({ typeof(vf_num) vf_num_ = (vf_num); \
+ typeof(num_ports) num_ports_ = (num_ports); \
+ ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \
+ I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \
+ I40E_VC_MAX_MAC_ADDR_PER_VF; })
/**
* i40e_check_vf_permission
* @vf: pointer to the VF info
@@ -2734,6 +2842,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+ struct i40e_hw *hw = &pf->hw;
int mac2add_cnt = 0;
int i;
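I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF above is written as a GNU statement expression with typeof() temporaries so that vf_num and num_ports are each evaluated exactly once, even though they appear multiple times in the arithmetic. The same pattern in isolation:

/* Sketch: capture multi-use macro arguments in typeof() locals so a
 * side-effecting argument (e.g. cnt++) runs once, not once per use.
 */
#define EX_PER_ITEM_BUDGET(total, nitems)               \
({                                                      \
    typeof(total) total_ = (total);                     \
    typeof(nitems) nitems_ = (nitems);                  \
    nitems_ ? total_ / nitems_ : total_;                \
})

Without the locals, EX_PER_ITEM_BUDGET(pool, cnt++) would increment cnt once per textual use of the argument, silently corrupting the result.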
@@ -2775,12 +2884,26 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* number of addresses. Check to make sure that the additions do not
* push us over the limit.
*/
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- (i40e_count_filters(vsi) + mac2add_cnt) >
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ if ((i40e_count_filters(vsi) + mac2add_cnt) >
I40E_VC_MAX_MAC_ADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
- return -EPERM;
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ }
+ /* If this VF is trusted, it can use more resources than an untrusted
+ * one. However, to ensure that every trusted VF gets an appropriate
+ * number of resources, divide the whole pool per port and then across
+ * all VFs.
+ */
+ } else {
+ if ((i40e_count_filters(vsi) + mac2add_cnt) >
+ I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
+ hw->num_ports)) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
+ return -EPERM;
+ }
}
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 49575a640a84..03c42fd0fea1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -19,6 +19,7 @@
#define I40E_MAX_VF_PROMISC_FLAGS 3
#define I40E_VF_STATE_WAIT_COUNT 20
+#define I40E_VFR_WAIT_COUNT 100
/* Various queue ctrls */
enum i40e_queue_ctrl {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index ea06e957393e..945b1bb9c6f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -176,7 +176,7 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
goto out_failure;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index ea88f4597a07..bb962987f300 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -22,7 +22,6 @@
struct i40e_vsi;
struct xsk_buff_pool;
-struct zero_copy_allocator;
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 3789269ce741..59806d1f7e79 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -55,7 +55,8 @@ enum iavf_vsi_state_t {
struct iavf_vsi {
struct iavf_adapter *back;
struct net_device *netdev;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
+ unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid;
u16 id;
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
@@ -137,14 +138,24 @@ struct iavf_q_vector {
struct iavf_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
- bool is_new_mac; /* filter is new, wait for PF decision */
- bool remove; /* filter needs to be removed */
- bool add; /* filter needs to be added */
+ struct {
+ u8 is_new_mac:1; /* filter is new, wait for PF decision */
+ u8 remove:1; /* filter needs to be removed */
+ u8 add:1; /* filter needs to be added */
+ u8 is_primary:1; /* filter is a default VF MAC */
+ u8 padding:4;
+ };
+};
+
+#define IAVF_VLAN(vid, tpid) ((struct iavf_vlan){ vid, tpid })
+struct iavf_vlan {
+ u16 vid;
+ u16 tpid;
};
struct iavf_vlan_filter {
struct list_head list;
- u16 vlan;
+ struct iavf_vlan vlan;
bool remove; /* filter needs to be removed */
bool add; /* filter needs to be added */
};
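The iavf VLAN filter key grows from a bare u16 VID to a (vid, tpid) pair so 802.1Q and 802.1ad filters can coexist, and IAVF_VLAN() builds the struct inline as a C99 compound literal so call sites stay one-liners. The idiom in isolation:

#include <linux/types.h>
#include <linux/if_vlan.h>

struct ex_vlan {
    u16 vid;
    u16 tpid;   /* ETH_P_8021Q or ETH_P_8021AD */
};

/* compound literal: constructs a struct ex_vlan value in place */
#define EX_VLAN(vid, tpid) ((struct ex_vlan){ (vid), (tpid) })

static bool ex_vlan_eq(struct ex_vlan a, struct ex_vlan b)
{
    /* structs cannot be compared with ==; match field by field,
     * exactly as iavf_find_vlan() does below
     */
    return a.vid == b.vid && a.tpid == b.tpid;
}

/* usage: ex_vlan_eq(f->vlan, EX_VLAN(100, ETH_P_8021Q)) */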
@@ -177,6 +188,8 @@ enum iavf_state_t {
__IAVF_REMOVE, /* driver is being unloaded */
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
+ __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS,
+ __IAVF_INIT_CONFIG_ADAPTER,
__IAVF_INIT_SW, /* got resources, setting up structs */
__IAVF_INIT_FAILED, /* init failed, restarting procedure */
__IAVF_RESETTING, /* in reset */
@@ -274,38 +287,47 @@ struct iavf_adapter {
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
- u32 aq_required;
-#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
-#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT(1)
-#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT(2)
-#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3)
-#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT(4)
-#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5)
-#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
-#define IAVF_FLAG_AQ_MAP_VECTORS BIT(7)
-#define IAVF_FLAG_AQ_HANDLE_RESET BIT(8)
-#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
-#define IAVF_FLAG_AQ_GET_CONFIG BIT(10)
+ u64 aq_required;
+#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT_ULL(0)
+#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT_ULL(1)
+#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT_ULL(2)
+#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT_ULL(3)
+#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT_ULL(4)
+#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT_ULL(5)
+#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT_ULL(6)
+#define IAVF_FLAG_AQ_MAP_VECTORS BIT_ULL(7)
+#define IAVF_FLAG_AQ_HANDLE_RESET BIT_ULL(8)
+#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */
+#define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define IAVF_FLAG_AQ_GET_HENA BIT(11)
-#define IAVF_FLAG_AQ_SET_HENA BIT(12)
-#define IAVF_FLAG_AQ_SET_RSS_KEY BIT(13)
-#define IAVF_FLAG_AQ_SET_RSS_LUT BIT(14)
-#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT(15)
-#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT(16)
-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17)
-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
-#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19)
-#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20)
-#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT(21)
-#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
-#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
-#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
-#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25)
-#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
-#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27)
-#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28)
-#define IAVF_FLAG_AQ_REQUEST_STATS BIT(29)
+#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11)
+#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
+#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
+#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15)
+#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16)
+#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17)
+#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18)
+#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
+#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
+#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
+#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT_ULL(22)
+#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT_ULL(23)
+#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT_ULL(24)
+#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT_ULL(25)
+#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT_ULL(26)
+#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT_ULL(27)
+#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT_ULL(28)
+#define IAVF_FLAG_AQ_REQUEST_STATS BIT_ULL(29)
+#define IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS BIT_ULL(30)
+#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING BIT_ULL(31)
+#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING BIT_ULL(32)
+#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING BIT_ULL(33)
+#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING BIT_ULL(34)
+#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION BIT_ULL(35)
+#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION BIT_ULL(36)
+#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37)
+#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38)
/* OS defined structs */
struct net_device *netdev;
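aq_required ran out of bits at 29, so it widens to u64 and every flag moves from BIT() to BIT_ULL(). The distinction matters on 32-bit builds: BIT(n) expands to 1UL << n, which is undefined for n >= 32 when unsigned long is 32 bits wide, while BIT_ULL() shifts a 1ULL. Sketch:

#include <linux/bits.h>
#include <linux/types.h>

#define EX_FLAG_GET_CAPS    BIT_ULL(30)
#define EX_FLAG_SET_VLAN    BIT_ULL(33)     /* > 31: needs BIT_ULL */

struct ex_adapter {
    u64 aq_required;                        /* 64-bit flag word */
};

static bool ex_work_pending(const struct ex_adapter *a)
{
    return (a->aq_required & (EX_FLAG_GET_CAPS | EX_FLAG_SET_VLAN)) != 0;
}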
@@ -345,6 +367,14 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_VLAN)
+#define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2)
+#define VLAN_V2_FILTERING_ALLOWED(_a) \
+ (VLAN_V2_ALLOWED((_a)) && \
+ ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
+ (_a)->vlan_v2_caps.filtering.filtering_support.inner))
+#define VLAN_FILTERING_ALLOWED(_a) \
+ (VLAN_ALLOWED((_a)) || VLAN_V2_FILTERING_ALLOWED((_a)))
#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
@@ -356,6 +386,7 @@ struct iavf_adapter {
struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
+ struct virtchnl_vlan_caps vlan_v2_caps;
u16 msg_enable;
struct iavf_eth_stats current_stats;
struct iavf_vsi vsi;
@@ -444,6 +475,7 @@ static inline void iavf_change_state(struct iavf_adapter *adapter,
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
+int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter);
void iavf_schedule_request_stats(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
@@ -462,6 +494,9 @@ int iavf_send_api_ver(struct iavf_adapter *adapter);
int iavf_verify_api_ver(struct iavf_adapter *adapter);
int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
int iavf_get_vf_config(struct iavf_adapter *adapter);
+int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
+int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
void iavf_configure_queues(struct iavf_adapter *adapter);
void iavf_deconfigure_queues(struct iavf_adapter *adapter);
@@ -497,6 +532,14 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+void
+iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+ netdev_features_t prev_features,
+ netdev_features_t features);
void iavf_add_fdir_filter(struct iavf_adapter *adapter);
void iavf_del_fdir_filter(struct iavf_adapter *adapter);
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 9fa3fa99b4c2..cd4e6a22d0f9 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -551,15 +551,13 @@ init_adminq_exit:
**/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
- enum iavf_status ret_code = 0;
-
if (iavf_check_asq_alive(hw))
iavf_aq_queue_shutdown(hw, true);
iavf_shutdown_asq(hw);
iavf_shutdown_arq(hw);
- return ret_code;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 461f5237a2f8..3bb56714beb0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -331,9 +331,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
**/
static int iavf_get_sset_count(struct net_device *netdev, int sset)
{
+ /* Report the maximum number of queues, even if not every queue is
+ * currently configured. Since allocation of queues is in pairs,
+ * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
+ * at device creation and never changes.
+ */
+
if (sset == ETH_SS_STATS)
return IAVF_STATS_LEN +
- (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES);
+ (IAVF_QUEUE_STATS_LEN * 2 *
+ netdev->real_num_tx_queues);
else if (sset == ETH_SS_PRIV_FLAGS)
return IAVF_PRIV_FLAGS_STR_LEN;
else
@@ -360,17 +367,18 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
rcu_read_lock();
- for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) {
+ /* As num_active_queues describes both tx and rx queues, we can use
+ * it to iterate over rings' stats.
+ */
+ for (i = 0; i < adapter->num_active_queues; i++) {
struct iavf_ring *ring;
- /* Avoid accessing un-allocated queues */
- ring = (i < adapter->num_active_queues ?
- &adapter->tx_rings[i] : NULL);
+ /* Tx rings stats */
+ ring = &adapter->tx_rings[i];
iavf_add_queue_stats(&data, ring);
- /* Avoid accessing un-allocated queues */
- ring = (i < adapter->num_active_queues ?
- &adapter->rx_rings[i] : NULL);
+ /* Rx rings stats */
+ ring = &adapter->rx_rings[i];
iavf_add_queue_stats(&data, ring);
}
rcu_read_unlock();
@@ -407,10 +415,10 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
iavf_add_stat_strings(&data, iavf_gstrings_stats);
- /* Queues are always allocated in pairs, so we just use num_tx_queues
- * for both Tx and Rx queues.
+ /* Queues are always allocated in pairs, so we just use
+ * real_num_tx_queues for both Tx and Rx queues.
*/
- for (i = 0; i < netdev->num_tx_queues; i++) {
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
"tx", i);
iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
@@ -583,12 +591,16 @@ static void iavf_get_drvinfo(struct net_device *netdev,
* iavf_get_ringparam - Get ring parameters
* @netdev: network interface device structure
* @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
*
* Returns current ring parameters. TX and RX rings are reported separately,
* but the number of rings is not reported.
**/
static void iavf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -602,12 +614,16 @@ static void iavf_get_ringparam(struct net_device *netdev,
* iavf_set_ringparam - Set ring parameters
* @netdev: network interface device structure
* @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
*
* Sets ring parameters. TX and RX rings are controlled separately, but the
* number of rings is not specified, so all rings get the same settings.
**/
static int iavf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
@@ -1923,7 +1939,7 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
* @key: hash key
* @hfunc: hash function to use
*
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
@@ -1932,19 +1948,21 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
- /* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ /* Only support toeplitz hash function */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
+
+ if (!key && !indir)
return 0;
if (key)
memcpy(adapter->rss_key, key, adapter->rss_key_size);
- /* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < adapter->rss_lut_size; i++)
- adapter->rss_lut[i] = (u8)(indir[i]);
+ if (indir) {
+ /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = (u8)(indir[i]);
+ }
return iavf_config_rss(adapter);
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index e4439b095533..8125b9120615 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -463,14 +463,14 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-TxRx-%d", basename, rx_int_idx++);
+ "iavf-%s-TxRx-%u", basename, rx_int_idx++);
tx_int_idx++;
} else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-rx-%d", basename, rx_int_idx++);
+ "iavf-%s-rx-%u", basename, rx_int_idx++);
} else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-tx-%d", basename, tx_int_idx++);
+ "iavf-%s-tx-%u", basename, tx_int_idx++);
} else {
/* skip this unused q_vector */
continue;
@@ -492,10 +492,10 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* Spread the IRQ affinity hints across online CPUs. Note that
* get_cpu_mask returns a mask with a permanent lifetime so
- * it's safe to use as a hint for irq_set_affinity_hint.
+ * it's safe to use as a hint for irq_update_affinity_hint.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -505,7 +505,7 @@ free_queue_irqs:
vector--;
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
@@ -557,7 +557,7 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
for (vector = 0; vector < q_vectors; vector++) {
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
}
@@ -646,14 +646,17 @@ static void iavf_configure_rx(struct iavf_adapter *adapter)
* mac_vlan_list_lock.
**/
static struct
-iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
+iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
+ struct iavf_vlan vlan)
{
struct iavf_vlan_filter *f;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (vlan == f->vlan)
+ if (f->vlan.vid == vlan.vid &&
+ f->vlan.tpid == vlan.tpid)
return f;
}
+
return NULL;
}
@@ -665,7 +668,8 @@ iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
* Returns ptr to the filter object or NULL when no memory available.
**/
static struct
-iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
+iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
+ struct iavf_vlan vlan)
{
struct iavf_vlan_filter *f = NULL;
@@ -694,7 +698,7 @@ clearout:
* @adapter: board private structure
* @vlan: VLAN tag
**/
-static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
+static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
struct iavf_vlan_filter *f;
@@ -720,8 +724,55 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
u16 vid;
/* re-add all VLAN filters */
- for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
- iavf_add_vlan(adapter, vid);
+ for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
+ iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
+
+ for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
+ iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
+}
+
+/**
+ * iavf_get_num_vlans_added - get number of VLANs added
+ * @adapter: board private structure
+ */
+static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
+{
+ return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
+ bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
+}
+
+/**
+ * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
+ * @adapter: board private structure
+ *
+ * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
+ * no limit is imposed, which preserves current behavior; for
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2, the maximum allowed sent by the PF is used.
+ **/
+static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
+{
+ /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
+ * never been a limit on the VF driver side
+ */
+ if (VLAN_ALLOWED(adapter))
+ return VLAN_N_VID;
+ else if (VLAN_V2_ALLOWED(adapter))
+ return adapter->vlan_v2_caps.filtering.max_filters;
+
+ return 0;
+}
+
+/**
+ * iavf_max_vlans_added - check if maximum VLANs allowed already exist
+ * @adapter: board private structure
+ **/
+static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
+{
+ if (iavf_get_num_vlans_added(adapter) <
+ iavf_get_max_vlans_allowed(adapter))
+ return false;
+
+ return true;
}
/**
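With separate active_cvlans/active_svlans bitmaps, restoring filters after a reset becomes two for_each_set_bit() walks (one per TPID), and counting live filters is the sum of two bitmap_weight() calls. Reduced sketch, with a stubbed add helper:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

static void ex_add_vlan(u16 vid, u16 tpid) { /* driver add helper (stub) */ }

/* Sketch: re-add one filter per set bit in each VLAN-ID bitmap. */
static void ex_restore_vlans(unsigned long *cvlans, unsigned long *svlans)
{
    u16 vid;

    for_each_set_bit(vid, cvlans, VLAN_N_VID)
        ex_add_vlan(vid, ETH_P_8021Q);      /* 802.1Q C-tags */

    for_each_set_bit(vid, svlans, VLAN_N_VID)
        ex_add_vlan(vid, ETH_P_8021AD);     /* 802.1ad S-tags */
}

static unsigned int ex_num_vlans(unsigned long *cvlans, unsigned long *svlans)
{
    return bitmap_weight(cvlans, VLAN_N_VID) +
           bitmap_weight(svlans, VLAN_N_VID);
}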
@@ -735,13 +786,23 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (!VLAN_ALLOWED(adapter))
+ if (!VLAN_FILTERING_ALLOWED(adapter))
+ return -EIO;
+
+ if (iavf_max_vlans_added(adapter)) {
+ netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
+ iavf_get_max_vlans_allowed(adapter));
return -EIO;
+ }
- if (iavf_add_vlan(adapter, vid) == NULL)
+ if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
return -ENOMEM;
- set_bit(vid, adapter->vsi.active_vlans);
+ if (proto == cpu_to_be16(ETH_P_8021Q))
+ set_bit(vid, adapter->vsi.active_cvlans);
+ else
+ set_bit(vid, adapter->vsi.active_svlans);
+
return 0;
}
@@ -756,8 +817,11 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- iavf_del_vlan(adapter, vid);
- clear_bit(vid, adapter->vsi.active_vlans);
+ iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
+ if (proto == cpu_to_be16(ETH_P_8021Q))
+ clear_bit(vid, adapter->vsi.active_cvlans);
+ else
+ clear_bit(vid, adapter->vsi.active_svlans);
return 0;
}
@@ -1152,6 +1216,86 @@ static void iavf_free_queues(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
+ * @adapter: board private structure
+ *
+ * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
+ * stripped in certain descriptor fields. Instead of checking the offload
+ * capability bits in the hot path, cache the location in the ring-specific
+ * flags.
+ */
+void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *tx_ring = &adapter->tx_rings[i];
+ struct iavf_ring *rx_ring = &adapter->rx_rings[i];
+
+ /* prevent multiple L2TAG bits being set after VFR */
+ tx_ring->flags &=
+ ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
+ IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
+ rx_ring->flags &=
+ ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
+ IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
+
+ if (VLAN_ALLOWED(adapter)) {
+ tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+
+ stripping_support =
+ &adapter->vlan_v2_caps.offloads.stripping_support;
+ insertion_support =
+ &adapter->vlan_v2_caps.offloads.insertion_support;
+
+ if (stripping_support->outer) {
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ rx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (stripping_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
+ rx_ring->flags |=
+ IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
+ } else if (stripping_support->inner) {
+ if (stripping_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ rx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (stripping_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
+ rx_ring->flags |=
+ IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
+ }
+
+ if (insertion_support->outer) {
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ tx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (insertion_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
+ tx_ring->flags |=
+ IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
+ } else if (insertion_support->inner) {
+ if (insertion_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ tx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (insertion_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
+ tx_ring->flags |=
+ IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
+ }
+ }
+ }
+}
+
+/**
* iavf_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -1212,6 +1356,8 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
adapter->num_active_queues = num_active_queues;
+ iavf_set_queue_vlan_tag_loc(adapter);
+
return 0;
err_out:
@@ -1584,6 +1730,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
return iavf_send_vf_config_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
+ return iavf_send_vf_offload_vlan_v2_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter);
return 0;
@@ -1717,6 +1865,39 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_del_adv_rss_cfg(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
+ iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
+ iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
+ iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
+ iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
+ iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
+ iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
+ iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
+ iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
iavf_request_stats(adapter);
return 0;
@@ -1726,6 +1907,91 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_vlan_offload_features - set VLAN offload configuration
+ * @adapter: board private structure
+ * @prev_features: previous features used for comparison
+ * @features: updated features used for configuration
+ *
+ * Set the aq_required bit(s) based on the requested features passed in to
+ * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
+ * the watchdog if any changes are requested to expedite the request via
+ * virtchnl.
+ **/
+void
+iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+ netdev_features_t prev_features,
+ netdev_features_t features)
+{
+ bool enable_stripping = true, enable_insertion = true;
+ u16 vlan_ethertype = 0;
+ u64 aq_required = 0;
+
+ /* keep cases separate because one ethertype for offloads can be
+ * disabled at the same time as another is enabled, so check for an
+ * enabled ethertype first, then check for disabled. Default to
+ * ETH_P_8021Q so an ethertype is specified if disabling insertion and
+ * stripping.
+ */
+ if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+ else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+ else
+ vlan_ethertype = ETH_P_8021Q;
+
+ if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
+ enable_stripping = false;
+ if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
+ enable_insertion = false;
+
+ if (VLAN_ALLOWED(adapter)) {
+ /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
+ * stripping via virtchnl. VLAN insertion can be toggled on the
+ * netdev, but it doesn't require a virtchnl message
+ */
+ if (enable_stripping)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ switch (vlan_ethertype) {
+ case ETH_P_8021Q:
+ if (enable_stripping)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
+
+ if (enable_insertion)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
+ break;
+ case ETH_P_8021AD:
+ if (enable_stripping)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
+
+ if (enable_insertion)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
+ break;
+ }
+ }
+
+ if (aq_required) {
+ adapter->aq_required |= aq_required;
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ }
+}
+
+/**
* iavf_startup - first step of driver startup
* @adapter: board private structure
*
@@ -1827,6 +2093,59 @@ err:
}
/**
+ * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
+ * @adapter: board private structure
+ */
+int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
+{
+ int i, num_req_queues = adapter->num_req_queues;
+ struct iavf_vsi *vsi = &adapter->vsi;
+
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+ return -ENODEV;
+ }
+
+ if (num_req_queues &&
+ num_req_queues > adapter->vsi_res->num_queue_pairs) {
+ /* Problem. The PF gave us fewer queues than what we had
+ * negotiated in our request. Need a reset to see if we can't
+ * get back to a working state.
+ */
+ dev_err(&adapter->pdev->dev,
+ "Requested %d queues, but PF only gave us %d.\n",
+ num_req_queues,
+ adapter->vsi_res->num_queue_pairs);
+ adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
+ iavf_schedule_reset(adapter);
+
+ return -EAGAIN;
+ }
+ adapter->num_req_queues = 0;
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
+ vsi->netdev = adapter->netdev;
+ vsi->qs_handle = adapter->vsi_res->qset_handle;
+ if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ adapter->rss_key_size = adapter->vf_res->rss_key_size;
+ adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
+ } else {
+ adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
+ adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
+ }
+
+ return 0;
+}
+
+/**
* iavf_init_get_resources - third step of driver startup
* @adapter: board private structure
*
@@ -1837,7 +2156,6 @@ err:
**/
static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
int err;
@@ -1855,7 +2173,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
err = iavf_get_vf_config(adapter);
if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
err = iavf_send_vf_config_msg(adapter);
- goto err;
+ goto err_alloc;
} else if (err == IAVF_ERR_PARAM) {
/* We only get ERR_PARAM if the device is in a very bad
* state or if we've been disabled for previous bad
@@ -1870,9 +2188,83 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
goto err_alloc;
}
- err = iavf_process_config(adapter);
+ err = iavf_parse_vf_resource_msg(adapter);
if (err)
goto err_alloc;
+
+ err = iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (err == -EOPNOTSUPP) {
+ /* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2, so
+ * go directly to finishing initialization
+ */
+ iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+ return;
+ } else if (err) {
+ dev_err(&pdev->dev, "Unable to send offload vlan v2 request (%d)\n",
+ err);
+ goto err_alloc;
+ }
+
+ /* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the
+ * state accordingly
+ */
+ iavf_change_state(adapter, __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
+ return;
+
+err_alloc:
+ kfree(adapter->vf_res);
+ adapter->vf_res = NULL;
+err:
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_get_offload_vlan_v2_caps - part of driver startup
+ * @adapter: board private structure
+ *
+ * Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the
+ * VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2. If VIRTCHNL_VF_OFFLOAD_VLAN_V2 is
+ * not negotiated, then this state will never be entered.
+ **/
+static void iavf_init_get_offload_vlan_v2_caps(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ WARN_ON(adapter->state != __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
+
+ memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
+
+ ret = iavf_get_vf_vlan_v2_caps(adapter);
+ if (ret) {
+ if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+ iavf_send_vf_offload_vlan_v2_msg(adapter);
+ goto err;
+ }
+
+ iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+ return;
+err:
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
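Together with iavf_init_get_resources() above, startup is now a watchdog-driven state machine that branches on the negotiated capability; a sketch of the progression (state names as introduced by this patch):

	/*
	 *  __IAVF_INIT_GET_RESOURCES
	 *      |-- VLAN_V2 negotiated --> __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS
	 *      |                                        |
	 *      `-- otherwise (-EOPNOTSUPP) -------------+--> __IAVF_INIT_CONFIG_ADAPTER
	 *
	 *  (a failure in any step lands in __IAVF_INIT_FAILED)
	 */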
+/**
+ * iavf_init_config_adapter - last part of driver startup
+ * @adapter: board private structure
+ *
+ * Once all supported capabilities have been negotiated, the
+ * __IAVF_INIT_CONFIG_ADAPTER state finishes driver initialization.
+ */
+static void iavf_init_config_adapter(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
+
+ if (iavf_process_config(adapter))
+ goto err;
+
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
@@ -1955,6 +2347,10 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
else
iavf_init_rss(adapter);
+ if (VLAN_V2_ALLOWED(adapter))
+ /* request initial VLAN offload settings */
+ iavf_set_vlan_offload_features(adapter, 0, netdev->features);
+
return;
err_mem:
iavf_free_rss(adapter);
@@ -1962,9 +2358,6 @@ err_register:
iavf_free_misc_irq(adapter);
err_sw_init:
iavf_reset_interrupt_capability(adapter);
-err_alloc:
- kfree(adapter->vf_res);
- adapter->vf_res = NULL;
err:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
@@ -2013,6 +2406,18 @@ static void iavf_watchdog_task(struct work_struct *work)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
+ case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS:
+ iavf_init_get_offload_vlan_v2_caps(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_CONFIG_ADAPTER:
+ iavf_init_config_adapter(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
case __IAVF_INIT_FAILED:
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
@@ -2066,10 +2471,13 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_send_api_ver(adapter);
}
} else {
+ int ret = iavf_process_aq_command(adapter);
+
/* An error will be returned if no commands were
* processed; use this opportunity to update stats
+ * if the error isn't -EOPNOTSUPP
*/
- if (iavf_process_aq_command(adapter) &&
+ if (ret && ret != -EOPNOTSUPP &&
adapter->state == __IAVF_RUNNING)
iavf_request_stats(adapter);
}
@@ -2308,6 +2716,13 @@ continue_reset:
}
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
+ /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
+ * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here;
+ * however, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
+ * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
+ * been successfully sent and negotiated
+ */
+ adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@ -2913,7 +3328,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
match.mask->dst);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2923,7 +3338,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
match.mask->src);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2958,7 +3373,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
match.mask->vlan_id);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
@@ -2982,7 +3397,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2992,13 +3407,13 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
if (match.key->dst) {
vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
@@ -3019,7 +3434,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
if (ipv6_addr_any(&match.mask->dst)) {
dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
IPV6_ADDR_ANY);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
/* src and dest IPv6 address should not be LOOPBACK
@@ -3029,7 +3444,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
ipv6_addr_loopback(&match.key->src)) {
dev_err(&adapter->pdev->dev,
"ipv6 addr should not be loopback\n");
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
if (!ipv6_addr_any(&match.mask->dst) ||
!ipv6_addr_any(&match.mask->src))
@@ -3054,7 +3469,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
be16_to_cpu(match.mask->src));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -3064,7 +3479,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
be16_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
if (match.key->dst) {
@@ -3431,6 +3846,8 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (CLIENT_ENABLED(adapter)) {
iavf_notify_client_l2_params(&adapter->vsi);
@@ -3442,6 +3859,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
/**
* iavf_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
@@ -3453,25 +3875,11 @@ static int iavf_set_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- /* Don't allow enabling VLAN features when adapter is not capable
- * of VLAN offload/filtering
- */
- if (!VLAN_ALLOWED(adapter)) {
- netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
- if (features & (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_FILTER))
- return -EINVAL;
- } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- adapter->aq_required |=
- IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
- else
- adapter->aq_required |=
- IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
- }
+ /* trigger update on any VLAN feature change */
+ if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
+ (features & NETIF_VLAN_OFFLOAD_FEATURES))
+ iavf_set_vlan_offload_features(adapter, netdev->features,
+ features);
return 0;
}
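iavf_set_vlan_offload_features() is defined elsewhere in this patch; a hedged sketch of its likely shape, shown for the CTAG stripping case only (illustrative — the real routine also covers STAG and the insertion offloads):

	static void sketch_set_vlan_offload(struct iavf_adapter *adapter,
					    netdev_features_t prev,
					    netdev_features_t req)
	{
		/* enable/disable requests are queued for the watchdog via
		 * aq_required bits rather than sent synchronously
		 */
		if ((req & NETIF_F_HW_VLAN_CTAG_RX) &&
		    !(prev & NETIF_F_HW_VLAN_CTAG_RX))
			adapter->aq_required |=
				IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
		else if (!(req & NETIF_F_HW_VLAN_CTAG_RX) &&
			 (prev & NETIF_F_HW_VLAN_CTAG_RX))
			adapter->aq_required |=
				IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
		/* ...likewise for STAG stripping and CTAG/STAG insertion... */
	}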
@@ -3535,6 +3943,228 @@ out_err:
}
/**
+ * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can be toggled on/off
+ * @adapter: board private structure
+ *
+ * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
+ * was negotiated, determine the VLAN features that can be toggled on and off.
+ **/
+static netdev_features_t
+iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
+{
+ netdev_features_t hw_features = 0;
+
+ if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
+ return hw_features;
+
+ /* Enable VLAN features if supported */
+ if (VLAN_ALLOWED(adapter)) {
+ hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ struct virtchnl_vlan_caps *vlan_v2_caps =
+ &adapter->vlan_v2_caps;
+ struct virtchnl_vlan_supported_caps *stripping_support =
+ &vlan_v2_caps->offloads.stripping_support;
+ struct virtchnl_vlan_supported_caps *insertion_support =
+ &vlan_v2_caps->offloads.insertion_support;
+
+ if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
+ stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ } else if (stripping_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED &&
+ stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+ if (stripping_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
+ insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ } else if (insertion_support->inner &&
+ insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+ if (insertion_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+ }
+
+ return hw_features;
+}
+
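As a worked example (hypothetical PF response): a PF advertising toggleable outer stripping for both ethertypes would report

	/*   stripping_support->outer = VIRTCHNL_VLAN_TOGGLE |
	 *                              VIRTCHNL_VLAN_ETHERTYPE_8100 |
	 *                              VIRTCHNL_VLAN_ETHERTYPE_88A8;
	 * which the logic above turns into
	 *   hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	 */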
+/**
+ * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
+ * @adapter: board private structure
+ *
+ * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
+ * was negotiated, determine the VLAN features that are enabled by default.
+ **/
+static netdev_features_t
+iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
+{
+ netdev_features_t features = 0;
+
+ if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
+ return features;
+
+ if (VLAN_ALLOWED(adapter)) {
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ struct virtchnl_vlan_caps *vlan_v2_caps =
+ &adapter->vlan_v2_caps;
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &vlan_v2_caps->filtering.filtering_support;
+ struct virtchnl_vlan_supported_caps *stripping_support =
+ &vlan_v2_caps->offloads.stripping_support;
+ struct virtchnl_vlan_supported_caps *insertion_support =
+ &vlan_v2_caps->offloads.insertion_support;
+ u32 ethertype_init;
+
+ /* give priority to outer stripping and don't support both outer
+ * and inner stripping
+ */
+ ethertype_init = vlan_v2_caps->offloads.ethertype_init;
+ if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ } else if (stripping_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (stripping_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ /* give priority to outer insertion and don't support both outer
+ * and inner insertion
+ */
+ if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_TX;
+ } else if (insertion_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (insertion_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+
+ /* give priority to outer filtering and don't bother if both
+ * outer and inner filtering are enabled
+ */
+ ethertype_init = vlan_v2_caps->filtering.ethertype_init;
+ if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (filtering_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (filtering_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ } else if (filtering_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (filtering_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (filtering_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ }
+ }
+
+ return features;
+}
+
+#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
+ (!(((requested) & (feature_bit)) && \
+ !((allowed) & (feature_bit))))
+
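The macro reads as "not (requested but not allowed)"; a quick worked case (hypothetical values):

	/* requested = CTAG_RX | STAG_RX, allowed = CTAG_RX, bit = STAG_RX:
	 *   STAG_RX is requested but not allowed -> macro evaluates false,
	 *   so the caller below strips STAG_RX from the requested set.
	 */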
+/**
+ * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
+ * @adapter: board private structure
+ * @requested_features: stack requested NETDEV features
+ **/
+static netdev_features_t
+iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
+ netdev_features_t requested_features)
+{
+ netdev_features_t allowed_features;
+
+ allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
+ iavf_get_netdev_vlan_features(adapter);
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_CTAG_TX))
+ requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_CTAG_RX))
+ requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_STAG_TX))
+ requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_STAG_RX))
+ requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_CTAG_FILTER))
+ requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_STAG_FILTER))
+ requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
+
+ if ((requested_features &
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
+ (requested_features &
+ (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
+ adapter->vlan_v2_caps.offloads.ethertype_match ==
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
+ netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
+ requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX);
+ }
+
+ return requested_features;
+}
+
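For the final check, consider a hypothetical request for both CTAG and STAG stripping/insertion when the PF reports VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION: hardware can then only match one ethertype, so STAG is dropped and CTAG wins:

	/* requested: CTAG_RX | CTAG_TX | STAG_RX | STAG_TX
	 * result:    CTAG_RX | CTAG_TX   (STAG_RX/STAG_TX cleared, with a
	 *                                 netdev_warn explaining why)
	 */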
+/**
* iavf_fix_features - fix up the netdev feature bits
* @netdev: our net device
* @features: desired feature bits
@@ -3546,13 +4176,7 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (adapter->vf_res &&
- !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
- features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
-
- return features;
+ return iavf_fix_netdev_vlan_features(adapter, features);
}
static const struct net_device_ops iavf_netdev_ops = {
@@ -3604,39 +4228,11 @@ static int iavf_check_reset_complete(struct iavf_hw *hw)
int iavf_process_config(struct iavf_adapter *adapter)
{
struct virtchnl_vf_resource *vfres = adapter->vf_res;
- int i, num_req_queues = adapter->num_req_queues;
+ netdev_features_t hw_vlan_features, vlan_features;
struct net_device *netdev = adapter->netdev;
- struct iavf_vsi *vsi = &adapter->vsi;
netdev_features_t hw_enc_features;
netdev_features_t hw_features;
- /* got VF config message back from PF, now we can parse it */
- for (i = 0; i < vfres->num_vsis; i++) {
- if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
- adapter->vsi_res = &vfres->vsi_res[i];
- }
- if (!adapter->vsi_res) {
- dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
- return -ENODEV;
- }
-
- if (num_req_queues &&
- num_req_queues > adapter->vsi_res->num_queue_pairs) {
- /* Problem. The PF gave us fewer queues than what we had
- * negotiated in our request. Need a reset to see if we can't
- * get back to a working state.
- */
- dev_err(&adapter->pdev->dev,
- "Requested %d queues, but PF only gave us %d.\n",
- num_req_queues,
- adapter->vsi_res->num_queue_pairs);
- adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
- adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
- iavf_schedule_reset(adapter);
- return -ENODEV;
- }
- adapter->num_req_queues = 0;
-
hw_enc_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -3680,19 +4276,19 @@ int iavf_process_config(struct iavf_adapter *adapter)
*/
hw_features = hw_enc_features;
- /* Enable VLAN features if supported */
- if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
- hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX);
+ /* get HW VLAN features that can be toggled */
+ hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
+
/* Enable cloud filter if ADQ is supported */
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
hw_features |= NETIF_F_HW_TC;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
hw_features |= NETIF_F_GSO_UDP_L4;
- netdev->hw_features |= hw_features;
+ netdev->hw_features |= hw_features | hw_vlan_features;
+ vlan_features = iavf_get_netdev_vlan_features(adapter);
- netdev->features |= hw_features;
+ netdev->features |= hw_features | vlan_features;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -3717,21 +4313,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
netdev->features &= ~NETIF_F_GSO;
}
- adapter->vsi.id = adapter->vsi_res->vsi_id;
-
- adapter->vsi.back = adapter;
- adapter->vsi.base_vector = 1;
- adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
- vsi->netdev = adapter->netdev;
- vsi->qs_handle = adapter->vsi_res->qset_handle;
- if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
- adapter->rss_key_size = vfres->rss_key_size;
- adapter->rss_lut_size = vfres->rss_lut_size;
- } else {
- adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
- adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
- }
-
return 0;
}
@@ -4001,6 +4582,7 @@ static void iavf_remove(struct pci_dev *pdev)
if (iavf_lock_timeout(&adapter->crit_lock, 5000))
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
+ dev_info(&adapter->pdev->dev, "Removing device\n");
/* Shut down all the garbage mashers on the detention level */
iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 3525eab8e9f9..8cbe7ad1347c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -865,6 +865,9 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring,
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
napi_gro_receive(&q_vector->napi, skb);
}
@@ -1363,7 +1366,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
net_prefetch(va);
/* build an skb around the page buffer */
- skb = build_skb(va - IAVF_SKB_PAD, truesize);
+ skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
if (unlikely(!skb))
return NULL;
@@ -1468,7 +1471,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
struct iavf_rx_buffer *rx_buffer;
union iavf_rx_desc *rx_desc;
unsigned int size;
- u16 vlan_tag;
+ u16 vlan_tag = 0;
u8 rx_ptype;
u64 qword;
@@ -1551,9 +1554,13 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* populate checksum, VLAN, and protocol */
iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
- le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
+ rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
+ vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
+ if (rx_desc->wb.qword2.ext_status &
+ cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
+ rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
+ vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
iavf_receive_skb(rx_ring, skb, vlan_tag);
@@ -1766,7 +1773,7 @@ tx_only:
if (likely(napi_complete_done(napi, work_done)))
iavf_update_enable_itr(vsi, q_vector);
- return min(work_done, budget - 1);
+ return min_t(int, work_done, budget - 1);
}
/**
@@ -1781,46 +1788,29 @@ tx_only:
* Returns an error code to indicate the frame should be dropped upon error;
* otherwise returns 0 to indicate the flags have been set properly.
**/
-static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct iavf_ring *tx_ring,
- u32 *flags)
+static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct iavf_ring *tx_ring, u32 *flags)
{
- __be16 protocol = skb->protocol;
u32 tx_flags = 0;
- if (protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
- /* When HW VLAN acceleration is turned off by the user the
- * stack sets the protocol to 8021q so that the driver
- * can take any steps required to support the SW only
- * VLAN handling. In our case the driver doesn't need
- * to take any further steps so just set the protocol
- * to the encapsulated ethertype.
- */
- skb->protocol = vlan_get_protocol(skb);
- goto out;
- }
- /* if we have a HW VLAN tag being added, default to the HW one */
- if (skb_vlan_tag_present(skb)) {
- tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
- /* else if it is a SW VLAN, check the next protocol and store the tag */
- } else if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_hdr *vhdr, _vhdr;
-
- vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
- if (!vhdr)
- return -EINVAL;
+ /* stack will only request hardware VLAN insertion offload for protocols
+ * that the driver supports and has enabled
+ */
+ if (!skb_vlan_tag_present(skb))
+ return;
- protocol = vhdr->h_vlan_encapsulated_proto;
- tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
+ tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
+ if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
+ tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
+ } else {
+ dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
+ return;
}
-out:
*flags = tx_flags;
- return 0;
}
/**
@@ -2440,8 +2430,13 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
first->gso_segs = 1;
/* prepare the xmit flags */
- if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
- goto out_drop;
+ iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
+ if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
+ cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
+ IAVF_TXD_CTX_QW1_CMD_SHIFT;
+ cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
+ IAVF_TX_FLAGS_VLAN_SHIFT;
+ }
/* obtain protocol of skb */
protocol = vlan_get_protocol(skb);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index e5b9ba42dd00..2624bf6d009e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -243,19 +243,20 @@ static inline unsigned int iavf_txd_use_count(unsigned int size)
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING 4
-#define IAVF_TX_FLAGS_HW_VLAN BIT(1)
-#define IAVF_TX_FLAGS_SW_VLAN BIT(2)
-#define IAVF_TX_FLAGS_TSO BIT(3)
-#define IAVF_TX_FLAGS_IPV4 BIT(4)
-#define IAVF_TX_FLAGS_IPV6 BIT(5)
-#define IAVF_TX_FLAGS_FCCRC BIT(6)
-#define IAVF_TX_FLAGS_FSO BIT(7)
-#define IAVF_TX_FLAGS_FD_SB BIT(9)
-#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10)
-#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
-#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29
-#define IAVF_TX_FLAGS_VLAN_SHIFT 16
+#define IAVF_TX_FLAGS_HW_VLAN BIT(1)
+#define IAVF_TX_FLAGS_SW_VLAN BIT(2)
+#define IAVF_TX_FLAGS_TSO BIT(3)
+#define IAVF_TX_FLAGS_IPV4 BIT(4)
+#define IAVF_TX_FLAGS_IPV6 BIT(5)
+#define IAVF_TX_FLAGS_FCCRC BIT(6)
+#define IAVF_TX_FLAGS_FSO BIT(7)
+#define IAVF_TX_FLAGS_FD_SB BIT(9)
+#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10)
+#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(11)
+#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define IAVF_TX_FLAGS_VLAN_SHIFT 16
struct iavf_tx_buffer {
struct iavf_tx_desc *next_to_watch;
@@ -362,6 +363,9 @@ struct iavf_ring {
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
+#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
+#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
/* stats structs */
struct iavf_queue_stats stats;
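iavf_set_queue_vlan_tag_loc() (added by this patch but not shown in this hunk) is what assigns these flags; a hedged sketch of the likely mapping from negotiated caps to tag location:

	/* Hedged sketch (the real routine loops over every Tx/Rx ring):
	 *   - legacy VIRTCHNL_VF_OFFLOAD_VLAN: the tag travels in L2TAG1
	 *     in both directions:
	 *       ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
	 *   - VLAN_V2: the PF's caps dictate the location per direction, e.g.
	 *       tx_ring->flags |= IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;    // outer Tx
	 *       rx_ring->flags |= IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;  // outer Rx
	 *     while inner tags stay in L2TAG1.
	 */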
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index d60bf7c21200..5ee1d118fd30 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -137,6 +137,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_ADQ |
@@ -155,6 +156,19 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
NULL, 0);
}
+int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
+{
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
+
+ if (!VLAN_V2_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
+ NULL, 0);
+}
+
/**
* iavf_validate_num_queues
* @adapter: adapter structure
@@ -235,6 +249,45 @@ out:
return err;
}
+int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
+{
+ struct iavf_hw *hw = &adapter->hw;
+ struct iavf_arq_event_info event;
+ enum virtchnl_ops op;
+ int err;
+ u16 len;
+
+ len = sizeof(struct virtchnl_vlan_caps);
+ event.buf_len = len;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ while (1) {
+ /* When the AQ is empty, iavf_clean_arq_element will return
+ * nonzero and this loop will terminate.
+ */
+ err = iavf_clean_arq_element(hw, &event, NULL);
+ if (err)
+ goto out_alloc;
+ op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
+ break;
+ }
+
+ err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
+ if (err)
+ goto out_alloc;
+
+ memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len));
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
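For context, this polling pattern mirrors iavf_get_vf_config(): during early init interrupts are not yet set up, so the driver spins on the admin receive queue and decodes each event by the virtchnl convention:

	/* event.desc.cookie_high = virtchnl opcode of the reply
	 * event.desc.cookie_low  = PF's status/return code
	 * event.msg_buf          = reply payload (vlan_v2_caps here)
	 */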
/**
* iavf_configure_queues
* @adapter: adapter structure
@@ -589,7 +642,6 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
**/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
- struct virtchnl_vlan_filter_list *vvfl;
int len, i = 0, count = 0;
struct iavf_vlan_filter *f;
bool more = false;
@@ -607,48 +659,105 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
if (f->add)
count++;
}
- if (!count || !VLAN_ALLOWED(adapter)) {
+ if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
- adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE -
- sizeof(struct virtchnl_vlan_filter_list)) /
- sizeof(u16);
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- more = true;
- }
- vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
+ if (VLAN_ALLOWED(adapter)) {
+ struct virtchnl_vlan_filter_list *vvfl;
+
+ adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
+
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
+ sizeof(u16);
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ more = true;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
+
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add) {
+ vvfl->vlan_id[i] = f->vlan.vid;
+ i++;
+ f->add = false;
+ if (i == count)
+ break;
+ }
+ }
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- return;
- }
- vvfl->vsi_id = adapter->vsi_res->vsi_id;
- vvfl->num_elements = count;
- list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->add) {
- vvfl->vlan_id[i] = f->vlan;
- i++;
- f->add = false;
- if (i == count)
- break;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ } else {
+ struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
+
+ adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
+
+ len = sizeof(*vvfl_v2) + ((count - 1) *
+ sizeof(struct virtchnl_vlan_filter));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
+ sizeof(struct virtchnl_vlan_filter);
+ len = sizeof(*vvfl_v2) +
+ ((count - 1) *
+ sizeof(struct virtchnl_vlan_filter));
+ more = true;
}
- }
- if (!more)
- adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ vvfl_v2 = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl_v2) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
- kfree(vvfl);
+ vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
+ vvfl_v2->num_elements = count;
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add) {
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &adapter->vlan_v2_caps.filtering.filtering_support;
+ struct virtchnl_vlan *vlan;
+
+ /* give priority to outer if it's enabled */
+ if (filtering_support->outer)
+ vlan = &vvfl_v2->filters[i].outer;
+ else
+ vlan = &vvfl_v2->filters[i].inner;
+
+ vlan->tci = f->vlan.vid;
+ vlan->tpid = f->vlan.tpid;
+
+ i++;
+ f->add = false;
+ if (i == count)
+ break;
+ }
+ }
+
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
+ (u8 *)vvfl_v2, len);
+ kfree(vvfl_v2);
+ }
}
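The (count - 1) in the length math reflects the virtchnl convention of ending message structs with a one-element array rather than a flexible array member (assumed layout, abridged):

	/* struct virtchnl_vlan_filter_list_v2 {
	 *         u16 vport_id;
	 *         u16 num_elements;
	 *         ...
	 *         struct virtchnl_vlan_filter filters[1];  // 1st element inline
	 * };
	 * len = sizeof(*vvfl_v2) + (count - 1) * sizeof(filters[0]);
	 */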
/**
@@ -659,7 +768,6 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
**/
void iavf_del_vlans(struct iavf_adapter *adapter)
{
- struct virtchnl_vlan_filter_list *vvfl;
struct iavf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
@@ -680,56 +788,116 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
* filters marked for removal to enable bailing out before
* sending a virtchnl message
*/
- if (f->remove && !VLAN_ALLOWED(adapter)) {
+ if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
list_del(&f->list);
kfree(f);
} else if (f->remove) {
count++;
}
}
- if (!count) {
+ if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
- adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE -
- sizeof(struct virtchnl_vlan_filter_list)) /
- sizeof(u16);
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- more = true;
- }
- vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
+ if (VLAN_ALLOWED(adapter)) {
+ struct virtchnl_vlan_filter_list *vvfl;
+
+ adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
+
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
+ sizeof(u16);
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ more = true;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
+
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->remove) {
+ vvfl->vlan_id[i] = f->vlan.vid;
+ i++;
+ list_del(&f->list);
+ kfree(f);
+ if (i == count)
+ break;
+ }
+ }
+
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- return;
- }
- vvfl->vsi_id = adapter->vsi_res->vsi_id;
- vvfl->num_elements = count;
- list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
- if (f->remove) {
- vvfl->vlan_id[i] = f->vlan;
- i++;
- list_del(&f->list);
- kfree(f);
- if (i == count)
- break;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ } else {
+ struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
+
+ adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
+
+ len = sizeof(*vvfl_v2) +
+ ((count - 1) * sizeof(struct virtchnl_vlan_filter));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE -
+ sizeof(*vvfl_v2)) /
+ sizeof(struct virtchnl_vlan_filter);
+ len = sizeof(*vvfl_v2) +
+ ((count - 1) *
+ sizeof(struct virtchnl_vlan_filter));
+ more = true;
}
- }
- if (!more)
- adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ vvfl_v2 = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl_v2) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
- kfree(vvfl);
+ vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
+ vvfl_v2->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->remove) {
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &adapter->vlan_v2_caps.filtering.filtering_support;
+ struct virtchnl_vlan *vlan;
+
+ /* give priority to outer if it's enabled */
+ if (filtering_support->outer)
+ vlan = &vvfl_v2->filters[i].outer;
+ else
+ vlan = &vvfl_v2->filters[i].inner;
+
+ vlan->tci = f->vlan.vid;
+ vlan->tpid = f->vlan.tpid;
+
+ list_del(&f->list);
+ kfree(f);
+ i++;
+ if (i == count)
+ break;
+ }
+ }
+
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
+ (u8 *)vvfl_v2, len);
+ kfree(vvfl_v2);
+ }
}
/**
@@ -762,15 +930,23 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
if (flags & FLAG_VF_MULTICAST_PROMISC) {
adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
- dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+ dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
+ adapter->netdev->name);
}
if (!flags) {
- adapter->flags &= ~(IAVF_FLAG_PROMISC_ON |
- IAVF_FLAG_ALLMULTI_ON);
- adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC |
- IAVF_FLAG_AQ_RELEASE_ALLMULTI);
- dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
+ adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ }
+
+ if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
+ adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+ dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
+ adapter->netdev->name);
+ }
}
adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
@@ -948,6 +1124,204 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
}
+/**
+ * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
+ * @tpid: VLAN TPID (e.g. 0x8100, 0x88a8)
+ */
+static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
+{
+ switch (tpid) {
+ case ETH_P_8021Q:
+ return VIRTCHNL_VLAN_ETHERTYPE_8100;
+ case ETH_P_8021AD:
+ return VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
+ * @adapter: adapter structure
+ * @msg: message structure used for updating offloads over virtchnl to update
+ * @tpid: VLAN TPID (e.g. 0x8100, 0x88a8)
+ * @offload_op: opcode used to determine which support structure to check
+ */
+static int
+iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
+ struct virtchnl_vlan_setting *msg, u16 tpid,
+ enum virtchnl_ops offload_op)
+{
+ struct virtchnl_vlan_supported_caps *offload_support;
+ u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);
+
+ /* reference the correct offload support structure */
+ switch (offload_op) {
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ offload_support =
+ &adapter->vlan_v2_caps.offloads.stripping_support;
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ offload_support =
+ &adapter->vlan_v2_caps.offloads.insertion_support;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
+ offload_op);
+ return -EINVAL;
+ }
+
+ /* make sure ethertype is supported */
+ if (offload_support->outer & vc_ethertype &&
+ offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+ msg->outer_ethertype_setting = vc_ethertype;
+ } else if (offload_support->inner & vc_ethertype &&
+ offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+ msg->inner_ethertype_setting = vc_ethertype;
+ } else {
+ dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
+ offload_op, tpid);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID
+ * @offload_op: opcode used to determine which AQ required bit to clear
+ */
+static void
+iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
+ enum virtchnl_ops offload_op)
+{
+ switch (offload_op) {
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
+ offload_op);
+ }
+}
+
+/**
+ * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8)
+ * @offload_op: offload_op used to make the request over virtchnl
+ */
+static void
+iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
+ enum virtchnl_ops offload_op)
+{
+ struct virtchnl_vlan_setting *msg;
+ int len = sizeof(*msg);
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
+ offload_op, adapter->current_op);
+ return;
+ }
+
+ adapter->current_op = offload_op;
+
+ msg = kzalloc(len, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ msg->vport_id = adapter->vsi_res->vsi_id;
+
+ /* always clear to prevent unsupported and endless requests */
+ iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
+
+ /* only send valid offload requests */
+ if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
+ iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len);
+ else
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+
+ kfree(msg);
+}
+
+/**
+ * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to enable VLAN stripping
+ */
+void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
+}
+
+/**
+ * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to disable VLAN stripping
+ */
+void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
+}
+
+/**
+ * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to enable VLAN insertion
+ */
+void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
+}
+
+/**
+ * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to disable VLAN insertion
+ */
+void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
+}
+
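Callers pass the TPID matching the netdev feature being toggled; for example (illustrative):

	/* user enabled rx-vlan-offload (CTAG) and rx-vlan-stag-hw-parse: */
	iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);  /* 0x8100 */
	iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD); /* 0x88a8 */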
#define IAVF_MAX_SPEED_STRLEN 13
/**
@@ -1017,7 +1391,7 @@ print_link_msg:
} else if (link_speed_mbps == SPEED_UNKNOWN) {
snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
} else {
- snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s",
+ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
link_speed_mbps, "Mbps");
}
@@ -1522,7 +1896,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
- dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
+ dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
adapter->flags |= IAVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
@@ -1751,6 +2125,26 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ iavf_parse_vf_resource_msg(adapter);
+
+ /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
+ * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
+ * configuration
+ */
+ if (VLAN_V2_ALLOWED(adapter))
+ break;
+ /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
+ * wasn't successfully negotiated with the PF
+ */
+ }
+ fallthrough;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
+ if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
+ memcpy(&adapter->vlan_v2_caps, msg,
+ min_t(u16, msglen,
+ sizeof(adapter->vlan_v2_caps)));
+
iavf_process_config(adapter);
/* unlock crit_lock before acquiring rtnl_lock as other
@@ -1758,13 +2152,23 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
* crit_lock
*/
mutex_unlock(&adapter->crit_lock);
+ /* VLAN capabilities can change during VFR, so make sure to
+ * update the netdev features with the new capabilities
+ */
rtnl_lock();
- netdev_update_features(adapter->netdev);
+ netdev_update_features(netdev);
rtnl_unlock();
if (iavf_lock_timeout(&adapter->crit_lock, 10000))
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
__FUNCTION__);
+ /* Request VLAN offload settings */
+ if (VLAN_V2_ALLOWED(adapter))
+ iavf_set_vlan_offload_features(adapter, 0,
+ netdev->features);
+
+ iavf_set_queue_vlan_tag_loc(adapter);
+
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b2db39ee5f85..4e16d185077d 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -112,7 +112,6 @@
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
#define ICE_CHNL_START_TC 1
-#define ICE_CHNL_MAX_TC 16
#define ICE_MAX_RESET_WAIT 20
@@ -201,6 +200,7 @@ struct ice_channel {
struct ice_aqc_vsi_props info;
u64 max_tx_rate;
u64 min_tx_rate;
+ atomic_t num_sb_fltr;
struct ice_vsi *ch_vsi;
};
@@ -503,6 +503,7 @@ struct ice_pf {
struct pci_dev *pdev;
struct devlink_region *nvm_region;
+ struct devlink_region *sram_region;
struct devlink_region *devcaps_region;
/* devlink port data */
@@ -552,6 +553,7 @@ struct ice_pf {
spinlock_t aq_wait_lock;
struct hlist_head aq_wait_list;
wait_queue_head_t aq_wait_queue;
+ bool fw_emp_reset_disabled;
wait_queue_head_t reset_wait_queue;
@@ -576,6 +578,7 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
+ u8 rdma_mode;
u16 dcbx_cap;
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
@@ -789,6 +792,9 @@ static inline void ice_clear_sriov_cap(struct ice_pf *pf)
#define ICE_FD_STAT_PF_IDX(base_idx) \
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
+#define ICE_FD_STAT_CH 1
+#define ICE_FD_CH_STAT_IDX(base_idx) \
+ (ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
/**
* ice_is_adq_active - any active ADQs
@@ -847,9 +853,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
-const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
+void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
bool is_tun);
@@ -860,6 +866,7 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
u32 *rule_locs);
+void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 4eef3488d86f..ad1dcfa5ff65 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -117,6 +117,8 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
#define ICE_AQC_CAPS_RDMA 0x0051
+#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
@@ -1408,6 +1410,11 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
#define ICE_AQC_NVM_ACTIV_SEL_MASK ICE_M(0x7, 3)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
+#define ICE_AQC_NVM_RESET_LVL_M ICE_M(0x3, 0) /* Write reply only */
+#define ICE_AQC_NVM_POR_FLAG 0
+#define ICE_AQC_NVM_PERST_FLAG 1
+#define ICE_AQC_NVM_EMPR_FLAG 2
+#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index fafe020e46ee..1a5ece3bce79 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -776,7 +776,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
u16 pf_q;
u8 tc;
@@ -821,9 +821,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
ring->q_handle, 1, qg_buf, buf_len,
NULL);
if (status) {
- dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
- ice_stat_str(status));
- return -ENODEV;
+ dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ return status;
}
/* Add Tx Queue TEID into the VSI Tx ring from the
@@ -946,7 +946,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
u32 val;
/* clear cause_ena bit for disabled queues */
@@ -970,18 +970,18 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
rel_vmvf_num, NULL);
/* if the disable queue command was exercised during an
- * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+ * active reset flow, -EBUSY is returned.
* This is not an error as the reset operation disables
* queues at the hardware level anyway.
*/
- if (status == ICE_ERR_RESET_ONGOING) {
+ if (status == -EBUSY) {
dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ } else if (status == -ENOENT) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
- ice_stat_str(status));
- return -ENODEV;
+ dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
+ status);
+ return status;
}
return 0;
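These ice hunks are part of a tree-wide conversion from enum ice_status to standard errnos; the substitutions visible in this diff:

	/* ICE_ERR_PARAM                -> -EINVAL
	 * ICE_ERR_NO_MEMORY            -> -ENOMEM
	 * ICE_ERR_DEVICE_NOT_SUPPORTED -> -ENODEV
	 * ICE_ERR_BUF_TOO_SHORT        -> -EINVAL
	 * ICE_ERR_CFG                  -> -EIO
	 * ICE_ERR_RESET_ONGOING        -> -EBUSY
	 * ICE_ERR_DOES_NOT_EXIST       -> -ENOENT
	 */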
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
new file mode 100644
index 000000000000..57abd52386d0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_CGU_REGS_H_
+#define _ICE_CGU_REGS_H_
+
+#define NAC_CGU_DWORD9 0x24
+union nac_cgu_dword9 {
+ struct {
+ u32 time_ref_freq_sel : 3;
+ u32 clk_eref1_en : 1;
+ u32 clk_eref0_en : 1;
+ u32 time_ref_en : 1;
+ u32 time_sync_en : 1;
+ u32 one_pps_out_en : 1;
+ u32 clk_ref_synce_en : 1;
+ u32 clk_synce1_en : 1;
+ u32 clk_synce0_en : 1;
+ u32 net_clk_ref1_en : 1;
+ u32 net_clk_ref0_en : 1;
+ u32 clk_synce1_amp : 2;
+ u32 misc6 : 1;
+ u32 clk_synce0_amp : 2;
+ u32 one_pps_out_amp : 2;
+ u32 misc24 : 12;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD19 0x4c
+union nac_cgu_dword19 {
+ struct {
+ u32 tspll_fbdiv_intgr : 8;
+ u32 fdpll_ulck_thr : 5;
+ u32 misc15 : 3;
+ u32 tspll_ndivratio : 4;
+ u32 tspll_iref_ndivratio : 3;
+ u32 misc19 : 1;
+ u32 japll_ndivratio : 4;
+ u32 japll_iref_ndivratio : 3;
+ u32 misc27 : 1;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD22 0x58
+union nac_cgu_dword22 {
+ struct {
+ u32 fdpll_frac_div_out_nc : 2;
+ u32 fdpll_lock_int_for : 1;
+ u32 synce_hdov_int_for : 1;
+ u32 synce_lock_int_for : 1;
+ u32 fdpll_phlead_slip_nc : 1;
+ u32 fdpll_acc1_ovfl_nc : 1;
+ u32 fdpll_acc2_ovfl_nc : 1;
+ u32 synce_status_nc : 6;
+ u32 fdpll_acc1f_ovfl : 1;
+ u32 misc18 : 1;
+ u32 fdpllclk_div : 4;
+ u32 time1588clk_div : 4;
+ u32 synceclk_div : 4;
+ u32 synceclk_sel_div2 : 1;
+ u32 fdpllclk_sel_div2 : 1;
+ u32 time1588clk_sel_div2 : 1;
+ u32 misc3 : 1;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD24 0x60
+union nac_cgu_dword24 {
+ struct {
+ u32 tspll_fbdiv_frac : 22;
+ u32 misc20 : 2;
+ u32 ts_pll_enable : 1;
+ u32 time_sync_tspll_align_sel : 1;
+ u32 ext_synce_sel : 1;
+ u32 ref1588_ck_div : 4;
+ u32 time_ref_sel : 1;
+ } field;
+ u32 val;
+};
+
+#define TSPLL_CNTR_BIST_SETTINGS 0x344
+union tspll_cntr_bist_settings {
+ struct {
+ u32 i_irefgen_settling_time_cntr_7_0 : 8;
+ u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
+ u32 reserved195 : 5;
+ u32 i_plllock_sel_0 : 1;
+ u32 i_plllock_sel_1 : 1;
+ u32 i_plllock_cnt_6_0 : 7;
+ u32 i_plllock_cnt_10_7 : 4;
+ u32 reserved200 : 4;
+ } field;
+ u32 val;
+};
+
+#define TSPLL_RO_BWM_LF 0x370
+union tspll_ro_bwm_lf {
+ struct {
+ u32 bw_freqov_high_cri_7_0 : 8;
+ u32 bw_freqov_high_cri_9_8 : 2;
+ u32 biascaldone_cri : 1;
+ u32 plllock_gain_tran_cri : 1;
+ u32 plllock_true_lock_cri : 1;
+ u32 pllunlock_flag_cri : 1;
+ u32 afcerr_cri : 1;
+ u32 afcdone_cri : 1;
+ u32 feedfwrdgain_cal_cri_7_0 : 8;
+ u32 m2fbdivmod_cri_7_0 : 8;
+ } field;
+ u32 val;
+};
+
+#endif /* _ICE_CGU_REGS_H_ */
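These unions let callers read a whole CGU dword and then pick bit-fields by name; a hedged usage sketch (rd32() and ice_debug() are assumed to be the driver's usual MMIO/debug helpers, and hw a previously initialized struct ice_hw pointer):

	union tspll_ro_bwm_lf bwm_lf;

	bwm_lf.val = rd32(hw, TSPLL_RO_BWM_LF);   /* read the raw 32-bit register */
	if (bwm_lf.field.plllock_true_lock_cri)   /* then test individual fields */
		ice_debug(hw, ICE_DBG_INIT, "TS PLL locked\n");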
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index b3066d0fea8b..408d15a5b0e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
-#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
@@ -16,10 +15,10 @@
* This function sets the MAC type of the adapter based on the
* vendor ID and device ID stored in the HW structure.
*/
-static enum ice_status ice_set_mac_type(struct ice_hw *hw)
+static int ice_set_mac_type(struct ice_hw *hw)
{
if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
- return ICE_ERR_DEVICE_NOT_SUPPORTED;
+ return -ENODEV;
switch (hw->device_id) {
case ICE_DEV_ID_E810C_BACKPLANE:
@@ -99,7 +98,7 @@ bool ice_is_e810t(struct ice_hw *hw)
* Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
* configuration, flow director filters, etc.).
*/
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
+int ice_clear_pf_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -123,21 +122,21 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
* ice_discover_dev_caps is expected to be called before this function is
* called.
*/
-static enum ice_status
+static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_read_resp *resp;
struct ice_aqc_manage_mac_read *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u16 flags;
u8 i;
cmd = &desc.params.mac_read;
if (buf_size < sizeof(*resp))
- return ICE_ERR_BUF_TOO_SHORT;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
@@ -150,7 +149,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
/* A single port can report up to two (LAN and WoL) addresses */
@@ -176,7 +175,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
*
* Returns the various PHY capabilities supported on the Port (0x0600)
*/
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *pcaps,
struct ice_sq_cd *cd)
@@ -184,18 +183,18 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
cmd = &desc.params.get_phy;
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
!ice_fw_supports_report_dflt_cfg(hw))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
@@ -252,7 +251,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
* returns an error (ENOENT), no cage is present. If no cage is present, the
* connection type is backplane or BASE-T.
*/
-static enum ice_status
+static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
struct ice_sq_cd *cd)
{
@@ -418,7 +417,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -429,12 +428,12 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
- enum ice_status status;
struct ice_hw *hw;
u16 cmd_flags;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
li_old = &pi->phy.link_info_old;
hw_media_type = &pi->phy.media_type;
@@ -556,7 +555,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
*
* Set MAC configuration (0x0603)
*/
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_cfg *cmd;
@@ -565,7 +564,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
cmd = &desc.params.set_mac_cfg;
if (max_frame_size == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
@@ -580,17 +579,17 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
-static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
+static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
- enum ice_status status;
+ int status;
hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->switch_info), GFP_KERNEL);
sw = hw->switch_info;
if (!sw)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
INIT_LIST_HEAD(&sw->vsi_list_map_head);
sw->prof_res_bm_init = 0;
@@ -666,17 +665,17 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
* ice_get_fw_log_cfg - get FW logging configuration
* @hw: pointer to the HW struct
*/
-static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
+static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
- enum ice_status status;
__le16 *config;
+ int status;
u16 size;
size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
if (!config)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
@@ -738,15 +737,15 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
* messages from FW to SW. Interrupts are typically disabled during the device's
* initialization phase.
*/
-static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
+static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
struct ice_aqc_fw_logging *cmd;
- enum ice_status status = 0;
u16 i, chgs = 0, len = 0;
struct ice_aq_desc desc;
__le16 *data = NULL;
u8 actv_evnts = 0;
void *buf = NULL;
+ int status = 0;
if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
return 0;
@@ -790,7 +789,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
sizeof(*data),
GFP_KERNEL);
if (!data)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
val = i << ICE_AQC_FW_LOG_ID_S;
@@ -904,12 +903,12 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw(struct ice_hw *hw)
+int ice_init_hw(struct ice_hw *hw)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
u16 mac_buf_len;
void *mac_buf;
+ int status;
/* Set MAC type based on DeviceID */
status = ice_set_mac_type(hw);
@@ -956,7 +955,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->port_info), GFP_KERNEL);
if (!hw->port_info) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_cqinit;
}
@@ -985,7 +984,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_sched;
}
@@ -1006,7 +1005,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
/* need a valid SW entry point to build a Tx tree */
if (!hw->sw_entry_point_layer) {
ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_unroll_sched;
}
INIT_LIST_HEAD(&hw->agg_list);
@@ -1026,7 +1025,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
if (!mac_buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct;
}
@@ -1096,7 +1095,7 @@ void ice_deinit_hw(struct ice_hw *hw)
* ice_check_reset - Check to see if a global reset is complete
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_check_reset(struct ice_hw *hw)
+int ice_check_reset(struct ice_hw *hw)
{
u32 cnt, reg = 0, grst_timeout, uld_mask;
@@ -1116,7 +1115,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
if (cnt == grst_timeout) {
ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
@@ -1143,7 +1142,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
reg);
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -1156,7 +1155,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
* If a global reset has been triggered, this function checks
* for its completion and then issues the PF reset
*/
-static enum ice_status ice_pf_reset(struct ice_hw *hw)
+static int ice_pf_reset(struct ice_hw *hw)
{
u32 cnt, reg;
@@ -1169,7 +1168,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
(rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
/* poll on global reset currently in progress until done */
if (ice_check_reset(hw))
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
return 0;
}
@@ -1194,7 +1193,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -1212,7 +1211,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
* This has to be cleared using ice_clear_pxe_mode again, once the AQ
* interface has been restored in the rebuild flow.
*/
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
u32 val = 0;
@@ -1228,7 +1227,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
val = GLGEN_RTRIG_GLOBR_M;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
val |= rd32(hw, GLGEN_RTRIG);
@@ -1247,16 +1246,16 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
*
* Copies rxq context from dense structure to HW register space
*/
-static enum ice_status
+static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
u8 i;
if (!ice_rxq_ctx)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
if (rxq_index > QRX_CTRL_MAX_INDEX)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
@@ -1306,14 +1305,14 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
if (!rlan_ctx)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
rlan_ctx->prefena = 1;
@@ -1369,9 +1368,8 @@ static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
- return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
- (struct ice_aq_desc *)desc,
- buf, buf_size, cd));
+ return ice_sq_send_cmd(hw, ice_get_sbq(hw),
+ (struct ice_aq_desc *)desc, buf, buf_size, cd);
}
/**
@@ -1453,17 +1451,17 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
* Retry sending the FW Admin Queue command, multiple times, to the FW Admin
* Queue if the EBUSY AQ error is returned.
*/
-static enum ice_status
+static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc_cpy;
- enum ice_status status;
bool is_cmd_for_retry;
u8 *buf_cpy = NULL;
u8 idx = 0;
u16 opcode;
+ int status;
opcode = le16_to_cpu(desc->opcode);
is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
@@ -1473,7 +1471,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf) {
buf_cpy = kzalloc(buf_size, GFP_KERNEL);
if (!buf_cpy)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
memcpy(&desc_cpy, desc, sizeof(desc_cpy));
@@ -1510,13 +1508,13 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*
* Helper function to send FW Admin Queue commands to the FW Admin Queue.
*/
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd = &desc->params.res_owner;
bool lock_acquired = false;
- enum ice_status status;
+ int status;
/* When a package download is in process (i.e. when the firmware's
* Global Configuration Lock resource is held), only the Download
@@ -1555,11 +1553,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
*
* Get the firmware version (0x0001) from the admin queue commands
*/
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
struct ice_aqc_get_ver *resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
resp = &desc.params.get_ver;
@@ -1590,7 +1588,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
*
* Send the driver version (0x0002) to the firmware
*/
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
@@ -1601,7 +1599,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
cmd = &desc.params.driver_ver;
if (!dv)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
@@ -1627,7 +1625,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well (0x0003).
*/
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
struct ice_aqc_q_shutdown *cmd;
struct ice_aq_desc desc;
@@ -1654,12 +1652,12 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* Requests common resource using the admin queue commands (0x0008).
* When attempting to acquire the Global Config Lock, the driver can
* learn of three states:
- * 1) ICE_SUCCESS - acquired lock, and can perform download package
- * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
- * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
- * successfully downloaded the package; the driver does
- * not have to download the package and can continue
- * loading
+ * 1) 0 - acquired lock, and can perform download package
+ * 2) -EIO - did not get lock, driver should fail to load
+ * 3) -EALREADY - did not get lock, but another driver has
+ * successfully downloaded the package; the driver does
+ * not have to download the package and can continue
+ * loading
*
* Note that if the caller is in an acquire lock, perform action, release lock
* phase of operation, it is possible that the FW may detect a timeout and issue
@@ -1668,14 +1666,14 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* will likely get an error propagated back to it indicating the Download
* Package, Update Package or the Release Resource AQ commands timed out.
*/
-static enum ice_status
+static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd_resp = &desc.params.res_owner;
@@ -1707,15 +1705,15 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
} else if (le16_to_cpu(cmd_resp->status) ==
ICE_AQ_RES_GLBL_IN_PROG) {
*timeout = le32_to_cpu(cmd_resp->timeout);
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
} else if (le16_to_cpu(cmd_resp->status) ==
ICE_AQ_RES_GLBL_DONE) {
- return ICE_ERR_AQ_NO_WORK;
+ return -EALREADY;
}
/* invalid FW response, force a timeout immediately */
*timeout = 0;
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
}
/* If the resource is held by some other driver, the command completes
@@ -1737,7 +1735,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
*
* release common resource using the admin queue commands (0x0009)
*/
-static enum ice_status
+static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
@@ -1763,23 +1761,23 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
*
* This function will attempt to acquire the ownership of a resource.
*/
-enum ice_status
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
u32 delay = ICE_RES_POLLING_DELAY_MS;
u32 time_left = timeout;
- enum ice_status status;
+ int status;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
+ /* A return code of -EALREADY means that another driver has
* previously acquired the resource and performed any necessary updates;
* in this case the caller does not obtain the resource and has no
* further work to do.
*/
- if (status == ICE_ERR_AQ_NO_WORK)
+ if (status == -EALREADY)
goto ice_acquire_res_exit;
if (status)
@@ -1792,7 +1790,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
timeout = (timeout > delay) ? timeout - delay : 0;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- if (status == ICE_ERR_AQ_NO_WORK)
+ if (status == -EALREADY)
/* lock free, but no work to do */
break;
@@ -1800,15 +1798,15 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/* lock acquired */
break;
}
- if (status && status != ICE_ERR_AQ_NO_WORK)
+ if (status && status != -EALREADY)
ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
ice_acquire_res_exit:
- if (status == ICE_ERR_AQ_NO_WORK) {
+ if (status == -EALREADY) {
if (access == ICE_RES_WRITE)
ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
else
- ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
+ ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
}
return status;
}
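A caller of ice_acquire_res() has to distinguish "lock owned" from "work already done" from "hard failure". A minimal sketch, modeled on the package download path and hedged accordingly (the download step itself is elided):

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
	if (status == -EALREADY)
		return 0;	/* another PF downloaded the package; nothing to do */
	if (status)
		return status;	/* lock not obtained; caller should fail to load */

	/* ... lock is owned: perform the download ... */

	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);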
@@ -1822,16 +1820,15 @@ ice_acquire_res_exit:
*/
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
- enum ice_status status;
u32 total_delay = 0;
+ int status;
status = ice_aq_release_res(hw, res, 0, NULL);
/* there are some rare cases when trying to release the resource
* results in an admin queue timeout, so handle them correctly
*/
- while ((status == ICE_ERR_AQ_TIMEOUT) &&
- (total_delay < hw->adminq.sq_cmd_timeout)) {
+ while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) {
mdelay(1);
status = ice_aq_release_res(hw, res, 0, NULL);
total_delay++;
@@ -1849,7 +1846,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
*
* Helper function to allocate/free resources using the admin queue commands
*/
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
@@ -1860,10 +1857,10 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
cmd = &desc.params.sw_res_ctrl;
if (!buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (buf_size < flex_array_size(buf, elem, num_entries))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
@@ -1882,17 +1879,17 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
* @btm: allocate from bottom
* @res: pointer to array that will receive the resources
*/
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Prepare buffer to allocate resource. */
buf->num_elems = cpu_to_le16(num);
@@ -1920,16 +1917,16 @@ ice_alloc_res_exit:
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Prepare buffer to free resource. */
buf->num_elems = cpu_to_le16(num);
@@ -2071,6 +2068,18 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
prefix, caps->max_mtu);
break;
+ case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: pcie_reset_avoidance = %d\n", prefix,
+ caps->pcie_reset_avoidance);
+ break;
+ case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: reset_restrict_support = %d\n", prefix,
+ caps->reset_restrict_support);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
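Each new capability follows the same parse shape as the rest of the switch: the firmware reports a (cap, number) pair and the driver reduces it to a flag plus a debug line. A sketch of what one more case would look like, with a hypothetical capability ID and flag field:

	case ICE_AQC_CAPS_EXAMPLE:	/* hypothetical capability ID */
		caps->example_ena = (number == 1);	/* hypothetical flag */
		ice_debug(hw, ICE_DBG_INIT, "%s: example_ena = %d\n",
			  prefix, caps->example_ena);
		break;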
@@ -2180,6 +2189,18 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
+ info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+ } else {
+ /* Unknown clock frequency, so assume a (probably incorrect)
+	 * default to avoid out-of-bounds lookups of frequency
+ * related information.
+ */
+ ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
+ info->clk_freq);
+ info->time_ref = ICE_TIME_REF_FREQ_25_000;
+ }
+
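Clamping an out-of-range value to a safe default before it is ever used as an index is a general defensive pattern; the same guard in isolation, with hypothetical names:

	/* validate an untrusted index before the table lookup */
	if (idx >= ARRAY_SIZE(freq_table))
		idx = DEFAULT_FREQ_IDX;	/* hypothetical fallback entry */
	freq = freq_table[idx];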
ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
func_p->common_cap.ieee_1588);
ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
@@ -2486,19 +2507,19 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
* buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
* firmware could return) to avoid this.
*/
-enum ice_status
+int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_cap;
if (opc != ice_aqc_opc_list_func_caps &&
opc != ice_aqc_opc_list_dev_caps)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -2517,16 +2538,16 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* Read the device capabilities and extract them into the dev_caps structure
* for later use.
*/
-enum ice_status
+int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Although the driver doesn't know the number of capabilities the
* device will return, we can simply send a 4KB buffer, the maximum
@@ -2551,16 +2572,16 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
* Read the function capabilities and extract them into the func_caps structure
* for later use.
*/
-static enum ice_status
+static int
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Although the driver doesn't know the number of capabilities the
* device will return, we can simply send a 4KB buffer, the maximum
@@ -2650,9 +2671,9 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
* ice_get_caps - get info about the HW
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_caps(struct ice_hw *hw)
+int ice_get_caps(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
status = ice_discover_dev_caps(hw, &hw->dev_caps);
if (status)
@@ -2670,7 +2691,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
*
* This function is used to write MAC address to the NVM (0x0108).
*/
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
@@ -2692,7 +2713,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
-static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
+static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -2903,15 +2924,15 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
* mode as the PF may not have the privilege to set some of the PHY Config
* parameters. This status will be indicated by the command response (0x0601).
*/
-enum ice_status
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!cfg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Ensure that only valid bits of cfg->caps can be turned on. */
if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
@@ -2952,13 +2973,13 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
* ice_update_link_info - update status of the HW network link
* @pi: port info structure of the interested logical port
*/
-enum ice_status ice_update_link_info(struct ice_port_info *pi)
+int ice_update_link_info(struct ice_port_info *pi)
{
struct ice_link_status *li;
- enum ice_status status;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
li = &pi->phy.link_info;
@@ -2974,7 +2995,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
@@ -3070,7 +3091,7 @@ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
* @cfg: PHY configuration data to set FC mode
* @req_mode: FC mode to configure
*/
-enum ice_status
+int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fc_mode req_mode)
{
@@ -3078,7 +3099,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
u8 pause_mask = 0x0;
if (!pi || !cfg)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
switch (req_mode) {
case ICE_FC_FULL:
@@ -3117,23 +3138,23 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
*
* Set the requested flow control mode.
*/
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || !aq_failures)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
*aq_failures = 0;
hw = pi->hw;
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
@@ -3258,22 +3279,22 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
* @cfg: PHY configuration data to set FEC mode
* @fec: FEC mode to configure
*/
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || !cfg)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
hw = pi->hw;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false,
(ice_fw_supports_report_dflt_cfg(hw) ?
@@ -3313,7 +3334,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
cfg->link_fec_opt |= pcaps->link_fec_options;
break;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
break;
}
@@ -3344,13 +3365,13 @@ out:
* The variable link_up is invalid if status is non-zero. As a
* result of this call, link status reporting becomes enabled
*/
-enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
+int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
struct ice_phy_info *phy_info;
- enum ice_status status = 0;
+ int status = 0;
if (!pi || !link_up)
- return ICE_ERR_PARAM;
+ return -EINVAL;
phy_info = &pi->phy;
@@ -3375,7 +3396,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
*
* Sets up the link and restarts the Auto-Negotiation over the link.
*/
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
@@ -3405,7 +3426,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
*
* Set event mask (0x0613)
*/
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
@@ -3430,7 +3451,7 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
*
* Enable/disable loopback on a given port
*/
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_lb *cmd;
@@ -3453,7 +3474,7 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
*
* Set LED value for the given port (0x06e9)
*/
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd)
{
@@ -3488,17 +3509,17 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
*
* Read/Write SFF EEPROM (0x06EE)
*/
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd)
{
struct ice_aqc_sff_eeprom *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || (mem_addr & 0xff00))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
cmd = &desc.params.read_write_sff_param;
@@ -3527,23 +3548,23 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
*
* Internal function to get (0x0B05) or set (0x0B03) RSS lookup table
*/
-static enum ice_status
+static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
struct ice_aqc_get_set_rss_lut *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u8 *lut;
if (!params)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_handle = params->vsi_handle;
lut = params->lut;
if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
- return ICE_ERR_PARAM;
+ return -EINVAL;
lut_size = params->lut_size;
lut_type = params->lut_type;
@@ -3572,7 +3593,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
break;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_get_set_rss_lut_exit;
}
@@ -3607,7 +3628,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
}
fallthrough;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_get_set_rss_lut_exit;
}
@@ -3626,7 +3647,7 @@ ice_aq_get_set_rss_lut_exit:
*
* get the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
return __ice_aq_get_set_rss_lut(hw, get_params, false);
@@ -3639,7 +3660,7 @@ ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_
*
* set the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
return __ice_aq_get_set_rss_lut(hw, set_params, true);
@@ -3654,10 +3675,9 @@ ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_
*
* get (0x0B04) or set (0x0B02) the RSS key per VSI
*/
-static enum
-ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
- struct ice_aqc_get_set_rss_keys *key,
- bool set)
+static int
+__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ struct ice_aqc_get_set_rss_keys *key, bool set)
{
struct ice_aqc_get_set_rss_key *cmd_resp;
u16 key_size = sizeof(*key);
@@ -3688,12 +3708,12 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *key)
{
if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
key, false);
@@ -3707,12 +3727,12 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
*
* set the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys)
{
if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
keys, true);
@@ -3739,7 +3759,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
* Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
* flow.
*/
-static enum ice_status
+static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd)
@@ -3754,10 +3774,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
if (!qg_list)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
for (i = 0, list = qg_list; i < num_qgrps; i++) {
sum_size += struct_size(list, txqs, list->num_txqs);
@@ -3766,7 +3786,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
}
if (buf_size != sum_size)
- return ICE_ERR_PARAM;
+ return -EINVAL;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -3787,7 +3807,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
*
* Disable LAN Tx queue (0x0C31)
*/
-static enum ice_status
+static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
@@ -3796,18 +3816,18 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
u16 i, sz = 0;
+ int status;
cmd = &desc.params.dis_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
/* qg_list can be NULL only in VM/VF reset flow */
if (!qg_list && !rst_src)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd->num_entries = num_qgrps;
@@ -3856,7 +3876,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
}
if (buf_size != sz)
- return ICE_ERR_PARAM;
+ return -EINVAL;
do_aq:
status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
@@ -3914,8 +3934,7 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
cmd->num_qset_grps = num_qset_grps;
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
- buf_size, cd));
+ return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}
/* End of FW Admin Queue command wrappers */
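After the conversion, the direct AQ wrappers in this file all reduce to the same shape: fill a descriptor with an opcode and propagate the int from ice_aq_send_cmd(). A minimal sketch with a hypothetical opcode:

	static int ice_aq_example_cmd(struct ice_hw *hw, struct ice_sq_cd *cd)
	{
		struct ice_aq_desc desc;

		/* example_opcode is hypothetical; real wrappers pass an
		 * ice_aqc_opc_* value
		 */
		ice_fill_dflt_direct_cmd_desc(&desc, example_opcode);
		return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	}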
@@ -4111,7 +4130,7 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @dest_ctx: pointer to memory for the packed structure
* @ce_info: a description of the structure to be transformed
*/
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info)
{
@@ -4141,7 +4160,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
break;
default:
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
}
@@ -4185,7 +4204,7 @@ ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
*
* This function adds one LAN queue
*/
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -4193,19 +4212,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_sched_node *parent;
struct ice_q_ctx *q_ctx;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
if (num_qgrps > 1 || buf->num_txqs > 1)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
hw = pi->hw;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
@@ -4213,7 +4232,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
if (!q_ctx) {
ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
q_handle);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ena_txq_exit;
}
@@ -4221,7 +4240,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN);
if (!parent) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ena_txq_exit;
}
@@ -4290,20 +4309,20 @@ ena_txq_exit:
*
* This function removes queues and their corresponding nodes in SW DB
*/
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handles, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx;
+ int status = -ENOENT;
struct ice_hw *hw;
u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
hw = pi->hw;
@@ -4315,13 +4334,13 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
if (rst_src)
return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
vmvf_num, NULL);
- return ICE_ERR_CFG;
+ return -EIO;
}
buf_size = struct_size(qg_list, q_id, 1);
qg_list = kzalloc(buf_size, GFP_KERNEL);
if (!qg_list)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mutex_lock(&pi->sched_lock);
@@ -4368,18 +4387,18 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
*
* This function adds/updates the VSI queues per TC.
*/
-static enum ice_status
+static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *maxqs, u8 owner)
{
- enum ice_status status = 0;
+ int status = 0;
u8 i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
@@ -4407,7 +4426,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
*
* This function adds/updates the VSI LAN queues per TC.
*/
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs)
{
@@ -4428,9 +4447,8 @@ int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs)
{
- return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
- max_rdmaqs,
- ICE_SCHED_NODE_OWNER_RDMA));
+ return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
+ ICE_SCHED_NODE_OWNER_RDMA);
}
/**
@@ -4451,7 +4469,6 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_aqc_add_rdma_qset_data *buf;
struct ice_sched_node *parent;
- enum ice_status status;
struct ice_hw *hw;
u16 i, buf_size;
int ret;
@@ -4502,12 +4519,10 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
for (i = 0; i < num_qsets; i++) {
node.node_teid = buf->rdma_qsets[i].qset_teid;
- status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
- &node);
- if (status) {
- ret = ice_status_to_errno(status);
+ ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
+ &node);
+ if (ret)
break;
- }
qset_teid[i] = le32_to_cpu(node.node_teid);
}
rdma_error_exit:
@@ -4528,8 +4543,8 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
struct ice_aqc_dis_txq_item *qg_list;
- enum ice_status status = 0;
struct ice_hw *hw;
+ int status = 0;
u16 qg_size;
int i;
@@ -4568,7 +4583,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
mutex_unlock(&pi->sched_lock);
kfree(qg_list);
- return ice_status_to_errno(status);
+ return status;
}
/**
@@ -4577,7 +4592,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
-static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+static int ice_replay_pre_init(struct ice_hw *hw)
{
struct ice_switch_info *sw = hw->switch_info;
u8 i;
@@ -4588,7 +4603,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
* will allow adding rules entries back to filt_rules list,
* which is operational list.
*/
- for (i = 0; i < ICE_SW_LKUP_LAST; i++)
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
list_replace_init(&sw->recp_list[i].filt_rules,
&sw->recp_list[i].filt_replay_rules);
ice_sched_replay_agg_vsi_preinit(hw);
@@ -4604,12 +4619,12 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
* Restore all VSI configuration after reset. It is required to call this
* function with main VSI first.
*/
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Replay pre-initialization if there is any */
if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
@@ -4725,12 +4740,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
*
* This function queries HW element information
*/
-enum ice_status
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf)
{
u16 buf_size, num_elem_ret = 0;
- enum ice_status status;
+ int status;
buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
@@ -4775,7 +4790,7 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
cmd->param_indx = idx;
cmd->param_val = cpu_to_le32(value);
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
@@ -4796,7 +4811,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
{
struct ice_aqc_driver_shared_params *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
return -EIO;
@@ -4810,7 +4825,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
*value = le32_to_cpu(cmd->param_val);
@@ -4840,7 +4855,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
@@ -4860,7 +4875,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
{
struct ice_aqc_gpio *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
cmd = &desc.params.read_write_gpio;
@@ -4869,7 +4884,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
*value = !!cmd->gpio_val;
return 0;
@@ -4903,13 +4918,13 @@ bool ice_fw_supports_link_override(struct ice_hw *hw)
*
* Gets the link default override for a port
*/
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi)
{
u16 i, tlv, tlv_len, tlv_start, buf, offset;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
@@ -4994,7 +5009,7 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
*
* Set the LLDP MIB. (0x0A08)
*/
-enum ice_status
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
@@ -5004,7 +5019,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
cmd = &desc.params.lldp_set_mib;
if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
@@ -5044,7 +5059,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
* @vsi_num: absolute HW index for VSI
* @add: boolean for if adding or removing a filter
*/
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 65c1b3244264..1c57097ddf0b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -14,108 +14,108 @@
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
-enum ice_status ice_init_hw(struct ice_hw *hw);
+int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
-enum ice_status ice_check_reset(struct ice_hw *hw);
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
+int ice_check_reset(struct ice_hw *hw);
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+int ice_create_all_ctrlq(struct ice_hw *hw);
+int ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
-enum ice_status
+int
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
-enum ice_status ice_update_link_info(struct ice_port_info *pi);
-enum ice_status
+int ice_update_link_info(struct ice_port_info *pi);
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
-enum ice_status
+int
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
-enum ice_status
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
-enum ice_status ice_get_caps(struct ice_hw *hw);
+int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params);
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_e810(struct ice_hw *hw);
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
-enum ice_status
+int ice_clear_pf_cfg(struct ice_hw *hw);
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
bool ice_fw_supports_link_override(struct ice_hw *hw);
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
-enum ice_status
+int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fc_mode fc);
bool
@@ -125,27 +125,27 @@ void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec);
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
@@ -159,19 +159,19 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id);
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handle, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
@@ -184,7 +184,7 @@ void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
-enum ice_status
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
int
@@ -199,11 +199,11 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
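Since every prototype above now returns a plain int, callers can propagate failures directly instead of routing them through ice_status_to_errno(). A minimal caller sketch:

	int err;

	err = ice_aq_get_fw_ver(hw, NULL);
	if (err)
		return err;	/* already a negative errno, e.g. -EIO */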
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 03bdb125be36..6bcfee295991 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -87,7 +87,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
@@ -96,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->sq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO);
if (!cq->sq.desc_buf.va)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->sq.desc_buf.size = size;
cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
@@ -107,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.desc_buf.va = NULL;
cq->sq.desc_buf.pa = 0;
cq->sq.desc_buf.size = 0;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
return 0;
@@ -118,7 +118,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
@@ -127,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->rq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO);
if (!cq->rq.desc_buf.va)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->rq.desc_buf.size = size;
return 0;
}
@@ -154,7 +154,7 @@ static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -165,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
sizeof(cq->rq.desc_buf), GFP_KERNEL);
if (!cq->rq.dma_head)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
/* allocate the mapped buffers */
@@ -218,7 +218,7 @@ unwind_alloc_rq_bufs:
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
cq->rq.dma_head = NULL;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -226,7 +226,7 @@ unwind_alloc_rq_bufs:
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -235,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
sizeof(cq->sq.desc_buf), GFP_KERNEL);
if (!cq->sq.dma_head)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
/* allocate the mapped buffers */
@@ -266,10 +266,10 @@ unwind_alloc_sq_bufs:
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
cq->sq.dma_head = NULL;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
-static enum ice_status
+static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
/* Clear Head and Tail */
@@ -283,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
/* Check one register to verify that config was applied */
if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
return 0;
}
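Reading one of the just-written registers back is a cheap check that the configuration actually reached the hardware. The same write-then-verify pattern in isolation:

	/* sketch: program the ring base address, then verify the write took */
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return -EIO;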
@@ -295,8 +295,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
*
* Configure base address and length registers for the transmit queue
*/
-static enum ice_status
-ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
@@ -308,10 +307,9 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*
* Configure base address and length registers for the receive (event queue)
*/
-static enum ice_status
-ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status status;
+ int status;
status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
if (status)
@@ -361,19 +359,19 @@ do { \
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
-static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
if (cq->sq.count > 0) {
/* queue already initialized */
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto init_ctrlq_exit;
}
/* verify input for valid configuration */
if (!cq->num_sq_entries || !cq->sq_buf_size) {
- ret_code = ICE_ERR_CFG;
+ ret_code = -EIO;
goto init_ctrlq_exit;
}
@@ -421,19 +419,19 @@ init_ctrlq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
-static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
if (cq->rq.count > 0) {
/* queue already initialized */
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto init_ctrlq_exit;
}
/* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->rq_buf_size) {
- ret_code = ICE_ERR_CFG;
+ ret_code = -EIO;
goto init_ctrlq_exit;
}
@@ -474,15 +472,14 @@ init_ctrlq_exit:
*
* The main shutdown routine for the Control Transmit Queue
*/
-static enum ice_status
-ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&cq->sq_lock);
if (!cq->sq.count) {
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto shutdown_sq_out;
}
@@ -541,15 +538,14 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
*
* The main shutdown routine for the Control Receive Queue
*/
-static enum ice_status
-ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&cq->rq_lock);
if (!cq->rq.count) {
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto shutdown_rq_out;
}
@@ -576,17 +572,17 @@ shutdown_rq_out:
* ice_init_check_adminq - Check version for Admin Queue to know if it's alive
* @hw: pointer to the hardware structure
*/
-static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
+static int ice_init_check_adminq(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->adminq;
- enum ice_status status;
+ int status;
status = ice_aq_get_fw_ver(hw, NULL);
if (status)
goto init_ctrlq_free_rq;
if (!ice_aq_ver_check(hw)) {
- status = ICE_ERR_FW_API_VER;
+ status = -EIO;
goto init_ctrlq_free_rq;
}
@@ -612,10 +608,10 @@ init_ctrlq_free_rq:
*
* NOTE: this function does not initialize the controlq locks
*/
-static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
struct ice_ctl_q_info *cq;
- enum ice_status ret_code;
+ int ret_code;
switch (q_type) {
case ICE_CTL_Q_ADMIN:
@@ -631,14 +627,14 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
cq = &hw->mailboxq;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
cq->qtype = q_type;
/* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->num_sq_entries ||
!cq->rq_buf_size || !cq->sq_buf_size) {
- return ICE_ERR_CFG;
+ return -EIO;
}
/* setup SQ command write back timeout */
@@ -751,10 +747,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
*
* NOTE: this function does not initialize the controlq locks.
*/
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+int ice_init_all_ctrlq(struct ice_hw *hw)
{
- enum ice_status status;
u32 retry = 0;
+ int status;
/* Init FW admin queue */
do {
@@ -763,7 +759,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
return status;
status = ice_init_check_adminq(hw);
- if (status != ICE_ERR_AQ_FW_CRITICAL)
+ if (status != -EIO)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
@@ -814,7 +810,7 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
* driver needs to re-initialize control queues at run time it should call
* ice_init_all_ctrlq instead.
*/
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+int ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
@@ -962,7 +958,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* This is the main send command routine for the ATQ. It runs the queue,
* cleans the queue, etc.
*/
-enum ice_status
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -970,27 +966,27 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_dma_mem *dma_buf = NULL;
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
- enum ice_status status = 0;
struct ice_sq_cd *details;
u32 total_delay = 0;
+ int status = 0;
u16 retval = 0;
u32 val = 0;
/* if reset is in progress return a soft error */
if (hw->reset_ongoing)
- return ICE_ERR_RESET_ONGOING;
+ return -EBUSY;
mutex_lock(&cq->sq_lock);
cq->sq_last_status = ICE_AQ_RC_OK;
if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
- status = ICE_ERR_AQ_EMPTY;
+ status = -EIO;
goto sq_send_command_error;
}
if ((buf && !buf_size) || (!buf && buf_size)) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto sq_send_command_error;
}
@@ -998,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf_size > cq->sq_buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
buf_size);
- status = ICE_ERR_INVAL_SIZE;
+ status = -EINVAL;
goto sq_send_command_error;
}
@@ -1011,7 +1007,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (val >= cq->num_sq_entries) {
ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
val);
- status = ICE_ERR_AQ_EMPTY;
+ status = -EIO;
goto sq_send_command_error;
}
@@ -1028,7 +1024,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*/
if (ice_clean_sq(hw, cq) == 0) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
- status = ICE_ERR_AQ_FULL;
+ status = -ENOSPC;
goto sq_send_command_error;
}
@@ -1082,7 +1078,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (copy_size > buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
copy_size, buf_size);
- status = ICE_ERR_AQ_ERROR;
+ status = -EIO;
} else {
memcpy(buf, dma_buf->va, copy_size);
}
@@ -1098,7 +1094,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
cmd_completed = true;
if (!status && retval != ICE_AQ_RC_OK)
- status = ICE_ERR_AQ_ERROR;
+ status = -EIO;
cq->sq_last_status = (enum ice_aq_err)retval;
}
@@ -1116,10 +1112,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
- status = ICE_ERR_AQ_FW_CRITICAL;
+ status = -EIO;
} else {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
- status = ICE_ERR_AQ_TIMEOUT;
+ status = -EIO;
}
}
@@ -1154,15 +1150,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* the contents through 'e'. It can also return how many events are
* left to process through 'pending'.
*/
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
u16 ntc = cq->rq.next_to_clean;
enum ice_aq_err rq_last_status;
- enum ice_status ret_code = 0;
struct ice_aq_desc *desc;
struct ice_dma_mem *bi;
+ int ret_code = 0;
u16 desc_idx;
u16 datalen;
u16 flags;
@@ -1176,7 +1172,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (!cq->rq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
- ret_code = ICE_ERR_AQ_EMPTY;
+ ret_code = -EIO;
goto clean_rq_elem_err;
}
@@ -1185,7 +1181,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- ret_code = ICE_ERR_AQ_NO_WORK;
+ ret_code = -EALREADY;
goto clean_rq_elem_out;
}
@@ -1196,7 +1192,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
- ret_code = ICE_ERR_AQ_ERROR;
+ ret_code = -EIO;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode), rq_last_status);
}
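The hunks above are part of a tree-wide cleanup that retires the driver-private enum ice_status in favor of standard negative errnos (ICE_ERR_NO_MEMORY becomes -ENOMEM, ICE_ERR_PARAM becomes -EINVAL, and so on), so callers no longer need a translation helper such as ice_status_to_errno(). A minimal, self-contained sketch of the resulting calling convention; fake_aq_send() is a hypothetical stand-in for the converted control-queue helpers, not a driver API:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for a converted control-queue helper: it now
 * returns 0 on success or a negative errno, matching the convention
 * this series moves the ice driver to.
 */
static int fake_aq_send(int simulate_failure)
{
	if (simulate_failure)
		return -EIO;	/* was ICE_ERR_AQ_ERROR before the series */
	return 0;
}

int main(void)
{
	int err = fake_aq_send(1);

	/* Callers can propagate the code directly instead of translating
	 * enum ice_status into an errno first.
	 */
	if (err)
		fprintf(stderr, "send failed: %d\n", err);
	return err ? 1 : 0;
}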
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 241427cd9bc0..0b146a0d4205 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h"
-#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_dcb.h"
@@ -19,19 +18,19 @@
*
* Requests the complete LLDP MIB (entire packet). (0x0A00)
*/
-static enum ice_status
+static int
ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
u16 buf_size, u16 *local_len, u16 *remote_len,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_get_mib *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.lldp_get_mib;
if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
@@ -61,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
* Enable or Disable posting of an event on ARQ when LLDP MIB
* associated with the interface changes (0x0A01)
*/
-static enum ice_status
+static int
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd)
{
@@ -89,7 +88,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
*
* Stop or Shutdown the embedded LLDP Agent (0x0A05)
*/
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
@@ -117,8 +116,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
*
* Start the embedded LLDP Agent on all ports. (0x0A06)
*/
-enum ice_status
-ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
+int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
struct ice_aq_desc desc;
@@ -598,18 +596,17 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
*
* Parse DCB configuration from the LLDPDU
*/
-static enum ice_status
-ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_lldp_org_tlv *tlv;
- enum ice_status ret = 0;
u16 offset = 0;
+ int ret = 0;
u16 typelen;
u16 type;
u16 len;
if (!lldpmib || !dcbcfg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* set to the start of LLDPDU */
lldpmib += ETH_HLEN;
@@ -649,17 +646,17 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
*
* Query DCB configuration from the firmware
*/
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg)
{
- enum ice_status ret;
u8 *lldpmib;
+ int ret;
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
ICE_LLDPDU_SIZE, NULL, NULL, NULL);
@@ -684,17 +681,17 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
* @cd: pointer to command details structure or NULL
*
* Start/Stop the embedded dcbx Agent. In case that this wrapper function
- * returns ICE_SUCCESS, caller will need to check if FW returns back the same
+ * returns 0, caller will need to check if FW returns back the same
* value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
*/
-enum ice_status
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
- enum ice_status status;
struct ice_aq_desc desc;
u16 opcode;
+ int status;
cmd = &desc.params.lldp_agent_ctrl;
@@ -724,7 +721,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
*
* Get CEE DCBX mode operational configuration from firmware (0x0A07)
*/
-static enum ice_status
+static int
ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd)
@@ -749,7 +746,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return -EINVAL;
@@ -762,7 +759,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
/* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
* disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
@@ -903,14 +900,13 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*
* Get IEEE or CEE mode DCB configuration from the Firmware
*/
-static enum ice_status
-ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
{
struct ice_dcbx_cfg *dcbx_cfg = NULL;
- enum ice_status ret;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (dcbx_mode == ICE_DCBX_MODE_IEEE)
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
@@ -943,14 +939,14 @@ out:
*
* Get DCB configuration from the Firmware
*/
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+int ice_get_dcb_cfg(struct ice_port_info *pi)
{
struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct ice_dcbx_cfg *dcbx_cfg;
- enum ice_status ret;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (!ret) {
@@ -974,13 +970,13 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
*
* Update DCB configuration from the Firmware
*/
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret = 0;
+ int ret = 0;
if (!hw->func_caps.common_cap.dcb)
- return ICE_ERR_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
qos_cfg->is_sw_lldp = true;
@@ -996,7 +992,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
return ret;
qos_cfg->is_sw_lldp = false;
} else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
- return ICE_ERR_NOT_READY;
+ return -EBUSY;
}
/* Configure the LLDP MIB change event */
@@ -1016,19 +1012,19 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
*
* Configure (disable/enable) MIB
*/
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret;
+ int ret;
if (!hw->func_caps.common_cap.dcb)
- return ICE_ERR_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/* Get DCBX status */
qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
- return ICE_ERR_NOT_READY;
+ return -EBUSY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
if (!ret)
@@ -1469,16 +1465,16 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
*
* Set DCB configuration to the Firmware
*/
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+int ice_set_dcb_cfg(struct ice_port_info *pi)
{
u8 mib_type, *lldpmib = NULL;
struct ice_dcbx_cfg *dcbcfg;
- enum ice_status ret;
struct ice_hw *hw;
u16 miblen;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
@@ -1487,7 +1483,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
@@ -1511,17 +1507,17 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
*
* query current port ETS configuration
*/
-static enum ice_status
+static int
ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_port_ets *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.port_ets;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
cmd->port_teid = pi->root->info.node_teid;
@@ -1537,18 +1533,18 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
*
* update the SW DB with the new TC changes
*/
-static enum ice_status
+static int
ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf)
{
struct ice_sched_node *node, *tc_node;
struct ice_aqc_txsched_elem_data elem;
- enum ice_status status = 0;
u32 teid1, teid2;
+ int status = 0;
u8 i, j;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* suspend the missing TC nodes */
for (i = 0; i < pi->root->num_children; i++) {
teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);
@@ -1605,12 +1601,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
* query current port ETS configuration and update the
* SW DB with the TC changes
*/
-enum ice_status
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
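ice_aq_get_dcb_cfg() above follows a shape common to this file: allocate an LLDPDU buffer, fetch the MIB from firmware, parse it into the DCB configuration, and free the buffer on every exit path. A self-contained sketch of that shape under the new errno convention; LLDPDU_SIZE, fake_get_lldp_mib() and fake_parse_lldp() are illustrative stand-ins, not driver APIs:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define LLDPDU_SIZE 1500	/* illustrative; the driver uses ICE_LLDPDU_SIZE */

/* Stand-ins for the firmware query and the TLV parser. */
static int fake_get_lldp_mib(unsigned char *buf, size_t len)
{
	memset(buf, 0, len);	/* pretend firmware filled the buffer */
	return 0;
}

static int fake_parse_lldp(const unsigned char *buf)
{
	return buf[0] ? -EIO : 0;	/* trivial "parser" */
}

static int get_dcb_cfg(void)
{
	unsigned char *lldpmib;
	int ret;

	lldpmib = calloc(1, LLDPDU_SIZE);
	if (!lldpmib)
		return -ENOMEM;	/* was ICE_ERR_NO_MEMORY */

	ret = fake_get_lldp_mib(lldpmib, LLDPDU_SIZE);
	if (!ret)
		ret = fake_parse_lldp(lldpmib);

	free(lldpmib);		/* freed on success and failure alike */
	return ret;
}

int main(void)
{
	return get_dcb_cfg() ? 1 : 0;
}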
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index 9b6f87a889a6..d73348f279f7 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -138,28 +138,27 @@ struct ice_cee_app_prio {
} __packed;
int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
-enum ice_status
+int ice_get_dcb_cfg(struct ice_port_info *pi);
+int ice_set_dcb_cfg(struct ice_port_info *pi);
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cmd_details);
#ifdef CONFIG_DCB
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
-enum ice_status
+int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd);
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
#else /* CONFIG_DCB */
-static inline enum ice_status
+static inline int
ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
bool __always_unused shutdown_lldp_agent,
bool __always_unused persist,
@@ -168,7 +167,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_aq_start_lldp(struct ice_hw __always_unused *hw,
bool __always_unused persist,
struct ice_sq_cd __always_unused *cd)
@@ -176,7 +175,7 @@ ice_aq_start_lldp(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
bool __always_unused start_dcbx_agent,
bool *dcbx_agent_status,
@@ -187,7 +186,7 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw,
bool __always_unused ena_mib)
{
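When CONFIG_DCB is not set, the header above supplies static inline stubs that simply return 0, so callers compile unchanged and treat the operations as trivially successful. A small self-contained sketch of the same compile-out pattern; HAVE_DCB stands in for the kernel's CONFIG_DCB:

#include <errno.h>

#define HAVE_DCB 0	/* illustrative switch; the kernel uses CONFIG_DCB */

#if HAVE_DCB
int dcb_start_agent(void);	/* real implementation lives elsewhere */
#else
/* Compiled-out stub: report success so callers need no #ifdef guards. */
static inline int dcb_start_agent(void)
{
	return 0;
}
#endif

int main(void)
{
	/* Behaves identically whether or not DCB support is built in. */
	return dcb_start_agent();
}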
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a72e18320a22..b94d8daeaa58 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -528,7 +528,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
struct ice_aqc_port_ets_elem buf = { 0 };
struct device *dev = ice_pf_to_dev(pf);
struct ice_dcbx_cfg *err_cfg;
- enum ice_status ret;
+ int ret;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index b9bd9f9472f6..a230edb38466 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
+#include <linux/vmalloc.h>
+
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
@@ -39,13 +41,13 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
if (status)
/* We failed to locate the PBA, so just skip this entry */
- dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
- ice_stat_str(status));
+ dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
+ status);
}
static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
@@ -251,7 +253,6 @@ static int ice_devlink_info_get(struct devlink *devlink,
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct ice_info_ctx *ctx;
- enum ice_status status;
size_t i;
int err;
@@ -266,20 +267,19 @@ static int ice_devlink_info_get(struct devlink *devlink,
return -ENOMEM;
/* discover capabilities first */
- status = ice_discover_dev_caps(hw, &ctx->dev_caps);
- if (status) {
- dev_dbg(dev, "Failed to discover device capabilities, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_discover_dev_caps(hw, &ctx->dev_caps);
+ if (err) {
+ dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
- err = -EIO;
goto out_free_ctx;
}
if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
- status = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
- if (status) {
- dev_dbg(dev, "Unable to read inactive Option ROM version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
@@ -287,10 +287,10 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
- status = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
- if (status) {
- dev_dbg(dev, "Unable to read inactive NVM version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
@@ -298,10 +298,10 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
- status = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
- if (status) {
- dev_dbg(dev, "Unable to read inactive Netlist version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
@@ -373,63 +373,225 @@ out_free_ctx:
}
/**
- * ice_devlink_flash_update - Update firmware stored in flash on the device
- * @devlink: pointer to devlink associated with device to update
- * @params: flash update parameters
+ * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
+ * @limit: limits on what reload should do, such as not resetting
* @extack: netlink extended ACK structure
*
- * Perform a device flash update. The bulk of the update logic is contained
- * within the ice_flash_pldm_image function.
+ * Allow user to activate new Embedded Management Processor firmware by
+ * issuing device specific EMP reset. Called in response to
+ * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
*
- * Returns: zero on success, or an error code on failure.
+ * Note that teardown and rebuild of the driver state happens automatically as
+ * part of an interrupt and watchdog task. This is because all physical
+ * functions on the device must be able to reset when an EMP reset occurs from
+ * any source.
*/
static int
-ice_devlink_flash_update(struct devlink *devlink,
- struct devlink_flash_update_params *params,
- struct netlink_ext_ack *extack)
+ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- u8 preservation;
+ u8 pending;
int err;
- if (!params->overwrite_mask) {
- /* preserve all settings and identifiers */
- preservation = ICE_AQC_NVM_PRESERVE_ALL;
- } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
- /* overwrite settings, but preserve the vital device identifiers */
- preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
- } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
- DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
- /* overwrite both settings and identifiers, preserve nothing */
- preservation = ICE_AQC_NVM_NO_PRESERVATION;
- } else {
- NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
- return -EOPNOTSUPP;
+ err = ice_get_pending_updates(pf, &pending, extack);
+ if (err)
+ return err;
+
+ /* pending is a bitmask of which flash banks have a pending update,
+ * including the main NVM bank, the Option ROM bank, and the netlist
+ * bank. If any of these bits are set, then there is a pending update
+ * waiting to be activated.
+ */
+ if (!pending) {
+ NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
+ return -ECANCELED;
}
- if (!hw->dev_caps.common_cap.nvm_unified_update) {
- NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
- return -EOPNOTSUPP;
+ if (pf->fw_emp_reset_disabled) {
+ NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
+ return -ECANCELED;
}
- err = ice_check_for_pending_update(pf, NULL, extack);
- if (err)
+ dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");
+
+ err = ice_aq_nvm_update_empr(hw);
+ if (err) {
+ dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
return err;
+ }
- devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
+ return 0;
+}
- return ice_flash_pldm_image(pf, params->fw, preservation, extack);
+/**
+ * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
+ * @devlink: pointer to the devlink instance reloading
+ * @action: the action requested
+ * @limit: limits imposed by userspace, such as not resetting
+ * @actions_performed: on return, indicate what actions actually performed
+ * @extack: netlink extended ACK structure
+ *
+ * Wait for driver to finish rebuilding after EMP reset is completed. This
+ * includes time to wait for both the actual device reset as well as the time
+ * for the driver's rebuild to complete.
+ */
+static int
+ice_devlink_reload_empr_finish(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+
+ err = ice_wait_for_reset(pf, 60 * HZ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
+ return err;
+ }
+
+ return 0;
}
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ /* The ice driver currently does not support driver reinit */
+ .reload_down = ice_devlink_reload_empr_start,
+ .reload_up = ice_devlink_reload_empr_finish,
.eswitch_mode_get = ice_eswitch_mode_get,
.eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
};
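The new reload support splits the work across devlink's two callbacks: reload_down verifies that an update is staged and issues the EMP reset, and reload_up waits for the driver rebuild before reporting what was activated through actions_performed. A self-contained sketch of that contract; the function names and the RELOAD_ACTION_FW_ACTIVATE bit value are illustrative, not the devlink core's definitions:

#include <errno.h>
#include <stdio.h>

#define RELOAD_ACTION_FW_ACTIVATE 1	/* illustrative bit position */

static int reload_down(int pending_update)
{
	if (!pending_update)
		return -ECANCELED;	/* nothing staged to activate */
	puts("issuing EMP reset");
	return 0;
}

static int reload_up(unsigned int *actions_performed)
{
	/* report what actually happened back to devlink */
	*actions_performed = 1u << RELOAD_ACTION_FW_ACTIVATE;
	puts("driver rebuild complete");
	return 0;
}

int main(void)
{
	unsigned int acts = 0;
	int err = reload_down(1);

	if (!err)
		err = reload_up(&acts);
	return err ? 1 : 0;
}

From userspace this path would typically be exercised with something like devlink dev reload pci/0000:01:00.0 action fw_activate (command shown for orientation only).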
+static int
+ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool roce_ena = ctx->val.vbool;
+ int ret;
+
+ if (!roce_ena) {
+ ice_unplug_aux_dev(pf);
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ return 0;
+ }
+
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+
+ return ret;
+}
+
+static int
+ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
+ NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool iw_ena = ctx->val.vbool;
+ int ret;
+
+ if (!iw_ena) {
+ ice_unplug_aux_dev(pf);
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ return 0;
+ }
+
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+
+ return ret;
+}
+
+static int
+ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
+ NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param ice_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_enable_roce_get,
+ ice_devlink_enable_roce_set,
+ ice_devlink_enable_roce_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_enable_iw_get,
+ ice_devlink_enable_iw_set,
+ ice_devlink_enable_iw_validate),
+
+};
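The validate callbacks above enforce that RoCEv2 and iWARP are mutually exclusive: enabling one while the other is active fails with -EOPNOTSUPP instead of silently reconfiguring the device. A minimal sketch of that mutual-exclusion check; the MODE_* flag bits are illustrative stand-ins for the IIDC_RDMA_PROTOCOL_* flags:

#include <errno.h>

#define MODE_ROCE	0x1	/* illustrative flag bits */
#define MODE_IWARP	0x2

static int validate_enable(unsigned int current_mode, unsigned int want)
{
	unsigned int other = (want == MODE_ROCE) ? MODE_IWARP : MODE_ROCE;

	if (current_mode & other)
		return -EOPNOTSUPP;	/* the other protocol is active */
	return 0;
}

int main(void)
{
	/* enabling iWARP while RoCEv2 is on must be rejected */
	return validate_enable(MODE_ROCE, MODE_IWARP) == -EOPNOTSUPP ? 0 : 1;
}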
+
static void ice_devlink_free(void *devlink_ptr)
{
devlink_free((struct devlink *)devlink_ptr);
@@ -470,6 +632,7 @@ void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
}
@@ -484,6 +647,36 @@ void ice_devlink_unregister(struct ice_pf *pf)
devlink_unregister(priv_to_devlink(pf));
}
+int ice_devlink_register_params(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
+ int err;
+
+ err = devlink_params_register(devlink, ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
+ if (err)
+ return err;
+
+ value.vbool = false;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
+ value);
+
+ value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? true : false;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ value);
+
+ return 0;
+}
+
+void ice_devlink_unregister_params(struct ice_pf *pf)
+{
+ devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
+}
+
/**
* ice_devlink_create_pf_port - Create a devlink port for this PF
* @pf: the PF to create a devlink port for
@@ -597,16 +790,20 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
}
/**
- * ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
+ * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
* @devlink: the devlink instance
* @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
* This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
- * the shadow-ram devlink region. It captures a snapshot of the shadow ram
- * contents. This snapshot can later be viewed via the devlink-region
- * interface.
+ * the nvm-flash devlink region. It captures a snapshot of the full NVM flash
+ * contents, including both banks of flash. This snapshot can later be viewed
+ * via the devlink-region interface.
+ *
+ * It captures the flash using the FLASH_ONLY bit set when reading via
+ * firmware, so it does not read the current Shadow RAM contents. For that,
+ * use the shadow-ram region.
*
* @returns zero on success, and updates the data pointer. Returns a non-zero
* error code on failure.
@@ -618,9 +815,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
void *nvm_data;
u32 nvm_size;
+ int status;
nvm_size = hw->flash.flash_size;
nvm_data = vzalloc(nvm_size);
@@ -633,7 +830,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
vfree(nvm_data);
- return -EIO;
+ return status;
}
status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
@@ -643,7 +840,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
ice_release_nvm(hw);
vfree(nvm_data);
- return -EIO;
+ return status;
}
ice_release_nvm(hw);
@@ -654,6 +851,66 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
}
/**
+ * ice_devlink_sram_snapshot - Capture a snapshot of the Shadow RAM contents
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
+ * the shadow-ram devlink region. It captures a snapshot of the shadow ram
+ * contents. This snapshot can later be viewed via the devlink-region
+ * interface.
+ *
+ * @returns zero on success, and updates the data pointer. Returns a non-zero
+ * error code on failure.
+ */
+static int
+ice_devlink_sram_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops __always_unused *ops,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ u8 *sram_data;
+ u32 sram_size;
+ int err;
+
+ sram_size = hw->flash.sr_words * 2u;
+ sram_data = vzalloc(sram_size);
+ if (!sram_data)
+ return -ENOMEM;
+
+ err = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (err) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ err, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ vfree(sram_data);
+ return err;
+ }
+
+ /* Read from the Shadow RAM, rather than directly from NVM */
+ err = ice_read_flat_nvm(hw, 0, &sram_size, sram_data, true);
+ if (err) {
+ dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
+ sram_size, err, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read Shadow RAM contents");
+ ice_release_nvm(hw);
+ vfree(sram_data);
+ return err;
+ }
+
+ ice_release_nvm(hw);
+
+ *data = sram_data;
+
+ return 0;
+}
+
+/**
* ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
* @devlink: the devlink instance
* @ops: the devlink region being snapshotted
@@ -675,8 +932,8 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
void *devcaps;
+ int status;
devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
if (!devcaps)
@@ -689,7 +946,7 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink,
status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
vfree(devcaps);
- return -EIO;
+ return status;
}
*data = (u8 *)devcaps;
@@ -703,6 +960,12 @@ static const struct devlink_region_ops ice_nvm_region_ops = {
.snapshot = ice_devlink_nvm_snapshot,
};
+static const struct devlink_region_ops ice_sram_region_ops = {
+ .name = "shadow-ram",
+ .destructor = vfree,
+ .snapshot = ice_devlink_sram_snapshot,
+};
+
static const struct devlink_region_ops ice_devcaps_region_ops = {
.name = "device-caps",
.destructor = vfree,
@@ -720,7 +983,7 @@ void ice_devlink_init_regions(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct device *dev = ice_pf_to_dev(pf);
- u64 nvm_size;
+ u64 nvm_size, sram_size;
nvm_size = pf->hw.flash.flash_size;
pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
@@ -731,6 +994,15 @@ void ice_devlink_init_regions(struct ice_pf *pf)
pf->nvm_region = NULL;
}
+ sram_size = pf->hw.flash.sr_words * 2u;
+ pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
+ 1, sram_size);
+ if (IS_ERR(pf->sram_region)) {
+ dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
+ PTR_ERR(pf->sram_region));
+ pf->sram_region = NULL;
+ }
+
pf->devcaps_region = devlink_region_create(devlink,
&ice_devcaps_region_ops, 10,
ICE_AQ_MAX_BUF_LEN);
@@ -751,6 +1023,10 @@ void ice_devlink_destroy_regions(struct ice_pf *pf)
{
if (pf->nvm_region)
devlink_region_destroy(pf->nvm_region);
+
+ if (pf->sram_region)
+ devlink_region_destroy(pf->sram_region);
+
if (pf->devcaps_region)
devlink_region_destroy(pf->devcaps_region);
}
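Both snapshot callbacks above share one ownership rule: the callback allocates the buffer, and on success hands it to devlink, which later frees it through the region's vfree destructor; on any error the callback must free the buffer itself. A self-contained sketch of that contract, with fake_read_flash() standing in for ice_read_flat_nvm() and plain calloc()/free() standing in for vzalloc()/vfree():

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int fake_read_flash(unsigned char *buf, size_t len)
{
	memset(buf, 0xff, len);	/* pretend we read erased flash */
	return 0;
}

static int snapshot(unsigned char **data, size_t size)
{
	unsigned char *buf = calloc(1, size);
	int err;

	if (!buf)
		return -ENOMEM;

	err = fake_read_flash(buf, size);
	if (err) {
		free(buf);	/* error path keeps ownership here */
		return err;
	}

	*data = buf;		/* success path transfers ownership */
	return 0;
}

int main(void)
{
	unsigned char *data = NULL;
	int err = snapshot(&data, 4096);

	free(data);		/* models devlink's destructor */
	return err ? 1 : 0;
}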
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index b7f9551e4fc4..fe006d9946f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -8,6 +8,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev);
void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
+int ice_devlink_register_params(struct ice_pf *pf);
+void ice_devlink_unregister_params(struct ice_pf *pf);
int ice_devlink_create_pf_port(struct ice_pf *pf);
void ice_devlink_destroy_pf_port(struct ice_pf *pf);
int ice_devlink_create_vf_port(struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index d1d7389b0bff..864692b157b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -10,6 +10,100 @@
#include "ice_tc_lib.h"
/**
+ * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
+ * @pf: pointer to PF struct
+ * @vf: pointer to VF struct
+ * @mac: VF's MAC address
+ *
+ * This function adds an advanced rule that forwards packets with
+ * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
+ */
+int
+ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_adv_lkup_elem *list;
+ struct ice_hw *hw = &pf->hw;
+ const u16 lkups_cnt = 1;
+ int err;
+
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ list[0].type = ICE_MAC_OFOS;
+ ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
+ eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
+
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
+ rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
+ rule_info.rx = false;
+ rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
+ ctrl_vsi->rxq_map[vf->vf_id];
+ rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
+ rule_info.flags_info.act_valid = true;
+
+ err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
+ vf->repr->mac_rule);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
+ vf->vf_id);
+ else
+ vf->repr->rule_added = true;
+
+ kfree(list);
+ return err;
+}
+
+/**
+ * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
+ * @vf: pointer to VF struct
+ *
+ * This function replays the VF's MAC rule after reset.
+ */
+void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
+{
+ int err;
+
+ if (!ice_is_switchdev_running(vf->pf))
+ return;
+
+ if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+ err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
+ vf->hw_lan_addr.addr);
+ if (err) {
+ dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d, error %d\n",
+ vf->hw_lan_addr.addr, vf->vf_id, err);
+ return;
+ }
+ vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ }
+}
+
+/**
+ * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
+ * @vf: pointer to the VF struct
+ *
+ * Delete the advanced rule that was used to forward packets with the VF's MAC
+ * address (src MAC) to the corresponding switchdev ctrl VSI queue.
+ */
+void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
+{
+ if (!ice_is_switchdev_running(vf->pf))
+ return;
+
+ if (!vf->repr->rule_added)
+ return;
+
+ ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
+ vf->repr->rule_added = false;
+}
+
+/**
* ice_eswitch_setup_env - configure switchdev HW filters
* @pf: pointer to PF struct
*
@@ -21,7 +115,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct net_device *uplink_netdev = uplink_vsi->netdev;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
- struct ice_port_info *pi = pf->hw.port_info;
bool rule_added = false;
ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
@@ -42,29 +135,17 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
rule_added = true;
}
- if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
- goto err_def_tx;
-
if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_uplink;
if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_control;
- if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
- ICE_FLTR_TX,
- ICE_SINGLE_ACT_LB_ENABLE))
- goto err_update_action;
-
return 0;
-err_update_action:
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
- ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
-err_def_tx:
if (rule_added)
ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
@@ -167,21 +248,11 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
netif_keep_dst(vf->repr->netdev);
}
- kfree(ctrl_vsi->target_netdevs);
-
- ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
- sizeof(*ctrl_vsi->target_netdevs),
- GFP_KERNEL);
- if (!ctrl_vsi->target_netdevs)
- goto err;
-
ice_for_each_vf(pf, i) {
struct ice_repr *repr = pf->vf[i].repr;
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
- ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;
-
dst = repr->dst;
dst->u.port_info.port_id = vsi->vsi_num;
dst->u.port_info.lower_dev = repr->netdev;
@@ -214,7 +285,6 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
int i;
- kfree(ctrl_vsi->target_netdevs);
ice_for_each_vf(pf, i) {
struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
struct ice_vf *vf = &pf->vf[i];
@@ -320,7 +390,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
- ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
ice_clear_dflt_vsi(uplink_vsi->vsw);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
@@ -375,24 +444,6 @@ static void ice_eswitch_napi_disable(struct ice_pf *pf)
}
/**
- * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI
- * @vsi: VSI to setup rxdid on
- * @rxdid: flex descriptor id
- */
-static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
-{
- struct ice_hw *hw = &vsi->back->hw;
- int i;
-
- ice_for_each_rxq(vsi, i) {
- struct ice_rx_ring *ring = vsi->rx_rings[i];
- u16 pf_q = vsi->rxq_map[ring->q_index];
-
- ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
- }
-}
-
-/**
* ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
* @pf: pointer to PF structure
*/
@@ -425,8 +476,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
ice_eswitch_napi_enable(pf);
- ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
-
return 0;
err_setup_reprs:
@@ -448,6 +497,7 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
ice_eswitch_napi_disable(pf);
ice_eswitch_release_env(pf);
+ ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
ice_eswitch_release_reprs(pf, ctrl_vsi);
ice_vsi_release(ctrl_vsi);
ice_repr_rem_from_all_vfs(pf);
@@ -497,34 +547,6 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
}
/**
- * ice_eswitch_get_target_netdev - return port representor netdev
- * @rx_ring: pointer to Rx ring
- * @rx_desc: pointer to Rx descriptor
- *
- * When working in switchdev mode context (when control VSI is used), this
- * function returns netdev of appropriate port representor. For non-switchdev
- * context, regular netdev associated with Rx ring is returned.
- */
-struct net_device *
-ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc)
-{
- struct ice_32b_rx_flex_desc_nic_2 *desc;
- struct ice_vsi *vsi = rx_ring->vsi;
- struct ice_vsi *control_vsi;
- u16 target_vsi_id;
-
- control_vsi = vsi->back->switchdev.control_vsi;
- if (vsi != control_vsi)
- return rx_ring->netdev;
-
- desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
- target_vsi_id = le16_to_cpu(desc->src_vsi);
-
- return vsi->target_netdevs[target_vsi_id];
-}
-
-/**
* ice_eswitch_mode_get - get current eswitch mode
* @devlink: pointer to devlink structure
* @mode: output parameter for current eswitch mode
@@ -648,7 +670,6 @@ int ice_eswitch_rebuild(struct ice_pf *pf)
return status;
ice_eswitch_napi_enable(pf);
- ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
ice_eswitch_start_all_tx_queues(pf);
return 0;
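The three new eswitch helpers form a lifecycle for the per-VF forwarding rule: add it when the VF's MAC is programmed, replay it after a reset has wiped hardware state, and delete it on teardown, with repr->rule_added guarding against double removal. A self-contained sketch of that lifecycle; struct vf_repr here is a stand-in for the driver's representor state, not its real layout:

#include <errno.h>
#include <stdbool.h>

struct vf_repr {
	bool rule_added;	/* mirrors repr->rule_added */
};

static int add_mac_rule(struct vf_repr *r)
{
	if (r->rule_added)
		return -EEXIST;
	r->rule_added = true;	/* rule now programmed in "hardware" */
	return 0;
}

static void del_mac_rule(struct vf_repr *r)
{
	if (!r->rule_added)
		return;		/* nothing to remove; stays idempotent */
	r->rule_added = false;
}

static void replay_mac_rule(struct vf_repr *r)
{
	/* after a reset the hardware rule is gone; re-add from software */
	r->rule_added = false;
	add_mac_rule(r);
}

int main(void)
{
	struct vf_repr r = { .rule_added = false };

	add_mac_rule(&r);	/* VF MAC programmed */
	replay_mac_rule(&r);	/* models post-reset replay */
	del_mac_rule(&r);	/* teardown */
	return r.rule_added ? 1 : 0;
}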
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 364cd2a79c37..bd58d9d2e565 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -20,10 +20,11 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
void ice_eswitch_update_repr(struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
-
-struct net_device *
-ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc);
+int
+ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
+ const u8 *mac);
+void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf);
+void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf);
void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
@@ -33,6 +34,15 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
static inline void ice_eswitch_release(struct ice_pf *pf) { }
static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
+static inline void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) { }
+static inline void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) { }
+
+static inline int
+ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
+ const u8 *mac)
+{
+ return -EOPNOTSUPP;
+}
static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
@@ -67,13 +77,6 @@ static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
return false;
}
-static inline struct net_device *
-ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc)
-{
- return rx_ring->netdev;
-}
-
static inline netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
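Note the asymmetry with the DCB stubs earlier in this series: when switchdev support is compiled out, the void eswitch helpers become empty no-ops, but the ice_eswitch_add_vf_mac_rule() stub returns -EOPNOTSUPP, since pretending the rule was installed would silently leave the VF's traffic unforwarded. A one-function sketch of that choice:

#include <errno.h>

/* Compiled-out stub: unlike a void no-op, an "add rule" operation
 * must report that it did nothing, so it fails with -EOPNOTSUPP.
 */
static inline int add_vf_mac_rule_stub(void)
{
	return -EOPNOTSUPP;
}

int main(void)
{
	return add_vf_mac_rule_stub() == -EOPNOTSUPP ? 0 : 1;
}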
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 572519e402f4..e2e3ef7fba7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -270,9 +270,8 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct device *dev;
- int ret = 0;
+ int ret;
u8 *buf;
dev = ice_pf_to_dev(pf);
@@ -285,22 +284,18 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (!buf)
return -ENOMEM;
- status = ice_acquire_nvm(hw, ICE_RES_READ);
- if (status) {
- dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret) {
+ dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
- status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
- false);
- if (status) {
- dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
+ false);
+ if (ret) {
+ dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto release;
}
@@ -342,14 +337,14 @@ static bool ice_active_vfs(struct ice_pf *pf)
static u64 ice_link_test(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- enum ice_status status;
bool link_up = false;
+ int status;
netdev_info(netdev, "link test\n");
status = ice_get_link_status(np->vsi->port_info, &link_up);
if (status) {
- netdev_err(netdev, "link query error, status = %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "link query error, status = %d\n",
+ status);
return 1;
}
@@ -1052,8 +1047,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
- enum ice_status status;
- int err = 0;
+ int err;
pi = vsi->port_info;
@@ -1079,12 +1073,10 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
if (!caps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- caps, NULL);
- if (status) {
- err = -EAGAIN;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ caps, NULL);
+ if (err)
goto done;
- }
/* Set supported/configured FEC modes based on PHY capability */
if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
@@ -1203,7 +1195,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
- enum ice_status status;
+ int status;
/* Disable FW LLDP engine */
status = ice_cfg_lldp_mib_change(&pf->hw, false);
@@ -1232,8 +1224,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
} else {
- enum ice_status status;
bool dcbx_agent_status;
+ int status;
if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
@@ -1288,8 +1280,10 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
/* down and up VSI so that changes of Rx cfg are reflected. */
- ice_down(vsi);
- ice_up(vsi);
+ if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+ ice_down(vsi);
+ ice_up(vsi);
+ }
}
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
@@ -1938,8 +1932,7 @@ ice_get_link_ksettings(struct net_device *netdev,
struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
- enum ice_status status;
- int err = 0;
+ int err;
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
@@ -1990,12 +1983,10 @@ ice_get_link_ksettings(struct net_device *netdev,
if (!caps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
+ if (err)
goto done;
- }
/* Set the advertised flow control based on the PHY capability */
if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
@@ -2027,12 +2018,10 @@ ice_get_link_ksettings(struct net_device *netdev,
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
+ if (err)
goto done;
- }
/* Set supported FEC modes based on PHY capability */
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
@@ -2210,11 +2199,10 @@ ice_set_link_ksettings(struct net_device *netdev,
struct ice_pf *pf = np->vsi->back;
struct ice_port_info *pi;
u8 autoneg_changed = 0;
- enum ice_status status;
u64 phy_type_high = 0;
u64 phy_type_low = 0;
- int err = 0;
bool linkup;
+ int err;
pi = np->vsi->port_info;
@@ -2234,15 +2222,13 @@ ice_set_link_ksettings(struct net_device *netdev,
/* Get the PHY capabilities based on media */
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- phy_caps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ phy_caps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- phy_caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ phy_caps, NULL);
+ if (err)
goto done;
- }
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
@@ -2308,11 +2294,9 @@ ice_set_link_ksettings(struct net_device *netdev,
/* Call to get the current link speed */
pi->phy.get_link_info = true;
- status = ice_get_link_status(pi, &linkup);
- if (status) {
- err = -EIO;
+ err = ice_get_link_status(pi, &linkup);
+ if (err)
goto done;
- }
curr_link_speed = pi->phy.link_info.link_speed;
adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
@@ -2381,10 +2365,9 @@ ice_set_link_ksettings(struct net_device *netdev,
}
/* make the aq call */
- status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
- if (status) {
+ err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
+ if (err) {
netdev_info(netdev, "Set phy config failed,\n");
- err = -EIO;
goto done;
}
@@ -2522,9 +2505,9 @@ static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
u64 hashed_flds;
+ int status;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2550,9 +2533,9 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
if (status) {
- dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
- return -EINVAL;
+ dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
+ vsi->vsi_num, status);
+ return status;
}
return 0;
@@ -2686,7 +2669,9 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
static void
-ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2704,7 +2689,9 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
}
static int
-ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_tx_ring *xdp_rings = NULL;
@@ -2949,7 +2936,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_dcbx_cfg *dcbx_cfg;
- enum ice_status status;
+ int status;
/* Initialize pause params */
pause->rx_pause = 0;
@@ -2999,11 +2986,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_vsi *vsi = np->vsi;
struct ice_hw *hw = &pf->hw;
struct ice_port_info *pi;
- enum ice_status status;
u8 aq_failures;
bool link_up;
- int err = 0;
u32 is_an;
+ int err;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
@@ -3029,11 +3015,11 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -ENOMEM;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
- NULL);
- if (status) {
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
+ NULL);
+ if (err) {
kfree(pcaps);
- return -EIO;
+ return err;
}
is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
@@ -3069,22 +3055,19 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -EINVAL;
/* Set the FC mode and only restart AN if link is up */
- status = ice_set_fc(pi, &aq_failures, link_up);
+ err = ice_set_fc(pi, &aq_failures, link_up);
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
@@ -3924,16 +3907,16 @@ ice_get_module_info(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 sff8472_comp = 0;
u8 sff8472_swap = 0;
u8 sff8636_rev = 0;
u8 value = 0;
+ int status;
status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
0, &value, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
switch (value) {
case ICE_MODULE_TYPE_SFP:
@@ -3941,12 +3924,12 @@ ice_get_module_info(struct net_device *netdev,
ICE_MODULE_SFF_8472_COMP, 0x00, 0,
&sff8472_comp, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
&sff8472_swap, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
modinfo->type = ETH_MODULE_SFF_8079;
@@ -3966,7 +3949,7 @@ ice_get_module_info(struct net_device *netdev,
ICE_MODULE_REVISION_ADDR, 0x00, 0,
&sff8636_rev, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
/* Check revision compliance */
if (sff8636_rev > 0x02) {
/* Module is SFF-8636 compliant */
@@ -4001,11 +3984,11 @@ ice_get_module_eeprom(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
bool is_sfp = false;
unsigned int i, j;
u16 offset = 0;
u8 page = 0;
+ int status;
if (!ee || !ee->len || !data)
return -EINVAL;
@@ -4013,7 +3996,7 @@ ice_get_module_eeprom(struct net_device *netdev,
status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
NULL);
if (status)
- return -EIO;
+ return status;
if (value[0] == ICE_MODULE_TYPE_SFP)
is_sfp = true;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index b6e7f47c8c78..5d10c4f84a36 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -5,6 +5,7 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_fdir.h"
#include "ice_flow.h"
static struct in6_addr full_ipv6_addr_mask = {
@@ -205,7 +206,7 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
else
- fsp->ring_cookie = rule->q_index;
+ fsp->ring_cookie = rule->orig_q_index;
idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
@@ -257,6 +258,80 @@ release_lock:
}
/**
+ * ice_fdir_remap_entries - update the FDir entries in profile
+ * @prof: FDir structure pointer
+ * @tun: tunneled or non-tunneled packet
+ * @idx: FDir entry index
+ */
+static void
+ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
+{
+ if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
+ int i;
+
+ for (i = idx; i < (prof->cnt - 1); i++) {
+ u64 old_entry_h;
+
+ old_entry_h = prof->entry_h[i + 1][tun];
+ prof->entry_h[i][tun] = old_entry_h;
+ prof->vsi_h[i] = prof->vsi_h[i + 1];
+ }
+
+ prof->entry_h[i][tun] = 0;
+ prof->vsi_h[i] = 0;
+ }
+}
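
For reference, the remap above is a plain shift-and-zero compaction of two parallel arrays. A minimal standalone sketch of the same motion, with illustrative names that are not driver code:

    #include <stdio.h>

    /* After clearing slot `idx`, shift the remaining entries down one
     * position so both parallel arrays stay densely packed, then zero
     * the vacated tail slot -- the same motion ice_fdir_remap_entries()
     * performs on prof->entry_h[][] and prof->vsi_h[].
     */
    static void remap_entries(unsigned long long *entry_h,
			      unsigned short *vsi_h, int cnt, int idx)
    {
	    int i;

	    for (i = idx; i < cnt - 1; i++) {
		    entry_h[i] = entry_h[i + 1];
		    vsi_h[i] = vsi_h[i + 1];
	    }
	    entry_h[cnt - 1] = 0;
	    vsi_h[cnt - 1] = 0;
    }

    int main(void)
    {
	    unsigned long long entry_h[4] = { 10, 20, 30, 40 };
	    unsigned short vsi_h[4] = { 1, 2, 3, 4 };
	    int i;

	    remap_entries(entry_h, vsi_h, 4, 1);	/* slot 1 was cleared */
	    for (i = 0; i < 4; i++)
		    printf("%llu/%u\n", entry_h[i], (unsigned int)vsi_h[i]);
	    return 0;
    }
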
+
+/**
+ * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
+ * @hw: hardware structure containing filter list
+ * @vsi_idx: VSI handle
+ */
+void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
+{
+ int status, flow;
+
+ if (!hw->fdir_prof)
+ return;
+
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
+ int tun, i;
+
+ if (!prof || !prof->cnt)
+ continue;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ u64 prof_id;
+
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+
+ for (i = 0; i < prof->cnt; i++) {
+ if (prof->vsi_h[i] != vsi_idx)
+ continue;
+
+ prof->entry_h[i][tun] = 0;
+ prof->vsi_h[i] = 0;
+ break;
+ }
+
+ /* after clearing FDir entries, update the remaining entries */
+ ice_fdir_remap_entries(prof, tun, i);
+
+ /* find flow profile corresponding to prof_id and clear
+ * vsi_idx from bitmap.
+ */
+ status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id);
+ if (status) {
+ dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
+ status);
+ }
+ }
+ prof->cnt--;
+ }
+}
+
+/**
* ice_fdir_get_hw_prof - return the ice_fd_hw_proc associated with a flow
* @hw: hardware structure containing the filter list
* @blk: hardware block
@@ -514,6 +589,28 @@ ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
}
/**
+ * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
+ * @prof: pointer to flow director HW profile
+ * @vsi_idx: vsi_idx to locate
+ *
+ * Return the index of the vsi_idx. If vsi_idx is not found, insert it
+ * into the vsi_h table.
+ */
+static u16
+ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
+{
+ u16 idx = 0;
+
+ for (idx = 0; idx < prof->cnt; idx++)
+ if (prof->vsi_h[idx] == vsi_idx)
+ return idx;
+
+ if (idx == prof->cnt)
+ prof->vsi_h[prof->cnt++] = vsi_idx;
+ return idx;
+}
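
The helper is a classic find-or-append over a small table; in isolation the pattern looks like this (illustrative names, and the caller is assumed to guarantee spare capacity, as the driver does):

    /* Return the index of `val` in tbl[0..*cnt); if absent, append it
     * and grow the count -- the find-or-insert contract of
     * ice_fdir_prof_vsi_idx().
     */
    static unsigned short find_or_insert(unsigned short *tbl,
					 unsigned short *cnt,
					 unsigned short val)
    {
	    unsigned short idx;

	    for (idx = 0; idx < *cnt; idx++)
		    if (tbl[idx] == val)
			    return idx;

	    tbl[(*cnt)++] = val;	/* not found: append */
	    return idx;
    }
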
+
+/**
* ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
* @pf: pointer to the PF structure
* @seg: protocol header description pointer
@@ -530,11 +627,12 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
struct ice_flow_prof *prof = NULL;
struct ice_fd_hw_prof *hw_prof;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u64 entry1_h = 0;
u64 entry2_h = 0;
+ bool del_last;
u64 prof_id;
int err;
+ int idx;
main_vsi = ice_get_main_vsi(pf);
if (!main_vsi)
@@ -581,24 +679,20 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* actions (NULL) and an action count of zero.
*/
prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- TNL_SEG_CNT(tun), &prof);
- if (status)
- return ice_status_to_errno(status);
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
- main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry1_h);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ TNL_SEG_CNT(tun), &prof);
+ if (err)
+ return err;
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
+ if (err)
goto err_prof;
- }
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
- ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry2_h);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
+ if (err)
goto err_entry;
- }
hw_prof->fdir_seg[tun] = seg;
hw_prof->entry_h[0][tun] = entry1_h;
@@ -608,8 +702,60 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
if (!hw_prof->cnt)
hw_prof->cnt = 2;
+ for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
+ u16 vsi_idx;
+ u16 vsi_h;
+
+ if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
+ continue;
+
+ entry1_h = 0;
+ vsi_h = main_vsi->tc_map_vsi[idx]->idx;
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ main_vsi->idx, vsi_h,
+ ICE_FLOW_PRIO_NORMAL, seg,
+ &entry1_h);
+ if (err) {
+ dev_err(dev, "Could not add Channel VSI %d to flow group\n",
+ idx);
+ goto err_unroll;
+ }
+
+ vsi_idx = ice_fdir_prof_vsi_idx(hw_prof,
+ main_vsi->tc_map_vsi[idx]->idx);
+ hw_prof->entry_h[vsi_idx][tun] = entry1_h;
+ }
+
return 0;
+err_unroll:
+ entry1_h = 0;
+ hw_prof->fdir_seg[tun] = NULL;
+
+ /* The variable del_last will be used to determine when to clean up
+ * the VSI group data. The VSI data is not needed if there are no
+ * segments.
+ */
+ del_last = true;
+ for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
+ if (hw_prof->fdir_seg[idx]) {
+ del_last = false;
+ break;
+ }
+
+ for (idx = 0; idx < hw_prof->cnt; idx++) {
+ u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]);
+
+ if (!hw_prof->entry_h[idx][tun])
+ continue;
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
+ hw_prof->entry_h[idx][tun] = 0;
+ if (del_last)
+ hw_prof->vsi_h[idx] = 0;
+ }
+ if (del_last)
+ hw_prof->cnt = 0;
err_entry:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
@@ -1174,6 +1320,31 @@ err_exit:
}
/**
+ * ice_update_per_q_fltr
+ * @vsi: ptr to VSI
+ * @q_index: queue index
+ * @inc: true to increment or false to decrement the per-queue filter count
+ *
+ * This function is used to keep track of per-queue sideband filters
+ */
+static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
+{
+ struct ice_rx_ring *rx_ring;
+
+ if (!vsi->num_rxq || q_index >= vsi->num_rxq)
+ return;
+
+ rx_ring = vsi->rx_rings[q_index];
+ if (!rx_ring || !rx_ring->ch)
+ return;
+
+ if (inc)
+ atomic_inc(&rx_ring->ch->num_sb_fltr);
+ else
+ atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr);
+}
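
atomic_dec_if_positive() keeps the counter from ever going below zero. A hedged C11 sketch of the same never-underflow decrement, assuming <stdatomic.h> rather than the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Increment unconditionally; decrement only while the counter is
     * positive, the guarantee the driver gets from
     * atomic_dec_if_positive() on ch->num_sb_fltr.
     */
    static void update_cnt(atomic_int *cnt, bool inc)
    {
	    int old;

	    if (inc) {
		    atomic_fetch_add(cnt, 1);
		    return;
	    }

	    old = atomic_load(cnt);
	    while (old > 0 &&
		   !atomic_compare_exchange_weak(cnt, &old, old - 1))
		    ;	/* a failed CAS reloads `old`; retry */
    }
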
+
+/**
* ice_fdir_write_fltr - send a flow director filter to the hardware
* @pf: PF data structure
* @input: filter structure
@@ -1190,7 +1361,6 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
struct ice_hw *hw = &pf->hw;
struct ice_fltr_desc desc;
struct ice_vsi *ctrl_vsi;
- enum ice_status status;
u8 *pkt, *frag_pkt;
bool has_frag;
int err;
@@ -1209,11 +1379,9 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
}
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (err)
goto err_free_all;
- }
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
if (err)
goto err_free_all;
@@ -1223,12 +1391,10 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
if (has_frag) {
/* does not return error */
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
- is_tun);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
+ is_tun);
+ if (err)
goto err_frag;
- }
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
if (err)
goto err_frag;
@@ -1324,13 +1490,32 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf)
}
/**
+ * ice_fdir_del_all_fltrs - Delete all flow director filters
+ * @vsi: the VSI being changed
+ *
+ * This function needs to be called while holding hw->fdir_fltr_lock
+ */
+void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
+{
+ struct ice_fdir_fltr *f_rule, *tmp;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+
+ list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
+ ice_fdir_write_all_fltr(pf, f_rule, false);
+ ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
+ list_del(&f_rule->fltr_node);
+ devm_kfree(ice_pf_to_dev(pf), f_rule);
+ }
+}
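
The loop frees each node mid-walk, which is why the _safe list iterator is required. The same discipline on a bare singly linked list, as a sketch:

    #include <stdlib.h>

    struct node {
	    struct node *next;
    };

    /* Cache ->next before freeing the current node -- exactly what
     * list_for_each_entry_safe() does for the kernel list above.
     */
    static void del_all(struct node **head)
    {
	    struct node *cur = *head, *tmp;

	    while (cur) {
		    tmp = cur->next;
		    free(cur);
		    cur = tmp;
	    }
	    *head = NULL;
    }
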
+
+/**
* ice_vsi_manage_fdir - turn on/off flow director
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
*/
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
- struct ice_fdir_fltr *f_rule, *tmp;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_fltr_ptype flow;
@@ -1344,13 +1529,8 @@ void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
mutex_lock(&hw->fdir_fltr_lock);
if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
goto release_lock;
- list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
- /* ignore return value */
- ice_fdir_write_all_fltr(pf, f_rule, false);
- ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
- list_del(&f_rule->fltr_node);
- devm_kfree(ice_hw_to_dev(hw), f_rule);
- }
+
+ ice_fdir_del_all_fltrs(vsi);
if (hw->fdir_prof)
for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
@@ -1401,18 +1581,25 @@ ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
{
struct ice_fdir_fltr *old_fltr;
struct ice_hw *hw = &pf->hw;
+ struct ice_vsi *vsi;
int err = -ENOENT;
/* Do not update filters during reset */
if (ice_is_reset_in_progress(pf->state))
return -EBUSY;
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EINVAL;
+
old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
if (old_fltr) {
err = ice_fdir_write_all_fltr(pf, old_fltr, false);
if (err)
return err;
ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
+ /* update sb-filters count, specific to ring->channel */
+ ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false);
if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
/* we just deleted the last filter of flow_type so we
* should also delete the HW filter info.
@@ -1424,6 +1611,8 @@ ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
if (!input)
return err;
ice_fdir_list_add_fltr(hw, input);
+ /* update sb-filters count, specific to ring->channel */
+ ice_update_per_q_fltr(vsi, input->orig_q_index, true);
ice_fdir_update_cntrs(hw, input->flow_type, true);
return 0;
}
@@ -1463,6 +1652,39 @@ int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/**
+ * ice_update_ring_dest_vsi - update dest ring and dest VSI
+ * @vsi: pointer to target VSI
+ * @dest_vsi: ptr to dest VSI index
+ * @ring: ptr to dest ring
+ *
+ * This function updates the destination VSI and queue if the user specifies
+ * a target queue that falls within a channel's (aka ADQ) queue region
+ */
+static void
+ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
+{
+ struct ice_channel *ch;
+
+ list_for_each_entry(ch, &vsi->ch_list, list) {
+ if (!ch->ch_vsi)
+ continue;
+
+ /* make sure to locate the corresponding channel based on the
+ * specified "queue"
+ */
+ if ((*ring < ch->base_q) ||
+ (*ring >= (ch->base_q + ch->num_rxq)))
+ continue;
+
+ /* update the dest_vsi based on channel */
+ *dest_vsi = ch->ch_vsi->idx;
+
+ /* update the "ring" to be correct based on channel */
+ *ring -= ch->base_q;
+ }
+}
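
The rebase rule is interval arithmetic: if the absolute ring index lands in a channel's [base_q, base_q + num_rxq) window, retarget the VSI and subtract the base. A standalone sketch under that assumption (struct and names are illustrative):

    struct chnl {
	    unsigned int base_q;	/* first absolute queue of the channel */
	    unsigned int num_rxq;	/* queues owned by the channel */
	    unsigned short vsi_idx;	/* channel VSI handle */
    };

    /* Retarget (*dest_vsi, *ring) when *ring lands inside a channel's
     * queue region; *ring becomes relative to that channel's base.
     */
    static void rebase_ring(const struct chnl *chs, int n,
			    unsigned short *dest_vsi, unsigned int *ring)
    {
	    int i;

	    for (i = 0; i < n; i++) {
		    if (*ring < chs[i].base_q ||
			*ring >= chs[i].base_q + chs[i].num_rxq)
			    continue;

		    *dest_vsi = chs[i].vsi_idx;
		    *ring -= chs[i].base_q;
		    break;	/* regions are disjoint, one match suffices */
	    }
    }
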
+
+/**
* ice_set_fdir_input_set - Set the input set for Flow Director
* @vsi: pointer to target VSI
* @fsp: pointer to ethtool Rx flow specification
@@ -1473,6 +1695,7 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
struct ice_fdir_fltr *input)
{
u16 dest_vsi, q_index = 0;
+ u16 orig_q_index = 0;
struct ice_pf *pf;
struct ice_hw *hw;
int flow_type;
@@ -1499,6 +1722,8 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
if (ring >= vsi->num_rxq)
return -EINVAL;
+ orig_q_index = ring;
+ ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring);
dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
q_index = ring;
}
@@ -1507,6 +1732,11 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
input->q_index = q_index;
flow_type = fsp->flow_type & ~FLOW_EXT;
+ /* Record the original queue index as specified by the user.
+ * With channel configuration, 'q_index' becomes relative
+ * to the TC (channel).
+ */
+ input->orig_q_index = orig_q_index;
input->dest_vsi = dest_vsi;
input->dest_ctl = dest_ctl;
input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
@@ -1694,6 +1924,8 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
remove_sw_rule:
ice_fdir_update_cntrs(hw, input->flow_type, false);
+ /* update sb-filters count, specific to ring->channel */
+ ice_update_per_q_fltr(vsi, input->orig_q_index, false);
list_del(&input->fltr_node);
release_lock:
mutex_unlock(&hw->fdir_fltr_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 4dca009bdd50..ae089d32ee9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -712,7 +712,7 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
* @hw: pointer to the hardware structure
* @cntr_id: returns counter index
*/
-enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
+int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
@@ -723,7 +723,7 @@ enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
* @hw: pointer to the hardware structure
* @cntr_id: counter index to be freed
*/
-enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
+int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
{
return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
@@ -735,8 +735,7 @@ enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
* @cntr_id: returns counter index
* @num_fltr: number of filter entries to be allocated
*/
-enum ice_status
-ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
@@ -749,8 +748,7 @@ ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
* @cntr_id: returns counter index
* @num_fltr: number of filter entries to be allocated
*/
-enum ice_status
-ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
@@ -872,7 +870,7 @@ static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr)
* @frag: generate a fragment packet
* @tun: true implies generate a tunnel packet
*/
-enum ice_status
+int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun)
{
@@ -919,15 +917,15 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
if (ice_fdir_pkt[idx].flow == flow)
break;
if (idx == ICE_FDIR_NUM_PKT)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!tun) {
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
if (!ice_fdir_pkt[idx].tun_pkt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
memcpy(pkt, ice_fdir_pkt[idx].tun_pkt,
ice_fdir_pkt[idx].tun_pkt_len);
ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET,
@@ -1111,7 +1109,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
if (input->flex_fltr)
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index da4163856f4c..1b9b84490689 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -182,6 +182,7 @@ struct ice_fdir_fltr {
/* filter control */
u16 q_index;
+ u16 orig_q_index;
u16 dest_vsi;
u8 dest_ctl;
u8 cnt_ena;
@@ -201,16 +202,14 @@ struct ice_fdir_base_pkt {
const u8 *tun_pkt;
};
-enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
-enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
-enum ice_status
-ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
-enum ice_status
-ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
void
ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
struct ice_fltr_desc *fdesc, bool add);
-enum ice_status
+int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun);
int ice_get_fdir_cnt_all(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 6ad1c2559724..4deb2c9446ec 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -314,6 +314,78 @@ ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
}
/**
+ * ice_hw_ptype_ena - check if the PTYPE is enabled or not
+ * @hw: pointer to the HW structure
+ * @ptype: the hardware PTYPE
+ */
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
+{
+ return ptype < ICE_FLOW_PTYPE_MAX &&
+ test_bit(ptype, hw->hw_ptype);
+}
+
+/**
+ * ice_marker_ptype_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the Marker PType TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual Marker PType TCAM entries.
+ */
+static void *
+ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_marker_ptype_tcam_section *marker_ptype;
+
+ if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
+ return NULL;
+
+ if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ marker_ptype = section;
+ if (index >= le16_to_cpu(marker_ptype->count))
+ return NULL;
+
+ return marker_ptype->tcam + index;
+}
+
+/**
+ * ice_fill_hw_ptype - fill the enabled PTYPE bit information
+ * @hw: pointer to the HW structure
+ */
+static void ice_fill_hw_ptype(struct ice_hw *hw)
+{
+ struct ice_marker_ptype_tcam_entry *tcam;
+ struct ice_seg *seg = hw->seg;
+ struct ice_pkg_enum state;
+
+ bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
+ if (!seg)
+ return;
+
+ memset(&state, 0, sizeof(state));
+
+ do {
+ tcam = ice_pkg_enum_entry(seg, &state,
+ ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
+ ice_marker_ptype_tcam_handler);
+ if (tcam &&
+ le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
+ le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
+ set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
+
+ seg = NULL;
+ } while (tcam);
+}
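
hw->hw_ptype is a bitmap indexed by PTYPE, so ice_hw_ptype_ena() reduces to a bounds check plus test_bit(). A freestanding sketch of that bitmap (sizes illustrative, not the driver's):

    #define PTYPE_MAX	1024UL
    #define BITS_PER_LONG	(sizeof(unsigned long) * 8)

    static unsigned long ptype_map[PTYPE_MAX / BITS_PER_LONG];

    /* set_bit() equivalent; caller keeps p < PTYPE_MAX */
    static void ptype_set(unsigned long p)
    {
	    ptype_map[p / BITS_PER_LONG] |= 1UL << (p % BITS_PER_LONG);
    }

    /* bounds check + test_bit(), mirroring ice_hw_ptype_ena() */
    static int ptype_ena(unsigned long p)
    {
	    return p < PTYPE_MAX &&
		   (ptype_map[p / BITS_PER_LONG] >> (p % BITS_PER_LONG)) & 1;
    }
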
+
+/**
* ice_boost_tcam_handler
* @sect_type: section type
* @section: pointer to section
@@ -358,7 +430,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
* if it is found. The ice_seg parameter must not be NULL since the first call
* to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
*/
-static enum ice_status
+static int
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
struct ice_boost_tcam_entry **entry)
{
@@ -368,7 +440,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
memset(&state, 0, sizeof(state));
if (!ice_seg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
do {
tcam = ice_pkg_enum_entry(ice_seg, &state,
@@ -383,7 +455,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
} while (tcam);
*entry = NULL;
- return ICE_ERR_CFG;
+ return -EIO;
}
/**
@@ -549,7 +621,7 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
* ------------------------------
* Result: key: b01 10 11 11 00 00
*/
-static enum ice_status
+static int
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
u8 *key_inv)
{
@@ -558,7 +630,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
- return ICE_ERR_CFG;
+ return -EIO;
*key = 0;
*key_inv = 0;
@@ -651,7 +723,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
-static enum ice_status
+static int
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
u16 len)
{
@@ -660,11 +732,11 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
/* size must be a multiple of 2 bytes. */
if (size % 2)
- return ICE_ERR_CFG;
+ return -EIO;
half_size = size / 2;
if (off + len > half_size)
- return ICE_ERR_CFG;
+ return -EIO;
/* Make sure at most one bit is set in the never match mask. Having more
* than one never match mask bit set will cause HW to consume excessive
@@ -672,13 +744,13 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
*/
#define ICE_NVR_MTCH_BITS_MAX 1
if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
- return ICE_ERR_CFG;
+ return -EIO;
for (i = 0; i < len; i++)
if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
dc ? dc[i] : 0, nm ? nm[i] : 0,
key + off + i, key + half_size + off + i))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
}
@@ -692,25 +764,25 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
* or writing of the package. When attempting to obtain write access, the
* caller must check for the following two return values:
*
- * ICE_SUCCESS - Means the caller has acquired the global config lock
- * and can perform writing of the package.
- * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
- * package or has found that no update was necessary; in
- * this case, the caller can just skip performing any
- * update of the package.
- */
-static enum ice_status
+ * 0 - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * -EALREADY - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+static int
ice_acquire_global_cfg_lock(struct ice_hw *hw,
enum ice_aq_res_access_type access)
{
- enum ice_status status;
+ int status;
status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (!status)
mutex_lock(&ice_global_cfg_lock_sw);
- else if (status == ICE_ERR_AQ_NO_WORK)
+ else if (status == -EALREADY)
ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
return status;
@@ -735,7 +807,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
*
* This function will request ownership of the change lock.
*/
-enum ice_status
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
@@ -765,14 +837,14 @@ void ice_release_change_lock(struct ice_hw *hw)
*
* Download Package (0x0C40)
*/
-static enum ice_status
+static int
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, bool last_buf, u32 *error_offset,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (error_offset)
*error_offset = 0;
@@ -787,7 +859,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
+ if (status == -EIO) {
/* Read error from buffer only when the FW returned an error */
struct ice_aqc_download_pkg_resp *resp;
@@ -813,14 +885,14 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
*
* Update Package (0x0C42)
*/
-static enum ice_status
+static int
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
bool last_buf, u32 *error_offset, u32 *error_info,
struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (error_offset)
*error_offset = 0;
@@ -835,7 +907,7 @@ ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
+ if (status == -EIO) {
/* Read error from buffer only when the FW returned an error */
struct ice_aqc_download_pkg_resp *resp;
@@ -892,11 +964,10 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
*
* Obtains change lock and updates package.
*/
-static enum ice_status
-ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status;
u32 offset, info, i;
+ int status;
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
@@ -921,6 +992,22 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
return status;
}
+static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_ENOSEC:
+ case ICE_AQ_RC_EBADSIG:
+ return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
+ case ICE_AQ_RC_ESVN:
+ return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
+ case ICE_AQ_RC_EBADMAN:
+ case ICE_AQ_RC_EBADBUF:
+ return ICE_DDP_PKG_LOAD_ERROR;
+ default:
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
/**
* ice_dwnld_cfg_bufs
* @hw: pointer to the hardware structure
@@ -931,15 +1018,17 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
* to the firmware. Metadata buffers are skipped, and the first metadata buffer
* found indicates that the rest of the buffers are all metadata buffers.
*/
-static enum ice_status
+static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status;
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
struct ice_buf_hdr *bh;
+ enum ice_aq_err err;
u32 offset, info, i;
+ int status;
if (!bufs || !count)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
/* If the first buffer's first section has its metadata bit set
* then there are no buffers to be downloaded, and the operation is
@@ -947,20 +1036,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
*/
bh = (struct ice_buf_hdr *)bufs;
if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return 0;
-
- /* reset pkg_dwnld_status in case this function is called in the
- * reset/rebuild flow
- */
- hw->pkg_dwnld_status = ICE_AQ_RC_OK;
+ return ICE_DDP_PKG_SUCCESS;
status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
if (status) {
- if (status == ICE_ERR_AQ_NO_WORK)
- hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
- else
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
- return status;
+ if (status == -EALREADY)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
}
for (i = 0; i < count; i++) {
@@ -986,11 +1068,11 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
&offset, &info, NULL);
/* Save AQ status from download package */
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
if (status) {
ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
status, offset, info);
-
+ err = hw->adminq.sq_last_status;
+ state = ice_map_aq_err_to_ddp_state(err);
break;
}
@@ -1000,7 +1082,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
ice_release_global_cfg_lock(hw);
- return status;
+ return state;
}
/**
@@ -1012,7 +1094,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
*
* Get Package Info List (0x0C43)
*/
-static enum ice_status
+static int
ice_aq_get_pkg_info_list(struct ice_hw *hw,
struct ice_aqc_get_pkg_info_resp *pkg_info,
u16 buf_size, struct ice_sq_cd *cd)
@@ -1031,7 +1113,7 @@ ice_aq_get_pkg_info_list(struct ice_hw *hw,
*
* Handles the download of a complete package.
*/
-static enum ice_status
+static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
@@ -1062,13 +1144,13 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
*
* Saves off the package details into the HW structure.
*/
-static enum ice_status
+static enum ice_ddp_state
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
struct ice_generic_seg_hdr *seg_hdr;
if (!pkg_hdr)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
if (seg_hdr) {
@@ -1082,7 +1164,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
ICE_SID_METADATA);
if (!meta) {
ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
}
hw->pkg_ver = meta->ver;
@@ -1104,10 +1186,10 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
seg_hdr->seg_id);
} else {
ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
}
- return 0;
+ return ICE_DDP_PKG_SUCCESS;
}
/**
@@ -1116,21 +1198,22 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
*
* Store details of the package currently loaded in HW into the HW structure.
*/
-static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
+static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
struct ice_aqc_get_pkg_info_resp *pkg_info;
- enum ice_status status;
u16 size;
u32 i;
size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
pkg_info = kzalloc(size, GFP_KERNEL);
if (!pkg_info)
- return ICE_ERR_NO_MEMORY;
+ return ICE_DDP_PKG_ERR;
- status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
- if (status)
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
+ state = ICE_DDP_PKG_ERR;
goto init_pkg_free_alloc;
+ }
for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT 4
@@ -1165,7 +1248,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
init_pkg_free_alloc:
kfree(pkg_info);
- return status;
+ return state;
}
/**
@@ -1176,28 +1259,28 @@ init_pkg_free_alloc:
* Verifies various attributes of the package file, including length, format
* version, and the requirement of at least one segment.
*/
-static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
u32 seg_count;
u32 i;
if (len < struct_size(pkg, seg_offset, 1))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
/* pkg must have at least one segment */
seg_count = le32_to_cpu(pkg->seg_count);
if (seg_count < 1)
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
/* make sure segment array fits in package length */
if (len < struct_size(pkg, seg_offset, seg_count))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
/* all segments must fit within length */
for (i = 0; i < seg_count; i++) {
@@ -1206,16 +1289,16 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
/* segment header must fit */
if (len < off + sizeof(*seg))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
/* segment body must fit */
if (len < off + le32_to_cpu(seg->seg_size))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
}
- return 0;
+ return ICE_DDP_PKG_SUCCESS;
}
/**
@@ -1259,13 +1342,18 @@ static void ice_init_pkg_regs(struct ice_hw *hw)
* version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
* definitions.
*/
-static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
- if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
- pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
- return ICE_ERR_NOT_SUPPORTED;
+ if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
+ else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
- return 0;
+ return ICE_DDP_PKG_SUCCESS;
}
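
The reworked check orders (major, minor) pairs lexicographically against the supported pair, mapping greater to FILE_VERSION_TOO_HIGH and less to FILE_VERSION_TOO_LOW. The bare ordering, as a sketch:

    /* Three-way lexicographic compare of (major, minor) version pairs:
     * <0 means a < b, 0 means equal, >0 means a > b -- the ordering
     * behind the TOO_HIGH / TOO_LOW classification above.
     */
    static int ver_cmp(unsigned int a_maj, unsigned int a_min,
		       unsigned int b_maj, unsigned int b_min)
    {
	    if (a_maj != b_maj)
		    return a_maj < b_maj ? -1 : 1;
	    if (a_min != b_min)
		    return a_min < b_min ? -1 : 1;
	    return 0;
    }
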
/**
@@ -1276,20 +1364,20 @@ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
*
* This function checks the package version compatibility with driver and NVM
*/
-static enum ice_status
+static enum ice_ddp_state
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
struct ice_aqc_get_pkg_info_resp *pkg;
- enum ice_status status;
+ enum ice_ddp_state state;
u16 size;
u32 i;
/* Check package version compatibility */
- status = ice_chk_pkg_version(&hw->pkg_ver);
- if (status) {
+ state = ice_chk_pkg_version(&hw->pkg_ver);
+ if (state) {
ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
- return status;
+ return state;
}
/* find ICE segment in given package */
@@ -1297,18 +1385,19 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
ospkg);
if (!*seg) {
ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
}
/* Check if FW is compatible with the OS package */
size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
pkg = kzalloc(size, GFP_KERNEL);
if (!pkg)
- return ICE_ERR_NO_MEMORY;
+ return ICE_DDP_PKG_ERR;
- status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
- if (status)
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
+ state = ICE_DDP_PKG_LOAD_ERROR;
goto fw_ddp_compat_free_alloc;
+ }
for (i = 0; i < le32_to_cpu(pkg->count); i++) {
/* loop till we find the NVM package */
@@ -1318,7 +1407,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
pkg->pkg_info[i].ver.major ||
(*seg)->hdr.seg_format_ver.minor >
pkg->pkg_info[i].ver.minor) {
- status = ICE_ERR_FW_DDP_MISMATCH;
+ state = ICE_DDP_PKG_FW_MISMATCH;
ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
}
/* done processing NVM package so break */
@@ -1326,7 +1415,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
}
fw_ddp_compat_free_alloc:
kfree(pkg);
- return status;
+ return state;
}
/**
@@ -1367,7 +1456,7 @@ ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
* and store the index number in struct ice_switch_info *switch_info
* in HW for following use.
*/
-static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
+static int ice_get_prof_index_max(struct ice_hw *hw)
{
u16 prof_index = 0, j, max_prof_index = 0;
struct ice_pkg_enum state;
@@ -1379,7 +1468,7 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
memset(&state, 0, sizeof(state));
if (!hw->seg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_seg = hw->seg;
@@ -1410,6 +1499,34 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
}
/**
+ * ice_get_ddp_pkg_state - get DDP pkg state after download
+ * @hw: pointer to the HW struct
+ * @already_loaded: indicates if pkg was already loaded onto the device
+ */
+static enum ice_ddp_state
+ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
+{
+ if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
+ hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
+ hw->pkg_ver.update == hw->active_pkg_ver.update &&
+ hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
+ !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
+ if (already_loaded)
+ return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
+ else
+ return ICE_DDP_PKG_SUCCESS;
+ } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
+ hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
+ } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
+ } else {
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
@@ -1434,53 +1551,54 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
* ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
* case.
*/
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
+ bool already_loaded = false;
+ enum ice_ddp_state state;
struct ice_pkg_hdr *pkg;
- enum ice_status status;
struct ice_seg *seg;
if (!buf || !len)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
pkg = (struct ice_pkg_hdr *)buf;
- status = ice_verify_pkg(pkg, len);
- if (status) {
+ state = ice_verify_pkg(pkg, len);
+ if (state) {
ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
- status);
- return status;
+ state);
+ return state;
}
/* initialize package info */
- status = ice_init_pkg_info(hw, pkg);
- if (status)
- return status;
+ state = ice_init_pkg_info(hw, pkg);
+ if (state)
+ return state;
/* before downloading the package, check package version for
* compatibility with driver
*/
- status = ice_chk_pkg_compat(hw, pkg, &seg);
- if (status)
- return status;
+ state = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (state)
+ return state;
/* initialize package hints and then download package */
ice_init_pkg_hints(hw, seg);
- status = ice_download_pkg(hw, seg);
- if (status == ICE_ERR_AQ_NO_WORK) {
+ state = ice_download_pkg(hw, seg);
+ if (state == ICE_DDP_PKG_ALREADY_LOADED) {
ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
- status = 0;
+ already_loaded = true;
}
/* Get information on the package currently loaded in HW, then make sure
* the driver is compatible with this version.
*/
- if (!status) {
- status = ice_get_pkg_info(hw);
- if (!status)
- status = ice_chk_pkg_version(&hw->active_pkg_ver);
+ if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
+ state = ice_get_pkg_info(hw);
+ if (!state)
+ state = ice_get_ddp_pkg_state(hw, already_loaded);
}
- if (!status) {
+ if (ice_is_init_pkg_successful(state)) {
hw->seg = seg;
/* on successful package download update other required
* registers to support the package and fill HW tables
@@ -1488,13 +1606,14 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
*/
ice_init_pkg_regs(hw);
ice_fill_blk_tbls(hw);
+ ice_fill_hw_ptype(hw);
ice_get_prof_index_max(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
- status);
+ state);
}
- return status;
+ return state;
}
/**
@@ -1520,18 +1639,19 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
* package buffer, as the new copy will be managed by this function and
* related routines.
*/
-enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
+enum ice_ddp_state
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
- enum ice_status status;
+ enum ice_ddp_state state;
u8 *buf_copy;
if (!buf || !len)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
- status = ice_init_pkg(hw, buf_copy, len);
- if (status) {
+ state = ice_init_pkg(hw, buf_copy, len);
+ if (!ice_is_init_pkg_successful(state)) {
/* Free the copy, since we failed to initialize the package */
devm_kfree(ice_hw_to_dev(hw), buf_copy);
} else {
@@ -1540,7 +1660,23 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
hw->pkg_size = len;
}
- return status;
+ return state;
+}
+
+/**
+ * ice_is_init_pkg_successful - check if DDP init was successful
+ * @state: state of the DDP pkg after download
+ */
+bool ice_is_init_pkg_successful(enum ice_ddp_state state)
+{
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ return true;
+ default:
+ return false;
+ }
}
/**
@@ -1644,7 +1780,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
* NOTE: The caller of the function is responsible for freeing the memory
* allocated for every list entry.
*/
-enum ice_status
+int
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
unsigned long *bm, struct list_head *fv_list)
{
@@ -1658,7 +1794,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
memset(&state, 0, sizeof(state));
if (!ids_cnt || !hw->seg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_seg = hw->seg;
do {
@@ -1702,7 +1838,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
}
} while (fv);
if (list_empty(fv_list))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
err:
@@ -1711,7 +1847,7 @@ err:
devm_kfree(ice_hw_to_dev(hw), fvl);
}
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -1779,7 +1915,7 @@ static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
* result in some wasted space in the buffer.
* Note: all package contents must be in Little Endian form.
*/
-static enum ice_status
+static int
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
struct ice_buf_hdr *buf;
@@ -1787,17 +1923,17 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
u16 data_end;
if (!bld)
- return ICE_ERR_PARAM;
+ return -EINVAL;
buf = (struct ice_buf_hdr *)&bld->buf;
/* already an active section, can't increase table size */
section_count = le16_to_cpu(buf->section_count);
if (section_count > 0)
- return ICE_ERR_CFG;
+ return -EIO;
if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
- return ICE_ERR_CFG;
+ return -EIO;
bld->reserved_section_table_entries += count;
data_end = le16_to_cpu(buf->data_end) +
@@ -1959,19 +2095,19 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
-static enum ice_status
+static int
ice_create_tunnel(struct ice_hw *hw, u16 index,
enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = -ENOSPC;
mutex_lock(&hw->tnl_lock);
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_create_tunnel_end;
}
@@ -2030,26 +2166,26 @@ ice_create_tunnel_end:
* targeting the specific updates requested and then performing an update
* package.
*/
-static enum ice_status
+static int
ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = -ENOSPC;
mutex_lock(&hw->tnl_lock);
if (WARN_ON(!hw->tnl.tbl[index].valid ||
hw->tnl.tbl[index].type != type ||
hw->tnl.tbl[index].port != port)) {
- status = ICE_ERR_OUT_OF_RANGE;
+ status = -EIO;
goto ice_destroy_tunnel_end;
}
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_destroy_tunnel_end;
}
@@ -2097,7 +2233,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
enum ice_tunnel_type tnl_type;
- enum ice_status status;
+ int status;
u16 index;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
@@ -2105,8 +2241,8 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
if (status) {
- netdev_err(netdev, "Error adding UDP tunnel - %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "Error adding UDP tunnel - %d\n",
+ status);
return -EIO;
}
@@ -2121,15 +2257,15 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
enum ice_tunnel_type tnl_type;
- enum ice_status status;
+ int status;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
ntohs(ti->port));
if (status) {
- netdev_err(netdev, "Error removing UDP tunnel - %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "Error removing UDP tunnel - %d\n",
+ status);
return -EIO;
}
@@ -2145,17 +2281,17 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
* @prot: variable to receive the protocol ID
* @off: variable to receive the protocol offset
*/
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off)
{
struct ice_fv_word *fv_ext;
if (prof >= hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (fv_idx >= hw->blk[blk].es.fvw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
@@ -2178,11 +2314,11 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
* PTG ID that contains it through the PTG parameter, with the value of
* ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
*/
-static enum ice_status
+static int
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
if (ptype >= ICE_XLT1_CNT || !ptg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
return 0;
@@ -2212,21 +2348,21 @@ static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
* This function will remove the ptype from the specific PTG, and move it to
* the default PTG (ICE_DEFAULT_PTG).
*/
-static enum ice_status
+static int
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
struct ice_ptg_ptype **ch;
struct ice_ptg_ptype *p;
if (ptype > ICE_XLT1_CNT - 1)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Should not happen if .in_use is set, bad config */
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
- return ICE_ERR_CFG;
+ return -EIO;
/* find the ptype within this PTG, and bypass the link over it */
p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
@@ -2259,17 +2395,17 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
* a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
-static enum ice_status
+static int
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
- enum ice_status status;
u8 original_ptg;
+ int status;
if (ptype > ICE_XLT1_CNT - 1)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
if (status)
@@ -2404,11 +2540,11 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
* This function will lookup the VSI entry in the XLT2 list and return
* the VSI group its associated with.
*/
-static enum ice_status
+static int
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
if (!vsig || vsi >= ICE_MAX_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* As long as there's a default or valid VSIG associated with the input
* VSI, the functions returns a success. Any handling of VSIG will be
@@ -2473,7 +2609,7 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
* for, the list must match exactly, including the order in which the
* characteristics are listed.
*/
-static enum ice_status
+static int
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
struct list_head *chs, u16 *vsig)
{
@@ -2487,7 +2623,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -2499,8 +2635,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
* The function will remove all VSIs associated with the input VSIG and move
* them to the DEFAULT_VSIG and mark the VSIG available.
*/
-static enum ice_status
-ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
struct ice_vsig_prof *dtmp, *del;
struct ice_vsig_vsi *vsi_cur;
@@ -2508,10 +2643,10 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M;
if (idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
@@ -2560,7 +2695,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* The function will remove the input VSI from its VSI group and move it
* to the DEFAULT_VSIG.
*/
-static enum ice_status
+static int
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
@@ -2569,10 +2704,10 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* entry already in default VSIG, don't have to remove */
if (idx == ICE_DEFAULT_VSIG)
@@ -2580,7 +2715,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
if (!(*vsi_head))
- return ICE_ERR_CFG;
+ return -EIO;
vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
vsi_cur = (*vsi_head);
@@ -2597,7 +2732,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
/* verify if VSI was removed from group list */
if (!vsi_cur)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
vsi_cur->vsig = ICE_DEFAULT_VSIG;
vsi_cur->changed = 1;
@@ -2618,24 +2753,24 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
* move the entry to the DEFAULT_VSIG, update the original VSIG and
* then move entry to the new VSIG.
*/
-static enum ice_status
+static int
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi *tmp;
- enum ice_status status;
u16 orig_vsig, idx;
+ int status;
idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* if the VSIG is not in use and is not the default type, this VSIG
* doesn't exist.
*/
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
vsig != ICE_DEFAULT_VSIG)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (status)
@@ -2741,7 +2876,7 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
* @masks: masks for FV
* @prof_id: receives the profile ID
*/
-static enum ice_status
+static int
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
{
@@ -2752,7 +2887,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
* field vector and mask. This will cause rule interference.
*/
if (blk == ICE_BLK_FD)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw;
@@ -2768,7 +2903,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
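
A typical consumer treats -ENOENT from this lookup as "no existing profile matches the field vector and masks" and falls back to allocating a fresh profile ID, as ice_add_prof() does. A sketch under that assumption:

        u8 prof_id;
        int status;

        status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
        if (status == -ENOENT)
                /* no match: allocate a new ID and write the FV to it later */
                status = ice_alloc_prof_id(hw, blk, &prof_id);
        if (status)
                return status;
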
/**
@@ -2821,14 +2956,14 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
-static enum ice_status
+static int
ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
u16 *tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
}
@@ -2841,13 +2976,13 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
*
* This function frees an entry in a Profile ID TCAM for a specific block.
*/
-static enum ice_status
+static int
ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
}
@@ -2861,15 +2996,14 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
* This function allocates a new profile ID, which also corresponds to a Field
* Vector (Extraction Sequence) entry.
*/
-static enum ice_status
-ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
{
- enum ice_status status;
u16 res_type;
u16 get_prof;
+ int status;
if (!ice_prof_id_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
if (!status)
@@ -2886,14 +3020,13 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
*
* This function frees a profile ID, which also corresponds to a Field Vector.
*/
-static enum ice_status
-ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
u16 tmp_prof_id = (u16)prof_id;
u16 res_type;
if (!ice_prof_id_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
}
@@ -2904,11 +3037,10 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
* @blk: the block in which to increment the profile ID reference count
* @prof_id: the profile ID for which to increment the reference count
*/
-static enum ice_status
-ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw->blk[blk].es.ref_count[prof_id]++;
@@ -3025,17 +3157,17 @@ static void ice_init_all_prof_masks(struct ice_hw *hw)
* @mask: the 16-bit mask
* @mask_idx: variable to receive the mask index
*/
-static enum ice_status
+static int
ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
u16 *mask_idx)
{
bool found_unused = false, found_copy = false;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
u16 unused_idx = 0, copy_idx = 0;
+ int status = -ENOSPC;
u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->blk[blk].masks.lock);
@@ -3093,15 +3225,15 @@ err_ice_alloc_prof_mask:
* @blk: hardware block
* @mask_idx: index of mask
*/
-static enum ice_status
+static int
ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
{
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!(mask_idx >= hw->blk[blk].masks.first &&
mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
mutex_lock(&hw->blk[blk].masks.lock);
@@ -3135,14 +3267,14 @@ exit_ice_free_prof_mask:
* @blk: hardware block
* @prof_id: profile ID
*/
-static enum ice_status
+static int
ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
{
u32 mask_bm;
u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
mask_bm = hw->blk[blk].es.mask_ena[prof_id];
for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
@@ -3197,7 +3329,7 @@ static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
* @prof_id: profile ID
* @masks: masks
*/
-static enum ice_status
+static int
ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
u16 *masks)
{
@@ -3227,7 +3359,7 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
if (ena_mask & BIT(i))
ice_free_prof_mask(hw, blk, i);
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
}
/* enable the masks for this profile */
@@ -3269,11 +3401,11 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
* @blk: the block in which to decrement the profile ID reference count
* @prof_id: the profile ID for which to decrement the reference count
*/
-static enum ice_status
+static int
ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
@@ -3711,7 +3843,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
* ice_init_hw_tbls - init hardware table memory
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+int ice_init_hw_tbls(struct ice_hw *hw)
{
u8 i;
@@ -3830,7 +3962,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
err:
ice_free_hw_tbls(hw);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -3846,7 +3978,7 @@ err:
* @nm_msk: never match mask
* @key: output of profile ID key
*/
-static enum ice_status
+static int
ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -3902,7 +4034,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
* @dc_msk: don't care mask
* @nm_msk: never match mask
*/
-static enum ice_status
+static int
ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -3910,7 +4042,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
{
struct ice_prof_tcam_entry;
- enum ice_status status;
+ int status;
status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
@@ -3929,7 +4061,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
* @vsig: VSIG to query
* @refs: pointer to variable to receive the reference count
*/
-static enum ice_status
+static int
ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
@@ -3938,7 +4070,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
*refs = 0;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
while (ptr) {
@@ -3979,7 +4111,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct list_head *chgs)
{
@@ -3999,7 +4131,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
sizeof(p->es[0]));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->prof_id);
@@ -4017,7 +4149,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct list_head *chgs)
{
@@ -4033,7 +4165,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct_size(p, entry, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
@@ -4053,7 +4185,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct list_head *chgs)
{
@@ -4069,7 +4201,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct_size(p, value, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->ptype);
@@ -4085,7 +4217,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct list_head *chgs)
{
@@ -4104,7 +4236,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct_size(p, value, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->vsi);
@@ -4124,18 +4256,18 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
* @blk: hardware block
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
struct list_head *chgs)
{
struct ice_buf_build *b;
struct ice_chs_chg *tmp;
- enum ice_status status;
u16 pkg_sects;
u16 xlt1 = 0;
u16 xlt2 = 0;
u16 tcam = 0;
u16 es = 0;
+ int status;
u16 sects;
/* count number of sections we need */
@@ -4167,7 +4299,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
/* Build update package buffer */
b = ice_pkg_buf_alloc(hw);
if (!b)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_pkg_buf_reserve_section(b, sects);
if (status)
@@ -4204,13 +4336,13 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
*/
pkg_sects = ice_pkg_buf_get_active_sections(b);
if (!pkg_sects || pkg_sects != sects) {
- status = ICE_ERR_INVAL_SIZE;
+ status = -EINVAL;
goto error_tmp;
}
/* update package */
status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
- if (status == ICE_ERR_AQ_ERROR)
+ if (status == -EIO)
ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
error_tmp:
@@ -4276,7 +4408,7 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
* @prof_id: profile ID
* @es: extraction sequence (length of array is determined by the block)
*/
-static enum ice_status
+static int
ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
{
DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
@@ -4308,7 +4440,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
es[i].off == ice_fd_pairs[j].off) {
- set_bit(j, pair_list);
+ __set_bit(j, pair_list);
pair_start[j] = i;
}
}
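
The set_bit() to __set_bit() changes in this patch are correctness-neutral: pair_list (and the ptgs_used bitmaps further down) are on-stack DECLARE_BITMAP()s with a single writer, so the non-atomic variant simply drops an unneeded locked read-modify-write. In short:

        DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);

        bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
        __set_bit(j, pair_list);        /* non-atomic is fine: bitmap is local */
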
@@ -4331,7 +4463,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
/* check for room */
if (first_free + 1 < (s8)ice_fd_pairs[index].count)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* place in extraction sequence */
for (k = 0; k < ice_fd_pairs[index].count; k++) {
@@ -4341,7 +4473,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
ice_fd_pairs[index].off + (k * 2);
if (k > first_free)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* keep track of non-relevant fields */
mask_sel |= BIT(first_free - k);
@@ -4452,7 +4584,7 @@ ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
* @attr: array of attributes that will be considered
* @attr_cnt: number of elements in the attribute array
*/
-static enum ice_status
+static int
ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
const struct ice_ptype_attributes *attr, u16 attr_cnt)
{
@@ -4468,11 +4600,11 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
&prof->attr[prof->ptg_cnt]);
if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
}
if (!found)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
return 0;
}
@@ -4493,7 +4625,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* it will not be written until the first call to ice_add_flow that specifies
* the ID value used here.
*/
-enum ice_status
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks)
@@ -4501,9 +4633,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- enum ice_status status;
u8 byte = 0;
u8 prof_id;
+ int status;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -4541,7 +4673,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* add profile info */
prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
if (!prof) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof;
}
@@ -4578,13 +4710,13 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
if (test_bit(ptg, ptgs_used))
continue;
- set_bit(ptg, ptgs_used);
+ __set_bit(ptg, ptgs_used);
/* Check to see there are any attributes for
* this PTYPE, and add them if found.
*/
status = ice_add_prof_attrib(prof, ptg, ptype,
attr, attr_cnt);
- if (status == ICE_ERR_MAX_LIMIT)
+ if (status == -ENOSPC)
break;
if (status) {
/* This is simply a PTYPE/PTG with no
@@ -4661,14 +4793,13 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* @blk: hardware block
* @idx: the index to release
*/
-static enum ice_status
-ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
{
/* Masks to invoke a never match entry */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status;
+ int status;
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
@@ -4688,11 +4819,11 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
* @blk: hardware block
* @prof: pointer to profile structure to remove
*/
-static enum ice_status
+static int
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_vsig_prof *prof)
{
- enum ice_status status;
+ int status;
u16 i;
for (i = 0; i < prof->tcam_count; i++)
@@ -4701,7 +4832,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
status = ice_rel_tcam_idx(hw, blk,
prof->tcam[i].tcam_idx);
if (status)
- return ICE_ERR_HW_TABLE;
+ return -EIO;
}
return 0;
@@ -4714,14 +4845,14 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
* @vsig: the VSIG to remove
* @chg: the change list
*/
-static enum ice_status
+static int
ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_vsi *vsi_cur;
struct ice_vsig_prof *d, *t;
- enum ice_status status;
+ int status;
/* remove TCAM entries */
list_for_each_entry_safe(d, t,
@@ -4748,7 +4879,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
p->type = ICE_VSIG_REM;
p->orig_vsig = vsig;
@@ -4771,13 +4902,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @hdl: profile handle indicating which profile to remove
* @chg: list to receive a record of changes
*/
-static enum ice_status
+static int
ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct list_head *chg)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_prof *p, *t;
- enum ice_status status;
+ int status;
list_for_each_entry_safe(p, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
@@ -4795,7 +4926,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
return status;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -4804,12 +4935,11 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
* @blk: hardware block
* @id: profile tracking ID
*/
-static enum ice_status
-ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_chs_chg *del, *tmp;
- enum ice_status status;
struct list_head chg;
+ int status;
u16 i;
INIT_LIST_HEAD(&chg);
@@ -4845,16 +4975,16 @@ err_ice_rem_flow_all:
* previously created through ice_add_prof. If any existing entries
* are associated with this profile, they will be removed as well.
*/
-enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *pmap;
- enum ice_status status;
+ int status;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_rem_prof;
}
@@ -4881,20 +5011,20 @@ err_ice_rem_prof:
* @hdl: profile handle
* @chg: change list
*/
-static enum ice_status
+static int
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct list_head *chg)
{
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_chs_chg *p;
+ int status = 0;
u16 i;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_get_prof;
}
@@ -4904,7 +5034,7 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_get_prof;
}
@@ -4936,7 +5066,7 @@ err_ice_get_prof:
*
* This routine makes a copy of the list of profiles in the specified VSIG.
*/
-static enum ice_status
+static int
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *lst)
{
@@ -4964,7 +5094,7 @@ err_ice_get_profs_vsig:
devm_kfree(ice_hw_to_dev(hw), ent1);
}
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -4974,25 +5104,25 @@ err_ice_get_profs_vsig:
* @lst: the list to be added to
* @hdl: profile handle of entry to add
*/
-static enum ice_status
+static int
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct list_head *lst, u64 hdl)
{
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
+ int status = 0;
u16 i;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_add_prof_to_lst;
}
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof_to_lst;
}
@@ -5021,17 +5151,17 @@ err_ice_add_prof_to_lst:
* @vsig: the VSIG to move the VSI to
* @chg: the change list
*/
-static enum ice_status
+static int
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 orig_vsig;
+ int status;
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (!status)
@@ -5081,13 +5211,13 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
*
* This function appends an enable or disable TCAM entry in the change log
*/
-static enum ice_status
+static int
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
u16 vsig, struct ice_tcam_inf *tcam,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
+ int status;
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
@@ -5120,7 +5250,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
tcam->ptg, vsig, 0, tcam->attr.flags,
@@ -5154,13 +5284,13 @@ err_ice_prof_tcam_ena_dis:
* @vsig: the VSIG for which to adjust profile priorities
* @chg: the change list
*/
-static enum ice_status
+static int
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_vsig_prof *t;
- enum ice_status status;
+ int status;
u16 idx;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -5209,7 +5339,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
}
/* keep track of used ptgs */
- set_bit(t->tcam[i].ptg, ptgs_used);
+ __set_bit(t->tcam[i].ptg, ptgs_used);
}
}
@@ -5225,7 +5355,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @rev: true to add entries to the end of the list
* @chg: the change list
*/
-static enum ice_status
+static int
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
bool rev, struct list_head *chg)
{
@@ -5233,26 +5363,26 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
+ int status = 0;
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
/* new VSIG profile structure */
t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
if (!t)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_add_prof_id_vsig;
}
@@ -5267,7 +5397,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof_id_vsig;
}
@@ -5337,21 +5467,21 @@ err_ice_add_prof_id_vsig:
* @hdl: the profile handle of the profile that will be added to the VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 new_vsig;
+ int status;
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
new_vsig = ice_vsig_alloc(hw, blk);
if (!new_vsig) {
- status = ICE_ERR_HW_TABLE;
+ status = -EIO;
goto err_ice_create_prof_id_vsig;
}
@@ -5387,18 +5517,18 @@ err_ice_create_prof_id_vsig:
* @new_vsig: return of new VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
struct list_head *lst, u16 *new_vsig,
struct list_head *chg)
{
struct ice_vsig_prof *t;
- enum ice_status status;
+ int status;
u16 vsig;
vsig = ice_vsig_alloc(hw, blk);
if (!vsig)
- return ICE_ERR_HW_TABLE;
+ return -EIO;
status = ice_move_vsi(hw, blk, vsi, vsig, chg);
if (status)
@@ -5428,8 +5558,8 @@ static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
struct ice_vsig_prof *t;
- enum ice_status status;
struct list_head lst;
+ int status;
INIT_LIST_HEAD(&lst);
@@ -5459,14 +5589,14 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be enabled.
*/
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct list_head union_lst;
- enum ice_status status;
struct list_head chg;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&union_lst);
@@ -5492,7 +5622,7 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* scenario
*/
if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
- status = ICE_ERR_ALREADY_EXISTS;
+ status = -EEXIST;
goto err_ice_add_prof_id_flow;
}
@@ -5600,7 +5730,7 @@ err_ice_add_prof_id_flow:
* @lst: list to remove the profile from
* @hdl: the profile handle indicating the profile to remove
*/
-static enum ice_status
+static int
ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
{
struct ice_vsig_prof *ent, *tmp;
@@ -5612,7 +5742,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -5626,13 +5756,13 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be disabled.
*/
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct list_head chg, copy;
- enum ice_status status;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&copy);
@@ -5727,7 +5857,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
}
}
} else {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
}
/* update hardware tables */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index a2863f38fd1f..6cbc29bcb02f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,10 +18,67 @@
#define ICE_PKG_CNT 4
-enum ice_status
+enum ice_ddp_state {
+ /* Indicates that this call to ice_init_pkg
+ * successfully loaded the requested DDP package
+ */
+ ICE_DDP_PKG_SUCCESS = 0,
+
+ /* Generic error for already-loaded cases; it is mapped later to
+ * the more specific one (one of the next 3)
+ */
+ ICE_DDP_PKG_ALREADY_LOADED = -1,
+
+ /* Indicates that a DDP package of the same version has already been
+ * loaded onto the device by a previous call or by another PF
+ */
+ ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
+
+ /* The device has a DDP package that is not supported by the driver */
+ ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
+
+ /* The device has a compatible package
+ * (but different from the request) already loaded
+ */
+ ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
+
+ /* The firmware loaded on the device is not compatible with
+ * the DDP package loaded
+ */
+ ICE_DDP_PKG_FW_MISMATCH = -5,
+
+ /* The DDP package file is invalid */
+ ICE_DDP_PKG_INVALID_FILE = -6,
+
+ /* The version of the DDP package provided is higher than
+ * the driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
+
+ /* The version of the DDP package provided is lower than the
+ * driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
+
+ /* The signature of the DDP package file provided is invalid */
+ ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9,
+
+ /* The DDP package file security revision is too low and not
+ * supported by firmware
+ */
+ ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10,
+
+ /* An error occurred in firmware while loading the DDP package */
+ ICE_DDP_PKG_LOAD_ERROR = -11,
+
+ /* Other errors */
+ ICE_DDP_PKG_ERR = -12
+};
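
The negative values here form their own namespace, distinct from -errno, so callers must classify them instead of returning them up the stack. The success check likely reduces to a small switch along these lines (a sketch; ice_is_init_pkg_successful(), declared below, is the in-tree entry point):

        /* sketch: states that still leave the device with a usable package */
        static bool pkg_state_usable(enum ice_ddp_state state)
        {
                switch (state) {
                case ICE_DDP_PKG_SUCCESS:
                case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
                case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
                        return true;
                default:
                        return false;
                }
        }
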
+
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
void
@@ -29,7 +86,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
unsigned long *bm);
void
ice_init_prof_result_bm(struct ice_hw *hw);
-enum ice_status
+int
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
unsigned long *bm, struct list_head *fv_list);
bool
@@ -40,22 +97,26 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
-enum ice_status
+/* Rx parser PTYPE functions */
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
+
+/* XLT2/VSI group functions */
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks);
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
-enum ice_status
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
+enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
+bool ice_is_init_pkg_successful(enum ice_ddp_state state);
+int ice_init_hw_tbls(struct ice_hw *hw);
void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
-enum ice_status
-ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
#endif /* _ICE_FLEX_PIPE_H_ */
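
Taken together, the header now gives package load a two-step contract: ice_init_pkg()/ice_copy_and_init_pkg() report a descriptive enum ice_ddp_state, and ice_is_init_pkg_successful() collapses it to a boolean. A usage sketch (firmware buffer acquisition and logging elided; the -EIO mapping is an assumption):

        enum ice_ddp_state state;

        state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
        if (!ice_is_init_pkg_successful(state))
                return -EIO;    /* fall back to safe mode; state says why */
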
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 0f572a36d021..fc087e0b5292 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -160,6 +160,7 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
+#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_TXPARSER_BOOST_TCAM 66
@@ -201,6 +202,24 @@ enum ice_sect {
ICE_SECT_COUNT
};
+/* Packet Type (PTYPE) values */
+#define ICE_PTYPE_MAC_PAY 1
+#define ICE_PTYPE_IPV4_PAY 23
+#define ICE_PTYPE_IPV4_UDP_PAY 24
+#define ICE_PTYPE_IPV4_TCP_PAY 26
+#define ICE_PTYPE_IPV4_SCTP_PAY 27
+#define ICE_PTYPE_IPV6_PAY 89
+#define ICE_PTYPE_IPV6_UDP_PAY 90
+#define ICE_PTYPE_IPV6_TCP_PAY 92
+#define ICE_PTYPE_IPV6_SCTP_PAY 93
+#define ICE_MAC_IPV4_ESP 160
+#define ICE_MAC_IPV6_ESP 161
+#define ICE_MAC_IPV4_AH 162
+#define ICE_MAC_IPV6_AH 163
+#define ICE_MAC_IPV4_NAT_T_ESP 164
+#define ICE_MAC_IPV6_NAT_T_ESP 165
+#define ICE_MAC_IPV4_GTPU 329
+#define ICE_MAC_IPV6_GTPU 330
#define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331
#define ICE_MAC_IPV4_GTPU_IPV4_PAY 332
#define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333
@@ -221,6 +240,10 @@ enum ice_sect {
#define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348
#define ICE_MAC_IPV6_GTPU_IPV6_TCP 349
#define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350
+#define ICE_MAC_IPV4_PFCP_SESSION 352
+#define ICE_MAC_IPV6_PFCP_SESSION 354
+#define ICE_MAC_IPV4_L2TPV3 360
+#define ICE_MAC_IPV6_L2TPV3 361
/* Attributes that can modify PTYPE definitions.
*
@@ -329,6 +352,25 @@ struct ice_boost_tcam_section {
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
+/* package Marker Ptype TCAM entry */
+struct ice_marker_ptype_tcam_entry {
+#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
+ __le16 addr;
+ __le16 ptype;
+ u8 keys[20];
+};
+
+struct ice_marker_ptype_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_marker_ptype_tcam_entry tcam[];
+};
+
+#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
+ sizeof(struct ice_marker_ptype_tcam_entry), \
+ sizeof(struct ice_marker_ptype_tcam_entry))
+
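
The struct_size() expression in the macro above is the usual flexible-array sizing trick: struct_size(p, tcam, 1) is the section header plus one entry (padding included), so subtracting one entry size leaves exactly the header. Assuming ICE_MAX_ENTRIES_IN_BUF() divides the remaining buffer space by the entry size, the computation is roughly:

        size_t hdr = struct_size((struct ice_marker_ptype_tcam_section *)0,
                                 tcam, 1) -
                     sizeof(struct ice_marker_ptype_tcam_entry);
        /* entries per buffer ~= (buf_payload_size - hdr) / entry_size */
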
struct ice_xlt1_section {
__le16 count;
__le16 offset;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index f160672448a0..beed4838dcbe 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -609,8 +609,6 @@ struct ice_flow_prof_params {
ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
ICE_FLOW_SEG_HDR_NAT_T_ESP)
-#define ICE_FLOW_SEG_HDRS_L2_MASK \
- (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
@@ -625,8 +623,7 @@ struct ice_flow_prof_params {
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
*/
-static enum ice_status
-ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
u8 i;
@@ -634,12 +631,12 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Multiple L3 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Multiple L4 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
return 0;
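
The is_power_of_2() checks above encode "at most one header per layer": the masked value may have zero or one bit set, never two. For instance, a segment asking for both IPv4 and IPv6 fails:

        u32 hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6;

        if (hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
            !is_power_of_2(hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
                return -EINVAL; /* two L3 bits set: rejected */
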
@@ -700,8 +697,7 @@ static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
* This function identifies the packet types associated with the protocol
* headers present in packet segments of the specified flow profile.
*/
-static enum ice_status
-ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
struct ice_flow_prof *prof;
u8 i;
@@ -898,7 +894,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
* field. It then allocates one or more extraction sequence entries for the
* given field, and fills the entries with protocol ID and offset information.
*/
-static enum ice_status
+static int
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld, u64 match)
{
@@ -1035,7 +1031,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
prot_id = ICE_PROT_GRE_OF;
break;
default:
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
}
/* Each extraction sequence entry is a word in size, and extracts a
@@ -1073,7 +1069,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* does not exceed the block's capability
*/
if (params->es_cnt >= fv_words)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
@@ -1099,7 +1095,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* @params: information about the flow to be processed
* @seg: index of packet segment whose raw fields are to be extracted
*/
-static enum ice_status
+static int
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg)
{
@@ -1112,12 +1108,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
if (params->prof->segs[seg].raws_cnt >
ARRAY_SIZE(params->prof->segs[seg].raws))
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* Offsets within the segment headers are not supported */
hdrs_sz = ice_flow_calc_seg_sz(params, seg);
if (!hdrs_sz)
- return ICE_ERR_PARAM;
+ return -EINVAL;
fv_words = hw->blk[params->blk].es.fvw;
@@ -1150,7 +1146,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/
if (params->es_cnt >= hw->blk[params->blk].es.count ||
params->es_cnt >= ICE_MAX_FV_WORDS)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
@@ -1176,12 +1172,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
* This function iterates through all matched fields in the given segments, and
* creates an extraction sequence for the fields.
*/
-static enum ice_status
+static int
ice_flow_create_xtrct_seq(struct ice_hw *hw,
struct ice_flow_prof_params *params)
{
struct ice_flow_prof *prof = params->prof;
- enum ice_status status = 0;
+ int status = 0;
u8 i;
for (i = 0; i < prof->segs_cnt; i++) {
@@ -1210,10 +1206,10 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
*/
-static enum ice_status
+static int
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
- enum ice_status status;
+ int status;
status = ice_flow_proc_seg_hdrs(params);
if (status)
@@ -1229,7 +1225,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
status = 0;
break;
default:
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
}
return status;
@@ -1329,12 +1325,12 @@ ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
* @blk: classification stage
* @entry: flow entry to be removed
*/
-static enum ice_status
+static int
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
struct ice_flow_entry *entry)
{
if (!entry)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
list_del(&entry->l_entry);
@@ -1355,27 +1351,27 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, u64 prof_id,
struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
- enum ice_status status;
+ int status;
u8 i;
if (!prof)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
GFP_KERNEL);
if (!params->prof) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto free_params;
}
@@ -1432,11 +1428,11 @@ free_params:
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof)
{
- enum ice_status status;
+ int status;
/* Remove all remaining flow entries before removing the flow profile */
if (!list_empty(&prof->entries)) {
@@ -1474,11 +1470,11 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = 0;
+ int status = 0;
if (!test_bit(vsi_handle, prof->vsis)) {
status = ice_add_prof_id_flow(hw, blk,
@@ -1505,11 +1501,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = 0;
+ int status = 0;
if (test_bit(vsi_handle, prof->vsis)) {
status = ice_rem_prof_id_flow(hw, blk,
@@ -1536,21 +1532,21 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @segs_cnt: number of packet segments provided
* @prof: stores the returned flow profile added
*/
-enum ice_status
+int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
{
- enum ice_status status;
+ int status;
if (segs_cnt > ICE_FLOW_SEG_MAX)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
if (!segs_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!segs)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
status = ice_flow_val_hdrs(segs, segs_cnt);
if (status)
@@ -1574,17 +1570,16 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
* @blk: the block for which the flow profile is to be removed
* @prof_id: unique ID of the flow profile to be removed
*/
-enum ice_status
-ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
mutex_lock(&hw->fl_profs_locks[blk]);
prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto out;
}
@@ -1608,34 +1603,34 @@ out:
* @data: pointer to a data buffer containing flow entry's match values/masks
* @entry_h: pointer to buffer that receives the new flow entry's handle
*/
-enum ice_status
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
void *data, u64 *entry_h)
{
struct ice_flow_entry *e = NULL;
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
/* No flow entry data is expected for RSS */
if (!entry_h || (!data && blk != ICE_BLK_RSS))
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->fl_profs_locks[blk]);
prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
} else {
/* Allocate memory for the entry being added and associate
* the VSI to the found flow profile
*/
e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
if (!e)
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
else
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
}
@@ -1654,7 +1649,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
case ICE_BLK_RSS:
break;
default:
- status = ICE_ERR_NOT_IMPL;
+ status = -EOPNOTSUPP;
goto out;
}
@@ -1680,15 +1675,14 @@ out:
* @blk: classification stage
* @entry_h: handle to the flow entry to be removed
*/
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
- u64 entry_h)
+int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h)
{
struct ice_flow_entry *entry;
struct ice_flow_prof *prof;
- enum ice_status status = 0;
+ int status = 0;
if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
- return ICE_ERR_PARAM;
+ return -EINVAL;
entry = ICE_FLOW_ENTRY_PTR(entry_h);
@@ -1812,6 +1806,57 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
seg->raws_cnt++;
}
+/**
+ * ice_flow_rem_vsi_prof - remove VSI from flow profile
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof_id: unique ID to identify this flow profile
+ *
+ * This function removes the flow entries associated with the input
+ * VSI handle and disassociates the VSI from the flow profile.
+ */
+int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
+{
+ struct ice_flow_prof *prof;
+ int status = 0;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return -EINVAL;
+
+ /* find flow profile pointer with input package block and profile ID */
+ prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
+ if (!prof) {
+ ice_debug(hw, ICE_DBG_PKG, "Cannot find flow profile id=%llu\n",
+ prof_id);
+ return -ENOENT;
+ }
+
+ /* Remove all remaining flow entries before removing the flow profile */
+ if (!list_empty(&prof->entries)) {
+ struct ice_flow_entry *e, *t;
+
+ mutex_lock(&prof->entries_lock);
+ list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
+ if (e->vsi_handle != vsi_handle)
+ continue;
+
+ status = ice_flow_rem_entry_sync(hw, ICE_BLK_FD, e);
+ if (status)
+ break;
+ }
+ mutex_unlock(&prof->entries_lock);
+ }
+ if (status)
+ return status;
+
+ /* disassociate the flow profile from sw VSI handle */
+ status = ice_flow_disassoc_prof(hw, ICE_BLK_FD, prof, vsi_handle);
+ if (status)
+ ice_debug(hw, ICE_DBG_PKG, "ice_flow_disassoc_prof() failed with status=%d\n",
+ status);
+ return status;
+}
+
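
A plausible call site for the new helper is VSI teardown, where any flow director entries tied to the VSI must be flushed before the profile can be released (variable names illustrative):

        int err;

        err = ice_flow_rem_vsi_prof(hw, vsi_handle, prof_id);
        if (err && err != -ENOENT)
                ice_debug(hw, ICE_DBG_PKG, "FD profile removal failed %d\n",
                          err);
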
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
@@ -1836,7 +1881,7 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
-static enum ice_status
+static int
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
{
@@ -1853,15 +1898,15 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
- return ICE_ERR_PARAM;
+ return -EINVAL;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val))
- return ICE_ERR_CFG;
+ return -EIO;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
}
@@ -1899,14 +1944,14 @@ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
* the VSI from that profile. If the flow profile has no VSIs it will
* be removed.
*/
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *p, *t;
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (list_empty(&hw->fl_profs[blk]))
return 0;
@@ -1966,7 +2011,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
struct ice_rss_cfg *r, *rss_cfg;
@@ -1981,7 +2026,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
GFP_KERNEL);
if (!rss_cfg)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
@@ -2022,21 +2067,21 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
- enum ice_status status;
+ int status;
if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
- return ICE_ERR_PARAM;
+ return -EINVAL;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
@@ -2128,15 +2173,15 @@ exit:
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
-enum ice_status
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
{
- enum ice_status status;
+ int status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
@@ -2159,18 +2204,18 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
@@ -2182,7 +2227,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto out;
}
@@ -2216,15 +2261,15 @@ out:
* removed. Calls are made to underlying flow APIs which will in
* turn build or update buffers for the RSS XLT1 section.
*/
-enum ice_status __maybe_unused
+int __maybe_unused
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
{
- enum ice_status status;
+ int status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
@@ -2279,20 +2324,19 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
- enum ice_status status = 0;
+ int status = 0;
u64 hash_flds;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Make sure no unsupported bits are specified */
if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
- return ICE_ERR_CFG;
+ return -EIO;
hash_flds = avf_hash;
@@ -2352,7 +2396,7 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
}
if (rss_hash == ICE_HASH_INVALID)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
ICE_FLOW_SEG_HDR_NONE);
@@ -2368,13 +2412,13 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
*/
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
- enum ice_status status = 0;
struct ice_rss_cfg *r;
+ int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 2a2d8c1536cb..84b6e4464a21 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -383,33 +383,31 @@ struct ice_rss_cfg {
u32 packet_hdr;
};
-enum ice_status
+int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof);
-enum ice_status
-ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
-enum ice_status
+int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, u64 *entry_h);
-enum ice_status
-ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
+int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
u16 val_loc, u16 mask_loc);
+int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
-enum ice_status
+int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index c2e78eaf4ccb..c29177c6bb9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -47,12 +47,69 @@ ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info,
}
/**
+ * ice_fltr_set_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi: the VSI being configured
+ * @promisc_mask: mask of promiscuous config bits
+ *
+ * Set VSI with all associated VLANs to given promiscuous mode(s)
+ */
+int
+ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask)
+{
+ return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+}
+
+/**
+ * ice_fltr_clear_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi: the VSI being configured
+ * @promisc_mask: mask of promiscuous config bits
+ *
+ * Clear VSI with all associated VLANs to given promiscuous mode(s)
+ */
+int
+ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask)
+{
+ return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+}
+
+/**
+ * ice_fltr_clear_vsi_promisc - clear specified promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to clear mode
+ * @promisc_mask: mask of promiscuous config bits to clear
+ * @vid: VLAN ID to clear VLAN promiscuous
+ */
+int
+ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+}
+
+/**
+ * ice_fltr_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @vid: VLAN ID to set VLAN promiscuous
+ */
+int
+ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+}
+
+/**
* ice_fltr_add_mac_list - add list of MAC filters
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-enum ice_status
-ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
+int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_mac(&vsi->back->hw, list);
}
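
These promiscuous-mode wrappers are thin pass-throughs that now return plain Linux errnos. As a hedged illustration only (the function below and its error handling are hypothetical; ICE_PROMISC_VLAN_RX/ICE_PROMISC_VLAN_TX are the driver's existing mask bits), a caller might toggle VLAN promiscuous mode like this:

/* Hypothetical caller, for illustration only. */
static int example_set_vlan_promisc(struct ice_vsi *vsi, bool ena)
{
	struct ice_hw *hw = &vsi->back->hw;
	u8 mask = ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX;
	int err;

	err = ena ? ice_fltr_set_vlan_vsi_promisc(hw, vsi, mask)
		  : ice_fltr_clear_vlan_vsi_promisc(hw, vsi, mask);
	if (err)
		dev_err(ice_pf_to_dev(vsi->back),
			"%s VLAN promisc failed, err %d\n",
			ena ? "enable" : "disable", err);
	return err;
}
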
@@ -62,8 +119,7 @@ ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-enum ice_status
-ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
+int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_mac(&vsi->back->hw, list);
}
@@ -73,8 +129,7 @@ ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_vlan(&vsi->back->hw, list);
}
@@ -84,7 +139,7 @@ ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
+static int
ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_vlan(&vsi->back->hw, list);
@@ -95,8 +150,7 @@ ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_eth_mac(&vsi->back->hw, list);
}
@@ -106,8 +160,7 @@ ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_eth_mac(&vsi->back->hw, list);
}
@@ -207,18 +260,17 @@ ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list,
* @action: action to be performed on filter match
* @mac_action: pointer to add or remove MAC function
*/
-static enum ice_status
+static int
ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action,
- enum ice_status (*mac_action)(struct ice_vsi *,
- struct list_head *))
+ int (*mac_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
result = mac_action(vsi, &tmp_list);
@@ -233,21 +285,21 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
* @action: action to be performed on filter match
* @mac_action: pointer to add or remove MAC function
*/
-static enum ice_status
+static int
ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action,
- enum ice_status(*mac_action)
+ int (*mac_action)
(struct ice_vsi *, struct list_head *))
{
u8 broadcast[ETH_ALEN];
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
eth_broadcast_addr(broadcast);
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
result = mac_action(vsi, &tmp_list);
@@ -262,17 +314,16 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
* @action: action to be performed on filter match
* @vlan_action: pointer to add or remove VLAN function
*/
-static enum ice_status
+static int
ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
enum ice_sw_fwd_act_type action,
- enum ice_status (*vlan_action)(struct ice_vsi *,
- struct list_head *))
+ int (*vlan_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
result = vlan_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -287,17 +338,16 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @action: action to be performed on filter match
* @eth_action: pointer to add or remove ethertype function
*/
-static enum ice_status
+static int
ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action,
- enum ice_status (*eth_action)(struct ice_vsi *,
- struct list_head *))
+ int (*eth_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
result = eth_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -310,8 +360,8 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
* @mac: MAC to add
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list);
}
@@ -322,7 +372,7 @@ enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
* @mac: MAC to add
* @action: action to be performed on filter match
*/
-enum ice_status
+int
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action)
{
@@ -336,8 +386,8 @@ ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
* @mac: filter MAC to remove
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list);
}
@@ -348,8 +398,8 @@ enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
* @vlan_id: VLAN ID to add
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_vlan(vsi, vlan_id, action,
ice_fltr_add_vlan_list);
@@ -361,8 +411,8 @@ enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @vlan_id: filter VLAN to remove
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_vlan(vsi, vlan_id, action,
ice_fltr_remove_vlan_list);
@@ -375,8 +425,8 @@ enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @flag: direction of packet to be filtered, Tx or Rx
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_add_eth_list);
@@ -389,89 +439,9 @@ enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
* @flag: direction of filter
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
- u16 flag, enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_remove_eth_list);
}
-
-/**
- * ice_fltr_update_rule_flags - update lan_en/lb_en flags
- * @hw: pointer to hw
- * @rule_id: id of rule being updated
- * @recipe_id: recipe id of rule
- * @act: current action field
- * @type: Rx or Tx
- * @src: source VSI
- * @new_flags: combinations of lb_en and lan_en
- */
-static enum ice_status
-ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
- u32 act, u16 type, u16 src, u32 new_flags)
-{
- struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status err;
- u32 flags_mask;
-
- s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
- act &= ~flags_mask;
- act |= (flags_mask & new_flags);
-
- s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id);
- s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
- s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
-
- if (type & ICE_FLTR_RX) {
- s_rule->pdata.lkup_tx_rx.src =
- cpu_to_le16(hw->port_info->lport);
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
-
- } else {
- s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src);
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
- }
-
- err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
- ice_aqc_opc_update_sw_rules, NULL);
-
- kfree(s_rule);
- return err;
-}
-
-/**
- * ice_fltr_build_action - build action for rule
- * @vsi_id: id of VSI which is use to build action
- */
-static u32 ice_fltr_build_action(u16 vsi_id)
-{
- return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) |
- ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
-}
-
-/**
- * ice_fltr_update_flags_dflt_rule - update flags on default rule
- * @vsi: pointer to VSI
- * @rule_id: id of rule
- * @direction: Tx or Rx
- * @new_flags: flags to update
- *
- * Function updates flags on default rule with ICE_SW_LKUP_DFLT.
- *
- * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
- * ICE_SINGLE_ACT_LAN_ENABLE.
- */
-enum ice_status
-ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
- u32 new_flags)
-{
- u32 action = ice_fltr_build_action(vsi->vsi_num);
- struct ice_hw *hw = &vsi->back->hw;
-
- return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action,
- direction, vsi->vsi_num, new_flags);
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 8eec4febead1..3eb42479175f 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -5,38 +5,49 @@
#define _ICE_FLTR_H_
void ice_fltr_free_list(struct device *dev, struct list_head *h);
-enum ice_status
+int
+ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask);
+int
+ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask);
+int
+ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+int
+ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+int
ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
const u8 *mac, enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
-enum ice_status
+int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
+int
ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
+int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
-enum ice_status
+int
ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
void ice_fltr_remove_all(struct ice_vsi *vsi);
-enum ice_status
-ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
- u32 new_flags);
+
+int
+ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id,
+ u32 new_flags);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index f8601d5b0b19..665a344fb9c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -16,6 +16,18 @@ struct ice_fwu_priv {
/* Track which NVM banks to activate at the end of the update */
u8 activate_flags;
+
+ /* Track the firmware response of the required reset to complete the
+ * flash update.
+ *
+ * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required
+ * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required
+ */
+ u8 reset_level;
+
+ /* Track if EMP reset is available */
+ u8 emp_reset_available;
};
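
The three reset_level values cached above drive the user-facing guidance printed once the update completes (see ice_finalize_update() later in this patch). A hedged sketch of that mapping; the helper itself is illustrative, only the ICE_AQC_NVM_*_FLAG names come from the driver:

/* Illustrative helper; flag names and their meanings are from this patch. */
static const char *example_reset_action(u8 reset_level)
{
	switch (reset_level) {
	case ICE_AQC_NVM_EMPR_FLAG:
		return "activate via devlink reload";
	case ICE_AQC_NVM_PERST_FLAG:
		return "reboot the system";
	case ICE_AQC_NVM_POR_FLAG:
	default:
		return "power cycle the system";
	}
}
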
/**
@@ -40,8 +52,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
struct device *dev = context->dev;
struct ice_pf *pf = priv->pf;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 *package_data;
+ int status;
dev_dbg(dev, "Sending PLDM record package data to firmware\n");
@@ -54,9 +66,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
kfree(package_data);
if (status) {
- dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
return -EIO;
}
@@ -203,8 +214,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
struct device *dev = context->dev;
struct ice_pf *pf = priv->pf;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
size_t length;
+ int status;
switch (component->identifier) {
case NVM_COMP_ID_OROM:
@@ -240,9 +251,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
kfree(comp_tbl);
if (status) {
- dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
return -EIO;
}
@@ -259,6 +269,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
* @block_size: size of the block to write, up to 4k
* @block: pointer to block of data to write
* @last_cmd: whether this is the last command
+ * @reset_level: storage for reset level required
* @extack: netlink extended ACK structure
*
* Write a block of data to a flash module, and wait for the completion
@@ -266,18 +277,24 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
*
* Note this function assumes the caller has acquired the NVM resource.
*
+ * On successful return, reset level indicates the device reset required to
+ * complete the update.
+ *
+ * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required
+ * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required
+ *
* Returns: zero on success, or a negative error code on failure.
*/
static int
ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
u16 block_size, u8 *block, bool last_cmd,
- struct netlink_ext_ack *extack)
+ u8 *reset_level, struct netlink_ext_ack *extack)
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u32 completion_offset;
int err;
@@ -286,11 +303,11 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
block_size, module, offset);
- status = ice_aq_update_nvm(hw, module, offset, block_size, block,
- last_cmd, 0, NULL);
- if (status) {
- dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n",
- module, block_size, offset, ice_stat_str(status),
+ err = ice_aq_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0, NULL);
+ if (err) {
+ dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n",
+ module, block_size, offset, err,
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
@@ -338,6 +355,24 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
return -EIO;
}
+ /* For the last command to write the NVM bank, newer versions of
+ * firmware indicate the required level of reset to complete
+ * activation of firmware. If the firmware supports this, cache the
+ * response for indicating to the user later. Otherwise, assume that
+ * a full power cycle is required.
+ */
+ if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
+ if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
+ *reset_level = (event.desc.params.nvm.cmd_flags &
+ ICE_AQC_NVM_RESET_LVL_M);
+ dev_dbg(dev, "Firmware reported required reset level as %u\n",
+ *reset_level);
+ } else {
+ *reset_level = ICE_AQC_NVM_POR_FLAG;
+ dev_dbg(dev, "Firmware doesn't support indicating required reset level. Assuming a power cycle is required\n");
+ }
+ }
+
return 0;
}
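
ice_write_nvm_module() (next hunk) walks the image in 4 KiB chunks and flags only the final chunk as last_cmd, which is what triggers the reset-level report handled above. A hedged, simplified sketch of that loop; the real code copies each chunk into a kzalloc'd bounce buffer, and ICE_AQ_MAX_BUF_LEN is the existing 4 KiB admin queue limit:

/* Simplified sketch; not the actual implementation. */
static int example_write_module(struct ice_pf *pf, u16 module,
				const u8 *image, u32 length,
				u8 *reset_level,
				struct netlink_ext_ack *extack)
{
	u32 offset = 0;
	int err = 0;

	while (offset < length) {
		u16 block_size = min_t(u32, ICE_AQ_MAX_BUF_LEN,
				       length - offset);
		/* only the final block asks firmware for the reset level */
		bool last_cmd = (offset + block_size == length);

		err = ice_write_one_nvm_block(pf, module, offset, block_size,
					      (u8 *)(image + offset), last_cmd,
					      reset_level, extack);
		if (err)
			break;
		offset += block_size;
	}
	return err;
}
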
@@ -348,6 +383,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
* @component: the name of the component being updated
* @image: buffer of image data to write to the NVM
* @length: length of the buffer
+ * @reset_level: storage for reset level required
* @extack: netlink extended ACK structure
*
* Loop over the data for a given NVM module and program it in 4 Kb
@@ -360,7 +396,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
static int
ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
- const u8 *image, u32 length,
+ const u8 *image, u32 length, u8 *reset_level,
struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
@@ -394,7 +430,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
memcpy(block, image + offset, block_size);
err = ice_write_one_nvm_block(pf, module, offset, block_size,
- block, last_cmd, extack);
+ block, last_cmd, reset_level,
+ extack);
if (err)
break;
@@ -445,7 +482,6 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
struct devlink *devlink;
- enum ice_status status;
int err;
dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
@@ -456,10 +492,10 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
- status = ice_aq_erase_nvm(hw, module, NULL);
- if (status) {
- dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n",
- component, module, ice_stat_str(status),
+ err = ice_aq_erase_nvm(hw, module, NULL);
+ if (err) {
+ dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
+ component, module, err,
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
err = -EIO;
@@ -511,6 +547,7 @@ out_notify_devlink:
* ice_switch_flash_banks - Tell firmware to switch NVM banks
* @pf: Pointer to the PF data structure
* @activate_flags: flags used for the activation command
+ * @emp_reset_available: on return, indicates if EMP reset is available
* @extack: netlink extended ACK structure
*
* Notify firmware to activate the newly written flash banks, and wait for the
@@ -518,27 +555,43 @@ out_notify_devlink:
*
* Returns: zero on success or an error code on failure.
*/
-static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
- struct netlink_ext_ack *extack)
+static int
+ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
+ u8 *emp_reset_available, struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u16 completion_retval;
+ u8 response_flags;
int err;
memset(&event, 0, sizeof(event));
- status = ice_nvm_write_activate(hw, activate_flags);
- if (status) {
- dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
+ if (err) {
+ dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
return -EIO;
}
+ /* Newer versions of firmware have support to indicate whether an EMP
+ * reset to reload firmware is available. For older firmware, EMP
+ * reset is always available.
+ */
+ if (emp_reset_available) {
+ if (hw->dev_caps.common_cap.reset_restrict_support) {
+ *emp_reset_available = response_flags & ICE_AQC_NVM_EMPR_ENA;
+ dev_dbg(dev, "Firmware indicated that EMP reset is %s\n",
+ *emp_reset_available ?
+ "available" : "not available");
+ } else {
+ *emp_reset_available = ICE_AQC_NVM_EMPR_ENA;
+ dev_dbg(dev, "Firmware does not support restricting EMP reset availability\n");
+ }
+ }
+
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ,
&event);
if (err) {
@@ -579,6 +632,7 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
const char *name;
+ u8 *reset_level;
u16 module;
u8 flag;
int err;
@@ -587,16 +641,19 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
case NVM_COMP_ID_OROM:
module = ICE_SR_1ST_OROM_BANK_PTR;
flag = ICE_AQC_NVM_ACTIV_SEL_OROM;
+ reset_level = NULL;
name = "fw.undi";
break;
case NVM_COMP_ID_NVM:
module = ICE_SR_1ST_NVM_BANK_PTR;
flag = ICE_AQC_NVM_ACTIV_SEL_NVM;
+ reset_level = &priv->reset_level;
name = "fw.mgmt";
break;
case NVM_COMP_ID_NETLIST:
module = ICE_SR_NETLIST_BANK_PTR;
flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ reset_level = NULL;
name = "fw.netlist";
break;
default:
@@ -616,7 +673,8 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
return err;
return ice_write_nvm_module(pf, module, name, component->component_data,
- component->component_size, extack);
+ component->component_size, reset_level,
+ extack);
}
/**
@@ -634,110 +692,75 @@ static int ice_finalize_update(struct pldmfw *context)
struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
+ struct devlink *devlink;
+ int err;
/* Finally, notify firmware to activate the written NVM banks */
- return ice_switch_flash_banks(pf, priv->activate_flags, extack);
-}
+ err = ice_switch_flash_banks(pf, priv->activate_flags,
+ &priv->emp_reset_available, extack);
+ if (err)
+ return err;
-static const struct pldmfw_ops ice_fwu_ops = {
- .match_record = &pldmfw_op_pci_match_record,
- .send_package_data = &ice_send_package_data,
- .send_component_table = &ice_send_component_table,
- .flash_component = &ice_flash_component,
- .finalize_update = &ice_finalize_update,
-};
+ devlink = priv_to_devlink(pf);
-/**
- * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
- * @pf: private device driver structure
- * @fw: firmware object pointing to the relevant firmware file
- * @preservation: preservation level to request from firmware
- * @extack: netlink extended ACK structure
- *
- * Parse the data for a given firmware file, verifying that it is a valid PLDM
- * formatted image that matches this device.
- *
- * Extract the device record Package Data and Component Tables and send them
- * to the firmware. Extract and write the flash data for each of the three
- * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
- * firmware once the data is written to the inactive banks.
- *
- * Returns: zero on success or a negative error code on failure.
- */
-int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- u8 preservation, struct netlink_ext_ack *extack)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- struct ice_fwu_priv priv;
- enum ice_status status;
- int err;
+ /* If the required reset is EMPR, but EMPR is disabled, report that
+ * a reboot is required instead.
+ */
+ if (priv->reset_level == ICE_AQC_NVM_EMPR_FLAG &&
+ !priv->emp_reset_available) {
+ dev_dbg(ice_pf_to_dev(pf), "Firmware indicated EMP reset as sufficient, but EMP reset is disabled\n");
+ priv->reset_level = ICE_AQC_NVM_PERST_FLAG;
+ }
- switch (preservation) {
- case ICE_AQC_NVM_PRESERVE_ALL:
- case ICE_AQC_NVM_PRESERVE_SELECTED:
- case ICE_AQC_NVM_NO_PRESERVATION:
- case ICE_AQC_NVM_FACTORY_DEFAULT:
+ switch (priv->reset_level) {
+ case ICE_AQC_NVM_EMPR_FLAG:
+ devlink_flash_update_status_notify(devlink,
+ "Activate new firmware by devlink reload",
+ NULL, 0, 0);
break;
+ case ICE_AQC_NVM_PERST_FLAG:
+ devlink_flash_update_status_notify(devlink,
+ "Activate new firmware by rebooting the system",
+ NULL, 0, 0);
+ break;
+ case ICE_AQC_NVM_POR_FLAG:
default:
- WARN(1, "Unexpected preservation level request %u", preservation);
- return -EINVAL;
- }
-
- memset(&priv, 0, sizeof(priv));
-
- priv.context.ops = &ice_fwu_ops;
- priv.context.dev = dev;
- priv.extack = extack;
- priv.pf = pf;
- priv.activate_flags = preservation;
-
- status = ice_acquire_nvm(hw, ICE_RES_WRITE);
- if (status) {
- dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
- return -EIO;
- }
-
- err = pldmfw_flash_image(&priv.context, fw);
- if (err == -ENOENT) {
- dev_err(dev, "Firmware image has no record matching this device\n");
- NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device");
- } else if (err) {
- /* Do not set a generic extended ACK message here. A more
- * specific message may already have been set by one of our
- * ops.
- */
- dev_err(dev, "Failed to flash PLDM image, err %d", err);
+ devlink_flash_update_status_notify(devlink,
+ "Activate new firmware by power cycling the system",
+ NULL, 0, 0);
+ break;
}
- ice_release_nvm(hw);
+ pf->fw_emp_reset_disabled = !priv->emp_reset_available;
- return err;
+ return 0;
}
+static const struct pldmfw_ops ice_fwu_ops = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ice_send_package_data,
+ .send_component_table = &ice_send_component_table,
+ .flash_component = &ice_flash_component,
+ .finalize_update = &ice_finalize_update,
+};
+
/**
- * ice_check_for_pending_update - Check for a pending flash update
+ * ice_get_pending_updates - Check if the component has a pending update
* @pf: the PF driver structure
- * @component: if not NULL, the name of the component being updated
- * @extack: Netlink extended ACK structure
+ * @pending: on return, bitmap of updates pending
+ * @extack: Netlink extended ACK
*
- * Check whether the device already has a pending flash update. If such an
- * update is found, cancel it so that the requested update may proceed.
+ * Check if the device has any pending updates on any flash components.
*
- * Returns: zero on success, or a negative error code on failure.
+ * Returns: zero on success, or a negative error code on failure. On success,
+ * *pending is set to the bitmap of components with updates pending.
*/
-int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
- struct netlink_ext_ack *extack)
+int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
+ struct netlink_ext_ack *extack)
{
- struct devlink *devlink = priv_to_devlink(pf);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw_dev_caps *dev_caps;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
- u8 pending = 0;
int err;
dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
@@ -749,30 +772,60 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
* may have changed, e.g. if an update was previously completed and
* the system has not yet rebooted.
*/
- status = ice_discover_dev_caps(hw, dev_caps);
- if (status) {
+ err = ice_discover_dev_caps(hw, dev_caps);
+ if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities");
kfree(dev_caps);
- return -EIO;
+ return err;
}
+ *pending = 0;
+
if (dev_caps->common_cap.nvm_update_pending_nvm) {
dev_info(dev, "The fw.mgmt flash component has a pending update\n");
- pending |= ICE_AQC_NVM_ACTIV_SEL_NVM;
+ *pending |= ICE_AQC_NVM_ACTIV_SEL_NVM;
}
if (dev_caps->common_cap.nvm_update_pending_orom) {
dev_info(dev, "The fw.undi flash component has a pending update\n");
- pending |= ICE_AQC_NVM_ACTIV_SEL_OROM;
+ *pending |= ICE_AQC_NVM_ACTIV_SEL_OROM;
}
if (dev_caps->common_cap.nvm_update_pending_netlist) {
dev_info(dev, "The fw.netlist flash component has a pending update\n");
- pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ *pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
}
kfree(dev_caps);
+ return 0;
+}
+
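ice_get_pending_updates() now exposes the pending bitmap to callers instead of acting on it. A hedged usage sketch; the caller below is hypothetical, while the ICE_AQC_NVM_ACTIV_SEL_* bits are the real flags:

/* Hypothetical caller of the new helper. */
static void example_report_pending(struct ice_pf *pf,
				   struct netlink_ext_ack *extack)
{
	u8 pending;

	if (ice_get_pending_updates(pf, &pending, extack))
		return;

	if (pending & ICE_AQC_NVM_ACTIV_SEL_NVM)
		dev_info(ice_pf_to_dev(pf), "fw.mgmt activation pending\n");
}
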
+/**
+ * ice_cancel_pending_update - Cancel any pending update for a component
+ * @pf: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Cancel any pending update for the specified component. If component is
+ * NULL, all device updates will be canceled.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_cancel_pending_update(struct ice_pf *pf, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ u8 pending;
+ int err;
+
+ err = ice_get_pending_updates(pf, &pending, extack);
+ if (err)
+ return err;
+
/* If the flash_update request is for a specific component, ignore all
* of the other components.
*/
@@ -798,17 +851,107 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
"Canceling previous pending update",
component, 0, 0);
- status = ice_acquire_nvm(hw, ICE_RES_WRITE);
- if (status) {
- dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err) {
+ dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
- return -EIO;
+ return err;
}
pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV;
- err = ice_switch_flash_banks(pf, pending, extack);
+ err = ice_switch_flash_banks(pf, pending, NULL, extack);
+
+ ice_release_nvm(hw);
+
+ /* Since we've canceled the pending update, we no longer know if EMP
+ * reset is restricted.
+ */
+ pf->fw_emp_reset_disabled = false;
+
+ return err;
+}
+
+/**
+ * ice_devlink_flash_update - Write a firmware image to the device
+ * @devlink: pointer to devlink associated with the device to update
+ * @params: devlink flash update parameters
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Returns: zero on success or a negative error code on failure.
+ */
+int ice_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_fwu_priv priv;
+ u8 preservation;
+ int err;
+
+ if (!params->overwrite_mask) {
+ /* preserve all settings and identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_ALL;
+ } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
+ /* overwrite settings, but preserve the vital device identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
+ } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = ICE_AQC_NVM_NO_PRESERVATION;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&priv, 0, sizeof(priv));
+
+ priv.context.ops = &ice_fwu_ops;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.pf = pf;
+ priv.activate_flags = preservation;
+
+ devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
+
+ err = ice_cancel_pending_update(pf, NULL, extack);
+ if (err)
+ return err;
+
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err) {
+ dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
+ return err;
+ }
+
+ err = pldmfw_flash_image(&priv.context, params->fw);
+ if (err == -ENOENT) {
+ dev_err(dev, "Firmware image has no record matching this device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device");
+ } else if (err) {
+ /* Do not set a generic extended ACK message here. A more
+ * specific message may already have been set by one of our
+ * ops.
+ */
+ dev_err(dev, "Failed to flash PLDM image, err %d", err);
+ }
ice_release_nvm(hw);
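
With the orchestration moved into ice_devlink_flash_update(), the function can be plugged straight into the devlink ops table in ice_devlink.c. A hedged sketch of that wiring; the ops structure shown is illustrative and only the two named fields are assumed:

/* Illustrative ops wiring; the real table lives in ice_devlink.c. */
static const struct devlink_ops example_devlink_ops = {
	.supported_flash_update_params =
		DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
	.flash_update = ice_devlink_flash_update,
};

From user space this roughly corresponds to "devlink dev flash <dev> file <image> overwrite settings", with the overwrite arguments mapping onto the preservation levels selected at the top of the function.
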
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index c6390f6851ff..750574885716 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -4,9 +4,10 @@
#ifndef _ICE_FW_UPDATE_H_
#define _ICE_FW_UPDATE_H_
-int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- u8 preservation, struct netlink_ext_ack *extack);
-int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
- struct netlink_ext_ack *extack);
+int ice_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
+ struct netlink_ext_ack *extack);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index a49082485642..d16738a3d3a7 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -100,6 +100,7 @@
#define PF_SB_ATQT 0x0022FE00
#define PF_SB_ATQT_ATQT_S 0
#define PF_SB_ATQT_ATQT_M ICE_M(0x3FF, 0)
+#define PF_SB_REM_DEV_CTL 0x002300F0
#define PRTDCB_GENC 0x00083000
#define PRTDCB_GENC_PFCLDA_S 16
#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
@@ -440,6 +441,10 @@
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PRTRPB_RDPC 0x000AC260
+#define GLHH_ART_CTL 0x000A41D4
+#define GLHH_ART_CTL_ACTIVE_M BIT(0)
+#define GLHH_ART_TIME_H 0x000A41D8
+#define GLHH_ART_TIME_L 0x000A41DC
#define GLTSYN_AUX_IN_0(_i) (0x000889D8 + ((_i) * 4))
#define GLTSYN_AUX_IN_0_INT_ENA_M BIT(4)
#define GLTSYN_AUX_OUT_0(_i) (0x00088998 + ((_i) * 4))
@@ -452,6 +457,8 @@
#define GLTSYN_ENA_TSYN_ENA_M BIT(0)
#define GLTSYN_EVNT_H_0(_i) (0x00088970 + ((_i) * 4))
#define GLTSYN_EVNT_L_0(_i) (0x00088968 + ((_i) * 4))
+#define GLTSYN_HHTIME_H(_i) (0x00088900 + ((_i) * 4))
+#define GLTSYN_HHTIME_L(_i) (0x000888F8 + ((_i) * 4))
#define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4))
#define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4))
#define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4))
@@ -468,6 +475,8 @@
#define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4))
#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4))
#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4))
+#define PFHH_SEM 0x000A4200 /* Reset Source: PFR */
+#define PFHH_SEM_BUSY_M BIT(0)
#define PFTSYN_SEM 0x00088880
#define PFTSYN_SEM_BUSY_M BIT(0)
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index adcc9a251595..fc3580167e7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -288,7 +288,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
- adev->name = IIDC_RDMA_ROCE_NAME;
+ adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";
ret = auxiliary_device_init(adev);
if (ret) {
@@ -335,6 +335,6 @@ int ice_init_rdma(struct ice_pf *pf)
dev_err(dev, "failed to reserve vectors for RDMA\n");
return ret;
}
-
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
return ice_plug_aux_dev(pf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 09a3297cd63c..0c187cf04fcf 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -291,7 +291,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
+ int status;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
@@ -305,8 +305,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
- dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
+ vsi->vsi_num, status);
kfree(ctxt);
}
@@ -570,10 +570,16 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
u32 g_val, b_val;
- /* Flow Director filters are only allocated/assigned to the PF VSI which
- * passes the traffic. The CTRL VSI is only used to add/delete filters
- * so we don't allocate resources to it
+ /* Flow Director filters are only allocated/assigned to the PF VSI or
+ * CHNL VSI which passes the traffic. The CTRL VSI is only used to
+ * add/delete filters so resources are not allocated to it
*/
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EPERM;
+
+ if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
+ vsi->type == ICE_VSI_CHNL))
+ return -EPERM;
/* FD filters from guaranteed pool per VSI */
g_val = pf->hw.func_caps.fd_fltr_guar;
@@ -585,19 +591,56 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
if (!b_val)
return -EPERM;
- if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF))
- return -EPERM;
+ /* PF main VSI gets only 64 FD resources from guaranteed pool
+ * when ADQ is configured.
+ */
+#define ICE_PF_VSI_GFLTR 64
- if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
- return -EPERM;
+ /* determine FD filter resources per VSI from the shared (best effort) and
+ * dedicated pool
+ */
+ if (vsi->type == ICE_VSI_PF) {
+ vsi->num_gfltr = g_val;
+ /* if MQPRIO is configured, main VSI doesn't get all FD
+ * resources from guaranteed pool. PF VSI gets 64 FD resources
+ */
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ if (g_val < ICE_PF_VSI_GFLTR)
+ return -EPERM;
+ /* allow bare minimum entries for PF VSI */
+ vsi->num_gfltr = ICE_PF_VSI_GFLTR;
+ }
- vsi->num_gfltr = g_val / pf->num_alloc_vsi;
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+ } else if (vsi->type == ICE_VSI_VF) {
+ vsi->num_gfltr = 0;
- /* each VSI gets same "best_effort" quota */
- vsi->num_bfltr = b_val;
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+ } else {
+ struct ice_vsi *main_vsi;
+ int numtc;
- if (vsi->type == ICE_VSI_VF) {
- vsi->num_gfltr = 0;
+ main_vsi = ice_get_main_vsi(pf);
+ if (!main_vsi)
+ return -EPERM;
+
+ if (!main_vsi->all_numtc)
+ return -EINVAL;
+
+ /* figure out ADQ numtc */
+ numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;
+
+ /* only one TC but still asking resources for channels,
+ * invalid config
+ */
+ if (numtc < ICE_CHNL_START_TC)
+ return -EPERM;
+
+ g_val -= ICE_PF_VSI_GFLTR;
+ /* channel VSIs get an equal share of the guaranteed pool */
+ vsi->num_gfltr = g_val / numtc;
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
@@ -709,15 +752,15 @@ bool ice_is_aux_ena(struct ice_pf *pf)
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
+ int status;
if (ice_is_safe_mode(pf))
return;
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
if (status)
- dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
}
/**
@@ -942,7 +985,7 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
u16 dflt_q, report_q, val;
if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
- vsi->type != ICE_VSI_VF)
+ vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
return;
val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
@@ -1545,8 +1588,8 @@ ice_vsi_cfg_rss_exit:
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1557,8 +1600,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
if (status)
- dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
}
/**
@@ -1577,8 +1620,8 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1590,57 +1633,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for IPv6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for sctp4 with input set IP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for sctp6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
}
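
The eight conversions above repeat one add-and-log pattern; now that ice_add_rss_cfg() returns an errno, it could be wrapped once. A hedged refactoring sketch, not part of this patch:

/* Sketch only: consolidate the repeated add-and-log pattern. */
static void example_add_rss(struct device *dev, struct ice_hw *hw,
			    u16 vsi_handle, u16 vsi_num,
			    u64 hash, u32 hdrs, const char *name)
{
	int status = ice_add_rss_cfg(hw, vsi_handle, hash, hdrs);

	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for %s flow, vsi = %d, error = %d\n",
			name, vsi_num, status);
}
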
/**
@@ -1749,22 +1792,21 @@ ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
- int err = 0;
+ int err;
dev = ice_pf_to_dev(pf);
- status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (!status) {
+ err = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
+ if (!err) {
vsi->num_vlan--;
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
- vid, vsi->vsi_num, ice_stat_str(status));
+ } else if (err == -ENOENT) {
+ dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n",
+ vid, vsi->vsi_num, err);
+ err = 0;
} else {
- dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
- vid, vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
+ vid, vsi->vsi_num, err);
}
return err;
@@ -2105,8 +2147,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
@@ -2124,12 +2165,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -2148,8 +2187,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
/* do not allow modifying VLAN stripping when a port VLAN is configured
* on this VSI
@@ -2177,12 +2215,10 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
- ena, ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
+ ena, ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -2324,10 +2360,9 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n",
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
- ice_stat_str(status),
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ status, ice_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
@@ -2405,11 +2440,11 @@ clear_reg_idx:
*/
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
- enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
- enum ice_sw_fwd_act_type act);
+ int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
+ enum ice_sw_fwd_act_type act);
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
@@ -2428,9 +2463,9 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
}
if (status)
- dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
+ dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, ice_stat_str(status));
+ vsi->vsi_num, status);
}
/**
@@ -2450,7 +2485,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
struct ice_port_info *port_info;
struct ice_pf *pf = vsi->back;
u32 agg_node_id_start = 0;
- enum ice_status status;
+ int status;
/* create (as needed) scheduler aggregator node and move VSI into
* corresponding aggregator node
@@ -2576,7 +2611,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
struct ice_vsi *vsi;
int ret, i;
@@ -2725,11 +2759,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (status) {
- dev_err(dev, "VSI %d failed lan queue config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_err(dev, "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
goto unroll_clear_rings;
}
@@ -3036,8 +3070,8 @@ void ice_napi_del(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
- enum ice_status err;
struct ice_pf *pf;
+ int err;
if (!vsi->back)
return -ENODEV;
@@ -3273,7 +3307,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
int prev_num_q_vectors = 0;
struct ice_vf *vf = NULL;
enum ice_vsi_type vtype;
- enum ice_status status;
struct ice_pf *pf;
int ret, i;
@@ -3421,14 +3454,14 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
/* If MQPRIO is set, we are on the channel code path, so use a
 * single TC for the main VSI
 */
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
else
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
- if (status) {
- dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
if (init_vsi) {
ret = -EIO;
goto err_vectors;
@@ -3663,7 +3696,6 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctx;
- enum ice_status status;
struct device *dev;
int i, ret = 0;
u8 num_tc = 0;
@@ -3705,25 +3737,22 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
/* must indicate which sections of the VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
- status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
- if (status) {
+ ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
+ if (ret) {
dev_info(dev, "Failed VSI Update\n");
- ret = -EIO;
goto out;
}
if (vsi->type == ICE_VSI_PF &&
test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1,
- max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
else
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
- if (status) {
- dev_err(dev, "VSI %d failed TC config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- ret = -EIO;
+ if (ret) {
+ dev_err(dev, "VSI %d failed TC config, error %d\n",
+ vsi->vsi_num, ret);
goto out;
}
ice_vsi_update_q_map(vsi, ctx);
@@ -3776,39 +3805,6 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
}
/**
- * ice_status_to_errno - convert from enum ice_status to Linux errno
- * @err: ice_status value to convert
- */
-int ice_status_to_errno(enum ice_status err)
-{
- switch (err) {
- case ICE_SUCCESS:
- return 0;
- case ICE_ERR_DOES_NOT_EXIST:
- return -ENOENT;
- case ICE_ERR_OUT_OF_RANGE:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_EMPTY:
- case ICE_ERR_AQ_FW_CRITICAL:
- return -EIO;
- case ICE_ERR_PARAM:
- case ICE_ERR_INVAL_SIZE:
- return -EINVAL;
- case ICE_ERR_NO_MEMORY:
- return -ENOMEM;
- case ICE_ERR_MAX_LIMIT:
- return -EAGAIN;
- case ICE_ERR_RESET_ONGOING:
- return -EBUSY;
- case ICE_ERR_AQ_FULL:
- return -ENOSPC;
- default:
- return -EINVAL;
- }
-}
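
The table removed here is exactly the mapping the rest of the series inlines at each call site: ICE_ERR_NO_MEMORY becomes -ENOMEM, ICE_ERR_DOES_NOT_EXIST becomes -ENOENT, the AQ errors become -EIO, and so on. A hedged before/after sketch of a converted call site; the function and variable names are illustrative:

/* Illustrative call site only. */
static int example_add_filter(struct ice_vsi *vsi, const u8 *mac)
{
	/* Before: enum ice_status had to be translated:
	 *	return ice_status_to_errno(ice_fltr_add_mac(vsi, mac,
	 *						    ICE_FWD_TO_VSI));
	 * After: the errno from the filter layer propagates directly.
	 */
	return ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
}
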
-
-/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
* @sw: switch to check if its default forwarding VSI is free
*
@@ -3849,8 +3845,8 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
*/
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
- enum ice_status status;
struct device *dev;
+ int status;
if (!sw || !vsi)
return -EINVAL;
@@ -3873,9 +3869,9 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- return -EIO;
+ dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
+ vsi->vsi_num, status);
+ return status;
}
sw->dflt_vsi = vsi;
@@ -3895,8 +3891,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
int ice_clear_dflt_vsi(struct ice_sw *sw)
{
struct ice_vsi *dflt_vsi;
- enum ice_status status;
struct device *dev;
+ int status;
if (!sw)
return -EINVAL;
@@ -3912,8 +3908,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n",
- dflt_vsi->vsi_num, ice_stat_str(status));
+ dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
+ dflt_vsi->vsi_num, status);
return -EIO;
}
@@ -3987,8 +3983,8 @@ int ice_get_link_speed_kbps(struct ice_vsi *vsi)
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
int speed;
dev = ice_pf_to_dev(pf);
@@ -4014,7 +4010,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
min_tx_rate, ice_vsi_type_str(vsi->type),
vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
@@ -4026,7 +4022,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
if (status) {
dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
ice_vsi_type_str(vsi->type), vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
@@ -4048,8 +4044,8 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
int speed;
dev = ice_pf_to_dev(pf);
@@ -4075,7 +4071,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
max_tx_rate, ice_vsi_type_str(vsi->type),
vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
@@ -4087,7 +4083,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
if (status) {
dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
ice_vsi_type_str(vsi->type), vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
@@ -4107,7 +4103,7 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_port_info *pi = vsi->port_info;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
@@ -4119,16 +4115,16 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
* a success code. Return an error if FW returns an error code other
* than ICE_AQ_RC_EMODE
*/
- if (status == ICE_ERR_AQ_ERROR) {
+ if (status == -EIO) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
- dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n",
- (ena ? "ON" : "OFF"), ice_stat_str(status),
+ dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
+ (ena ? "ON" : "OFF"), status,
ice_aq_str(hw->adminq.sq_last_status));
} else if (status) {
- dev_err(dev, "can't set link to %s, err %s aq_err %s\n",
- (ena ? "ON" : "OFF"), ice_stat_str(status),
+ dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
+ (ena ? "ON" : "OFF"), status,
ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
+ return status;
}
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 6c803407b67d..b2ed189527d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -103,15 +103,11 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
-
-int ice_status_to_errno(enum ice_status err);
-
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
-enum ice_status
-ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
+int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_aux_ena(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 73c61cdb036f..30814435f779 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -155,7 +155,6 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
*/
static int ice_init_mac_fltr(struct ice_pf *pf)
{
- enum ice_status status;
struct ice_vsi *vsi;
u8 *perm_addr;
@@ -164,11 +163,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
return -EINVAL;
perm_addr = vsi->port_info->mac.perm_addr;
- status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
- if (status)
- return -EIO;
-
- return 0;
+ return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}
/**
@@ -237,36 +232,43 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
}
/**
- * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
+ * ice_set_promisc - Enable promiscuous mode for a given PF
* @vsi: the VSI being configured
* @promisc_m: mask of promiscuous config bits
- * @set_promisc: enable or disable promisc flag request
*
*/
-static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
+static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
- struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status = 0;
+ int status;
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1) {
- status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
- set_promisc);
- } else {
- if (set_promisc)
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- else
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- }
+ if (vsi->num_vlan > 1)
+ status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
+ else
+ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+ return status;
+}
- if (status)
- return -EIO;
+/**
+ * ice_clear_promisc - Disable promiscuous mode for a given PF
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ *
+ */
+static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
+{
+ int status;
- return 0;
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ if (vsi->num_vlan > 1)
+ status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
+ else
+ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+ return status;
}
/**
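Splitting ice_cfg_promisc() into ice_set_promisc() and ice_clear_promisc() drops the boolean direction flag, so intent is explicit at each call site. A condensed before/after sketch (the rewritten call sites appear in the hunks below):

    /* before: one entry point, direction passed as a flag */
    err = ice_cfg_promisc(vsi, promisc_m, true);
    err = ice_cfg_promisc(vsi, promisc_m, false);

    /* after: separate helpers, no flag argument */
    err = ice_set_promisc(vsi, promisc_m);
    err = ice_clear_promisc(vsi, promisc_m);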
@@ -282,10 +284,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
bool promisc_forced_on = false;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status = 0;
u32 changed_flags = 0;
u8 promisc_m;
- int err = 0;
+ int err;
if (!vsi->netdev)
return -EINVAL;
@@ -315,25 +316,23 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Remove MAC addresses in the unsync list */
- status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
+ err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
- if (status) {
+ if (err) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
- if (status == ICE_ERR_NO_MEMORY) {
- err = -ENOMEM;
+ if (err == -ENOMEM)
goto out;
- }
}
/* Add MAC addresses in the sync list */
- status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
+ err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
ice_fltr_free_list(dev, &vsi->tmp_sync_list);
/* If the filter was added successfully or already exists, do not go
* into the 'if' condition and report it as an error. Instead continue
* processing the rest of the function.
*/
- if (status && status != ICE_ERR_ALREADY_EXISTS) {
+ if (err && err != -EEXIST) {
netdev_err(netdev, "Failed to add MAC filters\n");
/* If there is no more space for new umac filters, VSI
* should go into promiscuous mode. There should be some
@@ -346,10 +345,10 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
vsi->vsi_num);
} else {
- err = -EIO;
goto out;
}
}
+ err = 0;
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
@@ -358,7 +357,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
else
promisc_m = ICE_MCAST_PROMISC_BITS;
- err = ice_cfg_promisc(vsi, promisc_m, true);
+ err = ice_set_promisc(vsi, promisc_m);
if (err) {
netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
vsi->vsi_num);
@@ -372,7 +371,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
else
promisc_m = ICE_MCAST_PROMISC_BITS;
- err = ice_cfg_promisc(vsi, promisc_m, false);
+ err = ice_clear_promisc(vsi, promisc_m);
if (err) {
netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
vsi->vsi_num);
@@ -396,6 +395,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
~IFF_PROMISC;
goto out_promisc;
}
+ err = 0;
ice_cfg_vlan_pruning(vsi, false);
}
} else {
@@ -472,6 +472,25 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
}
/**
+ * ice_clear_sw_switch_recipes - clear switch recipes
+ * @pf: board private structure
+ *
+ * Mark switch recipes as not created in sw structures. There are cases where
+ * rules (especially advanced rules) need to be restored, either re-read from
+ * hardware or added again, for example after a reset. The 'recp_created'
+ * flag prevents that and needs to be cleared upfront.
+ */
+static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
+{
+ struct ice_sw_recipe *recp;
+ u8 i;
+
+ recp = pf->hw.switch_info->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
+ recp[i].recp_created = false;
+}
+
+/**
* ice_prepare_for_reset - prep for reset
* @pf: board private structure
* @reset_type: reset type requested
@@ -501,6 +520,11 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]);
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ if (reset_type != ICE_RESET_PFR)
+ ice_clear_sw_switch_recipes(pf);
+ }
+
/* release ADQ specific HW and SW resources */
vsi = ice_get_main_vsi(pf);
if (!vsi)
@@ -539,7 +563,7 @@ skip:
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_release(pf);
+ ice_ptp_prepare_for_reset(pf);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
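This pairs with the rebuild path changed later in this patch: rather than fully releasing and re-initializing PTP across a reset, the driver now preserves PTP state. A sketch of the intended sequence, assembled from the calls visible in this diff (the function internals are outside it):

    /* ice_prepare_for_reset():     stop PTP but keep its state   */
    ice_ptp_prepare_for_reset(pf);
    /* ice_rebuild():               re-init the clock from state  */
    ice_ptp_reset(pf);
    /* ice_rebuild(), post PF VSI:  restore timestamp config      */
    ice_ptp_cfg_timestamp(pf, false);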
@@ -695,12 +719,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
struct ice_aqc_get_phy_caps_data *caps;
const char *an_advertised;
- enum ice_status status;
const char *fec_req;
const char *speed;
const char *fec;
const char *fc;
const char *an;
+ int status;
if (!vsi)
return;
@@ -1020,10 +1044,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_phy_info *phy_info;
- enum ice_status status;
struct ice_vsi *vsi;
u16 old_link_speed;
bool old_link;
+ int status;
phy_info = &pi->phy;
phy_info->link_info_old = phy_info->link_info;
@@ -1036,8 +1060,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
*/
status = ice_update_link_info(pi);
if (status)
- dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
- pi->lport, ice_stat_str(status),
+ dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
+ pi->lport, status,
ice_aq_str(pi->hw->adminq.sq_last_status));
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
@@ -1063,6 +1087,9 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
+ if (!ice_is_e810(&pf->hw))
+ ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
@@ -1407,15 +1434,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
return 0;
do {
- enum ice_status ret;
u16 opcode;
+ int ret;
ret = ice_clean_rq_elem(hw, cq, &event, &pending);
- if (ret == ICE_ERR_AQ_NO_WORK)
+ if (ret == -EALREADY)
break;
if (ret) {
- dev_err(dev, "%s Receive Queue event error %s\n", qtype,
- ice_stat_str(ret));
+ dev_err(dev, "%s Receive Queue event error %d\n", qtype,
+ ret);
break;
}
@@ -1866,19 +1893,17 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_pf *pf = pi->hw->back;
- enum ice_status status;
- int err = 0;
+ int err;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
- NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
+ pcaps, NULL);
- if (status) {
+ if (err) {
dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
- err = -EIO;
goto out;
}
@@ -1977,8 +2002,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_phy_info *phy = &pi->phy;
struct ice_pf *pf = pi->hw->back;
- enum ice_status status;
- int err = 0;
+ int err;
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
return -EIO;
@@ -1988,14 +2012,13 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
return -ENOMEM;
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- pcaps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ pcaps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- pcaps, NULL);
- if (status) {
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ pcaps, NULL);
+ if (err) {
dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
- err = -EIO;
goto err_out;
}
@@ -2049,8 +2072,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
struct ice_aqc_set_phy_cfg_data *cfg;
struct ice_phy_info *phy = &pi->phy;
struct ice_pf *pf = vsi->back;
- enum ice_status status;
- int err = 0;
+ int err;
/* Ensure we have media as we cannot configure a medialess port */
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
@@ -2070,12 +2092,11 @@ static int ice_configure_phy(struct ice_vsi *vsi)
return -ENOMEM;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
- NULL);
- if (status) {
- dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
+ NULL);
+ if (err) {
+ dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
+ vsi->vsi_num, err);
goto done;
}
@@ -2089,15 +2110,14 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Use PHY topology as baseline for configuration */
memset(pcaps, 0, sizeof(*pcaps));
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- pcaps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ pcaps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- pcaps, NULL);
- if (status) {
- dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ pcaps, NULL);
+ if (err) {
+ dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
+ vsi->vsi_num, err);
goto done;
}
@@ -2150,12 +2170,10 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Enable link and link update */
cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
- status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
- if (status) {
- dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
- }
+ err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
+ if (err)
+ dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
+ vsi->vsi_num, err);
kfree(cfg);
done:
@@ -2549,9 +2567,9 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
.vsi_map_offset = vsi->alloc_txq,
.mapping_mode = ICE_VSI_MAP_CONTIG
};
- enum ice_status status;
struct device *dev;
int i, v_idx;
+ int status;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2605,8 +2623,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
- ice_stat_str(status));
+ dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
+ status);
goto clear_xdp_rings;
}
@@ -3520,7 +3538,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi;
- int status = 0;
+ int status;
if (ice_is_reset_in_progress(pf->state))
return -EBUSY;
@@ -3533,10 +3551,8 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
INIT_LIST_HEAD(&vsi->ch_list);
status = ice_cfg_netdev(vsi);
- if (status) {
- status = -ENODEV;
+ if (status)
goto unroll_vsi_setup;
- }
/* netdev has to be configured before setting frame size */
ice_vsi_cfg_frame_size(vsi);
@@ -3560,14 +3576,13 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
if (status) {
dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
vsi->vsi_num, status);
- status = -EINVAL;
goto unroll_napi_add;
}
status = ice_init_mac_fltr(pf);
if (status)
goto free_cpu_rx_map;
- return status;
+ return 0;
free_cpu_rx_map:
ice_free_cpu_rx_rmap(vsi);
@@ -4023,8 +4038,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!vsi)
return;
@@ -4054,9 +4069,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
@@ -4069,108 +4083,80 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
- * @status: status of package load
+ * @state: state of package load
*/
-static void
-ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
+static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
{
- struct ice_pf *pf = (struct ice_pf *)hw->back;
- struct device *dev = ice_pf_to_dev(pf);
+ struct ice_pf *pf = hw->back;
+ struct device *dev;
- switch (*status) {
- case ICE_SUCCESS:
- /* The package download AdminQ command returned success because
- * this download succeeded or ICE_ERR_AQ_NO_WORK since there is
- * already a package loaded on the device.
- */
- if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
- hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
- hw->pkg_ver.update == hw->active_pkg_ver.update &&
- hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
- !memcmp(hw->pkg_name, hw->active_pkg_name,
- sizeof(hw->pkg_name))) {
- if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
- dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft);
- else
- dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft);
- } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
- hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
- dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
- *status = ICE_ERR_NOT_SUPPORTED;
- } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft,
- hw->pkg_name,
- hw->pkg_ver.major,
- hw->pkg_ver.minor,
- hw->pkg_ver.update,
- hw->pkg_ver.draft);
- } else {
- dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
- *status = ICE_ERR_NOT_SUPPORTED;
- }
+ dev = ice_pf_to_dev(pf);
+
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
+ break;
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
+ break;
+ case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
+ dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
- case ICE_ERR_FW_DDP_MISMATCH:
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft,
+ hw->pkg_name,
+ hw->pkg_ver.major,
+ hw->pkg_ver.minor,
+ hw->pkg_ver.update,
+ hw->pkg_ver.draft);
+ break;
+ case ICE_DDP_PKG_FW_MISMATCH:
dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
break;
- case ICE_ERR_BUF_TOO_SHORT:
- case ICE_ERR_CFG:
+ case ICE_DDP_PKG_INVALID_FILE:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
- case ICE_ERR_NOT_SUPPORTED:
- /* Package File version not supported */
- if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
- (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
- dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
- else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
- (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
- dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
- ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
+ dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
break;
- case ICE_ERR_AQ_ERROR:
- switch (hw->pkg_dwnld_status) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
- dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
- return;
- case ICE_AQ_RC_ESVN:
- dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
- return;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
- dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
- /* poll for reset to complete */
- if (ice_check_reset(hw))
- dev_err(dev, "Error resetting device. Please reload the driver\n");
- return;
- default:
- break;
- }
- fallthrough;
+ case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
+ dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ break;
+ case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
+ dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
+ break;
+ case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
+ dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
+ break;
+ case ICE_DDP_PKG_LOAD_ERROR:
+ dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ /* poll for reset to complete */
+ if (ice_check_reset(hw))
+ dev_err(dev, "Error resetting device. Please reload the driver\n");
+ break;
+ case ICE_DDP_PKG_ERR:
default:
- dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
- *status);
+ dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
break;
}
}
@@ -4186,24 +4172,24 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
static void
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
{
- enum ice_status status = ICE_ERR_PARAM;
+ enum ice_ddp_state state = ICE_DDP_PKG_ERR;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
/* Load DDP Package */
if (firmware && !hw->pkg_copy) {
- status = ice_copy_and_init_pkg(hw, firmware->data,
- firmware->size);
- ice_log_pkg_init(hw, &status);
+ state = ice_copy_and_init_pkg(hw, firmware->data,
+ firmware->size);
+ ice_log_pkg_init(hw, state);
} else if (!firmware && hw->pkg_copy) {
/* Reload package during rebuild after CORER/GLOBR reset */
- status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
- ice_log_pkg_init(hw, &status);
+ state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
+ ice_log_pkg_init(hw, state);
} else {
dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
}
- if (status) {
+ if (!ice_is_init_pkg_successful(state)) {
/* Safe Mode */
clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
return;
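ice_is_init_pkg_successful() is defined outside this diff; judging from the call site above and the states handled in ice_log_pkg_init(), its assumed shape is roughly the following sketch (the "already loaded" states count as success, which is why ice_load_pkg() no longer special-cases them):

    static bool ice_is_init_pkg_successful(enum ice_ddp_state state)
    {
    	switch (state) {
    	case ICE_DDP_PKG_SUCCESS:
    	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
    	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
    		return true;
    	default:
    		return false;
    	}
    }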
@@ -4234,9 +4220,9 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
* ice_send_version - update firmware with driver version
* @pf: PF struct
*
- * Returns ICE_SUCCESS on success, else error code
+ * Returns 0 on success, else error code
*/
-static enum ice_status ice_send_version(struct ice_pf *pf)
+static int ice_send_version(struct ice_pf *pf)
{
struct ice_driver_ver dv;
@@ -4526,7 +4512,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* true
*/
if (ice_is_safe_mode(pf)) {
- dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
@@ -4721,6 +4706,10 @@ probe_done:
if (err)
goto err_netdev_reg;
+ err = ice_devlink_register_params(pf);
+ if (err)
+ goto err_netdev_reg;
+
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
if (ice_is_aux_ena(pf)) {
@@ -4728,7 +4717,7 @@ probe_done:
if (pf->aux_idx < 0) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
err = -ENOMEM;
- goto err_netdev_reg;
+ goto err_devlink_reg_param;
}
err = ice_init_rdma(pf);
@@ -4747,6 +4736,8 @@ probe_done:
err_init_aux_unroll:
pf->adev = NULL;
ida_free(&ice_aux_ida, pf->aux_idx);
+err_devlink_reg_param:
+ ice_devlink_unregister_params(pf);
err_netdev_reg:
err_send_version_unroll:
ice_vsi_release_all(pf);
@@ -4803,9 +4794,9 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 mac_addr[ETH_ALEN];
struct ice_vsi *vsi;
+ int status;
u8 flags;
if (!pf->wol_ena)
@@ -4827,9 +4818,8 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
if (status)
- dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
}
/**
@@ -4861,6 +4851,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_unplug_aux_dev(pf);
if (pf->aux_idx >= 0)
ida_free(&ice_aux_ida, pf->aux_idx);
+ ice_devlink_unregister_params(pf);
set_bit(ICE_DOWN, pf->state);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
@@ -5367,11 +5358,10 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct sockaddr *addr = pi;
- enum ice_status status;
u8 old_mac[ETH_ALEN];
u8 flags = 0;
- int err = 0;
u8 *mac;
+ int err;
mac = (u8 *)addr->sa_data;
@@ -5403,22 +5393,22 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
netif_addr_unlock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
- status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
- if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+ err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
+ if (err && err != -ENOENT) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
}
/* Add filter for new MAC. If filter exists, return success */
- status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_ALREADY_EXISTS)
+ err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
+ if (err == -EEXIST)
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored
* to this value.
*/
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
- else if (status)
+ else if (err)
/* error if the new filter addition failed */
err = -EADDRNOTAVAIL;
@@ -5437,10 +5427,10 @@ err_update_filters:
/* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
- status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
- if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
- mac, ice_stat_str(status));
+ err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
+ if (err) {
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
+ mac, err);
}
return 0;
}
@@ -5482,8 +5472,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- enum ice_status status;
u16 q_handle;
+ int status;
u8 tc;
/* Validate maxrate requested is within permitted range */
@@ -5503,13 +5493,11 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
else
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
- if (status) {
- netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
- ice_stat_str(status));
- return -EIO;
- }
+ if (status)
+ netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
+ status);
- return 0;
+ return status;
}
/**
@@ -5879,6 +5867,8 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
+ if (!ice_is_e810(&pf->hw))
+ ice_ptp_link_change(pf, pf->hw.pf_id, true);
}
/* clear this now, and the first stats read will be used as baseline */
@@ -6269,15 +6259,18 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
+ *
+ * The caller of this function is expected to have set the ICE_VSI_DOWN bit in vsi->state
*/
int ice_down(struct ice_vsi *vsi)
{
int i, tx_err, rx_err, link_err = 0;
- /* Caller of this function is expected to set the
- * vsi->state ICE_DOWN bit
- */
+ WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
+
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (!ice_is_e810(&vsi->back->hw))
+ ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
@@ -6543,7 +6536,6 @@ static void ice_vsi_release_all(struct ice_pf *pf)
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
int i, err;
ice_for_each_vsi(pf, i) {
@@ -6561,12 +6553,11 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
}
/* replay filters for the VSI */
- status = ice_replay_vsi(&pf->hw, vsi->idx);
- if (status) {
- dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
- ice_stat_str(status), vsi->idx,
- ice_vsi_type_str(type));
- return -EIO;
+ err = ice_replay_vsi(&pf->hw, vsi->idx);
+ if (err) {
+ dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
+ err, vsi->idx, ice_vsi_type_str(type));
+ return err;
}
/* Re-map HW VSI number, using VSI handle that has been
@@ -6629,7 +6620,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status ret;
int err;
if (test_bit(ICE_DOWN, pf->state))
@@ -6637,10 +6627,17 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
- ret = ice_init_all_ctrlq(hw);
- if (ret) {
- dev_err(dev, "control queues init failed %s\n",
- ice_stat_str(ret));
+ if (reset_type == ICE_RESET_EMPR) {
+ /* If an EMP reset has occurred, any previously pending flash
+ * update will have completed. We no longer know whether or
+ * not the NVM update EMP reset is restricted.
+ */
+ pf->fw_emp_reset_disabled = false;
+ }
+
+ err = ice_init_all_ctrlq(hw);
+ if (err) {
+ dev_err(dev, "control queues init failed %d\n", err);
goto err_init_ctrlq;
}
@@ -6654,10 +6651,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_load_pkg(NULL, pf);
}
- ret = ice_clear_pf_cfg(hw);
- if (ret) {
- dev_err(dev, "clear PF configuration failed %s\n",
- ice_stat_str(ret));
+ err = ice_clear_pf_cfg(hw);
+ if (err) {
+ dev_err(dev, "clear PF configuration failed %d\n", err);
goto err_init_ctrlq;
}
@@ -6669,21 +6665,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_clear_pxe_mode(hw);
- ret = ice_init_nvm(hw);
- if (ret) {
- dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
+ err = ice_init_nvm(hw);
+ if (err) {
+ dev_err(dev, "ice_init_nvm failed %d\n", err);
goto err_init_ctrlq;
}
- ret = ice_get_caps(hw);
- if (ret) {
- dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
+ err = ice_get_caps(hw);
+ if (err) {
+ dev_err(dev, "ice_get_caps failed %d\n", err);
goto err_init_ctrlq;
}
- ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
- if (ret) {
- dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
+ err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (err) {
+ dev_err(dev, "set_mac_cfg failed %d\n", err);
goto err_init_ctrlq;
}
@@ -6721,7 +6717,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_init(pf);
+ ice_ptp_reset(pf);
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
@@ -6730,6 +6726,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
+ /* configure PTP timestamping after VSI rebuild */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_cfg_timestamp(pf, false);
+
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
if (err) {
dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
@@ -6766,10 +6766,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_update_pf_netdev_link(pf);
/* tell the firmware we are up */
- ret = ice_send_version(pf);
- if (ret) {
- dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
- ice_stat_str(ret));
+ err = ice_send_version(pf);
+ if (err) {
+ dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
+ err);
goto err_vsi_rebuild;
}
@@ -6950,78 +6950,6 @@ const char *ice_aq_str(enum ice_aq_err aq_err)
}
/**
- * ice_stat_str - convert status err code to a string
- * @stat_err: the status error code to convert
- */
-const char *ice_stat_str(enum ice_status stat_err)
-{
- switch (stat_err) {
- case ICE_SUCCESS:
- return "OK";
- case ICE_ERR_PARAM:
- return "ICE_ERR_PARAM";
- case ICE_ERR_NOT_IMPL:
- return "ICE_ERR_NOT_IMPL";
- case ICE_ERR_NOT_READY:
- return "ICE_ERR_NOT_READY";
- case ICE_ERR_NOT_SUPPORTED:
- return "ICE_ERR_NOT_SUPPORTED";
- case ICE_ERR_BAD_PTR:
- return "ICE_ERR_BAD_PTR";
- case ICE_ERR_INVAL_SIZE:
- return "ICE_ERR_INVAL_SIZE";
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- return "ICE_ERR_DEVICE_NOT_SUPPORTED";
- case ICE_ERR_RESET_FAILED:
- return "ICE_ERR_RESET_FAILED";
- case ICE_ERR_FW_API_VER:
- return "ICE_ERR_FW_API_VER";
- case ICE_ERR_NO_MEMORY:
- return "ICE_ERR_NO_MEMORY";
- case ICE_ERR_CFG:
- return "ICE_ERR_CFG";
- case ICE_ERR_OUT_OF_RANGE:
- return "ICE_ERR_OUT_OF_RANGE";
- case ICE_ERR_ALREADY_EXISTS:
- return "ICE_ERR_ALREADY_EXISTS";
- case ICE_ERR_NVM:
- return "ICE_ERR_NVM";
- case ICE_ERR_NVM_CHECKSUM:
- return "ICE_ERR_NVM_CHECKSUM";
- case ICE_ERR_BUF_TOO_SHORT:
- return "ICE_ERR_BUF_TOO_SHORT";
- case ICE_ERR_NVM_BLANK_MODE:
- return "ICE_ERR_NVM_BLANK_MODE";
- case ICE_ERR_IN_USE:
- return "ICE_ERR_IN_USE";
- case ICE_ERR_MAX_LIMIT:
- return "ICE_ERR_MAX_LIMIT";
- case ICE_ERR_RESET_ONGOING:
- return "ICE_ERR_RESET_ONGOING";
- case ICE_ERR_HW_TABLE:
- return "ICE_ERR_HW_TABLE";
- case ICE_ERR_DOES_NOT_EXIST:
- return "ICE_ERR_DOES_NOT_EXIST";
- case ICE_ERR_FW_DDP_MISMATCH:
- return "ICE_ERR_FW_DDP_MISMATCH";
- case ICE_ERR_AQ_ERROR:
- return "ICE_ERR_AQ_ERROR";
- case ICE_ERR_AQ_TIMEOUT:
- return "ICE_ERR_AQ_TIMEOUT";
- case ICE_ERR_AQ_FULL:
- return "ICE_ERR_AQ_FULL";
- case ICE_ERR_AQ_NO_WORK:
- return "ICE_ERR_AQ_NO_WORK";
- case ICE_ERR_AQ_EMPTY:
- return "ICE_ERR_AQ_EMPTY";
- case ICE_ERR_AQ_FW_CRITICAL:
- return "ICE_ERR_AQ_FW_CRITICAL";
- }
-
- return "ICE_ERR_UNKNOWN";
-}
-
-/**
* ice_set_rss_lut - Set RSS LUT
* @vsi: Pointer to VSI structure
* @lut: Lookup table
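With ice_stat_str() gone, these logs print the raw errno (for example, -5 for -EIO) alongside the decoded admin queue string. If symbolic names are wanted, the kernel's %pe printf extension can render them from an ERR_PTR; this patch does not use it, so the line below is only a hypothetical alternative:

    dev_err(dev, "Cannot set RSS lut, err %pe aq_err %s\n",
    	ERR_PTR(status), ice_aq_str(hw->adminq.sq_last_status));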
@@ -7033,7 +6961,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
struct ice_aq_get_set_rss_lut_params params = {};
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!lut)
return -EINVAL;
@@ -7044,14 +6972,11 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
params.lut = lut;
status = ice_aq_set_rss_lut(hw, &params);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7064,20 +6989,17 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
{
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!seed)
return -EINVAL;
status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7092,7 +7014,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
struct ice_aq_get_set_rss_lut_params params = {};
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!lut)
return -EINVAL;
@@ -7103,14 +7025,11 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
params.lut = lut;
status = ice_aq_get_rss_lut(hw, &params);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7123,20 +7042,17 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
{
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!seed)
return -EINVAL;
status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7177,8 +7093,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
struct ice_aqc_vsi_props *vsi_props;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
vsi_props = &vsi->info;
@@ -7196,12 +7111,10 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
- bmode, ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
+ bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
/* Update sw flags for book keeping */
@@ -7233,7 +7146,6 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
struct ice_pf *pf = np->vsi->back;
struct nlattr *attr, *br_spec;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct ice_sw *pf_sw;
int rem, v, err = 0;
@@ -7267,14 +7179,14 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
/* Update the unicast switch filter rules for the corresponding
* switch of the netdev
*/
- status = ice_update_sw_rule_bridge_mode(hw);
- if (status) {
- netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
- mode, ice_stat_str(status),
+ err = ice_update_sw_rule_bridge_mode(hw);
+ if (err) {
+ netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
+ mode, err,
ice_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
- return -EIO;
+ return err;
}
pf_sw->bridge_mode = mode;
@@ -7552,6 +7464,67 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
}
/**
+ * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
+ * @pf: ptr to PF device
+ * @vsi: ptr to VSI
+ */
+static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ bool added = false;
+ struct ice_hw *hw;
+ int flow;
+
+ if (!(vsi->num_gfltr || vsi->num_bfltr))
+ return -EINVAL;
+
+ hw = &pf->hw;
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ struct ice_fd_hw_prof *prof;
+ int tun, status;
+ u64 entry_h;
+
+ if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
+ hw->fdir_prof[flow]->cnt))
+ continue;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ enum ice_flow_priority prio;
+ u64 prof_id;
+
+ /* add this VSI to FDir profile for this flow */
+ prio = ICE_FLOW_PRIO_NORMAL;
+ prof = hw->fdir_prof[flow];
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ prof->vsi_h[0], vsi->idx,
+ prio, prof->fdir_seg[tun],
+ &entry_h);
+ if (status) {
+ dev_err(dev, "channel VSI idx %d: unable to add to group %d\n",
+ vsi->idx, flow);
+ continue;
+ }
+
+ prof->entry_h[prof->cnt][tun] = entry_h;
+ }
+
+ /* store VSI for filter replay and delete */
+ prof->vsi_h[prof->cnt] = vsi->idx;
+ prof->cnt++;
+
+ added = true;
+ dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
+ flow);
+ }
+
+ if (!added)
+ dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
+
+ return 0;
+}
+
+/**
* ice_add_channel - add a channel by adding VSI
* @pf: ptr to PF device
* @sw_id: underlying HW switching element ID
@@ -7575,6 +7548,8 @@ static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
return -EINVAL;
}
+ ice_add_vsi_to_fdir(pf, vsi);
+
ch->sw_id = sw_id;
ch->vsi_num = vsi->vsi_num;
ch->info.mapping_flags = vsi->info.mapping_flags;
@@ -7875,6 +7850,15 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
if (rem_fltr)
ice_rem_all_chnl_fltrs(pf);
+ /* remove ntuple filters since queue configuration is being changed */
+ if (vsi->netdev->features & NETIF_F_NTUPLE) {
+ struct ice_hw *hw = &pf->hw;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ ice_fdir_del_all_fltrs(vsi);
+ mutex_unlock(&hw->fdir_fltr_lock);
+ }
+
/* perform cleanup for channels if they exist */
list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
struct ice_vsi *ch_vsi;
@@ -7905,6 +7889,9 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
}
}
+ /* Release FD resources for the channel VSI */
+ ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
+
/* clear the VSI from scheduler tree */
ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
@@ -8449,7 +8436,6 @@ int ice_open_internal(struct net_device *netdev)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
- enum ice_status status;
int err;
if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
@@ -8460,11 +8446,10 @@ int ice_open_internal(struct net_device *netdev)
netif_carrier_off(netdev);
pi = vsi->port_info;
- status = ice_update_link_info(pi);
- if (status) {
- netdev_err(netdev, "Failed to get link info, error %s\n",
- ice_stat_str(status));
- return -EIO;
+ err = ice_update_link_info(pi);
+ if (err) {
+ netdev_err(netdev, "Failed to get link info, error %d\n", err);
+ return err;
}
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index fee37a5844cf..4eb0599714f4 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
+#include <linux/vmalloc.h>
+
#include "ice_common.h"
/**
@@ -16,7 +18,7 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
-static enum ice_status
+static int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd)
@@ -27,7 +29,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
cmd = &desc.params.nvm;
if (offset > ICE_AQC_NVM_MAX_OFFSET)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
@@ -60,21 +62,21 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
* Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure.
*/
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram)
{
- enum ice_status status;
u32 inlen = *length;
u32 bytes_read = 0;
bool last_cmd;
+ int status;
*length = 0;
/* Verify the length of the read if this is for the Shadow RAM */
if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
do {
@@ -119,7 +121,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
*
* Update the NVM using the admin queue commands (0x0703)
*/
-enum ice_status
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
@@ -131,7 +133,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
@@ -158,8 +160,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
*
* Erase the NVM sector using the admin queue commands (0x0702)
*/
-enum ice_status
-ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
+int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
@@ -184,12 +185,11 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
*
* Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/
-static enum ice_status
-ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
+static int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
u32 bytes = sizeof(u16);
- enum ice_status status;
__le16 data_local;
+ int status;
/* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and
* Shadow RAM sector restrictions necessary when reading from the NVM.
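For orientation, the body of ice_read_sr_word_aq() (unchanged by this hunk apart from the declarations above) roughly reduces to reading one little-endian word through ice_read_flat_nvm() and byte-swapping it; a sketch:

    status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
    			   (__force u8 *)&data_local, true);
    if (status)
    	return status;

    *data = le16_to_cpu(data_local);
    return 0;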
@@ -210,8 +210,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
*
* This function will request NVM ownership.
*/
-enum ice_status
-ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
+int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
if (hw->flash.blank_nvm_mode)
return 0;
@@ -318,18 +317,18 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban
* hw->flash.banks data being setup by ice_determine_active_flash_banks()
* during initialization.
*/
-static enum ice_status
+static int
ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
u32 offset, u8 *data, u32 length)
{
- enum ice_status status;
+ int status;
u32 start;
start = ice_get_flash_bank_offset(hw, bank, module);
if (!start) {
ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
module);
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
status = ice_acquire_nvm(hw, ICE_RES_READ);
@@ -353,11 +352,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
* Read the specified word from the active NVM module. This includes the CSS
* header at the start of the NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16));
@@ -377,7 +376,7 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
@@ -392,11 +391,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u
*
* Read a word from the specified netlist bank.
*/
-static enum ice_status
+static int
ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16));
@@ -414,9 +413,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
@@ -438,13 +437,13 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- enum ice_status status;
u16 pfa_len, pfa_ptr;
u16 next_tlv;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
if (status) {
@@ -482,7 +481,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*module_tlv_len = tlv_len;
return 0;
}
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
/* Check next TLV, i.e. current TLV pointer + length + 2 words
* (for current TLV's type and length)
@@ -490,7 +489,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
next_tlv = next_tlv + tlv_len + 2;
}
/* Module does not exist */
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
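The walk above relies on the PFA TLV layout: each entry is a type word and a length word followed by the value, so the next entry begins at the current pointer plus the length plus two. As a comment-style sketch (offsets illustrative):

    /* word N + 0 : TLV type (compared against module_type)
     * word N + 1 : TLV length, in words
     * word N + 2 : first word of the value
     * ...
     * next TLV   : word N + 2 + length
     */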
@@ -501,12 +500,11 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*
* Reads the part number string from the NVM.
*/
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
+int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
u16 pba_tlv, pba_tlv_len;
- enum ice_status status;
u16 pba_word, pba_size;
+ int status;
u16 i;
status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
@@ -525,7 +523,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
if (pba_tlv_len < pba_size) {
ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
/* Subtract one to get PBA word count (PBA Size word is included in
@@ -534,7 +532,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
for (i = 0; i < pba_size; i++) {
@@ -561,11 +559,11 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
* Read the NVM EETRACK ID and map version of the main NVM image bank, filling
* in the NVM info structure.
*/
-static enum ice_status
+static int
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
{
u16 eetrack_lo, eetrack_hi, ver;
- enum ice_status status;
+ int status;
status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
@@ -601,7 +599,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
* inactive NVM bank. Used to access version data for a pending update that
* has not yet been activated.
*/
-enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
+int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
{
return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
}
@@ -615,49 +613,73 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info
* Searches through the Option ROM flash contents to locate the CIVD data for
* the image.
*/
-static enum ice_status
+static int
ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd)
{
- struct ice_orom_civd_info tmp;
- enum ice_status status;
+ u8 *orom_data;
+ int status;
u32 offset;
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
* The first 4 bytes must contain the ASCII characters "$CIV".
* A simple modulo 256 sum of all of the bytes of the structure must
* equal 0.
+ *
+ * The exact location is unknown and varies between images but is
+ * usually somewhere in the middle of the bank. We need to scan the
+ * Option ROM bank to locate it.
+ *
+ * It's significantly faster to read the entire Option ROM up front
+ * using the maximum page size, than to read each possible location
+ * with a separate firmware command.
*/
+ orom_data = vzalloc(hw->flash.banks.orom_size);
+ if (!orom_data)
+ return -ENOMEM;
+
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
+ orom_data, hw->flash.banks.orom_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
+ /* free the buffered copy so the error path does not leak it */
+ vfree(orom_data);
+ return status;
+ }
+
+ /* Scan the memory buffer to locate the CIVD data section */
for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
+ struct ice_orom_civd_info *tmp;
u8 sum = 0, i;
- status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
- offset, (u8 *)&tmp, sizeof(tmp));
- if (status) {
- ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM CIVD data\n");
- return status;
- }
+ tmp = (struct ice_orom_civd_info *)&orom_data[offset];
/* Skip forward until we find a matching signature */
- if (memcmp("$CIV", tmp.signature, sizeof(tmp.signature)) != 0)
+ if (memcmp("$CIV", tmp->signature, sizeof(tmp->signature)) != 0)
continue;
+ ice_debug(hw, ICE_DBG_NVM, "Found CIVD section at offset %u\n",
+ offset);
+
/* Verify that the simple checksum is zero */
- for (i = 0; i < sizeof(tmp); i++)
+ for (i = 0; i < sizeof(*tmp); i++)
/* cppcheck-suppress objectIndex */
- sum += ((u8 *)&tmp)[i];
+ sum += ((u8 *)tmp)[i];
if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
sum);
- return ICE_ERR_NVM;
+ goto err_invalid_checksum;
}
- *civd = tmp;
+ *civd = *tmp;
+ vfree(orom_data);
return 0;
}
- return ICE_ERR_NVM;
+ ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n");
+
+err_invalid_checksum:
+ vfree(orom_data);
+ return -EIO;
}
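The validity rule the rewritten function scans for (a "$CIV" signature followed by a zero modulo-256 byte sum) can be expressed independently of any flash access; a self-contained sketch:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Sketch of the CIVD validity rule described above: the section must begin
 * with "$CIV" and the modulo-256 sum of all of its bytes must be zero.
 */
static bool civd_section_valid(const unsigned char *buf, size_t len)
{
	unsigned char sum = 0;
	size_t i;

	if (len < 4 || memcmp(buf, "$CIV", 4) != 0)
		return false;

	for (i = 0; i < len; i++)
		sum += buf[i];	/* u8 arithmetic wraps modulo 256 */

	return sum == 0;
}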
/**
@@ -669,12 +691,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
* Read Option ROM version and security revision from the Option ROM flash
* section.
*/
-static enum ice_status
+static int
ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom)
{
struct ice_orom_civd_info civd;
- enum ice_status status;
u32 combo_ver;
+ int status;
status = ice_get_orom_civd_data(hw, bank, &civd);
if (status) {
@@ -700,7 +722,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
* section of flash. Used to access version data for a pending update that has
* not yet been activated.
*/
-enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
+int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
{
return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
}
@@ -715,13 +737,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf
* Topology section to find the Netlist ID block and extract the relevant
* information into the netlist version structure.
*/
-static enum ice_status
+static int
ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_netlist_info *netlist)
{
u16 module_id, length, node_count, i;
- enum ice_status status;
u16 *id_blk;
+ int status;
status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id);
if (status)
@@ -730,7 +752,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
- return ICE_ERR_NVM;
+ return -EIO;
}
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length);
@@ -741,7 +763,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (length < ICE_NETLIST_ID_BLK_SIZE) {
ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
ICE_NETLIST_ID_BLK_SIZE, length);
- return ICE_ERR_NVM;
+ return -EIO;
}
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count);
@@ -751,7 +773,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
if (!id_blk)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Read out the entire Netlist ID Block at once. */
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
@@ -791,7 +813,7 @@ exit_error:
* extract version data of a pending flash update in order to display the
* version data.
*/
-enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
+int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
}
@@ -804,10 +826,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli
* the actual size is smaller. Use bisection to determine the accessible size
* of flash memory.
*/
-static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
+static int ice_discover_flash_size(struct ice_hw *hw)
{
u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -819,7 +841,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
u8 data;
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
- if (status == ICE_ERR_AQ_ERROR &&
+ if (status == -EIO &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
@@ -859,10 +881,9 @@ err_read_flat_nvm:
* sector size by using the highest bit. The reported pointer value will be in
* bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
+static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -891,10 +912,9 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
* Each area size word is specified in 4KB sector units. This function reports
* the size in bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
+static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -917,12 +937,11 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
* structure for later use in order to calculate the correct offset to read
* from the active module.
*/
-static enum ice_status
-ice_determine_active_flash_banks(struct ice_hw *hw)
+static int ice_determine_active_flash_banks(struct ice_hw *hw)
{
struct ice_bank_info *banks = &hw->flash.banks;
- enum ice_status status;
u16 ctrl_word;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
if (status) {
@@ -933,7 +952,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
/* Check that the control word indicates validity */
if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
@@ -997,12 +1016,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
*/
-enum ice_status ice_init_nvm(struct ice_hw *hw)
+int ice_init_nvm(struct ice_hw *hw)
{
struct ice_flash_info *flash = &hw->flash;
- enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
+ int status;
/* The SR size is stored regardless of the NVM programming mode
* as the blank mode may be used in the factory line.
@@ -1021,7 +1040,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
/* Blank programming mode */
flash->blank_nvm_mode = true;
ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
- return ICE_ERR_NVM_BLANK_MODE;
+ return -EIO;
}
status = ice_discover_flash_size(hw);
@@ -1060,11 +1079,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
*
* Verify NVM PFA checksum validity (0x0706)
*/
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
+int ice_nvm_validate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -1080,7 +1099,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
if (!status)
if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
- status = ICE_ERR_NVM_CHECKSUM;
+ status = -EIO;
return status;
}
@@ -1088,22 +1107,35 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
/**
* ice_nvm_write_activate
* @hw: pointer to the HW struct
- * @cmd_flags: NVM activate admin command bits (banks to be validated)
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
*
* Update the control word with the required banks' validity bits
* and dump the Shadow RAM to flash (0x0707)
+ *
+ * cmd_flags controls which banks to activate, and the preservation level to
+ * use when activating the NVM bank.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware, indicating status such as
+ * whether an EMP reset is enabled.
*/
-enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
+int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
+ int err;
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
cmd->cmd_flags = cmd_flags;
- return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!err && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return err;
}
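A hypothetical caller of the new signature, for illustration; the cmd_flags value and the response bit tested below are placeholders, not names from the driver:

static int example_activate(struct ice_hw *hw, u8 cmd_flags)
{
	u8 response = 0;
	int err;

	err = ice_nvm_write_activate(hw, cmd_flags, &response);
	if (err)
		return err;

	/* firmware echoes status bits back in cmd_flags */
	if (response & BIT(0) /* placeholder: e.g. "EMP reset enabled" bit */)
		ice_debug(hw, ICE_DBG_NVM, "EMP reset enabled after activate\n");

	return 0;
}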
/**
@@ -1113,7 +1145,7 @@ enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
* Update empr (0x0709). This command allows SW to
* request an EMPR to activate new FW.
*/
-enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
+int ice_aq_nvm_update_empr(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -1136,7 +1168,7 @@ enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
* as part of the NVM update as the first cmd in the flow.
*/
-enum ice_status
+int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd)
{
@@ -1144,7 +1176,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
struct ice_aq_desc desc;
if (length != 0 && !data)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.pkg_data;
@@ -1173,17 +1205,17 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
* the TransferFlag is set to End or StartAndEnd.
*/
-enum ice_status
+int
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 transfer_flag, u8 *comp_response,
u8 *comp_response_code, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pass_comp_tbl *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || !comp_response || !comp_response_code)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.pass_comp_tbl;
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index c6f05f43d593..856d1ad4398b 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -12,38 +12,34 @@ struct ice_orom_civd_info {
__le16 combo_name[32]; /* Unicode string representing the Combo Image version */
} __packed;
-enum ice_status
-ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
+int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
-enum ice_status
-ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
-enum ice_status
-ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
-enum ice_status
+int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
+int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
+int
ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist);
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
-enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
-enum ice_status
+int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
+int ice_init_nvm(struct ice_hw *hw);
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
-enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw);
-enum ice_status
+int ice_nvm_validate_checksum(struct ice_hw *hw);
+int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags);
+int ice_aq_nvm_update_empr(struct ice_hw *hw);
+int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 transfer_flag, u8 *comp_response,
u8 *comp_response_code, struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 442b031b0edc..ae291d442539 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -6,6 +6,8 @@
#define E810_OUT_PROP_DELAY_NS 1
+#define UNKNOWN_INCVAL_E822 0x100000000ULL
+
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
{ "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
@@ -281,6 +283,8 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
else
val &= ~PFINT_OICR_TSYN_TX_M;
wr32(&pf->hw, PFINT_OICR_ENA, val);
+
+ pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}
/**
@@ -303,6 +307,9 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
continue;
vsi->rx_rings[i]->ptp_rx = on;
}
+
+ pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
}
/**
@@ -313,18 +320,10 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
* This function will configure timestamping during PTP initialization
* and deinitialization
*/
-static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
+void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
ice_set_tx_tstamp(pf, ena);
ice_set_rx_tstamp(pf, ena);
-
- if (ena) {
- pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
- pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
- } else {
- pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
- pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
- }
}
/**
@@ -682,6 +681,406 @@ static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
}
/**
+ * ice_base_incval - Get base timer increment value
+ * @pf: Board private structure
+ *
+ * Look up the base timer increment value for this device. The base increment
+ * value is used to define the nominal clock tick rate. This increment value
+ * is programmed during device initialization. It is also used as the basis
+ * for calculating adjustments using scaled_ppm.
+ */
+static u64 ice_base_incval(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u64 incval;
+
+ if (ice_is_e810(hw))
+ incval = ICE_PTP_NOMINAL_INCVAL_E810;
+ else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
+ incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
+ else
+ incval = UNKNOWN_INCVAL_E822;
+
+ dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
+ incval);
+
+ return incval;
+}
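ice_base_incval() also anchors frequency adjustment: ice_ptp_adjfine() scales this nominal value by scaled_ppm, which is parts per million with a 16-bit fractional field. An illustrative sketch of that arithmetic (not the driver's exact rounding):

/* Scale a nominal increment by scaled_ppm (ppm with 16 fractional bits). */
static u64 scale_incval(u64 base, long scaled_ppm)
{
	u64 ppm = scaled_ppm < 0 ? -scaled_ppm : scaled_ppm;
	u64 diff;

	/* diff = base * ppm / (1e6 << 16), i.e. the requested fraction */
	diff = mul_u64_u64_div_u64(base, ppm, 1000000ULL << 16);

	return scaled_ppm < 0 ? base - diff : base + diff;
}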
+
+/**
+ * ice_ptp_reset_ts_memory_quad - Reset timestamp memory for one quad
+ * @pf: The PF private data structure
+ * @quad: The quad (0-4)
+ */
+static void ice_ptp_reset_ts_memory_quad(struct ice_pf *pf, int quad)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
+}
+
+/**
+ * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
+ * @port: PTP port for which Tx FIFO is checked
+ */
+static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
+{
+ int quad = port->port_num / ICE_PORTS_PER_QUAD;
+ int offs = port->port_num % ICE_PORTS_PER_QUAD;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u32 val, phy_sts;
+ int err;
+
+ pf = ptp_port_to_pf(port);
+ hw = &pf->hw;
+
+ if (port->tx_fifo_busy_cnt == FIFO_OK)
+ return 0;
+
+ /* need to read FIFO state */
+ if (offs == 0 || offs == 1)
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
+ &val);
+ else
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
+ &val);
+
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
+ port->port_num, err);
+ return err;
+ }
+
+ if (offs & 0x1)
+ phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
+ else
+ phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;
+
+ if (phy_sts & FIFO_EMPTY) {
+ port->tx_fifo_busy_cnt = FIFO_OK;
+ return 0;
+ }
+
+ port->tx_fifo_busy_cnt++;
+
+ dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
+ port->tx_fifo_busy_cnt, port->port_num);
+
+ if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "Port %d Tx FIFO still not empty; resetting quad %d\n",
+ port->port_num, quad);
+ ice_ptp_reset_ts_memory_quad(pf, quad);
+ port->tx_fifo_busy_cnt = FIFO_OK;
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+
+/**
+ * ice_ptp_check_tx_offset_valid - Check if the Tx PHY offset is valid
+ * @port: the PTP port to check
+ *
+ * Checks whether the Tx offset for the PHY associated with this port is
+ * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
+ * not.
+ */
+static int ice_ptp_check_tx_offset_valid(struct ice_ptp_port *port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(port);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int err;
+
+ err = ice_ptp_check_tx_fifo(port);
+ if (err)
+ return err;
+
+ err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS,
+ &val);
+ if (err) {
+ dev_err(dev, "Failed to read TX_OV_STATUS for port %d, err %d\n",
+ port->port_num, err);
+ return -EAGAIN;
+ }
+
+ if (!(val & P_REG_TX_OV_STATUS_OV_M))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_check_rx_offset_valid - Check if the Rx PHY offset is valid
+ * @port: the PTP port to check
+ *
+ * Checks whether the Rx offset for the PHY associated with this port is
+ * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
+ * not.
+ */
+static int ice_ptp_check_rx_offset_valid(struct ice_ptp_port *port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(port);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int err;
+ u32 val;
+
+ err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS,
+ &val);
+ if (err) {
+ dev_err(dev, "Failed to read RX_OV_STATUS for port %d, err %d\n",
+ port->port_num, err);
+ return err;
+ }
+
+ if (!(val & P_REG_RX_OV_STATUS_OV_M))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_check_offset_valid - Check port offset valid bit
+ * @port: Port for which offset valid bit is checked
+ *
+ * Returns 0 if both the Tx and Rx offsets are valid, and -EAGAIN if either
+ * offset is not yet ready.
+ */
+static int ice_ptp_check_offset_valid(struct ice_ptp_port *port)
+{
+ int tx_err, rx_err;
+
+ /* always check both Tx and Rx offset validity */
+ tx_err = ice_ptp_check_tx_offset_valid(port);
+ rx_err = ice_ptp_check_rx_offset_valid(port);
+
+ if (tx_err || rx_err)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_wait_for_offset_valid - Check for valid Tx and Rx offsets
+ * @work: Pointer to the kthread_work structure for this task
+ *
+ * Check whether both the Tx and Rx offsets are valid for enabling the vernier
+ * calibration.
+ *
+ * Once we have valid offsets from hardware, update the total Tx and Rx
+ * offsets, and exit bypass mode. This enables more precise timestamps using
+ * the extra data measured during the vernier calibration process.
+ */
+static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
+{
+ struct ice_ptp_port *port;
+ int err;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+
+ port = container_of(work, struct ice_ptp_port, ov_work.work);
+ pf = ptp_port_to_pf(port);
+ hw = &pf->hw;
+ dev = ice_pf_to_dev(pf);
+
+ if (ice_ptp_check_offset_valid(port)) {
+ /* Offsets not ready yet, try again later */
+ kthread_queue_delayed_work(pf->ptp.kworker,
+ &port->ov_work,
+ msecs_to_jiffies(100));
+ return;
+ }
+
+ /* Offsets are valid, so it is safe to exit bypass mode */
+ err = ice_phy_exit_bypass_e822(hw, port->port_num);
+ if (err) {
+ dev_warn(dev, "Failed to exit bypass mode for PHY port %u, err %d\n",
+ port->port_num, err);
+ return;
+ }
+}
+
+/**
+ * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
+ * @ptp_port: PTP port to stop
+ */
+static int
+ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(ptp_port);
+ u8 port = ptp_port->port_num;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (ice_is_e810(hw))
+ return 0;
+
+ mutex_lock(&ptp_port->ps_lock);
+
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+
+ err = ice_stop_phy_timer_e822(hw, port, true);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
+ port, err);
+
+ mutex_unlock(&ptp_port->ps_lock);
+
+ return err;
+}
+
+/**
+ * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
+ * @ptp_port: PTP port for which the PHY start is set
+ *
+ * Start the PHY timestamping block, and initiate Vernier timestamping
+ * calibration. If timestamping cannot be calibrated (such as if link is down)
+ * then disable the timestamping block instead.
+ */
+static int
+ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(ptp_port);
+ u8 port = ptp_port->port_num;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (ice_is_e810(hw))
+ return 0;
+
+ if (!ptp_port->link_up)
+ return ice_ptp_port_phy_stop(ptp_port);
+
+ mutex_lock(&ptp_port->ps_lock);
+
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+
+ /* temporarily disable Tx timestamps while calibrating PHY offset */
+ ptp_port->tx.calibrating = true;
+ ptp_port->tx_fifo_busy_cnt = 0;
+
+ /* Start the PHY timer in bypass mode */
+ err = ice_start_phy_timer_e822(hw, port, true);
+ if (err)
+ goto out_unlock;
+
+ /* Enable Tx timestamps right away */
+ ptp_port->tx.calibrating = false;
+
+ kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
+
+out_unlock:
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
+ port, err);
+
+ mutex_unlock(&ptp_port->ps_lock);
+
+ return err;
+}
+
+/**
+ * ice_ptp_link_change - Set or clear port registers for timestamping
+ * @pf: Board private structure
+ * @port: Port for which the PHY start is set
+ * @linkup: Link is up or down
+ */
+int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+{
+ struct ice_ptp_port *ptp_port;
+
+ if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ return 0;
+
+ if (port >= ICE_NUM_EXTERNAL_PORTS)
+ return -EINVAL;
+
+ ptp_port = &pf->ptp.port;
+ if (ptp_port->port_num != port)
+ return -EINVAL;
+
+ /* Update cached link status for this port immediately */
+ ptp_port->link_up = linkup;
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ /* PTP is not setup */
+ return -EAGAIN;
+
+ return ice_ptp_port_phy_restart(ptp_port);
+}
+
+/**
+ * ice_ptp_reset_ts_memory - Reset timestamp memory for all quads
+ * @pf: The PF private data structure
+ */
+static void ice_ptp_reset_ts_memory(struct ice_pf *pf)
+{
+ int quad;
+
+ quad = pf->hw.port_info->lport / ICE_PORTS_PER_QUAD;
+ ice_ptp_reset_ts_memory_quad(pf, quad);
+}
+
+/**
+ * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
+ * @pf: PF private structure
+ * @ena: bool value to enable or disable interrupt
+ * @threshold: Minimum number of packets at which intr is triggered
+ *
+ * Utility function to enable or disable Tx timestamp interrupt and threshold
+ */
+static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
+{
+ struct ice_hw *hw = &pf->hw;
+ int err = 0;
+ int quad;
+ u32 val;
+
+ ice_ptp_reset_ts_memory(pf);
+
+ for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ &val);
+ if (err)
+ break;
+
+ if (ena) {
+ val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
+ val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
+ Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
+ } else {
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ }
+
+ err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ val);
+ if (err)
+ break;
+ }
+
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
+ err);
+ return err;
+}
+
+/**
+ * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
+ * @pf: Board private structure
+ */
+static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
+{
+ ice_ptp_port_phy_restart(&pf->ptp.port);
+}
+
+/**
* ice_ptp_adjfine - Adjust clock increment rate
* @info: the driver's PTP info structure
* @scaled_ppm: Parts per million with 16-bit fractional field
@@ -698,7 +1097,7 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
int neg_adj = 0;
int err;
- incval = ICE_PTP_NOMINAL_INCVAL_E810;
+ incval = ice_base_incval(pf);
if (scaled_ppm < 0) {
neg_adj = 1;
@@ -905,7 +1304,10 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
NSEC_PER_SEC) * NSEC_PER_SEC + phase;
- start_time -= E810_OUT_PROP_DELAY_NS;
+ if (ice_is_e810(hw))
+ start_time -= E810_OUT_PROP_DELAY_NS;
+ else
+ start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));
/* 2. Write TARGET time */
wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
@@ -1088,6 +1490,12 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
struct ice_hw *hw = &pf->hw;
int err;
+ /* For Vernier mode, we need to recalibrate after new settime
+ * Start with disabling timestamp block
+ */
+ if (pf->ptp.port.link_up)
+ ice_ptp_port_phy_stop(&pf->ptp.port);
+
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
goto exit;
@@ -1104,6 +1512,10 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
+
+ /* Recalibrate and re-enable timestamp block */
+ if (pf->ptp.port.link_up)
+ ice_ptp_port_phy_restart(&pf->ptp.port);
exit:
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
@@ -1177,6 +1589,101 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
+#ifdef CONFIG_ICE_HWTS
+/**
+ * ice_ptp_get_syncdevicetime - Get the cross timestamp info
+ * @device: Current device time
+ * @system: System counter value read synchronously with device time
+ * @ctx: Context provided by timekeeping code
+ *
+ * Read device and system (ART) clock simultaneously and return the corrected
+ * clock values in ns.
+ */
+static int
+ice_ptp_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct ice_pf *pf = (struct ice_pf *)ctx;
+ struct ice_hw *hw = &pf->hw;
+ u32 hh_lock, hh_art_ctl;
+ int i;
+
+ /* Get the HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ if (hh_lock & PFHH_SEM_BUSY_M) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ return -EFAULT;
+ }
+
+ /* Start the ART and device clock sync sequence */
+ hh_art_ctl = rd32(hw, GLHH_ART_CTL);
+ hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
+ wr32(hw, GLHH_ART_CTL, hh_art_ctl);
+
+#define MAX_HH_LOCK_TRIES 100
+
+ for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
+ /* Wait for sync to complete */
+ hh_art_ctl = rd32(hw, GLHH_ART_CTL);
+ if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
+ udelay(1);
+ continue;
+ } else {
+ u32 hh_ts_lo, hh_ts_hi, tmr_idx;
+ u64 hh_ts;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ /* Read ART time */
+ hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
+ hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
+ hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
+ *system = convert_art_ns_to_tsc(hh_ts);
+ /* Read Device source clock time */
+ hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
+ hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
+ hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
+ *device = ns_to_ktime(hh_ts);
+ break;
+ }
+ }
+ /* Release HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
+ wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
+
+ if (i == MAX_HH_LOCK_TRIES)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
+ * @info: the driver's PTP info structure
+ * @cts: The memory to fill the cross timestamp info
+ *
+ * Capture a cross timestamp between the ART and the device PTP hardware
+ * clock. Fill the cross timestamp information and report it back to the
+ * caller.
+ *
+ * This is only valid for E822 devices which have support for generating the
+ * cross timestamp via PCIe PTM.
+ *
+ * In order to correctly correlate the ART timestamp back to the TSC time, the
+ * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ */
+static int
+ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+
+ return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
+ pf, NULL, cts);
+}
+#endif /* CONFIG_ICE_HWTS */
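Once getcrosststamp is wired up, user space can consume the correlated pair through the standard PTP character device. A minimal sketch using the stock PTP_SYS_OFFSET_PRECISE ioctl (the /dev/ptp0 path is an assumption; use the clock index the driver reports):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise cts = { 0 };
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &cts)) {
		perror("PTP_SYS_OFFSET_PRECISE");
		return 1;
	}

	/* device time and system CLOCK_REALTIME captured atomically */
	printf("device %lld.%09u realtime %lld.%09u\n",
	       (long long)cts.device.sec, cts.device.nsec,
	       (long long)cts.sys_realtime.sec, cts.sys_realtime.nsec);
	return 0;
}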
+
/**
* ice_ptp_get_ts_config - ioctl interface to read the timestamping config
* @pf: Board private structure
@@ -1205,10 +1712,6 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
- /* Reserved for future extensions. */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
ice_set_tx_tstamp(pf, false);
@@ -1238,7 +1741,6 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
- config->rx_filter = HWTSTAMP_FILTER_ALL;
ice_set_rx_tstamp(pf, true);
break;
default:
@@ -1270,8 +1772,8 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
if (err)
return err;
- /* Save these settings for future reference */
- pf->ptp.tstamp_config = config;
+ /* Return the actual configuration set */
+ config = pf->ptp.tstamp_config;
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
@@ -1403,6 +1905,26 @@ static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
}
/**
+ * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
+ * @pf: Board private structure
+ * @info: PTP info to fill
+ *
+ * Assign functions to the PTP capabilities structure for E822 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E822
+ * devices.
+ */
+static void
+ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+#ifdef CONFIG_ICE_HWTS
+ if (boot_cpu_has(X86_FEATURE_ART) &&
+ boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
+ info->getcrosststamp = ice_ptp_getcrosststamp_e822;
+#endif /* CONFIG_ICE_HWTS */
+}
+
+/**
* ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
* @pf: Board private structure
* @info: PTP info to fill
@@ -1441,7 +1963,10 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->gettimex64 = ice_ptp_gettimex64;
info->settime64 = ice_ptp_settime64;
- ice_ptp_set_funcs_e810(pf, info);
+ if (ice_is_e810(&pf->hw))
+ ice_ptp_set_funcs_e810(pf, info);
+ else
+ ice_ptp_set_funcs_e822(pf, info);
}
/**
@@ -1588,7 +2113,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
u8 idx;
/* Check if this tracker is initialized */
- if (!tx->init)
+ if (!tx->init || tx->calibrating)
return -1;
spin_lock(&tx->lock);
@@ -1704,13 +2229,34 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
kfree(tx->tstamps);
tx->tstamps = NULL;
- kfree(tx->in_use);
+ bitmap_free(tx->in_use);
tx->in_use = NULL;
tx->len = 0;
}
/**
+ * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
+ * the timestamp block is shared for all ports in the same quad. To avoid
+ * ports using the same timestamp index, logically break the block of
+ * registers into chunks based on the port number.
+ */
+static int
+ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+{
+ tx->quad = port / ICE_PORTS_PER_QUAD;
+ tx->quad_offset = tx->quad * INDEX_PER_PORT;
+ tx->len = INDEX_PER_PORT;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
* ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -1781,6 +2327,130 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
+ * ice_ptp_reset - Initialize PTP hardware clock support after reset
+ * @pf: Board private structure
+ */
+void ice_ptp_reset(struct ice_pf *pf)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ struct ice_hw *hw = &pf->hw;
+ struct timespec64 ts;
+ int err, itr = 1;
+ u64 time_diff;
+
+ if (test_bit(ICE_PFR_REQ, pf->state))
+ goto pfr;
+
+ if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ goto reset_ts;
+
+ err = ice_ptp_init_phc(hw);
+ if (err)
+ goto err;
+
+ /* Acquire the global hardware lock */
+ if (!ice_ptp_lock(hw)) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ /* Write the increment time value to PHY and LAN */
+ err = ice_ptp_write_incval(hw, ice_base_incval(pf));
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err;
+ }
+
+ /* Write the initial Time value to PHY and LAN using the cached PHC
+ * time before the reset and time difference between stopping and
+ * starting the clock.
+ */
+ if (ptp->cached_phc_time) {
+ time_diff = ktime_get_real_ns() - ptp->reset_time;
+ ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
+ } else {
+ ts = ktime_to_timespec64(ktime_get_real());
+ }
+ err = ice_ptp_write_init(pf, &ts);
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err;
+ }
+
+ /* Release the global hardware lock */
+ ice_ptp_unlock(hw);
+
+ if (!ice_is_e810(hw)) {
+ /* Enable quad interrupts */
+ err = ice_ptp_tx_ena_intr(pf, true, itr);
+ if (err)
+ goto err;
+ }
+
+reset_ts:
+ /* Restart the PHY timestamping block */
+ ice_ptp_reset_phy_timestamping(pf);
+
+pfr:
+ /* Init Tx structures */
+ if (ice_is_e810(&pf->hw)) {
+ err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
+ } else {
+ kthread_init_delayed_work(&ptp->port.ov_work,
+ ice_ptp_wait_for_offset_valid);
+ err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
+ ptp->port.port_num);
+ }
+ if (err)
+ goto err;
+
+ set_bit(ICE_FLAG_PTP, pf->flags);
+
+ /* Start periodic work going */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
+
+ dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
+ return;
+
+err:
+ dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
+}
+
+/**
+ * ice_ptp_prepare_for_reset - Prepare PTP for reset
+ * @pf: Board private structure
+ */
+void ice_ptp_prepare_for_reset(struct ice_pf *pf)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ u8 src_tmr;
+
+ clear_bit(ICE_FLAG_PTP, pf->flags);
+
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_cfg_timestamp(pf, false);
+
+ kthread_cancel_delayed_work_sync(&ptp->work);
+ kthread_cancel_work_sync(&ptp->extts_work);
+
+ if (test_bit(ICE_PFR_REQ, pf->state))
+ return;
+
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+
+ /* Disable periodic outputs */
+ ice_ptp_disable_all_clkout(pf);
+
+ src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
+
+ /* Disable source clock */
+ wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Acquire PHC and system timer to restore after reset */
+ ptp->reset_time = ktime_get_real_ns();
+}
+
+/**
* ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
* @pf: Board private structure
*
@@ -1790,27 +2460,16 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
*/
static int ice_ptp_init_owner(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- u8 src_idx;
- int err;
-
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
+ int err, itr = 1;
- /* Clear some HW residue and enable source clock */
- src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
-
- /* Enable source clocks */
- wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
-
- /* Enable PHY time sync */
- err = ice_ptp_init_phy_e810(hw);
- if (err)
- goto err_exit;
-
- /* Clear event status indications for auxiliary pins */
- (void)rd32(hw, GLTSYN_STAT(src_idx));
+ err = ice_ptp_init_phc(hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
+ err);
+ return err;
+ }
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
@@ -1819,7 +2478,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
}
/* Write the increment time value to PHY and LAN */
- err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+ err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
goto err_exit;
@@ -1836,6 +2495,13 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
+ if (!ice_is_e810(hw)) {
+ /* Enable quad interrupts */
+ err = ice_ptp_tx_ena_intr(pf, true, itr);
+ if (err)
+ goto err_exit;
+ }
+
/* Ensure we have a clock device */
err = ice_ptp_create_clock(pf);
if (err)
@@ -1849,72 +2515,106 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
err_clk:
pf->ptp.clock = NULL;
err_exit:
- dev_err(dev, "PTP failed to register clock, err %d\n", err);
-
return err;
}
/**
- * ice_ptp_init - Initialize the PTP support after device probe or reset
+ * ice_ptp_init_work - Initialize PTP work threads
* @pf: Board private structure
+ * @ptp: PF PTP structure
+ */
+static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
+{
+ struct kthread_worker *kworker;
+
+ /* Initialize work functions */
+ kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
+ kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);
+
+ /* Allocate a kworker for handling work required for the ports
+ * connected to the PTP hardware clock.
+ */
+ kworker = kthread_create_worker(0, "ice-ptp-%s",
+ dev_name(ice_pf_to_dev(pf)));
+ if (IS_ERR(kworker))
+ return PTR_ERR(kworker);
+
+ ptp->kworker = kworker;
+
+ /* Start periodic work going */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_port - Initialize PTP port structure
+ * @pf: Board private structure
+ * @ptp_port: PTP port structure
+ */
+static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
+{
+ mutex_init(&ptp_port->ps_lock);
+
+ if (ice_is_e810(&pf->hw))
+ return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
+
+ kthread_init_delayed_work(&ptp_port->ov_work,
+ ice_ptp_wait_for_offset_valid);
+ return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
+}
+
+/**
+ * ice_ptp_init - Initialize PTP hardware clock support
+ * @pf: Board private structure
+ *
+ * Set up the device for interacting with the PTP hardware clock for all
+ * functions, both the function that owns the clock hardware, and the
+ * functions connected to the clock hardware.
*
- * This function sets device up for PTP support. The first time it is run, it
- * will create a clock device. It does not create a clock device if one
- * already exists. It also reconfigures the device after a reset.
+ * The clock owner will allocate and register a ptp_clock with the
+ * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
+ * items used for asynchronous work such as Tx timestamps and periodic work.
*/
void ice_ptp_init(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct kthread_worker *kworker;
+ struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
int err;
- /* PTP is currently only supported on E810 devices */
- if (!ice_is_e810(hw))
- return;
-
- /* Check if this PF owns the source timer */
+ /* If this function owns the clock hardware, it must allocate and
+ * configure the PTP clock device to represent it.
+ */
if (hw->func_caps.ts_func_info.src_tmr_owned) {
err = ice_ptp_init_owner(pf);
if (err)
- return;
+ goto err;
}
- /* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
-
- /* Initialize the PTP port Tx timestamp tracker */
- ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx);
-
- /* Initialize work functions */
- kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
- kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work);
+ ptp->port.port_num = hw->pf_id;
+ err = ice_ptp_init_port(pf, &ptp->port);
+ if (err)
+ goto err;
- /* Allocate a kworker for handling work required for the ports
- * connected to the PTP hardware clock.
- */
- kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev));
- if (IS_ERR(kworker)) {
- err = PTR_ERR(kworker);
- goto err_kworker;
- }
- pf->ptp.kworker = kworker;
+ /* Start the PHY timestamping block */
+ ice_ptp_reset_phy_timestamping(pf);
set_bit(ICE_FLAG_PTP, pf->flags);
+ err = ice_ptp_init_work(pf, ptp);
+ if (err)
+ goto err;
- /* Start periodic work going */
- kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
-
- dev_info(dev, "PTP init successful\n");
+ dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
-err_kworker:
+err:
/* If we registered a PTP clock, release it */
if (pf->ptp.clock) {
- ptp_clock_unregister(pf->ptp.clock);
+ ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- dev_err(dev, "PTP failed %d\n", err);
+ clear_bit(ICE_FLAG_PTP, pf->flags);
+ dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
/**
@@ -1938,6 +2638,8 @@ void ice_ptp_release(struct ice_pf *pf)
kthread_cancel_delayed_work_sync(&pf->ptp.work);
+ ice_ptp_port_phy_stop(&pf->ptp.port);
+ mutex_destroy(&pf->ptp.port.ps_lock);
if (pf->ptp.kworker) {
kthread_destroy_worker(pf->ptp.kworker);
pf->ptp.kworker = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 53c15fc9d996..afd048d69959 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -82,6 +82,8 @@ struct ice_tx_tstamp {
* @quad_offset: offset into timestamp block of the quad to get the real index
* @len: length of the tstamps and in_use fields.
* @init: if true, the tracker is initialized;
+ * @calibrating: if true, the PHY is calibrating the Tx offset. During this
+ * window, timestamps are temporarily disabled.
*/
struct ice_ptp_tx {
struct kthread_work work;
@@ -92,6 +94,7 @@ struct ice_ptp_tx {
u8 quad_offset;
u8 len;
u8 init;
+ u8 calibrating;
};
/* Quad and port information for initializing timestamp blocks */
@@ -101,15 +104,24 @@ struct ice_ptp_tx {
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
*
- * This structure contains PTP data related to the external ports. Currently
- * it is used for tracking the Tx timestamps of a port. In the future this
- * structure will also hold information for the E822 port initialization
- * logic.
+ * This structure contains data indicating whether a single external port is
+ * ready for PTP functionality. It is used to track the port initialization
+ * and determine when the port's PHY offset is valid.
*
* @tx: Tx timestamp tracking for this port
+ * @ov_work: delayed work task for tracking when PHY offset is valid
+ * @ps_lock: mutex used to protect the overall PTP PHY start procedure
+ * @link_up: indicates whether the link is up
+ * @tx_fifo_busy_cnt: number of times the Tx FIFO was busy
+ * @port_num: the port number this structure represents
*/
struct ice_ptp_port {
struct ice_ptp_tx tx;
+ struct kthread_delayed_work ov_work;
+ struct mutex ps_lock; /* protects overall PTP PHY start procedure */
+ bool link_up;
+ u8 tx_fifo_busy_cnt;
+ u8 port_num;
};
#define GLTSYN_TGT_H_IDX_MAX 4
@@ -127,6 +139,7 @@ struct ice_ptp_port {
* @info: structure defining PTP hardware capabilities
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
+ * @reset_time: kernel time after clock stop on reset
*/
struct ice_ptp {
struct ice_ptp_port port;
@@ -140,6 +153,7 @@ struct ice_ptp {
struct ptp_clock_info info;
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
+ u64 reset_time;
};
#define __ptp_port_to_ptp(p) \
@@ -152,9 +166,15 @@ struct ice_ptp {
#define ptp_info_to_pf(i) \
container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp)
+#define PFTSYN_SEM_BYTES 4
#define PTP_SHARED_CLK_IDX_VALID BIT(31)
+#define TS_CMD_MASK 0xF
+#define SYNC_EXEC_CMD 0x3
#define ICE_PTP_TS_VALID BIT(0)
+#define FIFO_EMPTY BIT(2)
+#define FIFO_OK 0xFF
+#define ICE_PTP_FIFO_NUM_CHECKS 5
/* Per-channel register definitions */
#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
@@ -175,11 +195,13 @@ struct ice_ptp {
#define N_PER_OUT_E810T 3
#define N_PER_OUT_E810T_NO_SMA 2
#define N_EXT_TS_E810_NO_SMA 2
+#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
int ice_get_ptp_clock_index(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
@@ -188,8 +210,11 @@ void ice_ptp_process_ts(struct ice_pf *pf);
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
+void ice_ptp_reset(struct ice_pf *pf);
+void ice_ptp_prepare_for_reset(struct ice_pf *pf);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
+int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@@ -201,6 +226,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
return -EOPNOTSUPP;
}
+static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
{
return -1;
@@ -216,7 +242,11 @@ static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
static inline void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
+static inline void ice_ptp_reset(struct ice_pf *pf) { }
+static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
+static inline int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+{ return 0; }
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
#endif /* _ICE_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
new file mode 100644
index 000000000000..4109aa3b2fcd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_PTP_CONSTS_H_
+#define _ICE_PTP_CONSTS_H_
+
+/* Constant definitions related to the hardware clock used for PTP 1588
+ * features and functionality.
+ */
+/* Constants defined for the PTP 1588 clock hardware. */
+
+/* struct ice_time_ref_info_e822
+ *
+ * E822 hardware can use different sources as the reference for the PTP
+ * hardware clock. Each clock has different characteristics such as a slightly
+ * different frequency, etc.
+ *
+ * This lookup table defines several constants that depend on the current time
+ * reference. See the struct ice_time_ref_info_e822 for information about the
+ * meaning of each constant.
+ */
+const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* pll_freq */
+ 823437500, /* 823.4375 MHz PLL */
+ /* nominal_incval */
+ 0x136e44fabULL,
+ /* pps_delay */
+ 11,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* pll_freq */
+ 783360000, /* 783.36 MHz */
+ /* nominal_incval */
+ 0x146cc2177ULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* pll_freq */
+ 796875000, /* 796.875 MHz */
+ /* nominal_incval */
+ 0x141414141ULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* pll_freq */
+ 816000000, /* 816 MHz */
+ /* nominal_incval */
+ 0x139b9b9baULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* pll_freq */
+ 830078125, /* 830.078125 MHz */
+ /* nominal_incval */
+ 0x134679aceULL,
+ /* pps_delay */
+ 11,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* pll_freq */
+ 783360000, /* 783.36 MHz */
+ /* nominal_incval */
+ 0x146cc2177ULL,
+ /* pps_delay */
+ 12,
+ },
+};
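The nominal_incval entries above follow a consistent pattern: each is approximately (1 GHz << 32) / pll_freq, i.e. the number of nanoseconds added per PLL tick in 32.32 fixed point. This is an observation from the table, not a documented formula:

/* Observation: nominal_incval ~= (1e9 << 32) / pll_freq. For example,
 * (1000000000ULL << 32) / 823437500 ~= 0x136e44fab, the 25 MHz entry.
 */
static u64 nominal_incval_from_pll(u64 pll_freq_hz)
{
	return div64_u64(1000000000ULL << 32, pll_freq_hz);
}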
+
+const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* refclk_pre_div */
+ 1,
+ /* feedback_div */
+ 197,
+ /* frac_n_div */
+ 2621440,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 159,
+ /* frac_n_div */
+ 1572864,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 159,
+ /* frac_n_div */
+ 1572864,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* refclk_pre_div */
+ 10,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+};
+
+/* struct ice_vernier_info_e822
+ *
+ * E822 hardware calibrates the delay of the timestamp indication from the
+ * actual packet transmission or reception during the initialization of the
+ * PHY. To do this, the hardware mechanism uses some conversions between the
+ * various clocks within the PHY block. This table defines constants used to
+ * calculate the correct conversion ratios in the PHY registers.
+ *
+ * Many of the values relate to the PAR/PCS clock conversion registers. For
+ * these registers, a value of 0 means that the associated register is not
+ * used by this link speed, and that the register should be cleared by writing
+ * 0. Other values specify the clock frequency in Hz.
+ */
+const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+ /* ICE_PTP_LNK_SPD_1G */
+ {
+ /* tx_par_clk */
+ 31250000, /* 31.25 MHz */
+ /* rx_par_clk */
+ 31250000, /* 31.25 MHz */
+ /* tx_pcs_clk */
+ 125000000, /* 125 MHz */
+ /* rx_pcs_clk */
+ 125000000, /* 125 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 25140,
+ /* pmd_adj_divisor */
+ 10000000,
+ /* rx_fixed_delay */
+ 17372,
+ },
+ /* ICE_PTP_LNK_SPD_10G */
+ {
+ /* tx_par_clk */
+ 257812500, /* 257.8125 MHz */
+ /* rx_par_clk */
+ 257812500, /* 257.8125 MHz */
+ /* tx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* rx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 6938,
+ /* pmd_adj_divisor */
+ 82500000,
+ /* rx_fixed_delay */
+ 6212,
+ },
+ /* ICE_PTP_LNK_SPD_25G */
+ {
+ /* tx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* rx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 2778,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 2491,
+ },
+ /* ICE_PTP_LNK_SPD_25G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 0, /* unused */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 0, /* unused */
+ /* tx_desk_rsgb_par */
+ 161132812, /* 161.1328125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 161132812, /* 161.1328125 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 97656250, /* 97.65625 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 97656250, /* 97.65625 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 3928,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 29535,
+ },
+ /* ICE_PTP_LNK_SPD_40G */
+ {
+ /* tx_par_clk */
+ 257812500,
+ /* rx_par_clk */
+ 257812500,
+ /* tx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* rx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 156250000, /* 156.25 MHz deskew clock */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 156250000, /* 156.25 MHz deskew clock */
+ /* tx_fixed_delay */
+ 5666,
+ /* pmd_adj_divisor */
+ 82500000,
+ /* rx_fixed_delay */
+ 4244,
+ },
+ /* ICE_PTP_LNK_SPD_50G */
+ {
+ /* tx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* rx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 195312500, /* 195.3125 MHz deskew clock */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 195312500, /* 195.3125 MHz deskew clock */
+ /* tx_fixed_delay */
+ 2778,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 2868,
+ },
+ /* ICE_PTP_LNK_SPD_50G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_desk_rsgb_par */
+ 322265625, /* 322.265625 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 322265625, /* 322.265625 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 2095,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 14524,
+ },
+ /* ICE_PTP_LNK_SPD_100G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_desk_rsgb_par */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 1620,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 7775,
+ },
+};
+
+#endif /* _ICE_PTP_CONSTS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 29f947c0cd2e..ec8450f034e6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -3,6 +3,8 @@
#include "ice_common.h"
#include "ice_ptp_hw.h"
+#include "ice_ptp_consts.h"
+#include "ice_cgu_regs.h"
/* Low level functions for interacting with and managing the device clock used
* for the Precision Time Protocol.
@@ -29,6 +31,15 @@
*
* For E810 devices, the increment frequency is 812.5 MHz
*
+ * For E822 devices, the clock can be derived from different sources, and the
+ * increment has an effective frequency of one of the following:
+ * - 823.4375 MHz
+ * - 783.36 MHz
+ * - 796.875 MHz
+ * - 816 MHz
+ * - 830.078125 MHz
+ * - 783.36 MHz (a second TIME_REF source also yields this rate)
+ *
* The hardware captures timestamps in the PHY for incoming packets, and for
* outgoing packets on request. To support this, the PHY maintains a timer
* that matches the lower 64 bits of the global source timer.
@@ -37,6 +48,24 @@
* shadow registers are used to prepare the desired initial values. A special
* sync command is issued to trigger copying from the shadow registers into
* the appropriate source and PHY registers simultaneously.
+ *
+ * The driver supports devices which have different PHYs with subtly different
+ * mechanisms to program and control the timers. We divide the devices into
+ * families named after the first major device, E810 and similar devices, and
+ * E822 and similar devices.
+ *
+ * - E822 based devices have additional support for fine grained Vernier
+ * calibration which requires significant setup
+ * - The layout of timestamp data in the PHY register blocks is different
+ * - The way timer synchronization commands are issued is different.
+ *
+ * To support this, very low level functions have an e810 or e822 suffix
+ * indicating what type of device they work on. Higher level abstractions for
+ * tasks that can be done on both devices do not have the suffix and will
+ * correctly look up the appropriate low level function when running.
+ *
+ * Functions which only make sense on a single device family may not have
+ * a suitable generic implementation.
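+ *
+ * For example (sketch only, assuming the ice_is_e810() helper from
+ * ice_common.h), a higher level timestamp read dispatches roughly as:
+ *
+ * if (ice_is_e810(hw))
+ * err = ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+ * else
+ * err = ice_read_phy_tstamp_e822(hw, block, idx, tstamp);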
*/
/**
@@ -51,6 +80,2447 @@ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
return hw->func_caps.ts_func_info.tmr_index_assoc;
}
+/**
+ * ice_ptp_read_src_incval - Read source timer increment value
+ * @hw: pointer to HW struct
+ *
+ * Read the increment value of the source timer and return it.
+ */
+static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
+{
+ u32 lo, hi;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+
+ return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
+}
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val |= GLTSYN_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case READ_TIME:
+ cmd_val |= GLTSYN_CMD_READ_TIME;
+ break;
+ }
+
+ wr32(hw, GLTSYN_CMD, cmd_val);
+}
+
+/**
+ * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
+ * @hw: pointer to HW struct
+ *
+ * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
+ * write immediately. This triggers the hardware to begin executing all of the
+ * source and PHY timer commands synchronously.
+ */
+static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
+{
+ wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+ ice_flush(hw);
+}
+
+/* E822 family functions
+ *
+ * The following functions operate on the E822 family of devices.
+ */
+
+/**
+ * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
+ * @msg: the PHY message buffer to fill in
+ * @port: the port to access
+ * @offset: the register offset
+ */
+static void
+ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+{
+ int phy_port, phy, quadtype;
+
+ phy_port = port % ICE_PORTS_PER_PHY;
+ phy = port / ICE_PORTS_PER_PHY;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
+
+ if (quadtype == 0) {
+ msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
+ msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
+ } else {
+ msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
+ msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
+ }
+
+ if (phy == 0)
+ msg->dest_dev = rmn_0;
+ else if (phy == 1)
+ msg->dest_dev = rmn_1;
+ else
+ msg->dest_dev = rmn_2;
+}
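+
+/* Worked example for the mapping above, assuming the ice_ptp_hw.h values of
+ * ICE_PORTS_PER_PHY == 8, ICE_PORTS_PER_QUAD == 4 and ICE_NUM_QUAD_TYPE == 2:
+ * port 13 yields phy = 13 / 8 = 1, phy_port = 13 % 8 = 5 and
+ * quadtype = (13 / 4) % 2 = 1, so the message is addressed via the
+ * P_Q1_L()/P_Q1_H() macros against P_4_BASE with dest_dev = rmn_1.
+ */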
+
+/**
+ * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 64bit register
+ *
+ * Checks if the provided low address is one of the known 64bit PHY values
+ * represented as two 32bit registers. If it is, return the appropriate high
+ * register offset to use.
+ */
+static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case P_REG_PAR_PCS_TX_OFFSET_L:
+ *high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
+ return true;
+ case P_REG_PAR_PCS_RX_OFFSET_L:
+ *high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
+ return true;
+ case P_REG_PAR_TX_TIME_L:
+ *high_addr = P_REG_PAR_TX_TIME_U;
+ return true;
+ case P_REG_PAR_RX_TIME_L:
+ *high_addr = P_REG_PAR_RX_TIME_U;
+ return true;
+ case P_REG_TOTAL_TX_OFFSET_L:
+ *high_addr = P_REG_TOTAL_TX_OFFSET_U;
+ return true;
+ case P_REG_TOTAL_RX_OFFSET_L:
+ *high_addr = P_REG_TOTAL_RX_OFFSET_U;
+ return true;
+ case P_REG_UIX66_10G_40G_L:
+ *high_addr = P_REG_UIX66_10G_40G_U;
+ return true;
+ case P_REG_UIX66_25G_100G_L:
+ *high_addr = P_REG_UIX66_25G_100G_U;
+ return true;
+ case P_REG_TX_CAPTURE_L:
+ *high_addr = P_REG_TX_CAPTURE_U;
+ return true;
+ case P_REG_RX_CAPTURE_L:
+ *high_addr = P_REG_RX_CAPTURE_U;
+ return true;
+ case P_REG_TX_TIMER_INC_PRE_L:
+ *high_addr = P_REG_TX_TIMER_INC_PRE_U;
+ return true;
+ case P_REG_RX_TIMER_INC_PRE_L:
+ *high_addr = P_REG_RX_TIMER_INC_PRE_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 40bit value
+ *
+ * Checks if the provided low address is one of the known 40bit PHY values
+ * split into two registers with the lower 8 bits in the low register and the
+ * upper 32 bits in the high register. If it is, return the appropriate high
+ * register offset to use.
+ */
+static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case P_REG_TIMETUS_L:
+ *high_addr = P_REG_TIMETUS_U;
+ return true;
+ case P_REG_PAR_RX_TUS_L:
+ *high_addr = P_REG_PAR_RX_TUS_U;
+ return true;
+ case P_REG_PAR_TX_TUS_L:
+ *high_addr = P_REG_PAR_TX_TUS_U;
+ return true;
+ case P_REG_PCS_RX_TUS_L:
+ *high_addr = P_REG_PCS_RX_TUS_U;
+ return true;
+ case P_REG_PCS_TX_TUS_L:
+ *high_addr = P_REG_PCS_TX_TUS_U;
+ return true;
+ case P_REG_DESK_PAR_RX_TUS_L:
+ *high_addr = P_REG_DESK_PAR_RX_TUS_U;
+ return true;
+ case P_REG_DESK_PAR_TX_TUS_L:
+ *high_addr = P_REG_DESK_PAR_TX_TUS_U;
+ return true;
+ case P_REG_DESK_PCS_RX_TUS_L:
+ *high_addr = P_REG_DESK_PCS_RX_TUS_U;
+ return true;
+ case P_REG_DESK_PCS_TX_TUS_L:
+ *high_addr = P_REG_DESK_PCS_TX_TUS_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_read_phy_reg_e822 - Read a PHY register
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @offset: PHY register offset to read
+ * @val: on return, the contents read from the PHY
+ *
+ * Read a PHY register for the given port over the device sideband queue.
+ */
+int
+ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ ice_fill_phy_msg_e822(&msg, port, offset);
+ msg.opcode = ice_sbq_msg_rd;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ *val = msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ *
+ * Reads the two registers associated with a 64bit value and returns it in the
+ * val pointer. The offset always specifies the lower register offset to use.
+ * The high offset is looked up. This function only operates on registers
+ * known to be two parts of a 64bit value.
+ */
+static int
+ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into two 32bit
+ * registers.
+ */
+ if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ *val = (u64)high << 32 | low;
+
+ return 0;
+}
+
+/**
+ * ice_write_phy_reg_e822 - Write a PHY register
+ * @hw: pointer to the HW struct
+ * @port: PHY port to write to
+ * @offset: PHY register offset to write
+ * @val: The value to write to the register
+ *
+ * Write a PHY register for the given port over the device sideband queue.
+ */
+int
+ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ ice_fill_phy_msg_e822(&msg, port, offset);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ *
+ * Write the provided 40b value to the two associated registers by splitting
+ * it up into two chunks, the lower 8 bits and the upper 32 bits.
+ */
+static int
+ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into a lower 8 bit
+ * register and an upper 32 bit register.
+ */
+ if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ low = (u32)(val & P_REG_40B_LOW_M);
+ high = (u32)(val >> P_REG_40B_HIGH_S);
+
+ err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
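+
+/* Worked example of the 40b split above, assuming P_REG_40B_LOW_M is 0xFF
+ * and P_REG_40B_HIGH_S is 8 (the lower 8 bit / upper 32 bit layout described
+ * in ice_is_40b_phy_reg_e822): writing val = 0x1234567890 stores low = 0x90
+ * in the low register and high = 0x12345678 in the high register.
+ */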
+
+/**
+ * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ *
+ * Write the 64bit value to the two associated 32bit PHY registers. The offset
+ * is always specified as the lower register, and the high address is looked
+ * up. This function only operates on registers known to be two parts of
+ * a 64bit value.
+ */
+static int
+ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into two 32bit
+ * registers.
+ */
+ if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ low = lower_32_bits(val);
+ high = upper_32_bits(val);
+
+ err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fill_quad_msg_e822 - Fill message data for quad register access
+ * @msg: the PHY message buffer to fill in
+ * @quad: the quad to access
+ * @offset: the register offset
+ *
+ * Fill a message buffer for accessing a register in a quad shared between
+ * multiple PHYs.
+ */
+static void
+ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+{
+ u32 addr;
+
+ msg->dest_dev = rmn_0;
+
+ if ((quad % ICE_NUM_QUAD_TYPE) == 0)
+ addr = Q_0_BASE + offset;
+ else
+ addr = Q_1_BASE + offset;
+
+ msg->msg_addr_low = lower_16_bits(addr);
+ msg->msg_addr_high = upper_16_bits(addr);
+}
+
+/**
+ * ice_read_quad_reg_e822 - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to read from
+ * @offset: quad register offset to read
+ * @val: on return, the contents read from the quad
+ *
+ * Read a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+int
+ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
+ ice_fill_quad_msg_e822(&msg, quad, offset);
+ msg.opcode = ice_sbq_msg_rd;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ *val = msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_quad_reg_e822 - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to write to
+ * @offset: quad register offset to write
+ * @val: The value to write to the register
+ *
+ * Write a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+int
+ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
+ ice_fill_quad_msg_e822(&msg, quad, offset);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated registers in the
+ * quad memory block that is shared between the internal PHYs of the E822
+ * family of devices.
+ */
+static int
+ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+{
+ u16 lo_addr, hi_addr;
+ u32 lo, hi;
+ int err;
+
+ lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+ hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+
+ err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ /* For E822 based internal PHYs, the timestamp is reported with the
+ * lower 8 bits in the low register, and the upper 32 bits in the high
+ * register.
+ */
+ *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+
+ return 0;
+}
+
+/**
+ * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
+ * shared between the internal PHYs on the E822 devices.
+ */
+static int
+ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
+{
+ u16 lo_addr, hi_addr;
+ int err;
+
+ lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+ hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+
+ err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_read_cgu_reg_e822 - Read a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to read
+ * @val: storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static int
+ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg;
+ int err;
+
+ cgu_msg.opcode = ice_sbq_msg_rd;
+ cgu_msg.dest_dev = cgu;
+ cgu_msg.msg_addr_low = addr;
+ cgu_msg.msg_addr_high = 0x0;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return err;
+}
+
+/**
+ * ice_write_cgu_reg_e822 - Write a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to write
+ * @val: value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static int
+ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg;
+ int err;
+
+ cgu_msg.opcode = ice_sbq_msg_wr;
+ cgu_msg.dest_dev = cgu;
+ cgu_msg.msg_addr_low = addr;
+ cgu_msg.msg_addr_high = 0x0;
+ cgu_msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ice_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Convert the specified TIME_REF clock frequency to a string.
+ */
+static const char *ice_clk_freq_str(u8 clk_freq)
+{
+ switch ((enum ice_time_ref_freq)clk_freq) {
+ case ICE_TIME_REF_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TIME_REF_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TIME_REF_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TIME_REF_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TIME_REF_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TIME_REF_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Convert the specified clock source to its string name.
+ */
+static const char *ice_clk_src_str(u8 clk_src)
+{
+ switch ((enum ice_clk_src)clk_src) {
+ case ICE_CLK_SRC_TCX0:
+ return "TCX0";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCX0)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ */
+static int
+ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_bwm_lf bwm_lf;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCX0 &&
+ clk_freq != ICE_TIME_REF_FREQ_25_000) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCX0 only supports 25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.field.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.field.time_ref_sel),
+ ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+ bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw24.field.ts_pll_enable) {
+ dw24.field.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.field.time_ref_freq_sel = clk_freq;
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL feedback divisor */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
+ dw19.field.tspll_ndivratio = 1;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL post divisor */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (err)
+ return err;
+
+ dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
+ dw22.field.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
+ dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
+ dw24.field.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw24.field.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Wait to verify if the PLL locks */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ if (!bwm_lf.field.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.field.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.field.time_ref_sel),
+ ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+ bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return 0;
+}
+
+/**
+ * ice_init_cgu_e822 - Initialize CGU with settings from firmware
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E822 device.
+ */
+static int ice_init_cgu_e822(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ union tspll_cntr_bist_settings cntr_bist;
+ int err;
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ &cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Disable sticky lock detection so the reported lock error is accurate */
+ cntr_bist.field.i_plllock_sel_0 = 0;
+ cntr_bist.field.i_plllock_sel_1 = 0;
+
+ err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Configure the CGU PLL using the parameters from the function
+ * capabilities.
+ */
+ err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
+ * @hw: pointer to the HW struct
+ *
+ * Set the window length used for the vernier port calibration process.
+ */
+static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
+ PTP_VERNIER_WL);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform PHC initialization steps specific to E822 devices.
+ */
+static int ice_ptp_init_phc_e822(struct ice_hw *hw)
+{
+ int err;
+ u32 regval;
+
+ /* Enable reading switch and PHY registers over the sideband queue */
+#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
+#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
+ regval = rd32(hw, PF_SB_REM_DEV_CTL);
+ regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
+ PF_SB_REM_DEV_CTL_PHY0);
+ wr32(hw, PF_SB_REM_DEV_CTL, regval);
+
+ /* Initialize the Clock Generation Unit */
+ err = ice_init_cgu_e822(hw);
+ if (err)
+ return err;
+
+ /* Set window length for all the ports */
+ return ice_ptp_set_vernier_wl(hw);
+}
+
+/**
+ * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @time: Time to initialize the PHY port clocks to
+ *
+ * Program the PHY port registers with a new initial time value. The port
+ * clock will be initialized once the driver issues an INIT_TIME sync
+ * command. The time value is the upper 32 bits of the PHY timer, usually in
+ * units of nominal nanoseconds.
+ */
+static int
+ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
+{
+ u64 phy_time;
+ u8 port;
+ int err;
+
+ /* The time represents the upper 32 bits of the PHY timer, so we need
+ * to shift to account for this when programming.
+ */
+ phy_time = (u64)time << 32;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ /* Tx case */
+ err = ice_write_64b_phy_reg_e822(hw, port,
+ P_REG_TX_TIMER_INC_PRE_L,
+ phy_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_64b_phy_reg_e822(hw, port,
+ P_REG_RX_TIMER_INC_PRE_L,
+ phy_time);
+ if (err)
+ goto exit_err;
+ }
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
+ port, err);
+
+ return err;
+}
+
+/**
+ * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
+ * @hw: pointer to HW struct
+ * @port: Port number to be programmed
+ * @time: time in cycles to adjust the port Tx and Rx clocks
+ *
+ * Program the port for an atomic adjustment by writing the Tx and Rx timer
+ * registers. The atomic adjustment won't be completed until the driver issues
+ * an ADJ_TIME command.
+ *
+ * Note that time is not in units of nanoseconds. It is in clock time
+ * including the lower sub-nanosecond portion of the port timer.
+ *
+ * Negative adjustments are supported using 2s complement arithmetic.
+ */
+int
+ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
+{
+ u32 l_time, u_time;
+ int err;
+
+ l_time = lower_32_bits(time);
+ u_time = upper_32_bits(time);
+
+ /* Tx case */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
+ port, err);
+ return err;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment in nanoseconds
+ *
+ * Prepare the PHY ports for an atomic time adjustment by programming the PHY
+ * Tx and Rx port registers. The actual adjustment is completed by issuing an
+ * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
+ */
+static int
+ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
+{
+ s64 cycles;
+ u8 port;
+
+ /* The port clock supports adjustment of the sub-nanosecond portion of
+ * the clock. We shift the provided adjustment in nanoseconds to
+ * calculate the appropriate adjustment to program into the PHY ports.
+ */
+ if (adj > 0)
+ cycles = (s64)adj << 32;
+ else
+ cycles = -(((s64)-adj) << 32);
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
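+
+/* For example, a -10 nanosecond adjustment becomes
+ * cycles = -((s64)10 << 32), the two's complement encoding of ten
+ * nanoseconds worth of port clock time, which each port applies atomically
+ * on the next ADJ_TIME command.
+ */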
+
+/**
+ * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
+ * @hw: pointer to HW struct
+ * @incval: new increment value to prepare
+ *
+ * Prepare each of the PHY ports for a new increment value by programming the
+ * port's TIMETUS registers. The new increment value will be updated after
+ * issuing an INIT_INCVAL command.
+ */
+static int
+ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
+{
+ int err;
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
+ incval);
+ if (err)
+ goto exit_err;
+ }
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
+ port, err);
+
+ return err;
+}
+
+/**
+ * ice_ptp_read_port_capture - Read a port's local time capture
+ * @hw: pointer to HW struct
+ * @port: Port number to read
+ * @tx_ts: on return, the Tx port time capture
+ * @rx_ts: on return, the Rx port time capture
+ *
+ * Read the port's Tx and Rx local time capture values.
+ *
+ * Note this has no equivalent for the E810 devices.
+ */
+static int
+ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
+ (unsigned long long)*tx_ts);
+
+ /* Rx case */
+ err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
+ (unsigned long long)*rx_ts);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to send the command to
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the requested port for an upcoming timer sync command.
+ *
+ * Note there is no equivalent of this operation on E810, as that device
+ * always handles all external PHYs internally.
+ */
+static int
+ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val, val;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_PHY_SRC;
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val |= PHY_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val |= PHY_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val |= PHY_CMD_ADJ_TIME;
+ break;
+ case READ_TIME:
+ cmd_val |= PHY_CMD_READ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
+ break;
+ }
+
+ /* Tx case */
+ /* Read, modify, write */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK;
+ val |= cmd_val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Rx case */
+ /* Read, modify, write */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK;
+ val |= cmd_val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+ * @hw: pointer to the HW struct
+ * @cmd: timer command to prepare
+ *
+ * Prepare all ports connected to this device for an upcoming timer sync
+ * command.
+ */
+static int
+ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_ptp_one_port_cmd(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
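+
+/* Minimal sketch (illustrative only, not part of this patch) of how the
+ * pieces above combine into one full E822 timer command: prepare the source
+ * timer, prepare every PHY port, then issue the synchronizing write so that
+ * all timers latch the command at the same moment.
+ */
+static int ice_ptp_tmr_cmd_sketch(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ int err;
+
+ /* First, prepare the source timer register */
+ ice_ptp_src_cmd(hw, cmd);
+
+ /* Next, prepare each of the PHY ports */
+ err = ice_ptp_port_cmd_e822(hw, cmd);
+ if (err)
+ return err;
+
+ /* Finally, trigger hardware to execute all prepared commands at once */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ return 0;
+}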
+
+/* E822 Vernier calibration functions
+ *
+ * The following functions are used as part of the vernier calibration of
+ * a port. This calibration increases the precision of the timestamps on the
+ * port.
+ */
+
+/**
+ * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
+ * @hw: pointer to HW struct
+ * @port: the port to read from
+ * @link_out: if non-NULL, holds link speed on success
+ * @fec_out: if non-NULL, holds FEC algorithm on success
+ *
+ * Read the serdes data for the PHY port and extract the link speed and FEC
+ * algorithm.
+ */
+static int
+ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+ enum ice_ptp_link_spd *link_out,
+ enum ice_ptp_fec_mode *fec_out)
+{
+ enum ice_ptp_link_spd link;
+ enum ice_ptp_fec_mode fec;
+ u32 serdes;
+ int err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
+ return err;
+ }
+
+ /* Determine the FEC algorithm */
+ fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
+
+ serdes &= P_REG_LINK_SPEED_SERDES_M;
+
+ /* Determine the link speed */
+ if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
+ switch (serdes) {
+ case ICE_PTP_SERDES_25G:
+ link = ICE_PTP_LNK_SPD_25G_RS;
+ break;
+ case ICE_PTP_SERDES_50G:
+ link = ICE_PTP_LNK_SPD_50G_RS;
+ break;
+ case ICE_PTP_SERDES_100G:
+ link = ICE_PTP_LNK_SPD_100G_RS;
+ break;
+ default:
+ return -EIO;
+ }
+ } else {
+ switch (serdes) {
+ case ICE_PTP_SERDES_1G:
+ link = ICE_PTP_LNK_SPD_1G;
+ break;
+ case ICE_PTP_SERDES_10G:
+ link = ICE_PTP_LNK_SPD_10G;
+ break;
+ case ICE_PTP_SERDES_25G:
+ link = ICE_PTP_LNK_SPD_25G;
+ break;
+ case ICE_PTP_SERDES_40G:
+ link = ICE_PTP_LNK_SPD_40G;
+ break;
+ case ICE_PTP_SERDES_50G:
+ link = ICE_PTP_LNK_SPD_50G;
+ break;
+ default:
+ return -EIO;
+ }
+ }
+
+ if (link_out)
+ *link_out = link;
+ if (fec_out)
+ *fec_out = fec;
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
+ * @hw: pointer to HW struct
+ * @port: the port to configure the quad for
+ */
+static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ int err;
+ u32 val;
+ u8 quad;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
+ err);
+ return;
+ }
+
+ quad = port / ICE_PORTS_PER_QUAD;
+
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
+ err);
+ return;
+ }
+
+ if (link_spd >= ICE_PTP_LNK_SPD_40G)
+ val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+ else
+ val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+
+ err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
+ err);
+ return;
+ }
+}
+
+/**
+ * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
+ * @hw: pointer to the HW structure
+ * @port: the port to configure
+ *
+ * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
+ * hardware clock time units (TUs). That is, determine the number of TUs per
+ * serdes unit interval, and program the UIX registers with this conversion.
+ *
+ * This conversion is used as part of the calibration process when determining
+ * the additional error of a timestamp vs the real time of transmission or
+ * receipt of the packet.
+ *
+ * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
+ * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
+ *
+ * To calculate the conversion ratio, we use the following facts:
+ *
+ * a) the clock frequency in Hz (cycles per second)
+ * b) the number of TUs per cycle (the increment value of the clock)
+ * c) 1 second per 1 billion nanoseconds
+ * d) the duration of 66 UIs in nanoseconds
+ *
+ * Given these facts, we can use the following table to work out what ratios
+ * to multiply in order to get the number of TUs per 66 UIs:
+ *
+ * cycles | 1 second | incval (TUs) | nanoseconds
+ * -------+--------------+--------------+-------------
+ * second | 1 billion ns | cycle | 66 UIs
+ *
+ * To perform the multiplication using integers without too much loss of
+ * precision, we can use the following equation:
+ *
+ * (freq * incval * 6600 LINE_UI) / (100 * 1 billion)
+ *
+ * We scale up to using 6600 UI instead of 66 in order to avoid fractional
+ * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
+ *
+ * The increment value has a maximum expected range of about 34 bits, while
+ * the frequency value is about 29 bits. Multiplying these values shouldn't
+ * overflow 64 bits. However, we must then multiply the result again by the
+ * Serdes unit interval duration. To avoid overflow here, we split the
+ * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
+ * a divide by 390,625,000. This does lose some precision, but avoids
+ * miscalculation due to arithmetic overflow.
+ */
+static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, uix;
+ int err;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second divided by 256 */
+ tu_per_sec = (cur_freq * clk_incval) >> 8;
+
+#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
+#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
+
+ /* Program the 10Gb/40Gb conversion ratio */
+ uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
+
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
+ uix);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Program the 25Gb/100Gb conversion ratio */
+ uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
+
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
+ uix);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
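+
+/* Worked example for the 10G/40G ratio above, assuming the nominal case
+ * where the increment value is chosen such that one nanosecond equals 2^32
+ * TUs (the timer keeps nanoseconds in its upper 32 bits): tu_per_sec is
+ * then 10^9 * 2^32, and
+ *
+ * uix = ((10^9 * 2^32) >> 8) * 640 / 390625000 = 6.4 * 2^32
+ *
+ * i.e. exactly 6.4 ns worth of TUs per 66 UIs, matching LINE_UI_10G_40G.
+ */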
+
+/**
+ * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ *
+ * Configure the number of TUs for the PAR and PCS clocks used as part of the
+ * timestamp calibration process. This depends on the link speed, as the PHY
+ * uses different markers depending on the speed.
+ *
+ * 1Gb/10Gb/25Gb:
+ * - Tx/Rx PAR/PCS markers
+ *
+ * 25Gb RS:
+ * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
+ *
+ * 40Gb/50Gb:
+ * - Tx/Rx PAR/PCS markers
+ * - Rx Deskew PAR/PCS markers
+ *
+ * 50Gb RS and 100Gb RS:
+ * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
+ * - Rx Deskew PAR/PCS markers
+ * - Tx PAR/PCS markers
+ *
+ * To calculate the conversion, we use the PHC clock frequency (cycles per
+ * second), the increment value (TUs per cycle), and the related PHY clock
+ * frequency to calculate the TUs per unit of the PHY link clock. The
+ * following table shows how the units convert:
+ *
+ * cycles | TUs | second
+ * -------+-------+--------
+ * second | cycle | cycles
+ *
+ * For each conversion register, look up the appropriate frequency from the
+ * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
+ * this to the appropriate register, preparing hardware to perform timestamp
+ * calibration to calculate the total Tx or Rx offset to adjust the timestamp
+ * in order to calibrate for the internal PHY delays.
+ *
+ * Note that the increment value ranges up to ~34 bits, and the clock
+ * frequency is ~29 bits, so multiplying them together should fit within the
+ * 64 bit arithmetic.
+ */
+static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per cycle of the PHC clock */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* For each PHY conversion register, look up the appropriate link
+ * speed frequency and determine the TUs per that clock's cycle time.
+ * Split this into a high and low value and then program the
+ * appropriate register. If that link speed does not use the
+ * associated register, write zeros to clear it instead.
+ */
+
+ /* P_REG_PAR_TX_TUS */
+ if (e822_vernier[link_spd].tx_par_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_par_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PAR_RX_TUS */
+ if (e822_vernier[link_spd].rx_par_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_par_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PCS_TX_TUS */
+ if (e822_vernier[link_spd].tx_pcs_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_pcs_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PCS_RX_TUS */
+ if (e822_vernier[link_spd].rx_pcs_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_pcs_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PAR_TX_TUS */
+ if (e822_vernier[link_spd].tx_desk_rsgb_par)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_par);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PAR_RX_TUS */
+ if (e822_vernier[link_spd].rx_desk_rsgb_par)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_par);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PCS_TX_TUS */
+ if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_pcs);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PCS_RX_TUS */
+ if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_pcs);
+ else
+ phy_tus = 0;
+
+ return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
+ phy_tus);
+}
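+
+/* Worked example, using the ICE_PTP_LNK_SPD_40G entry of the e822_vernier
+ * table in ice_ptp_consts.h: tx_par_clk is 257812500 Hz, so in the nominal
+ * case of tu_per_sec = 10^9 * 2^32 (2^32 TUs per nanosecond), the value
+ * programmed is phy_tus = tu_per_sec / 257812500, about 3.879 ns worth of
+ * TUs per PAR clock cycle.
+ */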
+
+/**
+ * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
+ * @hw: pointer to the HW struct
+ * @link_spd: the Link speed to calculate for
+ *
+ * Calculate the fixed offset due to known static latency data.
+ */
+static u64
+ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* Calculate number of TUs to add for the fixed Tx latency. Since the
+ * latency measurement is in 1/100th of a nanosecond, we need to
+ * multiply by tu_per_sec and then divide by 1e11. This calculation
+ * overflows 64 bit integer arithmetic, so break it up into two
+ * divisions by 1e4 first then by 1e7.
+ */
+ fixed_offset = div_u64(tu_per_sec, 10000);
+ fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
+ fixed_offset = div_u64(fixed_offset, 10000000);
+
+ return fixed_offset;
+}
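+
+/* Worked example: the ICE_PTP_LNK_SPD_50G entry of e822_vernier has
+ * tx_fixed_delay = 2778, i.e. 27.78 ns. In the nominal case of
+ * tu_per_sec = 10^9 * 2^32, the split division gives
+ * ((10^9 * 2^32) / 10^4) * 2778 / 10^7 = 27.78 * 2^32, the same 27.78 ns
+ * expressed in TUs, with no intermediate value overflowing 64 bits.
+ */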
+
+/**
+ * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
+ * adjust Tx timestamps by. This is calculated by combining some known static
+ * latency along with the Vernier offset computations done by hardware.
+ *
+ * This function must be called only after the offset registers are valid,
+ * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
+ * has measured the offset.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ */
+static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset, val;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+
+ /* Read the first Vernier offset from the PHY register and add it to
+ * the total offset.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_1G ||
+ link_spd == ICE_PTP_LNK_SPD_10G ||
+ link_spd == ICE_PTP_LNK_SPD_25G ||
+ link_spd == ICE_PTP_LNK_SPD_25G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_PCS_TX_OFFSET_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* For Tx, we only need to use the second Vernier offset for
+ * multi-lane link speeds with RS-FEC. The lanes will always be
+ * aligned.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_TX_TIME_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* Now that the total offset has been calculated, program it to the
+ * PHY and indicate that the Tx offset is ready. After this,
+ * timestamps will be enabled.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_fixed_tx_offset_e822 - Configure Tx offset for bypass mode
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Calculate and program the fixed Tx offset, and indicate that the offset is
+ * ready. This can be used when operating in bypass mode.
+ */
+static int
+ice_phy_cfg_fixed_tx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+
+ /* Program the fixed Tx offset into the P_REG_TOTAL_TX_OFFSET_L
+ * register, then indicate that the Tx offset is ready. After this,
+ * timestamps will be enabled.
+ *
+ * Note that this skips including the more precise offsets generated
+ * by the Vernier calibration.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to adjust for
+ * @link_spd: the current link speed of the PHY
+ * @fec_mode: the current FEC mode of the PHY
+ * @pmd_adj: on return, the amount to adjust the Rx total offset by
+ *
+ * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
+ * This varies by link speed and FEC mode. The value calculated accounts for
+ * various delays caused when receiving a packet.
+ */
+static int
+ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
+ enum ice_ptp_link_spd link_spd,
+ enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
+ u8 pmd_align;
+ u32 val;
+ int err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
+ err);
+ return err;
+ }
+
+ pmd_align = (u8)val;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* The PMD alignment adjustment measurement depends on the link speed,
+ * and whether FEC is enabled. For each link speed, the alignment
+ * adjustment is calculated by dividing a value by the length of
+ * a Time Unit in nanoseconds.
+ *
+ * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
+ * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
+ * 10G w/FEC: align * 0.1 * 32/33
+ * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
+ * 25G w/FEC: align * 0.4 * 32/33
+ * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
+ * 40G w/FEC: align * 0.1 * 32/33
+ * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
+ * 50G w/FEC: align * 0.8 * 32/33
+ *
+ * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
+ *
+ * To allow for calculating this value using integer arithmetic, we
+ * instead start with the number of TUs per second (the inverse of the
+ * length of a Time Unit in nanoseconds), multiply by a value based
+ * on the PMD alignment register, and then divide by the right value
+ * calculated based on the table above. To avoid integer overflow this
+ * division is broken up into a step of dividing by 125 first.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_1G) {
+ if (pmd_align == 4)
+ mult = 10;
+ else
+ mult = (pmd_align + 6) % 10;
+ } else if (link_spd == ICE_PTP_LNK_SPD_10G ||
+ link_spd == ICE_PTP_LNK_SPD_25G ||
+ link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G) {
+ /* If Clause 74 FEC, always calculate PMD adjust */
+ if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
+ mult = pmd_align;
+ else
+ mult = 0;
+ } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ if (pmd_align < 17)
+ mult = pmd_align + 40;
+ else
+ mult = pmd_align;
+ } else {
+ ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
+ link_spd);
+ mult = 0;
+ }
+
+ /* In some cases, there's no need to adjust for the PMD alignment */
+ if (!mult) {
+ *pmd_adj = 0;
+ return 0;
+ }
+
+ /* Calculate the adjustment by multiplying TUs per second by the
+ * appropriate multiplier and divisor. To avoid overflow, we first
+ * divide by 125, and then handle remaining divisor based on the link
+ * speed pmd_adj_divisor value.
+ */
+ adj = div_u64(tu_per_sec, 125);
+ adj *= mult;
+ adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
+ * cycle count is necessary.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
+ u64 cycle_adj;
+ u8 rx_cycle;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
+ &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
+ err);
+ return err;
+ }
+
+ rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
+ if (rx_cycle) {
+ mult = (4 - rx_cycle) * 40;
+
+ cycle_adj = div_u64(tu_per_sec, 125);
+ cycle_adj *= mult;
+ cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ adj += cycle_adj;
+ }
+ } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
+ u64 cycle_adj;
+ u8 rx_cycle;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
+ &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
+ err);
+ return err;
+ }
+
+ rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
+ if (rx_cycle) {
+ mult = rx_cycle * 40;
+
+ cycle_adj = div_u64(tu_per_sec, 125);
+ cycle_adj *= mult;
+ cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ adj += cycle_adj;
+ }
+ }
+
+ /* Return the calculated adjustment */
+ *pmd_adj = adj;
+
+ return 0;
+}
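+
+/* Worked example checking the decomposition against the table above for
+ * 40G, where pmd_adj_divisor is 82500000 and mult is the raw alignment:
+ *
+ * adj = (tu_per_sec / 125) * align / 82500000
+ * = tu_per_sec * align / (125 * 82500000)
+ *
+ * and 1 / (125 * 82500000) seconds is 0.1 * 32/33 nanoseconds, matching
+ * the "40G: align * 0.1 * 32/33" row.
+ */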
+
+/**
+ * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
+ * @hw: pointer to HW struct
+ * @link_spd: The Link speed to calculate for
+ *
+ * Determine the fixed Rx latency for a given link speed.
+ */
+static u64
+ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* Calculate number of TUs to add for the fixed Rx latency. Since the
+ * latency measurement is in 1/100th of a nanosecond, we need to
+ * multiply by tu_per_sec and then divide by 1e11. This calculation
+ * overflows 64 bit integer arithmetic, so break it up into two
+ * divisions by 1e4 first then by 1e7.
+ */
+ fixed_offset = div_u64(tu_per_sec, 10000);
+ fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
+ fixed_offset = div_u64(fixed_offset, 10000000);
+
+ return fixed_offset;
+}
+
+/**
+ * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
+ * adjust Rx timestamps by. This combines calculations from the Vernier offset
+ * measurements taken in hardware with some data about known fixed delay as
+ * well as adjusting for multi-lane alignment delay.
+ *
+ * This function must be called only after the offset registers are valid,
+ * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
+ * has measured the offset.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ */
+static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset, pmd, val;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+
+ /* Read the first Vernier offset from the PHY register and add it to
+ * the total offset.
+ */
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_PCS_RX_OFFSET_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+
+ /* For Rx, all multi-lane link speeds include a second Vernier
+ * calibration, because the lanes might not be aligned.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G ||
+ link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_RX_TIME_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* In addition, Rx must account for the PMD alignment */
+ err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
+ if (err)
+ return err;
+
+ /* For RS-FEC, this adjustment adds delay, but for other modes, it
+ * subtracts delay.
+ */
+ if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
+ total_offset += pmd;
+ else
+ total_offset -= pmd;
+
+ /* Now that the total offset has been calculated, program it to the
+ * PHY and indicate that the Rx offset is ready. After this,
+ * timestamps will be enabled.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
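+
+/* Summary sketch of the total Rx offset programmed above (this mirrors the
+ * steps of ice_phy_cfg_rx_offset_e822 and adds no new behavior):
+ *
+ *   total = fixed(link_spd)            from ice_calc_fixed_rx_offset_e822
+ *         + P_REG_PAR_PCS_RX_OFFSET    first Vernier measurement
+ *         + P_REG_PAR_RX_TIME          multi-lane link speeds only
+ *         +/- pmd                      '+' for RS-FEC, '-' otherwise
+ */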
+
+/**
+ * ice_phy_cfg_fixed_rx_offset_e822 - Configure fixed Rx offset for bypass mode
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Calculate and program the fixed Rx offset, and indicate that the offset is
+ * ready. This can be used when operating in bypass mode.
+ */
+static int
+ice_phy_cfg_fixed_rx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+
+ /* Program the fixed Rx offset into the P_REG_TOTAL_RX_OFFSET_L
+ * register, then indicate that the Rx offset is ready. After this,
+ * timestamps will be enabled.
+ *
+ * Note that this skips including the more precise offsets generated
+ * by Vernier calibration.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @phy_time: on return, the 64bit PHY timer value
+ * @phc_time: on return, the lower 64bits of PHC time
+ *
+ * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
+ * timer values.
+ */
+static int
+ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
+ u64 *phc_time)
+{
+ u64 tx_time, rx_time;
+ u32 zo, lo;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ /* Prepare the PHC timer for a READ_TIME capture command */
+ ice_ptp_src_cmd(hw, READ_TIME);
+
+ /* Prepare the PHY timer for a READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
+ if (err)
+ return err;
+
+ /* Issue the sync to start the READ_TIME capture */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Read the captured PHC time from the shadow time registers */
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ *phc_time = (u64)lo << 32 | zo;
+
+ /* Read the captured PHY time from the PHY shadow registers */
+ err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
+ if (err)
+ return err;
+
+ /* If the PHY Tx and Rx timers don't match, log a warning message.
+ * Note that this should not happen in normal circumstances since the
+ * driver always programs them together.
+ */
+ if (tx_time != rx_time)
+ dev_warn(ice_hw_to_dev(hw),
+ "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
+ port, (unsigned long long)tx_time,
+ (unsigned long long)rx_time);
+
+ *phy_time = tx_time;
+
+ return 0;
+}
+
+/**
+ * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to synchronize
+ *
+ * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
+ * This is done by issuing a READ_TIME command which triggers a simultaneous
+ * read of the PHY timer and PHC timer. Then we use the difference to
+ * calculate an appropriate 2s complement adjustment to add to the PHY timer in
+ * order to ensure it reads the same value as the primary PHC timer.
+ */
+static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
+{
+ u64 phc_time, phy_time, difference;
+ int err;
+
+ if (!ice_ptp_lock(hw)) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ /* Calculate the amount required to add to the port time in order for
+ * it to match the PHC time.
+ *
+ * Note that the port adjustment is done using 2s complement
+ * arithmetic. This is convenient since it means that we can simply
+ * calculate the difference between the PHC time and the port time,
+ * and it will be interpreted correctly.
+ */
+ difference = phc_time - phy_time;
+
+ err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
+ if (err)
+ goto err_unlock;
+
+ err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
+ if (err)
+ goto err_unlock;
+
+ /* Issue the sync to activate the time adjustment */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Re-capture the timer values to flush the command registers and
+ * verify that the time was properly adjusted.
+ */
+ err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ dev_info(ice_hw_to_dev(hw),
+ "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
+ port, (unsigned long long)phy_time,
+ (unsigned long long)phc_time);
+
+ ice_ptp_unlock(hw);
+
+ return 0;
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
+}
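+
+/* Illustration of the 2s complement adjustment used above (example values
+ * only): if phc_time = 0x1000 and phy_time = 0x1200, the u64 subtraction
+ * phc_time - phy_time wraps to 0xFFFFFFFFFFFFFE00, which reinterpreted as
+ * an s64 is -0x200. Adding that to the PHY timer moves it back by 0x200
+ * TUs to match the PHC, with no explicit sign handling required.
+ */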
+
+/**
+ * ice_stop_phy_timer_e822 - Stop the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to stop
+ * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
+ *
+ * Stop the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ */
+int
+ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
+{
+ int err;
+ u32 val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
+ if (err)
+ return err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_START_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_ENA_CLK_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ if (soft_reset) {
+ val |= P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_start_phy_timer_e822 - Start the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to start
+ * @bypass: if true, start the PHY in bypass mode
+ *
+ * Start the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Bypass mode enables timestamps immediately without waiting for Vernier
+ * calibration to complete. Hardware will continue taking Vernier
+ * measurements on Tx and Rx packets, but they will not be applied to
+ * timestamps. Use ice_phy_exit_bypass_e822 to exit bypass mode once hardware
+ * has completed offset calculation.
+ */
+int
+ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
+{
+ u32 lo, hi, val;
+ u64 incval;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ err = ice_stop_phy_timer_e822(hw, port, false);
+ if (err)
+ return err;
+
+ ice_phy_cfg_lane_e822(hw, port);
+
+ err = ice_phy_cfg_uix_e822(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_parpcs_e822(hw, port);
+ if (err)
+ return err;
+
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ incval = (u64)hi << 32 | lo;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_START_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ val |= P_REG_PS_ENA_CLK_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_LOAD_OFFSET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ err = ice_sync_phy_timer_e822(hw, port);
+ if (err)
+ return err;
+
+ if (bypass) {
+ val |= P_REG_PS_BYPASS_MODE_M;
+ /* Enter BYPASS mode, enabling timestamps immediately. */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ /* Program the fixed Tx offset */
+ err = ice_phy_cfg_fixed_tx_offset_e822(hw, port);
+ if (err)
+ return err;
+
+ /* Program the fixed Rx offset */
+ err = ice_phy_cfg_fixed_rx_offset_e822(hw, port);
+ if (err)
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
+
+ return 0;
+}
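+
+/* Illustrative calling sequence for the Vernier flow (a sketch with error
+ * handling omitted, not a new API):
+ *
+ *   ice_stop_phy_timer_e822(hw, port, true);
+ *   ice_start_phy_timer_e822(hw, port, true);   (bypass mode: timestamps
+ *                                                usable with fixed offsets)
+ *   ... at least one packet sent and received ...
+ *   ice_phy_exit_bypass_e822(hw, port);         (returns -EBUSY until the
+ *                                                OV_STATUS bits are set)
+ */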
+
+/**
+ * ice_phy_exit_bypass_e822 - Exit bypass mode after Vernier calculations
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * After hardware finishes Vernier calculations for the Tx and Rx offsets, this
+ * function can be used to exit bypass mode by updating the total Tx and Rx
+ * offsets, and then disabling bypass. This will enable hardware to include
+ * the more precise offset calibrations, increasing precision of the generated
+ * timestamps.
+ *
+ * This cannot be done until hardware has measured the offsets, which requires
+ * waiting until at least one packet has been sent and received by the device.
+ */
+int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(val & P_REG_TX_OV_STATUS_OV_M)) {
+ ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n",
+ port);
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(val & P_REG_RX_OV_STATUS_OV_M)) {
+ ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n",
+ port);
+ return -EBUSY;
+ }
+
+ err = ice_phy_cfg_tx_offset_e822(hw, port);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ err = ice_phy_cfg_rx_offset_e822(hw, port);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ /* Exit bypass mode now that the offset has been updated */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read P_REG_PS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(val & P_REG_PS_BYPASS_MODE_M))
+ ice_debug(hw, ICE_DBG_PTP, "Port %u not in bypass mode\n",
+ port);
+
+ val &= ~P_REG_PS_BYPASS_MODE_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to disable bypass for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ dev_info(ice_hw_to_dev(hw), "Exiting bypass mode on PHY port %u\n",
+ port);
+
+ return 0;
+}
+
/* E810 functions
*
* The following functions operate on the E810 series devices which use
@@ -68,18 +2538,18 @@ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
- int status;
+ int err;
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_rd;
msg.dest_dev = rmn_0;
- status = ice_sbq_rw_reg(hw, &msg);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
- status);
- return status;
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
}
*val = msg.data;
@@ -98,7 +2568,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input msg = {0};
- int status;
+ int err;
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
@@ -106,11 +2576,11 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.dest_dev = rmn_0;
msg.data = val;
- status = ice_sbq_rw_reg(hw, &msg);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
- status);
- return status;
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -130,23 +2600,23 @@ static int
ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
{
u32 lo_addr, hi_addr, lo, hi;
- int status;
+ int err;
lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
- status = ice_read_phy_reg_e810(hw, lo_addr, &lo);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_read_phy_reg_e810(hw, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
}
- status = ice_read_phy_reg_e810(hw, hi_addr, &hi);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_read_phy_reg_e810(hw, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
}
/* For E810 devices, the timestamp is reported with the lower 32 bits
@@ -169,23 +2639,23 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
{
u32 lo_addr, hi_addr;
- int status;
+ int err;
lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
- status = ice_write_phy_reg_e810(hw, lo_addr, 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, hi_addr, 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, hi_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -200,17 +2670,32 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
*/
int ice_ptp_init_phy_e810(struct ice_hw *hw)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
- GLTSYN_ENA_TSYN_ENA_M);
- if (status)
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
+ GLTSYN_ENA_TSYN_ENA_M);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
- status);
+ err);
- return status;
+ return err;
+}
+
+/**
+ * ice_ptp_init_phc_e810 - Perform E810-specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E810-specific PTP hardware clock initialization steps.
+ */
+static int ice_ptp_init_phc_e810(struct ice_hw *hw)
+{
+ /* Ensure synchronization delay is zero */
+ wr32(hw, GLTSYN_SYNC_DLAY, 0);
+
+ /* Initialize the PHY */
+ return ice_ptp_init_phy_e810(hw);
}
/**
@@ -227,22 +2712,22 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw)
*/
static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -263,26 +2748,26 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
*/
static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
* nanoseconds. Sub-nanosecond adjustment is not supported.
*/
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -300,25 +2785,25 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
{
u32 high, low;
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -335,7 +2820,7 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
- int status;
+ int err;
switch (cmd) {
case INIT_TIME:
@@ -356,20 +2841,20 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
}
/* Read, modify, write */
- status = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, status %d\n", status);
- return status;
+ err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
+ return err;
}
/* Modify necessary bits only and perform write */
val &= ~TS_CMD_MASK_E810;
val |= cmd_val;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, status %d\n", status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
+ return err;
}
return 0;
@@ -377,12 +2862,9 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
/* Device agnostic functions
*
- * The following functions implement useful behavior to hide the differences
- * between E810 and other devices. They call the device-specific
- * implementations where necessary.
- *
- * Currently, the driver only supports E810, but future work will enable
- * support for E822-based devices.
+ * The following functions implement shared behavior common to both E822 and
+ * E810 devices, possibly calling a device specific implementation where
+ * necessary.
*/
/**
@@ -433,42 +2915,6 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
- * ice_ptp_src_cmd - Prepare source timer for a timer command
- * @hw: pointer to HW structure
- * @cmd: Timer command
- *
- * Prepare the source timer for an upcoming timer sync command.
- */
-static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
-{
- u32 cmd_val;
- u8 tmr_idx;
-
- tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_CPK_SRC;
-
- switch (cmd) {
- case INIT_TIME:
- cmd_val |= GLTSYN_CMD_INIT_TIME;
- break;
- case INIT_INCVAL:
- cmd_val |= GLTSYN_CMD_INIT_INCVAL;
- break;
- case ADJ_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_TIME;
- break;
- case ADJ_TIME_AT_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
- break;
- case READ_TIME:
- cmd_val |= GLTSYN_CMD_READ_TIME;
- break;
- }
-
- wr32(hw, GLTSYN_CMD, cmd_val);
-}
-
-/**
* ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
* @hw: pointer to HW struct
* @cmd: the command to issue
@@ -480,23 +2926,26 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
- int status;
+ int err;
/* First, prepare the source timer */
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- status = ice_ptp_port_cmd_e810(hw, cmd);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n",
- cmd, status);
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_port_cmd_e810(hw, cmd);
+ else
+ err = ice_ptp_port_cmd_e822(hw, cmd);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
+ cmd, err);
+ return err;
}
- /* Write the sync command register to drive both source and PHY timer commands
- * synchronously
+ /* Write the sync command register to drive both source and PHY timer
+ * commands synchronously
*/
- wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+ ice_ptp_exec_tmr_cmd(hw);
return 0;
}
@@ -516,8 +2965,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
int ice_ptp_init_time(struct ice_hw *hw, u64 time)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -528,9 +2977,12 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
+ else
+ err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, INIT_TIME);
}
@@ -551,8 +3003,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
*/
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -560,9 +3012,12 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- status = ice_ptp_prep_phy_incval_e810(hw, incval);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_incval_e810(hw, incval);
+ else
+ err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
}
@@ -576,16 +3031,16 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
*/
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
{
- int status;
+ int err;
if (!ice_ptp_lock(hw))
return -EBUSY;
- status = ice_ptp_write_incval(hw, incval);
+ err = ice_ptp_write_incval(hw, incval);
ice_ptp_unlock(hw);
- return status;
+ return err;
}
/**
@@ -603,8 +3058,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
*/
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -616,9 +3071,12 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- status = ice_ptp_prep_phy_adj_e810(hw, adj);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_adj_e810(hw, adj);
+ else
+ err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, ADJ_TIME);
}
@@ -630,11 +3088,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
* @idx: the timestamp index to read
* @tstamp: on return, the 40bit timestamp value
*
- * Read a 40bit timestamp value out of the timestamp block.
+ * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
+ * the block is the quad to read from. For E810 devices, the block is the
+ * logical port to read from.
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+ if (ice_is_e810(hw))
+ return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+ else
+ return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
}
/**
@@ -643,11 +3106,16 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
* @block: the block to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the timestamp block.
+ * Clear a timestamp from the timestamp block, resetting its valid bit. For
+ * E822 devices, the block is the quad to clear from. For E810 devices, the
+ * block is the logical port to clear from.
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- return ice_clear_phy_tstamp_e810(hw, block, idx);
+ if (ice_is_e810(hw))
+ return ice_clear_phy_tstamp_e810(hw, block, idx);
+ else
+ return ice_clear_phy_tstamp_e822(hw, block, idx);
}
/* E810T SMA functions
@@ -800,3 +3268,25 @@ bool ice_is_pca9575_present(struct ice_hw *hw)
return !status && handle;
}
+
+/**
+ * ice_ptp_init_phc - Initialize PTP hardware clock
+ * @hw: pointer to the HW struct
+ *
+ * Perform the steps required to initialize the PTP hardware clock.
+ */
+int ice_ptp_init_phc(struct ice_hw *hw)
+{
+ u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Enable source clocks */
+ wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Clear event err indications for auxiliary pins */
+ (void)rd32(hw, GLTSYN_STAT(src_idx));
+
+ if (ice_is_e810(hw))
+ return ice_ptp_init_phc_e810(hw);
+ else
+ return ice_ptp_init_phc_e822(hw);
+}
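+
+/* Sketch of the resulting initialization order (a summary of the code
+ * above, not additional behavior): ice_ptp_init_phc() enables the source
+ * clock and clears stale GLTSYN_STAT events, then calls the device
+ * specific helper: ice_ptp_init_phc_e810() for E810, which zeroes the
+ * sync delay and enables the PHY timesync block, or
+ * ice_ptp_init_phc_e822() otherwise.
+ */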
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index b2984b5c22c1..519e75462e67 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -12,6 +12,112 @@ enum ice_ptp_tmr_cmd {
READ_TIME
};
+enum ice_ptp_serdes {
+ ICE_PTP_SERDES_1G,
+ ICE_PTP_SERDES_10G,
+ ICE_PTP_SERDES_25G,
+ ICE_PTP_SERDES_40G,
+ ICE_PTP_SERDES_50G,
+ ICE_PTP_SERDES_100G
+};
+
+enum ice_ptp_link_spd {
+ ICE_PTP_LNK_SPD_1G,
+ ICE_PTP_LNK_SPD_10G,
+ ICE_PTP_LNK_SPD_25G,
+ ICE_PTP_LNK_SPD_25G_RS,
+ ICE_PTP_LNK_SPD_40G,
+ ICE_PTP_LNK_SPD_50G,
+ ICE_PTP_LNK_SPD_50G_RS,
+ ICE_PTP_LNK_SPD_100G_RS,
+ NUM_ICE_PTP_LNK_SPD /* Must be last */
+};
+
+enum ice_ptp_fec_mode {
+ ICE_PTP_FEC_MODE_NONE,
+ ICE_PTP_FEC_MODE_CLAUSE74,
+ ICE_PTP_FEC_MODE_RS_FEC
+};
+
+/**
+ * struct ice_time_ref_info_e822
+ * @pll_freq: Frequency of PLL that drives timer ticks in Hz
+ * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
+ * @pps_delay: propagation delay of the PPS output signal
+ *
+ * Characteristic information for the various TIME_REF sources possible in the
+ * E822 devices
+ */
+struct ice_time_ref_info_e822 {
+ u64 pll_freq;
+ u64 nominal_incval;
+ u8 pps_delay;
+};
+
+/**
+ * struct ice_vernier_info_e822
+ * @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS
+ * @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS
+ * @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS
+ * @rx_pcs_clk: Frequency used to calculate P_REG_PCS_RX_TUS
+ * @tx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_TX_TUS
+ * @rx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_RX_TUS
+ * @tx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_TX_TUS
+ * @rx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_RX_TUS
+ * @tx_fixed_delay: Fixed Tx latency measured in 1/100th nanoseconds
+ * @pmd_adj_divisor: Divisor used to calculate PMD alignment adjustment
+ * @rx_fixed_delay: Fixed Rx latency measured in 1/100th nanoseconds
+ *
+ * Table of constants used as part of the Vernier calibration of the Tx
+ * and Rx timestamps. This includes frequency values used to compute TUs per
+ * PAR/PCS clock cycle, and static delay values measured during hardware
+ * design.
+ *
+ * Note that some values are not used for all link speeds, and the
+ * P_REG_DESK_PAR* registers may represent different clock markers at
+ * different link speeds, either the deskew marker for multi-lane link speeds
+ * or the Reed Solomon gearbox marker for RS-FEC.
+ */
+struct ice_vernier_info_e822 {
+ u32 tx_par_clk;
+ u32 rx_par_clk;
+ u32 tx_pcs_clk;
+ u32 rx_pcs_clk;
+ u32 tx_desk_rsgb_par;
+ u32 rx_desk_rsgb_par;
+ u32 tx_desk_rsgb_pcs;
+ u32 rx_desk_rsgb_pcs;
+ u32 tx_fixed_delay;
+ u32 pmd_adj_divisor;
+ u32 rx_fixed_delay;
+};
+
+/**
+ * struct ice_cgu_pll_params_e822
+ * @refclk_pre_div: Reference clock pre-divisor
+ * @feedback_div: Feedback divisor
+ * @frac_n_div: Fractional divisor
+ * @post_pll_div: Post PLL divisor
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF frequency.
+ */
+struct ice_cgu_pll_params_e822 {
+ u32 refclk_pre_div;
+ u32 feedback_div;
+ u32 frac_n_div;
+ u32 post_pll_div;
+};
+
+extern const struct
+ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+
+/* Table of constants related to possible TIME_REF sources */
+extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+
+/* Table of constants for Vernier calibration on E822 */
+extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
+
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
*/
@@ -27,6 +133,59 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
+int ice_ptp_init_phc(struct ice_hw *hw);
+
+/* E822 family functions */
+int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
+int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val);
+int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
+int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
+int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time);
+
+/**
+ * ice_e822_time_ref - Get the current TIME_REF from capabilities
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current TIME_REF from the capabilities structure.
+ */
+static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
+{
+ return hw->func_caps.ts_func_info.time_ref;
+}
+
+/**
+ * ice_set_e822_time_ref - Set new TIME_REF
+ * @hw: pointer to the HW structure
+ * @time_ref: new TIME_REF to set
+ *
+ * Update the TIME_REF in the capabilities structure in response to some
+ * change, such as an update to the CGU registers.
+ */
+static inline void
+ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+{
+ hw->func_caps.ts_func_info.time_ref = time_ref;
+}
+
+static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].pll_freq;
+}
+
+static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].nominal_incval;
+}
+
+static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].pps_delay;
+}
+
+/* E822 Vernier calibration functions */
+int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass);
+int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
@@ -36,19 +195,194 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
+#define ICE_PTP_CLOCK_INDEX_0 0x00
+#define ICE_PTP_CLOCK_INDEX_1 0x01
+
/* PHY timer commands */
#define SEL_CPK_SRC 8
+#define SEL_PHY_SRC 3
/* Time Sync command Definitions */
#define GLTSYN_CMD_INIT_TIME BIT(0)
#define GLTSYN_CMD_INIT_INCVAL BIT(1)
+#define GLTSYN_CMD_INIT_TIME_INCVAL (BIT(0) | BIT(1))
#define GLTSYN_CMD_ADJ_TIME BIT(2)
#define GLTSYN_CMD_ADJ_INIT_TIME (BIT(2) | BIT(3))
#define GLTSYN_CMD_READ_TIME BIT(7)
+/* PHY port Time Sync command definitions */
+#define PHY_CMD_INIT_TIME BIT(0)
+#define PHY_CMD_INIT_INCVAL BIT(1)
+#define PHY_CMD_ADJ_TIME (BIT(0) | BIT(1))
+#define PHY_CMD_ADJ_TIME_AT_TIME (BIT(0) | BIT(2))
+#define PHY_CMD_READ_TIME (BIT(0) | BIT(1) | BIT(2))
+
#define TS_CMD_MASK_E810 0xFF
+#define TS_CMD_MASK 0xF
#define SYNC_EXEC_CMD 0x3
+/* Macros to derive port low and high addresses on both quads */
+#define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF)
+#define P_Q0_H(a, p) ((((a) + (0x2000 * (p)))) >> 16)
+#define P_Q1_L(a, p) ((((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD)))) & 0xFFFF)
+#define P_Q1_H(a, p) ((((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD)))) >> 16)
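+
+/* Worked example (illustrative; assumes these macros are applied to a
+ * base-plus-offset address, e.g. P_Q0_L(P_0_BASE + offset, port)): for
+ * port 2 and a = P_0_BASE + P_REG_PS = 0x80408, the port stride gives
+ * 0x80408 + 2 * 0x2000 = 0x84408, so P_Q0_L(a, 2) = 0x4408 and
+ * P_Q0_H(a, 2) = 0x8.
+ */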
+
+/* PHY QUAD register base addresses */
+#define Q_0_BASE 0x94000
+#define Q_1_BASE 0x114000
+
+/* Timestamp memory reset registers */
+#define Q_REG_TS_CTRL 0x618
+#define Q_REG_TS_CTRL_S 0
+#define Q_REG_TS_CTRL_M BIT(0)
+
+/* Timestamp availability status registers */
+#define Q_REG_TX_MEMORY_STATUS_L 0xCF0
+#define Q_REG_TX_MEMORY_STATUS_U 0xCF4
+
+/* Tx FIFO status registers */
+#define Q_REG_FIFO23_STATUS 0xCF8
+#define Q_REG_FIFO01_STATUS 0xCFC
+#define Q_REG_FIFO02_S 0
+#define Q_REG_FIFO02_M ICE_M(0x3FF, 0)
+#define Q_REG_FIFO13_S 10
+#define Q_REG_FIFO13_M ICE_M(0x3FF, 10)
+
+/* Interrupt control Config registers */
+#define Q_REG_TX_MEM_GBL_CFG 0xC08
+#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0
+#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0)
+#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1
+#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1)
+#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9
+#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9)
+#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15
+#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15)
+
+/* Tx Timestamp data registers */
+#define Q_REG_TX_MEMORY_BANK_START 0xA00
+
+/* PHY port register base addresses */
+#define P_0_BASE 0x80000
+#define P_4_BASE 0x106000
+
+/* Timestamp init registers */
+#define P_REG_RX_TIMER_INC_PRE_L 0x46C
+#define P_REG_RX_TIMER_INC_PRE_U 0x470
+#define P_REG_TX_TIMER_INC_PRE_L 0x44C
+#define P_REG_TX_TIMER_INC_PRE_U 0x450
+
+/* Timestamp match and adjust target registers */
+#define P_REG_RX_TIMER_CNT_ADJ_L 0x474
+#define P_REG_RX_TIMER_CNT_ADJ_U 0x478
+#define P_REG_TX_TIMER_CNT_ADJ_L 0x454
+#define P_REG_TX_TIMER_CNT_ADJ_U 0x458
+
+/* Timestamp capture registers */
+#define P_REG_RX_CAPTURE_L 0x4D8
+#define P_REG_RX_CAPTURE_U 0x4DC
+#define P_REG_TX_CAPTURE_L 0x4B4
+#define P_REG_TX_CAPTURE_U 0x4B8
+
+/* Timestamp PHY incval registers */
+#define P_REG_TIMETUS_L 0x410
+#define P_REG_TIMETUS_U 0x414
+
+#define P_REG_40B_LOW_M 0xFF
+#define P_REG_40B_HIGH_S 8
+
+/* PHY window length registers */
+#define P_REG_WL 0x40C
+
+#define PTP_VERNIER_WL 0x111ed
+
+/* PHY start registers */
+#define P_REG_PS 0x408
+#define P_REG_PS_START_S 0
+#define P_REG_PS_START_M BIT(0)
+#define P_REG_PS_BYPASS_MODE_S 1
+#define P_REG_PS_BYPASS_MODE_M BIT(1)
+#define P_REG_PS_ENA_CLK_S 2
+#define P_REG_PS_ENA_CLK_M BIT(2)
+#define P_REG_PS_LOAD_OFFSET_S 3
+#define P_REG_PS_LOAD_OFFSET_M BIT(3)
+#define P_REG_PS_SFT_RESET_S 11
+#define P_REG_PS_SFT_RESET_M BIT(11)
+
+/* PHY offset valid registers */
+#define P_REG_TX_OV_STATUS 0x4D4
+#define P_REG_TX_OV_STATUS_OV_S 0
+#define P_REG_TX_OV_STATUS_OV_M BIT(0)
+#define P_REG_RX_OV_STATUS 0x4F8
+#define P_REG_RX_OV_STATUS_OV_S 0
+#define P_REG_RX_OV_STATUS_OV_M BIT(0)
+
+/* PHY offset ready registers */
+#define P_REG_TX_OR 0x45C
+#define P_REG_RX_OR 0x47C
+
+/* PHY total offset registers */
+#define P_REG_TOTAL_RX_OFFSET_L 0x460
+#define P_REG_TOTAL_RX_OFFSET_U 0x464
+#define P_REG_TOTAL_TX_OFFSET_L 0x440
+#define P_REG_TOTAL_TX_OFFSET_U 0x444
+
+/* Timestamp PAR/PCS registers */
+#define P_REG_UIX66_10G_40G_L 0x480
+#define P_REG_UIX66_10G_40G_U 0x484
+#define P_REG_UIX66_25G_100G_L 0x488
+#define P_REG_UIX66_25G_100G_U 0x48C
+#define P_REG_DESK_PAR_RX_TUS_L 0x490
+#define P_REG_DESK_PAR_RX_TUS_U 0x494
+#define P_REG_DESK_PAR_TX_TUS_L 0x498
+#define P_REG_DESK_PAR_TX_TUS_U 0x49C
+#define P_REG_DESK_PCS_RX_TUS_L 0x4A0
+#define P_REG_DESK_PCS_RX_TUS_U 0x4A4
+#define P_REG_DESK_PCS_TX_TUS_L 0x4A8
+#define P_REG_DESK_PCS_TX_TUS_U 0x4AC
+#define P_REG_PAR_RX_TUS_L 0x420
+#define P_REG_PAR_RX_TUS_U 0x424
+#define P_REG_PAR_TX_TUS_L 0x428
+#define P_REG_PAR_TX_TUS_U 0x42C
+#define P_REG_PCS_RX_TUS_L 0x430
+#define P_REG_PCS_RX_TUS_U 0x434
+#define P_REG_PCS_TX_TUS_L 0x438
+#define P_REG_PCS_TX_TUS_U 0x43C
+#define P_REG_PAR_RX_TIME_L 0x4F0
+#define P_REG_PAR_RX_TIME_U 0x4F4
+#define P_REG_PAR_TX_TIME_L 0x4CC
+#define P_REG_PAR_TX_TIME_U 0x4D0
+#define P_REG_PAR_PCS_RX_OFFSET_L 0x4E8
+#define P_REG_PAR_PCS_RX_OFFSET_U 0x4EC
+#define P_REG_PAR_PCS_TX_OFFSET_L 0x4C4
+#define P_REG_PAR_PCS_TX_OFFSET_U 0x4C8
+#define P_REG_LINK_SPEED 0x4FC
+#define P_REG_LINK_SPEED_SERDES_S 0
+#define P_REG_LINK_SPEED_SERDES_M ICE_M(0x7, 0)
+#define P_REG_LINK_SPEED_FEC_MODE_S 3
+#define P_REG_LINK_SPEED_FEC_MODE_M ICE_M(0x3, 3)
+#define P_REG_LINK_SPEED_FEC_MODE(reg) \
+ (((reg) & P_REG_LINK_SPEED_FEC_MODE_M) >> \
+ P_REG_LINK_SPEED_FEC_MODE_S)
+
+/* PHY timestamp related registers */
+#define P_REG_PMD_ALIGNMENT 0x0FC
+#define P_REG_RX_80_TO_160_CNT 0x6FC
+#define P_REG_RX_80_TO_160_CNT_RXCYC_S 0
+#define P_REG_RX_80_TO_160_CNT_RXCYC_M BIT(0)
+#define P_REG_RX_40_TO_160_CNT 0x8FC
+#define P_REG_RX_40_TO_160_CNT_RXCYC_S 0
+#define P_REG_RX_40_TO_160_CNT_RXCYC_M ICE_M(0x3, 0)
+
+/* Rx FIFO status registers */
+#define P_REG_RX_OV_FS 0x4F8
+#define P_REG_RX_OV_FS_FIFO_STATUS_S 2
+#define P_REG_RX_OV_FS_FIFO_STATUS_M ICE_M(0x3FF, 2)
+
+/* Timestamp command registers */
+#define P_REG_TX_TMR_CMD 0x448
+#define P_REG_RX_TMR_CMD 0x468
+
/* E810 timesync enable register */
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
@@ -68,9 +402,20 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
/* Timestamp block macros */
#define TS_LOW_M 0xFFFFFFFF
+#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
+#define TS_PHY_LOW_M 0xFF
+#define TS_PHY_HIGH_M 0xFFFFFFFF
+#define TS_PHY_HIGH_S 8
+
#define BYTES_PER_IDX_ADDR_L_U 8
+#define BYTES_PER_IDX_ADDR_L 4
+
+/* Internal PHY timestamp address */
+#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
+#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \
+ BYTES_PER_IDX_ADDR_L))
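+
+/* Example: each timestamp index occupies 8 bytes (a low and a high 4-byte
+ * word), so for idx 3 in the Tx bank,
+ * TS_L(Q_REG_TX_MEMORY_BANK_START, 3) = 0xA00 + 3 * 8 = 0xA18 and
+ * TS_H(Q_REG_TX_MEMORY_BANK_START, 3) = 0xA18 + 4 = 0xA1C.
+ */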
/* External PHY timestamp address */
#define TS_EXT(a, port, idx) ((a) + (0x1000 * (port)) + \
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index af8e6ef5f571..dcc310e29300 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -244,6 +244,14 @@ static int ice_repr_add(struct ice_vf *vf)
if (!repr)
return -ENOMEM;
+#ifdef CONFIG_ICE_SWITCHDEV
+ repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL);
+ if (!repr->mac_rule) {
+ err = -ENOMEM;
+ goto err_alloc_rule;
+ }
+#endif
+
repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
if (!repr->netdev) {
err = -ENOMEM;
@@ -287,6 +295,11 @@ err_alloc_q_vector:
free_netdev(repr->netdev);
repr->netdev = NULL;
err_alloc:
+#ifdef CONFIG_ICE_SWITCHDEV
+ kfree(repr->mac_rule);
+ repr->mac_rule = NULL;
+err_alloc_rule:
+#endif
kfree(repr);
vf->repr = NULL;
return err;
@@ -304,6 +317,10 @@ static void ice_repr_rem(struct ice_vf *vf)
unregister_netdev(vf->repr->netdev);
free_netdev(vf->repr->netdev);
vf->repr->netdev = NULL;
+#ifdef CONFIG_ICE_SWITCHDEV
+ kfree(vf->repr->mac_rule);
+ vf->repr->mac_rule = NULL;
+#endif
kfree(vf->repr);
vf->repr = NULL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 806de22933c6..0c77ff050d15 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -13,6 +13,11 @@ struct ice_repr {
struct ice_q_vector *q_vector;
struct net_device *netdev;
struct metadata_dst *dst;
+#ifdef CONFIG_ICE_SWITCHDEV
+ /* info about slow path MAC rule */
+ struct ice_rule_query_data *mac_rule;
+ u8 rule_added;
+#endif
};
int ice_repr_add_for_all_vfs(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index ce3c7bded4cb..7947223536e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -11,7 +11,7 @@
* This function inserts the root node of the scheduling tree topology
* to the SW DB.
*/
-static enum ice_status
+static int
ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_aqc_txsched_elem_data *info)
{
@@ -19,20 +19,20 @@ ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_hw *hw;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
if (!root)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* coverity[suspicious_sizeof] */
root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
sizeof(*root), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
memcpy(&root->info, info, sizeof(*info));
@@ -96,14 +96,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
*
* This function sends a scheduling elements cmd (cmd_opc)
*/
-static enum ice_status
+static int
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
u16 elems_req, void *buf, u16 buf_size,
u16 *elems_resp, struct ice_sq_cd *cd)
{
struct ice_aqc_sched_elem_cmd *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.sched_elem_cmd;
ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
@@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -145,18 +145,18 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* This function inserts a scheduler node to the SW DB.
*/
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
struct ice_sched_node *node;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
@@ -166,7 +166,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!parent) {
ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
le32_to_cpu(info->parent_teid));
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* query the current node information from FW before adding it
@@ -178,7 +178,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
if (!node)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
if (hw->max_children[layer]) {
/* coverity[suspicious_sizeof] */
node->children = devm_kcalloc(ice_hw_to_dev(hw),
@@ -186,7 +186,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
sizeof(*node), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
}
@@ -209,7 +209,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
*
* Delete scheduling elements (0x040F)
*/
-static enum ice_status
+static int
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_delete_elem *buf, u16 buf_size,
u16 *grps_del, struct ice_sq_cd *cd)
@@ -228,19 +228,19 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* This function removes nodes from HW
*/
-static enum ice_status
+static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u16 num_nodes, u32 *node_teids)
{
struct ice_aqc_delete_elem *buf;
u16 i, num_groups_removed = 0;
- enum ice_status status;
u16 buf_size;
+ int status;
buf_size = struct_size(buf, teid, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
@@ -369,14 +369,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
*
* Get default scheduler topology (0x400)
*/
-static enum ice_status
+static int
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
struct ice_aqc_get_topo_elem *buf, u16 buf_size,
u8 *num_branches, struct ice_sq_cd *cd)
{
struct ice_aqc_get_topo *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_topo;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
@@ -399,7 +399,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
*
* Add scheduling elements (0x0401)
*/
-static enum ice_status
+static int
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_add_elem *buf, u16 buf_size,
u16 *grps_added, struct ice_sq_cd *cd)
@@ -420,7 +420,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Configure scheduling elements (0x0403)
*/
-static enum ice_status
+static int
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_cfgd, struct ice_sq_cd *cd)
@@ -441,7 +441,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
-static enum ice_status
+static int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
@@ -462,7 +462,7 @@ ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Suspend scheduling elements (0x0409)
*/
-static enum ice_status
+static int
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -482,7 +482,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Resume scheduling elements (0x040A)
*/
-static enum ice_status
+static int
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -500,7 +500,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Query scheduler resource allocation (0x0412)
*/
-static enum ice_status
+static int
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
struct ice_aqc_query_txsched_res_resp *buf,
struct ice_sq_cd *cd)
@@ -520,18 +520,18 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
*
* This function suspends or resumes HW nodes
*/
-static enum ice_status
+static int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
u16 i, buf_size, num_elem_ret = 0;
- enum ice_status status;
__le32 *buf;
+ int status;
buf_size = sizeof(*buf) * num_nodes;
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_nodes; i++)
buf[i] = cpu_to_le32(node_teids[i]);
@@ -558,7 +558,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -566,7 +566,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* allocate LAN queue contexts */
if (!vsi_ctx->lan_q_ctx[tc]) {
vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
@@ -574,7 +574,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->lan_q_ctx[tc])
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
return 0;
}
@@ -585,7 +585,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
@@ -602,7 +602,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -610,7 +610,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* allocate RDMA queue contexts */
if (!vsi_ctx->rdma_q_ctx[tc]) {
vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
@@ -618,7 +618,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->rdma_q_ctx[tc])
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
return 0;
}
@@ -629,7 +629,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
@@ -651,14 +651,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
*
* RL profile function to add, query, or remove profile(s)
*/
-static enum ice_status
+static int
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.rl_profile;
@@ -682,7 +682,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
*
* Add RL profile (0x0410)
*/
-static enum ice_status
+static int
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_added, struct ice_sq_cd *cd)
@@ -702,7 +702,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
*
* Remove RL profile (0x0415)
*/
-static enum ice_status
+static int
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_removed, struct ice_sq_cd *cd)
@@ -721,24 +721,24 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
* its associated parameters from HW DB and locally. The caller needs to
* hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_del_rl_profile(struct ice_hw *hw,
struct ice_aqc_rl_profile_info *rl_info)
{
struct ice_aqc_rl_profile_elem *buf;
u16 num_profiles_removed;
- enum ice_status status;
u16 num_profiles = 1;
+ int status;
if (rl_info->prof_id_ref != 0)
- return ICE_ERR_IN_USE;
+ return -EBUSY;
/* Safe to remove profile ID */
buf = &rl_info->profile;
status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&num_profiles_removed, NULL);
if (status || num_profiles_removed != num_profiles)
- return ICE_ERR_CFG;
+ return -EIO;
/* Delete stale entry now */
list_del(&rl_info->list_entry);
@@ -763,7 +763,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
&pi->rl_prof_list[ln], list_entry) {
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
rl_prof_elem->prof_id_ref = 0;
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
@@ -875,7 +875,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
*
* This function add nodes to HW as well as to SW DB for a given layer
*/
-static enum ice_status
+static int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
u16 *num_nodes_added, u32 *first_node_teid)
@@ -883,15 +883,15 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *prev, *new_node;
struct ice_aqc_add_elem *buf;
u16 i, num_groups_added = 0;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
size_t buf_size;
+ int status = 0;
u32 teid;
buf_size = struct_size(buf, generic, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
@@ -918,7 +918,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
- return ICE_ERR_CFG;
+ return -EIO;
}
*num_nodes_added = num_nodes;
@@ -974,7 +974,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
*
* Add nodes into specific HW layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -989,7 +989,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
return 0;
if (!parent || layer < pi->hw->sw_entry_point_layer)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* max children per node per layer */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@@ -998,8 +998,8 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
if ((parent->num_children + num_nodes) > max_child_nodes) {
/* Fail if the parent is a TC node */
if (parent == tc_node)
- return ICE_ERR_CFG;
- return ICE_ERR_MAX_LIMIT;
+ return -EIO;
+ return -ENOSPC;
}
return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
@@ -1018,7 +1018,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*
* This function adds nodes to a given layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1027,7 +1027,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
{
u32 *first_teid_ptr = first_node_teid;
u16 new_num_nodes = num_nodes;
- enum ice_status status = 0;
+ int status = 0;
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
@@ -1045,14 +1045,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (*num_nodes_added > num_nodes) {
ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
*num_nodes_added);
- status = ICE_ERR_CFG;
+ status = -EIO;
break;
}
/* break if all the nodes are added successfully */
if (!status && (*num_nodes_added == num_nodes))
break;
/* break if the error is not max limit */
- if (status && status != ICE_ERR_MAX_LIMIT)
+ if (status && status != -ENOSPC)
break;
/* Exceeded the max children */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@@ -1152,7 +1152,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
}
if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- enum ice_status status;
+ int status;
/* remove the default leaf node */
status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
@@ -1198,23 +1198,23 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
* resources, default topology created by firmware and storing the information
* in SW DB.
*/
-enum ice_status ice_sched_init_port(struct ice_port_info *pi)
+int ice_sched_init_port(struct ice_port_info *pi)
{
struct ice_aqc_get_topo_elem *buf;
- enum ice_status status;
struct ice_hw *hw;
u8 num_branches;
u16 num_elems;
+ int status;
u8 i, j;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
/* Query the Default Topology from FW */
buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Query default scheduling tree topology */
status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
@@ -1226,7 +1226,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
num_branches);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_init_port;
}
@@ -1237,7 +1237,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
num_elems);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_init_port;
}
@@ -1300,11 +1300,11 @@ err_init_port:
*
* query FW for allocated scheduler resources and store in HW struct
*/
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
+int ice_sched_query_res_alloc(struct ice_hw *hw)
{
struct ice_aqc_query_txsched_res_resp *buf;
- enum ice_status status = 0;
__le16 max_sibl;
+ int status = 0;
u16 i;
if (hw->layer_info)
@@ -1312,7 +1312,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
if (status)
@@ -1341,7 +1341,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
sizeof(*hw->layer_info)),
GFP_KERNEL);
if (!hw->layer_info) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto sched_query_out;
}
@@ -1614,31 +1614,31 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
 * This function adds the VSI child nodes to the tree. It gets called for
* LAN and RDMA separately.
*/
-static enum ice_status
+static int
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes,
u8 owner)
{
struct ice_sched_node *parent, *node;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, qgl, vsil;
+ int status;
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
&num_added);
if (status || num_nodes[i] != num_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -1717,18 +1717,18 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
 * This function adds the VSI support nodes into the Tx tree, including the
 * VSI, its parent, and the intermediate nodes in the layers below
*/
-static enum ice_status
+static int
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *parent = tc_node;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, vsil;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
@@ -1737,7 +1737,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
&first_node_teid,
&num_added);
if (status || num_nodes[i] != num_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -1749,7 +1749,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
parent = parent->children[0];
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
if (i == vsil)
parent->vsi_handle = vsi_handle;
@@ -1766,7 +1766,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
*
* This function adds a new VSI into scheduler tree
*/
-static enum ice_status
+static int
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
@@ -1774,7 +1774,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* calculate number of supported nodes needed for this VSI */
ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
@@ -1794,7 +1794,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
*
* This function updates the VSI child nodes based on the number of queues
*/
-static enum ice_status
+static int
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
@@ -1802,21 +1802,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
+ int status = 0;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (owner == ICE_SCHED_NODE_OWNER_LAN)
prev_numqs = vsi_ctx->sched.max_lanq[tc];
@@ -1869,22 +1869,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 * enabled and the VSI is in a suspended state, then resume it. If the TC is
 * disabled, then suspend the VSI if it is not suspended already.
*/
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable)
{
struct ice_sched_node *vsi_node, *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
/* suspend the VSI if TC is not enabled */
@@ -1908,7 +1908,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_ctx->sched.vsi_node[tc] = vsi_node;
vsi_node->in_use = true;
@@ -1993,11 +1993,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
 * This function removes the VSI and its LAN or RDMA child nodes from the
* scheduler tree.
*/
-static enum ice_status
+static int
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
+ int status = -EINVAL;
u8 i;
ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
@@ -2022,7 +2022,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (ice_sched_is_leaf_node_present(vsi_node)) {
ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
- status = ICE_ERR_IN_USE;
+ status = -EBUSY;
goto exit_sched_rm_vsi_cfg;
}
while (j < vsi_node->num_children) {
@@ -2065,7 +2065,7 @@ exit_sched_rm_vsi_cfg:
 * This function clears the VSI and its LAN child nodes from the scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
@@ -2078,7 +2078,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
 * This function clears the VSI and its RDMA child nodes from the scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}
@@ -2188,36 +2188,36 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,
*
 * This function moves the child nodes to a given parent.
*/
-static enum ice_status
+static int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
struct ice_aqc_move_elem *buf;
struct ice_sched_node *node;
- enum ice_status status = 0;
u16 i, grps_movd = 0;
struct ice_hw *hw;
+ int status = 0;
u16 buf_len;
hw = pi->hw;
if (!parent || !num_items)
- return ICE_ERR_PARAM;
+ return -EINVAL;
 /* Does the parent have enough space? */
if (parent->num_children + num_items >
hw->max_children[parent->tx_sched_layer])
- return ICE_ERR_AQ_FULL;
+ return -ENOSPC;
buf_len = struct_size(buf, teid, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_items; i++) {
node = ice_sched_find_node_by_teid(pi->root, list[i]);
if (!node) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto move_err_exit;
}
@@ -2228,7 +2228,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
&grps_movd, NULL);
if (status && grps_movd != 1) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto move_err_exit;
}
@@ -2251,28 +2251,28 @@ move_err_exit:
* This function moves a VSI to an aggregator node or its subtree.
* Intermediate nodes may be created if required.
*/
-static enum ice_status
+static int
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
u8 tc)
{
struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u32 first_node_teid, vsi_teid;
- enum ice_status status;
u16 num_nodes_added;
u8 aggl, vsil, i;
+ int status;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Is this VSI already part of given aggregator? */
if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
@@ -2302,7 +2302,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
&first_node_teid,
&num_nodes_added);
if (status || num_nodes[i] != num_nodes_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -2314,7 +2314,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
parent = parent->children[0];
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
}
move_nodes:
@@ -2333,14 +2333,14 @@ move_nodes:
 * aggregator VSI info based on the passed-in boolean parameter rm_vsi_info. The
* caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
struct ice_sched_agg_info *agg_info, u8 tc,
bool rm_vsi_info)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *tmp;
- enum ice_status status = 0;
+ int status = 0;
list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
list_entry) {
@@ -2397,7 +2397,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
* This function removes the aggregator node and intermediate nodes if any
* from the given TC
*/
-static enum ice_status
+static int
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *tc_node, *agg_node;
@@ -2405,15 +2405,15 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Can't remove the aggregator node if it has children */
if (ice_sched_is_agg_inuse(pi, agg_node))
- return ICE_ERR_IN_USE;
+ return -EBUSY;
/* need to remove the whole subtree if aggregator node is the
* only child.
@@ -2422,7 +2422,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
struct ice_sched_node *parent = agg_node->parent;
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
if (parent->num_children > 1)
break;
@@ -2445,11 +2445,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* the aggregator configuration completely for requested TC. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
u8 tc, bool rm_vsi_info)
{
- enum ice_status status = 0;
+ int status = 0;
/* If nothing to remove - return success */
if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
@@ -2478,7 +2478,7 @@ exit_rm_agg_cfg_tc:
* Save aggregator TC bitmap. This function needs to be called with scheduler
* lock held.
*/
-static enum ice_status
+static int
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
unsigned long *tc_bitmap)
{
@@ -2486,7 +2486,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
agg_info = ice_get_agg_info(pi->hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
return 0;
@@ -2501,20 +2501,20 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
* This function creates an aggregator node and intermediate nodes if required
* for the given TC
*/
-static enum ice_status
+static int
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *parent, *agg_node, *tc_node;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u32 first_node_teid;
u16 num_nodes_added;
+ int status = 0;
u8 i, aggl;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
 /* Does Agg node already exist? */
@@ -2549,14 +2549,14 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
parent = tc_node;
for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
&num_nodes_added);
if (status || num_nodes[i] != num_nodes_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -2591,13 +2591,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* resources and remove aggregator ID.
* This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, unsigned long *tc_bitmap)
{
struct ice_sched_agg_info *agg_info;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
agg_info = ice_get_agg_info(hw, agg_id);
@@ -2606,7 +2606,7 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
GFP_KERNEL);
if (!agg_info)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
agg_info->agg_id = agg_id;
agg_info->agg_type = agg_type;
@@ -2653,19 +2653,17 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
*
* This function configures aggregator node(s).
*/
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
u8 tc_bitmap)
{
unsigned long bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
- status = ice_sched_cfg_agg(pi, agg_id, agg_type,
- (unsigned long *)&bitmap);
+ status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap);
if (!status)
- status = ice_save_agg_tc_bitmap(pi, agg_id,
- (unsigned long *)&bitmap);
+ status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap);
mutex_unlock(&pi->sched_lock);
return status;
}
@@ -2724,7 +2722,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
 * Save VSI to aggregator TC bitmap. This function needs to be called with the scheduler
* lock held.
*/
-static enum ice_status
+static int
ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
unsigned long *tc_bitmap)
{
@@ -2733,11 +2731,11 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
agg_info = ice_get_agg_info(pi->hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
 /* check if the entry already exists */
agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
if (!agg_vsi_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
return 0;
@@ -2754,21 +2752,21 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
 * already associated with the aggregator node, then no operation is performed on
* the tree. This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, unsigned long *tc_bitmap)
{
struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
struct ice_sched_agg_info *agg_info, *old_agg_info;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
agg_info = ice_get_agg_info(hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* If the VSI is already part of another aggregator then update
* its VSI info list
*/
@@ -2790,7 +2788,7 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*agg_vsi_info), GFP_KERNEL);
if (!agg_vsi_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* add VSI ID into the aggregator list */
agg_vsi_info->vsi_handle = vsi_handle;
@@ -2851,14 +2849,14 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
* returns success or error on config sched element failure. The caller
* needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_txsched_elem_data buf;
- enum ice_status status;
u16 elem_cfgd = 0;
u16 num_elems = 1;
+ int status;
buf = *info;
/* Parent TEID is reserved field in this aq call */
@@ -2874,7 +2872,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
&elem_cfgd, NULL);
if (status || elem_cfgd != num_elems) {
ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
/* Config success case */
@@ -2893,7 +2891,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
*
* This function configures node element's BW allocation.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 bw_alloc)
{
@@ -2909,7 +2907,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
} else {
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Configure element */
@@ -2925,12 +2923,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
*
* Move or associate VSI to a new or default aggregator node.
*/
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap)
{
unsigned long bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
@@ -3098,12 +3096,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
*
* This function converts the BW to profile structure format.
*/
-static enum ice_status
+static int
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
struct ice_aqc_rl_profile_elem *profile)
{
- enum ice_status status = ICE_ERR_PARAM;
s64 bytes_per_sec, ts_rate, mv_tmp;
+ int status = -EINVAL;
bool found = false;
s32 encode = 0;
s64 mv = 0;
@@ -3150,7 +3148,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
profile->rl_encode = cpu_to_le16(encode);
status = 0;
} else {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
}
return status;
@@ -3176,9 +3174,9 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
struct ice_aqc_rl_profile_elem *buf;
- enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
+ int status;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
return NULL;
@@ -3249,7 +3247,7 @@ exit_add_rl_prof:
*
* This function configures node element's BW limit.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 rl_prof_id)
{
@@ -3268,7 +3266,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
* hence only one of them may be set for any given element
*/
if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
- return ICE_ERR_CFG;
+ return -EIO;
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
break;
@@ -3291,7 +3289,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
(le16_to_cpu(data->eir_bw.bw_profile_idx) !=
ICE_SCHED_DFLT_RL_PROF_ID))
- return ICE_ERR_CFG;
+ return -EIO;
/* EIR BW is set to default, disable it */
data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
/* Okay to enable shared BW now */
@@ -3300,7 +3298,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
break;
default:
/* Unknown rate limit type */
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Configure element */
@@ -3420,15 +3418,15 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
* 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
* scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
u16 profile_id)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
- enum ice_status status = 0;
+ int status = 0;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
@@ -3441,11 +3439,11 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Remove old profile ID from database */
status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
- if (status && status != ICE_ERR_IN_USE)
+ if (status && status != -EBUSY)
ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
break;
}
- if (status == ICE_ERR_IN_USE)
+ if (status == -EBUSY)
status = 0;
return status;
}
@@ -3461,16 +3459,16 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type, u8 layer_num)
{
- enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
u16 rl_prof_id;
u16 old_id;
+ int status;
hw = pi->hw;
switch (rl_type) {
@@ -3488,7 +3486,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Save existing RL prof ID for later clean up */
old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
@@ -3518,7 +3516,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
* them may be set for any given element. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
struct ice_sched_node *node,
u8 layer_num, enum ice_rl_type rl_type, u32 bw)
@@ -3562,14 +3560,14 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
* node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
* ID from local database. The caller needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
struct ice_aqc_rl_profile_info *rl_prof_info;
- enum ice_status status = ICE_ERR_PARAM;
struct ice_hw *hw = pi->hw;
u16 old_id, rl_prof_id;
+ int status = -EINVAL;
rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
if (!rl_prof_info)
@@ -3608,31 +3606,31 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
* It updates node's BW limit parameters like BW RL profile ID of type CIR,
* EIR, or SRL. The caller needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw)
{
struct ice_sched_node *cfg_node = node;
- enum ice_status status;
+ int status;
struct ice_hw *hw;
u8 layer_num;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
/* Remove unused RL profile IDs from HW and SW DB */
ice_sched_rm_unused_rl_prof(pi);
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
if (layer_num >= hw->num_tx_sched_layers)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (rl_type == ICE_SHARED_BW) {
/* SRL node may be different */
cfg_node = ice_sched_get_srl_node(node, layer_num);
if (!cfg_node)
- return ICE_ERR_CFG;
+ return -EIO;
}
/* EIR BW and Shared BW profiles are mutually exclusive and
* hence only one of them may be set for any given element
@@ -3657,7 +3655,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type)
@@ -3675,7 +3673,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
* behalf of the requested node (first argument). This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
/* SRL profiles are not available on all layers. Check if the
@@ -3690,7 +3688,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
(node->parent && node->parent->num_children == 1)))
return 0;
- return ICE_ERR_CFG;
+ return -EIO;
}
/**
@@ -3701,7 +3699,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
*
* Save BW information of queue type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
switch (rl_type) {
@@ -3715,7 +3713,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
return 0;
}
@@ -3731,16 +3729,16 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
*
* This function sets BW limit of queue scheduling node.
*/
-static enum ice_status
+static int
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
struct ice_q_ctx *q_ctx;
+ int status = -EINVAL;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
if (!q_ctx)
@@ -3762,7 +3760,7 @@ ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
if (sel_layer >= pi->hw->num_tx_sched_layers) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto exit_q_bw_lmt;
}
status = ice_sched_validate_srl_node(node, sel_layer);
@@ -3794,7 +3792,7 @@ exit_q_bw_lmt:
*
* This function configures BW limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
@@ -3812,7 +3810,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
*
* This function configures BW default limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type)
{
@@ -3880,13 +3878,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
* This function sets BW limit of VSI or Aggregator scheduling node
* based on TC information from passed in argument BW.
*/
-static enum ice_status
+int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
+ int status = -EINVAL;
if (!pi)
return status;
@@ -3921,7 +3919,7 @@ exit_set_node_bw_lmt_per_tc:
* This function configures BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -3948,7 +3946,7 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* This function configures default BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type)
{
@@ -3976,13 +3974,13 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* burst size value is used for future rate limit calls. It doesn't change the
* existing or previously created RL profiles.
*/
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
{
u16 burst_size_to_prog;
if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
bytes > ICE_MAX_BURST_SIZE_ALLOWED)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (ice_round_to_num(bytes, 64) <=
ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
/* 64 byte granularity case */
@@ -4017,13 +4015,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
* This function configures node element's priority value. It
* needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
u8 priority)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
- enum ice_status status;
+ int status;
buf = node->info;
data = &buf.data;
@@ -4044,12 +4042,12 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
* This function restores node's BW from bw_t_info. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_bw_type_info *bw_t_info)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status = ICE_ERR_PARAM;
+ int status = -EINVAL;
u16 bw_alloc;
if (!node)
@@ -4137,7 +4135,7 @@ void ice_sched_replay_agg(struct ice_hw *hw)
if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
ICE_MAX_TRAFFIC_CLASS)) {
DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
- enum ice_status status;
+ int status;
bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
ice_sched_get_ena_tc_bitmap(pi,
@@ -4191,18 +4189,17 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
* their node bandwidth information. This function needs to be called with
* scheduler lock held.
*/
-static enum ice_status
-ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_port_info *pi = hw->port_info;
struct ice_sched_agg_info *agg_info;
- enum ice_status status;
+ int status;
bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
if (!agg_info)
return 0; /* Not present in list - default Agg case */
@@ -4233,10 +4230,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays association of VSI to aggregator type nodes, and
* node bandwidth information.
*/
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_sched_replay_vsi_agg(hw, vsi_handle);
@@ -4252,14 +4249,13 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays queue type node bandwidth. This function needs to be
* called with scheduler lock held.
*/
-enum ice_status
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
+int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
struct ice_sched_node *q_node;
/* Following also checks the presence of node in tree */
q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
if (!q_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
}
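The scheduler conversion above means callers can now branch on standard kernel codes directly. Below is a minimal sketch of that pattern, mirroring how ice_sched_rm_rl_profile() above tolerates a still-referenced profile; the wrapper name example_del_rl_profile is hypothetical and not part of this patch:

static int example_del_rl_profile(struct ice_hw *hw,
				  struct ice_aqc_rl_profile_info *prof)
{
	int status = ice_sched_del_rl_profile(hw, prof);

	/* -EBUSY (formerly ICE_ERR_IN_USE) only means the profile is
	 * still referenced; treat it as non-fatal, as the code above does.
	 */
	if (status == -EBUSY)
		return 0;
	return status;
}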
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 6bddcbecaf5e..4f91577fed56 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -65,12 +65,12 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
-enum ice_status ice_sched_init_port(struct ice_port_info *pi);
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+int ice_sched_init_port(struct ice_port_info *pi);
+int ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
@@ -79,7 +79,7 @@ void ice_sched_clear_agg(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info);
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
@@ -87,35 +87,38 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner);
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
/* Tx scheduler rate limiter functions */
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, u8 tc_bitmap);
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap);
-enum ice_status
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type);
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+int
+ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
+int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
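With every prototype above returning int, call sites no longer need a translation step; the ice_status_to_errno() wrapper dropped from ice_cfg_rdma_fltr() in ice_switch.c below illustrates this. A hypothetical caller sketch (the function name is illustrative; ice_cfg_q_bw_lmt() is declared above):

/* The scheduler's return value is already a kernel error code,
 * so it can be propagated as-is.
 */
static int example_set_q_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			    u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	return ice_cfg_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, bw);
}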
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index aa11d07793d4..52c6bac41bf7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -18,7 +18,7 @@
 * queue and asynchronously sending a message via
* ice_sq_send_cmd() function
*/
-enum ice_status
+int
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
@@ -228,7 +228,7 @@ ice_mbx_traverse(struct ice_hw *hw,
* sent per VF and marks the VF as malicious if it exceeds
* the permissible number of messages to send.
*/
-static enum ice_status
+static int
ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
enum ice_mbx_snapshot_state *new_state,
bool *is_malvf)
@@ -236,7 +236,7 @@ ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
if (vf_id >= snap->mbx_vf.vfcntr_len)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* increment the message count in the VF array */
snap->mbx_vf.vf_cntr[vf_id]++;
@@ -297,7 +297,7 @@ static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
 * Detect: If the pending message count exceeds the watermark, traverse
* the static snapshot and look for a malicious VF.
*/
-enum ice_status
+int
ice_mbx_vf_state_handler(struct ice_hw *hw,
struct ice_mbx_data *mbx_data, u16 vf_id,
bool *is_malvf)
@@ -306,10 +306,10 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
struct ice_mbx_snap_buffer_data *snap_buf;
struct ice_ctl_q_info *cq = &hw->mailboxq;
enum ice_mbx_snapshot_state new_state;
- enum ice_status status = 0;
+ int status = 0;
if (!is_malvf || !mbx_data)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
/* When entering the mailbox state machine assume that the VF
* is not malicious until detected.
@@ -320,7 +320,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
* interrupt is not less than the defined AVF message threshold.
*/
if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
 /* The watermark value should not be less than the threshold limit
* set for the number of asynchronous messages a VF can send to mailbox
@@ -329,7 +329,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
*/
if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
snap_buf = &snap->mbx_buf;
@@ -383,7 +383,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
default:
new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- status = ICE_ERR_CFG;
+ status = -EIO;
}
snap_buf->state = new_state;
@@ -405,20 +405,20 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
* the input vf_id against the bitmap to verify if the VF has been
* detected in any previous mailbox iterations.
*/
-enum ice_status
+int
ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf)
{
if (!all_malvfs || !report_malvf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
*report_malvf = false;
if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
if (vf_id >= bitmap_len)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* If the vf_id is found in the bitmap set bit and boolean to true */
if (!test_and_set_bit(vf_id, all_malvfs))
@@ -441,19 +441,19 @@ ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
* that the new VF loaded is not considered malicious before going
* through the overflow detection algorithm.
*/
-enum ice_status
+int
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id)
{
if (!snap || !all_malvfs)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (bitmap_len < snap->mbx_vf.vfcntr_len)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
/* Ensure VF ID value is not larger than bitmap or VF counter length */
if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
clear_bit(vf_id, all_malvfs);
@@ -482,7 +482,7 @@ ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
* called to ensure that the vf_count can be compared against the number
* of VFs supported as defined in the functional capabilities of the device.
*/
-enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
{
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
@@ -491,13 +491,13 @@ enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
* the functional capabilities of the PF.
*/
if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
sizeof(*snap->mbx_vf.vf_cntr),
GFP_KERNEL);
if (!snap->mbx_vf.vf_cntr)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Setting the VF counter length to the number of allocated
 * VFs for the given PF's functional capabilities.
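Because the mailbox helpers now return standard codes, failures can also be logged with the kernel's %pe specifier. A sketch under that assumption (the surrounding function is illustrative, not from this series):

static void example_init_mbx(struct ice_hw *hw, u16 vf_count)
{
	int err = ice_mbx_init_snapshot(hw, vf_count);

	/* %pe prints the symbolic name of an errno, e.g. -ENOMEM */
	if (err)
		dev_err(ice_hw_to_dev(hw),
			"mailbox snapshot init failed: %pe\n", ERR_PTR(err));
}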
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 161dc55d9e9c..68686a3fd7e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -14,24 +14,24 @@
#define ICE_ASYNC_VF_MSG_THRESHOLD 63
#ifdef CONFIG_PCI_IOV
-enum ice_status
+int
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
-enum ice_status
+int
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
u16 vf_id, bool *is_mal_vf);
-enum ice_status
+int
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id);
-enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
void ice_mbx_deinit_snapshot(struct ice_hw *hw);
-enum ice_status
+int
ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf);
#else /* CONFIG_PCI_IOV */
-static inline enum ice_status
+static inline int
ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
u16 __always_unused vfid, u32 __always_unused v_opcode,
u32 __always_unused v_retval, u8 __always_unused *msg,
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
deleted file mode 100644
index dbf66057371d..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
-
-#ifndef _ICE_STATUS_H_
-#define _ICE_STATUS_H_
-
-/* Error Codes */
-enum ice_status {
- ICE_SUCCESS = 0,
-
- /* Generic codes : Range -1..-49 */
- ICE_ERR_PARAM = -1,
- ICE_ERR_NOT_IMPL = -2,
- ICE_ERR_NOT_READY = -3,
- ICE_ERR_NOT_SUPPORTED = -4,
- ICE_ERR_BAD_PTR = -5,
- ICE_ERR_INVAL_SIZE = -6,
- ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
- ICE_ERR_RESET_FAILED = -9,
- ICE_ERR_FW_API_VER = -10,
- ICE_ERR_NO_MEMORY = -11,
- ICE_ERR_CFG = -12,
- ICE_ERR_OUT_OF_RANGE = -13,
- ICE_ERR_ALREADY_EXISTS = -14,
- ICE_ERR_DOES_NOT_EXIST = -15,
- ICE_ERR_IN_USE = -16,
- ICE_ERR_MAX_LIMIT = -17,
- ICE_ERR_RESET_ONGOING = -18,
- ICE_ERR_HW_TABLE = -19,
- ICE_ERR_FW_DDP_MISMATCH = -20,
-
- ICE_ERR_NVM = -50,
- ICE_ERR_NVM_CHECKSUM = -51,
- ICE_ERR_BUF_TOO_SHORT = -52,
- ICE_ERR_NVM_BLANK_MODE = -53,
- ICE_ERR_AQ_ERROR = -100,
- ICE_ERR_AQ_TIMEOUT = -101,
- ICE_ERR_AQ_FULL = -102,
- ICE_ERR_AQ_NO_WORK = -103,
- ICE_ERR_AQ_EMPTY = -104,
- ICE_ERR_AQ_FW_CRITICAL = -105,
-};
-
-#endif /* _ICE_STATUS_H_ */
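For readers tracking the conversion, the deleted codes correspond to the errnos substituted throughout this patch. The helper below is a reconstruction inferred from those substitutions, not code from the series; the name is hypothetical, and the enum no longer exists after this commit:

static inline int example_ice_status_to_errno(enum ice_status err)
{
	switch (err) {
	case ICE_SUCCESS:		return 0;
	case ICE_ERR_PARAM:
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:	return -EINVAL;
	case ICE_ERR_NO_MEMORY:		return -ENOMEM;
	case ICE_ERR_IN_USE:		return -EBUSY;
	case ICE_ERR_DOES_NOT_EXIST:	return -ENOENT;
	case ICE_ERR_MAX_LIMIT:
	case ICE_ERR_AQ_FULL:		return -ENOSPC;
	case ICE_ERR_CFG:
	case ICE_ERR_OUT_OF_RANGE:
	default:			return -EIO;
	}
}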
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 183d93033890..11ae0bee3590 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -528,7 +528,7 @@ static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
*/
-enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
+int ice_init_def_sw_recp(struct ice_hw *hw)
{
struct ice_sw_recipe *recps;
u8 i;
@@ -536,7 +536,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
sizeof(*recps), GFP_KERNEL);
if (!recps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
recps[i].root_rid = i;
@@ -576,14 +576,14 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
 * in the response buffer. The caller of this function must use *num_elems while
* parsing the response buffer.
*/
-static enum ice_status
+static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
u16 buf_size, u16 *req_desc, u16 *num_elems,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
cmd = &desc.params.get_sw_conf;
@@ -606,14 +606,14 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
*
* Add a VSI context to the hardware (0x0210)
*/
-static enum ice_status
+static int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
res = &desc.params.add_update_free_vsi_res;
@@ -650,14 +650,14 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware (0x0213)
*/
-static enum ice_status
+static int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -685,14 +685,14 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware (0x0211)
*/
-static enum ice_status
+static int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -832,15 +832,15 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)
 * If this function is called after a reset for existing VSIs, then update
 * the corresponding VSI handle list entry with the new HW VSI number.
*/
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_vsi_ctx *tmp_vsi_ctx;
- enum ice_status status;
+ int status;
if (vsi_handle >= ICE_MAX_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
status = ice_aq_add_vsi(hw, vsi_ctx, cd);
if (status)
return status;
@@ -851,7 +851,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
sizeof(*tmp_vsi_ctx), GFP_KERNEL);
if (!tmp_vsi_ctx) {
ice_aq_free_vsi(hw, vsi_ctx, false, cd);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
*tmp_vsi_ctx = *vsi_ctx;
ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
@@ -873,14 +873,14 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware as well as from VSI handle list
*/
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
if (!status)
@@ -897,12 +897,12 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware
*/
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
return ice_aq_update_vsi(hw, vsi_ctx, cd);
}
@@ -927,7 +927,7 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
else
ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
- return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
+ return ice_update_vsi(hw, vsi_handle, ctx, NULL);
}
/**
@@ -939,20 +939,20 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
*
 * allocates or frees a VSI list resource
*/
-static enum ice_status
+static int
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
struct ice_aqc_res_elem *vsi_ele;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(sw_buf, elem, 1);
sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
if (!sw_buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
if (lkup_type == ICE_SW_LKUP_MAC ||
@@ -966,7 +966,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_alloc_free_vsi_list_exit;
}
@@ -998,17 +998,17 @@ ice_aq_alloc_free_vsi_list_exit:
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (opc != ice_aqc_opc_add_sw_rules &&
opc != ice_aqc_opc_update_sw_rules &&
opc != ice_aqc_opc_remove_sw_rules)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
@@ -1018,7 +1018,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules &&
hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
return status;
}
@@ -1032,7 +1032,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
*
* Add(0x0290)
*/
-static enum ice_status
+static int
ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd)
@@ -1069,18 +1069,18 @@ ice_aq_add_recipe(struct ice_hw *hw,
* The caller must supply enough space in s_recipe_list to hold all possible
* recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
*/
-static enum ice_status
+static int
ice_aq_get_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
u16 buf_size;
+ int status;
if (*num_recipes != ICE_MAX_NUM_RECIPES)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.add_get_recipe;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
@@ -1104,7 +1104,7 @@ ice_aq_get_recipe(struct ice_hw *hw,
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
-static enum ice_status
+static int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
@@ -1130,13 +1130,13 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
-static enum ice_status
+static int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.recipe_to_profile;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
@@ -1154,16 +1154,16 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
*/
-static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(sw_buf, elem, 1);
sw_buf = kzalloc(buf_len, GFP_KERNEL);
if (!sw_buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
@@ -1230,7 +1230,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
* bookkeeping so that we have a current list of all the recipes that are
* programmed in the firmware.
*/
-static enum ice_status
+static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
bool *refresh_required)
{
@@ -1238,16 +1238,16 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
struct ice_aqc_recipe_data_elem *tmp;
u16 num_recps = ICE_MAX_NUM_RECIPES;
struct ice_prot_lkup_ext *lkup_exts;
- enum ice_status status;
u8 fv_word_idx = 0;
u16 sub_recps;
+ int status;
bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
/* we need a buffer big enough to accommodate all the recipes */
tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
tmp[0].recipe_indx = rid;
status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
@@ -1284,7 +1284,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
GFP_KERNEL);
if (!rg_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
@@ -1364,7 +1364,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
GFP_KERNEL);
if (!recps[rid].root_buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
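One detail worth calling out from ice_alloc_recipe above: the buffer length comes from struct_size() (<linux/overflow.h>), which computes the size of a structure with a trailing flexible array as sizeof(*ptr) plus count * sizeof(element), saturating at SIZE_MAX on overflow so the allocation simply fails. A self-contained sketch of the idiom, using demo types rather than the driver's:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_elem {
	__le16 id;
	__le16 flags;
};

struct demo_buf {
	__le16 num_elems;
	struct demo_elem elem[];	/* flexible array member */
};

static struct demo_buf *demo_alloc(u16 n)
{
	struct demo_buf *buf;

	/* overflow-safe sizeof(*buf) + n * sizeof(buf->elem[0]) */
	buf = kzalloc(struct_size(buf, elem, n), GFP_KERNEL);
	if (buf)
		buf->num_elems = cpu_to_le16(n);
	return buf;
}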
@@ -1407,19 +1407,19 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
+int ice_get_initial_sw_cfg(struct ice_hw *hw)
{
struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
- enum ice_status status;
u16 req_desc = 0;
u16 num_elems;
+ int status;
u16 i;
rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
GFP_KERNEL);
if (!rbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Multiple calls to ice_aq_get_sw_cfg may be required
* to get all the switch configuration information. The need
@@ -1670,7 +1670,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
* Create a large action to hold software marker and update the switch rule
* entry pointed by m_ent with newly created large action
*/
-static enum ice_status
+static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 sw_marker, u16 l_id)
{
@@ -1681,14 +1681,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* 3. GENERIC VALUE action to hold the software marker ID
*/
const u16 num_lg_acts = 3;
- enum ice_status status;
u16 lg_act_size;
u16 rules_size;
+ int status;
u32 act;
u16 id;
if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Create two back-to-back switch rules and submit them to the HW using
* one memory buffer:
@@ -1699,7 +1699,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
if (!lg_act)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
@@ -1808,19 +1808,19 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* Call AQ command to add a new switch rule or update existing switch rule
* using the given VSI list ID
*/
-static enum ice_status
+static int
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
u16 s_rule_size;
u16 rule_type;
+ int status;
int i;
if (!num_vsi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (lkup_type == ICE_SW_LKUP_MAC ||
lkup_type == ICE_SW_LKUP_MAC_VLAN ||
@@ -1834,15 +1834,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
else
- return ICE_ERR_PARAM;
+ return -EINVAL;
s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_vsi; i++) {
if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto exit;
}
/* AQ call requires hw_vsi_id(s) */
@@ -1869,11 +1869,11 @@ exit:
* @vsi_list_id: stores the ID of the VSI list to be created
* @lkup_type: switch rule filter's lookup type
*/
-static enum ice_status
+static int
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
- enum ice_status status;
+ int status;
status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
ice_aqc_opc_alloc_res);
@@ -1895,7 +1895,7 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* to the corresponding filter management list to track this switch rule
* and VSI mapping
*/
-static enum ice_status
+static int
ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_fltr_list_entry *f_entry)
{
@@ -1903,16 +1903,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_sw_lkup_type l_type;
struct ice_sw_recipe *recp;
- enum ice_status status;
+ int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
GFP_KERNEL);
if (!fm_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_create_pkt_fwd_rule_exit;
}
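ice_create_pkt_fwd_rule above follows the standard kernel unwind idiom: a scratch buffer is allocated first, and the failure path of the second allocation jumps to an exit label that the success path also passes through, so the scratch buffer is released exactly once. Reduced to its essentials (hypothetical names):

	tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);	/* scratch, always freed */
	if (!tmp)
		return -ENOMEM;

	keep = devm_kzalloc(dev, sizeof(*keep), GFP_KERNEL);	/* kept on success */
	if (!keep) {
		status = -ENOMEM;
		goto exit;
	}
	/* ... use tmp to program the rule, hang keep off a list ... */
exit:
	devm_kfree(dev, tmp);
	return status;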
@@ -1959,16 +1959,16 @@ ice_create_pkt_fwd_rule_exit:
* Call AQ command to update a previously created switch rule with a
* VSI list ID
*/
-static enum ice_status
+static int
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
+ int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
@@ -1988,13 +1988,13 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
*
* Updates unicast switch filter rules based on VEB/VEPA mode
*/
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = 0;
struct list_head *rule_head;
struct mutex *rule_lock; /* Lock to protect filter rule list */
+ int status = 0;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
@@ -2044,24 +2044,24 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
*/
-static enum ice_status
+static int
ice_add_update_vsi_list(struct ice_hw *hw,
struct ice_fltr_mgmt_list_entry *m_entry,
struct ice_fltr_info *cur_fltr,
struct ice_fltr_info *new_fltr)
{
- enum ice_status status = 0;
u16 vsi_list_id = 0;
+ int status = 0;
if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
(cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
/* Only one entry existed in the mapping and it was not already
@@ -2073,7 +2073,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
vsi_handle_arr[1] = new_fltr->vsi_handle;
@@ -2101,7 +2101,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
vsi_list_id);
if (!m_entry->vsi_list_info)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* If this entry was large action then the large action needs
* to be updated to point to FWD to VSI list
@@ -2116,7 +2116,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
enum ice_adminq_opc opcode;
if (!m_entry->vsi_list_info)
- return ICE_ERR_CFG;
+ return -EIO;
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
@@ -2209,7 +2209,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
*
* Adds or updates the rule lists for a given recipe
*/
-static enum ice_status
+static int
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_list_entry *f_entry)
{
@@ -2217,10 +2217,10 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *m_entry;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2255,18 +2255,18 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
* The VSI list should be emptied before this function is called to remove the
* VSI list.
*/
-static enum ice_status
+static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
enum ice_sw_lkup_type lkup_type)
{
struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
u16 s_rule_size;
+ int status;
s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
@@ -2288,21 +2288,21 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
-static enum ice_status
+static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_list)
{
enum ice_sw_lkup_type lkup_type;
- enum ice_status status = 0;
u16 vsi_list_id;
+ int status = 0;
if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* A rule with the VSI being removed does not exist */
if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
lkup_type = fm_list->fltr_info.lkup_type;
vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
@@ -2324,7 +2324,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle))
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
@@ -2375,19 +2375,19 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
* @recp_id: recipe ID for which the rule needs to be removed
* @recp_id: recipe ID for which the rule needs to be removed
* @f_entry: rule entry containing filter information
*/
-static enum ice_status
+static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_list_entry *f_entry)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *list_elem;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
bool remove_rule = false;
u16 vsi_handle;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2395,14 +2395,14 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
mutex_lock(rule_lock);
list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
if (!list_elem) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
}
if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
remove_rule = true;
} else if (!list_elem->vsi_list_info) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
} else if (list_elem->vsi_list_info->ref_cnt > 1) {
/* a ref_cnt > 1 indicates that the vsi_list is being
@@ -2435,7 +2435,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
GFP_KERNEL);
if (!s_rule) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto exit;
}
@@ -2590,7 +2590,7 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
* check for duplicates in this case, removing duplicates from a given
* list should be taken care of in the caller of this function.
*/
-enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
+int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
@@ -2598,12 +2598,12 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
u16 total_elem_left, s_rule_size;
struct ice_switch_info *sw;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
u16 num_unicast = 0;
+ int status = 0;
u8 elem_sent;
if (!m_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
s_rule = NULL;
sw = hw->switch_info;
@@ -2616,23 +2616,23 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
m_list_itr->fltr_info.flag = ICE_FLTR_TX;
vsi_handle = m_list_itr->fltr_info.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
m_list_itr->fltr_info.src = hw_vsi_id;
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
is_zero_ether_addr(add))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
/* Don't overwrite the unicast address */
mutex_lock(rule_lock);
if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
&m_list_itr->fltr_info)) {
mutex_unlock(rule_lock);
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
}
mutex_unlock(rule_lock);
num_unicast++;
@@ -2660,7 +2660,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
GFP_KERNEL);
if (!s_rule) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_add_mac_exit;
}
@@ -2710,7 +2710,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*fm_entry), GFP_KERNEL);
if (!fm_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_add_mac_exit;
}
fm_entry->fltr_info = *f_info;
@@ -2737,7 +2737,7 @@ ice_add_mac_exit:
* @hw: pointer to the hardware structure
* @f_entry: filter entry containing one VLAN information
*/
-static enum ice_status
+static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
struct ice_switch_info *sw = hw->switch_info;
@@ -2746,10 +2746,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
enum ice_sw_lkup_type lkup_type;
u16 vsi_list_id = 0, vsi_handle;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2757,10 +2757,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* VLAN ID should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (new_fltr->src_id != ICE_SRC_ID_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
lkup_type = new_fltr->lkup_type;
@@ -2799,7 +2799,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
new_fltr);
if (!v_list_itr) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
}
/* reuse VSI list for new rule and increment ref_cnt */
@@ -2835,7 +2835,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
if (v_list_itr->vsi_count > 1 &&
v_list_itr->vsi_list_info->ref_cnt > 1) {
ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
- status = ICE_ERR_CFG;
+ status = -EIO;
goto exit;
}
@@ -2845,7 +2845,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* A rule already exists with the new VSI being added */
if (cur_handle == vsi_handle) {
- status = ICE_ERR_ALREADY_EXISTS;
+ status = -EEXIST;
goto exit;
}
@@ -2890,16 +2890,16 @@ exit:
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
+int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr;
if (!v_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(v_list_itr, v_list, list_entry) {
if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
- return ICE_ERR_PARAM;
+ return -EINVAL;
v_list_itr->fltr_info.flag = ICE_FLTR_TX;
v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
if (v_list_itr->status)
@@ -2917,13 +2917,12 @@ enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
* the filter list with the necessary fields (including flags to
* indicate Tx or Rx rules).
*/
-enum ice_status
-ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
struct ice_fltr_list_entry *em_list_itr;
if (!em_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(em_list_itr, em_list, list_entry) {
enum ice_sw_lkup_type l_type =
@@ -2931,7 +2930,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
l_type != ICE_SW_LKUP_ETHERTYPE)
- return ICE_ERR_PARAM;
+ return -EINVAL;
em_list_itr->status = ice_add_rule_internal(hw, l_type,
em_list_itr);
@@ -2946,13 +2945,12 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
* @hw: pointer to the hardware structure
* @em_list: list of ethertype or ethertype MAC entries
*/
-enum ice_status
-ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
struct ice_fltr_list_entry *em_list_itr, *tmp;
if (!em_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
enum ice_sw_lkup_type l_type =
@@ -2960,7 +2958,7 @@ ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
l_type != ICE_SW_LKUP_ETHERTYPE)
- return ICE_ERR_PARAM;
+ return -EINVAL;
em_list_itr->status = ice_remove_rule_internal(hw, l_type,
em_list_itr);
@@ -3020,18 +3018,17 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
* add filter rule to set/unset given VSI as default VSI for the switch
* (represented by swid)
*/
-enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
+int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_fltr_info f_info;
enum ice_adminq_opc opcode;
- enum ice_status status;
u16 s_rule_size;
u16 hw_vsi_id;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
@@ -3039,7 +3036,7 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memset(&f_info, 0, sizeof(f_info));
@@ -3137,18 +3134,18 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
* This function removes either a MAC filter rule or a specific VSI from a
* VSI list for a multicast MAC address.
*
- * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
- * ice_add_mac. Caller should be aware that this call will only work if all
- * the entries passed into m_list were added previously. It will not attempt to
- * do a partial remove of entries that were found.
+ * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
+ * be aware that this call will only work if all the entries passed into m_list
+ * were added previously. It will not attempt to do a partial remove of entries
+ * that were found.
*/
-enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
+int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
- return ICE_ERR_PARAM;
+ return -EINVAL;
rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
@@ -3157,11 +3154,11 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_handle = list_itr->fltr_info.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_itr->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
@@ -3174,7 +3171,7 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
&list_itr->fltr_info)) {
mutex_unlock(rule_lock);
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
mutex_unlock(rule_lock);
}
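Given the all-or-nothing semantics documented above, a hedged caller sketch of removing a single filter and interpreting -ENOENT (field names as declared in ice_switch.h; the exact l_data layout is assumed from the driver's usual conventions):

static int demo_remove_one_mac(struct ice_hw *hw, u16 vsi_handle, const u8 *mac)
{
	struct ice_fltr_list_entry entry = {};
	LIST_HEAD(m_list);
	int err;

	entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
	entry.fltr_info.vsi_handle = vsi_handle;
	ether_addr_copy(entry.fltr_info.l_data.mac.mac_addr, mac);
	list_add(&entry.list_entry, &m_list);

	err = ice_remove_mac(hw, &m_list);
	/* -ENOENT means this entry was never added via ice_add_mac */
	return err;
}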
@@ -3192,19 +3189,18 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status
-ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
+int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr, *tmp;
if (!v_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_VLAN)
- return ICE_ERR_PARAM;
+ return -EINVAL;
v_list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_VLAN,
v_list_itr);
@@ -3242,7 +3238,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
* fltr_info.fwd_id fields. These are set such that later logic can
* extract which VSI to remove the fltr from, and pass on that information.
*/
-static enum ice_status
+static int
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *vsi_list_head,
struct ice_fltr_info *fi)
@@ -3254,7 +3250,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
*/
tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
tmp->fltr_info = *fi;
@@ -3285,17 +3281,17 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
* Note that this means all entries in vsi_list_head must be explicitly
* deallocated by the caller when done with list.
*/
-static enum ice_status
+static int
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *lkup_list_head,
struct list_head *vsi_list_head)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = 0;
+ int status = 0;
/* check to make sure VSI ID is valid and within boundary */
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
@@ -3349,9 +3345,8 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
* @recp_id: recipe ID for which the rule needs to removed
* @v_list: list of promisc entries
*/
-static enum ice_status
-ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
- struct list_head *v_list)
+static int
+ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr, *tmp;
@@ -3371,7 +3366,7 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
* @promisc_mask: mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
*/
-enum ice_status
+int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
{
@@ -3381,11 +3376,11 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
struct ice_fltr_mgmt_list_entry *itr;
struct list_head *rule_head;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
@@ -3444,20 +3439,20 @@ free_fltr_list:
* @promisc_mask: mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
*/
-enum ice_status
+int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info new_fltr;
- enum ice_status status = 0;
bool is_tx_fltr;
+ int status = 0;
u16 hw_vsi_id;
int pkt_type;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&new_fltr, 0, sizeof(new_fltr));
@@ -3558,7 +3553,7 @@ set_promisc_exit:
*
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
-enum ice_status
+int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc)
{
@@ -3567,8 +3562,8 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
struct list_head vsi_list_head;
struct list_head *vlan_head;
struct mutex *vlan_lock; /* Lock to protect filter rule list */
- enum ice_status status;
u16 vlan_id;
+ int status;
INIT_LIST_HEAD(&vsi_list_head);
vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
@@ -3616,7 +3611,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
struct list_head *rule_head;
struct ice_fltr_list_entry *tmp;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status;
+ int status;
INIT_LIST_HEAD(&remove_list_head);
rule_lock = &sw->recp_list[lkup].filt_rule_lock;
@@ -3681,19 +3676,19 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
* @num_items: number of entries requested for FD resource type
* @counter_id: counter index returned by AQ call
*/
-enum ice_status
+int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
/* Allocate resource */
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -3719,19 +3714,19 @@ exit:
* @num_items: number of entries to be freed for FD resource type
* @counter_id: counter ID resource which needs to be freed
*/
-enum ice_status
+int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
/* Free resource */
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -3941,7 +3936,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
* and start grouping them in 4-word groups. Each group makes up one
* recipe.
*/
-static enum ice_status
+static int
ice_create_first_fit_recp_def(struct ice_hw *hw,
struct ice_prot_lkup_ext *lkup_exts,
struct list_head *rg_list,
@@ -3965,7 +3960,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw,
sizeof(*entry),
GFP_KERNEL);
if (!entry)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
list_add(&entry->l_entry, rg_list);
grp = &entry->r_group;
(*recp_cnt)++;
@@ -3991,7 +3986,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw,
* Helper function to fill in the field vector indices for protocol-offset
* pairs. These indexes are then ultimately programmed into a recipe.
*/
-static enum ice_status
+static int
ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
struct list_head *rg_list)
{
@@ -4033,7 +4028,7 @@ ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
* invalid pair
*/
if (!found)
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
}
@@ -4075,10 +4070,8 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
u16 bit;
- bitmap_zero(possible_idx, ICE_MAX_FV_WORDS);
bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
- bitmap_zero(free_idx, ICE_MAX_FV_WORDS);
bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
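The two bitmap_zero() calls dropped here were dead stores: possible_idx is fully written by the bitmap_set() over the whole ICE_MAX_FV_WORDS range immediately below, and free_idx is presumably the destination of a full-width bitmap operation later in the function, which writes every bit before any is read. The general rule, sketched:

	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);

	/* no bitmap_zero() needed: bitmap_set() over the full range
	 * writes every bit unconditionally
	 */
	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);

	/* same for a full-width andnot into free_idx */
	bitmap_andnot(free_idx, possible_idx, used_idx, ICE_MAX_FV_WORDS);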
@@ -4115,7 +4108,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
* @rm: recipe management list entry
* @profiles: bitmap of profiles that will be associated.
*/
-static enum ice_status
+static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
unsigned long *profiles)
{
@@ -4123,11 +4116,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
struct ice_aqc_recipe_data_elem *tmp;
struct ice_aqc_recipe_data_elem *buf;
struct ice_recp_grp_entry *entry;
- enum ice_status status;
u16 free_res_idx;
u16 recipe_count;
u8 chain_idx;
u8 recps = 0;
+ int status;
/* When more than one recipe is required, another recipe is needed to
* chain them together. Matching a tunnel metadata ID takes up one of
@@ -4143,22 +4136,22 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (rm->n_grp_count > 1) {
if (rm->n_grp_count > free_res_idx)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
rm->n_grp_count++;
}
if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
GFP_KERNEL);
if (!buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_mem;
}
@@ -4218,7 +4211,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
*/
if (chain_idx >= ICE_MAX_FV_WORDS) {
ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
- status = ICE_ERR_MAX_LIMIT;
+ status = -ENOSPC;
goto err_unroll;
}
@@ -4249,7 +4242,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
sizeof(buf[0].recipe_bitmap));
} else {
- status = ICE_ERR_BAD_PTR;
+ status = -EINVAL;
goto err_unroll;
}
/* Applicable only for ROOT_RECIPE, set the fwd_priority for
@@ -4285,7 +4278,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
sizeof(*last_chain_entry),
GFP_KERNEL);
if (!last_chain_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
last_chain_entry->rid = rid;
@@ -4320,7 +4313,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
sizeof(buf[recps].recipe_bitmap));
} else {
- status = ICE_ERR_BAD_PTR;
+ status = -EINVAL;
goto err_unroll;
}
buf[recps].content.act_ctrl_fwd_priority = rm->priority;
@@ -4354,7 +4347,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
if (!idx_found) {
- status = ICE_ERR_OUT_OF_RANGE;
+ status = -EIO;
goto err_unroll;
}
@@ -4407,12 +4400,12 @@ err_mem:
* @rm: recipe management list entry
* @lkup_exts: lookup elements
*/
-static enum ice_status
+static int
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
struct ice_prot_lkup_ext *lkup_exts)
{
- enum ice_status status;
u8 recp_count = 0;
+ int status;
rm->n_grp_count = 0;
@@ -4442,21 +4435,21 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
* @bm: bitmap of field vectors to consider
* @fv_list: pointer to a list that holds the returned field vectors
*/
-static enum ice_status
+static int
ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
unsigned long *bm, struct list_head *fv_list)
{
- enum ice_status status;
u8 *prot_ids;
+ int status;
u16 i;
prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
if (!prot_ids)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < lkups_cnt; i++)
if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto free_mem;
}
@@ -4493,7 +4486,7 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
* @rinfo: other information regarding the rule e.g. priority and action info
* @lkup_exts: lookup word structure
*/
-static enum ice_status
+static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
struct ice_prot_lkup_ext *lkup_exts)
{
@@ -4510,7 +4503,7 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo,
lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
lkup_exts->field_mask[word] = mask;
} else {
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
}
}
@@ -4561,7 +4554,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
* @rinfo: other information regarding the rule e.g. priority and action info
* @rid: return the recipe ID of the recipe created
*/
-static enum ice_status
+static int
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
{
@@ -4572,16 +4565,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_sw_fv_list_entry *fvit;
struct ice_recp_grp_entry *r_tmp;
struct ice_sw_fv_list_entry *tmp;
- enum ice_status status = 0;
struct ice_sw_recipe *rm;
+ int status = 0;
u8 i;
if (!lkups_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
if (!lkup_exts)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Determine the number of words to be matched and if it exceeds a
* recipe's restrictions
@@ -4590,20 +4583,20 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 count;
if (lkups[i].type >= ICE_PROTOCOL_LAST) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_free_lkup_exts;
}
count = ice_fill_valid_words(&lkups[i], lkup_exts);
if (!count) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_free_lkup_exts;
}
}
rm = kzalloc(sizeof(*rm), GFP_KERNEL);
if (!rm) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_free_lkup_exts;
}
@@ -4849,7 +4842,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* @pkt_len: packet length of dummy packet
* @offsets: offset info for the dummy packet
*/
-static enum ice_status
+static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
struct ice_aqc_sw_rules_elem *s_rule,
const u8 *dummy_pkt, u16 pkt_len,
@@ -4883,7 +4876,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
}
/* this should never happen in a correct calling sequence */
if (!found)
- return ICE_ERR_PARAM;
+ return -EINVAL;
switch (lkups[i].type) {
case ICE_MAC_OFOS:
@@ -4920,12 +4913,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_udp_tnl_hdr);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* the length should be a word multiple */
if (len % ICE_BYTES_PER_WORD)
- return ICE_ERR_CFG;
+ return -EIO;
/* We have the offset to the header start, the length, the
* caller's header values and mask. Use this information to
@@ -4955,7 +4948,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* @pkt: dummy packet to fill in
* @offsets: offset info for the dummy packet
*/
-static enum ice_status
+static int
ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
{
@@ -4964,11 +4957,11 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
switch (tun_type) {
case ICE_SW_TUN_VXLAN:
if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
- return ICE_ERR_CFG;
+ return -EIO;
break;
case ICE_SW_TUN_GENEVE:
if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
- return ICE_ERR_CFG;
+ return -EIO;
break;
default:
/* Nothing needs to be done for this tunnel type */
@@ -4989,7 +4982,7 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
}
}
- return ICE_ERR_CFG;
+ return -EIO;
}
/**
@@ -5054,25 +5047,25 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
*/
-static enum ice_status
+static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
struct ice_adv_fltr_mgmt_list_entry *m_entry,
struct ice_adv_rule_info *cur_fltr,
struct ice_adv_rule_info *new_fltr)
{
- enum ice_status status;
u16 vsi_list_id = 0;
+ int status;
if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
(cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
/* Only one entry existed in the mapping and it was not already
@@ -5085,7 +5078,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
new_fltr->sw_act.fwd_id.hw_vsi_id)
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
@@ -5118,7 +5111,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle = new_fltr->sw_act.vsi_handle;
if (!m_entry->vsi_list_info)
- return ICE_ERR_CFG;
+ return -EIO;
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
@@ -5160,7 +5153,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
* rinfo describes other information related to this rule such as forwarding
* IDs, priority of this rule, etc.
*/
-enum ice_status
+int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
struct ice_rule_query_data *added_entry)
@@ -5171,10 +5164,10 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_aqc_sw_rules_elem *s_rule = NULL;
struct list_head *rule_head;
struct ice_switch_info *sw;
- enum ice_status status;
const u8 *pkt = NULL;
u16 word_cnt;
u32 act = 0;
+ int status;
u8 q_rgn;
/* Initialize profile to result index bitmap */
@@ -5184,7 +5177,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (!lkups_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* get # of words we need to match */
word_cnt = 0;
@@ -5198,13 +5191,13 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* make sure that we can locate a dummy packet */
ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
&pkt_offsets);
if (!pkt) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_ice_add_adv_rule;
}
@@ -5212,11 +5205,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
- return ICE_ERR_CFG;
+ return -EIO;
vsi_handle = rinfo->sw_act.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
rinfo->sw_act.fwd_id.hw_vsi_id =
@@ -5250,7 +5243,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
if (!rinfo->flags_info.act_valid) {
act |= ICE_SINGLE_ACT_LAN_ENABLE;
act |= ICE_SINGLE_ACT_LB_ENABLE;
@@ -5284,7 +5277,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ICE_SINGLE_ACT_VALID_BIT;
break;
default:
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_ice_add_adv_rule;
}
@@ -5329,14 +5322,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
sizeof(struct ice_adv_fltr_mgmt_list_entry),
GFP_KERNEL);
if (!adv_fltr) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_adv_rule;
}
adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
lkups_cnt * sizeof(*lkups), GFP_KERNEL);
if (!adv_fltr->lkups) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_adv_rule;
}
@@ -5379,12 +5372,12 @@ err_ice_add_adv_rule:
* Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
* It is required to pass valid VSI handle.
*/
-static enum ice_status
+static int
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
struct list_head *list_head)
{
struct ice_fltr_mgmt_list_entry *itr;
- enum ice_status status = 0;
+ int status = 0;
u16 hw_vsi_id;
if (list_empty(list_head))
@@ -5433,22 +5426,22 @@ end:
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
-static enum ice_status
+static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
struct ice_vsi_list_map_info *vsi_list_info;
enum ice_sw_lkup_type lkup_type;
- enum ice_status status;
u16 vsi_list_id;
+ int status;
if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* A rule with the VSI being removed does not exist */
if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
lkup_type = ICE_SW_LKUP_LAST;
vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
@@ -5468,7 +5461,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle))
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
@@ -5532,27 +5525,27 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
* header. rinfo describes other information related to this rule such as
* forwarding IDs, priority of this rule, etc.
*/
-static enum ice_status
+static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
struct ice_adv_fltr_mgmt_list_entry *list_elem;
struct ice_prot_lkup_ext lkup_exts;
- enum ice_status status = 0;
bool remove_rule = false;
struct mutex *rule_lock; /* Lock to protect filter rule list */
u16 i, rid, vsi_handle;
+ int status = 0;
memset(&lkup_exts, 0, sizeof(lkup_exts));
for (i = 0; i < lkups_cnt; i++) {
u16 count;
if (lkups[i].type >= ICE_PROTOCOL_LAST)
- return ICE_ERR_CFG;
+ return -EIO;
count = ice_fill_valid_words(&lkups[i], &lkup_exts);
if (!count)
- return ICE_ERR_CFG;
+ return -EIO;
}
/* Create any special protocol/offset pairs, such as looking at tunnel
@@ -5565,7 +5558,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
/* If we did not find a recipe that matches the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
- return ICE_ERR_PARAM;
+ return -EINVAL;
rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
@@ -5597,7 +5590,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
s_rule->pdata.lkup_tx_rx.act = 0;
s_rule->pdata.lkup_tx_rx.index =
cpu_to_le16(list_elem->rule_info.fltr_rule_id);
@@ -5605,7 +5598,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1,
ice_aqc_opc_remove_sw_rules, NULL);
- if (!status || status == ICE_ERR_DOES_NOT_EXIST) {
+ if (!status || status == -ENOENT) {
struct ice_switch_info *sw = hw->switch_info;
mutex_lock(rule_lock);
@@ -5630,7 +5623,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* the remove_entry parameter. This function will remove a rule for a given
* vsi_handle with a given rule_id which is passed as a parameter in remove_entry
*/
-enum ice_status
+int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry)
{
@@ -5641,7 +5634,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
sw = hw->switch_info;
if (!sw->recp_list[remove_entry->rid].recp_created)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_head = &sw->recp_list[remove_entry->rid].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
if (list_itr->rule_info.fltr_rule_id ==
@@ -5653,7 +5646,92 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
}
}
/* either list is empty or unable to find rule */
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
+}
+
+/**
+ * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
+ * given VSI handle
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
+ *
+ * This function removes all the rules for a given VSI; as soon as removing a
+ * rule fails, it returns immediately with the error code, otherwise it
+ * returns success.
+ */
+int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
+ struct ice_vsi_list_map_info *map_info;
+ struct ice_adv_rule_info rinfo;
+ struct list_head *list_head;
+ struct ice_switch_info *sw;
+ int status;
+ u8 rid;
+
+ sw = hw->switch_info;
+ for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
+ if (!sw->recp_list[rid].recp_created)
+ continue;
+ if (!sw->recp_list[rid].adv_rule)
+ continue;
+
+ list_head = &sw->recp_list[rid].filt_rules;
+ list_for_each_entry_safe(list_itr, tmp_entry, list_head,
+ list_entry) {
+ rinfo = list_itr->rule_info;
+
+ if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
+ map_info = list_itr->vsi_list_info;
+ if (!map_info)
+ continue;
+
+ if (!test_bit(vsi_handle, map_info->vsi_map))
+ continue;
+ } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
+ continue;
+ }
+
+ rinfo.sw_act.vsi_handle = vsi_handle;
+ status = ice_rem_adv_rule(hw, list_itr->lkups,
+ list_itr->lkups_cnt, &rinfo);
+ if (status)
+ return status;
+ }
+ }
+ return 0;
+}
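Because the new helper stops at the first failure, a caller can only log and propagate the error; a hypothetical teardown-path sketch (ice_pf_to_dev() and vsi->idx per the driver's usual conventions):

static void demo_vsi_teardown_adv_rules(struct ice_pf *pf, struct ice_vsi *vsi)
{
	int err;

	err = ice_rem_adv_rule_for_vsi(&pf->hw, vsi->idx);
	if (err)
		dev_err(ice_pf_to_dev(pf),
			"failed to remove advanced rules for VSI %d, err %d\n",
			vsi->idx, err);
}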
+
+/**
+ * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
+ * @list_head: list for which filters need to be replayed
+ *
+ * Replay the advanced rule for the given VSI.
+ */
+static int
+ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
+ struct list_head *list_head)
+{
+ struct ice_rule_query_data added_entry = { 0 };
+ struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
+ int status = 0;
+
+ if (list_empty(list_head))
+ return status;
+ list_for_each_entry(adv_fltr, list_head, list_entry) {
+ struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
+ u16 lk_cnt = adv_fltr->lkups_cnt;
+
+ if (vsi_handle != rinfo->sw_act.vsi_handle)
+ continue;
+ status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
+ &added_entry);
+ if (status)
+ break;
+ }
+ return status;
}
/**
@@ -5663,17 +5741,20 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
*
* Replays filters for requested VSI via vsi_handle.
*/
-enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_switch_info *sw = hw->switch_info;
- enum ice_status status = 0;
+ int status;
u8 i;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
struct list_head *head;
head = &sw->recp_list[i].filt_replay_rules;
- status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
+ if (!sw->recp_list[i].adv_rule)
+ status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
+ else
+ status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
if (status)
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d8a38906f16f..d8334beaaa8a 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -256,7 +256,7 @@ struct ice_vsi_list_map_info {
struct ice_fltr_list_entry {
struct list_head list_entry;
- enum ice_status status;
+ int status;
struct ice_fltr_info fltr_info;
};
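With ice_fltr_list_entry.status now a plain int, the list-based add/remove helpers record a standard errno per entry while the function's return value reflects only the first failure (see the v_list_itr->status assignments earlier in this patch). A caller that wants per-entry results can walk the list afterwards, sketched:

	struct ice_fltr_list_entry *itr;

	/* after ice_add_mac()/ice_remove_vlan() etc. on this list */
	list_for_each_entry(itr, &fltr_list, list_entry)
		if (itr->status)
			pr_debug("entry failed with errno %d\n", itr->status);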
@@ -302,75 +302,69 @@ enum ice_promisc_flags {
};
/* VSI related commands */
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
void ice_clear_all_vsi_ctx(struct ice_hw *hw);
/* Switch config */
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
+int ice_get_initial_sw_cfg(struct ice_hw *hw);
-enum ice_status
+int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id);
-enum ice_status
+int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
/* Switch/bridge related commands */
-enum ice_status
+int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
struct ice_rule_query_data *added_entry);
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
-enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
-enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-enum ice_status
-ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
-enum ice_status
-ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
-int
-ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
+int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
+int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
+int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
+int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
+int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
-enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
/* Promisc/defport setup for VSIs */
-enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
-enum ice_status
+int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
-enum ice_status
+int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
-enum ice_status
+int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
-enum ice_status
-ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
+int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry);
-enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
+int ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
+int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 25cca5c4ae57..e8aab664270a 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -398,9 +398,8 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
struct ice_hw *hw = &vsi->back->hw;
struct ice_adv_lkup_elem *list;
u32 flags = fltr->flags;
- enum ice_status status;
int lkups_cnt;
- int ret = 0;
+ int ret;
int i;
if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
@@ -449,14 +448,13 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
- status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
- if (status == ICE_ERR_ALREADY_EXISTS) {
+ ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (ret == -EEXIST) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
- } else if (status) {
+ } else if (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
- ret = -EIO;
goto exit;
}
@@ -1162,7 +1160,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_rem.vsi_handle = fltr->dest_id;
err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
if (err) {
- if (err == ICE_ERR_DOES_NOT_EXIST) {
+ if (err == -ENOENT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
return -ENOENT;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index dccf09eefc75..3e38695f1c9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -3,8 +3,9 @@
/* The driver transmit and receive code */
-#include <linux/prefetch.h>
#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/xdp.h>
@@ -219,6 +220,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
+ /* get the bql data ready */
+ if (!ice_ring_is_xdp(tx_ring))
+ netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+
tx_buf = &tx_ring->tx_buf[i];
tx_desc = ICE_TX_DESC(tx_ring, i);
i -= tx_ring->count;
@@ -232,6 +237,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
if (!eop_desc)
break;
+ /* follow the guidelines of other drivers */
+ prefetchw(&tx_buf->skb->users);
+
smp_rmb(); /* prevent any other reads prior to eop_desc */
ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
@@ -304,8 +312,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
- netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
- total_bytes);
+ if (ice_ring_is_xdp(tx_ring))
+ return !!budget;
+
+ netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
@@ -314,11 +324,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
* sees the new next_to_clean.
*/
smp_mb();
- if (__netif_subqueue_stopped(tx_ring->netdev,
- tx_ring->q_index) &&
+ if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
!test_bit(ICE_VSI_DOWN, vsi->state)) {
- netif_wake_subqueue(tx_ring->netdev,
- tx_ring->q_index);
+ netif_tx_wake_queue(txring_txq(tx_ring));
++tx_ring->tx_stats.restart_q;
}
}
@@ -568,7 +576,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
goto out_failure;
return ICE_XDP_REDIR;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -940,7 +948,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
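
napi_build_skb() differs from build_skb() only in where the struct sk_buff head comes from: it is taken from the per-CPU NAPI cache, which avoids a slab round-trip and is safe here because ice_build_skb() runs exclusively in NAPI poll context. The surrounding pattern, reduced to essentials; buf, headroom and datasize are placeholders:

/* buf points at the start of the buffer, headroom included, and
 * truesize covers the whole slice (placeholder variables).
 */
struct sk_buff *skb = napi_build_skb(buf, truesize);

if (unlikely(!skb))
	return NULL;		/* caller recycles the page */

skb_reserve(skb, headroom);	/* skip the XDP headroom */
__skb_put(skb, datasize);	/* expose the received payload */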
@@ -1524,7 +1532,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*/
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
+ netif_tx_stop_queue(txring_txq(tx_ring));
/* Memory barrier before checking head and tail */
smp_mb();
@@ -1532,8 +1540,8 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
if (likely(ICE_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
- /* A reprieve! - use start_subqueue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_tx_start_queue(txring_txq(tx_ring));
++tx_ring->tx_stats.restart_q;
return 0;
}
@@ -1575,6 +1583,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
struct sk_buff *skb;
skb_frag_t *frag;
dma_addr_t dma;
+ bool kick;
td_tag = off->td_l2tag1;
td_cmd = off->td_cmd;
@@ -1656,9 +1665,6 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
tx_buf = &tx_ring->tx_buf[i];
}
- /* record bytecount for BQL */
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(first->skb);
@@ -1687,7 +1693,10 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
+ kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
+ netdev_xmit_more());
+ if (kick)
writel(i, tx_ring->tail);
return;
@@ -2272,6 +2281,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
return NETDEV_TX_BUSY;
}
+ /* prefetch the BQL data, which is touched infrequently and likely cold */
+ netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
+
offload.tx_ring = tx_ring;
/* record the location of the first descriptor for this packet */
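
Taken together, the ice_txrx.c hunks convert the ring to byte queue limits (BQL): byte accounting moves to the end of ice_tx_map() via __netdev_tx_sent_queue(), whose return value also folds the netdev_xmit_more() doorbell decision into a single call; completions are reported through netdev_tx_completed_queue() before the wake check; and the generic netif_tx_*_queue() helpers replace the subqueue variants so every step operates on the same struct netdev_queue. The producer/consumer pairing, stripped of ring bookkeeping (function names are illustrative):

/* Enqueue side: account bytes, ring the doorbell only when BQL or the
 * stack (xmit_more == false) says the hardware must be kicked now.
 */
static void example_tx_kick(struct ice_tx_ring *tx_ring,
			    unsigned int bytes, u16 next_to_use)
{
	if (__netdev_tx_sent_queue(txring_txq(tx_ring), bytes,
				   netdev_xmit_more()))
		writel(next_to_use, tx_ring->tail);
}

/* Completion side: report reclaimed work so BQL can adapt its limit,
 * then wake the queue if it was stopped (descriptor checks elided).
 */
static void example_tx_done(struct ice_tx_ring *tx_ring,
			    unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(txring_txq(tx_ring), pkts, bytes);
	if (netif_tx_queue_stopped(txring_txq(tx_ring)))
		netif_tx_wake_queue(txring_txq(tx_ring));
}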
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 1dd7e84f41f8..0e87b98e0966 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */
+#include <linux/filter.h>
+
#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
#include "ice_lib.h"
@@ -187,8 +189,7 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev
- (rx_ring, rx_desc));
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 9e0c2923c62e..546145dd1f02 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -6,8 +6,8 @@
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
+#define ICE_CHNL_MAX_TC 16
-#include "ice_status.h"
#include "ice_hw_autogen.h"
#include "ice_osdep.h"
#include "ice_controlq.h"
@@ -230,8 +230,8 @@ enum ice_fd_hw_seg {
ICE_FD_HW_SEG_MAX,
};
-/* 2 VSI = 1 ICE_VSI_PF + 1 ICE_VSI_CTRL */
-#define ICE_MAX_FDIR_VSI_PER_FILTER 2
+/* 1 ICE_VSI_PF + 1 ICE_VSI_CTRL + ICE_CHNL_MAX_TC */
+#define ICE_MAX_FDIR_VSI_PER_FILTER (2 + ICE_CHNL_MAX_TC)
struct ice_fd_hw_prof {
struct ice_flow_seg_info *fdir_seg[ICE_FD_HW_SEG_MAX];
@@ -279,6 +279,10 @@ struct ice_hw_common_caps {
#define ICE_NVM_PENDING_NETLIST BIT(2)
bool nvm_unified_update;
#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance;
+ /* Post update reset restriction */
+ bool reset_restrict_support;
};
/* IEEE 1588 TIME_SYNC specific info */
@@ -295,9 +299,30 @@ struct ice_hw_common_caps {
#define ICE_TS_TMR_IDX_ASSOC_S 24
#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
+/* TIME_REF clock rate specification */
+enum ice_time_ref_freq {
+ ICE_TIME_REF_FREQ_25_000 = 0,
+ ICE_TIME_REF_FREQ_122_880 = 1,
+ ICE_TIME_REF_FREQ_125_000 = 2,
+ ICE_TIME_REF_FREQ_153_600 = 3,
+ ICE_TIME_REF_FREQ_156_250 = 4,
+ ICE_TIME_REF_FREQ_245_760 = 5,
+
+ NUM_ICE_TIME_REF_FREQ
+};
+
+/* Clock source specification */
+enum ice_clk_src {
+ ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */
+ ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */
+
+ NUM_ICE_CLK_SRC
+};
+
struct ice_ts_func_info {
/* Function specific info */
- u32 clk_freq;
+ enum ice_time_ref_freq time_ref;
+ u8 clk_freq;
u8 clk_src;
u8 tmr_index_assoc;
u8 ena;
@@ -873,8 +898,6 @@ struct ice_hw {
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
- enum ice_aq_err pkg_dwnld_status;
-
/* Driver's package ver - (from the Ice Metadata section) */
struct ice_pkg_ver pkg_ver;
u8 pkg_name[ICE_PKG_NAME_SIZE];
@@ -919,6 +942,7 @@ struct ice_hw {
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
+ DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX);
u16 io_expander_handle;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index eee180d8c024..d64df81d4893 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -47,197 +47,6 @@ struct virtchnl_fdir_fltr_conf {
u32 flow_id;
};
-static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_TCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_SCTP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_TCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_SCTP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_GTPU_IP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_GTPU_IP,
- VIRTCHNL_PROTO_HDR_GTPU_EH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_L2TPV3,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_L2TPV3,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_AH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_AH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_PFCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_PFCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-struct virtchnl_fdir_pattern_match_item {
- enum virtchnl_proto_hdr_type *list;
- u64 input_set;
- u64 *meta;
-};
-
-static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
- {vc_pattern_ipv4, 0, NULL},
- {vc_pattern_ipv4_tcp, 0, NULL},
- {vc_pattern_ipv4_udp, 0, NULL},
- {vc_pattern_ipv4_sctp, 0, NULL},
- {vc_pattern_ipv6, 0, NULL},
- {vc_pattern_ipv6_tcp, 0, NULL},
- {vc_pattern_ipv6_udp, 0, NULL},
- {vc_pattern_ipv6_sctp, 0, NULL},
-};
-
-static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
- {vc_pattern_ipv4, 0, NULL},
- {vc_pattern_ipv4_tcp, 0, NULL},
- {vc_pattern_ipv4_udp, 0, NULL},
- {vc_pattern_ipv4_sctp, 0, NULL},
- {vc_pattern_ipv6, 0, NULL},
- {vc_pattern_ipv6_tcp, 0, NULL},
- {vc_pattern_ipv6_udp, 0, NULL},
- {vc_pattern_ipv6_sctp, 0, NULL},
- {vc_pattern_ether, 0, NULL},
- {vc_pattern_ipv4_gtpu, 0, NULL},
- {vc_pattern_ipv4_gtpu_eh, 0, NULL},
- {vc_pattern_ipv4_l2tpv3, 0, NULL},
- {vc_pattern_ipv6_l2tpv3, 0, NULL},
- {vc_pattern_ipv4_esp, 0, NULL},
- {vc_pattern_ipv6_esp, 0, NULL},
- {vc_pattern_ipv4_ah, 0, NULL},
- {vc_pattern_ipv6_ah, 0, NULL},
- {vc_pattern_ipv4_nat_t_esp, 0, NULL},
- {vc_pattern_ipv6_nat_t_esp, 0, NULL},
- {vc_pattern_ipv4_pfcp, 0, NULL},
- {vc_pattern_ipv6_pfcp, 0, NULL},
-};
-
struct virtchnl_fdir_inset_map {
enum virtchnl_proto_hdr_field field;
enum ice_flow_field fld;
@@ -751,7 +560,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
struct ice_flow_seg_info *old_seg;
struct ice_flow_prof *prof = NULL;
struct ice_fd_hw_prof *vf_prof;
- enum ice_status status;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -794,29 +602,26 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
tun ? ICE_FLTR_PTYPE_MAX : 0);
- status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- tun + 1, &prof);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ tun + 1, &prof);
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
goto err_exit;
}
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
- vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry1_h);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
if (ret) {
dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
flow, vf->vf_id);
goto err_prof;
}
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
- ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry2_h);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
if (ret) {
dev_dbg(dev,
"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
@@ -911,83 +716,6 @@ err_exit:
}
/**
- * ice_vc_fdir_match_pattern
- * @fltr: virtual channel add cmd buffer
- * @type: virtual channel protocol filter header type
- *
- * Matching the header type by comparing fltr and type's value.
- *
- * Return: true on success, and false on error.
- */
-static bool
-ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
- enum virtchnl_proto_hdr_type *type)
-{
- struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
- int i = 0;
-
- while ((i < proto->count) &&
- (*type == proto->proto_hdr[i].type) &&
- (*type != VIRTCHNL_PROTO_HDR_NONE)) {
- type++;
- i++;
- }
-
- return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
-}
-
-/**
- * ice_vc_fdir_get_pattern - get allowed list pattern
- * @vf: pointer to the VF info
- * @len: filter list length
- *
- * Return: pointer to allowed filter list
- */
-static const struct virtchnl_fdir_pattern_match_item *
-ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
-{
- const struct virtchnl_fdir_pattern_match_item *item;
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
- sizeof(hw->active_pkg_name))) {
- item = vc_fdir_pattern_comms;
- *len = ARRAY_SIZE(vc_fdir_pattern_comms);
- } else {
- item = vc_fdir_pattern_os;
- *len = ARRAY_SIZE(vc_fdir_pattern_os);
- }
-
- return item;
-}
-
-/**
- * ice_vc_fdir_search_pattern
- * @vf: pointer to the VF info
- * @fltr: virtual channel add cmd buffer
- *
- * Search for matched pattern from supported pattern list
- *
- * Return: 0 on success, and other on error.
- */
-static int
-ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
-{
- const struct virtchnl_fdir_pattern_match_item *pattern;
- int len, i;
-
- pattern = ice_vc_fdir_get_pattern(vf, &len);
-
- for (i = 0; i < len; i++)
- if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
- return 0;
-
- return -EINVAL;
-}
-
-/**
* ice_vc_fdir_parse_pattern
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
@@ -1299,11 +1027,11 @@ static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_fdir_fltr_conf *conf)
{
+ struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
int ret;
- ret = ice_vc_fdir_search_pattern(vf, fltr);
- if (ret)
- return ret;
+ if (!ice_vc_validate_pattern(vf, proto))
+ return -EINVAL;
ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
if (ret)
@@ -1467,7 +1195,6 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
struct ice_fdir_fltr *input = &conf->input;
struct ice_vsi *vsi, *ctrl_vsi;
struct ice_fltr_desc desc;
- enum ice_status status;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -1497,8 +1224,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
return -ENOMEM;
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- ret = ice_status_to_errno(status);
+ ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
if (ret) {
dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
vf->vf_id, input->flow_type);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 6427e7ec93de..39b80124d282 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -9,6 +9,7 @@
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
+#include "ice_flex_pipe.h"
#define FIELD_SELECTOR(proto_hdr_field) \
BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
@@ -18,18 +19,7 @@ struct ice_vc_hdr_match_type {
u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = {
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
{VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
{VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
{VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
@@ -67,83 +57,7 @@ struct ice_vc_hash_field_match_type {
};
static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = {
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
@@ -289,37 +203,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
}
/**
- * ice_err_to_virt_err - translate errors for VF return code
- * @ice_err: error return code
- */
-static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
-{
- switch (ice_err) {
- case ICE_SUCCESS:
- return VIRTCHNL_STATUS_SUCCESS;
- case ICE_ERR_BAD_PTR:
- case ICE_ERR_INVAL_SIZE:
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- case ICE_ERR_PARAM:
- case ICE_ERR_CFG:
- return VIRTCHNL_STATUS_ERR_PARAM;
- case ICE_ERR_NO_MEMORY:
- return VIRTCHNL_STATUS_ERR_NO_MEMORY;
- case ICE_ERR_NOT_READY:
- case ICE_ERR_RESET_FAILED:
- case ICE_ERR_FW_API_VER:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_FULL:
- case ICE_ERR_AQ_NO_WORK:
- case ICE_ERR_AQ_EMPTY:
- return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- default:
- return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- }
-}
-
-/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -770,8 +653,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
struct ice_hw *hw = &vsi->back->hw;
struct ice_aqc_vsi_props *info;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
@@ -794,12 +676,10 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -968,8 +848,8 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
struct device *dev = ice_pf_to_dev(vf->pf);
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- enum ice_status status;
u8 broadcast[ETH_ALEN];
+ int status;
if (ice_is_eswitch_mode_switchdev(vf->pf))
return 0;
@@ -977,9 +857,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
eth_broadcast_addr(broadcast);
status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
if (status) {
- dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
- vf->vf_id, ice_stat_str(status));
- return ice_status_to_errno(status);
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
+ vf->vf_id, status);
+ return status;
}
vf->num_mac++;
@@ -988,10 +868,10 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
if (status) {
- dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
&vf->hw_lan_addr.addr[0], vf->vf_id,
- ice_stat_str(status));
- return ice_status_to_errno(status);
+ status);
+ return status;
}
vf->num_mac++;
@@ -1341,45 +1221,50 @@ static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
ice_flush(hw);
}
-/**
- * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @promisc_m: mask of promiscuous config bits
- * @rm_promisc: promisc flag request from the VF to remove or add filter
- *
- * This function configures VF VSI promiscuous mode, based on the VF requests,
- * for Unicast, Multicast and VLAN
- */
-static enum ice_status
-ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
- bool rm_promisc)
+static int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
- struct ice_pf *pf = vf->pf;
- enum ice_status status = 0;
- struct ice_hw *hw;
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
- hw = &pf->hw;
- if (vsi->num_vlan) {
- status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
- rm_promisc);
- } else if (vf->port_vlan_info) {
- if (rm_promisc)
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info);
- else
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info);
- } else {
- if (rm_promisc)
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- else
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
+ if (vf->port_vlan_info)
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_info & VLAN_VID_MASK);
+ else if (vsi->num_vlan > 1)
+ status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+static int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (vf->port_vlan_info)
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_info & VLAN_VID_MASK);
+ else if (vsi->num_vlan > 1)
+ status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -ENOENT) {
+ dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
}
- return status;
+ return 0;
}
static void ice_vf_clear_counters(struct ice_vf *vf)
@@ -1415,8 +1300,8 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
if (!vsi->agg_node)
return;
@@ -1743,10 +1628,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
else
promisc_m = ICE_UCAST_PROMISC_BITS;
- if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
+ if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
dev_err(dev, "disabling promiscuous mode failed\n");
}
+ ice_eswitch_del_vf_mac_rule(vf);
+
ice_vf_fdir_exit(vf);
ice_vf_fdir_init(vf);
/* clean VF control VSI when resetting VF since it should be setup
@@ -1765,6 +1652,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_vf_post_vsi_rebuild(vf);
vsi = ice_get_vf_vsi(vf);
ice_eswitch_update_repr(vsi);
+ ice_eswitch_replay_vf_mac_rule(vf);
/* if the VF has been reset allow it to come up again */
if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
@@ -1846,7 +1734,6 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
u8 broadcast[ETH_ALEN];
- enum ice_status status;
struct ice_vsi *vsi;
struct device *dev;
int err;
@@ -1866,11 +1753,10 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
}
eth_broadcast_addr(broadcast);
- status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
- vf->vf_id, ice_stat_str(status));
- err = ice_status_to_errno(status);
+ err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
+ vf->vf_id, err);
goto release_vsi;
}
@@ -2116,7 +2002,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
int err;
err = ice_check_sriov_allowed(pf);
@@ -2136,9 +2021,9 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
return -EBUSY;
}
- status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
- if (status)
- return ice_status_to_errno(status);
+ err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
+ if (err)
+ return err;
err = ice_pci_sriov_ena(pf, num_vfs);
if (err) {
@@ -2272,9 +2157,9 @@ int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
- enum ice_status aq_ret;
struct device *dev;
struct ice_pf *pf;
+ int aq_ret;
if (!vf)
return -EINVAL;
@@ -2306,8 +2191,8 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
- vf->vf_id, ice_stat_str(aq_ret),
+ dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
+ vf->vf_id, aq_ret,
ice_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
@@ -2556,6 +2441,100 @@ static bool ice_vc_isvalid_ring_len(u16 ring_len)
}
/**
+ * ice_vc_validate_pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * Validate whether the given pattern is supported.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+ bool is_ipv4 = false;
+ bool is_ipv6 = false;
+ bool is_udp = false;
+ u16 ptype = -1;
+ int i = 0;
+
+ while (i < proto->count &&
+ proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+ switch (proto->proto_hdr[i].type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ ptype = ICE_PTYPE_MAC_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ptype = ICE_PTYPE_IPV4_PAY;
+ is_ipv4 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ptype = ICE_PTYPE_IPV6_PAY;
+ is_ipv6 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_UDP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_UDP_PAY;
+ is_udp = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_TCP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_TCP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_GTPU;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_GTPU;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_L2TPV3;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_L2TPV3;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ if (is_ipv4)
+ ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+ ICE_MAC_IPV4_ESP;
+ else if (is_ipv6)
+ ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+ ICE_MAC_IPV6_ESP;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_AH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_AH;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_AH;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_PFCP_SESSION;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_PFCP_SESSION;
+ goto out;
+ default:
+ break;
+ }
+ i++;
+ }
+
+out:
+ return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
+
+/**
* ice_vc_parse_rss_cfg - parses hash fields and headers from
* a specific virtchnl RSS cfg
* @hw: pointer to the hardware
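
ice_vc_validate_pattern() replaces both deleted allow-lists at once: instead of string-matching the active DDP package name ("ICE COMMS Package") and walking hard-coded pattern tables, the driver folds the VF's protocol header chain into one packet type and tests it against the hw_ptype bitmap that this patch adds to struct ice_hw (filled in while the package is parsed). The final check is a single bitmap lookup, roughly:

/* Rough shape of the lookup behind ice_hw_ptype_ena(); hw_ptype is
 * the DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX) member added to
 * struct ice_hw in the ice_type.h hunk above.
 */
static bool example_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
{
	return ptype < ICE_FLOW_PTYPE_MAX && test_bit(ptype, hw->hw_ptype);
}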
@@ -2578,18 +2557,10 @@ ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
const struct ice_vc_hdr_match_type *hdr_list;
int i, hf_list_len, hdr_list_len;
- if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
- sizeof(hw->active_pkg_name))) {
- hf_list = ice_vc_hash_field_list_comms;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms);
- hdr_list = ice_vc_hdr_list_comms;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms);
- } else {
- hf_list = ice_vc_hash_field_list_os;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os);
- hdr_list = ice_vc_hdr_list_os;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os);
- }
+ hf_list = ice_vc_hash_field_list;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+ hdr_list = ice_vc_hdr_list;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
struct virtchnl_proto_hdr *proto_hdr =
@@ -2691,10 +2662,15 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
goto error_param;
}
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
struct ice_vsi_ctx *ctx;
- enum ice_status status;
u8 lut_type, hash_type;
+ int status;
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
@@ -2723,9 +2699,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
} else {
vsi->info.q_opt_rss = ctx->info.q_opt_rss;
@@ -2750,19 +2725,18 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
vsi->vsi_num, v_ret);
}
} else {
- enum ice_status status;
+ int status;
status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
addl_hdrs);
- /* We just ignore ICE_ERR_DOES_NOT_EXIST, because
- * if two configurations share the same profile remove
- * one of them actually removes both, since the
- * profile is deleted.
+ /* We just ignore -ENOENT, because if two configurations
+ * share the same profile, removing one of them actually
+ * removes both, since the profile is deleted.
*/
- if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+ if (status && status != -ENOENT) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n",
- vf->vf_id, ice_stat_str(status));
+ dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
+ vf->vf_id, status);
}
}
}
@@ -2920,7 +2894,6 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
struct ice_pf *pf = np->vsi->back;
struct ice_vsi_ctx *ctx;
struct ice_vsi *vf_vsi;
- enum ice_status status;
struct device *dev;
struct ice_vf *vf;
int ret;
@@ -2970,12 +2943,10 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
}
- status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
- ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
- ice_stat_str(status));
- ret = -EIO;
+ ret = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
+ ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, ret);
goto out;
}
@@ -3021,10 +2992,10 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- enum ice_status mcast_status = 0, ucast_status = 0;
bool rm_promisc, alluni = false, allmulti = false;
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
+ int mcast_err = 0, ucast_err = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
struct device *dev;
@@ -3106,24 +3077,21 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
ucast_m = ICE_UCAST_PROMISC_BITS;
}
- ucast_status = ice_vf_set_vsi_promisc(vf, vsi, ucast_m,
- !alluni);
- if (ucast_status) {
- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
- alluni ? "en" : "dis", vf->vf_id);
- v_ret = ice_err_to_virt_err(ucast_status);
- }
+ if (alluni)
+ ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
+ else
+ ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
- mcast_status = ice_vf_set_vsi_promisc(vf, vsi, mcast_m,
- !allmulti);
- if (mcast_status) {
- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
- allmulti ? "en" : "dis", vf->vf_id);
- v_ret = ice_err_to_virt_err(mcast_status);
- }
+ if (allmulti)
+ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+ else
+ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+
+ if (ucast_err || mcast_err)
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
- if (!mcast_status) {
+ if (!mcast_err) {
if (allmulti &&
!test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
@@ -3133,7 +3101,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
vf->vf_id);
}
- if (!ucast_status) {
+ if (!ucast_err) {
if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
vf->vf_id);
@@ -3813,8 +3781,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
{
struct device *dev = ice_pf_to_dev(vf->pf);
u8 *mac_addr = vc_ether_addr->addr;
- enum ice_status status;
- int ret = 0;
+ int ret;
/* device MAC already added */
if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
@@ -3825,18 +3792,17 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return -EPERM;
}
- status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_ALREADY_EXISTS) {
+ ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
+ if (ret == -EEXIST) {
dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id);
/* don't return since we might need to update
* the primary MAC in ice_vfhw_mac_add() below
*/
- ret = -EEXIST;
- } else if (status) {
- dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
- mac_addr, vf->vf_id, ice_stat_str(status));
- return -EIO;
+ } else if (ret) {
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
+ mac_addr, vf->vf_id, ret);
+ return ret;
} else {
vf->num_mac++;
}
@@ -3913,20 +3879,20 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
{
struct device *dev = ice_pf_to_dev(vf->pf);
u8 *mac_addr = vc_ether_addr->addr;
- enum ice_status status;
+ int status;
if (!ice_can_vf_change_mac(vf) &&
ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
return 0;
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_DOES_NOT_EXIST) {
+ if (status == -ENOENT) {
dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
vf->vf_id);
return -ENOENT;
} else if (status) {
- dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
- mac_addr, vf->vf_id, ice_stat_str(status));
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_id, status);
return -EIO;
}
@@ -4533,6 +4499,7 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
for (i = 0; i < al->num_elements; i++) {
u8 *mac_addr = al->list[i].addr;
+ int result;
if (!is_unicast_ether_addr(mac_addr) ||
ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
@@ -4544,6 +4511,13 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
goto handle_mac_exit;
}
+ result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
+ if (result) {
+ dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_id, result);
+ goto handle_mac_exit;
+ }
+
ice_vfhw_mac_add(vf, &al->list[i]);
vf->num_mac++;
break;
@@ -5289,9 +5263,9 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
s16 vf_id = le16_to_cpu(event->desc.retval);
struct device *dev = ice_pf_to_dev(pf);
struct ice_mbx_data mbxdata;
- enum ice_status status;
bool malvf = false;
struct ice_vf *vf;
+ int status;
if (ice_validate_vf_id(pf, vf_id))
return false;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 7e28ecbbe7af..752487a1bdd6 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -203,6 +203,8 @@ void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index c895351b25e0..2388837d6d6c 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -481,7 +481,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
goto out_failure;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 9265901455cd..b9b9d35494d2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -792,7 +792,6 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
**/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
- s32 ret_val = 0;
struct e1000_nvm_info *nvm = &hw->nvm;
nvm->ops.acquire = igb_acquire_nvm_i210;
@@ -813,7 +812,7 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
nvm->ops.validate = NULL;
nvm->ops.update = NULL;
}
- return ret_val;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index fb1029352c3e..51a2dcaf553d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -864,7 +864,9 @@ static void igb_get_drvinfo(struct net_device *netdev,
}
static void igb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -875,7 +877,9 @@ static void igb_get_ringparam(struct net_device *netdev,
}
static int igb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct igb_ring *temp_ring;
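
The .get_ringparam()/.set_ringparam() prototypes were widened tree-wide this cycle: struct kernel_ethtool_ringparam carries ring attributes that do not fit the legacy UAPI struct, and the extack pointer lets a driver return a netlink error string. Drivers with nothing extra to report, as in the igb, igbvf and igc hunks here, just extend their signatures and ignore the new arguments. An illustrative stub; the limit constant is hypothetical:

#define EXAMPLE_MAX_RXD 4096	/* hypothetical hardware limit */

static void example_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	/* kernel_ring and extack are accepted but unused here */
	ring->rx_max_pending = EXAMPLE_MAX_RXD;
	ring->rx_pending = 512;
}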
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 446894dde182..38ba92022cd4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2927,7 +2927,7 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
__netif_tx_unlock(nq);
@@ -2961,7 +2961,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
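
txq_trans_cond_update() replaces the open-coded nq->trans_start = jiffies stores that kept the watchdog quiet on the queue shared between XDP and the regular xmit path; it writes the timestamp only when the value actually changed, sparing a dirtied cache line per packet. Its core is roughly:

/* Approximate shape of the helper from <linux/netdevice.h>. */
static inline void txq_trans_cond_update_sketch(struct netdev_queue *txq)
{
	unsigned long now = jiffies;

	if (READ_ONCE(txq->trans_start) != now)
		WRITE_ONCE(txq->trans_start, now);
}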
@@ -6739,12 +6739,119 @@ void igb_update_stats(struct igb_adapter *adapter)
}
}
-static void igb_tsync_interrupt(struct igb_adapter *adapter)
+static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
{
+ int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt);
+ struct e1000_hw *hw = &adapter->hw;
+ struct timespec64 ts;
+ u32 tsauxc;
+
+ if (pin < 0 || pin >= IGB_N_PEROUT)
+ return;
+
+ spin_lock(&adapter->tmreg_lock);
+
+ if (hw->mac.type == e1000_82580 ||
+ hw->mac.type == e1000_i354 ||
+ hw->mac.type == e1000_i350) {
+ s64 ns = timespec64_to_ns(&adapter->perout[pin].period);
+ u32 systiml, systimh, level_mask, level, rem;
+ u64 systim, now;
+
+ /* read systim registers in sequence */
+ rd32(E1000_SYSTIMR);
+ systiml = rd32(E1000_SYSTIML);
+ systimh = rd32(E1000_SYSTIMH);
+ systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml);
+ now = timecounter_cyc2time(&adapter->tc, systim);
+
+ if (pin < 2) {
+ level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000;
+ level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0;
+ } else {
+ level_mask = (tsintr_tt == 1) ? 0x80 : 0x40;
+ level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0;
+ }
+
+ div_u64_rem(now, ns, &rem);
+ systim = systim + (ns - rem);
+
+ /* synchronize pin level with rising/falling edges */
+ div_u64_rem(now, ns << 1, &rem);
+ if (rem < ns) {
+ /* first half of period */
+ if (level == 0) {
+ /* output is already low, skip this period */
+ systim += ns;
+ pr_notice("igb: periodic output on %s missed falling edge\n",
+ adapter->sdp_config[pin].name);
+ }
+ } else {
+ /* second half of period */
+ if (level == 1) {
+ /* output is already high, skip this period */
+ systim += ns;
+ pr_notice("igb: periodic output on %s missed rising edge\n",
+ adapter->sdp_config[pin].name);
+ }
+ }
+
+ /* on this chip family, tv_sec carries the upper bits of the raw
+ * counter value, not seconds
+ */
+ ts.tv_nsec = (u32)systim;
+ ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
+ } else {
+ ts = timespec64_add(adapter->perout[pin].start,
+ adapter->perout[pin].period);
+ }
+
+ /* u32 conversion of tv_sec is safe until y2106 */
+ wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec);
+ wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsauxc |= TSAUXC_EN_TT0;
+ wr32(E1000_TSAUXC, tsauxc);
+ adapter->perout[pin].start = ts;
+
+ spin_unlock(&adapter->tmreg_lock);
+}
+
+static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+{
+ int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt);
+ int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0;
+ int auxstmph = (tsintr_tt == 1) ? E1000_AUXSTMPH1 : E1000_AUXSTMPH0;
struct e1000_hw *hw = &adapter->hw;
struct ptp_clock_event event;
struct timespec64 ts;
- u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
+
+ if (pin < 0 || pin >= IGB_N_EXTTS)
+ return;
+
+ if (hw->mac.type == e1000_82580 ||
+ hw->mac.type == e1000_i354 ||
+ hw->mac.type == e1000_i350) {
+ s64 ns = rd32(auxstmpl);
+
+ ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
+ ts = ns_to_timespec64(ns);
+ } else {
+ ts.tv_nsec = rd32(auxstmpl);
+ ts.tv_sec = rd32(auxstmph);
+ }
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = tsintr_tt;
+ event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+ ptp_clock_event(adapter->ptp_clock, &event);
+}
+
+static void igb_tsync_interrupt(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ack = 0, tsicr = rd32(E1000_TSICR);
+ struct ptp_clock_event event;
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
@@ -6760,51 +6867,22 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter)
}
if (tsicr & TSINTR_TT0) {
- spin_lock(&adapter->tmreg_lock);
- ts = timespec64_add(adapter->perout[0].start,
- adapter->perout[0].period);
- /* u32 conversion of tv_sec is safe until y2106 */
- wr32(E1000_TRGTTIML0, ts.tv_nsec);
- wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
- tsauxc = rd32(E1000_TSAUXC);
- tsauxc |= TSAUXC_EN_TT0;
- wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[0].start = ts;
- spin_unlock(&adapter->tmreg_lock);
+ igb_perout(adapter, 0);
ack |= TSINTR_TT0;
}
if (tsicr & TSINTR_TT1) {
- spin_lock(&adapter->tmreg_lock);
- ts = timespec64_add(adapter->perout[1].start,
- adapter->perout[1].period);
- wr32(E1000_TRGTTIML1, ts.tv_nsec);
- wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
- tsauxc = rd32(E1000_TSAUXC);
- tsauxc |= TSAUXC_EN_TT1;
- wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[1].start = ts;
- spin_unlock(&adapter->tmreg_lock);
+ igb_perout(adapter, 1);
ack |= TSINTR_TT1;
}
if (tsicr & TSINTR_AUTT0) {
- nsec = rd32(E1000_AUXSTMPL0);
- sec = rd32(E1000_AUXSTMPH0);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
- event.timestamp = sec * 1000000000ULL + nsec;
- ptp_clock_event(adapter->ptp_clock, &event);
+ igb_extts(adapter, 0);
ack |= TSINTR_AUTT0;
}
if (tsicr & TSINTR_AUTT1) {
- nsec = rd32(E1000_AUXSTMPL1);
- sec = rd32(E1000_AUXSTMPH1);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 1;
- event.timestamp = sec * 1000000000ULL + nsec;
- ptp_clock_event(adapter->ptp_clock, &event);
+ igb_extts(adapter, 1);
ack |= TSINTR_AUTT1;
}
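
The extracted igb_perout() does more than the code it replaces: on 82580/i350/i354 it re-derives the next target time from the free-running SYSTIM counter rather than accumulating timespec additions, and it re-synchronizes the pin level with the requested waveform, skipping a half period when an edge was missed. The phase math, isolated as a sketch; half_ns is the stored half-period and level the pin's current state:

/* Advance to the next edge boundary after now; if the pin already sits
 * at the level the pending edge would produce, skip one half period.
 * Restates the logic of igb_perout() above, detached from registers.
 */
static u64 example_next_toggle(u64 now, u32 half_ns, int level)
{
	u32 rem;
	u64 next;

	div_u64_rem(now, half_ns, &rem);
	next = now + (half_ns - rem);		/* next edge boundary */

	div_u64_rem(now, half_ns * 2, &rem);	/* position in full period */
	if ((rem < half_ns && level == 0) ||	/* missed the falling edge */
	    (rem >= half_ns && level == 1))	/* missed the rising edge */
		next += half_ns;

	return next;
}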
@@ -8367,7 +8445,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -8422,7 +8500,7 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
result = IGB_XDP_REDIR;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0011b15e678c..6580fcddb4be 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -69,6 +69,7 @@
#define IGB_NBITS_82580 40
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+static void igb_ptp_sdp_init(struct igb_adapter *adapter);
/* SYSTIM read access for the 82576 */
static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
@@ -507,6 +508,158 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
+static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct igb_adapter *igb =
+ container_of(ptp, struct igb_adapter, ptp_caps);
+ u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, systiml,
+ systimh, level_mask, level, rem;
+ struct e1000_hw *hw = &igb->hw;
+ struct timespec64 ts, start;
+ unsigned long flags;
+ u64 systim, now;
+ int pin = -1;
+ s64 ns;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ if (on) {
+ pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+ if (rq->extts.index == 1) {
+ tsauxc_mask = TSAUXC_EN_TS1;
+ tsim_mask = TSINTR_AUTT1;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TS0;
+ tsim_mask = TSINTR_AUTT0;
+ }
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsim = rd32(E1000_TSIM);
+ if (on) {
+ igb_pin_extts(igb, rq->extts.index, pin);
+ tsauxc |= tsauxc_mask;
+ tsim |= tsim_mask;
+ } else {
+ tsauxc &= ~tsauxc_mask;
+ tsim &= ~tsim_mask;
+ }
+ wr32(E1000_TSAUXC, tsauxc);
+ wr32(E1000_TSIM, tsim);
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+ ns = ns >> 1;
+ if (on && ns < 8LL)
+ return -EINVAL;
+ ts = ns_to_timespec64(ns);
+ if (rq->perout.index == 1) {
+ tsauxc_mask = TSAUXC_EN_TT1;
+ tsim_mask = TSINTR_TT1;
+ trgttiml = E1000_TRGTTIML1;
+ trgttimh = E1000_TRGTTIMH1;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TT0;
+ tsim_mask = TSINTR_TT0;
+ trgttiml = E1000_TRGTTIML0;
+ trgttimh = E1000_TRGTTIMH0;
+ }
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsim = rd32(E1000_TSIM);
+ if (rq->perout.index == 1) {
+ tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
+ tsim &= ~TSINTR_TT1;
+ } else {
+ tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
+ tsim &= ~TSINTR_TT0;
+ }
+ if (on) {
+ int i = rq->perout.index;
+
+ /* read systim registers in sequence */
+ rd32(E1000_SYSTIMR);
+ systiml = rd32(E1000_SYSTIML);
+ systimh = rd32(E1000_SYSTIMH);
+ systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml);
+ now = timecounter_cyc2time(&igb->tc, systim);
+
+ if (pin < 2) {
+ level_mask = (i == 1) ? 0x80000 : 0x40000;
+ level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0;
+ } else {
+ level_mask = (i == 1) ? 0x80 : 0x40;
+ level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0;
+ }
+
+ div_u64_rem(now, ns, &rem);
+ systim = systim + (ns - rem);
+
+ /* synchronize pin level with rising/falling edges */
+ div_u64_rem(now, ns << 1, &rem);
+ if (rem < ns) {
+ /* first half of period */
+ if (level == 0) {
+ /* output is already low, skip this period */
+ systim += ns;
+ }
+ } else {
+ /* second half of period */
+ if (level == 1) {
+ /* output is already high, skip this period */
+ systim += ns;
+ }
+ }
+
+ start = ns_to_timespec64(systim + (ns - rem));
+ igb_pin_perout(igb, i, pin, 0);
+ igb->perout[i].start.tv_sec = start.tv_sec;
+ igb->perout[i].start.tv_nsec = start.tv_nsec;
+ igb->perout[i].period.tv_sec = ts.tv_sec;
+ igb->perout[i].period.tv_nsec = ts.tv_nsec;
+
+ wr32(trgttiml, (u32)systim);
+ wr32(trgttimh, ((u32)(systim >> 32)) & 0xFF);
+ tsauxc |= tsauxc_mask;
+ tsim |= tsim_mask;
+ }
+ wr32(E1000_TSAUXC, tsauxc);
+ wr32(E1000_TSIM, tsim);
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PPS:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -1015,10 +1168,6 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
bool is_l2 = false;
u32 regval;
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
@@ -1192,7 +1341,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- int i;
switch (hw->mac.type) {
case e1000_82576:
@@ -1215,16 +1363,21 @@ void igb_ptp_init(struct igb_adapter *adapter)
case e1000_82580:
case e1000_i354:
case e1000_i350:
+ igb_ptp_sdp_init(adapter);
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
- adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
+ adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
+ adapter->ptp_caps.n_pins = IGB_N_SDP;
adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_feature_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable_82580;
+ adapter->ptp_caps.verify = igb_ptp_verify_pin;
adapter->cc.read = igb_ptp_read_82580;
adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
@@ -1233,13 +1386,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
break;
case e1000_i210:
case e1000_i211:
- for (i = 0; i < IGB_N_SDP; i++) {
- struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
-
- snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
- ppd->index = i;
- ppd->func = PTP_PF_NONE;
- }
+ igb_ptp_sdp_init(adapter);
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
@@ -1285,6 +1432,23 @@ void igb_ptp_init(struct igb_adapter *adapter)
}
/**
+ * igb_ptp_sdp_init - utility function which inits the SDP config structs
+ * @adapter: Board private structure.
+ **/
+static void igb_ptp_sdp_init(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < IGB_N_SDP; i++) {
+ struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
+
+ snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
+ ppd->index = i;
+ ppd->func = PTP_PF_NONE;
+ }
+}
+
+/**
* igb_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
*
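
With n_pins, pin_config and the verify() hook populated, the 82580/i354/i350 family now exposes its SDP pins through the PTP character device the same way i210 does: userspace assigns a function to a pin, then requests the feature on the matching channel. A sketch of that flow; the device node is an example and error handling is elided:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	/* Route SDP0 to periodic-output channel 0, start a 1 Hz wave. */
	struct ptp_pin_desc pin = { .index = 0, .func = PTP_PF_PEROUT };
	struct ptp_perout_request req = {
		.index = 0,
		.period = { .sec = 1, .nsec = 0 },
	};
	int fd = open("/dev/ptp0", O_RDWR);	/* example device node */

	ioctl(fd, PTP_PIN_SETFUNC, &pin);
	ioctl(fd, PTP_PEROUT_REQUEST, &req);
	return 0;
}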
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 06e5bd646a0e..9d4322b74163 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -175,7 +175,9 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
}
static void igbvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -188,7 +190,9 @@ static void igbvf_get_ringparam(struct net_device *netdev,
}
static int igbvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *temp_ring;
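Every get/set_ringparam hunk in this series follows the same 5.17 ethtool API extension: the callbacks gain a kernel-only parameter block for fields with no room in the legacy UAPI struct, plus a netlink extack so failures can carry a message back to the caller. A minimal sketch of the new shape, with a hypothetical limit (4096 is a placeholder, not an igbvf value):

	static int example_set_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring,
					 struct kernel_ethtool_ringparam *kernel_ring,
					 struct netlink_ext_ack *extack)
	{
		if (ring->rx_pending > 4096 || ring->tx_pending > 4096) {
			NL_SET_ERR_MSG_MOD(extack, "ring size exceeds hardware limit");
			return -EINVAL;
		}
		/* resize the rings from ring->rx_pending / ring->tx_pending here */
		return 0;
	}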
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 4d988da68394..b78407289741 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1520,7 +1520,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter)
/* Allow time for pending master requests to run */
if (mac->ops.reset_hw(hw))
- dev_warn(&adapter->pdev->dev, "PF still resetting\n");
+ dev_info(&adapter->pdev->dev, "PF still resetting\n");
mac->ops.init_hw(hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index c7fe61509d5b..5c66b97c0cfa 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -85,9 +85,6 @@
#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8)
-/* Physical Func Reset Done Indication */
-#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
@@ -584,7 +581,6 @@
#define IGC_GEN_POLL_TIMEOUT 1920
/* PHY Control Register */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
@@ -605,9 +601,6 @@
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-/* Bit definitions for valid PHY IDs. I = Integrated E = External */
-#define I225_I_PHY_ID 0x67C9DC00
-
/* MDI Control */
#define IGC_MDIC_DATA_MASK 0x0000FFFF
#define IGC_MDIC_REG_MASK 0x001F0000
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index e0a76ac1bbbc..8cc077b712ad 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -567,8 +567,11 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev,
return ret_val;
}
-static void igc_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void
+igc_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -578,8 +581,11 @@ static void igc_ethtool_get_ringparam(struct net_device *netdev,
ring->tx_pending = adapter->tx_ring_count;
}
-static int igc_ethtool_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static int
+igc_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 587db7483f25..b1e72ec5f131 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -55,7 +55,6 @@ enum igc_mac_type {
enum igc_phy_type {
igc_phy_unknown = 0,
- igc_phy_none,
igc_phy_i225,
};
@@ -68,8 +67,6 @@ enum igc_media_type {
enum igc_nvm_type {
igc_nvm_unknown = 0,
igc_nvm_eeprom_spi,
- igc_nvm_flash_hw,
- igc_nvm_invm,
};
struct igc_info {
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index b6807e16eea9..66ea566488d1 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -473,13 +473,11 @@ s32 igc_init_nvm_params_i225(struct igc_hw *hw)
/* NVM Function Pointers */
if (igc_get_flash_presence_i225(hw)) {
- hw->nvm.type = igc_nvm_flash_hw;
nvm->ops.read = igc_read_nvm_srrd_i225;
nvm->ops.write = igc_write_nvm_srwr_i225;
nvm->ops.validate = igc_validate_nvm_checksum_i225;
nvm->ops.update = igc_update_nvm_checksum_i225;
} else {
- hw->nvm.type = igc_nvm_invm;
nvm->ops.read = igc_read_nvm_eerd;
nvm->ops.write = NULL;
nvm->ops.validate = NULL;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index d28a80a00953..2f17f36e94fd 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1718,24 +1718,26 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring,
static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
struct igc_rx_buffer *rx_buffer,
- union igc_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(va);
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(va - IGC_SKB_PAD, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, IGC_SKB_PAD);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, size);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
igc_rx_buffer_flip(rx_buffer, truesize);
return skb;
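Two things change in the build path above: napi_build_skb() draws skb heads from the per-CPU NAPI cache, which is safe here because this code only runs in softirq context, and XDP metadata is now preserved by recording it with skb_metadata_set(). The pattern in isolation (a sketch, not the driver's exact code):

	static struct sk_buff *example_build_skb(struct xdp_buff *xdp,
						 unsigned int truesize)
	{
		unsigned int metasize = xdp->data - xdp->data_meta;
		unsigned int size = xdp->data_end - xdp->data;
		struct sk_buff *skb;

		/* wrap the whole receive buffer, headroom included */
		skb = napi_build_skb(xdp->data_hard_start, truesize);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, xdp->data - xdp->data_hard_start);
		__skb_put(skb, size);
		if (metasize)
			skb_metadata_set(skb, metasize);

		return skb;
	}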
@@ -1746,6 +1748,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct xdp_buff *xdp,
ktime_t timestamp)
{
+ unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
void *va = xdp->data;
@@ -1753,10 +1756,11 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(va);
+ net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+ IGC_RX_HDR_LEN + metasize);
if (unlikely(!skb))
return NULL;
@@ -1769,7 +1773,13 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
+ ALIGN(headlen + metasize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
/* update all of the pointers */
size -= headlen;
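The copy path preserves metadata differently: the memcpy starts at data_meta so the metadata lands directly in front of the packet headers, skb_metadata_set() records its length, and __skb_pull() re-points skb->data at the MAC header where eth_type_trans() expects it. The same steps in isolation (sketch; tailroom is assumed to cover the ALIGN() overread, as it does in the ring buffers):

	static void example_copy_with_meta(struct sk_buff *skb, struct xdp_buff *xdp,
					   unsigned int headlen)
	{
		unsigned int metasize = xdp->data - xdp->data_meta;

		memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
		       ALIGN(headlen + metasize, sizeof(long)));
		if (metasize) {
			skb_metadata_set(skb, metasize);
			__skb_pull(skb, metasize);	/* data now at MAC header */
		}
	}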
@@ -2231,7 +2241,7 @@ static int __igc_xdp_run_prog(struct igc_adapter *adapter,
return IGC_XDP_REDIRECT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(adapter->netdev, prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
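bpf_warn_invalid_xdp_action() gained the netdev and prog arguments in 5.17 so its one-shot warning can identify which program on which device returned the bogus action code. The calling pattern shared by the igc/ixgbe/ixgbevf hunks in this patch, as a self-contained sketch:

	static int example_run_xdp(struct net_device *dev, struct bpf_prog *prog,
				   struct xdp_buff *xdp)
	{
		u32 act = bpf_prog_run_xdp(prog, xdp);

		switch (act) {
		case XDP_PASS:
			return 0;
		default:
			bpf_warn_invalid_xdp_action(dev, prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, prog, act);
			return -1;
		}
	}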
@@ -2354,7 +2364,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
if (!skb) {
xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
- igc_rx_offset(rx_ring) + pkt_offset, size, false);
+ igc_rx_offset(rx_ring) + pkt_offset,
+ size, true);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -2378,7 +2389,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
} else if (skb)
igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ skb = igc_build_skb(rx_ring, rx_buffer, &xdp);
else
skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
timestamp);
@@ -2448,8 +2459,10 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
- if (metasize)
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
return skb;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 4f9245aa79a1..0d6e3215e98f 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -560,10 +560,6 @@ static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
struct hwtstamp_config *config)
{
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
igc_ptp_disable_tx_timestamp(adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index a8cf5374be47..aeeb34e64610 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
+#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>
#include "igc.h"
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 582099a5ad41..46efcfab7234 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -464,7 +464,9 @@ ixgb_get_drvinfo(struct net_device *netdev,
static void
ixgb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
@@ -478,7 +480,9 @@ ixgb_get_ringparam(struct net_device *netdev,
static int
ixgb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 8362822316a9..f70967c32116 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1118,7 +1118,9 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
}
static void ixgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -1131,7 +1133,9 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
}
static int ixgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 45e2ec4d264d..89b467006291 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2170,7 +2170,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -2235,7 +2235,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
result = IXGBE_XDP_REDIR;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -3247,8 +3247,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
/* If Flow Director is enabled, set interrupt affinity */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
/* assign the mask for this irq */
- irq_set_affinity_hint(entry->vector,
- &q_vector->affinity_mask);
+ irq_update_affinity_hint(entry->vector,
+ &q_vector->affinity_mask);
}
}
@@ -3264,8 +3264,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
free_queue_irqs:
while (vector) {
vector--;
- irq_set_affinity_hint(adapter->msix_entries[vector].vector,
- NULL);
+ irq_update_affinity_hint(adapter->msix_entries[vector].vector,
+ NULL);
free_irq(adapter->msix_entries[vector].vector,
adapter->q_vector[vector]);
}
@@ -3398,7 +3398,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
continue;
/* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(entry->vector, NULL);
+ irq_update_affinity_hint(entry->vector, NULL);
free_irq(entry->vector, q_vector);
}
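These hunks are part of the tree-wide move away from irq_set_affinity_hint(), which both published a hint and forced the IRQ's affinity, to irq_update_affinity_hint(), which only publishes the hint for userspace tools such as irqbalance and leaves the effective mask alone, so affinities configured by the administrator survive. Usage in a nutshell (sketch):

	/* publish the preferred CPUs without enforcing them */
	irq_update_affinity_hint(entry->vector, &q_vector->affinity_mask);

	/* and drop the hint again before free_irq() */
	irq_update_affinity_hint(entry->vector, NULL);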
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 23ddfd79fc8b..336426a67ac1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -992,10 +992,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
bool is_l2 = false;
u32 regval;
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index a82533f21d36..bba3feaf3318 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -35,8 +35,6 @@ int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
struct xsk_buff_pool *pool,
u16 qid);
-void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
-
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index db2bc58dfcfd..b3fd8e5cd85b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -131,7 +131,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
goto out_failure;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 6bace746eaac..5f08779c0e4e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -281,6 +281,10 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ERR_INVALID_MAC_ADDR -1
#define IXGBE_ERR_RESET_FAILED -2
#define IXGBE_ERR_INVALID_ARGUMENT -3
+#define IXGBE_ERR_CONFIG -4
+#define IXGBE_ERR_MBX -5
+#define IXGBE_ERR_TIMEOUT -6
+#define IXGBE_ERR_PARAM -7
/* Transmit Config masks */
#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
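With IXGBE_ERR_MBX moving here from mbx.h and three new codes joining it, VF mailbox callers can now tell a misconfigured mailbox apart from a timed-out or oversized exchange. A hypothetical decoder, purely to spell out the intended meanings:

	static const char *example_mbx_strerror(s32 err)
	{
		switch (err) {
		case IXGBE_ERR_CONFIG:	return "mailbox not set up or op missing";
		case IXGBE_ERR_MBX:	return "mailbox protocol error";
		case IXGBE_ERR_TIMEOUT:	return "mailbox exchange timed out";
		case IXGBE_ERR_PARAM:	return "message exceeds mailbox size";
		default:		return err ? "other error" : "success";
		}
	}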
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 8380f905e708..3b41f83c8dff 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -225,7 +225,9 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -236,7 +238,9 @@ static void ixgbevf_get_ringparam(struct net_device *netdev,
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index e3e4676af9e4..e763cee0695e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -40,16 +40,16 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
spin_lock_bh(&adapter->mbx_lock);
- ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+ ret = ixgbevf_write_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
if (ret)
goto out;
- ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+ ret = ixgbevf_poll_mbx(hw, msgbuf, 2);
if (ret)
goto out;
ret = (int)msgbuf[1];
- if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0)
+ if (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE && ret >= 0)
ret = -1;
out:
@@ -77,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
spin_lock_bh(&adapter->mbx_lock);
- err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
+ err = ixgbevf_write_mbx(hw, msgbuf, 2);
if (err)
goto out;
- err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+ err = ixgbevf_poll_mbx(hw, msgbuf, 2);
if (err)
goto out;
@@ -623,6 +623,7 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index a0e325774819..e257390a4f6a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -430,6 +430,7 @@ extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
@@ -491,4 +492,8 @@ void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
#define hw_dbg(hw, format, arg...) \
netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg)
+
+s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);
+s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);
+
#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d81811ab4ec4..0015fcf1df2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -944,7 +944,7 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -1070,7 +1070,7 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
goto out_failure;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -2266,6 +2266,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
static const int api[] = {
+ ixgbe_mbox_api_15,
ixgbe_mbox_api_14,
ixgbe_mbox_api_13,
ixgbe_mbox_api_12,
@@ -2284,6 +2285,12 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
+ if (hw->api_version >= ixgbe_mbox_api_15) {
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ sizeof(struct ixgbe_mbx_operations));
+ }
+
spin_unlock_bh(&adapter->mbx_lock);
}
@@ -2627,6 +2634,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -4565,7 +4573,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
- memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
sizeof(struct ixgbe_mbx_operations));
/* setup the private structure */
@@ -4625,6 +4633,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 6bc1953263b9..a55dd978f7ca 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -15,16 +15,15 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
+ if (!countdown || !mbx->ops.check_for_msg)
+ return IXGBE_ERR_CONFIG;
+
while (countdown && mbx->ops.check_for_msg(hw)) {
countdown--;
udelay(mbx->udelay);
}
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-
- return countdown ? 0 : IXGBE_ERR_MBX;
+ return countdown ? 0 : IXGBE_ERR_TIMEOUT;
}
/**
@@ -38,87 +37,82 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
+ if (!countdown || !mbx->ops.check_for_ack)
+ return IXGBE_ERR_CONFIG;
+
while (countdown && mbx->ops.check_for_ack(hw)) {
countdown--;
udelay(mbx->udelay);
}
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-
- return countdown ? 0 : IXGBE_ERR_MBX;
+ return countdown ? 0 : IXGBE_ERR_TIMEOUT;
}
/**
- * ixgbevf_read_posted_mbx - Wait for message notification and receive message
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
+ * ixgbevf_read_mailbox_vf - read VF's mailbox register
+ * @hw: pointer to the HW structure
*
- * returns 0 if it successfully received a message notification and
- * copied it into the receive buffer.
+ * This function reads the mailbox register dedicated to the VF without
+ * losing the read-to-clear status bits.
**/
-static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+static u32 ixgbevf_read_mailbox_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
- if (!mbx->ops.read)
- goto out;
+ vf_mailbox |= hw->mbx.vf_mailbox;
+ hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
- ret_val = ixgbevf_poll_for_msg(hw);
-
- /* if ack received read message, otherwise we timed out */
- if (!ret_val)
- ret_val = mbx->ops.read(hw, msg, size);
-out:
- return ret_val;
+ return vf_mailbox;
}
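The shadowing above exists because the VFMAILBOX status bits are read-to-clear: hardware drops them on the very read that observes them, so each raw read is merged into a software copy and events are consumed from that copy rather than by re-reading the register. The idea in isolation (sketch):

	static u32 example_read_r2c(struct ixgbe_hw *hw)
	{
		/* this read clears the R2C bits in hardware */
		u32 raw = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);

		raw |= hw->mbx.vf_mailbox;	/* merge events seen earlier */
		hw->mbx.vf_mailbox |= raw & IXGBE_VFMAILBOX_R2C_BITS;

		return raw;
	}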
/**
- * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
+ * ixgbevf_clear_msg_vf - clear PF status bit
+ * @hw: pointer to the HW structure
*
- * returns 0 if it successfully copied message into the buffer and
- * received an ack to that message within delay * timeout period
+ * This function is used to clear the PFSTS bit in the VFMAILBOX register
**/
-static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+static void ixgbevf_clear_msg_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- /* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
- goto out;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) {
+ hw->mbx.stats.reqs++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
+ }
+}
- /* send msg */
- ret_val = mbx->ops.write(hw, msg, size);
+/**
+ * ixgbevf_clear_ack_vf - clear PF ACK bit
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to clear the PFACK bit in the VFMAILBOX register
+ **/
+static void ixgbevf_clear_ack_vf(struct ixgbe_hw *hw)
+{
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- /* if msg sent wait until we receive an ack */
- if (!ret_val)
- ret_val = ixgbevf_poll_for_ack(hw);
-out:
- return ret_val;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) {
+ hw->mbx.stats.acks++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
+ }
}
/**
- * ixgbevf_read_v2p_mailbox - read v2p mailbox
- * @hw: pointer to the HW structure
+ * ixgbevf_clear_rst_vf - clear PF reset bit
+ * @hw: pointer to the HW structure
*
- * This function is used to read the v2p mailbox without losing the read to
- * clear status bits.
+ * This function clears the reset indication and reset done bits in the
+ * VFMAILBOX register once the shared resources have been reset.
**/
-static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+static void ixgbevf_clear_rst_vf(struct ixgbe_hw *hw)
{
- u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
-
- v2p_mailbox |= hw->mbx.v2p_mailbox;
- hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- return v2p_mailbox;
+ if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) {
+ hw->mbx.stats.rsts++;
+ hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
+ IXGBE_VFMAILBOX_RSTD);
+ }
}
/**
@@ -131,14 +125,12 @@ static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
{
- u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
s32 ret_val = IXGBE_ERR_MBX;
- if (v2p_mailbox & mask)
+ if (vf_mailbox & mask)
ret_val = 0;
- hw->mbx.v2p_mailbox &= ~mask;
-
return ret_val;
}
@@ -172,6 +164,7 @@ static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
ret_val = 0;
+ ixgbevf_clear_ack_vf(hw);
hw->mbx.stats.acks++;
}
@@ -191,6 +184,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
IXGBE_VFMAILBOX_RSTI))) {
ret_val = 0;
+ ixgbevf_clear_rst_vf(hw);
hw->mbx.stats.rsts++;
}
@@ -205,19 +199,59 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
- s32 ret_val = IXGBE_ERR_MBX;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+ int countdown = mbx->timeout;
+ u32 vf_mailbox;
- /* Take ownership of the buffer */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+ if (!mbx->timeout)
+ return ret_val;
- /* reserve mailbox for VF use */
- if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
- ret_val = 0;
+ while (countdown--) {
+ /* Reserve mailbox for VF use */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* Verify that VF is the owner of the lock */
+ if (ixgbevf_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) {
+ ret_val = 0;
+ break;
+ }
+
+ /* Wait a bit before trying again */
+ udelay(mbx->udelay);
+ }
+
+ if (ret_val)
+ ret_val = IXGBE_ERR_TIMEOUT;
return ret_val;
}
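The lock is now taken with a read-back-verified retry loop: writing VFU does not guarantee ownership because the PF can win the race between the VF's write and its confirming read, so only a read that still shows VFU counts as success. Reduced to its skeleton (illustrative, not the driver's exact code):

	static bool example_try_vfu_lock(struct ixgbe_hw *hw, int tries, u32 delay_us)
	{
		while (tries--) {
			u32 reg = ixgbevf_read_mailbox_vf(hw);

			IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX,
					reg | IXGBE_VFMAILBOX_VFU);
			/* ownership counts only if the read-back confirms it */
			if (ixgbevf_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU)
				return true;

			udelay(delay_us);
		}

		return false;
	}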
/**
+ * ixgbevf_release_mbx_lock_vf - release mailbox lock
+ * @hw: pointer to the HW structure
+ **/
+static void ixgbevf_release_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ u32 vf_mailbox;
+
+ /* Return ownership of the buffer */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox &= ~IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+}
+
+/**
+ * ixgbevf_release_mbx_lock_vf_legacy - release mailbox lock
+ * @hw: pointer to the HW structure
+ **/
+static void ixgbevf_release_mbx_lock_vf_legacy(struct ixgbe_hw *__always_unused hw)
+{
+}
+
+/**
* ixgbevf_write_mbx_vf - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
@@ -227,6 +261,50 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
+ u32 vf_mailbox;
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent PF/VF race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_clear_msg_vf(hw);
+ ixgbevf_clear_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* interrupt the PF to tell it a message has been sent */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* if msg sent wait until we receive an ack */
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+out_no_write:
+ hw->mbx.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf_legacy - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied the message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
s32 ret_val;
u16 i;
@@ -237,7 +315,9 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
/* flush msg and acks as we are overwriting the message buffer */
ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_clear_msg_vf(hw);
ixgbevf_check_for_ack_vf(hw);
+ ixgbevf_clear_ack_vf(hw);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
@@ -263,6 +343,42 @@ out_no_write:
**/
static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
+ u32 vf_mailbox;
+ s32 ret_val;
+ u16 i;
+
+ /* check if there is a message from PF */
+ ret_val = ixgbevf_check_for_msg_vf(hw);
+ if (ret_val)
+ return ret_val;
+
+ ixgbevf_clear_msg_vf(hw);
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_ACK;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf_legacy - Reads a message from the inbox intended for VF
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read a message from the buffer
+ **/
+static s32 ixgbevf_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
s32 ret_val = 0;
u16 i;
@@ -298,7 +414,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
/* start mailbox as timed out and let the reset_hw call set the timeout
* value to begin communications
*/
- mbx->timeout = 0;
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
mbx->size = IXGBE_VFMAILBOX_SIZE;
@@ -312,12 +428,79 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbevf_poll_mbx - Wait for message and read it from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if a message was received and read from the mailbox in time
+ **/
+s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ if (!mbx->ops.read || !mbx->ops.check_for_msg || !mbx->timeout)
+ return ret_val;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx - Write a message to the mailbox and wait for ACK
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied the message into the buffer and
+ * received an ACK to that message within the specified period
+ **/
+s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+	/*
+	 * exit if we cannot write or release the mailbox,
+	 * or if there is no timeout defined
+	 */
+ if (!mbx->ops.write || !mbx->ops.check_for_ack || !mbx->ops.release ||
+ !mbx->timeout)
+ return ret_val;
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_PARAM;
+ else
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ return ret_val;
+}
+
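Together these two exported helpers replace the old read_posted/write_posted ops; callers now pair them explicitly, as ixgbevf_write_msg_read_ack() in vf.c does below. A typical round trip as a sketch (the SUCCESS/FAILURE check mirrors the updated vf.c call sites):

	static s32 example_round_trip(struct ixgbe_hw *hw, u32 *msg, u16 size)
	{
		s32 err = ixgbevf_write_mbx(hw, msg, size);

		if (err)
			return err;

		err = ixgbevf_poll_mbx(hw, msg, size);
		if (err)
			return err;

		/* the PF or's the result bits into word 0 of the reply */
		if (msg[0] & IXGBE_VT_MSGTYPE_FAILURE)
			return -EPERM;

		return 0;
	}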
const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.init_params = ixgbevf_init_mbx_params_vf,
+ .release = ixgbevf_release_mbx_lock_vf,
.read = ixgbevf_read_mbx_vf,
.write = ixgbevf_write_mbx_vf,
- .read_posted = ixgbevf_read_posted_mbx,
- .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
+
+const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .release = ixgbevf_release_mbx_lock_vf_legacy,
+ .read = ixgbevf_read_mbx_vf_legacy,
+ .write = ixgbevf_write_mbx_vf_legacy,
.check_for_msg = ixgbevf_check_for_msg_vf,
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 853796c8ef0e..7346ccf014a5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -7,7 +7,6 @@
#include "vf.h"
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
@@ -39,14 +38,17 @@
/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*.
- * Message ACK's are the value or'd with 0xF0000000
+ * Message results are the value or'd with 0xF0000000
*/
-/* Messages below or'd with this are the ACK */
-#define IXGBE_VT_MSGTYPE_ACK 0x80000000
-/* Messages below or'd with this are the NACK */
-#define IXGBE_VT_MSGTYPE_NACK 0x40000000
-/* Indicates that VF is still clear to send requests */
-#define IXGBE_VT_MSGTYPE_CTS 0x20000000
+#define IXGBE_VT_MSGTYPE_SUCCESS 0x80000000 /* Messages or'd with this
+ * have succeeded
+ */
+#define IXGBE_VT_MSGTYPE_FAILURE 0x40000000 /* Messages or'd with this
+ * have failed
+ */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests
+ */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
@@ -63,6 +65,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d459f5c8e98f..61d8970c6d1d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -13,13 +13,12 @@
static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
u32 *retmsg, u16 size)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 retval = mbx->ops.write_posted(hw, msg, size);
+ s32 retval = ixgbevf_write_mbx(hw, msg, size);
if (retval)
return retval;
- return mbx->ops.read_posted(hw, retmsg, size);
+ return ixgbevf_poll_mbx(hw, retmsg, size);
}
/**
@@ -75,6 +74,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
/* reset the api version */
hw->api_version = ixgbe_mbox_api_10;
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
+ sizeof(struct ixgbe_mbx_operations));
IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
IXGBE_WRITE_FLUSH(hw);
@@ -92,7 +94,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
msgbuf[0] = IXGBE_VF_RESET;
- mbx->ops.write_posted(hw, msgbuf, 1);
+ ixgbevf_write_mbx(hw, msgbuf, 1);
mdelay(10);
@@ -100,7 +102,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
* also set up the mc_filter_type which is piggy backed
* on the mac address in word 3
*/
- ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
if (ret_val)
return ret_val;
@@ -108,11 +110,11 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
* to indicate that no MAC address has yet been assigned for
* the VF.
*/
- if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
- msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_INVALID_MAC_ADDR;
- if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
ether_addr_copy(hw->mac.perm_addr, addr);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
@@ -269,7 +271,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
return -ENOMEM;
}
@@ -311,6 +313,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -323,12 +326,12 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
msgbuf[0] = IXGBE_VF_GET_RETA;
- err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+ err = ixgbevf_write_mbx(hw, msgbuf, 1);
if (err)
return err;
- err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
+ err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);
if (err)
return err;
@@ -336,14 +339,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* If the operation has been refused by a PF return -EPERM */
- if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
/* If we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such.
*/
- if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
/* ixgbevf doesn't support more than 2 queues at the moment */
@@ -379,6 +382,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -390,12 +394,12 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
}
msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
- err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+ err = ixgbevf_write_mbx(hw, msgbuf, 1);
if (err)
return err;
- err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
+ err = ixgbevf_poll_mbx(hw, msgbuf, 11);
if (err)
return err;
@@ -403,14 +407,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* If the operation has been refused by a PF return -EPERM */
- if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
/* If we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such.
*/
- if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
@@ -442,7 +446,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
- (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
return IXGBE_ERR_MBX;
}
@@ -545,8 +549,9 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
return -EOPNOTSUPP;
fallthrough;
- case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return -EOPNOTSUPP;
@@ -561,7 +566,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
return err;
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
return 0;
@@ -606,7 +611,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
- if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
err = IXGBE_ERR_INVALID_ARGUMENT;
mbx_err:
@@ -705,12 +710,15 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
- if (mbx->ops.read(hw, &in_msg, 1))
+ if (mbx->ops.read(hw, &in_msg, 1)) {
+ if (hw->api_version >= ixgbe_mbox_api_15)
+ mac->get_link_status = false;
goto out;
+ }
if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
/* msg is not CTS and is NACK we must have lost CTS status */
- if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
ret_val = -1;
goto out;
}
@@ -816,7 +824,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
if (ret_val)
return ret_val;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
- (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_MBX;
return 0;
@@ -863,7 +871,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* Store value and return 0 on success */
- if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
+ IXGBE_VT_MSGTYPE_SUCCESS)) {
hw->api_version = api;
return 0;
}
@@ -901,6 +910,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return 0;
@@ -918,7 +928,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
* some sort of mailbox error so we should treat it
* as such
*/
- if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
+ if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
/* record and validate values from message */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 1d8209df4162..54158dac8707 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -73,10 +73,9 @@ struct ixgbe_mac_info {
struct ixgbe_mbx_operations {
s32 (*init_params)(struct ixgbe_hw *hw);
+ void (*release)(struct ixgbe_hw *hw);
s32 (*read)(struct ixgbe_hw *, u32 *, u16);
s32 (*write)(struct ixgbe_hw *, u32 *, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
s32 (*check_for_msg)(struct ixgbe_hw *);
s32 (*check_for_ack)(struct ixgbe_hw *);
s32 (*check_for_rst)(struct ixgbe_hw *);
@@ -96,7 +95,7 @@ struct ixgbe_mbx_info {
struct ixgbe_mbx_stats stats;
u32 timeout;
u32 udelay;
- u32 v2p_mailbox;
+ u32 vf_mailbox;
u16 size;
};
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 072391c494ce..9b6fa27b7daf 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -65,8 +65,8 @@
/* use 2 static channels for TX/RX */
#define LTQ_ETOP_TX_CHANNEL 1
#define LTQ_ETOP_RX_CHANNEL 6
-#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL)
-#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL)
+#define IS_TX(x) ((x) == LTQ_ETOP_TX_CHANNEL)
+#define IS_RX(x) ((x) == LTQ_ETOP_RX_CHANNEL)
#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x))
#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y))
@@ -111,9 +111,9 @@ ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
if (!ch->skb[ch->dma.desc])
return -ENOMEM;
- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
- ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
- DMA_FROM_DEVICE);
+ ch->dma.desc_base[ch->dma.desc].addr =
+ dma_map_single(&priv->pdev->dev, ch->skb[ch->dma.desc]->data,
+ MAX_DMA_DATA_LEN, DMA_FROM_DEVICE);
ch->dma.desc_base[ch->dma.desc].addr =
CPHYSADDR(ch->skb[ch->dma.desc]->data);
ch->dma.desc_base[ch->dma.desc].ctl =
@@ -135,7 +135,7 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch)
spin_lock_irqsave(&priv->lock, flags);
if (ltq_etop_alloc_skb(ch)) {
netdev_err(ch->netdev,
- "failed to allocate new rx buffer, stopping DMA\n");
+ "failed to allocate new rx buffer, stopping DMA\n");
ltq_dma_close(&ch->dma);
}
ch->dma.desc++;
@@ -185,7 +185,7 @@ ltq_etop_poll_tx(struct napi_struct *napi, int budget)
dev_kfree_skb_any(ch->skb[ch->tx_free]);
ch->skb[ch->tx_free] = NULL;
memset(&ch->dma.desc_base[ch->tx_free], 0,
- sizeof(struct ltq_dma_desc));
+ sizeof(struct ltq_dma_desc));
ch->tx_free++;
ch->tx_free %= LTQ_DESC_NUM;
}
@@ -218,6 +218,7 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
free_irq(ch->dma.irq, priv);
if (IS_RX(ch->idx)) {
int desc;
+
for (desc = 0; desc < LTQ_DESC_NUM; desc++)
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
}
@@ -246,18 +247,18 @@ ltq_etop_hw_init(struct net_device *dev)
switch (priv->pldata->mii_mode) {
case PHY_INTERFACE_MODE_RMII:
- ltq_etop_w32_mask(ETOP_MII_MASK,
- ETOP_MII_REVERSE, LTQ_ETOP_CFG);
+ ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_REVERSE,
+ LTQ_ETOP_CFG);
break;
case PHY_INTERFACE_MODE_MII:
- ltq_etop_w32_mask(ETOP_MII_MASK,
- ETOP_MII_NORMAL, LTQ_ETOP_CFG);
+ ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_NORMAL,
+ LTQ_ETOP_CFG);
break;
default:
netdev_err(dev, "unknown mii mode %d\n",
- priv->pldata->mii_mode);
+ priv->pldata->mii_mode);
return -ENOTSUPP;
}
@@ -270,7 +271,8 @@ ltq_etop_hw_init(struct net_device *dev)
int irq = LTQ_DMA_CH0_INT + i;
struct ltq_etop_chan *ch = &priv->ch[i];
- ch->idx = ch->dma.nr = i;
+ ch->dma.nr = i;
+ ch->idx = ch->dma.nr;
ch->dma.dev = &priv->pdev->dev;
if (IS_TX(i)) {
@@ -305,9 +307,9 @@ ltq_etop_hw_init(struct net_device *dev)
static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
- strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
+ strscpy(info->bus_info, "internal", sizeof(info->bus_info));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops ltq_etop_ethtool_ops = {
@@ -399,7 +401,7 @@ ltq_etop_mdio_init(struct net_device *dev)
priv->mii_bus->write = ltq_etop_mdio_wr;
priv->mii_bus->name = "ltq_mii";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- priv->pdev->name, priv->pdev->id);
+ priv->pdev->name, priv->pdev->id);
if (mdiobus_register(priv->mii_bus)) {
err = -ENXIO;
goto err_out_free_mdiobus;
@@ -496,8 +498,9 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
netif_trans_update(dev);
spin_lock_irqsave(&priv->lock, flags);
- desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len,
+ desc->addr = ((unsigned int)dma_map_single(&priv->pdev->dev, skb->data, len,
DMA_TO_DEVICE)) - byte_offset;
+ /* Make sure the address is written before we give it to HW */
wmb();
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
@@ -539,7 +542,7 @@ ltq_etop_set_mac_address(struct net_device *dev, void *p)
spin_lock_irqsave(&priv->lock, flags);
ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
- LTQ_ETOP_MAC_DA1);
+ LTQ_ETOP_MAC_DA1);
spin_unlock_irqrestore(&priv->lock, flags);
}
return ret;
@@ -652,15 +655,15 @@ ltq_etop_probe(struct platform_device *pdev)
}
res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
+ resource_size(res), dev_name(&pdev->dev));
if (!res) {
dev_err(&pdev->dev, "failed to request etop resource\n");
err = -EBUSY;
goto err_out;
}
- ltq_etop_membase = devm_ioremap(&pdev->dev,
- res->start, resource_size(res));
+ ltq_etop_membase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (!ltq_etop_membase) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
pdev->id);
@@ -687,22 +690,22 @@ ltq_etop_probe(struct platform_device *pdev)
err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
if (err < 0) {
dev_err(&pdev->dev, "unable to read tx-burst-length property\n");
- return err;
+ goto err_free;
}
err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
if (err < 0) {
dev_err(&pdev->dev, "unable to read rx-burst-length property\n");
- return err;
+ goto err_free;
}
for (i = 0; i < MAX_DMA_CHAN; i++) {
if (IS_TX(i))
netif_napi_add(dev, &priv->ch[i].napi,
- ltq_etop_poll_tx, 8);
+ ltq_etop_poll_tx, 8);
else if (IS_RX(i))
netif_napi_add(dev, &priv->ch[i].napi,
- ltq_etop_poll_rx, 32);
+ ltq_etop_poll_rx, 32);
priv->ch[i].netdev = dev;
}
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 80bfaf2fec92..41d11137cde0 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -27,6 +27,9 @@
#define XRX200_DMA_TX 1
#define XRX200_DMA_BURST_LEN 8
+#define XRX200_DMA_PACKET_COMPLETE 0
+#define XRX200_DMA_PACKET_IN_PROGRESS 1
+
/* cpu port mac */
#define PMAC_RX_IPG 0x0024
#define PMAC_RX_IPG_MASK 0xf
@@ -60,7 +63,14 @@ struct xrx200_chan {
struct napi_struct napi;
struct ltq_dma_channel dma;
- struct sk_buff *skb[LTQ_DESC_NUM];
+
+ union {
+ struct sk_buff *skb[LTQ_DESC_NUM];
+ void *rx_buff[LTQ_DESC_NUM];
+ };
+
+ struct sk_buff *skb_head;
+ struct sk_buff *skb_tail;
struct xrx200_priv *priv;
};
@@ -72,6 +82,7 @@ struct xrx200_priv {
struct xrx200_chan chan_rx;
u16 rx_buf_size;
+ u16 rx_skb_size;
struct net_device *net_dev;
struct device *dev;
@@ -109,6 +120,12 @@ static int xrx200_buffer_size(int mtu)
return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}
+static int xrx200_skb_size(u16 buf_size)
+{
+ return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
@@ -167,30 +184,29 @@ static int xrx200_close(struct net_device *net_dev)
return 0;
}
-static int xrx200_alloc_skb(struct xrx200_chan *ch)
+static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
{
- struct sk_buff *skb = ch->skb[ch->dma.desc];
+ void *buf = ch->rx_buff[ch->dma.desc];
struct xrx200_priv *priv = ch->priv;
dma_addr_t mapping;
int ret = 0;
- ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
- priv->rx_buf_size);
- if (!ch->skb[ch->dma.desc]) {
+ ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
+ if (!ch->rx_buff[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}
- mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+ mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
priv->rx_buf_size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, mapping))) {
- dev_kfree_skb_any(ch->skb[ch->dma.desc]);
- ch->skb[ch->dma.desc] = skb;
+ skb_free_frag(ch->rx_buff[ch->dma.desc]);
+ ch->rx_buff[ch->dma.desc] = buf;
ret = -ENOMEM;
goto skip;
}
- ch->dma.desc_base[ch->dma.desc].addr = mapping;
+ ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
/* Make sure the address is written before we give it to HW */
wmb();
skip:
@@ -204,12 +220,14 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
{
struct xrx200_priv *priv = ch->priv;
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
- struct sk_buff *skb = ch->skb[ch->dma.desc];
- int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
+ void *buf = ch->rx_buff[ch->dma.desc];
+ u32 ctl = desc->ctl;
+ int len = (ctl & LTQ_DMA_SIZE_MASK);
struct net_device *net_dev = priv->net_dev;
+ struct sk_buff *skb;
int ret;
- ret = xrx200_alloc_skb(ch);
+ ret = xrx200_alloc_buf(ch, napi_alloc_frag);
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
@@ -220,13 +238,39 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
return ret;
}
+ skb = build_skb(buf, priv->rx_skb_size);
+ skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, net_dev);
- netif_receive_skb(skb);
- net_dev->stats.rx_packets++;
- net_dev->stats.rx_bytes += len;
- return 0;
+ /* add buffers to skb via skb->frag_list */
+ if (ctl & LTQ_DMA_SOP) {
+ ch->skb_head = skb;
+ ch->skb_tail = skb;
+ skb_reserve(skb, NET_IP_ALIGN);
+ } else if (ch->skb_head) {
+ if (ch->skb_head == ch->skb_tail)
+ skb_shinfo(ch->skb_tail)->frag_list = skb;
+ else
+ ch->skb_tail->next = skb;
+ ch->skb_tail = skb;
+ ch->skb_head->len += skb->len;
+ ch->skb_head->data_len += skb->len;
+ ch->skb_head->truesize += skb->truesize;
+ }
+
+ if (ctl & LTQ_DMA_EOP) {
+ ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
+ netif_receive_skb(ch->skb_head);
+ net_dev->stats.rx_packets++;
+ net_dev->stats.rx_bytes += ch->skb_head->len;
+ ch->skb_head = NULL;
+ ch->skb_tail = NULL;
+ ret = XRX200_DMA_PACKET_COMPLETE;
+ } else {
+ ret = XRX200_DMA_PACKET_IN_PROGRESS;
+ }
+
+ return ret;
}
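Receive buffers are now raw page frags instead of pre-built skbs, so a frame spanning several DMA descriptors is assembled through the head skb's frag_list: the SOP buffer becomes the head, each following buffer is chained onto it with the head's byte accounting kept up to date, and only EOP delivers the finished packet. The chaining step in isolation (sketch; the caller still sets skb->protocol via eth_type_trans() before handing the head to the stack):

	static void example_chain_rx(struct sk_buff **head, struct sk_buff **tail,
				     struct sk_buff *skb, bool sop)
	{
		if (sop) {			/* first fragment: start a new head */
			*head = *tail = skb;
			return;
		}
		if (!*head)			/* no SOP seen yet: nothing to chain to */
			return;
		if (*head == *tail)		/* second fragment opens the frag_list */
			skb_shinfo(*tail)->frag_list = skb;
		else				/* later fragments extend it */
			(*tail)->next = skb;
		*tail = skb;
		(*head)->len += skb->len;
		(*head)->data_len += skb->len;
		(*head)->truesize += skb->truesize;
	}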
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
@@ -241,7 +285,9 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
ret = xrx200_hw_receive(ch);
- if (ret)
+ if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
+ continue;
+ if (ret != XRX200_DMA_PACKET_COMPLETE)
return ret;
rx++;
} else {
@@ -362,12 +408,13 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
struct xrx200_chan *ch_rx = &priv->chan_rx;
int old_mtu = net_dev->mtu;
bool running = false;
- struct sk_buff *skb;
+ void *buff;
int curr_desc;
int ret = 0;
net_dev->mtu = new_mtu;
priv->rx_buf_size = xrx200_buffer_size(new_mtu);
+ priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
if (new_mtu <= old_mtu)
return ret;
@@ -383,14 +430,15 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
ch_rx->dma.desc++) {
- skb = ch_rx->skb[ch_rx->dma.desc];
- ret = xrx200_alloc_skb(ch_rx);
+ buff = ch_rx->rx_buff[ch_rx->dma.desc];
+ ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
if (ret) {
net_dev->mtu = old_mtu;
priv->rx_buf_size = xrx200_buffer_size(old_mtu);
+ priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
break;
}
- dev_kfree_skb_any(skb);
+ skb_free_frag(buff);
}
ch_rx->dma.desc = curr_desc;
@@ -443,7 +491,7 @@ static int xrx200_dma_init(struct xrx200_priv *priv)
ltq_dma_alloc_rx(&ch_rx->dma);
for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
ch_rx->dma.desc++) {
- ret = xrx200_alloc_skb(ch_rx);
+ ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
if (ret)
goto rx_free;
}
@@ -478,7 +526,7 @@ rx_ring_free:
/* free the allocated RX ring */
for (i = 0; i < LTQ_DESC_NUM; i++) {
if (priv->chan_rx.skb[i])
- dev_kfree_skb_any(priv->chan_rx.skb[i]);
+ skb_free_frag(priv->chan_rx.rx_buff[i]);
}
rx_free:
@@ -495,7 +543,7 @@ static void xrx200_hw_cleanup(struct xrx200_priv *priv)
/* free the allocated RX ring */
for (i = 0; i < LTQ_DESC_NUM; i++)
- dev_kfree_skb_any(priv->chan_rx.skb[i]);
+ skb_free_frag(priv->chan_rx.rx_buff[i]);
}
static int xrx200_probe(struct platform_device *pdev)
@@ -520,6 +568,7 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->min_mtu = ETH_ZLEN;
net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
+ priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
/* load the memory ranges */
priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
@@ -564,8 +613,10 @@ static int xrx200_probe(struct platform_device *pdev)
PMAC_HD_CTL);
/* setup NAPI */
- netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
- netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
+ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx,
+ NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping,
+ NAPI_POLL_WEIGHT);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index bb14fa2241a3..105247582684 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1638,7 +1638,9 @@ static int mv643xx_eth_set_coalesce(struct net_device *dev,
}
static void
-mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
+ struct kernel_ethtool_ringparam *kernel_er,
+ struct netlink_ext_ack *extack)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
@@ -1650,7 +1652,9 @@ mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
}
static int
-mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
+ struct kernel_ethtool_ringparam *kernel_er,
+ struct netlink_ext_ack *extack)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
@@ -3197,7 +3201,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->hw_features = dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
- dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(dev, MV643XX_MAX_TSO_SEGS);
/* MTU range: 64 - 9500 */
dev->min_mtu = 64;
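
All the ringparam conversions in this series follow the same shape: the ethtool core now passes a kernel-only struct kernel_ethtool_ringparam plus a netlink extack to the get/set callbacks. A minimal sketch of a driver implementation under the new signature (struct and limit names are hypothetical):

	static void my_get_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ring,
				     struct kernel_ethtool_ringparam *kernel_ring,
				     struct netlink_ext_ack *extack)
	{
		struct my_priv *priv = netdev_priv(dev);

		ring->rx_max_pending = MY_RX_RING_MAX;	/* hypothetical limits */
		ring->tx_max_pending = MY_TX_RING_MAX;
		ring->rx_pending = priv->rx_ring_size;
		ring->tx_pending = priv->tx_ring_size;
	}
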
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5a7bdca22a63..83c8908f0cc7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -38,6 +38,7 @@
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
+#include <net/pkt_cls.h>
#include <linux/bpf_trace.h>
/* Registers */
@@ -247,12 +248,39 @@
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
+#define MVNETA_TXQ_CMD1_REG 0x3e00
+#define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 BIT(3)
+#define MVNETA_TXQ_CMD1_BW_LIM_EN BIT(0)
+#define MVNETA_REFILL_NUM_CLK_REG 0x3e08
+#define MVNETA_REFILL_MAX_NUM_CLK 0x0000ffff
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
+#define MVNETA_TXQ_BUCKET_REFILL_REG(q) (0x3e20 + ((q) << 2))
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK 0x3ff00000
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT 20
+#define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX 0x0007ffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+/* The values of the bucket refill base period and refill period are taken from
+ * the reference manual and add up to a base resolution of 10Kbps. This allows
+ * covering all rate-limit values from 10Kbps up to 5Gbps.
+ */
+
+/* Base period for the rate limit algorithm */
+#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100
+
+/* Number of base periods to wait between bucket refills */
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD 1000
+
+/* The base resolution for rate limiting, in bps. Any max_rate value should be
+ * a multiple of that value.
+ */
+#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \
+ (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
+ MVNETA_TXQ_BUCKET_REFILL_PERIOD))
+
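
Taken together, these constants mean the bucket is refilled every 100ns * 1000 = 100us, so one refill unit corresponds to NSEC_PER_SEC / (100 * 1000) = 10,000 bps, i.e. the 10Kbps resolution named above; with the refill value capped at 0x7ffff (524,287 units) the mechanism tops out around 5.24Gbps, covering the stated 10Kbps-5Gbps range.
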
#define MVNETA_LPI_CTRL_0 0x2cc0
#define MVNETA_LPI_CTRL_1 0x2cc4
#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
@@ -492,13 +520,13 @@ struct mvneta_port {
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
- u8 prio_tc_map[8];
phy_interface_t phy_interface;
struct device_node *dn;
unsigned int tx_csum_limit;
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
struct phy *comphy;
struct mvneta_bm *bm_priv;
@@ -2212,7 +2240,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(pp->dev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(pp->dev, prog, act);
@@ -3819,58 +3847,31 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
return 0;
}
-static void mvneta_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ return container_of(pcs, struct mvneta_port, phylink_pcs);
+}
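
mvneta_pcs_to_port() works because struct phylink_pcs is embedded by value in struct mvneta_port, so container_of() can walk back from the PCS callbacks to the driver state without any lookup table. The idiom in isolation, with hypothetical names:

	struct my_port {
		/* ... driver state ... */
		struct phylink_pcs pcs;	/* embedded by value, not a pointer */
	};

	static struct my_port *my_pcs_to_port(struct phylink_pcs *pcs)
	{
		/* valid only because every pcs handed to phylink
		 * lives inside a struct my_port
		 */
		return container_of(pcs, struct my_port, pcs);
	}
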
+static int mvneta_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
/* We only support QSGMII, SGMII, 802.3z and RGMII modes.
* When in 802.3z mode, we must have AN enabled:
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
*/
if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg)) {
- linkmode_zero(supported);
- return;
- }
-
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- /* Asymmetric pause is unsupported */
- phylink_set(mask, Pause);
-
- /* Half-duplex at speeds higher than 100Mbit is unsupported */
- if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
-
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- if (!phy_interface_mode_is_8023z(state->interface)) {
- /* 10M and 100M are only supported in non-802.3z mode */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- }
+ !phylink_test(state->advertising, Autoneg))
+ return -EINVAL;
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ return 0;
}
-static void mvneta_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct mvneta_port *pp = netdev_priv(ndev);
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
u32 gmac_stat;
gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
@@ -3888,17 +3889,71 @@ static void mvneta_mac_pcs_get_state(struct phylink_config *config,
state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
- state->pause = 0;
if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
state->pause |= MLO_PAUSE_RX;
if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
state->pause |= MLO_PAUSE_TX;
}
-static void mvneta_mac_an_restart(struct phylink_config *config)
+static int mvneta_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode, phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct mvneta_port *pp = netdev_priv(ndev);
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
+ u32 mask, val, an, old_an, changed;
+
+ mask = MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_INBAND_RESTART_AN |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_FLOW_CTRL_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+
+ if (phylink_autoneg_inband(mode)) {
+ mask |= MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+ val = MVNETA_GMAC_INBAND_AN_ENABLE;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the speed and duplex from PHY */
+ val |= MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z mode has fixed speed and duplex */
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ /* The FLOW_CTRL_EN bit selects whether the hardware automatically
+ * or the CONFIG_FLOW_CTRL bit manually controls the GMAC pause
+ * mode.
+ */
+ if (permit_pause_to_mac)
+ val |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
+
+ /* Update the advertisement bits */
+ mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ if (phylink_test(advertising, Pause))
+ val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ }
+ } else {
+ /* Phy or fixed speed - disable in-band AN modes */
+ val = 0;
+ }
+
+ old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ an = (an & ~mask) | val;
+ changed = old_an ^ an;
+ if (changed)
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an);
+
+ /* We are only interested in the advertisement bits changing */
+ return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL);
+}
+
+static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
@@ -3907,6 +3962,47 @@ static void mvneta_mac_an_restart(struct phylink_config *config)
gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
}
+static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
+ .pcs_validate = mvneta_pcs_validate,
+ .pcs_get_state = mvneta_pcs_get_state,
+ .pcs_config = mvneta_pcs_config,
+ .pcs_an_restart = mvneta_pcs_an_restart,
+};
+
+static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
+
+ if (pp->phy_interface != interface ||
+ phylink_autoneg_inband(mode)) {
+ /* Force the link down when changing the interface or if in
+ * in-band mode. According to Armada 370 documentation, we
+ * can only change the port mode and in-band enable when the
+ * link is down.
+ */
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+ val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ if (pp->phy_interface != interface)
+ WARN_ON(phy_power_off(pp->comphy));
+
+ /* Enable the 1ms clock */
+ if (phylink_autoneg_inband(mode)) {
+ unsigned long rate = clk_get_rate(pp->clk);
+
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER,
+ MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000));
+ }
+
+ return 0;
+}
+
static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -3915,20 +4011,11 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
- u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
- u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
MVNETA_GMAC2_PORT_RESET);
new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
- new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
- new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_INBAND_RESTART_AN |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
- MVNETA_GMAC_AN_FLOW_CTRL_EN |
- MVNETA_GMAC_AN_DUPLEX_EN);
/* Even though it might look weird, when we're configured in
* SGMII or QSGMII mode, the RGMII bit needs to be set.
@@ -3940,9 +4027,6 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
phy_interface_mode_is_8023z(state->interface))
new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
- if (phylink_test(state->advertising, Pause))
- new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
-
if (!phylink_autoneg_inband(mode)) {
/* Phy or fixed speed - nothing to do, leave the
* configured speed, duplex and flow control as-is.
@@ -3950,66 +4034,23 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII mode receives the state from the PHY */
new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
- new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
- new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_FORCE_LINK_PASS |
- MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX)) |
- MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_AN_DUPLEX_EN;
} else {
/* 802.3z negotiation - only 1000base-X */
new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
- new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
- new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_FORCE_LINK_PASS |
- MVNETA_GMAC_CONFIG_MII_SPEED)) |
- MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- /* The MAC only supports FD mode */
- MVNETA_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->pause & MLO_PAUSE_AN && state->an_enabled)
- new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
- }
-
- /* Armada 370 documentation says we can only change the port mode
- * and in-band enable when the link is down, so force it down
- * while making these changes. We also do this for GMAC_CTRL2
- */
- if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
- (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
- (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
- (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
- MVNETA_GMAC_FORCE_LINK_DOWN);
}
-
/* When at 2.5G, the link partner can send frames with shortened
* preambles.
*/
if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
- if (pp->phy_interface != state->interface) {
- if (pp->comphy)
- WARN_ON(phy_power_off(pp->comphy));
- WARN_ON(mvneta_config_interface(pp, state->interface));
- }
-
if (new_ctrl0 != gmac_ctrl0)
mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
if (new_ctrl2 != gmac_ctrl2)
mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
if (new_ctrl4 != gmac_ctrl4)
mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
- if (new_clk != gmac_clk)
- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
- if (new_an != gmac_an)
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
@@ -4018,6 +4059,36 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
}
}
+static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val, clk;
+
+ /* Disable 1ms clock if not in in-band mode */
+ if (!phylink_autoneg_inband(mode)) {
+ clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk);
+ }
+
+ if (pp->phy_interface != interface)
+ /* Enable the Serdes PHY */
+ WARN_ON(mvneta_config_interface(pp, interface));
+
+ /* Allow the link to come up if in in-band mode, otherwise the
+ * link is forced via mac_link_down()/mac_link_up()
+ */
+ if (phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ return 0;
+}
+
static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
{
u32 lpi_ctl1;
@@ -4104,10 +4175,10 @@ static void mvneta_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops mvneta_phylink_ops = {
- .validate = mvneta_validate,
- .mac_pcs_get_state = mvneta_mac_pcs_get_state,
- .mac_an_restart = mvneta_mac_an_restart,
+ .validate = phylink_generic_validate,
+ .mac_prepare = mvneta_mac_prepare,
.mac_config = mvneta_mac_config,
+ .mac_finish = mvneta_mac_finish,
.mac_link_down = mvneta_mac_link_down,
.mac_link_up = mvneta_mac_link_up,
};
@@ -4539,8 +4610,11 @@ static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
}
-static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void
+mvneta_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(netdev);
@@ -4550,8 +4624,11 @@ static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
ring->tx_pending = pp->tx_ring_size;
}
-static int mvneta_ethtool_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static int
+mvneta_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -4919,43 +4996,144 @@ static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
}
-static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
+static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
{
- u32 val = 0;
- int i;
+ u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
- for (i = 0; i < rxq_number; i++)
- val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
+ val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
+ val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
}
+static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
+{
+ unsigned long core_clk_rate;
+ u32 refill_cycles;
+ u32 val;
+
+ core_clk_rate = clk_get_rate(pp->clk);
+ if (!core_clk_rate)
+ return -EINVAL;
+
+ refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS /
+ (NSEC_PER_SEC / core_clk_rate);
+
+ if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK)
+ return -EINVAL;
+
+ /* Enable bw limit algorithm version 3 */
+ val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
+ val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
+ mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
+
+ /* Set the base refill rate */
+ mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
+
+ return 0;
+}
+
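
As a worked example of the calculation above (clock rate assumed purely for illustration): with a 250MHz core clock one cycle lasts NSEC_PER_SEC / 250000000 = 4ns, so the 100ns base period maps to refill_cycles = 100 / 4 = 25, comfortably below MVNETA_REFILL_MAX_NUM_CLK (0xffff).
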
+static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
+{
+ u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
+
+ val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
+ mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
+}
+
+static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
+ u64 min_rate, u64 max_rate)
+{
+ u32 refill_val, rem;
+ u32 val = 0;
+
+ /* Convert from Bps to bps */
+ max_rate *= 8;
+
+ if (min_rate)
+ return -EINVAL;
+
+ refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION,
+ &rem);
+
+ if (rem || !refill_val ||
+ refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX)
+ return -EINVAL;
+
+ val = refill_val;
+ val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD <<
+ MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT);
+
+ mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
+
+ return 0;
+}
+
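
Worked example for the conversion above (rate chosen for illustration): max_rate = 125,000,000 Bps becomes 1,000,000,000 bps after the multiply by 8; div_u64_rem() against the 10,000 bps resolution gives refill_val = 100,000 with remainder 0, well under MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX (0x7ffff = 524,287), so a 1Gbps cap programs cleanly. Any rate that is not a whole multiple of 10Kbps leaves rem nonzero and is rejected with -EINVAL.
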
static int mvneta_setup_mqprio(struct net_device *dev,
- struct tc_mqprio_qopt *qopt)
+ struct tc_mqprio_qopt_offload *mqprio)
{
struct mvneta_port *pp = netdev_priv(dev);
+ int rxq, txq, tc, ret;
u8 num_tc;
- int i;
- qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = qopt->num_tc;
+ if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
+ return 0;
+
+ num_tc = mqprio->qopt.num_tc;
if (num_tc > rxq_number)
return -EINVAL;
+ mvneta_clear_rx_prio_map(pp);
+
if (!num_tc) {
- mvneta_clear_rx_prio_map(pp);
+ mvneta_disable_per_queue_rate_limit(pp);
netdev_reset_tc(dev);
return 0;
}
- memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map));
+ netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+ netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
+ mqprio->qopt.offset[tc]);
+
+ for (rxq = mqprio->qopt.offset[tc];
+ rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+ rxq++) {
+ if (rxq >= rxq_number)
+ return -EINVAL;
- mvneta_setup_rx_prio_map(pp);
+ mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
+ }
+ }
- netdev_set_num_tc(dev, qopt->num_tc);
- for (i = 0; i < qopt->num_tc; i++)
- netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]);
+ if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+ mvneta_disable_per_queue_rate_limit(pp);
+ return 0;
+ }
+
+ if (mqprio->qopt.num_tc > txq_number)
+ return -EINVAL;
+
+ ret = mvneta_enable_per_queue_rate_limit(pp);
+ if (ret)
+ return ret;
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+ for (txq = mqprio->qopt.offset[tc];
+ txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+ txq++) {
+ if (txq >= txq_number)
+ return -EINVAL;
+
+ ret = mvneta_setup_queue_rates(pp, txq,
+ mqprio->min_rate[tc],
+ mqprio->max_rate[tc]);
+ if (ret)
+ return ret;
+ }
+ }
return 0;
}
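
For context, a user-space request exercising both the RX priority mapping and the per-queue shaping above would look roughly like this (device name and rates are hypothetical, assuming the standard iproute2 mqprio syntax; max_rate values must be multiples of 10Kbit to pass the resolution check):

	tc qdisc add dev eth0 parent root handle 100: mqprio \
		num_tc 2 map 0 0 0 0 1 1 1 1 queues 1@0 1@1 \
		hw 1 mode channel shaper bw_rlimit max_rate 100Mbit 500Mbit
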
@@ -5166,6 +5344,9 @@ static int mvneta_probe(struct platform_device *pdev)
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
+ pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD | MAC_2500FD;
+
phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
pp->phylink_config.supported_interfaces);
@@ -5237,6 +5418,9 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_clk;
}
+ pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
+ phylink_set_pcs(phylink, &pp->phylink_pcs);
+
/* Alloc per-cpu port structure */
pp->ports = alloc_percpu(struct mvneta_pcpu_port);
if (!pp->ports) {
@@ -5355,7 +5539,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
/* MTU range: 68 - 9676 */
dev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index cf8acabb90ac..ad73a488fc5f 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1239,7 +1239,8 @@ struct mvpp2_port {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
- struct phylink_pcs phylink_pcs;
+ struct phylink_pcs pcs_gmac;
+ struct phylink_pcs pcs_xlg;
struct phy *comphy;
struct mvpp2_bm_pool *pool_long;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6da8a595026b..7cdbf8b8bbf6 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1488,6 +1488,7 @@ static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
static bool mvpp2_is_xlg(phy_interface_t interface)
{
return interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_5GBASER ||
interface == PHY_INTERFACE_MODE_XAUI;
}
@@ -1627,6 +1628,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
case PHY_INTERFACE_MODE_2500BASEX:
mvpp22_gop_init_sgmii(port);
break;
+ case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_10GBASER:
if (!mvpp2_port_supports_xlg(port))
goto invalid_conf;
@@ -2186,6 +2188,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
switch (interface) {
+ case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_10GBASER:
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
@@ -3820,7 +3823,7 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
}
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(port->dev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(port->dev, prog, act);
@@ -5139,9 +5142,6 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags)
- return -EINVAL;
-
if (config.tx_type != HWTSTAMP_TX_OFF &&
config.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
@@ -5433,8 +5433,11 @@ static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
-static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static void
+mvpp2_ethtool_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -5444,8 +5447,11 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
ring->tx_pending = port->tx_ring_size;
}
-static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static int
+mvpp2_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
u16 prev_rx_ring_size = port->rx_ring_size;
@@ -6109,18 +6115,26 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
return container_of(config, struct mvpp2_port, phylink_config);
}
-static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
+static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs)
{
- return container_of(pcs, struct mvpp2_port, phylink_pcs);
+ return container_of(pcs, struct mvpp2_port, pcs_xlg);
+}
+
+static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mvpp2_port, pcs_gmac);
}
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
u32 val;
- state->speed = SPEED_10000;
+ if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER)
+ state->speed = SPEED_5000;
+ else
+ state->speed = SPEED_10000;
state->duplex = 1;
state->an_complete = 1;
@@ -6149,10 +6163,25 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
.pcs_config = mvpp2_xlg_pcs_config,
};
+static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* When in 802.3z mode, we must have AN enabled:
+ * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ */
+ if (phy_interface_mode_is_8023z(state->interface) &&
+ !phylink_test(state->advertising, Autoneg))
+ return -EINVAL;
+
+ return 0;
+}
+
static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
u32 val;
val = readl(port->base + MVPP2_GMAC_STATUS0);
@@ -6189,7 +6218,7 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
u32 mask, val, an, old_an, changed;
mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
@@ -6243,7 +6272,7 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
@@ -6253,78 +6282,12 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
+ .pcs_validate = mvpp2_gmac_pcs_validate,
.pcs_get_state = mvpp2_gmac_pcs_get_state,
.pcs_config = mvpp2_gmac_pcs_config,
.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
};
-static void mvpp2_phylink_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* When in 802.3z mode, we must have AN enabled:
- * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
- * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
- */
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- goto empty_set;
-
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- if (port->priv->global_tx_fc) {
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- }
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_XAUI:
- if (mvpp2_port_supports_xlg(port)) {
- phylink_set_10g_modes(mask);
- phylink_set(mask, 10000baseKR_Full);
- }
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_SGMII:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- break;
-
- case PHY_INTERFACE_MODE_1000BASEX:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- break;
-
- case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- break;
-
- default:
- goto empty_set;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
- return;
-
-empty_set:
- linkmode_zero(supported);
-}
-
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -6404,8 +6367,23 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
}
-static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
+static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ /* Select the appropriate PCS operations depending on the
+ * configured interface mode. We will only switch to a mode
+ * that the validate() checks have already passed.
+ */
+ if (mvpp2_is_xlg(interface))
+ return &port->pcs_xlg;
+ else
+ return &port->pcs_gmac;
+}
+
+static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
@@ -6454,31 +6432,9 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
}
}
- /* Select the appropriate PCS operations depending on the
- * configured interface mode. We will only switch to a mode
- * that the validate() checks have already passed.
- */
- if (mvpp2_is_xlg(interface))
- port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
- else
- port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
-
return 0;
}
-static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- int ret;
-
- ret = mvpp2__mac_prepare(config, mode, interface);
- if (ret == 0)
- phylink_set_pcs(port->phylink, &port->phylink_pcs);
-
- return ret;
-}
-
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -6649,7 +6605,8 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops mvpp2_phylink_ops = {
- .validate = mvpp2_phylink_validate,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = mvpp2_select_pcs,
.mac_prepare = mvpp2_mac_prepare,
.mac_config = mvpp2_mac_config,
.mac_finish = mvpp2_mac_finish,
@@ -6667,12 +6624,15 @@ static void mvpp2_acpi_start(struct mvpp2_port *port)
struct phylink_link_state state = {
.interface = port->phy_interface,
};
- mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
- port->phy_interface);
+ struct phylink_pcs *pcs;
+
+ pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface);
+
+ mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
- port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
- port->phy_interface,
- state.advertising, false);
+ pcs->ops->pcs_config(pcs, MLO_AN_INBAND, port->phy_interface,
+ state.advertising, false);
mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
port->phy_interface);
mvpp2_mac_link_up(&port->phylink_config, NULL,
@@ -6901,7 +6861,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mvpp2_set_hw_csum(port, port->pool_long->id);
dev->vlan_features |= features;
- dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9704 */
@@ -6913,12 +6873,44 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
+ port->phylink_config.mac_capabilities =
+ MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
+
+ if (port->priv->global_tx_fc)
+ port->phylink_config.mac_capabilities |=
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
if (mvpp2_port_supports_xlg(port)) {
- __set_bit(PHY_INTERFACE_MODE_10GBASER,
- port->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_XAUI,
- port->phylink_config.supported_interfaces);
+ /* If a COMPHY is present, we can support any of
+ * the serdes modes and switch between them.
+ */
+ if (comphy) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ }
+
+ if (comphy)
+ port->phylink_config.mac_capabilities |=
+ MAC_10000FD | MAC_5000FD;
+ else if (phy_mode == PHY_INTERFACE_MODE_5GBASER)
+ port->phylink_config.mac_capabilities |=
+ MAC_5000FD;
+ else
+ port->phylink_config.mac_capabilities |=
+ MAC_10000FD;
}
if (mvpp2_port_supports_rgmii(port))
@@ -6948,6 +6940,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.supported_interfaces);
}
+ port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
+ port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
+
phylink = phylink_create(&port->phylink_config, port_fwnode,
phy_mode, &mvpp2_phylink_ops);
if (IS_ERR(phylink)) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 186d00a9ab35..3631d612aaca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1570,6 +1570,8 @@ static struct mac_ops cgx_mac_ops = {
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
.mac_pause_frm_config = cgx_lmac_pause_frm_config,
.mac_enadis_ptp_config = cgx_lmac_ptp_config,
+ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+ .mac_tx_enable = cgx_lmac_tx_enable,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index fc6e7423cbd8..b33e7d1d0851 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -107,6 +107,9 @@ struct mac_ops {
void (*mac_enadis_ptp_config)(void *cgxd,
int lmac_id,
bool enable);
+
+ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 4e79e918a161..58e2aeebc14f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -732,6 +732,7 @@ enum nix_af_status {
NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
+ NIX_AF_ERR_LINK_CREDITS = -431,
};
/* For NIX RX vtag action */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 0fe7ad35e36f..4180376fa676 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -185,7 +185,6 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
NPC_S_KPU2_EXDSA,
- NPC_S_KPU2_NGIO,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
@@ -212,6 +211,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_NSH,
NPC_S_KPU5_CPT_IP,
NPC_S_KPU5_CPT_IP6,
+ NPC_S_KPU5_NGIO,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
@@ -1124,15 +1124,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_CTAG,
0xffff,
- NPC_ETYPE_NGIO,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1968,6 +1959,15 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_PPPOE,
0xffff,
0x0000,
@@ -2750,15 +2750,6 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_NGIO, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CPT_CTAG, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -5090,6 +5081,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8425,14 +8425,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
- NPC_S_KPU2_NGIO, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 0, 0, 0,
NPC_S_KPU2_CTAG2, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -9196,6 +9188,14 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_NGIO, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
@@ -9892,14 +9892,6 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NGIO,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_CPT_IP, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -11974,6 +11966,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index d6321de3cc17..e682b7bfde64 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -60,6 +60,8 @@ struct ptp *ptp_get(void)
/* Check driver is bound to PTP block */
if (!ptp)
ptp = ERR_PTR(-EPROBE_DEFER);
+ else
+ pci_dev_get(ptp->pdev);
return ptp;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index e695fa0e82a9..9ea2f6ac38ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -30,6 +30,8 @@ static struct mac_ops rpm_mac_ops = {
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
.mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -54,6 +56,43 @@ int rpm_get_nr_lmacs(void *rpmd)
return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
}
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ last = cfg;
+ if (enable)
+ cfg |= RPM_TX_EN;
+ else
+ cfg &= ~(RPM_TX_EN);
+
+ if (cfg != last)
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return !!(last & RPM_TX_EN);
+}
+
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN | RPM_TX_EN;
+ else
+ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
+
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
@@ -252,23 +291,20 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
if (!rpm || lmac_id >= rpm->lmac_count)
return -ENODEV;
lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
- if (lmac_type == LMAC_MODE_100G_R) {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
-
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
- } else {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1);
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg);
+
+ if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
+ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
+ return 0;
}
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 57c8a687b488..ff580311edd0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -43,6 +43,8 @@
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
#define RPM_LMAC_FWI 0xa
+#define RPM_TX_EN BIT_ULL(0)
+#define RPM_RX_EN BIT_ULL(1)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
@@ -57,4 +59,6 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3ca6b942ebe2..54e1b27a7dfe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -520,8 +520,11 @@ static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
- if (err)
- dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
+ if (err) {
+ dev_err(rvu->dev, "HW block:%d reset timed out, retrying\n", blkaddr);
+ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
+ ;
+ }
}
static void rvu_reset_all_blocks(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 66e45d733824..5ed94cfb47d2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -806,6 +806,7 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 2ca182a4ce82..8a7ac5a8b821 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -441,16 +441,26 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ void *cgxd;
if (!is_cgx_config_permitted(rvu, pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
- cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+ struct mac_ops *mac_ops;
- return 0;
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 45357deecabb..a73a8017e0ee 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -172,14 +172,13 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
- char irq_name[16];
int i, ret;
for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
- snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
ret = rvu_cpt_do_register_interrupt(block, off + i,
rvu_cpt_af_flt_intr_handler,
- irq_name);
+ &rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index a09a507369ac..d1eddb769a41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1224,6 +1224,8 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 70bacd38a6d9..d0ab8f233a02 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -41,7 +41,7 @@ static bool rvu_common_request_irq(struct rvu *rvu, int offset,
struct rvu_devlink *rvu_dl = rvu->rvu_dl;
int rc;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
&rvu->irq_name[offset * NAME_SIZE], rvu_dl);
if (rc)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d8b1948aaa0a..97fb61915379 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -512,11 +512,11 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
-
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ sdp_chan_cnt = cfg & 0xFFF;
sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -2068,8 +2068,8 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
@@ -2092,7 +2092,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
return err;
}
@@ -3878,7 +3878,7 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
/* Enable cgx tx if disabled for credits to be back */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, true);
}
@@ -3891,8 +3891,8 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
}
- rc = -EBUSY;
- poll_tmo = jiffies + usecs_to_jiffies(10000);
+ rc = NIX_AF_ERR_LINK_CREDITS;
+ poll_tmo = jiffies + usecs_to_jiffies(200000);
/* Wait for credits to return */
do {
if (time_after(jiffies, poll_tmo))
@@ -3918,7 +3918,7 @@ exit:
/* Restore state of cgx tx */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
mutex_unlock(&rvu->rsrc_lock);
return rc;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index c0005a1feee6..91f86d77cd41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -402,6 +402,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, struct mcam_entry *entry,
bool *enable)
{
+ struct rvu_npc_mcam_rule *rule;
u16 owner, target_func;
struct rvu_pfvf *pfvf;
u64 rx_action;
@@ -423,6 +424,12 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
*enable = false;
+ /* fixup not needed for the rules added by the user (ntuple filters) */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ return;
+ }
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -489,8 +496,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
}
/* PF installing VF rule */
- if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
- npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable);
+ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
/* Set 'action' */
rvu_write64(rvu, blkaddr,
@@ -916,7 +923,8 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc, u64 rx_action)
{
int actindex, index, bank, entry;
- bool enable;
+ struct rvu_npc_mcam_rule *rule;
+ bool enable, update;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return;
@@ -924,6 +932,14 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
mutex_lock(&mcam->lock);
for (index = 0; index < mcam->bmap_entries; index++) {
if (mcam->entry2target_pffunc[index] == pcifunc) {
+ update = true;
+ /* update not needed for the rules added via ntuple filters */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ update = false;
+ }
+ if (!update)
+ continue;
bank = npc_get_bank(mcam, index);
actindex = index;
entry = index & (mcam->banksize - 1);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index ff2b21999f36..19c53e591d0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1098,14 +1098,6 @@ find_rule:
write_req.cntr = rule->cntr;
}
- err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
- &write_rsp);
- if (err) {
- rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
- if (new)
- kfree(rule);
- return err;
- }
/* update rule */
memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
@@ -1132,6 +1124,18 @@ find_rule:
if (req->default_rule)
pfvf->def_ucast_rule = rule;
+ /* write to mcam entry registers */
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ return err;
+ }
+
/* VF's MAC address is being changed via PF */
if (pf_set_vfs_mac) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 61e52812983f..14509fc64cce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -603,6 +603,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
size++;
tar_addr |= ((size - 1) & 0x7) << 4;
}
+ dma_wmb();
memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
/* Perform LMTST flush */
cn10k_lmt_flush(val, tar_addr);
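
The added dma_wmb() guarantees that every store the CPU made to buffers and descriptors before this point is visible to the device before the pointers are handed over through the LMT store and flush. The general shape of the pattern, sketched with hypothetical function names:

	/* 1. publish the data the device will consume */
	prepare_buffers_and_descriptors();
	/* 2. order those stores ahead of the hand-off */
	dma_wmb();
	/* 3. hand ownership to the device (an LMTST here; a doorbell
	 *    register write in the more common case)
	 */
	issue_lmtst_or_doorbell();
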
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 80d4ce61f442..d85db90632d6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -360,7 +360,9 @@ static int otx2_set_pauseparam(struct net_device *netdev,
}
static void otx2_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct otx2_qset *qs = &pfvf->qset;
@@ -372,7 +374,9 @@ static void otx2_get_ringparam(struct net_device *netdev,
}
static int otx2_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 1e0d0c9c1dac..d39341e4ab37 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -394,7 +394,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
dst_mdev->msg_size = mbox_hdr->msg_size;
dst_mdev->num_msgs = num_msgs;
err = otx2_sync_mbox_msg(dst_mbox);
- if (err) {
+ /* An error code of -EIO indicates a communication failure
+ * with the AF. The rest of the error codes indicate that the AF
+ * processed the VF messages and set the error codes in the
+ * response messages (if any), so simply forward the responses
+ * to the VF.
+ */
+ if (err == -EIO) {
dev_warn(pf->dev,
"AF not responding to VF%d messages\n", vf);
/* restore PF mbase and exit */
@@ -2002,10 +2007,6 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
otx2_config_hw_tx_tstamp(pfvf, false);
@@ -2741,7 +2742,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
- netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 0cc6353254bf..7c4068c5d1ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1198,7 +1198,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
put_page(page);
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
break;
case XDP_ABORTED:
trace_xdp_exception(pfvf->netdev, prog, act);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 78944ad3492f..925b74ebb8b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -663,7 +663,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= NETIF_F_RXALL;
- netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2vf_netdev_ops;
@@ -684,7 +684,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ptp_destroy;
}
err = otx2_wq_init(vf);
@@ -709,6 +709,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(vf);
err_detach_rsrc:
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
@@ -742,6 +744,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ otx2_ptp_destroy(vf);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
index 0609df8b913d..d395f4131648 100644
--- a/drivers/net/ethernet/marvell/prestera/Makefile
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PRESTERA) += prestera.o
prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
prestera_switchdev.o prestera_acl.o prestera_flow.o \
- prestera_flower.o prestera_span.o
+ prestera_flower.o prestera_span.o prestera_counter.o \
+ prestera_router.o prestera_router_hw.o
obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 2a4c14c704c0..2fd9ef2fe5d6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -225,6 +225,29 @@ struct prestera_event {
};
};
+enum prestera_if_type {
+ /* the interface is of port type (dev,port) */
+ PRESTERA_IF_PORT_E = 0,
+
+ /* the interface is of lag type (lag-id) */
+ PRESTERA_IF_LAG_E = 1,
+
+ /* the interface is of Vid type (vlan-id) */
+ PRESTERA_IF_VID_E = 3,
+};
+
+struct prestera_iface {
+ enum prestera_if_type type;
+ struct {
+ u32 hw_dev_num;
+ u32 port_num;
+ } dev_port;
+ u32 hw_dev_num;
+ u16 vr_id;
+ u16 lag_id;
+ u16 vlan_id;
+};
+
struct prestera_switchdev;
struct prestera_span;
struct prestera_rxtx;
@@ -247,11 +270,21 @@ struct prestera_switch {
u32 mtu_min;
u32 mtu_max;
u8 id;
+ struct prestera_router *router;
struct prestera_lag *lags;
+ struct prestera_counter *counter;
u8 lag_member_max;
u8 lag_max;
};
+struct prestera_router {
+ struct prestera_switch *sw;
+ struct list_head vr_list;
+ struct list_head rif_entry_list;
+ struct notifier_block inetaddr_nb;
+ struct notifier_block inetaddr_valid_nb;
+};
+
struct prestera_rxtx_params {
bool use_sdma;
u32 map_addr;
@@ -279,6 +312,9 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes);
+int prestera_router_init(struct prestera_switch *sw);
+void prestera_router_fini(struct prestera_switch *sw);
+
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
int prestera_port_cfg_mac_read(struct prestera_port *port,
@@ -293,6 +329,8 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
+int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr);
+
bool prestera_port_is_lag_member(const struct prestera_port *port);
struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 83c75ffb1a1c..f0d9f592173b 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -1,35 +1,72 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved */
#include <linux/rhashtable.h>
-#include "prestera.h"
-#include "prestera_hw.h"
#include "prestera_acl.h"
-#include "prestera_span.h"
+#include "prestera_flow.h"
+#include "prestera_hw.h"
+#include "prestera.h"
+
+#define ACL_KEYMASK_SIZE \
+ (sizeof(__be32) * __PRESTERA_ACL_RULE_MATCH_TYPE_MAX)
struct prestera_acl {
struct prestera_switch *sw;
+ struct list_head vtcam_list;
struct list_head rules;
+ struct rhashtable ruleset_ht;
+ struct rhashtable acl_rule_entry_ht;
+ struct idr uid;
+};
+
+struct prestera_acl_ruleset_ht_key {
+ struct prestera_flow_block *block;
+};
+
+struct prestera_acl_rule_entry {
+ struct rhash_head ht_node;
+ struct prestera_acl_rule_entry_key key;
+ u32 hw_id;
+ u32 vtcam_id;
+ struct {
+ struct {
+ u8 valid:1;
+ } accept, drop, trap;
+ struct {
+ u32 id;
+ struct prestera_counter_block *block;
+ } counter;
+ };
};
struct prestera_acl_ruleset {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct prestera_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
- struct prestera_switch *sw;
- u16 id;
+ struct prestera_acl *acl;
+ unsigned long rule_count;
+ refcount_t refcount;
+ void *keymask;
+ u32 vtcam_id;
+ u16 pcl_id;
+ bool offload;
};
-struct prestera_acl_rule {
- struct rhash_head ht_node;
+struct prestera_acl_vtcam {
struct list_head list;
- struct list_head match_list;
- struct list_head action_list;
- struct prestera_flow_block *block;
- unsigned long cookie;
- u32 priority;
- u8 n_actions;
- u8 n_matches;
+ __be32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ refcount_t refcount;
u32 id;
+ bool is_keymask_set;
+ u8 lookup;
+};
+
+static const struct rhashtable_params prestera_acl_ruleset_ht_params = {
+ .key_len = sizeof(struct prestera_acl_ruleset_ht_key),
+ .key_offset = offsetof(struct prestera_acl_ruleset, ht_key),
+ .head_offset = offsetof(struct prestera_acl_ruleset, ht_node),
+ .automatic_shrinking = true,
};
static const struct rhashtable_params prestera_acl_rule_ht_params = {
@@ -39,28 +76,48 @@ static const struct rhashtable_params prestera_acl_rule_ht_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
+ .key_offset = offsetof(struct prestera_acl_rule_entry, key),
+ .head_offset = offsetof(struct prestera_acl_rule_entry, ht_node),
+ .key_len = sizeof(struct prestera_acl_rule_entry_key),
+ .automatic_shrinking = true,
+};
+
static struct prestera_acl_ruleset *
-prestera_acl_ruleset_create(struct prestera_switch *sw)
+prestera_acl_ruleset_create(struct prestera_acl *acl,
+ struct prestera_flow_block *block)
{
struct prestera_acl_ruleset *ruleset;
+ u32 uid = 0;
int err;
ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
+ ruleset->acl = acl;
+ ruleset->ht_key.block = block;
+ refcount_set(&ruleset->refcount, 1);
+
err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params);
if (err)
goto err_rhashtable_init;
- err = prestera_hw_acl_ruleset_create(sw, &ruleset->id);
+ err = idr_alloc_u32(&acl->uid, NULL, &uid, U8_MAX, GFP_KERNEL);
if (err)
goto err_ruleset_create;
- ruleset->sw = sw;
+ /* make pcl-id based on uid */
+ ruleset->pcl_id = (u8)uid;
+ err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ prestera_acl_ruleset_ht_params);
+ if (err)
+ goto err_ruleset_ht_insert;
return ruleset;
+err_ruleset_ht_insert:
+ idr_remove(&acl->uid, uid);
err_ruleset_create:
rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
@@ -68,117 +125,164 @@ err_rhashtable_init:
return ERR_PTR(err);
}
-static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
+void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask)
{
- prestera_hw_acl_ruleset_del(ruleset->sw, ruleset->id);
- rhashtable_destroy(&ruleset->rule_ht);
- kfree(ruleset);
+ ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL);
}
-struct prestera_flow_block *
-prestera_acl_block_create(struct prestera_switch *sw, struct net *net)
+int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
{
- struct prestera_flow_block *block;
+ u32 vtcam_id;
+ int err;
- block = kzalloc(sizeof(*block), GFP_KERNEL);
- if (!block)
- return NULL;
- INIT_LIST_HEAD(&block->binding_list);
- block->net = net;
- block->sw = sw;
-
- block->ruleset = prestera_acl_ruleset_create(sw);
- if (IS_ERR(block->ruleset)) {
- kfree(block);
- return NULL;
- }
+ if (ruleset->offload)
+ return -EEXIST;
+
+ err = prestera_acl_vtcam_id_get(ruleset->acl, 0,
+ ruleset->keymask, &vtcam_id);
+ if (err)
+ return err;
- return block;
+ ruleset->vtcam_id = vtcam_id;
+ ruleset->offload = true;
+ return 0;
}
-void prestera_acl_block_destroy(struct prestera_flow_block *block)
+static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
{
- prestera_acl_ruleset_destroy(block->ruleset);
- WARN_ON(!list_empty(&block->binding_list));
- kfree(block);
+ struct prestera_acl *acl = ruleset->acl;
+ u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER;
+
+ rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ prestera_acl_ruleset_ht_params);
+
+ if (ruleset->offload)
+ WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id));
+
+ idr_remove(&acl->uid, uid);
+
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset->keymask);
+ kfree(ruleset);
}
-static struct prestera_flow_block_binding *
-prestera_acl_block_lookup(struct prestera_flow_block *block,
- struct prestera_port *port)
+static struct prestera_acl_ruleset *
+__prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block)
{
- struct prestera_flow_block_binding *binding;
+ struct prestera_acl_ruleset_ht_key ht_key;
- list_for_each_entry(binding, &block->binding_list, list)
- if (binding->port == port)
- return binding;
-
- return NULL;
+ memset(&ht_key, 0, sizeof(ht_key));
+ ht_key.block = block;
+ return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
+ prestera_acl_ruleset_ht_params);
}
-int prestera_acl_block_bind(struct prestera_flow_block *block,
- struct prestera_port *port)
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block)
{
- struct prestera_flow_block_binding *binding;
- int err;
+ struct prestera_acl_ruleset *ruleset;
- if (WARN_ON(prestera_acl_block_lookup(block, port)))
- return -EEXIST;
+ ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ if (!ruleset)
+ return ERR_PTR(-ENOENT);
- binding = kzalloc(sizeof(*binding), GFP_KERNEL);
- if (!binding)
- return -ENOMEM;
- binding->span_id = PRESTERA_SPAN_INVALID_ID;
- binding->port = port;
+ refcount_inc(&ruleset->refcount);
+ return ruleset;
+}
- err = prestera_hw_acl_port_bind(port, block->ruleset->id);
- if (err)
- goto err_rules_bind;
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_get(struct prestera_acl *acl,
+ struct prestera_flow_block *block)
+{
+ struct prestera_acl_ruleset *ruleset;
- list_add(&binding->list, &block->binding_list);
- return 0;
+ ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ if (ruleset) {
+ refcount_inc(&ruleset->refcount);
+ return ruleset;
+ }
-err_rules_bind:
- kfree(binding);
- return err;
+ return prestera_acl_ruleset_create(acl, block);
}
-int prestera_acl_block_unbind(struct prestera_flow_block *block,
- struct prestera_port *port)
+void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset)
{
- struct prestera_flow_block_binding *binding;
+ if (!refcount_dec_and_test(&ruleset->refcount))
+ return;
- binding = prestera_acl_block_lookup(block, port);
- if (!binding)
- return -ENOENT;
-
- list_del(&binding->list);
+ prestera_acl_ruleset_destroy(ruleset);
+}
- prestera_hw_acl_port_unbind(port, block->ruleset->id);
+int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port)
+{
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_PORT,
+ .port = port
+ };
- kfree(binding);
- return 0;
+ return prestera_hw_vtcam_iface_bind(port->sw, &iface, ruleset->vtcam_id,
+ ruleset->pcl_id);
}
-struct prestera_acl_ruleset *
-prestera_acl_block_ruleset_get(struct prestera_flow_block *block)
+int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port)
{
- return block->ruleset;
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_PORT,
+ .port = port
+ };
+
+ return prestera_hw_vtcam_iface_unbind(port->sw, &iface,
+ ruleset->vtcam_id);
}
-u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule)
+static int prestera_acl_ruleset_block_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_flow_block *block)
{
- return rule->block->ruleset->id;
+ struct prestera_flow_block_binding *binding;
+ int err;
+
+ block->ruleset_zero = ruleset;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_acl_ruleset_bind(ruleset, binding->port);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ err = prestera_acl_ruleset_unbind(ruleset, binding->port);
+ block->ruleset_zero = NULL;
+
+ return err;
}
-struct net *prestera_acl_block_net(struct prestera_flow_block *block)
+static void
+prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_flow_block *block)
{
- return block->net;
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_acl_ruleset_unbind(ruleset, binding->port);
+ block->ruleset_zero = NULL;
}
-struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block)
+void
+prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id)
{
- return block->sw;
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ __be16 pcl_id_mask = htons(PRESTERA_ACL_KEYMASK_PCL_ID);
+ __be16 pcl_id_key = htons(pcl_id);
+
+ rule_match_set(r_match->key, PCL_ID, pcl_id_key);
+ rule_match_set(r_match->mask, PCL_ID, pcl_id_mask);
}
struct prestera_acl_rule *
@@ -189,8 +293,13 @@ prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
prestera_acl_rule_ht_params);
}
+bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
+{
+ return ruleset->offload;
+}
+
struct prestera_acl_rule *
-prestera_acl_rule_create(struct prestera_flow_block *block,
+prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
unsigned long cookie)
{
struct prestera_acl_rule *rule;
@@ -199,178 +308,436 @@ prestera_acl_rule_create(struct prestera_flow_block *block,
if (!rule)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&rule->match_list);
- INIT_LIST_HEAD(&rule->action_list);
+ rule->ruleset = ruleset;
rule->cookie = cookie;
- rule->block = block;
+
+ refcount_inc(&ruleset->refcount);
return rule;
}
-struct list_head *
-prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule)
+void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
+ u32 priority)
{
- return &rule->match_list;
+ rule->priority = priority;
}
-struct list_head *
-prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule)
+void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
{
- return &rule->action_list;
+ prestera_acl_ruleset_put(rule->ruleset);
+ kfree(rule);
}
-int prestera_acl_rule_action_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_action_entry *entry)
+int prestera_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule)
{
- struct prestera_acl_rule_action_entry *a_entry;
+ int err;
+ struct prestera_acl_ruleset *ruleset = rule->ruleset;
+ struct prestera_flow_block *block = ruleset->ht_key.block;
- a_entry = kmalloc(sizeof(*a_entry), GFP_KERNEL);
- if (!a_entry)
- return -ENOMEM;
+ /* try to add rule to hash table first */
+ err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ if (err)
+ goto err_ht_insert;
- memcpy(a_entry, entry, sizeof(*entry));
- list_add(&a_entry->list, &rule->action_list);
+ prestera_acl_rule_keymask_pcl_id_set(rule, ruleset->pcl_id);
+ rule->re_arg.vtcam_id = ruleset->vtcam_id;
+ rule->re_key.prio = rule->priority;
- rule->n_actions++;
+ /* setup counter */
+ rule->re_arg.count.valid = true;
+ rule->re_arg.count.client = PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0;
+
+ rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
+ err = WARN_ON(rule->re) ? -EEXIST : 0;
+ if (err)
+ goto err_rule_add;
+
+ rule->re = prestera_acl_rule_entry_create(sw->acl, &rule->re_key,
+ &rule->re_arg);
+ err = !rule->re ? -EINVAL : 0;
+ if (err)
+ goto err_rule_add;
+
+ /* bind the block (all ports) to chain index 0 */
+ if (!ruleset->rule_count) {
+ err = prestera_acl_ruleset_block_bind(ruleset, block);
+ if (err)
+ goto err_acl_block_bind;
+ }
+
+ list_add_tail(&rule->list, &sw->acl->rules);
+ ruleset->rule_count++;
return 0;
+
+err_acl_block_bind:
+ prestera_acl_rule_entry_destroy(sw->acl, rule->re);
+err_rule_add:
+ rule->re = NULL;
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+err_ht_insert:
+ return err;
}
-u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule)
+void prestera_acl_rule_del(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule)
{
- return rule->n_actions;
+ struct prestera_acl_ruleset *ruleset = rule->ruleset;
+ struct prestera_flow_block *block = ruleset->ht_key.block;
+
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ ruleset->rule_count--;
+ list_del(&rule->list);
+
+ prestera_acl_rule_entry_destroy(sw->acl, rule->re);
+
+ /* unbind block (all ports) */
+ if (!ruleset->rule_count)
+ prestera_acl_ruleset_block_unbind(ruleset, block);
}
-u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule)
+int prestera_acl_rule_get_stats(struct prestera_acl *acl,
+ struct prestera_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use)
{
- return rule->priority;
+ u64 current_packets;
+ u64 current_bytes;
+ int err;
+
+ err = prestera_counter_stats_get(acl->sw->counter,
+ rule->re->counter.block,
+ rule->re->counter.id,
+ &current_packets, &current_bytes);
+ if (err)
+ return err;
+
+ *packets = current_packets;
+ *bytes = current_bytes;
+ *last_use = jiffies;
+
+ return 0;
}
-void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
- u32 priority)
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_find(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key)
{
- rule->priority = priority;
+ return rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key,
+ __prestera_acl_rule_entry_ht_params);
}
-int prestera_acl_rule_match_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_match_entry *entry)
+static int __prestera_acl_rule_entry2hw_del(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
{
- struct prestera_acl_rule_match_entry *m_entry;
+ return prestera_hw_vtcam_rule_del(sw, e->vtcam_id, e->hw_id);
+}
- m_entry = kmalloc(sizeof(*m_entry), GFP_KERNEL);
- if (!m_entry)
- return -ENOMEM;
+static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
+{
+ struct prestera_acl_hw_action_info act_hw[PRESTERA_ACL_RULE_ACTION_MAX];
+ int act_num;
- memcpy(m_entry, entry, sizeof(*entry));
- list_add(&m_entry->list, &rule->match_list);
+ memset(&act_hw, 0, sizeof(act_hw));
+ act_num = 0;
- rule->n_matches++;
- return 0;
+ /* accept */
+ if (e->accept.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_ACCEPT;
+ act_num++;
+ }
+ /* drop */
+ if (e->drop.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_DROP;
+ act_num++;
+ }
+ /* trap */
+ if (e->trap.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP;
+ act_num++;
+ }
+ /* counter */
+ if (e->counter.block) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT;
+ act_hw[act_num].count.id = e->counter.id;
+ act_num++;
+ }
+
+ return prestera_hw_vtcam_rule_add(sw, e->vtcam_id, e->key.prio,
+ e->key.match.key, e->key.match.mask,
+ act_hw, act_num, &e->hw_id);
}
-u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule)
+static void
+__prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
{
- return rule->n_matches;
+ /* counter */
+ prestera_counter_put(sw->counter, e->counter.block, e->counter.id);
}
-void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
+void prestera_acl_rule_entry_destroy(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry *e)
{
- struct prestera_acl_rule_action_entry *a_entry;
- struct prestera_acl_rule_match_entry *m_entry;
- struct list_head *pos, *n;
+ int ret;
- list_for_each_safe(pos, n, &rule->match_list) {
- m_entry = list_entry(pos, typeof(*m_entry), list);
- list_del(pos);
- kfree(m_entry);
- }
+ rhashtable_remove_fast(&acl->acl_rule_entry_ht, &e->ht_node,
+ __prestera_acl_rule_entry_ht_params);
+
+ ret = __prestera_acl_rule_entry2hw_del(acl->sw, e);
+ WARN_ON(ret && ret != -ENODEV);
- list_for_each_safe(pos, n, &rule->action_list) {
- a_entry = list_entry(pos, typeof(*a_entry), list);
- list_del(pos);
- kfree(a_entry);
+ __prestera_acl_rule_entry_act_destruct(acl->sw, e);
+ kfree(e);
+}
+
+static int
+__prestera_acl_rule_entry_act_construct(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e,
+ struct prestera_acl_rule_entry_arg *arg)
+{
+ /* accept */
+ e->accept.valid = arg->accept.valid;
+ /* drop */
+ e->drop.valid = arg->drop.valid;
+ /* trap */
+ e->trap.valid = arg->trap.valid;
+ /* counter */
+ if (arg->count.valid) {
+ int err;
+
+ err = prestera_counter_get(sw->counter, arg->count.client,
+ &e->counter.block,
+ &e->counter.id);
+ if (err)
+ goto err_out;
}
- kfree(rule);
+ return 0;
+
+err_out:
+ __prestera_acl_rule_entry_act_destruct(sw, e);
+ return -EINVAL;
}
-int prestera_acl_rule_add(struct prestera_switch *sw,
- struct prestera_acl_rule *rule)
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_create(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key,
+ struct prestera_acl_rule_entry_arg *arg)
{
- u32 rule_id;
+ struct prestera_acl_rule_entry *e;
int err;
- /* try to add rule to hash table first */
- err = rhashtable_insert_fast(&rule->block->ruleset->rule_ht,
- &rule->ht_node,
- prestera_acl_rule_ht_params);
- if (err)
- return err;
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto err_kzalloc;
- /* add rule to hw */
- err = prestera_hw_acl_rule_add(sw, rule, &rule_id);
+ memcpy(&e->key, key, sizeof(*key));
+ e->vtcam_id = arg->vtcam_id;
+ err = __prestera_acl_rule_entry_act_construct(acl->sw, e, arg);
if (err)
- goto err_rule_add;
+ goto err_act_construct;
- rule->id = rule_id;
+ err = __prestera_acl_rule_entry2hw_add(acl->sw, e);
+ if (err)
+ goto err_hw_add;
- list_add_tail(&rule->list, &sw->acl->rules);
+ err = rhashtable_insert_fast(&acl->acl_rule_entry_ht, &e->ht_node,
+ __prestera_acl_rule_entry_ht_params);
+ if (err)
+ goto err_ht_insert;
- return 0;
+ return e;
-err_rule_add:
- rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node,
- prestera_acl_rule_ht_params);
- return err;
+err_ht_insert:
+ WARN_ON(__prestera_acl_rule_entry2hw_del(acl->sw, e));
+err_hw_add:
+ __prestera_acl_rule_entry_act_destruct(acl->sw, e);
+err_act_construct:
+ kfree(e);
+err_kzalloc:
+ return NULL;
}
-void prestera_acl_rule_del(struct prestera_switch *sw,
- struct prestera_acl_rule *rule)
+static int __prestera_acl_vtcam_id_try_fit(struct prestera_acl *acl, u8 lookup,
+ void *keymask, u32 *vtcam_id)
{
- rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node,
- prestera_acl_rule_ht_params);
- list_del(&rule->list);
- prestera_hw_acl_rule_del(sw, rule->id);
+ struct prestera_acl_vtcam *vtcam;
+ int i;
+
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (lookup != vtcam->lookup)
+ continue;
+
+ if (!keymask && !vtcam->is_keymask_set)
+ goto vtcam_found;
+
+ if (!(keymask && vtcam->is_keymask_set))
+ continue;
+
+ /* try to fit with vtcam keymask */
+ for (i = 0; i < __PRESTERA_ACL_RULE_MATCH_TYPE_MAX; i++) {
+ __be32 __keymask = ((__be32 *)keymask)[i];
+
+ if (!__keymask)
+ /* the vtcam keymask is not of interest here */
+ continue;
+
+ if (__keymask & ~vtcam->keymask[i])
+ /* keymask does not fit the vtcam keymask */
+ break;
+ }
+
+ if (i == __PRESTERA_ACL_RULE_MATCH_TYPE_MAX)
+ /* keymask fits vtcam keymask, return it */
+ goto vtcam_found;
+ }
+
+ /* nothing is found */
+ return -ENOENT;
+
+vtcam_found:
+ refcount_inc(&vtcam->refcount);
+ *vtcam_id = vtcam->id;
+ return 0;
}
-int prestera_acl_rule_get_stats(struct prestera_switch *sw,
- struct prestera_acl_rule *rule,
- u64 *packets, u64 *bytes, u64 *last_use)
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+ void *keymask, u32 *vtcam_id)
{
- u64 current_packets;
- u64 current_bytes;
+ struct prestera_acl_vtcam *vtcam;
+ u32 new_vtcam_id;
int err;
- err = prestera_hw_acl_rule_stats_get(sw, rule->id, &current_packets,
- &current_bytes);
- if (err)
- return err;
+ /* find the vtcam that suits the keymask. We do not expect to
+ * have a large number of vtcams, so a plain list is fine for now
+ */
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (lookup != vtcam->lookup)
+ continue;
+
+ if (!keymask && !vtcam->is_keymask_set) {
+ refcount_inc(&vtcam->refcount);
+ goto vtcam_found;
+ }
+
+ if (keymask && vtcam->is_keymask_set &&
+ !memcmp(keymask, vtcam->keymask, sizeof(vtcam->keymask))) {
+ refcount_inc(&vtcam->refcount);
+ goto vtcam_found;
+ }
+ }
- *packets = current_packets;
- *bytes = current_bytes;
- *last_use = jiffies;
+ /* vtcam not found, try to create new one */
+ vtcam = kzalloc(sizeof(*vtcam), GFP_KERNEL);
+ if (!vtcam)
+ return -ENOMEM;
+
+ err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id,
+ PRESTERA_HW_VTCAM_DIR_INGRESS);
+ if (err) {
+ kfree(vtcam);
+
+ /* cannot create a new one, try to fit into an existing vtcam */
+ if (__prestera_acl_vtcam_id_try_fit(acl, lookup,
+ keymask, &new_vtcam_id))
+ return err;
+ *vtcam_id = new_vtcam_id;
+ return 0;
+ }
+
+ vtcam->id = new_vtcam_id;
+ vtcam->lookup = lookup;
+ if (keymask) {
+ memcpy(vtcam->keymask, keymask, sizeof(vtcam->keymask));
+ vtcam->is_keymask_set = true;
+ }
+ refcount_set(&vtcam->refcount, 1);
+ list_add_rcu(&vtcam->list, &acl->vtcam_list);
+
+vtcam_found:
+ *vtcam_id = vtcam->id;
return 0;
}
+int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id)
+{
+ struct prestera_acl_vtcam *vtcam;
+ int err;
+
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (vtcam_id != vtcam->id)
+ continue;
+
+ if (!refcount_dec_and_test(&vtcam->refcount))
+ return 0;
+
+ err = prestera_hw_vtcam_destroy(acl->sw, vtcam->id);
+ if (err && err != -ENODEV) {
+ refcount_set(&vtcam->refcount, 1);
+ return err;
+ }
+
+ list_del(&vtcam->list);
+ kfree(vtcam);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
int prestera_acl_init(struct prestera_switch *sw)
{
struct prestera_acl *acl;
+ int err;
acl = kzalloc(sizeof(*acl), GFP_KERNEL);
if (!acl)
return -ENOMEM;
+ acl->sw = sw;
INIT_LIST_HEAD(&acl->rules);
+ INIT_LIST_HEAD(&acl->vtcam_list);
+ idr_init(&acl->uid);
+
+ err = rhashtable_init(&acl->acl_rule_entry_ht,
+ &__prestera_acl_rule_entry_ht_params);
+ if (err)
+ goto err_acl_rule_entry_ht_init;
+
+ err = rhashtable_init(&acl->ruleset_ht,
+ &prestera_acl_ruleset_ht_params);
+ if (err)
+ goto err_ruleset_ht_init;
+
sw->acl = acl;
- acl->sw = sw;
return 0;
+
+err_ruleset_ht_init:
+ rhashtable_destroy(&acl->acl_rule_entry_ht);
+err_acl_rule_entry_ht_init:
+ kfree(acl);
+ return err;
}
void prestera_acl_fini(struct prestera_switch *sw)
{
struct prestera_acl *acl = sw->acl;
+ WARN_ON(!idr_is_empty(&acl->uid));
+ idr_destroy(&acl->uid);
+
+ WARN_ON(!list_empty(&acl->vtcam_list));
WARN_ON(!list_empty(&acl->rules));
+
+ rhashtable_destroy(&acl->ruleset_ht);
+ rhashtable_destroy(&acl->acl_rule_entry_ht);
+
kfree(acl);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index 39b7869be659..40f6c1d961fa 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -1,114 +1,130 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
-/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */
#ifndef _PRESTERA_ACL_H_
#define _PRESTERA_ACL_H_
-enum prestera_acl_rule_match_entry_type {
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE = 1,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE
+#include <linux/types.h>
+#include "prestera_counter.h"
+
+#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF
+#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \
+ (PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF)
+
+#define rule_match_set_n(match_p, type, val_p, size) \
+ memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
+ val_p, size)
+#define rule_match_set(match_p, type, val) \
+ memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
+ &(val), sizeof(val))
+
+enum prestera_acl_match_type {
+ PRESTERA_ACL_RULE_MATCH_TYPE_PCL_ID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_TYPE,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_0,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_1,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_0,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_1,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_PROTO,
+ PRESTERA_ACL_RULE_MATCH_TYPE_SYS_PORT,
+ PRESTERA_ACL_RULE_MATCH_TYPE_SYS_DEV,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_ID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_TPID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_TYPE,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_CODE,
+
+ __PRESTERA_ACL_RULE_MATCH_TYPE_MAX
};
enum prestera_acl_rule_action {
- PRESTERA_ACL_RULE_ACTION_ACCEPT,
- PRESTERA_ACL_RULE_ACTION_DROP,
- PRESTERA_ACL_RULE_ACTION_TRAP
+ PRESTERA_ACL_RULE_ACTION_ACCEPT = 0,
+ PRESTERA_ACL_RULE_ACTION_DROP = 1,
+ PRESTERA_ACL_RULE_ACTION_TRAP = 2,
+ PRESTERA_ACL_RULE_ACTION_COUNT = 7,
+
+ PRESTERA_ACL_RULE_ACTION_MAX
};
-struct prestera_switch;
-struct prestera_port;
-struct prestera_acl_rule;
-struct prestera_acl_ruleset;
+enum {
+ PRESTERA_ACL_IFACE_TYPE_PORT,
+ PRESTERA_ACL_IFACE_TYPE_INDEX
+};
-struct prestera_flow_block_binding {
- struct list_head list;
- struct prestera_port *port;
- int span_id;
+struct prestera_acl_match {
+ __be32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
};
-struct prestera_flow_block {
- struct list_head binding_list;
- struct prestera_switch *sw;
- struct net *net;
- struct prestera_acl_ruleset *ruleset;
- struct flow_block_cb *block_cb;
+struct prestera_acl_action_count {
+ u32 id;
};
-struct prestera_acl_rule_action_entry {
- struct list_head list;
- enum prestera_acl_rule_action id;
+struct prestera_acl_rule_entry_key {
+ u32 prio;
+ struct prestera_acl_match match;
};
-struct prestera_acl_rule_match_entry {
- struct list_head list;
- enum prestera_acl_rule_match_entry_type type;
+struct prestera_acl_hw_action_info {
+ enum prestera_acl_rule_action id;
union {
+ struct prestera_acl_action_count count;
+ };
+};
+
+/* This struct (arg) is used only as a parameter for
+ * acl_rule_entry_create(). It must be flat. It can contain object keys,
+ * which will be resolved to object links before being saved to the
+ * acl_rule_entry struct.
+ */
+struct prestera_acl_rule_entry_arg {
+ u32 vtcam_id;
+ struct {
struct {
- u8 key;
- u8 mask;
- } u8;
- struct {
- u16 key;
- u16 mask;
- } u16;
- struct {
- u32 key;
- u32 mask;
- } u32;
- struct {
- u64 key;
- u64 mask;
- } u64;
+ u8 valid:1;
+ } accept, drop, trap;
struct {
- u8 key[ETH_ALEN];
- u8 mask[ETH_ALEN];
- } mac;
- } keymask;
+ u8 valid:1;
+ u32 client;
+ } count;
+ };
+};
+
+struct prestera_acl_rule {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct list_head list;
+ struct prestera_acl_ruleset *ruleset;
+ unsigned long cookie;
+ u32 priority;
+ struct prestera_acl_rule_entry_key re_key;
+ struct prestera_acl_rule_entry_arg re_arg;
+ struct prestera_acl_rule_entry *re;
};
+struct prestera_acl_iface {
+ union {
+ struct prestera_port *port;
+ u32 index;
+ };
+ u8 type;
+};
+
+struct prestera_acl;
+struct prestera_switch;
+struct prestera_flow_block;
+
int prestera_acl_init(struct prestera_switch *sw);
void prestera_acl_fini(struct prestera_switch *sw);
-struct prestera_flow_block *
-prestera_acl_block_create(struct prestera_switch *sw, struct net *net);
-void prestera_acl_block_destroy(struct prestera_flow_block *block);
-struct net *prestera_acl_block_net(struct prestera_flow_block *block);
-struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block);
-int prestera_acl_block_bind(struct prestera_flow_block *block,
- struct prestera_port *port);
-int prestera_acl_block_unbind(struct prestera_flow_block *block,
- struct prestera_port *port);
-struct prestera_acl_ruleset *
-prestera_acl_block_ruleset_get(struct prestera_flow_block *block);
+
struct prestera_acl_rule *
-prestera_acl_rule_create(struct prestera_flow_block *block,
+prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
unsigned long cookie);
-u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule);
void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
u32 priority);
-u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule);
-struct list_head *
-prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule);
-u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule);
-u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule);
-int prestera_acl_rule_action_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_action_entry *entry);
-struct list_head *
-prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule);
-int prestera_acl_rule_match_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_match_entry *entry);
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule);
struct prestera_acl_rule *
prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
@@ -117,8 +133,39 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
struct prestera_acl_rule *rule);
void prestera_acl_rule_del(struct prestera_switch *sw,
struct prestera_acl_rule *rule);
-int prestera_acl_rule_get_stats(struct prestera_switch *sw,
+int prestera_acl_rule_get_stats(struct prestera_acl *acl,
struct prestera_acl_rule *rule,
u64 *packets, u64 *bytes, u64 *last_use);
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_find(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key);
+void prestera_acl_rule_entry_destroy(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry *e);
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_create(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key,
+ struct prestera_acl_rule_entry_arg *arg);
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_get(struct prestera_acl *acl,
+ struct prestera_flow_block *block);
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block);
+void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask);
+bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
+int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset);
+void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset);
+int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port);
+int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port);
+void
+prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
+ u16 pcl_id);
+
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+ void *keymask, u32 *vtcam_id);
+int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
#endif /* _PRESTERA_ACL_H_ */
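
Read together, these declarations imply a ruleset lifecycle: get (or look up) the ruleset backing a flow block, set its keymask, offload it to reserve a vtcam, bind it to ports, and put the reference when done. A minimal usage sketch, assuming the caller already has an acl instance, a flow block, a port, and a keymask buffer of __PRESTERA_ACL_RULE_MATCH_TYPE_MAX __be32 words (error unwinding abbreviated):

    struct prestera_acl_ruleset *ruleset;
    int err;

    /* takes a reference; creates the ruleset on first use */
    ruleset = prestera_acl_ruleset_get(acl, block);
    if (IS_ERR(ruleset))
        return PTR_ERR(ruleset);

    /* describe the match fields, then reserve a fitting vtcam */
    prestera_acl_ruleset_keymask_set(ruleset, keymask);
    err = prestera_acl_ruleset_offload(ruleset);
    if (err)
        goto err_put;

    /* attach the vtcam/pcl-id pair to an ingress port */
    err = prestera_acl_ruleset_bind(ruleset, port);
    if (err)
        goto err_put;

    return 0;

err_put:
    prestera_acl_ruleset_put(ruleset);  /* drops the reference */
    return err;
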
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
new file mode 100644
index 000000000000..4cd53a2dae46
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2021 Marvell International Ltd. All rights reserved */
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_counter.h"
+
+#define COUNTER_POLL_TIME (msecs_to_jiffies(1000))
+#define COUNTER_RESCHED_TIME (msecs_to_jiffies(50))
+#define COUNTER_BULK_SIZE (256)
+
+struct prestera_counter {
+ struct prestera_switch *sw;
+ struct delayed_work stats_dw;
+ struct mutex mtx; /* protect block_list */
+ struct prestera_counter_block **block_list;
+ u32 total_read;
+ u32 block_list_len;
+ u32 curr_idx;
+ bool is_fetching;
+};
+
+struct prestera_counter_block {
+ struct list_head list;
+ u32 id;
+ u32 offset;
+ u32 num_counters;
+ u32 client;
+ struct idr counter_idr;
+ refcount_t refcnt;
+ struct mutex mtx; /* protect stats and counter_idr */
+ struct prestera_counter_stats *stats;
+ u8 *counter_flag;
+ bool is_updating;
+ bool full;
+};
+
+enum {
+ COUNTER_FLAG_READY = 0,
+ COUNTER_FLAG_INVALID = 1
+};
+
+static bool
+prestera_counter_is_ready(struct prestera_counter_block *block, u32 id)
+{
+ return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY;
+}
+
+static void prestera_counter_lock(struct prestera_counter *counter)
+{
+ mutex_lock(&counter->mtx);
+}
+
+static void prestera_counter_unlock(struct prestera_counter *counter)
+{
+ mutex_unlock(&counter->mtx);
+}
+
+static void prestera_counter_block_lock(struct prestera_counter_block *block)
+{
+ mutex_lock(&block->mtx);
+}
+
+static void prestera_counter_block_unlock(struct prestera_counter_block *block)
+{
+ mutex_unlock(&block->mtx);
+}
+
+static bool prestera_counter_block_incref(struct prestera_counter_block *block)
+{
+ return refcount_inc_not_zero(&block->refcnt);
+}
+
+static bool prestera_counter_block_decref(struct prestera_counter_block *block)
+{
+ return refcount_dec_and_test(&block->refcnt);
+}
+
+/* must be called with prestera_counter_block_lock() */
+static void prestera_counter_stats_clear(struct prestera_counter_block *block,
+ u32 counter_id)
+{
+ memset(&block->stats[counter_id - block->offset], 0,
+ sizeof(*block->stats));
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_lookup_not_full(struct prestera_counter *counter,
+ u32 client)
+{
+ u32 i;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i] &&
+ counter->block_list[i]->client == client &&
+ !counter->block_list[i]->full &&
+ prestera_counter_block_incref(counter->block_list[i])) {
+ prestera_counter_unlock(counter);
+ return counter->block_list[i];
+ }
+ }
+ prestera_counter_unlock(counter);
+
+ return NULL;
+}
+
+static int prestera_counter_block_list_add(struct prestera_counter *counter,
+ struct prestera_counter_block *block)
+{
+ struct prestera_counter_block **arr;
+ u32 i;
+
+ prestera_counter_lock(counter);
+
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i])
+ continue;
+
+ counter->block_list[i] = block;
+ prestera_counter_unlock(counter);
+ return 0;
+ }
+
+ arr = krealloc(counter->block_list, (counter->block_list_len + 1) *
+ sizeof(*counter->block_list), GFP_KERNEL);
+ if (!arr) {
+ prestera_counter_unlock(counter);
+ return -ENOMEM;
+ }
+
+ counter->block_list = arr;
+ counter->block_list[counter->block_list_len] = block;
+ counter->block_list_len++;
+ prestera_counter_unlock(counter);
+ return 0;
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_get(struct prestera_counter *counter, u32 client)
+{
+ struct prestera_counter_block *block;
+ int err;
+
+ block = prestera_counter_block_lookup_not_full(counter, client);
+ if (block)
+ return block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
+ err = prestera_hw_counter_block_get(counter->sw, client,
+ &block->id, &block->offset,
+ &block->num_counters);
+ if (err)
+ goto err_block;
+
+ block->stats = kcalloc(block->num_counters,
+ sizeof(*block->stats), GFP_KERNEL);
+ if (!block->stats) {
+ err = -ENOMEM;
+ goto err_stats;
+ }
+
+ block->counter_flag = kcalloc(block->num_counters,
+ sizeof(*block->counter_flag),
+ GFP_KERNEL);
+ if (!block->counter_flag) {
+ err = -ENOMEM;
+ goto err_flag;
+ }
+
+ block->client = client;
+ mutex_init(&block->mtx);
+ refcount_set(&block->refcnt, 1);
+ idr_init_base(&block->counter_idr, block->offset);
+
+ err = prestera_counter_block_list_add(counter, block);
+ if (err)
+ goto err_list_add;
+
+ return block;
+
+err_list_add:
+ idr_destroy(&block->counter_idr);
+ mutex_destroy(&block->mtx);
+ kfree(block->counter_flag);
+err_flag:
+ kfree(block->stats);
+err_stats:
+ prestera_hw_counter_block_release(counter->sw, block->id);
+err_block:
+ kfree(block);
+ return ERR_PTR(err);
+}
+
+static void prestera_counter_block_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block)
+{
+ u32 i;
+
+ if (!prestera_counter_block_decref(block))
+ return;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i] &&
+ counter->block_list[i]->id == block->id) {
+ counter->block_list[i] = NULL;
+ break;
+ }
+ }
+ prestera_counter_unlock(counter);
+
+ WARN_ON(!idr_is_empty(&block->counter_idr));
+
+ prestera_hw_counter_block_release(counter->sw, block->id);
+ idr_destroy(&block->counter_idr);
+ mutex_destroy(&block->mtx);
+ kfree(block->stats);
+ kfree(block);
+}
+
+static int prestera_counter_get_vacant(struct prestera_counter_block *block,
+ u32 *id)
+{
+ int free_id;
+
+ if (block->full)
+ return -ENOSPC;
+
+ prestera_counter_block_lock(block);
+ free_id = idr_alloc_cyclic(&block->counter_idr, NULL, block->offset,
+ block->offset + block->num_counters,
+ GFP_KERNEL);
+ if (free_id < 0) {
+ if (free_id == -ENOSPC)
+ block->full = true;
+
+ prestera_counter_block_unlock(block);
+ return free_id;
+ }
+ *id = free_id;
+ prestera_counter_block_unlock(block);
+
+ return 0;
+}
+
+int prestera_counter_get(struct prestera_counter *counter, u32 client,
+ struct prestera_counter_block **bl, u32 *counter_id)
+{
+ struct prestera_counter_block *block;
+ int err;
+ u32 id;
+
+get_next_block:
+ block = prestera_counter_block_get(counter, client);
+ if (IS_ERR(block))
+ return PTR_ERR(block);
+
+ err = prestera_counter_get_vacant(block, &id);
+ if (err) {
+ prestera_counter_block_put(counter, block);
+
+ if (err == -ENOSPC)
+ goto get_next_block;
+
+ return err;
+ }
+
+ prestera_counter_block_lock(block);
+ if (block->is_updating)
+ block->counter_flag[id - block->offset] = COUNTER_FLAG_INVALID;
+ prestera_counter_block_unlock(block);
+
+ *counter_id = id;
+ *bl = block;
+
+ return 0;
+}
+
+void prestera_counter_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block, u32 counter_id)
+{
+ if (!block)
+ return;
+
+ prestera_counter_block_lock(block);
+ idr_remove(&block->counter_idr, counter_id);
+ block->full = false;
+ prestera_counter_stats_clear(block, counter_id);
+ prestera_counter_block_unlock(block);
+
+ prestera_hw_counter_clear(counter->sw, block->id, counter_id);
+ prestera_counter_block_put(counter, block);
+}
+
+static u32 prestera_counter_block_idx_next(struct prestera_counter *counter,
+ u32 curr_idx)
+{
+ u32 idx, i, start = curr_idx + 1;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ idx = (start + i) % counter->block_list_len;
+ if (!counter->block_list[idx])
+ continue;
+
+ prestera_counter_unlock(counter);
+ return idx;
+ }
+ prestera_counter_unlock(counter);
+
+ return 0;
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
+{
+ if (idx >= counter->block_list_len)
+ return NULL;
+
+ prestera_counter_lock(counter);
+
+ if (!counter->block_list[idx] ||
+ !prestera_counter_block_incref(counter->block_list[idx])) {
+ prestera_counter_unlock(counter);
+ return NULL;
+ }
+
+ prestera_counter_unlock(counter);
+ return counter->block_list[idx];
+}
+
+static void prestera_counter_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dl_work =
+ container_of(work, struct delayed_work, work);
+ struct prestera_counter *counter =
+ container_of(dl_work, struct prestera_counter, stats_dw);
+ struct prestera_counter_block *block;
+ u32 resched_time = COUNTER_POLL_TIME;
+ u32 count = COUNTER_BULK_SIZE;
+ bool done = false;
+ int err;
+ u32 i;
+
+ block = prestera_counter_block_get_by_idx(counter, counter->curr_idx);
+ if (!block) {
+ if (counter->is_fetching)
+ goto abort;
+
+ goto next;
+ }
+
+ if (!counter->is_fetching) {
+ err = prestera_hw_counter_trigger(counter->sw, block->id);
+ if (err)
+ goto abort;
+
+ prestera_counter_block_lock(block);
+ block->is_updating = true;
+ prestera_counter_block_unlock(block);
+
+ counter->is_fetching = true;
+ counter->total_read = 0;
+ resched_time = COUNTER_RESCHED_TIME;
+ goto resched;
+ }
+
+ prestera_counter_block_lock(block);
+ err = prestera_hw_counters_get(counter->sw, counter->total_read,
+ &count, &done,
+ &block->stats[counter->total_read]);
+ prestera_counter_block_unlock(block);
+ if (err)
+ goto abort;
+
+ counter->total_read += count;
+ if (!done || counter->total_read < block->num_counters) {
+ resched_time = COUNTER_RESCHED_TIME;
+ goto resched;
+ }
+
+ for (i = 0; i < block->num_counters; i++) {
+ if (block->counter_flag[i] == COUNTER_FLAG_INVALID) {
+ prestera_counter_block_lock(block);
+ block->counter_flag[i] = COUNTER_FLAG_READY;
+ memset(&block->stats[i], 0, sizeof(*block->stats));
+ prestera_counter_block_unlock(block);
+ }
+ }
+
+ prestera_counter_block_lock(block);
+ block->is_updating = false;
+ prestera_counter_block_unlock(block);
+
+ goto next;
+abort:
+ prestera_hw_counter_abort(counter->sw);
+next:
+ counter->is_fetching = false;
+ counter->curr_idx =
+ prestera_counter_block_idx_next(counter, counter->curr_idx);
+resched:
+ if (block)
+ prestera_counter_block_put(counter, block);
+
+ schedule_delayed_work(&counter->stats_dw, resched_time);
+}
+
+/* Can be executed without rtnl_lock().
+ * So pay attention when changing something here.
+ */
+int prestera_counter_stats_get(struct prestera_counter *counter,
+ struct prestera_counter_block *block,
+ u32 counter_id, u64 *packets, u64 *bytes)
+{
+ if (!block || !prestera_counter_is_ready(block, counter_id)) {
+ *packets = 0;
+ *bytes = 0;
+ return 0;
+ }
+
+ prestera_counter_block_lock(block);
+ *packets = block->stats[counter_id - block->offset].packets;
+ *bytes = block->stats[counter_id - block->offset].bytes;
+
+ prestera_counter_stats_clear(block, counter_id);
+ prestera_counter_block_unlock(block);
+
+ return 0;
+}
+
+int prestera_counter_init(struct prestera_switch *sw)
+{
+ struct prestera_counter *counter;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return -ENOMEM;
+
+ counter->block_list = kzalloc(sizeof(*counter->block_list), GFP_KERNEL);
+ if (!counter->block_list) {
+ kfree(counter);
+ return -ENOMEM;
+ }
+
+ mutex_init(&counter->mtx);
+ counter->block_list_len = 1;
+ counter->sw = sw;
+ sw->counter = counter;
+
+ INIT_DELAYED_WORK(&counter->stats_dw, prestera_counter_stats_work);
+ schedule_delayed_work(&counter->stats_dw, COUNTER_POLL_TIME);
+
+ return 0;
+}
+
+void prestera_counter_fini(struct prestera_switch *sw)
+{
+ struct prestera_counter *counter = sw->counter;
+ u32 i;
+
+ cancel_delayed_work_sync(&counter->stats_dw);
+
+ for (i = 0; i < counter->block_list_len; i++)
+ WARN_ON(counter->block_list[i]);
+
+ mutex_destroy(&counter->mtx);
+ kfree(counter->block_list);
+ kfree(counter);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.h b/drivers/net/ethernet/marvell/prestera/prestera_counter.h
new file mode 100644
index 000000000000..ad6b73907799
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2021 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_COUNTER_H_
+#define _PRESTERA_COUNTER_H_
+
+#include <linux/types.h>
+
+struct prestera_counter_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct prestera_switch;
+struct prestera_counter;
+struct prestera_counter_block;
+
+int prestera_counter_init(struct prestera_switch *sw);
+void prestera_counter_fini(struct prestera_switch *sw);
+
+int prestera_counter_get(struct prestera_counter *counter, u32 client,
+ struct prestera_counter_block **block,
+ u32 *counter_id);
+void prestera_counter_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block, u32 counter_id);
+int prestera_counter_stats_get(struct prestera_counter *counter,
+ struct prestera_counter_block *block,
+ u32 counter_id, u64 *packets, u64 *bytes);
+
+#endif /* _PRESTERA_COUNTER_H_ */
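
The counter interface above is lease-based: prestera_counter_get() hands out an id from a shared hardware block (allocating a new block when the current one is full), and prestera_counter_put() returns it, releasing the block on the last user. A minimal usage sketch, assuming a valid sw->counter and a client id such as PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0 from the ACL code above:

    struct prestera_counter_block *block;
    u64 packets, bytes;
    u32 id;
    int err;

    err = prestera_counter_get(sw->counter, client, &block, &id);
    if (err)
        return err;

    /* stats are gathered by the delayed work in the background;
     * until the first poll completes this reads back zeroes
     */
    err = prestera_counter_stats_get(sw->counter, block, id,
                                     &packets, &bytes);

    /* release the lease; the block is freed on the last put */
    prestera_counter_put(sw->counter, block, id);
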
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index c9891e968259..d849f046ece7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -40,6 +40,11 @@ static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
return 0;
case FLOW_CLS_STATS:
return prestera_flower_stats(block, f);
+ case FLOW_CLS_TMPLT_CREATE:
+ return prestera_flower_tmplt_create(block, f);
+ case FLOW_CLS_TMPLT_DESTROY:
+ prestera_flower_tmplt_destroy(block, f);
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -60,11 +65,102 @@ static int prestera_flow_block_cb(enum tc_setup_type type,
}
}
+static void prestera_flow_block_destroy(void *cb_priv)
+{
+ struct prestera_flow_block *block = cb_priv;
+
+ prestera_flower_template_cleanup(block);
+
+ WARN_ON(!list_empty(&block->binding_list));
+
+ kfree(block);
+}
+
+static struct prestera_flow_block *
+prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
+{
+ struct prestera_flow_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+
+ INIT_LIST_HEAD(&block->binding_list);
+ block->net = net;
+ block->sw = sw;
+
+ return block;
+}
+
static void prestera_flow_block_release(void *cb_priv)
{
struct prestera_flow_block *block = cb_priv;
- prestera_acl_block_destroy(block);
+ prestera_flow_block_destroy(block);
+}
+
+static bool
+prestera_flow_block_is_bound(const struct prestera_flow_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static struct prestera_flow_block_binding *
+prestera_flow_block_lookup(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->port == port)
+ return binding;
+
+ return NULL;
+}
+
+static int prestera_flow_block_bind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+ int err;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+
+ binding->span_id = PRESTERA_SPAN_INVALID_ID;
+ binding->port = port;
+
+ if (prestera_flow_block_is_bound(block)) {
+ err = prestera_acl_ruleset_bind(block->ruleset_zero, port);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+ return err;
+}
+
+static int prestera_flow_block_unbind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ binding = prestera_flow_block_lookup(block, port);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (prestera_flow_block_is_bound(block))
+ prestera_acl_ruleset_unbind(block->ruleset_zero, port);
+
+ kfree(binding);
+ return 0;
}
static struct prestera_flow_block *
@@ -78,7 +174,7 @@ prestera_flow_block_get(struct prestera_switch *sw,
block_cb = flow_block_cb_lookup(f->block,
prestera_flow_block_cb, sw);
if (!block_cb) {
- block = prestera_acl_block_create(sw, f->net);
+ block = prestera_flow_block_create(sw, f->net);
if (!block)
return ERR_PTR(-ENOMEM);
@@ -86,7 +182,7 @@ prestera_flow_block_get(struct prestera_switch *sw,
sw, block,
prestera_flow_block_release);
if (IS_ERR(block_cb)) {
- prestera_acl_block_destroy(block);
+ prestera_flow_block_destroy(block);
return ERR_CAST(block_cb);
}
@@ -110,7 +206,7 @@ static void prestera_flow_block_put(struct prestera_flow_block *block)
return;
flow_block_cb_free(block_cb);
- prestera_acl_block_destroy(block);
+ prestera_flow_block_destroy(block);
}
static int prestera_setup_flow_block_bind(struct prestera_port *port,
@@ -128,7 +224,7 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
block_cb = block->block_cb;
- err = prestera_acl_block_bind(block, port);
+ err = prestera_flow_block_bind(block, port);
if (err)
goto err_block_bind;
@@ -162,7 +258,7 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
prestera_span_destroy(block);
- err = prestera_acl_block_unbind(block, port);
+ err = prestera_flow_block_unbind(block, port);
if (err)
goto error;
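The helpers above give the flow block a self-contained lifecycle: allocate, bind per port, unbind, destroy. A condensed, hypothetical sketch of that sequence using the static helpers introduced in this hunk (illustrative only, not a literal excerpt from the driver):

	static int example_flow_block_lifecycle(struct prestera_switch *sw,
						struct prestera_port *port,
						struct flow_block_offload *f)
	{
		struct prestera_flow_block *block;
		int err;

		block = prestera_flow_block_create(sw, f->net);
		if (!block)
			return -ENOMEM;

		err = prestera_flow_block_bind(block, port);
		if (err) {
			prestera_flow_block_destroy(block);
			return err;
		}

		/* ... rules are offloaded via the flower callbacks ... */

		prestera_flow_block_unbind(block, port);
		prestera_flow_block_destroy(block); /* WARNs if bindings remain */
		return 0;
	}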
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 467c7038cace..1ea5b745bf72 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -7,6 +7,24 @@
#include <net/flow_offload.h>
struct prestera_port;
+struct prestera_switch;
+struct prestera_flower_template;
+
+struct prestera_flow_block_binding {
+ struct list_head list;
+ struct prestera_port *port;
+ int span_id;
+};
+
+struct prestera_flow_block {
+ struct list_head binding_list;
+ struct prestera_switch *sw;
+ struct net *net;
+ struct prestera_acl_ruleset *ruleset_zero;
+ struct flow_block_cb *block_cb;
+ struct prestera_flower_template *tmplt;
+ unsigned int rule_count;
+};
int prestera_flow_block_setup(struct prestera_port *port,
struct flow_block_offload *f);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index e571ba09ec08..19c1417fd05f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -3,42 +3,61 @@
#include "prestera.h"
#include "prestera_acl.h"
+#include "prestera_flow.h"
#include "prestera_flower.h"
+struct prestera_flower_template {
+ struct prestera_acl_ruleset *ruleset;
+};
+
+void prestera_flower_template_cleanup(struct prestera_flow_block *block)
+{
+ if (block->tmplt) {
+ /* put the ruleset reference taken in tmplt_create */
+ prestera_acl_ruleset_put(block->tmplt->ruleset);
+ kfree(block->tmplt);
+ block->tmplt = NULL;
+ return;
+ }
+}
+
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
- struct prestera_acl_rule_action_entry a_entry;
const struct flow_action_entry *act;
- int err, i;
+ int i;
+ /* the whole struct (rule->re_arg) must be zero-initialized */
if (!flow_action_has_entries(flow_action))
return 0;
flow_action_for_each(i, act, flow_action) {
- memset(&a_entry, 0, sizeof(a_entry));
-
switch (act->id) {
case FLOW_ACTION_ACCEPT:
- a_entry.id = PRESTERA_ACL_RULE_ACTION_ACCEPT;
+ if (rule->re_arg.accept.valid)
+ return -EEXIST;
+
+ rule->re_arg.accept.valid = 1;
break;
case FLOW_ACTION_DROP:
- a_entry.id = PRESTERA_ACL_RULE_ACTION_DROP;
+ if (rule->re_arg.drop.valid)
+ return -EEXIST;
+
+ rule->re_arg.drop.valid = 1;
break;
case FLOW_ACTION_TRAP:
- a_entry.id = PRESTERA_ACL_RULE_ACTION_TRAP;
+ if (rule->re_arg.trap.valid)
+ return -EEXIST;
+
+ rule->re_arg.trap.valid = 1;
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
pr_err("Unsupported action\n");
return -EOPNOTSUPP;
}
-
- err = prestera_acl_rule_action_add(rule, &a_entry);
- if (err)
- return err;
}
return 0;
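The parser now records requested actions in rule->re_arg instead of building a list of action entries; a set `valid` bit both rejects duplicate actions (-EEXIST) and later selects the hardware action infos. The re_arg layout itself is defined in prestera_acl.h, not in this diff; a minimal sketch of the shape implied by the usage above (field names assumed from the call sites):

	struct example_rule_entry_arg {
		struct {
			u8 valid:1;
		} accept, drop, trap;
		/* the real struct likely carries more members, e.g. counters */
	};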
@@ -47,12 +66,12 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
struct flow_cls_offload *f,
struct prestera_flow_block *block)
-{
- struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
- struct prestera_acl_rule_match_entry m_entry = {0};
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ struct prestera_port *port;
struct net_device *ingress_dev;
struct flow_match_meta match;
- struct prestera_port *port;
+ __be16 key, mask;
flow_rule_match_meta(f_rule, &match);
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
@@ -61,7 +80,7 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
return -EINVAL;
}
- ingress_dev = __dev_get_by_index(prestera_acl_block_net(block),
+ ingress_dev = __dev_get_by_index(block->net,
match.key->ingress_ifindex);
if (!ingress_dev) {
NL_SET_ERR_MSG_MOD(f->common.extack,
@@ -76,22 +95,28 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
}
port = netdev_priv(ingress_dev);
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT;
- m_entry.keymask.u64.key = port->hw_id | ((u64)port->dev_id << 32);
- m_entry.keymask.u64.mask = ~(u64)0;
+ mask = htons(0x1FFF);
+ key = htons(port->hw_id);
+ rule_match_set(r_match->key, SYS_PORT, key);
+ rule_match_set(r_match->mask, SYS_PORT, mask);
+
+ mask = htons(0x1FF);
+ key = htons(port->dev_id);
+ rule_match_set(r_match->key, SYS_DEV, key);
+ rule_match_set(r_match->mask, SYS_DEV, mask);
+
+ return 0;
- return prestera_acl_rule_match_add(rule, &m_entry);
}
static int prestera_flower_parse(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_cls_offload *f)
-{
- struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = f_rule->match.dissector;
- struct prestera_acl_rule_match_entry m_entry;
- u16 n_proto_mask = 0;
- u16 n_proto_key = 0;
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ __be16 n_proto_mask = 0;
+ __be16 n_proto_key = 0;
u16 addr_type = 0;
u8 ip_proto = 0;
int err;
@@ -129,32 +154,19 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
struct flow_match_basic match;
flow_rule_match_basic(f_rule, &match);
- n_proto_key = ntohs(match.key->n_proto);
- n_proto_mask = ntohs(match.mask->n_proto);
+ n_proto_key = match.key->n_proto;
+ n_proto_mask = match.mask->n_proto;
- if (n_proto_key == ETH_P_ALL) {
+ if (ntohs(match.key->n_proto) == ETH_P_ALL) {
n_proto_key = 0;
n_proto_mask = 0;
}
- /* add eth type key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE;
- m_entry.keymask.u16.key = n_proto_key;
- m_entry.keymask.u16.mask = n_proto_mask;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
-
- /* add ip proto key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO;
- m_entry.keymask.u8.key = match.key->ip_proto;
- m_entry.keymask.u8.mask = match.mask->ip_proto;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
+ rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);
+ rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
+ rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
ip_proto = match.key->ip_proto;
}
@@ -163,27 +175,27 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_eth_addrs(f_rule, &match);
- /* add ethernet dst key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC;
- memcpy(&m_entry.keymask.mac.key,
- &match.key->dst, sizeof(match.key->dst));
- memcpy(&m_entry.keymask.mac.mask,
- &match.mask->dst, sizeof(match.mask->dst));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
-
- /* add ethernet src key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC;
- memcpy(&m_entry.keymask.mac.key,
- &match.key->src, sizeof(match.key->src));
- memcpy(&m_entry.keymask.mac.mask,
- &match.mask->src, sizeof(match.mask->src));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ /* DA key, mask */
+ rule_match_set_n(r_match->key,
+ ETH_DMAC_0, &match.key->dst[0], 4);
+ rule_match_set_n(r_match->key,
+ ETH_DMAC_1, &match.key->dst[4], 2);
+
+ rule_match_set_n(r_match->mask,
+ ETH_DMAC_0, &match.mask->dst[0], 4);
+ rule_match_set_n(r_match->mask,
+ ETH_DMAC_1, &match.mask->dst[4], 2);
+
+ /* SA key, mask */
+ rule_match_set_n(r_match->key,
+ ETH_SMAC_0, &match.key->src[0], 4);
+ rule_match_set_n(r_match->key,
+ ETH_SMAC_1, &match.key->src[4], 2);
+
+ rule_match_set_n(r_match->mask,
+ ETH_SMAC_0, &match.mask->src[0], 4);
+ rule_match_set_n(r_match->mask,
+ ETH_SMAC_1, &match.mask->src[4], 2);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -191,25 +203,11 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_ipv4_addrs(f_rule, &match);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC;
- memcpy(&m_entry.keymask.u32.key,
- &match.key->src, sizeof(match.key->src));
- memcpy(&m_entry.keymask.u32.mask,
- &match.mask->src, sizeof(match.mask->src));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, IP_SRC, match.key->src);
+ rule_match_set(r_match->mask, IP_SRC, match.mask->src);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST;
- memcpy(&m_entry.keymask.u32.key,
- &match.key->dst, sizeof(match.key->dst));
- memcpy(&m_entry.keymask.u32.mask,
- &match.mask->dst, sizeof(match.mask->dst));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, IP_DST, match.key->dst);
+ rule_match_set(r_match->mask, IP_DST, match.mask->dst);
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
@@ -224,21 +222,11 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_ports(f_rule, &match);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC;
- m_entry.keymask.u16.key = ntohs(match.key->src);
- m_entry.keymask.u16.mask = ntohs(match.mask->src);
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
+ rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST;
- m_entry.keymask.u16.key = ntohs(match.key->dst);
- m_entry.keymask.u16.mask = ntohs(match.mask->dst);
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
+ rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -247,22 +235,15 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_vlan(f_rule, &match);
if (match.mask->vlan_id != 0) {
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID;
- m_entry.keymask.u16.key = match.key->vlan_id;
- m_entry.keymask.u16.mask = match.mask->vlan_id;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ __be16 key = cpu_to_be16(match.key->vlan_id);
+ __be16 mask = cpu_to_be16(match.mask->vlan_id);
+
+ rule_match_set(r_match->key, VLAN_ID, key);
+ rule_match_set(r_match->mask, VLAN_ID, mask);
}
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID;
- m_entry.keymask.u16.key = ntohs(match.key->vlan_tpid);
- m_entry.keymask.u16.mask = ntohs(match.mask->vlan_tpid);
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
+ rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
@@ -270,90 +251,164 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_icmp(f_rule, &match);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE;
- m_entry.keymask.u8.key = match.key->type;
- m_entry.keymask.u8.mask = match.mask->type;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
+ rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE;
- m_entry.keymask.u8.key = match.key->code;
- m_entry.keymask.u8.mask = match.mask->code;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, ICMP_CODE, match.key->code);
+ rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
}
- return prestera_flower_parse_actions(block, rule,
- &f->rule->action,
+ return prestera_flower_parse_actions(block, rule, &f->rule->action,
f->common.extack);
}
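Every match conversion above goes through rule_match_set()/rule_match_set_n(), which this diff does not show (they live in prestera_acl.h). Judging from the call sites they are thin memcpy wrappers that store a key or mask value into the per-type slot of the match storage; a plausible reconstruction, offered as an assumption rather than the verbatim definition:

	/* hypothetical reconstruction of the helpers used above */
	#define rule_match_set_n(match_p, type, val_p, size)		\
		memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type],	\
		       (val_p), (size))

	#define rule_match_set(match_p, type, val)			\
		memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type],	\
		       &(val), sizeof(val))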
int prestera_flower_replace(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- struct prestera_switch *sw = prestera_acl_block_sw(block);
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl *acl = block->sw->acl;
struct prestera_acl_rule *rule;
int err;
- rule = prestera_acl_rule_create(block, f->cookie);
- if (IS_ERR(rule))
- return PTR_ERR(rule);
+ ruleset = prestera_acl_ruleset_get(acl, block);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ /* increments the ruleset reference */
+ rule = prestera_acl_rule_create(ruleset, f->cookie);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule_create;
+ }
err = prestera_flower_parse(block, rule, f);
if (err)
- goto err_flower_parse;
+ goto err_rule_add;
+
+ if (!prestera_acl_ruleset_is_offload(ruleset)) {
+ err = prestera_acl_ruleset_offload(ruleset);
+ if (err)
+ goto err_ruleset_offload;
+ }
- err = prestera_acl_rule_add(sw, rule);
+ err = prestera_acl_rule_add(block->sw, rule);
if (err)
goto err_rule_add;
+ prestera_acl_ruleset_put(ruleset);
return 0;
+err_ruleset_offload:
err_rule_add:
-err_flower_parse:
prestera_acl_rule_destroy(rule);
+err_rule_create:
+ prestera_acl_ruleset_put(ruleset);
return err;
}
void prestera_flower_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
+ struct prestera_acl_ruleset *ruleset;
struct prestera_acl_rule *rule;
- struct prestera_switch *sw;
- rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
- f->cookie);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ if (IS_ERR(ruleset))
+ return;
+
+ rule = prestera_acl_rule_lookup(ruleset, f->cookie);
if (rule) {
- sw = prestera_acl_block_sw(block);
- prestera_acl_rule_del(sw, rule);
+ prestera_acl_rule_del(block->sw, rule);
prestera_acl_rule_destroy(rule);
}
+ prestera_acl_ruleset_put(ruleset);
+}
+
+int prestera_flower_tmplt_create(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_flower_template *template;
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_rule rule;
+ int err;
+
+ memset(&rule, 0, sizeof(rule));
+ err = prestera_flower_parse(block, &rule, f);
+ if (err)
+ return err;
+
+ template = kmalloc(sizeof(*template), GFP_KERNEL);
+ if (!template) {
+ err = -ENOMEM;
+ goto err_malloc;
+ }
+
+ prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block);
+ if (IS_ERR_OR_NULL(ruleset)) {
+ err = -EINVAL;
+ goto err_ruleset_get;
+ }
+
+ /* attach the keymask/template to this ruleset */
+ prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+
+ /* skip the error, as it is not possible to reject a template
+ * operation; keep the reference to the ruleset so rules can be
+ * added to it later. If offload fails here, the ruleset will be
+ * offloaded again when a new rule is added. It is also unlikely
+ * that the ruleset is already offloaded at this stage.
+ */
+ prestera_acl_ruleset_offload(ruleset);
+
+ /* keep the reference to the ruleset */
+ template->ruleset = ruleset;
+ block->tmplt = template;
+ return 0;
+
+err_ruleset_get:
+ kfree(template);
+err_malloc:
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
+ return err;
+}
+
+void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ prestera_flower_template_cleanup(block);
}
int prestera_flower_stats(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- struct prestera_switch *sw = prestera_acl_block_sw(block);
+ struct prestera_acl_ruleset *ruleset;
struct prestera_acl_rule *rule;
u64 packets;
u64 lastuse;
u64 bytes;
int err;
- rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
- f->cookie);
- if (!rule)
- return -EINVAL;
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule = prestera_acl_rule_lookup(ruleset, f->cookie);
+ if (!rule) {
+ err = -EINVAL;
+ goto err_rule_get_stats;
+ }
- err = prestera_acl_rule_get_stats(sw, rule, &packets, &bytes, &lastuse);
+ err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
+ &bytes, &lastuse);
if (err)
- return err;
+ goto err_rule_get_stats;
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
- FLOW_ACTION_HW_STATS_IMMEDIATE);
- return 0;
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+err_rule_get_stats:
+ prestera_acl_ruleset_put(ruleset);
+ return err;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
index 91e045eec58b..dc3aa4280e9f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
-/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */
#ifndef _PRESTERA_FLOWER_H_
#define _PRESTERA_FLOWER_H_
#include <net/pkt_cls.h>
+struct prestera_switch;
struct prestera_flow_block;
int prestera_flower_replace(struct prestera_flow_block *block,
@@ -14,5 +15,10 @@ void prestera_flower_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f);
int prestera_flower_stats(struct prestera_flow_block *block,
struct flow_cls_offload *f);
+int prestera_flower_tmplt_create(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_template_cleanup(struct prestera_flow_block *block);
#endif /* _PRESTERA_FLOWER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 9b8b1ed474fc..e6bfadc874c5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -9,6 +9,7 @@
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_acl.h"
+#include "prestera_counter.h"
#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
@@ -38,13 +39,24 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
- PRESTERA_CMD_TYPE_ACL_RULE_ADD = 0x500,
- PRESTERA_CMD_TYPE_ACL_RULE_DELETE = 0x501,
- PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET = 0x510,
- PRESTERA_CMD_TYPE_ACL_RULESET_CREATE = 0x520,
- PRESTERA_CMD_TYPE_ACL_RULESET_DELETE = 0x521,
- PRESTERA_CMD_TYPE_ACL_PORT_BIND = 0x530,
- PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531,
+ PRESTERA_CMD_TYPE_COUNTER_GET = 0x510,
+ PRESTERA_CMD_TYPE_COUNTER_ABORT = 0x511,
+ PRESTERA_CMD_TYPE_COUNTER_TRIGGER = 0x512,
+ PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET = 0x513,
+ PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE = 0x514,
+ PRESTERA_CMD_TYPE_COUNTER_CLEAR = 0x515,
+
+ PRESTERA_CMD_TYPE_VTCAM_CREATE = 0x540,
+ PRESTERA_CMD_TYPE_VTCAM_DESTROY = 0x541,
+ PRESTERA_CMD_TYPE_VTCAM_RULE_ADD = 0x550,
+ PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE = 0x551,
+ PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND = 0x560,
+ PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND = 0x561,
+
+ PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600,
+ PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
+ PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
+ PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
@@ -359,76 +371,84 @@ struct prestera_msg_bridge_resp {
u8 pad[2];
};
-struct prestera_msg_acl_action {
- __le32 id;
- __le32 reserved[5];
+struct prestera_msg_vtcam_create_req {
+ struct prestera_msg_cmd cmd;
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ u8 direction;
+ u8 lookup;
+ u8 pad[2];
};
-struct prestera_msg_acl_match {
- __le32 type;
- __le32 __reserved;
- union {
- struct {
- u8 key;
- u8 mask;
- } u8;
- struct {
- __le16 key;
- __le16 mask;
- } u16;
- struct {
- __le32 key;
- __le32 mask;
- } u32;
- struct {
- __le64 key;
- __le64 mask;
- } u64;
- struct {
- u8 key[ETH_ALEN];
- u8 mask[ETH_ALEN];
- } mac;
- } keymask;
+struct prestera_msg_vtcam_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 vtcam_id;
};
-struct prestera_msg_acl_rule_req {
+struct prestera_msg_vtcam_rule_add_req {
struct prestera_msg_cmd cmd;
- __le32 id;
- __le32 priority;
- __le16 ruleset_id;
- u8 n_actions;
- u8 n_matches;
+ __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 vtcam_id;
+ __le32 prio;
+ __le32 n_act;
};
-struct prestera_msg_acl_rule_resp {
- struct prestera_msg_ret ret;
+struct prestera_msg_vtcam_rule_del_req {
+ struct prestera_msg_cmd cmd;
+ __le32 vtcam_id;
__le32 id;
};
-struct prestera_msg_acl_rule_stats_resp {
+struct prestera_msg_vtcam_bind_req {
+ struct prestera_msg_cmd cmd;
+ union {
+ struct {
+ __le32 hw_id;
+ __le32 dev_id;
+ } port;
+ __le32 index;
+ };
+ __le32 vtcam_id;
+ __le16 pcl_id;
+ __le16 type;
+};
+
+struct prestera_msg_vtcam_resp {
struct prestera_msg_ret ret;
- __le64 packets;
- __le64 bytes;
+ __le32 vtcam_id;
+ __le32 rule_id;
};
-struct prestera_msg_acl_ruleset_bind_req {
- struct prestera_msg_cmd cmd;
- __le32 port;
- __le32 dev;
- __le16 ruleset_id;
- u8 pad[2];
+struct prestera_msg_acl_action {
+ __le32 id;
+ __le32 __reserved;
+ union {
+ struct {
+ __le32 id;
+ } count;
+ __le32 reserved[6];
+ };
};
-struct prestera_msg_acl_ruleset_req {
+struct prestera_msg_counter_req {
struct prestera_msg_cmd cmd;
- __le16 id;
- u8 pad[2];
+ __le32 client;
+ __le32 block_id;
+ __le32 num_counters;
+};
+
+struct prestera_msg_counter_stats {
+ __le64 packets;
+ __le64 bytes;
};
-struct prestera_msg_acl_ruleset_resp {
+struct prestera_msg_counter_resp {
struct prestera_msg_ret ret;
- __le16 id;
- u8 pad[2];
+ __le32 block_id;
+ __le32 offset;
+ __le32 num_counters;
+ __le32 done;
+ struct prestera_msg_counter_stats stats[];
};
struct prestera_msg_span_req {
@@ -465,6 +485,48 @@ struct prestera_msg_rxtx_resp {
__le32 map_addr;
};
+struct prestera_msg_iface {
+ union {
+ struct {
+ __le32 dev;
+ __le32 port;
+ };
+ __le16 lag_id;
+ };
+ __le16 vr_id;
+ __le16 vid;
+ u8 type;
+ u8 __pad[3];
+};
+
+struct prestera_msg_rif_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_iface iif;
+ __le32 mtu;
+ __le16 rif_id;
+ __le16 __reserved;
+ u8 mac[ETH_ALEN];
+ u8 __pad[2];
+};
+
+struct prestera_msg_rif_resp {
+ struct prestera_msg_ret ret;
+ __le16 rif_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_vr_req {
+ struct prestera_msg_cmd cmd;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_vr_resp {
+ struct prestera_msg_ret ret;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
struct prestera_msg_lag_req {
struct prestera_msg_cmd cmd;
__le32 port;
@@ -521,14 +583,24 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28);
BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_req) != 16);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_bind_req) != 16);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_create_req) != 84);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_add_req) != 168);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_del_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_bind_req) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_action) != 32);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
+
+ /* structures that are part of req/resp fw messages */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
@@ -537,11 +609,12 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20);
BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_resp) != 12);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_stats_resp) != 24);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_resp) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12);
/* check events */
BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
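These compile-time checks pin the firmware wire ABI: every req/resp struct is padded by hand so sizeof() is identical on every architecture and compiler. A toy example of the convention (not taken from the driver):

	struct example_fw_msg {
		__le32 id;    /* 4 bytes */
		__le16 flags; /* 2 bytes */
		u8 pad[2];    /* explicit pad to an 8-byte total */
	};

	static void example_build_test(void)
	{
		BUILD_BUG_ON(sizeof(struct example_fw_msg) != 8);
	}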
@@ -1044,225 +1117,162 @@ static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause)
}
}
-int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
+int prestera_hw_vtcam_create(struct prestera_switch *sw,
+ u8 lookup, const u32 *keymask, u32 *vtcam_id,
+ enum prestera_hw_vtcam_direction_t dir)
{
- struct prestera_msg_acl_ruleset_resp resp;
- struct prestera_msg_acl_ruleset_req req;
int err;
+ struct prestera_msg_vtcam_resp resp;
+ struct prestera_msg_vtcam_create_req req = {
+ .lookup = lookup,
+ .direction = dir,
+ };
+
+ if (keymask)
+ memcpy(req.keymask, keymask, sizeof(req.keymask));
+ else
+ memset(req.keymask, 0, sizeof(req.keymask));
- err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULESET_CREATE,
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_CREATE,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
- *ruleset_id = __le16_to_cpu(resp.id);
-
+ *vtcam_id = __le32_to_cpu(resp.vtcam_id);
return 0;
}
-int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id)
+int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id)
{
- struct prestera_msg_acl_ruleset_req req = {
- .id = __cpu_to_le16(ruleset_id),
+ struct prestera_msg_vtcam_destroy_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
};
- return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE,
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_DESTROY,
&req.cmd, sizeof(req));
}
-static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action,
- struct prestera_acl_rule *rule)
+static int
+prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action,
+ struct prestera_acl_hw_action_info *info)
{
- struct list_head *a_list = prestera_acl_rule_action_list_get(rule);
- struct prestera_acl_rule_action_entry *a_entry;
- int i = 0;
-
- list_for_each_entry(a_entry, a_list, list) {
- action[i].id = __cpu_to_le32(a_entry->id);
-
- switch (a_entry->id) {
- case PRESTERA_ACL_RULE_ACTION_ACCEPT:
- case PRESTERA_ACL_RULE_ACTION_DROP:
- case PRESTERA_ACL_RULE_ACTION_TRAP:
- /* just rule action id, no specific data */
- break;
- default:
- return -EINVAL;
- }
+ action->id = __cpu_to_le32(info->id);
- i++;
- }
-
- return 0;
-}
-
-static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
- struct prestera_acl_rule *rule)
-{
- struct list_head *m_list = prestera_acl_rule_match_list_get(rule);
- struct prestera_acl_rule_match_entry *m_entry;
- int i = 0;
-
- list_for_each_entry(m_entry, m_list, list) {
- match[i].type = __cpu_to_le32(m_entry->type);
-
- switch (m_entry->type) {
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID:
- match[i].keymask.u16.key =
- __cpu_to_le16(m_entry->keymask.u16.key);
- match[i].keymask.u16.mask =
- __cpu_to_le16(m_entry->keymask.u16.mask);
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO:
- match[i].keymask.u8.key = m_entry->keymask.u8.key;
- match[i].keymask.u8.mask = m_entry->keymask.u8.mask;
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC:
- memcpy(match[i].keymask.mac.key,
- m_entry->keymask.mac.key,
- sizeof(match[i].keymask.mac.key));
- memcpy(match[i].keymask.mac.mask,
- m_entry->keymask.mac.mask,
- sizeof(match[i].keymask.mac.mask));
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST:
- match[i].keymask.u32.key =
- __cpu_to_le32(m_entry->keymask.u32.key);
- match[i].keymask.u32.mask =
- __cpu_to_le32(m_entry->keymask.u32.mask);
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT:
- match[i].keymask.u64.key =
- __cpu_to_le64(m_entry->keymask.u64.key);
- match[i].keymask.u64.mask =
- __cpu_to_le64(m_entry->keymask.u64.mask);
- break;
- default:
- return -EINVAL;
- }
-
- i++;
+ switch (info->id) {
+ case PRESTERA_ACL_RULE_ACTION_ACCEPT:
+ case PRESTERA_ACL_RULE_ACTION_DROP:
+ case PRESTERA_ACL_RULE_ACTION_TRAP:
+ /* just rule action id, no specific data */
+ break;
+ case PRESTERA_ACL_RULE_ACTION_COUNT:
+ action->count.id = __cpu_to_le32(info->count.id);
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
-int prestera_hw_acl_rule_add(struct prestera_switch *sw,
- struct prestera_acl_rule *rule,
- u32 *rule_id)
+int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
+ u32 vtcam_id, u32 prio, void *key, void *keymask,
+ struct prestera_acl_hw_action_info *act,
+ u8 n_act, u32 *rule_id)
{
- struct prestera_msg_acl_action *actions;
- struct prestera_msg_acl_match *matches;
- struct prestera_msg_acl_rule_resp resp;
- struct prestera_msg_acl_rule_req *req;
- u8 n_actions;
- u8 n_matches;
+ struct prestera_msg_acl_action *actions_msg;
+ struct prestera_msg_vtcam_rule_add_req *req;
+ struct prestera_msg_vtcam_resp resp;
void *buff;
u32 size;
int err;
+ u8 i;
- n_actions = prestera_acl_rule_action_len(rule);
- n_matches = prestera_acl_rule_match_len(rule);
-
- size = sizeof(*req) + sizeof(*actions) * n_actions +
- sizeof(*matches) * n_matches;
+ size = sizeof(*req) + sizeof(*actions_msg) * n_act;
buff = kzalloc(size, GFP_KERNEL);
if (!buff)
return -ENOMEM;
req = buff;
- actions = buff + sizeof(*req);
- matches = buff + sizeof(*req) + sizeof(*actions) * n_actions;
-
- /* put acl actions into the message */
- err = prestera_hw_acl_actions_put(actions, rule);
- if (err)
- goto free_buff;
+ req->n_act = __cpu_to_le32(n_act);
+ actions_msg = buff + sizeof(*req);
/* put acl matches into the message */
- err = prestera_hw_acl_matches_put(matches, rule);
- if (err)
- goto free_buff;
+ memcpy(req->key, key, sizeof(req->key));
+ memcpy(req->keymask, keymask, sizeof(req->keymask));
- req->ruleset_id = __cpu_to_le16(prestera_acl_rule_ruleset_id_get(rule));
- req->priority = __cpu_to_le32(prestera_acl_rule_priority_get(rule));
- req->n_actions = prestera_acl_rule_action_len(rule);
- req->n_matches = prestera_acl_rule_match_len(rule);
+ /* put acl actions into the message */
+ for (i = 0; i < n_act; i++) {
+ err = prestera_acl_rule_add_put_action(&actions_msg[i],
+ &act[i]);
+ if (err)
+ goto free_buff;
+ }
- err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_ADD,
+ req->vtcam_id = __cpu_to_le32(vtcam_id);
+ req->prio = __cpu_to_le32(prio);
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_ADD,
&req->cmd, size, &resp.ret, sizeof(resp));
if (err)
goto free_buff;
- *rule_id = __le32_to_cpu(resp.id);
+ *rule_id = __le32_to_cpu(resp.rule_id);
free_buff:
kfree(buff);
return err;
}
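The rule-add request is a variable-length message: a fixed header immediately followed by n_act action records in one allocation. A hedged sketch of that layout logic, reusing the names above but as a standalone helper:

	/* layout: | vtcam_rule_add_req | act[0] | act[1] | ... | */
	static void *example_build_rule_msg(u8 n_act, u32 *size_out)
	{
		struct prestera_msg_vtcam_rule_add_req *req;
		void *buff;
		u32 size;

		size = sizeof(*req) + sizeof(struct prestera_msg_acl_action) * n_act;
		buff = kzalloc(size, GFP_KERNEL);
		if (!buff)
			return NULL;

		req = buff;
		req->n_act = __cpu_to_le32(n_act);
		*size_out = size;
		return buff; /* action records start at buff + sizeof(*req) */
	}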
-int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id)
+int prestera_hw_vtcam_rule_del(struct prestera_switch *sw,
+ u32 vtcam_id, u32 rule_id)
{
- struct prestera_msg_acl_rule_req req = {
+ struct prestera_msg_vtcam_rule_del_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
.id = __cpu_to_le32(rule_id)
};
- return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULE_DELETE,
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE,
&req.cmd, sizeof(req));
}
-int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
- u64 *packets, u64 *bytes)
+int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id, u16 pcl_id)
{
- struct prestera_msg_acl_rule_stats_resp resp;
- struct prestera_msg_acl_rule_req req = {
- .id = __cpu_to_le32(rule_id)
+ struct prestera_msg_vtcam_bind_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
+ .type = __cpu_to_le16(iface->type),
+ .pcl_id = __cpu_to_le16(pcl_id)
};
- int err;
-
- err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *packets = __le64_to_cpu(resp.packets);
- *bytes = __le64_to_cpu(resp.bytes);
-
- return 0;
-}
-int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id)
-{
- struct prestera_msg_acl_ruleset_bind_req req = {
- .port = __cpu_to_le32(port->hw_id),
- .dev = __cpu_to_le32(port->dev_id),
- .ruleset_id = __cpu_to_le16(ruleset_id),
- };
+ if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) {
+ req.port.dev_id = __cpu_to_le32(iface->port->dev_id);
+ req.port.hw_id = __cpu_to_le32(iface->port->hw_id);
+ } else {
+ req.index = __cpu_to_le32(iface->index);
+ }
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND,
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND,
&req.cmd, sizeof(req));
}
-int prestera_hw_acl_port_unbind(const struct prestera_port *port,
- u16 ruleset_id)
+int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id)
{
- struct prestera_msg_acl_ruleset_bind_req req = {
- .port = __cpu_to_le32(port->hw_id),
- .dev = __cpu_to_le32(port->dev_id),
- .ruleset_id = __cpu_to_le16(ruleset_id),
+ struct prestera_msg_vtcam_bind_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
+ .type = __cpu_to_le16(iface->type)
};
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND,
+ if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) {
+ req.port.dev_id = __cpu_to_le32(iface->port->dev_id);
+ req.port.hw_id = __cpu_to_le32(iface->port->hw_id);
+ } else {
+ req.index = __cpu_to_le32(iface->index);
+ }
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND,
&req.cmd, sizeof(req));
}
@@ -1796,6 +1806,91 @@ int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
&req.cmd, sizeof(req));
}
+static int prestera_iface_to_msg(struct prestera_iface *iface,
+ struct prestera_msg_iface *msg_if)
+{
+ switch (iface->type) {
+ case PRESTERA_IF_PORT_E:
+ case PRESTERA_IF_VID_E:
+ msg_if->port = __cpu_to_le32(iface->dev_port.port_num);
+ msg_if->dev = __cpu_to_le32(iface->dev_port.hw_dev_num);
+ break;
+ case PRESTERA_IF_LAG_E:
+ msg_if->lag_id = __cpu_to_le16(iface->lag_id);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msg_if->vr_id = __cpu_to_le16(iface->vr_id);
+ msg_if->vid = __cpu_to_le16(iface->vlan_id);
+ msg_if->type = iface->type;
+ return 0;
+}
+
+int prestera_hw_rif_create(struct prestera_switch *sw,
+ struct prestera_iface *iif, u8 *mac, u16 *rif_id)
+{
+ struct prestera_msg_rif_resp resp;
+ struct prestera_msg_rif_req req;
+ int err;
+
+ memcpy(req.mac, mac, ETH_ALEN);
+
+ err = prestera_iface_to_msg(iif, &req.iif);
+ if (err)
+ return err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *rif_id = __le16_to_cpu(resp.rif_id);
+ return err;
+}
+
+int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
+ struct prestera_iface *iif)
+{
+ struct prestera_msg_rif_req req = {
+ .rif_id = __cpu_to_le16(rif_id),
+ };
+ int err;
+
+ err = prestera_iface_to_msg(iif, &req.iif);
+ if (err)
+ return err;
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id)
+{
+ struct prestera_msg_vr_resp resp;
+ struct prestera_msg_vr_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *vr_id = __le16_to_cpu(resp.vr_id);
+ return err;
+}
+
+int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id)
+{
+ struct prestera_msg_vr_req req = {
+ .vr_id = __cpu_to_le16(vr_id),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE, &req.cmd,
+ sizeof(req));
+}
+
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params)
{
@@ -1916,3 +2011,100 @@ void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
list_del_rcu(&eh->list);
kfree_rcu(eh, rcu);
}
+
+int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_TRIGGER,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counter_abort(struct prestera_switch *sw)
+{
+ struct prestera_msg_counter_req req;
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_ABORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx,
+ u32 *len, bool *done,
+ struct prestera_counter_stats *stats)
+{
+ struct prestera_msg_counter_resp *resp;
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(idx),
+ .num_counters = __cpu_to_le32(*len),
+ };
+ size_t size = struct_size(resp, stats, *len);
+ int err, i;
+
+ resp = kmalloc(size, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_GET,
+ &req.cmd, sizeof(req), &resp->ret, size);
+ if (err)
+ goto free_buff;
+
+ for (i = 0; i < __le32_to_cpu(resp->num_counters); i++) {
+ stats[i].packets += __le64_to_cpu(resp->stats[i].packets);
+ stats[i].bytes += __le64_to_cpu(resp->stats[i].bytes);
+ }
+
+ *len = __le32_to_cpu(resp->num_counters);
+ *done = __le32_to_cpu(resp->done);
+
+free_buff:
+ kfree(resp);
+ return err;
+}
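prestera_msg_counter_resp ends in a flexible array member, so the response buffer is sized with struct_size(), which computes sizeof(*resp) + len * sizeof(resp->stats[0]) with overflow checking instead of open-coded arithmetic. Minimal sketch of the idiom:

	static struct prestera_msg_counter_resp *example_alloc_resp(u32 len)
	{
		struct prestera_msg_counter_resp *resp;

		/* overflow-checked: header plus 'len' trailing stats entries */
		resp = kmalloc(struct_size(resp, stats, len), GFP_KERNEL);
		return resp;
	}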
+
+int prestera_hw_counter_block_get(struct prestera_switch *sw,
+ u32 client, u32 *block_id, u32 *offset,
+ u32 *num_counters)
+{
+ struct prestera_msg_counter_resp resp;
+ struct prestera_msg_counter_req req = {
+ .client = __cpu_to_le32(client)
+ };
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *block_id = __le32_to_cpu(resp.block_id);
+ *offset = __le32_to_cpu(resp.offset);
+ *num_counters = __le32_to_cpu(resp.num_counters);
+
+ return 0;
+}
+
+int prestera_hw_counter_block_release(struct prestera_switch *sw,
+ u32 block_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
+ u32 counter_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id),
+ .num_counters = __cpu_to_le32(counter_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR,
+ &req.cmd, sizeof(req));
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 57a3c2e5b112..3ff12bae5909 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -5,6 +5,7 @@
#define _PRESTERA_HW_H_
#include <linux/types.h>
+#include "prestera_acl.h"
enum prestera_accept_frm_type {
PRESTERA_ACCEPT_FRAME_TYPE_TAGGED,
@@ -111,18 +112,32 @@ enum prestera_hw_cpu_code_cnt_t {
PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1,
};
+enum prestera_hw_vtcam_direction_t {
+ PRESTERA_HW_VTCAM_DIR_INGRESS = 0,
+ PRESTERA_HW_VTCAM_DIR_EGRESS = 1,
+};
+
+enum {
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0 = 0,
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1 = 1,
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2 = 2,
+};
+
struct prestera_switch;
struct prestera_port;
struct prestera_port_stats;
struct prestera_port_caps;
enum prestera_event_type;
struct prestera_event;
-struct prestera_acl_rule;
typedef void (*prestera_event_cb_t)
(struct prestera_switch *sw, struct prestera_event *evt, void *arg);
struct prestera_rxtx_params;
+struct prestera_acl_hw_action_info;
+struct prestera_acl_iface;
+struct prestera_counter_stats;
+struct prestera_iface;
/* Switch API */
int prestera_hw_switch_init(struct prestera_switch *sw);
@@ -186,21 +201,37 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id);
int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id);
int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id);
-/* ACL API */
-int prestera_hw_acl_ruleset_create(struct prestera_switch *sw,
- u16 *ruleset_id);
-int prestera_hw_acl_ruleset_del(struct prestera_switch *sw,
- u16 ruleset_id);
-int prestera_hw_acl_rule_add(struct prestera_switch *sw,
- struct prestera_acl_rule *rule,
- u32 *rule_id);
-int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id);
-int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw,
- u32 rule_id, u64 *packets, u64 *bytes);
-int prestera_hw_acl_port_bind(const struct prestera_port *port,
- u16 ruleset_id);
-int prestera_hw_acl_port_unbind(const struct prestera_port *port,
- u16 ruleset_id);
+/* vTCAM API */
+int prestera_hw_vtcam_create(struct prestera_switch *sw,
+ u8 lookup, const u32 *keymask, u32 *vtcam_id,
+ enum prestera_hw_vtcam_direction_t direction);
+int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, u32 vtcam_id,
+ u32 prio, void *key, void *keymask,
+ struct prestera_acl_hw_action_info *act,
+ u8 n_act, u32 *rule_id);
+int prestera_hw_vtcam_rule_del(struct prestera_switch *sw,
+ u32 vtcam_id, u32 rule_id);
+int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id);
+int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id, u16 pcl_id);
+int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id);
+
+/* Counter API */
+int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id);
+int prestera_hw_counter_abort(struct prestera_switch *sw);
+int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx,
+ u32 *len, bool *done,
+ struct prestera_counter_stats *stats);
+int prestera_hw_counter_block_get(struct prestera_switch *sw,
+ u32 client, u32 *block_id, u32 *offset,
+ u32 *num_counters);
+int prestera_hw_counter_block_release(struct prestera_switch *sw,
+ u32 block_id);
+int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
+ u32 counter_id);
/* SPAN API */
int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id);
@@ -208,6 +239,16 @@ int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id);
int prestera_hw_span_unbind(const struct prestera_port *port);
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id);
+/* Router API */
+int prestera_hw_rif_create(struct prestera_switch *sw,
+ struct prestera_iface *iif, u8 *mac, u16 *rif_id);
+int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
+ struct prestera_iface *iif);
+
+/* Virtual Router API */
+int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id);
+int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index c687dc9aa973..cad93f747d0c 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -18,6 +18,7 @@
#include "prestera_rxtx.h"
#include "prestera_devlink.h"
#include "prestera_ethtool.h"
+#include "prestera_counter.h"
#include "prestera_switchdev.h"
#define PRESTERA_MTU_DEFAULT 1536
@@ -162,7 +163,7 @@ static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
return prestera_rxtx_xmit(netdev_priv(dev), skb);
}
-static int prestera_is_valid_mac_addr(struct prestera_port *port, u8 *addr)
+int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr)
{
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
@@ -901,6 +902,10 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
return err;
+ err = prestera_router_init(sw);
+ if (err)
+ goto err_router_init;
+
err = prestera_switchdev_init(sw);
if (err)
goto err_swdev_register;
@@ -913,6 +918,10 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_handlers_register;
+ err = prestera_counter_init(sw);
+ if (err)
+ goto err_counter_init;
+
err = prestera_acl_init(sw);
if (err)
goto err_acl_init;
@@ -945,12 +954,16 @@ err_dl_register:
err_span_init:
prestera_acl_fini(sw);
err_acl_init:
+ prestera_counter_fini(sw);
+err_counter_init:
prestera_event_handlers_unregister(sw);
err_handlers_register:
prestera_rxtx_switch_fini(sw);
err_rxtx_register:
prestera_switchdev_fini(sw);
err_swdev_register:
+ prestera_router_fini(sw);
+err_router_init:
prestera_netdev_event_handler_unregister(sw);
prestera_hw_switch_fini(sw);
@@ -965,9 +978,11 @@ static void prestera_switch_fini(struct prestera_switch *sw)
prestera_devlink_traps_unregister(sw);
prestera_span_fini(sw);
prestera_acl_fini(sw);
+ prestera_counter_fini(sw);
prestera_event_handlers_unregister(sw);
prestera_rxtx_switch_fini(sw);
prestera_switchdev_fini(sw);
+ prestera_router_fini(sw);
prestera_netdev_event_handler_unregister(sw);
prestera_hw_switch_fini(sw);
}
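The init path keeps the usual goto-unwind convention: each err_* label undoes exactly the steps that succeeded before the failure, in reverse order, and prestera_switch_fini() mirrors the same order. A generic sketch of the pattern using only the steps added by this patch:

	static int example_init_order(struct prestera_switch *sw)
	{
		int err;

		err = prestera_router_init(sw);
		if (err)
			return err;

		err = prestera_counter_init(sw);
		if (err)
			goto err_counter_init;

		err = prestera_acl_init(sw);
		if (err)
			goto err_acl_init;

		return 0;

	err_acl_init:
		prestera_counter_fini(sw);
	err_counter_init:
		prestera_router_fini(sw);
		return err;
	}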
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
new file mode 100644
index 000000000000..6ef4d32b8fdd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/inetdevice.h>
+#include <net/switchdev.h>
+
+#include "prestera.h"
+#include "prestera_router_hw.h"
+
+/* Helper to map the kernel's special routing tables (unspec/local/default) onto the main table used for the HW VR */
+static u32 prestera_fix_tb_id(u32 tb_id)
+{
+ if (tb_id == RT_TABLE_UNSPEC ||
+ tb_id == RT_TABLE_LOCAL ||
+ tb_id == RT_TABLE_DEFAULT)
+ tb_id = RT_TABLE_MAIN;
+
+ return tb_id;
+}
+
+static int __prestera_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port *port = netdev_priv(port_dev);
+ struct prestera_rif_entry_key re_key = {};
+ struct prestera_rif_entry *re;
+ u32 kern_tb_id;
+ int err;
+
+ err = prestera_is_valid_mac_addr(port, port_dev->dev_addr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix");
+ return err;
+ }
+
+ kern_tb_id = l3mdev_fib_table(port_dev);
+ re_key.iface.type = PRESTERA_IF_PORT_E;
+ re_key.iface.dev_port.hw_dev_num = port->dev_id;
+ re_key.iface.dev_port.port_num = port->hw_id;
+ re = prestera_rif_entry_find(port->sw, &re_key);
+
+ switch (event) {
+ case NETDEV_UP:
+ if (re) {
+ NL_SET_ERR_MSG_MOD(extack, "RIF already exist");
+ return -EEXIST;
+ }
+ re = prestera_rif_entry_create(port->sw, &re_key,
+ prestera_fix_tb_id(kern_tb_id),
+ port_dev->dev_addr);
+ if (!re) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't create RIF");
+ return -EINVAL;
+ }
+ dev_hold(port_dev);
+ break;
+ case NETDEV_DOWN:
+ if (!re) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't find RIF");
+ return -ENOENT;
+ }
+ prestera_rif_entry_destroy(port->sw, re);
+ dev_put(port_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int __prestera_inetaddr_event(struct prestera_switch *sw,
+ struct net_device *dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ if (!prestera_netdev_check(dev) || netif_is_bridge_port(dev) ||
+ netif_is_lag_port(dev) || netif_is_ovs_port(dev))
+ return 0;
+
+ return __prestera_inetaddr_port_event(dev, event, extack);
+}
+
+static int __prestera_inetaddr_cb(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct prestera_router *router = container_of(nb,
+ struct prestera_router,
+ inetaddr_nb);
+ struct in_device *idev;
+ int err = 0;
+
+ if (event != NETDEV_DOWN)
+ goto out;
+
+ /* Ignore if this is not the last address */
+ idev = __in_dev_get_rtnl(dev);
+ if (idev && idev->ifa_list)
+ goto out;
+
+ err = __prestera_inetaddr_event(router->sw, dev, event, NULL);
+out:
+ return notifier_from_errno(err);
+}
+
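Both callbacks return through notifier_from_errno(), which folds a negative errno into the NOTIFY_* return convention; the notifier core (or a caller) can recover the errno with notifier_to_errno(). A small sketch of that round trip (illustrative wrapper, not driver code):

	static int example_round_trip(struct notifier_block *nb,
				      unsigned long event, void *ptr)
	{
		int ret = __prestera_inetaddr_cb(nb, event, ptr);

		/* 0 if the event was accepted, else the original -errno */
		return notifier_to_errno(ret);
	}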
+static int __prestera_inetaddr_valid_cb(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_validator_info *ivi = (struct in_validator_info *)ptr;
+ struct net_device *dev = ivi->ivi_dev->dev;
+ struct prestera_router *router = container_of(nb,
+ struct prestera_router,
+ inetaddr_valid_nb);
+ struct in_device *idev;
+ int err = 0;
+
+ if (event != NETDEV_UP)
+ goto out;
+
+ /* Ignore if this is not the first address */
+ idev = __in_dev_get_rtnl(dev);
+ if (idev && idev->ifa_list)
+ goto out;
+
+ if (ipv4_is_multicast(ivi->ivi_addr)) {
+ NL_SET_ERR_MSG_MOD(ivi->extack,
+ "Multicast addr on RIF is not supported");
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack);
+out:
+ return notifier_from_errno(err);
+}
+
+int prestera_router_init(struct prestera_switch *sw)
+{
+ struct prestera_router *router;
+ int err;
+
+ router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
+ if (!router)
+ return -ENOMEM;
+
+ sw->router = router;
+ router->sw = sw;
+
+ err = prestera_router_hw_init(sw);
+ if (err)
+ goto err_router_lib_init;
+
+ router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
+ err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+ if (err)
+ goto err_register_inetaddr_validator_notifier;
+
+ router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ return 0;
+
+err_register_inetaddr_notifier:
+ unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+err_register_inetaddr_validator_notifier:
+ prestera_router_hw_fini(sw);
+err_router_lib_init:
+ kfree(sw->router);
+ return err;
+}
+
+void prestera_router_fini(struct prestera_switch *sw)
+{
+ unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
+ unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
+ prestera_router_hw_fini(sw);
+ kfree(sw->router);
+ sw->router = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
new file mode 100644
index 000000000000..e5592b69ad37
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */
+
+#include <linux/rhashtable.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_router_hw.h"
+#include "prestera_acl.h"
+
+/* +--+
+ * +------->|vr|
+ * | +--+
+ * |
+ * +-+-------+
+ * |rif_entry|
+ * +---------+
+ * A RIF entry is used as the
+ * entry point for a VR in HW.
+ */
+
+int prestera_router_hw_init(struct prestera_switch *sw)
+{
+ INIT_LIST_HEAD(&sw->router->vr_list);
+ INIT_LIST_HEAD(&sw->router->rif_entry_list);
+
+ return 0;
+}
+
+void prestera_router_hw_fini(struct prestera_switch *sw)
+{
+ WARN_ON(!list_empty(&sw->router->vr_list));
+ WARN_ON(!list_empty(&sw->router->rif_entry_list));
+}
+
+static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
+ u32 tb_id)
+{
+ struct prestera_vr *vr;
+
+ list_for_each_entry(vr, &sw->router->vr_list, router_node) {
+ if (vr->tb_id == tb_id)
+ return vr;
+ }
+
+ return NULL;
+}
+
+static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw,
+ u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_vr *vr;
+ int err;
+
+ vr = kzalloc(sizeof(*vr), GFP_KERNEL);
+ if (!vr) {
+ err = -ENOMEM;
+ goto err_alloc_vr;
+ }
+
+ vr->tb_id = tb_id;
+
+ err = prestera_hw_vr_create(sw, &vr->hw_vr_id);
+ if (err)
+ goto err_hw_create;
+
+ list_add(&vr->router_node, &sw->router->vr_list);
+
+ return vr;
+
+err_hw_create:
+ kfree(vr);
+err_alloc_vr:
+ return ERR_PTR(err);
+}
+
+static void __prestera_vr_destroy(struct prestera_switch *sw,
+ struct prestera_vr *vr)
+{
+ list_del(&vr->router_node);
+ prestera_hw_vr_delete(sw, vr->hw_vr_id);
+ kfree(vr);
+}
+
+static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_vr *vr;
+
+ vr = __prestera_vr_find(sw, tb_id);
+ if (vr) {
+ refcount_inc(&vr->refcount);
+ } else {
+ vr = __prestera_vr_create(sw, tb_id, extack);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ refcount_set(&vr->refcount, 1);
+ }
+
+ return vr;
+}
+
+static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr)
+{
+ if (refcount_dec_and_test(&vr->refcount))
+ __prestera_vr_destroy(sw, vr);
+}
+
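prestera_vr_get()/prestera_vr_put() implement a refcounted get-or-create: the first get creates the HW virtual router, later gets only bump the refcount, and the last put destroys it. A hypothetical caller sketch:

	static int example_use_vr(struct prestera_switch *sw, u32 tb_id)
	{
		struct prestera_vr *vr;

		vr = prestera_vr_get(sw, tb_id, NULL);
		if (IS_ERR(vr))
			return PTR_ERR(vr);

		/* ... program HW objects against vr->hw_vr_id ... */

		prestera_vr_put(sw, vr); /* frees the VR on the last put */
		return 0;
	}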
+/* iface is an overhead struct; vr_id could also be removed */
+static int
+__prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in,
+ struct prestera_rif_entry_key *out)
+{
+ memset(out, 0, sizeof(*out));
+
+ switch (in->iface.type) {
+ case PRESTERA_IF_PORT_E:
+ out->iface.dev_port.hw_dev_num = in->iface.dev_port.hw_dev_num;
+ out->iface.dev_port.port_num = in->iface.dev_port.port_num;
+ break;
+ case PRESTERA_IF_LAG_E:
+ out->iface.lag_id = in->iface.lag_id;
+ break;
+ case PRESTERA_IF_VID_E:
+ out->iface.vlan_id = in->iface.vlan_id;
+ break;
+ default:
+ WARN(1, "Unsupported iface type");
+ return -EINVAL;
+ }
+
+ out->iface.type = in->iface.type;
+ return 0;
+}
+
+struct prestera_rif_entry *
+prestera_rif_entry_find(const struct prestera_switch *sw,
+ const struct prestera_rif_entry_key *k)
+{
+ struct prestera_rif_entry *rif_entry;
+ struct prestera_rif_entry_key lk; /* lookup key */
+
+ if (__prestera_rif_entry_key_copy(k, &lk))
+ return NULL;
+
+ list_for_each_entry(rif_entry, &sw->router->rif_entry_list,
+ router_node) {
+ if (!memcmp(&lk, &rif_entry->key, sizeof(lk)))
+ return rif_entry;
+ }
+
+ return NULL;
+}
+
+void prestera_rif_entry_destroy(struct prestera_switch *sw,
+ struct prestera_rif_entry *e)
+{
+ struct prestera_iface iface;
+
+ list_del(&e->router_node);
+
+ memcpy(&iface, &e->key.iface, sizeof(iface));
+ iface.vr_id = e->vr->hw_vr_id;
+ prestera_hw_rif_delete(sw, e->hw_id, &iface);
+
+ prestera_vr_put(sw, e->vr);
+ kfree(e);
+}
+
+struct prestera_rif_entry *
+prestera_rif_entry_create(struct prestera_switch *sw,
+ struct prestera_rif_entry_key *k,
+ u32 tb_id, const unsigned char *addr)
+{
+ int err;
+ struct prestera_rif_entry *e;
+ struct prestera_iface iface;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto err_kzalloc;
+
+ if (__prestera_rif_entry_key_copy(k, &e->key))
+ goto err_key_copy;
+
+ e->vr = prestera_vr_get(sw, tb_id, NULL);
+ if (IS_ERR(e->vr))
+ goto err_vr_get;
+
+ memcpy(&e->addr, addr, sizeof(e->addr));
+
+ /* HW */
+ memcpy(&iface, &e->key.iface, sizeof(iface));
+ iface.vr_id = e->vr->hw_vr_id;
+ err = prestera_hw_rif_create(sw, &iface, e->addr, &e->hw_id);
+ if (err)
+ goto err_hw_create;
+
+ list_add(&e->router_node, &sw->router->rif_entry_list);
+
+ return e;
+
+err_hw_create:
+ prestera_vr_put(sw, e->vr);
+err_vr_get:
+err_key_copy:
+ kfree(e);
+err_kzalloc:
+ return NULL;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
new file mode 100644
index 000000000000..b6b028551868
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_ROUTER_HW_H_
+#define _PRESTERA_ROUTER_HW_H_
+
+struct prestera_vr {
+ struct list_head router_node;
+ refcount_t refcount;
+ u32 tb_id; /* key (kernel fib table id) */
+ u16 hw_vr_id; /* virtual router ID */
+ u8 __pad[2];
+};
+
+struct prestera_rif_entry {
+ struct prestera_rif_entry_key {
+ struct prestera_iface iface;
+ } key;
+ struct prestera_vr *vr;
+ unsigned char addr[ETH_ALEN];
+ u16 hw_id; /* rif_id */
+ struct list_head router_node; /* ht */
+};
+
+struct prestera_rif_entry *
+prestera_rif_entry_find(const struct prestera_switch *sw,
+ const struct prestera_rif_entry_key *k);
+void prestera_rif_entry_destroy(struct prestera_switch *sw,
+ struct prestera_rif_entry *e);
+struct prestera_rif_entry *
+prestera_rif_entry_create(struct prestera_switch *sw,
+ struct prestera_rif_entry_key *k,
+ u32 tb_id, const unsigned char *addr);
+int prestera_router_hw_init(struct prestera_switch *sw);
+void prestera_router_hw_fini(struct prestera_switch *sw);
+
+#endif /* _PRESTERA_ROUTER_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
index 3cafca827bb7..845e9d8c8cc7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -7,6 +7,7 @@
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_acl.h"
+#include "prestera_flow.h"
#include "prestera_span.h"
struct prestera_span_entry {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1d607bc6b59e..52bef50f5a0d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1388,7 +1388,6 @@ static int pxa168_eth_probe(struct platform_device *pdev)
{
struct pxa168_eth_private *pep = NULL;
struct net_device *dev = NULL;
- struct resource *res;
struct clk *clk;
struct device_node *np;
int err;
@@ -1419,9 +1418,11 @@ static int pxa168_eth_probe(struct platform_device *pdev)
goto err_netdev;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- BUG_ON(!res);
- dev->irq = res->start;
+ err = platform_get_irq(pdev, 0);
+ if (err == -EPROBE_DEFER)
+ goto err_netdev;
+ BUG_ON(err < 0);
+ dev->irq = err;
dev->netdev_ops = &pxa168_eth_netdev_ops;
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
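
For reference, the conventional shape of a platform_get_irq() conversion fails the probe on any negative return rather than relying on BUG_ON(); a hedged, illustrative fragment (not the actual patch):

	/* sketch of the usual platform_get_irq() probe idiom */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {		/* covers -EPROBE_DEFER and real errors */
		err = irq;
		goto err_netdev;
	}
	dev->irq = irq;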
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 0c864e5bf0a6..cf03c67fbf40 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -492,7 +492,9 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
static void skge_get_ring_param(struct net_device *dev,
- struct ethtool_ringparam *p)
+ struct ethtool_ringparam *p,
+ struct kernel_ethtool_ringparam *kernel_p,
+ struct netlink_ext_ack *extack)
{
struct skge_port *skge = netdev_priv(dev);
@@ -504,7 +506,9 @@ static void skge_get_ring_param(struct net_device *dev,
}
static int skge_set_ring_param(struct net_device *dev,
- struct ethtool_ringparam *p)
+ struct ethtool_ringparam *p,
+ struct kernel_ethtool_ringparam *kernel_p,
+ struct netlink_ext_ack *extack)
{
struct skge_port *skge = netdev_priv(dev);
int err = 0;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 28b5b9341145..ea16b1dd6a98 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4149,7 +4149,9 @@ static unsigned long roundup_ring_size(unsigned long pending)
}
static void sky2_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct sky2_port *sky2 = netdev_priv(dev);
@@ -4161,7 +4163,9 @@ static void sky2_get_ringparam(struct net_device *dev,
}
static int sky2_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct sky2_port *sky2 = netdev_priv(dev);
@@ -4266,96 +4270,36 @@ static int sky2_get_eeprom_len(struct net_device *dev)
return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
}
-static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
-{
- unsigned long start = jiffies;
-
- while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
- /* Can take up to 10.6 ms for write */
- if (time_after(jiffies, start + HZ/4)) {
- dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
- return -ETIMEDOUT;
- }
- msleep(1);
- }
-
- return 0;
-}
-
-static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
- u16 offset, size_t length)
-{
- int rc = 0;
-
- while (length > 0) {
- u32 val;
-
- sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
- rc = sky2_vpd_wait(hw, cap, 0);
- if (rc)
- break;
-
- val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
-
- memcpy(data, &val, min(sizeof(val), length));
- offset += sizeof(u32);
- data += sizeof(u32);
- length -= sizeof(u32);
- }
-
- return rc;
-}
-
-static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
- u16 offset, unsigned int length)
-{
- unsigned int i;
- int rc = 0;
-
- for (i = 0; i < length; i += sizeof(u32)) {
- u32 val = *(u32 *)(data + i);
-
- sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
- sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
-
- rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
- if (rc)
- break;
- }
- return rc;
-}
-
static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
-
- if (!cap)
- return -EINVAL;
+ int rc;
eeprom->magic = SKY2_EEPROM_MAGIC;
+ rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
+ data);
+ if (rc < 0)
+ return rc;
+
+ eeprom->len = rc;
- return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+ return 0;
}
static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
-
- if (!cap)
- return -EINVAL;
+ int rc;
if (eeprom->magic != SKY2_EEPROM_MAGIC)
return -EINVAL;
- /* Partial writes not supported */
- if ((eeprom->offset & 3) || (eeprom->len & 3))
- return -EINVAL;
+ rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
+ data);
- return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+ return rc < 0 ? rc : 0;
}
static netdev_features_t sky2_fix_features(struct net_device *dev,
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index c357c193378e..86d356b4388d 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
bool "MediaTek devices"
- depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
+ depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
help
If you have a MediaTek SoC with ethernet, say Y.
@@ -10,6 +10,7 @@ if NET_VENDOR_MEDIATEK
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
depends on NET_DSA || !NET_DSA
+ select PINCTRL
select PHYLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 75d67d1b5f6b..f02d07ec5ccb 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -91,46 +91,96 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
}
dev_err(eth->dev, "mdio: MDIO timeout\n");
- return -1;
+ return -ETIMEDOUT;
}
-static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
- u32 phy_register, u32 write_data)
+static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
+ u32 write_data)
{
- if (mtk_mdio_busy_wait(eth))
- return -1;
+ int ret;
- write_data &= 0xffff;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
- (phy_register << PHY_IAC_REG_SHIFT) |
- (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
- MTK_PHY_IAC);
+ if (phy_reg & MII_ADDR_C45) {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
+ } else {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
+ }
- if (mtk_mdio_busy_wait(eth))
- return -1;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
return 0;
}
-static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
+static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
{
- u32 d;
-
- if (mtk_mdio_busy_wait(eth))
- return 0xffff;
+ int ret;
- mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
- (phy_reg << PHY_IAC_REG_SHIFT) |
- (phy_addr << PHY_IAC_ADDR_SHIFT),
- MTK_PHY_IAC);
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- if (mtk_mdio_busy_wait(eth))
- return 0xffff;
+ if (phy_reg & MII_ADDR_C45) {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_READ |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+ } else {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_C22_READ |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+ }
- d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- return d;
+ return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}
static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
@@ -217,7 +267,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
phylink_config);
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new, sid, i;
- int val, ge_mode, err;
+ int val, ge_mode, err = 0;
/* MT76x8 has no hardware settings between for the MAC */
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
@@ -463,94 +513,8 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
-static void mtk_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
- phy_interface_mode_is_rgmii(state->interface)) &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
- !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
- (state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_8023z(state->interface)))) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_TRGMII:
- phylink_set(mask, 1000baseT_Full);
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseX_Full);
- break;
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseT_Half);
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_RMII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_NA:
- default:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- break;
- }
-
- if (state->interface == PHY_INTERFACE_MODE_NA) {
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseX_Full);
- }
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseX_Full);
- }
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
- }
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
-}
-
static const struct phylink_mac_ops mtk_phylink_ops = {
- .validate = mtk_validate,
+ .validate = phylink_generic_validate,
.mac_pcs_get_state = mtk_mac_pcs_get_state,
.mac_an_restart = mtk_mac_an_restart,
.mac_config = mtk_mac_config,
@@ -583,6 +547,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
eth->mii_bus->name = "mdio";
eth->mii_bus->read = mtk_mdio_read;
eth->mii_bus->write = mtk_mdio_write;
+ eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
eth->mii_bus->priv = eth;
eth->mii_bus->parent = eth->dev;
@@ -2297,7 +2262,6 @@ static int mtk_open(struct net_device *dev)
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
u32 gdm_config = MTK_GDMA_TO_PDMA;
- int err;
err = mtk_start_dma(eth);
if (err)
@@ -3009,6 +2973,33 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.dev = &eth->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ /* This driver makes use of state->speed/state->duplex in
+ * mac_config
+ */
+ mac->phylink_config.legacy_pre_march2020 = true;
+ mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
+ phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ mac->phylink_config.supported_interfaces);
+ }
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 5ef70dd8b49c..c9d42be314b5 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -341,11 +341,20 @@
/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC 0x10004
#define PHY_IAC_ACCESS BIT(31)
-#define PHY_IAC_READ BIT(19)
-#define PHY_IAC_WRITE BIT(18)
-#define PHY_IAC_START BIT(16)
-#define PHY_IAC_ADDR_SHIFT 20
-#define PHY_IAC_REG_SHIFT 25
+#define PHY_IAC_REG_MASK GENMASK(29, 25)
+#define PHY_IAC_REG(x) FIELD_PREP(PHY_IAC_REG_MASK, (x))
+#define PHY_IAC_ADDR_MASK GENMASK(24, 20)
+#define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
+#define PHY_IAC_CMD_MASK GENMASK(19, 18)
+#define PHY_IAC_CMD_C45_ADDR FIELD_PREP(PHY_IAC_CMD_MASK, 0)
+#define PHY_IAC_CMD_WRITE FIELD_PREP(PHY_IAC_CMD_MASK, 1)
+#define PHY_IAC_CMD_C22_READ FIELD_PREP(PHY_IAC_CMD_MASK, 2)
+#define PHY_IAC_CMD_C45_READ FIELD_PREP(PHY_IAC_CMD_MASK, 3)
+#define PHY_IAC_START_MASK GENMASK(17, 16)
+#define PHY_IAC_START_C45 FIELD_PREP(PHY_IAC_START_MASK, 0)
+#define PHY_IAC_START_C22 FIELD_PREP(PHY_IAC_START_MASK, 1)
+#define PHY_IAC_DATA_MASK GENMASK(15, 0)
+#define PHY_IAC_DATA(x) FIELD_PREP(PHY_IAC_DATA_MASK, (x))
#define PHY_IAC_TIMEOUT HZ
#define MTK_MAC_MISC 0x1000c
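
The PHY_IAC rework above replaces open-coded shifts with GENMASK()/FIELD_PREP() field builders. The mechanics can be demonstrated stand-alone; the two macros are re-implemented here only for illustration (in-kernel code gets them from <linux/bits.h> and <linux/bitfield.h>):

#include <stdio.h>
#include <stdint.h>

/* user-space stand-ins for the kernel macros, 32-bit only */
#define GENMASK(h, l)	 (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define PHY_IAC_ACCESS		(1u << 31)
#define PHY_IAC_REG_MASK	GENMASK(29, 25)
#define PHY_IAC_ADDR_MASK	GENMASK(24, 20)
#define PHY_IAC_CMD_MASK	GENMASK(19, 18)
#define PHY_IAC_START_MASK	GENMASK(17, 16)
#define PHY_IAC_DATA_MASK	GENMASK(15, 0)

int main(void)
{
	/* C22 write of 0xbeef to register 2 on PHY address 5 */
	uint32_t w = PHY_IAC_ACCESS |
		     FIELD_PREP(PHY_IAC_START_MASK, 1) |	/* START_C22 */
		     FIELD_PREP(PHY_IAC_CMD_MASK, 1) |		/* CMD_WRITE */
		     FIELD_PREP(PHY_IAC_REG_MASK, 2) |
		     FIELD_PREP(PHY_IAC_ADDR_MASK, 5) |
		     FIELD_PREP(PHY_IAC_DATA_MASK, 0xbeef);

	printf("IAC word: 0x%08x\n", w);	/* prints 0x8455beef */
	return 0;
}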
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index 98b1d3577bcd..d4b482340cb9 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -207,9 +207,6 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
struct dentry *root;
root = debugfs_create_dir("mtk_ppe", NULL);
- if (!root)
- return -ENOMEM;
-
debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 10238bedd694..ed5038d98ef6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1141,7 +1141,9 @@ static void mlx4_en_get_pauseparam(struct net_device *dev,
}
static int mlx4_en_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -1208,7 +1210,9 @@ out:
}
static void mlx4_en_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f1c10f2bda78..c61dc7ae0c05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -33,6 +33,7 @@
#include <linux/bpf.h>
#include <linux/etherdevice.h>
+#include <linux/filter.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
@@ -2427,10 +2428,6 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
/* device doesn't support time stamping */
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 650e6a1844ae..8cfc649f226b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -812,7 +812,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
trace_xdp_exception(dev, xdp_prog, act);
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dev, xdp_prog, act);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 9e48509ed3b2..414e390e6b48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -244,9 +244,9 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
cpumask_empty(eq->affinity_mask))
return;
- hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+ hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
if (hint_err)
- mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+ mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
}
#endif
@@ -1123,9 +1123,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
free_cpumask_var(eq_table->eq[i].affinity_mask);
-#if defined(CONFIG_SMP)
- irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
-#endif
+ irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 92056452a9e3..4ba1a78c6515 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -115,6 +115,7 @@ config MLX5_TC_CT
config MLX5_TC_SAMPLE
bool "MLX5 TC sample offload support"
depends on MLX5_CLS_ACT
+ depends on PSAMPLE=y || PSAMPLE=n || MLX5_CORE=m
default y
help
Say Y here if you want to support offloading sample rules via tc
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index e63bb9ceb9c0..fcfd38fa9e6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -46,6 +46,15 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
en/tc/post_act.o en/tc/int_port.o
+
+mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
+ en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
+ en/tc/act/tun.o en/tc/act/csum.o en/tc/act/pedit.o \
+ en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
+ en/tc/act/mirred.o en/tc/act/mirred_nic.o \
+ en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
+ en/tc/act/redirect_ingress.o
+
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
@@ -95,11 +104,12 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_ste.o steering/dr_send.o \
steering/dr_ste_v0.o steering/dr_ste_v1.o \
steering/dr_cmd.o steering/dr_fw.o \
- steering/dr_action.o steering/fs_dr.o
+ steering/dr_action.o steering/fs_dr.o \
+ steering/dr_dbg.o
#
# SF device
#
-mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o
+mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_affinity.o
#
# SF manager
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a46284ca5172..17fe05809653 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -148,8 +148,12 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
if (!refcount_dec_and_test(&ent->refcnt))
return;
- if (ent->idx >= 0)
- cmd_free_index(ent->cmd, ent->idx);
+ if (ent->idx >= 0) {
+ struct mlx5_cmd *cmd = ent->cmd;
+
+ cmd_free_index(cmd, ent->idx);
+ up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
+ }
cmd_free_ent(ent);
}
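
After this change the queue semaphore is released in cmd_ent_put(), in the same place the entry index is freed, so the free-slot count and the semaphore can no longer drift apart. The underlying pattern, a counting semaphore paired with a slot bitmap, sketched stand-alone with POSIX primitives (illustrative only, build with -pthread):

#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>

#define SLOTS 4

static sem_t slots_sem;			/* counts free slots */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int bitmap;		/* one bit per busy slot */

static int slot_get(void)
{
	int i;

	sem_wait(&slots_sem);		/* block until a slot is free */
	pthread_mutex_lock(&lock);
	for (i = 0; i < SLOTS; i++)
		if (!(bitmap & (1u << i))) {
			bitmap |= 1u << i;
			break;
		}
	pthread_mutex_unlock(&lock);
	return i;
}

static void slot_put(int i)
{
	pthread_mutex_lock(&lock);
	bitmap &= ~(1u << i);
	pthread_mutex_unlock(&lock);
	sem_post(&slots_sem);		/* release where we free, never elsewhere */
}

int main(void)
{
	sem_init(&slots_sem, 0, SLOTS);
	int a = slot_get(), b = slot_get();

	printf("got slots %d and %d\n", a, b);
	slot_put(b);
	slot_put(a);
	return 0;
}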
@@ -900,25 +904,6 @@ static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
return cmd->allowed_opcode == opcode;
}
-static int cmd_alloc_index_retry(struct mlx5_cmd *cmd)
-{
- unsigned long alloc_end = jiffies + msecs_to_jiffies(1000);
- int idx;
-
-retry:
- idx = cmd_alloc_index(cmd);
- if (idx < 0 && time_before(jiffies, alloc_end)) {
- /* Index allocation can fail on heavy load of commands. This is a temporary
- * situation as the current command already holds the semaphore, meaning that
- * another command completion is being handled and it is expected to release
- * the entry index soon.
- */
- cpu_relax();
- goto retry;
- }
- return idx;
-}
-
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
{
return pci_channel_offline(dev->pdev) ||
@@ -946,7 +931,7 @@ static void cmd_work_handler(struct work_struct *work)
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
- alloc_ret = cmd_alloc_index_retry(cmd);
+ alloc_ret = cmd_alloc_index(cmd);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {
@@ -1602,8 +1587,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
vector = vec & 0xffffffff;
for (i = 0; i < (1 << cmd->log_sz); i++) {
if (test_bit(i, &vector)) {
- struct semaphore *sem;
-
ent = cmd->ent_arr[i];
/* if we already completed the command, ignore it */
@@ -1626,10 +1609,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
cmd_ent_put(ent);
- if (ent->page_queue)
- sem = &cmd->pages_sem;
- else
- sem = &cmd->sem;
ent->ts2 = ktime_get_ns();
memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
dump_command(dev, ent, 0);
@@ -1683,7 +1662,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
*/
complete(&ent->done);
}
- up(sem);
}
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a8b84d53dfb0..ba6dad97e308 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -538,7 +538,7 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
return add_drivers(dev);
}
-static bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
+bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
u64 fsystem_guid, psystem_guid;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 1c98652b244a..d1093bb2d436 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -546,6 +546,13 @@ static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32
return 0;
}
+static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ return (val.vu16 >= 64 && val.vu16 <= 4096) ? 0 : -EINVAL;
+}
+
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
@@ -570,6 +577,10 @@ static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_enable_remote_dev_reset_get,
mlx5_devlink_enable_remote_dev_reset_set, NULL),
+ DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_eq_depth_validate),
+ DEVLINK_PARAM_GENERIC(EVENT_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_eq_depth_validate),
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -608,6 +619,16 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
value);
}
#endif
+
+ value.vu32 = MLX5_COMP_EQ_SIZE;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ value);
+
+ value.vu32 = MLX5_NUM_ASYNC_EQE;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ value);
}
static const struct devlink_param enable_eth_param =
@@ -752,6 +773,66 @@ static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
mlx5_devlink_eth_param_unregister(devlink);
}
+static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (val.vu32 == 0) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs value must be greater than 0");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(val.vu32)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only power of 2 values are supported for max_macs");
+ return -EINVAL;
+ }
+
+ if (ilog2(val.vu32) >
+ MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs value is out of the supported range");
+ return -EINVAL;
+ }
+
+ return 0;
+}
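
The validator above enforces three constraints on max_macs: non-zero, a power of two, and within the device's log-scaled capability. The same checks, sketched stand-alone with a made-up capability value:

#include <stdio.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static int ilog2(unsigned int n)	/* n must be non-zero */
{
	int l = -1;

	while (n) {
		n >>= 1;
		l++;
	}
	return l;
}

static int validate_max_macs(unsigned int val, unsigned int log_cap)
{
	if (val == 0)
		return -1;		/* must be greater than 0 */
	if (!is_power_of_2(val))
		return -2;		/* only powers of 2 supported */
	if (ilog2(val) > (int)log_cap)
		return -3;		/* out of the supported range */
	return 0;
}

int main(void)
{
	unsigned int log_cap = 7;	/* pretend cap: up to 128 MACs */

	printf("%d %d %d %d\n",
	       validate_max_macs(0, log_cap),
	       validate_max_macs(96, log_cap),
	       validate_max_macs(128, log_cap),
	       validate_max_macs(256, log_cap));
	return 0;
}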
+
+static const struct devlink_param max_uc_list_param =
+ DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_max_uc_list_validate);
+
+static int mlx5_devlink_max_uc_list_param_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+ int err;
+
+ if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
+ return 0;
+
+ err = devlink_param_register(devlink, &max_uc_list_param);
+ if (err)
+ return err;
+
+ value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
+ return 0;
+}
+
+static void
+mlx5_devlink_max_uc_list_param_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
+ return;
+
+ devlink_param_unregister(devlink, &max_uc_list_param);
+}
+
#define MLX5_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
@@ -811,6 +892,10 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto auxdev_reg_err;
+ err = mlx5_devlink_max_uc_list_param_register(devlink);
+ if (err)
+ goto max_uc_list_err;
+
err = mlx5_devlink_traps_register(devlink);
if (err)
goto traps_reg_err;
@@ -821,6 +906,8 @@ int mlx5_devlink_register(struct devlink *devlink)
return 0;
traps_reg_err:
+ mlx5_devlink_max_uc_list_param_unregister(devlink);
+max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
devlink_params_unregister(devlink, mlx5_devlink_params,
@@ -831,6 +918,7 @@ auxdev_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
mlx5_devlink_traps_unregister(devlink);
+ mlx5_devlink_max_uc_list_param_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b47a0d3ef22f..812e6810cb3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -145,7 +145,6 @@ struct page_pool;
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
-#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
@@ -173,7 +172,7 @@ struct page_pool;
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
-#define MLX5E_MAX_KLM_PER_WQE(mdev) \
+#define MLX5E_MAX_KLM_PER_WQE \
MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -877,10 +876,8 @@ struct mlx5e_trap;
struct mlx5e_priv {
/* priv data path fields - start */
- /* +1 for port ptp ts */
- struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
- MLX5E_QOS_MAX_LEAF_NODES];
- int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+ struct mlx5e_txqsq **txq2sq;
+ int **channel_tc2realtxq;
int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
@@ -895,7 +892,7 @@ struct mlx5e_priv {
struct mlx5e_channels channels;
u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rx_res *rx_res;
- u32 tx_rates[MLX5E_MAX_NUM_SQS];
+ u32 *tx_rates;
struct mlx5e_flow_steering fs;
@@ -911,7 +908,7 @@ struct mlx5e_priv {
struct net_device *netdev;
struct mlx5e_trap *en_trap;
struct mlx5e_stats stats;
- struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_channel_stats **channel_stats;
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
u16 stats_nch;
@@ -958,6 +955,12 @@ struct mlx5e_rx_handlers {
extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
+enum mlx5e_profile_feature {
+ MLX5E_PROFILE_FEATURE_PTP_RX,
+ MLX5E_PROFILE_FEATURE_PTP_TX,
+ MLX5E_PROFILE_FEATURE_QOS_HTB,
+};
+
struct mlx5e_profile {
int (*init)(struct mlx5_core_dev *mdev,
struct net_device *netdev);
@@ -971,14 +974,18 @@ struct mlx5e_profile {
int (*update_rx)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
+ int (*max_nch_limit)(struct mlx5_core_dev *mdev);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
u8 rq_groups;
- bool rx_ptp_support;
+ u32 features;
};
+#define mlx5e_profile_feature_cap(profile, feature) \
+ ((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))
+
void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
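
The bool rx_ptp_support field becomes a features bitmap indexed by the enum above and queried via mlx5e_profile_feature_cap(). The bare shape of that idiom, stand-alone (names shortened for illustration):

#include <stdio.h>

#define BIT(n)	(1u << (n))

enum profile_feature {
	FEATURE_PTP_RX,
	FEATURE_PTP_TX,
	FEATURE_QOS_HTB,
};

struct profile {
	const char *name;
	unsigned int features;		/* bitmap of enum profile_feature */
};

#define profile_feature_cap(p, f)  ((p)->features & BIT(f))

int main(void)
{
	struct profile nic = {
		.name = "nic",
		.features = BIT(FEATURE_PTP_RX) | BIT(FEATURE_QOS_HTB),
	};

	printf("%s: ptp_rx=%d ptp_tx=%d htb=%d\n", nic.name,
	       !!profile_feature_cap(&nic, FEATURE_PTP_RX),
	       !!profile_feature_cap(&nic, FEATURE_PTP_TX),
	       !!profile_feature_cap(&nic, FEATURE_QOS_HTB));
	return 0;
}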
@@ -1056,7 +1063,6 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
mlx5e_fp_preactivate preactivate,
void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
-int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
@@ -1147,9 +1153,12 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal);
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal);
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
@@ -1185,8 +1194,7 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev);
void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- unsigned int txqs, unsigned int rxqs);
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index d290d7276b8d..b4f3bd7d346e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -20,7 +20,7 @@ mlx5e_hv_vhca_fill_ring_stats(struct mlx5e_priv *priv, int ch,
struct mlx5e_channel_stats *stats;
int tc;
- stats = &priv->channel_stats[ch];
+ stats = priv->channel_stats[ch];
data->rx_packets = stats->rq.packets;
data->rx_bytes = stats->rq.bytes;
@@ -120,14 +120,14 @@ static void mlx5e_hv_vhca_stats_cleanup(struct mlx5_hv_vhca_agent *agent)
cancel_delayed_work_sync(&priv->stats_agent.work);
}
-int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
+void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
{
int buf_len = mlx5e_hv_vhca_stats_buf_size(priv);
struct mlx5_hv_vhca_agent *agent;
priv->stats_agent.buf = kvzalloc(buf_len, GFP_KERNEL);
if (!priv->stats_agent.buf)
- return -ENOMEM;
+ return;
agent = mlx5_hv_vhca_agent_create(priv->mdev->hv_vhca,
MLX5_HV_VHCA_AGENT_STATS,
@@ -142,13 +142,11 @@ int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
PTR_ERR(agent));
kvfree(priv->stats_agent.buf);
- return IS_ERR_OR_NULL(agent);
+ return;
}
priv->stats_agent.agent = agent;
INIT_DELAYED_WORK(&priv->stats_agent.work, mlx5e_hv_vhca_stats_work);
-
- return 0;
}
void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
index 664463faf77b..29c8c6d3260f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
@@ -7,19 +7,12 @@
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
-int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv);
+void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv);
void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv);
#else
-
-static inline int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
-{
- return 0;
-}
-
-static inline void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
-{
-}
+static inline void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv) {}
+static inline void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv) {}
#endif
#endif /* __MLX5_EN_STATS_VHCA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
index 7edde4d536fd..17325c5d6516 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
@@ -155,3 +155,61 @@ struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh)
return mh->modify_hdr;
}
+char *
+mlx5e_mod_hdr_alloc(struct mlx5_core_dev *mdev, int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ int new_num_actions, max_hw_actions;
+ size_t new_sz, old_sz;
+ void *ret;
+
+ if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
+ goto out;
+
+ max_hw_actions = mlx5e_mod_hdr_max_actions(mdev, namespace);
+ new_num_actions = min(max_hw_actions,
+ mod_hdr_acts->actions ?
+ mod_hdr_acts->max_actions * 2 : 1);
+ if (mod_hdr_acts->max_actions == new_num_actions)
+ return ERR_PTR(-ENOSPC);
+
+ new_sz = MLX5_MH_ACT_SZ * new_num_actions;
+ old_sz = mod_hdr_acts->max_actions * MLX5_MH_ACT_SZ;
+
+ if (mod_hdr_acts->is_static) {
+ ret = kzalloc(new_sz, GFP_KERNEL);
+ if (ret) {
+ memcpy(ret, mod_hdr_acts->actions, old_sz);
+ mod_hdr_acts->is_static = false;
+ }
+ } else {
+ ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
+ if (ret)
+ memset(ret + old_sz, 0, new_sz - old_sz);
+ }
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ mod_hdr_acts->actions = ret;
+ mod_hdr_acts->max_actions = new_num_actions;
+
+out:
+ return mod_hdr_acts->actions + (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+}
+
+void
+mlx5e_mod_hdr_dealloc(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ if (!mod_hdr_acts->is_static)
+ kfree(mod_hdr_acts->actions);
+
+ mod_hdr_acts->actions = NULL;
+ mod_hdr_acts->num_actions = 0;
+ mod_hdr_acts->max_actions = 0;
+}
+
+char *
+mlx5e_mod_hdr_get_item(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, int pos)
+{
+ return mod_hdr_acts->actions + (pos * MLX5_MH_ACT_SZ);
+}
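
mlx5e_mod_hdr_alloc() grows the action array geometrically and understands arrays whose initial storage is static (declared on the stack via DECLARE_MOD_HDR_ACTS): the first growth copies the static buffer to the heap, later growths krealloc() in place, and hitting the hardware maximum yields -ENOSPC. A user-space sketch of that hybrid buffer (sizes and names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

struct acts {
	int num, max;
	bool is_static;
	int *items;
};

/* return a slot for one more item, growing the backing store if needed */
static int *acts_alloc(struct acts *a, int hw_max)
{
	int new_max;
	int *p;

	if (a->num < a->max)
		goto out;

	new_max = a->items ? a->max * 2 : 1;
	if (new_max > hw_max)
		new_max = hw_max;
	if (new_max == a->max)
		return NULL;			/* hit the hardware limit */

	if (a->is_static) {			/* migrate stack buffer to heap */
		p = calloc(new_max, sizeof(*p));
		if (p) {
			memcpy(p, a->items, a->max * sizeof(*p));
			a->is_static = false;
		}
	} else {
		p = realloc(a->items, new_max * sizeof(*p));
		if (p)
			memset(p + a->max, 0, (new_max - a->max) * sizeof(*p));
	}
	if (!p)
		return NULL;
	a->items = p;
	a->max = new_max;
out:
	return &a->items[a->num++];
}

int main(void)
{
	int stack_buf[2] = {};
	struct acts a = { .max = 2, .is_static = true, .items = stack_buf };

	for (int i = 0; i < 5; i++)
		*acts_alloc(&a, 16) = i;	/* grows 2 -> 4 -> 8 */
	printf("num=%d max=%d heap=%d\n", a.num, a.max, !a.is_static);
	free(a.items);
	return 0;
}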
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h
index 33b23d8f9182..b8dac418d0a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h
@@ -7,14 +7,32 @@
#include <linux/hashtable.h>
#include <linux/mlx5/fs.h>
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
+
struct mlx5e_mod_hdr_handle;
struct mlx5e_tc_mod_hdr_acts {
int num_actions;
int max_actions;
+ bool is_static;
void *actions;
};
+#define DECLARE_MOD_HDR_ACTS_ACTIONS(name, len) \
+ u8 name[len][MLX5_MH_ACT_SZ] = {}
+
+#define DECLARE_MOD_HDR_ACTS(name, acts_arr) \
+ struct mlx5e_tc_mod_hdr_acts name = { \
+ .max_actions = ARRAY_SIZE(acts_arr), \
+ .is_static = true, \
+ .actions = acts_arr, \
+ }
+
+char *mlx5e_mod_hdr_alloc(struct mlx5_core_dev *mdev, int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void mlx5e_mod_hdr_dealloc(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+char *mlx5e_mod_hdr_get_item(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, int pos);
+
struct mlx5e_mod_hdr_handle *
mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev,
struct mod_hdr_tbl *tbl,
@@ -28,4 +46,12 @@ struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh);
void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl);
void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl);
+static inline int mlx5e_mod_hdr_max_actions(struct mlx5_core_dev *mdev, int namespace)
+{
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
+ else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+ return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
+}
+
#endif /* __MLX5E_EN_MOD_HDR_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index f8c29022dbb2..66180ffb4606 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -717,7 +717,7 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
+ max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE;
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
rest = max_hd_per_wqe % max_klm_per_umr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 18d542b1c5cb..82baafd3c00c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -768,7 +768,7 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs;
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return 0;
ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
@@ -783,7 +783,7 @@ void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return;
mlx5e_ptp_rx_unset_fs(priv);
@@ -794,7 +794,7 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
struct mlx5e_ptp *c = priv->channels.ptp;
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return 0;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 50977f01a050..00449df98a5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+#include <net/sch_generic.h>
#include "en.h"
#include "params.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index fcb0892c08a9..0991345c4ae5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -517,6 +517,9 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
+ if (!netdev)
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_BLOCK:
return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 614cd9477600..60bc5b577ab9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -573,7 +573,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
snprintf(err_str, sizeof(err_str),
"TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
- jiffies_to_usecs(jiffies - sq->txq->trans_start));
+ jiffies_to_usecs(jiffies - READ_ONCE(sq->txq->trans_start)));
mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
return to_ctx.status;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 0015a81eb9a1..24c32f73040a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -37,7 +37,6 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
@@ -52,7 +51,7 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
return -ENOMEM;
err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
- init_pkt_merge_param);
+ &res->pkt_merge_param);
if (err)
goto err_rss_free;
@@ -277,8 +276,7 @@ struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
}
-static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param)
+static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_tir_builder *builder;
@@ -309,7 +307,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
+ mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
@@ -339,7 +337,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
+ mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
@@ -454,11 +452,11 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->pkt_merge_param = *init_pkt_merge_param;
init_rwsem(&res->pkt_merge_param_sem);
- err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
+ err = mlx5e_rx_res_rss_init_def(res, init_nch);
if (err)
goto err_out;
- err = mlx5e_rx_res_channels_init(res, init_pkt_merge_param);
+ err = mlx5e_rx_res_channels_init(res);
if (err)
goto err_rss_destroy;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
new file mode 100644
index 000000000000..b0de6b999675
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_accept = {
+ .can_offload = tc_act_can_offload_accept,
+ .parse_action = tc_act_parse_accept,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
new file mode 100644
index 000000000000..e600924e30ea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+#include "mlx5_core.h"
+
+/* Must be aligned with enum flow_action_id. */
+static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
+ &mlx5e_tc_act_accept,
+ &mlx5e_tc_act_drop,
+ &mlx5e_tc_act_trap,
+ &mlx5e_tc_act_goto,
+ &mlx5e_tc_act_mirred,
+ &mlx5e_tc_act_mirred,
+ &mlx5e_tc_act_redirect_ingress,
+ NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
+ &mlx5e_tc_act_vlan,
+ &mlx5e_tc_act_vlan,
+ &mlx5e_tc_act_vlan_mangle,
+ &mlx5e_tc_act_tun_encap,
+ &mlx5e_tc_act_tun_decap,
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_csum,
+ NULL, /* FLOW_ACTION_MARK, */
+ &mlx5e_tc_act_ptype,
+ NULL, /* FLOW_ACTION_PRIORITY, */
+ NULL, /* FLOW_ACTION_WAKE, */
+ NULL, /* FLOW_ACTION_QUEUE, */
+ &mlx5e_tc_act_sample,
+ NULL, /* FLOW_ACTION_POLICE, */
+ &mlx5e_tc_act_ct,
+ NULL, /* FLOW_ACTION_CT_METADATA, */
+ &mlx5e_tc_act_mpls_push,
+ &mlx5e_tc_act_mpls_pop,
+};
+
+/* Must be aligned with enum flow_action_id. */
+static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
+ &mlx5e_tc_act_accept,
+ &mlx5e_tc_act_drop,
+ NULL, /* FLOW_ACTION_TRAP, */
+ &mlx5e_tc_act_goto,
+ &mlx5e_tc_act_mirred_nic,
+ NULL, /* FLOW_ACTION_MIRRED, */
+ NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
+ NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
+ NULL, /* FLOW_ACTION_VLAN_PUSH, */
+ NULL, /* FLOW_ACTION_VLAN_POP, */
+ NULL, /* FLOW_ACTION_VLAN_MANGLE, */
+ NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
+ NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_csum,
+ &mlx5e_tc_act_mark,
+ NULL, /* FLOW_ACTION_PTYPE, */
+ NULL, /* FLOW_ACTION_PRIORITY, */
+ NULL, /* FLOW_ACTION_WAKE, */
+ NULL, /* FLOW_ACTION_QUEUE, */
+ NULL, /* FLOW_ACTION_SAMPLE, */
+ NULL, /* FLOW_ACTION_POLICE, */
+ &mlx5e_tc_act_ct,
+};
+
+/**
+ * mlx5e_tc_act_get() - Get an action parser for an action id.
+ * @act_id: Flow action id.
+ * @ns_type: Flow namespace type.
+ */
+struct mlx5e_tc_act *
+mlx5e_tc_act_get(enum flow_action_id act_id,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5e_tc_act **tc_acts;
+
+ tc_acts = ns_type == MLX5_FLOW_NAMESPACE_FDB ? tc_acts_fdb : tc_acts_nic;
+
+ return tc_acts[act_id];
+}
+
+/**
+ * mlx5e_tc_act_init_parse_state() - Init a new parse_state.
+ * @parse_state: Parsing state.
+ * @flow: mlx5e tc flow being handled.
+ * @flow_action: Flow action to parse.
+ * @extack: For setting an error message on failure.
+ *
+ * The same parse_state should be passed to action parsers
+ * for tracking the current parsing state.
+ */
+void
+mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_tc_flow *flow,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ memset(parse_state, 0, sizeof(*parse_state));
+ parse_state->flow = flow;
+ parse_state->num_actions = flow_action->num_entries;
+ parse_state->extack = extack;
+}
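
act.c replaces one long parse switch with per-namespace dispatch tables indexed directly by enum flow_action_id; a NULL slot simply means the action cannot be offloaded in that namespace. The bare idiom, stand-alone:

#include <stdio.h>

enum action_id { ACT_ACCEPT, ACT_DROP, ACT_TRAP, NUM_ACTIONS };

struct act {
	const char *name;
	int (*parse)(void);
};

static int parse_accept(void) { return 0; }
static int parse_drop(void)   { return 0; }

static const struct act act_accept = { "accept", parse_accept };
static const struct act act_drop   = { "drop",   parse_drop };

/* must stay aligned with enum action_id; a NULL slot means "unsupported" */
static const struct act *acts_nic[NUM_ACTIONS] = {
	[ACT_ACCEPT] = &act_accept,
	[ACT_DROP]   = &act_drop,
	/* ACT_TRAP intentionally left NULL */
};

static const struct act *act_get(enum action_id id)
{
	return acts_nic[id];
}

int main(void)
{
	int id;

	for (id = 0; id < NUM_ACTIONS; id++) {
		const struct act *a = act_get(id);

		printf("action %d: %s\n", id, a ? a->name : "unsupported");
	}
	return 0;
}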
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
new file mode 100644
index 000000000000..26efa33de56f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_H__
+#define __MLX5_EN_TC_ACT_H__
+
+#include <net/tc_act/tc_pedit.h>
+#include <net/flow_offload.h>
+#include <linux/netlink.h>
+#include "eswitch.h"
+#include "pedit.h"
+
+struct mlx5_flow_attr;
+
+struct mlx5e_tc_act_parse_state {
+ unsigned int num_actions;
+ struct mlx5e_tc_flow *flow;
+ struct netlink_ext_ack *extack;
+ bool encap;
+ bool decap;
+ bool mpls_push;
+ bool ptype_host;
+ const struct ip_tunnel_info *tun_info;
+ struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
+ int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
+ int if_count;
+ struct mlx5_tc_ct_priv *ct_priv;
+};
+
+struct mlx5e_tc_act {
+ bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index);
+
+ int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr);
+
+ int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr);
+};
+
+extern struct mlx5e_tc_act mlx5e_tc_act_drop;
+extern struct mlx5e_tc_act mlx5e_tc_act_trap;
+extern struct mlx5e_tc_act mlx5e_tc_act_accept;
+extern struct mlx5e_tc_act mlx5e_tc_act_mark;
+extern struct mlx5e_tc_act mlx5e_tc_act_goto;
+extern struct mlx5e_tc_act mlx5e_tc_act_tun_encap;
+extern struct mlx5e_tc_act mlx5e_tc_act_tun_decap;
+extern struct mlx5e_tc_act mlx5e_tc_act_csum;
+extern struct mlx5e_tc_act mlx5e_tc_act_pedit;
+extern struct mlx5e_tc_act mlx5e_tc_act_vlan;
+extern struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle;
+extern struct mlx5e_tc_act mlx5e_tc_act_mpls_push;
+extern struct mlx5e_tc_act mlx5e_tc_act_mpls_pop;
+extern struct mlx5e_tc_act mlx5e_tc_act_mirred;
+extern struct mlx5e_tc_act mlx5e_tc_act_mirred_nic;
+extern struct mlx5e_tc_act mlx5e_tc_act_ct;
+extern struct mlx5e_tc_act mlx5e_tc_act_sample;
+extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
+extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
+
+struct mlx5e_tc_act *
+mlx5e_tc_act_get(enum flow_action_id act_id,
+ enum mlx5_flow_namespace_type ns_type);
+
+void
+mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_tc_flow *flow,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack);
+
+#endif /* __MLX5_EN_TC_ACT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
new file mode 100644
index 000000000000..29920ef0180a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/tc_act/tc_csum.h>
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+csum_offload_supported(struct mlx5e_priv *priv,
+ u32 action,
+ u32 update_flags,
+ struct netlink_ext_ack *extack)
+{
+ u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
+ TCA_CSUM_UPDATE_FLAG_UDP;
+
+ /* The HW recalculates checksums only when rewriting headers */
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TC csum action is only offloaded with pedit");
+ netdev_warn(priv->netdev,
+ "TC csum action is only offloaded with pedit\n");
+ return false;
+ }
+
+ if (update_flags & ~prot_flags) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload TC csum action for some header/s");
+ netdev_warn(priv->netdev,
+ "can't offload TC csum action for some header/s - flags %#x\n",
+ update_flags);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+tc_act_can_offload_csum(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+
+ return csum_offload_supported(flow->priv, flow->attr->action,
+ act->csum_flags, parse_state->extack);
+}
+
+static int
+tc_act_parse_csum(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_csum = {
+ .can_offload = tc_act_can_offload_csum,
+ .parse_action = tc_act_parse_csum,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
new file mode 100644
index 000000000000..06ec30cdb269
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+#include "en/tc_ct.h"
+
+static bool
+tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (flow_flag_test(parse_state->flow, SAMPLE)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sample action with connection tracking is not supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ int err;
+
+ err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
+ &attr->parse_attr->mod_hdr_acts,
+ act, parse_state->extack);
+ if (err)
+ return err;
+
+ flow_flag_set(parse_state->flow, CT);
+
+ if (mlx5e_is_eswitch_flow(parse_state->flow))
+ attr->esw_attr->split_count = attr->esw_attr->out_count;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_ct = {
+ .can_offload = tc_act_can_offload_ct,
+ .parse_action = tc_act_parse_ct,
+};
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
new file mode 100644
index 000000000000..2e29a23bed12
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_drop = {
+ .can_offload = tc_act_can_offload_drop,
+ .parse_action = tc_act_parse_drop,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
new file mode 100644
index 000000000000..f44515061228
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+#include "eswitch.h"
+
+static int
+validate_goto_chain(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ bool is_esw = mlx5e_is_eswitch_flow(flow);
+ bool ft_flow = mlx5e_is_ft_flow(flow);
+ u32 dest_chain = act->chain_index;
+ struct mlx5_fs_chains *chains;
+ struct mlx5_eswitch *esw;
+ u32 reformat_and_fwd;
+ u32 max_chain;
+
+ esw = priv->mdev->priv.eswitch;
+ chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
+ max_chain = mlx5_chains_get_chain_range(chains);
+ reformat_and_fwd = is_esw ?
+ MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
+ MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
+
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_chains_backwards_supported(chains) &&
+ dest_chain <= flow->attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto lower numbered chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ !reformat_and_fwd) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto chain is not allowed if action has reformat or decap");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool
+tc_act_can_offload_goto(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+
+ if (validate_goto_chain(flow->priv, flow, act, extack))
+ return false;
+
+ return true;
+}
+
+static int
+tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = act->chain_index;
+
+ return 0;
+}
+
+static int
+tc_act_post_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+
+ if (!attr->dest_chain)
+ return 0;
+
+ if (parse_state->decap) {
+ /* This could be supported by creating a mapping for
+ * the tunnel device only (without the tunnel), and
+ * setting that tunnel id on this decap flow.
+ *
+ * On restore (miss), we would then just set the saved
+ * tunnel device.
+ */
+
+ NL_SET_ERR_MSG_MOD(extack, "Decap with goto isn't supported");
+ netdev_warn(priv->netdev, "Decap with goto isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5e_is_eswitch_flow(flow) && parse_attr->mirred_ifindex[0]) {
+ NL_SET_ERR_MSG_MOD(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_goto = {
+ .can_offload = tc_act_can_offload_goto,
+ .parse_action = tc_act_parse_goto,
+ .post_parse = tc_act_post_parse_goto,
+};
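goto is the first handler in this series to implement the optional post_parse callback: the decap-with-goto and mirred-with-goto checks depend on state accumulated by other actions (parse_state->decap, parse_attr->mirred_ifindex), so they can only run once the whole flow_action array has been parsed. A hedged sketch of how a caller might drive the hook after parsing completes (post_parse_flow_actions() is a hypothetical name, assuming the act.h interface above):

static int
post_parse_flow_actions(struct mlx5e_tc_act_parse_state *parse_state,
			struct flow_action *flow_action,
			struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr,
			enum mlx5_flow_namespace_type ns_type)
{
	struct flow_action_entry *act;
	struct mlx5e_tc_act *tc_act;
	int err, i;

	flow_action_for_each(i, act, flow_action) {
		tc_act = mlx5e_tc_act_get(act->id, ns_type);
		if (!tc_act || !tc_act->post_parse)
			continue;	/* the hook is optional */

		err = tc_act->post_parse(parse_state, priv, attr);
		if (err)
			return err;
	}

	return 0;
}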
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
new file mode 100644
index 000000000000..d775c3d9edf3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en_tc.h"
+
+static bool
+tc_act_can_offload_mark(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ if (act->mark & ~MLX5E_TC_FLOW_ID_MASK) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack, "Bad flow mark, only 16 bit supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mark(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->nic_attr->flow_tag = act->mark;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_mark = {
+ .can_offload = tc_act_can_offload_mark,
+ .parse_action = tc_act_parse_mark,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
new file mode 100644
index 000000000000..c614fc7fdc9c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
+#include <net/bareudp.h>
+#include <net/bonding.h>
+#include "act.h"
+#include "vlan.h"
+#include "en/tc_tun_encap.h"
+#include "en/tc_priv.h"
+#include "en_rep.h"
+
+static bool
+same_vf_reps(struct mlx5e_priv *priv, struct net_device *out_dev)
+{
+ return mlx5e_eswitch_vf_rep(priv->netdev) &&
+ priv->netdev == out_dev;
+}
+
+static int
+verify_uplink_forwarding(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rep_priv;
+
+ /* Forwarding non-encapsulated traffic between
+ * uplink ports is allowed only if the
+ * termination_table_raw_traffic cap is set.
+ *
+ * The input vport was stored in attr->in_rep.
+ * In the LAG case, *priv* is the private data of
+ * the uplink, which may not be the input vport.
+ */
+ rep_priv = mlx5e_rep_to_rep_priv(attr->esw_attr->in_rep);
+
+ if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
+ mlx5e_eswitch_uplink_rep(out_dev)))
+ return 0;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
+ termination_table_raw_traffic)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are both uplink, can't offload forwarding");
+ return -EOPNOTSUPP;
+ } else if (out_dev != rep_priv->netdev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not the same uplink, can't offload forwarding");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static bool
+is_duplicated_output_device(struct net_device *dev,
+ struct net_device *out_dev,
+ int *ifindexes, int if_count,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < if_count; i++) {
+ if (ifindexes[i] == out_dev->ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "can't duplicate output to same device");
+ netdev_err(dev, "can't duplicate output to same device: %s\n",
+ out_dev->name);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static struct net_device *
+get_fdb_out_dev(struct net_device *uplink_dev, struct net_device *out_dev)
+{
+ struct net_device *fdb_out_dev = out_dev;
+ struct net_device *uplink_upper;
+
+ rcu_read_lock();
+ uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
+ if (uplink_upper && netif_is_lag_master(uplink_upper) &&
+ uplink_upper == out_dev) {
+ fdb_out_dev = uplink_dev;
+ } else if (netif_is_lag_master(out_dev)) {
+ fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
+ if (fdb_out_dev &&
+ (!mlx5e_eswitch_rep(fdb_out_dev) ||
+ !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
+ fdb_out_dev = NULL;
+ }
+ rcu_read_unlock();
+ return fdb_out_dev;
+}
+
+static bool
+tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct net_device *out_dev = act->dev;
+ struct mlx5e_priv *priv = flow->priv;
+ struct mlx5_esw_flow_attr *esw_attr;
+
+ parse_attr = flow->attr->parse_attr;
+ esw_attr = flow->attr->esw_attr;
+
+ if (!out_dev) {
+ /* out_dev is NULL when filters with a
+ * non-existing mirred device are replayed to
+ * the driver.
+ */
+ return false;
+ }
+
+ if (parse_state->mpls_push && !netif_is_bareudp(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls is supported only through a bareudp device");
+ return false;
+ }
+
+ if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) {
+ /* Ignore forward to self rules generated
+ * by adding both mlx5 devs to the flow table
+ * block on a normal nft offload setup.
+ */
+ return false;
+ }
+
+ if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't support more output ports, can't offload forwarding");
+ netdev_warn(priv->netdev,
+ "can't support more than %d output ports, can't offload forwarding\n",
+ esw_attr->out_count);
+ return false;
+ }
+
+ if (parse_state->encap ||
+ netdev_port_same_parent_id(priv->netdev, out_dev) ||
+ netif_is_ovs_master(out_dev))
+ return true;
+
+ if (parse_attr->filter_dev != priv->netdev) {
+ /* All mlx5 devices are called to configure
+ * high-level device filters. Therefore, an
+ * *attempt* to install a filter on an invalid
+ * eswitch should not trigger an explicit error.
+ */
+ return false;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "devices are not on same switch HW, can't offload forwarding");
+
+ return false;
+}
+
+static int
+parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *out_dev = act->dev;
+
+ parse_attr->mirred_ifindex[esw_attr->out_count] = out_dev->ifindex;
+ parse_attr->tun_info[esw_attr->out_count] =
+ mlx5e_dup_tun_info(parse_state->tun_info);
+
+ if (!parse_attr->tun_info[esw_attr->out_count])
+ return -ENOMEM;
+
+ parse_state->encap = false;
+ esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
+ esw_attr->out_count++;
+ /* attr->dests[].rep is resolved when we handle encap */
+
+ return 0;
+}
+
+static int
+parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct net_device *out_dev = act->dev;
+ struct net_device *uplink_dev;
+ struct mlx5e_priv *out_priv;
+ struct mlx5_eswitch *esw;
+ int *ifindexes;
+ int if_count;
+ int err;
+
+ esw = priv->mdev->priv.eswitch;
+ uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+ ifindexes = parse_state->ifindexes;
+ if_count = parse_state->if_count;
+
+ if (is_duplicated_output_device(priv->netdev, out_dev, ifindexes, if_count, extack))
+ return -EOPNOTSUPP;
+
+ parse_state->ifindexes[if_count] = out_dev->ifindex;
+ parse_state->if_count++;
+
+ out_dev = get_fdb_out_dev(uplink_dev, out_dev);
+ if (!out_dev)
+ return -ENODEV;
+
+ if (is_vlan_dev(out_dev)) {
+ err = mlx5e_tc_act_vlan_add_push_action(priv, attr, &out_dev, extack);
+ if (err)
+ return err;
+ }
+
+ if (is_vlan_dev(parse_attr->filter_dev)) {
+ err = mlx5e_tc_act_vlan_add_pop_action(priv, attr, extack);
+ if (err)
+ return err;
+ }
+
+ if (netif_is_macvlan(out_dev))
+ out_dev = macvlan_dev_real_dev(out_dev);
+
+ err = verify_uplink_forwarding(priv, attr, out_dev, extack);
+ if (err)
+ return err;
+
+ if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not on same switch HW, can't offload forwarding");
+ return -EOPNOTSUPP;
+ }
+
+ if (same_vf_reps(priv, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "can't forward from a VF to itself");
+ return -EOPNOTSUPP;
+ }
+
+ out_priv = netdev_priv(out_dev);
+ rpriv = out_priv->ppriv;
+ esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
+ esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
+ esw_attr->out_count++;
+
+ return 0;
+}
+
+static int
+parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *out_dev = act->dev;
+ int err;
+
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
+ MLX5E_TC_INT_PORT_EGRESS,
+ &attr->action, esw_attr->out_count);
+ if (err)
+ return err;
+
+ esw_attr->out_count++;
+ return 0;
+}
+
+static int
+tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct net_device *out_dev = act->dev;
+ int err = -EOPNOTSUPP;
+
+ if (parse_state->encap)
+ err = parse_mirred_encap(parse_state, act, attr);
+ else if (netdev_port_same_parent_id(priv->netdev, out_dev))
+ err = parse_mirred(parse_state, act, priv, attr);
+ else if (netif_is_ovs_master(out_dev))
+ err = parse_mirred_ovs_master(parse_state, act, priv, attr);
+
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_mirred = {
+ .can_offload = tc_act_can_offload_mirred,
+ .parse_action = tc_act_parse_mirred,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
new file mode 100644
index 000000000000..2c74567b6d25
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct net_device *out_dev = act->dev;
+ struct mlx5e_priv *priv = flow->priv;
+
+ if (act->id != FLOW_ACTION_REDIRECT)
+ return false;
+
+ if (priv->netdev->netdev_ops != out_dev->netdev_ops ||
+ !mlx5e_same_hw_devs(priv, netdev_priv(out_dev))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not on same switch HW, can't offload forwarding");
+ netdev_warn(priv->netdev,
+ "devices %s %s not on same switch HW, can't offload forwarding\n",
+ netdev_name(priv->netdev),
+ out_dev->name);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex;
+ flow_flag_set(parse_state->flow, HAIRPIN);
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_mirred_nic = {
+ .can_offload = tc_act_can_offload_mirred_nic,
+ .parse_action = tc_act_parse_mirred_nic,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
new file mode 100644
index 000000000000..784fc4f68b1e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <net/bareudp.h>
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_priv *priv = parse_state->flow->priv;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l2_to_l3_tunnel) ||
+ act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls push is supported only for mpls_uc protocol");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ parse_state->mpls_push = true;
+
+ return 0;
+}
+
+static bool
+tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct net_device *filter_dev;
+
+ filter_dev = flow->attr->parse_attr->filter_dev;
+
+ /* we only support mpls pop if it is the first action
+ * and the filter net device is bareudp. Subsequent
+ * actions can be pedit and the last can be mirred
+ * egress redirect.
+ */
+ if (act_index) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action");
+ return false;
+ }
+
+ if (!netif_is_bareudp(filter_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only on bareudp devices");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->parse_attr->eth.h_proto = act->mpls_pop.proto;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_flag_set(parse_state->flow, L3_TO_L2_DECAP);
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_mpls_push = {
+ .can_offload = tc_act_can_offload_mpls_push,
+ .parse_action = tc_act_parse_mpls_push,
+};
+
+struct mlx5e_tc_act mlx5e_tc_act_mpls_pop = {
+ .can_offload = tc_act_can_offload_mpls_pop,
+ .parse_action = tc_act_parse_mpls_pop,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
new file mode 100644
index 000000000000..79addbbef087
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include "act.h"
+#include "pedit.h"
+#include "en/tc_priv.h"
+#include "en/mod_hdr.h"
+
+static int pedit_header_offsets[] = {
+ [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
+ [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
+ [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
+ [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
+ [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
+};
+
+#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
+
+static int
+set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
+{
+ u32 *curr_pmask, *curr_pval;
+
+ curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
+ curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
+
+ if (*curr_pmask & mask) { /* disallow acting twice on the same location */
+ NL_SET_ERR_MSG_MOD(extack,
+ "curr_pmask and new mask same. Acting twice on same location");
+ goto out_err;
+ }
+
+ *curr_pmask |= mask;
+ *curr_pval |= (val & mask);
+
+ return 0;
+
+out_err:
+ return -EOPNOTSUPP;
+}
+
+static int
+parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
+{
+ u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
+ u8 htype = act->mangle.htype;
+ int err = -EOPNOTSUPP;
+ u32 mask, val, offset;
+
+ if (htype == FLOW_ACT_MANGLE_UNSPEC) {
+ NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
+ goto out_err;
+ }
+
+ if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) {
+ NL_SET_ERR_MSG_MOD(extack, "The pedit offload action is not supported");
+ goto out_err;
+ }
+
+ mask = act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
+
+ err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
+ if (err)
+ goto out_err;
+
+ hdrs[cmd].pedits++;
+
+ return 0;
+out_err:
+ return err;
+}
+
+static int
+parse_pedit_to_reformat(const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
+{
+ u32 mask, val, offset;
+ u32 *p;
+
+ if (act->id != FLOW_ACTION_MANGLE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
+ NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
+ return -EOPNOTSUPP;
+ }
+
+ mask = ~act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
+ p = (u32 *)&parse_attr->eth;
+ *(p + (offset >> 2)) |= (val & mask);
+
+ return 0;
+}
+
+int
+mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
+ return parse_pedit_to_reformat(act, parse_attr, extack);
+
+ return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack);
+}
+
+static bool
+tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ enum mlx5_flow_namespace_type ns_type;
+ int err;
+
+ ns_type = mlx5e_get_flow_namespace(flow);
+
+ err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type,
+ attr->parse_attr, parse_state->hdrs,
+ flow, parse_state->extack);
+ if (err)
+ return err;
+
+ if (flow_flag_test(flow, L3_TO_L2_DECAP))
+ goto out;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ esw_attr->split_count = esw_attr->out_count;
+
+out:
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_pedit = {
+ .can_offload = tc_act_can_offload_pedit,
+ .parse_action = tc_act_parse_pedit,
+};
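set_pedit_val() enforces an invariant that is easy to miss: each 32-bit location in the shadow pedit_headers accumulates a mask of bits already claimed, and a later mangle touching any of those bits is rejected instead of being silently merged. A self-contained illustration of that bookkeeping (plain C, not driver code):

#include <stdint.h>
#include <stdio.h>

static int claim_bits(uint32_t *claimed, uint32_t mask)
{
	if (*claimed & mask)
		return -1;	/* acting twice on the same location */

	*claimed |= mask;
	return 0;
}

int main(void)
{
	uint32_t claimed = 0;

	printf("%d\n", claim_bits(&claimed, 0x0000ffff));	/*  0: first write */
	printf("%d\n", claim_bits(&claimed, 0xffff0000));	/*  0: disjoint bits */
	printf("%d\n", claim_bits(&claimed, 0x000000ff));	/* -1: overlap rejected */
	return 0;
}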
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
new file mode 100644
index 000000000000..da8ab03af58f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_PEDIT_H__
+#define __MLX5_EN_TC_ACT_PEDIT_H__
+
+#include "en_tc.h"
+
+struct pedit_headers {
+ struct ethhdr eth;
+ struct vlan_hdr vlan;
+ struct iphdr ip4;
+ struct ipv6hdr ip6;
+ struct tcphdr tcp;
+ struct udphdr udp;
+};
+
+struct pedit_headers_action {
+ struct pedit_headers vals;
+ struct pedit_headers masks;
+ u32 pedits;
+};
+
+int
+mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack);
+
+#endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
new file mode 100644
index 000000000000..0819110193dc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (act->ptype != PACKET_HOST) {
+ NL_SET_ERR_MSG_MOD(extack, "skbedit ptype is only supported with type host");
+ return -EOPNOTSUPP;
+ }
+
+ parse_state->ptype_host = true;
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_ptype = {
+ .can_offload = tc_act_can_offload_ptype,
+ .parse_action = tc_act_parse_ptype,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
new file mode 100644
index 000000000000..1c32e24e528d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct net_device *out_dev = act->dev;
+ struct mlx5_esw_flow_attr *esw_attr;
+
+ parse_attr = flow->attr->parse_attr;
+ esw_attr = flow->attr->esw_attr;
+
+ if (!out_dev)
+ return false;
+
+ if (!netif_is_ovs_master(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to ingress is supported only for OVS internal ports");
+ return false;
+ }
+
+ if (netif_is_ovs_master(parse_attr->filter_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to ingress is not supported from internal port");
+ return false;
+ }
+
+ if (!parse_state->ptype_host) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to int port ingress requires ptype=host action");
+ return false;
+ }
+
+ if (esw_attr->out_count) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to int port ingress is supported only as single destination");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *out_dev = act->dev;
+ int err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
+ MLX5E_TC_INT_PORT_INGRESS,
+ &attr->action, esw_attr->out_count);
+ if (err)
+ return err;
+
+ esw_attr->out_count++;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress = {
+ .can_offload = tc_act_can_offload_redirect_ingress,
+ .parse_action = tc_act_parse_redirect_ingress,
+};
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
new file mode 100644
index 000000000000..6699bdf5cf01
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <net/psample.h>
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (flow_flag_test(parse_state->flow, CT)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sample action with connection tracking is not supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_sample_attr *sample_attr;
+
+ sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
+ if (!sample_attr)
+ return -ENOMEM;
+
+ sample_attr->rate = act->sample.rate;
+ sample_attr->group_num = act->sample.psample_group->group_num;
+
+ if (act->sample.truncate)
+ sample_attr->trunc_size = act->sample.trunc_size;
+
+ attr->sample_attr = sample_attr;
+ flow_flag_set(parse_state->flow, SAMPLE);
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_sample = {
+ .can_offload = tc_act_can_offload_sample,
+ .parse_action = tc_act_parse_sample,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
new file mode 100644
index 000000000000..046b64c2cec4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (parse_state->num_actions != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_trap = {
+ .can_offload = tc_act_can_offload_trap,
+ .parse_action = tc_act_parse_trap,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
new file mode 100644
index 000000000000..6f4a2cf46afd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_tun_encap.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ if (!act->tunnel) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack,
+ "Zero tunnel attributes is not supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ parse_state->tun_info = act->tunnel;
+ parse_state->encap = true;
+
+ return 0;
+}
+
+static bool
+tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ parse_state->decap = true;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_tun_encap = {
+ .can_offload = tc_act_can_offload_tun_encap,
+ .parse_action = tc_act_parse_tun_encap,
+};
+
+struct mlx5e_tc_act mlx5e_tc_act_tun_decap = {
+ .can_offload = tc_act_can_offload_tun_decap,
+ .parse_action = tc_act_parse_tun_decap,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
new file mode 100644
index 000000000000..70fc0c2d8813
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include "act.h"
+#include "vlan.h"
+#include "en/tc_priv.h"
+
+static int
+add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry prio_tag_act = {
+ .vlan.vid = 0,
+ .vlan.prio =
+ MLX5_GET(fte_match_set_lyr_2_4,
+ mlx5e_get_match_headers_value(*action,
+ &parse_attr->spec),
+ first_prio) &
+ MLX5_GET(fte_match_set_lyr_2_4,
+ mlx5e_get_match_headers_criteria(*action,
+ &parse_attr->spec),
+ first_prio),
+ };
+
+ return mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
+ &prio_tag_act, parse_attr, hdrs, action,
+ extack);
+}
+
+static int
+parse_tc_vlan_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_esw_flow_attr *attr,
+ u32 *action,
+ struct netlink_ext_ack *extack)
+{
+ u8 vlan_idx = attr->total_vlan;
+
+ if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
+ NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
+ return -EOPNOTSUPP;
+ }
+
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
+ } else {
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ }
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ attr->vlan_vid[vlan_idx] = act->vlan.vid;
+ attr->vlan_prio[vlan_idx] = act->vlan.prio;
+ attr->vlan_proto[vlan_idx] = act->vlan.proto;
+ if (!attr->vlan_proto[vlan_idx])
+ attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
+
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported for vlan depth > 1");
+ return -EOPNOTSUPP;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ } else {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
+ (act->vlan.proto != htons(ETH_P_8021Q) ||
+ act->vlan.prio)) {
+ NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
+ return -EINVAL;
+ }
+
+ attr->total_vlan = vlan_idx + 1;
+
+ return 0;
+}
+
+int
+mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct net_device **out_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *vlan_dev = *out_dev;
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_PUSH,
+ .vlan.vid = vlan_dev_vlan_id(vlan_dev),
+ .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
+ .vlan.prio = 0,
+ };
+ int err;
+
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
+ if (err)
+ return err;
+
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+
+ if (is_vlan_dev(*out_dev))
+ err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack);
+
+ return err;
+}
+
+int
+mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_POP,
+ };
+ int nest_level, err = 0;
+
+ nest_level = attr->parse_attr->filter_dev->lower_level -
+ priv->netdev->lower_level;
+ while (nest_level--) {
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action,
+ extack);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static bool
+tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int err;
+
+ if (act->id == FLOW_ACTION_VLAN_PUSH &&
+ (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
+ /* Replace vlan pop+push with vlan modify */
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act,
+ attr->parse_attr, parse_state->hdrs,
+ &attr->action, parse_state->extack);
+ } else {
+ err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
+ parse_state->extack);
+ }
+
+ if (err)
+ return err;
+
+ esw_attr->split_count = esw_attr->out_count;
+
+ return 0;
+}
+
+static int
+tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct pedit_headers_action *hdrs = parse_state->hdrs;
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int err;
+
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ /* For prio tag mode, replace the vlan pop with a vlan
+ * prio tag rewrite.
+ */
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
+ &attr->action, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_vlan = {
+ .can_offload = tc_act_can_offload_vlan,
+ .parse_action = tc_act_parse_vlan,
+ .post_parse = tc_act_post_parse_vlan,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
new file mode 100644
index 000000000000..3d62f13ab61f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_VLAN_H__
+#define __MLX5_EN_TC_ACT_VLAN_H__
+
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+
+struct pedit_headers_action;
+
+int
+mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct net_device **out_dev,
+ struct netlink_ext_ack *extack);
+
+int
+mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack);
+
+int
+mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack);
+
+#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
new file mode 100644
index 000000000000..63e36e7f53e3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include "act.h"
+#include "vlan.h"
+#include "en/tc_priv.h"
+
+struct pedit_headers_action;
+
+int
+mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ u16 mask16 = VLAN_VID_MASK;
+ u16 val16 = act->vlan.vid & VLAN_VID_MASK;
+ const struct flow_action_entry pedit_act = {
+ .id = FLOW_ACTION_MANGLE,
+ .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
+ .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
+ .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
+ .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
+ };
+ u8 match_prio_mask, match_prio_val;
+ void *headers_c, *headers_v;
+ int err;
+
+ headers_c = mlx5e_get_match_headers_criteria(*action, &parse_attr->spec);
+ headers_v = mlx5e_get_match_headers_value(*action, &parse_attr->spec);
+
+ if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN rewrite action must have VLAN protocol match");
+ return -EOPNOTSUPP;
+ }
+
+ match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+ match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+ if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing VLAN prio is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs,
+ NULL, extack);
+ *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return err;
+}
+
+static bool
+tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ enum mlx5_flow_namespace_type ns_type;
+ int err;
+
+ ns_type = mlx5e_get_flow_namespace(parse_state->flow);
+ err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act,
+ attr->parse_attr, parse_state->hdrs,
+ &attr->action, parse_state->extack);
+ if (err)
+ return err;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ attr->esw_attr->split_count = attr->esw_attr->out_count;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle = {
+ .can_offload = tc_act_can_offload_vlan_mangle,
+ .parse_action = tc_act_parse_vlan_mangle,
+};
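The rewrite above relies on the VLAN TCI layout: the 16-bit field packs prio (bits 15-13), DEI (bit 12) and VID (bits 11-0), so changing only the VID means mangling the h_vlan_TCI halfword with val = vid and mask = ~VLAN_VID_MASK, leaving the prio/DEI bits untouched; that is also why a requested prio change is rejected separately. A plain-C illustration of the masking, not driver code:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff	/* low 12 bits of the TCI */

int main(void)
{
	uint16_t tci = 0x6064;				/* prio 3, vid 100 */
	uint16_t new_vid = 200;
	uint16_t keep = (uint16_t)~VLAN_VID_MASK;	/* preserve prio/DEI */

	tci = (tci & keep) | (new_vid & VLAN_VID_MASK);
	printf("tci=0x%04x prio=%u vid=%u\n",
	       tci, tci >> 13, tci & VLAN_VID_MASK);	/* prio 3, vid 200 */
	return 0;
}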
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index df6888c4793c..ff4b4f8a5a9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -5,6 +5,7 @@
#include <net/psample.h>
#include "en/mapping.h"
#include "en/tc/post_act.h"
+#include "en/mod_hdr.h"
#include "sample.h"
#include "eswitch.h"
#include "en_tc.h"
@@ -255,12 +256,12 @@ sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
goto err_modify_hdr;
}
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
return modify_hdr;
err_modify_hdr:
err_post_act:
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
err_set_regc0:
return ERR_PTR(err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 2445e2ae3324..4a0d38d219ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -14,6 +14,7 @@
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/xarray.h>
+#include <linux/if_macvlan.h>
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
@@ -36,6 +37,12 @@
#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
+/* Statically allocate modify actions for
+ * ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10.
+ * This will be increased dynamically if needed (for the ipv6 snat + dnat).
+ */
+#define MLX5_CT_MIN_MOD_ACTS 10
+
#define ct_dbg(fmt, args...)\
netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
@@ -320,7 +327,33 @@ mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple,
}
static int
-mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
+mlx5_tc_ct_get_flow_source_match(struct mlx5_tc_ct_priv *ct_priv,
+ struct net_device *ndev)
+{
+ struct mlx5e_priv *other_priv = netdev_priv(ndev);
+ struct mlx5_core_dev *mdev = ct_priv->dev;
+ bool vf_rep, uplink_rep;
+
+ vf_rep = mlx5e_eswitch_vf_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev);
+ uplink_rep = mlx5e_eswitch_uplink_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev);
+
+ if (vf_rep)
+ return MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+ if (uplink_rep)
+ return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ if (is_vlan_dev(ndev))
+ return mlx5_tc_ct_get_flow_source_match(ct_priv, vlan_dev_real_dev(ndev));
+ if (netif_is_macvlan(ndev))
+ return mlx5_tc_ct_get_flow_source_match(ct_priv, macvlan_dev_real_dev(ndev));
+ if (mlx5e_get_tc_tun(ndev) || netif_is_lag_master(ndev))
+ return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+ return MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT;
+}
+
+static int
+mlx5_tc_ct_set_tuple_match(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_flow_spec *spec,
struct flow_rule *rule)
{
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -335,8 +368,7 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
flow_rule_match_basic(rule, &match);
- mlx5e_tc_set_ethertype(priv->mdev, &match, true, headers_c,
- headers_v);
+ mlx5e_tc_set_ethertype(ct_priv->dev, &match, true, headers_c, headers_v);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
match.mask->ip_proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
@@ -432,6 +464,23 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
ntohs(match.key->flags));
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
+ struct flow_match_meta match;
+
+ flow_rule_match_meta(rule, &match);
+
+ if (match.key->ingress_ifindex & match.mask->ingress_ifindex) {
+ struct net_device *dev;
+
+ dev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ if (dev && MLX5_CAP_ESW_FLOWTABLE(ct_priv->dev, flow_source))
+ spec->flow_context.flow_source =
+ mlx5_tc_ct_get_flow_source_match(ct_priv, dev);
+
+ dev_put(dev);
+ }
+ }
+
return 0;
}
@@ -609,22 +658,15 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
struct flow_action *flow_action = &flow_rule->action;
struct mlx5_core_dev *mdev = ct_priv->dev;
struct flow_action_entry *act;
- size_t action_size;
char *modact;
int err, i;
- action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_MANGLE: {
- err = alloc_mod_hdr_actions(mdev, ct_priv->ns_type,
- mod_acts);
- if (err)
- return err;
-
- modact = mod_acts->actions +
- mod_acts->num_actions * action_size;
+ modact = mlx5e_mod_hdr_alloc(mdev, ct_priv->ns_type, mod_acts);
+ if (IS_ERR(modact))
+ return PTR_ERR(modact);
err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact);
if (err)
@@ -652,7 +694,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_mod_hdr_handle **mh,
u8 zone_restore_id, bool nat)
{
- struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
+ DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
struct flow_action_entry *meta;
u16 ct_state = 0;
int err;
@@ -706,11 +749,11 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
}
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
return 0;
err_mapping:
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
return err;
}
@@ -770,7 +813,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
attr->esw_attr->in_mdev = priv->mdev;
- mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
+ mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
@@ -907,12 +950,9 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_tuple rev_tuple = entry->tuple;
struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
- __be16 tmp_port;
/* get the reversed tuple */
- tmp_port = rev_tuple.port.src;
- rev_tuple.port.src = rev_tuple.port.dst;
- rev_tuple.port.dst = tmp_port;
+ swap(rev_tuple.port.src, rev_tuple.port.dst);
if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
__be32 tmp_addr = rev_tuple.ip.src_v4;
@@ -1460,7 +1500,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
}
pre_ct->miss_rule = rule;
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
kvfree(spec);
return 0;
@@ -1469,7 +1509,7 @@ err_miss_rule:
err_flow_rule:
mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
err_mapping:
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
kvfree(spec);
return err;
}
@@ -1865,14 +1905,14 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
attr->ct_attr.ct_flow = ct_flow;
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
return ct_flow->pre_ct_rule;
err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
kfree(ct_flow->pre_ct_attr);
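The MLX5_CT_MIN_MOD_ACTS change above removes a heap allocation from the CT fast path: a fixed array sized for the common case (10 modify-header actions) lives on the stack via the DECLARE_MOD_HDR_ACTS* macros from en/mod_hdr.h, and mlx5e_mod_hdr_alloc() grows onto the heap only when IPv6 SNAT+DNAT overflows it, so mlx5e_mod_hdr_dealloc() stays safe to call on every exit path. A generic sketch of this small-buffer-first pattern, with hypothetical names:

#include <stdlib.h>
#include <string.h>

struct act_buf {
	char *buf;
	size_t cap, len;
	int on_heap;	/* 0 while still on the caller's stack array */
};

/* Reserve sz bytes, spilling to the heap only on overflow. */
static void *act_buf_alloc(struct act_buf *a, size_t sz)
{
	if (a->len + sz > a->cap) {
		size_t ncap = a->cap ? a->cap : sz;
		char *nbuf;

		while (ncap < a->len + sz)
			ncap *= 2;
		nbuf = malloc(ncap);
		if (!nbuf)
			return NULL;
		memcpy(nbuf, a->buf, a->len);
		if (a->on_heap)
			free(a->buf);
		a->buf = nbuf;
		a->cap = ncap;
		a->on_heap = 1;
	}
	a->len += sz;
	return a->buf + a->len - sz;
}

/* Always safe to call, mirroring mlx5e_mod_hdr_dealloc() above. */
static void act_buf_free(struct act_buf *a)
{
	if (a->on_heap)
		free(a->buf);
}

A caller declares something like char stack[640]; struct act_buf a = { .buf = stack, .cap = sizeof(stack) }; and frees unconditionally on every exit path.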
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index b689701ac7d8..f832c26ff2c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -5,11 +5,14 @@
#define __MLX5_EN_TC_PRIV_H__
#include "en_tc.h"
+#include "en/tc/act/act.h"
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
#define MLX5E_TC_MAX_SPLITS 1
+#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
+
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
@@ -37,6 +40,7 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
struct ethhdr eth;
+ struct mlx5e_tc_act_parse_state parse_state;
};
/* Helper struct for accessing a struct containing list_head array.
@@ -115,7 +119,11 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr);
+bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
+bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
+int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow);
+bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
@@ -176,4 +184,8 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
+
+void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
+void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
+
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index a5e450973225..378fc8e3bd97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
+#include <net/inet_ecn.h>
#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
@@ -103,7 +104,7 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
}
static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
+ struct net_device *dev,
struct mlx5e_tc_tun_route_attr *attr)
{
struct net_device *route_dev;
@@ -122,13 +123,13 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
} else {
- struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(dev);
if (tunnel && tunnel->get_remote_ifindex)
- attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev);
+ attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(dev);
}
- rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
+ rt = ip_route_output_key(dev_net(dev), &attr->fl.fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -235,7 +236,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -350,7 +351,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -440,10 +441,10 @@ release_neigh:
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
+ struct net_device *dev,
struct mlx5e_tc_tun_route_attr *attr)
{
- struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(dev);
struct net_device *route_dev;
struct net_device *out_dev;
struct dst_entry *dst;
@@ -451,8 +452,8 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
int ret;
if (tunnel && tunnel->get_remote_ifindex)
- attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev);
- dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
+ attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(dev);
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(dev), NULL, &attr->fl.fl6,
NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -708,7 +709,8 @@ release_neigh:
int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *flow_attr)
+ struct mlx5_flow_attr *flow_attr,
+ struct net_device *filter_dev)
{
struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
struct mlx5e_tc_int_port *int_port;
@@ -720,14 +722,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
/* Addresses are swapped for decap */
attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4;
attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4;
- err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr);
+ err = mlx5e_route_lookup_ipv4_get(priv, filter_dev, &attr);
}
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
else if (flow_attr->tun_ip_version == 6) {
/* Addresses are swapped for decap */
attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6;
attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6;
- err = mlx5e_route_lookup_ipv6_get(priv, priv->netdev, &attr);
+ err = mlx5e_route_lookup_ipv6_get(priv, filter_dev, &attr);
}
#endif
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index aa092eaeaec3..b38f693bbb52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -94,7 +94,8 @@ mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
#endif
int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr);
+ struct mlx5_flow_attr *attr,
+ struct net_device *filter_dev);
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 042b1abe1437..9918ed8c059b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -1159,7 +1159,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
tbl_time_after = tbl_time_before;
- err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr);
+ err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr, parse_attr->filter_dev);
if (err || !esw_attr->rx_tun_attr->decap_vport)
goto out;
@@ -1480,7 +1480,7 @@ static void mlx5e_reoffload_decap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
spec = &parse_attr->spec;
- err = mlx5e_tc_tun_route_lookup(priv, spec, attr);
+ err = mlx5e_tc_tun_route_lookup(priv, spec, attr, parse_attr->filter_dev);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to lookup route for flow, %d\n",
err);
@@ -1579,6 +1579,8 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
struct net_device *fib_dev;
fen_info = container_of(info, struct fib_entry_notifier_info, info);
+ if (fen_info->fi->nh)
+ return NULL;
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
fen_info->dst_len != 32)
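The added early return matters because fib_info_nh() is only valid for legacy nexthops and WARNs when fi->nh is set; routes backed by nexthop objects are therefore skipped rather than offloaded. A sketch of the accessor split, assuming <net/nexthop.h>:

	struct fib_nh_common *nhc;
	struct fib_nh *nh;

	if (fi->nh)
		nhc = fib_info_nhc(fi, 0);	/* nexthop-object route */
	else
		nh = fib_info_nh(fi, 0);	/* legacy per-fib nexthop */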
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 2f0df5cc1a2d..338d65e2c9ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -151,7 +151,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
rq->stats->xdp_redirect++;
return true;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rq->netdev, prog, act);
fallthrough;
case XDP_ABORTED:
xdp_abort:
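The extra arguments follow the tree-wide change that lets the warning name the device and program that returned the bogus verdict. The declaration after the change (from include/linux/filter.h):

	void bpf_warn_invalid_xdp_action(struct net_device *dev,
					 struct bpf_prog *prog, u32 act);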
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 7b562d2c8a19..279cd8f4e79f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -11,13 +11,13 @@ static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
{
struct device *dev = mlx5_core_dma_dev(priv->mdev);
- return xsk_pool_dma_map(pool, dev, 0);
+ return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
}
static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
struct xsk_buff_pool *pool)
{
- return xsk_pool_dma_unmap(pool, 0);
+ return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
}
static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
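Passing DMA_ATTR_SKIP_CPU_SYNC at pool map/unmap time defers cache maintenance to the driver, which can then sync only the bytes it actually touches. A hedged sketch of the general DMA-API pattern (dev, page and frag_len are placeholders, not from the patch):

	#include <linux/dma-mapping.h>

	/* Map once, without the implicit CPU sync ... */
	dma_addr_t addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);

	/* ... and sync only the fragment the CPU will actually read. */
	dma_sync_single_for_cpu(dev, addr, frag_len, DMA_FROM_DEVICE);

	/* The unmap side must pass the same attribute to stay symmetric. */
	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);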
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 8526a5fbbf0b..25eac9e20342 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -68,7 +68,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
rq->xsk_pool = pool;
- rq->stats = &c->priv->channel_stats[c->ix].xskrq;
+ rq->stats = &c->priv->channel_stats[c->ix]->xskrq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
err = mlx5e_rq_set_handlers(rq, params, xsk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 15711814d2d2..96064a2033f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -611,7 +611,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->rxq = rxq;
priv_rx->sk = sk;
- priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
+ priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index fe5d82fa6e92..49cca6bd49a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -556,7 +556,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
+ priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
mlx5e_dbg(HW, priv,
"%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
__func__, arfs_rule->filter_id, arfs_rule->rxq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c2ea5fad48dd..57d755db1cf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -314,7 +314,9 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
}
static void mlx5e_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -380,7 +382,9 @@ unlock:
}
static int mlx5e_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -511,7 +515,8 @@ static int mlx5e_set_channels(struct net_device *dev,
}
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal)
{
struct dim_cq_moder *rx_moder, *tx_moder;
@@ -528,6 +533,11 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->tx_max_coalesced_frames = tx_moder->pkts;
coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
+ kernel_coal->use_cqe_mode_rx =
+ MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER);
+ kernel_coal->use_cqe_mode_tx =
+ MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER);
+
return 0;
}
@@ -538,7 +548,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
@@ -578,14 +588,26 @@ mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
}
}
+/* convert a boolean value of cq_mode to mlx5 period mode
+ * true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE
+ * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE
+ */
+static int cqe_mode_to_period_mode(bool val)
+{
+ return val ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
bool reset_rx, reset_tx;
bool reset = true;
+ u8 cq_period_mode;
int err = 0;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
@@ -605,6 +627,12 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
return -ERANGE;
}
+ if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
+ !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) {
+ NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
@@ -621,6 +649,18 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
+ cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx);
+ if (cq_period_mode != rx_moder->cq_period_mode) {
+ mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
+ reset_rx = true;
+ }
+
+ cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx);
+ if (cq_period_mode != tx_moder->cq_period_mode) {
+ mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
+ reset_tx = true;
+ }
+
if (reset_rx) {
u8 mode = MLX5E_GET_PFLAG(&new_params,
MLX5E_PFLAG_RX_CQE_BASED_MODER);
@@ -656,9 +696,9 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_set_coalesce(priv, coal);
+ return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
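The coalesce plumbing above follows the extended ethtool API: the core hands drivers a struct kernel_ethtool_coalesce next to the legacy struct ethtool_coalesce, and the CQE-mode booleans in it are only honoured when the driver advertises ETHTOOL_COALESCE_USE_CQE. A minimal sketch of the contract (ops body abbreviated, helper name hypothetical):

	static int my_get_coalesce(struct net_device *dev,
				   struct ethtool_coalesce *ec,
				   struct kernel_ethtool_coalesce *kec,
				   struct netlink_ext_ack *extack)
	{
		kec->use_cqe_mode_rx = my_rx_cqe_mode_enabled(dev); /* hypothetical */
		return 0;
	}

	static const struct ethtool_ops my_ethtool_ops = {
		.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
					     ETHTOOL_COALESCE_USE_CQE,
		.get_coalesce = my_get_coalesce,
	};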
@@ -1843,24 +1883,19 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
bool is_rx_cq)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_params new_params;
- bool mode_changed;
u8 cq_period_mode, current_cq_period_mode;
+ struct mlx5e_params new_params;
+
+ if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
+ return -EOPNOTSUPP;
+
+ cq_period_mode = cqe_mode_to_period_mode(enable);
- cq_period_mode = enable ?
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
- MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
current_cq_period_mode = is_rx_cq ?
priv->channels.params.rx_cq_moderation.cq_period_mode :
priv->channels.params.tx_cq_moderation.cq_period_mode;
- mode_changed = cq_period_mode != current_cq_period_mode;
-
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
- !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
- return -EOPNOTSUPP;
- if (!mode_changed)
+ if (cq_period_mode == current_cq_period_mode)
return 0;
new_params = priv->channels.params;
@@ -1894,7 +1929,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
- if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
+ if (new_val && !mlx5e_profile_feature_cap(priv->profile, PTP_RX) && rx_filter) {
netdev_err(priv->netdev,
"Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
return -EINVAL;
@@ -2358,7 +2393,8 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
const struct ethtool_ops mlx5e_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USE_ADAPTIVE,
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USE_CQE,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 41379844eee1..bf80fb612449 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -37,6 +37,7 @@
#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
+#include <linux/filter.h>
#include <net/page_pool.h>
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
@@ -479,7 +480,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
- rq->stats = &c->priv->channel_stats[c->ix].rq;
+ rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
err = mlx5e_rq_set_handlers(rq, params, NULL);
if (err)
@@ -1159,10 +1160,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->xsk_pool = xsk_pool;
sq->stats = sq->xsk_pool ?
- &c->priv->channel_stats[c->ix].xsksq :
+ &c->priv->channel_stats[c->ix]->xsksq :
is_redirect ?
- &c->priv->channel_stats[c->ix].xdpsq :
- &c->priv->channel_stats[c->ix].rq_xdpsq;
+ &c->priv->channel_stats[c->ix]->xdpsq :
+ &c->priv->channel_stats[c->ix]->rq_xdpsq;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1938,7 +1939,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
params, &cparam->txq_sq, &c->sq[tc], tc,
qos_queue_group_id,
- &c->priv->channel_stats[c->ix].sq[tc]);
+ &c->priv->channel_stats[c->ix]->sq[tc]);
if (err)
goto err_close_sqs;
}
@@ -2193,6 +2194,30 @@ static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
}
+static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
+{
+ if (ix > priv->stats_nch) {
+ netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix,
+ priv->stats_nch);
+ return -EINVAL;
+ }
+
+ if (priv->channel_stats[ix])
+ return 0;
+
+ /* Asymmetric dynamic memory allocation.
+ * Freed in mlx5e_priv_cleanup(), not on channel closure.
+ */
+ mlx5e_dbg(DRV, priv, "Creating channel stats %d\n", ix);
+ priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (!priv->channel_stats[ix])
+ return -ENOMEM;
+ priv->stats_nch++;
+
+ return 0;
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
@@ -2210,6 +2235,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
return err;
+ err = mlx5e_channel_stats_alloc(priv, ix, cpu);
+ if (err)
+ return err;
+
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -2224,7 +2253,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
- c->stats = &priv->channel_stats[ix].ch;
+ c->stats = &priv->channel_stats[ix]->ch;
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
@@ -2615,7 +2644,7 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
}
}
-int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
+static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
int err;
@@ -3388,7 +3417,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
int i;
for (i = 0; i < priv->stats_nch; i++) {
- struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
+ struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
int j;
@@ -3576,11 +3605,6 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
new_params = priv->channels.params;
if (enable) {
- if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
- netdev_warn(netdev, "Can't set HW-GRO when CQE compress is active\n");
- err = -EINVAL;
- goto out;
- }
new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
new_params.packet_merge.shampo.match_criteria_type =
MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
@@ -3842,6 +3866,11 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
+
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "Disabling HW-GRO, not supported when CQE compress is active\n");
+ features &= ~NETIF_F_GRO_HW;
+ }
}
if (mlx5e_is_uplink_rep(priv))
@@ -4054,7 +4083,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
goto err_unlock;
}
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
err = mlx5e_hwstamp_config_no_ptp_rx(priv,
config.rx_filter != HWTSTAMP_FILTER_NONE);
else
@@ -4789,15 +4818,22 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
}
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
- netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
- netdev->hw_features |= NETIF_F_GSO_GRE;
- netdev->hw_enc_features |= NETIF_F_GSO_GRE;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE;
+ netdev->hw_features |= NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_enc_features |= NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
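Note the gso_partial_features lines in the hunk above: features listed there are only exercised through NETIF_F_GSO_PARTIAL, where the device segments and the stack pre-resolves the part the device cannot finish (here the outer UDP/GRE checksum). A sketch of the convention, not taken from this patch:

	netdev->hw_features	     |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
	netdev->hw_features	     |= NETIF_F_GSO_PARTIAL;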
@@ -5109,9 +5145,23 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
- .rx_ptp_support = true,
+ .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
+ BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
+ BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
};
+static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile)
+{
+ int nch;
+
+ nch = mlx5e_get_max_num_channels(mdev);
+
+ if (profile->max_nch_limit)
+ nch = min_t(int, nch, profile->max_nch_limit(mdev));
+ return nch;
+}
+
static unsigned int
mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
const struct mlx5e_profile *profile)
@@ -5120,7 +5170,7 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
unsigned int max_nch, tmp;
/* core resources */
- max_nch = mlx5e_get_max_num_channels(mdev);
+ max_nch = mlx5e_profile_max_num_channels(mdev, profile);
/* netdev rx queues */
tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
@@ -5144,12 +5194,17 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
+ int nch, num_txqs, node, i;
+
+ num_txqs = netdev->num_tx_queues;
+ nch = mlx5e_calc_max_nch(mdev, netdev, profile);
+ node = dev_to_node(mlx5_core_dma_dev(mdev));
+
/* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
priv->msglevel = MLX5E_MSG_LEVEL;
- priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile);
- priv->stats_nch = priv->max_nch;
+ priv->max_nch = nch;
priv->max_opened_tc = 1;
if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
@@ -5166,11 +5221,46 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->wq)
goto err_free_cpumask;
+ priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
+ if (!priv->txq2sq)
+ goto err_destroy_workqueue;
+
+ priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
+ if (!priv->tx_rates)
+ goto err_free_txq2sq;
+
+ priv->channel_tc2realtxq =
+ kcalloc_node(nch, sizeof(*priv->channel_tc2realtxq), GFP_KERNEL, node);
+ if (!priv->channel_tc2realtxq)
+ goto err_free_tx_rates;
+
+ for (i = 0; i < nch; i++) {
+ priv->channel_tc2realtxq[i] =
+ kcalloc_node(profile->max_tc, sizeof(**priv->channel_tc2realtxq),
+ GFP_KERNEL, node);
+ if (!priv->channel_tc2realtxq[i])
+ goto err_free_channel_tc2realtxq;
+ }
+
+ priv->channel_stats =
+ kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
+ if (!priv->channel_stats)
+ goto err_free_channel_tc2realtxq;
+
return 0;
+err_free_channel_tc2realtxq:
+ while (--i >= 0)
+ kfree(priv->channel_tc2realtxq[i]);
+ kfree(priv->channel_tc2realtxq);
+err_free_tx_rates:
+ kfree(priv->tx_rates);
+err_free_txq2sq:
+ kfree(priv->txq2sq);
+err_destroy_workqueue:
+ destroy_workqueue(priv->wq);
err_free_cpumask:
free_cpumask_var(priv->scratchpad.cpumask);
-
return -ENOMEM;
}
@@ -5182,6 +5272,14 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
if (!priv->mdev)
return;
+ for (i = 0; i < priv->stats_nch; i++)
+ kvfree(priv->channel_stats[i]);
+ kfree(priv->channel_stats);
+ for (i = 0; i < priv->max_nch; i++)
+ kfree(priv->channel_tc2realtxq[i]);
+ kfree(priv->channel_tc2realtxq);
+ kfree(priv->tx_rates);
+ kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
free_cpumask_var(priv->scratchpad.cpumask);
@@ -5197,13 +5295,44 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
memset(priv, 0, sizeof(*priv));
}
+static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile)
+{
+ unsigned int nch, ptp_txqs, qos_txqs;
+
+ nch = mlx5e_profile_max_num_channels(mdev, profile);
+
+ ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
+ mlx5e_profile_feature_cap(profile, PTP_TX) ?
+ profile->max_tc : 0;
+
+ qos_txqs = mlx5_qos_is_supported(mdev) &&
+ mlx5e_profile_feature_cap(profile, QOS_HTB) ?
+ mlx5e_qos_max_leaf_nodes(mdev) : 0;
+
+ return nch * profile->max_tc + ptp_txqs + qos_txqs;
+}
+
+static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile)
+{
+ unsigned int nch;
+
+ nch = mlx5e_profile_max_num_channels(mdev, profile);
+
+ return nch * profile->rq_groups;
+}
+
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- unsigned int txqs, unsigned int rxqs)
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
{
struct net_device *netdev;
+ unsigned int txqs, rxqs;
int err;
+ txqs = mlx5e_get_max_num_txqs(mdev, profile);
+ rxqs = mlx5e_get_max_num_rxqs(mdev, profile);
+
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
@@ -5405,7 +5534,7 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
static int mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
int err;
@@ -5428,7 +5557,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -5448,22 +5577,10 @@ static int mlx5e_probe(struct auxiliary_device *adev,
struct mlx5_core_dev *mdev = edev->mdev;
struct net_device *netdev;
pm_message_t state = {};
- unsigned int txqs, rxqs, ptp_txqs = 0;
struct mlx5e_priv *priv;
- int qos_sqs = 0;
int err;
- int nch;
-
- if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
- ptp_txqs = profile->max_tc;
- if (mlx5_qos_is_supported(mdev))
- qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);
-
- nch = mlx5e_get_max_num_channels(mdev);
- txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
- rxqs = nch * profile->rq_groups;
- netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
+ netdev = mlx5e_create_netdev(mdev, profile);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
return -ENOMEM;
@@ -5472,7 +5589,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
mlx5e_build_nic_netdev(netdev);
priv = netdev_priv(netdev);
- dev_set_drvdata(&adev->dev, priv);
+ auxiliary_set_drvdata(adev, priv);
priv->profile = profile;
priv->ppriv = NULL;
@@ -5520,7 +5637,7 @@ err_destroy_netdev:
static void mlx5e_remove(struct auxiliary_device *adev)
{
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
pm_message_t state = {};
mlx5e_dcbnl_delete_app(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 48895d79796a..06d1f46f1688 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -50,6 +50,7 @@
#include "fs_core.h"
#include "lib/mlx5.h"
#include "lib/devcom.h"
+#include "lib/vxlan.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
@@ -219,16 +220,22 @@ static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
}
}
-static void mlx5e_rep_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+static void
+mlx5e_rep_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_ethtool_get_ringparam(priv, param);
}
-static int mlx5e_rep_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+static int
+mlx5e_rep_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -258,7 +265,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
@@ -268,7 +275,7 @@ static int mlx5e_rep_set_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_set_coalesce(priv, coal);
+ return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
@@ -585,6 +592,12 @@ bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
+static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
+{
+ return (1 << MLX5_CAP_GEN(mdev, log_max_tir)) /
+ mlx5_eswitch_get_total_vports(mdev);
+}
+
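The new limit keeps the TIR budget intact when every representor is loaded. A worked example with made-up capability values:

	/* Illustrative numbers only, not real device capabilities. */
	unsigned int log_max_tir = 12;	/* 4096 TIRs on the device  */
	unsigned int total_vports = 16;	/* uplink + VF representors */
	unsigned int max_nch = (1U << log_max_tir) / total_vports; /* 256 */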
static void mlx5e_build_rep_params(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1027,6 +1040,7 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
rtnl_lock();
if (netif_running(netdev))
mlx5e_open(netdev);
+ udp_tunnel_nic_reset_ntf(priv->netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -1048,6 +1062,7 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
mlx5_notifier_unregister(mdev, &priv->events_nb);
mlx5e_rep_tc_disable(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
+ mlx5_vxlan_reset_to_default(mdev->vxlan);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
@@ -1107,7 +1122,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
- .rx_ptp_support = false,
+ .max_nch_limit = mlx5e_rep_max_nch_limit,
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
@@ -1128,7 +1143,6 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
- .rx_ptp_support = false,
};
/* e-Switch vport representors */
@@ -1179,14 +1193,10 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct devlink_port *dl_port;
struct net_device *netdev;
struct mlx5e_priv *priv;
- unsigned int txqs, rxqs;
- int nch, err;
+ int err;
profile = &mlx5e_rep_profile;
- nch = mlx5e_get_max_num_channels(dev);
- txqs = nch * profile->max_tc;
- rxqs = nch * profile->rq_groups;
- netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
+ netdev = mlx5e_create_netdev(dev, profile);
if (!netdev) {
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 793511d5ee4c..e86ccc22fb82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -37,6 +37,7 @@
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
+#include <net/gro.h>
#include <net/udp.h>
#include <net/tcp.h>
#include "en.h"
@@ -278,8 +279,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
if (unlikely(!dma_info->page))
return -ENOMEM;
- dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
- PAGE_SIZE, rq->buff.map_dir);
+ dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE,
+ rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
page_pool_recycle_direct(rq->page_pool, dma_info->page);
dma_info->page = NULL;
@@ -300,7 +301,8 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
- dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
+ dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
@@ -618,7 +620,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_klm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
+ max_klm_entries = MLX5E_MAX_KLM_PER_WQE;
klm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
@@ -1602,6 +1604,12 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
}
}
+static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ trigger_report(rq, cqe);
+ rq->stats->wqe_err++;
+}
+
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
@@ -1615,8 +1623,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- trigger_report(rq, cqe);
- rq->stats->wqe_err++;
+ mlx5e_handle_rx_err_cqe(rq, cqe);
goto free_wqe;
}
@@ -1669,7 +1676,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- rq->stats->wqe_err++;
+ mlx5e_handle_rx_err_cqe(rq, cqe);
goto free_wqe;
}
@@ -1718,8 +1725,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
wi->consumed_strides += cstrides;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- trigger_report(rq, cqe);
- rq->stats->wqe_err++;
+ mlx5e_handle_rx_err_cqe(rq, cqe);
goto mpwrq_cqe_out;
}
@@ -1987,8 +1993,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
wi->consumed_strides += cstrides;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- trigger_report(rq, cqe);
- stats->wqe_err++;
+ mlx5e_handle_rx_err_cqe(rq, cqe);
goto mpwrq_cqe_out;
}
@@ -2057,8 +2062,7 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
wi->consumed_strides += cstrides;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- trigger_report(rq, cqe);
- rq->stats->wqe_err++;
+ mlx5e_handle_rx_err_cqe(rq, cqe);
goto mpwrq_cqe_out;
}
@@ -2188,7 +2192,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
priv = mlx5i_epriv(netdev);
tstamp = &priv->tstamp;
- stats = &priv->channel_stats[rq->ix].rq;
+ stats = rq->stats;
flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
g = (flags_rqpn >> 28) & 3;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 2a9bfc3ffa2e..26e326fe503c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -35,6 +35,7 @@
#include "en_accel/tls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
+#include "en/port.h"
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
@@ -463,7 +464,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
- &priv->channel_stats[i];
+ priv->channel_stats[i];
int j;
mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
@@ -1158,12 +1159,99 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
-void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
- struct ethtool_fec_stats *fec_stats)
+static int fec_num_lanes(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
+ int err;
+
+ MLX5_SET(pmlp_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_PMLP, 0, 0);
+ if (err)
+ return 0;
+
+ return MLX5_GET(pmlp_reg, out, width);
+}
+
+static int fec_active_mode(struct mlx5_core_dev *mdev)
+{
+ unsigned long fec_active_long;
+ u32 fec_active;
+
+ if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
+ return MLX5E_FEC_NOFEC;
+
+ fec_active_long = fec_active;
+ return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
+}
+
+#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
+ fec_stats->corrected_blocks.lanes[(idx)] = \
+ MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
+ fc_fec_corrected_blocks_lane##idx); \
+ fec_stats->uncorrectable_blocks.lanes[(idx)] = \
+ MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
+ fc_fec_uncorrectable_blocks_lane##idx); \
+})
+
+static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
+ u32 *ppcnt, u8 lanes)
+{
+ if (lanes > 3) { /* 4 lanes */
+ MLX5E_STATS_SET_FEC_BLOCK(3);
+ MLX5E_STATS_SET_FEC_BLOCK(2);
+ }
+ if (lanes > 1) /* 2 lanes */
+ MLX5E_STATS_SET_FEC_BLOCK(1);
+ if (lanes > 0) /* 1 lane */
+ MLX5E_STATS_SET_FEC_BLOCK(0);
+}
+
+static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
+{
+ fec_stats->corrected_blocks.total =
+ MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
+ rs_fec_corrected_blocks);
+ fec_stats->uncorrectable_blocks.total =
+ MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
+ rs_fec_uncorrectable_blocks);
+}
+
+static void fec_set_block_stats(struct mlx5e_priv *priv,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int mode = fec_active_mode(mdev);
+
+ if (mode == MLX5E_FEC_NOFEC)
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
+ return;
+
+ switch (mode) {
+ case MLX5E_FEC_RS_528_514:
+ case MLX5E_FEC_RS_544_514:
+ case MLX5E_FEC_LLRS_272_257_1:
+ fec_set_rs_stats(fec_stats, out);
+ return;
+ case MLX5E_FEC_FIRECODE:
+ fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
+ }
+}
+
+static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
+ struct ethtool_fec_stats *fec_stats)
{
u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
@@ -1181,6 +1269,13 @@ void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
phy_corrected_bits);
}
+void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_set_corrected_bits_total(priv, fec_stats);
+ fec_set_block_stats(priv, fec_stats);
+}
+
#define PPORT_ETH_EXT_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -2076,7 +2171,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
for (i = 0; i < NUM_PTP_CH_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_ch_stats_desc[i].format);
+ "%s", ptp_ch_stats_desc[i].format);
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
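The small change above is a format-string fix: the stat name used to be passed as the format itself, so a '%' in it would be parsed as a conversion specifier. Pinning the format is the standard remedy:

	/* Before: desc->format is interpreted as a format string. */
	sprintf(data + (idx++) * ETH_GSTRING_LEN, ptp_ch_stats_desc[i].format);

	/* After: the name is plain data behind a fixed "%s" format. */
	sprintf(data + (idx++) * ETH_GSTRING_LEN, "%s",
		ptp_ch_stats_desc[i].format);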
@@ -2197,21 +2292,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
ch_stats_desc, j);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
rq_stats_desc, j);
for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
xskrq_stats_desc, j);
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
rq_xdpsq_stats_desc, j);
}
@@ -2219,17 +2314,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
sq_stats_desc, j);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
xsksq_stats_desc, j);
for (j = 0; j < NUM_XDPSQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
xdpsq_stats_desc, j);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 5e454a14428f..3d908a7e1406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,10 +39,6 @@
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
-#include <linux/if_macvlan.h>
-#include <net/tc_act/tc_pedit.h>
-#include <net/tc_act/tc_csum.h>
-#include <net/psample.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
@@ -62,6 +58,7 @@
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
+#include "en/tc/act/act.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -70,9 +67,6 @@
#include "lag/lag.h"
#include "lag/mp.h"
-#define nic_chains(priv) ((priv)->fs.tc.chains)
-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
-
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
@@ -209,12 +203,9 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
char *modact;
int err;
- err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
- if (err)
- return err;
-
- modact = mod_hdr_acts->actions +
- (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+ modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
+ if (IS_ERR(modact))
+ return PTR_ERR(modact);
/* Firmware has 5bit length field and 0 means 32bits */
if (mlen == 32)
@@ -333,7 +324,7 @@ void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
char *modact;
- modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);
+ modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);
/* Firmware has 5bit length field and 0 means 32bits */
if (mlen == 32)
@@ -403,7 +394,7 @@ bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, ESWITCH);
}
-static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
+bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, FT);
}
@@ -413,7 +404,7 @@ bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, OFFLOADED);
}
-static int get_flow_name_space(struct mlx5e_tc_flow *flow)
+int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
return mlx5e_is_eswitch_flow(flow) ?
MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
@@ -424,7 +415,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
+ return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
&priv->fs.tc.mod_hdr;
}
@@ -437,7 +428,7 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_mod_hdr_handle *mh;
mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
- get_flow_name_space(flow),
+ mlx5e_get_flow_namespace(flow),
&parse_attr->mod_hdr_acts);
if (IS_ERR(mh))
return PTR_ERR(mh);
@@ -941,7 +932,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
- struct mlx5_fs_chains *nic_chains = nic_chains(priv);
+ struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_flow_destination dest[2] = {};
@@ -1076,7 +1067,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
@@ -1095,7 +1086,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_fs_chains *nic_chains = nic_chains(priv);
+ struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
mlx5_del_flow_rules(rule);
@@ -1127,21 +1118,21 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
mutex_lock(&priv->fs.tc.t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
- mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
+ mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
priv->fs.tc.t = NULL;
}
mutex_unlock(&priv->fs.tc.t_lock);
- kvfree(attr->parse_attr);
-
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
- mlx5_fc_destroy(priv->mdev, attr->counter);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(priv->mdev, attr->counter);
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
+ kvfree(attr->parse_attr);
kfree(flow->attr);
}
@@ -1303,8 +1294,6 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
mutex_unlock(&uplink_priv->unready_flows_lock);
}
-static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
-
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
struct mlx5_core_dev *out_mdev, *route_mdev;
@@ -1319,7 +1308,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
route_mdev->coredev_type != MLX5_COREDEV_VF)
return false;
- return same_hw_devs(out_priv, route_priv);
+ return mlx5e_same_hw_devs(out_priv, route_priv);
}
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
@@ -1366,7 +1355,7 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
struct mlx5_modify_hdr *mod_hdr;
mod_hdr = mlx5_modify_header_alloc(priv->mdev,
- get_flow_name_space(flow),
+ mlx5e_get_flow_namespace(flow),
mod_hdr_acts->num_actions,
mod_hdr_acts->actions);
if (IS_ERR(mod_hdr))
@@ -1625,15 +1614,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
if (vf_tun && attr->modify_hdr)
mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
else
mlx5e_detach_mod_hdr(priv, flow);
}
- kfree(attr->sample_attr);
- kvfree(attr->parse_attr);
- kvfree(attr->esw_attr->rx_tun_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
@@ -1647,6 +1633,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+ kfree(attr->sample_attr);
+ kvfree(attr->esw_attr->rx_tun_attr);
+ kvfree(attr->parse_attr);
kfree(flow->attr);
}
@@ -1949,6 +1938,111 @@ u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
return ip_version;
}
+/* Tunnel devices follow RFC 6040 (see include/net/inet_ecn.h) and change the
+ * inner ip_ecn depending on inner and outer ip_ecn as follows:
+ * +---------+----------------------------------------+
+ * |Arriving | Arriving Outer Header |
+ * | Inner +---------+---------+---------+----------+
+ * | Header | Not-ECT | ECT(0) | ECT(1) | CE |
+ * +---------+---------+---------+---------+----------+
+ * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop> |
+ * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE* |
+ * | ECT(1) | ECT(1) | ECT(1) | ECT(1)* | CE* |
+ * | CE | CE | CE | CE | CE |
+ * +---------+---------+---------+---------+----------+
+ *
+ * TC matches on the inner header after decapsulation on the tunnel device, but
+ * hw offload matches the inner ip_ecn value before the hardware decap action.
+ *
+ * The marked cells are changed from the original inner packet ip_ecn value
+ * during decap, so matching those values on inner ip_ecn before decap will fail.
+ *
+ * The following helper allows offload when inner ip_ecn won't be changed by outer ip_ecn,
+ * except for the outer ip_ecn = CE, where in all cases inner ip_ecn will be changed to CE,
+ * so the inner ip_ecn=CE match can simply be dropped.
+ */
+
+static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
+ struct flow_cls_offload *f,
+ bool *match_inner_ecn)
+{
+ u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_ip match;
+
+ *match_inner_ecn = true;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ flow_rule_match_enc_ip(rule, &match);
+ outer_ecn_key = match.key->tos & INET_ECN_MASK;
+ outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ flow_rule_match_ip(rule, &match);
+ inner_ecn_key = match.key->tos & INET_ECN_MASK;
+ inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
+ }
+
+ if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
+ netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!outer_ecn_mask) {
+ if (!inner_ecn_mask)
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
+ netdev_warn(priv->netdev,
+ "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
+ netdev_warn(priv->netdev,
+ "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!inner_ecn_mask)
+ return 0;
+
+ /* Both inner and outer have full mask on ecn */
+
+ if (outer_ecn_key == INET_ECN_ECT_1) {
+ /* inner ecn might be changed by the decap action */
+
+ NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
+ netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (outer_ecn_key != INET_ECN_CE)
+ return 0;
+
+ if (inner_ecn_key != INET_ECN_CE) {
+ /* Can't happen in software, as packet ecn will be changed to CE after decap */
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
+ netdev_warn(priv->netdev,
+ "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ /* outer ecn = CE, inner ecn = CE: as decap will change inner ecn to CE in any case,
+ * drop the match on inner ecn
+ */
+ *match_inner_ecn = false;
+
+ return 0;
+}
+
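For reference, the decap half of the RFC 6040 table the helper reasons about can be written directly. This is only a sketch of the software behaviour, not part of the patch; the kernel's canonical version is INET_ECN_decapsulate() in include/net/inet_ecn.h:

	#include <net/inet_ecn.h>

	/* Compute the post-decap inner ECN per the table above;
	 * -1 stands for the <drop> cell. Sketch only.
	 */
	static int rfc6040_decap_ecn(u8 outer, u8 inner)
	{
		if (INET_ECN_is_not_ect(inner))
			return INET_ECN_is_ce(outer) ? -1 : INET_ECN_NOT_ECT;
		if (INET_ECN_is_ce(outer))
			return INET_ECN_CE;	/* the CE column */
		if (outer == INET_ECN_ECT_1 && inner == INET_ECN_ECT_0)
			return INET_ECN_ECT_1;	/* outer ECT(1) upgrades ECT(0) */
		return inner;			/* unchanged */
	}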
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
@@ -2054,16 +2148,14 @@ static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
outer_headers);
}
-static void *get_match_headers_value(u32 flags,
- struct mlx5_flow_spec *spec)
+void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_value(spec) :
get_match_outer_headers_value(spec);
}
-static void *get_match_headers_criteria(u32 flags,
- struct mlx5_flow_spec *spec)
+void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_criteria(spec) :
@@ -2144,6 +2236,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
enum fs_flow_table_type fs_type;
+ bool match_inner_ecn = true;
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
@@ -2197,6 +2290,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
headers_c = get_match_inner_headers_criteria(spec);
headers_v = get_match_inner_headers_value(spec);
}
+
+ err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
+ if (err)
+ return err;
}
err = mlx5e_flower_parse_meta(filter_dev, f);
@@ -2420,10 +2517,12 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct flow_match_ip match;
flow_rule_match_ip(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
- match.mask->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
- match.key->tos & 0x3);
+ if (match_inner_ecn) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
+ match.mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
+ match.key->tos & 0x3);
+ }
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
match.mask->tos >> 2);
@@ -2608,55 +2707,6 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
return err;
}
-struct pedit_headers {
- struct ethhdr eth;
- struct vlan_hdr vlan;
- struct iphdr ip4;
- struct ipv6hdr ip6;
- struct tcphdr tcp;
- struct udphdr udp;
-};
-
-struct pedit_headers_action {
- struct pedit_headers vals;
- struct pedit_headers masks;
- u32 pedits;
-};
-
-static int pedit_header_offsets[] = {
- [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
- [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
- [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
- [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
- [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
-};
-
-#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
-
-static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
-{
- u32 *curr_pmask, *curr_pval;
-
- curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
- curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
-
- if (*curr_pmask & mask) { /* disallow acting twice on the same location */
- NL_SET_ERR_MSG_MOD(extack,
- "curr_pmask and new mask same. Acting twice on same location");
- goto out_err;
- }
-
- *curr_pmask |= mask;
- *curr_pval |= (val & mask);
-
- return 0;
-
-out_err:
- return -EOPNOTSUPP;
-}
-
struct mlx5_fields {
u8 field;
u8 field_bsize;
@@ -2768,26 +2818,23 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- int i, action_size, first, last, next_z;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
- struct mlx5_fields *f;
unsigned long mask, field_mask;
- int err;
+ int i, first, last, next_z;
+ struct mlx5_fields *f;
u8 cmd;
mod_acts = &parse_attr->mod_hdr_acts;
- headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
- headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
+ headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
+ headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
set_masks = &hdrs[0].masks;
add_masks = &hdrs[1].masks;
set_vals = &hdrs[0].vals;
add_vals = &hdrs[1].vals;
- action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -2855,18 +2902,16 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
- if (err) {
+ action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
+ if (IS_ERR(action)) {
NL_SET_ERR_MSG_MOD(extack,
"too many pedit actions, can't offload");
mlx5_core_warn(priv->mdev,
"mlx5: parsed %d pedit actions, can't do more\n",
mod_acts->num_actions);
- return err;
+ return PTR_ERR(action);
}
- action = mod_acts->actions +
- (mod_acts->num_actions * action_size);
MLX5_SET(set_action_in, action, action_type, cmd);
MLX5_SET(set_action_in, action, field, f->field);
@@ -2896,141 +2941,8 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
- int namespace)
-{
- if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
- return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
- else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
- return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
-}
-
-int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
- int namespace,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
-{
- int action_size, new_num_actions, max_hw_actions;
- size_t new_sz, old_sz;
- void *ret;
-
- if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
- return 0;
-
- action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
- max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
- namespace);
- new_num_actions = min(max_hw_actions,
- mod_hdr_acts->actions ?
- mod_hdr_acts->max_actions * 2 : 1);
- if (mod_hdr_acts->max_actions == new_num_actions)
- return -ENOSPC;
-
- new_sz = action_size * new_num_actions;
- old_sz = mod_hdr_acts->max_actions * action_size;
- ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- memset(ret + old_sz, 0, new_sz - old_sz);
- mod_hdr_acts->actions = ret;
- mod_hdr_acts->max_actions = new_num_actions;
-
- return 0;
-}
-
-void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
-{
- kfree(mod_hdr_acts->actions);
- mod_hdr_acts->actions = NULL;
- mod_hdr_acts->num_actions = 0;
- mod_hdr_acts->max_actions = 0;
-}
-
static const struct pedit_headers zero_masks = {};
-static int
-parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
-{
- u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
- int err = -EOPNOTSUPP;
- u32 mask, val, offset;
- u8 htype;
-
- htype = act->mangle.htype;
- err = -EOPNOTSUPP; /* can't be all optimistic */
-
- if (htype == FLOW_ACT_MANGLE_UNSPEC) {
- NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
- goto out_err;
- }
-
- if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
- NL_SET_ERR_MSG_MOD(extack,
- "The pedit offload action is not supported");
- goto out_err;
- }
-
- mask = act->mangle.mask;
- val = act->mangle.val;
- offset = act->mangle.offset;
-
- err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
- if (err)
- goto out_err;
-
- hdrs[cmd].pedits++;
-
- return 0;
-out_err:
- return err;
-}
-
-static int
-parse_pedit_to_reformat(const struct flow_action_entry *act,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct netlink_ext_ack *extack)
-{
- u32 mask, val, offset;
- u32 *p;
-
- if (act->id != FLOW_ACTION_MANGLE) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
- return -EOPNOTSUPP;
- }
-
- if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
- NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
- return -EOPNOTSUPP;
- }
-
- mask = ~act->mangle.mask;
- val = act->mangle.val;
- offset = act->mangle.offset;
- p = (u32 *)&parse_attr->eth;
- *(p + (offset >> 2)) |= (val & mask);
-
- return 0;
-}
-
-static int parse_tc_pedit_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
-{
- if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
- return parse_pedit_to_reformat(act, parse_attr, extack);
-
- return parse_pedit_to_modify_hdr(priv, act, namespace,
- parse_attr, hdrs, extack);
-}
-
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
@@ -3062,39 +2974,10 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
return 0;
out_dealloc_parsed_actions:
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
return err;
}
-static bool csum_offload_supported(struct mlx5e_priv *priv,
- u32 action,
- u32 update_flags,
- struct netlink_ext_ack *extack)
-{
- u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
- TCA_CSUM_UPDATE_FLAG_UDP;
-
- /* The HW recalcs checksums only if re-writing headers */
- if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
- NL_SET_ERR_MSG_MOD(extack,
- "TC csum action is only offloaded with pedit");
- netdev_warn(priv->netdev,
- "TC csum action is only offloaded with pedit\n");
- return false;
- }
-
- if (update_flags & ~prot_flags) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't offload TC csum action for some header/s");
- netdev_warn(priv->netdev,
- "can't offload TC csum action for some header/s - flags %#x\n",
- update_flags);
- return false;
- }
-
- return true;
-}
-
struct ip_ttl_word {
__u8 ttl;
__u8 protocol;
@@ -3217,8 +3100,8 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
u8 ip_proto;
int i;
- headers_c = get_match_headers_criteria(actions, spec);
- headers_v = get_match_headers_value(actions, spec);
+ headers_c = mlx5e_get_match_headers_criteria(actions, spec);
+ headers_v = mlx5e_get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
/* for non-IP we only re-write MACs, so we're okay */
@@ -3325,7 +3208,7 @@ static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv
return priv->mdev == peer_priv->mdev;
}
-static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
u64 fsystem_guid, psystem_guid;
@@ -3339,126 +3222,45 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
-static bool same_vf_reps(struct mlx5e_priv *priv,
- struct net_device *out_dev)
-{
- return mlx5e_eswitch_vf_rep(priv->netdev) &&
- priv->netdev == out_dev;
-}
-
-static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
- const struct flow_action_entry *act,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- u32 *action, struct netlink_ext_ack *extack)
-{
- u16 mask16 = VLAN_VID_MASK;
- u16 val16 = act->vlan.vid & VLAN_VID_MASK;
- const struct flow_action_entry pedit_act = {
- .id = FLOW_ACTION_MANGLE,
- .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
- .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
- .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
- .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
- };
- u8 match_prio_mask, match_prio_val;
- void *headers_c, *headers_v;
- int err;
-
- headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
- headers_v = get_match_headers_value(*action, &parse_attr->spec);
-
- if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
- MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
- NL_SET_ERR_MSG_MOD(extack,
- "VLAN rewrite action must have VLAN protocol match");
- return -EOPNOTSUPP;
- }
-
- match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
- match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
- if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Changing VLAN prio is not supported");
- return -EOPNOTSUPP;
- }
-
- err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
- *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
- return err;
-}
-
static int
-add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- u32 *action, struct netlink_ext_ack *extack)
-{
- const struct flow_action_entry prio_tag_act = {
- .vlan.vid = 0,
- .vlan.prio =
- MLX5_GET(fte_match_set_lyr_2_4,
- get_match_headers_value(*action,
- &parse_attr->spec),
- first_prio) &
- MLX5_GET(fte_match_set_lyr_2_4,
- get_match_headers_criteria(*action,
- &parse_attr->spec),
- first_prio),
- };
-
- return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- &prio_tag_act, parse_attr, hdrs, action,
- extack);
-}
-
-static int validate_goto_chain(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- const struct flow_action_entry *act,
- u32 actions,
- struct netlink_ext_ack *extack)
+parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action)
{
- bool is_esw = mlx5e_is_eswitch_flow(flow);
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5_flow_attr *attr = flow->attr;
- bool ft_flow = mlx5e_is_ft_flow(flow);
- u32 dest_chain = act->chain_index;
- struct mlx5_fs_chains *chains;
- struct mlx5_eswitch *esw;
- u32 reformat_and_fwd;
- u32 max_chain;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_priv *priv = flow->priv;
+ const struct flow_action_entry *act;
+ struct mlx5e_tc_act *tc_act;
+ int err, i;
- esw = priv->mdev->priv.eswitch;
- chains = is_esw ? esw_chains(esw) : nic_chains(priv);
- max_chain = mlx5_chains_get_chain_range(chains);
- reformat_and_fwd = is_esw ?
- MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
- MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
-
- if (ft_flow) {
- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
- return -EOPNOTSUPP;
- }
+ ns_type = mlx5e_get_flow_namespace(flow);
- if (!mlx5_chains_backwards_supported(chains) &&
- dest_chain <= attr->chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto lower numbered chain isn't supported");
- return -EOPNOTSUPP;
- }
+ flow_action_for_each(i, act, flow_action) {
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act) {
+ NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
+ return -EOPNOTSUPP;
+ }
- if (dest_chain > max_chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Requested destination chain is out of supported range");
- return -EOPNOTSUPP;
+ if (!tc_act->can_offload(parse_state, act, i))
+ return -EOPNOTSUPP;
+
+ err = tc_act->parse_action(parse_state, act, priv, attr);
+ if (err)
+ return err;
}
- if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
- !reformat_and_fwd) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto chain is not allowed if action has reformat or decap");
- return -EOPNOTSUPP;
+ flow_action_for_each(i, act, flow_action) {
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act || !tc_act->post_parse ||
+ !tc_act->can_offload(parse_state, act, i))
+ continue;
+
+ err = tc_act->post_parse(parse_state, priv, attr);
+ if (err)
+ return err;
}
return 0;
@@ -3479,19 +3281,19 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
!hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
return 0;
- ns_type = get_flow_name_space(flow);
+ ns_type = mlx5e_get_flow_namespace(flow);
err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
&attr->action, extack);
if (err)
return err;
- /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
if (parse_attr->mod_hdr_acts.num_actions > 0)
return 0;
+ /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
return 0;
@@ -3504,19 +3306,9 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
}
static int
-parse_tc_nic_actions(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+flow_action_supported(struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
- struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
- struct pedit_headers_action hdrs[2] = {};
- const struct flow_action_entry *act;
- struct mlx5_nic_flow_attr *nic_attr;
- u32 action = 0;
- int err, i;
-
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
@@ -3528,108 +3320,35 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- nic_attr = attr->nic_attr;
- nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- parse_attr = attr->parse_attr;
-
- flow_action_for_each(i, act, flow_action) {
- switch (act->id) {
- case FLOW_ACTION_ACCEPT:
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- break;
- case FLOW_ACTION_DROP:
- action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- break;
- case FLOW_ACTION_MANGLE:
- case FLOW_ACTION_ADD:
- err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, NULL, extack);
- if (err)
- return err;
-
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- break;
- case FLOW_ACTION_VLAN_MANGLE:
- err = add_vlan_rewrite_action(priv,
- MLX5_FLOW_NAMESPACE_KERNEL,
- act, parse_attr, hdrs,
- &action, extack);
- if (err)
- return err;
-
- break;
- case FLOW_ACTION_CSUM:
- if (csum_offload_supported(priv, action,
- act->csum_flags,
- extack))
- break;
-
- return -EOPNOTSUPP;
- case FLOW_ACTION_REDIRECT: {
- struct net_device *peer_dev = act->dev;
-
- if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
- same_hw_devs(priv, netdev_priv(peer_dev))) {
- parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
- flow_flag_set(flow, HAIRPIN);
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "device is not on same HW, can't offload");
- netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
- peer_dev->name);
- return -EOPNOTSUPP;
- }
- }
- break;
- case FLOW_ACTION_MARK: {
- u32 mark = act->mark;
-
- if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
- NL_SET_ERR_MSG_MOD(extack,
- "Bad flow mark - only 16 bit is supported");
- return -EOPNOTSUPP;
- }
-
- nic_attr->flow_tag = mark;
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- }
- break;
- case FLOW_ACTION_GOTO:
- err = validate_goto_chain(priv, flow, act, action,
- extack);
- if (err)
- return err;
+ return 0;
+}
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->dest_chain = act->chain_index;
- break;
- case FLOW_ACTION_CT:
- err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
- &parse_attr->mod_hdr_acts,
- act, extack);
- if (err)
- return err;
+static int
+parse_tc_nic_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_tc_act_parse_state *parse_state;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct pedit_headers_action *hdrs;
+ int err;
- flow_flag_set(flow, CT);
- break;
- default:
- NL_SET_ERR_MSG_MOD(extack,
- "The offload action is not supported in NIC action");
- return -EOPNOTSUPP;
- }
- }
+ err = flow_action_supported(flow_action, extack);
+ if (err)
+ return err;
- attr->action = action;
+ attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ parse_attr = attr->parse_attr;
+ parse_state = &parse_attr->parse_state;
+ mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
+ parse_state->ct_priv = get_ct_priv(priv);
+ hdrs = parse_state->hdrs;
- if (attr->dest_chain && parse_attr->mirred_ifindex[0]) {
- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
- return -EOPNOTSUPP;
- }
+ err = parse_tc_actions(parse_state, flow_action);
+ if (err)
+ return err;
err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
if (err)
@@ -3651,147 +3370,7 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
mlx5e_eswitch_vf_rep(priv->netdev) &&
mlx5e_eswitch_vf_rep(peer_netdev) &&
- same_hw_devs(priv, peer_priv));
-}
-
-static int parse_tc_vlan_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act,
- struct mlx5_esw_flow_attr *attr,
- u32 *action,
- struct netlink_ext_ack *extack)
-{
- u8 vlan_idx = attr->total_vlan;
-
- if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
- NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
- return -EOPNOTSUPP;
- }
-
- switch (act->id) {
- case FLOW_ACTION_VLAN_POP:
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan pop action is not supported");
- return -EOPNOTSUPP;
- }
-
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
- } else {
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- }
- break;
- case FLOW_ACTION_VLAN_PUSH:
- attr->vlan_vid[vlan_idx] = act->vlan.vid;
- attr->vlan_prio[vlan_idx] = act->vlan.prio;
- attr->vlan_proto[vlan_idx] = act->vlan.proto;
- if (!attr->vlan_proto[vlan_idx])
- attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
-
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan push action is not supported for vlan depth > 1");
- return -EOPNOTSUPP;
- }
-
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
- } else {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
- (act->vlan.proto != htons(ETH_P_8021Q) ||
- act->vlan.prio)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan push action is not supported");
- return -EOPNOTSUPP;
- }
-
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- }
- break;
- default:
- NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
- return -EINVAL;
- }
-
- attr->total_vlan = vlan_idx + 1;
-
- return 0;
-}
-
-static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
- struct net_device *out_dev)
-{
- struct net_device *fdb_out_dev = out_dev;
- struct net_device *uplink_upper;
-
- rcu_read_lock();
- uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
- if (uplink_upper && netif_is_lag_master(uplink_upper) &&
- uplink_upper == out_dev) {
- fdb_out_dev = uplink_dev;
- } else if (netif_is_lag_master(out_dev)) {
- fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
- if (fdb_out_dev &&
- (!mlx5e_eswitch_rep(fdb_out_dev) ||
- !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
- fdb_out_dev = NULL;
- }
- rcu_read_unlock();
- return fdb_out_dev;
-}
-
-static int add_vlan_push_action(struct mlx5e_priv *priv,
- struct mlx5_flow_attr *attr,
- struct net_device **out_dev,
- u32 *action,
- struct netlink_ext_ack *extack)
-{
- struct net_device *vlan_dev = *out_dev;
- struct flow_action_entry vlan_act = {
- .id = FLOW_ACTION_VLAN_PUSH,
- .vlan.vid = vlan_dev_vlan_id(vlan_dev),
- .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
- .vlan.prio = 0,
- };
- int err;
-
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
- if (err)
- return err;
-
- rcu_read_lock();
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
- rcu_read_unlock();
- if (!*out_dev)
- return -ENODEV;
-
- if (is_vlan_dev(*out_dev))
- err = add_vlan_push_action(priv, attr, out_dev, action, extack);
-
- return err;
-}
-
-static int add_vlan_pop_action(struct mlx5e_priv *priv,
- struct mlx5_flow_attr *attr,
- u32 *action,
- struct netlink_ext_ack *extack)
-{
- struct flow_action_entry vlan_act = {
- .id = FLOW_ACTION_VLAN_POP,
- };
- int nest_level, err = 0;
-
- nest_level = attr->parse_attr->filter_dev->lower_level -
- priv->netdev->lower_level;
- while (nest_level--) {
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
- if (err)
- return err;
- }
-
- return err;
+ mlx5e_same_hw_devs(priv, peer_priv));
}
static bool same_hw_reps(struct mlx5e_priv *priv,
@@ -3803,7 +3382,7 @@ static bool same_hw_reps(struct mlx5e_priv *priv,
return mlx5e_eswitch_rep(priv->netdev) &&
mlx5e_eswitch_rep(peer_netdev) &&
- same_hw_devs(priv, peer_priv);
+ mlx5e_same_hw_devs(priv, peer_priv);
}
static bool is_lag_dev(struct mlx5e_priv *priv,
@@ -3827,66 +3406,6 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
same_port_devs(priv, netdev_priv(out_dev));
}
-static bool is_duplicated_output_device(struct net_device *dev,
- struct net_device *out_dev,
- int *ifindexes, int if_count,
- struct netlink_ext_ack *extack)
-{
- int i;
-
- for (i = 0; i < if_count; i++) {
- if (ifindexes[i] == out_dev->ifindex) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't duplicate output to same device");
- netdev_err(dev, "can't duplicate output to same device: %s\n",
- out_dev->name);
- return true;
- }
- }
-
- return false;
-}
-
-static int verify_uplink_forwarding(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct net_device *out_dev,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *rep_priv;
-
- /* Forwarding non-encapsulated traffic between
- * uplink ports is allowed only if the
- * termination_table_raw_traffic cap is set.
- *
- * The input vport was stored in attr->in_rep.
- * In the LAG case, *priv* is the private data of
- * the uplink, which may not be the input vport.
- */
- rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
-
- if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
- mlx5e_eswitch_uplink_rep(out_dev)))
- return 0;
-
- if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
- termination_table_raw_traffic)) {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are both uplink, can't offload forwarding");
- pr_err("devices %s %s are both uplink, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
- return -EOPNOTSUPP;
- } else if (out_dev != rep_priv->netdev) {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are not the same uplink, can't offload forwarding");
- pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
int ifindex,
@@ -3926,386 +3445,33 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+static int
+parse_tc_fdb_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
- struct pedit_headers_action hdrs[2] = {};
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_act_parse_state *parse_state;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5e_sample_attr sample_attr = {};
- const struct ip_tunnel_info *info = NULL;
struct mlx5_flow_attr *attr = flow->attr;
- int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
- bool ft_flow = mlx5e_is_ft_flow(flow);
- const struct flow_action_entry *act;
struct mlx5_esw_flow_attr *esw_attr;
- bool encap = false, decap = false;
- u32 action = attr->action;
- int err, i, if_count = 0;
- bool ptype_host = false;
- bool mpls_push = false;
-
- if (!flow_action_has_entries(flow_action)) {
- NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
- return -EINVAL;
- }
+ struct pedit_headers_action *hdrs;
+ int err;
- if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
- NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
- return -EOPNOTSUPP;
- }
+ err = flow_action_supported(flow_action, extack);
+ if (err)
+ return err;
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
+ parse_state = &parse_attr->parse_state;
+ mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
+ parse_state->ct_priv = get_ct_priv(priv);
+ hdrs = parse_state->hdrs;
- flow_action_for_each(i, act, flow_action) {
- switch (act->id) {
- case FLOW_ACTION_ACCEPT:
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
- break;
- case FLOW_ACTION_PTYPE:
- if (act->ptype != PACKET_HOST) {
- NL_SET_ERR_MSG_MOD(extack,
- "skbedit ptype is only supported with type host");
- return -EOPNOTSUPP;
- }
-
- ptype_host = true;
- break;
- case FLOW_ACTION_DROP:
- action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- break;
- case FLOW_ACTION_TRAP:
- if (!flow_offload_has_one_action(flow_action)) {
- NL_SET_ERR_MSG_MOD(extack,
- "action trap is supported as a sole action only");
- return -EOPNOTSUPP;
- }
- action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT);
- attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- break;
- case FLOW_ACTION_MPLS_PUSH:
- if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
- reformat_l2_to_l3_tunnel) ||
- act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls push is supported only for mpls_uc protocol");
- return -EOPNOTSUPP;
- }
- mpls_push = true;
- break;
- case FLOW_ACTION_MPLS_POP:
- /* we only support mpls pop if it is the first action
- * and the filter net device is bareudp. Subsequent
- * actions can be pedit and the last can be mirred
- * egress redirect.
- */
- if (i) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls pop supported only as first action");
- return -EOPNOTSUPP;
- }
- if (!netif_is_bareudp(parse_attr->filter_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls pop supported only on bareudp devices");
- return -EOPNOTSUPP;
- }
-
- parse_attr->eth.h_proto = act->mpls_pop.proto;
- action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_flag_set(flow, L3_TO_L2_DECAP);
- break;
- case FLOW_ACTION_MANGLE:
- case FLOW_ACTION_ADD:
- err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, flow, extack);
- if (err)
- return err;
-
- if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- esw_attr->split_count = esw_attr->out_count;
- }
- break;
- case FLOW_ACTION_CSUM:
- if (csum_offload_supported(priv, action,
- act->csum_flags, extack))
- break;
-
- return -EOPNOTSUPP;
- case FLOW_ACTION_REDIRECT_INGRESS: {
- struct net_device *out_dev;
-
- out_dev = act->dev;
- if (!out_dev)
- return -EOPNOTSUPP;
-
- if (!netif_is_ovs_master(out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to ingress is supported only for OVS internal ports");
- return -EOPNOTSUPP;
- }
-
- if (netif_is_ovs_master(parse_attr->filter_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to ingress is not supported from internal port");
- return -EOPNOTSUPP;
- }
-
- if (!ptype_host) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to int port ingress requires ptype=host action");
- return -EOPNOTSUPP;
- }
-
- if (esw_attr->out_count) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to int port ingress is supported only as single destination");
- return -EOPNOTSUPP;
- }
-
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
-
- err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
- MLX5E_TC_INT_PORT_INGRESS,
- &action, esw_attr->out_count);
- if (err)
- return err;
-
- esw_attr->out_count++;
-
- break;
- }
- case FLOW_ACTION_REDIRECT:
- case FLOW_ACTION_MIRRED: {
- struct mlx5e_priv *out_priv;
- struct net_device *out_dev;
-
- out_dev = act->dev;
- if (!out_dev) {
- /* out_dev is NULL when filters with
- * non-existing mirred device are replayed to
- * the driver.
- */
- return -EINVAL;
- }
-
- if (mpls_push && !netif_is_bareudp(out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls is supported only through a bareudp device");
- return -EOPNOTSUPP;
- }
-
- if (ft_flow && out_dev == priv->netdev) {
- /* Ignore forward to self rules generated
- * by adding both mlx5 devs to the flow table
- * block on a normal nft offload setup.
- */
- return -EOPNOTSUPP;
- }
-
- if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't support more output ports, can't offload forwarding");
- netdev_warn(priv->netdev,
- "can't support more than %d output ports, can't offload forwarding\n",
- esw_attr->out_count);
- return -EOPNOTSUPP;
- }
-
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- if (encap) {
- parse_attr->mirred_ifindex[esw_attr->out_count] =
- out_dev->ifindex;
- parse_attr->tun_info[esw_attr->out_count] =
- mlx5e_dup_tun_info(info);
- if (!parse_attr->tun_info[esw_attr->out_count])
- return -ENOMEM;
- encap = false;
- esw_attr->dests[esw_attr->out_count].flags |=
- MLX5_ESW_DEST_ENCAP;
- esw_attr->out_count++;
- /* attr->dests[].rep is resolved when we
- * handle encap
- */
- } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
-
- if (is_duplicated_output_device(priv->netdev,
- out_dev,
- ifindexes,
- if_count,
- extack))
- return -EOPNOTSUPP;
-
- ifindexes[if_count] = out_dev->ifindex;
- if_count++;
-
- out_dev = get_fdb_out_dev(uplink_dev, out_dev);
- if (!out_dev)
- return -ENODEV;
-
- if (is_vlan_dev(out_dev)) {
- err = add_vlan_push_action(priv, attr,
- &out_dev,
- &action, extack);
- if (err)
- return err;
- }
-
- if (is_vlan_dev(parse_attr->filter_dev)) {
- err = add_vlan_pop_action(priv, attr,
- &action, extack);
- if (err)
- return err;
- }
-
- if (netif_is_macvlan(out_dev))
- out_dev = macvlan_dev_real_dev(out_dev);
-
- err = verify_uplink_forwarding(priv, flow, out_dev, extack);
- if (err)
- return err;
-
- if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are not on same switch HW, can't offload forwarding");
- return -EOPNOTSUPP;
- }
-
- if (same_vf_reps(priv, out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't forward from a VF to itself");
- return -EOPNOTSUPP;
- }
-
- out_priv = netdev_priv(out_dev);
- rpriv = out_priv->ppriv;
- esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
- esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
- esw_attr->out_count++;
- } else if (netif_is_ovs_master(out_dev)) {
- err = mlx5e_set_fwd_to_int_port_actions(priv, attr,
- out_dev->ifindex,
- MLX5E_TC_INT_PORT_EGRESS,
- &action,
- esw_attr->out_count);
- if (err)
- return err;
-
- esw_attr->out_count++;
- } else if (parse_attr->filter_dev != priv->netdev) {
- /* All mlx5 devices are called to configure
- * high level device filters. Therefore, the
- * *attempt* to install a filter on invalid
- * eswitch should not trigger an explicit error
- */
- return -EINVAL;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are not on same switch HW, can't offload forwarding");
- netdev_warn(priv->netdev,
- "devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name,
- out_dev->name);
- return -EOPNOTSUPP;
- }
- }
- break;
- case FLOW_ACTION_TUNNEL_ENCAP:
- info = act->tunnel;
- if (info) {
- encap = true;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "Zero tunnel attributes is not supported");
- return -EOPNOTSUPP;
- }
-
- break;
- case FLOW_ACTION_VLAN_PUSH:
- case FLOW_ACTION_VLAN_POP:
- if (act->id == FLOW_ACTION_VLAN_PUSH &&
- (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
- /* Replace vlan pop+push with vlan modify */
- action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- err = add_vlan_rewrite_action(priv,
- MLX5_FLOW_NAMESPACE_FDB,
- act, parse_attr, hdrs,
- &action, extack);
- } else {
- err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack);
- }
- if (err)
- return err;
-
- esw_attr->split_count = esw_attr->out_count;
- break;
- case FLOW_ACTION_VLAN_MANGLE:
- err = add_vlan_rewrite_action(priv,
- MLX5_FLOW_NAMESPACE_FDB,
- act, parse_attr, hdrs,
- &action, extack);
- if (err)
- return err;
-
- esw_attr->split_count = esw_attr->out_count;
- break;
- case FLOW_ACTION_TUNNEL_DECAP:
- decap = true;
- break;
- case FLOW_ACTION_GOTO:
- err = validate_goto_chain(priv, flow, act, action,
- extack);
- if (err)
- return err;
-
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->dest_chain = act->chain_index;
- break;
- case FLOW_ACTION_CT:
- if (flow_flag_test(flow, SAMPLE)) {
- NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
- return -EOPNOTSUPP;
- }
- err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
- &parse_attr->mod_hdr_acts,
- act, extack);
- if (err)
- return err;
-
- flow_flag_set(flow, CT);
- esw_attr->split_count = esw_attr->out_count;
- break;
- case FLOW_ACTION_SAMPLE:
- if (flow_flag_test(flow, CT)) {
- NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
- return -EOPNOTSUPP;
- }
- sample_attr.rate = act->sample.rate;
- sample_attr.group_num = act->sample.psample_group->group_num;
- if (act->sample.truncate)
- sample_attr.trunc_size = act->sample.trunc_size;
- flow_flag_set(flow, SAMPLE);
- break;
- default:
- NL_SET_ERR_MSG_MOD(extack,
- "The offload action is not supported in FDB action");
- return -EOPNOTSUPP;
- }
- }
+ err = parse_tc_actions(parse_state, flow_action);
+ if (err)
+ return err;
/* Forward to/from internal port can only have 1 dest */
if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
@@ -4315,23 +3481,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- /* always set IP version for indirect table handling */
- attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
-
- if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
- action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
- /* For prio tag mode, replace vlan pop with rewrite vlan prio
- * tag rewrite.
- */
- action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
- &action, extack);
- if (err)
- return err;
- }
-
- attr->action = action;
-
err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
if (err)
return err;
@@ -4339,30 +3488,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
- if (attr->dest_chain && decap) {
- /* It can be supported if we'll create a mapping for
- * the tunnel device only (without tunnel), and set
- * this tunnel id with this decap flow.
- *
- * On restore (miss), we'll just set this saved tunnel
- * device.
- */
-
- NL_SET_ERR_MSG(extack, "Decap with goto isn't supported");
- netdev_warn(priv->netdev, "Decap with goto isn't supported");
- return -EOPNOTSUPP;
- }
-
- /* Allocate sample attribute only when there is a sample action and
- * no errors after parsing.
- */
- if (flow_flag_test(flow, SAMPLE)) {
- attr->sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
- if (!attr->sample_attr)
- return -ENOMEM;
- *attr->sample_attr = sample_attr;
- }
-
return 0;
}
@@ -4459,7 +3584,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
flow->cookie = f->cookie;
flow->priv = priv;
- attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
+ attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
if (!attr)
goto err_free;
@@ -4554,6 +3679,9 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+ /* always set IP version for indirect table handling */
+ flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
+
err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
@@ -4715,7 +3843,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
err_free:
flow_flag_set(flow, FAILED);
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return err;
@@ -5015,14 +4143,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = ma->common.extack;
- if (!mlx5_esw_qos_enabled(esw)) {
- NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
- return -EOPNOTSUPP;
- }
-
if (ma->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
return -EINVAL;
@@ -5064,7 +4186,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
u16 peer_vhca_id;
int bkt;
- if (!same_hw_devs(priv, peer_priv))
+ if (!mlx5e_same_hw_devs(priv, peer_priv))
return;
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
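The en_tc.c hunks above replace the long per-action switch statements in parse_tc_nic_actions()/parse_tc_fdb_actions() with a table-driven dispatcher: each flow action id resolves to an ops struct via mlx5e_tc_act_get(), and the parser runs a parse pass followed by a post-parse pass. A minimal sketch of that shape, with simplified stand-in types (act_ops and act_ops_get are illustrative, not the kernel API):

#include <errno.h>
#include <stdbool.h>

struct act_parse_state { int n_parsed; };
struct act_entry { int id; };

struct act_ops {
	bool (*can_offload)(struct act_parse_state *state,
			    const struct act_entry *act, int index);
	int (*parse_action)(struct act_parse_state *state,
			    const struct act_entry *act);
	int (*post_parse)(struct act_parse_state *state); /* optional hook */
};

/* assumed registry lookup: one ops struct per action id, NULL if unknown */
extern const struct act_ops *act_ops_get(int action_id);

static int parse_actions(struct act_parse_state *state,
			 const struct act_entry *acts, int n)
{
	const struct act_ops *ops;
	int i, err;

	/* pass 1: every action must be known, offloadable and parseable */
	for (i = 0; i < n; i++) {
		ops = act_ops_get(acts[i].id);
		if (!ops || !ops->can_offload(state, &acts[i], i))
			return -EOPNOTSUPP;
		err = ops->parse_action(state, &acts[i]);
		if (err)
			return err;
	}

	/* pass 2: run the optional post-parse hooks */
	for (i = 0; i < n; i++) {
		ops = act_ops_get(acts[i].id);
		if (!ops->post_parse || !ops->can_offload(state, &acts[i], i))
			continue;
		err = ops->post_parse(state);
		if (err)
			return err;
	}
	return 0;
}

The payoff is the same as in the kernel rework: supporting a new offloadable action means registering an ops struct instead of growing two switch statements in parallel.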
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index fdb222793027..5ffae9b13066 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -151,7 +151,6 @@ enum {
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
-bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
@@ -247,11 +246,6 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow);
-int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
- int namespace,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
-void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
-
struct mlx5e_tc_flow;
u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
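en_tc.h above drops the alloc_mod_hdr_actions()/dealloc_mod_hdr_actions() prototypes in favor of the mlx5e_mod_hdr_*() helpers. Judging from the removed en_tc.c implementation, the allocator grows the action array by doubling with krealloc() up to a HW-reported maximum and zeroes only the new tail; the new helper also hands back a pointer to the next free slot instead of leaving the offset arithmetic to the caller. A userspace sketch of that growth policy (ACTION_SIZE and MAX_HW_ACTIONS are illustrative stand-ins):

#include <stdlib.h>
#include <string.h>

#define ACTION_SIZE    64   /* stand-in for MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) */
#define MAX_HW_ACTIONS 16   /* stand-in for the max_modify_header_actions capability */

struct mod_hdr_acts {
	void *actions;
	int num_actions;    /* slots filled so far; caller bumps after filling */
	int max_actions;    /* slots currently allocated */
};

/* return a pointer to the next free action slot, growing the array if full */
static void *mod_hdr_alloc(struct mod_hdr_acts *acts)
{
	if (acts->num_actions == acts->max_actions) {
		size_t old_sz = (size_t)acts->max_actions * ACTION_SIZE;
		int new_max = acts->actions ? acts->max_actions * 2 : 1;
		size_t new_sz;
		void *p;

		if (new_max > MAX_HW_ACTIONS)
			new_max = MAX_HW_ACTIONS;
		if (new_max == acts->max_actions)
			return NULL;	/* out of HW slots: -ENOSPC in the kernel */

		new_sz = (size_t)new_max * ACTION_SIZE;
		p = realloc(acts->actions, new_sz);
		if (!p)
			return NULL;	/* -ENOMEM in the kernel */
		memset((char *)p + old_sz, 0, new_sz - old_sz);
		acts->actions = p;
		acts->max_actions = new_max;
	}
	return (char *)acts->actions + (size_t)acts->num_actions * ACTION_SIZE;
}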
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 792e0d6aa861..48a45aa54a3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -19,6 +19,7 @@
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
+#include "devlink.h"
enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -58,6 +59,8 @@ struct mlx5_eq_table {
struct mutex lock; /* sync async eqs creations */
int num_comp_eqs;
struct mlx5_irq_table *irq_table;
+ struct mlx5_irq **comp_irqs;
+ struct mlx5_irq *ctrl_irq;
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rmap;
#endif
@@ -265,8 +268,8 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
struct mlx5_priv *priv = &dev->priv;
- u16 vecidx = param->irq_index;
__be64 *pas;
+ u16 vecidx;
void *eqc;
int inlen;
u32 *in;
@@ -288,20 +291,16 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
init_eq_buf(eq);
- eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);
- if (IS_ERR(eq->irq)) {
- err = PTR_ERR(eq->irq);
- goto err_buf;
- }
-
+ eq->irq = param->irq;
vecidx = mlx5_irq_get_index(eq->irq);
+
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
- goto err_irq;
+ goto err_buf;
}
pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
@@ -345,8 +344,6 @@ err_eq:
err_in:
kvfree(in);
-err_irq:
- mlx5_irq_release(eq->irq);
err_buf:
mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
@@ -400,7 +397,6 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- mlx5_irq_release(eq->irq);
mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
@@ -593,11 +589,8 @@ setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
eq->irq_nb.notifier_call = mlx5_eq_async_int;
spin_lock_init(&eq->lock);
- if (!zalloc_cpumask_var(&param->affinity, GFP_KERNEL))
- return -ENOMEM;
err = create_async_eq(dev, &eq->core, param);
- free_cpumask_var(param->affinity);
if (err) {
mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
return err;
@@ -622,17 +615,38 @@ static void cleanup_async_eq(struct mlx5_core_dev *dev,
name, err);
}
+static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ &val);
+ if (!err)
+ return val.vu32;
+ mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
+ return MLX5_NUM_ASYNC_EQE;
+}
static int create_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_param param = {};
int err;
+ /* All the async_eqs use a single IRQ: request one IRQ and share its
+ * index among all the async_eqs of this device.
+ */
+ table->ctrl_irq = mlx5_ctrl_irq_request(dev);
+ if (IS_ERR(table->ctrl_irq))
+ return PTR_ERR(table->ctrl_irq);
+
MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
param = (struct mlx5_eq_param) {
- .irq_index = MLX5_IRQ_EQ_CTRL,
+ .irq = table->ctrl_irq,
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
@@ -645,8 +659,8 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
- .irq_index = MLX5_IRQ_EQ_CTRL,
- .nent = MLX5_NUM_ASYNC_EQE,
+ .irq = table->ctrl_irq,
+ .nent = async_eq_depth_devlink_param_get(dev),
};
gather_async_events_mask(dev, param.mask);
@@ -655,7 +669,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
goto err2;
param = (struct mlx5_eq_param) {
- .irq_index = MLX5_IRQ_EQ_CTRL,
+ .irq = table->ctrl_irq,
.nent = /* TODO: sriov max_vf + */ 1,
.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
@@ -674,6 +688,7 @@ err2:
err1:
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
+ mlx5_ctrl_irq_release(table->ctrl_irq);
return err;
}
@@ -688,6 +703,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
+ mlx5_ctrl_irq_release(table->ctrl_irq);
}
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
@@ -715,12 +731,10 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev,
struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
int err;
- if (!cpumask_available(param->affinity))
- return ERR_PTR(-EINVAL);
-
if (!eq)
return ERR_PTR(-ENOMEM);
+ param->irq = dev->priv.eq_table->ctrl_irq;
err = create_async_eq(dev, eq, param);
if (err) {
kvfree(eq);
@@ -780,6 +794,54 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
+static void comp_irqs_release(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+
+ if (mlx5_core_is_sf(dev))
+ mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs);
+ else
+ mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs);
+ kfree(table->comp_irqs);
+}
+
+static int comp_irqs_request(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ int ncomp_eqs = table->num_comp_eqs;
+ u16 *cpus;
+ int ret;
+ int i;
+
+ ncomp_eqs = table->num_comp_eqs;
+ table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL);
+ if (!table->comp_irqs)
+ return -ENOMEM;
+ if (mlx5_core_is_sf(dev)) {
+ ret = mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs);
+ if (ret < 0)
+ goto free_irqs;
+ return ret;
+ }
+
+ cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
+ if (!cpus) {
+ ret = -ENOMEM;
+ goto free_irqs;
+ }
+ for (i = 0; i < ncomp_eqs; i++)
+ cpus[i] = cpumask_local_spread(i, dev->priv.numa_node);
+ ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs);
+ kfree(cpus);
+ if (ret < 0)
+ goto free_irqs;
+ return ret;
+
+free_irqs:
+ kfree(table->comp_irqs);
+ return ret;
+}
+
static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -794,6 +856,22 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
tasklet_disable(&eq->tasklet_ctx.task);
kfree(eq);
}
+ comp_irqs_release(dev);
+}
+
+static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ &val);
+ if (!err)
+ return val.vu32;
+ mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
+ return MLX5_COMP_EQ_SIZE;
}
static int create_comp_eqs(struct mlx5_core_dev *dev)
@@ -805,12 +883,13 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
int err;
int i;
+ ncomp_eqs = comp_irqs_request(dev);
+ if (ncomp_eqs < 0)
+ return ncomp_eqs;
INIT_LIST_HEAD(&table->comp_eqs_list);
- ncomp_eqs = table->num_comp_eqs;
- nent = MLX5_COMP_EQ_SIZE;
+ nent = comp_eq_depth_devlink_param_get(dev);
for (i = 0; i < ncomp_eqs; i++) {
struct mlx5_eq_param param = {};
- int vecidx = i;
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
@@ -825,18 +904,11 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
eq->irq_nb.notifier_call = mlx5_eq_comp_int;
param = (struct mlx5_eq_param) {
- .irq_index = vecidx,
+ .irq = table->comp_irqs[i],
.nent = nent,
};
- if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {
- err = -ENOMEM;
- goto clean_eq;
- }
- cpumask_set_cpu(cpumask_local_spread(i, dev->priv.numa_node),
- param.affinity);
err = create_map_eq(dev, &eq->core, &param);
- free_cpumask_var(param.affinity);
if (err)
goto clean_eq;
err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
@@ -850,7 +922,9 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
list_add_tail(&eq->list, &table->comp_eqs_list);
}
+ table->num_comp_eqs = ncomp_eqs;
return 0;
+
clean_eq:
kfree(eq);
clean:
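Both async_eq_depth_devlink_param_get() and comp_eq_depth_devlink_param_get() above follow the same fallback pattern: prefer the devlink driverinit value, and fall back to the compile-time default when the lookup fails. A compact sketch of the pattern, with an assumed lookup helper rather than the real devlink API:

#include <stdint.h>

#define DEFAULT_EQ_DEPTH 4096u	/* stand-in for MLX5_NUM_ASYNC_EQE / MLX5_COMP_EQ_SIZE */

/* assumed lookup helper: 0 on success, negative errno on failure */
extern int devlink_param_u32_get(void *devlink, int param_id, uint32_t *val);

static uint32_t eq_depth_get(void *devlink, int param_id)
{
	uint32_t val;

	if (!devlink_param_u32_get(devlink, param_id, &val))
		return val;		/* user-configured depth wins */
	return DEFAULT_EQ_DEPTH;	/* otherwise fall back to the default */
}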
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index 425c91814b34..c275fe028b6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -14,6 +14,7 @@
#include "fs_core.h"
#include "esw/indir_table.h"
#include "lib/fs_chains.h"
+#include "en/mod_hdr.h"
#define MLX5_ESW_INDIR_TABLE_SIZE 128
#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
@@ -226,7 +227,7 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
goto err_handle;
}
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
rule->handle = handle;
rule->vni = esw_attr->rx_tun_attr->vni;
rule->mh = flow_act.modify_hdr;
@@ -243,7 +244,7 @@ err_table:
mlx5_modify_header_dealloc(esw->dev, flow_act.modify_hdr);
err_mod_hdr_alloc:
err_mod_hdr_regc1:
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
err_mod_hdr_regc0:
err_ethertype:
kfree(rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index df277a6cddc0..9d17206d1625 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -431,7 +431,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int err = 0;
if (!mlx5_esw_allowed(esw))
- return -EPERM;
+ return vlan ? -EPERM : 0;
if (vlan || qos)
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
@@ -522,9 +522,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_min_rate(esw, evport, min_rate, NULL);
- if (!err)
- err = mlx5_esw_qos_set_vport_max_rate(esw, evport, max_rate, NULL);
+ err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate);
mutex_unlock(&esw->state_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index d377ddc70fc7..11bbcd5f5b8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -204,10 +204,8 @@ static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divid
return 0;
}
-int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 min_rate,
- struct netlink_ext_ack *extack)
+static int esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+ u32 min_rate, struct netlink_ext_ack *extack)
{
u32 fw_max_bw_share, previous_min_rate;
bool min_rate_supported;
@@ -231,10 +229,8 @@ int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
return err;
}
-int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 max_rate,
- struct netlink_ext_ack *extack)
+static int esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+ u32 max_rate, struct netlink_ext_ack *extack)
{
u32 act_max_rate = max_rate;
bool max_rate_supported;
@@ -432,16 +428,13 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
}
static struct mlx5_esw_rate_group *
-esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_esw_rate_group *group;
u32 divider;
int err;
- if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
- return ERR_PTR(-EOPNOTSUPP);
-
group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
@@ -482,9 +475,32 @@ err_sched_elem:
return ERR_PTR(err);
}
-static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack);
+static void esw_qos_put(struct mlx5_eswitch *esw);
+
+static struct mlx5_esw_rate_group *
+esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_rate_group *group;
+ int err;
+
+ if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ err = esw_qos_get(esw, extack);
+ if (err)
+ return ERR_PTR(err);
+
+ group = __esw_qos_create_rate_group(esw, extack);
+ if (IS_ERR(group))
+ esw_qos_put(esw);
+
+ return group;
+}
+
+static int __esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+ struct mlx5_esw_rate_group *group,
+ struct netlink_ext_ack *extack)
{
u32 divider;
int err;
@@ -503,7 +519,21 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");
trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix);
+
kfree(group);
+
+ return err;
+}
+
+static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+ struct mlx5_esw_rate_group *group,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = __esw_qos_destroy_rate_group(esw, group, extack);
+ esw_qos_put(esw);
+
return err;
}
@@ -526,7 +556,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
return false;
}
-void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
+static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = esw->dev;
@@ -534,14 +564,10 @@ void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
int err;
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
- return;
+ return -EOPNOTSUPP;
if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
- return;
-
- mutex_lock(&esw->state_lock);
- if (esw->qos.enabled)
- goto unlock;
+ return -EOPNOTSUPP;
MLX5_SET(scheduling_context, tsar_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
@@ -555,75 +581,94 @@ void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
&esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
- goto unlock;
+ return err;
}
INIT_LIST_HEAD(&esw->qos.groups);
if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
- esw->qos.group0 = esw_qos_create_rate_group(esw, NULL);
+ esw->qos.group0 = __esw_qos_create_rate_group(esw, extack);
if (IS_ERR(esw->qos.group0)) {
esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n",
PTR_ERR(esw->qos.group0));
+ err = PTR_ERR(esw->qos.group0);
goto err_group0;
}
}
- esw->qos.enabled = true;
-unlock:
- mutex_unlock(&esw->state_lock);
- return;
+ refcount_set(&esw->qos.refcnt, 1);
+
+ return 0;
err_group0:
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- esw->qos.root_tsar_ix);
- if (err)
- esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
- mutex_unlock(&esw->state_lock);
+ if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
+ esw->qos.root_tsar_ix))
+ esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
+
+ return err;
}
-void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw)
+static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
- struct devlink *devlink = priv_to_devlink(esw->dev);
int err;
- devlink_rate_nodes_destroy(devlink);
- mutex_lock(&esw->state_lock);
- if (!esw->qos.enabled)
- goto unlock;
-
if (esw->qos.group0)
- esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
+ __esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix);
if (err)
esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
+}
- esw->qos.enabled = false;
-unlock:
- mutex_unlock(&esw->state_lock);
+static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ lockdep_assert_held(&esw->state_lock);
+
+ if (!refcount_inc_not_zero(&esw->qos.refcnt)) {
+ /* esw_qos_create() sets the refcount to 1 only on success.
+ * No need to decrement on failure.
+ */
+ err = esw_qos_create(esw, extack);
+ }
+
+ return err;
}
-int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share)
+static void esw_qos_put(struct mlx5_eswitch *esw)
+{
+ lockdep_assert_held(&esw->state_lock);
+ if (refcount_dec_and_test(&esw->qos.refcnt))
+ esw_qos_destroy(esw);
+}
+
+static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
{
int err;
lockdep_assert_held(&esw->state_lock);
- if (!esw->qos.enabled)
+ if (vport->qos.enabled)
return 0;
- if (vport->qos.enabled)
- return -EEXIST;
+ err = esw_qos_get(esw, extack);
+ if (err)
+ return err;
vport->qos.group = esw->qos.group0;
err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share);
- if (!err) {
- vport->qos.enabled = true;
- trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);
- }
+ if (err)
+ goto err_out;
+
+ vport->qos.enabled = true;
+ trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);
+
+ return 0;
+
+err_out:
+ esw_qos_put(esw);
return err;
}
@@ -633,7 +678,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
int err;
lockdep_assert_held(&esw->state_lock);
- if (!esw->qos.enabled || !vport->qos.enabled)
+ if (!vport->qos.enabled)
return;
WARN(vport->qos.group && vport->qos.group != esw->qos.group0,
"Disabling QoS on port before detaching it from group");
@@ -645,8 +690,27 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
vport->vport, err);
- vport->qos.enabled = false;
+ memset(&vport->qos, 0, sizeof(vport->qos));
trace_mlx5_esw_vport_qos_destroy(vport);
+
+ esw_qos_put(esw);
+}
+
+int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ u32 min_rate, u32 max_rate)
+{
+ int err;
+
+ lockdep_assert_held(&esw->state_lock);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, NULL);
+ if (err)
+ return err;
+
+ err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL);
+ if (!err)
+ err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL);
+
+ return err;
}
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
@@ -654,22 +718,29 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
u32 bitmask;
+ int err;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return PTR_ERR(vport);
- if (!vport->qos.enabled)
- return -EOPNOTSUPP;
-
- MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
- bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ mutex_lock(&esw->state_lock);
+ if (!vport->qos.enabled) {
+ /* Eswitch QoS wasn't enabled yet; enable it together with vport QoS. */
+ err = esw_qos_vport_enable(esw, vport, rate_mbps, vport->qos.bw_share, NULL);
+ } else {
+ MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
+
+ bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ err = mlx5_modify_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ ctx,
+ vport->qos.esw_tsar_ix,
+ bitmask);
+ }
+ mutex_unlock(&esw->state_lock);
- return mlx5_modify_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- ctx,
- vport->qos.esw_tsar_ix,
- bitmask);
+ return err;
}
#define MLX5_LINKSPEED_UNIT 125000 /* 1Mbps in Bps */
@@ -728,7 +799,12 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
return err;
mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ if (err)
+ goto unlock;
+
+ err = esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
@@ -749,7 +825,12 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
return err;
mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ if (err)
+ goto unlock;
+
+ err = esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
@@ -846,7 +927,9 @@ int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
int err;
mutex_lock(&esw->state_lock);
- err = esw_qos_vport_update_group(esw, vport, group, extack);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ if (!err)
+ err = esw_qos_vport_update_group(esw, vport, group, extack);
mutex_unlock(&esw->state_lock);
return err;
}
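The qos.c changes above convert eswitch QoS to on-demand creation: every entry point (vport rate set, devlink rate callbacks, group updates) first calls esw_qos_vport_enable(), which takes a reference on the eswitch-level QoS objects and creates them on first use, while mlx5_esw_qos_vport_disable() drops that reference through esw_qos_put(). A minimal sketch of this refcounted lazy-init idiom, with hypothetical create_root()/destroy_root() helpers standing in for the TSAR firmware commands (assumes the caller holds the protecting lock, as state_lock does here):

struct qos_state {
	refcount_t refcnt;	/* 0 means the QoS objects don't exist yet */
	u32 root_ix;		/* scheduling root, created on first get */
};

static int qos_get(struct qos_state *qos)
{
	int err;

	if (refcount_inc_not_zero(&qos->refcnt))
		return 0;	/* already created; just another user */
	err = create_root(&qos->root_ix);	/* hypothetical hw call */
	if (err)
		return err;
	refcount_set(&qos->refcnt, 1);	/* first user creates the objects */
	return 0;
}

static void qos_put(struct qos_state *qos)
{
	if (refcount_dec_and_test(&qos->refcnt))
		destroy_root(qos->root_ix);	/* last user tears them down */
}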
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 28451abe2d2f..0141e9d52037 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -6,18 +6,8 @@
#ifdef CONFIG_MLX5_ESWITCH
-int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 min_rate,
- struct netlink_ext_ack *extack);
-int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 max_rate,
- struct netlink_ext_ack *extack);
-void mlx5_esw_qos_create(struct mlx5_eswitch *esw);
-void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw);
-int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share);
+int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+				u32 min_rate, u32 max_rate);
void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 51a8cecc4a7c..458ec0bca1b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -781,9 +781,6 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
if (err)
return err;
- /* Attach vport to the eswitch rate limiter */
- mlx5_esw_qos_vport_enable(esw, vport, vport->qos.max_rate, vport->qos.bw_share);
-
if (mlx5_esw_is_manager_vport(esw, vport_num))
return 0;
@@ -1260,8 +1257,6 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
- mlx5_esw_qos_create(esw);
-
esw->mode = mode;
if (mode == MLX5_ESWITCH_LEGACY) {
@@ -1290,7 +1285,6 @@ abort:
if (mode == MLX5_ESWITCH_OFFLOADS)
mlx5_rescan_drivers(esw->dev);
- mlx5_esw_qos_destroy(esw);
mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1338,6 +1332,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
int old_mode;
lockdep_assert_held_write(&esw->mode_lock);
@@ -1367,7 +1362,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
if (old_mode == MLX5_ESWITCH_OFFLOADS)
mlx5_rescan_drivers(esw->dev);
- mlx5_esw_qos_destroy(esw);
+ devlink_rate_nodes_destroy(devlink);
+
mlx5_esw_acls_ns_cleanup(esw);
if (clear_vf)
@@ -1576,6 +1572,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
lockdep_register_key(&esw->mode_lock_key);
init_rwsem(&esw->mode_lock);
lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key);
+ refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1614,6 +1611,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
+ WARN_ON(refcount_read(&esw->qos.refcnt));
lockdep_unregister_key(&esw->mode_lock_key);
mutex_destroy(&esw->state_lock);
WARN_ON(!xa_empty(&esw->offloads.vhca_map));
@@ -1703,82 +1701,6 @@ bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}
-static bool
-is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
-{
- return vport_num == MLX5_VPORT_PF ||
- mlx5_eswitch_is_vf_vport(esw, vport_num) ||
- mlx5_esw_is_sf_vport(esw, vport_num);
-}
-
-int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
- u8 *hw_addr, int *hw_addr_len,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num))
- return -EOPNOTSUPP;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
- }
-
- mutex_lock(&esw->state_lock);
- if (vport->enabled) {
- ether_addr_copy(hw_addr, vport->info.mac);
- *hw_addr_len = ETH_ALEN;
- err = 0;
- }
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
- const u8 *hw_addr, int hw_addr_len,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw)) {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
- return PTR_ERR(esw);
- }
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num)) {
- NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
- return -EINVAL;
- }
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
- }
-
- mutex_lock(&esw->state_lock);
- if (vport->enabled)
- err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr);
- else
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
u16 vport, int link_state)
{
@@ -1835,8 +1757,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted;
- ivi->min_tx_rate = evport->qos.min_rate;
- ivi->max_tx_rate = evport->qos.max_rate;
+ if (evport->qos.enabled) {
+ ivi->min_tx_rate = evport->qos.min_rate;
+ ivi->max_tx_rate = evport->qos.max_rate;
+ }
mutex_unlock(&esw->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 42f8ee2e5d9f..ead5e8acc8be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -308,10 +308,14 @@ struct mlx5_eswitch {
atomic64_t user_count;
struct {
- bool enabled;
u32 root_tsar_ix;
struct mlx5_esw_rate_group *group0;
struct list_head groups; /* Protected by esw->state_lock */
+
+ /* Protected by esw->state_lock.
+ * Initially 0, meaning no QoS users and QoS is disabled.
+ */
+ refcount_t refcnt;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;
@@ -339,9 +343,6 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);
-bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
-int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
-
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
@@ -516,11 +517,6 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
-static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
-{
- return esw->qos.enabled;
-}
-
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
u8 vlan_depth)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 32bc08a39925..9a7b25692505 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -295,26 +295,28 @@ esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
int *i)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- int j, err;
+ int err;
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
- for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
- err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
- if (err)
- goto err_setup_chain;
+ /* flow steering cannot handle more than one dest with the same ft
+ * in a single flow
+ */
+ if (esw_attr->out_count - esw_attr->split_count > 1)
+ return -EOPNOTSUPP;
- if (esw_attr->dests[j].pkt_reformat) {
- flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
- }
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
+ if (err)
+ return err;
+
+ if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
}
- return 0;
+ (*i)++;
-err_setup_chain:
- esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
- return err;
+ return 0;
}
static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
@@ -3867,3 +3869,62 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
+
+static bool
+is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return vport_num == MLX5_VPORT_PF ||
+ mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+ mlx5_esw_is_sf_vport(esw, vport_num);
+}
+
+int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ if (!is_port_function_supported(esw, vport_num))
+ return -EOPNOTSUPP;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+
+ mutex_lock(&esw->state_lock);
+ ether_addr_copy(hw_addr, vport->info.mac);
+ *hw_addr_len = ETH_ALEN;
+ mutex_unlock(&esw->state_lock);
+ return 0;
+}
+
+int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ u16 vport_num;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw)) {
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
+ return PTR_ERR(esw);
+ }
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ if (!is_port_function_supported(esw, vport_num)) {
+ NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
+ return -EINVAL;
+ }
+
+ return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 750b21124a1a..dafe341358c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -451,7 +451,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
list_for_each_entry(dst, &fte->node.children, node.list) {
if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
- if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++;
num_fwd_destinations++;
@@ -788,7 +789,8 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
int err;
u32 *in;
- if (namespace == MLX5_FLOW_NAMESPACE_FDB)
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB ||
+ namespace == MLX5_FLOW_NAMESPACE_FDB_BYPASS)
max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
else
max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
@@ -860,6 +862,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
switch (namespace) {
case MLX5_FLOW_NAMESPACE_FDB:
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
table_type = FS_FT_FDB;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 386ab9a2d490..b628917e38e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1525,7 +1525,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2)
{
if (d1->type == d2->type) {
- if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
d1->vport.num == d2->vport.num &&
d1->vport.flags == d2->vport.flags &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
@@ -2206,6 +2207,22 @@ struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
+static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
+{
+ switch (type) {
+ case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_LAG:
+ case MLX5_FLOW_NAMESPACE_OFFLOADS:
+ case MLX5_FLOW_NAMESPACE_ETHTOOL:
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ case MLX5_FLOW_NAMESPACE_LEFTOVERS:
+ case MLX5_FLOW_NAMESPACE_ANCHOR:
+ return true;
+ default:
+ return false;
+ }
+}
+
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type)
{
@@ -2235,31 +2252,39 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns;
return NULL;
- default:
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
+ root_ns = steering->fdb_root_ns;
+ prio = FDB_BYPASS_PATH;
break;
- }
-
- if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
- type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
root_ns = steering->egress_root_ns;
prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
root_ns = steering->rdma_rx_root_ns;
prio = RDMA_RX_BYPASS_PRIO;
- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
root_ns = steering->rdma_rx_root_ns;
prio = RDMA_RX_KERNEL_PRIO;
- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
root_ns = steering->rdma_tx_root_ns;
- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) {
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
root_ns = steering->rdma_rx_root_ns;
prio = RDMA_RX_COUNTERS_PRIO;
- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) {
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
root_ns = steering->rdma_tx_root_ns;
prio = RDMA_TX_COUNTERS_PRIO;
- } else { /* Must be NIC RX */
+ break;
+ default: /* Must be NIC RX */
+ WARN_ON(!is_nic_rx_ns(type));
root_ns = steering->root_ns;
prio = type;
+ break;
}
if (!root_ns)
@@ -2822,6 +2847,28 @@ static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
return 0;
}
+static int create_fdb_bypass(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_flow_namespace *ns;
+ struct fs_prio *prio;
+ int i;
+
+ prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+
+ ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+
+ for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
+ prio = fs_create_prio(ns, i, 1);
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+ }
+ return 0;
+}
+
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *maj_prio;
@@ -2831,12 +2878,10 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (!steering->fdb_root_ns)
return -ENOMEM;
- maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
- 1);
- if (IS_ERR(maj_prio)) {
- err = PTR_ERR(maj_prio);
+ err = create_fdb_bypass(steering);
+ if (err)
goto out_err;
- }
+
err = create_fdb_fast_path(steering);
if (err)
goto out_err;
@@ -3038,6 +3083,11 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
steering->dev = dev;
dev->priv.steering = steering;
+ if (mlx5_fs_dr_is_supported(dev))
+ steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else
+ steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+
steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
sizeof(struct mlx5_flow_group), 0,
0, NULL);
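The fs_core.c changes populate the FDB_BYPASS_PATH priority with its own namespace and regular sub-priorities, and route the new MLX5_FLOW_NAMESPACE_FDB_BYPASS type through the switch-based namespace lookup. A steering user can then resolve the bypass namespace like any other type; a hedged usage sketch (error handling trimmed, attribute values illustrative):

	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB_BYPASS);
	if (!ns)
		return -EOPNOTSUPP;
	ft_attr.prio = 0;	/* one of MLX5_BY_PASS_NUM_REGULAR_PRIOS priorities */
	ft_attr.max_fte = 1;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);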
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 7711db245c63..5469b08d635f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -203,7 +203,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn;
};
-#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_c00
+#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_e00
/* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 7e0e04cf26f8..b406e0367af6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -38,9 +38,10 @@
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
+#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
-#define MLX5_SF_NUM_COUNTERS_BULK 8
+#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
@@ -145,13 +146,15 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
spin_unlock(&fc_stats->counters_idr_lock);
}
-static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
- int num_counters_bulk = mlx5_core_is_sf(dev) ?
- MLX5_SF_NUM_COUNTERS_BULK :
- MLX5_SW_MAX_COUNTERS_BULK;
+ return min_t(int, MLX5_INIT_COUNTERS_BULK,
+ (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
- return min_t(int, num_counters_bulk,
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+{
+ return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
(1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}
@@ -177,7 +180,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
bool query_more_counters = (first->id <= last_id);
- int max_bulk_len = get_max_bulk_query_len(dev);
+ int cur_bulk_len = fc_stats->bulk_query_len;
u32 *data = fc_stats->bulk_query_out;
struct mlx5_fc *counter = first;
u32 bulk_base_id;
@@ -189,7 +192,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
bulk_base_id = counter->id & ~0x3;
/* number of counters to query inc. the last counter */
- bulk_len = min_t(int, max_bulk_len,
+ bulk_len = min_t(int, cur_bulk_len,
ALIGN(last_id - bulk_base_id + 1, 4));
err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
@@ -230,6 +233,41 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
mlx5_fc_free(dev, counter);
}
+static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ int max_bulk_len = get_max_bulk_query_len(dev);
+ unsigned long now = jiffies;
+ u32 *bulk_query_out_tmp;
+ int max_out_len;
+
+ if (fc_stats->bulk_query_alloc_failed &&
+ time_before(now, fc_stats->next_bulk_query_alloc))
+ return;
+
+ max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+ bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
+ if (!bulk_query_out_tmp) {
+ mlx5_core_warn_once(dev,
+ "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
+ max_bulk_len);
+ fc_stats->bulk_query_alloc_failed = true;
+ fc_stats->next_bulk_query_alloc =
+ now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
+ return;
+ }
+
+ kfree(fc_stats->bulk_query_out);
+ fc_stats->bulk_query_out = bulk_query_out_tmp;
+ fc_stats->bulk_query_len = max_bulk_len;
+ if (fc_stats->bulk_query_alloc_failed) {
+ mlx5_core_info(dev,
+ "Flow counters bulk query buffer size increased, bulk_size(%d)\n",
+ max_bulk_len);
+ fc_stats->bulk_query_alloc_failed = false;
+ }
+}
+
static void mlx5_fc_stats_work(struct work_struct *work)
{
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
@@ -247,15 +285,22 @@ static void mlx5_fc_stats_work(struct work_struct *work)
queue_delayed_work(fc_stats->wq, &fc_stats->work,
fc_stats->sampling_interval);
- llist_for_each_entry(counter, addlist, addlist)
+ llist_for_each_entry(counter, addlist, addlist) {
mlx5_fc_stats_insert(dev, counter);
+ fc_stats->num_counters++;
+ }
llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
mlx5_fc_stats_remove(dev, counter);
mlx5_fc_release(dev, counter);
+ fc_stats->num_counters--;
}
+ if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
+ fc_stats->num_counters > get_init_bulk_query_len(dev))
+ mlx5_fc_stats_bulk_query_size_increase(dev);
+
if (time_before(now, fc_stats->next_query) ||
list_empty(&fc_stats->counters))
return;
@@ -378,8 +423,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- int max_bulk_len;
- int max_out_len;
+ int init_bulk_len;
+ int init_out_len;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
@@ -387,11 +432,12 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
- max_bulk_len = get_max_bulk_query_len(dev);
- max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
- fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+ init_bulk_len = get_init_bulk_query_len(dev);
+ init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
+ fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
if (!fc_stats->bulk_query_out)
return -ENOMEM;
+ fc_stats->bulk_query_len = init_bulk_len;
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
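The fs_counters.c changes start the bulk-query buffer small (MLX5_INIT_COUNTERS_BULK, 8 counters) and grow it to the 32K maximum only once the number of live counters exceeds the initial bulk length, retrying a failed allocation no sooner than MLX5_FC_BULK_QUERY_ALLOC_PERIOD (180 seconds). The same grow-on-demand pattern reduces to this sketch, with invented field and helper names:

static void maybe_grow(struct counter_stats *s)
{
	void *tmp;

	if (s->bulk_len >= s->max_len || s->num_counters <= s->init_len)
		return;			/* still small enough; keep the buffer */
	tmp = kzalloc(out_len(s->max_len), GFP_KERNEL);	/* out_len() hypothetical */
	if (!tmp)
		return;			/* keep the old buffer, retry on a later pass */
	kfree(s->bulk_query_out);
	s->bulk_query_out = tmp;
	s->bulk_len = s->max_len;
}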
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 3ca998874c50..737df402c927 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -420,6 +420,11 @@ static void print_health_info(struct mlx5_core_dev *dev)
if (!ioread8(&h->synd))
return;
+ if (ioread32be(&h->fw_ver) == 0xFFFFFFFF) {
+ mlx5_log(dev, LOGLEVEL_ERR, "PCI slot is unavailable\n");
+ return;
+ }
+
rfr_severity = ioread8(&h->rfr_severity);
severity = mlx5_health_get_severity(rfr_severity);
mlx5_log(dev, severity, "Health issue observed, %s, severity(%d) %s:\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 962d41418ce7..f4f7eaf16446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -67,7 +67,9 @@ static void mlx5i_get_ethtool_stats(struct net_device *dev,
}
static int mlx5i_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -75,7 +77,9 @@ static int mlx5i_set_ringparam(struct net_device *dev,
}
static void mlx5i_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -105,7 +109,7 @@ static int mlx5i_set_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- return mlx5e_ethtool_set_coalesce(priv, coal);
+ return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5i_get_coalesce(struct net_device *netdev,
@@ -115,7 +119,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
static int mlx5i_get_ts_info(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index ea1efdecc88c..0a99a020a3b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -110,14 +110,14 @@ void mlx5i_cleanup(struct mlx5e_priv *priv)
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
- struct mlx5e_sw_stats s = { 0 };
+ struct rtnl_link_stats64 s = {};
int i, j;
for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats;
struct mlx5e_rq_stats *rq_stats;
- channel_stats = &priv->channel_stats[i];
+ channel_stats = priv->channel_stats[i];
rq_stats = &channel_stats->rq;
s.rx_packets += rq_stats->packets;
@@ -128,11 +128,17 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
s.tx_packets += sq_stats->packets;
s.tx_bytes += sq_stats->bytes;
- s.tx_queue_dropped += sq_stats->dropped;
+ s.tx_dropped += sq_stats->dropped;
}
}
- memcpy(&priv->stats.sw, &s, sizeof(s));
+	memset(&priv->stats.sw, 0, sizeof(priv->stats.sw));
+
+ priv->stats.sw.rx_packets = s.rx_packets;
+ priv->stats.sw.rx_bytes = s.rx_bytes;
+ priv->stats.sw.tx_packets = s.tx_packets;
+ priv->stats.sw.tx_bytes = s.tx_bytes;
+ priv->stats.sw.tx_queue_dropped = s.tx_dropped;
}
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
@@ -443,7 +449,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
- .rx_ptp_support = false,
};
/* mlx5i netdev NDos */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 5308f23702bc..0b86e78dbc0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -350,7 +350,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
- .rx_ptp_support = false,
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
new file mode 100644
index 000000000000..380a208ab137
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "mlx5_irq.h"
+#include "pci_irq.h"
+
+static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
+{
+ pool->irqs_per_cpu[cpu]--;
+}
+
+static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
+{
+ pool->irqs_per_cpu[cpu]++;
+}
+
+/* Gets the least loaded CPU, i.e. the CPU with the fewest IRQs bound to it */
+static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
+ const struct cpumask *req_mask)
+{
+ int best_cpu = -1;
+ int cpu;
+
+ for_each_cpu_and(cpu, req_mask, cpu_online_mask) {
+ /* CPU has zero IRQs on it. No need to search any more CPUs. */
+ if (!pool->irqs_per_cpu[cpu]) {
+ best_cpu = cpu;
+ break;
+ }
+ if (best_cpu < 0)
+ best_cpu = cpu;
+ if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
+ best_cpu = cpu;
+ }
+ if (best_cpu == -1) {
+		/* There are no online CPUs in req_mask */
+ mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n",
+ cpumask_pr_args(req_mask));
+ best_cpu = cpumask_first(cpu_online_mask);
+ }
+ pool->irqs_per_cpu[best_cpu]++;
+ return best_cpu;
+}
+
+/* Creating an IRQ from irq_pool */
+static struct mlx5_irq *
+irq_pool_request_irq(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+{
+ cpumask_var_t auto_mask;
+ struct mlx5_irq *irq;
+ u32 irq_index;
+ int err;
+
+ if (!zalloc_cpumask_var(&auto_mask, GFP_KERNEL))
+ return ERR_PTR(-ENOMEM);
+	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
+	if (err) {
+		free_cpumask_var(auto_mask);	/* don't leak auto_mask on error */
+		return ERR_PTR(err);
+	}
+ if (pool->irqs_per_cpu) {
+ if (cpumask_weight(req_mask) > 1)
+			/* if req_mask contains more than one CPU, bind the IRQ
+			 * to the least loaded CPU in req_mask
+			 */
+ cpumask_set_cpu(cpu_get_least_loaded(pool, req_mask), auto_mask);
+ else
+ cpu_get(pool, cpumask_first(req_mask));
+ }
+ irq = mlx5_irq_alloc(pool, irq_index, cpumask_empty(auto_mask) ? req_mask : auto_mask);
+ free_cpumask_var(auto_mask);
+ return irq;
+}
+
+/* Looking for the IRQ with the smallest refcount that fits req_mask.
+ * If pool is sf_comp_pool, we are looking for an IRQ whose mask contains
+ * only CPUs requested in req_mask.
+ * For example: req_mask = 0xf, irq0_mask = 0x10, irq1_mask = 0x1. irq0_mask
+ * isn't a subset of req_mask, so we skip it. irq1_mask is a subset of
+ * req_mask, so we don't skip it.
+ * If pool is sf_ctrl_pool, all IRQs have the same mask, so any IRQ will
+ * fit. And since a mask is a subset of itself, the first check below passes.
+ */
+static struct mlx5_irq *
+irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+{
+ int start = pool->xa_num_irqs.min;
+ int end = pool->xa_num_irqs.max;
+ struct mlx5_irq *irq = NULL;
+ struct mlx5_irq *iter;
+ int irq_refcount = 0;
+ unsigned long index;
+
+ lockdep_assert_held(&pool->lock);
+ xa_for_each_range(&pool->irqs, index, iter, start, end) {
+ struct cpumask *iter_mask = mlx5_irq_get_affinity_mask(iter);
+ int iter_refcount = mlx5_irq_read_locked(iter);
+
+ if (!cpumask_subset(iter_mask, req_mask))
+ /* skip IRQs with a mask which is not subset of req_mask */
+ continue;
+ if (iter_refcount < pool->min_threshold)
+ /* If we found an IRQ with less than min_thres, return it */
+ return iter;
+ if (!irq || iter_refcount < irq_refcount) {
+ /* In case we won't find an IRQ with less than min_thres,
+ * keep a pointer to the least used IRQ
+ */
+ irq_refcount = iter_refcount;
+ irq = iter;
+ }
+ }
+ return irq;
+}
+
+/**
+ * mlx5_irq_affinity_request - request an IRQ according to the given mask.
+ * @pool: IRQ pool to request from.
+ * @req_mask: cpumask requested for this IRQ.
+ *
+ * This function returns a pointer to the IRQ on success, or an ERR_PTR on error.
+ */
+struct mlx5_irq *
+mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+{
+ struct mlx5_irq *least_loaded_irq, *new_irq;
+
+ mutex_lock(&pool->lock);
+ least_loaded_irq = irq_pool_find_least_loaded(pool, req_mask);
+ if (least_loaded_irq &&
+ mlx5_irq_read_locked(least_loaded_irq) < pool->min_threshold)
+ goto out;
+ /* We didn't find an IRQ with less than min_thres, try to allocate a new IRQ */
+ new_irq = irq_pool_request_irq(pool, req_mask);
+ if (IS_ERR(new_irq)) {
+ if (!least_loaded_irq) {
+			/* We failed to create an IRQ and no existing IRQ matched */
+ mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
+ PTR_ERR(new_irq));
+ mutex_unlock(&pool->lock);
+ return new_irq;
+ }
+		/* We failed to create a new IRQ for the requested affinity,
+		 * so share an existing IRQ instead.
+		 */
+ goto out;
+ }
+ least_loaded_irq = new_irq;
+ goto unlock;
+out:
+ mlx5_irq_get_locked(least_loaded_irq);
+ if (mlx5_irq_read_locked(least_loaded_irq) > pool->max_threshold)
+ mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
+ pci_irq_vector(pool->dev->pdev,
+ mlx5_irq_get_index(least_loaded_irq)), pool->name,
+ mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
+unlock:
+ mutex_unlock(&pool->lock);
+ return least_loaded_irq;
+}
+
+void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
+ int num_irqs)
+{
+ struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ int i;
+
+ for (i = 0; i < num_irqs; i++) {
+ int cpu = cpumask_first(mlx5_irq_get_affinity_mask(irqs[i]));
+
+ synchronize_irq(pci_irq_vector(pool->dev->pdev,
+ mlx5_irq_get_index(irqs[i])));
+ if (mlx5_irq_put(irqs[i]))
+ if (pool->irqs_per_cpu)
+ cpu_put(pool, cpu);
+ }
+}
+
+/**
+ * mlx5_irq_affinity_irqs_request_auto - request one or more IRQs for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQs.
+ * @nirqs: number of IRQs to request.
+ * @irqs: an output array of IRQs pointers.
+ *
+ * Each IRQ is bound to at most 1 CPU.
+ * This function requests IRQs according to the default assignment policy:
+ * - in each iteration, request the least loaded IRQ which is not bound to any
+ * CPU of the previous IRQs requested.
+ *
+ * This function returns the number of IRQs requested (which might be smaller
+ * than @nirqs) on success, or a negative error code on error.
+ */
+int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
+ struct mlx5_irq **irqs)
+{
+ struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ cpumask_var_t req_mask;
+ struct mlx5_irq *irq;
+ int i = 0;
+
+ if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(req_mask, cpu_online_mask);
+ for (i = 0; i < nirqs; i++) {
+ if (mlx5_irq_pool_is_sf_pool(pool))
+ irq = mlx5_irq_affinity_request(pool, req_mask);
+ else
+			/* In case the SF pool doesn't exist, fall back to the
+			 * PF IRQs. The PF IRQs are already allocated and bound
+			 * to a CPU at this point. Hence, only an index is needed.
+			 */
+ irq = mlx5_irq_request(dev, i, NULL);
+ if (IS_ERR(irq))
+ break;
+ irqs[i] = irq;
+ cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), req_mask);
+ mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
+ pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
+ cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
+ mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+ }
+ free_cpumask_var(req_mask);
+ if (!i)
+ return PTR_ERR(irq);
+ return i;
+}
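The new irq_affinity.c spreads SF IRQs by keeping a per-pool irqs_per_cpu counter and always binding a fresh IRQ to the least loaded online CPU of the requested mask. Per the kernel-doc above, a consumer would use the auto-request API roughly as follows (the four-EQ context is invented for illustration):

	struct mlx5_irq *irqs[4];
	int got;

	got = mlx5_irq_affinity_irqs_request_auto(dev, ARRAY_SIZE(irqs), irqs);
	if (got < 0)
		return got;	/* not even one IRQ could be obtained */
	/* got may be smaller than requested; attach EQs to irqs[0..got-1],
	 * and when done with them:
	 */
	mlx5_irq_affinity_irqs_release(dev, irqs, got);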
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index bf4d3cbefa63..1ca01a5b6cdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -268,10 +268,8 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
fi = fen_info->fi;
- if (fi->nh) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
- return notifier_from_errno(-EINVAL);
- }
+ if (fi->nh)
+ return NOTIFY_DONE;
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev &&
fib_dev != ldev->pf[MLX5_LAG_P2].netdev) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 65083496f913..2c774f367199 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -98,6 +98,8 @@ enum {
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
+#define LOG_MAX_SUPPORTED_QPS 0xff
+
static struct mlx5_profile profile[] = {
[0] = {
.mask = 0,
@@ -109,7 +111,7 @@ static struct mlx5_profile profile[] = {
[2] = {
.mask = MLX5_PROF_MASK_QP_SIZE |
MLX5_PROF_MASK_MR_CACHE,
- .log_max_qp = 18,
+ .log_max_qp = LOG_MAX_SUPPORTED_QPS,
.mr_cache[0] = {
.size = 500,
.limit = 250
@@ -484,10 +486,26 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}
+static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &val);
+ if (!err)
+ return val.vu32;
+ mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
+ return err;
+}
+
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
struct mlx5_profile *prof = &dev->profile;
void *set_hca_cap;
+ int max_uc_list;
int err;
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
@@ -507,7 +525,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
to_fw_pkey_sz(dev, 128));
/* Check log_max_qp from HCA caps to set in current profile */
- if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
+ if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
+ prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
prof->log_max_qp,
MLX5_CAP_GEN_MAX(dev, log_max_qp));
@@ -561,6 +581,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN(dev, roce_rw_supported))
MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+ max_uc_list = max_uc_list_get_devlink_param(dev);
+ if (max_uc_list > 0)
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
+ ilog2(max_uc_list));
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -1604,12 +1629,28 @@ static void remove_one(struct pci_dev *pdev)
mlx5_devlink_free(devlink);
}
+#define mlx5_pci_trace(dev, fmt, ...) ({ \
+ struct mlx5_core_dev *__dev = (dev); \
+ mlx5_core_info(__dev, "%s Device state = %d health sensors: %d pci_status: %d. " fmt, \
+ __func__, __dev->state, mlx5_health_check_fatal_sensors(__dev), \
+ __dev->pci_status, ##__VA_ARGS__); \
+})
+
+static const char *result2str(enum pci_ers_result result)
+{
+ return result == PCI_ERS_RESULT_NEED_RESET ? "need reset" :
+ result == PCI_ERS_RESULT_DISCONNECT ? "disconnect" :
+ result == PCI_ERS_RESULT_RECOVERED ? "recovered" :
+ "unknown";
+}
+
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ enum pci_ers_result res;
- mlx5_core_info(dev, "%s was called\n", __func__);
+ mlx5_pci_trace(dev, "Enter, pci channel state = %d\n", state);
mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
@@ -1617,8 +1658,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
- return state == pci_channel_io_perm_failure ?
+ res = state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+
+ mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res));
+ return res;
}
/* wait for the device to show vital signs by waiting
@@ -1652,28 +1696,34 @@ static int wait_vital(struct pci_dev *pdev)
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
+ enum pci_ers_result res = PCI_ERS_RESULT_DISCONNECT;
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
- mlx5_core_info(dev, "%s was called\n", __func__);
+ mlx5_pci_trace(dev, "Enter\n");
err = mlx5_pci_enable_device(dev);
if (err) {
mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
__func__, err);
- return PCI_ERS_RESULT_DISCONNECT;
+ goto out;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- if (wait_vital(pdev)) {
- mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
- return PCI_ERS_RESULT_DISCONNECT;
+ err = wait_vital(pdev);
+ if (err) {
+ mlx5_core_err(dev, "%s: wait vital failed with error code: %d\n",
+ __func__, err);
+ goto out;
}
- return PCI_ERS_RESULT_RECOVERED;
+ res = PCI_ERS_RESULT_RECOVERED;
+out:
+ mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
+ return res;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1681,14 +1731,12 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
- mlx5_core_info(dev, "%s was called\n", __func__);
+ mlx5_pci_trace(dev, "Enter, loading driver..\n");
err = mlx5_load_one(dev);
- if (err)
- mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
- __func__, err);
- else
- mlx5_core_info(dev, "%s: device recovered\n", __func__);
+
+ mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
+ !err ? "recovered" : "Failed");
}
static const struct pci_error_handlers mlx5_err_handler = {
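In main.c, profile[2].log_max_qp now carries the sentinel LOG_MAX_SUPPORTED_QPS (0xff), which handle_hca_cap() resolves to whatever the HCA reports instead of the previous hard-coded 18. The resolution logic reduces to this small sketch (the helper name is hypothetical):

#define LOG_MAX_SUPPORTED_QPS 0xff

static u8 resolve_log_max_qp(u8 profile_val, u8 hca_cap)
{
	if (profile_val == LOG_MAX_SUPPORTED_QPS)
		return hca_cap;			/* take the device limit */
	return min(profile_val, hca_cap);	/* clamp to what hw allows */
}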
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index bb677329ea08..6f8baa0f2a73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -305,5 +305,6 @@ static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
bool mlx5_eth_supported(struct mlx5_core_dev *dev);
bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
+bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 8116815663a7..23cb63fa4588 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -22,12 +22,40 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
int msix_vec_count);
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
+struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
+void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct cpumask *affinity);
-void mlx5_irq_release(struct mlx5_irq *irq);
+int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
+ struct mlx5_irq **irqs);
+void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs);
int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
+struct mlx5_irq_pool;
+#ifdef CONFIG_MLX5_SF
+int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
+ struct mlx5_irq **irqs);
+struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
+ const struct cpumask *req_mask);
+void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
+ int num_irqs);
+#else
+static inline int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
+ struct mlx5_irq **irqs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct mlx5_irq *
+mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev,
+ struct mlx5_irq **irqs, int num_irqs) {}
+#endif
#endif /* __MLX5_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index bcee30f5de0a..41807ef55201 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -7,15 +7,12 @@
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
+#include "pci_irq.h"
#include "lib/sf.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
-#define MLX5_MAX_IRQ_NAME (32)
-/* max irq_index is 2047, so four chars */
-#define MLX5_MAX_IRQ_IDX_CHARS (4)
-
#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
@@ -25,7 +22,6 @@
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
#define MLX5_EQ_SHARE_IRQ_MIN_COMP (1)
#define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4)
-#define MLX5_EQ_REFS_PER_IRQ (2)
struct mlx5_irq {
struct atomic_notifier_head nh;
@@ -37,16 +33,6 @@ struct mlx5_irq {
int irqn;
};
-struct mlx5_irq_pool {
- char name[MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS];
- struct xa_limit xa_num_irqs;
- struct mutex lock; /* sync IRQs creations */
- struct xarray irqs;
- u32 max_threshold;
- u32 min_threshold;
- struct mlx5_core_dev *dev;
-};
-
struct mlx5_irq_table {
struct mlx5_irq_pool *pf_pool;
struct mlx5_irq_pool *sf_ctrl_pool;
@@ -143,28 +129,38 @@ static void irq_release(struct mlx5_irq *irq)
struct mlx5_irq_pool *pool = irq->pool;
xa_erase(&pool->irqs, irq->index);
- /* free_irq requires that affinity and rmap will be cleared
+ /* free_irq requires that affinity_hint and rmap will be cleared
* before calling it. This is why there is asymmetry with set_rmap
* which should be called after alloc_irq but before request_irq.
*/
- irq_set_affinity_hint(irq->irqn, NULL);
+ irq_update_affinity_hint(irq->irqn, NULL);
free_cpumask_var(irq->mask);
free_irq(irq->irqn, &irq->nh);
kfree(irq);
}
-static void irq_put(struct mlx5_irq *irq)
+int mlx5_irq_put(struct mlx5_irq *irq)
{
struct mlx5_irq_pool *pool = irq->pool;
+ int ret = 0;
mutex_lock(&pool->lock);
irq->refcount--;
- if (!irq->refcount)
+ if (!irq->refcount) {
irq_release(irq);
+ ret = 1;
+ }
mutex_unlock(&pool->lock);
+ return ret;
+}
+
+int mlx5_irq_read_locked(struct mlx5_irq *irq)
+{
+ lockdep_assert_held(&irq->pool->lock);
+ return irq->refcount;
}
-static int irq_get_locked(struct mlx5_irq *irq)
+int mlx5_irq_get_locked(struct mlx5_irq *irq)
{
lockdep_assert_held(&irq->pool->lock);
if (WARN_ON_ONCE(!irq->refcount))
@@ -178,7 +174,7 @@ static int irq_get(struct mlx5_irq *irq)
int err;
mutex_lock(&irq->pool->lock);
- err = irq_get_locked(irq);
+ err = mlx5_irq_get_locked(irq);
mutex_unlock(&irq->pool->lock);
return err;
}
@@ -210,12 +206,8 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}
-static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
-{
- return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
-}
-
-static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
+struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
+ const struct cpumask *affinity)
{
struct mlx5_core_dev *dev = pool->dev;
char name[MLX5_MAX_IRQ_NAME];
@@ -226,7 +218,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
if (!irq)
return ERR_PTR(-ENOMEM);
irq->irqn = pci_irq_vector(dev->pdev, i);
- if (!irq_pool_is_sf_pool(pool))
+ if (!mlx5_irq_pool_is_sf_pool(pool))
irq_set_name(pool, name, i);
else
irq_sf_set_name(pool, name, i);
@@ -244,6 +236,10 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
err = -ENOMEM;
goto err_cpumask;
}
+ if (affinity) {
+ cpumask_copy(irq->mask, affinity);
+ irq_set_affinity_and_hint(irq->irqn, irq->mask);
+ }
irq->pool = pool;
irq->refcount = 1;
irq->index = i;
@@ -255,6 +251,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
}
return irq;
err_xa:
+ irq_update_affinity_hint(irq->irqn, NULL);
free_cpumask_var(irq->mask);
err_cpumask:
free_irq(irq->irqn, &irq->nh);
@@ -275,7 +272,7 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
return -ENOENT;
ret = atomic_notifier_chain_register(&irq->nh, nb);
if (ret)
- irq_put(irq);
+ mlx5_irq_put(irq);
return ret;
}
@@ -284,7 +281,7 @@ int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
int err = 0;
err = atomic_notifier_chain_unregister(&irq->nh, nb);
- irq_put(irq);
+ mlx5_irq_put(irq);
return err;
}
@@ -300,131 +297,121 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
/* irq_pool API */
-/* creating an irq from irq_pool */
-static struct mlx5_irq *irq_pool_create_irq(struct mlx5_irq_pool *pool,
- struct cpumask *affinity)
+/* requesting an irq from a given pool according to given index */
+static struct mlx5_irq *
+irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
+ struct cpumask *affinity)
{
struct mlx5_irq *irq;
- u32 irq_index;
- int err;
- err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs,
- GFP_KERNEL);
- if (err)
- return ERR_PTR(err);
- irq = irq_request(pool, irq_index);
- if (IS_ERR(irq))
- return irq;
- cpumask_copy(irq->mask, affinity);
- irq_set_affinity_hint(irq->irqn, irq->mask);
+ mutex_lock(&pool->lock);
+ irq = xa_load(&pool->irqs, vecidx);
+ if (irq) {
+ mlx5_irq_get_locked(irq);
+ goto unlock;
+ }
+ irq = mlx5_irq_alloc(pool, vecidx, affinity);
+unlock:
+ mutex_unlock(&pool->lock);
return irq;
}
-/* looking for the irq with the smallest refcount and the same affinity */
-static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool,
- struct cpumask *affinity)
+static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_table)
{
- int start = pool->xa_num_irqs.min;
- int end = pool->xa_num_irqs.max;
- struct mlx5_irq *irq = NULL;
- struct mlx5_irq *iter;
- unsigned long index;
+ return irq_table->sf_ctrl_pool;
+}
- lockdep_assert_held(&pool->lock);
- xa_for_each_range(&pool->irqs, index, iter, start, end) {
- if (!cpumask_equal(iter->mask, affinity))
- continue;
- if (iter->refcount < pool->min_threshold)
- return iter;
- if (!irq || iter->refcount < irq->refcount)
- irq = iter;
- }
- return irq;
+static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
+{
+ return irq_table->sf_comp_pool;
}
-/* requesting an irq from a given pool according to given affinity */
-static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
- struct cpumask *affinity)
+struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
{
- struct mlx5_irq *least_loaded_irq, *new_irq;
+ struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
+ struct mlx5_irq_pool *pool = NULL;
- mutex_lock(&pool->lock);
- least_loaded_irq = irq_pool_find_least_loaded(pool, affinity);
- if (least_loaded_irq &&
- least_loaded_irq->refcount < pool->min_threshold)
- goto out;
- new_irq = irq_pool_create_irq(pool, affinity);
- if (IS_ERR(new_irq)) {
- if (!least_loaded_irq) {
- mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
- PTR_ERR(new_irq));
- mutex_unlock(&pool->lock);
- return new_irq;
- }
- /* We failed to create a new IRQ for the requested affinity,
- * sharing existing IRQ.
- */
- goto out;
- }
- least_loaded_irq = new_irq;
- goto unlock;
-out:
- irq_get_locked(least_loaded_irq);
- if (least_loaded_irq->refcount > pool->max_threshold)
- mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
- least_loaded_irq->irqn, pool->name,
- least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ);
-unlock:
- mutex_unlock(&pool->lock);
- return least_loaded_irq;
+ if (mlx5_core_is_sf(dev))
+ pool = sf_irq_pool_get(irq_table);
+
+	/* In some configs, there won't be a pool of SF IRQs. Hence, return
+	 * the PF IRQ pool in case the SF pool doesn't exist.
+	 */
+ return pool ? pool : irq_table->pf_pool;
}
-/* requesting an irq from a given pool according to given index */
-static struct mlx5_irq *
-irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
- struct cpumask *affinity)
+static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
{
- struct mlx5_irq *irq;
+ struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
+ struct mlx5_irq_pool *pool = NULL;
- mutex_lock(&pool->lock);
- irq = xa_load(&pool->irqs, vecidx);
- if (irq) {
- irq_get_locked(irq);
- goto unlock;
+ if (mlx5_core_is_sf(dev))
+ pool = sf_ctrl_irq_pool_get(irq_table);
+
+	/* In some configs, there won't be a pool of SF IRQs. Hence, return
+	 * the PF IRQ pool in case the SF pool doesn't exist.
+	 */
+ return pool ? pool : irq_table->pf_pool;
+}
+
+/**
+ * mlx5_irqs_release - release one or more IRQs back to the system.
+ * @irqs: IRQs to be released.
+ * @nirqs: number of IRQs to be released.
+ */
+static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
+{
+ int i;
+
+ for (i = 0; i < nirqs; i++) {
+ synchronize_irq(irqs[i]->irqn);
+ mlx5_irq_put(irqs[i]);
}
- irq = irq_request(pool, vecidx);
- if (IS_ERR(irq) || !affinity)
- goto unlock;
- cpumask_copy(irq->mask, affinity);
- if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
- cpumask_empty(irq->mask))
- cpumask_set_cpu(cpumask_first(cpu_online_mask), irq->mask);
- irq_set_affinity_hint(irq->irqn, irq->mask);
-unlock:
- mutex_unlock(&pool->lock);
- return irq;
}
-static struct mlx5_irq_pool *find_sf_irq_pool(struct mlx5_irq_table *irq_table,
- int i, struct cpumask *affinity)
+/**
+ * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
+ * @ctrl_irq: ctrl IRQ to be released.
+ */
+void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
{
- if (cpumask_empty(affinity) && i == MLX5_IRQ_EQ_CTRL)
- return irq_table->sf_ctrl_pool;
- return irq_table->sf_comp_pool;
+ mlx5_irqs_release(&ctrl_irq, 1);
}
/**
- * mlx5_irq_release - release an IRQ back to the system.
- * @irq: irq to be released.
+ * mlx5_ctrl_irq_request - request a ctrl IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ *
+ * This function returns a pointer to the IRQ on success, or an ERR_PTR on error.
*/
-void mlx5_irq_release(struct mlx5_irq *irq)
+struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
{
- synchronize_irq(irq->irqn);
- irq_put(irq);
+ struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
+ cpumask_var_t req_mask;
+ struct mlx5_irq *irq;
+
+ if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
+ return ERR_PTR(-ENOMEM);
+ cpumask_copy(req_mask, cpu_online_mask);
+ if (!mlx5_irq_pool_is_sf_pool(pool)) {
+ /* In case we are allocating a control IRQ for PF/VF */
+ if (!pool->xa_num_irqs.max) {
+ cpumask_clear(req_mask);
+ /* In case we only have a single IRQ for PF/VF */
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), req_mask);
+ }
+ /* Allocate the IRQ in the last index of the pool */
+ irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, req_mask);
+ } else {
+ irq = mlx5_irq_affinity_request(pool, req_mask);
+ }
+
+ free_cpumask_var(req_mask);
+ return irq;
}
/**
- * mlx5_irq_request - request an IRQ for mlx5 device.
+ * mlx5_irq_request - request an IRQ for mlx5 PF/VF device.
 * @dev: mlx5 device that is requesting the IRQ.
 * @vecidx: vector index of the IRQ. This argument is ignored if affinity is
 * provided.
@@ -439,23 +426,8 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct mlx5_irq_pool *pool;
struct mlx5_irq *irq;
- if (mlx5_core_is_sf(dev)) {
- pool = find_sf_irq_pool(irq_table, vecidx, affinity);
- if (!pool)
- /* we don't have IRQs for SFs, using the PF IRQs */
- goto pf_irq;
- if (cpumask_empty(affinity) && !strcmp(pool->name, "mlx5_sf_comp"))
- /* In case an SF user request IRQ with vecidx */
- irq = irq_pool_request_vector(pool, vecidx, NULL);
- else
- irq = irq_pool_request_affinity(pool, affinity);
- goto out;
- }
-pf_irq:
pool = irq_table->pf_pool;
- vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
irq = irq_pool_request_vector(pool, vecidx, affinity);
-out:
if (IS_ERR(irq))
return irq;
mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
@@ -464,6 +436,51 @@ out:
return irq;
}
+/**
+ * mlx5_irqs_release_vectors - release one or more IRQs back to the system.
+ * @irqs: IRQs to be released.
+ * @nirqs: number of IRQs to be released.
+ */
+void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
+{
+ mlx5_irqs_release(irqs, nirqs);
+}
+
+/**
+ * mlx5_irqs_request_vectors - request one or more IRQs for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQs.
+ * @cpus: CPUs array for binding the IRQs
+ * @nirqs: number of IRQs to request.
+ * @irqs: an output array of IRQs pointers.
+ *
+ * Each IRQ is bound to at most 1 CPU.
+ * This function requests @nirqs IRQs, binding each one to the corresponding CPU in @cpus.
+ *
+ * This function returns the number of IRQs requested (which might be smaller
+ * than @nirqs) on success, or a negative error code on error.
+ */
+int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
+ struct mlx5_irq **irqs)
+{
+ cpumask_var_t req_mask;
+ struct mlx5_irq *irq;
+ int i;
+
+ if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
+ return -ENOMEM;
+ for (i = 0; i < nirqs; i++) {
+ cpumask_set_cpu(cpus[i], req_mask);
+ irq = mlx5_irq_request(dev, i, req_mask);
+ if (IS_ERR(irq))
+ break;
+ cpumask_clear(req_mask);
+ irqs[i] = irq;
+ }
+
+ free_cpumask_var(req_mask);
+ return i ? i : PTR_ERR(irq);
+}
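
A similar sketch for the multi-vector API above (again hypothetical): since fewer IRQs than requested may be returned, only the first "got" entries of the output array are valid.

/* Hypothetical caller, for illustration only. */
static int example_vectors_user(struct mlx5_core_dev *dev)
{
	struct mlx5_irq *irqs[2];
	u16 cpus[2] = { 0, 1 };	/* IRQ i will be bound to CPU cpus[i] */
	int got, i;

	got = mlx5_irqs_request_vectors(dev, cpus, ARRAY_SIZE(irqs), irqs);
	if (got < 0)
		return got;

	for (i = 0; i < got; i++) {
		/* ... use irqs[i] ... */
	}

	mlx5_irqs_release_vectors(irqs, got);
	return 0;
}
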
+
static struct mlx5_irq_pool *
irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
u32 min_threshold, u32 max_threshold)
@@ -479,7 +496,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->xa_num_irqs.max = start + size - 1;
if (name)
snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
- name);
+ "%s", name);
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
@@ -500,6 +517,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
irq_release(irq);
xa_destroy(&pool->irqs);
mutex_destroy(&pool->lock);
+ kfree(pool->irqs_per_cpu);
kvfree(pool);
}
@@ -547,7 +565,17 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
err = PTR_ERR(table->sf_comp_pool);
goto err_sf_ctrl;
}
+
+ table->sf_comp_pool->irqs_per_cpu = kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL);
+ if (!table->sf_comp_pool->irqs_per_cpu) {
+ err = -ENOMEM;
+ goto err_irqs_per_cpu;
+ }
+
return 0;
+
+err_irqs_per_cpu:
+ irq_pool_free(table->sf_comp_pool);
err_sf_ctrl:
irq_pool_free(table->sf_ctrl_pool);
err_pf:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
new file mode 100644
index 000000000000..5c7e68bee43a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __PCI_IRQ_H__
+#define __PCI_IRQ_H__
+
+#include <linux/mlx5/driver.h>
+
+#define MLX5_MAX_IRQ_NAME (32)
+/* max irq_index is 2047, so four chars */
+#define MLX5_MAX_IRQ_IDX_CHARS (4)
+#define MLX5_EQ_REFS_PER_IRQ (2)
+
+struct mlx5_irq;
+
+struct mlx5_irq_pool {
+ char name[MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS];
+ struct xa_limit xa_num_irqs;
+ struct mutex lock; /* sync IRQ creation */
+ struct xarray irqs;
+ u32 max_threshold;
+ u32 min_threshold;
+ u16 *irqs_per_cpu;
+ struct mlx5_core_dev *dev;
+};
+
+struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev);
+static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
+{
+ return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
+}
+
+struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
+ const struct cpumask *affinity);
+int mlx5_irq_get_locked(struct mlx5_irq *irq);
+int mlx5_irq_read_locked(struct mlx5_irq *irq);
+int mlx5_irq_put(struct mlx5_irq *irq);
+
+#endif /* __PCI_IRQ_H__ */
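
Two details of this header are easy to miss: mlx5_irq_pool_is_sf_pool() matches on the "mlx5_sf" name prefix, so the SF control and completion pools both qualify while the PF pool (allocated with a NULL name, hence an empty string) does not; and min_threshold/max_threshold are stored pre-multiplied by MLX5_EQ_REFS_PER_IRQ in irq_pool_alloc(), so they count EQ references rather than IRQs.
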
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index f37db7cc32a6..7da012ff0d41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -30,10 +30,7 @@ bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
- if (!mlx5_sf_dev_supported(dev))
- return false;
-
- return !xa_empty(&table->devices);
+ return table && !xa_empty(&table->devices);
}
static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, char *buf)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index 252d6017387d..17aa348989cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -247,7 +247,7 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_hw_table *table;
u16 max_ext_fn = 0;
- u16 ext_base_id;
+ u16 ext_base_id = 0;
u16 max_fn = 0;
u16 base_id;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 07936841ce99..c61a5e83c78c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -1560,6 +1560,12 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}
+static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
+{
+ return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
+ !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
+}
+
static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions,
u32 num_sw_actions,
@@ -1591,8 +1597,13 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
if (ret)
return ret;
- if (!(*modify_ttl))
- *modify_ttl = dr_action_modify_check_is_ttl_modify(sw_action);
+ if (!(*modify_ttl) &&
+ dr_action_modify_check_is_ttl_modify(sw_action)) {
+ if (dr_action_modify_ttl_ignore(dmn))
+ continue;
+
+ *modify_ttl = true;
+ }
/* Convert SW action to HW action */
ret = dr_action_modify_sw_to_hw(dmn,
@@ -1631,7 +1642,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
* modify actions doesn't exceed the limit
*/
hw_idx++;
- if ((num_sw_actions + hw_idx - i) >= max_hw_actions) {
+ if (hw_idx >= max_hw_actions) {
mlx5dr_dbg(dmn, "Modify header action number exceeds HW limit\n");
return -EINVAL;
}
@@ -1642,6 +1653,10 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
hw_idx++;
}
+ /* if the resulting HW actions list is empty, add NOP action */
+ if (!hw_idx)
+ hw_idx++;
+
*num_hw_actions = hw_idx;
return 0;
@@ -1792,7 +1807,7 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
int mlx5dr_action_destroy(struct mlx5dr_action *action)
{
- if (refcount_read(&action->refcount) > 1)
+ if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
return -EBUSY;
switch (action->action_type) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 1d8febed0d76..4dd619d238cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -132,6 +132,13 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
+ /* geneve_tlv_option_0_exist indicates STE support
+ * for the flex_parser_ok lookup type
+ */
+ caps->flex_parser_ok_bits_supp =
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
+
if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
@@ -152,7 +159,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->flex_parser_id_mpls_over_gre =
MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
- if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
caps->flex_parser_id_mpls_over_udp =
MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
@@ -599,7 +606,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
for (i = 0; i < fte->dests_size; i++) {
if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
- if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++;
num_fwd_destinations++;
@@ -724,12 +732,19 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = fte->dest_arr[i].ft_id;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
- id = fte->dest_arr[i].vport.num;
- MLX5_SET(dest_format_struct, in_dests,
- destination_eswitch_owner_vhca_id_valid,
- !!(fte->dest_arr[i].vport.flags &
- MLX5_FLOW_DEST_VPORT_VHCA_ID));
+ if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
+ id = fte->dest_arr[i].vport.num;
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id_valid,
+ !!(fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_VHCA_ID));
+ } else {
+ id = 0;
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ }
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
fte->dest_arr[i].vport.vhca_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
new file mode 100644
index 000000000000..2784cd59fefe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "dr_types.h"
+
+#define DR_DBG_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
+
+enum dr_dump_rec_type {
+ DR_DUMP_REC_TYPE_DOMAIN = 3000,
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER = 3001,
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_DEV_ATTR = 3002,
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT = 3003,
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS = 3004,
+ DR_DUMP_REC_TYPE_DOMAIN_SEND_RING = 3005,
+
+ DR_DUMP_REC_TYPE_TABLE = 3100,
+ DR_DUMP_REC_TYPE_TABLE_RX = 3101,
+ DR_DUMP_REC_TYPE_TABLE_TX = 3102,
+
+ DR_DUMP_REC_TYPE_MATCHER = 3200,
+ DR_DUMP_REC_TYPE_MATCHER_MASK = 3201,
+ DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
+ DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
+ DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
+
+ DR_DUMP_REC_TYPE_RULE = 3300,
+ DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
+ DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0 = 3302,
+ DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 = 3303,
+ DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1 = 3304,
+
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L2 = 3400,
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L3 = 3401,
+ DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR = 3402,
+ DR_DUMP_REC_TYPE_ACTION_DROP = 3403,
+ DR_DUMP_REC_TYPE_ACTION_QP = 3404,
+ DR_DUMP_REC_TYPE_ACTION_FT = 3405,
+ DR_DUMP_REC_TYPE_ACTION_CTR = 3406,
+ DR_DUMP_REC_TYPE_ACTION_TAG = 3407,
+ DR_DUMP_REC_TYPE_ACTION_VPORT = 3408,
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L2 = 3409,
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L3 = 3410,
+ DR_DUMP_REC_TYPE_ACTION_DEVX_TIR = 3411,
+ DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN = 3412,
+ DR_DUMP_REC_TYPE_ACTION_POP_VLAN = 3413,
+ DR_DUMP_REC_TYPE_ACTION_SAMPLER = 3415,
+ DR_DUMP_REC_TYPE_ACTION_INSERT_HDR = 3420,
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421
+};
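
For orientation, every dump record is a single CSV line whose first field is one of the numeric record types above, followed by object IDs (pointers masked to their low 32 bits by DR_DBG_PTR_TO_ID) and type-specific fields. A hypothetical pair of lines, with invented values, showing a rule record (3300) under a matcher and a drop action (3403) attached to that rule:

3300,0x12340000,0x56780000
3403,0x9abc0000,0x12340000
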
+
+void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
+{
+ mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
+ list_add_tail(&tbl->dbg_node, &tbl->dmn->dbg_tbl_list);
+ mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
+}
+
+void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl)
+{
+ mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
+ list_del(&tbl->dbg_node);
+ mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
+}
+
+void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+
+ mutex_lock(&dmn->dump_info.dbg_mutex);
+ list_add_tail(&rule->dbg_node, &rule->matcher->dbg_rule_list);
+ mutex_unlock(&dmn->dump_info.dbg_mutex);
+}
+
+void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+
+ mutex_lock(&dmn->dump_info.dbg_mutex);
+ list_del(&rule->dbg_node);
+ mutex_unlock(&dmn->dump_info.dbg_mutex);
+}
+
+static u64 dr_dump_icm_to_idx(u64 icm_addr)
+{
+ return (icm_addr >> 6) & 0xffffffff;
+}
+
+#define DR_HEX_SIZE 256
+
+static void
+dr_dump_hex_print(char hex[DR_HEX_SIZE], char *src, u32 size)
+{
+ if (WARN_ON_ONCE(DR_HEX_SIZE < 2 * size + 1))
+ size = DR_HEX_SIZE / 2 - 1; /* truncate */
+
+ bin2hex(hex, src, size);
+ hex[2 * size] = 0; /* NULL-terminate */
+}
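
As a concrete illustration of the helper above (values invented): passing the two bytes { 0xab, 0xcd } with size 2 yields the NUL-terminated string "abcd", since bin2hex() emits lowercase hex digits; any size above 127 bytes is clamped so that the output still fits in DR_HEX_SIZE.
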
+
+static int
+dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
+ struct mlx5dr_rule_action_member *action_mem)
+{
+ struct mlx5dr_action *action = action_mem->action;
+ const u64 action_id = DR_DBG_PTR_TO_ID(action);
+
+ switch (action->action_type) {
+ case DR_ACTION_TYP_DROP:
+ seq_printf(file, "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id);
+ break;
+ case DR_ACTION_TYP_FT:
+ if (action->dest_tbl->is_fw_tbl)
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->fw_tbl.id);
+ else
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->tbl->table_id);
+
+ break;
+ case DR_ACTION_TYP_CTR:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
+ action->ctr->ctr_id + action->ctr->offset);
+ break;
+ case DR_ACTION_TYP_TAG:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
+ action->flow_tag->flow_tag);
+ break;
+ case DR_ACTION_TYP_MODIFY_HDR:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
+ rule_id, action->rewrite->index);
+ break;
+ case DR_ACTION_TYP_VPORT:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
+ action->vport->caps->num);
+ break;
+ case DR_ACTION_TYP_TNL_L2_TO_L2:
+ seq_printf(file, "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
+ rule_id);
+ break;
+ case DR_ACTION_TYP_TNL_L3_TO_L2:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
+ rule_id, action->rewrite->index);
+ break;
+ case DR_ACTION_TYP_L2_TO_TNL_L2:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
+ rule_id, action->reformat->id);
+ break;
+ case DR_ACTION_TYP_L2_TO_TNL_L3:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
+ rule_id, action->reformat->id);
+ break;
+ case DR_ACTION_TYP_POP_VLAN:
+ seq_printf(file, "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
+ rule_id);
+ break;
+ case DR_ACTION_TYP_PUSH_VLAN:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
+ rule_id, action->push_vlan->vlan_hdr);
+ break;
+ case DR_ACTION_TYP_INSERT_HDR:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ break;
+ case DR_ACTION_TYP_REMOVE_HDR:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ break;
+ case DR_ACTION_TYP_SAMPLER:
+ seq_printf(file,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id,
+ 0, 0, action->sampler->sampler_id,
+ action->sampler->rx_icm_addr,
+ action->sampler->tx_icm_addr);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
+ bool is_rx, const u64 rule_id, u8 format_ver)
+{
+ char hw_ste_dump[DR_HEX_SIZE];
+ u32 mem_rec_type;
+
+ if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) {
+ mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
+ DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0;
+ } else {
+ mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 :
+ DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1;
+ }
+
+ dr_dump_hex_print(hw_ste_dump, (char *)ste->hw_ste, DR_STE_SIZE_REDUCED);
+
+ seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
+ dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
+ hw_ste_dump);
+
+ return 0;
+}
+
+static int
+dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
+ bool is_rx, const u64 rule_id, u8 format_ver)
+{
+ struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
+ struct mlx5dr_ste *curr_ste = rule_rx_tx->last_rule_ste;
+ int ret, i;
+
+ if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
+ return 0;
+
+ while (i--) {
+ ret = dr_dump_rule_mem(file, ste_arr[i], is_rx, rule_id,
+ format_ver);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_rule_action_member *action_mem;
+ const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
+ struct mlx5dr_rule_rx_tx *rx = &rule->rx;
+ struct mlx5dr_rule_rx_tx *tx = &rule->tx;
+ u8 format_ver;
+ int ret;
+
+ format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;
+
+ seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id,
+ DR_DBG_PTR_TO_ID(rule->matcher));
+
+ if (rx->nic_matcher) {
+ ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (tx->nic_matcher) {
+ ret = dr_dump_rule_rx_tx(file, tx, false, rule_id, format_ver);
+ if (ret < 0)
+ return ret;
+ }
+
+ list_for_each_entry(action_mem, &rule->rule_actions_list, list) {
+ ret = dr_dump_rule_action_mem(file, rule_id, action_mem);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
+ u8 criteria, const u64 matcher_id)
+{
+ char dump[DR_HEX_SIZE];
+
+ seq_printf(file, "%d,0x%llx,", DR_DUMP_REC_TYPE_MATCHER_MASK,
+ matcher_id);
+
+ if (criteria & DR_MATCHER_CRITERIA_OUTER) {
+ dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
+ seq_printf(file, "%s,", dump);
+ } else {
+ seq_puts(file, ",");
+ }
+
+ if (criteria & DR_MATCHER_CRITERIA_INNER) {
+ dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
+ seq_printf(file, "%s,", dump);
+ } else {
+ seq_puts(file, ",");
+ }
+
+ if (criteria & DR_MATCHER_CRITERIA_MISC) {
+ dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
+ seq_printf(file, "%s,", dump);
+ } else {
+ seq_puts(file, ",");
+ }
+
+ if (criteria & DR_MATCHER_CRITERIA_MISC2) {
+ dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
+ seq_printf(file, "%s,", dump);
+ } else {
+ seq_puts(file, ",");
+ }
+
+ if (criteria & DR_MATCHER_CRITERIA_MISC3) {
+ dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
+ seq_printf(file, "%s\n", dump);
+ } else {
+ seq_puts(file, ",\n");
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
+ u32 index, bool is_rx, const u64 matcher_id)
+{
+ seq_printf(file, "%d,0x%llx,%d,%d,0x%x\n",
+ DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, is_rx,
+ builder->lu_type);
+
+ return 0;
+}
+
+static int
+dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
+ struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
+ const u64 matcher_id)
+{
+ enum dr_dump_rec_type rec_type;
+ int i, ret;
+
+ rec_type = is_rx ? DR_DUMP_REC_TYPE_MATCHER_RX :
+ DR_DUMP_REC_TYPE_MATCHER_TX;
+
+ seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
+ rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
+ matcher_id, matcher_rx_tx->num_of_builders,
+ dr_dump_icm_to_idx(matcher_rx_tx->s_htbl->chunk->icm_addr),
+ dr_dump_icm_to_idx(matcher_rx_tx->e_anchor->chunk->icm_addr));
+
+ for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
+ ret = dr_dump_matcher_builder(file,
+ &matcher_rx_tx->ste_builder[i],
+ i, is_rx, matcher_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
+ struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
+ u64 matcher_id;
+ int ret;
+
+ matcher_id = DR_DBG_PTR_TO_ID(matcher);
+
+ seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
+ matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio);
+
+ ret = dr_dump_matcher_mask(file, &matcher->mask,
+ matcher->match_criteria, matcher_id);
+ if (ret < 0)
+ return ret;
+
+ if (rx->nic_tbl) {
+ ret = dr_dump_matcher_rx_tx(file, true, rx, matcher_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (tx->nic_tbl) {
+ ret = dr_dump_matcher_rx_tx(file, false, tx, matcher_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_matcher_all(struct seq_file *file, struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_rule *rule;
+ int ret;
+
+ ret = dr_dump_matcher(file, matcher);
+ if (ret < 0)
+ return ret;
+
+ list_for_each_entry(rule, &matcher->dbg_rule_list, dbg_node) {
+ ret = dr_dump_rule(file, rule);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
+ struct mlx5dr_table_rx_tx *table_rx_tx,
+ const u64 table_id)
+{
+ enum dr_dump_rec_type rec_type;
+
+ rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
+ DR_DUMP_REC_TYPE_TABLE_TX;
+
+ seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
+ dr_dump_icm_to_idx(table_rx_tx->s_anchor->chunk->icm_addr));
+
+ return 0;
+}
+
+static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
+{
+ struct mlx5dr_table_rx_tx *rx = &table->rx;
+ struct mlx5dr_table_rx_tx *tx = &table->tx;
+ int ret;
+
+ seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
+ DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
+ table->table_type, table->level);
+
+ if (rx->nic_dmn) {
+ ret = dr_dump_table_rx_tx(file, true, rx,
+ DR_DBG_PTR_TO_ID(table));
+ if (ret < 0)
+ return ret;
+ }
+
+ if (tx->nic_dmn) {
+ ret = dr_dump_table_rx_tx(file, false, tx,
+ DR_DBG_PTR_TO_ID(table));
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl)
+{
+ struct mlx5dr_matcher *matcher;
+ int ret;
+
+ ret = dr_dump_table(file, tbl);
+ if (ret < 0)
+ return ret;
+
+ list_for_each_entry(matcher, &tbl->matcher_list, list_node) {
+ ret = dr_dump_matcher_all(file, matcher);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int
+dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
+ const u64 domain_id)
+{
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, DR_DBG_PTR_TO_ID(ring),
+ domain_id, ring->cq->mcq.cqn, ring->qp->qpn);
+ return 0;
+}
+
+static int
+dr_dump_domain_info_flex_parser(struct seq_file *file,
+ const char *flex_parser_name,
+ const u8 flex_parser_value,
+ const u64 domain_id)
+{
+ seq_printf(file, "%d,0x%llx,%s,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
+ flex_parser_name, flex_parser_value);
+ return 0;
+}
+
+static int
+dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
+ const u64 domain_id)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+ unsigned long i, vports_num;
+
+ xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
+ ; /* count the number of vports in xarray */
+
+ seq_printf(file, "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
+ caps->nic_rx_drop_address, caps->nic_tx_drop_address,
+ caps->flex_protocols, vports_num, caps->eswitch_manager);
+
+ xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
+ vport_caps = xa_load(&caps->vports.vports_caps_xa, i);
+
+ seq_printf(file, "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, domain_id, i,
+ vport_caps->vport_gvmi, vport_caps->icm_address_rx,
+ vport_caps->icm_address_tx);
+ }
+ return 0;
+}
+
+static int
+dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
+ const u64 domain_id)
+{
+ int ret;
+
+ ret = dr_dump_domain_info_caps(file, &info->caps, domain_id);
+ if (ret < 0)
+ return ret;
+
+ ret = dr_dump_domain_info_flex_parser(file, "icmp_dw0",
+ info->caps.flex_parser_id_icmp_dw0,
+ domain_id);
+ if (ret < 0)
+ return ret;
+
+ ret = dr_dump_domain_info_flex_parser(file, "icmp_dw1",
+ info->caps.flex_parser_id_icmp_dw1,
+ domain_id);
+ if (ret < 0)
+ return ret;
+
+ ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw0",
+ info->caps.flex_parser_id_icmpv6_dw0,
+ domain_id);
+ if (ret < 0)
+ return ret;
+
+ ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw1",
+ info->caps.flex_parser_id_icmpv6_dw1,
+ domain_id);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
+{
+ u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
+ int ret;
+
+ seq_printf(file, "%d,0x%llx,%d,0%x,%d,%s\n", DR_DUMP_REC_TYPE_DOMAIN,
+ domain_id, dmn->type, dmn->info.caps.gvmi,
+ dmn->info.supp_sw_steering, pci_name(dmn->mdev->pdev));
+
+ ret = dr_dump_domain_info(file, &dmn->info, domain_id);
+ if (ret < 0)
+ return ret;
+
+ if (dmn->info.supp_sw_steering) {
+ ret = dr_dump_send_ring(file, dmn->send_ring, domain_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dr_dump_domain_all(struct seq_file *file, struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_table *tbl;
+ int ret;
+
+ mutex_lock(&dmn->dump_info.dbg_mutex);
+ mlx5dr_domain_lock(dmn);
+
+ ret = dr_dump_domain(file, dmn);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ list_for_each_entry(tbl, &dmn->dbg_tbl_list, dbg_node) {
+ ret = dr_dump_table_all(file, tbl);
+ if (ret < 0)
+ break;
+ }
+
+unlock_mutex:
+ mlx5dr_domain_unlock(dmn);
+ mutex_unlock(&dmn->dump_info.dbg_mutex);
+ return ret;
+}
+
+static int dr_dump_show(struct seq_file *file, void *priv)
+{
+ return dr_dump_domain_all(file, file->private);
+}
+DEFINE_SHOW_ATTRIBUTE(dr_dump);
+
+void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
+{
+ struct mlx5_core_dev *dev = dmn->mdev;
+ char file_name[128];
+
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
+ mlx5_core_warn(dev,
+ "Steering dump is not supported for NIC RX/TX domains\n");
+ return;
+ }
+
+ dmn->dump_info.steering_debugfs =
+ debugfs_create_dir("steering", dev->priv.dbg_root);
+ dmn->dump_info.fdb_debugfs =
+ debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs);
+
+ sprintf(file_name, "dmn_%p", dmn);
+ debugfs_create_file(file_name, 0444, dmn->dump_info.fdb_debugfs,
+ dmn, &dr_dump_fops);
+
+ INIT_LIST_HEAD(&dmn->dbg_tbl_list);
+ mutex_init(&dmn->dump_info.dbg_mutex);
+}
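
With debugfs mounted in the usual place, the dump for an FDB domain can then be read from a path of the form /sys/kernel/debug/mlx5/<PCI BDF>/steering/fdb/dmn_<pointer> (the exact parent directory is whatever dev->priv.dbg_root points at); each read takes dump_info.dbg_mutex plus the domain lock and walks the domain's tables, matchers and rules via dr_dump_domain_all() above.
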
+
+void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn)
+{
+ debugfs_remove_recursive(dmn->dump_info.steering_debugfs);
+ mutex_destroy(&dmn->dump_info.dbg_mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
new file mode 100644
index 000000000000..def6cf853eea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+struct mlx5dr_dbg_dump_info {
+ struct mutex dbg_mutex; /* protect dbg lists */
+ struct dentry *steering_debugfs;
+ struct dentry *fdb_debugfs;
+};
+
+void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
+void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn);
+void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl);
+void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl);
+void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule);
+void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index c54cc45f63dc..5fa7f9d6d8b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -395,7 +395,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
}
dr_domain_init_csum_recalc_fts(dmn);
-
+ mlx5dr_dbg_init_dump(dmn);
return dmn;
uninit_caps:
@@ -431,11 +431,12 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
- if (refcount_read(&dmn->refcount) > 1)
+ if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
return -EBUSY;
/* make sure resources are not used by the hardware */
mlx5dr_cmd_sync_steering(dmn->mdev);
+ mlx5dr_dbg_uninit_dump(dmn);
dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 793365242e85..e87cf498c77b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -141,6 +141,19 @@ static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
}
static bool
+dr_matcher_supp_flex_parser_ok(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_parser_ok_bits_supp;
+}
+
+static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *misc,
+ struct mlx5dr_domain *dmn)
+{
+ return dr_matcher_supp_flex_parser_ok(&dmn->info.caps) &&
+ misc->geneve_tlv_option_0_exist;
+}
+
+static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
@@ -359,7 +372,7 @@ static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
+ return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
}
static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
@@ -368,6 +381,12 @@ static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
}
+
+static bool dr_mask_is_tnl_header_0_1_set(struct mlx5dr_match_misc5 *misc5)
+{
+ return misc5->tunnel_header_0 || misc5->tunnel_header_1;
+}
+
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv,
@@ -424,6 +443,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
mask.misc4 = matcher->mask.misc4;
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC5)
+ mask.misc5 = matcher->mask.misc5;
+
ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
&matcher->mask, NULL);
if (ret)
@@ -443,7 +465,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
DR_MATCHER_CRITERIA_MISC |
DR_MATCHER_CRITERIA_MISC2 |
- DR_MATCHER_CRITERIA_MISC3)) {
+ DR_MATCHER_CRITERIA_MISC3 |
+ DR_MATCHER_CRITERIA_MISC5)) {
inner = false;
if (dr_mask_is_wqe_metadata_set(&mask.misc2))
@@ -511,6 +534,10 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
+ if (dr_mask_is_tnl_geneve_tlv_opt_exist_set(&mask.misc, dmn))
+ mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
} else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
@@ -525,6 +552,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_tnl_gtpu(&mask, dmn))
mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
&mask, inner, rx);
+ } else if (dr_mask_is_tnl_header_0_1_set(&mask.misc5)) {
+ mlx5dr_ste_build_tnl_header_0_1(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
@@ -653,10 +683,10 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
return 0;
}
-static int dr_matcher_connect(struct mlx5dr_domain *dmn,
- struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
- struct mlx5dr_matcher_rx_tx *next_nic_matcher,
- struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
+static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *next_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl;
struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
@@ -712,58 +742,50 @@ static int dr_matcher_connect(struct mlx5dr_domain *dmn,
return 0;
}
-static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
+int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
{
- struct mlx5dr_matcher *next_matcher, *prev_matcher, *tmp_matcher;
- struct mlx5dr_table *tbl = matcher->tbl;
- struct mlx5dr_domain *dmn = tbl->dmn;
+ struct mlx5dr_matcher_rx_tx *next_nic_matcher, *prev_nic_matcher, *tmp_nic_matcher;
+ struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
bool first = true;
int ret;
- next_matcher = NULL;
- list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
- if (tmp_matcher->prio >= matcher->prio) {
- next_matcher = tmp_matcher;
+ /* If the nic matcher is already on its parent nic table list,
+ * then it is already connected to the chain of nic matchers.
+ */
+ if (!list_empty(&nic_matcher->list_node))
+ return 0;
+
+ next_nic_matcher = NULL;
+ list_for_each_entry(tmp_nic_matcher, &nic_tbl->nic_matcher_list, list_node) {
+ if (tmp_nic_matcher->prio >= nic_matcher->prio) {
+ next_nic_matcher = tmp_nic_matcher;
break;
}
first = false;
}
- prev_matcher = NULL;
- if (next_matcher && !first)
- prev_matcher = list_prev_entry(next_matcher, matcher_list);
+ prev_nic_matcher = NULL;
+ if (next_nic_matcher && !first)
+ prev_nic_matcher = list_prev_entry(next_nic_matcher, list_node);
else if (!first)
- prev_matcher = list_last_entry(&tbl->matcher_list,
- struct mlx5dr_matcher,
- matcher_list);
-
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
- dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
- ret = dr_matcher_connect(dmn, &matcher->rx,
- next_matcher ? &next_matcher->rx : NULL,
- prev_matcher ? &prev_matcher->rx : NULL);
- if (ret)
- return ret;
- }
+ prev_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
+ struct mlx5dr_matcher_rx_tx,
+ list_node);
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
- dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
- ret = dr_matcher_connect(dmn, &matcher->tx,
- next_matcher ? &next_matcher->tx : NULL,
- prev_matcher ? &prev_matcher->tx : NULL);
- if (ret)
- return ret;
- }
+ ret = dr_nic_matcher_connect(dmn, nic_matcher,
+ next_nic_matcher, prev_nic_matcher);
+ if (ret)
+ return ret;
- if (prev_matcher)
- list_add(&matcher->matcher_list, &prev_matcher->matcher_list);
- else if (next_matcher)
- list_add_tail(&matcher->matcher_list,
- &next_matcher->matcher_list);
+ if (prev_nic_matcher)
+ list_add(&nic_matcher->list_node, &prev_nic_matcher->list_node);
+ else if (next_nic_matcher)
+ list_add_tail(&nic_matcher->list_node, &next_nic_matcher->list_node);
else
- list_add(&matcher->matcher_list, &tbl->matcher_list);
+ list_add(&nic_matcher->list_node, &nic_matcher->nic_tbl->nic_matcher_list);
- return 0;
+ return ret;
}
static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher)
@@ -822,6 +844,9 @@ static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
int ret;
+ nic_matcher->prio = matcher->prio;
+ INIT_LIST_HEAD(&nic_matcher->list_node);
+
ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
if (ret)
return ret;
@@ -872,13 +897,12 @@ uninit_nic_rx:
return ret;
}
-static int dr_matcher_init(struct mlx5dr_matcher *matcher,
- struct mlx5dr_match_parameters *mask)
+static int dr_matcher_copy_param(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *mask)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_parameters consumed_mask;
- struct mlx5dr_table *tbl = matcher->tbl;
- struct mlx5dr_domain *dmn = tbl->dmn;
- int i, ret;
+ int i, ret = 0;
if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
mlx5dr_err(dmn, "Invalid match criteria attribute\n");
@@ -898,10 +922,36 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
consumed_mask.match_sz = mask->match_sz;
memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz);
mlx5dr_ste_copy_param(matcher->match_criteria,
- &matcher->mask, &consumed_mask,
- true);
+ &matcher->mask, &consumed_mask, true);
+
+ /* Check that all mask data was consumed */
+ for (i = 0; i < consumed_mask.match_sz; i++) {
+ if (!((u8 *)consumed_mask.match_buf)[i])
+ continue;
+
+ mlx5dr_dbg(dmn,
+ "Match param mask contains unsupported parameters\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ kfree(consumed_mask.match_buf);
}
+ return ret;
+}
+
+static int dr_matcher_init(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *mask)
+{
+ struct mlx5dr_table *tbl = matcher->tbl;
+ struct mlx5dr_domain *dmn = tbl->dmn;
+ int ret;
+
+ ret = dr_matcher_copy_param(matcher, mask);
+ if (ret)
+ return ret;
+
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
matcher->rx.nic_tbl = &tbl->rx;
@@ -919,23 +969,23 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
default:
WARN_ON(true);
ret = -EINVAL;
- goto free_consumed_mask;
}
- /* Check that all mask data was consumed */
- for (i = 0; i < consumed_mask.match_sz; i++) {
- if (!((u8 *)consumed_mask.match_buf)[i])
- continue;
+ return ret;
+}
- mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
- ret = -EOPNOTSUPP;
- goto free_consumed_mask;
- }
+static void dr_matcher_add_to_dbg_list(struct mlx5dr_matcher *matcher)
+{
+ mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
+ list_add(&matcher->list_node, &matcher->tbl->matcher_list);
+ mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
+}
- ret = 0;
-free_consumed_mask:
- kfree(consumed_mask.match_buf);
- return ret;
+static void dr_matcher_remove_from_dbg_list(struct mlx5dr_matcher *matcher)
+{
+ mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
+ list_del(&matcher->list_node);
+ mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
}
struct mlx5dr_matcher *
@@ -957,7 +1007,8 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
matcher->prio = priority;
matcher->match_criteria = match_criteria_enable;
refcount_set(&matcher->refcount, 1);
- INIT_LIST_HEAD(&matcher->matcher_list);
+ INIT_LIST_HEAD(&matcher->list_node);
+ INIT_LIST_HEAD(&matcher->dbg_rule_list);
mlx5dr_domain_lock(tbl->dmn);
@@ -965,16 +1016,12 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
if (ret)
goto free_matcher;
- ret = dr_matcher_add_to_tbl(matcher);
- if (ret)
- goto matcher_uninit;
+ dr_matcher_add_to_dbg_list(matcher);
mlx5dr_domain_unlock(tbl->dmn);
return matcher;
-matcher_uninit:
- dr_matcher_uninit(matcher);
free_matcher:
mlx5dr_domain_unlock(tbl->dmn);
kfree(matcher);
@@ -983,10 +1030,10 @@ dec_ref:
return NULL;
}
-static int dr_matcher_disconnect(struct mlx5dr_domain *dmn,
- struct mlx5dr_table_rx_tx *nic_tbl,
- struct mlx5dr_matcher_rx_tx *next_nic_matcher,
- struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
+static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_table_rx_tx *nic_tbl,
+ struct mlx5dr_matcher_rx_tx *next_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
struct mlx5dr_htbl_connect_info info;
@@ -1013,43 +1060,34 @@ static int dr_matcher_disconnect(struct mlx5dr_domain *dmn,
&info, true);
}
-static int dr_matcher_remove_from_tbl(struct mlx5dr_matcher *matcher)
+int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
{
- struct mlx5dr_matcher *prev_matcher, *next_matcher;
- struct mlx5dr_table *tbl = matcher->tbl;
- struct mlx5dr_domain *dmn = tbl->dmn;
- int ret = 0;
+ struct mlx5dr_matcher_rx_tx *prev_nic_matcher, *next_nic_matcher;
+ struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
+ int ret;
- if (list_is_last(&matcher->matcher_list, &tbl->matcher_list))
- next_matcher = NULL;
- else
- next_matcher = list_next_entry(matcher, matcher_list);
+ /* If the nic matcher is not on its parent nic table list,
+ * then it is detached - no need to disconnect it.
+ */
+ if (list_empty(&nic_matcher->list_node))
+ return 0;
- if (matcher->matcher_list.prev == &tbl->matcher_list)
- prev_matcher = NULL;
+ if (list_is_last(&nic_matcher->list_node, &nic_tbl->nic_matcher_list))
+ next_nic_matcher = NULL;
else
- prev_matcher = list_prev_entry(matcher, matcher_list);
-
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
- dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
- ret = dr_matcher_disconnect(dmn, &tbl->rx,
- next_matcher ? &next_matcher->rx : NULL,
- prev_matcher ? &prev_matcher->rx : NULL);
- if (ret)
- return ret;
- }
+ next_nic_matcher = list_next_entry(nic_matcher, list_node);
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
- dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
- ret = dr_matcher_disconnect(dmn, &tbl->tx,
- next_matcher ? &next_matcher->tx : NULL,
- prev_matcher ? &prev_matcher->tx : NULL);
- if (ret)
- return ret;
- }
+ if (nic_matcher->list_node.prev == &nic_tbl->nic_matcher_list)
+ prev_nic_matcher = NULL;
+ else
+ prev_nic_matcher = list_prev_entry(nic_matcher, list_node);
- list_del(&matcher->matcher_list);
+ ret = dr_matcher_disconnect_nic(dmn, nic_tbl, next_nic_matcher, prev_nic_matcher);
+ if (ret)
+ return ret;
+ list_del_init(&nic_matcher->list_node);
return 0;
}
@@ -1057,12 +1095,12 @@ int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
{
struct mlx5dr_table *tbl = matcher->tbl;
- if (refcount_read(&matcher->refcount) > 1)
+ if (WARN_ON_ONCE(refcount_read(&matcher->refcount) > 1))
return -EBUSY;
mlx5dr_domain_lock(tbl->dmn);
- dr_matcher_remove_from_tbl(matcher);
+ dr_matcher_remove_from_dbg_list(matcher);
dr_matcher_uninit(matcher);
refcount_dec(&matcher->tbl->refcount);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 6a390e981b09..b4374578425b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -5,11 +5,6 @@
#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
-struct mlx5dr_rule_action_member {
- struct mlx5dr_action *action;
- struct list_head list;
-};
-
static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list,
@@ -979,14 +974,36 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
return false;
}
}
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
+ s_idx = offsetof(struct mlx5dr_match_param, misc5);
+ e_idx = min(s_idx + sizeof(param->misc5), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contain a value not specified by mask\n");
+ return false;
+ }
+ }
return true;
}
static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule)
{
+ /* Check whether this nic rule was actually created, or whether it was
+ * skipped and only the other type (RX/TX) of nic rule was created.
+ */
+ if (!nic_rule->last_rule_ste)
+ return 0;
+
mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
dr_rule_clean_rule_members(rule, nic_rule);
+
+ nic_rule->nic_matcher->rules--;
+ if (!nic_rule->nic_matcher->rules)
+ mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
+ nic_rule->nic_matcher);
+
mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
return 0;
@@ -1003,6 +1020,8 @@ static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{
struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+ mlx5dr_dbg_rule_del(rule);
+
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
dr_rule_destroy_rule_nic(rule, &rule->rx);
@@ -1091,24 +1110,28 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_lock(nic_dmn);
+ ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
+ if (ret)
+ goto free_hw_ste;
+
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
- goto free_hw_ste;
+ goto remove_from_nic_tbl;
/* Set the tag values inside the ste array */
ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
if (ret)
- goto free_hw_ste;
+ goto remove_from_nic_tbl;
/* Set the actions values/addresses inside the ste array */
ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
num_actions, hw_ste_arr,
&new_hw_ste_arr_sz);
if (ret)
- goto free_hw_ste;
+ goto remove_from_nic_tbl;
cur_htbl = nic_matcher->s_htbl;
@@ -1155,6 +1178,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (htbl)
mlx5dr_htbl_put(htbl);
+ nic_matcher->rules++;
+
mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr);
@@ -1168,6 +1193,10 @@ free_rule:
list_del(&ste_info->send_list);
kfree(ste_info);
}
+
+remove_from_nic_tbl:
+ mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
+
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr);
@@ -1257,6 +1286,8 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher,
if (ret)
goto remove_action_members;
+ INIT_LIST_HEAD(&rule->dbg_node);
+ mlx5dr_dbg_rule_add(rule);
return rule;
remove_action_members:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 219a5474a8a4..7e61742e58a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -719,6 +719,8 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bo
spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);
spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
+ spec->geneve_tlv_option_0_exist =
+ IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);
spec->outer_ipv6_flow_label =
@@ -880,6 +882,26 @@ static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec,
IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
+static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
+{
+ spec->macsec_tag_0 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
+ spec->macsec_tag_1 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
+ spec->macsec_tag_2 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
+ spec->macsec_tag_3 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
+ spec->tunnel_header_0 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
+ spec->tunnel_header_1 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
+ spec->tunnel_header_2 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
+ spec->tunnel_header_3 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
+}
+
void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param,
struct mlx5dr_match_parameters *mask,
@@ -966,6 +988,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
}
dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
}
+
+ param_location += sizeof(struct mlx5dr_match_misc4);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_misc5)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
+ }
}
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1180,6 +1216,21 @@ void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
}
+void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
+ return;
+
+ sb->rx = rx;
+ sb->caps = caps;
+ sb->inner = inner;
+ ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
+}
+
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -1269,6 +1320,16 @@ void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_flex_parser_1_init(sb, mask);
}
+void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->inner = inner;
+ ste_ctx->build_tnl_header_0_1_init(sb, mask);
+}
+
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 2d52d065dc8b..ca8fa32b8680 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -135,12 +135,14 @@ struct mlx5dr_ste_ctx {
void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
void DR_STE_CTX_BUILDER(tnl_geneve);
void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt);
+ void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt_exist);
void DR_STE_CTX_BUILDER(register_0);
void DR_STE_CTX_BUILDER(register_1);
void DR_STE_CTX_BUILDER(src_gvmi_qpn);
void DR_STE_CTX_BUILDER(flex_parser_0);
void DR_STE_CTX_BUILDER(flex_parser_1);
void DR_STE_CTX_BUILDER(tnl_gtpu);
+ void DR_STE_CTX_BUILDER(tnl_header_0_1);
void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0);
void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index b0649c2877dd..2d62950f7a29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -80,6 +80,7 @@ enum {
DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18,
DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
+ DR_STE_V0_LU_TYPE_TUNNEL_HEADER = 0x34,
DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
};
@@ -1704,7 +1705,7 @@ static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
u32 id = *misc4_field_id;
u8 *parser_ptr;
- if (parser_is_used[id])
+ if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
return;
parser_is_used[id] = true;
@@ -1875,6 +1876,27 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
}
+static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ uint8_t *tag)
+{
+ struct mlx5dr_match_misc5 *misc5 = &value->misc5;
+
+ DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
+ DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
+
+ return 0;
+}
+
+static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V0_LU_TYPE_TUNNEL_HEADER;
+ dr_ste_v0_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
+}
+
struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
@@ -1903,6 +1925,7 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.build_flex_parser_0_init = &dr_ste_v0_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v0_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v0_build_tnl_header_0_1_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index cb9cf67b0a02..6ca06800f1d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -47,6 +47,7 @@ enum {
DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
@@ -1713,6 +1714,27 @@ dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
}
+static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ uint8_t *tag)
+{
+ struct mlx5dr_match_misc5 *misc5 = &value->misc5;
+
+ DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
+ DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
+}
+
static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
@@ -1833,7 +1855,7 @@ static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
u32 id = *misc4_field_id;
u8 *parser_ptr;
- if (parser_is_used[id])
+ if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
return;
parser_is_used[id] = true;
@@ -1921,6 +1943,32 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
}
+static int
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ uint8_t *tag)
+{
+ u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ if (misc->geneve_tlv_option_0_exist) {
+ MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
+ misc->geneve_tlv_option_0_exist = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
+ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
+}
+
static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
@@ -2020,12 +2068,14 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
.build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
.build_register_0_init = &dr_ste_v1_build_register_0_init,
.build_register_1_init = &dr_ste_v1_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
.build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index 30ae3cda6d2e..8ca110643cc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -3,69 +3,66 @@
#include "dr_types.h"
-int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
- struct mlx5dr_action *action)
+static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_table_rx_tx *nic_tbl,
+ struct mlx5dr_action *action)
{
- struct mlx5dr_matcher *last_matcher = NULL;
+ struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *last_htbl;
int ret;
+ if (!list_empty(&nic_tbl->nic_matcher_list))
+ last_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
+ struct mlx5dr_matcher_rx_tx,
+ list_node);
+
+ if (last_nic_matcher)
+ last_htbl = last_nic_matcher->e_anchor;
+ else
+ last_htbl = nic_tbl->s_anchor;
+
+ if (action)
+ nic_tbl->default_icm_addr =
+ nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
+ action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
+ action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
+ else
+ nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
+
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_tbl->default_icm_addr;
+
+ ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_tbl->nic_dmn,
+ last_htbl, &info, true);
+ if (ret)
+ mlx5dr_dbg(dmn, "Failed to set NIC RX/TX miss action, ret %d\n", ret);
+
+ return ret;
+}
+
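dr_table_set_miss_action_nic collapses what used to be two nearly identical RX and TX blocks into a single helper that receives the per-direction table state, which the rewritten mlx5dr_table_set_miss_action below then calls once per direction. The refactoring shape in miniature, with invented state and addresses:

#include <stdio.h>

/* The per-direction branches collapse into one helper that receives
 * the direction-specific state; names and addresses are invented.
 */
struct nic_tbl {
	const char *name;
	unsigned long default_icm_addr;
};

static int set_miss_action_nic(struct nic_tbl *nic, unsigned long miss_addr)
{
	nic->default_icm_addr = miss_addr;
	printf("%s miss -> %#lx\n", nic->name, miss_addr);
	return 0;
}

int main(void)
{
	struct nic_tbl rx = { .name = "rx" }, tx = { .name = "tx" };
	int err;

	err = set_miss_action_nic(&rx, 0x1000);	/* formerly the open-coded RX block */
	if (err)
		return err;
	return set_miss_action_nic(&tx, 0x2000);	/* formerly the open-coded TX block */
}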
+int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+ struct mlx5dr_action *action)
+{
+ int ret;
+
if (action && action->action_type != DR_ACTION_TYP_FT)
return -EOPNOTSUPP;
mlx5dr_domain_lock(tbl->dmn);
- if (!list_empty(&tbl->matcher_list))
- last_matcher = list_last_entry(&tbl->matcher_list,
- struct mlx5dr_matcher,
- matcher_list);
-
if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
- if (last_matcher)
- last_htbl = last_matcher->rx.e_anchor;
- else
- last_htbl = tbl->rx.s_anchor;
-
- tbl->rx.default_icm_addr = action ?
- action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
- tbl->rx.nic_dmn->default_icm_addr;
-
- info.type = CONNECT_MISS;
- info.miss_icm_addr = tbl->rx.default_icm_addr;
-
- ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
- tbl->rx.nic_dmn,
- last_htbl,
- &info, true);
- if (ret) {
- mlx5dr_dbg(tbl->dmn, "Failed to set RX miss action, ret %d\n", ret);
+ ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->rx, action);
+ if (ret)
goto out;
- }
}
if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
- if (last_matcher)
- last_htbl = last_matcher->tx.e_anchor;
- else
- last_htbl = tbl->tx.s_anchor;
-
- tbl->tx.default_icm_addr = action ?
- action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr :
- tbl->tx.nic_dmn->default_icm_addr;
-
- info.type = CONNECT_MISS;
- info.miss_icm_addr = tbl->tx.default_icm_addr;
-
- ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
- tbl->tx.nic_dmn,
- last_htbl, &info, true);
- if (ret) {
- mlx5dr_dbg(tbl->dmn, "Failed to set TX miss action, ret %d\n", ret);
+ ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->tx, action);
+ if (ret)
goto out;
- }
}
/* Release old action */
@@ -122,6 +119,8 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_htbl_connect_info info;
int ret;
+ INIT_LIST_HEAD(&nic_tbl->nic_matcher_list);
+
nic_tbl->default_icm_addr = nic_dmn->default_icm_addr;
nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -266,6 +265,8 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u
if (ret)
goto uninit_tbl;
+ INIT_LIST_HEAD(&tbl->dbg_node);
+ mlx5dr_dbg_tbl_add(tbl);
return tbl;
uninit_tbl:
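The debug additions here and in dr_types.h below lean on the kernel's intrusive lists: each table embeds a list_head (dbg_node) and the domain owns the list head, so mlx5dr_dbg_tbl_add/del are constant-time pointer splices with no allocation. A userspace miniature of that machinery, assuming nothing about the real dr_dbg implementation:

#include <stdio.h>

/* Minimal stand-ins for the kernel's intrusive list primitives. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

struct table {
	int id;
	struct list_head dbg_node;	/* linked into a domain-wide list */
};

int main(void)
{
	struct list_head dbg_tbl_list;
	struct table t = { .id = 1 };

	INIT_LIST_HEAD(&dbg_tbl_list);
	INIT_LIST_HEAD(&t.dbg_node);
	list_add_tail(&t.dbg_node, &dbg_tbl_list);	/* like mlx5dr_dbg_tbl_add */
	list_del(&t.dbg_node);				/* like mlx5dr_dbg_tbl_del */
	printf("table %d tracked and untracked\n", t.id);
	return 0;
}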
@@ -281,9 +282,10 @@ int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
{
int ret;
- if (refcount_read(&tbl->refcount) > 1)
+ if (WARN_ON_ONCE(refcount_read(&tbl->refcount) > 1))
return -EBUSY;
+ mlx5dr_dbg_tbl_del(tbl);
ret = dr_table_destroy_sw_owned_tbl(tbl);
if (ret)
return ret;
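Wrapping the refcount test in WARN_ON_ONCE turns a silent -EBUSY into a loud one: the first occurrence of the should-never-happen state is reported with a backtrace, while the function still fails gracefully every time. A rough userspace equivalent of the warn-once-then-fail pattern:

#include <stdio.h>

/* Rough stand-in for WARN_ON_ONCE(): report the first hit of a
 * should-never-happen condition, but always return it for handling.
 */
static int warn_on_once(int cond, const char *what)
{
	static int warned;

	if (cond && !warned) {
		warned = 1;
		fprintf(stderr, "WARNING: %s\n", what);
	}
	return cond;
}

static int table_destroy(int refcount)
{
	if (warn_on_once(refcount > 1, "destroying a referenced table"))
		return -16; /* -EBUSY */
	return 0;
}

int main(void)
{
	table_destroy(3); /* warns once, returns -EBUSY */
	table_destroy(3); /* silent, still -EBUSY */
	return table_destroy(1);
}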
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 2333c2439c28..1b3d484b99be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -11,6 +11,7 @@
#include "lib/mlx5.h"
#include "mlx5_ifc_dr.h"
#include "mlx5dr.h"
+#include "dr_dbg.h"
#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
@@ -104,7 +105,8 @@ enum mlx5dr_matcher_criteria {
DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
- DR_MATCHER_CRITERIA_MAX = 1 << 6,
+ DR_MATCHER_CRITERIA_MISC5 = 1 << 6,
+ DR_MATCHER_CRITERIA_MAX = 1 << 7,
};
enum mlx5dr_action_type {
@@ -440,6 +442,11 @@ void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
+void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -454,6 +461,10 @@ void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
+void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -494,57 +505,64 @@ struct mlx5dr_match_spec {
/* Incoming packet Ethertype - this is the Ethertype
* following the last VLAN tag of the packet
*/
- u32 ethertype:16;
u32 smac_15_0:16; /* Source MAC address of incoming packet */
+ u32 ethertype:16;
+
u32 dmac_47_16; /* Destination MAC address of incoming packet */
- /* VLAN ID of first VLAN tag in the incoming packet.
+
+ u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
+ /* Priority of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
- u32 first_vid:12;
+ u32 first_prio:3;
/* CFI bit of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
u32 first_cfi:1;
- /* Priority of first VLAN tag in the incoming packet.
+ /* VLAN ID of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
- u32 first_prio:3;
- u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
- /* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
- * Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
+ u32 first_vid:12;
+
+ u32 ip_protocol:8; /* IP protocol */
+ /* Differentiated Services Code Point derived from
+ * Traffic Class/TOS field of IPv6/v4
*/
- u32 tcp_flags:9;
- u32 ip_version:4; /* IP version */
- u32 frag:1; /* Packet is an IP fragment */
- /* The first vlan in the packet is s-vlan (0x8a88).
- * cvlan_tag and svlan_tag cannot be set together
+ u32 ip_dscp:6;
+ /* Explicit Congestion Notification derived from
+ * Traffic Class/TOS field of IPv6/v4
*/
- u32 svlan_tag:1;
+ u32 ip_ecn:2;
/* The first vlan in the packet is c-vlan (0x8100).
* cvlan_tag and svlan_tag cannot be set together
*/
u32 cvlan_tag:1;
- /* Explicit Congestion Notification derived from
- * Traffic Class/TOS field of IPv6/v4
+ /* The first vlan in the packet is s-vlan (0x8a88).
+ * cvlan_tag and svlan_tag cannot be set together
*/
- u32 ip_ecn:2;
- /* Differentiated Services Code Point derived from
- * Traffic Class/TOS field of IPv6/v4
+ u32 svlan_tag:1;
+ u32 frag:1; /* Packet is an IP fragment */
+ u32 ip_version:4; /* IP version */
+ /* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
+ * Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
*/
- u32 ip_dscp:6;
- u32 ip_protocol:8; /* IP protocol */
+ u32 tcp_flags:9;
+
+ /* TCP source port.;tcp and udp sport/dport are mutually exclusive */
+ u32 tcp_sport:16;
/* TCP destination port.
* tcp and udp sport/dport are mutually exclusive
*/
u32 tcp_dport:16;
- /* TCP source port.;tcp and udp sport/dport are mutually exclusive */
- u32 tcp_sport:16;
+
+ u32 reserved_auto1:24;
u32 ttl_hoplimit:8;
- u32 reserved:24;
- /* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
- u32 udp_dport:16;
+
/* UDP source port.;tcp and udp sport/dport are mutually exclusive */
u32 udp_sport:16;
+ /* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
+ u32 udp_dport:16;
+
/* IPv6 source address of incoming packets
* For IPv4 address use bits 31:0 (rest of the bits are reserved)
* This field should be qualified by an appropriate ethertype
@@ -588,96 +606,114 @@ struct mlx5dr_match_spec {
};
struct mlx5dr_match_misc {
- u32 source_sqn:24; /* Source SQN */
- u32 source_vhca_port:4;
- /* used with GRE, sequence number exist when gre_s_present == 1 */
- u32 gre_s_present:1;
- /* used with GRE, key exist when gre_k_present == 1 */
- u32 gre_k_present:1;
- u32 reserved_auto1:1;
/* used with GRE, checksum exist when gre_c_present == 1 */
u32 gre_c_present:1;
+ u32 reserved_auto1:1;
+ /* used with GRE, key exist when gre_k_present == 1 */
+ u32 gre_k_present:1;
+ /* used with GRE, sequence number exist when gre_s_present == 1 */
+ u32 gre_s_present:1;
+ u32 source_vhca_port:4;
+ u32 source_sqn:24; /* Source SQN */
+
+ u32 source_eswitch_owner_vhca_id:16;
/* Source port.;0xffff determines wire port */
u32 source_port:16;
- u32 source_eswitch_owner_vhca_id:16;
- /* VLAN ID of first VLAN tag the inner header of the incoming packet.
- * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
- */
- u32 inner_second_vid:12;
- /* CFI bit of first VLAN tag in the inner header of the incoming packet.
- * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
- */
- u32 inner_second_cfi:1;
- /* Priority of second VLAN tag in the inner header of the incoming packet.
- * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
- */
- u32 inner_second_prio:3;
- /* VLAN ID of first VLAN tag the outer header of the incoming packet.
+
+ /* Priority of second VLAN tag in the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
- u32 outer_second_vid:12;
+ u32 outer_second_prio:3;
/* CFI bit of first VLAN tag in the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
u32 outer_second_cfi:1;
- /* Priority of second VLAN tag in the outer header of the incoming packet.
+ /* VLAN ID of first VLAN tag the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
- u32 outer_second_prio:3;
- u32 gre_protocol:16; /* GRE Protocol (outer) */
- u32 reserved_auto3:12;
- /* The second vlan in the inner header of the packet is s-vlan (0x8a88).
- * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
+ u32 outer_second_vid:12;
+ /* Priority of second VLAN tag in the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
*/
- u32 inner_second_svlan_tag:1;
- /* The second vlan in the outer header of the packet is s-vlan (0x8a88).
- * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
- */
- u32 outer_second_svlan_tag:1;
- /* The second vlan in the inner header of the packet is c-vlan (0x8100).
- * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
- */
- u32 inner_second_cvlan_tag:1;
- /* The second vlan in the outer header of the packet is c-vlan (0x8100).
- * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
- */
- u32 outer_second_cvlan_tag:1;
- u32 gre_key_l:8; /* GRE Key [7:0] (outer) */
+ u32 inner_second_prio:3;
+ /* CFI bit of first VLAN tag in the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
+ */
+ u32 inner_second_cfi:1;
+ /* VLAN ID of first VLAN tag the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
+ */
+ u32 inner_second_vid:12;
+
+ u32 outer_second_cvlan_tag:1;
+ u32 inner_second_cvlan_tag:1;
+ /* The second vlan in the outer header of the packet is s-vlan (0x8a88).
+ * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
+ */
+ u32 outer_second_svlan_tag:1;
+ /* The second vlan in the inner header of the packet is s-vlan (0x8a88).
+ * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
+ */
+ u32 inner_second_svlan_tag:1;
+ u32 reserved_auto2:12;
+
+ u32 gre_protocol:16; /* GRE Protocol (outer) */
+
u32 gre_key_h:24; /* GRE Key[31:8] (outer) */
- u32 reserved_auto4:8;
+ u32 gre_key_l:8; /* GRE Key [7:0] (outer) */
+
u32 vxlan_vni:24; /* VXLAN VNI (outer) */
- u32 geneve_oam:1; /* GENEVE OAM field (outer) */
- u32 reserved_auto5:7;
+ u32 reserved_auto3:8;
+
u32 geneve_vni:24; /* GENEVE VNI field (outer) */
+ u32 reserved_auto4:6;
+ u32 geneve_tlv_option_0_exist:1;
+ u32 geneve_oam:1; /* GENEVE OAM field (outer) */
+
+ u32 reserved_auto5:12;
u32 outer_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (outer) */
+
u32 reserved_auto6:12;
u32 inner_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (inner) */
- u32 reserved_auto7:12;
- u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */
+
+ u32 reserved_auto7:10;
u32 geneve_opt_len:6; /* GENEVE OptLen (outer) */
- u32 reserved_auto8:10;
+ u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */
+
+ u32 reserved_auto8:8;
u32 bth_dst_qp:24; /* Destination QP in BTH header */
- u32 reserved_auto9:8;
- u8 reserved_auto10[20];
+
+ u32 reserved_auto9;
+ u32 outer_esp_spi;
+ u32 reserved_auto10[3];
};
struct mlx5dr_match_misc2 {
- u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */
- u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
- u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
u32 outer_first_mpls_label:20; /* First MPLS LABEL (outer) */
- u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */
- u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
- u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
+ u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
+ u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
+ u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */
+
u32 inner_first_mpls_label:20; /* First MPLS LABEL (inner) */
- u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */
- u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
- u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
+ u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
+ u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
+ u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */
+
u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */
- u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */
- u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
- u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
+ u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
+ u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
+ u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */
+
u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */
+ u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
+ u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
+ u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */
+
u32 metadata_reg_c_7; /* metadata_reg_c_7 */
u32 metadata_reg_c_6; /* metadata_reg_c_6 */
u32 metadata_reg_c_5; /* metadata_reg_c_5 */
@@ -687,7 +723,7 @@ struct mlx5dr_match_misc2 {
u32 metadata_reg_c_1; /* metadata_reg_c_1 */
u32 metadata_reg_c_0; /* metadata_reg_c_0 */
u32 metadata_reg_a; /* metadata_reg_a */
- u8 reserved_auto2[12];
+ u32 reserved_auto1[3];
};
struct mlx5dr_match_misc3 {
@@ -695,24 +731,34 @@ struct mlx5dr_match_misc3 {
u32 outer_tcp_seq_num;
u32 inner_tcp_ack_num;
u32 outer_tcp_ack_num;
- u32 outer_vxlan_gpe_vni:24;
+
u32 reserved_auto1:8;
- u32 reserved_auto2:16;
- u32 outer_vxlan_gpe_flags:8;
+ u32 outer_vxlan_gpe_vni:24;
+
u32 outer_vxlan_gpe_next_protocol:8;
+ u32 outer_vxlan_gpe_flags:8;
+ u32 reserved_auto2:16;
+
u32 icmpv4_header_data;
u32 icmpv6_header_data;
- u8 icmpv6_code;
- u8 icmpv6_type;
- u8 icmpv4_code;
+
u8 icmpv4_type;
+ u8 icmpv4_code;
+ u8 icmpv6_type;
+ u8 icmpv6_code;
+
u32 geneve_tlv_option_0_data;
- u8 gtpu_msg_flags;
- u8 gtpu_msg_type;
+
u32 gtpu_teid;
+
+ u8 gtpu_msg_type;
+ u8 gtpu_msg_flags;
+ u32 reserved_auto3:16;
+
u32 gtpu_dw_2;
u32 gtpu_first_ext_dw_0;
u32 gtpu_dw_0;
+ u32 reserved_auto4;
};
struct mlx5dr_match_misc4 {
@@ -724,6 +770,18 @@ struct mlx5dr_match_misc4 {
u32 prog_sample_field_id_2;
u32 prog_sample_field_value_3;
u32 prog_sample_field_id_3;
+ u32 reserved_auto1[8];
+};
+
+struct mlx5dr_match_misc5 {
+ u32 macsec_tag_0;
+ u32 macsec_tag_1;
+ u32 macsec_tag_2;
+ u32 macsec_tag_3;
+ u32 tunnel_header_0;
+ u32 tunnel_header_1;
+ u32 tunnel_header_2;
+ u32 tunnel_header_3;
};
struct mlx5dr_match_param {
@@ -733,6 +791,7 @@ struct mlx5dr_match_param {
struct mlx5dr_match_misc2 misc2;
struct mlx5dr_match_misc3 misc3;
struct mlx5dr_match_misc4 misc4;
+ struct mlx5dr_match_misc5 misc5;
};
#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
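The new misc5 block mirrors a fixed device view: four MACsec tag dwords followed by the four tunnel_header dwords that the tunnel-header STE builders match on. Host-side mirrors like this only hold up if the compiler inserts no padding, which can be pinned at build time; the struct and offsets below are illustrative only, not the real mlx5 layout:

#include <stddef.h>
#include <stdint.h>

struct tunnel_header_match {	/* illustrative stand-in, not misc5 */
	uint32_t macsec_tag[4];
	uint32_t tunnel_header[4];
};

_Static_assert(sizeof(struct tunnel_header_match) == 32,
	       "device side expects exactly eight dwords");
_Static_assert(offsetof(struct tunnel_header_match, tunnel_header) == 16,
	       "tunnel header dwords start right after the MACsec tags");

int main(void)
{
	return 0;
}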
@@ -789,6 +848,7 @@ struct mlx5dr_cmd_caps {
u8 flex_parser_id_gtpu_teid;
u8 flex_parser_id_gtpu_dw_2;
u8 flex_parser_id_gtpu_first_ext_dw_0;
+ u8 flex_parser_ok_bits_supp;
u8 max_ft_level;
u16 roce_min_src_udp;
u8 sw_format_ver;
@@ -843,12 +903,15 @@ struct mlx5dr_domain {
struct mlx5dr_domain_info info;
struct xarray csum_fts_xa;
struct mlx5dr_ste_ctx *ste_ctx;
+ struct list_head dbg_tbl_list;
+ struct mlx5dr_dbg_dump_info dump_info;
};
struct mlx5dr_table_rx_tx {
struct mlx5dr_ste_htbl *s_anchor;
struct mlx5dr_domain_rx_tx *nic_dmn;
u64 default_icm_addr;
+ struct list_head nic_matcher_list;
};
struct mlx5dr_table {
@@ -862,6 +925,7 @@ struct mlx5dr_table {
struct list_head matcher_list;
struct mlx5dr_action *miss_action;
refcount_t refcount;
+ struct list_head dbg_node;
};
struct mlx5dr_matcher_rx_tx {
@@ -875,18 +939,21 @@ struct mlx5dr_matcher_rx_tx {
u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
u64 default_icm_addr;
struct mlx5dr_table_rx_tx *nic_tbl;
+ u32 prio;
+ struct list_head list_node;
+ u32 rules;
};
struct mlx5dr_matcher {
struct mlx5dr_table *tbl;
struct mlx5dr_matcher_rx_tx rx;
struct mlx5dr_matcher_rx_tx tx;
- struct list_head matcher_list;
+ struct list_head list_node; /* Used for both matchers and dbg managing */
u32 prio;
struct mlx5dr_match_param mask;
u8 match_criteria;
refcount_t refcount;
- struct mlx5dv_flow_matcher *dv_matcher;
+ struct list_head dbg_rule_list;
};
struct mlx5dr_ste_action_modify_field {
@@ -958,6 +1025,11 @@ struct mlx5dr_action_flow_tag {
u32 flow_tag;
};
+struct mlx5dr_rule_action_member {
+ struct mlx5dr_action *action;
+ struct list_head list;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -998,6 +1070,7 @@ struct mlx5dr_rule {
struct mlx5dr_rule_rx_tx rx;
struct mlx5dr_rule_rx_tx tx;
struct list_head rule_actions_list;
+ struct list_head dbg_node;
u32 flow_source;
};
@@ -1050,6 +1123,11 @@ static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
mlx5dr_domain_nic_unlock(&dmn->info.rx);
}
+int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *nic_matcher);
+int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *nic_matcher);
+
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 2632d5ae9bc0..a476da2424f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */
+#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
@@ -194,6 +195,15 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
dest_attr->vport.vhca_id);
}
+static struct mlx5dr_action *create_uplink_action(struct mlx5dr_domain *domain,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+ return mlx5dr_action_create_dest_vport(domain, MLX5_VPORT_UPLINK, 1,
+ dest_attr->vport.vhca_id);
+}
+
static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
@@ -218,7 +228,8 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai
static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
{
- return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ return (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
@@ -411,8 +422,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_actions[num_term_actions++].dest = tmp_action;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
- tmp_action = create_vport_action(domain, dst);
+ tmp_action = type == MLX5_FLOW_DESTINATION_TYPE_VPORT ?
+ create_vport_action(domain, dst) :
+ create_uplink_action(domain, dst);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
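Uplink destinations reuse the vport plumbing: both types funnel into one case body and a ternary picks the creator, with create_uplink_action differing from create_vport_action only in passing MLX5_VPORT_UPLINK as the port. The shared-case dispatch shape in isolation:

#include <stdio.h>

enum dest_type { DEST_VPORT, DEST_UPLINK };

static int create_vport_action_sketch(void)  { puts("vport action");  return 0; }
static int create_uplink_action_sketch(void) { puts("uplink action"); return 0; }

/* Two destination types share one case body; a ternary picks the
 * creator, mirroring the hunk above.
 */
static int create_dest_action(enum dest_type type)
{
	switch (type) {
	case DEST_UPLINK:
	case DEST_VPORT:
		return type == DEST_VPORT ? create_vport_action_sketch()
					  : create_uplink_action_sketch();
	}
	return -1;
}

int main(void)
{
	return create_dest_action(DEST_UPLINK);
}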
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index d2a937f69784..9604b2091358 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -447,6 +447,14 @@ struct mlx5_ifc_ste_flex_parser_1_bits {
u8 flex_parser_4[0x20];
};
+struct mlx5_ifc_ste_flex_parser_ok_bits {
+ u8 flex_parser_3[0x20];
+ u8 flex_parser_2[0x20];
+ u8 flex_parsers_ok[0x8];
+ u8 reserved_at_48[0x18];
+ u8 flex_parser_0[0x20];
+};
+
struct mlx5_ifc_ste_flex_parser_tnl_bits {
u8 flex_parser_tunneling_header_63_32[0x20];
@@ -490,6 +498,14 @@ struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_ste_tunnel_header_bits {
+ u8 tunnel_header_0[0x20];
+
+ u8 tunnel_header_1[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_ste_general_purpose_bits {
u8 general_purpose_lookup_field[0x20];
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
index 92b798f8e73a..ceeb7f4c3f6c 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -33,8 +33,11 @@ static void mlxbf_gige_get_regs(struct net_device *netdev,
memcpy_fromio(p, priv->base, MLXBF_GIGE_MMIO_REG_SZ);
}
-static void mlxbf_gige_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static void
+mlxbf_gige_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
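This adapts the driver to the extended ethtool callback: ring parameters gained a kernel-internal companion struct plus an extack for error reporting, leaving the userspace-visible ethtool_ringparam untouched. A userspace sketch of that kind of API extension, with invented struct and field names:

#include <stdio.h>

struct ringparam        { unsigned int rx_pending, tx_pending; }; /* UAPI-visible */
struct kernel_ringparam { unsigned int cqe_size; };               /* kernel-only extras */

struct ethtool_like_ops {
	void (*get_ringparam)(struct ringparam *r, struct kernel_ringparam *kr);
};

static void drv_get_ringparam(struct ringparam *r, struct kernel_ringparam *kr)
{
	r->rx_pending = 128;
	r->tx_pending = 128;
	kr->cqe_size = 64;	/* reported without widening the UAPI struct */
}

int main(void)
{
	const struct ethtool_like_ops ops = { .get_ringparam = drv_get_ringparam };
	struct ringparam r;
	struct kernel_ringparam kr;

	ops.get_ringparam(&r, &kr);
	printf("rx %u tx %u cqe %u\n", r.rx_pending, r.tx_pending, kr.cqe_size);
	return 0;
}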
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index d1ae248e125c..4683312861ac 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -66,7 +66,7 @@ config MLXSW_SPECTRUM
default m
help
This driver supports Mellanox Technologies
- Spectrum/Spectrum-2/Spectrum-3 Ethernet Switch ASICs.
+ Spectrum/Spectrum-2/Spectrum-3/Spectrum-4 Ethernet Switch ASICs.
To compile this driver as a module, choose M here: the
module will be called mlxsw_spectrum.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 392ce3cb27f7..51b260d54237 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -935,6 +935,18 @@ static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+enum mlxsw_cmd_mbox_sw2hw_dq_sdq_lp {
+ MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE,
+ MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE,
+};
+
+/* cmd_mbox_sw2hw_dq_sdq_lp
+ * SDQ local Processing
+ * 0: local processing by wqe.lp
+ * 1: local processing (ignoring wqe.lp)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_lp, 0x00, 23, 1);
+
/* cmd_mbox_sw2hw_dq_sdq_tclass
* SDQ: CPU Egress TClass
* RDQ: Reserved
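The new sdq_lp mailbox field makes local processing a per-queue property fixed at SDQ creation; as the pci.c hunk further down shows, the EMAD queue is configured to ignore the per-WQE lp bit while the control queues keep honouring it. The selection logic in isolation (the queue index is invented):

#include <stdio.h>

enum sdq_lp { SDQ_LP_WQE, SDQ_LP_IGNORE_WQE };

#define EMAD_SDQ_INDEX 0	/* made-up index for this sketch */

/* EMADs always take the local-processing path; everything else
 * defers to the per-WQE flag.
 */
static enum sdq_lp sdq_lp_for_queue(int q_num)
{
	return q_num == EMAD_SDQ_INDEX ? SDQ_LP_IGNORE_WQE : SDQ_LP_WQE;
}

int main(void)
{
	printf("q0=%d q1=%d\n", sdq_lp_for_queue(0), sdq_lp_for_queue(1));
	return 0;
}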
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 3fd3812b8f31..866b9357939b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -47,7 +47,7 @@ static struct workqueue_struct *mlxsw_owq;
struct mlxsw_core_port {
struct devlink_port devlink_port;
void *port_driver_priv;
- u8 local_port;
+ u16 local_port;
};
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
@@ -77,7 +77,7 @@ struct mlxsw_core {
bool enable_string_tlv;
} emad;
struct {
- u8 *mapping; /* lag_id+port_index to local_port mapping */
+ u16 *mapping; /* lag_id+port_index to local_port mapping */
} lag;
struct mlxsw_res res;
struct mlxsw_hwmon *hwmon;
@@ -160,7 +160,7 @@ static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
if (!reload)
- devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core));
kfree(mlxsw_core->ports);
}
@@ -718,7 +718,7 @@ static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
}
/* called with rcu read lock held */
-static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
void *priv)
{
struct mlxsw_core *mlxsw_core = priv;
@@ -1708,6 +1708,124 @@ static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
static const struct mlxsw_listener mlxsw_core_health_listener =
MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
+static int
+mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val, tile_v;
+ int err;
+
+ val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val);
+ if (err)
+ return err;
+ tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl);
+ if (tile_v) {
+ val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val, tile_v;
+ int err;
+
+ val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var0", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var1", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var2", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var3", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var4", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "callra", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl);
+ if (tile_v) {
+ val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
+ if (err)
+ return err;
+ }
+ val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val;
+ int err;
+
+ val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl);
+ return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val;
+ int err;
+
+ val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl);
+ err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg, void *priv_ctx,
struct netlink_ext_ack *extack)
@@ -1741,6 +1859,46 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
val_str = "KVD insertion machine stopped";
break;
+ case MLXSW_REG_MFDE_EVENT_ID_TEST:
+ val_str = "Test";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
+ val_str = "FW assert";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
+ val_str = "Fatal cause";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
+ if (err)
+ return err;
+ }
+
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity");
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_severity_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "id", val);
+ if (err)
+ return err;
+ switch (val) {
+ case MLXSW_REG_MFDE_SEVERITY_FATL:
+ val_str = "Fatal";
+ break;
+ case MLXSW_REG_MFDE_SEVERITY_NRML:
+ val_str = "Normal";
+ break;
+ case MLXSW_REG_MFDE_SEVERITY_INTR:
+ val_str = "Debug";
+ break;
default:
val_str = NULL;
}
@@ -1749,6 +1907,7 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
if (err)
return err;
}
+
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
return err;
@@ -1800,24 +1959,18 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
if (err)
return err;
- if (event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO) {
- val = mlxsw_reg_mfde_log_address_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
- if (err)
- return err;
- val = mlxsw_reg_mfde_log_id_get(mfde_pl);
- err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
- if (err)
- return err;
- val = mlxsw_reg_mfde_log_ip_get(mfde_pl);
- err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
- if (err)
- return err;
- } else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
- val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
- if (err)
- return err;
+ switch (event_id) {
+ case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
+ return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl,
+ fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
+ return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl,
+ fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
+ return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
+ return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl,
+ fmsg);
}
return 0;
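With each MFDE event id now owning a dump helper, the old two-branch if/else becomes a switch that simply forwards to the matching helper. The same dispatch can be read as a table from event id to handler; ids and handlers below are illustrative:

#include <stdio.h>

enum event_id { EV_CRSPACE_TO, EV_KVD_IM_STOP, EV_FW_ASSERT, EV_FATAL_CAUSE, EV_MAX };

static int dump_crspace_to(void)  { puts("crspace_to");  return 0; }
static int dump_kvd_im_stop(void) { puts("kvd_im_stop"); return 0; }
static int dump_fw_assert(void)   { puts("fw_assert");   return 0; }
static int dump_fatal_cause(void) { puts("fatal_cause"); return 0; }

/* One handler per event id, selected by a single lookup. */
static int (* const dumpers[EV_MAX])(void) = {
	[EV_CRSPACE_TO]  = dump_crspace_to,
	[EV_KVD_IM_STOP] = dump_kvd_im_stop,
	[EV_FW_ASSERT]   = dump_fw_assert,
	[EV_FATAL_CAUSE] = dump_fatal_cause,
};

int main(void)
{
	enum event_id id = EV_FW_ASSERT;

	return dumpers[id] ? dumpers[id]() : 0;
}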
@@ -1959,7 +2112,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
- alloc_size = sizeof(u8) *
+ alloc_size = sizeof(*mlxsw_core->lag.mapping) *
MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
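Switching the allocation to sizeof(*mlxsw_core->lag.mapping) is what keeps the u8 to u16 widening safe: the element size is tied to the pointer's type instead of being hard-coded. The idiom in isolation:

#include <stdlib.h>

/* Tie the element size to the pointer's type, so changing the
 * mapping from u8 to u16 cannot leave the allocation too small.
 */
struct lag_map {
	unsigned short *mapping;	/* element type recently widened */
};

static int lag_map_alloc(struct lag_map *lm, size_t lags, size_t members)
{
	lm->mapping = calloc(lags * members, sizeof(*lm->mapping));
	return lm->mapping ? 0 : -1;
}

int main(void)
{
	struct lag_map lm;
	int err = lag_map_alloc(&lm, 64, 32);

	free(lm.mapping);
	return err;
}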
@@ -2033,7 +2186,7 @@ err_alloc_lag_mapping:
mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
if (!reload)
- devlink_resources_unregister(devlink, NULL);
+ devlink_resources_unregister(devlink);
err_register_resources:
mlxsw_bus->fini(bus_priv);
err_bus_init:
@@ -2099,7 +2252,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core, reload);
if (!reload)
- devlink_resources_unregister(devlink, NULL);
+ devlink_resources_unregister(devlink);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
if (!reload)
devlink_free(devlink);
@@ -2108,7 +2261,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
- devlink_resources_unregister(devlink, NULL);
+ devlink_resources_unregister(devlink);
devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
@@ -2130,7 +2283,7 @@ int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
if (mlxsw_core->driver->ptp_transmitted)
mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
@@ -2208,7 +2361,7 @@ mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
rxl_item->enabled = enabled;
}
-static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port,
void *priv)
{
struct mlxsw_event_listener_item *event_listener_item = priv;
@@ -2641,7 +2794,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
{
struct mlxsw_rx_listener_item *rxl_item;
const struct mlxsw_rx_listener *rxl;
- u8 local_port;
+ u16 local_port;
bool found = false;
if (rx_info->is_lag) {
@@ -2699,7 +2852,7 @@ static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
}
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index, u8 local_port)
+ u16 lag_id, u8 port_index, u16 local_port)
{
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, port_index);
@@ -2708,8 +2861,8 @@ void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
-u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index)
+u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index)
{
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, port_index);
@@ -2719,7 +2872,7 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 local_port)
+ u16 lag_id, u16 local_port)
{
int i;
@@ -2747,7 +2900,7 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_res_get);
-static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_flavour flavour,
u32 port_number, bool split,
u32 split_port_subnumber,
@@ -2778,7 +2931,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
return err;
}
-static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
+static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
@@ -2788,7 +2941,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
@@ -2810,7 +2963,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_init);
-void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
+void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
{
atomic_dec(&mlxsw_core->active_ports_count);
@@ -2845,7 +2998,7 @@ void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
-void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv, struct net_device *dev)
{
struct mlxsw_core_port *mlxsw_core_port =
@@ -2857,7 +3010,7 @@ void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv)
{
struct mlxsw_core_port *mlxsw_core_port =
@@ -2869,7 +3022,7 @@ void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);
-void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv)
{
struct mlxsw_core_port *mlxsw_core_port =
@@ -2882,7 +3035,7 @@ void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
EXPORT_SYMBOL(mlxsw_core_port_clear);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u8 local_port)
+ u16 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
@@ -2895,7 +3048,7 @@ EXPORT_SYMBOL(mlxsw_core_port_type_get);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
- u8 local_port)
+ u16 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
@@ -2905,7 +3058,7 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port)
+bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
{
const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
int i;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 12023a550007..f30bb8614e69 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -54,7 +54,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
- u8 local_port;
+ u16 local_port;
bool is_emad;
};
@@ -67,7 +67,7 @@ struct mlxsw_rx_md_info {
u16 tx_sys_port;
u16 tx_lag_id;
};
- u8 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */
+ u16 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */
u8 tx_tc;
u8 latency_valid:1,
tx_congestion_valid:1,
@@ -82,11 +82,11 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port);
+ struct sk_buff *skb, u16 local_port);
struct mlxsw_rx_listener {
- void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
- u8 local_port;
+ void (*func)(struct sk_buff *skb, u16 local_port, void *priv);
+ u16 local_port;
u8 mirror_reason;
u16 trap_id;
};
@@ -209,7 +209,7 @@ struct mlxsw_rx_info {
u16 sys_port;
u16 lag_id;
} u;
- u8 lag_port_index;
+ u16 lag_port_index;
u8 mirror_reason;
int trap_id;
};
@@ -218,36 +218,36 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
struct mlxsw_rx_info *rx_info);
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index, u8 local_port);
-u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index);
+ u16 lag_id, u8 port_index, u16 local_port);
+u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index);
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 local_port);
+ u16 lag_id, u16 local_port);
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
u32 port_number, bool split, u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
unsigned char switch_id_len);
-void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
+void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port);
int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
void *port_driver_priv,
const unsigned char *switch_id,
unsigned char switch_id_len);
void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core);
-void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv, struct net_device *dev);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv);
-void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u8 local_port);
+ u16 local_port);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
- u8 local_port);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port);
+ u16 local_port);
+bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
@@ -316,11 +316,11 @@ struct mlxsw_driver {
struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core);
- int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ int (*port_type_set)(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_type new_type);
- int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port,
unsigned int count, struct netlink_ext_ack *extack);
- int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack);
int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
@@ -394,7 +394,7 @@ struct mlxsw_driver {
* is responsible for freeing the passed-in SKB.
*/
void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port);
+ struct sk_buff *skb, u16 local_port);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 78d9c0196f2b..77e82e6cf6e8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -113,7 +113,7 @@ static const struct rhashtable_params mlxsw_afa_set_ht_params = {
};
struct mlxsw_afa_fwd_entry_ht_key {
- u8 local_port;
+ u16 local_port;
};
struct mlxsw_afa_fwd_entry {
@@ -555,7 +555,7 @@ int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
EXPORT_SYMBOL(mlxsw_afa_block_terminate);
static struct mlxsw_afa_fwd_entry *
-mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
+mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
{
struct mlxsw_afa_fwd_entry *fwd_entry;
int err;
@@ -598,7 +598,7 @@ static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
}
static struct mlxsw_afa_fwd_entry *
-mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
+mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
{
struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
struct mlxsw_afa_fwd_entry *fwd_entry;
@@ -647,7 +647,7 @@ mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_fwd_entry_ref *
-mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
+mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u16 local_port)
{
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
struct mlxsw_afa_fwd_entry *fwd_entry;
@@ -1352,7 +1352,7 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
struct mlxsw_afa_mirror {
struct mlxsw_afa_resource resource;
int span_id;
- u8 local_in_port;
+ u16 local_in_port;
bool ingress;
};
@@ -1379,7 +1379,7 @@ mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_mirror *
-mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port,
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u16 local_in_port,
const struct net_device *out_dev, bool ingress)
{
struct mlxsw_afa_mirror *mirror;
@@ -1423,7 +1423,7 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
}
int
-mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u16 local_in_port,
const struct net_device *out_dev, bool ingress,
struct netlink_ext_ack *extack)
{
@@ -1663,7 +1663,7 @@ mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
}
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port,
+ u16 local_port, bool in_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
@@ -2038,7 +2038,7 @@ static void mlxsw_afa_sampler_pack(char *payload, u8 mirror_agent, u32 rate)
struct mlxsw_afa_sampler {
struct mlxsw_afa_resource resource;
int span_id;
- u8 local_port;
+ u16 local_port;
bool ingress;
};
@@ -2061,7 +2061,7 @@ static void mlxsw_afa_sampler_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_sampler *
-mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u8 local_port,
+mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u16 local_port,
struct psample_group *psample_group, u32 rate,
u32 trunc_size, bool truncate, bool ingress,
struct netlink_ext_ack *extack)
@@ -2104,7 +2104,7 @@ mlxsw_afa_block_append_allocated_sampler(struct mlxsw_afa_block *block,
return 0;
}
-int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port,
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index b65bf98eb5ab..16cbd6acbb01 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -17,24 +17,24 @@ struct mlxsw_afa_ops {
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index,
bool *activity);
- int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
+ int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u16 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
void (*counter_index_put)(void *priv, unsigned int counter_index);
- int (*mirror_add)(void *priv, u8 local_in_port,
+ int (*mirror_add)(void *priv, u16 local_in_port,
const struct net_device *out_dev,
bool ingress, int *p_span_id);
- void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
+ void (*mirror_del)(void *priv, u16 local_in_port, int span_id,
bool ingress);
int (*policer_add)(void *priv, u64 rate_bytes_ps, u32 burst,
u16 *p_policer_index,
struct netlink_ext_ack *extack);
void (*policer_del)(void *priv, u16 policer_index);
- int (*sampler_add)(void *priv, u8 local_port,
+ int (*sampler_add)(void *priv, u16 local_port,
struct psample_group *psample_group, u32 rate,
u32 trunc_size, bool truncate, bool ingress,
int *p_span_id, struct netlink_ext_ack *extack);
- void (*sampler_del)(void *priv, u8 local_port, int span_id,
+ void (*sampler_del)(void *priv, u16 local_port, int span_id,
bool ingress);
bool dummy_first_set;
};
@@ -62,12 +62,12 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
- u8 local_in_port,
+ u16 local_in_port,
const struct net_device *out_dev,
bool ingress,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port,
+ u16 local_port, bool in_port,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
u16 vid, u8 pcp, u8 et,
@@ -98,7 +98,7 @@ int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
u32 fa_index, u64 rate_bytes_ps, u32 burst,
u16 *p_policer_index,
struct netlink_ext_ack *extack);
-int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port,
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index f1b09c2f9eda..bd1a51a0a540 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -32,8 +32,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_8_10, 0x18, 17, 3),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_7, 0x18, 20, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 20, 8),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index a47a17c04c62..3a037fe47211 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -33,8 +33,8 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_IP_TTL_,
MLXSW_AFK_ELEMENT_IP_ECN,
MLXSW_AFK_ELEMENT_IP_DSCP,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
MLXSW_AFK_ELEMENT_MAX,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index ab70a873a01a..cfafbeb42586 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -367,6 +367,42 @@ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \
__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
+#define LOCAL_PORT_LSB_SIZE 8
+#define LOCAL_PORT_MSB_SIZE 2
+
+#define MLXSW_ITEM32_LP(_type, _cname, _offset1, _shift1, _offset2, _shift2) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, local_port) = { \
+ .offset = _offset1, \
+ .shift = _shift1, \
+ .size = {.bits = LOCAL_PORT_LSB_SIZE,}, \
+ .name = #_type "_" #_cname "_local_port", \
+}; \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, lp_msb) = { \
+ .offset = _offset2, \
+ .shift = _shift2, \
+ .size = {.bits = LOCAL_PORT_MSB_SIZE,}, \
+ .name = #_type "_" #_cname "_lp_msb", \
+}; \
+static inline u32 __maybe_unused \
+mlxsw_##_type##_##_cname##_local_port_get(const char *buf) \
+{ \
+ u32 local_port, lp_msb; \
+ \
+ local_port = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, \
+ local_port), 0); \
+ lp_msb = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, lp_msb), \
+ 0); \
+ return (lp_msb << LOCAL_PORT_LSB_SIZE) + local_port; \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_local_port_set(char *buf, u32 val) \
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, local_port), 0, \
+ val & ((1 << LOCAL_PORT_LSB_SIZE) - 1)); \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, lp_msb), 0, \
+ val >> LOCAL_PORT_LSB_SIZE); \
+}
+
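MLXSW_ITEM32_LP generates paired accessors that hide the split storage of a local port: an 8-bit LSB field at one offset and a 2-bit MSB field at another, giving the 10 bits needed now that local ports are u16 and can exceed 255. The encode/decode in isolation:

#include <assert.h>
#include <stdint.h>

#define LSB_BITS 8
#define MSB_BITS 2

/* Sketch of the split encode/decode only; the generated accessors
 * additionally read and write the two sub-fields at different
 * register offsets.
 */
struct split_port {
	uint8_t lsb;	/* local_port[7:0] */
	uint8_t msb;	/* local_port[9:8] */
};

static void split_port_set(struct split_port *p, uint16_t port)
{
	p->lsb = port & ((1u << LSB_BITS) - 1);
	p->msb = port >> LSB_BITS;
}

static uint16_t split_port_get(const struct split_port *p)
{
	return (uint16_t)((p->msb << LSB_BITS) | p->lsb);
}

int main(void)
{
	struct split_port p;

	split_port_set(&p, 300);	/* > 255 exercises the MSB field */
	assert(split_port_get(&p) == 300);
	return 0;
}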
#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
_step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 5d4dfa5ddbb5..10d13f5f9c7d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -38,7 +38,7 @@ struct mlxsw_m {
struct mlxsw_m_port {
struct net_device *dev;
struct mlxsw_m *mlxsw_m;
- u8 local_port;
+ u16 local_port;
u8 module;
};
@@ -180,7 +180,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
};
static int
-mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u8 local_port,
+mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port,
u8 *p_module, u8 *p_width)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -214,7 +214,7 @@ mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
}
static int
-mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
+mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
{
struct mlxsw_m_port *mlxsw_m_port;
struct net_device *dev;
@@ -277,7 +277,7 @@ err_alloc_etherdev:
return err;
}
-static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port)
+static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u16 local_port)
{
struct mlxsw_m_port *mlxsw_m_port = mlxsw_m->ports[local_port];
@@ -288,7 +288,7 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port)
mlxsw_core_port_fini(mlxsw_m->core, local_port);
}
-static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
+static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
u8 *last_module)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index a15c95a10bae..f91dde4df152 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -285,6 +285,7 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
int tclass;
+ int lp;
int i;
int err;
@@ -292,9 +293,12 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->consumer_counter = 0;
tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
MLXSW_PCI_SDQ_CTL_TC;
+ lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
+ MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;
/* Set CQ of same number of this SDQ. */
mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -1678,7 +1682,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
wqe = elem_info->elem;
mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
- mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+ mlxsw_pci_wqe_lp_set(wqe, 0);
mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
@@ -1973,6 +1977,7 @@ int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
pci_driver->probe = mlxsw_pci_probe;
pci_driver->remove = mlxsw_pci_remove;
+ pci_driver->shutdown = mlxsw_pci_remove;
return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 9899c1a2ea8f..cacc2f9fa1d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -9,6 +9,7 @@
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM3 0xcf70
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM4 0xcf80
#if IS_ENABLED(CONFIG_MLXSW_PCI)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 8d420eb8ade2..24cc65018b41 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -69,52 +69,6 @@ MLXSW_REG_DEFINE(spad, MLXSW_REG_SPAD_ID, MLXSW_REG_SPAD_LEN);
*/
MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
-/* SMID - Switch Multicast ID
- * --------------------------
- * The MID record maps from a MID (Multicast ID), which is a unique identifier
- * of the multicast group within the stacking domain, into a list of local
- * ports into which the packet is replicated.
- */
-#define MLXSW_REG_SMID_ID 0x2007
-#define MLXSW_REG_SMID_LEN 0x240
-
-MLXSW_REG_DEFINE(smid, MLXSW_REG_SMID_ID, MLXSW_REG_SMID_LEN);
-
-/* reg_smid_swid
- * Switch partition ID.
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
-
-/* reg_smid_mid
- * Multicast identifier - global identifier that represents the multicast group
- * across all devices.
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
-
-/* reg_smid_port
- * Local port memebership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
-
-/* reg_smid_port_mask
- * Local port mask (1 bit per port).
- * Access: W
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
-
-static inline void mlxsw_reg_smid_pack(char *payload, u16 mid,
- u8 port, bool set)
-{
- MLXSW_REG_ZERO(smid, payload);
- mlxsw_reg_smid_swid_set(payload, 0);
- mlxsw_reg_smid_mid_set(payload, mid);
- mlxsw_reg_smid_port_set(payload, port, set);
- mlxsw_reg_smid_port_mask_set(payload, port, 1);
-}
-
/* SSPR - Switch System Port Record Register
* -----------------------------------------
* Configures the system port to local port mapping.
@@ -141,7 +95,7 @@ MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
*
* Access: RW
*/
-MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12);
/* reg_sspr_sub_port
* Virtual port within the physical port.
@@ -161,7 +115,7 @@ MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
*/
MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
-static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_sspr_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(sspr, payload);
mlxsw_reg_sspr_m_set(payload, 1);
@@ -407,7 +361,7 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
const char *mac, u16 fid_vid,
enum mlxsw_reg_sfd_rec_action action,
- u8 local_port)
+ u16 local_port)
{
mlxsw_reg_sfd_rec_pack(payload, rec_index,
MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action);
@@ -417,15 +371,6 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
-static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
- char *mac, u16 *p_fid_vid,
- u8 *p_local_port)
-{
- mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
- *p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
- *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
-}
-
/* reg_sfd_uc_lag_sub_port
* LAG sub port.
* Must be 0 if multichannel VEPA is not enabled.
@@ -478,15 +423,6 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
-static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index,
- char *mac, u16 *p_vid,
- u16 *p_lag_id)
-{
- mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
- *p_vid = mlxsw_reg_sfd_uc_lag_fid_vid_get(payload, rec_index);
- *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index);
-}
-
/* reg_sfd_mc_pgi
*
* Multicast port group index - index into the port group table.
@@ -568,19 +504,43 @@ static inline void
mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
const char *mac, u16 fid,
- enum mlxsw_reg_sfd_rec_action action, u32 uip,
+ enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_uc_tunnel_protocol proto)
{
mlxsw_reg_sfd_rec_pack(payload, rec_index,
MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac,
action);
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
- mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
- mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid);
mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto);
}
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack4(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 fid,
+ enum mlxsw_reg_sfd_rec_action action, u32 uip)
+{
+ mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
+ mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index, policy, mac, fid,
+ action,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4);
+}
+
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack6(char *payload, int rec_index, const char *mac,
+ u16 fid, enum mlxsw_reg_sfd_rec_action action,
+ u32 uip_ptr)
+{
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip_ptr);
+ /* Only static policy is supported for IPv6 unicast tunnel entry. */
+ mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
+ mac, fid, action,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6);
+}
+
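Splitting the helper into mlxsw_reg_sfd_uc_tunnel_pack4() and
mlxsw_reg_sfd_uc_tunnel_pack6() reflects that only an IPv4 underlay
address fits in the record itself; the IPv6 variant takes uip_ptr, a
pointer to where the address is stored, instead of the address. A usage
sketch for the IPv4 case (mac and fid_index are hypothetical
placeholders; mlxsw_reg_write() is the usual transport for these packing
helpers):

	char sfd_pl[MLXSW_REG_SFD_LEN];
	u32 uip = 0xc0a80001;	/* 192.168.0.1, example underlay IP */
	int err;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
				      MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
				      mac, fid_index,
				      MLXSW_REG_SFD_REC_ACTION_NOP, uip);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfd), sfd_pl);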
enum mlxsw_reg_tunnel_port {
MLXSW_REG_TUNNEL_PORT_NVE,
MLXSW_REG_TUNNEL_PORT_VPLS,
@@ -692,7 +652,7 @@ MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
char *mac, u16 *p_vid,
- u8 *p_local_port)
+ u16 *p_local_port)
{
mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
*p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
@@ -781,7 +741,7 @@ MLXSW_REG_DEFINE(spms, MLXSW_REG_SPMS_ID, MLXSW_REG_SPMS_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spms, 0x00, 16, 0x00, 12);
enum mlxsw_reg_spms_state {
MLXSW_REG_SPMS_STATE_NO_CHANGE,
@@ -800,7 +760,7 @@ enum mlxsw_reg_spms_state {
*/
MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
-static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_spms_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(spms, payload);
mlxsw_reg_spms_local_port_set(payload, local_port);
@@ -833,7 +793,7 @@ MLXSW_ITEM32(reg, spvid, tport, 0x00, 24, 1);
* When tport = 1: Tunnel port.
* Access: Index
*/
-MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvid, 0x00, 16, 0x00, 12);
/* reg_spvid_sub_port
* Virtual port within the physical port.
@@ -868,7 +828,7 @@ MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2);
*/
MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
-static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid,
+static inline void mlxsw_reg_spvid_pack(char *payload, u16 local_port, u16 pvid,
u8 et_vlan)
{
MLXSW_REG_ZERO(spvid, payload);
@@ -911,7 +871,7 @@ MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvm, 0x00, 16, 0x00, 12);
/* reg_spvm_sub_port
* Virtual port within the physical port.
@@ -959,7 +919,7 @@ MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
MLXSW_REG_SPVM_BASE_LEN, 0, 12,
MLXSW_REG_SPVM_REC_LEN, 0, false);
-static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spvm_pack(char *payload, u16 local_port,
u16 vid_begin, u16 vid_end,
bool is_member, bool untagged)
{
@@ -994,7 +954,7 @@ MLXSW_REG_DEFINE(spaft, MLXSW_REG_SPAFT_ID, MLXSW_REG_SPAFT_LEN);
*
* Note: CPU port is not supported (all tag types are allowed).
*/
-MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spaft, 0x00, 16, 0x00, 12);
/* reg_spaft_sub_port
* Virtual port within the physical port.
@@ -1021,7 +981,7 @@ MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
*/
MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
-static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port,
bool allow_untagged)
{
MLXSW_REG_ZERO(spaft, payload);
@@ -1126,76 +1086,6 @@ mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
}
-/* SFTR - Switch Flooding Table Register
- * -------------------------------------
- * The switch flooding table is used for flooding packet replication. The table
- * defines a bit mask of ports for packet replication.
- */
-#define MLXSW_REG_SFTR_ID 0x2012
-#define MLXSW_REG_SFTR_LEN 0x420
-
-MLXSW_REG_DEFINE(sftr, MLXSW_REG_SFTR_ID, MLXSW_REG_SFTR_LEN);
-
-/* reg_sftr_swid
- * Switch partition ID with which to associate the port.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
-
-/* reg_sftr_flood_table
- * Flooding table index to associate with the specific type on the specific
- * switch partition.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
-
-/* reg_sftr_index
- * Index. Used as an index into the Flooding Table in case the table is
- * configured to use VID / FID or FID Offset.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
-
-/* reg_sftr_table_type
- * See mlxsw_flood_table_type
- * Access: RW
- */
-MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
-
-/* reg_sftr_range
- * Range of entries to update
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
-
-/* reg_sftr_port
- * Local port membership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
-
-/* reg_sftr_cpu_port_mask
- * CPU port mask (1 bit per port).
- * Access: W
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
-
-static inline void mlxsw_reg_sftr_pack(char *payload,
- unsigned int flood_table,
- unsigned int index,
- enum mlxsw_flood_table_type table_type,
- unsigned int range, u8 port, bool set)
-{
- MLXSW_REG_ZERO(sftr, payload);
- mlxsw_reg_sftr_swid_set(payload, 0);
- mlxsw_reg_sftr_flood_table_set(payload, flood_table);
- mlxsw_reg_sftr_index_set(payload, index);
- mlxsw_reg_sftr_table_type_set(payload, table_type);
- mlxsw_reg_sftr_range_set(payload, range);
- mlxsw_reg_sftr_port_set(payload, port, set);
- mlxsw_reg_sftr_port_mask_set(payload, port, 1);
-}
-
/* SFDF - Switch Filtering DB Flush
* --------------------------------
* The switch filtering DB flush register is used to flush the FDB.
@@ -1347,7 +1237,7 @@ MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8);
MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false);
static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id,
- u8 local_port)
+ u16 local_port)
{
MLXSW_REG_ZERO(sldr, payload);
mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST);
@@ -1357,7 +1247,7 @@ static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id,
}
static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id,
- u8 local_port)
+ u16 local_port)
{
MLXSW_REG_ZERO(sldr, payload);
mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST);
@@ -1397,7 +1287,7 @@ MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1);
* Reserved when pp = Global Configuration
* Access: Index
*/
-MLXSW_ITEM32(reg, slcr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, slcr, 0x00, 16, 0x00, 12);
enum mlxsw_reg_slcr_type {
MLXSW_REG_SLCR_TYPE_CRC, /* default */
@@ -1515,7 +1405,7 @@ MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, slcor, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, slcor, 0x00, 16, 0x00, 12);
/* reg_slcor_lag_id
* LAG Identifier. Index into the LAG descriptor table.
@@ -1531,7 +1421,7 @@ MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10);
MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10);
static inline void mlxsw_reg_slcor_pack(char *payload,
- u8 local_port, u16 lag_id,
+ u16 local_port, u16 lag_id,
enum mlxsw_reg_slcor_col col)
{
MLXSW_REG_ZERO(slcor, payload);
@@ -1541,7 +1431,7 @@ static inline void mlxsw_reg_slcor_pack(char *payload,
}
static inline void mlxsw_reg_slcor_port_add_pack(char *payload,
- u8 local_port, u16 lag_id,
+ u16 local_port, u16 lag_id,
u8 port_index)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
@@ -1550,21 +1440,21 @@ static inline void mlxsw_reg_slcor_port_add_pack(char *payload,
}
static inline void mlxsw_reg_slcor_port_remove_pack(char *payload,
- u8 local_port, u16 lag_id)
+ u16 local_port, u16 lag_id)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT);
}
static inline void mlxsw_reg_slcor_col_enable_pack(char *payload,
- u8 local_port, u16 lag_id)
+ u16 local_port, u16 lag_id)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED);
}
static inline void mlxsw_reg_slcor_col_disable_pack(char *payload,
- u8 local_port, u16 lag_id)
+ u16 local_port, u16 lag_id)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED);
@@ -1583,7 +1473,7 @@ MLXSW_REG_DEFINE(spmlr, MLXSW_REG_SPMLR_ID, MLXSW_REG_SPMLR_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spmlr, 0x00, 16, 0x00, 12);
/* reg_spmlr_sub_port
* Virtual port within the physical port.
@@ -1611,7 +1501,7 @@ enum mlxsw_reg_spmlr_learn_mode {
*/
MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
-static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spmlr_pack(char *payload, u16 local_port,
enum mlxsw_reg_spmlr_learn_mode mode)
{
MLXSW_REG_ZERO(spmlr, payload);
@@ -1642,7 +1532,7 @@ MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
*
* Note: Reserved for 802.1Q FIDs.
*/
-MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, svfa, 0x00, 16, 0x00, 12);
enum mlxsw_reg_svfa_mt {
MLXSW_REG_SVFA_MT_VID_TO_FID,
@@ -1696,7 +1586,7 @@ MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
*/
MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
-static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_svfa_pack(char *payload, u16 local_port,
enum mlxsw_reg_svfa_mt mt, bool valid,
u16 fid, u16 vid)
{
@@ -1733,7 +1623,7 @@ MLXSW_ITEM32(reg, spvtr, tport, 0x00, 24, 1);
* When tport = 1: tunnel port.
* Access: Index
*/
-MLXSW_ITEM32(reg, spvtr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvtr, 0x00, 16, 0x00, 12);
/* reg_spvtr_ippe
* Ingress Port Prio Mode Update Enable.
@@ -1803,7 +1693,7 @@ enum mlxsw_reg_spvtr_epvid_mode {
MLXSW_ITEM32(reg, spvtr, epvid_mode, 0x04, 0, 4);
static inline void mlxsw_reg_spvtr_pack(char *payload, bool tport,
- u8 local_port,
+ u16 local_port,
enum mlxsw_reg_spvtr_ipvid_mode ipvid_mode)
{
MLXSW_REG_ZERO(spvtr, payload);
@@ -1828,7 +1718,7 @@ MLXSW_REG_DEFINE(svpe, MLXSW_REG_SVPE_ID, MLXSW_REG_SVPE_LEN);
*
* Note: CPU port is not supported (uses VLAN mode only).
*/
-MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, svpe, 0x00, 16, 0x00, 12);
/* reg_svpe_vp_en
* Virtual port enable.
@@ -1838,7 +1728,7 @@ MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
-static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_svpe_pack(char *payload, u16 local_port,
bool enable)
{
MLXSW_REG_ZERO(svpe, payload);
@@ -1948,7 +1838,7 @@ MLXSW_REG_DEFINE(spvmlr, MLXSW_REG_SPVMLR_ID, MLXSW_REG_SPVMLR_LEN);
*
* Note: CPU port is not supported.
*/
-MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvmlr, 0x00, 16, 0x00, 12);
/* reg_spvmlr_num_rec
* Number of records to update.
@@ -1971,7 +1861,7 @@ MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spvmlr_pack(char *payload, u16 local_port,
u16 vid_begin, u16 vid_end,
bool learn_enable)
{
@@ -2009,7 +1899,7 @@ MLXSW_REG_DEFINE(spvc, MLXSW_REG_SPVC_ID, MLXSW_REG_SPVC_LEN);
* through Rx port i and a Tx port j then port i and port j must have the
* same configuration.
*/
-MLXSW_ITEM32(reg, spvc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvc, 0x00, 16, 0x00, 12);
/* reg_spvc_inner_et2
* Vlan Tag1 EtherType2 enable.
@@ -2074,7 +1964,7 @@ MLXSW_ITEM32(reg, spvc, inner_et0, 0x08, 1, 1);
*/
MLXSW_ITEM32(reg, spvc, et0, 0x08, 0, 1);
-static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1,
+static inline void mlxsw_reg_spvc_pack(char *payload, u16 local_port, bool et1,
bool et0)
{
MLXSW_REG_ZERO(spvc, payload);
@@ -2104,7 +1994,7 @@ MLXSW_REG_DEFINE(spevet, MLXSW_REG_SPEVET_ID, MLXSW_REG_SPEVET_LEN);
* Not supported to CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spevet, 0x00, 16, 0x00, 12);
/* reg_spevet_et_vlan
* Egress EtherType VLAN to push when SPVID.egr_et_set field set for the packet:
@@ -2115,7 +2005,7 @@ MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, spevet, et_vlan, 0x04, 16, 2);
-static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spevet_pack(char *payload, u16 local_port,
u8 et_vlan)
{
MLXSW_REG_ZERO(spevet, payload);
@@ -2123,6 +2013,122 @@ static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port,
mlxsw_reg_spevet_et_vlan_set(payload, et_vlan);
}
+/* SFTR-V2 - Switch Flooding Table Version 2 Register
+ * --------------------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR2_ID 0x202F
+#define MLXSW_REG_SFTR2_LEN 0x120
+
+MLXSW_REG_DEFINE(sftr2, MLXSW_REG_SFTR2_ID, MLXSW_REG_SFTR2_LEN);
+
+/* reg_sftr2_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, swid, 0x00, 24, 8);
+
+/* reg_sftr2_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, flood_table, 0x00, 16, 6);
+
+/* reg_sftr2_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, index, 0x00, 0, 16);
+
+/* reg_sftr2_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr2, table_type, 0x04, 16, 3);
+
+/* reg_sftr2_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, range, 0x04, 0, 16);
+
+/* reg_sftr2_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port, 0x20, 0x80, 1);
+
+/* reg_sftr2_port_mask
+ * Local port mask (1 bit per port).
+ * Access: WO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port_mask, 0xA0, 0x80, 1);
+
+static inline void mlxsw_reg_sftr2_pack(char *payload,
+ unsigned int flood_table,
+ unsigned int index,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int range, u16 port, bool set)
+{
+ MLXSW_REG_ZERO(sftr2, payload);
+ mlxsw_reg_sftr2_swid_set(payload, 0);
+ mlxsw_reg_sftr2_flood_table_set(payload, flood_table);
+ mlxsw_reg_sftr2_index_set(payload, index);
+ mlxsw_reg_sftr2_table_type_set(payload, table_type);
+ mlxsw_reg_sftr2_range_set(payload, range);
+ mlxsw_reg_sftr2_port_set(payload, port, set);
+ mlxsw_reg_sftr2_port_mask_set(payload, port, 1);
+}
+
+/* SMID-V2 - Switch Multicast ID Version 2 Register
+ * ------------------------------------------------
+ * The MID record maps from a MID (Multicast ID), which is a unique identifier
+ * of the multicast group within the stacking domain, into a list of local
+ * ports into which the packet is replicated.
+ */
+#define MLXSW_REG_SMID2_ID 0x2034
+#define MLXSW_REG_SMID2_LEN 0x120
+
+MLXSW_REG_DEFINE(smid2, MLXSW_REG_SMID2_ID, MLXSW_REG_SMID2_LEN);
+
+/* reg_smid2_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid2, swid, 0x00, 24, 8);
+
+/* reg_smid2_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid2, mid, 0x00, 0, 16);
+
+/* reg_smid2_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid2, port, 0x20, 0x80, 1);
+
+/* reg_smid2_port_mask
+ * Local port mask (1 bit per port).
+ * Access: WO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid2, port_mask, 0xA0, 0x80, 1);
+
+static inline void mlxsw_reg_smid2_pack(char *payload, u16 mid, u16 port,
+ bool set)
+{
+ MLXSW_REG_ZERO(smid2, payload);
+ mlxsw_reg_smid2_swid_set(payload, 0);
+ mlxsw_reg_smid2_mid_set(payload, mid);
+ mlxsw_reg_smid2_port_set(payload, port, set);
+ mlxsw_reg_smid2_port_mask_set(payload, port, 1);
+}
+
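SFTR2 and SMID2 replace the removed SFTR and SMID registers with
0x80-byte port bitmaps instead of 0x20-byte ones, making room for local
ports above 255. A usage sketch for SMID2 (mid and local_port are
placeholders; mlxsw_reg_write() is the usual write path):

	char smid2_pl[MLXSW_REG_SMID2_LEN];
	int err;

	/* Add local_port to the MID's replication list. */
	mlxsw_reg_smid2_pack(smid2_pl, mid, local_port, true);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(smid2), smid2_pl);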
/* CWTP - Congestion WRED ECN TClass Profile
* ----------------------------------------
* Configures the profiles for queues of egress port and traffic class
@@ -2139,7 +2145,7 @@ MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, cwtp, local_port, 0, 16, 8);
+MLXSW_ITEM32_LP(reg, cwtp, 0x00, 16, 0x00, 12);
/* reg_cwtp_traffic_class
* Traffic Class to configure
@@ -2173,7 +2179,7 @@ MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN,
#define MLXSW_REG_CWTP_MAX_PROFILE 2
#define MLXSW_REG_CWTP_DEFAULT_PROFILE 1
-static inline void mlxsw_reg_cwtp_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_cwtp_pack(char *payload, u16 local_port,
u8 traffic_class)
{
int i;
@@ -2217,7 +2223,7 @@ MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, cwtpm, local_port, 0, 16, 8);
+MLXSW_ITEM32_LP(reg, cwtpm, 0x00, 16, 0x00, 12);
/* reg_cwtpm_traffic_class
* Traffic Class to configure
@@ -2291,7 +2297,7 @@ MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2);
#define MLXSW_REG_CWTPM_RESET_PROFILE 0
-static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_cwtpm_pack(char *payload, u16 local_port,
u8 traffic_class, u8 profile,
bool wred, bool ecn)
{
@@ -2363,7 +2369,7 @@ MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3);
* Local port. Not including CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ppbt, 0x00, 16, 0x00, 12);
/* reg_ppbt_g
* group - When set, the binding is of an ACL group. When cleared,
@@ -2382,7 +2388,7 @@ MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16);
static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e,
enum mlxsw_reg_pxbt_op op,
- u8 local_port, u16 acl_info)
+ u16 local_port, u16 acl_info)
{
MLXSW_REG_ZERO(ppbt, payload);
mlxsw_reg_ppbt_e_set(payload, e);
@@ -3513,7 +3519,7 @@ MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN);
*
* Note: CPU port is supported.
*/
-MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpts, 0x00, 16, 0x00, 12);
enum mlxsw_reg_qpts_trust_state {
MLXSW_REG_QPTS_TRUST_STATE_PCP = 1,
@@ -3526,7 +3532,7 @@ enum mlxsw_reg_qpts_trust_state {
*/
MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3);
-static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qpts_pack(char *payload, u16 local_port,
enum mlxsw_reg_qpts_trust_state ts)
{
MLXSW_REG_ZERO(qpts, payload);
@@ -3717,7 +3723,7 @@ MLXSW_REG_DEFINE(qtct, MLXSW_REG_QTCT_ID, MLXSW_REG_QTCT_LEN);
*
* Note: CPU port is not supported.
*/
-MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qtct, 0x00, 16, 0x00, 12);
/* reg_qtct_sub_port
* Virtual port within the physical port.
@@ -3742,7 +3748,7 @@ MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
-static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qtct_pack(char *payload, u16 local_port,
u8 switch_prio, u8 tclass)
{
MLXSW_REG_ZERO(qtct, payload);
@@ -3766,7 +3772,7 @@ MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
*
* Note: CPU port is supported.
*/
-MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qeec, 0x00, 16, 0x00, 12);
enum mlxsw_reg_qeec_hr {
MLXSW_REG_QEEC_HR_PORT,
@@ -3908,8 +3914,9 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4 11
-static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qeec_pack(char *payload, u16 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index)
{
@@ -3920,7 +3927,7 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
mlxsw_reg_qeec_next_element_index_set(payload, next_index);
}
-static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u16 local_port,
bool ptps)
{
MLXSW_REG_ZERO(qeec, payload);
@@ -3944,7 +3951,7 @@ MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN);
*
* Note: CPU port is supported. No support for router port.
*/
-MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qrwe, 0x00, 16, 0x00, 12);
/* reg_qrwe_dscp
* Whether to enable DSCP rewrite (default is 0, don't rewrite).
@@ -3958,7 +3965,7 @@ MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1);
*/
MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1);
-static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qrwe_pack(char *payload, u16 local_port,
bool rewrite_pcp, bool rewrite_dscp)
{
MLXSW_REG_ZERO(qrwe, payload);
@@ -3985,7 +3992,7 @@ MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN);
* Local Port. Supported for data packets from CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpdsm, 0x00, 16, 0x00, 12);
/* reg_qpdsm_prio_entry_color0_e
* Enable update of the entry for color 0 and a given port.
@@ -4038,7 +4045,7 @@ MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp,
MLXSW_REG_QPDSM_BASE_LEN, 8, 6,
MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_qpdsm_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(qpdsm, payload);
mlxsw_reg_qpdsm_local_port_set(payload, local_port);
@@ -4071,7 +4078,7 @@ MLXSW_REG_DEFINE(qpdp, MLXSW_REG_QPDP_ID, MLXSW_REG_QPDP_LEN);
* Local Port. Supported for data packets from CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpdp, 0x00, 16, 0x00, 12);
/* reg_qpdp_switch_prio
* Default port Switch Priority (default 0)
@@ -4079,7 +4086,7 @@ MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, qpdp, switch_prio, 0x04, 0, 4);
-static inline void mlxsw_reg_qpdp_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qpdp_pack(char *payload, u16 local_port,
u8 switch_prio)
{
MLXSW_REG_ZERO(qpdp, payload);
@@ -4106,7 +4113,7 @@ MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN);
* Local Port. Supported for data packets from CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpdpm, 0x00, 16, 0x00, 12);
/* reg_qpdpm_dscp_e
* Enable update of the specific entry. When cleared, the switch_prio and color
@@ -4125,7 +4132,7 @@ MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio,
MLXSW_REG_QPDPM_BASE_LEN, 0, 4,
MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_qpdpm_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(qpdpm, payload);
mlxsw_reg_qpdpm_local_port_set(payload, local_port);
@@ -4157,7 +4164,7 @@ MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN);
* No support for CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qtctm, 0x00, 16, 0x00, 12);
/* reg_qtctm_mc
* Multicast Mode
@@ -4167,7 +4174,7 @@ MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8);
MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1);
static inline void
-mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc)
+mlxsw_reg_qtctm_pack(char *payload, u16 local_port, bool mc)
{
MLXSW_REG_ZERO(qtctm, payload);
mlxsw_reg_qtctm_local_port_set(payload, local_port);
@@ -4300,7 +4307,7 @@ MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pmlp, 0x00, 16, 0x00, 12);
/* reg_pmlp_width
* 0 - Unmap local port.
@@ -4331,7 +4338,7 @@ MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 4, 0x04, 0x00, false);
*/
MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 4, 0x04, 0x00, false);
-static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pmlp_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pmlp, payload);
mlxsw_reg_pmlp_local_port_set(payload, local_port);
@@ -4350,7 +4357,7 @@ MLXSW_REG_DEFINE(pmtu, MLXSW_REG_PMTU_ID, MLXSW_REG_PMTU_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pmtu, 0x00, 16, 0x00, 12);
/* reg_pmtu_max_mtu
* Maximum MTU.
@@ -4378,7 +4385,7 @@ MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
*/
MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
-static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pmtu_pack(char *payload, u16 local_port,
u16 new_mtu)
{
MLXSW_REG_ZERO(pmtu, payload);
@@ -4412,7 +4419,7 @@ MLXSW_ITEM32(reg, ptys, an_disable_admin, 0x00, 30, 1);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ptys, 0x00, 16, 0x00, 12);
#define MLXSW_REG_PTYS_PROTO_MASK_IB BIT(0)
#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
@@ -4572,7 +4579,7 @@ enum mlxsw_reg_ptys_connector_type {
*/
MLXSW_ITEM32(reg, ptys, connector_type, 0x2C, 0, 4);
-static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ptys_eth_pack(char *payload, u16 local_port,
u32 proto_admin, bool autoneg)
{
MLXSW_REG_ZERO(ptys, payload);
@@ -4582,7 +4589,7 @@ static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg);
}
-static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u16 local_port,
u32 proto_admin, bool autoneg)
{
MLXSW_REG_ZERO(ptys, payload);
@@ -4624,7 +4631,7 @@ static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload,
mlxsw_reg_ptys_ext_eth_proto_oper_get(payload);
}
-static inline void mlxsw_reg_ptys_ib_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ptys_ib_pack(char *payload, u16 local_port,
u16 proto_admin, u16 link_width)
{
MLXSW_REG_ZERO(ptys, payload);
@@ -4672,7 +4679,7 @@ MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
* port number, if single_base_mac = 0 then local_port is reserved
* Access: RW
*/
-MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ppad, 0x00, 16, 0x00, 24);
/* reg_ppad_mac
* If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
@@ -4682,7 +4689,7 @@ MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
- u8 local_port)
+ u16 local_port)
{
MLXSW_REG_ZERO(ppad, payload);
mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
@@ -4711,7 +4718,7 @@ MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, paos, 0x00, 16, 0x00, 12);
/* reg_paos_admin_status
* Port administrative state (the desired state of the port):
@@ -4756,7 +4763,7 @@ MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
*/
MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
-static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_paos_pack(char *payload, u16 local_port,
enum mlxsw_port_admin_status status)
{
MLXSW_REG_ZERO(paos, payload);
@@ -4782,7 +4789,7 @@ MLXSW_REG_DEFINE(pfcc, MLXSW_REG_PFCC_ID, MLXSW_REG_PFCC_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pfcc, 0x00, 16, 0x00, 12);
/* reg_pfcc_pnat
* Port number access type. Determines the way local_port is interpreted:
@@ -4899,7 +4906,7 @@ static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
}
-static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pfcc_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pfcc, payload);
mlxsw_reg_pfcc_local_port_set(payload, local_port);
@@ -4928,11 +4935,9 @@ MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
/* reg_ppcnt_local_port
* Local port number.
- * 255 indicates all ports on the device, and is only allowed
- * for Set() operation.
* Access: Index
*/
-MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ppcnt, 0x00, 16, 0x00, 12);
/* reg_ppcnt_pnat
* Port number access type:
@@ -4981,6 +4986,14 @@ MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
*/
MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+/* reg_ppcnt_lp_gl
+ * Local port global variable.
+ * 0: local_port 255 = all ports of the device.
+ * 1: local_port indicates local port number for all ports.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, lp_gl, 0x04, 30, 1);
+
/* reg_ppcnt_prio_tc
* Priority for counter set that support per priority, valid values: 0-7.
* Traffic class for counter set that support per traffic class,
@@ -5404,7 +5417,7 @@ MLXSW_ITEM64(reg, ppcnt, wred_discard,
MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
-static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u16 local_port,
enum mlxsw_reg_ppcnt_grp grp,
u8 prio_tc)
{
@@ -5414,6 +5427,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
mlxsw_reg_ppcnt_pnat_set(payload, 0);
mlxsw_reg_ppcnt_grp_set(payload, grp);
mlxsw_reg_ppcnt_clr_set(payload, 0);
+ mlxsw_reg_ppcnt_lp_gl_set(payload, 1);
mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
}
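Since the pack helper now always sets lp_gl to 1, local_port is
interpreted as a plain port number for every value, and the legacy
convention where 255 meant all ports no longer applies to driver
queries. A read sketch (local_port is a placeholder; mlxsw_reg_query()
is the usual read path):

	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ppcnt), ppcnt_pl);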
@@ -5430,7 +5444,7 @@ MLXSW_REG_DEFINE(plib, MLXSW_REG_PLIB_ID, MLXSW_REG_PLIB_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, plib, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, plib, 0x00, 16, 0x00, 12);
/* reg_plib_ib_port
* InfiniBand port remapping for local_port.
@@ -5468,7 +5482,7 @@ MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pptb, 0x00, 16, 0x00, 12);
/* reg_pptb_um
* Enables the update of the untagged_buf field.
@@ -5515,7 +5529,7 @@ MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
-static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pptb_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pptb, payload);
mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
@@ -5545,7 +5559,7 @@ MLXSW_REG_DEFINE(pbmc, MLXSW_REG_PBMC_ID, MLXSW_REG_PBMC_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pbmc, 0x00, 16, 0x00, 12);
/* reg_pbmc_xoff_timer_value
* When device generates a pause frame, it uses this value as the pause
@@ -5612,7 +5626,7 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
0x08, 0x04, false);
-static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pbmc_pack(char *payload, u16 local_port,
u16 xoff_timer_value, u16 xoff_refresh)
{
MLXSW_REG_ZERO(pbmc, payload);
@@ -5661,7 +5675,7 @@ MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pspa, 0x00, 16, 0x00, 0);
/* reg_pspa_sub_port
* Virtual port within the local port. Set to 0 when virtual ports are
@@ -5670,7 +5684,7 @@ MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
-static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u16 local_port)
{
MLXSW_REG_ZERO(pspa, payload);
mlxsw_reg_pspa_swid_set(payload, swid);
@@ -5772,7 +5786,7 @@ MLXSW_REG_DEFINE(pplr, MLXSW_REG_PPLR_ID, MLXSW_REG_PPLR_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pplr, 0x00, 16, 0x00, 12);
/* Phy local loopback. When set the port's egress traffic is looped back
* to the receiver and the port transmitter is disabled.
@@ -5785,7 +5799,7 @@ MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, pplr, lb_en, 0x04, 0, 8);
-static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pplr_pack(char *payload, u16 local_port,
bool phy_local)
{
MLXSW_REG_ZERO(pplr, payload);
@@ -5846,7 +5860,7 @@ MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4);
* the module.
* Access: RO
*/
-MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false);
+MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 10, 0x02, 0x00, false);
static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
u8 ports_width, u8 num_ports)
@@ -5915,7 +5929,7 @@ MLXSW_REG_DEFINE(pddr, MLXSW_REG_PDDR_ID, MLXSW_REG_PDDR_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pddr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pddr, 0x00, 16, 0x00, 12);
enum mlxsw_reg_pddr_page_select {
MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO = 1,
@@ -5944,7 +5958,7 @@ MLXSW_ITEM32(reg, pddr, trblsh_group_opcode, 0x08, 0, 16);
*/
MLXSW_ITEM32(reg, pddr, trblsh_status_opcode, 0x0C, 0, 16);
-static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pddr_pack(char *payload, u16 local_port,
u8 page_select)
{
MLXSW_REG_ZERO(pddr, payload);
@@ -6014,7 +6028,7 @@ MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pllp, 0x00, 16, 0x00, 12);
/* reg_pllp_label_port
* Front panel label of the port.
@@ -6034,7 +6048,7 @@ MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4);
*/
MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4);
-static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pllp_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pllp, payload);
mlxsw_reg_pllp_local_port_set(payload, local_port);
@@ -10245,7 +10259,7 @@ MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN);
* The local port to mirror the packets from.
* Access: Index
*/
-MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mpar, 0x00, 16, 0x00, 4);
enum mlxsw_reg_mpar_i_e {
MLXSW_REG_MPAR_TYPE_EGRESS,
@@ -10282,7 +10296,7 @@ MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
*/
MLXSW_ITEM32(reg, mpar, probability_rate, 0x08, 0, 32);
-static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_mpar_pack(char *payload, u16 local_port,
enum mlxsw_reg_mpar_i_e i_e,
bool enable, u8 pa_id,
u32 probability_rate)
@@ -10386,7 +10400,7 @@ MLXSW_REG_DEFINE(mlcr, MLXSW_REG_MLCR_ID, MLXSW_REG_MLCR_LEN);
* Local port number.
* Access: RW
*/
-MLXSW_ITEM32(reg, mlcr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mlcr, 0x00, 16, 0x00, 24);
#define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF
@@ -10405,7 +10419,7 @@ MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16);
*/
MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16);
-static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_mlcr_pack(char *payload, u16 local_port,
bool active)
{
MLXSW_REG_ZERO(mlcr, payload);
@@ -10778,7 +10792,7 @@ MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mpsc, 0x00, 16, 0x00, 12);
/* reg_mpsc_e
* Enable sampling on port local_port
@@ -10795,7 +10809,7 @@ MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1);
*/
MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32);
-static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
+static inline void mlxsw_reg_mpsc_pack(char *payload, u16 local_port, bool e,
u32 rate)
{
MLXSW_REG_ZERO(mpsc, payload);
@@ -11003,7 +11017,7 @@ MLXSW_REG_DEFINE(momte, MLXSW_REG_MOMTE_ID, MLXSW_REG_MOMTE_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, momte, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, momte, 0x00, 16, 0x00, 12);
enum mlxsw_reg_momte_type {
MLXSW_REG_MOMTE_TYPE_WRED = 0x20,
@@ -11030,7 +11044,7 @@ MLXSW_ITEM32(reg, momte, type, 0x04, 0, 8);
*/
MLXSW_ITEM_BIT_ARRAY(reg, momte, tclass_en, 0x08, 0x08, 1);
-static inline void mlxsw_reg_momte_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_momte_pack(char *payload, u16 local_port,
enum mlxsw_reg_momte_type type)
{
MLXSW_REG_ZERO(momte, payload);
@@ -11098,7 +11112,7 @@ MLXSW_REG_DEFINE(mtpptr, MLXSW_REG_MTPPTR_ID, MLXSW_REG_MTPPTR_LEN);
* Not supported for CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, mtpptr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mtpptr, 0x00, 16, 0x00, 12);
enum mlxsw_reg_mtpptr_dir {
MLXSW_REG_MTPPTR_DIR_INGRESS,
@@ -11305,7 +11319,7 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
* -----------------------------------
*/
#define MLXSW_REG_MFDE_ID 0x9200
-#define MLXSW_REG_MFDE_LEN 0x18
+#define MLXSW_REG_MFDE_LEN 0x30
MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
@@ -11315,10 +11329,32 @@ MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
*/
MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 24, 8);
+enum mlxsw_reg_mfde_severity {
+ /* Unrecoverable switch behavior */
+ MLXSW_REG_MFDE_SEVERITY_FATL = 2,
+ /* Unexpected state with possible systemic failure */
+ MLXSW_REG_MFDE_SEVERITY_NRML = 3,
+ /* Unexpected state without systemic failure */
+ MLXSW_REG_MFDE_SEVERITY_INTR = 5,
+};
+
+/* reg_mfde_severity
+ * The severity of the event.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, severity, 0x00, 16, 8);
+
enum mlxsw_reg_mfde_event_id {
+ /* CRspace timeout */
MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1,
/* KVD insertion machine stopped */
MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP,
+ /* Triggered by MFGD.trigger_test */
+ MLXSW_REG_MFDE_EVENT_ID_TEST,
+ /* Triggered when firmware hits an assert */
+ MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT,
+ /* Fatal error interrupt from hardware */
+ MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE,
};
/* reg_mfde_event_id
@@ -11359,32 +11395,110 @@ MLXSW_ITEM32(reg, mfde, command_type, 0x04, 24, 2);
*/
MLXSW_ITEM32(reg, mfde, reg_attr_id, 0x04, 0, 16);
-/* reg_mfde_log_address
+/* reg_mfde_crspace_to_log_address
* crspace address accessed, which resulted in timeout.
- * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32);
+MLXSW_ITEM32(reg, mfde, crspace_to_log_address, 0x10, 0, 32);
+
+/* reg_mfde_crspace_to_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, crspace_to_oe, 0x14, 24, 1);
-/* reg_mfde_log_id
+/* reg_mfde_crspace_to_log_id
* Which irisc triggered the timeout.
- * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4);
+MLXSW_ITEM32(reg, mfde, crspace_to_log_id, 0x14, 0, 4);
-/* reg_mfde_log_ip
+/* reg_mfde_crspace_to_log_ip
* IP (instruction pointer) that triggered the timeout.
- * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
* Access: RO
*/
-MLXSW_ITEM64(reg, mfde, log_ip, 0x18, 0, 64);
+MLXSW_ITEM64(reg, mfde, crspace_to_log_ip, 0x18, 0, 64);
+
+/* reg_mfde_kvd_im_stop_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, kvd_im_stop_oe, 0x10, 24, 1);
-/* reg_mfde_pipes_mask
+/* reg_mfde_kvd_im_stop_pipes_mask
* Bit per kvh pipe.
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, pipes_mask, 0x10, 0, 16);
+MLXSW_ITEM32(reg, mfde, kvd_im_stop_pipes_mask, 0x10, 0, 16);
+
+/* reg_mfde_fw_assert_var0-4
+ * Variables passed to assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_var0, 0x10, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var1, 0x14, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var2, 0x18, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var3, 0x1C, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var4, 0x20, 0, 32);
+
+/* reg_mfde_fw_assert_existptr
+ * The instruction pointer when assert was triggered.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_existptr, 0x24, 0, 32);
+
+/* reg_mfde_fw_assert_callra
+ * The return address after triggering assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_callra, 0x28, 0, 32);
+
+/* reg_mfde_fw_assert_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_oe, 0x2C, 24, 1);
+
+/* reg_mfde_fw_assert_tile_v
+ * 0: The assert was from main
+ * 1: The assert was from a tile
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_tile_v, 0x2C, 23, 1);
+
+/* reg_mfde_fw_assert_tile_index
+ * When tile_v=1, the tile_index that caused the assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_tile_index, 0x2C, 16, 6);
+
+/* reg_mfde_fw_assert_ext_synd
+ * A generated one-to-one identifier which is specific per-assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_ext_synd, 0x2C, 0, 16);
+
+/* reg_mfde_fatal_cause_id
+ * HW interrupt cause id.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_id, 0x10, 0, 18);
+
+/* reg_mfde_fatal_cause_tile_v
+ * 0: The assert was from main
+ * 1: The assert was from a tile
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_tile_v, 0x14, 23, 1);
+
+/* reg_mfde_fatal_cause_tile_index
+ * When tile_v=1, the tile_index that caused the assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_tile_index, 0x14, 16, 6);
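+
With MFDE grown from 0x18 to 0x30 bytes, the offsets from 0x10 onward
are overlaid per event type, which is why the old generic log_* items
were renamed with per-event prefixes. A decode sketch, dispatching on
event_id before reading any overlaid field (the *_get() accessors are
the ones the item macros generate):

	static void mfde_decode(const char *mfde_pl)
	{
		switch (mlxsw_reg_mfde_event_id_get(mfde_pl)) {
		case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
			/* crspace_to_log_address/log_id/log_ip are valid */
			break;
		case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
			/* kvd_im_stop_oe and kvd_im_stop_pipes_mask are valid */
			break;
		case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
			/* fw_assert_var0..4, existptr, callra, ext_synd */
			break;
		case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
			/* fatal_cause_id and the tile fields are valid */
			break;
		}
	}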
/* TNGCR - Tunneling NVE General Configuration Register
* ----------------------------------------------------
@@ -11692,7 +11806,7 @@ MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN);
* Local port number (receive port). CPU port is supported.
* Access: Index
*/
-MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, tnqdr, 0x00, 16, 0x00, 12);
/* reg_tnqdr_dscp
* For encapsulation, the default DSCP.
@@ -11700,7 +11814,7 @@ MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6);
-static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_tnqdr_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(tnqdr, payload);
mlxsw_reg_tnqdr_local_port_set(payload, local_port);
@@ -12028,7 +12142,7 @@ MLXSW_REG_DEFINE(sbcm, MLXSW_REG_SBCM_ID, MLXSW_REG_SBCM_LEN);
* For Egress: excludes IP Router
* Access: Index
*/
-MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sbcm, 0x00, 16, 0x00, 4);
/* reg_sbcm_pg_buff
* PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
@@ -12082,7 +12196,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
*/
MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
-static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
+static inline void mlxsw_reg_sbcm_pack(char *payload, u16 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir,
u32 min_buff, u32 max_buff,
bool infi_max, u8 pool)
@@ -12114,7 +12228,7 @@ MLXSW_REG_DEFINE(sbpm, MLXSW_REG_SBPM_ID, MLXSW_REG_SBPM_LEN);
* For Egress: excludes IP Router
* Access: Index
*/
-MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sbpm, 0x00, 16, 0x00, 12);
/* reg_sbpm_pool
* The pool associated to quota counting on the local_port.
@@ -12168,7 +12282,7 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
*/
MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
-static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
+static inline void mlxsw_reg_sbpm_pack(char *payload, u16 local_port, u8 pool,
enum mlxsw_reg_sbxx_dir dir, bool clr,
u32 min_buff, u32 max_buff)
{
@@ -12266,6 +12380,16 @@ MLXSW_REG_DEFINE(sbsr, MLXSW_REG_SBSR_ID, MLXSW_REG_SBSR_LEN);
*/
MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
+#define MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE 256
+
+/* reg_sbsr_port_page
+ * Determines the range of the ports specified in the 'ingress_port_mask'
+ * and 'egress_port_mask' bit masks.
+ * {ingress,egress}_port_mask[x] is (256 * port_page) + x
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbsr, port_page, 0x04, 0, 4);
+
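Each SBSR query now covers one 256-port page of the port masks: bit x on
page p refers to local port 256 * p + x, so callers iterate pages when
the system has more than 256 ports. A sketch of the mapping
(hypothetical helper name):

	/* Hypothetical helper: locate a local port in the SBSR bit masks. */
	static void sbsr_port_page_and_bit(u16 local_port, u8 *page, u16 *bit)
	{
		*page = local_port / MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
		*bit = local_port % MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
	}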
/* reg_sbsr_ingress_port_mask
* Bit vector for all ingress network ports.
* Indicates which of the ports (for which the relevant bit is set)
@@ -12353,7 +12477,7 @@ MLXSW_REG_DEFINE(sbib, MLXSW_REG_SBIB_ID, MLXSW_REG_SBIB_LEN);
* Not supported for CPU port and router port
* Access: Index
*/
-MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sbib, 0x00, 16, 0x00, 12);
/* reg_sbib_buff_size
* Units represented in cells
@@ -12363,7 +12487,7 @@ MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24);
-static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_sbib_pack(char *payload, u16 local_port,
u32 buff_size)
{
MLXSW_REG_ZERO(sbib, payload);
@@ -12374,7 +12498,6 @@ static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(sgcr),
MLXSW_REG(spad),
- MLXSW_REG(smid),
MLXSW_REG(sspr),
MLXSW_REG(sfdat),
MLXSW_REG(sfd),
@@ -12384,7 +12507,6 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvm),
MLXSW_REG(spaft),
MLXSW_REG(sfgc),
- MLXSW_REG(sftr),
MLXSW_REG(sfdf),
MLXSW_REG(sldr),
MLXSW_REG(slcr),
@@ -12397,6 +12519,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(spvc),
MLXSW_REG(spevet),
+ MLXSW_REG(sftr2),
+ MLXSW_REG(smid2),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
MLXSW_REG(pgcr),
@@ -12556,7 +12680,7 @@ MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pude, 0x00, 16, 0x00, 12);
/* reg_pude_admin_status
* Port administrative state (the desired state).
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 03e5bad4e405..aa411dec62f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -46,8 +46,8 @@
#include "spectrum_trap.h"
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 3326
+#define MLXSW_SP1_FWREV_MINOR 2010
+#define MLXSW_SP1_FWREV_SUBMINOR 1006
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -63,8 +63,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP2_FWREV_MAJOR 29
-#define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 3326
+#define MLXSW_SP2_FWREV_MINOR 2010
+#define MLXSW_SP2_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -78,8 +78,8 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP3_FWREV_MAJOR 30
-#define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 3326
+#define MLXSW_SP3_FWREV_MINOR 2010
+#define MLXSW_SP3_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -95,6 +95,7 @@ static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
+static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
@@ -303,7 +304,7 @@ int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
- unsigned char *addr)
+ const unsigned char *addr)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char ppad_pl[MLXSW_REG_PPAD_LEN];
@@ -352,7 +353,7 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
}
static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 swid)
+ u16 local_port, u8 swid)
{
char pspa_pl[MLXSW_REG_PSPA_LEN];
@@ -483,7 +484,7 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
}
static int
-mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -535,7 +536,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
static int
-mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -560,7 +561,7 @@ err_pmlp_write:
return err;
}
-static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u8 module)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -1474,7 +1475,7 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *port_number,
+ u16 local_port, u8 *port_number,
u8 *split_port_subnumber,
u8 *slot_index)
{
@@ -1490,7 +1491,7 @@ static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
bool split,
struct mlxsw_sp_port_mapping *port_mapping)
{
@@ -1781,7 +1782,7 @@ err_port_swid_set:
return err;
}
-static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
u8 module = mlxsw_sp_port->mapping.module;
@@ -1848,12 +1849,12 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp_port);
}
-static bool mlxsw_sp_local_port_valid(u8 local_port)
+static bool mlxsw_sp_local_port_valid(u16 local_port)
{
return local_port != MLXSW_PORT_CPU_PORT;
}
-static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
if (!mlxsw_sp_local_port_valid(local_port))
return false;
@@ -1971,7 +1972,7 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
split_port_mapping = *port_mapping;
split_port_mapping.width /= count;
for (i = 0; i < count; i++) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (!mlxsw_sp_local_port_valid(s_local_port))
continue;
@@ -1987,7 +1988,7 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
err_port_create:
for (i--; i >= 0; i--) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
@@ -2004,7 +2005,7 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
/* Go over original unsplit ports in the gap and recreate them. */
for (i = 0; i < count; i++) {
- u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
port_mapping = mlxsw_sp->port_mapping[local_port];
if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
@@ -2015,14 +2016,14 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_port *
-mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
return mlxsw_sp->ports[local_port];
return NULL;
}
-static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
+static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
unsigned int count,
struct netlink_ext_ack *extack)
{
@@ -2065,7 +2066,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
port_mapping = mlxsw_sp_port->mapping;
for (i = 0; i < count; i++) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
@@ -2085,7 +2086,7 @@ err_port_split_create:
return err;
}
-static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
+static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -2121,7 +2122,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
}
for (i = 0; i < count; i++) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
@@ -2148,7 +2149,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
struct mlxsw_sp_port *mlxsw_sp_port;
enum mlxsw_reg_pude_oper_status status;
unsigned int max_ports;
- u8 local_port;
+ u16 local_port;
max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
@@ -2174,7 +2175,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
char *mtpptr_pl, bool ingress)
{
- u8 local_port;
+ u16 local_port;
u8 num_rec;
int i;
@@ -2212,7 +2213,7 @@ static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
}
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
- u8 local_port, void *priv)
+ u16 local_port, void *priv)
{
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
@@ -2236,7 +2237,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
netif_receive_skb(skb);
}
-static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
void *priv)
{
skb->offload_fwd_mark = 1;
@@ -2244,7 +2245,7 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
}
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
- u8 local_port, void *priv)
+ u16 local_port, void *priv)
{
skb->offload_l3_fwd_mark = 1;
skb->offload_fwd_mark = 1;
@@ -2252,7 +2253,7 @@ static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
}
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port)
+ u16 local_port)
{
mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
@@ -2755,6 +2756,140 @@ static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
mutex_destroy(&mlxsw_sp->parsing.lock);
}
+struct mlxsw_sp_ipv6_addr_node {
+ struct in6_addr key;
+ struct rhash_head ht_node;
+ u32 kvdl_index;
+ refcount_t refcount;
+};
+
+static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
+ .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
+ .key_len = sizeof(struct in6_addr),
+ .automatic_shrinking = true,
+};
+
+static int
+mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+ char rips_pl[MLXSW_REG_RIPS_LEN];
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ p_kvdl_index);
+ if (err)
+ return err;
+
+ mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
+ if (err)
+ goto err_rips_write;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ err = -ENOMEM;
+ goto err_node_alloc;
+ }
+
+ node->key = *addr6;
+ node->kvdl_index = *p_kvdl_index;
+ refcount_set(&node->refcount, 1);
+
+ err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
+ &node->ht_node,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(node);
+err_node_alloc:
+err_rips_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ *p_kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipv6_addr_node *node)
+{
+ u32 kvdl_index = node->kvdl_index;
+
+ rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
+ mlxsw_sp_ipv6_addr_ht_params);
+ kfree(node);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ kvdl_index);
+}
+
+int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
+ node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (node) {
+ refcount_inc(&node->refcount);
+ *p_kvdl_index = node->kvdl_index;
+ goto out_unlock;
+ }
+
+ err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
+ return err;
+}
+
+void
+mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+
+ mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
+ node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (WARN_ON(!node))
+ goto out_unlock;
+
+ if (!refcount_dec_and_test(&node->refcount))
+ goto out_unlock;
+
+ mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
+}
+
+static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
+ &mlxsw_sp_ipv6_addr_ht_params);
+ if (err)
+ return err;
+
+ mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
+ return 0;
+}
+
+static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
+ rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
+}
+
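The helpers above form a reference-counted wrapper around a shared KVDL entry: the first get for an address allocates the entry and programs the RIPS register, later gets only bump the refcount, and the last put frees it. A minimal caller sketch, assuming a struct mlxsw_sp is at hand (my_encap_init and the hardware-programming step are illustrative, not driver symbols):

static int my_encap_init(struct mlxsw_sp *mlxsw_sp,
			 const struct in6_addr *addr6)
{
	u32 kvdl_index;
	int err;

	/* Takes a reference; allocates the entry and writes RIPS on first use. */
	err = mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, addr6, &kvdl_index);
	if (err)
		return err;

	/* ... program kvdl_index into whatever object needs it ... */

	/* Drops the reference; frees the entry once the count reaches zero. */
	mlxsw_sp_ipv6_addr_put(mlxsw_sp, addr6);
	return 0;
}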
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack)
@@ -2843,6 +2978,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_afa_init;
}
+ err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
+ goto err_ipv6_addr_ht_init;
+ }
+
err = mlxsw_sp_nve_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
@@ -2944,6 +3085,8 @@ err_router_init:
err_acl_init:
mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
+ mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
+err_ipv6_addr_ht_init:
mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -3013,6 +3156,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
@@ -3042,6 +3186,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
@@ -3058,6 +3203,36 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
+static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
+ mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -3075,6 +3250,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_nve_fini(mlxsw_sp);
+ mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -3336,7 +3512,7 @@ err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3370,7 +3546,7 @@ err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3486,7 +3662,7 @@ static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
}
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -3616,6 +3792,45 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.temp_warn_enabled = true,
};
+static struct mlxsw_driver mlxsw_sp4_driver = {
+ .kind = mlxsw_sp4_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp4_init,
+ .fini = mlxsw_sp_fini,
+ .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .params_register = mlxsw_sp2_params_register,
+ .params_unregister = mlxsw_sp2_params_unregister,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
+ .res_query_enabled = true,
+ .fw_fatal_enabled = true,
+ .temp_warn_enabled = true,
+};
+
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
@@ -4783,6 +4998,16 @@ static struct pci_driver mlxsw_sp3_pci_driver = {
.id_table = mlxsw_sp3_pci_id_table,
};
+static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp4_pci_driver = {
+ .name = mlxsw_sp4_driver_name,
+ .id_table = mlxsw_sp4_pci_id_table,
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
@@ -4802,6 +5027,10 @@ static int __init mlxsw_sp_module_init(void)
if (err)
goto err_sp3_core_driver_register;
+ err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
+ if (err)
+ goto err_sp4_core_driver_register;
+
err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
if (err)
goto err_sp1_pci_driver_register;
@@ -4814,13 +5043,21 @@ static int __init mlxsw_sp_module_init(void)
if (err)
goto err_sp3_pci_driver_register;
+ err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
+ if (err)
+ goto err_sp4_pci_driver_register;
+
return 0;
+err_sp4_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
+err_sp4_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
@@ -4834,9 +5071,11 @@ err_sp1_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
+ mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
@@ -4853,6 +5092,7 @@ MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 32fdd37657dd..bb2442e1f705 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -190,6 +190,7 @@ struct mlxsw_sp {
const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops;
const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
+ const struct mlxsw_sp_acl_bf_ops *acl_bf_ops;
const struct mlxsw_sp_nve_ops **nve_ops_arr;
const struct mlxsw_sp_sb_vals *sb_vals;
const struct mlxsw_sp_sb_ops *sb_ops;
@@ -203,6 +204,8 @@ struct mlxsw_sp {
const struct mlxsw_listener *listeners;
size_t listeners_count;
u32 lowest_shaper_bs;
+ struct rhashtable ipv6_addr_ht;
+ struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
};
struct mlxsw_sp_ptp_ops {
@@ -217,13 +220,13 @@ struct mlxsw_sp_ptp_ops {
* is responsible for freeing the passed-in SKB.
*/
void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
/* Notify a driver that a timestamped packet was transmitted. Driver
* is responsible for freeing the passed-in SKB.
*/
void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config);
@@ -261,7 +264,7 @@ enum mlxsw_sp_sample_trigger_type {
struct mlxsw_sp_sample_trigger {
enum mlxsw_sp_sample_trigger_type type;
- u8 local_port; /* Reserved when trigger type is not ingress / egress. */
+ u16 local_port; /* Reserved when trigger type is not ingress / egress. */
};
struct mlxsw_sp_sample_params {
@@ -308,7 +311,7 @@ struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
- u8 local_port;
+ u16 local_port;
u8 lagged:1,
split:1;
u16 pvid;
@@ -370,7 +373,7 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*to_ptys_speed_lanes)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload,
- u8 local_port, u32 proto_admin, bool autoneg);
+ u16 local_port, u32 proto_admin, bool autoneg);
void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload,
u32 *p_eth_proto_cap,
u32 *p_eth_proto_admin,
@@ -441,7 +444,7 @@ static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 local_port;
+ u16 local_port;
local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
lag_id, port_index);
@@ -587,6 +590,11 @@ mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_sample_trigger *trigger);
+int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index);
+void
+mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6);
extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
@@ -621,9 +629,9 @@ extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
- u8 local_port, void *priv);
+ u16 local_port, void *priv);
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
@@ -729,7 +737,7 @@ void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
-u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
+u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
const union mlxsw_sp_l3addr *ul_sip,
@@ -1099,6 +1107,11 @@ extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
/* spectrum_acl_flex_keys.c */
extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
+extern const struct mlxsw_afk_ops mlxsw_sp4_afk_ops;
+
+/* spectrum_acl_bloom_filter.c */
+extern const struct mlxsw_sp_acl_bf_ops mlxsw_sp2_acl_bf_ops;
+extern const struct mlxsw_sp_acl_bf_ops mlxsw_sp4_acl_bf_ops;
/* spectrum_matchall.c */
struct mlxsw_sp_mall_ops {
@@ -1222,7 +1235,7 @@ bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev);
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
- enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member);
int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
@@ -1310,6 +1323,17 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid,
enum mlxsw_sp_l3proto proto,
union mlxsw_sp_l3addr *addr);
+int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index);
+void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6);
+int
+mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index,
+ const struct in6_addr *new_addr6);
+void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index);
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
index a11d911302f1..e4f4cded2b6f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -45,8 +45,8 @@ static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp,
}
static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = {
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
MLXSW_AFK_ELEMENT_SRC_IP_0_31,
MLXSW_AFK_ELEMENT_DST_IP_0_31,
};
@@ -89,8 +89,8 @@ static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam)
}
static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = {
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
MLXSW_AFK_ELEMENT_SRC_IP_96_127,
MLXSW_AFK_ELEMENT_SRC_IP_64_95,
MLXSW_AFK_ELEMENT_SRC_IP_32_63,
@@ -189,10 +189,10 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule,
rulei = mlxsw_sp_acl_rule_rulei(rule);
rulei->priority = priority;
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
key->vrid, GENMASK(7, 0));
mlxsw_sp_acl_rulei_keymask_u32(rulei,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
key->vrid >> 8, GENMASK(2, 0));
switch (key->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
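The rename reflects that the 11-bit virtual-router ID is simply split across two key elements, 8 LSBs and 3 MSBs, exactly as the two keymask calls above encode it. A tiny sketch of the split (vrid_split is a hypothetical helper, not in the patch):

static void vrid_split(u16 vrid, u8 *lsb, u8 *msb)
{
	*lsb = vrid & 0xff;		/* VIRT_ROUTER_LSB, GENMASK(7, 0) */
	*msb = (vrid >> 8) & 0x07;	/* VIRT_ROUTER_MSB, GENMASK(2, 0) */
}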
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 67cedfa76f78..70c11bfac08f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -406,7 +406,7 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 local_port;
+ u16 local_port;
bool in_port;
if (out_dev) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index dbd3bebf11ec..e2aced7ab454 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -17,9 +17,9 @@ struct mlxsw_sp_acl_bf {
};
/* Bloom filter uses a crc-16 hash over chunks of data which contain 4 key
- * blocks, eRP ID and region ID. In Spectrum-2, region key is combined of up to
- * 12 key blocks, so there can be up to 3 chunks in the Bloom filter key,
- * depending on the actual number of key blocks used in the region.
+ * blocks, eRP ID and region ID. In Spectrum-2 and above, the region key is
+ * composed of up to 12 key blocks, so there can be up to 3 chunks in the Bloom
+ * filter key, depending on the actual number of key blocks used in the region.
* The layout of the Bloom filter key is as follows:
*
* +-------------------------+------------------------+------------------------+
@@ -27,7 +27,9 @@ struct mlxsw_sp_acl_bf {
* +-------------------------+------------------------+------------------------+
*/
#define MLXSW_BLOOM_KEY_CHUNKS 3
-#define MLXSW_BLOOM_KEY_LEN 69
+
+/* Spectrum-2 and Spectrum-3 chunks */
+#define MLXSW_SP2_BLOOM_KEY_LEN 69
/* Each chunk size is 23 bytes. 18 bytes of it contain 4 key blocks, each is
* 36 bits, 2 bytes which hold eRP ID and region ID, and 3 bytes of zero
@@ -42,31 +44,21 @@ struct mlxsw_sp_acl_bf {
* | 0 | region ID | eRP ID | 4 Key blocks (18 Bytes) |
* +---------+-----------+----------+-----------------------------------+
*/
-#define MLXSW_BLOOM_CHUNK_PAD_BYTES 3
-#define MLXSW_BLOOM_CHUNK_KEY_BYTES 18
-#define MLXSW_BLOOM_KEY_CHUNK_BYTES 23
+#define MLXSW_SP2_BLOOM_CHUNK_PAD_BYTES 3
+#define MLXSW_SP2_BLOOM_CHUNK_KEY_BYTES 18
+#define MLXSW_SP2_BLOOM_KEY_CHUNK_BYTES 23
/* The offset of the key block within a chunk is 5 bytes as it comes after
* 3 bytes of zero padding and 16 bits of region ID and eRP ID.
*/
-#define MLXSW_BLOOM_CHUNK_KEY_OFFSET 5
+#define MLXSW_SP2_BLOOM_CHUNK_KEY_OFFSET 5
-/* Each chunk contains 4 key blocks. Chunk 2 uses key blocks 11-8,
- * and we need to populate it with 4 key blocks copied from the entry encoded
- * key. Since the encoded key contains a padding, key block 11 starts at offset
- * 2. block 7 that is used in chunk 1 starts at offset 20 as 4 key blocks take
- * 18 bytes.
- * This array defines key offsets for easy access when copying key blocks from
- * entry key to Bloom filter chunk.
- */
-static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
-
-/* This table is just the CRC of each possible byte. It is
- * computed, Msbit first, for the Bloom filter polynomial
- * which is 0x8529 (1 + x^3 + x^5 + x^8 + x^10 + x^15 and
+/* This table is just the CRC of each possible byte which is used for
+ * Spectrum-{2,3}. It is computed, Msbit first, for the Bloom filter
+ * polynomial which is 0x8529 (1 + x^3 + x^5 + x^8 + x^10 + x^15 and
* the implicit x^16).
*/
-static const u16 mlxsw_sp_acl_bf_crc_tab[256] = {
+static const u16 mlxsw_sp2_acl_bf_crc16_tab[256] = {
0x0000, 0x8529, 0x8f7b, 0x0a52, 0x9bdf, 0x1ef6, 0x14a4, 0x918d,
0xb297, 0x37be, 0x3dec, 0xb8c5, 0x2948, 0xac61, 0xa633, 0x231a,
0xe007, 0x652e, 0x6f7c, 0xea55, 0x7bd8, 0xfef1, 0xf4a3, 0x718a,
@@ -101,24 +93,146 @@ static const u16 mlxsw_sp_acl_bf_crc_tab[256] = {
0x0c4c, 0x8965, 0x8337, 0x061e, 0x9793, 0x12ba, 0x18e8, 0x9dc1,
};
-static u16 mlxsw_sp_acl_bf_crc_byte(u16 crc, u8 c)
+/* Spectrum-4 chunks */
+#define MLXSW_SP4_BLOOM_KEY_LEN 60
+
+/* In Spectrum-4, there is no padding. Each chunk size is 20 bytes.
+ * 18 bytes of it contain 4 key blocks, each of which is 36 bits, and 2 bytes
+ * hold the eRP ID and region ID.
+ * The layout of each chunk is as follows:
+ *
+ * +-----------+----------+-----------------------------------+
+ * |       2 bytes        |             18 bytes              |
+ * +-----------+----------+-----------------------------------+
+ * | 157:148   | 147:144  |               143:0               |
+ * +-----------+----------+-----------------------------------+
+ * | region ID |  eRP ID  |      4 Key blocks (18 Bytes)      |
+ * +-----------+----------+-----------------------------------+
+ */
+
+#define MLXSW_SP4_BLOOM_CHUNK_PAD_BYTES 0
+#define MLXSW_SP4_BLOOM_CHUNK_KEY_BYTES 18
+#define MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES 20
+
+/* The offset of the key block within a chunk is 2 bytes as it comes after
+ * 16 bits of region ID and eRP ID.
+ */
+#define MLXSW_SP4_BLOOM_CHUNK_KEY_OFFSET 2
+
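The two key lengths follow directly from the chunk geometry, 3 chunks of 23 bytes on Spectrum-2/3 and 3 chunks of 20 bytes on Spectrum-4; a build-time check along these lines (a sketch, not part of the patch) would document the relation:

static_assert(MLXSW_SP2_BLOOM_KEY_LEN ==
	      MLXSW_BLOOM_KEY_CHUNKS * MLXSW_SP2_BLOOM_KEY_CHUNK_BYTES); /* 3 * 23 */
static_assert(MLXSW_SP4_BLOOM_KEY_LEN ==
	      MLXSW_BLOOM_KEY_CHUNKS * MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES); /* 3 * 20 */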
+/* For Spectrum-4, two hash functions are used, based on CRC-10 and CRC-6.
+ * The result is a combination of the two calculations:
+ * the 6-bit column is the MSB (result of CRC-6),
+ * the 10-bit row is the LSB (result of CRC-10).
+ */
+
+/* This table is just the CRC of each possible byte which is used for
+ * Spectrum-4. It is computed, Msbit first, for the Bloom filter
+ * polynomial which is 0x1b (1 + x^1 + x^3 + x^4 and the implicit x^10).
+ */
+static const u16 mlxsw_sp4_acl_bf_crc10_tab[256] = {
+0x0000, 0x001b, 0x0036, 0x002d, 0x006c, 0x0077, 0x005a, 0x0041,
+0x00d8, 0x00c3, 0x00ee, 0x00f5, 0x00b4, 0x00af, 0x0082, 0x0099,
+0x01b0, 0x01ab, 0x0186, 0x019d, 0x01dc, 0x01c7, 0x01ea, 0x01f1,
+0x0168, 0x0173, 0x015e, 0x0145, 0x0104, 0x011f, 0x0132, 0x0129,
+0x0360, 0x037b, 0x0356, 0x034d, 0x030c, 0x0317, 0x033a, 0x0321,
+0x03b8, 0x03a3, 0x038e, 0x0395, 0x03d4, 0x03cf, 0x03e2, 0x03f9,
+0x02d0, 0x02cb, 0x02e6, 0x02fd, 0x02bc, 0x02a7, 0x028a, 0x0291,
+0x0208, 0x0213, 0x023e, 0x0225, 0x0264, 0x027f, 0x0252, 0x0249,
+0x02db, 0x02c0, 0x02ed, 0x02f6, 0x02b7, 0x02ac, 0x0281, 0x029a,
+0x0203, 0x0218, 0x0235, 0x022e, 0x026f, 0x0274, 0x0259, 0x0242,
+0x036b, 0x0370, 0x035d, 0x0346, 0x0307, 0x031c, 0x0331, 0x032a,
+0x03b3, 0x03a8, 0x0385, 0x039e, 0x03df, 0x03c4, 0x03e9, 0x03f2,
+0x01bb, 0x01a0, 0x018d, 0x0196, 0x01d7, 0x01cc, 0x01e1, 0x01fa,
+0x0163, 0x0178, 0x0155, 0x014e, 0x010f, 0x0114, 0x0139, 0x0122,
+0x000b, 0x0010, 0x003d, 0x0026, 0x0067, 0x007c, 0x0051, 0x004a,
+0x00d3, 0x00c8, 0x00e5, 0x00fe, 0x00bf, 0x00a4, 0x0089, 0x0092,
+0x01ad, 0x01b6, 0x019b, 0x0180, 0x01c1, 0x01da, 0x01f7, 0x01ec,
+0x0175, 0x016e, 0x0143, 0x0158, 0x0119, 0x0102, 0x012f, 0x0134,
+0x001d, 0x0006, 0x002b, 0x0030, 0x0071, 0x006a, 0x0047, 0x005c,
+0x00c5, 0x00de, 0x00f3, 0x00e8, 0x00a9, 0x00b2, 0x009f, 0x0084,
+0x02cd, 0x02d6, 0x02fb, 0x02e0, 0x02a1, 0x02ba, 0x0297, 0x028c,
+0x0215, 0x020e, 0x0223, 0x0238, 0x0279, 0x0262, 0x024f, 0x0254,
+0x037d, 0x0366, 0x034b, 0x0350, 0x0311, 0x030a, 0x0327, 0x033c,
+0x03a5, 0x03be, 0x0393, 0x0388, 0x03c9, 0x03d2, 0x03ff, 0x03e4,
+0x0376, 0x036d, 0x0340, 0x035b, 0x031a, 0x0301, 0x032c, 0x0337,
+0x03ae, 0x03b5, 0x0398, 0x0383, 0x03c2, 0x03d9, 0x03f4, 0x03ef,
+0x02c6, 0x02dd, 0x02f0, 0x02eb, 0x02aa, 0x02b1, 0x029c, 0x0287,
+0x021e, 0x0205, 0x0228, 0x0233, 0x0272, 0x0269, 0x0244, 0x025f,
+0x0016, 0x000d, 0x0020, 0x003b, 0x007a, 0x0061, 0x004c, 0x0057,
+0x00ce, 0x00d5, 0x00f8, 0x00e3, 0x00a2, 0x00b9, 0x0094, 0x008f,
+0x01a6, 0x01bd, 0x0190, 0x018b, 0x01ca, 0x01d1, 0x01fc, 0x01e7,
+0x017e, 0x0165, 0x0148, 0x0153, 0x0112, 0x0109, 0x0124, 0x013f,
+};
+
+/* This table is just the CRC of each possible byte which is used for
+ * Spectrum-4. It is computed, Msbit first, for the Bloom filter
+ * polynomial which is 0x2d (1 + x^2 + x^3 + x^5 and the implicit x^6).
+ */
+static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = {
+0x00, 0x2d, 0x37, 0x1a, 0x03, 0x2e, 0x34, 0x19,
+0x06, 0x2b, 0x31, 0x1c, 0x05, 0x28, 0x32, 0x1f,
+0x0c, 0x21, 0x3b, 0x16, 0x0f, 0x22, 0x38, 0x15,
+0x0a, 0x27, 0x3d, 0x10, 0x09, 0x24, 0x3e, 0x13,
+0x18, 0x35, 0x2f, 0x02, 0x1b, 0x36, 0x2c, 0x01,
+0x1e, 0x33, 0x29, 0x04, 0x1d, 0x30, 0x2a, 0x07,
+0x14, 0x39, 0x23, 0x0e, 0x17, 0x3a, 0x20, 0x0d,
+0x12, 0x3f, 0x25, 0x08, 0x11, 0x3c, 0x26, 0x0b,
+0x30, 0x1d, 0x07, 0x2a, 0x33, 0x1e, 0x04, 0x29,
+0x36, 0x1b, 0x01, 0x2c, 0x35, 0x18, 0x02, 0x2f,
+0x3c, 0x11, 0x0b, 0x26, 0x3f, 0x12, 0x08, 0x25,
+0x3a, 0x17, 0x0d, 0x20, 0x39, 0x14, 0x0e, 0x23,
+0x28, 0x05, 0x1f, 0x32, 0x2b, 0x06, 0x1c, 0x31,
+0x2e, 0x03, 0x19, 0x34, 0x2d, 0x00, 0x1a, 0x37,
+0x24, 0x09, 0x13, 0x3e, 0x27, 0x0a, 0x10, 0x3d,
+0x22, 0x0f, 0x15, 0x38, 0x21, 0x0c, 0x16, 0x3b,
+0x0d, 0x20, 0x3a, 0x17, 0x0e, 0x23, 0x39, 0x14,
+0x0b, 0x26, 0x3c, 0x11, 0x08, 0x25, 0x3f, 0x12,
+0x01, 0x2c, 0x36, 0x1b, 0x02, 0x2f, 0x35, 0x18,
+0x07, 0x2a, 0x30, 0x1d, 0x04, 0x29, 0x33, 0x1e,
+0x15, 0x38, 0x22, 0x0f, 0x16, 0x3b, 0x21, 0x0c,
+0x13, 0x3e, 0x24, 0x09, 0x10, 0x3d, 0x27, 0x0a,
+0x19, 0x34, 0x2e, 0x03, 0x1a, 0x37, 0x2d, 0x00,
+0x1f, 0x32, 0x28, 0x05, 0x1c, 0x31, 0x2b, 0x06,
+0x3d, 0x10, 0x0a, 0x27, 0x3e, 0x13, 0x09, 0x24,
+0x3b, 0x16, 0x0c, 0x21, 0x38, 0x15, 0x0f, 0x22,
+0x31, 0x1c, 0x06, 0x2b, 0x32, 0x1f, 0x05, 0x28,
+0x37, 0x1a, 0x00, 0x2d, 0x34, 0x19, 0x03, 0x2e,
+0x25, 0x08, 0x12, 0x3f, 0x26, 0x0b, 0x11, 0x3c,
+0x23, 0x0e, 0x14, 0x39, 0x20, 0x0d, 0x17, 0x3a,
+0x29, 0x04, 0x1e, 0x33, 0x2a, 0x07, 0x1d, 0x30,
+0x2f, 0x02, 0x18, 0x35, 0x2c, 0x01, 0x1b, 0x36,
+};
+
+/* Each chunk contains 4 key blocks. Chunk 2 uses key blocks 11-8,
+ * and we need to populate it with 4 key blocks copied from the entry encoded
+ * key. The original key layout is the same for Spectrum-{2,3,4}.
+ * Since the encoded key contains 2 bytes of padding, key block 11 starts at
+ * offset 2. Block 7, which is used in chunk 1, starts at offset 20, as 4 key
+ * blocks take 18 bytes. See 'MLXSW_SP2_AFK_BLOCK_LAYOUT' for more details.
+ * This array defines key offsets for easy access when copying key blocks from
+ * entry key to Bloom filter chunk.
+ */
+static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
+
+static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c)
{
- return (crc << 8) ^ mlxsw_sp_acl_bf_crc_tab[(crc >> 8) ^ c];
+ return (crc << 8) ^ mlxsw_sp2_acl_bf_crc16_tab[(crc >> 8) ^ c];
}
-static u16 mlxsw_sp_acl_bf_crc(const u8 *buffer, size_t len)
+static u16 mlxsw_sp2_acl_bf_crc(const u8 *buffer, size_t len)
{
u16 crc = 0;
while (len--)
- crc = mlxsw_sp_acl_bf_crc_byte(crc, *buffer++);
+ crc = mlxsw_sp2_acl_bf_crc16_byte(crc, *buffer++);
return crc;
}
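For the curious, a table like mlxsw_sp2_acl_bf_crc16_tab[] can be generated offline with a routine along these lines (a standalone C sketch, not driver code):

#include <stdint.h>

static void gen_crc16_tab(uint16_t tab[256], uint16_t poly)
{
	int i, bit;

	for (i = 0; i < 256; i++) {
		uint16_t crc = (uint16_t)(i << 8);	/* byte enters at the MSB */

		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ poly)
					     : (uint16_t)(crc << 1);
		tab[i] = crc;
	}
}

/* gen_crc16_tab(tab, 0x8529) reproduces the entries above:
 * tab[0] == 0x0000, tab[1] == 0x8529, tab[2] == 0x8f7b, ...
 */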
static void
-mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_atcam_entry *aentry,
- char *output, u8 *len)
+__mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len, u8 max_chunks, u8 pad_bytes,
+ u8 key_offset, u8 chunk_key_len, u8 chunk_len)
{
struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
u8 chunk_index, chunk_count, block_count;
@@ -129,37 +243,168 @@ mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
chunk_count = 1 + ((block_count - 1) >> 2);
erp_region_id = cpu_to_be16(aentry->ht_key.erp_id |
(aregion->region->id << 4));
- for (chunk_index = MLXSW_BLOOM_KEY_CHUNKS - chunk_count;
- chunk_index < MLXSW_BLOOM_KEY_CHUNKS; chunk_index++) {
- memset(chunk, 0, MLXSW_BLOOM_CHUNK_PAD_BYTES);
- memcpy(chunk + MLXSW_BLOOM_CHUNK_PAD_BYTES, &erp_region_id,
+ for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks;
+ chunk_index++) {
+ memset(chunk, 0, pad_bytes);
+ memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
- memcpy(chunk + MLXSW_BLOOM_CHUNK_KEY_OFFSET,
+ memcpy(chunk + key_offset,
&aentry->enc_key[chunk_key_offsets[chunk_index]],
- MLXSW_BLOOM_CHUNK_KEY_BYTES);
- chunk += MLXSW_BLOOM_KEY_CHUNK_BYTES;
+ chunk_key_len);
+ chunk += chunk_len;
}
- *len = chunk_count * MLXSW_BLOOM_KEY_CHUNK_BYTES;
+ *len = chunk_count * chunk_len;
+}
+
+static void
+mlxsw_sp2_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len)
+{
+ __mlxsw_sp_acl_bf_key_encode(aregion, aentry, output, len,
+ MLXSW_BLOOM_KEY_CHUNKS,
+ MLXSW_SP2_BLOOM_CHUNK_PAD_BYTES,
+ MLXSW_SP2_BLOOM_CHUNK_KEY_OFFSET,
+ MLXSW_SP2_BLOOM_CHUNK_KEY_BYTES,
+ MLXSW_SP2_BLOOM_KEY_CHUNK_BYTES);
}
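One detail of the shared encoder worth spelling out: chunk_count above is a ceiling division, four key blocks per chunk, so 1-4 blocks use 1 chunk, 5-8 use 2 and 9-12 use 3. Equivalently (a sketch, not in the patch):

static u8 bf_chunk_count(u8 block_count)
{
	return 1 + ((block_count - 1) >> 2);	/* DIV_ROUND_UP(block_count, 4) */
}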
static unsigned int
-mlxsw_sp_acl_bf_rule_count_index_get(struct mlxsw_sp_acl_bf *bf,
- unsigned int erp_bank,
- unsigned int bf_index)
+mlxsw_sp2_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
{
- return erp_bank * bf->bank_size + bf_index;
+ char bf_key[MLXSW_SP2_BLOOM_KEY_LEN];
+ u8 bf_size;
+
+ mlxsw_sp2_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size);
+ return mlxsw_sp2_acl_bf_crc(bf_key, bf_size);
+}
+
+static u16 mlxsw_sp4_acl_bf_crc10_byte(u16 crc, u8 c)
+{
+ u8 index = ((crc >> 2) ^ c) & 0xff;
+
+ return ((crc << 8) ^ mlxsw_sp4_acl_bf_crc10_tab[index]) & 0x3ff;
+}
+
+static u16 mlxsw_sp4_acl_bf_crc6_byte(u16 crc, u8 c)
+{
+ u8 index = (crc ^ c) & 0xff;
+
+ return ((crc << 6) ^ (mlxsw_sp4_acl_bf_crc6_tab[index] << 2)) & 0xfc;
+}
+
+static u16 mlxsw_sp4_acl_bf_crc(const u8 *buffer, size_t len)
+{
+ u16 crc_row = 0, crc_col = 0;
+
+ while (len--) {
+ crc_row = mlxsw_sp4_acl_bf_crc10_byte(crc_row, *buffer);
+ crc_col = mlxsw_sp4_acl_bf_crc6_byte(crc_col, *buffer);
+ buffer++;
+ }
+
+ crc_col >>= 2;
+
+	/* The 6-bit column is the MSB, the 10-bit row is the LSB */
+ return (crc_col << 10) | crc_row;
+}
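The two Spectrum-4 tables can be derived the same way as the CRC-16 one; a parameterized sketch (standalone, not driver code) that reproduces both:

#include <stdint.h>

static uint16_t gen_crc_entry(uint8_t byte, uint16_t poly, int width)
{
	uint16_t mask = (1u << width) - 1;
	uint16_t crc = 0;
	int bit;

	for (bit = 7; bit >= 0; bit--) {
		uint16_t fb = ((crc >> (width - 1)) ^ (byte >> bit)) & 1;

		crc = (crc << 1) & mask;
		if (fb)
			crc ^= poly;
	}
	return crc;
}

/* gen_crc_entry(i, 0x1b, 10) matches mlxsw_sp4_acl_bf_crc10_tab[i];
 * gen_crc_entry(i, 0x2d, 6) matches mlxsw_sp4_acl_bf_crc6_tab[i].
 */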
+
+static void right_shift_array(char *arr, u8 len, u8 shift_bits)
+{
+ u8 byte_mask = 0xff >> shift_bits;
+ int i;
+
+ if (WARN_ON(!shift_bits || shift_bits >= 8))
+ return;
+
+ for (i = len - 1; i >= 0; i--) {
+ /* The first iteration looks like out-of-bounds access,
+ * but actually references a buffer that the array is shifted
+ * into. This move is legal as we never send the last chunk to
+ * this function.
+ */
+ arr[i + 1] &= byte_mask;
+ arr[i + 1] |= arr[i] << (8 - shift_bits);
+ arr[i] = arr[i] >> shift_bits;
+ }
+}
+
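A standalone version of the shift with a worked example may make the carry direction clearer; unsigned bytes are used here to sidestep the sign-extension question that plain char leaves open (values are made up for illustration):

#include <stdio.h>

static void right_shift_bytes(unsigned char *arr, int len, int shift)
{
	unsigned char mask = 0xff >> shift;
	int i;

	for (i = len - 1; i >= 0; i--) {
		arr[i + 1] &= mask;
		arr[i + 1] |= arr[i] << (8 - shift);
		arr[i] >>= shift;
	}
}

int main(void)
{
	unsigned char buf[3] = {0xab, 0xcd, 0x00};

	/* Shift the first two bytes right by 2; the spill lands in buf[2]. */
	right_shift_bytes(buf, 2, 2);
	printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);	/* 2a f3 40 */
	return 0;
}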
+static void mlxsw_sp4_bf_key_shift_chunks(u8 chunk_count, char *output)
+{
+	/* The chunks are supposed to be contiguous, with no padding.
+	 * Since region ID and eRP ID use 14 bits, not a full 2 bytes,
+	 * and in Spectrum-4 there is no padding, it is necessary to shift some
+	 * chunks 2 bits right.
+ */
+ switch (chunk_count) {
+ case 2:
+		/* The chunks are copied as follows:
+ * +-------------+-----------------+
+ * | Chunk 0 | Chunk 1 |
+ * | IDs | keys |(**) IDs | keys |
+ * +-------------+-----------------+
+ * In (**), there are two unused bits, therefore, chunk 0 needs
+ * to be shifted two bits right.
+ */
+ right_shift_array(output, MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 2);
+ break;
+ case 3:
+		/* The chunks are copied as follows:
+ * +-------------+-----------------+-----------------+
+ * | Chunk 0 | Chunk 1 | Chunk 2 |
+ * | IDs | keys |(**) IDs | keys |(**) IDs | keys |
+ * +-------------+-----------------+-----------------+
+ * In (**), there are two unused bits, therefore, chunk 1 needs
+ * to be shifted two bits right and chunk 0 needs to be shifted
+ * four bits right.
+ */
+ right_shift_array(output + MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES,
+ MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 2);
+ right_shift_array(output, MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 4);
+ break;
+ default:
+ WARN_ON(chunk_count > MLXSW_BLOOM_KEY_CHUNKS);
+ }
+}
+
+static void
+mlxsw_sp4_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len)
+{
+ struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
+ u8 block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
+ u8 chunk_count = 1 + ((block_count - 1) >> 2);
+
+ __mlxsw_sp_acl_bf_key_encode(aregion, aentry, output, len,
+ MLXSW_BLOOM_KEY_CHUNKS,
+ MLXSW_SP4_BLOOM_CHUNK_PAD_BYTES,
+ MLXSW_SP4_BLOOM_CHUNK_KEY_OFFSET,
+ MLXSW_SP4_BLOOM_CHUNK_KEY_BYTES,
+ MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES);
+ mlxsw_sp4_bf_key_shift_chunks(chunk_count, output);
}
static unsigned int
-mlxsw_sp_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf,
- struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_atcam_entry *aentry)
+mlxsw_sp4_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
{
- char bf_key[MLXSW_BLOOM_KEY_LEN];
+ char bf_key[MLXSW_SP4_BLOOM_KEY_LEN] = {};
u8 bf_size;
- mlxsw_sp_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size);
- return mlxsw_sp_acl_bf_crc(bf_key, bf_size);
+ mlxsw_sp4_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size);
+ return mlxsw_sp4_acl_bf_crc(bf_key, bf_size);
+}
+
+static unsigned int
+mlxsw_sp_acl_bf_rule_count_index_get(struct mlxsw_sp_acl_bf *bf,
+ unsigned int erp_bank,
+ unsigned int bf_index)
+{
+ return erp_bank * bf->bank_size + bf_index;
}
int
@@ -176,7 +421,7 @@ mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
mutex_lock(&bf->lock);
- bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
+ bf_index = mlxsw_sp->acl_bf_ops->index_get(bf, aregion, aentry);
rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
bf_index);
@@ -219,7 +464,7 @@ mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
mutex_lock(&bf->lock);
- bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
+ bf_index = mlxsw_sp->acl_bf_ops->index_get(bf, aregion, aentry);
rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
bf_index);
@@ -267,3 +512,11 @@ void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf)
mutex_destroy(&bf->lock);
kfree(bf);
}
+
+const struct mlxsw_sp_acl_bf_ops mlxsw_sp2_acl_bf_ops = {
+ .index_get = mlxsw_sp2_acl_bf_index_get,
+};
+
+const struct mlxsw_sp_acl_bf_ops mlxsw_sp4_acl_bf_ops = {
+ .index_get = mlxsw_sp4_acl_bf_index_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index c72aa38424dc..50806594d977 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -83,7 +83,7 @@ static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
}
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
- u8 local_port)
+ u16 local_port)
{
struct mlxsw_sp *mlxsw_sp = priv;
char ppbs_pl[MLXSW_REG_PPBS_LEN];
@@ -132,7 +132,7 @@ mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
}
static int
-mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
+mlxsw_sp_act_mirror_add(void *priv, u16 local_in_port,
const struct net_device *out_dev,
bool ingress, int *p_span_id)
{
@@ -159,7 +159,7 @@ err_analyzed_port_get:
}
static void
-mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
+mlxsw_sp_act_mirror_del(void *priv, u16 local_in_port, int span_id, bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
@@ -192,7 +192,7 @@ static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index)
policer_index);
}
-static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port,
+static int mlxsw_sp1_act_sampler_add(void *priv, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress, int *p_span_id,
@@ -202,7 +202,7 @@ static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port,
return -EOPNOTSUPP;
}
-static void mlxsw_sp1_act_sampler_del(void *priv, u8 local_port, int span_id,
+static void mlxsw_sp1_act_sampler_del(void *priv, u16 local_port, int span_id,
bool ingress)
{
WARN_ON_ONCE(1);
@@ -224,7 +224,7 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
.sampler_del = mlxsw_sp1_act_sampler_del,
};
-static int mlxsw_sp2_act_sampler_add(void *priv, u8 local_port,
+static int mlxsw_sp2_act_sampler_add(void *priv, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress, int *p_span_id,
@@ -272,7 +272,7 @@ err_span_agent_get:
return err;
}
-static void mlxsw_sp2_act_sampler_del(void *priv, u8 local_port, int span_id,
+static void mlxsw_sp2_act_sampler_del(void *priv, u16 local_port, int span_id,
bool ingress)
{
struct mlxsw_sp_sample_trigger trigger = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index 279c241f76f0..00c32320f891 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -168,8 +168,8 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = {
- MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_7, 0x04, 24, 8),
- MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_8_10, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 3),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
@@ -311,3 +311,45 @@ const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.encode_block = mlxsw_sp2_afk_encode_block,
.clear_block = mlxsw_sp2_afk_clear_block,
};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 18, 12),
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8),
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x04, 21, 4, 0, true),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
+};
+
+static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
+ MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
+ MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
+ MLXSW_AFK_BLOCK(0x1A, mlxsw_sp_afk_element_info_mac_5b),
+ MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
+ MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ MLXSW_AFK_BLOCK(0x35, mlxsw_sp_afk_element_info_ipv4_4b),
+ MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
+ MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
+ MLXSW_AFK_BLOCK(0x47, mlxsw_sp_afk_element_info_ipv6_2b),
+ MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
+ MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
+ MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
+ MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
+ MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
+};
+
+const struct mlxsw_afk_ops mlxsw_sp4_afk_ops = {
+ .blocks = mlxsw_sp4_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp4_afk_blocks),
+ .encode_block = mlxsw_sp2_afk_encode_block,
+ .clear_block = mlxsw_sp2_afk_clear_block,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index a41df10ade9b..edbbc89e7a71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -287,6 +287,12 @@ void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_bf;
+struct mlxsw_sp_acl_bf_ops {
+ unsigned int (*index_get)(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+};
+
int
mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_bf *bf,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index d78cf5a7220a..98f26f596e30 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -160,7 +160,7 @@ static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
}
static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 pg_buff,
+ u16 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir)
{
struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
@@ -173,7 +173,7 @@ static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u16 pool_index)
+ u16 local_port, u16 pool_index)
{
return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
}
@@ -202,7 +202,7 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
return 0;
}
-static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u8 pg_buff, u32 min_buff, u32 max_buff,
bool infi_max, u16 pool_index)
{
@@ -232,7 +232,7 @@ static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return 0;
}
-static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u16 pool_index, u32 min_buff, u32 max_buff)
{
const struct mlxsw_sp_sb_pool_des *des =
@@ -253,7 +253,7 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return 0;
}
-static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u16 pool_index, struct list_head *bulk_list)
{
const struct mlxsw_sp_sb_pool_des *des =
@@ -279,7 +279,7 @@ static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
-static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u16 pool_index, struct list_head *bulk_list)
{
const struct mlxsw_sp_sb_pool_des *des =
@@ -919,7 +919,7 @@ mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
}
-static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
enum mlxsw_reg_sbxx_dir dir,
const struct mlxsw_sp_sb_cm *cms,
size_t cms_len)
@@ -1037,7 +1037,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
-static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const struct mlxsw_sp_sb_pm *pms,
bool skip_ingress)
{
@@ -1416,7 +1416,7 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
pool_index);
@@ -1432,7 +1432,7 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u32 max_buff;
int err;
@@ -1458,7 +1458,7 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
@@ -1479,7 +1479,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
const struct mlxsw_sp_sb_cm *cm;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
@@ -1526,7 +1526,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
u8 masked_count;
- u8 local_port_1;
+ u16 local_port_1;
};
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
@@ -1536,7 +1536,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
u8 masked_count;
- u8 local_port;
+ u16 local_port;
int rec_index = 0;
struct mlxsw_sp_sb_cm *cm;
int i;
@@ -1582,13 +1582,12 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 local_port, local_port_1, last_local_port;
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ u8 masked_count, current_page = 0;
unsigned long cb_priv = 0;
LIST_HEAD(bulk_list);
char *sbsr_pl;
- u8 masked_count;
- u8 local_port_1;
- u8 local_port;
int i;
int err;
int err2;
@@ -1602,6 +1601,10 @@ next_batch:
local_port_1 = local_port;
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
@@ -1609,6 +1612,10 @@ next_batch:
for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
+ if (local_port > last_local_port) {
+ current_page++;
+ goto do_query;
+ }
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
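The new paging logic walks ports a register page at a time, presumably because the SBSR port masks cover a fixed number of ports per invocation now that local ports no longer fit in a u8. The arithmetic reduces to (a sketch with hypothetical helper names, kernel types assumed):

static unsigned int sbsr_page_of(u16 local_port)
{
	return local_port / MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
}

static u16 sbsr_last_port_in_page(unsigned int page)
{
	return page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
	       MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
}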
@@ -1651,10 +1658,11 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 local_port, last_local_port;
LIST_HEAD(bulk_list);
- char *sbsr_pl;
unsigned int masked_count;
- u8 local_port;
+ u8 current_page = 0;
+ char *sbsr_pl;
int i;
int err;
int err2;
@@ -1667,6 +1675,10 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true);
+ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
@@ -1674,6 +1686,10 @@ next_batch:
for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
+ if (local_port > last_local_port) {
+ current_page++;
+ goto do_query;
+ }
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
@@ -1715,7 +1731,7 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
pool_index);
@@ -1732,7 +1748,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 84d4460f3dcd..20530712eadb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1491,7 +1491,7 @@ static u32 mlxsw_sp1_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
- u8 local_port, u32 proto_admin, bool autoneg)
+ u16 local_port, u32 proto_admin, bool autoneg)
{
mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}
@@ -1969,7 +1969,7 @@ static u32 mlxsw_sp2_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
static void
mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
- u8 local_port, u32 proto_admin,
+ u16 local_port, u32 proto_admin,
bool autoneg)
{
mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 004c42274e48..ce80931f0402 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -317,13 +317,13 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
}
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
- enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
const struct mlxsw_sp_flood_table *flood_table;
- char *sftr_pl;
+ char *sftr2_pl;
int err;
if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
@@ -333,16 +333,16 @@ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
if (!flood_table)
return -ESRCH;
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
+ sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL);
+ if (!sftr2_pl)
return -ENOMEM;
- mlxsw_reg_sftr_pack(sftr_pl, flood_table->table_index,
- ops->flood_index(fid), flood_table->table_type, 1,
- local_port, member);
- err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr),
- sftr_pl);
- kfree(sftr_pl);
+ mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index,
+ ops->flood_index(fid), flood_table->table_type, 1,
+ local_port, member);
+ err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2),
+ sftr2_pl);
+ kfree(sftr2_pl);
return err;
}
@@ -439,7 +439,7 @@ static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
}
static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- u8 local_port, u16 vid, bool valid)
+ u16 local_port, u16 vid, bool valid)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
char svfa_pl[MLXSW_REG_SVFA_LEN];
@@ -573,7 +573,7 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err;
err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
@@ -601,7 +601,7 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
@@ -784,7 +784,7 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err;
/* We only need to transition the port to virtual mode since
@@ -808,7 +808,7 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index be3791ca6069..bb417db773b9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -203,7 +203,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
*/
burst = roundup_pow_of_two(act->police.burst);
err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
- act->police.index,
+ act->hw_index,
act->police.rate_bytes_ps,
burst, extack);
if (err)
@@ -508,7 +508,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
- if (mlxsw_sp_flow_block_is_egress_bound(block)) {
+ if (mlxsw_sp_flow_block_is_egress_bound(block) &&
+ match.mask->vlan_id) {
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
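The flower hunk above narrows the egress rejection: the bind is refused only when the classifier actually selects vlan_id bits, so VLAN matches that test other fields alone can now bind on egress. Illustrative before/after behaviour (the tc-style rules are examples, not taken from this patch):

        /* With the added mask check, on an egress-bound block:
         *   flower vlan_ethtype ipv4   -> accepted (vlan_id mask is 0)
         *   flower vlan_id 10          -> still rejected with the same
         *                                 extack message as before
         */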
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index ad3926de88f2..01cf5a6a26bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -568,37 +568,21 @@ static int
mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
- char rips_pl[MLXSW_REG_RIPS_LEN];
struct __ip6_tnl_parm parms6;
- int err;
-
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
- MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
- &ipip_entry->dip_kvdl_index);
- if (err)
- return err;
parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
- mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index,
- &parms6.raddr);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
- if (err)
- goto err_rips_write;
-
- return 0;
-
-err_rips_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
- ipip_entry->dip_kvdl_index);
- return err;
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr,
+ &ipip_entry->dip_kvdl_index);
}
static void
mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *ipip_entry)
{
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
- ipip_entry->dip_kvdl_index);
+ struct __ip6_tnl_parm parms6;
+
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr);
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
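The ipip hunks replace a per-tunnel KVDL allocation and RIPS write with the shared helpers mlxsw_sp_ipv6_addr_kvdl_index_get() and mlxsw_sp_ipv6_addr_put(), which, elsewhere in this series, keep one KVDL entry per distinct IPv6 address and reference-count it. A hedged sketch of the get path; the node type and helpers below are illustrative, not the driver's actual implementation:

        struct addr6_node {                     /* hypothetical */
                struct in6_addr addr6;
                u32 kvdl_index;
                refcount_t refcount;
        };

        static int addr6_kvdl_get(struct mlxsw_sp *mlxsw_sp,
                                  const struct in6_addr *addr6,
                                  u32 *p_kvdl_index)
        {
                struct addr6_node *node;

                node = addr6_node_lookup(mlxsw_sp, addr6); /* hypothetical */
                if (node) {
                        refcount_inc(&node->refcount);  /* share the entry */
                        *p_kvdl_index = node->kvdl_index;
                        return 0;
                }
                /* miss: allocate a KVDL entry, program the address and
                 * insert a node with refcount 1
                 */
                return addr6_node_create(mlxsw_sp, addr6, p_kvdl_index);
        }

The matching put drops the reference and frees the KVDL entry on the last user, which is why the unset path above only needs the address, not the index.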
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 9eba8fa684ae..d2b57a045aa4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -130,15 +130,25 @@ mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
struct mlxsw_sp_nve_mc_entry *mc_entry,
const union mlxsw_sp_l3addr *addr)
{
- WARN_ON(1);
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_ipv6_addr_kvdl_index_get(mc_record->mlxsw_sp,
+ &addr->addr6, &kvdl_index);
+ if (err)
+ return err;
- return -EINVAL;
+ mc_entry->ipv6_entry.addr6 = addr->addr6;
+ mc_entry->ipv6_entry.addr6_kvdl_index = kvdl_index;
+ return 0;
}
static void
mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
const struct mlxsw_sp_nve_mc_entry *mc_entry)
{
+ mlxsw_sp_ipv6_addr_put(mc_record->mlxsw_sp,
+ &mc_entry->ipv6_entry.addr6);
}
static void
@@ -787,6 +797,142 @@ static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp,
ops->fdb_clear_offload(nve_dev, vni);
}
+struct mlxsw_sp_nve_ipv6_ht_key {
+ u8 mac[ETH_ALEN];
+ u16 fid_index;
+};
+
+struct mlxsw_sp_nve_ipv6_ht_node {
+ struct rhash_head ht_node;
+ struct list_head list;
+ struct mlxsw_sp_nve_ipv6_ht_key key;
+ struct in6_addr addr6;
+};
+
+static const struct rhashtable_params mlxsw_sp_nve_ipv6_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_nve_ipv6_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, key),
+ .head_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, ht_node),
+};
+
+int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, addr6, p_kvdl_index);
+}
+
+void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6)
+{
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, addr6);
+}
+
+static struct mlxsw_sp_nve_ipv6_ht_node *
+mlxsw_sp_nve_ipv6_ht_node_lookup(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_key key = {};
+
+ ether_addr_copy(key.mac, mac);
+ key.fid_index = fid_index;
+ return rhashtable_lookup_fast(&mlxsw_sp->nve->ipv6_ht, &key,
+ mlxsw_sp_nve_ipv6_ht_params);
+}
+
+static int mlxsw_sp_nve_ipv6_ht_insert(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid_index,
+ const struct in6_addr *addr6)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ int err;
+
+ ipv6_ht_node = kzalloc(sizeof(*ipv6_ht_node), GFP_KERNEL);
+ if (!ipv6_ht_node)
+ return -ENOMEM;
+
+ ether_addr_copy(ipv6_ht_node->key.mac, mac);
+ ipv6_ht_node->key.fid_index = fid_index;
+ ipv6_ht_node->addr6 = *addr6;
+
+ err = rhashtable_insert_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
+ mlxsw_sp_nve_ipv6_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add(&ipv6_ht_node->list, &nve->ipv6_addr_list);
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(ipv6_ht_node);
+ return err;
+}
+
+static void
+mlxsw_sp_nve_ipv6_ht_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ list_del(&ipv6_ht_node->list);
+ rhashtable_remove_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
+ mlxsw_sp_nve_ipv6_ht_params);
+ kfree(ipv6_ht_node);
+}
+
+int
+mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index,
+ const struct in6_addr *new_addr6)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+
+ ASSERT_RTNL();
+
+ ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
+ fid_index);
+ if (!ipv6_ht_node)
+ return mlxsw_sp_nve_ipv6_ht_insert(mlxsw_sp, mac, fid_index,
+ new_addr6);
+
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
+ ipv6_ht_node->addr6 = *new_addr6;
+ return 0;
+}
+
+void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+
+ ASSERT_RTNL();
+
+ ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
+ fid_index);
+ if (WARN_ON(!ipv6_ht_node))
+ return;
+
+ mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node);
+}
+
+static void mlxsw_sp_nve_ipv6_addr_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node, *tmp;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ list_for_each_entry_safe(ipv6_ht_node, tmp, &nve->ipv6_addr_list,
+ list) {
+ if (ipv6_ht_node->key.fid_index != fid_index)
+ continue;
+
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
+ mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node);
+ }
+}
+
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack)
@@ -845,6 +991,7 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
+ mlxsw_sp_nve_ipv6_addr_flush_by_fid(mlxsw_sp, fid_index);
if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) ||
mlxsw_sp_fid_vni(fid, &vni)))
@@ -981,7 +1128,13 @@ int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
err = rhashtable_init(&nve->mc_list_ht,
&mlxsw_sp_nve_mc_list_ht_params);
if (err)
- goto err_rhashtable_init;
+ goto err_mc_rhashtable_init;
+
+ err = rhashtable_init(&nve->ipv6_ht, &mlxsw_sp_nve_ipv6_ht_params);
+ if (err)
+ goto err_ipv6_rhashtable_init;
+
+ INIT_LIST_HEAD(&nve->ipv6_addr_list);
err = mlxsw_sp_nve_qos_init(mlxsw_sp);
if (err)
@@ -1000,8 +1153,10 @@ int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
err_nve_resources_query:
err_nve_ecn_init:
err_nve_qos_init:
+ rhashtable_destroy(&nve->ipv6_ht);
+err_ipv6_rhashtable_init:
rhashtable_destroy(&nve->mc_list_ht);
-err_rhashtable_init:
+err_mc_rhashtable_init:
mlxsw_sp->nve = NULL;
kfree(nve);
return err;
@@ -1010,6 +1165,8 @@ err_rhashtable_init:
void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
{
WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
+ WARN_ON(!list_empty(&mlxsw_sp->nve->ipv6_addr_list));
+ rhashtable_destroy(&mlxsw_sp->nve->ipv6_ht);
rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
kfree(mlxsw_sp->nve);
mlxsw_sp->nve = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 98d1fdc25eac..0d21de1d0395 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -23,6 +23,8 @@ struct mlxsw_sp_nve_config {
struct mlxsw_sp_nve {
struct mlxsw_sp_nve_config config;
struct rhashtable mc_list_ht;
+ struct rhashtable ipv6_ht;
+ struct list_head ipv6_addr_list; /* Saves hash table nodes. */
struct mlxsw_sp *mlxsw_sp;
const struct mlxsw_sp_nve_ops **nve_ops_arr;
unsigned int num_nve_tunnels; /* Protected by RTNL */
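The new ipv6_ht maps a {MAC, FID} pair to the IPv6 underlay address its tunnel FDB entry currently uses, so a later update can release the old address; the parallel ipv6_addr_list lets mlxsw_sp_nve_ipv6_addr_flush_by_fid() walk every node without a hash-table walker. A typical call sequence when an entry's remote IP changes, using the helpers from the hunks above (error handling elided):

        u32 kvdl_index;

        /* take a reference on the new address and learn its index */
        mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, &new_addr6, &kvdl_index);
        /* ... rewrite the SFD record so it points at kvdl_index ... */
        /* record the new mapping; on an existing node this also puts
         * the previously mapped address
         */
        mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, &new_addr6);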
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index d018d2da5949..d309b77a0194 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -10,8 +10,48 @@
#include "spectrum.h"
#include "spectrum_nve.h"
-#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
+#define MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
VXLAN_F_LEARN)
+#define MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS (VXLAN_F_IPV6 | \
+ VXLAN_F_UDP_ZERO_CSUM6_TX | \
+ VXLAN_F_UDP_ZERO_CSUM6_RX)
+
+static bool mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
+ return false;
+ }
+
+ if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
+ return false;
+ }
+
+ return true;
+}
+
+static bool mlxsw_sp_nve_vxlan_ipv6_flags_check(const struct vxlan_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
+ return false;
+ }
+
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for RX");
+ return false;
+ }
+
+ if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
+ return false;
+ }
+
+ return true;
+}
static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_params *params,
@@ -20,11 +60,6 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
struct vxlan_dev *vxlan = netdev_priv(params->dev);
struct vxlan_config *cfg = &vxlan->cfg;
- if (cfg->saddr.sa.sa_family != AF_INET) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported");
- return false;
- }
-
if (vxlan_addr_multicast(&cfg->remote_ip)) {
NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
return false;
@@ -55,14 +90,15 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return false;
}
- if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
- return false;
- }
-
- if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
- return false;
+ switch (cfg->saddr.sa.sa_family) {
+ case AF_INET:
+ if (!mlxsw_sp_nve_vxlan_ipv4_flags_check(cfg, extack))
+ return false;
+ break;
+ case AF_INET6:
+ if (!mlxsw_sp_nve_vxlan_ipv6_flags_check(cfg, extack))
+ return false;
+ break;
}
if (cfg->ttl == 0) {
@@ -90,6 +126,22 @@ static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack);
}
+static void
+mlxsw_sp_nve_vxlan_ul_proto_sip_config(const struct vxlan_config *cfg,
+ struct mlxsw_sp_nve_config *config)
+{
+ switch (cfg->saddr.sa.sa_family) {
+ case AF_INET:
+ config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
+ config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
+ break;
+ case AF_INET6:
+ config->ul_proto = MLXSW_SP_L3_PROTO_IPV6;
+ config->ul_sip.addr6 = cfg->saddr.sin6.sin6_addr;
+ break;
+ }
+}
+
static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_params *params,
struct mlxsw_sp_nve_config *config)
@@ -102,8 +154,7 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
config->flowlabel = cfg->label;
config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
config->ul_tb_id = RT_TABLE_MAIN;
- config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
- config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
+ mlxsw_sp_nve_vxlan_ul_proto_sip_config(cfg, config);
config->udp_dport = cfg->dst_port;
}
@@ -111,6 +162,7 @@ static void
mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
const struct mlxsw_sp_nve_config *config)
{
+ struct in6_addr addr6;
u8 udp_sport;
mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
@@ -122,7 +174,18 @@ mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
get_random_bytes(&udp_sport, sizeof(udp_sport));
udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
- mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
+
+ switch (config->ul_proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_tngcr_usipv4_set(tngcr_pl,
+ be32_to_cpu(config->ul_sip.addr4));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ addr6 = config->ul_sip.addr6;
+ mlxsw_reg_tngcr_usipv6_memcpy_to(tngcr_pl,
+ (const char *)&addr6);
+ break;
+ }
}
static int
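One arithmetic detail in the tngcr hunk above is worth unpacking: the random UDP source-port prefix is confined to the range [0x80, 0xee], which holds 0xee - 0x80 + 1 = 111 values, so the modulo-and-offset always lands inside it (the distribution is close to, but not exactly, uniform, since 256 is not a multiple of 111):

        /* worked arithmetic for the sport prefix computed above */
        udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
        /* residue 0 gives 0x80 (lowest), residue 110 gives 0xee (highest) */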
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 1a180384e7e8..0ff163fbc775 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -36,7 +36,7 @@ struct mlxsw_sp_ptp_state {
};
struct mlxsw_sp1_ptp_key {
- u8 local_port;
+ u16 local_port;
u8 message_type;
u16 sequence_id;
u8 domain_number;
@@ -406,7 +406,7 @@ mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
* This case is similar to 2) above.
*/
static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port,
+ struct sk_buff *skb, u16 local_port,
bool ingress,
struct skb_shared_hwtstamps *hwtstamps)
{
@@ -524,7 +524,7 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
}
static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port,
+ struct sk_buff *skb, u16 local_port,
bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -564,7 +564,7 @@ immediate:
}
void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
- u8 local_port, u8 message_type,
+ u16 local_port, u8 message_type,
u8 domain_number, u16 sequence_id,
u64 timestamp)
{
@@ -599,14 +599,14 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
}
void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port)
+ u16 local_port)
{
skb_reset_mac_header(skb);
mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true);
}
void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 1d43a3755285..c06cd1384bca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -31,13 +31,13 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port);
+ struct sk_buff *skb, u16 local_port);
void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
- u8 local_port, u8 message_type,
+ u16 local_port, u8 message_type,
u8 domain_number, u16 sequence_id,
u64 timestamp);
@@ -80,20 +80,20 @@ static inline void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
}
static inline void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
}
static inline void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
dev_kfree_skb_any(skb);
}
static inline void
mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
- u8 local_port, u8 message_type,
+ u16 local_port, u8 message_type,
u8 domain_number,
u16 sequence_id, u64 timestamp)
{
@@ -159,13 +159,13 @@ static inline void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
}
static inline void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
}
static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
dev_kfree_skb_any(skb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index c34833ff1dde..d40762cfc453 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1307,6 +1307,10 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
addr_prefix_len = 32;
break;
case MLXSW_SP_L3_PROTO_IPV6:
+ addrp = &addr->addr6;
+ addr_len = 16;
+ addr_prefix_len = 128;
+ break;
default:
WARN_ON(1);
return NULL;
@@ -7002,6 +7006,8 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
+ u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
int ifindex = nhgi->nexthops[0].ifindex;
struct mlxsw_sp_ipip_entry *ipip_entry;
@@ -7015,6 +7021,14 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
ipip_entry);
}
+ if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
+ MLXSW_SP_L3_PROTO_IPV6, &dip)) {
+ u32 tunnel_index;
+
+ tunnel_index = router->nve_decap_config.tunnel_index;
+ fib_entry->decap.tunnel_index = tunnel_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ }
return 0;
}
@@ -8369,9 +8383,6 @@ mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
int id;
idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
- if (!profile)
- continue;
-
if (ether_addr_equal_masked(profile->mac_prefix, mac,
mlxsw_sp->mac_mask))
return profile;
@@ -9344,7 +9355,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index f5f819aa9a65..f9671cc53002 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -37,7 +37,7 @@ struct mlxsw_sp_span {
struct mlxsw_sp_span_analyzed_port {
struct list_head list; /* Member of analyzed_ports_list */
refcount_t ref_count;
- u8 local_port;
+ u16 local_port;
bool ingress;
};
@@ -46,7 +46,7 @@ struct mlxsw_sp_span_trigger_entry {
struct mlxsw_sp_span *span;
const struct mlxsw_sp_span_trigger_ops *ops;
refcount_t ref_count;
- u8 local_port;
+ u16 local_port;
enum mlxsw_sp_span_trigger trigger;
struct mlxsw_sp_span_trigger_parms parms;
};
@@ -179,7 +179,7 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -199,7 +199,7 @@ mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -480,7 +480,7 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -584,7 +584,7 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -650,7 +650,7 @@ mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -997,7 +997,7 @@ static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_por
}
static struct mlxsw_sp_span_analyzed_port *
-mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
+mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port,
bool ingress)
{
struct mlxsw_sp_span_analyzed_port *analyzed_port;
@@ -1165,7 +1165,7 @@ int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_analyzed_port *analyzed_port;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err = 0;
mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
@@ -1193,7 +1193,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_analyzed_port *analyzed_port;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 81c7e8a7fcf5..65c1724c63b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -865,17 +865,17 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
u16 mid_idx, bool add)
{
- char *smid_pl;
+ char *smid2_pl;
int err;
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid_idx,
- mlxsw_sp_router_port(mlxsw_sp), add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
+ mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
+ mlxsw_sp_router_port(mlxsw_sp), add);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+ kfree(smid2_pl);
return err;
}
@@ -980,7 +980,7 @@ mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_bridge_device *bridge_device;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_fid *fid;
int err;
@@ -1029,7 +1029,7 @@ mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u16 vid = mlxsw_sp_port_vlan->vid;
mlxsw_sp_port_vlan->fid = NULL;
@@ -1290,38 +1290,52 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
-static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
- const char *mac, u16 fid,
- enum mlxsw_sp_l3proto proto,
- const union mlxsw_sp_l3addr *addr,
- bool adding, bool dynamic)
+static int
+mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
+ const char *mac, u16 fid, __be32 addr, bool adding)
{
- enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
char *sfd_pl;
u8 num_rec;
u32 uip;
int err;
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- uip = be32_to_cpu(addr->addr4);
- sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- default:
- WARN_ON(1);
- return -EOPNOTSUPP;
- }
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ uip = be32_to_cpu(addr);
+ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
+ mlxsw_sp_sfd_rec_policy(dynamic), mac,
+ fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
+ kfree(sfd_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ u32 kvdl_index, bool adding)
+{
+ char *sfd_pl;
+ u8 num_rec;
+ int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
if (!sfd_pl)
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
- mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
- MLXSW_REG_SFD_REC_ACTION_NOP, uip,
- sfd_proto);
+ mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
+ MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1335,7 +1349,80 @@ out:
return err;
}
-static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ const struct in6_addr *addr)
+{
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
+ kvdl_index, true);
+ if (err)
+ goto err_sfd_write;
+
+ err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
+ if (err)
+ /* Replace can fail only for creating new mapping, so removing
+ * the FDB entry in the error path is OK.
+ */
+ goto err_addr_replace;
+
+ return 0;
+
+err_addr_replace:
+ mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
+ false);
+err_sfd_write:
+ mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
+ return err;
+}
+
+static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ const struct in6_addr *addr)
+{
+ mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
+ mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
+ mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
+}
+
+static int
+mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid, const struct in6_addr *addr, bool adding)
+{
+ if (adding)
+ return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
+ addr);
+
+ mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
+ return 0;
+}
+
+static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ bool adding, bool dynamic)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
+ addr->addr4, adding);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
+ &addr->addr6, adding);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
+
+static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const char *mac, u16 fid, bool adding,
enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_rec_policy policy)
@@ -1363,7 +1450,7 @@ out:
return err;
}
-static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const char *mac, u16 fid, bool adding,
bool dynamic)
{
@@ -1477,30 +1564,30 @@ static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
long *ports_bitmap,
bool set_router_port)
{
- char *smid_pl;
+ char *smid2_pl;
int err, i;
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
+ mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false);
for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
if (mlxsw_sp->ports[i])
- mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
}
- mlxsw_reg_smid_port_mask_set(smid_pl,
- mlxsw_sp_router_port(mlxsw_sp), 1);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl,
+ mlxsw_sp_router_port(mlxsw_sp), 1);
for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
- mlxsw_reg_smid_port_set(smid_pl, i, 1);
+ mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
- mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
- set_router_port);
+ mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
+ set_router_port);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+ kfree(smid2_pl);
return err;
}
@@ -1508,16 +1595,16 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 mid_idx, bool add)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *smid_pl;
+ char *smid2_pl;
int err;
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
+ mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+ kfree(smid2_pl);
return err;
}
@@ -2536,7 +2623,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_port *mlxsw_sp_port;
enum switchdev_notifier_type type;
char mac[ETH_ALEN];
- u8 local_port;
+ u16 local_port;
u16 vid, fid;
bool do_notification = true;
int err;
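The IPv6 add path in mlxsw_sp_port_fdb_tun_uc_op6_add() above follows the usual acquire/program/record shape with reverse-order unwinding; the in-code comment notes that map_replace can only fail while creating a brand-new mapping, which is what makes rolling back the SFD write safe. The skeleton, with hypothetical stand-in helpers:

        err = acquire_resource();               /* kvdl_set */
        if (err)
                return err;
        err = program_hw();                     /* sfd_write(..., true) */
        if (err)
                goto err_program_hw;
        err = record_mapping();                 /* addr_map_replace */
        if (err)
                goto err_record_mapping;
        return 0;

        err_record_mapping:
                program_hw_undo();              /* sfd_write(..., false) */
        err_program_hw:
                release_resource();             /* kvdl_unset */
                return err;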
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 26d01adbedad..47b061b99160 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -60,7 +60,7 @@ enum {
};
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port,
+ u16 local_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
@@ -85,7 +85,7 @@ static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
return 0;
}
-static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct devlink_port *in_devlink_port;
@@ -109,7 +109,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
-static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
u32 cookie_index = mlxsw_skb_cb(skb)->rx_md_info.cookie_index;
@@ -138,7 +138,7 @@ static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
-static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
+static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct devlink_port *in_devlink_port;
@@ -164,7 +164,7 @@ static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
return 0;
}
-static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
int err;
@@ -176,14 +176,14 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
netif_receive_skb(skb);
}
-static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
skb->offload_fwd_mark = 1;
mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
}
-static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
skb->offload_l3_fwd_mark = 1;
@@ -191,7 +191,7 @@ static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port,
mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
}
-static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
@@ -212,7 +212,7 @@ static struct mlxsw_sp_port *
mlxsw_sp_sample_tx_port_get(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_rx_md_info *rx_md_info)
{
- u8 local_port;
+ u16 local_port;
if (!rx_md_info->tx_port_valid)
return NULL;
@@ -257,7 +257,7 @@ static void mlxsw_sp_psample_md_init(struct mlxsw_sp *mlxsw_sp,
md->latency <<= MLXSW_SP_MIRROR_LATENCY_SHIFT;
}
-static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
@@ -293,7 +293,7 @@ out:
consume_skb(skb);
}
-static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info;
@@ -343,7 +343,7 @@ out:
consume_skb(skb);
}
-static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 99c0c1491af2..d024983815da 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6317,11 +6317,15 @@ static int netdev_set_pauseparam(struct net_device *dev,
* netdev_get_ringparam - get tx/rx ring parameters
* @dev: Network device.
* @ring: Ethtool RING settings data structure.
+ * @kernel_ring: Ethtool external RING settings data structure.
+ * @extack: Netlink handle.
*
* This procedure returns the TX/RX ring settings.
*/
static void netdev_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 735eea1dacf1..ed7a35c3ceac 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -55,6 +55,7 @@ config LAN743X
To compile this driver as a module, choose M here. The module will be
called lan743x.
+source "drivers/net/ethernet/microchip/lan966x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index c77dc0379bfd..9faa41436198 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_LAN743X) += lan743x.o
lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
+obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 7d7647481f70..8c6390d95158 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1739,13 +1739,10 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
}
if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
DMA_BIT_MASK(64))) {
- if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
- DMA_BIT_MASK(32))) {
- dev_warn(&tx->adapter->pdev->dev,
- "lan743x_: No suitable DMA available\n");
- ret = -ENOMEM;
- goto cleanup;
- }
+ dev_warn(&tx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
}
ring_allocation_size = ALIGN(tx->ring_size *
sizeof(struct lan743x_tx_descriptor),
@@ -2284,13 +2281,10 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
}
if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
DMA_BIT_MASK(64))) {
- if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
- DMA_BIT_MASK(32))) {
- dev_warn(&rx->adapter->pdev->dev,
- "lan743x_: No suitable DMA available\n");
- ret = -ENOMEM;
- goto cleanup;
- }
+ dev_warn(&rx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
}
ring_allocation_size = ALIGN(rx->ring_size *
sizeof(struct lan743x_rx_descriptor),
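Both lan743x hunks drop the nested 32-bit retry. The likely rationale, hedged since the changelog is not part of this diff: on current kernels dma_set_mask_and_coherent() accepts any mask at least as wide as what the platform requires, so if the 64-bit mask fails the device cannot do DMA at all and a 32-bit retry cannot succeed either. The setup therefore collapses to a single check:

        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                dev_warn(&pdev->dev, "lan743x_: No suitable DMA available\n");
                return -ENOMEM;         /* no usable DMA at all */
        }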
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 9380e396f648..8b7a8d879083 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1305,12 +1305,6 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags) {
- netif_warn(adapter, drv, adapter->netdev,
- "ignoring hwtstamp_config.flags == 0x%08X, expected 0\n",
- config.flags);
- }
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
new file mode 100644
index 000000000000..ac273f84b69e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -0,0 +1,9 @@
+config LAN966X_SWITCH
+ tristate "Lan966x switch driver"
+ depends on HAS_IOMEM
+ depends on OF
+ depends on NET_SWITCHDEV
+ select PHYLINK
+ select PACKING
+ help
+ This driver supports the Lan966x network switch device.
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
new file mode 100644
index 000000000000..040cfff9f577
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Lan966x network device drivers.
+#
+
+obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
+
+lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
+ lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
+ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
new file mode 100644
index 000000000000..614f12c2fe6a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+
+#include "lan966x_main.h"
+
+/* Number of traffic classes */
+#define LAN966X_NUM_TC 8
+#define LAN966X_STATS_CHECK_DELAY (2 * HZ)
+
+static const struct lan966x_stat_layout lan966x_stats_layout[] = {
+ { .name = "rx_octets", .offset = 0x00, },
+ { .name = "rx_unicast", .offset = 0x01, },
+ { .name = "rx_multicast", .offset = 0x02 },
+ { .name = "rx_broadcast", .offset = 0x03 },
+ { .name = "rx_short", .offset = 0x04 },
+ { .name = "rx_frag", .offset = 0x05 },
+ { .name = "rx_jabber", .offset = 0x06 },
+ { .name = "rx_crc", .offset = 0x07 },
+ { .name = "rx_symbol_err", .offset = 0x08 },
+ { .name = "rx_sz_64", .offset = 0x09 },
+ { .name = "rx_sz_65_127", .offset = 0x0a},
+ { .name = "rx_sz_128_255", .offset = 0x0b},
+ { .name = "rx_sz_256_511", .offset = 0x0c },
+ { .name = "rx_sz_512_1023", .offset = 0x0d },
+ { .name = "rx_sz_1024_1526", .offset = 0x0e },
+ { .name = "rx_sz_jumbo", .offset = 0x0f },
+ { .name = "rx_pause", .offset = 0x10 },
+ { .name = "rx_control", .offset = 0x11 },
+ { .name = "rx_long", .offset = 0x12 },
+ { .name = "rx_cat_drop", .offset = 0x13 },
+ { .name = "rx_red_prio_0", .offset = 0x14 },
+ { .name = "rx_red_prio_1", .offset = 0x15 },
+ { .name = "rx_red_prio_2", .offset = 0x16 },
+ { .name = "rx_red_prio_3", .offset = 0x17 },
+ { .name = "rx_red_prio_4", .offset = 0x18 },
+ { .name = "rx_red_prio_5", .offset = 0x19 },
+ { .name = "rx_red_prio_6", .offset = 0x1a },
+ { .name = "rx_red_prio_7", .offset = 0x1b },
+ { .name = "rx_yellow_prio_0", .offset = 0x1c },
+ { .name = "rx_yellow_prio_1", .offset = 0x1d },
+ { .name = "rx_yellow_prio_2", .offset = 0x1e },
+ { .name = "rx_yellow_prio_3", .offset = 0x1f },
+ { .name = "rx_yellow_prio_4", .offset = 0x20 },
+ { .name = "rx_yellow_prio_5", .offset = 0x21 },
+ { .name = "rx_yellow_prio_6", .offset = 0x22 },
+ { .name = "rx_yellow_prio_7", .offset = 0x23 },
+ { .name = "rx_green_prio_0", .offset = 0x24 },
+ { .name = "rx_green_prio_1", .offset = 0x25 },
+ { .name = "rx_green_prio_2", .offset = 0x26 },
+ { .name = "rx_green_prio_3", .offset = 0x27 },
+ { .name = "rx_green_prio_4", .offset = 0x28 },
+ { .name = "rx_green_prio_5", .offset = 0x29 },
+ { .name = "rx_green_prio_6", .offset = 0x2a },
+ { .name = "rx_green_prio_7", .offset = 0x2b },
+ { .name = "rx_assembly_err", .offset = 0x2c },
+ { .name = "rx_smd_err", .offset = 0x2d },
+ { .name = "rx_assembly_ok", .offset = 0x2e },
+ { .name = "rx_merge_frag", .offset = 0x2f },
+ { .name = "rx_pmac_octets", .offset = 0x30, },
+ { .name = "rx_pmac_unicast", .offset = 0x31, },
+ { .name = "rx_pmac_multicast", .offset = 0x32 },
+ { .name = "rx_pmac_broadcast", .offset = 0x33 },
+ { .name = "rx_pmac_short", .offset = 0x34 },
+ { .name = "rx_pmac_frag", .offset = 0x35 },
+ { .name = "rx_pmac_jabber", .offset = 0x36 },
+ { .name = "rx_pmac_crc", .offset = 0x37 },
+ { .name = "rx_pmac_symbol_err", .offset = 0x38 },
+ { .name = "rx_pmac_sz_64", .offset = 0x39 },
+ { .name = "rx_pmac_sz_65_127", .offset = 0x3a },
+ { .name = "rx_pmac_sz_128_255", .offset = 0x3b },
+ { .name = "rx_pmac_sz_256_511", .offset = 0x3c },
+ { .name = "rx_pmac_sz_512_1023", .offset = 0x3d },
+ { .name = "rx_pmac_sz_1024_1526", .offset = 0x3e },
+ { .name = "rx_pmac_sz_jumbo", .offset = 0x3f },
+ { .name = "rx_pmac_pause", .offset = 0x40 },
+ { .name = "rx_pmac_control", .offset = 0x41 },
+ { .name = "rx_pmac_long", .offset = 0x42 },
+
+ { .name = "tx_octets", .offset = 0x80, },
+ { .name = "tx_unicast", .offset = 0x81, },
+ { .name = "tx_multicast", .offset = 0x82 },
+ { .name = "tx_broadcast", .offset = 0x83 },
+ { .name = "tx_col", .offset = 0x84 },
+ { .name = "tx_drop", .offset = 0x85 },
+ { .name = "tx_pause", .offset = 0x86 },
+ { .name = "tx_sz_64", .offset = 0x87 },
+ { .name = "tx_sz_65_127", .offset = 0x88 },
+ { .name = "tx_sz_128_255", .offset = 0x89 },
+ { .name = "tx_sz_256_511", .offset = 0x8a },
+ { .name = "tx_sz_512_1023", .offset = 0x8b },
+ { .name = "tx_sz_1024_1526", .offset = 0x8c },
+ { .name = "tx_sz_jumbo", .offset = 0x8d },
+ { .name = "tx_yellow_prio_0", .offset = 0x8e },
+ { .name = "tx_yellow_prio_1", .offset = 0x8f },
+ { .name = "tx_yellow_prio_2", .offset = 0x90 },
+ { .name = "tx_yellow_prio_3", .offset = 0x91 },
+ { .name = "tx_yellow_prio_4", .offset = 0x92 },
+ { .name = "tx_yellow_prio_5", .offset = 0x93 },
+ { .name = "tx_yellow_prio_6", .offset = 0x94 },
+ { .name = "tx_yellow_prio_7", .offset = 0x95 },
+ { .name = "tx_green_prio_0", .offset = 0x96 },
+ { .name = "tx_green_prio_1", .offset = 0x97 },
+ { .name = "tx_green_prio_2", .offset = 0x98 },
+ { .name = "tx_green_prio_3", .offset = 0x99 },
+ { .name = "tx_green_prio_4", .offset = 0x9a },
+ { .name = "tx_green_prio_5", .offset = 0x9b },
+ { .name = "tx_green_prio_6", .offset = 0x9c },
+ { .name = "tx_green_prio_7", .offset = 0x9d },
+ { .name = "tx_aged", .offset = 0x9e },
+ { .name = "tx_llct", .offset = 0x9f },
+ { .name = "tx_ct", .offset = 0xa0 },
+ { .name = "tx_mm_hold", .offset = 0xa1 },
+ { .name = "tx_merge_frag", .offset = 0xa2 },
+ { .name = "tx_pmac_octets", .offset = 0xa3, },
+ { .name = "tx_pmac_unicast", .offset = 0xa4, },
+ { .name = "tx_pmac_multicast", .offset = 0xa5 },
+ { .name = "tx_pmac_broadcast", .offset = 0xa6 },
+ { .name = "tx_pmac_pause", .offset = 0xa7 },
+ { .name = "tx_pmac_sz_64", .offset = 0xa8 },
+ { .name = "tx_pmac_sz_65_127", .offset = 0xa9 },
+ { .name = "tx_pmac_sz_128_255", .offset = 0xaa },
+ { .name = "tx_pmac_sz_256_511", .offset = 0xab },
+ { .name = "tx_pmac_sz_512_1023", .offset = 0xac },
+ { .name = "tx_pmac_sz_1024_1526", .offset = 0xad },
+ { .name = "tx_pmac_sz_jumbo", .offset = 0xae },
+
+ { .name = "dr_local", .offset = 0x100 },
+ { .name = "dr_tail", .offset = 0x101 },
+ { .name = "dr_yellow_prio_0", .offset = 0x102 },
+ { .name = "dr_yellow_prio_1", .offset = 0x103 },
+ { .name = "dr_yellow_prio_2", .offset = 0x104 },
+ { .name = "dr_yellow_prio_3", .offset = 0x105 },
+ { .name = "dr_yellow_prio_4", .offset = 0x106 },
+ { .name = "dr_yellow_prio_5", .offset = 0x107 },
+ { .name = "dr_yellow_prio_6", .offset = 0x108 },
+ { .name = "dr_yellow_prio_7", .offset = 0x109 },
+ { .name = "dr_green_prio_0", .offset = 0x10a },
+ { .name = "dr_green_prio_1", .offset = 0x10b },
+ { .name = "dr_green_prio_2", .offset = 0x10c },
+ { .name = "dr_green_prio_3", .offset = 0x10d },
+ { .name = "dr_green_prio_4", .offset = 0x10e },
+ { .name = "dr_green_prio_5", .offset = 0x10f },
+ { .name = "dr_green_prio_6", .offset = 0x110 },
+ { .name = "dr_green_prio_7", .offset = 0x111 },
+};
+
+/* The following numbers are indexes into lan966x_stats_layout[] */
+#define SYS_COUNT_RX_OCT 0
+#define SYS_COUNT_RX_UC 1
+#define SYS_COUNT_RX_MC 2
+#define SYS_COUNT_RX_BC 3
+#define SYS_COUNT_RX_SHORT 4
+#define SYS_COUNT_RX_FRAG 5
+#define SYS_COUNT_RX_JABBER 6
+#define SYS_COUNT_RX_CRC 7
+#define SYS_COUNT_RX_SYMBOL_ERR 8
+#define SYS_COUNT_RX_SZ_64 9
+#define SYS_COUNT_RX_SZ_65_127 10
+#define SYS_COUNT_RX_SZ_128_255 11
+#define SYS_COUNT_RX_SZ_256_511 12
+#define SYS_COUNT_RX_SZ_512_1023 13
+#define SYS_COUNT_RX_SZ_1024_1526 14
+#define SYS_COUNT_RX_SZ_JUMBO 15
+#define SYS_COUNT_RX_PAUSE 16
+#define SYS_COUNT_RX_CONTROL 17
+#define SYS_COUNT_RX_LONG 18
+#define SYS_COUNT_RX_CAT_DROP 19
+#define SYS_COUNT_RX_RED_PRIO_0 20
+#define SYS_COUNT_RX_RED_PRIO_1 21
+#define SYS_COUNT_RX_RED_PRIO_2 22
+#define SYS_COUNT_RX_RED_PRIO_3 23
+#define SYS_COUNT_RX_RED_PRIO_4 24
+#define SYS_COUNT_RX_RED_PRIO_5 25
+#define SYS_COUNT_RX_RED_PRIO_6 26
+#define SYS_COUNT_RX_RED_PRIO_7 27
+#define SYS_COUNT_RX_YELLOW_PRIO_0 28
+#define SYS_COUNT_RX_YELLOW_PRIO_1 29
+#define SYS_COUNT_RX_YELLOW_PRIO_2 30
+#define SYS_COUNT_RX_YELLOW_PRIO_3 31
+#define SYS_COUNT_RX_YELLOW_PRIO_4 32
+#define SYS_COUNT_RX_YELLOW_PRIO_5 33
+#define SYS_COUNT_RX_YELLOW_PRIO_6 34
+#define SYS_COUNT_RX_YELLOW_PRIO_7 35
+#define SYS_COUNT_RX_GREEN_PRIO_0 36
+#define SYS_COUNT_RX_GREEN_PRIO_1 37
+#define SYS_COUNT_RX_GREEN_PRIO_2 38
+#define SYS_COUNT_RX_GREEN_PRIO_3 39
+#define SYS_COUNT_RX_GREEN_PRIO_4 40
+#define SYS_COUNT_RX_GREEN_PRIO_5 41
+#define SYS_COUNT_RX_GREEN_PRIO_6 42
+#define SYS_COUNT_RX_GREEN_PRIO_7 43
+#define SYS_COUNT_RX_ASSEMBLY_ERR 44
+#define SYS_COUNT_RX_SMD_ERR 45
+#define SYS_COUNT_RX_ASSEMBLY_OK 46
+#define SYS_COUNT_RX_MERGE_FRAG 47
+#define SYS_COUNT_RX_PMAC_OCT 48
+#define SYS_COUNT_RX_PMAC_UC 49
+#define SYS_COUNT_RX_PMAC_MC 50
+#define SYS_COUNT_RX_PMAC_BC 51
+#define SYS_COUNT_RX_PMAC_SHORT 52
+#define SYS_COUNT_RX_PMAC_FRAG 53
+#define SYS_COUNT_RX_PMAC_JABBER 54
+#define SYS_COUNT_RX_PMAC_CRC 55
+#define SYS_COUNT_RX_PMAC_SYMBOL_ERR 56
+#define SYS_COUNT_RX_PMAC_SZ_64 57
+#define SYS_COUNT_RX_PMAC_SZ_65_127 58
+#define SYS_COUNT_RX_PMAC_SZ_128_255 59
+#define SYS_COUNT_RX_PMAC_SZ_256_511 60
+#define SYS_COUNT_RX_PMAC_SZ_512_1023 61
+#define SYS_COUNT_RX_PMAC_SZ_1024_1526 62
+#define SYS_COUNT_RX_PMAC_SZ_JUMBO 63
+#define SYS_COUNT_RX_PMAC_PAUSE 64
+#define SYS_COUNT_RX_PMAC_CONTROL 65
+#define SYS_COUNT_RX_PMAC_LONG 66
+
+#define SYS_COUNT_TX_OCT 67
+#define SYS_COUNT_TX_UC 68
+#define SYS_COUNT_TX_MC 69
+#define SYS_COUNT_TX_BC 70
+#define SYS_COUNT_TX_COL 71
+#define SYS_COUNT_TX_DROP 72
+#define SYS_COUNT_TX_PAUSE 73
+#define SYS_COUNT_TX_SZ_64 74
+#define SYS_COUNT_TX_SZ_65_127 75
+#define SYS_COUNT_TX_SZ_128_255 76
+#define SYS_COUNT_TX_SZ_256_511 77
+#define SYS_COUNT_TX_SZ_512_1023 78
+#define SYS_COUNT_TX_SZ_1024_1526 79
+#define SYS_COUNT_TX_SZ_JUMBO 80
+#define SYS_COUNT_TX_YELLOW_PRIO_0 81
+#define SYS_COUNT_TX_YELLOW_PRIO_1 82
+#define SYS_COUNT_TX_YELLOW_PRIO_2 83
+#define SYS_COUNT_TX_YELLOW_PRIO_3 84
+#define SYS_COUNT_TX_YELLOW_PRIO_4 85
+#define SYS_COUNT_TX_YELLOW_PRIO_5 86
+#define SYS_COUNT_TX_YELLOW_PRIO_6 87
+#define SYS_COUNT_TX_YELLOW_PRIO_7 88
+#define SYS_COUNT_TX_GREEN_PRIO_0 89
+#define SYS_COUNT_TX_GREEN_PRIO_1 90
+#define SYS_COUNT_TX_GREEN_PRIO_2 91
+#define SYS_COUNT_TX_GREEN_PRIO_3 92
+#define SYS_COUNT_TX_GREEN_PRIO_4 93
+#define SYS_COUNT_TX_GREEN_PRIO_5 94
+#define SYS_COUNT_TX_GREEN_PRIO_6 95
+#define SYS_COUNT_TX_GREEN_PRIO_7 96
+#define SYS_COUNT_TX_AGED 97
+#define SYS_COUNT_TX_LLCT 98
+#define SYS_COUNT_TX_CT 99
+#define SYS_COUNT_TX_MM_HOLD 100
+#define SYS_COUNT_TX_MERGE_FRAG 101
+#define SYS_COUNT_TX_PMAC_OCT 102
+#define SYS_COUNT_TX_PMAC_UC 103
+#define SYS_COUNT_TX_PMAC_MC 104
+#define SYS_COUNT_TX_PMAC_BC 105
+#define SYS_COUNT_TX_PMAC_PAUSE 106
+#define SYS_COUNT_TX_PMAC_SZ_64 107
+#define SYS_COUNT_TX_PMAC_SZ_65_127 108
+#define SYS_COUNT_TX_PMAC_SZ_128_255 109
+#define SYS_COUNT_TX_PMAC_SZ_256_511 110
+#define SYS_COUNT_TX_PMAC_SZ_512_1023 111
+#define SYS_COUNT_TX_PMAC_SZ_1024_1526 112
+#define SYS_COUNT_TX_PMAC_SZ_JUMBO 113
+
+#define SYS_COUNT_DR_LOCAL 114
+#define SYS_COUNT_DR_TAIL 115
+#define SYS_COUNT_DR_YELLOW_PRIO_0 116
+#define SYS_COUNT_DR_YELLOW_PRIO_1 117
+#define SYS_COUNT_DR_YELLOW_PRIO_2 118
+#define SYS_COUNT_DR_YELLOW_PRIO_3 119
+#define SYS_COUNT_DR_YELLOW_PRIO_4 120
+#define SYS_COUNT_DR_YELLOW_PRIO_5 121
+#define SYS_COUNT_DR_YELLOW_PRIO_6 122
+#define SYS_COUNT_DR_YELLOW_PRIO_7 123
+#define SYS_COUNT_DR_GREEN_PRIO_0 124
+#define SYS_COUNT_DR_GREEN_PRIO_1 125
+#define SYS_COUNT_DR_GREEN_PRIO_2 126
+#define SYS_COUNT_DR_GREEN_PRIO_3 127
+#define SYS_COUNT_DR_GREEN_PRIO_4 128
+#define SYS_COUNT_DR_GREEN_PRIO_5 129
+#define SYS_COUNT_DR_GREEN_PRIO_6 130
+#define SYS_COUNT_DR_GREEN_PRIO_7 131
+
+/* Add a possibly wrapping 32 bit value to a 64 bit counter */
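+/* Worked example: if *cnt == 0x1fffffff0 and the fresh 32-bit reading
+ * val == 0x10, then val < (*cnt & U32_MAX), so the low word wrapped
+ * once more and the accumulated value becomes 0x200000010.
+ */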
+static void lan966x_add_cnt(u64 *cnt, u32 val)
+{
+ if (val < (*cnt & U32_MAX))
+ *cnt += (u64)1 << 32; /* value has wrapped */
+
+ *cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
+
+static void lan966x_stats_update(struct lan966x *lan966x)
+{
+ int i, j;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ uint idx = i * lan966x->num_stats;
+
+ lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(i),
+ lan966x, SYS_STAT_CFG);
+
+ for (j = 0; j < lan966x->num_stats; j++) {
+ u32 offset = lan966x->stats_layout[j].offset;
+
+ lan966x_add_cnt(&lan966x->stats[idx++],
+ lan_rd(lan966x, SYS_CNT(offset)));
+ }
+ }
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static int lan966x_get_sset_count(struct net_device *dev, int sset)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return lan966x->num_stats;
+}
+
+static void lan966x_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct lan966x_port *port = netdev_priv(netdev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < lan966x->num_stats; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ lan966x->stats_layout[i].name, ETH_GSTRING_LEN);
+}
+
+static void lan966x_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ /* check and update now */
+ lan966x_stats_update(lan966x);
+
+ /* Copy all counters */
+ for (i = 0; i < lan966x->num_stats; i++)
+ *data++ = lan966x->stats[port->chip_port *
+ lan966x->num_stats + i];
+}
+
+static void lan966x_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+
+ lan966x_stats_update(lan966x);
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
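+ /* Each counter is reported as the sum of the regular (eMAC) counter
+ * and its PMAC variant, presumably the preemptible-MAC counter when
+ * frame preemption is in use.
+ */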
+ mac_stats->FramesTransmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_UC] +
+ lan966x->stats[idx + SYS_COUNT_TX_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_BC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_UC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+ mac_stats->SingleCollisionFrames =
+ lan966x->stats[idx + SYS_COUNT_TX_COL];
+ mac_stats->MultipleCollisionFrames = 0;
+ mac_stats->FramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_UC] +
+ lan966x->stats[idx + SYS_COUNT_RX_MC] +
+ lan966x->stats[idx + SYS_COUNT_RX_BC];
+ mac_stats->FrameCheckSequenceErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+ mac_stats->AlignmentErrors = 0;
+ mac_stats->OctetsTransmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+ mac_stats->FramesWithDeferredXmissions =
+ lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD];
+ mac_stats->LateCollisions = 0;
+ mac_stats->FramesAbortedDueToXSColls = 0;
+ mac_stats->FramesLostDueToIntMACXmitError = 0;
+ mac_stats->CarrierSenseErrors = 0;
+ mac_stats->OctetsReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
+ mac_stats->FramesLostDueToIntMACRcvError = 0;
+ mac_stats->MulticastFramesXmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC];
+ mac_stats->BroadcastFramesXmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_BC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+ mac_stats->FramesWithExcessiveDeferral = 0;
+ mac_stats->MulticastFramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_MC];
+ mac_stats->BroadcastFramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_BC];
+ mac_stats->InRangeLengthErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+ mac_stats->OutOfRangeLengthField =
+ lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+ mac_stats->FrameTooLongErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {}
+};
+
+static void lan966x_get_eth_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+
+ lan966x_stats_update(lan966x);
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ rmon_stats->undersize_pkts =
+ lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT];
+ rmon_stats->oversize_pkts =
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+ rmon_stats->fragments =
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG];
+ rmon_stats->jabbers =
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER];
+ rmon_stats->hist[0] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64];
+ rmon_stats->hist[1] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127];
+ rmon_stats->hist[2] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255];
+ rmon_stats->hist[3] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511];
+ rmon_stats->hist[4] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023];
+ rmon_stats->hist[5] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526];
+ rmon_stats->hist[6] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+ rmon_stats->hist_tx[0] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64];
+ rmon_stats->hist_tx[1] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127];
+ rmon_stats->hist_tx[2] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255];
+ rmon_stats->hist_tx[3] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511];
+ rmon_stats->hist_tx[4] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023];
+ rmon_stats->hist_tx[5] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
+ rmon_stats->hist_tx[6] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+ mutex_unlock(&lan966x->stats_lock);
+
+ *ranges = lan966x_rmon_ranges;
+}
+
+static int lan966x_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int lan966x_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void lan966x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int lan966x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
+const struct ethtool_ops lan966x_ethtool_ops = {
+ .get_link_ksettings = lan966x_get_link_ksettings,
+ .set_link_ksettings = lan966x_set_link_ksettings,
+ .get_pauseparam = lan966x_get_pauseparam,
+ .set_pauseparam = lan966x_set_pauseparam,
+ .get_sset_count = lan966x_get_sset_count,
+ .get_strings = lan966x_get_strings,
+ .get_ethtool_stats = lan966x_get_ethtool_stats,
+ .get_eth_mac_stats = lan966x_get_eth_mac_stats,
+ .get_rmon_stats = lan966x_get_eth_rmon_stats,
+ .get_link = ethtool_op_get_link,
+};
+
+static void lan966x_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct lan966x *lan966x = container_of(del_work, struct lan966x,
+ stats_work);
+
+ lan966x_stats_update(lan966x);
+
+ queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+ LAN966X_STATS_CHECK_DELAY);
+}
+
+void lan966x_stats_get(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+ int i;
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
+
+ stats->rx_packets = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+ stats->multicast = lan966x->stats[idx + SYS_COUNT_RX_MC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_MC];
+
+ stats->rx_errors = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG];
+
+ stats->rx_dropped = dev->stats.rx_dropped +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_DR_LOCAL] +
+ lan966x->stats[idx + SYS_COUNT_DR_TAIL];
+
+ for (i = 0; i < LAN966X_NUM_TC; i++) {
+ stats->rx_dropped +=
+ (lan966x->stats[idx + SYS_COUNT_DR_YELLOW_PRIO_0 + i] +
+ lan966x->stats[idx + SYS_COUNT_DR_GREEN_PRIO_0 + i]);
+ }
+
+ /* Get Tx stats */
+ stats->tx_bytes = lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+
+ stats->tx_packets = lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+ stats->tx_dropped = lan966x->stats[idx + SYS_COUNT_TX_DROP] +
+ lan966x->stats[idx + SYS_COUNT_TX_AGED];
+
+ stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+int lan966x_stats_init(struct lan966x *lan966x)
+{
+ char queue_name[32];
+
+ lan966x->stats_layout = lan966x_stats_layout;
+ lan966x->num_stats = ARRAY_SIZE(lan966x_stats_layout);
+ lan966x->stats = devm_kcalloc(lan966x->dev, lan966x->num_phys_ports *
+ lan966x->num_stats,
+ sizeof(u64), GFP_KERNEL);
+ if (!lan966x->stats)
+ return -ENOMEM;
+
+ /* Init stats worker */
+ mutex_init(&lan966x->stats_lock);
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(lan966x->dev));
+ lan966x->stats_queue = create_singlethread_workqueue(queue_name);
+ INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);
+ queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+ LAN966X_STATS_CHECK_DELAY);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
new file mode 100644
index 000000000000..da5ca7188679
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ struct lan966x *lan966x;
+ unsigned long event;
+};
+
+struct lan966x_fdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u32 references;
+};
+
+static struct lan966x_fdb_entry *
+lan966x_fdb_find_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr))
+ return fdb_entry;
+ }
+
+ return NULL;
+}
+
+static void lan966x_fdb_add_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ fdb_entry = lan966x_fdb_find_entry(lan966x, fdb_info);
+ if (fdb_entry) {
+ fdb_entry->references++;
+ return;
+ }
+
+ fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
+ if (!fdb_entry)
+ return;
+
+ ether_addr_copy(fdb_entry->mac, fdb_info->addr);
+ fdb_entry->vid = fdb_info->vid;
+ fdb_entry->references = 1;
+ list_add_tail(&fdb_entry->list, &lan966x->fdb_entries);
+}
+
+static bool lan966x_fdb_del_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries,
+ list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr)) {
+ fdb_entry->references--;
+ if (!fdb_entry->references) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ return true;
+ }
+ break;
+ }
+ }
+
+ return false;
+}
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_forget(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+static void lan966x_fdb_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, list) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ }
+}
+
+int lan966x_fdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->fdb_entries);
+ lan966x->fdb_work = alloc_ordered_workqueue("lan966x_order", 0);
+ if (!lan966x->fdb_work)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void lan966x_fdb_deinit(struct lan966x *lan966x)
+{
+ destroy_workqueue(lan966x->fdb_work);
+ lan966x_fdb_purge_entries(lan966x);
+}
+
+static void lan966x_fdb_event_work(struct work_struct *work)
+{
+ struct lan966x_fdb_event_work *fdb_work =
+ container_of(work, struct lan966x_fdb_event_work, work);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct net_device *dev = fdb_work->dev;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+ int ret;
+
+ fdb_info = &fdb_work->fdb_info;
+ lan966x = fdb_work->lan966x;
+
+ if (lan966x_netdevice_check(dev)) {
+ port = netdev_priv(dev);
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+ } else {
+ if (!netif_is_bridge_master(dev))
+ goto out;
+
+ /* Handle events addressed to the bridge device itself */
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ /* If there is no front port in this vlan, there is no
+ * point in copying the frame to the CPU because it would
+ * just be dropped at a later point. So learn the address
+ * on the CPU only when a port is in the vlan, but always
+ * store the fdb entry so it can be installed once a port
+ * actually joins the vlan.
+ */
+ lan966x_fdb_add_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ret = lan966x_fdb_del_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ if (ret)
+ lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+ }
+
+out:
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+ dev_put(dev);
+}
+
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_fdb_event_work *fdb_work;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (lan966x_netdevice_check(orig_dev) &&
+ !fdb_info->added_by_user)
+ break;
+
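+ /* The switchdev notifier runs in atomic context, so copy the FDB
+ * info and defer the MAC table update to the ordered workqueue.
+ */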
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (!fdb_work)
+ return -ENOMEM;
+
+ fdb_work->dev = orig_dev;
+ fdb_work->lan966x = lan966x;
+ fdb_work->event = event;
+ INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
+ memcpy(&fdb_work->fdb_info, fdb_info, sizeof(fdb_work->fdb_info));
+ fdb_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!fdb_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
+ dev_hold(orig_dev);
+
+ queue_work(lan966x->fdb_work, &fdb_work->work);
+ break;
+ }
+
+ return 0;
+err_addr_alloc:
+ kfree(fdb_work);
+ return -ENOMEM;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
new file mode 100644
index 000000000000..ca3314789d18
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __LAN966X_IFH_H__
+#define __LAN966X_IFH_H__
+
+/* Fields with description (*) should just be cleared upon injection.
+ * The IFH is transmitted MSByte first (highest bit position sent as the MSB
+ * of the first byte).
+ */
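+
+/* Each field is described by a bit position (IFH_POS_*) and a bit width
+ * (IFH_WID_*); a field is packed or unpacked with
+ * packing(ifh, &val, pos + wid - 1, pos, IFH_LEN * 4, PACK/UNPACK, 0),
+ * as done for injection and extraction in lan966x_main.c.
+ */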
+
+#define IFH_LEN 7
+
+/* Timestamp for frame */
+#define IFH_POS_TIMESTAMP 192
+
+/* Bypass analyzer with a prefilled IFH */
+#define IFH_POS_BYPASS 191
+
+/* Masqueraded injection with masq_port defining logical source port */
+#define IFH_POS_MASQ 190
+
+/* Masqueraded port number for injection */
+#define IFH_POS_MASQ_PORT 186
+
+/* Frame length (*) */
+#define IFH_POS_LEN 178
+
+/* Cell filling mode. Full(0), Etype(1), LlctOpt(2), Llct(3) */
+#define IFH_POS_WRDMODE 176
+
+/* Frame has 16 bits rtag removed compared to line data */
+#define IFH_POS_RTAG48 175
+
+/* Frame has a redundancy tag */
+#define IFH_POS_HAS_RED_TAG 174
+
+/* Frame has been cut through forwarded (*) */
+#define IFH_POS_CUTTHRU 173
+
+/* Rewriter command */
+#define IFH_POS_REW_CMD 163
+
+/* Enable OAM-related rewriting. PDU_TYPE encodes OAM type. */
+#define IFH_POS_REW_OAM 162
+
+/* PDU type. Encoding: (0-NONE, 1-Y1731_CCM, 2-MRP_TST, 3-MRP_ITST, 4-DLR_BCN,
+ * 5-DLR_ADV, 6-RTE_NULL_INJ, 7-IPV4, 8-IPV6, 9-Y1731_NON_CCM).
+ */
+#define IFH_POS_PDU_TYPE 158
+
+/* Update FCS before transmission */
+#define IFH_POS_FCS_UPD 157
+
+/* Classified DSCP value of frame */
+#define IFH_POS_DSCP 151
+
+/* Yellow indication */
+#define IFH_POS_DP 150
+
+/* Process in RTE/inbound */
+#define IFH_POS_RTE_INB_UPDATE 149
+
+/* Number of tags to pop from frame */
+#define IFH_POS_POP_CNT 147
+
+/* Number of tags in front of the ethertype */
+#define IFH_POS_ETYPE_OFS 145
+
+/* Logical source port of frame (*) */
+#define IFH_POS_SRCPORT 141
+
+/* Sequence number in redundancy tag */
+#define IFH_POS_SEQ_NUM 120
+
+/* S-tagged flag and classified TCI of frame (PCP/DEI/VID) */
+#define IFH_POS_TCI 103
+
+/* Classified internal priority for queuing */
+#define IFH_POS_QOS_CLASS 100
+
+/* Bit mask with eight CPU copy classes */
+#define IFH_POS_CPUQ 92
+
+/* Relearn + learn flags (*) */
+#define IFH_POS_LEARN_FLAGS 90
+
+/* SFLOW identifier for frame (0-8: Tx port, 9: Rx sampling, 15: No sampling) */
+#define IFH_POS_SFLOW_ID 86
+
+/* Set if an ACL/S2 rule was hit (*).
+ * Super priority: acl_hit=0 and acl_hit(4)=1.
+ */
+#define IFH_POS_ACL_HIT 85
+
+/* S2 rule index hit (*) */
+#define IFH_POS_ACL_IDX 79
+
+/* ISDX as classified by S1 */
+#define IFH_POS_ISDX 71
+
+/* Destination ports for frame */
+#define IFH_POS_DSTS 62
+
+/* Storm policer to be applied: None/Uni/Multi/Broad (*) */
+#define IFH_POS_FLOOD 60
+
+/* Redundancy tag operation */
+#define IFH_POS_SEQ_OP 58
+
+/* Classified internal priority for resource management, tagging etc */
+#define IFH_POS_IPV 55
+
+/* Frame is for AFI use */
+#define IFH_POS_AFI 54
+
+/* Internal aging value (*) */
+#define IFH_POS_AGED 52
+
+/* RTP Identifier */
+#define IFH_POS_RTP_ID 42
+
+/* RTP MRPD flow */
+#define IFH_POS_RTP_SUBID 41
+
+/* Profinet DataStatus or opcua GroupVersion MSB */
+#define IFH_POS_PN_DATA_STATUS 33
+
+/* Profinet transfer status (1 iff the status is 0) */
+#define IFH_POS_PN_TRANSF_STATUS_ZERO 32
+
+/* Profinet cycle counter or opcua NetworkMessageNumber */
+#define IFH_POS_PN_CC 16
+
+#define IFH_WID_TIMESTAMP 32
+#define IFH_WID_BYPASS 1
+#define IFH_WID_MASQ 1
+#define IFH_WID_MASQ_PORT 4
+#define IFH_WID_LEN 14
+#define IFH_WID_WRDMODE 2
+#define IFH_WID_RTAG48 1
+#define IFH_WID_HAS_RED_TAG 1
+#define IFH_WID_CUTTHRU 1
+#define IFH_WID_REW_CMD 10
+#define IFH_WID_REW_OAM 1
+#define IFH_WID_PDU_TYPE 4
+#define IFH_WID_FCS_UPD 1
+#define IFH_WID_DSCP 6
+#define IFH_WID_DP 1
+#define IFH_WID_RTE_INB_UPDATE 1
+#define IFH_WID_POP_CNT 2
+#define IFH_WID_ETYPE_OFS 2
+#define IFH_WID_SRCPORT 4
+#define IFH_WID_SEQ_NUM 16
+#define IFH_WID_TCI 17
+#define IFH_WID_QOS_CLASS 3
+#define IFH_WID_CPUQ 8
+#define IFH_WID_LEARN_FLAGS 2
+#define IFH_WID_SFLOW_ID 4
+#define IFH_WID_ACL_HIT 1
+#define IFH_WID_ACL_IDX 6
+#define IFH_WID_ISDX 8
+#define IFH_WID_DSTS 9
+#define IFH_WID_FLOOD 2
+#define IFH_WID_SEQ_OP 2
+#define IFH_WID_IPV 3
+#define IFH_WID_AFI 1
+#define IFH_WID_AGED 2
+#define IFH_WID_RTP_ID 10
+#define IFH_WID_RTP_SUBID 1
+#define IFH_WID_PN_DATA_STATUS 8
+#define IFH_WID_PN_TRANSF_STATUS_ZERO 1
+#define IFH_WID_PN_CC 16
+
+#endif /* __LAN966X_IFH_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
new file mode 100644
index 000000000000..ce5970bdcc6a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+#include "lan966x_main.h"
+
+#define LAN966X_MAC_COLUMNS 4
+#define MACACCESS_CMD_IDLE 0
+#define MACACCESS_CMD_LEARN 1
+#define MACACCESS_CMD_FORGET 2
+#define MACACCESS_CMD_AGE 3
+#define MACACCESS_CMD_GET_NEXT 4
+#define MACACCESS_CMD_INIT 5
+#define MACACCESS_CMD_READ 6
+#define MACACCESS_CMD_WRITE 7
+#define MACACCESS_CMD_SYNC_GET_NEXT 8
+
+#define LAN966X_MAC_INVALID_ROW -1
+
+struct lan966x_mac_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u16 port_index;
+ int row;
+};
+
+struct lan966x_mac_raw_entry {
+ u32 mach;
+ u32 macl;
+ u32 maca;
+ bool processed;
+};
+
+static int lan966x_mac_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_MACACCESS);
+}
+
+static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(lan966x_mac_get_status,
+ lan966x, val,
+ (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US,
+ TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void lan966x_mac_select(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid)
+{
+ u32 macl = 0, mach = 0;
+
+ /* Set the MAC address to handle and the vlan associated in a format
+ * understood by the hardware.
+ */
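+ /* For example, mac 00:11:22:33:44:55 with vid 1 yields
+ * mach = 0x00010011 and macl = 0x22334455.
+ */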
+ mach |= vid << 16;
+ mach |= mac[0] << 8;
+ mach |= mac[1] << 0;
+ macl |= mac[2] << 24;
+ macl |= mac[3] << 16;
+ macl |= mac[4] << 8;
+ macl |= mac[5] << 0;
+
+ lan_wr(macl, lan966x, ANA_MACLDATA);
+ lan_wr(mach, lan966x, ANA_MACHDATA);
+}
+
+static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a write command */
+ lan_wr(ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_CHANGE2SW_SET(0) |
+ ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) |
+ ANA_MACACCESS_DEST_IDX_SET(pgid) |
+ ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
+ lan966x, ANA_MACACCESS);
+
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+/* The mask of the front ports is encoded inside the mac parameter via a call
+ * to lan966x_mdb_encode_mac().
+ */
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_MACV4 && type != ENTRYTYPE_MACV6);
+
+ return __lan966x_mac_learn(lan966x, 0, cpu_copy, mac, vid, type);
+}
+
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
+
+ return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
+}
+
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a forget command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET),
+ lan966x, ANA_MACACCESS);
+
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
+{
+ return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
+}
+
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid)
+{
+ return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED);
+}
+
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing)
+{
+ lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2),
+ ANA_AUTOAGE_AGE_PERIOD,
+ lan966x, ANA_AUTOAGE);
+}
+
+void lan966x_mac_init(struct lan966x *lan966x)
+{
+ /* Clear the MAC table */
+ lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ spin_lock_init(&lan966x->mac_lock);
+ INIT_LIST_HEAD(&lan966x->mac_entries);
+}
+
+static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL);
+ if (!mac_entry)
+ return NULL;
+
+ memcpy(mac_entry->mac, mac, ETH_ALEN);
+ mac_entry->vid = vid;
+ mac_entry->port_index = port_index;
+ mac_entry->row = LAN966X_MAC_INVALID_ROW;
+ return mac_entry;
+}
+
+static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct lan966x_mac_entry *res = NULL;
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac, mac_entry->mac) &&
+ mac_entry->port_index == port_index) {
+ res = mac_entry;
+ break;
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ return res;
+}
+
+static int lan966x_mac_lookup(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+{
+ int ret;
+
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a read command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ),
+ lan966x, ANA_MACACCESS);
+
+ ret = lan966x_mac_wait_for_completion(lan966x);
+ if (ret)
+ return ret;
+
+ return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS));
+}
+
+static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev)
+{
+ struct switchdev_notifier_fdb_info info = { 0 };
+
+ info.addr = mac;
+ info.vid = vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
+ const unsigned char *addr, u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL))
+ return 0;
+
+ /* In case the entry already exists, don't add it again to SW,
+ * just update HW. The actual HW still has to be checked because
+ * an entry may be learned by HW and, before we get the
+ * interrupt, the frame reaches the CPU, which then adds the
+ * entry but without the extern_learn flag.
+ */
+ mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
+ if (mac_entry)
+ return lan966x_mac_learn(lan966x, port->chip_port,
+ addr, vid, ENTRYTYPE_LOCKED);
+
+ mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
+ if (!mac_entry)
+ return -ENOMEM;
+
+ spin_lock(&lan966x->mac_lock);
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+ lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
+
+ return 0;
+}
+
+int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
+ u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(addr, mac_entry->mac)) {
+ lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ return 0;
+}
+
+void lan966x_mac_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+static void lan966x_mac_notifiers(enum switchdev_notifier_type type,
+ unsigned char *mac, u32 vid,
+ struct net_device *dev)
+{
+ rtnl_lock();
+ lan966x_fdb_call_notifiers(type, mac, vid, dev);
+ rtnl_unlock();
+}
+
+static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry,
+ u8 *mac, u16 *vid, u32 *dest_idx)
+{
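+ /* Decode a raw MAC table row entry; the mac/vid layout is the inverse
+ * of the encoding done in lan966x_mac_select(), and dest_idx is taken
+ * from the ANA_MACACCESS value.
+ */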
+ mac[0] = (raw_entry->mach >> 8) & 0xff;
+ mac[1] = (raw_entry->mach >> 0) & 0xff;
+ mac[2] = (raw_entry->macl >> 24) & 0xff;
+ mac[3] = (raw_entry->macl >> 16) & 0xff;
+ mac[4] = (raw_entry->macl >> 8) & 0xff;
+ mac[5] = (raw_entry->macl >> 0) & 0xff;
+
+ *vid = (raw_entry->mach >> 16) & 0xfff;
+ *dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca);
+}
+
+static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
+ struct lan966x_mac_raw_entry *raw_entries)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u32 dest_idx;
+ u32 column;
+ u16 vid;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
+ bool found = false;
+
+ if (mac_entry->row != row)
+ continue;
+
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+ /* All the valid entries are at the start of the row,
+ * so once an invalid entry is found the rest of the
+ * columns can be skipped.
+ */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ WARN_ON(dest_idx >= lan966x->num_phys_ports);
+
+ /* If the entry in SW is found, then there is nothing
+ * to do
+ */
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac_entry->mac, mac) &&
+ mac_entry->port_index == dest_idx) {
+ raw_entries[column].processed = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* Notify the bridge that the entry doesn't exist
+ * anymore in the HW and remove the entry from the SW
+ * list
+ */
+ lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mac_entry->mac, mac_entry->vid,
+ lan966x->ports[mac_entry->port_index]->dev);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ /* Now go over the columns and check if any entry was not in the
+ * SW list; such an entry is new, so the bridge needs to be
+ * notified about it.
+ */
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+ /* All the valid entries are at the start of the row, so once an
+ * invalid entry is found the rest of the columns can be skipped.
+ */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ /* If the entry already exists then don't do anything */
+ if (raw_entries[column].processed)
+ continue;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ WARN_ON(dest_idx >= lan966x->num_phys_ports);
+
+ mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
+ if (!mac_entry)
+ return;
+
+ mac_entry->row = row;
+
+ spin_lock(&lan966x->mac_lock);
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ mac, vid, lan966x->ports[dest_idx]->dev);
+ }
+}
+
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
+{
+ struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 };
+ u32 index, column;
+ bool stop = true;
+ u32 val;
+
+ /* Start the scan from 0, 0 */
+ lan_wr(ANA_MACTINDX_M_INDEX_SET(0) |
+ ANA_MACTINDX_BUCKET_SET(0),
+ lan966x, ANA_MACTINDX);
+
+ while (1) {
+ lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
+ ANA_MACACCESS_MAC_TABLE_CMD,
+ lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ val = lan_rd(lan966x, ANA_MACTINDX);
+ index = ANA_MACTINDX_M_INDEX_GET(val);
+ column = ANA_MACTINDX_BUCKET_GET(val);
+
+ /* The SYNC-GET-NEXT command returns all the entries (4) of a
+ * row that has suffered a change, where a change means that a
+ * new entry was added or an entry was removed because of
+ * ageing. It returns all the columns of that row and then
+ * moves on to the next row. The stop condition of
+ * SYNC-GET-NEXT is when it reaches 'directly' row 0 column 3.
+ * So if SYNC-GET-NEXT returns row 0 and column 0, it is
+ * required to continue reading even after it reaches row 0
+ * and column 3.
+ */
+ if (index == 0 && column == 0)
+ stop = false;
+
+ if (column == LAN966X_MAC_COLUMNS - 1 &&
+ index == 0 && stop)
+ break;
+
+ entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
+ entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
+ entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+
+ /* Once all the columns are read process them */
+ if (column == LAN966X_MAC_COLUMNS - 1) {
+ lan966x_mac_irq_process(lan966x, index, entry);
+ /* A row was processed so it is safe to assume that the
+ * next row/column can be the stop condition
+ */
+ stop = true;
+ }
+ }
+
+ lan_rmw(ANA_ANAINTR_INTR_SET(0),
+ ANA_ANAINTR_INTR,
+ lan966x, ANA_ANAINTR);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
new file mode 100644
index 000000000000..1f60fd125a1d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -0,0 +1,1002 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/packing.h>
+#include <linux/phy/phy.h>
+#include <linux/reset.h>
+
+#include "lan966x_main.h"
+
+#define XTR_EOF_0 0x00000080U
+#define XTR_EOF_1 0x01000080U
+#define XTR_EOF_2 0x02000080U
+#define XTR_EOF_3 0x03000080U
+#define XTR_PRUNED 0x04000080U
+#define XTR_ABORT 0x05000080U
+#define XTR_ESCAPE 0x06000080U
+#define XTR_NOT_READY 0x07000080U
+#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
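+/* Bits 25:24 of an EOF word encode how many bytes of the last data word
+ * are valid, e.g. XTR_VALID_BYTES(XTR_EOF_1) == 3.
+ */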
+
+#define READL_SLEEP_US 10
+#define READL_TIMEOUT_US 100000000
+
+#define IO_RANGES 2
+
+static const struct of_device_id lan966x_match[] = {
+ { .compatible = "microchip,lan966x-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lan966x_match);
+
+struct lan966x_main_io_resource {
+ enum lan966x_target id;
+ phys_addr_t offset;
+ int range;
+};
+
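+/* Each target is mapped at begin[range] + offset by lan966x_create_targets();
+ * the trailing comments give the resulting absolute addresses on the
+ * reference memory map.
+ */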
+static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
+ { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_ORG, 0, 1 }, /* 0xe2000000 */
+ { TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
+ { TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
+ { TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */
+ { TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */
+ { TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */
+ { TARGET_DEV, 0x34000, 1 }, /* 0xe2034000 */
+ { TARGET_DEV + 1, 0x38000, 1 }, /* 0xe2038000 */
+ { TARGET_DEV + 2, 0x3c000, 1 }, /* 0xe203c000 */
+ { TARGET_DEV + 3, 0x40000, 1 }, /* 0xe2040000 */
+ { TARGET_DEV + 4, 0x44000, 1 }, /* 0xe2044000 */
+ { TARGET_DEV + 5, 0x48000, 1 }, /* 0xe2048000 */
+ { TARGET_DEV + 6, 0x4c000, 1 }, /* 0xe204c000 */
+ { TARGET_DEV + 7, 0x50000, 1 }, /* 0xe2050000 */
+ { TARGET_QSYS, 0x100000, 1 }, /* 0xe2100000 */
+ { TARGET_AFI, 0x120000, 1 }, /* 0xe2120000 */
+ { TARGET_ANA, 0x140000, 1 }, /* 0xe2140000 */
+};
+
+static int lan966x_create_targets(struct platform_device *pdev,
+ struct lan966x *lan966x)
+{
+ struct resource *iores[IO_RANGES];
+ void __iomem *begin[IO_RANGES];
+ int idx;
+
+ /* Initially map the entire range and after that update each target to
+ * point inside the region at the correct offset. It is possible that
+ * other devices access the same region so don't add any checks about
+ * this.
+ */
+ for (idx = 0; idx < IO_RANGES; idx++) {
+ iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM,
+ idx);
+ if (!iores[idx]) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
+
+ begin[idx] = devm_ioremap(&pdev->dev,
+ iores[idx]->start,
+ resource_size(iores[idx]));
+ if (!begin[idx]) {
+ dev_err(&pdev->dev, "Unable to get registers: %s\n",
+ iores[idx]->name);
+ return -ENOMEM;
+ }
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) {
+ const struct lan966x_main_io_resource *iomap =
+ &lan966x_main_iomap[idx];
+
+ lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+ }
+
+ return 0;
+}
+
+static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ const struct sockaddr *addr = p;
+ int ret;
+
+ /* Learn the new net device MAC address in the mac table. */
+ ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
+ if (ret)
+ return ret;
+
+ /* Then forget the previous one. */
+ ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
+ if (ret)
+ return ret;
+
+ eth_hw_addr_set(dev, addr->sa_data);
+ return ret;
+}
+
+static int lan966x_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = snprintf(buf, len, "p%d", port->chip_port);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lan966x_port_open(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) |
+ ANA_PORT_CFG_RECV_ENA_SET(1) |
+ ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port),
+ ANA_PORT_CFG_LEARNAUTO |
+ ANA_PORT_CFG_RECV_ENA |
+ ANA_PORT_CFG_PORTID_VAL,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
+ if (err) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return err;
+ }
+
+ phylink_start(port->phylink);
+
+ return 0;
+}
+
+static int lan966x_port_stop(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ lan966x_port_config_down(port);
+ phylink_stop(port->phylink);
+ phylink_disconnect_phy(port->phylink);
+
+ return 0;
+}
+
+static int lan966x_port_inj_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QS_INJ_STATUS);
+}
+
+static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
+ QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+}
+
+static int lan966x_port_ifh_xmit(struct sk_buff *skb,
+ __be32 *ifh,
+ struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, count, last;
+ u8 grp = 0;
+ u32 val;
+ int err;
+
+ val = lan_rd(lan966x, QS_INJ_STATUS);
+ if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
+ (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
+ return NETDEV_TX_BUSY;
+
+ /* Write start of frame */
+ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_SOF_SET(1),
+ lan966x, QS_INJ_CTRL(grp));
+
+ /* Write IFH header */
+ for (i = 0; i < IFH_LEN; ++i) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ return NETDEV_TX_BUSY;
+
+ lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
+ }
+
+ /* Write frame */
+ count = DIV_ROUND_UP(skb->len, 4);
+ last = skb->len % 4;
+ for (i = 0; i < count; ++i) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ return NETDEV_TX_BUSY;
+
+ lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
+ }
+
+ /* Add padding */
+ while (i < (LAN966X_BUFFER_MIN_SZ / 4)) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ return NETDEV_TX_BUSY;
+
+ lan_wr(0, lan966x, QS_INJ_WR(grp));
+ ++i;
+ }
+
+ /* Indicate EOF and valid bytes in the last word */
+ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
+ 0 : last) |
+ QS_INJ_CTRL_EOF_SET(1),
+ lan966x, QS_INJ_CTRL(grp));
+
+ /* Add dummy CRC */
+ lan_wr(0, lan966x, QS_INJ_WR(grp));
+ skb_tx_timestamp(skb);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
+{
+ packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
+ IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_port(void *ifh, u64 dsts)
+{
+ packing(ifh, &dsts, IFH_POS_DSTS + IFH_WID_DSTS - 1,
+ IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_qos_class(void *ifh, u64 qos)
+{
+ packing(ifh, &qos, IFH_POS_QOS_CLASS + IFH_WID_QOS_CLASS - 1,
+ IFH_POS_QOS_CLASS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_ipv(void *ifh, u64 ipv)
+{
+ packing(ifh, &ipv, IFH_POS_IPV + IFH_WID_IPV - 1,
+ IFH_POS_IPV, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_vid(void *ifh, u64 vid)
+{
+ packing(ifh, &vid, IFH_POS_TCI + IFH_WID_TCI - 1,
+ IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
+}
+
+static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ __be32 ifh[IFH_LEN];
+
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+ lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+
+ return lan966x_port_ifh_xmit(skb, ifh, dev);
+}
+
+static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED);
+}
+
+static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID);
+}
+
+static void lan966x_port_set_rx_mode(struct net_device *dev)
+{
+ __dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync);
+}
+
+static int lan966x_port_get_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ ppid->id_len = sizeof(lan966x->base_mac);
+ memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static const struct net_device_ops lan966x_port_netdev_ops = {
+ .ndo_open = lan966x_port_open,
+ .ndo_stop = lan966x_port_stop,
+ .ndo_start_xmit = lan966x_port_xmit,
+ .ndo_change_mtu = lan966x_port_change_mtu,
+ .ndo_set_rx_mode = lan966x_port_set_rx_mode,
+ .ndo_get_phys_port_name = lan966x_port_get_phys_port_name,
+ .ndo_get_stats64 = lan966x_stats_get,
+ .ndo_set_mac_address = lan966x_port_set_mac_address,
+ .ndo_get_port_parent_id = lan966x_port_get_parent_id,
+};
+
+bool lan966x_netdevice_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &lan966x_port_netdev_ops;
+}
+
+static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
+{
+ return lan_rd(lan966x, QS_XTR_RD(grp));
+}
+
+static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp)
+{
+ u32 val;
+
+ return read_poll_timeout(lan966x_port_xtr_status, val,
+ val != XTR_NOT_READY,
+ READL_SLEEP_US, READL_TIMEOUT_US, false,
+ lan966x, grp);
+}
+
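+/* Read one word of frame data from the extraction FIFO into *rval.
+ * Returns the number of valid bytes (1-4) in that word, or a negative
+ * error code on abort or timeout.
+ */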
+static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
+{
+ u32 bytes_valid;
+ u32 val;
+ int err;
+
+ val = lan_rd(lan966x, QS_XTR_RD(grp));
+ if (val == XTR_NOT_READY) {
+ err = lan966x_port_xtr_ready(lan966x, grp);
+ if (err)
+ return -EIO;
+ }
+
+ switch (val) {
+ case XTR_ABORT:
+ return -EIO;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ case XTR_PRUNED:
+ bytes_valid = XTR_VALID_BYTES(val);
+ val = lan_rd(lan966x, QS_XTR_RD(grp));
+ if (val == XTR_ESCAPE)
+ *rval = lan_rd(lan966x, QS_XTR_RD(grp));
+ else
+ *rval = val;
+
+ return bytes_valid;
+ case XTR_ESCAPE:
+ *rval = lan_rd(lan966x, QS_XTR_RD(grp));
+
+ return 4;
+ default:
+ *rval = val;
+
+ return 4;
+ }
+}
+
+static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
+{
+ packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
+ IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
+}
+
+static void lan966x_ifh_get_len(void *ifh, u64 *len)
+{
+ packing(ifh, len, IFH_POS_LEN + IFH_WID_LEN - 1,
+ IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
+}
+
+static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ int i, grp = 0, err = 0;
+
+ if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)))
+ return IRQ_NONE;
+
+ do {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ int sz = 0, buf_len;
+ u64 src_port, len;
+ u32 ifh[IFH_LEN];
+ u32 *buf;
+ u32 val;
+
+ for (i = 0; i < IFH_LEN; i++) {
+ err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]);
+ if (err != 4)
+ goto recover;
+ }
+
+ err = 0;
+
+ lan966x_ifh_get_src_port(ifh, &src_port);
+ lan966x_ifh_get_len(ifh, &len);
+
+ WARN_ON(src_port >= lan966x->num_phys_ports);
+
+ dev = lan966x->ports[src_port]->dev;
+ skb = netdev_alloc_skb(dev, len);
+ if (unlikely(!skb)) {
+ netdev_err(dev, "Unable to allocate sk_buff\n");
+ err = -ENOMEM;
+ break;
+ }
+ buf_len = len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
+
+ len = 0;
+ do {
+ sz = lan966x_rx_frame_word(lan966x, grp, &val);
+ if (sz < 0) {
+ kfree_skb(skb);
+ goto recover;
+ }
+
+ *buf++ = val;
+ len += sz;
+ } while (len < buf_len);
+
+ /* Read the FCS */
+ sz = lan966x_rx_frame_word(lan966x, grp, &val);
+ if (sz < 0) {
+ kfree_skb(skb);
+ goto recover;
+ }
+
+ /* Update the statistics if part of the FCS was read before */
+ len -= ETH_FCS_LEN - sz;
+
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (lan966x->bridge_mask & BIT(src_port))
+ skb->offload_fwd_mark = 1;
+
+ netif_rx_ni(skb);
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+
+recover:
+ if (sz < 0 || err)
+ lan_rd(lan966x, QS_XTR_RD(grp));
+
+ } while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lan966x_ana_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+
+ return lan966x_mac_irq_handler(lan966x);
+}
+
+static void lan966x_cleanup_ports(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ if (port->dev)
+ unregister_netdev(port->dev);
+
+ if (port->phylink) {
+ rtnl_lock();
+ lan966x_port_stop(port->dev);
+ rtnl_unlock();
+ phylink_destroy(port->phylink);
+ port->phylink = NULL;
+ }
+
+ if (port->fwnode)
+ fwnode_handle_put(port->fwnode);
+ }
+
+ disable_irq(lan966x->xtr_irq);
+ lan966x->xtr_irq = -ENXIO;
+
+ if (lan966x->ana_irq > 0) {
+ disable_irq(lan966x->ana_irq);
+ lan966x->ana_irq = -ENXIO;
+ }
+}
+
+static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
+ phy_interface_t phy_mode,
+ struct fwnode_handle *portnp)
+{
+ struct lan966x_port *port;
+ struct phylink *phylink;
+ struct net_device *dev;
+ int err;
+
+ if (p >= lan966x->num_phys_ports)
+ return -EINVAL;
+
+ dev = devm_alloc_etherdev_mqs(lan966x->dev,
+ sizeof(struct lan966x_port), 8, 1);
+ if (!dev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(dev, lan966x->dev);
+ port = netdev_priv(dev);
+ port->dev = dev;
+ port->lan966x = lan966x;
+ port->chip_port = p;
+ lan966x->ports[p] = port;
+
+ dev->max_mtu = ETH_MAX_MTU;
+
+ dev->netdev_ops = &lan966x_port_netdev_ops;
+ dev->ethtool_ops = &lan966x_ethtool_ops;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ dev->needed_headroom = IFH_LEN * sizeof(u32);
+
+ eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
+
+ lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID,
+ ENTRYTYPE_LOCKED);
+
+ port->phylink_config.dev = &port->dev->dev;
+ port->phylink_config.type = PHYLINK_NETDEV;
+ port->phylink_pcs.poll = true;
+ port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
+
+ port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&port->phylink_config,
+ portnp,
+ phy_mode,
+ &lan966x_phylink_mac_ops);
+ if (IS_ERR(phylink)) {
+ port->dev = NULL;
+ return PTR_ERR(phylink);
+ }
+
+ port->phylink = phylink;
+ phylink_set_pcs(phylink, &port->phylink_pcs);
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(lan966x->dev, "register_netdev failed\n");
+ return err;
+ }
+
+ lan966x_vlan_port_set_vlan_aware(port, 0);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+
+ return 0;
+}
+
+static void lan966x_init(struct lan966x *lan966x)
+{
+ u32 p, i;
+
+ /* MAC table initialization */
+ lan966x_mac_init(lan966x);
+
+ lan966x_vlan_init(lan966x);
+
+ /* Flush queues */
+ lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
+ GENMASK(1, 0),
+ lan966x, QS_XTR_FLUSH);
+
+ /* Allow to drain */
+ mdelay(1);
+
+ /* All Queues normal */
+ lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) &
+ ~(GENMASK(1, 0)),
+ lan966x, QS_XTR_FLUSH);
+
+ /* Set MAC age time to default value; an entry is aged after
+ * 2 * AGE_PERIOD.
+ */
+ lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ),
+ lan966x, ANA_AUTOAGE);
+
+ /* Disable learning for frames discarded by VLAN ingress filtering */
+ lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1),
+ ANA_ADVLEARN_VLAN_CHK,
+ lan966x, ANA_ADVLEARN);
+
+ /* Setup frame ageing - "2 sec" - The unit is 6.5 us on lan966x */
+ lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) |
+ (20000000 / 65),
+ lan966x, SYS_FRM_AGING);
+
+ /* Map the 8 CPU extraction queues to CPU port */
+ lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP);
+
+ /* Do byte-swap and expect status after last data word
+ * Extraction: Mode: manual extraction | Byte_swap
+ */
+ lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+ QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
+ lan966x, QS_XTR_GRP_CFG(0));
+
+ /* Injection: Mode: manual injection | Byte_swap */
+ lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+ QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
+ lan966x, QS_INJ_GRP_CFG(0));
+
+ lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0),
+ QS_INJ_CTRL_GAP_SIZE,
+ lan966x, QS_INJ_CTRL(0));
+
+ /* Enable IFH insertion/parsing on CPU ports */
+ lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) |
+ SYS_PORT_MODE_INCL_XTR_HDR_SET(1),
+ lan966x, SYS_PORT_MODE(CPU_PORT));
+
+ /* Setup flooding PGIDs */
+ lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
+ ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
+ lan966x, ANA_FLOODING_IPMC);
+
+ /* There are 8 priorities */
+ for (i = 0; i < 8; ++i)
+ lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) |
+ ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
+ ANA_FLOODING_FLD_MULTICAST |
+ ANA_FLOODING_FLD_UNICAST |
+ ANA_FLOODING_FLD_BROADCAST,
+ lan966x, ANA_FLOODING(i));
+
+ for (i = 0; i < PGID_ENTRIES; ++i)
+ /* Set all the entries to obey VLAN_VLAN */
+ lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1),
+ ANA_PGID_CFG_OBEY_VLAN,
+ lan966x, ANA_PGID_CFG(i));
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ /* Disable bridging by default */
+ lan_rmw(ANA_PGID_PGID_SET(0x0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(p + PGID_SRC));
+
+ /* Do not forward BPDU frames to the front ports and copy them
+ * to CPU
+ */
+ lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p));
+ }
+
+ /* Set source buffer size for each priority and each port to 1500 bytes */
+ for (i = 0; i <= QSYS_Q_RSRV; ++i) {
+ lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i));
+ lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i));
+ }
+
+ /* Enable switching to/from cpu port */
+ lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+ QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+ QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Configure and enable the CPU port */
+ lan_rmw(ANA_PGID_PGID_SET(0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(CPU_PORT));
+ lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_CPU));
+
+ /* Multicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MC));
+
+ /* This will be controlled by mrouter ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MCIPV4));
+
+ /* Unicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_UC));
+
+ /* Broadcast to the CPU port and to other ports */
+ lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_BC));
+
+ lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1),
+ lan966x, REW_PORT_CFG(CPU_PORT));
+
+ lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
+ ANA_ANAINTR_INTR_ENA,
+ lan966x, ANA_ANAINTR);
+}
+
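+/* Poll helper for readx_poll_timeout(): read back SYS_RAM_INIT so the
+ * caller can wait for the RAM initialization bit to clear.
+ */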
+static int lan966x_ram_init(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, SYS_RAM_INIT);
+}
+
+static int lan966x_reset_switch(struct lan966x *lan966x)
+{
+ struct reset_control *switch_reset, *phy_reset;
+ int val = 0;
+ int ret;
+
+ switch_reset = devm_reset_control_get_shared(lan966x->dev, "switch");
+ if (IS_ERR(switch_reset))
+ return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
+ "Could not obtain switch reset");
+
+ phy_reset = devm_reset_control_get_shared(lan966x->dev, "phy");
+ if (IS_ERR(phy_reset))
+ return dev_err_probe(lan966x->dev, PTR_ERR(phy_reset),
+ "Could not obtain phy reset\n");
+
+ reset_control_reset(switch_reset);
+ reset_control_reset(phy_reset);
+
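+ /* Disable the core, trigger the RAM initialization and poll until
+ * it completes before re-enabling the core
+ */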
+ lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
+ lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
+ ret = readx_poll_timeout(lan966x_ram_init, lan966x,
+ val, (val & BIT(1)) == 0, READL_SLEEP_US,
+ READL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG);
+
+ return 0;
+}
+
+static int lan966x_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *ports, *portnp;
+ struct lan966x *lan966x;
+ u8 mac_addr[ETH_ALEN];
+ int err, i;
+
+ lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
+ if (!lan966x)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, lan966x);
+ lan966x->dev = &pdev->dev;
+
+ if (!device_get_mac_address(&pdev->dev, mac_addr)) {
+ ether_addr_copy(lan966x->base_mac, mac_addr);
+ } else {
+ pr_info("MAC addr was not set, use random MAC\n");
+ eth_random_addr(lan966x->base_mac);
+ lan966x->base_mac[5] &= 0xf0;
+ }
+
+ ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
+ if (!ports)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "no ethernet-ports child found\n");
+
+ err = lan966x_create_targets(pdev, lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "Failed to create targets");
+
+ err = lan966x_reset_switch(lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Reset failed\n");
+
+ i = 0;
+ fwnode_for_each_available_child_node(ports, portnp)
+ ++i;
+
+ lan966x->num_phys_ports = i;
+ lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
+ sizeof(struct lan966x_port *),
+ GFP_KERNEL);
+ if (!lan966x->ports)
+ return -ENOMEM;
+
+ /* The QS system has 32KB of memory */
+ lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY;
+
+ /* set irq */
+ lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
+ if (lan966x->xtr_irq <= 0)
+ return -EINVAL;
+
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
+ lan966x_xtr_irq_handler, IRQF_ONESHOT,
+ "frame extraction", lan966x);
+ if (err) {
+ pr_err("Unable to use xtr irq");
+ return -ENODEV;
+ }
+
+ lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
+ if (lan966x->ana_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
+ lan966x_ana_irq_handler, IRQF_ONESHOT,
+ "ana irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use ana irq\n");
+ }
+
+ /* init switch */
+ lan966x_init(lan966x);
+ lan966x_stats_init(lan966x);
+
+ /* go over the child nodes */
+ fwnode_for_each_available_child_node(ports, portnp) {
+ phy_interface_t phy_mode;
+ struct phy *serdes;
+ u32 p;
+
+ if (fwnode_property_read_u32(portnp, "reg", &p))
+ continue;
+
+ phy_mode = fwnode_get_phy_mode(portnp);
+ err = lan966x_probe_port(lan966x, p, phy_mode, portnp);
+ if (err)
+ goto cleanup_ports;
+
+ /* Read needed configuration */
+ lan966x->ports[p]->config.portmode = phy_mode;
+ lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
+
+ serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
+ if (!IS_ERR(serdes))
+ lan966x->ports[p]->serdes = serdes;
+
+ lan966x_port_init(lan966x->ports[p]);
+ }
+
+ lan966x_mdb_init(lan966x);
+ err = lan966x_fdb_init(lan966x);
+ if (err)
+ goto cleanup_ports;
+
+ return 0;
+
+cleanup_ports:
+ fwnode_handle_put(portnp);
+
+ lan966x_cleanup_ports(lan966x);
+
+ cancel_delayed_work_sync(&lan966x->stats_work);
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
+ return err;
+}
+
+static int lan966x_remove(struct platform_device *pdev)
+{
+ struct lan966x *lan966x = platform_get_drvdata(pdev);
+
+ lan966x_cleanup_ports(lan966x);
+
+ cancel_delayed_work_sync(&lan966x->stats_work);
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
+ lan966x_mac_purge_entries(lan966x);
+ lan966x_mdb_deinit(lan966x);
+ lan966x_fdb_deinit(lan966x);
+
+ return 0;
+}
+
+static struct platform_driver lan966x_driver = {
+ .probe = lan966x_probe,
+ .remove = lan966x_remove,
+ .driver = {
+ .name = "lan966x-switch",
+ .of_match_table = lan966x_match,
+ },
+};
+
+static int __init lan966x_switch_driver_init(void)
+{
+ int ret;
+
+ lan966x_register_notifier_blocks();
+
+ ret = platform_driver_register(&lan966x_driver);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ lan966x_unregister_notifier_blocks();
+ return ret;
+}
+
+static void __exit lan966x_switch_driver_exit(void)
+{
+ platform_driver_unregister(&lan966x_driver);
+ lan966x_unregister_notifier_blocks();
+}
+
+module_init(lan966x_switch_driver_init);
+module_exit(lan966x_switch_driver_exit);
+
+MODULE_DESCRIPTION("Microchip LAN966X switch driver");
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
new file mode 100644
index 000000000000..99c6d0a9f946
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __LAN966X_MAIN_H__
+#define __LAN966X_MAIN_H__
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <net/switchdev.h>
+
+#include "lan966x_regs.h"
+#include "lan966x_ifh.h"
+
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
+#define LAN966X_BUFFER_CELL_SZ 64
+#define LAN966X_BUFFER_MEMORY (160 * 1024)
+#define LAN966X_BUFFER_MIN_SZ 60
+
+#define PGID_AGGR 64
+#define PGID_SRC 80
+#define PGID_ENTRIES 89
+
+#define UNAWARE_PVID 0
+#define HOST_PVID 4095
+
+/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
+#define QSYS_Q_RSRV 95
+
+#define CPU_PORT 8
+
+/* Reserved PGIDs */
+#define PGID_CPU (PGID_AGGR - 6)
+#define PGID_UC (PGID_AGGR - 5)
+#define PGID_BC (PGID_AGGR - 4)
+#define PGID_MC (PGID_AGGR - 3)
+#define PGID_MCIPV4 (PGID_AGGR - 2)
+#define PGID_MCIPV6 (PGID_AGGR - 1)
+
+/* Non-reserved PGIDs, used for general purpose */
+#define PGID_GP_START (CPU_PORT + 1)
+#define PGID_GP_END PGID_CPU
+
+#define LAN966X_SPEED_NONE 0
+#define LAN966X_SPEED_2500 1
+#define LAN966X_SPEED_1000 1
+#define LAN966X_SPEED_100 2
+#define LAN966X_SPEED_10 3
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACV4,
+ ENTRYTYPE_MACV6,
+};
+
+struct lan966x_port;
+
+struct lan966x_stat_layout {
+ u32 offset;
+ char name[ETH_GSTRING_LEN];
+};
+
+struct lan966x {
+ struct device *dev;
+
+ u8 num_phys_ports;
+ struct lan966x_port **ports;
+
+ void __iomem *regs[NUM_TARGETS];
+
+ int shared_queue_sz;
+
+ u8 base_mac[ETH_ALEN];
+
+ struct net_device *bridge;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct list_head mac_entries;
+ spinlock_t mac_lock; /* lock for mac_entries list */
+
+ u16 vlan_mask[VLAN_N_VID];
+ DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID);
+
+ /* stats */
+ const struct lan966x_stat_layout *stats_layout;
+ u32 num_stats;
+
+ /* workqueue for reading stats */
+ struct mutex stats_lock;
+ u64 *stats;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+
+ /* interrupts */
+ int xtr_irq;
+ int ana_irq;
+
+ /* workqueue for fdb */
+ struct workqueue_struct *fdb_work;
+ struct list_head fdb_entries;
+
+ /* mdb */
+ struct list_head mdb_entries;
+ struct list_head pgid_entries;
+};
+
+struct lan966x_port_config {
+ phy_interface_t portmode;
+ const unsigned long *advertising;
+ int speed;
+ int duplex;
+ u32 pause;
+ bool inband;
+ bool autoneg;
+};
+
+struct lan966x_port {
+ struct net_device *dev;
+ struct lan966x *lan966x;
+
+ u8 chip_port;
+ u16 pvid;
+ u16 vid;
+ bool vlan_aware;
+
+ bool learn_ena;
+
+ struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
+ struct lan966x_port_config config;
+ struct phylink *phylink;
+ struct phy *serdes;
+ struct fwnode_handle *fwnode;
+};
+
+extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
+extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
+extern const struct ethtool_ops lan966x_ethtool_ops;
+
+bool lan966x_netdevice_check(const struct net_device *dev);
+
+void lan966x_register_notifier_blocks(void);
+void lan966x_unregister_notifier_blocks(void);
+
+void lan966x_stats_get(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+int lan966x_stats_init(struct lan966x *lan966x);
+
+void lan966x_port_config_down(struct lan966x_port *port);
+void lan966x_port_config_up(struct lan966x_port *port);
+void lan966x_port_status_get(struct lan966x_port *port,
+ struct phylink_link_state *state);
+int lan966x_port_pcs_set(struct lan966x_port *port,
+ struct lan966x_port_config *config);
+void lan966x_port_init(struct lan966x_port *port);
+
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid);
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid);
+void lan966x_mac_init(struct lan966x *lan966x);
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing);
+int lan966x_mac_del_entry(struct lan966x *lan966x,
+ const unsigned char *addr,
+ u16 vid);
+int lan966x_mac_add_entry(struct lan966x *lan966x,
+ struct lan966x_port *port,
+ const unsigned char *addr,
+ u16 vid);
+void lan966x_mac_purge_entries(struct lan966x *lan966x);
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
+
+void lan966x_vlan_init(struct lan966x *lan966x);
+void lan966x_vlan_port_apply(struct lan966x_port *port);
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware);
+int lan966x_vlan_port_set_vid(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid);
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid);
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid);
+int lan966x_fdb_init(struct lan966x *lan966x);
+void lan966x_fdb_deinit(struct lan966x *lan966x);
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info);
+
+void lan966x_mdb_init(struct lan966x *lan966x);
+void lan966x_mdb_deinit(struct lan966x *lan966x);
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj);
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj);
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid);
+
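+/* Compute the iomem address of a register: @id and @tinst select the
+ * target's mapped region, @gbase/@ginst/@gwidth locate the register group
+ * instance and @raddr/@rinst/@rwidth the register within it. The WARN_ONs
+ * catch instance indexes outside @tcnt/@gcnt/@rcnt.
+ */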
+static inline void __iomem *lan_addr(void __iomem *base[],
+ int id, int tinst, int tcnt,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((tinst) >= tcnt);
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base[id + (tinst)] +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline u32 lan_rd(struct lan966x *lan966x, int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline void lan_wr(u32 val, struct lan966x *lan966x,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ writel(val, lan_addr(lan966x->regs, id, tinst, tcnt,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth));
+}
+
+static inline void lan_rmw(u32 val, u32 mask, struct lan966x *lan966x,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+
+ nval = readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+#endif /* __LAN966X_MAIN_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
new file mode 100644
index 000000000000..c68d0a99d292
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
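+/* A pgid entry tracks one general purpose PGID table index; entries with
+ * the same destination port mask share the index via the refcount.
+ */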
+struct lan966x_pgid_entry {
+ struct list_head list;
+ int index;
+ refcount_t refcount;
+ u16 ports;
+};
+
+struct lan966x_mdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN];
+ u16 vid;
+ u16 ports;
+ struct lan966x_pgid_entry *pgid;
+ u8 cpu_copy;
+};
+
+void lan966x_mdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->mdb_entries);
+ INIT_LIST_HEAD(&lan966x->pgid_entries);
+}
+
+static void lan966x_mdb_purge_mdb_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry, *tmp;
+
+ list_for_each_entry_safe(mdb_entry, tmp, &lan966x->mdb_entries, list) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ }
+}
+
+static void lan966x_mdb_purge_pgid_entries(struct lan966x *lan966x)
+{
+ struct lan966x_pgid_entry *pgid_entry, *tmp;
+
+ list_for_each_entry_safe(pgid_entry, tmp, &lan966x->pgid_entries, list) {
+ list_del(&pgid_entry->list);
+ kfree(pgid_entry);
+ }
+}
+
+void lan966x_mdb_deinit(struct lan966x *lan966x)
+{
+ lan966x_mdb_purge_mdb_entries(lan966x);
+ lan966x_mdb_purge_pgid_entries(lan966x);
+}
+
+static struct lan966x_mdb_entry *
+lan966x_mdb_entry_get(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (ether_addr_equal(mdb_entry->mac, mac) &&
+ mdb_entry->vid == vid)
+ return mdb_entry;
+ }
+
+ return NULL;
+}
+
+static struct lan966x_mdb_entry *
+lan966x_mdb_entry_add(struct lan966x *lan966x,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ether_addr_copy(mdb_entry->mac, mdb->addr);
+ mdb_entry->vid = mdb->vid;
+
+ list_add_tail(&mdb_entry->list, &lan966x->mdb_entries);
+
+ return mdb_entry;
+}
+
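+/* For IPv4/IPv6 multicast entries the destination port mask is packed into
+ * the leading bytes of the MAC address, which are otherwise fixed by the
+ * multicast prefix implied by the entry type.
+ */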
+static void lan966x_mdb_encode_mac(unsigned char *mac,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ ether_addr_copy(mac, mdb_entry->mac);
+
+ if (type == ENTRYTYPE_MACV4) {
+ mac[0] = 0;
+ mac[1] = mdb_entry->ports >> 8;
+ mac[2] = mdb_entry->ports & 0xff;
+ } else if (type == ENTRYTYPE_MACV6) {
+ mac[0] = mdb_entry->ports >> 8;
+ mac[1] = mdb_entry->ports & 0xff;
+ }
+}
+
+static int lan966x_mdb_ip_add(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ bool cpu_copy = false;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry) {
+ mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+ } else {
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+
+ if (cpu_port)
+ mdb_entry->cpu_copy++;
+ else
+ mdb_entry->ports |= BIT(port->chip_port);
+
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ cpu_copy = true;
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ return lan966x_mac_ip_learn(lan966x, cpu_copy,
+ mac, mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_ip_del(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ u16 ports;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry)
+ return -ENOENT;
+
+ ports = mdb_entry->ports;
+ if (cpu_port) {
+ /* If there are still other references to the CPU port then
+ * there is no point in deleting and re-adding the same entry
+ */
+ mdb_entry->cpu_copy--;
+ if (mdb_entry->cpu_copy)
+ return 0;
+ } else {
+ ports &= ~BIT(port->chip_port);
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports = ports;
+
+ if (!mdb_entry->ports && !mdb_entry->cpu_copy) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return 0;
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ return lan966x_mac_ip_learn(lan966x, mdb_entry->cpu_copy,
+ mac, mdb_entry->vid, type);
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_add(struct lan966x *lan966x, int index, u16 ports)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+
+ pgid_entry = kzalloc(sizeof(*pgid_entry), GFP_KERNEL);
+ if (!pgid_entry)
+ return ERR_PTR(-ENOMEM);
+
+ pgid_entry->ports = ports;
+ pgid_entry->index = index;
+ refcount_set(&pgid_entry->refcount, 1);
+
+ list_add_tail(&pgid_entry->list, &lan966x->pgid_entries);
+
+ return pgid_entry;
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_get(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ int index;
+
+ /* Try to find an existing pgid that uses the same ports as the
+ * mdb_entry
+ */
+ list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+ if (pgid_entry->ports == mdb_entry->ports) {
+ refcount_inc(&pgid_entry->refcount);
+ return pgid_entry;
+ }
+ }
+
+ /* Otherwise look for an unused pgid index and allocate an entry
+ * for it; if no index is free there are no more pgid resources left
+ */
+ for (index = PGID_GP_START; index < PGID_GP_END; index++) {
+ bool used = false;
+
+ list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+ if (pgid_entry->index == index) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used)
+ return lan966x_pgid_entry_add(lan966x, index,
+ mdb_entry->ports);
+ }
+
+ return ERR_PTR(-ENOSPC);
+}
+
+static void lan966x_pgid_entry_del(struct lan966x *lan966x,
+ struct lan966x_pgid_entry *pgid_entry)
+{
+ if (!refcount_dec_and_test(&pgid_entry->refcount))
+ return;
+
+ list_del(&pgid_entry->list);
+ kfree(pgid_entry);
+}
+
+static int lan966x_mdb_l2_add(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_pgid_entry *pgid_entry;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry) {
+ mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+ } else {
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+
+ if (cpu_port) {
+ mdb_entry->ports |= BIT(CPU_PORT);
+ mdb_entry->cpu_copy++;
+ } else {
+ mdb_entry->ports |= BIT(port->chip_port);
+ }
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry)) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return PTR_ERR(pgid_entry);
+ }
+ mdb_entry->pgid = pgid_entry;
+
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ mdb_entry->ports &= ~BIT(CPU_PORT);
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_l2_del(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_pgid_entry *pgid_entry;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ u16 ports;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry)
+ return -ENOENT;
+
+ ports = mdb_entry->ports;
+ if (cpu_port) {
+ /* If there are still other references to the CPU port then
+ * there is no point in deleting and re-adding the same entry
+ */
+ mdb_entry->cpu_copy--;
+ if (mdb_entry->cpu_copy)
+ return 0;
+
+ ports &= ~BIT(CPU_PORT);
+ } else {
+ ports &= ~BIT(port->chip_port);
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+
+ mdb_entry->ports = ports;
+
+ if (!mdb_entry->ports) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return 0;
+ }
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry)) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return PTR_ERR(pgid_entry);
+ }
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
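+/* 01:00:5e:xx:xx:xx is IPv4 multicast, 33:33:xx:xx:xx:xx is IPv6
+ * multicast; anything else is programmed as a locked L2 entry.
+ */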
+static enum macaccess_entry_type
+lan966x_mdb_classify(const unsigned char *mac)
+{
+ if (mac[0] == 0x01 && mac[1] == 0x00 && mac[2] == 0x5e)
+ return ENTRYTYPE_MACV4;
+ if (mac[0] == 0x33 && mac[1] == 0x33)
+ return ENTRYTYPE_MACV6;
+ return ENTRYTYPE_LOCKED;
+}
+
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ enum macaccess_entry_type type;
+
+ /* The addition is split between ipv4/ipv6 and l2 entries: ipv4/ipv6
+ * entries don't require a pgid entry, while l2 entries do
+ */
+ type = lan966x_mdb_classify(mdb->addr);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ return lan966x_mdb_ip_add(port, mdb, type);
+
+ return lan966x_mdb_l2_add(port, mdb, type);
+}
+
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ enum macaccess_entry_type type;
+
+ /* The removal is split between ipv4/ipv6 and l2 entries: ipv4/ipv6
+ * entries don't require a pgid entry, while l2 entries do
+ */
+ type = lan966x_mdb_classify(mdb->addr);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ return lan966x_mdb_ip_del(port, mdb, type);
+
+ return lan966x_mdb_l2_del(port, mdb, type);
+}
+
+static void lan966x_mdb_ip_cpu_copy(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_mac_ip_learn(lan966x, true, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_copy(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports |= BIT(CPU_PORT);
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry))
+ return;
+
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
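+/* Re-add the CPU as a destination for all MDB entries on @vid that have
+ * outstanding CPU copy references.
+ */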
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+ continue;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ lan966x_mdb_ip_cpu_copy(lan966x, mdb_entry, type);
+ else
+ lan966x_mdb_l2_cpu_copy(lan966x, mdb_entry, type);
+ }
+}
+
+static void lan966x_mdb_ip_cpu_remove(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_mac_ip_learn(lan966x, false, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_remove(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports &= ~BIT(CPU_PORT);
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry))
+ return;
+
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
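+/* Drop the CPU as a destination for all MDB entries on @vid while keeping
+ * the cpu_copy reference counts, so the copies can be restored later by
+ * lan966x_mdb_write_entries().
+ */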
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+ continue;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ lan966x_mdb_ip_cpu_remove(lan966x, mdb_entry, type);
+ else
+ lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type);
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
new file mode 100644
index 000000000000..b66a9aa00ea4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/phylink.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+#include <linux/sfp.h>
+
+#include "lan966x_main.h"
+
+static void lan966x_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
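+ /* Nothing to do here; the port is configured through the PCS
+ * callbacks and mac_link_up().
+ */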
+}
+
+static int lan966x_phylink_mac_prepare(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t iface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ int err;
+
+ if (port->serdes) {
+ err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
+ iface);
+ if (err) {
+ netdev_err(to_net_dev(config->dev),
+ "Could not set mode of SerDes\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void lan966x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ struct lan966x_port_config *port_config = &port->config;
+
+ port_config->duplex = duplex;
+ port_config->speed = speed;
+ port_config->pause = 0;
+ port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0;
+ port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0;
+
+ lan966x_port_config_up(port);
+}
+
+static void lan966x_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_port_config_down(port);
+
+ /* Take PCS out of reset */
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+}
+
+static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct lan966x_port, phylink_pcs);
+}
+
+static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct lan966x_port *port = lan966x_pcs_to_port(pcs);
+
+ lan966x_port_status_get(port, state);
+}
+
+static int lan966x_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct lan966x_port *port = lan966x_pcs_to_port(pcs);
+ struct lan966x_port_config config;
+ int ret;
+
+ config = port->config;
+ config.portmode = interface;
+ config.inband = phylink_autoneg_inband(mode);
+ config.autoneg = phylink_test(advertising, Autoneg);
+ config.advertising = advertising;
+
+ ret = lan966x_port_pcs_set(port, &config);
+ if (ret)
+ netdev_err(port->dev, "port PCS config failed: %d\n", ret);
+
+ return ret;
+}
+
+static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs)
+{
+ /* Currently not used */
+}
+
+const struct phylink_mac_ops lan966x_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = lan966x_phylink_mac_config,
+ .mac_prepare = lan966x_phylink_mac_prepare,
+ .mac_link_down = lan966x_phylink_mac_link_down,
+ .mac_link_up = lan966x_phylink_mac_link_up,
+};
+
+const struct phylink_pcs_ops lan966x_phylink_pcs_ops = {
+ .pcs_get_state = lan966x_pcs_get_state,
+ .pcs_config = lan966x_pcs_config,
+ .pcs_an_restart = lan966x_pcs_aneg_restart,
+};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
new file mode 100644
index 000000000000..237555845a52
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+
+#include "lan966x_main.h"
+
+/* Watermark encode */
+#define MULTIPLIER_BIT BIT(8)
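+/* Watermarks are programmed in 64-byte cells; values of 256 cells or more
+ * are encoded as value / 16 with the multiplier bit set, saturating at the
+ * largest encodable value.
+ */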
+static u32 lan966x_wm_enc(u32 value)
+{
+ value /= LAN966X_BUFFER_CELL_SZ;
+
+ if (value >= MULTIPLIER_BIT) {
+ value /= 16;
+ if (value >= MULTIPLIER_BIT)
+ value = (MULTIPLIER_BIT - 1);
+
+ value |= MULTIPLIER_BIT;
+ }
+
+ return value;
+}
+
+static void lan966x_port_link_down(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val, delay = 0;
+
+ /* 0.5: Disable any AFI */
+ lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(1) |
+ AFI_PORT_CFG_FRM_OUT_MAX_SET(0),
+ AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+ AFI_PORT_CFG_FRM_OUT_MAX,
+ lan966x, AFI_PORT_CFG(port->chip_port));
+
+ /* Wait for the AFI_PORT_FRM_OUT count to become 0 for the port */
+ while (true) {
+ val = lan_rd(lan966x, AFI_PORT_FRM_OUT(port->chip_port));
+ if (!AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(val))
+ break;
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ delay++;
+ if (delay == 2000) {
+ pr_err("AFI timeout chip port %u", port->chip_port);
+ break;
+ }
+ }
+
+ delay = 0;
+
+ /* 1: Reset the PCS Rx clock domain */
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(1),
+ DEV_CLOCK_CFG_PCS_RX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* 2: Disable MAC frame reception */
+ lan_rmw(DEV_MAC_ENA_CFG_RX_ENA_SET(0),
+ DEV_MAC_ENA_CFG_RX_ENA,
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ /* 3: Disable traffic being sent to or from switch port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 4: Disable dequeuing from the egress queues */
+ lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(1),
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ lan966x, QSYS_PORT_MODE(port->chip_port));
+
+ /* 5: Disable Flowcontrol */
+ lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(0),
+ SYS_PAUSE_CFG_PAUSE_ENA,
+ lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+ /* 5.1: Disable PFC */
+ lan_rmw(QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0),
+ QSYS_SW_PORT_MODE_TX_PFC_ENA,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 6: Wait a worst case time 8ms (jumbo/10Mbit) */
+ usleep_range(8 * USEC_PER_MSEC, 9 * USEC_PER_MSEC);
+
+ /* 7: Disable HDX backpressure */
+ lan_rmw(SYS_FRONT_PORT_MODE_HDX_MODE_SET(0),
+ SYS_FRONT_PORT_MODE_HDX_MODE,
+ lan966x, SYS_FRONT_PORT_MODE(port->chip_port));
+
+ /* 8: Flush the queues associated with the port */
+ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3),
+ QSYS_SW_PORT_MODE_AGING_MODE,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 9: Enable dequeuing from the egress queues */
+ lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(0),
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ lan966x, QSYS_PORT_MODE(port->chip_port));
+
+ /* 10: Wait until flushing is complete */
+ while (true) {
+ val = lan_rd(lan966x, QSYS_SW_STATUS(port->chip_port));
+ if (!QSYS_SW_STATUS_EQ_AVAIL_GET(val))
+ break;
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ delay++;
+ if (delay == 2000) {
+ pr_err("Flush timeout chip port %u", port->chip_port);
+ break;
+ }
+ }
+
+ /* 11: Reset the Port and MAC clock domains */
+ lan_rmw(DEV_MAC_ENA_CFG_TX_ENA_SET(0),
+ DEV_MAC_ENA_CFG_TX_ENA,
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(1),
+ DEV_CLOCK_CFG_PORT_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ lan_rmw(DEV_CLOCK_CFG_MAC_TX_RST_SET(1) |
+ DEV_CLOCK_CFG_MAC_RX_RST_SET(1) |
+ DEV_CLOCK_CFG_PORT_RST_SET(1),
+ DEV_CLOCK_CFG_MAC_TX_RST |
+ DEV_CLOCK_CFG_MAC_RX_RST |
+ DEV_CLOCK_CFG_PORT_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* 12: Clear flushing */
+ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(2),
+ QSYS_SW_PORT_MODE_AGING_MODE,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* The port is disabled and flushed, now set up the port in the
+ * new operating mode
+ */
+}
+
+static void lan966x_port_link_up(struct lan966x_port *port)
+{
+ struct lan966x_port_config *config = &port->config;
+ struct lan966x *lan966x = port->lan966x;
+ int speed = 0, mode = 0;
+ int atop_wm = 0;
+
+ switch (config->speed) {
+ case SPEED_10:
+ speed = LAN966X_SPEED_10;
+ break;
+ case SPEED_100:
+ speed = LAN966X_SPEED_100;
+ break;
+ case SPEED_1000:
+ speed = LAN966X_SPEED_1000;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ break;
+ case SPEED_2500:
+ speed = LAN966X_SPEED_2500;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ break;
+ }
+
+ /* GIGA_MODE_ENA(1) also needs to be set regardless of the
+ * port speed for QSGMII ports.
+ */
+ if (config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+
+ lan_wr(config->duplex | mode,
+ lan966x, DEV_MAC_MODE_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_IFG_CFG_TX_IFG_SET(config->duplex ? 6 : 5) |
+ DEV_MAC_IFG_CFG_RX_IFG1_SET(config->speed == SPEED_10 ? 2 : 1) |
+ DEV_MAC_IFG_CFG_RX_IFG2_SET(2),
+ DEV_MAC_IFG_CFG_TX_IFG |
+ DEV_MAC_IFG_CFG_RX_IFG1 |
+ DEV_MAC_IFG_CFG_RX_IFG2,
+ lan966x, DEV_MAC_IFG_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_HDX_CFG_SEED_SET(4) |
+ DEV_MAC_HDX_CFG_SEED_LOAD_SET(1),
+ DEV_MAC_HDX_CFG_SEED |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ lan966x, DEV_MAC_HDX_CFG(port->chip_port));
+
+ if (config->portmode == PHY_INTERFACE_MODE_GMII) {
+ if (config->speed == SPEED_1000)
+ lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(1),
+ CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
+ lan966x,
+ CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
+ else
+ lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(0),
+ CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
+ lan966x,
+ CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
+ }
+
+ /* No PFC */
+ lan_wr(ANA_PFC_CFG_FC_LINK_SPEED_SET(speed),
+ lan966x, ANA_PFC_CFG(port->chip_port));
+
+ lan_rmw(DEV_PCS1G_CFG_PCS_ENA_SET(1),
+ DEV_PCS1G_CFG_PCS_ENA,
+ lan966x, DEV_PCS1G_CFG(port->chip_port));
+
+ lan_rmw(DEV_PCS1G_SD_CFG_SD_ENA_SET(0),
+ DEV_PCS1G_SD_CFG_SD_ENA,
+ lan966x, DEV_PCS1G_SD_CFG(port->chip_port));
+
+ /* Set Pause WM hysteresis, start/stop are in 1518 byte units */
+ lan_wr(SYS_PAUSE_CFG_PAUSE_ENA_SET(1) |
+ SYS_PAUSE_CFG_PAUSE_STOP_SET(lan966x_wm_enc(4 * 1518)) |
+ SYS_PAUSE_CFG_PAUSE_START_SET(lan966x_wm_enc(6 * 1518)),
+ lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ lan_wr(0, lan966x, DEV_FC_MAC_LOW_CFG(port->chip_port));
+ lan_wr(0, lan966x, DEV_FC_MAC_HIGH_CFG(port->chip_port));
+
+ /* Flow control */
+ lan_rmw(SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(speed) |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(7) |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(1) |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(0xffff) |
+ SYS_MAC_FC_CFG_RX_FC_ENA_SET(config->pause & MLO_PAUSE_RX ? 1 : 0) |
+ SYS_MAC_FC_CFG_TX_FC_ENA_SET(config->pause & MLO_PAUSE_TX ? 1 : 0),
+ SYS_MAC_FC_CFG_FC_LINK_SPEED |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG |
+ SYS_MAC_FC_CFG_RX_FC_ENA |
+ SYS_MAC_FC_CFG_TX_FC_ENA,
+ lan966x, SYS_MAC_FC_CFG(port->chip_port));
+
+ /* Tail dropping watermark */
+ atop_wm = lan966x->shared_queue_sz;
+
+ /* The total memory size is divided by the number of front ports plus
+ * the CPU port
+ */
+ lan_wr(lan966x_wm_enc(atop_wm / (lan966x->num_phys_ports + 1)), lan966x,
+ SYS_ATOP(port->chip_port));
+ lan_wr(lan966x_wm_enc(atop_wm), lan966x, SYS_ATOP_TOT_CFG);
+
+ /* Enable the MAC module; this needs to be done last */
+ lan_wr(DEV_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEV_MAC_ENA_CFG_TX_ENA_SET(1),
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ /* Take the clock out of reset */
+ lan_wr(DEV_CLOCK_CFG_LINK_SPEED_SET(speed),
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* Core: Enable port for frame transfer */
+ lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+ QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+ QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(0) |
+ AFI_PORT_CFG_FRM_OUT_MAX_SET(16),
+ AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+ AFI_PORT_CFG_FRM_OUT_MAX,
+ lan966x, AFI_PORT_CFG(port->chip_port));
+}
+
+void lan966x_port_config_down(struct lan966x_port *port)
+{
+ lan966x_port_link_down(port);
+}
+
+void lan966x_port_config_up(struct lan966x_port *port)
+{
+ lan966x_port_link_up(port);
+}
+
+void lan966x_port_status_get(struct lan966x_port *port,
+ struct phylink_link_state *state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool link_down;
+ u16 bmsr = 0;
+ u16 lp_adv;
+ u32 val;
+
+ val = lan_rd(lan966x, DEV_PCS1G_STICKY(port->chip_port));
+ link_down = DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(val);
+ if (link_down)
+ lan_wr(val, lan966x, DEV_PCS1G_STICKY(port->chip_port));
+
+ /* Get both current Link and Sync status */
+ val = lan_rd(lan966x, DEV_PCS1G_LINK_STATUS(port->chip_port));
+ state->link = DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(val) &&
+ DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val);
+ state->link &= !link_down;
+
+ /* Get PCS ANEG status register */
+ val = lan_rd(lan966x, DEV_PCS1G_ANEG_STATUS(port->chip_port));
+ /* Aneg complete provides more information */
+ if (DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(val)) {
+ state->an_complete = true;
+
+ bmsr |= state->link ? BMSR_LSTATUS : 0;
+ bmsr |= BMSR_ANEGCOMPLETE;
+
+ lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ } else {
+ if (!state->link)
+ return;
+
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ state->speed = SPEED_1000;
+ else if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+
+ state->duplex = DUPLEX_FULL;
+ }
+}
+
+int lan966x_port_pcs_set(struct lan966x_port *port,
+ struct lan966x_port_config *config)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool inband_aneg = false;
+ bool outband;
+
+ if (config->inband) {
+ if (config->portmode == PHY_INTERFACE_MODE_SGMII ||
+ config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+ else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX &&
+ config->autoneg)
+ inband_aneg = true; /* Clause-37 in-band-aneg */
+
+ outband = false;
+ } else {
+ outband = true;
+ }
+
+ /* Disable or enable inband */
+ lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband),
+ DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ lan966x, DEV_PCS1G_MODE_CFG(port->chip_port));
+
+ /* Enable PCS */
+ lan_wr(DEV_PCS1G_CFG_PCS_ENA_SET(1),
+ lan966x, DEV_PCS1G_CFG(port->chip_port));
+
+ if (inband_aneg) {
+ int adv = phylink_mii_c22_pcs_encode_advertisement(config->portmode,
+ config->advertising);
+ if (adv >= 0)
+ /* Enable in-band aneg */
+ lan_wr(DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) |
+ DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
+ DEV_PCS1G_ANEG_CFG_ENA_SET(1) |
+ DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(1),
+ lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
+ } else {
+ lan_wr(0, lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
+ }
+
+ /* Take PCS out of reset */
+ lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(2) |
+ DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ DEV_CLOCK_CFG_LINK_SPEED |
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ port->config = *config;
+
+ return 0;
+}
+
+void lan966x_port_init(struct lan966x_port *port)
+{
+ struct lan966x_port_config *config = &port->config;
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(0),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_port_config_down(port);
+
+ if (config->portmode != PHY_INTERFACE_MODE_QSGMII)
+ return;
+
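+ /* QSGMII ports need the PCS taken out of reset with the clock
+ * already running at 1G speed
+ */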
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0) |
+ DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000),
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST |
+ DEV_CLOCK_CFG_LINK_SPEED,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
new file mode 100644
index 000000000000..797560172aca
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -0,0 +1,871 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+/* This file is autogenerated by cml-utils 2021-10-10 13:25:08 +0200.
+ * Commit ID: 26db2002924973d36a30b369c94f025a678fe9ea (dirty)
+ */
+
+#ifndef _LAN966X_REGS_H_
+#define _LAN966X_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum lan966x_target {
+ TARGET_AFI = 2,
+ TARGET_ANA = 3,
+ TARGET_CHIP_TOP = 5,
+ TARGET_CPU = 6,
+ TARGET_DEV = 13,
+ TARGET_GCB = 27,
+ TARGET_ORG = 36,
+ TARGET_QS = 42,
+ TARGET_QSYS = 46,
+ TARGET_REW = 47,
+ TARGET_SYS = 52,
+ NUM_TARGETS = 66
+};
+
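+/* Each register macro below expands to the (id, tinst, tcnt, gbase, ginst,
+ * gcnt, gwidth, raddr, rinst, rcnt, rwidth) argument list consumed by
+ * lan_addr() and the lan_rd()/lan_wr()/lan_rmw() accessors in
+ * lan966x_main.h.
+ */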
+#define __REG(...) __VA_ARGS__
+
+/* AFI:PORT_TBL:PORT_FRM_OUT */
+#define AFI_PORT_FRM_OUT(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 0, 0, 1, 4)
+
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT GENMASK(26, 16)
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(x)\
+ FIELD_PREP(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(x)\
+ FIELD_GET(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
+
+/* AFI:PORT_TBL:PORT_CFG */
+#define AFI_PORT_CFG(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 4, 0, 1, 4)
+
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ BIT(16)
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(x)\
+ FIELD_PREP(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_GET(x)\
+ FIELD_GET(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
+
+#define AFI_PORT_CFG_FRM_OUT_MAX GENMASK(9, 0)
+#define AFI_PORT_CFG_FRM_OUT_MAX_SET(x)\
+ FIELD_PREP(AFI_PORT_CFG_FRM_OUT_MAX, x)
+#define AFI_PORT_CFG_FRM_OUT_MAX_GET(x)\
+ FIELD_GET(AFI_PORT_CFG_FRM_OUT_MAX, x)
+
+/* ANA:ANA:ADVLEARN */
+#define ANA_ADVLEARN __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 0, 0, 1, 4)
+
+#define ANA_ADVLEARN_VLAN_CHK BIT(0)
+#define ANA_ADVLEARN_VLAN_CHK_SET(x)\
+ FIELD_PREP(ANA_ADVLEARN_VLAN_CHK, x)
+#define ANA_ADVLEARN_VLAN_CHK_GET(x)\
+ FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x)
+
+/* ANA:ANA:VLANMASK */
+#define ANA_VLANMASK __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 8, 0, 1, 4)
+
+/* ANA:ANA:ANAINTR */
+#define ANA_ANAINTR __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4)
+
+#define ANA_ANAINTR_INTR BIT(1)
+#define ANA_ANAINTR_INTR_SET(x)\
+ FIELD_PREP(ANA_ANAINTR_INTR, x)
+#define ANA_ANAINTR_INTR_GET(x)\
+ FIELD_GET(ANA_ANAINTR_INTR, x)
+
+#define ANA_ANAINTR_INTR_ENA BIT(0)
+#define ANA_ANAINTR_INTR_ENA_SET(x)\
+ FIELD_PREP(ANA_ANAINTR_INTR_ENA, x)
+#define ANA_ANAINTR_INTR_ENA_GET(x)\
+ FIELD_GET(ANA_ANAINTR_INTR_ENA, x)
+
+/* ANA:ANA:AUTOAGE */
+#define ANA_AUTOAGE __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 44, 0, 1, 4)
+
+#define ANA_AUTOAGE_AGE_PERIOD GENMASK(20, 1)
+#define ANA_AUTOAGE_AGE_PERIOD_SET(x)\
+ FIELD_PREP(ANA_AUTOAGE_AGE_PERIOD, x)
+#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\
+ FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x)
+
+/* ANA:ANA:FLOODING */
+#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4)
+
+#define ANA_FLOODING_FLD_UNICAST GENMASK(17, 12)
+#define ANA_FLOODING_FLD_UNICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_UNICAST, x)
+#define ANA_FLOODING_FLD_UNICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_UNICAST, x)
+
+#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6)
+#define ANA_FLOODING_FLD_BROADCAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x)
+#define ANA_FLOODING_FLD_BROADCAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_BROADCAST, x)
+
+#define ANA_FLOODING_FLD_MULTICAST GENMASK(5, 0)
+#define ANA_FLOODING_FLD_MULTICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_MULTICAST, x)
+#define ANA_FLOODING_FLD_MULTICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_MULTICAST, x)
+
+/* ANA:ANA:FLOODING_IPMC */
+#define ANA_FLOODING_IPMC __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 100, 0, 1, 4)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL GENMASK(23, 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA GENMASK(17, 12)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL GENMASK(11, 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA GENMASK(5, 0)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
+
+/* ANA:PGID:PGID */
+#define ANA_PGID(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 0, 0, 1, 4)
+
+#define ANA_PGID_PGID GENMASK(8, 0)
+#define ANA_PGID_PGID_SET(x)\
+ FIELD_PREP(ANA_PGID_PGID, x)
+#define ANA_PGID_PGID_GET(x)\
+ FIELD_GET(ANA_PGID_PGID, x)
+
+/* ANA:PGID:PGID_CFG */
+#define ANA_PGID_CFG(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 4, 0, 1, 4)
+
+#define ANA_PGID_CFG_OBEY_VLAN BIT(0)
+#define ANA_PGID_CFG_OBEY_VLAN_SET(x)\
+ FIELD_PREP(ANA_PGID_CFG_OBEY_VLAN, x)
+#define ANA_PGID_CFG_OBEY_VLAN_GET(x)\
+ FIELD_GET(ANA_PGID_CFG_OBEY_VLAN, x)
+
+/* ANA:ANA_TABLES:MACHDATA */
+#define ANA_MACHDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 40, 0, 1, 4)
+
+/* ANA:ANA_TABLES:MACLDATA */
+#define ANA_MACLDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 44, 0, 1, 4)
+
+/* ANA:ANA_TABLES:MACACCESS */
+#define ANA_MACACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 48, 0, 1, 4)
+
+#define ANA_MACACCESS_CHANGE2SW BIT(17)
+#define ANA_MACACCESS_CHANGE2SW_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_CHANGE2SW, x)
+#define ANA_MACACCESS_CHANGE2SW_GET(x)\
+ FIELD_GET(ANA_MACACCESS_CHANGE2SW, x)
+
+#define ANA_MACACCESS_MAC_CPU_COPY BIT(16)
+#define ANA_MACACCESS_MAC_CPU_COPY_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_MAC_CPU_COPY, x)
+#define ANA_MACACCESS_MAC_CPU_COPY_GET(x)\
+ FIELD_GET(ANA_MACACCESS_MAC_CPU_COPY, x)
+
+#define ANA_MACACCESS_VALID BIT(12)
+#define ANA_MACACCESS_VALID_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_VALID, x)
+#define ANA_MACACCESS_VALID_GET(x)\
+ FIELD_GET(ANA_MACACCESS_VALID, x)
+
+#define ANA_MACACCESS_ENTRYTYPE GENMASK(11, 10)
+#define ANA_MACACCESS_ENTRYTYPE_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_ENTRYTYPE, x)
+#define ANA_MACACCESS_ENTRYTYPE_GET(x)\
+ FIELD_GET(ANA_MACACCESS_ENTRYTYPE, x)
+
+#define ANA_MACACCESS_DEST_IDX GENMASK(9, 4)
+#define ANA_MACACCESS_DEST_IDX_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_DEST_IDX, x)
+#define ANA_MACACCESS_DEST_IDX_GET(x)\
+ FIELD_GET(ANA_MACACCESS_DEST_IDX, x)
+
+#define ANA_MACACCESS_MAC_TABLE_CMD GENMASK(3, 0)
+#define ANA_MACACCESS_MAC_TABLE_CMD_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_MAC_TABLE_CMD, x)
+#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\
+ FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x)
+
+/* ANA:ANA_TABLES:MACTINDX */
+#define ANA_MACTINDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 52, 0, 1, 4)
+
+#define ANA_MACTINDX_BUCKET GENMASK(12, 11)
+#define ANA_MACTINDX_BUCKET_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_BUCKET, x)
+#define ANA_MACTINDX_BUCKET_GET(x)\
+ FIELD_GET(ANA_MACTINDX_BUCKET, x)
+
+#define ANA_MACTINDX_M_INDEX GENMASK(10, 0)
+#define ANA_MACTINDX_M_INDEX_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_M_INDEX, x)
+#define ANA_MACTINDX_M_INDEX_GET(x)\
+ FIELD_GET(ANA_MACTINDX_M_INDEX, x)
+
+/* ANA:ANA_TABLES:VLAN_PORT_MASK */
+#define ANA_VLAN_PORT_MASK __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 56, 0, 1, 4)
+
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK GENMASK(8, 0)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(x)\
+ FIELD_PREP(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_GET(x)\
+ FIELD_GET(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+
+/* ANA:ANA_TABLES:VLANACCESS */
+#define ANA_VLANACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 60, 0, 1, 4)
+
+#define ANA_VLANACCESS_VLAN_TBL_CMD GENMASK(1, 0)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_SET(x)\
+ FIELD_PREP(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_GET(x)\
+ FIELD_GET(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+
+/* ANA:ANA_TABLES:VLANTIDX */
+#define ANA_VLANTIDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 64, 0, 1, 4)
+
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS BIT(18)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+
+#define ANA_VLANTIDX_V_INDEX GENMASK(11, 0)
+#define ANA_VLANTIDX_V_INDEX_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_V_INDEX, x)
+#define ANA_VLANTIDX_V_INDEX_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_V_INDEX, x)
+
+/* ANA:PORT:VLAN_CFG */
+#define ANA_VLAN_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 0, 0, 1, 4)
+
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+
+#define ANA_VLAN_CFG_VLAN_POP_CNT GENMASK(19, 18)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+
+#define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0)
+#define ANA_VLAN_CFG_VLAN_VID_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x)
+#define ANA_VLAN_CFG_VLAN_VID_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_VID, x)
+
+/* ANA:PORT:DROP_CFG */
+#define ANA_DROP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 4, 0, 1, 4)
+
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+
+/* ANA:PORT:CPU_FWD_CFG */
+#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
+
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3)
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
+
+/* ANA:PORT:CPU_FWD_BPDU_CFG */
+#define ANA_CPU_FWD_BPDU_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 100, 0, 1, 4)
+
+/* ANA:PORT:PORT_CFG */
+#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4)
+
+#define ANA_PORT_CFG_LEARNAUTO BIT(6)
+#define ANA_PORT_CFG_LEARNAUTO_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x)
+#define ANA_PORT_CFG_LEARNAUTO_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_LEARNAUTO, x)
+
+#define ANA_PORT_CFG_LEARN_ENA BIT(5)
+#define ANA_PORT_CFG_LEARN_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_LEARN_ENA, x)
+#define ANA_PORT_CFG_LEARN_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_LEARN_ENA, x)
+
+#define ANA_PORT_CFG_RECV_ENA BIT(4)
+#define ANA_PORT_CFG_RECV_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_RECV_ENA, x)
+#define ANA_PORT_CFG_RECV_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_RECV_ENA, x)
+
+#define ANA_PORT_CFG_PORTID_VAL GENMASK(3, 0)
+#define ANA_PORT_CFG_PORTID_VAL_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_PORTID_VAL, x)
+#define ANA_PORT_CFG_PORTID_VAL_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x)
+
+/* ANA:PFC:PFC_CFG */
+#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4)
+
+#define ANA_PFC_CFG_FC_LINK_SPEED GENMASK(1, 0)
+#define ANA_PFC_CFG_FC_LINK_SPEED_SET(x)\
+ FIELD_PREP(ANA_PFC_CFG_FC_LINK_SPEED, x)
+#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\
+ FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x)
+
+/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */
+#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4)
+
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA BIT(0)
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(x)\
+ FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_GET(x)\
+ FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
+
+/* DEV:PORT_MODE:CLOCK_CFG */
+#define DEV_CLOCK_CFG(t) __REG(TARGET_DEV, t, 8, 0, 0, 1, 28, 0, 0, 1, 4)
+
+#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
+#define DEV_CLOCK_CFG_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_MAC_TX_RST, x)
+#define DEV_CLOCK_CFG_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_MAC_TX_RST, x)
+
+#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
+#define DEV_CLOCK_CFG_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_MAC_RX_RST, x)
+#define DEV_CLOCK_CFG_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_MAC_RX_RST, x)
+
+#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
+#define DEV_CLOCK_CFG_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PCS_TX_RST, x)
+#define DEV_CLOCK_CFG_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PCS_TX_RST, x)
+
+#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
+#define DEV_CLOCK_CFG_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PCS_RX_RST, x)
+#define DEV_CLOCK_CFG_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PCS_RX_RST, x)
+
+#define DEV_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_CLOCK_CFG_PORT_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PORT_RST, x)
+#define DEV_CLOCK_CFG_PORT_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PORT_RST, x)
+
+#define DEV_CLOCK_CFG_LINK_SPEED GENMASK(1, 0)
+#define DEV_CLOCK_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_LINK_SPEED, x)
+#define DEV_CLOCK_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_LINK_SPEED, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV_MAC_ENA_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 0, 0, 1, 4)
+
+#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_ENA_CFG_RX_ENA, x)
+#define DEV_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_ENA_CFG_TX_ENA, x)
+#define DEV_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV_MAC_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 4, 0, 1, 4)
+
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 8, 0, 1, 4)
+
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4)
+
+#define DEV_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEV_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_TX_IFG, x)
+#define DEV_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEV_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEV_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG2, x)
+#define DEV_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEV_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEV_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG1, x)
+#define DEV_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG1, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV_MAC_HDX_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 24, 0, 1, 4)
+
+#define DEV_MAC_HDX_CFG_SEED GENMASK(23, 16)
+#define DEV_MAC_HDX_CFG_SEED_SET(x)\
+ FIELD_PREP(DEV_MAC_HDX_CFG_SEED, x)
+#define DEV_MAC_HDX_CFG_SEED_GET(x)\
+ FIELD_GET(DEV_MAC_HDX_CFG_SEED, x)
+
+#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_MAC_HDX_CFG_SEED_LOAD_SET(x)\
+ FIELD_PREP(DEV_MAC_HDX_CFG_SEED_LOAD, x)
+#define DEV_MAC_HDX_CFG_SEED_LOAD_GET(x)\
+ FIELD_GET(DEV_MAC_HDX_CFG_SEED_LOAD, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_LOW_CFG */
+#define DEV_FC_MAC_LOW_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 32, 0, 1, 4)
+
+/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_HIGH_CFG */
+#define DEV_FC_MAC_HIGH_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 36, 0, 1, 4)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV_PCS1G_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 0, 0, 1, 4)
+
+#define DEV_PCS1G_CFG_PCS_ENA BIT(0)
+#define DEV_PCS1G_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_CFG_PCS_ENA, x)
+#define DEV_PCS1G_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_CFG_PCS_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV_PCS1G_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 4, 0, 1, 4)
+
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4)
+
+#define DEV_PCS1G_SD_CFG_SD_ENA BIT(0)
+#define DEV_PCS1G_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_SD_CFG_SD_ENA, x)
+#define DEV_PCS1G_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_SD_CFG_SD_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 12, 0, 1, 4)
+
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT BIT(1)
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
+
+#define DEV_PCS1G_ANEG_CFG_ENA BIT(0)
+#define DEV_PCS1G_ANEG_CFG_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_ENA, x)
+#define DEV_PCS1G_ANEG_CFG_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 32, 0, 1, 4)
+
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV GENMASK(31, 16)
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
+
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 40, 0, 1, 4)
+
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\
+ FIELD_PREP(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\
+ FIELD_GET(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
+
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\
+ FIELD_PREP(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
+ FIELD_GET(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV_PCS1G_STICKY(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 48, 0, 1, 4)
+
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
+ FIELD_PREP(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
+ FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+
+/* DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+
+#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_MODE, x)
+#define QS_XTR_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_MODE, x)
+
+#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+
+/* DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+
+/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+
+/* DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+
+#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_MODE, x)
+#define QS_INJ_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_MODE, x)
+
+#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+
+/* DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+
+#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x)
+#define QS_INJ_CTRL_GAP_SIZE_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x)
+
+#define QS_INJ_CTRL_EOF BIT(19)
+#define QS_INJ_CTRL_EOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_EOF, x)
+#define QS_INJ_CTRL_EOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_EOF, x)
+
+#define QS_INJ_CTRL_SOF BIT(18)
+#define QS_INJ_CTRL_SOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_SOF, x)
+#define QS_INJ_CTRL_SOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_SOF, x)
+
+#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x)
+#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
+
+/* DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+
+#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x)
+#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x)
+
+#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x)
+#define QS_INJ_STATUS_FIFO_RDY_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x)
+
+/* QSYS:SYSTEM:PORT_MODE */
+#define QSYS_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 0, r, 10, 4)
+
+#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
+#define QSYS_PORT_MODE_DEQUEUE_DIS_SET(x)\
+ FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_DIS, x)
+#define QSYS_PORT_MODE_DEQUEUE_DIS_GET(x)\
+ FIELD_GET(QSYS_PORT_MODE_DEQUEUE_DIS, x)
+
+/* QSYS:SYSTEM:SWITCH_PORT_MODE */
+#define QSYS_SW_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 80, r, 9, 4)
+
+#define QSYS_SW_PORT_MODE_PORT_ENA BIT(18)
+#define QSYS_SW_PORT_MODE_PORT_ENA_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_PORT_ENA, x)
+#define QSYS_SW_PORT_MODE_PORT_ENA_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_PORT_ENA, x)
+
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG GENMASK(16, 14)
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
+
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE BIT(12)
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
+
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA GENMASK(11, 4)
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
+
+#define QSYS_SW_PORT_MODE_AGING_MODE GENMASK(1, 0)
+#define QSYS_SW_PORT_MODE_AGING_MODE_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_AGING_MODE, x)
+#define QSYS_SW_PORT_MODE_AGING_MODE_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_AGING_MODE, x)
+
+/* QSYS:SYSTEM:SW_STATUS */
+#define QSYS_SW_STATUS(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 164, r, 9, 4)
+
+#define QSYS_SW_STATUS_EQ_AVAIL GENMASK(7, 0)
+#define QSYS_SW_STATUS_EQ_AVAIL_SET(x)\
+ FIELD_PREP(QSYS_SW_STATUS_EQ_AVAIL, x)
+#define QSYS_SW_STATUS_EQ_AVAIL_GET(x)\
+ FIELD_GET(QSYS_SW_STATUS_EQ_AVAIL, x)
+
+/* QSYS:SYSTEM:CPU_GROUP_MAP */
+#define QSYS_CPU_GROUP_MAP __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 204, 0, 1, 4)
+
+/* QSYS:RES_CTRL:RES_CFG */
+#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
+
+/* REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4)
+
+#define REW_PORT_VLAN_CFG_PORT_TPID GENMASK(31, 16)
+#define REW_PORT_VLAN_CFG_PORT_TPID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_TPID, x)
+#define REW_PORT_VLAN_CFG_PORT_TPID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_TPID, x)
+
+#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0)
+#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x)
+#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+
+/* REW:PORT:TAG_CFG */
+#define REW_TAG_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 4, 0, 1, 4)
+
+#define REW_TAG_CFG_TAG_CFG GENMASK(8, 7)
+#define REW_TAG_CFG_TAG_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_CFG, x)
+#define REW_TAG_CFG_TAG_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_CFG, x)
+
+#define REW_TAG_CFG_TAG_TPID_CFG GENMASK(6, 5)
+#define REW_TAG_CFG_TAG_TPID_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_TPID_CFG, x)
+#define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x)
+
+/* REW:PORT:PORT_CFG */
+#define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4)
+
+#define REW_PORT_CFG_NO_REWRITE BIT(0)
+#define REW_PORT_CFG_NO_REWRITE_SET(x)\
+ FIELD_PREP(REW_PORT_CFG_NO_REWRITE, x)
+#define REW_PORT_CFG_NO_REWRITE_GET(x)\
+ FIELD_GET(REW_PORT_CFG_NO_REWRITE, x)
+
+/* SYS:SYSTEM:RESET_CFG */
+#define SYS_RESET_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 0, 0, 1, 4)
+
+#define SYS_RESET_CFG_CORE_ENA BIT(0)
+#define SYS_RESET_CFG_CORE_ENA_SET(x)\
+ FIELD_PREP(SYS_RESET_CFG_CORE_ENA, x)
+#define SYS_RESET_CFG_CORE_ENA_GET(x)\
+ FIELD_GET(SYS_RESET_CFG_CORE_ENA, x)
+
+/* SYS:SYSTEM:PORT_MODE */
+#define SYS_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 44, r, 10, 4)
+
+#define SYS_PORT_MODE_INCL_INJ_HDR GENMASK(5, 4)
+#define SYS_PORT_MODE_INCL_INJ_HDR_SET(x)\
+ FIELD_PREP(SYS_PORT_MODE_INCL_INJ_HDR, x)
+#define SYS_PORT_MODE_INCL_INJ_HDR_GET(x)\
+ FIELD_GET(SYS_PORT_MODE_INCL_INJ_HDR, x)
+
+#define SYS_PORT_MODE_INCL_XTR_HDR GENMASK(3, 2)
+#define SYS_PORT_MODE_INCL_XTR_HDR_SET(x)\
+ FIELD_PREP(SYS_PORT_MODE_INCL_XTR_HDR, x)
+#define SYS_PORT_MODE_INCL_XTR_HDR_GET(x)\
+ FIELD_GET(SYS_PORT_MODE_INCL_XTR_HDR, x)
+
+/* SYS:SYSTEM:FRONT_PORT_MODE */
+#define SYS_FRONT_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 84, r, 8, 4)
+
+#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(1)
+#define SYS_FRONT_PORT_MODE_HDX_MODE_SET(x)\
+ FIELD_PREP(SYS_FRONT_PORT_MODE_HDX_MODE, x)
+#define SYS_FRONT_PORT_MODE_HDX_MODE_GET(x)\
+ FIELD_GET(SYS_FRONT_PORT_MODE_HDX_MODE, x)
+
+/* SYS:SYSTEM:FRM_AGING */
+#define SYS_FRM_AGING __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 116, 0, 1, 4)
+
+#define SYS_FRM_AGING_AGE_TX_ENA BIT(20)
+#define SYS_FRM_AGING_AGE_TX_ENA_SET(x)\
+ FIELD_PREP(SYS_FRM_AGING_AGE_TX_ENA, x)
+#define SYS_FRM_AGING_AGE_TX_ENA_GET(x)\
+ FIELD_GET(SYS_FRM_AGING_AGE_TX_ENA, x)
+
+/* SYS:SYSTEM:STAT_CFG */
+#define SYS_STAT_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 120, 0, 1, 4)
+
+#define SYS_STAT_CFG_STAT_VIEW GENMASK(9, 0)
+#define SYS_STAT_CFG_STAT_VIEW_SET(x)\
+ FIELD_PREP(SYS_STAT_CFG_STAT_VIEW, x)
+#define SYS_STAT_CFG_STAT_VIEW_GET(x)\
+ FIELD_GET(SYS_STAT_CFG_STAT_VIEW, x)
+
+/* SYS:PAUSE_CFG:PAUSE_CFG */
+#define SYS_PAUSE_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 0, r, 9, 4)
+
+#define SYS_PAUSE_CFG_PAUSE_START GENMASK(18, 10)
+#define SYS_PAUSE_CFG_PAUSE_START_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_START, x)
+#define SYS_PAUSE_CFG_PAUSE_START_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_START, x)
+
+#define SYS_PAUSE_CFG_PAUSE_STOP GENMASK(9, 1)
+#define SYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_STOP, x)
+#define SYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_STOP, x)
+
+#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
+#define SYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_ENA, x)
+#define SYS_PAUSE_CFG_PAUSE_ENA_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_ENA, x)
+
+/* SYS:PAUSE_CFG:ATOP */
+#define SYS_ATOP(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 40, r, 9, 4)
+
+/* SYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define SYS_ATOP_TOT_CFG __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 76, 0, 1, 4)
+
+/* SYS:PAUSE_CFG:MAC_FC_CFG */
+#define SYS_MAC_FC_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 80, r, 8, 4)
+
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED GENMASK(27, 26)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
+
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG GENMASK(25, 20)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
+
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
+
+#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17)
+#define SYS_MAC_FC_CFG_TX_FC_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_TX_FC_ENA, x)
+#define SYS_MAC_FC_CFG_TX_FC_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_TX_FC_ENA, x)
+
+#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16)
+#define SYS_MAC_FC_CFG_RX_FC_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_RX_FC_ENA, x)
+#define SYS_MAC_FC_CFG_RX_FC_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_RX_FC_ENA, x)
+
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG GENMASK(15, 0)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
+
+/* SYS:STAT:CNT */
+#define SYS_CNT(g) __REG(TARGET_SYS, 0, 1, 0, g, 896, 4, 0, 0, 1, 4)
+
+/* SYS:RAM_CTRL:RAM_INIT */
+#define SYS_RAM_INIT __REG(TARGET_SYS, 0, 1, 4432, 0, 1, 4, 0, 0, 1, 4)
+
+#define SYS_RAM_INIT_RAM_INIT BIT(1)
+#define SYS_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(SYS_RAM_INIT_RAM_INIT, x)
+#define SYS_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(SYS_RAM_INIT_RAM_INIT, x)
+
+#endif /* _LAN966X_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
new file mode 100644
index 000000000000..7de55f6a4da8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly;
+static struct notifier_block lan966x_switchdev_nb __read_mostly;
+static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
+
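+/* The PGID_MC/PGID_UC/PGID_BC port group entries hold one destination bit
+ * per chip port; the three helpers below enable flooding of a traffic class
+ * to a port by setting its bit in the matching mask, and disable it by
+ * clearing the bit.
+ */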
+static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_MC));
+}
+
+static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_UC));
+}
+
+static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_BC));
+}
+
+static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
+{
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
+ ANA_PORT_CFG_LEARN_ENA,
+ port->lan966x, ANA_PORT_CFG(port->chip_port));
+
+ port->learn_ena = enabled;
+}
+
+static void lan966x_port_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_MCAST_FLOOD)
+ lan966x_port_set_mcast_flood(port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ if (flags.mask & BR_FLOOD)
+ lan966x_port_set_ucast_flood(port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ lan966x_port_set_bcast_flood(port,
+ !!(flags.val & BR_BCAST_FLOOD));
+
+ if (flags.mask & BR_LEARNING)
+ lan966x_port_set_learning(port,
+ !!(flags.val & BR_LEARNING));
+}
+
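+/* Only the flood and learning flags can be offloaded; reject any other flag
+ * so the bridge layer reports the failure instead of silently accepting it.
+ */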
+static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
+ BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
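+/* Recompute the per-port source masks (PGID_SRC): a bridged port may forward
+ * to every other port currently in bridge_fwd_mask, never back to itself,
+ * and always to the CPU port.
+ */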
+static void lan966x_update_fwd_mask(struct lan966x *lan966x)
+{
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ struct lan966x_port *port = lan966x->ports[i];
+ unsigned long mask = 0;
+
+ if (port && lan966x->bridge_fwd_mask & BIT(i))
+ mask = lan966x->bridge_fwd_mask & ~BIT(i);
+
+ mask |= BIT(CPU_PORT);
+
+ lan_wr(ANA_PGID_PGID_SET(mask),
+ lan966x, ANA_PGID(PGID_SRC + i));
+ }
+}
+
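+/* Keep hardware learning enabled only in the LEARNING and FORWARDING STP
+ * states, and only if the bridge has learning enabled on the port; the port
+ * joins the forwarding mask only in the FORWARDING state.
+ */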
+static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool learn_ena = false;
+
+ if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
+ port->learn_ena)
+ learn_ena = true;
+
+ if (state == BR_STATE_FORWARDING)
+ lan966x->bridge_fwd_mask |= BIT(port->chip_port);
+ else
+ lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_update_fwd_mask(lan966x);
+}
+
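+/* The bridge hands the ageing time down in clock_t units; convert it to
+ * seconds before programming the MAC table ageing period.
+ */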
+static void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ lan966x_mac_set_ageing(port->lan966x, ageing_time);
+}
+
+static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err = 0;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ lan966x_port_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ lan966x_port_stp_state_set(port, attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ lan966x_port_ageing_set(port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
+ lan966x_vlan_port_apply(port);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_port_bridge_join(struct lan966x_port *port,
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ int err;
+
+ if (!lan966x->bridge_mask) {
+ lan966x->bridge = bridge;
+ } else {
+ if (lan966x->bridge != bridge) {
+			NL_SET_ERR_MSG_MOD(extack, "Adding port to a different bridge is not allowed");
+ return -ENODEV;
+ }
+ }
+
+ err = switchdev_bridge_port_offload(dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ return err;
+
+ lan966x->bridge_mask |= BIT(port->chip_port);
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+ lan966x_port_bridge_flags(port, flags);
+
+ return 0;
+}
+
+static void lan966x_port_bridge_leave(struct lan966x_port *port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask & ~BR_LEARNING;
+ lan966x_port_bridge_flags(port, flags);
+
+ lan966x->bridge_mask &= ~BIT(port->chip_port);
+
+ if (!lan966x->bridge_mask)
+ lan966x->bridge = NULL;
+
+ /* Set the port back to host mode */
+ lan966x_vlan_port_set_vlan_aware(port, false);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+}
+
+static int lan966x_port_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_port_bridge_join(port, info->upper_dev,
+ extack);
+ else
+ lan966x_port_bridge_leave(port, info->upper_dev);
+ }
+
+ return err;
+}
+
+static int lan966x_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking)
+ switchdev_bridge_port_unoffload(port->dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb);
+
+ return NOTIFY_DONE;
+}
+
+static int lan966x_foreign_bridging_check(struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = NULL;
+ bool has_foreign = false;
+ struct net_device *dev;
+ struct list_head *iter;
+
+ if (!netif_is_bridge_master(bridge))
+ return 0;
+
+ netdev_for_each_lower_dev(bridge, dev, iter) {
+ if (lan966x_netdevice_check(dev)) {
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (lan966x) {
+				/* The bridge already has at least one port of a
+				 * lan966x switch inside it; check that it is
+				 * the same instance of the driver.
+				 */
+ if (port->lan966x != lan966x) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging between multiple lan966x switches disallowed");
+ return -EINVAL;
+ }
+ } else {
+ /* This is the first lan966x port inside this
+ * bridge
+ */
+ lan966x = port->lan966x;
+ }
+ } else {
+ has_foreign = true;
+ }
+
+ if (lan966x && has_foreign) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging lan966x ports with foreign interfaces disallowed");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int lan966x_bridge_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ return lan966x_foreign_bridging_check(info->upper_dev,
+ info->info.extack);
+}
+
+static int lan966x_netdevice_port_event(struct net_device *dev,
+ struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ if (!lan966x_netdevice_check(dev)) {
+ if (event == NETDEV_CHANGEUPPER)
+ return lan966x_bridge_check(dev, ptr);
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_port_prechangeupper(dev, ptr);
+ break;
+ case NETDEV_CHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ err = lan966x_port_changeupper(dev, ptr);
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ ret = lan966x_netdevice_port_event(dev, nb, event, ptr);
+
+ return notifier_from_errno(ret);
+}
+
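+/* A bridge other than the one this switch instance is offloading is treated
+ * as foreign, so FDB events seen on it are not programmed into hardware.
+ */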
+static bool lan966x_foreign_dev_check(const struct net_device *dev,
+ const struct net_device *foreign_dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (netif_is_bridge_master(foreign_dev))
+ if (lan966x->bridge != foreign_dev)
+ return true;
+
+ return false;
+}
+
+static int lan966x_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
+ lan966x_netdevice_check,
+ lan966x_foreign_dev_check,
+ lan966x_handle_fdb,
+ NULL);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+	/* When adding a port to a vlan, we get a callback for the port but
+	 * also for the bridge. When we get the callback for the bridge, just
+	 * bail out. Then, when the bridge itself is added to the vlan, we get
+	 * a callback here with the BRIDGE_VLAN_INFO_BRENTRY flag set. That
+	 * means the CPU port is added to the vlan, so broadcast frames and
+	 * unicast frames with the dmac of the bridge should be forwarded to
+	 * the CPU.
+	 */
+ if (netif_is_bridge_master(obj->orig_dev) &&
+ !(v->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_add_vlan(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ else
+ lan966x_vlan_cpu_add_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_add(port, obj);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = lan966x_handle_port_mdb_add(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_del_vlan(port, v->vid);
+ else
+ lan966x_vlan_cpu_del_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_del(port, obj);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = lan966x_handle_port_mdb_del(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly = {
+ .notifier_call = lan966x_netdevice_event,
+};
+
+static struct notifier_block lan966x_switchdev_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_event,
+};
+
+static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_blocking_event,
+};
+
+void lan966x_register_notifier_blocks(void)
+{
+ register_netdevice_notifier(&lan966x_netdevice_nb);
+ register_switchdev_notifier(&lan966x_switchdev_nb);
+ register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+}
+
+void lan966x_unregister_notifier_blocks(void)
+{
+ unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&lan966x_switchdev_nb);
+ unregister_netdevice_notifier(&lan966x_netdevice_nb);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
new file mode 100644
index 000000000000..8d7260cd7da9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define VLANACCESS_CMD_IDLE 0
+#define VLANACCESS_CMD_READ 1
+#define VLANACCESS_CMD_WRITE 2
+#define VLANACCESS_CMD_INIT 3
+
+static int lan966x_vlan_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_VLANACCESS);
+}
+
+static int lan966x_vlan_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout(lan966x_vlan_get_status,
+ lan966x, val,
+ (val & ANA_VLANACCESS_VLAN_TBL_CMD) ==
+ VLANACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
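+/* Writing a VLAN table entry is a three-step sequence: select the VID (and
+ * its CPU-copy disable flag) in ANA_VLANTIDX, program the port member mask
+ * in ANA_VLAN_PORT_MASK, then issue a WRITE command and poll ANA_VLANACCESS
+ * until it returns to IDLE.
+ */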
+static void lan966x_vlan_set_mask(struct lan966x *lan966x, u16 vid)
+{
+ u16 mask = lan966x->vlan_mask[vid];
+ bool cpu_dis;
+
+ cpu_dis = !(mask & BIT(CPU_PORT));
+
+ /* Set flags and the VID to configure */
+ lan_rmw(ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(cpu_dis) |
+ ANA_VLANTIDX_V_INDEX_SET(vid),
+ ANA_VLANTIDX_VLAN_PGID_CPU_DIS |
+ ANA_VLANTIDX_V_INDEX,
+ lan966x, ANA_VLANTIDX);
+
+ /* Set the vlan port members mask */
+ lan_rmw(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(mask),
+ ANA_VLAN_PORT_MASK_VLAN_PORT_MASK,
+ lan966x, ANA_VLAN_PORT_MASK);
+
+ /* Issue a write command */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_WRITE),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+
+ if (lan966x_vlan_wait_for_completion(lan966x))
+ dev_err(lan966x->dev, "Vlan set mask failed\n");
+}
+
+static void lan966x_vlan_port_add_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] |= BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_port_del_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] &= ~BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static bool lan966x_vlan_port_any_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return !!(lan966x->vlan_mask[vid] & ~BIT(CPU_PORT));
+}
+
+static void lan966x_vlan_cpu_add_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] |= BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_del_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] &= ~BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_add_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __set_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static void lan966x_vlan_cpu_del_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __clear_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return test_bit(vid, lan966x->cpu_vlan_mask);
+}
+
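+/* Ports outside the bridge always classify untagged traffic to HOST_PVID;
+ * bridged ports use the configured pvid when vlan-aware and UNAWARE_PVID
+ * otherwise.
+ */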
+static u16 lan966x_vlan_port_get_pvid(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!(lan966x->bridge_mask & BIT(port->chip_port)))
+ return HOST_PVID;
+
+ return port->vlan_aware ? port->pvid : UNAWARE_PVID;
+}
+
+int lan966x_vlan_port_set_vid(struct lan966x_port *port, u16 vid,
+ bool pvid, bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Egress vlan classification */
+ if (untagged && port->vid != vid) {
+ if (port->vid) {
+ dev_err(lan966x->dev,
+ "Port already has a native VLAN: %d\n",
+ port->vid);
+ return -EBUSY;
+ }
+ port->vid = vid;
+ }
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ return 0;
+}
+
+static void lan966x_vlan_port_remove_vid(struct lan966x_port *port, u16 vid)
+{
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ if (port->vid == vid)
+ port->vid = 0;
+}
+
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware)
+{
+ port->vlan_aware = vlan_aware;
+}
+
+void lan966x_vlan_port_apply(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u16 pvid;
+ u32 val;
+
+ pvid = lan966x_vlan_port_get_pvid(port);
+
+	/* Ingress classification (ANA_PORT_VLAN_CFG) */
+ /* Default vlan to classify for untagged frames (may be zero) */
+ val = ANA_VLAN_CFG_VLAN_VID_SET(pvid);
+ if (port->vlan_aware)
+ val |= ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1);
+
+ lan_rmw(val,
+ ANA_VLAN_CFG_VLAN_VID | ANA_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_VLAN_CFG_VLAN_POP_CNT,
+ lan966x, ANA_VLAN_CFG(port->chip_port));
+
+ /* Drop frames with multicast source address */
+ val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1);
+ if (port->vlan_aware && !pvid)
+		/* If the port is vlan-aware but has no pvid, drop untagged
+		 * and priority-tagged frames.
+		 */
+ val |= ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(1);
+
+ lan_wr(val, lan966x, ANA_DROP_CFG(port->chip_port));
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
+ val = REW_TAG_CFG_TAG_TPID_CFG_SET(0);
+ if (port->vlan_aware) {
+ if (port->vid)
+			/* Tag all frames except those in the native vlan
+			 * (port->vid)
+			 */
+ val |= REW_TAG_CFG_TAG_CFG_SET(1);
+ else
+ val |= REW_TAG_CFG_TAG_CFG_SET(3);
+ }
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_TPID_CFG | REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+
+ /* Set default VLAN and tag type to 8021Q */
+ lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021Q) |
+ REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+ REW_PORT_VLAN_CFG_PORT_TPID |
+ REW_PORT_VLAN_CFG_PORT_VID,
+ lan966x, REW_PORT_VLAN_CFG(port->chip_port));
+}
+
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* If the CPU(br) is already part of the vlan then add the fdb
+ * entries in MAC table to copy the frames to the CPU(br).
+ * If the CPU(br) is not part of the vlan then it would
+ * just drop the frames.
+ */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+ lan966x_mdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_port_set_vid(port, vid, pvid, untagged);
+ lan966x_vlan_port_add_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+}
+
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_vlan_port_remove_vid(port, vid);
+ lan966x_vlan_port_del_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+
+	/* In case there are no other ports in the vlan, remove the CPU from
+	 * that vlan but still keep it in the cpu_vlan_mask, because it may be
+	 * needed again when another port gets added to that vlan.
+	 */
+ if (!lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ lan966x_mdb_erase_entries(lan966x, vid);
+ }
+}
+
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid)
+{
+	/* Add an entry in the MAC table for the CPU.
+	 * Add the CPU to the vlan port mask only if there is another port in
+	 * that vlan; otherwise all broadcast frames in that vlan would go to
+	 * the CPU even though none of the front ports are members, and the
+	 * CPU would just have to discard them. The membership is still
+	 * recorded in cpu_vlan_mask so that, when a front port is added
+	 * later, the CPU port is added along with it.
+	 */
+ if (lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_mdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+}
+
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid)
+{
+ /* Remove the CPU part of the vlan */
+ lan966x_vlan_cpu_del_cpu_vlan_mask(lan966x, vid);
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ lan966x_mdb_erase_entries(lan966x, vid);
+}
+
+void lan966x_vlan_init(struct lan966x *lan966x)
+{
+ u16 port, vid;
+
+	/* Clear the VLAN table; by default all ports are members of all vlans */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_INIT),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+ lan966x_vlan_wait_for_completion(lan966x);
+
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ lan966x->vlan_mask[vid] = 0;
+ lan966x_vlan_set_mask(lan966x, vid);
+ }
+
+ /* Set all the ports + cpu to be part of HOST_PVID and UNAWARE_PVID */
+ lan966x->vlan_mask[HOST_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, HOST_PVID);
+
+ lan966x->vlan_mask[UNAWARE_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, UNAWARE_PVID);
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, UNAWARE_PVID);
+
+ /* Configure the CPU port to be vlan aware */
+ lan_wr(ANA_VLAN_CFG_VLAN_VID_SET(0) |
+ ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1),
+ lan966x, ANA_VLAN_CFG(CPU_PORT));
+
+ /* Set vlan ingress filter mask to all ports */
+ lan_wr(GENMASK(lan966x->num_phys_ports, 0),
+ lan966x, ANA_VLANMASK);
+
+ for (port = 0; port < lan966x->num_phys_ports; port++) {
+ lan_wr(0, lan966x, REW_PORT_VLAN_CFG(port));
+ lan_wr(0, lan966x, REW_TAG_CFG(port));
+ }
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 4625d4fb4cde..16266275dd36 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -292,6 +292,33 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->phylink_config.dev = &spx5_port->ndev->dev;
spx5_port->phylink_config.type = PHYLINK_NETDEV;
spx5_port->phylink_config.pcs_poll = true;
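+	/* Describe the MAC capabilities and supported interfaces so that
+	 * phylink_generic_validate() can derive the supported link modes,
+	 * replacing the open-coded validate callback removed below.
+	 */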
+ spx5_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_5000 ||
+ spx5_port->conf.bandwidth == SPEED_10000 ||
+ spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_10000 ||
+ spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_25GBASER,
+ spx5_port->phylink_config.supported_interfaces);
phylink = phylink_create(&spx5_port->phylink_config,
of_fwnode_handle(config->node),
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index fb74752de0ca..8ba33bc1a001 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -26,79 +26,6 @@ static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_po
return false;
}
-static void sparx5_phylink_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_5GBASER:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_25GBASER:
- case PHY_INTERFACE_MODE_NA:
- if (port->conf.bandwidth == SPEED_5000)
- phylink_set(mask, 5000baseT_Full);
- if (port->conf.bandwidth == SPEED_10000) {
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- }
- if (port->conf.bandwidth == SPEED_25000) {
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- phylink_set(mask, 25000baseCR_Full);
- phylink_set(mask, 25000baseSR_Full);
- }
- if (state->interface != PHY_INTERFACE_MODE_NA)
- break;
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- if (state->interface != PHY_INTERFACE_MODE_NA)
- break;
- fallthrough;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_NA) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
- break;
- default:
- linkmode_zero(supported);
- return;
- }
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void sparx5_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -202,7 +129,7 @@ const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
};
const struct phylink_mac_ops sparx5_phylink_mac_ops = {
- .validate = sparx5_phylink_validate,
+ .validate = phylink_generic_validate,
.mac_config = sparx5_phylink_mac_config,
.mac_link_down = sparx5_phylink_mac_link_down,
.mac_link_up = sparx5_phylink_mac_link_up,
diff --git a/drivers/net/ethernet/microsoft/mana/Makefile b/drivers/net/ethernet/microsoft/mana/Makefile
index 0edd5bb685f3..e16a4221f571 100644
--- a/drivers/net/ethernet/microsoft/mana/Makefile
+++ b/drivers/net/ethernet/microsoft/mana/Makefile
@@ -3,4 +3,4 @@
# Makefile for the Microsoft Azure Network Adapter driver
obj-$(CONFIG_MICROSOFT_MANA) += mana.o
-mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o
+mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o mana_bpf.o
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index d047ee876f12..9a12607fb511 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -289,6 +289,8 @@ struct mana_rxq {
struct mana_cq rx_cq;
+ struct completion fence_event;
+
struct net_device *ndev;
/* Total number of receive buffers to be allocated */
@@ -298,6 +300,9 @@ struct mana_rxq {
struct mana_stats stats;
+ struct bpf_prog __rcu *bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
+
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
*/
@@ -353,6 +358,8 @@ struct mana_port_context {
/* This points to an array of num_queues of RQ pointers. */
struct mana_rxq **rxqs;
+ struct bpf_prog *bpf_prog;
+
/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
unsigned int max_queues;
unsigned int num_queues;
@@ -367,6 +374,7 @@ struct mana_port_context {
struct mana_ethtool_stats eth_stats;
};
+int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
bool update_hash, bool update_tab);
@@ -377,6 +385,13 @@ int mana_detach(struct net_device *ndev, bool from_close);
int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
+void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
+u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
+ struct xdp_buff *xdp, void *buf_va, uint pkt_len);
+struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
+void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
+int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+
extern const struct ethtool_ops mana_ethtool_ops;
struct mana_obj_spec {
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
new file mode 100644
index 000000000000..1d2f948b5c00
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mm.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+
+#include "mana.h"
+
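+/* Transmit a frame selected for XDP_TX: push the Ethernet header back (the
+ * XDP program saw the frame from the MAC header on), take the tx queue lock
+ * for the queue matching the frame's queue mapping and hand it to the
+ * regular xmit path.
+ */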
+void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+ u16 txq_idx = skb_get_queue_mapping(skb);
+ struct netdev_queue *ndevtxq;
+ int rc;
+
+ __skb_push(skb, ETH_HLEN);
+
+ ndevtxq = netdev_get_tx_queue(ndev, txq_idx);
+ __netif_tx_lock(ndevtxq, smp_processor_id());
+
+ rc = mana_start_xmit(skb, ndev);
+
+ __netif_tx_unlock(ndevtxq);
+
+ if (dev_xmit_complete(rc))
+ return;
+
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+}
+
+u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
+ struct xdp_buff *xdp, void *buf_va, uint pkt_len)
+{
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+ prog = rcu_dereference(rxq->bpf_prog);
+
+ if (!prog)
+ goto out;
+
+ xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ case XDP_TX:
+ case XDP_DROP:
+ break;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ }
+
+out:
+ rcu_read_unlock();
+
+ return act;
+}
+
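+/* Worst-case buffer space a frame of the given length occupies once the
+ * trailing skb_shared_info is accounted for; used in mana_xdp_set() to check
+ * that an XDP buffer for the current MTU still fits in a single page.
+ */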
+static unsigned int mana_xdp_fraglen(unsigned int len)
+{
+ return SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
+{
+ ASSERT_RTNL();
+
+ return apc->bpf_prog;
+}
+
+static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc)
+{
+ return rtnl_dereference(apc->rxqs[0]->bpf_prog);
+}
+
+/* Set xdp program on channels */
+void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog = mana_chn_xdp_get(apc);
+ unsigned int num_queues = apc->num_queues;
+ int i;
+
+ ASSERT_RTNL();
+
+ if (old_prog == prog)
+ return;
+
+ if (prog)
+ bpf_prog_add(prog, num_queues);
+
+ for (i = 0; i < num_queues; i++)
+ rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog);
+
+ if (old_prog)
+ for (i = 0; i < num_queues; i++)
+ bpf_prog_put(old_prog);
+}
+
+static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+ int buf_max;
+
+ old_prog = mana_xdp_get(apc);
+
+ if (!old_prog && !prog)
+ return 0;
+
+ buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
+ if (prog && buf_max > PAGE_SIZE) {
+ netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
+ ndev->mtu, buf_max);
+ NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
+
+ return -EOPNOTSUPP;
+ }
+
+	/* One refcnt of the prog is already held by the caller, so
+	 * don't take another one here.
+	 */
+ apc->bpf_prog = prog;
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (apc->port_is_up)
+ mana_chn_setxdp(apc, prog);
+
+ return 0;
+}
+
+int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+	struct netlink_ext_ack *extack = bpf->extack;
+
+	switch (bpf->command) {
+	case XDP_SETUP_PROG:
+		return mana_xdp_set(ndev, bpf->prog, extack);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 72cbf45c42d8..498d0f999275 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */
+#include <uapi/linux/bpf.h>
+
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -125,7 +127,7 @@ frag_err:
return -ENOMEM;
}
-static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
struct mana_port_context *apc = netdev_priv(ndev);
@@ -378,6 +380,7 @@ static const struct net_device_ops mana_devops = {
.ndo_start_xmit = mana_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = mana_get_stats64,
+ .ndo_bpf = mana_bpf,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -749,6 +752,61 @@ out:
return err;
}
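+/* Send a MANA_FENCE_RQ request for the given RQ and wait (up to 10 seconds)
+ * for the corresponding CQE_RX_OBJECT_FENCE completion on that RQ.
+ */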
+static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
+{
+ struct mana_fence_rq_resp resp = {};
+ struct mana_fence_rq_req req = {};
+ int err;
+
+ init_completion(&rxq->fence_event);
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
+ sizeof(req), sizeof(resp));
+ req.wq_obj_handle = rxq->rxobj;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
+ rxq->rxq_idx, err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
+ rxq->rxq_idx, err, resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+
+ return err;
+ }
+
+ if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
+ netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
+ rxq->rxq_idx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void mana_fence_rqs(struct mana_port_context *apc)
+{
+ unsigned int rxq_idx;
+ struct mana_rxq *rxq;
+ int err;
+
+ for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
+ rxq = apc->rxqs[rxq_idx];
+ err = mana_fence_rq(apc, rxq);
+
+		/* If fencing failed, fall back to a short sleep to give
+		 * in-flight traffic time to drain.
+		 */
+ if (err)
+ msleep(100);
+ }
+}
+
static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
{
u32 used_space_old;
@@ -906,6 +964,25 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}
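+/* Build an skb around the receive buffer. If an XDP program ran, honour the
+ * headroom and length it may have adjusted in the xdp_buff; otherwise just
+ * reserve the standard XDP headroom and use the wire packet length.
+ */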
+static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
+ struct xdp_buff *xdp)
+{
+ struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
+
+ if (!skb)
+ return NULL;
+
+ if (xdp->data_hard_start) {
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ skb_put(skb, xdp->data_end - xdp->data);
+ } else {
+ skb_reserve(skb, XDP_PACKET_HEADROOM);
+ skb_put(skb, pkt_len);
+ }
+
+ return skb;
+}
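
When an XDP program ran, xdp->data may have been moved (headers popped or pushed), so the skb geometry is derived from the xdp_buff rather than from the raw packet length. A small sketch of that pointer arithmetic, assuming the kernel's 256-byte XDP_PACKET_HEADROOM:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE           4096
#define XDP_PACKET_HEADROOM 256 /* kernel's reserved XDP headroom */

int main(void)
{
        static char page[PAGE_SIZE];
        /* Buffer layout after DMA: packet data starts after the headroom. */
        char *data_hard_start = page;
        char *data = page + XDP_PACKET_HEADROOM;
        size_t pkt_len = 1500;
        char *data_end = data + pkt_len;

        /* An XDP program may move data, e.g. pop a 14-byte Ethernet header. */
        data += 14;

        /* mana_build_skb() derives the skb geometry from the xdp_buff:
         * reserve = data - data_hard_start, length = data_end - data.
         */
        size_t reserve = (size_t)(data - data_hard_start);
        size_t len = (size_t)(data_end - data);

        assert(reserve == XDP_PACKET_HEADROOM + 14);
        assert(len == pkt_len - 14);
        printf("skb_reserve(%zu), skb_put(%zu)\n", reserve, len);
        return 0;
}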
+
static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
struct mana_rxq *rxq)
{
@@ -914,8 +991,10 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
uint pkt_len = cqe->ppi[0].pkt_len;
u16 rxq_idx = rxq->rxq_idx;
struct napi_struct *napi;
+ struct xdp_buff xdp = {};
struct sk_buff *skb;
u32 hash_value;
+ u32 act;
rxq->rx_cq.work_done++;
napi = &rxq->rx_cq.napi;
@@ -925,15 +1004,16 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
return;
}
- skb = build_skb(buf_va, PAGE_SIZE);
+ act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
- if (!skb) {
- free_page((unsigned long)buf_va);
- ++ndev->stats.rx_dropped;
- return;
- }
+ if (act != XDP_PASS && act != XDP_TX)
+ goto drop;
+
+ skb = mana_build_skb(buf_va, pkt_len, &xdp);
+
+ if (!skb)
+ goto drop;
- skb_put(skb, pkt_len);
skb->dev = napi->dev;
skb->protocol = eth_type_trans(skb, ndev);
@@ -954,12 +1034,24 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
}
+ if (act == XDP_TX) {
+ skb_set_queue_mapping(skb, rxq_idx);
+ mana_xdp_tx(skb, ndev);
+ return;
+ }
+
napi_gro_receive(napi, skb);
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += pkt_len;
u64_stats_update_end(&rx_stats->syncp);
+ return;
+
+drop:
+ free_page((unsigned long)buf_va);
+ ++ndev->stats.rx_dropped;
+ return;
}
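
The verdict handling above reduces to: XDP_PASS delivers through GRO, XDP_TX loops the frame back out of the same queue, and anything else (DROP, ABORTED, unknown) frees the page and bumps rx_dropped. A compact sketch of that dispatch; the action values mirror enum xdp_action from the UAPI:

#include <stdio.h>

/* Subset of XDP verdicts relevant to mana_rx_skb(). */
enum xdp_action { XDP_ABORTED = 0, XDP_DROP, XDP_PASS, XDP_TX };

static const char *handle_rx(enum xdp_action act)
{
        /* Anything other than PASS or TX is dropped, matching the
         * "goto drop" path in mana_rx_skb().
         */
        if (act != XDP_PASS && act != XDP_TX)
                return "drop (free page, rx_dropped++)";

        if (act == XDP_TX)
                return "loop back via mana_xdp_tx()";

        return "deliver via napi_gro_receive()";
}

int main(void)
{
        enum xdp_action acts[] = { XDP_PASS, XDP_TX, XDP_DROP, XDP_ABORTED };

        for (unsigned int i = 0; i < sizeof(acts) / sizeof(acts[0]); i++)
                printf("%d -> %s\n", acts[i], handle_rx(acts[i]));
        return 0;
}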
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
@@ -988,7 +1080,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
return;
case CQE_RX_OBJECT_FENCE:
- netdev_err(ndev, "RX Fencing is unsupported\n");
+ complete(&rxq->fence_event);
return;
default:
@@ -1016,7 +1108,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
new_page = alloc_page(GFP_ATOMIC);
if (new_page) {
- da = dma_map_page(dev, new_page, 0, rxq->datasize,
+ da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
@@ -1291,6 +1383,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
napi_synchronize(napi);
napi_disable(napi);
+
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
netif_napi_del(napi);
mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
@@ -1342,7 +1437,8 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
if (!page)
return -ENOMEM;
- da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE);
+ da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
__free_page(page);
@@ -1485,6 +1581,12 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
netif_napi_add(ndev, &cq->napi, mana_poll, 1);
+
+ WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
+ cq->napi.napi_id));
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+
napi_enable(&cq->napi);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -1572,6 +1674,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
bool update_hash, bool update_tab)
{
u32 queue_idx;
+ int err;
int i;
if (update_tab) {
@@ -1581,7 +1684,13 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
}
}
- return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
+ err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
+ if (err)
+ return err;
+
+ mana_fence_rqs(apc);
+
+ return 0;
}
static int mana_init_port(struct net_device *ndev)
@@ -1650,6 +1759,8 @@ int mana_alloc_queues(struct net_device *ndev)
if (err)
goto destroy_vport;
+ mana_chn_setxdp(apc, mana_xdp_get(apc));
+
return 0;
destroy_vport:
@@ -1698,6 +1809,8 @@ static int mana_dealloc_queues(struct net_device *ndev)
if (apc->port_is_up)
return -EINVAL;
+ mana_chn_setxdp(apc, NULL);
+
/* No packet can be transmitted now since apc->port_is_up is false.
* There is still a tiny chance that mana_poll_tx_cq() can re-enable
* a txq because it may not see apc->port_is_up being cleared in time
@@ -1724,9 +1837,6 @@ static int mana_dealloc_queues(struct net_device *ndev)
return err;
}
- /* TODO: Implement RX fencing */
- ssleep(1);
-
mana_destroy_vport(apc);
return 0;
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 722c27694b21..41b34a509308 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -7,9 +7,11 @@ mscc_ocelot_switch_lib-y := \
ocelot_vcap.o \
ocelot_flower.o \
ocelot_ptp.o \
- ocelot_devlink.o
+ ocelot_devlink.o \
+ vsc7514_regs.o
mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
+ ocelot_fdma.o \
ocelot_vsc7514.o \
ocelot_net.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1e4ad953cffb..455293aa6343 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -61,9 +61,9 @@ static void ocelot_mact_select(struct ocelot *ocelot,
}
-int ocelot_mact_learn(struct ocelot *ocelot, int port,
- const unsigned char mac[ETH_ALEN],
- unsigned int vid, enum macaccess_entry_type type)
+static int __ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
{
u32 cmd = ANA_TABLES_MACACCESS_VALID |
ANA_TABLES_MACACCESS_DEST_IDX(port) |
@@ -83,8 +83,6 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
if (mc_ports & BIT(ocelot->num_phys_ports))
cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
- mutex_lock(&ocelot->mact_lock);
-
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
@@ -92,9 +90,20 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
err = ocelot_mact_wait_for_completion(ocelot);
+ return err;
+}
+
+int ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+{
+ int ret;
+
+ mutex_lock(&ocelot->mact_lock);
+ ret = __ocelot_mact_learn(ocelot, port, mac, vid, type);
mutex_unlock(&ocelot->mact_lock);
- return err;
+ return ret;
}
EXPORT_SYMBOL(ocelot_mact_learn);
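
The refactor splits the learn operation into a lock-free __ocelot_mact_learn() plus a locking wrapper, so that ocelot_mact_learn_streamdata() below can program the stream data register and learn the entry under one critical section. A generic sketch of this locked-wrapper pattern, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table_value;

/* Lock-free helper: callers must hold table_lock. */
static int __table_write(int v)
{
        table_value = v; /* the actual hardware access */
        return 0;
}

/* Public API: takes the lock around the helper. */
static int table_write(int v)
{
        int ret;

        pthread_mutex_lock(&table_lock);
        ret = __table_write(v);
        pthread_mutex_unlock(&table_lock);
        return ret;
}

/* Composite operation: two writes that must be atomic with respect to
 * other callers, mirroring ocelot_mact_learn_streamdata().
 */
static int table_write_pair(int a, int b)
{
        int ret;

        pthread_mutex_lock(&table_lock);
        __table_write(a);
        ret = __table_write(b);
        pthread_mutex_unlock(&table_lock);
        return ret;
}

int main(void)
{
        table_write(1);
        table_write_pair(2, 3);
        printf("table_value=%d\n", table_value);
        return 0;
}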
@@ -120,6 +129,66 @@ int ocelot_mact_forget(struct ocelot *ocelot,
}
EXPORT_SYMBOL(ocelot_mact_forget);
+int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type *type)
+{
+ int val;
+
+ mutex_lock(&ocelot->mact_lock);
+
+ ocelot_mact_select(ocelot, mac, vid);
+
+ /* Issue a read command with MACACCESS_VALID=1. */
+ ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
+ ANA_TABLES_MACACCESS);
+
+ if (ocelot_mact_wait_for_completion(ocelot)) {
+ mutex_unlock(&ocelot->mact_lock);
+ return -ETIMEDOUT;
+ }
+
+ /* Read back the entry flags */
+ val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ if (!(val & ANA_TABLES_MACACCESS_VALID))
+ return -ENOENT;
+
+ *dst_idx = ANA_TABLES_MACACCESS_DEST_IDX_X(val);
+ *type = ANA_TABLES_MACACCESS_ENTRYTYPE_X(val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_mact_lookup);
+
+int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type,
+ int sfid, int ssid)
+{
+ int ret;
+
+ mutex_lock(&ocelot->mact_lock);
+
+ ocelot_write(ocelot,
+ (sfid < 0 ? 0 : ANA_TABLES_STREAMDATA_SFID_VALID) |
+ ANA_TABLES_STREAMDATA_SFID(sfid) |
+ (ssid < 0 ? 0 : ANA_TABLES_STREAMDATA_SSID_VALID) |
+ ANA_TABLES_STREAMDATA_SSID(ssid),
+ ANA_TABLES_STREAMDATA);
+
+ ret = __ocelot_mact_learn(ocelot, dst_idx, mac, vid, type);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ocelot_mact_learn_streamdata);
+
static void ocelot_mact_init(struct ocelot *ocelot)
{
/* Configure the learning mode entries attributes:
@@ -594,9 +663,17 @@ void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
+ ocelot_port->speed = SPEED_UNKNOWN;
+
ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
DEV_MAC_ENA_CFG);
+ if (ocelot->ops->cut_through_fwd) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ ocelot->ops->cut_through_fwd(ocelot);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ }
+
ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
err = ocelot_port_flush(ocelot, port);
@@ -628,6 +705,8 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
int mac_speed, mode = 0;
u32 mac_fc_cfg;
+ ocelot_port->speed = speed;
+
/* The MAC might be integrated in systems where the MAC speed is fixed
* and it's the PCS who is performing the rate adaptation, so we have
* to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG
@@ -692,7 +771,10 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
- ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause);
+ /* Don't attempt to send PAUSE frames on the NPI port, it's broken */
+ if (port != ocelot->npi)
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA,
+ tx_pause);
/* Undo the effects of ocelot_phylink_mac_link_down:
* enable MAC module
@@ -700,6 +782,15 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
+ /* If the port supports cut-through forwarding, update the masks before
+ * enabling forwarding on the port.
+ */
+ if (ocelot->ops->cut_through_fwd) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ ocelot->ops->cut_through_fwd(ocelot);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ }
+
/* Core: Enable port for frame transfer */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
@@ -966,14 +1057,34 @@ static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh)
return 0;
}
-int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
+ u64 timestamp)
{
struct skb_shared_hwtstamps *shhwtstamps;
u64 tod_in_ns, full_ts_in_ns;
+ struct timespec64 ts;
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+
+ tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+ if ((tod_in_ns & 0xffffffff) < timestamp)
+ full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
+ timestamp;
+ else
+ full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
+ timestamp;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
+EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
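
The hardware stores only the low 32 bits of the RX timestamp in the frame, so the full 64-bit value is rebuilt from the current time of day, borrowing one 2^32 ns window when the low word has wrapped between reception and the ToD read. A runnable check of that reconstruction:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Rebuild a full 64-bit nanosecond timestamp from the current time of
 * day and the 32-bit partial timestamp stored in the frame, following
 * the logic in ocelot_ptp_rx_timestamp().
 */
static uint64_t full_rx_timestamp(uint64_t tod_in_ns, uint32_t ts32)
{
        if ((tod_in_ns & 0xffffffffULL) < ts32)
                /* The low 32 bits wrapped between RX and the ToD read,
                 * so the frame belongs to the previous 2^32 ns window.
                 */
                return (((tod_in_ns >> 32) - 1) << 32) | ts32;

        return (tod_in_ns & ~0xffffffffULL) | ts32;
}

int main(void)
{
        /* No wrap: low word of ToD is ahead of the stored timestamp. */
        uint64_t t1 = full_rx_timestamp(0x500000100ULL, 0x80);
        assert(t1 == 0x500000080ULL);

        /* Wrap: ToD low word already restarted from zero. */
        uint64_t t2 = full_rx_timestamp(0x500000010ULL, 0xfffffff0U);
        assert(t2 == 0x4fffffff0ULL);

        printf("t1=%" PRIx64 " t2=%" PRIx64 "\n", t1, t2);
        return 0;
}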
+
+int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+{
u64 timestamp, src_port, len;
u32 xfh[OCELOT_TAG_LEN / 4];
struct net_device *dev;
- struct timespec64 ts;
struct sk_buff *skb;
int sz, buf_len;
u32 val, *buf;
@@ -1029,21 +1140,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
*buf = val;
}
- if (ocelot->ptp) {
- ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
-
- tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
- if ((tod_in_ns & 0xffffffff) < timestamp)
- full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
- timestamp;
- else
- full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
- timestamp;
-
- shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamps->hwtstamp = full_ts_in_ns;
- }
+ if (ocelot->ptp)
+ ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded.
@@ -1076,6 +1174,18 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
}
EXPORT_SYMBOL(ocelot_can_inject);
+void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
+{
+ ocelot_ifh_set_bypass(ifh, 1);
+ ocelot_ifh_set_dest(ifh, BIT_ULL(port));
+ ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
+ if (vlan_tag)
+ ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
+ if (rew_op)
+ ocelot_ifh_set_rew_op(ifh, rew_op);
+}
+EXPORT_SYMBOL(ocelot_ifh_port_set);
+
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 rew_op, struct sk_buff *skb)
{
@@ -1085,11 +1195,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
- ocelot_ifh_set_bypass(ifh, 1);
- ocelot_ifh_set_dest(ifh, BIT_ULL(port));
- ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
- ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb));
- ocelot_ifh_set_rew_op(ifh, rew_op);
+ ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
@@ -1238,6 +1344,43 @@ static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
return 0;
}
+int ocelot_mact_flush(struct ocelot *ocelot, int port)
+{
+ int err;
+
+ mutex_lock(&ocelot->mact_lock);
+
+ /* Program ageing filter for a single port */
+ ocelot_write(ocelot, ANA_ANAGEFIL_PID_EN | ANA_ANAGEFIL_PID_VAL(port),
+ ANA_ANAGEFIL);
+
+ /* Flushing dynamic FDB entries requires two successive age scans */
+ ocelot_write(ocelot,
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE),
+ ANA_TABLES_MACACCESS);
+
+ err = ocelot_mact_wait_for_completion(ocelot);
+ if (err) {
+ mutex_unlock(&ocelot->mact_lock);
+ return err;
+ }
+
+ /* And second... */
+ ocelot_write(ocelot,
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE),
+ ANA_TABLES_MACACCESS);
+
+ err = ocelot_mact_wait_for_completion(ocelot);
+
+ /* Restore ageing filter */
+ ocelot_write(ocelot, 0, ANA_ANAGEFIL);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ocelot_mact_flush);
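
The flush relies on the aging convention of the MAC table: the first AGE scan marks still-present dynamic entries, the second removes the marked ones, while ANA_ANAGEFIL restricts both scans to the given port and locked (static) entries are never aged. A small model of those semantics, with a hypothetical entry layout:

#include <stdbool.h>
#include <stdio.h>

#define NUM_ENTRIES 4

struct fdb_entry {
        bool valid;
        bool locked; /* static entries are not subject to aging */
        int port;
        int age;
};

/* One AGE scan: bump the age of dynamic entries on the filtered port
 * and invalidate those that were already aged once.
 */
static void age_scan(struct fdb_entry *tab, int port)
{
        for (int i = 0; i < NUM_ENTRIES; i++) {
                struct fdb_entry *e = &tab[i];

                if (!e->valid || e->locked || e->port != port)
                        continue;
                if (++e->age >= 2)
                        e->valid = false;
        }
}

int main(void)
{
        struct fdb_entry tab[NUM_ENTRIES] = {
                { .valid = true, .port = 0 },
                { .valid = true, .port = 1 },
                { .valid = true, .locked = true, .port = 0 },
                { .valid = true, .port = 0 },
        };

        /* Two successive scans flush port 0's dynamic entries ... */
        age_scan(tab, 0);
        age_scan(tab, 0);

        for (int i = 0; i < NUM_ENTRIES; i++)
                printf("entry %d: valid=%d\n", i, tab[i].valid);
        /* ... while the locked entry and port 1's entry survive. */
        return 0;
}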
+
int ocelot_fdb_dump(struct ocelot *ocelot, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -1514,10 +1657,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
/* Tx type sanity check */
switch (cfg.tx_type) {
case HWTSTAMP_TX_ON:
@@ -1688,8 +1827,7 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_get_ts_info);
-static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
- bool only_active_ports)
+static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
{
u32 mask = 0;
int port;
@@ -1700,26 +1838,25 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
if (!ocelot_port)
continue;
- if (ocelot_port->bond == bond) {
- if (only_active_ports && !ocelot_port->lag_tx_active)
- continue;
-
+ if (ocelot_port->bond == bond)
mask |= BIT(port);
- }
}
return mask;
}
-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
- struct net_device *bridge)
+u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
{
struct ocelot_port *ocelot_port = ocelot->ports[src_port];
+ const struct net_device *bridge;
u32 mask = 0;
int port;
- if (!ocelot_port || ocelot_port->bridge != bridge ||
- ocelot_port->stp_state != BR_STATE_FORWARDING)
+ if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
+ bridge = ocelot_port->bridge;
+ if (!bridge)
return 0;
for (port = 0; port < ocelot->num_phys_ports; port++) {
@@ -1735,8 +1872,9 @@ static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
return mask;
}
+EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask);
-static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
+u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
{
u32 mask = 0;
int port;
@@ -1753,12 +1891,22 @@ static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
return mask;
}
+EXPORT_SYMBOL_GPL(ocelot_get_dsa_8021q_cpu_mask);
-void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
+void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
{
unsigned long cpu_fwd_mask;
int port;
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ /* If cut-through forwarding is supported, update the masks before a
+ * port joins the forwarding domain, to avoid potential underruns if it
+ * has the highest speed in the new domain.
+ */
+ if (joining && ocelot->ops->cut_through_fwd)
+ ocelot->ops->cut_through_fwd(ocelot);
+
/* If a DSA tag_8021q CPU exists, it needs to be included in the
* regular forwarding path of the front ports regardless of whether
* those are bridged or standalone.
@@ -1786,16 +1934,13 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
mask = GENMASK(ocelot->num_phys_ports - 1, 0);
mask &= ~cpu_fwd_mask;
} else if (ocelot_port->bridge) {
- struct net_device *bridge = ocelot_port->bridge;
struct net_device *bond = ocelot_port->bond;
- mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port);
mask |= cpu_fwd_mask;
mask &= ~BIT(port);
- if (bond) {
- mask &= ~ocelot_get_bond_mask(ocelot, bond,
- false);
- }
+ if (bond)
+ mask &= ~ocelot_get_bond_mask(ocelot, bond);
} else {
/* Standalone ports forward only to DSA tag_8021q CPU
* ports (if those exist), or to the hardware CPU port
@@ -1806,6 +1951,16 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
}
+
+ /* If cut-through forwarding is supported and a port is leaving, there
+ * is a chance that cut-through was disabled on the other ports due to
+ * the port that is leaving (it may have a higher link speed). We need to
+ * update the cut-through masks of the remaining ports no earlier than
+ * after the port has left, to prevent underruns from happening between
+ * the cut-through update and the forwarding domain update.
+ */
+ if (!joining && ocelot->ops->cut_through_fwd)
+ ocelot->ops->cut_through_fwd(ocelot);
}
EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
@@ -1814,6 +1969,8 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 learn_ena = 0;
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->stp_state = state;
if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) &&
@@ -1823,7 +1980,9 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA,
ANA_PORT_PORT_CFG, port);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, state == BR_STATE_FORWARDING);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
@@ -2053,9 +2212,13 @@ void ocelot_port_bridge_join(struct ocelot *ocelot, int port,
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->bridge = bridge;
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_bridge_join);
@@ -2064,11 +2227,15 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->bridge = NULL;
ocelot_port_set_pvid(ocelot, port, NULL);
ocelot_port_manage_port_tag(ocelot, port);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, false);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
@@ -2112,13 +2279,17 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
if (!bond || (visited & BIT(lag)))
continue;
- bond_mask = ocelot_get_bond_mask(ocelot, bond, true);
+ bond_mask = ocelot_get_bond_mask(ocelot, bond);
for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
// Destination mask
ocelot_write_rix(ocelot, bond_mask,
ANA_PGID_PGID, port);
- aggr_idx[num_active_ports++] = port;
+
+ if (ocelot_port->lag_tx_active)
+ aggr_idx[num_active_ports++] = port;
}
for_each_aggr_pgid(ocelot, i) {
@@ -2167,8 +2338,7 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
bond = ocelot_port->bond;
if (bond) {
- int lag = __ffs(ocelot_get_bond_mask(ocelot, bond,
- false));
+ int lag = __ffs(ocelot_get_bond_mask(ocelot, bond));
ocelot_rmw_gix(ocelot,
ANA_PORT_PORT_CFG_PORTID_VAL(lag),
@@ -2190,12 +2360,16 @@ int ocelot_port_lag_join(struct ocelot *ocelot, int port,
if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
return -EOPNOTSUPP;
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot->ports[port]->bond = bond;
ocelot_setup_logical_port_ids(ocelot);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
ocelot_set_aggr_pgids(ocelot);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
return 0;
}
EXPORT_SYMBOL(ocelot_port_lag_join);
@@ -2203,11 +2377,15 @@ EXPORT_SYMBOL(ocelot_port_lag_join);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot->ports[port]->bond = NULL;
ocelot_setup_logical_port_ids(ocelot);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, false);
ocelot_set_aggr_pgids(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_leave);
@@ -2498,6 +2676,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
+ mutex_init(&ocelot->fwd_domain_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
@@ -2521,6 +2700,9 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_vcap_init(ocelot);
ocelot_cpu_port_init(ocelot);
+ if (ocelot->ops->psfp_init)
+ ocelot->ops->psfp_init(ocelot);
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Clear all counters (5 groups) */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index e43da09b8f91..bf4eff6d7086 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -32,6 +32,8 @@
#define OCELOT_PTP_QUEUE_SZ 128
+#define OCELOT_JUMBO_MTU 9000
+
struct ocelot_port_tc {
bool block_shared;
unsigned long offload_cnt;
@@ -55,19 +57,6 @@ struct ocelot_dump_ctx {
int idx;
};
-/* MAC table entry types.
- * ENTRYTYPE_NORMAL is subject to aging.
- * ENTRYTYPE_LOCKED is not subject to aging.
- * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
- * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
- */
-enum macaccess_entry_type {
- ENTRYTYPE_NORMAL = 0,
- ENTRYTYPE_LOCKED,
- ENTRYTYPE_MACv4,
- ENTRYTYPE_MACv6,
-};
-
/* A (PGID) port mask structure, encoding the 2^ocelot->num_phys_ports
* possibilities of egress port masks for L2 multicast traffic.
* For a switch with 9 user ports, there are 512 possible port masks, but the
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
new file mode 100644
index 000000000000..dffa597bffe6
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
@@ -0,0 +1,894 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi SoCs FDMA driver
+ *
+ * Copyright (c) 2021 Microchip
+ *
+ * Page recycling code is mostly taken from the gianfar driver.
+ */
+
+#include <linux/align.h>
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/dsa/ocelot.h>
+#include <linux/netdevice.h>
+#include <linux/of_platform.h>
+#include <linux/skbuff.h>
+
+#include "ocelot_fdma.h"
+#include "ocelot_qs.h"
+
+DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled);
+
+static void ocelot_fdma_writel(struct ocelot *ocelot, u32 reg, u32 data)
+{
+ regmap_write(ocelot->targets[FDMA], reg, data);
+}
+
+static u32 ocelot_fdma_readl(struct ocelot *ocelot, u32 reg)
+{
+ u32 retval;
+
+ regmap_read(ocelot->targets[FDMA], reg, &retval);
+
+ return retval;
+}
+
+static dma_addr_t ocelot_fdma_idx_dma(dma_addr_t base, u16 idx)
+{
+ return base + idx * sizeof(struct ocelot_fdma_dcb);
+}
+
+static u16 ocelot_fdma_dma_idx(dma_addr_t base, dma_addr_t dma)
+{
+ return (dma - base) / sizeof(struct ocelot_fdma_dcb);
+}
+
+static u16 ocelot_fdma_idx_next(u16 idx, u16 ring_sz)
+{
+ return unlikely(idx == ring_sz - 1) ? 0 : idx + 1;
+}
+
+static u16 ocelot_fdma_idx_prev(u16 idx, u16 ring_sz)
+{
+ return unlikely(idx == 0) ? ring_sz - 1 : idx - 1;
+}
+
+static int ocelot_fdma_rx_ring_free(struct ocelot_fdma *fdma)
+{
+ struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring;
+
+ if (rx_ring->next_to_use >= rx_ring->next_to_clean)
+ return OCELOT_FDMA_RX_RING_SIZE -
+ (rx_ring->next_to_use - rx_ring->next_to_clean) - 1;
+ else
+ return rx_ring->next_to_clean - rx_ring->next_to_use - 1;
+}
+
+static int ocelot_fdma_tx_ring_free(struct ocelot_fdma *fdma)
+{
+ struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
+
+ if (tx_ring->next_to_use >= tx_ring->next_to_clean)
+ return OCELOT_FDMA_TX_RING_SIZE -
+ (tx_ring->next_to_use - tx_ring->next_to_clean) - 1;
+ else
+ return tx_ring->next_to_clean - tx_ring->next_to_use - 1;
+}
+
+static bool ocelot_fdma_tx_ring_empty(struct ocelot_fdma *fdma)
+{
+ struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
+
+ return tx_ring->next_to_clean == tx_ring->next_to_use;
+}
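
These ring helpers reserve one slot so that next_to_use == next_to_clean unambiguously means empty rather than full. A quick sanity check of the free-space arithmetic on a hypothetical 8-entry ring:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8

static int ring_free(unsigned int use, unsigned int clean)
{
        if (use >= clean)
                return RING_SIZE - (use - clean) - 1;
        return clean - use - 1;
}

int main(void)
{
        /* Empty ring: all slots free except the reserved one. */
        assert(ring_free(0, 0) == RING_SIZE - 1);

        /* Producer one ahead of consumer. */
        assert(ring_free(3, 2) == RING_SIZE - 2);

        /* Full ring: producer right behind consumer, zero free. */
        assert(ring_free(1, 2) == 0);

        printf("ring math ok\n");
        return 0;
}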
+
+static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
+ int chan)
+{
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma);
+ /* Barrier to force memory writes to DCB to be completed before starting
+ * the channel.
+ */
+ wmb();
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
+}
+
+static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
+{
+ unsigned long timeout;
+ u32 safe;
+
+ timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
+ do {
+ safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
+ if (safe & BIT(chan))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
+ dma_addr_t dma_addr,
+ size_t size)
+{
+ u32 offset = dma_addr & 0x3;
+
+ dcb->llp = 0;
+ dcb->datap = ALIGN_DOWN(dma_addr, 4);
+ dcb->datal = ALIGN_DOWN(size, 4);
+ dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset);
+}
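
The DCB data pointer must be word aligned, so the low two bits of the DMA address are moved into the BLOCKO field and the hardware adds them back when it accesses the buffer. The split, in isolation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((uintptr_t)(a) - 1))

int main(void)
{
        uintptr_t dma_addr = 0x1003; /* not 4-byte aligned */
        unsigned int offset = dma_addr & 0x3;
        uintptr_t datap = ALIGN_DOWN(dma_addr, 4);

        /* The aligned pointer plus the BLOCKO offset recovers the
         * original buffer address.
         */
        assert(datap + offset == dma_addr);
        printf("datap=%#lx blocko=%u\n", (unsigned long)datap, offset);
        return 0;
}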
+
+static bool ocelot_fdma_rx_alloc_page(struct ocelot *ocelot,
+ struct ocelot_fdma_rx_buf *rxb)
+{
+ dma_addr_t mapping;
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return false;
+
+ mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ocelot->dev, mapping))) {
+ __free_page(page);
+ return false;
+ }
+
+ rxb->page = page;
+ rxb->page_offset = 0;
+ rxb->dma_addr = mapping;
+
+ return true;
+}
+
+static int ocelot_fdma_alloc_rx_buffs(struct ocelot *ocelot, u16 alloc_cnt)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ struct ocelot_fdma_rx_buf *rxb;
+ struct ocelot_fdma_dcb *dcb;
+ dma_addr_t dma_addr;
+ int ret = 0;
+ u16 idx;
+
+ rx_ring = &fdma->rx_ring;
+ idx = rx_ring->next_to_use;
+
+ while (alloc_cnt--) {
+ rxb = &rx_ring->bufs[idx];
+ /* Try to reuse the page */
+ if (unlikely(!rxb->page)) {
+ if (unlikely(!ocelot_fdma_rx_alloc_page(ocelot, rxb))) {
+ dev_err_ratelimited(ocelot->dev,
+ "Failed to allocate rx\n");
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ dcb = &rx_ring->dcbs[idx];
+ dma_addr = rxb->dma_addr + rxb->page_offset;
+ ocelot_fdma_dcb_set_data(dcb, dma_addr, OCELOT_FDMA_RXB_SIZE);
+
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ /* Chain the DCB to the next one */
+ dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx);
+ }
+
+ rx_ring->next_to_use = idx;
+ rx_ring->next_to_alloc = idx;
+
+ return ret;
+}
+
+static bool ocelot_fdma_tx_dcb_set_skb(struct ocelot *ocelot,
+ struct ocelot_fdma_tx_buf *tx_buf,
+ struct ocelot_fdma_dcb *dcb,
+ struct sk_buff *skb)
+{
+ dma_addr_t mapping;
+
+ mapping = dma_map_single(ocelot->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ocelot->dev, mapping)))
+ return false;
+
+ dma_unmap_addr_set(tx_buf, dma_addr, mapping);
+
+ ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE);
+ tx_buf->skb = skb;
+ dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len);
+ dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF;
+
+ return true;
+}
+
+static bool ocelot_fdma_check_stop_rx(struct ocelot *ocelot)
+{
+ u32 llp;
+
+ /* Check if the FDMA hits the DCB with LLP == NULL */
+ llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN));
+ if (unlikely(llp))
+ return false;
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_DISABLE,
+ BIT(MSCC_FDMA_XTR_CHAN));
+
+ return true;
+}
+
+static void ocelot_fdma_rx_set_llp(struct ocelot_fdma_rx_ring *rx_ring)
+{
+ struct ocelot_fdma_dcb *dcb;
+ unsigned int idx;
+
+ idx = ocelot_fdma_idx_prev(rx_ring->next_to_use,
+ OCELOT_FDMA_RX_RING_SIZE);
+ dcb = &rx_ring->dcbs[idx];
+ dcb->llp = 0;
+}
+
+static void ocelot_fdma_rx_restart(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ const u8 chan = MSCC_FDMA_XTR_CHAN;
+ dma_addr_t new_llp, dma_base;
+ unsigned int idx;
+ u32 llp_prev;
+ int ret;
+
+ rx_ring = &fdma->rx_ring;
+ ret = ocelot_fdma_wait_chan_safe(ocelot, chan);
+ if (ret) {
+ dev_err_ratelimited(ocelot->dev,
+ "Unable to stop RX channel\n");
+ return;
+ }
+
+ ocelot_fdma_rx_set_llp(rx_ring);
+
+ /* The FDMA stopped on the last DCB, which contained a NULL LLP.
+ * Since we processed some DCBs in RX, there is free space again, so
+ * DCB_LLP must be set to point to the next DCB.
+ */
+ llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan));
+ dma_base = rx_ring->dcbs_dma;
+
+ /* Get the next DMA addr, located after the LLP == NULL DCB */
+ idx = ocelot_fdma_dma_idx(dma_base, llp_prev);
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ new_llp = ocelot_fdma_idx_dma(dma_base, idx);
+
+ /* Finally reactivate the channel */
+ ocelot_fdma_activate_chan(ocelot, new_llp, chan);
+}
+
+static bool ocelot_fdma_add_rx_frag(struct ocelot_fdma_rx_buf *rxb, u32 stat,
+ struct sk_buff *skb, bool first)
+{
+ int size = MSCC_FDMA_DCB_STAT_BLOCKL(stat);
+ struct page *page = rxb->page;
+
+ if (likely(first)) {
+ skb_put(skb, size);
+ } else {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset, size, OCELOT_FDMA_RX_SIZE);
+ }
+
+ /* Try to reuse page */
+ if (unlikely(page_ref_count(page) != 1 || page_is_pfmemalloc(page)))
+ return false;
+
+ /* Change offset to the other half */
+ rxb->page_offset ^= OCELOT_FDMA_RX_SIZE;
+
+ page_ref_inc(page);
+
+ return true;
+}
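
Each page backs two receive buffers: once one half has been consumed, page_offset flips to the other half and the page goes back to the ring, but only if the driver holds the sole reference. A sketch of the recycling decision, with a plain counter standing in for page_ref_count():

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define RX_SIZE   (PAGE_SIZE / 2)

struct rx_buf {
        int refcount; /* stand-in for page_ref_count() */
        unsigned int page_offset;
};

static bool try_recycle(struct rx_buf *rxb)
{
        /* Someone else (e.g. an skb) still references the page. */
        if (rxb->refcount != 1)
                return false;

        rxb->page_offset ^= RX_SIZE; /* flip to the other half */
        rxb->refcount++;             /* reference for the new half */
        return true;
}

int main(void)
{
        struct rx_buf rxb = { .refcount = 1, .page_offset = 0 };

        assert(try_recycle(&rxb) && rxb.page_offset == RX_SIZE);

        /* After the stack drops its reference, the page flips back. */
        rxb.refcount = 1;
        assert(try_recycle(&rxb) && rxb.page_offset == 0);

        printf("page recycled twice\n");
        return 0;
}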
+
+static void ocelot_fdma_reuse_rx_page(struct ocelot *ocelot,
+ struct ocelot_fdma_rx_buf *old_rxb)
+{
+ struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring;
+ struct ocelot_fdma_rx_buf *new_rxb;
+
+ new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc];
+ rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc,
+ OCELOT_FDMA_RX_RING_SIZE);
+
+ /* Copy page reference */
+ *new_rxb = *old_rxb;
+
+ /* Sync for use by the device */
+ dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr,
+ old_rxb->page_offset,
+ OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *ocelot_fdma_get_skb(struct ocelot *ocelot, u32 stat,
+ struct ocelot_fdma_rx_buf *rxb,
+ struct sk_buff *skb)
+{
+ bool first = false;
+
+ /* Allocate skb head and data */
+ if (likely(!skb)) {
+ void *buff_addr = page_address(rxb->page) +
+ rxb->page_offset;
+
+ skb = build_skb(buff_addr, OCELOT_FDMA_SKBFRAG_SIZE);
+ if (unlikely(!skb)) {
+ dev_err_ratelimited(ocelot->dev,
+ "build_skb failed !\n");
+ return NULL;
+ }
+ first = true;
+ }
+
+ dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr,
+ rxb->page_offset, OCELOT_FDMA_RX_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) {
+ /* Reuse the free half of the page for the next_to_alloc DCB */
+ ocelot_fdma_reuse_rx_page(ocelot, rxb);
+ } else {
+ /* page cannot be reused, unmap it */
+ dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* Clear the RX buffer content */
+ rxb->page = NULL;
+
+ return skb;
+}
+
+static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb)
+{
+ struct net_device *ndev;
+ void *xfh = skb->data;
+ u64 timestamp;
+ u64 src_port;
+
+ skb_pull(skb, OCELOT_TAG_LEN);
+
+ ocelot_xfh_get_src_port(xfh, &src_port);
+ if (unlikely(src_port >= ocelot->num_phys_ports))
+ return false;
+
+ ndev = ocelot_port_to_netdev(ocelot, src_port);
+ if (unlikely(!ndev))
+ return false;
+
+ pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ if (ocelot->ptp) {
+ ocelot_xfh_get_rew_val(xfh, &timestamp);
+ ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
+ }
+
+ if (likely(!skb_defer_rx_timestamp(skb)))
+ netif_receive_skb(skb);
+
+ return true;
+}
+
+static int ocelot_fdma_rx_get(struct ocelot *ocelot, int budget)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ struct ocelot_fdma_rx_buf *rxb;
+ struct ocelot_fdma_dcb *dcb;
+ struct sk_buff *skb;
+ int work_done = 0;
+ int cleaned_cnt;
+ u32 stat;
+ u16 idx;
+
+ cleaned_cnt = ocelot_fdma_rx_ring_free(fdma);
+ rx_ring = &fdma->rx_ring;
+ skb = rx_ring->skb;
+
+ while (budget--) {
+ idx = rx_ring->next_to_clean;
+ dcb = &rx_ring->dcbs[idx];
+ stat = dcb->stat;
+ if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0)
+ break;
+
+ /* The new packet is a start of frame, but we already have an skb
+ * set: we probably lost an EOF packet, so free the skb.
+ */
+ if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+
+ rxb = &rx_ring->bufs[idx];
+ /* Fetch the next-to-clean buffer from the rx_ring */
+ skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb);
+ if (unlikely(!skb))
+ break;
+
+ work_done++;
+ cleaned_cnt++;
+
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ rx_ring->next_to_clean = idx;
+
+ if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT ||
+ stat & MSCC_FDMA_DCB_STAT_PD)) {
+ dev_err_ratelimited(ocelot->dev,
+ "DCB aborted or pruned\n");
+ dev_kfree_skb(skb);
+ skb = NULL;
+ continue;
+ }
+
+ /* We still need to process the other fragment of the packet
+ * before delivering it to the network stack
+ */
+ if (!(stat & MSCC_FDMA_DCB_STAT_EOF))
+ continue;
+
+ if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb)))
+ dev_kfree_skb(skb);
+
+ skb = NULL;
+ }
+
+ rx_ring->skb = skb;
+
+ if (cleaned_cnt)
+ ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt);
+
+ return work_done;
+}
+
+static void ocelot_fdma_wakeup_netdev(struct ocelot *ocelot)
+{
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+ struct net_device *dev;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_port = ocelot->ports[port];
+ if (!ocelot_port)
+ continue;
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+ dev = priv->dev;
+
+ if (unlikely(netif_queue_stopped(dev)))
+ netif_wake_queue(dev);
+ }
+}
+
+static void ocelot_fdma_tx_cleanup(struct ocelot *ocelot, int budget)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_tx_ring *tx_ring;
+ struct ocelot_fdma_tx_buf *buf;
+ unsigned int new_null_llp_idx;
+ struct ocelot_fdma_dcb *dcb;
+ bool end_of_list = false;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ u32 dcb_llp;
+ u16 ntc;
+ int ret;
+
+ tx_ring = &fdma->tx_ring;
+
+ /* Purge the TX packets that have been sent, up to the NULL LLP or
+ * the end of the done list.
+ */
+ while (!ocelot_fdma_tx_ring_empty(fdma)) {
+ ntc = tx_ring->next_to_clean;
+ dcb = &tx_ring->dcbs[ntc];
+ if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD))
+ break;
+
+ buf = &tx_ring->bufs[ntc];
+ skb = buf->skb;
+ dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr),
+ skb->len, DMA_TO_DEVICE);
+ napi_consume_skb(skb, budget);
+ dcb_llp = dcb->llp;
+
+ /* Only update after accessing all dcb fields */
+ tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc,
+ OCELOT_FDMA_TX_RING_SIZE);
+
+ /* If we hit the NULL LLP, stop; we might need to reload the FDMA */
+ if (dcb_llp == 0) {
+ end_of_list = true;
+ break;
+ }
+ }
+
+ /* No need to try to wake up the netdev queues if no TX buffers were cleaned. */
+ if (ocelot_fdma_tx_ring_free(fdma))
+ ocelot_fdma_wakeup_netdev(ocelot);
+
+ /* If there are still DCBs to be processed by the FDMA, or if the
+ * pending list is empty, there is no need to restart the FDMA.
+ */
+ if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma))
+ return;
+
+ ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN);
+ if (ret) {
+ dev_warn(ocelot->dev,
+ "Failed to wait for TX channel to stop\n");
+ return;
+ }
+
+ /* Set NULL LLP to be the last DCB used */
+ new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use,
+ OCELOT_FDMA_TX_RING_SIZE);
+ dcb = &tx_ring->dcbs[new_null_llp_idx];
+ dcb->llp = 0;
+
+ dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean);
+ ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
+}
+
+static int ocelot_fdma_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ocelot_fdma *fdma = container_of(napi, struct ocelot_fdma, napi);
+ struct ocelot *ocelot = fdma->ocelot;
+ int work_done = 0;
+ bool rx_stopped;
+
+ ocelot_fdma_tx_cleanup(ocelot, budget);
+
+ rx_stopped = ocelot_fdma_check_stop_rx(ocelot);
+
+ work_done = ocelot_fdma_rx_get(ocelot, budget);
+
+ if (rx_stopped)
+ ocelot_fdma_rx_restart(ocelot);
+
+ if (work_done < budget) {
+ napi_complete_done(&fdma->napi, work_done);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
+ BIT(MSCC_FDMA_INJ_CHAN) |
+ BIT(MSCC_FDMA_XTR_CHAN));
+ }
+
+ return work_done;
+}
+
+static irqreturn_t ocelot_fdma_interrupt(int irq, void *dev_id)
+{
+ u32 ident, llp, frm, err, err_code;
+ struct ocelot *ocelot = dev_id;
+
+ ident = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_IDENT);
+ frm = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_FRM);
+ llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_LLP);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, llp & ident);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, frm & ident);
+ if (frm || llp) {
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
+ napi_schedule(&ocelot->fdma->napi);
+ }
+
+ err = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR);
+ if (unlikely(err)) {
+ err_code = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR_CODE);
+ dev_err_ratelimited(ocelot->dev,
+ "Error ! chans mask: %#x, code: %#x\n",
+ err, err_code);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR, err);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR_CODE, err_code);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void ocelot_fdma_send_skb(struct ocelot *ocelot,
+ struct ocelot_fdma *fdma, struct sk_buff *skb)
+{
+ struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
+ struct ocelot_fdma_tx_buf *tx_buf;
+ struct ocelot_fdma_dcb *dcb;
+ dma_addr_t dma;
+ u16 next_idx;
+
+ dcb = &tx_ring->dcbs[tx_ring->next_to_use];
+ tx_buf = &tx_ring->bufs[tx_ring->next_to_use];
+ if (!ocelot_fdma_tx_dcb_set_skb(ocelot, tx_buf, dcb, skb)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use,
+ OCELOT_FDMA_TX_RING_SIZE);
+ skb_tx_timestamp(skb);
+
+ /* If the FDMA TX chan is empty, then enqueue the DCB directly */
+ if (ocelot_fdma_tx_ring_empty(fdma)) {
+ dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma,
+ tx_ring->next_to_use);
+ ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
+ } else {
+ /* Chain the DCBs */
+ dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx);
+ }
+
+ tx_ring->next_to_use = next_idx;
+}
+
+static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
+ struct sk_buff *skb, struct net_device *dev)
+{
+ int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0);
+ int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ void *ifh;
+ int err;
+
+ if (unlikely(needed_headroom || needed_tailroom ||
+ skb_header_cloned(skb))) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err)) {
+ dev_kfree_skb_any(skb);
+ return 1;
+ }
+ }
+
+ err = skb_linearize(skb);
+ if (err) {
+ net_err_ratelimited("%s: skb_linearize error (%d)!\n",
+ dev->name, err);
+ dev_kfree_skb_any(skb);
+ return 1;
+ }
+
+ ifh = skb_push(skb, OCELOT_TAG_LEN);
+ skb_put(skb, ETH_FCS_LEN);
+ memset(ifh, 0, OCELOT_TAG_LEN);
+ ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
+
+ return 0;
+}
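
Injection needs OCELOT_TAG_LEN bytes in front of the frame and ETH_FCS_LEN behind it; the skb is reallocated only when either amount of room is missing. The needed-room computation, assuming the 16-byte tag and 4-byte FCS:

#include <assert.h>
#include <stdio.h>

#define OCELOT_TAG_LEN 16
#define ETH_FCS_LEN    4

static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
        /* Case 1: the netdev honoured needed_headroom/needed_tailroom,
         * nothing to expand.
         */
        int headroom = 32, tailroom = 8;

        assert(max_int(OCELOT_TAG_LEN - headroom, 0) == 0);
        assert(max_int(ETH_FCS_LEN - tailroom, 0) == 0);

        /* Case 2: a forwarded skb with tight head/tail room. */
        headroom = 10;
        tailroom = 0;
        assert(max_int(OCELOT_TAG_LEN - headroom, 0) == 6);
        assert(max_int(ETH_FCS_LEN - tailroom, 0) == 4);

        printf("expansion sizes computed\n");
        return 0;
}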
+
+int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
+ struct sk_buff *skb, struct net_device *dev)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ int ret = NETDEV_TX_OK;
+
+ spin_lock(&fdma->tx_ring.xmit_lock);
+
+ if (ocelot_fdma_tx_ring_free(fdma) == 0) {
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev))
+ goto out;
+
+ ocelot_fdma_send_skb(ocelot, fdma, skb);
+
+out:
+ spin_unlock(&fdma->tx_ring.xmit_lock);
+
+ return ret;
+}
+
+static void ocelot_fdma_free_rx_ring(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ struct ocelot_fdma_rx_buf *rxb;
+ u16 idx;
+
+ rx_ring = &fdma->rx_ring;
+ idx = rx_ring->next_to_clean;
+
+ /* Free the pages held in the RX ring */
+ while (idx != rx_ring->next_to_use) {
+ rxb = &rx_ring->bufs[idx];
+ dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(rxb->page);
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ }
+
+ if (fdma->rx_ring.skb)
+ dev_kfree_skb_any(fdma->rx_ring.skb);
+}
+
+static void ocelot_fdma_free_tx_ring(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_tx_ring *tx_ring;
+ struct ocelot_fdma_tx_buf *txb;
+ struct sk_buff *skb;
+ u16 idx;
+
+ tx_ring = &fdma->tx_ring;
+ idx = tx_ring->next_to_clean;
+
+ while (idx != tx_ring->next_to_use) {
+ txb = &tx_ring->bufs[idx];
+ skb = txb->skb;
+ dma_unmap_single(ocelot->dev, dma_unmap_addr(txb, dma_addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_TX_RING_SIZE);
+ }
+}
+
+static int ocelot_fdma_rings_alloc(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_dcb *dcbs;
+ unsigned int adjust;
+ dma_addr_t dcbs_dma;
+ int ret;
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev,
+ OCELOT_DCBS_HW_ALLOC_SIZE,
+ &fdma->dcbs_dma_base, GFP_KERNEL);
+ if (!fdma->dcbs_base)
+ return -ENOMEM;
+
+ /* DCBs must be aligned on a 32bit boundary */
+ dcbs = fdma->dcbs_base;
+ dcbs_dma = fdma->dcbs_dma_base;
+ if (!IS_ALIGNED(dcbs_dma, 4)) {
+ adjust = dcbs_dma & 0x3;
+ dcbs_dma = ALIGN(dcbs_dma, 4);
+ dcbs = (void *)dcbs + adjust;
+ }
+
+ /* TX queue */
+ fdma->tx_ring.dcbs = dcbs;
+ fdma->tx_ring.dcbs_dma = dcbs_dma;
+ spin_lock_init(&fdma->tx_ring.xmit_lock);
+
+ /* RX queue */
+ fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE;
+ fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE;
+ ret = ocelot_fdma_alloc_rx_buffs(ocelot,
+ ocelot_fdma_tx_ring_free(fdma));
+ if (ret) {
+ ocelot_fdma_free_rx_ring(ocelot);
+ return ret;
+ }
+
+ /* Set the last DCB LLP to NULL; this is normally done when
+ * restarting the RX channel, but it is needed here for the first run.
+ */
+ ocelot_fdma_rx_set_llp(&fdma->rx_ring);
+
+ return 0;
+}
+
+void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ dev->needed_headroom = OCELOT_TAG_LEN;
+ dev->needed_tailroom = ETH_FCS_LEN;
+
+ if (fdma->ndev)
+ return;
+
+ fdma->ndev = dev;
+ netif_napi_add(dev, &fdma->napi, ocelot_fdma_napi_poll,
+ OCELOT_FDMA_WEIGHT);
+}
+
+void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ if (fdma->ndev == dev) {
+ netif_napi_del(&fdma->napi);
+ fdma->ndev = NULL;
+ }
+}
+
+void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot)
+{
+ struct device *dev = ocelot->dev;
+ struct ocelot_fdma *fdma;
+ int ret;
+
+ fdma = devm_kzalloc(dev, sizeof(*fdma), GFP_KERNEL);
+ if (!fdma)
+ return;
+
+ ocelot->fdma = fdma;
+ ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
+
+ fdma->ocelot = ocelot;
+ fdma->irq = platform_get_irq_byname(pdev, "fdma");
+ ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0,
+ dev_name(dev), ocelot);
+ if (ret)
+ goto err_free_fdma;
+
+ ret = ocelot_fdma_rings_alloc(ocelot);
+ if (ret)
+ goto err_free_irq;
+
+ static_branch_enable(&ocelot_fdma_enabled);
+
+ return;
+
+err_free_irq:
+ devm_free_irq(dev, fdma->irq, ocelot);
+err_free_fdma:
+ devm_kfree(dev, fdma);
+
+ ocelot->fdma = NULL;
+}
+
+void ocelot_fdma_start(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ /* Reconfigure for extraction and injection using DMA */
+ ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_MODE(2), QS_INJ_GRP_CFG, 0);
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(0), QS_INJ_CTRL, 0);
+
+ ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_MODE(2), QS_XTR_GRP_CFG, 0);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, 0xffffffff);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, 0xffffffff);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP_ENA,
+ BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM_ENA,
+ BIT(MSCC_FDMA_XTR_CHAN));
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
+ BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
+
+ napi_enable(&fdma->napi);
+
+ ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma,
+ MSCC_FDMA_XTR_CHAN);
+}
+
+void ocelot_fdma_deinit(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
+ BIT(MSCC_FDMA_XTR_CHAN));
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
+ BIT(MSCC_FDMA_INJ_CHAN));
+ napi_synchronize(&fdma->napi);
+ napi_disable(&fdma->napi);
+
+ ocelot_fdma_free_rx_ring(ocelot);
+ ocelot_fdma_free_tx_ring(ocelot);
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.h b/drivers/net/ethernet/mscc/ocelot_fdma.h
new file mode 100644
index 000000000000..2fc8e1dd7230
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi SoCs FDMA driver
+ *
+ * Copyright (c) 2021 Microchip
+ */
+#ifndef _MSCC_OCELOT_FDMA_H_
+#define _MSCC_OCELOT_FDMA_H_
+
+#include "ocelot.h"
+
+#define MSCC_FDMA_DCB_STAT_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define MSCC_FDMA_DCB_STAT_BLOCKO_M GENMASK(31, 20)
+#define MSCC_FDMA_DCB_STAT_BLOCKO_X(x) (((x) & GENMASK(31, 20)) >> 20)
+#define MSCC_FDMA_DCB_STAT_PD BIT(19)
+#define MSCC_FDMA_DCB_STAT_ABORT BIT(18)
+#define MSCC_FDMA_DCB_STAT_EOF BIT(17)
+#define MSCC_FDMA_DCB_STAT_SOF BIT(16)
+#define MSCC_FDMA_DCB_STAT_BLOCKL_M GENMASK(15, 0)
+#define MSCC_FDMA_DCB_STAT_BLOCKL(x) ((x) & GENMASK(15, 0))
+
+#define MSCC_FDMA_DCB_LLP(x) ((x) * 4 + 0x0)
+#define MSCC_FDMA_DCB_LLP_PREV(x) ((x) * 4 + 0xA0)
+#define MSCC_FDMA_CH_SAFE 0xcc
+#define MSCC_FDMA_CH_ACTIVATE 0xd0
+#define MSCC_FDMA_CH_DISABLE 0xd4
+#define MSCC_FDMA_CH_FORCEDIS 0xd8
+#define MSCC_FDMA_EVT_ERR 0x164
+#define MSCC_FDMA_EVT_ERR_CODE 0x168
+#define MSCC_FDMA_INTR_LLP 0x16c
+#define MSCC_FDMA_INTR_LLP_ENA 0x170
+#define MSCC_FDMA_INTR_FRM 0x174
+#define MSCC_FDMA_INTR_FRM_ENA 0x178
+#define MSCC_FDMA_INTR_ENA 0x184
+#define MSCC_FDMA_INTR_IDENT 0x188
+
+#define MSCC_FDMA_INJ_CHAN 2
+#define MSCC_FDMA_XTR_CHAN 0
+
+#define OCELOT_FDMA_WEIGHT 32
+
+#define OCELOT_FDMA_CH_SAFE_TIMEOUT_US 10
+
+#define OCELOT_FDMA_RX_RING_SIZE 512
+#define OCELOT_FDMA_TX_RING_SIZE 128
+
+#define OCELOT_FDMA_RX_DCB_SIZE (OCELOT_FDMA_RX_RING_SIZE * \
+ sizeof(struct ocelot_fdma_dcb))
+#define OCELOT_FDMA_TX_DCB_SIZE (OCELOT_FDMA_TX_RING_SIZE * \
+ sizeof(struct ocelot_fdma_dcb))
+/* +4 allows for word alignment after allocation */
+#define OCELOT_DCBS_HW_ALLOC_SIZE (OCELOT_FDMA_RX_DCB_SIZE + \
+ OCELOT_FDMA_TX_DCB_SIZE + \
+ 4)
+
+#define OCELOT_FDMA_RX_SIZE (PAGE_SIZE / 2)
+
+#define OCELOT_FDMA_SKBFRAG_OVR (4 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define OCELOT_FDMA_RXB_SIZE ALIGN_DOWN(OCELOT_FDMA_RX_SIZE - OCELOT_FDMA_SKBFRAG_OVR, 4)
+#define OCELOT_FDMA_SKBFRAG_SIZE (OCELOT_FDMA_RXB_SIZE + OCELOT_FDMA_SKBFRAG_OVR)
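
For a 4 KiB page these macros carve each half page into a DMA-able area plus the build_skb() overhead. The overhead constant below is an assumption for illustration: the real value is SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) and varies with architecture and kernel configuration:

#include <stdio.h>

#define PAGE_SIZE 4096
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* Assumed for illustration; 320 is typical on 64-bit kernels. */
#define SHINFO_ALIGNED 320

#define OCELOT_FDMA_RX_SIZE      (PAGE_SIZE / 2)
#define OCELOT_FDMA_SKBFRAG_OVR  (4 + SHINFO_ALIGNED)
#define OCELOT_FDMA_RXB_SIZE     ALIGN_DOWN(OCELOT_FDMA_RX_SIZE - OCELOT_FDMA_SKBFRAG_OVR, 4)
#define OCELOT_FDMA_SKBFRAG_SIZE (OCELOT_FDMA_RXB_SIZE + OCELOT_FDMA_SKBFRAG_OVR)

int main(void)
{
        printf("half page      : %d\n", OCELOT_FDMA_RX_SIZE);      /* 2048 */
        printf("DMA-able bytes : %d\n", OCELOT_FDMA_RXB_SIZE);     /* 1724 */
        printf("build_skb size : %d\n", OCELOT_FDMA_SKBFRAG_SIZE); /* 2048 */
        return 0;
}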
+
+DECLARE_STATIC_KEY_FALSE(ocelot_fdma_enabled);
+
+struct ocelot_fdma_dcb {
+ u32 llp;
+ u32 datap;
+ u32 datal;
+ u32 stat;
+} __packed;
+
+/**
+ * struct ocelot_fdma_tx_buf - TX buffer structure
+ * @skb: SKB currently used in the corresponding DCB.
+ * @dma_addr: SKB DMA mapped address.
+ */
+struct ocelot_fdma_tx_buf {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+};
+
+/**
+ * struct ocelot_fdma_tx_ring - TX ring description of DCBs
+ *
+ * @dcbs: DCBs allocated for the ring
+ * @dcbs_dma: DMA base address of the DCBs
+ * @bufs: List of TX buffer associated to the DCBs
+ * @xmit_lock: lock for concurrent xmit access
+ * @next_to_clean: Next DCB to be cleaned in tx_cleanup
+ * @next_to_use: Next available DCB to send SKB
+ */
+struct ocelot_fdma_tx_ring {
+ struct ocelot_fdma_dcb *dcbs;
+ dma_addr_t dcbs_dma;
+ struct ocelot_fdma_tx_buf bufs[OCELOT_FDMA_TX_RING_SIZE];
+ /* Protect concurrent xmit calls */
+ spinlock_t xmit_lock;
+ u16 next_to_clean;
+ u16 next_to_use;
+};
+
+/**
+ * struct ocelot_fdma_rx_buf - RX buffer structure
+ * @page: Struct page used in this buffer
+ * @page_offset: Current page offset (either 0 or PAGE_SIZE/2)
+ * @dma_addr: DMA address of the page
+ */
+struct ocelot_fdma_rx_buf {
+ struct page *page;
+ u32 page_offset;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct ocelot_fdma_rx_ring - RX ring description of DCBs
+ *
+ * @dcbs: DCBs allocated for the ring
+ * @dcbs_dma: DMA base address of the DCBs
+ * @bufs: List of RX buffer associated to the DCBs
+ * @skb: SKB currently being reassembled (a frame may span several DCBs)
+ * @next_to_clean: Next DCB to be cleaned during NAPI polling
+ * @next_to_use: Next available DCB to receive into
+ * @next_to_alloc: Next buffer that needs to be allocated (page reuse or alloc)
+ */
+struct ocelot_fdma_rx_ring {
+ struct ocelot_fdma_dcb *dcbs;
+ dma_addr_t dcbs_dma;
+ struct ocelot_fdma_rx_buf bufs[OCELOT_FDMA_RX_RING_SIZE];
+ struct sk_buff *skb;
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+};
+
+/**
+ * struct ocelot_fdma - FDMA context
+ *
+ * @irq: FDMA interrupt
+ * @ndev: Net device used to initialize NAPI
+ * @dcbs_base: Memory coherent DCBs
+ * @dcbs_dma_base: DMA base address of memory coherent DCBs
+ * @tx_ring: Injection ring
+ * @rx_ring: Extraction ring
+ * @napi: NAPI context
+ * @ocelot: Back-pointer to ocelot struct
+ */
+struct ocelot_fdma {
+ int irq;
+ struct net_device *ndev;
+ struct ocelot_fdma_dcb *dcbs_base;
+ dma_addr_t dcbs_dma_base;
+ struct ocelot_fdma_tx_ring tx_ring;
+ struct ocelot_fdma_rx_ring rx_ring;
+ struct napi_struct napi;
+ struct ocelot *ocelot;
+};
+
+void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot);
+void ocelot_fdma_start(struct ocelot *ocelot);
+void ocelot_fdma_deinit(struct ocelot *ocelot);
+int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
+ struct sk_buff *skb, struct net_device *dev);
+void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev);
+void ocelot_fdma_netdev_deinit(struct ocelot *ocelot,
+ struct net_device *dev);
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 769a8159373e..949858891973 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -20,6 +20,9 @@
(1 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP)
#define VCAP_IS2_CHAIN(lookup, pag) \
(2 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP + (pag))
+/* PSFP chain and block ID */
+#define PSFP_BLOCK_ID OCELOT_NUM_VCAP_BLOCKS
+#define OCELOT_PSFP_CHAIN (3 * VCAP_BLOCK)
static int ocelot_chain_to_block(int chain, bool ingress)
{
@@ -46,6 +49,9 @@ static int ocelot_chain_to_block(int chain, bool ingress)
if (chain == VCAP_IS2_CHAIN(lookup, pag))
return VCAP_IS2;
+ if (chain == OCELOT_PSFP_CHAIN)
+ return PSFP_BLOCK_ID;
+
return -EOPNOTSUPP;
}
@@ -84,7 +90,8 @@ static bool ocelot_is_goto_target_valid(int goto_target, int chain,
goto_target == VCAP_IS1_CHAIN(1) ||
goto_target == VCAP_IS1_CHAIN(2) ||
goto_target == VCAP_IS2_CHAIN(0, 0) ||
- goto_target == VCAP_IS2_CHAIN(1, 0));
+ goto_target == VCAP_IS2_CHAIN(1, 0) ||
+ goto_target == OCELOT_PSFP_CHAIN);
if (chain == VCAP_IS1_CHAIN(0))
return (goto_target == VCAP_IS1_CHAIN(1));
@@ -111,7 +118,11 @@ static bool ocelot_is_goto_target_valid(int goto_target, int chain,
if (chain == VCAP_IS2_CHAIN(0, pag))
return (goto_target == VCAP_IS2_CHAIN(1, pag));
- /* VCAP IS2 lookup 1 cannot jump anywhere */
+ /* VCAP IS2 lookup 1 can go to the PSFP block if the hardware supports it */
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (chain == VCAP_IS2_CHAIN(1, pag))
+ return (goto_target == OCELOT_PSFP_CHAIN);
+
return false;
}
@@ -211,6 +222,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
const struct flow_action_entry *a;
enum ocelot_tag_tpid_sel tpid;
int i, chain, egress_port;
+ u32 pol_ix, pol_max;
u64 rate;
int err;
@@ -269,10 +281,14 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_POLICE:
+ if (filter->block_id == PSFP_BLOCK_ID) {
+ filter->type = OCELOT_PSFP_FILTER_OFFLOAD;
+ break;
+ }
if (filter->block_id != VCAP_IS2 ||
filter->lookup != 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Police action can only be offloaded to VCAP IS2 lookup 0");
+ "Police action can only be offloaded to VCAP IS2 lookup 0 or PSFP");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
@@ -286,6 +302,20 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
return -EOPNOTSUPP;
}
filter->action.police_ena = true;
+
+ pol_ix = a->hw_index + ocelot->vcap_pol.base;
+ pol_max = ocelot->vcap_pol.max;
+
+ if (ocelot->vcap_pol.max2 && pol_ix > pol_max) {
+ pol_ix += ocelot->vcap_pol.base2 - pol_max - 1;
+ pol_max = ocelot->vcap_pol.max2;
+ }
+
+ if (pol_ix >= pol_max)
+ return -EINVAL;
+
+ filter->action.pol_ix = pol_ix;
+
rate = a->police.rate_bytes_ps;
filter->action.pol.rate = div_u64(rate, 1000) * 8;
filter->action.pol.burst = a->police.burst;
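
Some SoCs expose two discontiguous policer index ranges, and the remap above folds a flat tc hw_index into the second range once the first is exhausted. A sketch with assumed range bounds (the real base/max values come from the per-SoC ocelot->vcap_pol setup):

#include <stdio.h>

/* Assumed example ranges, for illustration only. */
#define POL_BASE  10
#define POL_MAX   20
#define POL_BASE2 50
#define POL_MAX2  60

static int map_policer(unsigned int hw_index)
{
        unsigned int pol_ix = hw_index + POL_BASE;
        unsigned int pol_max = POL_MAX;

        /* Past the first range: continue in the second one. */
        if (pol_ix > pol_max) {
                pol_ix += POL_BASE2 - pol_max - 1;
                pol_max = POL_MAX2;
        }

        if (pol_ix >= pol_max)
                return -1; /* -EINVAL in the driver */

        return (int)pol_ix;
}

int main(void)
{
        printf("hw_index 5  -> pol_ix %d\n", map_policer(5));  /* 15 */
        printf("hw_index 12 -> pol_ix %d\n", map_policer(12)); /* 51 */
        printf("hw_index 60 -> pol_ix %d\n", map_policer(60)); /* -1 */
        return 0;
}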
@@ -399,6 +429,14 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.pcp_a_val = a->vlan.prio;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
+ case FLOW_ACTION_GATE:
+ if (filter->block_id != PSFP_BLOCK_ID) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Gate action can only be offloaded to PSFP chain");
+ return -EOPNOTSUPP;
+ }
+ filter->type = OCELOT_PSFP_FILTER_OFFLOAD;
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "Cannot offload action");
return -EOPNOTSUPP;
@@ -407,7 +445,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
if (filter->goto_target == -1) {
if ((filter->block_id == VCAP_IS2 && filter->lookup == 1) ||
- chain == 0) {
+ chain == 0 || filter->block_id == PSFP_BLOCK_ID) {
allow_missing_goto_target = true;
} else {
NL_SET_ERR_MSG_MOD(extack, "Missing GOTO action");
@@ -521,13 +559,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return -EOPNOTSUPP;
}
- if (filter->block_id == VCAP_IS1 &&
- !is_zero_ether_addr(match.mask->dst)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Key type S1_NORMAL cannot match on destination MAC");
- return -EOPNOTSUPP;
- }
-
/* The hw supports mac matches only for the MAC_ETYPE key;
* therefore, if other matches (port, tcp flags, etc) are added,
* just bail out
@@ -542,6 +573,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return -EOPNOTSUPP;
flow_rule_match_eth_addrs(rule, &match);
+
+ if (filter->block_id == VCAP_IS1 &&
+ !is_zero_ether_addr(match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Key type S1_NORMAL cannot match on destination MAC");
+ return -EOPNOTSUPP;
+ }
+
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
ether_addr_copy(filter->key.etype.dmac.value,
match.key->dst);
@@ -689,6 +728,10 @@ static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress,
if (ret)
return ret;
+ /* PSFP filters need their key parsed by the stream identification function. */
+ if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD)
+ return 0;
+
return ocelot_flower_parse_key(ocelot, port, ingress, f, filter);
}
@@ -763,13 +806,34 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct netlink_ext_ack *extack = f->common.extack;
struct ocelot_vcap_filter *filter;
int chain = f->common.chain_index;
- int ret;
+ int block_id, ret;
if (chain && !ocelot_find_vcap_filter_that_points_at(ocelot, chain)) {
NL_SET_ERR_MSG_MOD(extack, "No default GOTO action points to this chain");
return -EOPNOTSUPP;
}
+ block_id = ocelot_chain_to_block(chain, ingress);
+ if (block_id < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain");
+ return -EOPNOTSUPP;
+ }
+
+ filter = ocelot_vcap_block_find_filter_by_id(&ocelot->block[block_id],
+ f->cookie, true);
+ if (filter) {
+ /* Filter already exists on other ports */
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack, "VCAP ES0 does not support shared filters");
+ return -EOPNOTSUPP;
+ }
+
+ filter->ingress_port_mask |= BIT(port);
+
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
+ /* Filter didn't exist, create it now */
filter = ocelot_vcap_filter_create(ocelot, port, ingress, f);
if (!filter)
return -ENOMEM;
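The lookup by f->cookie above is what lets one tc filter be shared across switch ports: if the rule already exists, the new port is OR-ed into the ingress port mask and the same VCAP entry is rewritten, rather than consuming a second TCAM slot. ES0 is excluded because it is an egress block keyed by a single egress port, so there is no ingress mask to extend. Distilled to its core:

	/* One TCAM entry, many ports: membership lives in a bitmask. */
	filter->ingress_port_mask |= BIT(port);
	return ocelot_vcap_filter_replace(ocelot, filter);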
@@ -792,6 +856,15 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
return ocelot_vcap_dummy_filter_add(ocelot, filter);
+ if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD) {
+ kfree(filter);
+ if (ocelot->ops->psfp_filter_add)
+ return ocelot->ops->psfp_filter_add(ocelot, port, f);
+
+ NL_SET_ERR_MSG_MOD(extack, "PSFP chain is not supported in HW");
+ return -EOPNOTSUPP;
+ }
+
return ocelot_vcap_filter_add(ocelot, filter, f->common.extack);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
@@ -807,6 +880,13 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
if (block_id < 0)
return 0;
+ if (block_id == PSFP_BLOCK_ID) {
+ if (ocelot->ops->psfp_filter_del)
+ return ocelot->ops->psfp_filter_del(ocelot, f);
+
+ return -EOPNOTSUPP;
+ }
+
block = &ocelot->block[block_id];
filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
@@ -816,6 +896,12 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
return ocelot_vcap_dummy_filter_del(ocelot, filter);
+ if (ingress) {
+ filter->ingress_port_mask &= ~BIT(port);
+ if (filter->ingress_port_mask)
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
return ocelot_vcap_filter_del(ocelot, filter);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
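Deletion is the mirror image of the shared-filter add above: the departing port's bit is cleared first, and the hardware entry is only torn down when no ports remain. In isolation:

	filter->ingress_port_mask &= ~BIT(port);
	if (filter->ingress_port_mask)		/* still in use elsewhere */
		return ocelot_vcap_filter_replace(ocelot, filter);
	return ocelot_vcap_filter_del(ocelot, filter);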
@@ -825,12 +911,25 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
{
struct ocelot_vcap_filter *filter;
struct ocelot_vcap_block *block;
+ struct flow_stats stats = {0};
int block_id, ret;
block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
if (block_id < 0)
return 0;
+ if (block_id == PSFP_BLOCK_ID) {
+ if (ocelot->ops->psfp_stats_get) {
+ ret = ocelot->ops->psfp_stats_get(ocelot, f, &stats);
+ if (ret)
+ return ret;
+
+ goto stats_update;
+ }
+
+ return -EOPNOTSUPP;
+ }
+
block = &ocelot->block[block_id];
filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
@@ -841,7 +940,10 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
if (ret)
return ret;
- flow_stats_update(&f->stats, 0x0, filter->stats.pkts, 0, 0x0,
+ stats.pkts = filter->stats.pkts;
+
+stats_update:
+ flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, 0x0,
FLOW_ACTION_HW_STATS_IMMEDIATE);
return 0;
}
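Routing both the VCAP and PSFP counters through a local struct flow_stats lets the PSFP path report drops, which the old direct call could not. For reference, flow_stats_update() takes deltas since the previous query, in the order (bytes, pkts, drops, lastused); bytes and lastused stay zero here because this hardware only exposes hit and drop counts.

	/* Signature for reference (include/net/flow_offload.h):
	 * flow_stats_update(stats, bytes, pkts, drops, lastused,
	 *		     used_hw_stats)
	 */
	flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);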
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index eaeba60b1bba..e271b6225b72 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -15,6 +15,7 @@
#include <net/pkt_cls.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
+#include "ocelot_fdma.h"
#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
@@ -457,7 +458,8 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
int port = priv->chip_port;
u32 rew_op = 0;
- if (!ocelot_can_inject(ocelot, 0))
+ if (!static_branch_unlikely(&ocelot_fdma_enabled) &&
+ !ocelot_can_inject(ocelot, 0))
return NETDEV_TX_BUSY;
/* Check if timestamping is needed */
@@ -475,9 +477,13 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
rew_op = ocelot_ptp_rew_op(skb);
}
- ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+ if (static_branch_unlikely(&ocelot_fdma_enabled)) {
+ ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev);
+ } else {
+ ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
- kfree_skb(skb);
+ consume_skb(skb);
+ }
return NETDEV_TX_OK;
}
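Two details worth noting in this transmit path. First, ocelot_fdma_enabled is a static key: static_branch_unlikely() compiles down to a patched jump, so switches without FDMA pay no per-packet conditional. Second, the register-injection path copies the frame out of the skb and must free it itself; consume_skb() replaces kfree_skb() because the latter fires the drop tracepoint, and a successfully transmitted frame is not a drop. (On the FDMA path the skb is owned by the DMA code and freed on TX completion.) A sketch of the key, with the enable site assumed to live in the FDMA setup code:

	DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled);

	/* ...once the FDMA engine is brought up: */
	static_branch_enable(&ocelot_fdma_enabled);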
@@ -764,10 +770,23 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
+static int ocelot_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ ocelot_port_set_maxlen(ocelot, priv->chip_port, new_mtu);
+ WRITE_ONCE(dev->mtu, new_mtu);
+
+ return 0;
+}
+
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
.ndo_start_xmit = ocelot_port_xmit,
+ .ndo_change_mtu = ocelot_change_mtu,
.ndo_set_rx_mode = ocelot_set_rx_mode,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
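The new ndo_change_mtu is what makes the max_mtu bump (further down, in ocelot_probe_port()) useful: when userspace raises the MTU, the driver reprograms the port's maximum frame length in hardware before publishing the new value, and WRITE_ONCE() pairs with lockless readers of dev->mtu on the data path. The resulting flow, assuming OCELOT_JUMBO_MTU from this patch:

	/* dev->max_mtu gates what the core will accept; the hook then
	 * resizes the hardware frame-length check to match.
	 */
	dev->max_mtu = OCELOT_JUMBO_MTU;		/* probe time */
	ocelot_port_set_maxlen(ocelot, port, new_mtu);	/* on change */
	WRITE_ONCE(dev->mtu, new_mtu);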
@@ -1168,7 +1187,7 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
ocelot_port_bridge_join(ocelot, port, bridge);
err = switchdev_bridge_port_offload(brport_dev, dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb,
false, extack);
if (err)
@@ -1182,7 +1201,7 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
err_switchdev_sync:
switchdev_bridge_port_unoffload(brport_dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb);
err_switchdev_offload:
ocelot_port_bridge_leave(ocelot, port, bridge);
@@ -1195,7 +1214,7 @@ static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
switchdev_bridge_port_unoffload(brport_dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb);
}
@@ -1498,40 +1517,6 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
.notifier_call = ocelot_switchdev_blocking_event,
};
-static void vsc7514_phylink_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- struct ocelot_port_private *priv = netdev_priv(ndev);
- struct ocelot_port *ocelot_port = &priv->port;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = {};
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void vsc7514_phylink_mac_config(struct phylink_config *config,
unsigned int link_an_mode,
const struct phylink_link_state *state)
@@ -1590,7 +1575,7 @@ static void vsc7514_phylink_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ocelot_phylink_ops = {
- .validate = vsc7514_phylink_validate,
+ .validate = phylink_generic_validate,
.mac_config = vsc7514_phylink_mac_config,
.mac_link_down = vsc7514_phylink_mac_link_down,
.mac_link_up = vsc7514_phylink_mac_link_up,
@@ -1654,6 +1639,11 @@ static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+
+ __set_bit(ocelot_port->phy_mode,
+ priv->phylink_config.supported_interfaces);
phylink = phylink_create(&priv->phylink_config,
of_fwnode_handle(portnp),
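This is the conversion to phylink's generic validation: instead of hand-building ethtool link-mode masks in a .validate callback, the driver declares what the MAC can do (mac_capabilities) and which PHY interface modes it supports (supported_interfaces), and phylink derives the masks itself. The shape of the contract, with SGMII as a stand-in interface mode:

	/* Declared once at port setup: */
	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD |
				   MAC_2500FD;
	__set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces);
	/* Then .validate = phylink_generic_validate does the rest. */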
@@ -1699,12 +1689,16 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
dev->netdev_ops = &ocelot_port_netdev_ops;
dev->ethtool_ops = &ocelot_ethtool_ops;
+ dev->max_mtu = OCELOT_JUMBO_MTU;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS |
NETIF_F_HW_TC;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
- eth_hw_addr_gen(dev, ocelot->base_mac, port);
+ err = of_get_ethdev_address(portnp, dev);
+ if (err)
+ eth_hw_addr_gen(dev, ocelot->base_mac, port);
+
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
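of_get_ethdev_address() reads a MAC for the port from the device tree (the mac-address/local-mac-address properties, or an nvmem cell) and installs it with eth_hw_addr_set(); only when nothing is provided does the driver fall back to synthesizing an address from base_mac + port as before.

	/* Preference order: firmware-provided address, then synthesized. */
	err = of_get_ethdev_address(portnp, dev);	/* DT/nvmem MAC */
	if (err)
		eth_hw_addr_gen(dev, ocelot->base_mac, port);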
@@ -1714,14 +1708,20 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
if (err)
goto out;
+ if (ocelot->fdma)
+ ocelot_fdma_netdev_init(ocelot, dev);
+
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
- goto out;
+ goto out_fdma_deinit;
}
return 0;
+out_fdma_deinit:
+ if (ocelot->fdma)
+ ocelot_fdma_netdev_deinit(ocelot, dev);
out:
ocelot->ports[port] = NULL;
free_netdev(dev);
@@ -1734,9 +1734,14 @@ void ocelot_release_port(struct ocelot_port *ocelot_port)
struct ocelot_port_private *priv = container_of(ocelot_port,
struct ocelot_port_private,
port);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_fdma *fdma = ocelot->fdma;
unregister_netdev(priv->dev);
+ if (fdma)
+ ocelot_fdma_netdev_deinit(ocelot, priv->dev);
+
if (priv->phylink) {
rtnl_lock();
phylink_disconnect_phy(priv->phylink);
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 337cd08b1a54..d3544413a8a4 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -887,10 +887,18 @@ static void vcap_entry_set(struct ocelot *ocelot, int ix,
return es0_entry_set(ocelot, ix, filter);
}
-static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
- struct ocelot_policer *pol)
+struct vcap_policer_entry {
+ struct list_head list;
+ refcount_t refcount;
+ u32 pol_ix;
+};
+
+int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol)
{
struct qos_policer_conf pp = { 0 };
+ struct vcap_policer_entry *tmp;
+ int ret;
if (!pol)
return -EINVAL;
@@ -899,57 +907,74 @@ static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
pp.pir = pol->rate;
pp.pbs = pol->burst;
- return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ list_for_each_entry(tmp, &ocelot->vcap_pol.pol_list, list)
+ if (tmp->pol_ix == pol_ix) {
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ if (ret) {
+ kfree(tmp);
+ return ret;
+ }
+
+ tmp->pol_ix = pol_ix;
+ refcount_set(&tmp->refcount, 1);
+ list_add_tail(&tmp->list, &ocelot->vcap_pol.pol_list);
+
+ return 0;
}
+EXPORT_SYMBOL(ocelot_vcap_policer_add);
-static void ocelot_vcap_policer_del(struct ocelot *ocelot,
- struct ocelot_vcap_block *block,
- u32 pol_ix)
+int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix)
{
- struct ocelot_vcap_filter *filter;
struct qos_policer_conf pp = {0};
- int index = -1;
-
- if (pol_ix < block->pol_lpr)
- return;
-
- list_for_each_entry(filter, &block->rules, list) {
- index++;
- if (filter->block_id == VCAP_IS2 &&
- filter->action.police_ena &&
- filter->action.pol_ix < pol_ix) {
- filter->action.pol_ix += 1;
- ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
- &filter->action.pol);
- is2_entry_set(ocelot, index, filter);
+ struct vcap_policer_entry *tmp, *n;
+ u8 z = 0;
+
+ list_for_each_entry_safe(tmp, n, &ocelot->vcap_pol.pol_list, list)
+ if (tmp->pol_ix == pol_ix) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
}
- }
- pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ if (z) {
+ pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
+ return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ }
- block->pol_lpr++;
+ return 0;
}
+EXPORT_SYMBOL(ocelot_vcap_policer_del);
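The policer bookkeeping changes model here. Previously, indices were allocated top-down from pol_lpr and every deletion renumbered (and re-programmed) all lower policers, which also kept the index private to the driver. Now the index comes from tc's hw_index, and a refcounted list lets several filters share one hardware policer: the first add programs it, later adds just take a reference, and refcount_dec_and_test(), which returns true only when the count reaches zero, gates the teardown. The release side in isolation:

	if (refcount_dec_and_test(&tmp->refcount)) {
		list_del(&tmp->list);
		kfree(tmp);
		/* last user gone: disable the hardware policer */
		pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
		return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
	}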
-static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
- struct ocelot_vcap_block *block,
- struct ocelot_vcap_filter *filter)
+static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
+ struct ocelot_vcap_block *block,
+ struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_filter *tmp;
struct list_head *pos, *n;
+ int ret;
if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
- block->pol_lpr--;
- filter->action.pol_ix = block->pol_lpr;
- ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
- &filter->action.pol);
+ ret = ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
+ &filter->action.pol);
+ if (ret)
+ return ret;
}
block->count++;
if (list_empty(&block->rules)) {
list_add(&filter->list, &block->rules);
- return;
+ return 0;
}
list_for_each_safe(pos, n, &block->rules) {
@@ -958,6 +983,8 @@ static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
break;
}
list_add(&filter->list, pos->prev);
+
+ return 0;
}
static bool ocelot_vcap_filter_equal(const struct ocelot_vcap_filter *a,
@@ -1132,7 +1159,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct netlink_ext_ack *extack)
{
struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
- int i, index;
+ int i, index, ret;
if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1141,7 +1168,9 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
}
/* Add filter to the linked list */
- ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+ ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+ if (ret)
+ return ret;
/* Get the index of the inserted filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
@@ -1174,7 +1203,7 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
if (ocelot_vcap_filter_equal(filter, tmp)) {
if (tmp->block_id == VCAP_IS2 &&
tmp->action.police_ena)
- ocelot_vcap_policer_del(ocelot, block,
+ ocelot_vcap_policer_del(ocelot,
tmp->action.pol_ix);
list_del(pos);
@@ -1366,13 +1395,13 @@ int ocelot_vcap_init(struct ocelot *ocelot)
struct vcap_props *vcap = &ocelot->vcap[i];
INIT_LIST_HEAD(&block->rules);
- block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
ocelot_vcap_detect_constants(ocelot, vcap);
ocelot_vcap_init_one(ocelot, vcap);
}
INIT_LIST_HEAD(&ocelot->dummy_rules);
+ INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 38103b0255b0..4f4a495a60ad 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -18,313 +18,24 @@
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_hsio.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "ocelot_fdma.h"
#include "ocelot.h"
-static const u32 ocelot_ana_regmap[] = {
- REG(ANA_ADVLEARN, 0x009000),
- REG(ANA_VLANMASK, 0x009004),
- REG(ANA_PORT_B_DOMAIN, 0x009008),
- REG(ANA_ANAGEFIL, 0x00900c),
- REG(ANA_ANEVENTS, 0x009010),
- REG(ANA_STORMLIMIT_BURST, 0x009014),
- REG(ANA_STORMLIMIT_CFG, 0x009018),
- REG(ANA_ISOLATED_PORTS, 0x009028),
- REG(ANA_COMMUNITY_PORTS, 0x00902c),
- REG(ANA_AUTOAGE, 0x009030),
- REG(ANA_MACTOPTIONS, 0x009034),
- REG(ANA_LEARNDISC, 0x009038),
- REG(ANA_AGENCTRL, 0x00903c),
- REG(ANA_MIRRORPORTS, 0x009040),
- REG(ANA_EMIRRORPORTS, 0x009044),
- REG(ANA_FLOODING, 0x009048),
- REG(ANA_FLOODING_IPMC, 0x00904c),
- REG(ANA_SFLOW_CFG, 0x009050),
- REG(ANA_PORT_MODE, 0x009080),
- REG(ANA_PGID_PGID, 0x008c00),
- REG(ANA_TABLES_ANMOVED, 0x008b30),
- REG(ANA_TABLES_MACHDATA, 0x008b34),
- REG(ANA_TABLES_MACLDATA, 0x008b38),
- REG(ANA_TABLES_MACACCESS, 0x008b3c),
- REG(ANA_TABLES_MACTINDX, 0x008b40),
- REG(ANA_TABLES_VLANACCESS, 0x008b44),
- REG(ANA_TABLES_VLANTIDX, 0x008b48),
- REG(ANA_TABLES_ISDXACCESS, 0x008b4c),
- REG(ANA_TABLES_ISDXTIDX, 0x008b50),
- REG(ANA_TABLES_ENTRYLIM, 0x008b00),
- REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54),
- REG(ANA_TABLES_PTP_ID_LOW, 0x008b58),
- REG(ANA_MSTI_STATE, 0x008e00),
- REG(ANA_PORT_VLAN_CFG, 0x007000),
- REG(ANA_PORT_DROP_CFG, 0x007004),
- REG(ANA_PORT_QOS_CFG, 0x007008),
- REG(ANA_PORT_VCAP_CFG, 0x00700c),
- REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010),
- REG(ANA_PORT_VCAP_S2_CFG, 0x00701c),
- REG(ANA_PORT_PCP_DEI_MAP, 0x007020),
- REG(ANA_PORT_CPU_FWD_CFG, 0x007060),
- REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064),
- REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068),
- REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c),
- REG(ANA_PORT_PORT_CFG, 0x007070),
- REG(ANA_PORT_POL_CFG, 0x007074),
- REG(ANA_PORT_PTP_CFG, 0x007078),
- REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c),
- REG(ANA_OAM_UPM_LM_CNT, 0x007c00),
- REG(ANA_PORT_PTP_DLY2_CFG, 0x007080),
- REG(ANA_PFC_PFC_CFG, 0x008800),
- REG(ANA_PFC_PFC_TIMER, 0x008804),
- REG(ANA_IPT_OAM_MEP_CFG, 0x008000),
- REG(ANA_IPT_IPT, 0x008004),
- REG(ANA_PPT_PPT, 0x008ac0),
- REG(ANA_FID_MAP_FID_MAP, 0x000000),
- REG(ANA_AGGR_CFG, 0x0090b4),
- REG(ANA_CPUQ_CFG, 0x0090b8),
- REG(ANA_CPUQ_CFG2, 0x0090bc),
- REG(ANA_CPUQ_8021_CFG, 0x0090c0),
- REG(ANA_DSCP_CFG, 0x009100),
- REG(ANA_DSCP_REWR_CFG, 0x009200),
- REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240),
- REG(ANA_VCAP_RNG_VAL_CFG, 0x009260),
- REG(ANA_VRAP_CFG, 0x009280),
- REG(ANA_VRAP_HDR_DATA, 0x009284),
- REG(ANA_VRAP_HDR_MASK, 0x009288),
- REG(ANA_DISCARD_CFG, 0x00928c),
- REG(ANA_FID_CFG, 0x009290),
- REG(ANA_POL_PIR_CFG, 0x004000),
- REG(ANA_POL_CIR_CFG, 0x004004),
- REG(ANA_POL_MODE_CFG, 0x004008),
- REG(ANA_POL_PIR_STATE, 0x00400c),
- REG(ANA_POL_CIR_STATE, 0x004010),
- REG(ANA_POL_STATE, 0x004014),
- REG(ANA_POL_FLOWC, 0x008b80),
- REG(ANA_POL_HYST, 0x008bec),
- REG(ANA_POL_MISC_CFG, 0x008bf0),
-};
-
-static const u32 ocelot_qs_regmap[] = {
- REG(QS_XTR_GRP_CFG, 0x000000),
- REG(QS_XTR_RD, 0x000008),
- REG(QS_XTR_FRM_PRUNING, 0x000010),
- REG(QS_XTR_FLUSH, 0x000018),
- REG(QS_XTR_DATA_PRESENT, 0x00001c),
- REG(QS_XTR_CFG, 0x000020),
- REG(QS_INJ_GRP_CFG, 0x000024),
- REG(QS_INJ_WR, 0x00002c),
- REG(QS_INJ_CTRL, 0x000034),
- REG(QS_INJ_STATUS, 0x00003c),
- REG(QS_INJ_ERR, 0x000040),
- REG(QS_INH_DBG, 0x000048),
-};
-
-static const u32 ocelot_qsys_regmap[] = {
- REG(QSYS_PORT_MODE, 0x011200),
- REG(QSYS_SWITCH_PORT_MODE, 0x011234),
- REG(QSYS_STAT_CNT_CFG, 0x011264),
- REG(QSYS_EEE_CFG, 0x011268),
- REG(QSYS_EEE_THRES, 0x011294),
- REG(QSYS_IGR_NO_SHARING, 0x011298),
- REG(QSYS_EGR_NO_SHARING, 0x01129c),
- REG(QSYS_SW_STATUS, 0x0112a0),
- REG(QSYS_EXT_CPU_CFG, 0x0112d0),
- REG(QSYS_PAD_CFG, 0x0112d4),
- REG(QSYS_CPU_GROUP_MAP, 0x0112d8),
- REG(QSYS_QMAP, 0x0112dc),
- REG(QSYS_ISDX_SGRP, 0x011400),
- REG(QSYS_TIMED_FRAME_ENTRY, 0x014000),
- REG(QSYS_TFRM_MISC, 0x011310),
- REG(QSYS_TFRM_PORT_DLY, 0x011314),
- REG(QSYS_TFRM_TIMER_CFG_1, 0x011318),
- REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c),
- REG(QSYS_TFRM_TIMER_CFG_3, 0x011320),
- REG(QSYS_TFRM_TIMER_CFG_4, 0x011324),
- REG(QSYS_TFRM_TIMER_CFG_5, 0x011328),
- REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c),
- REG(QSYS_TFRM_TIMER_CFG_7, 0x011330),
- REG(QSYS_TFRM_TIMER_CFG_8, 0x011334),
- REG(QSYS_RED_PROFILE, 0x011338),
- REG(QSYS_RES_QOS_MODE, 0x011378),
- REG(QSYS_RES_CFG, 0x012000),
- REG(QSYS_RES_STAT, 0x012004),
- REG(QSYS_EGR_DROP_MODE, 0x01137c),
- REG(QSYS_EQ_CTRL, 0x011380),
- REG(QSYS_EVENTS_CORE, 0x011384),
- REG(QSYS_CIR_CFG, 0x000000),
- REG(QSYS_EIR_CFG, 0x000004),
- REG(QSYS_SE_CFG, 0x000008),
- REG(QSYS_SE_DWRR_CFG, 0x00000c),
- REG(QSYS_SE_CONNECT, 0x00003c),
- REG(QSYS_SE_DLB_SENSE, 0x000040),
- REG(QSYS_CIR_STATE, 0x000044),
- REG(QSYS_EIR_STATE, 0x000048),
- REG(QSYS_SE_STATE, 0x00004c),
- REG(QSYS_HSCH_MISC_CFG, 0x011388),
-};
-
-static const u32 ocelot_rew_regmap[] = {
- REG(REW_PORT_VLAN_CFG, 0x000000),
- REG(REW_TAG_CFG, 0x000004),
- REG(REW_PORT_CFG, 0x000008),
- REG(REW_DSCP_CFG, 0x00000c),
- REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
- REG(REW_PTP_CFG, 0x000050),
- REG(REW_PTP_DLY1_CFG, 0x000054),
- REG(REW_DSCP_REMAP_DP1_CFG, 0x000690),
- REG(REW_DSCP_REMAP_CFG, 0x000790),
- REG(REW_STAT_CFG, 0x000890),
- REG(REW_PPT, 0x000680),
-};
-
-static const u32 ocelot_sys_regmap[] = {
- REG(SYS_COUNT_RX_OCTETS, 0x000000),
- REG(SYS_COUNT_RX_UNICAST, 0x000004),
- REG(SYS_COUNT_RX_MULTICAST, 0x000008),
- REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
- REG(SYS_COUNT_RX_SHORTS, 0x000010),
- REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
- REG(SYS_COUNT_RX_JABBERS, 0x000018),
- REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
- REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
- REG(SYS_COUNT_RX_64, 0x000024),
- REG(SYS_COUNT_RX_65_127, 0x000028),
- REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_PAUSE, 0x00003c),
- REG(SYS_COUNT_RX_CONTROL, 0x000040),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
- REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
- REG(SYS_COUNT_TX_OCTETS, 0x000100),
- REG(SYS_COUNT_TX_UNICAST, 0x000104),
- REG(SYS_COUNT_TX_MULTICAST, 0x000108),
- REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
- REG(SYS_COUNT_TX_COLLISION, 0x000110),
- REG(SYS_COUNT_TX_DROPS, 0x000114),
- REG(SYS_COUNT_TX_PAUSE, 0x000118),
- REG(SYS_COUNT_TX_64, 0x00011c),
- REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000170),
- REG(SYS_RESET_CFG, 0x000508),
- REG(SYS_CMID, 0x00050c),
- REG(SYS_VLAN_ETYPE_CFG, 0x000510),
- REG(SYS_PORT_MODE, 0x000514),
- REG(SYS_FRONT_PORT_MODE, 0x000548),
- REG(SYS_FRM_AGING, 0x000574),
- REG(SYS_STAT_CFG, 0x000578),
- REG(SYS_SW_STATUS, 0x00057c),
- REG(SYS_MISC_CFG, 0x0005ac),
- REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0),
- REG(SYS_REW_MAC_LOW_CFG, 0x0005dc),
- REG(SYS_CM_ADDR, 0x000500),
- REG(SYS_CM_DATA, 0x000504),
- REG(SYS_PAUSE_CFG, 0x000608),
- REG(SYS_PAUSE_TOT_CFG, 0x000638),
- REG(SYS_ATOP, 0x00063c),
- REG(SYS_ATOP_TOT_CFG, 0x00066c),
- REG(SYS_MAC_FC_CFG, 0x000670),
- REG(SYS_MMGT, 0x00069c),
- REG(SYS_MMGT_FAST, 0x0006a0),
- REG(SYS_EVENTS_DIF, 0x0006a4),
- REG(SYS_EVENTS_CORE, 0x0006b4),
- REG(SYS_CNT, 0x000000),
- REG(SYS_PTP_STATUS, 0x0006b8),
- REG(SYS_PTP_TXSTAMP, 0x0006bc),
- REG(SYS_PTP_NXT, 0x0006c0),
- REG(SYS_PTP_CFG, 0x0006c4),
-};
-
-static const u32 ocelot_vcap_regmap[] = {
- /* VCAP_CORE_CFG */
- REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
- REG(VCAP_CORE_MV_CFG, 0x000004),
- /* VCAP_CORE_CACHE */
- REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
- REG(VCAP_CACHE_MASK_DAT, 0x000108),
- REG(VCAP_CACHE_ACTION_DAT, 0x000208),
- REG(VCAP_CACHE_CNT_DAT, 0x000308),
- REG(VCAP_CACHE_TG_DAT, 0x000388),
- /* VCAP_CONST */
- REG(VCAP_CONST_VCAP_VER, 0x000398),
- REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
- REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
- REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
- REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
- REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
- REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
- REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
- REG(VCAP_CONST_CORE_CNT, 0x0003b8),
- REG(VCAP_CONST_IF_CNT, 0x0003bc),
-};
-
-static const u32 ocelot_ptp_regmap[] = {
- REG(PTP_PIN_CFG, 0x000000),
- REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
- REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
- REG(PTP_PIN_TOD_NSEC, 0x00000c),
- REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
- REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
- REG(PTP_CFG_MISC, 0x0000a0),
- REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
- REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
-};
-
-static const u32 ocelot_dev_gmii_regmap[] = {
- REG(DEV_CLOCK_CFG, 0x0),
- REG(DEV_PORT_MISC, 0x4),
- REG(DEV_EVENTS, 0x8),
- REG(DEV_EEE_CFG, 0xc),
- REG(DEV_RX_PATH_DELAY, 0x10),
- REG(DEV_TX_PATH_DELAY, 0x14),
- REG(DEV_PTP_PREDICT_CFG, 0x18),
- REG(DEV_MAC_ENA_CFG, 0x1c),
- REG(DEV_MAC_MODE_CFG, 0x20),
- REG(DEV_MAC_MAXLEN_CFG, 0x24),
- REG(DEV_MAC_TAGS_CFG, 0x28),
- REG(DEV_MAC_ADV_CHK_CFG, 0x2c),
- REG(DEV_MAC_IFG_CFG, 0x30),
- REG(DEV_MAC_HDX_CFG, 0x34),
- REG(DEV_MAC_DBG_CFG, 0x38),
- REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
- REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
- REG(DEV_MAC_STICKY, 0x44),
- REG(PCS1G_CFG, 0x48),
- REG(PCS1G_MODE_CFG, 0x4c),
- REG(PCS1G_SD_CFG, 0x50),
- REG(PCS1G_ANEG_CFG, 0x54),
- REG(PCS1G_ANEG_NP_CFG, 0x58),
- REG(PCS1G_LB_CFG, 0x5c),
- REG(PCS1G_DBG_CFG, 0x60),
- REG(PCS1G_CDET_CFG, 0x64),
- REG(PCS1G_ANEG_STATUS, 0x68),
- REG(PCS1G_ANEG_NP_STATUS, 0x6c),
- REG(PCS1G_LINK_STATUS, 0x70),
- REG(PCS1G_LINK_DOWN_CNT, 0x74),
- REG(PCS1G_STICKY, 0x78),
- REG(PCS1G_DEBUG_STATUS, 0x7c),
- REG(PCS1G_LPI_CFG, 0x80),
- REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84),
- REG(PCS1G_LPI_STATUS, 0x88),
- REG(PCS1G_TSTPAT_MODE_CFG, 0x8c),
- REG(PCS1G_TSTPAT_STATUS, 0x90),
- REG(DEV_PCS_FX100_CFG, 0x94),
- REG(DEV_PCS_FX100_STATUS, 0x98),
-};
+#define VSC7514_VCAP_POLICER_BASE 128
+#define VSC7514_VCAP_POLICER_MAX 191
static const u32 *ocelot_regmap[TARGET_MAX] = {
- [ANA] = ocelot_ana_regmap,
- [QS] = ocelot_qs_regmap,
- [QSYS] = ocelot_qsys_regmap,
- [REW] = ocelot_rew_regmap,
- [SYS] = ocelot_sys_regmap,
- [S0] = ocelot_vcap_regmap,
- [S1] = ocelot_vcap_regmap,
- [S2] = ocelot_vcap_regmap,
- [PTP] = ocelot_ptp_regmap,
- [DEV_GMII] = ocelot_dev_gmii_regmap,
+ [ANA] = vsc7514_ana_regmap,
+ [QS] = vsc7514_qs_regmap,
+ [QSYS] = vsc7514_qsys_regmap,
+ [REW] = vsc7514_rew_regmap,
+ [SYS] = vsc7514_sys_regmap,
+ [S0] = vsc7514_vcap_regmap,
+ [S1] = vsc7514_vcap_regmap,
+ [S2] = vsc7514_vcap_regmap,
+ [PTP] = vsc7514_ptp_regmap,
+ [DEV_GMII] = vsc7514_dev_gmii_regmap,
};
static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
@@ -633,211 +344,6 @@ static const struct ocelot_ops ocelot_ops = {
.netdev_to_port = ocelot_netdev_to_port,
};
-static const struct vcap_field vsc7514_vcap_es0_keys[] = {
- [VCAP_ES0_EGR_PORT] = { 0, 4},
- [VCAP_ES0_IGR_PORT] = { 4, 4},
- [VCAP_ES0_RSV] = { 8, 2},
- [VCAP_ES0_L2_MC] = { 10, 1},
- [VCAP_ES0_L2_BC] = { 11, 1},
- [VCAP_ES0_VID] = { 12, 12},
- [VCAP_ES0_DP] = { 24, 1},
- [VCAP_ES0_PCP] = { 25, 3},
-};
-
-static const struct vcap_field vsc7514_vcap_es0_actions[] = {
- [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
- [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
- [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
- [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
- [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
- [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
- [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
- [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
- [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
- [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
- [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
- [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
- [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
- [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
- [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
- [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
- [VCAP_ES0_ACT_RSV] = { 49, 24},
- [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1},
-};
-
-static const struct vcap_field vsc7514_vcap_is1_keys[] = {
- [VCAP_IS1_HK_TYPE] = { 0, 1},
- [VCAP_IS1_HK_LOOKUP] = { 1, 2},
- [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12},
- [VCAP_IS1_HK_RSV] = { 15, 9},
- [VCAP_IS1_HK_OAM_Y1731] = { 24, 1},
- [VCAP_IS1_HK_L2_MC] = { 25, 1},
- [VCAP_IS1_HK_L2_BC] = { 26, 1},
- [VCAP_IS1_HK_IP_MC] = { 27, 1},
- [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1},
- [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1},
- [VCAP_IS1_HK_TPID] = { 30, 1},
- [VCAP_IS1_HK_VID] = { 31, 12},
- [VCAP_IS1_HK_DEI] = { 43, 1},
- [VCAP_IS1_HK_PCP] = { 44, 3},
- /* Specific Fields for IS1 Half Key S1_NORMAL */
- [VCAP_IS1_HK_L2_SMAC] = { 47, 48},
- [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1},
- [VCAP_IS1_HK_ETYPE] = { 96, 16},
- [VCAP_IS1_HK_IP_SNAP] = {112, 1},
- [VCAP_IS1_HK_IP4] = {113, 1},
- /* Layer-3 Information */
- [VCAP_IS1_HK_L3_FRAGMENT] = {114, 1},
- [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {115, 1},
- [VCAP_IS1_HK_L3_OPTIONS] = {116, 1},
- [VCAP_IS1_HK_L3_DSCP] = {117, 6},
- [VCAP_IS1_HK_L3_IP4_SIP] = {123, 32},
- /* Layer-4 Information */
- [VCAP_IS1_HK_TCP_UDP] = {155, 1},
- [VCAP_IS1_HK_TCP] = {156, 1},
- [VCAP_IS1_HK_L4_SPORT] = {157, 16},
- [VCAP_IS1_HK_L4_RNG] = {173, 8},
- /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
- [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1},
- [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12},
- [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1},
- [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3},
- [VCAP_IS1_HK_IP4_IP4] = { 64, 1},
- [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1},
- [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1},
- [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1},
- [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6},
- [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32},
- [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {106, 32},
- [VCAP_IS1_HK_IP4_L3_PROTO] = {138, 8},
- [VCAP_IS1_HK_IP4_TCP_UDP] = {146, 1},
- [VCAP_IS1_HK_IP4_TCP] = {147, 1},
- [VCAP_IS1_HK_IP4_L4_RNG] = {148, 8},
- [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {156, 32},
-};
-
-static const struct vcap_field vsc7514_vcap_is1_actions[] = {
- [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
- [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
- [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
- [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
- [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
- [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
- [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
- [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
- [VCAP_IS1_ACT_RSV] = { 29, 9},
- /* The fields below are incorrectly shifted by 2 in the manual */
- [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1},
- [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12},
- [VCAP_IS1_ACT_FID_SEL] = { 51, 2},
- [VCAP_IS1_ACT_FID_VAL] = { 53, 13},
- [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1},
- [VCAP_IS1_ACT_PCP_VAL] = { 67, 3},
- [VCAP_IS1_ACT_DEI_VAL] = { 70, 1},
- [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1},
- [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2},
- [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4},
- [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1},
-};
-
-static const struct vcap_field vsc7514_vcap_is2_keys[] = {
- /* Common: 46 bits */
- [VCAP_IS2_TYPE] = { 0, 4},
- [VCAP_IS2_HK_FIRST] = { 4, 1},
- [VCAP_IS2_HK_PAG] = { 5, 8},
- [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12},
- [VCAP_IS2_HK_RSV2] = { 25, 1},
- [VCAP_IS2_HK_HOST_MATCH] = { 26, 1},
- [VCAP_IS2_HK_L2_MC] = { 27, 1},
- [VCAP_IS2_HK_L2_BC] = { 28, 1},
- [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1},
- [VCAP_IS2_HK_VID] = { 30, 12},
- [VCAP_IS2_HK_DEI] = { 42, 1},
- [VCAP_IS2_HK_PCP] = { 43, 3},
- /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
- [VCAP_IS2_HK_L2_DMAC] = { 46, 48},
- [VCAP_IS2_HK_L2_SMAC] = { 94, 48},
- /* MAC_ETYPE (TYPE=000) */
- [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {142, 16},
- [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {158, 16},
- [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {174, 8},
- [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {182, 3},
- /* MAC_LLC (TYPE=001) */
- [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {142, 40},
- /* MAC_SNAP (TYPE=010) */
- [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {142, 40},
- /* MAC_ARP (TYPE=011) */
- [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48},
- [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1},
- [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1},
- [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1},
- [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1},
- [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1},
- [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1},
- [VCAP_IS2_HK_MAC_ARP_OPCODE] = {100, 2},
- [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {102, 32},
- [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {134, 32},
- [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {166, 1},
- /* IP4_TCP_UDP / IP4_OTHER common */
- [VCAP_IS2_HK_IP4] = { 46, 1},
- [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1},
- [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1},
- [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1},
- [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1},
- [VCAP_IS2_HK_L3_TOS] = { 51, 8},
- [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32},
- [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32},
- [VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1},
- /* IP4_TCP_UDP (TYPE=100) */
- [VCAP_IS2_HK_TCP] = {124, 1},
- [VCAP_IS2_HK_L4_DPORT] = {125, 16},
- [VCAP_IS2_HK_L4_SPORT] = {141, 16},
- [VCAP_IS2_HK_L4_RNG] = {157, 8},
- [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1},
- [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1},
- [VCAP_IS2_HK_L4_FIN] = {167, 1},
- [VCAP_IS2_HK_L4_SYN] = {168, 1},
- [VCAP_IS2_HK_L4_RST] = {169, 1},
- [VCAP_IS2_HK_L4_PSH] = {170, 1},
- [VCAP_IS2_HK_L4_ACK] = {171, 1},
- [VCAP_IS2_HK_L4_URG] = {172, 1},
- [VCAP_IS2_HK_L4_1588_DOM] = {173, 8},
- [VCAP_IS2_HK_L4_1588_VER] = {181, 4},
- /* IP4_OTHER (TYPE=101) */
- [VCAP_IS2_HK_IP4_L3_PROTO] = {124, 8},
- [VCAP_IS2_HK_L3_PAYLOAD] = {132, 56},
- /* IP6_STD (TYPE=110) */
- [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1},
- [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128},
- [VCAP_IS2_HK_IP6_L3_PROTO] = {175, 8},
- /* OAM (TYPE=111) */
- [VCAP_IS2_HK_OAM_MEL_FLAGS] = {142, 7},
- [VCAP_IS2_HK_OAM_VER] = {149, 5},
- [VCAP_IS2_HK_OAM_OPCODE] = {154, 8},
- [VCAP_IS2_HK_OAM_FLAGS] = {162, 8},
- [VCAP_IS2_HK_OAM_MEPID] = {170, 16},
- [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {186, 1},
- [VCAP_IS2_HK_OAM_IS_Y1731] = {187, 1},
-};
-
-static const struct vcap_field vsc7514_vcap_is2_actions[] = {
- [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
- [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
- [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
- [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
- [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
- [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
- [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
- [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
- [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
- [VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
- [VCAP_IS2_ACT_REW_OP] = { 31, 9},
- [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
- [VCAP_IS2_ACT_RSV] = { 41, 2},
- [VCAP_IS2_ACT_ACL_ID] = { 43, 6},
- [VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
-};
-
static struct vcap_props vsc7514_vcap_props[] = {
[VCAP_ES0] = {
.action_type_width = 0,
@@ -1045,6 +551,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ S1, "s1" },
{ S2, "s2" },
{ PTP, "ptp", 1 },
+ { FDMA, "fdma", 1 },
};
if (!np && !pdev->dev.platform_data)
@@ -1080,6 +587,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[io_target[i].id] = target;
}
+ if (ocelot->targets[FDMA])
+ ocelot_fdma_init(pdev, ocelot);
+
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
if (IS_ERR(hsio)) {
dev_err(&pdev->dev, "missing hsio syscon\n");
@@ -1129,6 +639,10 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
+
+ ocelot->vcap_pol.base = VSC7514_VCAP_POLICER_BASE;
+ ocelot->vcap_pol.max = VSC7514_VCAP_POLICER_MAX;
+
ocelot->npi = -1;
err = ocelot_init(ocelot);
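These two constants define the policer window the flower parser validates against: on VSC7514 the VCAP owns policer indices 128 through 191, so a tc police hw_index of N programs hardware policer 128 + N. There is no second range on this SoC, so base2/max2 stay zero and the spill-over remap in the flower code never triggers.

	/* e.g. hw_index 5 -> pol_ix = 5 + 128 = 133, within [128, 191] */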
@@ -1139,6 +653,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_ocelot_devlink_unregister;
+ if (ocelot->fdma)
+ ocelot_fdma_start(ocelot);
+
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_ocelot_release_ports;
@@ -1179,6 +696,8 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ if (ocelot->fdma)
+ ocelot_fdma_deinit(ocelot);
devlink_unregister(ocelot->devlink);
ocelot_deinit_timestamp(ocelot);
ocelot_devlink_sb_unregister(ocelot);
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
new file mode 100644
index 000000000000..c2af4eb8ca5d
--- /dev/null
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright (c) 2021 Innovative Advantage
+ */
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "ocelot.h"
+
+const u32 vsc7514_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x009000),
+ REG(ANA_VLANMASK, 0x009004),
+ REG(ANA_PORT_B_DOMAIN, 0x009008),
+ REG(ANA_ANAGEFIL, 0x00900c),
+ REG(ANA_ANEVENTS, 0x009010),
+ REG(ANA_STORMLIMIT_BURST, 0x009014),
+ REG(ANA_STORMLIMIT_CFG, 0x009018),
+ REG(ANA_ISOLATED_PORTS, 0x009028),
+ REG(ANA_COMMUNITY_PORTS, 0x00902c),
+ REG(ANA_AUTOAGE, 0x009030),
+ REG(ANA_MACTOPTIONS, 0x009034),
+ REG(ANA_LEARNDISC, 0x009038),
+ REG(ANA_AGENCTRL, 0x00903c),
+ REG(ANA_MIRRORPORTS, 0x009040),
+ REG(ANA_EMIRRORPORTS, 0x009044),
+ REG(ANA_FLOODING, 0x009048),
+ REG(ANA_FLOODING_IPMC, 0x00904c),
+ REG(ANA_SFLOW_CFG, 0x009050),
+ REG(ANA_PORT_MODE, 0x009080),
+ REG(ANA_PGID_PGID, 0x008c00),
+ REG(ANA_TABLES_ANMOVED, 0x008b30),
+ REG(ANA_TABLES_MACHDATA, 0x008b34),
+ REG(ANA_TABLES_MACLDATA, 0x008b38),
+ REG(ANA_TABLES_MACACCESS, 0x008b3c),
+ REG(ANA_TABLES_MACTINDX, 0x008b40),
+ REG(ANA_TABLES_VLANACCESS, 0x008b44),
+ REG(ANA_TABLES_VLANTIDX, 0x008b48),
+ REG(ANA_TABLES_ISDXACCESS, 0x008b4c),
+ REG(ANA_TABLES_ISDXTIDX, 0x008b50),
+ REG(ANA_TABLES_ENTRYLIM, 0x008b00),
+ REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54),
+ REG(ANA_TABLES_PTP_ID_LOW, 0x008b58),
+ REG(ANA_MSTI_STATE, 0x008e00),
+ REG(ANA_PORT_VLAN_CFG, 0x007000),
+ REG(ANA_PORT_DROP_CFG, 0x007004),
+ REG(ANA_PORT_QOS_CFG, 0x007008),
+ REG(ANA_PORT_VCAP_CFG, 0x00700c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00701c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x007020),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x007060),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c),
+ REG(ANA_PORT_PORT_CFG, 0x007070),
+ REG(ANA_PORT_POL_CFG, 0x007074),
+ REG(ANA_PORT_PTP_CFG, 0x007078),
+ REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c),
+ REG(ANA_OAM_UPM_LM_CNT, 0x007c00),
+ REG(ANA_PORT_PTP_DLY2_CFG, 0x007080),
+ REG(ANA_PFC_PFC_CFG, 0x008800),
+ REG(ANA_PFC_PFC_TIMER, 0x008804),
+ REG(ANA_IPT_OAM_MEP_CFG, 0x008000),
+ REG(ANA_IPT_IPT, 0x008004),
+ REG(ANA_PPT_PPT, 0x008ac0),
+ REG(ANA_FID_MAP_FID_MAP, 0x000000),
+ REG(ANA_AGGR_CFG, 0x0090b4),
+ REG(ANA_CPUQ_CFG, 0x0090b8),
+ REG(ANA_CPUQ_CFG2, 0x0090bc),
+ REG(ANA_CPUQ_8021_CFG, 0x0090c0),
+ REG(ANA_DSCP_CFG, 0x009100),
+ REG(ANA_DSCP_REWR_CFG, 0x009200),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x009260),
+ REG(ANA_VRAP_CFG, 0x009280),
+ REG(ANA_VRAP_HDR_DATA, 0x009284),
+ REG(ANA_VRAP_HDR_MASK, 0x009288),
+ REG(ANA_DISCARD_CFG, 0x00928c),
+ REG(ANA_FID_CFG, 0x009290),
+ REG(ANA_POL_PIR_CFG, 0x004000),
+ REG(ANA_POL_CIR_CFG, 0x004004),
+ REG(ANA_POL_MODE_CFG, 0x004008),
+ REG(ANA_POL_PIR_STATE, 0x00400c),
+ REG(ANA_POL_CIR_STATE, 0x004010),
+ REG(ANA_POL_STATE, 0x004014),
+ REG(ANA_POL_FLOWC, 0x008b80),
+ REG(ANA_POL_HYST, 0x008bec),
+ REG(ANA_POL_MISC_CFG, 0x008bf0),
+};
+EXPORT_SYMBOL(vsc7514_ana_regmap);
+
+const u32 vsc7514_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG(QS_INH_DBG, 0x000048),
+};
+EXPORT_SYMBOL(vsc7514_qs_regmap);
+
+const u32 vsc7514_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x011200),
+ REG(QSYS_SWITCH_PORT_MODE, 0x011234),
+ REG(QSYS_STAT_CNT_CFG, 0x011264),
+ REG(QSYS_EEE_CFG, 0x011268),
+ REG(QSYS_EEE_THRES, 0x011294),
+ REG(QSYS_IGR_NO_SHARING, 0x011298),
+ REG(QSYS_EGR_NO_SHARING, 0x01129c),
+ REG(QSYS_SW_STATUS, 0x0112a0),
+ REG(QSYS_EXT_CPU_CFG, 0x0112d0),
+ REG(QSYS_PAD_CFG, 0x0112d4),
+ REG(QSYS_CPU_GROUP_MAP, 0x0112d8),
+ REG(QSYS_QMAP, 0x0112dc),
+ REG(QSYS_ISDX_SGRP, 0x011400),
+ REG(QSYS_TIMED_FRAME_ENTRY, 0x014000),
+ REG(QSYS_TFRM_MISC, 0x011310),
+ REG(QSYS_TFRM_PORT_DLY, 0x011314),
+ REG(QSYS_TFRM_TIMER_CFG_1, 0x011318),
+ REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c),
+ REG(QSYS_TFRM_TIMER_CFG_3, 0x011320),
+ REG(QSYS_TFRM_TIMER_CFG_4, 0x011324),
+ REG(QSYS_TFRM_TIMER_CFG_5, 0x011328),
+ REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c),
+ REG(QSYS_TFRM_TIMER_CFG_7, 0x011330),
+ REG(QSYS_TFRM_TIMER_CFG_8, 0x011334),
+ REG(QSYS_RED_PROFILE, 0x011338),
+ REG(QSYS_RES_QOS_MODE, 0x011378),
+ REG(QSYS_RES_CFG, 0x012000),
+ REG(QSYS_RES_STAT, 0x012004),
+ REG(QSYS_EGR_DROP_MODE, 0x01137c),
+ REG(QSYS_EQ_CTRL, 0x011380),
+ REG(QSYS_EVENTS_CORE, 0x011384),
+ REG(QSYS_CIR_CFG, 0x000000),
+ REG(QSYS_EIR_CFG, 0x000004),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG(QSYS_SE_CONNECT, 0x00003c),
+ REG(QSYS_SE_DLB_SENSE, 0x000040),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG(QSYS_EIR_STATE, 0x000048),
+ REG(QSYS_SE_STATE, 0x00004c),
+ REG(QSYS_HSCH_MISC_CFG, 0x011388),
+};
+EXPORT_SYMBOL(vsc7514_qsys_regmap);
+
+const u32 vsc7514_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG(REW_PTP_CFG, 0x000050),
+ REG(REW_PTP_DLY1_CFG, 0x000054),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000690),
+ REG(REW_DSCP_REMAP_CFG, 0x000790),
+ REG(REW_STAT_CFG, 0x000890),
+ REG(REW_PPT, 0x000680),
+};
+EXPORT_SYMBOL(vsc7514_rew_regmap);
+
+const u32 vsc7514_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_1023, 0x000030),
+ REG(SYS_COUNT_RX_1024_1526, 0x000034),
+ REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_PAUSE, 0x00003c),
+ REG(SYS_COUNT_RX_CONTROL, 0x000040),
+ REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
+ REG(SYS_COUNT_TX_COLLISION, 0x000110),
+ REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
+ REG(SYS_COUNT_TX_64, 0x00011c),
+ REG(SYS_COUNT_TX_65_127, 0x000120),
+ REG(SYS_COUNT_TX_128_511, 0x000124),
+ REG(SYS_COUNT_TX_512_1023, 0x000128),
+ REG(SYS_COUNT_TX_1024_1526, 0x00012c),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_RESET_CFG, 0x000508),
+ REG(SYS_CMID, 0x00050c),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000510),
+ REG(SYS_PORT_MODE, 0x000514),
+ REG(SYS_FRONT_PORT_MODE, 0x000548),
+ REG(SYS_FRM_AGING, 0x000574),
+ REG(SYS_STAT_CFG, 0x000578),
+ REG(SYS_SW_STATUS, 0x00057c),
+ REG(SYS_MISC_CFG, 0x0005ac),
+ REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0),
+ REG(SYS_REW_MAC_LOW_CFG, 0x0005dc),
+ REG(SYS_CM_ADDR, 0x000500),
+ REG(SYS_CM_DATA, 0x000504),
+ REG(SYS_PAUSE_CFG, 0x000608),
+ REG(SYS_PAUSE_TOT_CFG, 0x000638),
+ REG(SYS_ATOP, 0x00063c),
+ REG(SYS_ATOP_TOT_CFG, 0x00066c),
+ REG(SYS_MAC_FC_CFG, 0x000670),
+ REG(SYS_MMGT, 0x00069c),
+ REG(SYS_MMGT_FAST, 0x0006a0),
+ REG(SYS_EVENTS_DIF, 0x0006a4),
+ REG(SYS_EVENTS_CORE, 0x0006b4),
+ REG(SYS_CNT, 0x000000),
+ REG(SYS_PTP_STATUS, 0x0006b8),
+ REG(SYS_PTP_TXSTAMP, 0x0006bc),
+ REG(SYS_PTP_NXT, 0x0006c0),
+ REG(SYS_PTP_CFG, 0x0006c4),
+};
+EXPORT_SYMBOL(vsc7514_sys_regmap);
+
+const u32 vsc7514_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG(VCAP_CONST_CORE_CNT, 0x0003b8),
+ REG(VCAP_CONST_IF_CNT, 0x0003bc),
+};
+EXPORT_SYMBOL(vsc7514_vcap_regmap);
+
+const u32 vsc7514_ptp_regmap[] = {
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+};
+EXPORT_SYMBOL(vsc7514_ptp_regmap);
+
+const u32 vsc7514_dev_gmii_regmap[] = {
+ REG(DEV_CLOCK_CFG, 0x0),
+ REG(DEV_PORT_MISC, 0x4),
+ REG(DEV_EVENTS, 0x8),
+ REG(DEV_EEE_CFG, 0xc),
+ REG(DEV_RX_PATH_DELAY, 0x10),
+ REG(DEV_TX_PATH_DELAY, 0x14),
+ REG(DEV_PTP_PREDICT_CFG, 0x18),
+ REG(DEV_MAC_ENA_CFG, 0x1c),
+ REG(DEV_MAC_MODE_CFG, 0x20),
+ REG(DEV_MAC_MAXLEN_CFG, 0x24),
+ REG(DEV_MAC_TAGS_CFG, 0x28),
+ REG(DEV_MAC_ADV_CHK_CFG, 0x2c),
+ REG(DEV_MAC_IFG_CFG, 0x30),
+ REG(DEV_MAC_HDX_CFG, 0x34),
+ REG(DEV_MAC_DBG_CFG, 0x38),
+ REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
+ REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
+ REG(DEV_MAC_STICKY, 0x44),
+ REG(PCS1G_CFG, 0x48),
+ REG(PCS1G_MODE_CFG, 0x4c),
+ REG(PCS1G_SD_CFG, 0x50),
+ REG(PCS1G_ANEG_CFG, 0x54),
+ REG(PCS1G_ANEG_NP_CFG, 0x58),
+ REG(PCS1G_LB_CFG, 0x5c),
+ REG(PCS1G_DBG_CFG, 0x60),
+ REG(PCS1G_CDET_CFG, 0x64),
+ REG(PCS1G_ANEG_STATUS, 0x68),
+ REG(PCS1G_ANEG_NP_STATUS, 0x6c),
+ REG(PCS1G_LINK_STATUS, 0x70),
+ REG(PCS1G_LINK_DOWN_CNT, 0x74),
+ REG(PCS1G_STICKY, 0x78),
+ REG(PCS1G_DEBUG_STATUS, 0x7c),
+ REG(PCS1G_LPI_CFG, 0x80),
+ REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84),
+ REG(PCS1G_LPI_STATUS, 0x88),
+ REG(PCS1G_TSTPAT_MODE_CFG, 0x8c),
+ REG(PCS1G_TSTPAT_STATUS, 0x90),
+ REG(DEV_PCS_FX100_CFG, 0x94),
+ REG(DEV_PCS_FX100_STATUS, 0x98),
+};
+EXPORT_SYMBOL(vsc7514_dev_gmii_regmap);
+
+const struct vcap_field vsc7514_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 4 },
+ [VCAP_ES0_IGR_PORT] = { 4, 4 },
+ [VCAP_ES0_RSV] = { 8, 2 },
+ [VCAP_ES0_L2_MC] = { 10, 1 },
+ [VCAP_ES0_L2_BC] = { 11, 1 },
+ [VCAP_ES0_VID] = { 12, 12 },
+ [VCAP_ES0_DP] = { 24, 1 },
+ [VCAP_ES0_PCP] = { 25, 3 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_es0_keys);
+
+const struct vcap_field vsc7514_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2 },
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1 },
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2 },
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1 },
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2 },
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2 },
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2 },
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1 },
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2 },
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2 },
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12 },
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3 },
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1 },
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12 },
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3 },
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1 },
+ [VCAP_ES0_ACT_RSV] = { 49, 24 },
+ [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_es0_actions);
+
+const struct vcap_field vsc7514_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1 },
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2 },
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12 },
+ [VCAP_IS1_HK_RSV] = { 15, 9 },
+ [VCAP_IS1_HK_OAM_Y1731] = { 24, 1 },
+ [VCAP_IS1_HK_L2_MC] = { 25, 1 },
+ [VCAP_IS1_HK_L2_BC] = { 26, 1 },
+ [VCAP_IS1_HK_IP_MC] = { 27, 1 },
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1 },
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1 },
+ [VCAP_IS1_HK_TPID] = { 30, 1 },
+ [VCAP_IS1_HK_VID] = { 31, 12 },
+ [VCAP_IS1_HK_DEI] = { 43, 1 },
+ [VCAP_IS1_HK_PCP] = { 44, 3 },
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 47, 48 },
+ [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1 },
+ [VCAP_IS1_HK_ETYPE] = { 96, 16 },
+ [VCAP_IS1_HK_IP_SNAP] = { 112, 1 },
+ [VCAP_IS1_HK_IP4] = { 113, 1 },
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = { 114, 1 },
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = { 115, 1 },
+ [VCAP_IS1_HK_L3_OPTIONS] = { 116, 1 },
+ [VCAP_IS1_HK_L3_DSCP] = { 117, 6 },
+ [VCAP_IS1_HK_L3_IP4_SIP] = { 123, 32 },
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = { 155, 1 },
+ [VCAP_IS1_HK_TCP] = { 156, 1 },
+ [VCAP_IS1_HK_L4_SPORT] = { 157, 16 },
+ [VCAP_IS1_HK_L4_RNG] = { 173, 8 },
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1 },
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12 },
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1 },
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3 },
+ [VCAP_IS1_HK_IP4_IP4] = { 64, 1 },
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1 },
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1 },
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1 },
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6 },
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32 },
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = { 106, 32 },
+ [VCAP_IS1_HK_IP4_L3_PROTO] = { 138, 8 },
+ [VCAP_IS1_HK_IP4_TCP_UDP] = { 146, 1 },
+ [VCAP_IS1_HK_IP4_TCP] = { 147, 1 },
+ [VCAP_IS1_HK_IP4_L4_RNG] = { 148, 8 },
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = { 156, 32 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is1_keys);
+
+const struct vcap_field vsc7514_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1 },
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6 },
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1 },
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3 },
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1 },
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1 },
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8 },
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8 },
+ [VCAP_IS1_ACT_RSV] = { 29, 9 },
+ /* The fields below are incorrectly shifted by 2 in the manual */
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1 },
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12 },
+ [VCAP_IS1_ACT_FID_SEL] = { 51, 2 },
+ [VCAP_IS1_ACT_FID_VAL] = { 53, 13 },
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1 },
+ [VCAP_IS1_ACT_PCP_VAL] = { 67, 3 },
+ [VCAP_IS1_ACT_DEI_VAL] = { 70, 1 },
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1 },
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2 },
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4 },
+ [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is1_actions);
+
+const struct vcap_field vsc7514_vcap_is2_keys[] = {
+ /* Common: 46 bits */
+ [VCAP_IS2_TYPE] = { 0, 4 },
+ [VCAP_IS2_HK_FIRST] = { 4, 1 },
+ [VCAP_IS2_HK_PAG] = { 5, 8 },
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12 },
+ [VCAP_IS2_HK_RSV2] = { 25, 1 },
+ [VCAP_IS2_HK_HOST_MATCH] = { 26, 1 },
+ [VCAP_IS2_HK_L2_MC] = { 27, 1 },
+ [VCAP_IS2_HK_L2_BC] = { 28, 1 },
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1 },
+ [VCAP_IS2_HK_VID] = { 30, 12 },
+ [VCAP_IS2_HK_DEI] = { 42, 1 },
+ [VCAP_IS2_HK_PCP] = { 43, 3 },
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 46, 48 },
+ [VCAP_IS2_HK_L2_SMAC] = { 94, 48 },
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = { 142, 16 },
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = { 158, 16 },
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = { 174, 8 },
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = { 182, 3 },
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = { 142, 40 },
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = { 142, 40 },
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48 },
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1 },
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1 },
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1 },
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1 },
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1 },
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1 },
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 100, 2 },
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 102, 32 },
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = { 134, 32 },
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = { 166, 1 },
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 46, 1 },
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1 },
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1 },
+ [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1 },
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1 },
+ [VCAP_IS2_HK_L3_TOS] = { 51, 8 },
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32 },
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32 },
+ [VCAP_IS2_HK_DIP_EQ_SIP] = { 123, 1 },
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = { 124, 1 },
+ [VCAP_IS2_HK_L4_DPORT] = { 125, 16 },
+ [VCAP_IS2_HK_L4_SPORT] = { 141, 16 },
+ [VCAP_IS2_HK_L4_RNG] = { 157, 8 },
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = { 165, 1 },
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = { 166, 1 },
+ [VCAP_IS2_HK_L4_FIN] = { 167, 1 },
+ [VCAP_IS2_HK_L4_SYN] = { 168, 1 },
+ [VCAP_IS2_HK_L4_RST] = { 169, 1 },
+ [VCAP_IS2_HK_L4_PSH] = { 170, 1 },
+ [VCAP_IS2_HK_L4_ACK] = { 171, 1 },
+ [VCAP_IS2_HK_L4_URG] = { 172, 1 },
+ [VCAP_IS2_HK_L4_1588_DOM] = { 173, 8 },
+ [VCAP_IS2_HK_L4_1588_VER] = { 181, 4 },
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = { 124, 8 },
+ [VCAP_IS2_HK_L3_PAYLOAD] = { 132, 56 },
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1 },
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128 },
+ [VCAP_IS2_HK_IP6_L3_PROTO] = { 175, 8 },
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = { 142, 7 },
+ [VCAP_IS2_HK_OAM_VER] = { 149, 5 },
+ [VCAP_IS2_HK_OAM_OPCODE] = { 154, 8 },
+ [VCAP_IS2_HK_OAM_FLAGS] = { 162, 8 },
+ [VCAP_IS2_HK_OAM_MEPID] = { 170, 16 },
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = { 186, 1 },
+ [VCAP_IS2_HK_OAM_IS_Y1731] = { 187, 1 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is2_keys);
+
+const struct vcap_field vsc7514_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1 },
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1 },
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3 },
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2 },
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1 },
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1 },
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1 },
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9 },
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1 },
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11 },
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9 },
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1 },
+ [VCAP_IS2_ACT_RSV] = { 41, 2 },
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6 },
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is2_actions);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 5736fcdafd7a..50ac3ee2577a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1704,7 +1704,9 @@ myri10ge_set_pauseparam(struct net_device *netdev,
static void
myri10ge_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
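This signature change (here and in s2io below) follows the kernel-wide ethtool rework that threads a kernel_ethtool_ringparam and an extack through every .get_ringparam/.set_ringparam implementation; drivers with no use for the new fields simply ignore the extra arguments. The minimal conforming shape, with hypothetical driver names:

	static void foo_get_ringparam(struct net_device *netdev,
				      struct ethtool_ringparam *ring,
				      struct kernel_ethtool_ringparam *kernel_ring,
				      struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(netdev);

		ring->rx_max_pending = priv->rx_ring_size_max;
		ring->rx_pending = priv->rx_ring_size;
		/* kernel_ring and extack unused by simple drivers */
	}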
@@ -3740,7 +3742,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct myri10ge_priv *mgp;
struct device *dev = &pdev->dev;
int status = -ENXIO;
- int dac_enabled;
unsigned hdr_offset, ss_offset;
static int board_number;
@@ -3780,15 +3781,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
myri10ge_mask_surprise_down(pdev);
pci_set_master(pdev);
- dac_enabled = 1;
status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (status != 0) {
- dac_enabled = 0;
- dev_err(&pdev->dev,
- "64-bit pci address mask was refused, trying 32-bit\n");
- status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- }
- if (status != 0) {
dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
goto abort_with_enabled;
}
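The removed fallback dates from when 64-bit DMA addressing could plausibly fail while 32-bit worked; on the platforms this hardware runs on, a 64-bit mask is only refused when DMA is unusable outright, so the retry was dead code. dma_set_mask_and_coherent() also sets the streaming and coherent masks in one call, and with the fallback gone NETIF_F_HIGHDMA can be set unconditionally (see the hunk below). The whole dance reduces to:

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (status) {
		dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
		goto abort_with_enabled;
	}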
@@ -3872,10 +3866,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features;
-
- if (dac_enabled)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= mgp->features;
if (mgp->fw_ver_tiny < 37)
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index d74a80f010c5..3f371faeb6d0 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -114,6 +114,7 @@ static int sonic_probe1(struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
int err = -ENODEV;
int i;
+ unsigned char addr[ETH_ALEN];
if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
return -EBUSY;
@@ -143,9 +144,10 @@ static int sonic_probe1(struct net_device *dev)
SONIC_WRITE(SONIC_CEP,0);
for (i=0; i<3; i++) {
val = SONIC_READ(SONIC_CAP0-i);
- dev->dev_addr[i*2] = val;
- dev->dev_addr[i*2+1] = val >> 8;
+ addr[i*2] = val;
+ addr[i*2+1] = val >> 8;
}
+ eth_hw_addr_set(dev, addr);
lp->dma_bitmode = SONIC_BITMODE32;
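This and the macsonic/xtsonic conversions below all follow the same recipe: netdev->dev_addr is const now, so drivers may no longer poke its bytes directly; instead the address is assembled in a local buffer and committed atomically with eth_hw_addr_set(). The pattern, as applied here:

	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < 3; i++) {
		u16 val = SONIC_READ(SONIC_CAP0 - i);	/* 16 bits at a time */

		addr[i * 2] = val;
		addr[i * 2 + 1] = val >> 8;
	}
	eth_hw_addr_set(dev, addr);	/* the only sanctioned writer */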
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 8709d700e15a..b16f7c830f9b 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -203,6 +203,7 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
const int prom_addr = ONBOARD_SONIC_PROM_BASE;
unsigned short val;
+ u8 addr[ETH_ALEN];
/*
* On NuBus boards we can sometimes look in the ROM resources.
@@ -213,7 +214,8 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
int i;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = SONIC_READ_PROM(i);
+ addr[i] = SONIC_READ_PROM(i);
+ eth_hw_addr_set(dev, addr);
if (!INVALID_MAC(dev->dev_addr))
return;
@@ -222,7 +224,8 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
* source has a rather long and detailed historical account of
* why this is so.
*/
- bit_reverse_addr(dev->dev_addr);
+ bit_reverse_addr(addr);
+ eth_hw_addr_set(dev, addr);
if (!INVALID_MAC(dev->dev_addr))
return;
@@ -243,14 +246,15 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
SONIC_WRITE(SONIC_CEP, 15);
val = SONIC_READ(SONIC_CAP2);
- dev->dev_addr[5] = val >> 8;
- dev->dev_addr[4] = val & 0xff;
+ addr[5] = val >> 8;
+ addr[4] = val & 0xff;
val = SONIC_READ(SONIC_CAP1);
- dev->dev_addr[3] = val >> 8;
- dev->dev_addr[2] = val & 0xff;
+ addr[3] = val >> 8;
+ addr[2] = val & 0xff;
val = SONIC_READ(SONIC_CAP0);
- dev->dev_addr[1] = val >> 8;
- dev->dev_addr[0] = val & 0xff;
+ addr[1] = val >> 8;
+ addr[0] = val & 0xff;
+ eth_hw_addr_set(dev, addr);
if (!INVALID_MAC(dev->dev_addr))
return;
@@ -355,13 +359,16 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
static int mac_sonic_nubus_ethernet_addr(struct net_device *dev,
unsigned long prom_addr, int id)
{
+ u8 addr[ETH_ALEN];
int i;
+
for(i = 0; i < 6; i++)
- dev->dev_addr[i] = SONIC_READ_PROM(i);
+ addr[i] = SONIC_READ_PROM(i);
/* Some of the addresses are bit-reversed */
if (id != MACSONIC_DAYNA)
- bit_reverse_addr(dev->dev_addr);
+ bit_reverse_addr(addr);
+ eth_hw_addr_set(dev, addr);
return 0;
}
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 0a02d8bd0a3e..52fef34d43f9 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -127,6 +127,7 @@ static int sonic_probe1(struct net_device *dev)
unsigned int base_addr = dev->base_addr;
int i;
int err = 0;
+ unsigned char addr[ETH_ALEN];
if (!request_mem_region(base_addr, 0x100, xtsonic_string))
return -EBUSY;
@@ -163,9 +164,10 @@ static int sonic_probe1(struct net_device *dev)
for (i=0; i<3; i++) {
unsigned int val = SONIC_READ(SONIC_CAP0-i);
- dev->dev_addr[i*2] = val;
- dev->dev_addr[i*2+1] = val >> 8;
+ addr[i*2] = val;
+ addr[i*2+1] = val >> 8;
}
+ eth_hw_addr_set(dev, addr);
lp->dma_bitmode = SONIC_BITMODE32;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index d1c32c65db05..6dd451adc331 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5461,8 +5461,11 @@ static int s2io_ethtool_set_led(struct net_device *dev,
return 0;
}
-static void s2io_ethtool_gringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+s2io_ethtool_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct s2io_nic *sp = netdev_priv(dev);
int i, tx_desc_count = 0, rx_desc_count = 0;
@@ -7655,7 +7658,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
struct s2io_nic *sp;
struct net_device *dev;
int i, j, ret;
- int dma_flag = false;
u32 mac_up, mac_down;
u64 val64 = 0, tmp64 = 0;
struct XENA_dev_config __iomem *bar0 = NULL;
@@ -7677,17 +7679,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
return ret;
}
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
- dma_flag = true;
- if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- DBG_PRINT(ERR_DBG,
- "Unable to obtain 64bit DMA for coherent allocations\n");
- pci_disable_device(pdev);
- return -ENOMEM;
- }
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
} else {
pci_disable_device(pdev);
return -ENOMEM;
@@ -7717,7 +7710,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
sp = netdev_priv(dev);
sp->dev = dev;
sp->pdev = pdev;
- sp->high_dma_flag = dma_flag;
sp->device_enabled_once = false;
if (rx_ring_mode == 1)
sp->rxd_mode = RXD_MODE_1;
@@ -7865,9 +7857,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO;
dev->features |= dev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
- if (sp->high_dma_flag == true)
- dev->features |= NETIF_F_HIGHDMA;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HIGHDMA;
dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
INIT_WORK(&sp->set_link_task, s2io_set_link);
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index a4266d1544ab..cb7080eb5912 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -873,7 +873,6 @@ struct s2io_nic {
struct mac_addr def_mac_addr[256];
struct net_device_stats stats;
- int high_dma_flag;
int device_enabled_once;
char name[60];
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1969009a91e7..aa7c093f1f91 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3159,10 +3159,6 @@ static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
if (copy_from_user(&config, data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
/* Transmit HW Timestamp not supported */
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
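Dropping the config.flags check is safe because the core now validates hwtstamp flags before a driver ever sees the request; the same removal appears below in pch_gbe and qede_ptp. The slimmed-down handler shape, sketched:

    struct hwtstamp_config config;

    if (copy_from_user(&config, data, sizeof(config)))
            return -EFAULT;

    /* no config.flags check: the ioctl core has already screened it */
    switch (config.tx_type) {
    case HWTSTAMP_TX_OFF:
            break;
    default:
            return -ERANGE;         /* TX timestamping unsupported here */
    }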
@@ -3353,7 +3349,7 @@ static const struct net_device_ops vxge_netdev_ops = {
};
static int vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config, int high_dma,
+ struct vxge_config *config,
int no_of_vpath, struct vxgedev **vdev_out)
{
struct net_device *ndev;
@@ -3425,11 +3421,7 @@ static int vxge_device_register(struct __vxge_hw_device *hldev,
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s : checksumming enabled", __func__);
- if (high_dma) {
- ndev->features |= NETIF_F_HIGHDMA;
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s : using High DMA", __func__);
- }
+ ndev->features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - 9600 */
ndev->min_mtu = VXGE_HW_MIN_MTU;
@@ -4286,7 +4278,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
struct __vxge_hw_device *hldev;
enum vxge_hw_status status;
int ret;
- int high_dma = 0;
u64 vpath_mask = 0;
struct vxgedev *vdev;
struct vxge_config *ll_config = NULL;
@@ -4376,22 +4367,9 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0;
}
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 64bit DMA", __func__);
-
- high_dma = 1;
-
- if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- vxge_debug_init(VXGE_ERR,
- "%s : unable to obtain 64bit DMA for "
- "consistent allocations", __func__);
- ret = -ENOMEM;
- goto _exit1;
- }
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s : using 32bit DMA", __func__);
} else {
ret = -ENOMEM;
goto _exit1;
@@ -4559,8 +4537,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
- ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
- &vdev);
+ ret = vxge_device_register(hldev, ll_config, no_of_vpath, &vdev);
if (ret) {
ret = -EINVAL;
goto _exit4;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 2af9faee96c5..f448c5682594 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -43,15 +43,14 @@ static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
struct circ_buf *ring;
ring = &priv->stats_ids.free_list;
- /* Check if buffer is full. */
- if (!CIRC_SPACE(ring->head, ring->tail,
- priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
- NFP_FL_STATS_ELEM_RS + 1))
+ /* Check if buffer is full; stats_ring_size must be a power of 2 */
+ if (!CIRC_SPACE(ring->head, ring->tail, priv->stats_ring_size))
return -ENOBUFS;
- memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
- ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
- (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
+ /* Each head increment represents one NFP_FL_STATS_ELEM_RS-sized element */
+ memcpy(&ring->buf[ring->head * NFP_FL_STATS_ELEM_RS],
+ &stats_context_id, NFP_FL_STATS_ELEM_RS);
+ ring->head = (ring->head + 1) & (priv->stats_ring_size - 1);
return 0;
}
@@ -86,11 +85,14 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
return -ENOENT;
}
- memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
+ /* Each tail increment represents one NFP_FL_STATS_ELEM_RS-sized element */
+ memcpy(&temp_stats_id, &ring->buf[ring->tail * NFP_FL_STATS_ELEM_RS],
+ NFP_FL_STATS_ELEM_RS);
*stats_context_id = temp_stats_id;
- memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
- ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
- (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
+ memcpy(&ring->buf[ring->tail * NFP_FL_STATS_ELEM_RS], &freed_stats_id,
+ NFP_FL_STATS_ELEM_RS);
+ /* stats_ring_size must be a power of 2 */
+ ring->tail = (ring->tail + 1) & (priv->stats_ring_size - 1);
return 0;
}
@@ -138,13 +140,18 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
struct circ_buf *ring;
ring = &priv->mask_ids.mask_id_free_list;
- /* Checking if buffer is full. */
+ /* Check if the buffer is full;
+ * NFP_FLOWER_MASK_ENTRY_RS must be a power of 2
+ */
if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
return -ENOBUFS;
- memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
- ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
- (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
+ /* Each head increment represents one
+ * NFP_FLOWER_MASK_ELEMENT_RS-sized element
+ */
+ memcpy(&ring->buf[ring->head * NFP_FLOWER_MASK_ELEMENT_RS], &mask_id,
+ NFP_FLOWER_MASK_ELEMENT_RS);
+ ring->head = (ring->head + 1) & (NFP_FLOWER_MASK_ENTRY_RS - 1);
priv->mask_ids.last_used[mask_id] = ktime_get();
@@ -171,7 +178,11 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
if (ring->head == ring->tail)
goto err_not_found;
- memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
+ /* Each tail increment represents one
+ * NFP_FLOWER_MASK_ELEMENT_RS-sized element
+ */
+ memcpy(&temp_id, &ring->buf[ring->tail * NFP_FLOWER_MASK_ELEMENT_RS],
+ NFP_FLOWER_MASK_ELEMENT_RS);
*mask_id = temp_id;
reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
@@ -180,9 +191,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
if (ktime_before(ktime_get(), reuse_timeout))
goto err_not_found;
- memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
- ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
- (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
+ memcpy(&ring->buf[ring->tail * NFP_FLOWER_MASK_ELEMENT_RS], &freed_id,
+ NFP_FLOWER_MASK_ELEMENT_RS);
+ /* NFP_FLOWER_MASK_ENTRY_RS must be a power of 2 */
+ ring->tail = (ring->tail + 1) & (NFP_FLOWER_MASK_ENTRY_RS - 1);
return 0;
@@ -338,11 +350,6 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
nfp_flow->meta.mask_len,
&nfp_flow->meta.flags, &new_mask_id)) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
- if (nfp_release_stats_entry(app, stats_cxt)) {
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
- err = -EINVAL;
- goto err_remove_rhash;
- }
err = -ENOENT;
goto err_remove_rhash;
}
@@ -359,21 +366,6 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
check_entry = nfp_flower_search_fl_table(app, cookie, netdev);
if (check_entry) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
- if (nfp_release_stats_entry(app, stats_cxt)) {
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
- err = -EINVAL;
- goto err_remove_mask;
- }
-
- if (!nfp_flow->pre_tun_rule.dev &&
- !nfp_check_mask_remove(app, nfp_flow->mask_data,
- nfp_flow->meta.mask_len,
- NULL, &new_mask_id)) {
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
- err = -EINVAL;
- goto err_remove_mask;
- }
-
err = -EEXIST;
goto err_remove_mask;
}
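The metadata.c rework changes the unit of the circular-buffer head/tail from bytes to elements: the byte offset is derived only at memcpy() time, and the wrap becomes a mask, which requires the ring size to be a power of two but avoids both the modulo and the off-by-one correction the old CIRC_SPACE() computation carried. The resulting idiom as a generic sketch (ELEM_SZ and RING_SZ are illustrative constants, not from this driver):

    #include <linux/circ_buf.h>

    #define ELEM_SZ 4       /* bytes per element */
    #define RING_SZ 256     /* must be a power of 2 */

    static int ring_push(struct circ_buf *ring, const void *elem)
    {
            /* head and tail count elements, not bytes */
            if (!CIRC_SPACE(ring->head, ring->tail, RING_SZ))
                    return -ENOBUFS;

            memcpy(&ring->buf[ring->head * ELEM_SZ], elem, ELEM_SZ);
            ring->head = (ring->head + 1) & (RING_SZ - 1);
            return 0;
    }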
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 224089d04d98..f97eff5afd12 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1867,6 +1867,9 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
+ if (!netdev)
+ return -EOPNOTSUPP;
+
if (!nfp_fl_is_netdev_to_offload(netdev))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 850bfdf83d0a..79257ec41987 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1944,7 +1944,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
xdp_prog, act);
continue;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dp->netdev, xdp_prog, act);
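bpf_warn_invalid_xdp_action() gained netdev and prog arguments in 5.17 so the one-shot warning can identify which device and which BPF program produced the bogus verdict. The canonical driver verdict switch with the new signature, sketched:

    switch (act) {
    case XDP_PASS:
    case XDP_TX:
    case XDP_REDIRECT:
            break;                  /* handled by driver-specific paths */
    default:
            bpf_warn_invalid_xdp_action(netdev, xdp_prog, act);
            fallthrough;
    case XDP_ABORTED:
            trace_xdp_exception(netdev, xdp_prog, act);
            fallthrough;
    case XDP_DROP:
            /* recycle the RX buffer */
            break;
    }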
@@ -4097,7 +4097,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
- netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
+ netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index cf7882933993..e0c27471bcdb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -381,7 +381,9 @@ err_bad_set:
}
static void nfp_net_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -406,7 +408,9 @@ static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
}
static int nfp_net_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
u32 rxd_cnt, txd_cnt;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 369f6ae700c7..181ac8e789a3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -286,8 +286,8 @@ nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
if (repr->dst->u.port_info.lower_dev != lower)
return;
- netdev->gso_max_size = lower->gso_max_size;
- netdev->gso_max_segs = lower->gso_max_segs;
+ netif_set_gso_max_size(netdev, lower->gso_max_size);
+ netif_set_gso_max_segs(netdev, lower->gso_max_segs);
netdev_update_features(netdev);
}
@@ -381,7 +381,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
/* Advertise but disable TSO by default. */
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
- netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
+ netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
netdev->features |= NETIF_F_LLTX;
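Direct stores to netdev->gso_max_size/gso_max_segs give way to the netif_set_gso_max_size()/netif_set_gso_max_segs() helpers; the setters wrap the store in WRITE_ONCE() so that lockless readers pairing with READ_ONCE() see a consistent value. Roughly what the helper does (a sketch, not the exact kernel source):

    static inline void netif_set_gso_max_segs(struct net_device *dev,
                                              unsigned int segs)
    {
            /* paired with READ_ONCE() readers in the GSO fast path */
            WRITE_ONCE(dev->gso_max_segs, segs);
    }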
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 9b530d7509a4..660013f716d4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -4651,7 +4651,10 @@ static int nv_nway_reset(struct net_device *dev)
return ret;
}
-static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+static void nv_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fe_priv *np = netdev_priv(dev);
@@ -4662,7 +4665,10 @@ static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* r
ring->tx_pending = np->tx_ring_size;
}
-static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+static int nv_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 660b07cb5b92..84cc79e928c8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -270,9 +270,13 @@ static int pch_gbe_nway_reset(struct net_device *netdev)
* pch_gbe_get_ringparam - Report ring sizes
* @netdev: Network interface device structure
* @ring: Ring param structure
+ * @kernel_ring: Ring params not covered by the legacy UAPI structure
+ * @extack: netlink extended ACK for error reporting
*/
static void pch_gbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_tx_ring *txdr = adapter->tx_ring;
@@ -288,12 +292,16 @@ static void pch_gbe_get_ringparam(struct net_device *netdev,
* pch_gbe_set_ringparam - Set ring sizes
* @netdev: Network interface device structure
* @ring: Ring param structure
+ * @kernel_ring: Ring params not covered by the legacy UAPI structure
+ * @extack: netlink extended ACK for error reporting
* Returns
* 0: Successful.
* Negative value: Failed.
*/
static int pch_gbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_tx_ring *txdr, *tx_old;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 71d234291fc5..1dc40c537281 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -210,9 +210,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- if (cfg.flags) /* reserved for future extensions */
- return -EINVAL;
-
/* Get ieee1588's dev information */
pdev = adapter->ptp_pdev;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index e1a304886a3c..4c7e0c991105 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -69,7 +69,9 @@ pasemi_mac_ethtool_set_msglevel(struct net_device *netdev,
static void
pasemi_mac_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pasemi_mac *mac = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index c54d735b9e2e..386a5cf1e224 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -512,7 +512,9 @@ static int ionic_set_coalesce(struct net_device *netdev,
}
static void ionic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -523,7 +525,9 @@ static void ionic_get_ringparam(struct net_device *netdev,
}
static int ionic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_queue_params qparam;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index a075643f5826..3c4a84ea6321 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -392,7 +392,9 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
static void
netxen_nic_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct netxen_adapter *adapter = netdev_priv(dev);
@@ -430,7 +432,9 @@ netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
static int
netxen_nic_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct netxen_adapter *adapter = netdev_priv(dev);
u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 452494f8c298..65e20693c549 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1036,12 +1036,12 @@ static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
- kfree(p_mngr->acquired[type].cid_map);
+ bitmap_free(p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
for (vf = 0; vf < MAX_NUM_VFS; vf++) {
- kfree(p_mngr->acquired_vf[type][vf].cid_map);
+ bitmap_free(p_mngr->acquired_vf[type][vf].cid_map);
p_mngr->acquired_vf[type][vf].max_count = 0;
p_mngr->acquired_vf[type][vf].start_cid = 0;
}
@@ -1054,15 +1054,10 @@ qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
u32 cid_start,
u32 cid_count, struct qed_cid_acquired_map *p_map)
{
- u32 size;
-
if (!cid_count)
return 0;
- size = DIV_ROUND_UP(cid_count,
- sizeof(unsigned long) * BITS_PER_BYTE) *
- sizeof(unsigned long);
- p_map->cid_map = kzalloc(size, GFP_KERNEL);
+ p_map->cid_map = bitmap_zalloc(cid_count, GFP_KERNEL);
if (!p_map->cid_map)
return -ENOMEM;
@@ -1216,7 +1211,6 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
struct qed_cid_acquired_map *p_map;
struct qed_conn_type_cfg *p_cfg;
int type;
- u32 len;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
@@ -1225,11 +1219,7 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
p_cfg = &p_mngr->conn_cfg[type];
if (p_cfg->cid_count) {
p_map = &p_mngr->acquired[type];
- len = DIV_ROUND_UP(p_map->max_count,
- sizeof(unsigned long) *
- BITS_PER_BYTE) *
- sizeof(unsigned long);
- memset(p_map->cid_map, 0, len);
+ bitmap_zero(p_map->cid_map, p_map->max_count);
}
if (!p_cfg->cids_per_vf)
@@ -1237,11 +1227,7 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
for (vf = 0; vf < MAX_NUM_VFS; vf++) {
p_map = &p_mngr->acquired_vf[type][vf];
- len = DIV_ROUND_UP(p_map->max_count,
- sizeof(unsigned long) *
- BITS_PER_BYTE) *
- sizeof(unsigned long);
- memset(p_map->cid_map, 0, len);
+ bitmap_zero(p_map->cid_map, p_map->max_count);
}
}
}
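The cid_map conversion swaps hand-rolled sizing arithmetic for the bitmap API, which encapsulates exactly that DIV_ROUND_UP-over-longs computation. The full lifecycle with the linux/bitmap.h helpers, sketched:

    unsigned long *map;

    map = bitmap_zalloc(nbits, GFP_KERNEL); /* allocates and zeroes nbits bits */
    if (!map)
            return -ENOMEM;

    bitmap_zero(map, nbits);                /* reset in place, no realloc */
    bitmap_free(map);                       /* frees the backing storage */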
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index f2cedbd9489c..ed1a84542ad2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -2721,6 +2721,25 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
#define NUM_STORMS 6
/**
+ * qed_get_protocol_type_str(): Get a string for Protocol type.
+ *
+ * @protocol_type: Protocol type (using enum protocol_type).
+ *
+ * Return: String.
+ */
+const char *qed_get_protocol_type_str(u32 protocol_type);
+
+/**
+ * qed_get_ramrod_cmd_id_str(): Get a string for Ramrod command ID.
+ *
+ * @protocol_type: Protocol type (using enum protocol_type).
+ * @ramrod_cmd_id: Ramrod command ID (using per-protocol enum <protocol>_ramrod_cmd_id).
+ *
+ * Return: String.
+ */
+const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id);
+
+/**
* qed_set_rdma_error_level(): Sets the RDMA assert level.
* If the severity of the error will be
* above the level, the FW will assert.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 321c43408153..0ce37f2460a4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -210,6 +210,82 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
XSTORM_PQ_INFO_OFFSET(pq_id))
+static const char * const s_protocol_types[] = {
+ "PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE",
+ "PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP",
+ "PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON",
+ "PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI",
+};
+
+static const char *s_ramrod_cmd_ids[][28] = {
+ {
+ "ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC",
+ "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC",
+ "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN",
+ "ISCSI_RAMROD_CMD_ID_UPDATE_CONN",
+ "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN",
+ "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE",
+ "ISCSI_RAMROD_CMD_ID_CONN_STATS", },
+ { "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC",
+ "FCOE_RAMROD_CMD_ID_STAT_FUNC",
+ "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN",
+ "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", },
+ { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
+ "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
+ "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
+ "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
+ "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
+ "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
+ "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP",
+ "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP",
+ "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP",
+ "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE",
+ "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP",
+ "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP",
+ "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP",
+ "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", },
+ { "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START",
+ "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP",
+ "CORE_RAMROD_TX_QUEUE_STOP",
+ "CORE_RAMROD_RX_QUEUE_FLUSH",
+ "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", },
+ { "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START",
+ "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP",
+ "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP",
+ "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP",
+ "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE",
+ "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION",
+ "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER",
+ "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER",
+ "ETH_RAMROD_RX_ADD_UDP_FILTER",
+ "ETH_RAMROD_RX_DELETE_UDP_FILTER",
+ "ETH_RAMROD_RX_CREATE_GFT_ACTION",
+ "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE",
+ "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL",
+ "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL",
+ "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", },
+ { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
+ "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
+ "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
+ "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
+ "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
+ "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
+ "RDMA_RAMROD_STOP_NS_TRACKING",
+ "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD",
+ "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD",
+ "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR",
+ "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP",
+ "IWARP_RAMROD_CMD_ID_MODIFY_QP",
+ "IWARP_RAMROD_CMD_ID_DESTROY_QP",
+ "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", },
+ { NULL }, /* TOE */
+ { NULL }, /* PREROCE */
+ { "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START",
+ "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START",
+ "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE",
+ "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", }
+};
+
/******************** INTERNAL IMPLEMENTATION *********************/
/* Returns the external VOQ number */
@@ -1647,6 +1723,32 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
+const char *qed_get_protocol_type_str(u32 protocol_type)
+{
+ if (protocol_type >= ARRAY_SIZE(s_protocol_types))
+ return "Invalid protocol type";
+
+ return s_protocol_types[protocol_type];
+}
+
+const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id)
+{
+ const char *ramrod_cmd_id_str;
+
+ if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids))
+ return "Invalid protocol type";
+
+ if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0]))
+ return "Invalid Ramrod command ID";
+
+ ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id];
+
+ if (!ramrod_cmd_id_str)
+ return "Invalid Ramrod command ID";
+
+ return ramrod_cmd_id_str;
+}
+
static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
switch (storm_id) {
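Both lookups are defensive on three axes: protocol index out of range, command index out of range, and NULL holes in the jagged table (the TOE and PREROCE rows). A hypothetical call site, matching how the qed_spq.c hunks below use the helpers:

    DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "ramrod %s:%02x %s:%02x\n",
               qed_get_ramrod_cmd_id_str(protocol_id, cmd_id), cmd_id,
               qed_get_protocol_type_str(protocol_id), protocol_id);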
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 6958adeca86d..82e74f62b677 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2399,3 +2399,25 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
+
+int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info)
+{
+ u16 sbid = p_sb->igu_sb_id;
+ u32 i;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (sbid >= NUM_OF_SBS(p_hwfn->cdev))
+ return -EINVAL;
+
+ p_info->igu_prod = qed_rd(p_hwfn, p_ptt, IGU_REG_PRODUCER_MEMORY + sbid * 4);
+ p_info->igu_cons = qed_rd(p_hwfn, p_ptt, IGU_REG_CONSUMER_MEM + sbid * 4);
+
+ for (i = 0; i < PIS_PER_SB; i++)
+ p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + sbid * 4 * PIS_PER_SB + i * 4);
+
+ return 0;
+}
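qed_int_get_sb_dbg() snapshots a status block's IGU producer/consumer registers and its PIS_PER_SB CAU protocol indices; it needs a PTT window and is PF-only. The caller pattern, sketched (qed_get_sb_info() below wraps exactly this):

    struct qed_sb_info_dbg dbg = {};
    struct qed_ptt *ptt;
    int rc;

    ptt = qed_ptt_acquire(p_hwfn);
    if (!ptt)
            return -EAGAIN;         /* all PTT windows busy, try later */

    rc = qed_int_get_sb_dbg(p_hwfn, ptt, sb_info, &dbg);
    qed_ptt_release(p_hwfn, ptt);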
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 84c17e97f569..7e5127f61744 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -186,6 +186,19 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
/**
+ * qed_int_get_sb_dbg(): Read debug information regarding a given SB.
+ *
+ * @p_hwfn: hw function pointer
+ * @p_ptt: ptt resource
+ * @p_sb: pointer to status block for which we want to get info
+ * @p_info: pointer to struct to fill with information regarding SB
+ *
+ * Return: 0 with the status block info filled on success, otherwise an error code.
+ */
+int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info);
+
+/**
* qed_db_rec_handler(): Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 7673b3e07736..c5003fa1a25e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -255,27 +255,6 @@ static void __exit qed_exit(void)
}
module_exit(qed_exit);
-/* Check if the DMA controller on the machine can properly handle the DMA
- * addressing required by the device.
- */
-static int qed_set_coherency_mask(struct qed_dev *cdev)
-{
- struct device *dev = &cdev->pdev->dev;
-
- if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
- if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
- DP_NOTICE(cdev,
- "Can't request 64-bit consistent allocations\n");
- return -EIO;
- }
- } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
- DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
- return -EIO;
- }
-
- return 0;
-}
-
static void qed_free_pci(struct qed_dev *cdev)
{
struct pci_dev *pdev = cdev->pdev;
@@ -351,9 +330,12 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
DP_NOTICE(cdev, "Cannot find power management capability\n");
- rc = qed_set_coherency_mask(cdev);
- if (rc)
+ rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ DP_NOTICE(cdev, "Can't request DMA addresses\n");
+ rc = -EIO;
goto err2;
+ }
cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
@@ -447,7 +429,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->wol_support = true;
dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
-
+ dev_info->esl = qed_mcp_is_esl_supported(p_hwfn);
dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
@@ -2936,6 +2918,30 @@ out:
return status;
}
+static int
+qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb,
+ u16 qid, struct qed_sb_info_dbg *sb_dbg)
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns];
+ struct qed_ptt *ptt;
+ int rc;
+
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+
+ memset(sb_dbg, 0, sizeof(*sb_dbg));
+ rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
+
+ qed_ptt_release(hwfn, ptt);
+ return rc;
+}
+
static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
u8 dev_addr, u32 offset, u32 len)
{
@@ -2978,11 +2984,54 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
return rc;
}
+static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...)
+{
+ char buf[QED_MFW_REPORT_STR_SIZE];
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ va_list vl;
+
+ va_start(vl, fmt);
+ vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl);
+ va_end(vl);
+
+ if (IS_PF(cdev)) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (p_ptt) {
+ qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf));
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+ }
+}
+
static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
return QED_AFFIN_HWFN_IDX(cdev);
}
+static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ *esl_active = false;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
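qed_mfw_report() is a printf-style channel to the management firmware: __printf(2, 3) gives compile-time format checking, the message is bounded by the 256-byte QED_MFW_REPORT_STR_SIZE, and VFs silently no-op because they cannot acquire a PTT. A hypothetical caller, mirroring the qede TX-timeout path later in this diff:

    edev->ops->common->mfw_report(edev->cdev,
                                  "Txq[%d]: FW cons %04x, SW cons %04x\n",
                                  txq->index, le16_to_cpu(*txq->hw_cons_ptr),
                                  qed_chain_get_cons_idx(&txq->tx_pbl));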
@@ -3038,6 +3087,9 @@ const struct qed_common_ops qed_common_ops_pass = {
.read_nvm_cfg = &qed_nvm_flash_cfg_read,
.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
.set_grc_config = &qed_set_grc_config,
+ .mfw_report = &qed_mfw_report,
+ .get_sb_info = &qed_get_sb_info,
+ .get_esl_status = &qed_get_esl_status,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 64678a256f3b..da1eadabcb41 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -4158,3 +4158,25 @@ qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
return qed_mcp_send_debug_data(p_hwfn, p_ptt,
QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
}
+
+bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK);
+}
+
+int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, &param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc);
+ return rc;
+ }
+
+ *active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 564723800d15..369e1892450a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -15,6 +15,8 @@
#include "qed_hsi.h"
#include "qed_dev_api.h"
+#define QED_MFW_REPORT_STR_SIZE 256
+
struct qed_mcp_link_speed_params {
bool autoneg;
@@ -1337,4 +1339,24 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
u32 len);
+
+/**
+ * qed_mcp_is_esl_supported(): Return whether the management firmware supports ESL.
+ *
+ * @p_hwfn: hw function pointer
+ *
+ * Return: true if ESL is supported, otherwise false.
+ */
+bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_get_esl_status(): Get the enhanced system lockdown status.
+ *
+ * @p_hwfn: hw function pointer
+ * @p_ptt: ptt resource pointer
+ * @active: ESL active status data pointer
+ *
+ * Return: 0 with the ESL status filled in on success, otherwise an error code.
+ */
+int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
index 8a0e3c5d4bda..b70ee8200e15 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
@@ -1191,6 +1191,7 @@ enum drv_msg_code_enum {
DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001),
DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002),
DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004),
+ DRV_MSG_CODE_GET_MANAGEMENT_STATUS = DRV_MSG_CODE(0xc007),
};
#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 6f1a52e6beb2..b5e35f433a20 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -550,6 +550,8 @@
0x1 << 1)
#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
0x1 << 0)
+#define IGU_REG_PRODUCER_MEMORY 0x182000UL
+#define IGU_REG_CONSUMER_MEM 0x183000UL
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 648176dfb871..3b54da963554 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -85,10 +85,12 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
goto err;
}
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
- opaque_cid, cmd, protocol,
- (unsigned long)&p_ent->ramrod,
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n",
+ opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd),
+ cmd, qed_get_protocol_type_str(protocol), protocol,
+ (unsigned long long)(uintptr_t)&p_ent->ramrod,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index e0473729b161..d01b9245f811 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -139,10 +139,13 @@ err:
if (!p_ptt)
return -EBUSY;
qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
- "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+ "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
p_ent->elem.hdr.cmd_id,
- p_ent->elem.hdr.protocol_id,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id,
le16_to_cpu(p_ent->elem.hdr.echo));
qed_ptt_release(p_hwfn, p_ptt);
@@ -170,13 +173,16 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n",
p_ent->elem.hdr.cid,
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
p_ent->elem.hdr.cmd_id,
- p_ent->elem.hdr.protocol_id,
- p_ent->elem.data_ptr.hi,
- p_ent->elem.data_ptr.lo,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
@@ -271,17 +277,27 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
{
qed_spq_async_comp_cb cb;
- if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ if (!p_hwfn->p_spq)
return -EINVAL;
+ if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
+ DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n",
+ qed_get_protocol_type_str(p_eqe->protocol_id),
+ p_eqe->protocol_id);
+
+ return -EINVAL;
+ }
+
cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
if (cb) {
return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
&p_eqe->data, p_eqe->fw_return_code);
} else {
DP_NOTICE(p_hwfn,
- "Unknown Async completion for protocol: %d\n",
+ "Unknown Async completion for %s:%d\n",
+ qed_get_protocol_type_str(p_eqe->protocol_id),
p_eqe->protocol_id);
+
return -EINVAL;
}
}
@@ -830,8 +846,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->recov_in_prog) {
DP_VERBOSE(p_hwfn,
QED_MSG_SPQ,
- "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
- p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
+ "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n",
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
+ p_ent->elem.hdr.cmd_id,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id);
/* Let the flow complete w/o any error handling */
qed_spq_recov_set_ret_code(p_ent, fw_return_code);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8284c4c1528f..97a7ab0826ed 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -168,6 +168,8 @@ enum {
QEDE_PRI_FLAG_CMT,
QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
QEDE_PRI_FLAG_RECOVER_ON_ERROR,
+ QEDE_PRI_FLAG_ESL_SUPPORT, /* MFW supports Enhanced System Lockdown */
+ QEDE_PRI_FLAG_ESL_ACTIVE, /* Enhanced System Lockdown Active status */
QEDE_PRI_FLAG_LEN,
};
@@ -175,6 +177,8 @@ static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Coupled-Function",
"SmartAN capable",
"Recover on error",
+ "ESL capable",
+ "ESL active",
};
enum qede_ethtool_tests {
@@ -478,6 +482,7 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
static u32 qede_get_priv_flags(struct net_device *dev)
{
struct qede_dev *edev = netdev_priv(dev);
+ bool esl_active;
u32 flags = 0;
if (edev->dev_info.common.num_hwfns > 1)
@@ -489,6 +494,14 @@ static u32 qede_get_priv_flags(struct net_device *dev)
if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);
+ if (edev->dev_info.common.esl)
+ flags |= BIT(QEDE_PRI_FLAG_ESL_SUPPORT);
+
+ edev->ops->common->get_esl_status(edev->cdev, &esl_active);
+
+ if (esl_active)
+ flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE);
+
return flags;
}
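"ESL capable" is a static capability cached in dev_info at probe, while "ESL active" is re-queried from the MFW on every read of the private flags. The return value of get_esl_status() is ignored here; since esl_active is pre-initialized to false, a failed query simply reports the flag as off. A stricter variant would check it, sketched:

    bool esl_active = false;

    if (!edev->ops->common->get_esl_status(edev->cdev, &esl_active) &&
        esl_active)
            flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE);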
@@ -888,7 +901,9 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
}
static void qede_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -899,7 +914,9 @@ static void qede_get_ringparam(struct net_device *dev,
}
static int qede_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct qede_dev *edev = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 999abcfe3310..b242000a77fd 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -10,6 +10,7 @@
#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
+#include <net/gro.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
@@ -1152,7 +1153,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
qede_rx_bd_ring_consume(rxq);
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(edev->ndev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(edev->ndev, prog, act);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 06c6a5813606..b4e5a15e308b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -509,34 +509,95 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
-static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
+static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
+ char *p_sb = (char *)fp->sb_info->sb_virt;
+ u32 sb_size, i;
+
+ sb_size = sizeof(struct status_block);
+
+ for (i = 0; i < sb_size; i += 8)
+ DP_NOTICE(edev,
+ "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
+ p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
+ p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
+}
+
+static void
+qede_txq_fp_log_metadata(struct qede_dev *edev,
+ struct qede_fastpath *fp, struct qede_tx_queue *txq)
+{
+ struct qed_chain *p_chain = &txq->tx_pbl;
+
+ /* Dump txq/fp/sb ids etc. other metadata */
DP_NOTICE(edev,
- "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
- txq->index, le16_to_cpu(*txq->hw_cons_ptr),
- qed_chain_get_cons_idx(&txq->tx_pbl),
- qed_chain_get_prod_idx(&txq->tx_pbl),
- jiffies);
+ "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
+ fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
+ p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);
+
+ /* Dump all the relevant prod/cons indexes */
+ DP_NOTICE(edev,
+ "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
+ le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
+ qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
+}
+
+static void
+qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
+{
+ struct qed_sb_info_dbg sb_dbg;
+ int rc;
+
+ /* sb info */
+ qede_fp_sb_dump(edev, fp);
+
+ memset(&sb_dbg, 0, sizeof(sb_dbg));
+ rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);
+
+ DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
+ sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);
+
+ /* report to mfw */
+ edev->ops->common->mfw_report(edev->cdev,
+ "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
+ txq->index, le16_to_cpu(*txq->hw_cons_ptr),
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
+ if (!rc)
+ edev->ops->common->mfw_report(edev->cdev,
+ "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
+ txq->index, fp->sb_info->igu_sb_id,
+ sb_dbg.igu_prod, sb_dbg.igu_cons,
+ sb_dbg.pi[TX_PI(txq->cos)]);
}
static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qede_dev *edev = netdev_priv(dev);
- struct qede_tx_queue *txq;
- int cos;
+ int i;
netif_carrier_off(dev);
DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
- if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
- return;
+ for_each_queue(i) {
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ int cos;
- for_each_cos_in_txq(edev, cos) {
- txq = &edev->fp_array[txqueue].txq[cos];
+ fp = &edev->fp_array[i];
+ if (!(fp->type & QEDE_FASTPATH_TX))
+ continue;
- if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
- qed_chain_get_prod_idx(&txq->tx_pbl))
- qede_tx_log_print(edev, txq);
+ for_each_cos_in_txq(edev, cos) {
+ txq = &fp->txq[cos];
+
+ /* Dump basic metadata for all queues */
+ qede_txq_fp_log_metadata(edev, fp, txq);
+
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl))
+ qede_tx_log_print(edev, fp, txq);
+ }
}
if (IS_VF(edev))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 8c28fabb0ff6..39176e765767 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -304,11 +304,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
"HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
config.tx_type, config.rx_filter);
- if (config.flags) {
- DP_ERR(edev, "config.flags is reserved for future use\n");
- return -EINVAL;
- }
-
ptp->hw_ts_ioctl_called = 1;
ptp->tx_type = config.tx_type;
ptp->rx_filter = config.rx_filter;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 71523d747e93..b30589a135c2 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3750,7 +3750,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
struct net_device *ndev = NULL;
struct ql3_adapter *qdev = NULL;
static int cards_found;
- int pci_using_dac, err;
+ int err;
err = pci_enable_device(pdev);
if (err) {
@@ -3766,11 +3766,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
- pci_using_dac = 1;
- else if (!(err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))))
- pci_using_dac = 0;
-
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
pr_err("%s no usable DMA configuration\n", pci_name(pdev));
goto err_out_free_regions;
@@ -3797,8 +3793,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
qdev->msg_enable = netif_msg_init(debug, default_msg);
- if (pci_using_dac)
- ndev->features |= NETIF_F_HIGHDMA;
+ ndev->features |= NETIF_F_HIGHDMA;
if (qdev->device_id == QL3032_DEVICE_ID)
ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index be7abee160e7..b25102fded7b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1698,7 +1698,7 @@ int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter, u16 port);
int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, u16 port);
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
int qlcnic_read_mac_addr(struct qlcnic_adapter *);
-int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
+int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *);
void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
void qlcnic_sriov_vf_set_multi(struct net_device *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 6f1d9c1fd1b0..23cd47d588e5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -609,7 +609,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
u32, u8 *, int);
-int qlcnic_83xx_init(struct qlcnic_adapter *, int);
+int qlcnic_83xx_init(struct qlcnic_adapter *);
int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 27dffa299ca6..dbb800769cb6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2432,7 +2432,7 @@ static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
qlcnic_set_sds_ring_count(adapter, rx_cnt);
}
-int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
@@ -2466,7 +2466,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
goto exit;
if (qlcnic_sriov_vf_check(adapter)) {
- err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ err = qlcnic_sriov_vf_init(adapter);
if (err)
goto detach_mbx;
else
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index fc364b4ab6eb..e10fe071a40f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -632,7 +632,9 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
static void
qlcnic_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
@@ -663,7 +665,9 @@ qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
static int
qlcnic_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u16 num_rxd, num_jumbo_rxd, num_txd;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ed84f0f97623..d320567b2cca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2258,8 +2258,7 @@ static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
}
int
-qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
- int pci_using_dac)
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int err;
struct pci_dev *pdev = adapter->pdev;
@@ -2278,20 +2277,15 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO |
- NETIF_F_HW_VLAN_CTAG_RX);
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA);
netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM);
+ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA);
if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
}
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
-
if (qlcnic_vlan_tx_check(adapter))
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX);
@@ -2341,20 +2335,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
return 0;
}
-static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
-{
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
- *pci_using_dac = 1;
- else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
- *pci_using_dac = 0;
- else {
- dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
- return -EIO;
- }
-
- return 0;
-}
-
void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
{
int ring;
@@ -2441,8 +2421,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev = NULL;
struct qlcnic_adapter *adapter = NULL;
struct qlcnic_hardware_context *ahw;
- int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+ int err;
err = pci_enable_device(pdev);
if (err)
@@ -2453,9 +2433,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
}
- err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
- if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
goto err_out_disable_pdev;
+ }
err = pci_request_regions(pdev, qlcnic_driver_name);
if (err)
@@ -2569,7 +2551,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
- err = qlcnic_83xx_init(adapter, pci_using_dac);
+ err = qlcnic_83xx_init(adapter);
if (err) {
switch (err) {
case -ENOTRECOVERABLE:
@@ -2633,7 +2615,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adapter->portnum == 0)
qlcnic_set_drv_version(adapter);
- err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
+ err = qlcnic_setup_netdev(adapter, netdev);
if (err)
goto err_out_disable_mbx_intr;
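
The qlcnic hunks above follow a tree-wide simplification: dma_set_mask_and_coherent() with a 64-bit mask is understood to fail only when the device has no DMA mask at all, in which case a 32-bit retry would fail for the same reason. The 64-then-32 fallback ladder and the pci_using_dac plumbing therefore collapse into a single call, and NETIF_F_HIGHDMA can be advertised unconditionally. A condensed sketch of the resulting probe pattern (hypothetical foo_ prefix, error unwinding trimmed):

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
		return err;
	}

	/* No pci_using_dac flag to thread through the init paths;
	 * NETIF_F_HIGHDMA is simply OR-ed into netdev->features later.
	 */
	return 0;
}
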
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index d0111cb3b40e..c42b99cd58bd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -188,7 +188,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *, int);
void qlcnic_sriov_cleanup(struct qlcnic_adapter *);
void __qlcnic_sriov_cleanup(struct qlcnic_adapter *);
void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
-int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *);
void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 42a44c97572a..9282321c2e7f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -525,8 +525,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
- int pci_using_dac)
+static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter)
{
int err;
@@ -571,7 +570,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
- err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
+ err = qlcnic_setup_netdev(adapter, adapter->netdev);
if (err)
goto err_out_send_channel_term;
@@ -614,7 +613,7 @@ static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err;
@@ -631,7 +630,7 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
if (err)
return err;
- err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
+ err = qlcnic_sriov_setup_vf(adapter);
if (err)
return err;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
index f72e13b83869..f502db9cdea9 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
@@ -133,7 +133,9 @@ static int emac_nway_reset(struct net_device *netdev)
}
static void emac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct emac_adapter *adpt = netdev_priv(netdev);
@@ -144,7 +146,9 @@ static void emac_get_ringparam(struct net_device *netdev,
}
static int emac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct emac_adapter *adpt = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index d59fff2fbcc6..792ce9a323cd 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -246,7 +246,9 @@ qcaspi_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
}
static void
-qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct qcaspi *qca = netdev_priv(dev);
@@ -257,7 +259,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
}
static int
-qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
struct qcaspi *qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 4f39f843bb3a..ad7b9e9d7f95 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1388,7 +1388,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
}
static void cp_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
ring->rx_max_pending = CP_RX_RING_SIZE;
ring->tx_max_pending = CP_TX_RING_SIZE;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 86c44bc5f73f..19e2621e0645 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -615,6 +615,7 @@ struct rtl8169_private {
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
u32 irq_mask;
+ int irq;
struct clk *clk;
struct {
@@ -1873,7 +1874,9 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
}
static void rtl8169_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *data)
+ struct ethtool_ringparam *data,
+ struct kernel_ethtool_ringparam *kernel_data,
+ struct netlink_ext_ack *extack)
{
data->rx_max_pending = NUM_RX_DESC;
data->rx_pending = NUM_RX_DESC;
@@ -1969,8 +1972,11 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
/* 8125A family. */
- { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
- { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+ { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 },
+ /* It seems only XID 609 made it to the mass market.
+ * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
+ * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+ */
/* RTL8117 */
{ 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 },
@@ -1978,17 +1984,26 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168EP family. */
{ 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
- { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
- { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
+ * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
+ */
/* 8168H family. */
{ 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
- { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
+ */
/* 8168G family. */
{ 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
{ 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
- { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
+ */
{ 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
/* 8168F family. */
@@ -4698,7 +4713,7 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
- free_irq(pci_irq_vector(pdev, 0), tp);
+ free_irq(tp->irq, tp);
phy_disconnect(tp->phydev);
@@ -4719,7 +4734,7 @@ static void rtl8169_netpoll(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
+ rtl8169_interrupt(tp->irq, tp);
}
#endif
@@ -4753,8 +4768,7 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED;
- retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
- irqflags, dev->name, tp);
+ retval = request_irq(tp->irq, rtl8169_interrupt, irqflags, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;
@@ -4771,7 +4785,7 @@ out:
return retval;
err_free_irq:
- free_irq(pci_irq_vector(pdev, 0), tp);
+ free_irq(tp->irq, tp);
err_release_fw_2:
rtl_release_firmware(tp);
rtl8169_rx_clear(tp);
@@ -5272,12 +5286,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* Disable ASPM L1 as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users.
- */
- rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
- tp->aspm_manageable = !rc;
-
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
@@ -5320,6 +5328,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mac_version = chipset;
+ /* Disable ASPM L1 as it causes random devices to stop working and
+ * full system hangs for some PCIe device users. Chips from the
+ * RTL8168h family onwards partially have issues with L1.2, but
+ * seem to work fine with L1 and L1.1.
+ */
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_45)
+ rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+ else
+ rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ tp->aspm_manageable = !rc;
+
tp->dash_type = rtl_check_dash(tp);
tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
@@ -5341,6 +5360,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "Can't allocate interrupt\n");
return rc;
}
+ tp->irq = pci_irq_vector(pdev, 0);
INIT_WORK(&tp->wk.work, rtl_task);
@@ -5375,12 +5395,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
- dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
- dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
+ netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V2);
+ netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V2);
} else {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
- dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
- dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
+ netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V1);
+ netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V1);
}
dev->hw_features |= NETIF_F_RXALL;
@@ -5416,8 +5436,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr, xid,
- pci_irq_vector(pdev, 0));
+ rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
if (jumbo_max)
netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
@@ -5441,7 +5460,9 @@ static struct pci_driver rtl8169_pci_driver = {
.probe = rtl_init_one,
.remove = rtl_remove_one,
.shutdown = rtl_shutdown,
- .driver.pm = pm_ptr(&rtl8169_pm_ops),
+#ifdef CONFIG_PM
+ .driver.pm = &rtl8169_pm_ops,
+#endif
};
module_pci_driver(rtl8169_pci_driver);
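
Three independent r8169 changes are bundled above: the interrupt number is read once with pci_irq_vector() at probe time and cached in tp->irq; ASPM L1 disabling is deferred until the MAC version is known, so RTL8168h and later chips only lose L1.2 while keeping L1/L1.1; and direct stores to dev->gso_max_size/gso_max_segs are routed through the netif_set_gso_max_*() helpers instead of poking net_device fields directly. A hedged sketch of the first and last items, with hypothetical names and limits:

static void foo_finish_probe(struct pci_dev *pdev, struct net_device *dev,
			     struct foo_private *tp)	/* hypothetical */
{
	/* Query the allocated vector once; reuse tp->irq in open/close,
	 * netpoll and log messages instead of repeated lookups.
	 */
	tp->irq = pci_irq_vector(pdev, 0);

	/* Go through the accessors so the core can keep dependent GSO
	 * state consistent; 64K/64 are hypothetical limits.
	 */
	netif_set_gso_max_size(dev, 64 * 1024);
	netif_set_gso_max_segs(dev, 64);
}
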
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b4c597f4040c..b215cde68e10 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -30,8 +30,7 @@
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/reset.h>
-
-#include <asm/div64.h>
+#include <linux/math64.h>
#include "ravb.h"
@@ -1605,7 +1604,9 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
static void ravb_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -1616,7 +1617,9 @@ static void ravb_get_ringparam(struct net_device *ndev,
}
static int ravb_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
@@ -2218,10 +2221,6 @@ static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
if (copy_from_user(&config, req->ifr_data, sizeof(config)))
return -EFAULT;
- /* Reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
tstamp_tx_ctrl = 0;
@@ -2488,8 +2487,7 @@ static int ravb_set_gti(struct net_device *ndev)
if (!rate)
return -EINVAL;
- inc = 1000000000ULL << 20;
- do_div(inc, rate);
+ inc = div64_ul(1000000000ULL << 20, rate);
if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a3fbb2221c9a..d947a628e166 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2296,7 +2296,9 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
static void sh_eth_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2307,7 +2309,9 @@ static void sh_eth_get_ringparam(struct net_device *ndev,
}
static int sh_eth_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret;
@@ -3364,8 +3368,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* MDIO bus init */
ret = sh_mdio_init(mdp, pd);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "MDIO init failed\n");
goto out_release;
}
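
The sh_eth probe cleanup uses dev_err_probe(), which folds the "if (ret != -EPROBE_DEFER) dev_err(...)" idiom into one call: it logs loudly for real errors, stays quiet on -EPROBE_DEFER (while recording the deferral reason for debugging), and returns the error so it can be used inline. A minimal sketch with a hypothetical resource:

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);	/* hypothetical resource */
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");
	return 0;
}
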
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index ba4062881eed..3fcea211716c 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1995,17 +1995,6 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
return err ? -EOPNOTSUPP : 0;
}
-static int rocker_port_change_proto_down(struct net_device *dev,
- bool proto_down)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
-
- if (rocker_port->dev->flags & IFF_UP)
- rocker_port_set_enable(rocker_port, !proto_down);
- rocker_port->dev->proto_down = proto_down;
- return 0;
-}
-
static void rocker_port_neigh_destroy(struct net_device *dev,
struct neighbour *n)
{
@@ -2037,7 +2026,6 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_set_mac_address = rocker_port_set_mac_address,
.ndo_change_mtu = rocker_port_change_mtu,
.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
- .ndo_change_proto_down = rocker_port_change_proto_down,
.ndo_neigh_destroy = rocker_port_neigh_destroy,
.ndo_get_port_parent_id = rocker_port_get_port_parent_id,
};
@@ -2882,19 +2870,10 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_request_regions;
}
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
- goto err_pci_set_dma_mask;
- }
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "dma_set_mask failed\n");
- goto err_pci_set_dma_mask;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
+ goto err_pci_set_dma_mask;
}
if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 3e1ca7a8d029..bc70c6abd6a5 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2783,7 +2783,8 @@ static void ofdpa_fib4_abort(struct rocker *rocker)
if (!ofdpa_port)
continue;
nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
+ ofdpa_flow_tbl_del(ofdpa_port,
+ OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT,
flow_entry);
}
spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 16a4cbae9326..c672f92d65e9 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -749,6 +749,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
const struct ether3_data *data = id->data;
struct net_device *dev;
int bus_type, ret;
+ u8 addr[ETH_ALEN];
ether3_banner();
@@ -776,7 +777,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
priv(dev)->seeq = priv(dev)->base + data->base_offset;
dev->irq = ec->irq;
- ether3_addr(dev->dev_addr, ec);
+ ether3_addr(addr, ec);
+ eth_hw_addr_set(dev, addr);
priv(dev)->dev = dev;
timer_setup(&priv(dev)->timer, ether3_ledoff, 0);
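
The ether3 change (repeated for smc9194 further down) is part of the conversion to a const net_device->dev_addr: drivers assemble the MAC address in a local buffer and publish it atomically with eth_hw_addr_set() rather than writing the bytes in place. A sketch, with the EEPROM reader left hypothetical:

static void foo_fetch_mac(struct net_device *dev, struct foo_priv *priv)
{
	u8 addr[ETH_ALEN];

	foo_read_mac_from_eeprom(priv, addr);	/* hypothetical helper */
	eth_hw_addr_set(dev, addr);	/* dev->dev_addr is const now */
}
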
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 835c838b7dfa..5dba4125d953 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -20,8 +20,11 @@
/* This is the maximum number of descriptor rings supported by the QDMA */
#define EFX_EF100_MAX_DMAQ_SIZE 16384UL
-static void ef100_ethtool_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static void
+ef100_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index e77a5cb4e40d..f79b14a119ae 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -996,11 +996,11 @@ static int ef100_process_design_param(struct efx_nic *efx,
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE);
- efx->net_dev->gso_max_size = nic_data->tso_max_payload_len;
+ netif_set_gso_max_size(efx->net_dev, nic_data->tso_max_payload_len);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
- efx->net_dev->gso_max_segs = nic_data->tso_max_payload_num_segs;
+ netif_set_gso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
@@ -1125,7 +1125,7 @@ static int ef100_probe_main(struct efx_nic *efx)
nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
- net_dev->gso_max_segs = ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT;
+ netif_set_gso_max_segs(net_dev, ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
/* Read design parameters */
rc = ef100_check_design_params(efx);
if (rc) {
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 6960a2fe2b53..302dc835ac3d 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -5,6 +5,7 @@
* Copyright 2005-2013 Solarflare Communications Inc.
*/
+#include <linux/filter.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -709,7 +710,7 @@ static int efx_register_netdev(struct efx_nic *efx)
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
net_dev->ethtool_ops = &efx_ethtool_ops;
- net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
+ netif_set_gso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 3dbea028b325..ead550ae2709 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -10,6 +10,7 @@
#include "net_driver.h"
#include <linux/module.h>
+#include <linux/filter.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
@@ -818,11 +819,8 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
old_txq_entries = efx->txq_entries;
efx->rxq_entries = rxq_entries;
efx->txq_entries = txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
+ for (i = 0; i < efx->n_channels; i++)
+ swap(efx->channel[i], other_channel[i]);
/* Restart buffer table allocation */
efx->next_buffer_table = next_buffer_table;
@@ -864,11 +862,8 @@ rollback:
/* Swap back */
efx->rxq_entries = old_rxq_entries;
efx->txq_entries = old_txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
+ for (i = 0; i < efx->n_channels; i++)
+ swap(efx->channel[i], other_channel[i]);
goto out;
}
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index f187631b2c5c..af37c990217e 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -9,6 +9,7 @@
*/
#include "net_driver.h"
+#include <linux/filter.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/gre.h>
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index e002ce21788d..48506373721a 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -157,8 +157,11 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
return 0;
}
-static void efx_ethtool_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static void
+efx_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -168,8 +171,11 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
ring->tx_pending = efx->txq_entries;
}
-static int efx_ethtool_set_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static int
+efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
u32 txq_entries;
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 314c9c69eb0e..60c595ef7589 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2267,7 +2267,7 @@ static int ef4_register_netdev(struct ef4_nic *efx)
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &ef4_netdev_ops;
net_dev->ethtool_ops = &ef4_ethtool_ops;
- net_dev->gso_max_segs = EF4_TSO_MAX_SEGS;
+ netif_set_gso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
net_dev->min_mtu = EF4_MIN_MTU;
net_dev->max_mtu = EF4_MAX_MTU;
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 137e8a7aeaa1..907254b36663 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -637,8 +637,11 @@ static int ef4_ethtool_set_coalesce(struct net_device *net_dev,
return 0;
}
-static void ef4_ethtool_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static void
+ef4_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ef4_nic *efx = netdev_priv(net_dev);
@@ -648,8 +651,11 @@ static void ef4_ethtool_get_ringparam(struct net_device *net_dev,
ring->tx_pending = efx->txq_entries;
}
-static int ef4_ethtool_set_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static int
+ef4_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ef4_nic *efx = netdev_priv(net_dev);
u32 txq_entries;
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index c4fe3c48ac46..899cc1671004 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -71,7 +71,6 @@ int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
u32 flags, u32 loopback_mode, u32 loopback_speed)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
- int rc;
BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
@@ -80,9 +79,8 @@ int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
- rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ return efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- return rc;
}
int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 797e51802ccb..f0ef515e2ade 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1765,9 +1765,6 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
{
int rc;
- if (init->flags)
- return -EINVAL;
-
if ((init->tx_type != HWTSTAMP_TX_OFF) &&
(init->tx_type != HWTSTAMP_TX_ON))
return -ERANGE;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 606750938b89..2375cef577e4 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -338,7 +338,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
break;
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
efx_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_bad_drops++;
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
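
bpf_warn_invalid_xdp_action() grew netdev and prog arguments in this cycle so its one-shot warning can identify which device and BPF program produced the bogus verdict; the netsec and stmmac hunks below make the same mechanical update. A sketch of the usual verdict-handling shape, under those assumptions:

static u32 foo_run_xdp(struct net_device *dev, struct bpf_prog *prog,
		       struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		return act;
	default:
		/* New three-argument form: names the device and program. */
		bpf_warn_invalid_xdp_action(dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, prog, act);
		fallthrough;
	case XDP_DROP:
		return XDP_DROP;
	}
}
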
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 0ce403fa5f1a..af661c65ffe2 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -856,6 +856,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
word configuration_register;
word memory_info_register;
word memory_cfg_register;
+ u8 addr[ETH_ALEN];
/* Grab the region so that no one else tries to probe our ioports. */
if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME))
@@ -924,9 +925,10 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
word address;
address = inw( ioaddr + ADDR0 + i );
- dev->dev_addr[ i + 1] = address >> 8;
- dev->dev_addr[ i ] = address & 0xFF;
+ addr[i + 1] = address >> 8;
+ addr[i] = address & 0xFF;
}
+ eth_hw_addr_set(dev, addr);
/* get the memory information */
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index de7d8bf2c226..556bd353dd42 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -933,7 +933,7 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
}
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(priv->ndev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->ndev, prog, act);
@@ -1977,11 +1977,12 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
static int netsec_probe(struct platform_device *pdev)
{
- struct resource *mmio_res, *eeprom_res, *irq_res;
+ struct resource *mmio_res, *eeprom_res;
struct netsec_priv *priv;
u32 hw_ver, phy_addr = 0;
struct net_device *ndev;
int ret;
+ int irq;
mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mmio_res) {
@@ -1995,11 +1996,9 @@ static int netsec_probe(struct platform_device *pdev)
return -ENODEV;
}
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
- dev_err(&pdev->dev, "No IRQ resource found.\n");
- return -ENODEV;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
ndev = alloc_etherdev(sizeof(*priv));
if (!ndev)
@@ -2010,7 +2009,7 @@ static int netsec_probe(struct platform_device *pdev)
spin_lock_init(&priv->reglock);
SET_NETDEV_DEV(ndev, &pdev->dev);
platform_set_drvdata(pdev, priv);
- ndev->irq = irq_res->start;
+ ndev->irq = irq;
priv->dev = &pdev->dev;
priv->ndev = ndev;
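
The netsec probe switches from platform_get_resource(pdev, IORESOURCE_IRQ, 0) to platform_get_irq(): fetching IRQs as raw resources bypasses interrupt-controller mapping and is being phased out on DT platforms, while the helper performs the mapping, logs its own error, and returns a negative errno (never 0). Sketch:

static int foo_get_irq(struct platform_device *pdev, struct net_device *ndev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* error already logged by the core */

	ndev->irq = irq;
	return 0;
}
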
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 9160f9ed363a..6b5d96bced47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -317,6 +317,7 @@ enum tx_frame_status {
tx_not_ls = 0x1,
tx_err = 0x2,
tx_dma_own = 0x4,
+ tx_err_bump_tc = 0x8,
};
enum dma_irq_status {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
index adfeb8d3293d..62a69a91ab22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -48,46 +49,60 @@
#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT)
#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT)
+struct oxnas_dwmac;
+
+struct oxnas_dwmac_data {
+ int (*setup)(struct oxnas_dwmac *dwmac);
+};
+
struct oxnas_dwmac {
struct device *dev;
struct clk *clk;
struct regmap *regmap;
+ const struct oxnas_dwmac_data *data;
};
-static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
+static int oxnas_dwmac_setup_ox810se(struct oxnas_dwmac *dwmac)
{
- struct oxnas_dwmac *dwmac = priv;
unsigned int value;
int ret;
- /* Reset HW here before changing the glue configuration */
- ret = device_reset(dwmac->dev);
- if (ret)
+ ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
+ if (ret < 0)
return ret;
- ret = clk_prepare_enable(dwmac->clk);
- if (ret)
- return ret;
+ /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
+ value |= BIT(DWMAC_CKEN_GTX) |
+ /* Use simple mux for 25/125 MHz clock switching */
+ BIT(DWMAC_SIMPLE_MUX);
+
+ regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
+
+ return 0;
+}
+
+static int oxnas_dwmac_setup_ox820(struct oxnas_dwmac *dwmac)
+{
+ unsigned int value;
+ int ret;
ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
- if (ret < 0) {
- clk_disable_unprepare(dwmac->clk);
+ if (ret < 0)
return ret;
- }
/* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
value |= BIT(DWMAC_CKEN_GTX) |
/* Use simple mux for 25/125 MHz clock switching */
- BIT(DWMAC_SIMPLE_MUX) |
- /* set auto switch tx clock source */
- BIT(DWMAC_AUTO_TX_SOURCE) |
- /* enable tx & rx vardelay */
- BIT(DWMAC_CKEN_TX_OUT) |
- BIT(DWMAC_CKEN_TXN_OUT) |
- BIT(DWMAC_CKEN_TX_IN) |
- BIT(DWMAC_CKEN_RX_OUT) |
- BIT(DWMAC_CKEN_RXN_OUT) |
- BIT(DWMAC_CKEN_RX_IN);
+ BIT(DWMAC_SIMPLE_MUX) |
+ /* set auto switch tx clock source */
+ BIT(DWMAC_AUTO_TX_SOURCE) |
+ /* enable tx & rx vardelay */
+ BIT(DWMAC_CKEN_TX_OUT) |
+ BIT(DWMAC_CKEN_TXN_OUT) |
+ BIT(DWMAC_CKEN_TX_IN) |
+ BIT(DWMAC_CKEN_RX_OUT) |
+ BIT(DWMAC_CKEN_RXN_OUT) |
+ BIT(DWMAC_CKEN_RX_IN);
regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
/* set tx & rx vardelay */
@@ -100,6 +115,27 @@ static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
return 0;
}
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct oxnas_dwmac *dwmac = priv;
+ int ret;
+
+ /* Reset HW here before changing the glue configuration */
+ ret = device_reset(dwmac->dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwmac->clk);
+ if (ret)
+ return ret;
+
+ ret = dwmac->data->setup(dwmac);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct oxnas_dwmac *dwmac = priv;
@@ -128,6 +164,12 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ dwmac->data = (const struct oxnas_dwmac_data *)of_device_get_match_data(&pdev->dev);
+ if (!dwmac->data) {
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
plat_dat->init = oxnas_dwmac_init;
@@ -166,8 +208,23 @@ err_remove_config_dt:
return ret;
}
+static const struct oxnas_dwmac_data ox810se_dwmac_data = {
+ .setup = oxnas_dwmac_setup_ox810se,
+};
+
+static const struct oxnas_dwmac_data ox820_dwmac_data = {
+ .setup = oxnas_dwmac_setup_ox820,
+};
+
static const struct of_device_id oxnas_dwmac_match[] = {
- { .compatible = "oxsemi,ox820-dwmac" },
+ {
+ .compatible = "oxsemi,ox810se-dwmac",
+ .data = &ox810se_dwmac_data,
+ },
+ {
+ .compatible = "oxsemi,ox820-dwmac",
+ .data = &ox820_dwmac_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
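
The dwmac-oxnas rework is the standard per-compatible variant pattern: each OF match entry carries a data pointer to an ops structure, fetched once with of_device_get_match_data(), so adding OX810SE support needs no runtime compatible-string checks. A compact sketch with hypothetical vendor strings:

struct foo_priv;

struct foo_data {
	int (*setup)(struct foo_priv *priv);
};

static int foo_setup_v1(struct foo_priv *priv) { return 0; }
static int foo_setup_v2(struct foo_priv *priv) { return 0; }

static const struct foo_data foo_v1_data = { .setup = foo_setup_v1 };
static const struct foo_data foo_v2_data = { .setup = foo_setup_v2 };

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1_data },
	{ .compatible = "vendor,foo-v2", .data = &foo_v2_data },
	{ }
};

/* In probe: look the variant up once and bail out if the match entry
 * carries no data (hypothetical helper).
 */
static const struct foo_data *foo_get_variant(struct platform_device *pdev)
{
	return of_device_get_match_data(&pdev->dev);
}
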
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 5c74b6279d69..2ffa0a11eea5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -113,8 +113,10 @@ static void rgmii_updatel(struct qcom_ethqos *ethqos,
rgmii_writel(ethqos, temp, offset);
}
-static void rgmii_dump(struct qcom_ethqos *ethqos)
+static void rgmii_dump(void *priv)
{
+ struct qcom_ethqos *ethqos = priv;
+
dev_dbg(&ethqos->pdev->dev, "Rgmii register dump\n");
dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG: %x\n",
rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG));
@@ -447,6 +449,24 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed)
ethqos_configure(ethqos);
}
+static int ethqos_clks_config(void *priv, bool enabled)
+{
+ struct qcom_ethqos *ethqos = priv;
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_prepare_enable(ethqos->rgmii_clk);
+ if (ret) {
+ dev_err(&ethqos->pdev->dev, "rgmii_clk enable failed\n");
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(ethqos->rgmii_clk);
+ }
+
+ return ret;
+}
+
static int qcom_ethqos_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -466,6 +486,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
}
+ plat_dat->clks_config = ethqos_clks_config;
+
ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL);
if (!ethqos) {
ret = -ENOMEM;
@@ -489,7 +511,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
goto err_mem;
}
- ret = clk_prepare_enable(ethqos->rgmii_clk);
+ ret = ethqos_clks_config(ethqos, true);
if (ret)
goto err_mem;
@@ -499,6 +521,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->bsp_priv = ethqos;
plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
+ plat_dat->dump_debug_regs = rgmii_dump;
plat_dat->has_gmac4 = 1;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
@@ -507,12 +530,10 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (ret)
goto err_clk;
- rgmii_dump(ethqos);
-
return ret;
err_clk:
- clk_disable_unprepare(ethqos->rgmii_clk);
+ ethqos_clks_config(ethqos, false);
err_mem:
stmmac_remove_config_dt(pdev, plat_dat);
@@ -530,7 +551,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
return -ENODEV;
ret = stmmac_pltfr_remove(pdev);
- clk_disable_unprepare(ethqos->rgmii_clk);
+ ethqos_clks_config(ethqos, false);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 617d0e4c6495..09644ab0d87a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -756,7 +756,7 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
if (err) {
dev_err(priv->device, "EMAC reset timeout\n");
- return -EFAULT;
+ return err;
}
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index e2e0f977875d..dde5b772a5af 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -22,21 +22,21 @@
#define ETHER_CLK_SEL_RMII_CLK_EN BIT(2)
#define ETHER_CLK_SEL_RMII_CLK_RST BIT(3)
#define ETHER_CLK_SEL_DIV_SEL_2 BIT(4)
-#define ETHER_CLK_SEL_DIV_SEL_20 BIT(0)
+#define ETHER_CLK_SEL_DIV_SEL_20 0
#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
#define ETHER_CLK_SEL_FREQ_SEL_2P5M 0
-#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN 0
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
-#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN 0
#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC BIT(12)
#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV BIT(13)
-#define ETHER_CLK_SEL_TX_CLK_O_TX_I BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_O_TX_I 0
#define ETHER_CLK_SEL_TX_CLK_O_RMII_I BIT(14)
#define ETHER_CLK_SEL_TX_O_E_N_IN BIT(15)
-#define ETHER_CLK_SEL_RMII_CLK_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RMII_CLK_SEL_IN 0
#define ETHER_CLK_SEL_RMII_CLK_SEL_RX_C BIT(16)
#define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
@@ -96,31 +96,41 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
val |= ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
switch (dwmac->phy_intf_sel) {
case ETHER_CONFIG_INTF_RGMII:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
case ETHER_CONFIG_INTF_RMII:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_RST;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
case ETHER_CONFIG_INTF_MII:
default:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
- ETHER_CLK_SEL_RMII_CLK_EN;
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
}
- /* Start clock */
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
-
- val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
-
spin_unlock_irqrestore(&dwmac->lock, flags);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index cbf4429fb1d2..d3b4765c1a5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -32,6 +32,8 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
return tx_not_ls;
if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
+ ret = tx_err;
+
if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
x->tx_jabber++;
if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
@@ -53,16 +55,16 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
x->tx_deferred++;
- if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
+ if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) {
x->tx_underflow++;
+ ret |= tx_err_bump_tc;
+ }
if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
x->tx_ip_header_error++;
if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
x->tx_payload_error++;
-
- ret = tx_err;
}
if (unlikely(tdes3 & TDES3_DEFERRED))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 873b9e3e5da2..5b195d5051d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -10,7 +10,6 @@
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
#include <linux/hrtimer.h>
@@ -23,6 +22,7 @@
#include <linux/net_tstamp.h>
#include <linux/reset.h>
#include <net/page_pool.h>
+#include <uapi/linux/bpf.h>
struct stmmac_resources {
void __iomem *addr;
@@ -175,11 +175,14 @@ struct stmmac_flow_entry {
/* Rx Frame Steering */
enum stmmac_rfs_type {
STMMAC_RFS_T_VLAN,
+ STMMAC_RFS_T_LLDP,
+ STMMAC_RFS_T_1588,
STMMAC_RFS_T_MAX,
};
struct stmmac_rfs_entry {
unsigned long cookie;
+ u16 etype;
int in_use;
int type;
int tc;
@@ -191,7 +194,6 @@ struct stmmac_priv {
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
- int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
@@ -226,7 +228,6 @@ struct stmmac_priv {
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
- int mii_irq[PHY_MAX_ADDR];
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -334,8 +335,8 @@ void stmmac_set_ethtool_ops(struct net_device *netdev);
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_open(struct net_device *dev);
-int stmmac_release(struct net_device *dev);
+int stmmac_xdp_open(struct net_device *dev);
+void stmmac_xdp_release(struct net_device *dev);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
int stmmac_dvr_remove(struct device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d89455803bed..164dff5ec32e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -290,7 +290,6 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strlcpy(info->bus_info, pci_name(priv->plat->pdev),
sizeof(info->bus_info));
}
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
@@ -463,7 +462,9 @@ static int stmmac_nway_reset(struct net_device *dev)
}
static void stmmac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(netdev);
@@ -474,7 +475,9 @@ static void stmmac_get_ringparam(struct net_device *netdev,
}
static int stmmac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending < DMA_MIN_RX_SIZE ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8ded4be08b00..639a753266e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -132,6 +132,8 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan);
#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
@@ -400,7 +402,7 @@ static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
* Description: this function is to verify and enter in LPI mode in case of
* EEE.
*/
-static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -410,13 +412,14 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return; /* still unfinished work */
+ return -EBUSY; /* still unfinished work */
}
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
priv->plat->en_tx_lpi_clockgating);
+ return 0;
}
/**
@@ -448,8 +451,8 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/**
@@ -518,14 +521,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
return true;
}
-static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
-{
- /* Correct the clk domain crossing(CDC) error */
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
- return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
- return 0;
-}
-
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
* @p : descriptor pointer
@@ -557,7 +552,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
}
if (found) {
- ns -= stmmac_cdc_adjust(priv);
+ ns -= priv->plat->cdc_error_adj;
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -594,7 +589,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
- ns -= stmmac_cdc_adjust(priv);
+ ns -= priv->plat->cdc_error_adj;
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
@@ -644,10 +639,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
__func__, config.flags, config.tx_type, config.rx_filter);
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
if (config.tx_type != HWTSTAMP_TX_OFF &&
config.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
@@ -899,6 +890,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
int ret;
+ if (priv->plat->ptp_clk_freq_config)
+ priv->plat->ptp_clk_freq_config(priv);
+
ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
if (ret)
return ret;
@@ -921,8 +915,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- stmmac_ptp_register(priv);
-
return 0;
}
@@ -2394,7 +2386,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
bool work_done = true;
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
budget = min(budget, stmmac_tx_avail(priv, queue));
@@ -2478,6 +2470,21 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
return !!budget && work_done;
}
+static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
+{
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
+ tc += 64;
+
+ if (priv->plat->force_thresh_dma_mode)
+ stmmac_set_dma_operation_mode(priv, tc, tc, chan);
+ else
+ stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
+ chan);
+
+ priv->xstats.threshold = tc;
+ }
+}
+
/**
* stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
@@ -2543,6 +2550,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* ... verify the status error condition */
if (unlikely(status & tx_err)) {
priv->dev->stats.tx_errors++;
+ if (unlikely(status & tx_err_bump_tc))
+ stmmac_bump_dma_threshold(priv, queue);
} else {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -2640,8 +2649,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
priv->eee_sw_timer_en) {
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/* We still have pending packets, let's call for a new scheduling */
@@ -2793,21 +2802,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
for (chan = 0; chan < tx_channel_count; chan++) {
if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
- (tc <= 256)) {
- tc += 64;
- if (priv->plat->force_thresh_dma_mode)
- stmmac_set_dma_operation_mode(priv,
- tc,
- tc,
- chan);
- else
- stmmac_set_dma_operation_mode(priv,
- tc,
- SF_DMA_MODE,
- chan);
- priv->xstats.threshold = tc;
- }
+ stmmac_bump_dma_threshold(priv, chan);
} else if (unlikely(status[chan] == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
@@ -3245,7 +3240,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @init_ptp: initialize PTP if set
+ * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3255,7 +3250,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3312,13 +3307,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
- if (init_ptp) {
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_warn(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
- }
+ ret = stmmac_init_ptp(priv);
+ if (ret == -EOPNOTSUPP)
+ netdev_warn(priv->dev, "PTP not supported by HW\n");
+ else if (ret)
+ netdev_warn(priv->dev, "PTP init failed\n");
+ else if (ptp_register)
+ stmmac_ptp_register(priv);
priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
@@ -3677,7 +3672,7 @@ static int stmmac_request_irq(struct net_device *dev)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-int stmmac_open(struct net_device *dev)
+static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface;
@@ -3801,7 +3796,7 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
* Description:
* This is the stop entry point of the driver.
*/
-int stmmac_release(struct net_device *dev)
+static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
@@ -4697,7 +4692,7 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
__netif_tx_lock(nq, cpu);
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
if (res == STMMAC_XDP_TX)
@@ -4730,7 +4725,7 @@ static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
res = STMMAC_XDP_REDIRECT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(priv->dev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->dev, prog, act);
@@ -5761,21 +5756,7 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
if (unlikely(status & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
- tc <= 256) {
- tc += 64;
- if (priv->plat->force_thresh_dma_mode)
- stmmac_set_dma_operation_mode(priv,
- tc,
- tc,
- chan);
- else
- stmmac_set_dma_operation_mode(priv,
- tc,
- SF_DMA_MODE,
- chan);
- priv->xstats.threshold = tc;
- }
+ stmmac_bump_dma_threshold(priv, chan);
} else if (unlikely(status == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
@@ -6335,7 +6316,7 @@ static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_lock(nq, cpu);
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
for (i = 0; i < num_frames; i++) {
int res;
@@ -6471,6 +6452,140 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
}
+void stmmac_xdp_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+ /* Disable NAPI process */
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ /* Free the IRQ lines */
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+ /* Stop TX/RX DMA channels */
+ stmmac_stop_all_dma(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, false);
+
+ /* set trans_start so we don't get spurious
+ * watchdogs during reset
+ */
+ netif_trans_update(dev);
+ netif_carrier_off(dev);
+}
+
+int stmmac_xdp_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_cnt, tx_cnt);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 buf_size;
+ bool sph_en;
+ u32 chan;
+ int ret;
+
+ ret = alloc_dma_desc_resources(priv);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto dma_desc_error;
+ }
+
+ ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+ /* Adjust Split header */
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_cnt; chan++) {
+ rx_q = &priv->rx_queue[chan];
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ tx_q = &priv->tx_queue[chan];
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, true);
+
+ /* Start Rx & Tx DMA Channels */
+ stmmac_start_all_dma(priv);
+
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+
+	/* Enable NAPI process */
+ stmmac_enable_all_queues(priv);
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+
+irq_error:
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ stmmac_hw_teardown(dev);
+init_error:
+ free_dma_desc_resources(priv);
+dma_desc_error:
+ return ret;
+}
+
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -7046,7 +7161,8 @@ int stmmac_dvr_probe(struct device *device,
pm_runtime_get_noresume(device);
pm_runtime_set_active(device);
- pm_runtime_enable(device);
+ if (!pm_runtime_enabled(device))
+ pm_runtime_enable(device);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -7094,6 +7210,9 @@ int stmmac_dvr_probe(struct device *device,
stmmac_init_fs(ndev);
#endif
+ if (priv->plat->dump_debug_regs)
+ priv->plat->dump_debug_regs(priv->plat->bsp_priv);
+
/* Let pm_runtime_put() disable the clocks.
* If CONFIG_PM is not enabled, the clocks will stay powered.
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index be9b58b2abf9..1c9f02f9c317 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -297,9 +297,6 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
{
int i;
- if (priv->plat->ptp_clk_freq_config)
- priv->plat->ptp_clk_freq_config(priv);
-
for (i = 0; i < priv->dma_cap.pps_out_num; i++) {
if (i >= STMMAC_PPS_MAX)
break;
@@ -309,6 +306,11 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
if (priv->plat->ptp_max_adj)
stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+ /* Calculate the clock domain crossing (CDC) error if necessary */
+ priv->plat->cdc_error_adj = 0;
+ if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+ priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
+
stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
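The clock domain crossing (CDC) compensation is two periods of the PTP reference clock, expressed in nanoseconds. A worked example with a hypothetical 250 MHz clk_ptp_rate:

	u32 clk_ptp_rate = 250000000;				/* assumed example rate */
	u32 cdc_error_adj = (2 * NSEC_PER_SEC) / clk_ptp_rate;	/* = 8 ns */

Computing the adjustment once at registration means it appears to be subtracted from each GMAC4 hardware timestamp elsewhere in the driver rather than recomputed per packet.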
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index d0a2b289f460..d61766eeac6d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -237,6 +237,8 @@ static int tc_rfs_init(struct stmmac_priv *priv)
int i;
priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
+ priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1;
+ priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1;
for (i = 0; i < STMMAC_RFS_T_MAX; i++)
priv->rfs_entries_total += priv->rfs_entries_max[i];
@@ -451,6 +453,8 @@ static int tc_parse_flow_actions(struct stmmac_priv *priv,
return 0;
}
+#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0)
+
static int tc_add_basic_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls,
struct stmmac_flow_entry *entry)
@@ -464,6 +468,7 @@ static int tc_add_basic_flow(struct stmmac_priv *priv,
return -EINVAL;
flow_rule_match_basic(rule, &match);
+
entry->ip_proto = match.key->ip_proto;
return 0;
}
@@ -724,6 +729,114 @@ static int tc_del_vlan_flow(struct stmmac_priv *priv,
return 0;
}
+static int tc_add_ethtype_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+ struct flow_match_basic match;
+
+ if (!entry) {
+ entry = tc_find_rfs(priv, cls, true);
+ if (!entry)
+ return -ENOENT;
+ }
+
+	/* Without a basic match key there is nothing we can offload */
+ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
+ return -EINVAL;
+
+ if (tc < 0) {
+ netdev_err(priv->dev, "Invalid traffic class\n");
+ return -EINVAL;
+ }
+
+ flow_rule_match_basic(rule, &match);
+
+ if (match.mask->n_proto) {
+ u16 etype = ntohs(match.key->n_proto);
+
+ if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_err(priv->dev, "Only full mask is supported for EthType filter");
+ return -EINVAL;
+ }
+ switch (etype) {
+ case ETH_P_LLDP:
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_LLDP])
+ return -ENOENT;
+
+ entry->type = STMMAC_RFS_T_LLDP;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++;
+
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_DCBCPQ, tc);
+ break;
+ case ETH_P_1588:
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_1588])
+ return -ENOENT;
+
+ entry->type = STMMAC_RFS_T_1588;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++;
+
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_PTPQ, tc);
+ break;
+ default:
+ netdev_err(priv->dev, "EthType(0x%x) is not supported", etype);
+ return -EINVAL;
+ }
+
+ entry->in_use = true;
+ entry->cookie = cls->cookie;
+ entry->tc = tc;
+ entry->etype = etype;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tc_del_ethtype_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
+
+ if (!entry || !entry->in_use ||
+ entry->type < STMMAC_RFS_T_LLDP ||
+ entry->type > STMMAC_RFS_T_1588)
+ return -ENOENT;
+
+ switch (entry->etype) {
+ case ETH_P_LLDP:
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_DCBCPQ, 0);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--;
+ break;
+ case ETH_P_1588:
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_PTPQ, 0);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--;
+ break;
+ default:
+ netdev_err(priv->dev, "EthType(0x%x) is not supported",
+ entry->etype);
+ return -EINVAL;
+ }
+
+ entry->in_use = false;
+ entry->cookie = 0;
+ entry->tc = 0;
+ entry->etype = 0;
+ entry->type = 0;
+
+ return 0;
+}
+
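ETHER_TYPE_FULL_MASK is ~0 truncated to 16 bits, i.e. 0xffff in network byte order, so tc_add_ethtype_flow() accepts only rules that match the EtherType exactly; the MAC's packet-routing registers cannot express partial matches. The check in isolation (ethtype_is_exact() is a hypothetical helper for illustration):

static bool ethtype_is_exact(__be16 mask)
{
	return mask == cpu_to_be16(~0);	/* all 16 EtherType bits compared */
}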
static int tc_add_flow_cls(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
@@ -733,6 +846,10 @@ static int tc_add_flow_cls(struct stmmac_priv *priv,
if (!ret)
return ret;
+ ret = tc_add_ethtype_flow(priv, cls);
+ if (!ret)
+ return ret;
+
return tc_add_vlan_flow(priv, cls);
}
@@ -745,6 +862,10 @@ static int tc_del_flow_cls(struct stmmac_priv *priv,
if (!ret)
return ret;
+ ret = tc_del_ethtype_flow(priv, cls);
+ if (!ret)
+ return ret;
+
return tc_del_vlan_flow(priv, cls);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
index 2a616c6f7cd0..9d4d8c3dad0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -119,7 +119,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
need_update = !!priv->xdp_prog != !!prog;
if (if_running && need_update)
- stmmac_release(dev);
+ stmmac_xdp_release(dev);
old_prog = xchg(&priv->xdp_prog, prog);
if (old_prog)
@@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);
if (if_running && need_update)
- stmmac_open(dev);
+ stmmac_xdp_open(dev);
return 0;
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index d2d4f47c7e28..dba9f12efa1c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4893,8 +4893,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned long casreg_len;
struct net_device *dev;
struct cas *cp;
- int i, err, pci_using_dac;
u16 pci_cmd;
+ int i, err;
u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
if (cas_version_printed++ == 0)
@@ -4965,23 +4965,10 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Configure DMA attributes. */
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (err < 0) {
- dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
- "for consistent allocations\n");
- goto err_out_free_res;
- }
-
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration, "
- "aborting\n");
- goto err_out_free_res;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ goto err_out_free_res;
}
casreg_len = pci_resource_len(pdev, 0);
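This is part of the tree-wide cleanup replacing the dma_set_mask()/dma_set_coherent_mask() fallback dance: the DMA core accepts a 64-bit mask whenever 32-bit DMA would have worked, so the 32-bit retry and the pci_using_dac bookkeeping are dead code and NETIF_F_HIGHDMA can be set unconditionally. A hypothetical variant of the consolidated idiom using dev_err_probe() for terser probe code:

	/* One call sets both streaming and coherent masks; a failure here
	 * means no smaller mask would have succeeded either.
	 */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		return dev_err_probe(&pdev->dev, err, "No usable DMA configuration\n");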
@@ -5087,8 +5074,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
- if (pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HIGHDMA;
/* MTU range: 60 - varies or 9000 */
dev->min_mtu = CAS_MIN_MTU;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 0775a5542f2f..985073eba3bd 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1884,10 +1884,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *ndev;
struct bdx_priv *priv;
- int err, pci_using_dac, port;
unsigned long pciaddr;
u32 regionSize;
struct pci_nic *nic;
+ int err, port;
ENTER;
@@ -1900,16 +1900,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) /* it triggers interrupt, dunno why. */
goto err_pci; /* it's not a problem though */
- if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) &&
- !(err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
- pci_using_dac = 1;
- } else {
- if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) ||
- (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, BDX_DRV_NAME);
@@ -1982,16 +1976,14 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* these fields are used for info purposes only
* so we can have them the same for all ports of the board */
ndev->if_port = port;
- ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
- | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
- ;
+ ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM |
+ NETIF_F_HIGHDMA;
+
ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
- if (pci_using_dac)
- ndev->features |= NETIF_F_HIGHDMA;
-
/************** priv ****************/
priv = nic->priv[port] = netdev_priv(ndev);
@@ -2245,9 +2237,13 @@ static inline int bdx_tx_fifo_size_to_packets(int tx_size)
* bdx_get_ringparam - report ring sizes
* @netdev
* @ring
+ * @kernel_ring
+ * @extack
*/
static void
-bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct bdx_priv *priv = netdev_priv(netdev);
@@ -2262,9 +2258,13 @@ bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
* bdx_set_ringparam - set ring sizes
* @netdev
* @ring
+ * @kernel_ring
+ * @extack
*/
static int
-bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct bdx_priv *priv = netdev_priv(netdev);
int rx_size = 0;
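The get/set_ringparam ethtool_ops gained two parameters in 5.17: a kernel-only struct for ring attributes that have no legacy ioctl representation, and a netlink extack for precise error reporting. As of this series the kernel-side struct appears to carry just the receive buffer length (sketch, per include/linux/ethtool.h at the time):

struct kernel_ethtool_ringparam {
	u32 rx_buf_len;		/* HW RX buffer size, settable via netlink only */
};

Drivers converted here can ignore both new arguments until they support the new attributes; the signature change alone keeps the ops tables type-correct.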
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index b05de9b61ad6..d45b6bb86f0b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -453,8 +453,11 @@ static int am65_cpsw_set_channels(struct net_device *ndev,
return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
}
-static void am65_cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+am65_cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index ffbbda8f4d41..8251d7eb001b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -345,7 +345,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
netif_txq = netdev_get_tx_queue(ndev, txqueue);
tx_chn = &common->tx_chns[txqueue];
- trans_start = netif_txq->trans_start;
+ trans_start = READ_ONCE(netif_txq->trans_start);
netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
txqueue,
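trans_start is now written locklessly by the xmit helpers, so this reader pairs the plain load with READ_ONCE() to avoid a torn read and to annotate the intentional data race for KCSAN:

	unsigned long trans_start = READ_ONCE(netif_txq->trans_start);

The matching WRITE_ONCE() lives in txq_trans_cond_update(), sketched earlier in this series.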
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 7449436fc87c..bef5e68dac31 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -817,7 +817,9 @@ static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
static void cpmac_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct cpmac_priv *priv = netdev_priv(dev);
@@ -833,7 +835,9 @@ static void cpmac_get_ringparam(struct net_device *dev,
}
static int cpmac_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct cpmac_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 33142d505fc8..03575c017500 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -349,7 +349,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev);
int pkt_size = cpsw->rx_packet_max;
int ret = 0, port, ch = xmeta->ch;
- int headroom = CPSW_HEADROOM;
+ int headroom = CPSW_HEADROOM_NA;
struct net_device *ndev = xmeta->ndev;
struct cpsw_priv *priv;
struct page_pool *pool;
@@ -392,7 +392,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
- int headroom = CPSW_HEADROOM, size = len;
+ int size = len;
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
@@ -442,7 +442,7 @@ requeue:
xmeta->ndev = ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
pkt_size, 0);
if (ret < 0) {
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 158c8d3793f4..aa42141be3c0 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -658,7 +658,9 @@ err:
}
void cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
@@ -671,7 +673,9 @@ void cpsw_get_ringparam(struct net_device *ndev,
}
int cpsw_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
int descs_num, ret;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 279e261e4720..bd4b1528cf99 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -283,7 +283,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
{
struct page *new_page, *page = token;
void *pa = page_address(page);
- int headroom = CPSW_HEADROOM;
+ int headroom = CPSW_HEADROOM_NA;
struct cpsw_meta_xdp *xmeta;
struct cpsw_common *cpsw;
struct net_device *ndev;
@@ -336,7 +336,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
- int headroom = CPSW_HEADROOM, size = len;
+ int size = len;
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
@@ -386,7 +386,7 @@ requeue:
xmeta->ndev = ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
pkt_size, 0);
if (ret < 0) {
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index ecc2a6b7e28f..8f6817f346ba 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -626,10 +626,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
@@ -710,20 +706,26 @@ int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
struct cpsw_priv *priv = netdev_priv(dev);
struct cpsw_common *cpsw = priv->cpsw;
int slave_no = cpsw_slave_index(cpsw, priv);
+ struct phy_device *phy;
if (!netif_running(dev))
return -EINVAL;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
+ phy = cpsw->slaves[slave_no].phy;
+
+ if (!phy_has_hwtstamp(phy)) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
+ }
}
- if (!cpsw->slaves[slave_no].phy)
- return -EOPNOTSUPP;
- return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
+ if (phy)
+ return phy_mii_ioctl(phy, req, cmd);
+
+ return -EOPNOTSUPP;
}
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
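The reordered ioctl dispatch prefers a timestamping-capable PHY over the MAC: phy_has_hwtstamp() (include/linux/phy.h) reports whether the attached PHY registered its own hwtstamp handler, roughly:

static inline bool phy_has_hwtstamp(struct phy_device *phydev)
{
	return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp;
}

so SIOC[SG]HWTSTAMP is handled by the MAC only when no PHY timestamping is available; otherwise the request falls through to phy_mii_ioctl().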
@@ -1120,7 +1122,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
xmeta->ndev = priv->ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
page, dma,
cpsw->rx_packet_max,
@@ -1144,7 +1146,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
int size)
{
- struct page_pool_params pp_params;
+ struct page_pool_params pp_params = {};
struct page_pool *pool;
pp_params.order = 0;
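The empty-brace initializer matters because struct page_pool_params keeps growing: any member the driver does not set explicitly (for example newer callback fields) would otherwise contain stack garbage. Minimal illustration:

	struct page_pool_params pp_params = {};	/* every field zeroed/NULL */

	pp_params.order = 0;			/* then set only what we need */
	pp_params.pool_size = size;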
@@ -1360,7 +1362,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
xdp_do_flush_map();
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 435668ee542d..74555970730c 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -6,6 +6,8 @@
#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
#define DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
+#include <uapi/linux/bpf.h>
+
#include "davinci_cpdma.h"
#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
@@ -491,9 +493,13 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata);
int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering);
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack);
int cpsw_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering);
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack);
int cpsw_set_channels_common(struct net_device *ndev,
struct ethtool_channels *chs,
cpdma_handler_fn rx_handler);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index d55f06120ce7..31df3267a01a 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1454,23 +1454,33 @@ static int emac_dev_open(struct net_device *ndev)
}
/* Request IRQ */
- while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
- res_num))) {
- for (irq_num = res->start; irq_num <= res->end; irq_num++) {
- if (request_irq(irq_num, emac_irq, 0, ndev->name,
- ndev)) {
- dev_err(emac_dev,
- "DaVinci EMAC: request_irq() failed\n");
- ret = -EBUSY;
+ if (dev_of_node(&priv->pdev->dev)) {
+ while ((ret = platform_get_irq_optional(priv->pdev, res_num)) != -ENXIO) {
+ if (ret < 0)
+ goto rollback;
+ ret = request_irq(ret, emac_irq, 0, ndev->name, ndev);
+ if (ret) {
+ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed\n");
goto rollback;
}
+ res_num++;
}
- res_num++;
+ } else {
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, res_num))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+ ret = request_irq(irq_num, emac_irq, 0, ndev->name, ndev);
+ if (ret) {
+ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed\n");
+ goto rollback;
+ }
+ }
+ res_num++;
+ }
+ /* prepare counters for rollback in case of an error */
+ res_num--;
+ irq_num--;
}
- /* prepare counters for rollback in case of an error */
- res_num--;
- irq_num--;
/* Start/Enable EMAC hardware */
emac_hw_enable(priv);
@@ -1554,16 +1564,24 @@ err:
napi_disable(&priv->napi);
rollback:
- for (q = res_num; q >= 0; q--) {
- res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
- /* at the first iteration, irq_num is already set to the
- * right value
- */
- if (q != res_num)
- irq_num = res->end;
+ if (dev_of_node(&priv->pdev->dev)) {
+ for (q = res_num - 1; q >= 0; q--) {
+ irq_num = platform_get_irq(priv->pdev, q);
+ if (irq_num > 0)
+ free_irq(irq_num, ndev);
+ }
+ } else {
+ for (q = res_num; q >= 0; q--) {
+ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+ /* at the first iteration, irq_num is already set to the
+ * right value
+ */
+ if (q != res_num)
+ irq_num = res->end;
- for (m = irq_num; m >= res->start; m--)
- free_irq(m, ndev);
+ for (m = irq_num; m >= res->start; m--)
+ free_irq(m, ndev);
+ }
}
cpdma_ctlr_stop(priv->dma);
pm_runtime_put(&priv->pdev->dev);
@@ -1899,13 +1917,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
goto err_free_txchan;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "error getting irq res\n");
- rc = -ENOENT;
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0)
goto err_free_rxchan;
- }
- ndev->irq = res->start;
+ ndev->irq = rc;
rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
if (!rc)
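For DT-probed devices, IRQ resources are no longer populated at platform-device creation, so platform_get_resource(IORESOURCE_IRQ) cannot be relied on; the new loop keys off platform_get_irq_optional(), which returns -ENXIO once the interrupt list is exhausted. The idiom in isolation:

	int irq, i = 0;

	while ((irq = platform_get_irq_optional(pdev, i)) != -ENXIO) {
		if (irq < 0)
			return irq;	/* real error, e.g. -EPROBE_DEFER */
		/* request_irq(irq, handler, 0, name, data) goes here */
		i++;
	}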
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 33c1592d5381..751fb0bc65c5 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2654,10 +2654,6 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
gbe_dev->tx_ts_enabled = 0;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index f50f9a43d3ea..f47b8358669d 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -595,24 +595,24 @@ spider_net_set_multi(struct net_device *netdev)
int i;
u32 reg;
struct spider_net_card *card = netdev_priv(netdev);
- DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};
+ DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
spider_net_set_promisc(card);
if (netdev->flags & IFF_ALLMULTI) {
- for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
- set_bit(i, bitmask);
- }
+ bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
goto write_hash;
}
+ bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
+
	/* well, we know what the broadcast hash value is: it's 0xfd
	   hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
- set_bit(0xfd, bitmask);
+ __set_bit(0xfd, bitmask);
netdev_for_each_mc_addr(ha, netdev) {
hash = spider_net_get_multicast_hash(netdev, ha->addr);
- set_bit(hash, bitmask);
+ __set_bit(hash, bitmask);
}
write_hash:
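The bitmap is a stack variable visible to no other context, so the atomic set_bit() buys nothing; the conversion uses non-atomic __set_bit() plus the bitmap_fill()/bitmap_zero() helpers in place of open-coded loops. The pattern in isolation:

	DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);

	bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
	__set_bit(0xfd, bitmask);	/* non-atomic: no locked RMW needed */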
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index 54f655a68148..93110dba0bfa 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -110,7 +110,9 @@ spider_net_ethtool_nway_reset(struct net_device *netdev)
static void
spider_net_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct spider_net_card *card = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index cf0917b29e30..5251fc324221 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1091,20 +1091,22 @@ static int tsi108_get_mac(struct net_device *dev)
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+ u8 addr[ETH_ALEN];
/* Note that the octets are reversed from what the manual says,
* producing an even weirder ordering...
*/
if (word2 == 0 && word1 == 0) {
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x06;
- dev->dev_addr[2] = 0xd2;
- dev->dev_addr[3] = 0x00;
- dev->dev_addr[4] = 0x00;
+ addr[0] = 0x00;
+ addr[1] = 0x06;
+ addr[2] = 0xd2;
+ addr[3] = 0x00;
+ addr[4] = 0x00;
if (0x8 == data->phy)
- dev->dev_addr[5] = 0x01;
+ addr[5] = 0x01;
else
- dev->dev_addr[5] = 0x02;
+ addr[5] = 0x02;
+ eth_hw_addr_set(dev, addr);
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
@@ -1114,12 +1116,13 @@ static int tsi108_get_mac(struct net_device *dev)
TSI_WRITE(TSI108_MAC_ADDR1, word1);
TSI_WRITE(TSI108_MAC_ADDR2, word2);
} else {
- dev->dev_addr[0] = (word2 >> 16) & 0xff;
- dev->dev_addr[1] = (word2 >> 24) & 0xff;
- dev->dev_addr[2] = (word1 >> 0) & 0xff;
- dev->dev_addr[3] = (word1 >> 8) & 0xff;
- dev->dev_addr[4] = (word1 >> 16) & 0xff;
- dev->dev_addr[5] = (word1 >> 24) & 0xff;
+ addr[0] = (word2 >> 16) & 0xff;
+ addr[1] = (word2 >> 24) & 0xff;
+ addr[2] = (word1 >> 0) & 0xff;
+ addr[3] = (word1 >> 8) & 0xff;
+ addr[4] = (word1 >> 16) & 0xff;
+ addr[5] = (word1 >> 24) & 0xff;
+ eth_hw_addr_set(dev, addr);
}
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -1136,14 +1139,12 @@ static int tsi108_set_mac(struct net_device *dev, void *addr)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1, word2;
- int i;
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (i = 0; i < 6; i++)
- /* +2 is for the offset of the HW addr type */
- dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+ /* +2 is for the offset of the HW addr type */
+ eth_hw_addr_set(dev, ((unsigned char *)addr) + 2);
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
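netdev->dev_addr became const in 5.17 so the core can keep hardware addresses in a tracked data structure; direct byte-wise writes are replaced by eth_hw_addr_set(), which is essentially:

static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
	__dev_addr_set(dev, addr, ETH_ALEN);	/* copies the 6 bytes */
}

Hence the pattern above: build the address in a local buffer, then install it in one call.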
diff --git a/drivers/net/ethernet/vertexcom/Kconfig b/drivers/net/ethernet/vertexcom/Kconfig
new file mode 100644
index 000000000000..4184a635fe01
--- /dev/null
+++ b/drivers/net/ethernet/vertexcom/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Vertexcom network device configuration
+#
+
+config NET_VENDOR_VERTEXCOM
+ bool "Vertexcom devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Vertexcom cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_VERTEXCOM
+
+config MSE102X
+ tristate "Vertexcom MSE102x SPI"
+ depends on SPI
+ help
+ SPI driver for Vertexcom MSE102x SPI attached network chip.
+
+endif # NET_VENDOR_VERTEXCOM
diff --git a/drivers/net/ethernet/vertexcom/Makefile b/drivers/net/ethernet/vertexcom/Makefile
new file mode 100644
index 000000000000..f8b12e312637
--- /dev/null
+++ b/drivers/net/ethernet/vertexcom/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Vertexcom network device drivers.
+#
+
+obj-$(CONFIG_MSE102X) += mse102x.o
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
new file mode 100644
index 000000000000..89a31783fbb4
--- /dev/null
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 in-tech smart charging GmbH
+ *
+ * driver is based on micrel/ks8851_spi.c
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/spi/spi.h>
+#include <linux/of_net.h>
+
+#define MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER)
+
+#define DRV_NAME "mse102x"
+
+#define DET_CMD 0x0001
+#define DET_SOF 0x0002
+#define DET_DFT 0x55AA
+
+#define CMD_SHIFT 12
+#define CMD_RTS (0x1 << CMD_SHIFT)
+#define CMD_CTR (0x2 << CMD_SHIFT)
+
+#define CMD_MASK GENMASK(15, CMD_SHIFT)
+#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0)
+
+#define DET_CMD_LEN 4
+#define DET_SOF_LEN 2
+#define DET_DFT_LEN 2
+
+#define MIN_FREQ_HZ 6000000
+#define MAX_FREQ_HZ 7142857
+
+struct mse102x_stats {
+ u64 xfer_err;
+ u64 invalid_cmd;
+ u64 invalid_ctr;
+ u64 invalid_dft;
+ u64 invalid_len;
+ u64 invalid_rts;
+ u64 invalid_sof;
+ u64 tx_timeout;
+};
+
+static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "SPI transfer errors",
+ "Invalid command",
+ "Invalid CTR",
+ "Invalid DFT",
+ "Invalid frame length",
+ "Invalid RTS",
+ "Invalid SOF",
+ "TX timeout",
+};
+
+struct mse102x_net {
+ struct net_device *ndev;
+
+ u8 rxd[8];
+ u8 txd[8];
+
+ u32 msg_enable ____cacheline_aligned;
+
+ struct sk_buff_head txq;
+ struct mse102x_stats stats;
+};
+
+struct mse102x_net_spi {
+ struct mse102x_net mse102x;
+ struct mutex lock; /* Protect SPI frame transfer */
+ struct work_struct tx_work;
+ struct spi_device *spidev;
+ struct spi_message spi_msg;
+ struct spi_transfer spi_xfer;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *device_root;
+#endif
+};
+
+#define to_mse102x_spi(mse) container_of((mse), struct mse102x_net_spi, mse102x)
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mse102x_info_show(struct seq_file *s, void *what)
+{
+ struct mse102x_net_spi *mses = s->private;
+
+ seq_printf(s, "TX ring size : %u\n",
+ skb_queue_len(&mses->mse102x.txq));
+
+ seq_printf(s, "IRQ : %d\n",
+ mses->spidev->irq);
+
+ seq_printf(s, "SPI effective speed : %lu\n",
+ (unsigned long)mses->spi_xfer.effective_speed_hz);
+ seq_printf(s, "SPI mode : %x\n",
+ mses->spidev->mode);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mse102x_info);
+
+static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
+{
+ mses->device_root = debugfs_create_dir(dev_name(&mses->mse102x.ndev->dev),
+ NULL);
+
+ debugfs_create_file("info", S_IFREG | 0444, mses->device_root, mses,
+ &mse102x_info_fops);
+}
+
+static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
+{
+ debugfs_remove_recursive(mses->device_root);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
+{
+}
+
+static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
+{
+}
+
+#endif
+
+/* SPI register read/write calls.
+ *
+ * All these calls issue SPI transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data.
+ */
+
+static void mse102x_tx_cmd_spi(struct mse102x_net *mse, u16 cmd)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ __be16 txb[2];
+ int ret;
+
+ txb[0] = cpu_to_be16(DET_CMD);
+ txb[1] = cpu_to_be16(cmd);
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = DET_CMD_LEN;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ }
+}
+
+static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ __be16 *txb = (__be16 *)mse->txd;
+ __be16 *cmd = (__be16 *)mse->rxd;
+ u8 *trx = mse->rxd;
+ int ret;
+
+ txb[0] = 0;
+ txb[1] = 0;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = trx;
+ xfer->len = DET_CMD_LEN;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ } else if (*cmd != cpu_to_be16(DET_CMD)) {
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, *cmd);
+ mse->stats.invalid_cmd++;
+ ret = -EIO;
+ } else {
+ memcpy(rxb, trx + 2, 2);
+ }
+
+ return ret;
+}
+
+static inline void mse102x_push_header(struct sk_buff *skb)
+{
+ __be16 *header = skb_push(skb, DET_SOF_LEN);
+
+ *header = cpu_to_be16(DET_SOF);
+}
+
+static inline void mse102x_put_footer(struct sk_buff *skb)
+{
+ __be16 *footer = skb_put(skb, DET_DFT_LEN);
+
+ *footer = cpu_to_be16(DET_DFT);
+}
+
+static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
+ unsigned int pad)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ struct sk_buff *tskb;
+ int ret;
+
+ netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n",
+ __func__, txp, txp->len, txp->data);
+
+ if ((skb_headroom(txp) < DET_SOF_LEN) ||
+ (skb_tailroom(txp) < DET_DFT_LEN + pad)) {
+ tskb = skb_copy_expand(txp, DET_SOF_LEN, DET_DFT_LEN + pad,
+ GFP_KERNEL);
+ if (!tskb)
+ return -ENOMEM;
+
+ dev_kfree_skb(txp);
+ txp = tskb;
+ }
+
+ mse102x_push_header(txp);
+
+ if (pad)
+ skb_put_zero(txp, pad);
+
+ mse102x_put_footer(txp);
+
+ xfer->tx_buf = txp->data;
+ xfer->rx_buf = NULL;
+ xfer->len = txp->len;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ }
+
+ return ret;
+}
+
+static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
+ unsigned int frame_len)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ __be16 *sof = (__be16 *)buff;
+ __be16 *dft = (__be16 *)(buff + DET_SOF_LEN + frame_len);
+ int ret;
+
+ xfer->rx_buf = buff;
+ xfer->tx_buf = NULL;
+ xfer->len = DET_SOF_LEN + frame_len + DET_DFT_LEN;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ } else if (*sof != cpu_to_be16(DET_SOF)) {
+ netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
+ __func__, *sof);
+ mse->stats.invalid_sof++;
+ ret = -EIO;
+ } else if (*dft != cpu_to_be16(DET_DFT)) {
+ netdev_dbg(mse->ndev, "%s: SPI frame tail is invalid (0x%04x)\n",
+ __func__, *dft);
+ mse->stats.invalid_dft++;
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void mse102x_dump_packet(const char *msg, int len, const char *data)
+{
+ printk(KERN_DEBUG ": %s - packet len:%d\n", msg, len);
+ print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1,
+ data, len, true);
+}
+
+static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+{
+ struct sk_buff *skb;
+ unsigned int rxalign;
+ unsigned int rxlen;
+ __be16 rx = 0;
+ u16 cmd_resp;
+ u8 *rxpkt;
+ int ret;
+
+ mse102x_tx_cmd_spi(mse, CMD_CTR);
+ ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
+ cmd_resp = be16_to_cpu(rx);
+
+ if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) {
+ usleep_range(50, 100);
+
+ mse102x_tx_cmd_spi(mse, CMD_CTR);
+ ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
+ if (ret)
+ return;
+
+ cmd_resp = be16_to_cpu(rx);
+ if ((cmd_resp & CMD_MASK) != CMD_RTS) {
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_rts++;
+ return;
+ }
+
+ net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
+ __func__);
+ }
+
+ rxlen = cmd_resp & LEN_MASK;
+ if (!rxlen) {
+ net_dbg_ratelimited("%s: No frame length defined\n", __func__);
+ mse->stats.invalid_len++;
+ return;
+ }
+
+ rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
+ skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
+ if (!skb)
+ return;
+
+ /* 2 bytes Start of frame (before ethernet header)
+ * 2 bytes Data frame tail (after ethernet frame)
+ * They are copied, but ignored.
+ */
+ rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
+ if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
+ mse->ndev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (netif_msg_pktdata(mse))
+ mse102x_dump_packet(__func__, skb->len, skb->data);
+
+ skb->protocol = eth_type_trans(skb, mse->ndev);
+ netif_rx_ni(skb);
+
+ mse->ndev->stats.rx_packets++;
+ mse->ndev->stats.rx_bytes += rxlen;
+}
+
+static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
+ unsigned long work_timeout)
+{
+ unsigned int pad = 0;
+ __be16 rx = 0;
+ u16 cmd_resp;
+ int ret;
+ bool first = true;
+
+ if (txb->len < 60)
+ pad = 60 - txb->len;
+
+ while (1) {
+ mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad));
+ ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
+ cmd_resp = be16_to_cpu(rx);
+
+ if (!ret) {
+ /* ready to send frame ? */
+ if (cmd_resp == CMD_CTR)
+ break;
+
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_ctr++;
+ }
+
+		/* It's unpredictable how long and how many retries it takes
+		 * to send at least one packet, so TX timeouts are possible.
+		 * That's why the netdev watchdog is not used here.
+		 */
+ if (time_after(jiffies, work_timeout))
+ return -ETIMEDOUT;
+
+ if (first) {
+ /* throttle at first issue */
+ netif_stop_queue(mse->ndev);
+ /* fast retry */
+ usleep_range(50, 100);
+ first = false;
+ } else {
+ msleep(20);
+ }
+ }
+
+ ret = mse102x_tx_frame_spi(mse, txb, pad);
+ if (ret)
+ net_dbg_ratelimited("%s: Failed to send (%d), drop frame\n",
+ __func__, ret);
+
+ return ret;
+}
+
+#define TX_QUEUE_MAX 10
+
+static void mse102x_tx_work(struct work_struct *work)
+{
+ /* Make sure timeout is sufficient to transfer TX_QUEUE_MAX frames */
+ unsigned long work_timeout = jiffies + msecs_to_jiffies(1000);
+ struct mse102x_net_spi *mses;
+ struct mse102x_net *mse;
+ struct sk_buff *txb;
+ int ret = 0;
+
+ mses = container_of(work, struct mse102x_net_spi, tx_work);
+ mse = &mses->mse102x;
+
+ while ((txb = skb_dequeue(&mse->txq))) {
+ mutex_lock(&mses->lock);
+ ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
+ mutex_unlock(&mses->lock);
+ if (ret) {
+ mse->ndev->stats.tx_dropped++;
+ } else {
+ mse->ndev->stats.tx_bytes += txb->len;
+ mse->ndev->stats.tx_packets++;
+ }
+
+ dev_kfree_skb(txb);
+ }
+
+ if (ret == -ETIMEDOUT) {
+ if (netif_msg_timer(mse))
+ netdev_err(mse->ndev, "tx work timeout\n");
+
+ mse->stats.tx_timeout++;
+ }
+
+ netif_wake_queue(mse->ndev);
+}
+
+static netdev_tx_t mse102x_start_xmit_spi(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ netif_dbg(mse, tx_queued, ndev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
+
+ skb_queue_tail(&mse->txq, skb);
+
+ if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX)
+ netif_stop_queue(ndev);
+
+ schedule_work(&mses->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np)
+{
+ struct net_device *ndev = mse->ndev;
+ int ret = of_get_ethdev_address(np, ndev);
+
+ if (ret) {
+ eth_hw_addr_random(ndev);
+ netdev_err(ndev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
+ }
+}
+
+/* Assumption: this is called for every incoming packet */
+static irqreturn_t mse102x_irq(int irq, void *_mse)
+{
+ struct mse102x_net *mse = _mse;
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ mutex_lock(&mses->lock);
+ mse102x_rx_pkt_spi(mse);
+ mutex_unlock(&mses->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int mse102x_net_open(struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ int ret;
+
+ ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
+ ndev->name, mse);
+ if (ret < 0) {
+ netdev_err(ndev, "Failed to get irq: %d\n", ret);
+ return ret;
+ }
+
+ netif_dbg(mse, ifup, ndev, "opening\n");
+
+ netif_start_queue(ndev);
+
+ netif_carrier_on(ndev);
+
+ netif_dbg(mse, ifup, ndev, "network device up\n");
+
+ return 0;
+}
+
+static int mse102x_net_stop(struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ netif_info(mse, ifdown, ndev, "shutting down\n");
+
+ netif_carrier_off(mse->ndev);
+
+ /* stop any outstanding work */
+ flush_work(&mses->tx_work);
+
+ netif_stop_queue(ndev);
+
+ skb_queue_purge(&mse->txq);
+
+ free_irq(ndev->irq, mse);
+
+ return 0;
+}
+
+static const struct net_device_ops mse102x_netdev_ops = {
+ .ndo_open = mse102x_net_open,
+ .ndo_stop = mse102x_net_stop,
+ .ndo_start_xmit = mse102x_start_xmit_spi,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* ethtool support */
+
+static void mse102x_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *di)
+{
+ strscpy(di->driver, DRV_NAME, sizeof(di->driver));
+ strscpy(di->bus_info, dev_name(ndev->dev.parent), sizeof(di->bus_info));
+}
+
+static u32 mse102x_get_msglevel(struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+
+ return mse->msg_enable;
+}
+
+static void mse102x_set_msglevel(struct net_device *ndev, u32 to)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+
+ mse->msg_enable = to;
+}
+
+static void mse102x_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_stats *st = &mse->stats;
+
+ memcpy(data, st, ARRAY_SIZE(mse102x_gstrings_stats) * sizeof(u64));
+}
+
+static void mse102x_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &mse102x_gstrings_stats,
+ sizeof(mse102x_gstrings_stats));
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int mse102x_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(mse102x_gstrings_stats);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct ethtool_ops mse102x_ethtool_ops = {
+ .get_drvinfo = mse102x_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = mse102x_get_msglevel,
+ .set_msglevel = mse102x_set_msglevel,
+ .get_ethtool_stats = mse102x_get_ethtool_stats,
+ .get_strings = mse102x_get_strings,
+ .get_sset_count = mse102x_get_sset_count,
+};
+
+/* driver bus management functions */
+
+#ifdef CONFIG_PM_SLEEP
+
+static int mse102x_suspend(struct device *dev)
+{
+ struct mse102x_net *mse = dev_get_drvdata(dev);
+ struct net_device *ndev = mse->ndev;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ mse102x_net_stop(ndev);
+ }
+
+ return 0;
+}
+
+static int mse102x_resume(struct device *dev)
+{
+ struct mse102x_net *mse = dev_get_drvdata(dev);
+ struct net_device *ndev = mse->ndev;
+
+ if (netif_running(ndev)) {
+ mse102x_net_open(ndev);
+ netif_device_attach(ndev);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
+
+static int mse102x_probe_spi(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mse102x_net_spi *mses;
+ struct net_device *ndev;
+ struct mse102x_net *mse;
+ int ret;
+
+ spi->bits_per_word = 8;
+ spi->mode |= SPI_MODE_3;
+ /* enforce minimum speed to ensure device functionality */
+ spi->master->min_speed_hz = MIN_FREQ_HZ;
+
+ if (!spi->max_speed_hz)
+ spi->max_speed_hz = MAX_FREQ_HZ;
+
+ if (spi->max_speed_hz < MIN_FREQ_HZ ||
+ spi->max_speed_hz > MAX_FREQ_HZ) {
+ dev_err(&spi->dev, "SPI max frequency out of range (min: %u, max: %u)\n",
+ MIN_FREQ_HZ, MAX_FREQ_HZ);
+ return -EINVAL;
+ }
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI device: %d\n", ret);
+ return ret;
+ }
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct mse102x_net_spi));
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->needed_tailroom += ALIGN(DET_DFT_LEN, 4);
+ ndev->needed_headroom += ALIGN(DET_SOF_LEN, 4);
+ ndev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ ndev->tx_queue_len = 100;
+
+ mse = netdev_priv(ndev);
+ mses = to_mse102x_spi(mse);
+
+ mses->spidev = spi;
+ mutex_init(&mses->lock);
+ INIT_WORK(&mses->tx_work, mse102x_tx_work);
+
+ /* initialise pre-made spi transfer messages */
+ spi_message_init(&mses->spi_msg);
+ spi_message_add_tail(&mses->spi_xfer, &mses->spi_msg);
+
+ ndev->irq = spi->irq;
+ mse->ndev = ndev;
+
+ /* set the default message enable */
+ mse->msg_enable = netif_msg_init(-1, MSG_DEFAULT);
+
+ skb_queue_head_init(&mse->txq);
+
+ SET_NETDEV_DEV(ndev, dev);
+
+ dev_set_drvdata(dev, mse);
+
+ netif_carrier_off(mse->ndev);
+ ndev->netdev_ops = &mse102x_netdev_ops;
+ ndev->ethtool_ops = &mse102x_ethtool_ops;
+
+ mse102x_init_mac(mse, dev->of_node);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "failed to register network device: %d\n", ret);
+ return ret;
+ }
+
+ mse102x_init_device_debugfs(mses);
+
+ return 0;
+}
+
+static int mse102x_remove_spi(struct spi_device *spi)
+{
+ struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ if (netif_msg_drv(mse))
+ dev_info(&spi->dev, "remove\n");
+
+ mse102x_remove_device_debugfs(mses);
+ unregister_netdev(mse->ndev);
+
+ return 0;
+}
+
+static const struct of_device_id mse102x_match_table[] = {
+ { .compatible = "vertexcom,mse1021" },
+ { .compatible = "vertexcom,mse1022" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mse102x_match_table);
+
+static struct spi_driver mse102x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = mse102x_match_table,
+ .pm = &mse102x_pm_ops,
+ },
+ .probe = mse102x_probe_spi,
+ .remove = mse102x_remove_spi,
+};
+module_spi_driver(mse102x_driver);
+
+MODULE_DESCRIPTION("MSE102x Network driver");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@in-tech.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:" DRV_NAME);
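The new mse102x driver talks to the MAC over a framed SPI protocol built from the constants defined at the top of the file. A sketch of the two transfer types, for orientation (the struct is illustrative, not a driver type):

/* Command exchange: 4 bytes on the wire */
struct mse102x_cmd_xfer {
	__be16 det;	/* DET_CMD (0x0001) */
	__be16 cmd;	/* CMD_RTS | frame_len, or CMD_CTR */
};

/* Data frame exchange:
 *   [DET_SOF (0x0002)][Ethernet frame, padded to 60 bytes][DET_DFT (0x55AA)]
 * The SOF/DFT markers are validated on RX and counted in the
 * invalid_sof/invalid_dft statistics when they mismatch.
 */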
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index e7065c9a8e38..b900ab5aef2a 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1276,8 +1276,11 @@ static const struct attribute_group temac_attr_group = {
* ethtool support
*/
-static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct temac_local *lp = netdev_priv(ndev);
@@ -1291,8 +1294,11 @@ static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
ering->tx_pending = lp->tx_bd_num;
}
-static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static int
+ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct temac_local *lp = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9b068b81ae09..377c94ec2486 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -41,8 +41,9 @@
#include "xilinx_axienet.h"
/* Descriptors defines for Tx and Rx DMA */
-#define TX_BD_NUM_DEFAULT 64
+#define TX_BD_NUM_DEFAULT 128
#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX 4096
#define RX_BD_NUM_MAX 4096
@@ -496,7 +497,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
static int __axienet_device_reset(struct axienet_local *lp)
{
- u32 timeout;
+ u32 value;
+ int ret;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
@@ -506,15 +508,23 @@ static int __axienet_device_reset(struct axienet_local *lp)
* they both reset the entire DMA core, so only one needs to be used.
*/
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
- timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
- XAXIDMA_CR_RESET_MASK) {
- udelay(1);
- if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
- __func__);
- return -ETIMEDOUT;
- }
+ ret = read_poll_timeout(axienet_dma_in32, value,
+ !(value & XAXIDMA_CR_RESET_MASK),
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAXIDMA_TX_CR_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
+ return ret;
+ }
+
+ /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
+ ret = read_poll_timeout(axienet_ior, value,
+ value & XAE_INT_PHYRSTCMPLT_MASK,
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAE_IS_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
+ return ret;
}
return 0;
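read_poll_timeout() from <linux/iopoll.h> replaces the open-coded udelay() loop. Its contract, roughly: evaluate val = op(args...) until cond is true or timeout_us elapses, sleeping sleep_us between reads, and return 0 on success or -ETIMEDOUT. The second poll from the hunk above, for reference:

	/* Poll the interrupt-status register until the PHY reset completes */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false,
				lp, XAE_IS_OFFSET);
	if (ret)
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);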
@@ -623,6 +633,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
break;
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
dma_unmap_single(ndev->dev.parent, phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
@@ -631,13 +643,15 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
dev_consume_skb_irq(cur_p->skb);
- cur_p->cntrl = 0;
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
- cur_p->status = 0;
cur_p->skb = NULL;
+ /* ensure our transmit path and device don't prematurely see status cleared */
+ wmb();
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
if (sizep)
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
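The barrier additions pair up as follows: the device DMA-writes the completion status last, so dma_rmb() after the status test orders the subsequent descriptor-field reads against it; on the way back, wmb() publishes all software field resets before cntrl/status are cleared, since clearing those hands the descriptor back to the producer. Condensed:

	status = cur_p->status;
	if (!(status & XAXIDMA_BD_STS_COMPLETE_MASK))
		break;
	dma_rmb();	/* status read before the other descriptor fields */
	/* ... dma_unmap_single(), dev_consume_skb_irq(), field resets ... */
	cur_p->skb = NULL;
	wmb();		/* resets visible before the BD is handed back */
	cur_p->cntrl = 0;
	cur_p->status = 0;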
@@ -647,6 +661,32 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
}
/**
+ * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
+ * @lp: Pointer to the axienet_local structure
+ * @num_frag: The number of BDs to check for
+ *
+ * Return: 0, on success
+ * NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is invoked before BDs are allocated and transmission starts.
+ * This function returns 0 if a BD or group of BDs can be allocated for
+ * transmission. If the BD or any of the BDs are not free the function
+ * returns a busy status. This is invoked from axienet_start_xmit.
+ */
+static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+ int num_frag)
+{
+ struct axidma_bd *cur_p;
+
+ /* Ensure we see all descriptor updates from device or TX IRQ path */
+ rmb();
+ cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+ if (cur_p->cntrl)
+ return NETDEV_TX_BUSY;
+ return 0;
+}
+
+/**
* axienet_start_xmit_done - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @ndev: Pointer to the net_device structure
@@ -675,30 +715,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
/* Matches barrier in axienet_start_xmit */
smp_mb();
- netif_wake_queue(ndev);
-}
-
-/**
- * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
- * @lp: Pointer to the axienet_local structure
- * @num_frag: The number of BDs to check for
- *
- * Return: 0, on success
- * NETDEV_TX_BUSY, if any of the descriptors are not free
- *
- * This function is invoked before BDs are allocated and transmission starts.
- * This function returns 0 if a BD or group of BDs can be allocated for
- * transmission. If the BD or any of the BDs are not free the function
- * returns a busy status. This is invoked from axienet_start_xmit.
- */
-static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
- int num_frag)
-{
- struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
- if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
- return NETDEV_TX_BUSY;
- return 0;
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
}
/**
@@ -730,20 +748,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (netif_queue_stopped(ndev))
- return NETDEV_TX_BUSY;
-
+ if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
+ /* Should not happen as last start_xmit call should have
+ * checked for sufficient space and queue should only be
+ * woken when sufficient space is available.
+ */
netif_stop_queue(ndev);
-
- /* Matches barrier in axienet_start_xmit_done */
- smp_mb();
-
- /* Space might have just been freed - check again */
- if (axienet_check_tx_bd_space(lp, num_frag))
- return NETDEV_TX_BUSY;
-
- netif_wake_queue(ndev);
+ if (net_ratelimit())
+ netdev_warn(ndev, "TX ring unexpectedly full\n");
+ return NETDEV_TX_BUSY;
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -804,6 +817,18 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
+ /* Stop queue if next transmit may not have space */
+ if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
+
return NETDEV_TX_OK;
}
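The rework moves full-ring handling out of the hot path: the queue is stopped proactively whenever fewer than MAX_SKB_FRAGS + 1 descriptors remain (the worst case one skb can consume), so actually hitting a full ring inside start_xmit becomes a warning condition. The stop/re-check idiom, which closes the race with the completion IRQ freeing space between the check and the stop:

	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		smp_mb();	/* pairs with barrier in the completion path */

		/* completion may have freed space before we stopped: re-check */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}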
@@ -834,6 +859,8 @@ static void axienet_recv(struct net_device *ndev)
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
DMA_FROM_DEVICE);
@@ -1323,8 +1350,11 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}
-static void axienet_ethtools_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+axienet_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
@@ -1338,15 +1368,19 @@ static void axienet_ethtools_get_ringparam(struct net_device *ndev,
ering->tx_pending = lp->tx_bd_num;
}
-static int axienet_ethtools_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static int
+axienet_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
if (ering->rx_pending > RX_BD_NUM_MAX ||
ering->rx_mini_pending ||
ering->rx_jumbo_pending ||
- ering->rx_pending > TX_BD_NUM_MAX)
+ ering->tx_pending < TX_BD_NUM_MIN ||
+ ering->tx_pending > TX_BD_NUM_MAX)
return -EINVAL;
if (netif_running(ndev))
@@ -1503,65 +1537,6 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.nway_reset = axienet_ethtools_nway_reset,
};
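
Both ringparam callbacks in this series gain the same two parameters: a kernel-only struct kernel_ethtool_ringparam and a struct netlink_ext_ack for reporting errors back over netlink. A minimal sketch of a set handler under the new signature; the MY_RX_MAX, MY_TX_MIN and MY_TX_MAX bounds are hypothetical.

static int my_set_ringparam(struct net_device *ndev,
			    struct ethtool_ringparam *ering,
			    struct kernel_ethtool_ringparam *kernel_ering,
			    struct netlink_ext_ack *extack)
{
	/* Reject ring classes this hardware does not implement. */
	if (ering->rx_mini_pending || ering->rx_jumbo_pending) {
		NL_SET_ERR_MSG(extack, "mini/jumbo rings not supported");
		return -EINVAL;
	}

	/* Range-check the supported rings against device limits. */
	if (ering->rx_pending > MY_RX_MAX ||
	    ering->tx_pending < MY_TX_MIN ||
	    ering->tx_pending > MY_TX_MAX) {
		NL_SET_ERR_MSG(extack, "ring size out of range");
		return -EINVAL;
	}

	/* ... apply the new ring sizes ... */
	return 0;
}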
-static void axienet_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* Only support the mode we are configured for */
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- if (lp->switch_x_sgmii)
- break;
- fallthrough;
- default:
- if (state->interface != lp->phy_mode) {
- netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
- phy_modes(state->interface),
- phy_modes(lp->phy_mode));
- linkmode_zero(supported);
- return;
- }
- }
-
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 1000baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- break;
- fallthrough;
- case PHY_INTERFACE_MODE_MII:
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10baseT_Full);
- fallthrough;
- default:
- break;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void axienet_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
@@ -1687,7 +1662,7 @@ static void axienet_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops axienet_phylink_ops = {
- .validate = axienet_validate,
+ .validate = phylink_generic_validate,
.mac_pcs_get_state = axienet_mac_pcs_get_state,
.mac_an_restart = axienet_mac_an_restart,
.mac_prepare = axienet_mac_prepare,
@@ -2080,6 +2055,11 @@ static int axienet_probe(struct platform_device *pdev)
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ /* Reset core now that clocks are enabled, prior to accessing MDIO */
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ goto cleanup_clk;
+
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
ret = axienet_mdio_setup(lp);
@@ -2104,6 +2084,17 @@ static int axienet_probe(struct platform_device *pdev)
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.legacy_pre_march2020 = true;
+ lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD;
+
+ __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
+ if (lp->switch_x_sgmii) {
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ lp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ lp->phylink_config.supported_interfaces);
+ }
lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
lp->phy_mode,
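
Dropping the driver's hand-rolled .validate in favour of phylink_generic_validate() works because phylink can compute the supported and advertising link-mode masks itself once the driver declares two things in its phylink_config: the MAC's capability flags and the set of PHY interface modes it accepts. A condensed sketch of that contract, mirroring the axienet hunk above:

struct phylink_config *cfg = &lp->phylink_config;

/* Speed/duplex/pause the MAC supports, as capability flags. */
cfg->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
			MAC_10FD | MAC_100FD | MAC_1000FD;

/* PHY interface modes the MAC can be attached to. */
__set_bit(lp->phy_mode, cfg->supported_interfaces);

/* phylink_generic_validate() now derives the link-mode masks. */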
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0815de581c7f..519599480b15 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1133,14 +1133,11 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->ndev = ndev;
/* Get IRQ for the device */
- res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "no IRQ found\n");
- rc = -ENXIO;
+ rc = platform_get_irq(ofdev, 0);
+ if (rc < 0)
goto error;
- }
- ndev->irq = res->start;
+ ndev->irq = rc;
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
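
The emaclite conversion above follows the preferred IRQ-lookup idiom: platform_get_irq() resolves the interrupt from device tree or ACPI, logs its own error message, and returns a negative errno, so the old platform_get_resource(IORESOURCE_IRQ, ...) dance with a manual dev_err() is no longer needed. The idiom in isolation, assuming a probe function with pdev and ndev in scope:

int irq;

irq = platform_get_irq(pdev, 0);	/* logs an error on failure */
if (irq < 0)
	return irq;	/* propagates -EPROBE_DEFER and friends */
ndev->irq = irq;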
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 65fdad1107fc..df77a22d1b81 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -382,9 +382,6 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- if (cfg.flags) /* reserved for future extensions */
- return -EINVAL;
-
ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
if (ret)
return ret;
diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c
index 107039056511..145767d98445 100644
--- a/drivers/net/fddi/skfp/hwmtm.c
+++ b/drivers/net/fddi/skfp/hwmtm.c
@@ -38,10 +38,10 @@
-------------------------------------------------------------
*/
#ifdef COMMON_MB_POOL
-static SMbuf *mb_start = 0 ;
-static SMbuf *mb_free = 0 ;
+static SMbuf *mb_start;
+static SMbuf *mb_free;
static int mb_init = FALSE ;
-static int call_count = 0 ;
+static int call_count;
#endif
/*
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 6b68a53f1b38..72c31f0013ad 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -1846,10 +1846,10 @@ void smt_swap_para(struct smt_header *sm, int len, int direction)
}
}
+
static void smt_string_swap(char *data, const char *format, int len)
{
const char *open_paren = NULL ;
- int x ;
while (len > 0 && *format) {
switch (*format) {
@@ -1876,19 +1876,13 @@ static void smt_string_swap(char *data, const char *format, int len)
len-- ;
break ;
case 's' :
- x = data[0] ;
- data[0] = data[1] ;
- data[1] = x ;
+ swap(data[0], data[1]) ;
data += 2 ;
len -= 2 ;
break ;
case 'l' :
- x = data[0] ;
- data[0] = data[3] ;
- data[3] = x ;
- x = data[1] ;
- data[1] = data[2] ;
- data[2] = x ;
+ swap(data[0], data[3]) ;
+ swap(data[1], data[2]) ;
data += 4 ;
len -= 4 ;
break ;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 1ab94b5f9bbf..c1fdd721a730 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -17,6 +17,7 @@
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
+#include <net/gro.h>
#include <net/protocol.h>
#define GENEVE_NETDEV_VER "0.6"
@@ -516,18 +517,15 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
type = gh->proto_type;
- rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (!ptype)
- goto out_unlock;
+ goto out;
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
-out_unlock:
- rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);
@@ -547,13 +545,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
gh_len = geneve_hlen(gh);
type = gh->proto_type;
- rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
- rcu_read_unlock();
-
skb_set_inner_mac_header(skb, nhoff + gh_len);
return err;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 8a19a06b505d..b1fc153125d9 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -681,8 +681,8 @@ static void sixpack_close(struct tty_struct *tty)
}
/* Perform I/O control on an active 6pack channel. */
-static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct sixpack *sp = sp_get(tty);
struct net_device *dev;
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index b0edb91bb10a..8297411e87ea 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -30,6 +30,7 @@
/*****************************************************************************/
#include <linux/capability.h>
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/net.h>
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index edde9c3ae12b..c251e04ae047 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -806,8 +806,8 @@ static void mkiss_close(struct tty_struct *tty)
}
/* Perform I/O control on an active ax25 channel. */
-static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int mkiss_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct mkiss *ax = mkiss_get(tty);
struct net_device *dev;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 3d59dac063ac..f90830d3dfa6 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -148,6 +148,7 @@
/* ----------------------------------------------------------------------- */
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 6376b8485976..980f2be32f05 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -950,9 +950,7 @@ static int yam_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __
ym = memdup_user(data, sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
- if (ym->cmd != SIOCYAMSMCS)
- return -EINVAL;
- if (ym->bitrate > YAM_MAXBITRATE) {
+ if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 315278a7cf88..cf69da0e296c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -164,6 +164,7 @@ struct hv_netvsc_packet {
u32 total_bytes;
u32 send_buf_index;
u32 total_data_buflen;
+ struct hv_dma_range *dma_range;
};
#define NETVSC_HASH_KEYLEN 40
@@ -1074,6 +1075,7 @@ struct netvsc_device {
/* Receive buffer allocated by us but managed by NetVSP */
void *recv_buf;
+ void *recv_original_buf;
u32 recv_buf_size; /* allocated bytes */
struct vmbus_gpadl recv_buf_gpadl_handle;
u32 recv_section_cnt;
@@ -1082,6 +1084,7 @@ struct netvsc_device {
/* Send buffer allocated by us */
void *send_buf;
+ void *send_original_buf;
u32 send_buf_size;
struct vmbus_gpadl send_buf_gpadl_handle;
u32 send_section_cnt;
@@ -1731,4 +1734,6 @@ struct rndis_message {
#define RETRY_US_HI 10000
#define RETRY_MAX 2000 /* >10 sec */
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet);
#endif /* _HYPERV_NET_H */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 396bc1c204e6..afa81a9480cc 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -153,9 +153,22 @@ static void free_netvsc_device(struct rcu_head *head)
int i;
kfree(nvdev->extension);
- vfree(nvdev->recv_buf);
- vfree(nvdev->send_buf);
- kfree(nvdev->send_section_map);
+
+ if (nvdev->recv_original_buf) {
+ hv_unmap_memory(nvdev->recv_buf);
+ vfree(nvdev->recv_original_buf);
+ } else {
+ vfree(nvdev->recv_buf);
+ }
+
+ if (nvdev->send_original_buf) {
+ hv_unmap_memory(nvdev->send_buf);
+ vfree(nvdev->send_original_buf);
+ } else {
+ vfree(nvdev->send_buf);
+ }
+
+ bitmap_free(nvdev->send_section_map);
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
@@ -336,8 +349,8 @@ static int netvsc_init_buf(struct hv_device *device,
struct net_device *ndev = hv_get_drvdata(device);
struct nvsp_message *init_packet;
unsigned int buf_size;
- size_t map_words;
int i, ret = 0;
+ void *vaddr;
/* Get receive buffer area. */
buf_size = device_info->recv_sections * device_info->recv_section_size;
@@ -373,6 +386,17 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}
+ if (hv_isolation_type_snp()) {
+ vaddr = hv_map_memory(net_device->recv_buf, buf_size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->recv_original_buf = net_device->recv_buf;
+ net_device->recv_buf = vaddr;
+ }
+
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -476,6 +500,17 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}
+ if (hv_isolation_type_snp()) {
+ vaddr = hv_map_memory(net_device->send_buf, buf_size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->send_original_buf = net_device->send_buf;
+ net_device->send_buf = vaddr;
+ }
+
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -528,10 +563,9 @@ static int netvsc_init_buf(struct hv_device *device,
net_device->send_section_size, net_device->send_section_cnt);
/* Setup state for managing the send buffer. */
- map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
-
- net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
- if (net_device->send_section_map == NULL) {
+ net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
+ GFP_KERNEL);
+ if (!net_device->send_section_map) {
ret = -ENOMEM;
goto cleanup;
}
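
The send_section_map conversion above swaps an open-coded kcalloc() of BITS_PER_LONG-sized words for the bitmap API, which sizes the allocation correctly for any bit count and pairs it with a matching free. A sketch of the idiom, with nbits standing in for send_section_cnt:

unsigned long *map;

map = bitmap_zalloc(nbits, GFP_KERNEL);	/* zeroed, rounded up to longs */
if (!map)
	return -ENOMEM;

/* ... test_and_set_bit()/clear_bit() over bits [0, nbits) ... */

bitmap_free(map);	/* matches bitmap_zalloc(); NULL-safe */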
@@ -766,7 +800,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
/* Notify the layer above us */
if (likely(skb)) {
- const struct hv_netvsc_packet *packet
+ struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = packet->send_buf_index;
struct netvsc_stats *tx_stats;
@@ -782,6 +816,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
tx_stats->bytes += packet->total_bytes;
u64_stats_update_end(&tx_stats->syncp);
+ netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
napi_consume_skb(skb, budget);
}
@@ -946,6 +981,88 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
memset(dest, 0, padding);
}
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet)
+{
+ u32 page_count = packet->cp_partial ?
+ packet->page_buf_cnt - packet->rmsg_pgcnt :
+ packet->page_buf_cnt;
+ int i;
+
+ if (!hv_is_isolation_supported())
+ return;
+
+ if (!packet->dma_range)
+ return;
+
+ for (i = 0; i < page_count; i++)
+ dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
+ packet->dma_range[i].mapping_size,
+ DMA_TO_DEVICE);
+
+ kfree(packet->dma_range);
+}
+
+/* netvsc_dma_map - Map the data pages of a packet sent by
+ * vmbus_sendpacket_pagebuffer() through swiotlb bounce buffers in an
+ * Isolation VM.
+ *
+ * In an Isolation VM, the netvsc send buffer has been marked visible
+ * to the host, so data copied into the send buffer does not need to
+ * use a bounce buffer. The data pages handled by
+ * vmbus_sendpacket_pagebuffer() may not have been copied into the send
+ * buffer, so those pages must be mapped through swiotlb bounce
+ * buffers; netvsc_dma_map() does that. The pfns in the struct
+ * hv_page_buffer are then converted to the bounce buffer's pfns. The
+ * loop here is necessary because the entries in the page buffer array
+ * are not necessarily full pages of data: each entry has its own
+ * offset and len, which may be non-zero even for entries in the middle
+ * of the array, and the entries are not physically contiguous. So each
+ * entry must be mapped individually rather than as one contiguous
+ * unit, which is why dma_map_sg() is not used here.
+ */
+static int netvsc_dma_map(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet,
+ struct hv_page_buffer *pb)
+{
+ u32 page_count = packet->cp_partial ?
+ packet->page_buf_cnt - packet->rmsg_pgcnt :
+ packet->page_buf_cnt;
+ dma_addr_t dma;
+ int i;
+
+ if (!hv_is_isolation_supported())
+ return 0;
+
+ packet->dma_range = kcalloc(page_count,
+ sizeof(*packet->dma_range),
+ GFP_KERNEL);
+ if (!packet->dma_range)
+ return -ENOMEM;
+
+ for (i = 0; i < page_count; i++) {
+ char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
+ + pb[i].offset);
+ u32 len = pb[i].len;
+
+ dma = dma_map_single(&hv_dev->device, src, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&hv_dev->device, dma)) {
+ kfree(packet->dma_range);
+ return -ENOMEM;
+ }
+
+ /* pb[].offset and pb[].len are not changed during dma mapping,
+ * so they are not reassigned here.
+ */
+ packet->dma_range[i].dma = dma;
+ packet->dma_range[i].mapping_size = len;
+ pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
+ }
+
+ return 0;
+}
+
static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -986,14 +1103,24 @@ static inline int netvsc_send_pkt(
trace_nvsp_send_pkt(ndev, out_channel, rpkt);
+ packet->dma_range = NULL;
if (packet->page_buf_cnt) {
if (packet->cp_partial)
pb += packet->rmsg_pgcnt;
+ ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
+ if (ret) {
+ ret = -EAGAIN;
+ goto exit;
+ }
+
ret = vmbus_sendpacket_pagebuffer(out_channel,
pb, packet->page_buf_cnt,
&nvmsg, sizeof(nvmsg),
req_id);
+
+ if (ret)
+ netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
ret = vmbus_sendpacket(out_channel,
&nvmsg, sizeof(nvmsg),
@@ -1001,6 +1128,7 @@ static inline int netvsc_send_pkt(
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
+exit:
if (ret == 0) {
atomic_inc_return(&nvchan->queue_sends);
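
In an SNP isolation VM every page-buffer entry must be bounced through swiotlb, so the mapping created by netvsc_dma_map() has to be released on exactly one of three paths: a vmbus send failure (handled just above), normal TX completion (netvsc_send_tx_complete()), or an RNDIS control response (rndis_filter_receive_response(), further below). A condensed view of that ownership rule, using the helpers added in this patch; channel, cnt, msg and id are stand-ins for the surrounding locals:

packet->dma_range = NULL;	/* no mapping yet */

ret = netvsc_dma_map(hv_dev, packet, pb);	/* may allocate dma_range */
if (ret)
	return -EAGAIN;		/* nothing was mapped */

ret = vmbus_sendpacket_pagebuffer(channel, pb, cnt, &msg, sizeof(msg), id);
if (ret)
	netvsc_dma_unmap(hv_dev, packet);	/* the send never happened */
/* else: unmapped later, when the host completes the packet */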
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index aa877da113f8..7856905414eb 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -68,7 +68,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
}
out:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7e66ae1d2a59..3646469433b1 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1858,7 +1858,9 @@ static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
}
static void netvsc_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
@@ -1870,7 +1872,9 @@ static void netvsc_get_ringparam(struct net_device *ndev,
}
static int netvsc_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
@@ -2512,6 +2516,7 @@ static int netvsc_probe(struct hv_device *dev,
net->netdev_ops = &device_ops;
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
+ dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1);
/* We always need headroom for rndis header */
net->needed_headroom = RNDIS_AND_PPI_SIZE;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index f6c9c2a670f9..448fcc325ed7 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -361,6 +361,8 @@ static void rndis_filter_receive_response(struct net_device *ndev,
}
}
+ netvsc_dma_unmap(((struct net_device_context *)
+ netdev_priv(ndev))->device_ctx, &request->pkt);
complete(&request->wait_event);
} else {
netdev_err(ndev,
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 31f522b8e54e..1c64d5347b8e 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -36,30 +37,55 @@
#include <net/net_namespace.h>
#define TX_Q_LIMIT 32
+
+struct ifb_q_stats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync sync;
+};
+
struct ifb_q_private {
struct net_device *dev;
struct tasklet_struct ifb_tasklet;
int tasklet_pending;
int txqnum;
struct sk_buff_head rq;
- u64 rx_packets;
- u64 rx_bytes;
- struct u64_stats_sync rsync;
-
- struct u64_stats_sync tsync;
- u64 tx_packets;
- u64 tx_bytes;
struct sk_buff_head tq;
+ struct ifb_q_stats rx_stats;
+ struct ifb_q_stats tx_stats;
} ____cacheline_aligned_in_smp;
struct ifb_dev_private {
struct ifb_q_private *tx_private;
};
+/* For ethtool stats. */
+struct ifb_q_stats_desc {
+ char desc[ETH_GSTRING_LEN];
+ size_t offset;
+};
+
+#define IFB_Q_STAT(m) offsetof(struct ifb_q_stats, m)
+
+static const struct ifb_q_stats_desc ifb_q_stats_desc[] = {
+ { "packets", IFB_Q_STAT(packets) },
+ { "bytes", IFB_Q_STAT(bytes) },
+};
+
+#define IFB_Q_STATS_LEN ARRAY_SIZE(ifb_q_stats_desc)
+
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
+static void ifb_update_q_stats(struct ifb_q_stats *stats, int len)
+{
+ u64_stats_update_begin(&stats->sync);
+ stats->packets++;
+ stats->bytes += len;
+ u64_stats_update_end(&stats->sync);
+}
+
static void ifb_ri_tasklet(struct tasklet_struct *t)
{
struct ifb_q_private *txp = from_tasklet(txp, t, ifb_tasklet);
@@ -83,10 +109,7 @@ static void ifb_ri_tasklet(struct tasklet_struct *t)
#endif
nf_skip_egress(skb, true);
- u64_stats_update_begin(&txp->tsync);
- txp->tx_packets++;
- txp->tx_bytes += skb->len;
- u64_stats_update_end(&txp->tsync);
+ ifb_update_q_stats(&txp->tx_stats, skb->len);
rcu_read_lock();
skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
@@ -139,18 +162,18 @@ static void ifb_stats64(struct net_device *dev,
for (i = 0; i < dev->num_tx_queues; i++,txp++) {
do {
- start = u64_stats_fetch_begin_irq(&txp->rsync);
- packets = txp->rx_packets;
- bytes = txp->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
+ start = u64_stats_fetch_begin_irq(&txp->rx_stats.sync);
+ packets = txp->rx_stats.packets;
+ bytes = txp->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&txp->rx_stats.sync, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
do {
- start = u64_stats_fetch_begin_irq(&txp->tsync);
- packets = txp->tx_packets;
- bytes = txp->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&txp->tsync, start));
+ start = u64_stats_fetch_begin_irq(&txp->tx_stats.sync);
+ packets = txp->tx_stats.packets;
+ bytes = txp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&txp->tx_stats.sync, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
}
@@ -173,14 +196,83 @@ static int ifb_dev_init(struct net_device *dev)
txp->dev = dev;
__skb_queue_head_init(&txp->rq);
__skb_queue_head_init(&txp->tq);
- u64_stats_init(&txp->rsync);
- u64_stats_init(&txp->tsync);
+ u64_stats_init(&txp->rx_stats.sync);
+ u64_stats_init(&txp->tx_stats.sync);
tasklet_setup(&txp->ifb_tasklet, ifb_ri_tasklet);
netif_tx_start_queue(netdev_get_tx_queue(dev, i));
}
return 0;
}
+static void ifb_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ u8 *p = buf;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ for (j = 0; j < IFB_Q_STATS_LEN; j++)
+ ethtool_sprintf(&p, "rx_queue_%u_%.18s",
+ i, ifb_q_stats_desc[j].desc);
+
+ for (i = 0; i < dev->real_num_tx_queues; i++)
+ for (j = 0; j < IFB_Q_STATS_LEN; j++)
+ ethtool_sprintf(&p, "tx_queue_%u_%.18s",
+ i, ifb_q_stats_desc[j].desc);
+
+ break;
+ }
+}
+
+static int ifb_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IFB_Q_STATS_LEN * (dev->real_num_rx_queues +
+ dev->real_num_tx_queues);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ifb_fill_stats_data(u64 **data,
+ struct ifb_q_stats *q_stats)
+{
+ void *stats_base = (void *)q_stats;
+ unsigned int start;
+ size_t offset;
+ int j;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&q_stats->sync);
+ for (j = 0; j < IFB_Q_STATS_LEN; j++) {
+ offset = ifb_q_stats_desc[j].offset;
+ (*data)[j] = *(u64 *)(stats_base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&q_stats->sync, start));
+
+ *data += IFB_Q_STATS_LEN;
+}
+
+static void ifb_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ifb_dev_private *dp = netdev_priv(dev);
+ struct ifb_q_private *txp;
+ int i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ txp = dp->tx_private + i;
+ ifb_fill_stats_data(&data, &txp->rx_stats);
+ }
+
+ for (i = 0; i < dev->real_num_tx_queues; i++) {
+ txp = dp->tx_private + i;
+ ifb_fill_stats_data(&data, &txp->tx_stats);
+ }
+}
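+
Both the ethtool path above and ifb_stats64() read the per-queue counters with the u64_stats fetch/retry loop, which yields consistent 64-bit reads on 32-bit SMP kernels without locking the writer. The reader idiom in isolation, assuming q points at a struct ifb_q_stats:

u64 packets, bytes;
unsigned int start;

do {
	start = u64_stats_fetch_begin_irq(&q->sync);
	packets = q->packets;
	bytes = q->bytes;
} while (u64_stats_fetch_retry_irq(&q->sync, start));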
+
static const struct net_device_ops ifb_netdev_ops = {
.ndo_open = ifb_open,
.ndo_stop = ifb_close,
@@ -190,6 +282,12 @@ static const struct net_device_ops ifb_netdev_ops = {
.ndo_init = ifb_dev_init,
};
+static const struct ethtool_ops ifb_ethtool_ops = {
+ .get_strings = ifb_get_strings,
+ .get_sset_count = ifb_get_sset_count,
+ .get_ethtool_stats = ifb_get_ethtool_stats,
+};
+
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
@@ -213,6 +311,7 @@ static void ifb_setup(struct net_device *dev)
{
/* Initialize the device structure. */
dev->netdev_ops = &ifb_netdev_ops;
+ dev->ethtool_ops = &ifb_ethtool_ops;
/* Fill in device structure with ethernet-generic values. */
ether_setup(dev);
@@ -241,10 +340,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
struct ifb_dev_private *dp = netdev_priv(dev);
struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
- u64_stats_update_begin(&txp->rsync);
- txp->rx_packets++;
- txp->rx_bytes += skb->len;
- u64_stats_update_end(&txp->rsync);
+ ifb_update_q_stats(&txp->rx_stats, skb->len);
if (!skb->redirected || !skb->skb_iif) {
dev_kfree_skb(skb);
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index a2fcdb1abdb9..bc981043cc80 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -93,6 +93,7 @@
#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
+#define GSI_CHANNEL_MODEM_FLOW_RETRIES 5 /* disable flow control only */
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
@@ -339,10 +340,10 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
* completion to be signaled. Returns true if the command completes
* or false if it times out.
*/
-static bool
-gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
+static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+ struct completion *completion = &gsi->completion;
reinit_completion(completion);
@@ -366,8 +367,6 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
enum gsi_evt_cmd_opcode opcode)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
bool timeout;
u32 val;
@@ -378,7 +377,7 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
gsi_irq_ev_ctrl_disable(gsi);
@@ -478,7 +477,6 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
- struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
@@ -490,7 +488,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
gsi_irq_ch_ctrl_disable(gsi);
@@ -1074,13 +1072,10 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
while (channel_mask) {
u32 channel_id = __ffs(channel_mask);
- struct gsi_channel *channel;
channel_mask ^= BIT(channel_id);
- channel = &gsi->channel[channel_id];
-
- complete(&channel->completion);
+ complete(&gsi->completion);
}
}
@@ -1094,13 +1089,10 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
- struct gsi_evt_ring *evt_ring;
event_mask ^= BIT(evt_ring_id);
- evt_ring = &gsi->evt_ring[evt_ring_id];
-
- complete(&evt_ring->completion);
+ complete(&gsi->completion);
}
}
@@ -1110,7 +1102,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
if (code == GSI_OUT_OF_RESOURCES) {
dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
- complete(&gsi->channel[channel_id].completion);
+ complete(&gsi->completion);
return;
}
@@ -1127,7 +1119,7 @@ gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
u32 channel_id = gsi_channel_id(evt_ring->channel);
- complete(&evt_ring->completion);
+ complete(&gsi->completion);
dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
channel_id);
return;
@@ -1171,18 +1163,23 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
u32 result;
u32 val;
- /* This interrupt is used to handle completions of the two GENERIC
- * GSI commands. We use these to allocate and halt channels on
- * the modem's behalf due to a hardware quirk on IPA v4.2. Once
- * allocated, the modem "owns" these channels, and as a result we
- * have no way of knowing the channel's state at any given time.
+ /* This interrupt is used to handle completions of GENERIC GSI
+ * commands. We use these to allocate and halt channels on the
+ * modem's behalf due to a hardware quirk on IPA v4.2. The modem
+ * "owns" channels even when the AP allocates them, and we have no
+ * way of knowing whether a modem channel's state has changed.
+ *
+ * We also use GENERIC commands to enable/disable channel flow
+ * control for IPA v4.2+.
*
* It is recommended that we halt the modem channels we allocated
* when shutting down, but it's possible the channel isn't running
* at the time we issue the HALT command. We'll get an error in
* that case, but it's harmless (the channel is already halted).
+ * Similarly, we could get an error back when updating flow control
+ * on a channel because it's not in the proper state.
*
- * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
+ * In either case, we silently ignore a CHANNEL_NOT_RUNNING error
* if we receive it.
*/
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
@@ -1648,19 +1645,25 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}
+/* We use generic commands only to operate on modem channels. We don't have
+ * the ability to determine channel state for a modem channel, so we simply
+ * issue the command and wait for it to complete.
+ */
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
- enum gsi_generic_cmd_opcode opcode)
+ enum gsi_generic_cmd_opcode opcode,
+ u8 params)
{
- struct completion *completion = &gsi->completion;
bool timeout;
u32 val;
- /* The error global interrupt type is always enabled (until we
- * teardown), so we won't change that. A generic EE command
- * completes with a GSI global interrupt of type GP_INT1. We
- * only perform one generic command at a time (to allocate or
- * halt a modem channel) and only from this function. So we
- * enable the GP_INT1 IRQ type here while we're expecting it.
+ /* The error global interrupt type is always enabled (until we tear
+ * down), so we will keep it enabled.
+ *
+ * A generic EE command completes with a GSI global interrupt of
+ * type GP_INT1. We only perform one generic command at a time
+ * (to allocate, halt, or enable/disable flow control on a modem
+ * channel), and only from this function. So we enable the GP_INT1
+ * IRQ type here, and disable it again after the command completes.
*/
val = BIT(ERROR_INT) | BIT(GP_INT1);
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@@ -1674,8 +1677,9 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
+ val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
- timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
/* Disable the GP_INT1 IRQ type again */
iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@@ -1692,7 +1696,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
return gsi_generic_command(gsi, channel_id,
- GSI_GENERIC_ALLOCATE_CHANNEL);
+ GSI_GENERIC_ALLOCATE_CHANNEL, 0);
}
static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
@@ -1702,7 +1706,7 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
do
ret = gsi_generic_command(gsi, channel_id,
- GSI_GENERIC_HALT_CHANNEL);
+ GSI_GENERIC_HALT_CHANNEL, 0);
while (ret == -EAGAIN && retries--);
if (ret)
@@ -1710,6 +1714,32 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
ret, channel_id);
}
+/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
+void
+gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
+{
+ u32 retries = 0;
+ u32 command;
+ int ret;
+
+ command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
+ : GSI_GENERIC_DISABLE_FLOW_CONTROL;
+ /* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
+ * is underway. In this case we need to retry the command.
+ */
+ if (!enable && gsi->version >= IPA_VERSION_4_11)
+ retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;
+
+ do
+ ret = gsi_generic_command(gsi, channel_id, command, 0);
+ while (ret == -EAGAIN && retries--);
+
+ if (ret)
+ dev_err(gsi->dev,
+ "error %d %sabling modem channel %u flow control\n",
+ ret, enable ? "en" : "dis", channel_id);
+}
+
/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
@@ -1975,18 +2005,6 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
-/* Init function for event rings; there is no gsi_evt_ring_exit() */
-static void gsi_evt_ring_init(struct gsi *gsi)
-{
- u32 evt_ring_id = 0;
-
- gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
- gsi->ieob_enabled_bitmap = 0;
- do
- init_completion(&gsi->evt_ring[evt_ring_id].completion);
- while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
-}
-
static bool gsi_channel_data_valid(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data)
{
@@ -2069,7 +2087,6 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->tlv_count = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
- init_completion(&channel->completion);
ret = gsi_channel_evt_ring_init(channel);
if (ret)
@@ -2129,7 +2146,8 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
/* IPA v4.2 requires the AP to allocate channels for the modem */
modem_alloc = gsi->version == IPA_VERSION_4_2;
- gsi_evt_ring_init(gsi); /* No matching exit required */
+ gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
+ gsi->ieob_enabled_bitmap = 0;
/* The endpoint data array is indexed by endpoint name */
for (i = 0; i < count; i++) {
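
With the per-channel and per-event-ring completions removed, every GSI command (channel, event ring, or generic) signals the single gsi->completion, and gsi->mutex guarantees only one command is outstanding at a time. A sketch of the resulting command shape; the in-tree gsi_command() may differ in detail:

static bool gsi_command_sketch(struct gsi *gsi, u32 reg, u32 val)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);

	reinit_completion(&gsi->completion);	/* one completion for all */

	iowrite32(val, gsi->virt + reg);	/* kick the command */

	/* An ISR path (gsi_isr_chan_ctrl() etc.) calls complete(). */
	return !!wait_for_completion_timeout(&gsi->completion, timeout);
}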
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 88b80dc3db79..9cc657658811 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -101,6 +101,7 @@ enum gsi_channel_state {
GSI_CHANNEL_STATE_STARTED = 0x2,
GSI_CHANNEL_STATE_STOPPED = 0x3,
GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
+ GSI_CHANNEL_STATE_FLOW_CONTROLLED = 0x5, /* IPA v4.2-v4.9 */
GSI_CHANNEL_STATE_ERROR = 0xf,
};
@@ -114,8 +115,6 @@ struct gsi_channel {
u16 tre_count;
u16 event_count;
- struct completion completion; /* signals channel command completion */
-
struct gsi_ring tre_ring;
u32 evt_ring_id;
@@ -141,28 +140,27 @@ enum gsi_evt_ring_state {
struct gsi_evt_ring {
struct gsi_channel *channel;
- struct completion completion; /* signals event ring state changes */
struct gsi_ring ring;
};
struct gsi {
struct device *dev; /* Same as IPA device */
enum ipa_version version;
- struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt_raw; /* I/O mapped address range */
void __iomem *virt; /* Adjusted for most registers */
u32 irq;
u32 channel_count;
u32 evt_ring_count;
- struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
- struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
u32 event_bitmap; /* allocated event rings */
u32 modem_channel_bitmap; /* modem channels to allocate */
u32 type_enabled_bitmap; /* GSI IRQ types enabled */
u32 ieob_enabled_bitmap; /* IEOB IRQ enabled (event rings) */
- struct completion completion; /* for global EE commands */
int result; /* Negative errno (generic commands) */
+ struct completion completion; /* Signals GSI command completion */
struct mutex mutex; /* protects commands, programming */
+ struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
+ struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
+ struct net_device dummy_dev; /* needed for NAPI */
};
/**
@@ -219,6 +217,15 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id);
int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
/**
+ * gsi_modem_channel_flow_control() - Set channel flow control state (IPA v4.2+)
+ * @gsi: GSI pointer returned by gsi_setup()
+ * @channel_id: Modem TX channel to control
+ * @enable: Whether to enable flow control (i.e., prevent flow)
+ */
+void gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id,
+ bool enable);
+
+/**
* gsi_channel_reset() - Reset an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to be reset
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index bf9593d9eaea..8906f4381032 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -313,11 +313,15 @@ enum gsi_evt_cmd_opcode {
#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
#define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10)
+#define GENERIC_PARAMS_FMASK GENMASK(31, 24) /* IPA v4.11+ */
/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */
enum gsi_generic_cmd_opcode {
GSI_GENERIC_HALT_CHANNEL = 0x1,
GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
+ GSI_GENERIC_ENABLE_FLOW_CONTROL = 0x3, /* IPA v4.2+ */
+ GSI_GENERIC_DISABLE_FLOW_CONTROL = 0x4, /* IPA v4.2+ */
+ GSI_GENERIC_QUERY_FLOW_CONTROL = 0x5, /* IPA v4.11+ */
};
/* The next register is present for IPA v3.5.1 and above */
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
index e62ab9c3ac67..2da2c4194f2e 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -420,15 +420,10 @@ static const struct ipa_mem_data ipa_mem_data = {
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
- .name = "memory-a",
+ .name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 150000, /* 150 MBps */
},
- {
- .name = "memory-b",
- .peak_bandwidth = 1804000, /* 1.804 GBps */
- .average_bandwidth = 150000, /* 150 MBps */
- },
/* Average rate is unused for the next two interconnects */
{
.name = "imem",
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 03a170993420..68291a3efd04 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -237,7 +237,8 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
}
/* suspend_delay represents suspend for RX, delay for TX endpoints.
- * Note that suspend is not supported starting with IPA v4.0.
+ * Note that suspend is not supported starting with IPA v4.0, and
+ * delay mode should not be used starting with IPA v4.2.
*/
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
@@ -248,11 +249,8 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
u32 mask;
u32 val;
- /* Suspend is not supported for IPA v4.0+. Delay doesn't work
- * correctly on IPA v4.2.
- */
if (endpoint->toward_ipa)
- WARN_ON(ipa->version == IPA_VERSION_4_2);
+ WARN_ON(ipa->version >= IPA_VERSION_4_2);
else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
@@ -270,15 +268,15 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
return state;
}
-/* We currently don't care what the previous state was for delay mode */
+/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
+ /* Delay mode should not be used for IPA v4.2+ */
+ WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
WARN_ON(!endpoint->toward_ipa);
- /* Delay mode doesn't work properly for IPA v4.2 */
- if (endpoint->ipa->version != IPA_VERSION_4_2)
- (void)ipa_endpoint_init_ctrl(endpoint, enable);
+ (void)ipa_endpoint_init_ctrl(endpoint, enable);
}
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
@@ -355,26 +353,29 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
return suspended;
}
-/* Enable or disable delay or suspend mode on all modem endpoints */
+/* Put all modem RX endpoints into suspend mode, and stop transmission
+ * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
+ * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
+ * control instead.
+ */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
u32 endpoint_id;
- /* DELAY mode doesn't work correctly on IPA v4.2 */
- if (ipa->version == IPA_VERSION_4_2)
- return;
-
for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
if (endpoint->ee_id != GSI_EE_MODEM)
continue;
- /* Set TX delay mode or RX suspend mode */
- if (endpoint->toward_ipa)
+ if (!endpoint->toward_ipa)
+ (void)ipa_endpoint_program_suspend(endpoint, enable);
+ else if (ipa->version < IPA_VERSION_4_2)
ipa_endpoint_program_delay(endpoint, enable);
else
- (void)ipa_endpoint_program_suspend(endpoint, enable);
+ gsi_modem_channel_flow_control(&ipa->gsi,
+ endpoint->channel_id,
+ enable);
}
}
@@ -860,7 +861,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
}
static void
-ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
+ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
u32 offset;
@@ -874,6 +875,19 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
+/* Assumes HOL_BLOCK is in disabled state */
+static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
+ u32 microseconds)
+{
+ ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
+ ipa_endpoint_init_hol_block_en(endpoint, true);
+}
+
+static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
+{
+ ipa_endpoint_init_hol_block_en(endpoint, false);
+}
+
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
u32 i;
@@ -884,9 +898,8 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
- ipa_endpoint_init_hol_block_enable(endpoint, false);
- ipa_endpoint_init_hol_block_timer(endpoint, 0);
- ipa_endpoint_init_hol_block_enable(endpoint, true);
+ ipa_endpoint_init_hol_block_disable(endpoint);
+ ipa_endpoint_init_hol_block_enable(endpoint, 0);
}
}
@@ -1067,27 +1080,38 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
{
struct gsi *gsi;
u32 backlog;
+ int delta;
- if (!endpoint->replenish_enabled) {
+ if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
if (add_one)
atomic_inc(&endpoint->replenish_saved);
return;
}
+ /* If already active, just update the backlog */
+ if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
+ if (add_one)
+ atomic_inc(&endpoint->replenish_backlog);
+ return;
+ }
+
while (atomic_dec_not_zero(&endpoint->replenish_backlog))
if (ipa_endpoint_replenish_one(endpoint))
goto try_again_later;
+
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+
if (add_one)
atomic_inc(&endpoint->replenish_backlog);
return;
try_again_later:
- /* The last one didn't succeed, so fix the backlog */
- backlog = atomic_inc_return(&endpoint->replenish_backlog);
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
- if (add_one)
- atomic_inc(&endpoint->replenish_backlog);
+ /* The last one didn't succeed, so fix the backlog */
+ delta = add_one ? 2 : 1;
+ backlog = atomic_add_return(delta, &endpoint->replenish_backlog);
/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
@@ -1107,7 +1131,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
u32 max_backlog;
u32 saved;
- endpoint->replenish_enabled = true;
+ set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
atomic_add(saved, &endpoint->replenish_backlog);
@@ -1121,7 +1145,7 @@ static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
u32 backlog;
- endpoint->replenish_enabled = false;
+ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
atomic_add(backlog, &endpoint->replenish_saved);
}
@@ -1141,18 +1165,19 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
{
struct sk_buff *skb;
+ if (!endpoint->netdev)
+ return;
+
skb = __dev_alloc_skb(len, GFP_ATOMIC);
- if (skb) {
- skb_put(skb, len);
- memcpy(skb->data, data, len);
- skb->truesize += extra;
- }
+ if (!skb)
+ return;
- /* Now receive it, or drop it if there's no netdev */
- if (endpoint->netdev)
- ipa_modem_skb_rx(endpoint->netdev, skb);
- else if (skb)
- dev_kfree_skb_any(skb);
+ /* Copy the data into the socket buffer and receive it */
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+ skb->truesize += extra;
+
+ ipa_modem_skb_rx(endpoint->netdev, skb);
}
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
@@ -1519,10 +1544,19 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
- if (endpoint->toward_ipa)
- ipa_endpoint_program_delay(endpoint, false);
- else
+ if (endpoint->toward_ipa) {
+ /* Newer versions of IPA use GSI channel flow control
+ * instead of endpoint DELAY mode to prevent sending data.
+ * Flow control is disabled for newly-allocated channels,
+ * and we can assume flow control is not (ever) enabled
+ * for AP TX channels.
+ */
+ if (endpoint->ipa->version < IPA_VERSION_4_2)
+ ipa_endpoint_program_delay(endpoint, false);
+ } else {
+ /* Ensure suspend mode is off on all AP RX endpoints */
(void)ipa_endpoint_program_suspend(endpoint, false);
+ }
ipa_endpoint_init_cfg(endpoint);
ipa_endpoint_init_nat(endpoint);
ipa_endpoint_init_hdr(endpoint);
@@ -1530,6 +1564,8 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
ipa_endpoint_init_hdr_metadata_mask(endpoint);
ipa_endpoint_init_mode(endpoint);
ipa_endpoint_init_aggr(endpoint);
+ if (!endpoint->toward_ipa)
+ ipa_endpoint_init_hol_block_disable(endpoint);
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
@@ -1666,7 +1702,8 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
/* RX transactions require a single TRE, so the maximum
* backlog is the same as the maximum outstanding TREs.
*/
- endpoint->replenish_enabled = false;
+ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
atomic_set(&endpoint->replenish_saved,
gsi_channel_tre_max(gsi, endpoint->channel_id));
atomic_set(&endpoint->replenish_backlog, 0);
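
The replenish rework replaces a bare bool with a small bitmap so that test_and_set_bit() can turn IPA_REPLENISH_ACTIVE into a single-flight guard: only one context at a time loops refilling the ring, while concurrent callers merely bump the backlog and leave. The guard pattern in isolation, using the flags defined in ipa_endpoint.h below:

if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
	return;		/* replenishing is switched off */

if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
	return;		/* another context is already refilling */

/* ... sole owner: drain replenish_backlog, posting RX buffers ... */

clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);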
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 0a859d10312d..0313cdc607de 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -41,6 +41,19 @@ enum ipa_endpoint_name {
#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
/**
+ * enum ipa_replenish_flag: RX buffer replenish flags
+ *
+ * @IPA_REPLENISH_ENABLED: Whether receive buffer replenishing is enabled
+ * @IPA_REPLENISH_ACTIVE: Whether replenishing is underway
+ * @IPA_REPLENISH_COUNT: Number of defined replenish flags
+ */
+enum ipa_replenish_flag {
+ IPA_REPLENISH_ENABLED,
+ IPA_REPLENISH_ACTIVE,
+ IPA_REPLENISH_COUNT, /* Number of flags (must be last) */
+};
+
+/**
* struct ipa_endpoint - IPA endpoint information
* @ipa: IPA pointer
* @ee_id: Execution environment endpoint is associated with
@@ -51,7 +64,7 @@ enum ipa_endpoint_name {
* @trans_tre_max: Maximum number of TRE descriptors per transaction
* @evt_ring_id: GSI event ring used by the endpoint
* @netdev: Network device pointer, if endpoint uses one
- * @replenish_enabled: Whether receive buffer replenishing is enabled
+ * @replenish_flags: Replenishing state flags
* @replenish_ready: Number of replenish transactions without doorbell
* @replenish_saved: Replenish requests held while disabled
* @replenish_backlog: Number of buffers needed to fill hardware queue
@@ -72,7 +85,7 @@ struct ipa_endpoint {
struct net_device *netdev;
/* Receive buffer replenishing for RX endpoints */
- bool replenish_enabled;
+ DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
u32 replenish_ready;
atomic_t replenish_saved;
atomic_t replenish_backlog;
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index a448ec198bee..3757ce3de2c5 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -734,7 +734,7 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_endpoint_exit;
- ret = ipa_modem_init(ipa, modem_init);
+ ret = ipa_smp2p_init(ipa, modem_init);
if (ret)
goto err_table_exit;
@@ -776,7 +776,7 @@ err_deconfig:
ipa_deconfig(ipa);
err_power_put:
pm_runtime_put_noidle(dev);
- ipa_modem_exit(ipa);
+ ipa_smp2p_exit(ipa);
err_table_exit:
ipa_table_exit(ipa);
err_endpoint_exit:
@@ -827,7 +827,7 @@ static int ipa_remove(struct platform_device *pdev)
ipa_deconfig(ipa);
out_power_put:
pm_runtime_put_noidle(dev);
- ipa_modem_exit(ipa);
+ ipa_smp2p_exit(ipa);
ipa_table_exit(ipa);
ipa_endpoint_exit(ipa);
gsi_exit(&ipa->gsi);
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 4337b0920d3d..1e9eae208e44 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -266,9 +266,7 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
}
/* Now see if any required regions are not defined */
- for (mem_id = find_first_zero_bit(regions, IPA_MEM_COUNT);
- mem_id < IPA_MEM_COUNT;
- mem_id = find_next_zero_bit(regions, IPA_MEM_COUNT, mem_id + 1)) {
+ for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
if (ipa_mem_id_required(ipa, mem_id))
dev_err(dev, "required memory region %u missing\n",
mem_id);
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index d0ab4d70c303..27d87097433f 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -442,16 +442,6 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
-int ipa_modem_init(struct ipa *ipa, bool modem_init)
-{
- return ipa_smp2p_init(ipa, modem_init);
-}
-
-void ipa_modem_exit(struct ipa *ipa)
-{
- ipa_smp2p_exit(ipa);
-}
-
int ipa_modem_config(struct ipa *ipa)
{
void *notifier;
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
index 5e6e3d234454..e64ccc2402e9 100644
--- a/drivers/net/ipa/ipa_modem.h
+++ b/drivers/net/ipa/ipa_modem.h
@@ -18,9 +18,6 @@ void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb);
void ipa_modem_suspend(struct net_device *netdev);
void ipa_modem_resume(struct net_device *netdev);
-int ipa_modem_init(struct ipa *ipa, bool modem_init);
-void ipa_modem_exit(struct ipa *ipa);
-
int ipa_modem_config(struct ipa *ipa);
void ipa_modem_deconfig(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 1da334f54944..2f5a58bfc529 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -419,21 +419,26 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t hash_addr;
dma_addr_t addr;
+ u32 zero_offset;
u16 hash_count;
+ u32 zero_size;
u16 hash_size;
u16 count;
u16 size;
- /* The number of filtering endpoints determines number of entries
- * in the filter table. The hashed and non-hashed filter table
- * will have the same number of entries. The size of the route
- * table region determines the number of entries it has.
- */
+ /* Compute the number of table entries to initialize */
if (filter) {
- /* Include one extra "slot" to hold the filter map itself */
+ /* The number of filtering endpoints determines number of
+ * entries in the filter table; we also add one more "slot"
+ * to hold the bitmap itself. The size of the hashed filter
+ * table is either the same as the non-hashed one, or zero.
+ */
count = 1 + hweight32(ipa->filter_map);
hash_count = hash_mem->size ? count : 0;
} else {
+ /* The size of a route table region determines the number
+ * of entries it has.
+ */
count = mem->size / sizeof(__le64);
hash_count = hash_mem->size / sizeof(__le64);
}
@@ -445,13 +450,42 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
hash_size, hash_mem->offset, hash_addr);
+ if (!filter)
+ return;
+
+ /* Zero the unused space in the filter table */
+ zero_offset = mem->offset + size;
+ zero_size = mem->size - size;
+ ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
+ ipa->zero_addr, true);
+ if (!hash_size)
+ return;
+
+ /* Zero the unused space in the hashed filter table */
+ zero_offset = hash_mem->offset + hash_size;
+ zero_size = hash_mem->size - hash_size;
+ ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
+ ipa->zero_addr, true);
}
int ipa_table_setup(struct ipa *ipa)
{
struct gsi_trans *trans;
- trans = ipa_cmd_trans_alloc(ipa, 4);
+ /* We will need at most 8 TREs:
+ * - IPv4:
+ * - One for route table initialization (non-hashed and hashed)
+ * - One for filter table initialization (non-hashed and hashed)
+ * - One to zero unused entries in the non-hashed filter table
+ * - One to zero unused entries in the hashed filter table
+ * - IPv6:
+ * - One for route table initialization (non-hashed and hashed)
+ * - One for filter table initialization (non-hashed and hashed)
+ * - One to zero unused entries in the non-hashed filter table
+ * - One to zero unused entries in the hashed filter table
+ * All platforms support at least 8 TREs in a transaction.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 8);
if (!trans) {
dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
return -EBUSY;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 6cd50106e611..c613900c3811 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -291,8 +291,7 @@ void ipvlan_process_multicast(struct work_struct *work)
else
kfree_skb(skb);
}
- if (dev)
- dev_put(dev);
+ dev_put(dev);
cond_resched();
}
}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1d2f4e7d7324..696e245f6d00 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -100,8 +100,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq);
while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
- if (skb->dev)
- dev_put(skb->dev);
+ dev_put(skb->dev);
kfree_skb(skb);
}
ida_destroy(&port->ida);
@@ -140,8 +139,8 @@ static int ipvlan_init(struct net_device *dev)
dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
dev->hw_enc_features |= dev->features;
- dev->gso_max_size = phy_dev->gso_max_size;
- dev->gso_max_segs = phy_dev->gso_max_segs;
+ netif_set_gso_max_size(dev, phy_dev->gso_max_size);
+ netif_set_gso_max_segs(dev, phy_dev->gso_max_segs);
dev->hard_header_len = phy_dev->hard_header_len;
netdev_lockdep_set_classes(dev);
@@ -763,8 +762,8 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- ipvlan->dev->gso_max_size = dev->gso_max_size;
- ipvlan->dev->gso_max_segs = dev->gso_max_segs;
+ netif_set_gso_max_size(ipvlan->dev, dev->gso_max_size);
+ netif_set_gso_max_segs(ipvlan->dev, dev->gso_max_segs);
netdev_update_features(ipvlan->dev);
}
break;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index a1c77cc00416..ed0edf5884ef 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -44,6 +44,7 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
+#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <linux/if_ether.h> /* For the statistics structure. */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d2f830ec2969..6ef5f77be4d0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -900,8 +900,8 @@ static int macvlan_init(struct net_device *dev)
dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
dev->vlan_features |= ALWAYS_ON_OFFLOADS;
dev->hw_enc_features |= dev->features;
- dev->gso_max_size = lowerdev->gso_max_size;
- dev->gso_max_segs = lowerdev->gso_max_segs;
+ netif_set_gso_max_size(dev, lowerdev->gso_max_size);
+ netif_set_gso_max_segs(dev, lowerdev->gso_max_segs);
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
@@ -1171,7 +1171,6 @@ static const struct net_device_ops macvlan_netdev_ops = {
#endif
.ndo_get_iflink = macvlan_dev_get_iflink,
.ndo_features_check = passthru_features_check,
- .ndo_change_proto_down = dev_change_proto_down_generic,
};
void macvlan_common_setup(struct net_device *dev)
@@ -1182,7 +1181,7 @@ void macvlan_common_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN;
dev->netdev_ops = &macvlan_netdev_ops;
dev->needs_free_netdev = true;
dev->header_ops = &macvlan_hard_header_ops;
@@ -1748,8 +1747,8 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_FEAT_CHANGE:
list_for_each_entry(vlan, &port->vlans, list) {
- vlan->dev->gso_max_size = dev->gso_max_size;
- vlan->dev->gso_max_segs = dev->gso_max_segs;
+ netif_set_gso_max_size(vlan->dev, dev->gso_max_size);
+ netif_set_gso_max_segs(vlan->dev, dev->gso_max_segs);
netdev_update_features(vlan->dev);
}
break;
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
index d8f966cedc89..2929471395ae 100644
--- a/drivers/net/mctp/Kconfig
+++ b/drivers/net/mctp/Kconfig
@@ -3,6 +3,24 @@ if MCTP
menu "MCTP Device Drivers"
+config MCTP_SERIAL
+ tristate "MCTP serial transport"
+ depends on TTY
+ select CRC_CCITT
+ help
+ This driver provides an MCTP-over-serial interface, through a
+ serial line-discipline, as defined by DMTF specification "DSP0253 -
+ MCTP Serial Transport Binding". By attaching the ldisc to a serial
+ device, we get a new net device to transport MCTP packets.
+
+ This allows communication with external MCTP endpoints which use
+ serial as their transport. It can also be used as an easy way to
+ provide MCTP connectivity between virtual machines, by forwarding
+ data between simple virtual serial devices.
+
+ Say y here if you need to connect to MCTP endpoints over serial. To
+ compile as a module, use m; the module will be called mctp-serial.
+
endmenu
endif
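
The driver added below implements the DSP0253 byte framing: 0x7e frame
delimiters, a version and byte-count header, 0x7d-escaping of 0x7e/0x7d
in the payload, and a 16-bit CRC-CCITT trailer. A standalone sketch of
one frame encode, assuming the kernel crc_ccitt() semantics (polynomial
0x8408, LSB first, seeded with 0) and writing the FCS bytes unescaped,
as the driver's tx path does:

#include <stdint.h>
#include <stdio.h>

#define BYTE_FRAME 0x7e
#define BYTE_ESC   0x7d

/* Bitwise equivalent of crc_ccitt_byte() as assumed here. */
static uint16_t fcs_byte(uint16_t crc, uint8_t c)
{
	int i;

	crc ^= c;
	for (i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	return crc;
}

/* Append one byte, updating the FCS over the *unescaped* value and
 * applying 0x7d-escaping when requested. */
static size_t put(uint8_t *out, size_t n, uint8_t c, uint16_t *fcs, int esc)
{
	*fcs = fcs_byte(*fcs, c);
	if (esc && (c == BYTE_FRAME || c == BYTE_ESC)) {
		out[n++] = BYTE_ESC;
		c &= ~0x20;
	}
	out[n++] = c;
	return n;
}

/* One DSP0253 frame: 0x7e, version, byte count, escaped payload, then
 * the 16-bit FCS (unescaped, matching the tx path) and 0x7e. */
static size_t encode(const uint8_t *pkt, uint8_t len, uint8_t *out)
{
	uint16_t fcs = 0;
	size_t n = 0;
	int i;

	out[n++] = BYTE_FRAME;
	n = put(out, n, 0x01, &fcs, 0);		/* version */
	n = put(out, n, len, &fcs, 0);		/* byte count */
	for (i = 0; i < len; i++)
		n = put(out, n, pkt[i], &fcs, 1);
	out[n++] = fcs >> 8;
	out[n++] = fcs & 0xff;
	out[n++] = BYTE_FRAME;
	return n;
}

int main(void)
{
	uint8_t pkt[] = { 0x01, 0x7e, 0x02 }, out[64];
	size_t i, n = encode(pkt, sizeof(pkt), out);

	for (i = 0; i < n; i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}
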
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
index e69de29bb2d1..d32622613ce4 100644
--- a/drivers/net/mctp/Makefile
+++ b/drivers/net/mctp/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
new file mode 100644
index 000000000000..eaa6fb3224bc
--- /dev/null
+++ b/drivers/net/mctp/mctp-serial.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Component Transport Protocol (MCTP) - serial transport
+ * binding. This driver is an implementation of the DMTF specification
+ * "DSP0253 - Management Component Transport Protocol (MCTP) Serial Transport
+ * Binding", available at:
+ *
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0253_1.0.0.pdf
+ *
+ * This driver provides DSP0253-type MCTP-over-serial transport using a Linux
+ * tty device, by setting the N_MCTP line discipline on the tty.
+ *
+ * Copyright (c) 2021 Code Construct
+ */
+
+#include <linux/idr.h>
+#include <linux/if_arp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tty.h>
+#include <linux/workqueue.h>
+#include <linux/crc-ccitt.h>
+
+#include <linux/mctp.h>
+#include <net/mctp.h>
+#include <net/pkt_sched.h>
+
+#define MCTP_SERIAL_MTU 68 /* base mtu (64) + mctp header */
+#define MCTP_SERIAL_FRAME_MTU (MCTP_SERIAL_MTU + 6) /* + serial framing */
+
+#define MCTP_SERIAL_VERSION 0x1 /* DSP0253 defines a single version: 1 */
+
+#define BUFSIZE MCTP_SERIAL_FRAME_MTU
+
+#define BYTE_FRAME 0x7e
+#define BYTE_ESC 0x7d
+
+static DEFINE_IDA(mctp_serial_ida);
+
+enum mctp_serial_state {
+ STATE_IDLE,
+ STATE_START,
+ STATE_HEADER,
+ STATE_DATA,
+ STATE_ESCAPE,
+ STATE_TRAILER,
+ STATE_DONE,
+ STATE_ERR,
+};
+
+struct mctp_serial {
+ struct net_device *netdev;
+ struct tty_struct *tty;
+
+ int idx;
+
+ /* protects our rx & tx state machines; held during both paths */
+ spinlock_t lock;
+
+ struct work_struct tx_work;
+ enum mctp_serial_state txstate, rxstate;
+ u16 txfcs, rxfcs, rxfcs_rcvd;
+ unsigned int txlen, rxlen;
+ unsigned int txpos, rxpos;
+ unsigned char txbuf[BUFSIZE],
+ rxbuf[BUFSIZE];
+};
+
+static bool needs_escape(unsigned char c)
+{
+ return c == BYTE_ESC || c == BYTE_FRAME;
+}
+
+static int next_chunk_len(struct mctp_serial *dev)
+{
+ int i;
+
+ /* either we have no bytes to send ... */
+ if (dev->txpos == dev->txlen)
+ return 0;
+
+	/* ... or the next byte to send needs escaping, requiring a
+	 * single-byte chunk ...
+	 */
+ if (needs_escape(dev->txbuf[dev->txpos]))
+ return 1;
+
+ /* ... or we have one or more bytes up to the next escape - this chunk
+ * will be those non-escaped bytes, and does not include the escaped
+ * byte.
+ */
+	for (i = 1; i + dev->txpos < dev->txlen; i++) {
+		if (needs_escape(dev->txbuf[dev->txpos + i]))
+			break;
+	}
+
+ return i;
+}
+
+static int write_chunk(struct mctp_serial *dev, unsigned char *buf, int len)
+{
+ return dev->tty->ops->write(dev->tty, buf, len);
+}
+
+static void mctp_serial_tx_work(struct work_struct *work)
+{
+ struct mctp_serial *dev = container_of(work, struct mctp_serial,
+ tx_work);
+ unsigned char c, buf[3];
+ unsigned long flags;
+ int len, txlen;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* txstate represents the next thing to send */
+ switch (dev->txstate) {
+ case STATE_START:
+ dev->txpos = 0;
+ fallthrough;
+ case STATE_HEADER:
+ buf[0] = BYTE_FRAME;
+ buf[1] = MCTP_SERIAL_VERSION;
+ buf[2] = dev->txlen;
+
+ if (!dev->txpos)
+ dev->txfcs = crc_ccitt(0, buf + 1, 2);
+
+ txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txpos += txlen;
+ if (dev->txpos == 3) {
+ dev->txstate = STATE_DATA;
+ dev->txpos = 0;
+ }
+ }
+ break;
+
+ case STATE_ESCAPE:
+ buf[0] = dev->txbuf[dev->txpos] & ~0x20;
+ txlen = write_chunk(dev, buf, 1);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txpos += txlen;
+ if (dev->txpos == dev->txlen) {
+ dev->txstate = STATE_TRAILER;
+ dev->txpos = 0;
+ }
+ }
+
+ break;
+
+ case STATE_DATA:
+ len = next_chunk_len(dev);
+ if (len) {
+ c = dev->txbuf[dev->txpos];
+ if (len == 1 && needs_escape(c)) {
+ buf[0] = BYTE_ESC;
+ buf[1] = c & ~0x20;
+ dev->txfcs = crc_ccitt_byte(dev->txfcs, c);
+ txlen = write_chunk(dev, buf, 2);
+ if (txlen == 2)
+ dev->txpos++;
+ else if (txlen == 1)
+ dev->txstate = STATE_ESCAPE;
+ else
+ dev->txstate = STATE_ERR;
+ } else {
+ txlen = write_chunk(dev,
+ dev->txbuf + dev->txpos,
+ len);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txfcs = crc_ccitt(dev->txfcs,
+ dev->txbuf +
+ dev->txpos,
+ txlen);
+ dev->txpos += txlen;
+ }
+ }
+ if (dev->txstate == STATE_DATA &&
+ dev->txpos == dev->txlen) {
+ dev->txstate = STATE_TRAILER;
+ dev->txpos = 0;
+ }
+ break;
+ }
+ dev->txstate = STATE_TRAILER;
+ dev->txpos = 0;
+ fallthrough;
+
+ case STATE_TRAILER:
+ buf[0] = dev->txfcs >> 8;
+ buf[1] = dev->txfcs & 0xff;
+ buf[2] = BYTE_FRAME;
+ txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txpos += txlen;
+ if (dev->txpos == 3) {
+ dev->txstate = STATE_DONE;
+ dev->txpos = 0;
+ }
+ }
+ break;
+ default:
+ netdev_err_once(dev->netdev, "invalid tx state %d\n",
+ dev->txstate);
+ }
+
+ if (dev->txstate == STATE_DONE) {
+ dev->netdev->stats.tx_packets++;
+ dev->netdev->stats.tx_bytes += dev->txlen;
+ dev->txlen = 0;
+ dev->txpos = 0;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags);
+ dev->txstate = STATE_IDLE;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ netif_wake_queue(dev->netdev);
+ } else {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+}
+
+static netdev_tx_t mctp_serial_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mctp_serial *dev = netdev_priv(ndev);
+ unsigned long flags;
+
+ WARN_ON(dev->txstate != STATE_IDLE);
+
+ if (skb->len > MCTP_SERIAL_MTU) {
+ dev->netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ netif_stop_queue(dev->netdev);
+ skb_copy_bits(skb, 0, dev->txbuf, skb->len);
+ dev->txpos = 0;
+ dev->txlen = skb->len;
+ dev->txstate = STATE_START;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ set_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags);
+ schedule_work(&dev->tx_work);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_serial_tty_write_wakeup(struct tty_struct *tty)
+{
+ struct mctp_serial *dev = tty->disc_data;
+
+ schedule_work(&dev->tx_work);
+}
+
+static void mctp_serial_rx(struct mctp_serial *dev)
+{
+ struct mctp_skb_cb *cb;
+ struct sk_buff *skb;
+
+ if (dev->rxfcs != dev->rxfcs_rcvd) {
+ dev->netdev->stats.rx_dropped++;
+ dev->netdev->stats.rx_crc_errors++;
+ return;
+ }
+
+ skb = netdev_alloc_skb(dev->netdev, dev->rxlen);
+ if (!skb) {
+ dev->netdev->stats.rx_dropped++;
+ return;
+ }
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_put_data(skb, dev->rxbuf, dev->rxlen);
+ skb_reset_network_header(skb);
+
+ cb = __mctp_cb(skb);
+ cb->halen = 0;
+
+ netif_rx_ni(skb);
+ dev->netdev->stats.rx_packets++;
+ dev->netdev->stats.rx_bytes += dev->rxlen;
+}
+
+static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
+{
+ switch (dev->rxpos) {
+ case 0:
+ if (c == BYTE_FRAME)
+ dev->rxpos++;
+ else
+ dev->rxstate = STATE_ERR;
+ break;
+ case 1:
+ if (c == MCTP_SERIAL_VERSION) {
+ dev->rxpos++;
+ dev->rxfcs = crc_ccitt_byte(0, c);
+ } else {
+ dev->rxstate = STATE_ERR;
+ }
+ break;
+ case 2:
+ if (c > MCTP_SERIAL_FRAME_MTU) {
+ dev->rxstate = STATE_ERR;
+ } else {
+ dev->rxlen = c;
+ dev->rxpos = 0;
+ dev->rxstate = STATE_DATA;
+ dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c);
+ }
+ break;
+ }
+}
+
+static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
+{
+ switch (dev->rxpos) {
+ case 0:
+ dev->rxfcs_rcvd = c << 8;
+ dev->rxpos++;
+ break;
+ case 1:
+ dev->rxfcs_rcvd |= c;
+ dev->rxpos++;
+ break;
+ case 2:
+ if (c != BYTE_FRAME) {
+ dev->rxstate = STATE_ERR;
+ } else {
+ mctp_serial_rx(dev);
+ dev->rxlen = 0;
+ dev->rxpos = 0;
+ dev->rxstate = STATE_IDLE;
+ }
+ break;
+ }
+}
+
+static void mctp_serial_push(struct mctp_serial *dev, unsigned char c)
+{
+ switch (dev->rxstate) {
+ case STATE_IDLE:
+ dev->rxstate = STATE_HEADER;
+ fallthrough;
+ case STATE_HEADER:
+ mctp_serial_push_header(dev, c);
+ break;
+
+ case STATE_ESCAPE:
+ c |= 0x20;
+ fallthrough;
+ case STATE_DATA:
+ if (dev->rxstate != STATE_ESCAPE && c == BYTE_ESC) {
+ dev->rxstate = STATE_ESCAPE;
+ } else {
+ dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c);
+ dev->rxbuf[dev->rxpos] = c;
+ dev->rxpos++;
+ dev->rxstate = STATE_DATA;
+ if (dev->rxpos == dev->rxlen) {
+ dev->rxpos = 0;
+ dev->rxstate = STATE_TRAILER;
+ }
+ }
+ break;
+
+ case STATE_TRAILER:
+ mctp_serial_push_trailer(dev, c);
+ break;
+
+ case STATE_ERR:
+ if (c == BYTE_FRAME)
+ dev->rxstate = STATE_IDLE;
+ break;
+
+ default:
+ netdev_err_once(dev->netdev, "invalid rx state %d\n",
+ dev->rxstate);
+ }
+}
+
+static void mctp_serial_tty_receive_buf(struct tty_struct *tty,
+ const unsigned char *c,
+ const char *f, int len)
+{
+ struct mctp_serial *dev = tty->disc_data;
+ int i;
+
+ if (!netif_running(dev->netdev))
+ return;
+
+ /* we don't (currently) use the flag bytes, just data. */
+ for (i = 0; i < len; i++)
+ mctp_serial_push(dev, c[i]);
+}
+
+static const struct net_device_ops mctp_serial_netdev_ops = {
+ .ndo_start_xmit = mctp_serial_tx,
+};
+
+static void mctp_serial_setup(struct net_device *ndev)
+{
+ ndev->type = ARPHRD_MCTP;
+
+	/* We are limited to the fixed MTU, which is also the MCTP-standard
+	 * baseline MTU, so it is also our minimum.
+	 */
+ ndev->mtu = MCTP_SERIAL_MTU;
+ ndev->max_mtu = MCTP_SERIAL_MTU;
+ ndev->min_mtu = MCTP_SERIAL_MTU;
+
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ ndev->flags = IFF_NOARP;
+ ndev->netdev_ops = &mctp_serial_netdev_ops;
+ ndev->needs_free_netdev = true;
+}
+
+static int mctp_serial_open(struct tty_struct *tty)
+{
+ struct mctp_serial *dev;
+ struct net_device *ndev;
+ char name[32];
+ int idx, rc;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ idx = ida_alloc(&mctp_serial_ida, GFP_KERNEL);
+ if (idx < 0)
+ return idx;
+
+ snprintf(name, sizeof(name), "mctpserial%d", idx);
+ ndev = alloc_netdev(sizeof(*dev), name, NET_NAME_ENUM,
+ mctp_serial_setup);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto free_ida;
+ }
+
+ dev = netdev_priv(ndev);
+ dev->idx = idx;
+ dev->tty = tty;
+ dev->netdev = ndev;
+ dev->txstate = STATE_IDLE;
+ dev->rxstate = STATE_IDLE;
+ spin_lock_init(&dev->lock);
+ INIT_WORK(&dev->tx_work, mctp_serial_tx_work);
+
+ rc = register_netdev(ndev);
+ if (rc)
+ goto free_netdev;
+
+ tty->receive_room = 64 * 1024;
+ tty->disc_data = dev;
+
+ return 0;
+
+free_netdev:
+ free_netdev(ndev);
+
+free_ida:
+ ida_free(&mctp_serial_ida, idx);
+ return rc;
+}
+
+static void mctp_serial_close(struct tty_struct *tty)
+{
+ struct mctp_serial *dev = tty->disc_data;
+ int idx = dev->idx;
+
+ unregister_netdev(dev->netdev);
+ cancel_work_sync(&dev->tx_work);
+ ida_free(&mctp_serial_ida, idx);
+}
+
+static struct tty_ldisc_ops mctp_ldisc = {
+ .owner = THIS_MODULE,
+ .num = N_MCTP,
+ .name = "mctp",
+ .open = mctp_serial_open,
+ .close = mctp_serial_close,
+ .receive_buf = mctp_serial_tty_receive_buf,
+ .write_wakeup = mctp_serial_tty_write_wakeup,
+};
+
+static int __init mctp_serial_init(void)
+{
+ return tty_register_ldisc(&mctp_ldisc);
+}
+
+static void __exit mctp_serial_exit(void)
+{
+ tty_unregister_ldisc(&mctp_ldisc);
+}
+
+module_init(mctp_serial_init);
+module_exit(mctp_serial_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
+MODULE_DESCRIPTION("MCTP Serial transport");
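
The next_chunk_len() helper above walks the tx buffer in runs: zero
bytes left, a single byte that must be escaped, or a run of literal
bytes up to (and not including) the next escapable byte. A standalone
sketch of the same rule, with invented buffer contents:

#include <stdio.h>

#define BYTE_FRAME 0x7e
#define BYTE_ESC   0x7d

static int needs_escape(unsigned char c)
{
	return c == BYTE_ESC || c == BYTE_FRAME;
}

static int next_chunk_len(const unsigned char *buf, int pos, int len)
{
	int i;

	if (pos == len)
		return 0;		/* nothing left to send */
	if (needs_escape(buf[pos]))
		return 1;		/* single escaped byte */
	for (i = 1; pos + i < len; i++)	/* run of literal bytes */
		if (needs_escape(buf[pos + i]))
			break;
	return i;
}

int main(void)
{
	unsigned char buf[] = { 0x01, 0x02, 0x7e, 0x03, 0x7d, 0x04 };
	int pos = 0, n;

	while ((n = next_chunk_len(buf, pos, sizeof(buf))) > 0) {
		printf("chunk at %d, len %d%s\n", pos, n,
		       needs_escape(buf[pos]) ? " (escaped)" : "");
		pos += n;
	}
	return 0;
}
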
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index 6da1fcb25847..bfa16826a6e1 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -141,7 +141,7 @@ config MDIO_MVUSB
config MDIO_MSCC_MIIM
tristate "Microsemi MIIM interface support"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && REGMAP_MMIO
select MDIO_DEVRES
help
This driver supports the MIIM (MDIO) interface found in the network
diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index bd1aea2d5a26..37e0d8b6da07 100644
--- a/drivers/net/mdio/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
@@ -127,7 +127,7 @@ ipq8064_mdio_probe(struct platform_device *pdev)
if (of_address_to_resource(np, 0, &res))
return -ENOMEM;
- base = ioremap(res.start, resource_size(&res));
+ base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
if (!base)
return -ENOMEM;
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 17f98f609ec8..7d2abaf2b2c9 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -10,10 +10,12 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/mdio/mdio-mscc-miim.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#define MSCC_MIIM_REG_STATUS 0x0
#define MSCC_MIIM_STATUS_STAT_PENDING BIT(2)
@@ -35,37 +37,52 @@
#define MSCC_PHY_REG_PHY_STATUS 0x4
struct mscc_miim_dev {
- void __iomem *regs;
- void __iomem *phy_regs;
+ struct regmap *regs;
+ int mii_status_offset;
+ struct regmap *phy_regs;
+ int phy_reset_offset;
};
/* When high resolution timers aren't built-in: we can't use usleep_range() as
* we would sleep way too long. Use udelay() instead.
*/
-#define mscc_readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
-({ \
- if (!IS_ENABLED(CONFIG_HIGH_RES_TIMERS)) \
- readl_poll_timeout_atomic(addr, val, cond, delay_us, \
- timeout_us); \
- readl_poll_timeout(addr, val, cond, delay_us, timeout_us); \
+#define mscc_readx_poll_timeout(op, addr, val, cond, delay_us, timeout_us)\
+({ \
+ if (!IS_ENABLED(CONFIG_HIGH_RES_TIMERS)) \
+ readx_poll_timeout_atomic(op, addr, val, cond, delay_us, \
+ timeout_us); \
+ readx_poll_timeout(op, addr, val, cond, delay_us, timeout_us); \
})
-static int mscc_miim_wait_ready(struct mii_bus *bus)
+static int mscc_miim_status(struct mii_bus *bus)
{
struct mscc_miim_dev *miim = bus->priv;
+ int val, ret;
+
+ ret = regmap_read(miim->regs,
+ MSCC_MIIM_REG_STATUS + miim->mii_status_offset, &val);
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc miim status read error %d\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+static int mscc_miim_wait_ready(struct mii_bus *bus)
+{
u32 val;
- return mscc_readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val,
+ return mscc_readx_poll_timeout(mscc_miim_status, bus, val,
!(val & MSCC_MIIM_STATUS_STAT_BUSY), 50,
10000);
}
static int mscc_miim_wait_pending(struct mii_bus *bus)
{
- struct mscc_miim_dev *miim = bus->priv;
u32 val;
- return mscc_readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val,
+ return mscc_readx_poll_timeout(mscc_miim_status, bus, val,
!(val & MSCC_MIIM_STATUS_STAT_PENDING),
50, 10000);
}
@@ -80,15 +97,29 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
if (ret)
goto out;
- writel(MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
- (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ,
- miim->regs + MSCC_MIIM_REG_CMD);
+ ret = regmap_write(miim->regs,
+ MSCC_MIIM_REG_CMD + miim->mii_status_offset,
+ MSCC_MIIM_CMD_VLD |
+ (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
+ (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) |
+ MSCC_MIIM_CMD_OPR_READ);
+
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc miim write cmd reg error %d\n", ret);
+ goto out;
+ }
ret = mscc_miim_wait_ready(bus);
if (ret)
goto out;
- val = readl(miim->regs + MSCC_MIIM_REG_DATA);
+ ret = regmap_read(miim->regs,
+ MSCC_MIIM_REG_DATA + miim->mii_status_offset, &val);
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc miim read data reg error %d\n", ret);
+ goto out;
+ }
+
if (val & MSCC_MIIM_DATA_ERROR) {
ret = -EIO;
goto out;
@@ -109,12 +140,16 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id,
if (ret < 0)
goto out;
- writel(MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
- (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) |
- (value << MSCC_MIIM_CMD_WRDATA_SHIFT) |
- MSCC_MIIM_CMD_OPR_WRITE,
- miim->regs + MSCC_MIIM_REG_CMD);
+ ret = regmap_write(miim->regs,
+ MSCC_MIIM_REG_CMD + miim->mii_status_offset,
+ MSCC_MIIM_CMD_VLD |
+ (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
+ (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) |
+ (value << MSCC_MIIM_CMD_WRDATA_SHIFT) |
+ MSCC_MIIM_CMD_OPR_WRITE);
+ if (ret < 0)
+ WARN_ONCE(1, "mscc miim write error %d\n", ret);
out:
return ret;
}
@@ -122,51 +157,116 @@ out:
static int mscc_miim_reset(struct mii_bus *bus)
{
struct mscc_miim_dev *miim = bus->priv;
+ int offset = miim->phy_reset_offset;
+ int ret;
if (miim->phy_regs) {
- writel(0, miim->phy_regs + MSCC_PHY_REG_PHY_CFG);
- writel(0x1ff, miim->phy_regs + MSCC_PHY_REG_PHY_CFG);
+ ret = regmap_write(miim->phy_regs,
+ MSCC_PHY_REG_PHY_CFG + offset, 0);
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc reset set error %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(miim->phy_regs,
+ MSCC_PHY_REG_PHY_CFG + offset, 0x1ff);
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc reset clear error %d\n", ret);
+ return ret;
+ }
+
mdelay(500);
}
return 0;
}
-static int mscc_miim_probe(struct platform_device *pdev)
+static const struct regmap_config mscc_miim_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
+ struct regmap *mii_regmap, int status_offset)
{
- struct mscc_miim_dev *dev;
- struct resource *res;
+ struct mscc_miim_dev *miim;
struct mii_bus *bus;
- int ret;
- bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev));
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*miim));
if (!bus)
return -ENOMEM;
- bus->name = "mscc_miim";
+ bus->name = name;
bus->read = mscc_miim_read;
bus->write = mscc_miim_write;
bus->reset = mscc_miim_reset;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
- bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
+ bus->parent = dev;
+
+	miim = bus->priv;
+	miim->regs = mii_regmap;
+	miim->mii_status_offset = status_offset;
+
+	*pbus = bus;
+
+ return 0;
+}
+EXPORT_SYMBOL(mscc_miim_setup);
+
+static int mscc_miim_probe(struct platform_device *pdev)
+{
+ struct regmap *mii_regmap, *phy_regmap = NULL;
+ void __iomem *regs, *phy_regs;
+ struct mscc_miim_dev *miim;
+ struct resource *res;
+ struct mii_bus *bus;
+ int ret;
- dev = bus->priv;
- dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
- if (IS_ERR(dev->regs)) {
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(regs)) {
dev_err(&pdev->dev, "Unable to map MIIM registers\n");
- return PTR_ERR(dev->regs);
+ return PTR_ERR(regs);
+ }
+
+ mii_regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &mscc_miim_regmap_config);
+
+ if (IS_ERR(mii_regmap)) {
+ dev_err(&pdev->dev, "Unable to create MIIM regmap\n");
+ return PTR_ERR(mii_regmap);
}
/* This resource is optional */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
- dev->phy_regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dev->phy_regs)) {
+ phy_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(phy_regs)) {
dev_err(&pdev->dev, "Unable to map internal phy registers\n");
- return PTR_ERR(dev->phy_regs);
+ return PTR_ERR(phy_regs);
}
+
+ phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs,
+ &mscc_miim_regmap_config);
+ if (IS_ERR(phy_regmap)) {
+ dev_err(&pdev->dev, "Unable to create phy register regmap\n");
+ return PTR_ERR(phy_regmap);
+ }
+ }
+
+ ret = mscc_miim_setup(&pdev->dev, &bus, "mscc_miim", mii_regmap, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to setup the MDIO bus\n");
+ return ret;
}
+ miim = bus->priv;
+ miim->phy_regs = phy_regmap;
+ miim->phy_reset_offset = 0;
+
ret = of_mdiobus_register(bus, pdev->dev.of_node);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ccecba908ded..ab8cd5551020 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -721,7 +721,7 @@ restart:
__netpoll_cleanup(&nt->np);
spin_lock_irqsave(&target_list_lock, flags);
- dev_put(nt->np.dev);
+ dev_put_track(nt->np.dev, &nt->np.dev_tracker);
nt->np.dev = NULL;
nt->enabled = false;
stopped = true;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 54345c096a16..08d7b465a0de 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -1622,7 +1622,7 @@ err_params_unregister:
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
err_dl_unregister:
- devlink_resources_unregister(devlink, NULL);
+ devlink_resources_unregister(devlink);
err_vfc_free:
kfree(nsim_dev->vfconfigs);
err_devlink_free:
@@ -1668,7 +1668,7 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev_debugfs_exit(nsim_dev);
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
- devlink_resources_unregister(devlink, NULL);
+ devlink_resources_unregister(devlink);
kfree(nsim_dev->vfconfigs);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index a6a713b31aad..ffd9f84b6644 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -65,7 +65,9 @@ static int nsim_set_coalesce(struct net_device *dev,
}
static void nsim_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct netdevsim *ns = netdev_priv(dev);
@@ -73,7 +75,9 @@ static void nsim_get_ringparam(struct net_device *dev,
}
static int nsim_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct netdevsim *ns = netdev_priv(dev);
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index af36cd647bf5..fd3445374955 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -22,6 +22,11 @@
#define IF_MODE_SPEED_MSK GENMASK(3, 2)
#define IF_MODE_HALF_DUPLEX BIT(4)
+struct lynx_pcs {
+ struct phylink_pcs pcs;
+ struct mdio_device *mdio;
+};
+
enum sgmii_speed {
SGMII_SPEED_10 = 0,
SGMII_SPEED_100 = 1,
@@ -30,6 +35,15 @@ enum sgmii_speed {
};
#define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs)
+#define lynx_to_phylink_pcs(lynx) (&(lynx)->pcs)
+
+struct mdio_device *lynx_get_mdio_device(struct phylink_pcs *pcs)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ return lynx->mdio;
+}
+EXPORT_SYMBOL(lynx_get_mdio_device);
static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
struct phylink_link_state *state)
@@ -329,25 +343,27 @@ static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
.pcs_link_up = lynx_pcs_link_up,
};
-struct lynx_pcs *lynx_pcs_create(struct mdio_device *mdio)
+struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
{
- struct lynx_pcs *lynx_pcs;
+ struct lynx_pcs *lynx;
- lynx_pcs = kzalloc(sizeof(*lynx_pcs), GFP_KERNEL);
- if (!lynx_pcs)
+ lynx = kzalloc(sizeof(*lynx), GFP_KERNEL);
+ if (!lynx)
return NULL;
- lynx_pcs->mdio = mdio;
- lynx_pcs->pcs.ops = &lynx_pcs_phylink_ops;
- lynx_pcs->pcs.poll = true;
+ lynx->mdio = mdio;
+ lynx->pcs.ops = &lynx_pcs_phylink_ops;
+ lynx->pcs.poll = true;
- return lynx_pcs;
+ return lynx_to_phylink_pcs(lynx);
}
EXPORT_SYMBOL(lynx_pcs_create);
-void lynx_pcs_destroy(struct lynx_pcs *pcs)
+void lynx_pcs_destroy(struct phylink_pcs *pcs)
{
- kfree(pcs);
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ kfree(lynx);
}
EXPORT_SYMBOL(lynx_pcs_destroy);
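
The lynx_pcs refactor above switches the public API to the generic
struct phylink_pcs handle and recovers the wrapper with container_of().
A simplified userspace rendering of that embed-and-recover pattern (the
kernel's container_of() additionally type-checks the member):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct phylink_pcs { int poll; };

struct lynx_pcs {
	struct phylink_pcs pcs;	/* embedded generic handle */
	int mdio_addr;		/* invented stand-in for the mdio device */
};

int main(void)
{
	struct lynx_pcs lynx = { .pcs = { .poll = 1 }, .mdio_addr = 3 };
	struct phylink_pcs *handle = &lynx.pcs;	/* what callers now hold */
	struct lynx_pcs *back = container_of(handle, struct lynx_pcs, pcs);

	printf("recovered mdio_addr: %d\n", back->mdio_addr);
	return 0;
}
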
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index dae95d9a07e8..5b6c0d120e09 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -421,7 +421,7 @@ static int at803x_set_wol(struct phy_device *phydev,
const u8 *mac;
int ret, irq_enabled;
unsigned int i;
- const unsigned int offsets[] = {
+ static const unsigned int offsets[] = {
AT803X_LOC_MAC_ADDR_32_47_OFFSET,
AT803X_LOC_MAC_ADDR_16_31_OFFSET,
AT803X_LOC_MAC_ADDR_0_15_OFFSET,
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index bb5104ae4610..3c683e0e40e9 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -854,6 +854,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
/* PHY_GBIT_FEATURES */
+ .soft_reset = genphy_soft_reset,
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.config_intr = bcm_phy_config_intr,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 705c16675b80..c2d1a85ec559 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1235,9 +1235,6 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- if (cfg.flags) /* reserved for future extensions */
- return -EINVAL;
-
if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
return -ERANGE;
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 7113925606f7..b4ff9c5073a3 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -16,6 +16,7 @@
#include <dt-bindings/net/ti-dp83869.h>
#define DP83869_PHY_ID 0x2000a0f1
+#define DP83561_PHY_ID 0x2000a1a4
#define DP83869_DEVADDR 0x1f
#define MII_DP83869_PHYCTRL 0x10
@@ -878,34 +879,35 @@ static int dp83869_phy_reset(struct phy_device *phydev)
return dp83869_config_init(phydev);
}
-static struct phy_driver dp83869_driver[] = {
- {
- PHY_ID_MATCH_MODEL(DP83869_PHY_ID),
- .name = "TI DP83869",
-
- .probe = dp83869_probe,
- .config_init = dp83869_config_init,
- .soft_reset = dp83869_phy_reset,
-
- /* IRQ related */
- .config_intr = dp83869_config_intr,
- .handle_interrupt = dp83869_handle_interrupt,
- .read_status = dp83869_read_status,
- .get_tunable = dp83869_get_tunable,
- .set_tunable = dp83869_set_tunable,
+#define DP83869_PHY_DRIVER(_id, _name) \
+{ \
+ PHY_ID_MATCH_MODEL(_id), \
+ .name = (_name), \
+ .probe = dp83869_probe, \
+ .config_init = dp83869_config_init, \
+ .soft_reset = dp83869_phy_reset, \
+ .config_intr = dp83869_config_intr, \
+ .handle_interrupt = dp83869_handle_interrupt, \
+ .read_status = dp83869_read_status, \
+ .get_tunable = dp83869_get_tunable, \
+ .set_tunable = dp83869_set_tunable, \
+ .get_wol = dp83869_get_wol, \
+ .set_wol = dp83869_set_wol, \
+ .suspend = genphy_suspend, \
+ .resume = genphy_resume, \
+}
- .get_wol = dp83869_get_wol,
- .set_wol = dp83869_set_wol,
+static struct phy_driver dp83869_driver[] = {
+ DP83869_PHY_DRIVER(DP83869_PHY_ID, "TI DP83869"),
+ DP83869_PHY_DRIVER(DP83561_PHY_ID, "TI DP83561-SP"),
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- },
};
module_phy_driver(dp83869_driver);
static struct mdio_device_id __maybe_unused dp83869_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83869_PHY_ID) },
+ { PHY_ID_MATCH_MODEL(DP83561_PHY_ID) },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83869_tbl);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4fcfca4e1702..fa71fb7a66b5 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -189,6 +189,8 @@
#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII 0x4
#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
+#define MII_88E1510_MSCR_2 0x15
+
#define MII_VCT5_TX_RX_MDI0_COUPLING 0x10
#define MII_VCT5_TX_RX_MDI1_COUPLING 0x11
#define MII_VCT5_TX_RX_MDI2_COUPLING 0x12
@@ -1225,28 +1227,28 @@ static int m88e1118_config_aneg(struct phy_device *phydev)
static int m88e1118_config_init(struct phy_device *phydev)
{
+ u16 leds;
int err;
- /* Change address */
- err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE);
- if (err < 0)
- return err;
-
/* Enable 1000 Mbit */
- err = phy_write(phydev, 0x15, 0x1070);
+ err = phy_write_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1121_PHY_MSCR_REG, 0x1070);
if (err < 0)
return err;
- /* Change address */
- err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
- if (err < 0)
- return err;
+ if (phy_interface_is_rgmii(phydev)) {
+ err = m88e1121_config_aneg_rgmii_delays(phydev);
+ if (err < 0)
+ return err;
+ }
/* Adjust LED Control */
if (phydev->dev_flags & MARVELL_PHY_M1118_DNS323_LEDS)
- err = phy_write(phydev, 0x10, 0x1100);
+ leds = 0x1100;
else
- err = phy_write(phydev, 0x10, 0x021e);
+ leds = 0x021e;
+
+ err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, 0x10, leds);
if (err < 0)
return err;
@@ -1254,7 +1256,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- /* Reset address */
+ /* Reset page register */
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -1932,6 +1934,58 @@ static void marvell_get_stats(struct phy_device *phydev,
data[i] = marvell_get_stat(phydev, i);
}
+static int m88e1510_loopback(struct phy_device *phydev, bool enable)
+{
+ int err;
+
+ if (enable) {
+ u16 bmcr_ctl = 0, mscr2_ctl = 0;
+
+ if (phydev->speed == SPEED_1000)
+ bmcr_ctl = BMCR_SPEED1000;
+ else if (phydev->speed == SPEED_100)
+ bmcr_ctl = BMCR_SPEED100;
+
+ if (phydev->duplex == DUPLEX_FULL)
+ bmcr_ctl |= BMCR_FULLDPLX;
+
+ err = phy_write(phydev, MII_BMCR, bmcr_ctl);
+ if (err < 0)
+ return err;
+
+ if (phydev->speed == SPEED_1000)
+ mscr2_ctl = BMCR_SPEED1000;
+ else if (phydev->speed == SPEED_100)
+ mscr2_ctl = BMCR_SPEED100;
+
+ err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1510_MSCR_2, BMCR_SPEED1000 |
+ BMCR_SPEED100, mscr2_ctl);
+ if (err < 0)
+ return err;
+
+		/* Need soft reset to have speed configuration take effect */
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+		/* FIXME: Based on trial-and-error testing, it seems 1G needs
+		 * a delay between soft reset and loopback enablement.
+		 */
+ if (phydev->speed == SPEED_1000)
+ msleep(1000);
+
+ return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ BMCR_LOOPBACK);
+ } else {
+ err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0);
+ if (err < 0)
+ return err;
+
+ return phy_config_aneg(phydev);
+ }
+}
+
static int marvell_vct5_wait_complete(struct phy_device *phydev)
{
int i;
@@ -3078,7 +3132,7 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
- .set_loopback = genphy_loopback,
+ .set_loopback = m88e1510_loopback,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
.cable_test_start = marvell_vct7_cable_test_start,
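
The m88e1510_loopback() path above forces speed via BMCR (and mirrors
the speed bits into MSCR-2) before enabling loopback. A small sketch of
the BMCR value composition, using the standard MII bit values from
linux/mii.h:

#include <stdint.h>
#include <stdio.h>

#define BMCR_SPEED1000	0x0040
#define BMCR_FULLDPLX	0x0100
#define BMCR_SPEED100	0x2000
#define BMCR_LOOPBACK	0x4000

/* Compose a forced-speed BMCR value the way the loopback path does. */
static uint16_t bmcr_for(int speed, int full_duplex)
{
	uint16_t ctl = 0;

	if (speed == 1000)
		ctl = BMCR_SPEED1000;
	else if (speed == 100)
		ctl = BMCR_SPEED100;
	if (full_duplex)
		ctl |= BMCR_FULLDPLX;
	return ctl;
}

int main(void)
{
	printf("1000/full: 0x%04x\n", bmcr_for(1000, 1));
	printf("100/half:  0x%04x\n", bmcr_for(100, 0));
	printf("loopback:  0x%04x\n", bmcr_for(1000, 1) | BMCR_LOOPBACK);
	return 0;
}
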
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c198722e4871..58d602985877 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -176,9 +176,11 @@ static void mdiobus_release(struct device *d)
{
struct mii_bus *bus = to_mii_bus(d);
- BUG_ON(bus->state != MDIOBUS_RELEASED &&
- /* for compatibility with error handling in drivers */
- bus->state != MDIOBUS_ALLOCATED);
+ WARN(bus->state != MDIOBUS_RELEASED &&
+ /* for compatibility with error handling in drivers */
+ bus->state != MDIOBUS_ALLOCATED,
+ "%s: not in RELEASED or ALLOCATED state\n",
+ bus->id);
kfree(bus);
}
@@ -532,8 +534,9 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
bus->parent->of_node->fwnode.flags |=
FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
- BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
- bus->state != MDIOBUS_UNREGISTERED);
+ WARN(bus->state != MDIOBUS_ALLOCATED &&
+ bus->state != MDIOBUS_UNREGISTERED,
+ "%s: not in ALLOCATED or UNREGISTERED state\n", bus->id);
bus->owner = owner;
bus->dev.parent = bus->parent;
@@ -594,7 +597,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
bus->state = MDIOBUS_REGISTERED;
- pr_info("%s: probed\n", bus->name);
+ dev_dbg(&bus->dev, "probed\n");
return 0;
error:
@@ -661,7 +664,8 @@ void mdiobus_free(struct mii_bus *bus)
return;
}
- BUG_ON(bus->state != MDIOBUS_UNREGISTERED);
+ WARN(bus->state != MDIOBUS_UNREGISTERED,
+ "%s: not in UNREGISTERED state\n", bus->id);
bus->state = MDIOBUS_RELEASED;
put_device(&bus->dev);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 44a24b99c894..a7ebcdab415b 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -66,6 +66,19 @@
#define KSZ8081_LMD_SHORT_INDICATOR BIT(12)
#define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0)
+/* LAN8814 general interrupt control/status regs in the GPHY-specific block. */
+#define LAN8814_INTC 0x18
+#define LAN8814_INTS 0x1B
+
+#define LAN8814_INT_LINK_DOWN BIT(2)
+#define LAN8814_INT_LINK_UP BIT(0)
+#define LAN8814_INT_LINK (LAN8814_INT_LINK_UP |\
+ LAN8814_INT_LINK_DOWN)
+
+#define LAN8814_INTR_CTRL_REG 0x34
+#define LAN8814_INTR_CTRL_REG_POLARITY BIT(1)
+#define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0)
+
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
#define KSZ8081_CTRL1_MDIX_STAT BIT(4)
@@ -1565,6 +1578,14 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
#define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17
#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000
+#define LAN8814_QSGMII_SOFT_RESET 0x43
+#define LAN8814_QSGMII_SOFT_RESET_BIT BIT(0)
+#define LAN8814_QSGMII_PCS1G_ANEG_CONFIG 0x13
+#define LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA BIT(3)
+#define LAN8814_ALIGN_SWAP 0x4a
+#define LAN8814_ALIGN_TX_A_B_SWAP 0x1
+#define LAN8814_ALIGN_TX_A_B_SWAP_MASK GENMASK(2, 0)
+
#define LAN8804_ALIGN_SWAP 0x4a
#define LAN8804_ALIGN_TX_A_B_SWAP 0x1
#define LAN8804_ALIGN_TX_A_B_SWAP_MASK GENMASK(2, 0)
@@ -1601,6 +1622,29 @@ static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
return 0;
}
+static int lan8814_config_init(struct phy_device *phydev)
+{
+ int val;
+
+ /* Reset the PHY */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
+ val |= LAN8814_QSGMII_SOFT_RESET_BIT;
+ lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
+
+ /* Disable ANEG with QSGMII PCS Host side */
+ val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
+ val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
+ lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
+
+ /* MDI-X setting for swap A,B transmit */
+ val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
+ val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
+ val |= LAN8814_ALIGN_TX_A_B_SWAP;
+ lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+
+ return 0;
+}
+
static int lan8804_config_init(struct phy_device *phydev)
{
int val;
@@ -1620,6 +1664,58 @@ static int lan8804_config_init(struct phy_device *phydev)
return 0;
}
+static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, LAN8814_INTS);
+ if (irq_status < 0)
+ return IRQ_NONE;
+
+ if (!(irq_status & LAN8814_INT_LINK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static int lan8814_ack_interrupt(struct phy_device *phydev)
+{
+	/* Bits [12:0] hold the interrupt status; the register clears on read. */
+ int rc;
+
+ rc = phy_read(phydev, LAN8814_INTS);
+
+ return (rc < 0) ? rc : 0;
+}
+
+static int lan8814_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ lanphy_write_page_reg(phydev, 4, LAN8814_INTR_CTRL_REG,
+ LAN8814_INTR_CTRL_REG_POLARITY |
+ LAN8814_INTR_CTRL_REG_INTR_ENABLE);
+
+ /* enable / disable interrupts */
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = lan8814_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+ err = lan8814_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -1630,8 +1726,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = kszphy_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8021,
.phy_id_mask = 0x00ffffff,
@@ -1645,8 +1741,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8031,
.phy_id_mask = 0x00ffffff,
@@ -1660,8 +1756,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8041,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -1692,8 +1788,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.name = "Micrel KSZ8051",
/* PHY_BASIC_FEATURES */
@@ -1706,8 +1802,8 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.match_phy_device = ksz8051_match_phy_device,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8001,
.name = "Micrel KSZ8001 or KS8721",
@@ -1721,8 +1817,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8081,
.name = "Micrel KSZ8081 or KSZ8091",
@@ -1752,8 +1848,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = ksz8061_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
@@ -1768,8 +1864,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
.read_mmd = genphy_read_mmd_unsupported,
.write_mmd = genphy_write_mmd_unsupported,
}, {
@@ -1787,12 +1883,13 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
+ .suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
.phy_id = PHY_ID_LAN8814,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip INDY Gigabit Quad PHY",
+ .config_init = lan8814_config_init,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.soft_reset = genphy_soft_reset,
@@ -1802,6 +1899,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = kszphy_resume,
+ .config_intr = lan8814_config_intr,
+ .handle_interrupt = lan8814_handle_interrupt,
}, {
.phy_id = PHY_ID_LAN8804,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -1829,7 +1928,7 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
+ .suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8873MLL,
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index edb951695b13..34f829845d06 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -1057,9 +1057,6 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- if (cfg.flags)
- return -EINVAL;
-
switch (cfg.tx_type) {
case HWTSTAMP_TX_ONESTEP_SYNC:
one_step = true;
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 91a327f67a42..06fdbae509a7 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -97,6 +97,11 @@
#define VEND1_TX_IPG_LENGTH 0xAFD1
#define COUNTER_EN BIT(15)
+#define VEND1_PTP_CONFIG 0x1102
+#define EXT_TRG_EDGE BIT(1)
+#define PPS_OUT_POL BIT(2)
+#define PPS_OUT_EN BIT(3)
+
#define VEND1_LTC_LOAD_CTRL 0x1105
#define READ_LTC BIT(2)
#define LOAD_LTC BIT(0)
@@ -132,6 +137,13 @@
#define VEND1_EGR_RING_DATA_3 0x1151
#define VEND1_EGR_RING_CTRL 0x1154
+#define VEND1_EXT_TRG_TS_DATA_0 0x1121
+#define VEND1_EXT_TRG_TS_DATA_1 0x1122
+#define VEND1_EXT_TRG_TS_DATA_2 0x1123
+#define VEND1_EXT_TRG_TS_DATA_3 0x1124
+#define VEND1_EXT_TRG_TS_DATA_4 0x1125
+#define VEND1_EXT_TRG_TS_CTRL 0x1126
+
#define RING_DATA_0_DOMAIN_NUMBER GENMASK(7, 0)
#define RING_DATA_0_MSG_TYPE GENMASK(11, 8)
#define RING_DATA_0_SEC_4_2 GENMASK(14, 2)
@@ -162,6 +174,17 @@
#define VEND1_RX_PIPE_DLY_NS 0x114B
#define VEND1_RX_PIPEDLY_SUBNS 0x114C
+#define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40
+#define GPIO_FUNC_EN BIT(15)
+#define GPIO_FUNC_PTP BIT(6)
+#define GPIO_SIGNAL_PTP_TRIGGER 0x01
+#define GPIO_SIGNAL_PPS_OUT 0x12
+#define GPIO_DISABLE 0
+#define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
+ GPIO_SIGNAL_PPS_OUT)
+#define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
+ GPIO_SIGNAL_PTP_TRIGGER)
+
#define RGMII_PERIOD_PS 8000U
#define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
#define MIN_ID_PS 1644U
@@ -199,6 +222,9 @@ struct nxp_c45_phy {
int hwts_rx;
u32 tx_delay;
u32 rx_delay;
+ struct timespec64 extts_ts;
+ int extts_index;
+ bool extts;
};
struct nxp_c45_phy_stats {
@@ -339,6 +365,21 @@ static bool nxp_c45_match_ts(struct ptp_header *header,
header->domain_number == hwts->domain_number;
}
+static void nxp_c45_get_extts(struct nxp_c45_phy *priv,
+ struct timespec64 *extts)
+{
+ extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_EXT_TRG_TS_DATA_0);
+ extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_EXT_TRG_TS_DATA_1) << 16;
+ extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_EXT_TRG_TS_DATA_2);
+ extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_EXT_TRG_TS_DATA_3) << 16;
+ phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EXT_TRG_TS_CTRL,
+ RING_DONE);
+}
+
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
struct nxp_c45_hwts *hwts)
{
@@ -409,6 +450,7 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
bool poll_txts = nxp_c45_poll_txts(priv->phydev);
struct skb_shared_hwtstamps *shhwtstamps_rx;
+ struct ptp_clock_event event;
struct nxp_c45_hwts hwts;
bool reschedule = false;
struct timespec64 ts;
@@ -439,9 +481,181 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
netif_rx_ni(skb);
}
+ if (priv->extts) {
+ nxp_c45_get_extts(priv, &ts);
+ if (timespec64_compare(&ts, &priv->extts_ts) != 0) {
+ priv->extts_ts = ts;
+ event.index = priv->extts_index;
+ event.type = PTP_CLOCK_EXTTS;
+ event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
+ ptp_clock_event(priv->ptp_clock, &event);
+ }
+ reschedule = true;
+ }
+
return reschedule ? 1 : -1;
}
+static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
+ int pin, u16 pin_cfg)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
+}
+
+static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
+ struct ptp_perout_request *perout, int on)
+{
+ struct phy_device *phydev = priv->phydev;
+ int pin;
+
+ if (perout->flags & ~PTP_PEROUT_PHASE)
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
+ if (pin < 0)
+ return pin;
+
+ if (!on) {
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
+ PPS_OUT_EN);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
+ PPS_OUT_POL);
+
+ nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
+
+ return 0;
+ }
+
+ /* The PPS signal is fixed to 1 second and is always generated when the
+ * seconds counter is incremented. The start time is not configurable.
+ * If the clock is adjusted, the PPS signal is automatically readjusted.
+ */
+ if (perout->period.sec != 1 || perout->period.nsec != 0) {
+ phydev_warn(phydev, "The period can be set only to 1 second.");
+ return -EINVAL;
+ }
+
+ if (!(perout->flags & PTP_PEROUT_PHASE)) {
+ if (perout->start.sec != 0 || perout->start.nsec != 0) {
+ phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
+ return -EINVAL;
+ }
+ } else {
+ if (perout->phase.nsec != 0 &&
+ perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
+ phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
+ return -EINVAL;
+ }
+
+ if (perout->phase.nsec == 0)
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PTP_CONFIG, PPS_OUT_POL);
+ else
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PTP_CONFIG, PPS_OUT_POL);
+ }
+
+ nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);
+
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, PPS_OUT_EN);
+
+ return 0;
+}
+
+static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
+ struct ptp_extts_request *extts, int on)
+{
+ int pin;
+
+ if (extts->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Sampling on both edges is not supported */
+ if ((extts->flags & PTP_RISING_EDGE) &&
+ (extts->flags & PTP_FALLING_EDGE))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
+ if (pin < 0)
+ return pin;
+
+ if (!on) {
+ nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
+ priv->extts = false;
+
+ return 0;
+ }
+
+ if (extts->flags & PTP_RISING_EDGE)
+ phy_clear_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_PTP_CONFIG, EXT_TRG_EDGE);
+
+ if (extts->flags & PTP_FALLING_EDGE)
+ phy_set_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
+ VEND1_PTP_CONFIG, EXT_TRG_EDGE);
+
+ nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
+ priv->extts = true;
+ priv->extts_index = extts->index;
+ ptp_schedule_worker(priv->ptp_clock, 0);
+
+ return 0;
+}
+
+static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
+
+ switch (req->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return nxp_c45_extts_enable(priv, &req->extts, on);
+ case PTP_CLK_REQ_PEROUT:
+ return nxp_c45_perout_enable(priv, &req->perout, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
+ { "nxp_c45_gpio0", 0, PTP_PF_NONE},
+ { "nxp_c45_gpio1", 1, PTP_PF_NONE},
+ { "nxp_c45_gpio2", 2, PTP_PF_NONE},
+ { "nxp_c45_gpio3", 3, PTP_PF_NONE},
+ { "nxp_c45_gpio4", 4, PTP_PF_NONE},
+ { "nxp_c45_gpio5", 5, PTP_PF_NONE},
+ { "nxp_c45_gpio6", 6, PTP_PF_NONE},
+ { "nxp_c45_gpio7", 7, PTP_PF_NONE},
+ { "nxp_c45_gpio8", 8, PTP_PF_NONE},
+ { "nxp_c45_gpio9", 9, PTP_PF_NONE},
+ { "nxp_c45_gpio10", 10, PTP_PF_NONE},
+ { "nxp_c45_gpio11", 11, PTP_PF_NONE},
+};
+
+static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
+ return -EINVAL;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ case PTP_PF_EXTTS:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
priv->caps = (struct ptp_clock_info) {
@@ -452,7 +666,13 @@ static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
.adjtime = nxp_c45_ptp_adjtime,
.gettimex64 = nxp_c45_ptp_gettimex64,
.settime64 = nxp_c45_ptp_settime64,
+ .enable = nxp_c45_ptp_enable,
+ .verify = nxp_c45_ptp_verify_pin,
.do_aux_work = nxp_c45_do_aux_work,
+ .pin_config = nxp_c45_ptp_pins,
+ .n_pins = ARRAY_SIZE(nxp_c45_ptp_pins),
+ .n_ext_ts = 1,
+ .n_per_out = 1,
};
priv->ptp_clock = ptp_clock_register(&priv->caps,
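
The perout path above accepts only the fixed 1 Hz PPS, with an optional
phase of 0 or half a second. A standalone sketch of just that request
validation (stand-in types; the real code operates on struct
ptp_perout_request, and the start-time check for the no-phase case is
omitted here):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct ts64 { long long sec, nsec; };

static int pps_request_valid(struct ts64 period, struct ts64 phase,
			     int has_phase)
{
	if (period.sec != 1 || period.nsec != 0)
		return 0;	/* only a 1 second period is supported */
	if (has_phase && phase.nsec != 0 && phase.nsec != NSEC_PER_SEC / 2)
		return 0;	/* phase must be 0 or 500000000 ns */
	return 1;
}

int main(void)
{
	struct ts64 one_sec = { 1, 0 }, half_sec = { 0, 500000000 };

	printf("1s period: %d\n",
	       pps_request_valid(one_sec, (struct ts64){ 0, 0 }, 0));
	printf("0.5s period: %d\n",
	       pps_request_valid(half_sec, (struct ts64){ 0, 0 }, 0));
	return 0;
}
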
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 2870c33b8975..271fc01f7f7f 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -162,11 +162,11 @@ static const struct phy_setting settings[] = {
PHY_SETTING( 2500, FULL, 2500baseT_Full ),
PHY_SETTING( 2500, FULL, 2500baseX_Full ),
/* 1G */
- PHY_SETTING( 1000, FULL, 1000baseKX_Full ),
PHY_SETTING( 1000, FULL, 1000baseT_Full ),
PHY_SETTING( 1000, HALF, 1000baseT_Half ),
PHY_SETTING( 1000, FULL, 1000baseT1_Full ),
PHY_SETTING( 1000, FULL, 1000baseX_Full ),
+ PHY_SETTING( 1000, FULL, 1000baseKX_Full ),
/* 100M */
PHY_SETTING( 100, FULL, 100baseT_Full ),
PHY_SETTING( 100, FULL, 100baseT1_Full ),
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 74d8e1dc125f..ce0bb5951b81 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1746,6 +1746,9 @@ void phy_detach(struct phy_device *phydev)
phy_driver_is_genphy_10g(phydev))
device_release_driver(&phydev->mdio.dev);
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+
/*
* The phydev might go away on the put_device() below, so avoid
* a use-after-free bug by reading the underlying bus first.
@@ -1757,9 +1760,6 @@ void phy_detach(struct phy_device *phydev)
ndev_owner = dev->dev.parent->driver->owner;
if (ndev_owner != bus->owner)
module_put(bus->owner);
-
- /* Assert the reset signal */
- phy_device_reset(phydev, 1);
}
EXPORT_SYMBOL(phy_detach);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index ea82ea5660e7..420201858564 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -166,6 +166,307 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
+static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
+ unsigned long caps)
+{
+ if (caps & MAC_SYM_PAUSE)
+ __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes);
+
+ if (caps & MAC_ASYM_PAUSE)
+ __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes);
+
+ if (caps & MAC_10HD)
+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes);
+
+ if (caps & MAC_10FD)
+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes);
+
+ if (caps & MAC_100HD) {
+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT, linkmodes);
+ }
+
+ if (caps & MAC_100FD) {
+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_1000HD)
+ __set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, linkmodes);
+
+ if (caps & MAC_1000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_2500FD) {
+ __set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_5000FD)
+ __set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, linkmodes);
+
+ if (caps & MAC_10000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_25000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_40000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_50000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_56000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_100000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_200000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_400000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes);
+ }
+}
+
+/**
+ * phylink_get_linkmodes() - get acceptable link modes
+ * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * @interface: phy interface mode defined by &typedef phy_interface_t
+ * @mac_capabilities: bitmask of MAC capabilities
+ *
+ * Set all possible pause, speed and duplex linkmodes in @linkmodes that
+ * are supported by the @interface mode and @mac_capabilities. @linkmodes
+ * must have been initialised previously.
+ */
+void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
+ unsigned long mac_capabilities)
+{
+ unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_USXGMII:
+ caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ caps |= MAC_1000HD | MAC_1000FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_MII:
+ caps |= MAC_10HD | MAC_10FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_100BASEX:
+ caps |= MAC_100HD | MAC_100FD;
+ break;
+
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ caps |= MAC_1000HD;
+ fallthrough;
+ case PHY_INTERFACE_MODE_TRGMII:
+ caps |= MAC_1000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ caps |= MAC_2500FD;
+ break;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ caps |= MAC_5000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ caps |= MAC_10000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_25GBASER:
+ caps |= MAC_25000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_XLGMII:
+ caps |= MAC_40000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_INTERNAL:
+ caps |= ~0;
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MAX:
+ break;
+ }
+
+ phylink_caps_to_linkmodes(linkmodes, caps & mac_capabilities);
+}
+EXPORT_SYMBOL_GPL(phylink_get_linkmodes);
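
For reference, a minimal caller of the new helper might look like the sketch
below; it is an illustration only (the interface and capability choices are
assumptions, not part of this patch), showing how a driver could derive an
ethtool mask for a MAC doing SGMII up to 1G full duplex with symmetric pause:

	static void example_fill_supported(unsigned long *supported)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

		/* Autoneg/port bits remain the caller's responsibility */
		phylink_set(mask, Autoneg);

		phylink_get_linkmodes(mask, PHY_INTERFACE_MODE_SGMII,
				      MAC_SYM_PAUSE | MAC_10FD |
				      MAC_100FD | MAC_1000FD);

		linkmode_copy(supported, mask);
	}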
+
+/**
+ * phylink_generic_validate() - generic validate() callback implementation
+ * @config: a pointer to a &struct phylink_config.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Generic implementation of the validate() callback that MAC drivers can
+ * use when they pass the range of supported interfaces and MAC capabilities.
+ * This makes use of phylink_get_linkmodes().
+ */
+void phylink_generic_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_get_linkmodes(mask, state->interface, config->mac_capabilities);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+EXPORT_SYMBOL_GPL(phylink_generic_validate);
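
A converted MAC driver opts into this helper by filling in its capabilities
and supported interfaces at probe time and pointing validate() at the new
function; a hedged sketch (the foo_ names are placeholders, not part of this
patch):

	static const struct phylink_mac_ops foo_phylink_ops = {
		.validate = phylink_generic_validate,
		/* .mac_config, .mac_link_up, ... */
	};

	static void foo_phylink_setup(struct phylink_config *config)
	{
		config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					   MAC_10 | MAC_100 | MAC_1000FD;

		__set_bit(PHY_INTERFACE_MODE_RGMII,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  config->supported_interfaces);
	}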
+
+static int phylink_validate_mac_and_pcs(struct phylink *pl,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct phylink_pcs *pcs;
+ int ret;
+
+ /* Get the PCS for this interface mode */
+ if (pl->mac_ops->mac_select_pcs) {
+ pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+ } else {
+ pcs = pl->pcs;
+ }
+
+ if (pcs) {
+ /* The PCS, if present, must be set up before phylink_create()
+ * has been called. If its ops are not initialised, print an
+ * error and backtrace rather than oopsing the kernel.
+ */
+ if (!pcs->ops) {
+ phylink_err(pl, "interface %s: uninitialised PCS\n",
+ phy_modes(state->interface));
+ dump_stack();
+ return -EINVAL;
+ }
+
+ /* Validate the link parameters with the PCS */
+ if (pcs->ops->pcs_validate) {
+ ret = pcs->ops->pcs_validate(pcs, supported, state);
+ if (ret < 0 || phylink_is_empty_linkmode(supported))
+ return -EINVAL;
+
+ /* Ensure the advertising mask is a subset of the
+ * supported mask.
+ */
+ linkmode_and(state->advertising, state->advertising,
+ supported);
+ }
+ }
+
+ /* Then validate the link parameters with the MAC */
+ pl->mac_ops->validate(pl->config, supported, state);
+
+ return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
+}
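
The pcs_validate() hook consulted above lets a PCS trim the supported mask
before the MAC sees it, or veto the interface mode outright with a negative
return. A sketch of such a callback (the foo_ naming is hypothetical):

	static int foo_pcs_validate(struct phylink_pcs *pcs,
				    unsigned long *supported,
				    const struct phylink_link_state *state)
	{
		/* Hypothetical PCS without half-duplex support at 1G */
		if (state->interface == PHY_INTERFACE_MODE_SGMII)
			phylink_clear(supported, 1000baseT_Half);

		return 0;
	}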
+
static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
@@ -181,9 +482,10 @@ static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
t = *state;
t.interface = intf;
- pl->mac_ops->validate(pl->config, s, &t);
- linkmode_or(all_s, all_s, s);
- linkmode_or(all_adv, all_adv, t.advertising);
+ if (!phylink_validate_mac_and_pcs(pl, s, &t)) {
+ linkmode_or(all_s, all_s, s);
+ linkmode_or(all_adv, all_adv, t.advertising);
+ }
}
}
@@ -205,9 +507,7 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported,
return -EINVAL;
}
- pl->mac_ops->validate(pl->config, supported, state);
-
- return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
+ return phylink_validate_mac_and_pcs(pl, supported, state);
}
static int phylink_parse_fixedlink(struct phylink *pl,
@@ -489,7 +789,7 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl)
phylink_autoneg_inband(pl->cur_link_an_mode)) {
if (pl->pcs_ops)
pl->pcs_ops->pcs_an_restart(pl->pcs);
- else
+ else if (pl->config->legacy_pre_march2020)
pl->mac_ops->mac_an_restart(pl->config);
}
}
@@ -497,10 +797,21 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl)
static void phylink_major_config(struct phylink *pl, bool restart,
const struct phylink_link_state *state)
{
+ struct phylink_pcs *pcs = NULL;
int err;
phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
+ if (pl->mac_ops->mac_select_pcs) {
+ pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
+ if (IS_ERR(pcs)) {
+ phylink_err(pl,
+ "mac_select_pcs unexpectedly failed: %pe\n",
+ pcs);
+ return;
+ }
+ }
+
if (pl->mac_ops->mac_prepare) {
err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode,
state->interface);
@@ -511,6 +822,12 @@ static void phylink_major_config(struct phylink *pl, bool restart,
}
}
+ /* If we have a new PCS, switch to the new PCS after preparing the MAC
+ * for the change.
+ */
+ if (pcs)
+ phylink_set_pcs(pl, pcs);
+
phylink_mac_config(pl, state);
if (pl->pcs_ops) {
@@ -550,7 +867,7 @@ static int phylink_change_inband_advert(struct phylink *pl)
if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
return 0;
- if (!pl->pcs_ops) {
+ if (!pl->pcs_ops && pl->config->legacy_pre_march2020) {
/* Legacy method */
phylink_mac_config(pl, &pl->link_config);
phylink_mac_pcs_an_restart(pl);
@@ -601,7 +918,8 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
if (pl->pcs_ops)
pl->pcs_ops->pcs_get_state(pl->pcs, state);
- else if (pl->mac_ops->mac_pcs_get_state)
+ else if (pl->mac_ops->mac_pcs_get_state &&
+ pl->config->legacy_pre_march2020)
pl->mac_ops->mac_pcs_get_state(pl->config, state);
else
state->link = 0;
@@ -795,12 +1113,11 @@ static void phylink_resolve(struct work_struct *w)
}
phylink_major_config(pl, false, &link_state);
pl->link_config.interface = link_state.interface;
- } else if (!pl->pcs_ops) {
+ } else if (!pl->pcs_ops && pl->config->legacy_pre_march2020) {
/* The interface remains unchanged, only the speed,
* duplex or pause settings have changed. Call the
* old mac_config() method to configure the MAC/PCS
- * only if we do not have a PCS installed (an
- * unconverted user.)
+ * only if we have a legacy MAC driver without a PCS.
*/
phylink_mac_config(pl, &link_state);
}
@@ -837,6 +1154,12 @@ static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
}
}
+static void phylink_enable_and_run_resolve(struct phylink *pl, int bit)
+{
+ clear_bit(bit, &pl->phylink_disable_state);
+ phylink_run_resolve(pl);
+}
+
static void phylink_fixed_poll(struct timer_list *t)
{
struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -896,6 +1219,14 @@ struct phylink *phylink_create(struct phylink_config *config,
struct phylink *pl;
int ret;
+ /* Validate the supplied configuration */
+ if (mac_ops->mac_select_pcs &&
+ phy_interface_empty(config->supported_interfaces)) {
+ dev_err(config->dev,
+ "phylink: error: empty supported_interfaces but mac_select_pcs() method present\n");
+ return ERR_PTR(-EINVAL);
+ }
+
pl = kzalloc(sizeof(*pl), GFP_KERNEL);
if (!pl)
return ERR_PTR(-ENOMEM);
@@ -963,9 +1294,10 @@ EXPORT_SYMBOL_GPL(phylink_create);
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @pcs: a pointer to the &struct phylink_pcs
*
- * Bind the MAC PCS to phylink. This may be called after phylink_create(),
- * in mac_prepare() or mac_config() methods if it is desired to dynamically
- * change the PCS.
+ * Bind the MAC PCS to phylink. This may be called after phylink_create().
+ * If it is desired to dynamically change the PCS, then the preferred method
+ * is to use mac_select_pcs(), but phylink_set_pcs() may also be called
+ * from the mac_prepare() or mac_config() methods.
*
* Please note that there are behavioural changes with the mac_config()
* callback if a PCS is present (denoting a newer setup) so removing a PCS
@@ -976,6 +1308,14 @@ void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs)
{
pl->pcs = pcs;
pl->pcs_ops = pcs->ops;
+
+ if (!pl->phylink_disable_state &&
+ pl->cfg_link_an_mode == MLO_AN_INBAND) {
+ if (pl->config->pcs_poll || pcs->poll)
+ mod_timer(&pl->link_poll, jiffies + HZ);
+ else
+ del_timer(&pl->link_poll);
+ }
}
EXPORT_SYMBOL_GPL(phylink_set_pcs);
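
A mac_select_pcs() implementation typically just maps interface modes onto
the PCS instances the driver owns; a sketch (struct foo_priv and its fields
are assumptions for illustration):

	static struct phylink_pcs *
	foo_mac_select_pcs(struct phylink_config *config,
			   phy_interface_t interface)
	{
		struct foo_priv *priv = container_of(config, struct foo_priv,
						     phylink_config);

		switch (interface) {
		case PHY_INTERFACE_MODE_SGMII:
		case PHY_INTERFACE_MODE_1000BASEX:
			return &priv->sgmii_pcs;
		case PHY_INTERFACE_MODE_10GBASER:
			return &priv->xpcs;
		default:
			return NULL; /* no PCS required for this mode */
		}
	}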
@@ -1096,7 +1436,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
mutex_unlock(&phy->lock);
phylink_dbg(pl,
- "phy: setting supported %*pb advertising %*pb\n",
+ "phy: %s setting supported %*pb advertising %*pb\n",
+ phy_modes(interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
__ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
@@ -1214,6 +1555,12 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
if (!phy_dev)
return -ENODEV;
+ /* Use PHY device/driver interface */
+ if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
+ pl->link_interface = phy_dev->interface;
+ pl->link_config.interface = pl->link_interface;
+ }
+
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
if (ret) {
@@ -1314,8 +1661,7 @@ void phylink_start(struct phylink *pl)
*/
phylink_mac_initial_config(pl, true);
- clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
- phylink_run_resolve(pl);
+ phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_STOPPED);
if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
int irq = gpiod_to_irq(pl->link_gpio);
@@ -1456,8 +1802,7 @@ void phylink_resume(struct phylink *pl)
phylink_mac_initial_config(pl, true);
/* Re-enable and re-resolve the link parameters */
- clear_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
- phylink_run_resolve(pl);
+ phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_MAC_WOL);
} else {
phylink_start(pl);
}
@@ -2386,8 +2731,7 @@ static void phylink_sfp_link_up(void *upstream)
ASSERT_RTNL();
- clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
- phylink_run_resolve(pl);
+ phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK);
}
/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII
@@ -2587,31 +2931,22 @@ void phylink_decode_usxgmii_word(struct phylink_link_state *state,
EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word);
/**
- * phylink_mii_c22_pcs_get_state() - read the MAC PCS state
- * @pcs: a pointer to a &struct mdio_device.
+ * phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers
* @state: a pointer to a &struct phylink_link_state.
+ * @bmsr: The value of the %MII_BMSR register
+ * @lpa: The value of the %MII_LPA register
*
* Helper for MAC PCS supporting the 802.3 clause 22 register set for
* clause 37 negotiation and/or SGMII control.
*
- * Read the MAC PCS state from the MII device configured in @config and
- * parse the Clause 37 or Cisco SGMII link partner negotiation word into
- * the phylink @state structure. This is suitable to be directly plugged
- * into the mac_pcs_get_state() member of the struct phylink_mac_ops
- * structure.
+ * Parse the Clause 37 or Cisco SGMII link partner negotiation word into
+ * the phylink @state structure. This is suitable to be used for implementing
+ * the mac_pcs_get_state() member of the struct phylink_mac_ops structure if
+ * accessing @bmsr and @lpa cannot be done with MDIO directly.
*/
-void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
- struct phylink_link_state *state)
+void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
+ u16 bmsr, u16 lpa)
{
- int bmsr, lpa;
-
- bmsr = mdiodev_read(pcs, MII_BMSR);
- lpa = mdiodev_read(pcs, MII_LPA);
- if (bmsr < 0 || lpa < 0) {
- state->link = false;
- return;
- }
-
state->link = !!(bmsr & BMSR_LSTATUS);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
/* If there is no link or autonegotiation is disabled, the LP advertisement
@@ -2639,28 +2974,54 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
break;
}
}
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
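
As the new kernel-doc notes, the decode helper suits PCS hardware whose
BMSR/LPA images are not reachable over MDIO. A sketch of a pcs_get_state()
built on it (the MMIO base and register offsets are invented for
illustration):

	static void foo_pcs_get_state(struct phylink_pcs *pcs,
				      struct phylink_link_state *state)
	{
		struct foo_pcs *fp = container_of(pcs, struct foo_pcs, pcs);
		u16 bmsr, lpa;

		/* Hypothetical MMIO shadows of the clause 22 registers */
		bmsr = readl(fp->base + FOO_PCS_BMSR);
		lpa = readl(fp->base + FOO_PCS_LPA);

		phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
	}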
+
+/**
+ * phylink_mii_c22_pcs_get_state() - read the MAC PCS state
+ * @pcs: a pointer to a &struct mdio_device.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Helper for MAC PCS supporting the 802.3 clause 22 register set for
+ * clause 37 negotiation and/or SGMII control.
+ *
+ * Read the MAC PCS state from the MII device configured in @config and
+ * parse the Clause 37 or Cisco SGMII link partner negotiation word into
+ * the phylink @state structure. This is suitable to be directly plugged
+ * into the mac_pcs_get_state() member of the struct phylink_mac_ops
+ * structure.
+ */
+void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ int bmsr, lpa;
+
+ bmsr = mdiodev_read(pcs, MII_BMSR);
+ lpa = mdiodev_read(pcs, MII_LPA);
+ if (bmsr < 0 || lpa < 0) {
+ state->link = false;
+ return;
+ }
+
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+}
EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_get_state);
/**
- * phylink_mii_c22_pcs_set_advertisement() - configure the clause 37 PCS
+ * phylink_mii_c22_pcs_encode_advertisement() - encode the clause 37 PCS
* advertisement
- * @pcs: a pointer to a &struct mdio_device.
* @interface: the PHY interface mode being configured
* @advertising: the ethtool advertisement mask
*
* Helper for MAC PCS supporting the 802.3 clause 22 register set for
* clause 37 negotiation and/or SGMII control.
*
- * Configure the clause 37 PCS advertisement as specified by @state. This
- * does not trigger a renegotiation; phylink will do that via the
- * mac_an_restart() method of the struct phylink_mac_ops structure.
+ * Encode the clause 37 PCS advertisement as specified by @interface and
+ * @advertising.
*
- * Returns negative error code on failure to configure the advertisement,
- * zero if no change has been made, or one if the advertisement has changed.
+ * Return: The encoded advertisement word, or ``-EINVAL`` if it should not be changed.
*/
-int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
- phy_interface_t interface,
- const unsigned long *advertising)
+int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
+ const unsigned long *advertising)
{
u16 adv;
@@ -2674,18 +3035,15 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
advertising))
adv |= ADVERTISE_1000XPSE_ASYM;
-
- return mdiodev_modify_changed(pcs, MII_ADVERTISE, 0xffff, adv);
-
+ return adv;
case PHY_INTERFACE_MODE_SGMII:
- return mdiodev_modify_changed(pcs, MII_ADVERTISE, 0xffff, 0x0001);
-
+ return 0x0001;
default:
/* Nothing to do for other modes */
- return 0;
+ return -EINVAL;
}
}
-EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_set_advertisement);
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_encode_advertisement);
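
Splitting the encoder out lets a PCS that is not behind an MDIO bus reuse
the clause 37 encoding and write the word through its own register path; a
sketch under that assumption (foo_ names and the MMIO offset are invented):

	static int foo_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
				  phy_interface_t interface,
				  const unsigned long *advertising,
				  bool permit_pause_to_mac)
	{
		struct foo_pcs *fp = container_of(pcs, struct foo_pcs, pcs);
		int adv;

		adv = phylink_mii_c22_pcs_encode_advertisement(interface,
							       advertising);
		if (adv < 0)
			return 0; /* nothing to program for this mode */

		writel(adv, fp->base + FOO_PCS_ADVERTISE);
		return 0;
	}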
/**
* phylink_mii_c22_pcs_config() - configure clause 22 PCS
@@ -2703,16 +3061,18 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertising)
{
- bool changed;
+ bool changed = false;
u16 bmcr;
- int ret;
+ int ret, adv;
- ret = phylink_mii_c22_pcs_set_advertisement(pcs, interface,
- advertising);
- if (ret < 0)
- return ret;
-
- changed = ret > 0;
+ adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
+ if (adv >= 0) {
+ ret = mdiobus_modify_changed(pcs->bus, pcs->addr,
+ MII_ADVERTISE, 0xffff, adv);
+ if (ret < 0)
+ return ret;
+ changed = ret;
+ }
/* Ensure ISOLATE bit is disabled */
if (mode == MLO_AN_INBAND &&
@@ -2725,7 +3085,7 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
if (ret < 0)
return ret;
- return changed ? 1 : 0;
+ return changed;
}
EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_config);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 0c6c0d1843bc..c1512c9925a6 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -651,6 +651,11 @@ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
else if (ret < 0)
return ERR_PTR(ret);
+ if (!fwnode_device_is_available(ref.fwnode)) {
+ fwnode_handle_put(ref.fwnode);
+ return NULL;
+ }
+
bus = sfp_bus_get(ref.fwnode);
fwnode_handle_put(ref.fwnode);
if (!bus)
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index ab77a9f439ef..4720b24ca51b 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1641,17 +1641,20 @@ static int sfp_sm_probe_for_phy(struct sfp *sfp)
static int sfp_module_parse_power(struct sfp *sfp)
{
u32 power_mW = 1000;
+ bool supports_a2;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL))
power_mW = 1500;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL))
power_mW = 2000;
+ supports_a2 = sfp->id.ext.sff8472_compliance !=
+ SFP_SFF8472_COMPLIANCE_NONE ||
+ sfp->id.ext.diagmon & SFP_DIAGMON_DDM;
+
if (power_mW > sfp->max_power_mW) {
/* Module power specification exceeds the allowed maximum. */
- if (sfp->id.ext.sff8472_compliance ==
- SFP_SFF8472_COMPLIANCE_NONE &&
- !(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) {
+ if (!supports_a2) {
/* The module appears not to implement bus address
* 0xa2, so assume that the module powers up in the
* indicated mode.
@@ -1668,11 +1671,25 @@ static int sfp_module_parse_power(struct sfp *sfp)
}
}
+ if (power_mW <= 1000) {
+ /* Modules below 1W do not require a power change sequence */
+ sfp->module_power_mW = power_mW;
+ return 0;
+ }
+
+ if (!supports_a2) {
+ /* The module power level is below the host maximum and the
+ * module appears not to implement bus address 0xa2, so assume
+ * that the module powers up in the indicated mode.
+ */
+ return 0;
+ }
+
/* If the module requires a higher power mode, but also requires
* an address change sequence, warn the user that the module may
* not be functional.
*/
- if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE && power_mW > 1000) {
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) {
dev_warn(sfp->dev,
"Address Change Sequence not supported but module requires %u.%uW, module may not be functional\n",
power_mW / 1000, (power_mW / 100) % 10);
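
Taken together, the rework above reduces to a small decision tree once the
over-limit case has been handled. A loose standalone model of that flow (an
illustration only; the enum and return points are not the driver's exact
structure):

	enum sfp_power_path {
		SFP_NO_SEQUENCE,	/* <= 1W: no power change needed */
		SFP_ASSUME_INDICATED,	/* no 0xa2: powers up as indicated */
		SFP_WARN_ADDRMODE,	/* needs address-change sequence */
		SFP_SET_HIGH_POWER,	/* raise power mode via address 0xa2 */
	};

	static enum sfp_power_path sfp_power_path(unsigned int power_mw,
						  bool supports_a2,
						  bool addrmode)
	{
		if (power_mw <= 1000)
			return SFP_NO_SEQUENCE;
		if (!supports_a2)
			return SFP_ASSUME_INDICATED;
		if (addrmode)
			return SFP_WARN_ADDRMODE;
		return SFP_SET_HIGH_POWER;
	}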
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index f4429b93a9c8..15a179631903 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -281,8 +281,7 @@ ppp_asynctty_write(struct tty_struct *tty, struct file *file,
*/
static int
-ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+ppp_asynctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct asyncppp *ap = ap_get(tty);
int err, val;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 1180a0e2445f..4a365f15533e 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -69,6 +69,8 @@
#define MPHDRLEN 6 /* multilink protocol header length */
#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
+#define PPP_PROTO_LEN 2
+
/*
* An instance of /dev/ppp can be associated with either a ppp
* interface unit or a ppp channel. In both cases, file->private_data
@@ -173,6 +175,7 @@ struct channel {
spinlock_t downl; /* protects `chan', file.xq dequeue */
struct ppp *ppp; /* ppp unit we're connected to */
struct net *chan_net; /* the net channel belongs to */
+ netns_tracker ns_tracker;
struct list_head clist; /* link in list of channels per unit */
rwlock_t upl; /* protects `ppp' and 'bridge' */
struct channel __rcu *bridge; /* "bridged" ppp channel */
@@ -497,6 +500,9 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
if (!pf)
return -ENXIO;
+ /* All PPP packets should start with the 2-byte protocol field */
+ if (count < PPP_PROTO_LEN)
+ return -EINVAL;
ret = -ENOMEM;
skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
if (!skb)
@@ -1764,7 +1770,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
}
++ppp->stats64.tx_packets;
- ppp->stats64.tx_bytes += skb->len - 2;
+ ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN;
switch (proto) {
case PPP_IP:
@@ -2879,7 +2885,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
pch->ppp = NULL;
pch->chan = chan;
- pch->chan_net = get_net(net);
+ pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL);
chan->ppp = pch;
init_ppp_file(&pch->file, CHANNEL);
pch->file.hdrlen = chan->hdrlen;
@@ -3519,7 +3525,7 @@ ppp_disconnect_channel(struct channel *pch)
*/
static void ppp_destroy_channel(struct channel *pch)
{
- put_net(pch->chan_net);
+ put_net_track(pch->chan_net, &pch->ns_tracker);
pch->chan_net = NULL;
atomic_dec(&channel_count);
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index b3a71b409a80..18283b7b94bc 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -274,8 +274,7 @@ ppp_sync_write(struct tty_struct *tty, struct file *file,
}
static int
-ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+ppp_synctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct syncppp *ap = sp_get(tty);
int __user *p = (int __user *)arg;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 9f3b4c1aa5ce..98f586f910fb 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -1072,8 +1072,8 @@ static void slip_unesc6(struct slip *sl, unsigned char s)
#endif /* CONFIG_SLIP_MODE_SLIP6 */
/* Perform I/O control on an active SLIP channel. */
-static int slip_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int slip_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct slip *sl = tty->disc_data;
unsigned int tmp;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 45a67e72a02c..fed85447701a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1602,7 +1602,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
case XDP_PASS:
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(tun->dev, xdp_prog, act);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index ea8aa8c33241..1a627ba4b850 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1377,11 +1377,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = 0x03;
dev->mii.supports_gmii = 1;
- dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM;
+ dev->net->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO;
- dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM;
+ dev->net->hw_features |= dev->net->features;
+
+ netif_set_gso_max_size(dev->net, 16384);
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
@@ -1526,17 +1527,19 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
u32 tx_hdr1, tx_hdr2;
int frame_size = dev->maxpacket;
- int mss = skb_shinfo(skb)->gso_size;
int headroom;
void *ptr;
tx_hdr1 = skb->len;
- tx_hdr2 = mss;
+ tx_hdr2 = skb_shinfo(skb)->gso_size; /* Set TSO mss */
if (((skb->len + 8) % frame_size) == 0)
tx_hdr2 |= 0x80008000; /* Enable padding */
headroom = skb_headroom(skb) - 8;
+ if ((dev->net->features & NETIF_F_SG) && skb_linearize(skb))
+ return NULL;
+
if ((skb_header_cloned(skb) || headroom < 0) &&
pskb_expand_head(skb, headroom < 0 ? 8 : 0, 0, GFP_ATOMIC)) {
dev_kfree_skb_any(skb);
@@ -1547,6 +1550,8 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
put_unaligned_le32(tx_hdr1, ptr);
put_unaligned_le32(tx_hdr2, ptr + 4);
+ usbnet_set_skb_tx_stats(skb, (skb_shinfo(skb)->gso_segs ?: 1), 0);
+
return skb;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 075f8abde5cd..b8e20a3f2b84 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -67,7 +67,7 @@
#define DEFAULT_TSO_CSUM_ENABLE (true)
#define DEFAULT_VLAN_FILTER_ENABLE (true)
#define DEFAULT_VLAN_RX_OFFLOAD (true)
-#define TX_OVERHEAD (8)
+#define TX_ALIGNMENT (4)
#define RXW_PADDING 2
#define LAN78XX_USB_VENDOR_ID (0x0424)
@@ -92,6 +92,41 @@
WAKE_MCAST | WAKE_BCAST | \
WAKE_ARP | WAKE_MAGIC)
+#define LAN78XX_NAPI_WEIGHT 64
+
+#define TX_URB_NUM 10
+#define TX_SS_URB_NUM TX_URB_NUM
+#define TX_HS_URB_NUM TX_URB_NUM
+#define TX_FS_URB_NUM TX_URB_NUM
+
+/* A single URB buffer must be large enough to hold a complete jumbo packet
+ */
+#define TX_SS_URB_SIZE (32 * 1024)
+#define TX_HS_URB_SIZE (16 * 1024)
+#define TX_FS_URB_SIZE (10 * 1024)
+
+#define RX_SS_URB_NUM 30
+#define RX_HS_URB_NUM 10
+#define RX_FS_URB_NUM 10
+#define RX_SS_URB_SIZE TX_SS_URB_SIZE
+#define RX_HS_URB_SIZE TX_HS_URB_SIZE
+#define RX_FS_URB_SIZE TX_FS_URB_SIZE
+
+#define SS_BURST_CAP_SIZE RX_SS_URB_SIZE
+#define SS_BULK_IN_DELAY 0x2000
+#define HS_BURST_CAP_SIZE RX_HS_URB_SIZE
+#define HS_BULK_IN_DELAY 0x2000
+#define FS_BURST_CAP_SIZE RX_FS_URB_SIZE
+#define FS_BULK_IN_DELAY 0x2000
+
+#define TX_CMD_LEN 8
+#define TX_SKB_MIN_LEN (TX_CMD_LEN + ETH_HLEN)
+#define LAN78XX_TSO_SIZE(dev) ((dev)->tx_urb_size - TX_SKB_MIN_LEN)
+
+#define RX_CMD_LEN 10
+#define RX_SKB_MIN_LEN (RX_CMD_LEN + ETH_HLEN)
+#define RX_MAX_FRAME_LEN(mtu) ((mtu) + ETH_HLEN + VLAN_HLEN)
+
/* USB related defines */
#define BULK_IN_PIPE 1
#define BULK_OUT_PIPE 2
@@ -387,14 +422,22 @@ struct lan78xx_net {
struct usb_interface *intf;
void *driver_priv;
- int rx_qlen;
- int tx_qlen;
+ unsigned int tx_pend_data_len;
+ size_t n_tx_urbs;
+ size_t n_rx_urbs;
+ size_t tx_urb_size;
+ size_t rx_urb_size;
+
+ struct sk_buff_head rxq_free;
struct sk_buff_head rxq;
+ struct sk_buff_head rxq_done;
+ struct sk_buff_head rxq_overflow;
+ struct sk_buff_head txq_free;
struct sk_buff_head txq;
- struct sk_buff_head done;
struct sk_buff_head txq_pend;
- struct tasklet_struct bh;
+ struct napi_struct napi;
+
struct delayed_work wq;
int msg_enable;
@@ -406,8 +449,8 @@ struct lan78xx_net {
struct mutex phy_mutex; /* for phy access */
unsigned int pipe_in, pipe_out, pipe_intr;
- u32 hard_mtu; /* count any extra framing */
- size_t rx_urb_size; /* size for rx urbs */
+ unsigned int bulk_in_delay;
+ unsigned int burst_cap;
unsigned long flags;
@@ -445,6 +488,129 @@ static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
+static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
+{
+ if (skb_queue_empty(buf_pool))
+ return NULL;
+
+ return skb_dequeue(buf_pool);
+}
+
+static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
+ struct sk_buff *buf)
+{
+ buf->data = buf->head;
+ skb_reset_tail_pointer(buf);
+
+ buf->len = 0;
+ buf->data_len = 0;
+
+ skb_queue_tail(buf_pool, buf);
+}
+
+static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
+{
+ struct skb_data *entry;
+ struct sk_buff *buf;
+
+ while (!skb_queue_empty(buf_pool)) {
+ buf = skb_dequeue(buf_pool);
+ if (buf) {
+ entry = (struct skb_data *)buf->cb;
+ usb_free_urb(entry->urb);
+ dev_kfree_skb_any(buf);
+ }
+ }
+}
+
+static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
+ size_t n_urbs, size_t urb_size,
+ struct lan78xx_net *dev)
+{
+ struct skb_data *entry;
+ struct sk_buff *buf;
+ struct urb *urb;
+ int i;
+
+ skb_queue_head_init(buf_pool);
+
+ for (i = 0; i < n_urbs; i++) {
+ buf = alloc_skb(urb_size, GFP_ATOMIC);
+ if (!buf)
+ goto error;
+
+ if (skb_linearize(buf) != 0) {
+ dev_kfree_skb_any(buf);
+ goto error;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ dev_kfree_skb_any(buf);
+ goto error;
+ }
+
+ entry = (struct skb_data *)buf->cb;
+ entry->urb = urb;
+ entry->dev = dev;
+ entry->length = 0;
+ entry->num_of_packet = 0;
+
+ skb_queue_tail(buf_pool, buf);
+ }
+
+ return 0;
+
+error:
+ lan78xx_free_buf_pool(buf_pool);
+
+ return -ENOMEM;
+}
+
+static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
+{
+ return lan78xx_get_buf(&dev->rxq_free);
+}
+
+static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
+ struct sk_buff *rx_buf)
+{
+ lan78xx_release_buf(&dev->rxq_free, rx_buf);
+}
+
+static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
+{
+ lan78xx_free_buf_pool(&dev->rxq_free);
+}
+
+static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
+{
+ return lan78xx_alloc_buf_pool(&dev->rxq_free,
+ dev->n_rx_urbs, dev->rx_urb_size, dev);
+}
+
+static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
+{
+ return lan78xx_get_buf(&dev->txq_free);
+}
+
+static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
+ struct sk_buff *tx_buf)
+{
+ lan78xx_release_buf(&dev->txq_free, tx_buf);
+}
+
+static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
+{
+ lan78xx_free_buf_pool(&dev->txq_free);
+}
+
+static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
+{
+ return lan78xx_alloc_buf_pool(&dev->txq_free,
+ dev->n_tx_urbs, dev->tx_urb_size, dev);
+}
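
The pool helpers above give the driver fixed sets of preallocated SKB+URB
pairs: allocate once, recycle with get/release, free on teardown. A sketch
of the intended lifecycle using only the helpers added here (error handling
trimmed):

	static int example_rx_pool_lifecycle(struct lan78xx_net *dev)
	{
		struct sk_buff *buf;
		int ret;

		ret = lan78xx_alloc_rx_resources(dev);	/* fill rxq_free */
		if (ret)
			return ret;

		buf = lan78xx_get_rx_buf(dev);		/* take a buffer */
		if (buf)
			lan78xx_release_rx_buf(dev, buf); /* recycle it */

		lan78xx_free_rx_resources(dev);		/* teardown */
		return 0;
	}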
+
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
u32 *buf;
@@ -1202,6 +1368,8 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
return 0;
}
+static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
+
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
unsigned long start_time = jiffies;
@@ -1333,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
jiffies + STAT_UPDATE_TIMER);
}
- tasklet_schedule(&dev->bh);
+ lan78xx_rx_urb_submit_all(dev);
+
+ napi_schedule(&dev->napi);
}
return 0;
@@ -2373,37 +2543,24 @@ found:
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
struct lan78xx_net *dev = netdev_priv(netdev);
- int ll_mtu = new_mtu + netdev->hard_header_len;
- int old_hard_mtu = dev->hard_mtu;
- int old_rx_urb_size = dev->rx_urb_size;
+ int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
int ret;
/* no second zero-length packet read wanted after mtu-sized packets */
- if ((ll_mtu % dev->maxpacket) == 0)
+ if ((max_frame_len % dev->maxpacket) == 0)
return -EDOM;
ret = usb_autopm_get_interface(dev->intf);
if (ret < 0)
return ret;
- lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
-
- netdev->mtu = new_mtu;
-
- dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
- if (dev->rx_urb_size == old_hard_mtu) {
- dev->rx_urb_size = dev->hard_mtu;
- if (dev->rx_urb_size > old_rx_urb_size) {
- if (netif_running(dev->net)) {
- unlink_urbs(dev, &dev->rxq);
- tasklet_schedule(&dev->bh);
- }
- }
- }
+ ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
+ if (!ret)
+ netdev->mtu = new_mtu;
usb_autopm_put_interface(dev->intf);
- return 0;
+ return ret;
}
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
@@ -2559,6 +2716,44 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
+static int lan78xx_urb_config_init(struct lan78xx_net *dev)
+{
+ int result = 0;
+
+ switch (dev->udev->speed) {
+ case USB_SPEED_SUPER:
+ dev->rx_urb_size = RX_SS_URB_SIZE;
+ dev->tx_urb_size = TX_SS_URB_SIZE;
+ dev->n_rx_urbs = RX_SS_URB_NUM;
+ dev->n_tx_urbs = TX_SS_URB_NUM;
+ dev->bulk_in_delay = SS_BULK_IN_DELAY;
+ dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
+ break;
+ case USB_SPEED_HIGH:
+ dev->rx_urb_size = RX_HS_URB_SIZE;
+ dev->tx_urb_size = TX_HS_URB_SIZE;
+ dev->n_rx_urbs = RX_HS_URB_NUM;
+ dev->n_tx_urbs = TX_HS_URB_NUM;
+ dev->bulk_in_delay = HS_BULK_IN_DELAY;
+ dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
+ break;
+ case USB_SPEED_FULL:
+ dev->rx_urb_size = RX_FS_URB_SIZE;
+ dev->tx_urb_size = TX_FS_URB_SIZE;
+ dev->n_rx_urbs = RX_FS_URB_NUM;
+ dev->n_tx_urbs = TX_FS_URB_NUM;
+ dev->bulk_in_delay = FS_BULK_IN_DELAY;
+ dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+ break;
+ default:
+ netdev_warn(dev->net, "USB bus speed not supported\n");
+ result = -EIO;
+ break;
+ }
+
+ return result;
+}
+
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
@@ -2766,28 +2961,11 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* Init LTM */
lan78xx_init_ltm(dev);
- if (dev->udev->speed == USB_SPEED_SUPER) {
- buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
- dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
- dev->rx_qlen = 4;
- dev->tx_qlen = 4;
- } else if (dev->udev->speed == USB_SPEED_HIGH) {
- buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
- dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
- dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
- dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
- } else {
- buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
- dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
- dev->rx_qlen = 4;
- dev->tx_qlen = 4;
- }
-
- ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+ ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
if (ret < 0)
return ret;
- ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+ ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
if (ret < 0)
return ret;
@@ -2900,7 +3078,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return ret;
ret = lan78xx_set_rx_max_frame_length(dev,
- dev->net->mtu + VLAN_ETH_HLEN);
+ RX_MAX_FRAME_LEN(dev->net->mtu));
return ret;
}
@@ -2980,6 +3158,8 @@ static int lan78xx_open(struct net_device *net)
dev->link_on = false;
+ napi_enable(&dev->napi);
+
lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
mutex_unlock(&dev->dev_mutex);
@@ -3013,15 +3193,16 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
dev->wait = NULL;
remove_wait_queue(&unlink_wakeup, &wait);
- while (!skb_queue_empty(&dev->done)) {
- struct skb_data *entry;
- struct sk_buff *skb;
+ /* empty Rx done, Rx overflow and Tx pend queues
+ */
+ while (!skb_queue_empty(&dev->rxq_done)) {
+ struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
- skb = skb_dequeue(&dev->done);
- entry = (struct skb_data *)(skb->cb);
- usb_free_urb(entry->urb);
- dev_kfree_skb(skb);
+ lan78xx_release_rx_buf(dev, skb);
}
+
+ skb_queue_purge(&dev->rxq_overflow);
+ skb_queue_purge(&dev->txq_pend);
}
static int lan78xx_stop(struct net_device *net)
@@ -3037,7 +3218,7 @@ static int lan78xx_stop(struct net_device *net)
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue(net);
- tasklet_kill(&dev->bh);
+ napi_disable(&dev->napi);
lan78xx_terminate_urbs(dev);
@@ -3073,48 +3254,6 @@ static int lan78xx_stop(struct net_device *net)
return 0;
}
-static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
- struct sk_buff *skb, gfp_t flags)
-{
- u32 tx_cmd_a, tx_cmd_b;
- void *ptr;
-
- if (skb_cow_head(skb, TX_OVERHEAD)) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
- if (skb_linearize(skb)) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
- tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
-
- tx_cmd_b = 0;
- if (skb_is_gso(skb)) {
- u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
-
- tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
-
- tx_cmd_a |= TX_CMD_A_LSO_;
- }
-
- if (skb_vlan_tag_present(skb)) {
- tx_cmd_a |= TX_CMD_A_IVTG_;
- tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
- }
-
- ptr = skb_push(skb, 8);
- put_unaligned_le32(tx_cmd_a, ptr);
- put_unaligned_le32(tx_cmd_b, ptr + 4);
-
- return skb;
-}
-
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
struct sk_buff_head *list, enum skb_state state)
{
@@ -3128,12 +3267,13 @@ static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
__skb_unlink(skb, list);
spin_unlock(&list->lock);
- spin_lock(&dev->done.lock);
+ spin_lock(&dev->rxq_done.lock);
+
+ __skb_queue_tail(&dev->rxq_done, skb);
+ if (skb_queue_len(&dev->rxq_done) == 1)
+ napi_schedule(&dev->napi);
- __skb_queue_tail(&dev->done, skb);
- if (skb_queue_len(&dev->done) == 1)
- tasklet_schedule(&dev->bh);
- spin_unlock_irqrestore(&dev->done.lock, flags);
+ spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
return old_state;
}
@@ -3148,7 +3288,7 @@ static void tx_complete(struct urb *urb)
dev->net->stats.tx_packets += entry->num_of_packet;
dev->net->stats.tx_bytes += entry->length;
} else {
- dev->net->stats.tx_errors++;
+ dev->net->stats.tx_errors += entry->num_of_packet;
switch (urb->status) {
case -EPIPE:
@@ -3181,7 +3321,15 @@ static void tx_complete(struct urb *urb)
usb_autopm_put_interface_async(dev->intf);
- defer_bh(dev, skb, &dev->txq, tx_done);
+ skb_unlink(skb, &dev->txq);
+
+ lan78xx_release_tx_buf(dev, skb);
+
+ /* Re-schedule NAPI if Tx data pending but no URBs in progress.
+ */
+ if (skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->txq_pend))
+ napi_schedule(&dev->napi);
}
static void lan78xx_queue_skb(struct sk_buff_head *list,
@@ -3193,35 +3341,96 @@ static void lan78xx_queue_skb(struct sk_buff_head *list,
entry->state = state;
}
+static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
+{
+ return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
+}
+
+static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
+{
+ return dev->tx_pend_data_len;
+}
+
+static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
+ struct sk_buff *skb,
+ unsigned int *tx_pend_data_len)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->txq_pend.lock, flags);
+
+ __skb_queue_tail(&dev->txq_pend, skb);
+
+ dev->tx_pend_data_len += skb->len;
+ *tx_pend_data_len = dev->tx_pend_data_len;
+
+ spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
+}
+
+static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
+ struct sk_buff *skb,
+ unsigned int *tx_pend_data_len)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->txq_pend.lock, flags);
+
+ __skb_queue_head(&dev->txq_pend, skb);
+
+ dev->tx_pend_data_len += skb->len;
+ *tx_pend_data_len = dev->tx_pend_data_len;
+
+ spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
+}
+
+static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
+ struct sk_buff **skb,
+ unsigned int *tx_pend_data_len)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->txq_pend.lock, flags);
+
+ *skb = __skb_dequeue(&dev->txq_pend);
+ if (*skb)
+ dev->tx_pend_data_len -= (*skb)->len;
+ *tx_pend_data_len = dev->tx_pend_data_len;
+
+ spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
+}
+
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct sk_buff *skb2 = NULL;
+ unsigned int tx_pend_data_len;
if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
schedule_delayed_work(&dev->wq, 0);
- if (skb) {
- skb_tx_timestamp(skb);
- skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
- }
+ skb_tx_timestamp(skb);
- if (skb2) {
- skb_queue_tail(&dev->txq_pend, skb2);
+ lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
- /* throttle TX patch at slower than SUPER SPEED USB */
- if ((dev->udev->speed < USB_SPEED_SUPER) &&
- (skb_queue_len(&dev->txq_pend) > 10))
- netif_stop_queue(net);
- } else {
- netif_dbg(dev, tx_err, dev->net,
- "lan78xx_tx_prep return NULL\n");
- dev->net->stats.tx_errors++;
- dev->net->stats.tx_dropped++;
- }
+ /* Set up a Tx URB if none is in progress */
+
+ if (skb_queue_empty(&dev->txq))
+ napi_schedule(&dev->napi);
+
+ /* Stop stack Tx queue if we have enough data to fill
+ * all the free Tx URBs.
+ */
+ if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
+ netif_stop_queue(net);
+
+ netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
+ tx_pend_data_len, lan78xx_tx_urb_space(dev));
- tasklet_schedule(&dev->bh);
+ /* Kick off transmission of pending data */
+
+ if (!skb_queue_empty(&dev->txq_free))
+ napi_schedule(&dev->napi);
+ }
return NETDEV_TX_OK;
}
@@ -3278,9 +3487,6 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
goto out1;
}
- dev->net->hard_header_len += TX_OVERHEAD;
- dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-
/* Init all registers */
ret = lan78xx_reset(dev);
if (ret) {
@@ -3359,8 +3565,6 @@ static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
- int status;
-
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += skb->len;
@@ -3373,21 +3577,21 @@ static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
if (skb_defer_rx_timestamp(skb))
return;
- status = netif_rx(skb);
- if (status != NET_RX_SUCCESS)
- netif_dbg(dev, rx_err, dev->net,
- "netif_rx status %d\n", status);
+ napi_gro_receive(&dev->napi, skb);
}
-static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
+static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
+ int budget, int *work_done)
{
- if (skb->len < dev->net->hard_header_len)
+ if (skb->len < RX_SKB_MIN_LEN)
return 0;
+ /* Extract frames from the URB buffer and pass each one to
+ * the stack in a new NAPI SKB.
+ */
while (skb->len > 0) {
u32 rx_cmd_a, rx_cmd_b, align_count, size;
u16 rx_cmd_c;
- struct sk_buff *skb2;
unsigned char *packet;
rx_cmd_a = get_unaligned_le32(skb->data);
@@ -3409,41 +3613,36 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
netif_dbg(dev, rx_err, dev->net,
"Error rx_cmd_a=0x%08x", rx_cmd_a);
} else {
- /* last frame in this batch */
- if (skb->len == size) {
- lan78xx_rx_csum_offload(dev, skb,
- rx_cmd_a, rx_cmd_b);
- lan78xx_rx_vlan_offload(dev, skb,
- rx_cmd_a, rx_cmd_b);
-
- skb_trim(skb, skb->len - 4); /* remove fcs */
- skb->truesize = size + sizeof(struct sk_buff);
-
- return 1;
- }
+ u32 frame_len = size - ETH_FCS_LEN;
+ struct sk_buff *skb2;
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (unlikely(!skb2)) {
- netdev_warn(dev->net, "Error allocating skb");
+ skb2 = napi_alloc_skb(&dev->napi, frame_len);
+ if (!skb2)
return 0;
- }
- skb2->len = size;
- skb2->data = packet;
- skb_set_tail_pointer(skb2, size);
+ memcpy(skb2->data, packet, frame_len);
+
+ skb_put(skb2, frame_len);
lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
- skb_trim(skb2, skb2->len - 4); /* remove fcs */
- skb2->truesize = size + sizeof(struct sk_buff);
-
- lan78xx_skb_return(dev, skb2);
+ /* Processing of the URB buffer must complete once
+ * it has started. If the NAPI work budget is exhausted
+ * while frames remain, they are added to the overflow
+ * queue for delivery in the next NAPI polling cycle.
+ */
+ if (*work_done < budget) {
+ lan78xx_skb_return(dev, skb2);
+ ++(*work_done);
+ } else {
+ skb_queue_tail(&dev->rxq_overflow, skb2);
+ }
}
skb_pull(skb, size);
- /* padding bytes before the next frame starts */
+ /* skip padding bytes before the next frame starts */
if (skb->len)
skb_pull(skb, align_count);
}
@@ -3451,85 +3650,13 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
return 1;
}
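
The overflow queue filled above is drained on the next polling cycle. The
poll function itself falls outside this hunk, so the following is only a
hedged sketch of how a NAPI handler can honour the budget while keeping
per-URB processing atomic:

	static int example_rx_poll(struct lan78xx_net *dev, int budget)
	{
		struct sk_buff *skb;
		int work_done = 0;

		/* Deliver frames deferred from the previous cycle first */
		while (work_done < budget &&
		       (skb = skb_dequeue(&dev->rxq_overflow)) != NULL) {
			lan78xx_skb_return(dev, skb);
			work_done++;
		}

		/* Then unpack newly completed URB buffers */
		while (work_done < budget &&
		       (skb = skb_dequeue(&dev->rxq_done)) != NULL) {
			rx_process(dev, skb, budget, &work_done);
			lan78xx_rx_urb_resubmit(dev, skb);
		}

		return work_done;
	}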
-static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
+static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
+ int budget, int *work_done)
{
- if (!lan78xx_rx(dev, skb)) {
+ if (!lan78xx_rx(dev, skb, budget, work_done)) {
+ netif_dbg(dev, rx_err, dev->net, "drop\n");
dev->net->stats.rx_errors++;
- goto done;
- }
-
- if (skb->len) {
- lan78xx_skb_return(dev, skb);
- return;
}
-
- netif_dbg(dev, rx_err, dev->net, "drop\n");
- dev->net->stats.rx_errors++;
-done:
- skb_queue_tail(&dev->done, skb);
-}
-
-static void rx_complete(struct urb *urb);
-
-static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
-{
- struct sk_buff *skb;
- struct skb_data *entry;
- unsigned long lockflags;
- size_t size = dev->rx_urb_size;
- int ret = 0;
-
- skb = netdev_alloc_skb_ip_align(dev->net, size);
- if (!skb) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- entry = (struct skb_data *)skb->cb;
- entry->urb = urb;
- entry->dev = dev;
- entry->length = 0;
-
- usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
- skb->data, size, rx_complete, skb);
-
- spin_lock_irqsave(&dev->rxq.lock, lockflags);
-
- if (netif_device_present(dev->net) &&
- netif_running(dev->net) &&
- !test_bit(EVENT_RX_HALT, &dev->flags) &&
- !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- switch (ret) {
- case 0:
- lan78xx_queue_skb(&dev->rxq, skb, rx_start);
- break;
- case -EPIPE:
- lan78xx_defer_kevent(dev, EVENT_RX_HALT);
- break;
- case -ENODEV:
- case -ENOENT:
- netif_dbg(dev, ifdown, dev->net, "device gone\n");
- netif_device_detach(dev->net);
- break;
- case -EHOSTUNREACH:
- ret = -ENOLINK;
- break;
- default:
- netif_dbg(dev, rx_err, dev->net,
- "rx submit, %d\n", ret);
- tasklet_schedule(&dev->bh);
- }
- } else {
- netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
- ret = -ENOLINK;
- }
- spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
- if (ret) {
- dev_kfree_skb_any(skb);
- usb_free_urb(urb);
- }
- return ret;
}
static void rx_complete(struct urb *urb)
@@ -3540,13 +3667,18 @@ static void rx_complete(struct urb *urb)
int urb_status = urb->status;
enum skb_state state;
+ netif_dbg(dev, rx_status, dev->net,
+ "rx done: status %d", urb->status);
+
skb_put(skb, urb->actual_length);
state = rx_done;
- entry->urb = NULL;
+
+ if (urb != entry->urb)
+ netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
switch (urb_status) {
case 0:
- if (skb->len < dev->net->hard_header_len) {
+ if (skb->len < RX_SKB_MIN_LEN) {
state = rx_cleanup;
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
@@ -3564,16 +3696,12 @@ static void rx_complete(struct urb *urb)
netif_dbg(dev, ifdown, dev->net,
"rx shutdown, code %d\n", urb_status);
state = rx_cleanup;
- entry->urb = urb;
- urb = NULL;
break;
case -EPROTO:
case -ETIME:
case -EILSEQ:
dev->net->stats.rx_errors++;
state = rx_cleanup;
- entry->urb = urb;
- urb = NULL;
break;
/* data overrun ... flush fifo? */
@@ -3589,203 +3717,327 @@ static void rx_complete(struct urb *urb)
}
state = defer_bh(dev, skb, &dev->rxq, state);
+}
- if (urb) {
- if (netif_running(dev->net) &&
- !test_bit(EVENT_RX_HALT, &dev->flags) &&
- state != unlink_start) {
- rx_submit(dev, urb, GFP_ATOMIC);
- return;
+static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
+{
+ struct skb_data *entry = (struct skb_data *)skb->cb;
+ size_t size = dev->rx_urb_size;
+ struct urb *urb = entry->urb;
+ unsigned long lockflags;
+ int ret = 0;
+
+ usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
+ skb->data, size, rx_complete, skb);
+
+ spin_lock_irqsave(&dev->rxq.lock, lockflags);
+
+ if (netif_device_present(dev->net) &&
+ netif_running(dev->net) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags) &&
+ !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ ret = usb_submit_urb(urb, flags);
+ switch (ret) {
+ case 0:
+ lan78xx_queue_skb(&dev->rxq, skb, rx_start);
+ break;
+ case -EPIPE:
+ lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+ break;
+ case -ENODEV:
+ case -ENOENT:
+ netif_dbg(dev, ifdown, dev->net, "device gone\n");
+ netif_device_detach(dev->net);
+ break;
+ case -EHOSTUNREACH:
+ ret = -ENOLINK;
+ napi_schedule(&dev->napi);
+ break;
+ default:
+ netif_dbg(dev, rx_err, dev->net,
+ "rx submit, %d\n", ret);
+ napi_schedule(&dev->napi);
+ break;
}
- usb_free_urb(urb);
+ } else {
+ netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+ ret = -ENOLINK;
}
- netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
+ spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
+
+ if (ret)
+ lan78xx_release_rx_buf(dev, skb);
+
+ return ret;
}
-static void lan78xx_tx_bh(struct lan78xx_net *dev)
+static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
{
- int length;
- struct urb *urb = NULL;
- struct skb_data *entry;
- unsigned long flags;
- struct sk_buff_head *tqp = &dev->txq_pend;
- struct sk_buff *skb, *skb2;
- int ret;
- int count, pos;
- int skb_totallen, pkt_cnt;
-
- skb_totallen = 0;
- pkt_cnt = 0;
- count = 0;
- length = 0;
- spin_lock_irqsave(&tqp->lock, flags);
- skb_queue_walk(tqp, skb) {
- if (skb_is_gso(skb)) {
- if (!skb_queue_is_first(tqp, skb)) {
- /* handle previous packets first */
- break;
- }
- count = 1;
- length = skb->len - TX_OVERHEAD;
- __skb_unlink(skb, tqp);
- spin_unlock_irqrestore(&tqp->lock, flags);
- goto gso_skb;
- }
+ struct sk_buff *rx_buf;
- if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
+ /* Ensure the maximum number of Rx URBs is submitted
+ */
+ while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
+ if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
break;
- skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
- pkt_cnt++;
- }
- spin_unlock_irqrestore(&tqp->lock, flags);
-
- /* copy to a single skb */
- skb = alloc_skb(skb_totallen, GFP_ATOMIC);
- if (!skb)
- goto drop;
-
- skb_put(skb, skb_totallen);
-
- for (count = pos = 0; count < pkt_cnt; count++) {
- skb2 = skb_dequeue(tqp);
- if (skb2) {
- length += (skb2->len - TX_OVERHEAD);
- memcpy(skb->data + pos, skb2->data, skb2->len);
- pos += roundup(skb2->len, sizeof(u32));
- dev_kfree_skb(skb2);
- }
}
+}
-gso_skb:
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- goto drop;
+static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
+ struct sk_buff *rx_buf)
+{
+ /* reset SKB data pointers */
- entry = (struct skb_data *)skb->cb;
- entry->urb = urb;
- entry->dev = dev;
- entry->length = length;
- entry->num_of_packet = count;
+ rx_buf->data = rx_buf->head;
+ skb_reset_tail_pointer(rx_buf);
+ rx_buf->len = 0;
+ rx_buf->data_len = 0;
- spin_lock_irqsave(&dev->txq.lock, flags);
- ret = usb_autopm_get_interface_async(dev->intf);
- if (ret < 0) {
- spin_unlock_irqrestore(&dev->txq.lock, flags);
- goto drop;
- }
+ rx_submit(dev, rx_buf, GFP_ATOMIC);
+}
- usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
- skb->data, skb->len, tx_complete, skb);
+static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
+{
+ u32 tx_cmd_a;
+ u32 tx_cmd_b;
- if (length % dev->maxpacket == 0) {
- /* send USB_ZERO_PACKET */
- urb->transfer_flags |= URB_ZERO_PACKET;
- }
+ tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
-#ifdef CONFIG_PM
- /* if this triggers the device is still a sleep */
- if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
- /* transmission will be done in resume */
- usb_anchor_urb(urb, &dev->deferred);
- /* no use to process more packets */
- netif_stop_queue(dev->net);
- usb_put_urb(urb);
- spin_unlock_irqrestore(&dev->txq.lock, flags);
- netdev_dbg(dev->net, "Delaying transmission for resumption\n");
- return;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
+
+ tx_cmd_b = 0;
+ if (skb_is_gso(skb)) {
+ u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
+
+ tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
+
+ tx_cmd_a |= TX_CMD_A_LSO_;
}
-#endif
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- switch (ret) {
- case 0:
- netif_trans_update(dev->net);
- lan78xx_queue_skb(&dev->txq, skb, tx_start);
- if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
- netif_stop_queue(dev->net);
- break;
- case -EPIPE:
- netif_stop_queue(dev->net);
- lan78xx_defer_kevent(dev, EVENT_TX_HALT);
- usb_autopm_put_interface_async(dev->intf);
- break;
- case -ENODEV:
- case -ENOENT:
- netif_dbg(dev, tx_err, dev->net,
- "tx: submit urb err %d (disconnected?)", ret);
- netif_device_detach(dev->net);
- break;
- default:
- usb_autopm_put_interface_async(dev->intf);
- netif_dbg(dev, tx_err, dev->net,
- "tx: submit urb err %d\n", ret);
- break;
+ if (skb_vlan_tag_present(skb)) {
+ tx_cmd_a |= TX_CMD_A_IVTG_;
+ tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
}
- spin_unlock_irqrestore(&dev->txq.lock, flags);
+ put_unaligned_le32(tx_cmd_a, buffer);
+ put_unaligned_le32(tx_cmd_b, buffer + 4);
+}
- if (ret) {
- netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
-drop:
- dev->net->stats.tx_dropped++;
- if (skb)
+static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
+ struct sk_buff *tx_buf)
+{
+ struct skb_data *entry = (struct skb_data *)tx_buf->cb;
+ int remain = dev->tx_urb_size;
+ u8 *tx_data = tx_buf->data;
+ u32 urb_len = 0;
+
+ entry->num_of_packet = 0;
+ entry->length = 0;
+
+ /* Work through the pending SKBs and copy the data of each SKB into
+ * the URB buffer if there is room for all the SKB data.
+ *
+ * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
+ */
+ while (remain >= TX_SKB_MIN_LEN) {
+ unsigned int pending_bytes;
+ unsigned int align_bytes;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
+
+ if (!skb)
+ break;
+
+ align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
+ TX_ALIGNMENT;
+ len = align_bytes + TX_CMD_LEN + skb->len;
+ if (len > remain) {
+ lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
+ break;
+ }
+
+ tx_data += align_bytes;
+
+ lan78xx_fill_tx_cmd_words(skb, tx_data);
+ tx_data += TX_CMD_LEN;
+
+ len = skb->len;
+ if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
+ struct net_device_stats *stats = &dev->net->stats;
+
+ stats->tx_dropped++;
dev_kfree_skb_any(skb);
- usb_free_urb(urb);
- } else {
- netif_dbg(dev, tx_queued, dev->net,
- "> tx, len %d, type 0x%x\n", length, skb->protocol);
+ tx_data -= TX_CMD_LEN;
+ continue;
+ }
+
+ tx_data += len;
+ entry->length += len;
+ entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
+
+ dev_kfree_skb_any(skb);
+
+ urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
+
+ remain = dev->tx_urb_size - urb_len;
}
+
+ skb_put(tx_buf, urb_len);
+
+ return entry;
}
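
The helper above coalesces as many pending SKBs as fit into a single URB buffer, padding each frame so its command words start on a TX_ALIGNMENT boundary. A minimal, standalone sketch of the padding arithmetic (the 4-byte alignment and 8-byte command length are assumptions for illustration):

#include <stdio.h>

#define TX_ALIGNMENT	4U	/* assumed per-frame alignment */
#define TX_CMD_LEN	8U	/* assumed: two 32-bit command words per frame */

/* Padding needed so the next frame's command words start aligned. */
static unsigned int tx_align_pad(unsigned int urb_len)
{
	return (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) % TX_ALIGNMENT;
}

int main(void)
{
	unsigned int skb_lens[] = { 60, 1514, 42 };
	unsigned int urb_len = 0;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int pad = tx_align_pad(urb_len);

		printf("frame %u: pad %u, cmd at %u, data at %u\n",
		       i, pad, urb_len + pad, urb_len + pad + TX_CMD_LEN);
		urb_len += pad + TX_CMD_LEN + skb_lens[i];
	}
	return 0;
}
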
-static void lan78xx_rx_bh(struct lan78xx_net *dev)
+static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
- struct urb *urb;
- int i;
+ int ret;
- if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
- for (i = 0; i < 10; i++) {
- if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
- break;
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (urb)
- if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
- return;
+	/* Start the stack Tx queue if it was stopped */
+ netif_tx_lock(dev->net);
+ if (netif_queue_stopped(dev->net)) {
+ if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
+ netif_wake_queue(dev->net);
+ }
+ netif_tx_unlock(dev->net);
+
+ /* Go through the Tx pending queue and set up URBs to transfer
+ * the data to the device. Stop if no more pending data or URBs,
+ * or if an error occurs when a URB is submitted.
+ */
+ do {
+ struct skb_data *entry;
+ struct sk_buff *tx_buf;
+ unsigned long flags;
+
+ if (skb_queue_empty(&dev->txq_pend))
+ break;
+
+ tx_buf = lan78xx_get_tx_buf(dev);
+ if (!tx_buf)
+ break;
+
+ entry = lan78xx_tx_buf_fill(dev, tx_buf);
+
+ spin_lock_irqsave(&dev->txq.lock, flags);
+ ret = usb_autopm_get_interface_async(dev->intf);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ goto out;
}
- if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
- tasklet_schedule(&dev->bh);
- }
- if (skb_queue_len(&dev->txq) < dev->tx_qlen)
- netif_wake_queue(dev->net);
+ usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
+ tx_buf->data, tx_buf->len, tx_complete,
+ tx_buf);
+
+ if (tx_buf->len % dev->maxpacket == 0) {
+ /* send USB_ZERO_PACKET */
+ entry->urb->transfer_flags |= URB_ZERO_PACKET;
+ }
+
+#ifdef CONFIG_PM
+		/* if the device is asleep, stop outgoing packet processing */
+ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ usb_anchor_urb(entry->urb, &dev->deferred);
+ netif_stop_queue(dev->net);
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ netdev_dbg(dev->net,
+ "Delaying transmission for resumption\n");
+ return;
+ }
+#endif
+ ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
+ switch (ret) {
+ case 0:
+ netif_trans_update(dev->net);
+ lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
+ break;
+ case -EPIPE:
+ netif_stop_queue(dev->net);
+ lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+ usb_autopm_put_interface_async(dev->intf);
+ break;
+ case -ENODEV:
+ case -ENOENT:
+ netif_dbg(dev, tx_err, dev->net,
+ "tx submit urb err %d (disconnected?)", ret);
+ netif_device_detach(dev->net);
+ break;
+ default:
+ usb_autopm_put_interface_async(dev->intf);
+ netif_dbg(dev, tx_err, dev->net,
+ "tx submit urb err %d\n", ret);
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+
+ if (ret) {
+ netdev_warn(dev->net, "failed to tx urb %d\n", ret);
+out:
+ dev->net->stats.tx_dropped += entry->num_of_packet;
+ lan78xx_release_tx_buf(dev, tx_buf);
+ }
+ } while (ret == 0);
}
-static void lan78xx_bh(struct tasklet_struct *t)
+static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
- struct lan78xx_net *dev = from_tasklet(dev, t, bh);
- struct sk_buff *skb;
+ struct sk_buff_head done;
+ struct sk_buff *rx_buf;
struct skb_data *entry;
+ unsigned long flags;
+ int work_done = 0;
+
+ /* Pass frames received in the last NAPI cycle before
+ * working on newly completed URBs.
+ */
+ while (!skb_queue_empty(&dev->rxq_overflow)) {
+ lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
+ ++work_done;
+ }
+
+ /* Take a snapshot of the done queue and move items to a
+ * temporary queue. Rx URB completions will continue to add
+ * to the done queue.
+ */
+ __skb_queue_head_init(&done);
- while ((skb = skb_dequeue(&dev->done))) {
- entry = (struct skb_data *)(skb->cb);
+ spin_lock_irqsave(&dev->rxq_done.lock, flags);
+ skb_queue_splice_init(&dev->rxq_done, &done);
+ spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
+
+ /* Extract receive frames from completed URBs and
+ * pass them to the stack. Re-submit each completed URB.
+ */
+ while ((work_done < budget) &&
+ (rx_buf = __skb_dequeue(&done))) {
+ entry = (struct skb_data *)(rx_buf->cb);
switch (entry->state) {
case rx_done:
- entry->state = rx_cleanup;
- rx_process(dev, skb);
- continue;
- case tx_done:
- usb_free_urb(entry->urb);
- dev_kfree_skb(skb);
- continue;
+ rx_process(dev, rx_buf, budget, &work_done);
+ break;
case rx_cleanup:
- usb_free_urb(entry->urb);
- dev_kfree_skb(skb);
- continue;
+ break;
default:
- netdev_dbg(dev->net, "skb state %d\n", entry->state);
- return;
+ netdev_dbg(dev->net, "rx buf state %d\n",
+ entry->state);
+ break;
}
+
+ lan78xx_rx_urb_resubmit(dev, rx_buf);
}
+	/* If the budget was consumed before all the URBs were processed,
+	 * put them back on the front of the done queue. They will be
+	 * processed first in the next NAPI cycle.
+	 */
+ spin_lock_irqsave(&dev->rxq_done.lock, flags);
+ skb_queue_splice(&done, &dev->rxq_done);
+ spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
+
if (netif_device_present(dev->net) && netif_running(dev->net)) {
/* reset update timer delta */
if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
@@ -3794,12 +4046,61 @@ static void lan78xx_bh(struct tasklet_struct *t)
jiffies + STAT_UPDATE_TIMER);
}
- if (!skb_queue_empty(&dev->txq_pend))
- lan78xx_tx_bh(dev);
+ /* Submit all free Rx URBs */
if (!test_bit(EVENT_RX_HALT, &dev->flags))
- lan78xx_rx_bh(dev);
+ lan78xx_rx_urb_submit_all(dev);
+
+ /* Submit new Tx URBs */
+
+ lan78xx_tx_bh(dev);
}
+
+ return work_done;
+}
+
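
A pattern worth noting in lan78xx_bh() above: the shared done-queue lock is held only for two splice operations, never across per-buffer work, and whatever survives budget exhaustion is spliced back to the head so completion order is preserved. A reduced kernel-context sketch of the pattern:

static int drain_done_queue(struct sk_buff_head *done_q, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *skb;
	unsigned long flags;
	int work_done = 0;

	__skb_queue_head_init(&done);

	/* Take the lock once to snapshot the shared queue. */
	spin_lock_irqsave(&done_q->lock, flags);
	skb_queue_splice_init(done_q, &done);
	spin_unlock_irqrestore(&done_q->lock, flags);

	/* Process lock-free from the private list. */
	while (work_done < budget && (skb = __skb_dequeue(&done))) {
		/* ... per-buffer work ... */
		work_done++;
	}

	/* Return the unprocessed remainder to the head, keeping order. */
	spin_lock_irqsave(&done_q->lock, flags);
	skb_queue_splice(&done, done_q);
	spin_unlock_irqrestore(&done_q->lock, flags);

	return work_done;
}
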
+static int lan78xx_poll(struct napi_struct *napi, int budget)
+{
+ struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
+ int result = budget;
+ int work_done;
+
+ /* Don't do any work if the device is suspended */
+
+ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ /* Process completed URBs and submit new URBs */
+
+ work_done = lan78xx_bh(dev, budget);
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+
+ /* Start a new polling cycle if data was received or
+ * data is waiting to be transmitted.
+ */
+ if (!skb_queue_empty(&dev->rxq_done)) {
+ napi_schedule(napi);
+ } else if (netif_carrier_ok(dev->net)) {
+ if (skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->txq_pend)) {
+ napi_schedule(napi);
+ } else {
+ netif_tx_lock(dev->net);
+ if (netif_queue_stopped(dev->net)) {
+ netif_wake_queue(dev->net);
+ napi_schedule(napi);
+ }
+ netif_tx_unlock(dev->net);
+ }
+ }
+ result = work_done;
+ }
+
+ return result;
}
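
lan78xx_poll() follows the standard NAPI contract: a poll routine that exhausts its budget returns the budget and stays scheduled, while one that finishes early calls napi_complete_done() and re-arms itself only if work raced with completion. A minimal sketch of that contract (do_rx_work() and more_work_pending() are hypothetical helpers):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = do_rx_work(napi, budget);	/* hypothetical */

	if (work_done == budget)
		return budget;	/* budget exhausted: stay scheduled */

	/* napi_complete_done() returns true when polling actually ended */
	if (napi_complete_done(napi, work_done) && more_work_pending(napi))
		napi_schedule(napi);	/* work arrived during completion */

	return work_done;
}
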
static void lan78xx_delayedwork(struct work_struct *work)
@@ -3845,7 +4146,7 @@ static void lan78xx_delayedwork(struct work_struct *work)
status);
} else {
clear_bit(EVENT_RX_HALT, &dev->flags);
- tasklet_schedule(&dev->bh);
+ napi_schedule(&dev->napi);
}
}
@@ -3939,6 +4240,8 @@ static void lan78xx_disconnect(struct usb_interface *intf)
set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
+ netif_napi_del(&dev->napi);
+
udev = interface_to_usbdev(intf);
net = dev->net;
@@ -3963,6 +4266,9 @@ static void lan78xx_disconnect(struct usb_interface *intf)
lan78xx_unbind(dev, intf);
+ lan78xx_free_tx_resources(dev);
+ lan78xx_free_rx_resources(dev);
+
usb_kill_urb(dev->urb_intr);
usb_free_urb(dev->urb_intr);
@@ -3975,14 +4281,16 @@ static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
struct lan78xx_net *dev = netdev_priv(net);
unlink_urbs(dev, &dev->txq);
- tasklet_schedule(&dev->bh);
+ napi_schedule(&dev->napi);
}
static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
- if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+ struct lan78xx_net *dev = netdev_priv(netdev);
+
+ if (skb->len > LAN78XX_TSO_SIZE(dev))
features &= ~NETIF_F_GSO_MASK;
features = vlan_features_check(skb, features);
@@ -4048,12 +4356,31 @@ static int lan78xx_probe(struct usb_interface *intf,
skb_queue_head_init(&dev->rxq);
skb_queue_head_init(&dev->txq);
- skb_queue_head_init(&dev->done);
+ skb_queue_head_init(&dev->rxq_done);
skb_queue_head_init(&dev->txq_pend);
+ skb_queue_head_init(&dev->rxq_overflow);
mutex_init(&dev->phy_mutex);
mutex_init(&dev->dev_mutex);
- tasklet_setup(&dev->bh, lan78xx_bh);
+ ret = lan78xx_urb_config_init(dev);
+ if (ret < 0)
+ goto out2;
+
+ ret = lan78xx_alloc_tx_resources(dev);
+ if (ret < 0)
+ goto out2;
+
+ ret = lan78xx_alloc_rx_resources(dev);
+ if (ret < 0)
+ goto out3;
+
+ /* MTU range: 68 - 9000 */
+ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+
+ netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
+
+ netif_napi_add(netdev, &dev->napi, lan78xx_poll, LAN78XX_NAPI_WEIGHT);
+
INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
init_usb_anchor(&dev->deferred);
@@ -4068,27 +4395,27 @@ static int lan78xx_probe(struct usb_interface *intf,
if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
ret = -ENODEV;
- goto out2;
+ goto out4;
}
dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
ret = -ENODEV;
- goto out2;
+ goto out4;
}
dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
ret = -ENODEV;
- goto out2;
+ goto out4;
}
ep_intr = &intf->cur_altsetting->endpoint[2];
if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
ret = -ENODEV;
- goto out2;
+ goto out4;
}
dev->pipe_intr = usb_rcvintpipe(dev->udev,
@@ -4096,30 +4423,25 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = lan78xx_bind(dev, intf);
if (ret < 0)
- goto out2;
-
- if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
- netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
-
- /* MTU range: 68 - 9000 */
- netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
- netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
+ goto out4;
period = ep_intr->desc.bInterval;
maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
buf = kmalloc(maxp, GFP_KERNEL);
- if (buf) {
- dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->urb_intr) {
- ret = -ENOMEM;
- kfree(buf);
- goto out3;
- } else {
- usb_fill_int_urb(dev->urb_intr, dev->udev,
- dev->pipe_intr, buf, maxp,
- intr_complete, dev, period);
- dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
- }
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out5;
+ }
+
+ dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->urb_intr) {
+ ret = -ENOMEM;
+ goto out6;
+ } else {
+ usb_fill_int_urb(dev->urb_intr, dev->udev,
+ dev->pipe_intr, buf, maxp,
+ intr_complete, dev, period);
+ dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
}
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
@@ -4127,7 +4449,7 @@ static int lan78xx_probe(struct usb_interface *intf,
/* Reject broken descriptors. */
if (dev->maxpacket == 0) {
ret = -ENODEV;
- goto out4;
+ goto out6;
}
/* driver requires remote-wakeup capability during autosuspend. */
@@ -4135,12 +4457,12 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = lan78xx_phy_init(dev);
if (ret < 0)
- goto out4;
+ goto out7;
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out5;
+ goto out8;
}
usb_set_intfdata(intf, dev);
@@ -4155,12 +4477,19 @@ static int lan78xx_probe(struct usb_interface *intf,
return 0;
-out5:
+out8:
phy_disconnect(netdev->phydev);
-out4:
+out7:
usb_free_urb(dev->urb_intr);
-out3:
+out6:
+ kfree(buf);
+out5:
lan78xx_unbind(dev, intf);
+out4:
+ netif_napi_del(&dev->napi);
+ lan78xx_free_rx_resources(dev);
+out3:
+ lan78xx_free_tx_resources(dev);
out2:
free_netdev(netdev);
out1:
@@ -4581,8 +4910,7 @@ static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
if (!netif_device_present(dev->net) ||
!netif_carrier_ok(dev->net) ||
pipe_halted) {
- usb_free_urb(urb);
- dev_kfree_skb(skb);
+ lan78xx_release_tx_buf(dev, skb);
continue;
}
@@ -4592,15 +4920,14 @@ static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
netif_trans_update(dev->net);
lan78xx_queue_skb(&dev->txq, skb, tx_start);
} else {
- usb_free_urb(urb);
- dev_kfree_skb(skb);
-
if (ret == -EPIPE) {
netif_stop_queue(dev->net);
pipe_halted = true;
} else if (ret == -ENODEV) {
netif_device_detach(dev->net);
}
+
+ lan78xx_release_tx_buf(dev, skb);
}
}
@@ -4632,8 +4959,7 @@ static int lan78xx_resume(struct usb_interface *intf)
if (ret < 0) {
if (ret == -ENODEV)
netif_device_detach(dev->net);
-
- netdev_warn(dev->net, "Failed to submit intr URB");
+ netdev_warn(dev->net, "Failed to submit intr URB");
}
}
@@ -4652,14 +4978,14 @@ static int lan78xx_resume(struct usb_interface *intf)
if (!pipe_halted &&
netif_device_present(dev->net) &&
- (skb_queue_len(&dev->txq) < dev->tx_qlen))
+ (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
netif_start_queue(dev->net);
ret = lan78xx_start_tx_path(dev);
if (ret < 0)
goto out;
- tasklet_schedule(&dev->bh);
+ napi_schedule(&dev->napi);
if (!timer_pending(&dev->stat_monitor)) {
dev->delta = 1;
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 326cc4e749d8..fdda0616704e 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -108,8 +108,16 @@ static const char driver_name[] = "MOSCHIP usb-ethernet driver";
static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
{
- return usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ,
- 0x0000, index, data, size);
+ int ret;
+
+ ret = usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ,
+ 0x0000, index, data, size);
+ if (ret < 0)
+ return ret;
+ else if (ret < size)
+ return -ENODATA;
+
+ return ret;
}
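
With this change, a truncated control transfer can no longer masquerade as success: callers that only test for a negative return now see -ENODATA instead of acting on partially filled buffers. A caller-side sketch (the register name is hypothetical):

	u8 val;
	int ret;

	ret = mcs7830_get_reg(dev, HIF_REG_EXAMPLE, 1, &val);	/* hypothetical register */
	if (ret < 0)
		return ret;	/* now includes -ENODATA for short reads */
	/* val is fully populated here */
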
static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f510e8219470..37e5f3495362 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1316,6 +1316,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */
+ {QMI_FIXED_INTF(0x19d2, 0x1485, 5)}, /* ZTE MF286D */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
@@ -1401,6 +1402,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
+ {QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ef6010a3d33a..ee41088c5251 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -9018,7 +9018,9 @@ static int rtl8152_set_tunable(struct net_device *netdev,
}
static void rtl8152_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -9027,7 +9029,9 @@ static void rtl8152_get_ringparam(struct net_device *netdev,
}
static int rtl8152_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct r8152 *tp = netdev_priv(netdev);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index abe0149ed917..bc1e3dd67c04 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1962,7 +1962,8 @@ static const struct driver_info smsc95xx_info = {
.bind = smsc95xx_bind,
.unbind = smsc95xx_unbind,
.link_reset = smsc95xx_link_reset,
- .reset = smsc95xx_start_phy,
+ .reset = smsc95xx_reset,
+ .check_connect = smsc95xx_start_phy,
.stop = smsc95xx_stop,
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2acdb8ad6c71..354a963075c5 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -134,29 +134,22 @@ static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *inf
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
- char *p = (char *)buf;
+ u8 *p = buf;
int i, j;
switch(stringset) {
case ETH_SS_STATS:
memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
p += sizeof(ethtool_stats_keys);
- for (i = 0; i < dev->real_num_rx_queues; i++) {
- for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
- snprintf(p, ETH_GSTRING_LEN,
- "rx_queue_%u_%.18s",
- i, veth_rq_stats_desc[j].desc);
- p += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < dev->real_num_tx_queues; i++) {
- for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
- snprintf(p, ETH_GSTRING_LEN,
- "tx_queue_%u_%.18s",
- i, veth_tq_stats_desc[j].desc);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ for (j = 0; j < VETH_RQ_STATS_LEN; j++)
+ ethtool_sprintf(&p, "rx_queue_%u_%.18s",
+ i, veth_rq_stats_desc[j].desc);
+
+ for (i = 0; i < dev->real_num_tx_queues; i++)
+ for (j = 0; j < VETH_TQ_STATS_LEN; j++)
+ ethtool_sprintf(&p, "tx_queue_%u_%.18s",
+ i, veth_tq_stats_desc[j].desc);
break;
}
}
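
ethtool_sprintf() formats into an ETH_GSTRING_LEN-sized slot and advances the caller's cursor, which replaces the manual snprintf()/`p += ETH_GSTRING_LEN` bookkeeping the old loops carried. A sketch of the idiom:

static void example_get_strings(u8 *buf)
{
	u8 *p = buf;
	int i;

	for (i = 0; i < 4; i++)
		ethtool_sprintf(&p, "queue_%d_packets", i);
	/* p now points ETH_GSTRING_LEN * 4 bytes past buf */
}
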
@@ -342,7 +335,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
use_napi = rcu_access_pointer(rq->napi) &&
veth_skb_is_eligible_for_gro(dev, rcv, skb);
- skb_record_rx_queue(skb, rxq);
}
skb_tx_timestamp(skb);
@@ -651,7 +643,7 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
rcu_read_unlock();
goto xdp_xmit;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rq->dev, xdp_prog, act);
@@ -801,7 +793,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
goto xdp_xmit;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rq->dev, xdp_prog, act);
@@ -1693,8 +1685,8 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (ifmp && (dev->ifindex != 0))
peer->ifindex = ifmp->ifi_index;
- peer->gso_max_size = dev->gso_max_size;
- peer->gso_max_segs = dev->gso_max_segs;
+ netif_set_gso_max_size(peer, dev->gso_max_size);
+ netif_set_gso_max_segs(peer, dev->gso_max_segs);
err = register_netdevice(peer);
put_net(net);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b107835242ad..a801ea40908f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -812,7 +812,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
rcu_read_unlock();
goto xdp_xmit;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
@@ -1022,7 +1022,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
rcu_read_unlock();
goto xdp_xmit;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
@@ -2101,7 +2101,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
- cpu = cpumask_next(-1, cpu_online_mask);
+ cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);
@@ -2171,7 +2171,9 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
}
static void virtnet_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2691,7 +2693,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
txqueue, sq->name, sq->vq->index, sq->vq->name,
- jiffies_to_usecs(jiffies - txq->trans_start));
+ jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
}
static const struct net_device_ops virtnet_netdev = {
@@ -3310,7 +3312,7 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
free_unregister_netdev:
- vi->vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
unregister_netdev(dev);
free_failover:
@@ -3326,7 +3328,7 @@ free:
static void remove_vq_common(struct virtnet_info *vi)
{
- vi->vdev->config->reset(vi->vdev);
+ virtio_reset_device(vi->vdev);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index fd407c0e2856..d9d90baac72a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3159,14 +3159,14 @@ out:
static void
-vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
+vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_LRO;
+ NETIF_F_LRO | NETIF_F_HIGHDMA;
if (VMXNET3_VERSION_GE_4(adapter)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
@@ -3179,8 +3179,6 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
- if (dma64)
- netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->vlan_features = netdev->hw_features &
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX);
@@ -3397,7 +3395,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
#endif
};
int err;
- bool dma64;
u32 ver;
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
@@ -3439,15 +3436,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
- dma64 = true;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "dma_set_mask failed\n");
- goto err_set_mask;
- }
- dma64 = false;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
+ goto err_set_mask;
}
spin_lock_init(&adapter->cmd_lock);
@@ -3614,7 +3606,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
}
SET_NETDEV_DEV(netdev, &pdev->dev);
- vmxnet3_declare_features(adapter, dma64);
+ vmxnet3_declare_features(adapter);
adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 16f3a2057b90..3172d46c0335 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -575,10 +575,11 @@ vmxnet3_get_link_ksettings(struct net_device *netdev,
return 0;
}
-
static void
vmxnet3_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -595,10 +596,11 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_jumbo_pending = adapter->rx_ring2_size;
}
-
static int
vmxnet3_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index b2242a082431..e0b1ab99a359 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -34,6 +34,7 @@
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
+#include <net/sch_generic.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>
@@ -814,9 +815,9 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
*/
if (rt6) {
dst = &rt6->dst;
- dev_put(dst->dev);
+ dev_replace_track(dst->dev, net->loopback_dev,
+ &dst->dev_tracker, GFP_KERNEL);
dst->dev = net->loopback_dev;
- dev_hold(dst->dev);
dst_release(dst);
}
}
@@ -1061,9 +1062,9 @@ static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
*/
if (rth) {
dst = &rth->dst;
- dev_put(dst->dev);
+ dev_replace_track(dst->dev, net->loopback_dev,
+ &dst->dev_tracker, GFP_KERNEL);
dst->dev = net->loopback_dev;
- dev_hold(dst->dev);
dst_release(dst);
}
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 141635a35c28..359d16780dbb 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -17,6 +17,7 @@
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
+#include <net/gro.h>
#include <net/ipv6_stubs.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -3233,7 +3234,6 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_fdb_dump = vxlan_fdb_dump,
.ndo_fdb_get = vxlan_fdb_get,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
- .ndo_change_proto_down = dev_change_proto_down_generic,
};
static const struct net_device_ops vxlan_netdev_raw_ops = {
@@ -3304,7 +3304,7 @@ static void vxlan_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_QUEUE | IFF_CHANGE_PROTO_DOWN;
/* MTU range: 68 - 65535 */
dev->min_mtu = ETH_MIN_MTU;
@@ -3747,7 +3747,7 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
if (!conf->dst_port) {
if (conf->flags & VXLAN_F_GPE)
- conf->dst_port = htons(4790); /* IANA VXLAN-GPE port */
+ conf->dst_port = htons(IANA_VXLAN_GPE_UDP_PORT);
else
conf->dst_port = htons(vxlan_port);
}
@@ -3810,8 +3810,8 @@ static void vxlan_config_apply(struct net_device *dev,
if (lowerdev) {
dst->remote_ifindex = conf->remote_ifindex;
- dev->gso_max_size = lowerdev->gso_max_size;
- dev->gso_max_segs = lowerdev->gso_max_segs;
+ netif_set_gso_max_size(dev, lowerdev->gso_max_size);
+ netif_set_gso_max_segs(dev, lowerdev->gso_max_segs);
needed_headroom = lowerdev->hard_header_len;
needed_headroom += lowerdev->needed_headroom;
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index cda1b4ce6b21..5ae2d27b5da9 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -306,9 +306,8 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
else
bd_status = R_E_S | R_I_S | R_W_S;
- iowrite16be(bd_status, &priv->rx_bd_base[i].status);
- iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
- &priv->rx_bd_base[i].buf);
+ priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
+ priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
}
for (i = 0; i < TX_BD_RING_LEN; i++) {
@@ -317,10 +316,10 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
else
bd_status = T_I_S | T_TC_S | T_W_S;
- iowrite16be(bd_status, &priv->tx_bd_base[i].status);
- iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
- &priv->tx_bd_base[i].buf);
+ priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
+ priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
}
+ dma_wmb();
return 0;
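
The descriptor rings here live in DMA-coherent memory rather than MMIO space, so the ioread/iowrite accessors become plain big-endian loads and stores, with dma_wmb()/dma_rmb() ordering them against the device. A sketch of the general ownership idiom this enables (the patch above applies a single dma_wmb() after initializing the whole ring):

/* Hand one buffer descriptor to the hardware: fill the descriptor,
 * barrier, then flip the empty/ready bit the device polls.
 */
static void bd_give_to_hw(struct qe_bd *bd, u32 dma_addr, u16 len)
{
	bd->buf = cpu_to_be32(dma_addr);
	bd->length = cpu_to_be16(len);
	dma_wmb();	/* descriptor contents visible before status */
	bd->status = cpu_to_be16(R_E_S | R_I_S);
}
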
@@ -352,10 +351,10 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
- struct qe_bd __iomem *bd;
+ struct qe_bd *bd;
u16 bd_status;
unsigned long flags;
- u16 *proto_head;
+ __be16 *proto_head;
switch (dev->type) {
case ARPHRD_RAWHDLC:
@@ -368,14 +367,14 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
skb_push(skb, HDLC_HEAD_LEN);
- proto_head = (u16 *)skb->data;
+ proto_head = (__be16 *)skb->data;
*proto_head = htons(DEFAULT_HDLC_HEAD);
dev->stats.tx_bytes += skb->len;
break;
case ARPHRD_PPP:
- proto_head = (u16 *)skb->data;
+ proto_head = (__be16 *)skb->data;
if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
@@ -398,9 +397,10 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
netdev_sent_queue(dev, skb->len);
spin_lock_irqsave(&priv->lock, flags);
+ dma_rmb();
/* Start from the next BD that should be filled */
bd = priv->curtx_bd;
- bd_status = ioread16be(&bd->status);
+ bd_status = be16_to_cpu(bd->status);
/* Save the skb pointer so we can free it later */
priv->tx_skbuff[priv->skb_curtx] = skb;
@@ -415,8 +415,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
- iowrite16be(skb->len, &bd->length);
- iowrite16be(bd_status, &bd->status);
+ bd->length = cpu_to_be16(skb->len);
+ bd->status = cpu_to_be16(bd_status);
/* Move to next BD in the ring */
if (!(bd_status & T_W_S))
@@ -458,8 +458,9 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
u16 bd_status;
int tx_restart = 0;
+ dma_rmb();
bd = priv->dirty_tx;
- bd_status = ioread16be(&bd->status);
+ bd_status = be16_to_cpu(bd->status);
/* Normal processing. */
while ((bd_status & T_R_S) == 0) {
@@ -503,7 +504,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
bd += 1;
else
bd = priv->tx_bd_base;
- bd_status = ioread16be(&bd->status);
+ bd_status = be16_to_cpu(bd->status);
}
priv->dirty_tx = bd;
@@ -524,8 +525,9 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
u16 length, howmany = 0;
u8 *bdbuffer;
+ dma_rmb();
bd = priv->currx_bd;
- bd_status = ioread16be(&bd->status);
+ bd_status = be16_to_cpu(bd->status);
/* while there are received buffers and BD is full (~R_E) */
while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
@@ -549,7 +551,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
}
bdbuffer = priv->rx_buffer +
(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
- length = ioread16be(&bd->length);
+ length = be16_to_cpu(bd->length);
switch (dev->type) {
case ARPHRD_RAWHDLC:
@@ -593,7 +595,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
netif_receive_skb(skb);
recycle:
- iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);
+ bd->status = cpu_to_be16((bd_status & R_W_S) | R_E_S | R_I_S);
/* update to point at the next bd */
if (bd_status & R_W_S) {
@@ -608,8 +610,9 @@ recycle:
bd += 1;
}
- bd_status = ioread16be(&bd->status);
+ bd_status = be16_to_cpu(bd->status);
}
+ dma_rmb();
priv->currx_bd = bd;
return howmany;
@@ -721,7 +724,7 @@ static int uhdlc_open(struct net_device *dev)
/* Enable the TDM port */
if (priv->tsa)
- utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+ qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
priv->hdlc_busy = 1;
netif_device_attach(priv->ndev);
@@ -812,7 +815,7 @@ static int uhdlc_close(struct net_device *dev)
(u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
if (priv->tsa)
- utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
+ qe_clrbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
@@ -848,7 +851,7 @@ static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
- struct qe_mux *qe_mux_reg = &qe_immr->qmx;
+ struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;
/* store si clk */
priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
@@ -863,7 +866,7 @@ static void store_clk_config(struct ucc_hdlc_private *priv)
static void resume_clk_config(struct ucc_hdlc_private *priv)
{
- struct qe_mux *qe_mux_reg = &qe_immr->qmx;
+ struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;
memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
@@ -990,9 +993,8 @@ static int uhdlc_resume(struct device *dev)
else
bd_status = R_E_S | R_I_S | R_W_S;
- iowrite16be(bd_status, &priv->rx_bd_base[i].status);
- iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
- &priv->rx_bd_base[i].buf);
+ priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
+ priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
}
for (i = 0; i < TX_BD_RING_LEN; i++) {
@@ -1001,10 +1003,10 @@ static int uhdlc_resume(struct device *dev)
else
bd_status = T_I_S | T_TC_S | T_W_S;
- iowrite16be(bd_status, &priv->tx_bd_base[i].status);
- iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
- &priv->tx_bd_base[i].buf);
+ priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
+ priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
}
+ dma_wmb();
/* if hdlc is busy enable TX and RX */
if (priv->hdlc_busy == 1) {
@@ -1018,7 +1020,7 @@ static int uhdlc_resume(struct device *dev)
/* Enable the TDM port */
if (priv->tsa)
- utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+ qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
}
napi_enable(&priv->napi);
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 88a36a069311..0b7d9f2f2b8b 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -17,13 +17,19 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/wan_ixp4xx_hss.h>
#include <linux/poll.h>
#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
#include <linux/soc/ixp4xx/cpu.h>
+/* This is what all IXP4xx platforms we know of use; if more frequencies
+ * are needed, we need to migrate to the clock framework.
+ */
+#define IXP4XX_TIMER_FREQ 66666000
+
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
@@ -50,7 +56,6 @@
#define NAPI_WEIGHT 16
/* Queue IDs */
-#define HSS0_CHL_RXTRIG_QUEUE 12 /* orig size = 32 dwords */
#define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE 15
@@ -62,7 +67,6 @@
#define HSS0_PKT_RXFREE3_QUEUE 21
#define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */
-#define HSS1_CHL_RXTRIG_QUEUE 10
#define HSS1_PKT_RX_QUEUE 0
#define HSS1_PKT_TX0_QUEUE 5
#define HSS1_PKT_TX1_QUEUE 6
@@ -252,9 +256,19 @@ typedef void buffer_t;
struct port {
struct device *dev;
struct npe *npe;
+ unsigned int txreadyq;
+ unsigned int rxtrigq;
+ unsigned int rxfreeq;
+ unsigned int rxq;
+ unsigned int txq;
+ unsigned int txdoneq;
+ struct gpio_desc *cts;
+ struct gpio_desc *rts;
+ struct gpio_desc *dcd;
+ struct gpio_desc *dtr;
+ struct gpio_desc *clk_internal;
struct net_device *netdev;
struct napi_struct napi;
- struct hss_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
dma_addr_t desc_tab_phys;
@@ -322,14 +336,6 @@ static int ports_open;
static struct dma_pool *dma_pool;
static DEFINE_SPINLOCK(npe_lock);
-static const struct {
- int tx, txdone, rx, rxfree;
-} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
- HSS0_PKT_RXFREE0_QUEUE},
- {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
- HSS1_PKT_RXFREE0_QUEUE},
-};
-
/*****************************************************************************
* utility functions
****************************************************************************/
@@ -645,7 +651,7 @@ static void hss_hdlc_rx_irq(void *pdev)
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
- qmgr_disable_irq(queue_ids[port->id].rx);
+ qmgr_disable_irq(port->rxq);
napi_schedule(&port->napi);
}
@@ -653,8 +659,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
struct port *port = container_of(napi, struct port, napi);
struct net_device *dev = port->netdev;
- unsigned int rxq = queue_ids[port->id].rx;
- unsigned int rxfreeq = queue_ids[port->id].rxfree;
+ unsigned int rxq = port->rxq;
+ unsigned int rxfreeq = port->rxfreeq;
int received = 0;
#if DEBUG_RX
@@ -795,7 +801,7 @@ static void hss_hdlc_txdone_irq(void *pdev)
#if DEBUG_TX
printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
- while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
+ while ((n_desc = queue_get_desc(port->txdoneq,
port, 1)) >= 0) {
struct desc *desc;
int start;
@@ -813,8 +819,8 @@ static void hss_hdlc_txdone_irq(void *pdev)
free_buffer_irq(port->tx_buff_tab[n_desc]);
port->tx_buff_tab[n_desc] = NULL;
- start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
- queue_put_desc(port->plat->txreadyq,
+ start = qmgr_stat_below_low_watermark(port->txreadyq);
+ queue_put_desc(port->txreadyq,
tx_desc_phys(port, n_desc), desc);
if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
@@ -829,7 +835,7 @@ static void hss_hdlc_txdone_irq(void *pdev)
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port *port = dev_to_port(dev);
- unsigned int txreadyq = port->plat->txreadyq;
+ unsigned int txreadyq = port->txreadyq;
int len, offset, bytes, n;
void *mem;
u32 phys;
@@ -889,7 +895,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
desc->buf_len = desc->pkt_len = len;
wmb();
- queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
+ queue_put_desc(port->txq, tx_desc_phys(port, n), desc);
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
@@ -916,40 +922,40 @@ static int request_hdlc_queues(struct port *port)
{
int err;
- err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
+ err = qmgr_request_queue(port->rxfreeq, RX_DESCS, 0, 0,
"%s:RX-free", port->netdev->name);
if (err)
return err;
- err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
+ err = qmgr_request_queue(port->rxq, RX_DESCS, 0, 0,
"%s:RX", port->netdev->name);
if (err)
goto rel_rxfree;
- err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
+ err = qmgr_request_queue(port->txq, TX_DESCS, 0, 0,
"%s:TX", port->netdev->name);
if (err)
goto rel_rx;
- err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
+ err = qmgr_request_queue(port->txreadyq, TX_DESCS, 0, 0,
"%s:TX-ready", port->netdev->name);
if (err)
goto rel_tx;
- err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
+ err = qmgr_request_queue(port->txdoneq, TX_DESCS, 0, 0,
"%s:TX-done", port->netdev->name);
if (err)
goto rel_txready;
return 0;
rel_txready:
- qmgr_release_queue(port->plat->txreadyq);
+ qmgr_release_queue(port->txreadyq);
rel_tx:
- qmgr_release_queue(queue_ids[port->id].tx);
+ qmgr_release_queue(port->txq);
rel_rx:
- qmgr_release_queue(queue_ids[port->id].rx);
+ qmgr_release_queue(port->rxq);
rel_rxfree:
- qmgr_release_queue(queue_ids[port->id].rxfree);
+ qmgr_release_queue(port->rxfreeq);
printk(KERN_DEBUG "%s: unable to request hardware queues\n",
port->netdev->name);
return err;
@@ -957,11 +963,11 @@ rel_rxfree:
static void release_hdlc_queues(struct port *port)
{
- qmgr_release_queue(queue_ids[port->id].rxfree);
- qmgr_release_queue(queue_ids[port->id].rx);
- qmgr_release_queue(queue_ids[port->id].txdone);
- qmgr_release_queue(queue_ids[port->id].tx);
- qmgr_release_queue(port->plat->txreadyq);
+ qmgr_release_queue(port->rxfreeq);
+ qmgr_release_queue(port->rxq);
+ qmgr_release_queue(port->txdoneq);
+ qmgr_release_queue(port->txq);
+ qmgr_release_queue(port->txreadyq);
}
static int init_hdlc_queues(struct port *port)
@@ -1046,11 +1052,24 @@ static void destroy_hdlc_queues(struct port *port)
}
}
+static irqreturn_t hss_hdlc_dcd_irq(int irq, void *data)
+{
+ struct net_device *dev = data;
+ struct port *port = dev_to_port(dev);
+ int val;
+
+ val = gpiod_get_value(port->dcd);
+ hss_hdlc_set_carrier(dev, val);
+
+ return IRQ_HANDLED;
+}
+
static int hss_hdlc_open(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
unsigned long flags;
int i, err = 0;
+ int val;
err = hdlc_open(dev);
if (err)
@@ -1069,32 +1088,44 @@ static int hss_hdlc_open(struct net_device *dev)
goto err_destroy_queues;
spin_lock_irqsave(&npe_lock, flags);
- if (port->plat->open) {
- err = port->plat->open(port->id, dev, hss_hdlc_set_carrier);
- if (err)
- goto err_unlock;
+
+	/* Set the carrier: the GPIO is flagged active low, so this will
+	 * return 1 if DCD is asserted.
+	 */
+ val = gpiod_get_value(port->dcd);
+ hss_hdlc_set_carrier(dev, val);
+
+ /* Set up an IRQ for DCD */
+ err = request_irq(gpiod_to_irq(port->dcd), hss_hdlc_dcd_irq, 0, "IXP4xx HSS", dev);
+ if (err) {
+ dev_err(&dev->dev, "ixp4xx_hss: failed to request DCD IRQ (%i)\n", err);
+ goto err_unlock;
}
+ /* GPIOs are flagged active low so this asserts DTR and RTS */
+ gpiod_set_value(port->dtr, 1);
+ gpiod_set_value(port->rts, 1);
+
spin_unlock_irqrestore(&npe_lock, flags);
/* Populate queues with buffers, no failure after this point */
for (i = 0; i < TX_DESCS; i++)
- queue_put_desc(port->plat->txreadyq,
+ queue_put_desc(port->txreadyq,
tx_desc_phys(port, i), tx_desc_ptr(port, i));
for (i = 0; i < RX_DESCS; i++)
- queue_put_desc(queue_ids[port->id].rxfree,
+ queue_put_desc(port->rxfreeq,
rx_desc_phys(port, i), rx_desc_ptr(port, i));
napi_enable(&port->napi);
netif_start_queue(dev);
- qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
+ qmgr_set_irq(port->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
hss_hdlc_rx_irq, dev);
- qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
+ qmgr_set_irq(port->txdoneq, QUEUE_IRQ_SRC_NOT_EMPTY,
hss_hdlc_txdone_irq, dev);
- qmgr_enable_irq(queue_ids[port->id].txdone);
+ qmgr_enable_irq(port->txdoneq);
ports_open++;
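
The gpiod calls above rely on the consumer API hiding line polarity: because the device tree flags these lines active low, gpiod_get_value()/gpiod_set_value() deal in logical assertion, not pin level. A sketch of the convention:

	/* Logical values: 1 means asserted. With GPIO_ACTIVE_LOW in the
	 * device tree, asserting drives the physical pin low.
	 */
	gpiod_set_value(port->dtr, 1);	/* assert DTR */
	gpiod_set_value(port->rts, 1);	/* assert RTS */

	if (gpiod_get_value(port->dcd))	/* 1: DCD asserted */
		netif_carrier_on(port->netdev);
	else
		netif_carrier_off(port->netdev);
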
@@ -1125,15 +1156,15 @@ static int hss_hdlc_close(struct net_device *dev)
spin_lock_irqsave(&npe_lock, flags);
ports_open--;
- qmgr_disable_irq(queue_ids[port->id].rx);
+ qmgr_disable_irq(port->rxq);
netif_stop_queue(dev);
napi_disable(&port->napi);
hss_stop_hdlc(port);
- while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
+ while (queue_get_desc(port->rxfreeq, port, 0) >= 0)
buffs--;
- while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
+ while (queue_get_desc(port->rxq, port, 0) >= 0)
buffs--;
if (buffs)
@@ -1141,12 +1172,12 @@ static int hss_hdlc_close(struct net_device *dev)
buffs);
buffs = TX_DESCS;
- while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
+ while (queue_get_desc(port->txq, port, 1) >= 0)
buffs--; /* cancel TX */
i = 0;
do {
- while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
+ while (queue_get_desc(port->txreadyq, port, 1) >= 0)
buffs--;
if (!buffs)
break;
@@ -1159,10 +1190,12 @@ static int hss_hdlc_close(struct net_device *dev)
if (!buffs)
printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
- qmgr_disable_irq(queue_ids[port->id].txdone);
+ qmgr_disable_irq(port->txdoneq);
- if (port->plat->close)
- port->plat->close(port->id, dev);
+ free_irq(gpiod_to_irq(port->dcd), dev);
+ /* GPIOs are flagged active low so this de-asserts DTR and RTS */
+ gpiod_set_value(port->dtr, 0);
+ gpiod_set_value(port->rts, 0);
spin_unlock_irqrestore(&npe_lock, flags);
destroy_hdlc_queues(port);
@@ -1254,6 +1287,21 @@ static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg)
}
}
+static int hss_hdlc_set_clock(struct port *port, unsigned int clock_type)
+{
+ switch (clock_type) {
+ case CLOCK_DEFAULT:
+ case CLOCK_EXT:
+ gpiod_set_value(port->clk_internal, 0);
+ return CLOCK_EXT;
+ case CLOCK_INT:
+ gpiod_set_value(port->clk_internal, 1);
+ return CLOCK_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
@@ -1286,8 +1334,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
return -EFAULT;
clk = new_line.clock_type;
- if (port->plat->set_clock)
- clk = port->plat->set_clock(port->id, clk);
+ hss_hdlc_set_clock(port, clk);
if (clk != CLOCK_EXT && clk != CLOCK_INT)
return -EINVAL; /* No such clock setting */
@@ -1297,7 +1344,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
port->clock_type = clk; /* Update settings */
if (clk == CLOCK_INT) {
- find_best_clock(port->plat->timer_freq,
+ find_best_clock(IXP4XX_TIMER_FREQ,
new_line.clock_rate,
&port->clock_rate, &port->clock_reg);
} else {
@@ -1335,77 +1382,139 @@ static const struct net_device_ops hss_hdlc_ops = {
.ndo_siocwandev = hss_hdlc_ioctl,
};
-static int hss_init_one(struct platform_device *pdev)
+static int ixp4xx_hss_probe(struct platform_device *pdev)
{
+ struct of_phandle_args queue_spec;
+ struct of_phandle_args npe_spec;
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct device_node *np;
struct port *port;
- struct net_device *dev;
hdlc_device *hdlc;
int err;
- port = kzalloc(sizeof(*port), GFP_KERNEL);
+ np = dev->of_node;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
- port->npe = npe_request(0);
+ err = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0,
+ &npe_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no NPE engine specified\n");
+ /* NPE ID 0x00, 0x10, 0x20... */
+ port->npe = npe_request(npe_spec.args[0] << 4);
if (!port->npe) {
- err = -ENODEV;
- goto err_free;
+ dev_err(dev, "unable to obtain NPE instance\n");
+ return -ENODEV;
}
- dev = alloc_hdlcdev(port);
+ /* Get the TX ready queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-chl-txready", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no txready queue phandle\n");
+ port->txreadyq = queue_spec.args[0];
+ /* Get the RX trig queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-chl-rxtrig", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no rxtrig queue phandle\n");
+ port->rxtrigq = queue_spec.args[0];
+ /* Get the RX queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-rx", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no RX queue phandle\n");
+ port->rxq = queue_spec.args[0];
+ /* Get the TX queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-tx", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no RX queue phandle\n");
+ port->txq = queue_spec.args[0];
+ /* Get the RX free queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-rxfree", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no RX free queue phandle\n");
+ port->rxfreeq = queue_spec.args[0];
+ /* Get the TX done queue as resource from queue manager */
+	err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-txdone", 1, 0,
+ &queue_spec);
+ if (err)
+ return dev_err_probe(dev, err, "no TX done queue phandle\n");
+ port->txdoneq = queue_spec.args[0];
+
+ /* Obtain all the line control GPIOs */
+ port->cts = devm_gpiod_get(dev, "cts", GPIOD_OUT_LOW);
+ if (IS_ERR(port->cts))
+ return dev_err_probe(dev, PTR_ERR(port->cts), "unable to get CTS GPIO\n");
+ port->rts = devm_gpiod_get(dev, "rts", GPIOD_OUT_LOW);
+ if (IS_ERR(port->rts))
+ return dev_err_probe(dev, PTR_ERR(port->rts), "unable to get RTS GPIO\n");
+ port->dcd = devm_gpiod_get(dev, "dcd", GPIOD_IN);
+ if (IS_ERR(port->dcd))
+ return dev_err_probe(dev, PTR_ERR(port->dcd), "unable to get DCD GPIO\n");
+ port->dtr = devm_gpiod_get(dev, "dtr", GPIOD_OUT_LOW);
+ if (IS_ERR(port->dtr))
+ return dev_err_probe(dev, PTR_ERR(port->dtr), "unable to get DTR GPIO\n");
+ port->clk_internal = devm_gpiod_get(dev, "clk-internal", GPIOD_OUT_LOW);
+ if (IS_ERR(port->clk_internal))
+ return dev_err_probe(dev, PTR_ERR(port->clk_internal),
+ "unable to get CLK internal GPIO\n");
+
+	ndev = alloc_hdlcdev(port);
+	port->netdev = ndev;
if (!port->netdev) {
err = -ENOMEM;
goto err_plat;
}
- SET_NETDEV_DEV(dev, &pdev->dev);
- hdlc = dev_to_hdlc(dev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ hdlc = dev_to_hdlc(ndev);
hdlc->attach = hss_hdlc_attach;
hdlc->xmit = hss_hdlc_xmit;
- dev->netdev_ops = &hss_hdlc_ops;
- dev->tx_queue_len = 100;
+ ndev->netdev_ops = &hss_hdlc_ops;
+ ndev->tx_queue_len = 100;
port->clock_type = CLOCK_EXT;
port->clock_rate = 0;
port->clock_reg = CLK42X_SPEED_2048KHZ;
port->id = pdev->id;
port->dev = &pdev->dev;
- port->plat = pdev->dev.platform_data;
- netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
+ netif_napi_add(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
- err = register_hdlc_device(dev);
+ err = register_hdlc_device(ndev);
if (err)
goto err_free_netdev;
platform_set_drvdata(pdev, port);
- netdev_info(dev, "initialized\n");
+ netdev_info(ndev, "initialized\n");
return 0;
err_free_netdev:
- free_netdev(dev);
+ free_netdev(ndev);
err_plat:
npe_release(port->npe);
-err_free:
- kfree(port);
return err;
}
-static int hss_remove_one(struct platform_device *pdev)
+static int ixp4xx_hss_remove(struct platform_device *pdev)
{
struct port *port = platform_get_drvdata(pdev);
unregister_hdlc_device(port->netdev);
free_netdev(port->netdev);
npe_release(port->npe);
- kfree(port);
return 0;
}
static struct platform_driver ixp4xx_hss_driver = {
.driver.name = DRV_NAME,
- .probe = hss_init_one,
- .remove = hss_remove_one,
+ .probe = ixp4xx_hss_probe,
+ .remove = ixp4xx_hss_remove,
};
static int __init hss_init_module(void)
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index ed687bf6ec47..6a142dc85c37 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -550,7 +550,7 @@ static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
(timeout-- > 0))
cpu_relax();
- printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear it's memory\n", dev->name, 500000-timeout);
+ printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
for(pos = 0; pos < xc.len; pos++){
switch(data[pos]){
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index c0cfd9b36c0b..720952b92e78 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -302,6 +302,41 @@ void wg_noise_set_static_identity_private_key(
static_identity->static_public, private_key);
}
+static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen)
+{
+ struct blake2s_state state;
+ u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
+ u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
+ int i;
+
+ if (keylen > BLAKE2S_BLOCK_SIZE) {
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, key, keylen);
+ blake2s_final(&state, x_key);
+ } else
+ memcpy(x_key, key, keylen);
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, in, inlen);
+ blake2s_final(&state, i_hash);
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x5c ^ 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
+ blake2s_final(&state, i_hash);
+
+ memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
+ memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
+ memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
+}
+
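
The helper is the standard HMAC construction of RFC 2104 instantiated with BLAKE2s: out = H((K XOR opad) || H((K XOR ipad) || m)), with ipad bytes 0x36 and opad bytes 0x5c. The second XOR loop exploits the fact that x_key already holds K XOR 0x36, so XOR-ing each byte with 0x5c ^ 0x36 turns it directly into K XOR 0x5c without keeping a second copy of the key around.
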
/* This is Hugo Krawczyk's HKDF:
* - https://eprint.iacr.org/2010/264.pdf
* - https://tools.ietf.org/html/rfc5869
@@ -322,14 +357,14 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
((third_len || third_dst) && (!second_len || !second_dst))));
/* Extract entropy from data into secret */
- blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
+ hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
if (!first_dst || !first_len)
goto out;
/* Expand first key: key = secret, data = 0x1 */
output[0] = 1;
- blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
memcpy(first_dst, output, first_len);
if (!second_dst || !second_len)
@@ -337,8 +372,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
/* Expand second key: key = secret, data = first-key || 0x2 */
output[BLAKE2S_HASH_SIZE] = 2;
- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
- BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(second_dst, output, second_len);
if (!third_dst || !third_len)
@@ -346,8 +380,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
/* Expand third key: key = secret, data = second-key || 0x3 */
output[BLAKE2S_HASH_SIZE] = 3;
- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
- BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(third_dst, output, third_len);
out:
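
kdf() above is the HKDF construction cited in the comment: extract a pseudorandom key from the input keying material, then expand it into successive blocks chained as T(n) = HMAC(PRK, T(n-1) || n). A condensed sketch of the same chain in terms of the hmac() helper:

	u8 prk[BLAKE2S_HASH_SIZE];
	u8 t[BLAKE2S_HASH_SIZE + 1];

	/* Extract: PRK = HMAC(chaining_key, input) */
	hmac(prk, data, chaining_key, data_len, NOISE_HASH_LEN);

	/* Expand: T(1) = HMAC(PRK, 0x01) */
	t[0] = 1;
	hmac(t, t, prk, 1, BLAKE2S_HASH_SIZE);

	/* T(2) = HMAC(PRK, T(1) || 0x02); T(3) follows the same shape */
	t[BLAKE2S_HASH_SIZE] = 2;
	hmac(t, t, prk, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
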
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index e2388107f7fd..583adb37ee1e 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -79,9 +79,7 @@ static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
u8 sw_hash = skb->sw_hash;
u32 hash = skb->hash;
skb_scrub_packet(skb, true);
- memset(&skb->headers_start, 0,
- offsetof(struct sk_buff, headers_end) -
- offsetof(struct sk_buff, headers_start));
+ memset(&skb->headers, 0, sizeof(skb->headers));
if (encapsulating) {
skb->l4_hash = l4_hash;
skb->sw_hash = sw_hash;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 0e9bad33fac8..141c1b5a7b1f 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -153,6 +153,10 @@ static void ar5523_cmd_rx_cb(struct urb *urb)
ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START");
return;
}
+ if (!cmd->odata) {
+ ar5523_err(ar, "Unexpected WDCMSG_TARGET_START reply");
+ return;
+ }
memcpy(cmd->odata, hdr + 1, sizeof(u32));
cmd->olen = sizeof(u32);
cmd->res = 0;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 5935e0973d14..8f5b8eb368fa 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -12,6 +12,7 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/pm_qos.h>
+#include <linux/nvmem-consumer.h>
#include <asm/byteorder.h>
#include "core.h"
@@ -89,6 +90,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
},
@@ -124,6 +126,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
},
@@ -160,6 +163,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@@ -190,6 +194,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.uart_pin_workaround = true,
.tx_stats_over_pktlog = false,
+ .credit_size_workaround = false,
.bmi_large_size_download = true,
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
@@ -226,6 +231,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@@ -261,6 +267,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@@ -296,6 +303,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@@ -334,6 +342,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = true,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
@@ -376,6 +385,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@@ -424,6 +434,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@@ -469,6 +480,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@@ -504,6 +516,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@@ -541,6 +554,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = true,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@@ -570,6 +584,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.uart_pin_workaround = true,
+ .credit_size_workaround = true,
.dynamic_sar_support = false,
},
{
@@ -611,6 +626,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
@@ -639,6 +655,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = true,
.hw_filter_reset_required = false,
.fw_diag_ce_download = false,
+ .credit_size_workaround = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = true,
},
@@ -714,6 +731,7 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
{
+ bool mtu_workaround = ar->hw_params.credit_size_workaround;
int ret;
u32 param = 0;
@@ -731,7 +749,7 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
- if (mode == ATH10K_FIRMWARE_MODE_NORMAL)
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL && !mtu_workaround)
param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
else
param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
@@ -935,7 +953,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
}
if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
- ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM)
bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID;
else
bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID;
@@ -1726,7 +1745,8 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
/* As of now pre-cal is valid for 10_4 variants */
if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
- ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM)
bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
@@ -1853,6 +1873,39 @@ out_free:
return ret;
}
+static int ath10k_download_cal_nvmem(struct ath10k *ar, const char *cell_name)
+{
+ struct nvmem_cell *cell;
+ void *buf;
+ size_t len;
+ int ret;
+
+ cell = devm_nvmem_cell_get(ar->dev, cell_name);
+ if (IS_ERR(cell)) {
+ ret = PTR_ERR(cell);
+ return ret;
+ }
+
+ buf = nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ if (ar->hw_params.cal_data_len != len) {
+ kfree(buf);
+ ath10k_warn(ar, "invalid calibration data length in nvmem-cell '%s': %zu != %u\n",
+ cell_name, len, ar->hw_params.cal_data_len);
+ return -EMSGSIZE;
+ }
+
+ ret = ath10k_download_board_data(ar, buf, len);
+ kfree(buf);
+ if (ret)
+ ath10k_warn(ar, "failed to download calibration data from nvmem-cell '%s': %d\n",
+ cell_name, ret);
+
+ return ret;
+}
+
int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
struct ath10k_fw_file *fw_file)
{
@@ -2087,6 +2140,18 @@ static int ath10k_core_pre_cal_download(struct ath10k *ar)
{
int ret;
+ ret = ath10k_download_cal_nvmem(ar, "pre-calibration");
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_NVMEM;
+ goto success;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a pre-calibration nvmem-cell, try file next: %d\n",
+ ret);
+
ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
if (ret == 0) {
ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
@@ -2153,6 +2218,18 @@ static int ath10k_download_cal_data(struct ath10k *ar)
"pre cal download procedure failed, try cal file: %d\n",
ret);
+ ret = ath10k_download_cal_nvmem(ar, "calibration");
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_NVMEM;
+ goto done;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a calibration nvmem-cell, try file next: %d\n",
+ ret);
+
ret = ath10k_download_cal_file(ar, ar->cal_file);
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_FILE;
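
With these hunks the calibration lookup order becomes nvmem-cell, then file, then DeviceTree, and -EPROBE_DEFER is passed straight up so the probe is retried once the nvmem provider (an EEPROM or eFuse driver, say) has registered. The consumer-side pattern reduced to its essentials (helper name hypothetical; cell names "pre-calibration"/"calibration" are the ones the driver requests):

static int read_cal_blob(struct device *dev, const char *cell_name,
                         void **blob, size_t expected_len)
{
        struct nvmem_cell *cell;
        size_t len;
        void *buf;

        cell = devm_nvmem_cell_get(dev, cell_name);
        if (IS_ERR(cell))               /* may be -EPROBE_DEFER */
                return PTR_ERR(cell);

        buf = nvmem_cell_read(cell, &len);      /* kmalloc'd; caller frees */
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        if (len != expected_len) {      /* length is part of the contract */
                kfree(buf);
                return -EMSGSIZE;
        }

        *blob = buf;
        return 0;
}
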
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 5aeff2d9f6cf..9f6680b3be0a 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -877,8 +877,10 @@ enum ath10k_cal_mode {
ATH10K_CAL_MODE_FILE,
ATH10K_CAL_MODE_OTP,
ATH10K_CAL_MODE_DT,
+ ATH10K_CAL_MODE_NVMEM,
ATH10K_PRE_CAL_MODE_FILE,
ATH10K_PRE_CAL_MODE_DT,
+ ATH10K_PRE_CAL_MODE_NVMEM,
ATH10K_CAL_MODE_EEPROM,
};
@@ -898,10 +900,14 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
return "otp";
case ATH10K_CAL_MODE_DT:
return "dt";
+ case ATH10K_CAL_MODE_NVMEM:
+ return "nvmem";
case ATH10K_PRE_CAL_MODE_FILE:
return "pre-cal-file";
case ATH10K_PRE_CAL_MODE_DT:
return "pre-cal-dt";
+ case ATH10K_PRE_CAL_MODE_NVMEM:
+ return "pre-cal-nvmem";
case ATH10K_CAL_MODE_EEPROM:
return "eeprom";
}
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 55e7e11d06d9..fe6b6f97a916 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -1522,7 +1522,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
mutex_lock(&ar->dump_mutex);
dump_data = (struct ath10k_dump_file_data *)(buf);
- strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+ strscpy(dump_data->df_magic, "ATH10K-FW-DUMP",
sizeof(dump_data->df_magic));
dump_data->len = cpu_to_le32(len);
@@ -1543,11 +1543,11 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
- strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+ strscpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
sizeof(dump_data->fw_ver));
dump_data->kernel_ver_code = 0;
- strlcpy(dump_data->kernel_ver, init_utsname()->release,
+ strscpy(dump_data->kernel_ver, init_utsname()->release,
sizeof(dump_data->kernel_ver));
dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
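
strscpy() is the preferred replacement here because its return value reports truncation directly: it always NUL-terminates and returns the number of bytes copied, or -E2BIG when the source does not fit, whereas strlcpy() returns strlen(src) and has to read the entire source string to compute that. A small illustration:

        char buf[8];
        ssize_t n = strscpy(buf, "ATH10K-FW-DUMP", sizeof(buf));

        /* buf == "ATH10K-" (NUL-terminated); n == -E2BIG, so truncation
         * is a direct comparison instead of "return >= size" arithmetic. */
        if (n == -E2BIG)
                pr_debug("name truncated\n");
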
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index a6de08d3bf4a..9a3a8907389b 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1401,115 +1401,6 @@ enum htt_dbg_stats_status {
};
/*
- * target -> host statistics upload
- *
- * The following field definitions describe the format of the HTT target
- * to host stats upload confirmation message.
- * The message contains a cookie echoed from the HTT host->target stats
- * upload request, which identifies which request the confirmation is
- * for, and a series of tag-length-value stats information elements.
- * The tag-length header for each stats info element also includes a
- * status field, to indicate whether the request for the stat type in
- * question was fully met, partially met, unable to be met, or invalid
- * (if the stat type in question is disabled in the target).
- * A special value of all 1's in this status field is used to indicate
- * the end of the series of stats info elements.
- *
- *
- * |31                         16|15           8|7   5|4       0|
- * |------------------------------------------------------------|
- * |                  reserved                  |    msg type   |
- * |------------------------------------------------------------|
- * |                        cookie LSBs                         |
- * |------------------------------------------------------------|
- * |                        cookie MSBs                         |
- * |------------------------------------------------------------|
- * |      stats entry length     |   reserved   |  S  |stat type|
- * |------------------------------------------------------------|
- * |                                                            |
- * |                  type-specific stats info                  |
- * |                                                            |
- * |------------------------------------------------------------|
- * |      stats entry length     |   reserved   |  S  |stat type|
- * |------------------------------------------------------------|
- * |                                                            |
- * |                  type-specific stats info                  |
- * |                                                            |
- * |------------------------------------------------------------|
- * |              n/a            |   reserved   | 111 |   n/a   |
- * |------------------------------------------------------------|
- * Header fields:
- * - MSG_TYPE
- * Bits 7:0
- * Purpose: identifies this is a statistics upload confirmation message
- * Value: 0x9
- * - COOKIE_LSBS
- * Bits 31:0
- * Purpose: Provide a mechanism to match a target->host stats confirmation
- * message with its preceding host->target stats request message.
- * Value: LSBs of the opaque cookie specified by the host-side requestor
- * - COOKIE_MSBS
- * Bits 31:0
- * Purpose: Provide a mechanism to match a target->host stats confirmation
- * message with its preceding host->target stats request message.
- * Value: MSBs of the opaque cookie specified by the host-side requestor
- *
- * Stats Information Element tag-length header fields:
- * - STAT_TYPE
- * Bits 4:0
- * Purpose: identifies the type of statistics info held in the
- * following information element
- * Value: htt_dbg_stats_type
- * - STATUS
- * Bits 7:5
- * Purpose: indicate whether the requested stats are present
- * Value: htt_dbg_stats_status, including a special value (0x7) to mark
- * the completion of the stats entry series
- * - LENGTH
- * Bits 31:16
- * Purpose: indicate the stats information size
- * Value: This field specifies the number of bytes of stats information
- * that follows the element tag-length header.
- * It is expected but not required that this length is a multiple of
- * 4 bytes. Even if the length is not an integer multiple of 4, the
- * subsequent stats entry header will begin on a 4-byte aligned
- * boundary.
- */
-
-#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
-#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0
-#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0
-#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5
-
-struct htt_stats_conf_item {
- union {
- u8 info;
- struct {
- u8 stat_type:5; /* %HTT_DBG_STATS_ */
- u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
- } __packed;
- } __packed;
- u8 pad;
- __le16 length;
- u8 payload[]; /* roundup(length, 4) long */
-} __packed;
-
-struct htt_stats_conf {
- u8 pad[3];
- __le32 cookie_lsb;
- __le32 cookie_msb;
-
- /* each item has variable length! */
- struct htt_stats_conf_item items[];
-} __packed;
-
-static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
- const struct htt_stats_conf_item *item)
-{
- return (void *)item + sizeof(*item) + roundup(item->length, 4);
-}
-
-/*
* host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
*
* The following field definitions describe the format of the HTT host
@@ -1828,7 +1719,6 @@ struct htt_resp {
struct htt_rc_update rc_update;
struct htt_rx_test rx_test;
struct htt_pktlog_msg pktlog_msg;
- struct htt_stats_conf stats_conf;
struct htt_rx_pn_ind rx_pn_ind;
struct htt_rx_offload_ind rx_offload_ind;
struct htt_rx_in_ord_ind rx_in_ord_ind;
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index d6b8bdcef416..b793eac2cfac 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -147,6 +147,9 @@ void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+
+ if (htt->num_pending_tx == 0)
+ wake_up(&htt->empty_tx_wq);
}
int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 6b03c7787e36..591ef7416b61 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -618,6 +618,9 @@ struct ath10k_hw_params {
*/
bool uart_pin_workaround;
+ /* Workaround for the credit size calculation */
+ bool credit_size_workaround;
+
/* tx stats support over pktlog */
bool tx_stats_over_pktlog;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 1f73fbfee0c0..b11aaee8b8c0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6380,13 +6380,14 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
scan_timeout = min_t(u32, arg.max_rest_time *
(arg.n_channels - 1) + (req->duration +
ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
- arg.n_channels, arg.max_scan_time + 200);
-
+ arg.n_channels, arg.max_scan_time);
} else {
- /* Add a 200ms margin to account for event/command processing */
- scan_timeout = arg.max_scan_time + 200;
+ scan_timeout = arg.max_scan_time;
}
+ /* Add a 200ms margin to account for event/command processing */
+ scan_timeout += 200;
+
ret = ath10k_start_scan(ar, &arg);
if (ret) {
ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 7c9ea0c073d8..6f8b64218894 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -82,8 +82,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
flags = skb_cb->flags;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
ath10k_htt_tx_dec_pending(htt);
- if (htt->num_pending_tx == 0)
- wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
rcu_read_lock();
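
Together with the htt_tx.c hunk above, the wake_up() of empty_tx_wq moves from this caller into ath10k_htt_tx_dec_pending() itself, so every path that drops num_pending_tx to zero signals the waiter while still holding tx_lock. The pairing, roughly (the waiter side is illustrative of the driver's tx-flush path, not copied from it):

/* flush side */
static void tx_wait_empty(struct ath10k_htt *htt)
{
        /* the 5 s cap is arbitrary here; the condition is rechecked on
         * every wake-up */
        wait_event_timeout(htt->empty_tx_wq,
                           READ_ONCE(htt->num_pending_tx) == 0, 5 * HZ);
}

/* completion side, called with htt->tx_lock held */
static void tx_dec_pending(struct ath10k_htt *htt)
{
        if (--htt->num_pending_tx == 0)
                wake_up(&htt->empty_tx_wq);
}
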
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 7c1c2658cb5f..62c453a21e49 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2611,9 +2611,36 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_mac_handle_beacon(ar, skb);
if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control))
+ ieee80211_is_probe_resp(hdr->frame_control)) {
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ enum cfg80211_bss_frame_type ftype;
+ u8 *ies;
+ int ies_ch;
+
status->boottime_ns = ktime_get_boottime_ns();
+ if (!ar->scan_channel)
+ goto drop;
+
+ ies = mgmt->u.beacon.variable;
+
+ if (ieee80211_is_beacon(mgmt->frame_control))
+ ftype = CFG80211_BSS_FTYPE_BEACON;
+ else
+ ftype = CFG80211_BSS_FTYPE_PRESP;
+
+ ies_ch = cfg80211_get_ies_channel_number(mgmt->u.beacon.variable,
+ skb_tail_pointer(skb) - ies,
+ sband->band, ftype);
+
+ if (ies_ch > 0 && ies_ch != channel) {
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "channel mismatched ds channel %d scan channel %d\n",
+ ies_ch, channel);
+ goto drop;
+ }
+ }
+
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
@@ -2627,6 +2654,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ieee80211_rx_ni(ar->hw, skb);
return 0;
+
+drop:
+ dev_kfree_skb(skb);
+ return 0;
}
static int freq_to_idx(struct ath10k *ar, int freq)
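
On crowded 2.4 GHz channels a beacon or probe response sent on a neighbouring channel can still be decoded during a scan, which would report the BSS on the wrong channel. The added filter compares the channel advertised inside the frame's own IEs (DS Params / HT operation) against the channel firmware reported for the scan; the shape of the check, mirroring the call the hunk adds:

        int ies_ch = cfg80211_get_ies_channel_number(ies, ielen,
                                                     sband->band, ftype);

        /* ies_ch <= 0: the IEs carry no channel number, keep the frame.
         * A positive mismatch means off-channel leakage: drop it before
         * it pollutes mac80211's scan results. */
        if (ies_ch > 0 && ies_ch != channel)
                goto drop;
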
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 01bfd09a9d88..4abd12e78028 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3478,7 +3478,9 @@ struct wmi_phyerr_event {
__le32 num_phyerrs;
__le32 tsf_l32;
__le32 tsf_u32;
- struct wmi_phyerr phyerrs[];
+
+ /* array of struct wmi_phyerr */
+ u8 phyerrs[];
} __packed;
struct wmi_10_4_phyerr_event {
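
Since struct wmi_phyerr itself ends in a variable-length buffer, declaring the event tail as an array of that struct gives a meaningless element stride (and newer compilers reject flexible structures used as array members). With a plain u8 tail the records are walked by explicit byte offsets instead; a sketch, assuming wmi_phyerr's buf_len field and a hypothetical process_phyerr() consumer:

        const u8 *p = ev->phyerrs;
        const u8 *end = p + left;       /* 'left' = bytes remaining */

        while (p + sizeof(struct wmi_phyerr) <= end) {
                const struct wmi_phyerr *phyerr = (const void *)p;
                u32 blen = __le32_to_cpu(phyerr->buf_len);

                if (p + sizeof(*phyerr) + blen > end)
                        break;                  /* truncated record */

                process_phyerr(phyerr);         /* hypothetical */
                p += sizeof(*phyerr) + blen;
        }
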
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 8c9c781afc3e..3fb0aa000825 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -175,8 +175,11 @@ static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
ath11k_ahb_ext_grp_disable(irq_grp);
- napi_synchronize(&irq_grp->napi);
- napi_disable(&irq_grp->napi);
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
}
}
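
napi_enable() and napi_disable() must stay strictly balanced: disabling an already-disabled NAPI context blocks forever waiting for the schedule bit to clear. The new napi_enabled flag makes both paths idempotent so the ext IRQ enable/disable sequences may be entered more than once; as a standalone pattern:

static void guarded_napi_disable(struct napi_struct *napi, bool *enabled)
{
        if (!*enabled)
                return;         /* a second disable would hang */
        napi_synchronize(napi);
        napi_disable(napi);
        *enabled = false;
}

static void guarded_napi_enable(struct napi_struct *napi, bool *enabled)
{
        if (*enabled)
                return;
        napi_enable(napi);
        *enabled = true;
}
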
@@ -206,13 +209,13 @@ static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
- const struct ce_pipe_config *ce_config;
+ const struct ce_attr *ce_attr;
- ce_config = &ab->hw_params.target_ce_config[ce_id];
- if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
+ ce_attr = &ab->hw_params.host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
- if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) {
+ if (ce_attr->dest_nentries) {
ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
CE_HOST_IE_3_ADDRESS);
@@ -221,13 +224,13 @@ static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
- const struct ce_pipe_config *ce_config;
+ const struct ce_attr *ce_attr;
- ce_config = &ab->hw_params.target_ce_config[ce_id];
- if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
+ ce_attr = &ab->hw_params.host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
- if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) {
+ if (ce_attr->dest_nentries) {
ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
CE_HOST_IE_3_ADDRESS);
@@ -300,7 +303,10 @@ static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
- napi_enable(&irq_grp->napi);
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
ath11k_ahb_ext_grp_enable(irq_grp);
}
}
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index de8b632b058c..aaa7b05ff49d 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -14,6 +14,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
.src_nentries = 16,
.src_sz_max = 2048,
.dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
},
/* CE1: target->host HTT + HTC control */
@@ -40,6 +41,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
},
/* CE4: host->target HTT */
@@ -73,11 +75,12 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
},
/* CE8: target autonomous hif_memcpy */
{
- .flags = CE_ATTR_FLAGS,
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
@@ -89,6 +92,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
},
/* CE10: target->host HTT */
@@ -142,6 +146,7 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
},
/* CE4: host->target HTT */
@@ -175,6 +180,7 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
},
/* CE8: target autonomous hif_memcpy */
@@ -220,6 +226,7 @@ const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
},
/* CE4: host->target HTT */
@@ -489,18 +496,32 @@ err_unlock:
return skb;
}
-static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
+static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct sk_buff *skb;
+ struct sk_buff_head list;
+ __skb_queue_head_init(&list);
while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
if (!skb)
continue;
dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
DMA_TO_DEVICE);
- dev_kfree_skb_any(skb);
+
+ if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "tx ce pipe %d len %d\n",
+ pipe->pipe_num, skb->len);
+ pipe->send_cb(ab, skb);
}
}
@@ -636,7 +657,7 @@ static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
pipe->attr_flags = attr->flags;
if (attr->src_nentries) {
- pipe->send_cb = ath11k_ce_send_done_cb;
+ pipe->send_cb = attr->send_cb;
nentries = roundup_pow_of_two(attr->src_nentries);
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
@@ -667,9 +688,10 @@ static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+ const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
- if (pipe->send_cb)
- pipe->send_cb(pipe);
+ if (attr->src_nentries)
+ ath11k_ce_tx_process_cb(pipe);
if (pipe->recv_cb)
ath11k_ce_recv_process_cb(pipe);
@@ -678,9 +700,10 @@ void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+ const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];
- if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
- pipe->send_cb(pipe);
+ if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
+ ath11k_ce_tx_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);
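
The refactor separates reaping from dispatch: completed tx skbs are pulled off the copy-engine ring first, onto a private list, and the per-skb HTC callback then runs from that list, so callback time no longer extends the time spent reaping the ring. With credit flow enabled the skb is freed immediately, since no per-packet completion accounting is needed. The shape of it, with reap_one() standing in for ath11k_ce_completed_send_next():

        struct sk_buff_head list;
        struct sk_buff *skb;

        __skb_queue_head_init(&list);

        while ((skb = reap_one(pipe)) != NULL) {
                if (!pipe->send_cb || credit_flow) {
                        dev_kfree_skb_any(skb); /* nothing to account */
                        continue;
                }
                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list)))
                pipe->send_cb(pipe->ab, skb);   /* HTC tx completion */
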
@@ -953,6 +976,7 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab)
void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
+ struct ath11k_ce_ring *ce_ring;
int desc_sz;
int i;
@@ -964,22 +988,24 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab)
if (pipe->src_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ ce_ring = pipe->src_ring;
dma_free_coherent(ab->dev,
pipe->src_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
- pipe->src_ring->base_addr_owner_space,
- pipe->src_ring->base_addr_ce_space);
+ ce_ring->base_addr_owner_space_unaligned,
+ ce_ring->base_addr_ce_space_unaligned);
kfree(pipe->src_ring);
pipe->src_ring = NULL;
}
if (pipe->dest_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ ce_ring = pipe->dest_ring;
dma_free_coherent(ab->dev,
pipe->dest_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
- pipe->dest_ring->base_addr_owner_space,
- pipe->dest_ring->base_addr_ce_space);
+ ce_ring->base_addr_owner_space_unaligned,
+ ce_ring->base_addr_ce_space_unaligned);
kfree(pipe->dest_ring);
pipe->dest_ring = NULL;
}
@@ -987,11 +1013,12 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab)
if (pipe->status_ring) {
desc_sz =
ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ce_ring = pipe->status_ring;
dma_free_coherent(ab->dev,
pipe->status_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
- pipe->status_ring->base_addr_owner_space,
- pipe->status_ring->base_addr_ce_space);
+ ce_ring->base_addr_owner_space_unaligned,
+ ce_ring->base_addr_ce_space_unaligned);
kfree(pipe->status_ring);
pipe->status_ring = NULL;
}
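
dma_free_coherent() must be handed exactly the CPU address and DMA handle that dma_alloc_coherent() returned; these rings also keep a PTR_ALIGN()'d view for descriptor access, and freeing through the aligned pointers corrupts the allocation, hence the switch to the *_unaligned fields. The invariant spelled out (field names as in the driver's ring struct):

        size_t sz = nentries * desc_sz + CE_DESC_RING_ALIGN;
        dma_addr_t raw_dma;
        void *raw = dma_alloc_coherent(dev, sz, &raw_dma, GFP_KERNEL);

        if (!raw)
                return -ENOMEM;

        ring->base_addr_owner_space_unaligned = raw;    /* keep originals */
        ring->base_addr_ce_space_unaligned = raw_dma;
        ring->base_addr_owner_space = PTR_ALIGN(raw, CE_DESC_RING_ALIGN);

        /* teardown: free with the original, unaligned pair */
        dma_free_coherent(dev, sz, ring->base_addr_owner_space_unaligned,
                          ring->base_addr_ce_space_unaligned);
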
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 713f766cac22..8255b6cfab0c 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -101,6 +101,7 @@ struct ce_attr {
unsigned int dest_nentries;
void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+ void (*send_cb)(struct ath11k_base *, struct sk_buff *);
};
#define CE_DESC_RING_ALIGN 8
@@ -154,7 +155,7 @@ struct ath11k_ce_pipe {
unsigned int buf_sz;
unsigned int rx_buf_needed;
- void (*send_cb)(struct ath11k_ce_pipe *);
+ void (*send_cb)(struct ath11k_base *, struct sk_buff *);
void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
struct tasklet_struct intr_tq;
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index b5a2af3ffc3e..293563b3f784 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -52,6 +52,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
.svc_to_ce_map_len = 21,
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -74,14 +77,26 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
+ .full_monitor_mode = false,
.supports_shadow_regs = false,
.idle_ps = false,
+ .supports_sta_ps = false,
.cold_boot_calib = true,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .supports_regdb = false,
.fix_l1ss = true,
+ .credit_flow = false,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+ .wakeup_mhi = false,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -104,6 +119,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
.svc_to_ce_map_len = 19,
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -123,14 +141,26 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
+ .full_monitor_mode = false,
.supports_shadow_regs = false,
.idle_ps = false,
+ .supports_sta_ps = false,
.cold_boot_calib = true,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .supports_regdb = false,
.fix_l1ss = true,
+ .credit_flow = false,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+ .wakeup_mhi = false,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
},
{
.name = "qca6390 hw2.0",
@@ -153,6 +183,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
+ .rfkill_pin = 48,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 1,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -171,14 +204,26 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
+ .full_monitor_mode = false,
.supports_shadow_regs = true,
.idle_ps = true,
+ .supports_sta_ps = true,
.cold_boot_calib = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .supports_regdb = true,
.fix_l1ss = true,
+ .credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .wakeup_mhi = true,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
},
{
.name = "qcn9074 hw1.0",
@@ -201,6 +246,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
.svc_to_ce_map_len = 18,
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
.rx_mac_buf_ring = false,
@@ -219,14 +267,26 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
+ .full_monitor_mode = true,
.supports_shadow_regs = false,
.idle_ps = false,
+ .supports_sta_ps = false,
.cold_boot_calib = false,
+ .fw_mem_mode = 2,
+ .num_vdevs = 8,
+ .num_peers = 128,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .supports_regdb = false,
.fix_l1ss = true,
+ .credit_flow = false,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
+ .supports_dynamic_smps_6ghz = true,
+ .alloc_cacheable_memory = true,
+ .wakeup_mhi = false,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
},
{
.name = "wcn6855 hw2.0",
@@ -249,6 +309,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -267,14 +330,88 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
+ .full_monitor_mode = false,
.supports_shadow_regs = true,
.idle_ps = true,
+ .supports_sta_ps = true,
.cold_boot_calib = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
.fix_l1ss = false,
+ .credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .wakeup_mhi = true,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ },
+ {
+ .name = "wcn6855 hw2.1",
+ .hw_rev = ATH11K_HW_WCN6855_HW21,
+ .fw = {
+ .dir = "WCN6855/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .cold_boot_calib = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .wakeup_mhi = true,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
},
};
@@ -392,11 +529,26 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
- scnprintf(name, name_len,
- "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
- ath11k_bus_str(ab->hif.bus),
- ab->qmi.target.chip_id,
- ab->qmi.target.board_id, variant);
+ switch (ab->id.bdf_search) {
+ case ATH11K_BDF_SEARCH_BUS_AND_BOARD:
+ scnprintf(name, name_len,
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath11k_bus_str(ab->hif.bus),
+ ab->id.vendor, ab->id.device,
+ ab->id.subsystem_vendor,
+ ab->id.subsystem_device,
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id,
+ variant);
+ break;
+ default:
+ scnprintf(name, name_len,
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath11k_bus_str(ab->hif.bus),
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id, variant);
+ break;
+ }
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board name '%s'\n", name);
@@ -620,10 +772,12 @@ err:
return ret;
}
-static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
- struct ath11k_board_data *bd)
+int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *name)
{
- bd->fw = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_BOARD_FILE);
+ bd->fw = ath11k_core_firmware_request(ab, name);
+
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
@@ -633,7 +787,7 @@ static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
return 0;
}
-#define BOARD_NAME_SIZE 100
+#define BOARD_NAME_SIZE 200
int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
char boardname[BOARD_NAME_SIZE];
@@ -651,7 +805,7 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
goto success;
ab->bd_api = 1;
- ret = ath11k_core_fetch_board_data_api_1(ab, bd);
+ ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE);
if (ret) {
ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
ab->hw_params.fw.dir);
@@ -663,6 +817,18 @@ success:
return 0;
}
+int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ int ret;
+
+ ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME);
+ if (ret)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n",
+ ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir);
+
+ return ret;
+}
+
static void ath11k_core_stop(struct ath11k_base *ab)
{
if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
@@ -893,6 +1059,27 @@ err_firmware_stop:
return ret;
}
+static int ath11k_core_rfkill_config(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int ret = 0, i;
+
+ if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+
+ ret = ath11k_mac_rfkill_config(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath11k_warn(ab, "failed to configure rfkill: %d", ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
@@ -939,6 +1126,13 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
goto err_core_stop;
}
ath11k_hif_irq_enable(ab);
+
+ ret = ath11k_core_rfkill_config(ab);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath11k_err(ab, "failed to config rfkill: %d\n", ret);
+ goto err_core_stop;
+ }
+
mutex_unlock(&ab->core_lock);
return 0;
@@ -969,7 +1163,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
ath11k_dp_free(ab);
ath11k_hal_srng_deinit(ab);
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
ret = ath11k_hal_srng_init(ab);
if (ret)
@@ -1003,6 +1197,8 @@ void ath11k_core_halt(struct ath11k *ar)
ath11k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
+ cancel_work_sync(&ab->update_11d_work);
+ cancel_work_sync(&ab->rfkill_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
@@ -1010,6 +1206,56 @@ void ath11k_core_halt(struct ath11k *ar)
idr_init(&ar->txmgmt_idr);
}
+static void ath11k_rfkill_work(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, rfkill_work);
+ struct ath11k *ar;
+ bool rfkill_radio_on;
+ int i;
+
+ spin_lock_bh(&ab->base_lock);
+ rfkill_radio_on = ab->rfkill_radio_on;
+ spin_unlock_bh(&ab->base_lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ if (!ar)
+ continue;
+
+ /* notify cfg80211 radio state change */
+ ath11k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
+ wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on);
+ }
+}
+
+static void ath11k_update_11d(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ struct wmi_set_current_country_params set_current_param = {};
+ int ret, i;
+
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&set_current_param.alpha2, &ab->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c\n",
+ set_current_param.alpha2[0],
+ set_current_param.alpha2[1]);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "pdev id %d failed set current country code: %d\n",
+ i, ret);
+ }
+}
+
static void ath11k_core_restart(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
@@ -1043,6 +1289,7 @@ static void ath11k_core_restart(struct work_struct *work)
idr_for_each(&ar->txmgmt_idr,
ath11k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
+ wake_up(&ar->txmgmt_empty_waitq);
}
wake_up(&ab->wmi_ab.tx_credits_wq);
@@ -1179,12 +1426,15 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
mutex_init(&ab->core_lock);
spin_lock_init(&ab->base_lock);
+ mutex_init(&ab->vdev_id_11d_lock);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
init_waitqueue_head(&ab->qmi.cold_boot_waitq);
INIT_WORK(&ab->restart_work, ath11k_core_restart);
+ INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
+ INIT_WORK(&ab->rfkill_work, ath11k_rfkill_work);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 31d234a51c79..9e88ccca5ca7 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -47,6 +47,11 @@ enum ath11k_supported_bw {
ATH11K_BW_160 = 3,
};
+enum ath11k_bdf_search {
+ ATH11K_BDF_SEARCH_DEFAULT,
+ ATH11K_BDF_SEARCH_BUS_AND_BOARD,
+};
+
enum wme_ac {
WME_AC_BE,
WME_AC_BK,
@@ -112,6 +117,7 @@ enum ath11k_hw_rev {
ATH11K_HW_IPQ6018_HW10,
ATH11K_HW_QCN9074_HW10,
ATH11K_HW_WCN6855_HW20,
+ ATH11K_HW_WCN6855_HW21,
};
enum ath11k_firmware_mode {
@@ -136,6 +142,7 @@ struct ath11k_ext_irq_grp {
u32 num_irq;
u32 grp_id;
u64 timestamp;
+ bool napi_enabled;
struct napi_struct napi;
struct net_device napi_ndev;
};
@@ -194,6 +201,9 @@ enum ath11k_dev_flags {
ATH11K_FLAG_REGISTERED,
ATH11K_FLAG_QMI_FAIL,
ATH11K_FLAG_HTC_SUSPEND_COMPLETE,
+ ATH11K_FLAG_CE_IRQ_ENABLED,
+ ATH11K_FLAG_EXT_IRQ_ENABLED,
+ ATH11K_FLAG_FIXED_MEM_RGN,
};
enum ath11k_monitor_flags {
@@ -240,6 +250,7 @@ struct ath11k_vif {
bool is_started;
bool is_up;
bool spectral_enabled;
+ bool ps;
u32 aid;
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
@@ -249,6 +260,8 @@ struct ath11k_vif {
int txpower;
bool rsnie_present;
bool wpaie_present;
+ bool bcca_zero_sent;
+ bool do_not_send_tmpl;
struct ieee80211_chanctx_conf chanctx;
};
@@ -370,10 +383,13 @@ struct ath11k_sta {
struct work_struct update_wk;
struct work_struct set_4addr_wk;
struct rate_info txrate;
+ u32 peer_nss;
struct rate_info last_txrate;
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
+ s8 rssi_beacon;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
struct ath11k_htt_tx_stats *tx_stats;
struct ath11k_rx_peer_stats *rx_stats;
@@ -404,6 +420,10 @@ enum ath11k_state {
/* Antenna noise floor */
#define ATH11K_DEFAULT_NOISE_FLOOR -95
+#define ATH11K_INVALID_RSSI_FULL -1
+
+#define ATH11K_INVALID_RSSI_EMPTY -128
+
struct ath11k_fw_stats {
struct dentry *debugfs_fwstats;
u32 pdev_id;
@@ -539,6 +559,7 @@ struct ath11k {
/* protects txmgmt_idr data */
spinlock_t txmgmt_idr_lock;
atomic_t num_pending_mgmt_tx;
+ wait_queue_head_t txmgmt_empty_waitq;
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock
@@ -577,6 +598,11 @@ struct ath11k {
#endif
bool dfs_block_radar_events;
struct ath11k_thermal thermal;
+ u32 vdev_id_11d_scan;
+ struct completion finish_11d_scan;
+ struct completion finish_11d_ch_list;
+ bool pending_11d;
+ bool regdom_set_by_user;
};
struct ath11k_band_cap {
@@ -703,6 +729,11 @@ struct ath11k_base {
/* Protects data like peers */
spinlock_t base_lock;
struct ath11k_pdev pdevs[MAX_RADIOS];
+ struct {
+ enum WMI_HOST_WLAN_BAND supported_bands;
+ u32 pdev_id;
+ } target_pdev_ids[MAX_RADIOS];
+ u8 target_pdev_count;
struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
@@ -713,7 +744,6 @@ struct ath11k_base {
u32 wlan_init_status;
int irq_num[ATH11K_IRQ_NUM_MAX];
struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
- struct napi_struct *napi;
struct ath11k_targ_cap target_caps;
u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
bool pdevs_macaddr_valid;
@@ -746,6 +776,8 @@ struct ath11k_base {
struct completion driver_recovery;
struct workqueue_struct *workqueue;
struct work_struct restart_work;
+ struct work_struct update_11d_work;
+ u8 new_alpha2[3];
struct {
/* protected by data_lock */
u32 fw_crash_counter;
@@ -754,11 +786,25 @@ struct ath11k_base {
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;
+ struct work_struct rfkill_work;
+
+ /* true means radio is on */
+ bool rfkill_radio_on;
+ /* To synchronize 11d scan vdev id */
+ struct mutex vdev_id_11d_lock;
struct timer_list mon_reap_timer;
struct completion htc_suspend;
+ struct {
+ enum ath11k_bdf_search bdf_search;
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+ } id;
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
@@ -934,6 +980,10 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
void ath11k_core_free(struct ath11k_base *ath11k);
int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
struct ath11k_board_data *bd);
+int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd);
+int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *name);
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index fd98ba5b1130..eda67ebfc4c2 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -6,6 +6,35 @@
#include "core.h"
#include "debug.h"
+#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf
+
+int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
+{
+ u32 *temp;
+ int idx;
+
+ size = size >> 2;
+
+ for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
+ if (*temp == ATH11K_DB_MAGIC_VALUE)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
+ void *buffer, u32 size)
+{
+ u32 *temp;
+ int idx;
+
+ size = size >> 2;
+
+ for (idx = 0, temp = buffer; idx < size; idx++, temp++)
+ *temp = ATH11K_DB_MAGIC_VALUE;
+}
+
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
struct ath11k_dbring_element *buff)
@@ -26,6 +55,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
ptr_unaligned = buff->payload;
ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
+ ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
DMA_FROM_DEVICE);
@@ -87,17 +117,23 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
req_entries = min(num_free, ring->bufs_max);
num_remain = req_entries;
align = ring->buf_align;
- size = sizeof(*buff) + ring->buf_sz + align - 1;
+ size = ring->buf_sz + align - 1;
while (num_remain > 0) {
- buff = kzalloc(size, GFP_ATOMIC);
+ buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
if (!buff)
break;
+ buff->payload = kzalloc(size, GFP_ATOMIC);
+ if (!buff->payload) {
+ kfree(buff);
+ break;
+ }
ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
+ kfree(buff->payload);
kfree(buff);
break;
}
@@ -282,7 +318,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
num_entry = ev->fixed.num_buf_release_entry;
- size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
+ size = ring->buf_sz + ring->buf_align - 1;
num_buff_reaped = 0;
spin_lock_bh(&srng->lock);
@@ -319,7 +355,8 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
ring->handler(ar, &handler_data);
}
- memset(buff, 0, size);
+ buff->paddr = 0;
+ memset(buff->payload, 0, size);
ath11k_dbring_bufs_replenish(ar, ring, buff);
}
@@ -346,6 +383,7 @@ void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
idr_remove(&ring->bufs_idr, buf_id);
dma_unmap_single(ar->ab->dev, buff->paddr,
ring->buf_sz, DMA_FROM_DEVICE);
+ kfree(buff->payload);
kfree(buff);
}
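
Each buffer posted to the target is pre-filled with a magic pattern; if any 32-bit word of a completed buffer still holds the magic, the device never wrote (or only partially wrote) it, and the event is rejected with -EINVAL. The fill/check pair in its simplest form (memset32() being equivalent to the open-coded word loop above):

#define MAGIC   0xdeadbeaf      /* value as the driver defines it */

static void fill_magic(void *buf, u32 size)
{
        memset32(buf, MAGIC, size / sizeof(u32));
}

static int check_dma_done(const void *buf, u32 size)
{
        const u32 *p = buf;
        u32 i;

        for (i = 0; i < size / sizeof(u32); i++)
                if (p[i] == MAGIC)
                        return -EINVAL; /* stale word: DMA incomplete */
        return 0;
}

Splitting the payload out of struct ath11k_dbring_element into its own allocation also keeps the dma_map_single() region to exactly the buffer, instead of mapping the paddr bookkeeping field along with it.
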
diff --git a/drivers/net/wireless/ath/ath11k/dbring.h b/drivers/net/wireless/ath/ath11k/dbring.h
index f7fce9ef9c36..ef906c687b8c 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.h
+++ b/drivers/net/wireless/ath/ath11k/dbring.h
@@ -13,7 +13,7 @@
struct ath11k_dbring_element {
dma_addr_t paddr;
- u8 payload[0];
+ u8 *payload;
};
struct ath11k_dbring_data {
@@ -76,4 +76,6 @@ int ath11k_dbring_get_cap(struct ath11k_base *ab,
struct ath11k_dbring_cap *db_cap);
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
+int ath11k_dbring_validate_buffer(struct ath11k *ar, void *data, u32 size);
+
#endif /* ATH11K_DBRING_H */
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
index c86de95fbdc5..958d87429062 100644
--- a/drivers/net/wireless/ath/ath11k/debug.c
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -17,7 +17,7 @@ void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
dev_info(ab->dev, "%pV", &vaf);
- /* TODO: Trace the log */
+ trace_ath11k_log_info(ab, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath11k_info);
@@ -32,7 +32,7 @@ void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
dev_err(ab->dev, "%pV", &vaf);
- /* TODO: Trace the log */
+ trace_ath11k_log_err(ab, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath11k_err);
@@ -47,7 +47,7 @@ void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
dev_warn_ratelimited(ab->dev, "%pV", &vaf);
- /* TODO: Trace the log */
+ trace_ath11k_log_warn(ab, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath11k_warn);
@@ -68,7 +68,7 @@ void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
if (ath11k_debug_mask & mask)
dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
- /* TODO: trace log */
+ trace_ath11k_log_dbg(ab, mask, &vaf);
va_end(args);
}
@@ -100,6 +100,10 @@ void ath11k_dbg_dump(struct ath11k_base *ab,
dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf);
}
}
+
+ /* tracing code doesn't like null strings */
+ trace_ath11k_log_dbg_dump(ab, msg ? msg : "", prefix ? prefix : "",
+ buf, len);
}
EXPORT_SYMBOL(ath11k_dbg_dump);
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index 659a275e2eb3..fbbd5fe02aa8 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -60,7 +60,8 @@ static inline void ath11k_dbg_dump(struct ath11k_base *ab,
#define ath11k_dbg(ar, dbg_mask, fmt, ...) \
do { \
- if (ath11k_debug_mask & dbg_mask) \
+ if ((ath11k_debug_mask & dbg_mask) || \
+ trace_ath11k_log_dbg_enabled()) \
__ath11k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \
} while (0)
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 80afd35337a1..198ade90b725 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -3,6 +3,8 @@
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
*/
+#include <linux/vmalloc.h>
+
#include "debugfs.h"
#include "core.h"
@@ -126,6 +128,11 @@ void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb
goto complete;
}
+ if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ ar->debug.fw_stats_done = true;
+ goto complete;
+ }
+
if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
if (list_empty(&stats.vdevs)) {
ath11k_warn(ab, "empty vdev stats");
@@ -195,7 +202,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
* received 'update stats' event, we keep a 3 seconds timeout in case,
* fw_stats_done is not marked yet
*/
- timeout = jiffies + msecs_to_jiffies(3 * HZ);
+ timeout = jiffies + msecs_to_jiffies(3 * 1000);
ath11k_debugfs_fw_stats_reset(ar);
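
The timeout fix matters because msecs_to_jiffies() expects milliseconds: passing 3 * HZ handed it a jiffies-scaled value, so the intended 3 seconds silently depended on the kernel's tick rate. Concretely:

        /* HZ=100:  msecs_to_jiffies(3 * HZ)   == msecs_to_jiffies(300)  -> 0.3 s
         * HZ=1000: msecs_to_jiffies(3 * HZ)   == msecs_to_jiffies(3000) -> 3 s
         * any HZ:  msecs_to_jiffies(3 * 1000)                           -> 3 s
         */
        unsigned long timeout = jiffies + msecs_to_jiffies(3 * 1000);

        if (time_after(jiffies, timeout))
                pr_debug("fw stats request timed out\n");
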
@@ -229,6 +236,38 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
return 0;
}
+int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = pdev_id;
+ req_param.vdev_id = vdev_id;
+ req_param.stats_id = stats_id;
+
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret)
+ ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ pdev_id, vdev_id, stats_id);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
{
struct ath11k *ar = inode->i_private;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index ec743a015dc7..4c0740394c95 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -117,6 +117,8 @@ void ath11k_debugfs_unregister(struct ath11k *ar);
void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
+int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id);
static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
{
@@ -216,6 +218,12 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return 0;
}
+static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
+ u32 pdev_id, u32 vdev_id, u32 stats_id)
+{
+ return 0;
+}
+
#endif /* CONFIG_MAC80211_DEBUGFS*/
#endif /* _ATH11K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index fecd9718f5ce..1b1acbdf837a 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -126,85 +126,9 @@ void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
}
void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
- struct sk_buff *msdu,
struct hal_tx_status *ts)
{
- struct ath11k_base *ab = ar->ab;
- struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
- enum hal_tx_rate_stats_pkt_type pkt_type;
- enum hal_tx_rate_stats_sgi sgi;
- enum hal_tx_rate_stats_bw bw;
- struct ath11k_peer *peer;
- struct ath11k_sta *arsta;
- struct ieee80211_sta *sta;
- u16 rate;
- u8 rate_idx = 0;
- int ret;
- u8 mcs;
-
- rcu_read_lock();
- spin_lock_bh(&ab->base_lock);
- peer = ath11k_peer_find_by_id(ab, ts->peer_id);
- if (!peer || !peer->sta) {
- ath11k_warn(ab, "failed to find the peer\n");
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
- return;
- }
-
- sta = peer->sta;
- arsta = (struct ath11k_sta *)sta->drv_priv;
-
- memset(&arsta->txrate, 0, sizeof(arsta->txrate));
- pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
- ts->rate_stats);
- mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
- ts->rate_stats);
- sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
- ts->rate_stats);
- bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
-
- if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
- pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
- ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
- pkt_type,
- &rate_idx,
- &rate);
- if (ret < 0)
- goto err_out;
- arsta->txrate.legacy = rate;
- } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
- if (mcs > 7) {
- ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
- goto err_out;
- }
-
- arsta->txrate.mcs = mcs + 8 * (arsta->last_txrate.nss - 1);
- arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
- if (sgi)
- arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
- if (mcs > 9) {
- ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
- goto err_out;
- }
-
- arsta->txrate.mcs = mcs;
- arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
- if (sgi)
- arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
- /* TODO */
- }
-
- arsta->txrate.nss = arsta->last_txrate.nss;
- arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
-
- ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
-
-err_out:
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
+ ath11k_dp_tx_update_txcompl(ar, ts);
}
static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file,
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.h b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
index 18dc65d9edcf..e6c11b3a40aa 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
@@ -19,7 +19,6 @@ void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
struct ath11k_per_peer_tx_stats *peer_stats,
u8 legacy_rate_idx);
void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
- struct sk_buff *msdu,
struct hal_tx_status *ts);
#else /* CONFIG_ATH11K_DEBUGFS */
@@ -34,7 +33,6 @@ ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
}
static inline void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
- struct sk_buff *msdu,
struct hal_tx_status *ts)
{
}
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 8baaeeb8cf82..8b790ce72e5d 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -101,8 +101,11 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
- dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
- ring->paddr_unaligned);
+ if (ring->cached)
+ kfree(ring->vaddr_unaligned);
+ else
+ dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned);
ring->vaddr_unaligned = NULL;
}
@@ -222,6 +225,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
int ret;
+ bool cached = false;
if (max_entries < 0 || entry_sz < 0)
return -EINVAL;
@@ -230,9 +234,29 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
num_entries = max_entries;
ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
- ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
- &ring->paddr_unaligned,
- GFP_KERNEL);
+
+ if (ab->hw_params.alloc_cacheable_memory) {
+ /* Allocate the reo dst and tx completion rings from cacheable memory */
+ switch (type) {
+ case HAL_REO_DST:
+ case HAL_WBM2SW_RELEASE:
+ cached = true;
+ break;
+ default:
+ cached = false;
+ }
+
+ if (cached) {
+ ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
+ ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
+ }
+ }
+
+ if (!cached)
+ ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ GFP_KERNEL);
+
if (!ring->vaddr_unaligned)
return -ENOMEM;
@@ -292,6 +316,11 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
return -EINVAL;
}
+ if (cached) {
+ params.flags |= HAL_SRNG_FLAGS_CACHED;
+ ring->cached = 1;
+ }
+
ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
if (ret < 0) {
ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
@@ -742,13 +771,12 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
const struct ath11k_hw_hal_params *hal_params;
int grp_id = irq_grp->grp_id;
int work_done = 0;
- int i = 0, j;
+ int i, j;
int tot_work_done = 0;
- while (ab->hw_params.ring_mask->tx[grp_id] >> i) {
- if (ab->hw_params.ring_mask->tx[grp_id] & BIT(i))
- ath11k_dp_tx_completion_handler(ab, i);
- i++;
+ if (ab->hw_params.ring_mask->tx[grp_id]) {
+ i = __fls(ab->hw_params.ring_mask->tx[grp_id]);
+ ath11k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params.ring_mask->rx_err[grp_id]) {
@@ -1023,6 +1051,7 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
INIT_LIST_HEAD(&dp->reo_cmd_list);
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;
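The tx completion dispatch in ath11k_dp_service_srng() above now services one ring per group: __fls() selects the highest set bit of the group's tx mask instead of walking every bit. A minimal userspace sketch of that selection, with a hypothetical mask value (fls_index() stands in for the kernel's __fls()):

#include <stdio.h>

/* userspace stand-in for the kernel's __fls(): index of the most
 * significant set bit of a non-zero 32-bit word
 */
static int fls_index(unsigned int mask)
{
	return 31 - __builtin_clz(mask);
}

int main(void)
{
	unsigned int tx_mask = 0x5;	/* hypothetical group mask: rings 0 and 2 set */

	if (tx_mask)
		printf("service tx ring %d\n", fls_index(tx_mask));	/* prints 2 */
	return 0;
}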
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 4794ca04f213..409d6cc5a1d5 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -64,6 +64,7 @@ struct dp_srng {
dma_addr_t paddr;
int size;
u32 ring_id;
+ u8 cached;
};
struct dp_rxdma_ring {
@@ -88,6 +89,19 @@ struct dp_tx_ring {
int tx_status_tail;
};
+enum dp_mon_status_buf_state {
+ /* PPDU id matches in dst ring and status ring */
+ DP_MON_STATUS_MATCH,
+ /* status ring dma is not done */
+ DP_MON_STATUS_NO_DMA,
+ /* status ring is lagging, reap status ring */
+ DP_MON_STATUS_LAG,
+ /* status ring is leading, reap dst ring and drop */
+ DP_MON_STATUS_LEAD,
+ /* replenish monitor status ring */
+ DP_MON_STATUS_REPLINISH,
+};
+
struct ath11k_pdev_mon_stats {
u32 status_ppdu_state;
u32 status_ppdu_start;
@@ -103,6 +117,12 @@ struct ath11k_pdev_mon_stats {
u32 dup_mon_buf_cnt;
};
+struct dp_full_mon_mpdu {
+ struct list_head list;
+ struct sk_buff *head;
+ struct sk_buff *tail;
+};
+
struct dp_link_desc_bank {
void *vaddr_unaligned;
void *vaddr;
@@ -134,7 +154,11 @@ struct ath11k_mon_data {
u32 mon_last_buf_cookie;
u64 mon_last_linkdesc_paddr;
u16 chan_noise_floor;
-
+ bool hold_mon_dst_ring;
+ enum dp_mon_status_buf_state buf_state;
+ dma_addr_t mon_status_paddr;
+ struct dp_full_mon_mpdu *mon_mpdu;
+ struct hal_sw_mon_ring_entries sw_mon_entries;
struct ath11k_pdev_mon_stats rx_mon_stats;
/* lock for monitor data */
spinlock_t mon_lock;
@@ -244,6 +268,7 @@ struct ath11k_dp {
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
struct list_head reo_cmd_list;
struct list_head reo_cmd_cache_flush_list;
+ struct list_head dp_full_mon_mpdu_list;
u32 reo_cmd_cache_flush_count;
/**
* protects access to below fields,
@@ -291,6 +316,7 @@ enum htt_h2t_msg_type {
HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
+ HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE = 0x17,
};
#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
@@ -517,7 +543,8 @@ struct htt_ppdu_stats_cfg_cmd {
} __packed;
#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
-#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8)
+#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 9)
#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
enum htt_ppdu_stats_tag_type {
@@ -955,6 +982,33 @@ struct htt_rx_ring_tlv_filter {
u32 pkt_filter_flags3; /* DATA */
};
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE BIT(0)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END BIT(1)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END BIT(2)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING GENMASK(10, 3)
+
+/**
+ * Enumeration for full monitor mode destination ring select
+ * 0 - REO destination ring select
+ * 1 - FW destination ring select
+ * 2 - SW destination ring select
+ * 3 - Release destination ring select
+ */
+enum htt_rx_full_mon_release_ring {
+ HTT_RX_MON_RING_REO,
+ HTT_RX_MON_RING_FW,
+ HTT_RX_MON_RING_SW,
+ HTT_RX_MON_RING_RELEASE,
+};
+
+struct htt_rx_full_monitor_mode_cfg_cmd {
+ u32 info0;
+ u32 cfg;
+} __packed;
+
/* HTT message target->host */
enum htt_t2h_msg_type {
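The repartitioned HTT_PPDU_STATS_CFG word above narrows PDEV_ID to bits 15:9 to make room for the new SOC_STATS bit at bit 8. A minimal userspace sketch of the GENMASK()/FIELD_PREP() packing it relies on; the macro bodies are simplified stand-ins for include/linux/bitfield.h, and the pdev value is hypothetical:

#include <stdio.h>
#include <stdint.h>

/* simplified stand-ins for the kernel's GENMASK() and FIELD_PREP() */
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define CFG_MSG_TYPE	GENMASK(7, 0)	/* HTT_PPDU_STATS_CFG_MSG_TYPE */
#define CFG_SOC_STATS	(1u << 8)	/* HTT_PPDU_STATS_CFG_SOC_STATS */
#define CFG_PDEV_ID	GENMASK(15, 9)	/* HTT_PPDU_STATS_CFG_PDEV_ID */

int main(void)
{
	/* hypothetical command word: msg type 0x11, pdev mask 0x2 */
	uint32_t msg = FIELD_PREP(CFG_MSG_TYPE, 0x11) |
		       FIELD_PREP(CFG_PDEV_ID, 0x2);

	printf("msg = 0x%08x\n", msg);	/* 0x00000411 */
	return 0;
}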
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index c5320847b80a..c212a789421e 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -20,13 +20,15 @@
#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
-static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
+static inline
+u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}
-static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline
+enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
@@ -34,32 +36,34 @@ static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_bas
return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline
+u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}
-static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline
+bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
-static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}
-static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
- struct sk_buff *skb)
+static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
@@ -67,8 +71,8 @@ static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
return ieee80211_has_morefrags(hdr->frame_control);
}
-static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
- struct sk_buff *skb)
+static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
@@ -76,37 +80,37 @@ static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
-static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
-static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}
-static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
+static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
__le32_to_cpu(attn->info2));
}
-static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
+static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
__le32_to_cpu(attn->info1));
}
-static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
+static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
__le32_to_cpu(attn->info1));
}
-static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
+static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
__le32_to_cpu(attn->info2)) ==
@@ -154,68 +158,68 @@ static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}
-static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}
-static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}
-static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}
-static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}
-static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}
-static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}
@@ -233,14 +237,14 @@ static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}
-static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
+static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
__le32_to_cpu(attn->info1));
}
-static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
- struct hal_rx_desc *rx_desc)
+static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
{
u8 *rx_pkt_hdr;
@@ -249,8 +253,8 @@ static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
return rx_pkt_hdr;
}
-static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
- struct hal_rx_desc *rx_desc)
+static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
{
u32 tlv_tag;
@@ -259,15 +263,15 @@ static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
return tlv_tag == HAL_RX_MPDU_START;
}
-static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
- struct hal_rx_desc *rx_desc)
+static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}
-static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
- struct hal_rx_desc *desc,
- u16 len)
+static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
+ struct hal_rx_desc *desc,
+ u16 len)
{
ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}
@@ -1356,25 +1360,6 @@ int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
return 0;
}
-static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
-{
- u32 ret = 0;
-
- switch (sgi) {
- case RX_MSDU_START_SGI_0_8_US:
- ret = NL80211_RATE_INFO_HE_GI_0_8;
- break;
- case RX_MSDU_START_SGI_1_6_US:
- ret = NL80211_RATE_INFO_HE_GI_1_6;
- break;
- case RX_MSDU_START_SGI_3_2_US:
- ret = NL80211_RATE_INFO_HE_GI_3_2;
- break;
- }
-
- return ret;
-}
-
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
struct htt_ppdu_stats *ppdu_stats, u8 user)
@@ -1493,14 +1478,15 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
arsta->txrate.mcs = mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
arsta->txrate.he_dcm = dcm;
- arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
- arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
- (user_rate->ru_end -
+ arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
+ arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
+ ((user_rate->ru_end -
user_rate->ru_start) + 1);
break;
}
arsta->txrate.nss = nss;
+
arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
arsta->tx_duration += tx_duration;
memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
@@ -2380,7 +2366,7 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
}
rx_status->encoding = RX_ENC_HE;
rx_status->nss = nss;
- rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
+ rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
break;
}
@@ -2596,36 +2582,30 @@ free_out:
static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
struct napi_struct *napi,
struct sk_buff_head *msdu_list,
- int *quota, int ring_id)
+ int mac_id)
{
- struct ath11k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath11k *ar;
struct ieee80211_rx_status rx_status = {0};
- u8 mac_id;
int ret;
if (skb_queue_empty(msdu_list))
return;
- rcu_read_lock();
-
- while (*quota && (msdu = __skb_dequeue(msdu_list))) {
- rxcb = ATH11K_SKB_RXCB(msdu);
- mac_id = rxcb->mac_id;
- ar = ab->pdevs[mac_id].ar;
- if (!rcu_dereference(ab->pdevs_active[mac_id])) {
- dev_kfree_skb_any(msdu);
- continue;
- }
+ if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
+ __skb_queue_purge(msdu_list);
+ return;
+ }
- if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
- dev_kfree_skb_any(msdu);
- continue;
- }
+ ar = ab->pdevs[mac_id].ar;
+ if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
+ __skb_queue_purge(msdu_list);
+ return;
+ }
+ while ((msdu = __skb_dequeue(msdu_list))) {
ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
- if (ret) {
+ if (unlikely(ret)) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"Unable to process msdu %d", ret);
dev_kfree_skb_any(msdu);
@@ -2633,10 +2613,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
}
ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
- (*quota)--;
}
-
- rcu_read_unlock();
}
int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
@@ -2645,19 +2622,21 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
struct ath11k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
int num_buffs_reaped[MAX_RADIOS] = {0};
- struct sk_buff_head msdu_list;
+ struct sk_buff_head msdu_list[MAX_RADIOS];
struct ath11k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
struct hal_srng *srng;
struct sk_buff *msdu;
- int quota = budget;
bool done = false;
int buf_id, mac_id;
struct ath11k *ar;
- u32 *rx_desc;
+ struct hal_reo_dest_ring *desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 cookie;
int i;
- __skb_queue_head_init(&msdu_list);
+ for (i = 0; i < MAX_RADIOS; i++)
+ __skb_queue_head_init(&msdu_list[i]);
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
@@ -2666,13 +2645,11 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
ath11k_hal_srng_access_begin(ab, srng);
try_again:
- while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
- struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
- enum hal_reo_dest_ring_push_reason push_reason;
- u32 cookie;
-
+ while (likely(desc =
+ (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
+ srng))) {
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
- desc.buf_addr_info.info1);
+ desc->buf_addr_info.info1);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
@@ -2681,7 +2658,7 @@ try_again:
rx_ring = &ar->dp.rx_refill_buf_ring;
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
- if (!msdu) {
+ if (unlikely(!msdu)) {
ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
@@ -2697,37 +2674,41 @@ try_again:
DMA_FROM_DEVICE);
num_buffs_reaped[mac_id]++;
- total_msdu_reaped++;
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
- desc.info0);
- if (push_reason !=
- HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ desc->info0);
+ if (unlikely(push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
dev_kfree_skb_any(msdu);
ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
continue;
}
- rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 &
+ rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 &
+ rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
+ rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
- desc.rx_mpdu_info.meta_data);
+ desc->rx_mpdu_info.meta_data);
rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
- desc.rx_mpdu_info.info0);
+ desc->rx_mpdu_info.info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
- desc.info0);
+ desc->info0);
rxcb->mac_id = mac_id;
- __skb_queue_tail(&msdu_list, msdu);
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
- if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
+ if (rxcb->is_continuation) {
+ done = false;
+ } else {
+ total_msdu_reaped++;
done = true;
- break;
}
+
+ if (total_msdu_reaped >= budget)
+ break;
}
/* Hw might have updated the head pointer after we cached it.
@@ -2736,7 +2717,7 @@ try_again:
* head pointer so that we can reap complete MPDU in the current
* rx processing.
*/
- if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
+ if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
ath11k_hal_srng_access_end(ab, srng);
goto try_again;
}
@@ -2745,25 +2726,23 @@ try_again:
spin_unlock_bh(&srng->lock);
- if (!total_msdu_reaped)
+ if (unlikely(!total_msdu_reaped))
goto exit;
for (i = 0; i < ab->num_radios; i++) {
if (!num_buffs_reaped[i])
continue;
+ ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
+
ar = ab->pdevs[i].ar;
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
ab->hw_params.hal_params->rx_buf_rbm);
}
-
- ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
- &quota, ring_id);
-
exit:
- return budget - quota;
+ return total_msdu_reaped;
}
static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
@@ -2771,6 +2750,7 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
{
struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
u32 num_msdu;
+ int i;
if (!rx_stats)
return;
@@ -2832,6 +2812,13 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
arsta->rssi_comb = ppdu_info->rssi_comb;
+
+ BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
+ ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
+
+ for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
+ arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
+
rx_stats->rx_duration += ppdu_info->rx_duration;
arsta->rx_duration = rx_stats->rx_duration;
}
@@ -2945,6 +2932,43 @@ fail_desc_get:
return req_entries - num_remain;
}
+#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
+
+static void
+ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
+ struct hal_tlv_hdr *tlv)
+{
+ struct hal_rx_ppdu_start *ppdu_start;
+ u16 ppdu_id_diff, ppdu_id, tlv_len;
+ u8 *ptr;
+
+ /* PPDU id is part of second tlv, move ptr to second tlv */
+ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ ptr = (u8 *)tlv;
+ ptr += sizeof(*tlv) + tlv_len;
+ tlv = (struct hal_tlv_hdr *)ptr;
+
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
+ return;
+
+ ptr += sizeof(*tlv);
+ ppdu_start = (struct hal_rx_ppdu_start *)ptr;
+ ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
+ __le32_to_cpu(ppdu_start->info0));
+
+ if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
+ pmon->buf_state = DP_MON_STATUS_LEAD;
+ ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
+ if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
+ if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
+ pmon->buf_state = DP_MON_STATUS_LEAD;
+ }
+}
+
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
@@ -2952,6 +2976,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
const struct ath11k_hw_hal_params *hal_params;
struct ath11k_pdev_dp *dp;
struct dp_rxdma_ring *rx_ring;
+ struct ath11k_mon_data *pmon;
struct hal_srng *srng;
void *rx_mon_status_desc;
struct sk_buff *skb;
@@ -2965,6 +2990,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
dp = &ar->dp;
+ pmon = &dp->mon_data;
srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
@@ -2977,8 +3003,10 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
*budget -= 1;
rx_mon_status_desc =
ath11k_hal_srng_src_peek(ab, srng);
- if (!rx_mon_status_desc)
+ if (!rx_mon_status_desc) {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
break;
+ }
ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
&cookie, &rbm);
@@ -2991,6 +3019,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
goto move_next;
}
@@ -3010,10 +3039,18 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
FIELD_GET(HAL_TLV_HDR_TAG,
tlv->tl));
dev_kfree_skb_any(skb);
+ pmon->buf_state = DP_MON_STATUS_NO_DMA;
goto move_next;
}
+ if (ab->hw_params.full_monitor_mode) {
+ ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
+ if (paddr == pmon->mon_status_paddr)
+ pmon->buf_state = DP_MON_STATUS_MATCH;
+ }
__skb_queue_tail(skb_list, skb);
+ } else {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
}
move_next:
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
@@ -3064,10 +3101,10 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
if (!num_buffs_reaped)
goto exit;
- while ((skb = __skb_dequeue(&skb_list))) {
- memset(&ppdu_info, 0, sizeof(ppdu_info));
- ppdu_info.peer_id = HAL_INVALID_PEERID;
+ memset(&ppdu_info, 0, sizeof(ppdu_info));
+ ppdu_info.peer_id = HAL_INVALID_PEERID;
+ while ((skb = __skb_dequeue(&skb_list))) {
if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
@@ -3095,10 +3132,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
ath11k_dbg(ab, ATH11K_DBG_DATA,
"failed to find the peer with peer_id %d\n",
ppdu_info.peer_id);
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
- dev_kfree_skb_any(skb);
- continue;
+ goto next_skb;
}
arsta = (struct ath11k_sta *)peer->sta->drv_priv;
@@ -3107,10 +3141,13 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+next_skb:
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
dev_kfree_skb_any(skb);
+ memset(&ppdu_info, 0, sizeof(ppdu_info));
+ ppdu_info.peer_id = HAL_INVALID_PEERID;
}
exit:
return num_buffs_reaped;
@@ -3800,7 +3837,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
- rbm != ab->hw_params.hal_params->rx_buf_rbm) {
+ rbm != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
ath11k_dp_rx_link_desc_return(ab, desc,
@@ -4829,7 +4866,7 @@ static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
u32 mac_id, struct sk_buff *head_msdu,
struct sk_buff *last_msdu,
- struct ieee80211_rx_status *rxs)
+ struct ieee80211_rx_status *rxs, bool *fcs_err)
{
struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu, *prev_buf;
@@ -4839,12 +4876,17 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
u8 *dest, decap_format;
struct ieee80211_hdr_3addr *wh;
struct rx_attention *rx_attention;
+ u32 err_bitmap;
if (!head_msdu)
goto err_merge_fail;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ *fcs_err = true;
if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
return NULL;
@@ -4933,9 +4975,10 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
struct ath11k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
struct ieee80211_rx_status *rxs = &dp->rx_status;
+ bool fcs_err = false;
mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
- tail_msdu, rxs);
+ tail_msdu, rxs, &fcs_err);
if (!mon_skb)
goto mon_deliver_fail;
@@ -4943,6 +4986,10 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
header = mon_skb;
rxs->flag = 0;
+
+ if (fcs_err)
+ rxs->flag = RX_FLAG_FAILED_FCS_CRC;
+
do {
skb_next = mon_skb->next;
if (!skb_next)
@@ -5091,6 +5138,357 @@ static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
}
}
+static u32
+ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu,
+ struct hal_sw_mon_ring_entries *sw_mon_entries)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct hal_sw_monitor_ring *sw_desc = ring_entry;
+ struct hal_rx_msdu_list msdu_list;
+ struct hal_rx_desc *rx_desc;
+ struct ath11k_skb_rxcb *rxcb;
+ void *rx_msdu_link_desc;
+ void *p_buf_addr_info, *p_last_buf_addr_info;
+ int buf_id, i = 0;
+ u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
+ u32 rx_bufs_used = 0, msdu_cnt = 0;
+ u32 total_len = 0, frag_len = 0, sw_cookie;
+ u16 num_msdus = 0;
+ u8 rxdma_err, rbm;
+ bool is_frag, is_first_msdu;
+ bool drop_mpdu = false;
+
+ ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
+
+ sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
+ sw_mon_entries->end_of_ppdu = false;
+ sw_mon_entries->drop_ppdu = false;
+ p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
+ msdu_cnt = sw_mon_entries->msdu_cnt;
+
+ sw_mon_entries->end_of_ppdu =
+ FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
+ if (sw_mon_entries->end_of_ppdu)
+ return rx_bufs_used;
+
+ if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
+ sw_desc->info0) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ rxdma_err =
+ FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
+ sw_desc->info0);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ drop_mpdu = true;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+
+ do {
+ rx_msdu_link_desc =
+ (u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (sw_mon_entries->mon_dst_paddr -
+ pmon->link_desc_banks[sw_cookie].paddr);
+
+ ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
+ &num_msdus);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_list.sw_cookie[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon msdu_pop: invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ break;
+ }
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon: i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, sw_mon_entries->ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu_cnt--;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
+
+ if (is_first_msdu) {
+ if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+ is_first_msdu = false;
+ }
+
+ ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ rx_bufs_used++;
+ }
+
+ ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
+ &sw_mon_entries->mon_dst_paddr,
+ &sw_mon_entries->mon_dst_sw_cookie,
+ &rbm,
+ &p_buf_addr_info);
+
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon: dp_rx_monitor_link_desc_return failed\n");
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ return rx_bufs_used;
+}
+
+static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
+ struct dp_full_mon_mpdu *mon_mpdu,
+ struct sk_buff *head,
+ struct sk_buff *tail)
+{
+ mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+ if (!mon_mpdu)
+ return -ENOMEM;
+
+ list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
+ mon_mpdu->head = head;
+ mon_mpdu->tail = tail;
+
+ return 0;
+}
+
+static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
+ struct dp_full_mon_mpdu *mon_mpdu)
+{
+ struct dp_full_mon_mpdu *tmp;
+ struct sk_buff *tmp_msdu, *skb_next;
+
+ if (list_empty(&dp->dp_full_mon_mpdu_list))
+ return;
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+
+ tmp_msdu = mon_mpdu->head;
+ while (tmp_msdu) {
+ skb_next = tmp_msdu->next;
+ dev_kfree_skb_any(tmp_msdu);
+ tmp_msdu = skb_next;
+ }
+
+ kfree(mon_mpdu);
+ }
+}
+
+static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
+ int mac_id,
+ struct ath11k_mon_data *pmon,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct dp_full_mon_mpdu *tmp;
+ struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ struct sk_buff *head_msdu, *tail_msdu;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ int ret = 0;
+
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+ head_msdu = mon_mpdu->head;
+ tail_msdu = mon_mpdu->tail;
+ if (head_msdu && tail_msdu) {
+ ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
+ tail_msdu, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
+ }
+ kfree(mon_mpdu);
+ }
+
+ return ret;
+}
+
+static int
+ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_sw_mon_ring_entries *sw_mon_entries;
+ int quota = 0, work = 0, count;
+
+ sw_mon_entries = &pmon->sw_mon_entries;
+
+ while (pmon->hold_mon_dst_ring) {
+ quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
+ napi, 1);
+ if (pmon->buf_state == DP_MON_STATUS_MATCH) {
+ count = sw_mon_entries->status_buf_count;
+ if (count > 1) {
+ quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
+ napi, count);
+ }
+
+ ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
+ pmon, napi);
+ pmon->hold_mon_dst_ring = false;
+ } else if (!pmon->mon_status_paddr ||
+ pmon->buf_state == DP_MON_STATUS_LEAD) {
+ sw_mon_entries->drop_ppdu = true;
+ pmon->hold_mon_dst_ring = false;
+ }
+
+ if (!quota)
+ break;
+
+ work += quota;
+ }
+
+ if (sw_mon_entries->drop_ppdu)
+ ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
+
+ return work;
+}
+
+static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_sw_mon_ring_entries *sw_mon_entries;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct sk_buff *head_msdu, *tail_msdu;
+ void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ void *ring_entry;
+ u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
+ int quota = 0, ret;
+ bool break_dst_ring = false;
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ sw_mon_entries = &pmon->sw_mon_entries;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ if (pmon->hold_mon_dst_ring) {
+ spin_unlock_bh(&pmon->mon_lock);
+ goto reap_status_ring;
+ }
+
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+ while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ sw_mon_entries);
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (!sw_mon_entries->end_of_ppdu) {
+ if (head_msdu) {
+ ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
+ pmon->mon_mpdu,
+ head_msdu,
+ tail_msdu);
+ if (ret)
+ break_dst_ring = true;
+ }
+
+ goto next_entry;
+ } else {
+ if (!sw_mon_entries->ppdu_id &&
+ !sw_mon_entries->mon_status_paddr) {
+ break_dst_ring = true;
+ goto next_entry;
+ }
+ }
+
+ rx_mon_stats->dest_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
+ pmon->hold_mon_dst_ring = true;
+next_entry:
+ ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ if (break_dst_ring)
+ break;
+ }
+
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (rx_bufs_used) {
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM);
+ }
+
+reap_status_ring:
+ quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
+ napi, budget);
+
+ return quota;
+}
+
static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
@@ -5113,10 +5511,14 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
int ret = 0;
- if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+ ab->hw_params.full_monitor_mode)
+ ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
+ else if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
+
return ret;
}
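ath11k_dp_rx_mon_update_status_buf_state() above decides whether the status ring leads or lags the destination ring by comparing 16-bit PPDU ids, using ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP as the margin that separates a genuine lead from a wrapped counter. A minimal sketch of that decision with hypothetical ids:

#include <stdio.h>

#define PPDU_ID_WRAP	32535	/* ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP */

enum buf_state { MON_STATUS_MATCH, MON_STATUS_LAG, MON_STATUS_LEAD };

/* dst_id: PPDU id cached from the destination ring,
 * status_id: PPDU id parsed from the status buffer TLV
 */
static enum buf_state classify(unsigned short dst_id, unsigned short status_id)
{
	if (dst_id < status_id)
		return (status_id - dst_id > PPDU_ID_WRAP) ?
			MON_STATUS_LAG : MON_STATUS_LEAD;
	if (dst_id > status_id)
		return (dst_id - status_id > PPDU_ID_WRAP) ?
			MON_STATUS_LEAD : MON_STATUS_LAG;
	return MON_STATUS_MATCH;
}

int main(void)
{
	printf("%d\n", classify(10, 12));	/* small delta: status leads (2) */
	printf("%d\n", classify(5, 65000));	/* wrapped id: status lags (1) */
	return 0;
}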
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 879fb2a9dc0c..91d6244b6543 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -9,6 +9,7 @@
#include "debugfs_sta.h"
#include "hw.h"
#include "peer.h"
+#include "mac.h"
static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
@@ -95,11 +96,11 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
u8 ring_selector = 0, ring_map = 0;
bool tcl_ring_retry;
- if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
return -ESHUTDOWN;
- if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
- !ieee80211_is_data(hdr->frame_control))
+ if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+ !ieee80211_is_data(hdr->frame_control)))
return -ENOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
@@ -127,7 +128,7 @@ tcl_ring_sel:
DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
spin_unlock_bh(&tx_ring->tx_idr_lock);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
return -ENOSPC;
@@ -152,7 +153,7 @@ tcl_ring_sel:
ti.meta_data_flags = arvif->tcl_metadata;
}
- if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
+ if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
ti.encrypt_type =
ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
@@ -173,8 +174,8 @@ tcl_ring_sel:
ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL &&
- ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
+ ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
@@ -211,7 +212,7 @@ tcl_ring_sel:
}
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ab->dev, ti.paddr)) {
+ if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
@@ -231,7 +232,7 @@ tcl_ring_sel:
ath11k_hal_srng_access_begin(ab, tcl_ring);
hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
- if (!hal_tcl_desc) {
+ if (unlikely(!hal_tcl_desc)) {
/* NOTE: It is highly unlikely we'll be running out of tcl_ring
* desc because the desc is directly enqueued onto hw queue.
*/
@@ -245,7 +246,7 @@ tcl_ring_sel:
* checking this ring earlier for each pkt tx.
* Restart ring selection if some rings are not checked yet.
*/
- if (ring_map != (BIT(ab->hw_params.max_tx_ring) - 1) &&
+ if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring) - 1)) &&
ab->hw_params.max_tx_ring > 1) {
tcl_ring_retry = true;
ring_selector++;
@@ -293,20 +294,18 @@ static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
struct sk_buff *msdu;
struct ath11k_skb_cb *skb_cb;
- spin_lock_bh(&tx_ring->tx_idr_lock);
- msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
- if (!msdu) {
+ spin_lock(&tx_ring->tx_idr_lock);
+ msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ spin_unlock(&tx_ring->tx_idr_lock);
+
+ if (unlikely(!msdu)) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
- spin_unlock_bh(&tx_ring->tx_idr_lock);
return;
}
skb_cb = ATH11K_SKB_CB(msdu);
- idr_remove(&tx_ring->txbuf_idr, msdu_id);
- spin_unlock_bh(&tx_ring->tx_idr_lock);
-
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
dev_kfree_skb_any(msdu);
@@ -325,12 +324,13 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
struct ath11k_skb_cb *skb_cb;
struct ath11k *ar;
- spin_lock_bh(&tx_ring->tx_idr_lock);
- msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id);
- if (!msdu) {
+ spin_lock(&tx_ring->tx_idr_lock);
+ msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
+ spin_unlock(&tx_ring->tx_idr_lock);
+
+ if (unlikely(!msdu)) {
ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
ts->msdu_id);
- spin_unlock_bh(&tx_ring->tx_idr_lock);
return;
}
@@ -339,9 +339,6 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
ar = skb_cb->ar;
- idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
- spin_unlock_bh(&tx_ring->tx_idr_lock);
-
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
@@ -418,6 +415,105 @@ static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
}
}
+void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+ enum hal_tx_rate_stats_pkt_type pkt_type;
+ enum hal_tx_rate_stats_sgi sgi;
+ enum hal_tx_rate_stats_bw bw;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ u16 rate, ru_tones;
+ u8 mcs, rate_idx, ofdma;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DP_TX,
+ "failed to find the peer by id %u\n", ts->peer_id);
+ goto err_out;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
+ ts->rate_stats);
+ mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
+ ts->rate_stats);
+ sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
+ ts->rate_stats);
+ bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
+ ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
+ ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);
+
+ /* Prefer the real NSS value arsta->last_txrate.nss; if it is
+ * invalid, fall back to the NSS value negotiated at assoc.
+ */
+ if (arsta->last_txrate.nss)
+ arsta->txrate.nss = arsta->last_txrate.nss;
+ else
+ arsta->txrate.nss = arsta->peer_nss;
+
+ if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
+ pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ goto err_out;
+ arsta->txrate.legacy = rate;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
+ if (mcs > 7) {
+ ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ if (arsta->txrate.nss != 0)
+ arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
+ if (mcs > 9) {
+ ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ if (mcs > 11) {
+ ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
+ }
+
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ arsta->txrate.bw = RATE_INFO_BW_HE_RU;
+ arsta->txrate.he_ru_alloc =
+ ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ }
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
+ ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
+
+err_out:
+ spin_unlock_bh(&ab->base_lock);
+}
+
static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
@@ -435,16 +531,14 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- rcu_read_lock();
-
- if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
+ if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
dev_kfree_skb_any(msdu);
- goto exit;
+ return;
}
- if (!skb_cb->vif) {
+ if (unlikely(!skb_cb->vif)) {
dev_kfree_skb_any(msdu);
- goto exit;
+ return;
}
info = IEEE80211_SKB_CB(msdu);
@@ -465,7 +559,8 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
- if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
+ if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ||
+ ab->hw_params.single_pdev_only) {
if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
if (ar->last_ppdu_id == 0) {
ar->last_ppdu_id = ts->ppdu_id;
@@ -473,12 +568,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
ar->cached_ppdu_id == ar->last_ppdu_id) {
ar->cached_ppdu_id = ar->last_ppdu_id;
ar->cached_stats.is_ampdu = true;
- ath11k_debugfs_sta_update_txcompl(ar, msdu, ts);
+ ath11k_dp_tx_update_txcompl(ar, ts);
memset(&ar->cached_stats, 0,
sizeof(struct ath11k_per_peer_tx_stats));
} else {
ar->cached_stats.is_ampdu = false;
- ath11k_debugfs_sta_update_txcompl(ar, msdu, ts);
+ ath11k_dp_tx_update_txcompl(ar, ts);
memset(&ar->cached_stats, 0,
sizeof(struct ath11k_per_peer_tx_stats));
}
@@ -494,9 +589,6 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
*/
ieee80211_tx_status(ar->hw, msdu);
-
-exit:
- rcu_read_unlock();
}
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
@@ -505,11 +597,11 @@ static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
{
ts->buf_rel_source =
FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
- if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
- ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+ if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
+ ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
return;
- if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
+ if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
return;
ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
@@ -556,8 +648,9 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
}
- if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
- (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
+ if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
+ (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
+ tx_ring->tx_status_tail))) {
/* TODO: Process pending tx_status messages when kfifo_is_full() */
ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
}
@@ -580,7 +673,7 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
- if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
+ if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
ath11k_dp_tx_process_htt_tx_complete(ab,
(void *)tx_status,
mac_id, msdu_id,
@@ -588,16 +681,16 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
continue;
}
- spin_lock_bh(&tx_ring->tx_idr_lock);
- msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
- if (!msdu) {
+ spin_lock(&tx_ring->tx_idr_lock);
+ msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ if (unlikely(!msdu)) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
- spin_unlock_bh(&tx_ring->tx_idr_lock);
+ spin_unlock(&tx_ring->tx_idr_lock);
continue;
}
- idr_remove(&tx_ring->txbuf_idr, msdu_id);
- spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ spin_unlock(&tx_ring->tx_idr_lock);
ar = ab->pdevs[mac_id].ar;
@@ -903,7 +996,7 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
- pdev_mask = 1 << (i + 1);
+ pdev_mask = 1 << (ar->pdev_idx + i);
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
@@ -993,6 +1086,7 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
struct ath11k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ext_stats_cfg_cmd *cmd;
+ u32 pdev_id;
int len = sizeof(*cmd);
int ret;
@@ -1006,7 +1100,12 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
- cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+ if (ab->hw_params.single_pdev_only)
+ pdev_id = ath11k_mac_get_target_pdev_id(ar);
+ else
+ pdev_id = ar->pdev->pdev_id;
+
+ cmd->hdr.pdev_mask = 1 << pdev_id;
cmd->hdr.stats_type = type;
cmd->cfg_param0 = cfg_params->cfg0;
@@ -1034,6 +1133,15 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
int ret = 0, ring_id = 0, i;
+ if (ab->hw_params.full_monitor_mode) {
+ ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
+ dp->mac_id, !reset);
+ if (ret < 0) {
+ ath11k_err(ab, "failed to setup full monitor %d\n", ret);
+ return ret;
+ }
+ }
+
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
if (!reset) {
@@ -1099,3 +1207,42 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
return ret;
}
+
+int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
+ bool config)
+{
+ struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len = sizeof(*cmd);
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
+
+ cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);
+
+ cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
+ FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
+ HTT_RX_MON_RING_SW);
+ if (config) {
+ cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
+ HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
+ }
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
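ath11k_dp_tx_update_txcompl() above guards the HT branch with a non-zero NSS before folding it into the MCS index, since HT MCS numbering packs 8 indices per spatial stream. A minimal sketch of that index math under the usual HT numbering:

#include <stdio.h>

/* HT MCS numbering folds NSS in at 8 indices per spatial stream; the
 * nss != 0 guard above avoids computing an index before any rate has
 * been cached
 */
static int ht_mcs_index(int mcs, int nss)
{
	if (nss == 0)
		return -1;	/* no cached rate yet: leave txrate.mcs untouched */
	return mcs + 8 * (nss - 1);
}

int main(void)
{
	printf("%d\n", ht_mcs_index(7, 2));	/* MCS 7 at 2ss -> index 15 */
	return 0;
}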
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
index 698b907b878d..e87d65bfbf06 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -15,6 +15,7 @@ struct ath11k_dp_htt_wbm_tx_status {
int ack_rssi;
};
+void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts);
int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
struct ath11k_sta *arsta, struct sk_buff *skb);
@@ -37,4 +38,6 @@ int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
int rx_buf_size,
struct htt_rx_ring_tlv_filter *tlv_filter);
+int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
+ bool config);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index eaa0edca5576..2ec09ae90080 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -627,6 +627,21 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
return NULL;
}
+static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ /* prefetch only if desc is available */
+ desc = ath11k_hal_srng_dst_peek(ab, srng);
+ if (likely(desc)) {
+ dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
+ (srng->entry_size * sizeof(u32)),
+ DMA_FROM_DEVICE);
+ prefetch(desc);
+ }
+}
+
u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
struct hal_srng *srng)
{
@@ -639,8 +654,15 @@ u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
- srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
- srng->ring_size;
+ srng->u.dst_ring.tp += srng->entry_size;
+
+ /* wrap around to start of ring */
+ if (srng->u.dst_ring.tp == srng->ring_size)
+ srng->u.dst_ring.tp = 0;
+
+ /* Try to prefetch the next descriptor in the ring */
+ if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+ ath11k_hal_srng_prefetch_desc(ab, srng);
return desc;
}
@@ -775,11 +797,16 @@ void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
- if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
- else
+ } else {
srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+
+ /* Try to prefetch the next descriptor in the ring */
+ if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+ ath11k_hal_srng_prefetch_desc(ab, srng);
+ }
}
/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
@@ -947,6 +974,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
srng->msi_data = params->msi_data;
srng->initialized = 1;
spin_lock_init(&srng->lock);
+ lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
srng->hwreg_base[i] = srng_config->reg_start[i] +
@@ -1233,6 +1261,24 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
return 0;
}
+static void ath11k_hal_register_srng_key(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 ring_id;
+
+ for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
+ lockdep_register_key(hal->srng_key + ring_id);
+}
+
+static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 ring_id;
+
+ for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
+ lockdep_unregister_key(hal->srng_key + ring_id);
+}
+
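Registering one dynamic lock_class_key per ring gives each srng->lock its own lockdep class; without this, every ring shares the class of the single spin_lock_init() call site and legitimate cross-ring lock nesting can be reported as a self-deadlock. The pattern, on a hypothetical object:

    struct foo {
    	spinlock_t lock;
    	struct lock_class_key key;	/* one lockdep class per instance */
    };

    static void foo_init(struct foo *f)
    {
    	lockdep_register_key(&f->key);
    	spin_lock_init(&f->lock);
    	lockdep_set_class(&f->lock, &f->key);
    }

    static void foo_exit(struct foo *f)
    {
    	lockdep_unregister_key(&f->key);	/* once the lock is no longer used */
    }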
int ath11k_hal_srng_init(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
@@ -1252,6 +1298,8 @@ int ath11k_hal_srng_init(struct ath11k_base *ab)
if (ret)
goto err_free_cont_rdp;
+ ath11k_hal_register_srng_key(ab);
+
return 0;
err_free_cont_rdp:
@@ -1266,6 +1314,7 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
+ ath11k_hal_unregister_srng_key(ab);
ath11k_hal_free_cont_rdp(ab);
ath11k_hal_free_cont_wrp(ab);
kfree(hal->srng_config);
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 35ed3a14e200..a7d9b4c551ad 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -513,6 +513,7 @@ enum hal_srng_dir {
#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
+#define HAL_SRNG_FLAGS_CACHED 0x20000000
#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
@@ -901,6 +902,8 @@ struct ath11k_hal {
/* shadow register configuration */
u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
int num_shadow_reg_configured;
+
+ struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX];
};
u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index 00b595b84939..406767672844 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -858,6 +858,25 @@ struct hal_reo_entrance_ring {
* this ring has looped around the ring.
*/
+#define HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE GENMASK(6, 2)
+#define HAL_SW_MON_RING_INFO0_MPDU_FRAG_NUMBER GENMASK(10, 7)
+#define HAL_SW_MON_RING_INFO0_FRAMELESS_BAR BIT(11)
+#define HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT GENMASK(15, 12)
+#define HAL_SW_MON_RING_INFO0_END_OF_PPDU BIT(16)
+
+#define HAL_SW_MON_RING_INFO1_PHY_PPDU_ID GENMASK(15, 0)
+#define HAL_SW_MON_RING_INFO1_RING_ID GENMASK(27, 20)
+#define HAL_SW_MON_RING_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_sw_monitor_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct ath11k_buffer_addr status_buf_addr_info;
+ u32 info0;
+ u32 info1;
+} __packed;
+
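Being __packed, the struct overlays the raw 32-bit descriptor words exactly, and the masks above recover individual fields with FIELD_GET(), mirroring the parsing added to hal_rx.c further down:

    struct hal_sw_monitor_ring *desc = rx_desc;
    u8 buf_cnt;
    u16 ppdu_id;

    buf_cnt = FIELD_GET(HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT, desc->info0);
    ppdu_id = FIELD_GET(HAL_SW_MON_RING_INFO1_PHY_PPDU_ID, desc->info1);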
#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0)
#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16)
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 329c404cfa80..a3b353a4b5f7 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -29,8 +29,7 @@ static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv,
FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
desc = (struct hal_reo_get_queue_stats *)tlv->value;
- memset(&desc->queue_addr_lo, 0,
- (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+ memset_startat(desc, 0, queue_addr_lo);
desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
@@ -62,8 +61,7 @@ static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv
FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
desc = (struct hal_reo_flush_cache *)tlv->value;
- memset(&desc->cache_addr_lo, 0,
- (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+ memset_startat(desc, 0, cache_addr_lo);
desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
@@ -101,8 +99,7 @@ static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
desc = (struct hal_reo_update_rx_queue *)tlv->value;
- memset(&desc->queue_addr_lo, 0,
- (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+ memset_startat(desc, 0, queue_addr_lo);
desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
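memset_startat() from <linux/string.h> zeroes a struct from the named member through the end of the struct, replacing the open-coded pointer/size arithmetic deleted in these hunks. Roughly, on a hypothetical descriptor:

    struct desc {
    	struct hal_reo_cmd_hdr cmd;	/* left untouched */
    	u32 queue_addr_lo;		/* zeroed from here ... */
    	u32 rest[8];			/* ... through the end */
    } d;

    memset_startat(&d, 0, queue_addr_lo);
    /* equivalent to:
     * memset(&d.queue_addr_lo, 0,
     *        sizeof(d) - offsetof(struct desc, queue_addr_lo));
     */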
@@ -374,7 +371,7 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
wbm_desc->buf_addr_info.info1);
- if (ret_buf_mgr != ab->hw_params.hal_params->rx_buf_rbm) {
+ if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}
@@ -764,15 +761,17 @@ void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
* size changes and also send WMI message to FW to change the REO
* queue descriptor in Rx peer entry as part of dp_rx_tid_update.
*/
- memset(ext_desc, 0, 3 * sizeof(*ext_desc));
+ memset(ext_desc, 0, sizeof(*ext_desc));
ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
ext_desc++;
+ memset(ext_desc, 0, sizeof(*ext_desc));
ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
ext_desc++;
+ memset(ext_desc, 0, sizeof(*ext_desc));
ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
@@ -1039,7 +1038,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ru_tones = FIELD_GET(HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION,
info0);
- ppdu_info->ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ ppdu_info->ru_alloc =
+ ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
+
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
}
@@ -1080,6 +1081,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
break;
}
case HAL_PHYRX_RSSI_LEGACY: {
+ int i;
+ bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map);
struct hal_rx_phyrx_rssi_legacy_info *rssi =
(struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
@@ -1090,6 +1094,14 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->rssi_comb =
FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB,
__le32_to_cpu(rssi->info0));
+
+ if (db2dbm) {
+ for (i = 0; i < ARRAY_SIZE(rssi->preamble); i++) {
+ ppdu_info->rssi_chain_pri20[i] =
+ le32_get_bits(rssi->preamble[i].rssi_2040,
+ HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20);
+ }
+ }
break;
}
case HAL_RX_MPDU_START: {
@@ -1186,3 +1198,47 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
*pp_buf_addr = (void *)buf_addr_info;
}
+
+void
+ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc,
+ struct hal_sw_mon_ring_entries *sw_mon_entries)
+{
+ struct hal_sw_monitor_ring *sw_mon_ring = rx_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ struct ath11k_buffer_addr *status_buf_addr_info;
+ struct rx_mpdu_desc *rx_mpdu_desc_info_details;
+
+ rx_mpdu_desc_info_details = &sw_mon_ring->rx_mpdu_info;
+
+ sw_mon_entries->msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
+ rx_mpdu_desc_info_details->info0);
+
+ buf_addr_info = &sw_mon_ring->buf_addr_info;
+ status_buf_addr_info = &sw_mon_ring->status_buf_addr_info;
+
+ sw_mon_entries->mon_dst_paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ buf_addr_info->info0);
+
+ sw_mon_entries->mon_status_paddr =
+ (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ status_buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ status_buf_addr_info->info0);
+
+ sw_mon_entries->mon_dst_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ buf_addr_info->info1);
+
+ sw_mon_entries->mon_status_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ status_buf_addr_info->info1);
+
+ sw_mon_entries->status_buf_count = FIELD_GET(HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT,
+ sw_mon_ring->info0);
+
+ sw_mon_entries->dst_buf_addr_info = buf_addr_info;
+ sw_mon_entries->status_buf_addr_info = status_buf_addr_info;
+
+ sw_mon_entries->ppdu_id =
+ FIELD_GET(HAL_SW_MON_RING_INFO1_PHY_PPDU_ID, sw_mon_ring->info1);
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index 0f1f04b812b9..571054c6d7f8 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -77,6 +77,20 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_BUF_DONE,
};
+struct hal_sw_mon_ring_entries {
+ dma_addr_t mon_dst_paddr;
+ dma_addr_t mon_status_paddr;
+ u32 mon_dst_sw_cookie;
+ u32 mon_status_sw_cookie;
+ void *dst_buf_addr_info;
+ void *status_buf_addr_info;
+ u16 ppdu_id;
+ u8 status_buf_count;
+ u8 msdu_cnt;
+ bool end_of_ppdu;
+ bool drop_ppdu;
+};
+
struct hal_rx_mon_ppdu_info {
u32 ppdu_id;
u32 ppdu_ts;
@@ -98,6 +112,7 @@ struct hal_rx_mon_ppdu_info {
u8 ldpc;
u8 beamformed;
u8 rssi_comb;
+ u8 rssi_chain_pri20[HAL_RX_MAX_NSS];
u8 tid;
u8 dcm;
u8 ru_alloc;
@@ -248,8 +263,17 @@ struct hal_rx_he_sig_b2_ofdma_info {
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
+#define HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20 GENMASK(7, 0)
+
+struct hal_rx_phyrx_chain_rssi {
+ __le32 rssi_2040;
+ __le32 rssi_80;
+} __packed;
+
struct hal_rx_phyrx_rssi_legacy_info {
- __le32 rsvd[35];
+ __le32 rsvd[3];
+ struct hal_rx_phyrx_chain_rssi pre_rssi[HAL_RX_MAX_NSS];
+ struct hal_rx_phyrx_chain_rssi preamble[HAL_RX_MAX_NSS];
__le32 info0;
} __packed;
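Assuming HAL_RX_MAX_NSS is 8, the expanded layout occupies exactly the space of the old opaque array: 3 reserved words plus two arrays of 8 two-word chain entries is 3 + 2 * (8 * 2) = 35 32-bit words ahead of info0, matching the removed rsvd[35]. A build-time check could capture this:

    /* sketch, assuming HAL_RX_MAX_NSS == 8 */
    static_assert(offsetof(struct hal_rx_phyrx_rssi_legacy_info, info0) ==
    	      35 * sizeof(__le32));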
@@ -331,38 +355,14 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
dma_addr_t *paddr, u32 *sw_cookie,
void **pp_buf_addr_info, u8 *rbm,
u32 *msdu_cnt);
+void
+ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc,
+ struct hal_sw_mon_ring_entries *sw_mon_ent);
enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct sk_buff *skb);
-static inline u32 ath11k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
-{
- u32 ret = 0;
-
- switch (ru_tones) {
- case RU_26:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
- break;
- case RU_52:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
- break;
- case RU_106:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- break;
- case RU_242:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
- break;
- case RU_484:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
- break;
- case RU_996:
- ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
- break;
- }
- return ret;
-}
-
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
index 54b1d34724d7..6913b7494b9b 100644
--- a/drivers/net/wireless/ath/ath11k/htc.c
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -81,6 +81,8 @@ int ath11k_htc_send(struct ath11k_htc *htc,
struct ath11k_base *ab = htc->ab;
int credits = 0;
int ret;
+ bool credit_flow_enabled = (ab->hw_params.credit_flow &&
+ ep->tx_credit_flow_enabled);
if (eid >= ATH11K_HTC_EP_COUNT) {
ath11k_warn(ab, "Invalid endpoint id: %d\n", eid);
@@ -89,7 +91,7 @@ int ath11k_htc_send(struct ath11k_htc *htc,
skb_push(skb, sizeof(struct ath11k_htc_hdr));
- if (ep->tx_credit_flow_enabled) {
+ if (credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
@@ -126,7 +128,7 @@ int ath11k_htc_send(struct ath11k_htc *htc,
err_unmap:
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
- if (ep->tx_credit_flow_enabled) {
+ if (credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath11k_dbg(ab, ATH11K_DBG_HTC,
@@ -203,23 +205,25 @@ static int ath11k_htc_process_trailer(struct ath11k_htc *htc,
break;
}
- switch (record->hdr.id) {
- case ATH11K_HTC_RECORD_CREDITS:
- len = sizeof(struct ath11k_htc_credit_report);
- if (record->hdr.len < len) {
- ath11k_warn(ab, "Credit report too long\n");
- status = -EINVAL;
+ if (ab->hw_params.credit_flow) {
+ switch (record->hdr.id) {
+ case ATH11K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath11k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath11k_warn(ab, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath11k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ default:
+ ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
break;
}
- ath11k_htc_process_credit_report(htc,
- record->credit_report,
- record->hdr.len,
- src_eid);
- break;
- default:
- ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
- record->hdr.id, record->hdr.len);
- break;
}
if (status)
@@ -245,6 +249,29 @@ static void ath11k_htc_suspend_complete(struct ath11k_base *ab, bool ack)
complete(&ab->htc_suspend);
}
+void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_ep *ep;
+ void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
+ u8 eid;
+
+ eid = ATH11K_SKB_CB(skb)->eid;
+ if (eid >= ATH11K_HTC_EP_COUNT)
+ return;
+
+ ep = &htc->endpoint[eid];
+ spin_lock_bh(&htc->tx_lock);
+ ep_tx_complete = ep->ep_ops.ep_tx_complete;
+ spin_unlock_bh(&htc->tx_lock);
+ if (!ep_tx_complete) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ ep_tx_complete(htc->ab, skb);
+}
+
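The handler snapshots ep_tx_complete under tx_lock and invokes it only after dropping the lock, so the completion callback never runs with the HTC spinlock held; frames with no registered handler are simply freed. The shape of the pattern, with hypothetical types:

    void (*cb)(struct ctx *ctx, struct sk_buff *skb);

    spin_lock_bh(&lock);
    cb = ep->ops.tx_complete;	/* read the pointer under the lock */
    spin_unlock_bh(&lock);

    if (cb)
    	cb(ctx, skb);		/* call with the lock released */
    else
    	dev_kfree_skb_any(skb);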
void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
@@ -607,6 +634,11 @@ int ath11k_htc_connect_service(struct ath11k_htc *htc,
disable_credit_flow_ctrl = true;
}
+ if (!ab->hw_params.credit_flow) {
+ flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
conn_req->service_id);
@@ -732,7 +764,10 @@ int ath11k_htc_start(struct ath11k_htc *htc)
msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
- ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n");
+ if (ab->hw_params.credit_flow)
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n");
+ else
+ msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW;
status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
if (status) {
diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h
index 6c8a469d7f9d..f429b37cfdf7 100644
--- a/drivers/net/wireless/ath/ath11k/htc.h
+++ b/drivers/net/wireless/ath/ath11k/htc.h
@@ -83,8 +83,8 @@ enum ath11k_htc_conn_flags {
ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
- ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
- ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+ ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 0x4,
+ ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 0x8,
};
enum ath11k_htc_conn_svc_status {
@@ -116,6 +116,8 @@ struct ath11k_htc_conn_svc_resp {
u32 svc_meta_pad;
} __packed;
+#define ATH11K_GLOBAL_DISABLE_CREDIT_FLOW BIT(1)
+
struct ath11k_htc_setup_complete_extended {
u32 msg_id;
u32 flags;
@@ -305,5 +307,6 @@ int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid,
struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ar, int size);
void ath11k_htc_rx_completion_handler(struct ath11k_base *ar,
struct sk_buff *skb);
-
+void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index da35fcf5bc56..3b0fdc1a6b3f 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -150,18 +150,18 @@ static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab)
static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
struct target_resource_config *config)
{
- config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
+ config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
if (ab->num_radios == 2) {
- config->num_peers = TARGET_NUM_PEERS(DBS);
- config->num_tids = TARGET_NUM_TIDS(DBS);
+ config->num_peers = TARGET_NUM_PEERS(ab, DBS);
+ config->num_tids = TARGET_NUM_TIDS(ab, DBS);
} else if (ab->num_radios == 3) {
- config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
- config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
+ config->num_peers = TARGET_NUM_PEERS(ab, DBS_SBS);
+ config->num_tids = TARGET_NUM_TIDS(ab, DBS_SBS);
} else {
/* Control should not reach here */
- config->num_peers = TARGET_NUM_PEERS(SINGLE);
- config->num_tids = TARGET_NUM_TIDS(SINGLE);
+ config->num_peers = TARGET_NUM_PEERS(ab, SINGLE);
+ config->num_tids = TARGET_NUM_TIDS(ab, SINGLE);
}
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
@@ -1061,8 +1061,6 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = {
.tx = {
ATH11K_TX_RING_MASK_0,
- ATH11K_TX_RING_MASK_1,
- ATH11K_TX_RING_MASK_2,
},
.rx_mon_status = {
0, 0, 0, 0,
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 19223d36846e..29934b36c14e 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -12,26 +12,26 @@
/* Target configuration defines */
/* Num VDEVS per radio */
-#define TARGET_NUM_VDEVS (16 + 1)
+#define TARGET_NUM_VDEVS(ab) (ab->hw_params.num_vdevs)
-#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV(ab) (ab->hw_params.num_peers + TARGET_NUM_VDEVS(ab))
/* Num of peers for Single Radio mode */
-#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_SINGLE(ab) (TARGET_NUM_PEERS_PDEV(ab))
/* Num of peers for DBS */
-#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_DBS(ab) (2 * TARGET_NUM_PEERS_PDEV(ab))
/* Num of peers for DBS_SBS */
-#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_DBS_SBS(ab) (3 * TARGET_NUM_PEERS_PDEV(ab))
/* Max num of stations (per radio) */
-#define TARGET_NUM_STATIONS 512
+#define TARGET_NUM_STATIONS(ab) (ab->hw_params.num_peers)
-#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
+#define TARGET_NUM_PEERS(ab, x) TARGET_NUM_PEERS_##x(ab)
#define TARGET_NUM_PEER_KEYS 2
-#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \
- 4 * TARGET_NUM_VDEVS + 8)
+#define TARGET_NUM_TIDS(ab, x) (2 * TARGET_NUM_PEERS(ab, x) + \
+ 4 * TARGET_NUM_VDEVS(ab) + 8)
#define TARGET_AST_SKID_LIMIT 16
#define TARGET_NUM_OFFLD_PEERS 4
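With the values the old constants hard-coded (num_vdevs = 16 + 1 = 17, num_peers = 512), the parameterized macros above reproduce the previous numbers, e.g. for DBS:

    /* TARGET_NUM_PEERS_PDEV = 512 + 17              =  529
     * TARGET_NUM_PEERS_DBS  = 2 * 529               = 1058
     * TARGET_NUM_TIDS(DBS)  = 2 * 1058 + 4 * 17 + 8 = 2192
     */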
@@ -77,6 +77,7 @@
#define ATH11K_DEFAULT_CAL_FILE "caldata.bin"
#define ATH11K_AMSS_FILE "amss.bin"
#define ATH11K_M3_FILE "m3.bin"
+#define ATH11K_REGDB_FILE_NAME "regdb.bin"
enum ath11k_hw_rate_cck {
ATH11K_HW_RATE_CCK_LP_11M = 0,
@@ -151,6 +152,9 @@ struct ath11k_hw_params {
u32 svc_to_ce_map_len;
bool single_pdev_only;
+ u32 rfkill_pin;
+ u32 rfkill_cfg;
+ u32 rfkill_on_level;
bool rxdma1_enable;
int num_rxmda_per_pdev;
@@ -168,14 +172,26 @@ struct ath11k_hw_params {
u16 interface_modes;
bool supports_monitor;
+ bool full_monitor_mode;
bool supports_shadow_regs;
bool idle_ps;
+ bool supports_sta_ps;
bool cold_boot_calib;
+ int fw_mem_mode;
+ u32 num_vdevs;
+ u32 num_peers;
bool supports_suspend;
u32 hal_desc_sz;
+ bool supports_regdb;
bool fix_l1ss;
+ bool credit_flow;
u8 max_tx_ring;
const struct ath11k_hw_hal_params *hal_params;
+ bool supports_dynamic_smps_6ghz;
+ bool alloc_cacheable_memory;
+ bool wakeup_mhi;
+ bool supports_rssi_stats;
+ bool fw_wmi_diag_event;
};
struct ath11k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 1cc55602787b..07f499d5ec92 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -245,6 +246,93 @@ static const u32 ath11k_smps_map[] = {
static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy)
+{
+ enum nl80211_he_ru_alloc ret;
+
+ switch (ru_phy) {
+ case RU_26:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ case RU_52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case RU_106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case RU_242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case RU_484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case RU_996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ default:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ }
+
+ return ret;
+}
+
+enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
+{
+ enum nl80211_he_ru_alloc ret;
+
+ switch (ru_tones) {
+ case 26:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ case 52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case 106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case 242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case 484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case 996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case (996 * 2):
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ default:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ }
+
+ return ret;
+}
+
+enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi)
+{
+ enum nl80211_he_gi ret;
+
+ switch (sgi) {
+ case RX_MSDU_START_SGI_0_8_US:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case RX_MSDU_START_SGI_1_6_US:
+ ret = NL80211_RATE_INFO_HE_GI_1_6;
+ break;
+ case RX_MSDU_START_SGI_3_2_US:
+ ret = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ default:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ }
+
+ return ret;
+}
+
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
{
u8 ret = 0;
@@ -553,6 +641,67 @@ struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
return NULL;
}
+struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up)
+ return arvif;
+ }
+ }
+
+ return NULL;
+}
+
+static bool ath11k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2)
+{
+ return (((band1 == NL80211_BAND_2GHZ) && (band2 & WMI_HOST_WLAN_2G_CAP)) ||
+ (((band1 == NL80211_BAND_5GHZ) || (band1 == NL80211_BAND_6GHZ)) &&
+ (band2 & WMI_HOST_WLAN_5G_CAP)));
+}
+
+u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 pdev_id = ab->target_pdev_ids[0].pdev_id;
+ int i;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return pdev_id;
+
+ band = def.chan->band;
+
+ for (i = 0; i < ab->target_pdev_count; i++) {
+ if (ath11k_mac_band_match(band, ab->target_pdev_ids[i].supported_bands))
+ return ab->target_pdev_ids[i].pdev_id;
+ }
+
+ return pdev_id;
+}
+
+u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+ arvif = ath11k_mac_get_vif_up(ar->ab);
+
+ if (arvif)
+ return ath11k_mac_get_target_pdev_id_from_vif(arvif);
+ else
+ return ar->ab->target_pdev_ids[0].pdev_id;
+}
+
static void ath11k_pdev_caps_update(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@@ -775,9 +924,9 @@ static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id,
arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
arg.channel.min_power = 0;
- arg.channel.max_power = channel->max_power * 2;
- arg.channel.max_reg_power = channel->max_reg_power * 2;
- arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+ arg.channel.max_power = channel->max_power;
+ arg.channel.max_reg_power = channel->max_reg_power;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
@@ -1049,6 +1198,83 @@ static int ath11k_mac_monitor_stop(struct ath11k *ar)
return 0;
}
+static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_conf *conf = &ar->hw->conf;
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+
+ enable_ps = arvif->ps;
+
+ if (!arvif->is_started) {
+ /* mac80211 can update vif powersave state while disconnected.
+ * Firmware doesn't behave nicely and consumes more power than
+ * necessary if PS is disabled on a non-started vdev. Hence
+ * force-enable PS for non-running vdevs.
+ */
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ } else if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ }
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_config_ps(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup powersave: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath11k *ar = hw->priv;
@@ -1137,11 +1363,15 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
arvif->rsnie_present = true;
+ else
+ arvif->rsnie_present = false;
if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
ies, (skb_tail_pointer(bcn) - ies)))
arvif->wpaie_present = true;
+ else
+ arvif->wpaie_present = false;
ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
@@ -1154,6 +1384,26 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
return ret;
}
+void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!vif->color_change_active && !arvif->bcca_zero_sent)
+ return;
+
+ if (vif->color_change_active && ieee80211_beacon_cntdwn_is_complete(vif)) {
+ arvif->bcca_zero_sent = true;
+ ieee80211_color_change_finish(vif);
+ return;
+ }
+
+ arvif->bcca_zero_sent = false;
+
+ if (vif->color_change_active)
+ ieee80211_beacon_update_cntdwn(vif);
+ ath11k_mac_setup_bcn_tmpl(arvif);
+}
+
static void ath11k_control_beaconing(struct ath11k_vif *arvif,
struct ieee80211_bss_conf *info)
{
@@ -1819,7 +2069,6 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
- u8 ampdu_factor;
enum nl80211_band band;
u16 *he_mcs_mask;
u8 max_nss, he_mcs;
@@ -1827,6 +2076,9 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
int i, he_nss, nss_idx;
bool user_rate_valid = true;
u32 rx_nss, tx_nss, nss_160;
+ u8 ampdu_factor, rx_mcs_80, rx_mcs_160;
+ u16 mcs_160_map, mcs_80_map;
+ bool support_160;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
@@ -1841,6 +2093,39 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
return;
arg->he_flag = true;
+ support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G);
+
+	/* The supported HE-MCS and NSS set of the peer is the intersection
+	 * of peer he_cap and self he_cap.
+	 */
+ mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+
+ if (support_160) {
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
+
+ if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ rx_mcs_160 = i + 1;
+ break;
+ }
+ }
+ }
+
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
+
+ if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ rx_mcs_80 = i + 1;
+ break;
+ }
+ }
+
+ if (support_160)
+ max_nss = min(rx_mcs_80, rx_mcs_160);
+ else
+ max_nss = rx_mcs_80;
+
+ arg->peer_nss = min(sta->rx_nss, max_nss);
memcpy_and_pad(&arg->peer_he_cap_macinfo,
sizeof(arg->peer_he_cap_macinfo),
@@ -2343,8 +2628,12 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar,
struct peer_assoc_params *arg,
bool reassoc)
{
+ struct ath11k_sta *arsta;
+
lockdep_assert_held(&ar->conf_mutex);
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
memset(arg, 0, sizeof(*arg));
reinit_completion(&ar->peer_assoc_done);
@@ -2361,6 +2650,8 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar,
ath11k_peer_assoc_h_qos(ar, vif, sta, arg);
ath11k_peer_assoc_h_smps(sta, arg);
+ arsta->peer_nss = arg->peer_nss;
+
/* TODO: amsdu_disable req? */
}
@@ -2397,6 +2688,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct peer_assoc_params peer_arg;
struct ieee80211_sta *ap_sta;
+ struct ath11k_peer *peer;
+ bool is_auth = false;
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -2418,6 +2711,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
+ peer_arg.is_assoc = true;
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
@@ -2458,19 +2752,37 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
- /* Authorize BSS Peer */
- ret = ath11k_wmi_set_peer_param(ar, arvif->bssid,
- arvif->vdev_id,
- WMI_PEER_AUTHORIZE,
- 1);
- if (ret)
- ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid);
+ if (peer && peer->is_authorized)
+ is_auth = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (is_auth) {
+ ret = ath11k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
+ }
ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
&bss_conf->he_obss_pd);
if (ret)
ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
arvif->vdev_id, ret);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_DTIM_POLICY_STICK);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set vdev %d dtim policy: %d\n",
+ arvif->vdev_id, ret);
+
+ ath11k_mac_11d_scan_stop_all(ar->ab);
}
static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
@@ -2805,10 +3117,17 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
"Set staggered beacon mode for VDEV: %d\n",
arvif->vdev_id);
- ret = ath11k_mac_setup_bcn_tmpl(arvif);
- if (ret)
- ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
- ret);
+ if (!arvif->do_not_send_tmpl || !arvif->bcca_zero_sent) {
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
+ ret);
+ }
+
+ if (arvif->bcca_zero_sent)
+ arvif->do_not_send_tmpl = true;
+ else
+ arvif->do_not_send_tmpl = false;
}
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@@ -2942,6 +3261,16 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath11k_mac_txpower_recalc(ar);
}
+ if (changed & BSS_CHANGED_PS &&
+ ar->ab->hw_params.supports_sta_ps) {
+ arvif->ps = vif->bss_conf.ps;
+
+ ret = ath11k_mac_config_ps(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to setup ps on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
if (changed & BSS_CHANGED_MCAST_RATE &&
!ath11k_mac_vif_chan(arvif->vif, &def)) {
band = def.chan->band;
@@ -3009,6 +3338,25 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret)
ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
arvif->vdev_id, ret);
+
+ param_id = WMI_VDEV_PARAM_BSS_COLOR;
+ if (info->he_bss_color.enabled)
+ param_value = info->he_bss_color.color <<
+ IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET;
+ else
+ param_value = IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ param_value);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set bss color param on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "bss color param 0x%x set on vdev %i\n",
+ param_value, arvif->vdev_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar,
arvif->vdev_id,
@@ -3164,6 +3512,7 @@ static int ath11k_start_scan(struct ath11k *ar,
struct scan_req_params *arg)
{
int ret;
+ unsigned long timeout = 1 * HZ;
lockdep_assert_held(&ar->conf_mutex);
@@ -3174,7 +3523,14 @@ static int ath11k_start_scan(struct ath11k *ar,
if (ret)
return ret;
- ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) {
+ timeout = 5 * HZ;
+
+ if (ar->supports_6ghz)
+ timeout += 5 * HZ;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.started, timeout);
if (ret == 0) {
ret = ath11k_scan_stop(ar);
if (ret)
@@ -3231,15 +3587,38 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (ret)
goto exit;
+	/* pending_11d is set to true only once per interface bring-up:
+	 * ath11k_mac_11d_scan_start() is called with wait == true from
+	 * ath11k_mac_op_add_interface(); after the interface is up,
+	 * pending_11d stays false.
+	 * Without the wait below, a scan requested right after interface
+	 * up would always fail (and the connect attempt with it), because
+	 * the 11d scan is still running in firmware and the new scan
+	 * request is rejected.
+	 */
+ if (ar->pending_11d) {
+ long time_left;
+ unsigned long timeout = 5 * HZ;
+
+ if (ar->supports_6ghz)
+ timeout += 5 * HZ;
+
+ time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout);
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac wait 11d channel list time left %ld\n", time_left);
+ }
+
memset(&arg, 0, sizeof(arg));
ath11k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id;
arg.scan_id = ATH11K_SCAN_ID;
if (req->ie_len) {
+ arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
+ if (!arg.extraie.ptr) {
+ ret = -ENOMEM;
+ goto exit;
+ }
arg.extraie.len = req->ie_len;
- arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL);
- memcpy(arg.extraie.ptr, req->ie, req->ie_len);
}
if (req->n_ssids) {
@@ -3255,10 +3634,24 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (req->n_channels) {
arg.num_chan = req->n_channels;
+ arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+ GFP_KERNEL);
+
+ if (!arg.chan_list) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
for (i = 0; i < arg.num_chan; i++)
arg.chan_list[i] = req->channels[i]->center_freq;
}
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ arg.scan_f_add_spoofed_mac_in_probe = 1;
+ ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
+ ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
+ }
+
ret = ath11k_start_scan(ar, &arg);
if (ret) {
ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
@@ -3273,6 +3666,8 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
ATH11K_MAC_SCAN_TIMEOUT_MSECS));
exit:
+ kfree(arg.chan_list);
+
if (req->ie_len)
kfree(arg.extraie.ptr);
@@ -3316,9 +3711,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
return 0;
if (cmd == DISABLE_KEY) {
- /* TODO: Check if FW expects value other than NONE for del */
- /* arg.key_cipher = WMI_CIPHER_NONE; */
- arg.key_len = 0;
+ arg.key_cipher = WMI_CIPHER_NONE;
arg.key_data = NULL;
goto install;
}
@@ -3450,7 +3843,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
/* flush the fragments cache during key (re)install to
* ensure all frags in the new frag list belong to the same key.
*/
- if (peer && cmd == SET_KEY)
+ if (peer && sta && cmd == SET_KEY)
ath11k_peer_frags_flush(ar, peer);
spin_unlock_bh(&ab->base_lock);
@@ -3685,6 +4078,7 @@ static int ath11k_station_assoc(struct ath11k *ar,
ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+ peer_arg.is_assoc = true;
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -3824,11 +4218,27 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
ath11k_mac_max_he_nss(he_mcs_mask)));
if (changed & IEEE80211_RC_BW_CHANGED) {
- err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
- WMI_PEER_CHWIDTH, bw);
- if (err)
- ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
- sta->addr, bw, err);
+ /* Send peer assoc command before set peer bandwidth param to
+ * avoid the mismatch between the peer phymode and the peer
+ * bandwidth.
+ */
+ ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true);
+
+ peer_arg.is_assoc = false;
+ err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (err) {
+ ath11k_warn(ar->ab, "failed to send peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, err);
+ } else if (wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ } else {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ }
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
@@ -3896,6 +4306,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
&peer_arg, true);
+ peer_arg.is_assoc = false;
err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (err)
ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -4095,6 +4506,10 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST)) {
ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+ if (ar->ab->hw_params.vdev_start_delay &&
+ vif->type == NL80211_IFTYPE_STATION)
+ goto free;
+
ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
@@ -4116,6 +4531,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
}
spin_unlock_bh(&ar->ab->base_lock);
+free:
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
@@ -4131,6 +4547,34 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
@@ -4561,6 +5005,10 @@ ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
vht_cap.vht_supported = 1;
vht_cap.cap = ar->pdev->cap.vht_cap;
+ if (ar->pdev->cap.nss_ratio_enabled)
+ vht_cap.vht_mcs.tx_highest |=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+
ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
rxmcs_map = 0;
@@ -4926,23 +5374,47 @@ static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
return 0;
}
-int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+static void ath11k_mgmt_over_wmi_tx_drop(struct ath11k *ar, struct sk_buff *skb)
{
- struct sk_buff *msdu = skb;
+ int num_mgmt;
+
+ ieee80211_free_txskb(ar->hw, skb);
+
+ num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
+
+ if (num_mgmt < 0)
+ WARN_ON_ONCE(1);
+
+ if (!num_mgmt)
+ wake_up(&ar->txmgmt_empty_waitq);
+}
+
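atomic_dec_if_positive() performs the decrement only when the result would stay non-negative and returns that (possibly negative, un-stored) result, which is what the two checks above rely on:

    int v = atomic_dec_if_positive(&counter);

    /* v >= 0: the counter was decremented; v is the new value
     * v <  0: the counter was already 0; nothing was written back
     */
    if (!v)
    	wake_up(&waitq);	/* the last pending item just drained */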
+static void ath11k_mac_tx_mgmt_free(struct ath11k *ar, int buf_id)
+{
+ struct sk_buff *msdu;
struct ieee80211_tx_info *info;
- struct ath11k *ar = ctx;
- struct ath11k_base *ab = ar->ab;
spin_lock_bh(&ar->txmgmt_idr_lock);
- idr_remove(&ar->txmgmt_idr, buf_id);
+ msdu = idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
- dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+
+ if (!msdu)
+ return;
+
+ dma_unmap_single(ar->ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
- ieee80211_free_txskb(ar->hw, msdu);
+ ath11k_mgmt_over_wmi_tx_drop(ar, msdu);
+}
+
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k *ar = ctx;
+
+ ath11k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
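idr_remove() returns the pointer that was stored under the id (or NULL), so lookup and removal now happen atomically under txmgmt_idr_lock instead of trusting the caller-supplied skb:

    spin_lock_bh(&lock);
    msdu = idr_remove(&idr, buf_id);	/* the stored pointer, or NULL */
    spin_unlock_bh(&lock);

    if (!msdu)
    	return;		/* the id was already released elsewhere */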
@@ -4951,17 +5423,10 @@ static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
{
struct ieee80211_vif *vif = ctx;
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
- struct sk_buff *msdu = skb;
struct ath11k *ar = skb_cb->ar;
- struct ath11k_base *ab = ar->ab;
- if (skb_cb->vif == vif) {
- spin_lock_bh(&ar->txmgmt_idr_lock);
- idr_remove(&ar->txmgmt_idr, buf_id);
- spin_unlock_bh(&ar->txmgmt_idr_lock);
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
- DMA_TO_DEVICE);
- }
+ if (skb_cb->vif == vif)
+ ath11k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
@@ -4976,10 +5441,16 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
int buf_id;
int ret;
+ ATH11K_SKB_CB(skb)->ar = ar;
+
spin_lock_bh(&ar->txmgmt_idr_lock);
buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac tx mgmt frame, buf id %d\n", buf_id);
+
if (buf_id < 0)
return -ENOSPC;
@@ -5026,7 +5497,7 @@ static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar)
struct sk_buff *skb;
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
- ieee80211_free_txskb(ar->hw, skb);
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
}
static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
@@ -5041,7 +5512,7 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
skb_cb = ATH11K_SKB_CB(skb);
if (!skb_cb->vif) {
ath11k_warn(ar->ab, "no vif found for mgmt frame\n");
- ieee80211_free_txskb(ar->hw, skb);
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
continue;
}
@@ -5052,16 +5523,18 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
if (ret) {
ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
arvif->vdev_id, ret);
- ieee80211_free_txskb(ar->hw, skb);
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
} else {
- atomic_inc(&ar->num_pending_mgmt_tx);
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac tx mgmt frame, vdev_id %d\n",
+ arvif->vdev_id);
}
} else {
ath11k_warn(ar->ab,
"dropping mgmt frame for vdev %d, is_started %d\n",
arvif->vdev_id,
arvif->is_started);
- ieee80211_free_txskb(ar->hw, skb);
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
}
}
}
@@ -5092,11 +5565,69 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
}
skb_queue_tail(q, skb);
+ atomic_inc(&ar->num_pending_mgmt_tx);
ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
return 0;
}
+int ath11k_mac_rfkill_config(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ u32 param;
+ int ret;
+
+ if (ab->hw_params.rfkill_pin == 0)
+ return -EOPNOTSUPP;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
+ ab->hw_params.rfkill_pin, ab->hw_params.rfkill_cfg,
+ ab->hw_params.rfkill_on_level);
+
+ param = FIELD_PREP(WMI_RFKILL_CFG_RADIO_LEVEL,
+ ab->hw_params.rfkill_on_level) |
+ FIELD_PREP(WMI_RFKILL_CFG_GPIO_PIN_NUM,
+ ab->hw_params.rfkill_pin) |
+ FIELD_PREP(WMI_RFKILL_CFG_PIN_AS_GPIO,
+ ab->hw_params.rfkill_cfg);
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
+ param, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to set rfkill config 0x%x: %d\n",
+ param, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable)
+{
+ enum wmi_rfkill_enable_radio param;
+ int ret;
+
+ if (enable)
+ param = WMI_RFKILL_ENABLE_RADIO_ON;
+ else
+ param = WMI_RFKILL_ENABLE_RADIO_OFF;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac %d rfkill enable %d",
+ ar->pdev_idx, param);
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE,
+ param, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n",
+ param, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -5138,7 +5669,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
arsta = (struct ath11k_sta *)control->sta->drv_priv;
ret = ath11k_dp_tx(ar, arvif, arsta, skb);
- if (ret) {
+ if (unlikely(ret)) {
ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
ieee80211_free_txskb(ar->hw, skb);
}
@@ -5222,6 +5753,14 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
goto err;
}
+ if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) {
+ ret = ath11k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
+ if (ret) {
+ ath11k_err(ab, "failed to set prob req oui: %i\n", ret);
+ goto err;
+ }
+ }
+
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
0, pdev->pdev_id);
if (ret) {
@@ -5320,6 +5859,8 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
+ cancel_work_sync(&ar->ab->update_11d_work);
+ cancel_work_sync(&ar->ab->rfkill_work);
spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
@@ -5473,6 +6014,122 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
}
}
+static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ return true;
+ }
+ }
+ return false;
+}
+
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
+{
+ struct wmi_11d_scan_start_params param;
+ int ret;
+
+ mutex_lock(&ar->ab->vdev_id_11d_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev id for 11d scan %d\n",
+ ar->vdev_id_11d_scan);
+
+ if (ar->regdom_set_by_user)
+ goto fin;
+
+ if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID)
+ goto fin;
+
+ if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ goto fin;
+
+ if (ath11k_mac_vif_ap_active_any(ar->ab))
+ goto fin;
+
+ param.vdev_id = vdev_id;
+ param.start_interval_msec = 0;
+ param.scan_period_msec = ATH11K_SCAN_11D_INTERVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n");
+
+ if (wait)
+ reinit_completion(&ar->finish_11d_scan);
+
+ ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
+ vdev_id, ret);
+ } else {
+ ar->vdev_id_11d_scan = vdev_id;
+ if (wait) {
+ ar->pending_11d = true;
+ ret = wait_for_completion_timeout(&ar->finish_11d_scan,
+ 5 * HZ);
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac 11d scan left time %d\n", ret);
+
+ if (!ret)
+ ar->pending_11d = false;
+ }
+ }
+
+fin:
+ mutex_unlock(&ar->ab->vdev_id_11d_lock);
+}
+
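wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies otherwise, which is why pending_11d is cleared only in the !ret branch above (the scan event never arrived). The typical shape:

    unsigned long left;

    reinit_completion(&done);
    /* ... start asynchronous work that eventually calls complete(&done) ... */

    left = wait_for_completion_timeout(&done, 5 * HZ);
    if (!left)
    	pr_warn("timed out waiting for completion\n");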
+void ath11k_mac_11d_scan_stop(struct ath11k *ar)
+{
+ int ret;
+ u32 vdev_id;
+
+ if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ return;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac stop 11d scan\n");
+
+ mutex_lock(&ar->ab->vdev_id_11d_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac stop 11d vdev id %d\n",
+ ar->vdev_id_11d_scan);
+
+ if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) {
+ vdev_id = ar->vdev_id_11d_scan;
+
+ ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to stopt 11d scan vdev %d ret: %d\n",
+ vdev_id, ret);
+ else
+ ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+ }
+ mutex_unlock(&ar->ab->vdev_id_11d_lock);
+}
+
+void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "mac stop soc 11d scan\n");
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ ath11k_mac_11d_scan_stop(ar);
+ }
+}
+
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -5484,7 +6141,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
u32 param_id, param_value;
u16 nss;
int i;
- int ret;
+ int ret, fbret;
int bit;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
@@ -5498,9 +6155,9 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err;
}
- if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS(ab) - 1)) {
ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n",
- ar->num_created_vdevs, TARGET_NUM_VDEVS);
+ ar->num_created_vdevs, TARGET_NUM_VDEVS(ab));
ret = -EBUSY;
goto err;
}
@@ -5606,6 +6263,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
goto err_peer_del;
}
+
+ ath11k_mac_11d_scan_stop_all(ar->ab);
break;
case WMI_VDEV_TYPE_STA:
param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
@@ -5638,12 +6297,16 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_peer_del;
}
- ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false);
+ ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id,
+ WMI_STA_PS_MODE_DISABLED);
if (ret) {
ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
+
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true);
+
break;
case WMI_VDEV_TYPE_MONITOR:
set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
@@ -5686,17 +6349,17 @@ err_peer_del:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
reinit_completion(&ar->peer_delete_done);
- ret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr,
- arvif->vdev_id);
- if (ret) {
+ fbret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr,
+ arvif->vdev_id);
+ if (fbret) {
ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
arvif->vdev_id, vif->addr);
goto err;
}
- ret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id,
- vif->addr);
- if (ret)
+ fbret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id,
+ vif->addr);
+ if (fbret)
goto err;
ar->num_peers--;
@@ -5745,6 +6408,9 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n",
arvif->vdev_id);
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath11k_mac_11d_scan_stop(ar);
+
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
if (ret)
@@ -5831,7 +6497,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
- changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
@@ -5969,9 +6634,9 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
ath11k_phymodes[chandef->chan->band][chandef->width];
arg.channel.min_power = 0;
- arg.channel.max_power = chandef->chan->max_power * 2;
- arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
- arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+ arg.channel.max_power = chandef->chan->max_power;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
@@ -6159,37 +6824,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
lockdep_assert_held(&ar->conf_mutex);
- for (i = 0; i < n_vifs; i++) {
- arvif = (void *)vifs[i].vif->drv_priv;
-
- if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
- monitor_vif = true;
-
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
- arvif->vdev_id,
- vifs[i].old_ctx->def.chan->center_freq,
- vifs[i].new_ctx->def.chan->center_freq,
- vifs[i].old_ctx->def.width,
- vifs[i].new_ctx->def.width);
-
- if (WARN_ON(!arvif->is_started))
- continue;
-
- if (WARN_ON(!arvif->is_up))
- continue;
-
- ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
- if (ret) {
- ath11k_warn(ab, "failed to down vdev %d: %d\n",
- arvif->vdev_id, ret);
- continue;
- }
-
- ar->num_started_vdevs--;
- }
-
- /* All relevant vdevs are downed and associated channel resources
+ /* Associated channel resources of all relevant vdevs
* should be available for the channel switch now.
*/
@@ -6468,6 +7103,19 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to delete peer %pM for vdev %d: %d\n",
+ arvif->bssid, arvif->vdev_id, ret);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac removed peer %pM vdev %d after vdev stop\n",
+ arvif->bssid, arvif->vdev_id);
+ }
+
+ if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ath11k_wmi_vdev_down(ar, arvif->vdev_id);
@@ -6481,6 +7129,9 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
ret);
}
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false);
+
mutex_unlock(&ar->conf_mutex);
}
@@ -6567,6 +7218,17 @@ static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
ATH11K_FLUSH_TIMEOUT);
if (time_left == 0)
ath11k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+
+ time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
+ (atomic_read(&ar->num_pending_mgmt_tx) == 0),
+ ATH11K_FLUSH_TIMEOUT);
+ if (time_left == 0)
+ ath11k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n",
+ time_left);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac mgmt tx flush mgmt pending %d\n",
+ atomic_read(&ar->num_pending_mgmt_tx));
}
static int
@@ -7264,12 +7926,45 @@ exit:
return ret;
}
+static void ath11k_mac_put_chain_rssi(struct station_info *sinfo,
+ struct ath11k_sta *arsta,
+ char *pre,
+ bool clear)
+{
+ struct ath11k *ar = arsta->arvif->ar;
+ int i;
+ s8 rssi;
+
+ for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
+ sinfo->chains &= ~BIT(i);
+ rssi = arsta->chain_signal[i];
+ if (clear)
+ arsta->chain_signal[i] = ATH11K_INVALID_RSSI_FULL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac sta statistics %s rssi[%d] %d\n", pre, i, rssi);
+
+ if (rssi != ATH11K_DEFAULT_NOISE_FLOOR &&
+ rssi != ATH11K_INVALID_RSSI_FULL &&
+ rssi != ATH11K_INVALID_RSSI_EMPTY &&
+ rssi != 0) {
+ sinfo->chain_signal[i] = rssi;
+ sinfo->chains |= BIT(i);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ }
+ }
+}
+
static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ s8 signal;
+ bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ar->ab->wmi_ab.svc_map);
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@@ -7277,25 +7972,47 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->tx_duration = arsta->tx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
- if (!arsta->txrate.legacy && !arsta->txrate.nss)
- return;
+ if (arsta->txrate.legacy || arsta->txrate.nss) {
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
- if (arsta->txrate.legacy) {
- sinfo->txrate.legacy = arsta->txrate.legacy;
- } else {
- sinfo->txrate.mcs = arsta->txrate.mcs;
- sinfo->txrate.nss = arsta->txrate.nss;
- sinfo->txrate.bw = arsta->txrate.bw;
- sinfo->txrate.he_gi = arsta->txrate.he_gi;
- sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
- sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false);
+
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
+ arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ar->ab->hw_params.supports_rssi_stats &&
+ !ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
+ ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true);
}
- sinfo->txrate.flags = arsta->txrate.flags;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
- /* TODO: Use real NF instead of default one. */
- sinfo->signal = arsta->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ signal = arsta->rssi_comb;
+ if (!signal &&
+ arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ar->ab->hw_params.supports_rssi_stats &&
+ !(ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_VDEV_STAT)))
+ signal = arsta->rssi_beacon;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac sta statistics db2dbm %u rssi comb %d rssi beacon %d\n",
+ db2dbm, arsta->rssi_comb, arsta->rssi_beacon);
+
+ if (signal) {
+ sinfo->signal = db2dbm ? signal : signal + ATH11K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ }
}
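As a worked example of the signal selection above (a sketch; the -95 dBm noise floor is an assumption matching ATH11K_DEFAULT_NOISE_FLOOR as used elsewhere in the driver, and sta_signal_dbm is a hypothetical standalone rendering):

#define NOISE_FLOOR_DBM (-95)	/* assumption: ATH11K_DEFAULT_NOISE_FLOOR */

static s8 sta_signal_dbm(s8 rssi_comb, s8 rssi_beacon, bool db2dbm)
{
	s8 signal = rssi_comb ? rssi_comb : rssi_beacon;

	/* with the HW_DB2DBM service the firmware already reports dBm;
	 * otherwise the RSSI is relative to the noise floor
	 */
	return db2dbm ? signal : signal + NOISE_FLOOR_DBM;
}

For instance, rssi_comb = 35 without db2dbm yields -60 dBm.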
static const struct ieee80211_ops ath11k_ops = {
@@ -7633,6 +8350,9 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes;
+ if (ab->hw_params.single_pdev_only && ar->supports_6ghz)
+ ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS);
+
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
@@ -7672,7 +8392,8 @@ static int __ath11k_mac_register(struct ath11k *ar)
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability different for each band.
*/
- if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || ar->supports_6ghz)
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
+ (ar->supports_6ghz && ab->hw_params.supports_dynamic_smps_6ghz))
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -7688,11 +8409,16 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
- ar->max_num_stations = TARGET_NUM_STATIONS;
- ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
+ ar->max_num_stations = TARGET_NUM_STATIONS(ab);
+ ar->max_num_peers = TARGET_NUM_PEERS_PDEV(ab);
ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+ if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) {
+ ar->hw->wiphy->features |=
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ }
+
ar->hw->queues = ATH11K_HW_MAX_QUEUES;
ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
@@ -7703,6 +8429,9 @@ static int __ath11k_mac_register(struct ath11k *ar)
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+ if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_BSS_COLOR);
ar->hw->wiphy->cipher_suites = cipher_suites;
ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -7784,7 +8513,7 @@ int ath11k_mac_register(struct ath11k_base *ab)
/* Initialize channel counters frequency value in hertz */
ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
@@ -7802,6 +8531,8 @@ int ath11k_mac_register(struct ath11k_base *ab)
ret = __ath11k_mac_register(ar);
if (ret)
goto err_cleanup;
+
+ init_waitqueue_head(&ar->txmgmt_empty_waitq);
}
return 0;
@@ -7879,6 +8610,9 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
ar->monitor_vdev_id = -1;
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+ init_completion(&ar->finish_11d_scan);
+ init_completion(&ar->finish_11d_ch_list);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 254ca4acc8e8..0e6c870b09c8 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -127,6 +127,13 @@ struct ath11k_generic_iter {
extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
+#define ATH11K_SCAN_11D_INTERVAL 600000
+#define ATH11K_11D_INVALID_VDEV_ID 0xFFFF
+
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait);
+void ath11k_mac_11d_scan_stop(struct ath11k *ar);
+void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
+
void ath11k_mac_destroy(struct ath11k_base *ab);
void ath11k_mac_unregister(struct ath11k_base *ab);
int ath11k_mac_register(struct ath11k_base *ab);
@@ -140,10 +147,16 @@ u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
void __ath11k_mac_scan_finish(struct ath11k *ar);
void ath11k_mac_scan_finish(struct ath11k *ar);
+int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable);
+int ath11k_mac_rfkill_config(struct ath11k *ar);
struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
u32 vdev_id);
+u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar);
+u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif);
+struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab);
+
struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id);
struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id);
@@ -151,8 +164,12 @@ void ath11k_mac_drain_tx(struct ath11k *ar);
void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
+u32 ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
+enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy);
+enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones);
enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);
enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb);
void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id);
+void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 49c0b1ad40a0..e4250ba8dfee 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -3,6 +3,9 @@
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
#include "core.h"
#include "debug.h"
@@ -248,6 +251,7 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
u32 user_base_data, base_vector;
int ret, num_vectors, i;
int *irq;
+ unsigned int msi_data;
ret = ath11k_pci_get_user_msi_assignment(ab_pci,
"MHI", &num_vectors,
@@ -262,9 +266,15 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
if (!irq)
return -ENOMEM;
- for (i = 0; i < num_vectors; i++)
+ for (i = 0; i < num_vectors; i++) {
+ msi_data = base_vector;
+
+ if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ msi_data += i;
+
irq[i] = ath11k_pci_get_msi_irq(ab->dev,
- base_vector + i);
+ msi_data);
+ }
ab_pci->mhi_ctrl->irq = irq;
ab_pci->mhi_ctrl->nr_irqs = num_vectors;
@@ -311,6 +321,26 @@ static void ath11k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
writel(val, addr);
}
+static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
+{
+ struct device_node *np;
+ struct resource res;
+ int ret;
+
+ np = of_find_node_by_type(NULL, "memory");
+ if (!np)
+ return -ENOENT;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
+ mhi_ctrl->iova_start = res.start + 0x1000000;
+ mhi_ctrl->iova_stop = res.end;
+
+ return 0;
+}
+
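To illustrate what the helper computes, a hypothetical device tree memory node (the 0x1000000 offset skips the first 16 MiB of the reserved region):

/* example (hypothetical DT):
 *   memory@40000000 {
 *           device_type = "memory";
 *           reg = <0x40000000 0x40000000>;
 *   };
 * yields iova_start = 0x41000000 and iova_stop = 0x7fffffff
 */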
int ath11k_mhi_register(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
@@ -339,8 +369,18 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
return ret;
}
- mhi_ctrl->iova_start = 0;
- mhi_ctrl->iova_stop = 0xffffffff;
+ if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
+
+ if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
+ ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl);
+ if (ret < 0)
+ return ret;
+ } else {
+ mhi_ctrl->iova_start = 0;
+ mhi_ctrl->iova_stop = 0xFFFFFFFF;
+ }
+
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
@@ -356,6 +396,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
break;
case ATH11K_HW_QCA6390_HW20:
case ATH11K_HW_WCN6855_HW20:
+ case ATH11K_HW_WCN6855_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 3d353e7c9d5c..de71ad594f34 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/of.h>
#include "pci.h"
#include "core.h"
@@ -16,7 +17,8 @@
#define ATH11K_PCI_BAR_NUM 0
#define ATH11K_PCI_DMA_MASK 32
-#define ATH11K_PCI_IRQ_CE0_OFFSET 3
+#define ATH11K_PCI_IRQ_CE0_OFFSET 3
+#define ATH11K_PCI_IRQ_DP_OFFSET 14
#define WINDOW_ENABLE_BIT 0x40000000
#define WINDOW_REG_ADDRESS 0x310c
@@ -25,7 +27,7 @@
#define WINDOW_RANGE_MASK GENMASK(18, 0)
#define TCSR_SOC_HW_VERSION 0x0224
-#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(16, 8)
+#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
/* BAR0 + 4k is always accessible, and no
@@ -76,6 +78,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
};
+static const struct ath11k_msi_config msi_config_one_msi = {
+ .total_vectors = 1,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "DP", .num_vectors = 1, .base_vector = 0 },
+ },
+};
+
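Note that every user in this fallback table shares base_vector 0. A quick illustration of what the lookup then returns (an assumption following the ath11k_pci_get_user_msi_assignment() loop shown earlier in this patch):

/* sketch: with msi_config_one_msi installed,
 *   ath11k_pci_get_user_msi_assignment(ab_pci, "DP", &nvec, &ubd, &bv)
 * fills nvec = 1, bv = 0 and ubd = ab_pci->msi_ep_base_data, i.e. MHI,
 * CE, WAKE and DP all funnel into the single allocated vector
 */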
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
"bhi",
"mhi-er0",
@@ -182,7 +195,8 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
/* for offset beyond BAR + 4K - 32, may
* need to wakeup MHI to access.
*/
- if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ if (ab->hw_params.wakeup_mhi &&
+ test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF)
mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
@@ -206,7 +220,8 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
}
}
- if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ if (ab->hw_params.wakeup_mhi &&
+ test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF)
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}
@@ -219,7 +234,8 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
/* for offset beyond BAR + 4K - 32, may
* need to wakeup MHI to access.
*/
- if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ if (ab->hw_params.wakeup_mhi &&
+ test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF)
mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
@@ -243,7 +259,8 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
}
}
- if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ if (ab->hw_params.wakeup_mhi &&
+ test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF)
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
@@ -481,11 +498,11 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_nam
for (idx = 0; idx < msi_config->total_users; idx++) {
if (strcmp(user_name, msi_config->users[idx].name) == 0) {
*num_vectors = msi_config->users[idx].num_vectors;
- *user_base_data = msi_config->users[idx].base_vector
- + ab_pci->msi_ep_base_data;
- *base_vector = msi_config->users[idx].base_vector;
+ *base_vector = msi_config->users[idx].base_vector;
+ *user_base_data = *base_vector + ab_pci->msi_ep_base_data;
- ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
user_name, *num_vectors, *user_base_data,
*base_vector);
@@ -556,16 +573,30 @@ static void ath11k_pci_free_irq(struct ath11k_base *ab)
static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 irq_idx;
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ return;
+
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
enable_irq(ab->irq_num[irq_idx]);
}
static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 irq_idx;
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ return;
+
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
disable_irq_nosync(ab->irq_num[irq_idx]);
}
@@ -574,6 +605,8 @@ static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
{
int i;
+ clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
@@ -598,20 +631,27 @@ static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
static void ath11k_pci_ce_tasklet(struct tasklet_struct *t)
{
struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+ int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
- ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+ enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}
static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
{
struct ath11k_ce_pipe *ce_pipe = arg;
+ struct ath11k_base *ab = ce_pipe->ab;
+ int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
+
+ if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
/* last interrupt received for this CE */
ce_pipe->timestamp = jiffies;
- ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+
tasklet_schedule(&ce_pipe->intr_tq);
return IRQ_HANDLED;
@@ -619,8 +659,15 @@ static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab);
int i;
+ /* In case of one MSI vector, we handle irq enable/disable
+ * in a uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ return;
+
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
@@ -629,20 +676,32 @@ static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
{
int i;
+ clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
+
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
ath11k_pci_ext_grp_disable(irq_grp);
- napi_synchronize(&irq_grp->napi);
- napi_disable(&irq_grp->napi);
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
}
}
static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab);
int i;
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ return;
+
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
@@ -651,10 +710,15 @@ static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
{
int i;
+ set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
- napi_enable(&irq_grp->napi);
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
ath11k_pci_ext_grp_enable(irq_grp);
}
}
@@ -686,11 +750,13 @@ static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
napi);
struct ath11k_base *ab = irq_grp->ab;
int work_done;
+ int i;
work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
- ath11k_pci_ext_grp_enable(irq_grp);
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
if (work_done > budget)
@@ -702,13 +768,19 @@ static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
{
struct ath11k_ext_irq_grp *irq_grp = arg;
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
/* last interrupt received for this group */
irq_grp->timestamp = jiffies;
- ath11k_pci_ext_grp_disable(irq_grp);
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
napi_schedule(&irq_grp->napi);
@@ -717,10 +789,10 @@ static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
int i, j, ret, num_vectors = 0;
- u32 user_base_data = 0, base_vector = 0, base_idx;
+ u32 user_base_data = 0, base_vector = 0;
- base_idx = ATH11K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
&num_vectors,
&user_base_data,
@@ -750,7 +822,7 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
}
irq_grp->num_irq = num_irq;
- irq_grp->irqs[0] = base_idx + i;
+ irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
for (j = 0; j < irq_grp->num_irq; j++) {
int irq_idx = irq_grp->irqs[j];
@@ -764,23 +836,32 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
- IRQF_SHARED,
+ ab_pci->irq_flags,
"DP_EXT_IRQ", irq_grp);
if (ret) {
ath11k_err(ab, "failed request irq %d: %d\n",
vector, ret);
return ret;
}
-
- disable_irq_nosync(ab->irq_num[irq_idx]);
}
+ ath11k_pci_ext_grp_disable(irq_grp);
}
return 0;
}
+static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
+ const struct cpumask *m)
+{
+ if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ return 0;
+
+ return irq_set_affinity_hint(ab_pci->pdev->irq, m);
+}
+
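A short design note on why the hint is only set in the single-vector case (an assumption drawn from the surrounding hunks):

/* with one shared MSI, the srngs get programmed with the vector's
 * Message Data (see ath11k_pci_config_msi_data below); letting the irq
 * migrate between CPUs could rewrite that data, so the vector is pinned
 * to CPU0 and requested with IRQF_SHARED | IRQF_NOBALANCING instead
 */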
static int ath11k_pci_config_irq(struct ath11k_base *ab)
{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
struct ath11k_ce_pipe *ce_pipe;
u32 msi_data_start;
u32 msi_data_count, msi_data_idx;
@@ -794,6 +875,12 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab)
if (ret)
return ret;
+ ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+ if (ret) {
+ ath11k_err(ab, "failed to set irq affinity %d\n", ret);
+ return ret;
+ }
+
/* Configure CE irqs */
for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
@@ -808,12 +895,12 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab)
tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet);
ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
- IRQF_SHARED, irq_name[irq_idx],
+ ab_pci->irq_flags, irq_name[irq_idx],
ce_pipe);
if (ret) {
ath11k_err(ab, "failed to request irq %d: %d\n",
irq_idx, ret);
- return ret;
+ goto err_irq_affinity_cleanup;
}
ab->irq_num[irq_idx] = irq;
@@ -824,9 +911,13 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab)
ret = ath11k_pci_ext_irq_config(ab);
if (ret)
- return ret;
+ goto err_irq_affinity_cleanup;
return 0;
+
+err_irq_affinity_cleanup:
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+ return ret;
}
static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
@@ -848,6 +939,8 @@ static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
{
int i;
+ set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
@@ -892,15 +985,25 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
msi_config->total_vectors,
msi_config->total_vectors,
PCI_IRQ_MSI);
- if (num_vectors != msi_config->total_vectors) {
- ath11k_err(ab, "failed to get %d MSI vectors, only %d available",
- msi_config->total_vectors, num_vectors);
-
- if (num_vectors >= 0)
- return -EINVAL;
- else
- return num_vectors;
+ if (num_vectors == msi_config->total_vectors) {
+ set_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
+ ab_pci->irq_flags = IRQF_SHARED;
+ } else {
+ num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
+ 1,
+ 1,
+ PCI_IRQ_MSI);
+ if (num_vectors < 0) {
+ ret = -EINVAL;
+ goto reset_msi_config;
+ }
+ clear_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
+ ab_pci->msi_config = &msi_config_one_msi;
+ ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "request one MSI vector\n");
}
+ ath11k_info(ab, "MSI vectors: %d\n", num_vectors);
+
ath11k_pci_msi_disable(ab_pci);
msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
@@ -911,7 +1014,7 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
}
ab_pci->msi_ep_base_data = msi_desc->msg.data;
- if (msi_desc->msi_attrib.is_64)
+ if (msi_desc->pci.msi_attrib.is_64)
set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
@@ -921,6 +1024,7 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
free_msi_vector:
pci_free_irq_vectors(ab_pci->pdev);
+reset_msi_config:
return ret;
}
@@ -929,6 +1033,25 @@ static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci)
pci_free_irq_vectors(ab_pci->pdev);
}
+static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci)
+{
+ struct msi_desc *msi_desc;
+
+ msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
+ if (!msi_desc) {
+ ath11k_err(ab_pci->ab, "msi_desc is NULL!\n");
+ pci_free_irq_vectors(ab_pci->pdev);
+ return -EINVAL;
+ }
+
+ ab_pci->msi_ep_base_data = msi_desc->msg.data;
+
+ ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
+ ab_pci->msi_ep_base_data);
+
+ return 0;
+}
+
static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
{
struct ath11k_base *ab = ab_pci->ab;
@@ -1126,7 +1249,13 @@ static int ath11k_pci_start(struct ath11k_base *ab)
set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
- ath11k_pci_aspm_restore(ab_pci);
+ /* TODO: for now don't restore ASPM in case of single MSI
+ * vector as MHI register reading in M2 causes system hang.
+ */
+ if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ ath11k_pci_aspm_restore(ab_pci);
+ else
+ ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
ath11k_pci_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
@@ -1225,7 +1354,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
{
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
- u32 soc_hw_version_major, soc_hw_version_minor;
+ u32 soc_hw_version_major, soc_hw_version_minor, addr;
int ret;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
@@ -1245,12 +1374,29 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ab);
spin_lock_init(&ab_pci->window_lock);
+ /* Set fixed_mem_region to true for platforms that support reserved
+ * memory from DT. If memory is reserved in DT for the FW, the ath11k
+ * driver need not allocate it.
+ */
+ ret = of_property_read_u32(ab->dev->of_node, "memory-region", &addr);
+ if (!ret)
+ set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
+
ret = ath11k_pci_claim(ab_pci, pdev);
if (ret) {
ath11k_err(ab, "failed to claim device: %d\n", ret);
goto err_free_core;
}
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ ab->id.vendor = pdev->vendor;
+ ab->id.device = pdev->device;
+ ab->id.subsystem_vendor = pdev->subsystem_vendor;
+ ab->id.subsystem_device = pdev->subsystem_device;
+
switch (pci_dev->device) {
case QCA6390_DEVICE_ID:
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
@@ -1273,13 +1419,26 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
+ ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
case 2:
- ab->hw_rev = ATH11K_HW_WCN6855_HW20;
+ switch (soc_hw_version_minor) {
+ case 0x00:
+ case 0x01:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW20;
+ break;
+ case 0x10:
+ case 0x11:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW21;
+ break;
+ default:
+ goto unsupported_wcn6855_soc;
+ }
break;
default:
+unsupported_wcn6855_soc:
dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n",
soc_hw_version_major, soc_hw_version_minor);
ret = -EOPNOTSUPP;
@@ -1328,6 +1487,17 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_ce_free;
}
+ /* The kernel may allocate a dummy vector before request_irq and
+ * then allocate a real vector when request_irq is called.
+ * So get msi_data here again to avoid a spurious interrupt,
+ * as msi_data will be configured to the srngs.
+ */
+ ret = ath11k_pci_config_msi_data(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to config msi_data: %d\n", ret);
+ goto err_free_irq;
+ }
+
ret = ath11k_core_init(ab);
if (ret) {
ath11k_err(ab, "failed to init core: %d\n", ret);
@@ -1364,6 +1534,8 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
struct ath11k_base *ab = pci_get_drvdata(pdev);
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath11k_pci_power_down(ab);
ath11k_debugfs_soc_destroy(ab);
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index f3e645891d19..61d67b20a0eb 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -68,6 +68,7 @@ enum ath11k_pci_flags {
ATH11K_PCI_FLAG_INIT_DONE,
ATH11K_PCI_FLAG_IS_MSI_64,
ATH11K_PCI_ASPM_RESTORE,
+ ATH11K_PCI_FLAG_MULTI_MSI_VECTORS,
};
struct ath11k_pci {
@@ -87,6 +88,8 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
u16 link_ctl;
+
+ unsigned long irq_flags;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index 619db001be8e..63fe5665badf 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -28,6 +28,7 @@ struct ath11k_peer {
u8 ucast_keyidx;
u16 sec_type;
u16 sec_type_grp;
+ bool is_authorized;
};
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index fa73118de6db..65d3c6ba35ae 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -9,6 +9,8 @@
#include "core.h"
#include "debug.h"
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
#include <linux/firmware.h>
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
@@ -1582,11 +1584,55 @@ static struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
},
};
+static struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enablefwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enablefwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
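For reference, a sketch of what these tables encode on the wire, assuming the standard QMI TLV layout (1-byte type, 2-byte little-endian length, value):

/* a request with enablefwlog_valid = true and enablefwlog = 1 emits one
 * optional TLV: type 0x10 | len 0x0001 | value 0x01
 * the response carries only the mandatory qmi_response_type_v01 TLV
 * (type 0x02)
 */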
static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
int ret = 0;
memset(&req, 0, sizeof(req));
@@ -1640,6 +1686,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send host capability request: %d\n", ret);
goto out;
}
@@ -1705,6 +1752,7 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_ind_register_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send indication register request: %d\n",
ret);
goto out;
@@ -1734,7 +1782,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
int ret = 0, i;
bool delayed;
@@ -1749,7 +1797,9 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
* failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
- if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
+ if (!(ab->bus_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
+ ab->qmi.target_mem_delayed) {
delayed = true;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
@@ -1783,6 +1833,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to respond qmi memory request: %d\n",
ret);
goto out;
@@ -1815,10 +1866,12 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
{
int i;
- if (ab->bus_params.fixed_mem_region)
- return;
-
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ if ((ab->bus_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
+ ab->qmi.target_mem[i].iaddr)
+ iounmap(ab->qmi.target_mem[i].iaddr);
+
if (!ab->qmi.target_mem[i].vaddr)
continue;
@@ -1866,10 +1919,44 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
{
- int i, idx;
+ struct device *dev = ab->dev;
+ struct device_node *hremote_node = NULL;
+ struct resource res;
+ u32 host_ddr_sz;
+ int i, idx, ret;
for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
switch (ab->qmi.target_mem[i].type) {
+ case HOST_DDR_REGION_TYPE:
+ hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!hremote_node) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "qmi fail to get hremote_node\n");
+ return ret;
+ }
+
+ ret = of_address_to_resource(hremote_node, 0, &res);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "qmi fail to get reg from hremote\n");
+ return ret;
+ }
+
+ if (res.end - res.start + 1 < ab->qmi.target_mem[i].size) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "qmi fail to assign memory of sz\n");
+ return -EINVAL;
+ }
+
+ ab->qmi.target_mem[idx].paddr = res.start;
+ ab->qmi.target_mem[idx].iaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ host_ddr_sz = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
case BDF_MEM_REGION_TYPE:
ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
ab->qmi.target_mem[idx].vaddr = NULL;
@@ -1884,10 +1971,16 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
}
if (ath11k_cold_boot_cal && ab->hw_params.cold_boot_calib) {
- ab->qmi.target_mem[idx].paddr =
- ATH11K_QMI_CALDB_ADDRESS;
- ab->qmi.target_mem[idx].vaddr =
- (void *)ATH11K_QMI_CALDB_ADDRESS;
+ if (hremote_node) {
+ ab->qmi.target_mem[idx].paddr =
+ res.start + host_ddr_sz;
+ ab->qmi.target_mem[idx].iaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ } else {
+ ab->qmi.target_mem[idx].paddr =
+ ATH11K_QMI_CALDB_ADDRESS;
+ }
} else {
ab->qmi.target_mem[idx].paddr = 0;
ab->qmi.target_mem[idx].vaddr = NULL;
@@ -1911,7 +2004,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req;
struct qmi_wlanfw_cap_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
int ret = 0;
int r;
@@ -1930,6 +2023,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_cap_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send qmi cap request: %d\n",
ret);
goto out;
@@ -2000,7 +2094,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
const u8 *temp = data;
void __iomem *bdf_addr = NULL;
int ret;
@@ -2111,7 +2205,8 @@ err_free_req:
return ret;
}
-static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
+static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab,
+ bool regdb)
{
struct device *dev = ab->dev;
char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
@@ -2122,13 +2217,21 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
const u8 *tmp;
memset(&bd, 0, sizeof(bd));
- ret = ath11k_core_fetch_bdf(ab, &bd);
- if (ret) {
- ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret);
- goto out;
+
+ if (regdb) {
+ ret = ath11k_core_fetch_regdb(ab, &bd);
+ } else {
+ ret = ath11k_core_fetch_bdf(ab, &bd);
+ if (ret)
+ ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret);
}
- if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
+ if (ret)
+ goto out;
+
+ if (regdb)
+ bdf_type = ATH11K_QMI_BDF_TYPE_REGDB;
+ else if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
else
bdf_type = ATH11K_QMI_BDF_TYPE_BIN;
@@ -2143,8 +2246,8 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
goto out;
}
- /* QCA6390 does not support cal data, skip it */
- if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF)
+ /* QCA6390/WCN6855 do not support cal data, skip it */
+ if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB)
goto out;
if (ab->qmi.target.eeprom_caldata) {
@@ -2245,7 +2348,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;
struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
int ret = 0;
memset(&req, 0, sizeof(req));
@@ -2277,6 +2380,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send m3 information request: %d\n",
ret);
goto out;
@@ -2303,7 +2407,7 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
{
struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
int ret = 0;
memset(&req, 0, sizeof(req));
@@ -2325,6 +2429,7 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n",
mode, ret);
goto out;
@@ -2358,7 +2463,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
struct ce_pipe_config *ce_cfg;
struct service_to_pipe *svc_cfg;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
int ret = 0, pipe_num;
ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
@@ -2419,6 +2524,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send wlan config request: %d\n",
ret);
goto out;
@@ -2442,6 +2548,48 @@ out:
return ret;
}
+static int ath11k_qmi_wlanfw_wlan_ini_send(struct ath11k_base *ab, bool enable)
+{
+ int ret;
+ struct qmi_txn txn;
+ struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
+ struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {};
+
+ req.enablefwlog_valid = true;
+ req.enablefwlog = enable ? 1 : 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_INI_REQ_V01,
+ QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan ini request, err = %d\n",
+ ret);
+ qmi_txn_cancel(&txn);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed wlan ini request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi wlan ini request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ }
+
+out:
+ return ret;
+}
+
void ath11k_qmi_firmware_stop(struct ath11k_base *ab)
{
int ret;
@@ -2462,6 +2610,14 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware start\n");
+ if (ab->hw_params.fw_wmi_diag_event) {
+ ret = ath11k_qmi_wlanfw_wlan_ini_send(ab, true);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan fw ini:%d\n", ret);
+ return ret;
+ }
+ }
+
ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to send wlan cfg: %d\n", ret);
@@ -2573,7 +2729,10 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
return ret;
}
- ret = ath11k_qmi_load_bdf_qmi(ab);
+ if (ab->hw_params.supports_regdb)
+ ath11k_qmi_load_bdf_qmi(ab, true);
+
+ ret = ath11k_qmi_load_bdf_qmi(ab, false);
if (ret < 0) {
ath11k_warn(ab, "failed to load board data file: %d\n", ret);
return ret;
@@ -2614,7 +2773,8 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
msg->mem_seg[i].type, msg->mem_seg[i].size);
}
- if (ab->bus_params.fixed_mem_region) {
+ if (ab->bus_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_qmi_assign_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "failed to assign qmi target memory: %d\n",
@@ -2823,7 +2983,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk));
ab->qmi.ab = ab;
- ab->qmi.target_mem_mode = ATH11K_QMI_TARGET_MEM_MODE_DEFAULT;
+ ab->qmi.target_mem_mode = ab->hw_params.fw_mem_mode;
ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX,
&ath11k_qmi_ops, ath11k_qmi_msg_handlers);
if (ret < 0) {
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 3bb0f9ef7996..ba2eff4d59cb 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -34,14 +34,13 @@
#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
#define ATH11K_FIRMWARE_MODE_OFF 4
-#define ATH11K_QMI_TARGET_MEM_MODE_DEFAULT 0
#define ATH11K_COLD_BOOT_FW_RESET_DELAY (40 * HZ)
struct ath11k_base;
enum ath11k_qmi_file_type {
ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
- ATH11K_QMI_FILE_TYPE_CALDATA,
+ ATH11K_QMI_FILE_TYPE_CALDATA = 2,
ATH11K_QMI_FILE_TYPE_EEPROM,
ATH11K_QMI_MAX_FILE_TYPE,
};
@@ -49,6 +48,7 @@ enum ath11k_qmi_file_type {
enum ath11k_qmi_bdf_type {
ATH11K_QMI_BDF_TYPE_BIN = 0,
ATH11K_QMI_BDF_TYPE_ELF = 1,
+ ATH11K_QMI_BDF_TYPE_REGDB = 4,
};
enum ath11k_qmi_event_type {
@@ -95,6 +95,7 @@ struct target_mem_chunk {
u32 type;
dma_addr_t paddr;
u32 *vaddr;
+ void __iomem *iaddr;
};
struct target_info {
@@ -427,10 +428,12 @@ struct qmi_wlanfw_m3_info_resp_msg_v01 {
#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803
#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN 4
#define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022
#define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022
#define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023
#define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLANFW_WLAN_INI_REQ_V01 0x002F
#define QMI_WLANFW_MAX_STR_LEN_V01 16
#define QMI_WLANFW_MAX_NUM_CE_V01 12
#define QMI_WLANFW_MAX_NUM_SVC_V01 24
@@ -472,6 +475,16 @@ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+struct qmi_wlanfw_wlan_ini_req_msg_v01 {
+ /* Must be set to true if enablefwlog is being passed */
+ u8 enablefwlog_valid;
+ u8 enablefwlog;
+};
+
+struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
int ath11k_qmi_firmware_start(struct ath11k_base *ab,
u32 mode);
void ath11k_qmi_firmware_stop(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index a66b5bdd2167..d6575feca5a2 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -2,6 +2,8 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
+#include <linux/rtnetlink.h>
+
#include "core.h"
#include "debug.h"
@@ -86,6 +88,9 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
if (ret)
ath11k_warn(ar->ab,
"INIT Country code set to fw failed : %d\n", ret);
+
+ ath11k_mac_11d_scan_stop(ar);
+ ar->regdom_set_by_user = true;
}
int ath11k_reg_update_chan_list(struct ath11k *ar)
@@ -179,6 +184,11 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
+ if (ar->pending_11d) {
+ complete(&ar->finish_11d_ch_list);
+ ar->pending_11d = false;
+ }
+
return ret;
}
@@ -244,8 +254,15 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
}
+ if (ar->pending_11d)
+ complete(&ar->finish_11d_scan);
+
rtnl_lock();
wiphy_lock(ar->hw->wiphy);
+
+ if (ar->pending_11d)
+ reinit_completion(&ar->finish_11d_ch_list);
+
ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
wiphy_unlock(ar->hw->wiphy);
rtnl_unlock();
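The completion ordering above is easier to follow end to end; a sketch of the handshake, assuming the mac.c side blocks in ath11k_mac_11d_scan_start():

/* sketch of the 11d handshake when pending_11d is set:
 *   mac.c (scan side)                        reg.c (regd side)
 *   wait_for_completion(&finish_11d_scan)
 *                                            complete(&finish_11d_scan)
 *                                            reinit_completion(&finish_11d_ch_list)
 *                                            regd synced, chan list sent
 *                                            complete(&finish_11d_ch_list)
 *   wait_for_completion(&finish_11d_ch_list)
 */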
@@ -456,6 +473,9 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
{
u16 bw;
+ if (end_freq <= start_freq)
+ return 0;
+
bw = end_freq - start_freq;
bw = min_t(u16, bw, max_bw);
@@ -463,8 +483,10 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
bw = 80;
else if (bw >= 40 && bw < 80)
bw = 40;
- else if (bw < 40)
+ else if (bw >= 20 && bw < 40)
bw = 20;
+ else
+ bw = 0;
return bw;
}
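A few sample inputs show the effect of the tightened quantizer (a standalone sketch; the helper itself is static to reg.c):

/* ath11k_reg_adjust_bw(start, end, max) after this change:
 *   (5170, 5250, 160) -> 80   80 MHz span
 *   (5590, 5650, 160) -> 40   60 MHz span
 *   (5630, 5650, 160) -> 20   20 MHz span
 *   (5650, 5640, 160) ->  0   inverted span, rule is dropped
 */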
@@ -488,73 +510,77 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
struct cur_reg_rule *reg_rule,
u8 *rule_idx, u32 flags, u16 max_bw)
{
+ u32 start_freq;
u32 end_freq;
u16 bw;
u8 i;
i = *rule_idx;
+ /* there might be situations when even the input rule must be dropped */
+ i--;
+
+ /* frequencies below weather radar */
bw = ath11k_reg_adjust_bw(reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
+ if (bw > 0) {
+ i++;
- ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
- ETSI_WEATHER_RADAR_BAND_LOW, bw,
- reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ ath11k_reg_update_rule(regd->reg_rules + i,
+ reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
- ath11k_dbg(ab, ATH11K_DBG_REG,
- "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
- i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW,
- bw, reg_rule->ant_gain, reg_rule->reg_power,
- regd->reg_rules[i].dfs_cac_ms,
- flags);
-
- if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH)
- end_freq = ETSI_WEATHER_RADAR_BAND_HIGH;
- else
- end_freq = reg_rule->end_freq;
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw, reg_rule->ant_gain,
+ reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
- bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
- max_bw);
+ /* weather radar frequencies */
+ start_freq = max_t(u32, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW);
+ end_freq = min_t(u32, reg_rule->end_freq, ETSI_WEATHER_RADAR_BAND_HIGH);
- i++;
+ bw = ath11k_reg_adjust_bw(start_freq, end_freq, max_bw);
+ if (bw > 0) {
+ i++;
- ath11k_reg_update_rule(regd->reg_rules + i,
- ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
- reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ ath11k_reg_update_rule(regd->reg_rules + i, start_freq,
+ end_freq, bw, reg_rule->ant_gain,
+ reg_rule->reg_power, flags);
- regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
+ regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
- ath11k_dbg(ab, ATH11K_DBG_REG,
- "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
- i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
- bw, reg_rule->ant_gain, reg_rule->reg_power,
- regd->reg_rules[i].dfs_cac_ms,
- flags);
-
- if (end_freq == reg_rule->end_freq) {
- regd->n_reg_rules--;
- *rule_idx = i;
- return;
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, start_freq, end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms, flags);
}
+ /* frequencies above weather radar */
bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, max_bw);
+ if (bw > 0) {
+ i++;
- i++;
-
- ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
- reg_rule->end_freq, bw,
- reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ ath11k_reg_update_rule(regd->reg_rules + i,
+ ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
- ath11k_dbg(ab, ATH11K_DBG_REG,
- "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
- i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq,
- bw, reg_rule->ant_gain, reg_rule->reg_power,
- regd->reg_rules[i].dfs_cac_ms,
- flags);
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw, reg_rule->ant_gain,
+ reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
*rule_idx = i;
}
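A worked example of the three-way split, assuming the ETSI weather radar band is defined as 5590-5650 MHz with a 600 s CAC elsewhere in reg.h (values as in other ath drivers):

/* input rule 5470-5725 @ max 160 MHz now splits into
 *   5470-5590 @ 80                 (below the weather radar band)
 *   5590-5650 @ 40, CAC 600000 ms  (inside the band)
 *   5650-5725 @ 40                 (above the band)
 * and, thanks to the bw > 0 guards plus the initial i--, a rule lying
 * wholly inside one sub-range emits a single entry instead of
 * zero-width padding rules
 */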
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index ac4da99b5577..4100cc1449a2 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -581,6 +581,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
u16 length, freq;
u8 chan_width_mhz, bin_sz;
int ret;
+ u32 check_length;
lockdep_assert_held(&ar->spectral.lock);
@@ -614,6 +615,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
return -EINVAL;
}
+ check_length = sizeof(*fft_report) + (num_bins * ab->hw_params.spectral.fft_sz);
+ ret = ath11k_dbring_validate_buffer(ar, data, check_length);
+ if (ret) {
+ ath11k_warn(ar->ab, "found magic value in fft data, dropping\n");
+ return ret;
+ }
+
ret = ath11k_spectral_pull_search(ar, data, &search);
if (ret) {
ath11k_warn(ab, "failed to pull search report %d\n", ret);
@@ -747,6 +755,12 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
goto err;
}
+ ret = ath11k_dbring_validate_buffer(ar, data, tlv_len);
+ if (ret) {
+ ath11k_warn(ar->ab, "found magic value in spectral summary, dropping\n");
+ goto err;
+ }
+
summary = (struct spectral_summary_fft_report *)tlv;
ath11k_spectral_pull_summary(ar, &param->meta,
summary, &summ_rpt);
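Background for the two new checks, as an assumption based on the dbring buffer lifecycle:

/* rx buffers are pre-poisoned with a magic pattern when replenished; if
 * ath11k_dbring_validate_buffer() still finds that pattern within the
 * claimed length, the firmware never wrote this far, so the report is
 * dropped instead of being parsed as stale data
 */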
diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c
index f0cc49ba0387..6620650d7845 100644
--- a/drivers/net/wireless/ath/ath11k/trace.c
+++ b/drivers/net/wireless/ath/ath11k/trace.c
@@ -7,3 +7,4 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
+EXPORT_SYMBOL(__tracepoint_ath11k_log_dbg);
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index 25d18e9d5b0b..a02e54735e88 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -14,12 +14,24 @@
#if !defined(CONFIG_ATH11K_TRACING)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {} \
+static inline bool trace_##name##_enabled(void) \
+{ \
+ return false; \
+}
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
static inline void trace_ ## name(proto) {}
#endif /* !CONFIG_ATH11K_TRACING || __CHECKER__ */
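The _enabled() stub matters because callers can now guard work done purely to feed a tracepoint; a minimal usage sketch (hypothetical caller):

	if (trace_ath11k_log_dbg_enabled()) {
		/* with tracing compiled out this branch is a constant
		 * false, so the va_format setup vanishes entirely
		 */
		trace_ath11k_log_dbg(ab, level, &vaf);
	}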
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath11k
+#define ATH11K_MSG_MAX 400
+
TRACE_EVENT(ath11k_htt_pktlog,
TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len,
u32 pktlog_checksum),
@@ -108,6 +120,194 @@ TRACE_EVENT(ath11k_htt_rxdesc,
)
);
+DECLARE_EVENT_CLASS(ath11k_log_event,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf),
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __dynamic_array(char, msg, ATH11K_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH11K_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH11K_MSG_MAX);
+ ),
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+DEFINE_EVENT(ath11k_log_event, ath11k_log_err,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf)
+);
+
+DEFINE_EVENT(ath11k_log_event, ath11k_log_warn,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf)
+);
+
+DEFINE_EVENT(ath11k_log_event, ath11k_log_info,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf)
+);
+
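For orientation, a sketch of the caller side that feeds the log events above; this is an assumption mirroring the usual ath11k debug.c pattern, not part of this hunk:

void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
{
	struct va_format vaf = { .fmt = fmt };
	va_list args;

	va_start(args, fmt);
	vaf.va = &args;
	dev_err(ab->dev, "%pV", &vaf);	/* printk path */
	trace_ath11k_log_err(ab, &vaf);	/* tracepoint path */
	va_end(args);
}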
+TRACE_EVENT(ath11k_wmi_cmd,
+ TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ab, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath11k_wmi_event,
+ TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ab, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath11k_log_dbg,
+ TP_PROTO(struct ath11k_base *ab, unsigned int level, struct va_format *vaf),
+
+ TP_ARGS(ab, level, vaf),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(unsigned int, level)
+ __dynamic_array(char, msg, ATH11K_MSG_MAX)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->level = level;
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH11K_MSG_MAX, vaf->fmt,
+ *vaf->va) >= ATH11K_MSG_MAX);
+ ),
+
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath11k_log_dbg_dump,
+ TP_PROTO(struct ath11k_base *ab, const char *msg, const char *prefix,
+ const void *buf, size_t buf_len),
+
+ TP_ARGS(ab, msg, prefix, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __string(msg, msg)
+ __string(prefix, prefix)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __assign_str(msg, msg);
+ __assign_str(prefix, prefix);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s %s/%s\n",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(prefix),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath11k_wmi_diag,
+ TP_PROTO(struct ath11k_base *ab, const void *data, size_t len),
+
+ TP_ARGS(ab, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 5ae2ef4680d6..6b68ccf65e39 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -73,6 +73,14 @@ struct wmi_tlv_dma_buf_release_parse {
bool meta_data_done;
};
+struct wmi_tlv_fw_stats_parse {
+ const struct wmi_stats_event *ev;
+ const struct wmi_per_chain_rssi_stats *rssi;
+ struct ath11k_fw_stats *stats;
+ int rssi_num;
+ bool chain_rssi_done;
+};
+
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
[WMI_TAG_ARRAY_BYTE]
= { .min_len = 0 },
@@ -120,6 +128,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
[WMI_TAG_STATS_EVENT]
= { .min_len = sizeof(struct wmi_stats_event) },
+ [WMI_TAG_RFKILL_EVENT] = {
+ .min_len = sizeof(struct wmi_rfkill_state_change_ev) },
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
[WMI_TAG_HOST_SWFDA_EVENT] = {
@@ -128,6 +138,12 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+ [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
+ .min_len = sizeof(struct wmi_obss_color_collision_event) },
+ [WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
+ .min_len = sizeof(struct wmi_11d_new_cc_ev) },
+ [WMI_TAG_PER_CHAIN_RSSI_STATS] = {
+ .min_len = sizeof(struct wmi_per_chain_rssi_stats) },
};
#define PRIMAP(_hw_mode_) \
@@ -249,6 +265,8 @@ static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buf
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = cmd;
+ trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len);
+
memset(skb_cb, 0, sizeof(*skb_cb));
ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);
@@ -267,21 +285,39 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
{
struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
int ret = -EOPNOTSUPP;
+ struct ath11k_base *ab = wmi_sc->ab;
might_sleep();
- wait_event_timeout(wmi_sc->tx_credits_wq, ({
- ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+ if (ab->hw_params.credit_flow) {
+ wait_event_timeout(wmi_sc->tx_credits_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
- if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
- ret = -ESHUTDOWN;
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
- (ret != -EAGAIN);
- }), WMI_SEND_TIMEOUT_HZ);
+ (ret != -EAGAIN);
+ }), WMI_SEND_TIMEOUT_HZ);
+ } else {
+ wait_event_timeout(wmi->tx_ce_desc_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -ENOBUFS);
+ }), WMI_SEND_TIMEOUT_HZ);
+ }
if (ret == -EAGAIN)
ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
+ if (ret == -ENOBUFS)
+ ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n",
+ cmd_id);
+
return ret;
}
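Both arms of the branch above use the same idiom: wait_event_timeout() with a statement-expression condition that retries the send on every wakeup and only keeps waiting while the attempt fails with the "try again" error. A minimal sketch of the idiom, with a hypothetical send_one() standing in for ath11k_wmi_cmd_send_nowait():

#include <linux/wait.h>
#include <linux/jiffies.h>

struct demo_link {
	wait_queue_head_t wq;	/* woken when credits/descriptors return */
	int credits;
};

/* Hypothetical transmit attempt: -EAGAIN means "no credit, retry". */
static int send_one(struct demo_link *link)
{
	return link->credits > 0 ? 0 : -EAGAIN;
}

static int send_blocking(struct demo_link *link)
{
	int ret = -EOPNOTSUPP;

	/* The ({ ... }) condition re-runs the attempt on each wakeup and
	 * ends the wait once the result is anything but -EAGAIN. */
	wait_event_timeout(link->wq, ({
		ret = send_one(link);
		(ret != -EAGAIN);
	}), msecs_to_jiffies(3000));

	return ret;	/* still -EAGAIN if the timeout expired */
}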
@@ -315,6 +351,7 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
struct ath11k_pdev *pdev)
{
struct wmi_mac_phy_capabilities *mac_phy_caps;
+ struct ath11k_base *ab = wmi_handle->wmi_ab->ab;
struct ath11k_band_cap *cap_band;
struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
u32 phy_map;
@@ -346,6 +383,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
pdev->pdev_id = mac_phy_caps->pdev_id;
pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
+ ab->target_pdev_ids[ab->target_pdev_count].supported_bands =
+ mac_phy_caps->supported_bands;
+ ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
+ ab->target_pdev_count++;
/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
* band to band for a single radio, need to see how this should be
@@ -485,6 +526,8 @@ static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
cap->num_msdu_desc = ev->num_msdu_desc;
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi sys cap info 0x%x\n", cap->sys_cap_info);
+
return 0;
}
@@ -1244,7 +1287,8 @@ int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
return ret;
}
-int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
+ enum wmi_sta_ps_mode psmode)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_set_ps_mode_cmd *cmd;
@@ -1259,7 +1303,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
- cmd->sta_ps_mode = enable;
+ cmd->sta_ps_mode = psmode;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
if (ret) {
@@ -1269,7 +1313,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI vdev set psmode %d vdev id %d\n",
- enable, vdev_id);
+ psmode, vdev_id);
return ret;
}
@@ -1612,6 +1656,15 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
void *ptr;
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
+ struct ieee80211_vif *vif;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id);
+
+ if (!arvif) {
+ ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id);
+ return -EINVAL;
+ }
+
+ vif = arvif->vif;
len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
@@ -1624,8 +1677,12 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->tim_ie_offset = offs->tim_offset;
- cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
- cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
+
+ if (vif->csa_active) {
+ cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
+ cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
+ }
+
cmd->buf_len = bcn->len;
ptr = skb->data + sizeof(*cmd);
@@ -1689,7 +1746,8 @@ int ath11k_wmi_vdev_install_key(struct ath11k *ar,
tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
- memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
+ if (arg->key_data)
+ memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
if (ret) {
@@ -1762,7 +1820,7 @@ ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
cmd->peer_flags |= WMI_PEER_AUTH;
if (param->need_ptk_4_way) {
cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
- if (!hw_crypto_disabled)
+ if (!hw_crypto_disabled && param->is_assoc)
cmd->peer_flags &= ~WMI_PEER_AUTH;
}
if (param->need_gtk_2_way)
@@ -2069,7 +2127,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
void *ptr;
int i, ret, len;
u32 *tmp_ptr;
- u8 extraie_len_with_pad = 0;
+ u16 extraie_len_with_pad = 0;
struct hint_short_ssid *s_ssid = NULL;
struct hint_bssid *hint_bssid = NULL;
@@ -2088,7 +2146,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
len += sizeof(*bssid) * params->num_bssid;
len += TLV_HDR_SIZE;
- if (params->extraie.len)
+ if (params->extraie.len && params->extraie.len <= 0xFFFF)
extraie_len_with_pad =
roundup(params->extraie.len, sizeof(u32));
len += extraie_len_with_pad;
@@ -2137,6 +2195,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
cmd->num_ssids = params->num_ssids;
cmd->ie_len = params->extraie.len;
cmd->n_probes = params->n_probes;
+ ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr);
+ ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr);
ptr += sizeof(*cmd);
@@ -2195,7 +2255,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
- if (params->extraie.len)
+ if (extraie_len_with_pad)
memcpy(ptr, params->extraie.ptr,
params->extraie.len);
@@ -2386,6 +2446,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
tchan_info->reg_class_id);
*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
tchan_info->antennamax);
+ *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+ tchan_info->maxregpower);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
@@ -2754,6 +2816,42 @@ out:
return ret;
}
+int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
+ struct wmi_set_current_country_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_set_current_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_current_country_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(&cmd->new_alpha2, &param->alpha2, 3);
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "set current country pdev id %d alpha2 %c%c\n",
+ ar->pdev->pdev_id,
+ param->alpha2[0],
+ param->alpha2[1]);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
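Every command constructor in this file opens with the same two FIELD_PREP() calls. A reduced sketch of the header packing, assuming the usual tag-in-the-low-bits/length-in-the-high-bits split (the real masks are WMI_TLV_TAG and WMI_TLV_LEN in wmi.h):

#include <linux/bitfield.h>
#include <linux/types.h>

/* Assumed layout for illustration only. */
#define DEMO_TLV_TAG	GENMASK(11, 0)
#define DEMO_TLV_LEN	GENMASK(31, 16)

/* The length counts only the value bytes, hence the "- TLV_HDR_SIZE"
 * in the constructors above. */
static u32 demo_tlv_header(u16 tag, u16 value_len)
{
	return FIELD_PREP(DEMO_TLV_TAG, tag) |
	       FIELD_PREP(DEMO_TLV_LEN, value_len);
}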
+
int
ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
struct thermal_mitigation_params *param)
@@ -2818,6 +2916,75 @@ ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
+ struct wmi_11d_scan_start_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_11d_scan_start_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ cmd->scan_period_msec = param->scan_period_msec;
+ cmd->start_interval_msec = param->start_interval_msec;
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "send 11d scan start vdev id %d period %d ms internal %d ms\n",
+ cmd->vdev_id,
+ cmd->scan_period_msec,
+ cmd->start_interval_msec);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_11d_scan_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "send 11d scan stop vdev id %d\n",
+ cmd->vdev_id);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
@@ -3428,6 +3595,56 @@ int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
}
static void
+ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_obss_color_collision_event *ev;
+ struct ath11k_vif *arvif;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ rcu_read_lock();
+
+ ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch obss color collision ev");
+ goto exit;
+ }
+
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to find arvif with vedv id %d in obss_color_collision_event\n",
+ ev->vdev_id);
+ goto exit;
+ }
+
+ switch (ev->evt_type) {
+ case WMI_BSS_COLOR_COLLISION_DETECTION:
+ ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
+ ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
+ break;
+ case WMI_BSS_COLOR_COLLISION_DISABLE:
+ case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY:
+ case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
+ break;
+ default:
+ ath11k_warn(ab, "received unknown obss color collision detection event\n");
+ }
+
+exit:
+ kfree(tb);
+ rcu_read_unlock();
+}
+
+static void
ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
struct wmi_host_pdev_band_to_mac *band_to_mac)
{
@@ -4144,6 +4361,7 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
soc->num_radios = 0;
+ soc->target_pdev_count = 0;
phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
while (phy_id_map && soc->num_radios < MAX_RADIOS) {
@@ -4781,6 +4999,7 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
+ int num_mgmt;
spin_lock_bh(&ar->txmgmt_idr_lock);
msdu = idr_find(&ar->txmgmt_idr, desc_id);
@@ -4804,10 +5023,19 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
ieee80211_tx_status_irqsafe(ar->hw, msdu);
+ num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
+
/* WARN when we received this event without doing any mgmt tx */
- if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
+ if (num_mgmt < 0)
WARN_ON_ONCE(1);
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi mgmt tx comp pending %d desc id %d\n",
+ num_mgmt, desc_id);
+
+ if (!num_mgmt)
+ wake_up(&ar->txmgmt_empty_waitq);
+
return 0;
}
@@ -5343,47 +5571,107 @@ ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
}
-int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
- struct ath11k_fw_stats *stats)
+static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
{
- const void **tb;
- const struct wmi_stats_event *ev;
- const void *data;
- int i, ret;
- u32 len = skb->len;
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath11k_fw_stats *stats = parse->stats;
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr;
+ int j, ret = 0;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
- ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
- return ret;
+ if (tag != WMI_TAG_RSSI_STATS)
+ return -EPROTO;
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats vdev id %d mac %pM\n",
+ stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr);
+
+ arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "not found vif for vdev id %d\n",
+ stats_rssi->vdev_id);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats bssid %pM vif %pK\n",
+ arvif->bssid, arvif->vif);
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arvif->bssid,
+ NULL);
+ if (!sta) {
+ ath11k_warn(ab, "not found station for bssid %pM\n",
+ arvif->bssid);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
+ ARRAY_SIZE(stats_rssi->rssi_avg_beacon));
+
+ for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) {
+ arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j];
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats beacon rssi[%d] %d data rssi[%d] %d\n",
+ j,
+ stats_rssi->rssi_avg_beacon[j],
+ j,
+ stats_rssi->rssi_avg_data[j]);
}
- ev = tb[WMI_TAG_STATS_EVENT];
- data = tb[WMI_TAG_ARRAY_BYTE];
- if (!ev || !data) {
+exit:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
+ struct wmi_tlv_fw_stats_parse *parse,
+ const void *ptr,
+ u16 len)
+{
+ struct ath11k_fw_stats *stats = parse->stats;
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ int i, ret = 0;
+ const void *data = ptr;
+
+ if (!ev) {
ath11k_warn(ab, "failed to fetch update stats ev");
- kfree(tb);
return -EPROTO;
}
- ath11k_dbg(ab, ATH11K_DBG_WMI,
- "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n",
- ev->pdev_id,
- ev->num_pdev_stats, ev->num_vdev_stats,
- ev->num_bcn_stats);
-
- stats->pdev_id = ev->pdev_id;
stats->stats_id = 0;
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
for (i = 0; i < ev->num_pdev_stats; i++) {
const struct wmi_pdev_stats *src;
struct ath11k_fw_stats_pdev *dst;
src = data;
if (len < sizeof(*src)) {
- kfree(tb);
- return -EPROTO;
+ ret = -EPROTO;
+ goto exit;
}
stats->stats_id = WMI_REQUEST_PDEV_STAT;
@@ -5407,12 +5695,29 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
src = data;
if (len < sizeof(*src)) {
- kfree(tb);
- return -EPROTO;
+ ret = -EPROTO;
+ goto exit;
}
stats->stats_id = WMI_REQUEST_VDEV_STAT;
+ arvif = ath11k_mac_get_arvif(ar, src->vdev_id);
+ if (arvif) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arvif->bssid,
+ NULL);
+ if (sta) {
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+ arsta->rssi_beacon = src->beacon_snr;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats vdev id %d snr %d\n",
+ src->vdev_id, src->beacon_snr);
+ } else {
+ ath11k_warn(ab, "not found station for bssid %pM\n",
+ arvif->bssid);
+ }
+ }
+
data += sizeof(*src);
len -= sizeof(*src);
@@ -5430,8 +5735,8 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
src = data;
if (len < sizeof(*src)) {
- kfree(tb);
- return -EPROTO;
+ ret = -EPROTO;
+ goto exit;
}
stats->stats_id = WMI_REQUEST_BCN_STAT;
@@ -5447,8 +5752,67 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
list_add_tail(&dst->list, &stats->bcn);
}
- kfree(tb);
- return 0;
+exit:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ int ret = 0;
+
+ switch (tag) {
+ case WMI_TAG_STATS_EVENT:
+ parse->ev = (struct wmi_stats_event *)ptr;
+ parse->stats->pdev_id = parse->ev->pdev_id;
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
+ break;
+ case WMI_TAG_PER_CHAIN_RSSI_STATS:
+ parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr;
+
+ if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
+ parse->rssi_num = parse->rssi->num_per_chain_rssi_stats;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats id 0x%x num chain %d\n",
+ parse->ev->stats_id,
+ parse->rssi_num);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (parse->rssi_num && !parse->chain_rssi_done) {
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_rssi_chain_parse,
+ parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse rssi chain %d\n",
+ ret);
+ return ret;
+ }
+ parse->chain_rssi_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats)
+{
+ struct wmi_tlv_fw_stats_parse parse = { };
+
+ stats->stats_id = 0;
+ parse.stats = stats;
+
+ return ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_fw_stats_parse,
+ &parse);
}
size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
@@ -5810,15 +6174,79 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
wake_up(&ab->wmi_ab.tx_credits_wq);
}
+static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const struct wmi_11d_new_cc_ev *ev;
+ const void **tb;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
+ if (!ev) {
+ kfree(tb);
+ ath11k_warn(ab, "failed to fetch 11d new cc ev");
+ return -EPROTO;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&ab->new_alpha2, &ev->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi 11d new cc %c%c\n",
+ ab->new_alpha2[0],
+ ab->new_alpha2[1]);
+
+ kfree(tb);
+
+ queue_work(ab->workqueue, &ab->update_11d_work);
+
+ return 0;
+}
+
static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
+ struct ath11k_pdev_wmi *wmi = NULL;
+ u32 i;
+ u8 wmi_ep_count;
+ u8 eid;
+
+ eid = ATH11K_SKB_CB(skb)->eid;
dev_kfree_skb(skb);
+
+ if (eid >= ATH11K_HTC_EP_COUNT)
+ return;
+
+ wmi_ep_count = ab->htc.wmi_ep_count;
+ if (wmi_ep_count > ab->hw_params.max_radios)
+ return;
+
+ for (i = 0; i < ab->htc.wmi_ep_count; i++) {
+ if (ab->wmi_ab.wmi[i].eid == eid) {
+ wmi = &ab->wmi_ab.wmi[i];
+ break;
+ }
+ }
+
+ if (wmi)
+ wake_up(&wmi->tx_ce_desc_wq);
}
static bool ath11k_reg_is_world_alpha(char *alpha)
{
- return alpha[0] == '0' && alpha[1] == '0';
+ if (alpha[0] == '0' && alpha[1] == '0')
+ return true;
+
+ if (alpha[0] == 'n' && alpha[1] == 'a')
+ return true;
+
+ return false;
}
static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb)
@@ -5911,7 +6339,7 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
ar = ab->pdevs[pdev_idx].ar;
kfree(ab->new_regd[pdev_idx]);
ab->new_regd[pdev_idx] = regd;
- ieee80211_queue_work(ar->hw, &ar->regd_update_work);
+ queue_work(ab->workqueue, &ar->regd_update_work);
} else {
/* This regd would be applied during mac registration and is
* held constant throughout for regd intersection purpose
@@ -6111,6 +6539,7 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
{
+ struct ath11k_vif *arvif;
u32 vdev_id, tx_status;
if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
@@ -6118,6 +6547,17 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
ath11k_warn(ab, "failed to extract bcn tx status");
return;
}
+
+ rcu_read_lock();
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+ ath11k_mac_bcn_tx_event(arvif);
+ rcu_read_unlock();
}
static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
@@ -6398,6 +6838,7 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff
struct ieee80211_sta *sta;
struct ath11k_peer *peer;
struct ath11k *ar;
+ u32 vdev_id;
if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
ath11k_warn(ab, "failed to extract peer sta kickout event");
@@ -6413,10 +6854,15 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff
if (!peer) {
ath11k_warn(ab, "peer not found %pM\n",
arg.mac_addr);
+ spin_unlock_bh(&ab->base_lock);
goto exit;
}
- ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+ vdev_id = peer->vdev_id;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
peer->vdev_id);
@@ -6437,7 +6883,6 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff
ieee80211_report_low_ack(sta, 10);
exit:
- spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
}
@@ -6893,6 +7338,40 @@ exit:
kfree(tb);
}
+static void ath11k_rfkill_state_change_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_rfkill_state_change_ev *ev;
+ const void **tb;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_RFKILL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
+ ev->gpio_pin_num,
+ ev->int_type,
+ ev->radio_state);
+
+ spin_lock_bh(&ab->base_lock);
+ ab->rfkill_radio_on = (ev->radio_state == WMI_RFKILL_RADIO_STATE_ON);
+ spin_unlock_bh(&ab->base_lock);
+
+ queue_work(ab->workqueue, &ab->rfkill_work);
+ kfree(tb);
+}
+
static void
ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
struct sk_buff *skb)
@@ -7046,6 +7525,13 @@ static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_b
complete(&ab->wow.wakeup_completed);
}
+static void
+ath11k_wmi_diag_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ trace_ath11k_wmi_diag(ab, skb->data, skb->len);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7054,6 +7540,8 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));
+ trace_ath11k_wmi_event(ab, id, skb->data, skb->len);
+
if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
goto out;
@@ -7138,6 +7626,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
ath11k_probe_resp_tx_status_event(ab, skb);
break;
+ case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+ ath11k_wmi_obss_color_collision_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
@@ -7157,6 +7648,15 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_WOW_WAKEUP_HOST_EVENTID:
ath11k_wmi_event_wow_wakeup_host(ab, skb);
break;
+ case WMI_11D_NEW_COUNTRY_EVENTID:
+ ath11k_reg_11d_new_cc_event(ab, skb);
+ break;
+ case WMI_RFKILL_STATE_CHANGE_EVENTID:
+ ath11k_rfkill_state_change_event(ab, skb);
+ break;
+ case WMI_DIAG_EVENTID:
+ ath11k_wmi_diag_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -7199,6 +7699,7 @@ static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+ init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq);
return 0;
}
@@ -7414,3 +7915,31 @@ int ath11k_wmi_wow_enable(struct ath11k *ar)
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
}
+
+int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
+ const u8 mac_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+ struct wmi_scan_prob_req_oui_cmd *cmd;
+ u32 prob_req_oui;
+ int len;
+
+ prob_req_oui = (((u32)mac_addr[0]) << 16) |
+ (((u32)mac_addr[1]) << 8) | mac_addr[2];
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->prob_req_oui = prob_req_oui;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi scan prob req oui %d\n",
+ prob_req_oui);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
+}
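The shift arithmetic above keeps the first three MAC octets in transmission order; as a worked check with a hypothetical address:

/* mac_addr = 00:03:7f:aa:bb:cc (hypothetical)
 *
 * prob_req_oui = (0x00 << 16) | (0x03 << 8) | 0x7f
 *              = 0x00037f
 *
 * i.e. octet 0 lands in bits 23..16, octet 1 in 15..8, octet 2 in 7..0.
 */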
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 0584e68e7593..2f26ec1a8aa3 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -113,10 +113,10 @@ enum wmi_host_hw_mode_priority {
WMI_HOST_HW_MODE_MAX_PRI
};
-enum {
+enum WMI_HOST_WLAN_BAND {
WMI_HOST_WLAN_2G_CAP = 0x1,
WMI_HOST_WLAN_5G_CAP = 0x2,
- WMI_HOST_WLAN_2G_5G_CAP = 0x3,
+ WMI_HOST_WLAN_2G_5G_CAP = WMI_HOST_WLAN_2G_CAP | WMI_HOST_WLAN_5G_CAP,
};
/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
@@ -774,6 +774,8 @@ enum wmi_tlv_event_id {
WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID =
+ WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
WMI_DCC_GET_STATS_RESP_EVENTID,
@@ -2167,6 +2169,13 @@ enum wmi_nss_ratio {
WMI_NSS_RATIO_2_NSS = 0x3,
};
+enum wmi_dtim_policy {
+ WMI_DTIM_POLICY_IGNORE = 1,
+ WMI_DTIM_POLICY_NORMAL = 2,
+ WMI_DTIM_POLICY_STICK = 3,
+ WMI_DTIM_POLICY_AUTO = 4,
+};
+
struct wmi_host_pdev_band_to_mac {
u32 pdev_id;
u32 start_freq;
@@ -2522,6 +2531,7 @@ struct ath11k_pdev_wmi {
enum ath11k_htc_ep_id eid;
const struct wmi_peer_flags_map *peer_flags;
u32 rx_decap_mode;
+ wait_queue_head_t tx_ce_desc_wq;
};
struct vdev_create_params {
@@ -3079,7 +3089,6 @@ enum scan_dwelltime_adaptive_mode {
#define WLAN_SCAN_MAX_NUM_SSID 10
#define WLAN_SCAN_MAX_NUM_BSSID 10
-#define WLAN_SCAN_MAX_NUM_CHANNELS 40
#define WLAN_SSID_MAX_LEN 32
@@ -3300,7 +3309,7 @@ struct scan_req_params {
u32 num_bssid;
u32 num_ssids;
u32 n_probes;
- u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS];
+ u32 *chan_list;
u32 notify_scan_events;
struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID];
struct wmi_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID];
@@ -3311,6 +3320,8 @@ struct scan_req_params {
u32 num_hint_bssid;
struct hint_short_ssid hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID];
struct hint_bssid hint_bssid[WLAN_SCAN_MAX_HINT_BSSID];
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
};
struct wmi_ssid_arg {
@@ -3617,6 +3628,7 @@ struct peer_assoc_params {
u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
bool twt_responder;
bool twt_requester;
+ bool is_assoc;
struct ath11k_ppe_threshold peer_ppet;
};
@@ -3673,6 +3685,11 @@ struct wmi_scan_chan_list_cmd {
u32 pdev_id;
} __packed;
+struct wmi_scan_prob_req_oui_cmd {
+ u32 tlv_header;
+ u32 prob_req_oui;
+} __packed;
+
#define WMI_MGMT_SEND_DOWNLD_LEN 64
#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
@@ -3766,6 +3783,16 @@ struct stats_request_params {
u32 pdev_id;
};
+struct wmi_set_current_country_params {
+ u8 alpha2[3];
+};
+
+struct wmi_set_current_country_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 new_alpha2;
+} __packed;
+
enum set_init_cc_type {
WMI_COUNTRY_INFO_TYPE_ALPHA,
WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE,
@@ -3799,6 +3826,28 @@ struct wmi_init_country_cmd {
} cc_info;
} __packed;
+struct wmi_11d_scan_start_params {
+ u32 vdev_id;
+ u32 scan_period_msec;
+ u32 start_interval_msec;
+};
+
+struct wmi_11d_scan_start_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 scan_period_msec;
+ u32 start_interval_msec;
+} __packed;
+
+struct wmi_11d_scan_stop_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_11d_new_cc_ev {
+ u32 new_alpha2;
+} __packed;
+
#define THERMAL_LEVELS 1
struct tt_level_config {
u32 tmplwm;
@@ -4390,6 +4439,17 @@ struct wmi_stats_event {
u32 num_peer_extd2_stats;
} __packed;
+struct wmi_rssi_stats {
+ u32 vdev_id;
+ u32 rssi_avg_beacon[WMI_MAX_CHAINS];
+ u32 rssi_avg_data[WMI_MAX_CHAINS];
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_per_chain_rssi_stats {
+ u32 num_per_chain_rssi_stats;
+} __packed;
+
struct wmi_pdev_ctl_failsafe_chk_event {
u32 pdev_id;
u32 ctl_failsafe_status;
@@ -4914,6 +4974,13 @@ struct wmi_pdev_obss_pd_bitmap_cmd {
#define ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS 10000
#define ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS 5000
+enum wmi_bss_color_collision {
+ WMI_BSS_COLOR_COLLISION_DISABLE = 0,
+ WMI_BSS_COLOR_COLLISION_DETECTION,
+ WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY,
+ WMI_BSS_COLOR_FREE_SLOT_AVAILABLE,
+};
+
struct wmi_obss_color_collision_cfg_params_cmd {
u32 tlv_header;
u32 vdev_id;
@@ -4931,6 +4998,12 @@ struct wmi_bss_color_change_enable_params_cmd {
u32 enable;
} __packed;
+struct wmi_obss_color_collision_event {
+ u32 vdev_id;
+ u32 evt_type;
+ u64 obss_color_bitmap;
+} __packed;
+
#define ATH11K_IPV4_TH_SEED_SIZE 5
#define ATH11K_IPV6_TH_SEED_SIZE 11
@@ -5142,6 +5215,31 @@ struct target_resource_config {
u32 twt_ap_sta_count;
};
+enum wmi_sys_cap_info_flags {
+ WMI_SYS_CAP_INFO_RXTX_LED = BIT(0),
+ WMI_SYS_CAP_INFO_RFKILL = BIT(1),
+};
+
+#define WMI_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0)
+#define WMI_RFKILL_CFG_RADIO_LEVEL BIT(6)
+#define WMI_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7)
+
+enum wmi_rfkill_enable_radio {
+ WMI_RFKILL_ENABLE_RADIO_ON = 0,
+ WMI_RFKILL_ENABLE_RADIO_OFF = 1,
+};
+
+enum wmi_rfkill_radio_state {
+ WMI_RFKILL_RADIO_STATE_OFF = 1,
+ WMI_RFKILL_RADIO_STATE_ON = 2,
+};
+
+struct wmi_rfkill_state_change_ev {
+ u32 gpio_pin_num;
+ u32 int_type;
+ u32 radio_state;
+} __packed;
+
#define WMI_MAX_MEM_REQS 32
#define MAX_RADIOS 3
@@ -5351,7 +5449,8 @@ int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
u32 vdev_id, u32 param_id, u32 param_val);
int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
u32 param_value, u8 pdev_id);
-int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable);
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
+ enum wmi_sta_ps_mode psmode);
int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab);
int ath11k_wmi_cmd_init(struct ath11k_base *ab);
int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab);
@@ -5415,9 +5514,16 @@ int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason);
int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
u32 vdev_id, u32 bcn_ctrl_op);
+int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
+ struct wmi_set_current_country_params *param);
int
ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
struct wmi_init_country_params init_cc_param);
+
+int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
+ struct wmi_11d_scan_start_params *param);
+int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id);
+
int
ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
struct thermal_mitigation_params *param);
@@ -5474,5 +5580,6 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
enum wmi_host_hw_mode_config_type mode);
int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar);
int ath11k_wmi_wow_enable(struct ath11k *ar);
-
+int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
+ const u8 mac_addr[ETH_ALEN]);
#endif
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index cef17f33c69e..66d123f48085 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -727,6 +727,43 @@ ath5k_get_rate_hw_value(const struct ieee80211_hw *hw,
return hw_rate;
}
+static bool ath5k_merge_ratetbl(struct ieee80211_sta *sta,
+ struct ath5k_buf *bf,
+ struct ieee80211_tx_info *tx_info)
+{
+ struct ieee80211_sta_rates *ratetbl;
+ u8 i;
+
+ if (!sta)
+ return false;
+
+ ratetbl = rcu_dereference(sta->rates);
+ if (!ratetbl)
+ return false;
+
+ if (tx_info->control.rates[0].idx < 0 ||
+ tx_info->control.rates[0].count == 0)
+ {
+ i = 0;
+ } else {
+ bf->rates[0] = tx_info->control.rates[0];
+ i = 1;
+ }
+
+ for ( ; i < IEEE80211_TX_MAX_RATES; i++) {
+ bf->rates[i].idx = ratetbl->rate[i].idx;
+ bf->rates[i].flags = ratetbl->rate[i].flags;
+ if (tx_info->control.use_rts)
+ bf->rates[i].count = ratetbl->rate[i].count_rts;
+ else if (tx_info->control.use_cts_prot)
+ bf->rates[i].count = ratetbl->rate[i].count_cts;
+ else
+ bf->rates[i].count = ratetbl->rate[i].count;
+ }
+
+ return true;
+}
+
static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
struct ath5k_txq *txq, int padsize,
@@ -737,6 +774,7 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
struct ieee80211_rate *rate;
+ struct ieee80211_sta *sta;
unsigned int mrr_rate[3], mrr_tries[3];
int i, ret;
u16 hw_rate;
@@ -753,8 +791,16 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
if (dma_mapping_error(ah->dev, bf->skbaddr))
return -ENOSPC;
- ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
- ARRAY_SIZE(bf->rates));
+ if (control)
+ sta = control->sta;
+ else
+ sta = NULL;
+
+ if (!ath5k_merge_ratetbl(sta, bf, info)) {
+ ieee80211_get_tx_rates(info->control.vif,
+ sta, skb, bf->rates,
+ ARRAY_SIZE(bf->rates));
+ }
rate = ath5k_get_rate(ah->hw, info, bf, 0);
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 112d8a9b8d43..d3534a29c4f0 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -153,12 +153,19 @@
* implementations.
*/
struct htc_frame_hdr {
- u8 eid;
- u8 flags;
-
- /* length of data (including trailer) that follows the header */
- __le16 payld_len;
-
+ struct_group_tagged(htc_frame_look_ahead, header,
+ union {
+ struct {
+ u8 eid;
+ u8 flags;
+
+ /* length of data (including trailer) that follows the header */
+ __le16 payld_len;
+
+ };
+ u32 word;
+ };
+ );
/* end of 4-byte lookahead */
u8 ctrl[2];
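struct_group_tagged(htc_frame_look_ahead, header, ...) keeps the members inline in htc_frame_hdr while also emitting a standalone struct htc_frame_look_ahead type; with the anonymous union, the 4-byte lookahead can be loaded as one u32 and then picked apart field by field, as the htc_mbox.c change below does. A reduced sketch of the consumption side, using a demo struct of the same shape:

#include <linux/types.h>

/* Reduced model: same shape as the grouped header members above. */
struct demo_look_ahead {
	union {
		struct {
			u8 eid;
			u8 flags;
			__le16 payld_len;
		};
		u32 word;
	};
};

static u16 demo_payload_len(u32 raw)
{
	struct demo_look_ahead la;

	la.word = raw;		/* load all four bytes at once */
	return le16_to_cpu(la.payld_len);
}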
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 998947ef63b6..e3874421c4c0 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -2260,19 +2260,16 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
struct htc_packet *packet = NULL;
- struct htc_frame_hdr *htc_hdr;
- u32 look_ahead;
+ struct htc_frame_look_ahead look_ahead;
- if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
+ if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead.word,
HTC_TARGET_RESPONSE_TIMEOUT))
return NULL;
ath6kl_dbg(ATH6KL_DBG_HTC,
- "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
-
- htc_hdr = (struct htc_frame_hdr *)&look_ahead;
+ "htc rx wait ctrl look_ahead 0x%X\n", look_ahead.word);
- if (htc_hdr->eid != ENDPOINT_0)
+ if (look_ahead.eid != ENDPOINT_0)
return NULL;
packet = htc_get_control_buf(target, false);
@@ -2281,8 +2278,8 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
return NULL;
packet->info.rx.rx_flags = 0;
- packet->info.rx.exp_hdr = look_ahead;
- packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;
+ packet->info.rx.exp_hdr = look_ahead.word;
+ packet->act_len = le16_to_cpu(look_ahead.payld_len) + HTC_HDR_LENGTH;
if (packet->act_len > packet->buf_len)
goto fail_ctrl_rx;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index ce9a0a53771e..fba5a847c3bb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -120,7 +120,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
AR_ISR_TXEOL);
}
- ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
+ ah->intr_txqs = MS(s0_s, AR_ISR_S0_QCU_TXOK);
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 7e27a06e5df1..dc24da1ff00b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1005,24 +1005,20 @@ static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
int i, int nmeasurement)
{
struct ath_common *common = ath9k_hw_common(ah);
- int im, ix, iy, temp;
+ int im, ix, iy;
for (im = 0; im < nmeasurement; im++) {
for (ix = 0; ix < MAXIQCAL - 1; ix++) {
for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) {
if (coeff->mag_coeff[i][im][iy] <
coeff->mag_coeff[i][im][ix]) {
- temp = coeff->mag_coeff[i][im][ix];
- coeff->mag_coeff[i][im][ix] =
- coeff->mag_coeff[i][im][iy];
- coeff->mag_coeff[i][im][iy] = temp;
+ swap(coeff->mag_coeff[i][im][ix],
+ coeff->mag_coeff[i][im][iy]);
}
if (coeff->phs_coeff[i][im][iy] <
coeff->phs_coeff[i][im][ix]) {
- temp = coeff->phs_coeff[i][im][ix];
- coeff->phs_coeff[i][im][ix] =
- coeff->phs_coeff[i][im][iy];
- coeff->phs_coeff[i][im][iy] = temp;
+ swap(coeff->phs_coeff[i][im][ix],
+ coeff->phs_coeff[i][im][iy]);
}
}
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 860da13bfb6a..f06eec99de68 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -590,6 +590,13 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
return;
}
+ if (pkt_len > 2 * MAX_RX_BUF_SIZE) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: invalid pkt_len (%x)\n", pkt_len);
+ RX_STAT_INC(skb_dropped);
+ return;
+ }
+
pad_len = 4 - (pkt_len & 0x3);
if (pad_len == 4)
pad_len = 0;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 0a1634238e67..6b45e63fae4b 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -281,6 +281,7 @@ struct ath9k_htc_rxbuf {
struct ath9k_htc_rx {
struct list_head rxbuf;
spinlock_t rxbuflock;
+ bool initialized;
};
#define ATH9K_HTC_TX_CLEANUP_INTERVAL 50 /* ms */
@@ -305,6 +306,7 @@ struct ath9k_htc_tx {
DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
struct timer_list cleanup_timer;
spinlock_t tx_lock;
+ bool initialized;
};
struct ath9k_htc_tx_ctl {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 8e69e8989f6d..6a850a0bfa8a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -813,6 +813,11 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv)
skb_queue_head_init(&priv->tx.data_vi_queue);
skb_queue_head_init(&priv->tx.data_vo_queue);
skb_queue_head_init(&priv->tx.tx_failed);
+
+ /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
+ smp_wmb();
+ priv->tx.initialized = true;
+
return 0;
}
@@ -1130,6 +1135,10 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
unsigned long flags;
+ /* Check if ath9k_rx_init() completed. */
+ if (!data_race(priv->rx.initialized))
+ goto err;
+
spin_lock_irqsave(&priv->rx.rxbuflock, flags);
list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
if (!tmp_buf->in_process) {
@@ -1185,6 +1194,10 @@ int ath9k_rx_init(struct ath9k_htc_priv *priv)
list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
}
+ /* Allow ath9k_htc_rxep() to operate. */
+ smp_wmb();
+ priv->rx.initialized = true;
+
return 0;
err:
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index fe29ad4b9023..f315c54bd3ac 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -169,6 +169,10 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
&wmi->drv_priv->fatal_work);
break;
case WMI_TXSTATUS_EVENTID:
+ /* Check if ath9k_tx_init() completed. */
+ if (!data_race(priv->tx.initialized))
+ break;
+
spin_lock_bh(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
spin_unlock_bh(&priv->tx.tx_lock);
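The initialized flags added to ath9k_htc_rx/tx pair an smp_wmb() on the producer with a data_race() read on the consumer; the consumer then takes the relevant lock before touching the published state, which supplies the read-side ordering. A minimal sketch of the pattern, assuming one setup context and one event context:

#include <linux/compiler.h>
#include <linux/spinlock.h>

struct demo_queue {
	spinlock_t lock;
	int slots;		/* set up before publication */
	bool initialized;
};

static void demo_setup(struct demo_queue *q)
{
	spin_lock_init(&q->lock);
	q->slots = 8;

	/* Order all setup stores before the flag store. */
	smp_wmb();
	q->initialized = true;
}

static bool demo_event(struct demo_queue *q)
{
	/* Lockless gate; data_race() marks the unsynchronized read as
	 * intentional for KCSAN. */
	if (!data_race(q->initialized))
		return false;

	spin_lock(&q->lock);	/* acquire orders the reads that follow */
	q->slots--;
	spin_unlock(&q->lock);
	return true;
}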
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 5691bd6eb82c..d0caf1de2bde 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -154,11 +154,52 @@ static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
seqno << IEEE80211_SEQ_SEQ_SHIFT);
}
+static bool ath_merge_ratetbl(struct ieee80211_sta *sta, struct ath_buf *bf,
+ struct ieee80211_tx_info *tx_info)
+{
+ struct ieee80211_sta_rates *ratetbl;
+ u8 i;
+
+ if (!sta)
+ return false;
+
+ ratetbl = rcu_dereference(sta->rates);
+ if (!ratetbl)
+ return false;
+
+ if (tx_info->control.rates[0].idx < 0 ||
+ tx_info->control.rates[0].count == 0)
+ {
+ i = 0;
+ } else {
+ bf->rates[0] = tx_info->control.rates[0];
+ i = 1;
+ }
+
+ for ( ; i < IEEE80211_TX_MAX_RATES; i++) {
+ bf->rates[i].idx = ratetbl->rate[i].idx;
+ bf->rates[i].flags = ratetbl->rate[i].flags;
+ if (tx_info->control.use_rts)
+ bf->rates[i].count = ratetbl->rate[i].count_rts;
+ else if (tx_info->control.use_cts_prot)
+ bf->rates[i].count = ratetbl->rate[i].count_cts;
+ else
+ bf->rates[i].count = ratetbl->rate[i].count;
+ }
+
+ return true;
+}
+
static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ath_buf *bf)
{
- ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
- ARRAY_SIZE(bf->rates));
+ struct ieee80211_tx_info *tx_info;
+
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+
+ if (!ath_merge_ratetbl(sta, bf, tx_info))
+ ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
+ ARRAY_SIZE(bf->rates));
}
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index cca3b086aa70..49f7ee1c912b 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -307,8 +307,7 @@ static void carl9170_zap_queues(struct ar9170 *ar)
for (i = 0; i < ar->hw->queues; i++)
ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
- for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
- ar->mem_bitmap[i] = 0;
+ bitmap_zero(ar->mem_bitmap, ar->fw.mem_blocks);
rcu_read_lock();
list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
@@ -1968,9 +1967,7 @@ int carl9170_register(struct ar9170 *ar)
if (WARN_ON(ar->mem_bitmap))
return -EINVAL;
- ar->mem_bitmap = kcalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG),
- sizeof(unsigned long),
- GFP_KERNEL);
+ ar->mem_bitmap = bitmap_zalloc(ar->fw.mem_blocks, GFP_KERNEL);
if (!ar->mem_bitmap)
return -ENOMEM;
@@ -2085,7 +2082,7 @@ void carl9170_free(struct ar9170 *ar)
kfree_skb(ar->rx_failover);
ar->rx_failover = NULL;
- kfree(ar->mem_bitmap);
+ bitmap_free(ar->mem_bitmap);
ar->mem_bitmap = NULL;
kfree(ar->survey);
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 88444fe6d1c6..1b76f4434c06 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -275,12 +275,12 @@ static void carl9170_tx_release(struct kref *ref)
if (WARN_ON_ONCE(!ar))
return;
- BUILD_BUG_ON(
- offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
-
- memset(&txinfo->status.ack_signal, 0,
- sizeof(struct ieee80211_tx_info) -
- offsetof(struct ieee80211_tx_info, status.ack_signal));
+ /*
+ * This does not call ieee80211_tx_info_clear_status() because
+ * carl9170_tx_fill_rateinfo() has filled the rate information
+ * before we get to this point.
+ */
+ memset_after(&txinfo->status, 0, rates);
if (atomic_read(&ar->tx_total_queued))
ar->tx_schedule = true;
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 8d5a16b558e6..774419c7f442 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -126,6 +126,7 @@ enum CountryCode {
CTRY_KOREA_ROC = 410,
CTRY_KOREA_ROC2 = 411,
CTRY_KOREA_ROC3 = 412,
+ CTRY_KOREA_ROC4 = 413,
CTRY_KUWAIT = 414,
CTRY_LATVIA = 428,
CTRY_LEBANON = 422,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index c4bd26e65949..cdb1e9a23734 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -76,6 +76,7 @@ enum EnumRd {
APL7_FCCA = 0x5C,
APL8_WORLD = 0x5D,
APL9_WORLD = 0x5E,
+ APL10_WORLD = 0x5F,
WOR0_WORLD = 0x60,
WOR1_WORLD = 0x61,
@@ -204,6 +205,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
{APL6_WORLD, CTL_ETSI, CTL_ETSI},
{APL8_WORLD, CTL_ETSI, CTL_ETSI},
{APL9_WORLD, CTL_ETSI, CTL_ETSI},
+ {APL10_WORLD, CTL_ETSI, CTL_ETSI},
{APL3_FCCA, CTL_FCC, CTL_FCC},
{APL7_FCCA, CTL_FCC, CTL_FCC},
@@ -426,6 +428,7 @@ static struct country_code_to_enum_rd allCountries[] = {
{CTRY_KOREA_ROC, APL9_WORLD, "KR"},
{CTRY_KOREA_ROC2, APL2_WORLD, "K2"},
{CTRY_KOREA_ROC3, APL9_WORLD, "K3"},
+ {CTRY_KOREA_ROC4, APL10_WORLD, "K4"},
{CTRY_KUWAIT, ETSI3_WORLD, "KW"},
{CTRY_LATVIA, ETSI1_WORLD, "LV"},
{CTRY_LEBANON, NULL1_WORLD, "LB"},
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index aff04ef66266..4e9e13941c8f 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -272,6 +272,21 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
return 0;
}
+static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
+{
+ int reg_data = 0;
+
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ &reg_data);
+
+ reg_data &= ~wcn_ch;
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ (int)reg_data);
+}
+
static int wcn36xx_dxe_fill_skb(struct device *dev,
struct wcn36xx_dxe_ctl *ctl,
gfp_t gfp)
@@ -834,6 +849,53 @@ unlock:
return ret;
}
+static bool _wcn36xx_dxe_tx_channel_is_empty(struct wcn36xx_dxe_ch *ch)
+{
+ unsigned long flags;
+ struct wcn36xx_dxe_ctl *ctl_bd_start, *ctl_skb_start;
+ struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
+ bool ret = true;
+
+ spin_lock_irqsave(&ch->lock, flags);
+
+ /* Loop through ring buffer looking for nonempty entries. */
+ ctl_bd_start = ch->head_blk_ctl;
+ ctl_bd = ctl_bd_start;
+ ctl_skb_start = ctl_bd_start->next;
+ ctl_skb = ctl_skb_start;
+ do {
+ if (ctl_skb->skb) {
+ ret = false;
+ goto unlock;
+ }
+ ctl_bd = ctl_skb->next;
+ ctl_skb = ctl_bd->next;
+ } while (ctl_skb != ctl_skb_start);
+
+unlock:
+ spin_unlock_irqrestore(&ch->lock, flags);
+ return ret;
+}
+
+int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn)
+{
+ int i = 0;
+
+ /* Called with mac80211 queues stopped. Wait for empty HW queues. */
+ do {
+ if (_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_l_ch) &&
+ _wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_h_ch)) {
+ return 0;
+ }
+ /* This ieee80211_ops callback is specifically allowed to
+ * sleep.
+ */
+ usleep_range(1000, 1100);
+ } while (++i < 100);
+
+ return -EBUSY;
+}
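wcn36xx_dxe_tx_flush() bounds the wait at 100 iterations of a ~1 ms sleep (roughly 100 ms total) rather than blocking on a completion. A stripped-down sketch of the idiom, assuming a caller that is allowed to sleep:

#include <linux/delay.h>
#include <linux/errno.h>

static int demo_poll_idle(bool (*is_idle)(void *), void *arg)
{
	int i = 0;

	do {
		if (is_idle(arg))
			return 0;
		/* usleep_range() is preferred over udelay() in sleepable
		 * context; the range gives the scheduler coalescing room. */
		usleep_range(1000, 1100);
	} while (++i < 100);

	return -EBUSY;	/* HW queues never drained */
}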
+
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
int reg_data = 0, ret;
@@ -869,7 +931,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
WCN36XX_DXE_WQ_TX_L);
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
- wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
/***************************************/
/* Init descriptors for TX HIGH channel */
@@ -893,9 +954,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
- /* Enable channel interrupts */
- wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
-
/***************************************/
/* Init descriptors for RX LOW channel */
/***************************************/
@@ -905,7 +963,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
goto out_err_rxl_ch;
}
-
/* For RX we need to preallocated buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
@@ -928,9 +985,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
WCN36XX_DXE_REG_CTL_RX_L,
WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
- /* Enable channel interrupts */
- wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
-
/***************************************/
/* Init descriptors for RX HIGH channel */
/***************************************/
@@ -962,15 +1016,18 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
WCN36XX_DXE_REG_CTL_RX_H,
WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
- /* Enable channel interrupts */
- wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
-
ret = wcn36xx_dxe_request_irqs(wcn);
if (ret < 0)
goto out_err_irq;
timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
+
return 0;
out_err_irq:
@@ -987,6 +1044,14 @@ out_err_txh_ch:
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
+ int reg_data = 0;
+
+ /* Disable channel interrupts */
+ wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
+ wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+ wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+ wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+
free_irq(wcn->tx_irq, wcn);
free_irq(wcn->rx_irq, wcn);
del_timer(&wcn->tx_ack_timer);
@@ -996,6 +1061,15 @@ void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
wcn->tx_ack_skb = NULL;
}
+ /* Put the DXE block into reset before freeing memory */
+ reg_data = WCN36XX_DXE_REG_RESET;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
+
wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
+
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index 31b81b7547a3..26a31edf52e9 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -466,5 +466,6 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
struct wcn36xx_tx_bd *bd,
struct sk_buff *skb,
bool is_low);
+int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn);
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status);
#endif /* _DXE_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 9bea2b01f9aa..2a1db9756fd5 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -3459,9 +3459,6 @@ struct wcn36xx_hal_missed_beacon_ind_msg {
/* Beacon Filtering data structures */
-/* The above structure would be followed by multiple of below mentioned
- * structure
- */
struct beacon_filter_ie {
u8 element_id;
u8 check_ie_presence;
@@ -3469,7 +3466,27 @@ struct beacon_filter_ie {
u8 value;
u8 bitmask;
u8 ref;
-};
+} __packed;
+
+#define WCN36XX_FILTER_CAPABILITY_MASK 0x73cf
+#define WCN36XX_FILTER_IE_DS_CHANNEL_MASK 0x00
+#define WCN36XX_FILTER_IE_ERP_FILTER_MASK 0xF8
+#define WCN36XX_FILTER_IE_EDCA_FILTER_MASK 0xF0
+#define WCN36XX_FILTER_IE_QOS_FILTER_MASK 0xF0
+#define WCN36XX_FILTER_IE_CHANNEL_SWITCH_MASK 0x00
+#define WCN36XX_FILTER_IE_HT_BYTE0_FILTER_MASK 0x00
+#define WCN36XX_FILTER_IE_HT_BYTE1_FILTER_MASK 0xF8
+#define WCN36XX_FILTER_IE_HT_BYTE2_FILTER_MASK 0xEB
+#define WCN36XX_FILTER_IE_HT_BYTE5_FILTER_MASK 0xFD
+#define WCN36XX_FILTER_IE_PWR_CONSTRAINT_MASK 0x00
+#define WCN36XX_FILTER_IE_OPMODE_NOTIF_MASK 0x00
+#define WCN36XX_FILTER_IE_VHTOP_CHWIDTH_MASK 0xFC
+#define WCN36XX_FILTER_IE_RSN_MASK 0x00
+#define WCN36XX_FILTER_IE_VENDOR_MASK 0x00
+
+/* The above structure would be followed by multiple of below mentioned
+ * structure
+ */
struct wcn36xx_hal_add_bcn_filter_req_msg {
struct wcn36xx_hal_msg_header header;
@@ -3480,14 +3497,14 @@ struct wcn36xx_hal_add_bcn_filter_req_msg {
u16 ie_num;
u8 bss_index;
u8 reserved;
-};
+} __packed;
struct wcn36xx_hal_rem_bcn_filter_req {
struct wcn36xx_hal_msg_header header;
u8 ie_Count;
u8 rem_ie_id[1];
-};
+} __packed;
#define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD 0
#define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD 1
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index b04533bbc3a4..9575d7373bf2 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -402,6 +402,7 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
{
struct wcn36xx *wcn = hw->priv;
+ int ret;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
@@ -417,17 +418,31 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
* want to receive/transmit regular data packets, then
* simply stop the scan session and exit PS mode.
*/
- wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
- wcn->sw_scan_vif);
- wcn->sw_scan_channel = 0;
+ if (wcn->sw_scan_channel)
+ wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel);
+ if (wcn->sw_scan_init) {
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
+ wcn->sw_scan_vif);
+ }
} else if (wcn->sw_scan) {
/* A scan is ongoing, do not change the operating
* channel, but start a scan session on the channel.
*/
- wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN,
- wcn->sw_scan_vif);
+ if (wcn->sw_scan_channel)
+ wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel);
+ if (!wcn->sw_scan_init) {
+ /* This can fail if we are unable to notify the
+ * operating channel.
+ */
+ ret = wcn36xx_smd_init_scan(wcn,
+ HAL_SYS_MODE_SCAN,
+ wcn->sw_scan_vif);
+ if (ret) {
+ mutex_unlock(&wcn->conf_mutex);
+ return -EIO;
+ }
+ }
wcn36xx_smd_start_scan(wcn, ch);
- wcn->sw_scan_channel = ch;
} else {
wcn36xx_change_opchannel(wcn, ch);
}
@@ -707,6 +722,8 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "sw_scan_start");
+
wcn->sw_scan = true;
wcn->sw_scan_vif = vif;
wcn->sw_scan_channel = 0;
@@ -721,8 +738,15 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
{
struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "sw_scan_complete");
+
/* ensure that any scan session is finished */
- wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif);
+ if (wcn->sw_scan_channel)
+ wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel);
+ if (wcn->sw_scan_init) {
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
+ wcn->sw_scan_vif);
+ }
wcn->sw_scan = false;
wcn->sw_scan_opchannel = 0;
}
@@ -910,6 +934,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
* place where AID is available.
*/
wcn36xx_smd_config_sta(wcn, vif, sta);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ wcn36xx_smd_add_beacon_filter(wcn, vif);
wcn36xx_enable_keep_alive_null_packet(wcn, vif);
} else {
wcn36xx_dbg(WCN36XX_DBG_MAC,
@@ -1196,7 +1222,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
u16 tid = params->tid;
u16 *ssn = &params->ssn;
int ret = 0;
- u8 session;
+ int session;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
@@ -1208,9 +1234,11 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->tid = tid;
session = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
get_sta_index(vif, sta_priv));
+ if (session < 0) {
+ ret = session;
+ goto out;
+ }
wcn36xx_smd_add_ba(wcn, session);
- wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv), tid,
- session);
break;
case IEEE80211_AMPDU_RX_STOP:
wcn36xx_smd_del_ba(wcn, tid, 0, get_sta_index(vif, sta_priv));
@@ -1220,6 +1248,18 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
spin_unlock_bh(&sta_priv->ampdu_lock);
+ /* Replace the mac80211 ssn with the firmware one */
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu ssn = %u\n", *ssn);
+ wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv), tid, ssn);
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu fw-ssn = %u\n", *ssn);
+
+ /* Start BA session */
+ session = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
+ get_sta_index(vif, sta_priv));
+ if (session < 0) {
+ ret = session;
+ goto out;
+ }
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -1227,8 +1267,6 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_OPERATIONAL;
spin_unlock_bh(&sta_priv->ampdu_lock);
- wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
- get_sta_index(vif, sta_priv));
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
@@ -1244,6 +1282,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
wcn36xx_err("Unknown AMPDU action\n");
}
+out:
mutex_unlock(&wcn->conf_mutex);
return ret;
@@ -1277,6 +1316,16 @@ static void wcn36xx_ipv6_addr_change(struct ieee80211_hw *hw,
}
#endif
+static void wcn36xx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ if (wcn36xx_dxe_tx_flush(wcn)) {
+ wcn36xx_err("Failed to flush hardware tx queues\n");
+ }
+}
+
static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start,
.stop = wcn36xx_stop,
@@ -1304,6 +1353,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
#if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = wcn36xx_ipv6_addr_change,
#endif
+ .flush = wcn36xx_flush,
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index ed45e2cf039b..caeb68901326 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -722,6 +722,7 @@ int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode,
wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
goto out;
}
+ wcn->sw_scan_init = true;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
@@ -752,6 +753,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
goto out;
}
+ wcn->sw_scan_channel = scan_channel;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
@@ -782,6 +784,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
goto out;
}
+ wcn->sw_scan_channel = 0;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
@@ -823,6 +826,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
goto out;
}
+ wcn->sw_scan_init = false;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
@@ -940,7 +944,7 @@ int wcn36xx_smd_update_channel_list(struct wcn36xx *wcn, struct cfg80211_scan_re
INIT_HAL_MSG((*msg_body), WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ);
- msg_body->num_channel = min_t(u8, req->n_channels, sizeof(msg_body->channels));
+ msg_body->num_channel = min_t(u8, req->n_channels, ARRAY_SIZE(msg_body->channels));
for (i = 0; i < msg_body->num_channel; i++) {
struct wcn36xx_hal_channel_param *param = &msg_body->channels[i];
u32 min_power = WCN36XX_HAL_DEFAULT_MIN_POWER;
@@ -2557,6 +2561,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
&session_id);
if (ret) {
wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
+ ret = -EINVAL;
goto out;
}
@@ -2622,27 +2627,43 @@ out:
return ret;
}
-static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len)
+static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba_info)
{
+ struct wcn36xx_hal_trigger_ba_rsp_candidate *candidate;
struct wcn36xx_hal_trigger_ba_rsp_msg *rsp;
+ int i;
if (len < sizeof(*rsp))
return -EINVAL;
rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf;
+
+ if (rsp->candidate_cnt < 1)
+ return rsp->status ? rsp->status : -EINVAL;
+
+ candidate = (struct wcn36xx_hal_trigger_ba_rsp_candidate *)(buf + sizeof(*rsp));
+
+ for (i = 0; i < STACFG_MAX_TC; i++) {
+ ba_info[i] = candidate->ba_info[i];
+ }
+
return rsp->status;
}
-int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id)
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
+ struct add_ba_info ba_info[STACFG_MAX_TC];
int ret;
+ if (tid >= STACFG_MAX_TC)
+ return -EINVAL;
+
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
- msg_body.session_id = session_id;
+ msg_body.session_id = 0; /* not really used */
msg_body.candidate_cnt = 1;
msg_body.header.len += sizeof(*candidate);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2657,13 +2678,17 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 sessio
wcn36xx_err("Sending hal_trigger_ba failed\n");
goto out;
}
- ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len, ba_info);
if (ret) {
wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
+
+ if (ssn)
+ *ssn = ba_info[tid].starting_seq_num;
+
return ret;
}
@@ -2732,7 +2757,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
tmp->bss_index);
vif = wcn36xx_priv_to_vif(tmp);
- ieee80211_connection_loss(vif);
+ ieee80211_beacon_loss(vif);
}
return 0;
}
@@ -2747,7 +2772,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
rsp->bss_index);
vif = wcn36xx_priv_to_vif(tmp);
- ieee80211_connection_loss(vif);
+ ieee80211_beacon_loss(vif);
return 0;
}
}
@@ -3168,6 +3193,91 @@ out:
return ret;
}
+#define BEACON_FILTER(eid, presence, offs, val, mask, ref_val) \
+ { \
+ .element_id = eid, \
+ .check_ie_presence = presence, \
+ .offset = offs, \
+ .value = val, \
+ .bitmask = mask, \
+ .ref = ref_val, \
+ }
+
+static const struct beacon_filter_ie bcn_filter_ies[] = {
+ BEACON_FILTER(WLAN_EID_DS_PARAMS, 0, 0, 0,
+ WCN36XX_FILTER_IE_DS_CHANNEL_MASK, 0),
+ BEACON_FILTER(WLAN_EID_ERP_INFO, 0, 0, 0,
+ WCN36XX_FILTER_IE_ERP_FILTER_MASK, 0),
+ BEACON_FILTER(WLAN_EID_EDCA_PARAM_SET, 0, 0, 0,
+ WCN36XX_FILTER_IE_EDCA_FILTER_MASK, 0),
+ BEACON_FILTER(WLAN_EID_QOS_CAPA, 0, 0, 0,
+ WCN36XX_FILTER_IE_QOS_FILTER_MASK, 0),
+ BEACON_FILTER(WLAN_EID_CHANNEL_SWITCH, 1, 0, 0,
+ WCN36XX_FILTER_IE_CHANNEL_SWITCH_MASK, 0),
+ BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 0, 0,
+ WCN36XX_FILTER_IE_HT_BYTE0_FILTER_MASK, 0),
+ BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 2, 0,
+ WCN36XX_FILTER_IE_HT_BYTE2_FILTER_MASK, 0),
+ BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 5, 0,
+ WCN36XX_FILTER_IE_HT_BYTE5_FILTER_MASK, 0),
+ BEACON_FILTER(WLAN_EID_PWR_CONSTRAINT, 0, 0, 0,
+ WCN36XX_FILTER_IE_PWR_CONSTRAINT_MASK, 0),
+ BEACON_FILTER(WLAN_EID_OPMODE_NOTIF, 0, 0, 0,
+ WCN36XX_FILTER_IE_OPMODE_NOTIF_MASK, 0),
+ BEACON_FILTER(WLAN_EID_VHT_OPERATION, 0, 0, 0,
+ WCN36XX_FILTER_IE_VHTOP_CHWIDTH_MASK, 0),
+ BEACON_FILTER(WLAN_EID_RSN, 1, 0, 0,
+ WCN36XX_FILTER_IE_RSN_MASK, 0),
+ BEACON_FILTER(WLAN_EID_VENDOR_SPECIFIC, 1, 0, 0,
+ WCN36XX_FILTER_IE_VENDOR_MASK, 0),
+};
+
+int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_add_bcn_filter_req_msg msg_body, *body;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ u8 *payload;
+ size_t payload_size;
+ int ret;
+
+ if (!get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BCN_FILTER_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ body = (struct wcn36xx_hal_add_bcn_filter_req_msg *)wcn->hal_buf;
+ body->capability_info = vif->bss_conf.assoc_capability;
+ body->capability_mask = WCN36XX_FILTER_CAPABILITY_MASK;
+ body->beacon_interval = vif->bss_conf.beacon_int;
+ body->ie_num = ARRAY_SIZE(bcn_filter_ies);
+ body->bss_index = vif_priv->bss_index;
+
+ payload = ((u8 *)body) + body->header.len;
+ payload_size = sizeof(bcn_filter_ies);
+ memcpy(payload, &bcn_filter_ies, payload_size);
+
+ body->header.len += payload_size;
+
+ ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending add bcn_filter failed\n");
+ goto out;
+ }
+
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("add bcn filter response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
void *buf, int len, void *priv, u32 addr)
{
@@ -3223,6 +3333,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_ENTER_IMPS_RSP:
case WCN36XX_HAL_EXIT_IMPS_RSP:
case WCN36XX_HAL_UPDATE_CHANNEL_LIST_RSP:
+ case WCN36XX_HAL_ADD_BCN_FILTER_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
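
wcn36xx_smd_trigger_ba_rsp() above locates a candidate block that the firmware appends after the fixed response header and copies out its per-TID BA info. A standalone sketch of that parsing pattern, with simplified stand-in types (the field layout here is assumed for illustration, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_TC 8	/* stand-in for STACFG_MAX_TC */

struct ba_info { uint16_t starting_seq_num; };

struct trigger_ba_rsp {			/* fixed header */
	uint32_t status;
	uint32_t candidate_cnt;
};

struct trigger_ba_rsp_candidate {	/* trailing block */
	uint8_t sta_addr[6];
	struct ba_info ba_info[MAX_TC];
};

static int parse_trigger_ba_rsp(const void *buf, size_t len,
				struct ba_info *out)
{
	const struct trigger_ba_rsp *rsp = buf;
	const struct trigger_ba_rsp_candidate *cand;

	/* Validate both the header and the trailing block before use. */
	if (len < sizeof(*rsp) + sizeof(*cand))
		return -1;
	if (rsp->candidate_cnt < 1)
		return rsp->status ? (int)rsp->status : -1;

	/* The candidate sits immediately after the fixed header. */
	cand = (const void *)((const uint8_t *)buf + sizeof(*rsp));
	memcpy(out, cand->ba_info, sizeof(cand->ba_info));
	return (int)rsp->status;
}

int main(void)
{
	uint8_t buf[sizeof(struct trigger_ba_rsp) +
		    sizeof(struct trigger_ba_rsp_candidate)] = { 0 };
	struct ba_info info[MAX_TC];

	((struct trigger_ba_rsp *)buf)->candidate_cnt = 1;
	printf("status=%d ssn=%u\n",
	       parse_trigger_ba_rsp(buf, sizeof(buf), info),
	       info[0].starting_seq_num);
	return 0;
}
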
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 88e045dad8f3..957cfa87fbde 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -137,7 +137,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u8 sta_index);
int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id);
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index);
-int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id);
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
@@ -167,4 +167,7 @@ int wcn36xx_smd_host_resume(struct wcn36xx *wcn);
int wcn36xx_smd_enter_imps(struct wcn36xx *wcn);
int wcn36xx_smd_exit_imps(struct wcn36xx *wcn);
+int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+
#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 75951ccbc840..dd58dde8c836 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -272,7 +272,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
const struct wcn36xx_rate *rate;
struct ieee80211_hdr *hdr;
struct wcn36xx_rx_bd *bd;
- struct ieee80211_supported_band *sband;
u16 fc, sn;
/*
@@ -314,8 +313,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
fc = __le16_to_cpu(hdr->frame_control);
sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
- status.freq = WCN36XX_CENTER_FREQ(wcn);
- status.band = WCN36XX_BAND(wcn);
status.mactime = 10;
status.signal = -get_rssi0(bd);
status.antenna = 1;
@@ -327,18 +324,36 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
+ if (bd->scan_learn) {
+ /* If packet originates from hardware scanning, extract the
+ * band/channel from bd descriptor.
+ */
+ u8 hwch = (bd->reserved0 << 4) + bd->rx_ch;
+
+ if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) {
+ status.band = NL80211_BAND_5GHZ;
+ status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1],
+ status.band);
+ } else {
+ status.band = NL80211_BAND_2GHZ;
+ status.freq = ieee80211_channel_to_frequency(hwch, status.band);
+ }
+ } else {
+ status.band = WCN36XX_BAND(wcn);
+ status.freq = WCN36XX_CENTER_FREQ(wcn);
+ }
+
if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
rate = &wcn36xx_rate_table[bd->rate_id];
status.encoding = rate->encoding;
status.enc_flags = rate->encoding_flags;
status.bw = rate->bw;
status.rate_idx = rate->mcs_or_legacy_index;
- sband = wcn->hw->wiphy->bands[status.band];
status.nss = 1;
if (status.band == NL80211_BAND_5GHZ &&
status.encoding == RX_ENC_LEGACY &&
- status.rate_idx >= sband->n_bitrates) {
+ status.rate_idx >= 4) {
/* no dsss rates in 5Ghz rates table */
status.rate_idx -= 4;
}
@@ -353,22 +368,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
ieee80211_is_probe_resp(hdr->frame_control))
status.boottime_ns = ktime_get_boottime_ns();
- if (bd->scan_learn) {
- /* If packet originates from hardware scanning, extract the
- * band/channel from bd descriptor.
- */
- u8 hwch = (bd->reserved0 << 4) + bd->rx_ch;
-
- if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) {
- status.band = NL80211_BAND_5GHZ;
- status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1],
- status.band);
- } else {
- status.band = NL80211_BAND_2GHZ;
- status.freq = ieee80211_channel_to_frequency(hwch, status.band);
- }
- }
-
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
if (ieee80211_is_beacon(hdr->frame_control)) {
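
The txrx.c hunks move the scan-learn band/channel decode ahead of the rate lookup, so status.band is already set when the 5 GHz DSSS adjustment runs. A small standalone sketch of the decode itself, with a four-entry stand-in for the driver's ab_rx_ch_map (assumed here for illustration only):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for ab_rx_ch_map; the real table maps hardware indices to
 * 5 GHz channel numbers. */
static const uint8_t rx_ch_map_5g[] = { 36, 40, 44, 48 };

static unsigned int decode_hw_channel(uint8_t reserved0, uint8_t rx_ch,
				      uint8_t rf_band)
{
	/* The hardware channel is split across two BD fields. */
	uint8_t hwch = (uint8_t)((reserved0 << 4) + rx_ch);

	/* Mirror the driver's test: rf_band != 1 plus a valid map index
	 * means a 5 GHz channel; anything else is taken as 2.4 GHz. */
	if (rf_band != 1 && hwch >= 1 && hwch <= sizeof(rx_ch_map_5g))
		return rx_ch_map_5g[hwch - 1];
	return hwch;
}

int main(void)
{
	printf("%u\n", decode_hw_channel(0, 2, 0));	/* 40: 5 GHz map */
	printf("%u\n", decode_hw_channel(0, 6, 1));	/* 6: 2.4 GHz */
	return 0;
}
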
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 1c8d918137da..fbd0558c2c19 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -248,6 +248,7 @@ struct wcn36xx {
struct cfg80211_scan_request *scan_req;
bool sw_scan;
u8 sw_scan_opchannel;
+ bool sw_scan_init;
u8 sw_scan_channel;
struct ieee80211_vif *sw_scan_vif;
struct mutex scan_lock;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index fb727778312c..ba52318615ae 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -3901,6 +3901,24 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
cfg->wowl.active = true;
}
+static int brcmf_keepalive_start(struct brcmf_if *ifp, unsigned int interval)
+{
+ struct brcmf_mkeep_alive_pkt_le kalive = {0};
+ int ret = 0;
+
+ /* Configure Null function/data keepalive */
+ kalive.version = cpu_to_le16(1);
+ kalive.period_msec = cpu_to_le32(interval * MSEC_PER_SEC);
+ kalive.len_bytes = cpu_to_le16(0);
+ kalive.keep_alive_id = 0;
+
+ ret = brcmf_fil_iovar_data_set(ifp, "mkeep_alive", &kalive, sizeof(kalive));
+ if (ret)
+ brcmf_err("keep-alive packet config failed, ret=%d\n", ret);
+
+ return ret;
+}
+
static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
struct cfg80211_wowlan *wowl)
{
@@ -3947,6 +3965,9 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
} else {
/* Configure WOWL parameters */
brcmf_configure_wowl(cfg, ifp, wowl);
+
+ /* Prevent disassociation due to inactivity with keep-alive */
+ brcmf_keepalive_start(ifp, 30);
}
exit:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index ff2ef557f0ea..e69d1e56996f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -1052,4 +1052,23 @@ struct brcmf_gscan_config {
struct brcmf_gscan_bucket_config bucket[1];
};
+/**
+ * struct brcmf_mkeep_alive_pkt_le - configuration data for keep-alive frame.
+ *
+ * @version: version for mkeep_alive
+ * @length: length of fixed parameters in the structure.
+ * @period_msec: keep-alive period in milliseconds.
+ * @len_bytes: size of the data.
+ * @keep_alive_id: ID (0 - 3).
+ * @data: keep-alive frame data.
+ */
+struct brcmf_mkeep_alive_pkt_le {
+ __le16 version;
+ __le16 length;
+ __le32 period_msec;
+ __le16 len_bytes;
+ u8 keep_alive_id;
+ u8 data[0];
+} __packed;
+
#endif /* FWIL_TYPES_H_ */
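
The new brcmf_mkeep_alive_pkt_le uses explicit little-endian fields because the iovar payload is handed to firmware byte for byte. A host-endianness-independent sketch of building the same 11-byte fixed part (values match brcmf_keepalive_start above; the put_le helpers are illustrative, not driver API):

#include <assert.h>
#include <stdint.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
}

static void put_le32(uint8_t *p, uint32_t v)
{
	put_le16(p, (uint16_t)v);
	put_le16(p + 2, (uint16_t)(v >> 16));
}

int main(void)
{
	uint8_t iov[11] = { 0 };	/* fixed part of mkeep_alive */
	unsigned int interval_s = 30;

	put_le16(&iov[0], 1);			/* version */
	put_le16(&iov[2], sizeof(iov));		/* length of fixed part */
	put_le32(&iov[4], interval_s * 1000);	/* period_msec */
	put_le16(&iov[8], 0);			/* len_bytes: null keepalive */
	iov[10] = 0;				/* keep_alive_id, 0 - 3 */

	/* 30000 ms == 0x7530, stored least-significant byte first */
	assert(iov[4] == 0x30 && iov[5] == 0x75);
	return 0;
}
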
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
index 2f3c451148db..2f8908074303 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
@@ -4,6 +4,8 @@
*/
#include <asm/unaligned.h>
+
+#include <linux/math.h>
#include <linux/string.h>
#include <linux/bug.h>
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 45594f003ef7..452d08545d31 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -4672,7 +4672,7 @@ static ssize_t proc_write(struct file *file,
static int proc_status_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *apriv = dev->ml_priv;
CapabilityRid cap_rid;
StatusRid status_rid;
@@ -4756,7 +4756,7 @@ static int proc_stats_rid_open(struct inode *inode,
u16 rid)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *apriv = dev->ml_priv;
StatsRid stats;
int i, j;
@@ -4819,7 +4819,7 @@ static inline int sniffing_mode(struct airo_info *ai)
static void proc_config_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
char *line;
@@ -5030,7 +5030,7 @@ static const char *get_rmode(__le16 mode)
static int proc_config_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i;
__le16 mode;
@@ -5120,7 +5120,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
static void proc_SSID_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
SsidRid SSID_rid;
int i;
@@ -5156,7 +5156,7 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
static void proc_APList_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
APListRid *APList_rid = &ai->APList;
int i;
@@ -5280,7 +5280,7 @@ static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
static void proc_wepkey_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i, rc;
char key[16];
@@ -5331,7 +5331,7 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file)
static int proc_wepkey_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
char *ptr;
WepKeyRid wkr;
@@ -5379,7 +5379,7 @@ static int proc_wepkey_open(struct inode *inode, struct file *file)
static int proc_SSID_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i;
char *ptr;
@@ -5423,7 +5423,7 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
static int proc_APList_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i;
char *ptr;
@@ -5462,7 +5462,7 @@ static int proc_APList_open(struct inode *inode, struct file *file)
static int proc_BSSList_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
char *ptr;
BSSListRid BSSList_rid;
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 23037bfc9e4c..5727c7c00a28 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -2303,7 +2303,7 @@ static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
ssid);
}
-static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
+static int ipw_send_adapter_address(struct ipw_priv *priv, const u8 * mac)
{
if (!priv || !mac) {
IPW_ERROR("Invalid args\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 418ae4f870ab..c21c0c68849a 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -2,6 +2,7 @@
config IWLWIFI
tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
depends on PCI && HAS_IOMEM && CFG80211
+ depends on IWLMEI || !IWLMEI
select FW_LOADER
help
Select to build the driver supporting the:
@@ -146,3 +147,28 @@ config IWLWIFI_DEVICE_TRACING
endmenu
endif
+
+config IWLMEI
+ tristate "Intel Management Engine communication over WLAN"
+ depends on INTEL_MEI
+ depends on PM
+ help
+ Enables the iwlmei kernel module.
+
+ CSME stands for Converged Security and Management Engine. It is a CPU
+ on the chipset and runs a dedicated firmware. AMT (Active Management
+ Technology) is one of the applications that run on that CPU. AMT
+ allows the platform to be controlled remotely.
+
+ This kernel module allows communication with the Intel Management
+ Engine over Wi-Fi. This is supported starting from Tiger Lake
+ platforms and has been tested on 9260 devices only.
+ If AMT is configured not to use the wireless device, this module is
+ harmless (and useless).
+ Enabling this option on a platform that has a different device and
+ has wireless enabled on AMT can prevent Wi-Fi from working correctly.
+
+ For more information see
+ <https://software.intel.com/en-us/manageability/>
+
+ If unsure, say N.
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 0d4656efe908..75a703eb1bdf 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -30,5 +30,6 @@ ccflags-y += -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
+obj-$(CONFIG_IWLMEI) += mei/
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 1572097bccf1..330ef04ca51a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -9,7 +9,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 67
+#define IWL_22000_UCODE_API_MAX 69
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -54,7 +54,13 @@
#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-"
#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-"
#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-"
-#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm7-a0-"
+#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-"
+#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-"
+#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-"
+#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-"
+#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-"
+#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-"
+#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
@@ -113,6 +119,18 @@
IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BZ_Z_GF_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_Z_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
+ IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
+ IWL_BNJ_A_FM4_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_BNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(api) \
+ IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \
+ IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -234,7 +252,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dccm2_len = IWL_22000_DCCM2_LEN, \
.smem_offset = IWL_22000_SMEM_OFFSET, \
.smem_len = IWL_22000_SMEM_LEN, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, \
.apmg_not_supported = true, \
.trans.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
@@ -626,7 +644,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = {
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -639,7 +657,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)",
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -652,7 +670,7 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -665,7 +683,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -696,13 +714,6 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = {
.num_rbds = IWL_NUM_RBDS_NON_HE,
};
-const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX210 160MHz",
- .fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.name = iwl_ax211_name,
.fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
@@ -879,6 +890,47 @@ const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = {
+ .fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0 = {
+ .fw_name_pre = IWL_BNJ_A_FM_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0 = {
+ .fw_name_pre = IWL_BNJ_A_FM4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0 = {
+ .fw_name_pre = IWL_BNJ_A_GF_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0 = {
+ .fw_name_pre = IWL_BNJ_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = {
+ .fw_name_pre = IWL_BNJ_A_HR_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -907,3 +959,8 @@ MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index fbd57a2b2bd5..90b9becd1673 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1974,12 +1974,8 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
/* SKU Control */
iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
- CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
- (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
- CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
- (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
- CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH,
+ CSR_HW_REV_STEP_DASH(priv->trans->hw_rev));
/* write radio config values to register */
if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index bf431fa4fe81..790c96df58cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -242,17 +242,16 @@ found:
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- __le32 *block_list_array,
- int *block_list_size)
+ struct iwl_tas_config_cmd_v3 *cmd)
{
union acpi_object *wifi_pkg, *data;
- int ret, tbl_rev, i;
- bool enabled;
+ int ret, tbl_rev, i, block_list_size, enabled;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
+ /* try to read WTAS table revision 1 or revision 0 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WTAS_WIFI_DATA_SIZE,
&tbl_rev);
@@ -261,40 +260,54 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
goto out_free;
}
- if (wifi_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
- tbl_rev != 0) {
+ if (tbl_rev == 1 && wifi_pkg->package.elements[1].type ==
+ ACPI_TYPE_INTEGER) {
+ u32 tas_selection =
+ (u32)wifi_pkg->package.elements[1].integer.value;
+ u16 override_iec =
+ (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
+ u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
+ ACPI_WTAS_ENABLE_IEC_POS;
+
+ enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
+ cmd->override_tas_iec = cpu_to_le16(override_iec);
+ cmd->enable_tas_iec = cpu_to_le16(enabled_iec);
+
+ } else if (tbl_rev == 0 &&
+ wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
+ enabled = !!wifi_pkg->package.elements[1].integer.value;
+ } else {
ret = -EINVAL;
goto out_free;
}
- enabled = !!wifi_pkg->package.elements[1].integer.value;
-
if (!enabled) {
- *block_list_size = -1;
IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
ret = 0;
goto out_free;
}
+ IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].integer.value >
APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
- wifi_pkg->package.elements[1].integer.value);
+ wifi_pkg->package.elements[2].integer.value);
ret = -EINVAL;
goto out_free;
}
- *block_list_size = wifi_pkg->package.elements[2].integer.value;
+ block_list_size = wifi_pkg->package.elements[2].integer.value;
+ cmd->block_list_size = cpu_to_le32(block_list_size);
- IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *block_list_size);
- if (*block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
+ if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
- *block_list_size);
+ block_list_size);
ret = -EINVAL;
goto out_free;
}
- for (i = 0; i < *block_list_size; i++) {
+ for (i = 0; i < block_list_size; i++) {
u32 country;
if (wifi_pkg->package.elements[3 + i].type !=
@@ -306,11 +319,11 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- block_list_array[i] = cpu_to_le32(country);
+ cmd->block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
- ret = 0;
+ ret = 1;
out_free:
kfree(data);
return ret;
@@ -789,7 +802,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
* looking up in ACPI
*/
if (wifi_pkg->package.count !=
- min_size + profile_size * num_profiles) {
+ hdr_size + profile_size * num_profiles) {
ret = -EINVAL;
goto out_free;
}
@@ -852,6 +865,8 @@ read_table:
}
}
+ fwrt->geo_num_profiles = num_profiles;
+ fwrt->geo_enabled = true;
ret = 0;
out_free:
kfree(data);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 4aaa8a6b071b..22b3c665f91a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -65,10 +65,19 @@
#define ACPI_ECKV_WIFI_DATA_SIZE 2
/*
- * 1 type, 1 enabled, 1 block list size, 16 block list array
+ * TAS size: 1 element for type,
+ * 1 element for enabled field,
+ * 1 element for block list size,
+ * 16 elements for block list array
*/
#define APCI_WTAS_BLACK_LIST_MAX 16
#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
+#define ACPI_WTAS_ENABLED_MSK 0x1
+#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2
+#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
+#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
+#define ACPI_WTAS_ENABLE_IEC_POS 0x2
+
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V1) + 2)
@@ -105,6 +114,11 @@ struct iwl_geo_profile {
struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
};
+/* Same thing as with SAR, all revisions fit in revision 2 */
+struct iwl_ppag_chain {
+ s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
+};
+
enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_QUERY = 0,
DSM_FUNC_DISABLE_SRD = 1,
@@ -198,8 +212,8 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset *table,
u32 n_bands, u32 n_profiles);
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array,
- int *block_list_size);
+int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_config_cmd_v3 *cmd);
__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
@@ -280,8 +294,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
}
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- __le32 *block_list_array,
- int *block_list_size)
+ struct iwl_tas_config_cmd_v3 *cmd)
{
return -ENOENT;
}
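
The revision-1 WTAS path above packs three flags into a single selection integer, decoded with the new ACPI_WTAS_* masks and positions. A self-contained sketch of the same extraction (the sample selection value is hypothetical; the mask values mirror acpi.h below):

#include <assert.h>
#include <stdint.h>

#define WTAS_ENABLED_MSK	0x1
#define WTAS_OVERRIDE_IEC_MSK	0x2
#define WTAS_OVERRIDE_IEC_POS	0x1
#define WTAS_ENABLE_IEC_MSK	0x4
#define WTAS_ENABLE_IEC_POS	0x2

int main(void)
{
	/* Hypothetical revision-1 selection word: TAS enabled and IEC
	 * override requested, with IEC itself left disabled. */
	uint32_t tas_selection = 0x3;

	int enabled = tas_selection & WTAS_ENABLED_MSK;
	uint16_t override_iec = (tas_selection & WTAS_OVERRIDE_IEC_MSK) >>
				WTAS_OVERRIDE_IEC_POS;
	uint16_t enable_iec = (tas_selection & WTAS_ENABLE_IEC_MSK) >>
			      WTAS_ENABLE_IEC_POS;

	assert(enabled == 1 && override_iec == 1 && enable_iec == 0);
	return 0;
}
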
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index c840a97e6a62..e00ab21e7358 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -97,6 +97,21 @@ struct iwl_alive_ntf_v5 {
struct iwl_sku_id sku_id;
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_5 */
+struct iwl_imr_alive_info {
+ __le64 base_addr;
+ __le32 size;
+ __le32 enabled;
+} __packed; /* IMR_ALIVE_INFO_API_S_VER_1 */
+
+struct iwl_alive_ntf_v6 {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data[2];
+ struct iwl_umac_alive umac_data;
+ struct iwl_sku_id sku_id;
+ struct iwl_imr_alive_info imr;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
+
/**
* enum iwl_extended_cfg_flag - commands driver may send before
* finishing init flow
@@ -143,15 +158,6 @@ enum iwl_card_state_flags {
};
/**
- * struct iwl_radio_version_notif - information on the card state
- * ( CARD_STATE_NOTIFICATION = 0xa1 )
- * @flags: &enum iwl_card_state_flags
- */
-struct iwl_card_state_notif {
- __le32 flags;
-} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
-
-/**
* enum iwl_error_recovery_flags - flags for error recovery cmd
* @ERROR_RECOVERY_UPDATE_DB: update db from blob sent
* @ERROR_RECOVERY_END_OF_RECOVERY: end of recovery
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index ee6b5844a871..0703e41403a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -51,7 +51,7 @@ enum iwl_legacy_cmds {
* @UCODE_ALIVE_NTFY:
* Alive data from the firmware, as described in
* &struct iwl_alive_ntf_v3 or &struct iwl_alive_ntf_v4 or
- * &struct iwl_alive_ntf_v5.
+ * &struct iwl_alive_ntf_v5 or &struct iwl_alive_ntf_v6.
*/
UCODE_ALIVE_NTFY = 0x1,
@@ -72,7 +72,8 @@ enum iwl_legacy_cmds {
/**
* @PHY_CONTEXT_CMD:
- * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd.
+ * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd
+ * or &struct iwl_phy_context_cmd_v1.
*/
PHY_CONTEXT_CMD = 0x8,
@@ -90,7 +91,8 @@ enum iwl_legacy_cmds {
/**
* @SCAN_CFG_CMD:
- * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config
+ * uses &struct iwl_scan_config_v1, &struct iwl_scan_config_v2
+ * or &struct iwl_scan_config
*/
SCAN_CFG_CMD = 0xc,
@@ -356,7 +358,7 @@ enum iwl_legacy_cmds {
* &struct iwl_notif_statistics_v11,
* &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics,
- * &struct iwl_statistics_operational_ntfy
+ * &struct iwl_statistics_operational_ntfy_ver_14
*/
STATISTICS_CMD = 0x9c,
@@ -365,6 +367,7 @@ enum iwl_legacy_cmds {
* one of &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics_v11,
* &struct iwl_notif_statistics,
+ * &struct iwl_statistics_operational_ntfy_ver_14,
* &struct iwl_statistics_operational_ntfy
*/
STATISTICS_NOTIFICATION = 0x9d,
@@ -383,13 +386,6 @@ enum iwl_legacy_cmds {
REDUCE_TX_POWER_CMD = 0x9f,
/**
- * @CARD_STATE_NOTIFICATION:
- * Card state (RF/CT kill) notification,
- * uses &struct iwl_card_state_notif
- */
- CARD_STATE_NOTIFICATION = 0xa1,
-
- /**
* @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
*/
MISSED_BEACONS_NOTIFICATION = 0xa2,
@@ -612,6 +608,11 @@ enum iwl_system_subcmd_ids {
* @RFI_GET_FREQ_TABLE_CMD: &struct iwl_rfi_config_cmd
*/
RFI_GET_FREQ_TABLE_CMD = 0xc,
+
+ /**
+ * @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd
+ */
+ SYSTEM_FEATURES_CONTROL_CMD = 0xd,
};
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 1503119ea910..4cd9ab23954e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -554,7 +554,7 @@ struct iwl_wowlan_gtk_status_v1 {
} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
/**
- * struct iwl_wowlan_gtk_status - GTK status
+ * struct iwl_wowlan_gtk_status_v2 - GTK status
* @key: GTK material
* @key_len: GTK length, if set to 0, the key is not available
* @key_flags: information about the key:
@@ -565,7 +565,7 @@ struct iwl_wowlan_gtk_status_v1 {
* @tkip_mic_key: TKIP RX MIC key
* @rsc: TSC RSC counters
*/
-struct iwl_wowlan_gtk_status {
+struct iwl_wowlan_gtk_status_v2 {
u8 key[WOWLAN_KEY_MAX_SIZE];
u8 key_len;
u8 key_flags;
@@ -574,6 +574,41 @@ struct iwl_wowlan_gtk_status {
struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */
+/**
+ * struct iwl_wowlan_all_rsc_tsc_v5 - key counters
+ * @ucast_rsc: unicast RSC values
+ * @mcast_rsc: multicast RSC values (per key map value)
+ * @sta_id: station ID
+ * @mcast_key_id_map: map of key id to @mcast_rsc entry
+ */
+struct iwl_wowlan_all_rsc_tsc_v5 {
+ __le64 ucast_rsc[IWL_MAX_TID_COUNT];
+ __le64 mcast_rsc[2][IWL_MAX_TID_COUNT];
+ __le32 sta_id;
+ u8 mcast_key_id_map[4];
+} __packed; /* ALL_TSC_RSC_API_S_VER_5 */
+
+/**
+ * struct iwl_wowlan_gtk_status_v3 - GTK status
+ * @key: GTK material
+ * @key_len: GTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0:1]: key index assigned by the AP
+ * bits[2:6]: GTK index of the key in the internal DB
+ * bit[7]: Set iff this is the currently used GTK
+ * @reserved: padding
+ * @tkip_mic_key: TKIP RX MIC key
+ * @sc: RSC/TSC counters
+ */
+struct iwl_wowlan_gtk_status_v3 {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 key_len;
+ u8 key_flags;
+ u8 reserved[2];
+ u8 tkip_mic_key[IWL_MIC_KEY_SIZE];
+ struct iwl_wowlan_all_rsc_tsc_v5 sc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_3 */
+
#define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
/**
@@ -640,7 +675,7 @@ struct iwl_wowlan_status_v6 {
* @wake_packet: wakeup packet
*/
struct iwl_wowlan_status_v7 {
- struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
@@ -676,7 +711,7 @@ struct iwl_wowlan_status_v7 {
* @wake_packet: wakeup packet
*/
struct iwl_wowlan_status_v9 {
- struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
@@ -693,6 +728,44 @@ struct iwl_wowlan_status_v9 {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */
+/**
+ * struct iwl_wowlan_status_v12 - WoWLAN status
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next.
+ * Reserved if the struct has version >= 10.
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @reserved: unused
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status_v12 {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 tid_tear_down;
+ u8 reserved[3];
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */
+
/* TODO: NetDetect API */
#endif /* __iwl_fw_api_d3_h__ */
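
Several status versions here end with wake_packet[], annotated "can be truncated from _length to _bufsize": wake_packet_length is the original frame size, while wake_packet_bufsize is the number of bytes actually carried in the notification. A trivial sketch of the consumer-side rule this implies (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Only the smaller of the two counts may safely be read from the
 * trailing wake_packet buffer. */
static uint32_t wake_packet_bytes_present(uint32_t length, uint32_t bufsize)
{
	return bufsize < length ? bufsize : length;
}

int main(void)
{
	/* e.g. a 1500-byte wake frame truncated into a 128-byte buffer */
	printf("%u\n", wake_packet_bytes_present(1500, 128));	/* 128 */
	printf("%u\n", wake_packet_bytes_present(60, 128));	/* 60 */
	return 0;
}
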
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 985b0dc5b52a..89236f42c5a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -32,6 +32,11 @@ enum iwl_data_path_subcmd_ids {
STA_HE_CTXT_CMD = 0x7,
/**
+ * @RLC_CONFIG_CMD: &struct iwl_rlc_config_cmd
+ */
+ RLC_CONFIG_CMD = 0x8,
+
+ /**
* @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
*/
RFH_QUEUE_CONFIG_CMD = 0xD,
@@ -195,4 +200,61 @@ struct iwl_thermal_dual_chain_request {
__le32 event;
} __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */
+enum iwl_rlc_chain_info {
+ IWL_RLC_CHAIN_INFO_DRIVER_FORCE = BIT(0),
+ IWL_RLC_CHAIN_INFO_VALID = 0x000e,
+ IWL_RLC_CHAIN_INFO_FORCE = 0x0070,
+ IWL_RLC_CHAIN_INFO_FORCE_MIMO = 0x0380,
+ IWL_RLC_CHAIN_INFO_COUNT = 0x0c00,
+ IWL_RLC_CHAIN_INFO_MIMO_COUNT = 0x3000,
+};
+
+/**
+ * struct iwl_rlc_properties - RLC properties
+ * @rx_chain_info: RX chain info, &enum iwl_rlc_chain_info
+ * @reserved: reserved
+ */
+struct iwl_rlc_properties {
+ __le32 rx_chain_info;
+ __le32 reserved;
+} __packed; /* RLC_PROPERTIES_S_VER_1 */
+
+enum iwl_sad_mode {
+ IWL_SAD_MODE_ENABLED = BIT(0),
+ IWL_SAD_MODE_DEFAULT_ANT_MSK = 0x6,
+ IWL_SAD_MODE_DEFAULT_ANT_FW = 0x0,
+ IWL_SAD_MODE_DEFAULT_ANT_A = 0x2,
+ IWL_SAD_MODE_DEFAULT_ANT_B = 0x4,
+};
+
+/**
+ * struct iwl_sad_properties - SAD properties
+ * @chain_a_sad_mode: chain A SAD mode, &enum iwl_sad_mode
+ * @chain_b_sad_mode: chain B SAD mode, &enum iwl_sad_mode
+ * @mac_id: MAC index
+ * @reserved: reserved
+ */
+struct iwl_sad_properties {
+ __le32 chain_a_sad_mode;
+ __le32 chain_b_sad_mode;
+ __le32 mac_id;
+ __le32 reserved;
+} __packed;
+
+/**
+ * struct iwl_rlc_config_cmd - RLC configuration
+ * @phy_id: PHY index
+ * @rlc: RLC properties, &struct iwl_rlc_properties
+ * @sad: SAD (single antenna diversity) options, &struct iwl_sad_properties
+ * @flags: flags, &enum iwl_rlc_flags
+ * @reserved: reserved
+ */
+struct iwl_rlc_config_cmd {
+ __le32 phy_id;
+ struct iwl_rlc_properties rlc;
+ struct iwl_sad_properties sad;
+ u8 flags;
+ u8 reserved[3];
+} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
+
#endif /* __iwl_fw_api_datapath_h__ */
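
enum iwl_rlc_chain_info above defines rx_chain_info as several multi-bit fields inside one __le32. A standalone sketch of composing such a word with a FIELD_PREP-style helper (the chosen field values are hypothetical; the mask values mirror the enum):

#include <assert.h>
#include <stdint.h>

#define RLC_DRIVER_FORCE	0x0001
#define RLC_VALID_MSK		0x000e	/* valid antenna bits */
#define RLC_COUNT_MSK		0x0c00	/* rx chain count */

/* Shift a value up to the mask's lowest set bit, in the spirit of the
 * kernel's FIELD_PREP(). */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val * (mask & -mask)) & mask;
}

int main(void)
{
	/* Hypothetical setting: driver-forced, antennas A+B valid,
	 * two RX chains. */
	uint32_t rx_chain_info = RLC_DRIVER_FORCE |
				 field_prep(RLC_VALID_MSK, 0x3) |
				 field_prep(RLC_COUNT_MSK, 2);

	assert(rx_chain_info == 0x0807);
	return 0;
}
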
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 3988f5fea33a..456b7eaac570 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -7,7 +7,6 @@
#include <linux/bitops.h>
-#define IWL_FW_INI_HW_SMEM_REGION_ID 15
#define IWL_FW_INI_MAX_REGION_ID 64
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
@@ -124,6 +123,9 @@ struct iwl_fw_ini_region_internal_buffer {
* @hdr: debug header
* @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
* @type: region type. One of &enum iwl_fw_ini_region_type
+ * @sub_type: region sub type
+ * @sub_type_ver: region sub type version
+ * @reserved: not in use
* @name: region name
* @dev_addr: device address configuration. Used by
* &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC,
@@ -146,7 +148,10 @@ struct iwl_fw_ini_region_internal_buffer {
struct iwl_fw_ini_region_tlv {
struct iwl_fw_ini_header hdr;
__le32 id;
- __le32 type;
+ u8 type;
+ u8 sub_type;
+ u8 sub_type_ver;
+ u8 reserved;
u8 name[IWL_FW_INI_MAX_NAME];
union {
struct iwl_fw_ini_region_dev_addr dev_addr;
@@ -306,6 +311,7 @@ enum iwl_fw_ini_config_set_type {
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
+ * @IWL_FW_INI_ALLOCATION_ID_DBGC4: allocation meant for DBGC4 configuration
* @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids
*/
enum iwl_fw_ini_allocation_id {
@@ -313,6 +319,7 @@ enum iwl_fw_ini_allocation_id {
IWL_FW_INI_ALLOCATION_ID_DBGC1,
IWL_FW_INI_ALLOCATION_ID_DBGC2,
IWL_FW_INI_ALLOCATION_ID_DBGC3,
+ IWL_FW_INI_ALLOCATION_ID_DBGC4,
IWL_FW_INI_ALLOCATION_NUM,
}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
@@ -379,6 +386,17 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_NUM
}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
+enum iwl_fw_ini_region_device_memory_subtype {
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM = 1,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE = 5,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE = 7,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE = 10,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE = 14,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE = 16,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE = 18,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE = 20,
+}; /* FW_TLV_DEBUG_REGION_DEVICE_MEMORY_SUBTYPE_API_E */
+
/**
* enum iwl_fw_ini_time_point
*
@@ -465,4 +483,17 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
};
+
+/**
+ * enum iwl_fw_ini_trigger_reset_fw_policy - Determines how to handle reset
+ *
+ * @IWL_FW_INI_RESET_FW_MODE_NOTHING: do not stop or reload the FW (default)
+ * @IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY: stop the FW without reloading it
+ * @IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW: stop the FW and reload it
+ */
+enum iwl_fw_ini_trigger_reset_fw_policy {
+ IWL_FW_INI_RESET_FW_MODE_NOTHING = 0,
+ IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY,
+ IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW
+};
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 3551a3f1c1aa..4949fcf85257 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -34,6 +34,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
TAS_CONFIG = 0x3,
/**
+ * @SAR_OFFSET_MAPPING_TABLE_CMD: &iwl_sar_offset_mapping_cmd
+ */
+ SAR_OFFSET_MAPPING_TABLE_CMD = 0x4,
+
+ /**
* @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy
*/
PNVM_INIT_COMPLETE_NTFY = 0xFE,
@@ -388,18 +393,33 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
-#define IWL_TAS_BLACK_LIST_MAX 16
+#define IWL_TAS_BLOCK_LIST_MAX 16
/**
- * struct iwl_tas_config_cmd - configures the TAS
+ * struct iwl_tas_config_cmd_v2 - configures the TAS
* @block_list_size: size of relevant field in block_list_array
- * @block_list_array: block list countries (without TAS)
+ * @block_list_array: list of countries where TAS must be disabled
*/
-struct iwl_tas_config_cmd {
+struct iwl_tas_config_cmd_v2 {
__le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLACK_LIST_MAX];
+ __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
/**
+ * struct iwl_tas_config_cmd_v3 - configures the TAS
+ * @block_list_size: size of relevant field in block_list_array
+ * @block_list_array: list of countries where TAS must be disabled
+ * @override_tas_iec: indicates whether to override the default IEC regulatory value
+ * @enable_tas_iec: in case override_tas_iec is set -
+ * indicates whether IEC regulatory is enabled or disabled
+ */
+struct iwl_tas_config_cmd_v3 {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ __le16 override_tas_iec;
+ __le16 enable_tas_iec;
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
+
+/**
* enum iwl_lari_configs - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
* @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 68b788b92b7a..e66f77924f83 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -150,11 +150,12 @@ struct iwl_phy_context_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
- /* PHY_CONTEXT_DATA_API_S_VER_3 */
+ /* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */
struct iwl_fw_channel_info ci;
__le32 lmac_id;
- __le32 rxchain_info;
+ __le32 rxchain_info; /* reserved in _VER_4 */
__le32 dsp_cfg_flags;
__le32 reserved;
-} __packed; /* PHY_CONTEXT_CMD_API_VER_3 */
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */
+
#endif /* __iwl_fw_api_phy_ctxt_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 4d671c878bb7..81318208f2f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -419,7 +419,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 {
* struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
- * @table_revision: BIOS table revision.
+ * @table_revision: 0 everywhere except South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v2 {
__le32 ops;
@@ -431,7 +431,7 @@ struct iwl_geo_tx_power_profiles_cmd_v2 {
* struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
- * @table_revision: BIOS table revision.
+ * @table_revision: 0 everywhere except South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v3 {
__le32 ops;
@@ -443,7 +443,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 {
* struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
- * @table_revision: BIOS table revision.
+ * @table_revision: 0 everywhere except South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v4 {
__le32 ops;
@@ -455,7 +455,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 {
* struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
- * @table_revision: BIOS table revision.
+ * @table_revision: 0 everywhere except South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v5 {
__le32 ops;
@@ -503,6 +503,20 @@ union iwl_ppag_table_cmd {
} v2;
} __packed;
+#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
+#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
+
+/**
+ * struct iwl_sar_offset_mapping_cmd - struct for SAR_OFFSET_MAPPING_TABLE_CMD
+ * @offset_map: maps an MCC to a geo SAR group
+ * @reserved: reserved
+ */
+struct iwl_sar_offset_mapping_cmd {
+ u8 offset_map[MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE]
+ [MCC_TO_SAR_OFFSET_TABLE_COL_SIZE];
+ u16 reserved;
+} __packed; /*SAR_OFFSET_MAPPING_TABLE_CMD_API_S*/
+
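For illustration, a minimal userspace sketch (not part of the patch) of the wire layout the new command defines; plain C99 types stand in for the kernel's u8/u16, and the check confirms that the 26x13 map plus the reserved field packs to 340 bytes:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ROW 26	/* MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE */
    #define COL 13	/* MCC_TO_SAR_OFFSET_TABLE_COL_SIZE */

    struct sgom_cmd {
    	uint8_t offset_map[ROW][COL];	/* one byte per MCC cell */
    	uint16_t reserved;
    } __attribute__((packed));

    /* 26 * 13 = 338 map bytes + 2 reserved bytes = 340 on the wire */
    static_assert(sizeof(struct sgom_cmd) == 340, "unexpected wire size");

    int main(void)
    {
    	printf("SGOM command payload: %zu bytes\n", sizeof(struct sgom_cmd));
    	return 0;
    }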
/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index a09081d7ed45..173a6991587b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -116,9 +116,20 @@ enum IWL_TLC_MNG_NSS {
IWL_TLC_NSS_MAX
};
-enum IWL_TLC_HT_BW_RATES {
- IWL_TLC_HT_BW_NONE_160,
- IWL_TLC_HT_BW_160,
+/**
+ * enum IWL_TLC_MCS_PER_BW - mcs index per BW
+ * @IWL_TLC_MCS_PER_BW_80: mcs for bw - 20 MHz, 40 MHz, 80 MHz
+ * @IWL_TLC_MCS_PER_BW_160: mcs for bw - 160 MHz
+ * @IWL_TLC_MCS_PER_BW_320: mcs for bw - 320 MHz
+ * @IWL_TLC_MCS_PER_BW_NUM_V3: number of entries up to version 3
+ * @IWL_TLC_MCS_PER_BW_NUM_V4: number of entries from version 4
+ */
+enum IWL_TLC_MCS_PER_BW {
+ IWL_TLC_MCS_PER_BW_80,
+ IWL_TLC_MCS_PER_BW_160,
+ IWL_TLC_MCS_PER_BW_320,
+ IWL_TLC_MCS_PER_BW_NUM_V3 = IWL_TLC_MCS_PER_BW_160 + 1,
+ IWL_TLC_MCS_PER_BW_NUM_V4 = IWL_TLC_MCS_PER_BW_320 + 1,
};
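The _NUM_V3/_NUM_V4 sentinels are simply the old and new column counts for the rate table; a tiny standalone rendering of the same pattern:

    #include <stdio.h>

    enum mcs_per_bw {
    	BW_80,			/* the 20/40/80 MHz bucket */
    	BW_160,
    	BW_320,
    	NUM_V3 = BW_160 + 1,	/* 2 columns in _VER_3 */
    	NUM_V4 = BW_320 + 1,	/* 3 columns in _VER_4 */
    };

    int main(void)
    {
    	printf("ht_rates columns: v3=%d v4=%d\n", NUM_V3, NUM_V4);
    	return 0;
    }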
/**
@@ -131,8 +142,8 @@ enum IWL_TLC_HT_BW_RATES {
* @amsdu: TX amsdu is supported
* @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
* @non_ht_rates: bitmap of supported legacy rates
- * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
- * pair (0 - 80mhz width and below, 1 - 160mhz).
+ * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per &enum IWL_TLC_MCS_PER_BW
+ * <nss, channel-width> pair (0 - 80 MHz width and below, 1 - 160 MHz).
* @max_mpdu_len: max MPDU length, in bytes
* @sgi_ch_width_supp: bitmap of SGI support per channel width
* use BIT(@enum iwl_tlc_mng_cfg_cw)
@@ -140,7 +151,7 @@ enum IWL_TLC_HT_BW_RATES {
* @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
* set zero for no limit.
*/
-struct iwl_tlc_config_cmd {
+struct iwl_tlc_config_cmd_v3 {
u8 sta_id;
u8 reserved1[3];
u8 max_ch_width;
@@ -149,7 +160,7 @@ struct iwl_tlc_config_cmd {
u8 amsdu;
__le16 flags;
__le16 non_ht_rates;
- __le16 ht_rates[IWL_TLC_NSS_MAX][2];
+ __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V3];
__le16 max_mpdu_len;
u8 sgi_ch_width_supp;
u8 reserved2;
@@ -157,6 +168,37 @@ struct iwl_tlc_config_cmd {
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
/**
+ * struct iwl_tlc_config_cmd_v4 - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use BIT(&enum iwl_tlc_mng_cfg_cw)
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
+ * pair (0 - 80 MHz width and below, 1 - 160 MHz, 2 - 320 MHz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwl_tlc_config_cmd_v4 {
+ u8 sta_id;
+ u8 reserved1[3];
+ u8 max_ch_width;
+ u8 mode;
+ u8 chains;
+ u8 sgi_ch_width_supp;
+ __le16 flags;
+ __le16 non_ht_rates;
+ __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4];
+ __le16 max_mpdu_len;
+ __le16 max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */
+
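How a caller might fill the two-dimensional rate bitmap of the v4 command, sketched standalone; the NSS count of 2 is only an assumption for the example (IWL_TLC_NSS_MAX is defined elsewhere in the driver), and set_rate() is a hypothetical helper:

    #include <stdint.h>
    #include <stdio.h>

    #define NSS_MAX	2	/* assumed value of IWL_TLC_NSS_MAX */
    #define BW_COLS	3	/* IWL_TLC_MCS_PER_BW_NUM_V4 */

    /* one bitmap of supported MCS indices per <nss, bandwidth> pair */
    static uint16_t ht_rates[NSS_MAX][BW_COLS];

    static void set_rate(int nss, int bw_col, int mcs)
    {
    	ht_rates[nss][bw_col] |= (uint16_t)(1u << mcs);
    }

    int main(void)
    {
    	set_rate(0, 2, 9);	/* first NSS, new 320 MHz column, MCS 9 */
    	printf("bitmap[0][2] = 0x%04x\n", ht_rates[0][2]);
    	return 0;
    }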
+/**
* enum iwl_tlc_update_flags - updated fields
* @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update
* @IWL_TLC_NOTIF_FLAG_AMSDU: AMSDU parameters update
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 8b200379f7c2..5413087ae909 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -82,6 +82,16 @@ enum iwl_scan_offload_band_selection {
IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc,
};
+enum iwl_scan_offload_auth_alg {
+ IWL_AUTH_ALGO_UNSUPPORTED = 0x00,
+ IWL_AUTH_ALGO_NONE = 0x01,
+ IWL_AUTH_ALGO_PSK = 0x02,
+ IWL_AUTH_ALGO_8021X = 0x04,
+ IWL_AUTH_ALGO_SAE = 0x08,
+ IWL_AUTH_ALGO_8021X_SHA384 = 0x10,
+ IWL_AUTH_ALGO_OWE = 0x20,
+};
+
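The values above are single-bit flags, so one scan-offload profile can allow several authentication algorithms at once; a minimal sketch:

    #include <stdio.h>

    #define ALGO_PSK 0x02
    #define ALGO_SAE 0x08

    int main(void)
    {
    	unsigned int allowed = ALGO_PSK | ALGO_SAE;	/* WPA2/WPA3 mixed */

    	printf("SAE allowed: %s\n", (allowed & ALGO_SAE) ? "yes" : "no");
    	return 0;
    }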
/**
* struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
* @ssid_index: index to ssid list in fixed part
@@ -201,7 +211,7 @@ struct iwl_scan_channel_cfg_lmac {
__le32 iter_interval;
} __packed;
-/*
+/**
* struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
* @offset: offset in the data block
* @len: length of the segment
@@ -211,7 +221,8 @@ struct iwl_scan_probe_segment {
__le16 len;
} __packed;
-/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
+/**
+ * struct iwl_scan_probe_req_v1 - PROBE_REQUEST_FRAME_API_S_VER_2
* @mac_header: first (and common) part of the probe
* @band_data: band specific data
* @common_data: last (and common) part of the probe
@@ -224,7 +235,8 @@ struct iwl_scan_probe_req_v1 {
u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
} __packed;
-/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2
+/**
+ * struct iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2
* @mac_header: first (and common) part of the probe
* @band_data: band specific data
* @common_data: last (and common) part of the probe
@@ -247,7 +259,8 @@ enum iwl_scan_channel_flags {
IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = BIT(6),
};
-/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
+/**
+ * struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
* @flags: enum iwl_scan_channel_flags
* @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
* involved.
@@ -492,7 +505,7 @@ struct iwl_scan_dwell {
} __packed;
/**
- * struct iwl_scan_config_v1
+ * struct iwl_scan_config_v1 - scan configuration command
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
@@ -524,6 +537,21 @@ struct iwl_scan_config_v1 {
#define SCAN_LB_LMAC_IDX 0
#define SCAN_HB_LMAC_IDX 1
+/**
+ * struct iwl_scan_config_v2 - scan configuration command
+ * @flags: enum scan_config_flags
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ * @legacy_rates: default legacy rates - enum scan_config_rates
+ * @out_of_channel_time: default max out of serving channel time
+ * @suspend_time: default max suspend time
+ * @dwell: dwells for the scan
+ * @mac_addr: default mac address to be used in probes
+ * @bcast_sta_id: the index of the station in the fw
+ * @channel_flags: default channel flags - enum iwl_channel_flags
+ * scan_config_channel_flag
+ * @channel_array: default supported channels
+ */
struct iwl_scan_config_v2 {
__le32 flags;
__le32 tx_chains;
@@ -539,7 +567,7 @@ struct iwl_scan_config_v2 {
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
/**
- * struct iwl_scan_config
+ * struct iwl_scan_config - scan configuration command
* @enable_cam_mode: whether to enable CAM mode.
* @enable_promiscouos_mode: whether to enable promiscuous mode
* @bcast_sta_id: the index of the station in the fw. Deprecated starting with
@@ -640,6 +668,10 @@ enum iwl_umac_scan_general_flags2 {
* @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN: in case
* &IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN is enabled and scan is
* activated over 6GHz PSC channels, filter in beacons and probe responses.
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE: if set, send probe requests in a minimum
+ * rate of 5.5Mpbs, filter in broadcast probe responses and set the max
+ * channel time indication field in the FILS request parameters element
+ * (if included by the driver in the probe request IEs).
*/
enum iwl_umac_scan_general_flags_v2 {
IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0),
@@ -657,6 +689,20 @@ enum iwl_umac_scan_general_flags_v2 {
IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12),
IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = BIT(13),
IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = BIT(14),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE = BIT(15),
+};
+
+/**
+ * enum iwl_umac_scan_general_params_flags2 - UMAC scan general flags2
+ *
+ * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB: scan event scheduling
+ * should be aware of a P2P GO operation on the 2GHz band.
+ * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB: scan event scheduling
+ * should be aware of a P2P GO operation on the 5GHz or 6GHz band.
+ */
+enum iwl_umac_scan_general_params_flags2 {
+ IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB = BIT(0),
+ IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB = BIT(1),
};
/**
@@ -941,8 +987,8 @@ struct iwl_scan_channel_params_v6 {
} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */
/**
- * struct iwl_scan_general_params_v10
- * @flags: &enum iwl_umac_scan_flags
+ * struct iwl_scan_general_params_v11
+ * @flags: &enum iwl_umac_scan_general_flags_v2
* @reserved: reserved for future
* @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @active_dwell: dwell time for active scan per LMAC
@@ -952,7 +998,8 @@ struct iwl_scan_channel_params_v6 {
* for 5GHz channels
* @adwell_default_social_chn: adaptive dwell default number of
* APs per social channel
- * @reserved1: reserved for future
+ * @flags2: for version 11 see &enum iwl_umac_scan_general_params_flags2.
+ * Otherwise reserved.
* @adwell_max_budget: the maximal number of TUs that adaptive dwell
* can add to the total scan time
* @max_out_of_time: max out of serving channel time, per LMAC
@@ -963,7 +1010,7 @@ struct iwl_scan_channel_params_v6 {
* @num_of_fragments: number of fragments needed for full fragmented
* scan coverage.
*/
-struct iwl_scan_general_params_v10 {
+struct iwl_scan_general_params_v11 {
__le16 flags;
u8 reserved;
u8 scan_start_mac_id;
@@ -971,14 +1018,14 @@ struct iwl_scan_general_params_v10 {
u8 adwell_default_2g;
u8 adwell_default_5g;
u8 adwell_default_social_chn;
- u8 reserved1;
+ u8 flags2;
__le16 adwell_max_budget;
__le32 max_out_of_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
__le32 scan_priority;
u8 passive_dwell[SCAN_TWO_LMACS];
u8 num_of_fragments[SCAN_TWO_LMACS];
-} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_10 */
+} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_11 and *_VER_10 */
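The rename from v10 to v11 is safe because only the former reserved1 byte gains meaning; the layout itself is unchanged, which is why the one struct is tagged for both _VER_10 and _VER_11. A standalone sketch of that pattern (abbreviated fields, not the full command):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct params_v10 {
    	uint8_t adwell_default_social_chn;
    	uint8_t reserved1;	/* meaningless in _VER_10 */
    	uint16_t adwell_max_budget;
    } __attribute__((packed));

    struct params_v11 {
    	uint8_t adwell_default_social_chn;
    	uint8_t flags2;		/* same byte, now carries flags */
    	uint16_t adwell_max_budget;
    } __attribute__((packed));

    static_assert(sizeof(struct params_v10) == sizeof(struct params_v11),
    	      "versions stay layout-compatible");
    static_assert(offsetof(struct params_v11, flags2) ==
    	      offsetof(struct params_v10, reserved1),
    	      "flags2 reuses the reserved byte");

    int main(void) { return 0; }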
/**
* struct iwl_scan_periodic_parms_v1
@@ -994,31 +1041,31 @@ struct iwl_scan_periodic_parms_v1 {
/**
* struct iwl_scan_req_params_v12
- * @general_params: &struct iwl_scan_general_params_v10
+ * @general_params: &struct iwl_scan_general_params_v11
* @channel_params: &struct iwl_scan_channel_params_v4
* @periodic_params: &struct iwl_scan_periodic_parms_v1
* @probe_params: &struct iwl_scan_probe_params_v3
*/
struct iwl_scan_req_params_v12 {
- struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_general_params_v11 general_params;
struct iwl_scan_channel_params_v4 channel_params;
struct iwl_scan_periodic_parms_v1 periodic_params;
struct iwl_scan_probe_params_v3 probe_params;
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
/**
- * struct iwl_scan_req_params_v14
- * @general_params: &struct iwl_scan_general_params_v10
+ * struct iwl_scan_req_params_v15
+ * @general_params: &struct iwl_scan_general_params_v11
* @channel_params: &struct iwl_scan_channel_params_v6
* @periodic_params: &struct iwl_scan_periodic_parms_v1
* @probe_params: &struct iwl_scan_probe_params_v4
*/
-struct iwl_scan_req_params_v14 {
- struct iwl_scan_general_params_v10 general_params;
+struct iwl_scan_req_params_v15 {
+ struct iwl_scan_general_params_v11 general_params;
struct iwl_scan_channel_params_v6 channel_params;
struct iwl_scan_periodic_parms_v1 periodic_params;
struct iwl_scan_probe_params_v4 probe_params;
-} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_14 */
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_15 and *_VER_14 */
/**
* struct iwl_scan_req_umac_v12
@@ -1033,16 +1080,16 @@ struct iwl_scan_req_umac_v12 {
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
/**
- * struct iwl_scan_req_umac_v14
+ * struct iwl_scan_req_umac_v15
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @scan_params: scan parameters
*/
-struct iwl_scan_req_umac_v14 {
+struct iwl_scan_req_umac_v15 {
__le32 uid;
__le32 ooc_priority;
- struct iwl_scan_req_params_v14 scan_params;
-} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_14 */
+ struct iwl_scan_req_params_v15 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_15 and *_VER_14 */
/**
* struct iwl_umac_scan_abort
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
index 18cca15caa3a..898e62326e6c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -432,6 +432,7 @@ enum iwl_fw_statistics_type {
FW_STATISTICS_HE,
}; /* FW_STATISTICS_TYPE_API_E_VER_1 */
+#define IWL_STATISTICS_TYPE_MSK 0x7f
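A sketch of how such a mask is applied; the 0x7f value suggests the top bit of the header's type byte is reserved for other use, though that reading is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define STATISTICS_TYPE_MSK 0x7f

    int main(void)
    {
    	uint8_t raw = 0x83;	/* example raw header byte */
    	uint8_t type = raw & STATISTICS_TYPE_MSK;

    	printf("statistics type = %u\n", type);	/* prints 3 */
    	return 0;
    }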
/**
* struct iwl_statistics_ntfy_hdr
*
@@ -446,10 +447,97 @@ struct iwl_statistics_ntfy_hdr {
}; /* STATISTICS_NTFY_HDR_API_S_VER_1 */
/**
+ * struct iwl_statistics_ntfy_per_mac
+ *
+ * @beacon_filter_average_energy: Average energy [-dBm] of the 2
+ * antennas.
+ * @air_time: air time
+ * @beacon_counter: all beacons (both filtered and not filtered)
+ * @beacon_average_energy: all beacons (both filtered and not
+ * filtered)
+ * @beacon_rssi_a: beacon RSSI on antenna A
+ * @beacon_rssi_b: beacon RSSI on antenna B
+ * @rx_bytes: RX byte count
+ */
+struct iwl_statistics_ntfy_per_mac {
+ __le32 beacon_filter_average_energy;
+ __le32 air_time;
+ __le32 beacon_counter;
+ __le32 beacon_average_energy;
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 rx_bytes;
+} __packed; /* STATISTICS_NTFY_PER_MAC_API_S_VER_1 */
+
+#define IWL_STATS_MAX_BW_INDEX 5
+/** struct iwl_statistics_ntfy_per_phy
+ * @channel_load: channel load
+ * @channel_load_by_us: device contribution to MCLM
+ * @channel_load_not_by_us: other devices' contribution to MCLM
+ * @clt: CLT HW timer (TIM_CH_LOAD2)
+ * @act: active accumulator SW
+ * @elp: elapsed time accumulator SW
+ * @rx_detected_per_ch_width: number of RX detections per channel width,
+ * 0 - 20, 1/2/3 - 40/80/160
+ * @success_per_ch_width: number of frames that got ACK/BACK/CTS
+ * per channel BW. note, BACK counted as 1
+ * @fail_per_ch_width: number of frames that didn't get ACK/BACK/CTS
+ * per channel BW. note BACK counted as 1
+ * @last_tx_ch_width_indx: last txed frame channel width index
+ */
+struct iwl_statistics_ntfy_per_phy {
+ __le32 channel_load;
+ __le32 channel_load_by_us;
+ __le32 channel_load_not_by_us;
+ __le32 clt;
+ __le32 act;
+ __le32 elp;
+ __le32 rx_detected_per_ch_width[IWL_STATS_MAX_BW_INDEX];
+ __le32 success_per_ch_width[IWL_STATS_MAX_BW_INDEX];
+ __le32 fail_per_ch_width[IWL_STATS_MAX_BW_INDEX];
+ __le32 last_tx_ch_width_indx;
+} __packed; /* STATISTICS_NTFY_PER_PHY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_ntfy_per_sta
+ *
+ * @average_energy: in fact it is the negated energy value
+ */
+struct iwl_statistics_ntfy_per_sta {
+ __le32 average_energy;
+} __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */
+
+#define IWL_STATS_MAX_PHY_OPERTINAL 3
+/**
* struct iwl_statistics_operational_ntfy
*
* @hdr: general statistics header
* @flags: bitmap of possible notification structures
+ * @per_mac_stats: per mac statistics, &struct iwl_statistics_ntfy_per_mac
+ * @per_phy_stats: per phy statistics, &struct iwl_statistics_ntfy_per_phy
+ * @per_sta_stats: per sta statistics, &struct iwl_statistics_ntfy_per_sta
+ * @rx_time: rx time
+ * @tx_time: usec the radio is transmitting.
+ * @on_time_rf: The total time in usec the RF is awake.
+ * @on_time_scan: usec the radio is awake due to scan.
+ */
+struct iwl_statistics_operational_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 flags;
+ struct iwl_statistics_ntfy_per_mac per_mac_stats[MAC_INDEX_AUX];
+ struct iwl_statistics_ntfy_per_phy per_phy_stats[IWL_STATS_MAX_PHY_OPERTINAL];
+ struct iwl_statistics_ntfy_per_sta per_sta_stats[IWL_MVM_STATION_COUNT_MAX];
+ __le64 rx_time;
+ __le64 tx_time;
+ __le64 on_time_rf;
+ __le64 on_time_scan;
+} __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_15 */
+
+/**
+ * struct iwl_statistics_operational_ntfy_ver_14
+ *
+ * @hdr: general statistics header
+ * @flags: bitmap of possible notification structures
* @mac_id: mac on which the beacon was received
* @beacon_filter_average_energy: Average energy [-dBm] of the 2
* antennas.
@@ -469,7 +557,7 @@ struct iwl_statistics_ntfy_hdr {
* @average_energy: in fact it is the negated energy value
* @reserved: reserved
*/
-struct iwl_statistics_operational_ntfy {
+struct iwl_statistics_operational_ntfy_ver_14 {
struct iwl_statistics_ntfy_hdr hdr;
__le32 flags;
__le32 mac_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h b/drivers/net/wireless/intel/iwlwifi/fw/api/system.h
index c5df1171462b..acf5d4b9a214 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/system.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
-#ifndef __iwl_fw_api_soc_h__
-#define __iwl_fw_api_soc_h__
+#ifndef __iwl_fw_api_system_h__
+#define __iwl_fw_api_system_h__
#define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0)
#define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1)
@@ -32,4 +32,12 @@ struct iwl_soc_configuration_cmd {
* SOC_CONFIGURATION_CMD_S_VER_2
*/
-#endif /* __iwl_fw_api_soc_h__ */
+/**
+ * struct iwl_system_features_control_cmd - system features control command
+ * @features: bitmap of features to disable
+ */
+struct iwl_system_features_control_cmd {
+ __le32 features[4];
+} __packed; /* SYSTEM_FEATURES_CONTROL_CMD_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_system_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 4a74c0ea0f31..e73cc7380a26 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -177,6 +177,17 @@ enum iwl_tx_offload_assist_flags_pos {
#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f
#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
+enum iwl_tx_offload_assist_bz {
+ IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS = 0x000003ff,
+ IWL_TX_CMD_OFFLD_BZ_START_OFFS = 0x001ff800,
+ IWL_TX_CMD_OFFLD_BZ_MH_LEN = 0x07c00000,
+ IWL_TX_CMD_OFFLD_BZ_MH_PAD = 0x08000000,
+ IWL_TX_CMD_OFFLD_BZ_AMSDU = 0x10000000,
+ IWL_TX_CMD_OFFLD_BZ_ZERO2ONES = 0x20000000,
+ IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM = 0x40000000,
+ IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM = 0x80000000,
+};
+
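Each enum value above is a contiguous bit mask inside a single 32-bit offload word; a standalone helper in the spirit of the kernel's FIELD_GET() shows the extraction (the sample word is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    #define OFFLD_BZ_MH_LEN 0x07c00000u	/* 5-bit MAC header length field */

    /* shift the masked bits down to bit 0, as FIELD_GET() does */
    static uint32_t field_get(uint32_t mask, uint32_t word)
    {
    	return (word & mask) >> __builtin_ctz(mask);
    }

    int main(void)
    {
    	uint32_t offload_assist = 0x06000000;	/* arbitrary example */

    	printf("mh_len = %u\n", field_get(OFFLD_BZ_MH_LEN, offload_assist));
    	return 0;
    }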
/* TODO: complete documentation for try_cnt and btkill_cnt */
/**
* struct iwl_tx_cmd - TX command struct to FW
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index a39013c401c9..7ad9cee925da 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -880,7 +880,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
dump_info->hw_type =
cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
dump_info->hw_step =
- cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+ cpu_to_le32(fwrt->trans->hw_rev_step);
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
sizeof(dump_info->fw_human_readable));
strncpy(dump_info->dev_human_readable, fwrt->trans->name,
@@ -1165,8 +1165,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
le32_to_cpu(reg->dev_addr.size));
- if ((le32_to_cpu(reg->id) & IWL_FW_INI_REGION_V2_MASK) ==
- IWL_FW_INI_HW_SMEM_REGION_ID &&
+ if (reg->sub_type == IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM &&
fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
range->data,
@@ -1565,7 +1564,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
- prph_data = iwl_read_prph(fwrt->trans, (i % 2) ?
+ prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
DBGI_SRAM_TARGET_ACCESS_RDATA_LSB);
if (prph_data == 0x5a5a5a5a) {
@@ -1988,17 +1987,18 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_dump_entry *entry;
- struct iwl_fw_error_dump_data *tlv;
+ struct iwl_fw_ini_error_dump_data *tlv;
struct iwl_fw_ini_error_dump_header *header;
- u32 type = le32_to_cpu(reg->type), id = le32_to_cpu(reg->id);
+ u32 type = reg->type;
+ u32 id = le32_to_cpu(reg->id);
u32 num_of_ranges, i, size;
void *range;
/*
- * The higher part of the ID in version 2 is irrelevant for
+ * The higher part of the ID from version 2 on is irrelevant for
* us, so mask it out.
*/
- if (le32_to_cpu(reg->hdr.version) == 2)
+ if (le32_to_cpu(reg->hdr.version) >= 2)
id &= IWL_FW_INI_REGION_V2_MASK;
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
@@ -2017,6 +2017,9 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
tlv = (void *)entry->data;
tlv->type = reg->type;
+ tlv->sub_type = reg->sub_type;
+ tlv->sub_type_ver = reg->sub_type_ver;
+ tlv->reserved = reg->reserved;
tlv->len = cpu_to_le32(size);
IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
@@ -2099,7 +2102,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype);
- dump->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+ dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step);
/*
* Several HWs all have type == 0x42, so we'll override this value
@@ -2107,7 +2110,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
*/
hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
if (hw_type == IWL_AX210_HW_TYPE) {
- u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR_GEN2);
+ u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT);
u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT);
u32 masked_bits = is_jacket | (is_cdb << 1);
@@ -2291,7 +2294,7 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
}
reg = (void *)reg_data.reg_tlv->data;
- reg_type = le32_to_cpu(reg->type);
+ reg_type = reg->type;
if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
continue;
@@ -2716,6 +2719,9 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
+ if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
+ iwl_force_nmi(fwrt->trans);
+
out:
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_error_dump_data_free(dump_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index 016b3a4c5f51..b90f1e9ce691 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -12,6 +12,7 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
+#include "pnvm.h"
/*
* Note: This structure is read from the device with IO accesses,
@@ -19,53 +20,6 @@
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
-struct iwl_error_event_table_v1 {
- u32 valid; /* (nonzero) valid, (0) log is empty */
- u32 error_id; /* type of error */
- u32 pc; /* program counter */
- u32 blink1; /* branch link */
- u32 blink2; /* branch link */
- u32 ilink1; /* interrupt link */
- u32 ilink2; /* interrupt link */
- u32 data1; /* error-specific data */
- u32 data2; /* error-specific data */
- u32 data3; /* error-specific data */
- u32 bcon_time; /* beacon timer */
- u32 tsf_low; /* network timestamp function timer */
- u32 tsf_hi; /* network timestamp function timer */
- u32 gp1; /* GP1 timer register */
- u32 gp2; /* GP2 timer register */
- u32 gp3; /* GP3 timer register */
- u32 ucode_ver; /* uCode version */
- u32 hw_ver; /* HW Silicon version */
- u32 brd_ver; /* HW board version */
- u32 log_pc; /* log program counter */
- u32 frame_ptr; /* frame pointer */
- u32 stack_ptr; /* stack pointer */
- u32 hcmd; /* last host command header */
- u32 isr0; /* isr status register LMPM_NIC_ISR0:
- * rxtx_flag */
- u32 isr1; /* isr status register LMPM_NIC_ISR1:
- * host_flag */
- u32 isr2; /* isr status register LMPM_NIC_ISR2:
- * enc_flag */
- u32 isr3; /* isr status register LMPM_NIC_ISR3:
- * time_flag */
- u32 isr4; /* isr status register LMPM_NIC_ISR4:
- * wico interrupt */
- u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
- u32 wait_event; /* wait event() caller address */
- u32 l2p_control; /* L2pControlField */
- u32 l2p_duration; /* L2pDurationField */
- u32 l2p_mhvalid; /* L2pMhValidBits */
- u32 l2p_addr_match; /* L2pAddrMatchStat */
- u32 lmpm_pmg_sel; /* indicate which clocks are turned on
- * (LMPM_PMG_SEL) */
- u32 u_timestamp; /* indicate when the date and time of the
- * compilation */
- u32 flow_handler; /* FH read/write pointers, RX credit */
-} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
-
struct iwl_error_event_table {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
@@ -147,6 +101,7 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
struct iwl_trans *trans = fwrt->trans;
struct iwl_umac_error_event_table table = {};
u32 base = fwrt->trans->dbg.umac_error_event_table;
+ char pnvm_name[MAX_PNVM_NAME];
if (!base &&
!(fwrt->trans->dbg.error_event_table_tlv_status &
@@ -164,6 +119,13 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
fwrt->trans->status, table.valid);
}
+ if ((table.error_id & ~FW_SYSASSERT_CPU_MASK) ==
+ FW_SYSASSERT_PNVM_MISSING) {
+ iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
+ IWL_ERR(fwrt, "PNVM data is missing, please install %s\n",
+ pnvm_name);
+ }
+
IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
@@ -212,7 +174,9 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
IWL_ERR(trans, "HW error, resetting before reading\n");
/* reset the device */
- iwl_trans_sw_reset(trans);
+ err = iwl_trans_sw_reset(trans, true);
+ if (err)
+ return;
err = iwl_finish_nic_init(trans);
if (err)
@@ -295,21 +259,21 @@ struct iwl_tcm_error_event_table {
u32 reserved[4];
} __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */
-static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
+static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_tcm_error_event_table table = {};
- u32 base = fwrt->trans->dbg.tcm_error_event_table;
+ u32 base = fwrt->trans->dbg.tcm_error_event_table[idx];
int i;
+ u32 flag = idx ? IWL_ERROR_EVENT_TABLE_TCM2 :
+ IWL_ERROR_EVENT_TABLE_TCM1;
- if (!base ||
- !(fwrt->trans->dbg.error_event_table_tlv_status &
- IWL_ERROR_EVENT_TABLE_TCM))
+ if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag))
return;
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
- IWL_ERR(fwrt, "TCM status:\n");
+ IWL_ERR(fwrt, "TCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
@@ -328,13 +292,72 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
table.sw_status[i], i);
+}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- u32 scratch = iwl_read32(trans, CSR_FUNC_SCRATCH);
+/*
+ * RCM error struct.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_rcm_error_event_table {
+ u32 valid;
+ u32 error_id;
+ u32 blink2;
+ u32 ilink1;
+ u32 ilink2;
+ u32 data1, data2, data3;
+ u32 logpc;
+ u32 frame_pointer;
+ u32 stack_pointer;
+ u32 msgid;
+ u32 isr;
+ u32 frame_hw_status;
+ u32 mbx_lmac_to_rcm_req;
+ u32 mbx_rcm_to_lmac_req;
+ u32 mh_ctl;
+ u32 mh_addr1_lo;
+ u32 mh_info;
+ u32 mh_err;
+ u32 reserved[3];
+} __packed; /* RCM_LOG_ERROR_TABLE_API_S_VER_1 */
+
+static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_rcm_error_event_table table = {};
+ u32 base = fwrt->trans->dbg.rcm_error_event_table[idx];
+ u32 flag = idx ? IWL_ERROR_EVENT_TABLE_RCM2 :
+ IWL_ERROR_EVENT_TABLE_RCM1;
- IWL_ERR(fwrt, "Function Scratch status:\n");
- IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch);
- }
+ if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag))
+ return;
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ IWL_ERR(fwrt, "RCM%d status:\n", idx + 1);
+ IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
+ IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2);
+ IWL_ERR(fwrt, "0x%08X | rcm interruptlink1\n", table.ilink1);
+ IWL_ERR(fwrt, "0x%08X | rcm interruptlink2\n", table.ilink2);
+ IWL_ERR(fwrt, "0x%08X | rcm data1\n", table.data1);
+ IWL_ERR(fwrt, "0x%08X | rcm data2\n", table.data2);
+ IWL_ERR(fwrt, "0x%08X | rcm data3\n", table.data3);
+ IWL_ERR(fwrt, "0x%08X | rcm log PC\n", table.logpc);
+ IWL_ERR(fwrt, "0x%08X | rcm frame pointer\n", table.frame_pointer);
+ IWL_ERR(fwrt, "0x%08X | rcm stack pointer\n", table.stack_pointer);
+ IWL_ERR(fwrt, "0x%08X | rcm msg ID\n", table.msgid);
+ IWL_ERR(fwrt, "0x%08X | rcm ISR status\n", table.isr);
+ IWL_ERR(fwrt, "0x%08X | frame HW status\n", table.frame_hw_status);
+ IWL_ERR(fwrt, "0x%08X | LMAC-to-RCM request mbox\n",
+ table.mbx_lmac_to_rcm_req);
+ IWL_ERR(fwrt, "0x%08X | RCM-to-LMAC request mbox\n",
+ table.mbx_rcm_to_lmac_req);
+ IWL_ERR(fwrt, "0x%08X | MAC header control\n", table.mh_ctl);
+ IWL_ERR(fwrt, "0x%08X | MAC header addr1 low\n", table.mh_addr1_lo);
+ IWL_ERR(fwrt, "0x%08X | MAC header info\n", table.mh_info);
+ IWL_ERR(fwrt, "0x%08X | MAC header error\n", table.mh_err);
}
static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
@@ -418,8 +441,18 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
if (fwrt->trans->dbg.lmac_error_event_table[1])
iwl_fwrt_dump_lmac_error_log(fwrt, 1);
iwl_fwrt_dump_umac_error_log(fwrt);
- iwl_fwrt_dump_tcm_error_log(fwrt);
+ iwl_fwrt_dump_tcm_error_log(fwrt, 0);
+ iwl_fwrt_dump_rcm_error_log(fwrt, 0);
+ iwl_fwrt_dump_tcm_error_log(fwrt, 1);
+ iwl_fwrt_dump_rcm_error_log(fwrt, 1);
iwl_fwrt_dump_iml_error_log(fwrt);
iwl_fwrt_dump_fseq_regs(fwrt);
+
+ if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH);
+
+ IWL_ERR(fwrt, "Function Scratch status:\n");
+ IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch);
+ }
}
IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 9036b32ec765..079fa0023bd8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -232,6 +232,24 @@ struct iwl_fw_error_dump_mem {
#define IWL_INI_DUMP_INFO_TYPE BIT(31)
/**
+ * struct iwl_fw_error_dump_data - data for one type
+ * @type: &enum iwl_fw_ini_region_type
+ * @sub_type: sub type id
+ * @sub_type_ver: sub type version
+ * @reserved: not in use
+ * @len: the length starting from %data
+ * @data: the data itself
+ */
+struct iwl_fw_ini_error_dump_data {
+ u8 type;
+ u8 sub_type;
+ u8 sub_type_ver;
+ u8 reserved;
+ __le32 len;
+ __u8 data[];
+} __packed;
+
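Splitting the old __le32 type word into four bytes keeps the low byte readable as the legacy type on a little-endian host; a standalone check of that overlay (struct name local to the example, little-endian assumed):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct hdr_new {
    	uint8_t type;
    	uint8_t sub_type;
    	uint8_t sub_type_ver;
    	uint8_t reserved;
    } __attribute__((packed));

    int main(void)
    {
    	uint32_t legacy = 7;	/* old __le32 type field */
    	struct hdr_new h;

    	memcpy(&h, &legacy, sizeof(h));	/* same four wire bytes */
    	printf("type=%u sub_type=%u\n", h.type, h.sub_type);	/* 7 0 */
    	return 0;
    }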
+/**
* struct iwl_fw_ini_dump_entry
* @list: list of dump entries
* @size: size of the data
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 3d572f5024bb..e4ebda64cd52 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -98,7 +98,6 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_PNVM_VERSION = 62,
IWL_UCODE_TLV_PNVM_SKU = 64,
- IWL_UCODE_TLV_TCM_DEBUG_ADDRS = 65,
IWL_UCODE_TLV_SEC_TABLE_ADDR = 66,
IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
index 24a966673691..530674a35eeb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2019 - 2020 Intel Corporation
+ * Copyright(c) 2019 - 2021 Intel Corporation
*/
#include "img.h"
@@ -49,10 +49,9 @@ u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);
-#define FW_SYSASSERT_CPU_MASK 0xf0000000
static const struct {
const char *name;
- u8 num;
+ u32 num;
} advanced_lookup[] = {
{ "NMI_INTERRUPT_WDG", 0x34 },
{ "SYSASSERT", 0x35 },
@@ -73,6 +72,7 @@ static const struct {
{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "PNVM_MISSING", FW_SYSASSERT_PNVM_MISSING },
{ "ADVANCED_SYSASSERT", 0 },
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 993bda17fa30..fa7b1780064c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -279,4 +279,8 @@ u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
const char *iwl_fw_lookup_assert_desc(u32 num);
+
+#define FW_SYSASSERT_CPU_MASK 0xf0000000
+#define FW_SYSASSERT_PNVM_MISSING 0x0010070d
+
#endif /* __iwl_fw_img_h__ */
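The top nibble of error_id carries the asserting CPU, which is why dump.c masks it off before comparing against the PNVM code; the same test in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define SYSASSERT_CPU_MASK	0xf0000000u
    #define SYSASSERT_PNVM_MISSING	0x0010070du

    int main(void)
    {
    	uint32_t error_id = 0x2010070d;	/* example: CPU 2 asserted */

    	if ((error_id & ~SYSASSERT_CPU_MASK) == SYSASSERT_PNVM_MISSING)
    		printf("PNVM missing (cpu nibble 0x%x)\n", error_id >> 28);
    	return 0;
    }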
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 566957ac4539..139ece879fab 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -8,7 +8,7 @@
#include "dbg.h"
#include "debugfs.h"
-#include "fw/api/soc.h"
+#include "fw/api/system.h"
#include "fw/api/commands.h"
#include "fw/api/rx.h"
#include "fw/api/datapath.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 69799f1ed2c4..3cb0ddbe3ab2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -156,8 +156,13 @@ struct iwl_fw_runtime {
u8 sar_chain_b_profile;
struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3];
u32 geo_rev;
- union iwl_ppag_table_cmd ppag_table;
+ u32 geo_num_profiles;
+ bool geo_enabled;
+ struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
+ u32 ppag_flags;
u32 ppag_ver;
+ struct iwl_sar_offset_mapping_cmd sgom_table;
+ bool sgom_enabled;
#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 009dd4be597b..bd82c24811c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -11,6 +11,7 @@
#include "fw/uefi.h"
#include "fw/api/alive.h"
#include <linux/efi.h>
+#include "fw/runtime.h"
#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
0xb2, 0xec, 0xf5, 0xa3, \
@@ -266,3 +267,90 @@ out:
return data;
}
+
+#ifdef CONFIG_ACPI
+static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
+ struct iwl_fw_runtime *fwrt)
+{
+ int i, j;
+
+ if (sgom_data->revision != 1)
+ return -EINVAL;
+
+ memcpy(fwrt->sgom_table.offset_map, sgom_data->offset_map,
+ sizeof(fwrt->sgom_table.offset_map));
+
+ for (i = 0; i < MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE; i++) {
+ for (j = 0; j < MCC_TO_SAR_OFFSET_TABLE_COL_SIZE; j++) {
+ /* since each byte is composed of two values, */
+ /* one for each letter, */
+ /* extract and check each of them separately */
+ u8 value = fwrt->sgom_table.offset_map[i][j];
+ u8 low = value & 0xF;
+ u8 high = (value & 0xF0) >> 4;
+
+ if (high > fwrt->geo_num_profiles)
+ high = 0;
+ if (low > fwrt->geo_num_profiles)
+ low = 0;
+ fwrt->sgom_table.offset_map[i][j] = (high << 4) | low;
+ }
+ }
+
+ fwrt->sgom_enabled = true;
+ return 0;
+}
+
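The loop above packs two 4-bit profile indices into every map byte; a standalone round trip of that nibble handling (num_profiles is an arbitrary stand-in for geo_num_profiles):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint8_t value = 0x5a;		/* example map byte */
    	unsigned int num_profiles = 3;	/* stand-in for geo_num_profiles */
    	uint8_t low = value & 0xf;		/* 0xa */
    	uint8_t high = (value & 0xf0) >> 4;	/* 0x5 */

    	/* out-of-range entries are zeroed, as in the parser above */
    	if (high > num_profiles)
    		high = 0;
    	if (low > num_profiles)
    		low = 0;
    	printf("sanitized byte: 0x%02x\n", (high << 4) | low);	/* 0x00 */
    	return 0;
    }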
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt)
+{
+ struct efivar_entry *sgom_efivar;
+ struct uefi_cnv_wlan_sgom_data *data;
+ unsigned long package_size;
+ int err, ret;
+
+ if (!fwrt->geo_enabled)
+ return;
+
+ sgom_efivar = kzalloc(sizeof(*sgom_efivar), GFP_KERNEL);
+ if (!sgom_efivar)
+ return;
+
+ memcpy(&sgom_efivar->var.VariableName, IWL_UEFI_SGOM_NAME,
+ sizeof(IWL_UEFI_SGOM_NAME));
+ sgom_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+
+ /* TODO: we hardcode a maximum length here, because reading
+ * from the UEFI is not working. To implement this properly,
+ * we have to call efivar_entry_size().
+ */
+ package_size = IWL_HARDCODED_SGOM_SIZE;
+
+ data = kmalloc(package_size, GFP_KERNEL);
+ if (!data) {
+ data = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ err = efivar_entry_get(sgom_efivar, NULL, &package_size, data);
+ if (err) {
+ IWL_DEBUG_FW(trans,
+ "SGOM UEFI variable not found %d\n", err);
+ goto out_free;
+ }
+
+ IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n",
+ package_size);
+
+ ret = iwl_uefi_sgom_parse(data, fwrt);
+ if (ret < 0)
+ IWL_DEBUG_FW(trans, "Cannot read SGOM tables. rev is invalid\n");
+
+out_free:
+ kfree(data);
+
+out:
+ kfree(sgom_efivar);
+}
+IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table);
+#endif /* CONFIG_ACPI */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index d552c656ac9f..09d2a971b3a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -7,6 +7,7 @@
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
+#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping"
/*
* TODO: we have these hardcoded values that the caller must pass,
@@ -16,6 +17,7 @@
*/
#define IWL_HARDCODED_PNVM_SIZE 4096
#define IWL_HARDCODED_REDUCE_POWER_SIZE 32768
+#define IWL_HARDCODED_SGOM_SIZE 339
struct pnvm_sku_package {
u8 rev;
@@ -25,6 +27,16 @@ struct pnvm_sku_package {
u8 data[];
} __packed;
+struct uefi_cnv_wlan_sgom_data {
+ u8 revision;
+ u8 offset_map[IWL_HARDCODED_SGOM_SIZE - 1];
+} __packed;
+
+/*
+ * This is known to be broken on v4.19 and to work on v5.4. Until we
+ * figure out why this is the case and how to make it work, simply
+ * disable the feature in old kernels.
+ */
#ifdef CONFIG_EFI
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
@@ -42,4 +54,12 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
}
#endif /* CONFIG_EFI */
+#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
+#else
+static inline
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
+{
+}
+#endif
#endif /* __iwl_fw_uefi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 665167a223f6..e122b8b4e1fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -84,6 +84,10 @@ enum iwl_nvm_type {
#define IWL_DEFAULT_MAX_TX_POWER 22
#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
NETIF_F_TSO | NETIF_F_TSO6)
+#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6)
+#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \
+ IWL_TX_CSUM_NETIF_FLAGS_BZ | \
+ NETIF_F_RXCSUM)
/* Antenna presence definitions */
#define ANT_NONE 0x0
@@ -448,6 +452,9 @@ struct iwl_cfg {
#define IWL_CFG_NO_CDB 0x0
#define IWL_CFG_CDB 0x1
+#define IWL_CFG_NO_JACKET 0x0
+#define IWL_CFG_IS_JACKET 0x1
+
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
@@ -462,6 +469,7 @@ struct iwl_dev_info {
u8 no_160;
u8 cores;
u8 cdb;
+ u8 jacket;
const struct iwl_cfg *cfg;
const char *name;
};
@@ -610,7 +618,6 @@ extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg killer1650w_2ax_cfg;
extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0;
-extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
@@ -634,6 +641,12 @@ extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0;
+extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0;
+extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0;
+extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0;
+extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index ff79a2ecb242..f90d4662c164 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -105,9 +105,14 @@
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
-/* Doorbell NMI (since Bz) */
+#define CSR_IPC_SLEEP_CONTROL (CSR_BASE + 0x114)
+#define CSR_IPC_SLEEP_CONTROL_SUSPEND 0x3
+#define CSR_IPC_SLEEP_CONTROL_RESUME 0
+
+/* Doorbell - since Bz
+ * connected to UREG_DOORBELL_TO_ISR6 (lower 16 bits only)
+ */
#define CSR_DOORBELL_VECTOR (CSR_BASE + 0x130)
-#define CSR_DOORBELL_VECTOR_NMI BIT(1)
/* host chicken bits */
#define CSR_HOST_CHICKEN (CSR_BASE + 0x204)
@@ -143,8 +148,7 @@
#define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101)
/* Bits for CSR_HW_IF_CONFIG_REG */
-#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
-#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
+#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH (0x0000000F)
#define CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM (0x00000080)
#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
@@ -287,8 +291,7 @@
#define CSR_GP_CNTRL_REG_FLAG_SW_RESET BIT(31)
/* HW REV */
-#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
-#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
+#define CSR_HW_REV_STEP_DASH(_val) ((_val) & CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH)
#define CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4)
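The two old masks (dash in bits 0-1, step in bits 2-3) collapse here into one 4-bit step/dash field; a standalone before/after of the extraction, using one of the updated QU_B0 values from this patch as the sample:

    #include <stdint.h>
    #include <stdio.h>

    #define OLD_DASH(v)	(((v) & 0x0000003) >> 0)
    #define OLD_STEP(v)	(((v) & 0x000000C) >> 2)
    #define STEP_DASH(v)	((v) & 0x0000000F)

    int main(void)
    {
    	uint32_t hw_rev = 0x0000331;	/* sample: updated QU_B0 define */

    	printf("old: step=%u dash=%u; new: step_dash=0x%x\n",
    	       (unsigned)OLD_STEP(hw_rev), (unsigned)OLD_DASH(hw_rev),
    	       (unsigned)STEP_DASH(hw_rev));
    	return 0;
    }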
/* HW RFID */
@@ -306,6 +309,7 @@ enum {
SILICON_A_STEP = 0,
SILICON_B_STEP,
SILICON_C_STEP,
+ SILICON_Z_STEP = 0xf,
};
@@ -328,10 +332,10 @@ enum {
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
-#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
-#define CSR_HW_REV_TYPE_QU_B0 (0x0000334)
-#define CSR_HW_REV_TYPE_QU_C0 (0x0000338)
-#define CSR_HW_REV_TYPE_QUZ (0x0000354)
+#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000361)
+#define CSR_HW_REV_TYPE_QU_B0 (0x0000331)
+#define CSR_HW_REV_TYPE_QU_C0 (0x0000332)
+#define CSR_HW_REV_TYPE_QUZ (0x0000351)
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
#define CSR_HW_REV_TYPE_SO (0x0000370)
#define CSR_HW_REV_TYPE_TY (0x0000420)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 7ab98b419cc1..c73672d61356 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -59,7 +59,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
- [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 2,},
+ [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 3,},
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
};
@@ -177,14 +177,14 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
struct iwl_ucode_tlv **active_reg;
u32 id = le32_to_cpu(reg->id);
- u32 type = le32_to_cpu(reg->type);
+ u8 type = reg->type;
u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
/*
- * The higher part of the ID in version 2 is irrelevant for
+ * The higher part of the ID from version 2 on is irrelevant for
* us, so mask it out.
*/
- if (le32_to_cpu(reg->hdr.version) == 2)
+ if (le32_to_cpu(reg->hdr.version) >= 2)
id &= IWL_FW_INI_REGION_V2_MASK;
if (le32_to_cpu(tlv->length) < sizeof(*reg))
@@ -233,6 +233,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
struct iwl_fw_ini_trigger_tlv *dup_trig;
u32 tp = le32_to_cpu(trig->time_point);
+ u32 rf = le32_to_cpu(trig->reset_fw);
struct iwl_ucode_tlv *dup = NULL;
int ret;
@@ -247,6 +248,10 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
return -EINVAL;
}
+ IWL_DEBUG_FW(trans,
+ "WRT: time point %u for trigger TLV with reset_fw %u\n",
+ tp, rf);
+ trans->dbg.last_tp_resetfw = 0xFF;
if (!le32_to_cpu(trig->occurrences)) {
dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
GFP_KERNEL);
@@ -300,14 +305,21 @@ static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
bool ext)
{
- const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
- u32 type = le32_to_cpu(tlv->type);
- u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
- u32 domain = le32_to_cpu(hdr->domain);
enum iwl_ini_cfg_state *cfg_state = ext ?
&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
+ const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
+ u32 type;
+ u32 tlv_idx;
+ u32 domain;
int ret;
+ if (le32_to_cpu(tlv->length) < sizeof(*hdr))
+ return;
+
+ type = le32_to_cpu(tlv->type);
+ tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
+ domain = le32_to_cpu(hdr->domain);
+
if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
!(domain & trans->dbg.domains_bitmap)) {
IWL_DEBUG_FW(trans,
@@ -473,7 +485,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
int res;
if (!iwlwifi_mod_params.enable_ini ||
- trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
+ trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
return;
res = firmware_request_nowarn(&fw, yoyo_bin, dev);
@@ -1159,6 +1171,8 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
data);
int ret, i;
+ u32 tp = le32_to_cpu(dump_data.trig->time_point);
+
if (!num_data) {
ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
@@ -1177,8 +1191,42 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
break;
}
}
- }
+ fwrt->trans->dbg.restart_required = false;
+ IWL_DEBUG_INFO(fwrt, "WRT: tp %d, reset_fw %d\n",
+ tp, dump_data.trig->reset_fw);
+ IWL_DEBUG_INFO(fwrt, "WRT: restart_required %d, last_tp_resetfw %d\n",
+ fwrt->trans->dbg.restart_required,
+ fwrt->trans->dbg.last_tp_resetfw);
+
+ if (fwrt->trans->trans_cfg->device_family ==
+ IWL_DEVICE_FAMILY_9000) {
+ fwrt->trans->dbg.restart_required = true;
+ } else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
+ fwrt->trans->dbg.last_tp_resetfw ==
+ IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
+ fwrt->trans->dbg.restart_required = false;
+ fwrt->trans->dbg.last_tp_resetfw = 0xFF;
+ IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
+ } else if (le32_to_cpu(dump_data.trig->reset_fw) ==
+ IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
+ IWL_DEBUG_INFO(fwrt, "WRT: stop and reload firmware\n");
+ fwrt->trans->dbg.restart_required = true;
+ } else if (le32_to_cpu(dump_data.trig->reset_fw) ==
+ IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
+ IWL_DEBUG_INFO(fwrt, "WRT: stop only and no reload firmware\n");
+ fwrt->trans->dbg.restart_required = false;
+ fwrt->trans->dbg.last_tp_resetfw =
+ le32_to_cpu(dump_data.trig->reset_fw);
+ } else if (le32_to_cpu(dump_data.trig->reset_fw) ==
+ IWL_FW_INI_RESET_FW_MODE_NOTHING) {
+ IWL_DEBUG_INFO(fwrt,
+ "WRT: nothing need to be done after debug collection\n");
+ } else {
+ IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
+ le32_to_cpu(dump_data.trig->reset_fw));
+ }
+ }
return 0;
}
@@ -1244,7 +1292,7 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
}
reg = (void *)(*active_reg)->data;
- reg_type = le32_to_cpu(reg->type);
+ reg_type = reg->type;
if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
!(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 5cec467b995b..83e3b731ad29 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -130,6 +130,9 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
+
+ /* clear the data for the aborted load case */
+ memset(&drv->fw, 0, sizeof(drv->fw));
}
static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
@@ -163,8 +166,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
char tag[8];
if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
- (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP &&
- CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) {
+ (drv->trans->hw_rev_step != SILICON_B_STEP &&
+ drv->trans->hw_rev_step != SILICON_C_STEP)) {
IWL_ERR(drv,
"Only HW steps B and C are currently supported (0x%0x)\n",
drv->trans->hw_rev);
@@ -586,6 +589,66 @@ static void iwl_drv_set_dump_exclude(struct iwl_drv *drv,
excl->size = le32_to_cpu(fw->size);
}
+static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
+ const struct iwl_ucode_tlv *tlv)
+{
+ const struct iwl_fw_ini_region_tlv *region;
+ u32 length = le32_to_cpu(tlv->length);
+ u32 addr;
+
+ if (length < offsetof(typeof(*region), special_mem) +
+ sizeof(region->special_mem))
+ return;
+
+ region = (void *)tlv->data;
+ addr = le32_to_cpu(region->special_mem.base_addr);
+ addr += le32_to_cpu(region->special_mem.offset);
+ addr &= ~FW_ADDR_CACHE_CONTROL;
+
+ if (region->type != IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY)
+ return;
+
+ switch (region->sub_type) {
+ case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE:
+ drv->trans->dbg.umac_error_event_table = addr;
+ drv->trans->dbg.error_event_table_tlv_status |=
+ IWL_ERROR_EVENT_TABLE_UMAC;
+ break;
+ case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE:
+ drv->trans->dbg.lmac_error_event_table[0] = addr;
+ drv->trans->dbg.error_event_table_tlv_status |=
+ IWL_ERROR_EVENT_TABLE_LMAC1;
+ break;
+ case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE:
+ drv->trans->dbg.lmac_error_event_table[1] = addr;
+ drv->trans->dbg.error_event_table_tlv_status |=
+ IWL_ERROR_EVENT_TABLE_LMAC2;
+ break;
+ case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE:
+ drv->trans->dbg.tcm_error_event_table[0] = addr;
+ drv->trans->dbg.error_event_table_tlv_status |=
+ IWL_ERROR_EVENT_TABLE_TCM1;
+ break;
+ case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE:
+ drv->trans->dbg.tcm_error_event_table[1] = addr;
+ drv->trans->dbg.error_event_table_tlv_status |=
+ IWL_ERROR_EVENT_TABLE_TCM2;
+ break;
+ case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE:
+ drv->trans->dbg.rcm_error_event_table[0] = addr;
+ drv->trans->dbg.error_event_table_tlv_status |=
+ IWL_ERROR_EVENT_TABLE_RCM1;
+ break;
+ case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE:
+ drv->trans->dbg.rcm_error_event_table[1] = addr;
+ drv->trans->dbg.error_event_table_tlv_status |=
+ IWL_ERROR_EVENT_TABLE_RCM2;
+ break;
+ default:
+ break;
+ }
+}
+
static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces,
@@ -1153,21 +1216,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_ERROR_EVENT_TABLE_LMAC1;
break;
}
- case IWL_UCODE_TLV_TCM_DEBUG_ADDRS: {
- struct iwl_fw_tcm_error_addr *ptr = (void *)tlv_data;
-
- if (tlv_len != sizeof(*ptr))
- goto invalid_tlv_len;
- drv->trans->dbg.tcm_error_event_table =
- le32_to_cpu(ptr->addr) & ~FW_ADDR_CACHE_CONTROL;
- drv->trans->dbg.error_event_table_tlv_status |=
- IWL_ERROR_EVENT_TABLE_TCM;
- break;
- }
+ case IWL_UCODE_TLV_TYPE_REGIONS:
+ iwl_parse_dbg_tlv_assert_tables(drv, tlv);
+ fallthrough;
case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
case IWL_UCODE_TLV_TYPE_HCMD:
- case IWL_UCODE_TLV_TYPE_REGIONS:
case IWL_UCODE_TLV_TYPE_TRIGGERS:
case IWL_UCODE_TLV_TYPE_CONF_SET:
if (iwlwifi_mod_params.enable_ini)
@@ -1375,6 +1429,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
int i;
bool load_module = false;
bool usniffer_images = false;
+ bool failure = true;
fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
@@ -1635,15 +1690,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* else from proceeding if the module fails to load
* or hangs loading.
*/
- if (load_module) {
+ if (load_module)
request_module("%s", op->name);
-#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
- if (err)
- IWL_ERR(drv,
- "failed to load module %s (error %d), is dynamic loading enabled?\n",
- op->name, err);
-#endif
- }
+ failure = false;
goto free;
try_again:
@@ -1659,6 +1708,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
free:
+ if (failure)
+ iwl_dealloc_ucode(drv);
+
if (pieces) {
for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
kfree(pieces->img[i].sec);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index f12b86563728..d7a7835b935c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -10,6 +10,7 @@
#include "iwl-modparams.h"
#include "iwl-eeprom-parse.h"
+#if IS_ENABLED(CONFIG_IWLDVM)
/* EEPROM offset definitions */
/* indirect access definitions */
@@ -647,6 +648,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
return n_channels;
}
+#endif
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
@@ -750,6 +752,7 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
}
}
+#if IS_ENABLED(CONFIG_IWLDVM)
static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
@@ -873,3 +876,4 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return NULL;
}
IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index aaa3b65be4e6..e6fd4941a4cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -580,7 +580,7 @@ struct iwl_rb_status {
__le16 closed_fr_num;
__le16 finished_rb_num;
__le16 finished_fr_nam;
- __le32 __unused;
+ __le32 __spare;
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 46917b4216b3..253eac4cbf59 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -218,7 +218,7 @@ void iwl_force_nmi(struct iwl_trans *trans)
UREG_DOORBELL_TO_ISR6_NMI_BIT);
else
iwl_write32(trans, CSR_DOORBELL_VECTOR,
- CSR_DOORBELL_VECTOR_NMI);
+ UREG_DOORBELL_TO_ISR6_NMI_BIT);
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index f470f9aea50f..dd58c8f9aa11 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -22,6 +22,7 @@
#include "fw/api/commands.h"
#include "fw/api/cmdhdr.h"
#include "fw/img.h"
+#include "mei/iwl-mei.h"
/* NVM offsets (in words) definitions */
enum nvm_offsets {
@@ -607,7 +608,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.phy_cap_info[9] =
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
+ (IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED <<
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS),
.phy_cap_info[10] =
IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF,
},
@@ -664,7 +666,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
.phy_cap_info[9] =
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED
+ << IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS,
},
/*
* Set default Tx/Rx HE MCS NSS Support field.
@@ -1115,6 +1118,66 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
struct iwl_nvm_data *
+iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_mei_nvm *mei_nvm,
+ const struct iwl_fw *fw)
+{
+ struct iwl_nvm_data *data;
+ u32 sbands_flags = 0;
+ u8 rx_chains = fw->valid_rx_ant;
+ u8 tx_chains = fw->valid_tx_ant;
+
+ if (cfg->uhb_supported)
+ data = kzalloc(struct_size(data, channels,
+ IWL_NVM_NUM_CHANNELS_UHB),
+ GFP_KERNEL);
+ else
+ data = kzalloc(struct_size(data, channels,
+ IWL_NVM_NUM_CHANNELS_EXT),
+ GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ BUILD_BUG_ON(ARRAY_SIZE(mei_nvm->channels) !=
+ IWL_NVM_NUM_CHANNELS_UHB);
+ data->nvm_version = mei_nvm->nvm_version;
+
+ iwl_set_radio_cfg(cfg, data, mei_nvm->radio_cfg);
+ if (data->valid_tx_ant)
+ tx_chains &= data->valid_tx_ant;
+ if (data->valid_rx_ant)
+ rx_chains &= data->valid_rx_ant;
+
+ data->sku_cap_mimo_disabled = false;
+ data->sku_cap_band_24ghz_enable = true;
+ data->sku_cap_band_52ghz_enable = true;
+ data->sku_cap_11n_enable =
+ !(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL);
+ data->sku_cap_11ac_enable = true;
+ data->sku_cap_11ax_enable =
+ mei_nvm->caps & MEI_NVM_CAPS_11AX_SUPPORT;
+
+ data->lar_enabled = mei_nvm->caps & MEI_NVM_CAPS_LARI_SUPPORT;
+
+ data->n_hw_addrs = mei_nvm->n_hw_addrs;
+ /* If no valid mac address was found - bail out */
+ if (iwl_set_hw_address(trans, cfg, data, NULL, NULL)) {
+ kfree(data);
+ return NULL;
+ }
+
+ if (data->lar_enabled &&
+ fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
+ sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
+
+ iwl_init_sbands(trans, data, mei_nvm->channels, tx_chains, rx_chains,
+ sbands_flags, true, fw);
+
+ return data;
+}
+IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data);
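+
+/*
+ * A hedged usage sketch (variable names are illustrative): the caller
+ * gets the MEI NVM from iwl_mei_get_nvm(), which sleeps until CSME
+ * replies, and must free both structures itself:
+ *
+ * struct iwl_mei_nvm *mei_nvm = iwl_mei_get_nvm();
+ *
+ * if (mei_nvm) {
+ * data = iwl_parse_mei_nvm_data(trans, cfg, mei_nvm, fw);
+ * kfree(mei_nvm);
+ * }
+ */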
+
+struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
@@ -1548,7 +1611,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
/* nvm file validation, dword_buff[2] holds the file version */
if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
- CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
+ trans->hw_rev_step == SILICON_C_STEP &&
le32_to_cpu(dword_buff[2]) < 0xE4A) {
ret = -EFAULT;
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index e1f5a9741850..e01f7751cf11 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2015, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_nvm_parse_h__
@@ -8,6 +8,7 @@
#include <net/cfg80211.h>
#include "iwl-eeprom-parse.h"
+#include "mei/iwl-mei.h"
/**
* enum iwl_nvm_sbands_flags - modification flags for the channel profiles
@@ -81,4 +82,12 @@ void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
const struct iwl_fw *fw);
+/**
+ * iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data
+ */
+struct iwl_nvm_data *
+iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_mei_nvm *mei_nvm,
+ const struct iwl_fw *fw);
+
#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index a84ab02cf9d7..95b3dae7b504 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -347,9 +347,7 @@
#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
-#define WFPM_CTRL_REG_GEN2 0xd03030
#define WFPM_OTP_CFG1_ADDR 0x00a03098
-#define WFPM_OTP_CFG1_ADDR_GEN2 0x00d03098
#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
@@ -455,6 +453,13 @@ enum {
#define UREG_DOORBELL_TO_ISR6_RESUME BIT(19)
#define UREG_DOORBELL_TO_ISR6_PNVM BIT(20)
+/*
+ * From the BZ family on, the driver triggers this bit for suspend and
+ * resume. The driver should update CSR_IPC_SLEEP_CONTROL with the
+ * suspend/resume value before triggering this interrupt.
+ */
+#define UREG_DOORBELL_TO_ISR6_SLEEP_CTRL BIT(31)
+
#define CNVI_MBOX_C 0xA3400C
#define FSEQ_ERROR_CODE 0xA340C8
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 4ebb1871bd1f..1bcaa3598785 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -193,7 +193,10 @@ enum iwl_error_event_table_status {
IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
- IWL_ERROR_EVENT_TABLE_TCM = BIT(3),
+ IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
+ IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
+ IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
+ IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};
/**
@@ -296,6 +299,8 @@ enum iwl_d3_status {
* are sent
* @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
+ * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
+ * e.g. for testing
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -308,6 +313,7 @@ enum iwl_trans_status {
STATUS_TRANS_GOING_IDLE,
STATUS_TRANS_IDLE,
STATUS_TRANS_DEAD,
+ STATUS_SUPPRESS_CMD_ERROR_ONCE,
};
static inline int
@@ -593,7 +599,7 @@ struct iwl_trans_ops {
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
- void (*sw_reset)(struct iwl_trans *trans);
+ int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
bool (*grab_nic_access)(struct iwl_trans *trans);
void (*release_nic_access)(struct iwl_trans *trans);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
@@ -725,7 +731,8 @@ struct iwl_self_init_dram {
* @trigger_tlv: array of pointers to triggers TLVs for debug
* @lmac_error_event_table: addrs of lmacs error tables
* @umac_error_event_table: addr of umac error table
- * @tcm_error_event_table: address of TCM error table
+ * @tcm_error_event_table: address(es) of TCM error table(s)
+ * @rcm_error_event_table: address(es) of RCM error table(s)
* @error_event_table_tlv_status: bitmap that indicates what error table
* pointers were received via TLV. Uses enum &iwl_error_event_table_status
* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
@@ -752,7 +759,8 @@ struct iwl_trans_debug {
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
- u32 tcm_error_event_table;
+ u32 tcm_error_event_table[2];
+ u32 rcm_error_event_table[2];
unsigned int error_event_table_tlv_status;
enum iwl_ini_cfg_state internal_ini_cfg;
@@ -775,6 +783,8 @@ struct iwl_trans_debug {
u32 domains_bitmap;
u32 ucode_preset;
+ bool restart_required;
+ u32 last_tp_resetfw;
};
struct iwl_dma_ptr {
@@ -924,6 +934,7 @@ struct iwl_trans_txqs {
/**
* struct iwl_trans - transport common data
*
+ * @csme_own - true if CSME owns the device (i.e. we couldn't get ownership)
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
@@ -937,6 +948,7 @@ struct iwl_trans_txqs {
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
+ * @hw_rev_step: The MAC step of the HW
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @wide_cmd_header: true when ucode supports wide command header format
@@ -958,6 +970,7 @@ struct iwl_trans_txqs {
* @iwl_trans_txqs: transport tx queues data.
*/
struct iwl_trans {
+ bool csme_own;
const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
const struct iwl_cfg_trans_params *trans_cfg;
@@ -969,6 +982,7 @@ struct iwl_trans {
struct device *dev;
u32 max_skb_frags;
u32 hw_rev;
+ u32 hw_rev_step;
u32 hw_rf_id;
u32 hw_id;
char hw_id_str[52];
@@ -1382,10 +1396,12 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
trans->ops->set_pmi(trans, state);
}
-static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
+static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
+ bool retake_ownership)
{
if (trans->ops->sw_reset)
- trans->ops->sw_reset(trans);
+ return trans->ops->sw_reset(trans, retake_ownership);
+ return 0;
}
static inline void
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/Makefile b/drivers/net/wireless/intel/iwlwifi/mei/Makefile
new file mode 100644
index 000000000000..8e3ef0347db7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_IWLMEI) += iwlmei.o
+iwlmei-y += main.o
+iwlmei-y += net.o
+iwlmei-$(CONFIG_IWLWIFI_DEVICE_TRACING) += trace.o
+CFLAGS_trace.o := -I$(src)
+
+ccflags-y += -I $(srctree)/$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/internal.h b/drivers/net/wireless/intel/iwlwifi/mei/internal.h
new file mode 100644
index 000000000000..92fea7dd71e2
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/internal.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#ifndef __IWLMEI_INTERNAL_H_
+#define __IWLMEI_INTERNAL_H_
+
+#include <uapi/linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "sap.h"
+
+rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *skb,
+ const struct iwl_sap_oob_filters *filters,
+ bool *pass_to_csme);
+
+void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx);
+
+#endif /* __IWLMEI_INTERNAL_H_ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
new file mode 100644
index 000000000000..67122cfa2292
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#ifndef __iwl_mei_h__
+#define __iwl_mei_h__
+
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+
+/**
+ * DOC: Introduction
+ *
+ * iwlmei is the kernel module that is in charge of the communication between
+ * the iwlwifi driver and the CSME firmware's WLAN driver. This communication
+ * uses the SAP protocol defined in another file.
+ * iwlwifi can request or release ownership on the WiFi device through iwlmei.
+ * iwlmei may notify iwlwifi about certain events: what filter iwlwifi should
+ * use to pass inbound packets through to the CSME firmware, for example. iwlmei
+ * may also use iwlwifi to send traffic. This means that we need communication
+ * from iwlmei to iwlwifi and the other way around.
+ */
+
+/**
+ * DOC: Life cycle
+ *
+ * iwlmei exports symbols that are needed by iwlwifi so that iwlmei will always
+ * be loaded when iwlwifi is alive. iwlwifi registers itself to iwlmei and
+ * provides the pointers to the functions that iwlmei calls whenever needed.
+ * iwlwifi calls iwlmei through direct and context-free function calls.
+ * It is assumed that only one device is accessible to the CSME firmware and
+ * under the scope of iwlmei so that it is valid not to have any context passed
+ * to iwlmei's functions.
+ *
+ * There are cases in which iwlmei can't access the CSME firmware, because the
+ * CSME firmware is undergoing a reset, or the mei bus decided to unbind the
+ * device. In those cases, iwlmei must not send requests over the mei
+ * bus. Instead, it needs to cache the requests from iwlwifi and fulfill them
+ * when the mei bus is available again.
+ *
+ * iwlmei can call iwlwifi as long as iwlwifi is registered to iwlmei. When
+ * iwlwifi goes down (the PCI device is unbound, or the iwlwifi is unloaded)
+ * iwlwifi needs to unregister from iwlmei.
+ */
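+
+/*
+ * A minimal sketch of the expected call order, assuming a hypothetical
+ * driver context @drv and ops table @drv_mei_ops (both illustrative,
+ * not part of this API):
+ *
+ * err = iwl_mei_register(drv, &drv_mei_ops);
+ * if (err)
+ * return err;
+ * ...
+ * iwl_mei_set_netdev(NULL);
+ * iwl_mei_start_unregister();
+ * ... finish the driver's down flow ...
+ * iwl_mei_unregister_complete();
+ */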
+
+/**
+ * DOC: Memory layout
+ *
+ * Since iwlwifi calls iwlmei without any context, iwlmei needs to hold a
+ * global pointer to its data (which is in the mei client device's private
+ * data area). If there was no bind on the mei bus, this pointer is NULL and
+ * iwlmei knows not to access the CSME firmware upon requests from iwlwifi.
+ *
+ * iwlmei needs to cache requests from iwlwifi when there is no mei client
+ * device available (when iwlmei has been removed from the mei bus). In this
+ * case, all iwlmei's data that resides in the mei client device's private data
+ * area is unavailable. For this specific case, a separate caching area is
+ * needed.
+ */
+
+/**
+ * DOC: Concurrency
+ *
+ * iwlwifi can call iwlmei at any time. iwlmei will take care to synchronize
+ * the calls from iwlwifi with its internal flows. iwlwifi must not call iwlmei
+ * in flows that cannot sleep. Moreover, iwlwifi must not call iwlmei in flows
+ * that originated from iwlmei.
+ */
+
+/**
+ * DOC: Probe and remove from mei bus driver
+ *
+ * When the mei bus driver enumerates its devices, it calls the iwlmei's probe
+ * function which will send the %SAP_ME_MSG_START message. The probe completes
+ * before the response (%SAP_ME_MSG_START_OK) is received. This response will
+ * be handled by the Rx path. Once it arrives, the connection to the CSME
+ * firmware is considered established and iwlwifi's requests can be serviced
+ * by the CSME firmware.
+ *
+ * When the mei bus driver removes the device, iwlmei loses all the data that
+ * was attached to the mei client device. It clears the global pointer to the
+ * mei client device since it is not available anymore. This will cause all the
+ * requests coming from iwlwifi to be cached. This flow takes the global mutex
+ * to be synchronized with all the requests coming from iwlwifi.
+ */
+
+/**
+ * DOC: Driver load when CSME owns the device
+ *
+ * When the driver (iwlwifi) is loaded while CSME owns the device,
+ * it'll ask CSME to release the device through HW registers. CSME
+ * will release the device only in the case that there is no connection
+ * through the mei bus. If there is a mei bus connection, CSME will refuse
+ * to release the ownership on the device through the HW registers. In that
+ * case, iwlwifi must first request ownership using the SAP protocol.
+ *
+ * Once iwlwifi requests ownership through the SAP protocol, CSME will
+ * grant the ownership on the device through the HW registers as well.
+ * In order to request ownership over SAP, we first need to have an interface
+ * which means that we need to register to mac80211.
+ * This can't happen before we get the NVM that contains all the capabilities
+ * of the device. Reading the NVM usually requires loading the firmware, but
+ * this is impossible as long as we don't have ownership on the device.
+ * In order to solve this chicken and egg problem, the host driver can get
+ * the NVM through CSME which owns the device. It can send
+ * %SAP_MSG_NOTIF_GET_NVM, which will be replied by %SAP_MSG_NOTIF_NVM with
+ * the NVM's content that the host driver needs.
+ */
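+
+/*
+ * A minimal sketch of this flow from the host driver's side; error
+ * handling and the mac80211 registration itself are elided:
+ *
+ * struct iwl_mei_nvm *mei_nvm = iwl_mei_get_nvm();
+ *
+ * ... build the NVM data, register to mac80211, then ...
+ * ret = iwl_mei_get_ownership();
+ * ... ret == 0 means the host now owns the device and may
+ * load the firmware ...
+ */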
+
+/**
+ * DOC: CSME behavior regarding the ownership requests
+ *
+ * The ownership requests from the host can come in two different ways:
+ * - the HW registers in iwl_pcie_set_hw_ready
+ * - using the Software Arbitration Protocol (SAP)
+ *
+ * The host can ask CSME who owns the device with %SAP_MSG_NOTIF_WHO_OWNS_NIC,
+ * and it can request ownership with %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP.
+ * The host will first use %SAP_MSG_NOTIF_WHO_OWNS_NIC to know what state
+ * CSME is in. In case CSME thinks it owns the device, the host can ask for
+ * ownership with %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP.
+ *
+ * Here is the table that describes CSME's behavior upon an ownership request:
+ *
+ * +-------------------+------------+--------------+-----------------------------+------------+
+ * | State | HW reg bit | Reply for | Event | HW reg bit |
+ * | | before | WHO_OWNS_NIC | | after |
+ * +===================+============+==============+=============================+============+
+ * | WiAMT not | 0 | Host | HW register or | 0 |
+ * | operational | Host owner | | HOST_ASKS_FOR_NIC_OWNERSHIP | Host owner |
+ * +-------------------+------------+--------------+-----------------------------+------------+
+ * | Operational & | 1 | N/A | HW register | 0 |
+ * | SAP down & | CSME owner | | | Host owner |
+ * | no session active | | | | |
+ * +-------------------+------------+--------------+-----------------------------+------------+
+ * | Operational & | 1 | CSME | HW register | 1 |
+ * | SAP up | CSME owner | | | CSME owner |
+ * +-------------------+------------+--------------+-----------------------------+------------+
+ * | Operational & | 1 | CSME | HOST_ASKS_FOR_NIC_OWNERSHIP | 0 |
+ * | SAP up | CSME owner | | | Host owner |
+ * +-------------------+------------+--------------+-----------------------------+------------+
+ */
+
+/**
+ * DOC: Driver load when CSME is associated and a session is active
+ *
+ * A "session" is active when CSME is associated to an access point and the
+ * link is used to attach a remote driver or to control the system remotely.
+ * When a session is active, we want to make sure it won't disconnect when we
+ * take ownership on the device.
+ * In this case, the driver can get the device, but it'll need to make
+ * sure that it'll connect to the exact same AP (same BSSID).
+ * In order to do so, CSME will send the connection parameters through
+ * SAP and then the host can check if it can connect to this same AP.
+ * If yes, it can request ownership through SAP and connect quickly without
+ * scanning all the channels, but just probing the AP on the channel that
+ * CSME was connected to.
+ * In order to signal this specific scenario to iwlwifi, iwlmei will
+ * immediately require iwlwifi to report RF-Kill to the network stack. This
+ * RF-Kill will prevent the stack from getting the device, and it has a reason
+ * that tells the userspace that the device is in RF-Kill because it is not
+ * owned by the host. Once the userspace has configured the right profile,
+ * it'll be able to let iwlmei know that it can request ownership over SAP
+ * which will remove the RF-Kill, and finally allow the host to connect.
+ * The host then has 3 seconds to connect (including DHCP). If the host
+ * fails to connect within those 3 seconds, CSME will take the device back.
+ */
+
+/**
+ * DOC: Datapath
+ *
+ * CSME can transmit packets, through the netdev that it gets from the wifi
+ * driver. It'll send packets in the 802.3 format and simply call
+ * dev_queue_xmit.
+ *
+ * For Rx, iwlmei registers a Rx handler that it attaches to the netdev. iwlmei
+ * may catch packets and send them to CSME; it can then either drop them so
+ * that they are invisible to user space, or let them go on to user space.
+ *
+ * Packets transmitted by the user space do not need to be forwarded to CSME
+ * with the exception of the DHCP request. In order to know what IP is used
+ * by the user space, CSME needs to get the DHCP request. See
+ * iwl_mei_tx_copy_to_csme().
+ */
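+
+/*
+ * A minimal sketch of the wifi driver's side of this datapath; @ndev,
+ * @skb and @ivlen are illustrative:
+ *
+ * rtnl_lock();
+ * iwl_mei_set_netdev(ndev); (when the interface is added)
+ * rtnl_unlock();
+ *
+ * then, for every packet the driver transmits:
+ * iwl_mei_tx_copy_to_csme(skb, ivlen);
+ * ... hand the skb to the hardware queues ...
+ */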
+
+/**
+ * enum iwl_mei_nvm_caps - capabilities for MEI NVM
+ * @MEI_NVM_CAPS_LARI_SUPPORT: Lari is supported
+ * @MEI_NVM_CAPS_11AX_SUPPORT: 11AX is supported
+ */
+enum iwl_mei_nvm_caps {
+ MEI_NVM_CAPS_LARI_SUPPORT = BIT(0),
+ MEI_NVM_CAPS_11AX_SUPPORT = BIT(1),
+};
+
+/**
+ * struct iwl_mei_nvm - used to pass the NVM from CSME
+ * @hw_addr: The MAC address
+ * @n_hw_addrs: The number of MAC addresses
+ * @reserved: For alignment.
+ * @radio_cfg: The radio configuration.
+ * @caps: See &enum iwl_mei_nvm_caps.
+ * @nvm_version: The version of the NVM.
+ * @channels: The data for each channel.
+ *
+ * If a field is added, it must correspond to the SAP structure.
+ */
+struct iwl_mei_nvm {
+ u8 hw_addr[ETH_ALEN];
+ u8 n_hw_addrs;
+ u8 reserved;
+ u32 radio_cfg;
+ u32 caps;
+ u32 nvm_version;
+ u32 channels[110];
+};
+
+/**
+ * enum iwl_mei_pairwise_cipher - cipher for UCAST key
+ * @IWL_MEI_CIPHER_NONE: none
+ * @IWL_MEI_CIPHER_CCMP: ccmp
+ * @IWL_MEI_CIPHER_GCMP: gcmp
+ * @IWL_MEI_CIPHER_GCMP_256: gcmp 256
+ *
+ * Note that those values are dictated by the CSME firmware API (see sap.h)
+ */
+enum iwl_mei_pairwise_cipher {
+ IWL_MEI_CIPHER_NONE = 0,
+ IWL_MEI_CIPHER_CCMP = 4,
+ IWL_MEI_CIPHER_GCMP = 8,
+ IWL_MEI_CIPHER_GCMP_256 = 9,
+};
+
+/**
+ * enum iwl_mei_akm_auth - a combination of AKM and AUTH method
+ * @IWL_MEI_AKM_AUTH_OPEN: No encryption
+ * @IWL_MEI_AKM_AUTH_RSNA: 1X profile
+ * @IWL_MEI_AKM_AUTH_RSNA_PSK: PSK profile
+ * @IWL_MEI_AKM_AUTH_SAE: SAE profile
+ *
+ * Note that those values are dictated by the CSME firmware API (see sap.h)
+ */
+enum iwl_mei_akm_auth {
+ IWL_MEI_AKM_AUTH_OPEN = 0,
+ IWL_MEI_AKM_AUTH_RSNA = 6,
+ IWL_MEI_AKM_AUTH_RSNA_PSK = 7,
+ IWL_MEI_AKM_AUTH_SAE = 9,
+};
+
+/**
+ * struct iwl_mei_conn_info - connection info
+ * @lp_state: link protection state
+ * @auth_mode: authentication mode
+ * @ssid_len: the length of SSID
+ * @ssid: the SSID
+ * @pairwise_cipher: the cipher used for unicast packets
+ * @channel: the associated channel
+ * @band: the associated band
+ * @bssid: the BSSID
+ */
+struct iwl_mei_conn_info {
+ u8 lp_state;
+ u8 auth_mode;
+ u8 ssid_len;
+ u8 channel;
+ u8 band;
+ u8 pairwise_cipher;
+ u8 bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+};
+
+/**
+ * struct iwl_mei_colloc_info - collocated AP info
+ * @channel: the channel of the collocated AP
+ * @bssid: the BSSID of the collocated AP
+ */
+struct iwl_mei_colloc_info {
+ u8 channel;
+ u8 bssid[ETH_ALEN];
+};
+
+/*
+ * struct iwl_mei_ops - driver's operations called by iwlmei
+ * Operations will not be called more than once concurrently.
+ * It's not allowed to call iwlmei functions from this context.
+ *
+ * @me_conn_status: provide information about CSME's current connection.
+ * @rfkill: called when the wifi driver should report a change in the rfkill
+ * status.
+ * @roaming_forbidden: indicates whether roaming is forbidden.
+ * @sap_connected: indicate that SAP is now connected. Will be called in case
+ * the wifi driver registered to iwlmei before SAP connection succeeded or
+ * when the SAP connection is re-established.
+ * @nic_stolen: this means that the device is no longer available. The device can
+ * still be used until the callback returns.
+ */
+struct iwl_mei_ops {
+ void (*me_conn_status)(void *priv,
+ const struct iwl_mei_conn_info *conn_info);
+ void (*rfkill)(void *priv, bool blocked);
+ void (*roaming_forbidden)(void *priv, bool forbidden);
+ void (*sap_connected)(void *priv);
+ void (*nic_stolen)(void *priv);
+};
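+
+/*
+ * A hedged example of an ops table a wifi driver might pass to
+ * iwl_mei_register(); the callback names are hypothetical:
+ *
+ * static const struct iwl_mei_ops drv_mei_ops = {
+ * .me_conn_status = drv_me_conn_status,
+ * .rfkill = drv_mei_rfkill,
+ * .roaming_forbidden = drv_mei_roaming_forbidden,
+ * .sap_connected = drv_mei_sap_connected,
+ * .nic_stolen = drv_mei_nic_stolen,
+ * };
+ */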
+
+#if IS_ENABLED(CONFIG_IWLMEI)
+
+/**
+ * iwl_mei_is_connected() - is the connection to the CSME firmware established?
+ *
+ * Return: true if we have a SAP connection
+ */
+bool iwl_mei_is_connected(void);
+
+/**
+ * iwl_mei_get_nvm() - returns the NVM for the device
+ *
+ * It is the caller's responsibility to free the memory returned
+ * by this function.
+ * This function blocks (sleeps) until the NVM is ready.
+ *
+ * Return: the NVM as received from CSME
+ */
+struct iwl_mei_nvm *iwl_mei_get_nvm(void);
+
+/**
+ * iwl_mei_get_ownership() - request ownership
+ *
+ * This function blocks until ownership is granted or the timeout expires.
+ *
+ * Return: 0 in case we could get ownership on the device
+ */
+int iwl_mei_get_ownership(void);
+
+/**
+ * iwl_mei_set_rfkill_state() - set SW and HW RF kill states
+ * @hw_rfkill: HW RF kill state.
+ * @sw_rfkill: SW RF kill state.
+ *
+ * This function must be called when SW RF kill is issued by the user.
+ */
+void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill);
+
+/**
+ * iwl_mei_set_nic_info() - set mac address
+ * @mac_address: mac address to set
+ * @nvm_address: NVM mac address to set
+ *
+ * This function must be called upon mac address change.
+ */
+void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address);
+
+/**
+ * iwl_mei_set_country_code() - set new country code
+ * @mcc: the new applied MCC
+ *
+ * This function must be called upon country code update
+ */
+void iwl_mei_set_country_code(u16 mcc);
+
+/**
+ * iwl_mei_set_power_limit() - set TX power limit
+ * @power_limit: pointer to an array of 10 elements (le16) represents the power
+ * restrictions per chain.
+ *
+ * This function must be called upon power restrictions change
+ */
+void iwl_mei_set_power_limit(const __le16 *power_limit);
+
+/**
+ * iwl_mei_register() - register the wifi driver to iwlmei
+ * @priv: a pointer to the wifi driver's context. Cannot be NULL.
+ * @ops: the ops structure.
+ *
+ * Return: 0 unless something went wrong. It is illegal to call any
+ * other API function before this function is called and succeeds.
+ *
+ * Only one wifi driver instance (wifi device instance really)
+ * can register at a time.
+ */
+int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops);
+
+/**
+ * iwl_mei_start_unregister() - unregister the wifi driver from iwlmei
+ *
+ * From this point on, iwlmei will no longer use the callbacks provided by
+ * the driver, but the device is still usable.
+ */
+void iwl_mei_start_unregister(void);
+
+/**
+ * iwl_mei_unregister_complete() - complete the unregistration
+ *
+ * Must be called after iwl_mei_start_unregister. When this function returns,
+ * the device is owned by CSME.
+ */
+void iwl_mei_unregister_complete(void);
+
+/**
+ * iwl_mei_set_netdev() - sets the netdev for Tx / Rx.
+ * @netdev: the net_device
+ *
+ * The caller should set the netdev to a non-NULL value when the
+ * interface is added. Packets might be sent to the driver immediately
+ * afterwards.
+ * The caller should set the netdev to NULL when the interface is removed.
+ * This function will call synchronize_net() after setting the netdev to NULL.
+ * Only when this function returns, can the caller assume that iwlmei will
+ * no longer inject packets into the netdev's Tx path.
+ *
+ * Context: This function can sleep and assumes rtnl_lock is taken.
+ * The netdev must be set to NULL before iwl_mei_start_unregister() is called.
+ */
+void iwl_mei_set_netdev(struct net_device *netdev);
+
+/**
+ * iwl_mei_tx_copy_to_csme() - must be called for each packet sent by
+ * the wifi driver.
+ * @skb: the skb sent
+ * @ivlen: the size of the IV that needs to be skipped after the MAC and
+ * before the SNAP header.
+ *
+ * This function doesn't take any lock, it simply tries to catch DHCP
+ * packets sent by the wifi driver. If the packet is a DHCP packet, it
+ * will send it to CSME. This function must not be called for virtual
+ * interfaces that are not monitored by CSME, meaning it must be called
+ * only for packets transmitted by the netdevice that was registered
+ * with iwl_mei_set_netdev().
+ */
+void iwl_mei_tx_copy_to_csme(struct sk_buff *skb, unsigned int ivlen);
+
+/**
+ * iwl_mei_host_associated() - must be called when iwlwifi associated.
+ * @conn_info: pointer to the connection info structure.
+ * @colloc_info: pointer to the collocated AP info. This is relevant only in
+ * case of UHB associated AP, otherwise set to NULL.
+ */
+void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
+ const struct iwl_mei_colloc_info *colloc_info);
+
+/**
+ * iwl_mei_host_disassociated() - must be called when iwlwifi disassociated.
+ */
+void iwl_mei_host_disassociated(void);
+
+/**
+ * iwl_mei_device_down() - must be called when the device is down
+ */
+void iwl_mei_device_down(void);
+
+#else
+
+static inline bool iwl_mei_is_connected(void)
+{ return false; }
+
+static inline struct iwl_mei_nvm *iwl_mei_get_nvm(void)
+{ return NULL; }
+
+static inline int iwl_mei_get_ownership(void)
+{ return 0; }
+
+static inline void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
+{}
+
+static inline void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
+{}
+
+static inline void iwl_mei_set_country_code(u16 mcc)
+{}
+
+static inline void iwl_mei_set_power_limit(const __le16 *power_limit)
+{}
+
+static inline int iwl_mei_register(void *priv,
+ const struct iwl_mei_ops *ops)
+{ return 0; }
+
+static inline void iwl_mei_start_unregister(void)
+{}
+
+static inline void iwl_mei_unregister_complete(void)
+{}
+
+static inline void iwl_mei_set_netdev(struct net_device *netdev)
+{}
+
+static inline void iwl_mei_tx_copy_to_csme(struct sk_buff *skb,
+ unsigned int ivlen)
+{}
+
+static inline void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
+ const struct iwl_mei_colloc_info *colloc_info)
+{}
+
+static inline void iwl_mei_host_disassociated(void)
+{}
+
+static inline void iwl_mei_device_down(void)
+{}
+
+#endif /* CONFIG_IWLMEI */
+
+#endif /* __iwl_mei_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
new file mode 100644
index 000000000000..d9733aaf6f6e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -0,0 +1,2001 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/ieee80211.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/rcupdate.h>
+#include <linux/debugfs.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <net/cfg80211.h>
+
+#include "internal.h"
+#include "iwl-mei.h"
+#include "trace.h"
+#include "trace-data.h"
+#include "sap.h"
+
+MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface");
+MODULE_LICENSE("GPL");
+
+#define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \
+ 0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65)
+
+/*
+ * Since iwlwifi calls iwlmei without any context, hold a pointer to the
+ * mei_cl_device structure here.
+ * Define a mutex that will synchronize all the flows between iwlwifi and
+ * iwlmei.
+ * Note that iwlmei can't have several instances, so it is OK to have static
+ * variables here.
+ */
+static struct mei_cl_device *iwl_mei_global_cldev;
+static DEFINE_MUTEX(iwl_mei_mutex);
+static unsigned long iwl_mei_status;
+
+enum iwl_mei_status_bits {
+ IWL_MEI_STATUS_SAP_CONNECTED,
+};
+
+bool iwl_mei_is_connected(void)
+{
+ return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_is_connected);
+
+#define SAP_VERSION 3
+#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */
+
+struct iwl_sap_q_ctrl_blk {
+ __le32 wr_ptr;
+ __le32 rd_ptr;
+ __le32 size;
+};
+
+enum iwl_sap_q_idx {
+ SAP_QUEUE_IDX_NOTIF = 0,
+ SAP_QUEUE_IDX_DATA,
+ SAP_QUEUE_IDX_MAX,
+};
+
+struct iwl_sap_dir {
+ __le32 reserved;
+ struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX];
+};
+
+enum iwl_sap_dir_idx {
+ SAP_DIRECTION_HOST_TO_ME = 0,
+ SAP_DIRECTION_ME_TO_HOST,
+ SAP_DIRECTION_MAX,
+};
+
+struct iwl_sap_shared_mem_ctrl_blk {
+ __le32 sap_id;
+ __le32 size;
+ struct iwl_sap_dir dir[SAP_DIRECTION_MAX];
+};
+
+/*
+ * The shared area has the following layout:
+ *
+ * +-----------------------------------+
+ * |struct iwl_sap_shared_mem_ctrl_blk|
+ * +-----------------------------------+
+ * |Host -> ME notif queue             |
+ * +-----------------------------------+
+ * |Host -> ME data queue              |
+ * +-----------------------------------+
+ * |ME -> Host notif queue             |
+ * +-----------------------------------+
+ * |ME -> Host data queue              |
+ * +-----------------------------------+
+ * |SAP control block id (SAP!)        |
+ * +-----------------------------------+
+ */
+
+#define SAP_H2M_DATA_Q_SZ 48256
+#define SAP_M2H_DATA_Q_SZ 24128
+#define SAP_H2M_NOTIF_Q_SZ 2240
+#define SAP_M2H_NOTIF_Q_SZ 62720
+
+#define _IWL_MEI_SAP_SHARED_MEM_SZ \
+ (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
+ SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
+ SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)
+
+#define IWL_MEI_SAP_SHARED_MEM_SZ \
+ (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))
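+
+/*
+ * Worked example of the size computation, assuming PAGE_SIZE == 4096:
+ * the control block above is 64 bytes (4 + 4 + 2 * (4 + 2 * 12)), so
+ * 64 + 48256 + 2240 + 24128 + 62720 + 4 = 137412 bytes, rounded up to
+ * 34 pages (139264 bytes).
+ */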
+
+struct iwl_mei_shared_mem_ptrs {
+ struct iwl_sap_shared_mem_ctrl_blk *ctrl;
+ void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
+ size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
+};
+
+struct iwl_mei_filters {
+ struct rcu_head rcu_head;
+ struct iwl_sap_oob_filters filters;
+};
+
+/**
+ * struct iwl_mei - holds the private data for iwl_mei
+ *
+ * @get_nvm_wq: the wait queue for the get_nvm flow
+ * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA
+ * message. Used so that we can send CHECK_SHARED_AREA from atomic
+ * contexts.
+ * @get_ownership_wq: the wait queue for the get ownership flow
+ * @shared_mem: the memory that is shared between CSME and the host
+ * @cldev: the pointer to the MEI client device
+ * @nvm: the data returned by the CSME for the NVM
+ * @filters: the filters sent by CSME
+ * @got_ownership: true if we own the device
+ * @amt_enabled: true if CSME has wireless enabled
+ * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI
+ * bus, but rather need to wait until send_csa_msg_wk runs
+ * @csme_taking_ownership: true when CSME is taking ownership. Used to remember
+ * to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
+ * flow.
+ * @csa_throttle_end_wk: used when &csa_throttled is true
+ * @data_q_lock: protects the access to the data queues which are
+ * accessed without the mutex.
+ * @sap_seq_no: the sequence number for the SAP messages
+ * @seq_no: the sequence number for the SAP ME messages
+ * @dbgfs_dir: the debugfs dir entry
+ */
+struct iwl_mei {
+ wait_queue_head_t get_nvm_wq;
+ struct work_struct send_csa_msg_wk;
+ wait_queue_head_t get_ownership_wq;
+ struct iwl_mei_shared_mem_ptrs shared_mem;
+ struct mei_cl_device *cldev;
+ struct iwl_mei_nvm *nvm;
+ struct iwl_mei_filters __rcu *filters;
+ bool got_ownership;
+ bool amt_enabled;
+ bool csa_throttled;
+ bool csme_taking_ownership;
+ struct delayed_work csa_throttle_end_wk;
+ spinlock_t data_q_lock;
+
+ atomic_t sap_seq_no;
+ atomic_t seq_no;
+
+ struct dentry *dbgfs_dir;
+};
+
+/**
+ * struct iwl_mei_cache - cache for the parameters from iwlwifi
+ * @ops: Callbacks to iwlwifi.
+ * @netdev: The netdev that will be used to transmit / receive packets.
+ * @conn_info: The connection info message triggered by iwlwifi's association.
+ * @power_limit: pointer to an array of 10 elements (le16) represents the power
+ * restrictions per chain.
+ * @rf_kill: rf kill state.
+ * @mcc: MCC info
+ * @mac_address: interface MAC address.
+ * @nvm_address: NVM MAC address.
+ * @priv: A pointer to iwlwifi.
+ *
+ * This is used to cache the configuration coming from iwlwifi. The data
+ * is cached here so that we can buffer the configuration even if we don't have
+ * a bind from the mei bus and hence no iwl_mei structure.
+ */
+struct iwl_mei_cache {
+ const struct iwl_mei_ops *ops;
+ struct net_device __rcu *netdev;
+ const struct iwl_sap_notif_connection_info *conn_info;
+ const __le16 *power_limit;
+ u32 rf_kill;
+ u16 mcc;
+ u8 mac_address[6];
+ u8 nvm_address[6];
+ void *priv;
+};
+
+static struct iwl_mei_cache iwl_mei_cache = {
+ .rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED
+};
+
+static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+
+ if (mei_cldev_dma_unmap(cldev))
+ dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n");
+ memset(&mei->shared_mem, 0, sizeof(mei->shared_mem));
+}
+
+#define HBM_DMA_BUF_ID_WLAN 1
+
+static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+ struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
+
+ mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN,
+ IWL_MEI_SAP_SHARED_MEM_SZ);
+
+ if (IS_ERR(mem->ctrl)) {
+ int ret = PTR_ERR(mem->ctrl);
+
+ dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
+ ret);
+ mem->ctrl = NULL;
+
+ return ret;
+ }
+
+ memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ);
+
+ return 0;
+}
+
+static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
+{
+ struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
+ struct iwl_sap_dir *h2m;
+ struct iwl_sap_dir *m2h;
+ int dir, queue;
+ u8 *q_head;
+
+ mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
+
+ mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl));
+
+ h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
+ m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
+
+ h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
+ cpu_to_le32(SAP_H2M_DATA_Q_SZ);
+ h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
+ cpu_to_le32(SAP_H2M_NOTIF_Q_SZ);
+ m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
+ cpu_to_le32(SAP_M2H_DATA_Q_SZ);
+ m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
+ cpu_to_le32(SAP_M2H_NOTIF_Q_SZ);
+
+ /* q_head points to the start of the first queue */
+ q_head = (void *)(mem->ctrl + 1);
+
+ /* Initialize the queue heads */
+ for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) {
+ for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) {
+ mem->q_head[dir][queue] = q_head;
+ q_head +=
+ le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
+ mem->q_size[dir][queue] =
+ le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
+ }
+ }
+
+ *(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
+}
+
+static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
+ struct iwl_sap_q_ctrl_blk *notif_q,
+ u8 *q_head,
+ const struct iwl_sap_hdr *hdr,
+ u32 q_sz)
+{
+ u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
+ u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
+ size_t room_in_buf;
+ size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len);
+
+ if (rd > q_sz || wr > q_sz) {
+ dev_err(&cldev->dev,
+ "Pointers are past the end of the buffer\n");
+ return -EINVAL;
+ }
+
+ room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
+
+ /* we don't have enough room for the data to write */
+ if (room_in_buf < tx_sz) {
+ dev_err(&cldev->dev,
+ "Not enough room in the buffer\n");
+ return -ENOSPC;
+ }
+
+ if (wr + tx_sz <= q_sz) {
+ memcpy(q_head + wr, hdr, tx_sz);
+ } else {
+ memcpy(q_head + wr, hdr, q_sz - wr);
+ memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
+ }
+
+ WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
+ return 0;
+}
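+
+/*
+ * Worked example of the free-room computation above, assuming a
+ * hypothetical q_sz of 16: with rd = 10 and wr = 4 the room is
+ * rd - wr = 6 bytes; with rd = 4 and wr = 10 it is q_sz - wr + rd = 10
+ * bytes. A write that crosses the end of the buffer is split into two
+ * memcpy() calls and the write pointer then wraps modulo q_sz.
+ */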
+
+static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei)
+{
+ struct iwl_sap_q_ctrl_blk *notif_q;
+ struct iwl_sap_dir *dir;
+
+ dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
+ notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
+
+ if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr))
+ return true;
+
+ notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
+ return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr);
+}
+
+static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+ struct iwl_sap_me_msg_start msg = {
+ .hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
+ .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
+ };
+ int ret;
+
+ lockdep_assert_held(&iwl_mei_mutex);
+
+ if (mei->csa_throttled)
+ return 0;
+
+ trace_iwlmei_me_msg(&msg.hdr, true);
+ ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
+ if (ret != sizeof(msg)) {
+ dev_err(&cldev->dev,
+ "failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n",
+ ret);
+ return ret;
+ }
+
+ mei->csa_throttled = true;
+
+ schedule_delayed_work(&mei->csa_throttle_end_wk,
+ msecs_to_jiffies(100));
+
+ return 0;
+}
+
+static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk)
+{
+ struct iwl_mei *mei =
+ container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);
+
+ mutex_lock(&iwl_mei_mutex);
+
+ mei->csa_throttled = false;
+
+ if (iwl_mei_host_to_me_data_pending(mei))
+ iwl_mei_send_check_shared_area(mei->cldev);
+
+ mutex_unlock(&iwl_mei_mutex);
+}
+
+static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev,
+ struct iwl_sap_hdr *hdr)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+ struct iwl_sap_q_ctrl_blk *notif_q;
+ struct iwl_sap_dir *dir;
+ void *q_head;
+ u32 q_sz;
+ int ret;
+
+ lockdep_assert_held(&iwl_mei_mutex);
+
+ if (!mei->shared_mem.ctrl) {
+ dev_err(&cldev->dev,
+ "No shared memory, can't send any SAP message\n");
+ return -EINVAL;
+ }
+
+ if (!iwl_mei_is_connected()) {
+ dev_err(&cldev->dev,
+ "Can't send a SAP message if we're not connected\n");
+ return -ENODEV;
+ }
+
+ hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
+ dev_dbg(&cldev->dev, "Sending %d\n", hdr->type);
+
+ dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
+ notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
+ q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
+ q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
+ ret = iwl_mei_write_cyclic_buf(cldev, notif_q, q_head, hdr, q_sz);
+
+ if (ret < 0)
+ return ret;
+
+ trace_iwlmei_sap_cmd(hdr, true);
+
+ return iwl_mei_send_check_shared_area(cldev);
+}
+
+void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
+{
+ struct iwl_sap_q_ctrl_blk *notif_q;
+ struct iwl_sap_dir *dir;
+ struct iwl_mei *mei;
+ size_t room_in_buf;
+ size_t tx_sz;
+ size_t hdr_sz;
+ u32 q_sz;
+ u32 rd;
+ u32 wr;
+ void *q_head;
+
+ if (!iwl_mei_global_cldev)
+ return;
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ /*
+ * We access this path for Rx packets (the more common case)
+ * and from Tx path when we send DHCP packets, the latter is
+ * very unlikely.
+ * Take the lock already here to make sure we see that remove()
+ * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit.
+ */
+ spin_lock_bh(&mei->data_q_lock);
+
+ if (!iwl_mei_is_connected()) {
+ spin_unlock_bh(&mei->data_q_lock);
+ return;
+ }
+
+ /*
+ * We are in a RCU critical section and the remove from the CSME bus
+ * which would free this memory waits for the readers to complete (this
+ * is done in netdev_rx_handler_unregister).
+ */
+ dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
+ notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
+ q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
+ q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
+
+ rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
+ wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
+ hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) :
+ sizeof(struct iwl_sap_hdr);
+ tx_sz = skb->len + hdr_sz;
+
+ if (rd > q_sz || wr > q_sz) {
+ dev_err(&mei->cldev->dev,
+ "can't write the data: pointers are past the end of the buffer\n");
+ goto out;
+ }
+
+ room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;
+
+ /* we don't have enough room for the data to write */
+ if (room_in_buf < tx_sz) {
+ dev_err(&mei->cldev->dev,
+ "Not enough room in the buffer for this data\n");
+ goto out;
+ }
+
+ if (skb_headroom(skb) < hdr_sz) {
+ dev_err(&mei->cldev->dev,
+ "Not enough headroom in the skb to write the SAP header\n");
+ goto out;
+ }
+
+ if (cb_tx) {
+ struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));
+
+ cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
+ cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
+ cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
+ cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX));
+ cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr));
+ trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP);
+ } else {
+ struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr));
+
+ hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET);
+ hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
+ hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
+ trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR);
+ }
+
+ if (wr + tx_sz <= q_sz) {
+ skb_copy_bits(skb, 0, q_head + wr, tx_sz);
+ } else {
+ skb_copy_bits(skb, 0, q_head + wr, q_sz - wr);
+ skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr));
+ }
+
+ WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
+
+out:
+ spin_unlock_bh(&mei->data_q_lock);
+}
+
+static int
+iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type)
+{
+ struct iwl_sap_hdr msg = {
+ .type = cpu_to_le16(type),
+ };
+
+ return iwl_mei_send_sap_msg_payload(cldev, &msg);
+}
+
+static void iwl_mei_send_csa_msg_wk(struct work_struct *wk)
+{
+ struct iwl_mei *mei =
+ container_of(wk, struct iwl_mei, send_csa_msg_wk);
+
+ if (!iwl_mei_is_connected())
+ return;
+
+ mutex_lock(&iwl_mei_mutex);
+
+ iwl_mei_send_check_shared_area(mei->cldev);
+
+ mutex_unlock(&iwl_mei_mutex);
+}
+
+/* Called in a RCU read critical section from netif_receive_skb */
+static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct iwl_mei *mei =
+ rcu_dereference(skb->dev->rx_handler_data);
+ struct iwl_mei_filters *filters = rcu_dereference(mei->filters);
+ bool rx_for_csme = false;
+ rx_handler_result_t res;
+
+ /*
+ * remove() unregisters this handler and calls synchronize_net(), so this
+ * should never happen.
+ */
+ if (!iwl_mei_is_connected()) {
+ dev_err(&mei->cldev->dev,
+ "Got an Rx packet, but we're not connected to SAP?\n");
+ return RX_HANDLER_PASS;
+ }
+
+ if (filters)
+ res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme);
+ else
+ res = RX_HANDLER_PASS;
+
+ /*
+ * The data is already on the ring of the shared area, all we
+ * need to do is to tell the CSME firmware to check what we have
+ * there.
+ */
+ if (rx_for_csme)
+ schedule_work(&mei->send_csa_msg_wk);
+
+ if (res != RX_HANDLER_PASS) {
+ trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR);
+ dev_kfree_skb(skb);
+ }
+
+ return res;
+}
+
+static void
+iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
+ const struct iwl_sap_me_msg_start_ok *rsp,
+ ssize_t len)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+
+ if (len != sizeof(*rsp)) {
+ dev_err(&cldev->dev,
+ "got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
+ dev_err(&cldev->dev,
+ "size is incorrect: %zd instead of %zu\n",
+ len, sizeof(*rsp));
+ return;
+ }
+
+ if (rsp->supported_version != SAP_VERSION) {
+ dev_err(&cldev->dev,
+ "didn't get the expected version: got %d\n",
+ rsp->supported_version);
+ return;
+ }
+
+ mutex_lock(&iwl_mei_mutex);
+ set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
+ /* wifi driver has registered already */
+ if (iwl_mei_cache.ops) {
+ iwl_mei_send_sap_msg(mei->cldev,
+ SAP_MSG_NOTIF_WIFIDR_UP);
+ iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
+ }
+
+ mutex_unlock(&iwl_mei_mutex);
+}
+
+static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev,
+ const struct iwl_sap_csme_filters *filters)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+ struct iwl_mei_filters *new_filters;
+ struct iwl_mei_filters *old_filters;
+
+ old_filters =
+ rcu_dereference_protected(mei->filters,
+ lockdep_is_held(&iwl_mei_mutex));
+
+ new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL);
+ if (!new_filters)
+ return;
+
+ /* Copy the OOB filters */
+ new_filters->filters = filters->filters;
+
+ rcu_assign_pointer(mei->filters, new_filters);
+
+ if (old_filters)
+ kfree_rcu(old_filters, rcu_head);
+}
+
+static void
+iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
+ const struct iwl_sap_notif_conn_status *status)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+ struct iwl_mei_conn_info conn_info = {
+ .lp_state = le32_to_cpu(status->link_prot_state),
+ .ssid_len = le32_to_cpu(status->conn_info.ssid_len),
+ .channel = status->conn_info.channel,
+ .band = status->conn_info.band,
+ .auth_mode = le32_to_cpu(status->conn_info.auth_mode),
+ .pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher),
+ };
+
+ if (!iwl_mei_cache.ops ||
+ conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid))
+ return;
+
+ memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len);
+ ether_addr_copy(conn_info.bssid, status->conn_info.bssid);
+
+ iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
+
+ /*
+ * Update the Rfkill state in case the host does not own the device:
+ * if we are in Link Protection, ask to not touch the device, else,
+ * unblock rfkill.
+ * If the host owns the device, inform the user space whether it can
+ * roam.
+ */
+ if (mei->got_ownership)
+ iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv,
+ status->link_prot_state);
+ else
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv,
+ status->link_prot_state);
+}
+
+static void iwl_mei_set_init_conf(struct iwl_mei *mei)
+{
+ struct iwl_sap_notif_host_link_up link_msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
+ .hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)),
+ };
+ struct iwl_sap_notif_country_code mcc_msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
+ .hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)),
+ .mcc = cpu_to_le16(iwl_mei_cache.mcc),
+ };
+ struct iwl_sap_notif_sar_limits sar_msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
+ .hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)),
+ };
+ struct iwl_sap_notif_host_nic_info nic_info_msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
+ .hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)),
+ };
+ struct iwl_sap_msg_dw rfkill_msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
+ .hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)),
+ .val = cpu_to_le32(iwl_mei_cache.rf_kill),
+ };
+
+ iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);
+
+ if (iwl_mei_cache.conn_info) {
+ link_msg.conn_info = *iwl_mei_cache.conn_info;
+ iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr);
+ }
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);
+
+ if (iwl_mei_cache.power_limit) {
+ memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit,
+ sizeof(sar_msg.sar_chain_info_table));
+ iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr);
+ }
+
+ ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address);
+ ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address);
+ iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr);
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr);
+}
+
+static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
+ const struct iwl_sap_msg_dw *dw)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+ struct net_device *netdev;
+
+ /*
+ * First take rtnl and only then the mutex to avoid an ABBA
+ * with iwl_mei_set_netdev()
+ */
+ rtnl_lock();
+ mutex_lock(&iwl_mei_mutex);
+
+ netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
+ lockdep_is_held(&iwl_mei_mutex));
+
+ if (mei->amt_enabled == !!le32_to_cpu(dw->val))
+ goto out;
+
+ mei->amt_enabled = !!le32_to_cpu(dw->val);
+
+ if (mei->amt_enabled) {
+ if (netdev)
+ netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
+
+ iwl_mei_set_init_conf(mei);
+ } else {
+ if (iwl_mei_cache.ops)
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
+ if (netdev)
+ netdev_rx_handler_unregister(netdev);
+ }
+
+out:
+ mutex_unlock(&iwl_mei_mutex);
+ rtnl_unlock();
+}
+
+static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
+ const struct iwl_sap_msg_dw *dw)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+
+ mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME);
+}
+
+static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev,
+ const void *payload)
+{
+ /* We can get ownership and driver is registered, go ahead */
+ if (iwl_mei_cache.ops)
+ iwl_mei_send_sap_msg(cldev,
+ SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
+}
+
+static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
+ const void *payload)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+
+ dev_info(&cldev->dev, "CSME takes ownership\n");
+
+ mei->got_ownership = false;
+
+ /*
+ * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver
+ * is finished taking the device down.
+ */
+ mei->csme_taking_ownership = true;
+
+ if (iwl_mei_cache.ops)
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
+}
+
+static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
+ const struct iwl_sap_nvm *sap_nvm)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+ const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm;
+ int i;
+
+ kfree(mei->nvm);
+ mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL);
+ if (!mei->nvm)
+ return;
+
+ ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr);
+ mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs;
+ mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg);
+ mei->nvm->caps = le32_to_cpu(sap_nvm->caps);
+ mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version);
+
+ for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++)
+ mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]);
+
+ wake_up_all(&mei->get_nvm_wq);
+}
+
+static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
+ const struct iwl_sap_msg_dw *dw)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+
+ /*
+ * This means that we can't use the wifi device right now, CSME is not
+ * ready to let us use it.
+ */
+ if (!dw->val) {
+ dev_info(&cldev->dev, "Ownership req denied\n");
+ return;
+ }
+
+ mei->got_ownership = true;
+ wake_up_all(&mei->get_ownership_wq);
+
+ iwl_mei_send_sap_msg(cldev,
+ SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED);
+
+ /* We can now start the connection, unblock rfkill */
+ if (iwl_mei_cache.ops)
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
+}
+
+static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
+ const struct iwl_sap_hdr *hdr)
+{
+ iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG);
+}
+
+static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
+ const struct iwl_sap_hdr *hdr)
+{
+ u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr);
+ u16 type = le16_to_cpu(hdr->type);
+
+ dev_dbg(&cldev->dev,
+ "Got a new SAP message: type %d, len %d, seq %d\n",
+ le16_to_cpu(hdr->type), len,
+ le32_to_cpu(hdr->seq_num));
+
+#define SAP_MSG_HANDLER(_cmd, _handler, _sz) \
+ case SAP_MSG_NOTIF_ ## _cmd: \
+ if (len < _sz) { \
+ dev_err(&cldev->dev, \
+ "Bad size for %d: %u < %u\n", \
+ le16_to_cpu(hdr->type), \
+ (unsigned int)len, \
+ (unsigned int)_sz); \
+ break; \
+ } \
+ mutex_lock(&iwl_mei_mutex); \
+ _handler(cldev, (const void *)hdr); \
+ mutex_unlock(&iwl_mei_mutex); \
+ break
+
+#define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz) \
+ case SAP_MSG_NOTIF_ ## _cmd: \
+ if (len < _sz) { \
+ dev_err(&cldev->dev, \
+ "Bad size for %d: %u < %u\n", \
+ le16_to_cpu(hdr->type), \
+ (unsigned int)len, \
+ (unsigned int)_sz); \
+ break; \
+ } \
+ _handler(cldev, (const void *)hdr); \
+ break
+
+#define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz) \
+ case SAP_MSG_NOTIF_ ## _cmd: \
+ if (len < _sz) { \
+ dev_err(&cldev->dev, \
+ "Bad size for %d: %u < %u\n", \
+ le16_to_cpu(hdr->type), \
+ (unsigned int)len, \
+ (unsigned int)_sz); \
+ break; \
+ } \
+ break
+
+ switch (type) {
+ SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0);
+ SAP_MSG_HANDLER(CSME_FILTERS,
+ iwl_mei_handle_csme_filters,
+ sizeof(struct iwl_sap_csme_filters));
+ SAP_MSG_HANDLER(CSME_CONN_STATUS,
+ iwl_mei_handle_conn_status,
+ sizeof(struct iwl_sap_notif_conn_status));
+ SAP_MSG_HANDLER_NO_LOCK(AMT_STATE,
+ iwl_mei_handle_amt_state,
+ sizeof(struct iwl_sap_msg_dw));
+ SAP_MSG_HANDLER_NO_HANDLER(PONG, 0);
+ SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm,
+ sizeof(struct iwl_sap_nvm));
+ SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ,
+ iwl_mei_handle_rx_host_own_req,
+ sizeof(struct iwl_sap_msg_dw));
+ SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner,
+ sizeof(struct iwl_sap_msg_dw));
+ SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP,
+ iwl_mei_handle_can_release_ownership, 0);
+ SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
+ iwl_mei_handle_csme_taking_ownership, 0);
+ default:
+		/*
+		 * This is not really an error: there are messages that we
+		 * decided to ignore. Still, it is useful to be able to leave
+		 * a note when debugging is enabled.
+		 */
+ dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n",
+ le16_to_cpu(hdr->type), len);
+ }
+
+#undef SAP_MSG_HANDLER
+#undef SAP_MSG_HANDLER_NO_LOCK
+}
+
+static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz,
+ u32 *_rd, u32 wr,
+ void *_buf, u32 len)
+{
+ u8 *buf = _buf;
+ u32 rd = *_rd;
+
+ if (rd + len <= q_sz) {
+ memcpy(buf, q_head + rd, len);
+ rd += len;
+ } else {
+ memcpy(buf, q_head + rd, q_sz - rd);
+ memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd));
+ rd = len - (q_sz - rd);
+ }
+
+ *_rd = rd;
+}
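+
+/*
+ * Worked example (illustrative): with q_sz = 16, rd = 14 and len = 5,
+ * the first memcpy() copies the 2 bytes at offsets 14..15, the second
+ * memcpy() copies the remaining 3 bytes from the start of the queue,
+ * and *_rd ends up at 3.
+ */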
+
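+/*
+ * Worst-case headroom reserved for skbs built from the SAP data queue:
+ * 802.11 QoS header + TKIP IV + RFC 1042 (SNAP) header + ethertype.
+ */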
+#define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) + \
+ IEEE80211_TKIP_IV_LEN + \
+ sizeof(rfc1042_header) + ETH_TLEN)
+
+static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
+ const u8 *q_head, u32 q_sz,
+ u32 rd, u32 wr, ssize_t valid_rx_sz,
+ struct sk_buff_head *tx_skbs)
+{
+ struct iwl_sap_hdr hdr;
+ struct net_device *netdev =
+ rcu_dereference_protected(iwl_mei_cache.netdev,
+ lockdep_is_held(&iwl_mei_mutex));
+
+ if (!netdev)
+ return;
+
+ while (valid_rx_sz >= sizeof(hdr)) {
+ struct ethhdr *ethhdr;
+ unsigned char *data;
+ struct sk_buff *skb;
+ u16 len;
+
+ iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr));
+ valid_rx_sz -= sizeof(hdr);
+ len = le16_to_cpu(hdr.len);
+
+ if (valid_rx_sz < len) {
+ dev_err(&cldev->dev,
+ "Data queue is corrupted: valid data len %zd, len %d\n",
+ valid_rx_sz, len);
+ break;
+ }
+
+		if (len < sizeof(*ethhdr)) {
+			dev_err(&cldev->dev,
+				"Data len is smaller than an ethernet header? len = %d\n",
+				len);
+			break;
+		}
+
+ valid_rx_sz -= len;
+
+ if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) {
+ dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n",
+ le16_to_cpu(hdr.type), len);
+ continue;
+ }
+
+ /* We need enough room for the WiFi header + SNAP + IV */
+		skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
+		/* on allocation failure, stop: the caller syncs rd_ptr to wr_ptr */
+		if (!skb)
+			break;
+
+		skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
+ ethhdr = skb_push(skb, sizeof(*ethhdr));
+
+ iwl_mei_read_from_q(q_head, q_sz, &rd, wr,
+ ethhdr, sizeof(*ethhdr));
+ len -= sizeof(*ethhdr);
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->protocol = ethhdr->h_proto;
+
+ data = skb_put(skb, len);
+ iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len);
+
+ /*
+ * Enqueue the skb here so that it can be sent later when we
+ * do not hold the mutex. TX'ing a packet with a mutex held is
+ * possible, but it wouldn't be nice to forbid the TX path to
+ * call any of iwlmei's functions, since every API from iwlmei
+ * needs the mutex.
+ */
+ __skb_queue_tail(tx_skbs, skb);
+ }
+}
+
+static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev,
+ const u8 *q_head, u32 q_sz,
+ u32 rd, u32 wr, ssize_t valid_rx_sz)
+{
+ struct page *p = alloc_page(GFP_KERNEL);
+ struct iwl_sap_hdr *hdr;
+
+ if (!p)
+ return;
+
+ hdr = page_address(p);
+
+ while (valid_rx_sz >= sizeof(*hdr)) {
+ u16 len;
+
+ iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr));
+ valid_rx_sz -= sizeof(*hdr);
+ len = le16_to_cpu(hdr->len);
+
+ if (valid_rx_sz < len)
+ break;
+
+ iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len);
+
+ trace_iwlmei_sap_cmd(hdr, false);
+ iwl_mei_handle_sap_msg(cldev, hdr);
+ valid_rx_sz -= len;
+ }
+
+ /* valid_rx_sz must be 0 now... */
+ if (valid_rx_sz)
+ dev_err(&cldev->dev,
+ "More data in the buffer although we read it all\n");
+
+ __free_page(p);
+}
+
+static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
+ struct iwl_sap_q_ctrl_blk *notif_q,
+ const u8 *q_head,
+ struct sk_buff_head *skbs,
+ u32 q_sz)
+{
+ u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
+ u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
+ ssize_t valid_rx_sz;
+
+ if (rd > q_sz || wr > q_sz) {
+ dev_err(&cldev->dev,
+ "Pointers are past the buffer limit\n");
+ return;
+ }
+
+ if (rd == wr)
+ return;
+
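+	/*
+	 * Number of valid bytes between rd and wr, accounting for a possible
+	 * wrap of the circular buffer (e.g. q_sz = 16, rd = 14, wr = 3 gives
+	 * 16 - 14 + 3 = 5 valid bytes).
+	 */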
+ valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;
+
+ if (skbs)
+ iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
+ valid_rx_sz, skbs);
+ else
+ iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
+ valid_rx_sz);
+
+	/* Advance the read pointer up to the write pointer */
+ WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
+}
+
+static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+ struct iwl_sap_q_ctrl_blk *notif_q;
+ struct sk_buff_head tx_skbs;
+ struct iwl_sap_dir *dir;
+ void *q_head;
+ u32 q_sz;
+
+ if (!mei->shared_mem.ctrl)
+ return;
+
+ dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
+ notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
+ q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
+ q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
+
+ /*
+	 * Do not hold the mutex here; instead, each and every message
+	 * handler takes it on its own.
+	 * This lets each handler decide when it needs the lock.
+ */
+ iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);
+
+ mutex_lock(&iwl_mei_mutex);
+ dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
+ notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
+ q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
+ q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
+
+ __skb_queue_head_init(&tx_skbs);
+
+ iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);
+
+ if (skb_queue_empty(&tx_skbs)) {
+ mutex_unlock(&iwl_mei_mutex);
+ return;
+ }
+
+ /*
+ * Take the RCU read lock before we unlock the mutex to make sure that
+ * even if the netdev is replaced by another non-NULL netdev right after
+ * we unlock the mutex, the old netdev will still be valid when we
+	 * transmit the frames. We can't allow the netdev to be replaced here
+	 * because the skbs hold a pointer to it.
+ */
+ rcu_read_lock();
+
+ mutex_unlock(&iwl_mei_mutex);
+
+ if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
+ dev_err(&cldev->dev, "Can't Tx without a netdev\n");
+ skb_queue_purge(&tx_skbs);
+ goto out;
+ }
+
+ while (!skb_queue_empty(&tx_skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&tx_skbs);
+
+ trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
+ dev_queue_xmit(skb);
+ }
+
+out:
+ rcu_read_unlock();
+}
+
+static void iwl_mei_rx(struct mei_cl_device *cldev)
+{
+ struct iwl_sap_me_msg_hdr *hdr;
+ u8 msg[100];
+ ssize_t ret;
+
+ ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
+ if (ret < 0) {
+ dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
+ return;
+ }
+
+ if (ret == 0) {
+ dev_err(&cldev->dev, "got an empty response\n");
+ return;
+ }
+
+ hdr = (void *)msg;
+ trace_iwlmei_me_msg(hdr, false);
+
+ switch (le32_to_cpu(hdr->type)) {
+ case SAP_ME_MSG_START_OK:
+ BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
+ sizeof(msg));
+
+ iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
+ break;
+ case SAP_ME_MSG_CHECK_SHARED_AREA:
+ iwl_mei_handle_check_shared_area(cldev);
+ break;
+ default:
+		dev_err(&cldev->dev, "got an unexpected Rx notification: %d\n",
+			le32_to_cpu(hdr->type));
+ break;
+ }
+}
+
+static int iwl_mei_send_start(struct mei_cl_device *cldev)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+ struct iwl_sap_me_msg_start msg = {
+ .hdr.type = cpu_to_le32(SAP_ME_MSG_START),
+ .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
+ .hdr.len = cpu_to_le32(sizeof(msg)),
+ .supported_versions[0] = SAP_VERSION,
+ .init_data_seq_num = cpu_to_le16(0x100),
+ .init_notif_seq_num = cpu_to_le16(0x800),
+ };
+ int ret;
+
+ trace_iwlmei_me_msg(&msg.hdr, true);
+ ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
+ if (ret != sizeof(msg)) {
+ dev_err(&cldev->dev,
+ "failed to send the SAP_ME_MSG_START message %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iwl_mei_enable(struct mei_cl_device *cldev)
+{
+ int ret;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
+ return ret;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
+ if (ret) {
+ dev_err(&cldev->dev,
+ "failed to register to the rx cb: %d\n", ret);
+ mei_cldev_disable(cldev);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct iwl_mei_nvm *iwl_mei_get_nvm(void)
+{
+ struct iwl_mei_nvm *nvm = NULL;
+ struct iwl_mei *mei;
+ int ret;
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected())
+ goto out;
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei)
+ goto out;
+
+ ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
+ SAP_MSG_NOTIF_GET_NVM);
+ if (ret)
+ goto out;
+
+ mutex_unlock(&iwl_mei_mutex);
+
+ ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
+ if (!ret)
+ return NULL;
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected())
+ goto out;
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei)
+ goto out;
+
+ if (mei->nvm)
+ nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);
+
+out:
+ mutex_unlock(&iwl_mei_mutex);
+ return nvm;
+}
+EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);
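+
+/*
+ * Hypothetical caller sketch (the real caller lives in iwlwifi, not in
+ * this file):
+ *
+ *	struct iwl_mei_nvm *nvm = iwl_mei_get_nvm();
+ *
+ *	if (nvm) {
+ *		... use nvm->hw_addr, nvm->channels, etc. ...
+ *		kfree(nvm);
+ *	}
+ */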
+
+int iwl_mei_get_ownership(void)
+{
+ struct iwl_mei *mei;
+ int ret;
+
+ mutex_lock(&iwl_mei_mutex);
+
+ /* In case we didn't have a bind */
+ if (!iwl_mei_is_connected()) {
+ ret = 0;
+ goto out;
+ }
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!mei->amt_enabled) {
+ ret = 0;
+ goto out;
+ }
+
+ if (mei->got_ownership) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = iwl_mei_send_sap_msg(mei->cldev,
+ SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
+ if (ret)
+ goto out;
+
+ mutex_unlock(&iwl_mei_mutex);
+
+ ret = wait_event_timeout(mei->get_ownership_wq,
+ mei->got_ownership, HZ / 2);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ mutex_lock(&iwl_mei_mutex);
+
+ /* In case we didn't have a bind */
+ if (!iwl_mei_is_connected()) {
+ ret = 0;
+ goto out;
+ }
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei) {
+ ret = -ENODEV;
+ goto out;
+ }
+
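+	/* 0 if we now own the device, 1 if CSME did not release it */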
+ ret = !mei->got_ownership;
+
+out:
+ mutex_unlock(&iwl_mei_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);
+
+void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
+ const struct iwl_mei_colloc_info *colloc_info)
+{
+ struct iwl_sap_notif_host_link_up msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
+ .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
+ .conn_info = {
+ .ssid_len = cpu_to_le32(conn_info->ssid_len),
+ .channel = conn_info->channel,
+ .band = conn_info->band,
+ .pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher),
+ .auth_mode = cpu_to_le32(conn_info->auth_mode),
+ },
+ };
+ struct iwl_mei *mei;
+
+ if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid))
+ return;
+
+ memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len);
+ memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN);
+
+ if (colloc_info) {
+ msg.colloc_channel = colloc_info->channel;
+ msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1;
+ memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN);
+ }
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected())
+ goto out;
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei)
+ goto out;
+
+ if (!mei->amt_enabled)
+ goto out;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+
+out:
+ kfree(iwl_mei_cache.conn_info);
+ iwl_mei_cache.conn_info =
+ kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL);
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_host_associated);
+
+void iwl_mei_host_disassociated(void)
+{
+ struct iwl_mei *mei;
+ struct iwl_sap_notif_host_link_down msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
+ .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
+ .type = HOST_LINK_DOWN_TYPE_LONG,
+ };
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected())
+ goto out;
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei)
+ goto out;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+
+out:
+ kfree(iwl_mei_cache.conn_info);
+ iwl_mei_cache.conn_info = NULL;
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated);
+
+void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
+{
+ struct iwl_mei *mei;
+ u32 rfkill_state = 0;
+ struct iwl_sap_msg_dw msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
+ .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
+ };
+
+ if (!sw_rfkill)
+ rfkill_state |= SAP_SW_RFKILL_DEASSERTED;
+
+ if (!hw_rfkill)
+ rfkill_state |= SAP_HW_RFKILL_DEASSERTED;
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected())
+ goto out;
+
+ msg.val = cpu_to_le32(rfkill_state);
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei)
+ goto out;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+
+out:
+ iwl_mei_cache.rf_kill = rfkill_state;
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state);
+
+void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
+{
+ struct iwl_mei *mei;
+ struct iwl_sap_notif_host_nic_info msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
+ .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
+ };
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected())
+ goto out;
+
+ ether_addr_copy(msg.mac_address, mac_address);
+ ether_addr_copy(msg.nvm_address, nvm_address);
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei)
+ goto out;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+
+out:
+ ether_addr_copy(iwl_mei_cache.mac_address, mac_address);
+ ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address);
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info);
+
+void iwl_mei_set_country_code(u16 mcc)
+{
+ struct iwl_mei *mei;
+ struct iwl_sap_notif_country_code msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
+ .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
+ .mcc = cpu_to_le16(mcc),
+ };
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected())
+ goto out;
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei)
+ goto out;
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+
+out:
+ iwl_mei_cache.mcc = mcc;
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_set_country_code);
+
+void iwl_mei_set_power_limit(const __le16 *power_limit)
+{
+ struct iwl_mei *mei;
+ struct iwl_sap_notif_sar_limits msg = {
+ .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
+ .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
+ };
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected())
+ goto out;
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei)
+ goto out;
+
+ memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));
+
+ iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+
+out:
+ kfree(iwl_mei_cache.power_limit);
+ iwl_mei_cache.power_limit = kmemdup(power_limit,
+ sizeof(msg.sar_chain_info_table), GFP_KERNEL);
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit);
+
+void iwl_mei_set_netdev(struct net_device *netdev)
+{
+ struct iwl_mei *mei;
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected()) {
+ rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
+ goto out;
+ }
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei)
+ goto out;
+
+ if (!netdev) {
+ struct net_device *dev =
+ rcu_dereference_protected(iwl_mei_cache.netdev,
+ lockdep_is_held(&iwl_mei_mutex));
+
+ if (!dev)
+ goto out;
+
+ netdev_rx_handler_unregister(dev);
+ }
+
+ rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
+
+ if (netdev && mei->amt_enabled)
+ netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
+
+out:
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
+
+void iwl_mei_device_down(void)
+{
+ struct iwl_mei *mei;
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_is_connected())
+ goto out;
+
+ mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ if (!mei)
+ goto out;
+
+ if (!mei->csme_taking_ownership)
+ goto out;
+
+ iwl_mei_send_sap_msg(mei->cldev,
+ SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
+ mei->csme_taking_ownership = false;
+out:
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_device_down);
+
+int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
+{
+ int ret;
+
+ /*
+	 * We must have a non-NULL priv pointer so that we don't crash when
+	 * there are multiple WiFi devices.
+ */
+ if (!priv)
+ return -EINVAL;
+
+ mutex_lock(&iwl_mei_mutex);
+
+ /* do not allow registration if someone else already registered */
+ if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ iwl_mei_cache.priv = priv;
+ iwl_mei_cache.ops = ops;
+
+ if (iwl_mei_global_cldev) {
+ struct iwl_mei *mei =
+ mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ /* we have already a SAP connection */
+ if (iwl_mei_is_connected())
+ iwl_mei_send_sap_msg(mei->cldev,
+ SAP_MSG_NOTIF_WIFIDR_UP);
+ }
+ ret = 0;
+
+out:
+ mutex_unlock(&iwl_mei_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iwl_mei_register);
+
+void iwl_mei_start_unregister(void)
+{
+ mutex_lock(&iwl_mei_mutex);
+
+ /* At this point, the wifi driver should have removed the netdev */
+ if (rcu_access_pointer(iwl_mei_cache.netdev))
+ pr_err("Still had a netdev pointer set upon unregister\n");
+
+ kfree(iwl_mei_cache.conn_info);
+ iwl_mei_cache.conn_info = NULL;
+ kfree(iwl_mei_cache.power_limit);
+ iwl_mei_cache.power_limit = NULL;
+ iwl_mei_cache.ops = NULL;
+ /* leave iwl_mei_cache.priv non-NULL to prevent any new registration */
+
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);
+
+void iwl_mei_unregister_complete(void)
+{
+ mutex_lock(&iwl_mei_mutex);
+
+ iwl_mei_cache.priv = NULL;
+
+ if (iwl_mei_global_cldev) {
+ struct iwl_mei *mei =
+ mei_cldev_get_drvdata(iwl_mei_global_cldev);
+
+ iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN);
+ mei->got_ownership = false;
+ }
+
+ mutex_unlock(&iwl_mei_mutex);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+static ssize_t
+iwl_mei_dbgfs_send_start_message_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ mutex_lock(&iwl_mei_mutex);
+
+ if (!iwl_mei_global_cldev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = iwl_mei_send_start(iwl_mei_global_cldev);
+
+out:
+ mutex_unlock(&iwl_mei_mutex);
+ return ret ?: count;
+}
+
+static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
+ .write = iwl_mei_dbgfs_send_start_message_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ iwl_mei_get_ownership();
+
+ return count;
+}
+
+static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
+ .write = iwl_mei_dbgfs_req_ownership_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
+{
+ mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ if (!mei->dbgfs_dir)
+ return;
+
+ debugfs_create_ulong("status", S_IRUSR,
+ mei->dbgfs_dir, &iwl_mei_status);
+ debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
+ mei, &iwl_mei_dbgfs_send_start_message_ops);
+ debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
+ mei, &iwl_mei_dbgfs_req_ownership_ops);
+}
+
+static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
+{
+ debugfs_remove_recursive(mei->dbgfs_dir);
+ mei->dbgfs_dir = NULL;
+}
+
+#else
+
+static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
+static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * iwl_mei_probe - the probe function called by the mei bus enumeration
+ *
+ * This allocates the data needed by iwlmei and stores a pointer to it in
+ * the mei_cl_device's drvdata.
+ * It starts the SAP protocol by sending SAP_ME_MSG_START without waiting
+ * for the answer; the answer will be caught later by the Rx callback.
+ */
+static int iwl_mei_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct iwl_mei *mei;
+ int ret;
+
+ mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
+ if (!mei)
+ return -ENOMEM;
+
+ init_waitqueue_head(&mei->get_nvm_wq);
+ INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
+ INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
+ iwl_mei_csa_throttle_end_wk);
+ init_waitqueue_head(&mei->get_ownership_wq);
+ spin_lock_init(&mei->data_q_lock);
+
+ mei_cldev_set_drvdata(cldev, mei);
+ mei->cldev = cldev;
+
+ /*
+ * The CSME firmware needs to boot the internal WLAN client. Wait here
+ * so that the DMA map request will succeed.
+ */
+ msleep(20);
+
+ ret = iwl_mei_alloc_shared_mem(cldev);
+ if (ret)
+ goto free;
+
+ iwl_mei_init_shared_mem(mei);
+
+ ret = iwl_mei_enable(cldev);
+ if (ret)
+ goto free_shared_mem;
+
+ iwl_mei_dbgfs_register(mei);
+
+ /*
+	 * We now have an Rx function in place; start the SAP protocol.
+	 * We expect to get the SAP_ME_MSG_START_OK response later on.
+ */
+ mutex_lock(&iwl_mei_mutex);
+ ret = iwl_mei_send_start(cldev);
+ mutex_unlock(&iwl_mei_mutex);
+ if (ret)
+ goto debugfs_unregister;
+
+ /* must be last */
+ iwl_mei_global_cldev = cldev;
+
+ return 0;
+
+debugfs_unregister:
+ iwl_mei_dbgfs_unregister(mei);
+ mei_cldev_disable(cldev);
+free_shared_mem:
+ iwl_mei_free_shared_mem(cldev);
+free:
+ mei_cldev_set_drvdata(cldev, NULL);
+ devm_kfree(&cldev->dev, mei);
+
+ return ret;
+}
+
+#define SEND_SAP_MAX_WAIT_ITERATION 10
+
+static void iwl_mei_remove(struct mei_cl_device *cldev)
+{
+ struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+ int i;
+
+ /*
+	 * We are being removed while the bus is active; it means we are
+	 * going to suspend/shutdown, so the NIC will disappear.
+ */
+ if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops)
+ iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
+
+ if (rcu_access_pointer(iwl_mei_cache.netdev)) {
+ struct net_device *dev;
+
+ /*
+ * First take rtnl and only then the mutex to avoid an ABBA
+ * with iwl_mei_set_netdev()
+ */
+ rtnl_lock();
+ mutex_lock(&iwl_mei_mutex);
+
+ /*
+		 * If we are suspending and the wifi driver hasn't removed its netdev
+ * yet, do it now. In any case, don't change the cache.netdev pointer.
+ */
+ dev = rcu_dereference_protected(iwl_mei_cache.netdev,
+ lockdep_is_held(&iwl_mei_mutex));
+
+ netdev_rx_handler_unregister(dev);
+ mutex_unlock(&iwl_mei_mutex);
+ rtnl_unlock();
+ }
+
+ mutex_lock(&iwl_mei_mutex);
+
+ /*
+ * Tell CSME that we are going down so that it won't access the
+	 * memory anymore. Make sure this message goes through immediately.
+ */
+ mei->csa_throttled = false;
+ iwl_mei_send_sap_msg(mei->cldev,
+ SAP_MSG_NOTIF_HOST_GOES_DOWN);
+
+ for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
+ if (!iwl_mei_host_to_me_data_pending(mei))
+ break;
+
+ msleep(5);
+ }
+
+ /*
+ * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message,
+	 * it will probably keep reading memory that we are going to unmap
+	 * and free; expect IOMMU error messages in that case.
+ */
+ if (i == SEND_SAP_MAX_WAIT_ITERATION)
+ dev_err(&mei->cldev->dev,
+ "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
+
+ mutex_unlock(&iwl_mei_mutex);
+
+ /*
+ * This looks strange, but this lock is taken here to make sure that
+ * iwl_mei_add_data_to_ring called from the Tx path sees that we
+ * clear the IWL_MEI_STATUS_SAP_CONNECTED bit.
+ * Rx isn't a problem because the rx_handler can't be called after
+ * having been unregistered.
+ */
+ spin_lock_bh(&mei->data_q_lock);
+ clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
+ spin_unlock_bh(&mei->data_q_lock);
+
+ if (iwl_mei_cache.ops)
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
+
+ /*
+ * mei_cldev_disable will return only after all the MEI Rx is done.
+ * It must be called when iwl_mei_mutex is *not* held, since it waits
+ * for our Rx handler to complete.
+ * After it returns, no new Rx will start.
+ */
+ mei_cldev_disable(cldev);
+
+ /*
+	 * The netdev was already removed, and its removal includes a call
+	 * to synchronize_net(), so we know there won't be any new Rx that
+	 * will trigger the following workers.
+ */
+ cancel_work_sync(&mei->send_csa_msg_wk);
+ cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
+
+ /*
+	 * If anyone is waiting for the ownership, let them know that we are
+	 * going down and that we are not connected anymore. They'll be able
+	 * to take the device.
+ */
+ wake_up_all(&mei->get_ownership_wq);
+
+ mutex_lock(&iwl_mei_mutex);
+
+ iwl_mei_global_cldev = NULL;
+
+ wake_up_all(&mei->get_nvm_wq);
+
+ iwl_mei_free_shared_mem(cldev);
+
+ iwl_mei_dbgfs_unregister(mei);
+
+ mei_cldev_set_drvdata(cldev, NULL);
+
+ kfree(mei->nvm);
+
+ kfree(rcu_access_pointer(mei->filters));
+
+ devm_kfree(&cldev->dev, mei);
+
+ mutex_unlock(&iwl_mei_mutex);
+}
+
+static const struct mei_cl_device_id iwl_mei_tbl[] = {
+ { KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY},
+
+ /* required last entry */
+ { }
+};
+
+/*
+ * Do not export the device table because this module is loaded as a
+ * dependency of iwlwifi.
+ */
+
+static struct mei_cl_driver iwl_mei_cl_driver = {
+ .id_table = iwl_mei_tbl,
+ .name = KBUILD_MODNAME,
+ .probe = iwl_mei_probe,
+ .remove = iwl_mei_remove,
+};
+
+module_mei_cl_driver(iwl_mei_cl_driver);
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
new file mode 100644
index 000000000000..5f966af69720
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_arp.h>
+#include <uapi/linux/icmp.h>
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+
+#include <net/cfg80211.h>
+#include <net/ip.h>
+
+#include <linux/if_arp.h>
+#include <linux/icmp.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
+#include <linux/mm.h>
+
+#include "internal.h"
+#include "sap.h"
+#include "iwl-mei.h"
+
+/*
+ * Returns true if further filtering should be stopped. Only in that case
+ * pass_to_csme and rx_handler_res are set. Otherwise, next level of filters
+ * should be checked.
+ */
+static bool iwl_mei_rx_filter_eth(const struct ethhdr *ethhdr,
+ const struct iwl_sap_oob_filters *filters,
+ bool *pass_to_csme,
+ rx_handler_result_t *rx_handler_res)
+{
+ const struct iwl_sap_eth_filter *filt;
+
+ /* This filter is not relevant for UCAST packet */
+ if (!is_multicast_ether_addr(ethhdr->h_dest) ||
+ is_broadcast_ether_addr(ethhdr->h_dest))
+ return false;
+
+ for (filt = &filters->eth_filters[0];
+ filt < &filters->eth_filters[0] + ARRAY_SIZE(filters->eth_filters);
+ filt++) {
+		/* Assume there are no enabled filters after a disabled one */
+ if (!(filt->flags & SAP_ETH_FILTER_ENABLED))
+ break;
+
+ if (compare_ether_header(filt->mac_address, ethhdr->h_dest))
+ continue;
+
+ /* Packet needs to reach the host's stack */
+ if (filt->flags & SAP_ETH_FILTER_COPY)
+ *rx_handler_res = RX_HANDLER_PASS;
+ else
+ *rx_handler_res = RX_HANDLER_CONSUMED;
+
+ /* We have an authoritative answer, stop filtering */
+ if (filt->flags & SAP_ETH_FILTER_STOP) {
+ *pass_to_csme = true;
+ return true;
+ }
+
+ return false;
+ }
+
+ /* MCAST frames that don't match layer 2 filters are not sent to ME */
+ *pass_to_csme = false;
+
+ return true;
+}
+
+/*
+ * Returns true iff the frame should be passed to CSME in which case
+ * rx_handler_res is set.
+ */
+static bool iwl_mei_rx_filter_arp(struct sk_buff *skb,
+ const struct iwl_sap_oob_filters *filters,
+ rx_handler_result_t *rx_handler_res)
+{
+ const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter;
+ const struct arphdr *arp;
+ const __be32 *target_ip;
+ u32 flags = le32_to_cpu(filt->flags);
+
+ if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
+ return false;
+
+ arp = arp_hdr(skb);
+
+ /* Handle only IPv4 over ethernet ARP frames */
+ if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
+ arp->ar_pro != htons(ETH_P_IP))
+ return false;
+
+ /*
+ * After the ARP header, we have:
+ * src MAC address - 6 bytes
+ * src IP address - 4 bytes
+	 * target MAC address - 6 bytes
+ */
+ target_ip = (void *)((u8 *)(arp + 1) +
+ ETH_ALEN + sizeof(__be32) + ETH_ALEN);
+
+ /*
+	 * An ARP request is forwarded to the ME only if the IP address
+	 * matches the ARP request's target IP field.
+ */
+ if (arp->ar_op == htons(ARPOP_REQUEST) &&
+ (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ARP_REQ_PASS)) &&
+ (filt->ipv4_addr == 0 || filt->ipv4_addr == *target_ip)) {
+ if (flags & SAP_IPV4_FILTER_ARP_REQ_COPY)
+ *rx_handler_res = RX_HANDLER_PASS;
+ else
+ *rx_handler_res = RX_HANDLER_CONSUMED;
+
+ return true;
+ }
+
+ /* ARP reply is always forwarded to ME regardless of the IP */
+ if (flags & SAP_IPV4_FILTER_ARP_RESP_PASS &&
+ arp->ar_op == htons(ARPOP_REPLY)) {
+ if (flags & SAP_IPV4_FILTER_ARP_RESP_COPY)
+ *rx_handler_res = RX_HANDLER_PASS;
+ else
+ *rx_handler_res = RX_HANDLER_CONSUMED;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+iwl_mei_rx_filter_tcp_udp(struct sk_buff *skb, bool ip_match,
+ const struct iwl_sap_oob_filters *filters,
+ rx_handler_result_t *rx_handler_res)
+{
+ const struct iwl_sap_flex_filter *filt;
+
+ for (filt = &filters->flex_filters[0];
+ filt < &filters->flex_filters[0] + ARRAY_SIZE(filters->flex_filters);
+ filt++) {
+ if (!(filt->flags & SAP_FLEX_FILTER_ENABLED))
+ break;
+
+ /*
+		 * We are required to have a match on the IP level and we
+		 * didn't have such a match.
+ */
+ if ((filt->flags &
+ (SAP_FLEX_FILTER_IPV4 | SAP_FLEX_FILTER_IPV6)) &&
+ !ip_match)
+ continue;
+
+ if ((filt->flags & SAP_FLEX_FILTER_UDP) &&
+ ip_hdr(skb)->protocol != IPPROTO_UDP)
+ continue;
+
+ if ((filt->flags & SAP_FLEX_FILTER_TCP) &&
+ ip_hdr(skb)->protocol != IPPROTO_TCP)
+ continue;
+
+ /*
+		 * We must have either a TCP header or a UDP header; both
+		 * start with a source port and then a destination port.
+		 * Both are big endian words.
+ * Use a UDP header and that will work for TCP as well.
+ */
+ if ((filt->src_port && filt->src_port != udp_hdr(skb)->source) ||
+ (filt->dst_port && filt->dst_port != udp_hdr(skb)->dest))
+ continue;
+
+ if (filt->flags & SAP_FLEX_FILTER_COPY)
+ *rx_handler_res = RX_HANDLER_PASS;
+ else
+ *rx_handler_res = RX_HANDLER_CONSUMED;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
+ const struct iwl_sap_oob_filters *filters,
+ rx_handler_result_t *rx_handler_res)
+{
+ const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter;
+ const struct iphdr *iphdr;
+ unsigned int iphdrlen;
+ bool match;
+
+	if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
+	    !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
+ return false;
+
+ iphdrlen = ip_hdrlen(skb);
+ iphdr = ip_hdr(skb);
+ match = !filters->ipv4_filter.ipv4_addr ||
+ filters->ipv4_filter.ipv4_addr == iphdr->daddr;
+
+ skb_set_transport_header(skb, skb_network_offset(skb) + iphdrlen);
+
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ /*
+ * UDP header is shorter than TCP header and we look at the first bytes
+ * of the header anyway (see below).
+ * If we have a truncated TCP packet, let CSME handle this.
+ */
+ if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+ sizeof(struct udphdr)))
+ return false;
+
+ return iwl_mei_rx_filter_tcp_udp(skb, match,
+ filters, rx_handler_res);
+
+ case IPPROTO_ICMP: {
+ struct icmphdr *icmp;
+
+ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(*icmp)))
+ return false;
+
+ icmp = icmp_hdr(skb);
+
+ /*
+ * Don't pass echo requests to ME even if it wants it as we
+ * want the host to answer.
+ */
+ if ((filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_PASS)) &&
+ match && (icmp->type != ICMP_ECHO || icmp->code != 0)) {
+ if (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_COPY))
+ *rx_handler_res = RX_HANDLER_PASS;
+ else
+ *rx_handler_res = RX_HANDLER_CONSUMED;
+
+ return true;
+ }
+ break;
+ }
+ case IPPROTO_ICMPV6:
+ /* TODO: Should we have the same ICMP request logic here too? */
+ if ((filters->icmpv6_flags & cpu_to_le32(SAP_ICMPV6_FILTER_ENABLED) &&
+ match)) {
+ if (filters->icmpv6_flags &
+ cpu_to_le32(SAP_ICMPV6_FILTER_COPY))
+ *rx_handler_res = RX_HANDLER_PASS;
+ else
+ *rx_handler_res = RX_HANDLER_CONSUMED;
+
+ return true;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static bool iwl_mei_rx_filter_ipv6(struct sk_buff *skb,
+ const struct iwl_sap_oob_filters *filters,
+ rx_handler_result_t *rx_handler_res)
+{
+ *rx_handler_res = RX_HANDLER_PASS;
+
+ /* TODO */
+
+ return false;
+}
+
+static rx_handler_result_t
+iwl_mei_rx_pass_to_csme(struct sk_buff *skb,
+ const struct iwl_sap_oob_filters *filters,
+ bool *pass_to_csme)
+{
+ const struct ethhdr *ethhdr = (void *)skb_mac_header(skb);
+ rx_handler_result_t rx_handler_res = RX_HANDLER_PASS;
+ bool (*filt_handler)(struct sk_buff *skb,
+ const struct iwl_sap_oob_filters *filters,
+ rx_handler_result_t *rx_handler_res);
+
+ /*
+	 * skb->data points to the IP header / ARP header, and the ETH header
+ * is in the headroom.
+ */
+ skb_reset_network_header(skb);
+
+ /*
+ * MCAST IP packets sent by us are received again here without
+ * an ETH header. Drop them here.
+ */
+ if (!skb_mac_offset(skb))
+ return RX_HANDLER_PASS;
+
+ if (skb_headroom(skb) < sizeof(*ethhdr))
+ return RX_HANDLER_PASS;
+
+ if (iwl_mei_rx_filter_eth(ethhdr, filters,
+ pass_to_csme, &rx_handler_res))
+ return rx_handler_res;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ filt_handler = iwl_mei_rx_filter_ipv4;
+ break;
+ case htons(ETH_P_ARP):
+ filt_handler = iwl_mei_rx_filter_arp;
+ break;
+ case htons(ETH_P_IPV6):
+ filt_handler = iwl_mei_rx_filter_ipv6;
+ break;
+ default:
+ *pass_to_csme = false;
+ return rx_handler_res;
+ }
+
+ *pass_to_csme = filt_handler(skb, filters, &rx_handler_res);
+
+ return rx_handler_res;
+}
+
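+/*
+ * Outcome matrix for the filtering below (illustrative):
+ *
+ *	*pass_to_csme	returned value		effect
+ *	false		RX_HANDLER_PASS		packet goes to the host stack only
+ *	true		RX_HANDLER_PASS		a copy goes to CSME, the original
+ *						goes to the host stack
+ *	true		RX_HANDLER_CONSUMED	the original goes to CSME only
+ */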
+rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *orig_skb,
+ const struct iwl_sap_oob_filters *filters,
+ bool *pass_to_csme)
+{
+ rx_handler_result_t ret;
+ struct sk_buff *skb;
+
+ ret = iwl_mei_rx_pass_to_csme(orig_skb, filters, pass_to_csme);
+
+ if (!*pass_to_csme)
+ return RX_HANDLER_PASS;
+
+	if (ret == RX_HANDLER_PASS) {
+		skb = skb_copy(orig_skb, GFP_ATOMIC);
+		/* if the copy failed, just let the host handle the packet */
+		if (!skb)
+			return ret;
+	} else {
+		skb = orig_skb;
+	}
+
+	/* CSME wants the MAC header as well, push it back */
+	skb_push(skb, skb->data - skb_mac_header(skb));
+
+ /*
+ * Add the packet that CSME wants to get to the ring. Don't send the
+ * Check Shared Area HECI message since this is not possible from the
+ * Rx context. The caller will schedule a worker to do just that.
+ */
+ iwl_mei_add_data_to_ring(skb, false);
+
+ /*
+	 * In case we drop the packet, don't free it: the caller will do that
+	 * for us.
+ */
+ if (ret == RX_HANDLER_PASS)
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+#define DHCP_SERVER_PORT 67
+#define DHCP_CLIENT_PORT 68
+void iwl_mei_tx_copy_to_csme(struct sk_buff *origskb, unsigned int ivlen)
+{
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ struct ethhdr ethhdr;
+ struct ethhdr *eth;
+
+ /* Catch DHCP packets */
+ if (origskb->protocol != htons(ETH_P_IP) ||
+ ip_hdr(origskb)->protocol != IPPROTO_UDP ||
+ udp_hdr(origskb)->source != htons(DHCP_CLIENT_PORT) ||
+ udp_hdr(origskb)->dest != htons(DHCP_SERVER_PORT))
+ return;
+
+ /*
+ * We could be a bit less aggressive here and not copy everything, but
+	 * this is very rare anyway, so don't bother much.
+ */
+ skb = skb_copy(origskb, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb->protocol = origskb->protocol;
+
+ hdr = (void *)skb->data;
+
+ memcpy(ethhdr.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(ethhdr.h_source, ieee80211_get_SA(hdr), ETH_ALEN);
+
+ /*
+	 * Remove the ieee80211 header + IV + SNAP but leave the ethertype.
+	 * We still have enough headroom for the SAP header.
+ */
+ pskb_pull(skb, ieee80211_hdrlen(hdr->frame_control) + ivlen + 6);
+ eth = skb_push(skb, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source));
+ memcpy(eth, &ethhdr, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source));
+
+ iwl_mei_add_data_to_ring(skb, true);
+
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_tx_copy_to_csme);
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
new file mode 100644
index 000000000000..11e3009121cc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
@@ -0,0 +1,733 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#ifndef __sap_h__
+#define __sap_h__
+
+#include "mei/iwl-mei.h"
+
+/**
+ * DOC: Introduction
+ *
+ * SAP is the protocol used by the Intel Wireless driver (iwlwifi)
+ * and the wireless driver implemented in the CSME firmware.
+ * It allows them to do several things:
+ * 1) Decide who is the owner of the device: CSME or the host
+ * 2) When the host is the owner of the device, CSME can still
+ * send and receive packets through iwlwifi.
+ *
+ * The protocol uses the ME interface (mei driver) to send
+ * messages to the CSME firmware. Those messages have a header
+ * &struct iwl_sap_me_msg_hdr and this header is followed
+ * by a payload.
+ *
+ * Since this messaging system cannot support high amounts of
+ * traffic, iwlwifi and the CSME firmware's WLAN driver have an
+ * additional communication pipe to exchange information. The body
+ * of the message is copied to a shared area and the message that
+ * goes over the ME interface just signals the other side
+ * that a new message is waiting in the shared area. The ME
+ * interface is used only for signaling and not to transfer
+ * the payload.
+ *
+ * This shared area of memory is DMA'able and mapped to be
+ * writable by both the CSME firmware and iwlwifi. It is
+ * mapped to address space of the device that controls the ME
+ * interface's DMA engine. Any data that iwlwifi needs to
+ * send to the CSME firmware needs to be copied to there.
+ */
+
+/**
+ * DOC: Initial Handshake
+ *
+ * Once we get a link to the CSME's WLAN driver, we start the handshake
+ * to establish the shared memory that will allow the communication between
+ * the CSME's WLAN driver and the host.
+ *
+ * 1) Host sends %SAP_ME_MSG_START message with the physical address
+ * of the shared area.
+ * 2) CSME replies with %SAP_ME_MSG_START_OK which includes the
+ * protocol versions supported by CSME.
+ */
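+
+/*
+ * A minimal sketch of the handshake, using the structures defined below
+ * (payload fields elided):
+ *
+ *	host				CSME
+ *	 | --- SAP_ME_MSG_START ---->	|  &struct iwl_sap_me_msg_start
+ *	 | <-- SAP_ME_MSG_START_OK --	|  &struct iwl_sap_me_msg_start_ok
+ */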
+
+/**
+ * DOC: Host and driver state messages
+ *
+ * In order to let CSME know about the host state and the host driver state,
+ * the host sends the messages described below.
+ * When the host driver is loaded, the host sends %SAP_MSG_NOTIF_WIFIDR_UP.
+ * When the host driver is unloaded, the host sends %SAP_MSG_NOTIF_WIFIDR_DOWN.
+ * When iwlmei is unloaded, %SAP_MSG_NOTIF_HOST_GOES_DOWN is sent to let
+ * CSME know not to access the shared memory anymore since it'll be freed.
+ *
+ * CSME will reply to %SAP_MSG_NOTIF_WIFIDR_UP with
+ * %SAP_MSG_NOTIF_AMT_STATE to let the host driver know whether CSME can
+ * use the WiFi device or not, followed by %SAP_MSG_NOTIF_CSME_CONN_STATUS
+ * to inform the host driver of CSME's connection state.
+ *
+ * When the host is associated with an AP, it must send
+ * %SAP_MSG_NOTIF_HOST_LINK_UP, and when it disconnects from the AP, it
+ * must send %SAP_MSG_NOTIF_HOST_LINK_DOWN.
+ */
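+
+/*
+ * Illustrative message flow when the host driver is loaded (derived from
+ * the text above):
+ *
+ *	host: SAP_MSG_NOTIF_WIFIDR_UP -->
+ *	      <-- SAP_MSG_NOTIF_AMT_STATE (DW payload, non-zero: CSME enabled)
+ *	      <-- SAP_MSG_NOTIF_CSME_CONN_STATUS
+ */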
+
+/**
+ * DOC: Ownership
+ *
+ * The device can be controlled either by the CSME firmware or
+ * by the host driver: iwlwifi. There is a negotiation between
+ * those two entities to determine who controls (or owns) the
+ * device. Since the CSME can control the device even when the
+ * OS is not working or even missing, the CSME can request the
+ * device if it comes to the conclusion that the OS's host driver
+ * is not operational. This is why the host driver needs to
+ * signal CSME that it is up and running. If the driver is
+ * unloaded, it'll signal CSME that it is going down so that
+ * CSME can take ownership.
+ */
+
+/**
+ * DOC: Ownership transfer
+ *
+ * When the host driver needs the device, it'll send the
+ * %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP message, which is answered by
+ * %SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ, letting the
+ * host know whether the ownership is granted or not. If the ownership is
+ * granted, the host sends %SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED.
+ *
+ * When CSME requests ownership, it'll send the
+ * %SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP and give the host some time to stop
+ * accessing the device. The host needs to send
+ * %SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED to confirm that it won't access
+ * the device anymore. If the host fails to send this message fast enough,
+ * CSME will take ownership of the device anyway.
+ * When CSME is willing to release the ownership, it'll send
+ * %SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP.
+ */
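+
+/*
+ * Sketch of the two ownership-transfer flows (illustrative, derived from
+ * the text above):
+ *
+ *	host asks:  SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP -->
+ *	            <-- SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ
+ *	            SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED --> (if granted)
+ *
+ *	CSME asks:  <-- SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP
+ *	            SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED -->
+ */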
+
+/**
+ * DOC: Data messages
+ *
+ * Data messages must be sent and received on a separate queue in the shared
+ * memory. Almost all the data messages use %SAP_MSG_DATA_PACKET, both for
+ * packets sent by CSME to the host to be sent to the AP and for packets
+ * received from the AP and sent by the host to CSME.
+ * CSME sends filters to the host to let the host know which inbound packets
+ * it must send to CSME. Those filters are received by the host as a
+ * %SAP_MSG_NOTIF_CSME_FILTERS command.
+ * The only outbound packets that must be sent to CSME are the DHCP packets.
+ * Those packets must use the %SAP_MSG_CB_DATA_PACKET message.
+ */
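+
+/*
+ * On-queue layout of a data message (illustrative):
+ *
+ *	struct iwl_sap_hdr hdr;		hdr.type = SAP_MSG_DATA_PACKET
+ *	u8 payload[];			le16_to_cpu(hdr.len) bytes
+ */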
+
+/**
+ * enum iwl_sap_me_msg_id - the ID of the ME message
+ * @SAP_ME_MSG_START: See &struct iwl_sap_me_msg_start.
+ * @SAP_ME_MSG_START_OK: See &struct iwl_sap_me_msg_start_ok.
+ * @SAP_ME_MSG_CHECK_SHARED_AREA: This message has no payload.
+ */
+enum iwl_sap_me_msg_id {
+ SAP_ME_MSG_START = 1,
+ SAP_ME_MSG_START_OK,
+ SAP_ME_MSG_CHECK_SHARED_AREA,
+};
+
+/**
+ * struct iwl_sap_me_msg_hdr - the header of the ME message
+ * @type: the type of the message, see &enum iwl_sap_me_msg_id.
+ * @seq_num: a sequence number used for debug only.
+ * @len: the length of the message.
+ */
+struct iwl_sap_me_msg_hdr {
+ __le32 type;
+ __le32 seq_num;
+ __le32 len;
+} __packed;
+
+/**
+ * struct iwl_sap_me_msg_start - used for the %SAP_ME_MSG_START message
+ * @hdr: See &struct iwl_sap_me_msg_hdr.
+ * @shared_mem: physical address of SAP shared memory area.
+ * @init_data_seq_num: seq_num of the first data packet HOST -> CSME.
+ * @init_notif_seq_num: seq_num of the first notification HOST -> CSME.
+ * @supported_versions: The host sends to the CSME a zero-terminated array
+ * of the versions it supports.
+ *
+ * This message is sent by the host to CSME and is answered by the
+ * %SAP_ME_MSG_START_OK message.
+ */
+struct iwl_sap_me_msg_start {
+ struct iwl_sap_me_msg_hdr hdr;
+ __le64 shared_mem;
+ __le16 init_data_seq_num;
+ __le16 init_notif_seq_num;
+ u8 supported_versions[64];
+} __packed;
+
+/**
+ * struct iwl_sap_me_msg_start_ok - used for the %SAP_ME_MSG_START_OK
+ * @hdr: See &struct iwl_sap_me_msg_hdr
+ * @init_data_seq_num: Not used.
+ * @init_notif_seq_num: Not used
+ * @supported_version: The version that will be used.
+ * @reserved: For alignment.
+ *
+ * This message is sent by CSME to the host in response to the
+ * %SAP_ME_MSG_START message.
+ */
+struct iwl_sap_me_msg_start_ok {
+ struct iwl_sap_me_msg_hdr hdr;
+ __le16 init_data_seq_num;
+ __le16 init_notif_seq_num;
+ u8 supported_version;
+ u8 reserved[3];
+} __packed;
+
+/**
+ * enum iwl_sap_msg - SAP messages
+ * @SAP_MSG_NOTIF_BOTH_WAYS_MIN: Not used.
+ * @SAP_MSG_NOTIF_PING: No payload. Solicits a response message (check-alive).
+ * @SAP_MSG_NOTIF_PONG: No payload. The response message.
+ * @SAP_MSG_NOTIF_BOTH_WAYS_MAX: Not used.
+ *
+ * @SAP_MSG_NOTIF_FROM_CSME_MIN: Not used.
+ * @SAP_MSG_NOTIF_CSME_FILTERS: TODO
+ * @SAP_MSG_NOTIF_AMT_STATE: Payload is a DW. Any non-zero value means
+ * that CSME is enabled.
+ * @SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ: Payload is a DW. 0 means
+ * the host will not get ownership. Any other value means the host is
+ * the owner.
+ * @SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP: No payload.
+ * @SAP_MSG_NOTIF_TRIGGER_IP_REFRESH: No payload.
+ * @SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP: No payload.
+ * @SAP_MSG_NOTIF_NIC_OWNER: Payload is a DW. See &enum iwl_sap_nic_owner.
+ * @SAP_MSG_NOTIF_CSME_CONN_STATUS: See &struct iwl_sap_notif_conn_status.
+ * @SAP_MSG_NOTIF_NVM: See &struct iwl_sap_nvm.
+ * @SAP_MSG_NOTIF_FROM_CSME_MAX: Not used.
+ *
+ * @SAP_MSG_NOTIF_FROM_HOST_MIN: Not used.
+ * @SAP_MSG_NOTIF_BAND_SELECTION: TODO
+ * @SAP_MSG_NOTIF_RADIO_STATE: Payload is a DW.
+ * See &enum iwl_sap_radio_state_bitmap.
+ * @SAP_MSG_NOTIF_NIC_INFO: See &struct iwl_sap_notif_host_nic_info.
+ * @SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP: No payload.
+ * @SAP_MSG_NOTIF_HOST_SUSPENDS: Payload is a DW. Bitmap described in
+ * &enum iwl_sap_notif_host_suspends_bitmap.
+ * @SAP_MSG_NOTIF_HOST_RESUMES: Payload is a DW. 0 or 1. 1 says that
+ * the CSME should re-initialize the init control block.
+ * @SAP_MSG_NOTIF_HOST_GOES_DOWN: No payload.
+ * @SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED: No payload.
+ * @SAP_MSG_NOTIF_COUNTRY_CODE: See &struct iwl_sap_notif_country_code.
+ * @SAP_MSG_NOTIF_HOST_LINK_UP: See &struct iwl_sap_notif_host_link_up.
+ * @SAP_MSG_NOTIF_HOST_LINK_DOWN: See &struct iwl_sap_notif_host_link_down.
+ * @SAP_MSG_NOTIF_WHO_OWNS_NIC: No payload.
+ * @SAP_MSG_NOTIF_WIFIDR_DOWN: No payload.
+ * @SAP_MSG_NOTIF_WIFIDR_UP: No payload.
+ * @SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED: No payload.
+ * @SAP_MSG_NOTIF_SAR_LIMITS: See &struct iwl_sap_notif_sar_limits.
+ * @SAP_MSG_NOTIF_GET_NVM: No payload. Triggers %SAP_MSG_NOTIF_NVM.
+ * @SAP_MSG_NOTIF_FROM_HOST_MAX: Not used.
+ *
+ * @SAP_MSG_DATA_MIN: Not used.
+ * @SAP_MSG_DATA_PACKET: Packets that passed the filters defined by
+ * %SAP_MSG_NOTIF_CSME_FILTERS. The payload is &struct iwl_sap_hdr with
+ * the payload of the packet immediately afterwards.
+ * @SAP_MSG_CB_DATA_PACKET: Indicates to CSME that we transmitted a specific
+ * packet. Used only for DHCP transmitted packets. See
+ * &struct iwl_sap_cb_data.
+ * @SAP_MSG_DATA_MAX: Not used.
+ */
+enum iwl_sap_msg {
+ SAP_MSG_NOTIF_BOTH_WAYS_MIN = 0,
+ SAP_MSG_NOTIF_PING = 1,
+ SAP_MSG_NOTIF_PONG = 2,
+ SAP_MSG_NOTIF_BOTH_WAYS_MAX,
+
+ SAP_MSG_NOTIF_FROM_CSME_MIN = 500,
+ SAP_MSG_NOTIF_CSME_FILTERS = SAP_MSG_NOTIF_FROM_CSME_MIN,
+ /* 501 is deprecated */
+ SAP_MSG_NOTIF_AMT_STATE = 502,
+ SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ = 503,
+ SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP = 504,
+ SAP_MSG_NOTIF_TRIGGER_IP_REFRESH = 505,
+ SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP = 506,
+ /* 507 is deprecated */
+ /* 508 is deprecated */
+ /* 509 is deprecated */
+ /* 510 is deprecated */
+ SAP_MSG_NOTIF_NIC_OWNER = 511,
+ SAP_MSG_NOTIF_CSME_CONN_STATUS = 512,
+ SAP_MSG_NOTIF_NVM = 513,
+ SAP_MSG_NOTIF_FROM_CSME_MAX,
+
+ SAP_MSG_NOTIF_FROM_HOST_MIN = 1000,
+ SAP_MSG_NOTIF_BAND_SELECTION = SAP_MSG_NOTIF_FROM_HOST_MIN,
+ SAP_MSG_NOTIF_RADIO_STATE = 1001,
+ SAP_MSG_NOTIF_NIC_INFO = 1002,
+ SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP = 1003,
+ SAP_MSG_NOTIF_HOST_SUSPENDS = 1004,
+ SAP_MSG_NOTIF_HOST_RESUMES = 1005,
+ SAP_MSG_NOTIF_HOST_GOES_DOWN = 1006,
+ SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED = 1007,
+ SAP_MSG_NOTIF_COUNTRY_CODE = 1008,
+ SAP_MSG_NOTIF_HOST_LINK_UP = 1009,
+ SAP_MSG_NOTIF_HOST_LINK_DOWN = 1010,
+ SAP_MSG_NOTIF_WHO_OWNS_NIC = 1011,
+ SAP_MSG_NOTIF_WIFIDR_DOWN = 1012,
+ SAP_MSG_NOTIF_WIFIDR_UP = 1013,
+ /* 1014 is deprecated */
+ SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED = 1015,
+ SAP_MSG_NOTIF_SAR_LIMITS = 1016,
+ SAP_MSG_NOTIF_GET_NVM = 1017,
+ SAP_MSG_NOTIF_FROM_HOST_MAX,
+
+ SAP_MSG_DATA_MIN = 2000,
+ SAP_MSG_DATA_PACKET = SAP_MSG_DATA_MIN,
+ SAP_MSG_CB_DATA_PACKET = 2001,
+ SAP_MSG_DATA_MAX,
+};
+
+/**
+ * struct iwl_sap_hdr - prefixes any SAP message
+ * @type: See &enum iwl_sap_msg.
+ * @len: The length of the message (header not included).
+ * @seq_num: For debug.
+ * @payload: The payload of the message.
+ */
+struct iwl_sap_hdr {
+ __le16 type;
+ __le16 len;
+ __le32 seq_num;
+ u8 payload[0];
+};
+
+/**
+ * struct iwl_sap_msg_dw - suits any DW long SAP message
+ * @hdr: The SAP header
+ * @val: The value of the DW.
+ */
+struct iwl_sap_msg_dw {
+ struct iwl_sap_hdr hdr;
+ __le32 val;
+};
+
+/**
+ * enum iwl_sap_nic_owner - used by %SAP_MSG_NOTIF_NIC_OWNER
+ * @SAP_NIC_OWNER_UNKNOWN: Not used.
+ * @SAP_NIC_OWNER_HOST: The host owns the NIC.
+ * @SAP_NIC_OWNER_ME: CSME owns the NIC.
+ */
+enum iwl_sap_nic_owner {
+ SAP_NIC_OWNER_UNKNOWN,
+ SAP_NIC_OWNER_HOST,
+ SAP_NIC_OWNER_ME,
+};
+
+enum iwl_sap_wifi_auth_type {
+ SAP_WIFI_AUTH_TYPE_OPEN = IWL_MEI_AKM_AUTH_OPEN,
+ SAP_WIFI_AUTH_TYPE_RSNA = IWL_MEI_AKM_AUTH_RSNA,
+ SAP_WIFI_AUTH_TYPE_RSNA_PSK = IWL_MEI_AKM_AUTH_RSNA_PSK,
+ SAP_WIFI_AUTH_TYPE_SAE = IWL_MEI_AKM_AUTH_SAE,
+ SAP_WIFI_AUTH_TYPE_MAX,
+};
+
+/**
+ * enum iwl_sap_wifi_cipher_alg - cipher algorithms
+ * @SAP_WIFI_CIPHER_ALG_NONE: TBD
+ * @SAP_WIFI_CIPHER_ALG_CCMP: TBD
+ * @SAP_WIFI_CIPHER_ALG_GCMP: TBD
+ * @SAP_WIFI_CIPHER_ALG_GCMP_256: TBD
+ */
+enum iwl_sap_wifi_cipher_alg {
+ SAP_WIFI_CIPHER_ALG_NONE = IWL_MEI_CIPHER_NONE,
+ SAP_WIFI_CIPHER_ALG_CCMP = IWL_MEI_CIPHER_CCMP,
+ SAP_WIFI_CIPHER_ALG_GCMP = IWL_MEI_CIPHER_GCMP,
+ SAP_WIFI_CIPHER_ALG_GCMP_256 = IWL_MEI_CIPHER_GCMP_256,
+};
+
+/**
+ * struct iwl_sap_notif_connection_info - nested in other structures
+ * @ssid_len: The length of the SSID.
+ * @ssid: The SSID.
+ * @auth_mode: The authentication mode. See &enum iwl_sap_wifi_auth_type.
+ * @pairwise_cipher: The cipher used for unicast packets.
+ * See &enum iwl_sap_wifi_cipher_alg.
+ * @channel: The channel on which we are associated.
+ * @band: The band on which we are associated.
+ * @reserved: For alignment.
+ * @bssid: The BSSID.
+ * @reserved1: For alignment.
+ */
+struct iwl_sap_notif_connection_info {
+ __le32 ssid_len;
+ u8 ssid[32];
+ __le32 auth_mode;
+ __le32 pairwise_cipher;
+ u8 channel;
+ u8 band;
+ __le16 reserved;
+ u8 bssid[6];
+ __le16 reserved1;
+} __packed;
+
+/**
+ * enum iwl_sap_scan_request - for the scan_request field
+ * @SCAN_REQUEST_FILTERING: Filtering is requested.
+ * @SCAN_REQUEST_FAST: Fast scan is requested.
+ */
+enum iwl_sap_scan_request {
+ SCAN_REQUEST_FILTERING = 1 << 0,
+ SCAN_REQUEST_FAST = 1 << 1,
+};
+
+/**
+ * struct iwl_sap_notif_conn_status - payload of %SAP_MSG_NOTIF_CSME_CONN_STATUS
+ * @hdr: The SAP header
+ * @link_prot_state: Non-zero if link protection is active.
+ * @scan_request: See &enum iwl_sap_scan_request.
+ * @conn_info: Information about the connection.
+ */
+struct iwl_sap_notif_conn_status {
+ struct iwl_sap_hdr hdr;
+ __le32 link_prot_state;
+ __le32 scan_request;
+ struct iwl_sap_notif_connection_info conn_info;
+} __packed;
+
+/**
+ * enum iwl_sap_radio_state_bitmap - used for %SAP_MSG_NOTIF_RADIO_STATE
+ * @SAP_SW_RFKILL_DEASSERTED: If set, SW RfKill is de-asserted
+ * @SAP_HW_RFKILL_DEASSERTED: If set, HW RfKill is de-asserted
+ *
+ * If both bits are set, then the radio is on.
+ */
+enum iwl_sap_radio_state_bitmap {
+ SAP_SW_RFKILL_DEASSERTED = 1 << 0,
+ SAP_HW_RFKILL_DEASSERTED = 1 << 1,
+};
+
+/**
+ * enum iwl_sap_notif_host_suspends_bitmap - used for %SAP_MSG_NOTIF_HOST_SUSPENDS
+ * @SAP_OFFER_NIC: TBD
+ * @SAP_FILTER_CONFIGURED: TBD
+ * @SAP_NLO_CONFIGURED: TBD
+ * @SAP_HOST_OWNS_NIC: TBD
+ * @SAP_LINK_PROTECTED: TBD
+ */
+enum iwl_sap_notif_host_suspends_bitmap {
+ SAP_OFFER_NIC = 1 << 0,
+ SAP_FILTER_CONFIGURED = 1 << 1,
+ SAP_NLO_CONFIGURED = 1 << 2,
+ SAP_HOST_OWNS_NIC = 1 << 3,
+ SAP_LINK_PROTECTED = 1 << 4,
+};
+
+/**
+ * struct iwl_sap_notif_country_code - payload of %SAP_MSG_NOTIF_COUNTRY_CODE
+ * @hdr: The SAP header
+ * @mcc: The country code.
+ * @source_id: TBD
+ * @reserved: For alignment.
+ * @diff_time: TBD
+ */
+struct iwl_sap_notif_country_code {
+ struct iwl_sap_hdr hdr;
+ __le16 mcc;
+ u8 source_id;
+ u8 reserved;
+ __le32 diff_time;
+} __packed;
+
+/**
+ * struct iwl_sap_notif_host_link_up - payload of %SAP_MSG_NOTIF_HOST_LINK_UP
+ * @hdr: The SAP header
+ * @conn_info: Information about the connection.
+ * @colloc_channel: The collocated channel
+ * @colloc_band: The band of the collocated channel.
+ * @reserved: For alignment.
+ * @colloc_bssid: The collocated BSSID.
+ * @reserved1: For alignment.
+ */
+struct iwl_sap_notif_host_link_up {
+ struct iwl_sap_hdr hdr;
+ struct iwl_sap_notif_connection_info conn_info;
+ u8 colloc_channel;
+ u8 colloc_band;
+ __le16 reserved;
+ u8 colloc_bssid[6];
+ __le16 reserved1;
+} __packed;
+
+/**
+ * enum iwl_sap_notif_link_down_type - used in &struct iwl_sap_notif_host_link_down
+ * @HOST_LINK_DOWN_TYPE_NONE: TBD
+ * @HOST_LINK_DOWN_TYPE_TEMPORARY: TBD
+ * @HOST_LINK_DOWN_TYPE_LONG: TBD
+ */
+enum iwl_sap_notif_link_down_type {
+ HOST_LINK_DOWN_TYPE_NONE,
+ HOST_LINK_DOWN_TYPE_TEMPORARY,
+ HOST_LINK_DOWN_TYPE_LONG,
+};
+
+/**
+ * struct iwl_sap_notif_host_link_down - payload for %SAP_MSG_NOTIF_HOST_LINK_DOWN
+ * @hdr: The SAP header
+ * @type: See &enum iwl_sap_notif_link_down_type.
+ * @reserved: For alignment.
+ * @reason_valid: If 0, ignore the next field.
+ * @reason: The reason of the disconnection.
+ */
+struct iwl_sap_notif_host_link_down {
+ struct iwl_sap_hdr hdr;
+ u8 type;
+ u8 reserved[2];
+ u8 reason_valid;
+ __le32 reason;
+} __packed;
+
+/**
+ * struct iwl_sap_notif_host_nic_info - payload for %SAP_MSG_NOTIF_NIC_INFO
+ * @hdr: The SAP header
+ * @mac_address: The MAC address as configured to the interface.
+ * @nvm_address: The MAC address as configured in the NVM.
+ */
+struct iwl_sap_notif_host_nic_info {
+ struct iwl_sap_hdr hdr;
+ u8 mac_address[6];
+ u8 nvm_address[6];
+} __packed;
+
+/**
+ * struct iwl_sap_notif_dw - payload is a dw
+ * @hdr: The SAP header.
+ * @dw: The payload.
+ */
+struct iwl_sap_notif_dw {
+ struct iwl_sap_hdr hdr;
+ __le32 dw;
+} __packed;
+
+/**
+ * struct iwl_sap_notif_sar_limits - payload for %SAP_MSG_NOTIF_SAR_LIMITS
+ * @hdr: The SAP header
+ * @sar_chain_info_table: Tx power limits.
+ */
+struct iwl_sap_notif_sar_limits {
+ struct iwl_sap_hdr hdr;
+ __le16 sar_chain_info_table[2][5];
+} __packed;
+
+/**
+ * enum iwl_sap_nvm_caps - capabilities for NVM SAP
+ * @SAP_NVM_CAPS_LARI_SUPPORT: Lari is supported
+ * @SAP_NVM_CAPS_11AX_SUPPORT: 11AX is supported
+ */
+enum iwl_sap_nvm_caps {
+ SAP_NVM_CAPS_LARI_SUPPORT = BIT(0),
+ SAP_NVM_CAPS_11AX_SUPPORT = BIT(1),
+};
+
+/**
+ * struct iwl_sap_nvm - payload for %SAP_MSG_NOTIF_NVM
+ * @hdr: The SAP header.
+ * @hw_addr: The MAC address
+ * @n_hw_addrs: The number of MAC addresses
+ * @reserved: For alignment.
+ * @radio_cfg: The radio configuration.
+ * @caps: See &enum iwl_sap_nvm_caps.
+ * @nvm_version: The version of the NVM.
+ * @channels: The data for each channel.
+ */
+struct iwl_sap_nvm {
+ struct iwl_sap_hdr hdr;
+ u8 hw_addr[6];
+ u8 n_hw_addrs;
+ u8 reserved;
+ __le32 radio_cfg;
+ __le32 caps;
+ __le32 nvm_version;
+ __le32 channels[110];
+} __packed;
+
+/**
+ * enum iwl_sap_eth_filter_flags - used in &struct iwl_sap_eth_filter
+ * @SAP_ETH_FILTER_STOP: Do not process further filters.
+ * @SAP_ETH_FILTER_COPY: Copy the packet to the CSME.
+ * @SAP_ETH_FILTER_ENABLED: If false, the filter should be ignored.
+ */
+enum iwl_sap_eth_filter_flags {
+ SAP_ETH_FILTER_STOP = BIT(0),
+ SAP_ETH_FILTER_COPY = BIT(1),
+ SAP_ETH_FILTER_ENABLED = BIT(2),
+};
+
+/**
+ * struct iwl_sap_eth_filter - a L2 filter
+ * @mac_address: Address to filter.
+ * @flags: See &enum iwl_sap_eth_filter_flags.
+ */
+struct iwl_sap_eth_filter {
+ u8 mac_address[6];
+ u8 flags;
+} __packed;
+
+/**
+ * enum iwl_sap_flex_filter_flags - used in &struct iwl_sap_flex_filter
+ * @SAP_FLEX_FILTER_COPY: Pass UDP / TCP packets to CSME.
+ * @SAP_FLEX_FILTER_ENABLED: If false, the filter should be ignored.
+ * @SAP_FLEX_FILTER_IPV4: Filter requires match on the IP address as well.
+ * @SAP_FLEX_FILTER_IPV6: Filter requires match on the IP address as well.
+ * @SAP_FLEX_FILTER_TCP: Filter should be applied on TCP packets.
+ * @SAP_FLEX_FILTER_UDP: Filter should be applied on UDP packets.
+ */
+enum iwl_sap_flex_filter_flags {
+ SAP_FLEX_FILTER_COPY = BIT(0),
+ SAP_FLEX_FILTER_ENABLED = BIT(1),
+ SAP_FLEX_FILTER_IPV6 = BIT(2),
+ SAP_FLEX_FILTER_IPV4 = BIT(3),
+ SAP_FLEX_FILTER_TCP = BIT(4),
+ SAP_FLEX_FILTER_UDP = BIT(5),
+};
+
+/**
+ * struct iwl_sap_flex_filter - a TCP / UDP port filter
+ * @src_port: Source port in network format.
+ * @dst_port: Destination port in network format.
+ * @flags: Flags and protocol, see &enum iwl_sap_flex_filter_flags.
+ * @reserved: For alignment.
+ */
+struct iwl_sap_flex_filter {
+ __be16 src_port;
+ __be16 dst_port;
+ u8 flags;
+ u8 reserved;
+} __packed;
+
+/**
+ * enum iwl_sap_ipv4_filter_flags - used in &struct iwl_sap_ipv4_filter
+ * @SAP_IPV4_FILTER_ICMP_PASS: Pass ICMP packets to CSME.
+ * @SAP_IPV4_FILTER_ICMP_COPY: Pass ICMP packets to host.
+ * @SAP_IPV4_FILTER_ARP_REQ_PASS: Pass ARP requests to CSME.
+ * @SAP_IPV4_FILTER_ARP_REQ_COPY: Pass ARP requests to host.
+ * @SAP_IPV4_FILTER_ARP_RESP_PASS: Pass ARP responses to CSME.
+ * @SAP_IPV4_FILTER_ARP_RESP_COPY: Pass ARP responses to host.
+ */
+enum iwl_sap_ipv4_filter_flags {
+ SAP_IPV4_FILTER_ICMP_PASS = BIT(0),
+ SAP_IPV4_FILTER_ICMP_COPY = BIT(1),
+ SAP_IPV4_FILTER_ARP_REQ_PASS = BIT(2),
+ SAP_IPV4_FILTER_ARP_REQ_COPY = BIT(3),
+ SAP_IPV4_FILTER_ARP_RESP_PASS = BIT(4),
+ SAP_IPV4_FILTER_ARP_RESP_COPY = BIT(5),
+};
+
+/**
+ * struct iwl_sap_ipv4_filter - an IPv4 filter
+ * @ipv4_addr: The IPv4 address to filter.
+ * @flags: See &enum iwl_sap_ipv4_filter_flags.
+ */
+struct iwl_sap_ipv4_filter {
+ __be32 ipv4_addr;
+ __le32 flags;
+} __packed;
+
+/**
+ * enum iwl_sap_ipv6_filter_flags - used in &struct iwl_sap_ipv6_filter
+ * @SAP_IPV6_ADDR_FILTER_COPY: Pass packets to the host.
+ * @SAP_IPV6_ADDR_FILTER_ENABLED: If false, the filter should be ignored.
+ */
+enum iwl_sap_ipv6_filter_flags {
+ SAP_IPV6_ADDR_FILTER_COPY = BIT(0),
+ SAP_IPV6_ADDR_FILTER_ENABLED = BIT(1),
+};
+
+/**
+ * struct iwl_sap_ipv6_filter - an IPv6 address filter
+ * @addr_lo24: Lowest 24 bits of the IPv6 address.
+ * @flags: See &enum iwl_sap_ipv6_filter_flags.
+ */
+struct iwl_sap_ipv6_filter {
+ u8 addr_lo24[3];
+ u8 flags;
+} __packed;
+
+/**
+ * enum iwl_sap_icmpv6_filter_flags - flags for the ICMPv6 filter
+ * @SAP_ICMPV6_FILTER_ENABLED: If false, the filter should be ignored.
+ * @SAP_ICMPV6_FILTER_COPY: Pass packets to the host.
+ */
+enum iwl_sap_icmpv6_filter_flags {
+ SAP_ICMPV6_FILTER_ENABLED = BIT(0),
+ SAP_ICMPV6_FILTER_COPY = BIT(1),
+};
+
+/**
+ * enum iwl_sap_vlan_filter_flags - flags for the VLAN filter
+ * @SAP_VLAN_FILTER_VLAN_ID_MSK: Mask for the VLAN ID to match.
+ * @SAP_VLAN_FILTER_ENABLED: If false, the filter should be ignored.
+ */
+enum iwl_sap_vlan_filter_flags {
+ SAP_VLAN_FILTER_VLAN_ID_MSK = 0x0FFF,
+ SAP_VLAN_FILTER_ENABLED = BIT(15),
+};
+
+/**
+ * struct iwl_sap_oob_filters - Out of band filters (for RX only)
+ * @flex_filters: Array of &struct iwl_sap_flex_filter.
+ * @icmpv6_flags: See &enum iwl_sap_icmpv6_filter_flags.
+ * @ipv6_filters: Array of &struct iwl_sap_ipv6_filter.
+ * @eth_filters: Array of &struct iwl_sap_eth_filter.
+ * @reserved: For alignment.
+ * @ipv4_filter: &struct iwl_sap_ipv4_filter.
+ * @vlan: See &enum iwl_sap_vlan_filter_flags.
+ */
+struct iwl_sap_oob_filters {
+ struct iwl_sap_flex_filter flex_filters[14];
+ __le32 icmpv6_flags;
+ struct iwl_sap_ipv6_filter ipv6_filters[4];
+ struct iwl_sap_eth_filter eth_filters[5];
+ u8 reserved;
+ struct iwl_sap_ipv4_filter ipv4_filter;
+ __le16 vlan[4];
+} __packed;
+
+/**
+ * struct iwl_sap_csme_filters - payload of %SAP_MSG_NOTIF_CSME_FILTERS
+ * @hdr: The SAP header.
+ * @mode: Not used.
+ * @mac_address: Not used.
+ * @reserved: For alignment.
+ * @cbfilters: Not used.
+ * @filters: Out of band filters.
+ */
+struct iwl_sap_csme_filters {
+ struct iwl_sap_hdr hdr;
+ __le32 mode;
+ u8 mac_address[6];
+ __le16 reserved;
+ u8 cbfilters[1728];
+ struct iwl_sap_oob_filters filters;
+} __packed;
+
+#define CB_TX_DHCP_FILT_IDX 30
+/**
+ * struct iwl_sap_cb_data - header to be added for transmitted packets.
+ * @hdr: The SAP header.
+ * @reserved: Not used.
+ * @to_me_filt_status: The filter that matches. Bit %CB_TX_DHCP_FILT_IDX should
+ * be set for DHCP (the only packet that uses this header).
+ * @reserved2: Not used.
+ * @data_len: The length of the payload.
+ * @payload: The payload of the transmitted packet.
+ */
+struct iwl_sap_cb_data {
+ struct iwl_sap_hdr hdr;
+ __le32 reserved[7];
+ __le32 to_me_filt_status;
+ __le32 reserved2;
+ __le32 data_len;
+ u8 payload[];
+};
+
+#endif /* __sap_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/trace-data.h b/drivers/net/wireless/intel/iwlwifi/mei/trace-data.h
new file mode 100644
index 000000000000..83639c6225ca
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/trace-data.h
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING)
+
+#define trace_iwlmei_sap_data(...)
+
+#else
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA) || defined(TRACE_HEADER_MULTI_READ)
+
+#ifndef __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA
+enum iwl_sap_data_trace_type {
+ IWL_SAP_RX_DATA_TO_AIR,
+ IWL_SAP_TX_DATA_FROM_AIR,
+ IWL_SAP_RX_DATA_DROPPED_FROM_AIR,
+ IWL_SAP_TX_DHCP,
+};
+
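+/*
+ * Note: the returned offset is the number of header bytes (SAP header
+ * or CB data header) to skip at the start of the skb, so that only the
+ * actual payload is copied into the trace buffer below.
+ */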
+static inline size_t
+iwlmei_sap_data_offset(enum iwl_sap_data_trace_type trace_type)
+{
+ switch (trace_type) {
+ case IWL_SAP_RX_DATA_TO_AIR:
+ return 0;
+ case IWL_SAP_TX_DATA_FROM_AIR:
+ case IWL_SAP_RX_DATA_DROPPED_FROM_AIR:
+ return sizeof(struct iwl_sap_hdr);
+ case IWL_SAP_TX_DHCP:
+ return sizeof(struct iwl_sap_cb_data);
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return 0;
+}
+#endif
+
+#define __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA
+
+#include <linux/tracepoint.h>
+#include <linux/skbuff.h>
+#include "sap.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlmei_sap_data
+
+TRACE_EVENT(iwlmei_sap_data,
+ TP_PROTO(const struct sk_buff *skb,
+ enum iwl_sap_data_trace_type trace_type),
+ TP_ARGS(skb, trace_type),
+ TP_STRUCT__entry(
+ __dynamic_array(u8, data,
+ skb->len - iwlmei_sap_data_offset(trace_type))
+ __field(u32, trace_type)
+ ),
+ TP_fast_assign(
+ size_t offset = iwlmei_sap_data_offset(trace_type);
+ __entry->trace_type = trace_type;
+ skb_copy_bits(skb, offset, __get_dynamic_array(data),
+ skb->len - offset);
+ ),
+ TP_printk("sap_data:trace_type %d len %d",
+ __entry->trace_type, __get_dynamic_array_len(data))
+);
+
+/*
+ * If you add something here, add a stub in case
+ * !defined(CONFIG_IWLWIFI_DEVICE_TRACING)
+ */
+
+#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-data
+#include <trace/define_trace.h>
+
+#endif /* CONFIG_IWLWIFI_DEVICE_TRACING */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/trace.c b/drivers/net/wireless/intel/iwlwifi/mei/trace.c
new file mode 100644
index 000000000000..47ac32ef9f69
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/trace.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <linux/module.h>
+
+/* sparse doesn't like tracepoint macros */
+#ifndef __CHECKER__
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+#include "trace-data.h"
+
+#endif /* __CHECKER__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/trace.h b/drivers/net/wireless/intel/iwlwifi/mei/trace.h
new file mode 100644
index 000000000000..45ecb22ec84a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/trace.h
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING)
+
+#define trace_iwlmei_sap_cmd(...)
+#define trace_iwlmei_me_msg(...)
+
+#else
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlmei_sap_cmd
+
+#include "mei/sap.h"
+
+TRACE_EVENT(iwlmei_sap_cmd,
+ TP_PROTO(const struct iwl_sap_hdr *sap_cmd, bool tx),
+ TP_ARGS(sap_cmd, tx),
+ TP_STRUCT__entry(
+ __dynamic_array(u8, cmd,
+ le16_to_cpu(sap_cmd->len) + sizeof(*sap_cmd))
+ __field(u8, tx)
+ __field(u16, type)
+ __field(u16, len)
+ __field(u32, seq)
+ ),
+ TP_fast_assign(
+ memcpy(__get_dynamic_array(cmd), sap_cmd,
+ le16_to_cpu(sap_cmd->len) + sizeof(*sap_cmd));
+ __entry->tx = tx;
+ __entry->type = le16_to_cpu(sap_cmd->type);
+ __entry->len = le16_to_cpu(sap_cmd->len);
+ __entry->seq = le32_to_cpu(sap_cmd->seq_num);
+ ),
+ TP_printk("sap_cmd %s: type %d len %d seq %d", __entry->tx ? "Tx" : "Rx",
+ __entry->type, __entry->len, __entry->seq)
+);
+
+TRACE_EVENT(iwlmei_me_msg,
+ TP_PROTO(const struct iwl_sap_me_msg_hdr *hdr, bool tx),
+ TP_ARGS(hdr, tx),
+ TP_STRUCT__entry(
+ __field(u8, type)
+ __field(u8, tx)
+ __field(u32, seq_num)
+ ),
+ TP_fast_assign(
+ __entry->type = le32_to_cpu(hdr->type);
+ __entry->seq_num = le32_to_cpu(hdr->seq_num);
+ __entry->tx = tx;
+ ),
+ TP_printk("ME message: %s: type %d seq %d", __entry->tx ? "Tx" : "Rx",
+ __entry->type, __entry->seq_num)
+);
+
+/*
+ * If you add something here, add a stub in case
+ * !defined(CONFIG_IWLWIFI_DEVICE_TRACING)
+ */
+
+#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
+
+#endif /* CONFIG_IWLWIFI_DEVICE_TRACING */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 75fc2d935e5d..11e814b7cad0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -10,5 +10,6 @@ iwlmvm-y += rfi.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-$(CONFIG_PM) += d3.o
+iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o
ccflags-y += -I $(srctree)/$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 9d0d01f27d92..c604f9f39b24 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -107,7 +107,7 @@
#define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT true
-#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
+#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20
#define IWL_MVM_USE_NSSN_SYNC 0
#define IWL_MVM_PHY_FILTER_CHAIN_A 0
#define IWL_MVM_PHY_FILTER_CHAIN_B 0
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index a19f646a324f..b400867e94f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1391,6 +1391,13 @@ struct iwl_wowlan_status_data {
u8 tid_tear_down;
struct {
+ /* including RX MIC key for TKIP */
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 len;
+ u8 flags;
+ } gtk;
+
+ struct {
/*
* We store both the TKIP and AES representations
* coming from the firmware because we decode the
@@ -1400,11 +1407,15 @@ struct iwl_wowlan_status_data {
struct {
struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT];
} tkip, aes;
- /* including RX MIC key for TKIP */
- u8 key[WOWLAN_KEY_MAX_SIZE];
- u8 len;
- u8 flags;
- } gtk;
+
+ /*
+ * We use -1 for when we have valid data but don't know
+ * the key ID from firmware, and thus it needs to be
+ * installed with the last key (depending on rekeying).
+ */
+ s8 key_id;
+ bool valid;
+ } gtk_seq[2];
struct {
/* Same as above */
@@ -1556,12 +1567,10 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
kfree_skb(pkt);
}
-static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
- struct ieee80211_key_seq *seq)
+static void iwl_mvm_le64_to_aes_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
{
- u64 pn;
+ u64 pn = le64_to_cpu(le_pn);
- pn = le64_to_cpu(sc->pn);
seq->ccmp.pn[0] = pn >> 40;
seq->ccmp.pn[1] = pn >> 32;
seq->ccmp.pn[2] = pn >> 24;
@@ -1570,6 +1579,20 @@ static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
seq->ccmp.pn[5] = pn;
}
+static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ iwl_mvm_le64_to_aes_seq(sc->pn, seq);
+}
+
+static void iwl_mvm_le64_to_tkip_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+ u64 pn = le64_to_cpu(le_pn);
+
+ seq->tkip.iv16 = (u16)pn;
+ seq->tkip.iv32 = (u32)(pn >> 16);
+}
+
static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
struct ieee80211_key_seq *seq)
{
@@ -1630,10 +1653,12 @@ static void iwl_mvm_convert_key_counters(struct iwl_wowlan_status_data *status,
/* GTK RX counters */
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
iwl_mvm_tkip_sc_to_seq(&sc->tkip.multicast_rsc[i],
- &status->gtk.tkip.seq[i]);
+ &status->gtk_seq[0].tkip.seq[i]);
iwl_mvm_aes_sc_to_seq(&sc->aes.multicast_rsc[i],
- &status->gtk.aes.seq[i]);
+ &status->gtk_seq[0].aes.seq[i]);
}
+ status->gtk_seq[0].valid = true;
+ status->gtk_seq[0].key_id = -1;
/* PTK TX counter */
status->ptk.tkip.tx_pn = (u64)le16_to_cpu(sc->tkip.tsc.iv16) |
@@ -1649,24 +1674,103 @@ static void iwl_mvm_convert_key_counters(struct iwl_wowlan_status_data *status,
}
}
-static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
- struct ieee80211_key_conf *key,
- struct iwl_wowlan_status_data *status)
+static void
+iwl_mvm_convert_key_counters_v5_gtk_seq(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_all_rsc_tsc_v5 *sc,
+ unsigned int idx, unsigned int key_id)
+{
+ int tid;
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ iwl_mvm_le64_to_tkip_seq(sc->mcast_rsc[idx][tid],
+ &status->gtk_seq[idx].tkip.seq[tid]);
+ iwl_mvm_le64_to_aes_seq(sc->mcast_rsc[idx][tid],
+ &status->gtk_seq[idx].aes.seq[tid]);
+ }
+
+ status->gtk_seq[idx].valid = true;
+ status->gtk_seq[idx].key_id = key_id;
+}
+
+static void
+iwl_mvm_convert_key_counters_v5(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_all_rsc_tsc_v5 *sc)
+{
+ int i, tid;
+
+ BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC);
+ BUILD_BUG_ON(ARRAY_SIZE(sc->mcast_rsc) != ARRAY_SIZE(status->gtk_seq));
+
+ /* GTK RX counters */
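+ /*
+ * mcast_key_id_map[] maps a GTK key ID to the index of its RSC entry
+ * in mcast_rsc[]; an out-of-range value means the firmware did not
+ * report RSC data for that key ID, so it is skipped here.
+ */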
+ for (i = 0; i < ARRAY_SIZE(sc->mcast_key_id_map); i++) {
+ u8 entry = sc->mcast_key_id_map[i];
+
+ if (entry < ARRAY_SIZE(sc->mcast_rsc))
+ iwl_mvm_convert_key_counters_v5_gtk_seq(status, sc,
+ entry, i);
+ }
+
+ /* PTK TX counters not needed, assigned in device */
+
+ /* PTK RX counters */
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ iwl_mvm_le64_to_tkip_seq(sc->ucast_rsc[tid],
+ &status->ptk.tkip.seq[tid]);
+ iwl_mvm_le64_to_aes_seq(sc->ucast_rsc[tid],
+ &status->ptk.aes.seq[tid]);
+ }
+}
+
+static void iwl_mvm_set_key_rx_seq_idx(struct ieee80211_key_conf *key,
+ struct iwl_wowlan_status_data *status,
+ int idx)
{
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- iwl_mvm_set_key_rx_seq_tids(key, status->gtk.aes.seq);
+ iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].aes.seq);
break;
case WLAN_CIPHER_SUITE_TKIP:
- iwl_mvm_set_key_rx_seq_tids(key, status->gtk.tkip.seq);
+ iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].tkip.seq);
break;
default:
WARN_ON(1);
}
}
+static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
+ struct iwl_wowlan_status_data *status,
+ bool installed)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(status->gtk_seq); i++) {
+ if (!status->gtk_seq[i].valid)
+ continue;
+
+ /* Handle the case where we know the key ID */
+ if (status->gtk_seq[i].key_id == key->keyidx) {
+ s8 new_key_id = -1;
+
+ if (status->num_of_gtk_rekeys)
+ new_key_id = status->gtk.flags &
+ IWL_WOWLAN_GTK_IDX_MASK;
+
+ /* Don't install a new key's value to an old key */
+ if (new_key_id != key->keyidx)
+ iwl_mvm_set_key_rx_seq_idx(key, status, i);
+ continue;
+ }
+
+ /* handle the case where we didn't, last key only */
+ if (status->gtk_seq[i].key_id == -1 &&
+ (!status->num_of_gtk_rekeys || installed))
+ iwl_mvm_set_key_rx_seq_idx(key, status, i);
+ }
+}
+
struct iwl_mvm_d3_gtk_iter_data {
struct iwl_mvm *mvm;
struct iwl_wowlan_status_data *status;
@@ -1740,8 +1844,9 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
if (data->status->num_of_gtk_rekeys)
ieee80211_remove_key(key);
- else if (data->last_gtk == key)
- iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
+
+ if (data->last_gtk == key)
+ iwl_mvm_set_key_rx_seq(key, data->status, false);
}
static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
@@ -1825,7 +1930,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
key = ieee80211_gtk_rekey_add(vif, &conf.conf);
if (IS_ERR(key))
return false;
- iwl_mvm_set_key_rx_seq(mvm, key, status);
+ iwl_mvm_set_key_rx_seq(key, status, true);
replay_ctr = cpu_to_be64(status->replay_ctr);
@@ -1893,9 +1998,10 @@ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
iwl_mvm_parse_wowlan_status_common(v6)
iwl_mvm_parse_wowlan_status_common(v7)
iwl_mvm_parse_wowlan_status_common(v9)
+iwl_mvm_parse_wowlan_status_common(v12)
-static void iwl_mvm_convert_gtk(struct iwl_wowlan_status_data *status,
- struct iwl_wowlan_gtk_status *data)
+static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_gtk_status_v2 *data)
{
BUILD_BUG_ON(sizeof(status->gtk.key) < sizeof(data->key));
BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY +
@@ -1913,6 +2019,26 @@ static void iwl_mvm_convert_gtk(struct iwl_wowlan_status_data *status,
data->tkip_mic_key, sizeof(data->tkip_mic_key));
}
+static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_gtk_status_v3 *data)
+{
+ /* The parts we need are identical in v2 and v3 */
+#define CHECK(_f) do { \
+ BUILD_BUG_ON(offsetof(struct iwl_wowlan_gtk_status_v2, _f) != \
+ offsetof(struct iwl_wowlan_gtk_status_v3, _f)); \
+ BUILD_BUG_ON(offsetofend(struct iwl_wowlan_gtk_status_v2, _f) !=\
+ offsetofend(struct iwl_wowlan_gtk_status_v3, _f)); \
+} while (0)
+
+ CHECK(key);
+ CHECK(key_len);
+ CHECK(key_flags);
+ CHECK(tkip_mic_key);
+#undef CHECK
+
+ iwl_mvm_convert_gtk_v2(status, (void *)data);
+}
+
static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_igtk_status *data)
{
@@ -2012,7 +2138,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
goto out_free_resp;
iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc);
- iwl_mvm_convert_gtk(status, &v7->gtk[0]);
+ iwl_mvm_convert_gtk_v2(status, &v7->gtk[0]);
iwl_mvm_convert_igtk(status, &v7->igtk[0]);
} else if (notif_ver == 9 || notif_ver == 10 || notif_ver == 11) {
struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;
@@ -2025,10 +2151,22 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
goto out_free_resp;
iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc);
- iwl_mvm_convert_gtk(status, &v9->gtk[0]);
+ iwl_mvm_convert_gtk_v2(status, &v9->gtk[0]);
iwl_mvm_convert_igtk(status, &v9->igtk[0]);
status->tid_tear_down = v9->tid_tear_down;
+ } else if (notif_ver == 12) {
+ struct iwl_wowlan_status_v12 *v12 = (void *)cmd.resp_pkt->data;
+
+ status = iwl_mvm_parse_wowlan_status_common_v12(mvm, v12, len);
+ if (IS_ERR(status))
+ goto out_free_resp;
+
+ iwl_mvm_convert_key_counters_v5(status, &v12->gtk[0].sc);
+ iwl_mvm_convert_gtk_v3(status, &v12->gtk[0]);
+ iwl_mvm_convert_igtk(status, &v12->igtk[0]);
+
+ status->tid_tear_down = v12->tid_tear_down;
} else {
IWL_ERR(mvm,
"Firmware advertises unknown WoWLAN status response %d!\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index ff66001d507e..fb4920b01dbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1022,6 +1022,11 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
if (mvm->fw_restart >= 0)
mvm->fw_restart++;
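+ /* "nolog" suppresses the error logs once for this intentional error */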
+ if (count == 6 && !strcmp(buf, "nolog\n")) {
+ set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
+ set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mvm->trans->status);
+ }
+
/* take the return value to make compiler happy - it will fail anyway */
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(LONG_GROUP, REPLY_ERROR),
@@ -1038,6 +1043,9 @@ static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
+ if (count == 6 && !strcmp(buf, "nolog\n"))
+ set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
+
iwl_force_nmi(mvm->trans);
return count;
@@ -2064,6 +2072,7 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 949fb790f8fb..628aee634b2a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -511,7 +511,7 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
- if (sta->mfp)
+ if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
FTM_PUT_FLAG(PMF);
rcu_read_unlock();
@@ -1066,7 +1066,7 @@ static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
- rtt_avg = (alpha * rtt + (100 - alpha) * resp->rtt_avg) / 100;
+ rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);
IWL_DEBUG_INFO(mvm,
"%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 73a82f07dc59..083f86fa5017 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -15,7 +15,7 @@
#include "fw/api/datapath.h"
#include "fw/api/phy.h"
#include "fw/api/config.h"
-#include "fw/api/soc.h"
+#include "fw/api/system.h"
#include "fw/api/alive.h"
#include "fw/api/binding.h"
#include "fw/api/cmdhdr.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 863fec150e53..6f4690e56a46 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -12,8 +12,6 @@
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
-#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
-#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "fw/acpi.h"
#include "fw/pnvm.h"
@@ -32,6 +30,9 @@
#define IWL_PPAG_MASK 3
#define IWL_PPAG_ETSI_MASK BIT(0)
+#define IWL_TAS_US_MCC 0x5553
+#define IWL_TAS_CANADA_MCC 0x4341
+
struct iwl_mvm_alive_data {
bool valid;
u32 scd_base_addr;
@@ -123,13 +124,15 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_lmac_alive *lmac2 = NULL;
u16 status;
u32 lmac_error_event_table, umac_error_table;
+ u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ UCODE_ALIVE_NTFY, 0);
/*
* For v5 and above, we can check the version, for older
* versions we need to check the size.
*/
- if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- UCODE_ALIVE_NTFY, 0) == 5) {
+ if (version == 5 || version == 6) {
+ /* v5 and v6 are compatible (only IMR addition) */
struct iwl_alive_ntf_v5 *palive;
if (pkt_len < sizeof(*palive))
@@ -516,7 +519,6 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D);
}
}
-
#else /* CONFIG_ACPI */
static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
@@ -525,6 +527,49 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
}
#endif /* CONFIG_ACPI */
+#if defined(CONFIG_ACPI) && defined(CONFIG_EFI)
+static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
+{
+ u8 cmd_ver;
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ SAR_OFFSET_MAPPING_TABLE_CMD),
+ .flags = 0,
+ .data[0] = &mvm->fwrt.sgom_table,
+ .len[0] = sizeof(mvm->fwrt.sgom_table),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (!mvm->fwrt.sgom_enabled) {
+ IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n");
+ return 0;
+ }
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
+ SAR_OFFSET_MAPPING_TABLE_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (cmd_ver != 2) {
+ IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n",
+ cmd_ver);
+ return 0;
+ }
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret < 0)
+ IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);
+
+ return ret;
+}
+#else
+
+static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
+{
+ return 0;
+}
+#endif
+
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
@@ -757,6 +802,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
if (ret)
return ret;
+ iwl_mei_set_power_limit(per_chain);
+
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
@@ -820,6 +867,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
u16 len;
u32 n_bands;
u32 n_profiles;
+ u32 sk = 0;
int ret;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
PER_CHAIN_LIMIT_OFFSET_CMD,
@@ -879,19 +927,26 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
+ /* Only set to South Korea if the table revision is 1 */
+ if (mvm->fwrt.geo_rev == 1)
+ sk = 1;
+
/*
- * Set the revision on versions that contain it.
+ * Set the table_revision to South Korea (1) or not (0). The
+ * element name is misleading, as it doesn't contain the table
+ * revision number, but whether the South Korea variation
+ * should be used.
* This must be done after calling iwl_sar_geo_init().
*/
if (cmd_ver == 5)
- cmd.v5.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ cmd.v5.table_revision = cpu_to_le32(sk);
else if (cmd_ver == 4)
- cmd.v4.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ cmd.v4.table_revision = cpu_to_le32(sk);
else if (cmd_ver == 3)
- cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ cmd.v3.table_revision = cpu_to_le32(sk);
else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER))
- cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ cmd.v2.table_revision = cpu_to_le32(sk);
return iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(PHY_OPS_GROUP,
@@ -904,13 +959,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
union acpi_object *wifi_pkg, *data, *flags;
int i, j, ret, tbl_rev, num_sub_bands;
int idx = 2;
- s8 *gain;
- /*
- * The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0);
+ mvm->fwrt.ppag_flags = 0;
data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
@@ -922,8 +972,6 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev == 1 || tbl_rev == 2) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = mvm->fwrt.ppag_table.v2.gain[0];
- mvm->fwrt.ppag_ver = tbl_rev;
IWL_DEBUG_RADIO(mvm,
"Reading PPAG table v2 (tbl_rev=%d)\n",
tbl_rev);
@@ -943,8 +991,6 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
}
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- gain = mvm->fwrt.ppag_table.v1.gain[0];
- mvm->fwrt.ppag_ver = 0;
IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n");
goto read_table;
}
@@ -952,6 +998,7 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
read_table:
+ mvm->fwrt.ppag_ver = tbl_rev;
flags = &wifi_pkg->package.elements[1];
if (flags->type != ACPI_TYPE_INTEGER) {
@@ -959,10 +1006,9 @@ read_table:
goto out_free;
}
- mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(flags->integer.value &
- IWL_PPAG_MASK);
+ mvm->fwrt.ppag_flags = flags->integer.value & IWL_PPAG_MASK;
- if (!mvm->fwrt.ppag_table.v1.flags) {
+ if (!mvm->fwrt.ppag_flags) {
ret = 0;
goto out_free;
}
@@ -982,15 +1028,15 @@ read_table:
goto out_free;
}
- gain[i * num_sub_bands + j] = ent->integer.value;
+ mvm->fwrt.ppag_chains[i].subbands[j] = ent->integer.value;
if ((j == 0 &&
- (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_LB ||
- gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_LB)) ||
+ (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
+ mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
(j != 0 &&
- (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB ||
- gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) {
- mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0);
+ (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
+ mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
+ mvm->fwrt.ppag_flags = 0;
ret = -EINVAL;
goto out_free;
}
@@ -1005,6 +1051,7 @@ out_free:
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
+ union iwl_ppag_table_cmd cmd;
u8 cmd_ver;
int i, j, ret, num_sub_bands, cmd_size;
s8 *gain;
@@ -1014,37 +1061,39 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
"PPAG capability not supported by FW, command not sent.\n");
return 0;
}
- if (!mvm->fwrt.ppag_table.v1.flags) {
+ if (!mvm->fwrt.ppag_flags) {
IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n");
return 0;
}
+ /* The 'flags' field is the same in v1 and in v2 so we can just
+ * use v1 to access it.
+ */
+ cmd.v1.flags = cpu_to_le32(mvm->fwrt.ppag_flags);
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 1) {
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- gain = mvm->fwrt.ppag_table.v1.gain[0];
- cmd_size = sizeof(mvm->fwrt.ppag_table.v1);
+ gain = cmd.v1.gain[0];
+ cmd_size = sizeof(cmd.v1);
if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) {
IWL_DEBUG_RADIO(mvm,
"PPAG table rev is %d but FW supports v1, sending truncated table\n",
mvm->fwrt.ppag_ver);
- mvm->fwrt.ppag_table.v1.flags &=
- cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
}
} else if (cmd_ver == 2 || cmd_ver == 3) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = mvm->fwrt.ppag_table.v2.gain[0];
- cmd_size = sizeof(mvm->fwrt.ppag_table.v2);
+ gain = cmd.v2.gain[0];
+ cmd_size = sizeof(cmd.v2);
if (mvm->fwrt.ppag_ver == 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG table is v1 but FW supports v2, sending padded table\n");
} else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) {
IWL_DEBUG_RADIO(mvm,
"PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
- mvm->fwrt.ppag_table.v1.flags &=
- cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
}
} else {
IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n");
@@ -1053,6 +1102,8 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
+ gain[i * num_sub_bands + j] =
+ mvm->fwrt.ppag_chains[i].subbands[j];
IWL_DEBUG_RADIO(mvm,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
i, j, gain[i * num_sub_bands + j]);
@@ -1061,7 +1112,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
- 0, cmd_size, &mvm->fwrt.ppag_table);
+ 0, cmd_size, &cmd);
if (ret < 0)
IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
@@ -1100,18 +1151,63 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling PPAG.\n",
dmi_get_system_info(DMI_SYS_VENDOR));
- mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0);
+ mvm->fwrt.ppag_flags = 0;
return 0;
}
return iwl_mvm_ppag_send_cmd(mvm);
}
+static const struct dmi_system_id dmi_tas_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "LENOVO",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+
+ /* keep last */
+ {}
+};
+
+static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
+{
+ int i;
+ u32 size = le32_to_cpu(*le_size);
+
+ /* Verify that there is room for another country */
+ if (size >= IWL_TAS_BLOCK_LIST_MAX)
+ return false;
+
+ for (i = 0; i < size; i++) {
+ if (list[i] == cpu_to_le32(mcc))
+ return true;
+ }
+
+ list[size++] = cpu_to_le32(mcc);
+ *le_size = cpu_to_le32(size);
+ return true;
+}
+
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
int ret;
- struct iwl_tas_config_cmd cmd = {};
- int list_size;
+ struct iwl_tas_config_cmd_v3 cmd = {};
+ int cmd_size;
BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) <
APCI_WTAS_BLACK_LIST_MAX);
@@ -1121,7 +1217,7 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
return;
}
- ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.block_list_array, &list_size);
+ ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
@@ -1129,15 +1225,32 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
return;
}
- if (list_size < 0)
+ if (ret == 0)
return;
- /* list size if TAS enabled can only be non-negative */
- cmd.block_list_size = cpu_to_le32((u32)list_size);
+ if (!dmi_check_system(dmi_tas_approved_list)) {
+ IWL_DEBUG_RADIO(mvm,
+ "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR));
+ if ((!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
+ &cmd.block_list_size, IWL_TAS_US_MCC)) ||
+ (!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
+ &cmd.block_list_size, IWL_TAS_CANADA_MCC))) {
+ IWL_DEBUG_RADIO(mvm,
+ "Unable to add US/Canada to TAS block list, disabling TAS\n");
+ return;
+ }
+ }
+
+ cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
+ TAS_CONFIG,
+ IWL_FW_CMD_VER_UNKNOWN) < 3 ?
+ sizeof(struct iwl_tas_config_cmd_v2) :
+ sizeof(struct iwl_tas_config_cmd_v3);
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
TAS_CONFIG),
- 0, sizeof(cmd), &cmd);
+ 0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
@@ -1336,6 +1449,7 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
{
}
+
#endif /* CONFIG_ACPI */
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
@@ -1401,7 +1515,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (iwl_mvm_has_unified_ucode(mvm))
return iwl_run_unified_mvm_ucode(mvm);
- WARN_ON(!mvm->nvm_data);
ret = iwl_run_init_mvm_ucode(mvm);
if (ret) {
@@ -1631,6 +1744,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
else if (ret < 0)
goto error;
+ ret = iwl_mvm_sgom_init(mvm);
+ if (ret)
+ goto error;
+
iwl_mvm_tas_init(mvm);
iwl_mvm_leds_sync(mvm);
@@ -1705,20 +1822,6 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
return ret;
}
-void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
- u32 flags = le32_to_cpu(card_state_notif->flags);
-
- IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On",
- (flags & CT_KILL_CARD_DISABLED) ?
- "Reached" : "Not reached");
-}
-
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 897e3b91ddb2..65f4fe3ef504 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -191,6 +191,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
if (IS_ERR_OR_NULL(resp)) {
IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
PTR_ERR_OR_ZERO(resp));
+ resp = NULL;
goto out;
}
@@ -212,7 +213,6 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
__le16_to_cpu(resp->cap), resp_ver);
/* Store the return source id */
src_id = resp->source_id;
- kfree(resp);
if (IS_ERR_OR_NULL(regd)) {
IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
PTR_ERR_OR_ZERO(regd));
@@ -224,7 +224,10 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
mvm->lar_regdom_set = true;
mvm->mcc_src = src_id;
+ iwl_mei_set_country_code(__le16_to_cpu(resp->mcc));
+
out:
+ kfree(resp);
return regd;
}
@@ -638,14 +641,21 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
if (iwl_mvm_is_oce_supported(mvm)) {
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC, 0);
+
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
wiphy_ext_feature_set(hw->wiphy,
- NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
- wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
+
+ /* Old firmware also supports probe deferral and suppression */
+ if (scan_ver < 15)
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
}
if (mvm->nvm_data->sku_cap_11ax_enable &&
@@ -707,8 +717,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->netdev_features |= mvm->cfg->features;
if (!iwl_mvm_is_csum_supported(mvm))
- hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
- NETIF_F_RXCSUM);
+ hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK;
if (mvm->cfg->vht_mu_mimo_supported)
wiphy_ext_feature_set(hw->wiphy,
@@ -718,6 +727,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_PROTECTED_TWT);
+ iwl_mvm_vendor_cmds_register(mvm);
+
hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm);
hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm);
@@ -1084,6 +1095,27 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
+ ret = iwl_mvm_mei_get_ownership(mvm);
+ if (ret)
+ return ret;
+
+ if (mvm->mei_nvm_data) {
+ /* We got the NIC, we can now free the MEI NVM data */
+ kfree(mvm->mei_nvm_data);
+ mvm->mei_nvm_data = NULL;
+
+ /*
+ * We can't free the nvm_data we allocated based on the SAP
+ * data because we registered to cfg80211 with the channels
+ * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data
+ * just in order to be able to free it later.
+ * NULLify nvm_data so that we will read the NVM from the
+ * firmware this time.
+ */
+ mvm->temp_nvm_data = mvm->nvm_data;
+ mvm->nvm_data = NULL;
+ }
+
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
* Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
@@ -1144,6 +1176,8 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
mutex_unlock(&mvm->mutex);
+ iwl_mvm_mei_set_sw_rfkill_state(mvm);
+
return ret;
}
@@ -1261,6 +1295,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
*/
flush_work(&mvm->roc_done_wk);
+ iwl_mvm_mei_set_sw_rfkill_state(mvm);
+
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
mutex_unlock(&mvm->mutex);
@@ -1531,6 +1567,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvm->monitor_on = true;
iwl_mvm_vif_dbgfs_register(mvm, vif);
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ !mvm->csme_vif && mvm->mei_registered) {
+ iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr);
+ iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev);
+ mvm->csme_vif = vif;
+ }
+
goto out_unlock;
out_unbind:
@@ -1583,6 +1628,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ if (vif == mvm->csme_vif) {
+ iwl_mei_set_netdev(NULL);
+ mvm->csme_vif = NULL;
+ }
+
probe_data = rcu_dereference_protected(mvmvif->probe_resp_data,
lockdep_is_held(&mvm->mutex));
RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
@@ -1688,6 +1738,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
struct iwl_mvm_mc_iter_data iter_data = {
.mvm = mvm,
};
+ int ret;
lockdep_assert_held(&mvm->mutex);
@@ -1697,6 +1748,22 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_mc_iface_iterator, &iter_data);
+
+ /*
+ * Send a (synchronous) echo command so that we wait for the
+ * multiple asynchronous MCAST_FILTER_CMD commands sent by
+ * the interface iterator. Otherwise, we might get here over
+ * and over again (by userspace just sending a lot of these)
+ * and the CPU can send them faster than the firmware can
+ * process them.
+ * Note that the CPU is still faster - but with this we'll
+ * actually send fewer commands overall because the CPU will
+ * not schedule the work in mac80211 as frequently if it's
+ * still running when rescheduled (possibly multiple times).
+ */
+ ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
+ if (ret)
+ IWL_ERR(mvm, "Failed to synchronize multicast groups update\n");
}
static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
@@ -2148,24 +2215,24 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
}
flags |= STA_CTXT_HE_PACKET_EXT;
- } else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] &
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) !=
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED) {
+ } else if (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)
+ != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) {
int low_th = -1;
int high_th = -1;
/* Take the PPE thresholds from the nominal padding info */
- switch (sta->he_cap.he_cap_elem.phy_cap_info[9] &
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) {
- case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US:
+ switch (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) {
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_NONE;
break;
- case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US:
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
low_th = IWL_HE_PKT_EXT_BPSK;
high_th = IWL_HE_PKT_EXT_NONE;
break;
- case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US:
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_BPSK;
break;
@@ -2393,6 +2460,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IEEE80211_SMPS_DYNAMIC);
}
} else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
+ iwl_mvm_mei_host_disassociated(mvm);
/*
* If update fails - SF might be running in associated
* mode while disassociated - which is forbidden.
@@ -3129,6 +3197,69 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
}
}
+static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_sta *mvm_sta)
+{
+#if IS_ENABLED(CONFIG_IWLMEI)
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mei_conn_info conn_info = {
+ .ssid_len = vif->bss_conf.ssid_len,
+ .channel = vif->bss_conf.chandef.chan->hw_value,
+ };
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return;
+
+ if (!mvm->mei_registered)
+ return;
+
+ switch (mvm_sta->pairwise_cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256;
+ break;
+ case 0:
+ /* open profile */
+ break;
+ default:
+ /* cipher not supported, don't send anything to iwlmei */
+ return;
+ }
+
+ switch (mvmvif->rekey_data.akm) {
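+ /* rekey_data.akm holds only the low byte of the AKM suite selector */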
+ case WLAN_AKM_SUITE_SAE & 0xff:
+ conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE;
+ break;
+ case WLAN_AKM_SUITE_PSK & 0xff:
+ conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK;
+ break;
+ case WLAN_AKM_SUITE_8021X & 0xff:
+ conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA;
+ break;
+ case 0:
+ /* open profile */
+ conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN;
+ break;
+ default:
+ /* auth method / AKM not supported */
+ /* TODO: All the FT versions of these? */
+ return;
+ }
+
+ memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len);
+ memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ /* TODO: add support for collocated AP data */
+ iwl_mei_host_associated(&conn_info, NULL);
+#endif
+}
+
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -3273,12 +3404,18 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
* multicast data frames can be forwarded to the driver
*/
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
}
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
true);
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
+ /* once we move into assoc state, need to update rate scale to
+ * disable using wide bandwidth
+ */
+ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+ false);
if (!sta->tdls) {
/* Multicast data frames are no longer allowed */
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
@@ -3301,16 +3438,16 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_AP) {
mvmvif->ap_assoc_sta_count--;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
- } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
- /* remove session protection if still running */
+ } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
iwl_mvm_stop_session_protection(mvm, vif);
- }
ret = 0;
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_NONE) {
ret = 0;
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST) {
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ iwl_mvm_stop_session_protection(mvm, vif);
ret = iwl_mvm_rm_sta(mvm, vif, sta);
if (sta->tdls) {
iwl_mvm_recalc_tdls_state(mvm, vif, false);
@@ -3476,12 +3613,15 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_sta *mvmsta = NULL;
struct iwl_mvm_key_pn *ptk_pn;
int keyidx = key->keyidx;
int ret, i;
u8 key_offset;
+ if (sta)
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (!mvm->trans->trans_cfg->gen2) {
@@ -3582,7 +3722,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
}
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- sta && iwl_mvm_has_new_rx_api(mvm) &&
+ mvmsta && iwl_mvm_has_new_rx_api(mvm) &&
key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP ||
@@ -3590,7 +3730,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_seq seq;
int tid, q;
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
ptk_pn = kzalloc(struct_size(ptk_pn, q,
mvm->trans->num_rx_queues),
@@ -3617,6 +3756,9 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
else
key_offset = STA_KEY_IDX_INVALID;
+ if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ mvmsta->pairwise_cipher = key->cipher;
+
IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
if (ret) {
@@ -3657,12 +3799,11 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
}
- if (sta && iwl_mvm_has_new_rx_api(mvm) &&
+ if (mvmsta && iwl_mvm_has_new_rx_api(mvm) &&
key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
ptk_pn = rcu_dereference_protected(
mvmsta->ptk_pn[keyidx],
lockdep_is_held(&mvm->mutex));
@@ -5382,6 +5523,10 @@ static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return iwl_mvm_tx_csum_bz(mvm, head, true) ==
+ iwl_mvm_tx_csum_bz(mvm, skb, true);
+
/* For now don't aggregate IPv6 in AMSDU */
if (skb->protocol != htons(ETH_P_IP))
return false;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index a72d85086fe3..1dcbb0eb63c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -30,6 +30,7 @@
#include "fw/runtime.h"
#include "fw/dbg.h"
#include "fw/acpi.h"
+#include "mei/iwl-mei.h"
#include "iwl-nvm-parse.h"
#include <linux/average.h>
@@ -93,11 +94,10 @@ struct iwl_mvm_phy_ctxt {
enum nl80211_chan_width width;
- /*
- * TODO: This should probably be removed. Currently here only for rate
- * scaling algorithm
- */
struct ieee80211_channel *channel;
+
+ /* track for RLC config command */
+ u32 center_freq1;
};
struct iwl_mvm_time_event_data {
@@ -830,6 +830,18 @@ struct iwl_mvm {
const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
+ struct iwl_mei_nvm *mei_nvm_data;
+ struct iwl_mvm_csme_conn_info __rcu *csme_conn_info;
+ bool mei_rfkill_blocked;
+ bool mei_registered;
+ struct work_struct sap_connected_wk;
+
+ /*
+ * NVM built based on the SAP data, which we can't free even after
+ * we get ownership because it contains the channels registered
+ * with cfg80211.
+ */
+ struct iwl_nvm_data *temp_nvm_data;
+
/* NVM sections */
struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
@@ -1021,6 +1033,8 @@ struct iwl_mvm {
/* Indicate if 32Khz external clock is valid */
u32 ext_clock_valid;
+ /* This vif used by CSME to send / receive traffic */
+ struct ieee80211_vif *csme_vif;
struct ieee80211_vif __rcu *csa_vif;
struct ieee80211_vif __rcu *csa_tx_blocked_vif;
u8 csa_tx_block_bcn_timeout;
@@ -1123,6 +1137,8 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
+ * @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: if set, suppress one error log;
+ *	used when an error is triggered intentionally
* @IWL_MVM_STATUS_STARTING: starting mac,
* used to disable restart flow while in STARTING state
*/
@@ -1136,9 +1152,15 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
+ IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
IWL_MVM_STATUS_STARTING,
};
+struct iwl_mvm_csme_conn_info {
+ struct rcu_head rcu_head;
+ struct iwl_mei_conn_info conn_info;
+};
+
/* Keep track of completed init configuration */
enum iwl_mvm_init_status {
IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
@@ -1496,6 +1518,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
unsigned int tid);
+u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu);
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
@@ -1601,8 +1624,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
@@ -1942,6 +1963,17 @@ void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm);
int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);
+#if IS_ENABLED(CONFIG_IWLMEI)
+
+/* vendor commands */
+void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm);
+
+#else
+
+static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {}
+
+#endif
+
/* Location Aware Regulatory */
struct iwl_mcc_update_resp *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
@@ -2161,4 +2193,47 @@ enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher)
return IWL_LOCATION_CIPHER_INVALID;
}
}
+
+struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm);
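+
+/*
+ * The iwl_mvm_mei_*() helpers below are no-ops unless we actually
+ * registered with iwlmei, so callers don't need to check
+ * mvm->mei_registered themselves.
+ */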
+static inline int iwl_mvm_mei_get_ownership(struct iwl_mvm *mvm)
+{
+ if (mvm->mei_registered)
+ return iwl_mei_get_ownership();
+ return 0;
+}
+
+static inline void iwl_mvm_mei_tx_copy_to_csme(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
+ unsigned int ivlen)
+{
+ if (mvm->mei_registered)
+ iwl_mei_tx_copy_to_csme(skb, ivlen);
+}
+
+static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm)
+{
+ if (mvm->mei_registered)
+ iwl_mei_host_disassociated();
+}
+
+static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
+{
+ if (mvm->mei_registered)
+ iwl_mei_device_down();
+}
+
+static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
+{
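+ /* the wiphy rfkill state can only be queried after mac80211 registration */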
+ bool sw_rfkill =
+ mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false;
+
+ if (mvm->mei_registered)
+ iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
+ sw_rfkill);
+}
+
+void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool forbidden);
+
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index cd08e289cd9a..87630d38dc52 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/module.h>
+#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>
@@ -26,6 +27,7 @@
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"
+#include "fw/uefi.h"
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -78,7 +80,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
- u32 reg_val = 0;
+ u32 reg_val;
u32 phy_config = iwl_mvm_get_phy_config(mvm);
radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
@@ -89,10 +91,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
FW_PHY_CFG_RADIO_DASH_POS;
/* SKU control */
- reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
- CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
- reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
- CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+ reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev);
/* radio configuration */
reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
@@ -117,8 +116,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
- CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH |
CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
@@ -260,6 +258,7 @@ enum iwl_rx_handler_context {
/**
* struct iwl_rx_handlers handler for FW notification
* @cmd_id: command id
+ * @min_size: minimum size to expect for the notification
* @context: see &iwl_rx_handler_context
* @fn: the function is called when notification is received
*/
@@ -334,9 +333,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
struct iwl_umac_scan_iter_complete_notif),
- RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
- RX_HANDLER_SYNC, struct iwl_card_state_notif),
-
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
@@ -457,7 +453,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(STATISTICS_NOTIFICATION),
HCMD_NAME(EOSP_NOTIFICATION),
HCMD_NAME(REDUCE_TX_POWER_CMD),
- HCMD_NAME(CARD_STATE_NOTIFICATION),
HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
HCMD_NAME(TDLS_CONFIG_CMD),
HCMD_NAME(MAC_PM_POWER_TABLE),
@@ -502,6 +497,9 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
HCMD_NAME(FW_ERROR_RECOVERY_CMD),
+ HCMD_NAME(RFI_CONFIG_CMD),
+ HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
+ HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
};
/* Please keep this array *SORTED* by hex value.
@@ -534,6 +532,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(STA_HE_CTXT_CMD),
+ HCMD_NAME(RLC_CONFIG_CMD),
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
@@ -683,14 +682,45 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
{
+ struct iwl_trans *trans = mvm->trans;
int ret;
+ if (trans->csme_own) {
+ if (WARN(!mvm->mei_registered,
+ "csme is owner, but we aren't registered to iwlmei\n"))
+ goto get_nvm_from_fw;
+
+ mvm->mei_nvm_data = iwl_mei_get_nvm();
+ if (mvm->mei_nvm_data) {
+ /*
+ * mvm->mei_nvm_data is set, and because of that
+ * we'll load the NVM from the FW when we get
+ * ownership.
+ */
+ mvm->nvm_data =
+ iwl_parse_mei_nvm_data(trans, trans->cfg,
+ mvm->mei_nvm_data, mvm->fw);
+ return 0;
+ }
+
+ IWL_ERR(mvm,
+ "Got a NULL NVM from CSME, trying to get it from the device\n");
+ }
+
+get_nvm_from_fw:
rtnl_lock();
wiphy_lock(mvm->hw->wiphy);
mutex_lock(&mvm->mutex);
- ret = iwl_run_init_mvm_ucode(mvm);
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret) {
+ mutex_unlock(&mvm->mutex);
+ wiphy_unlock(mvm->hw->wiphy);
+ rtnl_unlock();
+ return ret;
+ }
+ ret = iwl_run_init_mvm_ucode(mvm);
if (ret && ret != -ERFKILL)
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
if (!ret && iwl_mvm_is_lar_supported(mvm)) {
@@ -705,7 +735,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
- if (ret < 0)
+ if (ret)
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
return ret;
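
The flow above prefers the NVM snapshot cached by CSME via iwlmei and only falls back to reading it from the device firmware. A minimal standalone sketch of that fallback, with hypothetical stand-in names rather than the driver's API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical NVM blob; stands in for struct iwl_nvm_data. */
struct nvm { const char *origin; };

/* Simulates iwl_mei_get_nvm(): may return NULL if CSME has nothing. */
static struct nvm *get_nvm_from_csme(int csme_has_nvm)
{
	if (!csme_has_nvm)
		return NULL;
	struct nvm *n = malloc(sizeof(*n));
	if (n)
		n->origin = "CSME (via iwlmei)";
	return n;
}

/* Simulates the firmware path taken under rtnl/wiphy/mvm locks. */
static struct nvm *get_nvm_from_fw(void)
{
	struct nvm *n = malloc(sizeof(*n));
	if (n)
		n->origin = "device firmware";
	return n;
}

static struct nvm *start_get_nvm(int csme_owns, int csme_has_nvm)
{
	if (csme_owns) {
		struct nvm *n = get_nvm_from_csme(csme_has_nvm);
		if (n)
			return n;
		fprintf(stderr, "NULL NVM from CSME, trying the device\n");
	}
	return get_nvm_from_fw();
}

int main(void)
{
	struct nvm *n = start_get_nvm(1, 0);
	printf("NVM loaded from: %s\n", n ? n->origin : "nowhere");
	free(n);
	return 0;
}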
@@ -713,6 +743,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
{
+ struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
int ret;
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
@@ -720,10 +751,17 @@ static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
ret = iwl_mvm_mac_setup_register(mvm);
if (ret)
return ret;
+
mvm->hw_registered = true;
iwl_mvm_dbgfs_register(mvm);
+ wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
+ mvm->mei_rfkill_blocked,
+ RFKILL_HARD_BLOCK_NOT_OWNER);
+
+ iwl_mvm_mei_set_sw_rfkill_state(mvm);
+
return 0;
}
@@ -904,6 +942,109 @@ static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = {
.frob_mem = iwl_mvm_frob_mem,
};
+static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info)
+{
+ struct iwl_mvm *mvm = priv;
+ struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info;
+
+ /*
+ * This is protected by the guarantee that this function will not be
+ * called concurrently from two different threads.
+ */
+ prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true);
+
+ curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL);
+ if (!curr_conn_info)
+ return;
+
+ curr_conn_info->conn_info = *conn_info;
+
+ rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info);
+
+ if (prev_conn_info)
+ kfree_rcu(prev_conn_info, rcu_head);
+}
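
iwl_mvm_me_conn_status() publishes a freshly allocated snapshot with rcu_assign_pointer() and defers freeing the old one via kfree_rcu(). Outside the kernel the same publish-then-reclaim shape can be sketched with C11 atomics; this is only an analogy (a real userspace equivalent would need liburcu or similar to get the grace period right):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn_info { char ssid[32]; int channel; };

/* Readers load this pointer; a single updater swaps it. */
static _Atomic(struct conn_info *) current_info;

static void publish_conn_info(const char *ssid, int channel)
{
	struct conn_info *new = calloc(1, sizeof(*new));
	if (!new)
		return;
	snprintf(new->ssid, sizeof(new->ssid), "%s", ssid);
	new->channel = channel;

	/* Release semantics: readers see a fully initialized snapshot. */
	struct conn_info *old =
		atomic_exchange_explicit(&current_info, new,
					 memory_order_acq_rel);

	/*
	 * The kernel uses kfree_rcu() here so in-flight readers finish
	 * first; with no concurrent readers we may free immediately.
	 */
	free(old);
}

int main(void)
{
	publish_conn_info("lab-ap", 36);
	publish_conn_info("lab-ap", 149);
	struct conn_info *ci = atomic_load(&current_info);
	printf("ssid=%s channel=%d\n", ci->ssid, ci->channel);
	free(ci);
	return 0;
}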
+
+static void iwl_mvm_mei_rfkill(void *priv, bool blocked)
+{
+ struct iwl_mvm *mvm = priv;
+
+ mvm->mei_rfkill_blocked = blocked;
+ if (!mvm->hw_registered)
+ return;
+
+ wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
+ mvm->mei_rfkill_blocked,
+ RFKILL_HARD_BLOCK_NOT_OWNER);
+}
+
+static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden)
+{
+ struct iwl_mvm *mvm = priv;
+
+ if (!mvm->hw_registered || !mvm->csme_vif)
+ return;
+
+ iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden);
+}
+
+static void iwl_mvm_sap_connected_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, sap_connected_wk);
+ int ret;
+
+ ret = iwl_mvm_start_get_nvm(mvm);
+ if (ret)
+ goto out_free;
+
+ ret = iwl_mvm_start_post_nvm(mvm);
+ if (ret)
+ goto out_free;
+
+ return;
+
+out_free:
+ IWL_ERR(mvm, "Couldn't get started...\n");
+ iwl_mei_start_unregister();
+ iwl_mei_unregister_complete();
+ iwl_fw_flush_dumps(&mvm->fwrt);
+ iwl_mvm_thermal_exit(mvm);
+ iwl_fw_runtime_free(&mvm->fwrt);
+ iwl_phy_db_free(mvm->phy_db);
+ kfree(mvm->scan_cmd);
+ iwl_trans_op_mode_leave(mvm->trans);
+ kfree(mvm->nvm_data);
+ kfree(mvm->mei_nvm_data);
+
+ ieee80211_free_hw(mvm->hw);
+}
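
The sap_connected work item lets the heavy start sequence run outside the MEI callback context, with a single error path that tears everything down. A minimal userspace analogue of "schedule work, run the two start phases, clean up on failure" might look like this (pthread-based, purely illustrative):

#include <pthread.h>
#include <stdio.h>

static int start_get_nvm(void)  { puts("phase 1: NVM");      return 0; }
static int start_post_nvm(void) { puts("phase 2: register"); return 0; }
static void teardown(void)      { puts("teardown after failure"); }

/* Body of the deferred work: mirrors the goto-on-error shape above. */
static void *sap_connected_work(void *arg)
{
	(void)arg;
	if (start_get_nvm())
		goto out_free;
	if (start_post_nvm())
		goto out_free;
	return NULL;
out_free:
	teardown();
	return NULL;
}

int main(void)
{
	pthread_t wk;

	/* schedule_work() analogue: kick the handler asynchronously */
	pthread_create(&wk, NULL, sap_connected_work, NULL);
	pthread_join(wk, NULL);
	return 0;
}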
+
+static void iwl_mvm_mei_sap_connected(void *priv)
+{
+ struct iwl_mvm *mvm = priv;
+
+ if (!mvm->hw_registered)
+ schedule_work(&mvm->sap_connected_wk);
+}
+
+static void iwl_mvm_mei_nic_stolen(void *priv)
+{
+ struct iwl_mvm *mvm = priv;
+
+ rtnl_lock();
+ cfg80211_shutdown_all_interfaces(mvm->hw->wiphy);
+ rtnl_unlock();
+}
+
+static const struct iwl_mei_ops mei_ops = {
+ .me_conn_status = iwl_mvm_me_conn_status,
+ .rfkill = iwl_mvm_mei_rfkill,
+ .roaming_forbidden = iwl_mvm_mei_roaming_forbidden,
+ .sap_connected = iwl_mvm_mei_sap_connected,
+ .nic_stolen = iwl_mvm_mei_nic_stolen,
+};
+
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -915,9 +1056,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
- int err, scan_size;
+ int scan_size;
u32 min_backoff;
- enum iwl_amsdu_size rb_size_default;
+ struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
/*
* We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
@@ -956,6 +1097,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
&iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
iwl_mvm_get_acpi_tables(mvm);
+ iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
mvm->init_status = 0;
@@ -1017,6 +1159,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
+ INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
@@ -1058,14 +1201,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- rb_size_default = IWL_AMSDU_2K;
- else
- rb_size_default = IWL_AMSDU_4K;
-
switch (iwlwifi_mod_params.amsdu_size) {
case IWL_AMSDU_DEF:
- trans_cfg.rx_buf_size = rb_size_default;
+ trans_cfg.rx_buf_size = IWL_AMSDU_4K;
break;
case IWL_AMSDU_4K:
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
@@ -1079,7 +1217,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
default:
pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
iwlwifi_mod_params.amsdu_size);
- trans_cfg.rx_buf_size = rb_size_default;
+ trans_cfg.rx_buf_size = IWL_AMSDU_4K;
}
trans->wide_cmd_header = true;
@@ -1139,10 +1277,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_DEBUG_EEPROM(mvm->trans->dev,
"working without external nvm file\n");
- err = iwl_trans_start_hw(mvm->trans);
- if (err)
- goto out_free;
-
scan_size = iwl_mvm_scan_size(mvm);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
@@ -1167,8 +1301,20 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->debugfs_dir = dbgfs_dir;
- if (iwl_mvm_start_get_nvm(mvm))
+ mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops);
+
+ if (iwl_mvm_start_get_nvm(mvm)) {
+ /*
+ * Getting the NVM failed while CSME is the owner, but since
+ * we are registered to MEI, we'll get the NVM later, when
+ * it becomes possible to get it from CSME.
+ */
+ if (trans->csme_own && mvm->mei_registered)
+ return op_mode;
+
goto out_thermal_exit;
+ }
+
if (iwl_mvm_start_post_nvm(mvm))
goto out_thermal_exit;
@@ -1177,6 +1323,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_thermal_exit:
iwl_mvm_thermal_exit(mvm);
+ if (mvm->mei_registered) {
+ iwl_mei_start_unregister();
+ iwl_mei_unregister_complete();
+ }
out_free:
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
@@ -1203,6 +1353,7 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
iwl_trans_stop_device(mvm->trans);
iwl_free_fw_paging(&mvm->fwrt);
iwl_fw_dump_conf_clear(&mvm->fwrt);
+ iwl_mvm_mei_device_down(mvm);
}
static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
@@ -1210,11 +1361,33 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
int i;
+ if (mvm->mei_registered) {
+ rtnl_lock();
+ iwl_mei_set_netdev(NULL);
+ rtnl_unlock();
+ iwl_mei_start_unregister();
+ }
+
+ /*
+ * After we unregister from mei, the worker can't be scheduled
+ * anymore.
+ */
+ cancel_work_sync(&mvm->sap_connected_wk);
+
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
- ieee80211_unregister_hw(mvm->hw);
+ /*
+	 * If we couldn't get ownership of the device and couldn't get
+	 * the NVM from CSME, we never registered to mac80211: we
+	 * didn't fail op_mode_start because we were waiting for CSME
+	 * to allow us to get the NVM and register. If that never
+	 * happened, we are still unregistered, hence the check below.
+ */
+ if (mvm->hw_registered)
+ ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
kfree(mvm->mcast_filter_cmd);
@@ -1229,6 +1402,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
mvm->phy_db = NULL;
kfree(mvm->nvm_data);
+ kfree(mvm->mei_nvm_data);
+ kfree(rcu_access_pointer(mvm->csme_conn_info));
+ kfree(mvm->temp_nvm_data);
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
@@ -1237,6 +1413,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_fw_runtime_free(&mvm->fwrt);
mutex_destroy(&mvm->mutex);
+ if (mvm->mei_registered)
+ iwl_mei_unregister_complete();
+
ieee80211_free_hw(mvm->hw);
}
@@ -1519,6 +1698,12 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
iwl_mvm_set_rfkill_state(mvm);
}
+struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm)
+{
+ return rcu_dereference_protected(mvm->csme_conn_info,
+ lockdep_is_held(&mvm->mutex));
+}
+
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -1657,9 +1842,16 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
iwl_fw_error_collect(&mvm->fwrt, false);
- if (fw_error && mvm->fw_restart > 0)
+ if (fw_error && mvm->fw_restart > 0) {
mvm->fw_restart--;
- ieee80211_restart_hw(mvm->hw);
+ ieee80211_restart_hw(mvm->hw);
+ } else if (mvm->fwrt.trans->dbg.restart_required) {
+ IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
+		mvm->fwrt.trans->dbg.restart_required = false;
+ ieee80211_restart_hw(mvm->hw);
+ } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
+ ieee80211_restart_hw(mvm->hw);
+ }
}
}
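
After this change the restart decision has three independent triggers instead of one. A compact sketch of the new logic (function and parameter names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the three ieee80211_restart_hw() conditions above. */
static bool should_restart(bool fw_error, int restarts_left,
			   bool dbg_restart_required, bool old_family)
{
	if (fw_error && restarts_left > 0)
		return true;	/* normal FW-error restart budget */
	if (dbg_restart_required)
		return true;	/* debug collection asked for it */
	if (old_family)
		return true;	/* <= family 8000: always restart */
	return false;
}

int main(void)
{
	printf("%d\n", should_restart(true, 1, false, false));	/* 1 */
	printf("%d\n", should_restart(true, 0, false, false));	/* 0 */
	printf("%d\n", should_restart(false, 0, true, false));	/* 1 */
	return 0;
}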
@@ -1667,7 +1859,9 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status))
+ if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
+ !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
+ &mvm->status))
iwl_mvm_dump_nic_error_log(mvm);
if (sync) {
@@ -1688,7 +1882,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
return;
- iwl_mvm_nic_restart(mvm, true);
+ iwl_mvm_nic_restart(mvm, false);
}
static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
@@ -1737,6 +1931,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+ if (unlikely(queue >= mvm->trans->num_rx_queues))
+ return;
+
if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 035336a9e755..9af40b0fa37a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -157,8 +157,43 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
- iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
+ /* we only support RLC command version 2 */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
+ RLC_CONFIG_CMD, 0) < 2)
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
+ chains_static, chains_dynamic);
+}
+
+static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
+ u8 chains_static, u8 chains_dynamic)
+{
+ struct iwl_rlc_config_cmd cmd = {
+ .phy_id = cpu_to_le32(ctxt->id),
+ };
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
+ RLC_CONFIG_CMD, 0) < 2)
+ return 0;
+
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE !=
+ PHY_RX_CHAIN_DRIVER_FORCE_MSK);
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_VALID !=
+ PHY_RX_CHAIN_VALID_MSK);
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE !=
+ PHY_RX_CHAIN_FORCE_SEL_MSK);
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE_MIMO !=
+ PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK);
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_COUNT != PHY_RX_CHAIN_CNT_MSK);
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_MIMO_COUNT !=
+ PHY_RX_CHAIN_MIMO_CNT_MSK);
+
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd.rlc.rx_chain_info,
chains_static, chains_dynamic);
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(RLC_CONFIG_CMD,
+ DATA_PATH_GROUP, 2),
+ 0, sizeof(cmd), &cmd);
}
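
The BUILD_BUG_ON() calls pin the new RLC_CONFIG_CMD bit layout to the legacy PHY_RX_CHAIN_* masks, so iwl_mvm_phy_ctxt_set_rxchain() can fill either structure without translation. The same compile-time technique in standard C uses static_assert; a self-contained sketch with invented mask values:

#include <assert.h>
#include <stdio.h>

/* Two API generations that must agree bit-for-bit (values invented). */
#define PHY_RX_CHAIN_VALID_MSK		0x000e
#define IWL_RLC_CHAIN_INFO_VALID	0x000e
#define PHY_RX_CHAIN_CNT_MSK		0x0c00
#define IWL_RLC_CHAIN_INFO_COUNT	0x0c00

/* Fails the build, not a late runtime check, if the layouts drift. */
static_assert(IWL_RLC_CHAIN_INFO_VALID == PHY_RX_CHAIN_VALID_MSK,
	      "valid mask must match across API versions");
static_assert(IWL_RLC_CHAIN_INFO_COUNT == PHY_RX_CHAIN_CNT_MSK,
	      "count mask must match across API versions");

int main(void)
{
	puts("mask layouts verified at compile time");
	return 0;
}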
/*
@@ -177,7 +212,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
PHY_CONTEXT_CMD, 1);
- if (ver == 3) {
+ if (ver == 3 || ver == 4) {
struct iwl_phy_context_cmd cmd = {};
/* Set the command header fields */
@@ -211,9 +246,16 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
}
- if (ret)
+ if (ret) {
IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
- return ret;
+ return ret;
+ }
+
+ if (action != FW_CTXT_ACTION_REMOVE)
+ return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static,
+ chains_dynamic);
+
+ return 0;
}
/*
@@ -228,6 +270,8 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
lockdep_assert_held(&mvm->mutex);
ctxt->channel = chandef->chan;
+ ctxt->width = chandef->width;
+ ctxt->center_freq1 = chandef->center_freq1;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
@@ -257,6 +301,14 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
lockdep_assert_held(&mvm->mutex);
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
+ RLC_CONFIG_CMD, 0) >= 2 &&
+ ctxt->channel == chandef->chan &&
+ ctxt->width == chandef->width &&
+ ctxt->center_freq1 == chandef->center_freq1)
+ return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static,
+ chains_dynamic);
+
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
ctxt->channel->band != chandef->chan->band) {
@@ -275,6 +327,8 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->channel = chandef->chan;
ctxt->width = chandef->width;
+ ctxt->center_freq1 = chandef->center_freq1;
+
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
action);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index 44344216a1a9..f054ce76bed5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -7,39 +7,57 @@
#include "fw/api/commands.h"
#include "fw/api/phy-ctxt.h"
-/**
+/*
* DDR needs frequency in units of 16.666MHz, so provide FW with the
* frequency values in the adjusted format.
*/
static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
- /* LPDDR4 */
+ /* frequency 2667MHz */
+ {cpu_to_le16(160), {50, 58, 60, 62, 64, 52, 54, 56},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 2933MHz */
+ {cpu_to_le16(176), {149, 151, 153, 157, 159, 161, 165, 163, 167, 169,
+ 171, 173, 175},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 3200MHz */
+ {cpu_to_le16(192), {79, 81, 83, 85, 87, 89, 91, 93},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}},
/* frequency 3733MHz */
- {cpu_to_le16(223), {114, 116, 118, 120, 122,},
- {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+ {cpu_to_le16(223), {114, 116, 118, 120, 122, 106, 110, 124, 126},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 4000MHz */
+ {cpu_to_le16(240), {114, 151, 155, 157, 159, 161, 165},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5,}},
/* frequency 4267MHz */
{cpu_to_le16(256), {79, 83, 85, 87, 89, 91, 93,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6,}},
- /* DDR5ePOR */
-
- /* frequency 4000MHz */
- {cpu_to_le16(240), {3, 5, 7, 9, 11, 13, 15,},
- {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
- PHY_BAND_6, PHY_BAND_6,}},
-
/* frequency 4400MHz */
{cpu_to_le16(264), {111, 119, 123, 125, 129, 131, 133, 135, 143,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}},
- /* LPDDR5iPOR */
-
/* frequency 5200MHz */
- {cpu_to_le16(312), {36, 38, 40, 42, 50,},
- {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+ {cpu_to_le16(312), {36, 38, 40, 42, 44, 46, 50,},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 5600MHz */
+ {cpu_to_le16(336), {106, 110, 112, 114, 116, 118, 120, 122},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 6000MHz */
{cpu_to_le16(360), {3, 5, 7, 9, 11, 13, 15,},
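
The table keys follow from the 16.666 MHz (i.e. 50/3 MHz) unit noted above: 2667 MHz maps to 160, 4267 MHz to 256, 6000 MHz to 360. A small sketch of the conversion; most rows match a truncating divide of the nominal DDR rate, while one or two (2933, 3733) appear to be derived from the exact x33.33 clock, so treat the formula as an approximation:

#include <stdio.h>

/* Convert a DDR data rate in MHz to FW units of 50/3 MHz (16.666...). */
static unsigned int mhz_to_fw_units(unsigned int mhz)
{
	return mhz * 3 / 50;	/* truncating, like most table rows */
}

int main(void)
{
	unsigned int rates[] = { 2667, 3200, 4267, 5200, 6000 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%u MHz -> %u\n", rates[i], mhz_to_fw_units(rates[i]));
	/* prints 160, 192, 256, 312, 360 -- matching the table keys */
	return 0;
}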
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 958702403a45..66808c55aa0e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -129,7 +129,7 @@ int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap,
static void
rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
const struct ieee80211_sta_vht_cap *vht_cap,
- struct iwl_tlc_config_cmd *cmd)
+ struct iwl_tlc_config_cmd_v4 *cmd)
{
u16 supp;
int i, highest_mcs;
@@ -154,7 +154,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
- cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(supp);
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
/*
* Check if VHT extended NSS indicates that the bandwidth/NSS
* configuration is supported - only for MCS 0 since we already
@@ -164,8 +164,8 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
ieee80211_get_vht_max_nss(&ieee_vht_cap,
IEEE80211_VHT_CHANWIDTH_160MHZ,
0, true, nss) >= nss)
- cmd->ht_rates[i][IWL_TLC_HT_BW_160] =
- cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160];
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80];
}
}
@@ -189,7 +189,7 @@ static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
static void
rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
- struct iwl_tlc_config_cmd *cmd)
+ struct iwl_tlc_config_cmd_v4 *cmd)
{
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
@@ -219,7 +219,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
}
if (_mcs_80 > _tx_mcs_80)
_mcs_80 = _tx_mcs_80;
- cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] =
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
/* If one side doesn't support - mark both as not supporting */
@@ -230,14 +230,14 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
}
if (_mcs_160 > _tx_mcs_160)
_mcs_160 = _tx_mcs_160;
- cmd->ht_rates[i][IWL_TLC_HT_BW_160] =
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
}
}
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
- struct iwl_tlc_config_cmd *cmd)
+ struct iwl_tlc_config_cmd_v4 *cmd)
{
int i;
u16 supp = 0;
@@ -263,15 +263,15 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
} else if (ht_cap->ht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_HT;
- cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] =
+ cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
cpu_to_le16(ht_cap->mcs.rx_mask[0]);
/* the station support only a single receive chain */
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
- cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
+ cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
0;
else
- cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
+ cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
cpu_to_le16(ht_cap->mcs.rx_mask[1]);
}
}
@@ -291,8 +291,12 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
notif = (void *)pkt->data;
sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
if (IS_ERR_OR_NULL(sta)) {
- IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
- notif->sta_id);
+		/* This can happen in the remove-station flow, where mvm
+		 * removed the station internally before removing it from
+		 * the FW.
+		 */
+ IWL_DEBUG_RATE(mvm,
+ "Invalid mvm RCU pointer for sta id (%d) in TLC notification\n",
+ notif->sta_id);
goto out;
}
@@ -311,18 +315,19 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
char pretty_rate[100];
- if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
- TLC_MNG_UPDATE_NOTIF, 0) < 3) {
- rs_pretty_print_rate_v1(pretty_rate, sizeof(pretty_rate),
- le32_to_cpu(notif->rate));
- IWL_DEBUG_RATE(mvm,
- "Got rate in old format. Rate: %s. Converting.\n",
- pretty_rate);
- lq_sta->last_rate_n_flags =
- iwl_new_rate_from_v1(le32_to_cpu(notif->rate));
- } else {
- lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
- }
+ if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_UPDATE_NOTIF, 0) < 3) {
+ rs_pretty_print_rate_v1(pretty_rate,
+ sizeof(pretty_rate),
+ le32_to_cpu(notif->rate));
+ IWL_DEBUG_RATE(mvm,
+ "Got rate in old format. Rate: %s. Converting.\n",
+ pretty_rate);
+ lq_sta->last_rate_n_flags =
+ iwl_new_rate_from_v1(le32_to_cpu(notif->rate));
+ } else {
+ lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
+ }
rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
lq_sta->last_rate_n_flags);
IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate);
@@ -418,23 +423,18 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
- struct iwl_tlc_config_cmd cfg_cmd = {
+ struct iwl_tlc_config_cmd_v4 cfg_cmd = {
.sta_id = mvmsta->sta_id,
.max_ch_width = update ?
rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
.flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)),
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
- .max_mpdu_len = cpu_to_le16(max_amsdu_len),
- .amsdu = iwl_mvm_is_csum_supported(mvm),
+ .max_mpdu_len = iwl_mvm_is_csum_supported(mvm) ?
+ cpu_to_le16(max_amsdu_len) : 0,
};
int ret;
- u16 cmd_size = sizeof(cfg_cmd);
-
- /* In old versions of the API the struct is 4 bytes smaller */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD, 0) < 3)
- cmd_size -= 4;
+ int cmd_ver;
memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
@@ -449,8 +449,41 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
sta->max_amsdu_len = max_amsdu_len;
- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
- &cfg_cmd);
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD, 0);
+ if (cmd_ver == 4) {
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC,
+ sizeof(cfg_cmd), &cfg_cmd);
+ } else if (cmd_ver < 4) {
+ struct iwl_tlc_config_cmd_v3 cfg_cmd_v3 = {
+ .sta_id = cfg_cmd.sta_id,
+ .max_ch_width = cfg_cmd.max_ch_width,
+ .mode = cfg_cmd.mode,
+ .chains = cfg_cmd.chains,
+ .amsdu = !!cfg_cmd.max_mpdu_len,
+ .flags = cfg_cmd.flags,
+ .non_ht_rates = cfg_cmd.non_ht_rates,
+ .ht_rates[0][0] = cfg_cmd.ht_rates[0][0],
+ .ht_rates[0][1] = cfg_cmd.ht_rates[0][1],
+ .ht_rates[1][0] = cfg_cmd.ht_rates[1][0],
+ .ht_rates[1][1] = cfg_cmd.ht_rates[1][1],
+ .sgi_ch_width_supp = cfg_cmd.sgi_ch_width_supp,
+ .max_mpdu_len = cfg_cmd.max_mpdu_len,
+ };
+
+ u16 cmd_size = sizeof(cfg_cmd_v3);
+
+ /* In old versions of the API the struct is 4 bytes smaller */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD, 0) < 3)
+ cmd_size -= 4;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
+ &cfg_cmd_v3);
+ } else {
+ ret = -EINVAL;
+ }
+
if (ret)
IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
}
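
The version negotiation here builds the newest command layout first and downgrades it field-by-field for older firmware; note how the v3 'amsdu' flag is recovered from the v4 encoding, where max_mpdu_len == 0 means A-MSDU is off. A reduced sketch of that downgrade pattern (struct contents invented):

#include <stdint.h>
#include <stdio.h>

struct tlc_cmd_v4 { uint8_t sta_id; uint16_t max_mpdu_len; };
struct tlc_cmd_v3 { uint8_t sta_id; uint8_t amsdu; uint16_t max_mpdu_len; };

/* Send path: pick the layout the firmware advertised. */
static void send_tlc_config(int cmd_ver, const struct tlc_cmd_v4 *v4)
{
	if (cmd_ver == 4) {
		printf("v4: mpdu=%u\n", v4->max_mpdu_len);
	} else if (cmd_ver < 4) {
		struct tlc_cmd_v3 v3 = {
			.sta_id = v4->sta_id,
			/* v4 folds the A-MSDU enable into max_mpdu_len */
			.amsdu = !!v4->max_mpdu_len,
			.max_mpdu_len = v4->max_mpdu_len,
		};
		size_t size = sizeof(v3);

		if (cmd_ver < 3)	/* older API: struct is smaller */
			size -= 4;
		printf("v%d: amsdu=%u size=%zu\n", cmd_ver, v3.amsdu, size);
	}
}

int main(void)
{
	struct tlc_cmd_v4 cmd = { .sta_id = 2, .max_mpdu_len = 3839 };

	send_tlc_config(4, &cmd);
	send_tlc_config(3, &cmd);
	return 0;
}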
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index d22f40a5354d..64446a11ef98 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -527,40 +527,19 @@ struct iwl_mvm_stat_data {
u8 *beacon_average_energy;
};
-static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+struct iwl_mvm_stat_data_all_macs {
+ struct iwl_mvm *mvm;
+ __le32 flags;
+ struct iwl_statistics_ntfy_per_mac *per_mac_stats;
+};
+
+static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
{
- struct iwl_mvm_stat_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
- int sig = -data->beacon_filter_average_energy;
- int last_event;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
int thold = vif->bss_conf.cqm_rssi_thold;
int hyst = vif->bss_conf.cqm_rssi_hyst;
- u16 id = le32_to_cpu(data->mac_id);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u16 vif_id = mvmvif->id;
-
- /* This doesn't need the MAC ID check since it's not taking the
- * data copied into the "data" struct, but rather the data from
- * the notification directly.
- */
- mvmvif->beacon_stats.num_beacons =
- le32_to_cpu(data->beacon_counter[vif_id]);
- mvmvif->beacon_stats.avg_signal =
- -data->beacon_average_energy[vif_id];
-
- /* make sure that beacon statistics don't go backwards with TCM
- * request to clear statistics
- */
- if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
- mvmvif->beacon_stats.accu_num_beacons +=
- mvmvif->beacon_stats.num_beacons;
-
- if (mvmvif->id != id)
- return;
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
+ int last_event;
if (sig == 0) {
IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
@@ -618,6 +597,73 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
}
}
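
The factored-out iwl_mvm_update_vif_sig() compares the beacon-filter average energy against the CQM threshold with hysteresis, so small oscillations around cqm_rssi_thold do not generate an event storm. A minimal sketch of that thresholding — a simplification of mac80211's actual CQM machinery, with the event plumbing stubbed out:

#include <stdio.h>

enum cqm_event { CQM_NONE, CQM_LOW, CQM_HIGH };

/*
 * Report LOW/HIGH only when the signal crosses the threshold by more
 * than the hysteresis, relative to the last reported side.
 */
static enum cqm_event cqm_check(int sig, int thold, int hyst,
				enum cqm_event last)
{
	if (sig == 0)		/* no measurement: skip the decision */
		return CQM_NONE;
	if (sig < thold - hyst && last != CQM_LOW)
		return CQM_LOW;
	if (sig > thold + hyst && last != CQM_HIGH)
		return CQM_HIGH;
	return CQM_NONE;
}

int main(void)
{
	enum cqm_event last = CQM_HIGH;
	int samples[] = { -72, -79, -81, -78, -69 };

	for (int i = 0; i < 5; i++) {
		enum cqm_event e = cqm_check(samples[i], -75, 4, last);

		if (e != CQM_NONE) {
			printf("sig=%d -> %s\n", samples[i],
			       e == CQM_LOW ? "LOW" : "HIGH");
			last = e;
		}
	}
	return 0;
}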
+static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_stat_data *data = _data;
+ int sig = -data->beacon_filter_average_energy;
+ u16 id = le32_to_cpu(data->mac_id);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 vif_id = mvmvif->id;
+
+ /* This doesn't need the MAC ID check since it's not taking the
+ * data copied into the "data" struct, but rather the data from
+ * the notification directly.
+ */
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(data->beacon_counter[vif_id]);
+ mvmvif->beacon_stats.avg_signal =
+ -data->beacon_average_energy[vif_id];
+
+ if (mvmvif->id != id)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ /* make sure that beacon statistics don't go backwards with TCM
+ * request to clear statistics
+ */
+ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ mvmvif->beacon_stats.accu_num_beacons +=
+ mvmvif->beacon_stats.num_beacons;
+
+ iwl_mvm_update_vif_sig(vif, sig);
+}
+
+static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_stat_data_all_macs *data = _data;
+ struct iwl_statistics_ntfy_per_mac *mac_stats;
+ int sig;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 vif_id = mvmvif->id;
+
+ if (WARN_ONCE(vif_id > MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ mac_stats = &data->per_mac_stats[vif_id];
+
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(mac_stats->beacon_counter);
+ mvmvif->beacon_stats.avg_signal =
+ -le32_to_cpu(mac_stats->beacon_average_energy);
+
+ /* make sure that beacon statistics don't go backwards with TCM
+ * request to clear statistics
+ */
+ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ mvmvif->beacon_stats.accu_num_beacons +=
+ mvmvif->beacon_stats.num_beacons;
+
+ sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy);
+ iwl_mvm_update_vif_sig(vif, sig);
+}
+
static inline void
iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
{
@@ -684,47 +730,41 @@ iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
}
static void
-iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
- struct iwl_rx_packet *pkt)
+iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
+ struct iwl_statistics_operational_ntfy *stats)
+{
+ struct iwl_mvm_stat_data_all_macs data = {
+ .mvm = mvm,
+ .flags = stats->flags,
+ .per_mac_stats = stats->per_mac_stats,
+ };
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_stat_iterator_all_macs,
+ &data);
+}
+
+static void
+iwl_mvm_stats_ver_14(struct iwl_mvm *mvm,
+ struct iwl_statistics_operational_ntfy_ver_14 *stats)
{
struct iwl_mvm_stat_data data = {
.mvm = mvm,
};
+
u8 beacon_average_energy[MAC_INDEX_AUX];
- u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
- struct iwl_statistics_operational_ntfy *stats;
- int expected_size;
__le32 flags;
int i;
- expected_size = sizeof(*stats);
- if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size,
- "received invalid statistics size (%d)!, expected_size: %d\n",
- iwl_rx_packet_payload_len(pkt), expected_size))
- return;
-
- stats = (void *)&pkt->data;
-
- if (WARN_ONCE(stats->hdr.type != FW_STATISTICS_OPERATIONAL ||
- stats->hdr.version !=
- iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, STATISTICS_CMD, 0),
- "received unsupported hdr type %d, version %d\n",
- stats->hdr.type, stats->hdr.version))
- return;
-
flags = stats->flags;
- mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
- mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
- mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
- mvm->radio_stats.on_time_scan = le64_to_cpu(stats->on_time_scan);
-
- iwl_mvm_rx_stats_check_trigger(mvm, pkt);
data.mac_id = stats->mac_id;
data.beacon_filter_average_energy =
le32_to_cpu(stats->beacon_filter_average_energy);
data.flags = flags;
data.beacon_counter = stats->beacon_counter;
+
for (i = 0; i < ARRAY_SIZE(beacon_average_energy); i++)
beacon_average_energy[i] =
le32_to_cpu(stats->beacon_average_energy[i]);
@@ -735,9 +775,105 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator,
&data);
+}
+
+static bool iwl_mvm_verify_stats_len(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt,
+ u32 expected_size)
+{
+ struct iwl_statistics_ntfy_hdr *hdr;
+
+ if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size,
+ "received invalid statistics size (%d)!, expected_size: %d\n",
+ iwl_rx_packet_payload_len(pkt), expected_size))
+ return false;
+
+ hdr = (void *)&pkt->data;
+
+ if (WARN_ONCE((hdr->type & IWL_STATISTICS_TYPE_MSK) != FW_STATISTICS_OPERATIONAL ||
+ hdr->version !=
+ iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, STATISTICS_NOTIFICATION, 0),
+ "received unsupported hdr type %d, version %d\n",
+ hdr->type, hdr->version))
+ return false;
+
+ if (WARN_ONCE(le16_to_cpu(hdr->size) != expected_size,
+ "received invalid statistics size in header (%d)!, expected_size: %d\n",
+ le16_to_cpu(hdr->size), expected_size))
+ return false;
+
+ return true;
+}
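
iwl_mvm_verify_stats_len() layers three checks — payload length, type/version match, and the header's self-declared size — before any field of the notification is trusted. A generic standalone version of that defensive pattern (header layout invented for the example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ntfy_hdr { uint8_t type; uint8_t version; uint16_t size; };

static bool verify_ntfy(const void *buf, size_t buf_len,
			size_t expected_size, uint8_t exp_type,
			uint8_t exp_ver)
{
	struct ntfy_hdr hdr;

	if (buf_len < expected_size)	/* whole struct must fit */
		return false;
	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.type != exp_type || hdr.version != exp_ver)
		return false;		/* wrong notification flavor */
	if (hdr.size != expected_size)	/* header must agree on size */
		return false;
	return true;
}

int main(void)
{
	struct { struct ntfy_hdr hdr; uint32_t payload[4]; } pkt = {
		.hdr = { .type = 3, .version = 15, .size = sizeof(pkt) },
	};

	printf("valid: %d\n",
	       verify_ntfy(&pkt, sizeof(pkt), sizeof(pkt), 3, 15));
	return 0;
}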
+
+static void
+iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ __le32 air_time[MAC_INDEX_AUX];
+ __le32 rx_bytes[MAC_INDEX_AUX];
+ __le32 flags = 0;
+ int i;
+ u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ STATISTICS_NOTIFICATION, 0);
+
+ if (WARN_ONCE(notif_ver > 15,
+ "invalid statistics version id: %d\n", notif_ver))
+ return;
+
+ if (notif_ver == 14) {
+ struct iwl_statistics_operational_ntfy_ver_14 *stats =
+ (void *)pkt->data;
+
+ if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats)))
+ return;
+
+ iwl_mvm_stats_ver_14(mvm, stats);
+
+ flags = stats->flags;
+ mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
+ mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
+ mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
+ mvm->radio_stats.on_time_scan =
+ le64_to_cpu(stats->on_time_scan);
+
+ for (i = 0; i < ARRAY_SIZE(average_energy); i++)
+ average_energy[i] = le32_to_cpu(stats->average_energy[i]);
+
+ for (i = 0; i < ARRAY_SIZE(air_time); i++) {
+ air_time[i] = stats->air_time[i];
+ rx_bytes[i] = stats->rx_bytes[i];
+ }
+ }
+
+ if (notif_ver == 15) {
+ struct iwl_statistics_operational_ntfy *stats =
+ (void *)pkt->data;
+
+ if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats)))
+ return;
+
+ iwl_mvm_stats_ver_15(mvm, stats);
+
+ flags = stats->flags;
+ mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
+ mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
+ mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
+ mvm->radio_stats.on_time_scan =
+ le64_to_cpu(stats->on_time_scan);
+
+ for (i = 0; i < ARRAY_SIZE(average_energy); i++)
+ average_energy[i] =
+ le32_to_cpu(stats->per_sta_stats[i].average_energy);
+
+ for (i = 0; i < ARRAY_SIZE(air_time); i++) {
+ air_time[i] = stats->per_mac_stats[i].air_time;
+ rx_bytes[i] = stats->per_mac_stats[i].rx_bytes;
+ }
+ }
+
+ iwl_mvm_rx_stats_check_trigger(mvm, pkt);
- for (i = 0; i < ARRAY_SIZE(average_energy); i++)
- average_energy[i] = le32_to_cpu(stats->average_energy[i]);
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
/*
@@ -746,8 +882,7 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
* request and once in statistics notification.
*/
if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
- iwl_mvm_update_tcm_from_stats(mvm, stats->air_time,
- stats->rx_bytes);
+ iwl_mvm_update_tcm_from_stats(mvm, air_time, rx_bytes);
}
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
@@ -761,8 +896,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
u8 *energy;
/* From ver 14 and up we use TLV statistics format */
- if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
- STATISTICS_CMD, 0) >= 14)
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ STATISTICS_NOTIFICATION, 0) >= 14)
return iwl_mvm_handle_rx_statistics_tlv(mvm, pkt);
if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index e0601f802628..295629c5c035 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -121,12 +121,39 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
unsigned int headlen, fraglen, pad_len = 0;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
+ IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;
if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
len -= 2;
pad_len = 2;
}
+ /*
+	 * For a non-monitor interface, strip the bytes that the RADA
+	 * might not have removed. As a monitor interface cannot coexist
+	 * with other interfaces, this removal is safe.
+ */
+ if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) {
+ u32 pkt_flags = le32_to_cpu(pkt->len_n_flags);
+
+ /*
+ * If RADA was not enabled then decryption was not performed so
+ * the MIC cannot be removed.
+ */
+ if (!(pkt_flags & FH_RSCSR_RADA_EN)) {
+ if (WARN_ON(crypt_len > mic_crc_len))
+ return -EINVAL;
+
+ mic_crc_len -= crypt_len;
+ }
+
+ if (WARN_ON(mic_crc_len > len))
+ return -EINVAL;
+
+ len -= mic_crc_len;
+ }
+
/* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr (including crypto if present, and
* an additional 8 bytes for SNAP/ethertype, see below) so that
@@ -149,18 +176,8 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
*/
hdrlen += crypt_len;
- if (WARN_ONCE(headlen < hdrlen,
- "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
- hdrlen, len, crypt_len)) {
- /*
- * We warn and trace because we want to be able to see
- * it in trace-cmd as well.
- */
- IWL_DEBUG_RX(mvm,
- "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
- hdrlen, len, crypt_len);
+ if (unlikely(headlen < hdrlen))
return -EINVAL;
- }
skb_put_data(skb, hdr, hdrlen);
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
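
The new stripping logic decodes a length field packed into mac_flags1 (stored in 2-byte units, hence the << 1) and only subtracts crypt_len when RADA did not already consume the MIC. A standalone sketch of just the arithmetic, with an invented mask value:

#include <stdint.h>
#include <stdio.h>

#define MIC_CRC_LEN_MASK	0x1f	/* illustrative bits of mac_flags1 */

/* Return the adjusted frame length after stripping MIC/CRC bytes. */
static uint32_t strip_mic_crc(uint32_t len, uint8_t mac_flags1,
			      uint32_t crypt_len, int rada_enabled)
{
	/* field counts 2-byte units, so shift left by one for bytes */
	uint32_t mic_crc_len = (mac_flags1 & MIC_CRC_LEN_MASK) << 1;

	if (!mic_crc_len)
		return len;
	if (!rada_enabled) {
		/* no decryption happened: the MIC was never produced */
		if (crypt_len > mic_crc_len)
			return len;	/* would underflow: bail out */
		mic_crc_len -= crypt_len;
	}
	return mic_crc_len > len ? len : len - mic_crc_len;
}

int main(void)
{
	/* 8-byte MIC encoded as 4 units; CCMP crypt_len of 8 */
	printf("%u\n", strip_mic_crc(120, 0x04, 8, 1));	/* 112 */
	printf("%u\n", strip_mic_crc(120, 0x04, 8, 0));	/* 120 */
	return 0;
}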
@@ -172,8 +189,12 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
* in the cases the hardware didn't handle, since it's rare to see
* such packets, even though the hardware did calculate the checksum
* in this case, just starting after the MAC header instead.
+ *
+ * Starting from Bz hardware, it calculates starting directly after
+ * the MAC header, so that matches mac80211's expectation.
*/
- if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ if (skb->ip_summed == CHECKSUM_COMPLETE &&
+ mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) {
struct {
u8 hdr[6];
__be16 type;
@@ -766,8 +787,11 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
- if (WARN_ON_ONCE(!ba_data))
+ if (!ba_data) {
+ WARN(!(flags & IWL_MVM_RELEASE_FROM_RSS_SYNC),
+ "BAID %d not found in map\n", baid);
goto out;
+ }
sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
@@ -1961,8 +1985,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
} else if (format == RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->nss =
- ((rate_n_flags & RATE_MCS_NSS_MSK) >>
+ rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >>
RATE_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
rx_status->encoding = RX_ENC_VHT;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index a138b5c4cce8..5f92a09db374 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -579,7 +579,9 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
profile->ssid_index = i;
/* Support any cipher and auth algorithm */
profile->unicast_cipher = 0xff;
- profile->auth_alg = 0xff;
+ profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED |
+ IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | IWL_AUTH_ALGO_8021X |
+ IWL_AUTH_ALGO_SAE | IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE;
profile->network_type = IWL_NETWORK_TYPE_ANY;
profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
@@ -1394,8 +1396,8 @@ static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
}
static void
-iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm,
- struct iwl_scan_general_params_v10 *general_params,
+iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
+ struct iwl_scan_general_params_v11 *general_params,
struct iwl_mvm_scan_params *params)
{
struct iwl_mvm_scan_timing_params *timing, *hb_timing;
@@ -1826,8 +1828,6 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
}
}
- flags = bssid_bitmap | (s_ssid_bitmap << 16);
-
if (cfg80211_channel_is_psc(params->channels[i]) &&
psc_no_listen)
flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
@@ -1869,8 +1869,11 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
(s_max > 1 || b_max > 3));
}
if ((allow_passive && force_passive) ||
- (!flags && !cfg80211_channel_is_psc(params->channels[i])))
+ (!(bssid_bitmap | s_ssid_bitmap) &&
+ !cfg80211_channel_is_psc(params->channels[i])))
flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
+ else
+ flags |= bssid_bitmap | (s_ssid_bitmap << 16);
channel_cfg[i].flags |= cpu_to_le32(flags);
}
@@ -1924,22 +1927,19 @@ static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm,
}
/*
- * 6GHz passive scan is allowed while associated in a defined time
- * interval following HW reset or resume flow
+ * 6GHz passive scan is allowed in a defined time interval following HW
+ * reset or resume flow, or while not associated and a large interval
+ * has passed since the last 6GHz passive scan.
*/
- if (vif->bss_conf.assoc &&
+ if ((vif->bss_conf.assoc ||
+ time_after(mvm->last_6ghz_passive_scan_jiffies +
+ (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
(time_before(mvm->last_reset_or_resume_time_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
jiffies))) {
- IWL_DEBUG_SCAN(mvm, "6GHz passive scan: associated\n");
- return;
- }
-
- /* No need for 6GHz passive scan if not enough time elapsed */
- if (time_after(mvm->last_6ghz_passive_scan_jiffies +
- (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) {
- IWL_DEBUG_SCAN(mvm,
- "6GHz passive scan: timeout did not expire\n");
+ IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n",
+ vif->bss_conf.assoc ? "associated" :
+ "timeout did not expire");
return;
}
@@ -2037,6 +2037,12 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
if (params->enable_6ghz_passive)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
+ if (iwl_mvm_is_oce_supported(mvm) &&
+ (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP |
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE |
+ NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE;
+
return flags;
}
@@ -2238,15 +2244,15 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
static void
-iwl_mvm_scan_umac_fill_general_p_v10(struct iwl_mvm *mvm,
+iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
- struct iwl_scan_general_params_v10 *gp,
+ struct iwl_scan_general_params_v11 *gp,
u16 gen_flags)
{
struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
- iwl_mvm_scan_umac_dwell_v10(mvm, gp, params);
+ iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);
gp->flags = cpu_to_le16(gen_flags);
@@ -2350,7 +2356,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
- iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
gen_flags);
@@ -2367,12 +2373,13 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
-static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_scan_params *params, int type,
- int uid)
+static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params,
+ int type, int uid, u32 version)
{
- struct iwl_scan_req_umac_v14 *cmd = mvm->scan_cmd;
- struct iwl_scan_req_params_v14 *scan_p = &cmd->scan_params;
+ struct iwl_scan_req_umac_v15 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v15 *scan_p = &cmd->scan_params;
struct iwl_scan_channel_params_v6 *cp = &scan_p->channel_params;
struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
int ret;
@@ -2385,7 +2392,7 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
- iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
gen_flags);
@@ -2425,6 +2432,20 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
+static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 14);
+}
+
+static int iwl_mvm_scan_umac_v15(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 15);
+}
+
static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
{
return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
@@ -2498,7 +2519,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EIO;
}
-#define SCAN_TIMEOUT 20000
+#define SCAN_TIMEOUT 30000
void iwl_mvm_scan_timeout_wk(struct work_struct *work)
{
@@ -2540,6 +2561,7 @@ struct iwl_scan_umac_handler {
static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
+ IWL_SCAN_UMAC_HANDLER(15),
IWL_SCAN_UMAC_HANDLER(14),
IWL_SCAN_UMAC_HANDLER(12),
};
@@ -2940,15 +2962,14 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
1 * HZ);
}
-#define IWL_SCAN_REQ_UMAC_HANDLE_SIZE(_ver) { \
- case (_ver): return sizeof(struct iwl_scan_req_umac_v##_ver); \
-}
-
static int iwl_scan_req_umac_get_size(u8 scan_ver)
{
switch (scan_ver) {
- IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14);
- IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
+ case 12:
+ return sizeof(struct iwl_scan_req_umac_v12);
+ case 14:
+ case 15:
+ return sizeof(struct iwl_scan_req_umac_v15);
}
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index a64874c05ced..feab0bfcd7a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2684,6 +2684,16 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
kfree_rcu(baid_data, rcu_head);
IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
+
+ /*
+ * After we've deleted it, do another queue sync
+ * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
+ * running it won't find a new session in the old
+ * BAID. It can find the NULL pointer for the BAID,
+ * but we must not have it find a different session.
+ */
+ iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
+ true, NULL, 0);
}
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 32b4d1935788..e34b82b2a288 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -373,6 +373,7 @@ struct iwl_mvm_rxq_dup_data {
* @tx_ant: the index of the antenna to use for data tx to this station. Only
* used during connection establishment (e.g. for the 4 way handshake
* exchange).
+ * @pairwise_cipher: used to feed iwlmei upon authorization
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -415,6 +416,7 @@ struct iwl_mvm_sta {
u8 sleep_tx_count;
u8 avg_energy;
u8 tx_ant;
+ u32 pairwise_cipher;
};
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index e91f8e889df7..ab06dcda1462 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -49,14 +49,13 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
/*
- * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
+ * Clear the ROC_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
- clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
synchronize_net();
@@ -82,9 +81,19 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
}
- } else {
+ }
+
+ /*
+ * Clear the ROC_AUX_RUNNING status bit.
+ * This will cause the TX path to drop offchannel transmissions.
+ * That would also be done by mac80211, but it is racy, in particular
+ * in the case that the time event actually completed in the firmware
+ * (which is handled in iwl_mvm_te_handle_notif).
+ */
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
/* do the same in case of hot spot 2.0 */
iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
+
+		/* In newer versions of this command an aux station is added
 		 * only when a dedicated tx queue is used, and it needs to be
 		 * removed at the end of use */
@@ -687,11 +696,14 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
spin_unlock_bh(&mvm->time_event_lock);
- /* When session protection is supported, the te_data->id field
+ /* When session protection is used, the te_data->id field
* is reused to save session protection's configuration.
+ * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
+ * to HOT_SPOT_CMD.
*/
if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
+ id != HOT_SPOT_CMD) {
if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
/* Session protection is still ongoing. Cancel it */
iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
@@ -1027,7 +1039,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_p2p_roc_finished(mvm);
} else {
iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
- &mvmvif->time_event_data);
+ &mvmvif->hs_time_event_data);
iwl_mvm_roc_finished(mvm);
}
@@ -1158,15 +1170,10 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color)),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
};
- /* The time_event_data.id field is reused to save session
- * protection's configuration.
- */
- mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC;
- cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
-
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->time_event_lock);
@@ -1180,6 +1187,11 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
}
iwl_mvm_te_clear_data(mvm, te_data);
+ /*
+ * The time_event_data.id field is reused to save session
+ * protection's configuration.
+ */
+ te_data->id = le32_to_cpu(cmd.conf_id);
te_data->duration = le32_to_cpu(cmd.duration_tu);
te_data->vif = vif;
spin_unlock_bh(&mvm->time_event_lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 76e0b7b45980..6fa2c12f7955 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -39,11 +39,11 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
#define OPT_HDR(type, skb, off) \
(type *)(skb_network_header(skb) + (off))
-static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_hdr *hdr,
- struct ieee80211_tx_info *info,
- u16 offload_assist)
+static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info, bool amsdu)
{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u16 offload_assist = 0;
#if IS_ENABLED(CONFIG_INET)
u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
u8 protocol = 0;
@@ -106,8 +106,7 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
/* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */
- if (skb->protocol == htons(ETH_P_IP) &&
- (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
+ if (skb->protocol == htons(ETH_P_IP) && amsdu) {
ip_hdr(skb)->check = 0;
offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
}
@@ -132,9 +131,63 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
out:
#endif
+ if (amsdu)
+ offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+ else if (ieee80211_hdrlen(hdr->frame_control) % 4)
+ /* padding is inserted later in transport */
+ offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
return offload_assist;
}
+u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u32 offload_assist = IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM;
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ unsigned int csum_start = skb_checksum_start_offset(skb);
+
+ offload_assist |= u32_encode_bits(hdrlen / 2,
+ IWL_TX_CMD_OFFLD_BZ_MH_LEN);
+ if (amsdu)
+ offload_assist |= IWL_TX_CMD_OFFLD_BZ_AMSDU;
+ else if (hdrlen % 4)
+ /* padding is inserted later in transport */
+ offload_assist |= IWL_TX_CMD_OFFLD_BZ_MH_PAD;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return offload_assist;
+
+ offload_assist |= IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM |
+ IWL_TX_CMD_OFFLD_BZ_ZERO2ONES;
+
+ /*
+ * mac80211 will always calculate checksum in software for
+ * non-fast-xmit, and so we can only do offloaded checksum
+ * for fast-xmit frames. In this case, we always have the
+ * RFC 1042 header present. skb_checksum_start_offset()
+ * returns the offset from the beginning, but the hardware
+ * needs it from after the header & SNAP header.
+ */
+ csum_start -= hdrlen + 8;
+
+ offload_assist |= u32_encode_bits(csum_start,
+ IWL_TX_CMD_OFFLD_BZ_START_OFFS);
+ offload_assist |= u32_encode_bits(csum_start + skb->csum_offset,
+ IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS);
+
+ return offload_assist;
+}
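
iwl_mvm_tx_csum_bz() packs the checksum start and result offsets relative to the end of the 802.11 header plus the 8-byte SNAP header, which is why skb_checksum_start_offset() is rebased with csum_start -= hdrlen + 8. A toy encoder for that rebasing, using a simplified stand-in for u32_encode_bits() and an invented register layout:

#include <stdint.h>
#include <stdio.h>

/* Simplified u32_encode_bits(): shift a value into a contiguous mask. */
static uint32_t encode_bits(uint32_t val, uint32_t mask)
{
	return (val << __builtin_ctz(mask)) & mask;
}

#define OFFLD_START_OFFS_MASK	0x00ff0000u	/* illustrative layout */
#define OFFLD_RESULT_OFFS_MASK	0xff000000u

static uint32_t encode_csum_offsets(unsigned int csum_start,
				    unsigned int csum_offset,
				    unsigned int hdrlen)
{
	/* HW wants offsets after the MAC header and the 8-byte SNAP hdr */
	unsigned int start = csum_start - (hdrlen + 8);

	return encode_bits(start, OFFLD_START_OFFS_MASK) |
	       encode_bits(start + csum_offset, OFFLD_RESULT_OFFS_MASK);
}

int main(void)
{
	/* e.g. QoS data header (26) + SNAP (8) + IPv4 header (20) */
	uint32_t w = encode_csum_offsets(54, 16, 26);

	printf("offload word: 0x%08x\n", w);
	return 0;
}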
+
+static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ bool amsdu)
+{
+ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu);
+ return iwl_mvm_tx_csum_bz(mvm, skb, amsdu);
+}
+
/*
* Sets most of the Tx cmd's fields
*/
@@ -146,7 +199,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
__le16 fc = hdr->frame_control;
u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
u32 len = skb->len + FCS_LEN;
- u16 offload_assist = 0;
+ bool amsdu = false;
u8 ac;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
@@ -166,8 +219,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
- if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
- offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+ amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
@@ -234,14 +286,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_cmd->sta_id = sta_id;
- /* padding is inserted later in transport */
- if (ieee80211_hdrlen(fc) % 4 &&
- !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
- offload_assist |= BIT(TX_CMD_OFFLD_PAD);
-
- tx_cmd->offload_assist |=
- cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
- offload_assist));
+ tx_cmd->offload_assist =
+ cpu_to_le16(iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu));
}
static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
@@ -463,27 +509,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
- u16 offload_assist = 0;
u32 rate_n_flags = 0;
u16 flags = 0;
struct iwl_mvm_sta *mvmsta = sta ?
iwl_mvm_sta_from_mac80211(sta) : NULL;
+ bool amsdu = false;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
- if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
- offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+ amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
- offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
- offload_assist);
-
- /* padding is inserted later in transport */
- if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
- !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
- offload_assist |= BIT(TX_CMD_OFFLD_PAD);
-
if (!info->control.hw_key)
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
@@ -503,8 +540,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
if (mvm->trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) {
struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
+ u32 offload_assist = iwl_mvm_tx_csum(mvm, skb,
+ info, amsdu);
- cmd->offload_assist |= cpu_to_le32(offload_assist);
+ cmd->offload_assist = cpu_to_le32(offload_assist);
/* Total # bytes to be transmitted */
cmd->len = cpu_to_le16((u16)skb->len);
@@ -516,8 +555,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
} else {
struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+ u16 offload_assist = iwl_mvm_tx_csum_pre_bz(mvm, skb,
+ info,
+ amsdu);
- cmd->offload_assist |= cpu_to_le16(offload_assist);
+ cmd->offload_assist = cpu_to_le16(offload_assist);
/* Total # bytes to be transmitted */
cmd->len = cpu_to_le16((u16)skb->len);
@@ -1129,6 +1171,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);
+ if (ieee80211_is_data(fc))
+ iwl_mvm_mei_tx_copy_to_csme(mvm, skb,
+ info->control.hw_key ?
+ info->control.hw_key->iv_len : 0);
+
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index caf1dcf48888..1f3e90e5dbd4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -340,25 +340,64 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_request_smps(vif, smps_mode);
}
+static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);
+
+ return true;
+}
+
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
struct iwl_statistics_cmd scmd = {
.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
};
+
struct iwl_host_cmd cmd = {
.id = STATISTICS_CMD,
.len[0] = sizeof(scmd),
.data[0] = &scmd,
- .flags = CMD_WANT_SKB,
};
int ret;
- ret = iwl_mvm_send_cmd(mvm, &cmd);
- if (ret)
- return ret;
+ /* From version 15 of STATISTICS_NOTIFICATION, the reply to
+ * STATISTICS_CMD is empty, and the statistics arrive in a
+ * separate STATISTICS_NOTIFICATION
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ STATISTICS_NOTIFICATION, 0) < 15) {
+ cmd.flags = CMD_WANT_SKB;
- iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
- iwl_free_resp(&cmd);
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
+ iwl_free_resp(&cmd);
+ } else {
+ struct iwl_notification_wait stats_wait;
+ static const u16 stats_complete[] = {
+ STATISTICS_NOTIFICATION,
+ };
+
+ iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
+ stats_complete, ARRAY_SIZE(stats_complete),
+ iwl_wait_stats_complete, NULL);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ iwl_remove_notification(&mvm->notif_wait, &stats_wait);
+ return ret;
+ }
+
+ /* 200ms should be enough for FW to collect data from all
+ * LMACs and send STATISTICS_NOTIFICATION to host
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
+ if (ret)
+ return ret;
+ }
if (clear)
iwl_mvm_accu_radio_stats(mvm);
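
The version >= 15 branch above follows a classic arm-then-send pattern: the waiter is registered before the command goes out so the notification cannot race past it, is removed again if the send fails, and is given HZ / 5 (200 ms) to fire. A self-contained user-space sketch of the same pattern, with every name assumed for illustration:

/* Sketch of the arm-then-send notification wait; illustrative only. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool stats_arrived;

/* "RX path": fires when the notification shows up */
static void notif_rx(void)
{
	pthread_mutex_lock(&lock);
	stats_arrived = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static int wait_stats(long ms)
{
	struct timespec ts;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += ms / 1000;
	ts.tv_nsec += (ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (!stats_arrived && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);
	return stats_arrived ? 0 : -ETIMEDOUT;
}

int main(void)
{
	/* the waiter is armed (stats_arrived == false) before the "send" */
	notif_rx();                                  /* pretend the FW answered */
	printf("wait_stats: %d\n", wait_stats(200)); /* 200 ms ~ HZ / 5 */
	return 0;
}
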
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
new file mode 100644
index 000000000000..78450366312b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+#include "mvm.h"
+#include <linux/nl80211-vnd-intel.h>
+#include <net/netlink.h>
+
+static const struct nla_policy
+iwl_mvm_vendor_attr_policy[NUM_IWL_MVM_VENDOR_ATTR] = {
+ [IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN] = { .type = NLA_U8 },
+ [IWL_MVM_VENDOR_ATTR_AUTH_MODE] = { .type = NLA_U32 },
+ [IWL_MVM_VENDOR_ATTR_CHANNEL_NUM] = { .type = NLA_U8 },
+ [IWL_MVM_VENDOR_ATTR_SSID] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_SSID_LEN },
+ [IWL_MVM_VENDOR_ATTR_BAND] = { .type = NLA_U8 },
+ [IWL_MVM_VENDOR_ATTR_COLLOC_CHANNEL] = { .type = NLA_U8 },
+ [IWL_MVM_VENDOR_ATTR_COLLOC_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
+};
+
+static int iwl_mvm_vendor_get_csme_conn_info(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_csme_conn_info *csme_conn_info;
+ struct sk_buff *skb;
+ int err = 0;
+
+ mutex_lock(&mvm->mutex);
+ csme_conn_info = iwl_mvm_get_csme_conn_info(mvm);
+
+ if (!csme_conn_info) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 200);
+ if (!skb) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_AUTH_MODE,
+ csme_conn_info->conn_info.auth_mode) ||
+ nla_put(skb, IWL_MVM_VENDOR_ATTR_SSID,
+ csme_conn_info->conn_info.ssid_len,
+ csme_conn_info->conn_info.ssid) ||
+ nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_STA_CIPHER,
+ csme_conn_info->conn_info.pairwise_cipher) ||
+ nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_CHANNEL_NUM,
+ csme_conn_info->conn_info.channel) ||
+ nla_put(skb, IWL_MVM_VENDOR_ATTR_ADDR, ETH_ALEN,
+ csme_conn_info->conn_info.bssid)) {
+ kfree_skb(skb);
+ err = -ENOBUFS;
+ }
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+ if (err)
+ return err;
+
+ return cfg80211_vendor_cmd_reply(skb);
+}
+
+static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_mei_get_ownership(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ return 0;
+}
+
+static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
+ {
+ .info = {
+ .vendor_id = INTEL_OUI,
+ .subcmd = IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO,
+ },
+ .doit = iwl_mvm_vendor_get_csme_conn_info,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV,
+ .policy = iwl_mvm_vendor_attr_policy,
+ .maxattr = MAX_IWL_MVM_VENDOR_ATTR,
+ },
+ {
+ .info = {
+ .vendor_id = INTEL_OUI,
+ .subcmd = IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP,
+ },
+ .doit = iwl_mvm_vendor_host_get_ownership,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV,
+ .policy = iwl_mvm_vendor_attr_policy,
+ .maxattr = MAX_IWL_MVM_VENDOR_ATTR,
+ },
+};
+
+enum iwl_mvm_vendor_events_idx {
+ /* 0x0 - 0x3 are deprecated */
+ IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4,
+ NUM_IWL_MVM_VENDOR_EVENT_IDX
+};
+
+static const struct nl80211_vendor_cmd_info
+iwl_mvm_vendor_events[NUM_IWL_MVM_VENDOR_EVENT_IDX] = {
+ [IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN] = {
+ .vendor_id = INTEL_OUI,
+ .subcmd = IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT,
+ },
+};
+
+void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm)
+{
+ mvm->hw->wiphy->vendor_commands = iwl_mvm_vendor_commands;
+ mvm->hw->wiphy->n_vendor_commands = ARRAY_SIZE(iwl_mvm_vendor_commands);
+ mvm->hw->wiphy->vendor_events = iwl_mvm_vendor_events;
+ mvm->hw->wiphy->n_vendor_events = ARRAY_SIZE(iwl_mvm_vendor_events);
+}
+
+void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool forbidden)
+{
+ struct sk_buff *msg;
+
+ if (WARN_ON(!vif))
+ return;
+
+ msg = cfg80211_vendor_event_alloc(mvm->hw->wiphy,
+ ieee80211_vif_to_wdev(vif),
+ 200, IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN,
+ GFP_ATOMIC);
+ if (!msg)
+ return;
+
+ if (nla_put(msg, IWL_MVM_VENDOR_ATTR_VIF_ADDR,
+ ETH_ALEN, vif->addr) ||
+ nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN, forbidden))
+ goto nla_put_failure;
+
+ cfg80211_vendor_event(msg, GFP_ATOMIC);
+ return;
+
+ nla_put_failure:
+ kfree_skb(msg);
+}
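
For context, user space reaches the handlers registered above through the generic nl80211 vendor-command path. A hedged libnl sketch follows; the subcommand value is a placeholder, since the numeric value of IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO is not shown in this patch, and error handling is omitted:

/* Illustrative nl80211 vendor-command invocation; SUBCMD value assumed. */
#include <net/if.h>
#include <netlink/attr.h>
#include <netlink/genl/ctrl.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

#define INTEL_OUI 0x8086
#define SUBCMD_GET_CSME_CONN_INFO 2 /* assumed value */

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int nl80211_id;

	genl_connect(sk);
	nl80211_id = genl_ctrl_resolve(sk, "nl80211");

	msg = nlmsg_alloc();
	genlmsg_put(msg, 0, 0, nl80211_id, 0, 0, NL80211_CMD_VENDOR, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));
	nla_put_u32(msg, NL80211_ATTR_VENDOR_ID, INTEL_OUI);
	nla_put_u32(msg, NL80211_ATTR_VENDOR_SUBCMD, SUBCMD_GET_CSME_CONN_INFO);

	nl_send_auto(sk, msg);
	/* reply parsing omitted in this sketch */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}
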
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 5ce07f28e7c3..5178e852c5d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -510,16 +510,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
- _rf_id, _no_160, _cores, _cdb, _cfg, _name) \
+ _rf_id, _no_160, _cores, _cdb, _jacket, _cfg, _name) \
{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
.name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \
.no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
- .mac_step = _mac_step, .cdb = _cdb }
+ .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }
#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
_IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_ANY, _cfg, _name)
+ IWL_CFG_ANY, IWL_CFG_ANY, _cfg, _name)
static const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -562,6 +562,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
@@ -665,97 +666,111 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL),
IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name),
IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
- IWL_DEV_INFO(0x2726, 0x1671, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x2726, 0x1672, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675i_name),
IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
+ /* SO with GF2 */
+ IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+ IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+ IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+ IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+ IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+ IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+ IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+ IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+ IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+ IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+
+ /* MA with GF2 */
+ IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name),
+ IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675i_name),
+
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9560_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9461_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9461_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9462_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9462_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9270_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9270_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9162_160_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9162_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9260_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9260_name),
/* Qu with Jf */
@@ -763,176 +778,176 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* QnJ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu with Hr */
@@ -940,303 +955,352 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qu_c0_hr_b0, iwl_ax203_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_qu_c0_hr_b0, iwl_ax201_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_quz_a0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_quz_a0_hr_b0, iwl_ax203_name),
/* QnJ with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
/* SnJ with Jf */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9560_name),
/* SnJ with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_hr_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_hr_b0, iwl_ax201_name),
/* Ma */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_hr_b0, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_gf4_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_mr_a0, iwl_ax221_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_fm_a0, iwl_ax231_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_mr_a0, iwl_ax221_name),
/* So with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
/* So-F with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
/* So-F with Gf */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY,
+ iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* Bz */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_hr_b0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_gf_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_gf4_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_mr_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_fm_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET,
iwl_cfg_gl_a0_fm_a0, iwl_bz_name),
+/* BZ Z step */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, SILICON_Z_STEP,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_bz_z0_gf_a0, iwl_bz_name),
+
+/* BNJ */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
+ iwl_cfg_bnj_a0_fm_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET,
+ iwl_cfg_bnj_a0_fm4_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
+ iwl_cfg_bnj_a0_gf_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET,
+ iwl_cfg_bnj_a0_gf4_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
+ iwl_cfg_bnj_a0_hr_b0, iwl_bz_name),
+
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* SoF with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* SoF with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* So with GF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY,
+ iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* So with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* So with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
#endif /* CONFIG_IWLMVM */
@@ -1249,22 +1313,14 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
static int get_crf_id(struct iwl_trans *iwl_trans)
{
int ret = 0;
- u32 wfpm_ctrl_addr;
- u32 wfpm_otp_cfg_addr;
u32 sd_reg_ver_addr;
u32 cdb = 0;
u32 val;
- if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- wfpm_ctrl_addr = WFPM_CTRL_REG_GEN2;
- wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR_GEN2;
+ if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
sd_reg_ver_addr = SD_REG_VER_GEN2;
- /* Qu/Pu families have other addresses */
- } else {
- wfpm_ctrl_addr = WFPM_CTRL_REG;
- wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR;
+ else
sd_reg_ver_addr = SD_REG_VER;
- }
if (!iwl_trans_grab_nic_access(iwl_trans)) {
IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n");
@@ -1273,15 +1329,15 @@ static int get_crf_id(struct iwl_trans *iwl_trans)
}
/* Enable access to peripheral registers */
- val = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr);
+ val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG);
val |= ENABLE_WFPM;
- iwl_write_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr, val);
+ iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val);
/* Read crf info */
val = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);
/* Read cdb info (also contains the jacket info, if needed in the future) */
- cdb = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_otp_cfg_addr);
+ cdb = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
/* Map between crf id to rf id */
switch (REG_CRF_ID_TYPE(val)) {
@@ -1337,7 +1393,7 @@ out:
static const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step,
- u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores)
+ u16 rf_type, u8 cdb, u8 jacket, u8 rf_id, u8 no_160, u8 cores)
{
int num_devices = ARRAY_SIZE(iwl_dev_info_table);
int i;
@@ -1372,6 +1428,10 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
dev_info->cdb != cdb)
continue;
+ if (dev_info->jacket != (u8)IWL_CFG_ANY &&
+ dev_info->jacket != jacket)
+ continue;
+
if (dev_info->rf_id != (u8)IWL_CFG_ANY &&
dev_info->rf_id != rf_id)
continue;
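
The matching loop above treats IWL_CFG_ANY as a wildcard on every field, including the newly added jacket field. A toy reduction of that rule, with assumed names:

/* Toy version of the wildcard match in iwl_pci_find_dev_info():
 * a table field equal to ANY matches everything, otherwise it must
 * be equal to the value read from the hardware. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ANY 0xFF

static bool field_matches(uint8_t table_val, uint8_t hw_val)
{
	return table_val == ANY || table_val == hw_val;
}

int main(void)
{
	printf("%d\n", field_matches(ANY, 1)); /* 1: wildcard matches */
	printf("%d\n", field_matches(0, 1));   /* 0: mismatch, skip entry */
	return 0;
}
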
@@ -1426,15 +1486,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* first trying to load the firmware etc. and potentially only
* detecting any problems when the first interface is brought up.
*/
- ret = iwl_finish_nic_init(iwl_trans);
- if (ret)
- goto out_free_trans;
- if (iwl_trans_grab_nic_access(iwl_trans)) {
- /* all good */
- iwl_trans_release_nic_access(iwl_trans);
- } else {
- ret = -EIO;
- goto out_free_trans;
+ ret = iwl_pcie_prepare_card_hw(iwl_trans);
+ if (!ret) {
+ ret = iwl_finish_nic_init(iwl_trans);
+ if (ret)
+ goto out_free_trans;
+ if (iwl_trans_grab_nic_access(iwl_trans)) {
+ /* all good */
+ iwl_trans_release_nic_access(iwl_trans);
+ } else {
+ ret = -EIO;
+ goto out_free_trans;
+ }
}
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
@@ -1453,9 +1516,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
CSR_HW_REV_TYPE(iwl_trans->hw_rev),
- CSR_HW_REV_STEP(iwl_trans->hw_rev),
+ iwl_trans->hw_rev_step,
CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id),
CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id),
+ CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id),
IWL_SUBDEVICE_RF_ID(pdev->subsystem_device),
IWL_SUBDEVICE_NO_160(pdev->subsystem_device),
IWL_SUBDEVICE_CORES(pdev->subsystem_device));
@@ -1494,21 +1558,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
iwl_trans->cfg = cfg_7265d;
- if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
- if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
- iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
- } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
- CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) {
- iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0;
- } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
- CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF)) {
- iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
- } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
- CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4)) {
- iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
- }
- }
-
/*
* This is a hack to switch from Qu B0 to Qu C0. We need to
* do this for all cfgs that use Qu B0, except for those using
@@ -1569,6 +1618,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_trans;
pci_set_drvdata(pdev, iwl_trans);
+
+ /* try to get ownership so that we'll know if we don't own it */
+ iwl_pcie_prepare_card_hw(iwl_trans);
+
iwl_trans->drv = iwl_drv_start(iwl_trans);
if (IS_ERR(iwl_trans->drv)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 14602d6d6699..8247014278f3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -2266,7 +2266,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ /*
+ * In some rare cases when the HW is in a bad state, we may
+ * get this interrupt too early, when prph_info is still NULL.
+ * So make sure that it's not NULL to prevent crashing.
+ */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
u32 sleep_notif =
le32_to_cpu(trans_pcie->prph_info->sleep_notif);
if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 645cb4dd4e5a..0febdcacbd42 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -81,7 +81,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
- iwl_trans_sw_reset(trans);
+ iwl_trans_sw_reset(trans, false);
/*
* Clear "initialization complete" bit to move adapter from
@@ -105,9 +105,12 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE);
- else
+ else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
+ else
+ iwl_write32(trans, CSR_DOORBELL_VECTOR,
+ UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
/* wait 200ms */
ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
@@ -166,7 +169,8 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
- iwl_trans_sw_reset(trans);
+ /* re-take ownership to prevent other users from stealing the device */
+ iwl_trans_sw_reset(trans, true);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
@@ -196,9 +200,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
* interrupt
*/
iwl_enable_rfkill_int(trans);
-
- /* re-take ownership to prevent other users from stealing the device */
- iwl_pcie_prepare_card_hw(trans);
}
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 1efb53f78a62..a63386a01232 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -24,6 +24,7 @@
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
+#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
@@ -126,7 +127,8 @@ out:
kfree(buf);
}
-static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
+static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
+ bool retake_ownership)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@@ -136,6 +138,11 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_RESET,
CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
+
+ if (retake_ownership)
+ return iwl_pcie_prepare_card_hw(trans);
+
+ return 0;
}
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
@@ -381,9 +388,11 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
- iwl_trans_pcie_sw_reset(trans);
+ ret = iwl_trans_pcie_sw_reset(trans, true);
+
+ if (!ret)
+ ret = iwl_finish_nic_init(trans);
- ret = iwl_finish_nic_init(trans);
if (WARN_ON(ret)) {
/* Release XTAL ON request */
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
@@ -408,7 +417,10 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
apmg_xtal_cfg_reg |
SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
- iwl_trans_pcie_sw_reset(trans);
+ ret = iwl_trans_pcie_sw_reset(trans, true);
+ if (ret)
+ IWL_ERR(trans,
+ "iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");
/* Enable LP XTAL by indirect access through CSR */
apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
@@ -514,7 +526,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
return;
}
- iwl_trans_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans, false);
/*
* Clear "initialization complete" bit to move adapter from
@@ -594,8 +606,10 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
ret = iwl_pcie_set_hw_ready(trans);
/* If the card is ready, exit 0 */
- if (ret >= 0)
+ if (ret >= 0) {
+ trans->csme_own = false;
return 0;
+ }
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@@ -608,8 +622,22 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
do {
ret = iwl_pcie_set_hw_ready(trans);
- if (ret >= 0)
+ if (ret >= 0) {
+ trans->csme_own = false;
return 0;
+ }
+
+ if (iwl_mei_is_connected()) {
+ IWL_DEBUG_INFO(trans,
+ "Couldn't prepare the card but SAP is connected\n");
+ trans->csme_own = true;
+ if (trans->trans_cfg->device_family !=
+ IWL_DEVICE_FAMILY_9000)
+ IWL_ERR(trans,
+ "SAP not supported for this NIC family\n");
+
+ return -EBUSY;
+ }
usleep_range(200, 1000);
t += 200;
@@ -1244,7 +1272,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
- iwl_trans_pcie_sw_reset(trans);
+ /* re-take ownership to prevent other users from stealing the device */
+ iwl_trans_pcie_sw_reset(trans, true);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
@@ -1274,9 +1303,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* interrupt
*/
iwl_enable_rfkill_int(trans);
-
- /* re-take ownership to prevent other users from stealing the device */
- iwl_pcie_prepare_card_hw(trans);
}
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
@@ -1482,33 +1508,54 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_set_pwr(trans, true);
}
+static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
+ UREG_DOORBELL_TO_ISR6_RESUME);
+ } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
+ suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
+ CSR_IPC_SLEEP_CONTROL_RESUME);
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ UREG_DOORBELL_TO_ISR6_SLEEP_CTRL);
+ } else {
+ return 0;
+ }
+
+ ret = wait_event_timeout(trans_pcie->sx_waitq,
+ trans_pcie->sx_complete, 2 * HZ);
+
+ /* Reset the completion flag for the next suspend or resume */
+ trans_pcie->sx_complete = false;
+
+ if (!ret) {
+ IWL_ERR(trans, "Timeout %s D3\n",
+ suspend ? "entering" : "exiting");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
bool reset)
{
int ret;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!reset)
/* Enable persistence mode to avoid reset */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
- UREG_DOORBELL_TO_ISR6_SUSPEND);
-
- ret = wait_event_timeout(trans_pcie->sx_waitq,
- trans_pcie->sx_complete, 2 * HZ);
- /*
- * Invalidate it toward resume.
- */
- trans_pcie->sx_complete = false;
+ ret = iwl_pcie_d3_handshake(trans, true);
+ if (ret)
+ return ret;
- if (!ret) {
- IWL_ERR(trans, "Timeout entering D3\n");
- return -ETIMEDOUT;
- }
- }
iwl_pcie_d3_complete_suspend(trans, test, reset);
return 0;
@@ -1525,6 +1572,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (test) {
iwl_enable_interrupts(trans);
*status = IWL_D3_STATUS_ALIVE;
+ ret = 0;
goto out;
}
@@ -1573,25 +1621,10 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
*status = IWL_D3_STATUS_ALIVE;
out:
- if (*status == IWL_D3_STATUS_ALIVE &&
- trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- trans_pcie->sx_complete = false;
- iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
- UREG_DOORBELL_TO_ISR6_RESUME);
-
- ret = wait_event_timeout(trans_pcie->sx_waitq,
- trans_pcie->sx_complete, 2 * HZ);
- /*
- * Invalidate it toward next suspend.
- */
- trans_pcie->sx_complete = false;
+ if (*status == IWL_D3_STATUS_ALIVE)
+ ret = iwl_pcie_d3_handshake(trans, false);
- if (!ret) {
- IWL_ERR(trans, "Timeout exiting D3\n");
- return -ETIMEDOUT;
- }
- }
- return 0;
+ return ret;
}
static void
@@ -1778,9 +1811,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
- iwl_trans_pcie_sw_reset(trans);
-
- return 0;
+ return iwl_trans_pcie_sw_reset(trans, true);
}
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
@@ -1800,7 +1831,9 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
if (err)
return err;
- iwl_trans_pcie_sw_reset(trans);
+ err = iwl_trans_pcie_sw_reset(trans, true);
+ if (err)
+ return err;
if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
trans->trans_cfg->integrated) {
@@ -3599,8 +3632,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* in the old format.
*/
if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
- trans->hw_rev = (trans->hw_rev & 0xfff0) |
- (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
+ trans->hw_rev_step = trans->hw_rev & 0xF;
+ else
+ trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2;
IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
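
The two extraction schemes above pull the silicon step out of different bit positions: bits 0-3 on family 8000 and later, bits 2-3 on older devices. A small sketch with made-up hw_rev values:

/* Illustrative check of the two step-extraction schemes; the sample
 * hw_rev values are assumptions, not real register contents. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t new_rev = 0x0354; /* family >= 8000: step in bits 0-3 */
	uint32_t old_rev = 0x004C; /* older devices: step in bits 2-3 */

	printf("new step=%u\n", new_rev & 0xF);        /* 4 */
	printf("old step=%u\n", (old_rev & 0xC) >> 2); /* 3 */
	return 0;
}
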
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 451b06069350..0730657d54bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1072,6 +1072,7 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
return 0;
err_free_tfds:
dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
+ txq->tfds = NULL;
error:
if (txq->entries && cmd_queue)
for (i = 0; i < slots_num; i++)
@@ -1752,8 +1753,11 @@ static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
}
if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
- dump_stack();
+ if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
+ &trans->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+ dump_stack();
+ }
ret = -EIO;
goto cancel;
}
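
The txq->tfds = NULL assignment added in the first tx.c hunk is a double-free guard: the error label frees the DMA buffer, and clearing the pointer keeps a later teardown of the same queue from freeing it again. A generic sketch of the idiom, with illustrative demo_* names rather than the driver's DMA-coherent allocation:

#include <linux/slab.h>

struct demo_queue {
	void *tfds;
};

static void demo_free_on_error(struct demo_queue *q)
{
	kfree(q->tfds);
	q->tfds = NULL;	/* later teardown sees NULL and skips the free */
}

Since kfree(NULL) is a no-op, NULLing the pointer makes the error path and the normal teardown path safe to run in sequence.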
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index 8bcc1cdcb75b..462ccc7d7d1a 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -69,7 +69,7 @@ static void prism2_send_mgmt(struct net_device *dev,
#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int ap_debug_proc_show(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
seq_printf(m, "BridgedUnicastFrames=%u\n", ap->bridged_unicast);
seq_printf(m, "BridgedMulticastFrames=%u\n", ap->bridged_multicast);
@@ -320,7 +320,7 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
static int ap_control_proc_show(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
char *policy_txt;
struct mac_entry *entry;
@@ -352,20 +352,20 @@ static int ap_control_proc_show(struct seq_file *m, void *v)
static void *ap_control_proc_start(struct seq_file *m, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_lock_bh(&ap->mac_restrictions.lock);
return seq_list_start_head(&ap->mac_restrictions.mac_list, *_pos);
}
static void *ap_control_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
return seq_list_next(v, &ap->mac_restrictions.mac_list, _pos);
}
static void ap_control_proc_stop(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_unlock_bh(&ap->mac_restrictions.lock);
}
@@ -554,20 +554,20 @@ static int prism2_ap_proc_show(struct seq_file *m, void *v)
static void *prism2_ap_proc_start(struct seq_file *m, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_lock_bh(&ap->sta_table_lock);
return seq_list_start_head(&ap->sta_list, *_pos);
}
static void *prism2_ap_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
return seq_list_next(v, &ap->sta_list, _pos);
}
static void prism2_ap_proc_stop(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_unlock_bh(&ap->sta_table_lock);
}
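
All of the hostap conversions above are mechanical: the PDE_DATA() macro was replaced upstream by the lowercase pde_data() accessor with identical semantics. A sketch of the usage pattern; struct demo_stats and the printed field are illustrative.

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct demo_stats {
	unsigned int frames;
};

static int demo_proc_show(struct seq_file *m, void *v)
{
	/* Private pointer registered via proc_create_data() */
	struct demo_stats *st = pde_data(file_inode(m->file));

	seq_printf(m, "frames=%u\n", st->frames);
	return 0;
}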
diff --git a/drivers/net/wireless/intersil/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c
index 7c6a5a6d1d45..3672291ced5c 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_download.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_download.c
@@ -227,7 +227,7 @@ static int prism2_download_aux_dump_proc_open(struct inode *inode, struct file *
sizeof(struct prism2_download_aux_dump));
if (ret == 0) {
struct seq_file *m = file->private_data;
- m->private = PDE_DATA(inode);
+ m->private = pde_data(inode);
}
return ret;
}
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index e459e7192ae9..b74f4cb5d6d3 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -1815,8 +1815,9 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
memset(&txdesc, 0, sizeof(txdesc));
/* skb->data starts with txdesc->frame_control */
- hdr_len = 24;
- skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len);
+ hdr_len = sizeof(txdesc.header);
+ BUILD_BUG_ON(hdr_len != 24);
+ skb_copy_from_linear_data(skb, &txdesc.header, hdr_len);
if (ieee80211_is_data(txdesc.frame_control) &&
ieee80211_has_a4(txdesc.frame_control) &&
skb->len >= 30) {
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 51c847d98755..61f68786056f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -97,20 +97,20 @@ static int prism2_wds_proc_show(struct seq_file *m, void *v)
static void *prism2_wds_proc_start(struct seq_file *m, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
read_lock_bh(&local->iface_lock);
return seq_list_start(&local->hostap_interfaces, *_pos);
}
static void *prism2_wds_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
return seq_list_next(v, &local->hostap_interfaces, _pos);
}
static void prism2_wds_proc_stop(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
read_unlock_bh(&local->iface_lock);
}
@@ -123,7 +123,7 @@ static const struct seq_operations prism2_wds_proc_seqops = {
static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
struct list_head *ptr = v;
struct hostap_bss_info *bss;
@@ -151,21 +151,21 @@ static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
static void *prism2_bss_list_proc_start(struct seq_file *m, loff_t *_pos)
__acquires(&local->lock)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_lock_bh(&local->lock);
return seq_list_start_head(&local->bss_list, *_pos);
}
static void *prism2_bss_list_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
return seq_list_next(v, &local->bss_list, _pos);
}
static void prism2_bss_list_proc_stop(struct seq_file *m, void *v)
__releases(&local->lock)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_unlock_bh(&local->lock);
}
@@ -198,7 +198,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v)
static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
size_t count, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(file));
+ local_info_t *local = pde_data(file_inode(file));
size_t off;
if (local->pda == NULL || *_pos >= PRISM2_PDA_SIZE)
@@ -272,7 +272,7 @@ static int prism2_io_debug_proc_read(char *page, char **start, off_t off,
#ifndef PRISM2_NO_STATION_MODES
static int prism2_scan_results_proc_show(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
unsigned long entry;
int i, len;
struct hfa384x_hostscan_result *scanres;
@@ -322,7 +322,7 @@ static int prism2_scan_results_proc_show(struct seq_file *m, void *v)
static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_lock_bh(&local->lock);
/* We have a header (pos 0) + N results to show (pos 1...N) */
@@ -333,7 +333,7 @@ static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos)
static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
++*_pos;
if (*_pos > local->last_scan_results_count)
@@ -343,7 +343,7 @@ static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *
static void prism2_scan_results_proc_stop(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_unlock_bh(&local->lock);
}
diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
index dd2603d9b5d3..c25cd21d18bd 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
@@ -115,12 +115,14 @@ struct hfa384x_tx_frame {
__le16 tx_control; /* HFA384X_TX_CTRL_ flags */
/* 802.11 */
- __le16 frame_control; /* parts not used */
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN]; /* filled by firmware */
- u8 addr3[ETH_ALEN];
- __le16 seq_ctrl; /* filled by firmware */
+ struct_group(header,
+ __le16 frame_control; /* parts not used */
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN]; /* filled by firmware */
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl; /* filled by firmware */
+ );
u8 addr4[ETH_ALEN];
__le16 data_len;
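
struct_group() wraps the members that prism2_tx_80211() copies as one unit, so memcpy() hardening (FORTIFY_SOURCE) sees a destination of the full header size instead of a single __le16 field. A compilable sketch of the pattern under an illustrative struct demo_frame:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_frame {
	__le16 tx_control;
	struct_group(header,
		__le16 frame_control;
		__le16 duration_id;
		u8 addr1[6];
	);
	__le16 data_len;
};

static void demo_copy_hdr(struct demo_frame *f, const void *src)
{
	/* Compile-time check that the group matches the wire layout */
	BUILD_BUG_ON(sizeof(f->header) != 10);
	memcpy(&f->header, src, sizeof(f->header));
}

The named group can be addressed and sized as a whole while individual members remain accessible by their old names, so no call sites need to change beyond the copy itself.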
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 873fea59894f..8414aa208655 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -431,11 +431,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
* Clear manually, ieee80211_tx_info_clear_status would
* clear the counts too and we need them.
*/
- memset(&info->status.ack_signal, 0,
- sizeof(struct ieee80211_tx_info) -
- offsetof(struct ieee80211_tx_info, status.ack_signal));
- BUILD_BUG_ON(offsetof(struct ieee80211_tx_info,
- status.ack_signal) != 20);
+ memset_after(&info->status, 0, rates);
if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
pad = entry_data->align[0];
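
memset_after() replaces the offsetof()-arithmetic memset plus its BUILD_BUG_ON guard in p54: it zeroes everything in the object that follows the named member. A small sketch under an illustrative struct demo_status:

#include <linux/string.h>
#include <linux/types.h>

struct demo_status {
	u32 rates;	/* preserved */
	u32 ack_signal;	/* cleared */
	u32 flags;	/* cleared */
};

static void demo_clear_status(struct demo_status *s)
{
	/* Zeroes ack_signal and flags; rates is left intact */
	memset_after(s, 0, rates);
}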
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 23219f3747f8..8d54f9face2f 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1276,7 +1276,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
/* If the queue contains MAX_QUEUE skb's drop some */
if (skb_queue_len(&data->pending) >= MAX_QUEUE) {
- /* Droping until WARN_QUEUE level */
+ /* Dropping until WARN_QUEUE level */
while (skb_queue_len(&data->pending) >= WARN_QUEUE) {
ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
data->tx_dropped++;
@@ -4498,7 +4498,7 @@ static void remove_vqs(struct virtio_device *vdev)
{
int i;
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
for (i = 0; i < ARRAY_SIZE(hwsim_vqs); i++) {
struct virtqueue *vq = hwsim_vqs[i];
diff --git a/drivers/net/wireless/marvell/libertas/host.h b/drivers/net/wireless/marvell/libertas/host.h
index dfa22468b14a..ceff4b92e7a1 100644
--- a/drivers/net/wireless/marvell/libertas/host.h
+++ b/drivers/net/wireless/marvell/libertas/host.h
@@ -308,10 +308,12 @@ struct txpd {
__le32 tx_packet_location;
/* Tx packet length */
__le16 tx_packet_length;
- /* First 2 byte of destination MAC address */
- u8 tx_dest_addr_high[2];
- /* Last 4 byte of destination MAC address */
- u8 tx_dest_addr_low[4];
+ struct_group_attr(tx_dest_addr, __packed,
+ /* First 2 bytes of destination MAC address */
+ u8 tx_dest_addr_high[2];
+ /* Last 4 bytes of destination MAC address */
+ u8 tx_dest_addr_low[4];
+ );
/* Pkt Priority */
u8 priority;
/* Pkt Transmit Power control */
diff --git a/drivers/net/wireless/marvell/libertas/tx.c b/drivers/net/wireless/marvell/libertas/tx.c
index aeb481740df6..27304a98787d 100644
--- a/drivers/net/wireless/marvell/libertas/tx.c
+++ b/drivers/net/wireless/marvell/libertas/tx.c
@@ -113,6 +113,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
p802x_hdr = skb->data;
pkt_len = skb->len;
+ BUILD_BUG_ON(sizeof(txpd->tx_dest_addr) != ETH_ALEN);
if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data;
@@ -124,10 +125,10 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
pkt_len -= sizeof(*rtap_hdr);
/* copy destination address from 802.11 header */
- memcpy(txpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN);
+ memcpy(&txpd->tx_dest_addr, p802x_hdr + 4, ETH_ALEN);
} else {
/* copy destination address from 802.3 header */
- memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN);
+ memcpy(&txpd->tx_dest_addr, p802x_hdr, ETH_ALEN);
}
txpd->tx_packet_length = cpu_to_le16(pkt_len);
diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
index 5d726545d987..631b5da09f86 100644
--- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
@@ -268,10 +268,12 @@ struct txpd {
__le32 tx_packet_location;
/* Tx packet length */
__le16 tx_packet_length;
- /* First 2 byte of destination MAC address */
- u8 tx_dest_addr_high[2];
- /* Last 4 byte of destination MAC address */
- u8 tx_dest_addr_low[4];
+ struct_group_attr(tx_dest_addr, __packed,
+ /* First 2 bytes of destination MAC address */
+ u8 tx_dest_addr_high[2];
+ /* Last 4 bytes of destination MAC address */
+ u8 tx_dest_addr_low[4];
+ );
/* Pkt Priority */
u8 priority;
/* Pkt Transmit Power control */
@@ -280,7 +282,7 @@ struct txpd {
u8 pktdelay_2ms;
/* reserved */
u8 reserved1;
-};
+} __packed;
/* RxPD Descriptor */
struct rxpd {
@@ -311,7 +313,7 @@ struct rxpd {
/* Pkt Priority */
u8 priority;
u8 reserved[3];
-};
+} __packed;
struct cmd_header {
__le16 command;
@@ -377,14 +379,14 @@ struct cmd_ds_mac_control {
struct cmd_header hdr;
__le16 action;
u16 reserved;
-};
+} __packed;
struct cmd_ds_802_11_mac_address {
struct cmd_header hdr;
__le16 action;
uint8_t macadd[ETH_ALEN];
-};
+} __packed;
struct cmd_ds_mac_multicast_addr {
struct cmd_header hdr;
@@ -392,27 +394,27 @@ struct cmd_ds_mac_multicast_addr {
__le16 action;
__le16 nr_of_adrs;
u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
-};
+} __packed;
struct cmd_ds_set_mode {
struct cmd_header hdr;
__le16 mode;
-};
+} __packed;
struct cmd_ds_set_bssid {
struct cmd_header hdr;
u8 bssid[6];
u8 activate;
-};
+} __packed;
struct cmd_ds_802_11_radio_control {
struct cmd_header hdr;
__le16 action;
__le16 control;
-};
+} __packed;
struct cmd_ds_802_11_rf_channel {
@@ -423,20 +425,20 @@ struct cmd_ds_802_11_rf_channel {
__le16 rftype; /* unused */
__le16 reserved; /* unused */
u8 channellist[32]; /* unused */
-};
+} __packed;
struct cmd_ds_set_boot2_ver {
struct cmd_header hdr;
__le16 action;
__le16 version;
-};
+} __packed;
struct cmd_ds_802_11_reset {
struct cmd_header hdr;
__le16 action;
-};
+} __packed;
struct cmd_ds_802_11_beacon_control {
struct cmd_header hdr;
@@ -444,14 +446,14 @@ struct cmd_ds_802_11_beacon_control {
__le16 action;
__le16 beacon_enable;
__le16 beacon_period;
-};
+} __packed;
struct cmd_ds_802_11_beacon_set {
struct cmd_header hdr;
__le16 len;
u8 beacon[MRVL_MAX_BCN_SIZE];
-};
+} __packed;
struct cmd_ctrl_node;
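
The __packed annotations added throughout libertas_tf.h matter because these structures mirror the firmware's wire format: without the attribute the compiler may insert padding between members and the host and firmware layouts diverge. A minimal sketch (struct demo_cmd is illustrative):

#include <linux/build_bug.h>
#include <linux/types.h>

struct demo_cmd {
	__le16 command;
	u8 flag;
	__le16 value;	/* unpacked, a pad byte would precede this */
} __packed;

/* 2 + 1 + 2 bytes on the wire; 6 without __packed on most ABIs */
static_assert(sizeof(struct demo_cmd) == 5);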
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 71492211904b..02a1e1f547d8 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -232,7 +232,8 @@ static void lbtf_tx_work(struct work_struct *work)
ieee80211_get_tx_rate(priv->hw, info)->hw_value);
/* copy destination address from 802.11 header */
- memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4,
+ BUILD_BUG_ON(sizeof(txpd->tx_dest_addr) != ETH_ALEN);
+ memcpy(&txpd->tx_dest_addr, skb->data + sizeof(struct txpd) + 4,
ETH_ALEN);
txpd->tx_packet_length = cpu_to_le16(len);
txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 2ff23ab259ab..63c25c69ed2b 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -2071,9 +2071,11 @@ struct mwifiex_ie_types_robust_coex {
__le32 mode;
} __packed;
+#define MWIFIEX_VERSION_STR_LENGTH 128
+
struct host_cmd_ds_version_ext {
u8 version_str_sel;
- char version_str[128];
+ char version_str[MWIFIEX_VERSION_STR_LENGTH];
} __packed;
struct host_cmd_ds_mgmt_frame_reg {
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index f006a3d72b40..88c72d1827a0 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -332,7 +332,7 @@ void mwifiex_set_trans_start(struct net_device *dev)
int i;
for (i = 0; i < dev->num_tx_queues; i++)
- netdev_get_tx_queue(dev, i)->trans_start = jiffies;
+ txq_trans_cond_update(netdev_get_tx_queue(dev, i));
netif_trans_update(dev);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 19b996c6a260..ace7371c4773 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -226,6 +226,23 @@ exit_rx_proc:
return 0;
}
+static void maybe_quirk_fw_disable_ds(struct mwifiex_adapter *adapter)
+{
+ struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+ struct mwifiex_ver_ext ver_ext;
+
+ if (test_and_set_bit(MWIFIEX_IS_REQUESTING_FW_VEREXT, &adapter->work_flags))
+ return;
+
+ memset(&ver_ext, 0, sizeof(ver_ext));
+ ver_ext.version_str_sel = 1;
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
+ HostCmd_ACT_GEN_GET, 0, &ver_ext, false)) {
+ mwifiex_dbg(priv->adapter, MSG,
+ "Checking hardware revision failed.\n");
+ }
+}
+
/*
* The main process.
*
@@ -356,6 +373,7 @@ process_start:
if (adapter->hw_status == MWIFIEX_HW_STATUS_INIT_DONE) {
adapter->hw_status = MWIFIEX_HW_STATUS_READY;
mwifiex_init_fw_complete(adapter);
+ maybe_quirk_fw_disable_ds(adapter);
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 90012cbcfd15..332dd1c8db35 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -524,6 +524,7 @@ enum mwifiex_adapter_work_flags {
MWIFIEX_IS_SUSPENDED,
MWIFIEX_IS_HS_CONFIGURED,
MWIFIEX_IS_HS_ENABLING,
+ MWIFIEX_IS_REQUESTING_FW_VEREXT,
};
struct mwifiex_band_config {
@@ -646,7 +647,7 @@ struct mwifiex_private {
struct wireless_dev wdev;
struct mwifiex_chan_freq_power cfp;
u32 versionstrsel;
- char version_str[128];
+ char version_str[MWIFIEX_VERSION_STR_LENGTH];
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_dev_dir;
#endif
@@ -1055,6 +1056,8 @@ struct mwifiex_adapter {
void *devdump_data;
int devdump_len;
struct timer_list devdump_timer;
+
+ bool ignore_btcoex_events;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index c3f5583ea70d..d5fb29400bad 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -3152,6 +3152,9 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
if (ret)
goto err_alloc_buffers;
+ if (pdev->device == PCIE_DEVICE_ID_MARVELL_88W8897)
+ adapter->ignore_btcoex_events = true;
+
return 0;
err_alloc_buffers:
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 6b5d35d9e69f..1a4ae8a42a31 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -708,11 +708,35 @@ static int mwifiex_ret_ver_ext(struct mwifiex_private *priv,
{
struct host_cmd_ds_version_ext *ver_ext = &resp->params.verext;
+ if (test_and_clear_bit(MWIFIEX_IS_REQUESTING_FW_VEREXT, &priv->adapter->work_flags)) {
+ if (strncmp(ver_ext->version_str, "ChipRev:20, BB:9b(10.00), RF:40(21)",
+ MWIFIEX_VERSION_STR_LENGTH) == 0) {
+ struct mwifiex_ds_auto_ds auto_ds = {
+ .auto_ds = DEEP_SLEEP_OFF,
+ };
+
+ mwifiex_dbg(priv->adapter, MSG,
+ "Bad HW revision detected, disabling deep sleep\n");
+
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, false)) {
+ mwifiex_dbg(priv->adapter, MSG,
+ "Disabling deep sleep failed.\n");
+ }
+ }
+
+ return 0;
+ }
+
if (version_ext) {
version_ext->version_str_sel = ver_ext->version_str_sel;
memcpy(version_ext->version_str, ver_ext->version_str,
- sizeof(char) * 128);
- memcpy(priv->version_str, ver_ext->version_str, 128);
+ MWIFIEX_VERSION_STR_LENGTH);
+ memcpy(priv->version_str, ver_ext->version_str,
+ MWIFIEX_VERSION_STR_LENGTH);
+
+ /* Ensure the version string from the firmware is 0-terminated */
+ priv->version_str[MWIFIEX_VERSION_STR_LENGTH - 1] = '\0';
}
return 0;
}
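
Beyond replacing the magic 128 with MWIFIEX_VERSION_STR_LENGTH, the hunk above pins the final byte of the copied version string, since a string coming from firmware is not guaranteed to be NUL-terminated. A generic sketch of that defensive copy; DEMO_LEN and demo_store_version() are illustrative:

#include <linux/string.h>

#define DEMO_LEN 128

static void demo_store_version(char *dst, const char *fw_src)
{
	memcpy(dst, fw_src, DEMO_LEN);
	/* Firmware may omit the terminator; pin it ourselves */
	dst[DEMO_LEN - 1] = '\0';
}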
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 68c63268e2e6..7d42c5d2dbf6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -365,10 +365,12 @@ static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv,
sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
sta_ptr->tx_pause = tp->tx_pause;
+ spin_unlock_bh(&priv->sta_list_spinlock);
mwifiex_update_ralist_tx_pause(priv, tp->peermac,
tp->tx_pause);
+ } else {
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
- spin_unlock_bh(&priv->sta_list_spinlock);
}
}
@@ -400,11 +402,13 @@ static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
sta_ptr->tx_pause = tp->tx_pause;
+ spin_unlock_bh(&priv->sta_list_spinlock);
mwifiex_update_ralist_tx_pause(priv,
tp->peermac,
tp->tx_pause);
+ } else {
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
- spin_unlock_bh(&priv->sta_list_spinlock);
}
}
}
@@ -1058,6 +1062,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_BT_COEX_WLAN_PARA_CHANGE:
dev_dbg(adapter->dev, "EVENT: BT coex wlan param update\n");
+ if (adapter->ignore_btcoex_events)
+ break;
+
mwifiex_bt_coex_wlan_param_update_event(priv,
adapter->event_skb);
break;
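
The tx-pause hunks above restructure the locking so priv->sta_list_spinlock is dropped before mwifiex_update_ralist_tx_pause() runs rather than held across it, since that callee takes further locks of its own. A sketch of the shape of the fix, with illustrative demo_* types:

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_sta {
	bool tx_pause;
};

struct demo_priv {
	spinlock_t sta_list_lock;
	struct demo_sta *sta;
};

/* Hypothetical callee that acquires other locks internally */
static void demo_update_ralist(struct demo_priv *p, bool pause)
{
}

static void demo_set_tx_pause(struct demo_priv *p, bool pause)
{
	spin_lock_bh(&p->sta_list_lock);
	if (p->sta && p->sta->tx_pause != pause) {
		p->sta->tx_pause = pause;
		spin_unlock_bh(&p->sta_list_lock);
		demo_update_ralist(p, pause);	/* called unlocked */
	} else {
		spin_unlock_bh(&p->sta_list_lock);
	}
}

Releasing in both branches instead of after the if keeps the state update itself protected while the outbound call runs lock-free.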
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 9736aa0ab7fd..8f01fcbe9396 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -130,7 +130,8 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
default:
mwifiex_dbg(adapter, ERROR,
"unknown recv_type %#x\n", recv_type);
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
}
break;
case MWIFIEX_USB_EP_DATA:
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 529e325498cd..864a2ba9efee 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -4225,9 +4225,11 @@ struct mwl8k_cmd_set_key {
__le32 key_info;
__le32 key_id;
__le16 key_len;
- __u8 key_material[MAX_ENCR_KEY_LENGTH];
- __u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
- __u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
+ struct {
+ __u8 key_material[MAX_ENCR_KEY_LENGTH];
+ __u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
+ __u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
+ } tkip;
__le16 tkip_rsc_low;
__le32 tkip_rsc_high;
__le16 tkip_tsc_low;
@@ -4375,7 +4377,7 @@ static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
goto done;
}
- memcpy(cmd->key_material, key->key, keymlen);
+ memcpy(&cmd->tkip, key->key, keymlen);
cmd->action = cpu_to_le32(action);
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index b8bcf22a07fd..47e9911ee9fe 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -82,7 +82,7 @@ static int mt76_rx_queues_read(struct seq_file *s, void *data)
queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;
seq_printf(s, " %9d | %9d | %9d | %9d |\n",
- i, q->queued, q->head, q->tail);
+ i, queued, q->head, q->tail);
}
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 5e1c1506a4c6..3a9af8931c35 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -572,9 +572,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
if (data_len < len + q->buf_offset) {
dev_kfree_skb(q->rx_head);
q->rx_head = NULL;
-
- skb_free_frag(data);
- continue;
+ goto free_frag;
}
if (q->rx_head) {
@@ -582,11 +580,14 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
continue;
}
+ if (!more && dev->drv->rx_check &&
+ !(dev->drv->rx_check(dev, data, len)))
+ goto free_frag;
+
skb = build_skb(data, q->buf_size);
- if (!skb) {
- skb_free_frag(data);
- continue;
- }
+ if (!skb)
+ goto free_frag;
+
skb_reserve(skb, q->buf_offset);
if (q == &dev->q_rx[MT_RXQ_MCU]) {
@@ -603,6 +604,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
}
dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+ continue;
+
+free_frag:
+ skb_free_frag(data);
}
mt76_dma_rx_fill(dev, q);
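
The mt76 RX loop rework replaces three separate skb_free_frag()-plus-continue sequences with a single free_frag label at the bottom of the loop body, which also gives the new rx_check hook a place to bail out. A structural sketch; the demo_* helpers are illustrative stand-ins:

#include <linux/skbuff.h>

/* Hypothetical stand-ins for the driver's validation and delivery */
static bool demo_check(void *data) { return true; }
static void demo_deliver(void *data) { }

static void demo_rx_process(void **bufs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		void *data = bufs[i];

		if (!demo_check(data))
			goto free_frag;

		demo_deliver(data);
		continue;

free_frag:
		skb_free_frag(data);	/* single shared error path */
	}
}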
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 2d58aa31db93..a499861918fa 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -65,6 +65,8 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
offset = be32_to_cpup(list);
ret = mtd_read(mtd, offset, len, &retlen, eep);
put_mtd_device(mtd);
+ if (mtd_is_bitflip(ret))
+ ret = 0;
if (ret) {
dev_err(dev->dev, "reading EEPROM from mtd %s failed: %i\n",
part, ret);
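
mtd_read() returns -EUCLEAN when it had to correct bitflips; the data is still valid, so the EEPROM path now maps that case to success instead of failing the read. A sketch of the wrapper shape (the demo name is illustrative):

#include <linux/mtd/mtd.h>

static int demo_read_eeprom(struct mtd_info *mtd, loff_t off,
			    size_t len, u8 *buf)
{
	size_t retlen;
	int ret = mtd_read(mtd, off, len, &retlen, buf);

	if (mtd_is_bitflip(ret))
		ret = 0;	/* corrected bitflips: data is usable */

	return ret;
}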
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 62807dc311c1..8bb1c7ab5b50 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -185,7 +185,6 @@ const struct cfg80211_sar_capa mt76_sar_capa = {
.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
.freq_ranges = &mt76_sar_freq_ranges[0],
};
-EXPORT_SYMBOL_GPL(mt76_sar_capa);
static int mt76_led_init(struct mt76_dev *dev)
{
@@ -393,7 +392,7 @@ mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
phy->hw->wiphy->bands[band] = NULL;
}
-static void
+static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
struct mt76_dev *dev = phy->dev;
@@ -411,8 +410,15 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
- wiphy->available_antennas_tx = dev->phy.antenna_mask;
- wiphy->available_antennas_rx = dev->phy.antenna_mask;
+ wiphy->available_antennas_tx = phy->antenna_mask;
+ wiphy->available_antennas_rx = phy->antenna_mask;
+
+ wiphy->sar_capa = &mt76_sar_capa;
+ phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
+ sizeof(struct mt76_freq_range_power),
+ GFP_KERNEL);
+ if (!phy->frp)
+ return -ENOMEM;
hw->txq_data_size = sizeof(struct mt76_txq);
hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
@@ -432,6 +438,8 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+
+ return 0;
}
struct mt76_phy *
@@ -472,7 +480,9 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
{
int ret;
- mt76_phy_init(phy, phy->hw);
+ ret = mt76_phy_init(phy, phy->hw);
+ if (ret)
+ return ret;
if (phy->cap.has_2ghz) {
ret = mt76_init_sband_2g(phy, rates, n_rates);
@@ -591,7 +601,9 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
int ret;
dev_set_drvdata(dev->dev, dev);
- mt76_phy_init(phy, hw);
+ ret = mt76_phy_init(phy, hw);
+ if (ret)
+ return ret;
if (phy->cap.has_2ghz) {
ret = mt76_init_sband_2g(phy, rates, n_rates);
@@ -1163,10 +1175,12 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
if (ps)
set_bit(MT_WCID_FLAG_PS, &wcid->flags);
- else
- clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
dev->drv->sta_ps(dev, sta, ps);
+
+ if (!ps)
+ clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
+
ieee80211_sta_ps_transition(sta, ps);
}
@@ -1348,6 +1362,59 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);
+int mt76_init_sar_power(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct mt76_phy *phy = hw->priv;
+ const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
+ int i;
+
+ if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
+ return -EINVAL;
+
+ for (i = 0; i < sar->num_sub_specs; i++) {
+ u32 index = sar->sub_specs[i].freq_range_index;
+ /* SAR specifies the power limitation in 0.25 dBm units */
+ s32 power = sar->sub_specs[i].power >> 1;
+
+ if (power > 127 || power < -127)
+ power = 127;
+
+ phy->frp[index].range = &capa->freq_ranges[index];
+ phy->frp[index].power = power;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_init_sar_power);
+
+int mt76_get_sar_power(struct mt76_phy *phy,
+ struct ieee80211_channel *chan,
+ int power)
+{
+ const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
+ int freq, i;
+
+ if (!capa || !phy->frp)
+ return power;
+
+ if (power > 127 || power < -127)
+ power = 127;
+
+ freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
+ for (i = 0 ; i < capa->num_freq_ranges; i++) {
+ if (phy->frp[i].range &&
+ freq >= phy->frp[i].range->start_freq &&
+ freq < phy->frp[i].range->end_freq) {
+ power = min_t(int, phy->frp[i].power, power);
+ break;
+ }
+ }
+
+ return power;
+}
+EXPORT_SYMBOL_GPL(mt76_get_sar_power);
+
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
@@ -1494,7 +1561,6 @@ EXPORT_SYMBOL_GPL(mt76_init_queue);
u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
{
int offset = 0;
- struct ieee80211_rate *rate;
if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
offset = 4;
@@ -1503,9 +1569,11 @@ u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
if (rateidx < 0)
rateidx = 0;
- rate = &mt76_rates[offset + rateidx];
+ rateidx += offset;
+ if (rateidx >= ARRAY_SIZE(mt76_rates))
+ rateidx = offset;
- return rate->hw_value;
+ return mt76_rates[rateidx].hw_value;
}
EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
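
mt76_get_sar_power() above clamps the requested power into a signed 8-bit range and then applies the limit of whichever registered frequency range contains the channel. A simplified sketch of that lookup with illustrative demo types (units as in the driver, where the 0.25 dBm SAR values are halved before storage):

#include <linux/minmax.h>

struct demo_range {
	int start_freq;		/* MHz, inclusive */
	int end_freq;		/* MHz, exclusive */
	int limit;		/* clamped power limit */
};

static int demo_sar_power(const struct demo_range *ranges, int n,
			  int freq, int power)
{
	int i;

	if (power > 127 || power < -127)
		power = 127;

	for (i = 0; i < n; i++) {
		if (freq >= ranges[i].start_freq &&
		    freq < ranges[i].end_freq) {
			power = min(power, ranges[i].limit);
			break;
		}
	}

	return power;
}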
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index e2da720a91b6..404c3d1a70d6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -373,6 +373,8 @@ struct mt76_driver_ops {
bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
+ bool (*rx_check)(struct mt76_dev *dev, void *data, int len);
+
void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -495,6 +497,8 @@ struct mt76_usb {
};
#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
+#define MT76S_NUM_TX_ENTRIES 256
+#define MT76S_NUM_RX_ENTRIES 512
struct mt76_sdio {
struct mt76_worker txrx_worker;
struct mt76_worker status_worker;
@@ -599,6 +603,8 @@ struct mt76_testmode_data {
u8 tx_power[4];
u8 tx_power_control;
+ u8 addr[3][ETH_ALEN];
+
u32 tx_pending;
u32 tx_queued;
u16 tx_queued_limit;
@@ -808,7 +814,6 @@ struct mt76_ethtool_worker_info {
}
extern struct ieee80211_rate mt76_rates[12];
-extern const struct cfg80211_sar_capa mt76_sar_capa;
#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__)
@@ -1157,6 +1162,11 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm);
+int mt76_init_sar_power(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar);
+int mt76_get_sar_power(struct mt76_phy *phy,
+ struct ieee80211_channel *chan,
+ int power);
void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index fe03e31989bb..a272d64808c3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -202,10 +202,11 @@ void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));
- WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
- 0, 5000));
+ mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 15000);
}
+ WARN_ON_ONCE(mt76_rr(dev, MT_DMA_FQCR0) & MT_DMA_FQCR0_BUSY);
+
mt76_wr(dev, MT_TX_ABORT, 0);
mt7603_wtbl_set_skip_tx(dev, idx, false);
@@ -525,6 +526,10 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
status->flag |= RX_FLAG_MMIC_ERROR;
+ /* ICV error or CCMP/BIP/WPI MIC error */
+ if (rxd2 & MT_RXD2_NORMAL_ICV_ERR)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
!(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
status->flag |= RX_FLAG_DECRYPTED;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 7ac4cd247a73..2b546bc05d82 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -133,13 +133,15 @@ void mt7603_init_edcca(struct mt7603_dev *dev)
}
static int
-mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
+mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
{
+ struct mt7603_dev *dev = hw->priv;
u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
int idx, ret;
u8 bw = MT_BW_20;
bool failed = false;
+ ieee80211_stop_queues(hw);
cancel_delayed_work_sync(&dev->mphy.mac_work);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
@@ -205,9 +207,28 @@ out:
if (failed)
mt7603_mac_work(&dev->mphy.mac_work.work);
+ ieee80211_wake_queues(hw);
+
return ret;
}
+static int mt7603_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct mt7603_dev *dev = hw->priv;
+ struct mt76_phy *mphy = &dev->mphy;
+ int err;
+
+ if (!cfg80211_chandef_valid(&mphy->chandef))
+ return -EINVAL;
+
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ return err;
+
+ return mt7603_set_channel(hw, &mphy->chandef);
+}
+
static int
mt7603_config(struct ieee80211_hw *hw, u32 changed)
{
@@ -215,11 +236,8 @@ mt7603_config(struct ieee80211_hw *hw, u32 changed)
int ret = 0;
if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
- IEEE80211_CONF_CHANGE_POWER)) {
- ieee80211_stop_queues(hw);
- ret = mt7603_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ IEEE80211_CONF_CHANGE_POWER))
+ ret = mt7603_set_channel(hw, &hw->conf.chandef);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
mutex_lock(&dev->mt76.mutex);
@@ -700,6 +718,7 @@ const struct ieee80211_ops mt7603_ops = {
.set_tim = mt76_set_tim,
.get_survey = mt76_get_survey,
.get_antenna = mt76_get_antenna,
+ .set_sar_specs = mt7603_set_sar_specs,
};
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 6abfe6b19afa..7884b952b720 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -403,7 +403,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
.tx_streams = n_chains,
.rx_streams = n_chains,
};
- s8 tx_power;
+ s8 tx_power = hw->conf.power_level * 2;
int i, ret;
if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_40) {
@@ -414,7 +414,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
req.center_chan -= 2;
}
- tx_power = hw->conf.power_level * 2;
+ tx_power = mt76_get_sar_power(&dev->mphy, chandef->chan, tx_power);
if (dev->mphy.antenna_mask == 3)
tx_power -= 6;
tx_power = min(tx_power, dev->tx_power_limit);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index 6fd6f067da49..b53528014fbc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -359,6 +359,9 @@ mt7615_queues_acq(struct seq_file *s, void *data)
int acs = i / MT7615_MAX_WMM_SETS;
u32 ctrl, val, qlen = 0;
+ if (wmm_idx == 3 && is_mt7663(&dev->mt76))
+ continue;
+
val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx));
ctrl = BIT(31) | BIT(15) | (acs << 8);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 47f23ac905a3..a753c7476d31 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -194,6 +194,7 @@ mt7615_check_offload_capability(struct mt7615_dev *dev)
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ wiphy->flags &= ~WIPHY_FLAG_4ADDR_STATION;
wiphy->max_remain_on_channel_duration = 5000;
wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 423f69015e3e..ec25e5a95d44 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -249,6 +249,82 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
#endif
}
+/* The HW does not translate the mac header to 802.3 for mesh point */
+static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid;
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct ieee80211_hdr hdr;
+ struct ethhdr eth_hdr;
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 qos_ctrl, ht_ctrl;
+
+ if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) !=
+ MT_RXD1_NORMAL_U2M)
+ return -EINVAL;
+
+ if (!(le32_to_cpu(rxd[0]) & MT_RXD0_NORMAL_GROUP_4))
+ return -EINVAL;
+
+ if (!msta || !msta->vif)
+ return -EINVAL;
+
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+
+ /* store the info from RXD and ethhdr to avoid being overridden */
+ memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
+ hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]);
+ hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]);
+ qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]);
+ ht_ctrl = FIELD_GET(MT_RXD7_HT_CONTROL, rxd[7]);
+
+ hdr.duration_id = 0;
+ ether_addr_copy(hdr.addr1, vif->addr);
+ ether_addr_copy(hdr.addr2, sta->addr);
+ switch (le16_to_cpu(hdr.frame_control) &
+ (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ case 0:
+ ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
+ break;
+ case IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ break;
+ case IEEE80211_FCTL_TODS:
+ ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ break;
+ case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ break;
+ default:
+ break;
+ }
+
+ skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
+ if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
+ eth_hdr.h_proto == htons(ETH_P_IPX))
+ ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
+ else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
+ else
+ skb_pull(skb, 2);
+
+ if (ieee80211_has_order(hdr.frame_control))
+ memcpy(skb_push(skb, 2), &ht_ctrl, 2);
+ if (ieee80211_is_data_qos(hdr.frame_control))
+ memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ if (ieee80211_has_a4(hdr.frame_control))
+ memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
+ else
+ memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
+
+ status->flag &= ~(RX_FLAG_RADIOTAP_HE | RX_FLAG_RADIOTAP_HE_MU);
+ return 0;
+}
+
static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -263,6 +339,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
+ u16 hdr_gap;
int phy_idx;
int i, idx;
u8 chfreq, amsdu_info, qos_ctl = 0;
@@ -286,9 +363,16 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
return -EINVAL;
+ hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS;
+ if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_CM))
+ return -EINVAL;
+
+ /* ICV error or CCMP/BIP/WPI MIC error */
+ if (rxd2 & MT_RXD2_NORMAL_ICV_ERR)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
- hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS;
status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
@@ -503,16 +587,42 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
return -EINVAL;
}
- skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
-
amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1);
status->amsdu = !!amsdu_info;
if (status->amsdu) {
status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME;
status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME;
- if (!hdr_trans) {
- memmove(skb->data + 2, skb->data,
- ieee80211_get_hdrlen_from_skb(skb));
+ }
+
+ hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
+ if (hdr_trans && ieee80211_has_morefrags(fc)) {
+ if (mt7615_reverse_frag0_hdr_trans(skb, hdr_gap))
+ return -EINVAL;
+ hdr_trans = false;
+ } else {
+ int pad_start = 0;
+
+ skb_pull(skb, hdr_gap);
+ if (!hdr_trans && status->amsdu) {
+ pad_start = ieee80211_get_hdrlen_from_skb(skb);
+ } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
+ /*
+ * When header translation failure is indicated,
+ * the hardware will insert an extra 2-byte field
+ * containing the data length after the protocol
+ * type field.
+ */
+ pad_start = 12;
+ if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
+ pad_start += 4;
+
+ if (get_unaligned_be16(skb->data + pad_start) !=
+ skb->len - pad_start - 2)
+ pad_start = 0;
+ }
+
+ if (pad_start) {
+ memmove(skb->data + 2, skb->data, pad_start);
skb_pull(skb, 2);
}
}
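
Per the in-code comment above, when header translation fails the hardware leaves a 2-byte length field behind the EtherType (after any VLAN tag), and the driver only strips padding when that field is consistent with the remaining payload. A sketch of that check under the same assumptions; demo_pad_start() is illustrative:

#include <asm/unaligned.h>
#include <linux/if_ether.h>
#include <linux/types.h>

static int demo_pad_start(const u8 *data, int len)
{
	int pad = 2 * ETH_ALEN;	/* EtherType offset in an 802.3 header */

	if (get_unaligned_be16(data + pad) == ETH_P_8021Q)
		pad += 4;	/* step over a VLAN tag */

	/* Only trust the inserted field if it matches the payload */
	if (get_unaligned_be16(data + pad) != len - pad - 2)
		return 0;

	return pad;
}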
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index 46f283eb8d0f..e241c613091c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -86,6 +86,8 @@ enum rx_pkt_type {
#define MT_RXD6_SEQ_CTRL GENMASK(15, 0)
#define MT_RXD6_QOS_CTL GENMASK(31, 16)
+#define MT_RXD7_HT_CONTROL GENMASK(31, 0)
+
#define MT_RXV1_ACID_DET_H BIT(31)
#define MT_RXV1_ACID_DET_L BIT(30)
#define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 890d9b07e156..82d625a16a62 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -73,7 +73,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
goto out;
}
- ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+ ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
if (ret)
goto out;
@@ -141,9 +141,6 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
if (i)
return i - 1;
- if (type != NL80211_IFTYPE_STATION)
- break;
-
/* next, try to find a free repeater entry for the sta */
i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
REPEATER_BSSID_MAX - REPEATER_BSSID_START);
@@ -211,11 +208,9 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
mvif->mt76.omac_idx = idx;
mvif->mt76.band_idx = ext_phy;
- if (mt7615_ext_phy(dev))
- mvif->mt76.wmm_idx = ext_phy * (MT7615_MAX_WMM_SETS / 2) +
- mvif->mt76.idx % (MT7615_MAX_WMM_SETS / 2);
- else
- mvif->mt76.wmm_idx = mvif->mt76.idx % MT7615_MAX_WMM_SETS;
+ mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ if (ext_phy)
+ mvif->mt76.wmm_idx += 2;
dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
@@ -331,7 +326,7 @@ int mt7615_set_channel(struct mt7615_phy *phy)
goto out;
}
- ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
+ ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH));
if (ret)
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 25f9cbe2cd61..759dcf0e6783 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -87,7 +87,7 @@ struct mt7663_fw_buf {
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq)
{
- int txd_len, mcu_cmd = cmd & MCU_CMD_MASK;
+ int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
struct mt7615_uni_txd *uni_txd;
struct mt7615_mcu_txd *mcu_txd;
u8 seq, q_idx, pkt_fmt;
@@ -103,10 +103,10 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
if (wait_seq)
*wait_seq = seq;
- txd_len = cmd & MCU_UNI_PREFIX ? sizeof(*uni_txd) : sizeof(*mcu_txd);
+ txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
txd = (__le32 *)skb_push(skb, txd_len);
- if (cmd != MCU_CMD_FW_SCATTER) {
+ if (cmd != MCU_CMD(FW_SCATTER)) {
q_idx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
} else {
@@ -124,7 +124,7 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
FIELD_PREP(MT_TXD1_PKT_FMT, pkt_fmt);
txd[1] = cpu_to_le32(val);
- if (cmd & MCU_UNI_PREFIX) {
+ if (cmd & __MCU_CMD_FIELD_UNI) {
uni_txd = (struct mt7615_uni_txd *)txd;
uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
@@ -142,28 +142,17 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
mcu_txd->s2d_index = MCU_S2D_H2N;
mcu_txd->pkt_type = MCU_PKT_ID;
mcu_txd->seq = seq;
+ mcu_txd->cid = mcu_cmd;
+ mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
- switch (cmd & ~MCU_CMD_MASK) {
- case MCU_FW_PREFIX:
- mcu_txd->set_query = MCU_Q_NA;
- mcu_txd->cid = mcu_cmd;
- break;
- case MCU_CE_PREFIX:
- if (cmd & MCU_QUERY_MASK)
- mcu_txd->set_query = MCU_Q_QUERY;
- else
- mcu_txd->set_query = MCU_Q_SET;
- mcu_txd->cid = mcu_cmd;
- break;
- default:
- mcu_txd->cid = MCU_CMD_EXT_CID;
- if (cmd & MCU_QUERY_PREFIX)
+ if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) {
+ if (cmd & __MCU_CMD_FIELD_QUERY)
mcu_txd->set_query = MCU_Q_QUERY;
else
mcu_txd->set_query = MCU_Q_SET;
- mcu_txd->ext_cid = mcu_cmd;
- mcu_txd->ext_cid_ack = 1;
- break;
+ mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid;
+ } else {
+ mcu_txd->set_query = MCU_Q_NA;
}
}
EXPORT_SYMBOL_GPL(mt7615_mcu_fill_msg);
@@ -184,42 +173,32 @@ int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd,
if (seq != rxd->seq)
return -EAGAIN;
- switch (cmd) {
- case MCU_CMD_PATCH_SEM_CONTROL:
+ if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
skb_pull(skb, sizeof(*rxd) - 4);
ret = *skb->data;
- break;
- case MCU_EXT_CMD_GET_TEMP:
+ } else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
skb_pull(skb, sizeof(*rxd));
ret = le32_to_cpu(*(__le32 *)skb->data);
- break;
- case MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX:
+ } else if (cmd == MCU_EXT_QUERY(RF_REG_ACCESS)) {
skb_pull(skb, sizeof(*rxd));
ret = le32_to_cpu(*(__le32 *)&skb->data[8]);
- break;
- case MCU_UNI_CMD_DEV_INFO_UPDATE:
- case MCU_UNI_CMD_BSS_INFO_UPDATE:
- case MCU_UNI_CMD_STA_REC_UPDATE:
- case MCU_UNI_CMD_HIF_CTRL:
- case MCU_UNI_CMD_OFFLOAD:
- case MCU_UNI_CMD_SUSPEND: {
+ } else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
+ cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
+ cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
+ cmd == MCU_UNI_CMD(HIF_CTRL) ||
+ cmd == MCU_UNI_CMD(OFFLOAD) ||
+ cmd == MCU_UNI_CMD(SUSPEND)) {
struct mt7615_mcu_uni_event *event;
skb_pull(skb, sizeof(*rxd));
event = (struct mt7615_mcu_uni_event *)skb->data;
ret = le32_to_cpu(event->status);
- break;
- }
- case MCU_CMD_REG_READ: {
+ } else if (cmd == MCU_CE_QUERY(REG_READ)) {
struct mt7615_mcu_reg_event *event;
skb_pull(skb, sizeof(*rxd));
event = (struct mt7615_mcu_reg_event *)skb->data;
ret = (int)le32_to_cpu(event->val);
- break;
- }
- default:
- break;
}
return ret;
@@ -253,8 +232,7 @@ u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg)
.address = cpu_to_le32(reg),
};
- return mt76_mcu_send_msg(&dev->mt76,
- MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS),
&req, sizeof(req), true);
}
@@ -270,8 +248,8 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
.data = cpu_to_le32(val),
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RF_REG_ACCESS, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS),
+ &req, sizeof(req), false);
}
void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
@@ -658,8 +636,8 @@ mt7615_mcu_muar_config(struct mt7615_dev *dev, struct ieee80211_vif *vif,
if (enable)
ether_addr_copy(req.addr, addr);
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MUAR_UPDATE, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MUAR_UPDATE),
+ &req, sizeof(req), true);
}
static int
@@ -702,7 +680,7 @@ mt7615_mcu_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
return mt7615_mcu_muar_config(dev, vif, false, enable);
memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DEV_INFO_UPDATE),
&data, sizeof(data), true);
}
@@ -771,7 +749,7 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
dev_kfree_skb(skb);
out:
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(BCN_OFFLOAD), &req,
sizeof(req), true);
}
@@ -802,8 +780,8 @@ mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
.band_idx = band,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL),
+ &req, sizeof(req), true);
}
static int
@@ -843,7 +821,7 @@ mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
bss = (struct bss_info_basic *)tlv;
bss->network_type = cpu_to_le32(type);
- bss->bmc_tx_wlan_idx = wlan_idx;
+ bss->bmc_wcid_lo = wlan_idx;
bss->wmm_idx = mvif->mt76.wmm_idx;
bss->active = enable;
@@ -944,7 +922,7 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
mt7615_mcu_bss_ext_tlv(skb, mvif);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
static int
@@ -966,8 +944,8 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, true,
NULL, wtbl_hdr);
- err = mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
- true);
+ err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(WTBL_UPDATE), true);
if (err < 0)
return err;
@@ -979,7 +957,7 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, true);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
}
static int
@@ -1001,7 +979,7 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, false);
err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
if (err < 0 || !enable)
return err;
@@ -1014,8 +992,8 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, false,
NULL, wtbl_hdr);
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
- true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(WTBL_UPDATE), true);
}
static int
@@ -1057,7 +1035,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
NULL, wtbl_hdr);
}
- cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
+ cmd = enable ? MCU_EXT_CMD(WTBL_UPDATE) : MCU_EXT_CMD(STA_REC_UPDATE);
skb = enable ? wskb : sskb;
err = mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
@@ -1068,7 +1046,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
return err;
}
- cmd = enable ? MCU_EXT_CMD_STA_REC_UPDATE : MCU_EXT_CMD_WTBL_UPDATE;
+ cmd = enable ? MCU_EXT_CMD(STA_REC_UPDATE) : MCU_EXT_CMD(WTBL_UPDATE);
skb = enable ? sskb : wskb;
return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
@@ -1090,8 +1068,8 @@ mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev,
mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL,
wtbl_hdr);
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
- true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(WTBL_UPDATE), true);
}
static const struct mt7615_mcu_ops wtbl_update_ops = {
@@ -1136,7 +1114,7 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
sta_wtbl, wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
}
static int
@@ -1179,7 +1157,7 @@ mt7615_mcu_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
- MCU_EXT_CMD_STA_REC_UPDATE, false);
+ MCU_EXT_CMD(STA_REC_UPDATE), false);
}
static int
@@ -1191,7 +1169,7 @@ mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
vif, &msta->wcid,
- MCU_EXT_CMD_STA_REC_UPDATE);
+ MCU_EXT_CMD(STA_REC_UPDATE));
}
static const struct mt7615_mcu_ops sta_update_ops = {
@@ -1285,7 +1263,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
dev_kfree_skb(skb);
out:
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
&req, sizeof(req), true);
}
@@ -1314,7 +1292,7 @@ mt7615_mcu_uni_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
- MCU_UNI_CMD_STA_REC_UPDATE, true);
+ MCU_UNI_CMD(STA_REC_UPDATE), true);
}
static int
@@ -1348,7 +1326,7 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, false);
err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_STA_REC_UPDATE, true);
+ MCU_UNI_CMD(STA_REC_UPDATE), true);
if (err < 0 || !enable)
return err;
@@ -1369,7 +1347,7 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
sta_wtbl, wtbl_hdr);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_STA_REC_UPDATE, true);
+ MCU_UNI_CMD(STA_REC_UPDATE), true);
}
static int
@@ -1381,7 +1359,7 @@ mt7615_mcu_sta_uni_update_hdr_trans(struct mt7615_dev *dev,
return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
vif, &msta->wcid,
- MCU_UNI_CMD_STA_REC_UPDATE);
+ MCU_UNI_CMD(STA_REC_UPDATE));
}
static const struct mt7615_mcu_ops uni_update_ops = {
@@ -1399,7 +1377,7 @@ static const struct mt7615_mcu_ops uni_update_ops = {
int mt7615_mcu_restart(struct mt76_dev *dev)
{
- return mt76_mcu_send_msg(dev, MCU_CMD_RESTART_DL_REQ, NULL, 0, true);
+ return mt76_mcu_send_msg(dev, MCU_CMD(RESTART_DL_REQ), NULL, 0, true);
}
EXPORT_SYMBOL_GPL(mt7615_mcu_restart);
@@ -1445,7 +1423,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
goto out;
}
- ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
fw->data + sizeof(*hdr), len);
if (ret) {
dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
@@ -1508,7 +1486,7 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
return err;
}
- err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
data + offset, len);
if (err) {
dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
@@ -1644,7 +1622,7 @@ static int mt7615_load_firmware(struct mt7615_dev *dev)
if (!mt76_poll_msec(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE,
FIELD_PREP(MT_TOP_MISC2_FW_STATE,
- FW_STATE_CR4_RDY), 500)) {
+ FW_STATE_RDY), 500)) {
dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
return -EIO;
}
@@ -1694,8 +1672,8 @@ int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl)
.ctrl_val = ctrl
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST, &data,
- sizeof(data), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_LOG_2_HOST),
+ &data, sizeof(data), true);
}
static int mt7615_mcu_cal_cache_apply(struct mt7615_dev *dev)
@@ -1707,7 +1685,7 @@ static int mt7615_mcu_cal_cache_apply(struct mt7615_dev *dev)
.cache_enable = true
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CAL_CACHE, &data,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(CAL_CACHE), &data,
sizeof(data), false);
}
@@ -1756,7 +1734,7 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
goto out;
}
- ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
fw->data + offset, len);
if (ret) {
dev_err(dev->mt76.dev, "Failed to send firmware\n");
@@ -1977,7 +1955,7 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
skb_put_data(skb, eep + offset, eep_len);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_EFUSE_BUFFER_MODE, true);
+ MCU_EXT_CMD(EFUSE_BUFFER_MODE), true);
}
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
@@ -2013,8 +1991,8 @@ int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
if (params->cw_max)
req.cw_max = cpu_to_le16(fls(params->cw_max));
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE),
+ &req, sizeof(req), true);
}
int mt7615_mcu_set_dbdc(struct mt7615_dev *dev)
@@ -2072,7 +2050,7 @@ int mt7615_mcu_set_dbdc(struct mt7615_dev *dev)
ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 1, 1);
out:
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DBDC_CTRL, &req,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DBDC_CTRL), &req,
sizeof(req), true);
}
@@ -2082,8 +2060,8 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
.operation = WTBL_RESET_ALL,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(WTBL_UPDATE),
+ &req, sizeof(req), true);
}
int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
@@ -2103,8 +2081,8 @@ int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
.val = val,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL),
+ &req, sizeof(req), true);
}
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
@@ -2117,8 +2095,8 @@ int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
.min_lpn = cpu_to_le16(val),
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH),
+ &req, sizeof(req), true);
}
int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
@@ -2146,8 +2124,8 @@ int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
#undef __req_field
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH),
+ &req, sizeof(req), true);
}
int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
@@ -2193,8 +2171,8 @@ int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
#undef __req_field_u32
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH),
+ &req, sizeof(req), true);
}
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
@@ -2225,7 +2203,7 @@ int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
req.pattern[i].start_time = cpu_to_le32(ts);
}
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_PATTERN,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_PATTERN),
&req, sizeof(req), false);
}
@@ -2394,8 +2372,8 @@ int mt7615_mcu_get_temperature(struct mt7615_dev *dev)
u8 rsv[3];
} req = {};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL),
+ &req, sizeof(req), true);
}
int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
@@ -2415,8 +2393,8 @@ int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
.value = cpu_to_le32(val),
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL),
+ &req, sizeof(req), false);
}
int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
@@ -2434,8 +2412,8 @@ int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
};
return mt76_mcu_send_msg(&dev->mt76,
- MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req,
- sizeof(req), true);
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
+ &req, sizeof(req), true);
}
static int mt7615_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
@@ -2574,7 +2552,7 @@ again:
out:
req.center_freq = cpu_to_le16(center_freq);
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RXDCOC_CAL, &req,
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RXDCOC_CAL), &req,
sizeof(req), true);
if ((chandef->width == NL80211_CHAN_WIDTH_80P80 ||
@@ -2695,8 +2673,8 @@ again:
out:
req.center_freq = cpu_to_le16(center_freq);
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXDPD_CAL, &req,
- sizeof(req), true);
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXDPD_CAL),
+ &req, sizeof(req), true);
if ((chandef->width == NL80211_CHAN_WIDTH_80P80 ||
chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) {
@@ -2724,7 +2702,7 @@ int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev)
.etype = cpu_to_le16(ETH_P_PAE),
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS),
&req, sizeof(req), false);
}
@@ -2759,13 +2737,13 @@ int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
if (vif->type != NL80211_IFTYPE_STATION)
return 0;
- err = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT, &req_hdr,
- sizeof(req_hdr), false);
+ err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
+ &req_hdr, sizeof(req_hdr), false);
if (err < 0 || !enable)
return err;
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
+ &req, sizeof(req), false);
}
int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
@@ -2784,6 +2762,6 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
phy->roc_grant = false;
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_ROC),
+ &req, sizeof(req), false);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index 98c383e400a1..47863ae9f30b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -76,35 +76,6 @@ struct mt7615_uni_txd {
u8 reserved2[4];
} __packed __aligned(4);
-/* event table */
-enum {
- MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
- MCU_EVENT_FW_START = 0x01,
- MCU_EVENT_GENERIC = 0x01,
- MCU_EVENT_ACCESS_REG = 0x02,
- MCU_EVENT_MT_PATCH_SEM = 0x04,
- MCU_EVENT_REG_ACCESS = 0x05,
- MCU_EVENT_SCAN_DONE = 0x0d,
- MCU_EVENT_ROC = 0x10,
- MCU_EVENT_BSS_ABSENCE = 0x11,
- MCU_EVENT_BSS_BEACON_LOSS = 0x13,
- MCU_EVENT_CH_PRIVILEGE = 0x18,
- MCU_EVENT_SCHED_SCAN_DONE = 0x23,
- MCU_EVENT_EXT = 0xed,
- MCU_EVENT_RESTART_DL = 0xef,
- MCU_EVENT_COREDUMP = 0xf0,
-};
-
-/* ext event table */
-enum {
- MCU_EXT_EVENT_PS_SYNC = 0x5,
- MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
- MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
- MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
- MCU_EXT_EVENT_RDD_REPORT = 0x3a,
- MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
-};
-
enum {
MT_SKU_CCK_1_2 = 0,
MT_SKU_CCK_55_11,
@@ -234,20 +205,6 @@ struct mt7615_mcu_rdd_report {
#define MCU_PKT_ID 0xa0
enum {
- MCU_Q_QUERY,
- MCU_Q_SET,
- MCU_Q_RESERVED,
- MCU_Q_NA
-};
-
-enum {
- MCU_S2D_H2N,
- MCU_S2D_C2N,
- MCU_S2D_H2C,
- MCU_S2D_H2CN
-};
-
-enum {
MCU_ATE_SET_FREQ_OFFSET = 0xa,
MCU_ATE_SET_TX_POWER_CONTROL = 0x15,
};
@@ -281,21 +238,6 @@ struct mt7615_roc_tlv {
} __packed;
enum {
- PATCH_NOT_DL_SEM_FAIL = 0x0,
- PATCH_IS_DL = 0x1,
- PATCH_NOT_DL_SEM_SUCCESS = 0x2,
- PATCH_REL_SEM_SUCCESS = 0x3
-};
-
-enum {
- FW_STATE_INITIAL = 0,
- FW_STATE_FW_DOWNLOAD = 1,
- FW_STATE_NORMAL_OPERATION = 2,
- FW_STATE_NORMAL_TRX = 3,
- FW_STATE_CR4_RDY = 7
-};
-
-enum {
FW_STATE_PWR_ON = 1,
FW_STATE_N9_RDY = 2,
};
@@ -312,73 +254,4 @@ enum {
__DBDC_TYPE_MAX,
};
-struct bss_info_omac {
- __le16 tag;
- __le16 len;
- u8 hw_bss_idx;
- u8 omac_idx;
- u8 band_idx;
- u8 rsv0;
- __le32 conn_type;
- u32 rsv1;
-} __packed;
-
-struct bss_info_basic {
- __le16 tag;
- __le16 len;
- __le32 network_type;
- u8 active;
- u8 rsv0;
- __le16 bcn_interval;
- u8 bssid[ETH_ALEN];
- u8 wmm_idx;
- u8 dtim_period;
- u8 bmc_tx_wlan_idx;
- u8 cipher; /* not used */
- u8 phymode; /* not used */
- u8 rsv1[5];
-} __packed;
-
-struct bss_info_rf_ch {
- __le16 tag;
- __le16 len;
- u8 pri_ch;
- u8 central_ch0;
- u8 central_ch1;
- u8 bw;
-} __packed;
-
-struct bss_info_ext_bss {
- __le16 tag;
- __le16 len;
- __le32 mbss_tsf_offset; /* in unit of us */
- u8 rsv[8];
-} __packed;
-
-enum {
- BSS_INFO_OMAC,
- BSS_INFO_BASIC,
- BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
- BSS_INFO_PM, /* sta only */
- BSS_INFO_UAPSD, /* sta only */
- BSS_INFO_ROAM_DETECTION, /* obsoleted */
- BSS_INFO_LQ_RM, /* obsoleted */
- BSS_INFO_EXT_BSS,
- BSS_INFO_BMC_INFO, /* for bmc rate control in CR4 */
- BSS_INFO_SYNC_MODE, /* obsoleted */
- BSS_INFO_RA,
- BSS_INFO_MAX_NUM
-};
-
-enum {
- CH_SWITCH_NORMAL = 0,
- CH_SWITCH_SCAN = 3,
- CH_SWITCH_MCC = 4,
- CH_SWITCH_DFS = 5,
- CH_SWITCH_BACKGROUND_SCAN_START = 6,
- CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
- CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
- CH_SWITCH_SCAN_BYPASS_DPD = 9
-};
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 71719c787511..33f72f3657d0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -135,6 +135,7 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t)
if (is_mt7663(&dev->mt76)) {
mcu_int = mt76_rr(dev, MT_MCU2HOST_INT_STATUS);
mcu_int &= MT7663_MCU_CMD_ERROR_MASK;
+ mt76_wr(dev, MT_MCU2HOST_INT_STATUS, mcu_int);
} else {
mcu_int = mt76_rr(dev, MT_MCU_CMD);
mcu_int &= MT_MCU_CMD_ERROR_MASK;
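[Annotation] The one-line addition in the mmio.c hunk above writes the masked status bits straight back to MT_MCU2HOST_INT_STATUS, which reads as a write-1-to-clear register: without the ack, the MT7663 MCU error interrupt would stay asserted and the tasklet would keep re-firing. A minimal sketch of that read/mask/ack idiom follows; the W1C semantics are inferred from the hunk, and the names below are placeholders rather than mt76 symbols.

#include <linux/io.h>
#include <linux/types.h>

/* Read/mask/ack for a write-1-to-clear status register (sketch).
 * @reg and @error_mask stand in for MT_MCU2HOST_INT_STATUS and
 * MT7663_MCU_CMD_ERROR_MASK; the W1C behaviour is an assumption.
 */
static u32 w1c_read_and_ack(void __iomem *reg, u32 error_mask)
{
	u32 status = readl(reg) & error_mask;	/* latch pending bits */

	if (status)
		writel(status, reg);	/* ack exactly what was latched */

	return status;	/* caller dispatches on the acked bits */
}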
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
index a2465b49ecd0..87b4aa52ee0f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -28,8 +28,6 @@ static void mt7615_pci_init_work(struct work_struct *work)
return;
mt7615_init_work(dev);
- if (dev->dbdc_support)
- mt7615_register_ext_phy(dev);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
@@ -160,6 +158,12 @@ int mt7615_register_device(struct mt7615_dev *dev)
mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
+ if (dev->dbdc_support) {
+ ret = mt7615_register_ext_phy(dev);
+ if (ret)
+ return ret;
+ }
+
return mt7615_init_debugfs(dev);
}
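[Annotation] The pci_init.c change above is a pattern worth naming: mt7615_register_ext_phy() used to run from the async init work, where its return value had nowhere to go; it now runs in mt7615_register_device(), so a failure aborts registration. A hedged sketch of the resulting shape, with hypothetical names (device_ctx, register_ext_phy) standing in for the mt7615 types:

#include <linux/errno.h>
#include <linux/types.h>

struct device_ctx {
	bool dbdc_support;
};

static int register_ext_phy(struct device_ctx *ctx)
{
	return 0;	/* placeholder for the fallible setup step */
}

/* After the change: the fallible step runs synchronously in the
 * register path, so its error code propagates to the caller instead
 * of vanishing inside a fire-and-forget work item.
 */
static int register_device(struct device_ctx *ctx)
{
	if (ctx->dbdc_support) {
		int ret = register_ext_phy(ctx);

		if (ret)
			return ret;
	}
	return 0;	/* continue with debugfs etc. */
}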
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
index 59d99264f5e5..a3d1cfa729ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
@@ -91,7 +91,7 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
}
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_SET_TX_POWER_CTRL, false);
+ MCU_EXT_CMD(SET_TX_POWER_CTRL), false);
}
static void
@@ -185,36 +185,35 @@ mt7615_tm_set_tx_antenna(struct mt7615_phy *phy, bool en)
for (i = 0; i < 4; i++) {
mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i),
MT_WF_PHY_RFINTF3_0_ANT,
- (td->tx_antenna_mask & BIT(i)) ? 0 : 0xa);
-
+ (mask & BIT(i)) ? 0 : 0xa);
}
/* 2.4 GHz band */
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(3), MT_ANT_SWITCH_CON_MODE(0),
- (td->tx_antenna_mask & BIT(0)) ? 0x8 : 0x1b);
+ (mask & BIT(0)) ? 0x8 : 0x1b);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(2),
- (td->tx_antenna_mask & BIT(1)) ? 0xe : 0x1b);
+ (mask & BIT(1)) ? 0xe : 0x1b);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(6), MT_ANT_SWITCH_CON_MODE1(0),
- (td->tx_antenna_mask & BIT(2)) ? 0x0 : 0xf);
+ (mask & BIT(2)) ? 0x0 : 0xf);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(2),
- (td->tx_antenna_mask & BIT(3)) ? 0x6 : 0xf);
+ (mask & BIT(3)) ? 0x6 : 0xf);
/* 5 GHz band */
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(1),
- (td->tx_antenna_mask & BIT(0)) ? 0xd : 0x1b);
+ (mask & BIT(0)) ? 0xd : 0x1b);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(2), MT_ANT_SWITCH_CON_MODE(3),
- (td->tx_antenna_mask & BIT(1)) ? 0x13 : 0x1b);
+ (mask & BIT(1)) ? 0x13 : 0x1b);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(1),
- (td->tx_antenna_mask & BIT(2)) ? 0x5 : 0xf);
+ (mask & BIT(2)) ? 0x5 : 0xf);
mt76_rmw_field(dev, MT_ANT_SWITCH_CON(8), MT_ANT_SWITCH_CON_MODE1(3),
- (td->tx_antenna_mask & BIT(3)) ? 0xb : 0xf);
+ (mask & BIT(3)) ? 0xb : 0xf);
for (i = 0; i < 4; i++) {
u32 val;
val = mt7615_rf_rr(dev, i, 0x48);
val &= ~(0x3ff << 20);
- if (td->tx_antenna_mask & BIT(i))
+ if (mask & BIT(i))
val |= 3 << 20;
else
val |= (2 << 28) | (2 << 26) | (8 << 20);
@@ -229,7 +228,7 @@ mt7615_tm_set_tx_frames(struct mt7615_phy *phy, bool en)
struct ieee80211_tx_info *info;
struct sk_buff *skb = phy->mt76->test.tx_skb;
- mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+ mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
mt7615_tm_set_tx_antenna(phy, en);
mt7615_tm_set_rx_enable(dev, !en);
if (!en || !skb)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index 028ff432d811..0ebb4c3c336a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -21,7 +21,7 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int ret, ep, len, pad;
mt7615_mcu_fill_msg(dev, skb, cmd, seq);
- if (cmd != MCU_CMD_FW_SCATTER)
+ if (cmd != MCU_CMD(FW_SCATTER))
ep = MT_EP_OUT_INBAND_CMD;
else
ep = MT_EP_OUT_AC_BE;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index af43bcb54578..306e9eaea917 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -7,9 +7,6 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
struct mt76_dev *dev = phy->dev;
- if (!pm->enable)
- return 0;
-
if (mt76_is_usb(dev))
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 26b4b875dcc0..f79e3d5084f3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -13,8 +13,8 @@ int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
.addr = cpu_to_le32(addr),
};
- return mt76_mcu_send_msg(dev, MCU_CMD_FW_START_REQ, &req, sizeof(req),
- true);
+ return mt76_mcu_send_msg(dev, MCU_CMD(FW_START_REQ), &req,
+ sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_start_firmware);
@@ -27,8 +27,8 @@ int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get)
.op = cpu_to_le32(op),
};
- return mt76_mcu_send_msg(dev, MCU_CMD_PATCH_SEM_CONTROL, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(dev, MCU_CMD(PATCH_SEM_CONTROL),
+ &req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_patch_sem_ctrl);
@@ -41,8 +41,8 @@ int mt76_connac_mcu_start_patch(struct mt76_dev *dev)
.check_crc = 0,
};
- return mt76_mcu_send_msg(dev, MCU_CMD_PATCH_FINISH_REQ, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(dev, MCU_CMD(PATCH_FINISH_REQ),
+ &req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_start_patch);
@@ -64,9 +64,9 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
if (is_mt7921(dev) &&
(req.addr == cpu_to_le32(MCU_PATCH_ADDRESS) || addr == 0x900000))
- cmd = MCU_CMD_PATCH_START_REQ;
+ cmd = MCU_CMD(PATCH_START_REQ);
else
- cmd = MCU_CMD_TARGET_ADDRESS_LEN_REQ;
+ cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
return mt76_mcu_send_msg(dev, cmd, &req, sizeof(req), true);
}
@@ -160,7 +160,8 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy)
memcpy(__skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
- return mt76_mcu_skb_send_msg(dev, skb, MCU_CMD_SET_CHAN_DOMAIN, false);
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_CE_CMD(SET_CHAN_DOMAIN),
+ false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_channel_domain);
@@ -176,7 +177,7 @@ int mt76_connac_mcu_set_mac_enable(struct mt76_dev *dev, int band, bool enable,
.band = band,
};
- return mt76_mcu_send_msg(dev, MCU_EXT_CMD_MAC_INIT_CTRL, &req_mac,
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(MAC_INIT_CTRL), &req_mac,
sizeof(req_mac), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_mac_enable);
@@ -198,8 +199,8 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif)
if (vif->type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
- return mt76_mcu_send_msg(dev, MCU_CMD_SET_PS_PROFILE, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(dev, MCU_CE_CMD(SET_PS_PROFILE),
+ &req, sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_vif_ps);
@@ -218,7 +219,7 @@ int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band)
.pkt_thresh = cpu_to_le32(0x2),
};
- return mt76_mcu_send_msg(dev, MCU_EXT_CMD_PROTECT_CTRL, &req,
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(PROTECT_CTRL), &req,
sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rts_thresh);
@@ -257,11 +258,8 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
- if (sta_hdr) {
- u16 size = le16_to_cpu(sta_hdr->len);
-
- sta_hdr->len = cpu_to_le16(size + len);
- }
+ if (sta_hdr)
+ le16_add_cpu(&sta_hdr->len, len);
return ptlv;
}
@@ -1071,7 +1069,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
- cmd = enable ? MCU_UNI_CMD_DEV_INFO_UPDATE : MCU_UNI_CMD_BSS_INFO_UPDATE;
+ cmd = enable ? MCU_UNI_CMD(DEV_INFO_UPDATE) : MCU_UNI_CMD(BSS_INFO_UPDATE);
data = enable ? (void *)&dev_req : (void *)&basic_req;
len = enable ? sizeof(dev_req) : sizeof(basic_req);
@@ -1079,7 +1077,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
if (err < 0)
return err;
- cmd = enable ? MCU_UNI_CMD_BSS_INFO_UPDATE : MCU_UNI_CMD_DEV_INFO_UPDATE;
+ cmd = enable ? MCU_UNI_CMD(BSS_INFO_UPDATE) : MCU_UNI_CMD(DEV_INFO_UPDATE);
data = enable ? (void *)&basic_req : (void *)&dev_req;
len = enable ? sizeof(basic_req) : sizeof(dev_req);
@@ -1131,7 +1129,8 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
wtbl_hdr);
- ret = mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_STA_REC_UPDATE, true);
+ ret = mt76_mcu_skb_send_msg(dev, skb,
+ MCU_UNI_CMD(STA_REC_UPDATE), true);
if (ret)
return ret;
@@ -1141,8 +1140,8 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
- return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_STA_REC_UPDATE,
- true);
+ return mt76_mcu_skb_send_msg(dev, skb,
+ MCU_UNI_CMD(STA_REC_UPDATE), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
@@ -1179,7 +1178,7 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (ht_cap->ht_supported)
mode |= PHY_MODE_GN;
- if (he_cap->has_he)
+ if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_24G;
} else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
mode |= PHY_MODE_A;
@@ -1190,12 +1189,8 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (vht_cap->vht_supported)
mode |= PHY_MODE_AC;
- if (he_cap->has_he) {
- if (band == NL80211_BAND_6GHZ)
- mode |= PHY_MODE_AX_6G;
- else
- mode |= PHY_MODE_AX_5G;
- }
+ if (he_cap && he_cap->has_he && band == NL80211_BAND_5GHZ)
+ mode |= PHY_MODE_AX_5G;
}
return mode;
@@ -1318,7 +1313,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
basic_req.basic.hw_bss_idx = idx;
if (band == NL80211_BAND_6GHZ)
- basic_req.basic.phymode_ext = BIT(0);
+ basic_req.basic.phymode_ext = PHY_MODE_AX_6G;
basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, NULL);
basic_req.basic.nonht_basic_phy = cpu_to_le16(basic_phy);
@@ -1352,7 +1347,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
basic_req.basic.sta_idx = cpu_to_le16(wcid->idx);
basic_req.basic.conn_state = !enable;
- err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, &basic_req,
+ err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &basic_req,
sizeof(basic_req), true);
if (err < 0)
return err;
@@ -1390,7 +1385,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
mt76_connac_mcu_uni_bss_he_tlv(phy, vif,
(struct tlv *)&he_req.he);
- err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE),
&he_req, sizeof(he_req), true);
if (err < 0)
return err;
@@ -1428,7 +1423,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
rlm_req.rlm.sco = 3; /* SCB */
- return mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, &rlm_req,
+ return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req,
sizeof(rlm_req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
@@ -1522,7 +1517,8 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req->scan_func |= SCAN_FUNC_RANDOM_MAC;
}
- err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_START_HW_SCAN, false);
+ err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CE_CMD(START_HW_SCAN),
+ false);
if (err < 0)
clear_bit(MT76_HW_SCANNING, &phy->state);
@@ -1550,8 +1546,8 @@ int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy,
ieee80211_scan_completed(phy->hw, &info);
}
- return mt76_mcu_send_msg(phy->dev, MCU_CMD_CANCEL_HW_SCAN, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(phy->dev, MCU_CE_CMD(CANCEL_HW_SCAN),
+ &req, sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_cancel_hw_scan);
@@ -1637,7 +1633,8 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len);
}
- return mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_SCHED_SCAN_REQ, false);
+ return mt76_mcu_skb_send_msg(mdev, skb, MCU_CE_CMD(SCHED_SCAN_REQ),
+ false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_req);
@@ -1657,8 +1654,8 @@ int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy,
else
clear_bit(MT76_HW_SCHED_SCANNING, &phy->state);
- return mt76_mcu_send_msg(phy->dev, MCU_CMD_SCHED_SCAN_ENABLE, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(phy->dev, MCU_CE_CMD(SCHED_SCAN_ENABLE),
+ &req, sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_enable);
@@ -1670,8 +1667,8 @@ int mt76_connac_mcu_chip_config(struct mt76_dev *dev)
memcpy(req.data, "assert", 7);
- return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
- false);
+ return mt76_mcu_send_msg(dev, MCU_CE_CMD(CHIP_CONFIG),
+ &req, sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_chip_config);
@@ -1683,8 +1680,8 @@ int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable)
snprintf(req.data, sizeof(req.data), "KeepFullPwr %d", !enable);
- return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
- false);
+ return mt76_mcu_send_msg(dev, MCU_CE_CMD(CHIP_CONFIG),
+ &req, sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep);
@@ -1786,8 +1783,8 @@ int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy)
struct sk_buff *skb;
int ret, i;
- ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CMD_GET_NIC_CAPAB, NULL,
- 0, true, &skb);
+ ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CE_CMD(GET_NIC_CAPAB),
+ NULL, 0, true, &skb);
if (ret)
return ret;
@@ -1885,30 +1882,6 @@ mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
}
}
-static s8 mt76_connac_get_sar_power(struct mt76_phy *phy,
- struct ieee80211_channel *chan,
- s8 target_power)
-{
- const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
- struct mt76_freq_range_power *frp = phy->frp;
- int freq, i;
-
- if (!capa || !frp)
- return target_power;
-
- freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
- for (i = 0 ; i < capa->num_freq_ranges; i++) {
- if (frp[i].range &&
- freq >= frp[i].range->start_freq &&
- freq < frp[i].range->end_freq) {
- target_power = min_t(s8, frp[i].power, target_power);
- break;
- }
- }
-
- return target_power;
-}
-
static s8 mt76_connac_get_ch_power(struct mt76_phy *phy,
struct ieee80211_channel *chan,
s8 target_power)
@@ -2008,12 +1981,12 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
}
batch_size = DIV_ROUND_UP(n_chan, batch_len);
- if (!phy->cap.has_5ghz)
- last_ch = chan_list_2ghz[n_chan - 1];
- else if (phy->cap.has_6ghz)
- last_ch = chan_list_6ghz[n_chan - 1];
+ if (phy->cap.has_6ghz)
+ last_ch = chan_list_6ghz[ARRAY_SIZE(chan_list_6ghz) - 1];
+ else if (phy->cap.has_5ghz)
+ last_ch = chan_list_5ghz[ARRAY_SIZE(chan_list_5ghz) - 1];
else
- last_ch = chan_list_5ghz[n_chan - 1];
+ last_ch = chan_list_2ghz[ARRAY_SIZE(chan_list_2ghz) - 1];
for (i = 0; i < batch_size; i++) {
struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {};
@@ -2053,8 +2026,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
reg_power = mt76_connac_get_ch_power(phy, &chan,
tx_power);
- sar_power = mt76_connac_get_sar_power(phy, &chan,
- reg_power);
+ sar_power = mt76_get_sar_power(phy, &chan, reg_power);
mt76_get_rate_power_limits(phy, &chan, &limits,
sar_power);
@@ -2070,7 +2042,8 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
memcpy(skb->data, &tx_power_tlv, sizeof(tx_power_tlv));
err = mt76_mcu_skb_send_msg(dev, skb,
- MCU_CMD_SET_RATE_TX_POWER, false);
+ MCU_CE_CMD(SET_RATE_TX_POWER),
+ false);
if (err < 0)
return err;
}
@@ -2143,7 +2116,7 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
}
- return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_OFFLOAD, true);
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(OFFLOAD), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter);
@@ -2162,8 +2135,8 @@ int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
.bss_idx = mvif->idx,
};
- return mt76_mcu_send_msg(phy->dev, MCU_CMD_SET_P2P_OPPPS, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(phy->dev, MCU_CE_CMD(SET_P2P_OPPPS),
+ &req, sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_p2p_oppps);
@@ -2249,7 +2222,8 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
memcpy(gtk_tlv->kck, key->kck, NL80211_KCK_LEN);
memcpy(gtk_tlv->replay_ctr, key->replay_ctr, NL80211_REPLAY_CTR_LEN);
- return mt76_mcu_skb_send_msg(phy->dev, skb, MCU_UNI_CMD_OFFLOAD, true);
+ return mt76_mcu_skb_send_msg(phy->dev, skb,
+ MCU_UNI_CMD(OFFLOAD), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_gtk_rekey);
@@ -2275,8 +2249,8 @@ mt76_connac_mcu_set_arp_filter(struct mt76_dev *dev, struct ieee80211_vif *vif,
},
};
- return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
- true);
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD(OFFLOAD), &req,
+ sizeof(req), true);
}
static int
@@ -2301,8 +2275,8 @@ mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif,
},
};
- return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
- true);
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD(OFFLOAD), &req,
+ sizeof(req), true);
}
static int
@@ -2331,8 +2305,8 @@ mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev,
},
};
- return mt76_mcu_send_msg(dev, MCU_UNI_CMD_SUSPEND, &req, sizeof(req),
- true);
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD(SUSPEND), &req,
+ sizeof(req), true);
}
static int
@@ -2366,7 +2340,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
memcpy(ptlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8));
- return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true);
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SUSPEND), true);
}
static int
@@ -2418,8 +2392,8 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
else if (mt76_is_sdio(dev))
req.wow_ctrl_tlv.wakeup_hif = WOW_GPIO;
- return mt76_mcu_send_msg(dev, MCU_UNI_CMD_SUSPEND, &req, sizeof(req),
- true);
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD(SUSPEND), &req,
+ sizeof(req), true);
}
int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
@@ -2452,8 +2426,8 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
else if (mt76_is_sdio(dev))
req.hdr.hif_type = 0;
- return mt76_mcu_send_msg(dev, MCU_UNI_CMD_HIF_CTRL, &req, sizeof(req),
- true);
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD(HIF_CTRL), &req,
+ sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_hif_suspend);
@@ -2461,7 +2435,7 @@ void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
struct mt76_phy *phy = priv;
- bool suspend = test_bit(MT76_STATE_SUSPEND, &phy->state);
+ bool suspend = !test_bit(MT76_STATE_RUNNING, &phy->state);
struct ieee80211_hw *hw = phy->hw;
struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
int i;
@@ -2488,8 +2462,8 @@ u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset)
.addr = cpu_to_le32(offset),
};
- return mt76_mcu_send_msg(dev, MCU_CMD_REG_READ, &req, sizeof(req),
- true);
+ return mt76_mcu_send_msg(dev, MCU_CE_QUERY(REG_READ), &req,
+ sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_rr);
@@ -2503,7 +2477,8 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
.val = cpu_to_le32(val),
};
- mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE, &req, sizeof(req), false);
+ mt76_mcu_send_msg(dev, MCU_CE_CMD(REG_WRITE), &req,
+ sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_wr);
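[Annotation] One subtlety in the conversion above: the old MCU_CMD_REG_READ was MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0, i.e. the same command ID as REG_WRITE plus a query flag. The new encoding (see the mt76_connac_mcu.h hunk below) keeps that property: MCU_CE_CMD_REG_READ and MCU_CE_CMD_REG_WRITE are both 0xc0, and MCU_CE_QUERY() just sets __MCU_CMD_FIELD_QUERY on top of MCU_CE_CMD(). A standalone C sketch that reproduces the arithmetic, using plain masks and shifts in place of the kernel's GENMASK/FIELD_PREP:

#include <stdio.h>
#include <stdint.h>

#define FIELD_ID	0xffu		/* __MCU_CMD_FIELD_ID: bits 7:0 */
#define FIELD_QUERY	(1u << 16)	/* __MCU_CMD_FIELD_QUERY */
#define FIELD_CE	(1u << 18)	/* __MCU_CMD_FIELD_CE */

#define CE_CMD(id)	(FIELD_CE | ((id) & FIELD_ID))
#define CE_QUERY(id)	(CE_CMD(id) | FIELD_QUERY)

int main(void)
{
	/* REG_WRITE and REG_READ share command ID 0xc0; only the
	 * query bit differs under the new scheme.
	 */
	printf("REG_WRITE = 0x%05x\n", CE_CMD(0xc0));	/* 0x400c0 */
	printf("REG_READ  = 0x%05x\n", CE_QUERY(0xc0));	/* 0x500c0 */
	return 0;
}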
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 4e2c9dafd776..5baf8370b7bd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -11,6 +11,76 @@ struct tlv {
__le16 len;
} __packed;
+struct bss_info_omac {
+ __le16 tag;
+ __le16 len;
+ u8 hw_bss_idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 rsv0;
+ __le32 conn_type;
+ u32 rsv1;
+} __packed;
+
+struct bss_info_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 network_type;
+ u8 active;
+ u8 rsv0;
+ __le16 bcn_interval;
+ u8 bssid[ETH_ALEN];
+ u8 wmm_idx;
+ u8 dtim_period;
+ u8 bmc_wcid_lo;
+ u8 cipher;
+ u8 phy_mode;
+ u8 max_bssid; /* max BSSID. range: 1 ~ 8, 0: MBSSID disabled */
+ u8 non_tx_bssid;/* non-transmitted BSSID, 0: transmitted BSSID */
+ u8 bmc_wcid_hi; /* high Byte and version */
+ u8 rsv[2];
+} __packed;
+
+struct bss_info_rf_ch {
+ __le16 tag;
+ __le16 len;
+ u8 pri_ch;
+ u8 center_ch0;
+ u8 center_ch1;
+ u8 bw;
+ u8 he_ru26_block; /* 1: don't send HETB in RU26, 0: allow */
+ u8 he_all_disable; /* 1: disallow all HETB, 0: allow */
+ u8 rsv[2];
+} __packed;
+
+struct bss_info_ext_bss {
+ __le16 tag;
+ __le16 len;
+ __le32 mbss_tsf_offset; /* in unit of us */
+ u8 rsv[8];
+} __packed;
+
+enum {
+ BSS_INFO_OMAC,
+ BSS_INFO_BASIC,
+ BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
+ BSS_INFO_PM, /* sta only */
+ BSS_INFO_UAPSD, /* sta only */
+ BSS_INFO_ROAM_DETECT, /* obsoleted */
+ BSS_INFO_LQ_RM, /* obsoleted */
+ BSS_INFO_EXT_BSS,
+ BSS_INFO_BMC_RATE, /* for bmc rate control in CR4 */
+ BSS_INFO_SYNC_MODE, /* obsoleted */
+ BSS_INFO_RA,
+ BSS_INFO_HW_AMSDU,
+ BSS_INFO_BSS_COLOR,
+ BSS_INFO_HE_BASIC,
+ BSS_INFO_PROTECT_INFO,
+ BSS_INFO_OFFLOAD,
+ BSS_INFO_11V_MBSSID,
+ BSS_INFO_MAX_NUM
+};
+
/* sta_rec */
struct sta_ntlv_hdr {
@@ -54,7 +124,7 @@ struct sta_rec_vht {
__le32 vht_cap;
__le16 vht_rx_mcs_map;
__le16 vht_tx_mcs_map;
- /* mt7921 */
+ /* mt7915 - mt7921 */
u8 rts_bw_sig;
u8 rsv[3];
} __packed;
@@ -152,6 +222,191 @@ struct sta_rec_he_6g_capa {
u8 rsv[2];
} __packed;
+struct sec_key {
+ u8 cipher_id;
+ u8 cipher_len;
+ u8 key_id;
+ u8 key_len;
+ u8 key[32];
+} __packed;
+
+struct sta_rec_sec {
+ __le16 tag;
+ __le16 len;
+ u8 add;
+ u8 n_cipher;
+ u8 rsv[2];
+
+ struct sec_key key[2];
+} __packed;
+
+struct sta_rec_bf {
+ __le16 tag;
+ __le16 len;
+
+ __le16 pfmu; /* 0xffff: no access right for PFMU */
+ bool su_mu; /* 0: SU, 1: MU */
+ u8 bf_cap; /* 0: iBF, 1: eBF */
+ u8 sounding_phy; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT */
+ u8 ndpa_rate;
+ u8 ndp_rate;
+ u8 rept_poll_rate;
+ u8 tx_mode; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT ... */
+ u8 ncol;
+ u8 nrow;
+ u8 bw; /* 0: 20M, 1: 40M, 2: 80M, 3: 160M */
+
+ u8 mem_total;
+ u8 mem_20m;
+ struct {
+ u8 row;
+ u8 col: 6, row_msb: 2;
+ } mem[4];
+
+ __le16 smart_ant;
+ u8 se_idx;
+ u8 auto_sounding; /* b7: low traffic indicator
+ * b6: Stop sounding for this entry
+ * b5 ~ b0: postpone sounding
+ */
+ u8 ibf_timeout;
+ u8 ibf_dbw;
+ u8 ibf_ncol;
+ u8 ibf_nrow;
+ u8 nrow_bw160;
+ u8 ncol_bw160;
+ u8 ru_start_idx;
+ u8 ru_end_idx;
+
+ bool trigger_su;
+ bool trigger_mu;
+ bool ng16_su;
+ bool ng16_mu;
+ bool codebook42_su;
+ bool codebook75_mu;
+
+ u8 he_ltf;
+ u8 rsv[3];
+} __packed;
+
+struct sta_rec_bfee {
+ __le16 tag;
+ __le16 len;
+ bool fb_identity_matrix; /* 1: feedback identity matrix */
+ bool ignore_feedback; /* 1: ignore */
+ u8 rsv[2];
+} __packed;
+
+struct sta_rec_muru {
+ __le16 tag;
+ __le16 len;
+
+ struct {
+ bool ofdma_dl_en;
+ bool ofdma_ul_en;
+ bool mimo_dl_en;
+ bool mimo_ul_en;
+ u8 rsv[4];
+ } cfg;
+
+ struct {
+ u8 punc_pream_rx;
+ bool he_20m_in_40m_2g;
+ bool he_20m_in_160m;
+ bool he_80m_in_160m;
+ bool lt16_sigb;
+ bool rx_su_comp_sigb;
+ bool rx_su_non_comp_sigb;
+ u8 rsv;
+ } ofdma_dl;
+
+ struct {
+ u8 t_frame_dur;
+ u8 mu_cascading;
+ u8 uo_ra;
+ u8 he_2x996_tone;
+ u8 rx_t_frame_11ac;
+ u8 rsv[3];
+ } ofdma_ul;
+
+ struct {
+ bool vht_mu_bfee;
+ bool partial_bw_dl_mimo;
+ u8 rsv[2];
+ } mimo_dl;
+
+ struct {
+ bool full_ul_mimo;
+ bool partial_ul_mimo;
+ u8 rsv[2];
+ } mimo_ul;
+} __packed;
+
+struct sta_phy {
+ u8 type;
+ u8 flag;
+ u8 stbc;
+ u8 sgi;
+ u8 bw;
+ u8 ldpc;
+ u8 mcs;
+ u8 nss;
+ u8 he_ltf;
+};
+
+struct sta_rec_ra {
+ __le16 tag;
+ __le16 len;
+
+ u8 valid;
+ u8 auto_rate;
+ u8 phy_mode;
+ u8 channel;
+ u8 bw;
+ u8 disable_cck;
+ u8 ht_mcs32;
+ u8 ht_gf;
+ u8 ht_mcs[4];
+ u8 mmps_mode;
+ u8 gband_256;
+ u8 af;
+ u8 auth_wapi_mode;
+ u8 rate_len;
+
+ u8 supp_mode;
+ u8 supp_cck_rate;
+ u8 supp_ofdm_rate;
+ __le32 supp_ht_mcs;
+ __le16 supp_vht_mcs[4];
+
+ u8 op_mode;
+ u8 op_vht_chan_width;
+ u8 op_vht_rx_nss;
+ u8 op_vht_rx_nss_type;
+
+ __le32 sta_cap;
+
+ struct sta_phy phy;
+} __packed;
+
+struct sta_rec_ra_fixed {
+ __le16 tag;
+ __le16 len;
+
+ __le32 field;
+ u8 op_mode;
+ u8 op_vht_chan_width;
+ u8 op_vht_rx_nss;
+ u8 op_vht_rx_nss_type;
+
+ struct sta_phy phy;
+
+ u8 spe_en;
+ u8 short_preamble;
+ u8 is_5g;
+ u8 mmps_mode;
+} __packed;
+
/* wtbl_rec */
struct wtbl_req_hdr {
@@ -234,6 +489,7 @@ struct wtbl_ba {
__le16 sn;
u8 ba_en;
u8 ba_winsize_idx;
+ /* originator & recipient */
__le16 ba_winsize;
/* recipient only */
u8 peer_addr[ETH_ALEN];
@@ -304,12 +560,17 @@ struct wtbl_raw {
#define MT76_CONNAC_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_bf) + \
sizeof(struct sta_rec_ht) + \
sizeof(struct sta_rec_he) + \
sizeof(struct sta_rec_ba) + \
sizeof(struct sta_rec_vht) + \
sizeof(struct sta_rec_uapsd) + \
sizeof(struct sta_rec_amsdu) + \
+ sizeof(struct sta_rec_muru) + \
+ sizeof(struct sta_rec_bfee) + \
+ sizeof(struct sta_rec_ra) + \
+ sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct tlv) + \
MT76_CONNAC_WTBL_UPDATE_MAX_SIZE)
@@ -423,7 +684,8 @@ enum {
#define PHY_MODE_AC BIT(5)
#define PHY_MODE_AX_24G BIT(6)
#define PHY_MODE_AX_5G BIT(7)
-#define PHY_MODE_AX_6G BIT(8)
+
+#define PHY_MODE_AX_6G BIT(0) /* phymode_ext */
#define MODE_CCK BIT(0)
#define MODE_OFDM BIT(1)
@@ -431,6 +693,21 @@ enum {
#define MODE_VHT BIT(3)
#define MODE_HE BIT(4)
+#define STA_CAP_WMM BIT(0)
+#define STA_CAP_SGI_20 BIT(4)
+#define STA_CAP_SGI_40 BIT(5)
+#define STA_CAP_TX_STBC BIT(6)
+#define STA_CAP_RX_STBC BIT(7)
+#define STA_CAP_VHT_SGI_80 BIT(16)
+#define STA_CAP_VHT_SGI_160 BIT(17)
+#define STA_CAP_VHT_TX_STBC BIT(18)
+#define STA_CAP_VHT_RX_STBC BIT(19)
+#define STA_CAP_VHT_LDPC BIT(23)
+#define STA_CAP_LDPC BIT(24)
+#define STA_CAP_HT BIT(26)
+#define STA_CAP_VHT BIT(27)
+#define STA_CAP_HE BIT(28)
+
enum {
PHY_TYPE_HR_DSSS_INDEX = 0,
PHY_TYPE_ERP_INDEX,
@@ -489,6 +766,121 @@ enum {
DEV_INFO_MAX_NUM
};
+/* event table */
+enum {
+ MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
+ MCU_EVENT_FW_START = 0x01,
+ MCU_EVENT_GENERIC = 0x01,
+ MCU_EVENT_ACCESS_REG = 0x02,
+ MCU_EVENT_MT_PATCH_SEM = 0x04,
+ MCU_EVENT_REG_ACCESS = 0x05,
+ MCU_EVENT_LP_INFO = 0x07,
+ MCU_EVENT_SCAN_DONE = 0x0d,
+ MCU_EVENT_TX_DONE = 0x0f,
+ MCU_EVENT_ROC = 0x10,
+ MCU_EVENT_BSS_ABSENCE = 0x11,
+ MCU_EVENT_BSS_BEACON_LOSS = 0x13,
+ MCU_EVENT_CH_PRIVILEGE = 0x18,
+ MCU_EVENT_SCHED_SCAN_DONE = 0x23,
+ MCU_EVENT_DBG_MSG = 0x27,
+ MCU_EVENT_TXPWR = 0xd0,
+ MCU_EVENT_EXT = 0xed,
+ MCU_EVENT_RESTART_DL = 0xef,
+ MCU_EVENT_COREDUMP = 0xf0,
+};
+
+/* ext event table */
+enum {
+ MCU_EXT_EVENT_PS_SYNC = 0x5,
+ MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
+ MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
+ MCU_EXT_EVENT_RDD_REPORT = 0x3a,
+ MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
+ MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
+ MCU_EXT_EVENT_MURU_CTRL = 0x9f,
+};
+
+enum {
+ MCU_Q_QUERY,
+ MCU_Q_SET,
+ MCU_Q_RESERVED,
+ MCU_Q_NA
+};
+
+enum {
+ MCU_S2D_H2N,
+ MCU_S2D_C2N,
+ MCU_S2D_H2C,
+ MCU_S2D_H2CN
+};
+
+enum {
+ PATCH_NOT_DL_SEM_FAIL,
+ PATCH_IS_DL,
+ PATCH_NOT_DL_SEM_SUCCESS,
+ PATCH_REL_SEM_SUCCESS
+};
+
+enum {
+ FW_STATE_INITIAL,
+ FW_STATE_FW_DOWNLOAD,
+ FW_STATE_NORMAL_OPERATION,
+ FW_STATE_NORMAL_TRX,
+ FW_STATE_RDY = 7
+};
+
+enum {
+ CH_SWITCH_NORMAL = 0,
+ CH_SWITCH_SCAN = 3,
+ CH_SWITCH_MCC = 4,
+ CH_SWITCH_DFS = 5,
+ CH_SWITCH_BACKGROUND_SCAN_START = 6,
+ CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
+ CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
+ CH_SWITCH_SCAN_BYPASS_DPD = 9
+};
+
+enum {
+ THERMAL_SENSOR_TEMP_QUERY,
+ THERMAL_SENSOR_MANUAL_CTRL,
+ THERMAL_SENSOR_INFO_QUERY,
+ THERMAL_SENSOR_TASK_CTRL,
+};
+
+enum mcu_cipher_type {
+ MCU_CIPHER_NONE = 0,
+ MCU_CIPHER_WEP40,
+ MCU_CIPHER_WEP104,
+ MCU_CIPHER_WEP128,
+ MCU_CIPHER_TKIP,
+ MCU_CIPHER_AES_CCMP,
+ MCU_CIPHER_CCMP_256,
+ MCU_CIPHER_GCMP,
+ MCU_CIPHER_GCMP_256,
+ MCU_CIPHER_WAPI,
+ MCU_CIPHER_BIP_CMAC_128,
+};
+
+enum {
+ EE_MODE_EFUSE,
+ EE_MODE_BUFFER,
+};
+
+enum {
+ EE_FORMAT_BIN,
+ EE_FORMAT_WHOLE,
+ EE_FORMAT_MULTIPLE,
+};
+
+enum {
+ MCU_PHY_STATE_TX_RATE,
+ MCU_PHY_STATE_RX_RATE,
+ MCU_PHY_STATE_RSSI,
+ MCU_PHY_STATE_CONTENTION_RX_RATE,
+ MCU_PHY_STATE_OFDMLQ_CNINFO,
+};
+
#define MCU_CMD_ACK BIT(0)
#define MCU_CMD_UNI BIT(1)
#define MCU_CMD_QUERY BIT(2)
@@ -496,29 +888,51 @@ enum {
#define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | \
MCU_CMD_QUERY)
-#define MCU_FW_PREFIX BIT(31)
-#define MCU_UNI_PREFIX BIT(30)
-#define MCU_CE_PREFIX BIT(29)
-#define MCU_QUERY_PREFIX BIT(28)
-#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \
- MCU_CE_PREFIX | MCU_QUERY_PREFIX)
-
-#define MCU_QUERY_MASK BIT(16)
+#define __MCU_CMD_FIELD_ID GENMASK(7, 0)
+#define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8)
+#define __MCU_CMD_FIELD_QUERY BIT(16)
+#define __MCU_CMD_FIELD_UNI BIT(17)
+#define __MCU_CMD_FIELD_CE BIT(18)
+#define __MCU_CMD_FIELD_WA BIT(19)
+
+#define MCU_CMD(_t) FIELD_PREP(__MCU_CMD_FIELD_ID, \
+ MCU_CMD_##_t)
+#define MCU_EXT_CMD(_t) (MCU_CMD(EXT_CID) | \
+ FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \
+ MCU_EXT_CMD_##_t))
+#define MCU_EXT_QUERY(_t) (MCU_EXT_CMD(_t) | __MCU_CMD_FIELD_QUERY)
+#define MCU_UNI_CMD(_t) (__MCU_CMD_FIELD_UNI | \
+ FIELD_PREP(__MCU_CMD_FIELD_ID, \
+ MCU_UNI_CMD_##_t))
+#define MCU_CE_CMD(_t) (__MCU_CMD_FIELD_CE | \
+ FIELD_PREP(__MCU_CMD_FIELD_ID, \
+ MCU_CE_CMD_##_t))
+#define MCU_CE_QUERY(_t) (MCU_CE_CMD(_t) | __MCU_CMD_FIELD_QUERY)
+
+#define MCU_WA_CMD(_t) (MCU_CMD(_t) | __MCU_CMD_FIELD_WA)
+#define MCU_WA_EXT_CMD(_t) (MCU_EXT_CMD(_t) | __MCU_CMD_FIELD_WA)
+#define MCU_WA_PARAM_CMD(_t) (MCU_WA_CMD(WA_PARAM) | \
+ FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \
+ MCU_WA_PARAM_CMD_##_t))
enum {
MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
+ MCU_EXT_CMD_RF_TEST = 0x04,
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_CMD_TXBF_ACTION = 0x1e,
MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+ MCU_EXT_CMD_THERMAL_PROT = 0x23,
MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
MCU_EXT_CMD_EDCA_UPDATE = 0x27,
MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
- MCU_EXT_CMD_GET_TEMP = 0x2c,
+ MCU_EXT_CMD_THERMAL_CTRL = 0x2c,
MCU_EXT_CMD_WTBL_UPDATE = 0x32,
+ MCU_EXT_CMD_SET_DRR_CTRL = 0x36,
MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
MCU_EXT_CMD_ATE_CTRL = 0x3d,
MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
@@ -527,59 +941,74 @@ enum {
MCU_EXT_CMD_RX_HDR_TRANS = 0x47,
MCU_EXT_CMD_MUAR_UPDATE = 0x48,
MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
+ MCU_EXT_CMD_RX_AIRTIME_CTRL = 0x4a,
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+ MCU_EXT_CMD_EFUSE_FREE_BLOCK = 0x4f,
MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
MCU_EXT_CMD_RXDCOC_CAL = 0x59,
+ MCU_EXT_CMD_GET_MIB_INFO = 0x5a,
MCU_EXT_CMD_TXDPD_CAL = 0x60,
MCU_EXT_CMD_CAL_CACHE = 0x67,
- MCU_EXT_CMD_SET_RDD_TH = 0x7c,
+ MCU_EXT_CMD_SET_RADAR_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
+ MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
+ MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
+ MCU_EXT_CMD_SCS_CTRL = 0x82,
+ MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94,
+ MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
+ MCU_EXT_CMD_SET_RDD_TH = 0x9d,
+ MCU_EXT_CMD_MURU_CTRL = 0x9f,
+ MCU_EXT_CMD_SET_SPR = 0xa8,
+ MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab,
+ MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac,
+ MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
};
enum {
- MCU_UNI_CMD_DEV_INFO_UPDATE = MCU_UNI_PREFIX | 0x01,
- MCU_UNI_CMD_BSS_INFO_UPDATE = MCU_UNI_PREFIX | 0x02,
- MCU_UNI_CMD_STA_REC_UPDATE = MCU_UNI_PREFIX | 0x03,
- MCU_UNI_CMD_SUSPEND = MCU_UNI_PREFIX | 0x05,
- MCU_UNI_CMD_OFFLOAD = MCU_UNI_PREFIX | 0x06,
- MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07,
+ MCU_UNI_CMD_DEV_INFO_UPDATE = 0x01,
+ MCU_UNI_CMD_BSS_INFO_UPDATE = 0x02,
+ MCU_UNI_CMD_STA_REC_UPDATE = 0x03,
+ MCU_UNI_CMD_SUSPEND = 0x05,
+ MCU_UNI_CMD_OFFLOAD = 0x06,
+ MCU_UNI_CMD_HIF_CTRL = 0x07,
};
enum {
- MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01,
- MCU_CMD_FW_START_REQ = MCU_FW_PREFIX | 0x02,
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
+ MCU_CMD_FW_START_REQ = 0x02,
MCU_CMD_INIT_ACCESS_REG = 0x3,
- MCU_CMD_NIC_POWER_CTRL = MCU_FW_PREFIX | 0x4,
- MCU_CMD_PATCH_START_REQ = MCU_FW_PREFIX | 0x05,
- MCU_CMD_PATCH_FINISH_REQ = MCU_FW_PREFIX | 0x07,
- MCU_CMD_PATCH_SEM_CONTROL = MCU_FW_PREFIX | 0x10,
+ MCU_CMD_NIC_POWER_CTRL = 0x4,
+ MCU_CMD_PATCH_START_REQ = 0x05,
+ MCU_CMD_PATCH_FINISH_REQ = 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+ MCU_CMD_WA_PARAM = 0xc4,
MCU_CMD_EXT_CID = 0xed,
- MCU_CMD_FW_SCATTER = MCU_FW_PREFIX | 0xee,
- MCU_CMD_RESTART_DL_REQ = MCU_FW_PREFIX | 0xef,
+ MCU_CMD_FW_SCATTER = 0xee,
+ MCU_CMD_RESTART_DL_REQ = 0xef,
};
/* offload mcu commands */
enum {
- MCU_CMD_TEST_CTRL = MCU_CE_PREFIX | 0x01,
- MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
- MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05,
- MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f,
- MCU_CMD_SET_BSS_CONNECTED = MCU_CE_PREFIX | 0x16,
- MCU_CMD_SET_BSS_ABORT = MCU_CE_PREFIX | 0x17,
- MCU_CMD_CANCEL_HW_SCAN = MCU_CE_PREFIX | 0x1b,
- MCU_CMD_SET_ROC = MCU_CE_PREFIX | 0x1d,
- MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33,
- MCU_CMD_SET_RATE_TX_POWER = MCU_CE_PREFIX | 0x5d,
- MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
- MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
- MCU_CMD_GET_NIC_CAPAB = MCU_CE_PREFIX | 0x8a,
- MCU_CMD_SET_MU_EDCA_PARMS = MCU_CE_PREFIX | 0xb0,
- MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
- MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
- MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca,
- MCU_CMD_FWLOG_2_HOST = MCU_CE_PREFIX | 0xc5,
- MCU_CMD_GET_WTBL = MCU_CE_PREFIX | 0xcd,
- MCU_CMD_GET_TXPWR = MCU_CE_PREFIX | 0xd0,
+ MCU_CE_CMD_TEST_CTRL = 0x01,
+ MCU_CE_CMD_START_HW_SCAN = 0x03,
+ MCU_CE_CMD_SET_PS_PROFILE = 0x05,
+ MCU_CE_CMD_SET_CHAN_DOMAIN = 0x0f,
+ MCU_CE_CMD_SET_BSS_CONNECTED = 0x16,
+ MCU_CE_CMD_SET_BSS_ABORT = 0x17,
+ MCU_CE_CMD_CANCEL_HW_SCAN = 0x1b,
+ MCU_CE_CMD_SET_ROC = 0x1d,
+ MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
+ MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
+ MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
+ MCU_CE_CMD_SCHED_SCAN_REQ = 0x62,
+ MCU_CE_CMD_GET_NIC_CAPAB = 0x8a,
+ MCU_CE_CMD_SET_MU_EDCA_PARMS = 0xb0,
+ MCU_CE_CMD_REG_WRITE = 0xc0,
+ MCU_CE_CMD_REG_READ = 0xc0,
+ MCU_CE_CMD_CHIP_CONFIG = 0xca,
+ MCU_CE_CMD_FWLOG_2_HOST = 0xc5,
+ MCU_CE_CMD_GET_WTBL = 0xcd,
+ MCU_CE_CMD_GET_TXPWR = 0xd0,
};
enum {
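[Annotation] The heart of this refactor is visible in the hunk above: the old high prefix bits (MCU_FW_PREFIX at BIT(31), MCU_UNI_PREFIX, MCU_CE_PREFIX, MCU_QUERY_PREFIX) are replaced by a packed layout with the raw command ID in bits 7:0, an extended command ID in bits 15:8, and QUERY/UNI/CE/WA flags in bits 16-19. MCU_EXT_CMD(_t) therefore encodes two IDs at once: the carrier command MCU_CMD_EXT_CID (0xed) in the low byte and the extended ID above it. A standalone C sketch that mirrors the macros, showing what MCU_EXT_CMD(STA_REC_UPDATE) evaluates to and how the fields come back out (explicit shifts stand in for FIELD_PREP/FIELD_GET):

#include <stdio.h>
#include <stdint.h>

/* Mirrors __MCU_CMD_FIELD_ID / __MCU_CMD_FIELD_EXT_ID; demo only. */
#define FIELD_ID(x)	((uint32_t)(x) & 0xff)		/* bits 7:0  */
#define FIELD_EXT(x)	(((uint32_t)(x) & 0xff) << 8)	/* bits 15:8 */

#define CMD_EXT_CID		0xed	/* MCU_CMD_EXT_CID */
#define EXT_STA_REC_UPDATE	0x25	/* MCU_EXT_CMD_STA_REC_UPDATE */

#define EXT_CMD(ext)	(FIELD_ID(CMD_EXT_CID) | FIELD_EXT(ext))

int main(void)
{
	uint32_t cmd = EXT_CMD(EXT_STA_REC_UPDATE);

	/* 0x25ed: EXT_CID in the low byte, STA_REC_UPDATE above it */
	printf("MCU_EXT_CMD(STA_REC_UPDATE) = 0x%04x\n", cmd);
	printf("base id = 0x%02x, ext id = 0x%02x\n",
	       cmd & 0xff, (cmd >> 8) & 0xff);
	return 0;
}

The visible payoff in the mcu.c hunks earlier in this patch is that every call site now names both the command family and the ID in a single macro, instead of OR-ing driver-local prefix bits into the constant.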
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 0bac39bf3b66..66d47c70111a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -237,7 +237,10 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
{
int ret;
- mt76x02_init_device(dev);
+ ret = mt76x02_init_device(dev);
+ if (ret)
+ return ret;
+
mt76x02_config_mac_addr_list(dev);
ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index 700ae9c12f1d..07380cce8755 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -31,6 +31,32 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76_txq_schedule_all(&dev->mphy);
}
+int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ int err = -EINVAL, power = hw->conf.power_level * 2;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76_phy *mphy = &dev->mphy;
+
+ mutex_lock(&dev->mt76.mutex);
+ if (!cfg80211_chandef_valid(&mphy->chandef))
+ goto out;
+
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ goto out;
+
+ dev->txpower_conf = mt76_get_sar_power(mphy, mphy->chandef.chan,
+ power);
+ if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+ mt76x0_phy_set_txpower(dev);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76x0_set_sar_specs);
+
int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
@@ -44,9 +70,13 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->txpower_conf = hw->conf.power_level * 2;
+ struct mt76_phy *mphy = &dev->mphy;
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ dev->txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = mt76_get_sar_power(mphy,
+ mphy->chandef.chan,
+ dev->txpower_conf);
+ if (test_bit(MT76_STATE_RUNNING, &mphy->state))
mt76x0_phy_set_txpower(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 6953f253a28a..99dcb8feb9f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -49,6 +49,8 @@ void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
void mt76x0_mac_stop(struct mt76x02_dev *dev);
int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
+int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar);
/* PHY */
void mt76x0_phy_init(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index f19228fc5a70..9277ff38b7a2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -85,6 +85,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.set_rts_threshold = mt76x02_set_rts_threshold,
.get_antenna = mt76_get_antenna,
.reconfig_complete = mt76x02_reconfig_complete,
+ .set_sar_specs = mt76x0_set_sar_specs,
};
static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index f2b2fa733845..436daf6d6d86 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -141,6 +141,7 @@ static const struct ieee80211_ops mt76x0u_ops = {
.set_tim = mt76_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
.get_antenna = mt76_get_antenna,
+ .set_sar_specs = mt76x0_set_sar_specs,
};
static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 4d58c2c1c0ac..44d1a92d9a90 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -133,7 +133,7 @@ struct mt76x02_dev {
extern struct ieee80211_rate mt76x02_rates[12];
-void mt76x02_init_device(struct mt76x02_dev *dev);
+int mt76x02_init_device(struct mt76x02_dev *dev);
void mt76x02_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 1f17d86ff755..dd30f537676d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -138,7 +138,7 @@ mt76x02_led_set_brightness(struct led_classdev *led_cdev,
mt76x02_led_set_config(mdev, 0xff, 0);
}
-void mt76x02_init_device(struct mt76x02_dev *dev)
+int mt76x02_init_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
@@ -197,6 +197,8 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
dev->mphy.chainmask = 0x101;
dev->mphy.antenna_mask = 1;
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_init_device);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index a92a479aebaa..7b01a06d7f8d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -8,6 +8,35 @@
#include "eeprom.h"
#include "../mt76x02_phy.h"
+int mt76x2_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ int err = -EINVAL, power = hw->conf.power_level * 2;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76_phy *mphy = &dev->mphy;
+
+ mutex_lock(&dev->mt76.mutex);
+ if (!cfg80211_chandef_valid(&mphy->chandef))
+ goto out;
+
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ goto out;
+
+ dev->txpower_conf = mt76_get_sar_power(mphy, mphy->chandef.chan,
+ power);
+ /* convert to per-chain power for 2x2 devices */
+ dev->txpower_conf -= 6;
+
+ if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+ mt76x2_phy_set_txpower(dev);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76x2_set_sar_specs);
+
static void
mt76x2_set_wlan_state(struct mt76x02_dev *dev, bool enable)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index d01f47c83eb1..be1217329a77 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -41,6 +41,8 @@ extern const struct ieee80211_ops mt76x2_ops;
int mt76x2_register_device(struct mt76x02_dev *dev);
int mt76x2_resume_device(struct mt76x02_dev *dev);
+int mt76x2_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar);
void mt76x2_phy_power_on(struct mt76x02_dev *dev);
void mt76x2_stop_hardware(struct mt76x02_dev *dev);
int mt76x2_eeprom_init(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index c6fa8cf92529..e38e8e5685c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -292,8 +292,9 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
int ret;
INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
-
- mt76x02_init_device(dev);
+ ret = mt76x02_init_device(dev);
+ if (ret)
+ return ret;
ret = mt76x2_init_hardware(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 933125b07ea3..b38bb7a2362b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -78,8 +78,12 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->txpower_conf = hw->conf.power_level * 2;
+ struct mt76_phy *mphy = &dev->mphy;
+ dev->txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = mt76_get_sar_power(mphy,
+ mphy->chandef.chan,
+ dev->txpower_conf);
/* convert to per-chain power for 2x2 devices */
dev->txpower_conf -= 6;
@@ -155,5 +159,6 @@ const struct ieee80211_ops mt76x2_ops = {
.get_antenna = mt76_get_antenna,
.set_rts_threshold = mt76x02_set_rts_threshold,
.reconfig_complete = mt76x02_reconfig_complete,
+ .set_sar_specs = mt76x2_set_sar_specs,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 85dcdc22fbeb..33a14365ec9b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -194,7 +194,9 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
int err;
INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
- mt76x02_init_device(dev);
+ err = mt76x02_init_device(dev);
+ if (err)
+ return err;
err = mt76x2u_init_eeprom(dev);
if (err < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index b66836928d9d..ac07ed1f63a3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -78,12 +78,16 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->txpower_conf = hw->conf.power_level * 2;
+ struct mt76_phy *mphy = &dev->mphy;
+ dev->txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = mt76_get_sar_power(mphy,
+ mphy->chandef.chan,
+ dev->txpower_conf);
/* convert to per-chain power for 2x2 devices */
dev->txpower_conf -= 6;
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ if (test_bit(MT76_STATE_RUNNING, &mphy->state))
mt76x2_phy_set_txpower(dev);
}
@@ -121,4 +125,5 @@ const struct ieee80211_ops mt76x2u_ops = {
.set_tim = mt76_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
.get_antenna = mt76_get_antenna,
+ .set_sar_specs = mt76x2_set_sar_specs,
};
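[Annotation] Both the mt76x0 and mt76x2 paths above now route the configured power level through mt76_get_sar_power() before applying it, replacing the connac-local helper deleted earlier in this patch (mt76_connac_get_sar_power). That helper's logic is visible in the removed hunk: find the SAR frequency range containing the current channel and take the minimum of the per-range limit and the requested power. A hedged standalone sketch of that clamp follows, with simplified types (the real code works on struct mt76_phy and the cfg80211 SAR capabilities):

#include <stdint.h>

/* Simplified stand-ins for the cfg80211/mt76 types; demo only. */
struct freq_range_power {
	uint32_t start_khz, end_khz;	/* half-open frequency range */
	int8_t power;			/* SAR limit, 0.5 dBm units */
};

/* Clamp target_power to the SAR limit of the range containing freq,
 * mirroring the deleted mt76_connac_get_sar_power() logic above.
 */
static int8_t sar_clamp(const struct freq_range_power *frp, int n,
			uint32_t freq_khz, int8_t target_power)
{
	int i;

	for (i = 0; i < n; i++) {
		if (freq_khz >= frp[i].start_khz &&
		    freq_khz < frp[i].end_khz)
			return frp[i].power < target_power ?
			       frp[i].power : target_power;
	}
	return target_power;	/* no matching range: leave unchanged */
}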
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index a15aa256d0cf..e96d1c31dd36 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -82,6 +82,225 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
mt7915_radar_trigger, "%lld\n");
static int
+mt7915_muru_debug_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+
+ dev->muru_debug = val;
+ mt7915_mcu_muru_debug_set(dev, data);
+
+ return 0;
+}
+
+static int
+mt7915_muru_debug_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->muru_debug;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_muru_debug, mt7915_muru_debug_get,
+ mt7915_muru_debug_set, "%lld\n");
+
+static int mt7915_muru_stats_show(struct seq_file *file, void *data)
+{
+ struct mt7915_phy *phy = file->private;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_mcu_muru_stats mu_stats = {};
+ static const char * const dl_non_he_type[] = {
+ "CCK", "OFDM", "HT MIX", "HT GF",
+ "VHT SU", "VHT 2MU", "VHT 3MU", "VHT 4MU"
+ };
+ static const char * const dl_he_type[] = {
+ "HE SU", "HE EXT", "HE 2MU", "HE 3MU", "HE 4MU",
+ "HE 2RU", "HE 3RU", "HE 4RU", "HE 5-8RU", "HE 9-16RU",
+ "HE >16RU"
+ };
+ static const char * const ul_he_type[] = {
+ "HE 2MU", "HE 3MU", "HE 4MU", "HE SU", "HE 2RU",
+ "HE 3RU", "HE 4RU", "HE 5-8RU", "HE 9-16RU", "HE >16RU"
+ };
+ int ret, i;
+ u64 total_ppdu_cnt, sub_total_cnt;
+
+ if (!dev->muru_debug) {
+ seq_puts(file, "Please enable muru_debug first.\n");
+ return 0;
+ }
+
+ mutex_lock(&dev->mt76.mutex);
+
+ ret = mt7915_mcu_muru_debug_get(phy, &mu_stats);
+ if (ret)
+ goto exit;
+
+ /* Non-HE Downlink */
+ seq_puts(file, "[Non-HE]\nDownlink\nData Type: ");
+
+ for (i = 0; i < 5; i++)
+ seq_printf(file, "%8s | ", dl_non_he_type[i]);
+
+#define __dl_u32(s) le32_to_cpu(mu_stats.dl.s)
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | %8u | %8u | ",
+ __dl_u32(cck_cnt),
+ __dl_u32(ofdm_cnt),
+ __dl_u32(htmix_cnt),
+ __dl_u32(htgf_cnt),
+ __dl_u32(vht_su_cnt));
+
+ seq_puts(file, "\nDownlink MU-MIMO\nData Type: ");
+
+ for (i = 5; i < 8; i++)
+ seq_printf(file, "%8s | ", dl_non_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | ",
+ __dl_u32(vht_2mu_cnt),
+ __dl_u32(vht_3mu_cnt),
+ __dl_u32(vht_4mu_cnt));
+
+ sub_total_cnt = __dl_u32(vht_2mu_cnt) +
+ __dl_u32(vht_3mu_cnt) +
+ __dl_u32(vht_4mu_cnt);
+
+ seq_printf(file, "\nTotal non-HE MU-MIMO DL PPDU count: %lld",
+ sub_total_cnt);
+
+ total_ppdu_cnt = sub_total_cnt +
+ __dl_u32(cck_cnt) +
+ __dl_u32(ofdm_cnt) +
+ __dl_u32(htmix_cnt) +
+ __dl_u32(htgf_cnt) +
+ __dl_u32(vht_su_cnt);
+
+ seq_printf(file, "\nAll non-HE DL PPDU count: %lld", total_ppdu_cnt);
+
+ /* HE Downlink */
+ seq_puts(file, "\n\n[HE]\nDownlink\nData Type: ");
+
+ for (i = 0; i < 2; i++)
+ seq_printf(file, "%8s | ", dl_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | ",
+ __dl_u32(he_su_cnt),
+ __dl_u32(he_ext_su_cnt));
+
+ seq_puts(file, "\nDownlink MU-MIMO\nData Type: ");
+
+ for (i = 2; i < 5; i++)
+ seq_printf(file, "%8s | ", dl_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | ",
+ __dl_u32(he_2mu_cnt),
+ __dl_u32(he_3mu_cnt),
+ __dl_u32(he_4mu_cnt));
+
+ seq_puts(file, "\nDownlink OFDMA\nData Type: ");
+
+ for (i = 5; i < 11; i++)
+ seq_printf(file, "%8s | ", dl_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | %8u | %9u | %8u | ",
+ __dl_u32(he_2ru_cnt),
+ __dl_u32(he_3ru_cnt),
+ __dl_u32(he_4ru_cnt),
+ __dl_u32(he_5to8ru_cnt),
+ __dl_u32(he_9to16ru_cnt),
+ __dl_u32(he_gtr16ru_cnt));
+
+ sub_total_cnt = __dl_u32(he_2mu_cnt) +
+ __dl_u32(he_3mu_cnt) +
+ __dl_u32(he_4mu_cnt);
+ total_ppdu_cnt = sub_total_cnt;
+
+ seq_printf(file, "\nTotal HE MU-MIMO DL PPDU count: %lld",
+ sub_total_cnt);
+
+ sub_total_cnt = __dl_u32(he_2ru_cnt) +
+ __dl_u32(he_3ru_cnt) +
+ __dl_u32(he_4ru_cnt) +
+ __dl_u32(he_5to8ru_cnt) +
+ __dl_u32(he_9to16ru_cnt) +
+ __dl_u32(he_gtr16ru_cnt);
+ total_ppdu_cnt += sub_total_cnt;
+
+ seq_printf(file, "\nTotal HE OFDMA DL PPDU count: %lld",
+ sub_total_cnt);
+
+ total_ppdu_cnt += __dl_u32(he_su_cnt) +
+ __dl_u32(he_ext_su_cnt);
+
+ seq_printf(file, "\nAll HE DL PPDU count: %lld", total_ppdu_cnt);
+#undef __dl_u32
+
+ /* HE Uplink */
+ seq_puts(file, "\n\nUplink");
+ seq_puts(file, "\nTrigger-based Uplink MU-MIMO\nData Type: ");
+
+ for (i = 0; i < 3; i++)
+ seq_printf(file, "%8s | ", ul_he_type[i]);
+
+#define __ul_u32(s) le32_to_cpu(mu_stats.ul.s)
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | ",
+ __ul_u32(hetrig_2mu_cnt),
+ __ul_u32(hetrig_3mu_cnt),
+ __ul_u32(hetrig_4mu_cnt));
+
+ seq_puts(file, "\nTrigger-based Uplink OFDMA\nData Type: ");
+
+ for (i = 3; i < 10; i++)
+ seq_printf(file, "%8s | ", ul_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | %8u | %8u | %9u | %7u | ",
+ __ul_u32(hetrig_su_cnt),
+ __ul_u32(hetrig_2ru_cnt),
+ __ul_u32(hetrig_3ru_cnt),
+ __ul_u32(hetrig_4ru_cnt),
+ __ul_u32(hetrig_5to8ru_cnt),
+ __ul_u32(hetrig_9to16ru_cnt),
+ __ul_u32(hetrig_gtr16ru_cnt));
+
+ sub_total_cnt = __ul_u32(hetrig_2mu_cnt) +
+ __ul_u32(hetrig_3mu_cnt) +
+ __ul_u32(hetrig_4mu_cnt);
+ total_ppdu_cnt = sub_total_cnt;
+
+ seq_printf(file, "\nTotal HE MU-MIMO UL TB PPDU count: %lld",
+ sub_total_cnt);
+
+ sub_total_cnt = __ul_u32(hetrig_2ru_cnt) +
+ __ul_u32(hetrig_3ru_cnt) +
+ __ul_u32(hetrig_4ru_cnt) +
+ __ul_u32(hetrig_5to8ru_cnt) +
+ __ul_u32(hetrig_9to16ru_cnt) +
+ __ul_u32(hetrig_gtr16ru_cnt);
+ total_ppdu_cnt += sub_total_cnt;
+
+ seq_printf(file, "\nTotal HE OFDMA UL TB PPDU count: %lld",
+ sub_total_cnt);
+
+ total_ppdu_cnt += __ul_u32(hetrig_su_cnt);
+
+ seq_printf(file, "\nAll HE UL TB PPDU count: %lld\n", total_ppdu_cnt);
+#undef __ul_u32
+
+exit:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(mt7915_muru_stats);
+
+static int
mt7915_fw_debug_wm_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
@@ -355,8 +574,8 @@ mt7915_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
qlen = mt76_get_field(dev, MT_PLE_BASE + MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
- sta->addr, msta->wcid.idx, msta->vif->wmm_idx,
- ac, qlen);
+ sta->addr, msta->wcid.idx,
+ msta->vif->mt76.wmm_idx, ac, qlen);
}
}
@@ -528,7 +747,9 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
dir = mt76_register_debugfs_fops(phy->mt76, NULL);
if (!dir)
return -ENOMEM;
-
+ debugfs_create_file("muru_debug", 0600, dir, dev, &fops_muru_debug);
+ debugfs_create_file("muru_stats", 0400, dir, phy,
+ &mt7915_muru_stats_fops);
debugfs_create_file("hw-queues", 0400, dir, phy,
&mt7915_hw_queues_fops);
debugfs_create_file("xmit-queues", 0400, dir, phy,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index ee3d64434821..edd74d0de157 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/firmware.h>
#include "mt7915.h"
#include "eeprom.h"
@@ -10,6 +11,9 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
u8 *eeprom = mdev->eeprom.data;
u32 val = eeprom[MT_EE_DO_PRE_CAL];
+ if (!dev->flash_mode)
+ return 0;
+
if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP))
return 0;
@@ -21,6 +25,49 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val);
}
+static int mt7915_check_eeprom(struct mt7915_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u16 val = get_unaligned_le16(eeprom);
+
+ switch (val) {
+ case 0x7915:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+mt7915_eeprom_load_default(struct mt7915_dev *dev)
+{
+ char *default_bin = MT7915_EEPROM_DEFAULT;
+ u8 *eeprom = dev->mt76.eeprom.data;
+ const struct firmware *fw = NULL;
+ int ret;
+
+ if (dev->dbdc_support)
+ default_bin = MT7915_EEPROM_DEFAULT_DBDC;
+
+ ret = request_firmware(&fw, default_bin, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data) {
+ dev_err(dev->mt76.dev, "Invalid default bin\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(eeprom, fw->data, MT7915_EEPROM_SIZE);
+ dev->flash_mode = true;
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
static int mt7915_eeprom_load(struct mt7915_dev *dev)
{
int ret;
@@ -31,10 +78,16 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
if (ret) {
dev->flash_mode = true;
- ret = mt7915_eeprom_load_precal(dev);
} else {
+ u8 free_block_num;
u32 block_num, i;
+ mt7915_mcu_get_eeprom_free_block(dev, &free_block_num);
+ /* efuse info not enough */
+ if (free_block_num >= 29)
+ return -EINVAL;
+
+ /* read eeprom data from efuse */
block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE,
MT7915_EEPROM_BLOCK_SIZE);
for (i = 0; i < block_num; i++)
@@ -42,20 +95,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
i * MT7915_EEPROM_BLOCK_SIZE);
}
- return ret;
-}
-
-static int mt7915_check_eeprom(struct mt7915_dev *dev)
-{
- u8 *eeprom = dev->mt76.eeprom.data;
- u16 val = get_unaligned_le16(eeprom);
-
- switch (val) {
- case 0x7915:
- return 0;
- default:
- return -EINVAL;
- }
+ return mt7915_check_eeprom(dev);
}
void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
@@ -117,10 +157,17 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
int ret;
ret = mt7915_eeprom_load(dev);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ if (ret != -EINVAL)
+ return ret;
+
+ dev_warn(dev->mt76.dev, "eeprom load failed, using default bin\n");
+ ret = mt7915_eeprom_load_default(dev);
+ if (ret)
+ return ret;
+ }
- ret = mt7915_check_eeprom(dev);
+ ret = mt7915_eeprom_load_precal(dev);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 4fa8e7ba93e6..d054cdecd5f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -853,7 +853,8 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
mt7915_gen_ppe_thresh(he_cap->ppe_thres, nss);
} else {
he_cap_elem->phy_cap_info[9] |=
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
}
idx++;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 809dc18e5083..48f115502282 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -268,10 +268,9 @@ mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
}
static void
-mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb,
- struct mt76_rx_status *status,
- __le32 *rxv)
+mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
static const struct ieee80211_radiotap_he_mu mu_known = {
.flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
@@ -281,6 +280,8 @@ mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb,
};
struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+
he_mu = skb_push(skb, sizeof(mu_known));
memcpy(he_mu, &mu_known, sizeof(mu_known));
@@ -308,10 +309,9 @@ mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb,
}
static void
-mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
- struct mt76_rx_status *status,
- __le32 *rxv, u32 phy)
+mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
static const struct ieee80211_radiotap_he known = {
.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
HE_BITS(DATA1_DATA_DCM_KNOWN) |
@@ -329,6 +329,8 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
struct ieee80211_radiotap_he *he = NULL;
u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+
he = skb_push(skb, sizeof(known));
memcpy(he, &known, sizeof(known));
@@ -343,7 +345,7 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
- switch (phy) {
+ switch (mode) {
case MT_PHY_TYPE_HE_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
HE_BITS(DATA1_UL_DL_KNOWN) |
@@ -366,6 +368,7 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
+ mt7915_mac_decode_he_mu_radiotap(skb, rxv);
break;
case MT_PHY_TYPE_HE_TB:
he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
@@ -384,6 +387,81 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
}
}
+/* The HW does not translate the mac header to 802.3 for mesh point */
+static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt7915_sta *msta = (struct mt7915_sta *)status->wcid;
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct ieee80211_hdr hdr;
+ struct ethhdr eth_hdr;
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 qos_ctrl, ht_ctrl;
+
+ if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
+ MT_RXD3_NORMAL_U2M)
+ return -EINVAL;
+
+ if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
+ return -EINVAL;
+
+ if (!msta || !msta->vif)
+ return -EINVAL;
+
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+
+ /* store the info from RXD and ethhdr to avoid being overridden */
+ memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
+ hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
+ hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
+ qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
+ ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
+
+ hdr.duration_id = 0;
+ ether_addr_copy(hdr.addr1, vif->addr);
+ ether_addr_copy(hdr.addr2, sta->addr);
+ switch (le16_to_cpu(hdr.frame_control) &
+ (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ case 0:
+ ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
+ break;
+ case IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ break;
+ case IEEE80211_FCTL_TODS:
+ ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ break;
+ case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ break;
+ default:
+ break;
+ }
+
+ skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
+ if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
+ eth_hdr.h_proto == htons(ETH_P_IPX))
+ ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
+ else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
+ else
+ skb_pull(skb, 2);
+
+ if (ieee80211_has_order(hdr.frame_control))
+ memcpy(skb_push(skb, 2), &ht_ctrl, 2);
+ if (ieee80211_is_data_qos(hdr.frame_control))
+ memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ if (ieee80211_has_a4(hdr.frame_control))
+ memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
+ else
+ memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
+
+ return 0;
+}
+
static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
@@ -391,7 +469,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7915_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
- struct ieee80211_hdr *hdr;
__le32 *rxd = (__le32 *)skb->data;
__le32 *rxv = NULL;
u32 mode = 0;
@@ -404,6 +481,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
bool unicast, insert_ccmp_hdr = false;
u8 remove_pad, amsdu_info;
bool hdr_trans;
+ u16 hdr_gap;
u16 seq_ctrl = 0;
u8 qos_ctl = 0;
__le16 fc = 0;
@@ -426,9 +504,16 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
return -EINVAL;
+ hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
+ if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
+ return -EINVAL;
+
+ /* ICV error or CCMP/BIP/WPI MIC error */
+ if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
- hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
@@ -604,15 +689,12 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
return -EINVAL;
break;
case MT_PHY_TYPE_HE_MU:
- status->flag |= RX_FLAG_RADIOTAP_HE_MU;
- fallthrough;
case MT_PHY_TYPE_HE_SU:
case MT_PHY_TYPE_HE_EXT_SU:
case MT_PHY_TYPE_HE_TB:
status->nss =
FIELD_GET(MT_PRXV_NSTS, v0) + 1;
status->encoding = RX_ENC_HE;
- status->flag |= RX_FLAG_RADIOTAP_HE;
i &= GENMASK(3, 0);
if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
@@ -654,27 +736,55 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
}
}
- skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
-
amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
status->amsdu = !!amsdu_info;
if (status->amsdu) {
status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
- if (!hdr_trans) {
- memmove(skb->data + 2, skb->data,
- ieee80211_get_hdrlen_from_skb(skb));
- skb_pull(skb, 2);
- }
}
- if (insert_ccmp_hdr && !hdr_trans) {
- u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+ hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
+ if (hdr_trans && ieee80211_has_morefrags(fc)) {
+ if (mt7915_reverse_frag0_hdr_trans(skb, hdr_gap))
+ return -EINVAL;
+ hdr_trans = false;
+ } else {
+ int pad_start = 0;
+
+ skb_pull(skb, hdr_gap);
+ if (!hdr_trans && status->amsdu) {
+ pad_start = ieee80211_get_hdrlen_from_skb(skb);
+ } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
+ /*
+ * When header translation failure is indicated,
+ * the hardware will insert an extra 2-byte field
+ * containing the data length after the protocol
+ * type field.
+ */
+ pad_start = 12;
+ if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
+ pad_start += 4;
+
+ if (get_unaligned_be16(skb->data + pad_start) !=
+ skb->len - pad_start - 2)
+ pad_start = 0;
+ }
- mt76_insert_ccmp_hdr(skb, key_id);
+ if (pad_start) {
+ memmove(skb->data + 2, skb->data, pad_start);
+ skb_pull(skb, 2);
+ }
}
if (!hdr_trans) {
+ struct ieee80211_hdr *hdr;
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt76_insert_ccmp_hdr(skb, key_id);
+ }
+
hdr = mt76_skb_get_hdr(skb);
fc = hdr->frame_control;
if (ieee80211_is_data_qos(fc)) {
@@ -682,16 +792,11 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
qos_ctl = *ieee80211_get_qos_ctl(hdr);
}
} else {
- status->flag &= ~(RX_FLAG_RADIOTAP_HE |
- RX_FLAG_RADIOTAP_HE_MU);
status->flag |= RX_FLAG_8023;
}
- if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) {
- mt7915_mac_decode_he_radiotap(skb, status, rxv, mode);
- if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
- mt7915_mac_decode_he_mu_radiotap(skb, status, rxv);
- }
+ if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
+ mt7915_mac_decode_he_radiotap(skb, rxv, mode);
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
@@ -1044,8 +1149,8 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
if (vif) {
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- omac_idx = mvif->omac_idx;
- wmm_idx = mvif->wmm_idx;
+ omac_idx = mvif->mt76.omac_idx;
+ wmm_idx = mvif->mt76.wmm_idx;
}
if (ext_phy && dev->mt76.phy2)
@@ -1181,7 +1286,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (vif) {
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- txp->bss_idx = mvif->idx;
+ txp->bss_idx = mvif->mt76.idx;
}
txp->token = cpu_to_le16(id);
@@ -1268,15 +1373,16 @@ out:
}
static void
-mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
+mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
{
- struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data;
+ struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_phy *mphy_ext = mdev->phy2;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
LIST_HEAD(free_list);
- struct sk_buff *tmp;
+ struct sk_buff *skb, *tmp;
+ void *end = data + len;
u8 i, count;
bool wake = false;
@@ -1294,6 +1400,9 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
* Should avoid accessing WTBL to get Tx airtime, and use it instead.
*/
count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+ if (WARN_ON_ONCE((void *)&free->info[count] > end))
+ return;
+
for (i = 0; i < count; i++) {
u32 msdu, info = le32_to_cpu(free->info[i]);
@@ -1336,8 +1445,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
mt76_worker_schedule(&dev->mt76.tx_worker);
- napi_consume_skb(skb, 1);
-
list_for_each_entry_safe(skb, tmp, &free_list, list) {
skb_list_del_init(skb);
napi_consume_skb(skb, 1);
@@ -1512,6 +1619,27 @@ out:
rcu_read_unlock();
}
+bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7915_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7915_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
@@ -1524,7 +1652,8 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
- mt7915_mac_tx_free(dev, skb);
+ mt7915_mac_tx_free(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
break;
case PKT_TYPE_RX_EVENT:
mt7915_mcu_rx_event(dev, skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 057ab27b7083..8ac6f59af174 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -203,8 +203,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
is_zero_ether_addr(vif->addr))
phy->monitor_vif = vif;
- mvif->idx = ffs(~dev->mt76.vif_mask) - 1;
- if (mvif->idx >= MT7915_MAX_INTERFACES) {
+ mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
+ if (mvif->mt76.idx >= MT7915_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
}
@@ -214,29 +214,27 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
ret = -ENOSPC;
goto out;
}
- mvif->omac_idx = idx;
+ mvif->mt76.omac_idx = idx;
mvif->phy = phy;
- mvif->band_idx = ext_phy;
+ mvif->mt76.band_idx = ext_phy;
- if (dev->mt76.phy2)
- mvif->wmm_idx = ext_phy * (MT7915_MAX_WMM_SETS / 2) +
- mvif->idx % (MT7915_MAX_WMM_SETS / 2);
- else
- mvif->wmm_idx = mvif->idx % MT7915_MAX_WMM_SETS;
+ mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ if (ext_phy)
+ mvif->mt76.wmm_idx += 2;
ret = mt7915_mcu_add_dev_info(phy, vif, true);
if (ret)
goto out;
- dev->mt76.vif_mask |= BIT(mvif->idx);
- phy->omac_mask |= BIT_ULL(mvif->omac_idx);
+ dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
+ phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
- idx = MT7915_WTBL_RESERVED - mvif->idx;
+ idx = MT7915_WTBL_RESERVED - mvif->mt76.idx;
INIT_LIST_HEAD(&mvif->sta.rc_list);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.ext_phy = mvif->band_idx;
+ mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
mt76_packet_id_init(&mvif->sta.wcid);
@@ -251,7 +249,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
}
if (vif->type != NL80211_IFTYPE_AP &&
- (!mvif->omac_idx || mvif->omac_idx > 3))
+ (!mvif->mt76.omac_idx || mvif->mt76.omac_idx > 3))
vif->offload_flags = 0;
vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
@@ -288,8 +286,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
mutex_lock(&dev->mt76.mutex);
- dev->mt76.vif_mask &= ~BIT(mvif->idx);
- phy->omac_mask &= ~BIT_ULL(mvif->omac_idx);
+ dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
+ phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mutex_unlock(&dev->mt76.mutex);
spin_lock_bh(&dev->sta_poll_lock);
@@ -425,6 +423,28 @@ out:
return err;
}
+static int mt7915_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ int err = -EINVAL;
+
+ mutex_lock(&dev->mt76.mutex);
+ if (!cfg80211_chandef_valid(&phy->mt76->chandef))
+ goto out;
+
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ goto out;
+
+ err = mt7915_mcu_set_txpower_sku(phy);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
+}
+
static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
@@ -556,7 +576,7 @@ mt7915_update_bss_color(struct ieee80211_hw *hw,
case NL80211_IFTYPE_AP: {
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- if (mvif->omac_idx > HW_BSSID_MAX)
+ if (mvif->mt76.omac_idx > HW_BSSID_MAX)
return;
fallthrough;
}
@@ -655,7 +675,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
- msta->wcid.ext_phy = mvif->band_idx;
+ msta->wcid.ext_phy = mvif->mt76.band_idx;
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
@@ -838,7 +858,8 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
lockdep_assert_held(&dev->mt76.mutex);
- n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
+ n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->mt76.omac_idx;
/* TSF software read */
mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_READ);
@@ -878,7 +899,8 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mt76.mutex);
- n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
+ n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->mt76.omac_idx;
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software overwrite */
@@ -904,7 +926,8 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mt76.mutex);
- n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
+ n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->mt76.omac_idx;
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software adjust */
@@ -1195,7 +1218,7 @@ static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
struct mt76_ethtool_worker_info *wi = wi_data;
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- if (msta->vif->idx != wi->idx)
+ if (msta->vif->mt76.idx != wi->idx)
return;
mt76_ethtool_worker(wi, &msta->stats);
@@ -1211,7 +1234,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt76_ethtool_worker_info wi = {
.data = data,
- .idx = mvif->idx,
+ .idx = mvif->mt76.idx,
};
struct mib_stats *mib = &phy->mib;
/* See mt7915_ampdu_stat_read_phy, etc */
@@ -1331,6 +1354,7 @@ const struct ieee80211_ops mt7915_ops = {
.sw_scan_complete = mt76_sw_scan_complete,
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
+ .set_sar_specs = mt7915_set_sar_specs,
.channel_switch_beacon = mt7915_channel_switch_beacon,
.get_stats = mt7915_get_stats,
.get_et_sset_count = mt7915_get_et_sset_count,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 852d5d97c70b..0911b6f973b5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -582,10 +582,10 @@ mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif,
struct mt7915_sta *msta, int len)
{
struct sta_req_hdr hdr = {
- .bss_idx = mvif->idx,
+ .bss_idx = mvif->mt76.idx,
.wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0,
.wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0,
- .muar_idx = msta && msta->wcid.sta ? mvif->omac_idx : 0xe,
+ .muar_idx = msta && msta->wcid.sta ? mvif->mt76.omac_idx : 0xe,
.is_tlv_append = 1,
};
struct sk_buff *skb;
@@ -613,7 +613,7 @@ mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta,
if (!nskb) {
nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT7915_WTBL_UPDATE_MAX_SIZE);
+ MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
if (!nskb)
return ERR_PTR(-ENOMEM);
@@ -725,7 +725,7 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
bss->network_type = cpu_to_le32(type);
bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
- bss->wmm_idx = mvif->wmm_idx;
+ bss->wmm_idx = mvif->mt76.wmm_idx;
bss->active = enable;
if (vif->type != NL80211_IFTYPE_MONITOR) {
@@ -769,10 +769,11 @@ mt7915_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
}
omac = (struct bss_info_omac *)tlv;
- idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0
+ : mvif->mt76.omac_idx;
omac->conn_type = cpu_to_le32(type);
- omac->omac_idx = mvif->omac_idx;
- omac->band_idx = mvif->band_idx;
+ omac->omac_idx = mvif->mt76.omac_idx;
+ omac->band_idx = mvif->mt76.band_idx;
omac->hw_bss_idx = idx;
}
@@ -937,7 +938,7 @@ mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif)
int ext_bss_idx, tsf_offset;
struct tlv *tlv;
- ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
if (ext_bss_idx < 0)
return;
@@ -973,7 +974,7 @@ mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif,
{
struct mt7915_dev *dev = phy->dev;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- u32 idx = mvif->omac_idx - REPEATER_BSSID_START;
+ u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START;
u32 mask = phy->omac_mask >> 32 & ~BIT(idx);
const u8 *addr = vif->addr;
struct {
@@ -1011,7 +1012,7 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct sk_buff *skb;
- if (mvif->omac_idx >= REPEATER_BSSID_START) {
+ if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) {
mt7915_mcu_muar_config(phy, vif, false, enable);
mt7915_mcu_muar_config(phy, vif, true, enable);
}
@@ -1039,8 +1040,8 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
if (vif->bss_conf.he_support)
mt7915_mcu_bss_he_tlv(skb, vif, phy);
- if (mvif->omac_idx >= EXT_BSSID_START &&
- mvif->omac_idx < REPEATER_BSSID_START)
+ if (mvif->mt76.omac_idx >= EXT_BSSID_START &&
+ mvif->mt76.omac_idx < REPEATER_BSSID_START)
mt7915_mcu_bss_ext_tlv(skb, mvif);
}
out:
@@ -1204,7 +1205,7 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
msta->wcid.amsdu = false;
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT7915_STA_UPDATE_MAX_SIZE);
+ MT76_CONNAC_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1223,7 +1224,7 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
return ret;
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT7915_STA_UPDATE_MAX_SIZE);
+ MT76_CONNAC_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1265,7 +1266,7 @@ mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
if (sta) {
memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
generic->partial_aid = cpu_to_le16(sta->aid);
- generic->muar_idx = mvif->omac_idx;
+ generic->muar_idx = mvif->mt76.omac_idx;
generic->qos = sta->wme;
} else {
/* use BSSID in station mode */
@@ -1738,7 +1739,8 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
struct wtbl_req_hdr *wtbl_hdr;
struct sk_buff *skb;
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, MT7915_WTBL_UPDATE_MAX_SIZE);
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
if (!skb)
return -ENOMEM;
@@ -1752,33 +1754,6 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
true);
}
-int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct tlv *sta_wtbl;
- struct sk_buff *skb;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT7915_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
static inline bool
mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool bfee)
@@ -1954,7 +1929,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_phy *phy =
- mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy;
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
@@ -2021,7 +1996,7 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_phy *phy =
- mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy;
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bfee *bfee;
struct tlv *tlv;
@@ -2049,6 +2024,21 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
bfee->fb_identity_matrix = (nrow == 1 && tx_ant == 2);
}
+static enum mcu_mmps_mode
+mt7915_mcu_get_mmps_mode(enum ieee80211_smps_mode smps)
+{
+ switch (smps) {
+ case IEEE80211_SMPS_OFF:
+ return MCU_MMPS_DISABLE;
+ case IEEE80211_SMPS_STATIC:
+ return MCU_MMPS_STATIC;
+ case IEEE80211_SMPS_DYNAMIC:
+ return MCU_MMPS_DYNAMIC;
+ default:
+ return MCU_MMPS_DISABLE;
+ }
+}
+
int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2076,7 +2066,11 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
case RATE_PARAM_FIXED_MCS:
case RATE_PARAM_FIXED_GI:
case RATE_PARAM_FIXED_HE_LTF:
- ra->phy = *phy;
+ if (phy)
+ ra->phy = *phy;
+ break;
+ case RATE_PARAM_MMPS_UPDATE:
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
+ break;
default:
break;
@@ -2087,6 +2081,39 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
MCU_EXT_CMD(STA_REC_UPDATE), true);
}
+int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+ MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+ if (ret)
+ return ret;
+
+ return mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, NULL,
+ RATE_PARAM_MMPS_UPDATE);
+}
+
static int
mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
struct ieee80211_vif *vif,
@@ -2278,7 +2305,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
int ret;
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT7915_STA_UPDATE_MAX_SIZE);
+ MT76_CONNAC_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2323,7 +2350,7 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
u8 rsv1[8];
} __packed req = {
.action = cpu_to_le32(MT_STA_BSS_GROUP),
- .val = cpu_to_le32(mvif->idx % 16),
+ .val = cpu_to_le32(mvif->mt76.idx % 16),
};
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
@@ -2345,7 +2372,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT7915_STA_UPDATE_MAX_SIZE);
+ MT76_CONNAC_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2389,53 +2416,6 @@ out:
MCU_EXT_CMD(STA_REC_UPDATE), true);
}
-int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
- struct ieee80211_sta *sta, u32 rate)
-{
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct mt7915_vif *mvif = msta->vif;
- struct sta_rec_ra_fixed *ra;
- struct sk_buff *skb;
- struct tlv *tlv;
- int len = sizeof(struct sta_req_hdr) + sizeof(*ra);
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
- ra = (struct sta_rec_ra_fixed *)tlv;
-
- if (!rate) {
- ra->field = cpu_to_le32(RATE_PARAM_AUTO);
- goto out;
- }
-
- ra->field = cpu_to_le32(RATE_PARAM_FIXED);
- ra->phy.type = FIELD_GET(RATE_CFG_PHY_TYPE, rate);
- ra->phy.bw = FIELD_GET(RATE_CFG_BW, rate);
- ra->phy.nss = FIELD_GET(RATE_CFG_NSS, rate);
- ra->phy.mcs = FIELD_GET(RATE_CFG_MCS, rate);
- ra->phy.stbc = FIELD_GET(RATE_CFG_STBC, rate);
-
- if (ra->phy.bw)
- ra->phy.ldpc = 7;
- else
- ra->phy.ldpc = FIELD_GET(RATE_CFG_LDPC, rate) * 7;
-
- /* HT/VHT - SGI: 1, LGI: 0; HE - SGI: 0, MGI: 1, LGI: 2 */
- if (ra->phy.type > MT_PHY_TYPE_VHT) {
- ra->phy.he_ltf = FIELD_GET(RATE_CFG_HE_LTF, rate) * 85;
- ra->phy.sgi = FIELD_GET(RATE_CFG_GI, rate) * 85;
- } else {
- ra->phy.sgi = FIELD_GET(RATE_CFG_GI, rate) * 15;
- }
-
-out:
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, bool enable)
{
@@ -2458,8 +2438,8 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
} __packed tlv;
} data = {
.hdr = {
- .omac_idx = mvif->omac_idx,
- .dbdc_idx = mvif->band_idx,
+ .omac_idx = mvif->mt76.omac_idx,
+ .dbdc_idx = mvif->mt76.band_idx,
.tlv_num = cpu_to_le16(1),
.is_tlv_append = 1,
},
@@ -2467,11 +2447,11 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
.len = cpu_to_le16(sizeof(struct req_tlv)),
.active = enable,
- .dbdc_idx = mvif->band_idx,
+ .dbdc_idx = mvif->mt76.band_idx,
},
};
- if (mvif->omac_idx >= REPEATER_BSSID_START)
+ if (mvif->mt76.omac_idx >= REPEATER_BSSID_START)
return mt7915_mcu_muar_config(phy, vif, false, enable);
memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
@@ -2643,7 +2623,7 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
return -EINVAL;
}
- if (mvif->band_idx) {
+ if (mvif->mt76.band_idx) {
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
}
@@ -2967,7 +2947,7 @@ static int mt7915_load_firmware(struct mt7915_dev *dev)
if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
FIELD_PREP(MT_TOP_MISC_FW_STATE,
- FW_STATE_WACPU_RDY), 1000)) {
+ FW_STATE_RDY), 1000)) {
dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
return -EIO;
}
@@ -3014,6 +2994,47 @@ int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level)
sizeof(data), false);
}
+int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enabled)
+{
+ struct {
+ __le32 cmd;
+ u8 enable;
+ } data = {
+ .cmd = cpu_to_le32(MURU_SET_TXC_TX_STATS_EN),
+ .enable = enabled,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &data,
+ sizeof(data), false);
+}
+
+int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct sk_buff *skb;
+ struct mt7915_mcu_muru_stats *mu_stats =
+ (struct mt7915_mcu_muru_stats *)ms;
+ int ret;
+
+ struct {
+ __le32 cmd;
+ u8 band_idx;
+ } req = {
+ .cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS),
+ .band_idx = phy != &dev->phy,
+ };
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ memcpy(mu_stats, skb->data, sizeof(struct mt7915_mcu_muru_stats));
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
static int mt7915_mcu_set_mwds(struct mt7915_dev *dev, bool enabled)
{
struct {
@@ -3264,7 +3285,7 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
struct edca *e = &req.edca[ac];
e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS;
+ e->queue = ac + mvif->mt76.wmm_idx * MT7915_MAX_WMM_SETS;
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
@@ -3579,6 +3600,30 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
return 0;
}
+int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num)
+{
+ struct {
+ u8 _rsv;
+ u8 version;
+ u8 die_idx;
+ u8 _rsv2;
+ } __packed req = {
+ .version = 1,
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_FREE_BLOCK), &req,
+ sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ *block_num = *(u8 *)skb->data;
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx,
u8 *data, u32 len, int cmd)
{
@@ -3854,11 +3899,11 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
struct mt76_power_limits limits_array;
s8 *la = (s8 *)&limits_array;
int i, idx, n_chains = hweight8(mphy->antenna_mask);
- int tx_power;
-
- tx_power = hw->conf.power_level * 2 -
- mt76_tx_power_nss_delta(n_chains);
+ int tx_power = hw->conf.power_level * 2;
+ tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan,
+ tx_power);
+ tx_power -= mt76_tx_power_nss_delta(n_chains);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits_array, tx_power);
mphy->txpower_cur = tx_power;
@@ -4045,7 +4090,7 @@ int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
} __packed req = {
.action = MT_SPR_ENABLE,
.arg_num = 1,
- .band_idx = mvif->band_idx,
+ .band_idx = mvif->mt76.band_idx,
.val = cpu_to_le32(enable),
};
@@ -4066,7 +4111,7 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
__le16 wcid;
} __packed req = {
.category = MCU_PHY_STATE_CONTENTION_RX_RATE,
- .band = mvif->band_idx,
+ .band = mvif->mt76.band_idx,
.wcid = cpu_to_le16(msta->wcid.idx),
};
struct ieee80211_supported_band *sband;
@@ -4206,11 +4251,11 @@ int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
} __packed req = {
.tbl_idx = flow->table_id,
.cmd = cmd,
- .own_mac_idx = mvif->omac_idx,
+ .own_mac_idx = mvif->mt76.omac_idx,
.flowid = flow->id,
.peer_id = cpu_to_le16(flow->wcid),
.duration = flow->duration,
- .bss_idx = mvif->idx,
+ .bss_idx = mvif->mt76.idx,
.start_tsf = cpu_to_le64(flow->tsf),
.mantissa = flow->mantissa,
.exponent = flow->exp,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 1f5a64ba9b59..92268e696931 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -4,6 +4,8 @@
#ifndef __MT7915_MCU_H
#define __MT7915_MCU_H
+#include "../mt76_connac_mcu.h"
+
struct mt7915_mcu_txd {
__le32 txd[8];
@@ -23,29 +25,6 @@ struct mt7915_mcu_txd {
u32 reserved[5];
} __packed __aligned(4);
-/* event table */
-enum {
- MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
- MCU_EVENT_FW_START = 0x01,
- MCU_EVENT_GENERIC = 0x01,
- MCU_EVENT_ACCESS_REG = 0x02,
- MCU_EVENT_MT_PATCH_SEM = 0x04,
- MCU_EVENT_CH_PRIVILEGE = 0x18,
- MCU_EVENT_EXT = 0xed,
- MCU_EVENT_RESTART_DL = 0xef,
-};
-
-/* ext event table */
-enum {
- MCU_EXT_EVENT_PS_SYNC = 0x5,
- MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
- MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
- MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
- MCU_EXT_EVENT_RDD_REPORT = 0x3a,
- MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
- MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
-};
-
enum {
MCU_ATE_SET_TRX = 0x1,
MCU_ATE_SET_FREQ_OFFSET = 0xa,
@@ -206,6 +185,44 @@ struct mt7915_mcu_tx {
struct edca edca[IEEE80211_NUM_ACS];
} __packed;
+struct mt7915_mcu_muru_stats {
+ __le32 event_id;
+ struct {
+ __le32 cck_cnt;
+ __le32 ofdm_cnt;
+ __le32 htmix_cnt;
+ __le32 htgf_cnt;
+ __le32 vht_su_cnt;
+ __le32 vht_2mu_cnt;
+ __le32 vht_3mu_cnt;
+ __le32 vht_4mu_cnt;
+ __le32 he_su_cnt;
+ __le32 he_ext_su_cnt;
+ __le32 he_2ru_cnt;
+ __le32 he_2mu_cnt;
+ __le32 he_3ru_cnt;
+ __le32 he_3mu_cnt;
+ __le32 he_4ru_cnt;
+ __le32 he_4mu_cnt;
+ __le32 he_5to8ru_cnt;
+ __le32 he_9to16ru_cnt;
+ __le32 he_gtr16ru_cnt;
+ } dl;
+
+ struct {
+ __le32 hetrig_su_cnt;
+ __le32 hetrig_2ru_cnt;
+ __le32 hetrig_3ru_cnt;
+ __le32 hetrig_4ru_cnt;
+ __le32 hetrig_5to8ru_cnt;
+ __le32 hetrig_9to16ru_cnt;
+ __le32 hetrig_gtr16ru_cnt;
+ __le32 hetrig_2mu_cnt;
+ __le32 hetrig_3mu_cnt;
+ __le32 hetrig_4mu_cnt;
+ } ul;
+};
+
#define WMM_AIFS_SET BIT(0)
#define WMM_CW_MIN_SET BIT(1)
#define WMM_CW_MAX_SET BIT(2)
@@ -216,83 +233,11 @@ struct mt7915_mcu_tx {
#define MCU_PKT_ID 0xa0
enum {
- MCU_Q_QUERY,
- MCU_Q_SET,
- MCU_Q_RESERVED,
- MCU_Q_NA
-};
-
-enum {
- MCU_S2D_H2N,
- MCU_S2D_C2N,
- MCU_S2D_H2C,
- MCU_S2D_H2CN
-};
-
-enum {
MCU_FW_LOG_WM,
MCU_FW_LOG_WA,
MCU_FW_LOG_TO_HOST,
};
-#define __MCU_CMD_FIELD_ID GENMASK(7, 0)
-#define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8)
-#define __MCU_CMD_FIELD_QUERY BIT(16)
-#define __MCU_CMD_FIELD_WA BIT(17)
-
-enum {
- MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
- MCU_CMD_FW_START_REQ = 0x02,
- MCU_CMD_INIT_ACCESS_REG = 0x3,
- MCU_CMD_NIC_POWER_CTRL = 0x4,
- MCU_CMD_PATCH_START_REQ = 0x05,
- MCU_CMD_PATCH_FINISH_REQ = 0x07,
- MCU_CMD_PATCH_SEM_CONTROL = 0x10,
- MCU_CMD_WA_PARAM = 0xC4,
- MCU_CMD_EXT_CID = 0xED,
- MCU_CMD_FW_SCATTER = 0xEE,
- MCU_CMD_RESTART_DL_REQ = 0xEF,
-};
-
-enum {
- MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
- MCU_EXT_CMD_RF_TEST = 0x04,
- MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
- MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
- MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
- MCU_EXT_CMD_TXBF_ACTION = 0x1e,
- MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
- MCU_EXT_CMD_THERMAL_PROT = 0x23,
- MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
- MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
- MCU_EXT_CMD_EDCA_UPDATE = 0x27,
- MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
- MCU_EXT_CMD_THERMAL_CTRL = 0x2c,
- MCU_EXT_CMD_WTBL_UPDATE = 0x32,
- MCU_EXT_CMD_SET_DRR_CTRL = 0x36,
- MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
- MCU_EXT_CMD_ATE_CTRL = 0x3d,
- MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
- MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
- MCU_EXT_CMD_RX_HDR_TRANS = 0x47,
- MCU_EXT_CMD_MUAR_UPDATE = 0x48,
- MCU_EXT_CMD_RX_AIRTIME_CTRL = 0x4a,
- MCU_EXT_CMD_SET_RX_PATH = 0x4e,
- MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
- MCU_EXT_CMD_GET_MIB_INFO = 0x5a,
- MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
- MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
- MCU_EXT_CMD_SCS_CTRL = 0x82,
- MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94,
- MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
- MCU_EXT_CMD_SET_RDD_TH = 0x9d,
- MCU_EXT_CMD_MURU_CTRL = 0x9f,
- MCU_EXT_CMD_SET_SPR = 0xa8,
- MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab,
- MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac,
- MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
-};
-
enum {
MCU_TWT_AGRT_ADD,
MCU_TWT_AGRT_MODIFY,
@@ -314,55 +259,11 @@ enum {
MCU_WA_PARAM_RED = 0x0e,
};
-#define MCU_CMD(_t) FIELD_PREP(__MCU_CMD_FIELD_ID, MCU_CMD_##_t)
-#define MCU_EXT_CMD(_t) (MCU_CMD(EXT_CID) | \
- FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \
- MCU_EXT_CMD_##_t))
-#define MCU_EXT_QUERY(_t) (MCU_EXT_CMD(_t) | __MCU_CMD_FIELD_QUERY)
-
-#define MCU_WA_CMD(_t) (MCU_CMD(_t) | __MCU_CMD_FIELD_WA)
-#define MCU_WA_EXT_CMD(_t) (MCU_EXT_CMD(_t) | __MCU_CMD_FIELD_WA)
-#define MCU_WA_PARAM_CMD(_t) (MCU_WA_CMD(WA_PARAM) | \
- FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \
- MCU_WA_PARAM_CMD_##_t))
-
-enum {
- PATCH_SEM_RELEASE,
- PATCH_SEM_GET
-};
-
-enum {
- PATCH_NOT_DL_SEM_FAIL,
- PATCH_IS_DL,
- PATCH_NOT_DL_SEM_SUCCESS,
- PATCH_REL_SEM_SUCCESS
-};
-
-enum {
- FW_STATE_INITIAL,
- FW_STATE_FW_DOWNLOAD,
- FW_STATE_NORMAL_OPERATION,
- FW_STATE_NORMAL_TRX,
- FW_STATE_WACPU_RDY = 7
-};
-
-enum {
- EE_MODE_EFUSE,
- EE_MODE_BUFFER,
-};
-
-enum {
- EE_FORMAT_BIN,
- EE_FORMAT_WHOLE,
- EE_FORMAT_MULTIPLE,
-};
-
-enum {
- MCU_PHY_STATE_TX_RATE,
- MCU_PHY_STATE_RX_RATE,
- MCU_PHY_STATE_RSSI,
- MCU_PHY_STATE_CONTENTION_RX_RATE,
- MCU_PHY_STATE_OFDMLQ_CNINFO,
+enum mcu_mmps_mode {
+ MCU_MMPS_STATIC,
+ MCU_MMPS_DYNAMIC,
+ MCU_MMPS_RSV,
+ MCU_MMPS_DISABLE,
};
#define STA_TYPE_STA BIT(0)
@@ -389,11 +290,6 @@ enum {
#define CONN_STATE_PORT_SECURE 2
enum {
- DEV_INFO_ACTIVE,
- DEV_INFO_MAX_NUM
-};
-
-enum {
SCS_SEND_DATA,
SCS_SET_MANUAL_PD_TH,
SCS_CONFIG,
@@ -403,75 +299,6 @@ enum {
SCS_GET_GLO_ADDR_EVENT,
};
-enum {
- CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
- CMD_CBW_40MHZ = IEEE80211_STA_RX_BW_40,
- CMD_CBW_80MHZ = IEEE80211_STA_RX_BW_80,
- CMD_CBW_160MHZ = IEEE80211_STA_RX_BW_160,
- CMD_CBW_10MHZ,
- CMD_CBW_5MHZ,
- CMD_CBW_8080MHZ,
-
- CMD_HE_MCS_BW80 = 0,
- CMD_HE_MCS_BW160,
- CMD_HE_MCS_BW8080,
- CMD_HE_MCS_BW_NUM
-};
-
-struct tlv {
- __le16 tag;
- __le16 len;
-} __packed;
-
-struct bss_info_omac {
- __le16 tag;
- __le16 len;
- u8 hw_bss_idx;
- u8 omac_idx;
- u8 band_idx;
- u8 rsv0;
- __le32 conn_type;
- u32 rsv1;
-} __packed;
-
-struct bss_info_basic {
- __le16 tag;
- __le16 len;
- __le32 network_type;
- u8 active;
- u8 rsv0;
- __le16 bcn_interval;
- u8 bssid[ETH_ALEN];
- u8 wmm_idx;
- u8 dtim_period;
- u8 bmc_wcid_lo;
- u8 cipher;
- u8 phy_mode;
- u8 max_bssid; /* max BSSID. range: 1 ~ 8, 0: MBSSID disabled */
- u8 non_tx_bssid;/* non-transmitted BSSID, 0: transmitted BSSID */
- u8 bmc_wcid_hi; /* high Byte and version */
- u8 rsv[2];
-} __packed;
-
-struct bss_info_rf_ch {
- __le16 tag;
- __le16 len;
- u8 pri_ch;
- u8 center_ch0;
- u8 center_ch1;
- u8 bw;
- u8 he_ru26_block; /* 1: don't send HETB in RU26, 0: allow */
- u8 he_all_disable; /* 1: disallow all HETB, 0: allow */
- u8 rsv[2];
-} __packed;
-
-struct bss_info_ext_bss {
- __le16 tag;
- __le16 len;
- __le32 mbss_tsf_offset; /* in unit of us */
- u8 rsv[8];
-} __packed;
-
struct bss_info_bmc_rate {
__le16 tag;
__le16 len;
@@ -581,385 +408,8 @@ enum {
};
enum {
- BSS_INFO_OMAC,
- BSS_INFO_BASIC,
- BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
- BSS_INFO_PM, /* sta only */
- BSS_INFO_UAPSD, /* sta only */
- BSS_INFO_ROAM_DETECT, /* obsoleted */
- BSS_INFO_LQ_RM, /* obsoleted */
- BSS_INFO_EXT_BSS,
- BSS_INFO_BMC_RATE, /* for bmc rate control in CR4 */
- BSS_INFO_SYNC_MODE, /* obsoleted */
- BSS_INFO_RA,
- BSS_INFO_HW_AMSDU,
- BSS_INFO_BSS_COLOR,
- BSS_INFO_HE_BASIC,
- BSS_INFO_PROTECT_INFO,
- BSS_INFO_OFFLOAD,
- BSS_INFO_11V_MBSSID,
- BSS_INFO_MAX_NUM
-};
-
-enum {
- WTBL_RESET_AND_SET = 1,
- WTBL_SET,
- WTBL_QUERY,
- WTBL_RESET_ALL
-};
-
-struct wtbl_req_hdr {
- u8 wlan_idx_lo;
- u8 operation;
- __le16 tlv_num;
- u8 wlan_idx_hi;
- u8 rsv[3];
-} __packed;
-
-struct wtbl_generic {
- __le16 tag;
- __le16 len;
- u8 peer_addr[ETH_ALEN];
- u8 muar_idx;
- u8 skip_tx;
- u8 cf_ack;
- u8 qos;
- u8 mesh;
- u8 adm;
- __le16 partial_aid;
- u8 baf_en;
- u8 aad_om;
-} __packed;
-
-struct wtbl_rx {
- __le16 tag;
- __le16 len;
- u8 rcid;
- u8 rca1;
- u8 rca2;
- u8 rv;
- u8 rsv[4];
-} __packed;
-
-struct wtbl_ht {
- __le16 tag;
- __le16 len;
- u8 ht;
- u8 ldpc;
- u8 af;
- u8 mm;
- u8 rsv[4];
-} __packed;
-
-struct wtbl_vht {
- __le16 tag;
- __le16 len;
- u8 ldpc;
- u8 dyn_bw;
- u8 vht;
- u8 txop_ps;
- u8 rsv[4];
-} __packed;
-
-struct wtbl_hdr_trans {
- __le16 tag;
- __le16 len;
- u8 to_ds;
- u8 from_ds;
- u8 no_rx_trans;
- u8 _rsv;
-};
-
-enum {
- MT_BA_TYPE_INVALID,
- MT_BA_TYPE_ORIGINATOR,
- MT_BA_TYPE_RECIPIENT
-};
-
-enum {
- RST_BA_MAC_TID_MATCH,
- RST_BA_MAC_MATCH,
- RST_BA_NO_MATCH
-};
-
-struct wtbl_ba {
- __le16 tag;
- __le16 len;
- /* common */
- u8 tid;
- u8 ba_type;
- u8 rsv0[2];
- /* originator only */
- __le16 sn;
- u8 ba_en;
- u8 ba_winsize_idx;
- /* originator & recipient */
- __le16 ba_winsize;
- /* recipient only */
- u8 peer_addr[ETH_ALEN];
- u8 rst_ba_tid;
- u8 rst_ba_sel;
- u8 rst_ba_sb;
- u8 band_idx;
- u8 rsv1[4];
-} __packed;
-
-struct wtbl_smps {
- __le16 tag;
- __le16 len;
- u8 smps;
- u8 rsv[3];
-} __packed;
-
-enum {
- WTBL_GENERIC,
- WTBL_RX,
- WTBL_HT,
- WTBL_VHT,
- WTBL_PEER_PS, /* not used */
- WTBL_TX_PS,
- WTBL_HDR_TRANS,
- WTBL_SEC_KEY,
- WTBL_BA,
- WTBL_RDG, /* obsoleted */
- WTBL_PROTECT, /* not used */
- WTBL_CLEAR, /* not used */
- WTBL_BF,
- WTBL_SMPS,
- WTBL_RAW_DATA, /* debug only */
- WTBL_PN,
- WTBL_SPE,
- WTBL_MAX_NUM
-};
-
-struct sta_ntlv_hdr {
- u8 rsv[2];
- __le16 tlv_num;
-} __packed;
-
-struct sta_req_hdr {
- u8 bss_idx;
- u8 wlan_idx_lo;
- __le16 tlv_num;
- u8 is_tlv_append;
- u8 muar_idx;
- u8 wlan_idx_hi;
- u8 rsv;
-} __packed;
-
-struct sta_rec_basic {
- __le16 tag;
- __le16 len;
- __le32 conn_type;
- u8 conn_state;
- u8 qos;
- __le16 aid;
- u8 peer_addr[ETH_ALEN];
- __le16 extra_info;
-} __packed;
-
-struct sta_rec_ht {
- __le16 tag;
- __le16 len;
- __le16 ht_cap;
- u16 rsv;
-} __packed;
-
-struct sta_rec_vht {
- __le16 tag;
- __le16 len;
- __le32 vht_cap;
- __le16 vht_rx_mcs_map;
- __le16 vht_tx_mcs_map;
- u8 rts_bw_sig;
- u8 rsv[3];
-} __packed;
-
-struct sta_rec_uapsd {
- __le16 tag;
- __le16 len;
- u8 dac_map;
- u8 tac_map;
- u8 max_sp;
- u8 rsv0;
- __le16 listen_interval;
- u8 rsv1[2];
-} __packed;
-
-struct sta_rec_muru {
- __le16 tag;
- __le16 len;
-
- struct {
- bool ofdma_dl_en;
- bool ofdma_ul_en;
- bool mimo_dl_en;
- bool mimo_ul_en;
- u8 rsv[4];
- } cfg;
-
- struct {
- u8 punc_pream_rx;
- bool he_20m_in_40m_2g;
- bool he_20m_in_160m;
- bool he_80m_in_160m;
- bool lt16_sigb;
- bool rx_su_comp_sigb;
- bool rx_su_non_comp_sigb;
- u8 rsv;
- } ofdma_dl;
-
- struct {
- u8 t_frame_dur;
- u8 mu_cascading;
- u8 uo_ra;
- u8 he_2x996_tone;
- u8 rx_t_frame_11ac;
- u8 rsv[3];
- } ofdma_ul;
-
- struct {
- bool vht_mu_bfee;
- bool partial_bw_dl_mimo;
- u8 rsv[2];
- } mimo_dl;
-
- struct {
- bool full_ul_mimo;
- bool partial_ul_mimo;
- u8 rsv[2];
- } mimo_ul;
-} __packed;
-
-struct sta_rec_he {
- __le16 tag;
- __le16 len;
-
- __le32 he_cap;
-
- u8 t_frame_dur;
- u8 max_ampdu_exp;
- u8 bw_set;
- u8 device_class;
- u8 dcm_tx_mode;
- u8 dcm_tx_max_nss;
- u8 dcm_rx_mode;
- u8 dcm_rx_max_nss;
- u8 dcm_max_ru;
- u8 punc_pream_rx;
- u8 pkt_ext;
- u8 rsv1;
-
- __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM];
-
- u8 rsv2[2];
-} __packed;
-
-struct sta_rec_ba {
- __le16 tag;
- __le16 len;
- u8 tid;
- u8 ba_type;
- u8 amsdu;
- u8 ba_en;
- __le16 ssn;
- __le16 winsize;
-} __packed;
-
-struct sta_rec_amsdu {
- __le16 tag;
- __le16 len;
- u8 max_amsdu_num;
- u8 max_mpdu_size;
- u8 amsdu_en;
- u8 rsv;
-} __packed;
-
-struct sec_key {
- u8 cipher_id;
- u8 cipher_len;
- u8 key_id;
- u8 key_len;
- u8 key[32];
-} __packed;
-
-struct sta_rec_sec {
- __le16 tag;
- __le16 len;
- u8 add;
- u8 n_cipher;
- u8 rsv[2];
-
- struct sec_key key[2];
-} __packed;
-
-struct sta_phy {
- u8 type;
- u8 flag;
- u8 stbc;
- u8 sgi;
- u8 bw;
- u8 ldpc;
- u8 mcs;
- u8 nss;
- u8 he_ltf;
-};
-
-struct sta_rec_ra {
- __le16 tag;
- __le16 len;
-
- u8 valid;
- u8 auto_rate;
- u8 phy_mode;
- u8 channel;
- u8 bw;
- u8 disable_cck;
- u8 ht_mcs32;
- u8 ht_gf;
- u8 ht_mcs[4];
- u8 mmps_mode;
- u8 gband_256;
- u8 af;
- u8 auth_wapi_mode;
- u8 rate_len;
-
- u8 supp_mode;
- u8 supp_cck_rate;
- u8 supp_ofdm_rate;
- __le32 supp_ht_mcs;
- __le16 supp_vht_mcs[4];
-
- u8 op_mode;
- u8 op_vht_chan_width;
- u8 op_vht_rx_nss;
- u8 op_vht_rx_nss_type;
-
- __le32 sta_cap;
-
- struct sta_phy phy;
-} __packed;
-
-struct sta_rec_ra_fixed {
- __le16 tag;
- __le16 len;
-
- __le32 field;
- u8 op_mode;
- u8 op_vht_chan_width;
- u8 op_vht_rx_nss;
- u8 op_vht_rx_nss_type;
-
- struct sta_phy phy;
-
- u8 spe_en;
- u8 short_preamble;
- u8 is_5g;
- u8 mmps_mode;
-} __packed;
-
-enum {
RATE_PARAM_FIXED = 3,
+ RATE_PARAM_MMPS_UPDATE = 5,
RATE_PARAM_FIXED_HE_LTF = 7,
RATE_PARAM_FIXED_MCS,
RATE_PARAM_FIXED_GI = 11,
@@ -975,120 +425,6 @@ enum {
#define RATE_CFG_PHY_TYPE GENMASK(27, 24)
#define RATE_CFG_HE_LTF GENMASK(31, 28)
-struct sta_rec_bf {
- __le16 tag;
- __le16 len;
-
- __le16 pfmu; /* 0xffff: no access right for PFMU */
- bool su_mu; /* 0: SU, 1: MU */
- u8 bf_cap; /* 0: iBF, 1: eBF */
- u8 sounding_phy; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT */
- u8 ndpa_rate;
- u8 ndp_rate;
- u8 rept_poll_rate;
- u8 tx_mode; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT ... */
- u8 ncol;
- u8 nrow;
- u8 bw; /* 0: 20M, 1: 40M, 2: 80M, 3: 160M */
-
- u8 mem_total;
- u8 mem_20m;
- struct {
- u8 row;
- u8 col: 6, row_msb: 2;
- } mem[4];
-
- __le16 smart_ant;
- u8 se_idx;
- u8 auto_sounding; /* b7: low traffic indicator
- * b6: Stop sounding for this entry
- * b5 ~ b0: postpone sounding
- */
- u8 ibf_timeout;
- u8 ibf_dbw;
- u8 ibf_ncol;
- u8 ibf_nrow;
- u8 nrow_bw160;
- u8 ncol_bw160;
- u8 ru_start_idx;
- u8 ru_end_idx;
-
- bool trigger_su;
- bool trigger_mu;
- bool ng16_su;
- bool ng16_mu;
- bool codebook42_su;
- bool codebook75_mu;
-
- u8 he_ltf;
- u8 rsv[3];
-} __packed;
-
-struct sta_rec_bfee {
- __le16 tag;
- __le16 len;
- bool fb_identity_matrix; /* 1: feedback identity matrix */
- bool ignore_feedback; /* 1: ignore */
- u8 rsv[2];
-} __packed;
-
-enum {
- STA_REC_BASIC,
- STA_REC_RA,
- STA_REC_RA_CMM_INFO,
- STA_REC_RA_UPDATE,
- STA_REC_BF,
- STA_REC_AMSDU,
- STA_REC_BA,
- STA_REC_RED, /* not used */
- STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
- STA_REC_HT,
- STA_REC_VHT,
- STA_REC_APPS,
- STA_REC_KEY,
- STA_REC_WTBL,
- STA_REC_HE,
- STA_REC_HW_AMSDU,
- STA_REC_WTBL_AADOM,
- STA_REC_KEY_V2,
- STA_REC_MURU,
- STA_REC_MUEDCA,
- STA_REC_BFEE,
- STA_REC_MAX_NUM
-};
-
-enum mcu_cipher_type {
- MCU_CIPHER_NONE = 0,
- MCU_CIPHER_WEP40,
- MCU_CIPHER_WEP104,
- MCU_CIPHER_WEP128,
- MCU_CIPHER_TKIP,
- MCU_CIPHER_AES_CCMP,
- MCU_CIPHER_CCMP_256,
- MCU_CIPHER_GCMP,
- MCU_CIPHER_GCMP_256,
- MCU_CIPHER_WAPI,
- MCU_CIPHER_BIP_CMAC_128,
-};
-
-enum {
- CH_SWITCH_NORMAL = 0,
- CH_SWITCH_SCAN = 3,
- CH_SWITCH_MCC = 4,
- CH_SWITCH_DFS = 5,
- CH_SWITCH_BACKGROUND_SCAN_START = 6,
- CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
- CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
- CH_SWITCH_SCAN_BYPASS_DPD = 9
-};
-
-enum {
- THERMAL_SENSOR_TEMP_QUERY,
- THERMAL_SENSOR_MANUAL_CTRL,
- THERMAL_SENSOR_INFO_QUERY,
- THERMAL_SENSOR_TASK_CTRL,
-};
-
enum {
THERMAL_PROTECT_PARAMETER_CTRL,
THERMAL_PROTECT_BASIC_INFO,
@@ -1116,28 +452,11 @@ enum {
MURU_PLATFORM_TYPE_PERF_LEVEL_2,
};
-#define MT7915_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
- sizeof(struct wtbl_generic) + \
- sizeof(struct wtbl_rx) + \
- sizeof(struct wtbl_ht) + \
- sizeof(struct wtbl_vht) + \
- sizeof(struct wtbl_hdr_trans) +\
- sizeof(struct wtbl_ba) + \
- sizeof(struct wtbl_smps))
-
-#define MT7915_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
- sizeof(struct sta_rec_basic) + \
- sizeof(struct sta_rec_bf) + \
- sizeof(struct sta_rec_ht) + \
- sizeof(struct sta_rec_he) + \
- sizeof(struct sta_rec_ba) + \
- sizeof(struct sta_rec_vht) + \
- sizeof(struct sta_rec_uapsd) + \
- sizeof(struct sta_rec_amsdu) + \
- sizeof(struct sta_rec_muru) + \
- sizeof(struct sta_rec_bfee) + \
- sizeof(struct tlv) + \
- MT7915_WTBL_UPDATE_MAX_SIZE)
+/* tx cmd tx statistics */
+enum {
+ MURU_SET_TXC_TX_STATS_EN = 150,
+ MURU_GET_TXC_TX_STATS = 151,
+};
#define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct bss_info_omac) + \
@@ -1154,62 +473,4 @@ enum {
sizeof(struct bss_info_bcn_mbss) + \
sizeof(struct bss_info_bcn_cont))
-#define PHY_MODE_A BIT(0)
-#define PHY_MODE_B BIT(1)
-#define PHY_MODE_G BIT(2)
-#define PHY_MODE_GN BIT(3)
-#define PHY_MODE_AN BIT(4)
-#define PHY_MODE_AC BIT(5)
-#define PHY_MODE_AX_24G BIT(6)
-#define PHY_MODE_AX_5G BIT(7)
-#define PHY_MODE_AX_6G BIT(8)
-
-#define MODE_CCK BIT(0)
-#define MODE_OFDM BIT(1)
-#define MODE_HT BIT(2)
-#define MODE_VHT BIT(3)
-#define MODE_HE BIT(4)
-
-#define STA_CAP_WMM BIT(0)
-#define STA_CAP_SGI_20 BIT(4)
-#define STA_CAP_SGI_40 BIT(5)
-#define STA_CAP_TX_STBC BIT(6)
-#define STA_CAP_RX_STBC BIT(7)
-#define STA_CAP_VHT_SGI_80 BIT(16)
-#define STA_CAP_VHT_SGI_160 BIT(17)
-#define STA_CAP_VHT_TX_STBC BIT(18)
-#define STA_CAP_VHT_RX_STBC BIT(19)
-#define STA_CAP_VHT_LDPC BIT(23)
-#define STA_CAP_LDPC BIT(24)
-#define STA_CAP_HT BIT(26)
-#define STA_CAP_VHT BIT(27)
-#define STA_CAP_HE BIT(28)
-
-/* HE MAC */
-#define STA_REC_HE_CAP_HTC BIT(0)
-#define STA_REC_HE_CAP_BQR BIT(1)
-#define STA_REC_HE_CAP_BSR BIT(2)
-#define STA_REC_HE_CAP_OM BIT(3)
-#define STA_REC_HE_CAP_AMSDU_IN_AMPDU BIT(4)
-/* HE PHY */
-#define STA_REC_HE_CAP_DUAL_BAND BIT(5)
-#define STA_REC_HE_CAP_LDPC BIT(6)
-#define STA_REC_HE_CAP_TRIG_CQI_FK BIT(7)
-#define STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE BIT(8)
-/* STBC */
-#define STA_REC_HE_CAP_LE_EQ_80M_TX_STBC BIT(9)
-#define STA_REC_HE_CAP_LE_EQ_80M_RX_STBC BIT(10)
-#define STA_REC_HE_CAP_GT_80M_TX_STBC BIT(11)
-#define STA_REC_HE_CAP_GT_80M_RX_STBC BIT(12)
-/* GI */
-#define STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI BIT(13)
-#define STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI BIT(14)
-#define STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI BIT(15)
-#define STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI BIT(16)
-#define STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI BIT(17)
-/* 242 TONE */
-#define STA_REC_HE_CAP_BW20_RU242_SUPPORT BIT(18)
-#define STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242 BIT(19)
-#define STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242 BIT(20)
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index e69b4c8974ee..42d887383e8d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -30,6 +30,9 @@
#define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin"
#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin"
+#define MT7915_EEPROM_DEFAULT "mediatek/mt7915_eeprom.bin"
+#define MT7915_EEPROM_DEFAULT_DBDC "mediatek/mt7915_eeprom_dbdc.bin"
+
#define MT7915_EEPROM_SIZE 3584
#define MT7915_EEPROM_BLOCK_SIZE 16
#define MT7915_TOKEN_SIZE 8192
@@ -121,10 +124,7 @@ struct mt7915_vif_cap {
};
struct mt7915_vif {
- u16 idx;
- u8 omac_idx;
- u8 band_idx;
- u8 wmm_idx;
+ struct mt76_vif mt76; /* must be first */
struct mt7915_vif_cap cap;
struct mt7915_sta sta;
@@ -270,6 +270,7 @@ struct mt7915_dev {
bool dbdc_support;
bool flash_mode;
+ bool muru_debug;
bool ibf;
u8 fw_debug_wm;
u8 fw_debug_wa;
@@ -283,20 +284,6 @@ struct mt7915_dev {
};
enum {
- HW_BSSID_0 = 0x0,
- HW_BSSID_1,
- HW_BSSID_2,
- HW_BSSID_3,
- HW_BSSID_MAX = HW_BSSID_3,
- EXT_BSSID_START = 0x10,
- EXT_BSSID_1,
- EXT_BSSID_15 = 0x1f,
- EXT_BSSID_MAX = EXT_BSSID_15,
- REPEATER_BSSID_START = 0x20,
- REPEATER_BSSID_MAX = 0x3f,
-};
-
-enum {
MT_CTX0,
MT_HIF0 = 0x0,
@@ -423,6 +410,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
void *data, u32 field);
int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
+int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
bool hdr_trans);
int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
@@ -515,6 +503,7 @@ void mt7915_tx_token_put(struct mt7915_dev *dev);
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
+bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
void mt7915_stats_work(struct work_struct *work);
int mt76_dfs_start_rdd(struct mt7915_dev *dev, bool force);
@@ -522,6 +511,8 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy);
void mt7915_set_stream_he_caps(struct mt7915_phy *phy);
void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
void mt7915_update_channel(struct mt76_phy *mphy);
+int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable);
+int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms);
int mt7915_init_debugfs(struct mt7915_phy *phy);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 0af4cdb897b7..8130ea43971f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -230,6 +230,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
.tx_prepare_skb = mt7915_tx_prepare_skb,
.tx_complete_skb = mt7915_tx_complete_skb,
.rx_skb = mt7915_queue_rx_skb,
+ .rx_check = mt7915_rx_check,
.rx_poll_complete = mt7915_rx_poll_complete,
.sta_ps = mt7915_sta_ps,
.sta_add = mt7915_mac_sta_add,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index 89aae323d29e..af80c2cf8c83 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -361,16 +361,15 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
return;
}
- if (b)
- return;
-
- b = devm_kzalloc(dev->mt76.dev, 4 * n_regs, GFP_KERNEL);
- if (!b)
- return;
+ if (!b) {
+ b = devm_kzalloc(dev->mt76.dev, 4 * n_regs, GFP_KERNEL);
+ if (!b)
+ return;
- phy->test.reg_backup = b;
- for (i = 0; i < n_regs; i++)
- b[i] = mt76_rr(dev, reg_backup_list[i].band[ext_phy]);
+ phy->test.reg_backup = b;
+ for (i = 0; i < n_regs; i++)
+ b[i] = mt76_rr(dev, reg_backup_list[i].band[ext_phy]);
+ }
mt76_clear(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_MM_PROT |
MT_AGG_PCR0_GF_PROT | MT_AGG_PCR0_ERP_PROT |
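
The hunk above inverts the early return so the register snapshot is taken only on the first call; later invocations reuse the first backup instead of overwriting it with already-modified values. A minimal userspace sketch of this allocate-once backup pattern, assuming illustrative names (backup_regs and read_reg stand in for the driver's helpers):

#include <stdint.h>
#include <stdlib.h>

static uint32_t *reg_backup;	/* persists across calls, like phy->test.reg_backup */

/* stand-in for mt76_rr(); returns a fake register value */
static uint32_t read_reg(unsigned int addr) { return addr * 2u; }

static int backup_regs(const unsigned int *list, size_t n_regs)
{
	if (!reg_backup) {	/* snapshot the pristine values only once */
		size_t i;

		reg_backup = calloc(n_regs, sizeof(*reg_backup));
		if (!reg_backup)
			return -1;
		for (i = 0; i < n_regs; i++)
			reg_backup[i] = read_reg(list[i]);
	}
	return 0;		/* later calls keep the first snapshot */
}

int main(void)
{
	const unsigned int regs[] = { 0x1000, 0x1004 };

	return backup_regs(regs, 2) ? 1 : 0;
}
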
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
index 7cdfdf83529f..86fd7292b229 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
@@ -276,7 +276,7 @@ mt7921_pm_set(void *data, u64 val)
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
- mt7921_mutex_acquire(dev);
+ mutex_lock(&dev->mt76.mutex);
if (val == pm->enable)
goto out;
@@ -285,7 +285,11 @@ mt7921_pm_set(void *data, u64 val)
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
- pm->enable = val;
+ /* make sure the chip is awake here and ps_work is scheduled
+	 * just at the end of this routine.
+ */
+ pm->enable = false;
+ mt76_connac_pm_wake(&dev->mphy, pm);
ieee80211_iterate_active_interfaces(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -293,8 +297,10 @@ mt7921_pm_set(void *data, u64 val)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+ pm->enable = val;
+ mt76_connac_power_save_sched(&dev->mphy, pm);
out:
- mt7921_mutex_release(dev);
+ mutex_unlock(&dev->mt76.mutex);
return 0;
}
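
The reworked handler drops the runtime-PM wrapper and forces a wakeup itself: pm->enable is cleared first so the wake helper cannot be short-circuited, the new setting is applied while the chip is guaranteed awake, and only then is the requested value committed and the power-save scheduler kicked. A hedged sketch of that ordering, with illustrative names standing in for the mt76 helpers:

#include <stdbool.h>

struct pm_state { bool enable; };

/* stand-ins for mt76_connac_pm_wake() and friends */
static void chip_wake(struct pm_state *pm) { (void)pm; }
static void apply_config(void) { }
static void schedule_power_save(struct pm_state *pm) { (void)pm; }

static void pm_set(struct pm_state *pm, bool val)
{
	pm->enable = false;      /* keep the chip awake for the update */
	chip_wake(pm);
	apply_config();          /* safe: the chip cannot doze here */
	pm->enable = val;        /* commit the requested mode */
	schedule_power_save(pm); /* ps_work runs only after this point */
}

int main(void)
{
	struct pm_state pm = { true };

	pm_set(&pm, false);
	return pm.enable;        /* 0: power save now disabled */
}
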
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 210998f086ab..ad59ef9839dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -76,14 +76,6 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy->max_sched_scan_reqs = 1;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->reg_notifier = mt7921_regd_notifier;
- wiphy->sar_capa = &mt76_sar_capa;
-
- phy->mt76->frp = devm_kcalloc(dev->mt76.dev,
- wiphy->sar_capa->num_freq_ranges,
- sizeof(struct mt76_freq_range_power),
- GFP_KERNEL);
- if (!phy->mt76->frp)
- return -ENOMEM;
wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
@@ -264,6 +256,10 @@ int mt7921_register_device(struct mt7921_dev *dev)
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
(3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+ if (is_mt7922(&dev->mt76))
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_SHORT_GI_160;
dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index db3302b1576a..ec10f95a4649 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -218,10 +218,9 @@ mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
}
static void
-mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb,
- struct mt76_rx_status *status,
- __le32 *rxv)
+mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
static const struct ieee80211_radiotap_he_mu mu_known = {
.flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
@@ -233,6 +232,8 @@ mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb,
};
struct ieee80211_radiotap_he_mu *he_mu;
+ status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+
he_mu = skb_push(skb, sizeof(mu_known));
memcpy(he_mu, &mu_known, sizeof(mu_known));
@@ -263,10 +264,9 @@ mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb,
}
static void
-mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
- struct mt76_rx_status *status,
- __le32 *rxv, u32 phy)
+mt7921_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
static const struct ieee80211_radiotap_he known = {
.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
HE_BITS(DATA1_DATA_DCM_KNOWN) |
@@ -284,6 +284,8 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
struct ieee80211_radiotap_he *he = NULL;
u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+
he = skb_push(skb, sizeof(known));
memcpy(he, &known, sizeof(known));
@@ -298,7 +300,7 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
- switch (phy) {
+ switch (mode) {
case MT_PHY_TYPE_HE_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
HE_BITS(DATA1_UL_DL_KNOWN) |
@@ -322,6 +324,7 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
+ mt7921_mac_decode_he_mu_radiotap(skb, rxv);
break;
case MT_PHY_TYPE_HE_TB:
he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
@@ -395,6 +398,81 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921_mac_rssi_iter, skb);
}
+/* The HW does not translate the mac header to 802.3 for mesh points */
+static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt7921_sta *msta = (struct mt7921_sta *)status->wcid;
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct ieee80211_hdr hdr;
+ struct ethhdr eth_hdr;
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 qos_ctrl, ht_ctrl;
+
+ if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
+ MT_RXD3_NORMAL_U2M)
+ return -EINVAL;
+
+ if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
+ return -EINVAL;
+
+ if (!msta || !msta->vif)
+ return -EINVAL;
+
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+
+ /* store the info from RXD and ethhdr to avoid being overridden */
+ memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
+ hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
+ hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
+ qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
+ ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
+
+ hdr.duration_id = 0;
+ ether_addr_copy(hdr.addr1, vif->addr);
+ ether_addr_copy(hdr.addr2, sta->addr);
+ switch (le16_to_cpu(hdr.frame_control) &
+ (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ case 0:
+ ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
+ break;
+ case IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ break;
+ case IEEE80211_FCTL_TODS:
+ ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ break;
+ case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
+ ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ break;
+ default:
+ break;
+ }
+
+ skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
+ if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
+ eth_hdr.h_proto == htons(ETH_P_IPX))
+ ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
+ else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
+ else
+ skb_pull(skb, 2);
+
+ if (ieee80211_has_order(hdr.frame_control))
+ memcpy(skb_push(skb, 2), &ht_ctrl, 2);
+ if (ieee80211_is_data_qos(hdr.frame_control))
+ memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ if (ieee80211_has_a4(hdr.frame_control))
+ memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
+ else
+ memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
+
+ return 0;
+}
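
The switch in the added function recovers addr3/addr4 from the Ethernet header according to the ToDS/FromDS bits, which is the standard 802.11 address mapping. A standalone sketch of just that mapping; the FCTL values match ieee80211.h, while the helper name and buffers are illustrative:

#include <stdint.h>
#include <string.h>

#define FCTL_TODS   0x0100	/* matches IEEE80211_FCTL_TODS */
#define FCTL_FROMDS 0x0200	/* matches IEEE80211_FCTL_FROMDS */

struct addrs { uint8_t a3[6], a4[6]; };

/* mirrors the switch in mt7921_reverse_frag0_hdr_trans() above */
static void map_addr34(uint16_t fc, const uint8_t *bssid,
		       const uint8_t *src, const uint8_t *dst,
		       struct addrs *out)
{
	switch (fc & (FCTL_TODS | FCTL_FROMDS)) {
	case 0:				/* IBSS: addr3 = BSSID */
		memcpy(out->a3, bssid, 6);
		break;
	case FCTL_FROMDS:		/* AP -> STA: addr3 = source */
		memcpy(out->a3, src, 6);
		break;
	case FCTL_TODS:			/* STA -> AP: addr3 = destination */
		memcpy(out->a3, dst, 6);
		break;
	case FCTL_TODS | FCTL_FROMDS:	/* 4-address: addr3 = DA, addr4 = SA */
		memcpy(out->a3, dst, 6);
		memcpy(out->a4, src, 6);
		break;
	}
}

int main(void)
{
	static const uint8_t bssid[6] = {1}, src[6] = {2}, dst[6] = {3};
	struct addrs out = { {0}, {0} };

	map_addr34(FCTL_TODS | FCTL_FROMDS, bssid, src, dst, &out);
	return (out.a3[0] == 3 && out.a4[0] == 2) ? 0 : 1;
}
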
+
static int
mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
{
@@ -402,11 +480,11 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
bool hdr_trans, unicast, insert_ccmp_hdr = false;
u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
+ u16 hdr_gap;
__le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7921_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
- struct ieee80211_hdr *hdr;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
@@ -428,10 +506,17 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
return -EINVAL;
+ hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
+ if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
+ return -EINVAL;
+
+ /* ICV error or CCMP/BIP/WPI MIC error */
+ if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
- hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
@@ -612,15 +697,12 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
return -EINVAL;
break;
case MT_PHY_TYPE_HE_MU:
- status->flag |= RX_FLAG_RADIOTAP_HE_MU;
- fallthrough;
case MT_PHY_TYPE_HE_SU:
case MT_PHY_TYPE_HE_EXT_SU:
case MT_PHY_TYPE_HE_TB:
status->nss =
FIELD_GET(MT_PRXV_NSTS, v0) + 1;
status->encoding = RX_ENC_HE;
- status->flag |= RX_FLAG_RADIOTAP_HE;
i &= GENMASK(3, 0);
if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
@@ -668,14 +750,21 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
}
}
- skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
-
amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
status->amsdu = !!amsdu_info;
if (status->amsdu) {
status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
- if (!hdr_trans) {
+ }
+
+ hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
+ if (hdr_trans && ieee80211_has_morefrags(fc)) {
+ if (mt7921_reverse_frag0_hdr_trans(skb, hdr_gap))
+ return -EINVAL;
+ hdr_trans = false;
+ } else {
+ skb_pull(skb, hdr_gap);
+ if (!hdr_trans && status->amsdu) {
memmove(skb->data + 2, skb->data,
ieee80211_get_hdrlen_from_skb(skb));
skb_pull(skb, 2);
@@ -683,6 +772,8 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
}
if (!hdr_trans) {
+ struct ieee80211_hdr *hdr;
+
if (insert_ccmp_hdr) {
u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
@@ -696,19 +787,13 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
qos_ctl = *ieee80211_get_qos_ctl(hdr);
}
} else {
- status->flag &= ~(RX_FLAG_RADIOTAP_HE |
- RX_FLAG_RADIOTAP_HE_MU);
status->flag |= RX_FLAG_8023;
}
mt7921_mac_assoc_rssi(dev, skb);
- if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) {
- mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
-
- if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
- mt7921_mac_decode_he_mu_radiotap(skb, status, rxv);
- }
+ if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
+ mt7921_mac_decode_he_radiotap(skb, rxv, mode);
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
@@ -903,7 +988,7 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
mt7921_mac_write_txwi_80211(dev, txwi, skb, key);
if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
- int rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ int rateidx = vif ? ffs(vif->bss_conf.basic_rates) - 1 : 0;
u16 rate, mode;
/* hardware won't add HTC for mgmt/ctrl frame */
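
The guard lets the fixed-rate path run with a NULL vif, falling back to index 0; otherwise ffs() picks the lowest set bit of the basic-rates bitmap, i.e. the slowest configured basic rate. A quick illustration of the idiom:

#include <strings.h>	/* ffs() */
#include <stdio.h>

int main(void)
{
	unsigned int basic_rates = 0x150;	/* bits 4, 6 and 8 set */
	int rateidx = basic_rates ? ffs((int)basic_rates) - 1 : 0;

	printf("lowest basic rate index: %d\n", rateidx);	/* prints 4 */
	return 0;
}
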
@@ -1065,7 +1150,7 @@ out:
return !!skb;
}
-static void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
+void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
{
struct mt7921_sta *msta = NULL;
struct mt76_wcid *wcid;
@@ -1314,6 +1399,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
}
dev->hw_full_reset = false;
+ pm->suspended = false;
ieee80211_wake_queues(hw);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 633c6d2a57ac..7a8d2596c226 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -128,11 +128,23 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+
+ if (is_mt7922(phy->mt76->dev)) {
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ he_cap_elem->phy_cap_info[8] |=
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ }
break;
}
he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
+ if (is_mt7922(phy->mt76->dev)) {
+ he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
+ }
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
@@ -140,7 +152,8 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
mt7921_gen_ppe_thresh(he_cap->ppe_thres, nss);
} else {
he_cap_elem->phy_cap_info[9] |=
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
}
if (band == NL80211_BAND_6GHZ) {
@@ -166,7 +179,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
if (vht_cap->cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN)
cap |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
- data->he_6ghz_capa.capa = cpu_to_le16(cap);
+ data[idx].he_6ghz_capa.capa = cpu_to_le16(cap);
}
idx++;
}
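
The one-character fix above replaces a write through the array's base pointer with a write to the element of the current loop pass; before it, every pass clobbered data[0]. A minimal illustration of the slip:

#include <stdio.h>

struct cap { unsigned short capa; };

int main(void)
{
	struct cap data[3] = { {0} };
	int idx;

	for (idx = 0; idx < 3; idx++) {
		/* data->capa = idx;   wrong: always writes element 0 */
		data[idx].capa = (unsigned short)idx;	/* right: current element */
	}
	printf("%u %u %u\n", data[0].capa, data[1].capa, data[2].capa);
	return 0;
}
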
@@ -221,7 +234,7 @@ int __mt7921_start(struct mt7921_phy *phy)
if (err)
return err;
- err = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+ err = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
if (err)
return err;
@@ -318,12 +331,6 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
mtxq->wcid = &mvif->sta.wcid;
}
- if (vif->type != NL80211_IFTYPE_AP &&
- (!mvif->mt76.omac_idx || mvif->mt76.omac_idx > 3))
- vif->offload_flags = 0;
-
- vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
-
out:
mt7921_mutex_release(dev);
@@ -369,7 +376,7 @@ static int mt7921_set_channel(struct mt7921_phy *phy)
mt76_set_channel(phy->mt76);
- ret = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
+ ret = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH));
if (ret)
goto out;
@@ -462,7 +469,7 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
- int ret;
+ int ret = 0;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
@@ -474,8 +481,11 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
mt7921_mutex_acquire(dev);
- if (changed & IEEE80211_CONF_CHANGE_POWER)
- mt76_connac_mcu_set_rate_txpower(phy->mt76);
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ ret = mt76_connac_mcu_set_rate_txpower(phy->mt76);
+ if (ret)
+ goto out;
+ }
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
@@ -490,9 +500,10 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
}
+out:
mt7921_mutex_release(dev);
- return 0;
+ return ret;
}
static int
@@ -1238,7 +1249,6 @@ static int mt7921_suspend(struct ieee80211_hw *hw,
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
- int err;
cancel_delayed_work_sync(&phy->scan_work);
cancel_delayed_work_sync(&phy->mt76->mac_work);
@@ -1249,34 +1259,24 @@ static int mt7921_suspend(struct ieee80211_hw *hw,
mt7921_mutex_acquire(dev);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
-
- set_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76_connac_mcu_set_suspend_iter,
&dev->mphy);
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
-
mt7921_mutex_release(dev);
- return err;
+ return 0;
}
static int mt7921_resume(struct ieee80211_hw *hw)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
- int err;
mt7921_mutex_acquire(dev);
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
- if (err < 0)
- goto out;
-
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
- clear_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76_connac_mcu_set_suspend_iter,
@@ -1284,11 +1284,10 @@ static int mt7921_resume(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7921_WATCHDOG_TIME);
-out:
mt7921_mutex_release(dev);
- return err;
+ return 0;
}
static void mt7921_set_wakeup(struct ieee80211_hw *hw, bool enabled)
@@ -1334,41 +1333,23 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid,
- MCU_UNI_CMD_STA_REC_UPDATE);
+ MCU_UNI_CMD(STA_REC_UPDATE));
}
static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
{
- const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt76_freq_range_power *data, *frp;
struct mt76_phy *mphy = hw->priv;
int err;
- u32 i;
-
- if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
- return -EINVAL;
mt7921_mutex_acquire(dev);
-
- data = mphy->frp;
-
- for (i = 0; i < sar->num_sub_specs; i++) {
- u32 index = sar->sub_specs[i].freq_range_index;
- /* SAR specifies power limitaton in 0.25dbm */
- s32 power = sar->sub_specs[i].power >> 1;
-
- if (power > 127 || power < -127)
- power = 127;
-
- frp = &data[index];
- frp->range = &capa->freq_ranges[index];
- frp->power = power;
- }
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ goto out;
err = mt76_connac_mcu_set_rate_txpower(mphy);
-
+out:
mt7921_mutex_release(dev);
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 6ada1ebe7d68..ef1e1ef91611 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -93,9 +93,6 @@ struct mt7921_fw_region {
#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
-#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
-#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
-
static enum mcu_cipher_type
mt7921_mcu_get_cipher(int cipher)
{
@@ -163,8 +160,8 @@ mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
+ int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
struct mt7921_mcu_rxd *rxd;
- int mcu_cmd = cmd & MCU_CMD_MASK;
int ret = 0;
if (!skb) {
@@ -179,24 +176,20 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
if (seq != rxd->seq)
return -EAGAIN;
- switch (cmd) {
- case MCU_CMD_PATCH_SEM_CONTROL:
+ if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
skb_pull(skb, sizeof(*rxd) - 4);
ret = *skb->data;
- break;
- case MCU_EXT_CMD_GET_TEMP:
+ } else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
skb_pull(skb, sizeof(*rxd) + 4);
ret = le32_to_cpu(*(__le32 *)skb->data);
- break;
- case MCU_EXT_CMD_EFUSE_ACCESS:
+ } else if (cmd == MCU_EXT_CMD(EFUSE_ACCESS)) {
ret = mt7921_mcu_parse_eeprom(mdev, skb);
- break;
- case MCU_UNI_CMD_DEV_INFO_UPDATE:
- case MCU_UNI_CMD_BSS_INFO_UPDATE:
- case MCU_UNI_CMD_STA_REC_UPDATE:
- case MCU_UNI_CMD_HIF_CTRL:
- case MCU_UNI_CMD_OFFLOAD:
- case MCU_UNI_CMD_SUSPEND: {
+ } else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
+ cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
+ cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
+ cmd == MCU_UNI_CMD(HIF_CTRL) ||
+ cmd == MCU_UNI_CMD(OFFLOAD) ||
+ cmd == MCU_UNI_CMD(SUSPEND)) {
struct mt7921_mcu_uni_event *event;
skb_pull(skb, sizeof(*rxd));
@@ -205,19 +198,14 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
/* skip invalid event */
if (mcu_cmd != event->cid)
ret = -EAGAIN;
- break;
- }
- case MCU_CMD_REG_READ: {
+ } else if (cmd == MCU_CE_QUERY(REG_READ)) {
struct mt7921_mcu_reg_event *event;
skb_pull(skb, sizeof(*rxd));
event = (struct mt7921_mcu_reg_event *)skb->data;
ret = (int)le32_to_cpu(event->val);
- break;
- }
- default:
+ } else {
skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
- break;
}
return ret;
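
The rewrite relies on commands now being a packed bitfield: a command ID, an extended ID, and UNI/CE/QUERY flag bits, decoded with the usual GENMASK/FIELD_GET idiom. A self-contained sketch of that encoding scheme; the field positions below are illustrative, not the driver's actual layout:

#include <stdint.h>
#include <stdio.h>

#define CMD_FIELD_ID     0x000000ffu	/* like GENMASK(7, 0)  */
#define CMD_FIELD_EXT_ID 0x0000ff00u	/* like GENMASK(15, 8) */
#define CMD_FIELD_QUERY  0x00010000u	/* like BIT(16) */
#define CMD_FIELD_UNI    0x00020000u	/* like BIT(17) */

/* userspace stand-in for FIELD_GET(): shift by the mask's lowest set bit */
static unsigned int field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t cmd = 0x23 | (0x45u << 8) | CMD_FIELD_UNI;

	printf("id=0x%x ext=0x%x uni=%d query=%d\n",
	       field_get(CMD_FIELD_ID, cmd),
	       field_get(CMD_FIELD_EXT_ID, cmd),
	       !!(cmd & CMD_FIELD_UNI), !!(cmd & CMD_FIELD_QUERY));
	return 0;
}
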
@@ -228,32 +216,28 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- int txd_len, mcu_cmd = cmd & MCU_CMD_MASK;
+ int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
struct mt7921_uni_txd *uni_txd;
struct mt7921_mcu_txd *mcu_txd;
__le32 *txd;
u32 val;
u8 seq;
- switch (cmd) {
- case MCU_UNI_CMD_HIF_CTRL:
- case MCU_UNI_CMD_SUSPEND:
- case MCU_UNI_CMD_OFFLOAD:
- mdev->mcu.timeout = HZ / 3;
- break;
- default:
+ if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
+ cmd == MCU_UNI_CMD(SUSPEND) ||
+ cmd == MCU_UNI_CMD(OFFLOAD))
+ mdev->mcu.timeout = HZ;
+ else
mdev->mcu.timeout = 3 * HZ;
- break;
- }
seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (!seq)
seq = ++dev->mt76.mcu.msg_seq & 0xf;
- if (cmd == MCU_CMD_FW_SCATTER)
+ if (cmd == MCU_CMD(FW_SCATTER))
goto exit;
- txd_len = cmd & MCU_UNI_PREFIX ? sizeof(*uni_txd) : sizeof(*mcu_txd);
+ txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
txd = (__le32 *)skb_push(skb, txd_len);
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
@@ -265,7 +249,7 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD);
txd[1] = cpu_to_le32(val);
- if (cmd & MCU_UNI_PREFIX) {
+ if (cmd & __MCU_CMD_FIELD_UNI) {
uni_txd = (struct mt7921_uni_txd *)txd;
uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
@@ -283,34 +267,20 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
MT_TX_MCU_PORT_RX_Q0));
mcu_txd->pkt_type = MCU_PKT_ID;
mcu_txd->seq = seq;
+ mcu_txd->cid = mcu_cmd;
+ mcu_txd->s2d_index = MCU_S2D_H2N;
+ mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
- switch (cmd & ~MCU_CMD_MASK) {
- case MCU_FW_PREFIX:
- mcu_txd->set_query = MCU_Q_NA;
- mcu_txd->cid = mcu_cmd;
- break;
- case MCU_CE_PREFIX:
- if (cmd & MCU_QUERY_MASK)
- mcu_txd->set_query = MCU_Q_QUERY;
- else
- mcu_txd->set_query = MCU_Q_SET;
- mcu_txd->cid = mcu_cmd;
- break;
- default:
- mcu_txd->cid = MCU_CMD_EXT_CID;
- if (cmd & MCU_QUERY_PREFIX || cmd == MCU_EXT_CMD_EFUSE_ACCESS)
+ if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) {
+ if (cmd & __MCU_CMD_FIELD_QUERY)
mcu_txd->set_query = MCU_Q_QUERY;
else
mcu_txd->set_query = MCU_Q_SET;
- mcu_txd->ext_cid = mcu_cmd;
- mcu_txd->ext_cid_ack = 1;
- break;
+ mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid;
+ } else {
+ mcu_txd->set_query = MCU_Q_NA;
}
- mcu_txd->s2d_index = MCU_S2D_H2N;
- WARN_ON(cmd == MCU_EXT_CMD_EFUSE_ACCESS &&
- mcu_txd->set_query != MCU_Q_QUERY);
-
exit:
if (wait_seq)
*wait_seq = seq;
@@ -419,6 +389,17 @@ mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
}
static void
+mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt7921_mcu_tx_done_event *event;
+
+ skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+ event = (struct mt7921_mcu_tx_done_event *)skb->data;
+
+ mt7921_mac_add_txs(dev, event->txs);
+}
+
+static void
mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
@@ -445,6 +426,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
case MCU_EVENT_LP_INFO:
mt7921_mcu_low_power_event(dev, skb);
break;
+ case MCU_EVENT_TX_DONE:
+ mt7921_mcu_tx_done_event(dev, skb);
+ break;
default:
break;
}
@@ -567,7 +551,7 @@ int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
return ret;
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_STA_REC_UPDATE, true);
+ MCU_UNI_CMD(STA_REC_UPDATE), true);
}
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
@@ -602,7 +586,7 @@ int mt7921_mcu_restart(struct mt76_dev *dev)
.power_mode = 1,
};
- return mt76_mcu_send_msg(dev, MCU_CMD_NIC_POWER_CTRL, &req,
+ return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt7921_mcu_restart);
@@ -708,7 +692,7 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
goto out;
}
- ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
dl, len, max_len);
if (ret) {
dev_err(dev->mt76.dev, "Failed to send patch\n");
@@ -720,6 +704,17 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
if (ret)
dev_err(dev->mt76.dev, "Failed to start patch\n");
+ if (mt76_is_sdio(&dev->mt76)) {
+ /* activate again */
+ ret = __mt7921_mcu_fw_pmctrl(dev);
+ if (ret)
+ return ret;
+
+ ret = __mt7921_mcu_drv_pmctrl(dev);
+ if (ret)
+ return ret;
+ }
+
out:
sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false);
switch (sem) {
@@ -782,7 +777,7 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
return err;
}
- err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
data + offset, len, max_len);
if (err) {
dev_err(dev->mt76.dev, "Failed to send firmware.\n");
@@ -882,7 +877,7 @@ fw_loaded:
dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
#endif /* CONFIG_PM */
- dev_err(dev->mt76.dev, "Firmware init done\n");
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
return 0;
}
@@ -896,8 +891,8 @@ int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl)
.ctrl_val = ctrl
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FWLOG_2_HOST, &data,
- sizeof(data), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(FWLOG_2_HOST),
+ &data, sizeof(data), false);
}
int mt7921_run_firmware(struct mt7921_dev *dev)
@@ -997,8 +992,8 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
e->cw_max = cpu_to_le16(10);
}
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req,
- sizeof(req), true);
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE),
+ &req, sizeof(req), true);
if (ret)
return ret;
@@ -1022,8 +1017,8 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
e->timer = q->mu_edca_timer;
}
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_MU_EDCA_PARMS, &req_mu,
- sizeof(req_mu), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_MU_EDCA_PARMS),
+ &req_mu, sizeof(req_mu), false);
}
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
@@ -1070,7 +1065,7 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
else
req.switch_reason = CH_SWITCH_NORMAL;
- if (cmd == MCU_EXT_CMD_CHANNEL_SWITCH)
+ if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
req.rx_streams = hweight8(req.rx_streams);
if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
@@ -1093,7 +1088,7 @@ int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
.format = EE_FORMAT_WHOLE,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
&req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);
@@ -1108,8 +1103,9 @@ int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
int ret;
u8 *buf;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_ACCESS, &req,
- sizeof(req), true, &skb);
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_EXT_QUERY(EFUSE_ACCESS),
+ &req, sizeof(req), true, &skb);
if (ret)
return ret;
@@ -1154,7 +1150,7 @@ int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
if (vif->type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
&ps_req, sizeof(ps_req), true);
}
@@ -1190,7 +1186,7 @@ mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
if (vif->type != NL80211_IFTYPE_STATION)
return 0;
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
&bcnft_req, sizeof(bcnft_req), true);
}
@@ -1226,13 +1222,13 @@ mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
if (vif->type != NL80211_IFTYPE_STATION)
return 0;
- err = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT, &req_hdr,
- sizeof(req_hdr), false);
+ err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
+ &req_hdr, sizeof(req_hdr), false);
if (err < 0 || !enable)
return err;
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
+ &req, sizeof(req), false);
}
int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
@@ -1245,7 +1241,7 @@ int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
.sta = sta,
.vif = vif,
.enable = enable,
- .cmd = MCU_UNI_CMD_STA_REC_UPDATE,
+ .cmd = MCU_UNI_CMD(STA_REC_UPDATE),
.state = state,
.offload_fw = true,
.rcpi = to_rcpi(rssi),
@@ -1342,7 +1338,7 @@ int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
struct sk_buff *skb;
int ret;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_TXPWR,
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(GET_TXPWR),
&req, sizeof(req), true, &skb);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index edc0c73f8c01..77cc0cc5b436 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -76,20 +76,32 @@ struct mt7921_uni_txd {
u8 reserved2[4];
} __packed __aligned(4);
-/* event table */
-enum {
- MCU_EVENT_REG_ACCESS = 0x05,
- MCU_EVENT_LP_INFO = 0x07,
- MCU_EVENT_SCAN_DONE = 0x0d,
- MCU_EVENT_TX_DONE = 0x0f,
- MCU_EVENT_BSS_ABSENCE = 0x11,
- MCU_EVENT_BSS_BEACON_LOSS = 0x13,
- MCU_EVENT_CH_PRIVILEGE = 0x18,
- MCU_EVENT_SCHED_SCAN_DONE = 0x23,
- MCU_EVENT_DBG_MSG = 0x27,
- MCU_EVENT_TXPWR = 0xd0,
- MCU_EVENT_COREDUMP = 0xf0,
-};
+struct mt7921_mcu_tx_done_event {
+ u8 pid;
+ u8 status;
+ __le16 seq;
+
+ u8 wlan_idx;
+ u8 tx_cnt;
+ __le16 tx_rate;
+
+ u8 flag;
+ u8 tid;
+ u8 rsp_rate;
+ u8 mcs;
+
+ u8 bw;
+ u8 tx_pwr;
+ u8 reason;
+ u8 rsv0[1];
+
+ __le32 delay;
+ __le32 timestamp;
+ __le32 applied_flag;
+ u8 txs[28];
+
+ u8 rsv1[32];
+} __packed;
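
Events like this arrive as packed little-endian structs at an offset inside an skb. A userspace sketch of pulling such a header out of a raw byte buffer safely; the struct here is a truncated illustration, and le16toh comes from glibc's endian.h:

#include <stdint.h>
#include <string.h>
#include <endian.h>	/* le16toh(); glibc */

struct tx_done_hdr {
	uint8_t  pid;
	uint8_t  status;
	uint16_t seq;	/* little-endian on the wire */
} __attribute__((packed));

static int parse_tx_done(const uint8_t *buf, size_t len,
			 struct tx_done_hdr *out)
{
	if (len < sizeof(*out))
		return -1;		/* same guard as the skb length checks */
	memcpy(out, buf, sizeof(*out));	/* no unaligned direct access */
	out->seq = le16toh(out->seq);	/* wire -> host byte order */
	return 0;
}

int main(void)
{
	const uint8_t raw[] = { 0x01, 0x00, 0x34, 0x12 };
	struct tx_done_hdr h;

	return (parse_tx_done(raw, sizeof(raw), &h) == 0 &&
		h.seq == 0x1234) ? 0 : 1;
}
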
/* ext event table */
enum {
@@ -126,20 +138,6 @@ struct mt7921_mcu_eeprom_info {
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
#define MCU_PKT_ID 0xa0
-enum {
- MCU_Q_QUERY,
- MCU_Q_SET,
- MCU_Q_RESERVED,
- MCU_Q_NA
-};
-
-enum {
- MCU_S2D_H2N,
- MCU_S2D_C2N,
- MCU_S2D_H2C,
- MCU_S2D_H2CN
-};
-
struct mt7921_mcu_uni_event {
u8 cid;
u8 pad[3];
@@ -147,109 +145,10 @@ struct mt7921_mcu_uni_event {
} __packed;
enum {
- PATCH_NOT_DL_SEM_FAIL,
- PATCH_IS_DL,
- PATCH_NOT_DL_SEM_SUCCESS,
- PATCH_REL_SEM_SUCCESS
-};
-
-enum {
- FW_STATE_INITIAL,
- FW_STATE_FW_DOWNLOAD,
- FW_STATE_NORMAL_OPERATION,
- FW_STATE_NORMAL_TRX,
- FW_STATE_WACPU_RDY = 7
-};
-
-enum {
- EE_MODE_EFUSE,
- EE_MODE_BUFFER,
-};
-
-enum {
- EE_FORMAT_BIN,
- EE_FORMAT_WHOLE,
- EE_FORMAT_MULTIPLE,
-};
-
-enum {
- MCU_PHY_STATE_TX_RATE,
- MCU_PHY_STATE_RX_RATE,
- MCU_PHY_STATE_RSSI,
- MCU_PHY_STATE_CONTENTION_RX_RATE,
- MCU_PHY_STATE_OFDMLQ_CNINFO,
-};
-
-struct sec_key {
- u8 cipher_id;
- u8 cipher_len;
- u8 key_id;
- u8 key_len;
- u8 key[32];
-} __packed;
-
-struct sta_rec_sec {
- __le16 tag;
- __le16 len;
- u8 add;
- u8 n_cipher;
- u8 rsv[2];
-
- struct sec_key key[2];
-} __packed;
-
-enum mcu_cipher_type {
- MCU_CIPHER_NONE = 0,
- MCU_CIPHER_WEP40,
- MCU_CIPHER_WEP104,
- MCU_CIPHER_WEP128,
- MCU_CIPHER_TKIP,
- MCU_CIPHER_AES_CCMP,
- MCU_CIPHER_CCMP_256,
- MCU_CIPHER_GCMP,
- MCU_CIPHER_GCMP_256,
- MCU_CIPHER_WAPI,
- MCU_CIPHER_BIP_CMAC_128,
-};
-
-enum {
- CH_SWITCH_NORMAL = 0,
- CH_SWITCH_SCAN = 3,
- CH_SWITCH_MCC = 4,
- CH_SWITCH_DFS = 5,
- CH_SWITCH_BACKGROUND_SCAN_START = 6,
- CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
- CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
- CH_SWITCH_SCAN_BYPASS_DPD = 9
-};
-
-enum {
- THERMAL_SENSOR_TEMP_QUERY,
- THERMAL_SENSOR_MANUAL_CTRL,
- THERMAL_SENSOR_INFO_QUERY,
- THERMAL_SENSOR_TASK_CTRL,
-};
-
-enum {
MT_EBF = BIT(0), /* explicit beamforming */
MT_IBF = BIT(1) /* implicit beamforming */
};
-#define STA_CAP_WMM BIT(0)
-#define STA_CAP_SGI_20 BIT(4)
-#define STA_CAP_SGI_40 BIT(5)
-#define STA_CAP_TX_STBC BIT(6)
-#define STA_CAP_RX_STBC BIT(7)
-#define STA_CAP_VHT_SGI_80 BIT(16)
-#define STA_CAP_VHT_SGI_160 BIT(17)
-#define STA_CAP_VHT_TX_STBC BIT(18)
-#define STA_CAP_VHT_RX_STBC BIT(19)
-#define STA_CAP_VHT_LDPC BIT(23)
-#define STA_CAP_LDPC BIT(24)
-#define STA_CAP_HT BIT(26)
-#define STA_CAP_VHT BIT(27)
-#define STA_CAP_HE BIT(28)
-
struct mt7921_mcu_reg_event {
__le32 reg;
__le32 val;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index e9c7c3a19507..96647801850a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -446,6 +446,7 @@ int mt7921_mcu_restart(struct mt76_dev *dev);
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
+int mt7921e_driver_own(struct mt7921_dev *dev);
int mt7921e_mac_reset(struct mt7921_dev *dev);
int mt7921e_mcu_init(struct mt7921_dev *dev);
int mt7921s_wfsys_reset(struct mt7921_dev *dev);
@@ -463,4 +464,5 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update);
+void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 305b63fa1a8a..9dae2f5972bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -15,6 +15,8 @@
static const struct pci_device_id mt7921_pci_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616) },
{ },
};
@@ -188,7 +190,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
- dev_err(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
@@ -235,7 +237,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
- bool hif_suspend;
int i, err;
pm->suspended = true;
@@ -246,12 +247,9 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (err < 0)
goto restore_suspend;
- hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
- if (hif_suspend) {
- err = mt76_connac_mcu_set_hif_suspend(mdev, true);
- if (err)
- goto restore_suspend;
- }
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true);
+ if (err)
+ goto restore_suspend;
/* always enable deep sleep during suspend to reduce
* power consumption
@@ -302,8 +300,7 @@ restore_napi:
if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
- if (hif_suspend)
- mt76_connac_mcu_set_hif_suspend(mdev, false);
+ mt76_connac_mcu_set_hif_suspend(mdev, false);
restore_suspend:
pm->suspended = false;
@@ -318,7 +315,6 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
struct mt76_connac_pm *pm = &dev->pm;
int i, err;
- pm->suspended = false;
err = pci_set_power_state(pdev, PCI_D0);
if (err)
return err;
@@ -356,8 +352,11 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
- if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state))
- err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ if (err)
+ return err;
+
+ pm->suspended = false;
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index f9547d27356e..85286cc9add1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -321,6 +321,10 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
MT_INT_MCU_CMD);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ err = mt7921e_driver_own(dev);
+ if (err)
+ return err;
+
err = mt7921_run_firmware(dev);
if (err)
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
index 583a89a34734..a020352122a1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
@@ -4,7 +4,7 @@
#include "mt7921.h"
#include "mcu.h"
-static int mt7921e_driver_own(struct mt7921_dev *dev)
+int mt7921e_driver_own(struct mt7921_dev *dev)
{
u32 reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
@@ -30,7 +30,7 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
- if (cmd == MCU_CMD_FW_SCATTER)
+ if (cmd == MCU_CMD(FW_SCATTER))
txq = MT_MCUQ_FWDL;
return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index ddf0eeb8b688..65d693902c22 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -62,6 +62,10 @@ static int mt7921s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
if (err < 0)
return err;
+ if (irq_data->rx.num[0] > 16 ||
+ irq_data->rx.num[1] > 128)
+ return -EINVAL;
+
intr->isr = irq_data->isr;
intr->rec_mb = irq_data->rec_mb;
intr->tx.wtqcr = irq_data->tx.wtqcr;
@@ -203,10 +207,11 @@ static int mt7921s_suspend(struct device *__dev)
struct mt7921_dev *dev = sdio_get_drvdata(func);
struct mt76_connac_pm *pm = &dev->pm;
struct mt76_dev *mdev = &dev->mt76;
- bool hif_suspend;
int err;
pm->suspended = true;
+ set_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
+
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -214,13 +219,6 @@ static int mt7921s_suspend(struct device *__dev)
if (err < 0)
goto restore_suspend;
- hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
- if (hif_suspend) {
- err = mt76_connac_mcu_set_hif_suspend(mdev, true);
- if (err)
- goto restore_suspend;
- }
-
/* always enable deep sleep during suspend to reduce
* power consumption
*/
@@ -228,35 +226,45 @@ static int mt7921s_suspend(struct device *__dev)
mt76_txq_schedule_all(&dev->mphy);
mt76_worker_disable(&mdev->tx_worker);
- mt76_worker_disable(&mdev->sdio.txrx_worker);
mt76_worker_disable(&mdev->sdio.status_worker);
- mt76_worker_disable(&mdev->sdio.net_worker);
cancel_work_sync(&mdev->sdio.stat_work);
clear_bit(MT76_READING_STATS, &dev->mphy.state);
-
mt76_tx_status_check(mdev, true);
- err = mt7921_mcu_fw_pmctrl(dev);
+ mt76_worker_schedule(&mdev->sdio.txrx_worker);
+ wait_event_timeout(dev->mt76.sdio.wait,
+ mt76s_txqs_empty(&dev->mt76), 5 * HZ);
+
+	/* The SDIO bus is expected to be idle at this point */
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true);
if (err)
goto restore_worker;
+ mt76_worker_disable(&mdev->sdio.txrx_worker);
+ mt76_worker_disable(&mdev->sdio.net_worker);
+
+ err = mt7921_mcu_fw_pmctrl(dev);
+ if (err)
+ goto restore_txrx_worker;
+
sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
return 0;
+restore_txrx_worker:
+ mt76_worker_enable(&mdev->sdio.net_worker);
+ mt76_worker_enable(&mdev->sdio.txrx_worker);
+ mt76_connac_mcu_set_hif_suspend(mdev, false);
+
restore_worker:
mt76_worker_enable(&mdev->tx_worker);
- mt76_worker_enable(&mdev->sdio.txrx_worker);
mt76_worker_enable(&mdev->sdio.status_worker);
- mt76_worker_enable(&mdev->sdio.net_worker);
if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(mdev, false);
- if (hif_suspend)
- mt76_connac_mcu_set_hif_suspend(mdev, false);
-
restore_suspend:
+ clear_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
pm->suspended = false;
return err;
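
The reordered suspend path gains another unwind label; each label undoes exactly the steps that already succeeded, in reverse order. A minimal sketch of the goto-ladder idiom, with made-up step names:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* pretend the last step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int do_suspend(void)
{
	int err;

	err = step_a();
	if (err)
		goto restore_none;
	err = step_b();
	if (err)
		goto restore_a;
	err = step_c();
	if (err)
		goto restore_b;
	return 0;

restore_b:
	undo_b();	/* unwind in reverse order of setup */
restore_a:
	undo_a();
restore_none:
	return err;
}

int main(void) { return do_suspend() ? 1 : 0; }
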
@@ -270,7 +278,7 @@ static int mt7921s_resume(struct device *__dev)
struct mt76_dev *mdev = &dev->mt76;
int err;
- pm->suspended = false;
+ clear_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
@@ -285,8 +293,11 @@ static int mt7921s_resume(struct device *__dev)
if (!pm->ds_enable)
mt76_connac_mcu_set_deep_sleep(mdev, false);
- if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state))
- err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ if (err)
+ return err;
+
+ pm->suspended = false;
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
index bdec508b6b9f..ccaf8134cec7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -219,5 +219,5 @@ bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update)
mt7921_mac_sta_poll(dev);
mt7921_mutex_release(dev);
- return 0;
+ return false;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
index 437cddad9a90..d20f2ff01be1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
@@ -33,7 +33,7 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
- if (cmd == MCU_CMD_FW_SCATTER)
+ if (cmd == MCU_CMD(FW_SCATTER))
type = MT7921_SDIO_FWDL;
mt7921_skb_add_sdio_hdr(skb, type);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
index 8bd43879dd6f..bdec8684ce94 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
@@ -66,7 +66,7 @@ mt7921_tm_set(struct mt7921_dev *dev, struct mt7921_tm_cmd *req)
if (!mt76_testmode_enabled(phy))
goto out;
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TEST_CTRL, &cmd,
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(TEST_CTRL), &cmd,
sizeof(cmd), false);
if (ret)
goto out;
@@ -95,7 +95,7 @@ mt7921_tm_query(struct mt7921_dev *dev, struct mt7921_tm_cmd *req,
struct sk_buff *skb;
int ret;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_TEST_CTRL,
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(TEST_CTRL),
&cmd, sizeof(cmd), true, &skb);
if (ret)
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index c99acc21225e..54f72d215948 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -305,12 +305,12 @@ int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
- MT_NUM_RX_ENTRIES, sizeof(*q->entry),
+ MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
- q->ndesc = MT_NUM_RX_ENTRIES;
+ q->ndesc = MT76S_NUM_RX_ENTRIES;
q->head = q->tail = 0;
q->queued = 0;
@@ -328,12 +328,12 @@ static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
- MT_NUM_TX_ENTRIES, sizeof(*q->entry),
+ MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return ERR_PTR(-ENOMEM);
- q->ndesc = MT_NUM_TX_ENTRIES;
+ q->ndesc = MT76S_NUM_TX_ENTRIES;
return q;
}
@@ -479,7 +479,8 @@ static void mt76s_status_worker(struct mt76_worker *w)
resched = true;
if (dev->drv->tx_status_data &&
- !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
+ !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
queue_work(dev->wq, &dev->sdio.stat_work);
} while (nframes > 0);
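
The extra test keeps the stats work from being queued while the PHY is suspended, evaluated after the READING_STATS bit has been claimed atomically. A rough userspace equivalent using C11 atomics in place of the kernel's test_and_set_bit(); names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag reading_stats = ATOMIC_FLAG_INIT;
static atomic_bool suspended;

static void queue_stat_work(void) { /* stand-in for queue_work() */ }

static void maybe_queue_stats(void)
{
	/* claim the bit first, then refuse to queue while suspended */
	if (!atomic_flag_test_and_set(&reading_stats) &&
	    !atomic_load(&suspended))
		queue_stat_work();
}

int main(void)
{
	atomic_store(&suspended, true);
	maybe_queue_stats();	/* bit is claimed, but no work is queued */
	return 0;
}
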
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index 649a56790b89..801590a0a334 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -317,7 +317,8 @@ void mt76s_txrx_worker(struct mt76_sdio *sdio)
if (ret > 0)
nframes += ret;
- if (test_bit(MT76_MCU_RESET, &dev->phy.state)) {
+ if (test_bit(MT76_MCU_RESET, &dev->phy.state) ||
+ test_bit(MT76_STATE_SUSPEND, &dev->phy.state)) {
if (!mt76s_txqs_empty(dev))
continue;
else
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 66afc2b0a935..1a01ad7a4c16 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -126,9 +126,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
hdr = __skb_put_zero(head, head_len);
hdr->frame_control = cpu_to_le16(fc);
- memcpy(hdr->addr1, phy->macaddr, sizeof(phy->macaddr));
- memcpy(hdr->addr2, phy->macaddr, sizeof(phy->macaddr));
- memcpy(hdr->addr3, phy->macaddr, sizeof(phy->macaddr));
+ memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
+ memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
+ memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
skb_set_queue_mapping(head, IEEE80211_AC_BE);
info = IEEE80211_SKB_CB(head);
@@ -318,6 +318,10 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
td->tx_count = 1;
td->tx_rate_mode = MT76_TM_TX_MODE_OFDM;
td->tx_rate_nss = 1;
+
+ memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
+ memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
+ memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
}
static int
@@ -493,6 +497,20 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
}
+ if (tb[MT76_TM_ATTR_MAC_ADDRS]) {
+ struct nlattr *cur;
+ int idx = 0;
+ int rem;
+
+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_MAC_ADDRS], rem) {
+ if (nla_len(cur) != ETH_ALEN || idx >= 3)
+ goto out;
+
+ memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
+ idx++;
+ }
+ }
+
if (dev->test_ops->set_params) {
err = dev->test_ops->set_params(phy, tb, state);
if (err)
@@ -635,6 +653,18 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
nla_nest_end(msg, a);
}
+ if (mt76_testmode_param_present(td, MT76_TM_ATTR_MAC_ADDRS)) {
+ a = nla_nest_start(msg, MT76_TM_ATTR_MAC_ADDRS);
+ if (!a)
+ goto out;
+
+ for (i = 0; i < 3; i++)
+ if (nla_put(msg, i, ETH_ALEN, td->addr[i]))
+ goto out;
+
+ nla_nest_end(msg, a);
+ }
+
err = 0;
out:
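
Both loops above walk a nested netlink attribute as a list of (type, len, payload) records. A simplified userspace sketch of that kind of TLV walk with the same bounds checks; the framing below is an assumption for illustration, not the exact netlink wire format:

#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

struct tlv { uint16_t type, len; };	/* payload follows the header */

static int copy_mac_addrs(const uint8_t *buf, size_t buflen,
			  uint8_t addrs[3][ETH_ALEN])
{
	size_t off = 0;
	int idx = 0;

	while (off + sizeof(struct tlv) <= buflen) {
		struct tlv t;

		memcpy(&t, buf + off, sizeof(t));
		if (off + sizeof(t) + t.len > buflen ||
		    t.len != ETH_ALEN || idx >= 3)
			return -1;	/* same bounds checks as above */
		memcpy(addrs[idx++], buf + off + sizeof(t), ETH_ALEN);
		off += sizeof(t) + ((t.len + 3u) & ~3u);	/* 4-byte padding */
	}
	return idx;	/* number of addresses copied */
}

int main(void)
{
	uint8_t buf[24] = { 0 };	/* room for two padded records */
	uint8_t addrs[3][ETH_ALEN];
	struct tlv t = { 0, ETH_ALEN };

	memcpy(buf, &t, sizeof(t));
	memcpy(buf + 12, &t, sizeof(t));
	return copy_mac_addrs(buf, sizeof(buf), addrs) == 2 ? 0 : 1;
}
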
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h
index d1f9c036dd1f..0590c35c7126 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.h
+++ b/drivers/net/wireless/mediatek/mt76/testmode.h
@@ -7,6 +7,8 @@
#define MT76_TM_TIMEOUT 10
+#include <net/netlink.h>
+
/**
* enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
*
@@ -45,6 +47,8 @@
* @MT76_TM_ATTR_TX_TIME: packet transmission time, in unit of us (u32)
*
* @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested)
+ *
+ * @MT76_TM_ATTR_MAC_ADDRS: array of nested MAC addresses (nested)
*/
enum mt76_testmode_attr {
MT76_TM_ATTR_UNSPEC,
@@ -81,6 +85,8 @@ enum mt76_testmode_attr {
MT76_TM_ATTR_DRV_DATA,
+ MT76_TM_ATTR_MAC_ADDRS,
+
/* keep last */
NUM_MT76_TM_ATTRS,
MT76_TM_ATTR_MAX = NUM_MT76_TM_ATTRS - 1,
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index dc4bfe7be378..8d8378bafd9b 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1737,23 +1737,15 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
INIT_LIST_HEAD(&wl->rxq_head.list);
INIT_LIST_HEAD(&wl->vif_list);
- wl->hif_workqueue = create_singlethread_workqueue("WILC_wq");
- if (!wl->hif_workqueue) {
- ret = -ENOMEM;
- goto free_cfg;
- }
vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto free_hq;
+ goto free_cfg;
}
return 0;
-free_hq:
- destroy_workqueue(wl->hif_workqueue);
-
free_cfg:
wilc_wlan_cfg_deinit(wl);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index e69b9c7f3d31..71b44cfe0dfc 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -1312,7 +1312,7 @@ int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr)
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
if (result)
- netdev_err(vif->ndev, "Failed to get mac address\n");
+ netdev_err(vif->ndev, "Failed to set mac address\n");
return result;
}
@@ -1929,6 +1929,7 @@ int wilc_edit_station(struct wilc_vif *vif, const u8 *mac,
int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
{
+ struct wilc *wilc = vif->wilc;
struct wid wid;
int result;
s8 power_mode;
@@ -1944,6 +1945,8 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
if (result)
netdev_err(vif->ndev, "Failed to send power management\n");
+ else
+ wilc->power_save_mode = enabled;
return result;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 690572e01a2a..643bddaae32a 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -27,7 +27,7 @@ static irqreturn_t isr_uh_routine(int irq, void *user_data)
struct wilc *wilc = user_data;
if (wilc->close) {
- pr_err("Can't handle UH interrupt");
+ pr_err("Can't handle UH interrupt\n");
return IRQ_HANDLED;
}
return IRQ_WAKE_THREAD;
@@ -56,7 +56,7 @@ static int init_irq(struct net_device *dev)
ret = request_threaded_irq(wl->dev_irq_num, isr_uh_routine,
isr_bh_routine,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "WILC_IRQ", wl);
+ dev->name, wl);
if (ret) {
netdev_err(dev, "Failed to request IRQ [%d]\n", ret);
return ret;
@@ -468,7 +468,7 @@ static int wlan_initialize_threads(struct net_device *dev)
struct wilc *wilc = vif->wilc;
wilc->txq_thread = kthread_run(wilc_txq_task, (void *)wilc,
- "K_TXQ_TASK");
+ "%s-tx", dev->name);
if (IS_ERR(wilc->txq_thread)) {
netdev_err(dev, "couldn't create TXQ thread\n");
wilc->close = 0;
@@ -574,6 +574,7 @@ static int wilc_mac_open(struct net_device *ndev)
struct wilc *wl = vif->wilc;
int ret = 0;
struct mgmt_frame_regs mgmt_regs = {};
+ u8 addr[ETH_ALEN] __aligned(2);
if (!wl || !wl->dev) {
netdev_err(ndev, "device not ready\n");
@@ -596,10 +597,9 @@ static int wilc_mac_open(struct net_device *ndev)
vif->idx);
if (is_valid_ether_addr(ndev->dev_addr)) {
- wilc_set_mac_address(vif, ndev->dev_addr);
+ ether_addr_copy(addr, ndev->dev_addr);
+ wilc_set_mac_address(vif, addr);
} else {
- u8 addr[ETH_ALEN];
-
wilc_get_mac_address(vif, addr);
eth_hw_addr_set(ndev, addr);
}
@@ -905,7 +905,6 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc_wlan_cfg_deinit(wilc);
wlan_deinit_locks(wilc);
- kfree(wilc->bus_data);
wiphy_unregister(wilc->wiphy);
wiphy_free(wilc->wiphy);
}
@@ -962,8 +961,15 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
ret = register_netdev(ndev);
if (ret) {
- free_netdev(ndev);
- return ERR_PTR(-EFAULT);
+ ret = -EFAULT;
+ goto error;
+ }
+
+ wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM,
+ ndev->name);
+ if (!wl->hif_workqueue) {
+ ret = -ENOMEM;
+ goto error;
}
ndev->needs_free_netdev = true;
@@ -977,6 +983,10 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
synchronize_srcu(&wl->srcu);
return vif;
+
+ error:
+ free_netdev(ndev);
+ return ERR_PTR(ret);
}
MODULE_LICENSE("GPL");
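Two things happen in the hunk above: the ordered workqueue is now allocated only after register_netdev() has resolved "wlan%d" to the final interface name (so the queue can be named after it), and the failure paths collapse into a single label. A condensed sketch of the pattern with a hypothetical driver context; note that once register_netdev() has succeeded, unregister_netdev() must precede free_netdev():

static int example_ifc_init(struct example_priv *p, struct net_device *ndev)
{
	int ret;

	ret = register_netdev(ndev);	/* "wlan%d" becomes e.g. "wlan0" */
	if (ret)
		goto err_free;

	/* WQ_MEM_RECLAIM: the queue may be flushed on the reclaim path */
	p->wq = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM, ndev->name);
	if (!p->wq) {
		ret = -ENOMEM;
		goto err_unregister;
	}
	return 0;

err_unregister:
	unregister_netdev(ndev);
err_free:
	free_netdev(ndev);
	return ret;
}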
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index b9a88b3e322f..a067274c2014 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -212,6 +212,8 @@ struct wilc {
s8 mac_status;
struct clk *rtc_clk;
bool initialized;
+ u32 chipid;
+ bool power_save_mode;
int dev_irq_num;
int close;
u8 vif_num;
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 26ebf6664342..ec595dbd8959 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -167,9 +167,11 @@ free:
static void wilc_sdio_remove(struct sdio_func *func)
{
struct wilc *wilc = sdio_get_drvdata(func);
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
+ kfree(sdio_priv);
}
static int wilc_sdio_reset(struct wilc *wilc)
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 640850f989dd..2c2ed4b09efd 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -8,10 +8,13 @@
#include <linux/spi/spi.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
+#include <linux/gpio/consumer.h>
#include "netdev.h"
#include "cfg80211.h"
+#define SPI_MODALIAS "wilc1000_spi"
+
static bool enable_crc7; /* protect SPI commands with CRC7 */
module_param(enable_crc7, bool, 0644);
MODULE_PARM_DESC(enable_crc7,
@@ -43,6 +46,10 @@ struct wilc_spi {
bool probing_crc; /* true if we're probing chip's CRC config */
bool crc7_enabled; /* true if crc7 is currently enabled */
bool crc16_enabled; /* true if crc16 is currently enabled */
+ struct wilc_gpios {
+ struct gpio_desc *enable; /* ENABLE GPIO or NULL */
+ struct gpio_desc *reset; /* RESET GPIO or NULL */
+ } gpios;
};
static const struct wilc_hif_func wilc_hif_spi;
@@ -99,8 +106,6 @@ static int wilc_spi_reset(struct wilc *wilc);
#define DATA_PKT_LOG_SZ DATA_PKT_LOG_SZ_MAX
#define DATA_PKT_SZ (1 << DATA_PKT_LOG_SZ)
-#define USE_SPI_DMA 0
-
#define WILC_SPI_COMMAND_STAT_SUCCESS 0
#define WILC_GET_RESP_HDR_START(h) (((h) >> 4) & 0xf)
@@ -152,6 +157,50 @@ struct wilc_spi_special_cmd_rsp {
u8 status;
} __packed;
+static int wilc_parse_gpios(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ struct wilc_gpios *gpios = &spi_priv->gpios;
+
+ /* get ENABLE pin and deassert it (if it is defined): */
+ gpios->enable = devm_gpiod_get_optional(&spi->dev,
+ "enable", GPIOD_OUT_LOW);
+ /* get RESET pin and assert it (if it is defined): */
+ if (gpios->enable) {
+ /* if enable pin exists, reset must exist as well */
+ gpios->reset = devm_gpiod_get(&spi->dev,
+ "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpios->reset)) {
+ dev_err(&spi->dev, "missing reset gpio.\n");
+ return PTR_ERR(gpios->reset);
+ }
+ } else {
+ gpios->reset = devm_gpiod_get_optional(&spi->dev,
+ "reset", GPIOD_OUT_HIGH);
+ }
+ return 0;
+}
+
+static void wilc_wlan_power(struct wilc *wilc, bool on)
+{
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ struct wilc_gpios *gpios = &spi_priv->gpios;
+
+ if (on) {
+ /* assert ENABLE: */
+ gpiod_set_value(gpios->enable, 1);
+ mdelay(5);
+ /* deassert RESET: */
+ gpiod_set_value(gpios->reset, 0);
+ } else {
+ /* assert RESET: */
+ gpiod_set_value(gpios->reset, 1);
+ /* deassert ENABLE: */
+ gpiod_set_value(gpios->enable, 0);
+ }
+}
+
static int wilc_bus_probe(struct spi_device *spi)
{
int ret;
@@ -171,6 +220,10 @@ static int wilc_bus_probe(struct spi_device *spi)
wilc->bus_data = spi_priv;
wilc->dev_irq_num = spi->irq;
+ ret = wilc_parse_gpios(wilc);
+ if (ret < 0)
+ goto netdev_cleanup;
+
wilc->rtc_clk = devm_clk_get_optional(&spi->dev, "rtc");
if (IS_ERR(wilc->rtc_clk)) {
ret = PTR_ERR(wilc->rtc_clk);
@@ -190,9 +243,11 @@ free:
static int wilc_bus_remove(struct spi_device *spi)
{
struct wilc *wilc = spi_get_drvdata(spi);
+ struct wilc_spi *spi_priv = wilc->bus_data;
clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
+ kfree(spi_priv);
return 0;
}
@@ -203,11 +258,18 @@ static const struct of_device_id wilc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, wilc_of_match);
+static const struct spi_device_id wilc_spi_id[] = {
+ { "wilc1000", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, wilc_spi_id);
+
static struct spi_driver wilc_spi_driver = {
.driver = {
- .name = MODALIAS,
+ .name = SPI_MODALIAS,
.of_match_table = wilc_of_match,
},
+ .id_table = wilc_spi_id,
.probe = wilc_bus_probe,
.remove = wilc_bus_remove,
};
@@ -240,7 +302,6 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
msg.spi = spi;
- msg.is_dma_mapped = USE_SPI_DMA;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -284,7 +345,6 @@ static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
msg.spi = spi;
- msg.is_dma_mapped = USE_SPI_DMA;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -323,7 +383,6 @@ static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
msg.spi = spi;
- msg.is_dma_mapped = USE_SPI_DMA;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -977,9 +1036,10 @@ static int wilc_spi_reset(struct wilc *wilc)
static int wilc_spi_deinit(struct wilc *wilc)
{
- /*
- * TODO:
- */
+ struct wilc_spi *spi_priv = wilc->bus_data;
+
+ spi_priv->isinit = false;
+ wilc_wlan_power(wilc, false);
return 0;
}
@@ -1000,6 +1060,8 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
dev_err(&spi->dev, "Fail cmd read chip id...\n");
}
+ wilc_wlan_power(wilc, true);
+
/*
* configure protocol
*/
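wilc_wlan_power() above calls gpiod_set_value() on both descriptors without NULL checks, and that is deliberate: devm_gpiod_get_optional() returns NULL (not an error pointer) when the corresponding GPIO property is absent, and gpiolib treats a NULL descriptor as a no-op in gpiod_set_value(). A minimal sketch of that contract, with a hypothetical "enable" con_id:

#include <linux/gpio/consumer.h>

static int example_power_on(struct device *dev)
{
	struct gpio_desc *en;

	/* NULL when the property is missing; ERR_PTR() on real errors */
	en = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(en))
		return PTR_ERR(en);

	gpiod_set_value(en, 1);	/* silently ignored when en == NULL */
	return 0;
}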
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index ea81ef120fd1..fb5633a05fd5 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -20,13 +20,13 @@ static inline bool is_wilc1000(u32 id)
static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire)
{
mutex_lock(&wilc->hif_cs);
- if (acquire == WILC_BUS_ACQUIRE_AND_WAKEUP)
+ if (acquire == WILC_BUS_ACQUIRE_AND_WAKEUP && wilc->power_save_mode)
chip_wakeup(wilc);
}
static inline void release_bus(struct wilc *wilc, enum bus_release release)
{
- if (release == WILC_BUS_RELEASE_ALLOW_SLEEP)
+ if (release == WILC_BUS_RELEASE_ALLOW_SLEEP && wilc->power_save_mode)
chip_allow_sleep(wilc);
mutex_unlock(&wilc->hif_cs);
}
@@ -626,7 +626,6 @@ void chip_wakeup(struct wilc *wilc)
u32 clk_status_val = 0, trials = 0;
u32 wakeup_reg, wakeup_bit;
u32 clk_status_reg, clk_status_bit;
- u32 to_host_from_fw_reg, to_host_from_fw_bit;
u32 from_host_to_fw_reg, from_host_to_fw_bit;
const struct wilc_hif_func *hif_func = wilc->hif_func;
@@ -637,8 +636,6 @@ void chip_wakeup(struct wilc *wilc)
clk_status_bit = WILC_SDIO_CLK_STATUS_BIT;
from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG;
from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT;
- to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG;
- to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT;
} else {
wakeup_reg = WILC_SPI_WAKEUP_REG;
wakeup_bit = WILC_SPI_WAKEUP_BIT;
@@ -646,8 +643,6 @@ void chip_wakeup(struct wilc *wilc)
clk_status_bit = WILC_SPI_CLK_STATUS_BIT;
from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG;
from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT;
- to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG;
- to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT;
}
/* indicate host wakeup */
@@ -1229,7 +1224,8 @@ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
ret = 0;
release:
- release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
+ /* host comm is disabled - we can't issue sleep command anymore: */
+ release_bus(wilc, WILC_BUS_RELEASE_ONLY);
return ret;
}
@@ -1258,7 +1254,7 @@ void wilc_wlan_cleanup(struct net_device *dev)
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
wilc->tx_buffer = NULL;
- wilc->hif_func->hif_deinit(NULL);
+ wilc->hif_func->hif_deinit(wilc);
}
static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type,
@@ -1447,31 +1443,30 @@ release:
u32 wilc_get_chipid(struct wilc *wilc, bool update)
{
- static u32 chipid;
- u32 tempchipid = 0;
+ u32 chipid = 0;
u32 rfrevid = 0;
- if (chipid == 0 || update) {
- wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &tempchipid);
+ if (wilc->chipid == 0 || update) {
+ wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &chipid);
wilc->hif_func->hif_read_reg(wilc, WILC_RF_REVISION_ID,
&rfrevid);
- if (!is_wilc1000(tempchipid)) {
- chipid = 0;
- return chipid;
+ if (!is_wilc1000(chipid)) {
+ wilc->chipid = 0;
+ return wilc->chipid;
}
- if (tempchipid == WILC_1000_BASE_ID_2A) { /* 0x1002A0 */
+ if (chipid == WILC_1000_BASE_ID_2A) { /* 0x1002A0 */
if (rfrevid != 0x1)
- tempchipid = WILC_1000_BASE_ID_2A_REV1;
- } else if (tempchipid == WILC_1000_BASE_ID_2B) { /* 0x1002B0 */
+ chipid = WILC_1000_BASE_ID_2A_REV1;
+ } else if (chipid == WILC_1000_BASE_ID_2B) { /* 0x1002B0 */
if (rfrevid == 0x4)
- tempchipid = WILC_1000_BASE_ID_2B_REV1;
+ chipid = WILC_1000_BASE_ID_2B_REV1;
else if (rfrevid != 0x3)
- tempchipid = WILC_1000_BASE_ID_2B_REV2;
+ chipid = WILC_1000_BASE_ID_2B_REV2;
}
- chipid = tempchipid;
+ wilc->chipid = chipid;
}
- return chipid;
+ return wilc->chipid;
}
int wilc_wlan_init(struct net_device *dev)
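The wilc_get_chipid() rework replaces a function-local static cache with a field in struct wilc. A function-static is a single copy shared across every probed device, so with two adapters the second would inherit the first one's cached ID; per-device state has to live in the per-device context. Schematically, with a hypothetical dev_ctx type:

/* Anti-pattern: one cache for all devices */
u32 broken_get_id(struct dev_ctx *d)
{
	static u32 cached;

	if (!cached)
		cached = read_id_reg(d);	/* hypothetical register read */
	return cached;			/* stale for a second device */
}

/* Per-device cache, as the hunk above does with wilc->chipid */
u32 fixed_get_id(struct dev_ctx *d)
{
	if (!d->cached_id)
		d->cached_id = read_id_reg(d);
	return d->cached_id;
}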
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index 13fde636aa0e..eb7978166d73 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -213,8 +213,6 @@
#define WILC_RX_BUFF_SIZE (96 * 1024)
#define WILC_TX_BUFF_SIZE (64 * 1024)
-#define MODALIAS "WILC_SPI"
-
#define NQUEUES 4
#define AC_BUFFER_SIZE 1000
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index e3a3dc3e45b4..2987ad9271f6 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2746,7 +2746,7 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
nr = nr * 10 + c;
p++;
} while (--len);
- *(int *)PDE_DATA(file_inode(file)) = nr;
+ *(int *)pde_data(file_inode(file)) = nr;
return count;
}
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index a42e2081b75f..06d59ffb7444 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4859,7 +4859,7 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
* rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
*/
tx_desc->txdw4 |= cpu_to_le32(rts_rate << TXDESC32_RTS_RATE_SHIFT);
- if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ if (ampdu_enable || (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
} else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
@@ -4930,7 +4930,7 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
/*
* rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
*/
- if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ if (ampdu_enable || (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
} else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 6312fddd9c00..eaba66113328 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
_initpabias(hw);
rtl92c_dm_init(hw);
exit:
+ local_irq_disable();
local_irq_restore(flags);
return err;
}
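The extra local_irq_disable() looks odd but restores an invariant: local_irq_restore() is meant to run with interrupts still disabled from the matching local_irq_save(), and with CONFIG_DEBUG_IRQFLAGS the kernel warns if they were re-enabled in between, which can happen here because parts of rtl92cu_hw_init() may sleep. The shape of the fix, with a hypothetical helper standing in for the init body:

unsigned long flags;

local_irq_save(flags);
err = init_that_may_reenable_irqs();	/* hypothetical */
local_irq_disable();		/* IRQs off again before restoring */
local_irq_restore(flags);	/* back to the saved state, warning-free */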
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 9b83c710c9b8..51fe51bb0504 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -899,7 +899,7 @@ static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
u8 place = chnl;
if (chnl > 14) {
- for (place = 14; place < sizeof(channel5g); place++) {
+ for (place = 14; place < ARRAY_SIZE(channel5g); place++) {
if (channel5g[place] == chnl) {
place++;
break;
@@ -1366,7 +1366,7 @@ u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
u8 place;
if (chnl > 14) {
- for (place = 14; place < sizeof(channel_all); place++) {
+ for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
if (channel_all[place] == chnl)
return place - 13;
}
@@ -2428,7 +2428,7 @@ static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel)
int i;
- for (i = 0; i < sizeof(channel5g); i++)
+ for (i = 0; i < ARRAY_SIZE(channel5g); i++)
if (channel == channel5g[i])
return true;
return false;
@@ -2692,9 +2692,8 @@ void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
u8 i;
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "settings regs %d default regs %d\n",
- (int)(sizeof(rtlphy->iqk_matrix) /
- sizeof(struct iqk_matrix_regs)),
+ "settings regs %zu default regs %d\n",
+ ARRAY_SIZE(rtlphy->iqk_matrix),
IQK_MATRIX_REG_NUM);
/* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
@@ -2861,16 +2860,14 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
case BAND_ON_5G:
/* Get first channel error when change between
* 5G and 2.4G band. */
- if (channel <= 14)
+ if (WARN_ONCE(channel <= 14, "rtl8192de: 5G but channel<=14\n"))
return 0;
- WARN_ONCE((channel <= 14), "rtl8192de: 5G but channel<=14\n");
break;
case BAND_ON_2_4G:
/* Get first channel error when change between
* 5G and 2.4G band. */
- if (channel > 14)
+ if (WARN_ONCE(channel > 14, "rtl8192de: 2G but channel>14\n"))
return 0;
- WARN_ONCE((channel > 14), "rtl8192de: 2G but channel>14\n");
break;
default:
WARN_ONCE(true, "rtl8192de: Invalid WirelessMode(%#x)!!\n",
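The sizeof() to ARRAY_SIZE() conversions matter because sizeof() counts bytes while these loops want elements; the two only coincide for byte-sized element types (the channel tables here happen to be u8 today, so the old bound worked by accident). A short illustration:

static const u16 chans[] = { 36, 40, 44, 48 };

static void walk_chans(void)
{
	size_t i;

	/* sizeof(chans) == 8 (bytes); ARRAY_SIZE(chans) == 4 (elements).
	 * Looping to sizeof() would overrun for any element type wider
	 * than one byte. */
	for (i = 0; i < ARRAY_SIZE(chans); i++)
		pr_debug("ch %u\n", chans[i]);
}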
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index aa07856411b1..31f9e9e5c680 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -108,7 +108,6 @@
#define CHANNEL_GROUP_IDX_5GM 6
#define CHANNEL_GROUP_IDX_5GH 9
#define CHANNEL_GROUP_MAX_5G 9
-#define CHANNEL_MAX_NUMBER_2G 14
#define AVG_THERMAL_NUM 8
#define AVG_THERMAL_NUM_88E 4
#define AVG_THERMAL_NUM_8723BE 4
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index 73d6807a8cdf..834c66ec0af9 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -15,6 +15,7 @@ rtw88_core-y += main.o \
ps.o \
sec.o \
bf.o \
+ sar.o \
regd.o
rtw88_core-$(CONFIG_PM) += wow.o
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index aff70e4ae028..df750b3a35e9 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -130,7 +130,8 @@ void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif,
BIT_WMAC_USE_NDPARATE |
(csi_rsc << 13);
- rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_SOUNDING);
+ rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM,
+ RTW_SND_CTRL_SOUNDING);
rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, 0x26);
rtw_write8_clr(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF_REPORT_POLL);
rtw_write8_clr(rtwdev, REG_RXFLTMAP4, BIT_RXFLTMAP4_BF_REPORT_POLL);
@@ -177,7 +178,7 @@ void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev)
void rtw_bf_del_sounding(struct rtw_dev *rtwdev)
{
- rtw_write8(rtwdev, REG_SND_PTCL_CTRL, 0);
+ rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, 0);
}
void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif,
@@ -204,7 +205,8 @@ void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif,
}
/* Sounding protocol control */
- rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_SOUNDING);
+ rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM,
+ RTW_SND_CTRL_SOUNDING);
/* MAC address/Partial AID of Beamformer */
for (i = 0; i < ETH_ALEN; i++)
@@ -273,7 +275,8 @@ void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev,
struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
rtw_dbg(rtwdev, RTW_DBG_BF, "remove as a su bfee\n");
- rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_REMOVE);
+ rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM,
+ RTW_SND_CTRL_REMOVE);
switch (bfee->su_reg_index) {
case 0:
@@ -298,7 +301,8 @@ void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev,
{
struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
- rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_REMOVE);
+ rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM,
+ RTW_SND_CTRL_REMOVE);
rtw_bf_del_bfer_entry_mu(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/bf.h b/drivers/net/wireless/realtek/rtw88/bf.h
index 17855edb5006..7b40c2c03856 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.h
+++ b/drivers/net/wireless/realtek/rtw88/bf.h
@@ -13,6 +13,9 @@
#define REG_ASSOCIATED_BFMER1_INFO 0x06EC
#define REG_TX_CSI_RPT_PARAM_BW20 0x06F4
#define REG_SND_PTCL_CTRL 0x0718
+#define BIT_DIS_CHK_VHTSIGB_CRC BIT(6)
+#define BIT_DIS_CHK_VHTSIGA_CRC BIT(5)
+#define BIT_MASK_BEAMFORM (GENMASK(4, 0) | BIT(7))
#define REG_MU_TX_CTL 0x14C0
#define REG_MU_STA_GID_VLD 0x14C4
#define REG_MU_STA_USER_POS_INFO 0x14C8
@@ -42,8 +45,8 @@
#define BIT_RXFLTMAP4_BF_REPORT_POLL BIT(4)
#define RTW_NDP_RX_STANDBY_TIME 0x70
-#define RTW_SND_CTRL_REMOVE 0xD8
-#define RTW_SND_CTRL_SOUNDING 0xDB
+#define RTW_SND_CTRL_REMOVE 0x98
+#define RTW_SND_CTRL_SOUNDING 0x9B
enum csi_seg_len {
HAL_CSI_SEG_4K = 0,
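The new RTW_SND_CTRL_* values pair with BIT_MASK_BEAMFORM: the mask is GENMASK(4, 0) | BIT(7) = 0x9f, and the new constants are exactly the old ones with bits 5 and 6 (the VHT SIG CRC-check disable bits defined above) dropped, so a masked write no longer clobbers those bits. Checking the arithmetic:

/* GENMASK(4, 0) | BIT(7) == 0x1f | 0x80 == 0x9f */
static_assert((0xdb & 0x9f) == 0x9b);	/* old SOUNDING -> new SOUNDING */
static_assert((0xd8 & 0x9f) == 0x98);	/* old REMOVE   -> new REMOVE   */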
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 682b23502e6e..e429428232c1 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -152,6 +152,22 @@ static int rtw_debugfs_get_rf_read(struct seq_file *m, void *v)
return 0;
}
+static int rtw_debugfs_get_fix_rate(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 fix_rate = dm_info->fix_rate;
+
+ if (fix_rate >= DESC_RATE_MAX) {
+ seq_printf(m, "Fix rate disabled, fix_rate = %u\n", fix_rate);
+ return 0;
+ }
+
+ seq_printf(m, "Data frames fixed at desc rate %u\n", fix_rate);
+ return 0;
+}
+
static int rtw_debugfs_copy_from_user(char tmp[], int size,
const char __user *buffer, size_t count,
int num)
@@ -437,6 +453,31 @@ static ssize_t rtw_debugfs_set_rf_read(struct file *filp,
return count;
}
+static ssize_t rtw_debugfs_set_fix_rate(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 fix_rate;
+ char tmp[32 + 1];
+ int ret;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+
+ ret = kstrtou8(tmp, 0, &fix_rate);
+ if (ret) {
+ rtw_warn(rtwdev, "invalid args, [rate]\n");
+ return ret;
+ }
+
+ dm_info->fix_rate = fix_rate;
+
+ return count;
+}
+
static int rtw_debug_get_mac_page(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
@@ -590,9 +631,11 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
u8 ch = hal->current_channel;
u8 regd = rtw_regd_get(rtwdev);
+ seq_printf(m, "channel: %u\n", ch);
+ seq_printf(m, "bandwidth: %u\n", bw);
seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd));
- seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n",
- "path", "rate", "pwr", "", "base", "", "byr", "lmt", "rem");
+ seq_printf(m, "%-4s %-10s %-9s %-9s (%-4s %-4s %-4s) %-4s\n",
+ "path", "rate", "pwr", "base", "byr", "lmt", "sar", "rem");
mutex_lock(&hal->tx_power_mutex);
for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
@@ -614,13 +657,15 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
seq_printf(m, "%4c ", path + 'A');
rtw_print_rate(m, rate);
- seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d) %4d\n",
+ seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d %4d) %4d\n",
hal->tx_pwr_tbl[path][rate],
hal->tx_pwr_tbl[path][rate],
pwr_param.pwr_base,
- min_t(s8, pwr_param.pwr_offset,
- pwr_param.pwr_limit),
+ min3(pwr_param.pwr_offset,
+ pwr_param.pwr_limit,
+ pwr_param.pwr_sar),
pwr_param.pwr_offset, pwr_param.pwr_limit,
+ pwr_param.pwr_sar,
pwr_param.pwr_remnant);
}
}
@@ -904,6 +949,39 @@ static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v)
return 0;
}
+static ssize_t rtw_debugfs_set_force_lowest_basic_rate(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ bool input;
+ int err;
+
+ err = kstrtobool_from_user(buffer, count, &input);
+ if (err)
+ return err;
+
+ if (input)
+ set_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags);
+ else
+ clear_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags);
+
+ return count;
+}
+
+static int rtw_debugfs_get_force_lowest_basic_rate(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+
+ seq_printf(m, "force lowest basic rate: %d\n",
+ test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags));
+
+ return 0;
+}
+
static ssize_t rtw_debugfs_set_dm_cap(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
@@ -1061,6 +1139,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_read_reg = {
.cb_read = rtw_debugfs_get_read_reg,
};
+static struct rtw_debugfs_priv rtw_debug_priv_fix_rate = {
+ .cb_write = rtw_debugfs_set_fix_rate,
+ .cb_read = rtw_debugfs_get_fix_rate,
+};
+
static struct rtw_debugfs_priv rtw_debug_priv_dump_cam = {
.cb_write = rtw_debugfs_set_single_input,
.cb_read = rtw_debugfs_get_dump_cam,
@@ -1094,6 +1177,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = {
.cb_read = rtw_debugfs_get_fw_crash,
};
+static struct rtw_debugfs_priv rtw_debug_priv_force_lowest_basic_rate = {
+ .cb_write = rtw_debugfs_set_force_lowest_basic_rate,
+ .cb_read = rtw_debugfs_get_force_lowest_basic_rate,
+};
+
static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
.cb_write = rtw_debugfs_set_dm_cap,
.cb_read = rtw_debugfs_get_dm_cap,
@@ -1126,6 +1214,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_rw(read_reg);
rtw_debugfs_add_w(rf_write);
rtw_debugfs_add_rw(rf_read);
+ rtw_debugfs_add_rw(fix_rate);
rtw_debugfs_add_rw(dump_cam);
rtw_debugfs_add_rw(rsvd_page);
rtw_debugfs_add_r(phy_info);
@@ -1174,6 +1263,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_r(tx_pwr_tbl);
rtw_debugfs_add_rw(edcca_enable);
rtw_debugfs_add_rw(fw_crash);
+ rtw_debugfs_add_rw(force_lowest_basic_rate);
rtw_debugfs_add_rw(dm_cap);
}
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index 47c57f395f52..61f8369fe2d6 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -22,6 +22,7 @@ enum rtw_debug_mask {
RTW_DBG_CFO = 0x00002000,
RTW_DBG_PATH_DIV = 0x00004000,
RTW_DBG_ADAPTIVITY = 0x00008000,
+ RTW_DBG_HW_SCAN = 0x00010000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 0c4f2a2f2d7f..2f7c036f9022 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -28,6 +28,12 @@ static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
case C2H_CCX_RPT:
rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
break;
+ case C2H_SCAN_STATUS_RPT:
+ rtw_hw_scan_status_report(rtwdev, skb);
+ break;
+ case C2H_CHAN_SWITCH:
+ rtw_hw_scan_chan_switch(rtwdev, skb);
+ break;
default:
break;
}
@@ -1777,3 +1783,385 @@ void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+
+static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct sk_buff_head *list,
+ struct rtw_vif *rtwvif)
+{
+ struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct sk_buff *new;
+ u8 idx;
+
+ for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
+ if (!(BIT(idx) & chip->band))
+ continue;
+ new = skb_copy(skb, GFP_KERNEL);
+ skb_put_data(new, ies->ies[idx], ies->len[idx]);
+ skb_put_data(new, ies->common_ies, ies->common_ie_len);
+ skb_queue_tail(list, new);
+ }
+}
+
+static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
+ struct sk_buff_head *probe_req_list)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct sk_buff *skb, *tmp;
+ u8 page_offset = 1, *buf, page_size = chip->page_size;
+ u8 pages = page_offset + num_ssids * RTW_PROBE_PG_CNT;
+ u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
+ u16 buf_offset = page_size * page_offset;
+ u8 tx_desc_sz = chip->tx_pkt_desc_sz;
+ unsigned int pkt_len;
+ int ret;
+
+ buf = kzalloc(page_size * pages, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf_offset -= tx_desc_sz;
+ skb_queue_walk_safe(probe_req_list, skb, tmp) {
+ skb_unlink(skb, probe_req_list);
+ rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ);
+ if (skb->len > page_size * RTW_PROBE_PG_CNT) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(buf + buf_offset, skb->data, skb->len);
+ pkt_len = skb->len - tx_desc_sz;
+ loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset;
+ __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc);
+
+ buf_offset += RTW_PROBE_PG_CNT * page_size;
+ page_offset += RTW_PROBE_PG_CNT;
+ kfree_skb(skb);
+ }
+
+ ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, buf_offset);
+ if (ret) {
+ rtw_err(rtwdev, "Download probe request to firmware failed\n");
+ goto out;
+ }
+
+ rtwdev->scan_info.probe_pg_size = page_offset;
+out:
+ kfree(buf);
+
+ return ret;
+}
+
+static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+ u8 num = req->n_ssids, i;
+
+ skb_queue_head_init(&list);
+ for (i = 0; i < num; i++) {
+ skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
+ req->ssids[i].ssid,
+ req->ssids[i].ssid_len,
+ req->ie_len);
+ rtw_append_probe_req_ie(rtwdev, skb, &list, rtwvif);
+ kfree_skb(skb);
+ }
+
+ return _rtw_hw_scan_update_probe_req(rtwdev, num, &list);
+}
+
+static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
+ struct rtw_chan_list *list, u8 *buf)
+{
+ u8 *chan = &buf[list->size];
+ u8 info_size = RTW_CH_INFO_SIZE;
+
+ if (list->size > list->buf_size)
+ return -ENOMEM;
+
+ CH_INFO_SET_CH(chan, info->channel);
+ CH_INFO_SET_PRI_CH_IDX(chan, info->pri_ch_idx);
+ CH_INFO_SET_BW(chan, info->bw);
+ CH_INFO_SET_TIMEOUT(chan, info->timeout);
+ CH_INFO_SET_ACTION_ID(chan, info->action_id);
+ CH_INFO_SET_EXTRA_INFO(chan, info->extra_info);
+ if (info->extra_info) {
+ EXTRA_CH_INFO_SET_ID(chan, RTW_SCAN_EXTRA_ID_DFS);
+ EXTRA_CH_INFO_SET_INFO(chan, RTW_SCAN_EXTRA_ACTION_SCAN);
+ EXTRA_CH_INFO_SET_SIZE(chan, RTW_EX_CH_INFO_SIZE -
+ RTW_EX_CH_INFO_HDR_SIZE);
+ EXTRA_CH_INFO_SET_DFS_EXT_TIME(chan, RTW_DFS_CHAN_TIME);
+ info_size += RTW_EX_CH_INFO_SIZE;
+ }
+ list->size += info_size;
+ list->ch_num++;
+
+ return 0;
+}
+
+static int rtw_add_chan_list(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
+ struct rtw_chan_list *list, u8 *buf)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ struct ieee80211_channel *channel;
+ int i, ret = 0;
+
+ for (i = 0; i < req->n_channels; i++) {
+ struct rtw_chan_info ch_info = {0};
+
+ channel = req->channels[i];
+ ch_info.channel = channel->hw_value;
+ ch_info.bw = RTW_SCAN_WIDTH;
+ ch_info.pri_ch_idx = RTW_PRI_CH_IDX;
+ ch_info.timeout = req->duration_mandatory ?
+ req->duration : RTW_CHANNEL_TIME;
+
+ if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) {
+ ch_info.action_id = RTW_CHANNEL_RADAR;
+ ch_info.extra_info = 1;
+ /* Overwrite duration for passive scans if necessary */
+ ch_info.timeout = ch_info.timeout > RTW_PASS_CHAN_TIME ?
+ ch_info.timeout : RTW_PASS_CHAN_TIME;
+ } else {
+ ch_info.action_id = RTW_CHANNEL_ACTIVE;
+ }
+
+ ret = rtw_add_chan_info(rtwdev, &ch_info, list, buf);
+ if (ret)
+ return ret;
+ }
+
+ if (list->size > fifo->rsvd_pg_num << TX_PAGE_SIZE_SHIFT) {
+ rtw_err(rtwdev, "List exceeds rsvd page total size\n");
+ return -EINVAL;
+ }
+
+ list->addr = fifo->rsvd_h2c_info_addr + rtwdev->scan_info.probe_pg_size;
+ ret = rtw_fw_write_data_rsvd_page(rtwdev, list->addr, buf, list->size);
+ if (ret)
+ rtw_err(rtwdev, "Download channel list failed\n");
+
+ return ret;
+}
+
+static void rtw_fw_set_scan_offload(struct rtw_dev *rtwdev,
+ struct rtw_ch_switch_option *opt,
+ struct rtw_vif *rtwvif,
+ struct rtw_chan_list *list)
+{
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ /* reserve one dummy page at the beginning for tx descriptor */
+ u8 pkt_loc = fifo->rsvd_h2c_info_addr - fifo->rsvd_boundary + 1;
+ bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_SCAN_OFFLOAD);
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, H2C_PKT_CH_SWITCH_LEN);
+
+ SCAN_OFFLOAD_SET_START(h2c_pkt, opt->switch_en);
+ SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, opt->back_op_en);
+ SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, random_seq);
+ SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, req->no_cck);
+ SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, list->ch_num);
+ SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, list->size);
+ SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, list->addr - fifo->rsvd_boundary);
+ SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, scan_info->op_chan);
+ SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, scan_info->op_pri_ch_idx);
+ SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, scan_info->op_bw);
+ SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, rtwvif->port);
+ SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, req->duration_mandatory ?
+ req->duration : RTW_CHANNEL_TIME);
+ SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, RTW_OFF_CHAN_TIME);
+ SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, req->n_ssids);
+ SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, pkt_loc);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
+
+void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *scan_req)
+{
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *req = &scan_req->req;
+ u8 mac_addr[ETH_ALEN];
+
+ rtwdev->scan_info.scanning_vif = vif;
+ rtwvif->scan_ies = &scan_req->ies;
+ rtwvif->scan_req = req;
+
+ ieee80211_stop_queues(rtwdev->hw);
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+ get_random_mask_addr(mac_addr, req->mac_addr,
+ req->mac_addr_mask);
+ else
+ ether_addr_copy(mac_addr, vif->addr);
+
+ rtw_core_scan_start(rtwdev, rtwvif, mac_addr, true);
+
+ rtwdev->hal.rcr &= ~BIT_CBSSID_BCN;
+ rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
+}
+
+void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+ struct rtw_vif *rtwvif;
+
+ if (!vif)
+ return;
+
+ rtwdev->hal.rcr |= BIT_CBSSID_BCN;
+ rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
+
+ rtw_core_scan_complete(rtwdev, vif);
+
+ ieee80211_wake_queues(rtwdev->hw);
+ ieee80211_scan_completed(rtwdev->hw, &info);
+
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ rtwvif->scan_req = NULL;
+ rtwvif->scan_ies = NULL;
+ rtwdev->scan_info.scanning_vif = NULL;
+}
+
+static int rtw_hw_scan_prehandle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
+ struct rtw_chan_list *list)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ int size = req->n_channels * (RTW_CH_INFO_SIZE + RTW_EX_CH_INFO_SIZE);
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = rtw_hw_scan_update_probe_req(rtwdev, rtwvif);
+ if (ret) {
+ rtw_err(rtwdev, "Update probe request failed\n");
+ goto out;
+ }
+
+ list->buf_size = size;
+ list->size = 0;
+ list->ch_num = 0;
+ ret = rtw_add_chan_list(rtwdev, rtwvif, list, buf);
+out:
+ kfree(buf);
+
+ return ret;
+}
+
+int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
+ struct rtw_ch_switch_option cs_option = {0};
+ struct rtw_chan_list chan_list = {0};
+ int ret = 0;
+
+ if (!rtwvif)
+ return -EINVAL;
+
+ cs_option.switch_en = enable;
+ cs_option.back_op_en = rtwvif->net_type == RTW_NET_MGD_LINKED;
+ if (enable) {
+ ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list);
+ if (ret)
+ goto out;
+ }
+ rtw_fw_set_scan_offload(rtwdev, &cs_option, rtwvif, &chan_list);
+out:
+ return ret;
+}
+
+void rtw_hw_scan_abort(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
+ return;
+
+ rtw_hw_scan_offload(rtwdev, vif, false);
+ rtw_hw_scan_complete(rtwdev, vif, true);
+}
+
+void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+ struct rtw_c2h_cmd *c2h;
+ bool aborted;
+ u8 rc;
+
+ if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ return;
+
+ c2h = get_c2h_from_skb(skb);
+ rc = GET_SCAN_REPORT_RETURN_CODE(c2h->payload);
+ aborted = rc != RTW_SCAN_REPORT_SUCCESS;
+ rtw_hw_scan_complete(rtwdev, vif, aborted);
+
+ if (aborted)
+ rtw_info(rtwdev, "HW scan aborted with code: %d\n", rc);
+}
+
+void rtw_store_op_chan(struct rtw_dev *rtwdev)
+{
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ scan_info->op_chan = hal->current_channel;
+ scan_info->op_bw = hal->current_band_width;
+ scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
+}
+
+static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
+
+ return channel == scan_info->op_chan;
+}
+
+void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_c2h_cmd *c2h;
+ enum rtw_scan_notify_id id;
+ u8 chan, status;
+
+ c2h = get_c2h_from_skb(skb);
+ chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload);
+ id = GET_CHAN_SWITCH_ID(c2h->payload);
+ status = GET_CHAN_SWITCH_STATUS(c2h->payload);
+
+ if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) {
+ if (rtw_is_op_chan(rtwdev, chan))
+ ieee80211_wake_queues(rtwdev->hw);
+ hal->current_channel = chan;
+ hal->current_band_type = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ } else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) {
+ if (IS_CH_5G_BAND(chan)) {
+ rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
+ } else if (IS_CH_2G_BAND(chan)) {
+ u8 chan_type;
+
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ chan_type = COEX_SWITCH_TO_24G;
+ else
+ chan_type = COEX_SWITCH_TO_24G_NOFORSCAN;
+ rtw_coex_switchband_notify(rtwdev, chan_type);
+ }
+ if (rtw_is_op_chan(rtwdev, chan))
+ ieee80211_stop_queues(rtwdev->hw);
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_HW_SCAN,
+ "Chan switch: %x, id: %x, status: %x\n", chan, id, status);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 09c7afb99e63..654c3c2e5721 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -32,6 +32,17 @@
#define SCAN_NOTIFY_TIMEOUT msecs_to_jiffies(10)
+#define RTW_CHANNEL_TIME 45
+#define RTW_OFF_CHAN_TIME 100
+#define RTW_PASS_CHAN_TIME 105
+#define RTW_DFS_CHAN_TIME 20
+#define RTW_CH_INFO_SIZE 4
+#define RTW_EX_CH_INFO_SIZE 3
+#define RTW_EX_CH_INFO_HDR_SIZE 2
+#define RTW_SCAN_WIDTH 0
+#define RTW_PRI_CH_IDX 1
+#define RTW_PROBE_PG_CNT 2
+
enum rtw_c2h_cmd_id {
C2H_CCX_TX_RPT = 0x03,
C2H_BT_INFO = 0x09,
@@ -48,7 +59,9 @@ enum rtw_c2h_cmd_id {
};
enum rtw_c2h_cmd_id_ext {
+ C2H_SCAN_STATUS_RPT = 0x3,
C2H_CCX_RPT = 0x0f,
+ C2H_CHAN_SWITCH = 0x22,
};
struct rtw_c2h_cmd {
@@ -98,9 +111,11 @@ enum rtw_fw_feature {
FW_FEATURE_LPS_C2H = BIT(1),
FW_FEATURE_LCLK = BIT(2),
FW_FEATURE_PG = BIT(3),
+ FW_FEATURE_TX_WAKE = BIT(4),
FW_FEATURE_BCN_FILTER = BIT(5),
FW_FEATURE_NOTIFY_SCAN = BIT(6),
FW_FEATURE_ADAPTIVITY = BIT(7),
+ FW_FEATURE_SCAN_OFFLOAD = BIT(8),
FW_FEATURE_MAX = BIT(31),
};
@@ -196,6 +211,51 @@ struct rtw_fw_wow_disconnect_para {
u8 retry_count;
};
+enum rtw_channel_type {
+ RTW_CHANNEL_PASSIVE,
+ RTW_CHANNEL_ACTIVE,
+ RTW_CHANNEL_RADAR,
+};
+
+enum rtw_scan_extra_id {
+ RTW_SCAN_EXTRA_ID_DFS,
+};
+
+enum rtw_scan_extra_info {
+ RTW_SCAN_EXTRA_ACTION_SCAN,
+};
+
+enum rtw_scan_report_code {
+ RTW_SCAN_REPORT_SUCCESS = 0x00,
+ RTW_SCAN_REPORT_ERR_PHYDM = 0x01,
+ RTW_SCAN_REPORT_ERR_ID = 0x02,
+ RTW_SCAN_REPORT_ERR_TX = 0x03,
+ RTW_SCAN_REPORT_CANCELED = 0x10,
+ RTW_SCAN_REPORT_CANCELED_EXT = 0x11,
+ RTW_SCAN_REPORT_FW_DISABLED = 0xF0,
+};
+
+enum rtw_scan_notify_id {
+ RTW_SCAN_NOTIFY_ID_PRESWITCH = 0x00,
+ RTW_SCAN_NOTIFY_ID_POSTSWITCH = 0x01,
+ RTW_SCAN_NOTIFY_ID_PROBE_PRETX = 0x02,
+ RTW_SCAN_NOTIFY_ID_PROBE_ISSUETX = 0x03,
+ RTW_SCAN_NOTIFY_ID_NULL0_PRETX = 0x04,
+ RTW_SCAN_NOTIFY_ID_NULL0_ISSUETX = 0x05,
+ RTW_SCAN_NOTIFY_ID_NULL0_POSTTX = 0x06,
+ RTW_SCAN_NOTIFY_ID_NULL1_PRETX = 0x07,
+ RTW_SCAN_NOTIFY_ID_NULL1_ISSUETX = 0x08,
+ RTW_SCAN_NOTIFY_ID_NULL1_POSTTX = 0x09,
+ RTW_SCAN_NOTIFY_ID_DWELLEXT = 0x0A,
+};
+
+enum rtw_scan_notify_status {
+ RTW_SCAN_NOTIFY_STATUS_SUCCESS = 0x00,
+ RTW_SCAN_NOTIFY_STATUS_FAILURE = 0x01,
+ RTW_SCAN_NOTIFY_STATUS_RESOURCE = 0x02,
+ RTW_SCAN_NOTIFY_STATUS_TIMEOUT = 0x03,
+};
+
struct rtw_ch_switch_option {
u8 periodic_option;
u32 tsf_high;
@@ -209,6 +269,8 @@ struct rtw_ch_switch_option {
u8 slow_period;
u8 slow_period_sel;
u8 nlo_en;
+ bool switch_en;
+ bool back_op_en;
};
struct rtw_fw_hdr {
@@ -265,6 +327,11 @@ struct rtw_fw_hdr_legacy {
#define GET_CCX_REPORT_SEQNUM_V1(c2h_payload) (c2h_payload[8] & 0xfc)
#define GET_CCX_REPORT_STATUS_V1(c2h_payload) (c2h_payload[9] & 0xc0)
+#define GET_SCAN_REPORT_RETURN_CODE(c2h_payload) (c2h_payload[2] & 0xff)
+
+#define GET_CHAN_SWITCH_CENTRAL_CH(c2h_payload) (c2h_payload[2])
+#define GET_CHAN_SWITCH_ID(c2h_payload) (c2h_payload[3])
+#define GET_CHAN_SWITCH_STATUS(c2h_payload) (c2h_payload[4])
#define GET_RA_REPORT_RATE(c2h_payload) (c2h_payload[0] & 0x7f)
#define GET_RA_REPORT_SGI(c2h_payload) ((c2h_payload[0] & 0x80) >> 7)
#define GET_RA_REPORT_BW(c2h_payload) (c2h_payload[6])
@@ -284,6 +351,7 @@ struct rtw_fw_hdr_legacy {
#define H2C_PKT_CH_SWITCH 0x02
#define H2C_PKT_UPDATE_PKT 0x0C
+#define H2C_PKT_SCAN_OFFLOAD 0x19
#define H2C_PKT_CH_SWITCH_LEN 0x20
#define H2C_PKT_UPDATE_PKT_LEN 0x4
@@ -334,6 +402,30 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(23, 16))
#define CHSW_INFO_SET_ACTION_ID(pkt, value) \
le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(30, 24))
+#define CHSW_INFO_SET_EXTRA_INFO(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, BIT(31))
+
+#define CH_INFO_SET_CH(pkt, value) \
+ u8p_replace_bits((u8 *)(pkt) + 0x00, value, GENMASK(7, 0))
+#define CH_INFO_SET_PRI_CH_IDX(pkt, value) \
+ u8p_replace_bits((u8 *)(pkt) + 0x01, value, GENMASK(3, 0))
+#define CH_INFO_SET_BW(pkt, value) \
+ u8p_replace_bits((u8 *)(pkt) + 0x01, value, GENMASK(7, 4))
+#define CH_INFO_SET_TIMEOUT(pkt, value) \
+ u8p_replace_bits((u8 *)(pkt) + 0x02, value, GENMASK(7, 0))
+#define CH_INFO_SET_ACTION_ID(pkt, value) \
+ u8p_replace_bits((u8 *)(pkt) + 0x03, value, GENMASK(6, 0))
+#define CH_INFO_SET_EXTRA_INFO(pkt, value) \
+ u8p_replace_bits((u8 *)(pkt) + 0x03, value, BIT(7))
+
+#define EXTRA_CH_INFO_SET_ID(pkt, value) \
+ u8p_replace_bits((u8 *)(pkt) + 0x04, value, GENMASK(6, 0))
+#define EXTRA_CH_INFO_SET_INFO(pkt, value) \
+ u8p_replace_bits((u8 *)(pkt) + 0x04, value, BIT(7))
+#define EXTRA_CH_INFO_SET_SIZE(pkt, value) \
+ u8p_replace_bits((u8 *)(pkt) + 0x05, value, GENMASK(7, 0))
+#define EXTRA_CH_INFO_SET_DFS_EXT_TIME(pkt, value) \
+ u8p_replace_bits((u8 *)(pkt) + 0x06, value, GENMASK(7, 0))
#define UPDATE_PKT_SET_SIZE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 0))
@@ -350,12 +442,18 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(2))
#define CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(4, 3))
+#define CH_SWITCH_SET_SCAN_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(5))
+#define CH_SWITCH_SET_BACK_OP_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(6))
#define CH_SWITCH_SET_INFO_LOC(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8))
#define CH_SWITCH_SET_CH_NUM(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
#define CH_SWITCH_SET_PRI_CH_IDX(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(27, 24))
+#define CH_SWITCH_SET_DEST_BW(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 28))
#define CH_SWITCH_SET_DEST_CH(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(7, 0))
#define CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, value) \
@@ -375,6 +473,41 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define CH_SWITCH_SET_INFO_SIZE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x06, value, GENMASK(15, 0))
+#define SCAN_OFFLOAD_SET_START(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0))
+#define SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1))
+#define SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(2))
+#define SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(3))
+#define SCAN_OFFLOAD_SET_VERBOSE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(4))
+#define SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8))
+#define SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 16))
+#define SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(7, 0))
+#define SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(15, 8))
+#define SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(19, 16))
+#define SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(23, 20))
+#define SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(26, 24))
+#define SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x04, value, GENMASK(15, 0))
+#define SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x04, value, GENMASK(31, 16))
+#define SCAN_OFFLOAD_SET_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(3, 0))
+#define SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(7, 4))
+#define SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(15, 8))
+
/* Command H2C */
#define H2C_CMD_RSVD_PAGE 0x0
#define H2C_CMD_MEDIA_STATUS_RPT 0x01
@@ -686,4 +819,14 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
u32 *buffer);
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
void rtw_fw_adaptivity(struct rtw_dev *rtwdev);
+void rtw_store_op_chan(struct rtw_dev *rtwdev);
+void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool aborted);
+int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool enable);
+void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb);
+void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb);
+void rtw_hw_scan_abort(struct rtw_dev *rtwdev, struct ieee80211_vif *vif);
#endif
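Read together, the CH_INFO_SET_* and EXTRA_CH_INFO_SET_* accessors above describe a packed 4-byte channel entry plus an optional 3-byte DFS extension, which is where RTW_CH_INFO_SIZE and RTW_EX_CH_INFO_SIZE come from. A byte-map sketch and a hypothetical builder for one radar channel:

/* byte 0: channel number       byte 1: bits 3:0 pri ch idx, 7:4 bw
 * byte 2: dwell timeout        byte 3: bits 6:0 action id, bit 7 extra
 * bytes 4-6 (only when byte 3 bit 7 is set):
 * byte 4: bits 6:0 extra id, bit 7 info    byte 5: extra payload size
 * byte 6: DFS extended dwell time
 */
u8 entry[RTW_CH_INFO_SIZE + RTW_EX_CH_INFO_SIZE] = {};

CH_INFO_SET_CH(entry, 52);
CH_INFO_SET_PRI_CH_IDX(entry, RTW_PRI_CH_IDX);
CH_INFO_SET_BW(entry, RTW_SCAN_WIDTH);
CH_INFO_SET_TIMEOUT(entry, RTW_PASS_CHAN_TIME);
CH_INFO_SET_ACTION_ID(entry, RTW_CHANNEL_RADAR);
CH_INFO_SET_EXTRA_INFO(entry, 1);	/* the 3 extension bytes follow */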
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 6f5629852416..ae7d97de5fdf 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -13,6 +13,7 @@
#include "bf.h"
#include "debug.h"
#include "wow.h"
+#include "sar.h"
static void rtw_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
@@ -161,6 +162,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->stats.rx_unicast = 0;
rtwvif->stats.tx_cnt = 0;
rtwvif->stats.rx_cnt = 0;
+ rtwvif->scan_req = NULL;
memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee));
rtwvif->conf = &rtw_vif_port[port];
rtw_txq_init(rtwdev, vif->txq);
@@ -372,9 +374,15 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_coex_media_status_notify(rtwdev, conf->assoc);
if (rtw_bf_support)
rtw_bf_assoc(rtwdev, vif, conf);
+ rtw_store_op_chan(rtwdev);
} else {
rtw_leave_lps(rtwdev);
rtw_bf_disassoc(rtwdev, vif, conf);
+ /* Abort ongoing scan if cancel_scan isn't issued
+ * when disconnected by peer
+ */
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ rtw_hw_scan_abort(rtwdev, vif);
}
config |= PORT_SET_NET_TYPE;
@@ -594,22 +602,9 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
- u32 config = 0;
mutex_lock(&rtwdev->mutex);
-
- rtw_leave_lps(rtwdev);
-
- ether_addr_copy(rtwvif->mac_addr, mac_addr);
- config |= PORT_SET_MAC_ADDR;
- rtw_vif_port_config(rtwdev, rtwvif, config);
-
- rtw_coex_scan_notify(rtwdev, COEX_SCAN_START);
- rtw_core_fw_scan_notify(rtwdev, true);
-
- set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
- set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
-
+ rtw_core_scan_start(rtwdev, rtwvif, mac_addr, false);
mutex_unlock(&rtwdev->mutex);
}
@@ -617,22 +612,9 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
- u32 config = 0;
mutex_lock(&rtwdev->mutex);
-
- clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
- clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
-
- rtw_core_fw_scan_notify(rtwdev, false);
-
- ether_addr_copy(rtwvif->mac_addr, vif->addr);
- config |= PORT_SET_MAC_ADDR;
- rtw_vif_port_config(rtwdev, rtwvif, config);
-
- rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH);
-
+ rtw_core_scan_complete(rtwdev, vif);
mutex_unlock(&rtwdev->mutex);
}
@@ -815,6 +797,56 @@ static void rtw_reconfig_complete(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+static int rtw_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret;
+
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
+ return 1;
+
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ return -EBUSY;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_hw_scan_start(rtwdev, vif, req);
+ ret = rtw_hw_scan_offload(rtwdev, vif, true);
+ if (ret) {
+ rtw_hw_scan_abort(rtwdev, vif);
+ rtw_err(rtwdev, "HW scan failed with status: %d\n", ret);
+ }
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw_ops_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
+ return;
+
+ if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ return;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_hw_scan_abort(rtwdev, vif);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw_ops_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ rtw_set_sar_specs(rtwdev, sar);
+
+ return 0;
+}
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
@@ -842,6 +874,9 @@ const struct ieee80211_ops rtw_ops = {
.set_antenna = rtw_ops_set_antenna,
.get_antenna = rtw_ops_get_antenna,
.reconfig_complete = rtw_reconfig_complete,
+ .hw_scan = rtw_ops_hw_scan,
+ .cancel_hw_scan = rtw_ops_cancel_hw_scan,
+ .set_sar_specs = rtw_ops_set_sar_specs,
#ifdef CONFIG_PM
.suspend = rtw_ops_suspend,
.resume = rtw_ops_resume,
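One subtlety in rtw_ops_hw_scan() above: returning 1 (a positive value, not an errno) when the firmware lacks FW_FEATURE_SCAN_OFFLOAD follows the mac80211 convention that a positive return from the hw_scan callback asks mac80211 to fall back to its software scan path, which this driver still serves through sw_scan_start/sw_scan_complete. Schematically, with hypothetical helpers:

static int example_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_scan_request *req)
{
	if (!fw_supports_scan_offload(hw))	/* hypothetical check */
		return 1;	/* > 0: mac80211 retries as a software scan */

	return start_fw_scan(hw, vif, req);	/* hypothetical; 0 or -errno */
}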
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index a0d4d6e31fb4..38252113c4a8 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -17,6 +17,7 @@
#include "tx.h"
#include "debug.h"
#include "bf.h"
+#include "sar.h"
bool rtw_disable_lps_deep_mode;
EXPORT_SYMBOL(rtw_disable_lps_deep_mode);
@@ -637,6 +638,19 @@ static void rtw_txq_ba_work(struct work_struct *work)
rtw_iterate_stas_atomic(rtwdev, rtw_txq_ba_iter, &data);
}
+void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel)
+{
+ if (IS_CH_2G_BAND(channel))
+ pkt_stat->band = NL80211_BAND_2GHZ;
+ else if (IS_CH_5G_BAND(channel))
+ pkt_stat->band = NL80211_BAND_5GHZ;
+ else
+ return;
+
+ pkt_stat->freq = ieee80211_channel_to_frequency(channel, pkt_stat->band);
+}
+EXPORT_SYMBOL(rtw_set_rx_freq_band);
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *chan_params)
{
@@ -735,8 +749,28 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
hal->current_band_width = bandwidth;
hal->current_channel = center_chan;
+ hal->current_primary_channel_index = primary_chan_idx;
hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ switch (center_chan) {
+ case 1 ... 14:
+ hal->sar_band = RTW_SAR_BAND_0;
+ break;
+ case 36 ... 64:
+ hal->sar_band = RTW_SAR_BAND_1;
+ break;
+ case 100 ... 144:
+ hal->sar_band = RTW_SAR_BAND_3;
+ break;
+ case 149 ... 177:
+ hal->sar_band = RTW_SAR_BAND_4;
+ break;
+ default:
+ WARN(1, "unknown ch(%u) to SAR band\n", center_chan);
+ hal->sar_band = RTW_SAR_BAND_0;
+ break;
+ }
+
for (i = RTW_CHANNEL_WIDTH_20; i <= RTW_MAX_CHANNEL_WIDTH; i++)
hal->cch_by_bw[i] = ch_param.cch_by_bw[i];
@@ -1278,6 +1312,50 @@ void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
}
}
+void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
+ const u8 *mac_addr, bool hw_scan)
+{
+ u32 config = 0;
+ int ret = 0;
+
+ rtw_leave_lps(rtwdev);
+
+ if (hw_scan && rtwvif->net_type == RTW_NET_NO_LINK) {
+ ret = rtw_leave_ips(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave idle state\n");
+ return;
+ }
+ }
+
+ ether_addr_copy(rtwvif->mac_addr, mac_addr);
+ config |= PORT_SET_MAC_ADDR;
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+
+ rtw_coex_scan_notify(rtwdev, COEX_SCAN_START);
+ rtw_core_fw_scan_notify(rtwdev, true);
+
+ set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
+ set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
+}
+
+void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ u32 config = 0;
+
+ clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
+ clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
+
+ rtw_core_fw_scan_notify(rtwdev, false);
+
+ ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ config |= PORT_SET_MAC_ADDR;
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+
+ rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH);
+}
+
int rtw_core_start(struct rtw_dev *rtwdev)
{
int ret;
@@ -1862,13 +1940,14 @@ int rtw_core_init(struct rtw_dev *rtwdev)
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
+ rtwdev->dm_info.fix_rate = U8_MAX;
set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
rtw_stats_init(rtwdev);
/* default rx filter setting */
rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV |
- BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
+ BIT_PKTCTL_DLEN | BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
BIT_AB | BIT_AM | BIT_APM;
ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW);
@@ -1951,6 +2030,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -1963,8 +2043,12 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS;
+ hw->wiphy->max_scan_ie_len = RTW_SCAN_MAX_IE_LEN;
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
#ifdef CONFIG_PM
hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
@@ -1973,6 +2057,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
+ hw->wiphy->sar_capa = &rtw_sar_capa;
+
ret = rtw_regd_init(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to init regd\n");
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index bbdd535b64e7..dc1cd9bd4b8a 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -22,6 +22,9 @@
#define RTW_MAX_SEC_CAM_NUM 32
#define MAX_PG_CAM_BACKUP_NUM 8
+#define RTW_SCAN_MAX_SSIDS 4
+#define RTW_SCAN_MAX_IE_LEN 128
+
#define RTW_MAX_PATTERN_NUM 12
#define RTW_MAX_PATTERN_MASK_SIZE 16
#define RTW_MAX_PATTERN_SIZE 128
@@ -81,11 +84,9 @@ struct rtw_hci {
IS_CH_5G_BAND_3(channel) || IS_CH_5G_BAND_4(channel))
enum rtw_supported_band {
- RTW_BAND_2G = 1 << 0,
- RTW_BAND_5G = 1 << 1,
- RTW_BAND_60G = 1 << 2,
-
- RTW_BAND_MAX,
+ RTW_BAND_2G = BIT(NL80211_BAND_2GHZ),
+ RTW_BAND_5G = BIT(NL80211_BAND_5GHZ),
+ RTW_BAND_60G = BIT(NL80211_BAND_60GHZ),
};
/* now, support up to 80M bw */
@@ -364,6 +365,7 @@ enum rtw_flags {
RTW_FLAG_WOWLAN,
RTW_FLAG_RESTARTING,
RTW_FLAG_RESTART_TRIGGERING,
+ RTW_FLAG_FORCE_LOWEST_RATE,
NUM_OF_RTW_FLAGS,
};
@@ -629,6 +631,8 @@ struct rtw_rx_pkt_stat {
s8 rx_snr[RTW_RF_PATH_MAX];
u8 rx_evm[RTW_RF_PATH_MAX];
s8 cfo_tail[RTW_RF_PATH_MAX];
+ u16 freq;
+ u8 band;
struct rtw_sta_info *si;
struct ieee80211_vif *vif;
@@ -799,6 +803,8 @@ struct rtw_vif {
struct list_head rsvd_page_list;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
const struct rtw_vif_port *conf;
+ struct cfg80211_scan_request *scan_req;
+ struct ieee80211_scan_ies *scan_ies;
struct rtw_traffic_stats stats;
@@ -1630,6 +1636,7 @@ struct rtw_dm_info {
u8 cck_gi_u_bnd;
u8 cck_gi_l_bnd;
+ u8 fix_rate;
u8 tx_rate;
u32 rrsr_val_init;
u32 rrsr_mask_min;
@@ -1806,6 +1813,33 @@ struct rtw_fw_state {
u32 feature;
};
+enum rtw_sar_sources {
+ RTW_SAR_SOURCE_NONE,
+ RTW_SAR_SOURCE_COMMON,
+};
+
+enum rtw_sar_bands {
+ RTW_SAR_BAND_0,
+ RTW_SAR_BAND_1,
+ /* RTW_SAR_BAND_2, not used now */
+ RTW_SAR_BAND_3,
+ RTW_SAR_BAND_4,
+
+ RTW_SAR_BAND_NR,
+};
+
+/* the union is reserved for other kinds of SAR sources
+ * which might not reuse the same format as the array 'common'.
+ */
+union rtw_sar_cfg {
+ s8 common[RTW_SAR_BAND_NR];
+};
+
+struct rtw_sar {
+ enum rtw_sar_sources src;
+ union rtw_sar_cfg cfg[RTW_RF_PATH_MAX][RTW_RATE_SECTION_MAX];
+};
+
struct rtw_hal {
u32 rcr;
@@ -1817,6 +1851,7 @@ struct rtw_hal {
u8 ps_mode;
u8 current_channel;
+ u8 current_primary_channel_index;
u8 current_band_width;
u8 current_band_type;
@@ -1853,6 +1888,9 @@ struct rtw_hal {
[RTW_MAX_CHANNEL_NUM_5G];
s8 tx_pwr_tbl[RTW_RF_PATH_MAX]
[DESC_RATE_MAX];
+
+ enum rtw_sar_bands sar_band;
+ struct rtw_sar sar;
};
struct rtw_path_div {
@@ -1863,12 +1901,37 @@ struct rtw_path_div {
u16 path_b_cnt;
};
+struct rtw_chan_info {
+ int pri_ch_idx;
+ int action_id;
+ int bw;
+ u8 extra_info;
+ u8 channel;
+ u16 timeout;
+};
+
+struct rtw_chan_list {
+ u32 buf_size;
+ u32 ch_num;
+ u32 size;
+ u16 addr;
+};
+
+struct rtw_hw_scan_info {
+ struct ieee80211_vif *scanning_vif;
+ u8 probe_pg_size;
+ u8 op_pri_ch_idx;
+ u8 op_chan;
+ u8 op_bw;
+};
+
struct rtw_dev {
struct ieee80211_hw *hw;
struct device *dev;
struct rtw_hci hci;
+ struct rtw_hw_scan_info scan_info;
struct rtw_chip_info *chip;
struct rtw_hal hal;
struct rtw_fifo_conf fifo;
@@ -2021,6 +2084,7 @@ static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
return 0;
}
+void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel);
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
@@ -2035,6 +2099,9 @@ void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
u32 config);
void rtw_tx_report_purge_timer(struct timer_list *t);
void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
+ const u8 *mac_addr, bool hw_scan);
+void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif);
int rtw_core_start(struct rtw_dev *rtwdev);
void rtw_core_stop(struct rtw_dev *rtwdev);
int rtw_chip_info_setup(struct rtw_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index a7a6ebfaa203..a0991d3f15c0 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -2,7 +2,6 @@
/* Copyright(c) 2018-2019 Realtek Corporation
*/
-#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
@@ -612,6 +611,9 @@ static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
bool tx_empty = true;
u8 queue;
+ if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
+ goto enter_deep_ps;
+
lockdep_assert_held(&rtwpci->irq_lock);
/* TX-DMA is not allowed while in deep PS state */
@@ -637,7 +639,7 @@ static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
"TX path not empty, cannot enter deep power save state\n");
return;
}
-
+enter_deep_ps:
set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
rtw_power_mode_change(rtwdev, true);
}
@@ -808,7 +810,8 @@ static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
bd_idx = rtw_pci_tx_queue_idx_addr[queue];
spin_lock_bh(&rtwpci->irq_lock);
- rtw_pci_deep_ps_leave(rtwdev);
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
+ rtw_pci_deep_ps_leave(rtwdev);
rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
spin_unlock_bh(&rtwpci->irq_lock);
}
@@ -1409,7 +1412,11 @@ static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
* throughput. This is probably because the ASPM behavior slightly
* varies between different SoCs.
*/
- if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
+ if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
+ return;
+
+ if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
+ (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
rtw_pci_aspm_set(rtwdev, enter);
}
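link_usage behaves as a reference count of paths that need the PCIe link active: ASPM state is touched only on the 0 <-> 1 transitions, so nested requests are safe. A sketch of a typical sequence, assuming the initial value of 1 set in probe later in this patch:

	/* probe:            atomic_set(&rtwpci->link_usage, 1)  link in use
	 * PS idle (enter):  1 -> 0, rtw_pci_aspm_set(rtwdev, true)   ASPM on
	 * NAPI poll start:  0 -> 1, rtw_pci_aspm_set(rtwdev, false)  ASPM off
	 * NAPI poll end:    1 -> 0, rtw_pci_aspm_set(rtwdev, true)   ASPM on again
	 */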
@@ -1658,6 +1665,9 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
priv);
int work_done = 0;
+ if (rtwpci->rx_no_aspm)
+ rtw_pci_link_ps(rtwdev, false);
+
while (work_done < budget) {
u32 work_done_once;
@@ -1681,6 +1691,8 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
napi_schedule(napi);
}
+ if (rtwpci->rx_no_aspm)
+ rtw_pci_link_ps(rtwdev, true);
return work_done;
}
@@ -1702,50 +1714,13 @@ static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
netif_napi_del(&rtwpci->napi);
}
-enum rtw88_quirk_dis_pci_caps {
- QUIRK_DIS_PCI_CAP_MSI,
- QUIRK_DIS_PCI_CAP_ASPM,
-};
-
-static int disable_pci_caps(const struct dmi_system_id *dmi)
-{
- uintptr_t dis_caps = (uintptr_t)dmi->driver_data;
-
- if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_MSI))
- rtw_disable_msi = true;
- if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_ASPM))
- rtw_pci_disable_aspm = true;
-
- return 1;
-}
-
-static const struct dmi_system_id rtw88_pci_quirks[] = {
- {
- .callback = disable_pci_caps,
- .ident = "Protempo Ltd L116HTN6SPW",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Protempo Ltd"),
- DMI_MATCH(DMI_PRODUCT_NAME, "L116HTN6SPW"),
- },
- .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
- },
- {
- .callback = disable_pci_caps,
- .ident = "HP HP Pavilion Laptop 14-ce0xxx",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Laptop 14-ce0xxx"),
- },
- .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
- },
- {}
-};
-
int rtw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ struct pci_dev *bridge = pci_upstream_bridge(pdev);
struct ieee80211_hw *hw;
struct rtw_dev *rtwdev;
+ struct rtw_pci *rtwpci;
int drv_data_size;
int ret;
@@ -1763,6 +1738,9 @@ int rtw_pci_probe(struct pci_dev *pdev,
rtwdev->hci.ops = &rtw_pci_ops;
rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
+ rtwpci = (struct rtw_pci *)rtwdev->priv;
+ atomic_set(&rtwpci->link_usage, 1);
+
ret = rtw_core_init(rtwdev);
if (ret)
goto err_release_hw;
@@ -1791,7 +1769,10 @@ int rtw_pci_probe(struct pci_dev *pdev,
goto err_destroy_pci;
}
- dmi_check_system(rtw88_pci_quirks);
+ /* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
+ if (pdev->device == 0xc821 && bridge && bridge->vendor == PCI_VENDOR_ID_INTEL)
+ rtwpci->rx_no_aspm = true;
+
rtw_pci_phy_cfg(rtwdev);
ret = rtw_register_hw(rtwdev, hw);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 66f78eb7757c..0c37efd8c66f 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -223,6 +223,8 @@ struct rtw_pci {
struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM];
struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM];
u16 link_ctrl;
+ atomic_t link_usage;
+ bool rx_no_aspm;
DECLARE_BITMAP(flags, NUM_OF_RTW_PCI_FLAGS);
void __iomem *mmap;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index bfddfcbe63f5..e505d17f107e 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -10,6 +10,7 @@
#include "phy.h"
#include "debug.h"
#include "regd.h"
+#include "sar.h"
struct phy_cfg_pair {
u32 addr;
@@ -2004,6 +2005,25 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
return tx_power;
}
+/* return RTW_RATE_SECTION_MAX to indicate rate is invalid */
+static u8 rtw_phy_rate_to_rate_section(u8 rate)
+{
+ if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
+ return RTW_RATE_SECTION_CCK;
+ else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
+ return RTW_RATE_SECTION_OFDM;
+ else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
+ return RTW_RATE_SECTION_HT_1S;
+ else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
+ return RTW_RATE_SECTION_HT_2S;
+ else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
+ return RTW_RATE_SECTION_VHT_1S;
+ else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
+ return RTW_RATE_SECTION_VHT_2S;
+ else
+ return RTW_RATE_SECTION_MAX;
+}
+
static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
enum rtw_bandwidth bw, u8 rf_path,
u8 rate, u8 channel, u8 regd)
@@ -2011,7 +2031,7 @@ static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
struct rtw_hal *hal = &rtwdev->hal;
u8 *cch_by_bw = hal->cch_by_bw;
s8 power_limit = (s8)rtwdev->chip->max_power_index;
- u8 rs;
+ u8 rs = rtw_phy_rate_to_rate_section(rate);
int ch_idx;
u8 cur_bw, cur_ch;
s8 cur_lmt;
@@ -2019,19 +2039,7 @@ static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
if (regd > RTW_REGD_WW)
return power_limit;
- if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
- rs = RTW_RATE_SECTION_CCK;
- else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
- rs = RTW_RATE_SECTION_OFDM;
- else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
- rs = RTW_RATE_SECTION_HT_1S;
- else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
- rs = RTW_RATE_SECTION_HT_2S;
- else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
- rs = RTW_RATE_SECTION_VHT_1S;
- else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
- rs = RTW_RATE_SECTION_VHT_2S;
- else
+ if (rs == RTW_RATE_SECTION_MAX)
goto err;
/* only 20M BW with cck and ofdm */
@@ -2065,6 +2073,27 @@ err:
return (s8)rtwdev->chip->max_power_index;
}
+static s8 rtw_phy_get_tx_power_sar(struct rtw_dev *rtwdev, u8 sar_band,
+ u8 rf_path, u8 rate)
+{
+ u8 rs = rtw_phy_rate_to_rate_section(rate);
+ struct rtw_sar_arg arg = {
+ .sar_band = sar_band,
+ .path = rf_path,
+ .rs = rs,
+ };
+
+ if (rs == RTW_RATE_SECTION_MAX)
+ goto err;
+
+ return rtw_query_sar(rtwdev, &arg);
+
+err:
+ WARN(1, "invalid arguments, sar_band=%d, path=%d, rate=%d\n",
+ sar_band, rf_path, rate);
+ return (s8)rtwdev->chip->max_power_index;
+}
+
void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
u8 ch, u8 regd, struct rtw_power_params *pwr_param)
{
@@ -2076,6 +2105,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
s8 *offset = &pwr_param->pwr_offset;
s8 *limit = &pwr_param->pwr_limit;
s8 *remnant = &pwr_param->pwr_remnant;
+ s8 *sar = &pwr_param->pwr_sar;
pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
group = rtw_get_channel_group(ch, rate);
@@ -2099,6 +2129,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
rate, ch, regd);
*remnant = (rate <= DESC_RATE11M ? dm_info->txagc_remnant_cck :
dm_info->txagc_remnant_ofdm);
+ *sar = rtw_phy_get_tx_power_sar(rtwdev, hal->sar_band, path, rate);
}
u8
@@ -2113,7 +2144,9 @@ rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
channel, regd, &pwr_param);
tx_power = pwr_param.pwr_base;
- offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit);
+ offset = min3(pwr_param.pwr_offset,
+ pwr_param.pwr_limit,
+ pwr_param.pwr_sar);
if (rtwdev->chip->en_dis_dpd)
offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate);
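With SAR in the mix, the final index is the per-rate base plus the smallest of the three deltas, so whichever bound is tightest wins. A worked example with assumed values (not from the patch):

	/* pwr_base = 0x28, pwr_offset = +4, pwr_limit = +2, pwr_sar = -3
	 * offset   = min3(4, 2, -3) = -3
	 * tx_power = 0x28 + (-3) = 0x25
	 */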
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index 02d1ec47ffb1..b6c5ae60a462 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -148,6 +148,7 @@ struct rtw_power_params {
s8 pwr_offset;
s8 pwr_limit;
s8 pwr_remnant;
+ s8 pwr_sar;
};
void
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 3f0ac33156d6..bfa64c038f5f 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -83,6 +83,9 @@ void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter)
/* Each request requires an ack from firmware */
request |= POWER_MODE_ACK;
+ if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
+ request |= POWER_TX_WAKE;
+
rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, request);
/* Check that firmware got the power request and acked via the cpwm register */
diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h
index 7819391c8663..c194386f6db5 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.h
+++ b/drivers/net/wireless/realtek/rtw88/ps.h
@@ -9,6 +9,7 @@
#define POWER_MODE_ACK BIT(6)
#define POWER_MODE_PG BIT(4)
+#define POWER_TX_WAKE BIT(1)
#define POWER_MODE_LCLK BIT(0)
#define LEAVE_LPS_TRY_CNT 5
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 80a6f4da6acd..db078df63f85 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -223,7 +223,8 @@ static int rtw8821c_mac_init(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1);
rtw_write8(rtwdev, REG_ACKTO_CCK, 0x40);
rtw_write8_set(rtwdev, REG_WMAC_TRXPTCL_CTL_H, BIT(1));
- rtw_write8_set(rtwdev, REG_SND_PTCL_CTRL, BIT(6));
+ rtw_write8_set(rtwdev, REG_SND_PTCL_CTRL,
+ BIT_DIS_CHK_VHTSIGB_CRC);
rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2);
rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, WLAN_MAC_OPT_NORM_FUNC1);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
index 112faa60f653..d9fbddd7b0f3 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
@@ -131,7 +131,7 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define WLAN_TX_FUNC_CFG2 0x30
#define WLAN_MAC_OPT_NORM_FUNC1 0x98
#define WLAN_MAC_OPT_LB_FUNC1 0x80
-#define WLAN_MAC_OPT_FUNC2 0x30810041
+#define WLAN_MAC_OPT_FUNC2 0xb0810041
#define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \
(WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index c409c8c29ec8..dd4fbb82750d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -205,7 +205,7 @@ static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
#define WLAN_TX_FUNC_CFG2 0x30
#define WLAN_MAC_OPT_NORM_FUNC1 0x98
#define WLAN_MAC_OPT_LB_FUNC1 0x80
-#define WLAN_MAC_OPT_FUNC2 0x30810041
+#define WLAN_MAC_OPT_FUNC2 0xb0810041
#define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \
(WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \
@@ -262,6 +262,8 @@ static int rtw8822b_mac_init(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1);
rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2);
rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, WLAN_MAC_OPT_NORM_FUNC1);
+ rtw_write8_set(rtwdev, REG_SND_PTCL_CTRL,
+ BIT_DIS_CHK_VHTSIGB_CRC);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 46b881e8e4fe..35c46e5209de 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -1962,7 +1962,7 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
#define WLAN_TX_FUNC_CFG2 0x30
#define WLAN_MAC_OPT_NORM_FUNC1 0x98
#define WLAN_MAC_OPT_LB_FUNC1 0x80
-#define WLAN_MAC_OPT_FUNC2 0x30810041
+#define WLAN_MAC_OPT_FUNC2 0xb0810041
#define WLAN_MAC_INT_MIG_CFG 0x33330000
#define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \
@@ -2102,6 +2102,8 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
BIT_RXPSF_CONT_ERRCHKEN);
value16 = BIT_SET_RXPSF_ERRTHR(value16, 0x07);
rtw_write16(rtwdev, REG_RXPSF_CTRL, value16);
+ rtw_write8_set(rtwdev, REG_SND_PTCL_CTRL,
+ BIT_DIS_CHK_VHTSIGB_CRC);
/* Interrupt migration configuration */
rtw_write32(rtwdev, REG_INT_MIG, WLAN_MAC_INT_MIG_CFG);
@@ -2533,6 +2535,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
s8 rx_power[RTW_RF_PATH_MAX];
s8 min_rx_power = -120;
u8 rssi;
+ u8 channel;
int path;
rx_power[RF_PATH_A] = GET_PHY_STAT_P0_PWDB_A(phy_status);
@@ -2553,6 +2556,11 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
rx_power[RF_PATH_A] -= 110;
rx_power[RF_PATH_B] -= 110;
+ channel = GET_PHY_STAT_P0_CHANNEL(phy_status);
+ if (channel == 0)
+ channel = rtwdev->hal.current_channel;
+ rtw_set_rx_freq_band(pkt_stat, channel);
+
pkt_stat->rx_power[RF_PATH_A] = rx_power[RF_PATH_A];
pkt_stat->rx_power[RF_PATH_B] = rx_power[RF_PATH_B];
@@ -2578,6 +2586,7 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
u8 evm_dbm = 0;
u8 rssi;
int path;
+ u8 channel;
if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
@@ -2591,6 +2600,9 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
else
bw = RTW_CHANNEL_WIDTH_20;
+ channel = GET_PHY_STAT_P1_CHANNEL(phy_status);
+ rtw_set_rx_freq_band(pkt_stat, channel);
+
pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
pkt_stat->rx_power[RF_PATH_B] = GET_PHY_STAT_P1_PWDB_B(phy_status) - 110;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 2);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 3df627419d81..8201955e1f21 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -137,6 +137,8 @@ const struct rtw_table name ## _tbl = { \
le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
#define GET_PHY_STAT_P0_GAIN_A(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(21, 16))
+#define GET_PHY_STAT_P0_CHANNEL(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(23, 16))
#define GET_PHY_STAT_P0_GAIN_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(29, 24))
@@ -149,6 +151,8 @@ const struct rtw_table name ## _tbl = { \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+#define GET_PHY_STAT_P1_CHANNEL(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(23, 16))
#define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
#define GET_PHY_STAT_P1_RXEVM_B(phy_stat) \
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index 7087e385a9b3..d2d607e22198 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -6,6 +6,7 @@
#include "rx.h"
#include "ps.h"
#include "debug.h"
+#include "fw.h"
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb)
@@ -138,6 +139,13 @@ static void rtw_rx_addr_match(struct rtw_dev *rtwdev,
rtw_iterate_vifs_atomic(rtwdev, rtw_rx_addr_match_iter, &data);
}
+static void rtw_set_rx_freq_by_pktstat(struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status)
+{
+ rx_status->freq = pkt_stat->freq;
+ rx_status->band = pkt_stat->band;
+}
+
void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
struct rtw_rx_pkt_stat *pkt_stat,
struct ieee80211_hdr *hdr,
@@ -150,6 +158,8 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
memset(rx_status, 0, sizeof(*rx_status));
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
+ if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
+ rtw_set_rx_freq_by_pktstat(pkt_stat, rx_status);
if (pkt_stat->crc_err)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (pkt_stat->decrypted)
diff --git a/drivers/net/wireless/realtek/rtw88/sar.c b/drivers/net/wireless/realtek/rtw88/sar.c
new file mode 100644
index 000000000000..3383726c4d90
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/sar.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2021 Realtek Corporation
+ */
+
+#include "sar.h"
+#include "phy.h"
+#include "debug.h"
+
+s8 rtw_query_sar(struct rtw_dev *rtwdev, const struct rtw_sar_arg *arg)
+{
+ const struct rtw_hal *hal = &rtwdev->hal;
+ const struct rtw_sar *sar = &hal->sar;
+
+ switch (sar->src) {
+ default:
+ rtw_warn(rtwdev, "unknown SAR source: %d\n", sar->src);
+ fallthrough;
+ case RTW_SAR_SOURCE_NONE:
+ return (s8)rtwdev->chip->max_power_index;
+ case RTW_SAR_SOURCE_COMMON:
+ return sar->cfg[arg->path][arg->rs].common[arg->sar_band];
+ }
+}
+
+static int rtw_apply_sar(struct rtw_dev *rtwdev, const struct rtw_sar *new)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_sar *sar = &hal->sar;
+
+ if (sar->src != RTW_SAR_SOURCE_NONE && new->src != sar->src) {
+ rtw_warn(rtwdev, "SAR source: %d is in use\n", sar->src);
+ return -EBUSY;
+ }
+
+ *sar = *new;
+ rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
+
+ return 0;
+}
+
+static s8 rtw_sar_to_phy(struct rtw_dev *rtwdev, u8 fct, s32 sar,
+ const struct rtw_sar_arg *arg)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 txgi = rtwdev->chip->txgi_factor;
+ u8 max = rtwdev->chip->max_power_index;
+ s32 tmp;
+ s8 base;
+
+ tmp = fct > txgi ? sar >> (fct - txgi) : sar << (txgi - fct);
+ base = arg->sar_band == RTW_SAR_BAND_0 ?
+ hal->tx_pwr_by_rate_base_2g[arg->path][arg->rs] :
+ hal->tx_pwr_by_rate_base_5g[arg->path][arg->rs];
+
+ return (s8)clamp_t(s32, tmp, -max - 1, max) - base;
+}
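rtw_sar_to_phy() rescales a SAR value given in 1/2^fct dBm into the chip's 1/2^txgi dBm TX-gain units, clamps it to the representable range, and rebases it against the per-rate base power. A worked example with assumed values (txgi_factor and max_power_index are chip specific):

	/* fct  = RTW_COMMON_SAR_FCT = 2  ->  SAR given in 1/4 dBm
	 * txgi = 3                      ->  chip indexes in 1/8 dBm
	 * sar  = 20 (i.e. 5 dBm)        ->  tmp = 20 << (3 - 2) = 40
	 * with max = 0x3f the clamp is a no-op, so the result is 40 - base
	 */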
+
+static const struct cfg80211_sar_freq_ranges rtw_common_sar_freq_ranges[] = {
+ [RTW_SAR_BAND_0] = { .start_freq = 2412, .end_freq = 2484, },
+ [RTW_SAR_BAND_1] = { .start_freq = 5180, .end_freq = 5320, },
+ [RTW_SAR_BAND_3] = { .start_freq = 5500, .end_freq = 5720, },
+ [RTW_SAR_BAND_4] = { .start_freq = 5745, .end_freq = 5825, },
+};
+
+static_assert(ARRAY_SIZE(rtw_common_sar_freq_ranges) == RTW_SAR_BAND_NR);
+
+const struct cfg80211_sar_capa rtw_sar_capa = {
+ .type = NL80211_SAR_TYPE_POWER,
+ .num_freq_ranges = RTW_SAR_BAND_NR,
+ .freq_ranges = rtw_common_sar_freq_ranges,
+};
+
+int rtw_set_sar_specs(struct rtw_dev *rtwdev,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct rtw_sar_arg arg = {0};
+ struct rtw_sar new = {0};
+ u32 idx, i, j, k;
+ s32 power;
+ s8 val;
+
+ if (sar->type != NL80211_SAR_TYPE_POWER)
+ return -EINVAL;
+
+ memset(&new, rtwdev->chip->max_power_index, sizeof(new));
+ new.src = RTW_SAR_SOURCE_COMMON;
+
+ for (i = 0; i < sar->num_sub_specs; i++) {
+ idx = sar->sub_specs[i].freq_range_index;
+ if (idx >= RTW_SAR_BAND_NR)
+ return -EINVAL;
+
+ power = sar->sub_specs[i].power;
+ rtw_info(rtwdev, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
+ rtw_common_sar_freq_ranges[idx].start_freq,
+ rtw_common_sar_freq_ranges[idx].end_freq,
+ power, BIT(RTW_COMMON_SAR_FCT));
+
+ for (j = 0; j < RTW_RF_PATH_MAX; j++) {
+ for (k = 0; k < RTW_RATE_SECTION_MAX; k++) {
+ arg = (struct rtw_sar_arg){
+ .sar_band = idx,
+ .path = j,
+ .rs = k,
+ };
+ val = rtw_sar_to_phy(rtwdev, RTW_COMMON_SAR_FCT,
+ power, &arg);
+ new.cfg[j][k].common[idx] = val;
+ }
+ }
+ }
+
+ return rtw_apply_sar(rtwdev, &new);
+}
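rtw_set_sar_specs() is the driver half of the cfg80211 SAR flow; the mac80211 glue is not part of this hunk. A plausible wiring, assuming hw->priv holds the rtw_dev (rtw_ops_set_sar_specs is a hypothetical name here):

	static int rtw_ops_set_sar_specs(struct ieee80211_hw *hw,
					 const struct cfg80211_sar_specs *sar)
	{
		struct rtw_dev *rtwdev = hw->priv;

		return rtw_set_sar_specs(rtwdev, sar);
	}

registered via .set_sar_specs in the driver's ieee80211_ops.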
diff --git a/drivers/net/wireless/realtek/rtw88/sar.h b/drivers/net/wireless/realtek/rtw88/sar.h
new file mode 100644
index 000000000000..e01e7bb790b7
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/sar.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2021 Realtek Corporation
+ */
+
+#include "main.h"
+
+/* NL80211_SAR_TYPE_POWER means the unit is 0.25 dBm,
+ * where 0.25 = 1/4 = 2^(-2), so the shift factor is 2.
+ */
+#define RTW_COMMON_SAR_FCT 2
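A quick sanity check of the factor (worked numbers, not from the patch):

	/* a limit of 40 in 1/4 dBm units is 40 * 0.25 = 10 dBm,
	 * i.e. dbm = sar >> RTW_COMMON_SAR_FCT
	 */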
+
+struct rtw_sar_arg {
+ u8 sar_band;
+ u8 path;
+ u8 rs;
+};
+
+extern const struct cfg80211_sar_capa rtw_sar_capa;
+
+s8 rtw_query_sar(struct rtw_dev *rtwdev, const struct rtw_sar_arg *arg);
+int rtw_set_sar_specs(struct rtw_dev *rtwdev,
+ const struct cfg80211_sar_specs *sar);
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 3a101aa139ed..efcc1b0371a8 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -233,17 +233,34 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src)
spin_unlock_irqrestore(&tx_report->q_lock, flags);
}
+static u8 rtw_get_mgmt_rate(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ u8 lowest_rate, bool ignore_rate)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = tx_info->control.vif;
+ bool force_lowest = test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags);
+
+ if (!vif || !vif->bss_conf.basic_rates || ignore_rate || force_lowest)
+ return lowest_rate;
+
+ return __ffs(vif->bss_conf.basic_rates) + lowest_rate;
+}
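__ffs() takes the lowest set bit of the vif's basic-rate bitmap; the bit positions index the band's bitrate table, which starts at the band's lowest legacy rate. An example with assumed values:

	/* On 5 GHz, lowest_rate = DESC_RATE6M and bit 0 = 6 Mbps.
	 * With basic_rates = BIT(2) | BIT(4) (12 and 24 Mbps basic),
	 * __ffs() returns 2, so management frames are sent at
	 * DESC_RATE6M + 2 == DESC_RATE12M.
	 */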
+
static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool ignore_rate)
{
if (rtwdev->hal.current_band_type == RTW_BAND_2G) {
pkt_info->rate_id = RTW_RATEID_B_20M;
- pkt_info->rate = DESC_RATE1M;
+ pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE1M,
+ ignore_rate);
} else {
pkt_info->rate_id = RTW_RATEID_G;
- pkt_info->rate = DESC_RATE6M;
+ pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE6M,
+ ignore_rate);
}
+
pkt_info->use_rate = true;
pkt_info->dis_rate_fallback = true;
}
@@ -280,7 +297,7 @@ static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
struct ieee80211_sta *sta,
struct sk_buff *skb)
{
- rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb);
+ rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, false);
pkt_info->dis_qselseq = true;
pkt_info->en_hwseq = true;
pkt_info->hw_ssn_sel = 0;
@@ -295,7 +312,9 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
struct rtw_sta_info *si;
+ u8 fix_rate;
u16 seq;
u8 ampdu_factor = 0;
u8 ampdu_density = 0;
@@ -347,6 +366,13 @@ out:
pkt_info->bw = bw;
pkt_info->stbc = stbc;
pkt_info->ldpc = ldpc;
+
+ fix_rate = dm_info->fix_rate;
+ if (fix_rate < DESC_RATE_MAX) {
+ pkt_info->rate = fix_rate;
+ pkt_info->dis_rate_fallback = true;
+ pkt_info->use_rate = true;
+ }
}
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
@@ -404,7 +430,7 @@ void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
if (type != RSVD_BEACON && type != RSVD_DUMMY)
pkt_info->qsel = TX_DESC_QSEL_MGMT;
- rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb);
+ rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, true);
bmc = is_broadcast_ether_addr(hdr->addr1) ||
is_multicast_ether_addr(hdr->addr1);
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index ad7a8155dbed..bd34e4bbe107 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -219,6 +219,7 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
struct ieee80211_key_conf *key,
struct rtw89_sec_cam_entry *sec_cam)
{
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif;
struct rtw89_addr_cam_entry *addr_cam;
u8 key_idx = 0;
@@ -243,7 +244,7 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx;
addr_cam->sec_entries[key_idx] = sec_cam;
set_bit(key_idx, addr_cam->sec_cam_map);
- ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n",
ret);
@@ -371,6 +372,7 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
struct ieee80211_key_conf *key,
bool inform_fw)
{
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
struct rtw89_vif *rtwvif;
struct rtw89_addr_cam_entry *addr_cam;
@@ -394,7 +396,7 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
clear_bit(key_idx, addr_cam->sec_cam_map);
addr_cam->sec_entries[key_idx] = NULL;
if (inform_fw) {
- ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret)
rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret);
}
@@ -536,12 +538,8 @@ static int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
- if (vif->type == NL80211_IFTYPE_STATION)
- ether_addr_copy(addr_cam->tma, rtwvif->bssid);
ether_addr_copy(bssid_cam->bssid, rtwvif->bssid);
}
@@ -593,7 +591,7 @@ int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
return 0;
}
-static u8 rtw89_cam_addr_hash(u8 start, u8 *addr)
+static u8 rtw89_cam_addr_hash(u8 start, const u8 *addr)
{
u8 hash = 0;
u8 i;
@@ -606,15 +604,18 @@ static u8 rtw89_cam_addr_hash(u8 start, u8 *addr)
void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ const u8 *scan_mac_addr,
u8 *cmd)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- struct ieee80211_sta *sta;
- struct rtw89_sta *rtwsta;
struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
+ const u8 *sma = scan_mac_addr ? scan_mac_addr : rtwvif->mac_addr;
u8 sma_hash, tma_hash, addr_msk_start;
u8 sma_start = 0;
u8 tma_start = 0;
+ u8 *tma = sta ? sta->addr : rtwvif->bssid;
if (addr_cam->addr_mask != 0) {
addr_msk_start = __ffs(addr_cam->addr_mask);
@@ -623,8 +624,8 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
else if (addr_cam->mask_sel == RTW89_TMA)
tma_start = addr_msk_start;
}
- sma_hash = rtw89_cam_addr_hash(sma_start, rtwvif->mac_addr);
- tma_hash = rtw89_cam_addr_hash(tma_start, addr_cam->tma);
+ sma_hash = rtw89_cam_addr_hash(sma_start, sma);
+ tma_hash = rtw89_cam_addr_hash(tma_start, tma);
FWCMD_SET_ADDR_IDX(cmd, addr_cam->addr_cam_idx);
FWCMD_SET_ADDR_OFFSET(cmd, addr_cam->offset);
@@ -642,19 +643,19 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
FWCMD_SET_ADDR_BSSID_CAM_IDX(cmd, addr_cam->bssid_cam_idx);
- FWCMD_SET_ADDR_SMA0(cmd, rtwvif->mac_addr[0]);
- FWCMD_SET_ADDR_SMA1(cmd, rtwvif->mac_addr[1]);
- FWCMD_SET_ADDR_SMA2(cmd, rtwvif->mac_addr[2]);
- FWCMD_SET_ADDR_SMA3(cmd, rtwvif->mac_addr[3]);
- FWCMD_SET_ADDR_SMA4(cmd, rtwvif->mac_addr[4]);
- FWCMD_SET_ADDR_SMA5(cmd, rtwvif->mac_addr[5]);
+ FWCMD_SET_ADDR_SMA0(cmd, sma[0]);
+ FWCMD_SET_ADDR_SMA1(cmd, sma[1]);
+ FWCMD_SET_ADDR_SMA2(cmd, sma[2]);
+ FWCMD_SET_ADDR_SMA3(cmd, sma[3]);
+ FWCMD_SET_ADDR_SMA4(cmd, sma[4]);
+ FWCMD_SET_ADDR_SMA5(cmd, sma[5]);
- FWCMD_SET_ADDR_TMA0(cmd, addr_cam->tma[0]);
- FWCMD_SET_ADDR_TMA1(cmd, addr_cam->tma[1]);
- FWCMD_SET_ADDR_TMA2(cmd, addr_cam->tma[2]);
- FWCMD_SET_ADDR_TMA3(cmd, addr_cam->tma[3]);
- FWCMD_SET_ADDR_TMA4(cmd, addr_cam->tma[4]);
- FWCMD_SET_ADDR_TMA5(cmd, addr_cam->tma[5]);
+ FWCMD_SET_ADDR_TMA0(cmd, tma[0]);
+ FWCMD_SET_ADDR_TMA1(cmd, tma[1]);
+ FWCMD_SET_ADDR_TMA2(cmd, tma[2]);
+ FWCMD_SET_ADDR_TMA3(cmd, tma[3]);
+ FWCMD_SET_ADDR_TMA4(cmd, tma[4]);
+ FWCMD_SET_ADDR_TMA5(cmd, tma[5]);
FWCMD_SET_ADDR_PORT_INT(cmd, rtwvif->port);
FWCMD_SET_ADDR_TSF_SYNC(cmd, rtwvif->port);
@@ -662,15 +663,11 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
FWCMD_SET_ADDR_LSIG_TXOP(cmd, rtwvif->lsig_txop);
FWCMD_SET_ADDR_TGT_IND(cmd, rtwvif->tgt_ind);
FWCMD_SET_ADDR_FRM_TGT_IND(cmd, rtwvif->frm_tgt_ind);
-
- if (vif->type == NL80211_IFTYPE_STATION) {
- sta = rtwvif->mgd.ap;
- if (sta) {
- rtwsta = (struct rtw89_sta *)sta->drv_priv;
- FWCMD_SET_ADDR_MACID(cmd, rtwsta->mac_id);
- FWCMD_SET_ADDR_AID12(cmd, vif->bss_conf.aid & 0xfff);
- }
- }
+ FWCMD_SET_ADDR_MACID(cmd, rtwsta ? rtwsta->mac_id : rtwvif->mac_id);
+ if (rtwvif->net_type == RTW89_NET_TYPE_INFRA)
+ FWCMD_SET_ADDR_AID12(cmd, vif->bss_conf.aid & 0xfff);
+ else if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ FWCMD_SET_ADDR_AID12(cmd, sta ? sta->aid & 0xfff : 0);
FWCMD_SET_ADDR_WOL_PATTERN(cmd, rtwvif->wowlan_pattern);
FWCMD_SET_ADDR_WOL_UC(cmd, rtwvif->wowlan_uc);
FWCMD_SET_ADDR_WOL_MAGIC(cmd, rtwvif->wowlan_magic);
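The source/target MAC selection above can be summarized as follows (illustrative, not part of the patch):

	/* normal STA operation:  sma = vif MAC,          tma = AP (sta->addr)
	 * during scan:           sma = random scan MAC,  tma = rtwvif->bssid
	 * AP mode, per station:  sma = vif MAC,          tma = that sta->addr
	 */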
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 90a20a5375c6..33a3ad582b81 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -9,145 +9,347 @@
#define RTW89_SEC_CAM_LEN 20
-#define FWCMD_SET_ADDR_IDX(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(7, 0))
-#define FWCMD_SET_ADDR_OFFSET(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(15, 8))
-#define FWCMD_SET_ADDR_LEN(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(23, 16))
-#define FWCMD_SET_ADDR_VALID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(0))
-#define FWCMD_SET_ADDR_NET_TYPE(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(2, 1))
-#define FWCMD_SET_ADDR_BCN_HIT_COND(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(4, 3))
-#define FWCMD_SET_ADDR_HIT_RULE(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(6, 5))
-#define FWCMD_SET_ADDR_BB_SEL(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(7))
-#define FWCMD_SET_ADDR_ADDR_MASK(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(13, 8))
-#define FWCMD_SET_ADDR_MASK_SEL(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(15, 14))
-#define FWCMD_SET_ADDR_SMA_HASH(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(23, 16))
-#define FWCMD_SET_ADDR_TMA_HASH(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(31, 24))
-#define FWCMD_SET_ADDR_BSSID_CAM_IDX(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 3, value, GENMASK(5, 0))
-#define FWCMD_SET_ADDR_SMA0(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(7, 0))
-#define FWCMD_SET_ADDR_SMA1(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(15, 8))
-#define FWCMD_SET_ADDR_SMA2(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(23, 16))
-#define FWCMD_SET_ADDR_SMA3(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(31, 24))
-#define FWCMD_SET_ADDR_SMA4(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(7, 0))
-#define FWCMD_SET_ADDR_SMA5(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(15, 8))
-#define FWCMD_SET_ADDR_TMA0(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(23, 16))
-#define FWCMD_SET_ADDR_TMA1(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(31, 24))
-#define FWCMD_SET_ADDR_TMA2(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(7, 0))
-#define FWCMD_SET_ADDR_TMA3(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(15, 8))
-#define FWCMD_SET_ADDR_TMA4(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(23, 16))
-#define FWCMD_SET_ADDR_TMA5(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(31, 24))
-#define FWCMD_SET_ADDR_MACID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(7, 0))
-#define FWCMD_SET_ADDR_PORT_INT(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(10, 8))
-#define FWCMD_SET_ADDR_TSF_SYNC(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(13, 11))
-#define FWCMD_SET_ADDR_TF_TRS(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(14))
-#define FWCMD_SET_ADDR_LSIG_TXOP(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(15))
-#define FWCMD_SET_ADDR_TGT_IND(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(26, 24))
-#define FWCMD_SET_ADDR_FRM_TGT_IND(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(29, 27))
-#define FWCMD_SET_ADDR_AID12(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 0))
-#define FWCMD_SET_ADDR_AID12_0(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(7, 0))
-#define FWCMD_SET_ADDR_AID12_1(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 8))
-#define FWCMD_SET_ADDR_WOL_PATTERN(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(12))
-#define FWCMD_SET_ADDR_WOL_UC(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(13))
-#define FWCMD_SET_ADDR_WOL_MAGIC(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(14))
-#define FWCMD_SET_ADDR_WAPI(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(15))
-#define FWCMD_SET_ADDR_SEC_ENT_MODE(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(17, 16))
-#define FWCMD_SET_ADDR_SEC_ENT0_KEYID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(19, 18))
-#define FWCMD_SET_ADDR_SEC_ENT1_KEYID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(21, 20))
-#define FWCMD_SET_ADDR_SEC_ENT2_KEYID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(23, 22))
-#define FWCMD_SET_ADDR_SEC_ENT3_KEYID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(25, 24))
-#define FWCMD_SET_ADDR_SEC_ENT4_KEYID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(27, 26))
-#define FWCMD_SET_ADDR_SEC_ENT5_KEYID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(29, 28))
-#define FWCMD_SET_ADDR_SEC_ENT6_KEYID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(31, 30))
-#define FWCMD_SET_ADDR_SEC_ENT_VALID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(7, 0))
-#define FWCMD_SET_ADDR_SEC_ENT0(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(15, 8))
-#define FWCMD_SET_ADDR_SEC_ENT1(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(23, 16))
-#define FWCMD_SET_ADDR_SEC_ENT2(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(31, 24))
-#define FWCMD_SET_ADDR_SEC_ENT3(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(7, 0))
-#define FWCMD_SET_ADDR_SEC_ENT4(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(15, 8))
-#define FWCMD_SET_ADDR_SEC_ENT5(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(23, 16))
-#define FWCMD_SET_ADDR_SEC_ENT6(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(31, 24))
-#define FWCMD_SET_ADDR_BSSID_IDX(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(7, 0))
-#define FWCMD_SET_ADDR_BSSID_OFFSET(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(15, 8))
-#define FWCMD_SET_ADDR_BSSID_LEN(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(23, 16))
-#define FWCMD_SET_ADDR_BSSID_VALID(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(0))
-#define FWCMD_SET_ADDR_BSSID_BB_SEL(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(1))
-#define FWCMD_SET_ADDR_BSSID_BSS_COLOR(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(13, 8))
-#define FWCMD_SET_ADDR_BSSID_BSSID0(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(23, 16))
-#define FWCMD_SET_ADDR_BSSID_BSSID1(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(31, 24))
-#define FWCMD_SET_ADDR_BSSID_BSSID2(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(7, 0))
-#define FWCMD_SET_ADDR_BSSID_BSSID3(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(15, 8))
-#define FWCMD_SET_ADDR_BSSID_BSSID4(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(23, 16))
-#define FWCMD_SET_ADDR_BSSID_BSSID5(cmd, value) \
- le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24))
+static inline void FWCMD_SET_ADDR_IDX(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(7, 0));
+}
+
+static inline void FWCMD_SET_ADDR_OFFSET(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(15, 8));
+}
+
+static inline void FWCMD_SET_ADDR_LEN(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(23, 16));
+}
+
+static inline void FWCMD_SET_ADDR_VALID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(0));
+}
+
+static inline void FWCMD_SET_ADDR_NET_TYPE(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(2, 1));
+}
+
+static inline void FWCMD_SET_ADDR_BCN_HIT_COND(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(4, 3));
+}
+
+static inline void FWCMD_SET_ADDR_HIT_RULE(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(6, 5));
+}
+
+static inline void FWCMD_SET_ADDR_BB_SEL(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(7));
+}
+
+static inline void FWCMD_SET_ADDR_ADDR_MASK(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(13, 8));
+}
+
+static inline void FWCMD_SET_ADDR_MASK_SEL(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(15, 14));
+}
+
+static inline void FWCMD_SET_ADDR_SMA_HASH(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(23, 16));
+}
+
+static inline void FWCMD_SET_ADDR_TMA_HASH(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(31, 24));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_CAM_IDX(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 3, value, GENMASK(5, 0));
+}
+
+static inline void FWCMD_SET_ADDR_SMA0(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(7, 0));
+}
+
+static inline void FWCMD_SET_ADDR_SMA1(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(15, 8));
+}
+
+static inline void FWCMD_SET_ADDR_SMA2(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(23, 16));
+}
+
+static inline void FWCMD_SET_ADDR_SMA3(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(31, 24));
+}
+
+static inline void FWCMD_SET_ADDR_SMA4(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(7, 0));
+}
+
+static inline void FWCMD_SET_ADDR_SMA5(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(15, 8));
+}
+
+static inline void FWCMD_SET_ADDR_TMA0(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(23, 16));
+}
+
+static inline void FWCMD_SET_ADDR_TMA1(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(31, 24));
+}
+
+static inline void FWCMD_SET_ADDR_TMA2(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(7, 0));
+}
+
+static inline void FWCMD_SET_ADDR_TMA3(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(15, 8));
+}
+
+static inline void FWCMD_SET_ADDR_TMA4(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(23, 16));
+}
+
+static inline void FWCMD_SET_ADDR_TMA5(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(31, 24));
+}
+
+static inline void FWCMD_SET_ADDR_MACID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(7, 0));
+}
+
+static inline void FWCMD_SET_ADDR_PORT_INT(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(10, 8));
+}
+
+static inline void FWCMD_SET_ADDR_TSF_SYNC(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(13, 11));
+}
+
+static inline void FWCMD_SET_ADDR_TF_TRS(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(14));
+}
+
+static inline void FWCMD_SET_ADDR_LSIG_TXOP(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(15));
+}
+
+static inline void FWCMD_SET_ADDR_TGT_IND(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(26, 24));
+}
+
+static inline void FWCMD_SET_ADDR_FRM_TGT_IND(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(29, 27));
+}
+
+static inline void FWCMD_SET_ADDR_AID12(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 0));
+}
+
+static inline void FWCMD_SET_ADDR_AID12_0(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(7, 0));
+}
+
+static inline void FWCMD_SET_ADDR_AID12_1(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 8));
+}
+
+static inline void FWCMD_SET_ADDR_WOL_PATTERN(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(12));
+}
+
+static inline void FWCMD_SET_ADDR_WOL_UC(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(13));
+}
+
+static inline void FWCMD_SET_ADDR_WOL_MAGIC(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(14));
+}
+
+static inline void FWCMD_SET_ADDR_WAPI(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(15));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT_MODE(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(17, 16));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT0_KEYID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(19, 18));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT1_KEYID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(21, 20));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT2_KEYID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(23, 22));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT3_KEYID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(25, 24));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT4_KEYID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(27, 26));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT5_KEYID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(29, 28));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT6_KEYID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(31, 30));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT_VALID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(7, 0));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT0(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(15, 8));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT1(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(23, 16));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT2(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(31, 24));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT3(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(7, 0));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT4(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(15, 8));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT5(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(23, 16));
+}
+
+static inline void FWCMD_SET_ADDR_SEC_ENT6(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(31, 24));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_IDX(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(7, 0));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_OFFSET(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(15, 8));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_LEN(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(23, 16));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_VALID(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(0));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_BB_SEL(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(1));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_BSS_COLOR(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(13, 8));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_BSSID0(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(23, 16));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_BSSID1(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(31, 24));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_BSSID2(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(7, 0));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_BSSID3(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(15, 8));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_BSSID4(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(23, 16));
+}
+
+static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24));
+}
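Each setter writes one field of the H2C command buffer; converting the macros to inline functions keeps the dword/field layout identical while adding type checking on cmd and value. A minimal illustration (illustrative buffer, not part of the patch):

	__le32 cmd[16] = {};

	FWCMD_SET_ADDR_AID12(cmd, 0x123);
	/* bits 11:0 of dword 9 now hold 0x123, i.e. cmd[9] == cpu_to_le32(0x123) */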
int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
- struct rtw89_vif *vif, u8 *cmd);
+ struct rtw89_vif *vif,
+ struct rtw89_sta *rtwsta,
+ const u8 *scan_mac_addr, u8 *cmd);
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *vif, u8 *cmd);
int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index abe4b6549ab2..9f7d4f8d0c56 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -540,8 +540,31 @@ static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update);
static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
void *param, u16 len)
{
- rtw89_fw_h2c_raw_with_hdr(rtwdev, h2c_class, h2c_func, param, len,
- false, true);
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ int ret;
+
+ if (!wl->status.map.init_ok) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by btc not init!!\n", __func__);
+ pfwinfo->cnt_h2c_fail++;
+ return;
+ } else if ((wl->status.map.rf_off_pre == 1 && wl->status.map.rf_off == 1) ||
+ (wl->status.map.lps_pre == 1 && wl->status.map.lps == 1)) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by wl off!!\n", __func__);
+ pfwinfo->cnt_h2c_fail++;
+ return;
+ }
+
+ pfwinfo->cnt_h2c++;
+
+ ret = rtw89_fw_h2c_raw_with_hdr(rtwdev, h2c_class, h2c_func, param, len,
+ false, true);
+ if (ret != 0)
+ pfwinfo->cnt_h2c_fail++;
}
static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
@@ -1095,6 +1118,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
diff_t = pcysta->tavg_cycle[CXT_WL] - wl_slot_set;
_chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
}
+
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE, pcysta->slot_cnt[CXST_B1]);
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles);
}
if (rpt_type == BTC_RPT_TYPE_CTRL) {
@@ -1103,6 +1130,18 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw_coex = prpt->wl_fw_coex_ver;
wl->ver_info.fw = prpt->wl_fw_ver;
dm->wl_fw_cx_offload = !!(prpt->wl_fw_cx_offload);
+
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
+
+ /* Avoid I/O while WL is in LPS or powered off */
+ if (wl->status.map.lps != BTC_LPS_RF_OFF && !wl->status.map.rf_off) {
+ rtwdev->chip->ops->btc_update_bt_cnt(rtwdev);
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+
+ btc->cx.cnt_bt[BTC_BCNT_POLUT] =
+ rtw89_mac_get_plt_cnt(rtwdev, RTW89_MAC_0);
+ }
}
if (rpt_type >= BTC_RPT_TYPE_BT_VER &&
@@ -1596,7 +1635,7 @@ static void _set_rf_trx_para(struct rtw89_dev *rtwdev)
_set_bt_rx_gain(rtwdev, para.bt_rx_gain);
if (bt->enable.now == 0 || wl->status.map.rf_off == 1 ||
- wl->status.map.lps == 1)
+ wl->status.map.lps == BTC_LPS_RF_OFF)
wl_stb_chg = 0;
else
wl_stb_chg = 1;
@@ -4199,16 +4238,16 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
switch (rf_state) {
case BTC_RFCTRL_WL_OFF:
wl->status.map.rf_off = 1;
- wl->status.map.lps = 0;
+ wl->status.map.lps = BTC_LPS_OFF;
break;
case BTC_RFCTRL_FW_CTRL:
wl->status.map.rf_off = 0;
- wl->status.map.lps = 1;
+ wl->status.map.lps = BTC_LPS_RF_OFF;
break;
case BTC_RFCTRL_WL_ON:
default:
wl->status.map.rf_off = 0;
- wl->status.map.lps = 0;
+ wl->status.map.lps = BTC_LPS_OFF;
break;
}
@@ -4494,6 +4533,8 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
u8 *buf = &skb->data[RTW89_C2H_HEADER_LEN];
+ len -= RTW89_C2H_HEADER_LEN;
+
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): C2H BT len:%d class:%d fun:%d\n",
__func__, len, class, func);
@@ -4512,14 +4553,12 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], handle C2H BT INFO with data %8ph\n", buf);
btc->cx.cnt_bt[BTC_BCNT_INFOUPDATE]++;
- rtw89_leave_ps_mode(rtwdev);
_update_bt_info(rtwdev, buf, len);
break;
case BTF_EVNT_BT_SCBD:
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], handle C2H BT SCBD with data %8ph\n", buf);
btc->cx.cnt_bt[BTC_BCNT_SCBDUPDATE]++;
- rtw89_leave_ps_mode(rtwdev);
_update_bt_scbd(rtwdev, false);
break;
case BTF_EVNT_BT_PSD:
@@ -4765,7 +4804,6 @@ static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
@@ -4773,7 +4811,6 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_module *module = &btc->mdinfo;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
u8 *afh = bt_linfo->afh_map;
- u16 polt_cnt = 0;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT))
return;
@@ -4849,17 +4886,11 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
cx->cnt_bt[BTC_BCNT_INFOUPDATE],
cx->cnt_bt[BTC_BCNT_INFOSAME]);
- if (wl->status.map.lps || wl->status.map.rf_off)
- return;
-
- chip->ops->btc_update_bt_cnt(rtwdev);
- _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
-
seq_printf(m,
" %-15s : Hi-rx = %d, Hi-tx = %d, Lo-rx = %d, Lo-tx = %d (bt_polut_wl_tx = %d)\n",
"[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX],
cx->cnt_bt[BTC_BCNT_HIPRI_TX], cx->cnt_bt[BTC_BCNT_LOPRI_RX],
- cx->cnt_bt[BTC_BCNT_LOPRI_TX], polt_cnt);
+ cx->cnt_bt[BTC_BCNT_LOPRI_TX], cx->cnt_bt[BTC_BCNT_POLUT]);
}
#define CASE_BTC_RSN_STR(e) case BTC_RSN_ ## e: return #e
@@ -5227,8 +5258,6 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
pcysta->bcn_cnt[CXBCN_BT_SLOT],
pcysta->bcn_cnt[CXBCN_BT_OK]);
- _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles);
-
for (i = 0; i < CXST_MAX; i++) {
if (!pcysta->slot_cnt[i])
continue;
@@ -5252,9 +5281,6 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
}
seq_puts(m, "\n");
- _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
- _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE, pcysta->slot_cnt[CXST_B1]);
-
seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
"[cycle_time]",
pcysta->tavg_cycle[CXT_WL],
@@ -5606,9 +5632,6 @@ static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
pfwinfo->event[BTF_EVNT_RPT], prptctrl->rpt_cnt,
prptctrl->rpt_enable, dm->error.val);
- _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
- pfwinfo->event[BTF_EVNT_RPT]);
-
if (dm->error.map.wl_fw_hang)
seq_puts(m, " (WL FW Hang!!)");
seq_puts(m, "\n");
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index 4b4565d15c9e..c3a722d259d7 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -130,6 +130,12 @@ enum btc_rfctrl {
BTC_RFCTRL_MAX
};
+enum btc_lps_state {
+ BTC_LPS_OFF = 0,
+ BTC_LPS_RF_OFF = 1,
+ BTC_LPS_RF_ON = 2
+};
+
void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index d02ec5a735cb..a0737eea9f81 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include <linux/ip.h>
+#include <linux/udp.h>
#include "coex.h"
#include "core.h"
@@ -143,20 +145,15 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
{
struct ieee80211_channel *channel = chandef->chan;
enum nl80211_chan_width width = chandef->width;
- u8 *cch_by_bw = chan_param->cch_by_bw;
u32 primary_freq, center_freq;
u8 center_chan;
u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
u8 primary_chan_idx = 0;
- u8 i;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
center_freq = chandef->center_freq1;
- /* assign the center channel used while 20M bw is selected */
- cch_by_bw[RTW89_CHANNEL_WIDTH_20] = channel->hw_value;
-
switch (width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
@@ -183,10 +180,6 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
primary_chan_idx = RTW89_SC_20_UPMOST;
center_chan -= 6;
}
- /* assign the center channel used
- * while 40M bw is selected
- */
- cch_by_bw[RTW89_CHANNEL_WIDTH_40] = center_chan + 4;
} else {
if (center_freq - primary_freq == 10) {
primary_chan_idx = RTW89_SC_20_LOWER;
@@ -195,10 +188,6 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
primary_chan_idx = RTW89_SC_20_LOWEST;
center_chan += 6;
}
- /* assign the center channel used
- * while 40M bw is selected
- */
- cch_by_bw[RTW89_CHANNEL_WIDTH_40] = center_chan - 4;
}
break;
default:
@@ -210,12 +199,6 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
chan_param->primary_chan = channel->hw_value;
chan_param->bandwidth = bandwidth;
chan_param->pri_ch_idx = primary_chan_idx;
-
- /* assign the center channel used while current bw is selected */
- cch_by_bw[bandwidth] = center_chan;
-
- for (i = bandwidth + 1; i <= RTW89_MAX_CHANNEL_WIDTH; i++)
- cch_by_bw[i] = 0;
}
void rtw89_set_channel(struct rtw89_dev *rtwdev)
@@ -228,7 +211,6 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
u8 center_chan, bandwidth;
u8 band_type;
bool band_changed;
- u8 i;
rtw89_get_channel_params(&hw->conf.chandef, &ch_param);
if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
@@ -242,6 +224,7 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
hal->current_band_width = bandwidth;
hal->current_channel = center_chan;
+ hal->prev_primary_channel = hal->current_primary_channel;
hal->current_primary_channel = ch_param.primary_chan;
hal->current_band_type = band_type;
@@ -260,9 +243,6 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
break;
}
- for (i = RTW89_CHANNEL_WIDTH_20; i <= RTW89_MAX_CHANNEL_WIDTH; i++)
- hal->cch_by_bw[i] = ch_param.cch_by_bw[i];
-
rtw89_chip_set_channel_prepare(rtwdev, &bak);
chip->ops->set_channel(rtwdev, &ch_param);
@@ -881,8 +861,11 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, u8 *addr,
{
s16 cfo;
+ phy_ppdu->chan_idx = RTW89_GET_PHY_STS_IE01_CH_IDX(addr);
+ if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
+ return;
/* sign conversion for S(12,2) */
- cfo = sign_extend32(RTW89_GET_PHY_STS_IE0_CFO(addr), 11);
+ cfo = sign_extend32(RTW89_GET_PHY_STS_IE01_CFO(addr), 11);
rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu);
}
@@ -908,6 +891,7 @@ static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
s8 *rssi = phy_ppdu->rssi;
u8 *buf = phy_ppdu->buf;
+ phy_ppdu->ie = RTW89_GET_PHY_STS_IE_MAP(buf);
phy_ppdu->rssi_avg = RTW89_GET_PHY_STS_RSSI_AVG(buf);
rssi[RF_PATH_A] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_A(buf));
rssi[RF_PATH_B] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_B(buf));
@@ -936,8 +920,9 @@ static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
u16 ie_len;
u8 *pos, *end;
- if (!phy_ppdu->to_self)
- return 0;
+ /* mark invalid reports and bypass them */
+ if (phy_ppdu->ie < RTW89_CCK_PKT)
+ return -EINVAL;
pos = (u8 *)phy_ppdu->buf + PHY_STS_HDR_LEN;
end = (u8 *)phy_ppdu->buf + phy_ppdu->len;
@@ -1000,9 +985,7 @@ static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev,
data_rate_mode = GET_DATA_RATE_MODE(data_rate);
if (data_rate_mode == DATA_RATE_MODE_NON_HT) {
rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate);
- /* No 4 CCK rates for 5G */
- if (status->band == NL80211_BAND_5GHZ)
- rate_idx -= 4;
+ /* rate_idx is still hardware value here */
} else if (data_rate_mode == DATA_RATE_MODE_HT) {
rate_idx = GET_DATA_RATE_HT_IDX(data_rate);
} else if (data_rate_mode == DATA_RATE_MODE_VHT) {
@@ -1081,6 +1064,29 @@ static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev,
rtw89_iterate_vifs_bh(rtwdev, rtw89_vif_rx_stats_iter, &iter_data);
}
+static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev,
+ struct ieee80211_rx_status *status)
+{
+ u16 chan = rtwdev->hal.prev_primary_channel;
+ u8 band = chan <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+
+ if (status->band != NL80211_BAND_2GHZ &&
+ status->encoding == RX_ENC_LEGACY &&
+ status->rate_idx < RTW89_HW_RATE_OFDM6) {
+ status->freq = ieee80211_channel_to_frequency(chan, band);
+ status->band = band;
+ }
+}
+
+static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
+{
+ if (rx_status->band == NL80211_BAND_2GHZ ||
+ rx_status->encoding != RX_ENC_LEGACY)
+ return;
+ /* No 4 CCK rates for non-2G */
+ rx_status->rate_idx -= 4;
+}
+
static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -1099,6 +1105,8 @@ static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status))
rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status);
+ rtw89_correct_cck_chan(rtwdev, rx_status);
+ rtw89_core_hw_to_sband_rate(rx_status);
rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
rtwdev->napi_budget_countdown--;
@@ -1112,6 +1120,7 @@ static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu phy_ppdu = {.buf = skb->data, .valid = false,
.len = skb->len,
.to_self = desc_info->addr1_match,
+ .rate = desc_info->data_rate,
.mac_id = desc_info->mac_id};
int ret;
@@ -1267,12 +1276,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
if (data_rate_mode == DATA_RATE_MODE_NON_HT) {
rx_status->encoding = RX_ENC_LEGACY;
rx_status->rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate);
- /* No 4 CCK rates for 5G */
- if (rx_status->band == NL80211_BAND_5GHZ)
- rx_status->rate_idx -= 4;
- if (rtwdev->scanning)
- rx_status->rate_idx = min_t(u8, rx_status->rate_idx,
- ARRAY_SIZE(rtw89_bitrates) - 5);
+ /* convert rate_idx after we get the correct band */
} else if (data_rate_mode == DATA_RATE_MODE_HT) {
rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = GET_DATA_RATE_HT_IDX(data_rate);
@@ -1324,10 +1328,13 @@ static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
{
struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts;
u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
+ struct ieee80211_rx_status *rx_status;
struct sk_buff *skb_ppdu, *tmp;
skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) {
skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]);
+ rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
+ rtw89_core_hw_to_sband_rate(rx_status);
rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb_ppdu);
ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
rtwdev->napi_budget_countdown--;
@@ -1360,6 +1367,7 @@ void rtw89_core_rx(struct rtw89_dev *rtwdev,
BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) {
skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
} else {
+ rtw89_core_hw_to_sband_rate(rx_status);
rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb);
ieee80211_rx_napi(rtwdev->hw, NULL, skb, &rtwdev->napi);
rtwdev->napi_budget_countdown--;
@@ -1825,7 +1833,8 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
ewma_rssi_init(&rtwsta->avg_rssi);
if (vif->type == NL80211_IFTYPE_STATION) {
- rtwvif->mgd.ap = sta;
+ /* for station mode, assign the mac_id from itself */
+ rtwsta->mac_id = rtwvif->mac_id;
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_START);
rtw89_chip_rfk_channel(rtwdev);
@@ -1851,6 +1860,7 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta)
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
int ret;
rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
@@ -1872,7 +1882,7 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
}
/* update cam aid mac_id net_type */
- rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cam\n");
return ret;
@@ -1897,10 +1907,6 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
return ret;
}
- /* for station mode, assign the mac_id from itself */
- if (vif->type == NL80211_IFTYPE_STATION)
- rtwsta->mac_id = rtwvif->mac_id;
-
ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 0);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c join info\n");
@@ -1908,7 +1914,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
}
/* update cam aid mac_id net_type */
- rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cam\n");
return ret;
@@ -2115,7 +2121,8 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
if (i == NL80211_IFTYPE_STATION)
phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index c2885e4dd882..7c84556ec4ad 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -411,12 +411,13 @@ enum rtw89_regulation_type {
RTW89_NA = 4,
RTW89_IC = 5,
RTW89_KCC = 6,
- RTW89_NCC = 7,
- RTW89_CHILE = 8,
- RTW89_ACMA = 9,
- RTW89_MEXICO = 10,
+ RTW89_ACMA = 7,
+ RTW89_NCC = 8,
+ RTW89_MEXICO = 9,
+ RTW89_CHILE = 10,
RTW89_UKRAINE = 11,
RTW89_CN = 12,
+ RTW89_QATAR = 13,
RTW89_REGD_NUM,
};
@@ -472,6 +473,9 @@ struct rtw89_rx_phy_ppdu {
u8 rssi_avg;
s8 rssi[RF_PATH_MAX];
u8 mac_id;
+ u8 chan_idx;
+ u8 ie;
+ u16 rate;
bool to_self;
bool valid;
};
@@ -543,7 +547,6 @@ enum rtw89_ps_mode {
RTW89_PS_MODE_PWR_GATED = 3,
};
-#define RTW89_MAX_CHANNEL_WIDTH RTW89_CHANNEL_WIDTH_80
#define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1)
#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
@@ -570,7 +573,6 @@ struct rtw89_channel_params {
u8 primary_chan;
u8 bandwidth;
u8 pri_ch_idx;
- u8 cch_by_bw[RTW89_MAX_CHANNEL_WIDTH + 1];
};
struct rtw89_channel_help_params {
@@ -803,6 +805,7 @@ enum rtw89_btc_bt_state_cnt {
BTC_BCNT_HIPRI_RX,
BTC_BCNT_LOPRI_TX,
BTC_BCNT_LOPRI_RX,
+ BTC_BCNT_POLUT,
BTC_BCNT_RATECHG,
BTC_BCNT_NUM
};
@@ -1865,7 +1868,6 @@ struct rtw89_addr_cam_entry {
u8 wapi : 1;
u8 mask_sel : 2;
u8 bssid_cam_idx: 6;
- u8 tma[ETH_ALEN];
u8 sma[ETH_ALEN];
u8 sec_ent_mode;
@@ -1934,14 +1936,6 @@ struct rtw89_vif {
bool wowlan_magic;
bool is_hesta;
bool last_a_ctrl;
- union {
- struct {
- struct ieee80211_sta *ap;
- } mgd;
- struct {
- struct list_head sta_list;
- } ap;
- };
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
@@ -2354,14 +2348,11 @@ struct rtw89_hal {
u32 rx_fltr;
u8 cv;
u8 current_channel;
+ u8 prev_primary_channel;
u8 current_primary_channel;
enum rtw89_subband current_subband;
u8 current_band_width;
u8 current_band_type;
- /* center channel for different available bandwidth,
- * val of (bw > current_band_width) is invalid
- */
- u8 cch_by_bw[RTW89_MAX_CHANNEL_WIDTH + 1];
u32 sw_amsdu_max_size;
u32 antenna_tx;
u32 antenna_rx;
@@ -3127,6 +3118,16 @@ static inline struct ieee80211_sta *rtwsta_to_sta(struct rtw89_sta *rtwsta)
return container_of(p, struct ieee80211_sta, drv_priv);
}
+static inline struct ieee80211_sta *rtwsta_to_sta_safe(struct rtw89_sta *rtwsta)
+{
+ return rtwsta ? rtwsta_to_sta(rtwsta) : NULL;
+}
+
+static inline struct rtw89_sta *sta_to_rtwsta_safe(struct ieee80211_sta *sta)
+{
+ return sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
+}
+
static inline
void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev,
struct rtw89_channel_help_params *p)
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 29eb188c888c..22bd1d03e722 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include <linux/vmalloc.h>
+
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -723,6 +725,7 @@ rtw89_debug_priv_mac_mem_dump_select(struct file *filp,
}
static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = {
+ [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR,
[RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR,
[RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR,
[RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR,
@@ -735,6 +738,10 @@ static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = {
[RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR,
[RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR,
[RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR,
+ [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR,
+ [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR,
+ [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR,
+ [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR,
};
static void rtw89_debug_dump_mac_mem(struct seq_file *m,
@@ -814,7 +821,7 @@ rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp,
return -EINVAL;
}
- enable = set == 0 ? false : true;
+ enable = set != 0;
switch (sel) {
case 0:
debugfs_priv->dbgpkg_en.ss_dbg = enable;
@@ -2280,7 +2287,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
switch (status->encoding) {
case RX_ENC_LEGACY:
seq_printf(m, "Legacy %d", status->rate_idx +
- (status->band == NL80211_BAND_5GHZ ? 4 : 0));
+ (status->band != NL80211_BAND_2GHZ ? 4 : 0));
break;
case RX_ENC_HT:
seq_printf(m, "HT MCS-%d%s", status->rate_idx,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 65ef3dc9d061..8a57b75b07c0 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -523,7 +523,8 @@ void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
}
#define H2C_CAM_LEN 60
-int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
struct sk_buff *skb;
@@ -533,7 +534,7 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
return -ENOMEM;
}
skb_put(skb, H2C_CAM_LEN);
- rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, skb->data);
+ rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, skb->data);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -780,7 +781,7 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
if (!ppe_th) {
u8 pad;
- pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK,
+ pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
sta->he_cap.he_cap_elem.phy_cap_info[9]);
for (i = 0; i < RTW89_PPE_BW_NUM; i++)
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 36e8d0da6c1e..2d36dc27222f 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -156,863 +156,1125 @@ struct rtw89_h2creg_sch_tx_en {
u16 rsvd:15;
} __packed;
-#define RTW89_SET_FWCMD_RA_IS_DIS(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0))
-#define RTW89_SET_FWCMD_RA_MODE(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(5, 1))
-#define RTW89_SET_FWCMD_RA_BW_CAP(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 6))
-#define RTW89_SET_FWCMD_RA_MACID(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8))
-#define RTW89_SET_FWCMD_RA_DCM(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(16))
-#define RTW89_SET_FWCMD_RA_ER(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(17))
-#define RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(19, 18))
-#define RTW89_SET_FWCMD_RA_UPD_ALL(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(20))
-#define RTW89_SET_FWCMD_RA_SGI(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(21))
-#define RTW89_SET_FWCMD_RA_LDPC(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(22))
-#define RTW89_SET_FWCMD_RA_STBC(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(23))
-#define RTW89_SET_FWCMD_RA_SS_NUM(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(26, 24))
-#define RTW89_SET_FWCMD_RA_GILTF(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(29, 27))
-#define RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(30))
-#define RTW89_SET_FWCMD_RA_UPD_MASK(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(31))
-#define RTW89_SET_FWCMD_RA_MASK_0(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_RA_MASK_1(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(15, 8))
-#define RTW89_SET_FWCMD_RA_MASK_2(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(23, 16))
-#define RTW89_SET_FWCMD_RA_MASK_3(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 24))
-#define RTW89_SET_FWCMD_RA_MASK_4(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x02, val, BIT(31))
-#define RTW89_SET_FWCMD_RA_BAND_NUM(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(8))
-#define RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(9))
-#define RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(10))
-#define RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(23, 16))
-#define RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(25, 24))
-#define RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(28, 26))
-#define RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 29))
-
-#define RTW89_SET_FWCMD_SEC_IDX(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_SEC_OFFSET(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8))
-#define RTW89_SET_FWCMD_SEC_LEN(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(23, 16))
-#define RTW89_SET_FWCMD_SEC_TYPE(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(3, 0))
-#define RTW89_SET_FWCMD_SEC_EXT_KEY(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(4))
-#define RTW89_SET_FWCMD_SEC_SPP_MODE(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(5))
-#define RTW89_SET_FWCMD_SEC_KEY0(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(31, 0))
-#define RTW89_SET_FWCMD_SEC_KEY1(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 0))
-#define RTW89_SET_FWCMD_SEC_KEY2(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x04, val, GENMASK(31, 0))
-#define RTW89_SET_FWCMD_SEC_KEY3(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x05, val, GENMASK(31, 0))
-
-#define RTW89_SET_EDCA_SEL(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(1, 0))
-#define RTW89_SET_EDCA_BAND(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(3))
-#define RTW89_SET_EDCA_WMM(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(4))
-#define RTW89_SET_EDCA_AC(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(6, 5))
-#define RTW89_SET_EDCA_PARAM(cmd, val) \
- le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 0))
+static inline void RTW89_SET_FWCMD_RA_IS_DIS(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_RA_MODE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(5, 1));
+}
+
+static inline void RTW89_SET_FWCMD_RA_BW_CAP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 6));
+}
+
+static inline void RTW89_SET_FWCMD_RA_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_RA_DCM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(16));
+}
+
+static inline void RTW89_SET_FWCMD_RA_ER(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(17));
+}
+
+static inline void RTW89_SET_FWCMD_RA_INIT_RATE_LV(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(19, 18));
+}
+
+static inline void RTW89_SET_FWCMD_RA_UPD_ALL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(20));
+}
+
+static inline void RTW89_SET_FWCMD_RA_SGI(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(21));
+}
+
+static inline void RTW89_SET_FWCMD_RA_LDPC(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(22));
+}
+
+static inline void RTW89_SET_FWCMD_RA_STBC(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(23));
+}
+
+static inline void RTW89_SET_FWCMD_RA_SS_NUM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(26, 24));
+}
+
+static inline void RTW89_SET_FWCMD_RA_GILTF(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(29, 27));
+}
+
+static inline void RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(30));
+}
+
+static inline void RTW89_SET_FWCMD_RA_UPD_MASK(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(31));
+}
+
+static inline void RTW89_SET_FWCMD_RA_MASK_0(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_RA_MASK_1(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_RA_MASK_2(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_RA_MASK_3(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_RA_MASK_4(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x02, val, BIT(31));
+}
+
+static inline void RTW89_SET_FWCMD_RA_BAND_NUM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(8));
+}
+
+static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(9));
+}
+
+static inline void RTW89_SET_FWCMD_RA_CR_TBL_SEL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(10));
+}
+
+static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(25, 24));
+}
+
+static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(28, 26));
+}
+
+static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_BW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 29));
+}
+
+static inline void RTW89_SET_FWCMD_SEC_IDX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SEC_OFFSET(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_SEC_LEN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SEC_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(3, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SEC_EXT_KEY(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(4));
+}
+
+static inline void RTW89_SET_FWCMD_SEC_SPP_MODE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(5));
+}
+
+static inline void RTW89_SET_FWCMD_SEC_KEY0(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SEC_KEY1(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SEC_KEY2(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x04, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SEC_KEY3(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x05, val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_EDCA_SEL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(1, 0));
+}
+
+static inline void RTW89_SET_EDCA_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(3));
+}
+
+static inline void RTW89_SET_EDCA_WMM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(4));
+}
+
+static inline void RTW89_SET_EDCA_AC(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(6, 5));
+}
+
+static inline void RTW89_SET_EDCA_PARAM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 0));
+}
#define FW_EDCA_PARAM_TXOPLMT_MSK GENMASK(26, 16)
#define FW_EDCA_PARAM_CWMAX_MSK GENMASK(15, 12)
#define FW_EDCA_PARAM_CWMIN_MSK GENMASK(11, 8)
#define FW_EDCA_PARAM_AIFS_MSK GENMASK(7, 0)
#define GET_FWSECTION_HDR_SEC_SIZE(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(23, 0))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(23, 0))
#define GET_FWSECTION_HDR_CHECKSUM(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 1), BIT(28))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 1), BIT(28))
#define GET_FWSECTION_HDR_REDL(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 1), BIT(29))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 1), BIT(29))
#define GET_FWSECTION_HDR_DL_ADDR(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr)), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(fwhdr)), GENMASK(31, 0))
#define GET_FW_HDR_MAJOR_VERSION(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(7, 0))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(7, 0))
#define GET_FW_HDR_MINOR_VERSION(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(15, 8))
#define GET_FW_HDR_SUBVERSION(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(23, 16))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(23, 16))
#define GET_FW_HDR_SUBINDEX(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(31, 24))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(31, 24))
#define GET_FW_HDR_MONTH(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(7, 0))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 4), GENMASK(7, 0))
#define GET_FW_HDR_DATE(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 4), GENMASK(15, 8))
#define GET_FW_HDR_HOUR(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(23, 16))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 4), GENMASK(23, 16))
#define GET_FW_HDR_MIN(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(31, 24))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 4), GENMASK(31, 24))
#define GET_FW_HDR_YEAR(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 5), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 5), GENMASK(31, 0))
#define GET_FW_HDR_SEC_NUM(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 6), GENMASK(15, 8))
#define GET_FW_HDR_CMD_VERSERION(fwhdr) \
- le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24))
+ le32_get_bits(*((const __le32 *)(fwhdr) + 7), GENMASK(31, 24))
static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
{
le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
}
-#define SET_CTRL_INFO_MACID(table, val) \
- le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0))
-#define SET_CTRL_INFO_OPERATION(table, val) \
- le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7))
+static inline void SET_CTRL_INFO_MACID(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
+}
+
+static inline void SET_CTRL_INFO_OPERATION(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7));
+}
#define SET_CMC_TBL_MASK_DATARATE GENMASK(8, 0)
-#define SET_CMC_TBL_DATARATE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(8, 0)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATARATE, \
- GENMASK(8, 0)); \
-} while (0)
+static inline void SET_CMC_TBL_DATARATE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(8, 0));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATARATE,
+ GENMASK(8, 0));
+}
#define SET_CMC_TBL_MASK_FORCE_TXOP BIT(0)
-#define SET_CMC_TBL_FORCE_TXOP(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(9)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_FORCE_TXOP, \
- BIT(9)); \
-} while (0)
+static inline void SET_CMC_TBL_FORCE_TXOP(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(9));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_FORCE_TXOP,
+ BIT(9));
+}
#define SET_CMC_TBL_MASK_DATA_BW GENMASK(1, 0)
-#define SET_CMC_TBL_DATA_BW(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(11, 10)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_BW, \
- GENMASK(11, 10)); \
-} while (0)
+static inline void SET_CMC_TBL_DATA_BW(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(11, 10));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_BW,
+ GENMASK(11, 10));
+}
#define SET_CMC_TBL_MASK_DATA_GI_LTF GENMASK(2, 0)
-#define SET_CMC_TBL_DATA_GI_LTF(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 12)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_GI_LTF, \
- GENMASK(14, 12)); \
-} while (0)
+static inline void SET_CMC_TBL_DATA_GI_LTF(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 12));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_GI_LTF,
+ GENMASK(14, 12));
+}
#define SET_CMC_TBL_MASK_DARF_TC_INDEX BIT(0)
-#define SET_CMC_TBL_DARF_TC_INDEX(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DARF_TC_INDEX, \
- BIT(15)); \
-} while (0)
+static inline void SET_CMC_TBL_DARF_TC_INDEX(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DARF_TC_INDEX,
+ BIT(15));
+}
#define SET_CMC_TBL_MASK_ARFR_CTRL GENMASK(3, 0)
-#define SET_CMC_TBL_ARFR_CTRL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(19, 16)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ARFR_CTRL, \
- GENMASK(19, 16)); \
-} while (0)
+static inline void SET_CMC_TBL_ARFR_CTRL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(19, 16));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ARFR_CTRL,
+ GENMASK(19, 16));
+}
#define SET_CMC_TBL_MASK_ACQ_RPT_EN BIT(0)
-#define SET_CMC_TBL_ACQ_RPT_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(20)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ACQ_RPT_EN, \
- BIT(20)); \
-} while (0)
+static inline void SET_CMC_TBL_ACQ_RPT_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(20));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ACQ_RPT_EN,
+ BIT(20));
+}
#define SET_CMC_TBL_MASK_MGQ_RPT_EN BIT(0)
-#define SET_CMC_TBL_MGQ_RPT_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(21)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_MGQ_RPT_EN, \
- BIT(21)); \
-} while (0)
+static inline void SET_CMC_TBL_MGQ_RPT_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(21));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_MGQ_RPT_EN,
+ BIT(21));
+}
#define SET_CMC_TBL_MASK_ULQ_RPT_EN BIT(0)
-#define SET_CMC_TBL_ULQ_RPT_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(22)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ULQ_RPT_EN, \
- BIT(22)); \
-} while (0)
+static inline void SET_CMC_TBL_ULQ_RPT_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(22));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ULQ_RPT_EN,
+ BIT(22));
+}
#define SET_CMC_TBL_MASK_TWTQ_RPT_EN BIT(0)
-#define SET_CMC_TBL_TWTQ_RPT_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(23)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TWTQ_RPT_EN, \
- BIT(23)); \
-} while (0)
+static inline void SET_CMC_TBL_TWTQ_RPT_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(23));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TWTQ_RPT_EN,
+ BIT(23));
+}
#define SET_CMC_TBL_MASK_DISRTSFB BIT(0)
-#define SET_CMC_TBL_DISRTSFB(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(25)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISRTSFB, \
- BIT(25)); \
-} while (0)
+static inline void SET_CMC_TBL_DISRTSFB(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(25));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISRTSFB,
+ BIT(25));
+}
#define SET_CMC_TBL_MASK_DISDATAFB BIT(0)
-#define SET_CMC_TBL_DISDATAFB(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(26)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISDATAFB, \
- BIT(26)); \
-} while (0)
+static inline void SET_CMC_TBL_DISDATAFB(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(26));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISDATAFB,
+ BIT(26));
+}
#define SET_CMC_TBL_MASK_TRYRATE BIT(0)
-#define SET_CMC_TBL_TRYRATE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(27)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TRYRATE, \
- BIT(27)); \
-} while (0)
+static inline void SET_CMC_TBL_TRYRATE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(27));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TRYRATE,
+ BIT(27));
+}
#define SET_CMC_TBL_MASK_AMPDU_DENSITY GENMASK(3, 0)
-#define SET_CMC_TBL_AMPDU_DENSITY(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 28)); \
- le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_AMPDU_DENSITY, \
- GENMASK(31, 28)); \
-} while (0)
+static inline void SET_CMC_TBL_AMPDU_DENSITY(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 28));
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_AMPDU_DENSITY,
+ GENMASK(31, 28));
+}
#define SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE GENMASK(8, 0)
-#define SET_CMC_TBL_DATA_RTY_LOWEST_RATE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(8, 0)); \
- le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE, \
- GENMASK(8, 0)); \
-} while (0)
+static inline void SET_CMC_TBL_DATA_RTY_LOWEST_RATE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(8, 0));
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE,
+ GENMASK(8, 0));
+}
#define SET_CMC_TBL_MASK_AMPDU_TIME_SEL BIT(0)
-#define SET_CMC_TBL_AMPDU_TIME_SEL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 2, val, BIT(9)); \
- le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_TIME_SEL, \
- BIT(9)); \
-} while (0)
+static inline void SET_CMC_TBL_AMPDU_TIME_SEL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 2, val, BIT(9));
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_TIME_SEL,
+ BIT(9));
+}
#define SET_CMC_TBL_MASK_AMPDU_LEN_SEL BIT(0)
-#define SET_CMC_TBL_AMPDU_LEN_SEL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 2, val, BIT(10)); \
- le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_LEN_SEL, \
- BIT(10)); \
-} while (0)
+static inline void SET_CMC_TBL_AMPDU_LEN_SEL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 2, val, BIT(10));
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_LEN_SEL,
+ BIT(10));
+}
#define SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL BIT(0)
-#define SET_CMC_TBL_RTS_TXCNT_LMT_SEL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 2, val, BIT(11)); \
- le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL, \
- BIT(11)); \
-} while (0)
+static inline void SET_CMC_TBL_RTS_TXCNT_LMT_SEL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 2, val, BIT(11));
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL,
+ BIT(11));
+}
#define SET_CMC_TBL_MASK_RTS_TXCNT_LMT GENMASK(3, 0)
-#define SET_CMC_TBL_RTS_TXCNT_LMT(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(15, 12)); \
- le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT, \
- GENMASK(15, 12)); \
-} while (0)
+static inline void SET_CMC_TBL_RTS_TXCNT_LMT(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(15, 12));
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT,
+ GENMASK(15, 12));
+}
#define SET_CMC_TBL_MASK_RTSRATE GENMASK(8, 0)
-#define SET_CMC_TBL_RTSRATE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(24, 16)); \
- le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTSRATE, \
- GENMASK(24, 16)); \
-} while (0)
+static inline void SET_CMC_TBL_RTSRATE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(24, 16));
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTSRATE,
+ GENMASK(24, 16));
+}
#define SET_CMC_TBL_MASK_VCS_STBC BIT(0)
-#define SET_CMC_TBL_VCS_STBC(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 2, val, BIT(27)); \
- le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_VCS_STBC, \
- BIT(27)); \
-} while (0)
+static inline void SET_CMC_TBL_VCS_STBC(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 2, val, BIT(27));
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_VCS_STBC,
+ BIT(27));
+}
#define SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE GENMASK(3, 0)
-#define SET_CMC_TBL_RTS_RTY_LOWEST_RATE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 28)); \
- le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE, \
- GENMASK(31, 28)); \
-} while (0)
+static inline void SET_CMC_TBL_RTS_RTY_LOWEST_RATE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 28));
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE,
+ GENMASK(31, 28));
+}
#define SET_CMC_TBL_MASK_DATA_TX_CNT_LMT GENMASK(5, 0)
-#define SET_CMC_TBL_DATA_TX_CNT_LMT(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(5, 0)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TX_CNT_LMT, \
- GENMASK(5, 0)); \
-} while (0)
+static inline void SET_CMC_TBL_DATA_TX_CNT_LMT(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(5, 0));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TX_CNT_LMT,
+ GENMASK(5, 0));
+}
#define SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL BIT(0)
-#define SET_CMC_TBL_DATA_TXCNT_LMT_SEL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(6)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL, \
- BIT(6)); \
-} while (0)
+static inline void SET_CMC_TBL_DATA_TXCNT_LMT_SEL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(6));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL,
+ BIT(6));
+}
#define SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL BIT(0)
-#define SET_CMC_TBL_MAX_AGG_NUM_SEL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(7)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL, \
- BIT(7)); \
-} while (0)
+static inline void SET_CMC_TBL_MAX_AGG_NUM_SEL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(7));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL,
+ BIT(7));
+}
#define SET_CMC_TBL_MASK_RTS_EN BIT(0)
-#define SET_CMC_TBL_RTS_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(8)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_EN, \
- BIT(8)); \
-} while (0)
+static inline void SET_CMC_TBL_RTS_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(8));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_EN,
+ BIT(8));
+}
#define SET_CMC_TBL_MASK_CTS2SELF_EN BIT(0)
-#define SET_CMC_TBL_CTS2SELF_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(9)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CTS2SELF_EN, \
- BIT(9)); \
-} while (0)
+static inline void SET_CMC_TBL_CTS2SELF_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(9));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CTS2SELF_EN,
+ BIT(9));
+}
#define SET_CMC_TBL_MASK_CCA_RTS GENMASK(1, 0)
-#define SET_CMC_TBL_CCA_RTS(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 10)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CCA_RTS, \
- GENMASK(11, 10)); \
-} while (0)
+static inline void SET_CMC_TBL_CCA_RTS(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 10));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CCA_RTS,
+ GENMASK(11, 10));
+}
#define SET_CMC_TBL_MASK_HW_RTS_EN BIT(0)
-#define SET_CMC_TBL_HW_RTS_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(12)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_HW_RTS_EN, \
- BIT(12)); \
-} while (0)
+static inline void SET_CMC_TBL_HW_RTS_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(12));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_HW_RTS_EN,
+ BIT(12));
+}
#define SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE GENMASK(1, 0)
-#define SET_CMC_TBL_RTS_DROP_DATA_MODE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(14, 13)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE, \
- GENMASK(14, 13)); \
-} while (0)
+static inline void SET_CMC_TBL_RTS_DROP_DATA_MODE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(14, 13));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE,
+ GENMASK(14, 13));
+}
#define SET_CMC_TBL_MASK_AMPDU_MAX_LEN GENMASK(10, 0)
-#define SET_CMC_TBL_AMPDU_MAX_LEN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 16)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_LEN, \
- GENMASK(26, 16)); \
-} while (0)
+static inline void SET_CMC_TBL_AMPDU_MAX_LEN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 16));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_LEN,
+ GENMASK(26, 16));
+}
#define SET_CMC_TBL_MASK_UL_MU_DIS BIT(0)
-#define SET_CMC_TBL_UL_MU_DIS(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_UL_MU_DIS, \
- BIT(27)); \
-} while (0)
+static inline void SET_CMC_TBL_UL_MU_DIS(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_UL_MU_DIS,
+ BIT(27));
+}
#define SET_CMC_TBL_MASK_AMPDU_MAX_TIME GENMASK(3, 0)
-#define SET_CMC_TBL_AMPDU_MAX_TIME(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(31, 28)); \
- le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_TIME, \
- GENMASK(31, 28)); \
-} while (0)
+static inline void SET_CMC_TBL_AMPDU_MAX_TIME(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(31, 28));
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_TIME,
+ GENMASK(31, 28));
+}
#define SET_CMC_TBL_MASK_MAX_AGG_NUM GENMASK(7, 0)
-#define SET_CMC_TBL_MAX_AGG_NUM(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(7, 0)); \
- le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_MAX_AGG_NUM, \
- GENMASK(7, 0)); \
-} while (0)
+static inline void SET_CMC_TBL_MAX_AGG_NUM(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(7, 0));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_MAX_AGG_NUM,
+ GENMASK(7, 0));
+}
#define SET_CMC_TBL_MASK_BA_BMAP GENMASK(1, 0)
-#define SET_CMC_TBL_BA_BMAP(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(9, 8)); \
- le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BA_BMAP, \
- GENMASK(9, 8)); \
-} while (0)
+static inline void SET_CMC_TBL_BA_BMAP(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(9, 8));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BA_BMAP,
+ GENMASK(9, 8));
+}
#define SET_CMC_TBL_MASK_VO_LFTIME_SEL GENMASK(2, 0)
-#define SET_CMC_TBL_VO_LFTIME_SEL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(18, 16)); \
- le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VO_LFTIME_SEL, \
- GENMASK(18, 16)); \
-} while (0)
+static inline void SET_CMC_TBL_VO_LFTIME_SEL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(18, 16));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VO_LFTIME_SEL,
+ GENMASK(18, 16));
+}
#define SET_CMC_TBL_MASK_VI_LFTIME_SEL GENMASK(2, 0)
-#define SET_CMC_TBL_VI_LFTIME_SEL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(21, 19)); \
- le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VI_LFTIME_SEL, \
- GENMASK(21, 19)); \
-} while (0)
+static inline void SET_CMC_TBL_VI_LFTIME_SEL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(21, 19));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VI_LFTIME_SEL,
+ GENMASK(21, 19));
+}
#define SET_CMC_TBL_MASK_BE_LFTIME_SEL GENMASK(2, 0)
-#define SET_CMC_TBL_BE_LFTIME_SEL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(24, 22)); \
- le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BE_LFTIME_SEL, \
- GENMASK(24, 22)); \
-} while (0)
+static inline void SET_CMC_TBL_BE_LFTIME_SEL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(24, 22));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BE_LFTIME_SEL,
+ GENMASK(24, 22));
+}
#define SET_CMC_TBL_MASK_BK_LFTIME_SEL GENMASK(2, 0)
-#define SET_CMC_TBL_BK_LFTIME_SEL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 25)); \
- le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BK_LFTIME_SEL, \
- GENMASK(27, 25)); \
-} while (0)
+static inline void SET_CMC_TBL_BK_LFTIME_SEL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 25));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BK_LFTIME_SEL,
+ GENMASK(27, 25));
+}
#define SET_CMC_TBL_MASK_SECTYPE GENMASK(3, 0)
-#define SET_CMC_TBL_SECTYPE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 28)); \
- le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_SECTYPE, \
- GENMASK(31, 28)); \
-} while (0)
+static inline void SET_CMC_TBL_SECTYPE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 28));
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_SECTYPE,
+ GENMASK(31, 28));
+}
#define SET_CMC_TBL_MASK_MULTI_PORT_ID GENMASK(2, 0)
-#define SET_CMC_TBL_MULTI_PORT_ID(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(2, 0)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MULTI_PORT_ID, \
- GENMASK(2, 0)); \
-} while (0)
+static inline void SET_CMC_TBL_MULTI_PORT_ID(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(2, 0));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MULTI_PORT_ID,
+ GENMASK(2, 0));
+}
#define SET_CMC_TBL_MASK_BMC BIT(0)
-#define SET_CMC_TBL_BMC(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(3)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_BMC, \
- BIT(3)); \
-} while (0)
+static inline void SET_CMC_TBL_BMC(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(3));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_BMC,
+ BIT(3));
+}
#define SET_CMC_TBL_MASK_MBSSID GENMASK(3, 0)
-#define SET_CMC_TBL_MBSSID(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 4)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MBSSID, \
- GENMASK(7, 4)); \
-} while (0)
+static inline void SET_CMC_TBL_MBSSID(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 4));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MBSSID,
+ GENMASK(7, 4));
+}
#define SET_CMC_TBL_MASK_NAVUSEHDR BIT(0)
-#define SET_CMC_TBL_NAVUSEHDR(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_NAVUSEHDR, \
- BIT(8)); \
-} while (0)
+static inline void SET_CMC_TBL_NAVUSEHDR(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_NAVUSEHDR,
+ BIT(8));
+}
#define SET_CMC_TBL_MASK_TXPWR_MODE GENMASK(2, 0)
-#define SET_CMC_TBL_TXPWR_MODE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(11, 9)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_TXPWR_MODE, \
- GENMASK(11, 9)); \
-} while (0)
+static inline void SET_CMC_TBL_TXPWR_MODE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(11, 9));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_TXPWR_MODE,
+ GENMASK(11, 9));
+}
#define SET_CMC_TBL_MASK_DATA_DCM BIT(0)
-#define SET_CMC_TBL_DATA_DCM(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(12)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_DCM, \
- BIT(12)); \
-} while (0)
+static inline void SET_CMC_TBL_DATA_DCM(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(12));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_DCM,
+ BIT(12));
+}
#define SET_CMC_TBL_MASK_DATA_ER BIT(0)
-#define SET_CMC_TBL_DATA_ER(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(13)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_ER, \
- BIT(13)); \
-} while (0)
+static inline void SET_CMC_TBL_DATA_ER(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(13));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_ER,
+ BIT(13));
+}
#define SET_CMC_TBL_MASK_DATA_LDPC BIT(0)
-#define SET_CMC_TBL_DATA_LDPC(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(14)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_LDPC, \
- BIT(14)); \
-} while (0)
+static inline void SET_CMC_TBL_DATA_LDPC(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(14));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_LDPC,
+ BIT(14));
+}
#define SET_CMC_TBL_MASK_DATA_STBC BIT(0)
-#define SET_CMC_TBL_DATA_STBC(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_STBC, \
- BIT(15)); \
-} while (0)
+static inline void SET_CMC_TBL_DATA_STBC(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_STBC,
+ BIT(15));
+}
#define SET_CMC_TBL_MASK_A_CTRL_BQR BIT(0)
-#define SET_CMC_TBL_A_CTRL_BQR(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(16)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BQR, \
- BIT(16)); \
-} while (0)
+static inline void SET_CMC_TBL_A_CTRL_BQR(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(16));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BQR,
+ BIT(16));
+}
#define SET_CMC_TBL_MASK_A_CTRL_UPH BIT(0)
-#define SET_CMC_TBL_A_CTRL_UPH(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(17)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_UPH, \
- BIT(17)); \
-} while (0)
+static inline void SET_CMC_TBL_A_CTRL_UPH(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(17));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_UPH,
+ BIT(17));
+}
#define SET_CMC_TBL_MASK_A_CTRL_BSR BIT(0)
-#define SET_CMC_TBL_A_CTRL_BSR(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(18)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BSR, \
- BIT(18)); \
-} while (0)
+static inline void SET_CMC_TBL_A_CTRL_BSR(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(18));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BSR,
+ BIT(18));
+}
#define SET_CMC_TBL_MASK_A_CTRL_CAS BIT(0)
-#define SET_CMC_TBL_A_CTRL_CAS(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(19)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_CAS, \
- BIT(19)); \
-} while (0)
+static inline void SET_CMC_TBL_A_CTRL_CAS(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(19));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_CAS,
+ BIT(19));
+}
#define SET_CMC_TBL_MASK_DATA_BW_ER BIT(0)
-#define SET_CMC_TBL_DATA_BW_ER(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(20)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_BW_ER, \
- BIT(20)); \
-} while (0)
+static inline void SET_CMC_TBL_DATA_BW_ER(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(20));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_BW_ER,
+ BIT(20));
+}
#define SET_CMC_TBL_MASK_LSIG_TXOP_EN BIT(0)
-#define SET_CMC_TBL_LSIG_TXOP_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(21)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_LSIG_TXOP_EN, \
- BIT(21)); \
-} while (0)
+static inline void SET_CMC_TBL_LSIG_TXOP_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(21));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_LSIG_TXOP_EN,
+ BIT(21));
+}
#define SET_CMC_TBL_MASK_CTRL_CNT_VLD BIT(0)
-#define SET_CMC_TBL_CTRL_CNT_VLD(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(27)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT_VLD, \
- BIT(27)); \
-} while (0)
+static inline void SET_CMC_TBL_CTRL_CNT_VLD(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(27));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT_VLD,
+ BIT(27));
+}
#define SET_CMC_TBL_MASK_CTRL_CNT GENMASK(3, 0)
-#define SET_CMC_TBL_CTRL_CNT(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 28)); \
- le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT, \
- GENMASK(31, 28)); \
-} while (0)
+static inline void SET_CMC_TBL_CTRL_CNT(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 28));
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT,
+ GENMASK(31, 28));
+}
#define SET_CMC_TBL_MASK_RESP_REF_RATE GENMASK(8, 0)
-#define SET_CMC_TBL_RESP_REF_RATE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(8, 0)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_RESP_REF_RATE, \
- GENMASK(8, 0)); \
-} while (0)
+static inline void SET_CMC_TBL_RESP_REF_RATE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(8, 0));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_RESP_REF_RATE,
+ GENMASK(8, 0));
+}
#define SET_CMC_TBL_MASK_ALL_ACK_SUPPORT BIT(0)
-#define SET_CMC_TBL_ALL_ACK_SUPPORT(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, BIT(12)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ALL_ACK_SUPPORT, \
- BIT(12)); \
-} while (0)
+static inline void SET_CMC_TBL_ALL_ACK_SUPPORT(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(12));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ALL_ACK_SUPPORT,
+ BIT(12));
+}
#define SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT BIT(0)
-#define SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, BIT(13)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT, \
- BIT(13)); \
-} while (0)
+static inline void SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(13));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT,
+ BIT(13));
+}
#define SET_CMC_TBL_MASK_NTX_PATH_EN GENMASK(3, 0)
-#define SET_CMC_TBL_NTX_PATH_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(19, 16)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_NTX_PATH_EN, \
- GENMASK(19, 16)); \
-} while (0)
+static inline void SET_CMC_TBL_NTX_PATH_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(19, 16));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_NTX_PATH_EN,
+ GENMASK(19, 16));
+}
#define SET_CMC_TBL_MASK_PATH_MAP_A GENMASK(1, 0)
-#define SET_CMC_TBL_PATH_MAP_A(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(21, 20)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_A, \
- GENMASK(21, 20)); \
-} while (0)
+static inline void SET_CMC_TBL_PATH_MAP_A(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(21, 20));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_A,
+ GENMASK(21, 20));
+}
#define SET_CMC_TBL_MASK_PATH_MAP_B GENMASK(1, 0)
-#define SET_CMC_TBL_PATH_MAP_B(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 22)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_B, \
- GENMASK(23, 22)); \
-} while (0)
+static inline void SET_CMC_TBL_PATH_MAP_B(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 22));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_B,
+ GENMASK(23, 22));
+}
#define SET_CMC_TBL_MASK_PATH_MAP_C GENMASK(1, 0)
-#define SET_CMC_TBL_PATH_MAP_C(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(25, 24)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_C, \
- GENMASK(25, 24)); \
-} while (0)
+static inline void SET_CMC_TBL_PATH_MAP_C(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(25, 24));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_C,
+ GENMASK(25, 24));
+}
#define SET_CMC_TBL_MASK_PATH_MAP_D GENMASK(1, 0)
-#define SET_CMC_TBL_PATH_MAP_D(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(27, 26)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_D, \
- GENMASK(27, 26)); \
-} while (0)
+static inline void SET_CMC_TBL_PATH_MAP_D(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(27, 26));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_D,
+ GENMASK(27, 26));
+}
#define SET_CMC_TBL_MASK_ANTSEL_A BIT(0)
-#define SET_CMC_TBL_ANTSEL_A(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, BIT(28)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_A, \
- BIT(28)); \
-} while (0)
+static inline void SET_CMC_TBL_ANTSEL_A(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(28));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_A,
+ BIT(28));
+}
#define SET_CMC_TBL_MASK_ANTSEL_B BIT(0)
-#define SET_CMC_TBL_ANTSEL_B(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, BIT(29)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_B, \
- BIT(29)); \
-} while (0)
+static inline void SET_CMC_TBL_ANTSEL_B(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(29));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_B,
+ BIT(29));
+}
#define SET_CMC_TBL_MASK_ANTSEL_C BIT(0)
-#define SET_CMC_TBL_ANTSEL_C(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, BIT(30)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_C, \
- BIT(30)); \
-} while (0)
+static inline void SET_CMC_TBL_ANTSEL_C(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(30));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_C,
+ BIT(30));
+}
#define SET_CMC_TBL_MASK_ANTSEL_D BIT(0)
-#define SET_CMC_TBL_ANTSEL_D(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 6, val, BIT(31)); \
- le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_D, \
- BIT(31)); \
-} while (0)
+static inline void SET_CMC_TBL_ANTSEL_D(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(31));
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_D,
+ BIT(31));
+}
#define SET_CMC_TBL_MASK_ADDR_CAM_INDEX GENMASK(7, 0)
-#define SET_CMC_TBL_ADDR_CAM_INDEX(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0)); \
- le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ADDR_CAM_INDEX, \
- GENMASK(7, 0)); \
-} while (0)
+static inline void SET_CMC_TBL_ADDR_CAM_INDEX(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ADDR_CAM_INDEX,
+ GENMASK(7, 0));
+}
#define SET_CMC_TBL_MASK_PAID GENMASK(8, 0)
-#define SET_CMC_TBL_PAID(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(16, 8)); \
- le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_PAID, \
- GENMASK(16, 8)); \
-} while (0)
+static inline void SET_CMC_TBL_PAID(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(16, 8));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_PAID,
+ GENMASK(16, 8));
+}
#define SET_CMC_TBL_MASK_ULDL BIT(0)
-#define SET_CMC_TBL_ULDL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 7, val, BIT(17)); \
- le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ULDL, \
- BIT(17)); \
-} while (0)
+static inline void SET_CMC_TBL_ULDL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, BIT(17));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ULDL,
+ BIT(17));
+}
#define SET_CMC_TBL_MASK_DOPPLER_CTRL GENMASK(1, 0)
-#define SET_CMC_TBL_DOPPLER_CTRL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(19, 18)); \
- le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_DOPPLER_CTRL, \
- GENMASK(19, 18)); \
-} while (0)
+static inline void SET_CMC_TBL_DOPPLER_CTRL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(19, 18));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_DOPPLER_CTRL,
+ GENMASK(19, 18));
+}
#define SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING GENMASK(1, 0)
-#define SET_CMC_TBL_NOMINAL_PKT_PADDING(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(21, 20)); \
- le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \
- GENMASK(21, 20)); \
-} while (0)
-#define SET_CMC_TBL_NOMINAL_PKT_PADDING40(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 22)); \
- le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \
- GENMASK(23, 22)); \
-} while (0)
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(21, 20));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(21, 20));
+}
+
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING40(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 22));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(23, 22));
+}
#define SET_CMC_TBL_MASK_TXPWR_TOLERENCE GENMASK(3, 0)
-#define SET_CMC_TBL_TXPWR_TOLERENCE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(27, 24)); \
- le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_TXPWR_TOLERENCE, \
- GENMASK(27, 24)); \
-} while (0)
-#define SET_CMC_TBL_NOMINAL_PKT_PADDING80(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 30)); \
- le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \
- GENMASK(31, 30)); \
-} while (0)
+static inline void SET_CMC_TBL_TXPWR_TOLERENCE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(27, 24));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_TXPWR_TOLERENCE,
+ GENMASK(27, 24));
+}
+
+static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING80(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 30));
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING,
+ GENMASK(31, 30));
+}
#define SET_CMC_TBL_MASK_NC GENMASK(2, 0)
-#define SET_CMC_TBL_NC(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(2, 0)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NC, \
- GENMASK(2, 0)); \
-} while (0)
+static inline void SET_CMC_TBL_NC(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(2, 0));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NC,
+ GENMASK(2, 0));
+}
#define SET_CMC_TBL_MASK_NR GENMASK(2, 0)
-#define SET_CMC_TBL_NR(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(5, 3)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NR, \
- GENMASK(5, 3)); \
-} while (0)
+static inline void SET_CMC_TBL_NR(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(5, 3));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NR,
+ GENMASK(5, 3));
+}
#define SET_CMC_TBL_MASK_NG GENMASK(1, 0)
-#define SET_CMC_TBL_NG(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(7, 6)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NG, \
- GENMASK(7, 6)); \
-} while (0)
+static inline void SET_CMC_TBL_NG(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(7, 6));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NG,
+ GENMASK(7, 6));
+}
#define SET_CMC_TBL_MASK_CB GENMASK(1, 0)
-#define SET_CMC_TBL_CB(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(9, 8)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CB, \
- GENMASK(9, 8)); \
-} while (0)
+static inline void SET_CMC_TBL_CB(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(9, 8));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CB,
+ GENMASK(9, 8));
+}
#define SET_CMC_TBL_MASK_CS GENMASK(1, 0)
-#define SET_CMC_TBL_CS(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(11, 10)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CS, \
- GENMASK(11, 10)); \
-} while (0)
+static inline void SET_CMC_TBL_CS(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(11, 10));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CS,
+ GENMASK(11, 10));
+}
#define SET_CMC_TBL_MASK_CSI_TXBF_EN BIT(0)
-#define SET_CMC_TBL_CSI_TXBF_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, BIT(12)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_TXBF_EN, \
- BIT(12)); \
-} while (0)
+static inline void SET_CMC_TBL_CSI_TXBF_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, BIT(12));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_TXBF_EN,
+ BIT(12));
+}
#define SET_CMC_TBL_MASK_CSI_STBC_EN BIT(0)
-#define SET_CMC_TBL_CSI_STBC_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, BIT(13)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_STBC_EN, \
- BIT(13)); \
-} while (0)
+static inline void SET_CMC_TBL_CSI_STBC_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, BIT(13));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_STBC_EN,
+ BIT(13));
+}
#define SET_CMC_TBL_MASK_CSI_LDPC_EN BIT(0)
-#define SET_CMC_TBL_CSI_LDPC_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, BIT(14)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_LDPC_EN, \
- BIT(14)); \
-} while (0)
+static inline void SET_CMC_TBL_CSI_LDPC_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, BIT(14));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_LDPC_EN,
+ BIT(14));
+}
#define SET_CMC_TBL_MASK_CSI_PARA_EN BIT(0)
-#define SET_CMC_TBL_CSI_PARA_EN(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, BIT(15)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_PARA_EN, \
- BIT(15)); \
-} while (0)
+static inline void SET_CMC_TBL_CSI_PARA_EN(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, BIT(15));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_PARA_EN,
+ BIT(15));
+}
#define SET_CMC_TBL_MASK_CSI_FIX_RATE GENMASK(8, 0)
-#define SET_CMC_TBL_CSI_FIX_RATE(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(24, 16)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_FIX_RATE, \
- GENMASK(24, 16)); \
-} while (0)
+static inline void SET_CMC_TBL_CSI_FIX_RATE(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(24, 16));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_FIX_RATE,
+ GENMASK(24, 16));
+}
#define SET_CMC_TBL_MASK_CSI_GI_LTF GENMASK(2, 0)
-#define SET_CMC_TBL_CSI_GI_LTF(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(27, 25)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GI_LTF, \
- GENMASK(27, 25)); \
-} while (0)
+static inline void SET_CMC_TBL_CSI_GI_LTF(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(27, 25));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GI_LTF,
+ GENMASK(27, 25));
+}
#define SET_CMC_TBL_MASK_CSI_GID_SEL BIT(0)
-#define SET_CMC_TBL_CSI_GID_SEL(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, BIT(29)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GID_SEL, \
- BIT(29)); \
-} while (0)
+static inline void SET_CMC_TBL_CSI_GID_SEL(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, BIT(29));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GID_SEL,
+ BIT(29));
+}
#define SET_CMC_TBL_MASK_CSI_BW GENMASK(1, 0)
-#define SET_CMC_TBL_CSI_BW(table, val) \
-do { \
- le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(31, 30)); \
- le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_BW, \
- GENMASK(31, 30)); \
-} while (0)
-
-#define SET_FWROLE_MAINTAIN_MACID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0))
-#define SET_FWROLE_MAINTAIN_SELF_ROLE(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(9, 8))
-#define SET_FWROLE_MAINTAIN_UPD_MODE(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(12, 10))
-#define SET_FWROLE_MAINTAIN_WIFI_ROLE(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13))
-
-#define SET_JOININFO_MACID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0))
-#define SET_JOININFO_OP(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, BIT(8))
-#define SET_JOININFO_BAND(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, BIT(9))
-#define SET_JOININFO_WMM(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(11, 10))
-#define SET_JOININFO_TGR(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, BIT(12))
-#define SET_JOININFO_ISHESTA(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, BIT(13))
-#define SET_JOININFO_DLBW(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 14))
-#define SET_JOININFO_TF_MAC_PAD(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(17, 16))
-#define SET_JOININFO_DL_T_PE(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(20, 18))
-#define SET_JOININFO_PORT_ID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 21))
-#define SET_JOININFO_NET_TYPE(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(25, 24))
-#define SET_JOININFO_WIFI_ROLE(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(29, 26))
-#define SET_JOININFO_SELF_ROLE(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30))
-
-#define SET_GENERAL_PKT_MACID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0))
-#define SET_GENERAL_PKT_PROBRSP_ID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8))
-#define SET_GENERAL_PKT_PSPOLL_ID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16))
-#define SET_GENERAL_PKT_NULL_ID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24))
-#define SET_GENERAL_PKT_QOS_NULL_ID(h2c, val) \
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0))
-#define SET_GENERAL_PKT_CTS2SELF_ID(h2c, val) \
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8))
-
-#define SET_LOG_CFG_LEVEL(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0))
-#define SET_LOG_CFG_PATH(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8))
-#define SET_LOG_CFG_COMP(h2c, val) \
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(31, 0))
-#define SET_LOG_CFG_COMP_EXT(h2c, val) \
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 0))
-
-#define SET_BA_CAM_VALID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, BIT(0))
-#define SET_BA_CAM_INIT_REQ(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, BIT(1))
-#define SET_BA_CAM_ENTRY_IDX(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(3, 2))
-#define SET_BA_CAM_TID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 4))
-#define SET_BA_CAM_MACID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8))
-#define SET_BA_CAM_BMAP_SIZE(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16))
-#define SET_BA_CAM_SSN(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20))
-
-#define SET_LPS_PARM_MACID(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0))
-#define SET_LPS_PARM_PSMODE(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8))
-#define SET_LPS_PARM_RLBM(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16))
-#define SET_LPS_PARM_SMARTPS(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 20))
-#define SET_LPS_PARM_AWAKEINTERVAL(h2c, val) \
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24))
-#define SET_LPS_PARM_VOUAPSD(h2c, val) \
- le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(0))
-#define SET_LPS_PARM_VIUAPSD(h2c, val) \
- le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(1))
-#define SET_LPS_PARM_BEUAPSD(h2c, val) \
- le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(2))
-#define SET_LPS_PARM_BKUAPSD(h2c, val) \
- le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(3))
-#define SET_LPS_PARM_LASTRPWM(h2c, val) \
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8))
+static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
+{
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(31, 30));
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_BW,
+ GENMASK(31, 30));
+}
+
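/* Illustrative only (not part of this patch): every CMC table setter pairs
 * a value write with a mask write eight dwords later, so firmware knows
 * which fields of the CMAC control table the host intends to update.
 * A hypothetical caller (table size is an assumption):
 *
 *	__le32 tbl[17] = {};
 *	SET_CMC_TBL_DATA_DCM(tbl, 1);	value lands in dword 5, mask in dword 13
 */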
+static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
+}
+
+static inline void SET_FWROLE_MAINTAIN_SELF_ROLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(9, 8));
+}
+
+static inline void SET_FWROLE_MAINTAIN_UPD_MODE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(12, 10));
+}
+
+static inline void SET_FWROLE_MAINTAIN_WIFI_ROLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13));
+}
+
+static inline void SET_JOININFO_MACID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
+}
+
+static inline void SET_JOININFO_OP(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(8));
+}
+
+static inline void SET_JOININFO_BAND(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(9));
+}
+
+static inline void SET_JOININFO_WMM(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(11, 10));
+}
+
+static inline void SET_JOININFO_TGR(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(12));
+}
+
+static inline void SET_JOININFO_ISHESTA(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(13));
+}
+
+static inline void SET_JOININFO_DLBW(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 14));
+}
+
+static inline void SET_JOININFO_TF_MAC_PAD(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(17, 16));
+}
+
+static inline void SET_JOININFO_DL_T_PE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(20, 18));
+}
+
+static inline void SET_JOININFO_PORT_ID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 21));
+}
+
+static inline void SET_JOININFO_NET_TYPE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(25, 24));
+}
+
+static inline void SET_JOININFO_WIFI_ROLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(29, 26));
+}
+
+static inline void SET_JOININFO_SELF_ROLE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30));
+}
+
+static inline void SET_GENERAL_PKT_MACID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
+}
+
+static inline void SET_GENERAL_PKT_PROBRSP_ID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void SET_GENERAL_PKT_PSPOLL_ID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
+}
+
+static inline void SET_GENERAL_PKT_NULL_ID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24));
+}
+
+static inline void SET_GENERAL_PKT_QOS_NULL_ID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
+}
+
+static inline void SET_GENERAL_PKT_CTS2SELF_ID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8));
+}
+
+static inline void SET_LOG_CFG_LEVEL(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
+}
+
+static inline void SET_LOG_CFG_PATH(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void SET_LOG_CFG_COMP(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(31, 0));
+}
+
+static inline void SET_LOG_CFG_COMP_EXT(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 0));
+}
+
+static inline void SET_BA_CAM_VALID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(0));
+}
+
+static inline void SET_BA_CAM_INIT_REQ(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, BIT(1));
+}
+
+static inline void SET_BA_CAM_ENTRY_IDX(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(3, 2));
+}
+
+static inline void SET_BA_CAM_TID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 4));
+}
+
+static inline void SET_BA_CAM_MACID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void SET_BA_CAM_BMAP_SIZE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16));
+}
+
+static inline void SET_BA_CAM_SSN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20));
+}
+
+static inline void SET_LPS_PARM_MACID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
+}
+
+static inline void SET_LPS_PARM_PSMODE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void SET_LPS_PARM_RLBM(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16));
+}
+
+static inline void SET_LPS_PARM_SMARTPS(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 20));
+}
+
+static inline void SET_LPS_PARM_AWAKEINTERVAL(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24));
+}
+
+static inline void SET_LPS_PARM_VOUAPSD(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(0));
+}
+
+static inline void SET_LPS_PARM_VIUAPSD(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(1));
+}
+
+static inline void SET_LPS_PARM_BEUAPSD(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(2));
+}
+
+static inline void SET_LPS_PARM_BKUAPSD(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(3));
+}
+
+static inline void SET_LPS_PARM_LASTRPWM(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8));
+}
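/* Illustrative only (not part of this patch): converting the do-while
 * macros to static inline functions gives the setters real prototypes, so
 * argument types are checked by the compiler. A hypothetical caller
 * filling an LPS-parameter H2C buffer:
 */
static inline void example_fill_lps_parm(u8 *h2c, u8 macid)
{
	SET_LPS_PARM_MACID(h2c, macid);
	SET_LPS_PARM_PSMODE(h2c, 1);	/* placeholder PS mode */
	SET_LPS_PARM_VOUAPSD(h2c, 1);	/* VO queue U-APSD on */
	SET_LPS_PARM_LASTRPWM(h2c, 0);	/* placeholder RPWM value */
}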
enum rtw89_btc_btf_h2c_class {
BTFC_SET = 0x10,
@@ -1054,165 +1316,322 @@ enum rtw89_btc_cxdrvinfo {
CXDRVINFO_MAX,
};
-#define RTW89_SET_FWCMD_CXHDR_TYPE(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 0, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXHDR_LEN(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 1, val, GENMASK(7, 0))
-
-#define RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 4, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(0))
-#define RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(1))
-#define RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 6, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 7, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(0))
-#define RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(1))
-#define RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(2))
-#define RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 10, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(0))
-#define RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(1))
-#define RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(2))
-#define RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(3))
-#define RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(4))
-
-#define RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, val) \
- u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(0))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(1))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(2))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(3))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(4))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(5))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(6))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(7))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(8))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(9))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(10))
-#define RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, val) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(11))
-#define RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, val, n) \
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(0))
-#define RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, val, n) \
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(3, 1))
-#define RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, val, n) \
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(4))
-#define RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, val, n) \
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(5))
-#define RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, val, n) \
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(7, 6))
-#define RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, val, n) \
- u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, BIT(0))
-#define RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, val, n) \
- u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, GENMASK(7, 1))
-#define RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, val, n) \
- u8p_replace_bits((u8 *)(cmd) + (8 + 12 * (n)), val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, val, n) \
- u8p_replace_bits((u8 *)(cmd) + (9 + 12 * (n)), val, GENMASK(7, 0))
-#define RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, val, n) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (10 + 12 * (n))), val, GENMASK(15, 0))
-#define RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, val, n) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (12 + 12 * (n))), val, GENMASK(15, 0))
-#define RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, val, n) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (14 + 12 * (n))), val, GENMASK(15, 0))
-#define RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, val, n) \
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (16 + 12 * (n))), val, GENMASK(15, 0))
-
-#define RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, val) \
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(0))
-#define RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, val) \
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(1))
-#define RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, val) \
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(2))
-#define RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, val) \
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(18, 3))
-
-#define RTW89_SET_FWCMD_CXRFK_STATE(cmd, val) \
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(1, 0))
-#define RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, val) \
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(5, 2))
-#define RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, val) \
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(7, 6))
-#define RTW89_SET_FWCMD_CXRFK_BAND(cmd, val) \
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(9, 8))
-#define RTW89_SET_FWCMD_CXRFK_TYPE(cmd, val) \
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(17, 10))
+static inline void RTW89_SET_FWCMD_CXHDR_TYPE(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 0, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXHDR_LEN(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 1, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_ANT_TYPE(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_ANT_NUM(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_ANT_ISO(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 4, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_ANT_POS(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_MOD_RFE(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 6, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_MOD_CV(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 7, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(2));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_WL_GCH(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 10, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_WL_ONLY(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_WL_INITOK(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_DBCC_EN(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(2));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_CX_OTHER(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(3));
+}
+
+static inline void RTW89_SET_FWCMD_CXINIT_BT_ONLY(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(4));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_LINK_MODE(void *cmd, u8 val)
+{
+ u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_NONE(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_STA(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_AP(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(2));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_VAP(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(3));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(4));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(5));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_MESH(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(6));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(7));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(8));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(9));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(10));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_NAN(void *cmd, u16 val)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(11));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(void *cmd, u8 val, int n)
+{
+ u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_PID(void *cmd, u8 val, int n)
+{
+ u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(3, 1));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_PHY(void *cmd, u8 val, int n)
+{
+ u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(4));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA(void *cmd, u8 val, int n)
+{
+ u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(5));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_BAND(void *cmd, u8 val, int n)
+{
+ u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(7, 6));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(void *cmd, u8 val, int n)
+{
+ u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_BW(void *cmd, u8 val, int n)
+{
+ u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, GENMASK(7, 1));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_ROLE(void *cmd, u8 val, int n)
+{
+ u8p_replace_bits((u8 *)(cmd) + (8 + 12 * (n)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CH(void *cmd, u8 val, int n)
+{
+ u8p_replace_bits((u8 *)(cmd) + (9 + 12 * (n)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(void *cmd, u16 val, int n)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + (10 + 12 * (n))), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(void *cmd, u16 val, int n)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + (12 + 12 * (n))), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(void *cmd, u16 val, int n)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + (14 + 12 * (n))), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(void *cmd, u16 val, int n)
+{
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + (16 + 12 * (n))), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXCTRL_MANUAL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(2));
+}
+
+static inline void RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(18, 3));
+}
+
+static inline void RTW89_SET_FWCMD_CXRFK_STATE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(1, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXRFK_PATH_MAP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(5, 2));
+}
+
+static inline void RTW89_SET_FWCMD_CXRFK_PHY_MAP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(7, 6));
+}
+
+static inline void RTW89_SET_FWCMD_CXRFK_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(9, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CXRFK_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(17, 10));
+}
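/* Illustrative only (not part of this patch): the coex helpers address the
 * command buffer by byte offset, header bytes first. A hypothetical RFK
 * notification (type and length values are placeholders):
 */
static inline void example_fill_cxrfk(u8 *cmd, u8 state, u8 path_map)
{
	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, 0x5);	/* placeholder type */
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, 4);	/* placeholder payload len */
	RTW89_SET_FWCMD_CXRFK_STATE(cmd, state);
	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, path_map);
}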
#define RTW89_C2H_HEADER_LEN 8
#define RTW89_GET_C2H_CATEGORY(c2h) \
- le32_get_bits(*((__le32 *)c2h), GENMASK(1, 0))
+ le32_get_bits(*((const __le32 *)c2h), GENMASK(1, 0))
#define RTW89_GET_C2H_CLASS(c2h) \
- le32_get_bits(*((__le32 *)c2h), GENMASK(7, 2))
+ le32_get_bits(*((const __le32 *)c2h), GENMASK(7, 2))
#define RTW89_GET_C2H_FUNC(c2h) \
- le32_get_bits(*((__le32 *)c2h), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)c2h), GENMASK(15, 8))
#define RTW89_GET_C2H_LEN(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 1), GENMASK(13, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(13, 0))
#define RTW89_GET_C2H_LOG_SRT_PRT(c2h) (char *)((__le32 *)(c2h) + 2)
#define RTW89_GET_C2H_LOG_LEN(len) ((len) - RTW89_C2H_HEADER_LEN)
#define RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(1, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
#define RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(7, 2))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 2))
#define RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
#define RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 16))
#define RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(31, 24))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 24))
#define RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(1, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
#define RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(7, 2))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 2))
#define RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
#define RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 16))
#define RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 0))
#define RTW89_GET_PHY_C2H_RA_RPT_RETRY_RATIO(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 16))
#define RTW89_GET_PHY_C2H_RA_RPT_MCSNSS(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(6, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(6, 0))
#define RTW89_GET_PHY_C2H_RA_RPT_MD_SEL(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(9, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(9, 8))
#define RTW89_GET_PHY_C2H_RA_RPT_GILTF(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(12, 10))
+ le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(12, 10))
#define RTW89_GET_PHY_C2H_RA_RPT_BW(c2h) \
- le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(14, 13))
+ le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(14, 13))
/* VHT, HE, HT-old: [6:4]: NSS, [3:0]: MCS
 * HT-new: [6:5]: NA, [4:0]: MCS
 */
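/* Illustrative only (not part of this patch): adding const to the casts
 * lets the getters accept a pointer into a read-only receive buffer
 * without a qualifier warning, e.g. a hypothetical dispatch:
 *
 *	const u8 *data = skb->data;
 *	u8 cat = RTW89_GET_C2H_CATEGORY(data);
 *	u8 cls = RTW89_GET_C2H_CLASS(data);
 *	u8 func = RTW89_GET_C2H_FUNC(data);
 *	u16 len = RTW89_GET_C2H_LEN(data);
 */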
@@ -1337,7 +1756,8 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
-int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
+ struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct work_struct *work);
int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index afcd07ab1de7..b98c47e9ecfe 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1093,7 +1093,6 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
static int dmac_func_en(struct rtw89_dev *rtwdev)
{
u32 val32;
- u32 ret = 0;
val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN |
B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
@@ -1107,7 +1106,7 @@ static int dmac_func_en(struct rtw89_dev *rtwdev)
B_AX_WD_RLS_CLK_EN);
rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
- return ret;
+ return 0;
}
static int chip_func_en(struct rtw89_dev *rtwdev)
@@ -2991,7 +2990,7 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
if (ret)
return ret;
- ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
if (ret)
return ret;
@@ -3012,7 +3011,7 @@ int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_cam_deinit(rtwdev, rtwvif);
- ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
if (ret)
return ret;
@@ -3451,6 +3450,18 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
return FIELD_GET(B_AX_LTE_MUX_CTRL_PATH >> 24, val);
}
+u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band)
+{
+ u32 reg;
+ u16 cnt;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, band);
+ cnt = rtw89_read32_mask(rtwdev, reg, B_AX_BT_PLT_PKT_CNT_MASK);
+ rtw89_write16_set(rtwdev, reg, B_AX_BT_PLT_RST);
+
+ return cnt;
+}
+
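/* Illustrative only (not part of this patch): the read is destructive,
 * since B_AX_BT_PLT_RST clears the hardware counter, so callers should
 * latch the returned value, e.g. (mac index name is an assumption):
 *
 *	u16 plt_cnt = rtw89_mac_get_plt_cnt(rtwdev, RTW89_MAC_0);
 */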
static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
{
u32 reg;
@@ -3695,7 +3706,7 @@ void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
{
struct rtw89_traffic_stats *stats = &rtwdev->stats;
struct rtw89_vif *rtwvif;
- bool en = stats->tx_tfc_lv > stats->rx_tfc_lv ? false : true;
+ bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv;
bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags);
if (en == old)
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 6f3db8a2a9c2..b7d13edf7dd1 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -227,6 +227,7 @@ enum rtw89_mac_dbg_port_sel {
/* SRAM mem dump */
#define R_AX_INDIR_ACCESS_ENTRY 0x40000
+#define AXIDMA_BASE_ADDR 0x18006000
#define STA_SCHED_BASE_ADDR 0x18808000
#define RXPLD_FLTR_CAM_BASE_ADDR 0x18813000
#define SECURITY_CAM_BASE_ADDR 0x18814000
@@ -240,10 +241,15 @@ enum rtw89_mac_dbg_port_sel {
#define DMAC_TBL_BASE_ADDR 0x18800000
#define SHCUT_MACHDR_BASE_ADDR 0x18800800
#define BCN_IE_CAM1_BASE_ADDR 0x188A0000
+#define TXD_FIFO_0_BASE_ADDR 0x18856200
+#define TXD_FIFO_1_BASE_ADDR 0x188A1080
+#define TXDATA_FIFO_0_BASE_ADDR 0x18856000
+#define TXDATA_FIFO_1_BASE_ADDR 0x188A1000
#define CCTL_INFO_SIZE 32
enum rtw89_mac_mem_sel {
+ RTW89_MAC_MEM_AXIDMA,
RTW89_MAC_MEM_SHARED_BUF,
RTW89_MAC_MEM_DMAC_TBL,
RTW89_MAC_MEM_SHCUT_MACHDR,
@@ -256,6 +262,10 @@ enum rtw89_mac_mem_sel {
RTW89_MAC_MEM_BA_CAM,
RTW89_MAC_MEM_BCN_IE_CAM0,
RTW89_MAC_MEM_BCN_IE_CAM1,
+ RTW89_MAC_MEM_TXD_FIFO_0,
+ RTW89_MAC_MEM_TXD_FIFO_1,
+ RTW89_MAC_MEM_TXDATA_FIFO_0,
+ RTW89_MAC_MEM_TXDATA_FIFO_1,
/* keep last */
RTW89_MAC_MEM_LAST,
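/* Illustrative only (not part of this patch): the new enum entries are
 * expected to index a MAC memory base-address table alongside the defines
 * added above, roughly (table name is an assumption):
 *
 *	[RTW89_MAC_MEM_AXIDMA]        = AXIDMA_BASE_ADDR,
 *	[RTW89_MAC_MEM_TXD_FIFO_0]    = TXD_FIFO_0_BASE_ADDR,
 *	[RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR,
 */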
@@ -778,6 +788,7 @@ int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex
int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
+u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band);
void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val);
u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 16dc6fb7dbb0..a322259f4cc4 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -27,6 +27,7 @@ static void rtw89_ops_tx(struct ieee80211_hw *hw,
if (ret) {
rtw89_err(rtwdev, "failed to transmit skb: %d\n", ret);
ieee80211_free_txskb(hw, skb);
+ return;
}
rtw89_core_tx_kick_off(rtwdev, qsel);
}
@@ -336,7 +337,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
ether_addr_copy(rtwvif->bssid, conf->bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
- rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
}
if (changed & BSS_CHANGED_ERP_SLOT)
@@ -615,6 +616,7 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
const u8 *mac_addr)
{
struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_hal *hal = &rtwdev->hal;
mutex_lock(&rtwdev->mutex);
@@ -623,6 +625,7 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
rtw89_chip_rfk_scan(rtwdev, true);
rtw89_hci_recalc_int_mit(rtwdev);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
mutex_unlock(&rtwdev->mutex);
}
@@ -630,8 +633,10 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
rtw89_chip_rfk_scan(rtwdev, false);
rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
rtwdev->scanning = false;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index ab134856baac..147009888de0 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -654,6 +654,12 @@ rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
+ if (page >= RTW89_H2C_RF_PAGE_NUM) {
+ rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d\n",
+ rf_path, info->curr_idx);
+ return;
+ }
+
info->rtw89_phy_config_rf_h2c[page][idx] =
cpu_to_le32((reg->addr << 20) | reg->data);
info->curr_idx++;
@@ -662,30 +668,29 @@ rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info)
{
- u16 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
- u16 len = (info->curr_idx % RTW89_H2C_RF_PAGE_SIZE) * 4;
+ u16 remain = info->curr_idx;
+ u16 len = 0;
u8 i;
int ret = 0;
- if (page > RTW89_H2C_RF_PAGE_NUM) {
+ if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
rtw89_warn(rtwdev,
- "rf reg h2c total page num %d larger than %d (RTW89_H2C_RF_PAGE_NUM)\n",
- page, RTW89_H2C_RF_PAGE_NUM);
- return -EINVAL;
+ "rf reg h2c total len %d larger than %d\n",
+ remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
}
- for (i = 0; i < page; i++) {
- ret = rtw89_fw_h2c_rf_reg(rtwdev, info,
- RTW89_H2C_RF_PAGE_SIZE * 4, i);
+ for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
+ len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
+ ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
if (ret)
- return ret;
+ goto out;
}
- ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len, i);
- if (ret)
- return ret;
+out:
info->curr_idx = 0;
- return 0;
+ return ret;
}
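/* Illustrative only (not part of this patch): assuming a page holds
 * RTW89_H2C_RF_PAGE_SIZE = 25 entries, a curr_idx of 60 is now sent as
 * three H2Cs of 25, 25 and 10 entries (len * 4 bytes each), remain
 * stepping 60 -> 35 -> 10 -> 0. The old code sent full pages plus a tail
 * in a separate call and checked the page bound with > instead of >=.
 */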
static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
@@ -1099,9 +1104,15 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
switch (band) {
case RTW89_BAND_2G:
lmt = (*chip->txpwr_lmt_2g)[bw][ntx][rs][bf][regd][ch_idx];
+ if (!lmt)
+ lmt = (*chip->txpwr_lmt_2g)[bw][ntx][rs][bf]
+ [RTW89_WW][ch_idx];
break;
case RTW89_BAND_5G:
lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf][regd][ch_idx];
+ if (!lmt)
+ lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf]
+ [RTW89_WW][ch_idx];
break;
default:
rtw89_warn(rtwdev, "unknown band type: %d\n", band);
@@ -1224,9 +1235,15 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
switch (band) {
case RTW89_BAND_2G:
lmt_ru = (*chip->txpwr_lmt_ru_2g)[ru][ntx][regd][ch_idx];
+ if (!lmt_ru)
+ lmt_ru = (*chip->txpwr_lmt_ru_2g)[ru][ntx]
+ [RTW89_WW][ch_idx];
break;
case RTW89_BAND_5G:
lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx][regd][ch_idx];
+ if (!lmt_ru)
+ lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx]
+ [RTW89_WW][ch_idx];
break;
default:
rtw89_warn(rtwdev, "unknown band type: %d\n", band);
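
Both tx-power lookups above gain the same fallback: a zero in the regulatory-domain-specific column is treated as "no entry", and the worldwide (RTW89_WW) column is consulted instead. A small standalone model of that lookup-with-fallback; the table contents and names here are made up for illustration:

/* Model of the lookup-with-fallback above: 0 in the per-regd column
 * means "no specific limit", so the worldwide column is used instead.
 * Table contents are illustrative only. */
#include <stdio.h>

#define REGD_WW   0	/* stand-in for RTW89_WW */
#define REGD_ETSI 1
#define REGD_NUM  2

static const signed char lmt[REGD_NUM][3] = {
	[REGD_WW]   = { 40, 40, 40 },	/* conservative worldwide limits */
	[REGD_ETSI] = { 56,  0, 52 },	/* middle channel has no ETSI entry */
};

static signed char read_txpwr_limit(int regd, int ch)
{
	signed char v = lmt[regd][ch];

	return v ? v : lmt[REGD_WW][ch];	/* 0 means "fall back" */
}

int main(void)
{
	printf("%d %d %d\n",
	       read_txpwr_limit(REGD_ETSI, 0),	/* 56 */
	       read_txpwr_limit(REGD_ETSI, 1),	/* 40, from the WW column */
	       read_txpwr_limit(REGD_ETSI, 2));	/* 52 */
	return 0;
}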
@@ -1767,7 +1784,7 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
}
rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
cfo->cfo_avg_pre = new_cfo;
- x_cap_update = cfo->crystal_cap == pre_x_cap ? false : true;
+ x_cap_update = cfo->crystal_cap != pre_x_cap;
rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,
@@ -2404,6 +2421,116 @@ void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
env->ccx_watchdog_result, chk_result);
}
+static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
+{
+ if (*ie_page > RTW89_PHYSTS_BITMAP_NUM ||
+ *ie_page == RTW89_RSVD_9)
+ return false;
+ else if (*ie_page > RTW89_RSVD_9)
+ *ie_page -= 1;
+
+ return true;
+}
+
+static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
+{
+ static const u8 ie_page_shift = 2;
+
+ return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
+}
+
+static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_status_bitmap ie_page)
+{
+ u32 addr;
+
+ if (!rtw89_physts_ie_page_valid(&ie_page))
+ return 0;
+
+ addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
+
+ return rtw89_phy_read32(rtwdev, addr);
+}
+
+static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_status_bitmap ie_page,
+ u32 val)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 addr;
+
+ if (!rtw89_physts_ie_page_valid(&ie_page))
+ return;
+
+ if (chip->chip_id == RTL8852A)
+ val &= B_PHY_STS_BITMAP_MSK_52A;
+
+ addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
+ rtw89_phy_write32(rtwdev, addr, val);
+}
+
+static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_status_bitmap bitmap,
+ enum rtw89_phy_status_ie_type ie,
+ bool enable)
+{
+ u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap);
+
+ if (enable)
+ val |= BIT(ie);
+ else
+ val &= ~BIT(ie);
+
+ rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val);
+}
+
+static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
+ bool enable,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (enable) {
+ rtw89_phy_write32_clr(rtwdev, R_PLCP_HISTOGRAM,
+ B_STS_DIS_TRIG_BY_FAIL);
+ rtw89_phy_write32_clr(rtwdev, R_PLCP_HISTOGRAM,
+ B_STS_DIS_TRIG_BY_BRK);
+ } else {
+ rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM,
+ B_STS_DIS_TRIG_BY_FAIL);
+ rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM,
+ B_STS_DIS_TRIG_BY_BRK);
+ }
+}
+
+static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 i;
+
+ if (chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
+ rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0);
+
+ for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
+ if (i >= RTW89_CCK_PKT)
+ rtw89_physts_enable_ie_bitmap(rtwdev, i,
+ RTW89_PHYSTS_IE09_FTR_0,
+ true);
+ if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) ||
+ (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT))
+ continue;
+ rtw89_physts_enable_ie_bitmap(rtwdev, i,
+ RTW89_PHYSTS_IE24_OFDM_TD_PATH_A,
+ true);
+ }
+ rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT,
+ RTW89_PHYSTS_IE13_DL_MU_DEF, true);
+ rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT,
+ RTW89_PHYSTS_IE13_DL_MU_DEF, true);
+
+ /* force IE01 for channel index, only channel field is valid */
+ rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT,
+ RTW89_PHYSTS_IE01_CMN_OFDM, true);
+}
+
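
The helpers above map each PHY-status bitmap page to a 32-bit register at a 4-byte stride from R_PHY_STS_BITMAP_ADDR_START, with the reserved page 9 skipped: valid pages above it shift down by one before the shift-by-2 address computation. A standalone model of that addressing, using stand-in constants rather than the real register map (it mirrors the `>` bound check used above):

/* Model of the bitmap-page addressing above: pages sit 4 bytes apart
 * from a base register, and the reserved page 9 is skipped, so pages
 * above it shift down by one.  Constants are stand-ins, not the real
 * R_PHY_STS_BITMAP_* map. */
#include <stdio.h>

#define BITMAP_BASE 0x073C	/* stand-in for R_PHY_STS_BITMAP_ADDR_START */
#define RSVD_PAGE   9		/* stand-in for RTW89_RSVD_9 */
#define BITMAP_NUM  16		/* stand-in for RTW89_PHYSTS_BITMAP_NUM */

static int page_to_addr(int page, unsigned int *addr)
{
	if (page > BITMAP_NUM || page == RSVD_PAGE)
		return -1;		/* invalid or reserved page */
	if (page > RSVD_PAGE)
		page -= 1;		/* close the hole left by page 9 */
	*addr = BITMAP_BASE + (page << 2);
	return 0;
}

int main(void)
{
	unsigned int a;
	int p;

	for (p = 8; p <= 11; p++)
		if (!page_to_addr(p, &a))
			printf("page %2d -> 0x%04x\n", p, a);
	/* page 8 -> 0x075C, page 9 skipped, 10 -> 0x0760, 11 -> 0x0764 */
	return 0;
}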
static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -2839,6 +2966,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_chip_bb_sethw(rtwdev);
rtw89_phy_env_monitor_init(rtwdev);
+ rtw89_physts_parsing_init(rtwdev);
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 370129345e0f..b1f059b725a1 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -134,6 +134,66 @@ enum rtw89_ccx_unit {
RTW89_CCX_32_US = 3
};
+enum rtw89_phy_status_ie_type {
+ RTW89_PHYSTS_IE00_CMN_CCK = 0,
+ RTW89_PHYSTS_IE01_CMN_OFDM = 1,
+ RTW89_PHYSTS_IE02_CMN_EXT_AX = 2,
+ RTW89_PHYSTS_IE03_CMN_EXT_SEG_1 = 3,
+ RTW89_PHYSTS_IE04_CMN_EXT_PATH_A = 4,
+ RTW89_PHYSTS_IE05_CMN_EXT_PATH_B = 5,
+ RTW89_PHYSTS_IE06_CMN_EXT_PATH_C = 6,
+ RTW89_PHYSTS_IE07_CMN_EXT_PATH_D = 7,
+ RTW89_PHYSTS_IE08_FTR_CH = 8,
+ RTW89_PHYSTS_IE09_FTR_0 = 9,
+ RTW89_PHYSTS_IE10_FTR_PLCP_EXT = 10,
+ RTW89_PHYSTS_IE11_FTR_PLCP_HISTOGRAM = 11,
+ RTW89_PHYSTS_IE12_MU_EIGEN_INFO = 12,
+ RTW89_PHYSTS_IE13_DL_MU_DEF = 13,
+ RTW89_PHYSTS_IE14_TB_UL_CQI = 14,
+ RTW89_PHYSTS_IE15_TB_UL_DEF = 15,
+ RTW89_PHYSTS_IE16_RSVD16 = 16,
+ RTW89_PHYSTS_IE17_TB_UL_CTRL = 17,
+ RTW89_PHYSTS_IE18_DBG_OFDM_FD_CMN = 18,
+ RTW89_PHYSTS_IE19_DBG_OFDM_TD_CMN = 19,
+ RTW89_PHYSTS_IE20_DBG_OFDM_FD_USER_SEG_0 = 20,
+ RTW89_PHYSTS_IE21_DBG_OFDM_FD_USER_SEG_1 = 21,
+ RTW89_PHYSTS_IE22_DBG_OFDM_FD_USER_AGC = 22,
+ RTW89_PHYSTS_IE23_RSVD23 = 23,
+ RTW89_PHYSTS_IE24_OFDM_TD_PATH_A = 24,
+ RTW89_PHYSTS_IE25_OFDM_TD_PATH_B = 25,
+ RTW89_PHYSTS_IE26_OFDM_TD_PATH_C = 26,
+ RTW89_PHYSTS_IE27_OFDM_TD_PATH_D = 27,
+ RTW89_PHYSTS_IE28_DBG_CCK_PATH_A = 28,
+ RTW89_PHYSTS_IE29_DBG_CCK_PATH_B = 29,
+ RTW89_PHYSTS_IE30_DBG_CCK_PATH_C = 30,
+ RTW89_PHYSTS_IE31_DBG_CCK_PATH_D = 31,
+
+ /* keep last */
+ RTW89_PHYSTS_IE_NUM,
+ RTW89_PHYSTS_IE_MAX = RTW89_PHYSTS_IE_NUM - 1
+};
+
+enum rtw89_phy_status_bitmap {
+ RTW89_TD_SEARCH_FAIL = 0,
+ RTW89_BRK_BY_TX_PKT = 1,
+ RTW89_CCA_SPOOF = 2,
+ RTW89_OFDM_BRK = 3,
+ RTW89_CCK_BRK = 4,
+ RTW89_DL_MU_SPOOFING = 5,
+ RTW89_HE_MU = 6,
+ RTW89_VHT_MU = 7,
+ RTW89_UL_TB_SPOOFING = 8,
+ RTW89_RSVD_9 = 9,
+ RTW89_TRIG_BASE_PPDU = 10,
+ RTW89_CCK_PKT = 11,
+ RTW89_LEGACY_OFDM_PKT = 12,
+ RTW89_HT_PKT = 13,
+ RTW89_VHT_PKT = 14,
+ RTW89_HE_PKT = 15,
+
+ RTW89_PHYSTS_BITMAP_NUM
+};
+
enum rtw89_dig_gain_type {
RTW89_DIG_GAIN_LNA_G = 0,
RTW89_DIG_GAIN_TIA_G = 1,
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 365d8c8ce57b..e0a416d37d0e 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -1658,7 +1658,7 @@
#define R_RSTB_WATCH_DOG 0x000C
#define B_P0_RSTB_WATCH_DOG BIT(0)
#define B_P1_RSTB_WATCH_DOG BIT(1)
-#define B_UPD_P0_EN BIT(30)
+#define B_UPD_P0_EN BIT(31)
#define R_ANAPAR_PW15 0x030C
#define B_ANAPAR_PW15 GENMASK(31, 24)
#define B_ANAPAR_PW15_H GENMASK(27, 24)
@@ -1674,6 +1674,29 @@
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
#define R_RSTB_ASYNC 0x0704
#define B_RSTB_ASYNC_ALL BIT(1)
+#define R_MAC_PIN_SEL 0x0734
+#define B_CH_IDX_SEG0 GENMASK(23, 16)
+#define R_PLCP_HISTOGRAM 0x0738
+#define B_STS_DIS_TRIG_BY_BRK BIT(2)
+#define B_STS_DIS_TRIG_BY_FAIL BIT(3)
+#define R_PHY_STS_BITMAP_ADDR_START R_PHY_STS_BITMAP_SEARCH_FAIL
+#define B_PHY_STS_BITMAP_ADDR_MASK GENMASK(6, 2)
+#define R_PHY_STS_BITMAP_SEARCH_FAIL 0x073C
+#define B_PHY_STS_BITMAP_MSK_52A 0x337cff3f
+#define R_PHY_STS_BITMAP_R2T 0x0740
+#define R_PHY_STS_BITMAP_CCA_SPOOF 0x0744
+#define R_PHY_STS_BITMAP_OFDM_BRK 0x0748
+#define R_PHY_STS_BITMAP_CCK_BRK 0x074C
+#define R_PHY_STS_BITMAP_DL_MU_SPOOF 0x0750
+#define R_PHY_STS_BITMAP_HE_MU 0x0754
+#define R_PHY_STS_BITMAP_VHT_MU 0x0758
+#define R_PHY_STS_BITMAP_UL_TB_SPOOF 0x075C
+#define R_PHY_STS_BITMAP_TRIGBASE 0x0760
+#define R_PHY_STS_BITMAP_CCK 0x0764
+#define R_PHY_STS_BITMAP_LEGACY 0x0768
+#define R_PHY_STS_BITMAP_HT 0x076C
+#define R_PHY_STS_BITMAP_VHT 0x0770
+#define R_PHY_STS_BITMAP_HE 0x0774
#define R_PMAC_GNT 0x0980
#define B_PMAC_GNT_TXEN BIT(0)
#define B_PMAC_GNT_RXEN BIT(16)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index f00b94ecfff4..4c37e590e43c 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -15,243 +15,244 @@ static const struct rtw89_regulatory rtw89_ww_regd =
COUNTRY_REGD("00", RTW89_WW, RTW89_WW);
static const struct rtw89_regulatory rtw89_regd_map[] = {
- COUNTRY_REGD("AR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BO", RTW89_WW, RTW89_FCC),
+ COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO),
+ COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CL", RTW89_WW, RTW89_CHILE),
+ COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE),
COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SV", RTW89_WW, RTW89_FCC),
+ COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("HN", RTW89_WW, RTW89_FCC),
- COUNTRY_REGD("MX", RTW89_FCC, RTW89_MEXICO),
+ COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO),
COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("UY", RTW89_WW, RTW89_FCC),
- COUNTRY_REGD("VE", RTW89_WW, RTW89_FCC),
+ COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AT", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("BE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("CY", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("CZ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("DK", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("EE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("FI", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("FR", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("DE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GR", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("HU", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("IS", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("IE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("IT", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("LV", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("LI", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("LT", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("LU", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MT", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MC", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("NL", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("NO", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("PL", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("PT", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("SK", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("SI", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("ES", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("SE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("CH", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GB", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("AL", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("AZ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("BH", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("BA", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("BG", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("HR", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("EG", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GH", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("IQ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("IL", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("JO", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("KZ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("KE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("KW", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("KG", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("LB", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("LS", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MK", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MA", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MZ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("NA", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("NG", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("OM", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("QA", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("RO", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("RU", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("SA", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("SN", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("RS", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("ME", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("ZA", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("TR", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("UA", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("AE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("YE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("ZW", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("BD", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("KH", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("CN", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("HK", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("IN", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GB", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR),
+ COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE),
+ COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CN", RTW89_CN, RTW89_CN),
+ COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC),
- COUNTRY_REGD("MY", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("PK", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("PH", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("SG", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("LK", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("TH", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("VN", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("AU", RTW89_WW, RTW89_ACMA),
- COUNTRY_REGD("NZ", RTW89_WW, RTW89_ACMA),
- COUNTRY_REGD("PG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA),
+ COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA),
+ COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CA", RTW89_IC, RTW89_IC),
COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK),
- COUNTRY_REGD("JM", RTW89_WW, RTW89_FCC),
+ COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("TN", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DZ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AD", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("AO", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("AI", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("AQ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BY", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BJ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BT", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("BW", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("BV", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("IO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BN", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("BF", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MM", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("BI", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("CM", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("CV", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CF", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("TD", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("CX", RTW89_WW, RTW89_ACMA),
- COUNTRY_REGD("CC", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("KM", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("CG", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("CD", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("CK", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA),
+ COUNTRY_REGD("CC", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DJ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GQ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("ER", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("ET", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("FK", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("FO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GF", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("PF", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("TF", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GA", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GM", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GI", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GL", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GP", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GG", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GN", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GW", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GY", RTW89_FCC, RTW89_NCC),
+ COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GY", RTW89_NCC, RTW89_NCC),
COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("HM", RTW89_WW, RTW89_ACMA),
- COUNTRY_REGD("VA", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("IM", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("JE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("KI", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("LA", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("LR", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("LY", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MO", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MG", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MW", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MV", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("ML", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA),
+ COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MQ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MR", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MU", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("YT", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MD", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MN", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("MS", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("NR", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("NP", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("NC", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("NE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("NU", RTW89_WW, RTW89_ACMA),
- COUNTRY_REGD("NF", RTW89_WW, RTW89_ACMA),
+ COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA),
+ COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("RE", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("RW", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("SH", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SL", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("SB", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("SO", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("GS", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SJ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("SZ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("TJ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("TZ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("TG", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("TK", RTW89_WW, RTW89_ACMA),
- COUNTRY_REGD("TO", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("TM", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("TC", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA),
+ COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("UG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("UZ", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("VU", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("WF", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("EH", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("ZM", RTW89_WW, RTW89_ETSI),
- COUNTRY_REGD("IR", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI),
};
static const struct rtw89_regulatory *rtw89_regd_find_reg_by_name(char *alpha2)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 5c6ffca3a324..6b75e4bc7352 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -1053,10 +1053,10 @@ static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev,
struct rtw89_channel_params *param,
enum rtw89_phy_idx phy_idx)
{
- bool cck_en = param->center_chan > 14 ? false : true;
+ bool cck_en = param->center_chan <= 14;
u8 pri_ch_idx = param->pri_ch_idx;
- if (param->center_chan <= 14)
+ if (cck_en)
rtw8852a_ctrl_sco_cck(rtwdev, param->center_chan,
param->primary_chan, param->bandwidth);
@@ -1069,6 +1069,8 @@ static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev,
rtw8852a_bbrst_for_rfk(rtwdev, phy_idx);
}
rtw8852a_spur_elimination(rtwdev, param->center_chan);
+ rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0,
+ param->primary_chan);
rtw8852a_bb_reset_all(rtwdev, phy_idx);
}
@@ -1927,6 +1929,21 @@ void rtw8852a_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
}
+static void rtw8852a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u16 chan = phy_ppdu->chan_idx;
+ u8 band;
+
+ if (chan == 0)
+ return;
+
+ band = chan <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ status->freq = ieee80211_channel_to_frequency(chan, band);
+ status->band = band;
+}
+
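
The new helper above recovers the rx band and center frequency from the PPDU's channel index, treating channels up to 14 as 2.4 GHz and handing the conversion to ieee80211_channel_to_frequency(). A standalone model using the standard 802.11 channel-to-MHz mapping; the conversion function here is a local stand-in, not the mac80211 helper:

/* Model of the fill-freq-from-PPDU helper above.  chan_to_freq() is the
 * standard 802.11 channel->MHz mapping; the driver delegates this to
 * ieee80211_channel_to_frequency(). */
#include <stdio.h>

static int chan_to_freq(int chan)
{
	if (chan == 14)
		return 2484;
	if (chan < 14)
		return 2407 + chan * 5;		/* 2.4 GHz band */
	return 5000 + chan * 5;			/* 5 GHz band */
}

static void fill_freq(int chan_idx)
{
	if (chan_idx == 0)
		return;				/* no channel in this PPDU */
	printf("chan %3d -> %d MHz (%s)\n", chan_idx, chan_to_freq(chan_idx),
	       chan_idx <= 14 ? "2GHz" : "5GHz");
}

int main(void)
{
	fill_freq(6);	/* 2437 MHz */
	fill_freq(36);	/* 5180 MHz */
	fill_freq(0);	/* skipped, like chan_idx == 0 above */
	return 0;
}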
static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status)
@@ -1939,6 +1956,8 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
status->chains |= BIT(path);
status->chain_signal[path] = rx_power[path];
}
+ if (phy_ppdu->valid)
+ rtw8852a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
static const struct rtw89_chip_ops rtw8852a_chip_ops = {
@@ -2012,7 +2031,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.limit_efuse_size = 1152,
.phycap_addr = 0x580,
.phycap_size = 128,
- .para_ver = 0x05050764,
+ .para_ver = 0x05050864,
.wlcx_desired = 0x05050000,
.btcx_desired = 0x5,
.scbd = 0x1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
index 3a4fe7207420..253b5f8fc4f9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
@@ -43384,5248 +43384,6991 @@ static const u8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
- [0][0][0][0][0][0] = 56,
- [0][0][0][0][0][1] = 56,
- [0][0][0][0][0][2] = 56,
- [0][0][0][0][0][3] = 56,
- [0][0][0][0][0][4] = 56,
- [0][0][0][0][0][5] = 56,
- [0][0][0][0][0][6] = 56,
- [0][0][0][0][0][7] = 56,
- [0][0][0][0][0][8] = 56,
- [0][0][0][0][0][9] = 56,
- [0][0][0][0][0][10] = 56,
- [0][0][0][0][0][11] = 56,
- [0][0][0][0][0][12] = 48,
- [0][0][0][0][0][13] = 76,
- [0][1][0][0][0][0] = 44,
- [0][1][0][0][0][1] = 44,
- [0][1][0][0][0][2] = 44,
- [0][1][0][0][0][3] = 44,
- [0][1][0][0][0][4] = 44,
- [0][1][0][0][0][5] = 44,
- [0][1][0][0][0][6] = 44,
- [0][1][0][0][0][7] = 44,
- [0][1][0][0][0][8] = 44,
- [0][1][0][0][0][9] = 44,
- [0][1][0][0][0][10] = 44,
- [0][1][0][0][0][11] = 44,
- [0][1][0][0][0][12] = 38,
- [0][1][0][0][0][13] = 64,
- [1][0][0][0][0][0] = 0,
- [1][0][0][0][0][1] = 0,
- [1][0][0][0][0][2] = 58,
- [1][0][0][0][0][3] = 58,
- [1][0][0][0][0][4] = 58,
- [1][0][0][0][0][5] = 58,
- [1][0][0][0][0][6] = 46,
- [1][0][0][0][0][7] = 46,
- [1][0][0][0][0][8] = 46,
- [1][0][0][0][0][9] = 32,
- [1][0][0][0][0][10] = 32,
- [1][0][0][0][0][11] = 0,
- [1][0][0][0][0][12] = 0,
- [1][0][0][0][0][13] = 0,
- [1][1][0][0][0][0] = 0,
- [1][1][0][0][0][1] = 0,
- [1][1][0][0][0][2] = 46,
- [1][1][0][0][0][3] = 46,
- [1][1][0][0][0][4] = 46,
- [1][1][0][0][0][5] = 46,
- [1][1][0][0][0][6] = 46,
- [1][1][0][0][0][7] = 46,
- [1][1][0][0][0][8] = 46,
- [1][1][0][0][0][9] = 24,
- [1][1][0][0][0][10] = 24,
- [1][1][0][0][0][11] = 0,
- [1][1][0][0][0][12] = 0,
- [1][1][0][0][0][13] = 0,
- [0][0][1][0][0][0] = 58,
- [0][0][1][0][0][1] = 58,
- [0][0][1][0][0][2] = 58,
- [0][0][1][0][0][3] = 58,
- [0][0][1][0][0][4] = 58,
- [0][0][1][0][0][5] = 58,
- [0][0][1][0][0][6] = 58,
- [0][0][1][0][0][7] = 58,
- [0][0][1][0][0][8] = 58,
- [0][0][1][0][0][9] = 58,
- [0][0][1][0][0][10] = 58,
- [0][0][1][0][0][11] = 56,
- [0][0][1][0][0][12] = 52,
- [0][0][1][0][0][13] = 0,
- [0][1][1][0][0][0] = 46,
- [0][1][1][0][0][1] = 46,
- [0][1][1][0][0][2] = 46,
- [0][1][1][0][0][3] = 46,
- [0][1][1][0][0][4] = 46,
- [0][1][1][0][0][5] = 46,
- [0][1][1][0][0][6] = 46,
- [0][1][1][0][0][7] = 46,
- [0][1][1][0][0][8] = 46,
- [0][1][1][0][0][9] = 46,
- [0][1][1][0][0][10] = 46,
- [0][1][1][0][0][11] = 42,
- [0][1][1][0][0][12] = 40,
- [0][1][1][0][0][13] = 0,
- [0][0][2][0][0][0] = 58,
- [0][0][2][0][0][1] = 58,
- [0][0][2][0][0][2] = 58,
- [0][0][2][0][0][3] = 58,
- [0][0][2][0][0][4] = 58,
- [0][0][2][0][0][5] = 58,
- [0][0][2][0][0][6] = 58,
- [0][0][2][0][0][7] = 58,
- [0][0][2][0][0][8] = 58,
- [0][0][2][0][0][9] = 58,
- [0][0][2][0][0][10] = 58,
- [0][0][2][0][0][11] = 54,
- [0][0][2][0][0][12] = 50,
- [0][0][2][0][0][13] = 0,
- [0][1][2][0][0][0] = 46,
- [0][1][2][0][0][1] = 46,
- [0][1][2][0][0][2] = 46,
- [0][1][2][0][0][3] = 46,
- [0][1][2][0][0][4] = 46,
- [0][1][2][0][0][5] = 46,
- [0][1][2][0][0][6] = 46,
- [0][1][2][0][0][7] = 46,
- [0][1][2][0][0][8] = 46,
- [0][1][2][0][0][9] = 46,
- [0][1][2][0][0][10] = 46,
- [0][1][2][0][0][11] = 42,
- [0][1][2][0][0][12] = 40,
- [0][1][2][0][0][13] = 0,
- [0][1][2][1][0][0] = 34,
- [0][1][2][1][0][1] = 34,
- [0][1][2][1][0][2] = 34,
- [0][1][2][1][0][3] = 34,
- [0][1][2][1][0][4] = 34,
- [0][1][2][1][0][5] = 34,
- [0][1][2][1][0][6] = 34,
- [0][1][2][1][0][7] = 34,
- [0][1][2][1][0][8] = 34,
- [0][1][2][1][0][9] = 34,
- [0][1][2][1][0][10] = 34,
- [0][1][2][1][0][11] = 34,
- [0][1][2][1][0][12] = 34,
- [0][1][2][1][0][13] = 0,
- [1][0][2][0][0][0] = 0,
- [1][0][2][0][0][1] = 0,
- [1][0][2][0][0][2] = 56,
- [1][0][2][0][0][3] = 56,
- [1][0][2][0][0][4] = 58,
- [1][0][2][0][0][5] = 58,
- [1][0][2][0][0][6] = 54,
- [1][0][2][0][0][7] = 50,
- [1][0][2][0][0][8] = 50,
- [1][0][2][0][0][9] = 42,
- [1][0][2][0][0][10] = 40,
- [1][0][2][0][0][11] = 0,
- [1][0][2][0][0][12] = 0,
- [1][0][2][0][0][13] = 0,
- [1][1][2][0][0][0] = 0,
- [1][1][2][0][0][1] = 0,
- [1][1][2][0][0][2] = 46,
- [1][1][2][0][0][3] = 46,
- [1][1][2][0][0][4] = 46,
- [1][1][2][0][0][5] = 46,
- [1][1][2][0][0][6] = 46,
- [1][1][2][0][0][7] = 46,
- [1][1][2][0][0][8] = 46,
- [1][1][2][0][0][9] = 38,
- [1][1][2][0][0][10] = 36,
- [1][1][2][0][0][11] = 0,
- [1][1][2][0][0][12] = 0,
- [1][1][2][0][0][13] = 0,
- [1][1][2][1][0][0] = 0,
- [1][1][2][1][0][1] = 0,
- [1][1][2][1][0][2] = 34,
- [1][1][2][1][0][3] = 34,
- [1][1][2][1][0][4] = 34,
- [1][1][2][1][0][5] = 34,
- [1][1][2][1][0][6] = 34,
- [1][1][2][1][0][7] = 34,
- [1][1][2][1][0][8] = 34,
- [1][1][2][1][0][9] = 34,
- [1][1][2][1][0][10] = 34,
- [1][1][2][1][0][11] = 0,
- [1][1][2][1][0][12] = 0,
- [1][1][2][1][0][13] = 0,
- [0][0][0][0][2][0] = 76,
- [0][0][0][0][1][0] = 56,
- [0][0][0][0][3][0] = 68,
- [0][0][0][0][5][0] = 76,
- [0][0][0][0][6][0] = 56,
- [0][0][0][0][9][0] = 56,
- [0][0][0][0][8][0] = 60,
- [0][0][0][0][11][0] = 56,
- [0][0][0][0][2][1] = 76,
- [0][0][0][0][1][1] = 56,
- [0][0][0][0][3][1] = 68,
- [0][0][0][0][5][1] = 76,
- [0][0][0][0][6][1] = 56,
- [0][0][0][0][9][1] = 56,
- [0][0][0][0][8][1] = 60,
- [0][0][0][0][11][1] = 56,
- [0][0][0][0][2][2] = 76,
- [0][0][0][0][1][2] = 56,
- [0][0][0][0][3][2] = 68,
- [0][0][0][0][5][2] = 76,
- [0][0][0][0][6][2] = 56,
- [0][0][0][0][9][2] = 56,
- [0][0][0][0][8][2] = 60,
- [0][0][0][0][11][2] = 56,
- [0][0][0][0][2][3] = 76,
- [0][0][0][0][1][3] = 56,
- [0][0][0][0][3][3] = 68,
- [0][0][0][0][5][3] = 76,
- [0][0][0][0][6][3] = 56,
- [0][0][0][0][9][3] = 56,
- [0][0][0][0][8][3] = 60,
- [0][0][0][0][11][3] = 56,
- [0][0][0][0][2][4] = 76,
- [0][0][0][0][1][4] = 56,
- [0][0][0][0][3][4] = 68,
- [0][0][0][0][5][4] = 76,
- [0][0][0][0][6][4] = 56,
- [0][0][0][0][9][4] = 56,
- [0][0][0][0][8][4] = 60,
- [0][0][0][0][11][4] = 56,
- [0][0][0][0][2][5] = 76,
- [0][0][0][0][1][5] = 56,
- [0][0][0][0][3][5] = 68,
- [0][0][0][0][5][5] = 76,
- [0][0][0][0][6][5] = 56,
- [0][0][0][0][9][5] = 56,
- [0][0][0][0][8][5] = 60,
- [0][0][0][0][11][5] = 56,
- [0][0][0][0][2][6] = 76,
- [0][0][0][0][1][6] = 56,
- [0][0][0][0][3][6] = 68,
- [0][0][0][0][5][6] = 76,
- [0][0][0][0][6][6] = 56,
- [0][0][0][0][9][6] = 56,
- [0][0][0][0][8][6] = 60,
- [0][0][0][0][11][6] = 56,
- [0][0][0][0][2][7] = 76,
- [0][0][0][0][1][7] = 56,
- [0][0][0][0][3][7] = 68,
- [0][0][0][0][5][7] = 76,
- [0][0][0][0][6][7] = 56,
- [0][0][0][0][9][7] = 56,
- [0][0][0][0][8][7] = 60,
- [0][0][0][0][11][7] = 56,
- [0][0][0][0][2][8] = 76,
- [0][0][0][0][1][8] = 56,
- [0][0][0][0][3][8] = 68,
- [0][0][0][0][5][8] = 76,
- [0][0][0][0][6][8] = 56,
- [0][0][0][0][9][8] = 56,
- [0][0][0][0][8][8] = 60,
- [0][0][0][0][11][8] = 56,
- [0][0][0][0][2][9] = 76,
- [0][0][0][0][1][9] = 56,
- [0][0][0][0][3][9] = 68,
- [0][0][0][0][5][9] = 76,
- [0][0][0][0][6][9] = 56,
- [0][0][0][0][9][9] = 56,
- [0][0][0][0][8][9] = 60,
- [0][0][0][0][11][9] = 56,
- [0][0][0][0][2][10] = 76,
- [0][0][0][0][1][10] = 56,
- [0][0][0][0][3][10] = 68,
- [0][0][0][0][5][10] = 76,
- [0][0][0][0][6][10] = 56,
- [0][0][0][0][9][10] = 56,
- [0][0][0][0][8][10] = 60,
- [0][0][0][0][11][10] = 56,
- [0][0][0][0][2][11] = 68,
- [0][0][0][0][1][11] = 56,
- [0][0][0][0][3][11] = 68,
- [0][0][0][0][5][11] = 68,
- [0][0][0][0][6][11] = 56,
- [0][0][0][0][9][11] = 56,
- [0][0][0][0][8][11] = 60,
- [0][0][0][0][11][11] = 56,
- [0][0][0][0][2][12] = 48,
- [0][0][0][0][1][12] = 56,
- [0][0][0][0][3][12] = 68,
- [0][0][0][0][5][12] = 48,
- [0][0][0][0][6][12] = 56,
- [0][0][0][0][9][12] = 56,
- [0][0][0][0][8][12] = 60,
- [0][0][0][0][11][12] = 56,
- [0][0][0][0][2][13] = 127,
- [0][0][0][0][1][13] = 127,
- [0][0][0][0][3][13] = 76,
- [0][0][0][0][5][13] = 127,
- [0][0][0][0][6][13] = 127,
- [0][0][0][0][9][13] = 127,
- [0][0][0][0][8][13] = 127,
- [0][0][0][0][11][13] = 127,
- [0][1][0][0][2][0] = 74,
- [0][1][0][0][1][0] = 44,
- [0][1][0][0][3][0] = 56,
- [0][1][0][0][5][0] = 74,
- [0][1][0][0][6][0] = 44,
- [0][1][0][0][9][0] = 44,
- [0][1][0][0][8][0] = 48,
- [0][1][0][0][11][0] = 44,
- [0][1][0][0][2][1] = 76,
- [0][1][0][0][1][1] = 44,
- [0][1][0][0][3][1] = 56,
- [0][1][0][0][5][1] = 76,
- [0][1][0][0][6][1] = 44,
- [0][1][0][0][9][1] = 44,
- [0][1][0][0][8][1] = 48,
- [0][1][0][0][11][1] = 44,
- [0][1][0][0][2][2] = 76,
- [0][1][0][0][1][2] = 44,
- [0][1][0][0][3][2] = 56,
- [0][1][0][0][5][2] = 76,
- [0][1][0][0][6][2] = 44,
- [0][1][0][0][9][2] = 44,
- [0][1][0][0][8][2] = 48,
- [0][1][0][0][11][2] = 44,
- [0][1][0][0][2][3] = 76,
- [0][1][0][0][1][3] = 44,
- [0][1][0][0][3][3] = 56,
- [0][1][0][0][5][3] = 76,
- [0][1][0][0][6][3] = 44,
- [0][1][0][0][9][3] = 44,
- [0][1][0][0][8][3] = 48,
- [0][1][0][0][11][3] = 44,
- [0][1][0][0][2][4] = 76,
- [0][1][0][0][1][4] = 44,
- [0][1][0][0][3][4] = 56,
- [0][1][0][0][5][4] = 76,
- [0][1][0][0][6][4] = 44,
- [0][1][0][0][9][4] = 44,
- [0][1][0][0][8][4] = 48,
- [0][1][0][0][11][4] = 44,
- [0][1][0][0][2][5] = 76,
- [0][1][0][0][1][5] = 44,
- [0][1][0][0][3][5] = 56,
- [0][1][0][0][5][5] = 76,
- [0][1][0][0][6][5] = 44,
- [0][1][0][0][9][5] = 44,
- [0][1][0][0][8][5] = 48,
- [0][1][0][0][11][5] = 44,
- [0][1][0][0][2][6] = 76,
- [0][1][0][0][1][6] = 44,
- [0][1][0][0][3][6] = 56,
- [0][1][0][0][5][6] = 76,
- [0][1][0][0][6][6] = 44,
- [0][1][0][0][9][6] = 44,
- [0][1][0][0][8][6] = 48,
- [0][1][0][0][11][6] = 44,
- [0][1][0][0][2][7] = 76,
- [0][1][0][0][1][7] = 44,
- [0][1][0][0][3][7] = 56,
- [0][1][0][0][5][7] = 76,
- [0][1][0][0][6][7] = 44,
- [0][1][0][0][9][7] = 44,
- [0][1][0][0][8][7] = 48,
- [0][1][0][0][11][7] = 44,
- [0][1][0][0][2][8] = 76,
- [0][1][0][0][1][8] = 44,
- [0][1][0][0][3][8] = 56,
- [0][1][0][0][5][8] = 76,
- [0][1][0][0][6][8] = 44,
- [0][1][0][0][9][8] = 44,
- [0][1][0][0][8][8] = 48,
- [0][1][0][0][11][8] = 44,
- [0][1][0][0][2][9] = 76,
- [0][1][0][0][1][9] = 44,
- [0][1][0][0][3][9] = 56,
- [0][1][0][0][5][9] = 76,
- [0][1][0][0][6][9] = 44,
- [0][1][0][0][9][9] = 44,
- [0][1][0][0][8][9] = 48,
- [0][1][0][0][11][9] = 44,
- [0][1][0][0][2][10] = 62,
- [0][1][0][0][1][10] = 44,
- [0][1][0][0][3][10] = 56,
- [0][1][0][0][5][10] = 62,
- [0][1][0][0][6][10] = 44,
- [0][1][0][0][9][10] = 44,
- [0][1][0][0][8][10] = 48,
- [0][1][0][0][11][10] = 44,
- [0][1][0][0][2][11] = 52,
- [0][1][0][0][1][11] = 44,
- [0][1][0][0][3][11] = 56,
- [0][1][0][0][5][11] = 52,
- [0][1][0][0][6][11] = 44,
- [0][1][0][0][9][11] = 44,
- [0][1][0][0][8][11] = 48,
- [0][1][0][0][11][11] = 44,
- [0][1][0][0][2][12] = 38,
- [0][1][0][0][1][12] = 44,
- [0][1][0][0][3][12] = 56,
- [0][1][0][0][5][12] = 38,
- [0][1][0][0][6][12] = 44,
- [0][1][0][0][9][12] = 44,
- [0][1][0][0][8][12] = 48,
- [0][1][0][0][11][12] = 44,
- [0][1][0][0][2][13] = 127,
- [0][1][0][0][1][13] = 127,
- [0][1][0][0][3][13] = 64,
- [0][1][0][0][5][13] = 127,
- [0][1][0][0][6][13] = 127,
- [0][1][0][0][9][13] = 127,
- [0][1][0][0][8][13] = 127,
- [0][1][0][0][11][13] = 127,
- [1][0][0][0][2][0] = 127,
- [1][0][0][0][1][0] = 127,
- [1][0][0][0][3][0] = 127,
- [1][0][0][0][5][0] = 127,
- [1][0][0][0][6][0] = 127,
- [1][0][0][0][9][0] = 127,
- [1][0][0][0][8][0] = 127,
- [1][0][0][0][11][0] = 127,
- [1][0][0][0][2][1] = 127,
- [1][0][0][0][1][1] = 127,
- [1][0][0][0][3][1] = 127,
- [1][0][0][0][5][1] = 127,
- [1][0][0][0][6][1] = 127,
- [1][0][0][0][9][1] = 127,
- [1][0][0][0][8][1] = 127,
- [1][0][0][0][11][1] = 127,
- [1][0][0][0][2][2] = 60,
- [1][0][0][0][1][2] = 58,
- [1][0][0][0][3][2] = 68,
- [1][0][0][0][5][2] = 60,
- [1][0][0][0][6][2] = 58,
- [1][0][0][0][9][2] = 58,
- [1][0][0][0][8][2] = 60,
- [1][0][0][0][11][2] = 58,
- [1][0][0][0][2][3] = 60,
- [1][0][0][0][1][3] = 58,
- [1][0][0][0][3][3] = 68,
- [1][0][0][0][5][3] = 60,
- [1][0][0][0][6][3] = 58,
- [1][0][0][0][9][3] = 58,
- [1][0][0][0][8][3] = 60,
- [1][0][0][0][11][3] = 58,
- [1][0][0][0][2][4] = 60,
- [1][0][0][0][1][4] = 58,
- [1][0][0][0][3][4] = 68,
- [1][0][0][0][5][4] = 60,
- [1][0][0][0][6][4] = 58,
- [1][0][0][0][9][4] = 58,
- [1][0][0][0][8][4] = 60,
- [1][0][0][0][11][4] = 58,
- [1][0][0][0][2][5] = 60,
- [1][0][0][0][1][5] = 58,
- [1][0][0][0][3][5] = 68,
- [1][0][0][0][5][5] = 60,
- [1][0][0][0][6][5] = 58,
- [1][0][0][0][9][5] = 58,
- [1][0][0][0][8][5] = 60,
- [1][0][0][0][11][5] = 58,
- [1][0][0][0][2][6] = 46,
- [1][0][0][0][1][6] = 58,
- [1][0][0][0][3][6] = 68,
- [1][0][0][0][5][6] = 46,
- [1][0][0][0][6][6] = 58,
- [1][0][0][0][9][6] = 58,
- [1][0][0][0][8][6] = 60,
- [1][0][0][0][11][6] = 58,
- [1][0][0][0][2][7] = 46,
- [1][0][0][0][1][7] = 58,
- [1][0][0][0][3][7] = 68,
- [1][0][0][0][5][7] = 46,
- [1][0][0][0][6][7] = 58,
- [1][0][0][0][9][7] = 58,
- [1][0][0][0][8][7] = 60,
- [1][0][0][0][11][7] = 58,
- [1][0][0][0][2][8] = 46,
- [1][0][0][0][1][8] = 58,
- [1][0][0][0][3][8] = 68,
- [1][0][0][0][5][8] = 46,
- [1][0][0][0][6][8] = 58,
- [1][0][0][0][9][8] = 58,
- [1][0][0][0][8][8] = 60,
- [1][0][0][0][11][8] = 58,
- [1][0][0][0][2][9] = 32,
- [1][0][0][0][1][9] = 58,
- [1][0][0][0][3][9] = 68,
- [1][0][0][0][5][9] = 32,
- [1][0][0][0][6][9] = 58,
- [1][0][0][0][9][9] = 58,
- [1][0][0][0][8][9] = 60,
- [1][0][0][0][11][9] = 58,
- [1][0][0][0][2][10] = 32,
- [1][0][0][0][1][10] = 58,
- [1][0][0][0][3][10] = 68,
- [1][0][0][0][5][10] = 32,
- [1][0][0][0][6][10] = 58,
- [1][0][0][0][9][10] = 58,
- [1][0][0][0][8][10] = 60,
- [1][0][0][0][11][10] = 58,
- [1][0][0][0][2][11] = 127,
- [1][0][0][0][1][11] = 127,
- [1][0][0][0][3][11] = 127,
- [1][0][0][0][5][11] = 127,
- [1][0][0][0][6][11] = 127,
- [1][0][0][0][9][11] = 127,
- [1][0][0][0][8][11] = 127,
- [1][0][0][0][11][11] = 127,
- [1][0][0][0][2][12] = 127,
- [1][0][0][0][1][12] = 127,
- [1][0][0][0][3][12] = 127,
- [1][0][0][0][5][12] = 127,
- [1][0][0][0][6][12] = 127,
- [1][0][0][0][9][12] = 127,
- [1][0][0][0][8][12] = 127,
- [1][0][0][0][11][12] = 127,
- [1][0][0][0][2][13] = 127,
- [1][0][0][0][1][13] = 127,
- [1][0][0][0][3][13] = 127,
- [1][0][0][0][5][13] = 127,
- [1][0][0][0][6][13] = 127,
- [1][0][0][0][9][13] = 127,
- [1][0][0][0][8][13] = 127,
- [1][0][0][0][11][13] = 127,
- [1][1][0][0][2][0] = 127,
- [1][1][0][0][1][0] = 127,
- [1][1][0][0][3][0] = 127,
- [1][1][0][0][5][0] = 127,
- [1][1][0][0][6][0] = 127,
- [1][1][0][0][9][0] = 127,
- [1][1][0][0][8][0] = 127,
- [1][1][0][0][11][0] = 127,
- [1][1][0][0][2][1] = 127,
- [1][1][0][0][1][1] = 127,
- [1][1][0][0][3][1] = 127,
- [1][1][0][0][5][1] = 127,
- [1][1][0][0][6][1] = 127,
- [1][1][0][0][9][1] = 127,
- [1][1][0][0][8][1] = 127,
- [1][1][0][0][11][1] = 127,
- [1][1][0][0][2][2] = 48,
- [1][1][0][0][1][2] = 46,
- [1][1][0][0][3][2] = 56,
- [1][1][0][0][5][2] = 48,
- [1][1][0][0][6][2] = 46,
- [1][1][0][0][9][2] = 46,
- [1][1][0][0][8][2] = 48,
- [1][1][0][0][11][2] = 46,
- [1][1][0][0][2][3] = 48,
- [1][1][0][0][1][3] = 46,
- [1][1][0][0][3][3] = 56,
- [1][1][0][0][5][3] = 48,
- [1][1][0][0][6][3] = 46,
- [1][1][0][0][9][3] = 46,
- [1][1][0][0][8][3] = 48,
- [1][1][0][0][11][3] = 46,
- [1][1][0][0][2][4] = 48,
- [1][1][0][0][1][4] = 46,
- [1][1][0][0][3][4] = 56,
- [1][1][0][0][5][4] = 48,
- [1][1][0][0][6][4] = 46,
- [1][1][0][0][9][4] = 46,
- [1][1][0][0][8][4] = 48,
- [1][1][0][0][11][4] = 46,
- [1][1][0][0][2][5] = 58,
- [1][1][0][0][1][5] = 46,
- [1][1][0][0][3][5] = 56,
- [1][1][0][0][5][5] = 58,
- [1][1][0][0][6][5] = 46,
- [1][1][0][0][9][5] = 46,
- [1][1][0][0][8][5] = 48,
- [1][1][0][0][11][5] = 46,
- [1][1][0][0][2][6] = 46,
- [1][1][0][0][1][6] = 46,
- [1][1][0][0][3][6] = 56,
- [1][1][0][0][5][6] = 46,
- [1][1][0][0][6][6] = 46,
- [1][1][0][0][9][6] = 46,
- [1][1][0][0][8][6] = 48,
- [1][1][0][0][11][6] = 46,
- [1][1][0][0][2][7] = 46,
- [1][1][0][0][1][7] = 46,
- [1][1][0][0][3][7] = 56,
- [1][1][0][0][5][7] = 46,
- [1][1][0][0][6][7] = 46,
- [1][1][0][0][9][7] = 46,
- [1][1][0][0][8][7] = 48,
- [1][1][0][0][11][7] = 46,
- [1][1][0][0][2][8] = 46,
- [1][1][0][0][1][8] = 46,
- [1][1][0][0][3][8] = 56,
- [1][1][0][0][5][8] = 46,
- [1][1][0][0][6][8] = 46,
- [1][1][0][0][9][8] = 46,
- [1][1][0][0][8][8] = 48,
- [1][1][0][0][11][8] = 46,
- [1][1][0][0][2][9] = 24,
- [1][1][0][0][1][9] = 46,
- [1][1][0][0][3][9] = 56,
- [1][1][0][0][5][9] = 24,
- [1][1][0][0][6][9] = 46,
- [1][1][0][0][9][9] = 46,
- [1][1][0][0][8][9] = 48,
- [1][1][0][0][11][9] = 46,
- [1][1][0][0][2][10] = 24,
- [1][1][0][0][1][10] = 46,
- [1][1][0][0][3][10] = 56,
- [1][1][0][0][5][10] = 24,
- [1][1][0][0][6][10] = 46,
- [1][1][0][0][9][10] = 46,
- [1][1][0][0][8][10] = 48,
- [1][1][0][0][11][10] = 46,
- [1][1][0][0][2][11] = 127,
- [1][1][0][0][1][11] = 127,
- [1][1][0][0][3][11] = 127,
- [1][1][0][0][5][11] = 127,
- [1][1][0][0][6][11] = 127,
- [1][1][0][0][9][11] = 127,
- [1][1][0][0][8][11] = 127,
- [1][1][0][0][11][11] = 127,
- [1][1][0][0][2][12] = 127,
- [1][1][0][0][1][12] = 127,
- [1][1][0][0][3][12] = 127,
- [1][1][0][0][5][12] = 127,
- [1][1][0][0][6][12] = 127,
- [1][1][0][0][9][12] = 127,
- [1][1][0][0][8][12] = 127,
- [1][1][0][0][11][12] = 127,
- [1][1][0][0][2][13] = 127,
- [1][1][0][0][1][13] = 127,
- [1][1][0][0][3][13] = 127,
- [1][1][0][0][5][13] = 127,
- [1][1][0][0][6][13] = 127,
- [1][1][0][0][9][13] = 127,
- [1][1][0][0][8][13] = 127,
- [1][1][0][0][11][13] = 127,
- [0][0][1][0][2][0] = 66,
- [0][0][1][0][1][0] = 58,
- [0][0][1][0][3][0] = 76,
- [0][0][1][0][5][0] = 66,
- [0][0][1][0][6][0] = 58,
- [0][0][1][0][9][0] = 58,
- [0][0][1][0][8][0] = 60,
- [0][0][1][0][11][0] = 58,
- [0][0][1][0][2][1] = 66,
- [0][0][1][0][1][1] = 58,
- [0][0][1][0][3][1] = 76,
- [0][0][1][0][5][1] = 66,
- [0][0][1][0][6][1] = 58,
- [0][0][1][0][9][1] = 58,
- [0][0][1][0][8][1] = 60,
- [0][0][1][0][11][1] = 58,
- [0][0][1][0][2][2] = 70,
- [0][0][1][0][1][2] = 58,
- [0][0][1][0][3][2] = 76,
- [0][0][1][0][5][2] = 70,
- [0][0][1][0][6][2] = 58,
- [0][0][1][0][9][2] = 58,
- [0][0][1][0][8][2] = 60,
- [0][0][1][0][11][2] = 58,
- [0][0][1][0][2][3] = 74,
- [0][0][1][0][1][3] = 58,
- [0][0][1][0][3][3] = 76,
- [0][0][1][0][5][3] = 74,
- [0][0][1][0][6][3] = 58,
- [0][0][1][0][9][3] = 58,
- [0][0][1][0][8][3] = 60,
- [0][0][1][0][11][3] = 58,
- [0][0][1][0][2][4] = 78,
- [0][0][1][0][1][4] = 58,
- [0][0][1][0][3][4] = 76,
- [0][0][1][0][5][4] = 78,
- [0][0][1][0][6][4] = 58,
- [0][0][1][0][9][4] = 58,
- [0][0][1][0][8][4] = 60,
- [0][0][1][0][11][4] = 58,
- [0][0][1][0][2][5] = 78,
- [0][0][1][0][1][5] = 58,
- [0][0][1][0][3][5] = 76,
- [0][0][1][0][5][5] = 78,
- [0][0][1][0][6][5] = 58,
- [0][0][1][0][9][5] = 58,
- [0][0][1][0][8][5] = 60,
- [0][0][1][0][11][5] = 58,
- [0][0][1][0][2][6] = 78,
- [0][0][1][0][1][6] = 58,
- [0][0][1][0][3][6] = 76,
- [0][0][1][0][5][6] = 78,
- [0][0][1][0][6][6] = 58,
- [0][0][1][0][9][6] = 58,
- [0][0][1][0][8][6] = 60,
- [0][0][1][0][11][6] = 58,
- [0][0][1][0][2][7] = 74,
- [0][0][1][0][1][7] = 58,
- [0][0][1][0][3][7] = 76,
- [0][0][1][0][5][7] = 74,
- [0][0][1][0][6][7] = 58,
- [0][0][1][0][9][7] = 58,
- [0][0][1][0][8][7] = 60,
- [0][0][1][0][11][7] = 58,
- [0][0][1][0][2][8] = 70,
- [0][0][1][0][1][8] = 58,
- [0][0][1][0][3][8] = 76,
- [0][0][1][0][5][8] = 70,
- [0][0][1][0][6][8] = 58,
- [0][0][1][0][9][8] = 58,
- [0][0][1][0][8][8] = 60,
- [0][0][1][0][11][8] = 58,
- [0][0][1][0][2][9] = 66,
- [0][0][1][0][1][9] = 58,
- [0][0][1][0][3][9] = 76,
- [0][0][1][0][5][9] = 66,
- [0][0][1][0][6][9] = 58,
- [0][0][1][0][9][9] = 58,
- [0][0][1][0][8][9] = 60,
- [0][0][1][0][11][9] = 58,
- [0][0][1][0][2][10] = 66,
- [0][0][1][0][1][10] = 58,
- [0][0][1][0][3][10] = 76,
- [0][0][1][0][5][10] = 66,
- [0][0][1][0][6][10] = 58,
- [0][0][1][0][9][10] = 58,
- [0][0][1][0][8][10] = 60,
- [0][0][1][0][11][10] = 58,
- [0][0][1][0][2][11] = 56,
- [0][0][1][0][1][11] = 58,
- [0][0][1][0][3][11] = 76,
- [0][0][1][0][5][11] = 56,
- [0][0][1][0][6][11] = 58,
- [0][0][1][0][9][11] = 58,
- [0][0][1][0][8][11] = 60,
- [0][0][1][0][11][11] = 58,
- [0][0][1][0][2][12] = 52,
- [0][0][1][0][1][12] = 58,
- [0][0][1][0][3][12] = 76,
- [0][0][1][0][5][12] = 52,
- [0][0][1][0][6][12] = 58,
- [0][0][1][0][9][12] = 58,
- [0][0][1][0][8][12] = 60,
- [0][0][1][0][11][12] = 58,
- [0][0][1][0][2][13] = 127,
- [0][0][1][0][1][13] = 127,
- [0][0][1][0][3][13] = 127,
- [0][0][1][0][5][13] = 127,
- [0][0][1][0][6][13] = 127,
- [0][0][1][0][9][13] = 127,
- [0][0][1][0][8][13] = 127,
- [0][0][1][0][11][13] = 127,
- [0][1][1][0][2][0] = 62,
- [0][1][1][0][1][0] = 46,
- [0][1][1][0][3][0] = 64,
- [0][1][1][0][5][0] = 62,
- [0][1][1][0][6][0] = 46,
- [0][1][1][0][9][0] = 46,
- [0][1][1][0][8][0] = 48,
- [0][1][1][0][11][0] = 46,
- [0][1][1][0][2][1] = 62,
- [0][1][1][0][1][1] = 46,
- [0][1][1][0][3][1] = 64,
- [0][1][1][0][5][1] = 62,
- [0][1][1][0][6][1] = 46,
- [0][1][1][0][9][1] = 46,
- [0][1][1][0][8][1] = 48,
- [0][1][1][0][11][1] = 46,
- [0][1][1][0][2][2] = 66,
- [0][1][1][0][1][2] = 46,
- [0][1][1][0][3][2] = 64,
- [0][1][1][0][5][2] = 66,
- [0][1][1][0][6][2] = 46,
- [0][1][1][0][9][2] = 46,
- [0][1][1][0][8][2] = 48,
- [0][1][1][0][11][2] = 46,
- [0][1][1][0][2][3] = 70,
- [0][1][1][0][1][3] = 46,
- [0][1][1][0][3][3] = 64,
- [0][1][1][0][5][3] = 70,
- [0][1][1][0][6][3] = 46,
- [0][1][1][0][9][3] = 46,
- [0][1][1][0][8][3] = 48,
- [0][1][1][0][11][3] = 46,
- [0][1][1][0][2][4] = 78,
- [0][1][1][0][1][4] = 46,
- [0][1][1][0][3][4] = 64,
- [0][1][1][0][5][4] = 78,
- [0][1][1][0][6][4] = 46,
- [0][1][1][0][9][4] = 46,
- [0][1][1][0][8][4] = 48,
- [0][1][1][0][11][4] = 46,
- [0][1][1][0][2][5] = 78,
- [0][1][1][0][1][5] = 46,
- [0][1][1][0][3][5] = 64,
- [0][1][1][0][5][5] = 78,
- [0][1][1][0][6][5] = 46,
- [0][1][1][0][9][5] = 46,
- [0][1][1][0][8][5] = 48,
- [0][1][1][0][11][5] = 46,
- [0][1][1][0][2][6] = 78,
- [0][1][1][0][1][6] = 46,
- [0][1][1][0][3][6] = 64,
- [0][1][1][0][5][6] = 78,
- [0][1][1][0][6][6] = 46,
- [0][1][1][0][9][6] = 46,
- [0][1][1][0][8][6] = 48,
- [0][1][1][0][11][6] = 46,
- [0][1][1][0][2][7] = 70,
- [0][1][1][0][1][7] = 46,
- [0][1][1][0][3][7] = 64,
- [0][1][1][0][5][7] = 70,
- [0][1][1][0][6][7] = 46,
- [0][1][1][0][9][7] = 46,
- [0][1][1][0][8][7] = 48,
- [0][1][1][0][11][7] = 46,
- [0][1][1][0][2][8] = 66,
- [0][1][1][0][1][8] = 46,
- [0][1][1][0][3][8] = 64,
- [0][1][1][0][5][8] = 66,
- [0][1][1][0][6][8] = 46,
- [0][1][1][0][9][8] = 46,
- [0][1][1][0][8][8] = 48,
- [0][1][1][0][11][8] = 46,
- [0][1][1][0][2][9] = 62,
- [0][1][1][0][1][9] = 46,
- [0][1][1][0][3][9] = 64,
- [0][1][1][0][5][9] = 62,
- [0][1][1][0][6][9] = 46,
- [0][1][1][0][9][9] = 46,
- [0][1][1][0][8][9] = 48,
- [0][1][1][0][11][9] = 46,
- [0][1][1][0][2][10] = 62,
- [0][1][1][0][1][10] = 46,
- [0][1][1][0][3][10] = 64,
- [0][1][1][0][5][10] = 62,
- [0][1][1][0][6][10] = 46,
- [0][1][1][0][9][10] = 46,
- [0][1][1][0][8][10] = 48,
- [0][1][1][0][11][10] = 46,
- [0][1][1][0][2][11] = 42,
- [0][1][1][0][1][11] = 46,
- [0][1][1][0][3][11] = 64,
- [0][1][1][0][5][11] = 42,
- [0][1][1][0][6][11] = 46,
- [0][1][1][0][9][11] = 46,
- [0][1][1][0][8][11] = 48,
- [0][1][1][0][11][11] = 46,
- [0][1][1][0][2][12] = 40,
- [0][1][1][0][1][12] = 46,
- [0][1][1][0][3][12] = 64,
- [0][1][1][0][5][12] = 40,
- [0][1][1][0][6][12] = 46,
- [0][1][1][0][9][12] = 46,
- [0][1][1][0][8][12] = 48,
- [0][1][1][0][11][12] = 46,
- [0][1][1][0][2][13] = 127,
- [0][1][1][0][1][13] = 127,
- [0][1][1][0][3][13] = 127,
- [0][1][1][0][5][13] = 127,
- [0][1][1][0][6][13] = 127,
- [0][1][1][0][9][13] = 127,
- [0][1][1][0][8][13] = 127,
- [0][1][1][0][11][13] = 127,
- [0][0][2][0][2][0] = 66,
- [0][0][2][0][1][0] = 58,
- [0][0][2][0][3][0] = 76,
- [0][0][2][0][5][0] = 66,
- [0][0][2][0][6][0] = 58,
- [0][0][2][0][9][0] = 58,
- [0][0][2][0][8][0] = 60,
- [0][0][2][0][11][0] = 58,
- [0][0][2][0][2][1] = 66,
- [0][0][2][0][1][1] = 58,
- [0][0][2][0][3][1] = 76,
- [0][0][2][0][5][1] = 66,
- [0][0][2][0][6][1] = 58,
- [0][0][2][0][9][1] = 58,
- [0][0][2][0][8][1] = 60,
- [0][0][2][0][11][1] = 58,
- [0][0][2][0][2][2] = 70,
- [0][0][2][0][1][2] = 58,
- [0][0][2][0][3][2] = 76,
- [0][0][2][0][5][2] = 70,
- [0][0][2][0][6][2] = 58,
- [0][0][2][0][9][2] = 58,
- [0][0][2][0][8][2] = 60,
- [0][0][2][0][11][2] = 58,
- [0][0][2][0][2][3] = 74,
- [0][0][2][0][1][3] = 58,
- [0][0][2][0][3][3] = 76,
- [0][0][2][0][5][3] = 74,
- [0][0][2][0][6][3] = 58,
- [0][0][2][0][9][3] = 58,
- [0][0][2][0][8][3] = 60,
- [0][0][2][0][11][3] = 58,
- [0][0][2][0][2][4] = 76,
- [0][0][2][0][1][4] = 58,
- [0][0][2][0][3][4] = 76,
- [0][0][2][0][5][4] = 76,
- [0][0][2][0][6][4] = 58,
- [0][0][2][0][9][4] = 58,
- [0][0][2][0][8][4] = 60,
- [0][0][2][0][11][4] = 58,
- [0][0][2][0][2][5] = 76,
- [0][0][2][0][1][5] = 58,
- [0][0][2][0][3][5] = 76,
- [0][0][2][0][5][5] = 76,
- [0][0][2][0][6][5] = 58,
- [0][0][2][0][9][5] = 58,
- [0][0][2][0][8][5] = 60,
- [0][0][2][0][11][5] = 58,
- [0][0][2][0][2][6] = 76,
- [0][0][2][0][1][6] = 58,
- [0][0][2][0][3][6] = 76,
- [0][0][2][0][5][6] = 76,
- [0][0][2][0][6][6] = 58,
- [0][0][2][0][9][6] = 58,
- [0][0][2][0][8][6] = 60,
- [0][0][2][0][11][6] = 58,
- [0][0][2][0][2][7] = 74,
- [0][0][2][0][1][7] = 58,
- [0][0][2][0][3][7] = 76,
- [0][0][2][0][5][7] = 74,
- [0][0][2][0][6][7] = 58,
- [0][0][2][0][9][7] = 58,
- [0][0][2][0][8][7] = 60,
- [0][0][2][0][11][7] = 58,
- [0][0][2][0][2][8] = 70,
- [0][0][2][0][1][8] = 58,
- [0][0][2][0][3][8] = 76,
- [0][0][2][0][5][8] = 70,
- [0][0][2][0][6][8] = 58,
- [0][0][2][0][9][8] = 58,
- [0][0][2][0][8][8] = 60,
- [0][0][2][0][11][8] = 58,
- [0][0][2][0][2][9] = 66,
- [0][0][2][0][1][9] = 58,
- [0][0][2][0][3][9] = 76,
- [0][0][2][0][5][9] = 66,
- [0][0][2][0][6][9] = 58,
- [0][0][2][0][9][9] = 58,
- [0][0][2][0][8][9] = 60,
- [0][0][2][0][11][9] = 58,
- [0][0][2][0][2][10] = 66,
- [0][0][2][0][1][10] = 58,
- [0][0][2][0][3][10] = 76,
- [0][0][2][0][5][10] = 66,
- [0][0][2][0][6][10] = 58,
- [0][0][2][0][9][10] = 58,
- [0][0][2][0][8][10] = 60,
- [0][0][2][0][11][10] = 58,
- [0][0][2][0][2][11] = 54,
- [0][0][2][0][1][11] = 58,
- [0][0][2][0][3][11] = 76,
- [0][0][2][0][5][11] = 54,
- [0][0][2][0][6][11] = 58,
- [0][0][2][0][9][11] = 58,
- [0][0][2][0][8][11] = 60,
- [0][0][2][0][11][11] = 58,
- [0][0][2][0][2][12] = 50,
- [0][0][2][0][1][12] = 58,
- [0][0][2][0][3][12] = 76,
- [0][0][2][0][5][12] = 50,
- [0][0][2][0][6][12] = 58,
- [0][0][2][0][9][12] = 58,
- [0][0][2][0][8][12] = 60,
- [0][0][2][0][11][12] = 58,
- [0][0][2][0][2][13] = 127,
- [0][0][2][0][1][13] = 127,
- [0][0][2][0][3][13] = 127,
- [0][0][2][0][5][13] = 127,
- [0][0][2][0][6][13] = 127,
- [0][0][2][0][9][13] = 127,
- [0][0][2][0][8][13] = 127,
- [0][0][2][0][11][13] = 127,
- [0][1][2][0][2][0] = 62,
- [0][1][2][0][1][0] = 46,
- [0][1][2][0][3][0] = 64,
- [0][1][2][0][5][0] = 62,
- [0][1][2][0][6][0] = 46,
- [0][1][2][0][9][0] = 46,
- [0][1][2][0][8][0] = 48,
- [0][1][2][0][11][0] = 46,
- [0][1][2][0][2][1] = 62,
- [0][1][2][0][1][1] = 46,
- [0][1][2][0][3][1] = 64,
- [0][1][2][0][5][1] = 62,
- [0][1][2][0][6][1] = 46,
- [0][1][2][0][9][1] = 46,
- [0][1][2][0][8][1] = 48,
- [0][1][2][0][11][1] = 46,
- [0][1][2][0][2][2] = 66,
- [0][1][2][0][1][2] = 46,
- [0][1][2][0][3][2] = 64,
- [0][1][2][0][5][2] = 66,
- [0][1][2][0][6][2] = 46,
- [0][1][2][0][9][2] = 46,
- [0][1][2][0][8][2] = 48,
- [0][1][2][0][11][2] = 46,
- [0][1][2][0][2][3] = 70,
- [0][1][2][0][1][3] = 46,
- [0][1][2][0][3][3] = 64,
- [0][1][2][0][5][3] = 70,
- [0][1][2][0][6][3] = 46,
- [0][1][2][0][9][3] = 46,
- [0][1][2][0][8][3] = 48,
- [0][1][2][0][11][3] = 46,
- [0][1][2][0][2][4] = 76,
- [0][1][2][0][1][4] = 46,
- [0][1][2][0][3][4] = 64,
- [0][1][2][0][5][4] = 76,
- [0][1][2][0][6][4] = 46,
- [0][1][2][0][9][4] = 46,
- [0][1][2][0][8][4] = 48,
- [0][1][2][0][11][4] = 46,
- [0][1][2][0][2][5] = 76,
- [0][1][2][0][1][5] = 46,
- [0][1][2][0][3][5] = 64,
- [0][1][2][0][5][5] = 76,
- [0][1][2][0][6][5] = 46,
- [0][1][2][0][9][5] = 46,
- [0][1][2][0][8][5] = 48,
- [0][1][2][0][11][5] = 46,
- [0][1][2][0][2][6] = 76,
- [0][1][2][0][1][6] = 46,
- [0][1][2][0][3][6] = 64,
- [0][1][2][0][5][6] = 76,
- [0][1][2][0][6][6] = 46,
- [0][1][2][0][9][6] = 46,
- [0][1][2][0][8][6] = 48,
- [0][1][2][0][11][6] = 46,
- [0][1][2][0][2][7] = 68,
- [0][1][2][0][1][7] = 46,
- [0][1][2][0][3][7] = 64,
- [0][1][2][0][5][7] = 68,
- [0][1][2][0][6][7] = 46,
- [0][1][2][0][9][7] = 46,
- [0][1][2][0][8][7] = 48,
- [0][1][2][0][11][7] = 46,
- [0][1][2][0][2][8] = 64,
- [0][1][2][0][1][8] = 46,
- [0][1][2][0][3][8] = 64,
- [0][1][2][0][5][8] = 64,
- [0][1][2][0][6][8] = 46,
- [0][1][2][0][9][8] = 46,
- [0][1][2][0][8][8] = 48,
- [0][1][2][0][11][8] = 46,
- [0][1][2][0][2][9] = 60,
- [0][1][2][0][1][9] = 46,
- [0][1][2][0][3][9] = 64,
- [0][1][2][0][5][9] = 60,
- [0][1][2][0][6][9] = 46,
- [0][1][2][0][9][9] = 46,
- [0][1][2][0][8][9] = 48,
- [0][1][2][0][11][9] = 46,
- [0][1][2][0][2][10] = 60,
- [0][1][2][0][1][10] = 46,
- [0][1][2][0][3][10] = 64,
- [0][1][2][0][5][10] = 60,
- [0][1][2][0][6][10] = 46,
- [0][1][2][0][9][10] = 46,
- [0][1][2][0][8][10] = 48,
- [0][1][2][0][11][10] = 46,
- [0][1][2][0][2][11] = 42,
- [0][1][2][0][1][11] = 46,
- [0][1][2][0][3][11] = 64,
- [0][1][2][0][5][11] = 42,
- [0][1][2][0][6][11] = 46,
- [0][1][2][0][9][11] = 46,
- [0][1][2][0][8][11] = 48,
- [0][1][2][0][11][11] = 46,
- [0][1][2][0][2][12] = 40,
- [0][1][2][0][1][12] = 46,
- [0][1][2][0][3][12] = 64,
- [0][1][2][0][5][12] = 40,
- [0][1][2][0][6][12] = 46,
- [0][1][2][0][9][12] = 46,
- [0][1][2][0][8][12] = 48,
- [0][1][2][0][11][12] = 46,
- [0][1][2][0][2][13] = 127,
- [0][1][2][0][1][13] = 127,
- [0][1][2][0][3][13] = 127,
- [0][1][2][0][5][13] = 127,
- [0][1][2][0][6][13] = 127,
- [0][1][2][0][9][13] = 127,
- [0][1][2][0][8][13] = 127,
- [0][1][2][0][11][13] = 127,
- [0][1][2][1][2][0] = 62,
- [0][1][2][1][1][0] = 34,
- [0][1][2][1][3][0] = 64,
- [0][1][2][1][5][0] = 62,
- [0][1][2][1][6][0] = 34,
- [0][1][2][1][9][0] = 34,
- [0][1][2][1][8][0] = 36,
- [0][1][2][1][11][0] = 34,
- [0][1][2][1][2][1] = 62,
- [0][1][2][1][1][1] = 34,
- [0][1][2][1][3][1] = 64,
- [0][1][2][1][5][1] = 62,
- [0][1][2][1][6][1] = 34,
- [0][1][2][1][9][1] = 34,
- [0][1][2][1][8][1] = 36,
- [0][1][2][1][11][1] = 34,
- [0][1][2][1][2][2] = 66,
- [0][1][2][1][1][2] = 34,
- [0][1][2][1][3][2] = 64,
- [0][1][2][1][5][2] = 66,
- [0][1][2][1][6][2] = 34,
- [0][1][2][1][9][2] = 34,
- [0][1][2][1][8][2] = 36,
- [0][1][2][1][11][2] = 34,
- [0][1][2][1][2][3] = 70,
- [0][1][2][1][1][3] = 34,
- [0][1][2][1][3][3] = 64,
- [0][1][2][1][5][3] = 70,
- [0][1][2][1][6][3] = 34,
- [0][1][2][1][9][3] = 34,
- [0][1][2][1][8][3] = 36,
- [0][1][2][1][11][3] = 34,
- [0][1][2][1][2][4] = 76,
- [0][1][2][1][1][4] = 34,
- [0][1][2][1][3][4] = 64,
- [0][1][2][1][5][4] = 76,
- [0][1][2][1][6][4] = 34,
- [0][1][2][1][9][4] = 34,
- [0][1][2][1][8][4] = 36,
- [0][1][2][1][11][4] = 34,
- [0][1][2][1][2][5] = 76,
- [0][1][2][1][1][5] = 34,
- [0][1][2][1][3][5] = 64,
- [0][1][2][1][5][5] = 76,
- [0][1][2][1][6][5] = 34,
- [0][1][2][1][9][5] = 34,
- [0][1][2][1][8][5] = 36,
- [0][1][2][1][11][5] = 34,
- [0][1][2][1][2][6] = 76,
- [0][1][2][1][1][6] = 34,
- [0][1][2][1][3][6] = 64,
- [0][1][2][1][5][6] = 76,
- [0][1][2][1][6][6] = 34,
- [0][1][2][1][9][6] = 34,
- [0][1][2][1][8][6] = 36,
- [0][1][2][1][11][6] = 34,
- [0][1][2][1][2][7] = 68,
- [0][1][2][1][1][7] = 34,
- [0][1][2][1][3][7] = 64,
- [0][1][2][1][5][7] = 68,
- [0][1][2][1][6][7] = 34,
- [0][1][2][1][9][7] = 34,
- [0][1][2][1][8][7] = 36,
- [0][1][2][1][11][7] = 34,
- [0][1][2][1][2][8] = 64,
- [0][1][2][1][1][8] = 34,
- [0][1][2][1][3][8] = 64,
- [0][1][2][1][5][8] = 64,
- [0][1][2][1][6][8] = 34,
- [0][1][2][1][9][8] = 34,
- [0][1][2][1][8][8] = 36,
- [0][1][2][1][11][8] = 34,
- [0][1][2][1][2][9] = 60,
- [0][1][2][1][1][9] = 34,
- [0][1][2][1][3][9] = 64,
- [0][1][2][1][5][9] = 60,
- [0][1][2][1][6][9] = 34,
- [0][1][2][1][9][9] = 34,
- [0][1][2][1][8][9] = 36,
- [0][1][2][1][11][9] = 34,
- [0][1][2][1][2][10] = 60,
- [0][1][2][1][1][10] = 34,
- [0][1][2][1][3][10] = 64,
- [0][1][2][1][5][10] = 60,
- [0][1][2][1][6][10] = 34,
- [0][1][2][1][9][10] = 34,
- [0][1][2][1][8][10] = 36,
- [0][1][2][1][11][10] = 34,
- [0][1][2][1][2][11] = 42,
- [0][1][2][1][1][11] = 34,
- [0][1][2][1][3][11] = 64,
- [0][1][2][1][5][11] = 42,
- [0][1][2][1][6][11] = 34,
- [0][1][2][1][9][11] = 34,
- [0][1][2][1][8][11] = 36,
- [0][1][2][1][11][11] = 34,
- [0][1][2][1][2][12] = 40,
- [0][1][2][1][1][12] = 34,
- [0][1][2][1][3][12] = 64,
- [0][1][2][1][5][12] = 40,
- [0][1][2][1][6][12] = 34,
- [0][1][2][1][9][12] = 34,
- [0][1][2][1][8][12] = 36,
- [0][1][2][1][11][12] = 34,
- [0][1][2][1][2][13] = 127,
- [0][1][2][1][1][13] = 127,
- [0][1][2][1][3][13] = 127,
- [0][1][2][1][5][13] = 127,
- [0][1][2][1][6][13] = 127,
- [0][1][2][1][9][13] = 127,
- [0][1][2][1][8][13] = 127,
- [0][1][2][1][11][13] = 127,
- [1][0][2][0][2][0] = 127,
- [1][0][2][0][1][0] = 127,
- [1][0][2][0][3][0] = 127,
- [1][0][2][0][5][0] = 127,
- [1][0][2][0][6][0] = 127,
- [1][0][2][0][9][0] = 127,
- [1][0][2][0][8][0] = 127,
- [1][0][2][0][11][0] = 127,
- [1][0][2][0][2][1] = 127,
- [1][0][2][0][1][1] = 127,
- [1][0][2][0][3][1] = 127,
- [1][0][2][0][5][1] = 127,
- [1][0][2][0][6][1] = 127,
- [1][0][2][0][9][1] = 127,
- [1][0][2][0][8][1] = 127,
- [1][0][2][0][11][1] = 127,
- [1][0][2][0][2][2] = 56,
- [1][0][2][0][1][2] = 58,
- [1][0][2][0][3][2] = 76,
- [1][0][2][0][5][2] = 56,
- [1][0][2][0][6][2] = 58,
- [1][0][2][0][9][2] = 58,
- [1][0][2][0][8][2] = 60,
- [1][0][2][0][11][2] = 58,
- [1][0][2][0][2][3] = 56,
- [1][0][2][0][1][3] = 58,
- [1][0][2][0][3][3] = 76,
- [1][0][2][0][5][3] = 56,
- [1][0][2][0][6][3] = 58,
- [1][0][2][0][9][3] = 58,
- [1][0][2][0][8][3] = 60,
- [1][0][2][0][11][3] = 58,
- [1][0][2][0][2][4] = 60,
- [1][0][2][0][1][4] = 58,
- [1][0][2][0][3][4] = 76,
- [1][0][2][0][5][4] = 60,
- [1][0][2][0][6][4] = 58,
- [1][0][2][0][9][4] = 58,
- [1][0][2][0][8][4] = 60,
- [1][0][2][0][11][4] = 58,
- [1][0][2][0][2][5] = 64,
- [1][0][2][0][1][5] = 58,
- [1][0][2][0][3][5] = 76,
- [1][0][2][0][5][5] = 64,
- [1][0][2][0][6][5] = 58,
- [1][0][2][0][9][5] = 58,
- [1][0][2][0][8][5] = 60,
- [1][0][2][0][11][5] = 58,
- [1][0][2][0][2][6] = 54,
- [1][0][2][0][1][6] = 58,
- [1][0][2][0][3][6] = 76,
- [1][0][2][0][5][6] = 54,
- [1][0][2][0][6][6] = 58,
- [1][0][2][0][9][6] = 58,
- [1][0][2][0][8][6] = 60,
- [1][0][2][0][11][6] = 58,
- [1][0][2][0][2][7] = 50,
- [1][0][2][0][1][7] = 58,
- [1][0][2][0][3][7] = 76,
- [1][0][2][0][5][7] = 50,
- [1][0][2][0][6][7] = 58,
- [1][0][2][0][9][7] = 58,
- [1][0][2][0][8][7] = 60,
- [1][0][2][0][11][7] = 58,
- [1][0][2][0][2][8] = 50,
- [1][0][2][0][1][8] = 58,
- [1][0][2][0][3][8] = 76,
- [1][0][2][0][5][8] = 50,
- [1][0][2][0][6][8] = 58,
- [1][0][2][0][9][8] = 58,
- [1][0][2][0][8][8] = 60,
- [1][0][2][0][11][8] = 58,
- [1][0][2][0][2][9] = 42,
- [1][0][2][0][1][9] = 58,
- [1][0][2][0][3][9] = 76,
- [1][0][2][0][5][9] = 42,
- [1][0][2][0][6][9] = 58,
- [1][0][2][0][9][9] = 58,
- [1][0][2][0][8][9] = 60,
- [1][0][2][0][11][9] = 58,
- [1][0][2][0][2][10] = 40,
- [1][0][2][0][1][10] = 58,
- [1][0][2][0][3][10] = 76,
- [1][0][2][0][5][10] = 40,
- [1][0][2][0][6][10] = 58,
- [1][0][2][0][9][10] = 58,
- [1][0][2][0][8][10] = 60,
- [1][0][2][0][11][10] = 58,
- [1][0][2][0][2][11] = 127,
- [1][0][2][0][1][11] = 127,
- [1][0][2][0][3][11] = 127,
- [1][0][2][0][5][11] = 127,
- [1][0][2][0][6][11] = 127,
- [1][0][2][0][9][11] = 127,
- [1][0][2][0][8][11] = 127,
- [1][0][2][0][11][11] = 127,
- [1][0][2][0][2][12] = 127,
- [1][0][2][0][1][12] = 127,
- [1][0][2][0][3][12] = 127,
- [1][0][2][0][5][12] = 127,
- [1][0][2][0][6][12] = 127,
- [1][0][2][0][9][12] = 127,
- [1][0][2][0][8][12] = 127,
- [1][0][2][0][11][12] = 127,
- [1][0][2][0][2][13] = 127,
- [1][0][2][0][1][13] = 127,
- [1][0][2][0][3][13] = 127,
- [1][0][2][0][5][13] = 127,
- [1][0][2][0][6][13] = 127,
- [1][0][2][0][9][13] = 127,
- [1][0][2][0][8][13] = 127,
- [1][0][2][0][11][13] = 127,
- [1][1][2][0][2][0] = 127,
- [1][1][2][0][1][0] = 127,
- [1][1][2][0][3][0] = 127,
- [1][1][2][0][5][0] = 127,
- [1][1][2][0][6][0] = 127,
- [1][1][2][0][9][0] = 127,
- [1][1][2][0][8][0] = 127,
- [1][1][2][0][11][0] = 127,
- [1][1][2][0][2][1] = 127,
- [1][1][2][0][1][1] = 127,
- [1][1][2][0][3][1] = 127,
- [1][1][2][0][5][1] = 127,
- [1][1][2][0][6][1] = 127,
- [1][1][2][0][9][1] = 127,
- [1][1][2][0][8][1] = 127,
- [1][1][2][0][11][1] = 127,
- [1][1][2][0][2][2] = 52,
- [1][1][2][0][1][2] = 46,
- [1][1][2][0][3][2] = 64,
- [1][1][2][0][5][2] = 52,
- [1][1][2][0][6][2] = 46,
- [1][1][2][0][9][2] = 46,
- [1][1][2][0][8][2] = 48,
- [1][1][2][0][11][2] = 46,
- [1][1][2][0][2][3] = 52,
- [1][1][2][0][1][3] = 46,
- [1][1][2][0][3][3] = 64,
- [1][1][2][0][5][3] = 52,
- [1][1][2][0][6][3] = 46,
- [1][1][2][0][9][3] = 46,
- [1][1][2][0][8][3] = 48,
- [1][1][2][0][11][3] = 46,
- [1][1][2][0][2][4] = 56,
- [1][1][2][0][1][4] = 46,
- [1][1][2][0][3][4] = 64,
- [1][1][2][0][5][4] = 56,
- [1][1][2][0][6][4] = 46,
- [1][1][2][0][9][4] = 46,
- [1][1][2][0][8][4] = 48,
- [1][1][2][0][11][4] = 46,
- [1][1][2][0][2][5] = 60,
- [1][1][2][0][1][5] = 46,
- [1][1][2][0][3][5] = 64,
- [1][1][2][0][5][5] = 60,
- [1][1][2][0][6][5] = 46,
- [1][1][2][0][9][5] = 46,
- [1][1][2][0][8][5] = 48,
- [1][1][2][0][11][5] = 46,
- [1][1][2][0][2][6] = 54,
- [1][1][2][0][1][6] = 46,
- [1][1][2][0][3][6] = 64,
- [1][1][2][0][5][6] = 52,
- [1][1][2][0][6][6] = 46,
- [1][1][2][0][9][6] = 46,
- [1][1][2][0][8][6] = 48,
- [1][1][2][0][11][6] = 46,
- [1][1][2][0][2][7] = 50,
- [1][1][2][0][1][7] = 46,
- [1][1][2][0][3][7] = 64,
- [1][1][2][0][5][7] = 48,
- [1][1][2][0][6][7] = 46,
- [1][1][2][0][9][7] = 46,
- [1][1][2][0][8][7] = 48,
- [1][1][2][0][11][7] = 46,
- [1][1][2][0][2][8] = 50,
- [1][1][2][0][1][8] = 46,
- [1][1][2][0][3][8] = 64,
- [1][1][2][0][5][8] = 48,
- [1][1][2][0][6][8] = 46,
- [1][1][2][0][9][8] = 46,
- [1][1][2][0][8][8] = 48,
- [1][1][2][0][11][8] = 46,
- [1][1][2][0][2][9] = 38,
- [1][1][2][0][1][9] = 46,
- [1][1][2][0][3][9] = 64,
- [1][1][2][0][5][9] = 38,
- [1][1][2][0][6][9] = 46,
- [1][1][2][0][9][9] = 46,
- [1][1][2][0][8][9] = 48,
- [1][1][2][0][11][9] = 46,
- [1][1][2][0][2][10] = 36,
- [1][1][2][0][1][10] = 46,
- [1][1][2][0][3][10] = 64,
- [1][1][2][0][5][10] = 36,
- [1][1][2][0][6][10] = 46,
- [1][1][2][0][9][10] = 46,
- [1][1][2][0][8][10] = 48,
- [1][1][2][0][11][10] = 46,
- [1][1][2][0][2][11] = 127,
- [1][1][2][0][1][11] = 127,
- [1][1][2][0][3][11] = 127,
- [1][1][2][0][5][11] = 127,
- [1][1][2][0][6][11] = 127,
- [1][1][2][0][9][11] = 127,
- [1][1][2][0][8][11] = 127,
- [1][1][2][0][11][11] = 127,
- [1][1][2][0][2][12] = 127,
- [1][1][2][0][1][12] = 127,
- [1][1][2][0][3][12] = 127,
- [1][1][2][0][5][12] = 127,
- [1][1][2][0][6][12] = 127,
- [1][1][2][0][9][12] = 127,
- [1][1][2][0][8][12] = 127,
- [1][1][2][0][11][12] = 127,
- [1][1][2][0][2][13] = 127,
- [1][1][2][0][1][13] = 127,
- [1][1][2][0][3][13] = 127,
- [1][1][2][0][5][13] = 127,
- [1][1][2][0][6][13] = 127,
- [1][1][2][0][9][13] = 127,
- [1][1][2][0][8][13] = 127,
- [1][1][2][0][11][13] = 127,
- [1][1][2][1][2][0] = 127,
- [1][1][2][1][1][0] = 127,
- [1][1][2][1][3][0] = 127,
- [1][1][2][1][5][0] = 127,
- [1][1][2][1][6][0] = 127,
- [1][1][2][1][9][0] = 127,
- [1][1][2][1][8][0] = 127,
- [1][1][2][1][11][0] = 127,
- [1][1][2][1][2][1] = 127,
- [1][1][2][1][1][1] = 127,
- [1][1][2][1][3][1] = 127,
- [1][1][2][1][5][1] = 127,
- [1][1][2][1][6][1] = 127,
- [1][1][2][1][9][1] = 127,
- [1][1][2][1][8][1] = 127,
- [1][1][2][1][11][1] = 127,
- [1][1][2][1][2][2] = 52,
- [1][1][2][1][1][2] = 34,
- [1][1][2][1][3][2] = 64,
- [1][1][2][1][5][2] = 52,
- [1][1][2][1][6][2] = 34,
- [1][1][2][1][9][2] = 34,
- [1][1][2][1][8][2] = 36,
- [1][1][2][1][11][2] = 34,
- [1][1][2][1][2][3] = 52,
- [1][1][2][1][1][3] = 34,
- [1][1][2][1][3][3] = 64,
- [1][1][2][1][5][3] = 52,
- [1][1][2][1][6][3] = 34,
- [1][1][2][1][9][3] = 34,
- [1][1][2][1][8][3] = 36,
- [1][1][2][1][11][3] = 34,
- [1][1][2][1][2][4] = 56,
- [1][1][2][1][1][4] = 34,
- [1][1][2][1][3][4] = 64,
- [1][1][2][1][5][4] = 56,
- [1][1][2][1][6][4] = 34,
- [1][1][2][1][9][4] = 34,
- [1][1][2][1][8][4] = 36,
- [1][1][2][1][11][4] = 34,
- [1][1][2][1][2][5] = 60,
- [1][1][2][1][1][5] = 34,
- [1][1][2][1][3][5] = 64,
- [1][1][2][1][5][5] = 60,
- [1][1][2][1][6][5] = 34,
- [1][1][2][1][9][5] = 34,
- [1][1][2][1][8][5] = 36,
- [1][1][2][1][11][5] = 34,
- [1][1][2][1][2][6] = 54,
- [1][1][2][1][1][6] = 34,
- [1][1][2][1][3][6] = 64,
- [1][1][2][1][5][6] = 52,
- [1][1][2][1][6][6] = 34,
- [1][1][2][1][9][6] = 34,
- [1][1][2][1][8][6] = 36,
- [1][1][2][1][11][6] = 34,
- [1][1][2][1][2][7] = 50,
- [1][1][2][1][1][7] = 34,
- [1][1][2][1][3][7] = 64,
- [1][1][2][1][5][7] = 48,
- [1][1][2][1][6][7] = 34,
- [1][1][2][1][9][7] = 34,
- [1][1][2][1][8][7] = 36,
- [1][1][2][1][11][7] = 34,
- [1][1][2][1][2][8] = 50,
- [1][1][2][1][1][8] = 34,
- [1][1][2][1][3][8] = 64,
- [1][1][2][1][5][8] = 48,
- [1][1][2][1][6][8] = 34,
- [1][1][2][1][9][8] = 34,
- [1][1][2][1][8][8] = 36,
- [1][1][2][1][11][8] = 34,
- [1][1][2][1][2][9] = 38,
- [1][1][2][1][1][9] = 34,
- [1][1][2][1][3][9] = 64,
- [1][1][2][1][5][9] = 38,
- [1][1][2][1][6][9] = 34,
- [1][1][2][1][9][9] = 34,
- [1][1][2][1][8][9] = 36,
- [1][1][2][1][11][9] = 34,
- [1][1][2][1][2][10] = 36,
- [1][1][2][1][1][10] = 34,
- [1][1][2][1][3][10] = 64,
- [1][1][2][1][5][10] = 36,
- [1][1][2][1][6][10] = 34,
- [1][1][2][1][9][10] = 34,
- [1][1][2][1][8][10] = 36,
- [1][1][2][1][11][10] = 34,
- [1][1][2][1][2][11] = 127,
- [1][1][2][1][1][11] = 127,
- [1][1][2][1][3][11] = 127,
- [1][1][2][1][5][11] = 127,
- [1][1][2][1][6][11] = 127,
- [1][1][2][1][9][11] = 127,
- [1][1][2][1][8][11] = 127,
- [1][1][2][1][11][11] = 127,
- [1][1][2][1][2][12] = 127,
- [1][1][2][1][1][12] = 127,
- [1][1][2][1][3][12] = 127,
- [1][1][2][1][5][12] = 127,
- [1][1][2][1][6][12] = 127,
- [1][1][2][1][9][12] = 127,
- [1][1][2][1][8][12] = 127,
- [1][1][2][1][11][12] = 127,
- [1][1][2][1][2][13] = 127,
- [1][1][2][1][1][13] = 127,
- [1][1][2][1][3][13] = 127,
- [1][1][2][1][5][13] = 127,
- [1][1][2][1][6][13] = 127,
- [1][1][2][1][9][13] = 127,
- [1][1][2][1][8][13] = 127,
- [1][1][2][1][11][13] = 127,
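
[Editor's note between the two halves of this hunk: the rows removed above index the regulatory domain with bare numbers in the fifth position, while the rows added below use the named RTW89_* enumerators and group new RTW89_WW (worldwide) entries first. A minimal C sketch of how a table with this shape could be declared and consulted follows; the dimension macros, the txpwr_lmt/txpwr_lmt_get names, and the fallback policy are illustrative assumptions read off the indices visible in this hunk, not code quoted from the patch.]

/*
 * Sketch only, assuming a [bw][ntx][rate_section][beamforming][regd][channel]
 * layout as suggested by the six indices in this hunk.  Sizes are inferred
 * from the index values that actually appear; 127 is the sentinel the hunk
 * uses where no limit is defined.
 */
#define BW_NUM    2   /* first index takes 0..1 above */
#define NTX_NUM   2   /* second index takes 0..1 */
#define RS_NUM    3   /* third index takes 0..2 (rate section) */
#define BF_NUM    2   /* fourth index takes 0..1 (beamforming) */
#define REGD_WW   0   /* worldwide default, listed first below */
#define REGD_NUM  12  /* WW plus the eleven named domains in this hunk */
#define CH_NUM    14  /* 2 GHz channels 1..14 map to indices 0..13 */

#define TXPWR_LMT_INVALID 127

static const signed char txpwr_lmt[BW_NUM][NTX_NUM][RS_NUM][BF_NUM][REGD_NUM][CH_NUM];

/*
 * One plausible lookup policy (not necessarily the driver's): prefer the
 * domain-specific entry and fall back to the worldwide row; a 127 result
 * means the channel carries no usable limit in either row.
 */
static signed char txpwr_lmt_get(int bw, int ntx, int rs, int bf,
				 int regd, int ch)
{
	signed char lmt = txpwr_lmt[bw][ntx][rs][bf][regd][ch];

	if (lmt == TXPWR_LMT_INVALID)
		lmt = txpwr_lmt[bw][ntx][rs][bf][REGD_WW][ch];
	return lmt;
}

[The RTW89_WW rows added below are consistent with a per-channel minimum over the named domains (with 0 where every domain reports 127): for example [0][0][0][0][RTW89_WW][12] = 48 matches the smallest of the per-domain [12] entries (FCC/IC/CHILE/MEXICO = 48), and [1][0][0][0][RTW89_WW][0] = 0 corresponds to all domains reporting 127 on that channel.]
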
+ [0][0][0][0][RTW89_WW][0] = 56,
+ [0][0][0][0][RTW89_WW][1] = 56,
+ [0][0][0][0][RTW89_WW][2] = 56,
+ [0][0][0][0][RTW89_WW][3] = 56,
+ [0][0][0][0][RTW89_WW][4] = 56,
+ [0][0][0][0][RTW89_WW][5] = 56,
+ [0][0][0][0][RTW89_WW][6] = 56,
+ [0][0][0][0][RTW89_WW][7] = 56,
+ [0][0][0][0][RTW89_WW][8] = 56,
+ [0][0][0][0][RTW89_WW][9] = 56,
+ [0][0][0][0][RTW89_WW][10] = 56,
+ [0][0][0][0][RTW89_WW][11] = 56,
+ [0][0][0][0][RTW89_WW][12] = 48,
+ [0][0][0][0][RTW89_WW][13] = 76,
+ [0][1][0][0][RTW89_WW][0] = 44,
+ [0][1][0][0][RTW89_WW][1] = 44,
+ [0][1][0][0][RTW89_WW][2] = 44,
+ [0][1][0][0][RTW89_WW][3] = 44,
+ [0][1][0][0][RTW89_WW][4] = 44,
+ [0][1][0][0][RTW89_WW][5] = 44,
+ [0][1][0][0][RTW89_WW][6] = 44,
+ [0][1][0][0][RTW89_WW][7] = 44,
+ [0][1][0][0][RTW89_WW][8] = 44,
+ [0][1][0][0][RTW89_WW][9] = 44,
+ [0][1][0][0][RTW89_WW][10] = 44,
+ [0][1][0][0][RTW89_WW][11] = 44,
+ [0][1][0][0][RTW89_WW][12] = 38,
+ [0][1][0][0][RTW89_WW][13] = 64,
+ [1][0][0][0][RTW89_WW][0] = 0,
+ [1][0][0][0][RTW89_WW][1] = 0,
+ [1][0][0][0][RTW89_WW][2] = 58,
+ [1][0][0][0][RTW89_WW][3] = 58,
+ [1][0][0][0][RTW89_WW][4] = 58,
+ [1][0][0][0][RTW89_WW][5] = 58,
+ [1][0][0][0][RTW89_WW][6] = 46,
+ [1][0][0][0][RTW89_WW][7] = 46,
+ [1][0][0][0][RTW89_WW][8] = 46,
+ [1][0][0][0][RTW89_WW][9] = 32,
+ [1][0][0][0][RTW89_WW][10] = 32,
+ [1][0][0][0][RTW89_WW][11] = 0,
+ [1][0][0][0][RTW89_WW][12] = 0,
+ [1][0][0][0][RTW89_WW][13] = 0,
+ [1][1][0][0][RTW89_WW][0] = 0,
+ [1][1][0][0][RTW89_WW][1] = 0,
+ [1][1][0][0][RTW89_WW][2] = 46,
+ [1][1][0][0][RTW89_WW][3] = 46,
+ [1][1][0][0][RTW89_WW][4] = 46,
+ [1][1][0][0][RTW89_WW][5] = 46,
+ [1][1][0][0][RTW89_WW][6] = 46,
+ [1][1][0][0][RTW89_WW][7] = 46,
+ [1][1][0][0][RTW89_WW][8] = 46,
+ [1][1][0][0][RTW89_WW][9] = 24,
+ [1][1][0][0][RTW89_WW][10] = 24,
+ [1][1][0][0][RTW89_WW][11] = 0,
+ [1][1][0][0][RTW89_WW][12] = 0,
+ [1][1][0][0][RTW89_WW][13] = 0,
+ [0][0][1][0][RTW89_WW][0] = 58,
+ [0][0][1][0][RTW89_WW][1] = 58,
+ [0][0][1][0][RTW89_WW][2] = 58,
+ [0][0][1][0][RTW89_WW][3] = 58,
+ [0][0][1][0][RTW89_WW][4] = 58,
+ [0][0][1][0][RTW89_WW][5] = 58,
+ [0][0][1][0][RTW89_WW][6] = 58,
+ [0][0][1][0][RTW89_WW][7] = 58,
+ [0][0][1][0][RTW89_WW][8] = 58,
+ [0][0][1][0][RTW89_WW][9] = 58,
+ [0][0][1][0][RTW89_WW][10] = 58,
+ [0][0][1][0][RTW89_WW][11] = 56,
+ [0][0][1][0][RTW89_WW][12] = 52,
+ [0][0][1][0][RTW89_WW][13] = 0,
+ [0][1][1][0][RTW89_WW][0] = 46,
+ [0][1][1][0][RTW89_WW][1] = 46,
+ [0][1][1][0][RTW89_WW][2] = 46,
+ [0][1][1][0][RTW89_WW][3] = 46,
+ [0][1][1][0][RTW89_WW][4] = 46,
+ [0][1][1][0][RTW89_WW][5] = 46,
+ [0][1][1][0][RTW89_WW][6] = 46,
+ [0][1][1][0][RTW89_WW][7] = 46,
+ [0][1][1][0][RTW89_WW][8] = 46,
+ [0][1][1][0][RTW89_WW][9] = 46,
+ [0][1][1][0][RTW89_WW][10] = 46,
+ [0][1][1][0][RTW89_WW][11] = 42,
+ [0][1][1][0][RTW89_WW][12] = 40,
+ [0][1][1][0][RTW89_WW][13] = 0,
+ [0][0][2][0][RTW89_WW][0] = 58,
+ [0][0][2][0][RTW89_WW][1] = 58,
+ [0][0][2][0][RTW89_WW][2] = 58,
+ [0][0][2][0][RTW89_WW][3] = 58,
+ [0][0][2][0][RTW89_WW][4] = 58,
+ [0][0][2][0][RTW89_WW][5] = 58,
+ [0][0][2][0][RTW89_WW][6] = 58,
+ [0][0][2][0][RTW89_WW][7] = 58,
+ [0][0][2][0][RTW89_WW][8] = 58,
+ [0][0][2][0][RTW89_WW][9] = 58,
+ [0][0][2][0][RTW89_WW][10] = 58,
+ [0][0][2][0][RTW89_WW][11] = 54,
+ [0][0][2][0][RTW89_WW][12] = 50,
+ [0][0][2][0][RTW89_WW][13] = 0,
+ [0][1][2][0][RTW89_WW][0] = 46,
+ [0][1][2][0][RTW89_WW][1] = 46,
+ [0][1][2][0][RTW89_WW][2] = 46,
+ [0][1][2][0][RTW89_WW][3] = 46,
+ [0][1][2][0][RTW89_WW][4] = 46,
+ [0][1][2][0][RTW89_WW][5] = 46,
+ [0][1][2][0][RTW89_WW][6] = 46,
+ [0][1][2][0][RTW89_WW][7] = 46,
+ [0][1][2][0][RTW89_WW][8] = 46,
+ [0][1][2][0][RTW89_WW][9] = 46,
+ [0][1][2][0][RTW89_WW][10] = 46,
+ [0][1][2][0][RTW89_WW][11] = 42,
+ [0][1][2][0][RTW89_WW][12] = 40,
+ [0][1][2][0][RTW89_WW][13] = 0,
+ [0][1][2][1][RTW89_WW][0] = 34,
+ [0][1][2][1][RTW89_WW][1] = 34,
+ [0][1][2][1][RTW89_WW][2] = 34,
+ [0][1][2][1][RTW89_WW][3] = 34,
+ [0][1][2][1][RTW89_WW][4] = 34,
+ [0][1][2][1][RTW89_WW][5] = 34,
+ [0][1][2][1][RTW89_WW][6] = 34,
+ [0][1][2][1][RTW89_WW][7] = 34,
+ [0][1][2][1][RTW89_WW][8] = 34,
+ [0][1][2][1][RTW89_WW][9] = 34,
+ [0][1][2][1][RTW89_WW][10] = 34,
+ [0][1][2][1][RTW89_WW][11] = 34,
+ [0][1][2][1][RTW89_WW][12] = 34,
+ [0][1][2][1][RTW89_WW][13] = 0,
+ [1][0][2][0][RTW89_WW][0] = 0,
+ [1][0][2][0][RTW89_WW][1] = 0,
+ [1][0][2][0][RTW89_WW][2] = 56,
+ [1][0][2][0][RTW89_WW][3] = 56,
+ [1][0][2][0][RTW89_WW][4] = 58,
+ [1][0][2][0][RTW89_WW][5] = 58,
+ [1][0][2][0][RTW89_WW][6] = 54,
+ [1][0][2][0][RTW89_WW][7] = 50,
+ [1][0][2][0][RTW89_WW][8] = 50,
+ [1][0][2][0][RTW89_WW][9] = 42,
+ [1][0][2][0][RTW89_WW][10] = 40,
+ [1][0][2][0][RTW89_WW][11] = 0,
+ [1][0][2][0][RTW89_WW][12] = 0,
+ [1][0][2][0][RTW89_WW][13] = 0,
+ [1][1][2][0][RTW89_WW][0] = 0,
+ [1][1][2][0][RTW89_WW][1] = 0,
+ [1][1][2][0][RTW89_WW][2] = 46,
+ [1][1][2][0][RTW89_WW][3] = 46,
+ [1][1][2][0][RTW89_WW][4] = 46,
+ [1][1][2][0][RTW89_WW][5] = 46,
+ [1][1][2][0][RTW89_WW][6] = 46,
+ [1][1][2][0][RTW89_WW][7] = 46,
+ [1][1][2][0][RTW89_WW][8] = 46,
+ [1][1][2][0][RTW89_WW][9] = 38,
+ [1][1][2][0][RTW89_WW][10] = 36,
+ [1][1][2][0][RTW89_WW][11] = 0,
+ [1][1][2][0][RTW89_WW][12] = 0,
+ [1][1][2][0][RTW89_WW][13] = 0,
+ [1][1][2][1][RTW89_WW][0] = 0,
+ [1][1][2][1][RTW89_WW][1] = 0,
+ [1][1][2][1][RTW89_WW][2] = 34,
+ [1][1][2][1][RTW89_WW][3] = 34,
+ [1][1][2][1][RTW89_WW][4] = 34,
+ [1][1][2][1][RTW89_WW][5] = 34,
+ [1][1][2][1][RTW89_WW][6] = 34,
+ [1][1][2][1][RTW89_WW][7] = 34,
+ [1][1][2][1][RTW89_WW][8] = 34,
+ [1][1][2][1][RTW89_WW][9] = 34,
+ [1][1][2][1][RTW89_WW][10] = 34,
+ [1][1][2][1][RTW89_WW][11] = 0,
+ [1][1][2][1][RTW89_WW][12] = 0,
+ [1][1][2][1][RTW89_WW][13] = 0,
+ [0][0][0][0][RTW89_FCC][0] = 76,
+ [0][0][0][0][RTW89_ETSI][0] = 56,
+ [0][0][0][0][RTW89_MKK][0] = 68,
+ [0][0][0][0][RTW89_IC][0] = 76,
+ [0][0][0][0][RTW89_KCC][0] = 76,
+ [0][0][0][0][RTW89_ACMA][0] = 56,
+ [0][0][0][0][RTW89_CHILE][0] = 60,
+ [0][0][0][0][RTW89_UKRAINE][0] = 56,
+ [0][0][0][0][RTW89_MEXICO][0] = 76,
+ [0][0][0][0][RTW89_CN][0] = 56,
+ [0][0][0][0][RTW89_QATAR][0] = 56,
+ [0][0][0][0][RTW89_FCC][1] = 76,
+ [0][0][0][0][RTW89_ETSI][1] = 56,
+ [0][0][0][0][RTW89_MKK][1] = 68,
+ [0][0][0][0][RTW89_IC][1] = 76,
+ [0][0][0][0][RTW89_KCC][1] = 76,
+ [0][0][0][0][RTW89_ACMA][1] = 56,
+ [0][0][0][0][RTW89_CHILE][1] = 60,
+ [0][0][0][0][RTW89_UKRAINE][1] = 56,
+ [0][0][0][0][RTW89_MEXICO][1] = 76,
+ [0][0][0][0][RTW89_CN][1] = 56,
+ [0][0][0][0][RTW89_QATAR][1] = 56,
+ [0][0][0][0][RTW89_FCC][2] = 76,
+ [0][0][0][0][RTW89_ETSI][2] = 56,
+ [0][0][0][0][RTW89_MKK][2] = 68,
+ [0][0][0][0][RTW89_IC][2] = 76,
+ [0][0][0][0][RTW89_KCC][2] = 76,
+ [0][0][0][0][RTW89_ACMA][2] = 56,
+ [0][0][0][0][RTW89_CHILE][2] = 60,
+ [0][0][0][0][RTW89_UKRAINE][2] = 56,
+ [0][0][0][0][RTW89_MEXICO][2] = 76,
+ [0][0][0][0][RTW89_CN][2] = 56,
+ [0][0][0][0][RTW89_QATAR][2] = 56,
+ [0][0][0][0][RTW89_FCC][3] = 76,
+ [0][0][0][0][RTW89_ETSI][3] = 56,
+ [0][0][0][0][RTW89_MKK][3] = 68,
+ [0][0][0][0][RTW89_IC][3] = 76,
+ [0][0][0][0][RTW89_KCC][3] = 76,
+ [0][0][0][0][RTW89_ACMA][3] = 56,
+ [0][0][0][0][RTW89_CHILE][3] = 60,
+ [0][0][0][0][RTW89_UKRAINE][3] = 56,
+ [0][0][0][0][RTW89_MEXICO][3] = 76,
+ [0][0][0][0][RTW89_CN][3] = 56,
+ [0][0][0][0][RTW89_QATAR][3] = 56,
+ [0][0][0][0][RTW89_FCC][4] = 76,
+ [0][0][0][0][RTW89_ETSI][4] = 56,
+ [0][0][0][0][RTW89_MKK][4] = 68,
+ [0][0][0][0][RTW89_IC][4] = 76,
+ [0][0][0][0][RTW89_KCC][4] = 76,
+ [0][0][0][0][RTW89_ACMA][4] = 56,
+ [0][0][0][0][RTW89_CHILE][4] = 60,
+ [0][0][0][0][RTW89_UKRAINE][4] = 56,
+ [0][0][0][0][RTW89_MEXICO][4] = 76,
+ [0][0][0][0][RTW89_CN][4] = 56,
+ [0][0][0][0][RTW89_QATAR][4] = 56,
+ [0][0][0][0][RTW89_FCC][5] = 76,
+ [0][0][0][0][RTW89_ETSI][5] = 56,
+ [0][0][0][0][RTW89_MKK][5] = 68,
+ [0][0][0][0][RTW89_IC][5] = 76,
+ [0][0][0][0][RTW89_KCC][5] = 76,
+ [0][0][0][0][RTW89_ACMA][5] = 56,
+ [0][0][0][0][RTW89_CHILE][5] = 60,
+ [0][0][0][0][RTW89_UKRAINE][5] = 56,
+ [0][0][0][0][RTW89_MEXICO][5] = 76,
+ [0][0][0][0][RTW89_CN][5] = 56,
+ [0][0][0][0][RTW89_QATAR][5] = 56,
+ [0][0][0][0][RTW89_FCC][6] = 76,
+ [0][0][0][0][RTW89_ETSI][6] = 56,
+ [0][0][0][0][RTW89_MKK][6] = 68,
+ [0][0][0][0][RTW89_IC][6] = 76,
+ [0][0][0][0][RTW89_KCC][6] = 76,
+ [0][0][0][0][RTW89_ACMA][6] = 56,
+ [0][0][0][0][RTW89_CHILE][6] = 60,
+ [0][0][0][0][RTW89_UKRAINE][6] = 56,
+ [0][0][0][0][RTW89_MEXICO][6] = 76,
+ [0][0][0][0][RTW89_CN][6] = 56,
+ [0][0][0][0][RTW89_QATAR][6] = 56,
+ [0][0][0][0][RTW89_FCC][7] = 76,
+ [0][0][0][0][RTW89_ETSI][7] = 56,
+ [0][0][0][0][RTW89_MKK][7] = 68,
+ [0][0][0][0][RTW89_IC][7] = 76,
+ [0][0][0][0][RTW89_KCC][7] = 76,
+ [0][0][0][0][RTW89_ACMA][7] = 56,
+ [0][0][0][0][RTW89_CHILE][7] = 60,
+ [0][0][0][0][RTW89_UKRAINE][7] = 56,
+ [0][0][0][0][RTW89_MEXICO][7] = 76,
+ [0][0][0][0][RTW89_CN][7] = 56,
+ [0][0][0][0][RTW89_QATAR][7] = 56,
+ [0][0][0][0][RTW89_FCC][8] = 76,
+ [0][0][0][0][RTW89_ETSI][8] = 56,
+ [0][0][0][0][RTW89_MKK][8] = 68,
+ [0][0][0][0][RTW89_IC][8] = 76,
+ [0][0][0][0][RTW89_KCC][8] = 76,
+ [0][0][0][0][RTW89_ACMA][8] = 56,
+ [0][0][0][0][RTW89_CHILE][8] = 60,
+ [0][0][0][0][RTW89_UKRAINE][8] = 56,
+ [0][0][0][0][RTW89_MEXICO][8] = 76,
+ [0][0][0][0][RTW89_CN][8] = 56,
+ [0][0][0][0][RTW89_QATAR][8] = 56,
+ [0][0][0][0][RTW89_FCC][9] = 76,
+ [0][0][0][0][RTW89_ETSI][9] = 56,
+ [0][0][0][0][RTW89_MKK][9] = 68,
+ [0][0][0][0][RTW89_IC][9] = 76,
+ [0][0][0][0][RTW89_KCC][9] = 76,
+ [0][0][0][0][RTW89_ACMA][9] = 56,
+ [0][0][0][0][RTW89_CHILE][9] = 60,
+ [0][0][0][0][RTW89_UKRAINE][9] = 56,
+ [0][0][0][0][RTW89_MEXICO][9] = 76,
+ [0][0][0][0][RTW89_CN][9] = 56,
+ [0][0][0][0][RTW89_QATAR][9] = 56,
+ [0][0][0][0][RTW89_FCC][10] = 76,
+ [0][0][0][0][RTW89_ETSI][10] = 56,
+ [0][0][0][0][RTW89_MKK][10] = 68,
+ [0][0][0][0][RTW89_IC][10] = 76,
+ [0][0][0][0][RTW89_KCC][10] = 76,
+ [0][0][0][0][RTW89_ACMA][10] = 56,
+ [0][0][0][0][RTW89_CHILE][10] = 60,
+ [0][0][0][0][RTW89_UKRAINE][10] = 56,
+ [0][0][0][0][RTW89_MEXICO][10] = 76,
+ [0][0][0][0][RTW89_CN][10] = 56,
+ [0][0][0][0][RTW89_QATAR][10] = 56,
+ [0][0][0][0][RTW89_FCC][11] = 68,
+ [0][0][0][0][RTW89_ETSI][11] = 56,
+ [0][0][0][0][RTW89_MKK][11] = 68,
+ [0][0][0][0][RTW89_IC][11] = 68,
+ [0][0][0][0][RTW89_KCC][11] = 76,
+ [0][0][0][0][RTW89_ACMA][11] = 56,
+ [0][0][0][0][RTW89_CHILE][11] = 60,
+ [0][0][0][0][RTW89_UKRAINE][11] = 56,
+ [0][0][0][0][RTW89_MEXICO][11] = 68,
+ [0][0][0][0][RTW89_CN][11] = 56,
+ [0][0][0][0][RTW89_QATAR][11] = 56,
+ [0][0][0][0][RTW89_FCC][12] = 48,
+ [0][0][0][0][RTW89_ETSI][12] = 56,
+ [0][0][0][0][RTW89_MKK][12] = 68,
+ [0][0][0][0][RTW89_IC][12] = 48,
+ [0][0][0][0][RTW89_KCC][12] = 76,
+ [0][0][0][0][RTW89_ACMA][12] = 56,
+ [0][0][0][0][RTW89_CHILE][12] = 48,
+ [0][0][0][0][RTW89_UKRAINE][12] = 56,
+ [0][0][0][0][RTW89_MEXICO][12] = 48,
+ [0][0][0][0][RTW89_CN][12] = 56,
+ [0][0][0][0][RTW89_QATAR][12] = 56,
+ [0][0][0][0][RTW89_FCC][13] = 127,
+ [0][0][0][0][RTW89_ETSI][13] = 127,
+ [0][0][0][0][RTW89_MKK][13] = 76,
+ [0][0][0][0][RTW89_IC][13] = 127,
+ [0][0][0][0][RTW89_KCC][13] = 127,
+ [0][0][0][0][RTW89_ACMA][13] = 127,
+ [0][0][0][0][RTW89_CHILE][13] = 127,
+ [0][0][0][0][RTW89_UKRAINE][13] = 127,
+ [0][0][0][0][RTW89_MEXICO][13] = 127,
+ [0][0][0][0][RTW89_CN][13] = 127,
+ [0][0][0][0][RTW89_QATAR][13] = 127,
+ [0][1][0][0][RTW89_FCC][0] = 74,
+ [0][1][0][0][RTW89_ETSI][0] = 44,
+ [0][1][0][0][RTW89_MKK][0] = 56,
+ [0][1][0][0][RTW89_IC][0] = 74,
+ [0][1][0][0][RTW89_KCC][0] = 68,
+ [0][1][0][0][RTW89_ACMA][0] = 44,
+ [0][1][0][0][RTW89_CHILE][0] = 48,
+ [0][1][0][0][RTW89_UKRAINE][0] = 44,
+ [0][1][0][0][RTW89_MEXICO][0] = 74,
+ [0][1][0][0][RTW89_CN][0] = 44,
+ [0][1][0][0][RTW89_QATAR][0] = 44,
+ [0][1][0][0][RTW89_FCC][1] = 76,
+ [0][1][0][0][RTW89_ETSI][1] = 44,
+ [0][1][0][0][RTW89_MKK][1] = 56,
+ [0][1][0][0][RTW89_IC][1] = 76,
+ [0][1][0][0][RTW89_KCC][1] = 68,
+ [0][1][0][0][RTW89_ACMA][1] = 44,
+ [0][1][0][0][RTW89_CHILE][1] = 48,
+ [0][1][0][0][RTW89_UKRAINE][1] = 44,
+ [0][1][0][0][RTW89_MEXICO][1] = 76,
+ [0][1][0][0][RTW89_CN][1] = 44,
+ [0][1][0][0][RTW89_QATAR][1] = 44,
+ [0][1][0][0][RTW89_FCC][2] = 76,
+ [0][1][0][0][RTW89_ETSI][2] = 44,
+ [0][1][0][0][RTW89_MKK][2] = 56,
+ [0][1][0][0][RTW89_IC][2] = 76,
+ [0][1][0][0][RTW89_KCC][2] = 68,
+ [0][1][0][0][RTW89_ACMA][2] = 44,
+ [0][1][0][0][RTW89_CHILE][2] = 48,
+ [0][1][0][0][RTW89_UKRAINE][2] = 44,
+ [0][1][0][0][RTW89_MEXICO][2] = 76,
+ [0][1][0][0][RTW89_CN][2] = 44,
+ [0][1][0][0][RTW89_QATAR][2] = 44,
+ [0][1][0][0][RTW89_FCC][3] = 76,
+ [0][1][0][0][RTW89_ETSI][3] = 44,
+ [0][1][0][0][RTW89_MKK][3] = 56,
+ [0][1][0][0][RTW89_IC][3] = 76,
+ [0][1][0][0][RTW89_KCC][3] = 68,
+ [0][1][0][0][RTW89_ACMA][3] = 44,
+ [0][1][0][0][RTW89_CHILE][3] = 48,
+ [0][1][0][0][RTW89_UKRAINE][3] = 44,
+ [0][1][0][0][RTW89_MEXICO][3] = 76,
+ [0][1][0][0][RTW89_CN][3] = 44,
+ [0][1][0][0][RTW89_QATAR][3] = 44,
+ [0][1][0][0][RTW89_FCC][4] = 76,
+ [0][1][0][0][RTW89_ETSI][4] = 44,
+ [0][1][0][0][RTW89_MKK][4] = 56,
+ [0][1][0][0][RTW89_IC][4] = 76,
+ [0][1][0][0][RTW89_KCC][4] = 68,
+ [0][1][0][0][RTW89_ACMA][4] = 44,
+ [0][1][0][0][RTW89_CHILE][4] = 48,
+ [0][1][0][0][RTW89_UKRAINE][4] = 44,
+ [0][1][0][0][RTW89_MEXICO][4] = 76,
+ [0][1][0][0][RTW89_CN][4] = 44,
+ [0][1][0][0][RTW89_QATAR][4] = 44,
+ [0][1][0][0][RTW89_FCC][5] = 76,
+ [0][1][0][0][RTW89_ETSI][5] = 44,
+ [0][1][0][0][RTW89_MKK][5] = 56,
+ [0][1][0][0][RTW89_IC][5] = 76,
+ [0][1][0][0][RTW89_KCC][5] = 68,
+ [0][1][0][0][RTW89_ACMA][5] = 44,
+ [0][1][0][0][RTW89_CHILE][5] = 48,
+ [0][1][0][0][RTW89_UKRAINE][5] = 44,
+ [0][1][0][0][RTW89_MEXICO][5] = 76,
+ [0][1][0][0][RTW89_CN][5] = 44,
+ [0][1][0][0][RTW89_QATAR][5] = 44,
+ [0][1][0][0][RTW89_FCC][6] = 76,
+ [0][1][0][0][RTW89_ETSI][6] = 44,
+ [0][1][0][0][RTW89_MKK][6] = 56,
+ [0][1][0][0][RTW89_IC][6] = 76,
+ [0][1][0][0][RTW89_KCC][6] = 68,
+ [0][1][0][0][RTW89_ACMA][6] = 44,
+ [0][1][0][0][RTW89_CHILE][6] = 48,
+ [0][1][0][0][RTW89_UKRAINE][6] = 44,
+ [0][1][0][0][RTW89_MEXICO][6] = 76,
+ [0][1][0][0][RTW89_CN][6] = 44,
+ [0][1][0][0][RTW89_QATAR][6] = 44,
+ [0][1][0][0][RTW89_FCC][7] = 76,
+ [0][1][0][0][RTW89_ETSI][7] = 44,
+ [0][1][0][0][RTW89_MKK][7] = 56,
+ [0][1][0][0][RTW89_IC][7] = 76,
+ [0][1][0][0][RTW89_KCC][7] = 68,
+ [0][1][0][0][RTW89_ACMA][7] = 44,
+ [0][1][0][0][RTW89_CHILE][7] = 48,
+ [0][1][0][0][RTW89_UKRAINE][7] = 44,
+ [0][1][0][0][RTW89_MEXICO][7] = 76,
+ [0][1][0][0][RTW89_CN][7] = 44,
+ [0][1][0][0][RTW89_QATAR][7] = 44,
+ [0][1][0][0][RTW89_FCC][8] = 76,
+ [0][1][0][0][RTW89_ETSI][8] = 44,
+ [0][1][0][0][RTW89_MKK][8] = 56,
+ [0][1][0][0][RTW89_IC][8] = 76,
+ [0][1][0][0][RTW89_KCC][8] = 68,
+ [0][1][0][0][RTW89_ACMA][8] = 44,
+ [0][1][0][0][RTW89_CHILE][8] = 48,
+ [0][1][0][0][RTW89_UKRAINE][8] = 44,
+ [0][1][0][0][RTW89_MEXICO][8] = 76,
+ [0][1][0][0][RTW89_CN][8] = 44,
+ [0][1][0][0][RTW89_QATAR][8] = 44,
+ [0][1][0][0][RTW89_FCC][9] = 76,
+ [0][1][0][0][RTW89_ETSI][9] = 44,
+ [0][1][0][0][RTW89_MKK][9] = 56,
+ [0][1][0][0][RTW89_IC][9] = 76,
+ [0][1][0][0][RTW89_KCC][9] = 68,
+ [0][1][0][0][RTW89_ACMA][9] = 44,
+ [0][1][0][0][RTW89_CHILE][9] = 48,
+ [0][1][0][0][RTW89_UKRAINE][9] = 44,
+ [0][1][0][0][RTW89_MEXICO][9] = 76,
+ [0][1][0][0][RTW89_CN][9] = 44,
+ [0][1][0][0][RTW89_QATAR][9] = 44,
+ [0][1][0][0][RTW89_FCC][10] = 62,
+ [0][1][0][0][RTW89_ETSI][10] = 44,
+ [0][1][0][0][RTW89_MKK][10] = 56,
+ [0][1][0][0][RTW89_IC][10] = 62,
+ [0][1][0][0][RTW89_KCC][10] = 68,
+ [0][1][0][0][RTW89_ACMA][10] = 44,
+ [0][1][0][0][RTW89_CHILE][10] = 48,
+ [0][1][0][0][RTW89_UKRAINE][10] = 44,
+ [0][1][0][0][RTW89_MEXICO][10] = 62,
+ [0][1][0][0][RTW89_CN][10] = 44,
+ [0][1][0][0][RTW89_QATAR][10] = 44,
+ [0][1][0][0][RTW89_FCC][11] = 52,
+ [0][1][0][0][RTW89_ETSI][11] = 44,
+ [0][1][0][0][RTW89_MKK][11] = 56,
+ [0][1][0][0][RTW89_IC][11] = 52,
+ [0][1][0][0][RTW89_KCC][11] = 68,
+ [0][1][0][0][RTW89_ACMA][11] = 44,
+ [0][1][0][0][RTW89_CHILE][11] = 48,
+ [0][1][0][0][RTW89_UKRAINE][11] = 44,
+ [0][1][0][0][RTW89_MEXICO][11] = 52,
+ [0][1][0][0][RTW89_CN][11] = 44,
+ [0][1][0][0][RTW89_QATAR][11] = 44,
+ [0][1][0][0][RTW89_FCC][12] = 38,
+ [0][1][0][0][RTW89_ETSI][12] = 44,
+ [0][1][0][0][RTW89_MKK][12] = 56,
+ [0][1][0][0][RTW89_IC][12] = 38,
+ [0][1][0][0][RTW89_KCC][12] = 68,
+ [0][1][0][0][RTW89_ACMA][12] = 44,
+ [0][1][0][0][RTW89_CHILE][12] = 38,
+ [0][1][0][0][RTW89_UKRAINE][12] = 44,
+ [0][1][0][0][RTW89_MEXICO][12] = 38,
+ [0][1][0][0][RTW89_CN][12] = 44,
+ [0][1][0][0][RTW89_QATAR][12] = 44,
+ [0][1][0][0][RTW89_FCC][13] = 127,
+ [0][1][0][0][RTW89_ETSI][13] = 127,
+ [0][1][0][0][RTW89_MKK][13] = 64,
+ [0][1][0][0][RTW89_IC][13] = 127,
+ [0][1][0][0][RTW89_KCC][13] = 127,
+ [0][1][0][0][RTW89_ACMA][13] = 127,
+ [0][1][0][0][RTW89_CHILE][13] = 127,
+ [0][1][0][0][RTW89_UKRAINE][13] = 127,
+ [0][1][0][0][RTW89_MEXICO][13] = 127,
+ [0][1][0][0][RTW89_CN][13] = 127,
+ [0][1][0][0][RTW89_QATAR][13] = 127,
+ [1][0][0][0][RTW89_FCC][0] = 127,
+ [1][0][0][0][RTW89_ETSI][0] = 127,
+ [1][0][0][0][RTW89_MKK][0] = 127,
+ [1][0][0][0][RTW89_IC][0] = 127,
+ [1][0][0][0][RTW89_KCC][0] = 127,
+ [1][0][0][0][RTW89_ACMA][0] = 127,
+ [1][0][0][0][RTW89_CHILE][0] = 127,
+ [1][0][0][0][RTW89_UKRAINE][0] = 127,
+ [1][0][0][0][RTW89_MEXICO][0] = 127,
+ [1][0][0][0][RTW89_CN][0] = 127,
+ [1][0][0][0][RTW89_QATAR][0] = 127,
+ [1][0][0][0][RTW89_FCC][1] = 127,
+ [1][0][0][0][RTW89_ETSI][1] = 127,
+ [1][0][0][0][RTW89_MKK][1] = 127,
+ [1][0][0][0][RTW89_IC][1] = 127,
+ [1][0][0][0][RTW89_KCC][1] = 127,
+ [1][0][0][0][RTW89_ACMA][1] = 127,
+ [1][0][0][0][RTW89_CHILE][1] = 127,
+ [1][0][0][0][RTW89_UKRAINE][1] = 127,
+ [1][0][0][0][RTW89_MEXICO][1] = 127,
+ [1][0][0][0][RTW89_CN][1] = 127,
+ [1][0][0][0][RTW89_QATAR][1] = 127,
+ [1][0][0][0][RTW89_FCC][2] = 60,
+ [1][0][0][0][RTW89_ETSI][2] = 58,
+ [1][0][0][0][RTW89_MKK][2] = 68,
+ [1][0][0][0][RTW89_IC][2] = 60,
+ [1][0][0][0][RTW89_KCC][2] = 70,
+ [1][0][0][0][RTW89_ACMA][2] = 58,
+ [1][0][0][0][RTW89_CHILE][2] = 60,
+ [1][0][0][0][RTW89_UKRAINE][2] = 58,
+ [1][0][0][0][RTW89_MEXICO][2] = 60,
+ [1][0][0][0][RTW89_CN][2] = 58,
+ [1][0][0][0][RTW89_QATAR][2] = 58,
+ [1][0][0][0][RTW89_FCC][3] = 60,
+ [1][0][0][0][RTW89_ETSI][3] = 58,
+ [1][0][0][0][RTW89_MKK][3] = 68,
+ [1][0][0][0][RTW89_IC][3] = 60,
+ [1][0][0][0][RTW89_KCC][3] = 70,
+ [1][0][0][0][RTW89_ACMA][3] = 58,
+ [1][0][0][0][RTW89_CHILE][3] = 60,
+ [1][0][0][0][RTW89_UKRAINE][3] = 58,
+ [1][0][0][0][RTW89_MEXICO][3] = 60,
+ [1][0][0][0][RTW89_CN][3] = 58,
+ [1][0][0][0][RTW89_QATAR][3] = 58,
+ [1][0][0][0][RTW89_FCC][4] = 60,
+ [1][0][0][0][RTW89_ETSI][4] = 58,
+ [1][0][0][0][RTW89_MKK][4] = 68,
+ [1][0][0][0][RTW89_IC][4] = 60,
+ [1][0][0][0][RTW89_KCC][4] = 70,
+ [1][0][0][0][RTW89_ACMA][4] = 58,
+ [1][0][0][0][RTW89_CHILE][4] = 60,
+ [1][0][0][0][RTW89_UKRAINE][4] = 58,
+ [1][0][0][0][RTW89_MEXICO][4] = 60,
+ [1][0][0][0][RTW89_CN][4] = 58,
+ [1][0][0][0][RTW89_QATAR][4] = 58,
+ [1][0][0][0][RTW89_FCC][5] = 60,
+ [1][0][0][0][RTW89_ETSI][5] = 58,
+ [1][0][0][0][RTW89_MKK][5] = 68,
+ [1][0][0][0][RTW89_IC][5] = 60,
+ [1][0][0][0][RTW89_KCC][5] = 70,
+ [1][0][0][0][RTW89_ACMA][5] = 58,
+ [1][0][0][0][RTW89_CHILE][5] = 60,
+ [1][0][0][0][RTW89_UKRAINE][5] = 58,
+ [1][0][0][0][RTW89_MEXICO][5] = 60,
+ [1][0][0][0][RTW89_CN][5] = 58,
+ [1][0][0][0][RTW89_QATAR][5] = 58,
+ [1][0][0][0][RTW89_FCC][6] = 46,
+ [1][0][0][0][RTW89_ETSI][6] = 58,
+ [1][0][0][0][RTW89_MKK][6] = 68,
+ [1][0][0][0][RTW89_IC][6] = 46,
+ [1][0][0][0][RTW89_KCC][6] = 70,
+ [1][0][0][0][RTW89_ACMA][6] = 58,
+ [1][0][0][0][RTW89_CHILE][6] = 46,
+ [1][0][0][0][RTW89_UKRAINE][6] = 58,
+ [1][0][0][0][RTW89_MEXICO][6] = 46,
+ [1][0][0][0][RTW89_CN][6] = 58,
+ [1][0][0][0][RTW89_QATAR][6] = 58,
+ [1][0][0][0][RTW89_FCC][7] = 46,
+ [1][0][0][0][RTW89_ETSI][7] = 58,
+ [1][0][0][0][RTW89_MKK][7] = 68,
+ [1][0][0][0][RTW89_IC][7] = 46,
+ [1][0][0][0][RTW89_KCC][7] = 70,
+ [1][0][0][0][RTW89_ACMA][7] = 58,
+ [1][0][0][0][RTW89_CHILE][7] = 46,
+ [1][0][0][0][RTW89_UKRAINE][7] = 58,
+ [1][0][0][0][RTW89_MEXICO][7] = 46,
+ [1][0][0][0][RTW89_CN][7] = 58,
+ [1][0][0][0][RTW89_QATAR][7] = 58,
+ [1][0][0][0][RTW89_FCC][8] = 46,
+ [1][0][0][0][RTW89_ETSI][8] = 58,
+ [1][0][0][0][RTW89_MKK][8] = 68,
+ [1][0][0][0][RTW89_IC][8] = 46,
+ [1][0][0][0][RTW89_KCC][8] = 70,
+ [1][0][0][0][RTW89_ACMA][8] = 58,
+ [1][0][0][0][RTW89_CHILE][8] = 46,
+ [1][0][0][0][RTW89_UKRAINE][8] = 58,
+ [1][0][0][0][RTW89_MEXICO][8] = 46,
+ [1][0][0][0][RTW89_CN][8] = 58,
+ [1][0][0][0][RTW89_QATAR][8] = 58,
+ [1][0][0][0][RTW89_FCC][9] = 32,
+ [1][0][0][0][RTW89_ETSI][9] = 58,
+ [1][0][0][0][RTW89_MKK][9] = 68,
+ [1][0][0][0][RTW89_IC][9] = 32,
+ [1][0][0][0][RTW89_KCC][9] = 70,
+ [1][0][0][0][RTW89_ACMA][9] = 58,
+ [1][0][0][0][RTW89_CHILE][9] = 32,
+ [1][0][0][0][RTW89_UKRAINE][9] = 58,
+ [1][0][0][0][RTW89_MEXICO][9] = 32,
+ [1][0][0][0][RTW89_CN][9] = 58,
+ [1][0][0][0][RTW89_QATAR][9] = 58,
+ [1][0][0][0][RTW89_FCC][10] = 32,
+ [1][0][0][0][RTW89_ETSI][10] = 58,
+ [1][0][0][0][RTW89_MKK][10] = 68,
+ [1][0][0][0][RTW89_IC][10] = 32,
+ [1][0][0][0][RTW89_KCC][10] = 70,
+ [1][0][0][0][RTW89_ACMA][10] = 58,
+ [1][0][0][0][RTW89_CHILE][10] = 32,
+ [1][0][0][0][RTW89_UKRAINE][10] = 58,
+ [1][0][0][0][RTW89_MEXICO][10] = 32,
+ [1][0][0][0][RTW89_CN][10] = 58,
+ [1][0][0][0][RTW89_QATAR][10] = 58,
+ [1][0][0][0][RTW89_FCC][11] = 127,
+ [1][0][0][0][RTW89_ETSI][11] = 127,
+ [1][0][0][0][RTW89_MKK][11] = 127,
+ [1][0][0][0][RTW89_IC][11] = 127,
+ [1][0][0][0][RTW89_KCC][11] = 127,
+ [1][0][0][0][RTW89_ACMA][11] = 127,
+ [1][0][0][0][RTW89_CHILE][11] = 127,
+ [1][0][0][0][RTW89_UKRAINE][11] = 127,
+ [1][0][0][0][RTW89_MEXICO][11] = 127,
+ [1][0][0][0][RTW89_CN][11] = 127,
+ [1][0][0][0][RTW89_QATAR][11] = 127,
+ [1][0][0][0][RTW89_FCC][12] = 127,
+ [1][0][0][0][RTW89_ETSI][12] = 127,
+ [1][0][0][0][RTW89_MKK][12] = 127,
+ [1][0][0][0][RTW89_IC][12] = 127,
+ [1][0][0][0][RTW89_KCC][12] = 127,
+ [1][0][0][0][RTW89_ACMA][12] = 127,
+ [1][0][0][0][RTW89_CHILE][12] = 127,
+ [1][0][0][0][RTW89_UKRAINE][12] = 127,
+ [1][0][0][0][RTW89_MEXICO][12] = 127,
+ [1][0][0][0][RTW89_CN][12] = 127,
+ [1][0][0][0][RTW89_QATAR][12] = 127,
+ [1][0][0][0][RTW89_FCC][13] = 127,
+ [1][0][0][0][RTW89_ETSI][13] = 127,
+ [1][0][0][0][RTW89_MKK][13] = 127,
+ [1][0][0][0][RTW89_IC][13] = 127,
+ [1][0][0][0][RTW89_KCC][13] = 127,
+ [1][0][0][0][RTW89_ACMA][13] = 127,
+ [1][0][0][0][RTW89_CHILE][13] = 127,
+ [1][0][0][0][RTW89_UKRAINE][13] = 127,
+ [1][0][0][0][RTW89_MEXICO][13] = 127,
+ [1][0][0][0][RTW89_CN][13] = 127,
+ [1][0][0][0][RTW89_QATAR][13] = 127,
+ [1][1][0][0][RTW89_FCC][0] = 127,
+ [1][1][0][0][RTW89_ETSI][0] = 127,
+ [1][1][0][0][RTW89_MKK][0] = 127,
+ [1][1][0][0][RTW89_IC][0] = 127,
+ [1][1][0][0][RTW89_KCC][0] = 127,
+ [1][1][0][0][RTW89_ACMA][0] = 127,
+ [1][1][0][0][RTW89_CHILE][0] = 127,
+ [1][1][0][0][RTW89_UKRAINE][0] = 127,
+ [1][1][0][0][RTW89_MEXICO][0] = 127,
+ [1][1][0][0][RTW89_CN][0] = 127,
+ [1][1][0][0][RTW89_QATAR][0] = 127,
+ [1][1][0][0][RTW89_FCC][1] = 127,
+ [1][1][0][0][RTW89_ETSI][1] = 127,
+ [1][1][0][0][RTW89_MKK][1] = 127,
+ [1][1][0][0][RTW89_IC][1] = 127,
+ [1][1][0][0][RTW89_KCC][1] = 127,
+ [1][1][0][0][RTW89_ACMA][1] = 127,
+ [1][1][0][0][RTW89_CHILE][1] = 127,
+ [1][1][0][0][RTW89_UKRAINE][1] = 127,
+ [1][1][0][0][RTW89_MEXICO][1] = 127,
+ [1][1][0][0][RTW89_CN][1] = 127,
+ [1][1][0][0][RTW89_QATAR][1] = 127,
+ [1][1][0][0][RTW89_FCC][2] = 48,
+ [1][1][0][0][RTW89_ETSI][2] = 46,
+ [1][1][0][0][RTW89_MKK][2] = 56,
+ [1][1][0][0][RTW89_IC][2] = 48,
+ [1][1][0][0][RTW89_KCC][2] = 58,
+ [1][1][0][0][RTW89_ACMA][2] = 46,
+ [1][1][0][0][RTW89_CHILE][2] = 48,
+ [1][1][0][0][RTW89_UKRAINE][2] = 46,
+ [1][1][0][0][RTW89_MEXICO][2] = 48,
+ [1][1][0][0][RTW89_CN][2] = 46,
+ [1][1][0][0][RTW89_QATAR][2] = 46,
+ [1][1][0][0][RTW89_FCC][3] = 48,
+ [1][1][0][0][RTW89_ETSI][3] = 46,
+ [1][1][0][0][RTW89_MKK][3] = 56,
+ [1][1][0][0][RTW89_IC][3] = 48,
+ [1][1][0][0][RTW89_KCC][3] = 58,
+ [1][1][0][0][RTW89_ACMA][3] = 46,
+ [1][1][0][0][RTW89_CHILE][3] = 48,
+ [1][1][0][0][RTW89_UKRAINE][3] = 46,
+ [1][1][0][0][RTW89_MEXICO][3] = 48,
+ [1][1][0][0][RTW89_CN][3] = 46,
+ [1][1][0][0][RTW89_QATAR][3] = 46,
+ [1][1][0][0][RTW89_FCC][4] = 48,
+ [1][1][0][0][RTW89_ETSI][4] = 46,
+ [1][1][0][0][RTW89_MKK][4] = 56,
+ [1][1][0][0][RTW89_IC][4] = 48,
+ [1][1][0][0][RTW89_KCC][4] = 58,
+ [1][1][0][0][RTW89_ACMA][4] = 46,
+ [1][1][0][0][RTW89_CHILE][4] = 48,
+ [1][1][0][0][RTW89_UKRAINE][4] = 46,
+ [1][1][0][0][RTW89_MEXICO][4] = 48,
+ [1][1][0][0][RTW89_CN][4] = 46,
+ [1][1][0][0][RTW89_QATAR][4] = 46,
+ [1][1][0][0][RTW89_FCC][5] = 58,
+ [1][1][0][0][RTW89_ETSI][5] = 46,
+ [1][1][0][0][RTW89_MKK][5] = 56,
+ [1][1][0][0][RTW89_IC][5] = 58,
+ [1][1][0][0][RTW89_KCC][5] = 58,
+ [1][1][0][0][RTW89_ACMA][5] = 46,
+ [1][1][0][0][RTW89_CHILE][5] = 48,
+ [1][1][0][0][RTW89_UKRAINE][5] = 46,
+ [1][1][0][0][RTW89_MEXICO][5] = 58,
+ [1][1][0][0][RTW89_CN][5] = 46,
+ [1][1][0][0][RTW89_QATAR][5] = 46,
+ [1][1][0][0][RTW89_FCC][6] = 46,
+ [1][1][0][0][RTW89_ETSI][6] = 46,
+ [1][1][0][0][RTW89_MKK][6] = 56,
+ [1][1][0][0][RTW89_IC][6] = 46,
+ [1][1][0][0][RTW89_KCC][6] = 58,
+ [1][1][0][0][RTW89_ACMA][6] = 46,
+ [1][1][0][0][RTW89_CHILE][6] = 46,
+ [1][1][0][0][RTW89_UKRAINE][6] = 46,
+ [1][1][0][0][RTW89_MEXICO][6] = 46,
+ [1][1][0][0][RTW89_CN][6] = 46,
+ [1][1][0][0][RTW89_QATAR][6] = 46,
+ [1][1][0][0][RTW89_FCC][7] = 46,
+ [1][1][0][0][RTW89_ETSI][7] = 46,
+ [1][1][0][0][RTW89_MKK][7] = 56,
+ [1][1][0][0][RTW89_IC][7] = 46,
+ [1][1][0][0][RTW89_KCC][7] = 58,
+ [1][1][0][0][RTW89_ACMA][7] = 46,
+ [1][1][0][0][RTW89_CHILE][7] = 46,
+ [1][1][0][0][RTW89_UKRAINE][7] = 46,
+ [1][1][0][0][RTW89_MEXICO][7] = 46,
+ [1][1][0][0][RTW89_CN][7] = 46,
+ [1][1][0][0][RTW89_QATAR][7] = 46,
+ [1][1][0][0][RTW89_FCC][8] = 46,
+ [1][1][0][0][RTW89_ETSI][8] = 46,
+ [1][1][0][0][RTW89_MKK][8] = 56,
+ [1][1][0][0][RTW89_IC][8] = 46,
+ [1][1][0][0][RTW89_KCC][8] = 58,
+ [1][1][0][0][RTW89_ACMA][8] = 46,
+ [1][1][0][0][RTW89_CHILE][8] = 46,
+ [1][1][0][0][RTW89_UKRAINE][8] = 46,
+ [1][1][0][0][RTW89_MEXICO][8] = 46,
+ [1][1][0][0][RTW89_CN][8] = 46,
+ [1][1][0][0][RTW89_QATAR][8] = 46,
+ [1][1][0][0][RTW89_FCC][9] = 24,
+ [1][1][0][0][RTW89_ETSI][9] = 46,
+ [1][1][0][0][RTW89_MKK][9] = 56,
+ [1][1][0][0][RTW89_IC][9] = 24,
+ [1][1][0][0][RTW89_KCC][9] = 58,
+ [1][1][0][0][RTW89_ACMA][9] = 46,
+ [1][1][0][0][RTW89_CHILE][9] = 24,
+ [1][1][0][0][RTW89_UKRAINE][9] = 46,
+ [1][1][0][0][RTW89_MEXICO][9] = 24,
+ [1][1][0][0][RTW89_CN][9] = 46,
+ [1][1][0][0][RTW89_QATAR][9] = 46,
+ [1][1][0][0][RTW89_FCC][10] = 24,
+ [1][1][0][0][RTW89_ETSI][10] = 46,
+ [1][1][0][0][RTW89_MKK][10] = 56,
+ [1][1][0][0][RTW89_IC][10] = 24,
+ [1][1][0][0][RTW89_KCC][10] = 58,
+ [1][1][0][0][RTW89_ACMA][10] = 46,
+ [1][1][0][0][RTW89_CHILE][10] = 24,
+ [1][1][0][0][RTW89_UKRAINE][10] = 46,
+ [1][1][0][0][RTW89_MEXICO][10] = 24,
+ [1][1][0][0][RTW89_CN][10] = 46,
+ [1][1][0][0][RTW89_QATAR][10] = 46,
+ [1][1][0][0][RTW89_FCC][11] = 127,
+ [1][1][0][0][RTW89_ETSI][11] = 127,
+ [1][1][0][0][RTW89_MKK][11] = 127,
+ [1][1][0][0][RTW89_IC][11] = 127,
+ [1][1][0][0][RTW89_KCC][11] = 127,
+ [1][1][0][0][RTW89_ACMA][11] = 127,
+ [1][1][0][0][RTW89_CHILE][11] = 127,
+ [1][1][0][0][RTW89_UKRAINE][11] = 127,
+ [1][1][0][0][RTW89_MEXICO][11] = 127,
+ [1][1][0][0][RTW89_CN][11] = 127,
+ [1][1][0][0][RTW89_QATAR][11] = 127,
+ [1][1][0][0][RTW89_FCC][12] = 127,
+ [1][1][0][0][RTW89_ETSI][12] = 127,
+ [1][1][0][0][RTW89_MKK][12] = 127,
+ [1][1][0][0][RTW89_IC][12] = 127,
+ [1][1][0][0][RTW89_KCC][12] = 127,
+ [1][1][0][0][RTW89_ACMA][12] = 127,
+ [1][1][0][0][RTW89_CHILE][12] = 127,
+ [1][1][0][0][RTW89_UKRAINE][12] = 127,
+ [1][1][0][0][RTW89_MEXICO][12] = 127,
+ [1][1][0][0][RTW89_CN][12] = 127,
+ [1][1][0][0][RTW89_QATAR][12] = 127,
+ [1][1][0][0][RTW89_FCC][13] = 127,
+ [1][1][0][0][RTW89_ETSI][13] = 127,
+ [1][1][0][0][RTW89_MKK][13] = 127,
+ [1][1][0][0][RTW89_IC][13] = 127,
+ [1][1][0][0][RTW89_KCC][13] = 127,
+ [1][1][0][0][RTW89_ACMA][13] = 127,
+ [1][1][0][0][RTW89_CHILE][13] = 127,
+ [1][1][0][0][RTW89_UKRAINE][13] = 127,
+ [1][1][0][0][RTW89_MEXICO][13] = 127,
+ [1][1][0][0][RTW89_CN][13] = 127,
+ [1][1][0][0][RTW89_QATAR][13] = 127,
+ [0][0][1][0][RTW89_FCC][0] = 66,
+ [0][0][1][0][RTW89_ETSI][0] = 58,
+ [0][0][1][0][RTW89_MKK][0] = 76,
+ [0][0][1][0][RTW89_IC][0] = 66,
+ [0][0][1][0][RTW89_KCC][0] = 76,
+ [0][0][1][0][RTW89_ACMA][0] = 58,
+ [0][0][1][0][RTW89_CHILE][0] = 60,
+ [0][0][1][0][RTW89_UKRAINE][0] = 58,
+ [0][0][1][0][RTW89_MEXICO][0] = 66,
+ [0][0][1][0][RTW89_CN][0] = 58,
+ [0][0][1][0][RTW89_QATAR][0] = 58,
+ [0][0][1][0][RTW89_FCC][1] = 66,
+ [0][0][1][0][RTW89_ETSI][1] = 58,
+ [0][0][1][0][RTW89_MKK][1] = 76,
+ [0][0][1][0][RTW89_IC][1] = 66,
+ [0][0][1][0][RTW89_KCC][1] = 76,
+ [0][0][1][0][RTW89_ACMA][1] = 58,
+ [0][0][1][0][RTW89_CHILE][1] = 60,
+ [0][0][1][0][RTW89_UKRAINE][1] = 58,
+ [0][0][1][0][RTW89_MEXICO][1] = 66,
+ [0][0][1][0][RTW89_CN][1] = 58,
+ [0][0][1][0][RTW89_QATAR][1] = 58,
+ [0][0][1][0][RTW89_FCC][2] = 70,
+ [0][0][1][0][RTW89_ETSI][2] = 58,
+ [0][0][1][0][RTW89_MKK][2] = 76,
+ [0][0][1][0][RTW89_IC][2] = 70,
+ [0][0][1][0][RTW89_KCC][2] = 76,
+ [0][0][1][0][RTW89_ACMA][2] = 58,
+ [0][0][1][0][RTW89_CHILE][2] = 60,
+ [0][0][1][0][RTW89_UKRAINE][2] = 58,
+ [0][0][1][0][RTW89_MEXICO][2] = 70,
+ [0][0][1][0][RTW89_CN][2] = 58,
+ [0][0][1][0][RTW89_QATAR][2] = 58,
+ [0][0][1][0][RTW89_FCC][3] = 74,
+ [0][0][1][0][RTW89_ETSI][3] = 58,
+ [0][0][1][0][RTW89_MKK][3] = 76,
+ [0][0][1][0][RTW89_IC][3] = 74,
+ [0][0][1][0][RTW89_KCC][3] = 76,
+ [0][0][1][0][RTW89_ACMA][3] = 58,
+ [0][0][1][0][RTW89_CHILE][3] = 60,
+ [0][0][1][0][RTW89_UKRAINE][3] = 58,
+ [0][0][1][0][RTW89_MEXICO][3] = 74,
+ [0][0][1][0][RTW89_CN][3] = 58,
+ [0][0][1][0][RTW89_QATAR][3] = 58,
+ [0][0][1][0][RTW89_FCC][4] = 78,
+ [0][0][1][0][RTW89_ETSI][4] = 58,
+ [0][0][1][0][RTW89_MKK][4] = 76,
+ [0][0][1][0][RTW89_IC][4] = 78,
+ [0][0][1][0][RTW89_KCC][4] = 76,
+ [0][0][1][0][RTW89_ACMA][4] = 58,
+ [0][0][1][0][RTW89_CHILE][4] = 60,
+ [0][0][1][0][RTW89_UKRAINE][4] = 58,
+ [0][0][1][0][RTW89_MEXICO][4] = 78,
+ [0][0][1][0][RTW89_CN][4] = 58,
+ [0][0][1][0][RTW89_QATAR][4] = 58,
+ [0][0][1][0][RTW89_FCC][5] = 78,
+ [0][0][1][0][RTW89_ETSI][5] = 58,
+ [0][0][1][0][RTW89_MKK][5] = 76,
+ [0][0][1][0][RTW89_IC][5] = 78,
+ [0][0][1][0][RTW89_KCC][5] = 76,
+ [0][0][1][0][RTW89_ACMA][5] = 58,
+ [0][0][1][0][RTW89_CHILE][5] = 60,
+ [0][0][1][0][RTW89_UKRAINE][5] = 58,
+ [0][0][1][0][RTW89_MEXICO][5] = 78,
+ [0][0][1][0][RTW89_CN][5] = 58,
+ [0][0][1][0][RTW89_QATAR][5] = 58,
+ [0][0][1][0][RTW89_FCC][6] = 78,
+ [0][0][1][0][RTW89_ETSI][6] = 58,
+ [0][0][1][0][RTW89_MKK][6] = 76,
+ [0][0][1][0][RTW89_IC][6] = 78,
+ [0][0][1][0][RTW89_KCC][6] = 76,
+ [0][0][1][0][RTW89_ACMA][6] = 58,
+ [0][0][1][0][RTW89_CHILE][6] = 60,
+ [0][0][1][0][RTW89_UKRAINE][6] = 58,
+ [0][0][1][0][RTW89_MEXICO][6] = 78,
+ [0][0][1][0][RTW89_CN][6] = 58,
+ [0][0][1][0][RTW89_QATAR][6] = 58,
+ [0][0][1][0][RTW89_FCC][7] = 74,
+ [0][0][1][0][RTW89_ETSI][7] = 58,
+ [0][0][1][0][RTW89_MKK][7] = 76,
+ [0][0][1][0][RTW89_IC][7] = 74,
+ [0][0][1][0][RTW89_KCC][7] = 76,
+ [0][0][1][0][RTW89_ACMA][7] = 58,
+ [0][0][1][0][RTW89_CHILE][7] = 60,
+ [0][0][1][0][RTW89_UKRAINE][7] = 58,
+ [0][0][1][0][RTW89_MEXICO][7] = 74,
+ [0][0][1][0][RTW89_CN][7] = 58,
+ [0][0][1][0][RTW89_QATAR][7] = 58,
+ [0][0][1][0][RTW89_FCC][8] = 70,
+ [0][0][1][0][RTW89_ETSI][8] = 58,
+ [0][0][1][0][RTW89_MKK][8] = 76,
+ [0][0][1][0][RTW89_IC][8] = 70,
+ [0][0][1][0][RTW89_KCC][8] = 76,
+ [0][0][1][0][RTW89_ACMA][8] = 58,
+ [0][0][1][0][RTW89_CHILE][8] = 60,
+ [0][0][1][0][RTW89_UKRAINE][8] = 58,
+ [0][0][1][0][RTW89_MEXICO][8] = 70,
+ [0][0][1][0][RTW89_CN][8] = 58,
+ [0][0][1][0][RTW89_QATAR][8] = 58,
+ [0][0][1][0][RTW89_FCC][9] = 66,
+ [0][0][1][0][RTW89_ETSI][9] = 58,
+ [0][0][1][0][RTW89_MKK][9] = 76,
+ [0][0][1][0][RTW89_IC][9] = 66,
+ [0][0][1][0][RTW89_KCC][9] = 76,
+ [0][0][1][0][RTW89_ACMA][9] = 58,
+ [0][0][1][0][RTW89_CHILE][9] = 60,
+ [0][0][1][0][RTW89_UKRAINE][9] = 58,
+ [0][0][1][0][RTW89_MEXICO][9] = 66,
+ [0][0][1][0][RTW89_CN][9] = 58,
+ [0][0][1][0][RTW89_QATAR][9] = 58,
+ [0][0][1][0][RTW89_FCC][10] = 66,
+ [0][0][1][0][RTW89_ETSI][10] = 58,
+ [0][0][1][0][RTW89_MKK][10] = 76,
+ [0][0][1][0][RTW89_IC][10] = 66,
+ [0][0][1][0][RTW89_KCC][10] = 76,
+ [0][0][1][0][RTW89_ACMA][10] = 58,
+ [0][0][1][0][RTW89_CHILE][10] = 60,
+ [0][0][1][0][RTW89_UKRAINE][10] = 58,
+ [0][0][1][0][RTW89_MEXICO][10] = 66,
+ [0][0][1][0][RTW89_CN][10] = 58,
+ [0][0][1][0][RTW89_QATAR][10] = 58,
+ [0][0][1][0][RTW89_FCC][11] = 56,
+ [0][0][1][0][RTW89_ETSI][11] = 58,
+ [0][0][1][0][RTW89_MKK][11] = 76,
+ [0][0][1][0][RTW89_IC][11] = 56,
+ [0][0][1][0][RTW89_KCC][11] = 76,
+ [0][0][1][0][RTW89_ACMA][11] = 58,
+ [0][0][1][0][RTW89_CHILE][11] = 56,
+ [0][0][1][0][RTW89_UKRAINE][11] = 58,
+ [0][0][1][0][RTW89_MEXICO][11] = 56,
+ [0][0][1][0][RTW89_CN][11] = 58,
+ [0][0][1][0][RTW89_QATAR][11] = 58,
+ [0][0][1][0][RTW89_FCC][12] = 52,
+ [0][0][1][0][RTW89_ETSI][12] = 58,
+ [0][0][1][0][RTW89_MKK][12] = 76,
+ [0][0][1][0][RTW89_IC][12] = 52,
+ [0][0][1][0][RTW89_KCC][12] = 76,
+ [0][0][1][0][RTW89_ACMA][12] = 58,
+ [0][0][1][0][RTW89_CHILE][12] = 52,
+ [0][0][1][0][RTW89_UKRAINE][12] = 58,
+ [0][0][1][0][RTW89_MEXICO][12] = 52,
+ [0][0][1][0][RTW89_CN][12] = 58,
+ [0][0][1][0][RTW89_QATAR][12] = 58,
+ [0][0][1][0][RTW89_FCC][13] = 127,
+ [0][0][1][0][RTW89_ETSI][13] = 127,
+ [0][0][1][0][RTW89_MKK][13] = 127,
+ [0][0][1][0][RTW89_IC][13] = 127,
+ [0][0][1][0][RTW89_KCC][13] = 127,
+ [0][0][1][0][RTW89_ACMA][13] = 127,
+ [0][0][1][0][RTW89_CHILE][13] = 127,
+ [0][0][1][0][RTW89_UKRAINE][13] = 127,
+ [0][0][1][0][RTW89_MEXICO][13] = 127,
+ [0][0][1][0][RTW89_CN][13] = 127,
+ [0][0][1][0][RTW89_QATAR][13] = 127,
+ [0][1][1][0][RTW89_FCC][0] = 62,
+ [0][1][1][0][RTW89_ETSI][0] = 46,
+ [0][1][1][0][RTW89_MKK][0] = 64,
+ [0][1][1][0][RTW89_IC][0] = 62,
+ [0][1][1][0][RTW89_KCC][0] = 70,
+ [0][1][1][0][RTW89_ACMA][0] = 46,
+ [0][1][1][0][RTW89_CHILE][0] = 48,
+ [0][1][1][0][RTW89_UKRAINE][0] = 46,
+ [0][1][1][0][RTW89_MEXICO][0] = 62,
+ [0][1][1][0][RTW89_CN][0] = 46,
+ [0][1][1][0][RTW89_QATAR][0] = 46,
+ [0][1][1][0][RTW89_FCC][1] = 62,
+ [0][1][1][0][RTW89_ETSI][1] = 46,
+ [0][1][1][0][RTW89_MKK][1] = 64,
+ [0][1][1][0][RTW89_IC][1] = 62,
+ [0][1][1][0][RTW89_KCC][1] = 70,
+ [0][1][1][0][RTW89_ACMA][1] = 46,
+ [0][1][1][0][RTW89_CHILE][1] = 48,
+ [0][1][1][0][RTW89_UKRAINE][1] = 46,
+ [0][1][1][0][RTW89_MEXICO][1] = 62,
+ [0][1][1][0][RTW89_CN][1] = 46,
+ [0][1][1][0][RTW89_QATAR][1] = 46,
+ [0][1][1][0][RTW89_FCC][2] = 66,
+ [0][1][1][0][RTW89_ETSI][2] = 46,
+ [0][1][1][0][RTW89_MKK][2] = 64,
+ [0][1][1][0][RTW89_IC][2] = 66,
+ [0][1][1][0][RTW89_KCC][2] = 70,
+ [0][1][1][0][RTW89_ACMA][2] = 46,
+ [0][1][1][0][RTW89_CHILE][2] = 48,
+ [0][1][1][0][RTW89_UKRAINE][2] = 46,
+ [0][1][1][0][RTW89_MEXICO][2] = 66,
+ [0][1][1][0][RTW89_CN][2] = 46,
+ [0][1][1][0][RTW89_QATAR][2] = 46,
+ [0][1][1][0][RTW89_FCC][3] = 70,
+ [0][1][1][0][RTW89_ETSI][3] = 46,
+ [0][1][1][0][RTW89_MKK][3] = 64,
+ [0][1][1][0][RTW89_IC][3] = 70,
+ [0][1][1][0][RTW89_KCC][3] = 70,
+ [0][1][1][0][RTW89_ACMA][3] = 46,
+ [0][1][1][0][RTW89_CHILE][3] = 48,
+ [0][1][1][0][RTW89_UKRAINE][3] = 46,
+ [0][1][1][0][RTW89_MEXICO][3] = 70,
+ [0][1][1][0][RTW89_CN][3] = 46,
+ [0][1][1][0][RTW89_QATAR][3] = 46,
+ [0][1][1][0][RTW89_FCC][4] = 78,
+ [0][1][1][0][RTW89_ETSI][4] = 46,
+ [0][1][1][0][RTW89_MKK][4] = 64,
+ [0][1][1][0][RTW89_IC][4] = 78,
+ [0][1][1][0][RTW89_KCC][4] = 70,
+ [0][1][1][0][RTW89_ACMA][4] = 46,
+ [0][1][1][0][RTW89_CHILE][4] = 48,
+ [0][1][1][0][RTW89_UKRAINE][4] = 46,
+ [0][1][1][0][RTW89_MEXICO][4] = 78,
+ [0][1][1][0][RTW89_CN][4] = 46,
+ [0][1][1][0][RTW89_QATAR][4] = 46,
+ [0][1][1][0][RTW89_FCC][5] = 78,
+ [0][1][1][0][RTW89_ETSI][5] = 46,
+ [0][1][1][0][RTW89_MKK][5] = 64,
+ [0][1][1][0][RTW89_IC][5] = 78,
+ [0][1][1][0][RTW89_KCC][5] = 70,
+ [0][1][1][0][RTW89_ACMA][5] = 46,
+ [0][1][1][0][RTW89_CHILE][5] = 48,
+ [0][1][1][0][RTW89_UKRAINE][5] = 46,
+ [0][1][1][0][RTW89_MEXICO][5] = 78,
+ [0][1][1][0][RTW89_CN][5] = 46,
+ [0][1][1][0][RTW89_QATAR][5] = 46,
+ [0][1][1][0][RTW89_FCC][6] = 78,
+ [0][1][1][0][RTW89_ETSI][6] = 46,
+ [0][1][1][0][RTW89_MKK][6] = 64,
+ [0][1][1][0][RTW89_IC][6] = 78,
+ [0][1][1][0][RTW89_KCC][6] = 70,
+ [0][1][1][0][RTW89_ACMA][6] = 46,
+ [0][1][1][0][RTW89_CHILE][6] = 48,
+ [0][1][1][0][RTW89_UKRAINE][6] = 46,
+ [0][1][1][0][RTW89_MEXICO][6] = 78,
+ [0][1][1][0][RTW89_CN][6] = 46,
+ [0][1][1][0][RTW89_QATAR][6] = 46,
+ [0][1][1][0][RTW89_FCC][7] = 70,
+ [0][1][1][0][RTW89_ETSI][7] = 46,
+ [0][1][1][0][RTW89_MKK][7] = 64,
+ [0][1][1][0][RTW89_IC][7] = 70,
+ [0][1][1][0][RTW89_KCC][7] = 70,
+ [0][1][1][0][RTW89_ACMA][7] = 46,
+ [0][1][1][0][RTW89_CHILE][7] = 48,
+ [0][1][1][0][RTW89_UKRAINE][7] = 46,
+ [0][1][1][0][RTW89_MEXICO][7] = 70,
+ [0][1][1][0][RTW89_CN][7] = 46,
+ [0][1][1][0][RTW89_QATAR][7] = 46,
+ [0][1][1][0][RTW89_FCC][8] = 66,
+ [0][1][1][0][RTW89_ETSI][8] = 46,
+ [0][1][1][0][RTW89_MKK][8] = 64,
+ [0][1][1][0][RTW89_IC][8] = 66,
+ [0][1][1][0][RTW89_KCC][8] = 70,
+ [0][1][1][0][RTW89_ACMA][8] = 46,
+ [0][1][1][0][RTW89_CHILE][8] = 48,
+ [0][1][1][0][RTW89_UKRAINE][8] = 46,
+ [0][1][1][0][RTW89_MEXICO][8] = 66,
+ [0][1][1][0][RTW89_CN][8] = 46,
+ [0][1][1][0][RTW89_QATAR][8] = 46,
+ [0][1][1][0][RTW89_FCC][9] = 62,
+ [0][1][1][0][RTW89_ETSI][9] = 46,
+ [0][1][1][0][RTW89_MKK][9] = 64,
+ [0][1][1][0][RTW89_IC][9] = 62,
+ [0][1][1][0][RTW89_KCC][9] = 70,
+ [0][1][1][0][RTW89_ACMA][9] = 46,
+ [0][1][1][0][RTW89_CHILE][9] = 48,
+ [0][1][1][0][RTW89_UKRAINE][9] = 46,
+ [0][1][1][0][RTW89_MEXICO][9] = 62,
+ [0][1][1][0][RTW89_CN][9] = 46,
+ [0][1][1][0][RTW89_QATAR][9] = 46,
+ [0][1][1][0][RTW89_FCC][10] = 62,
+ [0][1][1][0][RTW89_ETSI][10] = 46,
+ [0][1][1][0][RTW89_MKK][10] = 64,
+ [0][1][1][0][RTW89_IC][10] = 62,
+ [0][1][1][0][RTW89_KCC][10] = 70,
+ [0][1][1][0][RTW89_ACMA][10] = 46,
+ [0][1][1][0][RTW89_CHILE][10] = 48,
+ [0][1][1][0][RTW89_UKRAINE][10] = 46,
+ [0][1][1][0][RTW89_MEXICO][10] = 62,
+ [0][1][1][0][RTW89_CN][10] = 46,
+ [0][1][1][0][RTW89_QATAR][10] = 46,
+ [0][1][1][0][RTW89_FCC][11] = 42,
+ [0][1][1][0][RTW89_ETSI][11] = 46,
+ [0][1][1][0][RTW89_MKK][11] = 64,
+ [0][1][1][0][RTW89_IC][11] = 42,
+ [0][1][1][0][RTW89_KCC][11] = 70,
+ [0][1][1][0][RTW89_ACMA][11] = 46,
+ [0][1][1][0][RTW89_CHILE][11] = 42,
+ [0][1][1][0][RTW89_UKRAINE][11] = 46,
+ [0][1][1][0][RTW89_MEXICO][11] = 42,
+ [0][1][1][0][RTW89_CN][11] = 46,
+ [0][1][1][0][RTW89_QATAR][11] = 46,
+ [0][1][1][0][RTW89_FCC][12] = 40,
+ [0][1][1][0][RTW89_ETSI][12] = 46,
+ [0][1][1][0][RTW89_MKK][12] = 64,
+ [0][1][1][0][RTW89_IC][12] = 40,
+ [0][1][1][0][RTW89_KCC][12] = 70,
+ [0][1][1][0][RTW89_ACMA][12] = 46,
+ [0][1][1][0][RTW89_CHILE][12] = 40,
+ [0][1][1][0][RTW89_UKRAINE][12] = 46,
+ [0][1][1][0][RTW89_MEXICO][12] = 40,
+ [0][1][1][0][RTW89_CN][12] = 46,
+ [0][1][1][0][RTW89_QATAR][12] = 46,
+ [0][1][1][0][RTW89_FCC][13] = 127,
+ [0][1][1][0][RTW89_ETSI][13] = 127,
+ [0][1][1][0][RTW89_MKK][13] = 127,
+ [0][1][1][0][RTW89_IC][13] = 127,
+ [0][1][1][0][RTW89_KCC][13] = 127,
+ [0][1][1][0][RTW89_ACMA][13] = 127,
+ [0][1][1][0][RTW89_CHILE][13] = 127,
+ [0][1][1][0][RTW89_UKRAINE][13] = 127,
+ [0][1][1][0][RTW89_MEXICO][13] = 127,
+ [0][1][1][0][RTW89_CN][13] = 127,
+ [0][1][1][0][RTW89_QATAR][13] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 66,
+ [0][0][2][0][RTW89_ETSI][0] = 58,
+ [0][0][2][0][RTW89_MKK][0] = 76,
+ [0][0][2][0][RTW89_IC][0] = 66,
+ [0][0][2][0][RTW89_KCC][0] = 76,
+ [0][0][2][0][RTW89_ACMA][0] = 58,
+ [0][0][2][0][RTW89_CHILE][0] = 60,
+ [0][0][2][0][RTW89_UKRAINE][0] = 58,
+ [0][0][2][0][RTW89_MEXICO][0] = 66,
+ [0][0][2][0][RTW89_CN][0] = 58,
+ [0][0][2][0][RTW89_QATAR][0] = 58,
+ [0][0][2][0][RTW89_FCC][1] = 66,
+ [0][0][2][0][RTW89_ETSI][1] = 58,
+ [0][0][2][0][RTW89_MKK][1] = 76,
+ [0][0][2][0][RTW89_IC][1] = 66,
+ [0][0][2][0][RTW89_KCC][1] = 76,
+ [0][0][2][0][RTW89_ACMA][1] = 58,
+ [0][0][2][0][RTW89_CHILE][1] = 60,
+ [0][0][2][0][RTW89_UKRAINE][1] = 58,
+ [0][0][2][0][RTW89_MEXICO][1] = 66,
+ [0][0][2][0][RTW89_CN][1] = 58,
+ [0][0][2][0][RTW89_QATAR][1] = 58,
+ [0][0][2][0][RTW89_FCC][2] = 70,
+ [0][0][2][0][RTW89_ETSI][2] = 58,
+ [0][0][2][0][RTW89_MKK][2] = 76,
+ [0][0][2][0][RTW89_IC][2] = 70,
+ [0][0][2][0][RTW89_KCC][2] = 76,
+ [0][0][2][0][RTW89_ACMA][2] = 58,
+ [0][0][2][0][RTW89_CHILE][2] = 60,
+ [0][0][2][0][RTW89_UKRAINE][2] = 58,
+ [0][0][2][0][RTW89_MEXICO][2] = 70,
+ [0][0][2][0][RTW89_CN][2] = 58,
+ [0][0][2][0][RTW89_QATAR][2] = 58,
+ [0][0][2][0][RTW89_FCC][3] = 74,
+ [0][0][2][0][RTW89_ETSI][3] = 58,
+ [0][0][2][0][RTW89_MKK][3] = 76,
+ [0][0][2][0][RTW89_IC][3] = 74,
+ [0][0][2][0][RTW89_KCC][3] = 76,
+ [0][0][2][0][RTW89_ACMA][3] = 58,
+ [0][0][2][0][RTW89_CHILE][3] = 60,
+ [0][0][2][0][RTW89_UKRAINE][3] = 58,
+ [0][0][2][0][RTW89_MEXICO][3] = 74,
+ [0][0][2][0][RTW89_CN][3] = 58,
+ [0][0][2][0][RTW89_QATAR][3] = 58,
+ [0][0][2][0][RTW89_FCC][4] = 76,
+ [0][0][2][0][RTW89_ETSI][4] = 58,
+ [0][0][2][0][RTW89_MKK][4] = 76,
+ [0][0][2][0][RTW89_IC][4] = 76,
+ [0][0][2][0][RTW89_KCC][4] = 76,
+ [0][0][2][0][RTW89_ACMA][4] = 58,
+ [0][0][2][0][RTW89_CHILE][4] = 60,
+ [0][0][2][0][RTW89_UKRAINE][4] = 58,
+ [0][0][2][0][RTW89_MEXICO][4] = 76,
+ [0][0][2][0][RTW89_CN][4] = 58,
+ [0][0][2][0][RTW89_QATAR][4] = 58,
+ [0][0][2][0][RTW89_FCC][5] = 76,
+ [0][0][2][0][RTW89_ETSI][5] = 58,
+ [0][0][2][0][RTW89_MKK][5] = 76,
+ [0][0][2][0][RTW89_IC][5] = 76,
+ [0][0][2][0][RTW89_KCC][5] = 76,
+ [0][0][2][0][RTW89_ACMA][5] = 58,
+ [0][0][2][0][RTW89_CHILE][5] = 60,
+ [0][0][2][0][RTW89_UKRAINE][5] = 58,
+ [0][0][2][0][RTW89_MEXICO][5] = 76,
+ [0][0][2][0][RTW89_CN][5] = 58,
+ [0][0][2][0][RTW89_QATAR][5] = 58,
+ [0][0][2][0][RTW89_FCC][6] = 76,
+ [0][0][2][0][RTW89_ETSI][6] = 58,
+ [0][0][2][0][RTW89_MKK][6] = 76,
+ [0][0][2][0][RTW89_IC][6] = 76,
+ [0][0][2][0][RTW89_KCC][6] = 76,
+ [0][0][2][0][RTW89_ACMA][6] = 58,
+ [0][0][2][0][RTW89_CHILE][6] = 60,
+ [0][0][2][0][RTW89_UKRAINE][6] = 58,
+ [0][0][2][0][RTW89_MEXICO][6] = 76,
+ [0][0][2][0][RTW89_CN][6] = 58,
+ [0][0][2][0][RTW89_QATAR][6] = 58,
+ [0][0][2][0][RTW89_FCC][7] = 74,
+ [0][0][2][0][RTW89_ETSI][7] = 58,
+ [0][0][2][0][RTW89_MKK][7] = 76,
+ [0][0][2][0][RTW89_IC][7] = 74,
+ [0][0][2][0][RTW89_KCC][7] = 76,
+ [0][0][2][0][RTW89_ACMA][7] = 58,
+ [0][0][2][0][RTW89_CHILE][7] = 60,
+ [0][0][2][0][RTW89_UKRAINE][7] = 58,
+ [0][0][2][0][RTW89_MEXICO][7] = 74,
+ [0][0][2][0][RTW89_CN][7] = 58,
+ [0][0][2][0][RTW89_QATAR][7] = 58,
+ [0][0][2][0][RTW89_FCC][8] = 70,
+ [0][0][2][0][RTW89_ETSI][8] = 58,
+ [0][0][2][0][RTW89_MKK][8] = 76,
+ [0][0][2][0][RTW89_IC][8] = 70,
+ [0][0][2][0][RTW89_KCC][8] = 76,
+ [0][0][2][0][RTW89_ACMA][8] = 58,
+ [0][0][2][0][RTW89_CHILE][8] = 60,
+ [0][0][2][0][RTW89_UKRAINE][8] = 58,
+ [0][0][2][0][RTW89_MEXICO][8] = 70,
+ [0][0][2][0][RTW89_CN][8] = 58,
+ [0][0][2][0][RTW89_QATAR][8] = 58,
+ [0][0][2][0][RTW89_FCC][9] = 66,
+ [0][0][2][0][RTW89_ETSI][9] = 58,
+ [0][0][2][0][RTW89_MKK][9] = 76,
+ [0][0][2][0][RTW89_IC][9] = 66,
+ [0][0][2][0][RTW89_KCC][9] = 76,
+ [0][0][2][0][RTW89_ACMA][9] = 58,
+ [0][0][2][0][RTW89_CHILE][9] = 60,
+ [0][0][2][0][RTW89_UKRAINE][9] = 58,
+ [0][0][2][0][RTW89_MEXICO][9] = 66,
+ [0][0][2][0][RTW89_CN][9] = 58,
+ [0][0][2][0][RTW89_QATAR][9] = 58,
+ [0][0][2][0][RTW89_FCC][10] = 66,
+ [0][0][2][0][RTW89_ETSI][10] = 58,
+ [0][0][2][0][RTW89_MKK][10] = 76,
+ [0][0][2][0][RTW89_IC][10] = 66,
+ [0][0][2][0][RTW89_KCC][10] = 76,
+ [0][0][2][0][RTW89_ACMA][10] = 58,
+ [0][0][2][0][RTW89_CHILE][10] = 60,
+ [0][0][2][0][RTW89_UKRAINE][10] = 58,
+ [0][0][2][0][RTW89_MEXICO][10] = 66,
+ [0][0][2][0][RTW89_CN][10] = 58,
+ [0][0][2][0][RTW89_QATAR][10] = 58,
+ [0][0][2][0][RTW89_FCC][11] = 54,
+ [0][0][2][0][RTW89_ETSI][11] = 58,
+ [0][0][2][0][RTW89_MKK][11] = 76,
+ [0][0][2][0][RTW89_IC][11] = 54,
+ [0][0][2][0][RTW89_KCC][11] = 76,
+ [0][0][2][0][RTW89_ACMA][11] = 58,
+ [0][0][2][0][RTW89_CHILE][11] = 54,
+ [0][0][2][0][RTW89_UKRAINE][11] = 58,
+ [0][0][2][0][RTW89_MEXICO][11] = 54,
+ [0][0][2][0][RTW89_CN][11] = 58,
+ [0][0][2][0][RTW89_QATAR][11] = 58,
+ [0][0][2][0][RTW89_FCC][12] = 50,
+ [0][0][2][0][RTW89_ETSI][12] = 58,
+ [0][0][2][0][RTW89_MKK][12] = 76,
+ [0][0][2][0][RTW89_IC][12] = 50,
+ [0][0][2][0][RTW89_KCC][12] = 74,
+ [0][0][2][0][RTW89_ACMA][12] = 58,
+ [0][0][2][0][RTW89_CHILE][12] = 50,
+ [0][0][2][0][RTW89_UKRAINE][12] = 58,
+ [0][0][2][0][RTW89_MEXICO][12] = 50,
+ [0][0][2][0][RTW89_CN][12] = 58,
+ [0][0][2][0][RTW89_QATAR][12] = 58,
+ [0][0][2][0][RTW89_FCC][13] = 127,
+ [0][0][2][0][RTW89_ETSI][13] = 127,
+ [0][0][2][0][RTW89_MKK][13] = 127,
+ [0][0][2][0][RTW89_IC][13] = 127,
+ [0][0][2][0][RTW89_KCC][13] = 127,
+ [0][0][2][0][RTW89_ACMA][13] = 127,
+ [0][0][2][0][RTW89_CHILE][13] = 127,
+ [0][0][2][0][RTW89_UKRAINE][13] = 127,
+ [0][0][2][0][RTW89_MEXICO][13] = 127,
+ [0][0][2][0][RTW89_CN][13] = 127,
+ [0][0][2][0][RTW89_QATAR][13] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 62,
+ [0][1][2][0][RTW89_ETSI][0] = 46,
+ [0][1][2][0][RTW89_MKK][0] = 64,
+ [0][1][2][0][RTW89_IC][0] = 62,
+ [0][1][2][0][RTW89_KCC][0] = 68,
+ [0][1][2][0][RTW89_ACMA][0] = 46,
+ [0][1][2][0][RTW89_CHILE][0] = 48,
+ [0][1][2][0][RTW89_UKRAINE][0] = 46,
+ [0][1][2][0][RTW89_MEXICO][0] = 62,
+ [0][1][2][0][RTW89_CN][0] = 46,
+ [0][1][2][0][RTW89_QATAR][0] = 46,
+ [0][1][2][0][RTW89_FCC][1] = 62,
+ [0][1][2][0][RTW89_ETSI][1] = 46,
+ [0][1][2][0][RTW89_MKK][1] = 64,
+ [0][1][2][0][RTW89_IC][1] = 62,
+ [0][1][2][0][RTW89_KCC][1] = 70,
+ [0][1][2][0][RTW89_ACMA][1] = 46,
+ [0][1][2][0][RTW89_CHILE][1] = 48,
+ [0][1][2][0][RTW89_UKRAINE][1] = 46,
+ [0][1][2][0][RTW89_MEXICO][1] = 62,
+ [0][1][2][0][RTW89_CN][1] = 46,
+ [0][1][2][0][RTW89_QATAR][1] = 46,
+ [0][1][2][0][RTW89_FCC][2] = 66,
+ [0][1][2][0][RTW89_ETSI][2] = 46,
+ [0][1][2][0][RTW89_MKK][2] = 64,
+ [0][1][2][0][RTW89_IC][2] = 66,
+ [0][1][2][0][RTW89_KCC][2] = 70,
+ [0][1][2][0][RTW89_ACMA][2] = 46,
+ [0][1][2][0][RTW89_CHILE][2] = 48,
+ [0][1][2][0][RTW89_UKRAINE][2] = 46,
+ [0][1][2][0][RTW89_MEXICO][2] = 66,
+ [0][1][2][0][RTW89_CN][2] = 46,
+ [0][1][2][0][RTW89_QATAR][2] = 46,
+ [0][1][2][0][RTW89_FCC][3] = 70,
+ [0][1][2][0][RTW89_ETSI][3] = 46,
+ [0][1][2][0][RTW89_MKK][3] = 64,
+ [0][1][2][0][RTW89_IC][3] = 70,
+ [0][1][2][0][RTW89_KCC][3] = 70,
+ [0][1][2][0][RTW89_ACMA][3] = 46,
+ [0][1][2][0][RTW89_CHILE][3] = 48,
+ [0][1][2][0][RTW89_UKRAINE][3] = 46,
+ [0][1][2][0][RTW89_MEXICO][3] = 70,
+ [0][1][2][0][RTW89_CN][3] = 46,
+ [0][1][2][0][RTW89_QATAR][3] = 46,
+ [0][1][2][0][RTW89_FCC][4] = 76,
+ [0][1][2][0][RTW89_ETSI][4] = 46,
+ [0][1][2][0][RTW89_MKK][4] = 64,
+ [0][1][2][0][RTW89_IC][4] = 76,
+ [0][1][2][0][RTW89_KCC][4] = 70,
+ [0][1][2][0][RTW89_ACMA][4] = 46,
+ [0][1][2][0][RTW89_CHILE][4] = 48,
+ [0][1][2][0][RTW89_UKRAINE][4] = 46,
+ [0][1][2][0][RTW89_MEXICO][4] = 76,
+ [0][1][2][0][RTW89_CN][4] = 46,
+ [0][1][2][0][RTW89_QATAR][4] = 46,
+ [0][1][2][0][RTW89_FCC][5] = 76,
+ [0][1][2][0][RTW89_ETSI][5] = 46,
+ [0][1][2][0][RTW89_MKK][5] = 64,
+ [0][1][2][0][RTW89_IC][5] = 76,
+ [0][1][2][0][RTW89_KCC][5] = 70,
+ [0][1][2][0][RTW89_ACMA][5] = 46,
+ [0][1][2][0][RTW89_CHILE][5] = 48,
+ [0][1][2][0][RTW89_UKRAINE][5] = 46,
+ [0][1][2][0][RTW89_MEXICO][5] = 76,
+ [0][1][2][0][RTW89_CN][5] = 46,
+ [0][1][2][0][RTW89_QATAR][5] = 46,
+ [0][1][2][0][RTW89_FCC][6] = 76,
+ [0][1][2][0][RTW89_ETSI][6] = 46,
+ [0][1][2][0][RTW89_MKK][6] = 64,
+ [0][1][2][0][RTW89_IC][6] = 76,
+ [0][1][2][0][RTW89_KCC][6] = 70,
+ [0][1][2][0][RTW89_ACMA][6] = 46,
+ [0][1][2][0][RTW89_CHILE][6] = 48,
+ [0][1][2][0][RTW89_UKRAINE][6] = 46,
+ [0][1][2][0][RTW89_MEXICO][6] = 76,
+ [0][1][2][0][RTW89_CN][6] = 46,
+ [0][1][2][0][RTW89_QATAR][6] = 46,
+ [0][1][2][0][RTW89_FCC][7] = 68,
+ [0][1][2][0][RTW89_ETSI][7] = 46,
+ [0][1][2][0][RTW89_MKK][7] = 64,
+ [0][1][2][0][RTW89_IC][7] = 68,
+ [0][1][2][0][RTW89_KCC][7] = 70,
+ [0][1][2][0][RTW89_ACMA][7] = 46,
+ [0][1][2][0][RTW89_CHILE][7] = 48,
+ [0][1][2][0][RTW89_UKRAINE][7] = 46,
+ [0][1][2][0][RTW89_MEXICO][7] = 68,
+ [0][1][2][0][RTW89_CN][7] = 46,
+ [0][1][2][0][RTW89_QATAR][7] = 46,
+ [0][1][2][0][RTW89_FCC][8] = 64,
+ [0][1][2][0][RTW89_ETSI][8] = 46,
+ [0][1][2][0][RTW89_MKK][8] = 64,
+ [0][1][2][0][RTW89_IC][8] = 64,
+ [0][1][2][0][RTW89_KCC][8] = 70,
+ [0][1][2][0][RTW89_ACMA][8] = 46,
+ [0][1][2][0][RTW89_CHILE][8] = 48,
+ [0][1][2][0][RTW89_UKRAINE][8] = 46,
+ [0][1][2][0][RTW89_MEXICO][8] = 64,
+ [0][1][2][0][RTW89_CN][8] = 46,
+ [0][1][2][0][RTW89_QATAR][8] = 46,
+ [0][1][2][0][RTW89_FCC][9] = 60,
+ [0][1][2][0][RTW89_ETSI][9] = 46,
+ [0][1][2][0][RTW89_MKK][9] = 64,
+ [0][1][2][0][RTW89_IC][9] = 60,
+ [0][1][2][0][RTW89_KCC][9] = 70,
+ [0][1][2][0][RTW89_ACMA][9] = 46,
+ [0][1][2][0][RTW89_CHILE][9] = 48,
+ [0][1][2][0][RTW89_UKRAINE][9] = 46,
+ [0][1][2][0][RTW89_MEXICO][9] = 60,
+ [0][1][2][0][RTW89_CN][9] = 46,
+ [0][1][2][0][RTW89_QATAR][9] = 46,
+ [0][1][2][0][RTW89_FCC][10] = 60,
+ [0][1][2][0][RTW89_ETSI][10] = 46,
+ [0][1][2][0][RTW89_MKK][10] = 64,
+ [0][1][2][0][RTW89_IC][10] = 60,
+ [0][1][2][0][RTW89_KCC][10] = 70,
+ [0][1][2][0][RTW89_ACMA][10] = 46,
+ [0][1][2][0][RTW89_CHILE][10] = 48,
+ [0][1][2][0][RTW89_UKRAINE][10] = 46,
+ [0][1][2][0][RTW89_MEXICO][10] = 60,
+ [0][1][2][0][RTW89_CN][10] = 46,
+ [0][1][2][0][RTW89_QATAR][10] = 46,
+ [0][1][2][0][RTW89_FCC][11] = 42,
+ [0][1][2][0][RTW89_ETSI][11] = 46,
+ [0][1][2][0][RTW89_MKK][11] = 64,
+ [0][1][2][0][RTW89_IC][11] = 42,
+ [0][1][2][0][RTW89_KCC][11] = 70,
+ [0][1][2][0][RTW89_ACMA][11] = 46,
+ [0][1][2][0][RTW89_CHILE][11] = 42,
+ [0][1][2][0][RTW89_UKRAINE][11] = 46,
+ [0][1][2][0][RTW89_MEXICO][11] = 42,
+ [0][1][2][0][RTW89_CN][11] = 46,
+ [0][1][2][0][RTW89_QATAR][11] = 46,
+ [0][1][2][0][RTW89_FCC][12] = 40,
+ [0][1][2][0][RTW89_ETSI][12] = 46,
+ [0][1][2][0][RTW89_MKK][12] = 64,
+ [0][1][2][0][RTW89_IC][12] = 40,
+ [0][1][2][0][RTW89_KCC][12] = 68,
+ [0][1][2][0][RTW89_ACMA][12] = 46,
+ [0][1][2][0][RTW89_CHILE][12] = 40,
+ [0][1][2][0][RTW89_UKRAINE][12] = 46,
+ [0][1][2][0][RTW89_MEXICO][12] = 40,
+ [0][1][2][0][RTW89_CN][12] = 46,
+ [0][1][2][0][RTW89_QATAR][12] = 46,
+ [0][1][2][0][RTW89_FCC][13] = 127,
+ [0][1][2][0][RTW89_ETSI][13] = 127,
+ [0][1][2][0][RTW89_MKK][13] = 127,
+ [0][1][2][0][RTW89_IC][13] = 127,
+ [0][1][2][0][RTW89_KCC][13] = 127,
+ [0][1][2][0][RTW89_ACMA][13] = 127,
+ [0][1][2][0][RTW89_CHILE][13] = 127,
+ [0][1][2][0][RTW89_UKRAINE][13] = 127,
+ [0][1][2][0][RTW89_MEXICO][13] = 127,
+ [0][1][2][0][RTW89_CN][13] = 127,
+ [0][1][2][0][RTW89_QATAR][13] = 127,
+ [0][1][2][1][RTW89_FCC][0] = 62,
+ [0][1][2][1][RTW89_ETSI][0] = 34,
+ [0][1][2][1][RTW89_MKK][0] = 64,
+ [0][1][2][1][RTW89_IC][0] = 62,
+ [0][1][2][1][RTW89_KCC][0] = 68,
+ [0][1][2][1][RTW89_ACMA][0] = 34,
+ [0][1][2][1][RTW89_CHILE][0] = 36,
+ [0][1][2][1][RTW89_UKRAINE][0] = 34,
+ [0][1][2][1][RTW89_MEXICO][0] = 62,
+ [0][1][2][1][RTW89_CN][0] = 34,
+ [0][1][2][1][RTW89_QATAR][0] = 34,
+ [0][1][2][1][RTW89_FCC][1] = 62,
+ [0][1][2][1][RTW89_ETSI][1] = 34,
+ [0][1][2][1][RTW89_MKK][1] = 64,
+ [0][1][2][1][RTW89_IC][1] = 62,
+ [0][1][2][1][RTW89_KCC][1] = 70,
+ [0][1][2][1][RTW89_ACMA][1] = 34,
+ [0][1][2][1][RTW89_CHILE][1] = 36,
+ [0][1][2][1][RTW89_UKRAINE][1] = 34,
+ [0][1][2][1][RTW89_MEXICO][1] = 62,
+ [0][1][2][1][RTW89_CN][1] = 34,
+ [0][1][2][1][RTW89_QATAR][1] = 34,
+ [0][1][2][1][RTW89_FCC][2] = 66,
+ [0][1][2][1][RTW89_ETSI][2] = 34,
+ [0][1][2][1][RTW89_MKK][2] = 64,
+ [0][1][2][1][RTW89_IC][2] = 66,
+ [0][1][2][1][RTW89_KCC][2] = 70,
+ [0][1][2][1][RTW89_ACMA][2] = 34,
+ [0][1][2][1][RTW89_CHILE][2] = 36,
+ [0][1][2][1][RTW89_UKRAINE][2] = 34,
+ [0][1][2][1][RTW89_MEXICO][2] = 66,
+ [0][1][2][1][RTW89_CN][2] = 34,
+ [0][1][2][1][RTW89_QATAR][2] = 34,
+ [0][1][2][1][RTW89_FCC][3] = 70,
+ [0][1][2][1][RTW89_ETSI][3] = 34,
+ [0][1][2][1][RTW89_MKK][3] = 64,
+ [0][1][2][1][RTW89_IC][3] = 70,
+ [0][1][2][1][RTW89_KCC][3] = 70,
+ [0][1][2][1][RTW89_ACMA][3] = 34,
+ [0][1][2][1][RTW89_CHILE][3] = 36,
+ [0][1][2][1][RTW89_UKRAINE][3] = 34,
+ [0][1][2][1][RTW89_MEXICO][3] = 70,
+ [0][1][2][1][RTW89_CN][3] = 34,
+ [0][1][2][1][RTW89_QATAR][3] = 34,
+ [0][1][2][1][RTW89_FCC][4] = 76,
+ [0][1][2][1][RTW89_ETSI][4] = 34,
+ [0][1][2][1][RTW89_MKK][4] = 64,
+ [0][1][2][1][RTW89_IC][4] = 76,
+ [0][1][2][1][RTW89_KCC][4] = 70,
+ [0][1][2][1][RTW89_ACMA][4] = 34,
+ [0][1][2][1][RTW89_CHILE][4] = 36,
+ [0][1][2][1][RTW89_UKRAINE][4] = 34,
+ [0][1][2][1][RTW89_MEXICO][4] = 76,
+ [0][1][2][1][RTW89_CN][4] = 34,
+ [0][1][2][1][RTW89_QATAR][4] = 34,
+ [0][1][2][1][RTW89_FCC][5] = 76,
+ [0][1][2][1][RTW89_ETSI][5] = 34,
+ [0][1][2][1][RTW89_MKK][5] = 64,
+ [0][1][2][1][RTW89_IC][5] = 76,
+ [0][1][2][1][RTW89_KCC][5] = 70,
+ [0][1][2][1][RTW89_ACMA][5] = 34,
+ [0][1][2][1][RTW89_CHILE][5] = 36,
+ [0][1][2][1][RTW89_UKRAINE][5] = 34,
+ [0][1][2][1][RTW89_MEXICO][5] = 76,
+ [0][1][2][1][RTW89_CN][5] = 34,
+ [0][1][2][1][RTW89_QATAR][5] = 34,
+ [0][1][2][1][RTW89_FCC][6] = 76,
+ [0][1][2][1][RTW89_ETSI][6] = 34,
+ [0][1][2][1][RTW89_MKK][6] = 64,
+ [0][1][2][1][RTW89_IC][6] = 76,
+ [0][1][2][1][RTW89_KCC][6] = 70,
+ [0][1][2][1][RTW89_ACMA][6] = 34,
+ [0][1][2][1][RTW89_CHILE][6] = 36,
+ [0][1][2][1][RTW89_UKRAINE][6] = 34,
+ [0][1][2][1][RTW89_MEXICO][6] = 76,
+ [0][1][2][1][RTW89_CN][6] = 34,
+ [0][1][2][1][RTW89_QATAR][6] = 34,
+ [0][1][2][1][RTW89_FCC][7] = 68,
+ [0][1][2][1][RTW89_ETSI][7] = 34,
+ [0][1][2][1][RTW89_MKK][7] = 64,
+ [0][1][2][1][RTW89_IC][7] = 68,
+ [0][1][2][1][RTW89_KCC][7] = 70,
+ [0][1][2][1][RTW89_ACMA][7] = 34,
+ [0][1][2][1][RTW89_CHILE][7] = 36,
+ [0][1][2][1][RTW89_UKRAINE][7] = 34,
+ [0][1][2][1][RTW89_MEXICO][7] = 68,
+ [0][1][2][1][RTW89_CN][7] = 34,
+ [0][1][2][1][RTW89_QATAR][7] = 34,
+ [0][1][2][1][RTW89_FCC][8] = 64,
+ [0][1][2][1][RTW89_ETSI][8] = 34,
+ [0][1][2][1][RTW89_MKK][8] = 64,
+ [0][1][2][1][RTW89_IC][8] = 64,
+ [0][1][2][1][RTW89_KCC][8] = 70,
+ [0][1][2][1][RTW89_ACMA][8] = 34,
+ [0][1][2][1][RTW89_CHILE][8] = 36,
+ [0][1][2][1][RTW89_UKRAINE][8] = 34,
+ [0][1][2][1][RTW89_MEXICO][8] = 64,
+ [0][1][2][1][RTW89_CN][8] = 34,
+ [0][1][2][1][RTW89_QATAR][8] = 34,
+ [0][1][2][1][RTW89_FCC][9] = 60,
+ [0][1][2][1][RTW89_ETSI][9] = 34,
+ [0][1][2][1][RTW89_MKK][9] = 64,
+ [0][1][2][1][RTW89_IC][9] = 60,
+ [0][1][2][1][RTW89_KCC][9] = 70,
+ [0][1][2][1][RTW89_ACMA][9] = 34,
+ [0][1][2][1][RTW89_CHILE][9] = 36,
+ [0][1][2][1][RTW89_UKRAINE][9] = 34,
+ [0][1][2][1][RTW89_MEXICO][9] = 60,
+ [0][1][2][1][RTW89_CN][9] = 34,
+ [0][1][2][1][RTW89_QATAR][9] = 34,
+ [0][1][2][1][RTW89_FCC][10] = 60,
+ [0][1][2][1][RTW89_ETSI][10] = 34,
+ [0][1][2][1][RTW89_MKK][10] = 64,
+ [0][1][2][1][RTW89_IC][10] = 60,
+ [0][1][2][1][RTW89_KCC][10] = 70,
+ [0][1][2][1][RTW89_ACMA][10] = 34,
+ [0][1][2][1][RTW89_CHILE][10] = 36,
+ [0][1][2][1][RTW89_UKRAINE][10] = 34,
+ [0][1][2][1][RTW89_MEXICO][10] = 60,
+ [0][1][2][1][RTW89_CN][10] = 34,
+ [0][1][2][1][RTW89_QATAR][10] = 34,
+ [0][1][2][1][RTW89_FCC][11] = 42,
+ [0][1][2][1][RTW89_ETSI][11] = 34,
+ [0][1][2][1][RTW89_MKK][11] = 64,
+ [0][1][2][1][RTW89_IC][11] = 42,
+ [0][1][2][1][RTW89_KCC][11] = 70,
+ [0][1][2][1][RTW89_ACMA][11] = 34,
+ [0][1][2][1][RTW89_CHILE][11] = 36,
+ [0][1][2][1][RTW89_UKRAINE][11] = 34,
+ [0][1][2][1][RTW89_MEXICO][11] = 42,
+ [0][1][2][1][RTW89_CN][11] = 34,
+ [0][1][2][1][RTW89_QATAR][11] = 34,
+ [0][1][2][1][RTW89_FCC][12] = 40,
+ [0][1][2][1][RTW89_ETSI][12] = 34,
+ [0][1][2][1][RTW89_MKK][12] = 64,
+ [0][1][2][1][RTW89_IC][12] = 40,
+ [0][1][2][1][RTW89_KCC][12] = 68,
+ [0][1][2][1][RTW89_ACMA][12] = 34,
+ [0][1][2][1][RTW89_CHILE][12] = 36,
+ [0][1][2][1][RTW89_UKRAINE][12] = 34,
+ [0][1][2][1][RTW89_MEXICO][12] = 40,
+ [0][1][2][1][RTW89_CN][12] = 34,
+ [0][1][2][1][RTW89_QATAR][12] = 34,
+ [0][1][2][1][RTW89_FCC][13] = 127,
+ [0][1][2][1][RTW89_ETSI][13] = 127,
+ [0][1][2][1][RTW89_MKK][13] = 127,
+ [0][1][2][1][RTW89_IC][13] = 127,
+ [0][1][2][1][RTW89_KCC][13] = 127,
+ [0][1][2][1][RTW89_ACMA][13] = 127,
+ [0][1][2][1][RTW89_CHILE][13] = 127,
+ [0][1][2][1][RTW89_UKRAINE][13] = 127,
+ [0][1][2][1][RTW89_MEXICO][13] = 127,
+ [0][1][2][1][RTW89_CN][13] = 127,
+ [0][1][2][1][RTW89_QATAR][13] = 127,
+ [1][0][2][0][RTW89_FCC][0] = 127,
+ [1][0][2][0][RTW89_ETSI][0] = 127,
+ [1][0][2][0][RTW89_MKK][0] = 127,
+ [1][0][2][0][RTW89_IC][0] = 127,
+ [1][0][2][0][RTW89_KCC][0] = 127,
+ [1][0][2][0][RTW89_ACMA][0] = 127,
+ [1][0][2][0][RTW89_CHILE][0] = 127,
+ [1][0][2][0][RTW89_UKRAINE][0] = 127,
+ [1][0][2][0][RTW89_MEXICO][0] = 127,
+ [1][0][2][0][RTW89_CN][0] = 127,
+ [1][0][2][0][RTW89_QATAR][0] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 127,
+ [1][0][2][0][RTW89_ETSI][1] = 127,
+ [1][0][2][0][RTW89_MKK][1] = 127,
+ [1][0][2][0][RTW89_IC][1] = 127,
+ [1][0][2][0][RTW89_KCC][1] = 127,
+ [1][0][2][0][RTW89_ACMA][1] = 127,
+ [1][0][2][0][RTW89_CHILE][1] = 127,
+ [1][0][2][0][RTW89_UKRAINE][1] = 127,
+ [1][0][2][0][RTW89_MEXICO][1] = 127,
+ [1][0][2][0][RTW89_CN][1] = 127,
+ [1][0][2][0][RTW89_QATAR][1] = 127,
+ [1][0][2][0][RTW89_FCC][2] = 56,
+ [1][0][2][0][RTW89_ETSI][2] = 58,
+ [1][0][2][0][RTW89_MKK][2] = 68,
+ [1][0][2][0][RTW89_IC][2] = 56,
+ [1][0][2][0][RTW89_KCC][2] = 68,
+ [1][0][2][0][RTW89_ACMA][2] = 58,
+ [1][0][2][0][RTW89_CHILE][2] = 56,
+ [1][0][2][0][RTW89_UKRAINE][2] = 58,
+ [1][0][2][0][RTW89_MEXICO][2] = 56,
+ [1][0][2][0][RTW89_CN][2] = 58,
+ [1][0][2][0][RTW89_QATAR][2] = 58,
+ [1][0][2][0][RTW89_FCC][3] = 56,
+ [1][0][2][0][RTW89_ETSI][3] = 58,
+ [1][0][2][0][RTW89_MKK][3] = 68,
+ [1][0][2][0][RTW89_IC][3] = 56,
+ [1][0][2][0][RTW89_KCC][3] = 68,
+ [1][0][2][0][RTW89_ACMA][3] = 58,
+ [1][0][2][0][RTW89_CHILE][3] = 56,
+ [1][0][2][0][RTW89_UKRAINE][3] = 58,
+ [1][0][2][0][RTW89_MEXICO][3] = 56,
+ [1][0][2][0][RTW89_CN][3] = 58,
+ [1][0][2][0][RTW89_QATAR][3] = 58,
+ [1][0][2][0][RTW89_FCC][4] = 60,
+ [1][0][2][0][RTW89_ETSI][4] = 58,
+ [1][0][2][0][RTW89_MKK][4] = 68,
+ [1][0][2][0][RTW89_IC][4] = 60,
+ [1][0][2][0][RTW89_KCC][4] = 68,
+ [1][0][2][0][RTW89_ACMA][4] = 58,
+ [1][0][2][0][RTW89_CHILE][4] = 60,
+ [1][0][2][0][RTW89_UKRAINE][4] = 58,
+ [1][0][2][0][RTW89_MEXICO][4] = 60,
+ [1][0][2][0][RTW89_CN][4] = 58,
+ [1][0][2][0][RTW89_QATAR][4] = 58,
+ [1][0][2][0][RTW89_FCC][5] = 64,
+ [1][0][2][0][RTW89_ETSI][5] = 58,
+ [1][0][2][0][RTW89_MKK][5] = 68,
+ [1][0][2][0][RTW89_IC][5] = 64,
+ [1][0][2][0][RTW89_KCC][5] = 68,
+ [1][0][2][0][RTW89_ACMA][5] = 58,
+ [1][0][2][0][RTW89_CHILE][5] = 60,
+ [1][0][2][0][RTW89_UKRAINE][5] = 58,
+ [1][0][2][0][RTW89_MEXICO][5] = 64,
+ [1][0][2][0][RTW89_CN][5] = 58,
+ [1][0][2][0][RTW89_QATAR][5] = 58,
+ [1][0][2][0][RTW89_FCC][6] = 54,
+ [1][0][2][0][RTW89_ETSI][6] = 58,
+ [1][0][2][0][RTW89_MKK][6] = 68,
+ [1][0][2][0][RTW89_IC][6] = 54,
+ [1][0][2][0][RTW89_KCC][6] = 68,
+ [1][0][2][0][RTW89_ACMA][6] = 58,
+ [1][0][2][0][RTW89_CHILE][6] = 54,
+ [1][0][2][0][RTW89_UKRAINE][6] = 58,
+ [1][0][2][0][RTW89_MEXICO][6] = 54,
+ [1][0][2][0][RTW89_CN][6] = 58,
+ [1][0][2][0][RTW89_QATAR][6] = 58,
+ [1][0][2][0][RTW89_FCC][7] = 50,
+ [1][0][2][0][RTW89_ETSI][7] = 58,
+ [1][0][2][0][RTW89_MKK][7] = 68,
+ [1][0][2][0][RTW89_IC][7] = 50,
+ [1][0][2][0][RTW89_KCC][7] = 68,
+ [1][0][2][0][RTW89_ACMA][7] = 58,
+ [1][0][2][0][RTW89_CHILE][7] = 50,
+ [1][0][2][0][RTW89_UKRAINE][7] = 58,
+ [1][0][2][0][RTW89_MEXICO][7] = 50,
+ [1][0][2][0][RTW89_CN][7] = 58,
+ [1][0][2][0][RTW89_QATAR][7] = 58,
+ [1][0][2][0][RTW89_FCC][8] = 50,
+ [1][0][2][0][RTW89_ETSI][8] = 58,
+ [1][0][2][0][RTW89_MKK][8] = 68,
+ [1][0][2][0][RTW89_IC][8] = 50,
+ [1][0][2][0][RTW89_KCC][8] = 68,
+ [1][0][2][0][RTW89_ACMA][8] = 58,
+ [1][0][2][0][RTW89_CHILE][8] = 50,
+ [1][0][2][0][RTW89_UKRAINE][8] = 58,
+ [1][0][2][0][RTW89_MEXICO][8] = 50,
+ [1][0][2][0][RTW89_CN][8] = 58,
+ [1][0][2][0][RTW89_QATAR][8] = 58,
+ [1][0][2][0][RTW89_FCC][9] = 42,
+ [1][0][2][0][RTW89_ETSI][9] = 58,
+ [1][0][2][0][RTW89_MKK][9] = 68,
+ [1][0][2][0][RTW89_IC][9] = 42,
+ [1][0][2][0][RTW89_KCC][9] = 68,
+ [1][0][2][0][RTW89_ACMA][9] = 58,
+ [1][0][2][0][RTW89_CHILE][9] = 42,
+ [1][0][2][0][RTW89_UKRAINE][9] = 58,
+ [1][0][2][0][RTW89_MEXICO][9] = 42,
+ [1][0][2][0][RTW89_CN][9] = 58,
+ [1][0][2][0][RTW89_QATAR][9] = 58,
+ [1][0][2][0][RTW89_FCC][10] = 40,
+ [1][0][2][0][RTW89_ETSI][10] = 58,
+ [1][0][2][0][RTW89_MKK][10] = 68,
+ [1][0][2][0][RTW89_IC][10] = 40,
+ [1][0][2][0][RTW89_KCC][10] = 68,
+ [1][0][2][0][RTW89_ACMA][10] = 58,
+ [1][0][2][0][RTW89_CHILE][10] = 40,
+ [1][0][2][0][RTW89_UKRAINE][10] = 58,
+ [1][0][2][0][RTW89_MEXICO][10] = 40,
+ [1][0][2][0][RTW89_CN][10] = 58,
+ [1][0][2][0][RTW89_QATAR][10] = 58,
+ [1][0][2][0][RTW89_FCC][11] = 127,
+ [1][0][2][0][RTW89_ETSI][11] = 127,
+ [1][0][2][0][RTW89_MKK][11] = 127,
+ [1][0][2][0][RTW89_IC][11] = 127,
+ [1][0][2][0][RTW89_KCC][11] = 127,
+ [1][0][2][0][RTW89_ACMA][11] = 127,
+ [1][0][2][0][RTW89_CHILE][11] = 127,
+ [1][0][2][0][RTW89_UKRAINE][11] = 127,
+ [1][0][2][0][RTW89_MEXICO][11] = 127,
+ [1][0][2][0][RTW89_CN][11] = 127,
+ [1][0][2][0][RTW89_QATAR][11] = 127,
+ [1][0][2][0][RTW89_FCC][12] = 127,
+ [1][0][2][0][RTW89_ETSI][12] = 127,
+ [1][0][2][0][RTW89_MKK][12] = 127,
+ [1][0][2][0][RTW89_IC][12] = 127,
+ [1][0][2][0][RTW89_KCC][12] = 127,
+ [1][0][2][0][RTW89_ACMA][12] = 127,
+ [1][0][2][0][RTW89_CHILE][12] = 127,
+ [1][0][2][0][RTW89_UKRAINE][12] = 127,
+ [1][0][2][0][RTW89_MEXICO][12] = 127,
+ [1][0][2][0][RTW89_CN][12] = 127,
+ [1][0][2][0][RTW89_QATAR][12] = 127,
+ [1][0][2][0][RTW89_FCC][13] = 127,
+ [1][0][2][0][RTW89_ETSI][13] = 127,
+ [1][0][2][0][RTW89_MKK][13] = 127,
+ [1][0][2][0][RTW89_IC][13] = 127,
+ [1][0][2][0][RTW89_KCC][13] = 127,
+ [1][0][2][0][RTW89_ACMA][13] = 127,
+ [1][0][2][0][RTW89_CHILE][13] = 127,
+ [1][0][2][0][RTW89_UKRAINE][13] = 127,
+ [1][0][2][0][RTW89_MEXICO][13] = 127,
+ [1][0][2][0][RTW89_CN][13] = 127,
+ [1][0][2][0][RTW89_QATAR][13] = 127,
+ [1][1][2][0][RTW89_FCC][0] = 127,
+ [1][1][2][0][RTW89_ETSI][0] = 127,
+ [1][1][2][0][RTW89_MKK][0] = 127,
+ [1][1][2][0][RTW89_IC][0] = 127,
+ [1][1][2][0][RTW89_KCC][0] = 127,
+ [1][1][2][0][RTW89_ACMA][0] = 127,
+ [1][1][2][0][RTW89_CHILE][0] = 127,
+ [1][1][2][0][RTW89_UKRAINE][0] = 127,
+ [1][1][2][0][RTW89_MEXICO][0] = 127,
+ [1][1][2][0][RTW89_CN][0] = 127,
+ [1][1][2][0][RTW89_QATAR][0] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 127,
+ [1][1][2][0][RTW89_ETSI][1] = 127,
+ [1][1][2][0][RTW89_MKK][1] = 127,
+ [1][1][2][0][RTW89_IC][1] = 127,
+ [1][1][2][0][RTW89_KCC][1] = 127,
+ [1][1][2][0][RTW89_ACMA][1] = 127,
+ [1][1][2][0][RTW89_CHILE][1] = 127,
+ [1][1][2][0][RTW89_UKRAINE][1] = 127,
+ [1][1][2][0][RTW89_MEXICO][1] = 127,
+ [1][1][2][0][RTW89_CN][1] = 127,
+ [1][1][2][0][RTW89_QATAR][1] = 127,
+ [1][1][2][0][RTW89_FCC][2] = 52,
+ [1][1][2][0][RTW89_ETSI][2] = 46,
+ [1][1][2][0][RTW89_MKK][2] = 64,
+ [1][1][2][0][RTW89_IC][2] = 52,
+ [1][1][2][0][RTW89_KCC][2] = 66,
+ [1][1][2][0][RTW89_ACMA][2] = 46,
+ [1][1][2][0][RTW89_CHILE][2] = 48,
+ [1][1][2][0][RTW89_UKRAINE][2] = 46,
+ [1][1][2][0][RTW89_MEXICO][2] = 52,
+ [1][1][2][0][RTW89_CN][2] = 46,
+ [1][1][2][0][RTW89_QATAR][2] = 46,
+ [1][1][2][0][RTW89_FCC][3] = 52,
+ [1][1][2][0][RTW89_ETSI][3] = 46,
+ [1][1][2][0][RTW89_MKK][3] = 64,
+ [1][1][2][0][RTW89_IC][3] = 52,
+ [1][1][2][0][RTW89_KCC][3] = 68,
+ [1][1][2][0][RTW89_ACMA][3] = 46,
+ [1][1][2][0][RTW89_CHILE][3] = 48,
+ [1][1][2][0][RTW89_UKRAINE][3] = 46,
+ [1][1][2][0][RTW89_MEXICO][3] = 52,
+ [1][1][2][0][RTW89_CN][3] = 46,
+ [1][1][2][0][RTW89_QATAR][3] = 46,
+ [1][1][2][0][RTW89_FCC][4] = 56,
+ [1][1][2][0][RTW89_ETSI][4] = 46,
+ [1][1][2][0][RTW89_MKK][4] = 64,
+ [1][1][2][0][RTW89_IC][4] = 56,
+ [1][1][2][0][RTW89_KCC][4] = 68,
+ [1][1][2][0][RTW89_ACMA][4] = 46,
+ [1][1][2][0][RTW89_CHILE][4] = 48,
+ [1][1][2][0][RTW89_UKRAINE][4] = 46,
+ [1][1][2][0][RTW89_MEXICO][4] = 56,
+ [1][1][2][0][RTW89_CN][4] = 46,
+ [1][1][2][0][RTW89_QATAR][4] = 46,
+ [1][1][2][0][RTW89_FCC][5] = 60,
+ [1][1][2][0][RTW89_ETSI][5] = 46,
+ [1][1][2][0][RTW89_MKK][5] = 64,
+ [1][1][2][0][RTW89_IC][5] = 60,
+ [1][1][2][0][RTW89_KCC][5] = 68,
+ [1][1][2][0][RTW89_ACMA][5] = 46,
+ [1][1][2][0][RTW89_CHILE][5] = 48,
+ [1][1][2][0][RTW89_UKRAINE][5] = 46,
+ [1][1][2][0][RTW89_MEXICO][5] = 60,
+ [1][1][2][0][RTW89_CN][5] = 46,
+ [1][1][2][0][RTW89_QATAR][5] = 46,
+ [1][1][2][0][RTW89_FCC][6] = 54,
+ [1][1][2][0][RTW89_ETSI][6] = 46,
+ [1][1][2][0][RTW89_MKK][6] = 64,
+ [1][1][2][0][RTW89_IC][6] = 52,
+ [1][1][2][0][RTW89_KCC][6] = 68,
+ [1][1][2][0][RTW89_ACMA][6] = 46,
+ [1][1][2][0][RTW89_CHILE][6] = 48,
+ [1][1][2][0][RTW89_UKRAINE][6] = 46,
+ [1][1][2][0][RTW89_MEXICO][6] = 54,
+ [1][1][2][0][RTW89_CN][6] = 46,
+ [1][1][2][0][RTW89_QATAR][6] = 46,
+ [1][1][2][0][RTW89_FCC][7] = 50,
+ [1][1][2][0][RTW89_ETSI][7] = 46,
+ [1][1][2][0][RTW89_MKK][7] = 64,
+ [1][1][2][0][RTW89_IC][7] = 48,
+ [1][1][2][0][RTW89_KCC][7] = 68,
+ [1][1][2][0][RTW89_ACMA][7] = 46,
+ [1][1][2][0][RTW89_CHILE][7] = 48,
+ [1][1][2][0][RTW89_UKRAINE][7] = 46,
+ [1][1][2][0][RTW89_MEXICO][7] = 50,
+ [1][1][2][0][RTW89_CN][7] = 46,
+ [1][1][2][0][RTW89_QATAR][7] = 46,
+ [1][1][2][0][RTW89_FCC][8] = 50,
+ [1][1][2][0][RTW89_ETSI][8] = 46,
+ [1][1][2][0][RTW89_MKK][8] = 64,
+ [1][1][2][0][RTW89_IC][8] = 48,
+ [1][1][2][0][RTW89_KCC][8] = 68,
+ [1][1][2][0][RTW89_ACMA][8] = 46,
+ [1][1][2][0][RTW89_CHILE][8] = 48,
+ [1][1][2][0][RTW89_UKRAINE][8] = 46,
+ [1][1][2][0][RTW89_MEXICO][8] = 50,
+ [1][1][2][0][RTW89_CN][8] = 46,
+ [1][1][2][0][RTW89_QATAR][8] = 46,
+ [1][1][2][0][RTW89_FCC][9] = 38,
+ [1][1][2][0][RTW89_ETSI][9] = 46,
+ [1][1][2][0][RTW89_MKK][9] = 64,
+ [1][1][2][0][RTW89_IC][9] = 38,
+ [1][1][2][0][RTW89_KCC][9] = 68,
+ [1][1][2][0][RTW89_ACMA][9] = 46,
+ [1][1][2][0][RTW89_CHILE][9] = 38,
+ [1][1][2][0][RTW89_UKRAINE][9] = 46,
+ [1][1][2][0][RTW89_MEXICO][9] = 38,
+ [1][1][2][0][RTW89_CN][9] = 46,
+ [1][1][2][0][RTW89_QATAR][9] = 46,
+ [1][1][2][0][RTW89_FCC][10] = 36,
+ [1][1][2][0][RTW89_ETSI][10] = 46,
+ [1][1][2][0][RTW89_MKK][10] = 64,
+ [1][1][2][0][RTW89_IC][10] = 36,
+ [1][1][2][0][RTW89_KCC][10] = 66,
+ [1][1][2][0][RTW89_ACMA][10] = 46,
+ [1][1][2][0][RTW89_CHILE][10] = 36,
+ [1][1][2][0][RTW89_UKRAINE][10] = 46,
+ [1][1][2][0][RTW89_MEXICO][10] = 36,
+ [1][1][2][0][RTW89_CN][10] = 46,
+ [1][1][2][0][RTW89_QATAR][10] = 46,
+ [1][1][2][0][RTW89_FCC][11] = 127,
+ [1][1][2][0][RTW89_ETSI][11] = 127,
+ [1][1][2][0][RTW89_MKK][11] = 127,
+ [1][1][2][0][RTW89_IC][11] = 127,
+ [1][1][2][0][RTW89_KCC][11] = 127,
+ [1][1][2][0][RTW89_ACMA][11] = 127,
+ [1][1][2][0][RTW89_CHILE][11] = 127,
+ [1][1][2][0][RTW89_UKRAINE][11] = 127,
+ [1][1][2][0][RTW89_MEXICO][11] = 127,
+ [1][1][2][0][RTW89_CN][11] = 127,
+ [1][1][2][0][RTW89_QATAR][11] = 127,
+ [1][1][2][0][RTW89_FCC][12] = 127,
+ [1][1][2][0][RTW89_ETSI][12] = 127,
+ [1][1][2][0][RTW89_MKK][12] = 127,
+ [1][1][2][0][RTW89_IC][12] = 127,
+ [1][1][2][0][RTW89_KCC][12] = 127,
+ [1][1][2][0][RTW89_ACMA][12] = 127,
+ [1][1][2][0][RTW89_CHILE][12] = 127,
+ [1][1][2][0][RTW89_UKRAINE][12] = 127,
+ [1][1][2][0][RTW89_MEXICO][12] = 127,
+ [1][1][2][0][RTW89_CN][12] = 127,
+ [1][1][2][0][RTW89_QATAR][12] = 127,
+ [1][1][2][0][RTW89_FCC][13] = 127,
+ [1][1][2][0][RTW89_ETSI][13] = 127,
+ [1][1][2][0][RTW89_MKK][13] = 127,
+ [1][1][2][0][RTW89_IC][13] = 127,
+ [1][1][2][0][RTW89_KCC][13] = 127,
+ [1][1][2][0][RTW89_ACMA][13] = 127,
+ [1][1][2][0][RTW89_CHILE][13] = 127,
+ [1][1][2][0][RTW89_UKRAINE][13] = 127,
+ [1][1][2][0][RTW89_MEXICO][13] = 127,
+ [1][1][2][0][RTW89_CN][13] = 127,
+ [1][1][2][0][RTW89_QATAR][13] = 127,
+ [1][1][2][1][RTW89_FCC][0] = 127,
+ [1][1][2][1][RTW89_ETSI][0] = 127,
+ [1][1][2][1][RTW89_MKK][0] = 127,
+ [1][1][2][1][RTW89_IC][0] = 127,
+ [1][1][2][1][RTW89_KCC][0] = 127,
+ [1][1][2][1][RTW89_ACMA][0] = 127,
+ [1][1][2][1][RTW89_CHILE][0] = 127,
+ [1][1][2][1][RTW89_UKRAINE][0] = 127,
+ [1][1][2][1][RTW89_MEXICO][0] = 127,
+ [1][1][2][1][RTW89_CN][0] = 127,
+ [1][1][2][1][RTW89_QATAR][0] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 127,
+ [1][1][2][1][RTW89_ETSI][1] = 127,
+ [1][1][2][1][RTW89_MKK][1] = 127,
+ [1][1][2][1][RTW89_IC][1] = 127,
+ [1][1][2][1][RTW89_KCC][1] = 127,
+ [1][1][2][1][RTW89_ACMA][1] = 127,
+ [1][1][2][1][RTW89_CHILE][1] = 127,
+ [1][1][2][1][RTW89_UKRAINE][1] = 127,
+ [1][1][2][1][RTW89_MEXICO][1] = 127,
+ [1][1][2][1][RTW89_CN][1] = 127,
+ [1][1][2][1][RTW89_QATAR][1] = 127,
+ [1][1][2][1][RTW89_FCC][2] = 52,
+ [1][1][2][1][RTW89_ETSI][2] = 34,
+ [1][1][2][1][RTW89_MKK][2] = 64,
+ [1][1][2][1][RTW89_IC][2] = 52,
+ [1][1][2][1][RTW89_KCC][2] = 66,
+ [1][1][2][1][RTW89_ACMA][2] = 34,
+ [1][1][2][1][RTW89_CHILE][2] = 36,
+ [1][1][2][1][RTW89_UKRAINE][2] = 34,
+ [1][1][2][1][RTW89_MEXICO][2] = 52,
+ [1][1][2][1][RTW89_CN][2] = 34,
+ [1][1][2][1][RTW89_QATAR][2] = 34,
+ [1][1][2][1][RTW89_FCC][3] = 52,
+ [1][1][2][1][RTW89_ETSI][3] = 34,
+ [1][1][2][1][RTW89_MKK][3] = 64,
+ [1][1][2][1][RTW89_IC][3] = 52,
+ [1][1][2][1][RTW89_KCC][3] = 68,
+ [1][1][2][1][RTW89_ACMA][3] = 34,
+ [1][1][2][1][RTW89_CHILE][3] = 36,
+ [1][1][2][1][RTW89_UKRAINE][3] = 34,
+ [1][1][2][1][RTW89_MEXICO][3] = 52,
+ [1][1][2][1][RTW89_CN][3] = 34,
+ [1][1][2][1][RTW89_QATAR][3] = 34,
+ [1][1][2][1][RTW89_FCC][4] = 56,
+ [1][1][2][1][RTW89_ETSI][4] = 34,
+ [1][1][2][1][RTW89_MKK][4] = 64,
+ [1][1][2][1][RTW89_IC][4] = 56,
+ [1][1][2][1][RTW89_KCC][4] = 68,
+ [1][1][2][1][RTW89_ACMA][4] = 34,
+ [1][1][2][1][RTW89_CHILE][4] = 36,
+ [1][1][2][1][RTW89_UKRAINE][4] = 34,
+ [1][1][2][1][RTW89_MEXICO][4] = 56,
+ [1][1][2][1][RTW89_CN][4] = 34,
+ [1][1][2][1][RTW89_QATAR][4] = 34,
+ [1][1][2][1][RTW89_FCC][5] = 60,
+ [1][1][2][1][RTW89_ETSI][5] = 34,
+ [1][1][2][1][RTW89_MKK][5] = 64,
+ [1][1][2][1][RTW89_IC][5] = 60,
+ [1][1][2][1][RTW89_KCC][5] = 68,
+ [1][1][2][1][RTW89_ACMA][5] = 34,
+ [1][1][2][1][RTW89_CHILE][5] = 36,
+ [1][1][2][1][RTW89_UKRAINE][5] = 34,
+ [1][1][2][1][RTW89_MEXICO][5] = 60,
+ [1][1][2][1][RTW89_CN][5] = 34,
+ [1][1][2][1][RTW89_QATAR][5] = 34,
+ [1][1][2][1][RTW89_FCC][6] = 54,
+ [1][1][2][1][RTW89_ETSI][6] = 34,
+ [1][1][2][1][RTW89_MKK][6] = 64,
+ [1][1][2][1][RTW89_IC][6] = 52,
+ [1][1][2][1][RTW89_KCC][6] = 68,
+ [1][1][2][1][RTW89_ACMA][6] = 34,
+ [1][1][2][1][RTW89_CHILE][6] = 36,
+ [1][1][2][1][RTW89_UKRAINE][6] = 34,
+ [1][1][2][1][RTW89_MEXICO][6] = 54,
+ [1][1][2][1][RTW89_CN][6] = 34,
+ [1][1][2][1][RTW89_QATAR][6] = 34,
+ [1][1][2][1][RTW89_FCC][7] = 50,
+ [1][1][2][1][RTW89_ETSI][7] = 34,
+ [1][1][2][1][RTW89_MKK][7] = 64,
+ [1][1][2][1][RTW89_IC][7] = 48,
+ [1][1][2][1][RTW89_KCC][7] = 68,
+ [1][1][2][1][RTW89_ACMA][7] = 34,
+ [1][1][2][1][RTW89_CHILE][7] = 36,
+ [1][1][2][1][RTW89_UKRAINE][7] = 34,
+ [1][1][2][1][RTW89_MEXICO][7] = 50,
+ [1][1][2][1][RTW89_CN][7] = 34,
+ [1][1][2][1][RTW89_QATAR][7] = 34,
+ [1][1][2][1][RTW89_FCC][8] = 50,
+ [1][1][2][1][RTW89_ETSI][8] = 34,
+ [1][1][2][1][RTW89_MKK][8] = 64,
+ [1][1][2][1][RTW89_IC][8] = 48,
+ [1][1][2][1][RTW89_KCC][8] = 68,
+ [1][1][2][1][RTW89_ACMA][8] = 34,
+ [1][1][2][1][RTW89_CHILE][8] = 36,
+ [1][1][2][1][RTW89_UKRAINE][8] = 34,
+ [1][1][2][1][RTW89_MEXICO][8] = 50,
+ [1][1][2][1][RTW89_CN][8] = 34,
+ [1][1][2][1][RTW89_QATAR][8] = 34,
+ [1][1][2][1][RTW89_FCC][9] = 38,
+ [1][1][2][1][RTW89_ETSI][9] = 34,
+ [1][1][2][1][RTW89_MKK][9] = 64,
+ [1][1][2][1][RTW89_IC][9] = 38,
+ [1][1][2][1][RTW89_KCC][9] = 68,
+ [1][1][2][1][RTW89_ACMA][9] = 34,
+ [1][1][2][1][RTW89_CHILE][9] = 36,
+ [1][1][2][1][RTW89_UKRAINE][9] = 34,
+ [1][1][2][1][RTW89_MEXICO][9] = 38,
+ [1][1][2][1][RTW89_CN][9] = 34,
+ [1][1][2][1][RTW89_QATAR][9] = 34,
+ [1][1][2][1][RTW89_FCC][10] = 36,
+ [1][1][2][1][RTW89_ETSI][10] = 34,
+ [1][1][2][1][RTW89_MKK][10] = 64,
+ [1][1][2][1][RTW89_IC][10] = 36,
+ [1][1][2][1][RTW89_KCC][10] = 66,
+ [1][1][2][1][RTW89_ACMA][10] = 34,
+ [1][1][2][1][RTW89_CHILE][10] = 36,
+ [1][1][2][1][RTW89_UKRAINE][10] = 34,
+ [1][1][2][1][RTW89_MEXICO][10] = 36,
+ [1][1][2][1][RTW89_CN][10] = 34,
+ [1][1][2][1][RTW89_QATAR][10] = 34,
+ [1][1][2][1][RTW89_FCC][11] = 127,
+ [1][1][2][1][RTW89_ETSI][11] = 127,
+ [1][1][2][1][RTW89_MKK][11] = 127,
+ [1][1][2][1][RTW89_IC][11] = 127,
+ [1][1][2][1][RTW89_KCC][11] = 127,
+ [1][1][2][1][RTW89_ACMA][11] = 127,
+ [1][1][2][1][RTW89_CHILE][11] = 127,
+ [1][1][2][1][RTW89_UKRAINE][11] = 127,
+ [1][1][2][1][RTW89_MEXICO][11] = 127,
+ [1][1][2][1][RTW89_CN][11] = 127,
+ [1][1][2][1][RTW89_QATAR][11] = 127,
+ [1][1][2][1][RTW89_FCC][12] = 127,
+ [1][1][2][1][RTW89_ETSI][12] = 127,
+ [1][1][2][1][RTW89_MKK][12] = 127,
+ [1][1][2][1][RTW89_IC][12] = 127,
+ [1][1][2][1][RTW89_KCC][12] = 127,
+ [1][1][2][1][RTW89_ACMA][12] = 127,
+ [1][1][2][1][RTW89_CHILE][12] = 127,
+ [1][1][2][1][RTW89_UKRAINE][12] = 127,
+ [1][1][2][1][RTW89_MEXICO][12] = 127,
+ [1][1][2][1][RTW89_CN][12] = 127,
+ [1][1][2][1][RTW89_QATAR][12] = 127,
+ [1][1][2][1][RTW89_FCC][13] = 127,
+ [1][1][2][1][RTW89_ETSI][13] = 127,
+ [1][1][2][1][RTW89_MKK][13] = 127,
+ [1][1][2][1][RTW89_IC][13] = 127,
+ [1][1][2][1][RTW89_KCC][13] = 127,
+ [1][1][2][1][RTW89_ACMA][13] = 127,
+ [1][1][2][1][RTW89_CHILE][13] = 127,
+ [1][1][2][1][RTW89_UKRAINE][13] = 127,
+ [1][1][2][1][RTW89_MEXICO][13] = 127,
+ [1][1][2][1][RTW89_CN][13] = 127,
+ [1][1][2][1][RTW89_QATAR][13] = 127,
};
const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
- [0][0][1][0][0][0] = 30,
- [0][0][1][0][0][2] = 30,
- [0][0][1][0][0][4] = 30,
- [0][0][1][0][0][6] = 30,
- [0][0][1][0][0][8] = 52,
- [0][0][1][0][0][10] = 52,
- [0][0][1][0][0][12] = 52,
- [0][0][1][0][0][14] = 52,
- [0][0][1][0][0][15] = 52,
- [0][0][1][0][0][17] = 52,
- [0][0][1][0][0][19] = 52,
- [0][0][1][0][0][21] = 52,
- [0][0][1][0][0][23] = 52,
- [0][0][1][0][0][25] = 52,
- [0][0][1][0][0][27] = 52,
- [0][0][1][0][0][29] = 52,
- [0][0][1][0][0][31] = 52,
- [0][0][1][0][0][33] = 52,
- [0][0][1][0][0][35] = 52,
- [0][0][1][0][0][37] = 54,
- [0][0][1][0][0][38] = 28,
- [0][0][1][0][0][40] = 28,
- [0][0][1][0][0][42] = 28,
- [0][0][1][0][0][44] = 28,
- [0][0][1][0][0][46] = 28,
- [0][1][1][0][0][0] = 18,
- [0][1][1][0][0][2] = 18,
- [0][1][1][0][0][4] = 18,
- [0][1][1][0][0][6] = 18,
- [0][1][1][0][0][8] = 40,
- [0][1][1][0][0][10] = 40,
- [0][1][1][0][0][12] = 40,
- [0][1][1][0][0][14] = 40,
- [0][1][1][0][0][15] = 40,
- [0][1][1][0][0][17] = 40,
- [0][1][1][0][0][19] = 40,
- [0][1][1][0][0][21] = 40,
- [0][1][1][0][0][23] = 40,
- [0][1][1][0][0][25] = 40,
- [0][1][1][0][0][27] = 40,
- [0][1][1][0][0][29] = 40,
- [0][1][1][0][0][31] = 40,
- [0][1][1][0][0][33] = 40,
- [0][1][1][0][0][35] = 40,
- [0][1][1][0][0][37] = 42,
- [0][1][1][0][0][38] = 16,
- [0][1][1][0][0][40] = 16,
- [0][1][1][0][0][42] = 16,
- [0][1][1][0][0][44] = 16,
- [0][1][1][0][0][46] = 16,
- [0][0][2][0][0][0] = 30,
- [0][0][2][0][0][2] = 30,
- [0][0][2][0][0][4] = 30,
- [0][0][2][0][0][6] = 30,
- [0][0][2][0][0][8] = 52,
- [0][0][2][0][0][10] = 52,
- [0][0][2][0][0][12] = 52,
- [0][0][2][0][0][14] = 52,
- [0][0][2][0][0][15] = 52,
- [0][0][2][0][0][17] = 52,
- [0][0][2][0][0][19] = 52,
- [0][0][2][0][0][21] = 52,
- [0][0][2][0][0][23] = 52,
- [0][0][2][0][0][25] = 52,
- [0][0][2][0][0][27] = 52,
- [0][0][2][0][0][29] = 52,
- [0][0][2][0][0][31] = 52,
- [0][0][2][0][0][33] = 52,
- [0][0][2][0][0][35] = 52,
- [0][0][2][0][0][37] = 54,
- [0][0][2][0][0][38] = 28,
- [0][0][2][0][0][40] = 28,
- [0][0][2][0][0][42] = 28,
- [0][0][2][0][0][44] = 28,
- [0][0][2][0][0][46] = 28,
- [0][1][2][0][0][0] = 18,
- [0][1][2][0][0][2] = 18,
- [0][1][2][0][0][4] = 18,
- [0][1][2][0][0][6] = 18,
- [0][1][2][0][0][8] = 40,
- [0][1][2][0][0][10] = 40,
- [0][1][2][0][0][12] = 40,
- [0][1][2][0][0][14] = 40,
- [0][1][2][0][0][15] = 40,
- [0][1][2][0][0][17] = 40,
- [0][1][2][0][0][19] = 40,
- [0][1][2][0][0][21] = 40,
- [0][1][2][0][0][23] = 40,
- [0][1][2][0][0][25] = 40,
- [0][1][2][0][0][27] = 40,
- [0][1][2][0][0][29] = 40,
- [0][1][2][0][0][31] = 40,
- [0][1][2][0][0][33] = 40,
- [0][1][2][0][0][35] = 40,
- [0][1][2][0][0][37] = 42,
- [0][1][2][0][0][38] = 16,
- [0][1][2][0][0][40] = 16,
- [0][1][2][0][0][42] = 16,
- [0][1][2][0][0][44] = 16,
- [0][1][2][0][0][46] = 16,
- [0][1][2][1][0][0] = 6,
- [0][1][2][1][0][2] = 6,
- [0][1][2][1][0][4] = 6,
- [0][1][2][1][0][6] = 6,
- [0][1][2][1][0][8] = 28,
- [0][1][2][1][0][10] = 28,
- [0][1][2][1][0][12] = 28,
- [0][1][2][1][0][14] = 28,
- [0][1][2][1][0][15] = 28,
- [0][1][2][1][0][17] = 28,
- [0][1][2][1][0][19] = 28,
- [0][1][2][1][0][21] = 28,
- [0][1][2][1][0][23] = 28,
- [0][1][2][1][0][25] = 28,
- [0][1][2][1][0][27] = 28,
- [0][1][2][1][0][29] = 28,
- [0][1][2][1][0][31] = 28,
- [0][1][2][1][0][33] = 28,
- [0][1][2][1][0][35] = 28,
- [0][1][2][1][0][37] = 30,
- [0][1][2][1][0][38] = 4,
- [0][1][2][1][0][40] = 4,
- [0][1][2][1][0][42] = 4,
- [0][1][2][1][0][44] = 4,
- [0][1][2][1][0][46] = 4,
- [1][0][2][0][0][1] = 30,
- [1][0][2][0][0][5] = 30,
- [1][0][2][0][0][9] = 52,
- [1][0][2][0][0][13] = 52,
- [1][0][2][0][0][16] = 52,
- [1][0][2][0][0][20] = 52,
- [1][0][2][0][0][24] = 52,
- [1][0][2][0][0][28] = 52,
- [1][0][2][0][0][32] = 52,
- [1][0][2][0][0][36] = 54,
- [1][0][2][0][0][39] = 28,
- [1][0][2][0][0][43] = 28,
- [1][1][2][0][0][1] = 18,
- [1][1][2][0][0][5] = 18,
- [1][1][2][0][0][9] = 40,
- [1][1][2][0][0][13] = 40,
- [1][1][2][0][0][16] = 40,
- [1][1][2][0][0][20] = 40,
- [1][1][2][0][0][24] = 40,
- [1][1][2][0][0][28] = 40,
- [1][1][2][0][0][32] = 40,
- [1][1][2][0][0][36] = 42,
- [1][1][2][0][0][39] = 16,
- [1][1][2][0][0][43] = 16,
- [1][1][2][1][0][1] = 6,
- [1][1][2][1][0][5] = 6,
- [1][1][2][1][0][9] = 28,
- [1][1][2][1][0][13] = 28,
- [1][1][2][1][0][16] = 28,
- [1][1][2][1][0][20] = 28,
- [1][1][2][1][0][24] = 28,
- [1][1][2][1][0][28] = 28,
- [1][1][2][1][0][32] = 28,
- [1][1][2][1][0][36] = 30,
- [1][1][2][1][0][39] = 4,
- [1][1][2][1][0][43] = 4,
- [2][0][2][0][0][3] = 30,
- [2][0][2][0][0][11] = 52,
- [2][0][2][0][0][18] = 52,
- [2][0][2][0][0][26] = 52,
- [2][0][2][0][0][34] = 54,
- [2][0][2][0][0][41] = 28,
- [2][1][2][0][0][3] = 18,
- [2][1][2][0][0][11] = 40,
- [2][1][2][0][0][18] = 40,
- [2][1][2][0][0][26] = 40,
- [2][1][2][0][0][34] = 42,
- [2][1][2][0][0][41] = 16,
- [2][1][2][1][0][3] = 6,
- [2][1][2][1][0][11] = 28,
- [2][1][2][1][0][18] = 28,
- [2][1][2][1][0][26] = 28,
- [2][1][2][1][0][34] = 30,
- [2][1][2][1][0][41] = 4,
- [0][0][1][0][2][0] = 76,
- [0][0][1][0][1][0] = 58,
- [0][0][1][0][3][0] = 62,
- [0][0][1][0][5][0] = 62,
- [0][0][1][0][6][0] = 58,
- [0][0][1][0][9][0] = 58,
- [0][0][1][0][8][0] = 30,
- [0][0][1][0][11][0] = 52,
- [0][0][1][0][2][2] = 76,
- [0][0][1][0][1][2] = 58,
- [0][0][1][0][3][2] = 62,
- [0][0][1][0][5][2] = 62,
- [0][0][1][0][6][2] = 58,
- [0][0][1][0][9][2] = 58,
- [0][0][1][0][8][2] = 30,
- [0][0][1][0][11][2] = 52,
- [0][0][1][0][2][4] = 76,
- [0][0][1][0][1][4] = 58,
- [0][0][1][0][3][4] = 62,
- [0][0][1][0][5][4] = 62,
- [0][0][1][0][6][4] = 58,
- [0][0][1][0][9][4] = 58,
- [0][0][1][0][8][4] = 30,
- [0][0][1][0][11][4] = 52,
- [0][0][1][0][2][6] = 76,
- [0][0][1][0][1][6] = 58,
- [0][0][1][0][3][6] = 62,
- [0][0][1][0][5][6] = 62,
- [0][0][1][0][6][6] = 54,
- [0][0][1][0][9][6] = 58,
- [0][0][1][0][8][6] = 30,
- [0][0][1][0][11][6] = 52,
- [0][0][1][0][2][8] = 76,
- [0][0][1][0][1][8] = 58,
- [0][0][1][0][3][8] = 62,
- [0][0][1][0][5][8] = 64,
- [0][0][1][0][6][8] = 58,
- [0][0][1][0][9][8] = 58,
- [0][0][1][0][8][8] = 54,
- [0][0][1][0][11][8] = 52,
- [0][0][1][0][2][10] = 76,
- [0][0][1][0][1][10] = 58,
- [0][0][1][0][3][10] = 62,
- [0][0][1][0][5][10] = 64,
- [0][0][1][0][6][10] = 58,
- [0][0][1][0][9][10] = 58,
- [0][0][1][0][8][10] = 54,
- [0][0][1][0][11][10] = 52,
- [0][0][1][0][2][12] = 76,
- [0][0][1][0][1][12] = 58,
- [0][0][1][0][3][12] = 62,
- [0][0][1][0][5][12] = 64,
- [0][0][1][0][6][12] = 58,
- [0][0][1][0][9][12] = 58,
- [0][0][1][0][8][12] = 54,
- [0][0][1][0][11][12] = 52,
- [0][0][1][0][2][14] = 76,
- [0][0][1][0][1][14] = 58,
- [0][0][1][0][3][14] = 62,
- [0][0][1][0][5][14] = 64,
- [0][0][1][0][6][14] = 58,
- [0][0][1][0][9][14] = 58,
- [0][0][1][0][8][14] = 54,
- [0][0][1][0][11][14] = 52,
- [0][0][1][0][2][15] = 76,
- [0][0][1][0][1][15] = 58,
- [0][0][1][0][3][15] = 76,
- [0][0][1][0][5][15] = 76,
- [0][0][1][0][6][15] = 58,
- [0][0][1][0][9][15] = 58,
- [0][0][1][0][8][15] = 54,
- [0][0][1][0][11][15] = 52,
- [0][0][1][0][2][17] = 76,
- [0][0][1][0][1][17] = 58,
- [0][0][1][0][3][17] = 76,
- [0][0][1][0][5][17] = 76,
- [0][0][1][0][6][17] = 58,
- [0][0][1][0][9][17] = 58,
- [0][0][1][0][8][17] = 54,
- [0][0][1][0][11][17] = 52,
- [0][0][1][0][2][19] = 76,
- [0][0][1][0][1][19] = 58,
- [0][0][1][0][3][19] = 76,
- [0][0][1][0][5][19] = 76,
- [0][0][1][0][6][19] = 58,
- [0][0][1][0][9][19] = 58,
- [0][0][1][0][8][19] = 54,
- [0][0][1][0][11][19] = 52,
- [0][0][1][0][2][21] = 76,
- [0][0][1][0][1][21] = 58,
- [0][0][1][0][3][21] = 76,
- [0][0][1][0][5][21] = 76,
- [0][0][1][0][6][21] = 58,
- [0][0][1][0][9][21] = 58,
- [0][0][1][0][8][21] = 54,
- [0][0][1][0][11][21] = 52,
- [0][0][1][0][2][23] = 76,
- [0][0][1][0][1][23] = 58,
- [0][0][1][0][3][23] = 76,
- [0][0][1][0][5][23] = 76,
- [0][0][1][0][6][23] = 58,
- [0][0][1][0][9][23] = 58,
- [0][0][1][0][8][23] = 54,
- [0][0][1][0][11][23] = 52,
- [0][0][1][0][2][25] = 76,
- [0][0][1][0][1][25] = 58,
- [0][0][1][0][3][25] = 76,
- [0][0][1][0][5][25] = 127,
- [0][0][1][0][6][25] = 58,
- [0][0][1][0][9][25] = 127,
- [0][0][1][0][8][25] = 54,
- [0][0][1][0][11][25] = 52,
- [0][0][1][0][2][27] = 76,
- [0][0][1][0][1][27] = 58,
- [0][0][1][0][3][27] = 76,
- [0][0][1][0][5][27] = 127,
- [0][0][1][0][6][27] = 58,
- [0][0][1][0][9][27] = 127,
- [0][0][1][0][8][27] = 54,
- [0][0][1][0][11][27] = 52,
- [0][0][1][0][2][29] = 76,
- [0][0][1][0][1][29] = 58,
- [0][0][1][0][3][29] = 76,
- [0][0][1][0][5][29] = 127,
- [0][0][1][0][6][29] = 58,
- [0][0][1][0][9][29] = 127,
- [0][0][1][0][8][29] = 54,
- [0][0][1][0][11][29] = 52,
- [0][0][1][0][2][31] = 76,
- [0][0][1][0][1][31] = 58,
- [0][0][1][0][3][31] = 76,
- [0][0][1][0][5][31] = 76,
- [0][0][1][0][6][31] = 58,
- [0][0][1][0][9][31] = 58,
- [0][0][1][0][8][31] = 54,
- [0][0][1][0][11][31] = 52,
- [0][0][1][0][2][33] = 76,
- [0][0][1][0][1][33] = 58,
- [0][0][1][0][3][33] = 76,
- [0][0][1][0][5][33] = 76,
- [0][0][1][0][6][33] = 58,
- [0][0][1][0][9][33] = 58,
- [0][0][1][0][8][33] = 54,
- [0][0][1][0][11][33] = 52,
- [0][0][1][0][2][35] = 74,
- [0][0][1][0][1][35] = 58,
- [0][0][1][0][3][35] = 76,
- [0][0][1][0][5][35] = 74,
- [0][0][1][0][6][35] = 58,
- [0][0][1][0][9][35] = 58,
- [0][0][1][0][8][35] = 54,
- [0][0][1][0][11][35] = 52,
- [0][0][1][0][2][37] = 76,
- [0][0][1][0][1][37] = 127,
- [0][0][1][0][3][37] = 76,
- [0][0][1][0][5][37] = 76,
- [0][0][1][0][6][37] = 58,
- [0][0][1][0][9][37] = 76,
- [0][0][1][0][8][37] = 54,
- [0][0][1][0][11][37] = 127,
- [0][0][1][0][2][38] = 76,
- [0][0][1][0][1][38] = 28,
- [0][0][1][0][3][38] = 127,
- [0][0][1][0][5][38] = 76,
- [0][0][1][0][6][38] = 28,
- [0][0][1][0][9][38] = 76,
- [0][0][1][0][8][38] = 54,
- [0][0][1][0][11][38] = 52,
- [0][0][1][0][2][40] = 76,
- [0][0][1][0][1][40] = 28,
- [0][0][1][0][3][40] = 127,
- [0][0][1][0][5][40] = 76,
- [0][0][1][0][6][40] = 28,
- [0][0][1][0][9][40] = 76,
- [0][0][1][0][8][40] = 54,
- [0][0][1][0][11][40] = 52,
- [0][0][1][0][2][42] = 76,
- [0][0][1][0][1][42] = 28,
- [0][0][1][0][3][42] = 127,
- [0][0][1][0][5][42] = 76,
- [0][0][1][0][6][42] = 28,
- [0][0][1][0][9][42] = 76,
- [0][0][1][0][8][42] = 54,
- [0][0][1][0][11][42] = 52,
- [0][0][1][0][2][44] = 76,
- [0][0][1][0][1][44] = 28,
- [0][0][1][0][3][44] = 127,
- [0][0][1][0][5][44] = 76,
- [0][0][1][0][6][44] = 28,
- [0][0][1][0][9][44] = 76,
- [0][0][1][0][8][44] = 54,
- [0][0][1][0][11][44] = 52,
- [0][0][1][0][2][46] = 76,
- [0][0][1][0][1][46] = 28,
- [0][0][1][0][3][46] = 127,
- [0][0][1][0][5][46] = 76,
- [0][0][1][0][6][46] = 28,
- [0][0][1][0][9][46] = 76,
- [0][0][1][0][8][46] = 54,
- [0][0][1][0][11][46] = 52,
- [0][1][1][0][2][0] = 68,
- [0][1][1][0][1][0] = 46,
- [0][1][1][0][3][0] = 50,
- [0][1][1][0][5][0] = 40,
- [0][1][1][0][6][0] = 46,
- [0][1][1][0][9][0] = 46,
- [0][1][1][0][8][0] = 18,
- [0][1][1][0][11][0] = 40,
- [0][1][1][0][2][2] = 68,
- [0][1][1][0][1][2] = 46,
- [0][1][1][0][3][2] = 50,
- [0][1][1][0][5][2] = 40,
- [0][1][1][0][6][2] = 46,
- [0][1][1][0][9][2] = 46,
- [0][1][1][0][8][2] = 18,
- [0][1][1][0][11][2] = 40,
- [0][1][1][0][2][4] = 68,
- [0][1][1][0][1][4] = 46,
- [0][1][1][0][3][4] = 50,
- [0][1][1][0][5][4] = 40,
- [0][1][1][0][6][4] = 46,
- [0][1][1][0][9][4] = 46,
- [0][1][1][0][8][4] = 18,
- [0][1][1][0][11][4] = 40,
- [0][1][1][0][2][6] = 68,
- [0][1][1][0][1][6] = 46,
- [0][1][1][0][3][6] = 50,
- [0][1][1][0][5][6] = 40,
- [0][1][1][0][6][6] = 36,
- [0][1][1][0][9][6] = 46,
- [0][1][1][0][8][6] = 18,
- [0][1][1][0][11][6] = 40,
- [0][1][1][0][2][8] = 68,
- [0][1][1][0][1][8] = 46,
- [0][1][1][0][3][8] = 50,
- [0][1][1][0][5][8] = 52,
- [0][1][1][0][6][8] = 46,
- [0][1][1][0][9][8] = 46,
- [0][1][1][0][8][8] = 42,
- [0][1][1][0][11][8] = 40,
- [0][1][1][0][2][10] = 68,
- [0][1][1][0][1][10] = 46,
- [0][1][1][0][3][10] = 50,
- [0][1][1][0][5][10] = 52,
- [0][1][1][0][6][10] = 46,
- [0][1][1][0][9][10] = 46,
- [0][1][1][0][8][10] = 42,
- [0][1][1][0][11][10] = 40,
- [0][1][1][0][2][12] = 68,
- [0][1][1][0][1][12] = 46,
- [0][1][1][0][3][12] = 50,
- [0][1][1][0][5][12] = 52,
- [0][1][1][0][6][12] = 46,
- [0][1][1][0][9][12] = 46,
- [0][1][1][0][8][12] = 42,
- [0][1][1][0][11][12] = 40,
- [0][1][1][0][2][14] = 68,
- [0][1][1][0][1][14] = 46,
- [0][1][1][0][3][14] = 50,
- [0][1][1][0][5][14] = 52,
- [0][1][1][0][6][14] = 46,
- [0][1][1][0][9][14] = 46,
- [0][1][1][0][8][14] = 42,
- [0][1][1][0][11][14] = 40,
- [0][1][1][0][2][15] = 68,
- [0][1][1][0][1][15] = 46,
- [0][1][1][0][3][15] = 70,
- [0][1][1][0][5][15] = 68,
- [0][1][1][0][6][15] = 46,
- [0][1][1][0][9][15] = 46,
- [0][1][1][0][8][15] = 42,
- [0][1][1][0][11][15] = 40,
- [0][1][1][0][2][17] = 68,
- [0][1][1][0][1][17] = 46,
- [0][1][1][0][3][17] = 70,
- [0][1][1][0][5][17] = 68,
- [0][1][1][0][6][17] = 46,
- [0][1][1][0][9][17] = 46,
- [0][1][1][0][8][17] = 42,
- [0][1][1][0][11][17] = 40,
- [0][1][1][0][2][19] = 68,
- [0][1][1][0][1][19] = 46,
- [0][1][1][0][3][19] = 70,
- [0][1][1][0][5][19] = 68,
- [0][1][1][0][6][19] = 46,
- [0][1][1][0][9][19] = 46,
- [0][1][1][0][8][19] = 42,
- [0][1][1][0][11][19] = 40,
- [0][1][1][0][2][21] = 68,
- [0][1][1][0][1][21] = 46,
- [0][1][1][0][3][21] = 70,
- [0][1][1][0][5][21] = 68,
- [0][1][1][0][6][21] = 46,
- [0][1][1][0][9][21] = 46,
- [0][1][1][0][8][21] = 42,
- [0][1][1][0][11][21] = 40,
- [0][1][1][0][2][23] = 68,
- [0][1][1][0][1][23] = 46,
- [0][1][1][0][3][23] = 70,
- [0][1][1][0][5][23] = 68,
- [0][1][1][0][6][23] = 46,
- [0][1][1][0][9][23] = 46,
- [0][1][1][0][8][23] = 42,
- [0][1][1][0][11][23] = 40,
- [0][1][1][0][2][25] = 68,
- [0][1][1][0][1][25] = 46,
- [0][1][1][0][3][25] = 70,
- [0][1][1][0][5][25] = 127,
- [0][1][1][0][6][25] = 46,
- [0][1][1][0][9][25] = 127,
- [0][1][1][0][8][25] = 42,
- [0][1][1][0][11][25] = 40,
- [0][1][1][0][2][27] = 68,
- [0][1][1][0][1][27] = 46,
- [0][1][1][0][3][27] = 70,
- [0][1][1][0][5][27] = 127,
- [0][1][1][0][6][27] = 46,
- [0][1][1][0][9][27] = 127,
- [0][1][1][0][8][27] = 42,
- [0][1][1][0][11][27] = 40,
- [0][1][1][0][2][29] = 68,
- [0][1][1][0][1][29] = 46,
- [0][1][1][0][3][29] = 70,
- [0][1][1][0][5][29] = 127,
- [0][1][1][0][6][29] = 46,
- [0][1][1][0][9][29] = 127,
- [0][1][1][0][8][29] = 42,
- [0][1][1][0][11][29] = 40,
- [0][1][1][0][2][31] = 68,
- [0][1][1][0][1][31] = 46,
- [0][1][1][0][3][31] = 70,
- [0][1][1][0][5][31] = 68,
- [0][1][1][0][6][31] = 46,
- [0][1][1][0][9][31] = 46,
- [0][1][1][0][8][31] = 42,
- [0][1][1][0][11][31] = 40,
- [0][1][1][0][2][33] = 68,
- [0][1][1][0][1][33] = 46,
- [0][1][1][0][3][33] = 70,
- [0][1][1][0][5][33] = 68,
- [0][1][1][0][6][33] = 46,
- [0][1][1][0][9][33] = 46,
- [0][1][1][0][8][33] = 42,
- [0][1][1][0][11][33] = 40,
- [0][1][1][0][2][35] = 66,
- [0][1][1][0][1][35] = 46,
- [0][1][1][0][3][35] = 70,
- [0][1][1][0][5][35] = 66,
- [0][1][1][0][6][35] = 46,
- [0][1][1][0][9][35] = 46,
- [0][1][1][0][8][35] = 42,
- [0][1][1][0][11][35] = 40,
- [0][1][1][0][2][37] = 68,
- [0][1][1][0][1][37] = 127,
- [0][1][1][0][3][37] = 70,
- [0][1][1][0][5][37] = 68,
- [0][1][1][0][6][37] = 46,
- [0][1][1][0][9][37] = 68,
- [0][1][1][0][8][37] = 42,
- [0][1][1][0][11][37] = 127,
- [0][1][1][0][2][38] = 76,
- [0][1][1][0][1][38] = 16,
- [0][1][1][0][3][38] = 127,
- [0][1][1][0][5][38] = 76,
- [0][1][1][0][6][38] = 16,
- [0][1][1][0][9][38] = 76,
- [0][1][1][0][8][38] = 42,
- [0][1][1][0][11][38] = 40,
- [0][1][1][0][2][40] = 76,
- [0][1][1][0][1][40] = 16,
- [0][1][1][0][3][40] = 127,
- [0][1][1][0][5][40] = 76,
- [0][1][1][0][6][40] = 16,
- [0][1][1][0][9][40] = 76,
- [0][1][1][0][8][40] = 42,
- [0][1][1][0][11][40] = 40,
- [0][1][1][0][2][42] = 76,
- [0][1][1][0][1][42] = 16,
- [0][1][1][0][3][42] = 127,
- [0][1][1][0][5][42] = 76,
- [0][1][1][0][6][42] = 16,
- [0][1][1][0][9][42] = 76,
- [0][1][1][0][8][42] = 42,
- [0][1][1][0][11][42] = 40,
- [0][1][1][0][2][44] = 76,
- [0][1][1][0][1][44] = 16,
- [0][1][1][0][3][44] = 127,
- [0][1][1][0][5][44] = 76,
- [0][1][1][0][6][44] = 16,
- [0][1][1][0][9][44] = 76,
- [0][1][1][0][8][44] = 42,
- [0][1][1][0][11][44] = 40,
- [0][1][1][0][2][46] = 76,
- [0][1][1][0][1][46] = 16,
- [0][1][1][0][3][46] = 127,
- [0][1][1][0][5][46] = 76,
- [0][1][1][0][6][46] = 16,
- [0][1][1][0][9][46] = 76,
- [0][1][1][0][8][46] = 42,
- [0][1][1][0][11][46] = 40,
- [0][0][2][0][2][0] = 76,
- [0][0][2][0][1][0] = 58,
- [0][0][2][0][3][0] = 62,
- [0][0][2][0][5][0] = 62,
- [0][0][2][0][6][0] = 58,
- [0][0][2][0][9][0] = 58,
- [0][0][2][0][8][0] = 30,
- [0][0][2][0][11][0] = 52,
- [0][0][2][0][2][2] = 76,
- [0][0][2][0][1][2] = 58,
- [0][0][2][0][3][2] = 62,
- [0][0][2][0][5][2] = 62,
- [0][0][2][0][6][2] = 58,
- [0][0][2][0][9][2] = 58,
- [0][0][2][0][8][2] = 30,
- [0][0][2][0][11][2] = 52,
- [0][0][2][0][2][4] = 76,
- [0][0][2][0][1][4] = 58,
- [0][0][2][0][3][4] = 62,
- [0][0][2][0][5][4] = 62,
- [0][0][2][0][6][4] = 58,
- [0][0][2][0][9][4] = 58,
- [0][0][2][0][8][4] = 30,
- [0][0][2][0][11][4] = 52,
- [0][0][2][0][2][6] = 76,
- [0][0][2][0][1][6] = 58,
- [0][0][2][0][3][6] = 62,
- [0][0][2][0][5][6] = 62,
- [0][0][2][0][6][6] = 54,
- [0][0][2][0][9][6] = 58,
- [0][0][2][0][8][6] = 30,
- [0][0][2][0][11][6] = 52,
- [0][0][2][0][2][8] = 76,
- [0][0][2][0][1][8] = 58,
- [0][0][2][0][3][8] = 62,
- [0][0][2][0][5][8] = 64,
- [0][0][2][0][6][8] = 58,
- [0][0][2][0][9][8] = 58,
- [0][0][2][0][8][8] = 54,
- [0][0][2][0][11][8] = 52,
- [0][0][2][0][2][10] = 76,
- [0][0][2][0][1][10] = 58,
- [0][0][2][0][3][10] = 62,
- [0][0][2][0][5][10] = 64,
- [0][0][2][0][6][10] = 58,
- [0][0][2][0][9][10] = 58,
- [0][0][2][0][8][10] = 54,
- [0][0][2][0][11][10] = 52,
- [0][0][2][0][2][12] = 76,
- [0][0][2][0][1][12] = 58,
- [0][0][2][0][3][12] = 62,
- [0][0][2][0][5][12] = 64,
- [0][0][2][0][6][12] = 58,
- [0][0][2][0][9][12] = 58,
- [0][0][2][0][8][12] = 54,
- [0][0][2][0][11][12] = 52,
- [0][0][2][0][2][14] = 76,
- [0][0][2][0][1][14] = 58,
- [0][0][2][0][3][14] = 62,
- [0][0][2][0][5][14] = 64,
- [0][0][2][0][6][14] = 58,
- [0][0][2][0][9][14] = 58,
- [0][0][2][0][8][14] = 54,
- [0][0][2][0][11][14] = 52,
- [0][0][2][0][2][15] = 74,
- [0][0][2][0][1][15] = 58,
- [0][0][2][0][3][15] = 76,
- [0][0][2][0][5][15] = 74,
- [0][0][2][0][6][15] = 58,
- [0][0][2][0][9][15] = 58,
- [0][0][2][0][8][15] = 54,
- [0][0][2][0][11][15] = 52,
- [0][0][2][0][2][17] = 76,
- [0][0][2][0][1][17] = 58,
- [0][0][2][0][3][17] = 76,
- [0][0][2][0][5][17] = 76,
- [0][0][2][0][6][17] = 58,
- [0][0][2][0][9][17] = 58,
- [0][0][2][0][8][17] = 54,
- [0][0][2][0][11][17] = 52,
- [0][0][2][0][2][19] = 76,
- [0][0][2][0][1][19] = 58,
- [0][0][2][0][3][19] = 76,
- [0][0][2][0][5][19] = 76,
- [0][0][2][0][6][19] = 58,
- [0][0][2][0][9][19] = 58,
- [0][0][2][0][8][19] = 54,
- [0][0][2][0][11][19] = 52,
- [0][0][2][0][2][21] = 76,
- [0][0][2][0][1][21] = 58,
- [0][0][2][0][3][21] = 76,
- [0][0][2][0][5][21] = 76,
- [0][0][2][0][6][21] = 58,
- [0][0][2][0][9][21] = 58,
- [0][0][2][0][8][21] = 54,
- [0][0][2][0][11][21] = 52,
- [0][0][2][0][2][23] = 76,
- [0][0][2][0][1][23] = 58,
- [0][0][2][0][3][23] = 76,
- [0][0][2][0][5][23] = 76,
- [0][0][2][0][6][23] = 58,
- [0][0][2][0][9][23] = 58,
- [0][0][2][0][8][23] = 54,
- [0][0][2][0][11][23] = 52,
- [0][0][2][0][2][25] = 76,
- [0][0][2][0][1][25] = 58,
- [0][0][2][0][3][25] = 76,
- [0][0][2][0][5][25] = 127,
- [0][0][2][0][6][25] = 58,
- [0][0][2][0][9][25] = 127,
- [0][0][2][0][8][25] = 54,
- [0][0][2][0][11][25] = 52,
- [0][0][2][0][2][27] = 76,
- [0][0][2][0][1][27] = 58,
- [0][0][2][0][3][27] = 76,
- [0][0][2][0][5][27] = 127,
- [0][0][2][0][6][27] = 58,
- [0][0][2][0][9][27] = 127,
- [0][0][2][0][8][27] = 54,
- [0][0][2][0][11][27] = 52,
- [0][0][2][0][2][29] = 76,
- [0][0][2][0][1][29] = 58,
- [0][0][2][0][3][29] = 76,
- [0][0][2][0][5][29] = 127,
- [0][0][2][0][6][29] = 58,
- [0][0][2][0][9][29] = 127,
- [0][0][2][0][8][29] = 54,
- [0][0][2][0][11][29] = 52,
- [0][0][2][0][2][31] = 76,
- [0][0][2][0][1][31] = 58,
- [0][0][2][0][3][31] = 76,
- [0][0][2][0][5][31] = 76,
- [0][0][2][0][6][31] = 58,
- [0][0][2][0][9][31] = 58,
- [0][0][2][0][8][31] = 54,
- [0][0][2][0][11][31] = 52,
- [0][0][2][0][2][33] = 76,
- [0][0][2][0][1][33] = 58,
- [0][0][2][0][3][33] = 76,
- [0][0][2][0][5][33] = 76,
- [0][0][2][0][6][33] = 58,
- [0][0][2][0][9][33] = 58,
- [0][0][2][0][8][33] = 54,
- [0][0][2][0][11][33] = 52,
- [0][0][2][0][2][35] = 70,
- [0][0][2][0][1][35] = 58,
- [0][0][2][0][3][35] = 76,
- [0][0][2][0][5][35] = 70,
- [0][0][2][0][6][35] = 58,
- [0][0][2][0][9][35] = 58,
- [0][0][2][0][8][35] = 54,
- [0][0][2][0][11][35] = 52,
- [0][0][2][0][2][37] = 76,
- [0][0][2][0][1][37] = 127,
- [0][0][2][0][3][37] = 76,
- [0][0][2][0][5][37] = 76,
- [0][0][2][0][6][37] = 58,
- [0][0][2][0][9][37] = 76,
- [0][0][2][0][8][37] = 54,
- [0][0][2][0][11][37] = 127,
- [0][0][2][0][2][38] = 76,
- [0][0][2][0][1][38] = 28,
- [0][0][2][0][3][38] = 127,
- [0][0][2][0][5][38] = 76,
- [0][0][2][0][6][38] = 28,
- [0][0][2][0][9][38] = 76,
- [0][0][2][0][8][38] = 54,
- [0][0][2][0][11][38] = 52,
- [0][0][2][0][2][40] = 76,
- [0][0][2][0][1][40] = 28,
- [0][0][2][0][3][40] = 127,
- [0][0][2][0][5][40] = 76,
- [0][0][2][0][6][40] = 28,
- [0][0][2][0][9][40] = 76,
- [0][0][2][0][8][40] = 54,
- [0][0][2][0][11][40] = 52,
- [0][0][2][0][2][42] = 76,
- [0][0][2][0][1][42] = 28,
- [0][0][2][0][3][42] = 127,
- [0][0][2][0][5][42] = 76,
- [0][0][2][0][6][42] = 28,
- [0][0][2][0][9][42] = 76,
- [0][0][2][0][8][42] = 54,
- [0][0][2][0][11][42] = 52,
- [0][0][2][0][2][44] = 76,
- [0][0][2][0][1][44] = 28,
- [0][0][2][0][3][44] = 127,
- [0][0][2][0][5][44] = 76,
- [0][0][2][0][6][44] = 28,
- [0][0][2][0][9][44] = 76,
- [0][0][2][0][8][44] = 54,
- [0][0][2][0][11][44] = 52,
- [0][0][2][0][2][46] = 76,
- [0][0][2][0][1][46] = 28,
- [0][0][2][0][3][46] = 127,
- [0][0][2][0][5][46] = 76,
- [0][0][2][0][6][46] = 28,
- [0][0][2][0][9][46] = 76,
- [0][0][2][0][8][46] = 54,
- [0][0][2][0][11][46] = 52,
- [0][1][2][0][2][0] = 68,
- [0][1][2][0][1][0] = 46,
- [0][1][2][0][3][0] = 50,
- [0][1][2][0][5][0] = 40,
- [0][1][2][0][6][0] = 46,
- [0][1][2][0][9][0] = 46,
- [0][1][2][0][8][0] = 18,
- [0][1][2][0][11][0] = 40,
- [0][1][2][0][2][2] = 68,
- [0][1][2][0][1][2] = 46,
- [0][1][2][0][3][2] = 50,
- [0][1][2][0][5][2] = 40,
- [0][1][2][0][6][2] = 46,
- [0][1][2][0][9][2] = 46,
- [0][1][2][0][8][2] = 18,
- [0][1][2][0][11][2] = 40,
- [0][1][2][0][2][4] = 68,
- [0][1][2][0][1][4] = 46,
- [0][1][2][0][3][4] = 50,
- [0][1][2][0][5][4] = 40,
- [0][1][2][0][6][4] = 46,
- [0][1][2][0][9][4] = 46,
- [0][1][2][0][8][4] = 18,
- [0][1][2][0][11][4] = 40,
- [0][1][2][0][2][6] = 68,
- [0][1][2][0][1][6] = 46,
- [0][1][2][0][3][6] = 50,
- [0][1][2][0][5][6] = 40,
- [0][1][2][0][6][6] = 36,
- [0][1][2][0][9][6] = 46,
- [0][1][2][0][8][6] = 18,
- [0][1][2][0][11][6] = 40,
- [0][1][2][0][2][8] = 68,
- [0][1][2][0][1][8] = 46,
- [0][1][2][0][3][8] = 50,
- [0][1][2][0][5][8] = 52,
- [0][1][2][0][6][8] = 46,
- [0][1][2][0][9][8] = 46,
- [0][1][2][0][8][8] = 42,
- [0][1][2][0][11][8] = 40,
- [0][1][2][0][2][10] = 68,
- [0][1][2][0][1][10] = 46,
- [0][1][2][0][3][10] = 50,
- [0][1][2][0][5][10] = 52,
- [0][1][2][0][6][10] = 46,
- [0][1][2][0][9][10] = 46,
- [0][1][2][0][8][10] = 42,
- [0][1][2][0][11][10] = 40,
- [0][1][2][0][2][12] = 68,
- [0][1][2][0][1][12] = 46,
- [0][1][2][0][3][12] = 50,
- [0][1][2][0][5][12] = 52,
- [0][1][2][0][6][12] = 46,
- [0][1][2][0][9][12] = 46,
- [0][1][2][0][8][12] = 42,
- [0][1][2][0][11][12] = 40,
- [0][1][2][0][2][14] = 68,
- [0][1][2][0][1][14] = 46,
- [0][1][2][0][3][14] = 50,
- [0][1][2][0][5][14] = 52,
- [0][1][2][0][6][14] = 46,
- [0][1][2][0][9][14] = 46,
- [0][1][2][0][8][14] = 42,
- [0][1][2][0][11][14] = 40,
- [0][1][2][0][2][15] = 68,
- [0][1][2][0][1][15] = 46,
- [0][1][2][0][3][15] = 70,
- [0][1][2][0][5][15] = 68,
- [0][1][2][0][6][15] = 46,
- [0][1][2][0][9][15] = 46,
- [0][1][2][0][8][15] = 42,
- [0][1][2][0][11][15] = 40,
- [0][1][2][0][2][17] = 68,
- [0][1][2][0][1][17] = 46,
- [0][1][2][0][3][17] = 70,
- [0][1][2][0][5][17] = 68,
- [0][1][2][0][6][17] = 46,
- [0][1][2][0][9][17] = 46,
- [0][1][2][0][8][17] = 42,
- [0][1][2][0][11][17] = 40,
- [0][1][2][0][2][19] = 68,
- [0][1][2][0][1][19] = 46,
- [0][1][2][0][3][19] = 70,
- [0][1][2][0][5][19] = 68,
- [0][1][2][0][6][19] = 46,
- [0][1][2][0][9][19] = 46,
- [0][1][2][0][8][19] = 42,
- [0][1][2][0][11][19] = 40,
- [0][1][2][0][2][21] = 68,
- [0][1][2][0][1][21] = 46,
- [0][1][2][0][3][21] = 70,
- [0][1][2][0][5][21] = 68,
- [0][1][2][0][6][21] = 46,
- [0][1][2][0][9][21] = 46,
- [0][1][2][0][8][21] = 42,
- [0][1][2][0][11][21] = 40,
- [0][1][2][0][2][23] = 68,
- [0][1][2][0][1][23] = 46,
- [0][1][2][0][3][23] = 70,
- [0][1][2][0][5][23] = 68,
- [0][1][2][0][6][23] = 46,
- [0][1][2][0][9][23] = 46,
- [0][1][2][0][8][23] = 42,
- [0][1][2][0][11][23] = 40,
- [0][1][2][0][2][25] = 68,
- [0][1][2][0][1][25] = 46,
- [0][1][2][0][3][25] = 70,
- [0][1][2][0][5][25] = 127,
- [0][1][2][0][6][25] = 46,
- [0][1][2][0][9][25] = 127,
- [0][1][2][0][8][25] = 42,
- [0][1][2][0][11][25] = 40,
- [0][1][2][0][2][27] = 68,
- [0][1][2][0][1][27] = 46,
- [0][1][2][0][3][27] = 70,
- [0][1][2][0][5][27] = 127,
- [0][1][2][0][6][27] = 46,
- [0][1][2][0][9][27] = 127,
- [0][1][2][0][8][27] = 42,
- [0][1][2][0][11][27] = 40,
- [0][1][2][0][2][29] = 68,
- [0][1][2][0][1][29] = 46,
- [0][1][2][0][3][29] = 70,
- [0][1][2][0][5][29] = 127,
- [0][1][2][0][6][29] = 46,
- [0][1][2][0][9][29] = 127,
- [0][1][2][0][8][29] = 42,
- [0][1][2][0][11][29] = 40,
- [0][1][2][0][2][31] = 68,
- [0][1][2][0][1][31] = 46,
- [0][1][2][0][3][31] = 70,
- [0][1][2][0][5][31] = 68,
- [0][1][2][0][6][31] = 46,
- [0][1][2][0][9][31] = 46,
- [0][1][2][0][8][31] = 42,
- [0][1][2][0][11][31] = 40,
- [0][1][2][0][2][33] = 68,
- [0][1][2][0][1][33] = 46,
- [0][1][2][0][3][33] = 70,
- [0][1][2][0][5][33] = 68,
- [0][1][2][0][6][33] = 46,
- [0][1][2][0][9][33] = 46,
- [0][1][2][0][8][33] = 42,
- [0][1][2][0][11][33] = 40,
- [0][1][2][0][2][35] = 64,
- [0][1][2][0][1][35] = 46,
- [0][1][2][0][3][35] = 70,
- [0][1][2][0][5][35] = 64,
- [0][1][2][0][6][35] = 46,
- [0][1][2][0][9][35] = 46,
- [0][1][2][0][8][35] = 42,
- [0][1][2][0][11][35] = 40,
- [0][1][2][0][2][37] = 68,
- [0][1][2][0][1][37] = 127,
- [0][1][2][0][3][37] = 70,
- [0][1][2][0][5][37] = 68,
- [0][1][2][0][6][37] = 46,
- [0][1][2][0][9][37] = 68,
- [0][1][2][0][8][37] = 42,
- [0][1][2][0][11][37] = 127,
- [0][1][2][0][2][38] = 76,
- [0][1][2][0][1][38] = 16,
- [0][1][2][0][3][38] = 127,
- [0][1][2][0][5][38] = 76,
- [0][1][2][0][6][38] = 16,
- [0][1][2][0][9][38] = 76,
- [0][1][2][0][8][38] = 42,
- [0][1][2][0][11][38] = 40,
- [0][1][2][0][2][40] = 76,
- [0][1][2][0][1][40] = 16,
- [0][1][2][0][3][40] = 127,
- [0][1][2][0][5][40] = 76,
- [0][1][2][0][6][40] = 16,
- [0][1][2][0][9][40] = 76,
- [0][1][2][0][8][40] = 42,
- [0][1][2][0][11][40] = 40,
- [0][1][2][0][2][42] = 76,
- [0][1][2][0][1][42] = 16,
- [0][1][2][0][3][42] = 127,
- [0][1][2][0][5][42] = 76,
- [0][1][2][0][6][42] = 16,
- [0][1][2][0][9][42] = 76,
- [0][1][2][0][8][42] = 42,
- [0][1][2][0][11][42] = 40,
- [0][1][2][0][2][44] = 76,
- [0][1][2][0][1][44] = 16,
- [0][1][2][0][3][44] = 127,
- [0][1][2][0][5][44] = 76,
- [0][1][2][0][6][44] = 16,
- [0][1][2][0][9][44] = 76,
- [0][1][2][0][8][44] = 42,
- [0][1][2][0][11][44] = 40,
- [0][1][2][0][2][46] = 76,
- [0][1][2][0][1][46] = 16,
- [0][1][2][0][3][46] = 127,
- [0][1][2][0][5][46] = 76,
- [0][1][2][0][6][46] = 16,
- [0][1][2][0][9][46] = 76,
- [0][1][2][0][8][46] = 42,
- [0][1][2][0][11][46] = 40,
- [0][1][2][1][2][0] = 68,
- [0][1][2][1][1][0] = 34,
- [0][1][2][1][3][0] = 50,
- [0][1][2][1][5][0] = 38,
- [0][1][2][1][6][0] = 34,
- [0][1][2][1][9][0] = 34,
- [0][1][2][1][8][0] = 6,
- [0][1][2][1][11][0] = 28,
- [0][1][2][1][2][2] = 68,
- [0][1][2][1][1][2] = 34,
- [0][1][2][1][3][2] = 50,
- [0][1][2][1][5][2] = 38,
- [0][1][2][1][6][2] = 34,
- [0][1][2][1][9][2] = 34,
- [0][1][2][1][8][2] = 6,
- [0][1][2][1][11][2] = 28,
- [0][1][2][1][2][4] = 68,
- [0][1][2][1][1][4] = 34,
- [0][1][2][1][3][4] = 50,
- [0][1][2][1][5][4] = 38,
- [0][1][2][1][6][4] = 34,
- [0][1][2][1][9][4] = 34,
- [0][1][2][1][8][4] = 6,
- [0][1][2][1][11][4] = 28,
- [0][1][2][1][2][6] = 68,
- [0][1][2][1][1][6] = 34,
- [0][1][2][1][3][6] = 50,
- [0][1][2][1][5][6] = 38,
- [0][1][2][1][6][6] = 34,
- [0][1][2][1][9][6] = 34,
- [0][1][2][1][8][6] = 6,
- [0][1][2][1][11][6] = 28,
- [0][1][2][1][2][8] = 68,
- [0][1][2][1][1][8] = 34,
- [0][1][2][1][3][8] = 50,
- [0][1][2][1][5][8] = 38,
- [0][1][2][1][6][8] = 34,
- [0][1][2][1][9][8] = 34,
- [0][1][2][1][8][8] = 30,
- [0][1][2][1][11][8] = 28,
- [0][1][2][1][2][10] = 68,
- [0][1][2][1][1][10] = 34,
- [0][1][2][1][3][10] = 50,
- [0][1][2][1][5][10] = 38,
- [0][1][2][1][6][10] = 34,
- [0][1][2][1][9][10] = 34,
- [0][1][2][1][8][10] = 30,
- [0][1][2][1][11][10] = 28,
- [0][1][2][1][2][12] = 68,
- [0][1][2][1][1][12] = 34,
- [0][1][2][1][3][12] = 50,
- [0][1][2][1][5][12] = 38,
- [0][1][2][1][6][12] = 34,
- [0][1][2][1][9][12] = 34,
- [0][1][2][1][8][12] = 30,
- [0][1][2][1][11][12] = 28,
- [0][1][2][1][2][14] = 68,
- [0][1][2][1][1][14] = 34,
- [0][1][2][1][3][14] = 50,
- [0][1][2][1][5][14] = 38,
- [0][1][2][1][6][14] = 34,
- [0][1][2][1][9][14] = 34,
- [0][1][2][1][8][14] = 30,
- [0][1][2][1][11][14] = 28,
- [0][1][2][1][2][15] = 68,
- [0][1][2][1][1][15] = 34,
- [0][1][2][1][3][15] = 70,
- [0][1][2][1][5][15] = 62,
- [0][1][2][1][6][15] = 34,
- [0][1][2][1][9][15] = 34,
- [0][1][2][1][8][15] = 30,
- [0][1][2][1][11][15] = 28,
- [0][1][2][1][2][17] = 68,
- [0][1][2][1][1][17] = 34,
- [0][1][2][1][3][17] = 70,
- [0][1][2][1][5][17] = 62,
- [0][1][2][1][6][17] = 34,
- [0][1][2][1][9][17] = 34,
- [0][1][2][1][8][17] = 30,
- [0][1][2][1][11][17] = 28,
- [0][1][2][1][2][19] = 68,
- [0][1][2][1][1][19] = 34,
- [0][1][2][1][3][19] = 70,
- [0][1][2][1][5][19] = 62,
- [0][1][2][1][6][19] = 34,
- [0][1][2][1][9][19] = 34,
- [0][1][2][1][8][19] = 30,
- [0][1][2][1][11][19] = 28,
- [0][1][2][1][2][21] = 68,
- [0][1][2][1][1][21] = 34,
- [0][1][2][1][3][21] = 70,
- [0][1][2][1][5][21] = 62,
- [0][1][2][1][6][21] = 34,
- [0][1][2][1][9][21] = 34,
- [0][1][2][1][8][21] = 30,
- [0][1][2][1][11][21] = 28,
- [0][1][2][1][2][23] = 68,
- [0][1][2][1][1][23] = 34,
- [0][1][2][1][3][23] = 70,
- [0][1][2][1][5][23] = 62,
- [0][1][2][1][6][23] = 34,
- [0][1][2][1][9][23] = 34,
- [0][1][2][1][8][23] = 30,
- [0][1][2][1][11][23] = 28,
- [0][1][2][1][2][25] = 68,
- [0][1][2][1][1][25] = 34,
- [0][1][2][1][3][25] = 70,
- [0][1][2][1][5][25] = 127,
- [0][1][2][1][6][25] = 34,
- [0][1][2][1][9][25] = 127,
- [0][1][2][1][8][25] = 30,
- [0][1][2][1][11][25] = 28,
- [0][1][2][1][2][27] = 68,
- [0][1][2][1][1][27] = 34,
- [0][1][2][1][3][27] = 70,
- [0][1][2][1][5][27] = 127,
- [0][1][2][1][6][27] = 34,
- [0][1][2][1][9][27] = 127,
- [0][1][2][1][8][27] = 30,
- [0][1][2][1][11][27] = 28,
- [0][1][2][1][2][29] = 68,
- [0][1][2][1][1][29] = 34,
- [0][1][2][1][3][29] = 70,
- [0][1][2][1][5][29] = 127,
- [0][1][2][1][6][29] = 34,
- [0][1][2][1][9][29] = 127,
- [0][1][2][1][8][29] = 30,
- [0][1][2][1][11][29] = 28,
- [0][1][2][1][2][31] = 68,
- [0][1][2][1][1][31] = 34,
- [0][1][2][1][3][31] = 70,
- [0][1][2][1][5][31] = 62,
- [0][1][2][1][6][31] = 34,
- [0][1][2][1][9][31] = 34,
- [0][1][2][1][8][31] = 30,
- [0][1][2][1][11][31] = 28,
- [0][1][2][1][2][33] = 68,
- [0][1][2][1][1][33] = 34,
- [0][1][2][1][3][33] = 70,
- [0][1][2][1][5][33] = 62,
- [0][1][2][1][6][33] = 34,
- [0][1][2][1][9][33] = 34,
- [0][1][2][1][8][33] = 30,
- [0][1][2][1][11][33] = 28,
- [0][1][2][1][2][35] = 64,
- [0][1][2][1][1][35] = 34,
- [0][1][2][1][3][35] = 70,
- [0][1][2][1][5][35] = 62,
- [0][1][2][1][6][35] = 34,
- [0][1][2][1][9][35] = 34,
- [0][1][2][1][8][35] = 30,
- [0][1][2][1][11][35] = 28,
- [0][1][2][1][2][37] = 68,
- [0][1][2][1][1][37] = 127,
- [0][1][2][1][3][37] = 70,
- [0][1][2][1][5][37] = 62,
- [0][1][2][1][6][37] = 34,
- [0][1][2][1][9][37] = 68,
- [0][1][2][1][8][37] = 30,
- [0][1][2][1][11][37] = 127,
- [0][1][2][1][2][38] = 76,
- [0][1][2][1][1][38] = 4,
- [0][1][2][1][3][38] = 127,
- [0][1][2][1][5][38] = 76,
- [0][1][2][1][6][38] = 4,
- [0][1][2][1][9][38] = 76,
- [0][1][2][1][8][38] = 30,
- [0][1][2][1][11][38] = 28,
- [0][1][2][1][2][40] = 76,
- [0][1][2][1][1][40] = 4,
- [0][1][2][1][3][40] = 127,
- [0][1][2][1][5][40] = 76,
- [0][1][2][1][6][40] = 4,
- [0][1][2][1][9][40] = 76,
- [0][1][2][1][8][40] = 30,
- [0][1][2][1][11][40] = 28,
- [0][1][2][1][2][42] = 76,
- [0][1][2][1][1][42] = 4,
- [0][1][2][1][3][42] = 127,
- [0][1][2][1][5][42] = 76,
- [0][1][2][1][6][42] = 4,
- [0][1][2][1][9][42] = 76,
- [0][1][2][1][8][42] = 30,
- [0][1][2][1][11][42] = 28,
- [0][1][2][1][2][44] = 76,
- [0][1][2][1][1][44] = 4,
- [0][1][2][1][3][44] = 127,
- [0][1][2][1][5][44] = 76,
- [0][1][2][1][6][44] = 4,
- [0][1][2][1][9][44] = 76,
- [0][1][2][1][8][44] = 30,
- [0][1][2][1][11][44] = 28,
- [0][1][2][1][2][46] = 76,
- [0][1][2][1][1][46] = 4,
- [0][1][2][1][3][46] = 127,
- [0][1][2][1][5][46] = 76,
- [0][1][2][1][6][46] = 4,
- [0][1][2][1][9][46] = 76,
- [0][1][2][1][8][46] = 30,
- [0][1][2][1][11][46] = 28,
- [1][0][2][0][2][1] = 68,
- [1][0][2][0][1][1] = 64,
- [1][0][2][0][3][1] = 62,
- [1][0][2][0][5][1] = 64,
- [1][0][2][0][6][1] = 64,
- [1][0][2][0][9][1] = 64,
- [1][0][2][0][8][1] = 30,
- [1][0][2][0][11][1] = 52,
- [1][0][2][0][2][5] = 72,
- [1][0][2][0][1][5] = 64,
- [1][0][2][0][3][5] = 62,
- [1][0][2][0][5][5] = 64,
- [1][0][2][0][6][5] = 60,
- [1][0][2][0][9][5] = 64,
- [1][0][2][0][8][5] = 30,
- [1][0][2][0][11][5] = 52,
- [1][0][2][0][2][9] = 72,
- [1][0][2][0][1][9] = 64,
- [1][0][2][0][3][9] = 62,
- [1][0][2][0][5][9] = 64,
- [1][0][2][0][6][9] = 64,
- [1][0][2][0][9][9] = 64,
- [1][0][2][0][8][9] = 54,
- [1][0][2][0][11][9] = 52,
- [1][0][2][0][2][13] = 66,
- [1][0][2][0][1][13] = 64,
- [1][0][2][0][3][13] = 62,
- [1][0][2][0][5][13] = 64,
- [1][0][2][0][6][13] = 64,
- [1][0][2][0][9][13] = 64,
- [1][0][2][0][8][13] = 54,
- [1][0][2][0][11][13] = 52,
- [1][0][2][0][2][16] = 62,
- [1][0][2][0][1][16] = 64,
- [1][0][2][0][3][16] = 72,
- [1][0][2][0][5][16] = 62,
- [1][0][2][0][6][16] = 64,
- [1][0][2][0][9][16] = 64,
- [1][0][2][0][8][16] = 54,
- [1][0][2][0][11][16] = 52,
- [1][0][2][0][2][20] = 72,
- [1][0][2][0][1][20] = 64,
- [1][0][2][0][3][20] = 72,
- [1][0][2][0][5][20] = 72,
- [1][0][2][0][6][20] = 64,
- [1][0][2][0][9][20] = 64,
- [1][0][2][0][8][20] = 54,
- [1][0][2][0][11][20] = 52,
- [1][0][2][0][2][24] = 72,
- [1][0][2][0][1][24] = 64,
- [1][0][2][0][3][24] = 72,
- [1][0][2][0][5][24] = 127,
- [1][0][2][0][6][24] = 64,
- [1][0][2][0][9][24] = 127,
- [1][0][2][0][8][24] = 54,
- [1][0][2][0][11][24] = 52,
- [1][0][2][0][2][28] = 72,
- [1][0][2][0][1][28] = 64,
- [1][0][2][0][3][28] = 72,
- [1][0][2][0][5][28] = 127,
- [1][0][2][0][6][28] = 64,
- [1][0][2][0][9][28] = 127,
- [1][0][2][0][8][28] = 54,
- [1][0][2][0][11][28] = 52,
- [1][0][2][0][2][32] = 72,
- [1][0][2][0][1][32] = 64,
- [1][0][2][0][3][32] = 72,
- [1][0][2][0][5][32] = 72,
- [1][0][2][0][6][32] = 64,
- [1][0][2][0][9][32] = 64,
- [1][0][2][0][8][32] = 54,
- [1][0][2][0][11][32] = 52,
- [1][0][2][0][2][36] = 72,
- [1][0][2][0][1][36] = 127,
- [1][0][2][0][3][36] = 72,
- [1][0][2][0][5][36] = 72,
- [1][0][2][0][6][36] = 64,
- [1][0][2][0][9][36] = 72,
- [1][0][2][0][8][36] = 54,
- [1][0][2][0][11][36] = 127,
- [1][0][2][0][2][39] = 72,
- [1][0][2][0][1][39] = 28,
- [1][0][2][0][3][39] = 127,
- [1][0][2][0][5][39] = 72,
- [1][0][2][0][6][39] = 28,
- [1][0][2][0][9][39] = 72,
- [1][0][2][0][8][39] = 54,
- [1][0][2][0][11][39] = 52,
- [1][0][2][0][2][43] = 72,
- [1][0][2][0][1][43] = 28,
- [1][0][2][0][3][43] = 127,
- [1][0][2][0][5][43] = 72,
- [1][0][2][0][6][43] = 28,
- [1][0][2][0][9][43] = 72,
- [1][0][2][0][8][43] = 54,
- [1][0][2][0][11][43] = 52,
- [1][1][2][0][2][1] = 58,
- [1][1][2][0][1][1] = 52,
- [1][1][2][0][3][1] = 50,
- [1][1][2][0][5][1] = 52,
- [1][1][2][0][6][1] = 52,
- [1][1][2][0][9][1] = 52,
- [1][1][2][0][8][1] = 18,
- [1][1][2][0][11][1] = 40,
- [1][1][2][0][2][5] = 72,
- [1][1][2][0][1][5] = 52,
- [1][1][2][0][3][5] = 50,
- [1][1][2][0][5][5] = 52,
- [1][1][2][0][6][5] = 46,
- [1][1][2][0][9][5] = 52,
- [1][1][2][0][8][5] = 18,
- [1][1][2][0][11][5] = 40,
- [1][1][2][0][2][9] = 72,
- [1][1][2][0][1][9] = 52,
- [1][1][2][0][3][9] = 50,
- [1][1][2][0][5][9] = 52,
- [1][1][2][0][6][9] = 52,
- [1][1][2][0][9][9] = 52,
- [1][1][2][0][8][9] = 42,
- [1][1][2][0][11][9] = 40,
- [1][1][2][0][2][13] = 58,
- [1][1][2][0][1][13] = 52,
- [1][1][2][0][3][13] = 50,
- [1][1][2][0][5][13] = 52,
- [1][1][2][0][6][13] = 52,
- [1][1][2][0][9][13] = 52,
- [1][1][2][0][8][13] = 42,
- [1][1][2][0][11][13] = 40,
- [1][1][2][0][2][16] = 56,
- [1][1][2][0][1][16] = 52,
- [1][1][2][0][3][16] = 72,
- [1][1][2][0][5][16] = 56,
- [1][1][2][0][6][16] = 52,
- [1][1][2][0][9][16] = 52,
- [1][1][2][0][8][16] = 42,
- [1][1][2][0][11][16] = 40,
- [1][1][2][0][2][20] = 72,
- [1][1][2][0][1][20] = 52,
- [1][1][2][0][3][20] = 72,
- [1][1][2][0][5][20] = 72,
- [1][1][2][0][6][20] = 52,
- [1][1][2][0][9][20] = 52,
- [1][1][2][0][8][20] = 42,
- [1][1][2][0][11][20] = 40,
- [1][1][2][0][2][24] = 72,
- [1][1][2][0][1][24] = 52,
- [1][1][2][0][3][24] = 72,
- [1][1][2][0][5][24] = 127,
- [1][1][2][0][6][24] = 52,
- [1][1][2][0][9][24] = 127,
- [1][1][2][0][8][24] = 42,
- [1][1][2][0][11][24] = 40,
- [1][1][2][0][2][28] = 72,
- [1][1][2][0][1][28] = 52,
- [1][1][2][0][3][28] = 72,
- [1][1][2][0][5][28] = 127,
- [1][1][2][0][6][28] = 52,
- [1][1][2][0][9][28] = 127,
- [1][1][2][0][8][28] = 42,
- [1][1][2][0][11][28] = 40,
- [1][1][2][0][2][32] = 68,
- [1][1][2][0][1][32] = 52,
- [1][1][2][0][3][32] = 72,
- [1][1][2][0][5][32] = 68,
- [1][1][2][0][6][32] = 52,
- [1][1][2][0][9][32] = 52,
- [1][1][2][0][8][32] = 42,
- [1][1][2][0][11][32] = 40,
- [1][1][2][0][2][36] = 72,
- [1][1][2][0][1][36] = 127,
- [1][1][2][0][3][36] = 72,
- [1][1][2][0][5][36] = 72,
- [1][1][2][0][6][36] = 52,
- [1][1][2][0][9][36] = 72,
- [1][1][2][0][8][36] = 42,
- [1][1][2][0][11][36] = 127,
- [1][1][2][0][2][39] = 72,
- [1][1][2][0][1][39] = 16,
- [1][1][2][0][3][39] = 127,
- [1][1][2][0][5][39] = 72,
- [1][1][2][0][6][39] = 16,
- [1][1][2][0][9][39] = 72,
- [1][1][2][0][8][39] = 42,
- [1][1][2][0][11][39] = 40,
- [1][1][2][0][2][43] = 72,
- [1][1][2][0][1][43] = 16,
- [1][1][2][0][3][43] = 127,
- [1][1][2][0][5][43] = 72,
- [1][1][2][0][6][43] = 16,
- [1][1][2][0][9][43] = 72,
- [1][1][2][0][8][43] = 42,
- [1][1][2][0][11][43] = 40,
- [1][1][2][1][2][1] = 58,
- [1][1][2][1][1][1] = 40,
- [1][1][2][1][3][1] = 50,
- [1][1][2][1][5][1] = 40,
- [1][1][2][1][6][1] = 40,
- [1][1][2][1][9][1] = 40,
- [1][1][2][1][8][1] = 6,
- [1][1][2][1][11][1] = 28,
- [1][1][2][1][2][5] = 68,
- [1][1][2][1][1][5] = 40,
- [1][1][2][1][3][5] = 50,
- [1][1][2][1][5][5] = 40,
- [1][1][2][1][6][5] = 40,
- [1][1][2][1][9][5] = 40,
- [1][1][2][1][8][5] = 6,
- [1][1][2][1][11][5] = 28,
- [1][1][2][1][2][9] = 68,
- [1][1][2][1][1][9] = 40,
- [1][1][2][1][3][9] = 50,
- [1][1][2][1][5][9] = 40,
- [1][1][2][1][6][9] = 40,
- [1][1][2][1][9][9] = 40,
- [1][1][2][1][8][9] = 30,
- [1][1][2][1][11][9] = 28,
- [1][1][2][1][2][13] = 58,
- [1][1][2][1][1][13] = 40,
- [1][1][2][1][3][13] = 50,
- [1][1][2][1][5][13] = 40,
- [1][1][2][1][6][13] = 40,
- [1][1][2][1][9][13] = 40,
- [1][1][2][1][8][13] = 30,
- [1][1][2][1][11][13] = 28,
- [1][1][2][1][2][16] = 56,
- [1][1][2][1][1][16] = 40,
- [1][1][2][1][3][16] = 72,
- [1][1][2][1][5][16] = 56,
- [1][1][2][1][6][16] = 40,
- [1][1][2][1][9][16] = 40,
- [1][1][2][1][8][16] = 30,
- [1][1][2][1][11][16] = 28,
- [1][1][2][1][2][20] = 68,
- [1][1][2][1][1][20] = 40,
- [1][1][2][1][3][20] = 72,
- [1][1][2][1][5][20] = 68,
- [1][1][2][1][6][20] = 40,
- [1][1][2][1][9][20] = 40,
- [1][1][2][1][8][20] = 30,
- [1][1][2][1][11][20] = 28,
- [1][1][2][1][2][24] = 68,
- [1][1][2][1][1][24] = 40,
- [1][1][2][1][3][24] = 72,
- [1][1][2][1][5][24] = 127,
- [1][1][2][1][6][24] = 40,
- [1][1][2][1][9][24] = 127,
- [1][1][2][1][8][24] = 30,
- [1][1][2][1][11][24] = 28,
- [1][1][2][1][2][28] = 68,
- [1][1][2][1][1][28] = 40,
- [1][1][2][1][3][28] = 72,
- [1][1][2][1][5][28] = 127,
- [1][1][2][1][6][28] = 40,
- [1][1][2][1][9][28] = 127,
- [1][1][2][1][8][28] = 30,
- [1][1][2][1][11][28] = 28,
- [1][1][2][1][2][32] = 68,
- [1][1][2][1][1][32] = 40,
- [1][1][2][1][3][32] = 72,
- [1][1][2][1][5][32] = 68,
- [1][1][2][1][6][32] = 40,
- [1][1][2][1][9][32] = 40,
- [1][1][2][1][8][32] = 30,
- [1][1][2][1][11][32] = 28,
- [1][1][2][1][2][36] = 68,
- [1][1][2][1][1][36] = 127,
- [1][1][2][1][3][36] = 72,
- [1][1][2][1][5][36] = 68,
- [1][1][2][1][6][36] = 40,
- [1][1][2][1][9][36] = 68,
- [1][1][2][1][8][36] = 30,
- [1][1][2][1][11][36] = 127,
- [1][1][2][1][2][39] = 72,
- [1][1][2][1][1][39] = 4,
- [1][1][2][1][3][39] = 127,
- [1][1][2][1][5][39] = 72,
- [1][1][2][1][6][39] = 4,
- [1][1][2][1][9][39] = 72,
- [1][1][2][1][8][39] = 30,
- [1][1][2][1][11][39] = 28,
- [1][1][2][1][2][43] = 72,
- [1][1][2][1][1][43] = 4,
- [1][1][2][1][3][43] = 127,
- [1][1][2][1][5][43] = 72,
- [1][1][2][1][6][43] = 4,
- [1][1][2][1][9][43] = 72,
- [1][1][2][1][8][43] = 30,
- [1][1][2][1][11][43] = 28,
- [2][0][2][0][2][3] = 64,
- [2][0][2][0][1][3] = 64,
- [2][0][2][0][3][3] = 64,
- [2][0][2][0][5][3] = 62,
- [2][0][2][0][6][3] = 64,
- [2][0][2][0][9][3] = 64,
- [2][0][2][0][8][3] = 30,
- [2][0][2][0][11][3] = 52,
- [2][0][2][0][2][11] = 64,
- [2][0][2][0][1][11] = 64,
- [2][0][2][0][3][11] = 64,
- [2][0][2][0][5][11] = 62,
- [2][0][2][0][6][11] = 64,
- [2][0][2][0][9][11] = 64,
- [2][0][2][0][8][11] = 54,
- [2][0][2][0][11][11] = 52,
- [2][0][2][0][2][18] = 62,
- [2][0][2][0][1][18] = 64,
- [2][0][2][0][3][18] = 72,
- [2][0][2][0][5][18] = 66,
- [2][0][2][0][6][18] = 64,
- [2][0][2][0][9][18] = 64,
- [2][0][2][0][8][18] = 54,
- [2][0][2][0][11][18] = 52,
- [2][0][2][0][2][26] = 72,
- [2][0][2][0][1][26] = 64,
- [2][0][2][0][3][26] = 72,
- [2][0][2][0][5][26] = 127,
- [2][0][2][0][6][26] = 64,
- [2][0][2][0][9][26] = 127,
- [2][0][2][0][8][26] = 54,
- [2][0][2][0][11][26] = 52,
- [2][0][2][0][2][34] = 72,
- [2][0][2][0][1][34] = 127,
- [2][0][2][0][3][34] = 72,
- [2][0][2][0][5][34] = 72,
- [2][0][2][0][6][34] = 64,
- [2][0][2][0][9][34] = 72,
- [2][0][2][0][8][34] = 54,
- [2][0][2][0][11][34] = 127,
- [2][0][2][0][2][41] = 72,
- [2][0][2][0][1][41] = 28,
- [2][0][2][0][3][41] = 127,
- [2][0][2][0][5][41] = 72,
- [2][0][2][0][6][41] = 28,
- [2][0][2][0][9][41] = 72,
- [2][0][2][0][8][41] = 54,
- [2][0][2][0][11][41] = 52,
- [2][1][2][0][2][3] = 56,
- [2][1][2][0][1][3] = 52,
- [2][1][2][0][3][3] = 52,
- [2][1][2][0][5][3] = 52,
- [2][1][2][0][6][3] = 52,
- [2][1][2][0][9][3] = 52,
- [2][1][2][0][8][3] = 18,
- [2][1][2][0][11][3] = 40,
- [2][1][2][0][2][11] = 56,
- [2][1][2][0][1][11] = 52,
- [2][1][2][0][3][11] = 52,
- [2][1][2][0][5][11] = 52,
- [2][1][2][0][6][11] = 52,
- [2][1][2][0][9][11] = 52,
- [2][1][2][0][8][11] = 42,
- [2][1][2][0][11][11] = 40,
- [2][1][2][0][2][18] = 56,
- [2][1][2][0][1][18] = 52,
- [2][1][2][0][3][18] = 72,
- [2][1][2][0][5][18] = 56,
- [2][1][2][0][6][18] = 52,
- [2][1][2][0][9][18] = 52,
- [2][1][2][0][8][18] = 42,
- [2][1][2][0][11][18] = 40,
- [2][1][2][0][2][26] = 72,
- [2][1][2][0][1][26] = 52,
- [2][1][2][0][3][26] = 72,
- [2][1][2][0][5][26] = 127,
- [2][1][2][0][6][26] = 52,
- [2][1][2][0][9][26] = 127,
- [2][1][2][0][8][26] = 42,
- [2][1][2][0][11][26] = 40,
- [2][1][2][0][2][34] = 72,
- [2][1][2][0][1][34] = 127,
- [2][1][2][0][3][34] = 72,
- [2][1][2][0][5][34] = 72,
- [2][1][2][0][6][34] = 52,
- [2][1][2][0][9][34] = 72,
- [2][1][2][0][8][34] = 42,
- [2][1][2][0][11][34] = 127,
- [2][1][2][0][2][41] = 72,
- [2][1][2][0][1][41] = 16,
- [2][1][2][0][3][41] = 127,
- [2][1][2][0][5][41] = 72,
- [2][1][2][0][6][41] = 16,
- [2][1][2][0][9][41] = 72,
- [2][1][2][0][8][41] = 42,
- [2][1][2][0][11][41] = 40,
- [2][1][2][1][2][3] = 56,
- [2][1][2][1][1][3] = 40,
- [2][1][2][1][3][3] = 52,
- [2][1][2][1][5][3] = 40,
- [2][1][2][1][6][3] = 40,
- [2][1][2][1][9][3] = 40,
- [2][1][2][1][8][3] = 6,
- [2][1][2][1][11][3] = 28,
- [2][1][2][1][2][11] = 56,
- [2][1][2][1][1][11] = 40,
- [2][1][2][1][3][11] = 52,
- [2][1][2][1][5][11] = 40,
- [2][1][2][1][6][11] = 40,
- [2][1][2][1][9][11] = 40,
- [2][1][2][1][8][11] = 30,
- [2][1][2][1][11][11] = 28,
- [2][1][2][1][2][18] = 56,
- [2][1][2][1][1][18] = 40,
- [2][1][2][1][3][18] = 72,
- [2][1][2][1][5][18] = 56,
- [2][1][2][1][6][18] = 40,
- [2][1][2][1][9][18] = 40,
- [2][1][2][1][8][18] = 30,
- [2][1][2][1][11][18] = 28,
- [2][1][2][1][2][26] = 68,
- [2][1][2][1][1][26] = 40,
- [2][1][2][1][3][26] = 72,
- [2][1][2][1][5][26] = 127,
- [2][1][2][1][6][26] = 40,
- [2][1][2][1][9][26] = 127,
- [2][1][2][1][8][26] = 30,
- [2][1][2][1][11][26] = 28,
- [2][1][2][1][2][34] = 68,
- [2][1][2][1][1][34] = 127,
- [2][1][2][1][3][34] = 72,
- [2][1][2][1][5][34] = 68,
- [2][1][2][1][6][34] = 40,
- [2][1][2][1][9][34] = 68,
- [2][1][2][1][8][34] = 30,
- [2][1][2][1][11][34] = 127,
- [2][1][2][1][2][41] = 72,
- [2][1][2][1][1][41] = 4,
- [2][1][2][1][3][41] = 127,
- [2][1][2][1][5][41] = 72,
- [2][1][2][1][6][41] = 4,
- [2][1][2][1][9][41] = 72,
- [2][1][2][1][8][41] = 30,
- [2][1][2][1][11][41] = 28,
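
The removals above drop TX-power-limit entries whose regulatory-domain dimension was indexed with bare numbers; the additions that follow re-add the table keyed by named RTW89_* domain constants (RTW89_FCC, RTW89_ETSI, RTW89_MKK, and so on). Judging from the values, the RTW89_WW ("worldwide") rows appear to hold the per-channel minimum across the listed domains, with 127 apparently serving as a "no entry" sentinel. Below is a minimal, self-contained C sketch of this designated-initializer pattern; all names in it are hypothetical stand-ins, not taken from the driver:

#include <stdio.h>

/* Stand-ins for the RTW89_* regulatory-domain constants. */
enum regd {
	REGD_WW,	/* worldwide fallback row */
	REGD_FCC,
	REGD_ETSI,
	REGD_NUM,
};

#define CH_NUM 2	/* arbitrary channel count for the sketch */

/* limit[domain][channel], mirroring the last two dimensions above */
static const signed char limit[REGD_NUM][CH_NUM] = {
	[REGD_WW][0]   = 30, [REGD_WW][1]   = 52,
	[REGD_FCC][0]  = 76, [REGD_FCC][1]  = 76,
	[REGD_ETSI][0] = 58, [REGD_ETSI][1] = 58,
};

/* Look up a domain's limit, falling back to the worldwide row. */
static signed char lookup(enum regd r, int ch)
{
	signed char v = limit[r][ch];

	return v ? v : limit[REGD_WW][ch];
}

int main(void)
{
	printf("FCC ch0: %d\n", lookup(REGD_FCC, 0));	/* prints 76 */
	printf("WW  ch1: %d\n", lookup(REGD_WW, 1));	/* prints 52 */
	return 0;
}

Named enum indices keep the designated initializers self-documenting and let the compiler size the domain dimension from the enum's terminator, which is presumably the motivation for moving away from magic numbers in the entries that follow.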
+ [0][0][1][0][RTW89_WW][0] = 30,
+ [0][0][1][0][RTW89_WW][2] = 30,
+ [0][0][1][0][RTW89_WW][4] = 30,
+ [0][0][1][0][RTW89_WW][6] = 30,
+ [0][0][1][0][RTW89_WW][8] = 52,
+ [0][0][1][0][RTW89_WW][10] = 52,
+ [0][0][1][0][RTW89_WW][12] = 52,
+ [0][0][1][0][RTW89_WW][14] = 52,
+ [0][0][1][0][RTW89_WW][15] = 52,
+ [0][0][1][0][RTW89_WW][17] = 52,
+ [0][0][1][0][RTW89_WW][19] = 52,
+ [0][0][1][0][RTW89_WW][21] = 52,
+ [0][0][1][0][RTW89_WW][23] = 52,
+ [0][0][1][0][RTW89_WW][25] = 52,
+ [0][0][1][0][RTW89_WW][27] = 52,
+ [0][0][1][0][RTW89_WW][29] = 52,
+ [0][0][1][0][RTW89_WW][31] = 52,
+ [0][0][1][0][RTW89_WW][33] = 52,
+ [0][0][1][0][RTW89_WW][35] = 52,
+ [0][0][1][0][RTW89_WW][37] = 54,
+ [0][0][1][0][RTW89_WW][38] = 28,
+ [0][0][1][0][RTW89_WW][40] = 28,
+ [0][0][1][0][RTW89_WW][42] = 28,
+ [0][0][1][0][RTW89_WW][44] = 28,
+ [0][0][1][0][RTW89_WW][46] = 28,
+ [0][1][1][0][RTW89_WW][0] = 18,
+ [0][1][1][0][RTW89_WW][2] = 18,
+ [0][1][1][0][RTW89_WW][4] = 18,
+ [0][1][1][0][RTW89_WW][6] = 18,
+ [0][1][1][0][RTW89_WW][8] = 40,
+ [0][1][1][0][RTW89_WW][10] = 40,
+ [0][1][1][0][RTW89_WW][12] = 40,
+ [0][1][1][0][RTW89_WW][14] = 40,
+ [0][1][1][0][RTW89_WW][15] = 40,
+ [0][1][1][0][RTW89_WW][17] = 40,
+ [0][1][1][0][RTW89_WW][19] = 40,
+ [0][1][1][0][RTW89_WW][21] = 40,
+ [0][1][1][0][RTW89_WW][23] = 40,
+ [0][1][1][0][RTW89_WW][25] = 40,
+ [0][1][1][0][RTW89_WW][27] = 40,
+ [0][1][1][0][RTW89_WW][29] = 40,
+ [0][1][1][0][RTW89_WW][31] = 40,
+ [0][1][1][0][RTW89_WW][33] = 40,
+ [0][1][1][0][RTW89_WW][35] = 40,
+ [0][1][1][0][RTW89_WW][37] = 42,
+ [0][1][1][0][RTW89_WW][38] = 16,
+ [0][1][1][0][RTW89_WW][40] = 16,
+ [0][1][1][0][RTW89_WW][42] = 16,
+ [0][1][1][0][RTW89_WW][44] = 16,
+ [0][1][1][0][RTW89_WW][46] = 16,
+ [0][0][2][0][RTW89_WW][0] = 30,
+ [0][0][2][0][RTW89_WW][2] = 30,
+ [0][0][2][0][RTW89_WW][4] = 30,
+ [0][0][2][0][RTW89_WW][6] = 30,
+ [0][0][2][0][RTW89_WW][8] = 52,
+ [0][0][2][0][RTW89_WW][10] = 52,
+ [0][0][2][0][RTW89_WW][12] = 52,
+ [0][0][2][0][RTW89_WW][14] = 52,
+ [0][0][2][0][RTW89_WW][15] = 52,
+ [0][0][2][0][RTW89_WW][17] = 52,
+ [0][0][2][0][RTW89_WW][19] = 52,
+ [0][0][2][0][RTW89_WW][21] = 52,
+ [0][0][2][0][RTW89_WW][23] = 52,
+ [0][0][2][0][RTW89_WW][25] = 52,
+ [0][0][2][0][RTW89_WW][27] = 52,
+ [0][0][2][0][RTW89_WW][29] = 52,
+ [0][0][2][0][RTW89_WW][31] = 52,
+ [0][0][2][0][RTW89_WW][33] = 52,
+ [0][0][2][0][RTW89_WW][35] = 52,
+ [0][0][2][0][RTW89_WW][37] = 54,
+ [0][0][2][0][RTW89_WW][38] = 28,
+ [0][0][2][0][RTW89_WW][40] = 28,
+ [0][0][2][0][RTW89_WW][42] = 28,
+ [0][0][2][0][RTW89_WW][44] = 28,
+ [0][0][2][0][RTW89_WW][46] = 28,
+ [0][1][2][0][RTW89_WW][0] = 18,
+ [0][1][2][0][RTW89_WW][2] = 18,
+ [0][1][2][0][RTW89_WW][4] = 18,
+ [0][1][2][0][RTW89_WW][6] = 18,
+ [0][1][2][0][RTW89_WW][8] = 40,
+ [0][1][2][0][RTW89_WW][10] = 40,
+ [0][1][2][0][RTW89_WW][12] = 40,
+ [0][1][2][0][RTW89_WW][14] = 40,
+ [0][1][2][0][RTW89_WW][15] = 40,
+ [0][1][2][0][RTW89_WW][17] = 40,
+ [0][1][2][0][RTW89_WW][19] = 40,
+ [0][1][2][0][RTW89_WW][21] = 40,
+ [0][1][2][0][RTW89_WW][23] = 40,
+ [0][1][2][0][RTW89_WW][25] = 40,
+ [0][1][2][0][RTW89_WW][27] = 40,
+ [0][1][2][0][RTW89_WW][29] = 40,
+ [0][1][2][0][RTW89_WW][31] = 40,
+ [0][1][2][0][RTW89_WW][33] = 40,
+ [0][1][2][0][RTW89_WW][35] = 40,
+ [0][1][2][0][RTW89_WW][37] = 42,
+ [0][1][2][0][RTW89_WW][38] = 16,
+ [0][1][2][0][RTW89_WW][40] = 16,
+ [0][1][2][0][RTW89_WW][42] = 16,
+ [0][1][2][0][RTW89_WW][44] = 16,
+ [0][1][2][0][RTW89_WW][46] = 16,
+ [0][1][2][1][RTW89_WW][0] = 6,
+ [0][1][2][1][RTW89_WW][2] = 6,
+ [0][1][2][1][RTW89_WW][4] = 6,
+ [0][1][2][1][RTW89_WW][6] = 6,
+ [0][1][2][1][RTW89_WW][8] = 28,
+ [0][1][2][1][RTW89_WW][10] = 28,
+ [0][1][2][1][RTW89_WW][12] = 28,
+ [0][1][2][1][RTW89_WW][14] = 28,
+ [0][1][2][1][RTW89_WW][15] = 28,
+ [0][1][2][1][RTW89_WW][17] = 28,
+ [0][1][2][1][RTW89_WW][19] = 28,
+ [0][1][2][1][RTW89_WW][21] = 28,
+ [0][1][2][1][RTW89_WW][23] = 28,
+ [0][1][2][1][RTW89_WW][25] = 28,
+ [0][1][2][1][RTW89_WW][27] = 28,
+ [0][1][2][1][RTW89_WW][29] = 28,
+ [0][1][2][1][RTW89_WW][31] = 28,
+ [0][1][2][1][RTW89_WW][33] = 28,
+ [0][1][2][1][RTW89_WW][35] = 28,
+ [0][1][2][1][RTW89_WW][37] = 30,
+ [0][1][2][1][RTW89_WW][38] = 4,
+ [0][1][2][1][RTW89_WW][40] = 4,
+ [0][1][2][1][RTW89_WW][42] = 4,
+ [0][1][2][1][RTW89_WW][44] = 4,
+ [0][1][2][1][RTW89_WW][46] = 4,
+ [1][0][2][0][RTW89_WW][1] = 30,
+ [1][0][2][0][RTW89_WW][5] = 30,
+ [1][0][2][0][RTW89_WW][9] = 52,
+ [1][0][2][0][RTW89_WW][13] = 52,
+ [1][0][2][0][RTW89_WW][16] = 52,
+ [1][0][2][0][RTW89_WW][20] = 52,
+ [1][0][2][0][RTW89_WW][24] = 52,
+ [1][0][2][0][RTW89_WW][28] = 52,
+ [1][0][2][0][RTW89_WW][32] = 52,
+ [1][0][2][0][RTW89_WW][36] = 54,
+ [1][0][2][0][RTW89_WW][39] = 28,
+ [1][0][2][0][RTW89_WW][43] = 28,
+ [1][1][2][0][RTW89_WW][1] = 18,
+ [1][1][2][0][RTW89_WW][5] = 18,
+ [1][1][2][0][RTW89_WW][9] = 40,
+ [1][1][2][0][RTW89_WW][13] = 40,
+ [1][1][2][0][RTW89_WW][16] = 40,
+ [1][1][2][0][RTW89_WW][20] = 40,
+ [1][1][2][0][RTW89_WW][24] = 40,
+ [1][1][2][0][RTW89_WW][28] = 40,
+ [1][1][2][0][RTW89_WW][32] = 40,
+ [1][1][2][0][RTW89_WW][36] = 42,
+ [1][1][2][0][RTW89_WW][39] = 16,
+ [1][1][2][0][RTW89_WW][43] = 16,
+ [1][1][2][1][RTW89_WW][1] = 6,
+ [1][1][2][1][RTW89_WW][5] = 6,
+ [1][1][2][1][RTW89_WW][9] = 28,
+ [1][1][2][1][RTW89_WW][13] = 28,
+ [1][1][2][1][RTW89_WW][16] = 28,
+ [1][1][2][1][RTW89_WW][20] = 28,
+ [1][1][2][1][RTW89_WW][24] = 28,
+ [1][1][2][1][RTW89_WW][28] = 28,
+ [1][1][2][1][RTW89_WW][32] = 28,
+ [1][1][2][1][RTW89_WW][36] = 30,
+ [1][1][2][1][RTW89_WW][39] = 4,
+ [1][1][2][1][RTW89_WW][43] = 4,
+ [2][0][2][0][RTW89_WW][3] = 30,
+ [2][0][2][0][RTW89_WW][11] = 52,
+ [2][0][2][0][RTW89_WW][18] = 52,
+ [2][0][2][0][RTW89_WW][26] = 52,
+ [2][0][2][0][RTW89_WW][34] = 54,
+ [2][0][2][0][RTW89_WW][41] = 28,
+ [2][1][2][0][RTW89_WW][3] = 18,
+ [2][1][2][0][RTW89_WW][11] = 40,
+ [2][1][2][0][RTW89_WW][18] = 40,
+ [2][1][2][0][RTW89_WW][26] = 40,
+ [2][1][2][0][RTW89_WW][34] = 42,
+ [2][1][2][0][RTW89_WW][41] = 16,
+ [2][1][2][1][RTW89_WW][3] = 6,
+ [2][1][2][1][RTW89_WW][11] = 28,
+ [2][1][2][1][RTW89_WW][18] = 28,
+ [2][1][2][1][RTW89_WW][26] = 28,
+ [2][1][2][1][RTW89_WW][34] = 30,
+ [2][1][2][1][RTW89_WW][41] = 4,
+ [0][0][1][0][RTW89_FCC][0] = 76,
+ [0][0][1][0][RTW89_ETSI][0] = 58,
+ [0][0][1][0][RTW89_MKK][0] = 62,
+ [0][0][1][0][RTW89_IC][0] = 62,
+ [0][0][1][0][RTW89_KCC][0] = 76,
+ [0][0][1][0][RTW89_ACMA][0] = 58,
+ [0][0][1][0][RTW89_CHILE][0] = 30,
+ [0][0][1][0][RTW89_UKRAINE][0] = 52,
+ [0][0][1][0][RTW89_MEXICO][0] = 62,
+ [0][0][1][0][RTW89_CN][0] = 58,
+ [0][0][1][0][RTW89_QATAR][0] = 58,
+ [0][0][1][0][RTW89_FCC][2] = 76,
+ [0][0][1][0][RTW89_ETSI][2] = 58,
+ [0][0][1][0][RTW89_MKK][2] = 62,
+ [0][0][1][0][RTW89_IC][2] = 62,
+ [0][0][1][0][RTW89_KCC][2] = 76,
+ [0][0][1][0][RTW89_ACMA][2] = 58,
+ [0][0][1][0][RTW89_CHILE][2] = 30,
+ [0][0][1][0][RTW89_UKRAINE][2] = 52,
+ [0][0][1][0][RTW89_MEXICO][2] = 62,
+ [0][0][1][0][RTW89_CN][2] = 58,
+ [0][0][1][0][RTW89_QATAR][2] = 58,
+ [0][0][1][0][RTW89_FCC][4] = 76,
+ [0][0][1][0][RTW89_ETSI][4] = 58,
+ [0][0][1][0][RTW89_MKK][4] = 62,
+ [0][0][1][0][RTW89_IC][4] = 62,
+ [0][0][1][0][RTW89_KCC][4] = 76,
+ [0][0][1][0][RTW89_ACMA][4] = 58,
+ [0][0][1][0][RTW89_CHILE][4] = 30,
+ [0][0][1][0][RTW89_UKRAINE][4] = 52,
+ [0][0][1][0][RTW89_MEXICO][4] = 62,
+ [0][0][1][0][RTW89_CN][4] = 58,
+ [0][0][1][0][RTW89_QATAR][4] = 58,
+ [0][0][1][0][RTW89_FCC][6] = 76,
+ [0][0][1][0][RTW89_ETSI][6] = 58,
+ [0][0][1][0][RTW89_MKK][6] = 62,
+ [0][0][1][0][RTW89_IC][6] = 62,
+ [0][0][1][0][RTW89_KCC][6] = 58,
+ [0][0][1][0][RTW89_ACMA][6] = 58,
+ [0][0][1][0][RTW89_CHILE][6] = 30,
+ [0][0][1][0][RTW89_UKRAINE][6] = 52,
+ [0][0][1][0][RTW89_MEXICO][6] = 62,
+ [0][0][1][0][RTW89_CN][6] = 58,
+ [0][0][1][0][RTW89_QATAR][6] = 58,
+ [0][0][1][0][RTW89_FCC][8] = 76,
+ [0][0][1][0][RTW89_ETSI][8] = 58,
+ [0][0][1][0][RTW89_MKK][8] = 62,
+ [0][0][1][0][RTW89_IC][8] = 64,
+ [0][0][1][0][RTW89_KCC][8] = 76,
+ [0][0][1][0][RTW89_ACMA][8] = 58,
+ [0][0][1][0][RTW89_CHILE][8] = 54,
+ [0][0][1][0][RTW89_UKRAINE][8] = 52,
+ [0][0][1][0][RTW89_MEXICO][8] = 76,
+ [0][0][1][0][RTW89_CN][8] = 58,
+ [0][0][1][0][RTW89_QATAR][8] = 58,
+ [0][0][1][0][RTW89_FCC][10] = 76,
+ [0][0][1][0][RTW89_ETSI][10] = 58,
+ [0][0][1][0][RTW89_MKK][10] = 62,
+ [0][0][1][0][RTW89_IC][10] = 64,
+ [0][0][1][0][RTW89_KCC][10] = 76,
+ [0][0][1][0][RTW89_ACMA][10] = 58,
+ [0][0][1][0][RTW89_CHILE][10] = 54,
+ [0][0][1][0][RTW89_UKRAINE][10] = 52,
+ [0][0][1][0][RTW89_MEXICO][10] = 76,
+ [0][0][1][0][RTW89_CN][10] = 58,
+ [0][0][1][0][RTW89_QATAR][10] = 58,
+ [0][0][1][0][RTW89_FCC][12] = 76,
+ [0][0][1][0][RTW89_ETSI][12] = 58,
+ [0][0][1][0][RTW89_MKK][12] = 62,
+ [0][0][1][0][RTW89_IC][12] = 64,
+ [0][0][1][0][RTW89_KCC][12] = 76,
+ [0][0][1][0][RTW89_ACMA][12] = 58,
+ [0][0][1][0][RTW89_CHILE][12] = 54,
+ [0][0][1][0][RTW89_UKRAINE][12] = 52,
+ [0][0][1][0][RTW89_MEXICO][12] = 76,
+ [0][0][1][0][RTW89_CN][12] = 58,
+ [0][0][1][0][RTW89_QATAR][12] = 58,
+ [0][0][1][0][RTW89_FCC][14] = 76,
+ [0][0][1][0][RTW89_ETSI][14] = 58,
+ [0][0][1][0][RTW89_MKK][14] = 62,
+ [0][0][1][0][RTW89_IC][14] = 64,
+ [0][0][1][0][RTW89_KCC][14] = 76,
+ [0][0][1][0][RTW89_ACMA][14] = 58,
+ [0][0][1][0][RTW89_CHILE][14] = 54,
+ [0][0][1][0][RTW89_UKRAINE][14] = 52,
+ [0][0][1][0][RTW89_MEXICO][14] = 76,
+ [0][0][1][0][RTW89_CN][14] = 58,
+ [0][0][1][0][RTW89_QATAR][14] = 58,
+ [0][0][1][0][RTW89_FCC][15] = 76,
+ [0][0][1][0][RTW89_ETSI][15] = 58,
+ [0][0][1][0][RTW89_MKK][15] = 76,
+ [0][0][1][0][RTW89_IC][15] = 76,
+ [0][0][1][0][RTW89_KCC][15] = 76,
+ [0][0][1][0][RTW89_ACMA][15] = 58,
+ [0][0][1][0][RTW89_CHILE][15] = 54,
+ [0][0][1][0][RTW89_UKRAINE][15] = 52,
+ [0][0][1][0][RTW89_MEXICO][15] = 76,
+ [0][0][1][0][RTW89_CN][15] = 127,
+ [0][0][1][0][RTW89_QATAR][15] = 52,
+ [0][0][1][0][RTW89_FCC][17] = 76,
+ [0][0][1][0][RTW89_ETSI][17] = 58,
+ [0][0][1][0][RTW89_MKK][17] = 76,
+ [0][0][1][0][RTW89_IC][17] = 76,
+ [0][0][1][0][RTW89_KCC][17] = 76,
+ [0][0][1][0][RTW89_ACMA][17] = 58,
+ [0][0][1][0][RTW89_CHILE][17] = 54,
+ [0][0][1][0][RTW89_UKRAINE][17] = 52,
+ [0][0][1][0][RTW89_MEXICO][17] = 76,
+ [0][0][1][0][RTW89_CN][17] = 127,
+ [0][0][1][0][RTW89_QATAR][17] = 52,
+ [0][0][1][0][RTW89_FCC][19] = 76,
+ [0][0][1][0][RTW89_ETSI][19] = 58,
+ [0][0][1][0][RTW89_MKK][19] = 76,
+ [0][0][1][0][RTW89_IC][19] = 76,
+ [0][0][1][0][RTW89_KCC][19] = 76,
+ [0][0][1][0][RTW89_ACMA][19] = 58,
+ [0][0][1][0][RTW89_CHILE][19] = 54,
+ [0][0][1][0][RTW89_UKRAINE][19] = 52,
+ [0][0][1][0][RTW89_MEXICO][19] = 76,
+ [0][0][1][0][RTW89_CN][19] = 127,
+ [0][0][1][0][RTW89_QATAR][19] = 52,
+ [0][0][1][0][RTW89_FCC][21] = 76,
+ [0][0][1][0][RTW89_ETSI][21] = 58,
+ [0][0][1][0][RTW89_MKK][21] = 76,
+ [0][0][1][0][RTW89_IC][21] = 76,
+ [0][0][1][0][RTW89_KCC][21] = 76,
+ [0][0][1][0][RTW89_ACMA][21] = 58,
+ [0][0][1][0][RTW89_CHILE][21] = 54,
+ [0][0][1][0][RTW89_UKRAINE][21] = 52,
+ [0][0][1][0][RTW89_MEXICO][21] = 76,
+ [0][0][1][0][RTW89_CN][21] = 127,
+ [0][0][1][0][RTW89_QATAR][21] = 52,
+ [0][0][1][0][RTW89_FCC][23] = 76,
+ [0][0][1][0][RTW89_ETSI][23] = 58,
+ [0][0][1][0][RTW89_MKK][23] = 76,
+ [0][0][1][0][RTW89_IC][23] = 76,
+ [0][0][1][0][RTW89_KCC][23] = 76,
+ [0][0][1][0][RTW89_ACMA][23] = 58,
+ [0][0][1][0][RTW89_CHILE][23] = 54,
+ [0][0][1][0][RTW89_UKRAINE][23] = 52,
+ [0][0][1][0][RTW89_MEXICO][23] = 76,
+ [0][0][1][0][RTW89_CN][23] = 127,
+ [0][0][1][0][RTW89_QATAR][23] = 52,
+ [0][0][1][0][RTW89_FCC][25] = 76,
+ [0][0][1][0][RTW89_ETSI][25] = 58,
+ [0][0][1][0][RTW89_MKK][25] = 76,
+ [0][0][1][0][RTW89_IC][25] = 127,
+ [0][0][1][0][RTW89_KCC][25] = 76,
+ [0][0][1][0][RTW89_ACMA][25] = 127,
+ [0][0][1][0][RTW89_CHILE][25] = 54,
+ [0][0][1][0][RTW89_UKRAINE][25] = 52,
+ [0][0][1][0][RTW89_MEXICO][25] = 76,
+ [0][0][1][0][RTW89_CN][25] = 127,
+ [0][0][1][0][RTW89_QATAR][25] = 52,
+ [0][0][1][0][RTW89_FCC][27] = 76,
+ [0][0][1][0][RTW89_ETSI][27] = 58,
+ [0][0][1][0][RTW89_MKK][27] = 76,
+ [0][0][1][0][RTW89_IC][27] = 127,
+ [0][0][1][0][RTW89_KCC][27] = 76,
+ [0][0][1][0][RTW89_ACMA][27] = 127,
+ [0][0][1][0][RTW89_CHILE][27] = 54,
+ [0][0][1][0][RTW89_UKRAINE][27] = 52,
+ [0][0][1][0][RTW89_MEXICO][27] = 76,
+ [0][0][1][0][RTW89_CN][27] = 127,
+ [0][0][1][0][RTW89_QATAR][27] = 52,
+ [0][0][1][0][RTW89_FCC][29] = 76,
+ [0][0][1][0][RTW89_ETSI][29] = 58,
+ [0][0][1][0][RTW89_MKK][29] = 76,
+ [0][0][1][0][RTW89_IC][29] = 127,
+ [0][0][1][0][RTW89_KCC][29] = 76,
+ [0][0][1][0][RTW89_ACMA][29] = 127,
+ [0][0][1][0][RTW89_CHILE][29] = 54,
+ [0][0][1][0][RTW89_UKRAINE][29] = 52,
+ [0][0][1][0][RTW89_MEXICO][29] = 76,
+ [0][0][1][0][RTW89_CN][29] = 127,
+ [0][0][1][0][RTW89_QATAR][29] = 52,
+ [0][0][1][0][RTW89_FCC][31] = 76,
+ [0][0][1][0][RTW89_ETSI][31] = 58,
+ [0][0][1][0][RTW89_MKK][31] = 76,
+ [0][0][1][0][RTW89_IC][31] = 76,
+ [0][0][1][0][RTW89_KCC][31] = 76,
+ [0][0][1][0][RTW89_ACMA][31] = 58,
+ [0][0][1][0][RTW89_CHILE][31] = 54,
+ [0][0][1][0][RTW89_UKRAINE][31] = 52,
+ [0][0][1][0][RTW89_MEXICO][31] = 76,
+ [0][0][1][0][RTW89_CN][31] = 127,
+ [0][0][1][0][RTW89_QATAR][31] = 52,
+ [0][0][1][0][RTW89_FCC][33] = 76,
+ [0][0][1][0][RTW89_ETSI][33] = 58,
+ [0][0][1][0][RTW89_MKK][33] = 76,
+ [0][0][1][0][RTW89_IC][33] = 76,
+ [0][0][1][0][RTW89_KCC][33] = 76,
+ [0][0][1][0][RTW89_ACMA][33] = 58,
+ [0][0][1][0][RTW89_CHILE][33] = 54,
+ [0][0][1][0][RTW89_UKRAINE][33] = 52,
+ [0][0][1][0][RTW89_MEXICO][33] = 76,
+ [0][0][1][0][RTW89_CN][33] = 127,
+ [0][0][1][0][RTW89_QATAR][33] = 52,
+ [0][0][1][0][RTW89_FCC][35] = 74,
+ [0][0][1][0][RTW89_ETSI][35] = 58,
+ [0][0][1][0][RTW89_MKK][35] = 76,
+ [0][0][1][0][RTW89_IC][35] = 74,
+ [0][0][1][0][RTW89_KCC][35] = 76,
+ [0][0][1][0][RTW89_ACMA][35] = 58,
+ [0][0][1][0][RTW89_CHILE][35] = 54,
+ [0][0][1][0][RTW89_UKRAINE][35] = 52,
+ [0][0][1][0][RTW89_MEXICO][35] = 74,
+ [0][0][1][0][RTW89_CN][35] = 127,
+ [0][0][1][0][RTW89_QATAR][35] = 52,
+ [0][0][1][0][RTW89_FCC][37] = 76,
+ [0][0][1][0][RTW89_ETSI][37] = 127,
+ [0][0][1][0][RTW89_MKK][37] = 76,
+ [0][0][1][0][RTW89_IC][37] = 76,
+ [0][0][1][0][RTW89_KCC][37] = 76,
+ [0][0][1][0][RTW89_ACMA][37] = 76,
+ [0][0][1][0][RTW89_CHILE][37] = 54,
+ [0][0][1][0][RTW89_UKRAINE][37] = 127,
+ [0][0][1][0][RTW89_MEXICO][37] = 76,
+ [0][0][1][0][RTW89_CN][37] = 127,
+ [0][0][1][0][RTW89_QATAR][37] = 127,
+ [0][0][1][0][RTW89_FCC][38] = 76,
+ [0][0][1][0][RTW89_ETSI][38] = 28,
+ [0][0][1][0][RTW89_MKK][38] = 127,
+ [0][0][1][0][RTW89_IC][38] = 76,
+ [0][0][1][0][RTW89_KCC][38] = 76,
+ [0][0][1][0][RTW89_ACMA][38] = 76,
+ [0][0][1][0][RTW89_CHILE][38] = 54,
+ [0][0][1][0][RTW89_UKRAINE][38] = 28,
+ [0][0][1][0][RTW89_MEXICO][38] = 76,
+ [0][0][1][0][RTW89_CN][38] = 72,
+ [0][0][1][0][RTW89_QATAR][38] = 28,
+ [0][0][1][0][RTW89_FCC][40] = 76,
+ [0][0][1][0][RTW89_ETSI][40] = 28,
+ [0][0][1][0][RTW89_MKK][40] = 127,
+ [0][0][1][0][RTW89_IC][40] = 76,
+ [0][0][1][0][RTW89_KCC][40] = 76,
+ [0][0][1][0][RTW89_ACMA][40] = 76,
+ [0][0][1][0][RTW89_CHILE][40] = 54,
+ [0][0][1][0][RTW89_UKRAINE][40] = 28,
+ [0][0][1][0][RTW89_MEXICO][40] = 76,
+ [0][0][1][0][RTW89_CN][40] = 76,
+ [0][0][1][0][RTW89_QATAR][40] = 28,
+ [0][0][1][0][RTW89_FCC][42] = 76,
+ [0][0][1][0][RTW89_ETSI][42] = 28,
+ [0][0][1][0][RTW89_MKK][42] = 127,
+ [0][0][1][0][RTW89_IC][42] = 76,
+ [0][0][1][0][RTW89_KCC][42] = 76,
+ [0][0][1][0][RTW89_ACMA][42] = 76,
+ [0][0][1][0][RTW89_CHILE][42] = 54,
+ [0][0][1][0][RTW89_UKRAINE][42] = 28,
+ [0][0][1][0][RTW89_MEXICO][42] = 76,
+ [0][0][1][0][RTW89_CN][42] = 76,
+ [0][0][1][0][RTW89_QATAR][42] = 28,
+ [0][0][1][0][RTW89_FCC][44] = 76,
+ [0][0][1][0][RTW89_ETSI][44] = 28,
+ [0][0][1][0][RTW89_MKK][44] = 127,
+ [0][0][1][0][RTW89_IC][44] = 76,
+ [0][0][1][0][RTW89_KCC][44] = 76,
+ [0][0][1][0][RTW89_ACMA][44] = 76,
+ [0][0][1][0][RTW89_CHILE][44] = 54,
+ [0][0][1][0][RTW89_UKRAINE][44] = 28,
+ [0][0][1][0][RTW89_MEXICO][44] = 76,
+ [0][0][1][0][RTW89_CN][44] = 76,
+ [0][0][1][0][RTW89_QATAR][44] = 28,
+ [0][0][1][0][RTW89_FCC][46] = 76,
+ [0][0][1][0][RTW89_ETSI][46] = 28,
+ [0][0][1][0][RTW89_MKK][46] = 127,
+ [0][0][1][0][RTW89_IC][46] = 76,
+ [0][0][1][0][RTW89_KCC][46] = 76,
+ [0][0][1][0][RTW89_ACMA][46] = 76,
+ [0][0][1][0][RTW89_CHILE][46] = 54,
+ [0][0][1][0][RTW89_UKRAINE][46] = 28,
+ [0][0][1][0][RTW89_MEXICO][46] = 76,
+ [0][0][1][0][RTW89_CN][46] = 76,
+ [0][0][1][0][RTW89_QATAR][46] = 28,
+ [0][1][1][0][RTW89_FCC][0] = 68,
+ [0][1][1][0][RTW89_ETSI][0] = 46,
+ [0][1][1][0][RTW89_MKK][0] = 50,
+ [0][1][1][0][RTW89_IC][0] = 40,
+ [0][1][1][0][RTW89_KCC][0] = 72,
+ [0][1][1][0][RTW89_ACMA][0] = 46,
+ [0][1][1][0][RTW89_CHILE][0] = 18,
+ [0][1][1][0][RTW89_UKRAINE][0] = 40,
+ [0][1][1][0][RTW89_MEXICO][0] = 50,
+ [0][1][1][0][RTW89_CN][0] = 46,
+ [0][1][1][0][RTW89_QATAR][0] = 46,
+ [0][1][1][0][RTW89_FCC][2] = 68,
+ [0][1][1][0][RTW89_ETSI][2] = 46,
+ [0][1][1][0][RTW89_MKK][2] = 50,
+ [0][1][1][0][RTW89_IC][2] = 40,
+ [0][1][1][0][RTW89_KCC][2] = 72,
+ [0][1][1][0][RTW89_ACMA][2] = 46,
+ [0][1][1][0][RTW89_CHILE][2] = 18,
+ [0][1][1][0][RTW89_UKRAINE][2] = 40,
+ [0][1][1][0][RTW89_MEXICO][2] = 50,
+ [0][1][1][0][RTW89_CN][2] = 46,
+ [0][1][1][0][RTW89_QATAR][2] = 46,
+ [0][1][1][0][RTW89_FCC][4] = 68,
+ [0][1][1][0][RTW89_ETSI][4] = 46,
+ [0][1][1][0][RTW89_MKK][4] = 50,
+ [0][1][1][0][RTW89_IC][4] = 40,
+ [0][1][1][0][RTW89_KCC][4] = 72,
+ [0][1][1][0][RTW89_ACMA][4] = 46,
+ [0][1][1][0][RTW89_CHILE][4] = 18,
+ [0][1][1][0][RTW89_UKRAINE][4] = 40,
+ [0][1][1][0][RTW89_MEXICO][4] = 50,
+ [0][1][1][0][RTW89_CN][4] = 46,
+ [0][1][1][0][RTW89_QATAR][4] = 46,
+ [0][1][1][0][RTW89_FCC][6] = 68,
+ [0][1][1][0][RTW89_ETSI][6] = 46,
+ [0][1][1][0][RTW89_MKK][6] = 50,
+ [0][1][1][0][RTW89_IC][6] = 40,
+ [0][1][1][0][RTW89_KCC][6] = 44,
+ [0][1][1][0][RTW89_ACMA][6] = 46,
+ [0][1][1][0][RTW89_CHILE][6] = 18,
+ [0][1][1][0][RTW89_UKRAINE][6] = 40,
+ [0][1][1][0][RTW89_MEXICO][6] = 50,
+ [0][1][1][0][RTW89_CN][6] = 46,
+ [0][1][1][0][RTW89_QATAR][6] = 46,
+ [0][1][1][0][RTW89_FCC][8] = 68,
+ [0][1][1][0][RTW89_ETSI][8] = 46,
+ [0][1][1][0][RTW89_MKK][8] = 50,
+ [0][1][1][0][RTW89_IC][8] = 52,
+ [0][1][1][0][RTW89_KCC][8] = 72,
+ [0][1][1][0][RTW89_ACMA][8] = 46,
+ [0][1][1][0][RTW89_CHILE][8] = 42,
+ [0][1][1][0][RTW89_UKRAINE][8] = 40,
+ [0][1][1][0][RTW89_MEXICO][8] = 68,
+ [0][1][1][0][RTW89_CN][8] = 46,
+ [0][1][1][0][RTW89_QATAR][8] = 46,
+ [0][1][1][0][RTW89_FCC][10] = 68,
+ [0][1][1][0][RTW89_ETSI][10] = 46,
+ [0][1][1][0][RTW89_MKK][10] = 50,
+ [0][1][1][0][RTW89_IC][10] = 52,
+ [0][1][1][0][RTW89_KCC][10] = 72,
+ [0][1][1][0][RTW89_ACMA][10] = 46,
+ [0][1][1][0][RTW89_CHILE][10] = 42,
+ [0][1][1][0][RTW89_UKRAINE][10] = 40,
+ [0][1][1][0][RTW89_MEXICO][10] = 68,
+ [0][1][1][0][RTW89_CN][10] = 46,
+ [0][1][1][0][RTW89_QATAR][10] = 46,
+ [0][1][1][0][RTW89_FCC][12] = 68,
+ [0][1][1][0][RTW89_ETSI][12] = 46,
+ [0][1][1][0][RTW89_MKK][12] = 50,
+ [0][1][1][0][RTW89_IC][12] = 52,
+ [0][1][1][0][RTW89_KCC][12] = 72,
+ [0][1][1][0][RTW89_ACMA][12] = 46,
+ [0][1][1][0][RTW89_CHILE][12] = 42,
+ [0][1][1][0][RTW89_UKRAINE][12] = 40,
+ [0][1][1][0][RTW89_MEXICO][12] = 68,
+ [0][1][1][0][RTW89_CN][12] = 46,
+ [0][1][1][0][RTW89_QATAR][12] = 46,
+ [0][1][1][0][RTW89_FCC][14] = 68,
+ [0][1][1][0][RTW89_ETSI][14] = 46,
+ [0][1][1][0][RTW89_MKK][14] = 50,
+ [0][1][1][0][RTW89_IC][14] = 52,
+ [0][1][1][0][RTW89_KCC][14] = 72,
+ [0][1][1][0][RTW89_ACMA][14] = 46,
+ [0][1][1][0][RTW89_CHILE][14] = 42,
+ [0][1][1][0][RTW89_UKRAINE][14] = 40,
+ [0][1][1][0][RTW89_MEXICO][14] = 68,
+ [0][1][1][0][RTW89_CN][14] = 46,
+ [0][1][1][0][RTW89_QATAR][14] = 46,
+ [0][1][1][0][RTW89_FCC][15] = 68,
+ [0][1][1][0][RTW89_ETSI][15] = 46,
+ [0][1][1][0][RTW89_MKK][15] = 70,
+ [0][1][1][0][RTW89_IC][15] = 68,
+ [0][1][1][0][RTW89_KCC][15] = 72,
+ [0][1][1][0][RTW89_ACMA][15] = 46,
+ [0][1][1][0][RTW89_CHILE][15] = 42,
+ [0][1][1][0][RTW89_UKRAINE][15] = 40,
+ [0][1][1][0][RTW89_MEXICO][15] = 68,
+ [0][1][1][0][RTW89_CN][15] = 127,
+ [0][1][1][0][RTW89_QATAR][15] = 40,
+ [0][1][1][0][RTW89_FCC][17] = 68,
+ [0][1][1][0][RTW89_ETSI][17] = 46,
+ [0][1][1][0][RTW89_MKK][17] = 70,
+ [0][1][1][0][RTW89_IC][17] = 68,
+ [0][1][1][0][RTW89_KCC][17] = 72,
+ [0][1][1][0][RTW89_ACMA][17] = 46,
+ [0][1][1][0][RTW89_CHILE][17] = 42,
+ [0][1][1][0][RTW89_UKRAINE][17] = 40,
+ [0][1][1][0][RTW89_MEXICO][17] = 68,
+ [0][1][1][0][RTW89_CN][17] = 127,
+ [0][1][1][0][RTW89_QATAR][17] = 40,
+ [0][1][1][0][RTW89_FCC][19] = 68,
+ [0][1][1][0][RTW89_ETSI][19] = 46,
+ [0][1][1][0][RTW89_MKK][19] = 70,
+ [0][1][1][0][RTW89_IC][19] = 68,
+ [0][1][1][0][RTW89_KCC][19] = 72,
+ [0][1][1][0][RTW89_ACMA][19] = 46,
+ [0][1][1][0][RTW89_CHILE][19] = 42,
+ [0][1][1][0][RTW89_UKRAINE][19] = 40,
+ [0][1][1][0][RTW89_MEXICO][19] = 68,
+ [0][1][1][0][RTW89_CN][19] = 127,
+ [0][1][1][0][RTW89_QATAR][19] = 40,
+ [0][1][1][0][RTW89_FCC][21] = 68,
+ [0][1][1][0][RTW89_ETSI][21] = 46,
+ [0][1][1][0][RTW89_MKK][21] = 70,
+ [0][1][1][0][RTW89_IC][21] = 68,
+ [0][1][1][0][RTW89_KCC][21] = 72,
+ [0][1][1][0][RTW89_ACMA][21] = 46,
+ [0][1][1][0][RTW89_CHILE][21] = 42,
+ [0][1][1][0][RTW89_UKRAINE][21] = 40,
+ [0][1][1][0][RTW89_MEXICO][21] = 68,
+ [0][1][1][0][RTW89_CN][21] = 127,
+ [0][1][1][0][RTW89_QATAR][21] = 40,
+ [0][1][1][0][RTW89_FCC][23] = 68,
+ [0][1][1][0][RTW89_ETSI][23] = 46,
+ [0][1][1][0][RTW89_MKK][23] = 70,
+ [0][1][1][0][RTW89_IC][23] = 68,
+ [0][1][1][0][RTW89_KCC][23] = 72,
+ [0][1][1][0][RTW89_ACMA][23] = 46,
+ [0][1][1][0][RTW89_CHILE][23] = 42,
+ [0][1][1][0][RTW89_UKRAINE][23] = 40,
+ [0][1][1][0][RTW89_MEXICO][23] = 68,
+ [0][1][1][0][RTW89_CN][23] = 127,
+ [0][1][1][0][RTW89_QATAR][23] = 40,
+ [0][1][1][0][RTW89_FCC][25] = 68,
+ [0][1][1][0][RTW89_ETSI][25] = 46,
+ [0][1][1][0][RTW89_MKK][25] = 70,
+ [0][1][1][0][RTW89_IC][25] = 127,
+ [0][1][1][0][RTW89_KCC][25] = 72,
+ [0][1][1][0][RTW89_ACMA][25] = 127,
+ [0][1][1][0][RTW89_CHILE][25] = 42,
+ [0][1][1][0][RTW89_UKRAINE][25] = 40,
+ [0][1][1][0][RTW89_MEXICO][25] = 68,
+ [0][1][1][0][RTW89_CN][25] = 127,
+ [0][1][1][0][RTW89_QATAR][25] = 40,
+ [0][1][1][0][RTW89_FCC][27] = 68,
+ [0][1][1][0][RTW89_ETSI][27] = 46,
+ [0][1][1][0][RTW89_MKK][27] = 70,
+ [0][1][1][0][RTW89_IC][27] = 127,
+ [0][1][1][0][RTW89_KCC][27] = 72,
+ [0][1][1][0][RTW89_ACMA][27] = 127,
+ [0][1][1][0][RTW89_CHILE][27] = 42,
+ [0][1][1][0][RTW89_UKRAINE][27] = 40,
+ [0][1][1][0][RTW89_MEXICO][27] = 68,
+ [0][1][1][0][RTW89_CN][27] = 127,
+ [0][1][1][0][RTW89_QATAR][27] = 40,
+ [0][1][1][0][RTW89_FCC][29] = 68,
+ [0][1][1][0][RTW89_ETSI][29] = 46,
+ [0][1][1][0][RTW89_MKK][29] = 70,
+ [0][1][1][0][RTW89_IC][29] = 127,
+ [0][1][1][0][RTW89_KCC][29] = 72,
+ [0][1][1][0][RTW89_ACMA][29] = 127,
+ [0][1][1][0][RTW89_CHILE][29] = 42,
+ [0][1][1][0][RTW89_UKRAINE][29] = 40,
+ [0][1][1][0][RTW89_MEXICO][29] = 68,
+ [0][1][1][0][RTW89_CN][29] = 127,
+ [0][1][1][0][RTW89_QATAR][29] = 40,
+ [0][1][1][0][RTW89_FCC][31] = 68,
+ [0][1][1][0][RTW89_ETSI][31] = 46,
+ [0][1][1][0][RTW89_MKK][31] = 70,
+ [0][1][1][0][RTW89_IC][31] = 68,
+ [0][1][1][0][RTW89_KCC][31] = 72,
+ [0][1][1][0][RTW89_ACMA][31] = 46,
+ [0][1][1][0][RTW89_CHILE][31] = 42,
+ [0][1][1][0][RTW89_UKRAINE][31] = 40,
+ [0][1][1][0][RTW89_MEXICO][31] = 68,
+ [0][1][1][0][RTW89_CN][31] = 127,
+ [0][1][1][0][RTW89_QATAR][31] = 40,
+ [0][1][1][0][RTW89_FCC][33] = 68,
+ [0][1][1][0][RTW89_ETSI][33] = 46,
+ [0][1][1][0][RTW89_MKK][33] = 70,
+ [0][1][1][0][RTW89_IC][33] = 68,
+ [0][1][1][0][RTW89_KCC][33] = 72,
+ [0][1][1][0][RTW89_ACMA][33] = 46,
+ [0][1][1][0][RTW89_CHILE][33] = 42,
+ [0][1][1][0][RTW89_UKRAINE][33] = 40,
+ [0][1][1][0][RTW89_MEXICO][33] = 68,
+ [0][1][1][0][RTW89_CN][33] = 127,
+ [0][1][1][0][RTW89_QATAR][33] = 40,
+ [0][1][1][0][RTW89_FCC][35] = 66,
+ [0][1][1][0][RTW89_ETSI][35] = 46,
+ [0][1][1][0][RTW89_MKK][35] = 70,
+ [0][1][1][0][RTW89_IC][35] = 66,
+ [0][1][1][0][RTW89_KCC][35] = 72,
+ [0][1][1][0][RTW89_ACMA][35] = 46,
+ [0][1][1][0][RTW89_CHILE][35] = 42,
+ [0][1][1][0][RTW89_UKRAINE][35] = 40,
+ [0][1][1][0][RTW89_MEXICO][35] = 66,
+ [0][1][1][0][RTW89_CN][35] = 127,
+ [0][1][1][0][RTW89_QATAR][35] = 40,
+ [0][1][1][0][RTW89_FCC][37] = 68,
+ [0][1][1][0][RTW89_ETSI][37] = 127,
+ [0][1][1][0][RTW89_MKK][37] = 70,
+ [0][1][1][0][RTW89_IC][37] = 68,
+ [0][1][1][0][RTW89_KCC][37] = 72,
+ [0][1][1][0][RTW89_ACMA][37] = 68,
+ [0][1][1][0][RTW89_CHILE][37] = 42,
+ [0][1][1][0][RTW89_UKRAINE][37] = 127,
+ [0][1][1][0][RTW89_MEXICO][37] = 68,
+ [0][1][1][0][RTW89_CN][37] = 127,
+ [0][1][1][0][RTW89_QATAR][37] = 127,
+ [0][1][1][0][RTW89_FCC][38] = 76,
+ [0][1][1][0][RTW89_ETSI][38] = 16,
+ [0][1][1][0][RTW89_MKK][38] = 127,
+ [0][1][1][0][RTW89_IC][38] = 76,
+ [0][1][1][0][RTW89_KCC][38] = 72,
+ [0][1][1][0][RTW89_ACMA][38] = 76,
+ [0][1][1][0][RTW89_CHILE][38] = 42,
+ [0][1][1][0][RTW89_UKRAINE][38] = 16,
+ [0][1][1][0][RTW89_MEXICO][38] = 76,
+ [0][1][1][0][RTW89_CN][38] = 72,
+ [0][1][1][0][RTW89_QATAR][38] = 16,
+ [0][1][1][0][RTW89_FCC][40] = 76,
+ [0][1][1][0][RTW89_ETSI][40] = 16,
+ [0][1][1][0][RTW89_MKK][40] = 127,
+ [0][1][1][0][RTW89_IC][40] = 76,
+ [0][1][1][0][RTW89_KCC][40] = 72,
+ [0][1][1][0][RTW89_ACMA][40] = 76,
+ [0][1][1][0][RTW89_CHILE][40] = 42,
+ [0][1][1][0][RTW89_UKRAINE][40] = 16,
+ [0][1][1][0][RTW89_MEXICO][40] = 76,
+ [0][1][1][0][RTW89_CN][40] = 76,
+ [0][1][1][0][RTW89_QATAR][40] = 16,
+ [0][1][1][0][RTW89_FCC][42] = 76,
+ [0][1][1][0][RTW89_ETSI][42] = 16,
+ [0][1][1][0][RTW89_MKK][42] = 127,
+ [0][1][1][0][RTW89_IC][42] = 76,
+ [0][1][1][0][RTW89_KCC][42] = 72,
+ [0][1][1][0][RTW89_ACMA][42] = 76,
+ [0][1][1][0][RTW89_CHILE][42] = 42,
+ [0][1][1][0][RTW89_UKRAINE][42] = 16,
+ [0][1][1][0][RTW89_MEXICO][42] = 76,
+ [0][1][1][0][RTW89_CN][42] = 76,
+ [0][1][1][0][RTW89_QATAR][42] = 16,
+ [0][1][1][0][RTW89_FCC][44] = 76,
+ [0][1][1][0][RTW89_ETSI][44] = 16,
+ [0][1][1][0][RTW89_MKK][44] = 127,
+ [0][1][1][0][RTW89_IC][44] = 76,
+ [0][1][1][0][RTW89_KCC][44] = 72,
+ [0][1][1][0][RTW89_ACMA][44] = 76,
+ [0][1][1][0][RTW89_CHILE][44] = 42,
+ [0][1][1][0][RTW89_UKRAINE][44] = 16,
+ [0][1][1][0][RTW89_MEXICO][44] = 76,
+ [0][1][1][0][RTW89_CN][44] = 76,
+ [0][1][1][0][RTW89_QATAR][44] = 16,
+ [0][1][1][0][RTW89_FCC][46] = 76,
+ [0][1][1][0][RTW89_ETSI][46] = 16,
+ [0][1][1][0][RTW89_MKK][46] = 127,
+ [0][1][1][0][RTW89_IC][46] = 76,
+ [0][1][1][0][RTW89_KCC][46] = 72,
+ [0][1][1][0][RTW89_ACMA][46] = 76,
+ [0][1][1][0][RTW89_CHILE][46] = 42,
+ [0][1][1][0][RTW89_UKRAINE][46] = 16,
+ [0][1][1][0][RTW89_MEXICO][46] = 76,
+ [0][1][1][0][RTW89_CN][46] = 76,
+ [0][1][1][0][RTW89_QATAR][46] = 16,
+ [0][0][2][0][RTW89_FCC][0] = 76,
+ [0][0][2][0][RTW89_ETSI][0] = 58,
+ [0][0][2][0][RTW89_MKK][0] = 62,
+ [0][0][2][0][RTW89_IC][0] = 62,
+ [0][0][2][0][RTW89_KCC][0] = 76,
+ [0][0][2][0][RTW89_ACMA][0] = 58,
+ [0][0][2][0][RTW89_CHILE][0] = 30,
+ [0][0][2][0][RTW89_UKRAINE][0] = 52,
+ [0][0][2][0][RTW89_MEXICO][0] = 62,
+ [0][0][2][0][RTW89_CN][0] = 58,
+ [0][0][2][0][RTW89_QATAR][0] = 58,
+ [0][0][2][0][RTW89_FCC][2] = 76,
+ [0][0][2][0][RTW89_ETSI][2] = 58,
+ [0][0][2][0][RTW89_MKK][2] = 62,
+ [0][0][2][0][RTW89_IC][2] = 62,
+ [0][0][2][0][RTW89_KCC][2] = 76,
+ [0][0][2][0][RTW89_ACMA][2] = 58,
+ [0][0][2][0][RTW89_CHILE][2] = 30,
+ [0][0][2][0][RTW89_UKRAINE][2] = 52,
+ [0][0][2][0][RTW89_MEXICO][2] = 62,
+ [0][0][2][0][RTW89_CN][2] = 58,
+ [0][0][2][0][RTW89_QATAR][2] = 58,
+ [0][0][2][0][RTW89_FCC][4] = 76,
+ [0][0][2][0][RTW89_ETSI][4] = 58,
+ [0][0][2][0][RTW89_MKK][4] = 62,
+ [0][0][2][0][RTW89_IC][4] = 62,
+ [0][0][2][0][RTW89_KCC][4] = 76,
+ [0][0][2][0][RTW89_ACMA][4] = 58,
+ [0][0][2][0][RTW89_CHILE][4] = 30,
+ [0][0][2][0][RTW89_UKRAINE][4] = 52,
+ [0][0][2][0][RTW89_MEXICO][4] = 62,
+ [0][0][2][0][RTW89_CN][4] = 58,
+ [0][0][2][0][RTW89_QATAR][4] = 58,
+ [0][0][2][0][RTW89_FCC][6] = 76,
+ [0][0][2][0][RTW89_ETSI][6] = 58,
+ [0][0][2][0][RTW89_MKK][6] = 62,
+ [0][0][2][0][RTW89_IC][6] = 62,
+ [0][0][2][0][RTW89_KCC][6] = 58,
+ [0][0][2][0][RTW89_ACMA][6] = 58,
+ [0][0][2][0][RTW89_CHILE][6] = 30,
+ [0][0][2][0][RTW89_UKRAINE][6] = 52,
+ [0][0][2][0][RTW89_MEXICO][6] = 62,
+ [0][0][2][0][RTW89_CN][6] = 58,
+ [0][0][2][0][RTW89_QATAR][6] = 58,
+ [0][0][2][0][RTW89_FCC][8] = 76,
+ [0][0][2][0][RTW89_ETSI][8] = 58,
+ [0][0][2][0][RTW89_MKK][8] = 62,
+ [0][0][2][0][RTW89_IC][8] = 64,
+ [0][0][2][0][RTW89_KCC][8] = 76,
+ [0][0][2][0][RTW89_ACMA][8] = 58,
+ [0][0][2][0][RTW89_CHILE][8] = 54,
+ [0][0][2][0][RTW89_UKRAINE][8] = 52,
+ [0][0][2][0][RTW89_MEXICO][8] = 76,
+ [0][0][2][0][RTW89_CN][8] = 58,
+ [0][0][2][0][RTW89_QATAR][8] = 58,
+ [0][0][2][0][RTW89_FCC][10] = 76,
+ [0][0][2][0][RTW89_ETSI][10] = 58,
+ [0][0][2][0][RTW89_MKK][10] = 62,
+ [0][0][2][0][RTW89_IC][10] = 64,
+ [0][0][2][0][RTW89_KCC][10] = 76,
+ [0][0][2][0][RTW89_ACMA][10] = 58,
+ [0][0][2][0][RTW89_CHILE][10] = 54,
+ [0][0][2][0][RTW89_UKRAINE][10] = 52,
+ [0][0][2][0][RTW89_MEXICO][10] = 76,
+ [0][0][2][0][RTW89_CN][10] = 58,
+ [0][0][2][0][RTW89_QATAR][10] = 58,
+ [0][0][2][0][RTW89_FCC][12] = 76,
+ [0][0][2][0][RTW89_ETSI][12] = 58,
+ [0][0][2][0][RTW89_MKK][12] = 62,
+ [0][0][2][0][RTW89_IC][12] = 64,
+ [0][0][2][0][RTW89_KCC][12] = 76,
+ [0][0][2][0][RTW89_ACMA][12] = 58,
+ [0][0][2][0][RTW89_CHILE][12] = 54,
+ [0][0][2][0][RTW89_UKRAINE][12] = 52,
+ [0][0][2][0][RTW89_MEXICO][12] = 76,
+ [0][0][2][0][RTW89_CN][12] = 58,
+ [0][0][2][0][RTW89_QATAR][12] = 58,
+ [0][0][2][0][RTW89_FCC][14] = 76,
+ [0][0][2][0][RTW89_ETSI][14] = 58,
+ [0][0][2][0][RTW89_MKK][14] = 62,
+ [0][0][2][0][RTW89_IC][14] = 64,
+ [0][0][2][0][RTW89_KCC][14] = 76,
+ [0][0][2][0][RTW89_ACMA][14] = 58,
+ [0][0][2][0][RTW89_CHILE][14] = 54,
+ [0][0][2][0][RTW89_UKRAINE][14] = 52,
+ [0][0][2][0][RTW89_MEXICO][14] = 76,
+ [0][0][2][0][RTW89_CN][14] = 58,
+ [0][0][2][0][RTW89_QATAR][14] = 58,
+ [0][0][2][0][RTW89_FCC][15] = 74,
+ [0][0][2][0][RTW89_ETSI][15] = 58,
+ [0][0][2][0][RTW89_MKK][15] = 76,
+ [0][0][2][0][RTW89_IC][15] = 74,
+ [0][0][2][0][RTW89_KCC][15] = 76,
+ [0][0][2][0][RTW89_ACMA][15] = 58,
+ [0][0][2][0][RTW89_CHILE][15] = 54,
+ [0][0][2][0][RTW89_UKRAINE][15] = 52,
+ [0][0][2][0][RTW89_MEXICO][15] = 74,
+ [0][0][2][0][RTW89_CN][15] = 127,
+ [0][0][2][0][RTW89_QATAR][15] = 52,
+ [0][0][2][0][RTW89_FCC][17] = 76,
+ [0][0][2][0][RTW89_ETSI][17] = 58,
+ [0][0][2][0][RTW89_MKK][17] = 76,
+ [0][0][2][0][RTW89_IC][17] = 76,
+ [0][0][2][0][RTW89_KCC][17] = 76,
+ [0][0][2][0][RTW89_ACMA][17] = 58,
+ [0][0][2][0][RTW89_CHILE][17] = 54,
+ [0][0][2][0][RTW89_UKRAINE][17] = 52,
+ [0][0][2][0][RTW89_MEXICO][17] = 76,
+ [0][0][2][0][RTW89_CN][17] = 127,
+ [0][0][2][0][RTW89_QATAR][17] = 52,
+ [0][0][2][0][RTW89_FCC][19] = 76,
+ [0][0][2][0][RTW89_ETSI][19] = 58,
+ [0][0][2][0][RTW89_MKK][19] = 76,
+ [0][0][2][0][RTW89_IC][19] = 76,
+ [0][0][2][0][RTW89_KCC][19] = 76,
+ [0][0][2][0][RTW89_ACMA][19] = 58,
+ [0][0][2][0][RTW89_CHILE][19] = 54,
+ [0][0][2][0][RTW89_UKRAINE][19] = 52,
+ [0][0][2][0][RTW89_MEXICO][19] = 76,
+ [0][0][2][0][RTW89_CN][19] = 127,
+ [0][0][2][0][RTW89_QATAR][19] = 52,
+ [0][0][2][0][RTW89_FCC][21] = 76,
+ [0][0][2][0][RTW89_ETSI][21] = 58,
+ [0][0][2][0][RTW89_MKK][21] = 76,
+ [0][0][2][0][RTW89_IC][21] = 76,
+ [0][0][2][0][RTW89_KCC][21] = 76,
+ [0][0][2][0][RTW89_ACMA][21] = 58,
+ [0][0][2][0][RTW89_CHILE][21] = 54,
+ [0][0][2][0][RTW89_UKRAINE][21] = 52,
+ [0][0][2][0][RTW89_MEXICO][21] = 76,
+ [0][0][2][0][RTW89_CN][21] = 127,
+ [0][0][2][0][RTW89_QATAR][21] = 52,
+ [0][0][2][0][RTW89_FCC][23] = 76,
+ [0][0][2][0][RTW89_ETSI][23] = 58,
+ [0][0][2][0][RTW89_MKK][23] = 76,
+ [0][0][2][0][RTW89_IC][23] = 76,
+ [0][0][2][0][RTW89_KCC][23] = 76,
+ [0][0][2][0][RTW89_ACMA][23] = 58,
+ [0][0][2][0][RTW89_CHILE][23] = 54,
+ [0][0][2][0][RTW89_UKRAINE][23] = 52,
+ [0][0][2][0][RTW89_MEXICO][23] = 76,
+ [0][0][2][0][RTW89_CN][23] = 127,
+ [0][0][2][0][RTW89_QATAR][23] = 52,
+ [0][0][2][0][RTW89_FCC][25] = 76,
+ [0][0][2][0][RTW89_ETSI][25] = 58,
+ [0][0][2][0][RTW89_MKK][25] = 76,
+ [0][0][2][0][RTW89_IC][25] = 127,
+ [0][0][2][0][RTW89_KCC][25] = 76,
+ [0][0][2][0][RTW89_ACMA][25] = 127,
+ [0][0][2][0][RTW89_CHILE][25] = 54,
+ [0][0][2][0][RTW89_UKRAINE][25] = 52,
+ [0][0][2][0][RTW89_MEXICO][25] = 76,
+ [0][0][2][0][RTW89_CN][25] = 127,
+ [0][0][2][0][RTW89_QATAR][25] = 52,
+ [0][0][2][0][RTW89_FCC][27] = 76,
+ [0][0][2][0][RTW89_ETSI][27] = 58,
+ [0][0][2][0][RTW89_MKK][27] = 76,
+ [0][0][2][0][RTW89_IC][27] = 127,
+ [0][0][2][0][RTW89_KCC][27] = 76,
+ [0][0][2][0][RTW89_ACMA][27] = 127,
+ [0][0][2][0][RTW89_CHILE][27] = 54,
+ [0][0][2][0][RTW89_UKRAINE][27] = 52,
+ [0][0][2][0][RTW89_MEXICO][27] = 76,
+ [0][0][2][0][RTW89_CN][27] = 127,
+ [0][0][2][0][RTW89_QATAR][27] = 52,
+ [0][0][2][0][RTW89_FCC][29] = 76,
+ [0][0][2][0][RTW89_ETSI][29] = 58,
+ [0][0][2][0][RTW89_MKK][29] = 76,
+ [0][0][2][0][RTW89_IC][29] = 127,
+ [0][0][2][0][RTW89_KCC][29] = 76,
+ [0][0][2][0][RTW89_ACMA][29] = 127,
+ [0][0][2][0][RTW89_CHILE][29] = 54,
+ [0][0][2][0][RTW89_UKRAINE][29] = 52,
+ [0][0][2][0][RTW89_MEXICO][29] = 76,
+ [0][0][2][0][RTW89_CN][29] = 127,
+ [0][0][2][0][RTW89_QATAR][29] = 52,
+ [0][0][2][0][RTW89_FCC][31] = 76,
+ [0][0][2][0][RTW89_ETSI][31] = 58,
+ [0][0][2][0][RTW89_MKK][31] = 76,
+ [0][0][2][0][RTW89_IC][31] = 76,
+ [0][0][2][0][RTW89_KCC][31] = 76,
+ [0][0][2][0][RTW89_ACMA][31] = 58,
+ [0][0][2][0][RTW89_CHILE][31] = 54,
+ [0][0][2][0][RTW89_UKRAINE][31] = 52,
+ [0][0][2][0][RTW89_MEXICO][31] = 76,
+ [0][0][2][0][RTW89_CN][31] = 127,
+ [0][0][2][0][RTW89_QATAR][31] = 52,
+ [0][0][2][0][RTW89_FCC][33] = 76,
+ [0][0][2][0][RTW89_ETSI][33] = 58,
+ [0][0][2][0][RTW89_MKK][33] = 76,
+ [0][0][2][0][RTW89_IC][33] = 76,
+ [0][0][2][0][RTW89_KCC][33] = 76,
+ [0][0][2][0][RTW89_ACMA][33] = 58,
+ [0][0][2][0][RTW89_CHILE][33] = 54,
+ [0][0][2][0][RTW89_UKRAINE][33] = 52,
+ [0][0][2][0][RTW89_MEXICO][33] = 76,
+ [0][0][2][0][RTW89_CN][33] = 127,
+ [0][0][2][0][RTW89_QATAR][33] = 52,
+ [0][0][2][0][RTW89_FCC][35] = 70,
+ [0][0][2][0][RTW89_ETSI][35] = 58,
+ [0][0][2][0][RTW89_MKK][35] = 76,
+ [0][0][2][0][RTW89_IC][35] = 70,
+ [0][0][2][0][RTW89_KCC][35] = 76,
+ [0][0][2][0][RTW89_ACMA][35] = 58,
+ [0][0][2][0][RTW89_CHILE][35] = 54,
+ [0][0][2][0][RTW89_UKRAINE][35] = 52,
+ [0][0][2][0][RTW89_MEXICO][35] = 70,
+ [0][0][2][0][RTW89_CN][35] = 127,
+ [0][0][2][0][RTW89_QATAR][35] = 52,
+ [0][0][2][0][RTW89_FCC][37] = 76,
+ [0][0][2][0][RTW89_ETSI][37] = 127,
+ [0][0][2][0][RTW89_MKK][37] = 76,
+ [0][0][2][0][RTW89_IC][37] = 76,
+ [0][0][2][0][RTW89_KCC][37] = 76,
+ [0][0][2][0][RTW89_ACMA][37] = 76,
+ [0][0][2][0][RTW89_CHILE][37] = 54,
+ [0][0][2][0][RTW89_UKRAINE][37] = 127,
+ [0][0][2][0][RTW89_MEXICO][37] = 76,
+ [0][0][2][0][RTW89_CN][37] = 127,
+ [0][0][2][0][RTW89_QATAR][37] = 127,
+ [0][0][2][0][RTW89_FCC][38] = 76,
+ [0][0][2][0][RTW89_ETSI][38] = 28,
+ [0][0][2][0][RTW89_MKK][38] = 127,
+ [0][0][2][0][RTW89_IC][38] = 76,
+ [0][0][2][0][RTW89_KCC][38] = 76,
+ [0][0][2][0][RTW89_ACMA][38] = 76,
+ [0][0][2][0][RTW89_CHILE][38] = 54,
+ [0][0][2][0][RTW89_UKRAINE][38] = 28,
+ [0][0][2][0][RTW89_MEXICO][38] = 76,
+ [0][0][2][0][RTW89_CN][38] = 68,
+ [0][0][2][0][RTW89_QATAR][38] = 28,
+ [0][0][2][0][RTW89_FCC][40] = 76,
+ [0][0][2][0][RTW89_ETSI][40] = 28,
+ [0][0][2][0][RTW89_MKK][40] = 127,
+ [0][0][2][0][RTW89_IC][40] = 76,
+ [0][0][2][0][RTW89_KCC][40] = 76,
+ [0][0][2][0][RTW89_ACMA][40] = 76,
+ [0][0][2][0][RTW89_CHILE][40] = 54,
+ [0][0][2][0][RTW89_UKRAINE][40] = 28,
+ [0][0][2][0][RTW89_MEXICO][40] = 76,
+ [0][0][2][0][RTW89_CN][40] = 76,
+ [0][0][2][0][RTW89_QATAR][40] = 28,
+ [0][0][2][0][RTW89_FCC][42] = 76,
+ [0][0][2][0][RTW89_ETSI][42] = 28,
+ [0][0][2][0][RTW89_MKK][42] = 127,
+ [0][0][2][0][RTW89_IC][42] = 76,
+ [0][0][2][0][RTW89_KCC][42] = 76,
+ [0][0][2][0][RTW89_ACMA][42] = 76,
+ [0][0][2][0][RTW89_CHILE][42] = 54,
+ [0][0][2][0][RTW89_UKRAINE][42] = 28,
+ [0][0][2][0][RTW89_MEXICO][42] = 76,
+ [0][0][2][0][RTW89_CN][42] = 76,
+ [0][0][2][0][RTW89_QATAR][42] = 28,
+ [0][0][2][0][RTW89_FCC][44] = 76,
+ [0][0][2][0][RTW89_ETSI][44] = 28,
+ [0][0][2][0][RTW89_MKK][44] = 127,
+ [0][0][2][0][RTW89_IC][44] = 76,
+ [0][0][2][0][RTW89_KCC][44] = 76,
+ [0][0][2][0][RTW89_ACMA][44] = 76,
+ [0][0][2][0][RTW89_CHILE][44] = 54,
+ [0][0][2][0][RTW89_UKRAINE][44] = 28,
+ [0][0][2][0][RTW89_MEXICO][44] = 76,
+ [0][0][2][0][RTW89_CN][44] = 76,
+ [0][0][2][0][RTW89_QATAR][44] = 28,
+ [0][0][2][0][RTW89_FCC][46] = 76,
+ [0][0][2][0][RTW89_ETSI][46] = 28,
+ [0][0][2][0][RTW89_MKK][46] = 127,
+ [0][0][2][0][RTW89_IC][46] = 76,
+ [0][0][2][0][RTW89_KCC][46] = 76,
+ [0][0][2][0][RTW89_ACMA][46] = 76,
+ [0][0][2][0][RTW89_CHILE][46] = 54,
+ [0][0][2][0][RTW89_UKRAINE][46] = 28,
+ [0][0][2][0][RTW89_MEXICO][46] = 76,
+ [0][0][2][0][RTW89_CN][46] = 76,
+ [0][0][2][0][RTW89_QATAR][46] = 28,
+ [0][1][2][0][RTW89_FCC][0] = 68,
+ [0][1][2][0][RTW89_ETSI][0] = 46,
+ [0][1][2][0][RTW89_MKK][0] = 50,
+ [0][1][2][0][RTW89_IC][0] = 40,
+ [0][1][2][0][RTW89_KCC][0] = 68,
+ [0][1][2][0][RTW89_ACMA][0] = 46,
+ [0][1][2][0][RTW89_CHILE][0] = 18,
+ [0][1][2][0][RTW89_UKRAINE][0] = 40,
+ [0][1][2][0][RTW89_MEXICO][0] = 50,
+ [0][1][2][0][RTW89_CN][0] = 46,
+ [0][1][2][0][RTW89_QATAR][0] = 46,
+ [0][1][2][0][RTW89_FCC][2] = 68,
+ [0][1][2][0][RTW89_ETSI][2] = 46,
+ [0][1][2][0][RTW89_MKK][2] = 50,
+ [0][1][2][0][RTW89_IC][2] = 40,
+ [0][1][2][0][RTW89_KCC][2] = 68,
+ [0][1][2][0][RTW89_ACMA][2] = 46,
+ [0][1][2][0][RTW89_CHILE][2] = 18,
+ [0][1][2][0][RTW89_UKRAINE][2] = 40,
+ [0][1][2][0][RTW89_MEXICO][2] = 50,
+ [0][1][2][0][RTW89_CN][2] = 46,
+ [0][1][2][0][RTW89_QATAR][2] = 46,
+ [0][1][2][0][RTW89_FCC][4] = 68,
+ [0][1][2][0][RTW89_ETSI][4] = 46,
+ [0][1][2][0][RTW89_MKK][4] = 50,
+ [0][1][2][0][RTW89_IC][4] = 40,
+ [0][1][2][0][RTW89_KCC][4] = 68,
+ [0][1][2][0][RTW89_ACMA][4] = 46,
+ [0][1][2][0][RTW89_CHILE][4] = 18,
+ [0][1][2][0][RTW89_UKRAINE][4] = 40,
+ [0][1][2][0][RTW89_MEXICO][4] = 50,
+ [0][1][2][0][RTW89_CN][4] = 46,
+ [0][1][2][0][RTW89_QATAR][4] = 46,
+ [0][1][2][0][RTW89_FCC][6] = 68,
+ [0][1][2][0][RTW89_ETSI][6] = 46,
+ [0][1][2][0][RTW89_MKK][6] = 50,
+ [0][1][2][0][RTW89_IC][6] = 40,
+ [0][1][2][0][RTW89_KCC][6] = 38,
+ [0][1][2][0][RTW89_ACMA][6] = 46,
+ [0][1][2][0][RTW89_CHILE][6] = 18,
+ [0][1][2][0][RTW89_UKRAINE][6] = 40,
+ [0][1][2][0][RTW89_MEXICO][6] = 50,
+ [0][1][2][0][RTW89_CN][6] = 46,
+ [0][1][2][0][RTW89_QATAR][6] = 46,
+ [0][1][2][0][RTW89_FCC][8] = 68,
+ [0][1][2][0][RTW89_ETSI][8] = 46,
+ [0][1][2][0][RTW89_MKK][8] = 50,
+ [0][1][2][0][RTW89_IC][8] = 52,
+ [0][1][2][0][RTW89_KCC][8] = 68,
+ [0][1][2][0][RTW89_ACMA][8] = 46,
+ [0][1][2][0][RTW89_CHILE][8] = 42,
+ [0][1][2][0][RTW89_UKRAINE][8] = 40,
+ [0][1][2][0][RTW89_MEXICO][8] = 68,
+ [0][1][2][0][RTW89_CN][8] = 46,
+ [0][1][2][0][RTW89_QATAR][8] = 46,
+ [0][1][2][0][RTW89_FCC][10] = 68,
+ [0][1][2][0][RTW89_ETSI][10] = 46,
+ [0][1][2][0][RTW89_MKK][10] = 50,
+ [0][1][2][0][RTW89_IC][10] = 52,
+ [0][1][2][0][RTW89_KCC][10] = 68,
+ [0][1][2][0][RTW89_ACMA][10] = 46,
+ [0][1][2][0][RTW89_CHILE][10] = 42,
+ [0][1][2][0][RTW89_UKRAINE][10] = 40,
+ [0][1][2][0][RTW89_MEXICO][10] = 68,
+ [0][1][2][0][RTW89_CN][10] = 46,
+ [0][1][2][0][RTW89_QATAR][10] = 46,
+ [0][1][2][0][RTW89_FCC][12] = 68,
+ [0][1][2][0][RTW89_ETSI][12] = 46,
+ [0][1][2][0][RTW89_MKK][12] = 50,
+ [0][1][2][0][RTW89_IC][12] = 52,
+ [0][1][2][0][RTW89_KCC][12] = 68,
+ [0][1][2][0][RTW89_ACMA][12] = 46,
+ [0][1][2][0][RTW89_CHILE][12] = 42,
+ [0][1][2][0][RTW89_UKRAINE][12] = 40,
+ [0][1][2][0][RTW89_MEXICO][12] = 68,
+ [0][1][2][0][RTW89_CN][12] = 46,
+ [0][1][2][0][RTW89_QATAR][12] = 46,
+ [0][1][2][0][RTW89_FCC][14] = 68,
+ [0][1][2][0][RTW89_ETSI][14] = 46,
+ [0][1][2][0][RTW89_MKK][14] = 50,
+ [0][1][2][0][RTW89_IC][14] = 52,
+ [0][1][2][0][RTW89_KCC][14] = 68,
+ [0][1][2][0][RTW89_ACMA][14] = 46,
+ [0][1][2][0][RTW89_CHILE][14] = 42,
+ [0][1][2][0][RTW89_UKRAINE][14] = 40,
+ [0][1][2][0][RTW89_MEXICO][14] = 68,
+ [0][1][2][0][RTW89_CN][14] = 46,
+ [0][1][2][0][RTW89_QATAR][14] = 46,
+ [0][1][2][0][RTW89_FCC][15] = 68,
+ [0][1][2][0][RTW89_ETSI][15] = 46,
+ [0][1][2][0][RTW89_MKK][15] = 70,
+ [0][1][2][0][RTW89_IC][15] = 68,
+ [0][1][2][0][RTW89_KCC][15] = 66,
+ [0][1][2][0][RTW89_ACMA][15] = 46,
+ [0][1][2][0][RTW89_CHILE][15] = 42,
+ [0][1][2][0][RTW89_UKRAINE][15] = 40,
+ [0][1][2][0][RTW89_MEXICO][15] = 68,
+ [0][1][2][0][RTW89_CN][15] = 127,
+ [0][1][2][0][RTW89_QATAR][15] = 40,
+ [0][1][2][0][RTW89_FCC][17] = 68,
+ [0][1][2][0][RTW89_ETSI][17] = 46,
+ [0][1][2][0][RTW89_MKK][17] = 70,
+ [0][1][2][0][RTW89_IC][17] = 68,
+ [0][1][2][0][RTW89_KCC][17] = 66,
+ [0][1][2][0][RTW89_ACMA][17] = 46,
+ [0][1][2][0][RTW89_CHILE][17] = 42,
+ [0][1][2][0][RTW89_UKRAINE][17] = 40,
+ [0][1][2][0][RTW89_MEXICO][17] = 68,
+ [0][1][2][0][RTW89_CN][17] = 127,
+ [0][1][2][0][RTW89_QATAR][17] = 40,
+ [0][1][2][0][RTW89_FCC][19] = 68,
+ [0][1][2][0][RTW89_ETSI][19] = 46,
+ [0][1][2][0][RTW89_MKK][19] = 70,
+ [0][1][2][0][RTW89_IC][19] = 68,
+ [0][1][2][0][RTW89_KCC][19] = 66,
+ [0][1][2][0][RTW89_ACMA][19] = 46,
+ [0][1][2][0][RTW89_CHILE][19] = 42,
+ [0][1][2][0][RTW89_UKRAINE][19] = 40,
+ [0][1][2][0][RTW89_MEXICO][19] = 68,
+ [0][1][2][0][RTW89_CN][19] = 127,
+ [0][1][2][0][RTW89_QATAR][19] = 40,
+ [0][1][2][0][RTW89_FCC][21] = 68,
+ [0][1][2][0][RTW89_ETSI][21] = 46,
+ [0][1][2][0][RTW89_MKK][21] = 70,
+ [0][1][2][0][RTW89_IC][21] = 68,
+ [0][1][2][0][RTW89_KCC][21] = 66,
+ [0][1][2][0][RTW89_ACMA][21] = 46,
+ [0][1][2][0][RTW89_CHILE][21] = 42,
+ [0][1][2][0][RTW89_UKRAINE][21] = 40,
+ [0][1][2][0][RTW89_MEXICO][21] = 68,
+ [0][1][2][0][RTW89_CN][21] = 127,
+ [0][1][2][0][RTW89_QATAR][21] = 40,
+ [0][1][2][0][RTW89_FCC][23] = 68,
+ [0][1][2][0][RTW89_ETSI][23] = 46,
+ [0][1][2][0][RTW89_MKK][23] = 70,
+ [0][1][2][0][RTW89_IC][23] = 68,
+ [0][1][2][0][RTW89_KCC][23] = 66,
+ [0][1][2][0][RTW89_ACMA][23] = 46,
+ [0][1][2][0][RTW89_CHILE][23] = 42,
+ [0][1][2][0][RTW89_UKRAINE][23] = 40,
+ [0][1][2][0][RTW89_MEXICO][23] = 68,
+ [0][1][2][0][RTW89_CN][23] = 127,
+ [0][1][2][0][RTW89_QATAR][23] = 40,
+ [0][1][2][0][RTW89_FCC][25] = 68,
+ [0][1][2][0][RTW89_ETSI][25] = 46,
+ [0][1][2][0][RTW89_MKK][25] = 70,
+ [0][1][2][0][RTW89_IC][25] = 127,
+ [0][1][2][0][RTW89_KCC][25] = 66,
+ [0][1][2][0][RTW89_ACMA][25] = 127,
+ [0][1][2][0][RTW89_CHILE][25] = 42,
+ [0][1][2][0][RTW89_UKRAINE][25] = 40,
+ [0][1][2][0][RTW89_MEXICO][25] = 68,
+ [0][1][2][0][RTW89_CN][25] = 127,
+ [0][1][2][0][RTW89_QATAR][25] = 40,
+ [0][1][2][0][RTW89_FCC][27] = 68,
+ [0][1][2][0][RTW89_ETSI][27] = 46,
+ [0][1][2][0][RTW89_MKK][27] = 70,
+ [0][1][2][0][RTW89_IC][27] = 127,
+ [0][1][2][0][RTW89_KCC][27] = 66,
+ [0][1][2][0][RTW89_ACMA][27] = 127,
+ [0][1][2][0][RTW89_CHILE][27] = 42,
+ [0][1][2][0][RTW89_UKRAINE][27] = 40,
+ [0][1][2][0][RTW89_MEXICO][27] = 68,
+ [0][1][2][0][RTW89_CN][27] = 127,
+ [0][1][2][0][RTW89_QATAR][27] = 40,
+ [0][1][2][0][RTW89_FCC][29] = 68,
+ [0][1][2][0][RTW89_ETSI][29] = 46,
+ [0][1][2][0][RTW89_MKK][29] = 70,
+ [0][1][2][0][RTW89_IC][29] = 127,
+ [0][1][2][0][RTW89_KCC][29] = 66,
+ [0][1][2][0][RTW89_ACMA][29] = 127,
+ [0][1][2][0][RTW89_CHILE][29] = 42,
+ [0][1][2][0][RTW89_UKRAINE][29] = 40,
+ [0][1][2][0][RTW89_MEXICO][29] = 68,
+ [0][1][2][0][RTW89_CN][29] = 127,
+ [0][1][2][0][RTW89_QATAR][29] = 40,
+ [0][1][2][0][RTW89_FCC][31] = 68,
+ [0][1][2][0][RTW89_ETSI][31] = 46,
+ [0][1][2][0][RTW89_MKK][31] = 70,
+ [0][1][2][0][RTW89_IC][31] = 68,
+ [0][1][2][0][RTW89_KCC][31] = 66,
+ [0][1][2][0][RTW89_ACMA][31] = 46,
+ [0][1][2][0][RTW89_CHILE][31] = 42,
+ [0][1][2][0][RTW89_UKRAINE][31] = 40,
+ [0][1][2][0][RTW89_MEXICO][31] = 68,
+ [0][1][2][0][RTW89_CN][31] = 127,
+ [0][1][2][0][RTW89_QATAR][31] = 40,
+ [0][1][2][0][RTW89_FCC][33] = 68,
+ [0][1][2][0][RTW89_ETSI][33] = 46,
+ [0][1][2][0][RTW89_MKK][33] = 70,
+ [0][1][2][0][RTW89_IC][33] = 68,
+ [0][1][2][0][RTW89_KCC][33] = 66,
+ [0][1][2][0][RTW89_ACMA][33] = 46,
+ [0][1][2][0][RTW89_CHILE][33] = 42,
+ [0][1][2][0][RTW89_UKRAINE][33] = 40,
+ [0][1][2][0][RTW89_MEXICO][33] = 68,
+ [0][1][2][0][RTW89_CN][33] = 127,
+ [0][1][2][0][RTW89_QATAR][33] = 40,
+ [0][1][2][0][RTW89_FCC][35] = 64,
+ [0][1][2][0][RTW89_ETSI][35] = 46,
+ [0][1][2][0][RTW89_MKK][35] = 70,
+ [0][1][2][0][RTW89_IC][35] = 64,
+ [0][1][2][0][RTW89_KCC][35] = 66,
+ [0][1][2][0][RTW89_ACMA][35] = 46,
+ [0][1][2][0][RTW89_CHILE][35] = 42,
+ [0][1][2][0][RTW89_UKRAINE][35] = 40,
+ [0][1][2][0][RTW89_MEXICO][35] = 64,
+ [0][1][2][0][RTW89_CN][35] = 127,
+ [0][1][2][0][RTW89_QATAR][35] = 40,
+ [0][1][2][0][RTW89_FCC][37] = 68,
+ [0][1][2][0][RTW89_ETSI][37] = 127,
+ [0][1][2][0][RTW89_MKK][37] = 70,
+ [0][1][2][0][RTW89_IC][37] = 68,
+ [0][1][2][0][RTW89_KCC][37] = 66,
+ [0][1][2][0][RTW89_ACMA][37] = 68,
+ [0][1][2][0][RTW89_CHILE][37] = 42,
+ [0][1][2][0][RTW89_UKRAINE][37] = 127,
+ [0][1][2][0][RTW89_MEXICO][37] = 68,
+ [0][1][2][0][RTW89_CN][37] = 127,
+ [0][1][2][0][RTW89_QATAR][37] = 127,
+ [0][1][2][0][RTW89_FCC][38] = 76,
+ [0][1][2][0][RTW89_ETSI][38] = 16,
+ [0][1][2][0][RTW89_MKK][38] = 127,
+ [0][1][2][0][RTW89_IC][38] = 76,
+ [0][1][2][0][RTW89_KCC][38] = 66,
+ [0][1][2][0][RTW89_ACMA][38] = 76,
+ [0][1][2][0][RTW89_CHILE][38] = 42,
+ [0][1][2][0][RTW89_UKRAINE][38] = 16,
+ [0][1][2][0][RTW89_MEXICO][38] = 76,
+ [0][1][2][0][RTW89_CN][38] = 68,
+ [0][1][2][0][RTW89_QATAR][38] = 16,
+ [0][1][2][0][RTW89_FCC][40] = 76,
+ [0][1][2][0][RTW89_ETSI][40] = 16,
+ [0][1][2][0][RTW89_MKK][40] = 127,
+ [0][1][2][0][RTW89_IC][40] = 76,
+ [0][1][2][0][RTW89_KCC][40] = 66,
+ [0][1][2][0][RTW89_ACMA][40] = 76,
+ [0][1][2][0][RTW89_CHILE][40] = 42,
+ [0][1][2][0][RTW89_UKRAINE][40] = 16,
+ [0][1][2][0][RTW89_MEXICO][40] = 76,
+ [0][1][2][0][RTW89_CN][40] = 76,
+ [0][1][2][0][RTW89_QATAR][40] = 16,
+ [0][1][2][0][RTW89_FCC][42] = 76,
+ [0][1][2][0][RTW89_ETSI][42] = 16,
+ [0][1][2][0][RTW89_MKK][42] = 127,
+ [0][1][2][0][RTW89_IC][42] = 76,
+ [0][1][2][0][RTW89_KCC][42] = 66,
+ [0][1][2][0][RTW89_ACMA][42] = 76,
+ [0][1][2][0][RTW89_CHILE][42] = 42,
+ [0][1][2][0][RTW89_UKRAINE][42] = 16,
+ [0][1][2][0][RTW89_MEXICO][42] = 76,
+ [0][1][2][0][RTW89_CN][42] = 76,
+ [0][1][2][0][RTW89_QATAR][42] = 16,
+ [0][1][2][0][RTW89_FCC][44] = 76,
+ [0][1][2][0][RTW89_ETSI][44] = 16,
+ [0][1][2][0][RTW89_MKK][44] = 127,
+ [0][1][2][0][RTW89_IC][44] = 76,
+ [0][1][2][0][RTW89_KCC][44] = 66,
+ [0][1][2][0][RTW89_ACMA][44] = 76,
+ [0][1][2][0][RTW89_CHILE][44] = 42,
+ [0][1][2][0][RTW89_UKRAINE][44] = 16,
+ [0][1][2][0][RTW89_MEXICO][44] = 76,
+ [0][1][2][0][RTW89_CN][44] = 76,
+ [0][1][2][0][RTW89_QATAR][44] = 16,
+ [0][1][2][0][RTW89_FCC][46] = 76,
+ [0][1][2][0][RTW89_ETSI][46] = 16,
+ [0][1][2][0][RTW89_MKK][46] = 127,
+ [0][1][2][0][RTW89_IC][46] = 76,
+ [0][1][2][0][RTW89_KCC][46] = 66,
+ [0][1][2][0][RTW89_ACMA][46] = 76,
+ [0][1][2][0][RTW89_CHILE][46] = 42,
+ [0][1][2][0][RTW89_UKRAINE][46] = 16,
+ [0][1][2][0][RTW89_MEXICO][46] = 76,
+ [0][1][2][0][RTW89_CN][46] = 76,
+ [0][1][2][0][RTW89_QATAR][46] = 16,
+ [0][1][2][1][RTW89_FCC][0] = 68,
+ [0][1][2][1][RTW89_ETSI][0] = 34,
+ [0][1][2][1][RTW89_MKK][0] = 50,
+ [0][1][2][1][RTW89_IC][0] = 38,
+ [0][1][2][1][RTW89_KCC][0] = 68,
+ [0][1][2][1][RTW89_ACMA][0] = 34,
+ [0][1][2][1][RTW89_CHILE][0] = 6,
+ [0][1][2][1][RTW89_UKRAINE][0] = 28,
+ [0][1][2][1][RTW89_MEXICO][0] = 50,
+ [0][1][2][1][RTW89_CN][0] = 34,
+ [0][1][2][1][RTW89_QATAR][0] = 34,
+ [0][1][2][1][RTW89_FCC][2] = 68,
+ [0][1][2][1][RTW89_ETSI][2] = 34,
+ [0][1][2][1][RTW89_MKK][2] = 50,
+ [0][1][2][1][RTW89_IC][2] = 38,
+ [0][1][2][1][RTW89_KCC][2] = 68,
+ [0][1][2][1][RTW89_ACMA][2] = 34,
+ [0][1][2][1][RTW89_CHILE][2] = 6,
+ [0][1][2][1][RTW89_UKRAINE][2] = 28,
+ [0][1][2][1][RTW89_MEXICO][2] = 50,
+ [0][1][2][1][RTW89_CN][2] = 34,
+ [0][1][2][1][RTW89_QATAR][2] = 34,
+ [0][1][2][1][RTW89_FCC][4] = 68,
+ [0][1][2][1][RTW89_ETSI][4] = 34,
+ [0][1][2][1][RTW89_MKK][4] = 50,
+ [0][1][2][1][RTW89_IC][4] = 38,
+ [0][1][2][1][RTW89_KCC][4] = 68,
+ [0][1][2][1][RTW89_ACMA][4] = 34,
+ [0][1][2][1][RTW89_CHILE][4] = 6,
+ [0][1][2][1][RTW89_UKRAINE][4] = 28,
+ [0][1][2][1][RTW89_MEXICO][4] = 50,
+ [0][1][2][1][RTW89_CN][4] = 34,
+ [0][1][2][1][RTW89_QATAR][4] = 34,
+ [0][1][2][1][RTW89_FCC][6] = 68,
+ [0][1][2][1][RTW89_ETSI][6] = 34,
+ [0][1][2][1][RTW89_MKK][6] = 50,
+ [0][1][2][1][RTW89_IC][6] = 38,
+ [0][1][2][1][RTW89_KCC][6] = 38,
+ [0][1][2][1][RTW89_ACMA][6] = 34,
+ [0][1][2][1][RTW89_CHILE][6] = 6,
+ [0][1][2][1][RTW89_UKRAINE][6] = 28,
+ [0][1][2][1][RTW89_MEXICO][6] = 50,
+ [0][1][2][1][RTW89_CN][6] = 34,
+ [0][1][2][1][RTW89_QATAR][6] = 34,
+ [0][1][2][1][RTW89_FCC][8] = 68,
+ [0][1][2][1][RTW89_ETSI][8] = 34,
+ [0][1][2][1][RTW89_MKK][8] = 50,
+ [0][1][2][1][RTW89_IC][8] = 38,
+ [0][1][2][1][RTW89_KCC][8] = 68,
+ [0][1][2][1][RTW89_ACMA][8] = 34,
+ [0][1][2][1][RTW89_CHILE][8] = 30,
+ [0][1][2][1][RTW89_UKRAINE][8] = 28,
+ [0][1][2][1][RTW89_MEXICO][8] = 68,
+ [0][1][2][1][RTW89_CN][8] = 34,
+ [0][1][2][1][RTW89_QATAR][8] = 34,
+ [0][1][2][1][RTW89_FCC][10] = 68,
+ [0][1][2][1][RTW89_ETSI][10] = 34,
+ [0][1][2][1][RTW89_MKK][10] = 50,
+ [0][1][2][1][RTW89_IC][10] = 38,
+ [0][1][2][1][RTW89_KCC][10] = 68,
+ [0][1][2][1][RTW89_ACMA][10] = 34,
+ [0][1][2][1][RTW89_CHILE][10] = 30,
+ [0][1][2][1][RTW89_UKRAINE][10] = 28,
+ [0][1][2][1][RTW89_MEXICO][10] = 68,
+ [0][1][2][1][RTW89_CN][10] = 34,
+ [0][1][2][1][RTW89_QATAR][10] = 34,
+ [0][1][2][1][RTW89_FCC][12] = 68,
+ [0][1][2][1][RTW89_ETSI][12] = 34,
+ [0][1][2][1][RTW89_MKK][12] = 50,
+ [0][1][2][1][RTW89_IC][12] = 38,
+ [0][1][2][1][RTW89_KCC][12] = 68,
+ [0][1][2][1][RTW89_ACMA][12] = 34,
+ [0][1][2][1][RTW89_CHILE][12] = 30,
+ [0][1][2][1][RTW89_UKRAINE][12] = 28,
+ [0][1][2][1][RTW89_MEXICO][12] = 68,
+ [0][1][2][1][RTW89_CN][12] = 34,
+ [0][1][2][1][RTW89_QATAR][12] = 34,
+ [0][1][2][1][RTW89_FCC][14] = 68,
+ [0][1][2][1][RTW89_ETSI][14] = 34,
+ [0][1][2][1][RTW89_MKK][14] = 50,
+ [0][1][2][1][RTW89_IC][14] = 38,
+ [0][1][2][1][RTW89_KCC][14] = 68,
+ [0][1][2][1][RTW89_ACMA][14] = 34,
+ [0][1][2][1][RTW89_CHILE][14] = 30,
+ [0][1][2][1][RTW89_UKRAINE][14] = 28,
+ [0][1][2][1][RTW89_MEXICO][14] = 68,
+ [0][1][2][1][RTW89_CN][14] = 34,
+ [0][1][2][1][RTW89_QATAR][14] = 34,
+ [0][1][2][1][RTW89_FCC][15] = 68,
+ [0][1][2][1][RTW89_ETSI][15] = 34,
+ [0][1][2][1][RTW89_MKK][15] = 70,
+ [0][1][2][1][RTW89_IC][15] = 62,
+ [0][1][2][1][RTW89_KCC][15] = 66,
+ [0][1][2][1][RTW89_ACMA][15] = 34,
+ [0][1][2][1][RTW89_CHILE][15] = 30,
+ [0][1][2][1][RTW89_UKRAINE][15] = 28,
+ [0][1][2][1][RTW89_MEXICO][15] = 68,
+ [0][1][2][1][RTW89_CN][15] = 127,
+ [0][1][2][1][RTW89_QATAR][15] = 28,
+ [0][1][2][1][RTW89_FCC][17] = 68,
+ [0][1][2][1][RTW89_ETSI][17] = 34,
+ [0][1][2][1][RTW89_MKK][17] = 70,
+ [0][1][2][1][RTW89_IC][17] = 62,
+ [0][1][2][1][RTW89_KCC][17] = 66,
+ [0][1][2][1][RTW89_ACMA][17] = 34,
+ [0][1][2][1][RTW89_CHILE][17] = 30,
+ [0][1][2][1][RTW89_UKRAINE][17] = 28,
+ [0][1][2][1][RTW89_MEXICO][17] = 68,
+ [0][1][2][1][RTW89_CN][17] = 127,
+ [0][1][2][1][RTW89_QATAR][17] = 28,
+ [0][1][2][1][RTW89_FCC][19] = 68,
+ [0][1][2][1][RTW89_ETSI][19] = 34,
+ [0][1][2][1][RTW89_MKK][19] = 70,
+ [0][1][2][1][RTW89_IC][19] = 62,
+ [0][1][2][1][RTW89_KCC][19] = 66,
+ [0][1][2][1][RTW89_ACMA][19] = 34,
+ [0][1][2][1][RTW89_CHILE][19] = 30,
+ [0][1][2][1][RTW89_UKRAINE][19] = 28,
+ [0][1][2][1][RTW89_MEXICO][19] = 68,
+ [0][1][2][1][RTW89_CN][19] = 127,
+ [0][1][2][1][RTW89_QATAR][19] = 28,
+ [0][1][2][1][RTW89_FCC][21] = 68,
+ [0][1][2][1][RTW89_ETSI][21] = 34,
+ [0][1][2][1][RTW89_MKK][21] = 70,
+ [0][1][2][1][RTW89_IC][21] = 62,
+ [0][1][2][1][RTW89_KCC][21] = 66,
+ [0][1][2][1][RTW89_ACMA][21] = 34,
+ [0][1][2][1][RTW89_CHILE][21] = 30,
+ [0][1][2][1][RTW89_UKRAINE][21] = 28,
+ [0][1][2][1][RTW89_MEXICO][21] = 68,
+ [0][1][2][1][RTW89_CN][21] = 127,
+ [0][1][2][1][RTW89_QATAR][21] = 28,
+ [0][1][2][1][RTW89_FCC][23] = 68,
+ [0][1][2][1][RTW89_ETSI][23] = 34,
+ [0][1][2][1][RTW89_MKK][23] = 70,
+ [0][1][2][1][RTW89_IC][23] = 62,
+ [0][1][2][1][RTW89_KCC][23] = 66,
+ [0][1][2][1][RTW89_ACMA][23] = 34,
+ [0][1][2][1][RTW89_CHILE][23] = 30,
+ [0][1][2][1][RTW89_UKRAINE][23] = 28,
+ [0][1][2][1][RTW89_MEXICO][23] = 68,
+ [0][1][2][1][RTW89_CN][23] = 127,
+ [0][1][2][1][RTW89_QATAR][23] = 28,
+ [0][1][2][1][RTW89_FCC][25] = 68,
+ [0][1][2][1][RTW89_ETSI][25] = 34,
+ [0][1][2][1][RTW89_MKK][25] = 70,
+ [0][1][2][1][RTW89_IC][25] = 127,
+ [0][1][2][1][RTW89_KCC][25] = 66,
+ [0][1][2][1][RTW89_ACMA][25] = 127,
+ [0][1][2][1][RTW89_CHILE][25] = 30,
+ [0][1][2][1][RTW89_UKRAINE][25] = 28,
+ [0][1][2][1][RTW89_MEXICO][25] = 68,
+ [0][1][2][1][RTW89_CN][25] = 127,
+ [0][1][2][1][RTW89_QATAR][25] = 28,
+ [0][1][2][1][RTW89_FCC][27] = 68,
+ [0][1][2][1][RTW89_ETSI][27] = 34,
+ [0][1][2][1][RTW89_MKK][27] = 70,
+ [0][1][2][1][RTW89_IC][27] = 127,
+ [0][1][2][1][RTW89_KCC][27] = 66,
+ [0][1][2][1][RTW89_ACMA][27] = 127,
+ [0][1][2][1][RTW89_CHILE][27] = 30,
+ [0][1][2][1][RTW89_UKRAINE][27] = 28,
+ [0][1][2][1][RTW89_MEXICO][27] = 68,
+ [0][1][2][1][RTW89_CN][27] = 127,
+ [0][1][2][1][RTW89_QATAR][27] = 28,
+ [0][1][2][1][RTW89_FCC][29] = 68,
+ [0][1][2][1][RTW89_ETSI][29] = 34,
+ [0][1][2][1][RTW89_MKK][29] = 70,
+ [0][1][2][1][RTW89_IC][29] = 127,
+ [0][1][2][1][RTW89_KCC][29] = 66,
+ [0][1][2][1][RTW89_ACMA][29] = 127,
+ [0][1][2][1][RTW89_CHILE][29] = 30,
+ [0][1][2][1][RTW89_UKRAINE][29] = 28,
+ [0][1][2][1][RTW89_MEXICO][29] = 68,
+ [0][1][2][1][RTW89_CN][29] = 127,
+ [0][1][2][1][RTW89_QATAR][29] = 28,
+ [0][1][2][1][RTW89_FCC][31] = 68,
+ [0][1][2][1][RTW89_ETSI][31] = 34,
+ [0][1][2][1][RTW89_MKK][31] = 70,
+ [0][1][2][1][RTW89_IC][31] = 62,
+ [0][1][2][1][RTW89_KCC][31] = 66,
+ [0][1][2][1][RTW89_ACMA][31] = 34,
+ [0][1][2][1][RTW89_CHILE][31] = 30,
+ [0][1][2][1][RTW89_UKRAINE][31] = 28,
+ [0][1][2][1][RTW89_MEXICO][31] = 68,
+ [0][1][2][1][RTW89_CN][31] = 127,
+ [0][1][2][1][RTW89_QATAR][31] = 28,
+ [0][1][2][1][RTW89_FCC][33] = 68,
+ [0][1][2][1][RTW89_ETSI][33] = 34,
+ [0][1][2][1][RTW89_MKK][33] = 70,
+ [0][1][2][1][RTW89_IC][33] = 62,
+ [0][1][2][1][RTW89_KCC][33] = 66,
+ [0][1][2][1][RTW89_ACMA][33] = 34,
+ [0][1][2][1][RTW89_CHILE][33] = 30,
+ [0][1][2][1][RTW89_UKRAINE][33] = 28,
+ [0][1][2][1][RTW89_MEXICO][33] = 68,
+ [0][1][2][1][RTW89_CN][33] = 127,
+ [0][1][2][1][RTW89_QATAR][33] = 28,
+ [0][1][2][1][RTW89_FCC][35] = 64,
+ [0][1][2][1][RTW89_ETSI][35] = 34,
+ [0][1][2][1][RTW89_MKK][35] = 70,
+ [0][1][2][1][RTW89_IC][35] = 62,
+ [0][1][2][1][RTW89_KCC][35] = 66,
+ [0][1][2][1][RTW89_ACMA][35] = 34,
+ [0][1][2][1][RTW89_CHILE][35] = 30,
+ [0][1][2][1][RTW89_UKRAINE][35] = 28,
+ [0][1][2][1][RTW89_MEXICO][35] = 64,
+ [0][1][2][1][RTW89_CN][35] = 127,
+ [0][1][2][1][RTW89_QATAR][35] = 28,
+ [0][1][2][1][RTW89_FCC][37] = 68,
+ [0][1][2][1][RTW89_ETSI][37] = 127,
+ [0][1][2][1][RTW89_MKK][37] = 70,
+ [0][1][2][1][RTW89_IC][37] = 62,
+ [0][1][2][1][RTW89_KCC][37] = 66,
+ [0][1][2][1][RTW89_ACMA][37] = 68,
+ [0][1][2][1][RTW89_CHILE][37] = 30,
+ [0][1][2][1][RTW89_UKRAINE][37] = 127,
+ [0][1][2][1][RTW89_MEXICO][37] = 68,
+ [0][1][2][1][RTW89_CN][37] = 127,
+ [0][1][2][1][RTW89_QATAR][37] = 127,
+ [0][1][2][1][RTW89_FCC][38] = 76,
+ [0][1][2][1][RTW89_ETSI][38] = 4,
+ [0][1][2][1][RTW89_MKK][38] = 127,
+ [0][1][2][1][RTW89_IC][38] = 76,
+ [0][1][2][1][RTW89_KCC][38] = 66,
+ [0][1][2][1][RTW89_ACMA][38] = 76,
+ [0][1][2][1][RTW89_CHILE][38] = 30,
+ [0][1][2][1][RTW89_UKRAINE][38] = 4,
+ [0][1][2][1][RTW89_MEXICO][38] = 76,
+ [0][1][2][1][RTW89_CN][38] = 68,
+ [0][1][2][1][RTW89_QATAR][38] = 4,
+ [0][1][2][1][RTW89_FCC][40] = 76,
+ [0][1][2][1][RTW89_ETSI][40] = 4,
+ [0][1][2][1][RTW89_MKK][40] = 127,
+ [0][1][2][1][RTW89_IC][40] = 76,
+ [0][1][2][1][RTW89_KCC][40] = 66,
+ [0][1][2][1][RTW89_ACMA][40] = 76,
+ [0][1][2][1][RTW89_CHILE][40] = 30,
+ [0][1][2][1][RTW89_UKRAINE][40] = 4,
+ [0][1][2][1][RTW89_MEXICO][40] = 76,
+ [0][1][2][1][RTW89_CN][40] = 70,
+ [0][1][2][1][RTW89_QATAR][40] = 4,
+ [0][1][2][1][RTW89_FCC][42] = 76,
+ [0][1][2][1][RTW89_ETSI][42] = 4,
+ [0][1][2][1][RTW89_MKK][42] = 127,
+ [0][1][2][1][RTW89_IC][42] = 76,
+ [0][1][2][1][RTW89_KCC][42] = 66,
+ [0][1][2][1][RTW89_ACMA][42] = 76,
+ [0][1][2][1][RTW89_CHILE][42] = 30,
+ [0][1][2][1][RTW89_UKRAINE][42] = 4,
+ [0][1][2][1][RTW89_MEXICO][42] = 76,
+ [0][1][2][1][RTW89_CN][42] = 70,
+ [0][1][2][1][RTW89_QATAR][42] = 4,
+ [0][1][2][1][RTW89_FCC][44] = 76,
+ [0][1][2][1][RTW89_ETSI][44] = 4,
+ [0][1][2][1][RTW89_MKK][44] = 127,
+ [0][1][2][1][RTW89_IC][44] = 76,
+ [0][1][2][1][RTW89_KCC][44] = 66,
+ [0][1][2][1][RTW89_ACMA][44] = 76,
+ [0][1][2][1][RTW89_CHILE][44] = 30,
+ [0][1][2][1][RTW89_UKRAINE][44] = 4,
+ [0][1][2][1][RTW89_MEXICO][44] = 76,
+ [0][1][2][1][RTW89_CN][44] = 70,
+ [0][1][2][1][RTW89_QATAR][44] = 4,
+ [0][1][2][1][RTW89_FCC][46] = 76,
+ [0][1][2][1][RTW89_ETSI][46] = 4,
+ [0][1][2][1][RTW89_MKK][46] = 127,
+ [0][1][2][1][RTW89_IC][46] = 76,
+ [0][1][2][1][RTW89_KCC][46] = 66,
+ [0][1][2][1][RTW89_ACMA][46] = 76,
+ [0][1][2][1][RTW89_CHILE][46] = 30,
+ [0][1][2][1][RTW89_UKRAINE][46] = 4,
+ [0][1][2][1][RTW89_MEXICO][46] = 76,
+ [0][1][2][1][RTW89_CN][46] = 70,
+ [0][1][2][1][RTW89_QATAR][46] = 4,
+ [1][0][2][0][RTW89_FCC][1] = 68,
+ [1][0][2][0][RTW89_ETSI][1] = 64,
+ [1][0][2][0][RTW89_MKK][1] = 62,
+ [1][0][2][0][RTW89_IC][1] = 64,
+ [1][0][2][0][RTW89_KCC][1] = 72,
+ [1][0][2][0][RTW89_ACMA][1] = 64,
+ [1][0][2][0][RTW89_CHILE][1] = 30,
+ [1][0][2][0][RTW89_UKRAINE][1] = 52,
+ [1][0][2][0][RTW89_MEXICO][1] = 62,
+ [1][0][2][0][RTW89_CN][1] = 64,
+ [1][0][2][0][RTW89_QATAR][1] = 64,
+ [1][0][2][0][RTW89_FCC][5] = 72,
+ [1][0][2][0][RTW89_ETSI][5] = 64,
+ [1][0][2][0][RTW89_MKK][5] = 62,
+ [1][0][2][0][RTW89_IC][5] = 64,
+ [1][0][2][0][RTW89_KCC][5] = 72,
+ [1][0][2][0][RTW89_ACMA][5] = 64,
+ [1][0][2][0][RTW89_CHILE][5] = 30,
+ [1][0][2][0][RTW89_UKRAINE][5] = 52,
+ [1][0][2][0][RTW89_MEXICO][5] = 62,
+ [1][0][2][0][RTW89_CN][5] = 64,
+ [1][0][2][0][RTW89_QATAR][5] = 64,
+ [1][0][2][0][RTW89_FCC][9] = 72,
+ [1][0][2][0][RTW89_ETSI][9] = 64,
+ [1][0][2][0][RTW89_MKK][9] = 62,
+ [1][0][2][0][RTW89_IC][9] = 64,
+ [1][0][2][0][RTW89_KCC][9] = 72,
+ [1][0][2][0][RTW89_ACMA][9] = 64,
+ [1][0][2][0][RTW89_CHILE][9] = 54,
+ [1][0][2][0][RTW89_UKRAINE][9] = 52,
+ [1][0][2][0][RTW89_MEXICO][9] = 72,
+ [1][0][2][0][RTW89_CN][9] = 64,
+ [1][0][2][0][RTW89_QATAR][9] = 64,
+ [1][0][2][0][RTW89_FCC][13] = 66,
+ [1][0][2][0][RTW89_ETSI][13] = 64,
+ [1][0][2][0][RTW89_MKK][13] = 62,
+ [1][0][2][0][RTW89_IC][13] = 64,
+ [1][0][2][0][RTW89_KCC][13] = 68,
+ [1][0][2][0][RTW89_ACMA][13] = 64,
+ [1][0][2][0][RTW89_CHILE][13] = 54,
+ [1][0][2][0][RTW89_UKRAINE][13] = 52,
+ [1][0][2][0][RTW89_MEXICO][13] = 66,
+ [1][0][2][0][RTW89_CN][13] = 64,
+ [1][0][2][0][RTW89_QATAR][13] = 64,
+ [1][0][2][0][RTW89_FCC][16] = 62,
+ [1][0][2][0][RTW89_ETSI][16] = 64,
+ [1][0][2][0][RTW89_MKK][16] = 72,
+ [1][0][2][0][RTW89_IC][16] = 62,
+ [1][0][2][0][RTW89_KCC][16] = 72,
+ [1][0][2][0][RTW89_ACMA][16] = 64,
+ [1][0][2][0][RTW89_CHILE][16] = 54,
+ [1][0][2][0][RTW89_UKRAINE][16] = 52,
+ [1][0][2][0][RTW89_MEXICO][16] = 62,
+ [1][0][2][0][RTW89_CN][16] = 127,
+ [1][0][2][0][RTW89_QATAR][16] = 52,
+ [1][0][2][0][RTW89_FCC][20] = 72,
+ [1][0][2][0][RTW89_ETSI][20] = 64,
+ [1][0][2][0][RTW89_MKK][20] = 72,
+ [1][0][2][0][RTW89_IC][20] = 72,
+ [1][0][2][0][RTW89_KCC][20] = 72,
+ [1][0][2][0][RTW89_ACMA][20] = 64,
+ [1][0][2][0][RTW89_CHILE][20] = 54,
+ [1][0][2][0][RTW89_UKRAINE][20] = 52,
+ [1][0][2][0][RTW89_MEXICO][20] = 72,
+ [1][0][2][0][RTW89_CN][20] = 127,
+ [1][0][2][0][RTW89_QATAR][20] = 52,
+ [1][0][2][0][RTW89_FCC][24] = 72,
+ [1][0][2][0][RTW89_ETSI][24] = 64,
+ [1][0][2][0][RTW89_MKK][24] = 72,
+ [1][0][2][0][RTW89_IC][24] = 127,
+ [1][0][2][0][RTW89_KCC][24] = 72,
+ [1][0][2][0][RTW89_ACMA][24] = 127,
+ [1][0][2][0][RTW89_CHILE][24] = 54,
+ [1][0][2][0][RTW89_UKRAINE][24] = 52,
+ [1][0][2][0][RTW89_MEXICO][24] = 72,
+ [1][0][2][0][RTW89_CN][24] = 127,
+ [1][0][2][0][RTW89_QATAR][24] = 52,
+ [1][0][2][0][RTW89_FCC][28] = 72,
+ [1][0][2][0][RTW89_ETSI][28] = 64,
+ [1][0][2][0][RTW89_MKK][28] = 72,
+ [1][0][2][0][RTW89_IC][28] = 127,
+ [1][0][2][0][RTW89_KCC][28] = 72,
+ [1][0][2][0][RTW89_ACMA][28] = 127,
+ [1][0][2][0][RTW89_CHILE][28] = 54,
+ [1][0][2][0][RTW89_UKRAINE][28] = 52,
+ [1][0][2][0][RTW89_MEXICO][28] = 72,
+ [1][0][2][0][RTW89_CN][28] = 127,
+ [1][0][2][0][RTW89_QATAR][28] = 52,
+ [1][0][2][0][RTW89_FCC][32] = 72,
+ [1][0][2][0][RTW89_ETSI][32] = 64,
+ [1][0][2][0][RTW89_MKK][32] = 72,
+ [1][0][2][0][RTW89_IC][32] = 72,
+ [1][0][2][0][RTW89_KCC][32] = 72,
+ [1][0][2][0][RTW89_ACMA][32] = 64,
+ [1][0][2][0][RTW89_CHILE][32] = 54,
+ [1][0][2][0][RTW89_UKRAINE][32] = 52,
+ [1][0][2][0][RTW89_MEXICO][32] = 72,
+ [1][0][2][0][RTW89_CN][32] = 127,
+ [1][0][2][0][RTW89_QATAR][32] = 52,
+ [1][0][2][0][RTW89_FCC][36] = 72,
+ [1][0][2][0][RTW89_ETSI][36] = 127,
+ [1][0][2][0][RTW89_MKK][36] = 72,
+ [1][0][2][0][RTW89_IC][36] = 72,
+ [1][0][2][0][RTW89_KCC][36] = 72,
+ [1][0][2][0][RTW89_ACMA][36] = 72,
+ [1][0][2][0][RTW89_CHILE][36] = 54,
+ [1][0][2][0][RTW89_UKRAINE][36] = 127,
+ [1][0][2][0][RTW89_MEXICO][36] = 72,
+ [1][0][2][0][RTW89_CN][36] = 127,
+ [1][0][2][0][RTW89_QATAR][36] = 127,
+ [1][0][2][0][RTW89_FCC][39] = 72,
+ [1][0][2][0][RTW89_ETSI][39] = 28,
+ [1][0][2][0][RTW89_MKK][39] = 127,
+ [1][0][2][0][RTW89_IC][39] = 72,
+ [1][0][2][0][RTW89_KCC][39] = 72,
+ [1][0][2][0][RTW89_ACMA][39] = 72,
+ [1][0][2][0][RTW89_CHILE][39] = 54,
+ [1][0][2][0][RTW89_UKRAINE][39] = 28,
+ [1][0][2][0][RTW89_MEXICO][39] = 72,
+ [1][0][2][0][RTW89_CN][39] = 68,
+ [1][0][2][0][RTW89_QATAR][39] = 28,
+ [1][0][2][0][RTW89_FCC][43] = 72,
+ [1][0][2][0][RTW89_ETSI][43] = 28,
+ [1][0][2][0][RTW89_MKK][43] = 127,
+ [1][0][2][0][RTW89_IC][43] = 72,
+ [1][0][2][0][RTW89_KCC][43] = 72,
+ [1][0][2][0][RTW89_ACMA][43] = 72,
+ [1][0][2][0][RTW89_CHILE][43] = 54,
+ [1][0][2][0][RTW89_UKRAINE][43] = 28,
+ [1][0][2][0][RTW89_MEXICO][43] = 72,
+ [1][0][2][0][RTW89_CN][43] = 72,
+ [1][0][2][0][RTW89_QATAR][43] = 28,
+ [1][1][2][0][RTW89_FCC][1] = 58,
+ [1][1][2][0][RTW89_ETSI][1] = 52,
+ [1][1][2][0][RTW89_MKK][1] = 50,
+ [1][1][2][0][RTW89_IC][1] = 52,
+ [1][1][2][0][RTW89_KCC][1] = 66,
+ [1][1][2][0][RTW89_ACMA][1] = 52,
+ [1][1][2][0][RTW89_CHILE][1] = 18,
+ [1][1][2][0][RTW89_UKRAINE][1] = 40,
+ [1][1][2][0][RTW89_MEXICO][1] = 50,
+ [1][1][2][0][RTW89_CN][1] = 52,
+ [1][1][2][0][RTW89_QATAR][1] = 52,
+ [1][1][2][0][RTW89_FCC][5] = 72,
+ [1][1][2][0][RTW89_ETSI][5] = 52,
+ [1][1][2][0][RTW89_MKK][5] = 50,
+ [1][1][2][0][RTW89_IC][5] = 52,
+ [1][1][2][0][RTW89_KCC][5] = 50,
+ [1][1][2][0][RTW89_ACMA][5] = 52,
+ [1][1][2][0][RTW89_CHILE][5] = 18,
+ [1][1][2][0][RTW89_UKRAINE][5] = 40,
+ [1][1][2][0][RTW89_MEXICO][5] = 50,
+ [1][1][2][0][RTW89_CN][5] = 52,
+ [1][1][2][0][RTW89_QATAR][5] = 52,
+ [1][1][2][0][RTW89_FCC][9] = 72,
+ [1][1][2][0][RTW89_ETSI][9] = 52,
+ [1][1][2][0][RTW89_MKK][9] = 50,
+ [1][1][2][0][RTW89_IC][9] = 52,
+ [1][1][2][0][RTW89_KCC][9] = 66,
+ [1][1][2][0][RTW89_ACMA][9] = 52,
+ [1][1][2][0][RTW89_CHILE][9] = 42,
+ [1][1][2][0][RTW89_UKRAINE][9] = 40,
+ [1][1][2][0][RTW89_MEXICO][9] = 72,
+ [1][1][2][0][RTW89_CN][9] = 52,
+ [1][1][2][0][RTW89_QATAR][9] = 52,
+ [1][1][2][0][RTW89_FCC][13] = 58,
+ [1][1][2][0][RTW89_ETSI][13] = 52,
+ [1][1][2][0][RTW89_MKK][13] = 50,
+ [1][1][2][0][RTW89_IC][13] = 52,
+ [1][1][2][0][RTW89_KCC][13] = 66,
+ [1][1][2][0][RTW89_ACMA][13] = 52,
+ [1][1][2][0][RTW89_CHILE][13] = 42,
+ [1][1][2][0][RTW89_UKRAINE][13] = 40,
+ [1][1][2][0][RTW89_MEXICO][13] = 58,
+ [1][1][2][0][RTW89_CN][13] = 52,
+ [1][1][2][0][RTW89_QATAR][13] = 52,
+ [1][1][2][0][RTW89_FCC][16] = 56,
+ [1][1][2][0][RTW89_ETSI][16] = 52,
+ [1][1][2][0][RTW89_MKK][16] = 72,
+ [1][1][2][0][RTW89_IC][16] = 56,
+ [1][1][2][0][RTW89_KCC][16] = 64,
+ [1][1][2][0][RTW89_ACMA][16] = 52,
+ [1][1][2][0][RTW89_CHILE][16] = 42,
+ [1][1][2][0][RTW89_UKRAINE][16] = 40,
+ [1][1][2][0][RTW89_MEXICO][16] = 56,
+ [1][1][2][0][RTW89_CN][16] = 127,
+ [1][1][2][0][RTW89_QATAR][16] = 40,
+ [1][1][2][0][RTW89_FCC][20] = 72,
+ [1][1][2][0][RTW89_ETSI][20] = 52,
+ [1][1][2][0][RTW89_MKK][20] = 72,
+ [1][1][2][0][RTW89_IC][20] = 72,
+ [1][1][2][0][RTW89_KCC][20] = 66,
+ [1][1][2][0][RTW89_ACMA][20] = 52,
+ [1][1][2][0][RTW89_CHILE][20] = 42,
+ [1][1][2][0][RTW89_UKRAINE][20] = 40,
+ [1][1][2][0][RTW89_MEXICO][20] = 72,
+ [1][1][2][0][RTW89_CN][20] = 127,
+ [1][1][2][0][RTW89_QATAR][20] = 40,
+ [1][1][2][0][RTW89_FCC][24] = 72,
+ [1][1][2][0][RTW89_ETSI][24] = 52,
+ [1][1][2][0][RTW89_MKK][24] = 72,
+ [1][1][2][0][RTW89_IC][24] = 127,
+ [1][1][2][0][RTW89_KCC][24] = 66,
+ [1][1][2][0][RTW89_ACMA][24] = 127,
+ [1][1][2][0][RTW89_CHILE][24] = 42,
+ [1][1][2][0][RTW89_UKRAINE][24] = 40,
+ [1][1][2][0][RTW89_MEXICO][24] = 72,
+ [1][1][2][0][RTW89_CN][24] = 127,
+ [1][1][2][0][RTW89_QATAR][24] = 40,
+ [1][1][2][0][RTW89_FCC][28] = 72,
+ [1][1][2][0][RTW89_ETSI][28] = 52,
+ [1][1][2][0][RTW89_MKK][28] = 72,
+ [1][1][2][0][RTW89_IC][28] = 127,
+ [1][1][2][0][RTW89_KCC][28] = 66,
+ [1][1][2][0][RTW89_ACMA][28] = 127,
+ [1][1][2][0][RTW89_CHILE][28] = 42,
+ [1][1][2][0][RTW89_UKRAINE][28] = 40,
+ [1][1][2][0][RTW89_MEXICO][28] = 72,
+ [1][1][2][0][RTW89_CN][28] = 127,
+ [1][1][2][0][RTW89_QATAR][28] = 40,
+ [1][1][2][0][RTW89_FCC][32] = 68,
+ [1][1][2][0][RTW89_ETSI][32] = 52,
+ [1][1][2][0][RTW89_MKK][32] = 72,
+ [1][1][2][0][RTW89_IC][32] = 68,
+ [1][1][2][0][RTW89_KCC][32] = 66,
+ [1][1][2][0][RTW89_ACMA][32] = 52,
+ [1][1][2][0][RTW89_CHILE][32] = 42,
+ [1][1][2][0][RTW89_UKRAINE][32] = 40,
+ [1][1][2][0][RTW89_MEXICO][32] = 68,
+ [1][1][2][0][RTW89_CN][32] = 127,
+ [1][1][2][0][RTW89_QATAR][32] = 40,
+ [1][1][2][0][RTW89_FCC][36] = 72,
+ [1][1][2][0][RTW89_ETSI][36] = 127,
+ [1][1][2][0][RTW89_MKK][36] = 72,
+ [1][1][2][0][RTW89_IC][36] = 72,
+ [1][1][2][0][RTW89_KCC][36] = 66,
+ [1][1][2][0][RTW89_ACMA][36] = 72,
+ [1][1][2][0][RTW89_CHILE][36] = 42,
+ [1][1][2][0][RTW89_UKRAINE][36] = 127,
+ [1][1][2][0][RTW89_MEXICO][36] = 72,
+ [1][1][2][0][RTW89_CN][36] = 127,
+ [1][1][2][0][RTW89_QATAR][36] = 127,
+ [1][1][2][0][RTW89_FCC][39] = 72,
+ [1][1][2][0][RTW89_ETSI][39] = 16,
+ [1][1][2][0][RTW89_MKK][39] = 127,
+ [1][1][2][0][RTW89_IC][39] = 72,
+ [1][1][2][0][RTW89_KCC][39] = 66,
+ [1][1][2][0][RTW89_ACMA][39] = 72,
+ [1][1][2][0][RTW89_CHILE][39] = 42,
+ [1][1][2][0][RTW89_UKRAINE][39] = 16,
+ [1][1][2][0][RTW89_MEXICO][39] = 72,
+ [1][1][2][0][RTW89_CN][39] = 68,
+ [1][1][2][0][RTW89_QATAR][39] = 16,
+ [1][1][2][0][RTW89_FCC][43] = 72,
+ [1][1][2][0][RTW89_ETSI][43] = 16,
+ [1][1][2][0][RTW89_MKK][43] = 127,
+ [1][1][2][0][RTW89_IC][43] = 72,
+ [1][1][2][0][RTW89_KCC][43] = 66,
+ [1][1][2][0][RTW89_ACMA][43] = 72,
+ [1][1][2][0][RTW89_CHILE][43] = 42,
+ [1][1][2][0][RTW89_UKRAINE][43] = 16,
+ [1][1][2][0][RTW89_MEXICO][43] = 72,
+ [1][1][2][0][RTW89_CN][43] = 72,
+ [1][1][2][0][RTW89_QATAR][43] = 16,
+ [1][1][2][1][RTW89_FCC][1] = 58,
+ [1][1][2][1][RTW89_ETSI][1] = 40,
+ [1][1][2][1][RTW89_MKK][1] = 50,
+ [1][1][2][1][RTW89_IC][1] = 40,
+ [1][1][2][1][RTW89_KCC][1] = 66,
+ [1][1][2][1][RTW89_ACMA][1] = 40,
+ [1][1][2][1][RTW89_CHILE][1] = 6,
+ [1][1][2][1][RTW89_UKRAINE][1] = 28,
+ [1][1][2][1][RTW89_MEXICO][1] = 50,
+ [1][1][2][1][RTW89_CN][1] = 40,
+ [1][1][2][1][RTW89_QATAR][1] = 40,
+ [1][1][2][1][RTW89_FCC][5] = 68,
+ [1][1][2][1][RTW89_ETSI][5] = 40,
+ [1][1][2][1][RTW89_MKK][5] = 50,
+ [1][1][2][1][RTW89_IC][5] = 40,
+ [1][1][2][1][RTW89_KCC][5] = 50,
+ [1][1][2][1][RTW89_ACMA][5] = 40,
+ [1][1][2][1][RTW89_CHILE][5] = 6,
+ [1][1][2][1][RTW89_UKRAINE][5] = 28,
+ [1][1][2][1][RTW89_MEXICO][5] = 50,
+ [1][1][2][1][RTW89_CN][5] = 40,
+ [1][1][2][1][RTW89_QATAR][5] = 40,
+ [1][1][2][1][RTW89_FCC][9] = 68,
+ [1][1][2][1][RTW89_ETSI][9] = 40,
+ [1][1][2][1][RTW89_MKK][9] = 50,
+ [1][1][2][1][RTW89_IC][9] = 40,
+ [1][1][2][1][RTW89_KCC][9] = 66,
+ [1][1][2][1][RTW89_ACMA][9] = 40,
+ [1][1][2][1][RTW89_CHILE][9] = 30,
+ [1][1][2][1][RTW89_UKRAINE][9] = 28,
+ [1][1][2][1][RTW89_MEXICO][9] = 68,
+ [1][1][2][1][RTW89_CN][9] = 40,
+ [1][1][2][1][RTW89_QATAR][9] = 40,
+ [1][1][2][1][RTW89_FCC][13] = 58,
+ [1][1][2][1][RTW89_ETSI][13] = 40,
+ [1][1][2][1][RTW89_MKK][13] = 50,
+ [1][1][2][1][RTW89_IC][13] = 40,
+ [1][1][2][1][RTW89_KCC][13] = 66,
+ [1][1][2][1][RTW89_ACMA][13] = 40,
+ [1][1][2][1][RTW89_CHILE][13] = 30,
+ [1][1][2][1][RTW89_UKRAINE][13] = 28,
+ [1][1][2][1][RTW89_MEXICO][13] = 58,
+ [1][1][2][1][RTW89_CN][13] = 40,
+ [1][1][2][1][RTW89_QATAR][13] = 40,
+ [1][1][2][1][RTW89_FCC][16] = 56,
+ [1][1][2][1][RTW89_ETSI][16] = 40,
+ [1][1][2][1][RTW89_MKK][16] = 72,
+ [1][1][2][1][RTW89_IC][16] = 56,
+ [1][1][2][1][RTW89_KCC][16] = 64,
+ [1][1][2][1][RTW89_ACMA][16] = 40,
+ [1][1][2][1][RTW89_CHILE][16] = 30,
+ [1][1][2][1][RTW89_UKRAINE][16] = 28,
+ [1][1][2][1][RTW89_MEXICO][16] = 56,
+ [1][1][2][1][RTW89_CN][16] = 127,
+ [1][1][2][1][RTW89_QATAR][16] = 28,
+ [1][1][2][1][RTW89_FCC][20] = 68,
+ [1][1][2][1][RTW89_ETSI][20] = 40,
+ [1][1][2][1][RTW89_MKK][20] = 72,
+ [1][1][2][1][RTW89_IC][20] = 68,
+ [1][1][2][1][RTW89_KCC][20] = 66,
+ [1][1][2][1][RTW89_ACMA][20] = 40,
+ [1][1][2][1][RTW89_CHILE][20] = 30,
+ [1][1][2][1][RTW89_UKRAINE][20] = 28,
+ [1][1][2][1][RTW89_MEXICO][20] = 68,
+ [1][1][2][1][RTW89_CN][20] = 127,
+ [1][1][2][1][RTW89_QATAR][20] = 28,
+ [1][1][2][1][RTW89_FCC][24] = 68,
+ [1][1][2][1][RTW89_ETSI][24] = 40,
+ [1][1][2][1][RTW89_MKK][24] = 72,
+ [1][1][2][1][RTW89_IC][24] = 127,
+ [1][1][2][1][RTW89_KCC][24] = 66,
+ [1][1][2][1][RTW89_ACMA][24] = 127,
+ [1][1][2][1][RTW89_CHILE][24] = 30,
+ [1][1][2][1][RTW89_UKRAINE][24] = 28,
+ [1][1][2][1][RTW89_MEXICO][24] = 68,
+ [1][1][2][1][RTW89_CN][24] = 127,
+ [1][1][2][1][RTW89_QATAR][24] = 28,
+ [1][1][2][1][RTW89_FCC][28] = 68,
+ [1][1][2][1][RTW89_ETSI][28] = 40,
+ [1][1][2][1][RTW89_MKK][28] = 72,
+ [1][1][2][1][RTW89_IC][28] = 127,
+ [1][1][2][1][RTW89_KCC][28] = 66,
+ [1][1][2][1][RTW89_ACMA][28] = 127,
+ [1][1][2][1][RTW89_CHILE][28] = 30,
+ [1][1][2][1][RTW89_UKRAINE][28] = 28,
+ [1][1][2][1][RTW89_MEXICO][28] = 68,
+ [1][1][2][1][RTW89_CN][28] = 127,
+ [1][1][2][1][RTW89_QATAR][28] = 28,
+ [1][1][2][1][RTW89_FCC][32] = 68,
+ [1][1][2][1][RTW89_ETSI][32] = 40,
+ [1][1][2][1][RTW89_MKK][32] = 72,
+ [1][1][2][1][RTW89_IC][32] = 68,
+ [1][1][2][1][RTW89_KCC][32] = 66,
+ [1][1][2][1][RTW89_ACMA][32] = 40,
+ [1][1][2][1][RTW89_CHILE][32] = 30,
+ [1][1][2][1][RTW89_UKRAINE][32] = 28,
+ [1][1][2][1][RTW89_MEXICO][32] = 68,
+ [1][1][2][1][RTW89_CN][32] = 127,
+ [1][1][2][1][RTW89_QATAR][32] = 28,
+ [1][1][2][1][RTW89_FCC][36] = 68,
+ [1][1][2][1][RTW89_ETSI][36] = 127,
+ [1][1][2][1][RTW89_MKK][36] = 72,
+ [1][1][2][1][RTW89_IC][36] = 68,
+ [1][1][2][1][RTW89_KCC][36] = 66,
+ [1][1][2][1][RTW89_ACMA][36] = 68,
+ [1][1][2][1][RTW89_CHILE][36] = 30,
+ [1][1][2][1][RTW89_UKRAINE][36] = 127,
+ [1][1][2][1][RTW89_MEXICO][36] = 68,
+ [1][1][2][1][RTW89_CN][36] = 127,
+ [1][1][2][1][RTW89_QATAR][36] = 127,
+ [1][1][2][1][RTW89_FCC][39] = 72,
+ [1][1][2][1][RTW89_ETSI][39] = 4,
+ [1][1][2][1][RTW89_MKK][39] = 127,
+ [1][1][2][1][RTW89_IC][39] = 72,
+ [1][1][2][1][RTW89_KCC][39] = 66,
+ [1][1][2][1][RTW89_ACMA][39] = 72,
+ [1][1][2][1][RTW89_CHILE][39] = 30,
+ [1][1][2][1][RTW89_UKRAINE][39] = 4,
+ [1][1][2][1][RTW89_MEXICO][39] = 72,
+ [1][1][2][1][RTW89_CN][39] = 62,
+ [1][1][2][1][RTW89_QATAR][39] = 4,
+ [1][1][2][1][RTW89_FCC][43] = 72,
+ [1][1][2][1][RTW89_ETSI][43] = 4,
+ [1][1][2][1][RTW89_MKK][43] = 127,
+ [1][1][2][1][RTW89_IC][43] = 72,
+ [1][1][2][1][RTW89_KCC][43] = 66,
+ [1][1][2][1][RTW89_ACMA][43] = 72,
+ [1][1][2][1][RTW89_CHILE][43] = 30,
+ [1][1][2][1][RTW89_UKRAINE][43] = 4,
+ [1][1][2][1][RTW89_MEXICO][43] = 72,
+ [1][1][2][1][RTW89_CN][43] = 72,
+ [1][1][2][1][RTW89_QATAR][43] = 4,
+ [2][0][2][0][RTW89_FCC][3] = 64,
+ [2][0][2][0][RTW89_ETSI][3] = 64,
+ [2][0][2][0][RTW89_MKK][3] = 64,
+ [2][0][2][0][RTW89_IC][3] = 62,
+ [2][0][2][0][RTW89_KCC][3] = 72,
+ [2][0][2][0][RTW89_ACMA][3] = 64,
+ [2][0][2][0][RTW89_CHILE][3] = 30,
+ [2][0][2][0][RTW89_UKRAINE][3] = 52,
+ [2][0][2][0][RTW89_MEXICO][3] = 62,
+ [2][0][2][0][RTW89_CN][3] = 64,
+ [2][0][2][0][RTW89_QATAR][3] = 64,
+ [2][0][2][0][RTW89_FCC][11] = 64,
+ [2][0][2][0][RTW89_ETSI][11] = 64,
+ [2][0][2][0][RTW89_MKK][11] = 64,
+ [2][0][2][0][RTW89_IC][11] = 62,
+ [2][0][2][0][RTW89_KCC][11] = 72,
+ [2][0][2][0][RTW89_ACMA][11] = 64,
+ [2][0][2][0][RTW89_CHILE][11] = 54,
+ [2][0][2][0][RTW89_UKRAINE][11] = 52,
+ [2][0][2][0][RTW89_MEXICO][11] = 64,
+ [2][0][2][0][RTW89_CN][11] = 64,
+ [2][0][2][0][RTW89_QATAR][11] = 64,
+ [2][0][2][0][RTW89_FCC][18] = 62,
+ [2][0][2][0][RTW89_ETSI][18] = 64,
+ [2][0][2][0][RTW89_MKK][18] = 72,
+ [2][0][2][0][RTW89_IC][18] = 66,
+ [2][0][2][0][RTW89_KCC][18] = 70,
+ [2][0][2][0][RTW89_ACMA][18] = 64,
+ [2][0][2][0][RTW89_CHILE][18] = 54,
+ [2][0][2][0][RTW89_UKRAINE][18] = 52,
+ [2][0][2][0][RTW89_MEXICO][18] = 62,
+ [2][0][2][0][RTW89_CN][18] = 127,
+ [2][0][2][0][RTW89_QATAR][18] = 52,
+ [2][0][2][0][RTW89_FCC][26] = 72,
+ [2][0][2][0][RTW89_ETSI][26] = 64,
+ [2][0][2][0][RTW89_MKK][26] = 72,
+ [2][0][2][0][RTW89_IC][26] = 127,
+ [2][0][2][0][RTW89_KCC][26] = 72,
+ [2][0][2][0][RTW89_ACMA][26] = 127,
+ [2][0][2][0][RTW89_CHILE][26] = 54,
+ [2][0][2][0][RTW89_UKRAINE][26] = 52,
+ [2][0][2][0][RTW89_MEXICO][26] = 72,
+ [2][0][2][0][RTW89_CN][26] = 127,
+ [2][0][2][0][RTW89_QATAR][26] = 52,
+ [2][0][2][0][RTW89_FCC][34] = 72,
+ [2][0][2][0][RTW89_ETSI][34] = 127,
+ [2][0][2][0][RTW89_MKK][34] = 72,
+ [2][0][2][0][RTW89_IC][34] = 72,
+ [2][0][2][0][RTW89_KCC][34] = 72,
+ [2][0][2][0][RTW89_ACMA][34] = 72,
+ [2][0][2][0][RTW89_CHILE][34] = 54,
+ [2][0][2][0][RTW89_UKRAINE][34] = 127,
+ [2][0][2][0][RTW89_MEXICO][34] = 72,
+ [2][0][2][0][RTW89_CN][34] = 127,
+ [2][0][2][0][RTW89_QATAR][34] = 127,
+ [2][0][2][0][RTW89_FCC][41] = 72,
+ [2][0][2][0][RTW89_ETSI][41] = 28,
+ [2][0][2][0][RTW89_MKK][41] = 127,
+ [2][0][2][0][RTW89_IC][41] = 72,
+ [2][0][2][0][RTW89_KCC][41] = 68,
+ [2][0][2][0][RTW89_ACMA][41] = 72,
+ [2][0][2][0][RTW89_CHILE][41] = 54,
+ [2][0][2][0][RTW89_UKRAINE][41] = 28,
+ [2][0][2][0][RTW89_MEXICO][41] = 72,
+ [2][0][2][0][RTW89_CN][41] = 68,
+ [2][0][2][0][RTW89_QATAR][41] = 28,
+ [2][1][2][0][RTW89_FCC][3] = 56,
+ [2][1][2][0][RTW89_ETSI][3] = 52,
+ [2][1][2][0][RTW89_MKK][3] = 52,
+ [2][1][2][0][RTW89_IC][3] = 52,
+ [2][1][2][0][RTW89_KCC][3] = 66,
+ [2][1][2][0][RTW89_ACMA][3] = 52,
+ [2][1][2][0][RTW89_CHILE][3] = 18,
+ [2][1][2][0][RTW89_UKRAINE][3] = 40,
+ [2][1][2][0][RTW89_MEXICO][3] = 50,
+ [2][1][2][0][RTW89_CN][3] = 52,
+ [2][1][2][0][RTW89_QATAR][3] = 52,
+ [2][1][2][0][RTW89_FCC][11] = 56,
+ [2][1][2][0][RTW89_ETSI][11] = 52,
+ [2][1][2][0][RTW89_MKK][11] = 52,
+ [2][1][2][0][RTW89_IC][11] = 52,
+ [2][1][2][0][RTW89_KCC][11] = 64,
+ [2][1][2][0][RTW89_ACMA][11] = 52,
+ [2][1][2][0][RTW89_CHILE][11] = 42,
+ [2][1][2][0][RTW89_UKRAINE][11] = 40,
+ [2][1][2][0][RTW89_MEXICO][11] = 56,
+ [2][1][2][0][RTW89_CN][11] = 52,
+ [2][1][2][0][RTW89_QATAR][11] = 52,
+ [2][1][2][0][RTW89_FCC][18] = 56,
+ [2][1][2][0][RTW89_ETSI][18] = 52,
+ [2][1][2][0][RTW89_MKK][18] = 72,
+ [2][1][2][0][RTW89_IC][18] = 56,
+ [2][1][2][0][RTW89_KCC][18] = 58,
+ [2][1][2][0][RTW89_ACMA][18] = 52,
+ [2][1][2][0][RTW89_CHILE][18] = 42,
+ [2][1][2][0][RTW89_UKRAINE][18] = 40,
+ [2][1][2][0][RTW89_MEXICO][18] = 56,
+ [2][1][2][0][RTW89_CN][18] = 127,
+ [2][1][2][0][RTW89_QATAR][18] = 40,
+ [2][1][2][0][RTW89_FCC][26] = 72,
+ [2][1][2][0][RTW89_ETSI][26] = 52,
+ [2][1][2][0][RTW89_MKK][26] = 72,
+ [2][1][2][0][RTW89_IC][26] = 127,
+ [2][1][2][0][RTW89_KCC][26] = 64,
+ [2][1][2][0][RTW89_ACMA][26] = 127,
+ [2][1][2][0][RTW89_CHILE][26] = 42,
+ [2][1][2][0][RTW89_UKRAINE][26] = 40,
+ [2][1][2][0][RTW89_MEXICO][26] = 72,
+ [2][1][2][0][RTW89_CN][26] = 127,
+ [2][1][2][0][RTW89_QATAR][26] = 40,
+ [2][1][2][0][RTW89_FCC][34] = 72,
+ [2][1][2][0][RTW89_ETSI][34] = 127,
+ [2][1][2][0][RTW89_MKK][34] = 72,
+ [2][1][2][0][RTW89_IC][34] = 72,
+ [2][1][2][0][RTW89_KCC][34] = 64,
+ [2][1][2][0][RTW89_ACMA][34] = 72,
+ [2][1][2][0][RTW89_CHILE][34] = 42,
+ [2][1][2][0][RTW89_UKRAINE][34] = 127,
+ [2][1][2][0][RTW89_MEXICO][34] = 72,
+ [2][1][2][0][RTW89_CN][34] = 127,
+ [2][1][2][0][RTW89_QATAR][34] = 127,
+ [2][1][2][0][RTW89_FCC][41] = 72,
+ [2][1][2][0][RTW89_ETSI][41] = 16,
+ [2][1][2][0][RTW89_MKK][41] = 127,
+ [2][1][2][0][RTW89_IC][41] = 72,
+ [2][1][2][0][RTW89_KCC][41] = 58,
+ [2][1][2][0][RTW89_ACMA][41] = 72,
+ [2][1][2][0][RTW89_CHILE][41] = 42,
+ [2][1][2][0][RTW89_UKRAINE][41] = 16,
+ [2][1][2][0][RTW89_MEXICO][41] = 72,
+ [2][1][2][0][RTW89_CN][41] = 68,
+ [2][1][2][0][RTW89_QATAR][41] = 16,
+ [2][1][2][1][RTW89_FCC][3] = 56,
+ [2][1][2][1][RTW89_ETSI][3] = 40,
+ [2][1][2][1][RTW89_MKK][3] = 52,
+ [2][1][2][1][RTW89_IC][3] = 40,
+ [2][1][2][1][RTW89_KCC][3] = 66,
+ [2][1][2][1][RTW89_ACMA][3] = 40,
+ [2][1][2][1][RTW89_CHILE][3] = 6,
+ [2][1][2][1][RTW89_UKRAINE][3] = 28,
+ [2][1][2][1][RTW89_MEXICO][3] = 50,
+ [2][1][2][1][RTW89_CN][3] = 40,
+ [2][1][2][1][RTW89_QATAR][3] = 40,
+ [2][1][2][1][RTW89_FCC][11] = 56,
+ [2][1][2][1][RTW89_ETSI][11] = 40,
+ [2][1][2][1][RTW89_MKK][11] = 52,
+ [2][1][2][1][RTW89_IC][11] = 40,
+ [2][1][2][1][RTW89_KCC][11] = 64,
+ [2][1][2][1][RTW89_ACMA][11] = 40,
+ [2][1][2][1][RTW89_CHILE][11] = 30,
+ [2][1][2][1][RTW89_UKRAINE][11] = 28,
+ [2][1][2][1][RTW89_MEXICO][11] = 56,
+ [2][1][2][1][RTW89_CN][11] = 40,
+ [2][1][2][1][RTW89_QATAR][11] = 40,
+ [2][1][2][1][RTW89_FCC][18] = 56,
+ [2][1][2][1][RTW89_ETSI][18] = 40,
+ [2][1][2][1][RTW89_MKK][18] = 72,
+ [2][1][2][1][RTW89_IC][18] = 56,
+ [2][1][2][1][RTW89_KCC][18] = 58,
+ [2][1][2][1][RTW89_ACMA][18] = 40,
+ [2][1][2][1][RTW89_CHILE][18] = 30,
+ [2][1][2][1][RTW89_UKRAINE][18] = 28,
+ [2][1][2][1][RTW89_MEXICO][18] = 56,
+ [2][1][2][1][RTW89_CN][18] = 127,
+ [2][1][2][1][RTW89_QATAR][18] = 28,
+ [2][1][2][1][RTW89_FCC][26] = 68,
+ [2][1][2][1][RTW89_ETSI][26] = 40,
+ [2][1][2][1][RTW89_MKK][26] = 72,
+ [2][1][2][1][RTW89_IC][26] = 127,
+ [2][1][2][1][RTW89_KCC][26] = 64,
+ [2][1][2][1][RTW89_ACMA][26] = 127,
+ [2][1][2][1][RTW89_CHILE][26] = 30,
+ [2][1][2][1][RTW89_UKRAINE][26] = 28,
+ [2][1][2][1][RTW89_MEXICO][26] = 68,
+ [2][1][2][1][RTW89_CN][26] = 127,
+ [2][1][2][1][RTW89_QATAR][26] = 28,
+ [2][1][2][1][RTW89_FCC][34] = 68,
+ [2][1][2][1][RTW89_ETSI][34] = 127,
+ [2][1][2][1][RTW89_MKK][34] = 72,
+ [2][1][2][1][RTW89_IC][34] = 68,
+ [2][1][2][1][RTW89_KCC][34] = 64,
+ [2][1][2][1][RTW89_ACMA][34] = 68,
+ [2][1][2][1][RTW89_CHILE][34] = 30,
+ [2][1][2][1][RTW89_UKRAINE][34] = 127,
+ [2][1][2][1][RTW89_MEXICO][34] = 68,
+ [2][1][2][1][RTW89_CN][34] = 127,
+ [2][1][2][1][RTW89_QATAR][34] = 127,
+ [2][1][2][1][RTW89_FCC][41] = 72,
+ [2][1][2][1][RTW89_ETSI][41] = 4,
+ [2][1][2][1][RTW89_MKK][41] = 127,
+ [2][1][2][1][RTW89_IC][41] = 72,
+ [2][1][2][1][RTW89_KCC][41] = 58,
+ [2][1][2][1][RTW89_ACMA][41] = 72,
+ [2][1][2][1][RTW89_CHILE][41] = 30,
+ [2][1][2][1][RTW89_UKRAINE][41] = 4,
+ [2][1][2][1][RTW89_MEXICO][41] = 72,
+ [2][1][2][1][RTW89_CN][41] = 64,
+ [2][1][2][1][RTW89_QATAR][41] = 4,
};
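The hunk below rewrites the regulatory-domain index of rtw89_8852a_txpwr_lmt_ru_2g: the removed lines address that dimension with bare integers (0, 1, 2, 3, 5, 6, 8, 9, 11), while the added lines use the named RTW89_* enumerators (RTW89_WW, RTW89_FCC, RTW89_ETSI, ...), matching the style of the 5 GHz table above. As a minimal, self-contained sketch of how such a table is indexed — with toy dimensions and stand-in enumerator names, since the real RTW89_RU_NUM / RTW89_NTX_NUM / RTW89_REGD_NUM / RTW89_2G_CH_NUM values live in the rtw89 headers, and with the assumption (not confirmed by this diff) that 127 serves as a "no usable limit" sentinel:

/* Demo only: mirrors the [ru][ntx][regd][ch] indexing of
 * rtw89_8852a_txpwr_lmt_ru_2g with made-up dimensions and
 * stand-in enumerators; the values are copied from entries
 * visible in this hunk.
 */
#include <stdio.h>

typedef signed char s8;

enum { RU_NUM = 1, NTX_NUM = 1, REGD_NUM = 2, CH_NUM = 2 };
enum { REGD_WW = 0, REGD_FCC = 1 };   /* stand-ins for RTW89_WW / RTW89_FCC */

static const s8 demo_lmt_ru_2g[RU_NUM][NTX_NUM][REGD_NUM][CH_NUM] = {
	[0][0][REGD_WW][0]  = 32,
	[0][0][REGD_WW][1]  = 0,
	[0][0][REGD_FCC][0] = 70,
	[0][0][REGD_FCC][1] = 127, /* assumption: sentinel for "entry not usable" */
};

int main(void)
{
	/* Fetch the worldwide limit for the first channel slot. */
	printf("WW ch0 limit: %d\n", demo_lmt_ru_2g[0][0][REGD_WW][0]);
	return 0;
}

Designated initializers keyed by enumerator keep a sparse table like this readable and make the entries robust against reordering of the RTW89_* enumeration, which is presumably why the driver indexes by name rather than by position.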
const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
- [0][0][0][0] = 32,
- [0][0][0][1] = 32,
- [0][0][0][2] = 32,
- [0][0][0][3] = 32,
- [0][0][0][4] = 32,
- [0][0][0][5] = 32,
- [0][0][0][6] = 32,
- [0][0][0][7] = 32,
- [0][0][0][8] = 32,
- [0][0][0][9] = 32,
- [0][0][0][10] = 32,
- [0][0][0][11] = 32,
- [0][0][0][12] = 32,
- [0][0][0][13] = 0,
- [0][1][0][0] = 20,
- [0][1][0][1] = 20,
- [0][1][0][2] = 20,
- [0][1][0][3] = 20,
- [0][1][0][4] = 20,
- [0][1][0][5] = 20,
- [0][1][0][6] = 20,
- [0][1][0][7] = 20,
- [0][1][0][8] = 20,
- [0][1][0][9] = 20,
- [0][1][0][10] = 20,
- [0][1][0][11] = 20,
- [0][1][0][12] = 20,
- [0][1][0][13] = 0,
- [1][0][0][0] = 42,
- [1][0][0][1] = 42,
- [1][0][0][2] = 42,
- [1][0][0][3] = 42,
- [1][0][0][4] = 42,
- [1][0][0][5] = 42,
- [1][0][0][6] = 42,
- [1][0][0][7] = 42,
- [1][0][0][8] = 42,
- [1][0][0][9] = 42,
- [1][0][0][10] = 42,
- [1][0][0][11] = 42,
- [1][0][0][12] = 36,
- [1][0][0][13] = 0,
- [1][1][0][0] = 30,
- [1][1][0][1] = 30,
- [1][1][0][2] = 30,
- [1][1][0][3] = 30,
- [1][1][0][4] = 30,
- [1][1][0][5] = 30,
- [1][1][0][6] = 30,
- [1][1][0][7] = 30,
- [1][1][0][8] = 30,
- [1][1][0][9] = 30,
- [1][1][0][10] = 30,
- [1][1][0][11] = 30,
- [1][1][0][12] = 30,
- [1][1][0][13] = 0,
- [2][0][0][0] = 52,
- [2][0][0][1] = 52,
- [2][0][0][2] = 52,
- [2][0][0][3] = 52,
- [2][0][0][4] = 52,
- [2][0][0][5] = 52,
- [2][0][0][6] = 52,
- [2][0][0][7] = 52,
- [2][0][0][8] = 52,
- [2][0][0][9] = 52,
- [2][0][0][10] = 52,
- [2][0][0][11] = 52,
- [2][0][0][12] = 40,
- [2][0][0][13] = 0,
- [2][1][0][0] = 40,
- [2][1][0][1] = 40,
- [2][1][0][2] = 40,
- [2][1][0][3] = 40,
- [2][1][0][4] = 40,
- [2][1][0][5] = 40,
- [2][1][0][6] = 40,
- [2][1][0][7] = 40,
- [2][1][0][8] = 40,
- [2][1][0][9] = 40,
- [2][1][0][10] = 40,
- [2][1][0][11] = 40,
- [2][1][0][12] = 26,
- [2][1][0][13] = 0,
- [0][0][2][0] = 70,
- [0][0][1][0] = 32,
- [0][0][3][0] = 40,
- [0][0][5][0] = 70,
- [0][0][6][0] = 32,
- [0][0][9][0] = 32,
- [0][0][8][0] = 60,
- [0][0][11][0] = 32,
- [0][0][2][1] = 70,
- [0][0][1][1] = 32,
- [0][0][3][1] = 40,
- [0][0][5][1] = 70,
- [0][0][6][1] = 32,
- [0][0][9][1] = 32,
- [0][0][8][1] = 60,
- [0][0][11][1] = 32,
- [0][0][2][2] = 74,
- [0][0][1][2] = 32,
- [0][0][3][2] = 40,
- [0][0][5][2] = 74,
- [0][0][6][2] = 32,
- [0][0][9][2] = 32,
- [0][0][8][2] = 60,
- [0][0][11][2] = 32,
- [0][0][2][3] = 78,
- [0][0][1][3] = 32,
- [0][0][3][3] = 40,
- [0][0][5][3] = 78,
- [0][0][6][3] = 32,
- [0][0][9][3] = 32,
- [0][0][8][3] = 60,
- [0][0][11][3] = 32,
- [0][0][2][4] = 78,
- [0][0][1][4] = 32,
- [0][0][3][4] = 40,
- [0][0][5][4] = 78,
- [0][0][6][4] = 32,
- [0][0][9][4] = 32,
- [0][0][8][4] = 60,
- [0][0][11][4] = 32,
- [0][0][2][5] = 78,
- [0][0][1][5] = 32,
- [0][0][3][5] = 40,
- [0][0][5][5] = 78,
- [0][0][6][5] = 32,
- [0][0][9][5] = 32,
- [0][0][8][5] = 60,
- [0][0][11][5] = 32,
- [0][0][2][6] = 78,
- [0][0][1][6] = 32,
- [0][0][3][6] = 40,
- [0][0][5][6] = 78,
- [0][0][6][6] = 32,
- [0][0][9][6] = 32,
- [0][0][8][6] = 60,
- [0][0][11][6] = 32,
- [0][0][2][7] = 78,
- [0][0][1][7] = 32,
- [0][0][3][7] = 40,
- [0][0][5][7] = 78,
- [0][0][6][7] = 32,
- [0][0][9][7] = 32,
- [0][0][8][7] = 60,
- [0][0][11][7] = 32,
- [0][0][2][8] = 74,
- [0][0][1][8] = 32,
- [0][0][3][8] = 40,
- [0][0][5][8] = 74,
- [0][0][6][8] = 32,
- [0][0][9][8] = 32,
- [0][0][8][8] = 60,
- [0][0][11][8] = 32,
- [0][0][2][9] = 70,
- [0][0][1][9] = 32,
- [0][0][3][9] = 40,
- [0][0][5][9] = 70,
- [0][0][6][9] = 32,
- [0][0][9][9] = 32,
- [0][0][8][9] = 60,
- [0][0][11][9] = 32,
- [0][0][2][10] = 70,
- [0][0][1][10] = 32,
- [0][0][3][10] = 40,
- [0][0][5][10] = 70,
- [0][0][6][10] = 32,
- [0][0][9][10] = 32,
- [0][0][8][10] = 60,
- [0][0][11][10] = 32,
- [0][0][2][11] = 58,
- [0][0][1][11] = 32,
- [0][0][3][11] = 40,
- [0][0][5][11] = 58,
- [0][0][6][11] = 32,
- [0][0][9][11] = 32,
- [0][0][8][11] = 60,
- [0][0][11][11] = 32,
- [0][0][2][12] = 34,
- [0][0][1][12] = 32,
- [0][0][3][12] = 40,
- [0][0][5][12] = 34,
- [0][0][6][12] = 32,
- [0][0][9][12] = 32,
- [0][0][8][12] = 60,
- [0][0][11][12] = 32,
- [0][0][2][13] = 127,
- [0][0][1][13] = 127,
- [0][0][3][13] = 127,
- [0][0][5][13] = 127,
- [0][0][6][13] = 127,
- [0][0][9][13] = 127,
- [0][0][8][13] = 127,
- [0][0][11][13] = 127,
- [0][1][2][0] = 64,
- [0][1][1][0] = 20,
- [0][1][3][0] = 28,
- [0][1][5][0] = 64,
- [0][1][6][0] = 20,
- [0][1][9][0] = 20,
- [0][1][8][0] = 48,
- [0][1][11][0] = 20,
- [0][1][2][1] = 64,
- [0][1][1][1] = 20,
- [0][1][3][1] = 28,
- [0][1][5][1] = 64,
- [0][1][6][1] = 20,
- [0][1][9][1] = 20,
- [0][1][8][1] = 48,
- [0][1][11][1] = 20,
- [0][1][2][2] = 68,
- [0][1][1][2] = 20,
- [0][1][3][2] = 28,
- [0][1][5][2] = 68,
- [0][1][6][2] = 20,
- [0][1][9][2] = 20,
- [0][1][8][2] = 48,
- [0][1][11][2] = 20,
- [0][1][2][3] = 72,
- [0][1][1][3] = 20,
- [0][1][3][3] = 28,
- [0][1][5][3] = 72,
- [0][1][6][3] = 20,
- [0][1][9][3] = 20,
- [0][1][8][3] = 48,
- [0][1][11][3] = 20,
- [0][1][2][4] = 76,
- [0][1][1][4] = 20,
- [0][1][3][4] = 28,
- [0][1][5][4] = 76,
- [0][1][6][4] = 20,
- [0][1][9][4] = 20,
- [0][1][8][4] = 48,
- [0][1][11][4] = 20,
- [0][1][2][5] = 78,
- [0][1][1][5] = 20,
- [0][1][3][5] = 28,
- [0][1][5][5] = 78,
- [0][1][6][5] = 20,
- [0][1][9][5] = 20,
- [0][1][8][5] = 48,
- [0][1][11][5] = 20,
- [0][1][2][6] = 76,
- [0][1][1][6] = 20,
- [0][1][3][6] = 28,
- [0][1][5][6] = 76,
- [0][1][6][6] = 20,
- [0][1][9][6] = 20,
- [0][1][8][6] = 48,
- [0][1][11][6] = 20,
- [0][1][2][7] = 72,
- [0][1][1][7] = 20,
- [0][1][3][7] = 28,
- [0][1][5][7] = 72,
- [0][1][6][7] = 20,
- [0][1][9][7] = 20,
- [0][1][8][7] = 48,
- [0][1][11][7] = 20,
- [0][1][2][8] = 68,
- [0][1][1][8] = 20,
- [0][1][3][8] = 28,
- [0][1][5][8] = 68,
- [0][1][6][8] = 20,
- [0][1][9][8] = 20,
- [0][1][8][8] = 48,
- [0][1][11][8] = 20,
- [0][1][2][9] = 64,
- [0][1][1][9] = 20,
- [0][1][3][9] = 28,
- [0][1][5][9] = 64,
- [0][1][6][9] = 20,
- [0][1][9][9] = 20,
- [0][1][8][9] = 48,
- [0][1][11][9] = 20,
- [0][1][2][10] = 64,
- [0][1][1][10] = 20,
- [0][1][3][10] = 28,
- [0][1][5][10] = 64,
- [0][1][6][10] = 20,
- [0][1][9][10] = 20,
- [0][1][8][10] = 48,
- [0][1][11][10] = 20,
- [0][1][2][11] = 54,
- [0][1][1][11] = 20,
- [0][1][3][11] = 28,
- [0][1][5][11] = 54,
- [0][1][6][11] = 20,
- [0][1][9][11] = 20,
- [0][1][8][11] = 48,
- [0][1][11][11] = 20,
- [0][1][2][12] = 32,
- [0][1][1][12] = 20,
- [0][1][3][12] = 28,
- [0][1][5][12] = 32,
- [0][1][6][12] = 20,
- [0][1][9][12] = 20,
- [0][1][8][12] = 48,
- [0][1][11][12] = 20,
- [0][1][2][13] = 127,
- [0][1][1][13] = 127,
- [0][1][3][13] = 127,
- [0][1][5][13] = 127,
- [0][1][6][13] = 127,
- [0][1][9][13] = 127,
- [0][1][8][13] = 127,
- [0][1][11][13] = 127,
- [1][0][2][0] = 72,
- [1][0][1][0] = 42,
- [1][0][3][0] = 50,
- [1][0][5][0] = 72,
- [1][0][6][0] = 42,
- [1][0][9][0] = 42,
- [1][0][8][0] = 60,
- [1][0][11][0] = 42,
- [1][0][2][1] = 72,
- [1][0][1][1] = 42,
- [1][0][3][1] = 50,
- [1][0][5][1] = 72,
- [1][0][6][1] = 42,
- [1][0][9][1] = 42,
- [1][0][8][1] = 60,
- [1][0][11][1] = 42,
- [1][0][2][2] = 76,
- [1][0][1][2] = 42,
- [1][0][3][2] = 50,
- [1][0][5][2] = 76,
- [1][0][6][2] = 42,
- [1][0][9][2] = 42,
- [1][0][8][2] = 60,
- [1][0][11][2] = 42,
- [1][0][2][3] = 78,
- [1][0][1][3] = 42,
- [1][0][3][3] = 50,
- [1][0][5][3] = 78,
- [1][0][6][3] = 42,
- [1][0][9][3] = 42,
- [1][0][8][3] = 60,
- [1][0][11][3] = 42,
- [1][0][2][4] = 78,
- [1][0][1][4] = 42,
- [1][0][3][4] = 50,
- [1][0][5][4] = 78,
- [1][0][6][4] = 42,
- [1][0][9][4] = 42,
- [1][0][8][4] = 60,
- [1][0][11][4] = 42,
- [1][0][2][5] = 78,
- [1][0][1][5] = 42,
- [1][0][3][5] = 50,
- [1][0][5][5] = 78,
- [1][0][6][5] = 42,
- [1][0][9][5] = 42,
- [1][0][8][5] = 60,
- [1][0][11][5] = 42,
- [1][0][2][6] = 78,
- [1][0][1][6] = 42,
- [1][0][3][6] = 50,
- [1][0][5][6] = 78,
- [1][0][6][6] = 42,
- [1][0][9][6] = 42,
- [1][0][8][6] = 60,
- [1][0][11][6] = 42,
- [1][0][2][7] = 78,
- [1][0][1][7] = 42,
- [1][0][3][7] = 50,
- [1][0][5][7] = 78,
- [1][0][6][7] = 42,
- [1][0][9][7] = 42,
- [1][0][8][7] = 60,
- [1][0][11][7] = 42,
- [1][0][2][8] = 78,
- [1][0][1][8] = 42,
- [1][0][3][8] = 50,
- [1][0][5][8] = 78,
- [1][0][6][8] = 42,
- [1][0][9][8] = 42,
- [1][0][8][8] = 60,
- [1][0][11][8] = 42,
- [1][0][2][9] = 74,
- [1][0][1][9] = 42,
- [1][0][3][9] = 50,
- [1][0][5][9] = 74,
- [1][0][6][9] = 42,
- [1][0][9][9] = 42,
- [1][0][8][9] = 60,
- [1][0][11][9] = 42,
- [1][0][2][10] = 74,
- [1][0][1][10] = 42,
- [1][0][3][10] = 50,
- [1][0][5][10] = 74,
- [1][0][6][10] = 42,
- [1][0][9][10] = 42,
- [1][0][8][10] = 60,
- [1][0][11][10] = 42,
- [1][0][2][11] = 64,
- [1][0][1][11] = 42,
- [1][0][3][11] = 50,
- [1][0][5][11] = 64,
- [1][0][6][11] = 42,
- [1][0][9][11] = 42,
- [1][0][8][11] = 60,
- [1][0][11][11] = 42,
- [1][0][2][12] = 36,
- [1][0][1][12] = 42,
- [1][0][3][12] = 50,
- [1][0][5][12] = 36,
- [1][0][6][12] = 42,
- [1][0][9][12] = 42,
- [1][0][8][12] = 60,
- [1][0][11][12] = 42,
- [1][0][2][13] = 127,
- [1][0][1][13] = 127,
- [1][0][3][13] = 127,
- [1][0][5][13] = 127,
- [1][0][6][13] = 127,
- [1][0][9][13] = 127,
- [1][0][8][13] = 127,
- [1][0][11][13] = 127,
- [1][1][2][0] = 66,
- [1][1][1][0] = 30,
- [1][1][3][0] = 38,
- [1][1][5][0] = 66,
- [1][1][6][0] = 30,
- [1][1][9][0] = 30,
- [1][1][8][0] = 48,
- [1][1][11][0] = 30,
- [1][1][2][1] = 66,
- [1][1][1][1] = 30,
- [1][1][3][1] = 38,
- [1][1][5][1] = 66,
- [1][1][6][1] = 30,
- [1][1][9][1] = 30,
- [1][1][8][1] = 48,
- [1][1][11][1] = 30,
- [1][1][2][2] = 70,
- [1][1][1][2] = 30,
- [1][1][3][2] = 38,
- [1][1][5][2] = 70,
- [1][1][6][2] = 30,
- [1][1][9][2] = 30,
- [1][1][8][2] = 48,
- [1][1][11][2] = 30,
- [1][1][2][3] = 74,
- [1][1][1][3] = 30,
- [1][1][3][3] = 38,
- [1][1][5][3] = 74,
- [1][1][6][3] = 30,
- [1][1][9][3] = 30,
- [1][1][8][3] = 48,
- [1][1][11][3] = 30,
- [1][1][2][4] = 78,
- [1][1][1][4] = 30,
- [1][1][3][4] = 38,
- [1][1][5][4] = 78,
- [1][1][6][4] = 30,
- [1][1][9][4] = 30,
- [1][1][8][4] = 48,
- [1][1][11][4] = 30,
- [1][1][2][5] = 78,
- [1][1][1][5] = 30,
- [1][1][3][5] = 38,
- [1][1][5][5] = 78,
- [1][1][6][5] = 30,
- [1][1][9][5] = 30,
- [1][1][8][5] = 48,
- [1][1][11][5] = 30,
- [1][1][2][6] = 78,
- [1][1][1][6] = 30,
- [1][1][3][6] = 38,
- [1][1][5][6] = 78,
- [1][1][6][6] = 30,
- [1][1][9][6] = 30,
- [1][1][8][6] = 48,
- [1][1][11][6] = 30,
- [1][1][2][7] = 74,
- [1][1][1][7] = 30,
- [1][1][3][7] = 38,
- [1][1][5][7] = 74,
- [1][1][6][7] = 30,
- [1][1][9][7] = 30,
- [1][1][8][7] = 48,
- [1][1][11][7] = 30,
- [1][1][2][8] = 70,
- [1][1][1][8] = 30,
- [1][1][3][8] = 38,
- [1][1][5][8] = 70,
- [1][1][6][8] = 30,
- [1][1][9][8] = 30,
- [1][1][8][8] = 48,
- [1][1][11][8] = 30,
- [1][1][2][9] = 66,
- [1][1][1][9] = 30,
- [1][1][3][9] = 38,
- [1][1][5][9] = 66,
- [1][1][6][9] = 30,
- [1][1][9][9] = 30,
- [1][1][8][9] = 48,
- [1][1][11][9] = 30,
- [1][1][2][10] = 66,
- [1][1][1][10] = 30,
- [1][1][3][10] = 38,
- [1][1][5][10] = 66,
- [1][1][6][10] = 30,
- [1][1][9][10] = 30,
- [1][1][8][10] = 48,
- [1][1][11][10] = 30,
- [1][1][2][11] = 60,
- [1][1][1][11] = 30,
- [1][1][3][11] = 38,
- [1][1][5][11] = 60,
- [1][1][6][11] = 30,
- [1][1][9][11] = 30,
- [1][1][8][11] = 48,
- [1][1][11][11] = 30,
- [1][1][2][12] = 32,
- [1][1][1][12] = 30,
- [1][1][3][12] = 38,
- [1][1][5][12] = 32,
- [1][1][6][12] = 30,
- [1][1][9][12] = 30,
- [1][1][8][12] = 48,
- [1][1][11][12] = 30,
- [1][1][2][13] = 127,
- [1][1][1][13] = 127,
- [1][1][3][13] = 127,
- [1][1][5][13] = 127,
- [1][1][6][13] = 127,
- [1][1][9][13] = 127,
- [1][1][8][13] = 127,
- [1][1][11][13] = 127,
- [2][0][2][0] = 76,
- [2][0][1][0] = 52,
- [2][0][3][0] = 64,
- [2][0][5][0] = 76,
- [2][0][6][0] = 52,
- [2][0][9][0] = 52,
- [2][0][8][0] = 60,
- [2][0][11][0] = 52,
- [2][0][2][1] = 76,
- [2][0][1][1] = 52,
- [2][0][3][1] = 64,
- [2][0][5][1] = 76,
- [2][0][6][1] = 52,
- [2][0][9][1] = 52,
- [2][0][8][1] = 60,
- [2][0][11][1] = 52,
- [2][0][2][2] = 78,
- [2][0][1][2] = 52,
- [2][0][3][2] = 64,
- [2][0][5][2] = 78,
- [2][0][6][2] = 52,
- [2][0][9][2] = 52,
- [2][0][8][2] = 60,
- [2][0][11][2] = 52,
- [2][0][2][3] = 78,
- [2][0][1][3] = 52,
- [2][0][3][3] = 64,
- [2][0][5][3] = 78,
- [2][0][6][3] = 52,
- [2][0][9][3] = 52,
- [2][0][8][3] = 60,
- [2][0][11][3] = 52,
- [2][0][2][4] = 78,
- [2][0][1][4] = 52,
- [2][0][3][4] = 64,
- [2][0][5][4] = 78,
- [2][0][6][4] = 52,
- [2][0][9][4] = 52,
- [2][0][8][4] = 60,
- [2][0][11][4] = 52,
- [2][0][2][5] = 78,
- [2][0][1][5] = 52,
- [2][0][3][5] = 64,
- [2][0][5][5] = 78,
- [2][0][6][5] = 52,
- [2][0][9][5] = 52,
- [2][0][8][5] = 60,
- [2][0][11][5] = 52,
- [2][0][2][6] = 78,
- [2][0][1][6] = 52,
- [2][0][3][6] = 64,
- [2][0][5][6] = 78,
- [2][0][6][6] = 52,
- [2][0][9][6] = 52,
- [2][0][8][6] = 60,
- [2][0][11][6] = 52,
- [2][0][2][7] = 78,
- [2][0][1][7] = 52,
- [2][0][3][7] = 64,
- [2][0][5][7] = 78,
- [2][0][6][7] = 52,
- [2][0][9][7] = 52,
- [2][0][8][7] = 60,
- [2][0][11][7] = 52,
- [2][0][2][8] = 78,
- [2][0][1][8] = 52,
- [2][0][3][8] = 64,
- [2][0][5][8] = 78,
- [2][0][6][8] = 52,
- [2][0][9][8] = 52,
- [2][0][8][8] = 60,
- [2][0][11][8] = 52,
- [2][0][2][9] = 76,
- [2][0][1][9] = 52,
- [2][0][3][9] = 64,
- [2][0][5][9] = 76,
- [2][0][6][9] = 52,
- [2][0][9][9] = 52,
- [2][0][8][9] = 60,
- [2][0][11][9] = 52,
- [2][0][2][10] = 76,
- [2][0][1][10] = 52,
- [2][0][3][10] = 64,
- [2][0][5][10] = 76,
- [2][0][6][10] = 52,
- [2][0][9][10] = 52,
- [2][0][8][10] = 60,
- [2][0][11][10] = 52,
- [2][0][2][11] = 68,
- [2][0][1][11] = 52,
- [2][0][3][11] = 64,
- [2][0][5][11] = 68,
- [2][0][6][11] = 52,
- [2][0][9][11] = 52,
- [2][0][8][11] = 60,
- [2][0][11][11] = 52,
- [2][0][2][12] = 40,
- [2][0][1][12] = 52,
- [2][0][3][12] = 64,
- [2][0][5][12] = 40,
- [2][0][6][12] = 52,
- [2][0][9][12] = 52,
- [2][0][8][12] = 60,
- [2][0][11][12] = 52,
- [2][0][2][13] = 127,
- [2][0][1][13] = 127,
- [2][0][3][13] = 127,
- [2][0][5][13] = 127,
- [2][0][6][13] = 127,
- [2][0][9][13] = 127,
- [2][0][8][13] = 127,
- [2][0][11][13] = 127,
- [2][1][2][0] = 68,
- [2][1][1][0] = 40,
- [2][1][3][0] = 52,
- [2][1][5][0] = 68,
- [2][1][6][0] = 40,
- [2][1][9][0] = 40,
- [2][1][8][0] = 48,
- [2][1][11][0] = 40,
- [2][1][2][1] = 68,
- [2][1][1][1] = 40,
- [2][1][3][1] = 52,
- [2][1][5][1] = 68,
- [2][1][6][1] = 40,
- [2][1][9][1] = 40,
- [2][1][8][1] = 48,
- [2][1][11][1] = 40,
- [2][1][2][2] = 72,
- [2][1][1][2] = 40,
- [2][1][3][2] = 52,
- [2][1][5][2] = 72,
- [2][1][6][2] = 40,
- [2][1][9][2] = 40,
- [2][1][8][2] = 48,
- [2][1][11][2] = 40,
- [2][1][2][3] = 76,
- [2][1][1][3] = 40,
- [2][1][3][3] = 52,
- [2][1][5][3] = 76,
- [2][1][6][3] = 40,
- [2][1][9][3] = 40,
- [2][1][8][3] = 48,
- [2][1][11][3] = 40,
- [2][1][2][4] = 78,
- [2][1][1][4] = 40,
- [2][1][3][4] = 52,
- [2][1][5][4] = 78,
- [2][1][6][4] = 40,
- [2][1][9][4] = 40,
- [2][1][8][4] = 48,
- [2][1][11][4] = 40,
- [2][1][2][5] = 78,
- [2][1][1][5] = 40,
- [2][1][3][5] = 52,
- [2][1][5][5] = 78,
- [2][1][6][5] = 40,
- [2][1][9][5] = 40,
- [2][1][8][5] = 48,
- [2][1][11][5] = 40,
- [2][1][2][6] = 78,
- [2][1][1][6] = 40,
- [2][1][3][6] = 52,
- [2][1][5][6] = 78,
- [2][1][6][6] = 40,
- [2][1][9][6] = 40,
- [2][1][8][6] = 48,
- [2][1][11][6] = 40,
- [2][1][2][7] = 78,
- [2][1][1][7] = 40,
- [2][1][3][7] = 52,
- [2][1][5][7] = 78,
- [2][1][6][7] = 40,
- [2][1][9][7] = 40,
- [2][1][8][7] = 48,
- [2][1][11][7] = 40,
- [2][1][2][8] = 74,
- [2][1][1][8] = 40,
- [2][1][3][8] = 52,
- [2][1][5][8] = 74,
- [2][1][6][8] = 40,
- [2][1][9][8] = 40,
- [2][1][8][8] = 48,
- [2][1][11][8] = 40,
- [2][1][2][9] = 70,
- [2][1][1][9] = 40,
- [2][1][3][9] = 52,
- [2][1][5][9] = 70,
- [2][1][6][9] = 40,
- [2][1][9][9] = 40,
- [2][1][8][9] = 48,
- [2][1][11][9] = 40,
- [2][1][2][10] = 70,
- [2][1][1][10] = 40,
- [2][1][3][10] = 52,
- [2][1][5][10] = 70,
- [2][1][6][10] = 40,
- [2][1][9][10] = 40,
- [2][1][8][10] = 48,
- [2][1][11][10] = 40,
- [2][1][2][11] = 48,
- [2][1][1][11] = 40,
- [2][1][3][11] = 52,
- [2][1][5][11] = 48,
- [2][1][6][11] = 40,
- [2][1][9][11] = 40,
- [2][1][8][11] = 48,
- [2][1][11][11] = 40,
- [2][1][2][12] = 26,
- [2][1][1][12] = 40,
- [2][1][3][12] = 52,
- [2][1][5][12] = 26,
- [2][1][6][12] = 40,
- [2][1][9][12] = 40,
- [2][1][8][12] = 48,
- [2][1][11][12] = 40,
- [2][1][2][13] = 127,
- [2][1][1][13] = 127,
- [2][1][3][13] = 127,
- [2][1][5][13] = 127,
- [2][1][6][13] = 127,
- [2][1][9][13] = 127,
- [2][1][8][13] = 127,
- [2][1][11][13] = 127,
+ [0][0][RTW89_WW][0] = 32,
+ [0][0][RTW89_WW][1] = 32,
+ [0][0][RTW89_WW][2] = 32,
+ [0][0][RTW89_WW][3] = 32,
+ [0][0][RTW89_WW][4] = 32,
+ [0][0][RTW89_WW][5] = 32,
+ [0][0][RTW89_WW][6] = 32,
+ [0][0][RTW89_WW][7] = 32,
+ [0][0][RTW89_WW][8] = 32,
+ [0][0][RTW89_WW][9] = 32,
+ [0][0][RTW89_WW][10] = 32,
+ [0][0][RTW89_WW][11] = 32,
+ [0][0][RTW89_WW][12] = 32,
+ [0][0][RTW89_WW][13] = 0,
+ [0][1][RTW89_WW][0] = 20,
+ [0][1][RTW89_WW][1] = 20,
+ [0][1][RTW89_WW][2] = 20,
+ [0][1][RTW89_WW][3] = 20,
+ [0][1][RTW89_WW][4] = 20,
+ [0][1][RTW89_WW][5] = 20,
+ [0][1][RTW89_WW][6] = 20,
+ [0][1][RTW89_WW][7] = 20,
+ [0][1][RTW89_WW][8] = 20,
+ [0][1][RTW89_WW][9] = 20,
+ [0][1][RTW89_WW][10] = 20,
+ [0][1][RTW89_WW][11] = 20,
+ [0][1][RTW89_WW][12] = 20,
+ [0][1][RTW89_WW][13] = 0,
+ [1][0][RTW89_WW][0] = 42,
+ [1][0][RTW89_WW][1] = 42,
+ [1][0][RTW89_WW][2] = 42,
+ [1][0][RTW89_WW][3] = 42,
+ [1][0][RTW89_WW][4] = 42,
+ [1][0][RTW89_WW][5] = 42,
+ [1][0][RTW89_WW][6] = 42,
+ [1][0][RTW89_WW][7] = 42,
+ [1][0][RTW89_WW][8] = 42,
+ [1][0][RTW89_WW][9] = 42,
+ [1][0][RTW89_WW][10] = 42,
+ [1][0][RTW89_WW][11] = 42,
+ [1][0][RTW89_WW][12] = 36,
+ [1][0][RTW89_WW][13] = 0,
+ [1][1][RTW89_WW][0] = 30,
+ [1][1][RTW89_WW][1] = 30,
+ [1][1][RTW89_WW][2] = 30,
+ [1][1][RTW89_WW][3] = 30,
+ [1][1][RTW89_WW][4] = 30,
+ [1][1][RTW89_WW][5] = 30,
+ [1][1][RTW89_WW][6] = 30,
+ [1][1][RTW89_WW][7] = 30,
+ [1][1][RTW89_WW][8] = 30,
+ [1][1][RTW89_WW][9] = 30,
+ [1][1][RTW89_WW][10] = 30,
+ [1][1][RTW89_WW][11] = 30,
+ [1][1][RTW89_WW][12] = 30,
+ [1][1][RTW89_WW][13] = 0,
+ [2][0][RTW89_WW][0] = 52,
+ [2][0][RTW89_WW][1] = 52,
+ [2][0][RTW89_WW][2] = 52,
+ [2][0][RTW89_WW][3] = 52,
+ [2][0][RTW89_WW][4] = 52,
+ [2][0][RTW89_WW][5] = 52,
+ [2][0][RTW89_WW][6] = 52,
+ [2][0][RTW89_WW][7] = 52,
+ [2][0][RTW89_WW][8] = 52,
+ [2][0][RTW89_WW][9] = 52,
+ [2][0][RTW89_WW][10] = 52,
+ [2][0][RTW89_WW][11] = 52,
+ [2][0][RTW89_WW][12] = 40,
+ [2][0][RTW89_WW][13] = 0,
+ [2][1][RTW89_WW][0] = 40,
+ [2][1][RTW89_WW][1] = 40,
+ [2][1][RTW89_WW][2] = 40,
+ [2][1][RTW89_WW][3] = 40,
+ [2][1][RTW89_WW][4] = 40,
+ [2][1][RTW89_WW][5] = 40,
+ [2][1][RTW89_WW][6] = 40,
+ [2][1][RTW89_WW][7] = 40,
+ [2][1][RTW89_WW][8] = 40,
+ [2][1][RTW89_WW][9] = 40,
+ [2][1][RTW89_WW][10] = 40,
+ [2][1][RTW89_WW][11] = 40,
+ [2][1][RTW89_WW][12] = 26,
+ [2][1][RTW89_WW][13] = 0,
+ [0][0][RTW89_FCC][0] = 70,
+ [0][0][RTW89_ETSI][0] = 32,
+ [0][0][RTW89_MKK][0] = 40,
+ [0][0][RTW89_IC][0] = 70,
+ [0][0][RTW89_KCC][0] = 46,
+ [0][0][RTW89_ACMA][0] = 32,
+ [0][0][RTW89_CHILE][0] = 60,
+ [0][0][RTW89_UKRAINE][0] = 32,
+ [0][0][RTW89_MEXICO][0] = 70,
+ [0][0][RTW89_CN][0] = 32,
+ [0][0][RTW89_QATAR][0] = 32,
+ [0][0][RTW89_FCC][1] = 70,
+ [0][0][RTW89_ETSI][1] = 32,
+ [0][0][RTW89_MKK][1] = 40,
+ [0][0][RTW89_IC][1] = 70,
+ [0][0][RTW89_KCC][1] = 46,
+ [0][0][RTW89_ACMA][1] = 32,
+ [0][0][RTW89_CHILE][1] = 60,
+ [0][0][RTW89_UKRAINE][1] = 32,
+ [0][0][RTW89_MEXICO][1] = 70,
+ [0][0][RTW89_CN][1] = 32,
+ [0][0][RTW89_QATAR][1] = 32,
+ [0][0][RTW89_FCC][2] = 74,
+ [0][0][RTW89_ETSI][2] = 32,
+ [0][0][RTW89_MKK][2] = 40,
+ [0][0][RTW89_IC][2] = 74,
+ [0][0][RTW89_KCC][2] = 46,
+ [0][0][RTW89_ACMA][2] = 32,
+ [0][0][RTW89_CHILE][2] = 60,
+ [0][0][RTW89_UKRAINE][2] = 32,
+ [0][0][RTW89_MEXICO][2] = 74,
+ [0][0][RTW89_CN][2] = 32,
+ [0][0][RTW89_QATAR][2] = 32,
+ [0][0][RTW89_FCC][3] = 78,
+ [0][0][RTW89_ETSI][3] = 32,
+ [0][0][RTW89_MKK][3] = 40,
+ [0][0][RTW89_IC][3] = 78,
+ [0][0][RTW89_KCC][3] = 46,
+ [0][0][RTW89_ACMA][3] = 32,
+ [0][0][RTW89_CHILE][3] = 60,
+ [0][0][RTW89_UKRAINE][3] = 32,
+ [0][0][RTW89_MEXICO][3] = 78,
+ [0][0][RTW89_CN][3] = 32,
+ [0][0][RTW89_QATAR][3] = 32,
+ [0][0][RTW89_FCC][4] = 78,
+ [0][0][RTW89_ETSI][4] = 32,
+ [0][0][RTW89_MKK][4] = 40,
+ [0][0][RTW89_IC][4] = 78,
+ [0][0][RTW89_KCC][4] = 46,
+ [0][0][RTW89_ACMA][4] = 32,
+ [0][0][RTW89_CHILE][4] = 60,
+ [0][0][RTW89_UKRAINE][4] = 32,
+ [0][0][RTW89_MEXICO][4] = 78,
+ [0][0][RTW89_CN][4] = 32,
+ [0][0][RTW89_QATAR][4] = 32,
+ [0][0][RTW89_FCC][5] = 78,
+ [0][0][RTW89_ETSI][5] = 32,
+ [0][0][RTW89_MKK][5] = 40,
+ [0][0][RTW89_IC][5] = 78,
+ [0][0][RTW89_KCC][5] = 46,
+ [0][0][RTW89_ACMA][5] = 32,
+ [0][0][RTW89_CHILE][5] = 60,
+ [0][0][RTW89_UKRAINE][5] = 32,
+ [0][0][RTW89_MEXICO][5] = 78,
+ [0][0][RTW89_CN][5] = 32,
+ [0][0][RTW89_QATAR][5] = 32,
+ [0][0][RTW89_FCC][6] = 78,
+ [0][0][RTW89_ETSI][6] = 32,
+ [0][0][RTW89_MKK][6] = 40,
+ [0][0][RTW89_IC][6] = 78,
+ [0][0][RTW89_KCC][6] = 46,
+ [0][0][RTW89_ACMA][6] = 32,
+ [0][0][RTW89_CHILE][6] = 60,
+ [0][0][RTW89_UKRAINE][6] = 32,
+ [0][0][RTW89_MEXICO][6] = 78,
+ [0][0][RTW89_CN][6] = 32,
+ [0][0][RTW89_QATAR][6] = 32,
+ [0][0][RTW89_FCC][7] = 78,
+ [0][0][RTW89_ETSI][7] = 32,
+ [0][0][RTW89_MKK][7] = 40,
+ [0][0][RTW89_IC][7] = 78,
+ [0][0][RTW89_KCC][7] = 46,
+ [0][0][RTW89_ACMA][7] = 32,
+ [0][0][RTW89_CHILE][7] = 60,
+ [0][0][RTW89_UKRAINE][7] = 32,
+ [0][0][RTW89_MEXICO][7] = 78,
+ [0][0][RTW89_CN][7] = 32,
+ [0][0][RTW89_QATAR][7] = 32,
+ [0][0][RTW89_FCC][8] = 74,
+ [0][0][RTW89_ETSI][8] = 32,
+ [0][0][RTW89_MKK][8] = 40,
+ [0][0][RTW89_IC][8] = 74,
+ [0][0][RTW89_KCC][8] = 46,
+ [0][0][RTW89_ACMA][8] = 32,
+ [0][0][RTW89_CHILE][8] = 60,
+ [0][0][RTW89_UKRAINE][8] = 32,
+ [0][0][RTW89_MEXICO][8] = 74,
+ [0][0][RTW89_CN][8] = 32,
+ [0][0][RTW89_QATAR][8] = 32,
+ [0][0][RTW89_FCC][9] = 70,
+ [0][0][RTW89_ETSI][9] = 32,
+ [0][0][RTW89_MKK][9] = 40,
+ [0][0][RTW89_IC][9] = 70,
+ [0][0][RTW89_KCC][9] = 46,
+ [0][0][RTW89_ACMA][9] = 32,
+ [0][0][RTW89_CHILE][9] = 60,
+ [0][0][RTW89_UKRAINE][9] = 32,
+ [0][0][RTW89_MEXICO][9] = 70,
+ [0][0][RTW89_CN][9] = 32,
+ [0][0][RTW89_QATAR][9] = 32,
+ [0][0][RTW89_FCC][10] = 70,
+ [0][0][RTW89_ETSI][10] = 32,
+ [0][0][RTW89_MKK][10] = 40,
+ [0][0][RTW89_IC][10] = 70,
+ [0][0][RTW89_KCC][10] = 46,
+ [0][0][RTW89_ACMA][10] = 32,
+ [0][0][RTW89_CHILE][10] = 60,
+ [0][0][RTW89_UKRAINE][10] = 32,
+ [0][0][RTW89_MEXICO][10] = 70,
+ [0][0][RTW89_CN][10] = 32,
+ [0][0][RTW89_QATAR][10] = 32,
+ [0][0][RTW89_FCC][11] = 58,
+ [0][0][RTW89_ETSI][11] = 32,
+ [0][0][RTW89_MKK][11] = 40,
+ [0][0][RTW89_IC][11] = 58,
+ [0][0][RTW89_KCC][11] = 46,
+ [0][0][RTW89_ACMA][11] = 32,
+ [0][0][RTW89_CHILE][11] = 58,
+ [0][0][RTW89_UKRAINE][11] = 32,
+ [0][0][RTW89_MEXICO][11] = 58,
+ [0][0][RTW89_CN][11] = 32,
+ [0][0][RTW89_QATAR][11] = 32,
+ [0][0][RTW89_FCC][12] = 34,
+ [0][0][RTW89_ETSI][12] = 32,
+ [0][0][RTW89_MKK][12] = 40,
+ [0][0][RTW89_IC][12] = 34,
+ [0][0][RTW89_KCC][12] = 46,
+ [0][0][RTW89_ACMA][12] = 32,
+ [0][0][RTW89_CHILE][12] = 34,
+ [0][0][RTW89_UKRAINE][12] = 32,
+ [0][0][RTW89_MEXICO][12] = 34,
+ [0][0][RTW89_CN][12] = 32,
+ [0][0][RTW89_QATAR][12] = 32,
+ [0][0][RTW89_FCC][13] = 127,
+ [0][0][RTW89_ETSI][13] = 127,
+ [0][0][RTW89_MKK][13] = 127,
+ [0][0][RTW89_IC][13] = 127,
+ [0][0][RTW89_KCC][13] = 127,
+ [0][0][RTW89_ACMA][13] = 127,
+ [0][0][RTW89_CHILE][13] = 127,
+ [0][0][RTW89_UKRAINE][13] = 127,
+ [0][0][RTW89_MEXICO][13] = 127,
+ [0][0][RTW89_CN][13] = 127,
+ [0][0][RTW89_QATAR][13] = 127,
+ [0][1][RTW89_FCC][0] = 64,
+ [0][1][RTW89_ETSI][0] = 20,
+ [0][1][RTW89_MKK][0] = 28,
+ [0][1][RTW89_IC][0] = 64,
+ [0][1][RTW89_KCC][0] = 32,
+ [0][1][RTW89_ACMA][0] = 20,
+ [0][1][RTW89_CHILE][0] = 48,
+ [0][1][RTW89_UKRAINE][0] = 20,
+ [0][1][RTW89_MEXICO][0] = 64,
+ [0][1][RTW89_CN][0] = 20,
+ [0][1][RTW89_QATAR][0] = 20,
+ [0][1][RTW89_FCC][1] = 64,
+ [0][1][RTW89_ETSI][1] = 20,
+ [0][1][RTW89_MKK][1] = 28,
+ [0][1][RTW89_IC][1] = 64,
+ [0][1][RTW89_KCC][1] = 32,
+ [0][1][RTW89_ACMA][1] = 20,
+ [0][1][RTW89_CHILE][1] = 48,
+ [0][1][RTW89_UKRAINE][1] = 20,
+ [0][1][RTW89_MEXICO][1] = 64,
+ [0][1][RTW89_CN][1] = 20,
+ [0][1][RTW89_QATAR][1] = 20,
+ [0][1][RTW89_FCC][2] = 68,
+ [0][1][RTW89_ETSI][2] = 20,
+ [0][1][RTW89_MKK][2] = 28,
+ [0][1][RTW89_IC][2] = 68,
+ [0][1][RTW89_KCC][2] = 32,
+ [0][1][RTW89_ACMA][2] = 20,
+ [0][1][RTW89_CHILE][2] = 48,
+ [0][1][RTW89_UKRAINE][2] = 20,
+ [0][1][RTW89_MEXICO][2] = 68,
+ [0][1][RTW89_CN][2] = 20,
+ [0][1][RTW89_QATAR][2] = 20,
+ [0][1][RTW89_FCC][3] = 72,
+ [0][1][RTW89_ETSI][3] = 20,
+ [0][1][RTW89_MKK][3] = 28,
+ [0][1][RTW89_IC][3] = 72,
+ [0][1][RTW89_KCC][3] = 32,
+ [0][1][RTW89_ACMA][3] = 20,
+ [0][1][RTW89_CHILE][3] = 48,
+ [0][1][RTW89_UKRAINE][3] = 20,
+ [0][1][RTW89_MEXICO][3] = 72,
+ [0][1][RTW89_CN][3] = 20,
+ [0][1][RTW89_QATAR][3] = 20,
+ [0][1][RTW89_FCC][4] = 76,
+ [0][1][RTW89_ETSI][4] = 20,
+ [0][1][RTW89_MKK][4] = 28,
+ [0][1][RTW89_IC][4] = 76,
+ [0][1][RTW89_KCC][4] = 32,
+ [0][1][RTW89_ACMA][4] = 20,
+ [0][1][RTW89_CHILE][4] = 48,
+ [0][1][RTW89_UKRAINE][4] = 20,
+ [0][1][RTW89_MEXICO][4] = 76,
+ [0][1][RTW89_CN][4] = 20,
+ [0][1][RTW89_QATAR][4] = 20,
+ [0][1][RTW89_FCC][5] = 78,
+ [0][1][RTW89_ETSI][5] = 20,
+ [0][1][RTW89_MKK][5] = 28,
+ [0][1][RTW89_IC][5] = 78,
+ [0][1][RTW89_KCC][5] = 32,
+ [0][1][RTW89_ACMA][5] = 20,
+ [0][1][RTW89_CHILE][5] = 48,
+ [0][1][RTW89_UKRAINE][5] = 20,
+ [0][1][RTW89_MEXICO][5] = 78,
+ [0][1][RTW89_CN][5] = 20,
+ [0][1][RTW89_QATAR][5] = 20,
+ [0][1][RTW89_FCC][6] = 76,
+ [0][1][RTW89_ETSI][6] = 20,
+ [0][1][RTW89_MKK][6] = 28,
+ [0][1][RTW89_IC][6] = 76,
+ [0][1][RTW89_KCC][6] = 32,
+ [0][1][RTW89_ACMA][6] = 20,
+ [0][1][RTW89_CHILE][6] = 48,
+ [0][1][RTW89_UKRAINE][6] = 20,
+ [0][1][RTW89_MEXICO][6] = 76,
+ [0][1][RTW89_CN][6] = 20,
+ [0][1][RTW89_QATAR][6] = 20,
+ [0][1][RTW89_FCC][7] = 72,
+ [0][1][RTW89_ETSI][7] = 20,
+ [0][1][RTW89_MKK][7] = 28,
+ [0][1][RTW89_IC][7] = 72,
+ [0][1][RTW89_KCC][7] = 32,
+ [0][1][RTW89_ACMA][7] = 20,
+ [0][1][RTW89_CHILE][7] = 48,
+ [0][1][RTW89_UKRAINE][7] = 20,
+ [0][1][RTW89_MEXICO][7] = 72,
+ [0][1][RTW89_CN][7] = 20,
+ [0][1][RTW89_QATAR][7] = 20,
+ [0][1][RTW89_FCC][8] = 68,
+ [0][1][RTW89_ETSI][8] = 20,
+ [0][1][RTW89_MKK][8] = 28,
+ [0][1][RTW89_IC][8] = 68,
+ [0][1][RTW89_KCC][8] = 32,
+ [0][1][RTW89_ACMA][8] = 20,
+ [0][1][RTW89_CHILE][8] = 48,
+ [0][1][RTW89_UKRAINE][8] = 20,
+ [0][1][RTW89_MEXICO][8] = 68,
+ [0][1][RTW89_CN][8] = 20,
+ [0][1][RTW89_QATAR][8] = 20,
+ [0][1][RTW89_FCC][9] = 64,
+ [0][1][RTW89_ETSI][9] = 20,
+ [0][1][RTW89_MKK][9] = 28,
+ [0][1][RTW89_IC][9] = 64,
+ [0][1][RTW89_KCC][9] = 32,
+ [0][1][RTW89_ACMA][9] = 20,
+ [0][1][RTW89_CHILE][9] = 48,
+ [0][1][RTW89_UKRAINE][9] = 20,
+ [0][1][RTW89_MEXICO][9] = 64,
+ [0][1][RTW89_CN][9] = 20,
+ [0][1][RTW89_QATAR][9] = 20,
+ [0][1][RTW89_FCC][10] = 64,
+ [0][1][RTW89_ETSI][10] = 20,
+ [0][1][RTW89_MKK][10] = 28,
+ [0][1][RTW89_IC][10] = 64,
+ [0][1][RTW89_KCC][10] = 32,
+ [0][1][RTW89_ACMA][10] = 20,
+ [0][1][RTW89_CHILE][10] = 48,
+ [0][1][RTW89_UKRAINE][10] = 20,
+ [0][1][RTW89_MEXICO][10] = 64,
+ [0][1][RTW89_CN][10] = 20,
+ [0][1][RTW89_QATAR][10] = 20,
+ [0][1][RTW89_FCC][11] = 54,
+ [0][1][RTW89_ETSI][11] = 20,
+ [0][1][RTW89_MKK][11] = 28,
+ [0][1][RTW89_IC][11] = 54,
+ [0][1][RTW89_KCC][11] = 32,
+ [0][1][RTW89_ACMA][11] = 20,
+ [0][1][RTW89_CHILE][11] = 48,
+ [0][1][RTW89_UKRAINE][11] = 20,
+ [0][1][RTW89_MEXICO][11] = 54,
+ [0][1][RTW89_CN][11] = 20,
+ [0][1][RTW89_QATAR][11] = 20,
+ [0][1][RTW89_FCC][12] = 32,
+ [0][1][RTW89_ETSI][12] = 20,
+ [0][1][RTW89_MKK][12] = 28,
+ [0][1][RTW89_IC][12] = 32,
+ [0][1][RTW89_KCC][12] = 32,
+ [0][1][RTW89_ACMA][12] = 20,
+ [0][1][RTW89_CHILE][12] = 32,
+ [0][1][RTW89_UKRAINE][12] = 20,
+ [0][1][RTW89_MEXICO][12] = 32,
+ [0][1][RTW89_CN][12] = 20,
+ [0][1][RTW89_QATAR][12] = 20,
+ [0][1][RTW89_FCC][13] = 127,
+ [0][1][RTW89_ETSI][13] = 127,
+ [0][1][RTW89_MKK][13] = 127,
+ [0][1][RTW89_IC][13] = 127,
+ [0][1][RTW89_KCC][13] = 127,
+ [0][1][RTW89_ACMA][13] = 127,
+ [0][1][RTW89_CHILE][13] = 127,
+ [0][1][RTW89_UKRAINE][13] = 127,
+ [0][1][RTW89_MEXICO][13] = 127,
+ [0][1][RTW89_CN][13] = 127,
+ [0][1][RTW89_QATAR][13] = 127,
+ [1][0][RTW89_FCC][0] = 72,
+ [1][0][RTW89_ETSI][0] = 42,
+ [1][0][RTW89_MKK][0] = 50,
+ [1][0][RTW89_IC][0] = 72,
+ [1][0][RTW89_KCC][0] = 58,
+ [1][0][RTW89_ACMA][0] = 42,
+ [1][0][RTW89_CHILE][0] = 60,
+ [1][0][RTW89_UKRAINE][0] = 42,
+ [1][0][RTW89_MEXICO][0] = 72,
+ [1][0][RTW89_CN][0] = 42,
+ [1][0][RTW89_QATAR][0] = 42,
+ [1][0][RTW89_FCC][1] = 72,
+ [1][0][RTW89_ETSI][1] = 42,
+ [1][0][RTW89_MKK][1] = 50,
+ [1][0][RTW89_IC][1] = 72,
+ [1][0][RTW89_KCC][1] = 58,
+ [1][0][RTW89_ACMA][1] = 42,
+ [1][0][RTW89_CHILE][1] = 60,
+ [1][0][RTW89_UKRAINE][1] = 42,
+ [1][0][RTW89_MEXICO][1] = 72,
+ [1][0][RTW89_CN][1] = 42,
+ [1][0][RTW89_QATAR][1] = 42,
+ [1][0][RTW89_FCC][2] = 76,
+ [1][0][RTW89_ETSI][2] = 42,
+ [1][0][RTW89_MKK][2] = 50,
+ [1][0][RTW89_IC][2] = 76,
+ [1][0][RTW89_KCC][2] = 58,
+ [1][0][RTW89_ACMA][2] = 42,
+ [1][0][RTW89_CHILE][2] = 60,
+ [1][0][RTW89_UKRAINE][2] = 42,
+ [1][0][RTW89_MEXICO][2] = 76,
+ [1][0][RTW89_CN][2] = 42,
+ [1][0][RTW89_QATAR][2] = 42,
+ [1][0][RTW89_FCC][3] = 78,
+ [1][0][RTW89_ETSI][3] = 42,
+ [1][0][RTW89_MKK][3] = 50,
+ [1][0][RTW89_IC][3] = 78,
+ [1][0][RTW89_KCC][3] = 58,
+ [1][0][RTW89_ACMA][3] = 42,
+ [1][0][RTW89_CHILE][3] = 60,
+ [1][0][RTW89_UKRAINE][3] = 42,
+ [1][0][RTW89_MEXICO][3] = 78,
+ [1][0][RTW89_CN][3] = 42,
+ [1][0][RTW89_QATAR][3] = 42,
+ [1][0][RTW89_FCC][4] = 78,
+ [1][0][RTW89_ETSI][4] = 42,
+ [1][0][RTW89_MKK][4] = 50,
+ [1][0][RTW89_IC][4] = 78,
+ [1][0][RTW89_KCC][4] = 58,
+ [1][0][RTW89_ACMA][4] = 42,
+ [1][0][RTW89_CHILE][4] = 60,
+ [1][0][RTW89_UKRAINE][4] = 42,
+ [1][0][RTW89_MEXICO][4] = 78,
+ [1][0][RTW89_CN][4] = 42,
+ [1][0][RTW89_QATAR][4] = 42,
+ [1][0][RTW89_FCC][5] = 78,
+ [1][0][RTW89_ETSI][5] = 42,
+ [1][0][RTW89_MKK][5] = 50,
+ [1][0][RTW89_IC][5] = 78,
+ [1][0][RTW89_KCC][5] = 58,
+ [1][0][RTW89_ACMA][5] = 42,
+ [1][0][RTW89_CHILE][5] = 60,
+ [1][0][RTW89_UKRAINE][5] = 42,
+ [1][0][RTW89_MEXICO][5] = 78,
+ [1][0][RTW89_CN][5] = 42,
+ [1][0][RTW89_QATAR][5] = 42,
+ [1][0][RTW89_FCC][6] = 78,
+ [1][0][RTW89_ETSI][6] = 42,
+ [1][0][RTW89_MKK][6] = 50,
+ [1][0][RTW89_IC][6] = 78,
+ [1][0][RTW89_KCC][6] = 58,
+ [1][0][RTW89_ACMA][6] = 42,
+ [1][0][RTW89_CHILE][6] = 60,
+ [1][0][RTW89_UKRAINE][6] = 42,
+ [1][0][RTW89_MEXICO][6] = 78,
+ [1][0][RTW89_CN][6] = 42,
+ [1][0][RTW89_QATAR][6] = 42,
+ [1][0][RTW89_FCC][7] = 78,
+ [1][0][RTW89_ETSI][7] = 42,
+ [1][0][RTW89_MKK][7] = 50,
+ [1][0][RTW89_IC][7] = 78,
+ [1][0][RTW89_KCC][7] = 58,
+ [1][0][RTW89_ACMA][7] = 42,
+ [1][0][RTW89_CHILE][7] = 60,
+ [1][0][RTW89_UKRAINE][7] = 42,
+ [1][0][RTW89_MEXICO][7] = 78,
+ [1][0][RTW89_CN][7] = 42,
+ [1][0][RTW89_QATAR][7] = 42,
+ [1][0][RTW89_FCC][8] = 78,
+ [1][0][RTW89_ETSI][8] = 42,
+ [1][0][RTW89_MKK][8] = 50,
+ [1][0][RTW89_IC][8] = 78,
+ [1][0][RTW89_KCC][8] = 58,
+ [1][0][RTW89_ACMA][8] = 42,
+ [1][0][RTW89_CHILE][8] = 60,
+ [1][0][RTW89_UKRAINE][8] = 42,
+ [1][0][RTW89_MEXICO][8] = 78,
+ [1][0][RTW89_CN][8] = 42,
+ [1][0][RTW89_QATAR][8] = 42,
+ [1][0][RTW89_FCC][9] = 74,
+ [1][0][RTW89_ETSI][9] = 42,
+ [1][0][RTW89_MKK][9] = 50,
+ [1][0][RTW89_IC][9] = 74,
+ [1][0][RTW89_KCC][9] = 58,
+ [1][0][RTW89_ACMA][9] = 42,
+ [1][0][RTW89_CHILE][9] = 60,
+ [1][0][RTW89_UKRAINE][9] = 42,
+ [1][0][RTW89_MEXICO][9] = 74,
+ [1][0][RTW89_CN][9] = 42,
+ [1][0][RTW89_QATAR][9] = 42,
+ [1][0][RTW89_FCC][10] = 74,
+ [1][0][RTW89_ETSI][10] = 42,
+ [1][0][RTW89_MKK][10] = 50,
+ [1][0][RTW89_IC][10] = 74,
+ [1][0][RTW89_KCC][10] = 58,
+ [1][0][RTW89_ACMA][10] = 42,
+ [1][0][RTW89_CHILE][10] = 60,
+ [1][0][RTW89_UKRAINE][10] = 42,
+ [1][0][RTW89_MEXICO][10] = 74,
+ [1][0][RTW89_CN][10] = 42,
+ [1][0][RTW89_QATAR][10] = 42,
+ [1][0][RTW89_FCC][11] = 64,
+ [1][0][RTW89_ETSI][11] = 42,
+ [1][0][RTW89_MKK][11] = 50,
+ [1][0][RTW89_IC][11] = 64,
+ [1][0][RTW89_KCC][11] = 58,
+ [1][0][RTW89_ACMA][11] = 42,
+ [1][0][RTW89_CHILE][11] = 60,
+ [1][0][RTW89_UKRAINE][11] = 42,
+ [1][0][RTW89_MEXICO][11] = 64,
+ [1][0][RTW89_CN][11] = 42,
+ [1][0][RTW89_QATAR][11] = 42,
+ [1][0][RTW89_FCC][12] = 36,
+ [1][0][RTW89_ETSI][12] = 42,
+ [1][0][RTW89_MKK][12] = 50,
+ [1][0][RTW89_IC][12] = 36,
+ [1][0][RTW89_KCC][12] = 58,
+ [1][0][RTW89_ACMA][12] = 42,
+ [1][0][RTW89_CHILE][12] = 36,
+ [1][0][RTW89_UKRAINE][12] = 42,
+ [1][0][RTW89_MEXICO][12] = 36,
+ [1][0][RTW89_CN][12] = 42,
+ [1][0][RTW89_QATAR][12] = 42,
+ [1][0][RTW89_FCC][13] = 127,
+ [1][0][RTW89_ETSI][13] = 127,
+ [1][0][RTW89_MKK][13] = 127,
+ [1][0][RTW89_IC][13] = 127,
+ [1][0][RTW89_KCC][13] = 127,
+ [1][0][RTW89_ACMA][13] = 127,
+ [1][0][RTW89_CHILE][13] = 127,
+ [1][0][RTW89_UKRAINE][13] = 127,
+ [1][0][RTW89_MEXICO][13] = 127,
+ [1][0][RTW89_CN][13] = 127,
+ [1][0][RTW89_QATAR][13] = 127,
+ [1][1][RTW89_FCC][0] = 66,
+ [1][1][RTW89_ETSI][0] = 30,
+ [1][1][RTW89_MKK][0] = 38,
+ [1][1][RTW89_IC][0] = 66,
+ [1][1][RTW89_KCC][0] = 44,
+ [1][1][RTW89_ACMA][0] = 30,
+ [1][1][RTW89_CHILE][0] = 48,
+ [1][1][RTW89_UKRAINE][0] = 30,
+ [1][1][RTW89_MEXICO][0] = 66,
+ [1][1][RTW89_CN][0] = 30,
+ [1][1][RTW89_QATAR][0] = 30,
+ [1][1][RTW89_FCC][1] = 66,
+ [1][1][RTW89_ETSI][1] = 30,
+ [1][1][RTW89_MKK][1] = 38,
+ [1][1][RTW89_IC][1] = 66,
+ [1][1][RTW89_KCC][1] = 44,
+ [1][1][RTW89_ACMA][1] = 30,
+ [1][1][RTW89_CHILE][1] = 48,
+ [1][1][RTW89_UKRAINE][1] = 30,
+ [1][1][RTW89_MEXICO][1] = 66,
+ [1][1][RTW89_CN][1] = 30,
+ [1][1][RTW89_QATAR][1] = 30,
+ [1][1][RTW89_FCC][2] = 70,
+ [1][1][RTW89_ETSI][2] = 30,
+ [1][1][RTW89_MKK][2] = 38,
+ [1][1][RTW89_IC][2] = 70,
+ [1][1][RTW89_KCC][2] = 44,
+ [1][1][RTW89_ACMA][2] = 30,
+ [1][1][RTW89_CHILE][2] = 48,
+ [1][1][RTW89_UKRAINE][2] = 30,
+ [1][1][RTW89_MEXICO][2] = 70,
+ [1][1][RTW89_CN][2] = 30,
+ [1][1][RTW89_QATAR][2] = 30,
+ [1][1][RTW89_FCC][3] = 74,
+ [1][1][RTW89_ETSI][3] = 30,
+ [1][1][RTW89_MKK][3] = 38,
+ [1][1][RTW89_IC][3] = 74,
+ [1][1][RTW89_KCC][3] = 44,
+ [1][1][RTW89_ACMA][3] = 30,
+ [1][1][RTW89_CHILE][3] = 48,
+ [1][1][RTW89_UKRAINE][3] = 30,
+ [1][1][RTW89_MEXICO][3] = 74,
+ [1][1][RTW89_CN][3] = 30,
+ [1][1][RTW89_QATAR][3] = 30,
+ [1][1][RTW89_FCC][4] = 78,
+ [1][1][RTW89_ETSI][4] = 30,
+ [1][1][RTW89_MKK][4] = 38,
+ [1][1][RTW89_IC][4] = 78,
+ [1][1][RTW89_KCC][4] = 44,
+ [1][1][RTW89_ACMA][4] = 30,
+ [1][1][RTW89_CHILE][4] = 48,
+ [1][1][RTW89_UKRAINE][4] = 30,
+ [1][1][RTW89_MEXICO][4] = 78,
+ [1][1][RTW89_CN][4] = 30,
+ [1][1][RTW89_QATAR][4] = 30,
+ [1][1][RTW89_FCC][5] = 78,
+ [1][1][RTW89_ETSI][5] = 30,
+ [1][1][RTW89_MKK][5] = 38,
+ [1][1][RTW89_IC][5] = 78,
+ [1][1][RTW89_KCC][5] = 44,
+ [1][1][RTW89_ACMA][5] = 30,
+ [1][1][RTW89_CHILE][5] = 48,
+ [1][1][RTW89_UKRAINE][5] = 30,
+ [1][1][RTW89_MEXICO][5] = 78,
+ [1][1][RTW89_CN][5] = 30,
+ [1][1][RTW89_QATAR][5] = 30,
+ [1][1][RTW89_FCC][6] = 78,
+ [1][1][RTW89_ETSI][6] = 30,
+ [1][1][RTW89_MKK][6] = 38,
+ [1][1][RTW89_IC][6] = 78,
+ [1][1][RTW89_KCC][6] = 44,
+ [1][1][RTW89_ACMA][6] = 30,
+ [1][1][RTW89_CHILE][6] = 48,
+ [1][1][RTW89_UKRAINE][6] = 30,
+ [1][1][RTW89_MEXICO][6] = 78,
+ [1][1][RTW89_CN][6] = 30,
+ [1][1][RTW89_QATAR][6] = 30,
+ [1][1][RTW89_FCC][7] = 74,
+ [1][1][RTW89_ETSI][7] = 30,
+ [1][1][RTW89_MKK][7] = 38,
+ [1][1][RTW89_IC][7] = 74,
+ [1][1][RTW89_KCC][7] = 44,
+ [1][1][RTW89_ACMA][7] = 30,
+ [1][1][RTW89_CHILE][7] = 48,
+ [1][1][RTW89_UKRAINE][7] = 30,
+ [1][1][RTW89_MEXICO][7] = 74,
+ [1][1][RTW89_CN][7] = 30,
+ [1][1][RTW89_QATAR][7] = 30,
+ [1][1][RTW89_FCC][8] = 70,
+ [1][1][RTW89_ETSI][8] = 30,
+ [1][1][RTW89_MKK][8] = 38,
+ [1][1][RTW89_IC][8] = 70,
+ [1][1][RTW89_KCC][8] = 44,
+ [1][1][RTW89_ACMA][8] = 30,
+ [1][1][RTW89_CHILE][8] = 48,
+ [1][1][RTW89_UKRAINE][8] = 30,
+ [1][1][RTW89_MEXICO][8] = 70,
+ [1][1][RTW89_CN][8] = 30,
+ [1][1][RTW89_QATAR][8] = 30,
+ [1][1][RTW89_FCC][9] = 66,
+ [1][1][RTW89_ETSI][9] = 30,
+ [1][1][RTW89_MKK][9] = 38,
+ [1][1][RTW89_IC][9] = 66,
+ [1][1][RTW89_KCC][9] = 44,
+ [1][1][RTW89_ACMA][9] = 30,
+ [1][1][RTW89_CHILE][9] = 48,
+ [1][1][RTW89_UKRAINE][9] = 30,
+ [1][1][RTW89_MEXICO][9] = 66,
+ [1][1][RTW89_CN][9] = 30,
+ [1][1][RTW89_QATAR][9] = 30,
+ [1][1][RTW89_FCC][10] = 66,
+ [1][1][RTW89_ETSI][10] = 30,
+ [1][1][RTW89_MKK][10] = 38,
+ [1][1][RTW89_IC][10] = 66,
+ [1][1][RTW89_KCC][10] = 44,
+ [1][1][RTW89_ACMA][10] = 30,
+ [1][1][RTW89_CHILE][10] = 48,
+ [1][1][RTW89_UKRAINE][10] = 30,
+ [1][1][RTW89_MEXICO][10] = 66,
+ [1][1][RTW89_CN][10] = 30,
+ [1][1][RTW89_QATAR][10] = 30,
+ [1][1][RTW89_FCC][11] = 60,
+ [1][1][RTW89_ETSI][11] = 30,
+ [1][1][RTW89_MKK][11] = 38,
+ [1][1][RTW89_IC][11] = 60,
+ [1][1][RTW89_KCC][11] = 44,
+ [1][1][RTW89_ACMA][11] = 30,
+ [1][1][RTW89_CHILE][11] = 48,
+ [1][1][RTW89_UKRAINE][11] = 30,
+ [1][1][RTW89_MEXICO][11] = 60,
+ [1][1][RTW89_CN][11] = 30,
+ [1][1][RTW89_QATAR][11] = 30,
+ [1][1][RTW89_FCC][12] = 32,
+ [1][1][RTW89_ETSI][12] = 30,
+ [1][1][RTW89_MKK][12] = 38,
+ [1][1][RTW89_IC][12] = 32,
+ [1][1][RTW89_KCC][12] = 44,
+ [1][1][RTW89_ACMA][12] = 30,
+ [1][1][RTW89_CHILE][12] = 32,
+ [1][1][RTW89_UKRAINE][12] = 30,
+ [1][1][RTW89_MEXICO][12] = 32,
+ [1][1][RTW89_CN][12] = 30,
+ [1][1][RTW89_QATAR][12] = 30,
+ [1][1][RTW89_FCC][13] = 127,
+ [1][1][RTW89_ETSI][13] = 127,
+ [1][1][RTW89_MKK][13] = 127,
+ [1][1][RTW89_IC][13] = 127,
+ [1][1][RTW89_KCC][13] = 127,
+ [1][1][RTW89_ACMA][13] = 127,
+ [1][1][RTW89_CHILE][13] = 127,
+ [1][1][RTW89_UKRAINE][13] = 127,
+ [1][1][RTW89_MEXICO][13] = 127,
+ [1][1][RTW89_CN][13] = 127,
+ [1][1][RTW89_QATAR][13] = 127,
+ [2][0][RTW89_FCC][0] = 76,
+ [2][0][RTW89_ETSI][0] = 52,
+ [2][0][RTW89_MKK][0] = 64,
+ [2][0][RTW89_IC][0] = 76,
+ [2][0][RTW89_KCC][0] = 70,
+ [2][0][RTW89_ACMA][0] = 52,
+ [2][0][RTW89_CHILE][0] = 60,
+ [2][0][RTW89_UKRAINE][0] = 52,
+ [2][0][RTW89_MEXICO][0] = 76,
+ [2][0][RTW89_CN][0] = 52,
+ [2][0][RTW89_QATAR][0] = 52,
+ [2][0][RTW89_FCC][1] = 76,
+ [2][0][RTW89_ETSI][1] = 52,
+ [2][0][RTW89_MKK][1] = 64,
+ [2][0][RTW89_IC][1] = 76,
+ [2][0][RTW89_KCC][1] = 70,
+ [2][0][RTW89_ACMA][1] = 52,
+ [2][0][RTW89_CHILE][1] = 60,
+ [2][0][RTW89_UKRAINE][1] = 52,
+ [2][0][RTW89_MEXICO][1] = 76,
+ [2][0][RTW89_CN][1] = 52,
+ [2][0][RTW89_QATAR][1] = 52,
+ [2][0][RTW89_FCC][2] = 78,
+ [2][0][RTW89_ETSI][2] = 52,
+ [2][0][RTW89_MKK][2] = 64,
+ [2][0][RTW89_IC][2] = 78,
+ [2][0][RTW89_KCC][2] = 70,
+ [2][0][RTW89_ACMA][2] = 52,
+ [2][0][RTW89_CHILE][2] = 60,
+ [2][0][RTW89_UKRAINE][2] = 52,
+ [2][0][RTW89_MEXICO][2] = 78,
+ [2][0][RTW89_CN][2] = 52,
+ [2][0][RTW89_QATAR][2] = 52,
+ [2][0][RTW89_FCC][3] = 78,
+ [2][0][RTW89_ETSI][3] = 52,
+ [2][0][RTW89_MKK][3] = 64,
+ [2][0][RTW89_IC][3] = 78,
+ [2][0][RTW89_KCC][3] = 70,
+ [2][0][RTW89_ACMA][3] = 52,
+ [2][0][RTW89_CHILE][3] = 60,
+ [2][0][RTW89_UKRAINE][3] = 52,
+ [2][0][RTW89_MEXICO][3] = 78,
+ [2][0][RTW89_CN][3] = 52,
+ [2][0][RTW89_QATAR][3] = 52,
+ [2][0][RTW89_FCC][4] = 78,
+ [2][0][RTW89_ETSI][4] = 52,
+ [2][0][RTW89_MKK][4] = 64,
+ [2][0][RTW89_IC][4] = 78,
+ [2][0][RTW89_KCC][4] = 70,
+ [2][0][RTW89_ACMA][4] = 52,
+ [2][0][RTW89_CHILE][4] = 60,
+ [2][0][RTW89_UKRAINE][4] = 52,
+ [2][0][RTW89_MEXICO][4] = 78,
+ [2][0][RTW89_CN][4] = 52,
+ [2][0][RTW89_QATAR][4] = 52,
+ [2][0][RTW89_FCC][5] = 78,
+ [2][0][RTW89_ETSI][5] = 52,
+ [2][0][RTW89_MKK][5] = 64,
+ [2][0][RTW89_IC][5] = 78,
+ [2][0][RTW89_KCC][5] = 70,
+ [2][0][RTW89_ACMA][5] = 52,
+ [2][0][RTW89_CHILE][5] = 60,
+ [2][0][RTW89_UKRAINE][5] = 52,
+ [2][0][RTW89_MEXICO][5] = 78,
+ [2][0][RTW89_CN][5] = 52,
+ [2][0][RTW89_QATAR][5] = 52,
+ [2][0][RTW89_FCC][6] = 78,
+ [2][0][RTW89_ETSI][6] = 52,
+ [2][0][RTW89_MKK][6] = 64,
+ [2][0][RTW89_IC][6] = 78,
+ [2][0][RTW89_KCC][6] = 70,
+ [2][0][RTW89_ACMA][6] = 52,
+ [2][0][RTW89_CHILE][6] = 60,
+ [2][0][RTW89_UKRAINE][6] = 52,
+ [2][0][RTW89_MEXICO][6] = 78,
+ [2][0][RTW89_CN][6] = 52,
+ [2][0][RTW89_QATAR][6] = 52,
+ [2][0][RTW89_FCC][7] = 78,
+ [2][0][RTW89_ETSI][7] = 52,
+ [2][0][RTW89_MKK][7] = 64,
+ [2][0][RTW89_IC][7] = 78,
+ [2][0][RTW89_KCC][7] = 70,
+ [2][0][RTW89_ACMA][7] = 52,
+ [2][0][RTW89_CHILE][7] = 60,
+ [2][0][RTW89_UKRAINE][7] = 52,
+ [2][0][RTW89_MEXICO][7] = 78,
+ [2][0][RTW89_CN][7] = 52,
+ [2][0][RTW89_QATAR][7] = 52,
+ [2][0][RTW89_FCC][8] = 78,
+ [2][0][RTW89_ETSI][8] = 52,
+ [2][0][RTW89_MKK][8] = 64,
+ [2][0][RTW89_IC][8] = 78,
+ [2][0][RTW89_KCC][8] = 70,
+ [2][0][RTW89_ACMA][8] = 52,
+ [2][0][RTW89_CHILE][8] = 60,
+ [2][0][RTW89_UKRAINE][8] = 52,
+ [2][0][RTW89_MEXICO][8] = 78,
+ [2][0][RTW89_CN][8] = 52,
+ [2][0][RTW89_QATAR][8] = 52,
+ [2][0][RTW89_FCC][9] = 76,
+ [2][0][RTW89_ETSI][9] = 52,
+ [2][0][RTW89_MKK][9] = 64,
+ [2][0][RTW89_IC][9] = 76,
+ [2][0][RTW89_KCC][9] = 70,
+ [2][0][RTW89_ACMA][9] = 52,
+ [2][0][RTW89_CHILE][9] = 60,
+ [2][0][RTW89_UKRAINE][9] = 52,
+ [2][0][RTW89_MEXICO][9] = 76,
+ [2][0][RTW89_CN][9] = 52,
+ [2][0][RTW89_QATAR][9] = 52,
+ [2][0][RTW89_FCC][10] = 76,
+ [2][0][RTW89_ETSI][10] = 52,
+ [2][0][RTW89_MKK][10] = 64,
+ [2][0][RTW89_IC][10] = 76,
+ [2][0][RTW89_KCC][10] = 70,
+ [2][0][RTW89_ACMA][10] = 52,
+ [2][0][RTW89_CHILE][10] = 60,
+ [2][0][RTW89_UKRAINE][10] = 52,
+ [2][0][RTW89_MEXICO][10] = 76,
+ [2][0][RTW89_CN][10] = 52,
+ [2][0][RTW89_QATAR][10] = 52,
+ [2][0][RTW89_FCC][11] = 68,
+ [2][0][RTW89_ETSI][11] = 52,
+ [2][0][RTW89_MKK][11] = 64,
+ [2][0][RTW89_IC][11] = 68,
+ [2][0][RTW89_KCC][11] = 70,
+ [2][0][RTW89_ACMA][11] = 52,
+ [2][0][RTW89_CHILE][11] = 60,
+ [2][0][RTW89_UKRAINE][11] = 52,
+ [2][0][RTW89_MEXICO][11] = 68,
+ [2][0][RTW89_CN][11] = 52,
+ [2][0][RTW89_QATAR][11] = 52,
+ [2][0][RTW89_FCC][12] = 40,
+ [2][0][RTW89_ETSI][12] = 52,
+ [2][0][RTW89_MKK][12] = 64,
+ [2][0][RTW89_IC][12] = 40,
+ [2][0][RTW89_KCC][12] = 70,
+ [2][0][RTW89_ACMA][12] = 52,
+ [2][0][RTW89_CHILE][12] = 40,
+ [2][0][RTW89_UKRAINE][12] = 52,
+ [2][0][RTW89_MEXICO][12] = 40,
+ [2][0][RTW89_CN][12] = 52,
+ [2][0][RTW89_QATAR][12] = 52,
+ [2][0][RTW89_FCC][13] = 127,
+ [2][0][RTW89_ETSI][13] = 127,
+ [2][0][RTW89_MKK][13] = 127,
+ [2][0][RTW89_IC][13] = 127,
+ [2][0][RTW89_KCC][13] = 127,
+ [2][0][RTW89_ACMA][13] = 127,
+ [2][0][RTW89_CHILE][13] = 127,
+ [2][0][RTW89_UKRAINE][13] = 127,
+ [2][0][RTW89_MEXICO][13] = 127,
+ [2][0][RTW89_CN][13] = 127,
+ [2][0][RTW89_QATAR][13] = 127,
+ [2][1][RTW89_FCC][0] = 68,
+ [2][1][RTW89_ETSI][0] = 40,
+ [2][1][RTW89_MKK][0] = 52,
+ [2][1][RTW89_IC][0] = 68,
+ [2][1][RTW89_KCC][0] = 56,
+ [2][1][RTW89_ACMA][0] = 40,
+ [2][1][RTW89_CHILE][0] = 48,
+ [2][1][RTW89_UKRAINE][0] = 40,
+ [2][1][RTW89_MEXICO][0] = 68,
+ [2][1][RTW89_CN][0] = 40,
+ [2][1][RTW89_QATAR][0] = 40,
+ [2][1][RTW89_FCC][1] = 68,
+ [2][1][RTW89_ETSI][1] = 40,
+ [2][1][RTW89_MKK][1] = 52,
+ [2][1][RTW89_IC][1] = 68,
+ [2][1][RTW89_KCC][1] = 56,
+ [2][1][RTW89_ACMA][1] = 40,
+ [2][1][RTW89_CHILE][1] = 48,
+ [2][1][RTW89_UKRAINE][1] = 40,
+ [2][1][RTW89_MEXICO][1] = 68,
+ [2][1][RTW89_CN][1] = 40,
+ [2][1][RTW89_QATAR][1] = 40,
+ [2][1][RTW89_FCC][2] = 72,
+ [2][1][RTW89_ETSI][2] = 40,
+ [2][1][RTW89_MKK][2] = 52,
+ [2][1][RTW89_IC][2] = 72,
+ [2][1][RTW89_KCC][2] = 56,
+ [2][1][RTW89_ACMA][2] = 40,
+ [2][1][RTW89_CHILE][2] = 48,
+ [2][1][RTW89_UKRAINE][2] = 40,
+ [2][1][RTW89_MEXICO][2] = 72,
+ [2][1][RTW89_CN][2] = 40,
+ [2][1][RTW89_QATAR][2] = 40,
+ [2][1][RTW89_FCC][3] = 76,
+ [2][1][RTW89_ETSI][3] = 40,
+ [2][1][RTW89_MKK][3] = 52,
+ [2][1][RTW89_IC][3] = 76,
+ [2][1][RTW89_KCC][3] = 56,
+ [2][1][RTW89_ACMA][3] = 40,
+ [2][1][RTW89_CHILE][3] = 48,
+ [2][1][RTW89_UKRAINE][3] = 40,
+ [2][1][RTW89_MEXICO][3] = 76,
+ [2][1][RTW89_CN][3] = 40,
+ [2][1][RTW89_QATAR][3] = 40,
+ [2][1][RTW89_FCC][4] = 78,
+ [2][1][RTW89_ETSI][4] = 40,
+ [2][1][RTW89_MKK][4] = 52,
+ [2][1][RTW89_IC][4] = 78,
+ [2][1][RTW89_KCC][4] = 56,
+ [2][1][RTW89_ACMA][4] = 40,
+ [2][1][RTW89_CHILE][4] = 48,
+ [2][1][RTW89_UKRAINE][4] = 40,
+ [2][1][RTW89_MEXICO][4] = 78,
+ [2][1][RTW89_CN][4] = 40,
+ [2][1][RTW89_QATAR][4] = 40,
+ [2][1][RTW89_FCC][5] = 78,
+ [2][1][RTW89_ETSI][5] = 40,
+ [2][1][RTW89_MKK][5] = 52,
+ [2][1][RTW89_IC][5] = 78,
+ [2][1][RTW89_KCC][5] = 56,
+ [2][1][RTW89_ACMA][5] = 40,
+ [2][1][RTW89_CHILE][5] = 48,
+ [2][1][RTW89_UKRAINE][5] = 40,
+ [2][1][RTW89_MEXICO][5] = 78,
+ [2][1][RTW89_CN][5] = 40,
+ [2][1][RTW89_QATAR][5] = 40,
+ [2][1][RTW89_FCC][6] = 78,
+ [2][1][RTW89_ETSI][6] = 40,
+ [2][1][RTW89_MKK][6] = 52,
+ [2][1][RTW89_IC][6] = 78,
+ [2][1][RTW89_KCC][6] = 56,
+ [2][1][RTW89_ACMA][6] = 40,
+ [2][1][RTW89_CHILE][6] = 48,
+ [2][1][RTW89_UKRAINE][6] = 40,
+ [2][1][RTW89_MEXICO][6] = 78,
+ [2][1][RTW89_CN][6] = 40,
+ [2][1][RTW89_QATAR][6] = 40,
+ [2][1][RTW89_FCC][7] = 78,
+ [2][1][RTW89_ETSI][7] = 40,
+ [2][1][RTW89_MKK][7] = 52,
+ [2][1][RTW89_IC][7] = 78,
+ [2][1][RTW89_KCC][7] = 56,
+ [2][1][RTW89_ACMA][7] = 40,
+ [2][1][RTW89_CHILE][7] = 48,
+ [2][1][RTW89_UKRAINE][7] = 40,
+ [2][1][RTW89_MEXICO][7] = 78,
+ [2][1][RTW89_CN][7] = 40,
+ [2][1][RTW89_QATAR][7] = 40,
+ [2][1][RTW89_FCC][8] = 74,
+ [2][1][RTW89_ETSI][8] = 40,
+ [2][1][RTW89_MKK][8] = 52,
+ [2][1][RTW89_IC][8] = 74,
+ [2][1][RTW89_KCC][8] = 56,
+ [2][1][RTW89_ACMA][8] = 40,
+ [2][1][RTW89_CHILE][8] = 48,
+ [2][1][RTW89_UKRAINE][8] = 40,
+ [2][1][RTW89_MEXICO][8] = 74,
+ [2][1][RTW89_CN][8] = 40,
+ [2][1][RTW89_QATAR][8] = 40,
+ [2][1][RTW89_FCC][9] = 70,
+ [2][1][RTW89_ETSI][9] = 40,
+ [2][1][RTW89_MKK][9] = 52,
+ [2][1][RTW89_IC][9] = 70,
+ [2][1][RTW89_KCC][9] = 56,
+ [2][1][RTW89_ACMA][9] = 40,
+ [2][1][RTW89_CHILE][9] = 48,
+ [2][1][RTW89_UKRAINE][9] = 40,
+ [2][1][RTW89_MEXICO][9] = 70,
+ [2][1][RTW89_CN][9] = 40,
+ [2][1][RTW89_QATAR][9] = 40,
+ [2][1][RTW89_FCC][10] = 70,
+ [2][1][RTW89_ETSI][10] = 40,
+ [2][1][RTW89_MKK][10] = 52,
+ [2][1][RTW89_IC][10] = 70,
+ [2][1][RTW89_KCC][10] = 56,
+ [2][1][RTW89_ACMA][10] = 40,
+ [2][1][RTW89_CHILE][10] = 48,
+ [2][1][RTW89_UKRAINE][10] = 40,
+ [2][1][RTW89_MEXICO][10] = 70,
+ [2][1][RTW89_CN][10] = 40,
+ [2][1][RTW89_QATAR][10] = 40,
+ [2][1][RTW89_FCC][11] = 48,
+ [2][1][RTW89_ETSI][11] = 40,
+ [2][1][RTW89_MKK][11] = 52,
+ [2][1][RTW89_IC][11] = 48,
+ [2][1][RTW89_KCC][11] = 56,
+ [2][1][RTW89_ACMA][11] = 40,
+ [2][1][RTW89_CHILE][11] = 48,
+ [2][1][RTW89_UKRAINE][11] = 40,
+ [2][1][RTW89_MEXICO][11] = 48,
+ [2][1][RTW89_CN][11] = 40,
+ [2][1][RTW89_QATAR][11] = 40,
+ [2][1][RTW89_FCC][12] = 26,
+ [2][1][RTW89_ETSI][12] = 40,
+ [2][1][RTW89_MKK][12] = 52,
+ [2][1][RTW89_IC][12] = 26,
+ [2][1][RTW89_KCC][12] = 56,
+ [2][1][RTW89_ACMA][12] = 40,
+ [2][1][RTW89_CHILE][12] = 26,
+ [2][1][RTW89_UKRAINE][12] = 40,
+ [2][1][RTW89_MEXICO][12] = 26,
+ [2][1][RTW89_CN][12] = 40,
+ [2][1][RTW89_QATAR][12] = 40,
+ [2][1][RTW89_FCC][13] = 127,
+ [2][1][RTW89_ETSI][13] = 127,
+ [2][1][RTW89_MKK][13] = 127,
+ [2][1][RTW89_IC][13] = 127,
+ [2][1][RTW89_KCC][13] = 127,
+ [2][1][RTW89_ACMA][13] = 127,
+ [2][1][RTW89_CHILE][13] = 127,
+ [2][1][RTW89_UKRAINE][13] = 127,
+ [2][1][RTW89_MEXICO][13] = 127,
+ [2][1][RTW89_CN][13] = 127,
+ [2][1][RTW89_QATAR][13] = 127,
};
const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
- [0][0][0][0] = 22,
- [0][0][0][2] = 22,
- [0][0][0][4] = 22,
- [0][0][0][6] = 22,
- [0][0][0][8] = 24,
- [0][0][0][10] = 24,
- [0][0][0][12] = 24,
- [0][0][0][14] = 24,
- [0][0][0][15] = 24,
- [0][0][0][17] = 24,
- [0][0][0][19] = 24,
- [0][0][0][21] = 24,
- [0][0][0][23] = 24,
- [0][0][0][25] = 24,
- [0][0][0][27] = 24,
- [0][0][0][29] = 24,
- [0][0][0][31] = 24,
- [0][0][0][33] = 24,
- [0][0][0][35] = 24,
- [0][0][0][37] = 24,
- [0][0][0][38] = 28,
- [0][0][0][40] = 28,
- [0][0][0][42] = 28,
- [0][0][0][44] = 28,
- [0][0][0][46] = 28,
- [0][1][0][0] = 8,
- [0][1][0][2] = 8,
- [0][1][0][4] = 8,
- [0][1][0][6] = 8,
- [0][1][0][8] = 12,
- [0][1][0][10] = 12,
- [0][1][0][12] = 12,
- [0][1][0][14] = 12,
- [0][1][0][15] = 12,
- [0][1][0][17] = 12,
- [0][1][0][19] = 12,
- [0][1][0][21] = 12,
- [0][1][0][23] = 12,
- [0][1][0][25] = 12,
- [0][1][0][27] = 12,
- [0][1][0][29] = 12,
- [0][1][0][31] = 12,
- [0][1][0][33] = 12,
- [0][1][0][35] = 12,
- [0][1][0][37] = 12,
- [0][1][0][38] = 16,
- [0][1][0][40] = 16,
- [0][1][0][42] = 16,
- [0][1][0][44] = 16,
- [0][1][0][46] = 16,
- [1][0][0][0] = 30,
- [1][0][0][2] = 30,
- [1][0][0][4] = 30,
- [1][0][0][6] = 30,
- [1][0][0][8] = 36,
- [1][0][0][10] = 36,
- [1][0][0][12] = 36,
- [1][0][0][14] = 36,
- [1][0][0][15] = 36,
- [1][0][0][17] = 36,
- [1][0][0][19] = 36,
- [1][0][0][21] = 36,
- [1][0][0][23] = 36,
- [1][0][0][25] = 36,
- [1][0][0][27] = 36,
- [1][0][0][29] = 36,
- [1][0][0][31] = 36,
- [1][0][0][33] = 36,
- [1][0][0][35] = 36,
- [1][0][0][37] = 36,
- [1][0][0][38] = 28,
- [1][0][0][40] = 28,
- [1][0][0][42] = 28,
- [1][0][0][44] = 28,
- [1][0][0][46] = 28,
- [1][1][0][0] = 18,
- [1][1][0][2] = 18,
- [1][1][0][4] = 18,
- [1][1][0][6] = 18,
- [1][1][0][8] = 22,
- [1][1][0][10] = 22,
- [1][1][0][12] = 22,
- [1][1][0][14] = 22,
- [1][1][0][15] = 22,
- [1][1][0][17] = 22,
- [1][1][0][19] = 22,
- [1][1][0][21] = 22,
- [1][1][0][23] = 22,
- [1][1][0][25] = 22,
- [1][1][0][27] = 22,
- [1][1][0][29] = 22,
- [1][1][0][31] = 22,
- [1][1][0][33] = 22,
- [1][1][0][35] = 22,
- [1][1][0][37] = 22,
- [1][1][0][38] = 16,
- [1][1][0][40] = 16,
- [1][1][0][42] = 16,
- [1][1][0][44] = 16,
- [1][1][0][46] = 16,
- [2][0][0][0] = 30,
- [2][0][0][2] = 30,
- [2][0][0][4] = 30,
- [2][0][0][6] = 30,
- [2][0][0][8] = 46,
- [2][0][0][10] = 46,
- [2][0][0][12] = 46,
- [2][0][0][14] = 46,
- [2][0][0][15] = 46,
- [2][0][0][17] = 46,
- [2][0][0][19] = 46,
- [2][0][0][21] = 46,
- [2][0][0][23] = 46,
- [2][0][0][25] = 46,
- [2][0][0][27] = 46,
- [2][0][0][29] = 46,
- [2][0][0][31] = 46,
- [2][0][0][33] = 46,
- [2][0][0][35] = 46,
- [2][0][0][37] = 46,
- [2][0][0][38] = 28,
- [2][0][0][40] = 28,
- [2][0][0][42] = 28,
- [2][0][0][44] = 28,
- [2][0][0][46] = 28,
- [2][1][0][0] = 18,
- [2][1][0][2] = 18,
- [2][1][0][4] = 18,
- [2][1][0][6] = 18,
- [2][1][0][8] = 32,
- [2][1][0][10] = 32,
- [2][1][0][12] = 32,
- [2][1][0][14] = 32,
- [2][1][0][15] = 32,
- [2][1][0][17] = 32,
- [2][1][0][19] = 32,
- [2][1][0][21] = 32,
- [2][1][0][23] = 32,
- [2][1][0][25] = 32,
- [2][1][0][27] = 32,
- [2][1][0][29] = 32,
- [2][1][0][31] = 32,
- [2][1][0][33] = 32,
- [2][1][0][35] = 32,
- [2][1][0][37] = 32,
- [2][1][0][38] = 16,
- [2][1][0][40] = 16,
- [2][1][0][42] = 16,
- [2][1][0][44] = 16,
- [2][1][0][46] = 16,
- [0][0][2][0] = 48,
- [0][0][1][0] = 24,
- [0][0][3][0] = 26,
- [0][0][5][0] = 22,
- [0][0][6][0] = 24,
- [0][0][9][0] = 24,
- [0][0][8][0] = 30,
- [0][0][11][0] = 24,
- [0][0][2][2] = 48,
- [0][0][1][2] = 24,
- [0][0][3][2] = 26,
- [0][0][5][2] = 22,
- [0][0][6][2] = 24,
- [0][0][9][2] = 24,
- [0][0][8][2] = 30,
- [0][0][11][2] = 24,
- [0][0][2][4] = 48,
- [0][0][1][4] = 24,
- [0][0][3][4] = 26,
- [0][0][5][4] = 22,
- [0][0][6][4] = 24,
- [0][0][9][4] = 24,
- [0][0][8][4] = 30,
- [0][0][11][4] = 24,
- [0][0][2][6] = 48,
- [0][0][1][6] = 24,
- [0][0][3][6] = 26,
- [0][0][5][6] = 22,
- [0][0][6][6] = 24,
- [0][0][9][6] = 24,
- [0][0][8][6] = 30,
- [0][0][11][6] = 24,
- [0][0][2][8] = 48,
- [0][0][1][8] = 24,
- [0][0][3][8] = 26,
- [0][0][5][8] = 48,
- [0][0][6][8] = 24,
- [0][0][9][8] = 24,
- [0][0][8][8] = 54,
- [0][0][11][8] = 24,
- [0][0][2][10] = 48,
- [0][0][1][10] = 24,
- [0][0][3][10] = 26,
- [0][0][5][10] = 48,
- [0][0][6][10] = 24,
- [0][0][9][10] = 24,
- [0][0][8][10] = 54,
- [0][0][11][10] = 24,
- [0][0][2][12] = 48,
- [0][0][1][12] = 24,
- [0][0][3][12] = 26,
- [0][0][5][12] = 48,
- [0][0][6][12] = 24,
- [0][0][9][12] = 24,
- [0][0][8][12] = 54,
- [0][0][11][12] = 24,
- [0][0][2][14] = 48,
- [0][0][1][14] = 24,
- [0][0][3][14] = 26,
- [0][0][5][14] = 48,
- [0][0][6][14] = 24,
- [0][0][9][14] = 24,
- [0][0][8][14] = 54,
- [0][0][11][14] = 24,
- [0][0][2][15] = 48,
- [0][0][1][15] = 24,
- [0][0][3][15] = 44,
- [0][0][5][15] = 48,
- [0][0][6][15] = 24,
- [0][0][9][15] = 24,
- [0][0][8][15] = 54,
- [0][0][11][15] = 24,
- [0][0][2][17] = 48,
- [0][0][1][17] = 24,
- [0][0][3][17] = 44,
- [0][0][5][17] = 48,
- [0][0][6][17] = 24,
- [0][0][9][17] = 24,
- [0][0][8][17] = 54,
- [0][0][11][17] = 24,
- [0][0][2][19] = 48,
- [0][0][1][19] = 24,
- [0][0][3][19] = 44,
- [0][0][5][19] = 48,
- [0][0][6][19] = 24,
- [0][0][9][19] = 24,
- [0][0][8][19] = 54,
- [0][0][11][19] = 24,
- [0][0][2][21] = 48,
- [0][0][1][21] = 24,
- [0][0][3][21] = 44,
- [0][0][5][21] = 48,
- [0][0][6][21] = 24,
- [0][0][9][21] = 24,
- [0][0][8][21] = 54,
- [0][0][11][21] = 24,
- [0][0][2][23] = 48,
- [0][0][1][23] = 24,
- [0][0][3][23] = 44,
- [0][0][5][23] = 48,
- [0][0][6][23] = 24,
- [0][0][9][23] = 24,
- [0][0][8][23] = 54,
- [0][0][11][23] = 24,
- [0][0][2][25] = 48,
- [0][0][1][25] = 24,
- [0][0][3][25] = 44,
- [0][0][5][25] = 127,
- [0][0][6][25] = 24,
- [0][0][9][25] = 127,
- [0][0][8][25] = 54,
- [0][0][11][25] = 24,
- [0][0][2][27] = 48,
- [0][0][1][27] = 24,
- [0][0][3][27] = 44,
- [0][0][5][27] = 127,
- [0][0][6][27] = 24,
- [0][0][9][27] = 127,
- [0][0][8][27] = 54,
- [0][0][11][27] = 24,
- [0][0][2][29] = 48,
- [0][0][1][29] = 24,
- [0][0][3][29] = 44,
- [0][0][5][29] = 127,
- [0][0][6][29] = 24,
- [0][0][9][29] = 127,
- [0][0][8][29] = 54,
- [0][0][11][29] = 24,
- [0][0][2][31] = 48,
- [0][0][1][31] = 24,
- [0][0][3][31] = 44,
- [0][0][5][31] = 48,
- [0][0][6][31] = 24,
- [0][0][9][31] = 24,
- [0][0][8][31] = 54,
- [0][0][11][31] = 24,
- [0][0][2][33] = 48,
- [0][0][1][33] = 24,
- [0][0][3][33] = 44,
- [0][0][5][33] = 48,
- [0][0][6][33] = 24,
- [0][0][9][33] = 24,
- [0][0][8][33] = 54,
- [0][0][11][33] = 24,
- [0][0][2][35] = 48,
- [0][0][1][35] = 24,
- [0][0][3][35] = 44,
- [0][0][5][35] = 48,
- [0][0][6][35] = 24,
- [0][0][9][35] = 24,
- [0][0][8][35] = 54,
- [0][0][11][35] = 24,
- [0][0][2][37] = 48,
- [0][0][1][37] = 127,
- [0][0][3][37] = 44,
- [0][0][5][37] = 48,
- [0][0][6][37] = 24,
- [0][0][9][37] = 48,
- [0][0][8][37] = 54,
- [0][0][11][37] = 127,
- [0][0][2][38] = 76,
- [0][0][1][38] = 28,
- [0][0][3][38] = 127,
- [0][0][5][38] = 76,
- [0][0][6][38] = 28,
- [0][0][9][38] = 76,
- [0][0][8][38] = 54,
- [0][0][11][38] = 28,
- [0][0][2][40] = 76,
- [0][0][1][40] = 28,
- [0][0][3][40] = 127,
- [0][0][5][40] = 76,
- [0][0][6][40] = 28,
- [0][0][9][40] = 76,
- [0][0][8][40] = 54,
- [0][0][11][40] = 28,
- [0][0][2][42] = 76,
- [0][0][1][42] = 28,
- [0][0][3][42] = 127,
- [0][0][5][42] = 76,
- [0][0][6][42] = 28,
- [0][0][9][42] = 76,
- [0][0][8][42] = 54,
- [0][0][11][42] = 28,
- [0][0][2][44] = 76,
- [0][0][1][44] = 28,
- [0][0][3][44] = 127,
- [0][0][5][44] = 76,
- [0][0][6][44] = 28,
- [0][0][9][44] = 76,
- [0][0][8][44] = 54,
- [0][0][11][44] = 28,
- [0][0][2][46] = 76,
- [0][0][1][46] = 28,
- [0][0][3][46] = 127,
- [0][0][5][46] = 76,
- [0][0][6][46] = 28,
- [0][0][9][46] = 76,
- [0][0][8][46] = 54,
- [0][0][11][46] = 28,
- [0][1][2][0] = 36,
- [0][1][1][0] = 12,
- [0][1][3][0] = 14,
- [0][1][5][0] = 8,
- [0][1][6][0] = 12,
- [0][1][9][0] = 12,
- [0][1][8][0] = 18,
- [0][1][11][0] = 12,
- [0][1][2][2] = 36,
- [0][1][1][2] = 12,
- [0][1][3][2] = 14,
- [0][1][5][2] = 8,
- [0][1][6][2] = 12,
- [0][1][9][2] = 12,
- [0][1][8][2] = 18,
- [0][1][11][2] = 12,
- [0][1][2][4] = 36,
- [0][1][1][4] = 12,
- [0][1][3][4] = 14,
- [0][1][5][4] = 8,
- [0][1][6][4] = 12,
- [0][1][9][4] = 12,
- [0][1][8][4] = 18,
- [0][1][11][4] = 12,
- [0][1][2][6] = 36,
- [0][1][1][6] = 12,
- [0][1][3][6] = 14,
- [0][1][5][6] = 8,
- [0][1][6][6] = 12,
- [0][1][9][6] = 12,
- [0][1][8][6] = 18,
- [0][1][11][6] = 12,
- [0][1][2][8] = 36,
- [0][1][1][8] = 12,
- [0][1][3][8] = 14,
- [0][1][5][8] = 36,
- [0][1][6][8] = 12,
- [0][1][9][8] = 12,
- [0][1][8][8] = 42,
- [0][1][11][8] = 12,
- [0][1][2][10] = 36,
- [0][1][1][10] = 12,
- [0][1][3][10] = 14,
- [0][1][5][10] = 36,
- [0][1][6][10] = 12,
- [0][1][9][10] = 12,
- [0][1][8][10] = 42,
- [0][1][11][10] = 12,
- [0][1][2][12] = 36,
- [0][1][1][12] = 12,
- [0][1][3][12] = 14,
- [0][1][5][12] = 36,
- [0][1][6][12] = 12,
- [0][1][9][12] = 12,
- [0][1][8][12] = 42,
- [0][1][11][12] = 12,
- [0][1][2][14] = 36,
- [0][1][1][14] = 12,
- [0][1][3][14] = 14,
- [0][1][5][14] = 36,
- [0][1][6][14] = 12,
- [0][1][9][14] = 12,
- [0][1][8][14] = 42,
- [0][1][11][14] = 12,
- [0][1][2][15] = 36,
- [0][1][1][15] = 12,
- [0][1][3][15] = 32,
- [0][1][5][15] = 36,
- [0][1][6][15] = 12,
- [0][1][9][15] = 12,
- [0][1][8][15] = 42,
- [0][1][11][15] = 12,
- [0][1][2][17] = 36,
- [0][1][1][17] = 12,
- [0][1][3][17] = 32,
- [0][1][5][17] = 36,
- [0][1][6][17] = 12,
- [0][1][9][17] = 12,
- [0][1][8][17] = 42,
- [0][1][11][17] = 12,
- [0][1][2][19] = 36,
- [0][1][1][19] = 12,
- [0][1][3][19] = 32,
- [0][1][5][19] = 36,
- [0][1][6][19] = 12,
- [0][1][9][19] = 12,
- [0][1][8][19] = 42,
- [0][1][11][19] = 12,
- [0][1][2][21] = 36,
- [0][1][1][21] = 12,
- [0][1][3][21] = 32,
- [0][1][5][21] = 36,
- [0][1][6][21] = 12,
- [0][1][9][21] = 12,
- [0][1][8][21] = 42,
- [0][1][11][21] = 12,
- [0][1][2][23] = 36,
- [0][1][1][23] = 12,
- [0][1][3][23] = 32,
- [0][1][5][23] = 36,
- [0][1][6][23] = 12,
- [0][1][9][23] = 12,
- [0][1][8][23] = 42,
- [0][1][11][23] = 12,
- [0][1][2][25] = 36,
- [0][1][1][25] = 12,
- [0][1][3][25] = 32,
- [0][1][5][25] = 127,
- [0][1][6][25] = 12,
- [0][1][9][25] = 127,
- [0][1][8][25] = 42,
- [0][1][11][25] = 12,
- [0][1][2][27] = 36,
- [0][1][1][27] = 12,
- [0][1][3][27] = 32,
- [0][1][5][27] = 127,
- [0][1][6][27] = 12,
- [0][1][9][27] = 127,
- [0][1][8][27] = 42,
- [0][1][11][27] = 12,
- [0][1][2][29] = 36,
- [0][1][1][29] = 12,
- [0][1][3][29] = 32,
- [0][1][5][29] = 127,
- [0][1][6][29] = 12,
- [0][1][9][29] = 127,
- [0][1][8][29] = 42,
- [0][1][11][29] = 12,
- [0][1][2][31] = 36,
- [0][1][1][31] = 12,
- [0][1][3][31] = 32,
- [0][1][5][31] = 36,
- [0][1][6][31] = 12,
- [0][1][9][31] = 12,
- [0][1][8][31] = 42,
- [0][1][11][31] = 12,
- [0][1][2][33] = 36,
- [0][1][1][33] = 12,
- [0][1][3][33] = 32,
- [0][1][5][33] = 36,
- [0][1][6][33] = 12,
- [0][1][9][33] = 12,
- [0][1][8][33] = 42,
- [0][1][11][33] = 12,
- [0][1][2][35] = 36,
- [0][1][1][35] = 12,
- [0][1][3][35] = 32,
- [0][1][5][35] = 36,
- [0][1][6][35] = 12,
- [0][1][9][35] = 12,
- [0][1][8][35] = 42,
- [0][1][11][35] = 12,
- [0][1][2][37] = 36,
- [0][1][1][37] = 127,
- [0][1][3][37] = 32,
- [0][1][5][37] = 36,
- [0][1][6][37] = 12,
- [0][1][9][37] = 36,
- [0][1][8][37] = 42,
- [0][1][11][37] = 127,
- [0][1][2][38] = 72,
- [0][1][1][38] = 16,
- [0][1][3][38] = 127,
- [0][1][5][38] = 72,
- [0][1][6][38] = 16,
- [0][1][9][38] = 76,
- [0][1][8][38] = 42,
- [0][1][11][38] = 16,
- [0][1][2][40] = 76,
- [0][1][1][40] = 16,
- [0][1][3][40] = 127,
- [0][1][5][40] = 76,
- [0][1][6][40] = 16,
- [0][1][9][40] = 76,
- [0][1][8][40] = 42,
- [0][1][11][40] = 16,
- [0][1][2][42] = 76,
- [0][1][1][42] = 16,
- [0][1][3][42] = 127,
- [0][1][5][42] = 76,
- [0][1][6][42] = 16,
- [0][1][9][42] = 76,
- [0][1][8][42] = 42,
- [0][1][11][42] = 16,
- [0][1][2][44] = 76,
- [0][1][1][44] = 16,
- [0][1][3][44] = 127,
- [0][1][5][44] = 76,
- [0][1][6][44] = 16,
- [0][1][9][44] = 76,
- [0][1][8][44] = 42,
- [0][1][11][44] = 16,
- [0][1][2][46] = 76,
- [0][1][1][46] = 16,
- [0][1][3][46] = 127,
- [0][1][5][46] = 76,
- [0][1][6][46] = 16,
- [0][1][9][46] = 76,
- [0][1][8][46] = 42,
- [0][1][11][46] = 16,
- [1][0][2][0] = 62,
- [1][0][1][0] = 36,
- [1][0][3][0] = 36,
- [1][0][5][0] = 34,
- [1][0][6][0] = 36,
- [1][0][9][0] = 36,
- [1][0][8][0] = 30,
- [1][0][11][0] = 36,
- [1][0][2][2] = 62,
- [1][0][1][2] = 36,
- [1][0][3][2] = 36,
- [1][0][5][2] = 34,
- [1][0][6][2] = 36,
- [1][0][9][2] = 36,
- [1][0][8][2] = 30,
- [1][0][11][2] = 36,
- [1][0][2][4] = 62,
- [1][0][1][4] = 36,
- [1][0][3][4] = 36,
- [1][0][5][4] = 34,
- [1][0][6][4] = 36,
- [1][0][9][4] = 36,
- [1][0][8][4] = 30,
- [1][0][11][4] = 36,
- [1][0][2][6] = 62,
- [1][0][1][6] = 36,
- [1][0][3][6] = 36,
- [1][0][5][6] = 34,
- [1][0][6][6] = 36,
- [1][0][9][6] = 36,
- [1][0][8][6] = 30,
- [1][0][11][6] = 36,
- [1][0][2][8] = 62,
- [1][0][1][8] = 36,
- [1][0][3][8] = 36,
- [1][0][5][8] = 62,
- [1][0][6][8] = 36,
- [1][0][9][8] = 36,
- [1][0][8][8] = 54,
- [1][0][11][8] = 36,
- [1][0][2][10] = 62,
- [1][0][1][10] = 36,
- [1][0][3][10] = 36,
- [1][0][5][10] = 62,
- [1][0][6][10] = 36,
- [1][0][9][10] = 36,
- [1][0][8][10] = 54,
- [1][0][11][10] = 36,
- [1][0][2][12] = 62,
- [1][0][1][12] = 36,
- [1][0][3][12] = 36,
- [1][0][5][12] = 62,
- [1][0][6][12] = 36,
- [1][0][9][12] = 36,
- [1][0][8][12] = 54,
- [1][0][11][12] = 36,
- [1][0][2][14] = 62,
- [1][0][1][14] = 36,
- [1][0][3][14] = 36,
- [1][0][5][14] = 62,
- [1][0][6][14] = 36,
- [1][0][9][14] = 36,
- [1][0][8][14] = 54,
- [1][0][11][14] = 36,
- [1][0][2][15] = 62,
- [1][0][1][15] = 36,
- [1][0][3][15] = 58,
- [1][0][5][15] = 62,
- [1][0][6][15] = 36,
- [1][0][9][15] = 36,
- [1][0][8][15] = 54,
- [1][0][11][15] = 36,
- [1][0][2][17] = 62,
- [1][0][1][17] = 36,
- [1][0][3][17] = 58,
- [1][0][5][17] = 62,
- [1][0][6][17] = 36,
- [1][0][9][17] = 36,
- [1][0][8][17] = 54,
- [1][0][11][17] = 36,
- [1][0][2][19] = 62,
- [1][0][1][19] = 36,
- [1][0][3][19] = 58,
- [1][0][5][19] = 62,
- [1][0][6][19] = 36,
- [1][0][9][19] = 36,
- [1][0][8][19] = 54,
- [1][0][11][19] = 36,
- [1][0][2][21] = 62,
- [1][0][1][21] = 36,
- [1][0][3][21] = 58,
- [1][0][5][21] = 62,
- [1][0][6][21] = 36,
- [1][0][9][21] = 36,
- [1][0][8][21] = 54,
- [1][0][11][21] = 36,
- [1][0][2][23] = 62,
- [1][0][1][23] = 36,
- [1][0][3][23] = 58,
- [1][0][5][23] = 62,
- [1][0][6][23] = 36,
- [1][0][9][23] = 36,
- [1][0][8][23] = 54,
- [1][0][11][23] = 36,
- [1][0][2][25] = 62,
- [1][0][1][25] = 36,
- [1][0][3][25] = 58,
- [1][0][5][25] = 127,
- [1][0][6][25] = 36,
- [1][0][9][25] = 127,
- [1][0][8][25] = 54,
- [1][0][11][25] = 36,
- [1][0][2][27] = 62,
- [1][0][1][27] = 36,
- [1][0][3][27] = 58,
- [1][0][5][27] = 127,
- [1][0][6][27] = 36,
- [1][0][9][27] = 127,
- [1][0][8][27] = 54,
- [1][0][11][27] = 36,
- [1][0][2][29] = 62,
- [1][0][1][29] = 36,
- [1][0][3][29] = 58,
- [1][0][5][29] = 127,
- [1][0][6][29] = 36,
- [1][0][9][29] = 127,
- [1][0][8][29] = 54,
- [1][0][11][29] = 36,
- [1][0][2][31] = 62,
- [1][0][1][31] = 36,
- [1][0][3][31] = 58,
- [1][0][5][31] = 62,
- [1][0][6][31] = 36,
- [1][0][9][31] = 36,
- [1][0][8][31] = 54,
- [1][0][11][31] = 36,
- [1][0][2][33] = 62,
- [1][0][1][33] = 36,
- [1][0][3][33] = 58,
- [1][0][5][33] = 62,
- [1][0][6][33] = 36,
- [1][0][9][33] = 36,
- [1][0][8][33] = 54,
- [1][0][11][33] = 36,
- [1][0][2][35] = 62,
- [1][0][1][35] = 36,
- [1][0][3][35] = 58,
- [1][0][5][35] = 62,
- [1][0][6][35] = 36,
- [1][0][9][35] = 36,
- [1][0][8][35] = 54,
- [1][0][11][35] = 36,
- [1][0][2][37] = 56,
- [1][0][1][37] = 62,
- [1][0][3][37] = 127,
- [1][0][5][37] = 58,
- [1][0][6][37] = 62,
- [1][0][9][37] = 36,
- [1][0][8][37] = 62,
- [1][0][11][37] = 54,
- [1][0][2][38] = 76,
- [1][0][1][38] = 28,
- [1][0][3][38] = 127,
- [1][0][5][38] = 76,
- [1][0][6][38] = 28,
- [1][0][9][38] = 76,
- [1][0][8][38] = 54,
- [1][0][11][38] = 28,
- [1][0][2][40] = 76,
- [1][0][1][40] = 28,
- [1][0][3][40] = 127,
- [1][0][5][40] = 76,
- [1][0][6][40] = 28,
- [1][0][9][40] = 76,
- [1][0][8][40] = 54,
- [1][0][11][40] = 28,
- [1][0][2][42] = 76,
- [1][0][1][42] = 28,
- [1][0][3][42] = 127,
- [1][0][5][42] = 76,
- [1][0][6][42] = 28,
- [1][0][9][42] = 76,
- [1][0][8][42] = 54,
- [1][0][11][42] = 28,
- [1][0][2][44] = 76,
- [1][0][1][44] = 28,
- [1][0][3][44] = 127,
- [1][0][5][44] = 76,
- [1][0][6][44] = 28,
- [1][0][9][44] = 76,
- [1][0][8][44] = 54,
- [1][0][11][44] = 28,
- [1][0][2][46] = 76,
- [1][0][1][46] = 28,
- [1][0][3][46] = 127,
- [1][0][5][46] = 76,
- [1][0][6][46] = 28,
- [1][0][9][46] = 76,
- [1][0][8][46] = 54,
- [1][0][11][46] = 28,
- [1][1][2][0] = 46,
- [1][1][1][0] = 22,
- [1][1][3][0] = 24,
- [1][1][5][0] = 18,
- [1][1][6][0] = 22,
- [1][1][9][0] = 22,
- [1][1][8][0] = 18,
- [1][1][11][0] = 22,
- [1][1][2][2] = 46,
- [1][1][1][2] = 22,
- [1][1][3][2] = 24,
- [1][1][5][2] = 18,
- [1][1][6][2] = 22,
- [1][1][9][2] = 22,
- [1][1][8][2] = 18,
- [1][1][11][2] = 22,
- [1][1][2][4] = 46,
- [1][1][1][4] = 22,
- [1][1][3][4] = 24,
- [1][1][5][4] = 18,
- [1][1][6][4] = 22,
- [1][1][9][4] = 22,
- [1][1][8][4] = 18,
- [1][1][11][4] = 22,
- [1][1][2][6] = 46,
- [1][1][1][6] = 22,
- [1][1][3][6] = 24,
- [1][1][5][6] = 18,
- [1][1][6][6] = 22,
- [1][1][9][6] = 22,
- [1][1][8][6] = 18,
- [1][1][11][6] = 22,
- [1][1][2][8] = 46,
- [1][1][1][8] = 22,
- [1][1][3][8] = 24,
- [1][1][5][8] = 46,
- [1][1][6][8] = 22,
- [1][1][9][8] = 22,
- [1][1][8][8] = 42,
- [1][1][11][8] = 22,
- [1][1][2][10] = 46,
- [1][1][1][10] = 22,
- [1][1][3][10] = 24,
- [1][1][5][10] = 46,
- [1][1][6][10] = 22,
- [1][1][9][10] = 22,
- [1][1][8][10] = 42,
- [1][1][11][10] = 22,
- [1][1][2][12] = 46,
- [1][1][1][12] = 22,
- [1][1][3][12] = 24,
- [1][1][5][12] = 46,
- [1][1][6][12] = 22,
- [1][1][9][12] = 22,
- [1][1][8][12] = 42,
- [1][1][11][12] = 22,
- [1][1][2][14] = 46,
- [1][1][1][14] = 22,
- [1][1][3][14] = 24,
- [1][1][5][14] = 46,
- [1][1][6][14] = 22,
- [1][1][9][14] = 22,
- [1][1][8][14] = 42,
- [1][1][11][14] = 22,
- [1][1][2][15] = 46,
- [1][1][1][15] = 22,
- [1][1][3][15] = 46,
- [1][1][5][15] = 46,
- [1][1][6][15] = 22,
- [1][1][9][15] = 22,
- [1][1][8][15] = 42,
- [1][1][11][15] = 22,
- [1][1][2][17] = 46,
- [1][1][1][17] = 22,
- [1][1][3][17] = 46,
- [1][1][5][17] = 46,
- [1][1][6][17] = 22,
- [1][1][9][17] = 22,
- [1][1][8][17] = 42,
- [1][1][11][17] = 22,
- [1][1][2][19] = 46,
- [1][1][1][19] = 22,
- [1][1][3][19] = 46,
- [1][1][5][19] = 46,
- [1][1][6][19] = 22,
- [1][1][9][19] = 22,
- [1][1][8][19] = 42,
- [1][1][11][19] = 22,
- [1][1][2][21] = 46,
- [1][1][1][21] = 22,
- [1][1][3][21] = 46,
- [1][1][5][21] = 46,
- [1][1][6][21] = 22,
- [1][1][9][21] = 22,
- [1][1][8][21] = 42,
- [1][1][11][21] = 22,
- [1][1][2][23] = 46,
- [1][1][1][23] = 22,
- [1][1][3][23] = 46,
- [1][1][5][23] = 46,
- [1][1][6][23] = 22,
- [1][1][9][23] = 22,
- [1][1][8][23] = 42,
- [1][1][11][23] = 22,
- [1][1][2][25] = 46,
- [1][1][1][25] = 22,
- [1][1][3][25] = 46,
- [1][1][5][25] = 127,
- [1][1][6][25] = 22,
- [1][1][9][25] = 127,
- [1][1][8][25] = 42,
- [1][1][11][25] = 22,
- [1][1][2][27] = 46,
- [1][1][1][27] = 22,
- [1][1][3][27] = 46,
- [1][1][5][27] = 127,
- [1][1][6][27] = 22,
- [1][1][9][27] = 127,
- [1][1][8][27] = 42,
- [1][1][11][27] = 22,
- [1][1][2][29] = 46,
- [1][1][1][29] = 22,
- [1][1][3][29] = 46,
- [1][1][5][29] = 127,
- [1][1][6][29] = 22,
- [1][1][9][29] = 127,
- [1][1][8][29] = 42,
- [1][1][11][29] = 22,
- [1][1][2][31] = 46,
- [1][1][1][31] = 22,
- [1][1][3][31] = 46,
- [1][1][5][31] = 46,
- [1][1][6][31] = 22,
- [1][1][9][31] = 22,
- [1][1][8][31] = 42,
- [1][1][11][31] = 22,
- [1][1][2][33] = 46,
- [1][1][1][33] = 22,
- [1][1][3][33] = 46,
- [1][1][5][33] = 46,
- [1][1][6][33] = 22,
- [1][1][9][33] = 22,
- [1][1][8][33] = 42,
- [1][1][11][33] = 22,
- [1][1][2][35] = 46,
- [1][1][1][35] = 22,
- [1][1][3][35] = 46,
- [1][1][5][35] = 46,
- [1][1][6][35] = 22,
- [1][1][9][35] = 22,
- [1][1][8][35] = 42,
- [1][1][11][35] = 22,
- [1][1][2][37] = 46,
- [1][1][1][37] = 127,
- [1][1][3][37] = 46,
- [1][1][5][37] = 46,
- [1][1][6][37] = 22,
- [1][1][9][37] = 50,
- [1][1][8][37] = 42,
- [1][1][11][37] = 127,
- [1][1][2][38] = 74,
- [1][1][1][38] = 16,
- [1][1][3][38] = 127,
- [1][1][5][38] = 74,
- [1][1][6][38] = 16,
- [1][1][9][38] = 76,
- [1][1][8][38] = 42,
- [1][1][11][38] = 16,
- [1][1][2][40] = 76,
- [1][1][1][40] = 16,
- [1][1][3][40] = 127,
- [1][1][5][40] = 76,
- [1][1][6][40] = 16,
- [1][1][9][40] = 76,
- [1][1][8][40] = 42,
- [1][1][11][40] = 16,
- [1][1][2][42] = 76,
- [1][1][1][42] = 16,
- [1][1][3][42] = 127,
- [1][1][5][42] = 76,
- [1][1][6][42] = 16,
- [1][1][9][42] = 76,
- [1][1][8][42] = 42,
- [1][1][11][42] = 16,
- [1][1][2][44] = 76,
- [1][1][1][44] = 16,
- [1][1][3][44] = 127,
- [1][1][5][44] = 76,
- [1][1][6][44] = 16,
- [1][1][9][44] = 76,
- [1][1][8][44] = 42,
- [1][1][11][44] = 16,
- [1][1][2][46] = 76,
- [1][1][1][46] = 16,
- [1][1][3][46] = 127,
- [1][1][5][46] = 76,
- [1][1][6][46] = 16,
- [1][1][9][46] = 76,
- [1][1][8][46] = 42,
- [1][1][11][46] = 16,
- [2][0][2][0] = 74,
- [2][0][1][0] = 46,
- [2][0][3][0] = 50,
- [2][0][5][0] = 46,
- [2][0][6][0] = 46,
- [2][0][9][0] = 46,
- [2][0][8][0] = 30,
- [2][0][11][0] = 46,
- [2][0][2][2] = 74,
- [2][0][1][2] = 46,
- [2][0][3][2] = 50,
- [2][0][5][2] = 46,
- [2][0][6][2] = 46,
- [2][0][9][2] = 46,
- [2][0][8][2] = 30,
- [2][0][11][2] = 46,
- [2][0][2][4] = 74,
- [2][0][1][4] = 46,
- [2][0][3][4] = 50,
- [2][0][5][4] = 46,
- [2][0][6][4] = 46,
- [2][0][9][4] = 46,
- [2][0][8][4] = 30,
- [2][0][11][4] = 46,
- [2][0][2][6] = 74,
- [2][0][1][6] = 46,
- [2][0][3][6] = 50,
- [2][0][5][6] = 46,
- [2][0][6][6] = 46,
- [2][0][9][6] = 46,
- [2][0][8][6] = 30,
- [2][0][11][6] = 46,
- [2][0][2][8] = 74,
- [2][0][1][8] = 46,
- [2][0][3][8] = 50,
- [2][0][5][8] = 66,
- [2][0][6][8] = 46,
- [2][0][9][8] = 46,
- [2][0][8][8] = 54,
- [2][0][11][8] = 46,
- [2][0][2][10] = 74,
- [2][0][1][10] = 46,
- [2][0][3][10] = 50,
- [2][0][5][10] = 66,
- [2][0][6][10] = 46,
- [2][0][9][10] = 46,
- [2][0][8][10] = 54,
- [2][0][11][10] = 46,
- [2][0][2][12] = 74,
- [2][0][1][12] = 46,
- [2][0][3][12] = 50,
- [2][0][5][12] = 66,
- [2][0][6][12] = 46,
- [2][0][9][12] = 46,
- [2][0][8][12] = 54,
- [2][0][11][12] = 46,
- [2][0][2][14] = 74,
- [2][0][1][14] = 46,
- [2][0][3][14] = 50,
- [2][0][5][14] = 66,
- [2][0][6][14] = 46,
- [2][0][9][14] = 46,
- [2][0][8][14] = 54,
- [2][0][11][14] = 46,
- [2][0][2][15] = 74,
- [2][0][1][15] = 46,
- [2][0][3][15] = 70,
- [2][0][5][15] = 74,
- [2][0][6][15] = 46,
- [2][0][9][15] = 46,
- [2][0][8][15] = 54,
- [2][0][11][15] = 46,
- [2][0][2][17] = 74,
- [2][0][1][17] = 46,
- [2][0][3][17] = 70,
- [2][0][5][17] = 74,
- [2][0][6][17] = 46,
- [2][0][9][17] = 46,
- [2][0][8][17] = 54,
- [2][0][11][17] = 46,
- [2][0][2][19] = 74,
- [2][0][1][19] = 46,
- [2][0][3][19] = 70,
- [2][0][5][19] = 74,
- [2][0][6][19] = 46,
- [2][0][9][19] = 46,
- [2][0][8][19] = 54,
- [2][0][11][19] = 46,
- [2][0][2][21] = 74,
- [2][0][1][21] = 46,
- [2][0][3][21] = 70,
- [2][0][5][21] = 74,
- [2][0][6][21] = 46,
- [2][0][9][21] = 46,
- [2][0][8][21] = 54,
- [2][0][11][21] = 46,
- [2][0][2][23] = 74,
- [2][0][1][23] = 46,
- [2][0][3][23] = 70,
- [2][0][5][23] = 74,
- [2][0][6][23] = 46,
- [2][0][9][23] = 46,
- [2][0][8][23] = 54,
- [2][0][11][23] = 46,
- [2][0][2][25] = 74,
- [2][0][1][25] = 46,
- [2][0][3][25] = 70,
- [2][0][5][25] = 127,
- [2][0][6][25] = 46,
- [2][0][9][25] = 127,
- [2][0][8][25] = 54,
- [2][0][11][25] = 46,
- [2][0][2][27] = 74,
- [2][0][1][27] = 46,
- [2][0][3][27] = 70,
- [2][0][5][27] = 127,
- [2][0][6][27] = 46,
- [2][0][9][27] = 127,
- [2][0][8][27] = 54,
- [2][0][11][27] = 46,
- [2][0][2][29] = 74,
- [2][0][1][29] = 46,
- [2][0][3][29] = 70,
- [2][0][5][29] = 127,
- [2][0][6][29] = 46,
- [2][0][9][29] = 127,
- [2][0][8][29] = 54,
- [2][0][11][29] = 46,
- [2][0][2][31] = 74,
- [2][0][1][31] = 46,
- [2][0][3][31] = 70,
- [2][0][5][31] = 74,
- [2][0][6][31] = 46,
- [2][0][9][31] = 46,
- [2][0][8][31] = 54,
- [2][0][11][31] = 46,
- [2][0][2][33] = 74,
- [2][0][1][33] = 46,
- [2][0][3][33] = 70,
- [2][0][5][33] = 74,
- [2][0][6][33] = 46,
- [2][0][9][33] = 46,
- [2][0][8][33] = 54,
- [2][0][11][33] = 46,
- [2][0][2][35] = 74,
- [2][0][1][35] = 46,
- [2][0][3][35] = 70,
- [2][0][5][35] = 74,
- [2][0][6][35] = 46,
- [2][0][9][35] = 46,
- [2][0][8][35] = 54,
- [2][0][11][35] = 46,
- [2][0][2][37] = 74,
- [2][0][1][37] = 127,
- [2][0][3][37] = 70,
- [2][0][5][37] = 74,
- [2][0][6][37] = 46,
- [2][0][9][37] = 74,
- [2][0][8][37] = 54,
- [2][0][11][37] = 127,
- [2][0][2][38] = 76,
- [2][0][1][38] = 28,
- [2][0][3][38] = 127,
- [2][0][5][38] = 76,
- [2][0][6][38] = 28,
- [2][0][9][38] = 76,
- [2][0][8][38] = 54,
- [2][0][11][38] = 28,
- [2][0][2][40] = 76,
- [2][0][1][40] = 28,
- [2][0][3][40] = 127,
- [2][0][5][40] = 76,
- [2][0][6][40] = 28,
- [2][0][9][40] = 76,
- [2][0][8][40] = 54,
- [2][0][11][40] = 28,
- [2][0][2][42] = 76,
- [2][0][1][42] = 28,
- [2][0][3][42] = 127,
- [2][0][5][42] = 76,
- [2][0][6][42] = 28,
- [2][0][9][42] = 76,
- [2][0][8][42] = 54,
- [2][0][11][42] = 28,
- [2][0][2][44] = 76,
- [2][0][1][44] = 28,
- [2][0][3][44] = 127,
- [2][0][5][44] = 76,
- [2][0][6][44] = 28,
- [2][0][9][44] = 76,
- [2][0][8][44] = 54,
- [2][0][11][44] = 28,
- [2][0][2][46] = 76,
- [2][0][1][46] = 28,
- [2][0][3][46] = 127,
- [2][0][5][46] = 76,
- [2][0][6][46] = 28,
- [2][0][9][46] = 76,
- [2][0][8][46] = 54,
- [2][0][11][46] = 28,
- [2][1][2][0] = 58,
- [2][1][1][0] = 32,
- [2][1][3][0] = 38,
- [2][1][5][0] = 30,
- [2][1][6][0] = 32,
- [2][1][9][0] = 32,
- [2][1][8][0] = 18,
- [2][1][11][0] = 32,
- [2][1][2][2] = 58,
- [2][1][1][2] = 32,
- [2][1][3][2] = 38,
- [2][1][5][2] = 30,
- [2][1][6][2] = 32,
- [2][1][9][2] = 32,
- [2][1][8][2] = 18,
- [2][1][11][2] = 32,
- [2][1][2][4] = 58,
- [2][1][1][4] = 32,
- [2][1][3][4] = 38,
- [2][1][5][4] = 30,
- [2][1][6][4] = 32,
- [2][1][9][4] = 32,
- [2][1][8][4] = 18,
- [2][1][11][4] = 32,
- [2][1][2][6] = 58,
- [2][1][1][6] = 32,
- [2][1][3][6] = 38,
- [2][1][5][6] = 30,
- [2][1][6][6] = 32,
- [2][1][9][6] = 32,
- [2][1][8][6] = 18,
- [2][1][11][6] = 32,
- [2][1][2][8] = 58,
- [2][1][1][8] = 32,
- [2][1][3][8] = 38,
- [2][1][5][8] = 52,
- [2][1][6][8] = 32,
- [2][1][9][8] = 32,
- [2][1][8][8] = 42,
- [2][1][11][8] = 32,
- [2][1][2][10] = 58,
- [2][1][1][10] = 32,
- [2][1][3][10] = 38,
- [2][1][5][10] = 52,
- [2][1][6][10] = 32,
- [2][1][9][10] = 32,
- [2][1][8][10] = 42,
- [2][1][11][10] = 32,
- [2][1][2][12] = 58,
- [2][1][1][12] = 32,
- [2][1][3][12] = 38,
- [2][1][5][12] = 52,
- [2][1][6][12] = 32,
- [2][1][9][12] = 32,
- [2][1][8][12] = 42,
- [2][1][11][12] = 32,
- [2][1][2][14] = 58,
- [2][1][1][14] = 32,
- [2][1][3][14] = 38,
- [2][1][5][14] = 52,
- [2][1][6][14] = 32,
- [2][1][9][14] = 32,
- [2][1][8][14] = 42,
- [2][1][11][14] = 32,
- [2][1][2][15] = 58,
- [2][1][1][15] = 32,
- [2][1][3][15] = 58,
- [2][1][5][15] = 58,
- [2][1][6][15] = 32,
- [2][1][9][15] = 32,
- [2][1][8][15] = 42,
- [2][1][11][15] = 32,
- [2][1][2][17] = 58,
- [2][1][1][17] = 32,
- [2][1][3][17] = 58,
- [2][1][5][17] = 58,
- [2][1][6][17] = 32,
- [2][1][9][17] = 32,
- [2][1][8][17] = 42,
- [2][1][11][17] = 32,
- [2][1][2][19] = 58,
- [2][1][1][19] = 32,
- [2][1][3][19] = 58,
- [2][1][5][19] = 58,
- [2][1][6][19] = 32,
- [2][1][9][19] = 32,
- [2][1][8][19] = 42,
- [2][1][11][19] = 32,
- [2][1][2][21] = 58,
- [2][1][1][21] = 32,
- [2][1][3][21] = 58,
- [2][1][5][21] = 58,
- [2][1][6][21] = 32,
- [2][1][9][21] = 32,
- [2][1][8][21] = 42,
- [2][1][11][21] = 32,
- [2][1][2][23] = 58,
- [2][1][1][23] = 32,
- [2][1][3][23] = 58,
- [2][1][5][23] = 58,
- [2][1][6][23] = 32,
- [2][1][9][23] = 32,
- [2][1][8][23] = 42,
- [2][1][11][23] = 32,
- [2][1][2][25] = 58,
- [2][1][1][25] = 32,
- [2][1][3][25] = 58,
- [2][1][5][25] = 127,
- [2][1][6][25] = 32,
- [2][1][9][25] = 127,
- [2][1][8][25] = 42,
- [2][1][11][25] = 32,
- [2][1][2][27] = 58,
- [2][1][1][27] = 32,
- [2][1][3][27] = 58,
- [2][1][5][27] = 127,
- [2][1][6][27] = 32,
- [2][1][9][27] = 127,
- [2][1][8][27] = 42,
- [2][1][11][27] = 32,
- [2][1][2][29] = 58,
- [2][1][1][29] = 32,
- [2][1][3][29] = 58,
- [2][1][5][29] = 127,
- [2][1][6][29] = 32,
- [2][1][9][29] = 127,
- [2][1][8][29] = 42,
- [2][1][11][29] = 32,
- [2][1][2][31] = 58,
- [2][1][1][31] = 32,
- [2][1][3][31] = 58,
- [2][1][5][31] = 58,
- [2][1][6][31] = 32,
- [2][1][9][31] = 32,
- [2][1][8][31] = 42,
- [2][1][11][31] = 32,
- [2][1][2][33] = 58,
- [2][1][1][33] = 32,
- [2][1][3][33] = 58,
- [2][1][5][33] = 58,
- [2][1][6][33] = 32,
- [2][1][9][33] = 32,
- [2][1][8][33] = 42,
- [2][1][11][33] = 32,
- [2][1][2][35] = 58,
- [2][1][1][35] = 32,
- [2][1][3][35] = 58,
- [2][1][5][35] = 58,
- [2][1][6][35] = 32,
- [2][1][9][35] = 32,
- [2][1][8][35] = 42,
- [2][1][11][35] = 32,
- [2][1][2][37] = 58,
- [2][1][1][37] = 127,
- [2][1][3][37] = 58,
- [2][1][5][37] = 58,
- [2][1][6][37] = 32,
- [2][1][9][37] = 62,
- [2][1][8][37] = 42,
- [2][1][11][37] = 127,
- [2][1][2][38] = 76,
- [2][1][1][38] = 16,
- [2][1][3][38] = 127,
- [2][1][5][38] = 76,
- [2][1][6][38] = 16,
- [2][1][9][38] = 76,
- [2][1][8][38] = 42,
- [2][1][11][38] = 16,
- [2][1][2][40] = 76,
- [2][1][1][40] = 16,
- [2][1][3][40] = 127,
- [2][1][5][40] = 76,
- [2][1][6][40] = 16,
- [2][1][9][40] = 76,
- [2][1][8][40] = 42,
- [2][1][11][40] = 16,
- [2][1][2][42] = 76,
- [2][1][1][42] = 16,
- [2][1][3][42] = 127,
- [2][1][5][42] = 76,
- [2][1][6][42] = 16,
- [2][1][9][42] = 76,
- [2][1][8][42] = 42,
- [2][1][11][42] = 16,
- [2][1][2][44] = 76,
- [2][1][1][44] = 16,
- [2][1][3][44] = 127,
- [2][1][5][44] = 76,
- [2][1][6][44] = 16,
- [2][1][9][44] = 76,
- [2][1][8][44] = 42,
- [2][1][11][44] = 16,
- [2][1][2][46] = 76,
- [2][1][1][46] = 16,
- [2][1][3][46] = 127,
- [2][1][5][46] = 76,
- [2][1][6][46] = 16,
- [2][1][9][46] = 76,
- [2][1][8][46] = 42,
- [2][1][11][46] = 16,
+ [0][0][RTW89_WW][0] = 22,
+ [0][0][RTW89_WW][2] = 22,
+ [0][0][RTW89_WW][4] = 22,
+ [0][0][RTW89_WW][6] = 20,
+ [0][0][RTW89_WW][8] = 24,
+ [0][0][RTW89_WW][10] = 24,
+ [0][0][RTW89_WW][12] = 24,
+ [0][0][RTW89_WW][14] = 24,
+ [0][0][RTW89_WW][15] = 24,
+ [0][0][RTW89_WW][17] = 24,
+ [0][0][RTW89_WW][19] = 24,
+ [0][0][RTW89_WW][21] = 24,
+ [0][0][RTW89_WW][23] = 24,
+ [0][0][RTW89_WW][25] = 24,
+ [0][0][RTW89_WW][27] = 24,
+ [0][0][RTW89_WW][29] = 24,
+ [0][0][RTW89_WW][31] = 24,
+ [0][0][RTW89_WW][33] = 24,
+ [0][0][RTW89_WW][35] = 24,
+ [0][0][RTW89_WW][37] = 44,
+ [0][0][RTW89_WW][38] = 28,
+ [0][0][RTW89_WW][40] = 28,
+ [0][0][RTW89_WW][42] = 28,
+ [0][0][RTW89_WW][44] = 28,
+ [0][0][RTW89_WW][46] = 28,
+ [0][1][RTW89_WW][0] = 8,
+ [0][1][RTW89_WW][2] = 8,
+ [0][1][RTW89_WW][4] = 8,
+ [0][1][RTW89_WW][6] = 4,
+ [0][1][RTW89_WW][8] = 12,
+ [0][1][RTW89_WW][10] = 12,
+ [0][1][RTW89_WW][12] = 12,
+ [0][1][RTW89_WW][14] = 12,
+ [0][1][RTW89_WW][15] = 12,
+ [0][1][RTW89_WW][17] = 12,
+ [0][1][RTW89_WW][19] = 12,
+ [0][1][RTW89_WW][21] = 12,
+ [0][1][RTW89_WW][23] = 12,
+ [0][1][RTW89_WW][25] = 12,
+ [0][1][RTW89_WW][27] = 12,
+ [0][1][RTW89_WW][29] = 12,
+ [0][1][RTW89_WW][31] = 12,
+ [0][1][RTW89_WW][33] = 12,
+ [0][1][RTW89_WW][35] = 12,
+ [0][1][RTW89_WW][37] = 32,
+ [0][1][RTW89_WW][38] = 16,
+ [0][1][RTW89_WW][40] = 16,
+ [0][1][RTW89_WW][42] = 16,
+ [0][1][RTW89_WW][44] = 16,
+ [0][1][RTW89_WW][46] = 16,
+ [1][0][RTW89_WW][0] = 30,
+ [1][0][RTW89_WW][2] = 30,
+ [1][0][RTW89_WW][4] = 30,
+ [1][0][RTW89_WW][6] = 30,
+ [1][0][RTW89_WW][8] = 36,
+ [1][0][RTW89_WW][10] = 36,
+ [1][0][RTW89_WW][12] = 36,
+ [1][0][RTW89_WW][14] = 36,
+ [1][0][RTW89_WW][15] = 36,
+ [1][0][RTW89_WW][17] = 36,
+ [1][0][RTW89_WW][19] = 36,
+ [1][0][RTW89_WW][21] = 36,
+ [1][0][RTW89_WW][23] = 36,
+ [1][0][RTW89_WW][25] = 36,
+ [1][0][RTW89_WW][27] = 36,
+ [1][0][RTW89_WW][29] = 36,
+ [1][0][RTW89_WW][31] = 36,
+ [1][0][RTW89_WW][33] = 36,
+ [1][0][RTW89_WW][35] = 36,
+ [1][0][RTW89_WW][37] = 54,
+ [1][0][RTW89_WW][38] = 28,
+ [1][0][RTW89_WW][40] = 28,
+ [1][0][RTW89_WW][42] = 28,
+ [1][0][RTW89_WW][44] = 28,
+ [1][0][RTW89_WW][46] = 28,
+ [1][1][RTW89_WW][0] = 18,
+ [1][1][RTW89_WW][2] = 18,
+ [1][1][RTW89_WW][4] = 18,
+ [1][1][RTW89_WW][6] = 16,
+ [1][1][RTW89_WW][8] = 22,
+ [1][1][RTW89_WW][10] = 22,
+ [1][1][RTW89_WW][12] = 22,
+ [1][1][RTW89_WW][14] = 22,
+ [1][1][RTW89_WW][15] = 22,
+ [1][1][RTW89_WW][17] = 22,
+ [1][1][RTW89_WW][19] = 22,
+ [1][1][RTW89_WW][21] = 22,
+ [1][1][RTW89_WW][23] = 22,
+ [1][1][RTW89_WW][25] = 22,
+ [1][1][RTW89_WW][27] = 22,
+ [1][1][RTW89_WW][29] = 22,
+ [1][1][RTW89_WW][31] = 22,
+ [1][1][RTW89_WW][33] = 22,
+ [1][1][RTW89_WW][35] = 22,
+ [1][1][RTW89_WW][37] = 42,
+ [1][1][RTW89_WW][38] = 16,
+ [1][1][RTW89_WW][40] = 16,
+ [1][1][RTW89_WW][42] = 16,
+ [1][1][RTW89_WW][44] = 16,
+ [1][1][RTW89_WW][46] = 16,
+ [2][0][RTW89_WW][0] = 30,
+ [2][0][RTW89_WW][2] = 30,
+ [2][0][RTW89_WW][4] = 30,
+ [2][0][RTW89_WW][6] = 30,
+ [2][0][RTW89_WW][8] = 46,
+ [2][0][RTW89_WW][10] = 46,
+ [2][0][RTW89_WW][12] = 46,
+ [2][0][RTW89_WW][14] = 46,
+ [2][0][RTW89_WW][15] = 46,
+ [2][0][RTW89_WW][17] = 46,
+ [2][0][RTW89_WW][19] = 46,
+ [2][0][RTW89_WW][21] = 46,
+ [2][0][RTW89_WW][23] = 46,
+ [2][0][RTW89_WW][25] = 46,
+ [2][0][RTW89_WW][27] = 46,
+ [2][0][RTW89_WW][29] = 46,
+ [2][0][RTW89_WW][31] = 46,
+ [2][0][RTW89_WW][33] = 46,
+ [2][0][RTW89_WW][35] = 46,
+ [2][0][RTW89_WW][37] = 54,
+ [2][0][RTW89_WW][38] = 28,
+ [2][0][RTW89_WW][40] = 28,
+ [2][0][RTW89_WW][42] = 28,
+ [2][0][RTW89_WW][44] = 28,
+ [2][0][RTW89_WW][46] = 28,
+ [2][1][RTW89_WW][0] = 18,
+ [2][1][RTW89_WW][2] = 18,
+ [2][1][RTW89_WW][4] = 18,
+ [2][1][RTW89_WW][6] = 18,
+ [2][1][RTW89_WW][8] = 32,
+ [2][1][RTW89_WW][10] = 32,
+ [2][1][RTW89_WW][12] = 32,
+ [2][1][RTW89_WW][14] = 32,
+ [2][1][RTW89_WW][15] = 32,
+ [2][1][RTW89_WW][17] = 32,
+ [2][1][RTW89_WW][19] = 32,
+ [2][1][RTW89_WW][21] = 32,
+ [2][1][RTW89_WW][23] = 32,
+ [2][1][RTW89_WW][25] = 32,
+ [2][1][RTW89_WW][27] = 32,
+ [2][1][RTW89_WW][29] = 32,
+ [2][1][RTW89_WW][31] = 32,
+ [2][1][RTW89_WW][33] = 32,
+ [2][1][RTW89_WW][35] = 32,
+ [2][1][RTW89_WW][37] = 42,
+ [2][1][RTW89_WW][38] = 16,
+ [2][1][RTW89_WW][40] = 16,
+ [2][1][RTW89_WW][42] = 16,
+ [2][1][RTW89_WW][44] = 16,
+ [2][1][RTW89_WW][46] = 16,
+ [0][0][RTW89_FCC][0] = 48,
+ [0][0][RTW89_ETSI][0] = 24,
+ [0][0][RTW89_MKK][0] = 26,
+ [0][0][RTW89_IC][0] = 22,
+ [0][0][RTW89_KCC][0] = 46,
+ [0][0][RTW89_ACMA][0] = 24,
+ [0][0][RTW89_CHILE][0] = 30,
+ [0][0][RTW89_UKRAINE][0] = 24,
+ [0][0][RTW89_MEXICO][0] = 48,
+ [0][0][RTW89_CN][0] = 24,
+ [0][0][RTW89_QATAR][0] = 24,
+ [0][0][RTW89_FCC][2] = 48,
+ [0][0][RTW89_ETSI][2] = 24,
+ [0][0][RTW89_MKK][2] = 26,
+ [0][0][RTW89_IC][2] = 22,
+ [0][0][RTW89_KCC][2] = 46,
+ [0][0][RTW89_ACMA][2] = 24,
+ [0][0][RTW89_CHILE][2] = 30,
+ [0][0][RTW89_UKRAINE][2] = 24,
+ [0][0][RTW89_MEXICO][2] = 48,
+ [0][0][RTW89_CN][2] = 24,
+ [0][0][RTW89_QATAR][2] = 24,
+ [0][0][RTW89_FCC][4] = 48,
+ [0][0][RTW89_ETSI][4] = 24,
+ [0][0][RTW89_MKK][4] = 26,
+ [0][0][RTW89_IC][4] = 22,
+ [0][0][RTW89_KCC][4] = 46,
+ [0][0][RTW89_ACMA][4] = 24,
+ [0][0][RTW89_CHILE][4] = 30,
+ [0][0][RTW89_UKRAINE][4] = 24,
+ [0][0][RTW89_MEXICO][4] = 48,
+ [0][0][RTW89_CN][4] = 24,
+ [0][0][RTW89_QATAR][4] = 24,
+ [0][0][RTW89_FCC][6] = 48,
+ [0][0][RTW89_ETSI][6] = 24,
+ [0][0][RTW89_MKK][6] = 26,
+ [0][0][RTW89_IC][6] = 22,
+ [0][0][RTW89_KCC][6] = 20,
+ [0][0][RTW89_ACMA][6] = 24,
+ [0][0][RTW89_CHILE][6] = 30,
+ [0][0][RTW89_UKRAINE][6] = 24,
+ [0][0][RTW89_MEXICO][6] = 48,
+ [0][0][RTW89_CN][6] = 24,
+ [0][0][RTW89_QATAR][6] = 24,
+ [0][0][RTW89_FCC][8] = 48,
+ [0][0][RTW89_ETSI][8] = 24,
+ [0][0][RTW89_MKK][8] = 26,
+ [0][0][RTW89_IC][8] = 48,
+ [0][0][RTW89_KCC][8] = 46,
+ [0][0][RTW89_ACMA][8] = 24,
+ [0][0][RTW89_CHILE][8] = 48,
+ [0][0][RTW89_UKRAINE][8] = 24,
+ [0][0][RTW89_MEXICO][8] = 48,
+ [0][0][RTW89_CN][8] = 24,
+ [0][0][RTW89_QATAR][8] = 24,
+ [0][0][RTW89_FCC][10] = 48,
+ [0][0][RTW89_ETSI][10] = 24,
+ [0][0][RTW89_MKK][10] = 26,
+ [0][0][RTW89_IC][10] = 48,
+ [0][0][RTW89_KCC][10] = 46,
+ [0][0][RTW89_ACMA][10] = 24,
+ [0][0][RTW89_CHILE][10] = 48,
+ [0][0][RTW89_UKRAINE][10] = 24,
+ [0][0][RTW89_MEXICO][10] = 48,
+ [0][0][RTW89_CN][10] = 24,
+ [0][0][RTW89_QATAR][10] = 24,
+ [0][0][RTW89_FCC][12] = 48,
+ [0][0][RTW89_ETSI][12] = 24,
+ [0][0][RTW89_MKK][12] = 26,
+ [0][0][RTW89_IC][12] = 48,
+ [0][0][RTW89_KCC][12] = 46,
+ [0][0][RTW89_ACMA][12] = 24,
+ [0][0][RTW89_CHILE][12] = 48,
+ [0][0][RTW89_UKRAINE][12] = 24,
+ [0][0][RTW89_MEXICO][12] = 48,
+ [0][0][RTW89_CN][12] = 24,
+ [0][0][RTW89_QATAR][12] = 24,
+ [0][0][RTW89_FCC][14] = 48,
+ [0][0][RTW89_ETSI][14] = 24,
+ [0][0][RTW89_MKK][14] = 26,
+ [0][0][RTW89_IC][14] = 48,
+ [0][0][RTW89_KCC][14] = 46,
+ [0][0][RTW89_ACMA][14] = 24,
+ [0][0][RTW89_CHILE][14] = 48,
+ [0][0][RTW89_UKRAINE][14] = 24,
+ [0][0][RTW89_MEXICO][14] = 48,
+ [0][0][RTW89_CN][14] = 24,
+ [0][0][RTW89_QATAR][14] = 24,
+ [0][0][RTW89_FCC][15] = 48,
+ [0][0][RTW89_ETSI][15] = 24,
+ [0][0][RTW89_MKK][15] = 44,
+ [0][0][RTW89_IC][15] = 48,
+ [0][0][RTW89_KCC][15] = 46,
+ [0][0][RTW89_ACMA][15] = 24,
+ [0][0][RTW89_CHILE][15] = 48,
+ [0][0][RTW89_UKRAINE][15] = 24,
+ [0][0][RTW89_MEXICO][15] = 48,
+ [0][0][RTW89_CN][15] = 127,
+ [0][0][RTW89_QATAR][15] = 24,
+ [0][0][RTW89_FCC][17] = 48,
+ [0][0][RTW89_ETSI][17] = 24,
+ [0][0][RTW89_MKK][17] = 44,
+ [0][0][RTW89_IC][17] = 48,
+ [0][0][RTW89_KCC][17] = 46,
+ [0][0][RTW89_ACMA][17] = 24,
+ [0][0][RTW89_CHILE][17] = 48,
+ [0][0][RTW89_UKRAINE][17] = 24,
+ [0][0][RTW89_MEXICO][17] = 48,
+ [0][0][RTW89_CN][17] = 127,
+ [0][0][RTW89_QATAR][17] = 24,
+ [0][0][RTW89_FCC][19] = 48,
+ [0][0][RTW89_ETSI][19] = 24,
+ [0][0][RTW89_MKK][19] = 44,
+ [0][0][RTW89_IC][19] = 48,
+ [0][0][RTW89_KCC][19] = 46,
+ [0][0][RTW89_ACMA][19] = 24,
+ [0][0][RTW89_CHILE][19] = 48,
+ [0][0][RTW89_UKRAINE][19] = 24,
+ [0][0][RTW89_MEXICO][19] = 48,
+ [0][0][RTW89_CN][19] = 127,
+ [0][0][RTW89_QATAR][19] = 24,
+ [0][0][RTW89_FCC][21] = 48,
+ [0][0][RTW89_ETSI][21] = 24,
+ [0][0][RTW89_MKK][21] = 44,
+ [0][0][RTW89_IC][21] = 48,
+ [0][0][RTW89_KCC][21] = 46,
+ [0][0][RTW89_ACMA][21] = 24,
+ [0][0][RTW89_CHILE][21] = 48,
+ [0][0][RTW89_UKRAINE][21] = 24,
+ [0][0][RTW89_MEXICO][21] = 48,
+ [0][0][RTW89_CN][21] = 127,
+ [0][0][RTW89_QATAR][21] = 24,
+ [0][0][RTW89_FCC][23] = 48,
+ [0][0][RTW89_ETSI][23] = 24,
+ [0][0][RTW89_MKK][23] = 44,
+ [0][0][RTW89_IC][23] = 48,
+ [0][0][RTW89_KCC][23] = 46,
+ [0][0][RTW89_ACMA][23] = 24,
+ [0][0][RTW89_CHILE][23] = 48,
+ [0][0][RTW89_UKRAINE][23] = 24,
+ [0][0][RTW89_MEXICO][23] = 48,
+ [0][0][RTW89_CN][23] = 127,
+ [0][0][RTW89_QATAR][23] = 24,
+ [0][0][RTW89_FCC][25] = 48,
+ [0][0][RTW89_ETSI][25] = 24,
+ [0][0][RTW89_MKK][25] = 44,
+ [0][0][RTW89_IC][25] = 127,
+ [0][0][RTW89_KCC][25] = 46,
+ [0][0][RTW89_ACMA][25] = 127,
+ [0][0][RTW89_CHILE][25] = 48,
+ [0][0][RTW89_UKRAINE][25] = 24,
+ [0][0][RTW89_MEXICO][25] = 48,
+ [0][0][RTW89_CN][25] = 127,
+ [0][0][RTW89_QATAR][25] = 24,
+ [0][0][RTW89_FCC][27] = 48,
+ [0][0][RTW89_ETSI][27] = 24,
+ [0][0][RTW89_MKK][27] = 44,
+ [0][0][RTW89_IC][27] = 127,
+ [0][0][RTW89_KCC][27] = 46,
+ [0][0][RTW89_ACMA][27] = 127,
+ [0][0][RTW89_CHILE][27] = 48,
+ [0][0][RTW89_UKRAINE][27] = 24,
+ [0][0][RTW89_MEXICO][27] = 48,
+ [0][0][RTW89_CN][27] = 127,
+ [0][0][RTW89_QATAR][27] = 24,
+ [0][0][RTW89_FCC][29] = 48,
+ [0][0][RTW89_ETSI][29] = 24,
+ [0][0][RTW89_MKK][29] = 44,
+ [0][0][RTW89_IC][29] = 127,
+ [0][0][RTW89_KCC][29] = 46,
+ [0][0][RTW89_ACMA][29] = 127,
+ [0][0][RTW89_CHILE][29] = 48,
+ [0][0][RTW89_UKRAINE][29] = 24,
+ [0][0][RTW89_MEXICO][29] = 48,
+ [0][0][RTW89_CN][29] = 127,
+ [0][0][RTW89_QATAR][29] = 24,
+ [0][0][RTW89_FCC][31] = 48,
+ [0][0][RTW89_ETSI][31] = 24,
+ [0][0][RTW89_MKK][31] = 44,
+ [0][0][RTW89_IC][31] = 48,
+ [0][0][RTW89_KCC][31] = 46,
+ [0][0][RTW89_ACMA][31] = 24,
+ [0][0][RTW89_CHILE][31] = 48,
+ [0][0][RTW89_UKRAINE][31] = 24,
+ [0][0][RTW89_MEXICO][31] = 48,
+ [0][0][RTW89_CN][31] = 127,
+ [0][0][RTW89_QATAR][31] = 24,
+ [0][0][RTW89_FCC][33] = 48,
+ [0][0][RTW89_ETSI][33] = 24,
+ [0][0][RTW89_MKK][33] = 44,
+ [0][0][RTW89_IC][33] = 48,
+ [0][0][RTW89_KCC][33] = 46,
+ [0][0][RTW89_ACMA][33] = 24,
+ [0][0][RTW89_CHILE][33] = 48,
+ [0][0][RTW89_UKRAINE][33] = 24,
+ [0][0][RTW89_MEXICO][33] = 48,
+ [0][0][RTW89_CN][33] = 127,
+ [0][0][RTW89_QATAR][33] = 24,
+ [0][0][RTW89_FCC][35] = 48,
+ [0][0][RTW89_ETSI][35] = 24,
+ [0][0][RTW89_MKK][35] = 44,
+ [0][0][RTW89_IC][35] = 48,
+ [0][0][RTW89_KCC][35] = 46,
+ [0][0][RTW89_ACMA][35] = 24,
+ [0][0][RTW89_CHILE][35] = 48,
+ [0][0][RTW89_UKRAINE][35] = 24,
+ [0][0][RTW89_MEXICO][35] = 48,
+ [0][0][RTW89_CN][35] = 127,
+ [0][0][RTW89_QATAR][35] = 24,
+ [0][0][RTW89_FCC][37] = 48,
+ [0][0][RTW89_ETSI][37] = 127,
+ [0][0][RTW89_MKK][37] = 44,
+ [0][0][RTW89_IC][37] = 48,
+ [0][0][RTW89_KCC][37] = 46,
+ [0][0][RTW89_ACMA][37] = 48,
+ [0][0][RTW89_CHILE][37] = 48,
+ [0][0][RTW89_UKRAINE][37] = 127,
+ [0][0][RTW89_MEXICO][37] = 48,
+ [0][0][RTW89_CN][37] = 127,
+ [0][0][RTW89_QATAR][37] = 127,
+ [0][0][RTW89_FCC][38] = 76,
+ [0][0][RTW89_ETSI][38] = 28,
+ [0][0][RTW89_MKK][38] = 127,
+ [0][0][RTW89_IC][38] = 76,
+ [0][0][RTW89_KCC][38] = 46,
+ [0][0][RTW89_ACMA][38] = 76,
+ [0][0][RTW89_CHILE][38] = 54,
+ [0][0][RTW89_UKRAINE][38] = 28,
+ [0][0][RTW89_MEXICO][38] = 76,
+ [0][0][RTW89_CN][38] = 62,
+ [0][0][RTW89_QATAR][38] = 28,
+ [0][0][RTW89_FCC][40] = 76,
+ [0][0][RTW89_ETSI][40] = 28,
+ [0][0][RTW89_MKK][40] = 127,
+ [0][0][RTW89_IC][40] = 76,
+ [0][0][RTW89_KCC][40] = 46,
+ [0][0][RTW89_ACMA][40] = 76,
+ [0][0][RTW89_CHILE][40] = 54,
+ [0][0][RTW89_UKRAINE][40] = 28,
+ [0][0][RTW89_MEXICO][40] = 76,
+ [0][0][RTW89_CN][40] = 62,
+ [0][0][RTW89_QATAR][40] = 28,
+ [0][0][RTW89_FCC][42] = 76,
+ [0][0][RTW89_ETSI][42] = 28,
+ [0][0][RTW89_MKK][42] = 127,
+ [0][0][RTW89_IC][42] = 76,
+ [0][0][RTW89_KCC][42] = 46,
+ [0][0][RTW89_ACMA][42] = 76,
+ [0][0][RTW89_CHILE][42] = 54,
+ [0][0][RTW89_UKRAINE][42] = 28,
+ [0][0][RTW89_MEXICO][42] = 76,
+ [0][0][RTW89_CN][42] = 62,
+ [0][0][RTW89_QATAR][42] = 28,
+ [0][0][RTW89_FCC][44] = 76,
+ [0][0][RTW89_ETSI][44] = 28,
+ [0][0][RTW89_MKK][44] = 127,
+ [0][0][RTW89_IC][44] = 76,
+ [0][0][RTW89_KCC][44] = 46,
+ [0][0][RTW89_ACMA][44] = 76,
+ [0][0][RTW89_CHILE][44] = 54,
+ [0][0][RTW89_UKRAINE][44] = 28,
+ [0][0][RTW89_MEXICO][44] = 76,
+ [0][0][RTW89_CN][44] = 62,
+ [0][0][RTW89_QATAR][44] = 28,
+ [0][0][RTW89_FCC][46] = 76,
+ [0][0][RTW89_ETSI][46] = 28,
+ [0][0][RTW89_MKK][46] = 127,
+ [0][0][RTW89_IC][46] = 76,
+ [0][0][RTW89_KCC][46] = 46,
+ [0][0][RTW89_ACMA][46] = 76,
+ [0][0][RTW89_CHILE][46] = 54,
+ [0][0][RTW89_UKRAINE][46] = 28,
+ [0][0][RTW89_MEXICO][46] = 76,
+ [0][0][RTW89_CN][46] = 62,
+ [0][0][RTW89_QATAR][46] = 28,
+ [0][1][RTW89_FCC][0] = 36,
+ [0][1][RTW89_ETSI][0] = 12,
+ [0][1][RTW89_MKK][0] = 14,
+ [0][1][RTW89_IC][0] = 8,
+ [0][1][RTW89_KCC][0] = 32,
+ [0][1][RTW89_ACMA][0] = 12,
+ [0][1][RTW89_CHILE][0] = 18,
+ [0][1][RTW89_UKRAINE][0] = 12,
+ [0][1][RTW89_MEXICO][0] = 36,
+ [0][1][RTW89_CN][0] = 12,
+ [0][1][RTW89_QATAR][0] = 12,
+ [0][1][RTW89_FCC][2] = 36,
+ [0][1][RTW89_ETSI][2] = 12,
+ [0][1][RTW89_MKK][2] = 14,
+ [0][1][RTW89_IC][2] = 8,
+ [0][1][RTW89_KCC][2] = 32,
+ [0][1][RTW89_ACMA][2] = 12,
+ [0][1][RTW89_CHILE][2] = 18,
+ [0][1][RTW89_UKRAINE][2] = 12,
+ [0][1][RTW89_MEXICO][2] = 36,
+ [0][1][RTW89_CN][2] = 12,
+ [0][1][RTW89_QATAR][2] = 12,
+ [0][1][RTW89_FCC][4] = 36,
+ [0][1][RTW89_ETSI][4] = 12,
+ [0][1][RTW89_MKK][4] = 14,
+ [0][1][RTW89_IC][4] = 8,
+ [0][1][RTW89_KCC][4] = 32,
+ [0][1][RTW89_ACMA][4] = 12,
+ [0][1][RTW89_CHILE][4] = 18,
+ [0][1][RTW89_UKRAINE][4] = 12,
+ [0][1][RTW89_MEXICO][4] = 36,
+ [0][1][RTW89_CN][4] = 12,
+ [0][1][RTW89_QATAR][4] = 12,
+ [0][1][RTW89_FCC][6] = 36,
+ [0][1][RTW89_ETSI][6] = 12,
+ [0][1][RTW89_MKK][6] = 14,
+ [0][1][RTW89_IC][6] = 8,
+ [0][1][RTW89_KCC][6] = 4,
+ [0][1][RTW89_ACMA][6] = 12,
+ [0][1][RTW89_CHILE][6] = 18,
+ [0][1][RTW89_UKRAINE][6] = 12,
+ [0][1][RTW89_MEXICO][6] = 36,
+ [0][1][RTW89_CN][6] = 12,
+ [0][1][RTW89_QATAR][6] = 12,
+ [0][1][RTW89_FCC][8] = 36,
+ [0][1][RTW89_ETSI][8] = 12,
+ [0][1][RTW89_MKK][8] = 14,
+ [0][1][RTW89_IC][8] = 36,
+ [0][1][RTW89_KCC][8] = 32,
+ [0][1][RTW89_ACMA][8] = 12,
+ [0][1][RTW89_CHILE][8] = 36,
+ [0][1][RTW89_UKRAINE][8] = 12,
+ [0][1][RTW89_MEXICO][8] = 36,
+ [0][1][RTW89_CN][8] = 12,
+ [0][1][RTW89_QATAR][8] = 12,
+ [0][1][RTW89_FCC][10] = 36,
+ [0][1][RTW89_ETSI][10] = 12,
+ [0][1][RTW89_MKK][10] = 14,
+ [0][1][RTW89_IC][10] = 36,
+ [0][1][RTW89_KCC][10] = 32,
+ [0][1][RTW89_ACMA][10] = 12,
+ [0][1][RTW89_CHILE][10] = 36,
+ [0][1][RTW89_UKRAINE][10] = 12,
+ [0][1][RTW89_MEXICO][10] = 36,
+ [0][1][RTW89_CN][10] = 12,
+ [0][1][RTW89_QATAR][10] = 12,
+ [0][1][RTW89_FCC][12] = 36,
+ [0][1][RTW89_ETSI][12] = 12,
+ [0][1][RTW89_MKK][12] = 14,
+ [0][1][RTW89_IC][12] = 36,
+ [0][1][RTW89_KCC][12] = 32,
+ [0][1][RTW89_ACMA][12] = 12,
+ [0][1][RTW89_CHILE][12] = 36,
+ [0][1][RTW89_UKRAINE][12] = 12,
+ [0][1][RTW89_MEXICO][12] = 36,
+ [0][1][RTW89_CN][12] = 12,
+ [0][1][RTW89_QATAR][12] = 12,
+ [0][1][RTW89_FCC][14] = 36,
+ [0][1][RTW89_ETSI][14] = 12,
+ [0][1][RTW89_MKK][14] = 14,
+ [0][1][RTW89_IC][14] = 36,
+ [0][1][RTW89_KCC][14] = 32,
+ [0][1][RTW89_ACMA][14] = 12,
+ [0][1][RTW89_CHILE][14] = 36,
+ [0][1][RTW89_UKRAINE][14] = 12,
+ [0][1][RTW89_MEXICO][14] = 36,
+ [0][1][RTW89_CN][14] = 12,
+ [0][1][RTW89_QATAR][14] = 12,
+ [0][1][RTW89_FCC][15] = 36,
+ [0][1][RTW89_ETSI][15] = 12,
+ [0][1][RTW89_MKK][15] = 32,
+ [0][1][RTW89_IC][15] = 36,
+ [0][1][RTW89_KCC][15] = 32,
+ [0][1][RTW89_ACMA][15] = 12,
+ [0][1][RTW89_CHILE][15] = 36,
+ [0][1][RTW89_UKRAINE][15] = 12,
+ [0][1][RTW89_MEXICO][15] = 36,
+ [0][1][RTW89_CN][15] = 127,
+ [0][1][RTW89_QATAR][15] = 12,
+ [0][1][RTW89_FCC][17] = 36,
+ [0][1][RTW89_ETSI][17] = 12,
+ [0][1][RTW89_MKK][17] = 32,
+ [0][1][RTW89_IC][17] = 36,
+ [0][1][RTW89_KCC][17] = 32,
+ [0][1][RTW89_ACMA][17] = 12,
+ [0][1][RTW89_CHILE][17] = 36,
+ [0][1][RTW89_UKRAINE][17] = 12,
+ [0][1][RTW89_MEXICO][17] = 36,
+ [0][1][RTW89_CN][17] = 127,
+ [0][1][RTW89_QATAR][17] = 12,
+ [0][1][RTW89_FCC][19] = 36,
+ [0][1][RTW89_ETSI][19] = 12,
+ [0][1][RTW89_MKK][19] = 32,
+ [0][1][RTW89_IC][19] = 36,
+ [0][1][RTW89_KCC][19] = 32,
+ [0][1][RTW89_ACMA][19] = 12,
+ [0][1][RTW89_CHILE][19] = 36,
+ [0][1][RTW89_UKRAINE][19] = 12,
+ [0][1][RTW89_MEXICO][19] = 36,
+ [0][1][RTW89_CN][19] = 127,
+ [0][1][RTW89_QATAR][19] = 12,
+ [0][1][RTW89_FCC][21] = 36,
+ [0][1][RTW89_ETSI][21] = 12,
+ [0][1][RTW89_MKK][21] = 32,
+ [0][1][RTW89_IC][21] = 36,
+ [0][1][RTW89_KCC][21] = 32,
+ [0][1][RTW89_ACMA][21] = 12,
+ [0][1][RTW89_CHILE][21] = 36,
+ [0][1][RTW89_UKRAINE][21] = 12,
+ [0][1][RTW89_MEXICO][21] = 36,
+ [0][1][RTW89_CN][21] = 127,
+ [0][1][RTW89_QATAR][21] = 12,
+ [0][1][RTW89_FCC][23] = 36,
+ [0][1][RTW89_ETSI][23] = 12,
+ [0][1][RTW89_MKK][23] = 32,
+ [0][1][RTW89_IC][23] = 36,
+ [0][1][RTW89_KCC][23] = 32,
+ [0][1][RTW89_ACMA][23] = 12,
+ [0][1][RTW89_CHILE][23] = 36,
+ [0][1][RTW89_UKRAINE][23] = 12,
+ [0][1][RTW89_MEXICO][23] = 36,
+ [0][1][RTW89_CN][23] = 127,
+ [0][1][RTW89_QATAR][23] = 12,
+ [0][1][RTW89_FCC][25] = 36,
+ [0][1][RTW89_ETSI][25] = 12,
+ [0][1][RTW89_MKK][25] = 32,
+ [0][1][RTW89_IC][25] = 127,
+ [0][1][RTW89_KCC][25] = 32,
+ [0][1][RTW89_ACMA][25] = 127,
+ [0][1][RTW89_CHILE][25] = 36,
+ [0][1][RTW89_UKRAINE][25] = 12,
+ [0][1][RTW89_MEXICO][25] = 36,
+ [0][1][RTW89_CN][25] = 127,
+ [0][1][RTW89_QATAR][25] = 12,
+ [0][1][RTW89_FCC][27] = 36,
+ [0][1][RTW89_ETSI][27] = 12,
+ [0][1][RTW89_MKK][27] = 32,
+ [0][1][RTW89_IC][27] = 127,
+ [0][1][RTW89_KCC][27] = 32,
+ [0][1][RTW89_ACMA][27] = 127,
+ [0][1][RTW89_CHILE][27] = 36,
+ [0][1][RTW89_UKRAINE][27] = 12,
+ [0][1][RTW89_MEXICO][27] = 36,
+ [0][1][RTW89_CN][27] = 127,
+ [0][1][RTW89_QATAR][27] = 12,
+ [0][1][RTW89_FCC][29] = 36,
+ [0][1][RTW89_ETSI][29] = 12,
+ [0][1][RTW89_MKK][29] = 32,
+ [0][1][RTW89_IC][29] = 127,
+ [0][1][RTW89_KCC][29] = 32,
+ [0][1][RTW89_ACMA][29] = 127,
+ [0][1][RTW89_CHILE][29] = 36,
+ [0][1][RTW89_UKRAINE][29] = 12,
+ [0][1][RTW89_MEXICO][29] = 36,
+ [0][1][RTW89_CN][29] = 127,
+ [0][1][RTW89_QATAR][29] = 12,
+ [0][1][RTW89_FCC][31] = 36,
+ [0][1][RTW89_ETSI][31] = 12,
+ [0][1][RTW89_MKK][31] = 32,
+ [0][1][RTW89_IC][31] = 36,
+ [0][1][RTW89_KCC][31] = 32,
+ [0][1][RTW89_ACMA][31] = 12,
+ [0][1][RTW89_CHILE][31] = 36,
+ [0][1][RTW89_UKRAINE][31] = 12,
+ [0][1][RTW89_MEXICO][31] = 36,
+ [0][1][RTW89_CN][31] = 127,
+ [0][1][RTW89_QATAR][31] = 12,
+ [0][1][RTW89_FCC][33] = 36,
+ [0][1][RTW89_ETSI][33] = 12,
+ [0][1][RTW89_MKK][33] = 32,
+ [0][1][RTW89_IC][33] = 36,
+ [0][1][RTW89_KCC][33] = 32,
+ [0][1][RTW89_ACMA][33] = 12,
+ [0][1][RTW89_CHILE][33] = 36,
+ [0][1][RTW89_UKRAINE][33] = 12,
+ [0][1][RTW89_MEXICO][33] = 36,
+ [0][1][RTW89_CN][33] = 127,
+ [0][1][RTW89_QATAR][33] = 12,
+ [0][1][RTW89_FCC][35] = 36,
+ [0][1][RTW89_ETSI][35] = 12,
+ [0][1][RTW89_MKK][35] = 32,
+ [0][1][RTW89_IC][35] = 36,
+ [0][1][RTW89_KCC][35] = 32,
+ [0][1][RTW89_ACMA][35] = 12,
+ [0][1][RTW89_CHILE][35] = 36,
+ [0][1][RTW89_UKRAINE][35] = 12,
+ [0][1][RTW89_MEXICO][35] = 36,
+ [0][1][RTW89_CN][35] = 127,
+ [0][1][RTW89_QATAR][35] = 12,
+ [0][1][RTW89_FCC][37] = 36,
+ [0][1][RTW89_ETSI][37] = 127,
+ [0][1][RTW89_MKK][37] = 32,
+ [0][1][RTW89_IC][37] = 36,
+ [0][1][RTW89_KCC][37] = 32,
+ [0][1][RTW89_ACMA][37] = 36,
+ [0][1][RTW89_CHILE][37] = 36,
+ [0][1][RTW89_UKRAINE][37] = 127,
+ [0][1][RTW89_MEXICO][37] = 36,
+ [0][1][RTW89_CN][37] = 127,
+ [0][1][RTW89_QATAR][37] = 127,
+ [0][1][RTW89_FCC][38] = 72,
+ [0][1][RTW89_ETSI][38] = 16,
+ [0][1][RTW89_MKK][38] = 127,
+ [0][1][RTW89_IC][38] = 72,
+ [0][1][RTW89_KCC][38] = 32,
+ [0][1][RTW89_ACMA][38] = 76,
+ [0][1][RTW89_CHILE][38] = 42,
+ [0][1][RTW89_UKRAINE][38] = 16,
+ [0][1][RTW89_MEXICO][38] = 72,
+ [0][1][RTW89_CN][38] = 50,
+ [0][1][RTW89_QATAR][38] = 16,
+ [0][1][RTW89_FCC][40] = 76,
+ [0][1][RTW89_ETSI][40] = 16,
+ [0][1][RTW89_MKK][40] = 127,
+ [0][1][RTW89_IC][40] = 76,
+ [0][1][RTW89_KCC][40] = 32,
+ [0][1][RTW89_ACMA][40] = 76,
+ [0][1][RTW89_CHILE][40] = 42,
+ [0][1][RTW89_UKRAINE][40] = 16,
+ [0][1][RTW89_MEXICO][40] = 76,
+ [0][1][RTW89_CN][40] = 50,
+ [0][1][RTW89_QATAR][40] = 16,
+ [0][1][RTW89_FCC][42] = 76,
+ [0][1][RTW89_ETSI][42] = 16,
+ [0][1][RTW89_MKK][42] = 127,
+ [0][1][RTW89_IC][42] = 76,
+ [0][1][RTW89_KCC][42] = 32,
+ [0][1][RTW89_ACMA][42] = 76,
+ [0][1][RTW89_CHILE][42] = 42,
+ [0][1][RTW89_UKRAINE][42] = 16,
+ [0][1][RTW89_MEXICO][42] = 76,
+ [0][1][RTW89_CN][42] = 50,
+ [0][1][RTW89_QATAR][42] = 16,
+ [0][1][RTW89_FCC][44] = 76,
+ [0][1][RTW89_ETSI][44] = 16,
+ [0][1][RTW89_MKK][44] = 127,
+ [0][1][RTW89_IC][44] = 76,
+ [0][1][RTW89_KCC][44] = 32,
+ [0][1][RTW89_ACMA][44] = 76,
+ [0][1][RTW89_CHILE][44] = 42,
+ [0][1][RTW89_UKRAINE][44] = 16,
+ [0][1][RTW89_MEXICO][44] = 76,
+ [0][1][RTW89_CN][44] = 50,
+ [0][1][RTW89_QATAR][44] = 16,
+ [0][1][RTW89_FCC][46] = 76,
+ [0][1][RTW89_ETSI][46] = 16,
+ [0][1][RTW89_MKK][46] = 127,
+ [0][1][RTW89_IC][46] = 76,
+ [0][1][RTW89_KCC][46] = 32,
+ [0][1][RTW89_ACMA][46] = 76,
+ [0][1][RTW89_CHILE][46] = 42,
+ [0][1][RTW89_UKRAINE][46] = 16,
+ [0][1][RTW89_MEXICO][46] = 76,
+ [0][1][RTW89_CN][46] = 50,
+ [0][1][RTW89_QATAR][46] = 16,
+ [1][0][RTW89_FCC][0] = 62,
+ [1][0][RTW89_ETSI][0] = 36,
+ [1][0][RTW89_MKK][0] = 36,
+ [1][0][RTW89_IC][0] = 34,
+ [1][0][RTW89_KCC][0] = 58,
+ [1][0][RTW89_ACMA][0] = 36,
+ [1][0][RTW89_CHILE][0] = 30,
+ [1][0][RTW89_UKRAINE][0] = 36,
+ [1][0][RTW89_MEXICO][0] = 62,
+ [1][0][RTW89_CN][0] = 36,
+ [1][0][RTW89_QATAR][0] = 36,
+ [1][0][RTW89_FCC][2] = 62,
+ [1][0][RTW89_ETSI][2] = 36,
+ [1][0][RTW89_MKK][2] = 36,
+ [1][0][RTW89_IC][2] = 34,
+ [1][0][RTW89_KCC][2] = 58,
+ [1][0][RTW89_ACMA][2] = 36,
+ [1][0][RTW89_CHILE][2] = 30,
+ [1][0][RTW89_UKRAINE][2] = 36,
+ [1][0][RTW89_MEXICO][2] = 62,
+ [1][0][RTW89_CN][2] = 36,
+ [1][0][RTW89_QATAR][2] = 36,
+ [1][0][RTW89_FCC][4] = 62,
+ [1][0][RTW89_ETSI][4] = 36,
+ [1][0][RTW89_MKK][4] = 36,
+ [1][0][RTW89_IC][4] = 34,
+ [1][0][RTW89_KCC][4] = 58,
+ [1][0][RTW89_ACMA][4] = 36,
+ [1][0][RTW89_CHILE][4] = 30,
+ [1][0][RTW89_UKRAINE][4] = 36,
+ [1][0][RTW89_MEXICO][4] = 62,
+ [1][0][RTW89_CN][4] = 36,
+ [1][0][RTW89_QATAR][4] = 36,
+ [1][0][RTW89_FCC][6] = 62,
+ [1][0][RTW89_ETSI][6] = 36,
+ [1][0][RTW89_MKK][6] = 36,
+ [1][0][RTW89_IC][6] = 34,
+ [1][0][RTW89_KCC][6] = 32,
+ [1][0][RTW89_ACMA][6] = 36,
+ [1][0][RTW89_CHILE][6] = 30,
+ [1][0][RTW89_UKRAINE][6] = 36,
+ [1][0][RTW89_MEXICO][6] = 62,
+ [1][0][RTW89_CN][6] = 36,
+ [1][0][RTW89_QATAR][6] = 36,
+ [1][0][RTW89_FCC][8] = 62,
+ [1][0][RTW89_ETSI][8] = 36,
+ [1][0][RTW89_MKK][8] = 36,
+ [1][0][RTW89_IC][8] = 62,
+ [1][0][RTW89_KCC][8] = 58,
+ [1][0][RTW89_ACMA][8] = 36,
+ [1][0][RTW89_CHILE][8] = 54,
+ [1][0][RTW89_UKRAINE][8] = 36,
+ [1][0][RTW89_MEXICO][8] = 62,
+ [1][0][RTW89_CN][8] = 36,
+ [1][0][RTW89_QATAR][8] = 36,
+ [1][0][RTW89_FCC][10] = 62,
+ [1][0][RTW89_ETSI][10] = 36,
+ [1][0][RTW89_MKK][10] = 36,
+ [1][0][RTW89_IC][10] = 62,
+ [1][0][RTW89_KCC][10] = 58,
+ [1][0][RTW89_ACMA][10] = 36,
+ [1][0][RTW89_CHILE][10] = 54,
+ [1][0][RTW89_UKRAINE][10] = 36,
+ [1][0][RTW89_MEXICO][10] = 62,
+ [1][0][RTW89_CN][10] = 36,
+ [1][0][RTW89_QATAR][10] = 36,
+ [1][0][RTW89_FCC][12] = 62,
+ [1][0][RTW89_ETSI][12] = 36,
+ [1][0][RTW89_MKK][12] = 36,
+ [1][0][RTW89_IC][12] = 62,
+ [1][0][RTW89_KCC][12] = 58,
+ [1][0][RTW89_ACMA][12] = 36,
+ [1][0][RTW89_CHILE][12] = 54,
+ [1][0][RTW89_UKRAINE][12] = 36,
+ [1][0][RTW89_MEXICO][12] = 62,
+ [1][0][RTW89_CN][12] = 36,
+ [1][0][RTW89_QATAR][12] = 36,
+ [1][0][RTW89_FCC][14] = 62,
+ [1][0][RTW89_ETSI][14] = 36,
+ [1][0][RTW89_MKK][14] = 36,
+ [1][0][RTW89_IC][14] = 62,
+ [1][0][RTW89_KCC][14] = 58,
+ [1][0][RTW89_ACMA][14] = 36,
+ [1][0][RTW89_CHILE][14] = 54,
+ [1][0][RTW89_UKRAINE][14] = 36,
+ [1][0][RTW89_MEXICO][14] = 62,
+ [1][0][RTW89_CN][14] = 36,
+ [1][0][RTW89_QATAR][14] = 36,
+ [1][0][RTW89_FCC][15] = 62,
+ [1][0][RTW89_ETSI][15] = 36,
+ [1][0][RTW89_MKK][15] = 58,
+ [1][0][RTW89_IC][15] = 62,
+ [1][0][RTW89_KCC][15] = 58,
+ [1][0][RTW89_ACMA][15] = 36,
+ [1][0][RTW89_CHILE][15] = 54,
+ [1][0][RTW89_UKRAINE][15] = 36,
+ [1][0][RTW89_MEXICO][15] = 62,
+ [1][0][RTW89_CN][15] = 127,
+ [1][0][RTW89_QATAR][15] = 36,
+ [1][0][RTW89_FCC][17] = 62,
+ [1][0][RTW89_ETSI][17] = 36,
+ [1][0][RTW89_MKK][17] = 58,
+ [1][0][RTW89_IC][17] = 62,
+ [1][0][RTW89_KCC][17] = 58,
+ [1][0][RTW89_ACMA][17] = 36,
+ [1][0][RTW89_CHILE][17] = 54,
+ [1][0][RTW89_UKRAINE][17] = 36,
+ [1][0][RTW89_MEXICO][17] = 62,
+ [1][0][RTW89_CN][17] = 127,
+ [1][0][RTW89_QATAR][17] = 36,
+ [1][0][RTW89_FCC][19] = 62,
+ [1][0][RTW89_ETSI][19] = 36,
+ [1][0][RTW89_MKK][19] = 58,
+ [1][0][RTW89_IC][19] = 62,
+ [1][0][RTW89_KCC][19] = 58,
+ [1][0][RTW89_ACMA][19] = 36,
+ [1][0][RTW89_CHILE][19] = 54,
+ [1][0][RTW89_UKRAINE][19] = 36,
+ [1][0][RTW89_MEXICO][19] = 62,
+ [1][0][RTW89_CN][19] = 127,
+ [1][0][RTW89_QATAR][19] = 36,
+ [1][0][RTW89_FCC][21] = 62,
+ [1][0][RTW89_ETSI][21] = 36,
+ [1][0][RTW89_MKK][21] = 58,
+ [1][0][RTW89_IC][21] = 62,
+ [1][0][RTW89_KCC][21] = 58,
+ [1][0][RTW89_ACMA][21] = 36,
+ [1][0][RTW89_CHILE][21] = 54,
+ [1][0][RTW89_UKRAINE][21] = 36,
+ [1][0][RTW89_MEXICO][21] = 62,
+ [1][0][RTW89_CN][21] = 127,
+ [1][0][RTW89_QATAR][21] = 36,
+ [1][0][RTW89_FCC][23] = 62,
+ [1][0][RTW89_ETSI][23] = 36,
+ [1][0][RTW89_MKK][23] = 58,
+ [1][0][RTW89_IC][23] = 62,
+ [1][0][RTW89_KCC][23] = 58,
+ [1][0][RTW89_ACMA][23] = 36,
+ [1][0][RTW89_CHILE][23] = 54,
+ [1][0][RTW89_UKRAINE][23] = 36,
+ [1][0][RTW89_MEXICO][23] = 62,
+ [1][0][RTW89_CN][23] = 127,
+ [1][0][RTW89_QATAR][23] = 36,
+ [1][0][RTW89_FCC][25] = 62,
+ [1][0][RTW89_ETSI][25] = 36,
+ [1][0][RTW89_MKK][25] = 58,
+ [1][0][RTW89_IC][25] = 127,
+ [1][0][RTW89_KCC][25] = 58,
+ [1][0][RTW89_ACMA][25] = 127,
+ [1][0][RTW89_CHILE][25] = 54,
+ [1][0][RTW89_UKRAINE][25] = 36,
+ [1][0][RTW89_MEXICO][25] = 62,
+ [1][0][RTW89_CN][25] = 127,
+ [1][0][RTW89_QATAR][25] = 36,
+ [1][0][RTW89_FCC][27] = 62,
+ [1][0][RTW89_ETSI][27] = 36,
+ [1][0][RTW89_MKK][27] = 58,
+ [1][0][RTW89_IC][27] = 127,
+ [1][0][RTW89_KCC][27] = 58,
+ [1][0][RTW89_ACMA][27] = 127,
+ [1][0][RTW89_CHILE][27] = 54,
+ [1][0][RTW89_UKRAINE][27] = 36,
+ [1][0][RTW89_MEXICO][27] = 62,
+ [1][0][RTW89_CN][27] = 127,
+ [1][0][RTW89_QATAR][27] = 36,
+ [1][0][RTW89_FCC][29] = 62,
+ [1][0][RTW89_ETSI][29] = 36,
+ [1][0][RTW89_MKK][29] = 58,
+ [1][0][RTW89_IC][29] = 127,
+ [1][0][RTW89_KCC][29] = 58,
+ [1][0][RTW89_ACMA][29] = 127,
+ [1][0][RTW89_CHILE][29] = 54,
+ [1][0][RTW89_UKRAINE][29] = 36,
+ [1][0][RTW89_MEXICO][29] = 62,
+ [1][0][RTW89_CN][29] = 127,
+ [1][0][RTW89_QATAR][29] = 36,
+ [1][0][RTW89_FCC][31] = 62,
+ [1][0][RTW89_ETSI][31] = 36,
+ [1][0][RTW89_MKK][31] = 58,
+ [1][0][RTW89_IC][31] = 62,
+ [1][0][RTW89_KCC][31] = 58,
+ [1][0][RTW89_ACMA][31] = 36,
+ [1][0][RTW89_CHILE][31] = 54,
+ [1][0][RTW89_UKRAINE][31] = 36,
+ [1][0][RTW89_MEXICO][31] = 62,
+ [1][0][RTW89_CN][31] = 127,
+ [1][0][RTW89_QATAR][31] = 36,
+ [1][0][RTW89_FCC][33] = 62,
+ [1][0][RTW89_ETSI][33] = 36,
+ [1][0][RTW89_MKK][33] = 58,
+ [1][0][RTW89_IC][33] = 62,
+ [1][0][RTW89_KCC][33] = 58,
+ [1][0][RTW89_ACMA][33] = 36,
+ [1][0][RTW89_CHILE][33] = 54,
+ [1][0][RTW89_UKRAINE][33] = 36,
+ [1][0][RTW89_MEXICO][33] = 62,
+ [1][0][RTW89_CN][33] = 127,
+ [1][0][RTW89_QATAR][33] = 36,
+ [1][0][RTW89_FCC][35] = 62,
+ [1][0][RTW89_ETSI][35] = 36,
+ [1][0][RTW89_MKK][35] = 58,
+ [1][0][RTW89_IC][35] = 62,
+ [1][0][RTW89_KCC][35] = 58,
+ [1][0][RTW89_ACMA][35] = 36,
+ [1][0][RTW89_CHILE][35] = 54,
+ [1][0][RTW89_UKRAINE][35] = 36,
+ [1][0][RTW89_MEXICO][35] = 62,
+ [1][0][RTW89_CN][35] = 127,
+ [1][0][RTW89_QATAR][35] = 36,
+ [1][0][RTW89_FCC][37] = 62,
+ [1][0][RTW89_ETSI][37] = 127,
+ [1][0][RTW89_MKK][37] = 58,
+ [1][0][RTW89_IC][37] = 62,
+ [1][0][RTW89_KCC][37] = 58,
+ [1][0][RTW89_ACMA][37] = 62,
+ [1][0][RTW89_CHILE][37] = 54,
+ [1][0][RTW89_UKRAINE][37] = 127,
+ [1][0][RTW89_MEXICO][37] = 62,
+ [1][0][RTW89_CN][37] = 127,
+ [1][0][RTW89_QATAR][37] = 127,
+ [1][0][RTW89_FCC][38] = 76,
+ [1][0][RTW89_ETSI][38] = 28,
+ [1][0][RTW89_MKK][38] = 127,
+ [1][0][RTW89_IC][38] = 76,
+ [1][0][RTW89_KCC][38] = 58,
+ [1][0][RTW89_ACMA][38] = 76,
+ [1][0][RTW89_CHILE][38] = 54,
+ [1][0][RTW89_UKRAINE][38] = 28,
+ [1][0][RTW89_MEXICO][38] = 76,
+ [1][0][RTW89_CN][38] = 74,
+ [1][0][RTW89_QATAR][38] = 28,
+ [1][0][RTW89_FCC][40] = 76,
+ [1][0][RTW89_ETSI][40] = 28,
+ [1][0][RTW89_MKK][40] = 127,
+ [1][0][RTW89_IC][40] = 76,
+ [1][0][RTW89_KCC][40] = 58,
+ [1][0][RTW89_ACMA][40] = 76,
+ [1][0][RTW89_CHILE][40] = 54,
+ [1][0][RTW89_UKRAINE][40] = 28,
+ [1][0][RTW89_MEXICO][40] = 76,
+ [1][0][RTW89_CN][40] = 74,
+ [1][0][RTW89_QATAR][40] = 28,
+ [1][0][RTW89_FCC][42] = 76,
+ [1][0][RTW89_ETSI][42] = 28,
+ [1][0][RTW89_MKK][42] = 127,
+ [1][0][RTW89_IC][42] = 76,
+ [1][0][RTW89_KCC][42] = 58,
+ [1][0][RTW89_ACMA][42] = 76,
+ [1][0][RTW89_CHILE][42] = 54,
+ [1][0][RTW89_UKRAINE][42] = 28,
+ [1][0][RTW89_MEXICO][42] = 76,
+ [1][0][RTW89_CN][42] = 74,
+ [1][0][RTW89_QATAR][42] = 28,
+ [1][0][RTW89_FCC][44] = 76,
+ [1][0][RTW89_ETSI][44] = 28,
+ [1][0][RTW89_MKK][44] = 127,
+ [1][0][RTW89_IC][44] = 76,
+ [1][0][RTW89_KCC][44] = 58,
+ [1][0][RTW89_ACMA][44] = 76,
+ [1][0][RTW89_CHILE][44] = 54,
+ [1][0][RTW89_UKRAINE][44] = 28,
+ [1][0][RTW89_MEXICO][44] = 76,
+ [1][0][RTW89_CN][44] = 74,
+ [1][0][RTW89_QATAR][44] = 28,
+ [1][0][RTW89_FCC][46] = 76,
+ [1][0][RTW89_ETSI][46] = 28,
+ [1][0][RTW89_MKK][46] = 127,
+ [1][0][RTW89_IC][46] = 76,
+ [1][0][RTW89_KCC][46] = 58,
+ [1][0][RTW89_ACMA][46] = 76,
+ [1][0][RTW89_CHILE][46] = 54,
+ [1][0][RTW89_UKRAINE][46] = 28,
+ [1][0][RTW89_MEXICO][46] = 76,
+ [1][0][RTW89_CN][46] = 74,
+ [1][0][RTW89_QATAR][46] = 28,
+ [1][1][RTW89_FCC][0] = 46,
+ [1][1][RTW89_ETSI][0] = 22,
+ [1][1][RTW89_MKK][0] = 24,
+ [1][1][RTW89_IC][0] = 18,
+ [1][1][RTW89_KCC][0] = 44,
+ [1][1][RTW89_ACMA][0] = 22,
+ [1][1][RTW89_CHILE][0] = 18,
+ [1][1][RTW89_UKRAINE][0] = 22,
+ [1][1][RTW89_MEXICO][0] = 46,
+ [1][1][RTW89_CN][0] = 22,
+ [1][1][RTW89_QATAR][0] = 22,
+ [1][1][RTW89_FCC][2] = 46,
+ [1][1][RTW89_ETSI][2] = 22,
+ [1][1][RTW89_MKK][2] = 24,
+ [1][1][RTW89_IC][2] = 18,
+ [1][1][RTW89_KCC][2] = 44,
+ [1][1][RTW89_ACMA][2] = 22,
+ [1][1][RTW89_CHILE][2] = 18,
+ [1][1][RTW89_UKRAINE][2] = 22,
+ [1][1][RTW89_MEXICO][2] = 46,
+ [1][1][RTW89_CN][2] = 22,
+ [1][1][RTW89_QATAR][2] = 22,
+ [1][1][RTW89_FCC][4] = 46,
+ [1][1][RTW89_ETSI][4] = 22,
+ [1][1][RTW89_MKK][4] = 24,
+ [1][1][RTW89_IC][4] = 18,
+ [1][1][RTW89_KCC][4] = 44,
+ [1][1][RTW89_ACMA][4] = 22,
+ [1][1][RTW89_CHILE][4] = 18,
+ [1][1][RTW89_UKRAINE][4] = 22,
+ [1][1][RTW89_MEXICO][4] = 46,
+ [1][1][RTW89_CN][4] = 22,
+ [1][1][RTW89_QATAR][4] = 22,
+ [1][1][RTW89_FCC][6] = 46,
+ [1][1][RTW89_ETSI][6] = 22,
+ [1][1][RTW89_MKK][6] = 24,
+ [1][1][RTW89_IC][6] = 18,
+ [1][1][RTW89_KCC][6] = 16,
+ [1][1][RTW89_ACMA][6] = 22,
+ [1][1][RTW89_CHILE][6] = 18,
+ [1][1][RTW89_UKRAINE][6] = 22,
+ [1][1][RTW89_MEXICO][6] = 46,
+ [1][1][RTW89_CN][6] = 22,
+ [1][1][RTW89_QATAR][6] = 22,
+ [1][1][RTW89_FCC][8] = 46,
+ [1][1][RTW89_ETSI][8] = 22,
+ [1][1][RTW89_MKK][8] = 24,
+ [1][1][RTW89_IC][8] = 46,
+ [1][1][RTW89_KCC][8] = 44,
+ [1][1][RTW89_ACMA][8] = 22,
+ [1][1][RTW89_CHILE][8] = 42,
+ [1][1][RTW89_UKRAINE][8] = 22,
+ [1][1][RTW89_MEXICO][8] = 46,
+ [1][1][RTW89_CN][8] = 22,
+ [1][1][RTW89_QATAR][8] = 22,
+ [1][1][RTW89_FCC][10] = 46,
+ [1][1][RTW89_ETSI][10] = 22,
+ [1][1][RTW89_MKK][10] = 24,
+ [1][1][RTW89_IC][10] = 46,
+ [1][1][RTW89_KCC][10] = 44,
+ [1][1][RTW89_ACMA][10] = 22,
+ [1][1][RTW89_CHILE][10] = 42,
+ [1][1][RTW89_UKRAINE][10] = 22,
+ [1][1][RTW89_MEXICO][10] = 46,
+ [1][1][RTW89_CN][10] = 22,
+ [1][1][RTW89_QATAR][10] = 22,
+ [1][1][RTW89_FCC][12] = 46,
+ [1][1][RTW89_ETSI][12] = 22,
+ [1][1][RTW89_MKK][12] = 24,
+ [1][1][RTW89_IC][12] = 46,
+ [1][1][RTW89_KCC][12] = 44,
+ [1][1][RTW89_ACMA][12] = 22,
+ [1][1][RTW89_CHILE][12] = 42,
+ [1][1][RTW89_UKRAINE][12] = 22,
+ [1][1][RTW89_MEXICO][12] = 46,
+ [1][1][RTW89_CN][12] = 22,
+ [1][1][RTW89_QATAR][12] = 22,
+ [1][1][RTW89_FCC][14] = 46,
+ [1][1][RTW89_ETSI][14] = 22,
+ [1][1][RTW89_MKK][14] = 24,
+ [1][1][RTW89_IC][14] = 46,
+ [1][1][RTW89_KCC][14] = 44,
+ [1][1][RTW89_ACMA][14] = 22,
+ [1][1][RTW89_CHILE][14] = 42,
+ [1][1][RTW89_UKRAINE][14] = 22,
+ [1][1][RTW89_MEXICO][14] = 46,
+ [1][1][RTW89_CN][14] = 22,
+ [1][1][RTW89_QATAR][14] = 22,
+ [1][1][RTW89_FCC][15] = 46,
+ [1][1][RTW89_ETSI][15] = 22,
+ [1][1][RTW89_MKK][15] = 46,
+ [1][1][RTW89_IC][15] = 46,
+ [1][1][RTW89_KCC][15] = 44,
+ [1][1][RTW89_ACMA][15] = 22,
+ [1][1][RTW89_CHILE][15] = 42,
+ [1][1][RTW89_UKRAINE][15] = 22,
+ [1][1][RTW89_MEXICO][15] = 46,
+ [1][1][RTW89_CN][15] = 127,
+ [1][1][RTW89_QATAR][15] = 22,
+ [1][1][RTW89_FCC][17] = 46,
+ [1][1][RTW89_ETSI][17] = 22,
+ [1][1][RTW89_MKK][17] = 46,
+ [1][1][RTW89_IC][17] = 46,
+ [1][1][RTW89_KCC][17] = 44,
+ [1][1][RTW89_ACMA][17] = 22,
+ [1][1][RTW89_CHILE][17] = 42,
+ [1][1][RTW89_UKRAINE][17] = 22,
+ [1][1][RTW89_MEXICO][17] = 46,
+ [1][1][RTW89_CN][17] = 127,
+ [1][1][RTW89_QATAR][17] = 22,
+ [1][1][RTW89_FCC][19] = 46,
+ [1][1][RTW89_ETSI][19] = 22,
+ [1][1][RTW89_MKK][19] = 46,
+ [1][1][RTW89_IC][19] = 46,
+ [1][1][RTW89_KCC][19] = 44,
+ [1][1][RTW89_ACMA][19] = 22,
+ [1][1][RTW89_CHILE][19] = 42,
+ [1][1][RTW89_UKRAINE][19] = 22,
+ [1][1][RTW89_MEXICO][19] = 46,
+ [1][1][RTW89_CN][19] = 127,
+ [1][1][RTW89_QATAR][19] = 22,
+ [1][1][RTW89_FCC][21] = 46,
+ [1][1][RTW89_ETSI][21] = 22,
+ [1][1][RTW89_MKK][21] = 46,
+ [1][1][RTW89_IC][21] = 46,
+ [1][1][RTW89_KCC][21] = 44,
+ [1][1][RTW89_ACMA][21] = 22,
+ [1][1][RTW89_CHILE][21] = 42,
+ [1][1][RTW89_UKRAINE][21] = 22,
+ [1][1][RTW89_MEXICO][21] = 46,
+ [1][1][RTW89_CN][21] = 127,
+ [1][1][RTW89_QATAR][21] = 22,
+ [1][1][RTW89_FCC][23] = 46,
+ [1][1][RTW89_ETSI][23] = 22,
+ [1][1][RTW89_MKK][23] = 46,
+ [1][1][RTW89_IC][23] = 46,
+ [1][1][RTW89_KCC][23] = 44,
+ [1][1][RTW89_ACMA][23] = 22,
+ [1][1][RTW89_CHILE][23] = 42,
+ [1][1][RTW89_UKRAINE][23] = 22,
+ [1][1][RTW89_MEXICO][23] = 46,
+ [1][1][RTW89_CN][23] = 127,
+ [1][1][RTW89_QATAR][23] = 22,
+ [1][1][RTW89_FCC][25] = 46,
+ [1][1][RTW89_ETSI][25] = 22,
+ [1][1][RTW89_MKK][25] = 46,
+ [1][1][RTW89_IC][25] = 127,
+ [1][1][RTW89_KCC][25] = 44,
+ [1][1][RTW89_ACMA][25] = 127,
+ [1][1][RTW89_CHILE][25] = 42,
+ [1][1][RTW89_UKRAINE][25] = 22,
+ [1][1][RTW89_MEXICO][25] = 46,
+ [1][1][RTW89_CN][25] = 127,
+ [1][1][RTW89_QATAR][25] = 22,
+ [1][1][RTW89_FCC][27] = 46,
+ [1][1][RTW89_ETSI][27] = 22,
+ [1][1][RTW89_MKK][27] = 46,
+ [1][1][RTW89_IC][27] = 127,
+ [1][1][RTW89_KCC][27] = 44,
+ [1][1][RTW89_ACMA][27] = 127,
+ [1][1][RTW89_CHILE][27] = 42,
+ [1][1][RTW89_UKRAINE][27] = 22,
+ [1][1][RTW89_MEXICO][27] = 46,
+ [1][1][RTW89_CN][27] = 127,
+ [1][1][RTW89_QATAR][27] = 22,
+ [1][1][RTW89_FCC][29] = 46,
+ [1][1][RTW89_ETSI][29] = 22,
+ [1][1][RTW89_MKK][29] = 46,
+ [1][1][RTW89_IC][29] = 127,
+ [1][1][RTW89_KCC][29] = 44,
+ [1][1][RTW89_ACMA][29] = 127,
+ [1][1][RTW89_CHILE][29] = 42,
+ [1][1][RTW89_UKRAINE][29] = 22,
+ [1][1][RTW89_MEXICO][29] = 46,
+ [1][1][RTW89_CN][29] = 127,
+ [1][1][RTW89_QATAR][29] = 22,
+ [1][1][RTW89_FCC][31] = 46,
+ [1][1][RTW89_ETSI][31] = 22,
+ [1][1][RTW89_MKK][31] = 46,
+ [1][1][RTW89_IC][31] = 46,
+ [1][1][RTW89_KCC][31] = 44,
+ [1][1][RTW89_ACMA][31] = 22,
+ [1][1][RTW89_CHILE][31] = 42,
+ [1][1][RTW89_UKRAINE][31] = 22,
+ [1][1][RTW89_MEXICO][31] = 46,
+ [1][1][RTW89_CN][31] = 127,
+ [1][1][RTW89_QATAR][31] = 22,
+ [1][1][RTW89_FCC][33] = 46,
+ [1][1][RTW89_ETSI][33] = 22,
+ [1][1][RTW89_MKK][33] = 46,
+ [1][1][RTW89_IC][33] = 46,
+ [1][1][RTW89_KCC][33] = 44,
+ [1][1][RTW89_ACMA][33] = 22,
+ [1][1][RTW89_CHILE][33] = 42,
+ [1][1][RTW89_UKRAINE][33] = 22,
+ [1][1][RTW89_MEXICO][33] = 46,
+ [1][1][RTW89_CN][33] = 127,
+ [1][1][RTW89_QATAR][33] = 22,
+ [1][1][RTW89_FCC][35] = 46,
+ [1][1][RTW89_ETSI][35] = 22,
+ [1][1][RTW89_MKK][35] = 46,
+ [1][1][RTW89_IC][35] = 46,
+ [1][1][RTW89_KCC][35] = 44,
+ [1][1][RTW89_ACMA][35] = 22,
+ [1][1][RTW89_CHILE][35] = 42,
+ [1][1][RTW89_UKRAINE][35] = 22,
+ [1][1][RTW89_MEXICO][35] = 46,
+ [1][1][RTW89_CN][35] = 127,
+ [1][1][RTW89_QATAR][35] = 22,
+ [1][1][RTW89_FCC][37] = 46,
+ [1][1][RTW89_ETSI][37] = 127,
+ [1][1][RTW89_MKK][37] = 46,
+ [1][1][RTW89_IC][37] = 46,
+ [1][1][RTW89_KCC][37] = 44,
+ [1][1][RTW89_ACMA][37] = 50,
+ [1][1][RTW89_CHILE][37] = 42,
+ [1][1][RTW89_UKRAINE][37] = 127,
+ [1][1][RTW89_MEXICO][37] = 46,
+ [1][1][RTW89_CN][37] = 127,
+ [1][1][RTW89_QATAR][37] = 127,
+ [1][1][RTW89_FCC][38] = 74,
+ [1][1][RTW89_ETSI][38] = 16,
+ [1][1][RTW89_MKK][38] = 127,
+ [1][1][RTW89_IC][38] = 74,
+ [1][1][RTW89_KCC][38] = 44,
+ [1][1][RTW89_ACMA][38] = 76,
+ [1][1][RTW89_CHILE][38] = 42,
+ [1][1][RTW89_UKRAINE][38] = 16,
+ [1][1][RTW89_MEXICO][38] = 74,
+ [1][1][RTW89_CN][38] = 62,
+ [1][1][RTW89_QATAR][38] = 16,
+ [1][1][RTW89_FCC][40] = 76,
+ [1][1][RTW89_ETSI][40] = 16,
+ [1][1][RTW89_MKK][40] = 127,
+ [1][1][RTW89_IC][40] = 76,
+ [1][1][RTW89_KCC][40] = 44,
+ [1][1][RTW89_ACMA][40] = 76,
+ [1][1][RTW89_CHILE][40] = 42,
+ [1][1][RTW89_UKRAINE][40] = 16,
+ [1][1][RTW89_MEXICO][40] = 76,
+ [1][1][RTW89_CN][40] = 62,
+ [1][1][RTW89_QATAR][40] = 16,
+ [1][1][RTW89_FCC][42] = 76,
+ [1][1][RTW89_ETSI][42] = 16,
+ [1][1][RTW89_MKK][42] = 127,
+ [1][1][RTW89_IC][42] = 76,
+ [1][1][RTW89_KCC][42] = 44,
+ [1][1][RTW89_ACMA][42] = 76,
+ [1][1][RTW89_CHILE][42] = 42,
+ [1][1][RTW89_UKRAINE][42] = 16,
+ [1][1][RTW89_MEXICO][42] = 76,
+ [1][1][RTW89_CN][42] = 62,
+ [1][1][RTW89_QATAR][42] = 16,
+ [1][1][RTW89_FCC][44] = 76,
+ [1][1][RTW89_ETSI][44] = 16,
+ [1][1][RTW89_MKK][44] = 127,
+ [1][1][RTW89_IC][44] = 76,
+ [1][1][RTW89_KCC][44] = 44,
+ [1][1][RTW89_ACMA][44] = 76,
+ [1][1][RTW89_CHILE][44] = 42,
+ [1][1][RTW89_UKRAINE][44] = 16,
+ [1][1][RTW89_MEXICO][44] = 76,
+ [1][1][RTW89_CN][44] = 62,
+ [1][1][RTW89_QATAR][44] = 16,
+ [1][1][RTW89_FCC][46] = 76,
+ [1][1][RTW89_ETSI][46] = 16,
+ [1][1][RTW89_MKK][46] = 127,
+ [1][1][RTW89_IC][46] = 76,
+ [1][1][RTW89_KCC][46] = 44,
+ [1][1][RTW89_ACMA][46] = 76,
+ [1][1][RTW89_CHILE][46] = 42,
+ [1][1][RTW89_UKRAINE][46] = 16,
+ [1][1][RTW89_MEXICO][46] = 76,
+ [1][1][RTW89_CN][46] = 62,
+ [1][1][RTW89_QATAR][46] = 16,
+ [2][0][RTW89_FCC][0] = 74,
+ [2][0][RTW89_ETSI][0] = 46,
+ [2][0][RTW89_MKK][0] = 50,
+ [2][0][RTW89_IC][0] = 46,
+ [2][0][RTW89_KCC][0] = 70,
+ [2][0][RTW89_ACMA][0] = 46,
+ [2][0][RTW89_CHILE][0] = 30,
+ [2][0][RTW89_UKRAINE][0] = 46,
+ [2][0][RTW89_MEXICO][0] = 62,
+ [2][0][RTW89_CN][0] = 46,
+ [2][0][RTW89_QATAR][0] = 46,
+ [2][0][RTW89_FCC][2] = 74,
+ [2][0][RTW89_ETSI][2] = 46,
+ [2][0][RTW89_MKK][2] = 50,
+ [2][0][RTW89_IC][2] = 46,
+ [2][0][RTW89_KCC][2] = 70,
+ [2][0][RTW89_ACMA][2] = 46,
+ [2][0][RTW89_CHILE][2] = 30,
+ [2][0][RTW89_UKRAINE][2] = 46,
+ [2][0][RTW89_MEXICO][2] = 62,
+ [2][0][RTW89_CN][2] = 46,
+ [2][0][RTW89_QATAR][2] = 46,
+ [2][0][RTW89_FCC][4] = 74,
+ [2][0][RTW89_ETSI][4] = 46,
+ [2][0][RTW89_MKK][4] = 50,
+ [2][0][RTW89_IC][4] = 46,
+ [2][0][RTW89_KCC][4] = 70,
+ [2][0][RTW89_ACMA][4] = 46,
+ [2][0][RTW89_CHILE][4] = 30,
+ [2][0][RTW89_UKRAINE][4] = 46,
+ [2][0][RTW89_MEXICO][4] = 62,
+ [2][0][RTW89_CN][4] = 46,
+ [2][0][RTW89_QATAR][4] = 46,
+ [2][0][RTW89_FCC][6] = 74,
+ [2][0][RTW89_ETSI][6] = 46,
+ [2][0][RTW89_MKK][6] = 50,
+ [2][0][RTW89_IC][6] = 46,
+ [2][0][RTW89_KCC][6] = 44,
+ [2][0][RTW89_ACMA][6] = 46,
+ [2][0][RTW89_CHILE][6] = 30,
+ [2][0][RTW89_UKRAINE][6] = 46,
+ [2][0][RTW89_MEXICO][6] = 62,
+ [2][0][RTW89_CN][6] = 46,
+ [2][0][RTW89_QATAR][6] = 46,
+ [2][0][RTW89_FCC][8] = 74,
+ [2][0][RTW89_ETSI][8] = 46,
+ [2][0][RTW89_MKK][8] = 50,
+ [2][0][RTW89_IC][8] = 66,
+ [2][0][RTW89_KCC][8] = 70,
+ [2][0][RTW89_ACMA][8] = 46,
+ [2][0][RTW89_CHILE][8] = 54,
+ [2][0][RTW89_UKRAINE][8] = 46,
+ [2][0][RTW89_MEXICO][8] = 74,
+ [2][0][RTW89_CN][8] = 46,
+ [2][0][RTW89_QATAR][8] = 46,
+ [2][0][RTW89_FCC][10] = 74,
+ [2][0][RTW89_ETSI][10] = 46,
+ [2][0][RTW89_MKK][10] = 50,
+ [2][0][RTW89_IC][10] = 66,
+ [2][0][RTW89_KCC][10] = 70,
+ [2][0][RTW89_ACMA][10] = 46,
+ [2][0][RTW89_CHILE][10] = 54,
+ [2][0][RTW89_UKRAINE][10] = 46,
+ [2][0][RTW89_MEXICO][10] = 74,
+ [2][0][RTW89_CN][10] = 46,
+ [2][0][RTW89_QATAR][10] = 46,
+ [2][0][RTW89_FCC][12] = 74,
+ [2][0][RTW89_ETSI][12] = 46,
+ [2][0][RTW89_MKK][12] = 50,
+ [2][0][RTW89_IC][12] = 66,
+ [2][0][RTW89_KCC][12] = 70,
+ [2][0][RTW89_ACMA][12] = 46,
+ [2][0][RTW89_CHILE][12] = 54,
+ [2][0][RTW89_UKRAINE][12] = 46,
+ [2][0][RTW89_MEXICO][12] = 74,
+ [2][0][RTW89_CN][12] = 46,
+ [2][0][RTW89_QATAR][12] = 46,
+ [2][0][RTW89_FCC][14] = 74,
+ [2][0][RTW89_ETSI][14] = 46,
+ [2][0][RTW89_MKK][14] = 50,
+ [2][0][RTW89_IC][14] = 66,
+ [2][0][RTW89_KCC][14] = 70,
+ [2][0][RTW89_ACMA][14] = 46,
+ [2][0][RTW89_CHILE][14] = 54,
+ [2][0][RTW89_UKRAINE][14] = 46,
+ [2][0][RTW89_MEXICO][14] = 74,
+ [2][0][RTW89_CN][14] = 46,
+ [2][0][RTW89_QATAR][14] = 46,
+ [2][0][RTW89_FCC][15] = 74,
+ [2][0][RTW89_ETSI][15] = 46,
+ [2][0][RTW89_MKK][15] = 70,
+ [2][0][RTW89_IC][15] = 74,
+ [2][0][RTW89_KCC][15] = 70,
+ [2][0][RTW89_ACMA][15] = 46,
+ [2][0][RTW89_CHILE][15] = 54,
+ [2][0][RTW89_UKRAINE][15] = 46,
+ [2][0][RTW89_MEXICO][15] = 74,
+ [2][0][RTW89_CN][15] = 127,
+ [2][0][RTW89_QATAR][15] = 46,
+ [2][0][RTW89_FCC][17] = 74,
+ [2][0][RTW89_ETSI][17] = 46,
+ [2][0][RTW89_MKK][17] = 70,
+ [2][0][RTW89_IC][17] = 74,
+ [2][0][RTW89_KCC][17] = 70,
+ [2][0][RTW89_ACMA][17] = 46,
+ [2][0][RTW89_CHILE][17] = 54,
+ [2][0][RTW89_UKRAINE][17] = 46,
+ [2][0][RTW89_MEXICO][17] = 74,
+ [2][0][RTW89_CN][17] = 127,
+ [2][0][RTW89_QATAR][17] = 46,
+ [2][0][RTW89_FCC][19] = 74,
+ [2][0][RTW89_ETSI][19] = 46,
+ [2][0][RTW89_MKK][19] = 70,
+ [2][0][RTW89_IC][19] = 74,
+ [2][0][RTW89_KCC][19] = 70,
+ [2][0][RTW89_ACMA][19] = 46,
+ [2][0][RTW89_CHILE][19] = 54,
+ [2][0][RTW89_UKRAINE][19] = 46,
+ [2][0][RTW89_MEXICO][19] = 74,
+ [2][0][RTW89_CN][19] = 127,
+ [2][0][RTW89_QATAR][19] = 46,
+ [2][0][RTW89_FCC][21] = 74,
+ [2][0][RTW89_ETSI][21] = 46,
+ [2][0][RTW89_MKK][21] = 70,
+ [2][0][RTW89_IC][21] = 74,
+ [2][0][RTW89_KCC][21] = 70,
+ [2][0][RTW89_ACMA][21] = 46,
+ [2][0][RTW89_CHILE][21] = 54,
+ [2][0][RTW89_UKRAINE][21] = 46,
+ [2][0][RTW89_MEXICO][21] = 74,
+ [2][0][RTW89_CN][21] = 127,
+ [2][0][RTW89_QATAR][21] = 46,
+ [2][0][RTW89_FCC][23] = 74,
+ [2][0][RTW89_ETSI][23] = 46,
+ [2][0][RTW89_MKK][23] = 70,
+ [2][0][RTW89_IC][23] = 74,
+ [2][0][RTW89_KCC][23] = 70,
+ [2][0][RTW89_ACMA][23] = 46,
+ [2][0][RTW89_CHILE][23] = 54,
+ [2][0][RTW89_UKRAINE][23] = 46,
+ [2][0][RTW89_MEXICO][23] = 74,
+ [2][0][RTW89_CN][23] = 127,
+ [2][0][RTW89_QATAR][23] = 46,
+ [2][0][RTW89_FCC][25] = 74,
+ [2][0][RTW89_ETSI][25] = 46,
+ [2][0][RTW89_MKK][25] = 70,
+ [2][0][RTW89_IC][25] = 127,
+ [2][0][RTW89_KCC][25] = 70,
+ [2][0][RTW89_ACMA][25] = 127,
+ [2][0][RTW89_CHILE][25] = 54,
+ [2][0][RTW89_UKRAINE][25] = 46,
+ [2][0][RTW89_MEXICO][25] = 74,
+ [2][0][RTW89_CN][25] = 127,
+ [2][0][RTW89_QATAR][25] = 46,
+ [2][0][RTW89_FCC][27] = 74,
+ [2][0][RTW89_ETSI][27] = 46,
+ [2][0][RTW89_MKK][27] = 70,
+ [2][0][RTW89_IC][27] = 127,
+ [2][0][RTW89_KCC][27] = 70,
+ [2][0][RTW89_ACMA][27] = 127,
+ [2][0][RTW89_CHILE][27] = 54,
+ [2][0][RTW89_UKRAINE][27] = 46,
+ [2][0][RTW89_MEXICO][27] = 74,
+ [2][0][RTW89_CN][27] = 127,
+ [2][0][RTW89_QATAR][27] = 46,
+ [2][0][RTW89_FCC][29] = 74,
+ [2][0][RTW89_ETSI][29] = 46,
+ [2][0][RTW89_MKK][29] = 70,
+ [2][0][RTW89_IC][29] = 127,
+ [2][0][RTW89_KCC][29] = 70,
+ [2][0][RTW89_ACMA][29] = 127,
+ [2][0][RTW89_CHILE][29] = 54,
+ [2][0][RTW89_UKRAINE][29] = 46,
+ [2][0][RTW89_MEXICO][29] = 74,
+ [2][0][RTW89_CN][29] = 127,
+ [2][0][RTW89_QATAR][29] = 46,
+ [2][0][RTW89_FCC][31] = 74,
+ [2][0][RTW89_ETSI][31] = 46,
+ [2][0][RTW89_MKK][31] = 70,
+ [2][0][RTW89_IC][31] = 74,
+ [2][0][RTW89_KCC][31] = 70,
+ [2][0][RTW89_ACMA][31] = 46,
+ [2][0][RTW89_CHILE][31] = 54,
+ [2][0][RTW89_UKRAINE][31] = 46,
+ [2][0][RTW89_MEXICO][31] = 74,
+ [2][0][RTW89_CN][31] = 127,
+ [2][0][RTW89_QATAR][31] = 46,
+ [2][0][RTW89_FCC][33] = 74,
+ [2][0][RTW89_ETSI][33] = 46,
+ [2][0][RTW89_MKK][33] = 70,
+ [2][0][RTW89_IC][33] = 74,
+ [2][0][RTW89_KCC][33] = 70,
+ [2][0][RTW89_ACMA][33] = 46,
+ [2][0][RTW89_CHILE][33] = 54,
+ [2][0][RTW89_UKRAINE][33] = 46,
+ [2][0][RTW89_MEXICO][33] = 74,
+ [2][0][RTW89_CN][33] = 127,
+ [2][0][RTW89_QATAR][33] = 46,
+ [2][0][RTW89_FCC][35] = 74,
+ [2][0][RTW89_ETSI][35] = 46,
+ [2][0][RTW89_MKK][35] = 70,
+ [2][0][RTW89_IC][35] = 74,
+ [2][0][RTW89_KCC][35] = 70,
+ [2][0][RTW89_ACMA][35] = 46,
+ [2][0][RTW89_CHILE][35] = 54,
+ [2][0][RTW89_UKRAINE][35] = 46,
+ [2][0][RTW89_MEXICO][35] = 74,
+ [2][0][RTW89_CN][35] = 127,
+ [2][0][RTW89_QATAR][35] = 46,
+ [2][0][RTW89_FCC][37] = 74,
+ [2][0][RTW89_ETSI][37] = 127,
+ [2][0][RTW89_MKK][37] = 70,
+ [2][0][RTW89_IC][37] = 74,
+ [2][0][RTW89_KCC][37] = 70,
+ [2][0][RTW89_ACMA][37] = 74,
+ [2][0][RTW89_CHILE][37] = 54,
+ [2][0][RTW89_UKRAINE][37] = 127,
+ [2][0][RTW89_MEXICO][37] = 74,
+ [2][0][RTW89_CN][37] = 127,
+ [2][0][RTW89_QATAR][37] = 127,
+ [2][0][RTW89_FCC][38] = 76,
+ [2][0][RTW89_ETSI][38] = 28,
+ [2][0][RTW89_MKK][38] = 127,
+ [2][0][RTW89_IC][38] = 76,
+ [2][0][RTW89_KCC][38] = 70,
+ [2][0][RTW89_ACMA][38] = 76,
+ [2][0][RTW89_CHILE][38] = 54,
+ [2][0][RTW89_UKRAINE][38] = 28,
+ [2][0][RTW89_MEXICO][38] = 76,
+ [2][0][RTW89_CN][38] = 76,
+ [2][0][RTW89_QATAR][38] = 28,
+ [2][0][RTW89_FCC][40] = 76,
+ [2][0][RTW89_ETSI][40] = 28,
+ [2][0][RTW89_MKK][40] = 127,
+ [2][0][RTW89_IC][40] = 76,
+ [2][0][RTW89_KCC][40] = 70,
+ [2][0][RTW89_ACMA][40] = 76,
+ [2][0][RTW89_CHILE][40] = 54,
+ [2][0][RTW89_UKRAINE][40] = 28,
+ [2][0][RTW89_MEXICO][40] = 76,
+ [2][0][RTW89_CN][40] = 76,
+ [2][0][RTW89_QATAR][40] = 28,
+ [2][0][RTW89_FCC][42] = 76,
+ [2][0][RTW89_ETSI][42] = 28,
+ [2][0][RTW89_MKK][42] = 127,
+ [2][0][RTW89_IC][42] = 76,
+ [2][0][RTW89_KCC][42] = 70,
+ [2][0][RTW89_ACMA][42] = 76,
+ [2][0][RTW89_CHILE][42] = 54,
+ [2][0][RTW89_UKRAINE][42] = 28,
+ [2][0][RTW89_MEXICO][42] = 76,
+ [2][0][RTW89_CN][42] = 76,
+ [2][0][RTW89_QATAR][42] = 28,
+ [2][0][RTW89_FCC][44] = 76,
+ [2][0][RTW89_ETSI][44] = 28,
+ [2][0][RTW89_MKK][44] = 127,
+ [2][0][RTW89_IC][44] = 76,
+ [2][0][RTW89_KCC][44] = 70,
+ [2][0][RTW89_ACMA][44] = 76,
+ [2][0][RTW89_CHILE][44] = 54,
+ [2][0][RTW89_UKRAINE][44] = 28,
+ [2][0][RTW89_MEXICO][44] = 76,
+ [2][0][RTW89_CN][44] = 76,
+ [2][0][RTW89_QATAR][44] = 28,
+ [2][0][RTW89_FCC][46] = 76,
+ [2][0][RTW89_ETSI][46] = 28,
+ [2][0][RTW89_MKK][46] = 127,
+ [2][0][RTW89_IC][46] = 76,
+ [2][0][RTW89_KCC][46] = 70,
+ [2][0][RTW89_ACMA][46] = 76,
+ [2][0][RTW89_CHILE][46] = 54,
+ [2][0][RTW89_UKRAINE][46] = 28,
+ [2][0][RTW89_MEXICO][46] = 76,
+ [2][0][RTW89_CN][46] = 76,
+ [2][0][RTW89_QATAR][46] = 28,
+ [2][1][RTW89_FCC][0] = 58,
+ [2][1][RTW89_ETSI][0] = 32,
+ [2][1][RTW89_MKK][0] = 38,
+ [2][1][RTW89_IC][0] = 30,
+ [2][1][RTW89_KCC][0] = 54,
+ [2][1][RTW89_ACMA][0] = 32,
+ [2][1][RTW89_CHILE][0] = 18,
+ [2][1][RTW89_UKRAINE][0] = 32,
+ [2][1][RTW89_MEXICO][0] = 50,
+ [2][1][RTW89_CN][0] = 32,
+ [2][1][RTW89_QATAR][0] = 32,
+ [2][1][RTW89_FCC][2] = 58,
+ [2][1][RTW89_ETSI][2] = 32,
+ [2][1][RTW89_MKK][2] = 38,
+ [2][1][RTW89_IC][2] = 30,
+ [2][1][RTW89_KCC][2] = 54,
+ [2][1][RTW89_ACMA][2] = 32,
+ [2][1][RTW89_CHILE][2] = 18,
+ [2][1][RTW89_UKRAINE][2] = 32,
+ [2][1][RTW89_MEXICO][2] = 50,
+ [2][1][RTW89_CN][2] = 32,
+ [2][1][RTW89_QATAR][2] = 32,
+ [2][1][RTW89_FCC][4] = 58,
+ [2][1][RTW89_ETSI][4] = 32,
+ [2][1][RTW89_MKK][4] = 38,
+ [2][1][RTW89_IC][4] = 30,
+ [2][1][RTW89_KCC][4] = 54,
+ [2][1][RTW89_ACMA][4] = 32,
+ [2][1][RTW89_CHILE][4] = 18,
+ [2][1][RTW89_UKRAINE][4] = 32,
+ [2][1][RTW89_MEXICO][4] = 50,
+ [2][1][RTW89_CN][4] = 32,
+ [2][1][RTW89_QATAR][4] = 32,
+ [2][1][RTW89_FCC][6] = 58,
+ [2][1][RTW89_ETSI][6] = 32,
+ [2][1][RTW89_MKK][6] = 38,
+ [2][1][RTW89_IC][6] = 30,
+ [2][1][RTW89_KCC][6] = 26,
+ [2][1][RTW89_ACMA][6] = 32,
+ [2][1][RTW89_CHILE][6] = 18,
+ [2][1][RTW89_UKRAINE][6] = 32,
+ [2][1][RTW89_MEXICO][6] = 50,
+ [2][1][RTW89_CN][6] = 32,
+ [2][1][RTW89_QATAR][6] = 32,
+ [2][1][RTW89_FCC][8] = 58,
+ [2][1][RTW89_ETSI][8] = 32,
+ [2][1][RTW89_MKK][8] = 38,
+ [2][1][RTW89_IC][8] = 52,
+ [2][1][RTW89_KCC][8] = 54,
+ [2][1][RTW89_ACMA][8] = 32,
+ [2][1][RTW89_CHILE][8] = 42,
+ [2][1][RTW89_UKRAINE][8] = 32,
+ [2][1][RTW89_MEXICO][8] = 58,
+ [2][1][RTW89_CN][8] = 32,
+ [2][1][RTW89_QATAR][8] = 32,
+ [2][1][RTW89_FCC][10] = 58,
+ [2][1][RTW89_ETSI][10] = 32,
+ [2][1][RTW89_MKK][10] = 38,
+ [2][1][RTW89_IC][10] = 52,
+ [2][1][RTW89_KCC][10] = 54,
+ [2][1][RTW89_ACMA][10] = 32,
+ [2][1][RTW89_CHILE][10] = 42,
+ [2][1][RTW89_UKRAINE][10] = 32,
+ [2][1][RTW89_MEXICO][10] = 58,
+ [2][1][RTW89_CN][10] = 32,
+ [2][1][RTW89_QATAR][10] = 32,
+ [2][1][RTW89_FCC][12] = 58,
+ [2][1][RTW89_ETSI][12] = 32,
+ [2][1][RTW89_MKK][12] = 38,
+ [2][1][RTW89_IC][12] = 52,
+ [2][1][RTW89_KCC][12] = 54,
+ [2][1][RTW89_ACMA][12] = 32,
+ [2][1][RTW89_CHILE][12] = 42,
+ [2][1][RTW89_UKRAINE][12] = 32,
+ [2][1][RTW89_MEXICO][12] = 58,
+ [2][1][RTW89_CN][12] = 32,
+ [2][1][RTW89_QATAR][12] = 32,
+ [2][1][RTW89_FCC][14] = 58,
+ [2][1][RTW89_ETSI][14] = 32,
+ [2][1][RTW89_MKK][14] = 38,
+ [2][1][RTW89_IC][14] = 52,
+ [2][1][RTW89_KCC][14] = 54,
+ [2][1][RTW89_ACMA][14] = 32,
+ [2][1][RTW89_CHILE][14] = 42,
+ [2][1][RTW89_UKRAINE][14] = 32,
+ [2][1][RTW89_MEXICO][14] = 58,
+ [2][1][RTW89_CN][14] = 32,
+ [2][1][RTW89_QATAR][14] = 32,
+ [2][1][RTW89_FCC][15] = 58,
+ [2][1][RTW89_ETSI][15] = 32,
+ [2][1][RTW89_MKK][15] = 58,
+ [2][1][RTW89_IC][15] = 58,
+ [2][1][RTW89_KCC][15] = 54,
+ [2][1][RTW89_ACMA][15] = 32,
+ [2][1][RTW89_CHILE][15] = 42,
+ [2][1][RTW89_UKRAINE][15] = 32,
+ [2][1][RTW89_MEXICO][15] = 58,
+ [2][1][RTW89_CN][15] = 127,
+ [2][1][RTW89_QATAR][15] = 32,
+ [2][1][RTW89_FCC][17] = 58,
+ [2][1][RTW89_ETSI][17] = 32,
+ [2][1][RTW89_MKK][17] = 58,
+ [2][1][RTW89_IC][17] = 58,
+ [2][1][RTW89_KCC][17] = 54,
+ [2][1][RTW89_ACMA][17] = 32,
+ [2][1][RTW89_CHILE][17] = 42,
+ [2][1][RTW89_UKRAINE][17] = 32,
+ [2][1][RTW89_MEXICO][17] = 58,
+ [2][1][RTW89_CN][17] = 127,
+ [2][1][RTW89_QATAR][17] = 32,
+ [2][1][RTW89_FCC][19] = 58,
+ [2][1][RTW89_ETSI][19] = 32,
+ [2][1][RTW89_MKK][19] = 58,
+ [2][1][RTW89_IC][19] = 58,
+ [2][1][RTW89_KCC][19] = 54,
+ [2][1][RTW89_ACMA][19] = 32,
+ [2][1][RTW89_CHILE][19] = 42,
+ [2][1][RTW89_UKRAINE][19] = 32,
+ [2][1][RTW89_MEXICO][19] = 58,
+ [2][1][RTW89_CN][19] = 127,
+ [2][1][RTW89_QATAR][19] = 32,
+ [2][1][RTW89_FCC][21] = 58,
+ [2][1][RTW89_ETSI][21] = 32,
+ [2][1][RTW89_MKK][21] = 58,
+ [2][1][RTW89_IC][21] = 58,
+ [2][1][RTW89_KCC][21] = 54,
+ [2][1][RTW89_ACMA][21] = 32,
+ [2][1][RTW89_CHILE][21] = 42,
+ [2][1][RTW89_UKRAINE][21] = 32,
+ [2][1][RTW89_MEXICO][21] = 58,
+ [2][1][RTW89_CN][21] = 127,
+ [2][1][RTW89_QATAR][21] = 32,
+ [2][1][RTW89_FCC][23] = 58,
+ [2][1][RTW89_ETSI][23] = 32,
+ [2][1][RTW89_MKK][23] = 58,
+ [2][1][RTW89_IC][23] = 58,
+ [2][1][RTW89_KCC][23] = 54,
+ [2][1][RTW89_ACMA][23] = 32,
+ [2][1][RTW89_CHILE][23] = 42,
+ [2][1][RTW89_UKRAINE][23] = 32,
+ [2][1][RTW89_MEXICO][23] = 58,
+ [2][1][RTW89_CN][23] = 127,
+ [2][1][RTW89_QATAR][23] = 32,
+ [2][1][RTW89_FCC][25] = 58,
+ [2][1][RTW89_ETSI][25] = 32,
+ [2][1][RTW89_MKK][25] = 58,
+ [2][1][RTW89_IC][25] = 127,
+ [2][1][RTW89_KCC][25] = 54,
+ [2][1][RTW89_ACMA][25] = 127,
+ [2][1][RTW89_CHILE][25] = 42,
+ [2][1][RTW89_UKRAINE][25] = 32,
+ [2][1][RTW89_MEXICO][25] = 58,
+ [2][1][RTW89_CN][25] = 127,
+ [2][1][RTW89_QATAR][25] = 32,
+ [2][1][RTW89_FCC][27] = 58,
+ [2][1][RTW89_ETSI][27] = 32,
+ [2][1][RTW89_MKK][27] = 58,
+ [2][1][RTW89_IC][27] = 127,
+ [2][1][RTW89_KCC][27] = 54,
+ [2][1][RTW89_ACMA][27] = 127,
+ [2][1][RTW89_CHILE][27] = 42,
+ [2][1][RTW89_UKRAINE][27] = 32,
+ [2][1][RTW89_MEXICO][27] = 58,
+ [2][1][RTW89_CN][27] = 127,
+ [2][1][RTW89_QATAR][27] = 32,
+ [2][1][RTW89_FCC][29] = 58,
+ [2][1][RTW89_ETSI][29] = 32,
+ [2][1][RTW89_MKK][29] = 58,
+ [2][1][RTW89_IC][29] = 127,
+ [2][1][RTW89_KCC][29] = 54,
+ [2][1][RTW89_ACMA][29] = 127,
+ [2][1][RTW89_CHILE][29] = 42,
+ [2][1][RTW89_UKRAINE][29] = 32,
+ [2][1][RTW89_MEXICO][29] = 58,
+ [2][1][RTW89_CN][29] = 127,
+ [2][1][RTW89_QATAR][29] = 32,
+ [2][1][RTW89_FCC][31] = 58,
+ [2][1][RTW89_ETSI][31] = 32,
+ [2][1][RTW89_MKK][31] = 58,
+ [2][1][RTW89_IC][31] = 58,
+ [2][1][RTW89_KCC][31] = 54,
+ [2][1][RTW89_ACMA][31] = 32,
+ [2][1][RTW89_CHILE][31] = 42,
+ [2][1][RTW89_UKRAINE][31] = 32,
+ [2][1][RTW89_MEXICO][31] = 58,
+ [2][1][RTW89_CN][31] = 127,
+ [2][1][RTW89_QATAR][31] = 32,
+ [2][1][RTW89_FCC][33] = 58,
+ [2][1][RTW89_ETSI][33] = 32,
+ [2][1][RTW89_MKK][33] = 58,
+ [2][1][RTW89_IC][33] = 58,
+ [2][1][RTW89_KCC][33] = 54,
+ [2][1][RTW89_ACMA][33] = 32,
+ [2][1][RTW89_CHILE][33] = 42,
+ [2][1][RTW89_UKRAINE][33] = 32,
+ [2][1][RTW89_MEXICO][33] = 58,
+ [2][1][RTW89_CN][33] = 127,
+ [2][1][RTW89_QATAR][33] = 32,
+ [2][1][RTW89_FCC][35] = 58,
+ [2][1][RTW89_ETSI][35] = 32,
+ [2][1][RTW89_MKK][35] = 58,
+ [2][1][RTW89_IC][35] = 58,
+ [2][1][RTW89_KCC][35] = 54,
+ [2][1][RTW89_ACMA][35] = 32,
+ [2][1][RTW89_CHILE][35] = 42,
+ [2][1][RTW89_UKRAINE][35] = 32,
+ [2][1][RTW89_MEXICO][35] = 58,
+ [2][1][RTW89_CN][35] = 127,
+ [2][1][RTW89_QATAR][35] = 32,
+ [2][1][RTW89_FCC][37] = 58,
+ [2][1][RTW89_ETSI][37] = 127,
+ [2][1][RTW89_MKK][37] = 58,
+ [2][1][RTW89_IC][37] = 58,
+ [2][1][RTW89_KCC][37] = 54,
+ [2][1][RTW89_ACMA][37] = 62,
+ [2][1][RTW89_CHILE][37] = 42,
+ [2][1][RTW89_UKRAINE][37] = 127,
+ [2][1][RTW89_MEXICO][37] = 58,
+ [2][1][RTW89_CN][37] = 127,
+ [2][1][RTW89_QATAR][37] = 127,
+ [2][1][RTW89_FCC][38] = 76,
+ [2][1][RTW89_ETSI][38] = 16,
+ [2][1][RTW89_MKK][38] = 127,
+ [2][1][RTW89_IC][38] = 76,
+ [2][1][RTW89_KCC][38] = 54,
+ [2][1][RTW89_ACMA][38] = 76,
+ [2][1][RTW89_CHILE][38] = 42,
+ [2][1][RTW89_UKRAINE][38] = 16,
+ [2][1][RTW89_MEXICO][38] = 76,
+ [2][1][RTW89_CN][38] = 64,
+ [2][1][RTW89_QATAR][38] = 16,
+ [2][1][RTW89_FCC][40] = 76,
+ [2][1][RTW89_ETSI][40] = 16,
+ [2][1][RTW89_MKK][40] = 127,
+ [2][1][RTW89_IC][40] = 76,
+ [2][1][RTW89_KCC][40] = 54,
+ [2][1][RTW89_ACMA][40] = 76,
+ [2][1][RTW89_CHILE][40] = 42,
+ [2][1][RTW89_UKRAINE][40] = 16,
+ [2][1][RTW89_MEXICO][40] = 76,
+ [2][1][RTW89_CN][40] = 64,
+ [2][1][RTW89_QATAR][40] = 16,
+ [2][1][RTW89_FCC][42] = 76,
+ [2][1][RTW89_ETSI][42] = 16,
+ [2][1][RTW89_MKK][42] = 127,
+ [2][1][RTW89_IC][42] = 76,
+ [2][1][RTW89_KCC][42] = 54,
+ [2][1][RTW89_ACMA][42] = 76,
+ [2][1][RTW89_CHILE][42] = 42,
+ [2][1][RTW89_UKRAINE][42] = 16,
+ [2][1][RTW89_MEXICO][42] = 76,
+ [2][1][RTW89_CN][42] = 64,
+ [2][1][RTW89_QATAR][42] = 16,
+ [2][1][RTW89_FCC][44] = 76,
+ [2][1][RTW89_ETSI][44] = 16,
+ [2][1][RTW89_MKK][44] = 127,
+ [2][1][RTW89_IC][44] = 76,
+ [2][1][RTW89_KCC][44] = 54,
+ [2][1][RTW89_ACMA][44] = 76,
+ [2][1][RTW89_CHILE][44] = 42,
+ [2][1][RTW89_UKRAINE][44] = 16,
+ [2][1][RTW89_MEXICO][44] = 76,
+ [2][1][RTW89_CN][44] = 64,
+ [2][1][RTW89_QATAR][44] = 16,
+ [2][1][RTW89_FCC][46] = 76,
+ [2][1][RTW89_ETSI][46] = 16,
+ [2][1][RTW89_MKK][46] = 127,
+ [2][1][RTW89_IC][46] = 76,
+ [2][1][RTW89_KCC][46] = 54,
+ [2][1][RTW89_ACMA][46] = 76,
+ [2][1][RTW89_CHILE][46] = 42,
+ [2][1][RTW89_UKRAINE][46] = 16,
+ [2][1][RTW89_MEXICO][46] = 76,
+ [2][1][RTW89_CN][46] = 64,
+ [2][1][RTW89_QATAR][46] = 16,
};
#define DECLARE_DIG_TABLE(name) \
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index f1e0fe36107d..75b11249f306 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -140,52 +140,56 @@
le32_get_bits((rxdesc)->dword5, GENMASK(7, 0))
#define RTW89_GET_RXINFO_USR_NUM(rpt) \
- le32_get_bits(*((__le32 *)rpt), GENMASK(3, 0))
+ le32_get_bits(*((const __le32 *)rpt), GENMASK(3, 0))
#define RTW89_GET_RXINFO_FW_DEFINE(rpt) \
- le32_get_bits(*((__le32 *)rpt), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)rpt), GENMASK(15, 8))
#define RTW89_GET_RXINFO_LSIG_LEN(rpt) \
- le32_get_bits(*((__le32 *)rpt), GENMASK(27, 16))
+ le32_get_bits(*((const __le32 *)rpt), GENMASK(27, 16))
#define RTW89_GET_RXINFO_IS_TO_SELF(rpt) \
- le32_get_bits(*((__le32 *)rpt), BIT(28))
+ le32_get_bits(*((const __le32 *)rpt), BIT(28))
#define RTW89_GET_RXINFO_RX_CNT_VLD(rpt) \
- le32_get_bits(*((__le32 *)rpt), BIT(29))
+ le32_get_bits(*((const __le32 *)rpt), BIT(29))
#define RTW89_GET_RXINFO_LONG_RXD(rpt) \
- le32_get_bits(*((__le32 *)rpt), GENMASK(31, 30))
+ le32_get_bits(*((const __le32 *)rpt), GENMASK(31, 30))
#define RTW89_GET_RXINFO_SERVICE(rpt) \
- le32_get_bits(*((__le32 *)(rpt) + 1), GENMASK(15, 0))
+ le32_get_bits(*((const __le32 *)(rpt) + 1), GENMASK(15, 0))
#define RTW89_GET_RXINFO_PLCP_LEN(rpt) \
- le32_get_bits(*((__le32 *)(rpt) + 1), GENMASK(23, 16))
+ le32_get_bits(*((const __le32 *)(rpt) + 1), GENMASK(23, 16))
#define RTW89_GET_RXINFO_MAC_ID_VALID(rpt, usr) \
- le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(0))
+ le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), BIT(0))
#define RTW89_GET_RXINFO_DATA(rpt, usr) \
- le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(1))
+ le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), BIT(1))
#define RTW89_GET_RXINFO_CTRL(rpt, usr) \
- le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(2))
+ le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), BIT(2))
#define RTW89_GET_RXINFO_MGMT(rpt, usr) \
- le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(3))
+ le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), BIT(3))
#define RTW89_GET_RXINFO_BCM(rpt, usr) \
- le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(4))
+ le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), BIT(4))
#define RTW89_GET_RXINFO_MACID(rpt, usr) \
- le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), GENMASK(15, 8))
+#define RTW89_GET_PHY_STS_IE_MAP(sts) \
+ le32_get_bits(*((const __le32 *)(sts)), GENMASK(4, 0))
#define RTW89_GET_PHY_STS_RSSI_A(sts) \
- le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(7, 0))
+ le32_get_bits(*((const __le32 *)(sts) + 1), GENMASK(7, 0))
#define RTW89_GET_PHY_STS_RSSI_B(sts) \
- le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(sts) + 1), GENMASK(15, 8))
#define RTW89_GET_PHY_STS_RSSI_C(sts) \
- le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(23, 16))
+ le32_get_bits(*((const __le32 *)(sts) + 1), GENMASK(23, 16))
#define RTW89_GET_PHY_STS_RSSI_D(sts) \
- le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(31, 24))
+ le32_get_bits(*((const __le32 *)(sts) + 1), GENMASK(31, 24))
#define RTW89_GET_PHY_STS_LEN(sts) \
- le32_get_bits(*((__le32 *)sts), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)sts), GENMASK(15, 8))
#define RTW89_GET_PHY_STS_RSSI_AVG(sts) \
- le32_get_bits(*((__le32 *)sts), GENMASK(31, 24))
+ le32_get_bits(*((const __le32 *)sts), GENMASK(31, 24))
#define RTW89_GET_PHY_STS_IE_TYPE(ie) \
- le32_get_bits(*((__le32 *)ie), GENMASK(4, 0))
+ le32_get_bits(*((const __le32 *)ie), GENMASK(4, 0))
#define RTW89_GET_PHY_STS_IE_LEN(ie) \
- le32_get_bits(*((__le32 *)ie), GENMASK(11, 5))
-#define RTW89_GET_PHY_STS_IE0_CFO(ie) \
- le32_get_bits(*((__le32 *)(ie) + 1), GENMASK(31, 20))
+ le32_get_bits(*((const __le32 *)ie), GENMASK(11, 5))
+#define RTW89_GET_PHY_STS_IE01_CH_IDX(ie) \
+ le32_get_bits(*((const __le32 *)ie), GENMASK(23, 16))
+#define RTW89_GET_PHY_STS_IE01_CFO(ie) \
+ le32_get_bits(*((const __le32 *)(ie) + 1), GENMASK(31, 20))
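+/* The (const __le32 *) casts let these accessors take pointers to const
+ * report buffers without discarding the qualifier. A hypothetical caller:
+ *	u8 ie_type = RTW89_GET_PHY_STS_IE_TYPE(buf);
+ * le32_get_bits() converts the little-endian word and extracts the field
+ * selected by GENMASK().
+ */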
enum rtw89_tx_channel {
RTW89_TXCH_ACH0 = 0,
@@ -251,45 +255,6 @@ enum rtw89_tx_qsel {
/* reserved */
};
-enum rtw89_phy_status_ie_type {
- RTW89_PHYSTS_IE00_CMN_CCK = 0,
- RTW89_PHYSTS_IE01_CMN_OFDM = 1,
- RTW89_PHYSTS_IE02_CMN_EXT_AX = 2,
- RTW89_PHYSTS_IE03_CMN_EXT_SEG_1 = 3,
- RTW89_PHYSTS_IE04_CMN_EXT_PATH_A = 4,
- RTW89_PHYSTS_IE05_CMN_EXT_PATH_B = 5,
- RTW89_PHYSTS_IE06_CMN_EXT_PATH_C = 6,
- RTW89_PHYSTS_IE07_CMN_EXT_PATH_D = 7,
- RTW89_PHYSTS_IE08_FTR_CH = 8,
- RTW89_PHYSTS_IE09_FTR_PLCP_0 = 9,
- RTW89_PHYSTS_IE10_FTR_PLCP_EXT = 10,
- RTW89_PHYSTS_IE11_FTR_PLCP_HISTOGRAM = 11,
- RTW89_PHYSTS_IE12_MU_EIGEN_INFO = 12,
- RTW89_PHYSTS_IE13_DL_MU_DEF = 13,
- RTW89_PHYSTS_IE14_TB_UL_CQI = 14,
- RTW89_PHYSTS_IE15_TB_UL_DEF = 15,
- RTW89_PHYSTS_IE16_RSVD16 = 16,
- RTW89_PHYSTS_IE17_TB_UL_CTRL = 17,
- RTW89_PHYSTS_IE18_DBG_OFDM_FD_CMN = 18,
- RTW89_PHYSTS_IE19_DBG_OFDM_TD_CMN = 19,
- RTW89_PHYSTS_IE20_DBG_OFDM_FD_USER_SEG_0 = 20,
- RTW89_PHYSTS_IE21_DBG_OFDM_FD_USER_SEG_1 = 21,
- RTW89_PHYSTS_IE22_DBG_OFDM_FD_USER_AGC = 22,
- RTW89_PHYSTS_IE23_RSVD23 = 23,
- RTW89_PHYSTS_IE24_DBG_OFDM_TD_PATH_A = 24,
- RTW89_PHYSTS_IE25_DBG_OFDM_TD_PATH_B = 25,
- RTW89_PHYSTS_IE26_DBG_OFDM_TD_PATH_C = 26,
- RTW89_PHYSTS_IE27_DBG_OFDM_TD_PATH_D = 27,
- RTW89_PHYSTS_IE28_DBG_CCK_PATH_A = 28,
- RTW89_PHYSTS_IE29_DBG_CCK_PATH_B = 29,
- RTW89_PHYSTS_IE30_DBG_CCK_PATH_C = 30,
- RTW89_PHYSTS_IE31_DBG_CCK_PATH_D = 31,
-
- /* keep last */
- RTW89_PHYSTS_IE_NUM,
- RTW89_PHYSTS_IE_MAX = RTW89_PHYSTS_IE_NUM - 1
-};
-
static inline u8 rtw89_core_get_qsel(struct rtw89_dev *rtwdev, u8 tid)
{
switch (tid) {
diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
index a0c5d02ae88c..8a3d86897ea8 100644
--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
+++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
@@ -63,7 +63,7 @@ static void rsi_coex_scheduler_thread(struct rsi_common *common)
rsi_coex_sched_tx_pkts(coex_cb);
} while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0);
- complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
+ kthread_complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
}
int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index e70c1c7fdf59..913e11fb3807 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1108,6 +1108,9 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
break;
}
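+	/* If the loop above found no matching vif, ii == RSI_MAX_VIFS;
+	 * bail out early to avoid indexing past the vif table.
+	 */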
+ if (ii >= RSI_MAX_VIFS)
+ return status;
+
mutex_lock(&common->mutex);
if (ssn != NULL)
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index f1bf71e6c608..f9f004446b07 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -23,6 +23,7 @@
#include "rsi_common.h"
#include "rsi_coex.h"
#include "rsi_hal.h"
+#include "rsi_usb.h"
u32 rsi_zone_enabled = /* INFO_ZONE |
INIT_ZONE |
@@ -168,6 +169,9 @@ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len)
frame_desc = &rx_pkt[index];
actual_length = *(u16 *)&frame_desc[0];
offset = *(u16 *)&frame_desc[2];
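+		/* For USB reads (rcv_pkt_len == 0) the offset comes straight
+		 * from the device; reject values that would point past the
+		 * receive buffer before they are used below.
+		 */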
+ if (!rcv_pkt_len && offset >
+ RSI_MAX_RX_USB_PKT_SIZE - FRAME_DESC_SZ)
+ goto fail;
queueno = rsi_get_queueno(frame_desc, offset);
length = rsi_get_length(frame_desc, offset);
@@ -260,7 +264,7 @@ static void rsi_tx_scheduler_thread(struct rsi_common *common)
if (common->init_done)
rsi_core_qos_processor(common);
} while (atomic_read(&common->tx_thread.thread_done) == 0);
- complete_and_exit(&common->tx_thread.completion, 0);
+ kthread_complete_and_exit(&common->tx_thread.completion, 0);
}
#ifdef CONFIG_RSI_COEX
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 8ace1874e5cb..b2b47a0abcbf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -75,7 +75,7 @@ void rsi_sdio_rx_thread(struct rsi_common *common)
rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__);
atomic_inc(&sdev->rx_thread.thread_done);
- complete_and_exit(&sdev->rx_thread.completion, 0);
+ kthread_complete_and_exit(&sdev->rx_thread.completion, 0);
}
/**
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 6821ea991895..66fe386ec9cc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -269,8 +269,12 @@ static void rsi_rx_done_handler(struct urb *urb)
struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)rx_cb->data;
int status = -EINVAL;
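+	/* rx_skb is cleared after every free below, so a NULL here means the
+	 * buffer was already consumed; bail out rather than touch a freed skb.
+	 */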
+ if (!rx_cb->rx_skb)
+ return;
+
if (urb->status) {
dev_kfree_skb(rx_cb->rx_skb);
+ rx_cb->rx_skb = NULL;
return;
}
@@ -294,8 +298,10 @@ out:
if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num, GFP_ATOMIC))
rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__);
- if (status)
+ if (status) {
dev_kfree_skb(rx_cb->rx_skb);
+ rx_cb->rx_skb = NULL;
+ }
}
static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num)
@@ -324,7 +330,6 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags)
struct sk_buff *skb;
u8 dword_align_bytes = 0;
-#define RSI_MAX_RX_USB_PKT_SIZE 3000
skb = dev_alloc_skb(RSI_MAX_RX_USB_PKT_SIZE);
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 4ffcdde1acb1..5130b0e72adc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -56,6 +56,6 @@ void rsi_usb_rx_thread(struct rsi_common *common)
out:
rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__);
skb_queue_purge(&dev->rx_q);
- complete_and_exit(&dev->rx_thread.completion, 0);
+ kthread_complete_and_exit(&dev->rx_thread.completion, 0);
}
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index 254d19b66412..961851748bc4 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -44,6 +44,8 @@
#define RSI_USB_BUF_SIZE 4096
#define RSI_USB_CTRL_BUF_SIZE 0x04
+#define RSI_MAX_RX_USB_PKT_SIZE 3000
+
struct rx_usb_ctrl_block {
u8 *data;
struct urb *rx_urb;
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 136a0d3b23c9..a25a6143e65f 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1520,6 +1520,12 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
+
+	/* We set max_scan_ie_len to an arbitrary value to keep wpa_supplicant
+	 * scans from failing, as the driver will ignore the extra passed IEs
+	 * anyway.
+	 */
+ wl->hw->wiphy->max_scan_ie_len = 512;
+
wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wl1251_band_2ghz;
wl->hw->queues = 4;
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 9fd8cf2d270c..72fc41ac83c0 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -26,7 +26,7 @@
#include "wl12xx_80211.h"
#include "io.h"
-static bool dump = false;
+static bool dump;
struct wl12xx_sdio_glue {
struct device *dev;
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
index 17543be14665..609fd4a2c865 100644
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -16,6 +16,17 @@ config WWAN
if WWAN
+config WWAN_DEBUGFS
+ bool "WWAN devices debugfs interface" if EXPERT
+ depends on DEBUG_FS
+ default y
+ help
+ Enables debugfs infrastructure for the WWAN core and device drivers.
+
+	  If this option is selected, the debug interface elements for each
+	  WWAN device can be found in a directory named after the device:
+	  debugfs/wwan/wwanX.
+
config WWAN_HWSIM
tristate "Simulated WWAN device"
help
@@ -50,6 +61,19 @@ config MHI_WWAN_MBIM
To compile this driver as a module, choose M here: the module will be
called mhi_wwan_mbim.
+config QCOM_BAM_DMUX
+ tristate "Qualcomm BAM-DMUX WWAN network driver"
+ depends on (DMA_ENGINE && PM && QCOM_SMEM_STATE) || COMPILE_TEST
+ help
+ The BAM Data Multiplexer provides access to the network data channels
+ of modems integrated into many older Qualcomm SoCs, e.g. Qualcomm
+ MSM8916 or MSM8974. The connection can be established via QMI/AT from
+ userspace with control ports available through the WWAN subsystem
+ (CONFIG_RPMSG_WWAN_CTRL) or QRTR network sockets (CONFIG_QRTR).
+
+ To compile this driver as a module, choose M here: the module will be
+ called qcom_bam_dmux.
+
config RPMSG_WWAN_CTRL
tristate "RPMSG WWAN control driver"
depends on RPMSG
@@ -72,6 +96,7 @@ config IOSM
tristate "IOSM Driver for Intel M.2 WWAN Device"
depends on INTEL_IOMMU
select NET_DEVLINK
+ select RELAY if WWAN_DEBUGFS
help
This driver enables Intel M.2 WWAN Device communication.
diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile
index fe51feedac21..e722650bebea 100644
--- a/drivers/net/wwan/Makefile
+++ b/drivers/net/wwan/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_WWAN_HWSIM) += wwan_hwsim.o
obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o
obj-$(CONFIG_MHI_WWAN_MBIM) += mhi_wwan_mbim.o
+obj-$(CONFIG_QCOM_BAM_DMUX) += qcom_bam_dmux.o
obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o
obj-$(CONFIG_IOSM) += iosm/
diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile
index b838034bb120..fa8d6afd18e1 100644
--- a/drivers/net/wwan/iosm/Makefile
+++ b/drivers/net/wwan/iosm/Makefile
@@ -23,4 +23,8 @@ iosm-y = \
iosm_ipc_flash.o \
iosm_ipc_coredump.o
+iosm-$(CONFIG_WWAN_DEBUGFS) += \
+ iosm_ipc_debugfs.o \
+ iosm_ipc_trace.o
+
obj-$(CONFIG_IOSM) := iosm.o
diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
new file mode 100644
index 000000000000..f2f57751a7d2
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/wwan.h>
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_trace.h"
+#include "iosm_ipc_debugfs.h"
+
+void ipc_debugfs_init(struct iosm_imem *ipc_imem)
+{
+ struct dentry *debugfs_pdev = wwan_get_debugfs_dir(ipc_imem->dev);
+
+ ipc_imem->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
+ debugfs_pdev);
+
+ ipc_imem->trace = ipc_trace_init(ipc_imem);
+ if (!ipc_imem->trace)
+ dev_warn(ipc_imem->dev, "trace channel init failed");
+}
+
+void ipc_debugfs_deinit(struct iosm_imem *ipc_imem)
+{
+ ipc_trace_deinit(ipc_imem->trace);
+ debugfs_remove_recursive(ipc_imem->debugfs_dir);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.h b/drivers/net/wwan/iosm/iosm_ipc_debugfs.h
new file mode 100644
index 000000000000..8a84bfa2c14a
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_DEBUGFS_H
+#define IOSM_IPC_DEBUGFS_H
+
+#ifdef CONFIG_WWAN_DEBUGFS
+void ipc_debugfs_init(struct iosm_imem *ipc_imem);
+void ipc_debugfs_deinit(struct iosm_imem *ipc_imem);
+#else
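+/* With CONFIG_WWAN_DEBUGFS disabled these collapse to no-ops, so callers
+ * need no #ifdef guards of their own.
+ */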
+static inline void ipc_debugfs_init(struct iosm_imem *ipc_imem) {}
+static inline void ipc_debugfs_deinit(struct iosm_imem *ipc_imem) {}
+#endif
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index 12c03dacb5dd..f9e8e0ee4de3 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -10,6 +10,8 @@
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
+#include "iosm_ipc_trace.h"
+#include "iosm_ipc_debugfs.h"
/* Check whether the given channel is the valid wwan IPS channel. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
@@ -132,7 +134,6 @@ static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
* for channel alloc function.
*/
cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
- cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES;
return 0;
}
@@ -269,9 +270,14 @@ static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
switch (pipe->channel->ctype) {
case IPC_CTYPE_CTRL:
port_id = pipe->channel->channel_id;
+ ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
+ IPC_CB(skb)->mapping,
+ IPC_CB(skb)->direction);
if (port_id == IPC_MEM_CTRL_CHL_ID_7)
ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
skb);
+ else if (ipc_is_trace_channel(ipc_imem, port_id))
+ ipc_trace_port_rx(ipc_imem, skb);
else
wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
skb);
@@ -555,6 +561,8 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
ctrl_chl_idx++;
}
+ ipc_debugfs_init(ipc_imem);
+
ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
false);
@@ -1170,6 +1178,7 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
ipc_mux_deinit(ipc_imem->mux);
+ ipc_debugfs_deinit(ipc_imem);
ipc_wwan_deinit(ipc_imem->wwan);
ipc_port_deinit(ipc_imem->ipc_port);
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index 6b8a837faef2..98554e9beb01 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -305,6 +305,7 @@ enum ipc_phase {
* @sio: IPC SIO data structure pointer
* @ipc_port: IPC PORT data structure pointer
* @pcie: IPC PCIe
+ * @trace: IPC trace data structure pointer
* @dev: Pointer to device structure
* @ipc_requested_state: Expected IPC state on CP.
* @channels: Channel list with UL/DL pipe pairs.
@@ -339,6 +340,7 @@ enum ipc_phase {
* @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
* @reset_det_n: Reset detect flag
* @pcie_wake_n: Pcie wake flag
+ * @debugfs_dir: Debug FS directory for driver-specific entries
*/
struct iosm_imem {
struct iosm_mmio *mmio;
@@ -348,6 +350,9 @@ struct iosm_imem {
struct iosm_mux *mux;
struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS];
struct iosm_pcie *pcie;
+#ifdef CONFIG_WWAN_DEBUGFS
+ struct iosm_trace *trace;
+#endif
struct device *dev;
enum ipc_mem_device_ipc_state ipc_requested_state;
struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS];
@@ -376,6 +381,9 @@ struct iosm_imem {
ev_mux_net_transmit_pending:1,
reset_det_n:1,
pcie_wake_n:1;
+#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
};
/**
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 831cdae28e8a..57304a5adf68 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -176,11 +176,14 @@ channel_unavailable:
return false;
}
-/* Release a sio link to CP. */
-void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev)
+/**
+ * ipc_imem_sys_port_close - Release a sio link to CP.
+ * @ipc_imem: Imem instance.
+ * @channel: Channel instance.
+ */
+void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel)
{
- struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
- struct ipc_mem_channel *channel = ipc_cdev->channel;
enum ipc_phase curr_phase;
int status = 0;
u32 tail = 0;
@@ -638,6 +641,6 @@ int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
memcpy(data, skb->data, skb->len);
devlink_read_fail:
- ipc_pcie_kfree_skb(devlink->pcie, skb);
+ dev_kfree_skb(skb);
return rc;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
index f0c88ac5643c..f8afb217d9e2 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -43,12 +43,8 @@
*/
struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
int chl_id, int hp_id);
-
-/**
- * ipc_imem_sys_cdev_close - Release a sio link to CP.
- * @ipc_cdev: iosm sio instance.
- */
-void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev);
+void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel);
/**
* ipc_imem_sys_cdev_write - Route the uplink buffer to CP.
@@ -145,4 +141,5 @@ int ipc_imem_sys_devlink_read(struct iosm_devlink *ipc_devlink, u8 *data,
*/
int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
unsigned char *buf, int count);
+
#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
index 09f94c123531..f09e5e77a2a5 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
@@ -192,7 +192,7 @@ void ipc_mmio_config(struct iosm_mmio *ipc_mmio)
iowrite64(0, ipc_mmio->base + ipc_mmio->offset.ap_win_end);
iowrite64(ipc_mmio->context_info_addr,
- ipc_mmio->base + ipc_mmio->offset.context_info);
+ ipc_mmio->base + ipc_mmio->offset.context_info);
}
void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr,
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c
index c1c77ce699da..8e66ffe92055 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c
@@ -97,7 +97,7 @@ static bool ipc_mux_session_open(struct iosm_mux *ipc_mux,
/* Search for a free session interface id. */
if_id = le32_to_cpu(session_open->if_id);
- if (if_id < 0 || if_id >= ipc_mux->nr_sessions) {
+ if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "invalid interface id=%d", if_id);
return false;
}
@@ -129,6 +129,7 @@ static bool ipc_mux_session_open(struct iosm_mux *ipc_mux,
/* Save and return the assigned if id. */
session_open->if_id = cpu_to_le32(if_id);
+ ipc_mux->nr_sessions++;
return true;
}
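/* nr_sessions now counts the sessions that are actually open rather than
 * the table capacity; once it drops back to zero the mux channel itself
 * is closed (see ipc_mux_schedule()).
 */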
@@ -151,7 +152,7 @@ static void ipc_mux_session_close(struct iosm_mux *ipc_mux,
/* Copy the session interface id. */
if_id = le32_to_cpu(msg->if_id);
- if (if_id < 0 || if_id >= ipc_mux->nr_sessions) {
+ if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "invalid session id %d", if_id);
return;
}
@@ -170,6 +171,7 @@ static void ipc_mux_session_close(struct iosm_mux *ipc_mux,
ipc_mux->session[if_id].flow_ctl_mask = 0;
ipc_mux_session_reset(ipc_mux, if_id);
+ ipc_mux->nr_sessions--;
}
static void ipc_mux_channel_close(struct iosm_mux *ipc_mux,
@@ -178,7 +180,7 @@ static void ipc_mux_channel_close(struct iosm_mux *ipc_mux,
int i;
/* Free pending session UL packet. */
- for (i = 0; i < ipc_mux->nr_sessions; i++)
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++)
if (ipc_mux->session[i].wwan)
ipc_mux_session_reset(ipc_mux, i);
@@ -244,6 +246,11 @@ static int ipc_mux_schedule(struct iosm_mux *ipc_mux, union mux_msg *msg)
/* Release an IP session. */
ipc_mux->event = MUX_E_MUX_SESSION_CLOSE;
ipc_mux_session_close(ipc_mux, &msg->session_close);
+ if (!ipc_mux->nr_sessions) {
+ ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
+ ipc_mux_channel_close(ipc_mux,
+ &msg->channel_close);
+ }
ret = ipc_mux->channel_id;
goto out;
@@ -281,7 +288,6 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
ipc_mux->protocol = mux_cfg->protocol;
ipc_mux->ul_flow = mux_cfg->ul_flow;
- ipc_mux->nr_sessions = mux_cfg->nr_sessions;
ipc_mux->instance_id = mux_cfg->instance_id;
ipc_mux->wwan_q_offset = 0;
@@ -340,7 +346,7 @@ static void ipc_mux_restart_tx_for_all_sessions(struct iosm_mux *ipc_mux)
struct mux_session *session;
int idx;
- for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
+ for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
session = &ipc_mux->session[idx];
if (!session->wwan)
@@ -365,7 +371,7 @@ static void ipc_mux_stop_netif_for_all_sessions(struct iosm_mux *ipc_mux)
struct mux_session *session;
int idx;
- for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
+ for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
session = &ipc_mux->session[idx];
if (!session->wwan)
@@ -387,7 +393,7 @@ void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux)
int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux)
{
- return ipc_mux ? ipc_mux->nr_sessions : -EFAULT;
+ return ipc_mux ? IPC_MEM_MUX_IP_SESSION_ENTRIES : -EFAULT;
}
enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux)
@@ -435,9 +441,11 @@ void ipc_mux_deinit(struct iosm_mux *ipc_mux)
return;
ipc_mux_stop_netif_for_all_sessions(ipc_mux);
- channel_close = &mux_msg.channel_close;
- channel_close->event = MUX_E_MUX_CHANNEL_CLOSE;
- ipc_mux_schedule(ipc_mux, &mux_msg);
+ if (ipc_mux->state == MUX_S_ACTIVE) {
+ channel_close = &mux_msg.channel_close;
+ channel_close->event = MUX_E_MUX_CHANNEL_CLOSE;
+ ipc_mux_schedule(ipc_mux, &mux_msg);
+ }
/* Empty the ADB free list. */
free_list = &ipc_mux->ul_adb.free_list;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h
index ddd2cd0bd911..88debaa1ed31 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h
@@ -278,7 +278,6 @@ struct iosm_mux {
struct ipc_mux_config {
enum ipc_mux_protocol protocol;
enum ipc_mux_ul_flow ul_flow;
- int nr_sessions;
int instance_id;
};
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
index bdb2d32cdb6d..40fb54a0513e 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -175,7 +175,7 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
switch (le32_to_cpu(cmdh->command_type)) {
case MUX_LITE_CMD_FLOW_CTL:
- if (cmdh->if_id >= ipc_mux->nr_sessions) {
+ if (cmdh->if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "if_id [%d] not valid",
cmdh->if_id);
return -EINVAL; /* No session interface id. */
@@ -307,13 +307,13 @@ static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
}
if_id = fct->if_id;
- if (if_id >= ipc_mux->nr_sessions) {
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
return;
}
/* Is the session active? */
- if_id = array_index_nospec(if_id, ipc_mux->nr_sessions);
+ if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
wwan = ipc_mux->session[if_id].wwan;
if (!wwan) {
dev_err(ipc_mux->dev, "session Net ID is NULL");
@@ -355,13 +355,13 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
}
if_id = adgh->if_id;
- if (if_id >= ipc_mux->nr_sessions) {
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
return;
}
/* Is the session active? */
- if_id = array_index_nospec(if_id, ipc_mux->nr_sessions);
+ if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
wwan = ipc_mux->session[if_id].wwan;
if (!wwan) {
dev_err(ipc_mux->dev, "session Net ID is NULL");
@@ -538,7 +538,7 @@ static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
struct mux_session *session;
int idx;
- for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
+ for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
session = &ipc_mux->session[idx];
if (!session->wwan)
@@ -563,7 +563,7 @@ static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
- for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
session = &ipc_mux->session[i];
if (!session->wwan || session->flow_ctl_mask)
@@ -777,13 +777,13 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
ipc_mux->adb_prep_ongoing = true;
- for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
session_id = ipc_mux->rr_next_session;
session = &ipc_mux->session[session_id];
/* Go to next handle rr_next_session overflow */
ipc_mux->rr_next_session++;
- if (ipc_mux->rr_next_session >= ipc_mux->nr_sessions)
+ if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
ipc_mux->rr_next_session = 0;
if (!session->wwan || session->flow_ctl_mask ||
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 2fe88b8be348..d73894e2a84e 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -363,67 +363,22 @@ static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie)
int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie)
{
- struct pci_dev *pdev;
- int ret;
-
- pdev = ipc_pcie->pci;
-
- /* Execute D3 one time. */
- if (pdev->current_state != PCI_D0) {
- dev_dbg(ipc_pcie->dev, "done for PM=%d", pdev->current_state);
- return 0;
- }
-
/* The HAL shall ask the shared memory layer whether D3 is allowed. */
ipc_imem_pm_suspend(ipc_pcie->imem);
- /* Save the PCI configuration space of a device before suspending. */
- ret = pci_save_state(pdev);
-
- if (ret) {
- dev_err(ipc_pcie->dev, "pci_save_state error=%d", ret);
- return ret;
- }
-
- /* Set the power state of a PCI device.
- * Transition a device to a new power state, using the device's PCI PM
- * registers.
- */
- ret = pci_set_power_state(pdev, PCI_D3cold);
-
- if (ret) {
- dev_err(ipc_pcie->dev, "pci_set_power_state error=%d", ret);
- return ret;
- }
-
dev_dbg(ipc_pcie->dev, "SUSPEND done");
- return ret;
+ return 0;
}
int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie)
{
- int ret;
-
- /* Set the power state of a PCI device.
- * Transition a device to a new power state, using the device's PCI PM
- * registers.
- */
- ret = pci_set_power_state(ipc_pcie->pci, PCI_D0);
-
- if (ret) {
- dev_err(ipc_pcie->dev, "pci_set_power_state error=%d", ret);
- return ret;
- }
-
- pci_restore_state(ipc_pcie->pci);
-
/* The HAL shall inform the shared memory layer that the device is
* active.
*/
ipc_imem_pm_resume(ipc_pcie->imem);
dev_dbg(ipc_pcie->dev, "RESUME done");
- return ret;
+ return 0;
}
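/* The PCI config-space save/restore and D-state transitions were dropped
 * from the two helpers above; the PCI core's PM callbacks presumably cover
 * those steps now, leaving only the shared-memory handshake here.
 */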
static int __maybe_unused ipc_pcie_suspend_cb(struct device *dev)
diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.c b/drivers/net/wwan/iosm/iosm_ipc_port.c
index beb944847398..b6d81c627277 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_port.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.c
@@ -27,7 +27,7 @@ static void ipc_port_ctrl_stop(struct wwan_port *port)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
- ipc_imem_sys_cdev_close(ipc_port);
+ ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel);
}
/* transfer control data to modem */
diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c
new file mode 100644
index 000000000000..eeecfa3d10c5
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include <linux/wwan.h>
+#include "iosm_ipc_trace.h"
+
+/* sub-buffer size and number of sub-buffers */
+#define IOSM_TRC_SUB_BUFF_SIZE 131072
+#define IOSM_TRC_N_SUB_BUFF 32
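+/* 32 sub-buffers of 128 KiB give a 4 MiB relay buffer per channel. */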
+
+#define IOSM_TRC_FILE_PERM 0600
+
+#define IOSM_TRC_DEBUGFS_TRACE "trace"
+#define IOSM_TRC_DEBUGFS_TRACE_CTRL "trace_ctrl"
+
+/**
+ * ipc_trace_port_rx - Receive a trace packet from CP and write it to the relay buffer
+ * @ipc_imem: Pointer to iosm_imem structure
+ * @skb: Pointer to struct sk_buff
+ */
+void ipc_trace_port_rx(struct iosm_imem *ipc_imem, struct sk_buff *skb)
+{
+ struct iosm_trace *ipc_trace = ipc_imem->trace;
+
+ if (ipc_trace->ipc_rchan)
+ relay_write(ipc_trace->ipc_rchan, skb->data, skb->len);
+
+ dev_kfree_skb(skb);
+}
+
+/* Creates relay file in debugfs. */
+static struct dentry *
+ipc_trace_create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ *is_global = 1;
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+/* Removes relay file from debugfs. */
+static int ipc_trace_remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
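+/* Called by relay before switching to a new sub-buffer; returning 0 when
+ * every sub-buffer is full makes relay drop the data instead of
+ * overwriting unread traces.
+ */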
+static int ipc_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding)
+{
+ if (relay_buf_full(buf)) {
+		pr_err_ratelimited("Relay_buf full, dropping traces\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Relay interface callbacks */
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = ipc_trace_subbuf_start_handler,
+ .create_buf_file = ipc_trace_create_buf_file_handler,
+ .remove_buf_file = ipc_trace_remove_buf_file_handler,
+};
+
+/* Copy the trace control mode to user buffer */
+static ssize_t ipc_trace_ctrl_file_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct iosm_trace *ipc_trace = filp->private_data;
+ char buf[16];
+ int len;
+
+ mutex_lock(&ipc_trace->trc_mutex);
+ len = snprintf(buf, sizeof(buf), "%d\n", ipc_trace->mode);
+ mutex_unlock(&ipc_trace->trc_mutex);
+
+ return simple_read_from_buffer(buffer, count, ppos, buf, len);
+}
+
+/* Open and close the trace channel depending on user input */
+static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct iosm_trace *ipc_trace = filp->private_data;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul_from_user(buffer, count, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ipc_trace->trc_mutex);
+ if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) {
+ ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem,
+ ipc_trace->chl_id,
+ IPC_HP_CDEV_OPEN);
+ if (!ipc_trace->channel) {
+ ret = -EIO;
+ goto unlock;
+ }
+ ipc_trace->mode = TRACE_ENABLE;
+ } else if (val == TRACE_DISABLE && ipc_trace->mode != TRACE_DISABLE) {
+ ipc_trace->mode = TRACE_DISABLE;
+ /* close trace channel */
+ ipc_imem_sys_port_close(ipc_trace->ipc_imem,
+ ipc_trace->channel);
+ relay_flush(ipc_trace->ipc_rchan);
+ }
+ ret = count;
+unlock:
+ mutex_unlock(&ipc_trace->trc_mutex);
+ return ret;
+}
+
+static const struct file_operations ipc_trace_fops = {
+ .open = simple_open,
+ .write = ipc_trace_ctrl_file_write,
+ .read = ipc_trace_ctrl_file_read,
+};
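+/* Hypothetical usage from userspace, assuming debugfs is mounted at the
+ * usual place and the device directory is wwan0:
+ *	echo 1 > /sys/kernel/debug/wwan/wwan0/iosm/trace_ctrl
+ *	cat /sys/kernel/debug/wwan/wwan0/iosm/trace > modem.trc
+ *	echo 0 > /sys/kernel/debug/wwan/wwan0/iosm/trace_ctrl
+ */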
+
+/**
+ * ipc_trace_init - Create trace interface & debugfs entries
+ * @ipc_imem: Pointer to iosm_imem structure
+ *
+ * Returns: Pointer to trace instance on success, else NULL
+ */
+struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem)
+{
+ struct ipc_chnl_cfg chnl_cfg = { 0 };
+ struct iosm_trace *ipc_trace;
+
+ ipc_chnl_cfg_get(&chnl_cfg, IPC_MEM_CTRL_CHL_ID_3);
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, chnl_cfg,
+ IRQ_MOD_OFF);
+
+ ipc_trace = kzalloc(sizeof(*ipc_trace), GFP_KERNEL);
+ if (!ipc_trace)
+ return NULL;
+
+ ipc_trace->mode = TRACE_DISABLE;
+ ipc_trace->dev = ipc_imem->dev;
+ ipc_trace->ipc_imem = ipc_imem;
+ ipc_trace->chl_id = IPC_MEM_CTRL_CHL_ID_3;
+
+ mutex_init(&ipc_trace->trc_mutex);
+
+ ipc_trace->ctrl_file = debugfs_create_file(IOSM_TRC_DEBUGFS_TRACE_CTRL,
+ IOSM_TRC_FILE_PERM,
+ ipc_imem->debugfs_dir,
+ ipc_trace, &ipc_trace_fops);
+
+ ipc_trace->ipc_rchan = relay_open(IOSM_TRC_DEBUGFS_TRACE,
+ ipc_imem->debugfs_dir,
+ IOSM_TRC_SUB_BUFF_SIZE,
+ IOSM_TRC_N_SUB_BUFF,
+ &relay_callbacks, NULL);
+
+ return ipc_trace;
+}
+
+/**
+ * ipc_trace_deinit - Closing relayfs, removing debugfs entries
+ * @ipc_trace: Pointer to the iosm_trace data struct
+ */
+void ipc_trace_deinit(struct iosm_trace *ipc_trace)
+{
+ if (!ipc_trace)
+ return;
+
+ debugfs_remove(ipc_trace->ctrl_file);
+ relay_close(ipc_trace->ipc_rchan);
+ mutex_destroy(&ipc_trace->trc_mutex);
+ kfree(ipc_trace);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.h b/drivers/net/wwan/iosm/iosm_ipc_trace.h
new file mode 100644
index 000000000000..5ebe7790585c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_trace.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_TRACE_H
+#define IOSM_IPC_TRACE_H
+
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem_ops.h"
+
+/**
+ * enum trace_ctrl_mode - State of trace channel
+ * @TRACE_DISABLE: tracing is disabled
+ * @TRACE_ENABLE: tracing is enabled
+ */
+enum trace_ctrl_mode {
+ TRACE_DISABLE = 0,
+ TRACE_ENABLE,
+};
+
+/**
+ * struct iosm_trace - Struct for trace interface
+ * @ipc_rchan: Pointer to relay channel
+ * @ctrl_file: Pointer to trace control file
+ * @ipc_imem: Imem instance
+ * @dev: Pointer to device struct
+ * @channel: Channel instance
+ * @chl_id: Channel identifier
+ * @trc_mutex: Mutex used for read and write mode
+ * @mode: Current trace mode (enabled or disabled)
+ */
+struct iosm_trace {
+ struct rchan *ipc_rchan;
+ struct dentry *ctrl_file;
+ struct iosm_imem *ipc_imem;
+ struct device *dev;
+ struct ipc_mem_channel *channel;
+ enum ipc_channel_id chl_id;
+ struct mutex trc_mutex; /* Mutex used for read and write mode */
+ enum trace_ctrl_mode mode;
+};
+
+#ifdef CONFIG_WWAN_DEBUGFS
+
+static inline bool ipc_is_trace_channel(struct iosm_imem *ipc_mem, u16 chl_id)
+{
+ return ipc_mem->trace && ipc_mem->trace->chl_id == chl_id;
+}
+
+struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem);
+void ipc_trace_deinit(struct iosm_trace *ipc_trace);
+void ipc_trace_port_rx(struct iosm_imem *ipc_imem, struct sk_buff *skb);
+
+#else
+
+static inline bool ipc_is_trace_channel(struct iosm_imem *ipc_mem, u16 chl_id)
+{
+ return false;
+}
+
+static inline void ipc_trace_port_rx(struct iosm_imem *ipc_imem,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
index b571d9cedba4..27151148c782 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -8,6 +8,7 @@
#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include <linux/wwan.h>
+#include <net/pkt_sched.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem_ops.h"
@@ -159,7 +160,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev)
{
iosm_dev->header_ops = NULL;
iosm_dev->hard_header_len = 0;
- iosm_dev->priv_flags |= IFF_NO_QUEUE;
+ iosm_dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
iosm_dev->type = ARPHRD_NONE;
iosm_dev->mtu = ETH_DATA_LEN;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.h b/drivers/net/wwan/iosm/iosm_ipc_wwan.h
index 4925f22dff0a..a23e926398ff 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.h
@@ -42,14 +42,4 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
*
*/
void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int id, bool on);
-
-/**
- * ipc_wwan_is_tx_stopped - Checks if Tx stopped for a Interface id.
- * @ipc_wwan: Pointer to wwan instance
- * @id: Ipc mux channel session id
- *
- * Return: true if stopped, false otherwise
- */
-bool ipc_wwan_is_tx_stopped(struct iosm_wwan *ipc_wwan, int id);
-
#endif
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index 71bf9b4f769f..6872782e8dd8 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -385,13 +385,13 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
int err;
while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
- struct sk_buff *skb = alloc_skb(MHI_DEFAULT_MRU, GFP_KERNEL);
+ struct sk_buff *skb = alloc_skb(mbim->mru, GFP_KERNEL);
if (unlikely(!skb))
break;
err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
- MHI_DEFAULT_MRU, MHI_EOT);
+ mbim->mru, MHI_EOT);
if (unlikely(err)) {
kfree_skb(skb);
break;
diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c
new file mode 100644
index 000000000000..5dfa2eba6014
--- /dev/null
+++ b/drivers/net/wwan/qcom_bam_dmux.c
@@ -0,0 +1,907 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm BAM-DMUX WWAN network driver
+ * Copyright (c) 2020, Stephan Gerhold <stephan@gerhold.net>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/if_arp.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <net/pkt_sched.h>
+
+#define BAM_DMUX_BUFFER_SIZE SZ_2K
+#define BAM_DMUX_HDR_SIZE sizeof(struct bam_dmux_hdr)
+#define BAM_DMUX_MAX_DATA_SIZE (BAM_DMUX_BUFFER_SIZE - BAM_DMUX_HDR_SIZE)
+#define BAM_DMUX_NUM_SKB 32
+
+#define BAM_DMUX_HDR_MAGIC 0x33fc
+
+#define BAM_DMUX_AUTOSUSPEND_DELAY 1000
+#define BAM_DMUX_REMOTE_TIMEOUT msecs_to_jiffies(2000)
+
+enum {
+ BAM_DMUX_CMD_DATA,
+ BAM_DMUX_CMD_OPEN,
+ BAM_DMUX_CMD_CLOSE,
+};
+
+enum {
+ BAM_DMUX_CH_DATA_0,
+ BAM_DMUX_CH_DATA_1,
+ BAM_DMUX_CH_DATA_2,
+ BAM_DMUX_CH_DATA_3,
+ BAM_DMUX_CH_DATA_4,
+ BAM_DMUX_CH_DATA_5,
+ BAM_DMUX_CH_DATA_6,
+ BAM_DMUX_CH_DATA_7,
+ BAM_DMUX_NUM_CH
+};
+
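+/* Every frame on the BAM pipe begins with this 8-byte header; len counts
+ * only the payload that follows, excluding the trailing word-alignment
+ * padding.
+ */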
+struct bam_dmux_hdr {
+ u16 magic;
+ u8 signal;
+ u8 cmd;
+ u8 pad;
+ u8 ch;
+ u16 len;
+};
+
+struct bam_dmux_skb_dma {
+ struct bam_dmux *dmux;
+ struct sk_buff *skb;
+ dma_addr_t addr;
+};
+
+struct bam_dmux {
+ struct device *dev;
+
+ int pc_irq;
+ bool pc_state, pc_ack_state;
+ struct qcom_smem_state *pc, *pc_ack;
+ u32 pc_mask, pc_ack_mask;
+ wait_queue_head_t pc_wait;
+ struct completion pc_ack_completion;
+
+ struct dma_chan *rx, *tx;
+ struct bam_dmux_skb_dma rx_skbs[BAM_DMUX_NUM_SKB];
+ struct bam_dmux_skb_dma tx_skbs[BAM_DMUX_NUM_SKB];
+ spinlock_t tx_lock; /* Protect tx_skbs, tx_next_skb */
+ unsigned int tx_next_skb;
+ atomic_long_t tx_deferred_skb;
+ struct work_struct tx_wakeup_work;
+
+ DECLARE_BITMAP(remote_channels, BAM_DMUX_NUM_CH);
+ struct work_struct register_netdev_work;
+ struct net_device *netdevs[BAM_DMUX_NUM_CH];
+};
+
+struct bam_dmux_netdev {
+ struct bam_dmux *dmux;
+ u8 ch;
+};
+
+static void bam_dmux_pc_vote(struct bam_dmux *dmux, bool enable)
+{
+ reinit_completion(&dmux->pc_ack_completion);
+ qcom_smem_state_update_bits(dmux->pc, dmux->pc_mask,
+ enable ? dmux->pc_mask : 0);
+}
+
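+/* The ack is signalled by toggling the bit: the remote side apparently
+ * treats any edge on the ack line as acknowledging its latest
+ * power-state change.
+ */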
+static void bam_dmux_pc_ack(struct bam_dmux *dmux)
+{
+ qcom_smem_state_update_bits(dmux->pc_ack, dmux->pc_ack_mask,
+ dmux->pc_ack_state ? 0 : dmux->pc_ack_mask);
+ dmux->pc_ack_state = !dmux->pc_ack_state;
+}
+
+static bool bam_dmux_skb_dma_map(struct bam_dmux_skb_dma *skb_dma,
+ enum dma_data_direction dir)
+{
+ struct device *dev = skb_dma->dmux->dev;
+
+ skb_dma->addr = dma_map_single(dev, skb_dma->skb->data, skb_dma->skb->len, dir);
+ if (dma_mapping_error(dev, skb_dma->addr)) {
+ dev_err(dev, "Failed to DMA map buffer\n");
+ skb_dma->addr = 0;
+ return false;
+ }
+
+ return true;
+}
+
+static void bam_dmux_skb_dma_unmap(struct bam_dmux_skb_dma *skb_dma,
+ enum dma_data_direction dir)
+{
+ dma_unmap_single(skb_dma->dmux->dev, skb_dma->addr, skb_dma->skb->len, dir);
+ skb_dma->addr = 0;
+}
+
+static void bam_dmux_tx_wake_queues(struct bam_dmux *dmux)
+{
+ int i;
+
+ dev_dbg(dmux->dev, "wake queues\n");
+
+ for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
+ struct net_device *netdev = dmux->netdevs[i];
+
+ if (netdev && netif_running(netdev))
+ netif_wake_queue(netdev);
+ }
+}
+
+static void bam_dmux_tx_stop_queues(struct bam_dmux *dmux)
+{
+ int i;
+
+ dev_dbg(dmux->dev, "stop queues\n");
+
+ for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
+ struct net_device *netdev = dmux->netdevs[i];
+
+ if (netdev)
+ netif_stop_queue(netdev);
+ }
+}
+
+static void bam_dmux_tx_done(struct bam_dmux_skb_dma *skb_dma)
+{
+ struct bam_dmux *dmux = skb_dma->dmux;
+ unsigned long flags;
+
+ pm_runtime_mark_last_busy(dmux->dev);
+ pm_runtime_put_autosuspend(dmux->dev);
+
+ if (skb_dma->addr)
+ bam_dmux_skb_dma_unmap(skb_dma, DMA_TO_DEVICE);
+
+ spin_lock_irqsave(&dmux->tx_lock, flags);
+ skb_dma->skb = NULL;
+ if (skb_dma == &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB])
+ bam_dmux_tx_wake_queues(dmux);
+ spin_unlock_irqrestore(&dmux->tx_lock, flags);
+}
+
+static void bam_dmux_tx_callback(void *data)
+{
+ struct bam_dmux_skb_dma *skb_dma = data;
+ struct sk_buff *skb = skb_dma->skb;
+
+ bam_dmux_tx_done(skb_dma);
+ dev_consume_skb_any(skb);
+}
+
+static bool bam_dmux_skb_dma_submit_tx(struct bam_dmux_skb_dma *skb_dma)
+{
+ struct bam_dmux *dmux = skb_dma->dmux;
+ struct dma_async_tx_descriptor *desc;
+
+ desc = dmaengine_prep_slave_single(dmux->tx, skb_dma->addr,
+ skb_dma->skb->len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dmux->dev, "Failed to prepare TX DMA buffer\n");
+ return false;
+ }
+
+ desc->callback = bam_dmux_tx_callback;
+ desc->callback_param = skb_dma;
+ desc->cookie = dmaengine_submit(desc);
+ return true;
+}
+
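+/* tx_skbs acts as a fixed-size ring indexed by tx_next_skb; when the next
+ * slot is still in flight the queues are stopped and the caller gets NULL
+ * (start_xmit maps this to NETDEV_TX_BUSY, bam_dmux_send_cmd to -EAGAIN).
+ */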
+static struct bam_dmux_skb_dma *
+bam_dmux_tx_queue(struct bam_dmux *dmux, struct sk_buff *skb)
+{
+ struct bam_dmux_skb_dma *skb_dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dmux->tx_lock, flags);
+
+ skb_dma = &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB];
+ if (skb_dma->skb) {
+ bam_dmux_tx_stop_queues(dmux);
+ spin_unlock_irqrestore(&dmux->tx_lock, flags);
+ return NULL;
+ }
+ skb_dma->skb = skb;
+
+ dmux->tx_next_skb++;
+ if (dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB].skb)
+ bam_dmux_tx_stop_queues(dmux);
+
+ spin_unlock_irqrestore(&dmux->tx_lock, flags);
+ return skb_dma;
+}
+
+static int bam_dmux_send_cmd(struct bam_dmux_netdev *bndev, u8 cmd)
+{
+ struct bam_dmux *dmux = bndev->dmux;
+ struct bam_dmux_skb_dma *skb_dma;
+ struct bam_dmux_hdr *hdr;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = alloc_skb(sizeof(*hdr), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = skb_put_zero(skb, sizeof(*hdr));
+ hdr->magic = BAM_DMUX_HDR_MAGIC;
+ hdr->cmd = cmd;
+ hdr->ch = bndev->ch;
+
+ skb_dma = bam_dmux_tx_queue(dmux, skb);
+ if (!skb_dma) {
+ ret = -EAGAIN;
+ goto free_skb;
+ }
+
+ ret = pm_runtime_get_sync(dmux->dev);
+ if (ret < 0)
+ goto tx_fail;
+
+ if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE)) {
+ ret = -ENOMEM;
+ goto tx_fail;
+ }
+
+ if (!bam_dmux_skb_dma_submit_tx(skb_dma)) {
+ ret = -EIO;
+ goto tx_fail;
+ }
+
+ dma_async_issue_pending(dmux->tx);
+ return 0;
+
+tx_fail:
+ bam_dmux_tx_done(skb_dma);
+free_skb:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int bam_dmux_netdev_open(struct net_device *netdev)
+{
+ struct bam_dmux_netdev *bndev = netdev_priv(netdev);
+ int ret;
+
+ ret = bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_OPEN);
+ if (ret)
+ return ret;
+
+ netif_start_queue(netdev);
+ return 0;
+}
+
+static int bam_dmux_netdev_stop(struct net_device *netdev)
+{
+ struct bam_dmux_netdev *bndev = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_CLOSE);
+ return 0;
+}
+
+static unsigned int needed_room(unsigned int avail, unsigned int needed)
+{
+ if (avail >= needed)
+ return 0;
+ return needed - avail;
+}
+
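+/* Make room for the header in front and pad the payload to a 32-bit
+ * boundary at the back (an already aligned payload still gets a full word
+ * of padding), reallocating the head only when necessary.
+ */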
+static int bam_dmux_tx_prepare_skb(struct bam_dmux_netdev *bndev,
+ struct sk_buff *skb)
+{
+ unsigned int head = needed_room(skb_headroom(skb), BAM_DMUX_HDR_SIZE);
+ unsigned int pad = sizeof(u32) - skb->len % sizeof(u32);
+ unsigned int tail = needed_room(skb_tailroom(skb), pad);
+ struct bam_dmux_hdr *hdr;
+ int ret;
+
+ if (head || tail || skb_cloned(skb)) {
+ ret = pskb_expand_head(skb, head, tail, GFP_ATOMIC);
+ if (ret)
+ return ret;
+ }
+
+ hdr = skb_push(skb, sizeof(*hdr));
+ hdr->magic = BAM_DMUX_HDR_MAGIC;
+ hdr->signal = 0;
+ hdr->cmd = BAM_DMUX_CMD_DATA;
+ hdr->pad = pad;
+ hdr->ch = bndev->ch;
+ hdr->len = skb->len - sizeof(*hdr);
+ if (pad)
+ skb_put_zero(skb, pad);
+
+ return 0;
+}
+
+static netdev_tx_t bam_dmux_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct bam_dmux_netdev *bndev = netdev_priv(netdev);
+ struct bam_dmux *dmux = bndev->dmux;
+ struct bam_dmux_skb_dma *skb_dma;
+ int active, ret;
+
+ skb_dma = bam_dmux_tx_queue(dmux, skb);
+ if (!skb_dma)
+ return NETDEV_TX_BUSY;
+
+ active = pm_runtime_get(dmux->dev);
+ if (active < 0 && active != -EINPROGRESS)
+ goto drop;
+
+ ret = bam_dmux_tx_prepare_skb(bndev, skb);
+ if (ret)
+ goto drop;
+
+ if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE))
+ goto drop;
+
+ if (active <= 0) {
+ /* Cannot sleep here so mark skb for wakeup handler and return */
+ if (!atomic_long_fetch_or(BIT(skb_dma - dmux->tx_skbs),
+ &dmux->tx_deferred_skb))
+ queue_pm_work(&dmux->tx_wakeup_work);
+ return NETDEV_TX_OK;
+ }
+
+ if (!bam_dmux_skb_dma_submit_tx(skb_dma))
+ goto drop;
+
+ dma_async_issue_pending(dmux->tx);
+ return NETDEV_TX_OK;
+
+drop:
+ bam_dmux_tx_done(skb_dma);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
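+/* Runs from the pm workqueue: resumes the device, then submits every skb
+ * that start_xmit deferred while the link was still waking up.
+ */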
+static void bam_dmux_tx_wakeup_work(struct work_struct *work)
+{
+ struct bam_dmux *dmux = container_of(work, struct bam_dmux, tx_wakeup_work);
+ unsigned long pending;
+ int ret, i;
+
+ ret = pm_runtime_resume_and_get(dmux->dev);
+ if (ret < 0) {
+ dev_err(dmux->dev, "Failed to resume: %d\n", ret);
+ return;
+ }
+
+ pending = atomic_long_xchg(&dmux->tx_deferred_skb, 0);
+ if (!pending)
+ goto out;
+
+ dev_dbg(dmux->dev, "pending skbs after wakeup: %#lx\n", pending);
+ for_each_set_bit(i, &pending, BAM_DMUX_NUM_SKB) {
+ bam_dmux_skb_dma_submit_tx(&dmux->tx_skbs[i]);
+ }
+ dma_async_issue_pending(dmux->tx);
+
+out:
+ pm_runtime_mark_last_busy(dmux->dev);
+ pm_runtime_put_autosuspend(dmux->dev);
+}
+
+static const struct net_device_ops bam_dmux_ops = {
+ .ndo_open = bam_dmux_netdev_open,
+ .ndo_stop = bam_dmux_netdev_stop,
+ .ndo_start_xmit = bam_dmux_netdev_start_xmit,
+};
+
+static const struct device_type wwan_type = {
+ .name = "wwan",
+};
+
+static void bam_dmux_netdev_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &bam_dmux_ops;
+
+ dev->type = ARPHRD_RAWIP;
+ SET_NETDEV_DEVTYPE(dev, &wwan_type);
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+ dev->mtu = ETH_DATA_LEN;
+ dev->max_mtu = BAM_DMUX_MAX_DATA_SIZE;
+ dev->needed_headroom = sizeof(struct bam_dmux_hdr);
+ dev->needed_tailroom = sizeof(u32); /* word-aligned */
+ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+
+	/* This perm addr will be used as the interface identifier by IPv6 */
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->perm_addr);
+}
+
+static void bam_dmux_register_netdev_work(struct work_struct *work)
+{
+ struct bam_dmux *dmux = container_of(work, struct bam_dmux, register_netdev_work);
+ struct bam_dmux_netdev *bndev;
+ struct net_device *netdev;
+ int ch, ret;
+
+ for_each_set_bit(ch, dmux->remote_channels, BAM_DMUX_NUM_CH) {
+ if (dmux->netdevs[ch])
+ continue;
+
+ netdev = alloc_netdev(sizeof(*bndev), "wwan%d", NET_NAME_ENUM,
+ bam_dmux_netdev_setup);
+ if (!netdev)
+ return;
+
+ SET_NETDEV_DEV(netdev, dmux->dev);
+ netdev->dev_port = ch;
+
+ bndev = netdev_priv(netdev);
+ bndev->dmux = dmux;
+ bndev->ch = ch;
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dmux->dev, "Failed to register netdev for channel %u: %d\n",
+ ch, ret);
+ free_netdev(netdev);
+ return;
+ }
+
+ dmux->netdevs[ch] = netdev;
+ }
+}
+
+static void bam_dmux_rx_callback(void *data);
+
+static bool bam_dmux_skb_dma_submit_rx(struct bam_dmux_skb_dma *skb_dma)
+{
+ struct bam_dmux *dmux = skb_dma->dmux;
+ struct dma_async_tx_descriptor *desc;
+
+ desc = dmaengine_prep_slave_single(dmux->rx, skb_dma->addr,
+ skb_dma->skb->len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dmux->dev, "Failed to prepare RX DMA buffer\n");
+ return false;
+ }
+
+ desc->callback = bam_dmux_rx_callback;
+ desc->callback_param = skb_dma;
+ desc->cookie = dmaengine_submit(desc);
+ return true;
+}
+
+static bool bam_dmux_skb_dma_queue_rx(struct bam_dmux_skb_dma *skb_dma, gfp_t gfp)
+{
+ if (!skb_dma->skb) {
+ skb_dma->skb = __netdev_alloc_skb(NULL, BAM_DMUX_BUFFER_SIZE, gfp);
+ if (!skb_dma->skb)
+ return false;
+ skb_put(skb_dma->skb, BAM_DMUX_BUFFER_SIZE);
+ }
+
+ return bam_dmux_skb_dma_map(skb_dma, DMA_FROM_DEVICE) &&
+ bam_dmux_skb_dma_submit_rx(skb_dma);
+}
+
+static void bam_dmux_cmd_data(struct bam_dmux_skb_dma *skb_dma)
+{
+ struct bam_dmux *dmux = skb_dma->dmux;
+ struct sk_buff *skb = skb_dma->skb;
+ struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
+ struct net_device *netdev = dmux->netdevs[hdr->ch];
+
+ if (!netdev || !netif_running(netdev)) {
+ dev_warn(dmux->dev, "Data for inactive channel %u\n", hdr->ch);
+ return;
+ }
+
+ if (hdr->len > BAM_DMUX_MAX_DATA_SIZE) {
+ dev_err(dmux->dev, "Data larger than buffer? (%u > %u)\n",
+ hdr->len, (u16)BAM_DMUX_MAX_DATA_SIZE);
+ return;
+ }
+
+ skb_dma->skb = NULL; /* Hand over to network stack */
+
+ skb_pull(skb, sizeof(*hdr));
+ skb_trim(skb, hdr->len);
+ skb->dev = netdev;
+
+ /* Only Raw-IP/QMAP is supported by this driver */
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+
+ netif_receive_skb(skb);
+}
+
+static void bam_dmux_cmd_open(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
+{
+ struct net_device *netdev = dmux->netdevs[hdr->ch];
+
+ dev_dbg(dmux->dev, "open channel: %u\n", hdr->ch);
+
+ if (__test_and_set_bit(hdr->ch, dmux->remote_channels)) {
+ dev_warn(dmux->dev, "Channel already open: %u\n", hdr->ch);
+ return;
+ }
+
+ if (netdev) {
+ netif_device_attach(netdev);
+ } else {
+ /* Cannot sleep here, schedule work to register the netdev */
+ schedule_work(&dmux->register_netdev_work);
+ }
+}
+
+static void bam_dmux_cmd_close(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
+{
+ struct net_device *netdev = dmux->netdevs[hdr->ch];
+
+ dev_dbg(dmux->dev, "close channel: %u\n", hdr->ch);
+
+ if (!__test_and_clear_bit(hdr->ch, dmux->remote_channels)) {
+ dev_err(dmux->dev, "Channel not open: %u\n", hdr->ch);
+ return;
+ }
+
+ if (netdev)
+ netif_device_detach(netdev);
+}
+
+static void bam_dmux_rx_callback(void *data)
+{
+ struct bam_dmux_skb_dma *skb_dma = data;
+ struct bam_dmux *dmux = skb_dma->dmux;
+ struct sk_buff *skb = skb_dma->skb;
+ struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
+
+ bam_dmux_skb_dma_unmap(skb_dma, DMA_FROM_DEVICE);
+
+ if (hdr->magic != BAM_DMUX_HDR_MAGIC) {
+ dev_err(dmux->dev, "Invalid magic in header: %#x\n", hdr->magic);
+ goto out;
+ }
+
+ if (hdr->ch >= BAM_DMUX_NUM_CH) {
+ dev_dbg(dmux->dev, "Unsupported channel: %u\n", hdr->ch);
+ goto out;
+ }
+
+ switch (hdr->cmd) {
+ case BAM_DMUX_CMD_DATA:
+ bam_dmux_cmd_data(skb_dma);
+ break;
+ case BAM_DMUX_CMD_OPEN:
+ bam_dmux_cmd_open(dmux, hdr);
+ break;
+ case BAM_DMUX_CMD_CLOSE:
+ bam_dmux_cmd_close(dmux, hdr);
+ break;
+ default:
+ dev_err(dmux->dev, "Unsupported command %u on channel %u\n",
+ hdr->cmd, hdr->ch);
+ break;
+ }
+
+out:
+ if (bam_dmux_skb_dma_queue_rx(skb_dma, GFP_ATOMIC))
+ dma_async_issue_pending(dmux->rx);
+}
+
+static bool bam_dmux_power_on(struct bam_dmux *dmux)
+{
+ struct device *dev = dmux->dev;
+ struct dma_slave_config dma_rx_conf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_maxburst = BAM_DMUX_BUFFER_SIZE,
+ };
+ int i;
+
+ dmux->rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dmux->rx)) {
+ dev_err(dev, "Failed to request RX DMA channel: %pe\n", dmux->rx);
+ dmux->rx = NULL;
+ return false;
+ }
+ dmaengine_slave_config(dmux->rx, &dma_rx_conf);
+
+ for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
+ if (!bam_dmux_skb_dma_queue_rx(&dmux->rx_skbs[i], GFP_KERNEL))
+ return false;
+ }
+ dma_async_issue_pending(dmux->rx);
+
+ return true;
+}
+
+static void bam_dmux_free_skbs(struct bam_dmux_skb_dma skbs[],
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
+ struct bam_dmux_skb_dma *skb_dma = &skbs[i];
+
+ if (skb_dma->addr)
+ bam_dmux_skb_dma_unmap(skb_dma, dir);
+ if (skb_dma->skb) {
+ dev_kfree_skb(skb_dma->skb);
+ skb_dma->skb = NULL;
+ }
+ }
+}
+
+static void bam_dmux_power_off(struct bam_dmux *dmux)
+{
+ if (dmux->tx) {
+ dmaengine_terminate_sync(dmux->tx);
+ dma_release_channel(dmux->tx);
+ dmux->tx = NULL;
+ }
+
+ if (dmux->rx) {
+ dmaengine_terminate_sync(dmux->rx);
+ dma_release_channel(dmux->rx);
+ dmux->rx = NULL;
+ }
+
+ bam_dmux_free_skbs(dmux->rx_skbs, DMA_FROM_DEVICE);
+}
+
+static irqreturn_t bam_dmux_pc_irq(int irq, void *data)
+{
+ struct bam_dmux *dmux = data;
+ bool new_state = !dmux->pc_state;
+
+ dev_dbg(dmux->dev, "pc: %u\n", new_state);
+
+ if (new_state) {
+ if (bam_dmux_power_on(dmux))
+ bam_dmux_pc_ack(dmux);
+ else
+ bam_dmux_power_off(dmux);
+ } else {
+ bam_dmux_power_off(dmux);
+ bam_dmux_pc_ack(dmux);
+ }
+
+ dmux->pc_state = new_state;
+ wake_up_all(&dmux->pc_wait);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bam_dmux_pc_ack_irq(int irq, void *data)
+{
+ struct bam_dmux *dmux = data;
+
+ dev_dbg(dmux->dev, "pc ack\n");
+ complete_all(&dmux->pc_ack_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int bam_dmux_runtime_suspend(struct device *dev)
+{
+ struct bam_dmux *dmux = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "runtime suspend\n");
+ bam_dmux_pc_vote(dmux, false);
+
+ return 0;
+}
+
+static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
+{
+ struct bam_dmux *dmux = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "runtime resume\n");
+
+ /* Wait until previous power down was acked */
+ if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
+ BAM_DMUX_REMOTE_TIMEOUT))
+ return -ETIMEDOUT;
+
+ /* Vote for power state */
+ bam_dmux_pc_vote(dmux, true);
+
+ /* Wait for ack */
+ if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
+ BAM_DMUX_REMOTE_TIMEOUT)) {
+ bam_dmux_pc_vote(dmux, false);
+ return -ETIMEDOUT;
+ }
+
+ /* Wait until we're up */
+ if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state,
+ BAM_DMUX_REMOTE_TIMEOUT)) {
+ bam_dmux_pc_vote(dmux, false);
+ return -ETIMEDOUT;
+ }
+
+ /* Ensure that we actually initialized successfully */
+ if (!dmux->rx) {
+ bam_dmux_pc_vote(dmux, false);
+ return -ENXIO;
+ }
+
+ /* Request TX channel if necessary */
+ if (dmux->tx)
+ return 0;
+
+ dmux->tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dmux->tx)) {
+ dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
+ dmux->tx = NULL;
+ bam_dmux_runtime_suspend(dev);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int bam_dmux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bam_dmux *dmux;
+ int ret, pc_ack_irq, i;
+ unsigned int bit;
+
+ dmux = devm_kzalloc(dev, sizeof(*dmux), GFP_KERNEL);
+ if (!dmux)
+ return -ENOMEM;
+
+ dmux->dev = dev;
+ platform_set_drvdata(pdev, dmux);
+
+ dmux->pc_irq = platform_get_irq_byname(pdev, "pc");
+ if (dmux->pc_irq < 0)
+ return dmux->pc_irq;
+
+ pc_ack_irq = platform_get_irq_byname(pdev, "pc-ack");
+ if (pc_ack_irq < 0)
+ return pc_ack_irq;
+
+ dmux->pc = devm_qcom_smem_state_get(dev, "pc", &bit);
+ if (IS_ERR(dmux->pc))
+ return dev_err_probe(dev, PTR_ERR(dmux->pc),
+ "Failed to get pc state\n");
+ dmux->pc_mask = BIT(bit);
+
+ dmux->pc_ack = devm_qcom_smem_state_get(dev, "pc-ack", &bit);
+ if (IS_ERR(dmux->pc_ack))
+ return dev_err_probe(dev, PTR_ERR(dmux->pc_ack),
+ "Failed to get pc-ack state\n");
+ dmux->pc_ack_mask = BIT(bit);
+
+ init_waitqueue_head(&dmux->pc_wait);
+ init_completion(&dmux->pc_ack_completion);
+ complete_all(&dmux->pc_ack_completion);
+
+ spin_lock_init(&dmux->tx_lock);
+ INIT_WORK(&dmux->tx_wakeup_work, bam_dmux_tx_wakeup_work);
+ INIT_WORK(&dmux->register_netdev_work, bam_dmux_register_netdev_work);
+
+ for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
+ dmux->rx_skbs[i].dmux = dmux;
+ dmux->tx_skbs[i].dmux = dmux;
+ }
+
+ /* Runtime PM manages our own power vote.
+ * Note that the RX path may be active even if we are runtime suspended,
+ * since it is controlled by the remote side.
+ */
+ pm_runtime_set_autosuspend_delay(dev, BAM_DMUX_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq,
+ IRQF_ONESHOT, NULL, dmux);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,
+ IRQF_ONESHOT, NULL, dmux);
+ if (ret)
+ return ret;
+
+ ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
+ &dmux->pc_state);
+ if (ret)
+ return ret;
+
+ /* Check if remote finished initialization before us */
+ if (dmux->pc_state) {
+ if (bam_dmux_power_on(dmux))
+ bam_dmux_pc_ack(dmux);
+ else
+ bam_dmux_power_off(dmux);
+ }
+
+ return 0;
+}
+
+static int bam_dmux_remove(struct platform_device *pdev)
+{
+ struct bam_dmux *dmux = platform_get_drvdata(pdev);
+ struct device *dev = dmux->dev;
+ LIST_HEAD(list);
+ int i;
+
+ /* Unregister network interfaces */
+ cancel_work_sync(&dmux->register_netdev_work);
+ rtnl_lock();
+ for (i = 0; i < BAM_DMUX_NUM_CH; ++i)
+ if (dmux->netdevs[i])
+ unregister_netdevice_queue(dmux->netdevs[i], &list);
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+ cancel_work_sync(&dmux->tx_wakeup_work);
+
+ /* Drop our own power vote */
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ bam_dmux_runtime_suspend(dev);
+ pm_runtime_set_suspended(dev);
+
+ /* Try to wait for remote side to drop power vote */
+ if (!wait_event_timeout(dmux->pc_wait, !dmux->rx, BAM_DMUX_REMOTE_TIMEOUT))
+ dev_err(dev, "Timed out waiting for remote side to suspend\n");
+
+ /* Make sure everything is cleaned up before we return */
+ disable_irq(dmux->pc_irq);
+ bam_dmux_power_off(dmux);
+ bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static const struct dev_pm_ops bam_dmux_pm_ops = {
+ SET_RUNTIME_PM_OPS(bam_dmux_runtime_suspend, bam_dmux_runtime_resume, NULL)
+};
+
+static const struct of_device_id bam_dmux_of_match[] = {
+ { .compatible = "qcom,bam-dmux" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bam_dmux_of_match);
+
+static struct platform_driver bam_dmux_driver = {
+ .probe = bam_dmux_probe,
+ .remove = bam_dmux_remove,
+ .driver = {
+ .name = "bam-dmux",
+ .pm = &bam_dmux_pm_ops,
+ .of_match_table = bam_dmux_of_match,
+ },
+};
+module_platform_driver(bam_dmux_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm BAM-DMUX WWAN Network Driver");
+MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");
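The RX path above validates a small fixed header before dispatching on hdr->cmd. As a minimal standalone model (not the driver's actual definitions, which live earlier in this file), the framing can be sketched like this; the field layout and the 0x33fc magic are assumptions inferred from the accesses in bam_dmux_rx_callback() and bam_dmux_cmd_data():

/*
 * Hypothetical mirror of struct bam_dmux_hdr; layout and magic value
 * are assumptions, hedged per the accesses shown in the driver above.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct dmux_hdr {
	uint16_t magic;		/* assumed 0x33fc, little-endian */
	uint8_t  signal;
	uint8_t  cmd;		/* DATA / OPEN / CLOSE */
	uint8_t  pad;
	uint8_t  ch;		/* logical channel, < BAM_DMUX_NUM_CH */
	uint16_t len;		/* payload bytes following the header */
} __attribute__((packed));

static int dmux_parse(const uint8_t *buf, size_t n, struct dmux_hdr *hdr)
{
	if (n < sizeof(*hdr))
		return -1;
	memcpy(hdr, buf, sizeof(*hdr));	/* assumes little-endian host */
	if (hdr->magic != 0x33fc || n - sizeof(*hdr) < hdr->len)
		return -1;
	return 0;
}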
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index d293ab688044..1508dc2a497b 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -3,6 +3,7 @@
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
@@ -25,6 +26,7 @@ static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
static struct class *wwan_class;
static int wwan_major;
+static struct dentry *wwan_debugfs_dir;
#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
#define to_wwan_port(d) container_of(d, struct wwan_port, dev)
@@ -40,6 +42,7 @@ static int wwan_major;
* @port_id: Current available port ID to pick.
* @ops: wwan device ops
* @ops_ctxt: context to pass to ops
+ * @debugfs_dir: WWAN device debugfs dir
*/
struct wwan_device {
unsigned int id;
@@ -47,6 +50,9 @@ struct wwan_device {
atomic_t port_id;
const struct wwan_ops *ops;
void *ops_ctxt;
+#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
};
/**
@@ -142,6 +148,20 @@ static struct wwan_device *wwan_dev_get_by_name(const char *name)
return to_wwan_dev(dev);
}
+#ifdef CONFIG_WWAN_DEBUGFS
+struct dentry *wwan_get_debugfs_dir(struct device *parent)
+{
+ struct wwan_device *wwandev;
+
+ wwandev = wwan_dev_get_by_parent(parent);
+ if (IS_ERR(wwandev))
+ return ERR_CAST(wwandev);
+
+ return wwandev->debugfs_dir;
+}
+EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
+#endif
+
/* This function allocates and registers a new WWAN device OR if a WWAN device
* already exist for the given parent, it gets a reference and return it.
* This function is not exported (for now), it is called indirectly via
@@ -189,6 +209,12 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
goto done_unlock;
}
+#ifdef CONFIG_WWAN_DEBUGFS
+ wwandev->debugfs_dir =
+ debugfs_create_dir(kobject_name(&wwandev->dev.kobj),
+ wwan_debugfs_dir);
+#endif
+
done_unlock:
mutex_unlock(&wwan_register_lock);
@@ -218,10 +244,14 @@ static void wwan_remove_dev(struct wwan_device *wwandev)
else
ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
- if (!ret)
+ if (!ret) {
+#ifdef CONFIG_WWAN_DEBUGFS
+ debugfs_remove_recursive(wwandev->debugfs_dir);
+#endif
device_unregister(&wwandev->dev);
- else
+ } else {
put_device(&wwandev->dev);
+ }
mutex_unlock(&wwan_register_lock);
}
@@ -1117,6 +1147,10 @@ static int __init wwan_init(void)
goto destroy;
}
+#ifdef CONFIG_WWAN_DEBUGFS
+ wwan_debugfs_dir = debugfs_create_dir("wwan", NULL);
+#endif
+
return 0;
destroy:
@@ -1128,6 +1162,7 @@ unregister:
static void __exit wwan_exit(void)
{
+ debugfs_remove_recursive(wwan_debugfs_dir);
__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
rtnl_link_unregister(&wwan_rtnl_link_ops);
class_destroy(wwan_class);
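With the hunks above, a driver sitting on top of the WWAN core can place its debugfs files under the per-device directory. A hedged sketch, assuming CONFIG_WWAN_DEBUGFS is enabled; my_stats_show() and the "stats" file name are made up for illustration:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/wwan.h>

static int my_stats_show(struct seq_file *m, void *v)
{
	seq_puts(m, "link: up\n");	/* placeholder content */
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_stats);

static void my_dev_init_debugfs(struct device *parent)
{
	struct dentry *dir = wwan_get_debugfs_dir(parent);

	if (IS_ERR(dir))	/* no WWAN device registered for parent */
		return;
	debugfs_create_file("stats", 0444, dir, NULL, &my_stats_fops);
}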
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d514d96027a6..8b18246ad999 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -948,7 +948,7 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
}
return act;
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index f78670bf41e0..28a9e1eb9bcf 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -205,9 +205,7 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
r = fdp_nci_i2c_read(phy, &skb);
- if (r == -EREMOTEIO)
- return IRQ_HANDLED;
- else if (r == -ENOMEM || r == -EBADMSG)
+ if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG)
return IRQ_HANDLED;
if (skb != NULL)
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 37d26f01986b..62a0f1a010cb 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -188,7 +188,7 @@ do { \
static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
{
int polarity, retry, ret;
- char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
+ static const char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
int count = sizeof(rset_cmd);
nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 35b32fb90906..a86b5edfc7ce 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -315,10 +315,8 @@ static int st21nfca_hci_i2c_repack(struct sk_buff *skb)
skb_pull(skb, 1);
r = check_crc(skb->data, skb->len);
- if (r != 0) {
- i = 0;
+ if (r != 0)
return -EBADMSG;
- }
/* remove headbyte */
skb_pull(skb, 1);
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index a43fc4117fa5..c922f10d0d7b 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -316,6 +316,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
return -ENOMEM;
transaction->aid_len = skb->data[1];
+
+ /* Checking if the length of the AID is valid */
+ if (transaction->aid_len > sizeof(transaction->aid))
+ return -EINVAL;
+
memcpy(transaction->aid, &skb->data[2],
transaction->aid_len);
@@ -325,6 +330,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
return -EPROTO;
transaction->params_len = skb->data[transaction->aid_len + 3];
+
+ /* Total size is allocated (skb->len - 2) minus fixed array members */
+ if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
+ return -EINVAL;
+
memcpy(transaction->params, skb->data +
transaction->aid_len + 4, transaction->params_len);
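Both added checks follow the same defensive pattern: a length field read from the wire must be validated against the destination buffer and against the remaining input before it drives a memcpy(). A generic standalone sketch of that pattern (tlv_get() and the 16-byte value array are illustrative, not NFC core API):

#include <string.h>
#include <stdint.h>
#include <stddef.h>

struct tlv { uint8_t len; uint8_t val[16]; };

static int tlv_get(struct tlv *out, const uint8_t *buf, size_t buflen)
{
	if (buflen < 1)
		return -1;
	out->len = buf[0];
	if (out->len > sizeof(out->val) ||	/* fits the fixed array?  */
	    (size_t)out->len > buflen - 1)	/* fits the input buffer? */
		return -1;
	memcpy(out->val, buf + 1, out->len);
	return 0;
}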
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 87847c380051..04550b1f984c 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1321,6 +1321,8 @@ static const struct ntb_dev_data dev_data[] = {
static const struct pci_device_id amd_ntb_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ 0, }
};
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 4c6eb61a6ac6..88ae18b0efa8 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -297,7 +297,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
* (see CMA_CONFIG_ALIGNMENT)
*/
dev_err(&sndev->stdev->dev,
- "ERROR: Memory window address is not aligned to it's size!\n");
+ "ERROR: Memory window address is not aligned to its size!\n");
return -EINVAL;
}
@@ -419,8 +419,10 @@ static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
enum ntb_width *width)
{
struct switchtec_dev *stdev = sndev->stdev;
+ struct part_cfg_regs __iomem *part_cfg =
+ &stdev->mmio_part_cfg_all[partition];
- u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
+ u32 pff = ioread32(&part_cfg->vep_pff_inst_id) & 0xFF;
u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
if (speed)
@@ -840,7 +842,6 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
u64 tpart_vec;
int self;
u64 part_map;
- int bit;
sndev->ntb.pdev = sndev->stdev->pdev;
sndev->ntb.topo = NTB_TOPO_SWITCH;
@@ -859,31 +860,31 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
part_map = ioread64(&sndev->mmio_ntb->ep_map);
+ tpart_vec &= part_map;
part_map &= ~(1 << sndev->self_partition);
- if (!ffs(tpart_vec)) {
+ if (!tpart_vec) {
if (sndev->stdev->partition_count != 2) {
dev_err(&sndev->stdev->dev,
"ntb target partition not defined\n");
return -ENODEV;
}
- bit = ffs(part_map);
- if (!bit) {
+ if (!part_map) {
dev_err(&sndev->stdev->dev,
"peer partition is not NT partition\n");
return -ENODEV;
}
- sndev->peer_partition = bit - 1;
+ sndev->peer_partition = __ffs64(part_map);
} else {
- if (ffs(tpart_vec) != fls(tpart_vec)) {
+ if (__ffs64(tpart_vec) != (fls64(tpart_vec) - 1)) {
dev_err(&sndev->stdev->dev,
"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
return -ENODEV;
}
- sndev->peer_partition = ffs(tpart_vec) - 1;
+ sndev->peer_partition = __ffs64(tpart_vec);
if (!(part_map & (1ULL << sndev->peer_partition))) {
dev_err(&sndev->stdev->dev,
"ntb target partition is not NT partition\n");
@@ -954,7 +955,7 @@ static int config_req_id_table(struct switchtec_ntb *sndev,
u32 error;
u32 proxy_id;
- if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
+ if (ioread16(&mmio_ctrl->req_id_table_size) < count) {
dev_err(&sndev->stdev->dev,
"Not enough requester IDs available.\n");
return -EFAULT;
@@ -966,9 +967,6 @@ static int config_req_id_table(struct switchtec_ntb *sndev,
if (rc)
return rc;
- iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
- &mmio_ctrl->partition_ctrl);
-
for (i = 0; i < count; i++) {
iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
&mmio_ctrl->req_id_table[i]);
@@ -1090,7 +1088,7 @@ static int crosslink_enum_partition(struct switchtec_ntb *sndev,
{
struct part_cfg_regs __iomem *part_cfg =
&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
- u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
+ u32 pff = ioread32(&part_cfg->vep_pff_inst_id) & 0xFF;
struct pff_csr_regs __iomem *mmio_pff =
&sndev->stdev->mmio_pff_csr[pff];
const u64 bar_space = 0x1000000000LL;
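The 32-bit ffs()/fls() calls above mishandled partition vectors with bits set in the upper 32 bits; the fix switches to __ffs64()/fls64(). The underlying test is "exactly one bit set, plus its index", which can be written more directly as a sketch (hweight64() and __ffs64() are the kernel helpers; the function names are illustrative):

#include <linux/types.h>
#include <linux/bitops.h>

static inline bool one_nt_peer(u64 tpart_vec)
{
	return hweight64(tpart_vec) == 1;	/* exactly one partition bit */
}

static inline unsigned long nt_peer_index(u64 tpart_vec)
{
	return __ffs64(tpart_vec);		/* index of the lowest set bit */
}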
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index 3f05cfbc73af..dd683cb58d09 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -108,8 +108,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb)
if (!ntb->msi)
return -EINVAL;
- desc = first_msi_entry(&ntb->pdev->dev);
+ msi_lock_descs(&ntb->pdev->dev);
+ desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
+ msi_unlock_descs(&ntb->pdev->dev);
for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
@@ -260,8 +262,9 @@ static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry,
* @handler: Function to be called when the IRQ occurs
* @thread_fn: Function to be called in a threaded interrupt context. NULL
* for clients which handle everything in @handler
- * @devname: An ascii name for the claiming device, dev_name(dev) if NULL
+ * @name: An ascii name for the claiming device, dev_name(dev) if NULL
* @dev_id: A cookie passed back to the handler function
+ * @msi_desc: MSI descriptor data which triggers the interrupt
*
* This function assigns an interrupt handler to an unused
* MSI interrupt and returns the descriptor used to trigger
@@ -281,13 +284,15 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
const char *name, void *dev_id,
struct ntb_msi_desc *msi_desc)
{
+ struct device *dev = &ntb->pdev->dev;
struct msi_desc *entry;
int ret;
if (!ntb->msi)
return -EINVAL;
- for_each_pci_msi_entry(entry, ntb->pdev) {
+ msi_lock_descs(dev);
+ msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
if (irq_has_action(entry->irq))
continue;
@@ -304,14 +309,17 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
if (ret) {
devm_free_irq(&ntb->dev, entry->irq, dev_id);
- return ret;
+ goto unlock;
}
-
- return entry->irq;
+ ret = entry->irq;
+ goto unlock;
}
+ ret = -ENODEV;
- return -ENODEV;
+unlock:
+ msi_unlock_descs(dev);
+ return ret;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
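The msi.c hunks move from the open-coded for_each_pci_msi_entry() walk to the locked descriptor iterators: the list must now be traversed under msi_lock_descs(), and only MSI_DESC_ASSOCIATED entries (those with an allocated Linux IRQ) are visited. A condensed sketch of the pattern, assuming the 5.17 MSI API (find_free_msi_irq() is illustrative):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/irqdesc.h>
#include <linux/msi.h>

static int find_free_msi_irq(struct device *dev)
{
	struct msi_desc *desc;
	int irq = -ENOENT;

	msi_lock_descs(dev);
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		if (irq_has_action(desc->irq))
			continue;	/* already claimed by a handler */
		irq = desc->irq;
		break;
	}
	msi_unlock_descs(dev);
	return irq;
}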
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 88e1f9a0faaf..1fd667852271 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -93,30 +93,30 @@ struct nubus_proc_pde_data {
static struct nubus_proc_pde_data *
nubus_proc_alloc_pde_data(unsigned char *ptr, unsigned int size)
{
- struct nubus_proc_pde_data *pde_data;
+ struct nubus_proc_pde_data *pded;
- pde_data = kmalloc(sizeof(*pde_data), GFP_KERNEL);
- if (!pde_data)
+ pded = kmalloc(sizeof(*pded), GFP_KERNEL);
+ if (!pded)
return NULL;
- pde_data->res_ptr = ptr;
- pde_data->res_size = size;
- return pde_data;
+ pded->res_ptr = ptr;
+ pded->res_size = size;
+ return pded;
}
static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
- struct nubus_proc_pde_data *pde_data;
+ struct nubus_proc_pde_data *pded;
- pde_data = PDE_DATA(inode);
- if (!pde_data)
+ pded = pde_data(inode);
+ if (!pded)
return 0;
- if (pde_data->res_size > m->size)
+ if (pded->res_size > m->size)
return -EFBIG;
- if (pde_data->res_size) {
+ if (pded->res_size) {
int lanes = (int)proc_get_parent_data(inode);
struct nubus_dirent ent;
@@ -124,11 +124,11 @@ static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
return 0;
ent.mask = lanes;
- ent.base = pde_data->res_ptr;
+ ent.base = pded->res_ptr;
ent.data = 0;
- nubus_seq_write_rsrc_mem(m, &ent, pde_data->res_size);
+ nubus_seq_write_rsrc_mem(m, &ent, pded->res_size);
} else {
- unsigned int data = (unsigned int)pde_data->res_ptr;
+ unsigned int data = (unsigned int)pded->res_ptr;
seq_putc(m, data >> 16);
seq_putc(m, data >> 8);
@@ -142,18 +142,18 @@ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
unsigned int size)
{
char name[9];
- struct nubus_proc_pde_data *pde_data;
+ struct nubus_proc_pde_data *pded;
if (!procdir)
return;
snprintf(name, sizeof(name), "%x", ent->type);
if (size)
- pde_data = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
+ pded = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
else
- pde_data = NULL;
+ pded = NULL;
proc_create_single_data(name, S_IFREG | 0444, procdir,
- nubus_proc_rsrc_show, pde_data);
+ nubus_proc_rsrc_show, pded);
}
void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index b7d1eb38b27d..347fe7afa583 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -22,7 +22,7 @@ if LIBNVDIMM
config BLK_DEV_PMEM
tristate "PMEM: Persistent memory block device support"
default LIBNVDIMM
- select DAX_DRIVER
+ select DAX
select ND_BTT if BTT
select ND_PFN if NVDIMM_PFN
help
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index fe7ece1534e1..58d95242a836 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -301,29 +301,8 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}
-/*
- * Use the 'no check' versions of copy_from_iter_flushcache() and
- * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
- * checking, both file offset and device offset, is handled by
- * dax_iomap_actor()
- */
-static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- return _copy_from_iter_flushcache(addr, bytes, i);
-}
-
-static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- return _copy_mc_to_iter(addr, bytes, i);
-}
-
static const struct dax_operations pmem_dax_ops = {
.direct_access = pmem_dax_direct_access,
- .dax_supported = generic_fsdax_supported,
- .copy_from_iter = pmem_copy_from_iter,
- .copy_to_iter = pmem_copy_to_iter,
.zero_page_range = pmem_dax_zero_page_range,
};
@@ -379,6 +358,7 @@ static void pmem_release_disk(void *__pmem)
{
struct pmem_device *pmem = __pmem;
+ dax_remove_host(pmem->disk);
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
del_gendisk(pmem->disk);
@@ -402,7 +382,6 @@ static int pmem_attach_disk(struct device *dev,
struct gendisk *disk;
void *addr;
int rc;
- unsigned long flags = 0UL;
pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
if (!pmem)
@@ -495,19 +474,24 @@ static int pmem_attach_disk(struct device *dev,
nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
disk->bb = &pmem->bb;
- if (is_nvdimm_sync(nd_region))
- flags = DAXDEV_F_SYNC;
- dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
+ dax_dev = alloc_dax(pmem, &pmem_dax_ops);
if (IS_ERR(dax_dev)) {
rc = PTR_ERR(dax_dev);
goto out;
}
+ set_dax_nocache(dax_dev);
+ set_dax_nomc(dax_dev);
+ if (is_nvdimm_sync(nd_region))
+ set_dax_synchronous(dax_dev);
+ rc = dax_add_host(dax_dev, disk);
+ if (rc)
+ goto out_cleanup_dax;
dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
pmem->dax_dev = dax_dev;
rc = device_add_disk(dev, disk, pmem_attribute_groups);
if (rc)
- goto out_cleanup_dax;
+ goto out_remove_host;
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
@@ -519,6 +503,8 @@ static int pmem_attach_disk(struct device *dev,
dev_warn(dev, "'badblocks' notification disabled\n");
return 0;
+out_remove_host:
+ dax_remove_host(pmem->disk);
out_cleanup_dax:
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
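The pmem conversion above replaces the alloc_dax() flags argument with explicit set_dax_*() calls and makes the gendisk association explicit via dax_add_host()/dax_remove_host(). A condensed sketch of the resulting setup order, with an illustrative attach_dax() helper reusing the file-local pmem_dax_ops; the unwind must mirror it, removing the host before kill_dax()/put_dax(), as the new out_remove_host label does:

static int attach_dax(struct pmem_device *pmem, struct gendisk *disk,
		      bool sync_region)
{
	struct dax_device *dax_dev;
	int rc;

	dax_dev = alloc_dax(pmem, &pmem_dax_ops);
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);

	set_dax_nocache(dax_dev);
	set_dax_nomc(dax_dev);
	if (sync_region)
		set_dax_synchronous(dax_dev);

	rc = dax_add_host(dax_dev, disk);	/* paired with dax_remove_host() */
	if (rc) {
		kill_dax(dax_dev);
		put_dax(dax_dev);
		return rc;
	}
	pmem->dax_dev = dax_dev;
	return 0;
}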
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 726c7354d465..995b6cdc67ed 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -105,7 +105,7 @@ static void virtio_pmem_remove(struct virtio_device *vdev)
nvdimm_bus_unregister(nvdimm_bus);
vdev->config->del_vqs(vdev);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
}
static struct virtio_driver virtio_pmem_driver = {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1af8a4513708..5e0bfda04bd7 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -991,7 +991,6 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
struct nvme_command *cmd = nvme_req(req)->cmd;
- struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK;
if (!(req->rq_flags & RQF_DONTPREP))
@@ -1038,8 +1037,6 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
return BLK_STS_IOERR;
}
- if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
- nvme_req(req)->genctr++;
cmd->common.command_id = nvme_cid(req);
trace_nvme_setup_cmd(req, cmd);
return ret;
@@ -1057,7 +1054,7 @@ static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
{
blk_status_t status;
- status = blk_execute_rq(disk, rq, at_head);
+ status = blk_execute_rq(rq, at_head);
if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
return -EINTR;
if (nvme_req(rq)->status)
@@ -1284,7 +1281,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
rq->timeout = ctrl->kato * HZ;
rq->end_io_data = ctrl;
- blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
+ blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -2762,9 +2759,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
return -EINVAL;
}
subsys->awupf = le16_to_cpu(id->awupf);
-#ifdef CONFIG_NVME_MULTIPATH
- subsys->iopolicy = NVME_IOPOLICY_NUMA;
-#endif
+ nvme_mpath_default_iopolicy(subsys);
subsys->dev.class = nvme_subsys_class;
subsys->dev.release = nvme_release_subsystem;
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 282d54117e0a..f79a66d4e22c 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -1069,15 +1069,34 @@ out_unlock:
return ret ? ret : count;
}
+static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
+{
+ const struct match_token *tok;
+ int idx;
+
+ /*
+ * Add dummy entries for instance and cntlid to
+ * signal an invalid/non-existing controller
+ */
+ seq_puts(seq_file, "instance=-1,cntlid=-1");
+ for (idx = 0; idx < ARRAY_SIZE(opt_tokens); idx++) {
+ tok = &opt_tokens[idx];
+ if (tok->token == NVMF_OPT_ERR)
+ continue;
+ seq_puts(seq_file, ",");
+ seq_puts(seq_file, tok->pattern);
+ }
+ seq_puts(seq_file, "\n");
+}
+
static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
struct nvme_ctrl *ctrl;
- int ret = 0;
mutex_lock(&nvmf_dev_mutex);
ctrl = seq_file->private;
if (!ctrl) {
- ret = -EINVAL;
+ __nvmf_concat_opt_tokens(seq_file);
goto out_unlock;
}
@@ -1086,7 +1105,7 @@ static int nvmf_dev_show(struct seq_file *seq_file, void *private)
out_unlock:
mutex_unlock(&nvmf_dev_mutex);
- return ret;
+ return 0;
}
static int nvmf_dev_open(struct inode *inode, struct file *file)
diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c
index 1352159733b0..83d2e6860d38 100644
--- a/drivers/nvme/host/fault_inject.c
+++ b/drivers/nvme/host/fault_inject.c
@@ -56,7 +56,7 @@ void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject)
void nvme_should_fail(struct request *req)
{
- struct gendisk *disk = req->rq_disk;
+ struct gendisk *disk = req->q->disk;
struct nvme_fault_inject *fault_inject = NULL;
u16 status;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 13e5d503ed07..f8bf6606eb2f 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -13,6 +13,42 @@ module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
+static const char *nvme_iopolicy_names[] = {
+ [NVME_IOPOLICY_NUMA] = "numa",
+ [NVME_IOPOLICY_RR] = "round-robin",
+};
+
+static int iopolicy = NVME_IOPOLICY_NUMA;
+
+static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
+{
+ if (!val)
+ return -EINVAL;
+ if (!strncmp(val, "numa", 4))
+ iopolicy = NVME_IOPOLICY_NUMA;
+ else if (!strncmp(val, "round-robin", 11))
+ iopolicy = NVME_IOPOLICY_RR;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
+{
+ return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
+}
+
+module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
+ &iopolicy, 0644);
+MODULE_PARM_DESC(iopolicy,
+ "Default multipath I/O policy; 'numa' (default) or 'round-robin'");
+
+void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
+{
+ subsys->iopolicy = iopolicy;
+}
+
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
struct nvme_ns_head *h;
@@ -706,11 +742,6 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl)
struct device_attribute subsys_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
-static const char *nvme_iopolicy_names[] = {
- [NVME_IOPOLICY_NUMA] = "numa",
- [NVME_IOPOLICY_RR] = "round-robin",
-};
-
static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
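The iopolicy knob above uses module_param_call() so the setter can validate the string and the getter can echo the symbolic name. A generic sketch of that pattern with made-up names (mode/mode_names):

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

static const char *mode_names[] = { "fast", "safe" };
static int mode;

static int mode_set(const char *val, const struct kernel_param *kp)
{
	int i;

	if (!val)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(mode_names); i++) {
		if (sysfs_streq(val, mode_names[i])) {
			mode = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int mode_get(char *buf, const struct kernel_param *kp)
{
	return sprintf(buf, "%s\n", mode_names[mode]);
}

module_param_call(mode, mode_set, mode_get, &mode, 0644);
MODULE_PARM_DESC(mode, "Either 'fast' (default) or 'safe'");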
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9b095ee01364..a162f6c6da6e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -614,6 +614,10 @@ static inline bool nvme_try_complete_req(struct request *req, __le16 status,
union nvme_result result)
{
struct nvme_request *rq = nvme_req(req);
+ struct nvme_ctrl *ctrl = rq->ctrl;
+
+ if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+ rq->genctr++;
rq->status = le16_to_cpu(status) >> 1;
rq->result = result;
@@ -763,6 +767,7 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
@@ -860,6 +865,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
+static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_revalidate_zones(struct nvme_ns *ns);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ca2ee806d74b..6a99ed680915 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -500,22 +500,13 @@ static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
nvmeq->last_sq_tail = nvmeq->sq_tail;
}
-/**
- * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
- * @nvmeq: The queue to use
- * @cmd: The command to send
- * @write_sq: whether to write to the SQ doorbell
- */
-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
- bool write_sq)
+static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd)
{
- spin_lock(&nvmeq->sq_lock);
memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
- cmd, sizeof(*cmd));
+ absolute_pointer(cmd), sizeof(*cmd));
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
- nvme_write_sq_db(nvmeq, write_sq);
- spin_unlock(&nvmeq->sq_lock);
}
static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
@@ -912,52 +903,32 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
return BLK_STS_OK;
}
-/*
- * NOTE: ns is NULL when called on the admin queue.
- */
-static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{
- struct nvme_ns *ns = hctx->queue->queuedata;
- struct nvme_queue *nvmeq = hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
- struct request *req = bd->rq;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_command *cmnd = &iod->cmd;
blk_status_t ret;
iod->aborted = 0;
iod->npages = -1;
iod->nents = 0;
- /*
- * We should not need to do this, but we're still using this to
- * ensure we can drain requests on a dying queue.
- */
- if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
- return BLK_STS_IOERR;
-
- if (!nvme_check_ready(&dev->ctrl, req, true))
- return nvme_fail_nonready_command(&dev->ctrl, req);
-
- ret = nvme_setup_cmd(ns, req);
+ ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
return ret;
if (blk_rq_nr_phys_segments(req)) {
- ret = nvme_map_data(dev, req, cmnd);
+ ret = nvme_map_data(dev, req, &iod->cmd);
if (ret)
goto out_free_cmd;
}
if (blk_integrity_rq(req)) {
- ret = nvme_map_metadata(dev, req, cmnd);
+ ret = nvme_map_metadata(dev, req, &iod->cmd);
if (ret)
goto out_unmap_data;
}
blk_mq_start_request(req);
- nvme_submit_cmd(nvmeq, cmnd, bd->last);
return BLK_STS_OK;
out_unmap_data:
nvme_unmap_data(dev, req);
@@ -966,6 +937,96 @@ out_free_cmd:
return ret;
}
+/*
+ * NOTE: ns is NULL when called on the admin queue.
+ */
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_queue *nvmeq = hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+ struct request *req = bd->rq;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ blk_status_t ret;
+
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+ return BLK_STS_IOERR;
+
+ if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
+ return nvme_fail_nonready_command(&dev->ctrl, req);
+
+ ret = nvme_prep_rq(dev, req);
+ if (unlikely(ret))
+ return ret;
+ spin_lock(&nvmeq->sq_lock);
+ nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+ nvme_write_sq_db(nvmeq, bd->last);
+ spin_unlock(&nvmeq->sq_lock);
+ return BLK_STS_OK;
+}
+
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+{
+ spin_lock(&nvmeq->sq_lock);
+ while (!rq_list_empty(*rqlist)) {
+ struct request *req = rq_list_pop(rqlist);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+ }
+ nvme_write_sq_db(nvmeq, true);
+ spin_unlock(&nvmeq->sq_lock);
+}
+
+static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
+{
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+ return false;
+ if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
+ return false;
+
+ req->mq_hctx->tags->rqs[req->tag] = req;
+ return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
+}
+
+static void nvme_queue_rqs(struct request **rqlist)
+{
+ struct request *req, *next, *prev = NULL;
+ struct request *requeue_list = NULL;
+
+ rq_list_for_each_safe(rqlist, req, next) {
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+ if (!nvme_prep_rq_batch(nvmeq, req)) {
+ /* detach 'req' and add to remainder list */
+ rq_list_move(rqlist, &requeue_list, req, prev);
+
+ req = prev;
+ if (!req)
+ continue;
+ }
+
+ if (!next || req->mq_hctx != next->mq_hctx) {
+ /* detach rest of list, and submit */
+ req->rq_next = NULL;
+ nvme_submit_cmds(nvmeq, rqlist);
+ *rqlist = next;
+ prev = NULL;
+ } else
+ prev = req;
+ }
+
+ *rqlist = requeue_list;
+}
+
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -1140,7 +1201,11 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
c.common.opcode = nvme_admin_async_event;
c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
- nvme_submit_cmd(nvmeq, &c, true);
+
+ spin_lock(&nvmeq->sq_lock);
+ nvme_sq_copy_cmd(nvmeq, &c);
+ nvme_write_sq_db(nvmeq, true);
+ spin_unlock(&nvmeq->sq_lock);
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1371,7 +1436,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
abort_req->end_io_data = NULL;
- blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio);
+ blk_execute_rq_nowait(abort_req, false, abort_endio);
/*
* The aborted req will be completed on receiving the abort req.
@@ -1663,6 +1728,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
static const struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
+ .queue_rqs = nvme_queue_rqs,
.complete = nvme_pci_complete_rq,
.commit_rqs = nvme_commit_rqs,
.init_hctx = nvme_init_hctx,
@@ -2416,9 +2482,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
- blk_execute_rq_nowait(NULL, req, false,
- opcode == nvme_admin_delete_cq ?
- nvme_del_cq_end : nvme_del_queue_end);
+ blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
+ nvme_del_cq_end : nvme_del_queue_end);
return 0;
}
@@ -3326,7 +3391,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
- NVME_QUIRK_DEALLOCATE_ZEROES, },
+ NVME_QUIRK_DEALLOCATE_ZEROES |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
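nvme_queue_rqs() above walks a singly linked list of requests, peels off runs that share a hardware queue, and submits each run with a single lock round trip and doorbell write; requests that fail preparation are collected for individual requeue. A standalone sketch of the same algorithm with illustrative names (struct rq, prep(), submit_run()), not block-layer API:

#include <stddef.h>
#include <stdbool.h>

struct rq {
	struct rq *next;
	int qid;			/* stand-in for req->mq_hctx */
};

static bool prep(struct rq *r)		/* stand-in for nvme_prep_rq_batch() */
{
	return r->qid >= 0;
}

static void submit_run(struct rq *head)
{
	/* lock the queue, copy each command, ring the doorbell once */
	for (; head; head = head->next)
		;
}

static struct rq *queue_rqs(struct rq *list)
{
	struct rq *requeue = NULL, *prev = NULL, *r, *next;

	for (r = list; r; r = next) {
		next = r->next;
		if (!prep(r)) {
			/* unlink r onto the requeue list */
			if (prev)
				prev->next = next;
			else
				list = next;
			r->next = requeue;
			requeue = r;
			r = prev;
			if (!r)
				continue;
		}
		if (!next || next->qid != r->qid) {
			/* end of a same-queue run: submit it */
			r->next = NULL;
			submit_run(list);
			list = next;
			prev = NULL;
		} else {
			prev = r;
		}
	}
	return requeue;		/* caller requeues these individually */
}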
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 35bac7a25422..b5f85259461a 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -68,7 +68,7 @@ TRACE_EVENT(nvme_setup_cmd,
__entry->nsid = le32_to_cpu(cmd->common.nsid);
__entry->metadata = !!blk_integrity_rq(req);
__entry->fctype = cmd->fabrics.fctype;
- __assign_disk_name(__entry->disk, req->rq_disk);
+ __assign_disk_name(__entry->disk, req->q->disk);
memcpy(__entry->cdw10, &cmd->common.cdw10,
sizeof(__entry->cdw10));
),
@@ -103,7 +103,7 @@ TRACE_EVENT(nvme_complete_rq,
__entry->retries = nvme_req(req)->retries;
__entry->flags = nvme_req(req)->flags;
__entry->status = nvme_req(req)->status;
- __assign_disk_name(__entry->disk, req->rq_disk);
+ __assign_disk_name(__entry->disk, req->q->disk);
),
TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%#llx, retries=%u, flags=0x%x, status=%#x",
__entry->ctrl_id, __print_disk_name(__entry->disk),
@@ -153,7 +153,7 @@ TRACE_EVENT(nvme_sq,
),
TP_fast_assign(
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
- __assign_disk_name(__entry->disk, req->rq_disk);
+ __assign_disk_name(__entry->disk, req->q->disk);
__entry->qid = nvme_req_qid(req);
__entry->sq_head = le16_to_cpu(sq_head);
__entry->sq_tail = sq_tail;
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index f0efb3537989..9e5b89ae29df 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -284,8 +284,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
schedule_work(&req->p.work);
} else {
rq->end_io_data = req;
- blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0,
- nvmet_passthru_req_done);
+ blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
}
if (ns)
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index e765d3d0542e..23a38dcf0fc4 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -312,6 +312,8 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct nvmem_device *nvmem = to_nvmem_device(dev);
+ attr->size = nvmem->size;
+
return nvmem_bin_attr_get_umode(nvmem);
}
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 6a537d959f14..e9a375dd84af 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -19,11 +19,12 @@ static int mtk_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct mtk_efuse_priv *priv = context;
- u32 *val = _val;
- int i = 0, words = bytes / 4;
+ void __iomem *addr = priv->base + reg;
+ u8 *val = _val;
+ int i;
- while (words--)
- *val++ = readl(priv->base + reg + (i++ * 4));
+ for (i = 0; i < bytes; i++, val++)
+ *val = readb(addr + i);
return 0;
}
@@ -45,8 +46,8 @@ static int mtk_efuse_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- econfig.stride = 4;
- econfig.word_size = 4;
+ econfig.stride = 1;
+ econfig.word_size = 1;
econfig.reg_read = mtk_reg_read;
econfig.size = resource_size(res);
econfig.priv = priv;
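With stride and word_size reduced to 1, cells in this efuse may be declared at any byte offset and length. A hedged consumer-side sketch; the "calibration" cell name is illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int read_cal(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *val;

	cell = nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);
	val = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(val))
		return PTR_ERR(val);
	/* ... consume len bytes of calibration data ... */
	kfree(val);
	return 0;
}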
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 61de453b885c..e7d92b67cb8a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -651,6 +651,28 @@ bool of_device_is_available(const struct device_node *device)
EXPORT_SYMBOL(of_device_is_available);
/**
+ * __of_device_is_fail - check if a device has status "fail" or "fail-..."
+ *
+ * @device: Node to check status for, with locks already held
+ *
+ * Return: True if the status property is set to "fail" or "fail-..." (for any
+ * error code suffix), false otherwise
+ */
+static bool __of_device_is_fail(const struct device_node *device)
+{
+ const char *status;
+
+ if (!device)
+ return false;
+
+ status = __of_get_property(device, "status", NULL);
+ if (status == NULL)
+ return false;
+
+ return !strcmp(status, "fail") || !strncmp(status, "fail-", 5);
+}
+
+/**
* of_device_is_big_endian - check if a device has BE registers
*
* @device: Node to check for endianness
@@ -796,6 +818,9 @@ EXPORT_SYMBOL(of_get_next_available_child);
* of_get_next_cpu_node - Iterate on cpu nodes
* @prev: previous child of the /cpus node, or NULL to get first
*
+ * Unusable CPUs (those with the status property set to "fail" or "fail-...")
+ * will be skipped.
+ *
* Return: A cpu node pointer with refcount incremented, use of_node_put()
* on it when done. Returns NULL when prev is the last child. Decrements
* the refcount of prev.
@@ -817,6 +842,8 @@ struct device_node *of_get_next_cpu_node(struct device_node *prev)
of_node_put(node);
}
for (; next; next = next->sibling) {
+ if (__of_device_is_fail(next))
+ continue;
if (!(of_node_name_eq(next, "cpu") ||
__of_node_is_type(next, "cpu")))
continue;
@@ -1349,9 +1376,14 @@ int of_phandle_iterator_next(struct of_phandle_iterator *it)
* property data length
*/
if (it->cur + count > it->list_end) {
- pr_err("%pOF: %s = %d found %d\n",
- it->parent, it->cells_name,
- count, it->cell_count);
+ if (it->cells_name)
+ pr_err("%pOF: %s = %d found %td\n",
+ it->parent, it->cells_name,
+ count, it->list_end - it->cur);
+ else
+ pr_err("%pOF: phandle %s needs %d, found %td\n",
+ it->parent, of_node_full_name(it->node),
+ count, it->list_end - it->cur);
goto err;
}
}
@@ -1388,15 +1420,18 @@ int of_phandle_iterator_args(struct of_phandle_iterator *it,
return count;
}
-static int __of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name,
- const char *cells_name,
- int cell_count, int index,
- struct of_phandle_args *out_args)
+int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count, int index,
+ struct of_phandle_args *out_args)
{
struct of_phandle_iterator it;
int rc, cur_index = 0;
+ if (index < 0)
+ return -EINVAL;
+
/* Loop over the phandles until the requested entry is found */
of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
/*
@@ -1439,82 +1474,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
of_node_put(it.node);
return rc;
}
-
-/**
- * of_parse_phandle - Resolve a phandle property to a device_node pointer
- * @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a phandle value
- * @index: For properties holding a table of phandles, this is the index into
- * the table
- *
- * Return: The device_node pointer with refcount incremented. Use
- * of_node_put() on it when done.
- */
-struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name, int index)
-{
- struct of_phandle_args args;
-
- if (index < 0)
- return NULL;
-
- if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
- index, &args))
- return NULL;
-
- return args.np;
-}
-EXPORT_SYMBOL(of_parse_phandle);
-
-/**
- * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
- * @np: pointer to a device tree node containing a list
- * @list_name: property name that contains a list
- * @cells_name: property name that specifies phandles' arguments count
- * @index: index of a phandle to parse out
- * @out_args: optional pointer to output arguments structure (will be filled)
- *
- * This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_args, on error returns appropriate
- * errno value.
- *
- * Caller is responsible to call of_node_put() on the returned out_args->np
- * pointer.
- *
- * Example::
- *
- * phandle1: node1 {
- * #list-cells = <2>;
- * };
- *
- * phandle2: node2 {
- * #list-cells = <1>;
- * };
- *
- * node3 {
- * list = <&phandle1 1 2 &phandle2 3>;
- * };
- *
- * To get a device_node of the ``node2`` node you may call this:
- * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
- */
-int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
- const char *cells_name, int index,
- struct of_phandle_args *out_args)
-{
- int cell_count = -1;
-
- if (index < 0)
- return -EINVAL;
-
- /* If cells_name is NULL we assume a cell count of 0 */
- if (!cells_name)
- cell_count = 0;
-
- return __of_parse_phandle_with_args(np, list_name, cells_name,
- cell_count, index, out_args);
-}
-EXPORT_SYMBOL(of_parse_phandle_with_args);
+EXPORT_SYMBOL(__of_parse_phandle_with_args);
/**
* of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
@@ -1701,47 +1661,6 @@ free:
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
/**
- * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
- * @np: pointer to a device tree node containing a list
- * @list_name: property name that contains a list
- * @cell_count: number of argument cells following the phandle
- * @index: index of a phandle to parse out
- * @out_args: optional pointer to output arguments structure (will be filled)
- *
- * This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_args, on error returns appropriate
- * errno value.
- *
- * Caller is responsible to call of_node_put() on the returned out_args->np
- * pointer.
- *
- * Example::
- *
- * phandle1: node1 {
- * };
- *
- * phandle2: node2 {
- * };
- *
- * node3 {
- * list = <&phandle1 0 2 &phandle2 2 3>;
- * };
- *
- * To get a device_node of the ``node2`` node you may call this:
- * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
- */
-int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cell_count,
- int index, struct of_phandle_args *out_args)
-{
- if (index < 0)
- return -EINVAL;
- return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
- index, out_args);
-}
-EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
-
-/**
* of_count_phandle_with_args() - Find the number of phandles references in a property
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
diff --git a/drivers/of/device.c b/drivers/of/device.c
index b0800c260f64..874f031442dc 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -28,7 +28,7 @@
const struct of_device_id *of_match_device(const struct of_device_id *matches,
const struct device *dev)
{
- if ((!matches) || (!dev->of_node))
+ if (!matches || !dev->of_node || dev->of_node_reused)
return NULL;
return of_match_node(matches, dev->of_node);
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index bdca35284ceb..ad85ff6474ff 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,6 +26,7 @@
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>
+#include <linux/kmemleak.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -482,9 +483,11 @@ static int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
if (nomap) {
/*
* If the memory is already reserved (by another region), we
- * should not allow it to be marked nomap.
+ * should not allow it to be marked nomap, but don't worry
+ * if the region isn't memory as it won't be mapped.
*/
- if (memblock_is_region_reserved(base, size))
+ if (memblock_overlaps_region(&memblock.memory, base, size) &&
+ memblock_is_region_reserved(base, size))
return -EBUSY;
return memblock_mark_nomap(base, size);
@@ -522,9 +525,12 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
- early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
+ early_init_dt_reserve_memory_arch(base, size, nomap) == 0) {
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
+ if (!nomap)
+ kmemleak_alloc_phys(base, size, 0, 0);
+ }
else
pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
@@ -965,18 +971,22 @@ static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
elfcorehdr_addr, elfcorehdr_size);
}
-static phys_addr_t cap_mem_addr;
-static phys_addr_t cap_mem_size;
+static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
/**
* early_init_dt_check_for_usable_mem_range - Decode usable memory range
* location from flat tree
- * @node: reference to node containing usable memory range location ('chosen')
*/
-static void __init early_init_dt_check_for_usable_mem_range(unsigned long node)
+void __init early_init_dt_check_for_usable_mem_range(void)
{
const __be32 *prop;
int len;
+ phys_addr_t cap_mem_addr;
+ phys_addr_t cap_mem_size;
+ unsigned long node = chosen_node_offset;
+
+ if ((long)node < 0)
+ return;
pr_debug("Looking for usable-memory-range property... ");
@@ -989,6 +999,8 @@ static void __init early_init_dt_check_for_usable_mem_range(unsigned long node)
pr_debug("cap_mem_start=%pa cap_mem_size=%pa\n", &cap_mem_addr,
&cap_mem_size);
+
+ memblock_cap_memory_range(cap_mem_addr, cap_mem_size);
}
#ifdef CONFIG_SERIAL_EARLYCON
@@ -1042,13 +1054,14 @@ int __init early_init_dt_scan_chosen_stdout(void)
/*
* early_init_dt_scan_root - fetch the top level address and size cells
*/
-int __init early_init_dt_scan_root(unsigned long node, const char *uname,
- int depth, void *data)
+int __init early_init_dt_scan_root(void)
{
const __be32 *prop;
+ const void *fdt = initial_boot_params;
+ int node = fdt_path_offset(fdt, "/");
- if (depth != 0)
- return 0;
+ if (node < 0)
+ return -ENODEV;
dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
@@ -1063,8 +1076,7 @@ int __init early_init_dt_scan_root(unsigned long node, const char *uname,
dt_root_addr_cells = be32_to_cpup(prop);
pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
- /* break now */
- return 1;
+ return 0;
}
u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
@@ -1078,73 +1090,78 @@ u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
/*
* early_init_dt_scan_memory - Look for and parse memory nodes
*/
-int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
- int depth, void *data)
+int __init early_init_dt_scan_memory(void)
{
- const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- const __be32 *reg, *endp;
- int l;
- bool hotpluggable;
+ int node;
+ const void *fdt = initial_boot_params;
- /* We are scanning "memory" nodes only */
- if (type == NULL || strcmp(type, "memory") != 0)
- return 0;
+ fdt_for_each_subnode(node, fdt, 0) {
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *reg, *endp;
+ int l;
+ bool hotpluggable;
- reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
- if (reg == NULL)
- reg = of_get_flat_dt_prop(node, "reg", &l);
- if (reg == NULL)
- return 0;
+ /* We are scanning "memory" nodes only */
+ if (type == NULL || strcmp(type, "memory") != 0)
+ continue;
- endp = reg + (l / sizeof(__be32));
- hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
+ reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
+ if (reg == NULL)
+ reg = of_get_flat_dt_prop(node, "reg", &l);
+ if (reg == NULL)
+ continue;
- pr_debug("memory scan node %s, reg size %d,\n", uname, l);
+ endp = reg + (l / sizeof(__be32));
+ hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
- while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
- u64 base, size;
+ pr_debug("memory scan node %s, reg size %d,\n",
+ fdt_get_name(fdt, node, NULL), l);
- base = dt_mem_next_cell(dt_root_addr_cells, &reg);
- size = dt_mem_next_cell(dt_root_size_cells, &reg);
+ while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
+ u64 base, size;
- if (size == 0)
- continue;
- pr_debug(" - %llx, %llx\n", base, size);
+ base = dt_mem_next_cell(dt_root_addr_cells, &reg);
+ size = dt_mem_next_cell(dt_root_size_cells, &reg);
- early_init_dt_add_memory_arch(base, size);
+ if (size == 0)
+ continue;
+ pr_debug(" - %llx, %llx\n", base, size);
- if (!hotpluggable)
- continue;
+ early_init_dt_add_memory_arch(base, size);
- if (memblock_mark_hotplug(base, size))
- pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
- base, base + size);
- }
+ if (!hotpluggable)
+ continue;
+ if (memblock_mark_hotplug(base, size))
+ pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
+ base, base + size);
+ }
+ }
return 0;
}
-int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
- int depth, void *data)
+int __init early_init_dt_scan_chosen(char *cmdline)
{
- int l;
+ int l, node;
const char *p;
const void *rng_seed;
+ const void *fdt = initial_boot_params;
- pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ node = fdt_path_offset(fdt, "/chosen@0");
+ if (node < 0)
+ return -ENOENT;
- if (depth != 1 || !data ||
- (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
- return 0;
+ chosen_node_offset = node;
early_init_dt_check_for_initrd(node);
early_init_dt_check_for_elfcorehdr(node);
- early_init_dt_check_for_usable_mem_range(node);
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
- strlcpy(data, p, min(l, COMMAND_LINE_SIZE));
+ strlcpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
/*
* CONFIG_CMDLINE is meant to be a default in case nothing else
@@ -1153,18 +1170,18 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
*/
#ifdef CONFIG_CMDLINE
#if defined(CONFIG_CMDLINE_EXTEND)
- strlcat(data, " ", COMMAND_LINE_SIZE);
- strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
- strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
/* No arguments from boot loader, use kernel's cmdline */
- if (!((char *)data)[0])
- strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ if (!((char *)cmdline)[0])
+ strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */
- pr_debug("Command line is: %s\n", (char *)data);
+ pr_debug("Command line is: %s\n", (char *)cmdline);
rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
if (rng_seed && l > 0) {
@@ -1178,8 +1195,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
fdt_totalsize(initial_boot_params));
}
- /* break now */
- return 1;
+ return 0;
}
#ifndef MIN_MEMBLOCK_ADDR
@@ -1261,21 +1277,21 @@ bool __init early_init_dt_verify(void *params)
void __init early_init_dt_scan_nodes(void)
{
- int rc = 0;
+ int rc;
/* Initialize {size,address}-cells info */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ early_init_dt_scan_root();
/* Retrieve various information from the /chosen node */
- rc = of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
- if (!rc)
+ rc = early_init_dt_scan_chosen(boot_command_line);
+ if (rc)
pr_warn("No chosen node found, continuing without\n");
/* Setup memory, calling early_init_dt_add_memory_arch */
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+ early_init_dt_scan_memory();
/* Handle linux,usable-memory-range property */
- memblock_cap_memory_range(cap_mem_addr, cap_mem_size);
+ early_init_dt_check_for_usable_mem_range();
}
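The net effect of dropping the of_scan_flat_dt() callbacks is a plain call sequence; a hedged summary of the resulting flow (error handling elided):

/*
 * early_init_dt_scan_nodes()
 *     early_init_dt_scan_root();                    // {size,address}-cells
 *     early_init_dt_scan_chosen(boot_command_line); // bootargs, initrd, rng-seed
 *     early_init_dt_scan_memory();                  // memblock regions
 *     early_init_dt_check_for_usable_mem_range();   // memblock_cap_memory_range()
 */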
bool __init early_init_dt_scan(void *params)
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b3faf89744aa..793350028906 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -540,6 +540,10 @@ static int __init of_platform_default_populate_init(void)
of_node_put(node);
}
+ node = of_get_compatible_child(of_chosen, "simple-framebuffer");
+ of_platform_device_create(node, NULL, NULL);
+ of_node_put(node);
+
/* Populate everything else. */
of_platform_default_populate(NULL, NULL, NULL);
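This hunk lets a boot-loader-described framebuffer that sits under /chosen, rather than at the root, bind to the simplefb driver. A hedged sketch of the device-tree shape being handled (addresses and geometry invented for the example):

/*
 * chosen {
 *         framebuffer@9d000000 {
 *                 compatible = "simple-framebuffer";
 *                 reg = <0x9d000000 0x2000000>;
 *                 width = <1920>;
 *                 height = <1080>;
 *                 stride = <(1920 * 4)>;
 *                 format = "a8r8g8b8";
 *         };
 * };
 */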
diff --git a/drivers/of/property.c b/drivers/of/property.c
index a3483484a5a2..8e90071de6ed 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1075,6 +1075,17 @@ static struct device_node *of_get_compat_node(struct device_node *np)
return np;
}
+static struct device_node *of_get_compat_node_parent(struct device_node *np)
+{
+ struct device_node *parent, *node;
+
+ parent = of_get_parent(np);
+ node = of_get_compat_node(parent);
+ of_node_put(parent);
+
+ return node;
+}
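A hedged example of why the parent lookup matters: graph endpoint nodes carry consumer properties such as remote-endpoint but are never turned into devices themselves, so the fwnode link must attach to the nearest ancestor that carries a compatible string (node names below are invented):

/*
 * camera@3c {
 *         compatible = "ovti,ov5640";      // node that becomes the device
 *         port {
 *                 endpoint {               // node holding the property
 *                         remote-endpoint = <&csi_in>;
 *                 };
 *         };
 * };
 */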
+
/**
* of_link_to_phandle - Add fwnode link to supplier from supplier phandle
* @con_np: consumer device tree node
@@ -1249,7 +1260,9 @@ static struct device_node *parse_##fname(struct device_node *np, \
* @parse_prop.index: For properties holding a list of phandles, this is the
* index into the list
* @optional: Describes whether a supplier is mandatory or not
- * @node_not_dev: The consumer node containing the property is never a device.
+ * @node_not_dev: The consumer node containing the property is never converted
+ * to a struct device. Instead, parse ancestor nodes for the
+ * compatible property to find a node corresponding to a device.
*
* Returns:
* parse_prop() return values are
@@ -1424,7 +1437,7 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
struct device_node *con_dev_np;
con_dev_np = s->node_not_dev
- ? of_get_compat_node(con_np)
+ ? of_get_compat_node_parent(con_np)
: of_node_get(con_np);
matched = true;
i++;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 481ba8682ebf..70992103c07d 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -911,11 +911,18 @@ static void __init of_unittest_dma_ranges_one(const char *path,
if (!rc) {
phys_addr_t paddr;
dma_addr_t dma_addr;
- struct device dev_bogus;
+ struct device *dev_bogus;
- dev_bogus.dma_range_map = map;
- paddr = dma_to_phys(&dev_bogus, expect_dma_addr);
- dma_addr = phys_to_dma(&dev_bogus, expect_paddr);
+ dev_bogus = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!dev_bogus) {
+ unittest(0, "kzalloc() failed\n");
+ kfree(map);
+ return;
+ }
+
+ dev_bogus->dma_range_map = map;
+ paddr = dma_to_phys(dev_bogus, expect_dma_addr);
+ dma_addr = phys_to_dma(dev_bogus, expect_paddr);
unittest(paddr == expect_paddr,
"of_dma_get_range: wrong phys addr %pap (expecting %llx) on node %pOF\n",
@@ -925,6 +932,7 @@ static void __init of_unittest_dma_ranges_one(const char *path,
&dma_addr, expect_dma_addr, np);
kfree(map);
+ kfree(dev_bogus);
}
of_node_put(np);
#endif
@@ -934,8 +942,9 @@ static void __init of_unittest_parse_dma_ranges(void)
{
of_unittest_dma_ranges_one("/testcase-data/address-tests/device@70000000",
0x0, 0x20000000);
- of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000",
- 0x100000000, 0x20000000);
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000",
+ 0x100000000, 0x20000000);
of_unittest_dma_ranges_one("/testcase-data/address-tests/pci@90000000",
0x80000000, 0x20000000);
}
@@ -1492,7 +1501,7 @@ static int __init unittest_data_add(void)
}
#ifdef CONFIG_OF_OVERLAY
-static int __init overlay_data_apply(const char *overlay_name, int *overlay_id);
+static int __init overlay_data_apply(const char *overlay_name, int *ovcs_id);
static int unittest_probe(struct platform_device *pdev)
{
@@ -1657,7 +1666,7 @@ static void __init of_unittest_overlay_gpio(void)
* The overlays are applied by overlay_data_apply()
* instead of of_unittest_apply_overlay() so that they
* will not be tracked. Thus they will not be removed
- * by of_unittest_destroy_tracked_overlays().
+ * by of_unittest_remove_tracked_overlays().
*
* - apply overlay_gpio_01
* - apply overlay_gpio_02a
@@ -1905,86 +1914,70 @@ static const char *overlay_name_from_nr(int nr)
static const char *bus_path = "/testcase-data/overlay-node/test-bus";
-/* FIXME: it is NOT guaranteed that overlay ids are assigned in sequence */
-
-#define MAX_UNITTEST_OVERLAYS 256
-static unsigned long overlay_id_bits[BITS_TO_LONGS(MAX_UNITTEST_OVERLAYS)];
-static int overlay_first_id = -1;
+#define MAX_TRACK_OVCS_IDS 256
-static long of_unittest_overlay_tracked(int id)
-{
- if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
- return 0;
- return overlay_id_bits[BIT_WORD(id)] & BIT_MASK(id);
-}
+static int track_ovcs_id[MAX_TRACK_OVCS_IDS];
+static int track_ovcs_id_overlay_nr[MAX_TRACK_OVCS_IDS];
+static int track_ovcs_id_cnt;
-static void of_unittest_track_overlay(int id)
+static void of_unittest_track_overlay(int ovcs_id, int overlay_nr)
{
- if (overlay_first_id < 0)
- overlay_first_id = id;
- id -= overlay_first_id;
-
- if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
+ if (WARN_ON(track_ovcs_id_cnt >= MAX_TRACK_OVCS_IDS))
return;
- overlay_id_bits[BIT_WORD(id)] |= BIT_MASK(id);
+
+ track_ovcs_id[track_ovcs_id_cnt] = ovcs_id;
+ track_ovcs_id_overlay_nr[track_ovcs_id_cnt] = overlay_nr;
+ track_ovcs_id_cnt++;
}
-static void of_unittest_untrack_overlay(int id)
+static void of_unittest_untrack_overlay(int ovcs_id)
{
- if (overlay_first_id < 0)
+ if (WARN_ON(track_ovcs_id_cnt < 1))
return;
- id -= overlay_first_id;
- if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
- return;
- overlay_id_bits[BIT_WORD(id)] &= ~BIT_MASK(id);
-}
-static void of_unittest_destroy_tracked_overlays(void)
-{
- int id, ret, defers, ovcs_id;
+ track_ovcs_id_cnt--;
- if (overlay_first_id < 0)
- return;
+ /* If out of sync, the test is broken. Do not try to recover. */
+ WARN_ON(track_ovcs_id[track_ovcs_id_cnt] != ovcs_id);
+}
- /* try until no defers */
- do {
- defers = 0;
- /* remove in reverse order */
- for (id = MAX_UNITTEST_OVERLAYS - 1; id >= 0; id--) {
- if (!of_unittest_overlay_tracked(id))
- continue;
+static void of_unittest_remove_tracked_overlays(void)
+{
+ int ret, ovcs_id, overlay_nr, save_ovcs_id;
+ const char *overlay_name;
- ovcs_id = id + overlay_first_id;
- ret = of_overlay_remove(&ovcs_id);
- if (ret == -ENODEV) {
- pr_warn("%s: no overlay to destroy for #%d\n",
- __func__, id + overlay_first_id);
- continue;
- }
- if (ret != 0) {
- defers++;
- pr_warn("%s: overlay destroy failed for #%d\n",
- __func__, id + overlay_first_id);
- continue;
- }
+ while (track_ovcs_id_cnt > 0) {
- of_unittest_untrack_overlay(id);
+ ovcs_id = track_ovcs_id[track_ovcs_id_cnt - 1];
+ overlay_nr = track_ovcs_id_overlay_nr[track_ovcs_id_cnt - 1];
+ save_ovcs_id = ovcs_id;
+ ret = of_overlay_remove(&ovcs_id);
+ if (ret == -ENODEV) {
+ overlay_name = overlay_name_from_nr(overlay_nr);
+ pr_warn("%s: of_overlay_remove() for overlay \"%s\" failed, ret = %d\n",
+ __func__, overlay_name, ret);
}
- } while (defers > 0);
+ of_unittest_untrack_overlay(save_ovcs_id);
+ }
+
}
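The replacement bookkeeping is a plain stack of (ovcs_id, overlay_nr) pairs; a minimal sketch of the discipline (names shortened, WARN_ON checks omitted):

static int stack_id[MAX_TRACK_OVCS_IDS];
static int stack_nr[MAX_TRACK_OVCS_IDS];
static int stack_cnt;

static void push_overlay(int ovcs_id, int overlay_nr)
{
	stack_id[stack_cnt] = ovcs_id;
	stack_nr[stack_cnt] = overlay_nr;
	stack_cnt++;
}

static int pop_overlay(void)
{
	return stack_id[--stack_cnt];	/* LIFO: reverse order of application */
}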
-static int __init of_unittest_apply_overlay(int overlay_nr, int *overlay_id)
+static int __init of_unittest_apply_overlay(int overlay_nr, int *ovcs_id)
{
+ /*
+ * The overlay will be tracked, thus it will be removed
+ * by of_unittest_remove_tracked_overlays().
+ */
+
const char *overlay_name;
overlay_name = overlay_name_from_nr(overlay_nr);
- if (!overlay_data_apply(overlay_name, overlay_id)) {
- unittest(0, "could not apply overlay \"%s\"\n",
- overlay_name);
+ if (!overlay_data_apply(overlay_name, ovcs_id)) {
+ unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return -EFAULT;
}
- of_unittest_track_overlay(*overlay_id);
+ of_unittest_track_overlay(*ovcs_id, overlay_nr);
return 0;
}
@@ -2029,7 +2022,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
int unittest_nr, int before, int after,
enum overlay_type ovtype)
{
- int ret, ovcs_id, save_id;
+ int ret, ovcs_id, save_ovcs_id;
/* unittest device must be in before state */
if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
@@ -2057,7 +2050,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
return -EINVAL;
}
- save_id = ovcs_id;
+ save_ovcs_id = ovcs_id;
ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "%s failed to be destroyed @\"%s\"\n",
@@ -2065,7 +2058,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
unittest_path(unittest_nr, ovtype));
return ret;
}
- of_unittest_untrack_overlay(save_id);
+ of_unittest_untrack_overlay(save_ovcs_id);
/* unittest device must be again in before state */
if (of_unittest_device_exists(unittest_nr, PDEV_OVERLAY) != before) {
@@ -2192,7 +2185,7 @@ static void __init of_unittest_overlay_5(void)
/* test overlay application in sequence */
static void __init of_unittest_overlay_6(void)
{
- int i, ov_id[2], ovcs_id;
+ int i, save_ovcs_id[2], ovcs_id;
int overlay_nr = 6, unittest_nr = 6;
int before = 0, after = 1;
const char *overlay_name;
@@ -2225,8 +2218,8 @@ static void __init of_unittest_overlay_6(void)
unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return;
}
- ov_id[0] = ovcs_id;
- of_unittest_track_overlay(ov_id[0]);
+ save_ovcs_id[0] = ovcs_id;
+ of_unittest_track_overlay(ovcs_id, overlay_nr + 0);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest6/status");
@@ -2242,8 +2235,8 @@ static void __init of_unittest_overlay_6(void)
unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return;
}
- ov_id[1] = ovcs_id;
- of_unittest_track_overlay(ov_id[1]);
+ save_ovcs_id[1] = ovcs_id;
+ of_unittest_track_overlay(ovcs_id, overlay_nr + 1);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest7/status");
@@ -2263,7 +2256,7 @@ static void __init of_unittest_overlay_6(void)
}
for (i = 1; i >= 0; i--) {
- ovcs_id = ov_id[i];
+ ovcs_id = save_ovcs_id[i];
if (of_overlay_remove(&ovcs_id)) {
unittest(0, "%s failed destroy @\"%s\"\n",
overlay_name_from_nr(overlay_nr + i),
@@ -2271,7 +2264,7 @@ static void __init of_unittest_overlay_6(void)
PDEV_OVERLAY));
return;
}
- of_unittest_untrack_overlay(ov_id[i]);
+ of_unittest_untrack_overlay(save_ovcs_id[i]);
}
for (i = 0; i < 2; i++) {
@@ -2294,7 +2287,7 @@ static void __init of_unittest_overlay_6(void)
/* test overlay application in sequence */
static void __init of_unittest_overlay_8(void)
{
- int i, ov_id[2], ovcs_id;
+ int i, save_ovcs_id[2], ovcs_id;
int overlay_nr = 8, unittest_nr = 8;
const char *overlay_name;
int ret;
@@ -2316,8 +2309,8 @@ static void __init of_unittest_overlay_8(void)
if (!ret)
return;
- ov_id[0] = ovcs_id;
- of_unittest_track_overlay(ov_id[0]);
+ save_ovcs_id[0] = ovcs_id;
+ of_unittest_track_overlay(ovcs_id, overlay_nr + 0);
overlay_name = overlay_name_from_nr(overlay_nr + 1);
@@ -2335,11 +2328,11 @@ static void __init of_unittest_overlay_8(void)
return;
}
- ov_id[1] = ovcs_id;
- of_unittest_track_overlay(ov_id[1]);
+ save_ovcs_id[1] = ovcs_id;
+ of_unittest_track_overlay(ovcs_id, overlay_nr + 1);
/* now try to remove first overlay (it should fail) */
- ovcs_id = ov_id[0];
+ ovcs_id = save_ovcs_id[0];
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8");
@@ -2356,6 +2349,10 @@ static void __init of_unittest_overlay_8(void)
"OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8");
if (!ret) {
+ /*
+ * Should never get here. If we do, expect a lot of
+ * subsequent tracking and overlay removal related errors.
+ */
unittest(0, "%s was destroyed @\"%s\"\n",
overlay_name_from_nr(overlay_nr + 0),
unittest_path(unittest_nr,
@@ -2365,7 +2362,7 @@ static void __init of_unittest_overlay_8(void)
/* removing them in order should work */
for (i = 1; i >= 0; i--) {
- ovcs_id = ov_id[i];
+ ovcs_id = save_ovcs_id[i];
if (of_overlay_remove(&ovcs_id)) {
unittest(0, "%s not destroyed @\"%s\"\n",
overlay_name_from_nr(overlay_nr + i),
@@ -2373,7 +2370,7 @@ static void __init of_unittest_overlay_8(void)
PDEV_OVERLAY));
return;
}
- of_unittest_untrack_overlay(ov_id[i]);
+ of_unittest_untrack_overlay(save_ovcs_id[i]);
}
unittest(1, "overlay test %d passed\n", 8);
@@ -2805,7 +2802,7 @@ static void __init of_unittest_overlay(void)
of_unittest_overlay_gpio();
- of_unittest_destroy_tracked_overlays();
+ of_unittest_remove_tracked_overlays();
out:
of_node_put(bus_np);
@@ -2837,7 +2834,7 @@ struct overlay_info {
uint8_t *dtb_begin;
uint8_t *dtb_end;
int expected_result;
- int overlay_id;
+ int ovcs_id;
char *name;
};
@@ -2991,7 +2988,7 @@ void __init unittest_unflatten_overlay_base(void)
*
* Return 0 on unexpected error.
*/
-static int __init overlay_data_apply(const char *overlay_name, int *overlay_id)
+static int __init overlay_data_apply(const char *overlay_name, int *ovcs_id)
{
struct overlay_info *info;
int found = 0;
@@ -3013,9 +3010,9 @@ static int __init overlay_data_apply(const char *overlay_name, int *overlay_id)
if (!size)
pr_err("no overlay data for %s\n", overlay_name);
- ret = of_overlay_fdt_apply(info->dtb_begin, size, &info->overlay_id);
- if (overlay_id)
- *overlay_id = info->overlay_id;
+ ret = of_overlay_fdt_apply(info->dtb_begin, size, &info->ovcs_id);
+ if (ovcs_id)
+ *ovcs_id = info->ovcs_id;
if (ret < 0)
goto out;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index cf91cb024be3..1e4a5663d011 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -168,14 +168,14 @@ static int led_proc_show(struct seq_file *m, void *v)
static int led_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, led_proc_show, PDE_DATA(inode));
+ return single_open(file, led_proc_show, pde_data(inode));
}
static ssize_t led_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- void *data = PDE_DATA(file_inode(file));
+ void *data = pde_data(file_inode(file));
char *cur, lbuf[32];
int d;
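PDE_DATA() became the lower-case pde_data() in 5.17. A minimal self-contained sketch of the pattern with a hypothetical "demo" entry (not part of this driver):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", (const char *)m->private);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* pde_data() returns the pointer passed to proc_create_data() */
	return single_open(file, demo_show, pde_data(inode));
}

static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* registration: proc_create_data("demo", 0444, NULL, &demo_proc_ops, "hi"); */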
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index e090978518f1..d9e51036a4fa 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -482,11 +482,12 @@ static struct attribute *paths_subsys_attrs[] = {
&paths_attr_layer.attr,
NULL,
};
+ATTRIBUTE_GROUPS(paths_subsys);
/* Specific kobject type for our PDC paths */
static struct kobj_type ktype_pdcspath = {
.sysfs_ops = &pdcspath_attr_ops,
- .default_attrs = paths_subsys_attrs,
+ .default_groups = paths_subsys_groups,
};
/* We hard define the 4 types of path we expect to find */
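ATTRIBUTE_GROUPS(paths_subsys) generates the *_groups array that the new default_groups field expects. Roughly what the macro expands to (a sketch, modulo internal naming):

static const struct attribute_group paths_subsys_group = {
	.attrs = paths_subsys_attrs,
};

static const struct attribute_group *paths_subsys_groups[] = {
	&paths_subsys_group,
	NULL,
};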
@@ -979,8 +980,10 @@ pdcs_register_pathentries(void)
entry->kobj.kset = paths_kset;
err = kobject_init_and_add(&entry->kobj, &ktype_pdcspath, NULL,
"%s", entry->name);
- if (err)
+ if (err) {
+ kobject_put(&entry->kobj);
return err;
+ }
/* kobject is now registered */
write_lock(&entry->rw_lock);
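The added kobject_put() follows the kobject error contract: once kobject_init_and_add() has run, even a failed add leaves a reference that only kobject_put() may drop, letting the ktype release callback free the object. A minimal sketch with a hypothetical type:

#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
	struct kobject kobj;
};

static void foo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
	.release = foo_release,
};

static int foo_add(struct kobject *parent)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
	int err;

	if (!f)
		return -ENOMEM;

	err = kobject_init_and_add(&f->kobj, &foo_ktype, parent, "foo");
	if (err)
		kobject_put(&f->kobj);	/* foo_release() frees f */
	return err;
}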
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 43e615aa12ff..d98fafdd0f99 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -184,7 +184,7 @@ config PCI_LABEL
config PCI_HYPERV
tristate "Hyper-V PCI Frontend"
- depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
+ depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
select PCI_HYPERV_INTERFACE
help
The PCI device frontend driver allows the kernel to import arbitrary
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index d62c4ac4ae1b..37be95adf169 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -5,8 +5,9 @@
obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
remove.o pci.o pci-driver.o search.o \
pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
- setup-bus.o vc.o mmap.o setup-irq.o msi.o
+ setup-bus.o vc.o mmap.o setup-irq.o
+obj-$(CONFIG_PCI) += msi/
obj-$(CONFIG_PCI) += pcie/
ifdef CONFIG_PCI
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 46935695cfb9..0d9f6b21babb 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -42,7 +42,10 @@ int noinline pci_bus_read_config_##size \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
pci_lock_config(flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
- *value = (type)data; \
+ if (res) \
+ PCI_SET_ERROR_RESPONSE(value); \
+ else \
+ *value = (type)data; \
pci_unlock_config(flags); \
return res; \
}
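With this change a failed read hands back PCI_ERROR_RESPONSE (all ones) instead of whatever was in the buffer. A hedged sketch of how a caller distinguishes the cases:

u16 vendor;
int ret;

ret = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
if (ret != PCIBIOS_SUCCESSFUL || vendor == (u16)~0)
	return -ENODEV;	/* read failed or the device is gone */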
@@ -80,10 +83,8 @@ int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
void __iomem *addr;
addr = bus->ops->map_bus(bus, devfn, where);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
if (size == 1)
*val = readb(addr);
@@ -122,10 +123,8 @@ int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
void __iomem *addr;
addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
*val = readl(addr);
@@ -228,7 +227,10 @@ int pci_user_read_config_##size \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
raw_spin_unlock_irq(&pci_lock); \
- *val = (type)data; \
+ if (ret) \
+ PCI_SET_ERROR_RESPONSE(val); \
+ else \
+ *val = (type)data; \
return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
@@ -410,9 +412,9 @@ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
/*
- * Reset *val to 0 if pci_read_config_word() fails, it may
- * have been written as 0xFFFF if hardware error happens
- * during pci_read_config_word().
+ * Reset *val to 0 if pci_read_config_word() fails; it may
+ * have been written as 0xFFFF (PCI_ERROR_RESPONSE) if the
+ * config read failed on PCI.
*/
if (ret)
*val = 0;
@@ -445,9 +447,9 @@ int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
/*
- * Reset *val to 0 if pci_read_config_dword() fails, it may
- * have been written as 0xFFFFFFFF if hardware error happens
- * during pci_read_config_dword().
+ * Reset *val to 0 if pci_read_config_dword() fails; it may
+ * have been written as 0xFFFFFFFF (PCI_ERROR_RESPONSE) if
+ * the config read failed on PCI.
*/
if (ret)
*val = 0;
@@ -523,7 +525,7 @@ EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
{
if (pci_dev_is_disconnected(dev)) {
- *val = ~0;
+ PCI_SET_ERROR_RESPONSE(val);
return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
@@ -533,7 +535,7 @@ EXPORT_SYMBOL(pci_read_config_byte);
int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
{
if (pci_dev_is_disconnected(dev)) {
- *val = ~0;
+ PCI_SET_ERROR_RESPONSE(val);
return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
@@ -544,7 +546,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where,
u32 *val)
{
if (pci_dev_is_disconnected(dev)) {
- *val = ~0;
+ PCI_SET_ERROR_RESPONSE(val);
return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 7fc5135ffbbf..601f2531ee91 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -4,7 +4,7 @@ menu "PCI controller drivers"
depends on PCI
config PCI_MVEBU
- bool "Marvell EBU PCIe controller"
+ tristate "Marvell EBU PCIe controller"
depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
depends on MVEBU_MBUS
depends on ARM
@@ -274,14 +274,14 @@ config PCIE_BRCMSTB
BMIPS_GENERIC || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
- default ARCH_BRCMSTB
+ default ARCH_BRCMSTB || BMIPS_GENERIC
help
Say Y here to enable PCIe host controller support for
Broadcom STB based SoCs, like the Raspberry Pi 4.
config PCI_HYPERV_INTERFACE
tristate "Hyper-V PCI Interface"
- depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
+ depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN
help
The Hyper-V PCI Interface is a helper driver that allows other drivers to
have a common interface with the Hyper-V PCI frontend driver.
@@ -332,8 +332,8 @@ config PCIE_APPLE
If unsure, say Y if you have an Apple Silicon system.
config PCIE_MT7621
- bool "MediaTek MT7621 PCIe Controller"
- depends on SOC_MT7621 || (MIPS && COMPILE_TEST)
+ tristate "MediaTek MT7621 PCIe Controller"
+ depends on SOC_MT7621 || COMPILE_TEST
select PHY_MT7621_PCI
default SOC_MT7621
help
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 918e11082e6a..489586a4cdc7 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -51,11 +51,10 @@ enum link_status {
#define MAX_LANES 2
struct j721e_pcie {
- struct device *dev;
+ struct cdns_pcie *cdns_pcie;
struct clk *refclk;
u32 mode;
u32 num_lanes;
- struct cdns_pcie *cdns_pcie;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
@@ -99,7 +98,7 @@ static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset,
static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
{
struct j721e_pcie *pcie = priv;
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->cdns_pcie->dev;
u32 reg;
reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
@@ -165,7 +164,7 @@ static const struct cdns_pcie_ops j721e_pcie_ops = {
static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
unsigned int offset)
{
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->cdns_pcie->dev;
u32 mask = J721E_MODE_RC;
u32 mode = pcie->mode;
u32 val = 0;
@@ -184,7 +183,7 @@ static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
struct regmap *syscon, unsigned int offset)
{
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->cdns_pcie->dev;
struct device_node *np = dev->of_node;
int link_speed;
u32 val = 0;
@@ -205,7 +204,7 @@ static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
struct regmap *syscon, unsigned int offset)
{
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->cdns_pcie->dev;
u32 lanes = pcie->num_lanes;
u32 val = 0;
int ret;
@@ -220,7 +219,7 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->cdns_pcie->dev;
struct device_node *node = dev->of_node;
struct of_phandle_args args;
unsigned int offset = 0;
@@ -354,7 +353,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct pci_host_bridge *bridge;
- struct j721e_pcie_data *data;
+ const struct j721e_pcie_data *data;
struct cdns_pcie *cdns_pcie;
struct j721e_pcie *pcie;
struct cdns_pcie_rc *rc;
@@ -367,7 +366,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
int ret;
int irq;
- data = (struct j721e_pcie_data *)of_device_get_match_data(dev);
+ data = of_device_get_match_data(dev);
if (!data)
return -EINVAL;
@@ -377,7 +376,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
- pcie->dev = dev;
pcie->mode = mode;
pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
index a224afadbcc0..bac0541317c1 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-plat.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -45,7 +45,6 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
{
const struct cdns_plat_pcie_of_data *data;
struct cdns_plat_pcie *cdns_plat_pcie;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct cdns_pcie_ep *ep;
@@ -54,11 +53,10 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
bool is_rc;
int ret;
- match = of_match_device(cdns_plat_pcie_of_match, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct cdns_plat_pcie_of_data *)match->data;
is_rc = data->is_rc;
pr_debug(" Started %s with is_rc: %d\n", __func__, is_rc);
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 262421e5d917..c8a27b6290ce 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -310,7 +310,7 @@ struct cdns_pcie {
* single function at a time
* @vendor_id: PCI vendor ID
* @device_id: PCI device ID
- * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
+ * @avail_ib_bar: Status of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
* available
* @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
* @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index a4221f6f3629..dfcdeb432dc8 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -213,7 +213,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
if (!val)
return 0;
- pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
+ pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL);
while (pos != MAX_MSI_IRQS_PER_CTRL) {
generic_handle_domain_irq(pp->irq_domain,
(index * MAX_MSI_IRQS_PER_CTRL) + pos);
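find_first_bit(addr, size) is the canonical spelling of find_next_bit(addr, size, 0). Since val is a snapshot, an equivalent formulation (hedged, not what the patch does) is for_each_set_bit():

unsigned long status = val;	/* snapshot of the MSI status register */
unsigned int pos;

for_each_set_bit(pos, &status, MAX_MSI_IRQS_PER_CTRL)
	generic_handle_domain_irq(pp->irq_domain,
				  (index * MAX_MSI_IRQS_PER_CTRL) + pos);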
@@ -697,16 +697,14 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
char name[10];
struct gpio_desc *reset;
- const struct of_device_id *match;
const struct dra7xx_pcie_of_data *data;
enum dw_pcie_device_mode mode;
u32 b1co_mode_sel_mask;
- match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct dra7xx_pcie_of_data *)match->data;
mode = (enum dw_pcie_device_mode)data->mode;
b1co_mode_sel_mask = data->b1co_mode_sel_mask;
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 722dacdd5a17..467c8d1cd7e4 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -217,10 +217,8 @@ static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- if (PCI_SLOT(devfn)) {
- *val = ~0;
+ if (PCI_SLOT(devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
*val = dw_pcie_read_dbi(pci, where, size);
return PCIBIOS_SUCCESSFUL;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 26f49f797b0f..6974bd5aa116 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -49,6 +50,7 @@ enum imx6_pcie_variants {
IMX6QP,
IMX7D,
IMX8MQ,
+ IMX8MM,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
@@ -88,6 +90,7 @@ struct imx6_pcie {
struct device *pd_pcie;
/* power domain for pcie phy */
struct device *pd_pcie_phy;
+ struct phy *phy;
const struct imx6_pcie_drvdata *drvdata;
};
@@ -372,6 +375,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
case IMX7D:
case IMX8MQ:
reset_control_assert(imx6_pcie->pciephy_reset);
+ fallthrough;
+ case IMX8MM:
reset_control_assert(imx6_pcie->apps_reset);
break;
case IMX6SX:
@@ -407,7 +412,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
+ WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
+ imx6_pcie->drvdata->variant != IMX8MM);
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
@@ -446,6 +452,11 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
break;
case IMX7D:
break;
+ case IMX8MM:
+ ret = clk_prepare_enable(imx6_pcie->pcie_aux);
+ if (ret)
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ break;
case IMX8MQ:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
@@ -522,6 +533,14 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
goto err_ref_clk;
}
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ if (phy_power_on(imx6_pcie->phy))
+ dev_err(dev, "unable to power on PHY\n");
+ break;
+ default:
+ break;
+ }
/* allow the clocks to stabilize */
usleep_range(200, 500);
@@ -538,6 +557,10 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
case IMX8MQ:
reset_control_deassert(imx6_pcie->pciephy_reset);
break;
+ case IMX8MM:
+ if (phy_init(imx6_pcie->phy))
+ dev_err(dev, "waiting for phy ready timeout!\n");
+ break;
case IMX7D:
reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -614,6 +637,12 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ /*
+ * PHY initialization is handled by the standalone PHY
+ * driver, so there is nothing more to do here.
+ */
+ break;
case IMX8MQ:
/*
* TODO: Currently this code assumes external
@@ -753,6 +782,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
break;
case IMX7D:
case IMX8MQ:
+ case IMX8MM:
reset_control_deassert(imx6_pcie->apps_reset);
break;
}
@@ -871,6 +901,7 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
IMX6Q_GPR12_PCIE_CTL_2, 0);
break;
case IMX7D:
+ case IMX8MM:
reset_control_assert(imx6_pcie->apps_reset);
break;
default:
@@ -930,6 +961,7 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
break;
case IMX8MQ:
+ case IMX8MM:
clk_disable_unprepare(imx6_pcie->pcie_aux);
break;
default:
@@ -945,8 +977,16 @@ static int imx6_pcie_suspend_noirq(struct device *dev)
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_clk_disable(imx6_pcie);
imx6_pcie_ltssm_disable(dev);
+ imx6_pcie_clk_disable(imx6_pcie);
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ if (phy_power_off(imx6_pcie->phy))
+ dev_err(dev, "unable to power off PHY\n");
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -1043,11 +1083,6 @@ static int imx6_pcie_probe(struct platform_device *pdev)
}
/* Fetch clocks */
- imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
- "pcie_phy clock source missing or invalid\n");
-
imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(imx6_pcie->pcie_bus))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
@@ -1090,9 +1125,34 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->apps_reset);
}
break;
+ case IMX8MM:
+ imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(imx6_pcie->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+ imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
+ "apps");
+ if (IS_ERR(imx6_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
+ "failed to get pcie apps reset control\n");
+
+ imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx6_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ "failed to get pcie phy\n");
+
+ break;
default:
break;
}
+ /* Don't fetch the pcie_phy clock if the variant uses a standalone PHY driver */
+ if (imx6_pcie->phy == NULL) {
+ imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pcie_phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
+ "pcie_phy clock source missing or invalid\n");
+ }
+
/* Grab turnoff reset */
imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
@@ -1202,6 +1262,10 @@ static const struct imx6_pcie_drvdata drvdata[] = {
[IMX8MQ] = {
.variant = IMX8MQ,
},
+ [IMX8MM] = {
+ .variant = IMX8MM,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ },
};
static const struct of_device_id imx6_pcie_of_match[] = {
@@ -1209,7 +1273,8 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
- { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } ,
+ { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
+ { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{},
};
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 865258d8c53c..1c2ee4e13f1c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -747,9 +747,9 @@ err:
#ifdef CONFIG_ARM
/*
- * When a PCI device does not exist during config cycles, keystone host gets a
- * bus error instead of returning 0xffffffff. This handler always returns 0
- * for this kind of faults.
+ * When a PCI device does not exist during config cycles, keystone host
+ * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
+ * This handler always returns 0 for this kind of fault.
*/
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
@@ -775,12 +775,19 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
if (IS_ERR(devctrl_regs))
return PTR_ERR(devctrl_regs);
- ret = regmap_read(devctrl_regs, 0, &id);
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ ret = regmap_read(devctrl_regs, offset, &id);
if (ret)
return ret;
@@ -989,6 +996,8 @@ err_phy:
static int ks_pcie_set_mode(struct device *dev)
{
struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
struct regmap *syscon;
u32 val;
u32 mask;
@@ -998,10 +1007,15 @@ static int ks_pcie_set_mode(struct device *dev)
if (IS_ERR(syscon))
return 0;
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
- ret = regmap_update_bits(syscon, 0, mask, val);
+ ret = regmap_update_bits(syscon, offset, mask, val);
if (ret) {
dev_err(dev, "failed to set pcie mode\n");
return ret;
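of_parse_phandle_with_fixed_args(np, prop, 1, 0, &args) succeeds only when the phandle is followed by exactly one argument cell, which is how the old and new bindings coexist. A hedged illustration (the 0x4060 offset is invented):

/*
 * old binding:  ti,syscon-pcie-mode = <&scm_conf>;         -> offset 0
 * new binding:  ti,syscon-pcie-mode = <&scm_conf 0x4060>;  -> offset 0x4060
 */
struct of_phandle_args args;
unsigned int offset = 0;

if (!of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode",
				      1, 0, &args))
	offset = args.args[0];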
@@ -1014,6 +1028,8 @@ static int ks_pcie_am654_set_mode(struct device *dev,
enum dw_pcie_device_mode mode)
{
struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
struct regmap *syscon;
u32 val;
u32 mask;
@@ -1023,6 +1039,11 @@ static int ks_pcie_am654_set_mode(struct device *dev,
if (IS_ERR(syscon))
return 0;
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
mask = AM654_PCIE_DEV_TYPE_MASK;
switch (mode) {
@@ -1037,7 +1058,7 @@ static int ks_pcie_am654_set_mode(struct device *dev,
return -EINVAL;
}
- ret = regmap_update_bits(syscon, 0, mask, val);
+ ret = regmap_update_bits(syscon, offset, mask, val);
if (ret) {
dev_err(dev, "failed to set pcie mode\n");
return ret;
@@ -1087,7 +1108,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct ks_pcie_of_data *data;
- const struct of_device_id *match;
enum dw_pcie_device_mode mode;
struct dw_pcie *pci;
struct keystone_pcie *ks_pcie;
@@ -1104,8 +1124,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
int irq;
int i;
- match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
- data = (struct ks_pcie_of_data *)match->data;
+ data = of_device_get_match_data(dev);
if (!data)
return -EINVAL;
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 5b9c625df7b8..6a4f0619bb1c 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -3,6 +3,7 @@
* PCIe host controller driver for Freescale Layerscape SoCs
*
* Copyright (C) 2014 Freescale Semiconductor.
+ * Copyright 2021 NXP
*
* Author: Minghuan Lian <Minghuan.Lian@freescale.com>
*/
@@ -22,12 +23,6 @@
#include "pcie-designware.h"
-/* PEX1/2 Misc Ports Status Register */
-#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4)
-#define LTSSM_STATE_SHIFT 20
-#define LTSSM_STATE_MASK 0x3f
-#define LTSSM_PCIE_L0 0x11 /* L0 state */
-
/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
@@ -35,20 +30,8 @@
#define PCIE_IATU_NUM 6
-struct ls_pcie_drvdata {
- u32 lut_offset;
- u32 ltssm_shift;
- u32 lut_dbg;
- const struct dw_pcie_host_ops *ops;
- const struct dw_pcie_ops *dw_pcie_ops;
-};
-
struct ls_pcie {
struct dw_pcie *pci;
- void __iomem *lut;
- struct regmap *scfg;
- const struct ls_pcie_drvdata *drvdata;
- int index;
};
#define to_ls_pcie(x) dev_get_drvdata((x)->dev)
@@ -83,38 +66,6 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
}
-static int ls1021_pcie_link_up(struct dw_pcie *pci)
-{
- u32 state;
- struct ls_pcie *pcie = to_ls_pcie(pci);
-
- if (!pcie->scfg)
- return 0;
-
- regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state);
- state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
-
- if (state < LTSSM_PCIE_L0)
- return 0;
-
- return 1;
-}
-
-static int ls_pcie_link_up(struct dw_pcie *pci)
-{
- struct ls_pcie *pcie = to_ls_pcie(pci);
- u32 state;
-
- state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
- pcie->drvdata->ltssm_shift) &
- LTSSM_STATE_MASK;
-
- if (state < LTSSM_PCIE_L0)
- return 0;
-
- return 1;
-}
-
/* Forward error response of outbound non-posted requests */
static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
{
@@ -139,96 +90,20 @@ static int ls_pcie_host_init(struct pcie_port *pp)
return 0;
}
-static int ls1021_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct ls_pcie *pcie = to_ls_pcie(pci);
- struct device *dev = pci->dev;
- u32 index[2];
- int ret;
-
- pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
- "fsl,pcie-scfg");
- if (IS_ERR(pcie->scfg)) {
- ret = PTR_ERR(pcie->scfg);
- dev_err(dev, "No syscfg phandle specified\n");
- pcie->scfg = NULL;
- return ret;
- }
-
- if (of_property_read_u32_array(dev->of_node,
- "fsl,pcie-scfg", index, 2)) {
- pcie->scfg = NULL;
- return -EINVAL;
- }
- pcie->index = index[1];
-
- return ls_pcie_host_init(pp);
-}
-
-static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
- .host_init = ls1021_pcie_host_init,
-};
-
static const struct dw_pcie_host_ops ls_pcie_host_ops = {
.host_init = ls_pcie_host_init,
};
-static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
- .link_up = ls1021_pcie_link_up,
-};
-
-static const struct dw_pcie_ops dw_ls_pcie_ops = {
- .link_up = ls_pcie_link_up,
-};
-
-static const struct ls_pcie_drvdata ls1021_drvdata = {
- .ops = &ls1021_pcie_host_ops,
- .dw_pcie_ops = &dw_ls1021_pcie_ops,
-};
-
-static const struct ls_pcie_drvdata ls1043_drvdata = {
- .lut_offset = 0x10000,
- .ltssm_shift = 24,
- .lut_dbg = 0x7fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
-};
-
-static const struct ls_pcie_drvdata ls1046_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 24,
- .lut_dbg = 0x407fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
-};
-
-static const struct ls_pcie_drvdata ls2080_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 0,
- .lut_dbg = 0x7fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
-};
-
-static const struct ls_pcie_drvdata ls2088_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 0,
- .lut_dbg = 0x407fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
-};
-
static const struct of_device_id ls_pcie_of_match[] = {
- { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
- { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
- { .compatible = "fsl,ls1028a-pcie", .data = &ls2088_drvdata },
- { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
- { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
- { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
- { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
- { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
- { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
+ { .compatible = "fsl,ls1012a-pcie", },
+ { .compatible = "fsl,ls1021a-pcie", },
+ { .compatible = "fsl,ls1028a-pcie", },
+ { .compatible = "fsl,ls1043a-pcie", },
+ { .compatible = "fsl,ls1046a-pcie", },
+ { .compatible = "fsl,ls2080a-pcie", },
+ { .compatible = "fsl,ls2085a-pcie", },
+ { .compatible = "fsl,ls2088a-pcie", },
+ { .compatible = "fsl,ls1088a-pcie", },
{ },
};
@@ -247,11 +122,8 @@ static int ls_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
- pcie->drvdata = of_device_get_match_data(dev);
-
pci->dev = dev;
- pci->ops = pcie->drvdata->dw_pcie_ops;
- pci->pp.ops = pcie->drvdata->ops;
+ pci->pp.ops = &ls_pcie_host_ops;
pcie->pci = pci;
@@ -260,8 +132,6 @@ static int ls_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
- pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset;
-
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index c91fc1954432..2f15441770e1 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -380,17 +380,15 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct artpec6_pcie *artpec6_pcie;
int ret;
- const struct of_device_id *match;
const struct artpec_pcie_of_data *data;
enum artpec_pcie_variants variant;
enum dw_pcie_device_mode mode;
u32 val;
- match = of_match_device(artpec6_pcie_of_match, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct artpec_pcie_of_data *)match->data;
variant = (enum artpec_pcie_variants)data->variant;
mode = (enum dw_pcie_device_mode)data->mode;
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 8851eb161a0e..0c5de87d3cc6 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -122,15 +122,13 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
struct dw_plat_pcie *dw_plat_pcie;
struct dw_pcie *pci;
int ret;
- const struct of_device_id *match;
const struct dw_plat_pcie_of_data *data;
enum dw_pcie_device_mode mode;
- match = of_match_device(dw_plat_pcie_of_match, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct dw_plat_pcie_of_data *)match->data;
mode = (enum dw_pcie_device_mode)data->mode;
dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 850b4533f4ef..d92c8a25094f 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -672,10 +672,11 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)
if (!pci->atu_base) {
struct resource *res =
platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
- if (res)
+ if (res) {
pci->atu_size = resource_size(res);
- pci->atu_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->atu_base))
+ pci->atu_base = devm_ioremap_resource(dev, res);
+ }
+ if (!pci->atu_base || IS_ERR(pci->atu_base))
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
}
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 8fc5960faf28..8904b5b85ee5 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -18,6 +18,10 @@
#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+struct hisi_pcie {
+ void __iomem *reg_base;
+};
+
static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
@@ -58,10 +62,10 @@ static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
struct pci_config_window *cfg = bus->sysdata;
- void __iomem *reg_base = cfg->priv;
+ struct hisi_pcie *pcie = cfg->priv;
if (bus->number == cfg->busr.start)
- return reg_base + where;
+ return pcie->reg_base + where;
else
return pci_ecam_map_bus(bus, devfn, where);
}
@@ -71,12 +75,16 @@ static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
static int hisi_pcie_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
+ struct hisi_pcie *pcie;
struct acpi_device *adev = to_acpi_device(dev);
struct acpi_pci_root *root = acpi_driver_data(adev);
struct resource *res;
- void __iomem *reg_base;
int ret;
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
/*
* Retrieve RC base and size from a HISI0081 device with _UID
* matching our segment.
@@ -91,11 +99,11 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
return -ENOMEM;
}
- reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
- if (!reg_base)
+ pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!pcie->reg_base)
return -ENOMEM;
- cfg->priv = reg_base;
+ cfg->priv = pcie;
return 0;
}
@@ -115,9 +123,13 @@ const struct pci_ecam_ops hisi_pcie_ops = {
static int hisi_pcie_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
+ struct hisi_pcie *pcie;
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
- void __iomem *reg_base;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
@@ -125,11 +137,11 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
return -EINVAL;
}
- reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
- if (!reg_base)
+ pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!pcie->reg_base)
return -ENOMEM;
- cfg->priv = reg_base;
+ cfg->priv = pcie;
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 86f9d16c50d7..410555dccb6d 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -127,10 +127,8 @@ static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- if (PCI_SLOT(devfn)) {
- *val = ~0;
+ if (PCI_SLOT(devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
*val = dw_pcie_read_dbi(pci, where, size);
return PCIBIOS_SUCCESSFUL;
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index d15cf35fa7f2..5ba144924ff8 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -62,7 +62,7 @@ struct intel_pcie_soc {
unsigned int pcie_ver;
};
-struct intel_pcie_port {
+struct intel_pcie {
struct dw_pcie pci;
void __iomem *app_base;
struct gpio_desc *reset_gpio;
@@ -83,53 +83,53 @@ static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
writel(val, base + ofs);
}
-static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
+static inline void pcie_app_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
{
- writel(val, lpp->app_base + ofs);
+ writel(val, pcie->app_base + ofs);
}
-static void pcie_app_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
+static void pcie_app_wr_mask(struct intel_pcie *pcie, u32 ofs,
u32 mask, u32 val)
{
- pcie_update_bits(lpp->app_base, ofs, mask, val);
+ pcie_update_bits(pcie->app_base, ofs, mask, val);
}
-static inline u32 pcie_rc_cfg_rd(struct intel_pcie_port *lpp, u32 ofs)
+static inline u32 pcie_rc_cfg_rd(struct intel_pcie *pcie, u32 ofs)
{
- return dw_pcie_readl_dbi(&lpp->pci, ofs);
+ return dw_pcie_readl_dbi(&pcie->pci, ofs);
}
-static inline void pcie_rc_cfg_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
+static inline void pcie_rc_cfg_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
{
- dw_pcie_writel_dbi(&lpp->pci, ofs, val);
+ dw_pcie_writel_dbi(&pcie->pci, ofs, val);
}
-static void pcie_rc_cfg_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
+static void pcie_rc_cfg_wr_mask(struct intel_pcie *pcie, u32 ofs,
u32 mask, u32 val)
{
- pcie_update_bits(lpp->pci.dbi_base, ofs, mask, val);
+ pcie_update_bits(pcie->pci.dbi_base, ofs, mask, val);
}
-static void intel_pcie_ltssm_enable(struct intel_pcie_port *lpp)
+static void intel_pcie_ltssm_enable(struct intel_pcie *pcie)
{
- pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
+ pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
PCIE_APP_CCR_LTSSM_ENABLE);
}
-static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
+static void intel_pcie_ltssm_disable(struct intel_pcie *pcie)
{
- pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
+ pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
}
-static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
+static void intel_pcie_link_setup(struct intel_pcie *pcie)
{
u32 val;
- u8 offset = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);
+ u8 offset = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP);
- val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);
+ val = pcie_rc_cfg_rd(pcie, offset + PCI_EXP_LNKCTL);
val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
- pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
+ pcie_rc_cfg_wr(pcie, offset + PCI_EXP_LNKCTL, val);
}
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
@@ -148,14 +148,14 @@ static void intel_pcie_init_n_fts(struct dw_pcie *pci)
pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
}
-static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
+static int intel_pcie_ep_rst_init(struct intel_pcie *pcie)
{
- struct device *dev = lpp->pci.dev;
+ struct device *dev = pcie->pci.dev;
int ret;
- lpp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(lpp->reset_gpio)) {
- ret = PTR_ERR(lpp->reset_gpio);
+ pcie->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset_gpio)) {
+ ret = PTR_ERR(pcie->reset_gpio);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
return ret;
@@ -167,19 +167,19 @@ static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
return 0;
}
-static void intel_pcie_core_rst_assert(struct intel_pcie_port *lpp)
+static void intel_pcie_core_rst_assert(struct intel_pcie *pcie)
{
- reset_control_assert(lpp->core_rst);
+ reset_control_assert(pcie->core_rst);
}
-static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
+static void intel_pcie_core_rst_deassert(struct intel_pcie *pcie)
{
/*
* One micro-second delay to make sure the reset pulse
* wide enough so that core reset is clean.
*/
udelay(1);
- reset_control_deassert(lpp->core_rst);
+ reset_control_deassert(pcie->core_rst);
/*
* Some SoC core reset also reset PHY, more delay needed
@@ -188,58 +188,58 @@ static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
usleep_range(1000, 2000);
}
-static void intel_pcie_device_rst_assert(struct intel_pcie_port *lpp)
+static void intel_pcie_device_rst_assert(struct intel_pcie *pcie)
{
- gpiod_set_value_cansleep(lpp->reset_gpio, 1);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
}
-static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
+static void intel_pcie_device_rst_deassert(struct intel_pcie *pcie)
{
- msleep(lpp->rst_intrvl);
- gpiod_set_value_cansleep(lpp->reset_gpio, 0);
+ msleep(pcie->rst_intrvl);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}
-static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
+static void intel_pcie_core_irq_disable(struct intel_pcie *pcie)
{
- pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
- pcie_app_wr(lpp, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
+ pcie_app_wr(pcie, PCIE_APP_IRNEN, 0);
+ pcie_app_wr(pcie, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
}
static int intel_pcie_get_resources(struct platform_device *pdev)
{
- struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
- struct dw_pcie *pci = &lpp->pci;
+ struct intel_pcie *pcie = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &pcie->pci;
struct device *dev = pci->dev;
int ret;
- lpp->core_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(lpp->core_clk)) {
- ret = PTR_ERR(lpp->core_clk);
+ pcie->core_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->core_clk)) {
+ ret = PTR_ERR(pcie->core_clk);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get clks: %d\n", ret);
return ret;
}
- lpp->core_rst = devm_reset_control_get(dev, NULL);
- if (IS_ERR(lpp->core_rst)) {
- ret = PTR_ERR(lpp->core_rst);
+ pcie->core_rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(pcie->core_rst)) {
+ ret = PTR_ERR(pcie->core_rst);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get resets: %d\n", ret);
return ret;
}
ret = device_property_read_u32(dev, "reset-assert-ms",
- &lpp->rst_intrvl);
+ &pcie->rst_intrvl);
if (ret)
- lpp->rst_intrvl = RESET_INTERVAL_MS;
+ pcie->rst_intrvl = RESET_INTERVAL_MS;
- lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
- if (IS_ERR(lpp->app_base))
- return PTR_ERR(lpp->app_base);
+ pcie->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(pcie->app_base))
+ return PTR_ERR(pcie->app_base);
- lpp->phy = devm_phy_get(dev, "pcie");
- if (IS_ERR(lpp->phy)) {
- ret = PTR_ERR(lpp->phy);
+ pcie->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
return ret;
@@ -248,137 +248,137 @@ static int intel_pcie_get_resources(struct platform_device *pdev)
return 0;
}
-static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
+static int intel_pcie_wait_l2(struct intel_pcie *pcie)
{
u32 value;
int ret;
- struct dw_pcie *pci = &lpp->pci;
+ struct dw_pcie *pci = &pcie->pci;
if (pci->link_gen < 3)
return 0;
/* Send PME_TURN_OFF message */
- pcie_app_wr_mask(lpp, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
+ pcie_app_wr_mask(pcie, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
PCIE_APP_MSG_XMT_PM_TURNOFF);
/* Read PMC status and wait for falling into L2 link state */
- ret = readl_poll_timeout(lpp->app_base + PCIE_APP_PMC, value,
+ ret = readl_poll_timeout(pcie->app_base + PCIE_APP_PMC, value,
value & PCIE_APP_PMC_IN_L2, 20,
jiffies_to_usecs(5 * HZ));
if (ret)
- dev_err(lpp->pci.dev, "PCIe link enter L2 timeout!\n");
+ dev_err(pcie->pci.dev, "PCIe link enter L2 timeout!\n");
return ret;
}
-static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
+static void intel_pcie_turn_off(struct intel_pcie *pcie)
{
- if (dw_pcie_link_up(&lpp->pci))
- intel_pcie_wait_l2(lpp);
+ if (dw_pcie_link_up(&pcie->pci))
+ intel_pcie_wait_l2(pcie);
/* Put endpoint device in reset state */
- intel_pcie_device_rst_assert(lpp);
- pcie_rc_cfg_wr_mask(lpp, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
+ intel_pcie_device_rst_assert(pcie);
+ pcie_rc_cfg_wr_mask(pcie, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
}
-static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
+static int intel_pcie_host_setup(struct intel_pcie *pcie)
{
int ret;
- struct dw_pcie *pci = &lpp->pci;
+ struct dw_pcie *pci = &pcie->pci;
- intel_pcie_core_rst_assert(lpp);
- intel_pcie_device_rst_assert(lpp);
+ intel_pcie_core_rst_assert(pcie);
+ intel_pcie_device_rst_assert(pcie);
- ret = phy_init(lpp->phy);
+ ret = phy_init(pcie->phy);
if (ret)
return ret;
- intel_pcie_core_rst_deassert(lpp);
+ intel_pcie_core_rst_deassert(pcie);
- ret = clk_prepare_enable(lpp->core_clk);
+ ret = clk_prepare_enable(pcie->core_clk);
if (ret) {
- dev_err(lpp->pci.dev, "Core clock enable failed: %d\n", ret);
+ dev_err(pcie->pci.dev, "Core clock enable failed: %d\n", ret);
goto clk_err;
}
pci->atu_base = pci->dbi_base + 0xC0000;
- intel_pcie_ltssm_disable(lpp);
- intel_pcie_link_setup(lpp);
+ intel_pcie_ltssm_disable(pcie);
+ intel_pcie_link_setup(pcie);
intel_pcie_init_n_fts(pci);
dw_pcie_setup_rc(&pci->pp);
dw_pcie_upconfig_setup(pci);
- intel_pcie_device_rst_deassert(lpp);
- intel_pcie_ltssm_enable(lpp);
+ intel_pcie_device_rst_deassert(pcie);
+ intel_pcie_ltssm_enable(pcie);
ret = dw_pcie_wait_for_link(pci);
if (ret)
goto app_init_err;
/* Enable integrated interrupts */
- pcie_app_wr_mask(lpp, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
+ pcie_app_wr_mask(pcie, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
PCIE_APP_IRN_INT);
return 0;
app_init_err:
- clk_disable_unprepare(lpp->core_clk);
+ clk_disable_unprepare(pcie->core_clk);
clk_err:
- intel_pcie_core_rst_assert(lpp);
- phy_exit(lpp->phy);
+ intel_pcie_core_rst_assert(pcie);
+ phy_exit(pcie->phy);
return ret;
}
-static void __intel_pcie_remove(struct intel_pcie_port *lpp)
+static void __intel_pcie_remove(struct intel_pcie *pcie)
{
- intel_pcie_core_irq_disable(lpp);
- intel_pcie_turn_off(lpp);
- clk_disable_unprepare(lpp->core_clk);
- intel_pcie_core_rst_assert(lpp);
- phy_exit(lpp->phy);
+ intel_pcie_core_irq_disable(pcie);
+ intel_pcie_turn_off(pcie);
+ clk_disable_unprepare(pcie->core_clk);
+ intel_pcie_core_rst_assert(pcie);
+ phy_exit(pcie->phy);
}
static int intel_pcie_remove(struct platform_device *pdev)
{
- struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
- struct pcie_port *pp = &lpp->pci.pp;
+ struct intel_pcie *pcie = platform_get_drvdata(pdev);
+ struct pcie_port *pp = &pcie->pci.pp;
dw_pcie_host_deinit(pp);
- __intel_pcie_remove(lpp);
+ __intel_pcie_remove(pcie);
return 0;
}
static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
{
- struct intel_pcie_port *lpp = dev_get_drvdata(dev);
+ struct intel_pcie *pcie = dev_get_drvdata(dev);
int ret;
- intel_pcie_core_irq_disable(lpp);
- ret = intel_pcie_wait_l2(lpp);
+ intel_pcie_core_irq_disable(pcie);
+ ret = intel_pcie_wait_l2(pcie);
if (ret)
return ret;
- phy_exit(lpp->phy);
- clk_disable_unprepare(lpp->core_clk);
+ phy_exit(pcie->phy);
+ clk_disable_unprepare(pcie->core_clk);
return ret;
}
static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
{
- struct intel_pcie_port *lpp = dev_get_drvdata(dev);
+ struct intel_pcie *pcie = dev_get_drvdata(dev);
- return intel_pcie_host_setup(lpp);
+ return intel_pcie_host_setup(pcie);
}
static int intel_pcie_rc_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct intel_pcie_port *lpp = dev_get_drvdata(pci->dev);
+ struct intel_pcie *pcie = dev_get_drvdata(pci->dev);
- return intel_pcie_host_setup(lpp);
+ return intel_pcie_host_setup(pcie);
}
static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
@@ -402,17 +402,17 @@ static int intel_pcie_probe(struct platform_device *pdev)
{
const struct intel_pcie_soc *data;
struct device *dev = &pdev->dev;
- struct intel_pcie_port *lpp;
+ struct intel_pcie *pcie;
struct pcie_port *pp;
struct dw_pcie *pci;
int ret;
- lpp = devm_kzalloc(dev, sizeof(*lpp), GFP_KERNEL);
- if (!lpp)
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
return -ENOMEM;
- platform_set_drvdata(pdev, lpp);
- pci = &lpp->pci;
+ platform_set_drvdata(pdev, pcie);
+ pci = &pcie->pci;
pci->dev = dev;
pp = &pci->pp;
@@ -420,7 +420,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = intel_pcie_ep_rst_init(lpp);
+ ret = intel_pcie_ep_rst_init(pcie);
if (ret)
return ret;
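The intel_pcie_wait_l2() hunk above polls the PMC status register through readl_poll_timeout() until the link reports L2 or five seconds elapse. A simplified userspace analogue of that poll-with-timeout idiom (it busy-waits instead of sleeping between reads, and the register read is faked; the real kernel helper also takes a per-iteration delay):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Simplified analogue of the kernel's readl_poll_timeout(): repeatedly
 * evaluate a read operation until the condition holds or the timeout
 * (in microseconds) expires. */
static int poll_timeout(uint32_t (*read_op)(void), bool (*cond)(uint32_t),
                        uint32_t *val, long timeout_us)
{
    struct timespec start, now;

    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        *val = read_op();
        if (cond(*val))
            return 0;               /* condition met */
        clock_gettime(CLOCK_MONOTONIC, &now);
        long elapsed_us = (now.tv_sec - start.tv_sec) * 1000000L +
                          (now.tv_nsec - start.tv_nsec) / 1000L;
        if (elapsed_us > timeout_us)
            return -1;              /* like -ETIMEDOUT */
    }
}

/* Fake "register": pretends the PMC enters L2 on the third read. */
static uint32_t fake_pmc_read(void)
{
    static int reads;
    return (++reads >= 3) ? 0x1 : 0x0;   /* bit 0 = "in L2" */
}

static bool in_l2(uint32_t v) { return v & 0x1; }

int main(void)
{
    uint32_t val;
    int ret = poll_timeout(fake_pmc_read, in_l2, &val, 5 * 1000000L);
    printf("poll %s, last value 0x%x\n", ret ? "timed out" : "succeeded", val);
    return 0;
}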
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 095afbccf9c1..fa6886d66488 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -530,10 +530,8 @@ static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- if (PCI_SLOT(devfn)) {
- *val = ~0;
+ if (PCI_SLOT(devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
*val = dw_pcie_read_dbi(pci, where, size);
return PCIBIOS_SUCCESSFUL;
@@ -773,7 +771,6 @@ static const struct of_device_id kirin_pcie_match[] = {
static int kirin_pcie_probe(struct platform_device *pdev)
{
enum pcie_kirin_phy_type phy_type;
- const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct kirin_pcie *kirin_pcie;
struct dw_pcie *pci;
@@ -784,13 +781,12 @@ static int kirin_pcie_probe(struct platform_device *pdev)
return -EINVAL;
}
- of_id = of_match_device(kirin_pcie_match, dev);
- if (!of_id) {
+ phy_type = (long)of_device_get_match_data(dev);
+ if (!phy_type) {
dev_err(dev, "OF data missing\n");
return -EINVAL;
}
- phy_type = (long)of_id->data;
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
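The kirin change fetches the per-SoC PHY type directly with of_device_get_match_data() instead of re-running the match table by hand. One subtlety this pattern carries: the enum travels through the match entry's data pointer, so a value of 0 is indistinguishable from "no match data", and the `!phy_type` check is safe only while no valid PHY type is 0. A standalone sketch of the pointer-encoded-enum pattern (enum values and compatible strings here are illustrative, not the driver's real ones):

#include <stdio.h>
#include <string.h>

enum phy_type { PHY_NONE = 0, PHY_960 = 1, PHY_970 = 2 };

/* Minimal stand-in for struct of_device_id: compatible string + data. */
struct match_entry {
    const char *compatible;
    const void *data;
};

static const struct match_entry match_table[] = {
    { "vendor,soc960-pcie", (void *)(long)PHY_960 },
    { "vendor,soc970-pcie", (void *)(long)PHY_970 },
    { NULL, NULL },
};

/* Analogue of of_device_get_match_data(): return the matched entry's data. */
static const void *get_match_data(const char *compat)
{
    for (const struct match_entry *m = match_table; m->compatible; m++)
        if (!strcmp(m->compatible, compat))
            return m->data;
    return NULL;
}

int main(void)
{
    enum phy_type t = (enum phy_type)(long)get_match_data("vendor,soc970-pcie");
    if (!t) {
        fprintf(stderr, "OF data missing\n");
        return 1;
    }
    printf("phy_type = %d\n", t);
    return 0;
}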
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index cfe66bf04c1d..6ce8eddf3a37 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -553,10 +553,8 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
int irq, ret;
irq = platform_get_irq_byname(pdev, "global");
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get Global IRQ\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
qcom_pcie_ep_global_irq_thread,
@@ -620,7 +618,7 @@ static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static struct dw_pcie_ep_ops pci_ep_ops = {
+static const struct dw_pcie_ep_ops pci_ep_ops = {
.ep_init = qcom_pcie_ep_init,
.raise_irq = qcom_pcie_ep_raise_irq,
.get_features = qcom_pcie_epc_get_features,
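Constifying pci_ep_ops above is a small hardening win: a const table of function pointers is placed in read-only data, so the callbacks cannot be retargeted at runtime by a stray write. A minimal sketch of the idiom:

#include <stdio.h>

struct ep_ops {
    void (*ep_init)(void);
    int  (*raise_irq)(int irq);
};

static void my_ep_init(void) { puts("ep_init"); }
static int my_raise_irq(int irq) { printf("raise irq %d\n", irq); return 0; }

/* const: the function-pointer table lands in .rodata, so it cannot be
 * modified (accidentally or maliciously) after the driver loads. */
static const struct ep_ops pci_ep_ops = {
    .ep_init   = my_ep_init,
    .raise_irq = my_raise_irq,
};

int main(void)
{
    pci_ep_ops.ep_init();
    return pci_ep_ops.raise_irq(3);
}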
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 1c3d1116bb60..c19cd506ed3f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1343,7 +1343,7 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
/* Look for an available entry to hold the mapping */
for (i = 0; i < nr_map; i++) {
- u16 bdf_be = cpu_to_be16(map[i].bdf);
+ __be16 bdf_be = cpu_to_be16(map[i].bdf);
u32 val;
u8 hash;
@@ -1534,6 +1534,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
const struct qcom_pcie_cfg *pcie_cfg;
int ret;
+ pcie_cfg = of_device_get_match_data(dev);
+ if (!pcie_cfg || !pcie_cfg->ops) {
+ dev_err(dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@@ -1553,12 +1559,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->pci = pci;
- pcie_cfg = of_device_get_match_data(dev);
- if (!pcie_cfg || !pcie_cfg->ops) {
- dev_err(dev, "Invalid platform data\n");
- return -EINVAL;
- }
-
pcie->ops = pcie_cfg->ops;
pcie->pipe_clk_need_muxing = pcie_cfg->pipe_clk_need_muxing;
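Two independent fixes sit in this qcom hunk: the match-data validation moves ahead of any allocation so probe fails fast on bad platform data, and the BDF variable gains the __be16 type so sparse can flag byte-order mixups, since cpu_to_be16() returns a big-endian value. A userspace sketch of the typed-endianness idea (plain C only gets the documentation value; in the kernel, sparse enforces the type with __attribute__((bitwise))):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's __be16: same storage, but the name records
 * that the value is big-endian. */
typedef uint16_t be16;

static be16 cpu_to_be16_(uint16_t v)
{
    return (be16)((v >> 8) | (v << 8));   /* swap; assumes little-endian host */
}

int main(void)
{
    uint16_t bdf = 0x0108;                /* e.g. bus 1, devfn 0x08 */
    be16 bdf_be = cpu_to_be16_(bdf);

    /* Hashing or DMA-ing bdf_be now operates on the wire-order bytes. */
    printf("cpu order: 0x%04x, big-endian order: 0x%04x\n", bdf, bdf_be);
    return 0;
}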
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 1a9e353bef55..1569e82b5568 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -69,7 +69,7 @@ struct pcie_app_reg {
static int spear13xx_pcie_start_link(struct dw_pcie *pci)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
/* enable ltssm */
writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
@@ -83,7 +83,7 @@ static int spear13xx_pcie_start_link(struct dw_pcie *pci)
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
{
struct spear13xx_pcie *spear13xx_pcie = arg;
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
struct dw_pcie *pci = spear13xx_pcie->pci;
struct pcie_port *pp = &pci->pp;
unsigned int status;
@@ -102,7 +102,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
{
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
/* Enable MSI interrupt */
if (IS_ENABLED(CONFIG_PCI_MSI))
@@ -113,7 +113,7 @@ static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pc
static int spear13xx_pcie_link_up(struct dw_pcie *pci)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
return 1;
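The spear13xx fix adds __iomem to the register-block pointers: a sparse annotation marking addresses that must be reached through readl()/writel() rather than plain dereference. A rough userspace stand-in for why accesses are funneled through volatile accessors (real MMIO additionally needs ioremap() and, on some architectures, barriers):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for readl()/writel(): a volatile access the compiler
 * cannot cache, reorder, or elide. */
static inline uint32_t readl_(const volatile uint32_t *addr)
{
    return *addr;
}

static inline void writel_(uint32_t val, volatile uint32_t *addr)
{
    *addr = val;
}

/* Pretend register block, like struct pcie_app_reg in the driver. */
struct app_reg {
    uint32_t app_ctrl_0;
    uint32_t app_status_1;
};

int main(void)
{
    struct app_reg regs = { 0 };
    volatile struct app_reg *app_reg = &regs;  /* kernel: an __iomem pointer */

    writel_(0x1, &app_reg->app_ctrl_0);
    printf("status: 0x%x\n", readl_(&app_reg->app_status_1));
    return 0;
}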
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 904976913081..b1b5f836a806 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -245,7 +245,7 @@ static const unsigned int pcie_gen_freq[] = {
GEN4_CORE_CLK_FREQ
};
-struct tegra_pcie_dw {
+struct tegra194_pcie {
struct device *dev;
struct resource *appl_res;
struct resource *dbi_res;
@@ -289,22 +289,22 @@ struct tegra_pcie_dw {
int ep_state;
};
-struct tegra_pcie_dw_of_data {
+struct tegra194_pcie_of_data {
enum dw_pcie_device_mode mode;
};
-static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
+static inline struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci)
{
- return container_of(pci, struct tegra_pcie_dw, pci);
+ return container_of(pci, struct tegra194_pcie, pci);
}
-static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
+static inline void appl_writel(struct tegra194_pcie *pcie, const u32 value,
const u32 reg)
{
writel_relaxed(value, pcie->appl_base + reg);
}
-static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
+static inline u32 appl_readl(struct tegra194_pcie *pcie, const u32 reg)
{
return readl_relaxed(pcie->appl_base + reg);
}
@@ -316,7 +316,7 @@ struct tegra_pcie_soc {
static void apply_bad_link_workaround(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 current_link_width;
u16 val;
@@ -349,7 +349,7 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
- struct tegra_pcie_dw *pcie = arg;
+ struct tegra194_pcie *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
u32 val, tmp;
@@ -420,7 +420,7 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
+static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)
{
u32 val;
@@ -448,7 +448,7 @@ static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
- struct tegra_pcie_dw *pcie = arg;
+ struct tegra194_pcie *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
u32 val, speed;
@@ -494,7 +494,7 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
- struct tegra_pcie_dw *pcie = arg;
+ struct tegra194_pcie *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
u32 status_l0, status_l1, link_status;
@@ -537,7 +537,7 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
/*
@@ -554,7 +554,7 @@ static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
return pci_generic_config_read(bus, devfn, where, size, val);
}
-static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 val)
{
/*
@@ -571,8 +571,8 @@ static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
static struct pci_ops tegra_pci_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
- .read = tegra_pcie_dw_rd_own_conf,
- .write = tegra_pcie_dw_wr_own_conf,
+ .read = tegra194_pcie_rd_own_conf,
+ .write = tegra194_pcie_wr_own_conf,
};
#if defined(CONFIG_PCIEASPM)
@@ -594,7 +594,7 @@ static const u32 event_cntr_data_offset[] = {
0x1dc
};
-static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
+static void disable_aspm_l11(struct tegra194_pcie *pcie)
{
u32 val;
@@ -603,7 +603,7 @@ static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}
-static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
+static void disable_aspm_l12(struct tegra194_pcie *pcie)
{
u32 val;
@@ -612,7 +612,7 @@ static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}
-static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
+static inline u32 event_counter_prog(struct tegra194_pcie *pcie, u32 event)
{
u32 val;
@@ -629,7 +629,7 @@ static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
static int aspm_state_cnt(struct seq_file *s, void *data)
{
- struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
+ struct tegra194_pcie *pcie = (struct tegra194_pcie *)
dev_get_drvdata(s->private);
u32 val;
@@ -660,7 +660,7 @@ static int aspm_state_cnt(struct seq_file *s, void *data)
return 0;
}
-static void init_host_aspm(struct tegra_pcie_dw *pcie)
+static void init_host_aspm(struct tegra194_pcie *pcie)
{
struct dw_pcie *pci = &pcie->pci;
u32 val;
@@ -688,22 +688,22 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
-static void init_debugfs(struct tegra_pcie_dw *pcie)
+static void init_debugfs(struct tegra194_pcie *pcie)
{
debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
aspm_state_cnt);
}
#else
-static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
-static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
-static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
-static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
+static inline void disable_aspm_l12(struct tegra194_pcie *pcie) { return; }
+static inline void disable_aspm_l11(struct tegra194_pcie *pcie) { return; }
+static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; }
+static inline void init_debugfs(struct tegra194_pcie *pcie) { return; }
#endif
static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 val;
u16 val_w;
@@ -741,7 +741,7 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 val;
/* Enable legacy interrupt generation */
@@ -762,7 +762,7 @@ static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 val;
/* Enable MSI interrupt generation */
@@ -775,7 +775,7 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
/* Clear interrupt statuses before enabling interrupts */
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
@@ -800,7 +800,7 @@ static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
tegra_pcie_enable_msi_interrupts(pp);
}
-static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
+static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
{
struct dw_pcie *pci = &pcie->pci;
u32 val, offset, i;
@@ -853,10 +853,10 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
-static int tegra_pcie_dw_host_init(struct pcie_port *pp)
+static int tegra194_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 val;
pp->bridge->ops = &tegra_pci_ops;
@@ -914,10 +914,10 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp)
return 0;
}
-static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+static int tegra194_pcie_start_link(struct dw_pcie *pci)
{
u32 val, offset, speed, tmp;
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
struct pcie_port *pp = &pci->pp;
bool retry = true;
@@ -982,7 +982,7 @@ retry_link:
val &= ~PCI_DLF_EXCHANGE_ENABLE;
dw_pcie_writel_dbi(pci, offset, val);
- tegra_pcie_dw_host_init(pp);
+ tegra194_pcie_host_init(pp);
dw_pcie_setup_rc(pp);
retry = false;
@@ -998,32 +998,32 @@ retry_link:
return 0;
}
-static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+static int tegra194_pcie_link_up(struct dw_pcie *pci)
{
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
-static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
+static void tegra194_pcie_stop_link(struct dw_pcie *pci)
{
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
disable_irq(pcie->pex_rst_irq);
}
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
- .link_up = tegra_pcie_dw_link_up,
- .start_link = tegra_pcie_dw_start_link,
- .stop_link = tegra_pcie_dw_stop_link,
+ .link_up = tegra194_pcie_link_up,
+ .start_link = tegra194_pcie_start_link,
+ .stop_link = tegra194_pcie_stop_link,
};
-static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
- .host_init = tegra_pcie_dw_host_init,
+static const struct dw_pcie_host_ops tegra194_pcie_host_ops = {
+ .host_init = tegra194_pcie_host_init,
};
-static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie)
{
unsigned int phy_count = pcie->phy_count;
@@ -1033,7 +1033,7 @@ static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
}
}
-static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_enable_phy(struct tegra194_pcie *pcie)
{
unsigned int i;
int ret;
@@ -1060,7 +1060,7 @@ phy_exit:
return ret;
}
-static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
+static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device_node *np = pcie->dev->of_node;
@@ -1156,7 +1156,7 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
return 0;
}
-static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie,
bool enable)
{
struct mrq_uphy_response resp;
@@ -1184,7 +1184,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
-static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,
bool enable)
{
struct mrq_uphy_response resp;
@@ -1212,7 +1212,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
-static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
{
struct pcie_port *pp = &pcie->pci.pp;
struct pci_bus *child, *root_bus = NULL;
@@ -1250,7 +1250,7 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
}
}
-static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie)
{
pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
if (IS_ERR(pcie->slot_ctl_3v3)) {
@@ -1271,7 +1271,7 @@ static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
return 0;
}
-static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_enable_slot_regulators(struct tegra194_pcie *pcie)
{
int ret;
@@ -1309,7 +1309,7 @@ fail_12v_enable:
return ret;
}
-static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie)
{
if (pcie->slot_ctl_12v)
regulator_disable(pcie->slot_ctl_12v);
@@ -1317,7 +1317,7 @@ static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
regulator_disable(pcie->slot_ctl_3v3);
}
-static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
+static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
bool en_hw_hot_rst)
{
int ret;
@@ -1414,7 +1414,7 @@ fail_slot_reg_en:
return ret;
}
-static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)
{
int ret;
@@ -1442,7 +1442,7 @@ static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
pcie->cid, ret);
}
-static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_init_controller(struct tegra194_pcie *pcie)
{
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
@@ -1452,7 +1452,7 @@ static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
if (ret < 0)
return ret;
- pp->ops = &tegra_pcie_dw_host_ops;
+ pp->ops = &tegra194_pcie_host_ops;
ret = dw_pcie_host_init(pp);
if (ret < 0) {
@@ -1467,11 +1467,11 @@ fail_host_init:
return ret;
}
-static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie)
{
u32 val;
- if (!tegra_pcie_dw_link_up(&pcie->pci))
+ if (!tegra194_pcie_link_up(&pcie->pci))
return 0;
val = appl_readl(pcie, APPL_RADM_STATUS);
@@ -1483,12 +1483,12 @@ static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
1, PME_ACK_TIMEOUT);
}
-static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
+static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie)
{
u32 data;
int err;
- if (!tegra_pcie_dw_link_up(&pcie->pci)) {
+ if (!tegra194_pcie_link_up(&pcie->pci)) {
dev_dbg(pcie->dev, "PCIe link is not up...!\n");
return;
}
@@ -1545,15 +1545,15 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
appl_writel(pcie, data, APPL_PINMUX);
}
-static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_deinit_controller(struct tegra194_pcie *pcie)
{
tegra_pcie_downstream_dev_to_D0(pcie);
dw_pcie_host_deinit(&pcie->pci.pp);
- tegra_pcie_dw_pme_turnoff(pcie);
+ tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
}
-static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_config_rp(struct tegra194_pcie *pcie)
{
struct device *dev = pcie->dev;
char *name;
@@ -1580,7 +1580,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
goto fail_pm_get_sync;
}
- pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
+ pcie->link_state = tegra194_pcie_link_up(&pcie->pci);
if (!pcie->link_state) {
ret = -ENOMEDIUM;
goto fail_host_init;
@@ -1605,7 +1605,7 @@ fail_pm_get_sync:
return ret;
}
-static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
{
u32 val;
int ret;
@@ -1644,7 +1644,7 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
}
-static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
{
struct dw_pcie *pci = &pcie->pci;
struct dw_pcie_ep *ep = &pci->ep;
@@ -1809,7 +1809,7 @@ fail_pll_init:
static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
- struct tegra_pcie_dw *pcie = arg;
+ struct tegra194_pcie *pcie = arg;
if (gpiod_get_value(pcie->pex_rst_gpiod))
pex_ep_event_pex_rst_assert(pcie);
@@ -1819,7 +1819,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
if (irq > 1)
@@ -1831,7 +1831,7 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
return 0;
}
-static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq)
{
if (unlikely(irq > 31))
return -EINVAL;
@@ -1841,7 +1841,7 @@ static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
return 0;
}
-static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
+static int tegra_pcie_ep_raise_msix_irq(struct tegra194_pcie *pcie, u16 irq)
{
struct dw_pcie_ep *ep = &pcie->pci.ep;
@@ -1855,7 +1855,7 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
@@ -1896,7 +1896,7 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
.get_features = tegra_pcie_ep_get_features,
};
-static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
+static int tegra_pcie_config_ep(struct tegra194_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
@@ -1957,12 +1957,12 @@ static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
return 0;
}
-static int tegra_pcie_dw_probe(struct platform_device *pdev)
+static int tegra194_pcie_probe(struct platform_device *pdev)
{
- const struct tegra_pcie_dw_of_data *data;
+ const struct tegra194_pcie_of_data *data;
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
- struct tegra_pcie_dw *pcie;
+ struct tegra194_pcie *pcie;
struct pcie_port *pp;
struct dw_pcie *pci;
struct phy **phys;
@@ -1988,7 +1988,7 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pcie->dev = &pdev->dev;
pcie->mode = (enum dw_pcie_device_mode)data->mode;
- ret = tegra_pcie_dw_parse_dt(pcie);
+ ret = tegra194_pcie_parse_dt(pcie);
if (ret < 0) {
const char *level = KERN_ERR;
@@ -2146,9 +2146,9 @@ fail:
return ret;
}
-static int tegra_pcie_dw_remove(struct platform_device *pdev)
+static int tegra194_pcie_remove(struct platform_device *pdev)
{
- struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+ struct tegra194_pcie *pcie = platform_get_drvdata(pdev);
if (!pcie->link_state)
return 0;
@@ -2164,9 +2164,9 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev)
return 0;
}
-static int tegra_pcie_dw_suspend_late(struct device *dev)
+static int tegra194_pcie_suspend_late(struct device *dev)
{
- struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ struct tegra194_pcie *pcie = dev_get_drvdata(dev);
u32 val;
if (!pcie->link_state)
@@ -2182,9 +2182,9 @@ static int tegra_pcie_dw_suspend_late(struct device *dev)
return 0;
}
-static int tegra_pcie_dw_suspend_noirq(struct device *dev)
+static int tegra194_pcie_suspend_noirq(struct device *dev)
{
- struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ struct tegra194_pcie *pcie = dev_get_drvdata(dev);
if (!pcie->link_state)
return 0;
@@ -2193,15 +2193,15 @@ static int tegra_pcie_dw_suspend_noirq(struct device *dev)
pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
PORT_LOGIC_MSI_CTRL_INT_0_EN);
tegra_pcie_downstream_dev_to_D0(pcie);
- tegra_pcie_dw_pme_turnoff(pcie);
+ tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
return 0;
}
-static int tegra_pcie_dw_resume_noirq(struct device *dev)
+static int tegra194_pcie_resume_noirq(struct device *dev)
{
- struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ struct tegra194_pcie *pcie = dev_get_drvdata(dev);
int ret;
if (!pcie->link_state)
@@ -2211,7 +2211,7 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
if (ret < 0)
return ret;
- ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
+ ret = tegra194_pcie_host_init(&pcie->pci.pp);
if (ret < 0) {
dev_err(dev, "Failed to init host: %d\n", ret);
goto fail_host_init;
@@ -2219,7 +2219,7 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
dw_pcie_setup_rc(&pcie->pci.pp);
- ret = tegra_pcie_dw_start_link(&pcie->pci);
+ ret = tegra194_pcie_start_link(&pcie->pci);
if (ret < 0)
goto fail_host_init;
@@ -2234,9 +2234,9 @@ fail_host_init:
return ret;
}
-static int tegra_pcie_dw_resume_early(struct device *dev)
+static int tegra194_pcie_resume_early(struct device *dev)
{
- struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ struct tegra194_pcie *pcie = dev_get_drvdata(dev);
u32 val;
if (pcie->mode == DW_PCIE_EP_TYPE) {
@@ -2259,9 +2259,9 @@ static int tegra_pcie_dw_resume_early(struct device *dev)
return 0;
}
-static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
+static void tegra194_pcie_shutdown(struct platform_device *pdev)
{
- struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+ struct tegra194_pcie *pcie = platform_get_drvdata(pdev);
if (!pcie->link_state)
return;
@@ -2273,50 +2273,50 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_PCI_MSI))
disable_irq(pcie->pci.pp.msi_irq);
- tegra_pcie_dw_pme_turnoff(pcie);
+ tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
}
-static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
+static const struct tegra194_pcie_of_data tegra194_pcie_rc_of_data = {
.mode = DW_PCIE_RC_TYPE,
};
-static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
+static const struct tegra194_pcie_of_data tegra194_pcie_ep_of_data = {
.mode = DW_PCIE_EP_TYPE,
};
-static const struct of_device_id tegra_pcie_dw_of_match[] = {
+static const struct of_device_id tegra194_pcie_of_match[] = {
{
.compatible = "nvidia,tegra194-pcie",
- .data = &tegra_pcie_dw_rc_of_data,
+ .data = &tegra194_pcie_rc_of_data,
},
{
.compatible = "nvidia,tegra194-pcie-ep",
- .data = &tegra_pcie_dw_ep_of_data,
+ .data = &tegra194_pcie_ep_of_data,
},
{},
};
-static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
- .suspend_late = tegra_pcie_dw_suspend_late,
- .suspend_noirq = tegra_pcie_dw_suspend_noirq,
- .resume_noirq = tegra_pcie_dw_resume_noirq,
- .resume_early = tegra_pcie_dw_resume_early,
+static const struct dev_pm_ops tegra194_pcie_pm_ops = {
+ .suspend_late = tegra194_pcie_suspend_late,
+ .suspend_noirq = tegra194_pcie_suspend_noirq,
+ .resume_noirq = tegra194_pcie_resume_noirq,
+ .resume_early = tegra194_pcie_resume_early,
};
-static struct platform_driver tegra_pcie_dw_driver = {
- .probe = tegra_pcie_dw_probe,
- .remove = tegra_pcie_dw_remove,
- .shutdown = tegra_pcie_dw_shutdown,
+static struct platform_driver tegra194_pcie_driver = {
+ .probe = tegra194_pcie_probe,
+ .remove = tegra194_pcie_remove,
+ .shutdown = tegra194_pcie_shutdown,
.driver = {
.name = "tegra194-pcie",
- .pm = &tegra_pcie_dw_pm_ops,
- .of_match_table = tegra_pcie_dw_of_match,
+ .pm = &tegra194_pcie_pm_ops,
+ .of_match_table = tegra194_pcie_of_match,
},
};
-module_platform_driver(tegra_pcie_dw_driver);
+module_platform_driver(tegra194_pcie_driver);
-MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
+MODULE_DEVICE_TABLE(of, tegra194_pcie_of_match);
MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
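The tegra194 rename keeps the to_tegra_pcie() helper intact, which is container_of() recovering the driver-private structure from the embedded struct dw_pcie. A self-contained sketch of that pattern, with stand-in struct definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct dw_pcie { int dev; };               /* stand-in for the core struct */

struct tegra194_pcie {
    int mode;
    struct dw_pcie pci;                    /* embedded, not a pointer */
};

static struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci)
{
    return container_of(pci, struct tegra194_pcie, pci);
}

int main(void)
{
    struct tegra194_pcie pcie = { .mode = 1 };
    struct dw_pcie *pci = &pcie.pci;       /* what the core hands around */

    /* Walk back from the embedded member to the containing struct. */
    printf("mode = %d\n", to_tegra_pcie(pci)->mode);
    return 0;
}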
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index d05be942956e..b45ac3754242 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -61,9 +61,9 @@
#define PCL_RDLH_LINK_UP BIT(1)
#define PCL_XMLH_LINK_UP BIT(0)
-struct uniphier_pcie_priv {
- void __iomem *base;
+struct uniphier_pcie {
struct dw_pcie pci;
+ void __iomem *base;
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
@@ -72,62 +72,62 @@ struct uniphier_pcie_priv {
#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
-static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv,
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie *pcie,
bool enable)
{
u32 val;
- val = readl(priv->base + PCL_APP_READY_CTRL);
+ val = readl(pcie->base + PCL_APP_READY_CTRL);
if (enable)
val |= PCL_APP_LTSSM_ENABLE;
else
val &= ~PCL_APP_LTSSM_ENABLE;
- writel(val, priv->base + PCL_APP_READY_CTRL);
+ writel(val, pcie->base + PCL_APP_READY_CTRL);
}
-static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
+static void uniphier_pcie_init_rc(struct uniphier_pcie *pcie)
{
u32 val;
/* set RC MODE */
- val = readl(priv->base + PCL_MODE);
+ val = readl(pcie->base + PCL_MODE);
val |= PCL_MODE_REGEN;
val &= ~PCL_MODE_REGVAL;
- writel(val, priv->base + PCL_MODE);
+ writel(val, pcie->base + PCL_MODE);
/* use auxiliary power detection */
- val = readl(priv->base + PCL_APP_PM0);
+ val = readl(pcie->base + PCL_APP_PM0);
val |= PCL_SYS_AUX_PWR_DET;
- writel(val, priv->base + PCL_APP_PM0);
+ writel(val, pcie->base + PCL_APP_PM0);
/* assert PERST# */
- val = readl(priv->base + PCL_PINCTRL0);
+ val = readl(pcie->base + PCL_PINCTRL0);
val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
| PCL_PERST_PLDN_REGVAL);
val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
| PCL_PERST_PLDN_REGEN;
- writel(val, priv->base + PCL_PINCTRL0);
+ writel(val, pcie->base + PCL_PINCTRL0);
- uniphier_pcie_ltssm_enable(priv, false);
+ uniphier_pcie_ltssm_enable(pcie, false);
usleep_range(100000, 200000);
/* deassert PERST# */
- val = readl(priv->base + PCL_PINCTRL0);
+ val = readl(pcie->base + PCL_PINCTRL0);
val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
- writel(val, priv->base + PCL_PINCTRL0);
+ writel(val, pcie->base + PCL_PINCTRL0);
}
-static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
+static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie)
{
u32 status;
int ret;
/* wait PIPE clock */
- ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+ ret = readl_poll_timeout(pcie->base + PCL_PIPEMON, status,
status & PCL_PCLK_ALIVE, 100000, 1000000);
if (ret) {
- dev_err(priv->pci.dev,
+ dev_err(pcie->pci.dev,
"Failed to initialize controller in RC mode\n");
return ret;
}
@@ -137,10 +137,10 @@ static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
static int uniphier_pcie_link_up(struct dw_pcie *pci)
{
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
u32 val, mask;
- val = readl(priv->base + PCL_STATUS_LINK);
+ val = readl(pcie->base + PCL_STATUS_LINK);
mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;
return (val & mask) == mask;
@@ -148,39 +148,40 @@ static int uniphier_pcie_link_up(struct dw_pcie *pci)
static int uniphier_pcie_start_link(struct dw_pcie *pci)
{
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
- uniphier_pcie_ltssm_enable(priv, true);
+ uniphier_pcie_ltssm_enable(pcie, true);
return 0;
}
static void uniphier_pcie_stop_link(struct dw_pcie *pci)
{
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
- uniphier_pcie_ltssm_enable(priv, false);
+ uniphier_pcie_ltssm_enable(pcie, false);
}
-static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
+static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie)
{
- writel(PCL_RCV_INT_ALL_ENABLE, priv->base + PCL_RCV_INT);
- writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
+ writel(PCL_RCV_INT_ALL_ENABLE, pcie->base + PCL_RCV_INT);
+ writel(PCL_RCV_INTX_ALL_ENABLE, pcie->base + PCL_RCV_INTX);
}
+
static void uniphier_pcie_irq_mask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&pp->lock, flags);
- val = readl(priv->base + PCL_RCV_INTX);
+ val = readl(pcie->base + PCL_RCV_INTX);
val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
- writel(val, priv->base + PCL_RCV_INTX);
+ writel(val, pcie->base + PCL_RCV_INTX);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -189,15 +190,15 @@ static void uniphier_pcie_irq_unmask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&pp->lock, flags);
- val = readl(priv->base + PCL_RCV_INTX);
+ val = readl(pcie->base + PCL_RCV_INTX);
val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
- writel(val, priv->base + PCL_RCV_INTX);
+ writel(val, pcie->base + PCL_RCV_INTX);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -226,13 +227,13 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
{
struct pcie_port *pp = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long reg;
u32 val, bit;
/* INT for debug */
- val = readl(priv->base + PCL_RCV_INT);
+ val = readl(pcie->base + PCL_RCV_INT);
if (val & PCL_CFG_BW_MGT_STATUS)
dev_dbg(pci->dev, "Link Bandwidth Management Event\n");
@@ -243,16 +244,16 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
if (val & PCL_CFG_PME_MSI_STATUS)
dev_dbg(pci->dev, "PME Interrupt\n");
- writel(val, priv->base + PCL_RCV_INT);
+ writel(val, pcie->base + PCL_RCV_INT);
/* INTx */
chained_irq_enter(chip, desc);
- val = readl(priv->base + PCL_RCV_INTX);
+ val = readl(pcie->base + PCL_RCV_INTX);
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
for_each_set_bit(bit, &reg, PCI_NUM_INTX)
- generic_handle_domain_irq(priv->legacy_irq_domain, bit);
+ generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
chained_irq_exit(chip, desc);
}
@@ -260,7 +261,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
struct device_node *np = pci->dev->of_node;
struct device_node *np_intc;
int ret = 0;
@@ -278,9 +279,9 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
goto out_put_node;
}
- priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
- if (!priv->legacy_irq_domain) {
+ if (!pcie->legacy_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
ret = -ENODEV;
goto out_put_node;
@@ -297,14 +298,14 @@ out_put_node:
static int uniphier_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
int ret;
ret = uniphier_pcie_config_legacy_irq(pp);
if (ret)
return ret;
- uniphier_pcie_irq_enable(priv);
+ uniphier_pcie_irq_enable(pcie);
return 0;
}
@@ -313,36 +314,36 @@ static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
.host_init = uniphier_pcie_host_init,
};
-static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
+static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie)
{
int ret;
- ret = clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(pcie->clk);
if (ret)
return ret;
- ret = reset_control_deassert(priv->rst);
+ ret = reset_control_deassert(pcie->rst);
if (ret)
goto out_clk_disable;
- uniphier_pcie_init_rc(priv);
+ uniphier_pcie_init_rc(pcie);
- ret = phy_init(priv->phy);
+ ret = phy_init(pcie->phy);
if (ret)
goto out_rst_assert;
- ret = uniphier_pcie_wait_rc(priv);
+ ret = uniphier_pcie_wait_rc(pcie);
if (ret)
goto out_phy_exit;
return 0;
out_phy_exit:
- phy_exit(priv->phy);
+ phy_exit(pcie->phy);
out_rst_assert:
- reset_control_assert(priv->rst);
+ reset_control_assert(pcie->rst);
out_clk_disable:
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(pcie->clk);
return ret;
}
@@ -356,41 +357,41 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int uniphier_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct uniphier_pcie_priv *priv;
+ struct uniphier_pcie *pcie;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
return -ENOMEM;
- priv->pci.dev = dev;
- priv->pci.ops = &dw_pcie_ops;
+ pcie->pci.dev = dev;
+ pcie->pci.ops = &dw_pcie_ops;
- priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ pcie->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
- priv->rst = devm_reset_control_get_shared(dev, NULL);
- if (IS_ERR(priv->rst))
- return PTR_ERR(priv->rst);
+ pcie->rst = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(pcie->rst))
+ return PTR_ERR(pcie->rst);
- priv->phy = devm_phy_optional_get(dev, "pcie-phy");
- if (IS_ERR(priv->phy))
- return PTR_ERR(priv->phy);
+ pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, pcie);
- ret = uniphier_pcie_host_enable(priv);
+ ret = uniphier_pcie_host_enable(pcie);
if (ret)
return ret;
- priv->pci.pp.ops = &uniphier_pcie_host_ops;
+ pcie->pci.pp.ops = &uniphier_pcie_host_ops;
- return dw_pcie_host_init(&priv->pci.pp);
+ return dw_pcie_host_init(&pcie->pci.pp);
}
static const struct of_device_id uniphier_pcie_match[] = {
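The uniphier INTx handler above extracts the pending-interrupt field with FIELD_GET() and then walks the set bits with for_each_set_bit(). Minimal analogues of both helpers (the bit positions here are hypothetical, not the real PCL_RCV_INTX layout):

#include <stdint.h>
#include <stdio.h>

/* Analogues of the kernel helpers used in uniphier_pcie_irq_handler(). */
#define GENMASK(h, l)  (((~0u) >> (31 - (h))) & ((~0u) << (l)))
/* mask & ~(mask << 1) isolates the mask's lowest set bit, so the divide
 * shifts the field down to bit 0. */
#define FIELD_GET(mask, reg)  (((reg) & (mask)) / ((mask) & ~((mask) << 1)))

/* Hypothetical layout: INTx status in bits [19:16]. */
#define RCV_INTX_ALL_STATUS  GENMASK(19, 16)

int main(void)
{
    uint32_t val = 0x000a0000;           /* INTB and INTD pending */
    uint32_t reg = FIELD_GET(RCV_INTX_ALL_STATUS, val);

    /* for_each_set_bit() equivalent over the four INTx lines */
    for (int bit = 0; bit < 4; bit++)
        if (reg & (1u << bit))
            printf("dispatch INT%c\n", 'A' + bit);
    return 0;
}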
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index 306950272fd6..d7b7350f02dd 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -34,31 +34,31 @@
#define PF_DBG_WE BIT(31)
#define PF_DBG_PABR BIT(27)
-#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev)
+#define to_ls_g4_pcie(x) platform_get_drvdata((x)->pdev)
-struct ls_pcie_g4 {
+struct ls_g4_pcie {
struct mobiveil_pcie pci;
struct delayed_work dwork;
int irq;
};
-static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
+static inline u32 ls_g4_pcie_pf_readl(struct ls_g4_pcie *pcie, u32 off)
{
return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}
-static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
+static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie,
u32 off, u32 val)
{
iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}
-static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
+static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
{
- struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
+ struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci);
u32 state;
- state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
state = state & PF_DBG_LTSSM_MASK;
if (state == PF_DBG_LTSSM_L0)
@@ -67,14 +67,14 @@ static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
return 0;
}
-static void ls_pcie_g4_disable_interrupt(struct ls_pcie_g4 *pcie)
+static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie)
{
struct mobiveil_pcie *mv_pci = &pcie->pci;
mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB);
}
-static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie)
+static void ls_g4_pcie_enable_interrupt(struct ls_g4_pcie *pcie)
{
struct mobiveil_pcie *mv_pci = &pcie->pci;
u32 val;
@@ -87,7 +87,7 @@ static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie)
mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
}
-static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
+static int ls_g4_pcie_reinit_hw(struct ls_g4_pcie *pcie)
{
struct mobiveil_pcie *mv_pci = &pcie->pci;
struct device *dev = &mv_pci->pdev->dev;
@@ -97,7 +97,7 @@ static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
/* Poll for pab_csb_reset to set and PAB activity to clear */
do {
usleep_range(10, 15);
- val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_INT_STAT);
act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT);
} while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
if (to < 0) {
@@ -106,22 +106,22 @@ static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
}
/* clear PEX_RESET bit in PEX_PF0_DBG register */
- val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
val |= PF_DBG_WE;
- ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+ ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);
- val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
val |= PF_DBG_PABR;
- ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+ ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);
- val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
val &= ~PF_DBG_WE;
- ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+ ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);
mobiveil_host_init(mv_pci, true);
to = 100;
- while (!ls_pcie_g4_link_up(mv_pci) && to--)
+ while (!ls_g4_pcie_link_up(mv_pci) && to--)
usleep_range(200, 250);
if (to < 0) {
dev_err(dev, "PCIe link training timeout\n");
@@ -131,9 +131,9 @@ static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
return 0;
}
-static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
+static irqreturn_t ls_g4_pcie_isr(int irq, void *dev_id)
{
- struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
+ struct ls_g4_pcie *pcie = (struct ls_g4_pcie *)dev_id;
struct mobiveil_pcie *mv_pci = &pcie->pci;
u32 val;
@@ -142,7 +142,7 @@ static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
return IRQ_NONE;
if (val & PAB_INTP_RESET) {
- ls_pcie_g4_disable_interrupt(pcie);
+ ls_g4_pcie_disable_interrupt(pcie);
schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
}
@@ -151,9 +151,9 @@ static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
+static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci)
{
- struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
+ struct ls_g4_pcie *pcie = to_ls_g4_pcie(mv_pci);
struct platform_device *pdev = mv_pci->pdev;
struct device *dev = &pdev->dev;
int ret;
@@ -162,7 +162,7 @@ static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
if (pcie->irq < 0)
return pcie->irq;
- ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr,
+ ret = devm_request_irq(dev, pcie->irq, ls_g4_pcie_isr,
IRQF_SHARED, pdev->name, pcie);
if (ret) {
dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret);
@@ -172,11 +172,11 @@ static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
return 0;
}
-static void ls_pcie_g4_reset(struct work_struct *work)
+static void ls_g4_pcie_reset(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work,
work);
- struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
+ struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork);
struct mobiveil_pcie *mv_pci = &pcie->pci;
u16 ctrl;
@@ -184,26 +184,26 @@ static void ls_pcie_g4_reset(struct work_struct *work)
ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
- if (!ls_pcie_g4_reinit_hw(pcie))
+ if (!ls_g4_pcie_reinit_hw(pcie))
return;
- ls_pcie_g4_enable_interrupt(pcie);
+ ls_g4_pcie_enable_interrupt(pcie);
}
-static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
- .interrupt_init = ls_pcie_g4_interrupt_init,
+static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
+ .interrupt_init = ls_g4_pcie_interrupt_init,
};
-static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
- .link_up = ls_pcie_g4_link_up,
+static const struct mobiveil_pab_ops ls_g4_pcie_pab_ops = {
+ .link_up = ls_g4_pcie_link_up,
};
-static int __init ls_pcie_g4_probe(struct platform_device *pdev)
+static int __init ls_g4_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct mobiveil_pcie *mv_pci;
- struct ls_pcie_g4 *pcie;
+ struct ls_g4_pcie *pcie;
struct device_node *np = dev->of_node;
int ret;
@@ -220,13 +220,13 @@ static int __init ls_pcie_g4_probe(struct platform_device *pdev)
mv_pci = &pcie->pci;
mv_pci->pdev = pdev;
- mv_pci->ops = &ls_pcie_g4_pab_ops;
- mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
+ mv_pci->ops = &ls_g4_pcie_pab_ops;
+ mv_pci->rp.ops = &ls_g4_pcie_rp_ops;
mv_pci->rp.bridge = bridge;
platform_set_drvdata(pdev, pcie);
- INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);
+ INIT_DELAYED_WORK(&pcie->dwork, ls_g4_pcie_reset);
ret = mobiveil_pcie_host_probe(mv_pci);
if (ret) {
@@ -234,22 +234,22 @@ static int __init ls_pcie_g4_probe(struct platform_device *pdev)
return ret;
}
- ls_pcie_g4_enable_interrupt(pcie);
+ ls_g4_pcie_enable_interrupt(pcie);
return 0;
}
-static const struct of_device_id ls_pcie_g4_of_match[] = {
+static const struct of_device_id ls_g4_pcie_of_match[] = {
{ .compatible = "fsl,lx2160a-pcie", },
{ },
};
-static struct platform_driver ls_pcie_g4_driver = {
+static struct platform_driver ls_g4_pcie_driver = {
.driver = {
.name = "layerscape-pcie-gen4",
- .of_match_table = ls_pcie_g4_of_match,
+ .of_match_table = ls_g4_pcie_of_match,
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);
+builtin_platform_driver_probe(ls_g4_pcie_driver, ls_g4_pcie_probe);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index c3b725afa11f..4f5b44827d21 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -115,6 +115,7 @@
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
+#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
#define PCIE_MSI_DATA_MASK GENMASK(15, 0)
@@ -570,6 +571,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
/* Clear all interrupts */
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
@@ -582,7 +584,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
/* Unmask all MSIs */
- advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
+ advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
/* Enable summary interrupt for GIC SPI source */
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
@@ -872,11 +874,15 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
return PCI_BRIDGE_EMUL_HANDLED;
}
- case PCI_CAP_LIST_ID:
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVCAP2:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCAP2:
+ case PCI_EXP_LNKCTL2:
*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
return PCI_BRIDGE_EMUL_HANDLED;
+
default:
return PCI_BRIDGE_EMUL_NOT_HANDLED;
}
@@ -890,10 +896,6 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
struct advk_pcie *pcie = bridge->data;
switch (reg) {
- case PCI_EXP_DEVCTL:
- advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
- break;
-
case PCI_EXP_LNKCTL:
advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
if (new & PCI_EXP_LNKCTL_RL)
@@ -915,6 +917,12 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
advk_writel(pcie, new, PCIE_ISR0_REG);
break;
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCTL2:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ break;
+
default:
break;
}
@@ -953,6 +961,9 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
/* Support interrupt A for MSI feature */
bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+ /* Aardvark HW provides PCIe Capability structure in version 2 */
+ bridge->pcie_conf.cap = cpu_to_le16(2);
+
/* Indicates supports for Completion Retry Status */
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
@@ -1017,10 +1028,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
u32 reg;
int ret;
- if (!advk_pcie_valid_device(pcie, bus, devfn)) {
- *val = 0xffffffff;
+ if (!advk_pcie_valid_device(pcie, bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
if (pci_is_root_bus(bus))
return pci_bridge_emul_conf_read(&pcie->bridge, where,
@@ -1383,7 +1392,7 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
- msi_status = msi_val & ~msi_mask;
+ msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);
for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
if (!(BIT(msi_idx) & msi_status))
@@ -1535,8 +1544,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
* only PIO for issuing configuration transfers which does
* not use PCIe window configuration.
*/
- if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
- type != IORESOURCE_IO)
+ if (type != IORESOURCE_MEM && type != IORESOURCE_IO)
continue;
/*
@@ -1544,8 +1552,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
* configuration is set to transparent memory access so it
* does not need window configuration.
*/
- if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
- entry->offset == 0)
+ if (type == IORESOURCE_MEM && entry->offset == 0)
continue;
/*
@@ -1677,20 +1684,64 @@ static int advk_pcie_remove(struct platform_device *pdev)
{
struct advk_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u32 val;
int i;
+ /* Remove PCI bus with all devices */
pci_lock_rescan_remove();
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
pci_unlock_rescan_remove();
+ /* Disable Root Bridge I/O space, memory space and bus mastering */
+ val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+ val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG);
+
+ /* Disable MSI */
+ val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+ val &= ~PCIE_CORE_CTRL2_MSI_ENABLE;
+ advk_writel(pcie, val, PCIE_CORE_CTRL2_REG);
+
+ /* Clear MSI address */
+ advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG);
+ advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG);
+
+ /* Mask all interrupts */
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG);
+
+ /* Clear all interrupts */
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+
+ /* Remove IRQ domains */
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
+ /* Free config space for emulated root bridge */
+ pci_bridge_emul_cleanup(&pcie->bridge);
+
+ /* Assert PERST# signal which prepares PCIe card for power down */
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+
+ /* Disable link training */
+ val = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ val &= ~LINK_TRAINING_EN;
+ advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);
+
/* Disable outbound address windows mapping */
for (i = 0; i < OB_WIN_COUNT; i++)
advk_pcie_disable_ob_win(pcie, i);
+ /* Disable phy */
+ advk_pcie_disable_phy(pcie);
+
return 0;
}
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 6733cb14e775..20ea2ee330b8 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -43,13 +43,12 @@
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
-#include <linux/irqdomain.h>
-#include <asm/irqdomain.h>
-#include <asm/apic.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
+#include <linux/irqdomain.h>
+#include <linux/acpi.h>
#include <asm/mshyperv.h>
/*
@@ -583,6 +582,265 @@ struct hv_pci_compl {
static void hv_pci_onchannelcallback(void *context);
+#ifdef CONFIG_X86
+#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED
+#define FLOW_HANDLER handle_edge_irq
+#define FLOW_NAME "edge"
+
+static int hv_pci_irqchip_init(void)
+{
+ return 0;
+}
+
+static struct irq_domain *hv_pci_get_root_domain(void)
+{
+ return x86_vector_domain;
+}
+
+static unsigned int hv_msi_get_int_vector(struct irq_data *data)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+
+ return cfg->vector;
+}
+
+static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
+ struct msi_desc *msi_desc)
+{
+ msi_entry->address.as_uint32 = msi_desc->msg.address_lo;
+ msi_entry->data.as_uint32 = msi_desc->msg.data;
+}
+
+static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ return pci_msi_prepare(domain, dev, nvec, info);
+}
+#elif defined(CONFIG_ARM64)
+/*
+ * SPI vectors to use for vPCI; the arch SPI range is [32, 1019]. Leave a bit
+ * of room at the start so that SPIs can still be specified through ACPI, and
+ * start at a power of two to satisfy the power-of-2 multi-MSI requirement.
+ */
+#define HV_PCI_MSI_SPI_START 64
+#define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START)
+#define DELIVERY_MODE 0
+#define FLOW_HANDLER NULL
+#define FLOW_NAME NULL
+#define hv_msi_prepare NULL
+
+struct hv_pci_chip_data {
+ DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
+ struct mutex map_lock;
+};
+
+/* Hyper-V vPCI MSI GIC IRQ domain */
+static struct irq_domain *hv_msi_gic_irq_domain;
+
+/* Hyper-V PCI MSI IRQ chip */
+static struct irq_chip hv_arm64_msi_irq_chip = {
+ .name = "MSI",
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent
+};
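/*
 * Sketch (not part of the patch): in a hierarchical IRQ domain the
 * per-device chip commonly forwards mask/unmask/EOI/affinity to its parent
 * (the GIC here) through the generic irq_chip_*_parent helpers, exactly as
 * hv_arm64_msi_irq_chip does above:
 */
static struct irq_chip example_parent_fwd_chip = {
	.name			= "example",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};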
+
+static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
+{
+ return irqd->parent_data->hwirq;
+}
+
+static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
+ struct msi_desc *msi_desc)
+{
+ msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) |
+ msi_desc->msg.address_lo;
+ msi_entry->data = msi_desc->msg.data;
+}
+
+/*
+ * @nr_bm_irqs: Indicates the number of IRQs that were allocated from
+ * the bitmap.
+ * @nr_dom_irqs: Indicates the number of IRQs that were allocated from
+ * the parent domain.
+ */
+static void hv_pci_vec_irq_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_bm_irqs,
+ unsigned int nr_dom_irqs)
+{
+ struct hv_pci_chip_data *chip_data = domain->host_data;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ int first = d->hwirq - HV_PCI_MSI_SPI_START;
+ int i;
+
+ mutex_lock(&chip_data->map_lock);
+ bitmap_release_region(chip_data->spi_map,
+ first,
+ get_count_order(nr_bm_irqs));
+ mutex_unlock(&chip_data->map_lock);
+ for (i = 0; i < nr_dom_irqs; i++) {
+ if (i)
+ d = irq_domain_get_irq_data(domain, virq + i);
+ irq_domain_reset_irq_data(d);
+ }
+
+ irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
+}
+
+static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
+}
+
+static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
+ unsigned int nr_irqs,
+ irq_hw_number_t *hwirq)
+{
+ struct hv_pci_chip_data *chip_data = domain->host_data;
+ int index;
+
+ /* Find and allocate region from the SPI bitmap */
+ mutex_lock(&chip_data->map_lock);
+ index = bitmap_find_free_region(chip_data->spi_map,
+ HV_PCI_MSI_SPI_NR,
+ get_count_order(nr_irqs));
+ mutex_unlock(&chip_data->map_lock);
+ if (index < 0)
+ return -ENOSPC;
+
+ *hwirq = index + HV_PCI_MSI_SPI_START;
+
+ return 0;
+}
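/*
 * Sketch (not part of the patch): bitmap_find_free_region() hands out
 * naturally aligned power-of-two blocks, which is what satisfies the
 * power-of-2 multi-MSI requirement noted earlier. Allocation round-trip
 * with an assumed 64-entry map:
 */
static DECLARE_BITMAP(example_map, 64);

static int example_alloc(unsigned int nr_irqs)
{
	int order = get_count_order(nr_irqs);	/* e.g. 3 IRQs -> order 2 (4 slots) */
	int idx = bitmap_find_free_region(example_map, 64, order);

	if (idx < 0)
		return -ENOSPC;			/* no aligned block of 2^order bits */
	/* hwirqs idx .. idx + nr_irqs - 1 are now reserved */
	bitmap_release_region(example_map, idx, order);
	return 0;
}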
+
+static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ /*
+ * Since the interrupt specifier is not coming from ACPI or DT, the
+ * trigger type will need to be set explicitly. Otherwise, it will be
+ * set to whatever is in the GIC configuration.
+ */
+ d = irq_domain_get_irq_data(domain->parent, virq);
+
+ return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+}
+
+static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ irq_hw_number_t hwirq;
+ unsigned int i;
+ int ret;
+
+ ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
+ hwirq + i);
+ if (ret) {
+ hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
+ return ret;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i,
+ hwirq + i,
+ &hv_arm64_msi_irq_chip,
+ domain->host_data);
+ pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
+ }
+
+ return 0;
+}
+
+/*
+ * Pick the first CPU as a temporary IRQ affinity to be used while composing
+ * the MSI from the hypervisor. The GIC will eventually set the right
+ * affinity for the IRQ and the 'unmask' will retarget the interrupt to that
+ * CPU.
+ */
+static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *irqd, bool reserve)
+{
+ int cpu = cpumask_first(cpu_present_mask);
+
+ irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
+
+ return 0;
+}
+
+static const struct irq_domain_ops hv_pci_domain_ops = {
+ .alloc = hv_pci_vec_irq_domain_alloc,
+ .free = hv_pci_vec_irq_domain_free,
+ .activate = hv_pci_vec_irq_domain_activate,
+};
+
+static int hv_pci_irqchip_init(void)
+{
+ static struct hv_pci_chip_data *chip_data;
+ struct fwnode_handle *fn = NULL;
+ int ret = -ENOMEM;
+
+ chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+ if (!chip_data)
+ return ret;
+
+ mutex_init(&chip_data->map_lock);
+ fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
+ if (!fn)
+ goto free_chip;
+
+ /*
+ * Once enabled, the IRQ domain should not be removed, since there is no
+ * way to ensure that all the corresponding devices are gone and that no
+ * further interrupts will be generated.
+ */
+ hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
+ fn, &hv_pci_domain_ops,
+ chip_data);
+
+ if (!hv_msi_gic_irq_domain) {
+ pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
+ goto free_chip;
+ }
+
+ return 0;
+
+free_chip:
+ kfree(chip_data);
+ if (fn)
+ irq_domain_free_fwnode(fn);
+
+ return ret;
+}
+
+static struct irq_domain *hv_pci_get_root_domain(void)
+{
+ return hv_msi_gic_irq_domain;
+}
+#endif /* CONFIG_ARM64 */
+
/**
* hv_pci_generic_compl() - Invoked for a completion packet
* @context: Set up by the sender of the packet.
@@ -1191,17 +1449,11 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
put_pcichild(hpdev);
}
-static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
- bool force)
-{
- struct irq_data *parent = data->parent_data;
-
- return parent->chip->irq_set_affinity(parent, dest, force);
-}
-
static void hv_irq_mask(struct irq_data *data)
{
pci_msi_mask_irq(data);
+ if (data->parent_data->chip->irq_mask)
+ irq_chip_mask_parent(data);
}
/**
@@ -1217,7 +1469,6 @@ static void hv_irq_mask(struct irq_data *data)
static void hv_irq_unmask(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
- struct irq_cfg *cfg = irqd_cfg(data);
struct hv_retarget_device_interrupt *params;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
@@ -1246,7 +1497,7 @@ static void hv_irq_unmask(struct irq_data *data)
(hbus->hdev->dev_instance.b[7] << 8) |
(hbus->hdev->dev_instance.b[6] & 0xf8) |
PCI_FUNC(pdev->devfn);
- params->int_target.vector = cfg->vector;
+ params->int_target.vector = hv_msi_get_int_vector(data);
/*
* Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
@@ -1319,6 +1570,8 @@ exit_unlock:
dev_err(&hbus->hdev->device,
"%s() failed: %#llx", __func__, res);
+ if (data->parent_data->chip->irq_unmask)
+ irq_chip_unmask_parent(data);
pci_msi_unmask_irq(data);
}
@@ -1347,7 +1600,7 @@ static u32 hv_compose_msi_req_v1(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
/*
* Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
@@ -1377,7 +1630,7 @@ static u32 hv_compose_msi_req_v2(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
@@ -1397,7 +1650,7 @@ static u32 hv_compose_msi_req_v3(
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.reserved = 0;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
@@ -1419,7 +1672,6 @@ static u32 hv_compose_msi_req_v3(
*/
static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct irq_cfg *cfg = irqd_cfg(data);
struct hv_pcibus_device *hbus;
struct vmbus_channel *channel;
struct hv_pci_dev *hpdev;
@@ -1470,7 +1722,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
dest,
hpdev->desc.win_slot.slot,
- cfg->vector);
+ hv_msi_get_int_vector(data));
break;
case PCI_PROTOCOL_VERSION_1_2:
@@ -1478,14 +1730,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
dest,
hpdev->desc.win_slot.slot,
- cfg->vector);
+ hv_msi_get_int_vector(data));
break;
case PCI_PROTOCOL_VERSION_1_4:
size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
dest,
hpdev->desc.win_slot.slot,
- cfg->vector);
+ hv_msi_get_int_vector(data));
break;
default:
@@ -1594,14 +1846,18 @@ return_null_message:
static struct irq_chip hv_msi_irq_chip = {
.name = "Hyper-V PCIe MSI",
.irq_compose_msi_msg = hv_compose_msi_msg,
- .irq_set_affinity = hv_set_affinity,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#ifdef CONFIG_X86
.irq_ack = irq_chip_ack_parent,
+#elif defined(CONFIG_ARM64)
+ .irq_eoi = irq_chip_eoi_parent,
+#endif
.irq_mask = hv_irq_mask,
.irq_unmask = hv_irq_unmask,
};
static struct msi_domain_ops hv_msi_ops = {
- .msi_prepare = pci_msi_prepare,
+ .msi_prepare = hv_msi_prepare,
.msi_free = hv_msi_free,
};
@@ -1625,12 +1881,12 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
MSI_FLAG_PCI_MSIX);
- hbus->msi_info.handler = handle_edge_irq;
- hbus->msi_info.handler_name = "edge";
+ hbus->msi_info.handler = FLOW_HANDLER;
+ hbus->msi_info.handler_name = FLOW_NAME;
hbus->msi_info.data = hbus;
hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
&hbus->msi_info,
- x86_vector_domain);
+ hv_pci_get_root_domain());
if (!hbus->irq_domain) {
dev_err(&hbus->hdev->device,
"Failed to build an MSI IRQ domain\n");
@@ -1774,7 +2030,7 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
* If the memory enable bit is already set, Hyper-V silently ignores
* the below BAR updates, and the related PCI device driver can not
* work, because reading from the device register(s) always returns
- * 0xFFFFFFFF.
+ * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
*/
list_for_each_entry(hpdev, &hbus->children, list_entry) {
_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
@@ -3445,18 +3701,23 @@ static int hv_pci_suspend(struct hv_device *hdev)
static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
- struct msi_desc *entry;
struct irq_data *irq_data;
+ struct msi_desc *entry;
+ int ret = 0;
- for_each_pci_msi_entry(entry, pdev) {
+ msi_lock_descs(&pdev->dev);
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data))
- return -EINVAL;
+ if (WARN_ON_ONCE(!irq_data)) {
+ ret = -EINVAL;
+ break;
+ }
hv_compose_msi_msg(irq_data, &entry->msg);
}
+ msi_unlock_descs(&pdev->dev);
- return 0;
+ return ret;
}
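/*
 * Sketch (not part of the patch): msi_for_each_desc() must run under
 * msi_lock_descs(), and the MSI_DESC_ASSOCIATED filter limits the walk to
 * descriptors that have a Linux interrupt attached. The locked-iteration
 * shape used above, reduced to its skeleton:
 */
static void example_walk_msi_descs(struct device *dev)
{
	struct msi_desc *desc;

	msi_lock_descs(dev);
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* desc->irq and desc->msg are valid here */
	}
	msi_unlock_descs(dev);
}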
/*
@@ -3542,9 +3803,15 @@ static void __exit exit_hv_pci_drv(void)
static int __init init_hv_pci_drv(void)
{
+ int ret;
+
if (!hv_is_hyperv_initialized())
return -ENODEV;
+ ret = hv_pci_irqchip_init();
+ if (ret)
+ return ret;
+
/* Set the invalid domain number's bit, so it will not be used */
set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index ed13e81cd691..71258ea3d35f 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -6,6 +6,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -51,10 +52,14 @@
PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
PCIE_CONF_ADDR_EN)
#define PCIE_CONF_DATA_OFF 0x18fc
+#define PCIE_INT_CAUSE_OFF 0x1900
+#define PCIE_INT_PM_PME BIT(28)
#define PCIE_MASK_OFF 0x1910
#define PCIE_MASK_ENABLE_INTS 0x0f000000
#define PCIE_CTRL_OFF 0x1a00
#define PCIE_CTRL_X1_MODE 0x0001
+#define PCIE_CTRL_RC_MODE BIT(1)
+#define PCIE_CTRL_MASTER_HOT_RESET BIT(24)
#define PCIE_STAT_OFF 0x1a04
#define PCIE_STAT_BUS 0xff00
#define PCIE_STAT_DEV 0x1f0000
@@ -125,6 +130,11 @@ static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}
+static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
+{
+ return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
+}
+
static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
{
u32 stat;
@@ -145,22 +155,13 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
mvebu_writel(port, stat, PCIE_STAT_OFF);
}
-/*
- * Setup PCIE BARs and Address Decode Wins:
- * BAR[0] -> internal registers (needed for MSI)
- * BAR[1] -> covers all DRAM banks
- * BAR[2] -> Disabled
- * WIN[0-3] -> DRAM bank[0-3]
- */
-static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
{
- const struct mbus_dram_target_info *dram;
- u32 size;
int i;
- dram = mv_mbus_dram_info();
+ mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
- /* First, disable and clear BARs and windows. */
for (i = 1; i < 3; i++) {
mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
@@ -176,6 +177,25 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
+}
+
+/*
+ * Setup PCIE BARs and Address Decode Wins:
+ * BAR[0] -> internal registers (needed for MSI)
+ * BAR[1] -> covers all DRAM banks
+ * BAR[2] -> Disabled
+ * WIN[0-3] -> DRAM bank[0-3]
+ */
+static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+{
+ const struct mbus_dram_target_info *dram;
+ u32 size;
+ int i;
+
+ dram = mv_mbus_dram_info();
+
+ /* First, disable and clear BARs and windows. */
+ mvebu_pcie_disable_wins(port);
/* Setup windows for DDR banks. Count total DDR size on the fly. */
size = 0;
@@ -213,18 +233,47 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
- u32 cmd, mask;
+ u32 ctrl, cmd, dev_rev, mask;
- /* Point PCIe unit MBUS decode windows to DRAM space. */
- mvebu_pcie_setup_wins(port);
+ /* Setup PCIe controller to Root Complex mode. */
+ ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
+ ctrl |= PCIE_CTRL_RC_MODE;
+ mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
- /* Master + slave enable. */
+ /* Disable Root Bridge I/O space, memory space and bus mastering. */
cmd = mvebu_readl(port, PCIE_CMD_OFF);
- cmd |= PCI_COMMAND_IO;
- cmd |= PCI_COMMAND_MEMORY;
- cmd |= PCI_COMMAND_MASTER;
+ cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
mvebu_writel(port, cmd, PCIE_CMD_OFF);
+ /*
+ * Change the Class Code of the PCI Bridge device to PCI Bridge (0x6004),
+ * because the default value is Memory controller (0x5080).
+ *
+ * Note that this mvebu PCI Bridge does not have a compliant Type 1
+ * Configuration Space. The Header Type is reported as Type 0 and the
+ * config space has the Type 0 format.
+ *
+ * Moreover, the Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34)
+ * have the same format in Marvell's specification as in the PCIe
+ * specification, but their meaning is totally different: they are aliased
+ * into internal mvebu registers (e.g. PCIE_BAR_LO_OFF) and must not be
+ * changed or reconfigured by PCI device drivers.
+ *
+ * Therefore this driver emulates a PCI Bridge, routing config space
+ * accesses through internal mvebu registers or an emulated configuration
+ * buffer. The driver accesses these registers directly for simplicity,
+ * but they can also be reached via the standard mvebu mechanism for
+ * accessing PCI config space.
+ */
+ dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
+ dev_rev &= ~0xffffff00;
+ dev_rev |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+ mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);
+
+ /* Point PCIe unit MBUS decode windows to DRAM space. */
+ mvebu_pcie_setup_wins(port);
+
/* Enable interrupt lines A-D. */
mask = mvebu_readl(port, PCIE_MASK_OFF);
mask |= PCIE_MASK_ENABLE_INTS;
@@ -250,6 +299,9 @@ static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
case 4:
*val = readl_relaxed(conf_data);
break;
+ default:
+ *val = 0xffffffff;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
}
return PCIBIOS_SUCCESSFUL;
@@ -303,7 +355,7 @@ static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
* areas each having a power of two size. We start from the largest
* one (i.e highest order bit set in the size).
*/
-static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
unsigned int target, unsigned int attribute,
phys_addr_t base, size_t size,
phys_addr_t remap)
@@ -324,7 +376,7 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
&base, &end, ret);
mvebu_pcie_del_windows(port, base - size_mapped,
size_mapped);
- return;
+ return ret;
}
size -= sz;
@@ -333,16 +385,20 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
if (remap != MVEBU_MBUS_NO_REMAP)
remap += sz;
}
+
+ return 0;
}
-static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
+static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
unsigned int target, unsigned int attribute,
const struct mvebu_pcie_window *desired,
struct mvebu_pcie_window *cur)
{
+ int ret;
+
if (desired->base == cur->base && desired->remap == cur->remap &&
desired->size == cur->size)
- return;
+ return 0;
if (cur->size != 0) {
mvebu_pcie_del_windows(port, cur->base, cur->size);
@@ -357,31 +413,35 @@ static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
}
if (desired->size == 0)
- return;
+ return 0;
+
+ ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
+ desired->size, desired->remap);
+ if (ret) {
+ cur->size = 0;
+ cur->base = 0;
+ return ret;
+ }
- mvebu_pcie_add_windows(port, target, attribute, desired->base,
- desired->size, desired->remap);
*cur = *desired;
+ return 0;
}
-static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {};
struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new iobase/iolimit values invalid? */
if (conf->iolimit < conf->iobase ||
- conf->iolimitupper < conf->iobaseupper ||
- !(conf->command & PCI_COMMAND_IO)) {
- mvebu_pcie_set_window(port, port->io_target, port->io_attr,
- &desired, &port->iowin);
- return;
- }
+ conf->iolimitupper < conf->iobaseupper)
+ return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
+ &desired, &port->iowin);
if (!mvebu_has_ioport(port)) {
dev_WARN(&port->pcie->pdev->dev,
"Attempt to set IO when IO is disabled\n");
- return;
+ return -EOPNOTSUPP;
}
/*
@@ -399,22 +459,19 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
desired.remap) +
1;
- mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
- &port->iowin);
+ return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
+ &port->iowin);
}
-static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new membase/memlimit values invalid? */
- if (conf->memlimit < conf->membase ||
- !(conf->command & PCI_COMMAND_MEMORY)) {
- mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
- &desired, &port->memwin);
- return;
- }
+ if (conf->memlimit < conf->membase)
+ return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
+ &desired, &port->memwin);
/*
* We read the PCI-to-PCI bridge emulated registers, and
@@ -426,8 +483,56 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
desired.base + 1;
- mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
- &port->memwin);
+ return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
+ &port->memwin);
+}
+
+static pci_bridge_emul_read_status_t
+mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct mvebu_pcie_port *port = bridge->data;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ *value = mvebu_readl(port, PCIE_CMD_OFF);
+ break;
+
+ case PCI_PRIMARY_BUS: {
+ /*
+ * Of the whole 32-bit register, only the secondary bus number is
+ * read from HW (it is the mvebu Local Bus Number); the other bits
+ * are retrieved only from the emulated config buffer.
+ */
+ __le32 *cfgspace = (__le32 *)&bridge->conf;
+ u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
+ val &= ~0xff00;
+ val |= mvebu_pcie_get_local_bus_nr(port) << 8;
+ *value = val;
+ break;
+ }
+
+ case PCI_INTERRUPT_LINE: {
+ /*
+ * Of the whole 32-bit register, only one bit is read from HW:
+ * PCI_BRIDGE_CTL_BUS_RESET. The other bits are retrieved only from
+ * the emulated config buffer.
+ */
+ __le32 *cfgspace = (__le32 *)&bridge->conf;
+ u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+ if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
+ val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
+ else
+ val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
+ *value = val;
+ break;
+ }
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+
+ return PCI_BRIDGE_EMUL_HANDLED;
}
static pci_bridge_emul_read_status_t
@@ -442,9 +547,7 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
break;
case PCI_EXP_DEVCTL:
- *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
- ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
break;
case PCI_EXP_LNKCAP:
@@ -468,6 +571,18 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
*value = mvebu_readl(port, PCIE_RC_RTSTA);
break;
+ case PCI_EXP_DEVCAP2:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
+ break;
+
+ case PCI_EXP_DEVCTL2:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
+ break;
+
+ case PCI_EXP_LNKCTL2:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
+ break;
+
default:
return PCI_BRIDGE_EMUL_NOT_HANDLED;
}
@@ -484,39 +599,62 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
switch (reg) {
case PCI_COMMAND:
- {
- if (!mvebu_has_ioport(port))
- conf->command &= ~PCI_COMMAND_IO;
-
- if ((old ^ new) & PCI_COMMAND_IO)
- mvebu_pcie_handle_iobase_change(port);
- if ((old ^ new) & PCI_COMMAND_MEMORY)
- mvebu_pcie_handle_membase_change(port);
+ if (!mvebu_has_ioport(port)) {
+ conf->command = cpu_to_le16(
+ le16_to_cpu(conf->command) & ~PCI_COMMAND_IO);
+ new &= ~PCI_COMMAND_IO;
+ }
+ mvebu_writel(port, new, PCIE_CMD_OFF);
break;
- }
case PCI_IO_BASE:
- /*
- * We keep bit 1 set, it is a read-only bit that
- * indicates we support 32 bits addressing for the
- * I/O
- */
- conf->iobase |= PCI_IO_RANGE_TYPE_32;
- conf->iolimit |= PCI_IO_RANGE_TYPE_32;
- mvebu_pcie_handle_iobase_change(port);
+ if ((mask & 0xffff) && mvebu_pcie_handle_iobase_change(port)) {
+ /* On error disable IO range */
+ conf->iobase &= ~0xf0;
+ conf->iolimit &= ~0xf0;
+ conf->iobaseupper = cpu_to_le16(0x0000);
+ conf->iolimitupper = cpu_to_le16(0x0000);
+ if (mvebu_has_ioport(port))
+ conf->iobase |= 0xf0;
+ }
break;
case PCI_MEMORY_BASE:
- mvebu_pcie_handle_membase_change(port);
+ if (mvebu_pcie_handle_membase_change(port)) {
+ /* On error disable mem range */
+ conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
+ conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
+ conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
+ }
break;
case PCI_IO_BASE_UPPER16:
- mvebu_pcie_handle_iobase_change(port);
+ if (mvebu_pcie_handle_iobase_change(port)) {
+ /* On error disable IO range */
+ conf->iobase &= ~0xf0;
+ conf->iolimit &= ~0xf0;
+ conf->iobaseupper = cpu_to_le16(0x0000);
+ conf->iolimitupper = cpu_to_le16(0x0000);
+ if (mvebu_has_ioport(port))
+ conf->iobase |= 0xf0;
+ }
break;
case PCI_PRIMARY_BUS:
- mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
+ if (mask & 0xff00)
+ mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
+ break;
+
+ case PCI_INTERRUPT_LINE:
+ if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
+ u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
+ if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
+ ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
+ else
+ ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
+ mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
+ }
break;
default:
@@ -532,13 +670,6 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
switch (reg) {
case PCI_EXP_DEVCTL:
- /*
- * Armada370 data says these bits must always
- * be zero when in root complex mode.
- */
- new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
-
mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
break;
@@ -555,12 +686,31 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
break;
case PCI_EXP_RTSTA:
- mvebu_writel(port, new, PCIE_RC_RTSTA);
+ /*
+ * PME Status bit in Root Status Register (PCIE_RC_RTSTA)
+ * is read-only and can be cleared only by writing 0b to the
+ * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So
+ * clear PME via Interrupt Cause.
+ */
+ if (new & PCI_EXP_RTSTA_PME)
+ mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
+ break;
+
+ case PCI_EXP_DEVCTL2:
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
+ break;
+
+ case PCI_EXP_LNKCTL2:
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
+ break;
+
+ default:
break;
}
}
static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+ .read_base = mvebu_pci_bridge_emul_base_conf_read,
.write_base = mvebu_pci_bridge_emul_base_conf_write,
.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
@@ -570,9 +720,11 @@ static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
* Initialize the configuration space of the PCI-to-PCI bridge
* associated with the given PCIe interface.
*/
-static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
+static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
struct pci_bridge_emul *bridge = &port->bridge;
+ u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
+ u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);
bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
@@ -585,11 +737,17 @@ static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
}
+ /*
+ * Older mvebu hardware provides the PCIe Capability structure only in
+ * version 1; newer hardware provides it in version 2.
+ */
+ bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver);
+
bridge->has_pcie = true;
bridge->data = port;
bridge->ops = &mvebu_pci_bridge_emul_ops;
- pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
+ return pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
}
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
@@ -606,6 +764,9 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = &pcie->ports[i];
+ if (!port->base)
+ continue;
+
if (bus->number == 0 && port->devfn == devfn)
return port;
if (bus->number != 0 &&
@@ -653,20 +814,16 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int ret;
port = mvebu_pcie_find_port(pcie, bus, devfn);
- if (!port) {
- *val = 0xffffffff;
+ if (!port)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
/* Access the emulated PCI-to-PCI bridge */
if (bus->number == 0)
return pci_bridge_emul_conf_read(&port->bridge, where,
size, val);
- if (!mvebu_pcie_link_up(port)) {
- *val = 0xffffffff;
+ if (!mvebu_pcie_link_up(port))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
/* Access the real PCIe interface */
ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
@@ -680,6 +837,15 @@ static struct pci_ops mvebu_pcie_ops = {
.write = mvebu_pcie_wr_conf,
};
+static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ /* Interrupt support on mvebu emulated bridges is not implemented yet */
+ if (dev->bus->number == 0)
+ return 0; /* Proper return code 0 == NO_IRQ */
+
+ return of_irq_parse_and_map_pci(dev, slot, pin);
+}
+
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
const struct resource *res,
resource_size_t start,
@@ -781,6 +947,8 @@ static int mvebu_pcie_suspend(struct device *dev)
pcie = dev_get_drvdata(dev);
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = pcie->ports + i;
+ if (!port->base)
+ continue;
port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
}
@@ -795,6 +963,8 @@ static int mvebu_pcie_resume(struct device *dev)
pcie = dev_get_drvdata(dev);
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = pcie->ports + i;
+ if (!port->base)
+ continue;
mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
mvebu_pcie_setup_hw(port);
}
@@ -838,6 +1008,11 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
port->devfn = of_pci_get_devfn(child);
if (port->devfn < 0)
goto skip;
+ if (PCI_FUNC(port->devfn) != 0) {
+ dev_err(dev, "%s: invalid function number, must be zero\n",
+ port->name);
+ goto skip;
+ }
ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
&port->mem_target, &port->mem_attr);
@@ -992,6 +1167,10 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
resource_size(&pcie->io) - 1);
pcie->realio.name = "PCI I/O";
+ ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
+ if (ret)
+ return ret;
+
pci_add_resource(&bridge->windows, &pcie->realio);
ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
if (ret)
@@ -1001,54 +1180,6 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
return 0;
}
-/*
- * This is a copy of pci_host_probe(), except that it does the I/O
- * remap as the last step, once we are sure we won't fail.
- *
- * It should be removed once the I/O remap error handling issue has
- * been sorted out.
- */
-static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
-{
- struct mvebu_pcie *pcie;
- struct pci_bus *bus, *child;
- int ret;
-
- ret = pci_scan_root_bus_bridge(bridge);
- if (ret < 0) {
- dev_err(bridge->dev.parent, "Scanning root bridge failed");
- return ret;
- }
-
- pcie = pci_host_bridge_priv(bridge);
- if (resource_size(&pcie->io) != 0) {
- unsigned int i;
-
- for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
- pci_ioremap_io(i, pcie->io.start + i);
- }
-
- bus = bridge->bus;
-
- /*
- * We insert PCI resources into the iomem_resource and
- * ioport_resource trees in either pci_bus_claim_resources()
- * or pci_bus_assign_resources().
- */
- if (pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_claim_resources(bus);
- } else {
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
-
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
- }
-
- pci_bus_add_devices(bus);
- return 0;
-}
-
static int mvebu_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1112,9 +1243,93 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
continue;
}
+ ret = mvebu_pci_bridge_emul_init(port);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot init emulated bridge\n",
+ port->name);
+ devm_iounmap(dev, port->base);
+ port->base = NULL;
+ mvebu_pcie_powerdown(port);
+ continue;
+ }
+
+ /*
+ * The PCIe topology exported by mvebu hw is quite complicated. In
+ * reality it has something like N fully independent host bridges,
+ * where each host bridge has one PCIe Root Port (which acts as a
+ * PCI Bridge device). Each host bridge has its own independent
+ * internal registers, independent access to PCI config space,
+ * independent interrupt lines, and independent window and memory
+ * access configuration. Additionally there is some kind of
+ * peer-to-peer support between PCIe devices behind different
+ * host bridges, limited just to forwarding of memory and I/O
+ * transactions (forwarding of error messages and config cycles
+ * is not supported). So we could say there are N independent
+ * PCIe Root Complexes.
+ *
+ * For this kind of setup the DT should have been structured into
+ * N independent PCIe controllers / host bridges. Instead, the
+ * structure was historically defined to put the PCIe Root Ports
+ * of all host bridges onto one bus zero, like in a classic
+ * multi-port Root Complex setup with just one host bridge.
+ *
+ * This means that the pci-mvebu.c driver provides a "virtual"
+ * bus 0 on which it registers all PCIe Root Ports (PCI Bridge
+ * devices) specified in DT by their BDF addresses, and virtually
+ * routes PCI config accesses of each PCI Bridge device to the
+ * corresponding PCIe host bridge.
+ *
+ * Normally a PCI Bridge should choose between Type 0 and Type 1
+ * config requests based on the primary and secondary bus numbers
+ * configured on the bridge itself. But because the mvebu PCI
+ * Bridge does not have registers for primary and secondary bus
+ * numbers in its config space, it determines the type of config
+ * requests in its own custom way.
+ *
+ * There are two ways mvebu can determine the type of a config
+ * request.
+ *
+ * 1. If the Secondary Bus Number Enable bit is not set or is not
+ * available (applies to pre-XP PCIe controllers) then Type 0 is
+ * used if the target bus number equals the Local Bus Number (bits
+ * [15:8] in register 0x1a04) and the target device number differs
+ * from the Local Device Number (bits [20:16] in register 0x1a04).
+ * Type 1 is used if the target bus number differs from the Local
+ * Bus Number. And when the target bus number equals the Local Bus
+ * Number and the target device equals the Local Device Number,
+ * the request is routed to the Local PCI Bridge (PCIe Root Port).
+ *
+ * 2. If the Secondary Bus Number Enable bit is set (bit 7 in
+ * register 0x1a2c) then mvebu hw determines the type of a config
+ * request like a compliant PCI Bridge, based on the primary bus
+ * number, which is configured via the Local Bus Number (bits
+ * [15:8] in register 0x1a04), and the secondary bus number, which
+ * is configured via the Secondary Bus Number (bits [7:0] in
+ * register 0x1a2c). The Local PCI Bridge (PCIe Root Port) is
+ * available on the primary bus as the device with the Local
+ * Device Number (bits [20:16] in register 0x1a04).
+ *
+ * The Secondary Bus Number Enable bit is disabled by default and
+ * option 2. is not available on pre-XP PCIe controllers. Hence
+ * this driver always uses option 1 (see the illustrative sketch
+ * below).
+ *
+ * Basically this means that the primary and secondary buses share
+ * one virtual number configured via the Local Bus Number bits,
+ * and the Local Device Number bits determine whether the primary
+ * or the secondary bus is being accessed. Set Local Device Number
+ * to 1 and redirect all writes of the PCI Bridge Secondary Bus
+ * Number register to the Local Bus Number (bits [15:8] in
+ * register 0x1a04).
+ *
+ * This way, accessing devices on buses behind the secondary bus
+ * number works correctly, and config space accesses to device 0
+ * at the secondary bus number are also correctly routed to the
+ * secondary bus. Due to issues described in mvebu_pcie_setup_hw(),
+ * PCI Bridges on the primary bus (zero) are not accessed directly
+ * via PCI config space but rather indirectly via the kernel's
+ * emulated PCI bridge driver.
+ */
mvebu_pcie_setup_hw(port);
- mvebu_pcie_set_local_dev_nr(port, 1);
- mvebu_pci_bridge_emul_init(port);
+ mvebu_pcie_set_local_dev_nr(port, 0);
}
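/*
 * Illustrative sketch (not part of the patch): option 1 from the comment
 * above, written as a standalone decision function. local_bus/local_dev
 * stand for bits [15:8] and [20:16] of register 0x1a04; the enum and the
 * function name are assumptions for illustration only:
 */
enum cfg_route { ROUTE_TYPE0, ROUTE_TYPE1, ROUTE_LOCAL_BRIDGE };

static enum cfg_route example_mvebu_cfg_route(u8 target_bus, u8 target_dev,
					      u8 local_bus, u8 local_dev)
{
	if (target_bus != local_bus)
		return ROUTE_TYPE1;		/* forwarded as Type 1 */
	if (target_dev == local_dev)
		return ROUTE_LOCAL_BRIDGE;	/* the PCIe Root Port itself */
	return ROUTE_TYPE0;			/* device on the shared bus */
}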
pcie->nports = i;
@@ -1122,8 +1337,55 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
bridge->sysdata = pcie;
bridge->ops = &mvebu_pcie_ops;
bridge->align_resource = mvebu_pcie_align_resource;
+ bridge->map_irq = mvebu_pcie_map_irq;
+
+ return pci_host_probe(bridge);
+}
+
+static int mvebu_pcie_remove(struct platform_device *pdev)
+{
+ struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u32 cmd;
+ int i;
+
+ /* Remove PCI bus with all devices. */
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ pci_unlock_rescan_remove();
+
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+
+ if (!port->base)
+ continue;
+
+ /* Disable Root Bridge I/O space, memory space and bus mastering. */
+ cmd = mvebu_readl(port, PCIE_CMD_OFF);
+ cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ mvebu_writel(port, cmd, PCIE_CMD_OFF);
+
+ /* Mask all interrupt sources. */
+ mvebu_writel(port, 0, PCIE_MASK_OFF);
+
+ /* Free config space for emulated root bridge. */
+ pci_bridge_emul_cleanup(&port->bridge);
- return mvebu_pci_host_probe(bridge);
+ /* Disable and clear BARs and windows. */
+ mvebu_pcie_disable_wins(port);
+
+ /* Delete PCIe IO and MEM windows. */
+ if (port->iowin.size)
+ mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
+ if (port->memwin.size)
+ mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);
+
+ /* Power down card and disable clocks. Must be the last step. */
+ mvebu_pcie_powerdown(port);
+ }
+
+ return 0;
}
static const struct of_device_id mvebu_pcie_of_match_table[] = {
@@ -1142,10 +1404,14 @@ static struct platform_driver mvebu_pcie_driver = {
.driver = {
.name = "mvebu-pcie",
.of_match_table = mvebu_pcie_of_match_table,
- /* driver unloading/unbinding currently not supported */
- .suppress_bind_attrs = true,
.pm = &mvebu_pcie_pm_ops,
},
.probe = mvebu_pcie_probe,
+ .remove = mvebu_pcie_remove,
};
-builtin_platform_driver(mvebu_pcie_driver);
+module_platform_driver(mvebu_pcie_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
+MODULE_DESCRIPTION("Marvell EBU PCIe controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c
index afde4aa8f6dc..35804ea394fd 100644
--- a/drivers/pci/controller/pci-rcar-gen2.c
+++ b/drivers/pci/controller/pci-rcar-gen2.c
@@ -93,7 +93,7 @@
#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
-struct rcar_pci_priv {
+struct rcar_pci {
struct device *dev;
void __iomem *reg;
struct resource mem_res;
@@ -105,7 +105,7 @@ struct rcar_pci_priv {
static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
int where)
{
- struct rcar_pci_priv *priv = bus->sysdata;
+ struct rcar_pci *priv = bus->sysdata;
int slot, val;
if (!pci_is_root_bus(bus) || PCI_FUNC(devfn))
@@ -132,7 +132,7 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
{
- struct rcar_pci_priv *priv = pw;
+ struct rcar_pci *priv = pw;
struct device *dev = priv->dev;
u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
@@ -148,7 +148,7 @@ static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
return IRQ_NONE;
}
-static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
+static void rcar_pci_setup_errirq(struct rcar_pci *priv)
{
struct device *dev = priv->dev;
int ret;
@@ -166,11 +166,11 @@ static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG);
}
#else
-static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
+static inline void rcar_pci_setup_errirq(struct rcar_pci *priv) { }
#endif
/* PCI host controller setup */
-static void rcar_pci_setup(struct rcar_pci_priv *priv)
+static void rcar_pci_setup(struct rcar_pci *priv)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(priv);
struct device *dev = priv->dev;
@@ -279,7 +279,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *cfg_res, *mem_res;
- struct rcar_pci_priv *priv;
+ struct rcar_pci *priv;
struct pci_host_bridge *bridge;
void __iomem *reg;
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index e9d5ca245f5e..b5bd10a62adb 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -41,10 +41,9 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
}
if (where_a == 0x4) {
addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
v = readl(addr);
v &= ~0xf;
v |= 2; /* EA entry-1. Base-L */
@@ -56,10 +55,9 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
u32 barl_rb;
addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
barl_orig = readl(addr + 0);
writel(0xffffffff, addr + 0);
barl_rb = readl(addr + 0);
@@ -72,10 +70,9 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
}
if (where_a == 0xc) {
addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
v = readl(addr); /* EA entry-3. Base-H */
set_val(v, where, size, val);
return PCIBIOS_SUCCESSFUL;
@@ -104,10 +101,8 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
}
addr = bus->ops->map_bus(bus, devfn, where_a);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
v = readl(addr);
@@ -135,10 +130,8 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
int where_a = where & ~3;
addr = bus->ops->map_bus(bus, devfn, 0xc);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
v = readl(addr);
@@ -146,10 +139,8 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
cfg_type = (v >> 16) & 0x7f;
addr = bus->ops->map_bus(bus, devfn, 8);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
class_rev = readl(addr);
if (class_rev == 0xffffffff)
@@ -176,10 +167,8 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
}
addr = bus->ops->map_bus(bus, devfn, 0);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
vendor_device = readl(addr);
if (vendor_device == 0xffffffff)
@@ -196,10 +185,9 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
bool is_tns = (vendor_device == 0xa01f177d);
addr = bus->ops->map_bus(bus, devfn, 0x70);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
/* E_CAP */
v = readl(addr);
has_msix = (v & 0xff00) != 0;
@@ -211,10 +199,9 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
}
if (where_a == 0xb0) {
addr = bus->ops->map_bus(bus, devfn, where_a);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
v = readl(addr);
if (v & 0xff00)
pr_err("Bad MSIX cap header: %08x\n", v);
@@ -268,10 +255,9 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
if (where_a == 0x70) {
addr = bus->ops->map_bus(bus, devfn, where_a);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
v = readl(addr);
if (v & 0xff00)
pr_err("Bad PCIe cap header: %08x\n", v);
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 0660b9da204f..06a9855cb431 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -41,10 +41,8 @@ static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
struct pci_config_window *cfg = bus->sysdata;
struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv;
- if (devfn != 0 || where >= 2048) {
- *val = ~0;
+ if (devfn != 0 || where >= 2048)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
/*
* 32-bit accesses only. Write the address to the low order
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index c50ff279903c..bfa259781b69 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -269,9 +269,7 @@ static void xgene_free_domains(struct xgene_msi *msi)
static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
{
- int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long);
-
- xgene_msi->bitmap = kzalloc(size, GFP_KERNEL);
+ xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL);
if (!xgene_msi->bitmap)
return -ENOMEM;
@@ -360,7 +358,7 @@ static int xgene_msi_remove(struct platform_device *pdev)
kfree(msi->msi_groups);
- kfree(msi->bitmap);
+ bitmap_free(msi->bitmap);
msi->bitmap = NULL;
xgene_free_domains(msi);
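/*
 * Sketch (not part of the patch): bitmap_zalloc(nbits, gfp) is the typed
 * replacement for the removed kzalloc(BITS_TO_LONGS(nbits) * sizeof(long))
 * idiom and pairs with bitmap_free(), as the hunk above now does:
 */
static int example_bitmap_lifecycle(void)
{
	unsigned long *bm = bitmap_zalloc(128, GFP_KERNEL);

	if (!bm)
		return -ENOMEM;
	set_bit(5, bm);			/* use like any other bitmap */
	bitmap_free(bm);
	return 0;
}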
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 56d0d50338c8..0d5acbfc7143 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -60,7 +60,7 @@
#define XGENE_PCIE_IP_VER_2 2
#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
-struct xgene_pcie_port {
+struct xgene_pcie {
struct device_node *node;
struct device *dev;
struct clk *clk;
@@ -71,12 +71,12 @@ struct xgene_pcie_port {
u32 version;
};
-static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
+static u32 xgene_pcie_readl(struct xgene_pcie *port, u32 reg)
{
return readl(port->csr_base + reg);
}
-static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
+static void xgene_pcie_writel(struct xgene_pcie *port, u32 reg, u32 val)
{
writel(val, port->csr_base + reg);
}
@@ -86,15 +86,15 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
}
-static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
+static inline struct xgene_pcie *pcie_bus_to_port(struct pci_bus *bus)
{
struct pci_config_window *cfg;
if (acpi_disabled)
- return (struct xgene_pcie_port *)(bus->sysdata);
+ return (struct xgene_pcie *)(bus->sysdata);
cfg = bus->sysdata;
- return (struct xgene_pcie_port *)(cfg->priv);
+ return (struct xgene_pcie *)(cfg->priv);
}
/*
@@ -103,7 +103,7 @@ static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
*/
static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
{
- struct xgene_pcie_port *port = pcie_bus_to_port(bus);
+ struct xgene_pcie *port = pcie_bus_to_port(bus);
if (bus->number >= (bus->primary + 1))
return port->cfg_base + AXI_EP_CFG_ACCESS;
@@ -117,7 +117,7 @@ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
*/
static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
{
- struct xgene_pcie_port *port = pcie_bus_to_port(bus);
+ struct xgene_pcie *port = pcie_bus_to_port(bus);
unsigned int b, d, f;
u32 rtdid_val = 0;
@@ -164,18 +164,18 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct xgene_pcie_port *port = pcie_bus_to_port(bus);
+ struct xgene_pcie *port = pcie_bus_to_port(bus);
if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
PCIBIOS_SUCCESSFUL)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
- * The v1 controller has a bug in its Configuration Request
- * Retry Status (CRS) logic: when CRS Software Visibility is
- * enabled and we read the Vendor and Device ID of a non-existent
- * device, the controller fabricates return data of 0xFFFF0001
- * ("device exists but is not ready") instead of 0xFFFFFFFF
+ * The v1 controller has a bug in its Configuration Request Retry
+ * Status (CRS) logic: when CRS Software Visibility is enabled and
+ * we read the Vendor and Device ID of a non-existent device, the
+ * controller fabricates return data of 0xFFFF0001 ("device exists
+ * but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
* ("device does not exist"). This causes the PCI core to retry
* the read until it times out. Avoid this by not claiming to
* support CRS SV.
@@ -227,7 +227,7 @@ static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
{
struct device *dev = cfg->parent;
struct acpi_device *adev = to_acpi_device(dev);
- struct xgene_pcie_port *port;
+ struct xgene_pcie *port;
struct resource csr;
int ret;
@@ -281,7 +281,7 @@ const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
#endif
#if defined(CONFIG_PCI_XGENE)
-static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
+static u64 xgene_pcie_set_ib_mask(struct xgene_pcie *port, u32 addr,
u32 flags, u64 size)
{
u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
@@ -307,7 +307,7 @@ static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
return mask;
}
-static void xgene_pcie_linkup(struct xgene_pcie_port *port,
+static void xgene_pcie_linkup(struct xgene_pcie *port,
u32 *lanes, u32 *speed)
{
u32 val32;
@@ -322,7 +322,7 @@ static void xgene_pcie_linkup(struct xgene_pcie_port *port,
}
}
-static int xgene_pcie_init_port(struct xgene_pcie_port *port)
+static int xgene_pcie_init_port(struct xgene_pcie *port)
{
struct device *dev = port->dev;
int rc;
@@ -342,7 +342,7 @@ static int xgene_pcie_init_port(struct xgene_pcie_port *port)
return 0;
}
-static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
+static int xgene_pcie_map_reg(struct xgene_pcie *port,
struct platform_device *pdev)
{
struct device *dev = port->dev;
@@ -362,7 +362,7 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
return 0;
}
-static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
+static void xgene_pcie_setup_ob_reg(struct xgene_pcie *port,
struct resource *res, u32 offset,
u64 cpu_addr, u64 pci_addr)
{
@@ -394,7 +394,7 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
}
-static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
+static void xgene_pcie_setup_cfg_reg(struct xgene_pcie *port)
{
u64 addr = port->cfg_addr;
@@ -403,7 +403,7 @@ static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
xgene_pcie_writel(port, CFGCTL, EN_REG);
}
-static int xgene_pcie_map_ranges(struct xgene_pcie_port *port)
+static int xgene_pcie_map_ranges(struct xgene_pcie *port)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
struct resource_entry *window;
@@ -444,7 +444,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port)
return 0;
}
-static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
+static void xgene_pcie_setup_pims(struct xgene_pcie *port, u32 pim_reg,
u64 pim, u64 size)
{
xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
@@ -465,7 +465,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
return 1;
}
- if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
+ if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) {
*ib_reg_mask |= (1 << 0);
return 0;
}
@@ -478,7 +478,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
return -EINVAL;
}
-static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
+static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port,
struct resource_entry *entry,
u8 *ib_reg_mask)
{
@@ -529,7 +529,7 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}
-static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
+static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie *port)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
struct resource_entry *entry;
@@ -542,7 +542,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
}
/* clear BAR configuration which was done by firmware */
-static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
+static void xgene_pcie_clear_config(struct xgene_pcie *port)
{
int i;
@@ -550,7 +550,7 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
xgene_pcie_writel(port, i, 0);
}
-static int xgene_pcie_setup(struct xgene_pcie_port *port)
+static int xgene_pcie_setup(struct xgene_pcie *port)
{
struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
@@ -588,7 +588,7 @@ static int xgene_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
- struct xgene_pcie_port *port;
+ struct xgene_pcie *port;
struct pci_host_bridge *bridge;
int ret;
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 2513e9363236..18b2361d6462 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -510,10 +510,8 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
if (altera_pcie_hide_rc_bar(bus, devfn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
- if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
- *value = 0xffffffff;
+ if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
value);
@@ -767,7 +765,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
struct altera_pcie *pcie;
struct pci_host_bridge *bridge;
int ret;
- const struct of_device_id *match;
+ const struct altera_pcie_data *data;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
@@ -777,11 +775,11 @@ static int altera_pcie_probe(struct platform_device *pdev)
pcie->pdev = pdev;
platform_set_drvdata(pdev, pcie);
- match = of_match_device(altera_pcie_of_match, &pdev->dev);
- if (!match)
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
return -ENODEV;
- pcie->pcie_data = match->data;
+ pcie->pcie_data = data;
ret = altera_pcie_parse_dt(pcie);
if (ret) {
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index b090924b41fe..854d95163112 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -42,8 +42,9 @@
#define CORE_FABRIC_STAT_MASK 0x001F001F
#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
#define CORE_LANE_CFG_REFCLK0REQ BIT(0)
-#define CORE_LANE_CFG_REFCLK1 BIT(1)
+#define CORE_LANE_CFG_REFCLK1REQ BIT(1)
#define CORE_LANE_CFG_REFCLK0ACK BIT(2)
+#define CORE_LANE_CFG_REFCLK1ACK BIT(3)
#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
#define CORE_LANE_CTL_CFGACC BIT(15)
@@ -482,9 +483,9 @@ static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
if (res < 0)
return res;
- rmw_set(CORE_LANE_CFG_REFCLK1, pcie->base + CORE_LANE_CFG(port->idx));
+ rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK1,
+ stat, stat & CORE_LANE_CFG_REFCLK1ACK,
100, 50000);
if (res < 0)
@@ -563,6 +564,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
}
+ rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
+ rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);
+
ret = apple_pcie_port_setup_irq(port);
if (ret)
return ret;
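
The fix above requests REFCLK1 and then polls for the separate acknowledge bit rather than re-testing the request bit it just set. A hedged sketch of that request/acknowledge handshake, with placeholder bit positions:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_REFCLK_REQ	BIT(1)		/* placeholder */
#define DEMO_REFCLK_ACK	BIT(3)		/* placeholder */

static int demo_enable_refclk(void __iomem *reg)
{
	u32 stat;

	/* raise the request bit ... */
	writel_relaxed(readl_relaxed(reg) | DEMO_REFCLK_REQ, reg);

	/* ... then wait up to 50 ms for the hardware's acknowledge */
	return readl_relaxed_poll_timeout(reg, stat,
					  stat & DEMO_REFCLK_ACK,
					  100, 50000);
}
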
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 1fc7bd49a7ad..375c0c40bbf8 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -118,6 +119,7 @@
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
+#define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000
#define PCIE_INTR2_CPU_BASE 0x4300
@@ -144,6 +146,9 @@
#define BRCM_INT_PCI_MSI_NR 32
#define BRCM_INT_PCI_MSI_LEGACY_NR 8
#define BRCM_INT_PCI_MSI_SHIFT 0
+#define BRCM_INT_PCI_MSI_MASK GENMASK(BRCM_INT_PCI_MSI_NR - 1, 0)
+#define BRCM_INT_PCI_MSI_LEGACY_MASK GENMASK(31, \
+ 32 - BRCM_INT_PCI_MSI_LEGACY_NR)
/* MSI target addresses */
#define BRCM_MSI_TARGET_ADDR_LT_4GB 0x0fffffffcULL
@@ -191,6 +196,8 @@ static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie,
static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
+static int brcm_pcie_linkup(struct brcm_pcie *pcie);
+static int brcm_pcie_add_bus(struct pci_bus *bus);
enum {
RGR1_SW_INIT_1,
@@ -205,6 +212,8 @@ enum {
enum pcie_type {
GENERIC,
+ BCM7425,
+ BCM7435,
BCM4908,
BCM7278,
BCM2711,
@@ -223,6 +232,12 @@ static const int pcie_offsets[] = {
[EXT_CFG_DATA] = 0x9004,
};
+static const int pcie_offsets_bmips_7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+};
+
static const struct pcie_cfg_data generic_cfg = {
.offsets = pcie_offsets,
.type = GENERIC,
@@ -230,6 +245,20 @@ static const struct pcie_cfg_data generic_cfg = {
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bmips_7425,
+ .type = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM7435,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
static const struct pcie_cfg_data bcm4908_cfg = {
.offsets = pcie_offsets,
.type = BCM4908,
@@ -257,6 +286,14 @@ static const struct pcie_cfg_data bcm2711_cfg = {
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};
+struct subdev_regulators {
+ unsigned int num_supplies;
+ struct regulator_bulk_data supplies[];
+};
+
+static int pci_subdev_regulators_add_bus(struct pci_bus *bus);
+static void pci_subdev_regulators_remove_bus(struct pci_bus *bus);
+
struct brcm_msi {
struct device *dev;
void __iomem *base;
@@ -266,8 +303,7 @@ struct brcm_msi {
struct mutex lock; /* guards the alloc/free operations */
u64 target_addr;
int irq;
- /* used indicates which MSI interrupts have been alloc'd */
- unsigned long used;
+ DECLARE_BITMAP(used, BRCM_INT_PCI_MSI_NR);
bool legacy;
/* Some chips have MSIs in bits [31..24] of a shared register. */
int legacy_shift;
@@ -295,8 +331,16 @@ struct brcm_pcie {
u32 hw_rev;
void (*perst_set)(struct brcm_pcie *pcie, u32 val);
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ bool refusal_mode;
+ struct subdev_regulators *sr;
+ bool ep_wakeup_capable;
};
+static inline bool is_bmips(const struct brcm_pcie *pcie)
+{
+ return pcie->type == BCM7435 || pcie->type == BCM7425;
+}
+
/*
* This is to convert the size of the inbound "BAR" region to the
* non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
@@ -406,6 +450,99 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
return ssc && pll ? 0 : -EIO;
}
+static void *alloc_subdev_regulators(struct device *dev)
+{
+ static const char * const supplies[] = {
+ "vpcie3v3",
+ "vpcie3v3aux",
+ "vpcie12v",
+ };
+ const size_t size = sizeof(struct subdev_regulators)
+ + sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
+ struct subdev_regulators *sr;
+ int i;
+
+ sr = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (sr) {
+ sr->num_supplies = ARRAY_SIZE(supplies);
+ for (i = 0; i < ARRAY_SIZE(supplies); i++)
+ sr->supplies[i].supply = supplies[i];
+ }
+
+ return sr;
+}
+
+static int pci_subdev_regulators_add_bus(struct pci_bus *bus)
+{
+ struct device *dev = &bus->dev;
+ struct subdev_regulators *sr;
+ int ret;
+
+ if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent))
+ return 0;
+
+ if (dev->driver_data)
+ dev_err(dev, "dev.driver_data unexpectedly non-NULL\n");
+
+ sr = alloc_subdev_regulators(dev);
+ if (!sr)
+ return -ENOMEM;
+
+ dev->driver_data = sr;
+ ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_err(dev, "failed to enable regulators for downstream device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int brcm_pcie_add_bus(struct pci_bus *bus)
+{
+ struct device *dev = &bus->dev;
+ struct brcm_pcie *pcie = (struct brcm_pcie *) bus->sysdata;
+ int ret;
+
+ if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent))
+ return 0;
+
+ ret = pci_subdev_regulators_add_bus(bus);
+ if (ret)
+ return ret;
+
+ /* Grab the regulators for suspend/resume */
+ pcie->sr = bus->dev.driver_data;
+
+ /*
+ * If we have failed linkup there is no point in returning an error,
+ * as doing so currently causes a WARNING() from pci_alloc_child_bus().
+ * We return 0 and turn on the "refusal_mode" so that any further
+ * accesses to the pci_dev just get 0xffffffff.
+ */
+ if (brcm_pcie_linkup(pcie) != 0)
+ pcie->refusal_mode = true;
+
+ return 0;
+}
+
+static void pci_subdev_regulators_remove_bus(struct pci_bus *bus)
+{
+ struct device *dev = &bus->dev;
+ struct subdev_regulators *sr = dev->driver_data;
+
+ if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
+ return;
+
+ if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
+ dev_err(dev, "failed to disable regulators for downstream device\n");
+ dev->driver_data = NULL;
+}
+
/* Limits operation to a specific generation (1, 2, or 3) */
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
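
The subdevice-regulator code above leans on the bulk regulator API so all downstream supplies are fetched and enabled as a group. A minimal sketch of that pattern, assuming a fixed supply list instead of the per-bus devm allocation used above (the supply names follow the hunk):

#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data demo_supplies[] = {
	{ .supply = "vpcie3v3" },
	{ .supply = "vpcie12v" },
};

static int demo_power_downstream(struct device *dev)
{
	int ret;

	/* look up every named supply in one call */
	ret = regulator_bulk_get(dev, ARRAY_SIZE(demo_supplies),
				 demo_supplies);
	if (ret)
		return ret;

	/* and enable them together */
	return regulator_bulk_enable(ARRAY_SIZE(demo_supplies),
				     demo_supplies);
}
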
@@ -443,6 +580,9 @@ static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
+ if (is_bmips(pcie))
+ return;
+
/* Write the cpu & limit addr upper bits */
high_addr_shift =
HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
@@ -534,7 +674,7 @@ static int brcm_msi_alloc(struct brcm_msi *msi)
int hwirq;
mutex_lock(&msi->lock);
- hwirq = bitmap_find_free_region(&msi->used, msi->nr, 0);
+ hwirq = bitmap_find_free_region(msi->used, msi->nr, 0);
mutex_unlock(&msi->lock);
return hwirq;
@@ -543,7 +683,7 @@ static int brcm_msi_alloc(struct brcm_msi *msi)
static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
{
mutex_lock(&msi->lock);
- bitmap_release_region(&msi->used, hwirq, 0);
+ bitmap_release_region(msi->used, hwirq, 0);
mutex_unlock(&msi->lock);
}
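
These hunks turn the 'used' field into a DECLARE_BITMAP(), so the map is a proper unsigned long array (sized for all BRCM_INT_PCI_MSI_NR bits whatever BITS_PER_LONG is) and is passed to the bitmap helpers directly instead of by address. A self-contained sketch of the allocate/release pattern:

#include <linux/bitmap.h>
#include <linux/mutex.h>
#include <linux/types.h>

#define DEMO_NR_MSI	32

static DECLARE_BITMAP(demo_used, DEMO_NR_MSI);
static DEFINE_MUTEX(demo_lock);

static int demo_msi_alloc(void)
{
	int hwirq;

	mutex_lock(&demo_lock);
	/* order 0: claim a single bit */
	hwirq = bitmap_find_free_region(demo_used, DEMO_NR_MSI, 0);
	mutex_unlock(&demo_lock);

	return hwirq;	/* negative when exhausted */
}

static void demo_msi_free(unsigned long hwirq)
{
	mutex_lock(&demo_lock);
	bitmap_release_region(demo_used, hwirq, 0);
	mutex_unlock(&demo_lock);
}
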
@@ -619,7 +759,8 @@ static void brcm_msi_remove(struct brcm_pcie *pcie)
static void brcm_msi_set_regs(struct brcm_msi *msi)
{
- u32 val = __GENMASK(31, msi->legacy_shift);
+ u32 val = msi->legacy ? BRCM_INT_PCI_MSI_LEGACY_MASK :
+ BRCM_INT_PCI_MSI_MASK;
writel(val, msi->intr_base + MSI_INT_MASK_CLR);
writel(val, msi->intr_base + MSI_INT_CLR);
@@ -661,6 +802,12 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
msi->irq = irq;
msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;
+ /*
+ * Sanity check to make sure that the 'used' bitmap in struct brcm_msi
+ * is large enough.
+ */
+ BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
+
if (msi->legacy) {
msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
@@ -711,6 +858,18 @@ static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
/* Accesses to the RC go right to the RC registers if slot==0 */
if (pci_is_root_bus(bus))
return PCI_SLOT(devfn) ? NULL : base + where;
+ if (pcie->refusal_mode) {
+ /*
+ * At this point we do not have link. There will be a CPU
+ * abort -- a quirk of this controller -- if Linux tries
+ * to read any config-space registers besides those
+ * targeting the host bridge. To prevent this we hijack
+ * the address to point to a safe access that will return
+ * 0xffffffff.
+ */
+ writel(0xffffffff, base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+ return base + PCIE_MISC_RC_BAR2_CONFIG_HI + (where & 0x3);
+ }
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
@@ -718,10 +877,35 @@ static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
return base + PCIE_EXT_CFG_DATA + where;
}
+static void __iomem *brcm_pcie_map_conf32(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ void __iomem *base = pcie->base;
+ int idx;
+
+ /* Accesses to the RC go right to the RC registers if slot==0 */
+ if (pci_is_root_bus(bus))
+ return PCI_SLOT(devfn) ? NULL : base + (where & ~0x3);
+
+ /* For devices, write to the config space index register */
+ idx = PCIE_ECAM_OFFSET(bus->number, devfn, (where & ~3));
+ writel(idx, base + IDX_ADDR(pcie));
+ return base + DATA_ADDR(pcie);
+}
+
static struct pci_ops brcm_pcie_ops = {
.map_bus = brcm_pcie_map_conf,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = pci_subdev_regulators_remove_bus,
+};
+
+static struct pci_ops brcm_pcie_ops32 = {
+ .map_bus = brcm_pcie_map_conf32,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
};
static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
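
brcm_pcie_ops32 above pairs a dword-aligned map_bus with the pci_generic_config_*32 helpers, which serve sub-dword accesses from the aligned register (read-modify-write on the write side). A hedged sketch of that shape; demo_map_bus and its sysdata use are placeholders:

#include <linux/pci.h>

static void __iomem *demo_map_bus(struct pci_bus *bus, unsigned int devfn,
				  int where)
{
	void __iomem *base = bus->sysdata;	/* placeholder lookup */

	/* always hand back a 32-bit aligned address */
	return base + (where & ~0x3);
}

static struct pci_ops demo_pcie_ops32 = {
	.map_bus	= demo_map_bus,
	.read		= pci_generic_config_read32,
	.write		= pci_generic_config_write32,
};
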
@@ -863,16 +1047,9 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u64 rc_bar2_offset, rc_bar2_size;
void __iomem *base = pcie->base;
- struct device *dev = pcie->dev;
- struct resource_entry *entry;
- bool ssc_good = false;
- struct resource *res;
- int num_out_wins = 0;
- u16 nlw, cls, lnksta;
- int i, ret, memc;
+ int ret, memc;
u32 tmp, burst, aspm_support;
/* Reset the bridge */
@@ -883,7 +1060,10 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
pcie->bridge_sw_init_set(pcie, 0);
tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
- tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
+ if (is_bmips(pcie))
+ tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
+ else
+ tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
/* Wait for SerDes to be stable */
usleep_range(100, 200);
@@ -893,8 +1073,10 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
* is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
* is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
*/
- if (pcie->type == BCM2711)
- burst = 0x0; /* 128B */
+ if (is_bmips(pcie))
+ burst = 0x1; /* 256 bytes */
+ else if (pcie->type == BCM2711)
+ burst = 0x0; /* 128 bytes */
else if (pcie->type == BCM7278)
burst = 0x3; /* 512 bytes */
else
@@ -957,6 +1139,40 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
if (pcie->gen)
brcm_pcie_set_gen(pcie, pcie->gen);
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
+ /*
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-PCIe bridge (the default setting is to be EP mode).
+ */
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ u32p_replace_bits(&tmp, 0x060400,
+ PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+
+ return 0;
+}
+
+static int brcm_pcie_linkup(struct brcm_pcie *pcie)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ struct device *dev = pcie->dev;
+ void __iomem *base = pcie->base;
+ struct resource_entry *entry;
+ struct resource *res;
+ int num_out_wins = 0;
+ u16 nlw, cls, lnksta;
+ bool ssc_good = false;
+ u32 tmp;
+ int ret, i;
+
/* Unassert the fundamental reset */
pcie->perst_set(pcie, 0);
@@ -988,30 +1204,25 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
return -EINVAL;
}
+ if (is_bmips(pcie)) {
+ u64 start = res->start;
+ unsigned int j, nwins = resource_size(res) / SZ_128M;
+
+ /* bmips PCIe outbound windows have a 128MB max size */
+ if (nwins > BRCM_NUM_PCIE_OUT_WINS)
+ nwins = BRCM_NUM_PCIE_OUT_WINS;
+ for (j = 0; j < nwins; j++, start += SZ_128M)
+ brcm_pcie_set_outbound_win(pcie, j, start,
+ start - entry->offset,
+ SZ_128M);
+ break;
+ }
brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
res->start - entry->offset,
resource_size(res));
num_out_wins++;
}
- /* Don't advertise L0s capability if 'aspm-no-l0s' */
- aspm_support = PCIE_LINK_STATE_L1;
- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
- aspm_support |= PCIE_LINK_STATE_L0S;
- tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- u32p_replace_bits(&tmp, aspm_support,
- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
-
- /*
- * For config space accesses on the RC, show the right class for
- * a PCIe-PCIe bridge (the default setting is to be EP mode).
- */
- tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
- u32p_replace_bits(&tmp, 0x060400,
- PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
-
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
if (ret == 0)
@@ -1140,17 +1351,60 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
pcie->bridge_sw_init_set(pcie, 1);
}
+static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
+{
+ bool *ret = data;
+
+ if (device_may_wakeup(&dev->dev)) {
+ *ret = true;
+ dev_info(&dev->dev, "disable cancelled for wake-up device\n");
+ }
+ return (int) *ret;
+}
+
static int brcm_pcie_suspend(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
int ret;
brcm_pcie_turn_off(pcie);
- ret = brcm_phy_stop(pcie);
- reset_control_rearm(pcie->rescal);
+ /*
+ * If brcm_phy_stop() returns an error, just dev_err(). If we
+ * return the error it will cause the suspend to fail and this is a
+ * forgivable offense that will probably be erased on resume.
+ */
+ if (brcm_phy_stop(pcie))
+ dev_err(dev, "Could not stop phy for suspend\n");
+
+ ret = reset_control_rearm(pcie->rescal);
+ if (ret) {
+ dev_err(dev, "Could not rearm rescal reset\n");
+ return ret;
+ }
+
+ if (pcie->sr) {
+ /*
+ * Now turn off the regulators, but if at least one
+ * downstream device is enabled as a wake-up source, do not
+ * turn off regulators.
+ */
+ pcie->ep_wakeup_capable = false;
+ pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
+ &pcie->ep_wakeup_capable);
+ if (!pcie->ep_wakeup_capable) {
+ ret = regulator_bulk_disable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn off regulators\n");
+ reset_control_reset(pcie->rescal);
+ return ret;
+ }
+ }
+ }
clk_disable_unprepare(pcie->clk);
- return ret;
+ return 0;
}
static int brcm_pcie_resume(struct device *dev)
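
pci_dev_may_wakeup() above doubles as a search predicate: pci_walk_bus() visits every device below the bridge and stops as soon as the callback returns non-zero. A minimal sketch of that wakeup scan:

#include <linux/pci.h>
#include <linux/pm_wakeup.h>

static int demo_dev_may_wakeup(struct pci_dev *dev, void *data)
{
	bool *found = data;

	if (device_may_wakeup(&dev->dev))
		*found = true;

	return *found;	/* non-zero terminates the walk */
}

static bool demo_bus_has_wakeup_dev(struct pci_bus *bus)
{
	bool found = false;

	pci_walk_bus(bus, demo_dev_may_wakeup, &found);
	return found;
}
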
@@ -1161,11 +1415,32 @@ static int brcm_pcie_resume(struct device *dev)
int ret;
base = pcie->base;
- clk_prepare_enable(pcie->clk);
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return ret;
+
+ if (pcie->sr) {
+ if (pcie->ep_wakeup_capable) {
+ /*
+ * We are resuming from a suspend. In the suspend we
+ * did not disable the power supplies, so there is
+ * no need to enable them (and falsely increase their
+ * usage count).
+ */
+ pcie->ep_wakeup_capable = false;
+ } else {
+ ret = regulator_bulk_enable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn on regulators\n");
+ goto err_disable_clk;
+ }
+ }
+ }
ret = reset_control_reset(pcie->rescal);
if (ret)
- goto err_disable_clk;
+ goto err_regulator;
ret = brcm_phy_start(pcie);
if (ret)
@@ -1186,6 +1461,10 @@ static int brcm_pcie_resume(struct device *dev)
if (ret)
goto err_reset;
+ ret = brcm_pcie_linkup(pcie);
+ if (ret)
+ goto err_reset;
+
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
@@ -1193,6 +1472,9 @@ static int brcm_pcie_resume(struct device *dev)
err_reset:
reset_control_rearm(pcie->rescal);
+err_regulator:
+ if (pcie->sr)
+ regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
@@ -1202,8 +1484,10 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
brcm_msi_remove(pcie);
brcm_pcie_turn_off(pcie);
- brcm_phy_stop(pcie);
- reset_control_rearm(pcie->rescal);
+ if (brcm_phy_stop(pcie))
+ dev_err(pcie->dev, "Could not stop phy\n");
+ if (reset_control_rearm(pcie->rescal))
+ dev_err(pcie->dev, "Could not rearm rescal reset\n");
clk_disable_unprepare(pcie->clk);
}
@@ -1226,6 +1510,8 @@ static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
+ { .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
{},
};
@@ -1315,12 +1601,22 @@ static int brcm_pcie_probe(struct platform_device *pdev)
}
}
- bridge->ops = &brcm_pcie_ops;
+ bridge->ops = pcie->type == BCM7425 ? &brcm_pcie_ops32 : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
- return pci_host_probe(bridge);
+ ret = pci_host_probe(bridge);
+ if (!ret && !brcm_pcie_link_up(pcie))
+ ret = -ENODEV;
+
+ if (ret) {
+ brcm_pcie_remove(pdev);
+ return ret;
+ }
+
+ return 0;
+
fail:
__brcm_pcie_remove(pcie);
return ret;
@@ -1329,8 +1625,8 @@ fail:
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
static const struct dev_pm_ops brcm_pcie_pm_ops = {
- .suspend = brcm_pcie_suspend,
- .resume = brcm_pcie_resume,
+ .suspend_noirq = brcm_pcie_suspend,
+ .resume_noirq = brcm_pcie_resume,
};
static struct platform_driver brcm_pcie_driver = {
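
On the dev_pm_ops change above: the noirq phase runs after every device's regular and late suspend callbacks have finished (and, on resume, before they start again), so hooking the bridge there keeps the link up while endpoint drivers may still be doing config accesses. A hedged sketch with placeholder hook bodies:

#include <linux/pm.h>

static int demo_suspend_noirq(struct device *dev)
{
	/* tear down the link only in the noirq phase */
	return 0;
}

static int demo_resume_noirq(struct device *dev)
{
	/* bring the link back before regular resume callbacks run */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.suspend_noirq	= demo_suspend_noirq,
	.resume_noirq	= demo_resume_noirq,
};
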
diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
index f918c713afb0..54b6e6d5bc64 100644
--- a/drivers/pci/controller/pcie-iproc-bcma.c
+++ b/drivers/pci/controller/pcie-iproc-bcma.c
@@ -23,7 +23,7 @@ static void bcma_pcie2_fixup_class(struct pci_dev *dev)
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class);
-static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int iproc_bcma_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct iproc_pcie *pcie = dev->sysdata;
struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev);
@@ -31,7 +31,7 @@ static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return bcma_core_irq(bdev, 5);
}
-static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
+static int iproc_bcma_pcie_probe(struct bcma_device *bdev)
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
@@ -64,33 +64,33 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
if (ret)
return ret;
- pcie->map_irq = iproc_pcie_bcma_map_irq;
+ pcie->map_irq = iproc_bcma_pcie_map_irq;
bcma_set_drvdata(bdev, pcie);
return iproc_pcie_setup(pcie, &bridge->windows);
}
-static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
+static void iproc_bcma_pcie_remove(struct bcma_device *bdev)
{
struct iproc_pcie *pcie = bcma_get_drvdata(bdev);
iproc_pcie_remove(pcie);
}
-static const struct bcma_device_id iproc_pcie_bcma_table[] = {
+static const struct bcma_device_id iproc_bcma_pcie_table[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS),
{},
};
-MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table);
+MODULE_DEVICE_TABLE(bcma, iproc_bcma_pcie_table);
-static struct bcma_driver iproc_pcie_bcma_driver = {
+static struct bcma_driver iproc_bcma_pcie_driver = {
.name = KBUILD_MODNAME,
- .id_table = iproc_pcie_bcma_table,
- .probe = iproc_pcie_bcma_probe,
- .remove = iproc_pcie_bcma_remove,
+ .id_table = iproc_bcma_pcie_table,
+ .probe = iproc_bcma_pcie_probe,
+ .remove = iproc_bcma_pcie_remove,
};
-module_bcma_driver(iproc_pcie_bcma_driver);
+module_bcma_driver(iproc_bcma_pcie_driver);
MODULE_AUTHOR("Hauke Mehrtens");
MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver");
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index b93e7bda101b..538115246c79 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -37,7 +37,7 @@ static const struct of_device_id iproc_pcie_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
-static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
+static int iproc_pltfm_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct iproc_pcie *pcie;
@@ -115,30 +115,30 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return 0;
}
-static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
+static int iproc_pltfm_pcie_remove(struct platform_device *pdev)
{
struct iproc_pcie *pcie = platform_get_drvdata(pdev);
return iproc_pcie_remove(pcie);
}
-static void iproc_pcie_pltfm_shutdown(struct platform_device *pdev)
+static void iproc_pltfm_pcie_shutdown(struct platform_device *pdev)
{
struct iproc_pcie *pcie = platform_get_drvdata(pdev);
iproc_pcie_shutdown(pcie);
}
-static struct platform_driver iproc_pcie_pltfm_driver = {
+static struct platform_driver iproc_pltfm_pcie_driver = {
.driver = {
.name = "iproc-pcie",
.of_match_table = of_match_ptr(iproc_pcie_of_match_table),
},
- .probe = iproc_pcie_pltfm_probe,
- .remove = iproc_pcie_pltfm_remove,
- .shutdown = iproc_pcie_pltfm_shutdown,
+ .probe = iproc_pltfm_pcie_probe,
+ .remove = iproc_pltfm_pcie_remove,
+ .shutdown = iproc_pltfm_pcie_shutdown,
};
-module_platform_driver(iproc_pcie_pltfm_driver);
+module_platform_driver(iproc_pltfm_pcie_driver);
MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
MODULE_DESCRIPTION("Broadcom iPROC PCIe platform driver");
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 36b9d2c46cfa..b3e75bc61ff1 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -659,10 +659,8 @@ static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
void __iomem *addr;
addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
*val = readl(addr);
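
This hunk follows the same convention change as the altera and mediatek ones: on failure the accessor only returns PCIBIOS_DEVICE_NOT_FOUND and leaves *val untouched, since the PCI core's pci_bus_read_config_*() wrappers now fabricate the all-ones PCI_ERROR_RESPONSE value for failed reads. A minimal sketch of an accessor written to that convention (demo_map() is a placeholder that pretends mapping failed):

#include <linux/pci.h>

static void __iomem *demo_map(struct pci_bus *bus, unsigned int devfn,
			      int where)
{
	return NULL;	/* placeholder: mapping failed */
}

static int demo_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr = demo_map(bus, devfn, where);

	/* no '*val = ~0' here; the core fabricates the error value */
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	*val = readl(addr);
	return PCIBIOS_SUCCESSFUL;
}
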
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 17c59b0d6978..7705d61fba4c 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -79,6 +79,9 @@
#define PCIE_ICMD_PM_REG 0x198
#define PCIE_TURN_OFF_LINK BIT(4)
+#define PCIE_MISC_CTRL_REG 0x348
+#define PCIE_DISABLE_DVFSRC_VLT_REQ BIT(1)
+
#define PCIE_TRANS_TABLE_BASE_REG 0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8
@@ -110,7 +113,7 @@ struct mtk_msi_set {
};
/**
- * struct mtk_pcie_port - PCIe port information
+ * struct mtk_gen3_pcie - PCIe port information
* @dev: pointer to PCIe device
* @base: IO mapped register base
* @reg_base: physical register base
@@ -129,7 +132,7 @@ struct mtk_msi_set {
* @lock: lock protecting IRQ bit map
* @msi_irq_in_use: bit map for assigned MSI IRQ
*/
-struct mtk_pcie_port {
+struct mtk_gen3_pcie {
struct device *dev;
void __iomem *base;
phys_addr_t reg_base;
@@ -162,7 +165,7 @@ struct mtk_pcie_port {
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
int where, int size)
{
- struct mtk_pcie_port *port = bus->sysdata;
+ struct mtk_gen3_pcie *pcie = bus->sysdata;
int bytes;
u32 val;
@@ -171,15 +174,15 @@ static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
PCIE_CFG_HEADER(bus->number, devfn);
- writel_relaxed(val, port->base + PCIE_CFGNUM_REG);
+ writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
- struct mtk_pcie_port *port = bus->sysdata;
+ struct mtk_gen3_pcie *pcie = bus->sysdata;
- return port->base + PCIE_CFG_OFFSET_ADDR + where;
+ return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
}
static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -207,7 +210,7 @@ static struct pci_ops mtk_pcie_ops = {
.write = mtk_pcie_config_write,
};
-static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
+static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
resource_size_t cpu_addr,
resource_size_t pci_addr,
resource_size_t size,
@@ -217,12 +220,12 @@ static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
u32 val;
if (num >= PCIE_MAX_TRANS_TABLES) {
- dev_err(port->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
+ dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
return -ENODEV;
}
- table = port->base + PCIE_TRANS_TABLE_BASE_REG +
+ table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
num * PCIE_ATR_TLB_SET_OFFSET;
writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
@@ -244,66 +247,71 @@ static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
return 0;
}
-static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
+static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
int i;
u32 val;
for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
- struct mtk_msi_set *msi_set = &port->msi_sets[i];
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
- msi_set->base = port->base + PCIE_MSI_SET_BASE_REG +
+ msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
i * PCIE_MSI_SET_OFFSET;
- msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG +
+ msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
i * PCIE_MSI_SET_OFFSET;
/* Configure the MSI capture address */
writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
writel_relaxed(upper_32_bits(msi_set->msg_addr),
- port->base + PCIE_MSI_SET_ADDR_HI_BASE +
+ pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
i * PCIE_MSI_SET_ADDR_HI_OFFSET);
}
- val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG);
+ val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
val |= PCIE_MSI_SET_ENABLE;
- writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG);
+ writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);
- val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val |= PCIE_MSI_ENABLE;
- writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}
-static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
+static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
struct resource_entry *entry;
- struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
unsigned int table_index = 0;
int err;
u32 val;
/* Set as RC mode */
- val = readl_relaxed(port->base + PCIE_SETTING_REG);
+ val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
val |= PCIE_RC_MODE;
- writel_relaxed(val, port->base + PCIE_SETTING_REG);
+ writel_relaxed(val, pcie->base + PCIE_SETTING_REG);
/* Set class code */
- val = readl_relaxed(port->base + PCIE_PCI_IDS_1);
+ val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
- writel_relaxed(val, port->base + PCIE_PCI_IDS_1);
+ writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);
/* Mask all INTx interrupts */
- val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val &= ~PCIE_INTX_ENABLE;
- writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
+
+ /* Disable DVFSRC voltage request */
+ val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
+ val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
+ writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
/* Assert all reset signals */
- val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
- writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
/*
- * Described in PCIe CEM specification setctions 2.2 (PERST# Signal)
+ * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
* and 2.2.1 (Initial Power-Up (G3 to S0)).
* The deassertion of PERST# should be delayed 100ms (TPVPERL)
* for the power and clock to become stable.
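
The TPVPERL comment above encodes the PCIe CEM requirement that PERST# stay asserted for at least 100 ms after power and clocks are stable. A hedged sketch of the assert/delay/deassert/poll sequence, with placeholder register offsets and bit positions:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_RST_CTRL_REG	0x148	/* placeholder */
#define DEMO_PE_RSTB		BIT(3)	/* placeholder */
#define DEMO_LINK_STATUS_REG	0x154	/* placeholder */
#define DEMO_PORT_LINKUP	BIT(8)	/* placeholder */

static int demo_bring_up_link(void __iomem *base)
{
	u32 val;

	/* assert PERST# while power and clocks settle */
	val = readl_relaxed(base + DEMO_RST_CTRL_REG);
	val |= DEMO_PE_RSTB;
	writel_relaxed(val, base + DEMO_RST_CTRL_REG);

	msleep(100);	/* TPVPERL: >= 100 ms before deassertion */

	/* deassert PERST# and wait for link training */
	val &= ~DEMO_PE_RSTB;
	writel_relaxed(val, base + DEMO_RST_CTRL_REG);

	return readl_poll_timeout(base + DEMO_LINK_STATUS_REG, val,
				  val & DEMO_PORT_LINKUP, 20, 100000);
}
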
@@ -312,19 +320,19 @@ static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
/* De-assert reset signals */
val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
- writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
/* Check if the link is up or not */
- err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
+ err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
!!(val & PCIE_PORT_LINKUP), 20,
PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
if (err) {
- val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG);
- dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val);
+ val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
+ dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val);
return err;
}
- mtk_pcie_enable_msi(port);
+ mtk_pcie_enable_msi(pcie);
/* Set PCIe translation windows */
resource_list_for_each_entry(entry, &host->windows) {
@@ -347,12 +355,12 @@ static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
pci_addr = res->start - entry->offset;
size = resource_size(res);
- err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
+ err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
type, table_index);
if (err)
return err;
- dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+ dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
range_type, table_index, (unsigned long long)cpu_addr,
(unsigned long long)pci_addr, (unsigned long long)size);
@@ -396,7 +404,7 @@ static struct msi_domain_info mtk_msi_domain_info = {
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
- struct mtk_pcie_port *port = data->domain->host_data;
+ struct mtk_gen3_pcie *pcie = data->domain->host_data;
unsigned long hwirq;
hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
@@ -404,7 +412,7 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->address_hi = upper_32_bits(msi_set->msg_addr);
msg->address_lo = lower_32_bits(msi_set->msg_addr);
msg->data = hwirq;
- dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
+ dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
hwirq, msg->address_hi, msg->address_lo, msg->data);
}
@@ -421,33 +429,33 @@ static void mtk_msi_bottom_irq_ack(struct irq_data *data)
static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
- struct mtk_pcie_port *port = data->domain->host_data;
+ struct mtk_gen3_pcie *pcie = data->domain->host_data;
unsigned long hwirq, flags;
u32 val;
hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
- raw_spin_lock_irqsave(&port->irq_lock, flags);
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
val &= ~BIT(hwirq);
writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
- raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
- struct mtk_pcie_port *port = data->domain->host_data;
+ struct mtk_gen3_pcie *pcie = data->domain->host_data;
unsigned long hwirq, flags;
u32 val;
hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
- raw_spin_lock_irqsave(&port->irq_lock, flags);
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
val |= BIT(hwirq);
writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
- raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static struct irq_chip mtk_msi_bottom_irq_chip = {
@@ -463,22 +471,22 @@ static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *arg)
{
- struct mtk_pcie_port *port = domain->host_data;
+ struct mtk_gen3_pcie *pcie = domain->host_data;
struct mtk_msi_set *msi_set;
int i, hwirq, set_idx;
- mutex_lock(&port->lock);
+ mutex_lock(&pcie->lock);
- hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
+ hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
order_base_2(nr_irqs));
- mutex_unlock(&port->lock);
+ mutex_unlock(&pcie->lock);
if (hwirq < 0)
return -ENOSPC;
set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
- msi_set = &port->msi_sets[set_idx];
+ msi_set = &pcie->msi_sets[set_idx];
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, hwirq + i,
@@ -491,15 +499,15 @@ static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
- struct mtk_pcie_port *port = domain->host_data;
+ struct mtk_gen3_pcie *pcie = domain->host_data;
struct irq_data *data = irq_domain_get_irq_data(domain, virq);
- mutex_lock(&port->lock);
+ mutex_lock(&pcie->lock);
- bitmap_release_region(port->msi_irq_in_use, data->hwirq,
+ bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
order_base_2(nr_irqs));
- mutex_unlock(&port->lock);
+ mutex_unlock(&pcie->lock);
irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
@@ -511,28 +519,28 @@ static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
static void mtk_intx_mask(struct irq_data *data)
{
- struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&port->irq_lock, flags);
- val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
- writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
- raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static void mtk_intx_unmask(struct irq_data *data)
{
- struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&port->irq_lock, flags);
- val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
- writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
- raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
/**
@@ -545,11 +553,11 @@ static void mtk_intx_unmask(struct irq_data *data)
*/
static void mtk_intx_eoi(struct irq_data *data)
{
- struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long hwirq;
hwirq = data->hwirq + PCIE_INTX_SHIFT;
- writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
+ writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}
static struct irq_chip mtk_intx_irq_chip = {
@@ -573,13 +581,13 @@ static const struct irq_domain_ops intx_domain_ops = {
.map = mtk_pcie_intx_map,
};
-static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
+static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
struct device_node *intc_node, *node = dev->of_node;
int ret;
- raw_spin_lock_init(&port->irq_lock);
+ raw_spin_lock_init(&pcie->irq_lock);
/* Setup INTx */
intc_node = of_get_child_by_name(node, "interrupt-controller");
@@ -588,28 +596,28 @@ static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
return -ENODEV;
}
- port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
- if (!port->intx_domain) {
+ pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
+ if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
return -ENODEV;
}
/* Setup MSI */
- mutex_init(&port->lock);
+ mutex_init(&pcie->lock);
- port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
- &mtk_msi_bottom_domain_ops, port);
- if (!port->msi_bottom_domain) {
+ pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
+ &mtk_msi_bottom_domain_ops, pcie);
+ if (!pcie->msi_bottom_domain) {
dev_err(dev, "failed to create MSI bottom domain\n");
ret = -ENODEV;
goto err_msi_bottom_domain;
}
- port->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
+ pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
&mtk_msi_domain_info,
- port->msi_bottom_domain);
- if (!port->msi_domain) {
+ pcie->msi_bottom_domain);
+ if (!pcie->msi_domain) {
dev_err(dev, "failed to create MSI domain\n");
ret = -ENODEV;
goto err_msi_domain;
@@ -618,32 +626,32 @@ static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
return 0;
err_msi_domain:
- irq_domain_remove(port->msi_bottom_domain);
+ irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
- irq_domain_remove(port->intx_domain);
+ irq_domain_remove(pcie->intx_domain);
return ret;
}
-static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
+static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
- irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
- if (port->intx_domain)
- irq_domain_remove(port->intx_domain);
+ if (pcie->intx_domain)
+ irq_domain_remove(pcie->intx_domain);
- if (port->msi_domain)
- irq_domain_remove(port->msi_domain);
+ if (pcie->msi_domain)
+ irq_domain_remove(pcie->msi_domain);
- if (port->msi_bottom_domain)
- irq_domain_remove(port->msi_bottom_domain);
+ if (pcie->msi_bottom_domain)
+ irq_domain_remove(pcie->msi_bottom_domain);
- irq_dispose_mapping(port->irq);
+ irq_dispose_mapping(pcie->irq);
}
-static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
+static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
- struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
unsigned long msi_enable, msi_status;
irq_hw_number_t bit, hwirq;
@@ -658,59 +666,59 @@ static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
- generic_handle_domain_irq(port->msi_bottom_domain, hwirq);
+ generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
}
} while (true);
}
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
- struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
chained_irq_enter(irqchip, desc);
- status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
+ status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
PCIE_INTX_SHIFT)
- generic_handle_domain_irq(port->intx_domain,
+ generic_handle_domain_irq(pcie->intx_domain,
irq_bit - PCIE_INTX_SHIFT);
irq_bit = PCIE_MSI_SHIFT;
for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
PCIE_MSI_SHIFT) {
- mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT);
+ mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);
- writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG);
+ writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
}
chained_irq_exit(irqchip, desc);
}
-static int mtk_pcie_setup_irq(struct mtk_pcie_port *port)
+static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
int err;
- err = mtk_pcie_init_irq_domains(port);
+ err = mtk_pcie_init_irq_domains(pcie);
if (err)
return err;
- port->irq = platform_get_irq(pdev, 0);
- if (port->irq < 0)
- return port->irq;
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq < 0)
+ return pcie->irq;
- irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
+ irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);
return 0;
}
-static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
+static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
int ret;
@@ -718,77 +726,77 @@ static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
if (!regs)
return -EINVAL;
- port->base = devm_ioremap_resource(dev, regs);
- if (IS_ERR(port->base)) {
+ pcie->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(pcie->base)) {
dev_err(dev, "failed to map register base\n");
- return PTR_ERR(port->base);
+ return PTR_ERR(pcie->base);
}
- port->reg_base = regs->start;
+ pcie->reg_base = regs->start;
- port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
- if (IS_ERR(port->phy_reset)) {
- ret = PTR_ERR(port->phy_reset);
+ pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
+ if (IS_ERR(pcie->phy_reset)) {
+ ret = PTR_ERR(pcie->phy_reset);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get PHY reset\n");
return ret;
}
- port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
- if (IS_ERR(port->mac_reset)) {
- ret = PTR_ERR(port->mac_reset);
+ pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
+ if (IS_ERR(pcie->mac_reset)) {
+ ret = PTR_ERR(pcie->mac_reset);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get MAC reset\n");
return ret;
}
- port->phy = devm_phy_optional_get(dev, "pcie-phy");
- if (IS_ERR(port->phy)) {
- ret = PTR_ERR(port->phy);
+ pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get PHY\n");
return ret;
}
- port->num_clks = devm_clk_bulk_get_all(dev, &port->clks);
- if (port->num_clks < 0) {
+ pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
+ if (pcie->num_clks < 0) {
dev_err(dev, "failed to get clocks\n");
- return port->num_clks;
+ return pcie->num_clks;
}
return 0;
}
-static int mtk_pcie_power_up(struct mtk_pcie_port *port)
+static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
int err;
/* PHY power on and enable pipe clock */
- reset_control_deassert(port->phy_reset);
+ reset_control_deassert(pcie->phy_reset);
- err = phy_init(port->phy);
+ err = phy_init(pcie->phy);
if (err) {
dev_err(dev, "failed to initialize PHY\n");
goto err_phy_init;
}
- err = phy_power_on(port->phy);
+ err = phy_power_on(pcie->phy);
if (err) {
dev_err(dev, "failed to power on PHY\n");
goto err_phy_on;
}
/* MAC power on and enable transaction layer clocks */
- reset_control_deassert(port->mac_reset);
+ reset_control_deassert(pcie->mac_reset);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- err = clk_bulk_prepare_enable(port->num_clks, port->clks);
+ err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
if (err) {
dev_err(dev, "failed to enable clocks\n");
goto err_clk_init;
@@ -799,55 +807,55 @@ static int mtk_pcie_power_up(struct mtk_pcie_port *port)
err_clk_init:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
- reset_control_assert(port->mac_reset);
- phy_power_off(port->phy);
+ reset_control_assert(pcie->mac_reset);
+ phy_power_off(pcie->phy);
err_phy_on:
- phy_exit(port->phy);
+ phy_exit(pcie->phy);
err_phy_init:
- reset_control_assert(port->phy_reset);
+ reset_control_assert(pcie->phy_reset);
return err;
}
-static void mtk_pcie_power_down(struct mtk_pcie_port *port)
+static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
- clk_bulk_disable_unprepare(port->num_clks, port->clks);
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
- pm_runtime_put_sync(port->dev);
- pm_runtime_disable(port->dev);
- reset_control_assert(port->mac_reset);
+ pm_runtime_put_sync(pcie->dev);
+ pm_runtime_disable(pcie->dev);
+ reset_control_assert(pcie->mac_reset);
- phy_power_off(port->phy);
- phy_exit(port->phy);
- reset_control_assert(port->phy_reset);
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+ reset_control_assert(pcie->phy_reset);
}
-static int mtk_pcie_setup(struct mtk_pcie_port *port)
+static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
int err;
- err = mtk_pcie_parse_port(port);
+ err = mtk_pcie_parse_port(pcie);
if (err)
return err;
/* Don't touch the hardware registers before power up */
- err = mtk_pcie_power_up(port);
+ err = mtk_pcie_power_up(pcie);
if (err)
return err;
/* Try link up */
- err = mtk_pcie_startup_port(port);
+ err = mtk_pcie_startup_port(pcie);
if (err)
goto err_setup;
- err = mtk_pcie_setup_irq(port);
+ err = mtk_pcie_setup_irq(pcie);
if (err)
goto err_setup;
return 0;
err_setup:
- mtk_pcie_power_down(port);
+ mtk_pcie_power_down(pcie);
return err;
}
@@ -855,30 +863,30 @@ err_setup:
static int mtk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct mtk_pcie_port *port;
+ struct mtk_gen3_pcie *pcie;
struct pci_host_bridge *host;
int err;
- host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!host)
return -ENOMEM;
- port = pci_host_bridge_priv(host);
+ pcie = pci_host_bridge_priv(host);
- port->dev = dev;
- platform_set_drvdata(pdev, port);
+ pcie->dev = dev;
+ platform_set_drvdata(pdev, pcie);
- err = mtk_pcie_setup(port);
+ err = mtk_pcie_setup(pcie);
if (err)
return err;
host->ops = &mtk_pcie_ops;
- host->sysdata = port;
+ host->sysdata = pcie;
err = pci_host_probe(host);
if (err) {
- mtk_pcie_irq_teardown(port);
- mtk_pcie_power_down(port);
+ mtk_pcie_irq_teardown(pcie);
+ mtk_pcie_power_down(pcie);
return err;
}
@@ -887,66 +895,66 @@ static int mtk_pcie_probe(struct platform_device *pdev)
static int mtk_pcie_remove(struct platform_device *pdev)
{
- struct mtk_pcie_port *port = platform_get_drvdata(pdev);
- struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+ struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
pci_lock_rescan_remove();
pci_stop_root_bus(host->bus);
pci_remove_root_bus(host->bus);
pci_unlock_rescan_remove();
- mtk_pcie_irq_teardown(port);
- mtk_pcie_power_down(port);
+ mtk_pcie_irq_teardown(pcie);
+ mtk_pcie_power_down(pcie);
return 0;
}
-static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port)
+static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
int i;
- raw_spin_lock(&port->irq_lock);
+ raw_spin_lock(&pcie->irq_lock);
- port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
- struct mtk_msi_set *msi_set = &port->msi_sets[i];
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
msi_set->saved_irq_state = readl_relaxed(msi_set->base +
PCIE_MSI_SET_ENABLE_OFFSET);
}
- raw_spin_unlock(&port->irq_lock);
+ raw_spin_unlock(&pcie->irq_lock);
}
-static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port)
+static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
int i;
- raw_spin_lock(&port->irq_lock);
+ raw_spin_lock(&pcie->irq_lock);
- writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG);
+ writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);
for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
- struct mtk_msi_set *msi_set = &port->msi_sets[i];
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
writel_relaxed(msi_set->saved_irq_state,
msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
}
- raw_spin_unlock(&port->irq_lock);
+ raw_spin_unlock(&pcie->irq_lock);
}
-static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
+static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
u32 val;
- val = readl_relaxed(port->base + PCIE_ICMD_PM_REG);
+ val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
val |= PCIE_TURN_OFF_LINK;
- writel_relaxed(val, port->base + PCIE_ICMD_PM_REG);
+ writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);
/* Check the link is L2 */
- return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
+ return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
(PCIE_LTSSM_STATE(val) ==
PCIE_LTSSM_STATE_L2_IDLE), 20,
50 * USEC_PER_MSEC);
@@ -954,46 +962,46 @@ static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
{
- struct mtk_pcie_port *port = dev_get_drvdata(dev);
+ struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
u32 val;
/* Trigger link to L2 state */
- err = mtk_pcie_turn_off_link(port);
+ err = mtk_pcie_turn_off_link(pcie);
if (err) {
- dev_err(port->dev, "cannot enter L2 state\n");
+ dev_err(pcie->dev, "cannot enter L2 state\n");
return err;
}
/* Pull down the PERST# pin */
- val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
val |= PCIE_PE_RSTB;
- writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
- dev_dbg(port->dev, "entered L2 states successfully");
+ dev_dbg(pcie->dev, "entered L2 states successfully");
- mtk_pcie_irq_save(port);
- mtk_pcie_power_down(port);
+ mtk_pcie_irq_save(pcie);
+ mtk_pcie_power_down(pcie);
return 0;
}
static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
{
- struct mtk_pcie_port *port = dev_get_drvdata(dev);
+ struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
- err = mtk_pcie_power_up(port);
+ err = mtk_pcie_power_up(pcie);
if (err)
return err;
- err = mtk_pcie_startup_port(port);
+ err = mtk_pcie_startup_port(pcie);
if (err) {
- mtk_pcie_power_down(port);
+ mtk_pcie_power_down(pcie);
return err;
}
- mtk_pcie_irq_restore(port);
+ mtk_pcie_irq_restore(pcie);
return 0;
}
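
mtk_pcie_suspend_noirq() above first asks the controller to take the link to L2 and polls the LTSSM until it reports L2 idle, and only then pulls PERST# and powers down. A hedged sketch of that turn-off step; the status-register offset and state encoding are placeholders:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_ICMD_PM_REG	0x198
#define DEMO_TURN_OFF_LINK	BIT(4)
#define DEMO_LTSSM_STATUS_REG	0x150			/* placeholder */
#define DEMO_LTSSM_STATE(v)	(((v) >> 24) & 0x1f)	/* placeholder */
#define DEMO_LTSSM_L2_IDLE	0x14			/* placeholder */

static int demo_turn_off_link(void __iomem *base)
{
	u32 val;

	/* request entry into L2 */
	val = readl_relaxed(base + DEMO_ICMD_PM_REG);
	val |= DEMO_TURN_OFF_LINK;
	writel_relaxed(val, base + DEMO_ICMD_PM_REG);

	/* wait up to 50 ms for the LTSSM to report L2 idle */
	return readl_poll_timeout(base + DEMO_LTSSM_STATUS_REG, val,
				  DEMO_LTSSM_STATE(val) == DEMO_LTSSM_L2_IDLE,
				  20, 50000);
}
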
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 2f3f974977a3..ddfbd4aebdec 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -365,19 +365,12 @@ static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
{
struct mtk_pcie_port *port;
u32 bn = bus->number;
- int ret;
port = mtk_pcie_find_port(bus, devfn);
- if (!port) {
- *val = ~0;
+ if (!port)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
- if (ret)
- *val = ~0;
- return ret;
+ return mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
}
static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
@@ -702,6 +695,13 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
*/
writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+ /*
+ * Described in PCIe CEM specification sections 2.2 (PERST# Signal) and
+ * 2.2.1 (Initial Power-Up (G3 to S0)). The deassertion of PERST# should
+ * be delayed 100ms (TPVPERL) for the power and clock to become stable.
+ */
+ msleep(100);
+
/* De-assert PHY, PE, PIPE, MAC and configuration reset */
val = readl(port->base + PCIE_RST_CTRL);
val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 329f930d17aa..29d8e81e4181 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -262,7 +262,7 @@ struct mc_msi {
DECLARE_BITMAP(used, MC_NUM_MSI_IRQS);
};
-struct mc_port {
+struct mc_pcie {
void __iomem *axi_base_addr;
struct device *dev;
struct irq_domain *intx_domain;
@@ -382,7 +382,7 @@ static struct {
static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
-static void mc_pcie_enable_msi(struct mc_port *port, void __iomem *base)
+static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base)
{
struct mc_msi *msi = &port->msi;
u32 cap_offset = MC_MSI_CAP_CTRL_OFFSET;
@@ -405,7 +405,7 @@ static void mc_pcie_enable_msi(struct mc_port *port, void __iomem *base)
static void mc_handle_msi(struct irq_desc *desc)
{
- struct mc_port *port = irq_desc_get_handler_data(desc);
+ struct mc_pcie *port = irq_desc_get_handler_data(desc);
struct device *dev = port->dev;
struct mc_msi *msi = &port->msi;
void __iomem *bridge_base_addr =
@@ -428,7 +428,7 @@ static void mc_handle_msi(struct irq_desc *desc)
static void mc_msi_bottom_irq_ack(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
u32 bitpos = data->hwirq;
@@ -443,7 +443,7 @@ static void mc_msi_bottom_irq_ack(struct irq_data *data)
static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
phys_addr_t addr = port->msi.vector_phy;
msg->address_lo = lower_32_bits(addr);
@@ -470,7 +470,7 @@ static struct irq_chip mc_msi_bottom_irq_chip = {
static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
- struct mc_port *port = domain->host_data;
+ struct mc_pcie *port = domain->host_data;
struct mc_msi *msi = &port->msi;
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -503,7 +503,7 @@ static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct mc_port *port = irq_data_get_irq_chip_data(d);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(d);
struct mc_msi *msi = &port->msi;
mutex_lock(&msi->lock);
@@ -534,7 +534,7 @@ static struct msi_domain_info mc_msi_domain_info = {
.chip = &mc_msi_irq_chip,
};
-static int mc_allocate_msi_domains(struct mc_port *port)
+static int mc_allocate_msi_domains(struct mc_pcie *port)
{
struct device *dev = port->dev;
struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
@@ -562,7 +562,7 @@ static int mc_allocate_msi_domains(struct mc_port *port)
static void mc_handle_intx(struct irq_desc *desc)
{
- struct mc_port *port = irq_desc_get_handler_data(desc);
+ struct mc_pcie *port = irq_desc_get_handler_data(desc);
struct device *dev = port->dev;
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -585,7 +585,7 @@ static void mc_handle_intx(struct irq_desc *desc)
static void mc_ack_intx_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
@@ -595,7 +595,7 @@ static void mc_ack_intx_irq(struct irq_data *data)
static void mc_mask_intx_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
unsigned long flags;
@@ -611,7 +611,7 @@ static void mc_mask_intx_irq(struct irq_data *data)
static void mc_unmask_intx_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
unsigned long flags;
@@ -698,7 +698,7 @@ static u32 local_events(void __iomem *addr)
return val;
}
-static u32 get_events(struct mc_port *port)
+static u32 get_events(struct mc_pcie *port)
{
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -715,7 +715,7 @@ static u32 get_events(struct mc_port *port)
static irqreturn_t mc_event_handler(int irq, void *dev_id)
{
- struct mc_port *port = dev_id;
+ struct mc_pcie *port = dev_id;
struct device *dev = port->dev;
struct irq_data *data;
@@ -731,7 +731,7 @@ static irqreturn_t mc_event_handler(int irq, void *dev_id)
static void mc_handle_event(struct irq_desc *desc)
{
- struct mc_port *port = irq_desc_get_handler_data(desc);
+ struct mc_pcie *port = irq_desc_get_handler_data(desc);
unsigned long events;
u32 bit;
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -748,7 +748,7 @@ static void mc_handle_event(struct irq_desc *desc)
static void mc_ack_event_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
u32 event = data->hwirq;
void __iomem *addr;
u32 mask;
@@ -763,7 +763,7 @@ static void mc_ack_event_irq(struct irq_data *data)
static void mc_mask_event_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
u32 event = data->hwirq;
void __iomem *addr;
u32 mask;
@@ -793,7 +793,7 @@ static void mc_mask_event_irq(struct irq_data *data)
static void mc_unmask_event_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
u32 event = data->hwirq;
void __iomem *addr;
u32 mask;
@@ -881,7 +881,7 @@ static int mc_pcie_init_clks(struct device *dev)
return 0;
}
-static int mc_pcie_init_irq_domains(struct mc_port *port)
+static int mc_pcie_init_irq_domains(struct mc_pcie *port)
{
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
@@ -957,7 +957,7 @@ static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
}
static int mc_pcie_setup_windows(struct platform_device *pdev,
- struct mc_port *port)
+ struct mc_pcie *port)
{
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -983,7 +983,7 @@ static int mc_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
struct platform_device *pdev = to_platform_device(dev);
- struct mc_port *port;
+ struct mc_pcie *port;
void __iomem *bridge_base_addr;
void __iomem *ctrl_base_addr;
int ret;
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index b60dfb45ef7b..33eb37a2225c 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -93,8 +93,8 @@ struct mt7621_pcie_port {
* reset lines are inverted.
*/
struct mt7621_pcie {
- void __iomem *base;
struct device *dev;
+ void __iomem *base;
struct list_head ports;
bool resets_inverted;
};
@@ -109,15 +109,6 @@ static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
writel_relaxed(val, pcie->base + reg);
}
-static inline void pcie_rmw(struct mt7621_pcie *pcie, u32 reg, u32 clr, u32 set)
-{
- u32 val = readl_relaxed(pcie->base + reg);
-
- val &= ~clr;
- val |= set;
- writel_relaxed(val, pcie->base + reg);
-}
-
static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg)
{
return readl_relaxed(port->base + reg);
@@ -129,7 +120,7 @@ static inline void pcie_port_write(struct mt7621_pcie_port *port,
writel_relaxed(val, port->base + reg);
}
-static inline u32 mt7621_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
+static inline u32 mt7621_pcie_get_cfgaddr(unsigned int bus, unsigned int slot,
unsigned int func, unsigned int where)
{
return (((where & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) |
@@ -140,7 +131,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
struct mt7621_pcie *pcie = bus->sysdata;
- u32 address = mt7621_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
+ u32 address = mt7621_pcie_get_cfgaddr(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where);
writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
@@ -148,7 +139,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3);
}
-struct pci_ops mt7621_pci_ops = {
+static struct pci_ops mt7621_pcie_ops = {
.map_bus = mt7621_pcie_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
@@ -156,7 +147,7 @@ struct pci_ops mt7621_pci_ops = {
static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
{
- u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);
+ u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg);
pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
return pcie_read(pcie, RALINK_PCI_CONFIG_DATA);
@@ -165,7 +156,7 @@ static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
u32 reg, u32 val)
{
- u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);
+ u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg);
pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
@@ -208,37 +199,6 @@ static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
reset_control_assert(port->pcie_rst);
}
-static int setup_cm_memory_region(struct pci_host_bridge *host)
-{
- struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
- struct device *dev = pcie->dev;
- struct resource_entry *entry;
- resource_size_t mask;
-
- entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
- if (!entry) {
- dev_err(dev, "cannot get memory resource\n");
- return -EINVAL;
- }
-
- if (mips_cps_numiocu(0)) {
- /*
- * FIXME: hardware doesn't accept mask values with 1s after
- * 0s (e.g. 0xffef), so it would be great to warn if that's
- * about to happen
- */
- mask = ~(entry->res->end - entry->res->start);
-
- write_gcr_reg1_base(entry->res->start);
- write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0);
- dev_info(dev, "PCI coherence region base: 0x%08llx, mask/settings: 0x%08llx\n",
- (unsigned long long)read_gcr_reg1_base(),
- (unsigned long long)read_gcr_reg1_mask());
- }
-
- return 0;
-}
-
static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
struct device_node *node,
int slot)
@@ -505,16 +465,16 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host)
{
struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
- host->ops = &mt7621_pci_ops;
+ host->ops = &mt7621_pcie_ops;
host->sysdata = pcie;
return pci_host_probe(host);
}
-static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
+static const struct soc_device_attribute mt7621_pcie_quirks_match[] = {
{ .soc_id = "mt7621", .revision = "E2" }
};
-static int mt7621_pci_probe(struct platform_device *pdev)
+static int mt7621_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct soc_device_attribute *attr;
@@ -535,7 +495,7 @@ static int mt7621_pci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
INIT_LIST_HEAD(&pcie->ports);
- attr = soc_device_match(mt7621_pci_quirks_match);
+ attr = soc_device_match(mt7621_pcie_quirks_match);
if (attr)
pcie->resets_inverted = true;
@@ -557,12 +517,6 @@ static int mt7621_pci_probe(struct platform_device *pdev)
goto remove_resets;
}
- err = setup_cm_memory_region(bridge);
- if (err) {
- dev_err(dev, "error setting up iocu mem regions\n");
- goto remove_resets;
- }
-
return mt7621_pcie_register_host(bridge);
remove_resets:
@@ -572,7 +526,7 @@ remove_resets:
return err;
}
-static int mt7621_pci_remove(struct platform_device *pdev)
+static int mt7621_pcie_remove(struct platform_device *pdev)
{
struct mt7621_pcie *pcie = platform_get_drvdata(pdev);
struct mt7621_pcie_port *port;
@@ -583,18 +537,20 @@ static int mt7621_pci_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mt7621_pci_ids[] = {
+static const struct of_device_id mt7621_pcie_ids[] = {
{ .compatible = "mediatek,mt7621-pci" },
{},
};
-MODULE_DEVICE_TABLE(of, mt7621_pci_ids);
+MODULE_DEVICE_TABLE(of, mt7621_pcie_ids);
-static struct platform_driver mt7621_pci_driver = {
- .probe = mt7621_pci_probe,
- .remove = mt7621_pci_remove,
+static struct platform_driver mt7621_pcie_driver = {
+ .probe = mt7621_pcie_probe,
+ .remove = mt7621_pcie_remove,
.driver = {
.name = "mt7621-pci",
- .of_match_table = of_match_ptr(mt7621_pci_ids),
+ .of_match_table = mt7621_pcie_ids,
},
};
-builtin_platform_driver(mt7621_pci_driver);
+builtin_platform_driver(mt7621_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
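For reference, mt7621_pcie_get_cfgaddr() packs the extended-register nibble (where[11:8]) into bits 27:24, the bus number into bits 23:16 and the slot into bits 15:11 of the RALINK_PCI_CONFIG_ADDR value; the function/register bits are elided in the hunk above. A standalone demo of the visible part of that packing (values are made up):

#include <stdio.h>

/* where[11:8] -> bits 27:24, bus -> bits 23:16, slot -> bits 15:11 */
static unsigned int cfgaddr_hi(unsigned int bus, unsigned int slot,
			       unsigned int where)
{
	return (((where & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11);
}

int main(void)
{
	/* Bus 1, slot 2, extended config offset 0x100 -> 0x01011000 */
	printf("0x%08x\n", cfgaddr_hi(1, 2, 0x100));
	return 0;
}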
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index e12c2d8be05a..38b6e02edfa9 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -50,10 +50,10 @@ struct rcar_msi {
*/
static void __iomem *pcie_base;
/*
- * Static copy of bus clock pointer, so we can check whether the clock
- * is enabled or not.
+ * Static copy of PCIe device pointer, so we can check whether the
+ * device is runtime suspended or not.
*/
-static struct clk *pcie_bus_clk;
+static struct device *pcie_dev;
#endif
/* Structure representing the PCIe interface */
@@ -159,10 +159,8 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
bus, devfn, where, val);
- if (ret != PCIBIOS_SUCCESSFUL) {
- *val = 0xffffffff;
+ if (ret != PCIBIOS_SUCCESSFUL)
return ret;
- }
if (size == 1)
*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
@@ -792,7 +790,7 @@ static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
#ifdef CONFIG_ARM
/* Cache static copy for L1 link state fixup hook on aarch32 */
pcie_base = pcie->base;
- pcie_bus_clk = host->bus_clk;
+ pcie_dev = pcie->dev;
#endif
return 0;
@@ -1062,7 +1060,7 @@ static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
spin_lock_irqsave(&pmsr_lock, flags);
- if (!pcie_base || !__clk_is_enabled(pcie_bus_clk)) {
+ if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
ret = 1;
goto unlock_exit;
}
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index c52316d0bfd2..45a28880f322 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -221,10 +221,8 @@ static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
{
struct rockchip_pcie *rockchip = bus->sysdata;
- if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
- *val = 0xffffffff;
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
if (pci_is_root_bus(bus))
return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
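This hunk, like the MediaTek and R-Car ones above, drops the open-coded *val = 0xffffffff because the PCI core now fills in the error response centrally when a config accessor fails. For reference, the helpers in include/linux/pci.h that this relies on are essentially:

#define PCI_ERROR_RESPONSE		(~0ULL)
#define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
#define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) ~0))

PCI_POSSIBLE_ERROR() is the same check the VMD, cpqphp and pciehp hunks below switch to in place of open-coded comparisons against ~0.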
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index 95426df03200..c7cd44ed4dfc 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -99,10 +99,10 @@
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
/**
- * struct xilinx_cpm_pcie_port - PCIe port information
+ * struct xilinx_cpm_pcie - PCIe port information
+ * @dev: Device pointer
* @reg_base: Bridge Register Base
* @cpm_base: CPM System Level Control and Status Register (SLCR) Base
- * @dev: Device pointer
* @intx_domain: Legacy IRQ domain pointer
* @cpm_domain: CPM IRQ domain pointer
* @cfg: Holds mappings of config space window
@@ -110,10 +110,10 @@
* @irq: Error interrupt number
* @lock: lock protecting shared register access
*/
-struct xilinx_cpm_pcie_port {
+struct xilinx_cpm_pcie {
+ struct device *dev;
void __iomem *reg_base;
void __iomem *cpm_base;
- struct device *dev;
struct irq_domain *intx_domain;
struct irq_domain *cpm_domain;
struct pci_config_window *cfg;
@@ -122,24 +122,24 @@ struct xilinx_cpm_pcie_port {
raw_spinlock_t lock;
};
-static u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg)
+static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
{
return readl_relaxed(port->reg_base + reg);
}
-static void pcie_write(struct xilinx_cpm_pcie_port *port,
+static void pcie_write(struct xilinx_cpm_pcie *port,
u32 val, u32 reg)
{
writel_relaxed(val, port->reg_base + reg);
}
-static bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
+static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port)
{
return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
XILINX_CPM_PCIE_REG_PSCR_LNKUP);
}
-static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
+static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port)
{
unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);
@@ -153,7 +153,7 @@ static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
{
- struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data);
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
@@ -167,7 +167,7 @@ static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
{
- struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data);
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
@@ -211,7 +211,7 @@ static const struct irq_domain_ops intx_domain_ops = {
static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
{
- struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long val;
int i;
@@ -229,7 +229,7 @@ static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
static void xilinx_cpm_mask_event_irq(struct irq_data *d)
{
- struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d);
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
u32 val;
raw_spin_lock(&port->lock);
@@ -241,7 +241,7 @@ static void xilinx_cpm_mask_event_irq(struct irq_data *d)
static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
{
- struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d);
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
u32 val;
raw_spin_lock(&port->lock);
@@ -273,7 +273,7 @@ static const struct irq_domain_ops event_domain_ops = {
static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
- struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long val;
int i;
@@ -327,7 +327,7 @@ static const struct {
static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
{
- struct xilinx_cpm_pcie_port *port = dev_id;
+ struct xilinx_cpm_pcie *port = dev_id;
struct device *dev = port->dev;
struct irq_data *d;
@@ -350,7 +350,7 @@ static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port)
+static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port)
{
if (port->intx_domain) {
irq_domain_remove(port->intx_domain);
@@ -369,7 +369,7 @@ static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port)
*
* Return: '0' on success and error value on failure
*/
-static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
+static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
{
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
@@ -410,7 +410,7 @@ out:
return -ENOMEM;
}
-static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port)
+static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
{
struct device *dev = port->dev;
struct platform_device *pdev = to_platform_device(dev);
@@ -462,7 +462,7 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port)
* xilinx_cpm_pcie_init_port - Initialize hardware
* @port: PCIe port information
*/
-static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
+static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
@@ -497,7 +497,7 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
*
* Return: '0' on success and error value on failure
*/
-static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port,
+static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
struct resource *bus_range)
{
struct device *dev = port->dev;
@@ -523,7 +523,7 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port,
return 0;
}
-static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port)
+static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port)
{
irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
irq_set_chained_handler_and_data(port->irq, NULL, NULL);
@@ -537,7 +537,7 @@ static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port)
*/
static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
{
- struct xilinx_cpm_pcie_port *port;
+ struct xilinx_cpm_pcie *port;
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct resource_entry *bus;
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index a72b4f9a2b00..40d070e54ad2 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -146,7 +146,7 @@
struct nwl_msi { /* MSI information */
struct irq_domain *msi_domain;
- unsigned long *bitmap;
+ DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR);
struct irq_domain *dev_domain;
struct mutex lock; /* protect bitmap variable */
int irq_msi0;
@@ -335,12 +335,10 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
{
- struct nwl_msi *msi;
+ struct nwl_msi *msi = &pcie->msi;
unsigned long status;
u32 bit;
- msi = &pcie->msi;
-
while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
for_each_set_bit(bit, &status, 32) {
nwl_bridge_writel(pcie, 1 << bit, status_reg);
@@ -560,30 +558,21 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
struct nwl_msi *msi = &pcie->msi;
unsigned long base;
int ret;
- int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long);
mutex_init(&msi->lock);
- msi->bitmap = kzalloc(size, GFP_KERNEL);
- if (!msi->bitmap)
- return -ENOMEM;
-
/* Get msi_1 IRQ number */
msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
- if (msi->irq_msi1 < 0) {
- ret = -EINVAL;
- goto err;
- }
+ if (msi->irq_msi1 < 0)
+ return -EINVAL;
irq_set_chained_handler_and_data(msi->irq_msi1,
nwl_pcie_msi_handler_high, pcie);
/* Get msi_0 IRQ number */
msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
- if (msi->irq_msi0 < 0) {
- ret = -EINVAL;
- goto err;
- }
+ if (msi->irq_msi0 < 0)
+ return -EINVAL;
irq_set_chained_handler_and_data(msi->irq_msi0,
nwl_pcie_msi_handler_low, pcie);
@@ -592,8 +581,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
if (!ret) {
dev_err(dev, "MSI not present\n");
- ret = -EIO;
- goto err;
+ return -EIO;
}
/* Enable MSII */
@@ -632,10 +620,6 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);
return 0;
-err:
- kfree(msi->bitmap);
- msi->bitmap = NULL;
- return ret;
}
static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
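Switching struct nwl_msi to DECLARE_BITMAP() trades a kzalloc()'d pointer for a fixed array sized at compile time, which is what lets the err: label and the kfree() cleanup disappear from nwl_pcie_enable_msi(). A self-contained illustration of the pattern (INT_PCI_MSI_NR is assumed to be 64 here purely for the demo):

#include <limits.h>
#include <stdio.h>

#define BITS_TO_LONGS(n) \
	(((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define INT_PCI_MSI_NR 64	/* assumed value for the demo */

struct demo_msi {
	/* was: unsigned long *bitmap, allocated with kzalloc() */
	DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR);
};

int main(void)
{
	struct demo_msi msi = { { 0 } };	/* no allocation, no error path */

	printf("%zu word(s)\n", sizeof(msi.bitmap) / sizeof(msi.bitmap[0]));
	return 0;
}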
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index aa9bdcebc838..cb6e9f7b0152 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -91,18 +91,18 @@
#define XILINX_NUM_MSI_IRQS 128
/**
- * struct xilinx_pcie_port - PCIe port information
- * @reg_base: IO Mapped Register Base
+ * struct xilinx_pcie - PCIe port information
* @dev: Device pointer
+ * @reg_base: IO Mapped Register Base
* @msi_map: Bitmap of allocated MSIs
* @map_lock: Mutex protecting the MSI allocation
* @msi_domain: MSI IRQ domain pointer
* @leg_domain: Legacy IRQ domain pointer
* @resources: Bus Resources
*/
-struct xilinx_pcie_port {
- void __iomem *reg_base;
+struct xilinx_pcie {
struct device *dev;
+ void __iomem *reg_base;
unsigned long msi_map[BITS_TO_LONGS(XILINX_NUM_MSI_IRQS)];
struct mutex map_lock;
struct irq_domain *msi_domain;
@@ -110,35 +110,35 @@ struct xilinx_pcie_port {
struct list_head resources;
};
-static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
+static inline u32 pcie_read(struct xilinx_pcie *pcie, u32 reg)
{
- return readl(port->reg_base + reg);
+ return readl(pcie->reg_base + reg);
}
-static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
+static inline void pcie_write(struct xilinx_pcie *pcie, u32 val, u32 reg)
{
- writel(val, port->reg_base + reg);
+ writel(val, pcie->reg_base + reg);
}
-static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port)
+static inline bool xilinx_pcie_link_up(struct xilinx_pcie *pcie)
{
- return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
+ return (pcie_read(pcie, XILINX_PCIE_REG_PSCR) &
XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
}
/**
* xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
- * @port: PCIe port information
+ * @pcie: PCIe port information
*/
-static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie *pcie)
{
- struct device *dev = port->dev;
- unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+ struct device *dev = pcie->dev;
+ unsigned long val = pcie_read(pcie, XILINX_PCIE_REG_RPEFR);
if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
dev_dbg(dev, "Requester ID %lu\n",
val & XILINX_PCIE_RPEFR_REQ_ID);
- pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
+ pcie_write(pcie, XILINX_PCIE_RPEFR_ALL_MASK,
XILINX_PCIE_REG_RPEFR);
}
}
@@ -152,11 +152,11 @@ static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
*/
static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
- struct xilinx_pcie_port *port = bus->sysdata;
+ struct xilinx_pcie *pcie = bus->sysdata;
- /* Check if link is up when trying to access downstream ports */
+ /* Check if link is up when trying to access downstream PCIe ports */
if (!pci_is_root_bus(bus)) {
- if (!xilinx_pcie_link_up(port))
+ if (!xilinx_pcie_link_up(pcie))
return false;
} else if (devfn > 0) {
/* Only one device down on each root port */
@@ -177,12 +177,12 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct xilinx_pcie_port *port = bus->sysdata;
+ struct xilinx_pcie *pcie = bus->sysdata;
if (!xilinx_pcie_valid_device(bus, devfn))
return NULL;
- return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+ return pcie->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
/* PCIe operations */
@@ -215,7 +215,7 @@ static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mas
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K);
msg->address_lo = lower_32_bits(pa);
@@ -232,14 +232,14 @@ static struct irq_chip xilinx_msi_bottom_chip = {
static int xilinx_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
- struct xilinx_pcie_port *port = domain->host_data;
+ struct xilinx_pcie *pcie = domain->host_data;
int hwirq, i;
- mutex_lock(&port->map_lock);
+ mutex_lock(&pcie->map_lock);
- hwirq = bitmap_find_free_region(port->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs));
+ hwirq = bitmap_find_free_region(pcie->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs));
- mutex_unlock(&port->map_lock);
+ mutex_unlock(&pcie->map_lock);
if (hwirq < 0)
return -ENOSPC;
@@ -256,13 +256,13 @@ static void xilinx_msi_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct xilinx_pcie_port *port = domain->host_data;
+ struct xilinx_pcie *pcie = domain->host_data;
- mutex_lock(&port->map_lock);
+ mutex_lock(&pcie->map_lock);
- bitmap_release_region(port->msi_map, d->hwirq, order_base_2(nr_irqs));
+ bitmap_release_region(pcie->msi_map, d->hwirq, order_base_2(nr_irqs));
- mutex_unlock(&port->map_lock);
+ mutex_unlock(&pcie->map_lock);
}
static const struct irq_domain_ops xilinx_msi_domain_ops = {
@@ -275,7 +275,7 @@ static struct msi_domain_info xilinx_msi_info = {
.chip = &xilinx_msi_top_chip,
};
-static int xilinx_allocate_msi_domains(struct xilinx_pcie_port *pcie)
+static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
{
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
struct irq_domain *parent;
@@ -298,7 +298,7 @@ static int xilinx_allocate_msi_domains(struct xilinx_pcie_port *pcie)
return 0;
}
-static void xilinx_free_msi_domains(struct xilinx_pcie_port *pcie)
+static void xilinx_free_msi_domains(struct xilinx_pcie *pcie)
{
struct irq_domain *parent = pcie->msi_domain->parent;
@@ -342,13 +342,13 @@ static const struct irq_domain_ops intx_domain_ops = {
*/
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
- struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
- struct device *dev = port->dev;
+ struct xilinx_pcie *pcie = (struct xilinx_pcie *)data;
+ struct device *dev = pcie->dev;
u32 val, mask, status;
/* Read interrupt decode and mask registers */
- val = pcie_read(port, XILINX_PCIE_REG_IDR);
- mask = pcie_read(port, XILINX_PCIE_REG_IMR);
+ val = pcie_read(pcie, XILINX_PCIE_REG_IDR);
+ mask = pcie_read(pcie, XILINX_PCIE_REG_IMR);
status = val & mask;
if (!status)
@@ -371,23 +371,23 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (status & XILINX_PCIE_INTR_CORRECTABLE) {
dev_warn(dev, "Correctable error message\n");
- xilinx_pcie_clear_err_interrupts(port);
+ xilinx_pcie_clear_err_interrupts(pcie);
}
if (status & XILINX_PCIE_INTR_NONFATAL) {
dev_warn(dev, "Non fatal error message\n");
- xilinx_pcie_clear_err_interrupts(port);
+ xilinx_pcie_clear_err_interrupts(pcie);
}
if (status & XILINX_PCIE_INTR_FATAL) {
dev_warn(dev, "Fatal error message\n");
- xilinx_pcie_clear_err_interrupts(port);
+ xilinx_pcie_clear_err_interrupts(pcie);
}
if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
struct irq_domain *domain;
- val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+ val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR1);
/* Check whether the interrupt is valid */
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
@@ -397,17 +397,17 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
/* Decode the IRQ number */
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
- val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+ val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
- domain = port->msi_domain->parent;
+ domain = pcie->msi_domain->parent;
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
- domain = port->leg_domain;
+ domain = pcie->leg_domain;
}
/* Clear interrupt FIFO register 1 */
- pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ pcie_write(pcie, XILINX_PCIE_RPIFR1_ALL_MASK,
XILINX_PCIE_REG_RPIFR1);
generic_handle_domain_irq(domain, val);
@@ -442,20 +442,20 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
error:
/* Clear the Interrupt Decode register */
- pcie_write(port, status, XILINX_PCIE_REG_IDR);
+ pcie_write(pcie, status, XILINX_PCIE_REG_IDR);
return IRQ_HANDLED;
}
/**
* xilinx_pcie_init_irq_domain - Initialize IRQ domain
- * @port: PCIe port information
+ * @pcie: PCIe port information
*
* Return: '0' on success and error value on failure
*/
-static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
struct device_node *pcie_intc_node;
int ret;
@@ -466,25 +466,25 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
return -ENODEV;
}
- port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&intx_domain_ops,
- port);
+ pcie);
of_node_put(pcie_intc_node);
- if (!port->leg_domain) {
+ if (!pcie->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENODEV;
}
/* Setup MSI */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- phys_addr_t pa = ALIGN_DOWN(virt_to_phys(port), SZ_4K);
+ phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K);
- ret = xilinx_allocate_msi_domains(port);
+ ret = xilinx_allocate_msi_domains(pcie);
if (ret)
return ret;
- pcie_write(port, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1);
- pcie_write(port, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2);
+ pcie_write(pcie, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1);
+ pcie_write(pcie, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2);
}
return 0;
@@ -492,44 +492,44 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
/**
* xilinx_pcie_init_port - Initialize hardware
- * @port: PCIe port information
+ * @pcie: PCIe port information
*/
-static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
+static void xilinx_pcie_init_port(struct xilinx_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
- if (xilinx_pcie_link_up(port))
+ if (xilinx_pcie_link_up(pcie))
dev_info(dev, "PCIe Link is UP\n");
else
dev_info(dev, "PCIe Link is DOWN\n");
/* Disable all interrupts */
- pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
+ pcie_write(pcie, ~XILINX_PCIE_IDR_ALL_MASK,
XILINX_PCIE_REG_IMR);
/* Clear pending interrupts */
- pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
+ pcie_write(pcie, pcie_read(pcie, XILINX_PCIE_REG_IDR) &
XILINX_PCIE_IMR_ALL_MASK,
XILINX_PCIE_REG_IDR);
/* Enable all interrupts we handle */
- pcie_write(port, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR);
+ pcie_write(pcie, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR);
/* Enable the Bridge enable bit */
- pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
+ pcie_write(pcie, pcie_read(pcie, XILINX_PCIE_REG_RPSC) |
XILINX_PCIE_REG_RPSC_BEN,
XILINX_PCIE_REG_RPSC);
}
/**
* xilinx_pcie_parse_dt - Parse Device tree
- * @port: PCIe port information
+ * @pcie: PCIe port information
*
* Return: '0' on success and error value on failure
*/
-static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+static int xilinx_pcie_parse_dt(struct xilinx_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
struct device_node *node = dev->of_node;
struct resource regs;
unsigned int irq;
@@ -541,14 +541,14 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
return err;
}
- port->reg_base = devm_pci_remap_cfg_resource(dev, &regs);
- if (IS_ERR(port->reg_base))
- return PTR_ERR(port->reg_base);
+ pcie->reg_base = devm_pci_remap_cfg_resource(dev, &regs);
+ if (IS_ERR(pcie->reg_base))
+ return PTR_ERR(pcie->reg_base);
irq = irq_of_parse_and_map(node, 0);
err = devm_request_irq(dev, irq, xilinx_pcie_intr_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "xilinx-pcie", port);
+ "xilinx-pcie", pcie);
if (err) {
dev_err(dev, "unable to request irq %d\n", irq);
return err;
@@ -566,41 +566,41 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
static int xilinx_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct xilinx_pcie_port *port;
+ struct xilinx_pcie *pcie;
struct pci_host_bridge *bridge;
int err;
if (!dev->of_node)
return -ENODEV;
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
return -ENODEV;
- port = pci_host_bridge_priv(bridge);
- mutex_init(&port->map_lock);
- port->dev = dev;
+ pcie = pci_host_bridge_priv(bridge);
+ mutex_init(&pcie->map_lock);
+ pcie->dev = dev;
- err = xilinx_pcie_parse_dt(port);
+ err = xilinx_pcie_parse_dt(pcie);
if (err) {
dev_err(dev, "Parsing DT failed\n");
return err;
}
- xilinx_pcie_init_port(port);
+ xilinx_pcie_init_port(pcie);
- err = xilinx_pcie_init_irq_domain(port);
+ err = xilinx_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
return err;
}
- bridge->sysdata = port;
+ bridge->sysdata = pcie;
bridge->ops = &xilinx_pcie_ops;
err = pci_host_probe(bridge);
if (err)
- xilinx_free_msi_domains(port);
+ xilinx_free_msi_domains(pcie);
return err;
}
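Throughout this rename, the MSI doorbell address stays the 4K-aligned physical address of the driver's own private data: xilinx_compose_msi_msg() advertises it to endpoints, and xilinx_pcie_init_irq_domain() programs the same value into XILINX_PCIE_REG_MSIBASE1/2, so only the match matters and no DMA ever lands there. A worked example of the alignment and the hi/lo split (the address is made up, and ALIGN_DOWN is simplified to the power-of-two case):

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000
/* Simplified power-of-two variant of the kernel's ALIGN_DOWN() */
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t pa = 0x87654321ULL;		/* hypothetical virt_to_phys() result */
	uint64_t msi = ALIGN_DOWN(pa, SZ_4K);	/* 0x87654000 */

	printf("MSIBASE1 (hi) = 0x%08x\n", (uint32_t)(msi >> 32)); /* upper_32_bits() */
	printf("MSIBASE2 (lo) = 0x%08x\n", (uint32_t)msi);	   /* lower_32_bits() */
	return 0;
}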
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index a45e8e59d3d4..cc166c683638 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -501,6 +501,40 @@ static inline void vmd_acpi_begin(void) { }
static inline void vmd_acpi_end(void) { }
#endif /* CONFIG_ACPI */
+static void vmd_domain_reset(struct vmd_dev *vmd)
+{
+ u16 bus, max_buses = resource_size(&vmd->resources[0]);
+ u8 dev, functions, fn, hdr_type;
+ char __iomem *base;
+
+ for (bus = 0; bus < max_buses; bus++) {
+ for (dev = 0; dev < 32; dev++) {
+ base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ PCI_DEVFN(dev, 0), 0);
+
+ hdr_type = readb(base + PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
+
+ functions = (hdr_type & 0x80) ? 8 : 1;
+ for (fn = 0; fn < functions; fn++) {
+ base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ PCI_DEVFN(dev, fn), 0);
+
+ hdr_type = readb(base + PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
+
+ if (hdr_type != PCI_HEADER_TYPE_BRIDGE ||
+ (readw(base + PCI_CLASS_DEVICE) !=
+ PCI_CLASS_BRIDGE_PCI))
+ continue;
+
+ memset_io(base + PCI_IO_BASE, 0,
+ PCI_ROM_ADDRESS1 - PCI_IO_BASE);
+ }
+ }
+ }
+}
+
static void vmd_attach_resources(struct vmd_dev *vmd)
{
vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
@@ -541,7 +575,7 @@ static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
int ret;
ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
- if (ret || vmlock == ~0)
+ if (ret || PCI_POSSIBLE_ERROR(vmlock))
return -ENODEV;
if (MB2_SHADOW_EN(vmlock)) {
@@ -661,6 +695,21 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd)
return 0;
}
+/*
+ * Since VMD is an aperture to regular PCIe root ports, only allow it to
+ * control features that the OS is allowed to control on the physical PCI bus.
+ */
+static void vmd_copy_host_bridge_flags(struct pci_host_bridge *root_bridge,
+ struct pci_host_bridge *vmd_bridge)
+{
+ vmd_bridge->native_pcie_hotplug = root_bridge->native_pcie_hotplug;
+ vmd_bridge->native_shpc_hotplug = root_bridge->native_shpc_hotplug;
+ vmd_bridge->native_aer = root_bridge->native_aer;
+ vmd_bridge->native_pme = root_bridge->native_pme;
+ vmd_bridge->native_ltr = root_bridge->native_ltr;
+ vmd_bridge->native_dpc = root_bridge->native_dpc;
+}
+
static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
struct pci_sysdata *sd = &vmd->sysdata;
@@ -798,6 +847,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
return -ENODEV;
}
+ vmd_copy_host_bridge_flags(pci_find_host_bridge(vmd->dev->bus),
+ to_pci_host_bridge(vmd->bus->bridge));
+
vmd_attach_resources(vmd);
if (vmd->irq_domain)
dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
@@ -805,6 +857,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
vmd_acpi_begin();
pci_scan_child_bus(vmd->bus);
+ vmd_domain_reset(vmd);
+ list_for_each_entry(child, &vmd->bus->children, node)
+ pci_reset_bus(child->self);
pci_assign_unassigned_bus_resources(vmd->bus);
/*
@@ -953,6 +1008,10 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa77f),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
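vmd_domain_reset() walks every bus/device the VMD aperture can expose using standard ECAM arithmetic (bus in bits 27:20 of the offset, devfn in bits 19:12) and wipes the bridge windows of each PCI-PCI bridge it finds. One caveat a reader may spot: the multifunction flag is bit 7 of the header-type byte, which PCI_HEADER_TYPE_MASK (0x7f) clears, so the `hdr_type & 0x80` test above is applied to an already-masked value; the sketch below shows the check on the raw byte instead (all values are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PCI_HEADER_TYPE_MASK 0x7f
#define PCI_HEADER_TYPE_MFD  0x80	/* multi-function device flag */

/* ECAM: bus in bits 27:20, devfn in 19:12, register offset in 11:0 */
static uint32_t ecam_offset(uint8_t bus, uint8_t devfn, uint16_t reg)
{
	return ((uint32_t)bus << 20) | ((uint32_t)devfn << 12) | reg;
}

int main(void)
{
	uint8_t raw = 0x81;	/* hypothetical header-type byte: MFD + type 1 */

	/* Test the MFD bit on the raw value, *before* masking the type. */
	unsigned int functions = (raw & PCI_HEADER_TYPE_MFD) ? 8 : 1;
	uint8_t type = raw & PCI_HEADER_TYPE_MASK;

	printf("offset=0x%05x functions=%u type=%u\n",
	       ecam_offset(1, 0x08, 0), functions, type);
	return 0;
}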
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 5a03401f4571..9a00448c7e61 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -1262,7 +1262,7 @@ static void epf_ntb_db_mw_bar_cleanup(struct epf_ntb *ntb,
}
/**
- * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capaiblity
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 38621558d397..3bc9273d0a08 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -334,7 +334,7 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
u8 encode_int;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
- interrupts > 32)
+ interrupts < 1 || interrupts > 32)
return -EINVAL;
if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
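The added `interrupts < 1` bound matches how MSI advertises vector counts: the Multiple Message Capable field stores log2 of the count, so only 1, 2, 4, 8, 16 and 32 are representable, and the order_base_2() encoding done later in pci_epc_set_msi() is not well defined for zero. A standalone illustration of that encoding (the helper mirrors the kernel's order_base_2() for n >= 1):

#include <stdio.h>

/* log2 rounded up, mirroring the kernel's order_base_2() for n >= 1 */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	/* MSI Multiple Message Capable: count encoded as a power of two */
	for (unsigned int n = 1; n <= 32; n *= 2)
		printf("%2u vectors -> MMC encoding %u\n", n, order_base_2(n));
	return 0;	/* n == 0 is now rejected before this encoding runs */
}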
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index cc6194aa24c1..88f217c82b4f 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -30,11 +30,6 @@ ibmphp:
or ibmphp should store a pointer to its bus in struct slot. Probably the
former.
-* The functions get_max_adapter_speed() and get_bus_name() are commented out.
- Can they be deleted? There are also forward declarations at the top of
- ibmphp_core.c as well as pointers in ibmphp_hotplug_slot_ops, likewise
- commented out.
-
* ibmphp_init_devno() takes a struct slot **, it could instead take a
struct slot *.
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index ed7b58eb64d2..93fd2a621822 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -2273,7 +2273,7 @@ static u32 configure_new_device(struct controller *ctrl, struct pci_func *func
while ((function < max_functions) && (!stop_it)) {
pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
- if (ID == 0xFFFFFFFF) {
+ if (PCI_POSSIBLE_ERROR(ID)) {
function++;
} else {
/* Setup slot structure. */
@@ -2517,7 +2517,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
pci_bus_read_config_dword(pci_bus, PCI_DEVFN(device, 0), 0x00, &ID);
pci_bus->number = func->bus;
- if (ID != 0xFFFFFFFF) { /* device present */
+ if (!PCI_POSSIBLE_ERROR(ID)) { /* device present */
/* Setup slot structure. */
new_slot = cpqhp_slot_create(hold_bus_node->base);
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 17124254d897..197997e264a2 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -50,14 +50,6 @@ static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS
static int init_flag;
-/*
-static int get_max_adapter_speed_1 (struct hotplug_slot *, u8 *, u8);
-
-static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value)
-{
- return get_max_adapter_speed_1 (hs, value, 1);
-}
-*/
static inline int get_cur_bus_info(struct slot **sl)
{
int rc = 1;
@@ -401,69 +393,6 @@ static int get_max_bus_speed(struct slot *slot)
return rc;
}
-/*
-static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 *value, u8 flag)
-{
- int rc = -ENODEV;
- struct slot *pslot;
- struct slot myslot;
-
- debug("get_max_adapter_speed_1 - Entry hotplug_slot[%lx] pvalue[%lx]\n",
- (ulong)hotplug_slot, (ulong) value);
-
- if (flag)
- ibmphp_lock_operations();
-
- if (hotplug_slot && value) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
-
- if (!(SLOT_LATCH (myslot.status)) &&
- (SLOT_PRESENT (myslot.status))) {
- rc = ibmphp_hpc_readslot(pslot,
- READ_EXTSLOTSTATUS,
- &(myslot.ext_status));
- if (!rc)
- *value = SLOT_SPEED(myslot.ext_status);
- } else
- *value = MAX_ADAPTER_NONE;
- }
- }
-
- if (flag)
- ibmphp_unlock_operations();
-
- debug("get_max_adapter_speed_1 - Exit rc[%d] value[%x]\n", rc, *value);
- return rc;
-}
-
-static int get_bus_name(struct hotplug_slot *hotplug_slot, char *value)
-{
- int rc = -ENODEV;
- struct slot *pslot = NULL;
-
- debug("get_bus_name - Entry hotplug_slot[%lx]\n", (ulong)hotplug_slot);
-
- ibmphp_lock_operations();
-
- if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- rc = 0;
- snprintf(value, 100, "Bus %x", pslot->bus);
- }
- } else
- rc = -ENODEV;
-
- ibmphp_unlock_operations();
- debug("get_bus_name - Exit rc[%d] value[%x]\n", rc, *value);
- return rc;
-}
-*/
-
/****************************************************************************
* This routine will initialize the ops data structure used in the validate
* function. It will also power off empty slots that are powered on since BIOS
@@ -1231,9 +1160,6 @@ const struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
.get_attention_status = get_attention_status,
.get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_present,
-/* .get_max_adapter_speed = get_max_adapter_speed,
- .get_bus_name_status = get_bus_name,
-*/
};
static void ibmphp_unload(void)
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 918dccbc74b6..e0a614acee05 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -75,6 +75,8 @@ extern int pciehp_poll_time;
* @reset_lock: prevents access to the Data Link Layer Link Active bit in the
* Link Status register and to the Presence Detect State bit in the Slot
* Status register during a slot reset which may cause them to flap
+ * @depth: Number of additional hotplug ports in the path to the root bus,
+ * used as lock subclass for @reset_lock
* @ist_running: flag to keep user request waiting while IRQ thread is running
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
@@ -106,6 +108,7 @@ struct controller {
struct hotplug_slot hotplug_slot; /* hotplug core interface */
struct rw_semaphore reset_lock;
+ unsigned int depth;
unsigned int ist_running;
int request_result;
wait_queue_head_t requester;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index f34114d45259..4042d87d539d 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -166,7 +166,7 @@ static void pciehp_check_presence(struct controller *ctrl)
{
int occupied;
- down_read(&ctrl->reset_lock);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
mutex_lock(&ctrl->state_lock);
occupied = pciehp_card_present_or_link_active(ctrl);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 83a0fa119cae..1c1ebf3dad43 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -89,7 +89,7 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
do {
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- if (slot_status == (u16) ~0) {
+ if (PCI_POSSIBLE_ERROR(slot_status)) {
ctrl_info(ctrl, "%s: no response from device\n",
__func__);
return 0;
@@ -165,7 +165,7 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
pcie_wait_cmd(ctrl);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
- if (slot_ctrl == (u16) ~0) {
+ if (PCI_POSSIBLE_ERROR(slot_ctrl)) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
goto out;
}
@@ -236,7 +236,7 @@ int pciehp_check_link_active(struct controller *ctrl)
int ret;
ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0)
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
return -ENODEV;
ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
@@ -443,7 +443,7 @@ int pciehp_card_present(struct controller *ctrl)
int ret;
ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0)
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(slot_status))
return -ENODEV;
return !!(slot_status & PCI_EXP_SLTSTA_PDS);
@@ -583,7 +583,7 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
* the corresponding link change may have been ignored above.
* Synthesize it to ensure that it is acted on.
*/
- down_read(&ctrl->reset_lock);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
if (!pciehp_check_link_active(ctrl))
pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
up_read(&ctrl->reset_lock);
@@ -621,7 +621,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
read_status:
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
- if (status == (u16) ~0) {
+ if (PCI_POSSIBLE_ERROR(status)) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
if (parent)
pm_runtime_put(parent);
@@ -642,6 +642,8 @@ read_status:
*/
if (ctrl->power_fault_detected)
status &= ~PCI_EXP_SLTSTA_PFD;
+ else if (status & PCI_EXP_SLTSTA_PFD)
+ ctrl->power_fault_detected = true;
events |= status;
if (!events) {
@@ -651,7 +653,7 @@ read_status:
}
if (status) {
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
/*
* In MSI mode, all event bits must be zero before the port
@@ -725,8 +727,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
}
/* Check Power Fault Detected */
- if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
- ctrl->power_fault_detected = 1;
+ if (events & PCI_EXP_SLTSTA_PFD) {
ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
PCI_EXP_SLTCTL_ATTN_IND_ON);
@@ -746,7 +747,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
* Disable requests have higher priority than Presence Detect Changed
* or Data Link Layer State Changed events.
*/
- down_read(&ctrl->reset_lock);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
if (events & DISABLE_SLOT)
pciehp_handle_disable_request(ctrl);
else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
@@ -906,7 +907,7 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
if (probe)
return 0;
- down_write(&ctrl->reset_lock);
+ down_write_nested(&ctrl->reset_lock, ctrl->depth);
if (!ATTN_BUTTN(ctrl)) {
ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
@@ -962,6 +963,20 @@ static inline void dbg_ctrl(struct controller *ctrl)
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
+static inline int pcie_hotplug_depth(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ int depth = 0;
+
+ while (bus->parent) {
+ bus = bus->parent;
+ if (bus->self && bus->self->is_hotplug_bridge)
+ depth++;
+ }
+
+ return depth;
+}
+
struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
@@ -975,6 +990,7 @@ struct controller *pcie_init(struct pcie_device *dev)
return NULL;
ctrl->pcie = dev;
+ ctrl->depth = pcie_hotplug_depth(dev->port);
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
if (pdev->hotplug_user_indicators)
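The new depth field exists solely for lockdep: all pciehp controllers share one lock class for reset_lock, so when one hotplug port sits below another (as in Thunderbolt daisy chains) taking both locks looks like a self-deadlock. Counting the hotplug bridges on the path to the root bus and passing that count as the subclass to down_read_nested()/down_write_nested() makes the nesting explicit. A minimal sketch of the idea (the struct and function names are illustrative, not the driver's):

#include <linux/rwsem.h>

struct hp_ctrl {
	struct rw_semaphore reset_lock;
	unsigned int depth;	/* number of hotplug bridges above this port */
};

/* Parent (depth 0) and child (depth 1) ports may be locked together;
 * distinct lockdep subclasses make the nesting legal. */
static void lock_parent_then_child(struct hp_ctrl *parent, struct hp_ctrl *child)
{
	down_read_nested(&parent->reset_lock, parent->depth);	/* subclass 0 */
	down_read_nested(&child->reset_lock, child->depth);	/* subclass 1 */

	/* ... handle the nested hotplug event ... */

	up_read(&child->reset_lock);
	up_read(&parent->reset_lock);
}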
diff --git a/drivers/pci/msi/Makefile b/drivers/pci/msi/Makefile
new file mode 100644
index 000000000000..93ef7b9e404d
--- /dev/null
+++ b/drivers/pci/msi/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the PCI/MSI subsystem
+obj-$(CONFIG_PCI) += pcidev_msi.o
+obj-$(CONFIG_PCI_MSI) += msi.o
+obj-$(CONFIG_PCI_MSI_IRQ_DOMAIN) += irqdomain.o
+obj-$(CONFIG_PCI_MSI_ARCH_FALLBACKS) += legacy.o
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
new file mode 100644
index 000000000000..e9cf318e6670
--- /dev/null
+++ b/drivers/pci/msi/irqdomain.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Message Signaled Interrupt (MSI) - irqdomain support
+ */
+#include <linux/acpi_iort.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+
+#include "msi.h"
+
+int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct irq_domain *domain;
+
+ domain = dev_get_msi_domain(&dev->dev);
+ if (domain && irq_domain_is_hierarchy(domain))
+ return msi_domain_alloc_irqs_descs_locked(domain, &dev->dev, nvec);
+
+ return pci_msi_legacy_setup_msi_irqs(dev, nvec, type);
+}
+
+void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct irq_domain *domain;
+
+ domain = dev_get_msi_domain(&dev->dev);
+ if (domain && irq_domain_is_hierarchy(domain))
+ msi_domain_free_irqs_descs_locked(domain, &dev->dev);
+ else
+ pci_msi_legacy_teardown_msi_irqs(dev);
+ msi_free_msi_descs(&dev->dev);
+}
+
+/**
+ * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
+ * @irq_data: Pointer to interrupt data of the MSI interrupt
+ * @msg: Pointer to the message
+ */
+static void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
+
+ /*
+ * For MSI-X desc->irq is always equal to irq_data->irq. For
+ * MSI only the first interrupt of MULTI MSI passes the test.
+ */
+ if (desc->irq == irq_data->irq)
+ __pci_write_msi_msg(desc, msg);
+}
+
+/**
+ * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
+ * @desc: Pointer to the MSI descriptor
+ *
+ * The ID number is only used within the irqdomain.
+ */
+static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
+{
+ struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+
+ return (irq_hw_number_t)desc->msi_index |
+ pci_dev_id(dev) << 11 |
+ (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+}
+
+static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
+{
+ return !desc->pci.msi_attrib.is_msix && desc->nvec_used > 1;
+}
+
+/**
+ * pci_msi_domain_check_cap - Verify that @domain supports the capabilities
+ * for @dev
+ * @domain: The interrupt domain to check
+ * @info: The domain info for verification
+ * @dev: The device to check
+ *
+ * Returns:
+ * 0 if the functionality is supported
+ * 1 if Multi MSI is requested, but the domain does not support it
+ * -ENOTSUPP otherwise
+ */
+static int pci_msi_domain_check_cap(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ struct device *dev)
+{
+ struct msi_desc *desc = msi_first_desc(dev, MSI_DESC_ALL);
+
+ /* Special handling to support __pci_enable_msi_range() */
+ if (pci_msi_desc_is_multi_msi(desc) &&
+ !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
+ return 1;
+
+ if (desc->pci.msi_attrib.is_msix) {
+ if (!(info->flags & MSI_FLAG_PCI_MSIX))
+ return -ENOTSUPP;
+
+ if (info->flags & MSI_FLAG_MSIX_CONTIGUOUS) {
+ unsigned int idx = 0;
+
+ /* Check for gaps in the entry indices */
+ msi_for_each_desc(desc, dev, MSI_DESC_ALL) {
+ if (desc->msi_index != idx++)
+ return -ENOTSUPP;
+ }
+ }
+ }
+ return 0;
+}
+
+static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = pci_msi_domain_calc_hwirq(desc);
+}
+
+static struct msi_domain_ops pci_msi_domain_ops_default = {
+ .set_desc = pci_msi_domain_set_desc,
+ .msi_check = pci_msi_domain_check_cap,
+};
+
+static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ if (ops == NULL) {
+ info->ops = &pci_msi_domain_ops_default;
+ } else {
+ if (ops->set_desc == NULL)
+ ops->set_desc = pci_msi_domain_set_desc;
+ if (ops->msi_check == NULL)
+ ops->msi_check = pci_msi_domain_check_cap;
+ }
+}
+
+static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ BUG_ON(!chip);
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+ if (!chip->irq_mask)
+ chip->irq_mask = pci_msi_mask_irq;
+ if (!chip->irq_unmask)
+ chip->irq_unmask = pci_msi_unmask_irq;
+}
+
+/**
+ * pci_msi_create_irq_domain - Create a MSI interrupt domain
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a MSI interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE))
+ info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
+
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ pci_msi_domain_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ pci_msi_domain_update_chip_ops(info);
+
+ info->flags |= MSI_FLAG_ACTIVATE_EARLY | MSI_FLAG_DEV_SYSFS;
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
+ info->flags |= MSI_FLAG_MUST_REACTIVATE;
+
+ /* PCI-MSI is oneshot-safe */
+ info->chip->flags |= IRQCHIP_ONESHOT_SAFE;
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (!domain)
+ return NULL;
+
+ irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI);
+ return domain;
+}
+EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
+
+/*
+ * Users of the generic MSI infrastructure expect a device to have a single ID,
+ * so with DMA aliases we have to pick the least-worst compromise. Devices with
+ * DMA phantom functions tend to still emit MSIs from the real function number,
+ * so we ignore those and only consider topological aliases where either the
+ * alias device or RID appears on a different bus number. We also make the
+ * reasonable assumption that bridges are walked in an upstream direction (so
+ * the last one seen wins), and the much braver assumption that the most likely
+ * case is that of PCI->PCIe so we should always use the alias RID. This echoes
+ * the logic from intel_irq_remapping's set_msi_sid(), which presumably works
+ * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions
+ * for taking ownership all we can really do is close our eyes and hope...
+ */
+static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
+{
+ u32 *pa = data;
+ u8 bus = PCI_BUS_NUM(*pa);
+
+ if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus)
+ *pa = alias;
+
+ return 0;
+}
+
+/**
+ * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
+ * @domain: The interrupt domain
+ * @pdev: The PCI device.
+ *
+ * The RID for a device is formed from the alias, with a firmware
+ * supplied mapping applied
+ *
+ * Returns: The RID.
+ */
+u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
+{
+ struct device_node *of_node;
+ u32 rid = pci_dev_id(pdev);
+
+ pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+
+ of_node = irq_domain_get_of_node(domain);
+ rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
+ iort_msi_map_id(&pdev->dev, rid);
+
+ return rid;
+}
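A worked example with illustrative values: a function at 0000:03:00.1 starts with rid = PCI_DEVID(0x03, PCI_DEVFN(0, 1)) = 0x0301. If it sits behind a PCIe-to-PCI bridge at 02:01.0, get_msi_id_cb() above replaces that with the bridge's alias 0x0208, and the OF msi-map or ACPI IORT lookup may translate the result once more before it reaches the MSI controller.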
+
+/**
+ * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
+ * @pdev: The PCI device
+ *
+ * Use the firmware data to find a device-specific MSI domain
+ * (i.e. not one that is set as a default).
+ *
+ * Returns: The corresponding MSI domain or NULL if none has been found.
+ */
+struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+{
+ struct irq_domain *dom;
+ u32 rid = pci_dev_id(pdev);
+
+ pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+ dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
+ if (!dom)
+ dom = iort_get_device_domain(&pdev->dev, rid,
+ DOMAIN_BUS_PCI_MSI);
+ return dom;
+}
+
+/**
+ * pci_dev_has_special_msi_domain - Check whether the device is handled by
+ * a non-standard PCI-MSI domain
+ * @pdev: The PCI device to check.
+ *
+ * Returns: True if the device irqdomain or the bus irqdomain is
+ * non-standard PCI/MSI.
+ */
+bool pci_dev_has_special_msi_domain(struct pci_dev *pdev)
+{
+ struct irq_domain *dom = dev_get_msi_domain(&pdev->dev);
+
+ if (!dom)
+ dom = dev_get_msi_domain(&pdev->bus->dev);
+
+ if (!dom)
+ return true;
+
+ return dom->bus_token != DOMAIN_BUS_PCI_MSI;
+}
diff --git a/drivers/pci/msi/legacy.c b/drivers/pci/msi/legacy.c
new file mode 100644
index 000000000000..db761adef652
--- /dev/null
+++ b/drivers/pci/msi/legacy.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Message Signaled Interrupt (MSI).
+ *
+ * Legacy architecture-specific setup and teardown mechanism.
+ */
+#include "msi.h"
+
+/* Arch hooks */
+int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+{
+ return -EINVAL;
+}
+
+void __weak arch_teardown_msi_irq(unsigned int irq)
+{
+}
+
+int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct msi_desc *desc;
+ int ret;
+
+ /*
+ * If an architecture wants to support multiple MSI, it needs to
+ * override arch_setup_msi_irqs()
+ */
+ if (type == PCI_CAP_ID_MSI && nvec > 1)
+ return 1;
+
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
+ ret = arch_setup_msi_irq(dev, desc);
+ if (ret)
+ return ret < 0 ? ret : -ENOSPC;
+ }
+
+ return 0;
+}
+
+void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *desc;
+ int i;
+
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) {
+ for (i = 0; i < desc->nvec_used; i++)
+ arch_teardown_msi_irq(desc->irq + i);
+ }
+}
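For context, an architecture that still relies on these fallbacks provides its own strong definitions. The sketch below is hypothetical; example_alloc_hwirq(), example_compose_msi_msg() and example_free_hwirq() stand in for whatever vector allocator and message composer the platform actually has:

int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
        int irq = example_alloc_hwirq();                /* hypothetical */

        if (irq < 0)
                return irq;

        irq_set_msi_desc(irq, desc);
        example_compose_msi_msg(dev, irq, &desc->msg);  /* hypothetical */
        pci_write_msi_msg(irq, &desc->msg);
        return 0;
}

void arch_teardown_msi_irq(unsigned int irq)
{
        irq_set_msi_desc(irq, NULL);
        example_free_hwirq(irq);                        /* hypothetical */
}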
+
+static int pci_msi_setup_check_result(struct pci_dev *dev, int type, int ret)
+{
+ struct msi_desc *desc;
+ int avail = 0;
+
+ if (type != PCI_CAP_ID_MSIX || ret >= 0)
+ return ret;
+
+ /* Scan the MSI descriptors for successfully allocated ones. */
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED)
+ avail++;
+
+ return avail ? avail : ret;
+}
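A short worked example of the partial-success path: a driver asks for 8 MSI-X vectors and the arch backend fails after associating 3 descriptors. The scan above counts avail = 3 and returns that instead of the error, so __pci_enable_msix_range() can retry the allocation with the smaller vector count.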
+
+int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ int ret = arch_setup_msi_irqs(dev, nvec, type);
+
+ ret = pci_msi_setup_check_result(dev, type, ret);
+ if (!ret)
+ ret = msi_device_populate_sysfs(&dev->dev);
+ return ret;
+}
+
+void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
+{
+ msi_device_destroy_sysfs(&dev->dev);
+ arch_teardown_msi_irqs(dev);
+}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi/msi.c
index d84cf30bb279..c19c7ca58186 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -6,156 +6,29 @@
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
* Copyright (C) 2016 Christoph Hellwig.
*/
-
#include <linux/err.h>
-#include <linux/mm.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
#include <linux/export.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/msi.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/acpi_iort.h>
-#include <linux/slab.h>
-#include <linux/irqdomain.h>
-#include <linux/of_irq.h>
-
-#include "pci.h"
-
-#ifdef CONFIG_PCI_MSI
+#include <linux/irq.h>
+
+#include "../pci.h"
+#include "msi.h"
static int pci_msi_enable = 1;
int pci_msi_ignore_mask;
-#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
-
-#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
-static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- struct irq_domain *domain;
-
- domain = dev_get_msi_domain(&dev->dev);
- if (domain && irq_domain_is_hierarchy(domain))
- return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
-
- return arch_setup_msi_irqs(dev, nvec, type);
-}
-
-static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
-{
- struct irq_domain *domain;
-
- domain = dev_get_msi_domain(&dev->dev);
- if (domain && irq_domain_is_hierarchy(domain))
- msi_domain_free_irqs(domain, &dev->dev);
- else
- arch_teardown_msi_irqs(dev);
-}
-#else
-#define pci_msi_setup_msi_irqs arch_setup_msi_irqs
-#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs
-#endif
-
-#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
-/* Arch hooks */
-int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
-{
- return -EINVAL;
-}
-
-void __weak arch_teardown_msi_irq(unsigned int irq)
-{
-}
-
-int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- struct msi_desc *entry;
- int ret;
-
- /*
- * If an architecture wants to support multiple MSI, it needs to
- * override arch_setup_msi_irqs()
- */
- if (type == PCI_CAP_ID_MSI && nvec > 1)
- return 1;
-
- for_each_pci_msi_entry(entry, dev) {
- ret = arch_setup_msi_irq(dev, entry);
- if (ret < 0)
- return ret;
- if (ret > 0)
- return -ENOSPC;
- }
-
- return 0;
-}
-
-void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
-{
- int i;
- struct msi_desc *entry;
-
- for_each_pci_msi_entry(entry, dev)
- if (entry->irq)
- for (i = 0; i < entry->nvec_used; i++)
- arch_teardown_msi_irq(entry->irq + i);
-}
-#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
-
-static void default_restore_msi_irq(struct pci_dev *dev, int irq)
-{
- struct msi_desc *entry;
-
- entry = NULL;
- if (dev->msix_enabled) {
- for_each_pci_msi_entry(entry, dev) {
- if (irq == entry->irq)
- break;
- }
- } else if (dev->msi_enabled) {
- entry = irq_get_msi_desc(irq);
- }
-
- if (entry)
- __pci_write_msi_msg(entry, &entry->msg);
-}
-
-void __weak arch_restore_msi_irqs(struct pci_dev *dev)
-{
- return default_restore_msi_irqs(dev);
-}
-
-/*
- * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
- * mask all MSI interrupts by clearing the MSI enable bit does not work
- * reliably as devices without an INTx disable bit will then generate a
- * level IRQ which will never be cleared.
- */
-static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
-{
- /* Don't shift by >= width of type */
- if (desc->msi_attrib.multi_cap >= 5)
- return 0xffffffff;
- return (1 << (1 << desc->msi_attrib.multi_cap)) - 1;
-}
-
static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
{
- raw_spinlock_t *lock = &desc->dev->msi_lock;
+ raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock;
unsigned long flags;
- if (!desc->msi_attrib.can_mask)
+ if (!desc->pci.msi_attrib.can_mask)
return;
raw_spin_lock_irqsave(lock, flags);
- desc->msi_mask &= ~clear;
- desc->msi_mask |= set;
- pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
- desc->msi_mask);
+ desc->pci.msi_mask &= ~clear;
+ desc->pci.msi_mask |= set;
+ pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->pci.mask_pos,
+ desc->pci.msi_mask);
raw_spin_unlock_irqrestore(lock, flags);
}
@@ -171,7 +44,7 @@ static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
{
- return desc->mask_base + desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+ return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE;
}
/*
@@ -184,27 +57,27 @@ static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
{
void __iomem *desc_addr = pci_msix_desc_addr(desc);
- if (desc->msi_attrib.can_mask)
+ if (desc->pci.msi_attrib.can_mask)
writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
static inline void pci_msix_mask(struct msi_desc *desc)
{
- desc->msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
- pci_msix_write_vector_ctrl(desc, desc->msix_ctrl);
+ desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
/* Flush write to device */
- readl(desc->mask_base);
+ readl(desc->pci.mask_base);
}
static inline void pci_msix_unmask(struct msi_desc *desc)
{
- desc->msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
- pci_msix_write_vector_ctrl(desc, desc->msix_ctrl);
+ desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
}
static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
{
- if (desc->msi_attrib.is_msix)
+ if (desc->pci.msi_attrib.is_msix)
pci_msix_mask(desc);
else
pci_msi_mask(desc, mask);
@@ -212,7 +85,7 @@ static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
{
- if (desc->msi_attrib.is_msix)
+ if (desc->pci.msi_attrib.is_msix)
pci_msix_unmask(desc);
else
pci_msi_unmask(desc, mask);
@@ -242,24 +115,16 @@ void pci_msi_unmask_irq(struct irq_data *data)
}
EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);
-void default_restore_msi_irqs(struct pci_dev *dev)
-{
- struct msi_desc *entry;
-
- for_each_pci_msi_entry(entry, dev)
- default_restore_msi_irq(dev, entry->irq);
-}
-
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
struct pci_dev *dev = msi_desc_to_pci_dev(entry);
BUG_ON(dev->current_state != PCI_D0);
- if (entry->msi_attrib.is_msix) {
+ if (entry->pci.msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
- if (WARN_ON_ONCE(entry->msi_attrib.is_virtual))
+ if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual))
return;
msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
@@ -271,7 +136,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
&msg->address_lo);
- if (entry->msi_attrib.is_64) {
+ if (entry->pci.msi_attrib.is_64) {
pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
&msg->address_hi);
pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
@@ -289,12 +154,12 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
/* Don't touch the hardware now */
- } else if (entry->msi_attrib.is_msix) {
+ } else if (entry->pci.msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
- u32 ctrl = entry->msix_ctrl;
+ u32 ctrl = entry->pci.msix_ctrl;
bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);
- if (entry->msi_attrib.is_virtual)
+ if (entry->pci.msi_attrib.is_virtual)
goto skip;
/*
@@ -323,12 +188,12 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
msgctl &= ~PCI_MSI_FLAGS_QSIZE;
- msgctl |= entry->msi_attrib.multiple << 4;
+ msgctl |= entry->pci.msi_attrib.multiple << 4;
pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
msg->address_lo);
- if (entry->msi_attrib.is_64) {
+ if (entry->pci.msi_attrib.is_64) {
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
msg->address_hi);
pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
@@ -359,30 +224,11 @@ EXPORT_SYMBOL_GPL(pci_write_msi_msg);
static void free_msi_irqs(struct pci_dev *dev)
{
- struct list_head *msi_list = dev_to_msi_list(&dev->dev);
- struct msi_desc *entry, *tmp;
- int i;
-
- for_each_pci_msi_entry(entry, dev)
- if (entry->irq)
- for (i = 0; i < entry->nvec_used; i++)
- BUG_ON(irq_has_action(entry->irq + i));
-
- if (dev->msi_irq_groups) {
- msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups);
- dev->msi_irq_groups = NULL;
- }
-
pci_msi_teardown_msi_irqs(dev);
- list_for_each_entry_safe(entry, tmp, msi_list, list) {
- if (entry->msi_attrib.is_msix) {
- if (list_is_last(&entry->list, msi_list))
- iounmap(entry->mask_base);
- }
-
- list_del(&entry->list);
- free_msi_entry(entry);
+ if (dev->msix_base) {
+ iounmap(dev->msix_base);
+ dev->msix_base = NULL;
}
}
@@ -403,10 +249,19 @@ static void pci_msi_set_enable(struct pci_dev *dev, int enable)
pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
+/*
+ * Architecture override returns true when the PCI MSI message should be
+ * written by the generic restore function.
+ */
+bool __weak arch_restore_msi_irqs(struct pci_dev *dev)
+{
+ return true;
+}
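A hypervisor-style backend is the usual reason to return false here. The override sketch below is hypothetical and the example_* helpers are placeholders:

bool arch_restore_msi_irqs(struct pci_dev *dev)
{
        if (example_hypervisor_owns_msi(dev)) {         /* hypothetical */
                example_hypervisor_restore_msi(dev);    /* hypothetical */
                return false;   /* generic code must not write the message */
        }
        return true;            /* let __pci_write_msi_msg() do the work */
}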
+
static void __pci_restore_msi_state(struct pci_dev *dev)
{
- u16 control;
struct msi_desc *entry;
+ u16 control;
if (!dev->msi_enabled)
return;
@@ -415,12 +270,13 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 0);
- arch_restore_msi_irqs(dev);
+ if (arch_restore_msi_irqs(dev))
+ __pci_write_msi_msg(entry, &entry->msg);
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
pci_msi_update_mask(entry, 0, 0);
control &= ~PCI_MSI_FLAGS_QSIZE;
- control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
+ control |= (entry->pci.msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
@@ -437,19 +293,25 @@ static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
static void __pci_restore_msix_state(struct pci_dev *dev)
{
struct msi_desc *entry;
+ bool write_msg;
if (!dev->msix_enabled)
return;
- BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
/* route the table */
pci_intx_for_msi(dev, 0);
pci_msix_clear_and_set_ctrl(dev, 0,
PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
- arch_restore_msi_irqs(dev);
- for_each_pci_msi_entry(entry, dev)
- pci_msix_write_vector_ctrl(entry, entry->msix_ctrl);
+ write_msg = arch_restore_msi_irqs(dev);
+
+ msi_lock_descs(&dev->dev);
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+ if (write_msg)
+ __pci_write_msi_msg(entry, &entry->msg);
+ pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ }
+ msi_unlock_descs(&dev->dev);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
@@ -461,48 +323,79 @@ void pci_restore_msi_state(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
-static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
+static void pcim_msi_release(void *pcidev)
{
- struct irq_affinity_desc *masks = NULL;
- struct msi_desc *entry;
- u16 control;
+ struct pci_dev *dev = pcidev;
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+ dev->is_msi_managed = false;
+ pci_free_irq_vectors(dev);
+}
+
+/*
+ * Needs to be separate from pcim_release to prevent an ordering problem
+ * vs. msi_device_data_release() in the MSI core code.
+ */
+static int pcim_setup_msi_release(struct pci_dev *dev)
+{
+ int ret;
+
+ if (!pci_is_managed(dev) || dev->is_msi_managed)
+ return 0;
+
+ ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
+ if (!ret)
+ dev->is_msi_managed = true;
+ return ret;
+}
+
+/*
+ * Ordering vs. devres: msi device data has to be installed first so that
+ * pcim_msi_release() is invoked before it on device release.
+ */
+static int pci_setup_msi_context(struct pci_dev *dev)
+{
+ int ret = msi_setup_device_data(&dev->dev);
+
+ if (!ret)
+ ret = pcim_setup_msi_release(dev);
+ return ret;
+}
+
+static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
+ struct irq_affinity_desc *masks)
+{
+ struct msi_desc desc;
+ u16 control;
/* MSI Entry Initialization */
- entry = alloc_msi_entry(&dev->dev, nvec, masks);
- if (!entry)
- goto out;
+ memset(&desc, 0, sizeof(desc));
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
+ /* Respect Xen's mask disabling */
+ if (pci_msi_ignore_mask)
+ control &= ~PCI_MSI_FLAGS_MASKBIT;
- entry->msi_attrib.is_msix = 0;
- entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
- entry->msi_attrib.is_virtual = 0;
- entry->msi_attrib.entry_nr = 0;
- entry->msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !!(control & PCI_MSI_FLAGS_MASKBIT);
- entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
- entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
- entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+ desc.nvec_used = nvec;
+ desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
+ desc.pci.msi_attrib.can_mask = !!(control & PCI_MSI_FLAGS_MASKBIT);
+ desc.pci.msi_attrib.default_irq = dev->irq;
+ desc.pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+ desc.pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+ desc.affinity = masks;
if (control & PCI_MSI_FLAGS_64BIT)
- entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+ desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
else
- entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+ desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
/* Save the initial mask status */
- if (entry->msi_attrib.can_mask)
- pci_read_config_dword(dev, entry->mask_pos, &entry->msi_mask);
+ if (desc.pci.msi_attrib.can_mask)
+ pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);
-out:
- kfree(masks);
- return entry;
+ return msi_add_msi_desc(&dev->dev, &desc);
}
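Worked example of the encoding just above: a request for nvec = 3 is rounded up to the next power of two, so msi_attrib.multiple = ilog2(__roundup_pow_of_two(3)) = ilog2(4) = 2, i.e. the device decodes the low two data bits as the vector number; multi_cap is taken verbatim from the PCI_MSI_FLAGS_QMASK field and records what the hardware advertises (0 meaning 1 vector, up to 5 meaning 32).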
static int msi_verify_entries(struct pci_dev *dev)
@@ -512,14 +405,14 @@ static int msi_verify_entries(struct pci_dev *dev)
if (!dev->no_64bit_msi)
return 0;
- for_each_pci_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
if (entry->msg.address_hi) {
pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
entry->msg.address_hi, entry->msg.address_lo);
- return -EIO;
+ break;
}
}
- return 0;
+ return !entry ? 0 : -EIO;
}
/**
@@ -537,21 +430,29 @@ static int msi_verify_entries(struct pci_dev *dev)
static int msi_capability_init(struct pci_dev *dev, int nvec,
struct irq_affinity *affd)
{
- const struct attribute_group **groups;
+ struct irq_affinity_desc *masks = NULL;
struct msi_desc *entry;
int ret;
- pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
+ /*
+ * Disable MSI during setup in the hardware, but mark it enabled
+ * so that setup code can evaluate it.
+ */
+ pci_msi_set_enable(dev, 0);
+ dev->msi_enabled = 1;
- entry = msi_setup_entry(dev, nvec, affd);
- if (!entry)
- return -ENOMEM;
+ if (affd)
+ masks = irq_create_affinity_masks(nvec, affd);
+
+ msi_lock_descs(&dev->dev);
+ ret = msi_setup_msi_desc(dev, nvec, masks);
+ if (ret)
+ goto fail;
/* All MSIs are unmasked by default; mask them all */
+ entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
pci_msi_mask(entry, msi_multi_mask(entry));
- list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
-
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret)
@@ -561,26 +462,22 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
if (ret)
goto err;
- groups = msi_populate_sysfs(&dev->dev);
- if (IS_ERR(groups)) {
- ret = PTR_ERR(groups);
- goto err;
- }
-
- dev->msi_irq_groups = groups;
-
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 1);
- dev->msi_enabled = 1;
pcibios_free_irq(dev);
dev->irq = entry->irq;
- return 0;
+ goto unlock;
err:
pci_msi_unmask(entry, msi_multi_mask(entry));
free_msi_irqs(dev);
+fail:
+ dev->msi_enabled = 0;
+unlock:
+ msi_unlock_descs(&dev->dev);
+ kfree(masks);
return ret;
}
@@ -605,70 +502,49 @@ static void __iomem *msix_map_region(struct pci_dev *dev,
return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
-static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
- struct msix_entry *entries, int nvec,
- struct irq_affinity *affd)
+static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
+ struct msix_entry *entries, int nvec,
+ struct irq_affinity_desc *masks)
{
- struct irq_affinity_desc *curmsk, *masks = NULL;
- struct msi_desc *entry;
+ int ret = 0, i, vec_count = pci_msix_vec_count(dev);
+ struct irq_affinity_desc *curmsk;
+ struct msi_desc desc;
void __iomem *addr;
- int ret, i;
- int vec_count = pci_msix_vec_count(dev);
-
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
- for (i = 0, curmsk = masks; i < nvec; i++) {
- entry = alloc_msi_entry(&dev->dev, 1, curmsk);
- if (!entry) {
- if (!i)
- iounmap(base);
- else
- free_msi_irqs(dev);
- /* No enough memory. Don't try again */
- ret = -ENOMEM;
- goto out;
- }
+ memset(&desc, 0, sizeof(desc));
- entry->msi_attrib.is_msix = 1;
- entry->msi_attrib.is_64 = 1;
+ desc.nvec_used = 1;
+ desc.pci.msi_attrib.is_msix = 1;
+ desc.pci.msi_attrib.is_64 = 1;
+ desc.pci.msi_attrib.default_irq = dev->irq;
+ desc.pci.mask_base = base;
- if (entries)
- entry->msi_attrib.entry_nr = entries[i].entry;
- else
- entry->msi_attrib.entry_nr = i;
+ for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) {
+ desc.msi_index = entries ? entries[i].entry : i;
+ desc.affinity = masks ? curmsk : NULL;
+ desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count;
+ desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
+ !desc.pci.msi_attrib.is_virtual;
- entry->msi_attrib.is_virtual =
- entry->msi_attrib.entry_nr >= vec_count;
-
- entry->msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !entry->msi_attrib.is_virtual;
-
- entry->msi_attrib.default_irq = dev->irq;
- entry->mask_base = base;
-
- if (entry->msi_attrib.can_mask) {
- addr = pci_msix_desc_addr(entry);
- entry->msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ if (!desc.pci.msi_attrib.can_mask) {
+ addr = pci_msix_desc_addr(&desc);
+ desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
- list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
- if (masks)
- curmsk++;
+ ret = msi_add_msi_desc(&dev->dev, &desc);
+ if (ret)
+ break;
}
- ret = 0;
-out:
- kfree(masks);
return ret;
}
static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;
- for_each_pci_msi_entry(entry, dev) {
- if (entries) {
- entries->vector = entry->irq;
+ if (entries) {
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
+ entries->vector = desc->irq;
entries++;
}
}
@@ -686,6 +562,41 @@ static void msix_mask_all(void __iomem *base, int tsize)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
+static int msix_setup_interrupts(struct pci_dev *dev, void __iomem *base,
+ struct msix_entry *entries, int nvec,
+ struct irq_affinity *affd)
+{
+ struct irq_affinity_desc *masks = NULL;
+ int ret;
+
+ if (affd)
+ masks = irq_create_affinity_masks(nvec, affd);
+
+ msi_lock_descs(&dev->dev);
+ ret = msix_setup_msi_descs(dev, base, entries, nvec, masks);
+ if (ret)
+ goto out_free;
+
+ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+ if (ret)
+ goto out_free;
+
+ /* Check if all MSI entries honor device restrictions */
+ ret = msi_verify_entries(dev);
+ if (ret)
+ goto out_free;
+
+ msix_update_entries(dev, entries);
+ goto out_unlock;
+
+out_free:
+ free_msi_irqs(dev);
+out_unlock:
+ msi_unlock_descs(&dev->dev);
+ kfree(masks);
+ return ret;
+}
+
/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -700,7 +611,6 @@ static void msix_mask_all(void __iomem *base, int tsize)
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, struct irq_affinity *affd)
{
- const struct attribute_group **groups;
void __iomem *base;
int ret, tsize;
u16 control;
@@ -713,6 +623,9 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
PCI_MSIX_FLAGS_ENABLE);
+ /* Mark it enabled so setup functions can query it */
+ dev->msix_enabled = 1;
+
pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Request & Map MSI-X table region */
tsize = msix_table_size(control);
@@ -722,32 +635,14 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
goto out_disable;
}
- ret = msix_setup_entries(dev, base, entries, nvec, affd);
- if (ret)
- goto out_disable;
-
- ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
- if (ret)
- goto out_avail;
+ dev->msix_base = base;
- /* Check if all MSI entries honor device restrictions */
- ret = msi_verify_entries(dev);
+ ret = msix_setup_interrupts(dev, base, entries, nvec, affd);
if (ret)
- goto out_free;
-
- msix_update_entries(dev, entries);
-
- groups = msi_populate_sysfs(&dev->dev);
- if (IS_ERR(groups)) {
- ret = PTR_ERR(groups);
- goto out_free;
- }
-
- dev->msi_irq_groups = groups;
+ goto out_disable;
- /* Set MSI-X enabled bits and unmask the function */
+ /* Disable INTx */
pci_intx_for_msi(dev, 0);
- dev->msix_enabled = 1;
/*
* Ensure that all table entries are masked to prevent
@@ -763,27 +658,8 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
pcibios_free_irq(dev);
return 0;
-out_avail:
- if (ret < 0) {
- /*
- * If we had some success, report the number of IRQs
- * we succeeded in setting up.
- */
- struct msi_desc *entry;
- int avail = 0;
-
- for_each_pci_msi_entry(entry, dev) {
- if (entry->irq != 0)
- avail++;
- }
- if (avail != 0)
- ret = avail;
- }
-
-out_free:
- free_msi_irqs(dev);
-
out_disable:
+ dev->msix_enabled = 0;
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
return ret;
@@ -870,18 +746,17 @@ static void pci_msi_shutdown(struct pci_dev *dev)
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
- BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
- desc = first_pci_msi_entry(dev);
-
pci_msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
/* Return the device with MSI unmasked as initial states */
- pci_msi_unmask(desc, msi_multi_mask(desc));
+ desc = msi_first_desc(&dev->dev, MSI_DESC_ALL);
+ if (!WARN_ON_ONCE(!desc))
+ pci_msi_unmask(desc, msi_multi_mask(desc));
/* Restore dev->irq to its default pin-assertion IRQ */
- dev->irq = desc->msi_attrib.default_irq;
+ dev->irq = desc->pci.msi_attrib.default_irq;
pcibios_alloc_irq(dev);
}
@@ -890,8 +765,10 @@ void pci_disable_msi(struct pci_dev *dev)
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
+ msi_lock_descs(&dev->dev);
pci_msi_shutdown(dev);
free_msi_irqs(dev);
+ msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msi);
@@ -952,7 +829,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
static void pci_msix_shutdown(struct pci_dev *dev)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
@@ -963,8 +840,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
}
/* Return the device with MSI-X masked as initial states */
- for_each_pci_msi_entry(entry, dev)
- pci_msix_mask(entry);
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
+ pci_msix_mask(desc);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);
@@ -977,28 +854,13 @@ void pci_disable_msix(struct pci_dev *dev)
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
+ msi_lock_descs(&dev->dev);
pci_msix_shutdown(dev);
free_msi_irqs(dev);
+ msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msix);
-void pci_no_msi(void)
-{
- pci_msi_enable = 0;
-}
-
-/**
- * pci_msi_enabled - is MSI enabled?
- *
- * Returns true if MSI has not been disabled by the command-line option
- * pci=nomsi.
- **/
-int pci_msi_enabled(void)
-{
- return pci_msi_enable;
-}
-EXPORT_SYMBOL(pci_msi_enabled);
-
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
struct irq_affinity *affd)
{
@@ -1029,6 +891,10 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (nvec > maxvec)
nvec = maxvec;
+ rc = pci_setup_msi_context(dev);
+ if (rc)
+ return rc;
+
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
@@ -1072,6 +938,10 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
if (WARN_ON_ONCE(dev->msix_enabled))
return -EINVAL;
+ rc = pci_setup_msi_context(dev);
+ if (rc)
+ return rc;
+
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
@@ -1194,35 +1064,25 @@ EXPORT_SYMBOL(pci_free_irq_vectors);
/**
* pci_irq_vector - return Linux IRQ number of a device vector
- * @dev: PCI device to operate on
- * @nr: device-relative interrupt vector index (0-based).
+ * @dev: PCI device to operate on
+ * @nr: Interrupt vector index (0-based)
+ *
+ * @nr has the following meanings depending on the interrupt mode:
+ * MSI-X: The index in the MSI-X vector table
+ * MSI: The index of the enabled MSI vectors
+ * INTx: Must be 0
+ *
+ * Return: The Linux interrupt number or -EINVAL if @nr is out of range.
*/
int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
- if (dev->msix_enabled) {
- struct msi_desc *entry;
- int i = 0;
-
- for_each_pci_msi_entry(entry, dev) {
- if (i == nr)
- return entry->irq;
- i++;
- }
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
+ unsigned int irq;
- if (dev->msi_enabled) {
- struct msi_desc *entry = first_pci_msi_entry(dev);
-
- if (WARN_ON_ONCE(nr >= entry->nvec_used))
- return -EINVAL;
- } else {
- if (WARN_ON_ONCE(nr > 0))
- return -EINVAL;
- }
+ if (!dev->msi_enabled && !dev->msix_enabled)
+ return !nr ? dev->irq : -EINVAL;
- return dev->irq + nr;
+ irq = msi_get_virq(&dev->dev, nr);
+ return irq ? irq : -EINVAL;
}
EXPORT_SYMBOL(pci_irq_vector);
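For reference, a typical consumer of this pair of APIs looks roughly like the following sketch; example_irq_handler and the vector counts are illustrative only:

static int example_request_vectors(struct pci_dev *pdev)
{
        int nvec, i, irq, ret;

        nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSIX | PCI_IRQ_MSI);
        if (nvec < 0)
                return nvec;

        for (i = 0; i < nvec; i++) {
                irq = pci_irq_vector(pdev, i);
                if (irq < 0)
                        return irq;
                ret = request_irq(irq, example_irq_handler, 0,
                                  "example", pdev);
                if (ret)
                        return ret;
        }
        return 0;
}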
@@ -1230,332 +1090,58 @@ EXPORT_SYMBOL(pci_irq_vector);
* pci_irq_get_affinity - return the affinity of a particular MSI vector
* @dev: PCI device to operate on
* @nr: device-relative interrupt vector index (0-based).
+ *
+ * @nr has the following meanings depending on the interrupt mode:
+ * MSI-X: The index in the MSI-X vector table
+ * MSI: The index of the enabled MSI vectors
+ * INTx: Must be 0
+ *
+ * Return: A cpumask pointer or NULL if @nr is out of range
*/
const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
{
- if (dev->msix_enabled) {
- struct msi_desc *entry;
- int i = 0;
+ int idx, irq = pci_irq_vector(dev, nr);
+ struct msi_desc *desc;
- for_each_pci_msi_entry(entry, dev) {
- if (i == nr)
- return &entry->affinity->mask;
- i++;
- }
- WARN_ON_ONCE(1);
+ if (WARN_ON_ONCE(irq <= 0))
return NULL;
- } else if (dev->msi_enabled) {
- struct msi_desc *entry = first_pci_msi_entry(dev);
- if (WARN_ON_ONCE(!entry || !entry->affinity ||
- nr >= entry->nvec_used))
- return NULL;
-
- return &entry->affinity[nr].mask;
- } else {
+ desc = irq_get_msi_desc(irq);
+ /* Non-MSI does not have the information handy */
+ if (!desc)
return cpu_possible_mask;
- }
-}
-EXPORT_SYMBOL(pci_irq_get_affinity);
-struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
-{
- return to_pci_dev(desc->dev);
-}
-EXPORT_SYMBOL(msi_desc_to_pci_dev);
-
-void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
-{
- struct pci_dev *dev = msi_desc_to_pci_dev(desc);
-
- return dev->bus->sysdata;
-}
-EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);
-
-#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
-/**
- * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
- * @irq_data: Pointer to interrupt data of the MSI interrupt
- * @msg: Pointer to the message
- */
-void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
-{
- struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
+ if (WARN_ON_ONCE(!desc->affinity))
+ return NULL;
/*
- * For MSI-X desc->irq is always equal to irq_data->irq. For
- * MSI only the first interrupt of MULTI MSI passes the test.
+ * MSI has a mask array in the descriptor.
+ * MSI-X has a single mask.
*/
- if (desc->irq == irq_data->irq)
- __pci_write_msi_msg(desc, msg);
-}
-
-/**
- * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
- * @desc: Pointer to the MSI descriptor
- *
- * The ID number is only used within the irqdomain.
- */
-static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
-{
- struct pci_dev *dev = msi_desc_to_pci_dev(desc);
-
- return (irq_hw_number_t)desc->msi_attrib.entry_nr |
- pci_dev_id(dev) << 11 |
- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
-}
-
-static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
-{
- return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
-}
-
-/**
- * pci_msi_domain_check_cap - Verify that @domain supports the capabilities
- * for @dev
- * @domain: The interrupt domain to check
- * @info: The domain info for verification
- * @dev: The device to check
- *
- * Returns:
- * 0 if the functionality is supported
- * 1 if Multi MSI is requested, but the domain does not support it
- * -ENOTSUPP otherwise
- */
-int pci_msi_domain_check_cap(struct irq_domain *domain,
- struct msi_domain_info *info, struct device *dev)
-{
- struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
-
- /* Special handling to support __pci_enable_msi_range() */
- if (pci_msi_desc_is_multi_msi(desc) &&
- !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
- return 1;
- else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
- return -ENOTSUPP;
-
- return 0;
-}
-
-static int pci_msi_domain_handle_error(struct irq_domain *domain,
- struct msi_desc *desc, int error)
-{
- /* Special handling to support __pci_enable_msi_range() */
- if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
- return 1;
-
- return error;
-}
-
-static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
- struct msi_desc *desc)
-{
- arg->desc = desc;
- arg->hwirq = pci_msi_domain_calc_hwirq(desc);
-}
-
-static struct msi_domain_ops pci_msi_domain_ops_default = {
- .set_desc = pci_msi_domain_set_desc,
- .msi_check = pci_msi_domain_check_cap,
- .handle_error = pci_msi_domain_handle_error,
-};
-
-static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
-{
- struct msi_domain_ops *ops = info->ops;
-
- if (ops == NULL) {
- info->ops = &pci_msi_domain_ops_default;
- } else {
- if (ops->set_desc == NULL)
- ops->set_desc = pci_msi_domain_set_desc;
- if (ops->msi_check == NULL)
- ops->msi_check = pci_msi_domain_check_cap;
- if (ops->handle_error == NULL)
- ops->handle_error = pci_msi_domain_handle_error;
- }
-}
-
-static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
-{
- struct irq_chip *chip = info->chip;
-
- BUG_ON(!chip);
- if (!chip->irq_write_msi_msg)
- chip->irq_write_msi_msg = pci_msi_domain_write_msg;
- if (!chip->irq_mask)
- chip->irq_mask = pci_msi_mask_irq;
- if (!chip->irq_unmask)
- chip->irq_unmask = pci_msi_unmask_irq;
-}
-
-/**
- * pci_msi_create_irq_domain - Create a MSI interrupt domain
- * @fwnode: Optional fwnode of the interrupt controller
- * @info: MSI domain info
- * @parent: Parent irq domain
- *
- * Updates the domain and chip ops and creates a MSI interrupt domain.
- *
- * Returns:
- * A domain pointer or NULL in case of failure.
- */
-struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent)
-{
- struct irq_domain *domain;
-
- if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE))
- info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
-
- if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
- pci_msi_domain_update_dom_ops(info);
- if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
- pci_msi_domain_update_chip_ops(info);
-
- info->flags |= MSI_FLAG_ACTIVATE_EARLY;
- if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
- info->flags |= MSI_FLAG_MUST_REACTIVATE;
-
- /* PCI-MSI is oneshot-safe */
- info->chip->flags |= IRQCHIP_ONESHOT_SAFE;
-
- domain = msi_create_irq_domain(fwnode, info, parent);
- if (!domain)
- return NULL;
-
- irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI);
- return domain;
-}
-EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
-
-/*
- * Users of the generic MSI infrastructure expect a device to have a single ID,
- * so with DMA aliases we have to pick the least-worst compromise. Devices with
- * DMA phantom functions tend to still emit MSIs from the real function number,
- * so we ignore those and only consider topological aliases where either the
- * alias device or RID appears on a different bus number. We also make the
- * reasonable assumption that bridges are walked in an upstream direction (so
- * the last one seen wins), and the much braver assumption that the most likely
- * case is that of PCI->PCIe so we should always use the alias RID. This echoes
- * the logic from intel_irq_remapping's set_msi_sid(), which presumably works
- * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions
- * for taking ownership all we can really do is close our eyes and hope...
- */
-static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
-{
- u32 *pa = data;
- u8 bus = PCI_BUS_NUM(*pa);
-
- if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus)
- *pa = alias;
-
- return 0;
+ idx = dev->msi_enabled ? nr : 0;
+ return &desc->affinity[idx].mask;
}
+EXPORT_SYMBOL(pci_irq_get_affinity);
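When the vectors were allocated with PCI_IRQ_AFFINITY, a driver can spread its queues along the pre-computed masks. A hypothetical fragment ("queue" is a placeholder structure):

        const struct cpumask *mask = pci_irq_get_affinity(pdev, i);

        if (mask)                               /* NULL if i is out of range */
                queue->cpu = cpumask_first(mask);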
-/**
- * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
- * @domain: The interrupt domain
- * @pdev: The PCI device.
- *
- * The RID for a device is formed from the alias, with a firmware
- * supplied mapping applied
- *
- * Returns: The RID.
- */
-u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
- struct device_node *of_node;
- u32 rid = pci_dev_id(pdev);
-
- pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
-
- of_node = irq_domain_get_of_node(domain);
- rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
- iort_msi_map_id(&pdev->dev, rid);
-
- return rid;
+ return to_pci_dev(desc->dev);
}
+EXPORT_SYMBOL(msi_desc_to_pci_dev);
-/**
- * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
- * @pdev: The PCI device
- *
- * Use the firmware data to find a device-specific MSI domain
- * (i.e. not one that is set as a default).
- *
- * Returns: The corresponding MSI domain or NULL if none has been found.
- */
-struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+void pci_no_msi(void)
{
- struct irq_domain *dom;
- u32 rid = pci_dev_id(pdev);
-
- pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
- dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
- if (!dom)
- dom = iort_get_device_domain(&pdev->dev, rid,
- DOMAIN_BUS_PCI_MSI);
- return dom;
+ pci_msi_enable = 0;
}
/**
- * pci_dev_has_special_msi_domain - Check whether the device is handled by
- * a non-standard PCI-MSI domain
- * @pdev: The PCI device to check.
+ * pci_msi_enabled - is MSI enabled?
*
- * Returns: True if the device irqdomain or the bus irqdomain is
- * non-standard PCI/MSI.
- */
-bool pci_dev_has_special_msi_domain(struct pci_dev *pdev)
-{
- struct irq_domain *dom = dev_get_msi_domain(&pdev->dev);
-
- if (!dom)
- dom = dev_get_msi_domain(&pdev->bus->dev);
-
- if (!dom)
- return true;
-
- return dom->bus_token != DOMAIN_BUS_PCI_MSI;
-}
-
-#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
-#endif /* CONFIG_PCI_MSI */
-
-void pci_msi_init(struct pci_dev *dev)
-{
- u16 ctrl;
-
- /*
- * Disable the MSI hardware to avoid screaming interrupts
- * during boot. This is the power on reset default so
- * usually this should be a noop.
- */
- dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (!dev->msi_cap)
- return;
-
- pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
- if (ctrl & PCI_MSI_FLAGS_ENABLE)
- pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS,
- ctrl & ~PCI_MSI_FLAGS_ENABLE);
-
- if (!(ctrl & PCI_MSI_FLAGS_64BIT))
- dev->no_64bit_msi = 1;
-}
-
-void pci_msix_init(struct pci_dev *dev)
+ * Returns true if MSI has not been disabled by the command-line option
+ * pci=nomsi.
+ **/
+int pci_msi_enabled(void)
{
- u16 ctrl;
-
- dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (!dev->msix_cap)
- return;
-
- pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
- if (ctrl & PCI_MSIX_FLAGS_ENABLE)
- pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS,
- ctrl & ~PCI_MSIX_FLAGS_ENABLE);
+ return pci_msi_enable;
}
+EXPORT_SYMBOL(pci_msi_enabled);
diff --git a/drivers/pci/msi/msi.h b/drivers/pci/msi/msi.h
new file mode 100644
index 000000000000..dbeff066bedd
--- /dev/null
+++ b/drivers/pci/msi/msi.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/pci.h>
+#include <linux/msi.h>
+
+#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
+
+extern int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+extern void pci_msi_teardown_msi_irqs(struct pci_dev *dev);
+
+#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
+extern int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+extern void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev);
+#else
+static inline int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ WARN_ON_ONCE(1);
+ return -ENODEV;
+}
+
+static inline void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
+{
+ WARN_ON_ONCE(1);
+}
+#endif
+
+/*
+ * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
+ * mask all MSI interrupts by clearing the MSI enable bit does not work
+ * reliably as devices without an INTx disable bit will then generate a
+ * level IRQ which will never be cleared.
+ */
+static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
+{
+ /* Don't shift by >= width of type */
+ if (desc->pci.msi_attrib.multi_cap >= 5)
+ return 0xffffffff;
+ return (1 << (1 << desc->pci.msi_attrib.multi_cap)) - 1;
+}
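Worked instance of the mask arithmetic: multi_cap = 3 advertises 2^3 = 8 vectors, so the mask covers bits 0-7: (1 << (1 << 3)) - 1 = 0xff; multi_cap >= 5 would shift by 32 or more, hence the early return of 0xffffffff.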
diff --git a/drivers/pci/msi/pcidev_msi.c b/drivers/pci/msi/pcidev_msi.c
new file mode 100644
index 000000000000..5520aff53b56
--- /dev/null
+++ b/drivers/pci/msi/pcidev_msi.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MSI[X] related functions which are available unconditionally.
+ */
+#include "../pci.h"
+
+/*
+ * Disable the MSI[X] hardware to avoid screaming interrupts during boot.
+ * This is the power-on reset default, so usually this should be a no-op.
+ */
+
+void pci_msi_init(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (!dev->msi_cap)
+ return;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
+ if (ctrl & PCI_MSI_FLAGS_ENABLE) {
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS,
+ ctrl & ~PCI_MSI_FLAGS_ENABLE);
+ }
+
+ if (!(ctrl & PCI_MSI_FLAGS_64BIT))
+ dev->no_64bit_msi = 1;
+}
+
+void pci_msix_init(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (!dev->msix_cap)
+ return;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ if (ctrl & PCI_MSIX_FLAGS_ENABLE) {
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS,
+ ctrl & ~PCI_MSIX_FLAGS_ENABLE);
+ }
+}
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 0b1237cff239..cb2e8351c2cc 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -247,7 +247,7 @@ void of_pci_check_probe_only(void)
else
pci_clear_flags(PCI_PROBE_ONLY);
- pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis");
+ pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 8d47cb7218d1..1015274bd2fe 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -219,7 +219,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
pci_bus_address(pdev, bar) + offset,
range_len(&pgmap->range), dev_to_node(&pdev->dev),
- pgmap->ref);
+ &pgmap->ref);
if (error)
goto pages_free;
@@ -710,7 +710,7 @@ void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
if (!ret)
goto out;
- if (unlikely(!percpu_ref_tryget_live(ref))) {
+ if (unlikely(!percpu_ref_tryget_live_rcu(ref))) {
gen_pool_free(p2pdma->pool, (unsigned long) ret, size);
ret = NULL;
goto out;
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index db97cddfc85e..c994ebec2360 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -139,8 +139,13 @@ struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = {
.ro = GENMASK(7, 0),
},
+ /*
+ * If expansion ROM is unsupported then the ROM Base Address register
+ * must be implemented as a read-only register that returns 0 when read,
+ * the same as for unused Base Address registers.
+ */
[PCI_ROM_ADDRESS1 / 4] = {
- .rw = GENMASK(31, 11) | BIT(0),
+ .ro = ~0,
},
/*
@@ -171,41 +176,55 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
[PCI_CAP_LIST_ID / 4] = {
/*
* Capability ID, Next Capability Pointer and
- * Capabilities register are all read-only.
+ * bits [14:0] of Capabilities register are all read-only.
+ * Bit 15 of Capabilities register is reserved.
*/
- .ro = ~0,
+ .ro = GENMASK(30, 0),
},
[PCI_EXP_DEVCAP / 4] = {
- .ro = ~0,
+ /*
+ * Bits [31:29] and [17:16] are reserved.
+ * Bits [27:18] are reserved for non-upstream ports.
+ * Bits 28 and [14:6] are reserved for non-endpoint devices.
+ * Other bits are read-only.
+ */
+ .ro = BIT(15) | GENMASK(5, 0),
},
[PCI_EXP_DEVCTL / 4] = {
- /* Device control register is RW */
- .rw = GENMASK(15, 0),
+ /*
+ * Device control register is RW, except bit 15 which is
+ * reserved for non-endpoints or non-PCIe-to-PCI/X bridges.
+ */
+ .rw = GENMASK(14, 0),
/*
* Device status register has bits 6 and [3:0] W1C, [5:4] RO,
- * the rest is reserved
+ * the rest is reserved. Also bit 6 is reserved for non-upstream
+ * ports.
*/
- .w1c = (BIT(6) | GENMASK(3, 0)) << 16,
+ .w1c = GENMASK(3, 0) << 16,
.ro = GENMASK(5, 4) << 16,
},
[PCI_EXP_LNKCAP / 4] = {
- /* All bits are RO, except bit 23 which is reserved */
- .ro = lower_32_bits(~BIT(23)),
+ /*
+ * All bits are RO, except bit 23 which is reserved and
+ * bit 18 which is reserved for non-upstream ports.
+ */
+ .ro = lower_32_bits(~(BIT(23) | PCI_EXP_LNKCAP_CLKPM)),
},
[PCI_EXP_LNKCTL / 4] = {
/*
* Link control has bits [15:14], [11:3] and [1:0] RW, the
- * rest is reserved.
+ * rest is reserved. Bit 8 is reserved for non-upstream ports.
*
* Link status has bits [13:0] RO, and bits [15:14]
* W1C.
*/
- .rw = GENMASK(15, 14) | GENMASK(11, 3) | GENMASK(1, 0),
+ .rw = GENMASK(15, 14) | GENMASK(11, 9) | GENMASK(7, 3) | GENMASK(1, 0),
.ro = GENMASK(13, 0) << 16,
.w1c = GENMASK(15, 14) << 16,
},
@@ -251,6 +270,49 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
.ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING,
.w1c = PCI_EXP_RTSTA_PME,
},
+
+ [PCI_EXP_DEVCAP2 / 4] = {
+ /*
+ * Device capabilities 2 register has reserved bits [30:27].
+ * Also bits [26:24] are reserved for non-upstream ports.
+ */
+ .ro = BIT(31) | GENMASK(23, 0),
+ },
+
+ [PCI_EXP_DEVCTL2 / 4] = {
+ /*
+ * Device control 2 register is RW. Bit 11 is reserved for
+ * non-upstream ports.
+ *
+ * Device status 2 register is reserved.
+ */
+ .rw = GENMASK(15, 12) | GENMASK(10, 0),
+ },
+
+ [PCI_EXP_LNKCAP2 / 4] = {
+ /* Link capabilities 2 register has reserved bits [30:25] and bit 0. */
+ .ro = BIT(31) | GENMASK(24, 1),
+ },
+
+ [PCI_EXP_LNKCTL2 / 4] = {
+ /*
+ * Link control 2 register is RW.
+ *
+ * Link status 2 register has bits 5, 15 W1C;
+ * bits 10, 11 reserved and others are RO.
+ */
+ .rw = GENMASK(15, 0),
+ .w1c = (BIT(15) | BIT(5)) << 16,
+ .ro = (GENMASK(14, 12) | GENMASK(9, 6) | GENMASK(4, 0)) << 16,
+ },
+
+ [PCI_EXP_SLTCAP2 / 4] = {
+ /* Slot capabilities 2 register is reserved. */
+ },
+
+ [PCI_EXP_SLTCTL2 / 4] = {
+ /* Both Slot control 2 and Slot status 2 registers are reserved. */
+ },
};
/*
@@ -265,7 +327,11 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
{
BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END);
- bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
+ /*
+ * class_revision: Class is the high 24 bits and revision the low 8 bits of this member,
+ * while the class for PCI Bridge Normal Decode has the 24-bit value PCI_CLASS_BRIDGE_PCI << 8
+ */
+ bridge->conf.class_revision |= cpu_to_le32((PCI_CLASS_BRIDGE_PCI << 8) << 8);
bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->conf.cache_line_size = 0x10;
bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST);
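Spelling out the shifts with concrete numbers: PCI_CLASS_BRIDGE_PCI is the 16-bit base/sub-class pair 0x0604; (0x0604 << 8) = 0x060400 is the full 24-bit class code (prog-if 0), and shifting by another 8 bits places it in the top 24 bits of class_revision as 0x06040000, leaving the low byte for the revision ID.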
@@ -277,11 +343,9 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
if (bridge->has_pcie) {
bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
+ bridge->conf.status |= cpu_to_le16(PCI_STATUS_CAP_LIST);
bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
- /* Set PCIe v2, root port, slot support */
- bridge->pcie_conf.cap =
- cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
- PCI_EXP_FLAGS_SLOT);
+ bridge->pcie_conf.cap |= cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4);
bridge->pcie_cap_regs_behavior =
kmemdup(pcie_cap_regs_behavior,
sizeof(pcie_cap_regs_behavior),
@@ -290,6 +354,27 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
kfree(bridge->pci_regs_behavior);
return -ENOMEM;
}
+ /* These bits are applicable only for PCI and reserved on PCIe */
+ bridge->pci_regs_behavior[PCI_CACHE_LINE_SIZE / 4].ro &=
+ ~GENMASK(15, 8);
+ bridge->pci_regs_behavior[PCI_COMMAND / 4].ro &=
+ ~((PCI_COMMAND_SPECIAL | PCI_COMMAND_INVALIDATE |
+ PCI_COMMAND_VGA_PALETTE | PCI_COMMAND_WAIT |
+ PCI_COMMAND_FAST_BACK) |
+ (PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK |
+ PCI_STATUS_DEVSEL_MASK) << 16);
+ bridge->pci_regs_behavior[PCI_PRIMARY_BUS / 4].ro &=
+ ~GENMASK(31, 24);
+ bridge->pci_regs_behavior[PCI_IO_BASE / 4].ro &=
+ ~((PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK |
+ PCI_STATUS_DEVSEL_MASK) << 16);
+ bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].rw &=
+ ~((PCI_BRIDGE_CTL_MASTER_ABORT |
+ BIT(8) | BIT(9) | BIT(11)) << 16);
+ bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].ro &=
+ ~((PCI_BRIDGE_CTL_FAST_BACK) << 16);
+ bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].w1c &=
+ ~(BIT(10) << 16);
}
if (flags & PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR) {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index cfe2f85af09e..602f0fb0b007 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -62,11 +62,8 @@ static ssize_t irq_show(struct device *dev,
* For MSI, show the first MSI IRQ; for all other cases including
* MSI-X, show the legacy INTx IRQ.
*/
- if (pdev->msi_enabled) {
- struct msi_desc *desc = first_pci_msi_entry(pdev);
-
- return sysfs_emit(buf, "%u\n", desc->irq);
- }
+ if (pdev->msi_enabled)
+ return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
#endif
return sysfs_emit(buf, "%u\n", pdev->irq);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 3d2fb394986a..9ecce435fb3f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1115,7 +1115,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EIO;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- if (pmcsr == (u16) ~0) {
+ if (PCI_POSSIBLE_ERROR(pmcsr)) {
pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
pci_power_name(dev->current_state),
pci_power_name(state));
@@ -1271,16 +1271,16 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
* After reset, the device should not silently discard config
* requests, but it may still indicate that it needs more time by
* responding to them with CRS completions. The Root Port will
- * generally synthesize ~0 data to complete the read (except when
- * CRS SV is enabled and the read was for the Vendor ID; in that
- * case it synthesizes 0x0001 data).
+ * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
+ * the read (except when CRS SV is enabled and the read was for the
+ * Vendor ID; in that case it synthesizes 0x0001 data).
*
* Wait for the device to return a non-CRS completion. Read the
* Command register instead of Vendor ID so we don't have to
* contend with the CRS SV value.
*/
pci_read_config_dword(dev, PCI_COMMAND, &id);
- while (id == ~0) {
+ while (PCI_POSSIBLE_ERROR(id)) {
if (delay > timeout) {
pci_warn(dev, "not ready %dms after %s; giving up\n",
delay - 1, reset_type);
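For readers tracking the new helpers: this hunk replaces the open-coded ~0 comparison with macros from <linux/pci.h>. To the best of my reading they amount to the sketch below; treat the exact spelling as an assumption rather than a quotation:

#define PCI_ERROR_RESPONSE      (~0ULL)
#define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val))PCI_ERROR_RESPONSE))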
@@ -1556,7 +1556,7 @@ static void pci_save_ltr_state(struct pci_dev *dev)
{
int ltr;
struct pci_cap_saved_state *save_state;
- u16 *cap;
+ u32 *cap;
if (!pci_is_pcie(dev))
return;
@@ -1571,25 +1571,25 @@ static void pci_save_ltr_state(struct pci_dev *dev)
return;
}
- cap = (u16 *)&save_state->cap.data[0];
- pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
- pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
}
static void pci_restore_ltr_state(struct pci_dev *dev)
{
struct pci_cap_saved_state *save_state;
int ltr;
- u16 *cap;
+ u32 *cap;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
if (!save_state || !ltr)
return;
- cap = (u16 *)&save_state->cap.data[0];
- pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
- pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
}
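The dword access works because of the capability layout: PCI_LTR_MAX_SNOOP_LAT sits at offset 4 of the LTR capability and PCI_LTR_MAX_NOSNOOP_LAT at offset 6, so one 32-bit access at the snoop offset covers both 16-bit registers, snoop latency in the low half and no-snoop latency in the high half.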
/**
@@ -2024,11 +2024,6 @@ static void pcim_release(struct device *gendev, void *res)
struct pci_devres *this = res;
int i;
- if (dev->msi_enabled)
- pci_disable_msi(dev);
- if (dev->msix_enabled)
- pci_disable_msix(dev);
-
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
if (this->region_mask & (1 << i))
pci_release_region(dev, i);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 52c74682601a..a96b7424c9bc 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -41,11 +41,6 @@
#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
ASPM_STATE_L1SS)
-struct aspm_latency {
- u32 l0s; /* L0s latency (nsec) */
- u32 l1; /* L1 latency (nsec) */
-};
-
struct pcie_link_state {
struct pci_dev *pdev; /* Upstream component of the Link */
struct pci_dev *downstream; /* Downstream component, function 0 */
@@ -65,15 +60,6 @@ struct pcie_link_state {
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
u32 clkpm_disable:1; /* Clock PM disabled */
-
- /* Exit latencies */
- struct aspm_latency latency_up; /* Upstream direction exit latency */
- struct aspm_latency latency_dw; /* Downstream direction exit latency */
- /*
- * Endpoint acceptable latencies. A pcie downstream port only
- * has one slot under it, so at most there are 8 functions.
- */
- struct aspm_latency acceptable[8];
};
static int aspm_disabled, aspm_force;
@@ -105,6 +91,20 @@ static const char *policy_str[] = {
#define LINK_RETRAIN_TIMEOUT HZ
+/*
+ * The L1 PM substate capability is only implemented in function 0 in a
+ * multi function device.
+ */
+static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
+{
+ struct pci_dev *child;
+
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ if (PCI_FUNC(child->devfn) == 0)
+ return child;
+ return NULL;
+}
+
static int policy_to_aspm_state(struct pcie_link_state *link)
{
switch (aspm_policy) {
@@ -378,8 +378,10 @@ static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
- u32 latency, l1_switch_latency = 0;
- struct aspm_latency *acceptable;
+ u32 latency, encoding, lnkcap_up, lnkcap_dw;
+ u32 l1_switch_latency = 0, latency_up_l0s;
+ u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
+ u32 acceptable_l0s, acceptable_l1;
struct pcie_link_state *link;
/* Device not in D0 doesn't need latency check */
@@ -388,17 +390,36 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
return;
link = endpoint->bus->self->link_state;
- acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+
+ /* Calculate endpoint L0s acceptable latency */
+ encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L0S) >> 6;
+ acceptable_l0s = calc_l0s_acceptable(encoding);
+
+ /* Calculate endpoint L1 acceptable latency */
+ encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable_l1 = calc_l1_acceptable(encoding);
while (link) {
+ struct pci_dev *dev = pci_function_0(link->pdev->subordinate);
+
+ /* Read direction exit latencies */
+ pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
+ &lnkcap_up);
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
+ &lnkcap_dw);
+ latency_up_l0s = calc_l0s_latency(lnkcap_up);
+ latency_up_l1 = calc_l1_latency(lnkcap_up);
+ latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
+ latency_dw_l1 = calc_l1_latency(lnkcap_dw);
+
/* Check upstream direction L0s latency */
if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
- (link->latency_up.l0s > acceptable->l0s))
+ (latency_up_l0s > acceptable_l0s))
link->aspm_capable &= ~ASPM_STATE_L0S_UP;
/* Check downstream direction L0s latency */
if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
- (link->latency_dw.l0s > acceptable->l0s))
+ (latency_dw_l0s > acceptable_l0s))
link->aspm_capable &= ~ASPM_STATE_L0S_DW;
/*
* Check L1 latency.
@@ -413,9 +434,9 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
* L1 exit latencies advertised by a device include L1
* substate latencies (and hence do not do any check).
*/
- latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
+ latency = max_t(u32, latency_up_l1, latency_dw_l1);
if ((link->aspm_capable & ASPM_STATE_L1) &&
- (latency + l1_switch_latency > acceptable->l1))
+ (latency + l1_switch_latency > acceptable_l1))
link->aspm_capable &= ~ASPM_STATE_L1;
l1_switch_latency += 1000;
@@ -423,20 +444,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
}
}
-/*
- * The L1 PM substate capability is only implemented in function 0 in a
- * multi function device.
- */
-static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
-{
- struct pci_dev *child;
-
- list_for_each_entry(child, &linkbus->devices, bus_list)
- if (PCI_FUNC(child->devfn) == 0)
- return child;
- return NULL;
-}
-
static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
u32 clear, u32 set)
{
@@ -496,6 +503,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
encode_l12_threshold(l1_2_threshold, &scale, &value);
ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
+ /* Some broken devices only support dword access to L1 SS */
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
@@ -593,8 +601,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
link->aspm_enabled |= ASPM_STATE_L0S_UP;
if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
link->aspm_enabled |= ASPM_STATE_L0S_DW;
- link->latency_up.l0s = calc_l0s_latency(parent_lnkcap);
- link->latency_dw.l0s = calc_l0s_latency(child_lnkcap);
/* Setup L1 state */
if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
@@ -602,8 +608,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
link->aspm_enabled |= ASPM_STATE_L1;
- link->latency_up.l1 = calc_l1_latency(parent_lnkcap);
- link->latency_dw.l1 = calc_l1_latency(child_lnkcap);
/* Setup L1 substate */
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
@@ -660,22 +664,10 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
- u32 reg32, encoding;
- struct aspm_latency *acceptable =
- &link->acceptable[PCI_FUNC(child->devfn)];
-
if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
continue;
- pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
- /* Calculate endpoint L0s acceptable latency */
- encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
- acceptable->l0s = calc_l0s_acceptable(encoding);
- /* Calculate endpoint L1 acceptable latency */
- encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- acceptable->l1 = calc_l1_acceptable(encoding);
-
pcie_aspm_check_latency(child);
}
}
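The refactor above deletes the cached aspm_latency structs and instead re-reads LNKCAP/DEVCAP and decodes the exit latencies every time pcie_aspm_check_latency() runs. The calc_* helpers it calls are pre-existing aspm.c helpers; their decode is sketched here for context, consistent with the callers but not quoted from the patch (values in nanoseconds, field layout per the PCIe spec):

/* Sketch of the LNKCAP exit-latency decodes assumed above. */
static u32 calc_l0s_latency(u32 lnkcap)
{
	u32 enc = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;

	return enc == 0x7 ? 5 * 1000 : 64 << enc;	/* 111b means "> 4 us" */
}

static u32 calc_l1_latency(u32 lnkcap)
{
	u32 enc = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;

	return enc == 0x7 ? 65 * 1000 : 1000 << enc;	/* 111b means "> 64 us" */
}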
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index c556e7beafe3..3e9afee02e8d 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -79,7 +79,7 @@ static bool dpc_completed(struct pci_dev *pdev)
u16 status;
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
- if ((status != 0xffff) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
+ if ((!PCI_POSSIBLE_ERROR(status)) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
return false;
if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags))
@@ -312,7 +312,7 @@ static irqreturn_t dpc_irq(int irq, void *context)
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
- if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || status == (u16)(~0))
+ if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || PCI_POSSIBLE_ERROR(status))
return IRQ_NONE;
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
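This and the following hunks convert open-coded all-ones checks to PCI_POSSIBLE_ERROR(). Config reads that fail (device removed, link down) complete with all bits set, so the helper compares the value against ~0 at the value's own width; paraphrased from include/linux/pci.h:

#define PCI_ERROR_RESPONSE	(~0ULL)
#define PCI_POSSIBLE_ERROR(val)	((val) == ((typeof(val))PCI_ERROR_RESPONSE))

The name is deliberately "possible": all-ones can also be a legitimate register value, so callers treat it as a strong hint rather than proof of failure.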
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 1d0dd77fed3a..ef8ce436ead9 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -224,7 +224,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
break;
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
- if (rtsta == (u32) ~0)
+ if (PCI_POSSIBLE_ERROR(rtsta))
break;
if (rtsta & PCI_EXP_RTSTA_PME) {
@@ -274,7 +274,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
spin_lock_irqsave(&data->lock, flags);
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
- if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
+ if (PCI_POSSIBLE_ERROR(rtsta) || !(rtsta & PCI_EXP_RTSTA_PME)) {
spin_unlock_irqrestore(&data->lock, flags);
return IRQ_NONE;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 087d3658f75c..17a969942d37 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -206,14 +206,14 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
* memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
* 1 must be clear.
*/
- if (sz == 0xffffffff)
+ if (PCI_POSSIBLE_ERROR(sz))
sz = 0;
/*
* I don't know how l can have all bits set. Copied from old code.
* Maybe it fixes a bug on some ancient platform.
*/
- if (l == 0xffffffff)
+ if (PCI_POSSIBLE_ERROR(l))
l = 0;
if (type == pci_bar_unknown) {
@@ -898,8 +898,6 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
bridge->bus = bus;
- /* Temporarily move resources off the list */
- list_splice_init(&bridge->windows, &resources);
bus->sysdata = bridge->sysdata;
bus->ops = bridge->ops;
bus->number = bus->busn_res.start = bridge->busnr;
@@ -925,6 +923,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
if (err)
goto free;
+ /* Temporarily move resources off the list */
+ list_splice_init(&bridge->windows, &resources);
err = device_add(&bridge->dev);
if (err) {
put_device(&bridge->dev);
@@ -1579,20 +1579,12 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
static void set_pcie_thunderbolt(struct pci_dev *dev)
{
- int vsec = 0;
- u32 header;
-
- while ((vsec = pci_find_next_ext_capability(dev, vsec,
- PCI_EXT_CAP_ID_VNDR))) {
- pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
+ u16 vsec;
- /* Is the device part of a Thunderbolt controller? */
- if (dev->vendor == PCI_VENDOR_ID_INTEL &&
- PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
- dev->is_thunderbolt = 1;
- return;
- }
- }
+ /* Is the device part of a Thunderbolt controller? */
+ vsec = pci_find_vsec_capability(dev, PCI_VENDOR_ID_INTEL, PCI_VSEC_ID_INTEL_TBT);
+ if (vsec)
+ dev->is_thunderbolt = 1;
}
static void set_pcie_untrusted(struct pci_dev *dev)
@@ -1683,7 +1675,7 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev)
if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
return PCI_CFG_SPACE_SIZE;
- if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
+ if (PCI_POSSIBLE_ERROR(status) || pci_ext_cfg_is_aliased(dev))
return PCI_CFG_SPACE_SIZE;
return PCI_CFG_SPACE_EXP_SIZE;
@@ -2311,7 +2303,9 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
INIT_LIST_HEAD(&dev->bus_list);
dev->dev.type = &pci_dev_type;
dev->bus = pci_bus_get(bus);
-
+#ifdef CONFIG_PCI_MSI
+ raw_spin_lock_init(&dev->msi_lock);
+#endif
return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);
@@ -2371,8 +2365,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
return false;
- /* Some broken boards return 0 or ~0 if a slot is empty: */
- if (*l == 0xffffffff || *l == 0x00000000 ||
+ /* Some broken boards return 0 or ~0 (PCI_ERROR_RESPONSE) if a slot is empty: */
+ if (PCI_POSSIBLE_ERROR(*l) || *l == 0x00000000 ||
*l == 0x0000ffff || *l == 0xffff0000)
return false;
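set_pcie_thunderbolt() above now delegates the capability walk to pci_find_vsec_capability(), which matches both the vendor ID and the VSEC ID inside PCI_VNDR_HEADER. A hedged usage sketch (the VSEC ID below is a placeholder, not from the patch):

/* Find a vendor-specific extended capability and read its header;
 * 0x1234 is a hypothetical VSEC ID for illustration. */
u16 off = pci_find_vsec_capability(dev, PCI_VENDOR_ID_INTEL, 0x1234);
if (off) {
	u32 hdr;

	pci_read_config_dword(dev, off + PCI_VNDR_HEADER, &hdr);
	pci_info(dev, "VSEC rev %u, length %u\n",
		 PCI_VNDR_HEADER_REV(hdr), PCI_VNDR_HEADER_LEN(hdr));
}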
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index cb18f8a13ab6..9c7edec64f7e 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -21,14 +21,14 @@ static int proc_initialized; /* = 0 */
static loff_t proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
{
- struct pci_dev *dev = PDE_DATA(file_inode(file));
+ struct pci_dev *dev = pde_data(file_inode(file));
return fixed_size_llseek(file, off, whence, dev->cfg_size);
}
static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
- struct pci_dev *dev = PDE_DATA(file_inode(file));
+ struct pci_dev *dev = pde_data(file_inode(file));
unsigned int pos = *ppos;
unsigned int cnt, size;
@@ -114,7 +114,7 @@ static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct inode *ino = file_inode(file);
- struct pci_dev *dev = PDE_DATA(ino);
+ struct pci_dev *dev = pde_data(ino);
int pos = *ppos;
int size = dev->cfg_size;
int cnt, ret;
@@ -196,7 +196,7 @@ struct pci_filp_private {
static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct pci_dev *dev = PDE_DATA(file_inode(file));
+ struct pci_dev *dev = pde_data(file_inode(file));
#ifdef HAVE_PCI_MMAP
struct pci_filp_private *fpriv = file->private_data;
#endif /* HAVE_PCI_MMAP */
@@ -244,7 +244,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#ifdef HAVE_PCI_MMAP
static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct pci_dev *dev = PDE_DATA(file_inode(file));
+ struct pci_dev *dev = pde_data(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;
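PDE_DATA() was renamed to the lower-case pde_data() in the 5.17 cycle with identical semantics: it returns the cookie passed to proc_create_data() for the entry behind the inode. A minimal sketch, names hypothetical:

struct demo_ctx { int value; };

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct demo_ctx *ctx = pde_data(file_inode(file));

	/* ... format ctx->value and copy_to_user() ... */
	return 0;
}
/* Registered via: proc_create_data("demo", 0444, NULL, &demo_proc_ops, ctx); */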
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 003950c738d2..d2dd6a6cda60 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -980,8 +980,8 @@ static void quirk_via_ioapic(struct pci_dev *dev)
else
tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
- pci_info(dev, "%sbling VIA external APIC routing\n",
- tmp == 0 ? "Disa" : "Ena");
+ pci_info(dev, "%s VIA external APIC routing\n",
+ tmp ? "Enabling" : "Disabling");
/* Offset 0x58: External APIC IRQ output control */
pci_write_config_byte(dev, 0x58, tmp);
@@ -1850,7 +1850,7 @@ static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
* can set it directly.
*/
if (!pdev->dev.of_node &&
- device_add_properties(&pdev->dev, properties))
+ device_create_managed_software_node(&pdev->dev, properties, NULL))
pci_warn(pdev, "could not add stall property");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
@@ -4103,6 +4103,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c136 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9125,
+ quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
@@ -5683,6 +5686,15 @@ SWITCHTEC_QUIRK(0x4268); /* PAX 68XG4 */
SWITCHTEC_QUIRK(0x4252); /* PAX 52XG4 */
SWITCHTEC_QUIRK(0x4236); /* PAX 36XG4 */
SWITCHTEC_QUIRK(0x4228); /* PAX 28XG4 */
+SWITCHTEC_QUIRK(0x4352); /* PFXA 52XG4 */
+SWITCHTEC_QUIRK(0x4336); /* PFXA 36XG4 */
+SWITCHTEC_QUIRK(0x4328); /* PFXA 28XG4 */
+SWITCHTEC_QUIRK(0x4452); /* PSXA 52XG4 */
+SWITCHTEC_QUIRK(0x4436); /* PSXA 36XG4 */
+SWITCHTEC_QUIRK(0x4428); /* PSXA 28XG4 */
+SWITCHTEC_QUIRK(0x4552); /* PAXA 52XG4 */
+SWITCHTEC_QUIRK(0x4536); /* PAXA 36XG4 */
+SWITCHTEC_QUIRK(0x4528); /* PAXA 28XG4 */
/*
* The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
@@ -5857,3 +5869,13 @@ static void nvidia_ion_ahci_fixup(struct pci_dev *pdev)
pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup);
+
+static void rom_bar_overlap_defect(struct pci_dev *dev)
+{
+ pci_info(dev, "working around ROM BAR overlap defect\n");
+ dev->rom_bar_overlap = 1;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1533, rom_bar_overlap_defect);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1536, rom_bar_overlap_defect);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1537, rom_bar_overlap_defect);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1538, rom_bar_overlap_defect);
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 7f1acb3918d0..439ac5f5907a 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -75,12 +75,16 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
* as zero when disabled, so don't update ROM BARs unless
* they're enabled. See
* https://lore.kernel.org/r/43147B3D.1030309@vc.cvut.cz/
+ * But we must update ROM BAR for buggy devices where even a
+ * disabled ROM can conflict with other BARs.
*/
- if (!(res->flags & IORESOURCE_ROM_ENABLE))
+ if (!(res->flags & IORESOURCE_ROM_ENABLE) &&
+ !dev->rom_bar_overlap)
return;
reg = dev->rom_base_reg;
- new |= PCI_ROM_ADDRESS_ENABLE;
+ if (res->flags & IORESOURCE_ROM_ENABLE)
+ new |= PCI_ROM_ADDRESS_ENABLE;
} else
return;
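The rom_bar_overlap quirk above and this setup-res.c change work as a pair: on the affected devices (reportedly Intel I210-family NICs) a disabled ROM BAR still decodes and can overlap other BARs, so pci_std_update_resource() must keep programming the ROM BAR address for them, while still never setting the enable bit on a disabled ROM. Condensed, the resulting policy is:

/* Condensed sketch of the new ROM-BAR write policy (not verbatim): */
if (!(res->flags & IORESOURCE_ROM_ENABLE) && !dev->rom_bar_overlap)
	return;				/* normal devices: leave disabled ROMs alone */
reg = dev->rom_base_reg;		/* quirked devices: update the address... */
if (res->flags & IORESOURCE_ROM_ENABLE)
	new |= PCI_ROM_ADDRESS_ENABLE;	/* ...but never force-enable it */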
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 751a26668e3a..a0c67191a8b9 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -96,11 +96,12 @@ static struct attribute *pci_slot_default_attrs[] = {
&pci_slot_attr_cur_speed.attr,
NULL,
};
+ATTRIBUTE_GROUPS(pci_slot_default);
static struct kobj_type pci_slot_ktype = {
.sysfs_ops = &pci_slot_sysfs_ops,
.release = &pci_slot_release,
- .default_attrs = pci_slot_default_attrs,
+ .default_groups = pci_slot_default_groups,
};
static char *make_slot_name(const char *name)
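kobj_type::default_attrs went away in 5.17, so the ktype now points at a *_groups array generated by ATTRIBUTE_GROUPS(). Paraphrased from <linux/sysfs.h>, the macro expands to roughly:

/* Paraphrased expansion of ATTRIBUTE_GROUPS(pci_slot_default): */
static const struct attribute_group pci_slot_default_group = {
	.attrs = pci_slot_default_attrs,
};
static const struct attribute_group *pci_slot_default_groups[] = {
	&pci_slot_default_group,
	NULL,
};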
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 38c2b036fb8e..c36c1238c604 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -122,7 +122,7 @@ static void stuser_set_state(struct switchtec_user *stuser,
{
/* requires the mrpc_mutex to already be held when called */
- const char * const state_names[] = {
+ static const char * const state_names[] = {
[MRPC_IDLE] = "IDLE",
[MRPC_QUEUED] = "QUEUED",
[MRPC_RUNNING] = "RUNNING",
@@ -1779,6 +1779,15 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), //PAX 52XG4
SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), //PAX 36XG4
SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), //PAX 28XG4
+ SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4), //PFXA 52XG4
+ SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4), //PFXA 36XG4
+ SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4), //PFXA 28XG4
+ SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4), //PSXA 52XG4
+ SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4), //PSXA 36XG4
+ SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4), //PSXA 28XG4
+ SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4), //PAXA 52XG4
+ SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4), //PAXA 36XG4
+ SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4), //PAXA 28XG4
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d858d25b6cab..d2a7b9fd678b 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -262,8 +262,8 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
}
i = 0;
- for_each_pci_msi_entry(entry, dev) {
- op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
+ op.msix_entries[i].entry = entry->msi_index;
/* Vector is useless at this point. */
op.msix_entries[i].vector = -1;
i++;
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index d13b8d1a780a..ab53eab635f6 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -209,7 +209,7 @@ config PCMCIA_PXA2XX
tristate "PXA2xx support"
depends on ARM && ARCH_PXA && PCMCIA
depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
- || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
+ || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
|| ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \
|| MACH_VPAC270 || MACH_BALLOON3 || MACH_COLIBRI \
|| MACH_COLIBRI320 || MACH_H4700)
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index d82c07c4806b..2d5657cfc49c 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -48,10 +48,8 @@ sa1100_cs-$(CONFIG_SA1100_H3100) += sa1100_h3600.o
sa1100_cs-$(CONFIG_SA1100_H3600) += sa1100_h3600.o
sa1100_cs-$(CONFIG_SA1100_SIMPAD) += sa1100_simpad.o
-pxa2xx_cm_x2xx_cs-y += pxa2xx_cm_x2xx.o pxa2xx_cm_x255.o pxa2xx_cm_x270.o
pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
-pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o
pxa2xx-obj-$(CONFIG_ARCOM_PCMCIA) += pxa2xx_viper.o
pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 6b1edfc890a3..92df2c2c5d07 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/pci.h>
#include <linux/regmap.h>
#include <pcmcia/ss.h>
@@ -230,6 +231,7 @@ static int at91_cf_probe(struct platform_device *pdev)
struct at91_cf_socket *cf;
struct at91_cf_data *board;
struct resource *io;
+ struct resource realio;
int status;
board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
@@ -307,7 +309,9 @@ static int at91_cf_probe(struct platform_device *pdev)
* io_offset is set to 0x10000 to avoid the check in static_find_io().
* */
cf->socket.io_offset = 0x10000;
- status = pci_ioremap_io(0x10000, cf->phys_baseaddr + CF_IO_PHYS);
+ realio.start = cf->socket.io_offset;
+ realio.end = realio.start + SZ_64K - 1;
+ status = pci_remap_iospace(&realio, cf->phys_baseaddr + CF_IO_PHYS);
if (status)
goto fail0a;
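pci_ioremap_io() was an ARM-specific helper; its portable replacement takes a struct resource describing the logical I/O window (which is why realio is built above) plus the CPU physical address behind it:

/* From <linux/pci.h>; res must describe an IORESOURCE_IO range. */
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);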
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index e211e2619680..f70197154a36 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -666,18 +666,16 @@ static int pccardd(void *__skt)
if (events || sysfs_events)
continue;
+ set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
break;
- set_current_state(TASK_INTERRUPTIBLE);
-
schedule();
- /* make sure we are running */
- __set_current_state(TASK_RUNNING);
-
try_to_freeze();
}
+ /* make sure we are running before we exit */
+ __set_current_state(TASK_RUNNING);
/* shut down socket, if a device is still present */
if (skt->state & SOCKET_PRESENT) {
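The reordering in pccardd() closes a missed-wakeup race: a kthread must publish TASK_INTERRUPTIBLE before testing its stop/work conditions, otherwise a wakeup arriving between the test and the state change is lost and the thread sleeps through it. The canonical shape, as an illustrative sketch (work_pending and do_work are placeholders):

/* Canonical kthread wait loop (illustrative, not from the patch): */
while (1) {
	set_current_state(TASK_INTERRUPTIBLE);	/* first: publish "sleeping" */
	if (kthread_should_stop())
		break;				/* checked while INTERRUPTIBLE */
	if (work_pending) {
		__set_current_state(TASK_RUNNING);
		do_work();
		continue;
	}
	schedule();				/* really sleep */
}
__set_current_state(TASK_RUNNING);		/* leave the loop runnable */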
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 5bd1b80424e7..ace133b9f7d4 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -998,7 +998,7 @@ static int runtime_resume(struct device *dev)
static ssize_t field##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \
- return p_dev->test ? sprintf(buf, format, p_dev->field) : -ENODEV; \
+ return p_dev->test ? sysfs_emit(buf, format, p_dev->field) : -ENODEV; \
} \
static DEVICE_ATTR_RO(field);
@@ -1006,7 +1006,7 @@ static DEVICE_ATTR_RO(field);
static ssize_t name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \
- return p_dev->field ? sprintf(buf, "%s\n", p_dev->field) : -ENODEV; \
+ return p_dev->field ? sysfs_emit(buf, "%s\n", p_dev->field) : -ENODEV; \
} \
static DEVICE_ATTR_RO(name);
@@ -1022,7 +1022,7 @@ static ssize_t function_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
- return p_dev->socket ? sprintf(buf, "0x%02x\n", p_dev->func) : -ENODEV;
+ return p_dev->socket ? sysfs_emit(buf, "0x%02x\n", p_dev->func) : -ENODEV;
}
static DEVICE_ATTR_RO(function);
@@ -1030,13 +1030,12 @@ static ssize_t resources_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
- char *str = buf;
- int i;
+ int i, at = 0;
for (i = 0; i < PCMCIA_NUM_RESOURCES; i++)
- str += sprintf(str, "%pr\n", p_dev->resource[i]);
+ at += sysfs_emit_at(buf, at, "%pr\n", p_dev->resource[i]);
- return str - buf;
+ return at;
}
static DEVICE_ATTR_RO(resources);
@@ -1045,9 +1044,9 @@ static ssize_t pm_state_show(struct device *dev, struct device_attribute *attr,
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
if (p_dev->suspended)
- return sprintf(buf, "off\n");
+ return sysfs_emit(buf, "off\n");
else
- return sprintf(buf, "on\n");
+ return sysfs_emit(buf, "on\n");
}
static ssize_t pm_state_store(struct device *dev, struct device_attribute *attr,
@@ -1081,8 +1080,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
hash[i] = crc32(0, p_dev->prod_id[i],
strlen(p_dev->prod_id[i]));
}
- return sprintf(buf, "pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02X"
- "pa%08Xpb%08Xpc%08Xpd%08X\n",
+ return sysfs_emit(buf, "pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02Xpa%08Xpb%08Xpc%08Xpd%08X\n",
p_dev->has_manf_id ? p_dev->manf_id : 0,
p_dev->has_card_id ? p_dev->card_id : 0,
p_dev->has_func_id ? p_dev->func_id : 0,
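These sprintf() conversions follow the sysfs documentation's guidance: sysfs_emit() knows a show() buffer is exactly one PAGE_SIZE page and bounds the write accordingly, and sysfs_emit_at() is the offset variant for callbacks that build output incrementally, as resources_show() now does. For reference:

/* From <linux/sysfs.h>: both return the number of bytes written,
 * bounded so the total never exceeds PAGE_SIZE. */
int sysfs_emit(char *buf, const char *fmt, ...);
int sysfs_emit_at(char *buf, int at, const char *fmt, ...);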
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
index f650e19a315c..6bc0bc24d357 100644
--- a/drivers/pcmcia/pcmcia_cis.c
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -386,7 +386,7 @@ size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
}
EXPORT_SYMBOL(pcmcia_get_tuple);
-
+#ifdef CONFIG_NET
/*
* pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
*
@@ -431,3 +431,4 @@ int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
}
EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
+#endif /* CONFIG_NET */
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index c1c197292111..d78091e79a0f 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -390,10 +390,9 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev)
* "stale", we don't bother checking the port ranges against the
* current socket values.
*/
-static int pcmcia_release_io(struct pcmcia_device *p_dev)
+static void pcmcia_release_io(struct pcmcia_device *p_dev)
{
struct pcmcia_socket *s = p_dev->socket;
- int ret = -EINVAL;
config_t *c;
mutex_lock(&s->ops_mutex);
@@ -412,8 +411,6 @@ static int pcmcia_release_io(struct pcmcia_device *p_dev)
out:
mutex_unlock(&s->ops_mutex);
-
- return ret;
} /* pcmcia_release_io */
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
deleted file mode 100644
index c0b6b846fbaa..000000000000
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ /dev/null
@@ -1,124 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa/pxa_cm_x255.c
- *
- * Compulab Ltd., 2003, 2007, 2008
- * Mike Rapoport <mike@compulab.co.il>
- */
-
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/export.h>
-
-#include "soc_common.h"
-
-#define GPIO_PCMCIA_SKTSEL (54)
-#define GPIO_PCMCIA_S0_CD_VALID (16)
-#define GPIO_PCMCIA_S1_CD_VALID (17)
-#define GPIO_PCMCIA_S0_RDYINT (6)
-#define GPIO_PCMCIA_S1_RDYINT (8)
-#define GPIO_PCMCIA_RESET (9)
-
-static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- int ret = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset");
- if (ret)
- return ret;
- gpio_direction_output(GPIO_PCMCIA_RESET, 0);
-
- if (skt->nr == 0) {
- skt->stat[SOC_STAT_CD].gpio = GPIO_PCMCIA_S0_CD_VALID;
- skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD";
- skt->stat[SOC_STAT_RDY].gpio = GPIO_PCMCIA_S0_RDYINT;
- skt->stat[SOC_STAT_RDY].name = "PCMCIA0 RDY";
- } else {
- skt->stat[SOC_STAT_CD].gpio = GPIO_PCMCIA_S1_CD_VALID;
- skt->stat[SOC_STAT_CD].name = "PCMCIA1 CD";
- skt->stat[SOC_STAT_RDY].gpio = GPIO_PCMCIA_S1_RDYINT;
- skt->stat[SOC_STAT_RDY].name = "PCMCIA1 RDY";
- }
-
- return 0;
-}
-
-static void cmx255_pcmcia_shutdown(struct soc_pcmcia_socket *skt)
-{
- gpio_free(GPIO_PCMCIA_RESET);
-}
-
-
-static void cmx255_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- state->vs_3v = 0;
- state->vs_Xv = 0;
-}
-
-
-static int cmx255_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- switch (skt->nr) {
- case 0:
- if (state->flags & SS_RESET) {
- gpio_set_value(GPIO_PCMCIA_SKTSEL, 0);
- udelay(1);
- gpio_set_value(GPIO_PCMCIA_RESET, 1);
- udelay(10);
- gpio_set_value(GPIO_PCMCIA_RESET, 0);
- }
- break;
- case 1:
- if (state->flags & SS_RESET) {
- gpio_set_value(GPIO_PCMCIA_SKTSEL, 1);
- udelay(1);
- gpio_set_value(GPIO_PCMCIA_RESET, 1);
- udelay(10);
- gpio_set_value(GPIO_PCMCIA_RESET, 0);
- }
- break;
- }
-
- return 0;
-}
-
-static struct pcmcia_low_level cmx255_pcmcia_ops __initdata = {
- .owner = THIS_MODULE,
- .hw_init = cmx255_pcmcia_hw_init,
- .hw_shutdown = cmx255_pcmcia_shutdown,
- .socket_state = cmx255_pcmcia_socket_state,
- .configure_socket = cmx255_pcmcia_configure_socket,
- .nr = 1,
-};
-
-static struct platform_device *cmx255_pcmcia_device;
-
-int __init cmx255_pcmcia_init(void)
-{
- int ret;
-
- cmx255_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
-
- if (!cmx255_pcmcia_device)
- return -ENOMEM;
-
- ret = platform_device_add_data(cmx255_pcmcia_device, &cmx255_pcmcia_ops,
- sizeof(cmx255_pcmcia_ops));
-
- if (ret == 0) {
- printk(KERN_INFO "Registering cm-x255 PCMCIA interface.\n");
- ret = platform_device_add(cmx255_pcmcia_device);
- }
-
- if (ret)
- platform_device_put(cmx255_pcmcia_device);
-
- return ret;
-}
-
-void __exit cmx255_pcmcia_exit(void)
-{
- platform_device_unregister(cmx255_pcmcia_device);
-}
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
deleted file mode 100644
index 36e35da5f887..000000000000
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ /dev/null
@@ -1,103 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa/pxa_cm_x270.c
- *
- * Compulab Ltd., 2003, 2007, 2008
- * Mike Rapoport <mike@compulab.co.il>
- */
-
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/export.h>
-
-#include "soc_common.h"
-
-#define GPIO_PCMCIA_S0_CD_VALID (84)
-#define GPIO_PCMCIA_S0_RDYINT (82)
-#define GPIO_PCMCIA_RESET (53)
-
-static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- int ret = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset");
- if (ret)
- return ret;
- gpio_direction_output(GPIO_PCMCIA_RESET, 0);
-
- skt->stat[SOC_STAT_CD].gpio = GPIO_PCMCIA_S0_CD_VALID;
- skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD";
- skt->stat[SOC_STAT_RDY].gpio = GPIO_PCMCIA_S0_RDYINT;
- skt->stat[SOC_STAT_RDY].name = "PCMCIA0 RDY";
-
- return ret;
-}
-
-static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt)
-{
- gpio_free(GPIO_PCMCIA_RESET);
-}
-
-
-static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- state->vs_3v = 0;
- state->vs_Xv = 0;
-}
-
-
-static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- switch (skt->nr) {
- case 0:
- if (state->flags & SS_RESET) {
- gpio_set_value(GPIO_PCMCIA_RESET, 1);
- udelay(10);
- gpio_set_value(GPIO_PCMCIA_RESET, 0);
- }
- break;
- }
-
- return 0;
-}
-
-static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
- .owner = THIS_MODULE,
- .hw_init = cmx270_pcmcia_hw_init,
- .hw_shutdown = cmx270_pcmcia_shutdown,
- .socket_state = cmx270_pcmcia_socket_state,
- .configure_socket = cmx270_pcmcia_configure_socket,
- .nr = 1,
-};
-
-static struct platform_device *cmx270_pcmcia_device;
-
-int __init cmx270_pcmcia_init(void)
-{
- int ret;
-
- cmx270_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
-
- if (!cmx270_pcmcia_device)
- return -ENOMEM;
-
- ret = platform_device_add_data(cmx270_pcmcia_device, &cmx270_pcmcia_ops,
- sizeof(cmx270_pcmcia_ops));
-
- if (ret == 0) {
- printk(KERN_INFO "Registering cm-x270 PCMCIA interface.\n");
- ret = platform_device_add(cmx270_pcmcia_device);
- }
-
- if (ret)
- platform_device_put(cmx270_pcmcia_device);
-
- return ret;
-}
-
-void __exit cmx270_pcmcia_exit(void)
-{
- platform_device_unregister(cmx270_pcmcia_device);
-}
diff --git a/drivers/pcmcia/pxa2xx_cm_x2xx.c b/drivers/pcmcia/pxa2xx_cm_x2xx.c
deleted file mode 100644
index 14eae238131d..000000000000
--- a/drivers/pcmcia/pxa2xx_cm_x2xx.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa/pxa_cm_x2xx.c
- *
- * Compulab Ltd., 2003, 2007, 2008
- * Mike Rapoport <mike@compulab.co.il>
- */
-
-#include <linux/module.h>
-
-#include <asm/mach-types.h>
-#include <mach/hardware.h>
-
-int cmx255_pcmcia_init(void);
-int cmx270_pcmcia_init(void);
-void cmx255_pcmcia_exit(void);
-void cmx270_pcmcia_exit(void);
-
-static int __init cmx2xx_pcmcia_init(void)
-{
- int ret = -ENODEV;
-
- if (machine_is_armcore() && cpu_is_pxa25x())
- ret = cmx255_pcmcia_init();
- else if (machine_is_armcore() && cpu_is_pxa27x())
- ret = cmx270_pcmcia_init();
-
- return ret;
-}
-
-static void __exit cmx2xx_pcmcia_exit(void)
-{
- if (machine_is_armcore() && cpu_is_pxa25x())
- cmx255_pcmcia_exit();
- else if (machine_is_armcore() && cpu_is_pxa27x())
- cmx270_pcmcia_exit();
-}
-
-module_init(cmx2xx_pcmcia_init);
-module_exit(cmx2xx_pcmcia_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
-MODULE_DESCRIPTION("CM-x2xx PCMCIA driver");
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index bb15a8bdbaab..6b6c578b5f92 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -690,6 +690,9 @@ static struct resource *__nonstatic_find_io_region(struct pcmcia_socket *s,
unsigned long min = base;
int ret;
+ if (!res)
+ return NULL;
+
data.mask = align - 1;
data.offset = base & data.mask;
data.map = &s_data->io_db;
@@ -809,6 +812,9 @@ static struct resource *nonstatic_find_mem_region(u_long base, u_long num,
unsigned long min, max;
int ret, i, j;
+ if (!res)
+ return NULL;
+
low = low || !(s->features & SS_CAP_PAGE_REGS);
data.mask = align - 1;
@@ -1076,7 +1082,7 @@ static ssize_t show_io_db(struct device *dev,
for (p = data->io_db.next; p != &data->io_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += sysfs_emit_at(buf, ret,
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
@@ -1133,7 +1139,7 @@ static ssize_t show_mem_db(struct device *dev,
p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += sysfs_emit_at(buf, ret,
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
@@ -1142,7 +1148,7 @@ static ssize_t show_mem_db(struct device *dev,
for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += sysfs_emit_at(buf, ret,
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index d1b220a1e1ab..c7a906664c36 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -38,8 +38,8 @@ static ssize_t pccard_show_type(struct device *dev, struct device_attribute *att
if (!(s->state & SOCKET_PRESENT))
return -ENODEV;
if (s->state & SOCKET_CARDBUS)
- return sprintf(buf, "32-bit\n");
- return sprintf(buf, "16-bit\n");
+ return sysfs_emit(buf, "32-bit\n");
+ return sysfs_emit(buf, "16-bit\n");
}
static DEVICE_ATTR(card_type, 0444, pccard_show_type, NULL);
@@ -51,9 +51,9 @@ static ssize_t pccard_show_voltage(struct device *dev, struct device_attribute *
if (!(s->state & SOCKET_PRESENT))
return -ENODEV;
if (s->socket.Vcc)
- return sprintf(buf, "%d.%dV\n", s->socket.Vcc / 10,
+ return sysfs_emit(buf, "%d.%dV\n", s->socket.Vcc / 10,
s->socket.Vcc % 10);
- return sprintf(buf, "X.XV\n");
+ return sysfs_emit(buf, "X.XV\n");
}
static DEVICE_ATTR(card_voltage, 0444, pccard_show_voltage, NULL);
@@ -63,7 +63,7 @@ static ssize_t pccard_show_vpp(struct device *dev, struct device_attribute *attr
struct pcmcia_socket *s = to_socket(dev);
if (!(s->state & SOCKET_PRESENT))
return -ENODEV;
- return sprintf(buf, "%d.%dV\n", s->socket.Vpp / 10, s->socket.Vpp % 10);
+ return sysfs_emit(buf, "%d.%dV\n", s->socket.Vpp / 10, s->socket.Vpp % 10);
}
static DEVICE_ATTR(card_vpp, 0444, pccard_show_vpp, NULL);
@@ -73,7 +73,7 @@ static ssize_t pccard_show_vcc(struct device *dev, struct device_attribute *attr
struct pcmcia_socket *s = to_socket(dev);
if (!(s->state & SOCKET_PRESENT))
return -ENODEV;
- return sprintf(buf, "%d.%dV\n", s->socket.Vcc / 10, s->socket.Vcc % 10);
+ return sysfs_emit(buf, "%d.%dV\n", s->socket.Vcc / 10, s->socket.Vcc % 10);
}
static DEVICE_ATTR(card_vcc, 0444, pccard_show_vcc, NULL);
@@ -98,7 +98,7 @@ static ssize_t pccard_show_card_pm_state(struct device *dev,
char *buf)
{
struct pcmcia_socket *s = to_socket(dev);
- return sprintf(buf, "%s\n", s->state & SOCKET_SUSPEND ? "off" : "on");
+ return sysfs_emit(buf, "%s\n", s->state & SOCKET_SUSPEND ? "off" : "on");
}
static ssize_t pccard_store_card_pm_state(struct device *dev,
@@ -145,7 +145,7 @@ static ssize_t pccard_show_irq_mask(struct device *dev,
char *buf)
{
struct pcmcia_socket *s = to_socket(dev);
- return sprintf(buf, "0x%04x\n", s->irq_mask);
+ return sysfs_emit(buf, "0x%04x\n", s->irq_mask);
}
static ssize_t pccard_store_irq_mask(struct device *dev,
@@ -177,7 +177,7 @@ static ssize_t pccard_show_resource(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pcmcia_socket *s = to_socket(dev);
- return sprintf(buf, "%s\n", s->resource_setup_done ? "yes" : "no");
+ return sysfs_emit(buf, "%s\n", s->resource_setup_done ? "yes" : "no");
}
static ssize_t pccard_store_resource(struct device *dev,
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 84bfc0e85d6b..3966a6ceb1ac 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -144,6 +144,7 @@ static inline u8 exca_readb(struct yenta_socket *socket, unsigned reg)
return val;
}
+/*
static inline u8 exca_readw(struct yenta_socket *socket, unsigned reg)
{
u16 val;
@@ -152,6 +153,7 @@ static inline u8 exca_readw(struct yenta_socket *socket, unsigned reg)
debug("%04x %04x\n", socket, reg, val);
return val;
}
+*/
static inline void exca_writeb(struct yenta_socket *socket, unsigned reg, u8 val)
{
@@ -176,16 +178,16 @@ static ssize_t show_yenta_registers(struct device *yentadev, struct device_attri
struct yenta_socket *socket = dev_get_drvdata(yentadev);
int offset = 0, i;
- offset = snprintf(buf, PAGE_SIZE, "CB registers:");
+ offset = sysfs_emit(buf, "CB registers:");
for (i = 0; i < 0x24; i += 4) {
unsigned val;
if (!(i & 15))
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
+ offset += sysfs_emit_at(buf, offset, "\n%02x:", i);
val = cb_readl(socket, i);
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, " %08x", val);
+ offset += sysfs_emit_at(buf, offset, " %08x", val);
}
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n\nExCA registers:");
+ offset += sysfs_emit_at(buf, offset, "\n\nExCA registers:");
for (i = 0; i < 0x45; i++) {
unsigned char val;
if (!(i & 7)) {
@@ -193,12 +195,12 @@ static ssize_t show_yenta_registers(struct device *yentadev, struct device_attri
memcpy(buf + offset, " -", 2);
offset += 2;
} else
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
+ offset += sysfs_emit_at(buf, offset, "\n%02x:", i);
}
val = exca_readb(socket, i);
- offset += scnprintf(buf + offset, PAGE_SIZE - offset, " %02x", val);
+ offset += sysfs_emit_at(buf, offset, " %02x", val);
}
- buf[offset++] = '\n';
+ sysfs_emit_at(buf, offset, "\n");
return offset;
}
@@ -1297,7 +1299,7 @@ static int yenta_probe(struct pci_dev *dev, const struct pci_device_id *id)
return ret;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int yenta_dev_suspend_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -1342,12 +1344,7 @@ static int yenta_dev_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops yenta_pm_ops = {
- .suspend_noirq = yenta_dev_suspend_noirq,
- .resume_noirq = yenta_dev_resume_noirq,
- .freeze_noirq = yenta_dev_suspend_noirq,
- .thaw_noirq = yenta_dev_resume_noirq,
- .poweroff_noirq = yenta_dev_suspend_noirq,
- .restore_noirq = yenta_dev_resume_noirq,
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(yenta_dev_suspend_noirq, yenta_dev_resume_noirq)
};
#define YENTA_PM_OPS (&yenta_pm_ops)
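The six hand-rolled noirq callbacks collapse into SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(); since that macro expands to nothing when CONFIG_PM_SLEEP is unset, the guard above is tightened from CONFIG_PM to CONFIG_PM_SLEEP to match. Paraphrased from <linux/pm.h>, with sleep support enabled the macro fills:

/* Paraphrased: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
 * sets all six noirq sleep callbacks when CONFIG_PM_SLEEP=y: */
.suspend_noirq	= suspend_fn,
.resume_noirq	= resume_fn,
.freeze_noirq	= suspend_fn,
.thaw_noirq	= resume_fn,
.poweroff_noirq	= suspend_fn,
.restore_noirq	= resume_fn,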
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 4374af292e6d..e1a0c44bc686 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -43,7 +43,7 @@ config ARM_CCN
config ARM_CMN
tristate "Arm CMN-600 PMU support"
- depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ARM64 || COMPILE_TEST
help
Support for PMU events monitoring on the Arm CMN-600 Coherent Mesh
Network interconnect.
@@ -139,6 +139,13 @@ config ARM_DMC620_PMU
Support for PMU events monitoring on the ARM DMC-620 memory
controller.
+config MARVELL_CN10K_TAD_PMU
+ tristate "Marvell CN10K LLC-TAD PMU"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ help
+ Provides support for Last-Level cache Tag-and-data Units (LLC-TAD)
+ performance monitors on CN10K family silicons.
+
source "drivers/perf/hisilicon/Kconfig"
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 5260b116c7da..2db5418d5b0a 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
+obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index bc3cba5f8c5d..0e48adce57ef 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -5,8 +5,10 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -23,7 +25,10 @@
#define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)
#define CMN_NODEID_DEVID(reg) ((reg) & 3)
+#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1)
#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1)
+#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3)
+#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7)
#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits)))
#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1))
@@ -34,20 +39,28 @@
#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
#define CMN_CHILD_NODE_EXTERNAL BIT(31)
-#define CMN_ADDR_NODE_PTR GENMASK(27, 14)
+#define CMN_MAX_DIMENSION 8
+#define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
+#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
-#define CMN_NODE_PTR_DEVID(ptr) (((ptr) >> 2) & 3)
-#define CMN_NODE_PTR_PID(ptr) ((ptr) & 1)
-#define CMN_NODE_PTR_X(ptr, bits) ((ptr) >> (6 + (bits)))
-#define CMN_NODE_PTR_Y(ptr, bits) (((ptr) >> 6) & ((1U << (bits)) - 1))
-
-#define CMN_MAX_XPS (8 * 8)
-
-/* The CFG node has one other useful purpose */
+/* The CFG node has various info besides the discovery tree */
#define CMN_CFGM_PERIPH_ID_2 0x0010
#define CMN_CFGM_PID2_REVISION GENMASK(7, 4)
-/* PMU registers occupy the 3rd 4KB page of each node's 16KB space */
+#define CMN_CFGM_INFO_GLOBAL 0x900
+#define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63)
+#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52)
+#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50)
+
+/* XPs also have some local topology info which has uses too */
+#define CMN_MXP__CONNECT_INFO_P0 0x0008
+#define CMN_MXP__CONNECT_INFO_P1 0x0010
+#define CMN_MXP__CONNECT_INFO_P2 0x0028
+#define CMN_MXP__CONNECT_INFO_P3 0x0030
+#define CMN_MXP__CONNECT_INFO_P4 0x0038
+#define CMN_MXP__CONNECT_INFO_P5 0x0040
+
+/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
/* For most nodes, this is all there is */
@@ -57,6 +70,7 @@
/* DTMs live in the PMU space of XP registers */
#define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00)
+#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17)
#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(6)
#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
#define CMN_DTM_WPn_CONFIG_WP_GRP BIT(4)
@@ -81,7 +95,11 @@
#define CMN_DTM_PMEVCNTSR 0x240
+#define CMN_DTM_UNIT_INFO 0x0910
+
#define CMN_DTM_NUM_COUNTERS 4
+/* Want more local counters? Why not replicate the whole DTM! Ugh... */
+#define CMN_DTM_OFFSET(n) ((n) * 0x200)
/* The DTC node is where the magic happens */
#define CMN_DT_DTC_CTL 0x0a00
@@ -122,11 +140,11 @@
/* Event attributes */
-#define CMN_CONFIG_TYPE GENMASK(15, 0)
-#define CMN_CONFIG_EVENTID GENMASK(23, 16)
-#define CMN_CONFIG_OCCUPID GENMASK(27, 24)
-#define CMN_CONFIG_BYNODEID BIT(31)
-#define CMN_CONFIG_NODEID GENMASK(47, 32)
+#define CMN_CONFIG_TYPE GENMASK_ULL(15, 0)
+#define CMN_CONFIG_EVENTID GENMASK_ULL(23, 16)
+#define CMN_CONFIG_OCCUPID GENMASK_ULL(27, 24)
+#define CMN_CONFIG_BYNODEID BIT_ULL(31)
+#define CMN_CONFIG_NODEID GENMASK_ULL(47, 32)
#define CMN_EVENT_TYPE(event) FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config)
#define CMN_EVENT_EVENTID(event) FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config)
@@ -134,13 +152,13 @@
#define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
#define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)
-#define CMN_CONFIG_WP_COMBINE GENMASK(27, 24)
-#define CMN_CONFIG_WP_DEV_SEL BIT(48)
-#define CMN_CONFIG_WP_CHN_SEL GENMASK(50, 49)
-#define CMN_CONFIG_WP_GRP BIT(52)
-#define CMN_CONFIG_WP_EXCLUSIVE BIT(53)
-#define CMN_CONFIG1_WP_VAL GENMASK(63, 0)
-#define CMN_CONFIG2_WP_MASK GENMASK(63, 0)
+#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24)
+#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
+#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
+#define CMN_CONFIG_WP_GRP BIT_ULL(56)
+#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57)
+#define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0)
+#define CMN_CONFIG2_WP_MASK GENMASK_ULL(63, 0)
#define CMN_EVENT_WP_COMBINE(event) FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config)
#define CMN_EVENT_WP_DEV_SEL(event) FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config)
@@ -155,7 +173,13 @@
#define CMN_WP_DOWN 2
-/* r0px probably don't exist in silicon, thankfully */
+enum cmn_model {
+ CMN_ANY = -1,
+ CMN600 = 1,
+ CI700 = 2,
+};
+
+/* CMN-600 r0px shouldn't exist in silicon, thankfully */
enum cmn_revision {
CMN600_R1P0,
CMN600_R1P1,
@@ -163,6 +187,10 @@ enum cmn_revision {
CMN600_R1P3,
CMN600_R2P0,
CMN600_R3P0,
+ CMN600_R3P1,
+ CI700_R0P0 = 0,
+ CI700_R1P0,
+ CI700_R2P0,
};
enum cmn_node_type {
@@ -174,9 +202,12 @@ enum cmn_node_type {
CMN_TYPE_HNF,
CMN_TYPE_XP,
CMN_TYPE_SBSX,
- CMN_TYPE_RNI = 0xa,
+ CMN_TYPE_MPAM_S,
+ CMN_TYPE_MPAM_NS,
+ CMN_TYPE_RNI,
CMN_TYPE_RND = 0xd,
CMN_TYPE_RNSAM = 0xf,
+ CMN_TYPE_MTSX,
CMN_TYPE_CXRA = 0x100,
CMN_TYPE_CXHA = 0x101,
CMN_TYPE_CXLA = 0x102,
@@ -189,32 +220,32 @@ struct arm_cmn_node {
u16 id, logid;
enum cmn_node_type type;
+ int dtm;
union {
- /* Device node */
+ /* DN/HN-F/CXHA */
struct {
- int to_xp;
- /* DN/HN-F/CXHA */
- unsigned int occupid_val;
- unsigned int occupid_count;
+ u8 occupid_val;
+ u8 occupid_count;
};
/* XP */
- struct {
- int dtc;
- u32 pmu_config_low;
- union {
- u8 input_sel[4];
- __le32 pmu_config_high;
- };
- s8 wp_event[4];
- };
+ u8 dtc;
};
-
union {
u8 event[4];
__le32 event_sel;
};
};
+struct arm_cmn_dtm {
+ void __iomem *base;
+ u32 pmu_config_low;
+ union {
+ u8 input_sel[4];
+ __le32 pmu_config_high;
+ };
+ s8 wp_event[4];
+};
+
struct arm_cmn_dtc {
void __iomem *base;
int irq;
@@ -231,35 +262,238 @@ struct arm_cmn_dtc {
struct arm_cmn {
struct device *dev;
void __iomem *base;
+ unsigned int state;
enum cmn_revision rev;
+ enum cmn_model model;
u8 mesh_x;
u8 mesh_y;
u16 num_xps;
u16 num_dns;
+ bool multi_dtm;
+ u8 ports_used;
+ struct {
+ unsigned int rsp_vc_num : 2;
+ unsigned int dat_vc_num : 2;
+ };
+
struct arm_cmn_node *xps;
struct arm_cmn_node *dns;
+ struct arm_cmn_dtm *dtms;
struct arm_cmn_dtc *dtc;
unsigned int num_dtcs;
int cpu;
struct hlist_node cpuhp_node;
- unsigned int state;
struct pmu pmu;
+ struct dentry *debug;
};
#define to_cmn(p) container_of(p, struct arm_cmn, pmu)
static int arm_cmn_hp_state;
+struct arm_cmn_nodeid {
+ u8 x;
+ u8 y;
+ u8 port;
+ u8 dev;
+};
+
+static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
+{
+ int dim = max(cmn->mesh_x, cmn->mesh_y);
+
+ return dim > 4 ? 3 : 2;
+}
+
+static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
+{
+ struct arm_cmn_nodeid nid;
+
+ if (cmn->num_xps == 1) {
+ nid.x = 0;
+ nid.y = 0;
+ nid.port = CMN_NODEID_1x1_PID(id);
+ nid.dev = CMN_NODEID_DEVID(id);
+ } else {
+ int bits = arm_cmn_xyidbits(cmn);
+
+ nid.x = CMN_NODEID_X(id, bits);
+ nid.y = CMN_NODEID_Y(id, bits);
+ if (cmn->ports_used & 0xc) {
+ nid.port = CMN_NODEID_EXT_PID(id);
+ nid.dev = CMN_NODEID_EXT_DEVID(id);
+ } else {
+ nid.port = CMN_NODEID_PID(id);
+ nid.dev = CMN_NODEID_DEVID(id);
+ }
+ }
+ return nid;
+}
+
+static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
+ const struct arm_cmn_node *dn)
+{
+ struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
+ int xp_idx = cmn->mesh_x * nid.y + nid.x;
+
+ return cmn->xps + xp_idx;
+}
+static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
+ enum cmn_node_type type)
+{
+ struct arm_cmn_node *dn;
+
+ for (dn = cmn->dns; dn->type; dn++)
+ if (dn->type == type)
+ return dn;
+ return NULL;
+}
+
+struct dentry *arm_cmn_debugfs;
+
+#ifdef CONFIG_DEBUG_FS
+static const char *arm_cmn_device_type(u8 type)
+{
+ switch(type) {
+ case 0x01: return " RN-I |";
+ case 0x02: return " RN-D |";
+ case 0x04: return " RN-F_B |";
+ case 0x05: return "RN-F_B_E|";
+ case 0x06: return " RN-F_A |";
+ case 0x07: return "RN-F_A_E|";
+ case 0x08: return " HN-T |";
+ case 0x09: return " HN-I |";
+ case 0x0a: return " HN-D |";
+ case 0x0c: return " SN-F |";
+ case 0x0d: return " SBSX |";
+ case 0x0e: return " HN-F |";
+ case 0x0f: return " SN-F_E |";
+ case 0x10: return " SN-F_D |";
+ case 0x11: return " CXHA |";
+ case 0x12: return " CXRA |";
+ case 0x13: return " CXRH |";
+ case 0x14: return " RN-F_D |";
+ case 0x15: return "RN-F_D_E|";
+ case 0x16: return " RN-F_C |";
+ case 0x17: return "RN-F_C_E|";
+ case 0x1c: return " MTSX |";
+ default: return " |";
+ }
+}
+
+static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
+{
+ struct arm_cmn *cmn = s->private;
+ struct arm_cmn_node *dn;
+
+ for (dn = cmn->dns; dn->type; dn++) {
+ struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
+
+ if (dn->type == CMN_TYPE_XP)
+ continue;
+ /* Ignore the extra components that will overlap on some ports */
+ if (dn->type < CMN_TYPE_HNI)
+ continue;
+
+ if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
+ continue;
+
+ seq_printf(s, " #%-2d |", dn->logid);
+ return;
+ }
+ seq_puts(s, " |");
+}
+
+static int arm_cmn_map_show(struct seq_file *s, void *data)
+{
+ struct arm_cmn *cmn = s->private;
+ int x, y, p, pmax = fls(cmn->ports_used);
+
+ seq_puts(s, " X");
+ for (x = 0; x < cmn->mesh_x; x++)
+ seq_printf(s, " %d ", x);
+ seq_puts(s, "\nY P D+");
+ y = cmn->mesh_y;
+ while (y--) {
+ int xp_base = cmn->mesh_x * y;
+ u8 port[6][CMN_MAX_DIMENSION];
+
+ for (x = 0; x < cmn->mesh_x; x++)
+ seq_puts(s, "--------+");
+
+ seq_printf(s, "\n%d |", y);
+ for (x = 0; x < cmn->mesh_x; x++) {
+ struct arm_cmn_node *xp = cmn->xps + xp_base + x;
+ void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET;
+
+ port[0][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P0);
+ port[1][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P1);
+ port[2][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P2);
+ port[3][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P3);
+ port[4][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P4);
+ port[5][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P5);
+ seq_printf(s, " XP #%-2d |", xp_base + x);
+ }
+
+ seq_puts(s, "\n |");
+ for (x = 0; x < cmn->mesh_x; x++) {
+ u8 dtc = cmn->xps[xp_base + x].dtc;
+
+ if (dtc & (dtc - 1))
+ seq_puts(s, " DTC ?? |");
+ else
+ seq_printf(s, " DTC %ld |", __ffs(dtc));
+ }
+ seq_puts(s, "\n |");
+ for (x = 0; x < cmn->mesh_x; x++)
+ seq_puts(s, "........|");
+
+ for (p = 0; p < pmax; p++) {
+ seq_printf(s, "\n %d |", p);
+ for (x = 0; x < cmn->mesh_x; x++)
+ seq_puts(s, arm_cmn_device_type(port[p][x]));
+ seq_puts(s, "\n 0|");
+ for (x = 0; x < cmn->mesh_x; x++)
+ arm_cmn_show_logid(s, x, y, p, 0);
+ seq_puts(s, "\n 1|");
+ for (x = 0; x < cmn->mesh_x; x++)
+ arm_cmn_show_logid(s, x, y, p, 1);
+ }
+ seq_puts(s, "\n-----+");
+ }
+ for (x = 0; x < cmn->mesh_x; x++)
+ seq_puts(s, "--------+");
+ seq_puts(s, "\n");
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(arm_cmn_map);
+
+static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id)
+{
+ const char *name = "map";
+
+ if (id > 0)
+ name = devm_kasprintf(cmn->dev, GFP_KERNEL, "map_%d", id);
+ if (!name)
+ return;
+
+ cmn->debug = debugfs_create_file(name, 0444, arm_cmn_debugfs, cmn, &arm_cmn_map_fops);
+}
+#else
+static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
+#endif
+
struct arm_cmn_hw_event {
struct arm_cmn_node *dn;
u64 dtm_idx[2];
unsigned int dtc_idx;
u8 dtcs_used;
u8 num_dns;
+ u8 dtm_offset;
};
#define for_each_hw_dn(hw, dn, i) \
@@ -283,6 +517,7 @@ static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos)
struct arm_cmn_event_attr {
struct device_attribute attr;
+ enum cmn_model model;
enum cmn_node_type type;
u8 eventid;
u8 occupid;
@@ -294,50 +529,22 @@ struct arm_cmn_format_attr {
int config;
};
-static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
-{
- return cmn->mesh_x > 4 || cmn->mesh_y > 4 ? 3 : 2;
-}
-
-static void arm_cmn_init_node_to_xp(const struct arm_cmn *cmn,
- struct arm_cmn_node *dn)
-{
- int bits = arm_cmn_xyidbits(cmn);
- int x = CMN_NODEID_X(dn->id, bits);
- int y = CMN_NODEID_Y(dn->id, bits);
- int xp_idx = cmn->mesh_x * y + x;
-
- dn->to_xp = (cmn->xps + xp_idx) - dn;
-}
-
-static struct arm_cmn_node *arm_cmn_node_to_xp(struct arm_cmn_node *dn)
-{
- return dn->type == CMN_TYPE_XP ? dn : dn + dn->to_xp;
-}
-
-static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
- enum cmn_node_type type)
-{
- int i;
-
- for (i = 0; i < cmn->num_dns; i++)
- if (cmn->dns[i].type == type)
- return &cmn->dns[i];
- return NULL;
-}
-
-#define CMN_EVENT_ATTR(_name, _type, _eventid, _occupid) \
+#define CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid) \
(&((struct arm_cmn_event_attr[]) {{ \
.attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL), \
+ .model = _model, \
.type = _type, \
.eventid = _eventid, \
.occupid = _occupid, \
}})[0].attr.attr)
-static bool arm_cmn_is_occup_event(enum cmn_node_type type, unsigned int id)
+static bool arm_cmn_is_occup_event(enum cmn_model model,
+ enum cmn_node_type type, unsigned int id)
{
- return (type == CMN_TYPE_DVM && id == 0x05) ||
- (type == CMN_TYPE_HNF && id == 0x0f);
+ if (type == CMN_TYPE_DVM)
+ return (model == CMN600 && id == 0x05) ||
+ (model == CI700 && id == 0x0c);
+ return type == CMN_TYPE_HNF && id == 0x0f;
}
static ssize_t arm_cmn_event_show(struct device *dev,
@@ -355,7 +562,7 @@ static ssize_t arm_cmn_event_show(struct device *dev,
"type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
eattr->type, eattr->eventid);
- if (arm_cmn_is_occup_event(eattr->type, eattr->eventid))
+ if (arm_cmn_is_occup_event(eattr->model, eattr->type, eattr->eventid))
return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
eattr->type, eattr->eventid, eattr->occupid);
@@ -370,60 +577,81 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
struct arm_cmn_event_attr *eattr;
- enum cmn_node_type type;
eattr = container_of(attr, typeof(*eattr), attr.attr);
- type = eattr->type;
- /* Watchpoints aren't nodes */
- if (type == CMN_TYPE_WP)
- type = CMN_TYPE_XP;
+ if (!(eattr->model & cmn->model))
+ return 0;
+
+ /* Watchpoints aren't nodes, so avoid confusion */
+ if (eattr->type == CMN_TYPE_WP)
+ return attr->mode;
+
+ /* Hide XP events for unused interfaces/channels */
+ if (eattr->type == CMN_TYPE_XP) {
+ unsigned int intf = (eattr->eventid >> 2) & 7;
+ unsigned int chan = eattr->eventid >> 5;
+
+ if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
+ return 0;
+
+ if ((chan == 5 && cmn->rsp_vc_num < 2) ||
+ (chan == 6 && cmn->dat_vc_num < 2))
+ return 0;
+ }
/* Revision-specific differences */
- if (cmn->rev < CMN600_R1P2) {
- if (type == CMN_TYPE_HNF && eattr->eventid == 0x1b)
+ if (cmn->model == CMN600 && cmn->rev < CMN600_R1P2) {
+ if (eattr->type == CMN_TYPE_HNF && eattr->eventid == 0x1b)
return 0;
}
- if (!arm_cmn_node(cmn, type))
+ if (!arm_cmn_node(cmn, eattr->type))
return 0;
return attr->mode;
}
-#define _CMN_EVENT_DVM(_name, _event, _occup) \
- CMN_EVENT_ATTR(dn_##_name, CMN_TYPE_DVM, _event, _occup)
+#define _CMN_EVENT_DVM(_model, _name, _event, _occup) \
+ CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup)
#define CMN_EVENT_DTC(_name) \
- CMN_EVENT_ATTR(dtc_##_name, CMN_TYPE_DTC, 0, 0)
-#define _CMN_EVENT_HNF(_name, _event, _occup) \
- CMN_EVENT_ATTR(hnf_##_name, CMN_TYPE_HNF, _event, _occup)
+ CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0, 0)
+#define _CMN_EVENT_HNF(_model, _name, _event, _occup) \
+ CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup)
#define CMN_EVENT_HNI(_name, _event) \
- CMN_EVENT_ATTR(hni_##_name, CMN_TYPE_HNI, _event, 0)
+ CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event, 0)
#define __CMN_EVENT_XP(_name, _event) \
- CMN_EVENT_ATTR(mxp_##_name, CMN_TYPE_XP, _event, 0)
-#define CMN_EVENT_SBSX(_name, _event) \
- CMN_EVENT_ATTR(sbsx_##_name, CMN_TYPE_SBSX, _event, 0)
-#define CMN_EVENT_RNID(_name, _event) \
- CMN_EVENT_ATTR(rnid_##_name, CMN_TYPE_RNI, _event, 0)
-
-#define CMN_EVENT_DVM(_name, _event) \
- _CMN_EVENT_DVM(_name, _event, 0)
-#define CMN_EVENT_HNF(_name, _event) \
- _CMN_EVENT_HNF(_name, _event, 0)
+ CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event, 0)
+#define CMN_EVENT_SBSX(_model, _name, _event) \
+ CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event, 0)
+#define CMN_EVENT_RNID(_model, _name, _event) \
+ CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event, 0)
+#define CMN_EVENT_MTSX(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event, 0)
+
+#define CMN_EVENT_DVM(_model, _name, _event) \
+ _CMN_EVENT_DVM(_model, _name, _event, 0)
+#define CMN_EVENT_HNF(_model, _name, _event) \
+ _CMN_EVENT_HNF(_model, _name, _event, 0)
#define _CMN_EVENT_XP(_name, _event) \
__CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \
__CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \
__CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)), \
__CMN_EVENT_XP(s_##_name, (_event) | (3 << 2)), \
__CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)), \
- __CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2))
+ __CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)), \
+ __CMN_EVENT_XP(p2_##_name, (_event) | (6 << 2)), \
+ __CMN_EVENT_XP(p3_##_name, (_event) | (7 << 2))
/* Good thing there are only 3 fundamental XP events... */
#define CMN_EVENT_XP(_name, _event) \
_CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)), \
_CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)), \
_CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)), \
- _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5))
+ _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)), \
+ _CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \
+ _CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \
+ _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5))
static struct attribute *arm_cmn_event_attrs[] = {
@@ -434,115 +662,152 @@ static struct attribute *arm_cmn_event_attrs[] = {
* slot, but our lazy short-cut of using the DTM counter index for
* the PMU index as well happens to avoid that by construction.
*/
- CMN_EVENT_DVM(rxreq_dvmop, 0x01),
- CMN_EVENT_DVM(rxreq_dvmsync, 0x02),
- CMN_EVENT_DVM(rxreq_dvmop_vmid_filtered, 0x03),
- CMN_EVENT_DVM(rxreq_retried, 0x04),
- _CMN_EVENT_DVM(rxreq_trk_occupancy_all, 0x05, 0),
- _CMN_EVENT_DVM(rxreq_trk_occupancy_dvmop, 0x05, 1),
- _CMN_EVENT_DVM(rxreq_trk_occupancy_dvmsync, 0x05, 2),
-
- CMN_EVENT_HNF(cache_miss, 0x01),
- CMN_EVENT_HNF(slc_sf_cache_access, 0x02),
- CMN_EVENT_HNF(cache_fill, 0x03),
- CMN_EVENT_HNF(pocq_retry, 0x04),
- CMN_EVENT_HNF(pocq_reqs_recvd, 0x05),
- CMN_EVENT_HNF(sf_hit, 0x06),
- CMN_EVENT_HNF(sf_evictions, 0x07),
- CMN_EVENT_HNF(dir_snoops_sent, 0x08),
- CMN_EVENT_HNF(brd_snoops_sent, 0x09),
- CMN_EVENT_HNF(slc_eviction, 0x0a),
- CMN_EVENT_HNF(slc_fill_invalid_way, 0x0b),
- CMN_EVENT_HNF(mc_retries, 0x0c),
- CMN_EVENT_HNF(mc_reqs, 0x0d),
- CMN_EVENT_HNF(qos_hh_retry, 0x0e),
- _CMN_EVENT_HNF(qos_pocq_occupancy_all, 0x0f, 0),
- _CMN_EVENT_HNF(qos_pocq_occupancy_read, 0x0f, 1),
- _CMN_EVENT_HNF(qos_pocq_occupancy_write, 0x0f, 2),
- _CMN_EVENT_HNF(qos_pocq_occupancy_atomic, 0x0f, 3),
- _CMN_EVENT_HNF(qos_pocq_occupancy_stash, 0x0f, 4),
- CMN_EVENT_HNF(pocq_addrhaz, 0x10),
- CMN_EVENT_HNF(pocq_atomic_addrhaz, 0x11),
- CMN_EVENT_HNF(ld_st_swp_adq_full, 0x12),
- CMN_EVENT_HNF(cmp_adq_full, 0x13),
- CMN_EVENT_HNF(txdat_stall, 0x14),
- CMN_EVENT_HNF(txrsp_stall, 0x15),
- CMN_EVENT_HNF(seq_full, 0x16),
- CMN_EVENT_HNF(seq_hit, 0x17),
- CMN_EVENT_HNF(snp_sent, 0x18),
- CMN_EVENT_HNF(sfbi_dir_snp_sent, 0x19),
- CMN_EVENT_HNF(sfbi_brd_snp_sent, 0x1a),
- CMN_EVENT_HNF(snp_sent_untrk, 0x1b),
- CMN_EVENT_HNF(intv_dirty, 0x1c),
- CMN_EVENT_HNF(stash_snp_sent, 0x1d),
- CMN_EVENT_HNF(stash_data_pull, 0x1e),
- CMN_EVENT_HNF(snp_fwded, 0x1f),
-
- CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20),
- CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21),
- CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl, 0x22),
- CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl, 0x23),
- CMN_EVENT_HNI(wdb_occ_cnt_ovfl, 0x24),
- CMN_EVENT_HNI(rrt_rd_alloc, 0x25),
- CMN_EVENT_HNI(rrt_wr_alloc, 0x26),
- CMN_EVENT_HNI(rdt_rd_alloc, 0x27),
- CMN_EVENT_HNI(rdt_wr_alloc, 0x28),
- CMN_EVENT_HNI(wdb_alloc, 0x29),
- CMN_EVENT_HNI(txrsp_retryack, 0x2a),
- CMN_EVENT_HNI(arvalid_no_arready, 0x2b),
- CMN_EVENT_HNI(arready_no_arvalid, 0x2c),
- CMN_EVENT_HNI(awvalid_no_awready, 0x2d),
- CMN_EVENT_HNI(awready_no_awvalid, 0x2e),
- CMN_EVENT_HNI(wvalid_no_wready, 0x2f),
- CMN_EVENT_HNI(txdat_stall, 0x30),
- CMN_EVENT_HNI(nonpcie_serialization, 0x31),
- CMN_EVENT_HNI(pcie_serialization, 0x32),
-
- CMN_EVENT_XP(txflit_valid, 0x01),
- CMN_EVENT_XP(txflit_stall, 0x02),
- CMN_EVENT_XP(partial_dat_flit, 0x03),
+ CMN_EVENT_DVM(CMN600, rxreq_dvmop, 0x01),
+ CMN_EVENT_DVM(CMN600, rxreq_dvmsync, 0x02),
+ CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03),
+ CMN_EVENT_DVM(CMN600, rxreq_retried, 0x04),
+ _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_all, 0x05, 0),
+ _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmop, 0x05, 1),
+ _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmsync, 0x05, 2),
+ CMN_EVENT_DVM(CI700, dvmop_tlbi, 0x01),
+ CMN_EVENT_DVM(CI700, dvmop_bpi, 0x02),
+ CMN_EVENT_DVM(CI700, dvmop_pici, 0x03),
+ CMN_EVENT_DVM(CI700, dvmop_vici, 0x04),
+ CMN_EVENT_DVM(CI700, dvmsync, 0x05),
+ CMN_EVENT_DVM(CI700, vmid_filtered, 0x06),
+ CMN_EVENT_DVM(CI700, rndop_filtered, 0x07),
+ CMN_EVENT_DVM(CI700, retry, 0x08),
+ CMN_EVENT_DVM(CI700, txsnp_flitv, 0x09),
+ CMN_EVENT_DVM(CI700, txsnp_stall, 0x0a),
+ CMN_EVENT_DVM(CI700, trkfull, 0x0b),
+ _CMN_EVENT_DVM(CI700, trk_occupancy_all, 0x0c, 0),
+ _CMN_EVENT_DVM(CI700, trk_occupancy_dvmop, 0x0c, 1),
+ _CMN_EVENT_DVM(CI700, trk_occupancy_dvmsync, 0x0c, 2),
+
+ CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01),
+ CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02),
+ CMN_EVENT_HNF(CMN_ANY, cache_fill, 0x03),
+ CMN_EVENT_HNF(CMN_ANY, pocq_retry, 0x04),
+ CMN_EVENT_HNF(CMN_ANY, pocq_reqs_recvd, 0x05),
+ CMN_EVENT_HNF(CMN_ANY, sf_hit, 0x06),
+ CMN_EVENT_HNF(CMN_ANY, sf_evictions, 0x07),
+ CMN_EVENT_HNF(CMN_ANY, dir_snoops_sent, 0x08),
+ CMN_EVENT_HNF(CMN_ANY, brd_snoops_sent, 0x09),
+ CMN_EVENT_HNF(CMN_ANY, slc_eviction, 0x0a),
+ CMN_EVENT_HNF(CMN_ANY, slc_fill_invalid_way, 0x0b),
+ CMN_EVENT_HNF(CMN_ANY, mc_retries, 0x0c),
+ CMN_EVENT_HNF(CMN_ANY, mc_reqs, 0x0d),
+ CMN_EVENT_HNF(CMN_ANY, qos_hh_retry, 0x0e),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4),
+ CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz, 0x10),
+ CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz, 0x11),
+ CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full, 0x12),
+ CMN_EVENT_HNF(CMN_ANY, cmp_adq_full, 0x13),
+ CMN_EVENT_HNF(CMN_ANY, txdat_stall, 0x14),
+ CMN_EVENT_HNF(CMN_ANY, txrsp_stall, 0x15),
+ CMN_EVENT_HNF(CMN_ANY, seq_full, 0x16),
+ CMN_EVENT_HNF(CMN_ANY, seq_hit, 0x17),
+ CMN_EVENT_HNF(CMN_ANY, snp_sent, 0x18),
+ CMN_EVENT_HNF(CMN_ANY, sfbi_dir_snp_sent, 0x19),
+ CMN_EVENT_HNF(CMN_ANY, sfbi_brd_snp_sent, 0x1a),
+ CMN_EVENT_HNF(CMN_ANY, snp_sent_untrk, 0x1b),
+ CMN_EVENT_HNF(CMN_ANY, intv_dirty, 0x1c),
+ CMN_EVENT_HNF(CMN_ANY, stash_snp_sent, 0x1d),
+ CMN_EVENT_HNF(CMN_ANY, stash_data_pull, 0x1e),
+ CMN_EVENT_HNF(CMN_ANY, snp_fwded, 0x1f),
+ CMN_EVENT_HNF(CI700, atomic_fwd, 0x20),
+ CMN_EVENT_HNF(CI700, mpam_hardlim, 0x21),
+ CMN_EVENT_HNF(CI700, mpam_softlim, 0x22),
+
+ CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20),
+ CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21),
+ CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl, 0x22),
+ CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl, 0x23),
+ CMN_EVENT_HNI(wdb_occ_cnt_ovfl, 0x24),
+ CMN_EVENT_HNI(rrt_rd_alloc, 0x25),
+ CMN_EVENT_HNI(rrt_wr_alloc, 0x26),
+ CMN_EVENT_HNI(rdt_rd_alloc, 0x27),
+ CMN_EVENT_HNI(rdt_wr_alloc, 0x28),
+ CMN_EVENT_HNI(wdb_alloc, 0x29),
+ CMN_EVENT_HNI(txrsp_retryack, 0x2a),
+ CMN_EVENT_HNI(arvalid_no_arready, 0x2b),
+ CMN_EVENT_HNI(arready_no_arvalid, 0x2c),
+ CMN_EVENT_HNI(awvalid_no_awready, 0x2d),
+ CMN_EVENT_HNI(awready_no_awvalid, 0x2e),
+ CMN_EVENT_HNI(wvalid_no_wready, 0x2f),
+ CMN_EVENT_HNI(txdat_stall, 0x30),
+ CMN_EVENT_HNI(nonpcie_serialization, 0x31),
+ CMN_EVENT_HNI(pcie_serialization, 0x32),
+
+ CMN_EVENT_XP(txflit_valid, 0x01),
+ CMN_EVENT_XP(txflit_stall, 0x02),
+ CMN_EVENT_XP(partial_dat_flit, 0x03),
/* We treat watchpoints as a special made-up class of XP events */
- CMN_EVENT_ATTR(watchpoint_up, CMN_TYPE_WP, 0, 0),
- CMN_EVENT_ATTR(watchpoint_down, CMN_TYPE_WP, 2, 0),
-
- CMN_EVENT_SBSX(rd_req, 0x01),
- CMN_EVENT_SBSX(wr_req, 0x02),
- CMN_EVENT_SBSX(cmo_req, 0x03),
- CMN_EVENT_SBSX(txrsp_retryack, 0x04),
- CMN_EVENT_SBSX(txdat_flitv, 0x05),
- CMN_EVENT_SBSX(txrsp_flitv, 0x06),
- CMN_EVENT_SBSX(rd_req_trkr_occ_cnt_ovfl, 0x11),
- CMN_EVENT_SBSX(wr_req_trkr_occ_cnt_ovfl, 0x12),
- CMN_EVENT_SBSX(cmo_req_trkr_occ_cnt_ovfl, 0x13),
- CMN_EVENT_SBSX(wdb_occ_cnt_ovfl, 0x14),
- CMN_EVENT_SBSX(rd_axi_trkr_occ_cnt_ovfl, 0x15),
- CMN_EVENT_SBSX(cmo_axi_trkr_occ_cnt_ovfl, 0x16),
- CMN_EVENT_SBSX(arvalid_no_arready, 0x21),
- CMN_EVENT_SBSX(awvalid_no_awready, 0x22),
- CMN_EVENT_SBSX(wvalid_no_wready, 0x23),
- CMN_EVENT_SBSX(txdat_stall, 0x24),
- CMN_EVENT_SBSX(txrsp_stall, 0x25),
-
- CMN_EVENT_RNID(s0_rdata_beats, 0x01),
- CMN_EVENT_RNID(s1_rdata_beats, 0x02),
- CMN_EVENT_RNID(s2_rdata_beats, 0x03),
- CMN_EVENT_RNID(rxdat_flits, 0x04),
- CMN_EVENT_RNID(txdat_flits, 0x05),
- CMN_EVENT_RNID(txreq_flits_total, 0x06),
- CMN_EVENT_RNID(txreq_flits_retried, 0x07),
- CMN_EVENT_RNID(rrt_occ_ovfl, 0x08),
- CMN_EVENT_RNID(wrt_occ_ovfl, 0x09),
- CMN_EVENT_RNID(txreq_flits_replayed, 0x0a),
- CMN_EVENT_RNID(wrcancel_sent, 0x0b),
- CMN_EVENT_RNID(s0_wdata_beats, 0x0c),
- CMN_EVENT_RNID(s1_wdata_beats, 0x0d),
- CMN_EVENT_RNID(s2_wdata_beats, 0x0e),
- CMN_EVENT_RNID(rrt_alloc, 0x0f),
- CMN_EVENT_RNID(wrt_alloc, 0x10),
- CMN_EVENT_RNID(rdb_unord, 0x11),
- CMN_EVENT_RNID(rdb_replay, 0x12),
- CMN_EVENT_RNID(rdb_hybrid, 0x13),
- CMN_EVENT_RNID(rdb_ord, 0x14),
+ CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP, 0),
+ CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN, 0),
+
+ CMN_EVENT_SBSX(CMN_ANY, rd_req, 0x01),
+ CMN_EVENT_SBSX(CMN_ANY, wr_req, 0x02),
+ CMN_EVENT_SBSX(CMN_ANY, cmo_req, 0x03),
+ CMN_EVENT_SBSX(CMN_ANY, txrsp_retryack, 0x04),
+ CMN_EVENT_SBSX(CMN_ANY, txdat_flitv, 0x05),
+ CMN_EVENT_SBSX(CMN_ANY, txrsp_flitv, 0x06),
+ CMN_EVENT_SBSX(CMN_ANY, rd_req_trkr_occ_cnt_ovfl, 0x11),
+ CMN_EVENT_SBSX(CMN_ANY, wr_req_trkr_occ_cnt_ovfl, 0x12),
+ CMN_EVENT_SBSX(CMN_ANY, cmo_req_trkr_occ_cnt_ovfl, 0x13),
+ CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl, 0x14),
+ CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15),
+ CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16),
+ CMN_EVENT_SBSX(CI700, rdb_occ_cnt_ovfl, 0x17),
+ CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready, 0x21),
+ CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready, 0x22),
+ CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready, 0x23),
+ CMN_EVENT_SBSX(CMN_ANY, txdat_stall, 0x24),
+ CMN_EVENT_SBSX(CMN_ANY, txrsp_stall, 0x25),
+
+ CMN_EVENT_RNID(CMN_ANY, s0_rdata_beats, 0x01),
+ CMN_EVENT_RNID(CMN_ANY, s1_rdata_beats, 0x02),
+ CMN_EVENT_RNID(CMN_ANY, s2_rdata_beats, 0x03),
+ CMN_EVENT_RNID(CMN_ANY, rxdat_flits, 0x04),
+ CMN_EVENT_RNID(CMN_ANY, txdat_flits, 0x05),
+ CMN_EVENT_RNID(CMN_ANY, txreq_flits_total, 0x06),
+ CMN_EVENT_RNID(CMN_ANY, txreq_flits_retried, 0x07),
+ CMN_EVENT_RNID(CMN_ANY, rrt_occ_ovfl, 0x08),
+ CMN_EVENT_RNID(CMN_ANY, wrt_occ_ovfl, 0x09),
+ CMN_EVENT_RNID(CMN_ANY, txreq_flits_replayed, 0x0a),
+ CMN_EVENT_RNID(CMN_ANY, wrcancel_sent, 0x0b),
+ CMN_EVENT_RNID(CMN_ANY, s0_wdata_beats, 0x0c),
+ CMN_EVENT_RNID(CMN_ANY, s1_wdata_beats, 0x0d),
+ CMN_EVENT_RNID(CMN_ANY, s2_wdata_beats, 0x0e),
+ CMN_EVENT_RNID(CMN_ANY, rrt_alloc, 0x0f),
+ CMN_EVENT_RNID(CMN_ANY, wrt_alloc, 0x10),
+ CMN_EVENT_RNID(CMN600, rdb_unord, 0x11),
+ CMN_EVENT_RNID(CMN600, rdb_replay, 0x12),
+ CMN_EVENT_RNID(CMN600, rdb_hybrid, 0x13),
+ CMN_EVENT_RNID(CMN600, rdb_ord, 0x14),
+ CMN_EVENT_RNID(CI700, padb_occ_ovfl, 0x11),
+ CMN_EVENT_RNID(CI700, rpdb_occ_ovfl, 0x12),
+ CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice1, 0x13),
+ CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice2, 0x14),
+ CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice3, 0x15),
+ CMN_EVENT_RNID(CI700, wrt_throttled, 0x16),
+
+ CMN_EVENT_MTSX(tc_lookup, 0x01),
+ CMN_EVENT_MTSX(tc_fill, 0x02),
+ CMN_EVENT_MTSX(tc_miss, 0x03),
+ CMN_EVENT_MTSX(tdb_forward, 0x04),
+ CMN_EVENT_MTSX(tcq_hazard, 0x05),
+ CMN_EVENT_MTSX(tcq_rd_alloc, 0x06),
+ CMN_EVENT_MTSX(tcq_wr_alloc, 0x07),
+ CMN_EVENT_MTSX(tcq_cmo_alloc, 0x08),
+ CMN_EVENT_MTSX(axi_rd_req, 0x09),
+ CMN_EVENT_MTSX(axi_wr_req, 0x0a),
+ CMN_EVENT_MTSX(tcq_occ_cnt_ovfl, 0x0b),
+ CMN_EVENT_MTSX(tdb_occ_cnt_ovfl, 0x0c),
NULL
};
@@ -644,7 +909,8 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
- FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc);
+ FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc) |
+ FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
if (combine && !grp)
config |= CMN_DTM_WPn_CONFIG_WP_COMBINE;
@@ -679,18 +945,19 @@ static void arm_cmn_pmu_disable(struct pmu *pmu)
static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
bool snapshot)
{
+ struct arm_cmn_dtm *dtm = NULL;
struct arm_cmn_node *dn;
- unsigned int i, offset;
- u64 count = 0;
+ unsigned int i, offset, dtm_idx;
+ u64 reg, count = 0;
offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT;
for_each_hw_dn(hw, dn, i) {
- struct arm_cmn_node *xp = arm_cmn_node_to_xp(dn);
- int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
- u64 reg = readq_relaxed(xp->pmu_base + offset);
- u16 dtm_count = reg >> (dtm_idx * 16);
-
- count += dtm_count;
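+ /* Only re-read the 64-bit counter word when crossing into a new DTM */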
+ if (dtm != &cmn->dtms[dn->dtm]) {
+ dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
+ reg = readq_relaxed(dtm->base + offset);
+ }
+ dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
+ count += (u16)(reg >> (dtm_idx * 16));
}
return count;
}
@@ -774,8 +1041,10 @@ static void arm_cmn_event_start(struct perf_event *event, int flags)
u64 mask = CMN_EVENT_WP_MASK(event);
for_each_hw_dn(hw, dn, i) {
- writeq_relaxed(val, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx));
- writeq_relaxed(mask, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx));
+ void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
+
+ writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx));
+ writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx));
}
} else for_each_hw_dn(hw, dn, i) {
int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
@@ -800,8 +1069,10 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
int wp_idx = arm_cmn_wp_idx(event);
for_each_hw_dn(hw, dn, i) {
- writeq_relaxed(0, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx));
- writeq_relaxed(~0ULL, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx));
+ void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
+
+ writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx));
+ writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx));
}
} else for_each_hw_dn(hw, dn, i) {
int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
@@ -814,14 +1085,15 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
}
struct arm_cmn_val {
- u8 dtm_count[CMN_MAX_XPS];
- u8 occupid[CMN_MAX_XPS];
- u8 wp[CMN_MAX_XPS][4];
+ u8 dtm_count[CMN_MAX_DTMS];
+ u8 occupid[CMN_MAX_DTMS];
+ u8 wp[CMN_MAX_DTMS][4];
int dtc_count;
bool cycles;
};
-static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *event)
+static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
+ struct perf_event *event)
{
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
struct arm_cmn_node *dn;
@@ -839,33 +1111,33 @@ static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *ev
}
val->dtc_count++;
- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event)))
+ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
occupid = CMN_EVENT_OCCUPID(event) + 1;
else
occupid = 0;
for_each_hw_dn(hw, dn, i) {
- int wp_idx, xp = arm_cmn_node_to_xp(dn)->logid;
+ int wp_idx, dtm = dn->dtm;
- val->dtm_count[xp]++;
- val->occupid[xp] = occupid;
+ val->dtm_count[dtm]++;
+ val->occupid[dtm] = occupid;
if (type != CMN_TYPE_WP)
continue;
wp_idx = arm_cmn_wp_idx(event);
- val->wp[xp][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1;
+ val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1;
}
}
-static int arm_cmn_validate_group(struct perf_event *event)
+static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
{
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
struct arm_cmn_node *dn;
struct perf_event *sibling, *leader = event->group_leader;
enum cmn_node_type type;
- struct arm_cmn_val val;
- int i;
+ struct arm_cmn_val *val;
+ int i, ret = -EINVAL;
u8 occupid;
if (leader == event)
@@ -874,54 +1146,61 @@ static int arm_cmn_validate_group(struct perf_event *event)
if (event->pmu != leader->pmu && !is_software_event(leader))
return -EINVAL;
- memset(&val, 0, sizeof(val));
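+ /* arm_cmn_val is now sized by CMN_MAX_DTMS, so it's too big for the stack */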
+ val = kzalloc(sizeof(*val), GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
- arm_cmn_val_add_event(&val, leader);
+ arm_cmn_val_add_event(cmn, val, leader);
for_each_sibling_event(sibling, leader)
- arm_cmn_val_add_event(&val, sibling);
+ arm_cmn_val_add_event(cmn, val, sibling);
type = CMN_EVENT_TYPE(event);
- if (type == CMN_TYPE_DTC)
- return val.cycles ? -EINVAL : 0;
+ if (type == CMN_TYPE_DTC) {
+ ret = val->cycles ? -EINVAL : 0;
+ goto done;
+ }
- if (val.dtc_count == CMN_DT_NUM_COUNTERS)
- return -EINVAL;
+ if (val->dtc_count == CMN_DT_NUM_COUNTERS)
+ goto done;
- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event)))
+ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
occupid = CMN_EVENT_OCCUPID(event) + 1;
else
occupid = 0;
for_each_hw_dn(hw, dn, i) {
- int wp_idx, wp_cmb, xp = arm_cmn_node_to_xp(dn)->logid;
+ int wp_idx, wp_cmb, dtm = dn->dtm;
- if (val.dtm_count[xp] == CMN_DTM_NUM_COUNTERS)
- return -EINVAL;
+ if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS)
+ goto done;
- if (occupid && val.occupid[xp] && occupid != val.occupid[xp])
- return -EINVAL;
+ if (occupid && val->occupid[dtm] && occupid != val->occupid[dtm])
+ goto done;
if (type != CMN_TYPE_WP)
continue;
wp_idx = arm_cmn_wp_idx(event);
- if (val.wp[xp][wp_idx])
- return -EINVAL;
+ if (val->wp[dtm][wp_idx])
+ goto done;
- wp_cmb = val.wp[xp][wp_idx ^ 1];
+ wp_cmb = val->wp[dtm][wp_idx ^ 1];
if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1)
- return -EINVAL;
+ goto done;
}
- return 0;
+ ret = 0;
+done:
+ kfree(val);
+ return ret;
}
static int arm_cmn_event_init(struct perf_event *event)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ struct arm_cmn_node *dn;
enum cmn_node_type type;
- unsigned int i;
bool bynodeid;
u16 nodeid, eventid;
@@ -947,38 +1226,37 @@ static int arm_cmn_event_init(struct perf_event *event)
eventid = CMN_EVENT_EVENTID(event);
if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN)
return -EINVAL;
+ /* ...but the DTM may depend on which port we're watching */
+ if (cmn->multi_dtm)
+ hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
}
bynodeid = CMN_EVENT_BYNODEID(event);
nodeid = CMN_EVENT_NODEID(event);
hw->dn = arm_cmn_node(cmn, type);
- for (i = hw->dn - cmn->dns; i < cmn->num_dns && cmn->dns[i].type == type; i++) {
- if (!bynodeid) {
- hw->num_dns++;
- } else if (cmn->dns[i].id != nodeid) {
+ if (!hw->dn)
+ return -EINVAL;
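+ /* Walk the contiguous run of same-type nodes, noting which DTC domains they feed */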
+ for (dn = hw->dn; dn->type == type; dn++) {
+ if (bynodeid && dn->id != nodeid) {
hw->dn++;
- } else {
- hw->num_dns = 1;
- break;
+ continue;
}
+ hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc;
+ hw->num_dns++;
+ if (bynodeid)
+ break;
}
if (!hw->num_dns) {
- int bits = arm_cmn_xyidbits(cmn);
+ struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid);
dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n",
- nodeid, CMN_NODEID_X(nodeid, bits), CMN_NODEID_Y(nodeid, bits),
- CMN_NODEID_PID(nodeid), CMN_NODEID_DEVID(nodeid), type);
+ nodeid, nid.x, nid.y, nid.port, nid.dev, type);
return -EINVAL;
}
- /*
- * By assuming events count in all DTC domains, we cunningly avoid
- * needing to know anything about how XPs are assigned to domains.
- */
- hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
- return arm_cmn_validate_group(event);
+ return arm_cmn_validate_group(cmn, event);
}
static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
@@ -988,17 +1266,17 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
enum cmn_node_type type = CMN_EVENT_TYPE(event);
while (i--) {
- struct arm_cmn_node *xp = arm_cmn_node_to_xp(hw->dn + i);
+ struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset;
unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
if (type == CMN_TYPE_WP)
- hw->dn[i].wp_event[arm_cmn_wp_idx(event)] = -1;
+ dtm->wp_event[arm_cmn_wp_idx(event)] = -1;
- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event)))
+ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
hw->dn[i].occupid_count--;
- xp->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
- writel_relaxed(xp->pmu_config_low, xp->pmu_base + CMN_DTM_PMU_CONFIG);
+ dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
+ writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
}
memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));
@@ -1040,12 +1318,12 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
/* ...then the local counters to feed it. */
for_each_hw_dn(hw, dn, i) {
- struct arm_cmn_node *xp = arm_cmn_node_to_xp(dn);
+ struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
unsigned int dtm_idx, shift;
u64 reg;
dtm_idx = 0;
- while (xp->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx))
+ while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx))
if (++dtm_idx == CMN_DTM_NUM_COUNTERS)
goto free_dtms;
@@ -1055,26 +1333,28 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
int tmp, wp_idx = arm_cmn_wp_idx(event);
u32 cfg = arm_cmn_wp_config(event);
- if (dn->wp_event[wp_idx] >= 0)
+ if (dtm->wp_event[wp_idx] >= 0)
goto free_dtms;
- tmp = dn->wp_event[wp_idx ^ 1];
+ tmp = dtm->wp_event[wp_idx ^ 1];
if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) !=
CMN_EVENT_WP_COMBINE(dtc->counters[tmp]))
goto free_dtms;
input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx;
- dn->wp_event[wp_idx] = dtc_idx;
- writel_relaxed(cfg, dn->pmu_base + CMN_DTM_WPn_CONFIG(wp_idx));
+ dtm->wp_event[wp_idx] = dtc_idx;
+ writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx));
} else {
- unsigned int port = CMN_NODEID_PID(dn->id);
- unsigned int dev = CMN_NODEID_DEVID(dn->id);
+ struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
+
+ if (cmn->multi_dtm)
+ nid.port %= 2;
input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx +
- (port << 4) + (dev << 2);
+ (nid.port << 4) + (nid.dev << 2);
- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) {
- int occupid = CMN_EVENT_OCCUPID(event);
+ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) {
+ u8 occupid = CMN_EVENT_OCCUPID(event);
if (dn->occupid_count == 0) {
dn->occupid_val = occupid;
@@ -1089,13 +1369,13 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
arm_cmn_set_index(hw->dtm_idx, i, dtm_idx);
- xp->input_sel[dtm_idx] = input_sel;
+ dtm->input_sel[dtm_idx] = input_sel;
shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx);
- xp->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift);
- xp->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift;
- xp->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx);
- reg = (u64)le32_to_cpu(xp->pmu_config_high) << 32 | xp->pmu_config_low;
- writeq_relaxed(reg, xp->pmu_base + CMN_DTM_PMU_CONFIG);
+ dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift);
+ dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift;
+ dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx);
+ reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low;
+ writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG);
}
/* Go go go! */
@@ -1147,23 +1427,47 @@ static int arm_cmn_commit_txn(struct pmu *pmu)
return 0;
}
-static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+static void arm_cmn_migrate(struct arm_cmn *cmn, unsigned int cpu)
+{
+ unsigned int i;
+
+ perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu);
+ for (i = 0; i < cmn->num_dtcs; i++)
+ irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu));
+ cmn->cpu = cpu;
+}
+
+static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
struct arm_cmn *cmn;
- unsigned int i, target;
+ int node;
- cmn = hlist_entry_safe(node, struct arm_cmn, cpuhp_node);
- if (cpu != cmn->cpu)
- return 0;
+ cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
+ node = dev_to_node(cmn->dev);
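+ /* Adopt this CPU if it is local to the device and the current owner is not */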
+ if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
+ arm_cmn_migrate(cmn, cpu);
+ return 0;
+}
+
+static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct arm_cmn *cmn;
+ unsigned int target;
+ int node;
+ cpumask_t mask;
- target = cpumask_any_but(cpu_online_mask, cpu);
- if (target >= nr_cpu_ids)
+ cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
+ if (cpu != cmn->cpu)
return 0;
- perf_pmu_migrate_context(&cmn->pmu, cpu, target);
- for (i = 0; i < cmn->num_dtcs; i++)
- irq_set_affinity(cmn->dtc[i].irq, cpumask_of(target));
- cmn->cpu = target;
+ node = dev_to_node(cmn->dev);
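+ /* Prefer another CPU on the PMU's NUMA node, else fall back to any online CPU */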
+ if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
+ cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
+ target = cpumask_any(&mask);
+ else
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target < nr_cpu_ids)
+ arm_cmn_migrate(cmn, target);
return 0;
}
@@ -1231,23 +1535,22 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn)
return 0;
}
-static void arm_cmn_init_dtm(struct arm_cmn_node *xp)
+static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, int idx)
{
int i;
+ dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx);
+ dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
for (i = 0; i < 4; i++) {
- xp->wp_event[i] = -1;
- writeq_relaxed(0, xp->pmu_base + CMN_DTM_WPn_MASK(i));
- writeq_relaxed(~0ULL, xp->pmu_base + CMN_DTM_WPn_VAL(i));
+ dtm->wp_event[i] = -1;
+ writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i));
+ writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i));
}
- xp->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
- xp->dtc = -1;
}
static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx)
{
struct arm_cmn_dtc *dtc = cmn->dtc + idx;
- struct arm_cmn_node *xp;
dtc->base = dn->pmu_base - CMN_PMU_OFFSET;
dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx);
@@ -1258,10 +1561,6 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id
writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);
writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
- /* We do at least know that a DTC's XP must be in that DTC's domain */
- xp = arm_cmn_node_to_xp(dn);
- xp->dtc = idx;
-
return 0;
}
@@ -1278,8 +1577,9 @@ static int arm_cmn_node_cmp(const void *a, const void *b)
static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
{
- struct arm_cmn_node *dn;
+ struct arm_cmn_node *dn, *xp;
int dtc_idx = 0;
+ u8 dtcs_present = (1 << cmn->num_dtcs) - 1;
cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL);
if (!cmn->dtc)
@@ -1289,14 +1589,26 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);
- for (dn = cmn->dns; dn < cmn->dns + cmn->num_dns; dn++) {
- if (dn->type != CMN_TYPE_XP)
- arm_cmn_init_node_to_xp(cmn, dn);
- else if (cmn->num_dtcs == 1)
- dn->dtc = 0;
+ for (dn = cmn->dns; dn->type; dn++) {
+ if (dn->type == CMN_TYPE_XP) {
+ dn->dtc &= dtcs_present;
+ continue;
+ }
- if (dn->type == CMN_TYPE_DTC)
- arm_cmn_init_dtc(cmn, dn, dtc_idx++);
+ xp = arm_cmn_node_to_xp(cmn, dn);
+ dn->dtm = xp->dtm;
+ if (cmn->multi_dtm)
+ dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2;
+
+ if (dn->type == CMN_TYPE_DTC) {
+ int err;
+ /* We do at least know that a DTC's XP must be in that DTC's domain */
+ if (xp->dtc == 0xf)
+ xp->dtc = 1 << dtc_idx;
+ err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);
+ if (err)
+ return err;
+ }
/* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */
if (dn->type == CMN_TYPE_RND)
@@ -1335,19 +1647,25 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
{
void __iomem *cfg_region;
struct arm_cmn_node cfg, *dn;
+ struct arm_cmn_dtm *dtm;
u16 child_count, child_poff;
u32 xp_offset[CMN_MAX_XPS];
u64 reg;
int i, j;
+ size_t sz;
+
+ arm_cmn_init_node_info(cmn, rgn_offset, &cfg);
+ if (cfg.type != CMN_TYPE_CFG)
+ return -ENODEV;
cfg_region = cmn->base + rgn_offset;
reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2);
cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
- dev_dbg(cmn->dev, "periph_id_2 revision: %d\n", cmn->rev);
- arm_cmn_init_node_info(cmn, rgn_offset, &cfg);
- if (cfg.type != CMN_TYPE_CFG)
- return -ENODEV;
+ reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
+ cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN;
+ cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
+ cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);
reg = readq_relaxed(cfg_region + CMN_CHILD_INFO);
child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
@@ -1365,20 +1683,28 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg);
}
- /* Cheeky +1 to help terminate pointer-based iteration */
- cmn->dns = devm_kcalloc(cmn->dev, cmn->num_dns + 1,
- sizeof(*cmn->dns), GFP_KERNEL);
- if (!cmn->dns)
+ /* Cheeky +1 to help terminate pointer-based iteration later */
+ dn = devm_kcalloc(cmn->dev, cmn->num_dns + 1, sizeof(*dn), GFP_KERNEL);
+ if (!dn)
+ return -ENOMEM;
+
+ /* Initial safe upper bound on DTMs for any possible mesh layout */
+ i = cmn->num_xps;
+ if (cmn->multi_dtm)
+ i += cmn->num_xps + 1;
+ dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL);
+ if (!dtm)
return -ENOMEM;
/* Pass 2: now we can actually populate the nodes */
- dn = cmn->dns;
+ cmn->dns = dn;
+ cmn->dtms = dtm;
for (i = 0; i < cmn->num_xps; i++) {
void __iomem *xp_region = cmn->base + xp_offset[i];
struct arm_cmn_node *xp = dn++;
+ unsigned int xp_ports = 0;
arm_cmn_init_node_info(cmn, xp_offset[i], xp);
- arm_cmn_init_dtm(xp);
/*
* Thanks to the order in which XP logical IDs seem to be
* assigned, we can handily infer the mesh X dimension by
@@ -1388,6 +1714,40 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
if (xp->id == (1 << 3))
cmn->mesh_x = xp->logid;
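+ /* CMN-600 gives no per-XP DTC domain info, so assume any domain is possible */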
+ if (cmn->model == CMN600)
+ xp->dtc = 0xf;
+ else
+ xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
+
+ xp->dtm = dtm - cmn->dtms;
+ arm_cmn_init_dtm(dtm++, xp, 0);
+ /*
+ * Keeping track of connected ports will let us filter out
+ * unnecessary XP events easily. We can also reliably infer the
+ * "extra device ports" configuration for the node ID format
+ * from this, since in that case we will see at least one XP
+ * with port 2 connected, for the HN-D.
+ */
+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0))
+ xp_ports |= BIT(0);
+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1))
+ xp_ports |= BIT(1);
+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2))
+ xp_ports |= BIT(2);
+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3))
+ xp_ports |= BIT(3);
+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4))
+ xp_ports |= BIT(4);
+ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5))
+ xp_ports |= BIT(5);
+
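+ /* With multiple DTMs, ports 2-3 and 4-5 each get a DTM of their own */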
+ if (cmn->multi_dtm && (xp_ports & 0xc))
+ arm_cmn_init_dtm(dtm++, xp, 1);
+ if (cmn->multi_dtm && (xp_ports & 0x30))
+ arm_cmn_init_dtm(dtm++, xp, 2);
+
+ cmn->ports_used |= xp_ports;
+
reg = readq_relaxed(xp_region + CMN_CHILD_INFO);
child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
@@ -1422,11 +1782,14 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_SBSX:
case CMN_TYPE_RNI:
case CMN_TYPE_RND:
+ case CMN_TYPE_MTSX:
case CMN_TYPE_CXRA:
case CMN_TYPE_CXHA:
dn++;
break;
/* Nothing to see here */
+ case CMN_TYPE_MPAM_S:
+ case CMN_TYPE_MPAM_NS:
case CMN_TYPE_RNSAM:
case CMN_TYPE_CXLA:
break;
@@ -1441,6 +1804,16 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
/* Correct for any nodes we skipped */
cmn->num_dns = dn - cmn->dns;
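+ /* Shrink the arrays to their final sizes; keep the originals if realloc fails */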
+ sz = (void *)(dn + 1) - (void *)cmn->dns;
+ dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL);
+ if (dn)
+ cmn->dns = dn;
+
+ sz = (void *)dtm - (void *)cmn->dtms;
+ dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL);
+ if (dtm)
+ cmn->dtms = dtm;
+
/*
* If mesh_x wasn't set during discovery then we never saw
* an XP at (0,1), thus we must have an Nx1 configuration.
@@ -1449,13 +1822,20 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
cmn->mesh_x = cmn->num_xps;
cmn->mesh_y = cmn->num_xps / cmn->mesh_x;
- dev_dbg(cmn->dev, "mesh %dx%d, ID width %d\n",
- cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn));
+ /* 1x1 config plays havoc with XP event encodings */
+ if (cmn->num_xps == 1)
+ dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n");
+
+ dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev);
+ reg = cmn->ports_used;
+ dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n",
+ cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg,
+ cmn->multi_dtm ? ", multi-DTM" : "");
return 0;
}
-static int arm_cmn_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn)
+static int arm_cmn600_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn)
{
struct resource *cfg, *root;
@@ -1482,21 +1862,11 @@ static int arm_cmn_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn)
return root->start - cfg->start;
}
-static int arm_cmn_of_probe(struct platform_device *pdev, struct arm_cmn *cmn)
+static int arm_cmn600_of_probe(struct device_node *np)
{
- struct device_node *np = pdev->dev.of_node;
u32 rootnode;
- int ret;
-
- cmn->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(cmn->base))
- return PTR_ERR(cmn->base);
- ret = of_property_read_u32(np, "arm,root-node", &rootnode);
- if (ret)
- return ret;
-
- return rootnode;
+ return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode;
}
static int arm_cmn_probe(struct platform_device *pdev)
@@ -1504,19 +1874,26 @@ static int arm_cmn_probe(struct platform_device *pdev)
struct arm_cmn *cmn;
const char *name;
static atomic_t id;
- int err, rootnode;
+ int err, rootnode, this_id;
cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL);
if (!cmn)
return -ENOMEM;
cmn->dev = &pdev->dev;
+ cmn->model = (unsigned long)device_get_match_data(cmn->dev);
platform_set_drvdata(pdev, cmn);
- if (has_acpi_companion(cmn->dev))
- rootnode = arm_cmn_acpi_probe(pdev, cmn);
- else
- rootnode = arm_cmn_of_probe(pdev, cmn);
+ if (cmn->model == CMN600 && has_acpi_companion(cmn->dev)) {
+ rootnode = arm_cmn600_acpi_probe(pdev, cmn);
+ } else {
+ rootnode = 0;
+ cmn->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cmn->base))
+ return PTR_ERR(cmn->base);
+ if (cmn->model == CMN600)
+ rootnode = arm_cmn600_of_probe(pdev->dev.of_node);
+ }
if (rootnode < 0)
return rootnode;
@@ -1532,7 +1909,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
if (err)
return err;
- cmn->cpu = raw_smp_processor_id();
+ cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
cmn->pmu = (struct pmu) {
.module = THIS_MODULE,
.attr_groups = arm_cmn_attr_groups,
@@ -1551,7 +1928,8 @@ static int arm_cmn_probe(struct platform_device *pdev)
.cancel_txn = arm_cmn_end_txn,
};
- name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", atomic_fetch_inc(&id));
+ this_id = atomic_fetch_inc(&id);
+ name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id);
if (!name)
return -ENOMEM;
@@ -1561,7 +1939,10 @@ static int arm_cmn_probe(struct platform_device *pdev)
err = perf_pmu_register(&cmn->pmu, name, -1);
if (err)
- cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
+ cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
+ else
+ arm_cmn_debugfs_init(cmn, this_id);
+
return err;
}
@@ -1572,13 +1953,15 @@ static int arm_cmn_remove(struct platform_device *pdev)
writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL);
perf_pmu_unregister(&cmn->pmu);
- cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
+ cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
+ debugfs_remove(cmn->debug);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id arm_cmn_of_match[] = {
- { .compatible = "arm,cmn-600", },
+ { .compatible = "arm,cmn-600", .data = (void *)CMN600 },
+ { .compatible = "arm,ci-700", .data = (void *)CI700 },
{}
};
MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
@@ -1586,7 +1969,7 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id arm_cmn_acpi_match[] = {
- { "ARMHC600", },
+ { "ARMHC600", CMN600 },
{}
};
MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
@@ -1607,15 +1990,20 @@ static int __init arm_cmn_init(void)
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
- "perf/arm/cmn:online", NULL,
+ "perf/arm/cmn:online",
+ arm_cmn_pmu_online_cpu,
arm_cmn_pmu_offline_cpu);
if (ret < 0)
return ret;
arm_cmn_hp_state = ret;
+ arm_cmn_debugfs = debugfs_create_dir("arm-cmn", NULL);
+
ret = platform_driver_register(&arm_cmn_driver);
- if (ret)
+ if (ret) {
cpuhp_remove_multi_state(arm_cmn_hp_state);
+ debugfs_remove(arm_cmn_debugfs);
+ }
return ret;
}
@@ -1623,6 +2011,7 @@ static void __exit arm_cmn_exit(void)
{
platform_driver_unregister(&arm_cmn_driver);
cpuhp_remove_multi_state(arm_cmn_hp_state);
+ debugfs_remove(arm_cmn_debugfs);
}
module_init(arm_cmn_init);
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 226348822ab3..c49108a72865 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -47,6 +47,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/msi.h>
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
@@ -75,6 +76,10 @@
#define SMMU_PMCG_CR 0xE04
#define SMMU_PMCG_CR_ENABLE BIT(0)
#define SMMU_PMCG_IIDR 0xE08
+#define SMMU_PMCG_IIDR_PRODUCTID GENMASK(31, 20)
+#define SMMU_PMCG_IIDR_VARIANT GENMASK(19, 16)
+#define SMMU_PMCG_IIDR_REVISION GENMASK(15, 12)
+#define SMMU_PMCG_IIDR_IMPLEMENTER GENMASK(11, 0)
#define SMMU_PMCG_CEID0 0xE20
#define SMMU_PMCG_CEID1 0xE28
#define SMMU_PMCG_IRQ_CTRL 0xE50
@@ -83,6 +88,20 @@
#define SMMU_PMCG_IRQ_CFG1 0xE60
#define SMMU_PMCG_IRQ_CFG2 0xE64
+/* IMP-DEF ID registers */
+#define SMMU_PMCG_PIDR0 0xFE0
+#define SMMU_PMCG_PIDR0_PART_0 GENMASK(7, 0)
+#define SMMU_PMCG_PIDR1 0xFE4
+#define SMMU_PMCG_PIDR1_DES_0 GENMASK(7, 4)
+#define SMMU_PMCG_PIDR1_PART_1 GENMASK(3, 0)
+#define SMMU_PMCG_PIDR2 0xFE8
+#define SMMU_PMCG_PIDR2_REVISION GENMASK(7, 4)
+#define SMMU_PMCG_PIDR2_DES_1 GENMASK(2, 0)
+#define SMMU_PMCG_PIDR3 0xFEC
+#define SMMU_PMCG_PIDR3_REVAND GENMASK(7, 4)
+#define SMMU_PMCG_PIDR4 0xFD0
+#define SMMU_PMCG_PIDR4_DES_2 GENMASK(3, 0)
+
/* MSI config fields */
#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE 0x1
@@ -684,7 +703,6 @@ static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
- struct msi_desc *desc;
struct device *dev = pmu->dev;
int ret;
@@ -701,9 +719,7 @@ static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
return;
}
- desc = first_msi_entry(dev);
- if (desc)
- pmu->irq = desc->irq;
+ pmu->irq = msi_get_virq(dev, 0);
/* Add callback to free MSIs on teardown */
devm_add_action(dev, smmu_pmu_free_msis, dev);
@@ -754,6 +770,41 @@ static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
}
+static bool smmu_pmu_coresight_id_regs(struct smmu_pmu *smmu_pmu)
+{
+ return of_device_is_compatible(smmu_pmu->dev->of_node,
+ "arm,mmu-600-pmcg");
+}
+
+static void smmu_pmu_get_iidr(struct smmu_pmu *smmu_pmu)
+{
+ u32 iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);
+
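+ /* IIDR reads as zero on MMU-600's PMCG; synthesize it from the CoreSight ID registers */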
+ if (!iidr && smmu_pmu_coresight_id_regs(smmu_pmu)) {
+ u32 pidr0 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR0);
+ u32 pidr1 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR1);
+ u32 pidr2 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR2);
+ u32 pidr3 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR3);
+ u32 pidr4 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR4);
+
+ u32 productid = FIELD_GET(SMMU_PMCG_PIDR0_PART_0, pidr0) |
+ (FIELD_GET(SMMU_PMCG_PIDR1_PART_1, pidr1) << 8);
+ u32 variant = FIELD_GET(SMMU_PMCG_PIDR2_REVISION, pidr2);
+ u32 revision = FIELD_GET(SMMU_PMCG_PIDR3_REVAND, pidr3);
+ u32 implementer =
+ FIELD_GET(SMMU_PMCG_PIDR1_DES_0, pidr1) |
+ (FIELD_GET(SMMU_PMCG_PIDR2_DES_1, pidr2) << 4) |
+ (FIELD_GET(SMMU_PMCG_PIDR4_DES_2, pidr4) << 8);
+
+ iidr = FIELD_PREP(SMMU_PMCG_IIDR_PRODUCTID, productid) |
+ FIELD_PREP(SMMU_PMCG_IIDR_VARIANT, variant) |
+ FIELD_PREP(SMMU_PMCG_IIDR_REVISION, revision) |
+ FIELD_PREP(SMMU_PMCG_IIDR_IMPLEMENTER, implementer);
+ }
+
+ smmu_pmu->iidr = iidr;
+}
+
static int smmu_pmu_probe(struct platform_device *pdev)
{
struct smmu_pmu *smmu_pmu;
@@ -825,7 +876,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
return err;
}
- smmu_pmu->iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);
+ smmu_pmu_get_iidr(smmu_pmu);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
(res_0->start) >> SMMU_PMCG_PA_SHIFT);
@@ -834,7 +885,8 @@ static int smmu_pmu_probe(struct platform_device *pdev)
return -EINVAL;
}
- smmu_pmu_get_acpi_options(smmu_pmu);
+ if (!dev->of_node)
+ smmu_pmu_get_acpi_options(smmu_pmu);
/* Pick one CPU to be the preferred one to use */
smmu_pmu->on_cpu = raw_smp_processor_id();
@@ -884,9 +936,18 @@ static void smmu_pmu_shutdown(struct platform_device *pdev)
smmu_pmu_disable(&smmu_pmu->pmu);
}
+#ifdef CONFIG_OF
+static const struct of_device_id smmu_pmu_of_match[] = {
+ { .compatible = "arm,smmu-v3-pmcg" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, smmu_pmu_of_match);
+#endif
+
static struct platform_driver smmu_pmu_driver = {
.driver = {
.name = "arm-smmu-v3-pmcg",
+ .of_match_table = of_match_ptr(smmu_pmu_of_match),
.suppress_bind_attrs = true,
},
.probe = smmu_pmu_probe,
diff --git a/drivers/perf/hisilicon/Kconfig b/drivers/perf/hisilicon/Kconfig
index c5d1b7019fff..5546218b5598 100644
--- a/drivers/perf/hisilicon/Kconfig
+++ b/drivers/perf/hisilicon/Kconfig
@@ -5,3 +5,12 @@ config HISI_PMU
help
Support for HiSilicon SoC L3 Cache performance monitor, Hydra Home
Agent performance monitor and DDR Controller performance monitor.
+
+config HISI_PCIE_PMU
+ tristate "HiSilicon PCIE PERF PMU"
+ depends on PCI && ARM64
+ help
+ Provide support for the HiSilicon PCIe performance monitoring unit (PMU)
+ RCiEP devices.
+ Adds the PCIe PMU to the perf events system for monitoring latency,
+ bandwidth, etc.
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index 7643c9f93e36..506ed39e3266 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -2,3 +2,5 @@
obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
hisi_uncore_pa_pmu.o
+
+obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
new file mode 100644
index 000000000000..21771708597d
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
@@ -0,0 +1,948 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This driver adds support for the PCIe PMU RCiEP device. Related
+ * perf events include bandwidth, latency, etc.
+ *
+ * Copyright (C) 2021 HiSilicon Limited
+ * Author: Qi Liu <liuqi115@huawei.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/perf_event.h>
+
+#define DRV_NAME "hisi_pcie_pmu"
+/* Define registers */
+#define HISI_PCIE_GLOBAL_CTRL 0x00
+#define HISI_PCIE_EVENT_CTRL 0x010
+#define HISI_PCIE_CNT 0x090
+#define HISI_PCIE_EXT_CNT 0x110
+#define HISI_PCIE_INT_STAT 0x150
+#define HISI_PCIE_INT_MASK 0x154
+#define HISI_PCIE_REG_BDF 0xfe0
+#define HISI_PCIE_REG_VERSION 0xfe4
+#define HISI_PCIE_REG_INFO 0xfe8
+
+/* Define command in HISI_PCIE_GLOBAL_CTRL */
+#define HISI_PCIE_GLOBAL_EN 0x01
+#define HISI_PCIE_GLOBAL_NONE 0
+
+/* Define command in HISI_PCIE_EVENT_CTRL */
+#define HISI_PCIE_EVENT_EN BIT_ULL(20)
+#define HISI_PCIE_RESET_CNT BIT_ULL(22)
+#define HISI_PCIE_INIT_SET BIT_ULL(34)
+#define HISI_PCIE_THR_EN BIT_ULL(26)
+#define HISI_PCIE_TARGET_EN BIT_ULL(32)
+#define HISI_PCIE_TRIG_EN BIT_ULL(52)
+
+/* Define offsets in HISI_PCIE_EVENT_CTRL */
+#define HISI_PCIE_EVENT_M GENMASK_ULL(15, 0)
+#define HISI_PCIE_THR_MODE_M GENMASK_ULL(27, 27)
+#define HISI_PCIE_THR_M GENMASK_ULL(31, 28)
+#define HISI_PCIE_TARGET_M GENMASK_ULL(52, 36)
+#define HISI_PCIE_TRIG_MODE_M GENMASK_ULL(53, 53)
+#define HISI_PCIE_TRIG_M GENMASK_ULL(59, 56)
+
+#define HISI_PCIE_MAX_COUNTERS 8
+#define HISI_PCIE_REG_STEP 8
+#define HISI_PCIE_THR_MAX_VAL 10
+#define HISI_PCIE_TRIG_MAX_VAL 10
+#define HISI_PCIE_MAX_PERIOD (GENMASK_ULL(63, 0))
+#define HISI_PCIE_INIT_VAL BIT_ULL(63)
+
+struct hisi_pcie_pmu {
+ struct perf_event *hw_events[HISI_PCIE_MAX_COUNTERS];
+ struct hlist_node node;
+ struct pci_dev *pdev;
+ struct pmu pmu;
+ void __iomem *base;
+ int irq;
+ u32 identifier;
+ /* Minimum and maximum BDF of root ports monitored by PMU */
+ u16 bdf_min;
+ u16 bdf_max;
+ int on_cpu;
+};
+
+struct hisi_pcie_reg_pair {
+ u16 lo;
+ u16 hi;
+};
+
+#define to_pcie_pmu(p) (container_of((p), struct hisi_pcie_pmu, pmu))
+#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff)
+
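+/* Generate hisi_pcie_get_<name>() helpers extracting filter fields from the event's config words */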
+#define HISI_PCIE_PMU_FILTER_ATTR(_name, _config, _hi, _lo) \
+ static u64 hisi_pcie_get_##_name(struct perf_event *event) \
+ { \
+ return FIELD_GET(GENMASK(_hi, _lo), event->attr._config); \
+ } \
+
+HISI_PCIE_PMU_FILTER_ATTR(event, config, 16, 0);
+HISI_PCIE_PMU_FILTER_ATTR(thr_len, config1, 3, 0);
+HISI_PCIE_PMU_FILTER_ATTR(thr_mode, config1, 4, 4);
+HISI_PCIE_PMU_FILTER_ATTR(trig_len, config1, 8, 5);
+HISI_PCIE_PMU_FILTER_ATTR(trig_mode, config1, 9, 9);
+HISI_PCIE_PMU_FILTER_ATTR(port, config2, 15, 0);
+HISI_PCIE_PMU_FILTER_ATTR(bdf, config2, 31, 16);
+
+static ssize_t hisi_pcie_format_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
+}
+
+static ssize_t hisi_pcie_event_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct perf_pmu_events_attr *pmu_attr =
+ container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id);
+}
+
+#define HISI_PCIE_PMU_FORMAT_ATTR(_name, _format) \
+ (&((struct dev_ext_attribute[]){ \
+ { .attr = __ATTR(_name, 0444, hisi_pcie_format_sysfs_show, \
+ NULL), \
+ .var = (void *)_format } \
+ })[0].attr.attr)
+
+#define HISI_PCIE_PMU_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, hisi_pcie_event_sysfs_show, _id)
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static ssize_t identifier_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%#x\n", pcie_pmu->identifier);
+}
+static DEVICE_ATTR_RO(identifier);
+
+static ssize_t bus_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%#04x\n", PCI_BUS_NUM(pcie_pmu->bdf_min));
+}
+static DEVICE_ATTR_RO(bus);
+
+static struct hisi_pcie_reg_pair
+hisi_pcie_parse_reg_value(struct hisi_pcie_pmu *pcie_pmu, u32 reg_off)
+{
+ u32 val = readl_relaxed(pcie_pmu->base + reg_off);
+ struct hisi_pcie_reg_pair regs = {
+ .lo = val,
+ .hi = val >> 16,
+ };
+
+ return regs;
+}
+
+/*
+ * The hardware counter and ext_counter work together for bandwidth, latency,
+ * bus utilization and buffer occupancy events. For example, for the RX memory
+ * write latency events (index = 0x0010), the counter counts total delay cycles
+ * and the ext_counter counts the number of RX memory write PCIe packets.
+ *
+ * Since we don't want the PMU driver to combine these two values itself,
+ * "delay cycles" is treated as an independent event (index = 0x0010) and
+ * "RX memory write packet count" as another (index = 0x10010). Bit 16 is used
+ * to distinguish them, while bits 0-15 hold the "real" event index that is
+ * programmed into HISI_PCIE_EVENT_CTRL.
+ */
+#define EXT_COUNTER_IS_USED(idx) ((idx) & BIT(16))
+
+static u32 hisi_pcie_get_real_event(struct perf_event *event)
+{
+ return hisi_pcie_get_event(event) & GENMASK(15, 0);
+}
+
+static u32 hisi_pcie_pmu_get_offset(u32 offset, u32 idx)
+{
+ return offset + HISI_PCIE_REG_STEP * idx;
+}
+
+static u32 hisi_pcie_pmu_readl(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset,
+ u32 idx)
+{
+ u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);
+
+ return readl_relaxed(pcie_pmu->base + offset);
+}
+
+static void hisi_pcie_pmu_writel(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u32 val)
+{
+ u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);
+
+ writel_relaxed(val, pcie_pmu->base + offset);
+}
+
+static u64 hisi_pcie_pmu_readq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx)
+{
+ u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);
+
+ return readq_relaxed(pcie_pmu->base + offset);
+}
+
+static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u64 val)
+{
+ u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);
+
+ writeq_relaxed(val, pcie_pmu->base + offset);
+}
+
+static void hisi_pcie_pmu_config_filter(struct perf_event *event)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 reg = HISI_PCIE_INIT_SET;
+ u64 port, trig_len, thr_len;
+
+ /* Config HISI_PCIE_EVENT_CTRL according to event. */
+ reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event));
+
+ /* Config HISI_PCIE_EVENT_CTRL according to root port or EP device. */
+ port = hisi_pcie_get_port(event);
+ if (port)
+ reg |= FIELD_PREP(HISI_PCIE_TARGET_M, port);
+ else
+ reg |= HISI_PCIE_TARGET_EN |
+ FIELD_PREP(HISI_PCIE_TARGET_M, hisi_pcie_get_bdf(event));
+
+ /* Config HISI_PCIE_EVENT_CTRL according to trigger condition. */
+ trig_len = hisi_pcie_get_trig_len(event);
+ if (trig_len) {
+ reg |= FIELD_PREP(HISI_PCIE_TRIG_M, trig_len);
+ reg |= FIELD_PREP(HISI_PCIE_TRIG_MODE_M, hisi_pcie_get_trig_mode(event));
+ reg |= HISI_PCIE_TRIG_EN;
+ }
+
+ /* Config HISI_PCIE_EVENT_CTRL according to threshold condition. */
+ thr_len = hisi_pcie_get_thr_len(event);
+ if (thr_len) {
+ reg |= FIELD_PREP(HISI_PCIE_THR_M, thr_len);
+ reg |= FIELD_PREP(HISI_PCIE_THR_MODE_M, hisi_pcie_get_thr_mode(event));
+ reg |= HISI_PCIE_THR_EN;
+ }
+
+ hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg);
+}
+
+static void hisi_pcie_pmu_clear_filter(struct perf_event *event)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, HISI_PCIE_INIT_SET);
+}
+
+static bool hisi_pcie_pmu_valid_requester_id(struct hisi_pcie_pmu *pcie_pmu, u32 bdf)
+{
+ struct pci_dev *root_port, *pdev;
+ u16 rp_bdf;
+
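+ /* The target device must sit below one of the root ports this PMU monitors */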
+ pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pcie_pmu->pdev->bus), PCI_BUS_NUM(bdf),
+ GET_PCI_DEVFN(bdf));
+ if (!pdev)
+ return false;
+
+ root_port = pcie_find_root_port(pdev);
+ if (!root_port) {
+ pci_dev_put(pdev);
+ return false;
+ }
+
+ pci_dev_put(pdev);
+ rp_bdf = pci_dev_id(root_port);
+ return rp_bdf >= pcie_pmu->bdf_min && rp_bdf <= pcie_pmu->bdf_max;
+}
+
+static bool hisi_pcie_pmu_valid_filter(struct perf_event *event,
+ struct hisi_pcie_pmu *pcie_pmu)
+{
+ u32 requester_id = hisi_pcie_get_bdf(event);
+
+ if (hisi_pcie_get_thr_len(event) > HISI_PCIE_THR_MAX_VAL)
+ return false;
+
+ if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL)
+ return false;
+
+ if (requester_id) {
+ if (!hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
+ return false;
+ }
+
+ return true;
+}
+
+static bool hisi_pcie_pmu_cmp_event(struct perf_event *target,
+ struct perf_event *event)
+{
+ return hisi_pcie_get_real_event(target) == hisi_pcie_get_real_event(event);
+}
+
+static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct perf_event *event_group[HISI_PCIE_MAX_COUNTERS];
+ int counters = 1;
+ int num;
+
+ event_group[0] = leader;
+ if (!is_software_event(leader)) {
+ if (leader->pmu != event->pmu)
+ return false;
+
+ if (leader != event && !hisi_pcie_pmu_cmp_event(leader, event))
+ event_group[counters++] = event;
+ }
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (is_software_event(sibling))
+ continue;
+
+ if (sibling->pmu != event->pmu)
+ return false;
+
+ for (num = 0; num < counters; num++) {
+ if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
+ break;
+ }
+
+ if (num == counters)
+ event_group[counters++] = sibling;
+ }
+
+ return counters <= HISI_PCIE_MAX_COUNTERS;
+}
+
+static int hisi_pcie_pmu_event_init(struct perf_event *event)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ event->cpu = pcie_pmu->on_cpu;
+
+ if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
+ hwc->event_base = HISI_PCIE_EXT_CNT;
+ else
+ hwc->event_base = HISI_PCIE_CNT;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* Sampling is not supported. */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ if (!hisi_pcie_pmu_valid_filter(event, pcie_pmu))
+ return -EINVAL;
+
+ if (!hisi_pcie_pmu_validate_event_group(event))
+ return -EINVAL;
+
+ return 0;
+}
+
+static u64 hisi_pcie_pmu_read_counter(struct perf_event *event)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ u32 idx = event->hw.idx;
+
+ return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx);
+}
+
+static int hisi_pcie_pmu_find_related_event(struct hisi_pcie_pmu *pcie_pmu,
+ struct perf_event *event)
+{
+ struct perf_event *sibling;
+ int idx;
+
+ for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
+ sibling = pcie_pmu->hw_events[idx];
+ if (!sibling)
+ continue;
+
+ if (!hisi_pcie_pmu_cmp_event(sibling, event))
+ continue;
+
+ /* Related events must be used in the same group */
+ if (sibling->group_leader == event->group_leader)
+ return idx;
+ else
+ return -EINVAL;
+ }
+
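+ /* idx == HISI_PCIE_MAX_COUNTERS tells the caller to allocate a fresh counter */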
+ return idx;
+}
+
+static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu)
+{
+ int idx;
+
+ for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
+ if (!pcie_pmu->hw_events[idx])
+ return idx;
+ }
+
+ return -EINVAL;
+}
+
+static void hisi_pcie_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 new_cnt, prev_cnt, delta;
+
+ do {
+ prev_cnt = local64_read(&hwc->prev_count);
+ new_cnt = hisi_pcie_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_cnt,
+ new_cnt) != prev_cnt);
+
+ delta = (new_cnt - prev_cnt) & HISI_PCIE_MAX_PERIOD;
+ local64_add(delta, &event->count);
+}
+
+static void hisi_pcie_pmu_read(struct perf_event *event)
+{
+ hisi_pcie_pmu_event_update(event);
+}
+
+static void hisi_pcie_pmu_set_period(struct perf_event *event)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
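+ /* Preload both counters to 2^63 so overflow fires after at most 2^63 increments */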
+ local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL);
+ hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL);
+ hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL);
+}
+
+static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+ u64 val;
+
+ val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx);
+ val |= HISI_PCIE_EVENT_EN;
+ hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val);
+}
+
+static void hisi_pcie_pmu_disable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+ u64 val;
+
+ val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx);
+ val &= ~HISI_PCIE_EVENT_EN;
+ hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val);
+}
+
+static void hisi_pcie_pmu_enable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+
+ hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 0);
+}
+
+static void hisi_pcie_pmu_disable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+
+ hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 1);
+}
+
+static void hisi_pcie_pmu_reset_counter(struct hisi_pcie_pmu *pcie_pmu, int idx)
+{
+ hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_RESET_CNT);
+ hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_INIT_SET);
+}
+
+static void hisi_pcie_pmu_start(struct perf_event *event, int flags)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ u64 prev_cnt;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+ hwc->state = 0;
+
+ hisi_pcie_pmu_config_filter(event);
+ hisi_pcie_pmu_enable_counter(pcie_pmu, hwc);
+ hisi_pcie_pmu_enable_int(pcie_pmu, hwc);
+ hisi_pcie_pmu_set_period(event);
+
+ if (flags & PERF_EF_RELOAD) {
+ prev_cnt = local64_read(&hwc->prev_count);
+ hisi_pcie_pmu_writeq(pcie_pmu, hwc->event_base, idx, prev_cnt);
+ }
+
+ perf_event_update_userpage(event);
+}
+
+static void hisi_pcie_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_pcie_pmu_event_update(event);
+ hisi_pcie_pmu_disable_int(pcie_pmu, hwc);
+ hisi_pcie_pmu_disable_counter(pcie_pmu, hwc);
+ hisi_pcie_pmu_clear_filter(event);
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (hwc->state & PERF_HES_UPTODATE)
+ return;
+
+ hwc->state |= PERF_HES_UPTODATE;
+}
+
+static int hisi_pcie_pmu_add(struct perf_event *event, int flags)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ /* Check all active events to find a related event. */
+ idx = hisi_pcie_pmu_find_related_event(pcie_pmu, event);
+ if (idx < 0)
+ return idx;
+
+ /* Current event shares an enabled counter with the related event */
+ if (idx < HISI_PCIE_MAX_COUNTERS) {
+ hwc->idx = idx;
+ goto start_count;
+ }
+
+ idx = hisi_pcie_pmu_get_event_idx(pcie_pmu);
+ if (idx < 0)
+ return idx;
+
+ hwc->idx = idx;
+ pcie_pmu->hw_events[idx] = event;
+ /* Reset the counter to avoid interference from previous statistics. */
+ hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
+
+start_count:
+ if (flags & PERF_EF_START)
+ hisi_pcie_pmu_start(event, PERF_EF_RELOAD);
+
+ return 0;
+}
+
+static void hisi_pcie_pmu_del(struct perf_event *event, int flags)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_pcie_pmu_stop(event, PERF_EF_UPDATE);
+ pcie_pmu->hw_events[hwc->idx] = NULL;
+ perf_event_update_userpage(event);
+}
+
+static void hisi_pcie_pmu_enable(struct pmu *pmu)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu);
+ int num;
+
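+ /* Only enable global counting when at least one event is active. */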
+ for (num = 0; num < HISI_PCIE_MAX_COUNTERS; num++) {
+ if (pcie_pmu->hw_events[num])
+ break;
+ }
+
+ if (num == HISI_PCIE_MAX_COUNTERS)
+ return;
+
+ writel(HISI_PCIE_GLOBAL_EN, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL);
+}
+
+static void hisi_pcie_pmu_disable(struct pmu *pmu)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu);
+
+ writel(HISI_PCIE_GLOBAL_NONE, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL);
+}
+
+static irqreturn_t hisi_pcie_pmu_irq(int irq, void *data)
+{
+ struct hisi_pcie_pmu *pcie_pmu = data;
+ irqreturn_t ret = IRQ_NONE;
+ struct perf_event *event;
+ u32 overflown;
+ int idx;
+
+ for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
+ overflown = hisi_pcie_pmu_readl(pcie_pmu, HISI_PCIE_INT_STAT, idx);
+ if (!overflown)
+ continue;
+
+ /* Clear the interrupt status. */
+ hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_STAT, idx, 1);
+ event = pcie_pmu->hw_events[idx];
+ if (!event)
+ continue;
+
+ hisi_pcie_pmu_event_update(event);
+ hisi_pcie_pmu_set_period(event);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int hisi_pcie_pmu_irq_register(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
+{
+ int irq, ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
+ pci_err(pdev, "Failed to enable MSI vectors: %d\n", ret);
+ return ret;
+ }
+
+ irq = pci_irq_vector(pdev, 0);
+ ret = request_irq(irq, hisi_pcie_pmu_irq, IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME,
+ pcie_pmu);
+ if (ret) {
+ pci_err(pdev, "Failed to register IRQ: %d\n", ret);
+ pci_free_irq_vectors(pdev);
+ return ret;
+ }
+
+ pcie_pmu->irq = irq;
+
+ return 0;
+}
+
+static void hisi_pcie_pmu_irq_unregister(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
+{
+ free_irq(pcie_pmu->irq, pcie_pmu);
+ pci_free_irq_vectors(pdev);
+}
+
+static int hisi_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
+
+ if (pcie_pmu->on_cpu == -1) {
+ pcie_pmu->on_cpu = cpu;
+ WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(cpu)));
+ }
+
+ return 0;
+}
+
+static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
+ unsigned int target;
+
+ /* Nothing to do if this CPU doesn't own the PMU */
+ if (pcie_pmu->on_cpu != cpu)
+ return 0;
+
+ pcie_pmu->on_cpu = -1;
+ /* Choose a new CPU from all online CPUs. */
+ target = cpumask_first(cpu_online_mask);
+ if (target >= nr_cpu_ids) {
+ pci_err(pcie_pmu->pdev, "No online CPU available\n");
+ return 0;
+ }
+
+ perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
+ /* Use this CPU for event counting */
+ pcie_pmu->on_cpu = target;
+ WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(target)));
+
+ return 0;
+}
+
+static struct attribute *hisi_pcie_pmu_events_attr[] = {
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_latency, 0x0010),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_cnt, 0x10010),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_latency, 0x0210),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x1005),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x11005),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x2004),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x12004),
+ NULL
+};
+
+static struct attribute_group hisi_pcie_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_pcie_pmu_events_attr,
+};
+
+static struct attribute *hisi_pcie_pmu_format_attr[] = {
+ HISI_PCIE_PMU_FORMAT_ATTR(event, "config:0-16"),
+ HISI_PCIE_PMU_FORMAT_ATTR(thr_len, "config1:0-3"),
+ HISI_PCIE_PMU_FORMAT_ATTR(thr_mode, "config1:4"),
+ HISI_PCIE_PMU_FORMAT_ATTR(trig_len, "config1:5-8"),
+ HISI_PCIE_PMU_FORMAT_ATTR(trig_mode, "config1:9"),
+ HISI_PCIE_PMU_FORMAT_ATTR(port, "config2:0-15"),
+ HISI_PCIE_PMU_FORMAT_ATTR(bdf, "config2:16-31"),
+ NULL
+};
+
+static const struct attribute_group hisi_pcie_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_pcie_pmu_format_attr,
+};
+
+static struct attribute *hisi_pcie_pmu_bus_attrs[] = {
+ &dev_attr_bus.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_pcie_pmu_bus_attr_group = {
+ .attrs = hisi_pcie_pmu_bus_attrs,
+};
+
+static struct attribute *hisi_pcie_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_pcie_pmu_cpumask_attr_group = {
+ .attrs = hisi_pcie_pmu_cpumask_attrs,
+};
+
+static struct attribute *hisi_pcie_pmu_identifier_attrs[] = {
+ &dev_attr_identifier.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_pcie_pmu_identifier_attr_group = {
+ .attrs = hisi_pcie_pmu_identifier_attrs,
+};
+
+static const struct attribute_group *hisi_pcie_pmu_attr_groups[] = {
+ &hisi_pcie_pmu_events_group,
+ &hisi_pcie_pmu_format_group,
+ &hisi_pcie_pmu_bus_attr_group,
+ &hisi_pcie_pmu_cpumask_attr_group,
+ &hisi_pcie_pmu_identifier_attr_group,
+ NULL
+};
+
+static int hisi_pcie_alloc_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
+{
+ struct hisi_pcie_reg_pair regs;
+ u16 sicl_id, core_id;
+ char *name;
+
+ regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_BDF);
+ pcie_pmu->bdf_min = regs.lo;
+ pcie_pmu->bdf_max = regs.hi;
+
+ regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_INFO);
+ sicl_id = regs.hi;
+ core_id = regs.lo;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_pcie%u_core%u", sicl_id, core_id);
+ if (!name)
+ return -ENOMEM;
+
+ pcie_pmu->pdev = pdev;
+ pcie_pmu->on_cpu = -1;
+ pcie_pmu->identifier = readl(pcie_pmu->base + HISI_PCIE_REG_VERSION);
+ pcie_pmu->pmu = (struct pmu) {
+ .name = name,
+ .module = THIS_MODULE,
+ .event_init = hisi_pcie_pmu_event_init,
+ .pmu_enable = hisi_pcie_pmu_enable,
+ .pmu_disable = hisi_pcie_pmu_disable,
+ .add = hisi_pcie_pmu_add,
+ .del = hisi_pcie_pmu_del,
+ .start = hisi_pcie_pmu_start,
+ .stop = hisi_pcie_pmu_stop,
+ .read = hisi_pcie_pmu_read,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = hisi_pcie_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ return 0;
+}
+
+static int hisi_pcie_init_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
+{
+ int ret;
+
+ pcie_pmu->base = pci_ioremap_bar(pdev, 2);
+ if (!pcie_pmu->base) {
+ pci_err(pdev, "Ioremap failed for pcie_pmu resource\n");
+ return -ENOMEM;
+ }
+
+ ret = hisi_pcie_alloc_pmu(pdev, pcie_pmu);
+ if (ret)
+ goto err_iounmap;
+
+ ret = hisi_pcie_pmu_irq_register(pdev, pcie_pmu);
+ if (ret)
+ goto err_iounmap;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
+ if (ret) {
+ pci_err(pdev, "Failed to register hotplug: %d\n", ret);
+ goto err_irq_unregister;
+ }
+
+ ret = perf_pmu_register(&pcie_pmu->pmu, pcie_pmu->pmu.name, -1);
+ if (ret) {
+ pci_err(pdev, "Failed to register PCIe PMU: %d\n", ret);
+ goto err_hotplug_unregister;
+ }
+
+ return ret;
+
+err_hotplug_unregister:
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
+
+err_irq_unregister:
+ hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu);
+
+err_iounmap:
+ iounmap(pcie_pmu->base);
+
+ return ret;
+}
+
+static void hisi_pcie_uninit_pmu(struct pci_dev *pdev)
+{
+ struct hisi_pcie_pmu *pcie_pmu = pci_get_drvdata(pdev);
+
+ perf_pmu_unregister(&pcie_pmu->pmu);
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
+ hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu);
+ iounmap(pcie_pmu->base);
+}
+
+static int hisi_pcie_init_dev(struct pci_dev *pdev)
+{
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ pci_err(pdev, "Failed to enable PCI device: %d\n", ret);
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME);
+ if (ret < 0) {
+ pci_err(pdev, "Failed to request PCI mem regions: %d\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+static int hisi_pcie_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hisi_pcie_pmu *pcie_pmu;
+ int ret;
+
+ pcie_pmu = devm_kzalloc(&pdev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
+ if (!pcie_pmu)
+ return -ENOMEM;
+
+ ret = hisi_pcie_init_dev(pdev);
+ if (ret)
+ return ret;
+
+ ret = hisi_pcie_init_pmu(pdev, pcie_pmu);
+ if (ret)
+ return ret;
+
+ pci_set_drvdata(pdev, pcie_pmu);
+
+ return ret;
+}
+
+static void hisi_pcie_pmu_remove(struct pci_dev *pdev)
+{
+ hisi_pcie_uninit_pmu(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static const struct pci_device_id hisi_pcie_pmu_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12d) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, hisi_pcie_pmu_ids);
+
+static struct pci_driver hisi_pcie_pmu_driver = {
+ .name = DRV_NAME,
+ .id_table = hisi_pcie_pmu_ids,
+ .probe = hisi_pcie_pmu_probe,
+ .remove = hisi_pcie_pmu_remove,
+};
+
+static int __init hisi_pcie_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
+ "AP_PERF_ARM_HISI_PCIE_PMU_ONLINE",
+ hisi_pcie_pmu_online_cpu,
+ hisi_pcie_pmu_offline_cpu);
+ if (ret) {
+ pr_err("Failed to setup PCIe PMU hotplug: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&hisi_pcie_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE);
+
+ return ret;
+}
+module_init(hisi_pcie_module_init);
+
+static void __exit hisi_pcie_module_exit(void)
+{
+ pci_unregister_driver(&hisi_pcie_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE);
+}
+module_exit(hisi_pcie_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon PCIe PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
new file mode 100644
index 000000000000..7f4d292658e3
--- /dev/null
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K LLC-TAD perf driver
+ *
+ * Copyright (C) 2021 Marvell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "tad_pmu: " fmt
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/cpuhotplug.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+#define TAD_PFC_OFFSET 0x0
+#define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3))
+#define TAD_PRF_OFFSET 0x100
+#define TAD_PRF(counter) (TAD_PRF_OFFSET | (counter << 3))
+#define TAD_PRF_CNTSEL_MASK 0xFF
+#define TAD_MAX_COUNTERS 8
+
+#define to_tad_pmu(p) (container_of(p, struct tad_pmu, pmu))
+
+struct tad_region {
+ void __iomem *base;
+};
+
+struct tad_pmu {
+ struct pmu pmu;
+ struct tad_region *regions;
+ u32 region_cnt;
+ unsigned int cpu;
+ struct hlist_node node;
+ struct perf_event *events[TAD_MAX_COUNTERS];
+ DECLARE_BITMAP(counters_map, TAD_MAX_COUNTERS);
+};
+
+static int tad_pmu_cpuhp_state;
+
+static void tad_pmu_event_counter_read(struct perf_event *event)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 counter_idx = hwc->idx;
+ u64 prev, new;
+ int i;
+
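+ /*
+ * The event counts in every TAD region; sum the per-region
+ * counters and account the delta exactly once via cmpxchg.
+ */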
+ do {
+ prev = local64_read(&hwc->prev_count);
+ for (i = 0, new = 0; i < tad_pmu->region_cnt; i++)
+ new += readq(tad_pmu->regions[i].base +
+ TAD_PFC(counter_idx));
+ } while (local64_cmpxchg(&hwc->prev_count, prev, new) != prev);
+
+ local64_add(new - prev, &event->count);
+}
+
+static void tad_pmu_event_counter_stop(struct perf_event *event, int flags)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 counter_idx = hwc->idx;
+ int i;
+
+ /* TAD()_PFC() stops counting on the write
+ * which sets TAD()_PRF()[CNTSEL] == 0.
+ */
+ for (i = 0; i < tad_pmu->region_cnt; i++) {
+ writeq_relaxed(0, tad_pmu->regions[i].base +
+ TAD_PRF(counter_idx));
+ }
+
+ tad_pmu_event_counter_read(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static void tad_pmu_event_counter_start(struct perf_event *event, int flags)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 event_idx = event->attr.config;
+ u32 counter_idx = hwc->idx;
+ u64 reg_val;
+ int i;
+
+ hwc->state = 0;
+
+ /* The TAD_PFC() counters are typically zeroed before counting starts. */
+ for (i = 0; i < tad_pmu->region_cnt; i++)
+ writeq_relaxed(0, tad_pmu->regions[i].base +
+ TAD_PFC(counter_idx));
+
+ /* TAD()_PFC() starts counting on the write
+ * which sets TAD()_PRF()[CNTSEL] != 0.
+ */
+ for (i = 0; i < tad_pmu->region_cnt; i++) {
+ reg_val = readq_relaxed(tad_pmu->regions[i].base +
+ TAD_PRF(counter_idx));
+ reg_val |= (event_idx & 0xFF);
+ writeq_relaxed(reg_val, tad_pmu->regions[i].base +
+ TAD_PRF(counter_idx));
+ }
+}
+
+static void tad_pmu_event_counter_del(struct perf_event *event, int flags)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ tad_pmu_event_counter_stop(event, flags | PERF_EF_UPDATE);
+ tad_pmu->events[idx] = NULL;
+ clear_bit(idx, tad_pmu->counters_map);
+}
+
+static int tad_pmu_event_counter_add(struct perf_event *event, int flags)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ /* Get a free counter for this event */
+ idx = find_first_zero_bit(tad_pmu->counters_map, TAD_MAX_COUNTERS);
+ if (idx == TAD_MAX_COUNTERS)
+ return -EAGAIN;
+
+ set_bit(idx, tad_pmu->counters_map);
+
+ hwc->idx = idx;
+ hwc->state = PERF_HES_STOPPED;
+ tad_pmu->events[idx] = event;
+
+ if (flags & PERF_EF_START)
+ tad_pmu_event_counter_start(event, flags);
+
+ return 0;
+}
+
+static int tad_pmu_event_init(struct perf_event *event)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+
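+ /* Only events created in a disabled state are accepted. */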
+ if (!event->attr.disabled)
+ return -EINVAL;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (event->state != PERF_EVENT_STATE_OFF)
+ return -EINVAL;
+
+ event->cpu = tad_pmu->cpu;
+ event->hw.idx = -1;
+ event->hw.config_base = event->attr.config;
+
+ return 0;
+}
+
+static ssize_t tad_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define TAD_PMU_EVENT_ATTR(name, config) \
+ PMU_EVENT_ATTR_ID(name, tad_pmu_event_show, config)
+
+static struct attribute *tad_pmu_event_attrs[] = {
+ TAD_PMU_EVENT_ATTR(tad_none, 0x0),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_in_any, 0x1),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_in_mn, 0x2),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_in_exlmn, 0x3),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_any, 0x4),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_mn, 0x5),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_exlmn, 0x6),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_dss, 0x7),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_retry_dss, 0x8),
+ TAD_PMU_EVENT_ATTR(tad_dat_msh_in_any, 0x9),
+ TAD_PMU_EVENT_ATTR(tad_dat_msh_in_dss, 0xa),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_out_any, 0xb),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_out_dss_rd, 0xc),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_out_dss_wr, 0xd),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_out_evict, 0xe),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_any, 0xf),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_retry_exlmn, 0x10),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_retry_mn, 0x11),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_exlmn, 0x12),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_mn, 0x13),
+ TAD_PMU_EVENT_ATTR(tad_snp_msh_out_any, 0x14),
+ TAD_PMU_EVENT_ATTR(tad_snp_msh_out_mn, 0x15),
+ TAD_PMU_EVENT_ATTR(tad_snp_msh_out_exlmn, 0x16),
+ TAD_PMU_EVENT_ATTR(tad_dat_msh_out_any, 0x17),
+ TAD_PMU_EVENT_ATTR(tad_dat_msh_out_fill, 0x18),
+ TAD_PMU_EVENT_ATTR(tad_dat_msh_out_dss, 0x19),
+ TAD_PMU_EVENT_ATTR(tad_alloc_dtg, 0x1a),
+ TAD_PMU_EVENT_ATTR(tad_alloc_ltg, 0x1b),
+ TAD_PMU_EVENT_ATTR(tad_alloc_any, 0x1c),
+ TAD_PMU_EVENT_ATTR(tad_hit_dtg, 0x1d),
+ TAD_PMU_EVENT_ATTR(tad_hit_ltg, 0x1e),
+ TAD_PMU_EVENT_ATTR(tad_hit_any, 0x1f),
+ TAD_PMU_EVENT_ATTR(tad_tag_rd, 0x20),
+ TAD_PMU_EVENT_ATTR(tad_dat_rd, 0x21),
+ TAD_PMU_EVENT_ATTR(tad_dat_rd_byp, 0x22),
+ TAD_PMU_EVENT_ATTR(tad_ifb_occ, 0x23),
+ TAD_PMU_EVENT_ATTR(tad_req_occ, 0x24),
+ NULL
+};
+
+static const struct attribute_group tad_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = tad_pmu_event_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+
+static struct attribute *tad_pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL
+};
+
+static struct attribute_group tad_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = tad_pmu_format_attrs,
+};
+
+static ssize_t tad_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(tad_pmu->cpu));
+}
+
+static DEVICE_ATTR(cpumask, 0444, tad_pmu_cpumask_show, NULL);
+
+static struct attribute *tad_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static struct attribute_group tad_pmu_cpumask_attr_group = {
+ .attrs = tad_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *tad_pmu_attr_groups[] = {
+ &tad_pmu_events_attr_group,
+ &tad_pmu_format_attr_group,
+ &tad_pmu_cpumask_attr_group,
+ NULL
+};
+
+static int tad_pmu_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct tad_region *regions;
+ struct tad_pmu *tad_pmu;
+ struct resource *res;
+ u32 tad_pmu_page_size;
+ u32 tad_page_size;
+ u32 tad_cnt;
+ int i, ret;
+ char *name;
+
+ tad_pmu = devm_kzalloc(&pdev->dev, sizeof(*tad_pmu), GFP_KERNEL);
+ if (!tad_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, tad_pmu);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Mem resource not found\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(node, "marvell,tad-page-size",
+ &tad_page_size);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't find tad-page-size property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "marvell,tad-pmu-page-size",
+ &tad_pmu_page_size);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't find tad-pmu-page-size property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "marvell,tad-cnt", &tad_cnt);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't find tad-cnt property\n");
+ return ret;
+ }
+
+ regions = devm_kcalloc(&pdev->dev, tad_cnt,
+ sizeof(*regions), GFP_KERNEL);
+ if (!regions)
+ return -ENOMEM;
+
+ /* ioremap the distributed TAD PMU regions */
+ for (i = 0; i < tad_cnt && res->start < res->end; i++) {
+ regions[i].base = devm_ioremap(&pdev->dev,
+ res->start,
+ tad_pmu_page_size);
+ if (!regions[i].base) {
+ dev_err(&pdev->dev, "TAD%d ioremap failed\n", i);
+ return -ENOMEM;
+ }
+ res->start += tad_page_size;
+ }
+
+ tad_pmu->regions = regions;
+ tad_pmu->region_cnt = tad_cnt;
+
+ tad_pmu->pmu = (struct pmu) {
+
+ .module = THIS_MODULE,
+ .attr_groups = tad_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE |
+ PERF_PMU_CAP_NO_INTERRUPT,
+ .task_ctx_nr = perf_invalid_context,
+
+ .event_init = tad_pmu_event_init,
+ .add = tad_pmu_event_counter_add,
+ .del = tad_pmu_event_counter_del,
+ .start = tad_pmu_event_counter_start,
+ .stop = tad_pmu_event_counter_stop,
+ .read = tad_pmu_event_counter_read,
+ };
+
+ tad_pmu->cpu = raw_smp_processor_id();
+
+ /* Register the PMU instance for CPU hotplug */
+ ret = cpuhp_state_add_instance_nocalls(tad_pmu_cpuhp_state,
+ &tad_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ name = "tad";
+ ret = perf_pmu_register(&tad_pmu->pmu, name, -1);
+ if (ret)
+ cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
+ &tad_pmu->node);
+
+ return ret;
+}
+
+static int tad_pmu_remove(struct platform_device *pdev)
+{
+ struct tad_pmu *pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
+ &pmu->node);
+ perf_pmu_unregister(&pmu->pmu);
+
+ return 0;
+}
+
+static const struct of_device_id tad_pmu_of_match[] = {
+ { .compatible = "marvell,cn10k-tad-pmu", },
+ {},
+};
+
+static struct platform_driver tad_pmu_driver = {
+ .driver = {
+ .name = "cn10k_tad_pmu",
+ .of_match_table = of_match_ptr(tad_pmu_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = tad_pmu_probe,
+ .remove = tad_pmu_remove,
+};
+
+static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct tad_pmu *pmu = hlist_entry_safe(node, struct tad_pmu, node);
+ unsigned int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+
+ return 0;
+}
+
+static int __init tad_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/cn10k/tadpmu:online",
+ NULL,
+ tad_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+ tad_pmu_cpuhp_state = ret;
+ return platform_driver_register(&tad_pmu_driver);
+}
+
+static void __exit tad_pmu_exit(void)
+{
+ platform_driver_unregister(&tad_pmu_driver);
+ cpuhp_remove_multi_state(tad_pmu_cpuhp_state);
+}
+
+module_init(tad_pmu_init);
+module_exit(tad_pmu_exit);
+
+MODULE_DESCRIPTION("Marvell CN10K LLC-TAD Perf driver");
+MODULE_AUTHOR("Bhaskara Budiredla <bbudiredla@marvell.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig
index db5d0cd757e3..486ca23aba32 100644
--- a/drivers/phy/amlogic/Kconfig
+++ b/drivers/phy/amlogic/Kconfig
@@ -2,6 +2,16 @@
#
# Phy drivers for Amlogic platforms
#
+config PHY_MESON8_HDMI_TX
+ tristate "Meson8, Meson8b and Meson8m2 HDMI TX PHY driver"
+ depends on (ARCH_MESON && ARM) || COMPILE_TEST
+ depends on OF
+ select MFD_SYSCON
+ help
+ Enable this to support the HDMI TX PHYs found in Meson8,
+ Meson8b and Meson8m2 SoCs.
+ If unsure, say N.
+
config PHY_MESON8B_USB2
tristate "Meson8, Meson8b, Meson8m2 and GXBB USB2 PHY driver"
default ARCH_MESON
diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile
index 8fa07fbd0d92..c0886c850bb0 100644
--- a/drivers/phy/amlogic/Makefile
+++ b/drivers/phy/amlogic/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PHY_MESON8_HDMI_TX) += phy-meson8-hdmi-tx.o
obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o
obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o
diff --git a/drivers/phy/amlogic/phy-meson8-hdmi-tx.c b/drivers/phy/amlogic/phy-meson8-hdmi-tx.c
new file mode 100644
index 000000000000..f9a6572c27d8
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson8-hdmi-tx.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Meson8, Meson8b and Meson8m2 HDMI TX PHY.
+ *
+ * Copyright (C) 2021 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+/*
+ * Unfortunately there is no detailed documentation available for the
+ * HHI_HDMI_PHY_CNTL0 register. CTL0 and CTL1 are all we know about.
+ * Magic register values in the driver below are taken from the vendor
+ * BSP / kernel.
+ */
+#define HHI_HDMI_PHY_CNTL0 0x3a0
+ #define HHI_HDMI_PHY_CNTL0_HDMI_CTL1 GENMASK(31, 16)
+ #define HHI_HDMI_PHY_CNTL0_HDMI_CTL0 GENMASK(15, 0)
+
+#define HHI_HDMI_PHY_CNTL1 0x3a4
+ #define HHI_HDMI_PHY_CNTL1_CLOCK_ENABLE BIT(1)
+ #define HHI_HDMI_PHY_CNTL1_SOFT_RESET BIT(0)
+
+#define HHI_HDMI_PHY_CNTL2 0x3a8
+
+struct phy_meson8_hdmi_tx_priv {
+ struct regmap *hhi;
+ struct clk *tmds_clk;
+};
+
+static int phy_meson8_hdmi_tx_init(struct phy *phy)
+{
+ struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy);
+
+ return clk_prepare_enable(priv->tmds_clk);
+}
+
+static int phy_meson8_hdmi_tx_exit(struct phy *phy)
+{
+ struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(priv->tmds_clk);
+
+ return 0;
+}
+
+static int phy_meson8_hdmi_tx_power_on(struct phy *phy)
+{
+ struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy);
+ unsigned int i;
+ u16 hdmi_ctl0;
+
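+ /* Select the CTL0 word based on the TMDS clock rate. */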
+ if (clk_get_rate(priv->tmds_clk) >= 2970UL * 1000 * 1000)
+ hdmi_ctl0 = 0x1e8b;
+ else
+ hdmi_ctl0 = 0x4d0b;
+
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0,
+ FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL1, 0x08c3) |
+ FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL0, hdmi_ctl0));
+
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, 0x0);
+
+ /* Reset three times, just like the vendor driver does */
+ for (i = 0; i < 3; i++) {
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1,
+ HHI_HDMI_PHY_CNTL1_CLOCK_ENABLE |
+ HHI_HDMI_PHY_CNTL1_SOFT_RESET);
+ usleep_range(1000, 2000);
+
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1,
+ HHI_HDMI_PHY_CNTL1_CLOCK_ENABLE);
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
+static int phy_meson8_hdmi_tx_power_off(struct phy *phy)
+{
+ struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy);
+
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0,
+ FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL1, 0x0841) |
+ FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL0, 0x8d00));
+
+ return 0;
+}
+
+static const struct phy_ops phy_meson8_hdmi_tx_ops = {
+ .init = phy_meson8_hdmi_tx_init,
+ .exit = phy_meson8_hdmi_tx_exit,
+ .power_on = phy_meson8_hdmi_tx_power_on,
+ .power_off = phy_meson8_hdmi_tx_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int phy_meson8_hdmi_tx_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_meson8_hdmi_tx_priv *priv;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct phy *phy;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hhi = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(priv->hhi))
+ return PTR_ERR(priv->hhi);
+
+ priv->tmds_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->tmds_clk))
+ return PTR_ERR(priv->tmds_clk);
+
+ phy = devm_phy_create(&pdev->dev, np, &phy_meson8_hdmi_tx_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id phy_meson8_hdmi_tx_of_match[] = {
+ { .compatible = "amlogic,meson8-hdmi-tx-phy" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, phy_meson8_hdmi_tx_of_match);
+
+static struct platform_driver phy_meson8_hdmi_tx_driver = {
+ .probe = phy_meson8_hdmi_tx_probe,
+ .driver = {
+ .name = "phy-meson8-hdmi-tx",
+ .of_match_table = phy_meson8_hdmi_tx_of_match,
+ },
+};
+module_platform_driver(phy_meson8_hdmi_tx_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Meson8, Meson8b and Meson8m2 HDMI TX PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb2.c b/drivers/phy/broadcom/phy-bcm-ns-usb2.c
index 4b015b8a71c3..6a36e187d100 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb2.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb2.c
@@ -9,17 +9,23 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
struct bcm_ns_usb2 {
struct device *dev;
struct clk *ref_clk;
struct phy *phy;
+ struct regmap *clkset;
+ void __iomem *base;
+
+ /* Deprecated binding */
void __iomem *dmu;
};
@@ -27,7 +33,6 @@ static int bcm_ns_usb2_phy_init(struct phy *phy)
{
struct bcm_ns_usb2 *usb2 = phy_get_drvdata(phy);
struct device *dev = usb2->dev;
- void __iomem *dmu = usb2->dmu;
u32 ref_clk_rate, usb2ctl, usb_pll_ndiv, usb_pll_pdiv;
int err = 0;
@@ -44,7 +49,10 @@ static int bcm_ns_usb2_phy_init(struct phy *phy)
goto err_clk_off;
}
- usb2ctl = readl(dmu + BCMA_DMU_CRU_USB2_CONTROL);
+ if (usb2->base)
+ usb2ctl = readl(usb2->base);
+ else
+ usb2ctl = readl(usb2->dmu + BCMA_DMU_CRU_USB2_CONTROL);
if (usb2ctl & BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK) {
usb_pll_pdiv = usb2ctl;
@@ -58,15 +66,24 @@ static int bcm_ns_usb2_phy_init(struct phy *phy)
usb_pll_ndiv = (1920000000 * usb_pll_pdiv) / ref_clk_rate;
/* Unlock DMU PLL settings with some magic value */
- writel(0x0000ea68, dmu + BCMA_DMU_CRU_CLKSET_KEY);
+ if (usb2->clkset)
+ regmap_write(usb2->clkset, 0, 0x0000ea68);
+ else
+ writel(0x0000ea68, usb2->dmu + BCMA_DMU_CRU_CLKSET_KEY);
/* Write USB 2.0 PLL control setting */
usb2ctl &= ~BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK;
usb2ctl |= usb_pll_ndiv << BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT;
- writel(usb2ctl, dmu + BCMA_DMU_CRU_USB2_CONTROL);
+ if (usb2->base)
+ writel(usb2ctl, usb2->base);
+ else
+ writel(usb2ctl, usb2->dmu + BCMA_DMU_CRU_USB2_CONTROL);
/* Lock DMU PLL settings */
- writel(0x00000000, dmu + BCMA_DMU_CRU_CLKSET_KEY);
+ if (usb2->clkset)
+ regmap_write(usb2->clkset, 0, 0x00000000);
+ else
+ writel(0x00000000, usb2->dmu + BCMA_DMU_CRU_CLKSET_KEY);
err_clk_off:
clk_disable_unprepare(usb2->ref_clk);
@@ -90,15 +107,32 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev)
return -ENOMEM;
usb2->dev = dev;
- usb2->dmu = devm_platform_ioremap_resource_byname(pdev, "dmu");
- if (IS_ERR(usb2->dmu)) {
- dev_err(dev, "Failed to map DMU regs\n");
- return PTR_ERR(usb2->dmu);
+ if (of_find_property(dev->of_node, "brcm,syscon-clkset", NULL)) {
+ usb2->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(usb2->base)) {
+ dev_err(dev, "Failed to map control reg\n");
+ return PTR_ERR(usb2->base);
+ }
+
+ usb2->clkset = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "brcm,syscon-clkset");
+ if (IS_ERR(usb2->clkset)) {
+ dev_err(dev, "Failed to lookup clkset regmap\n");
+ return PTR_ERR(usb2->clkset);
+ }
+ } else {
+ usb2->dmu = devm_platform_ioremap_resource_byname(pdev, "dmu");
+ if (IS_ERR(usb2->dmu)) {
+ dev_err(dev, "Failed to map DMU regs\n");
+ return PTR_ERR(usb2->dmu);
+ }
+
+ dev_warn(dev, "using deprecated DT binding\n");
}
usb2->ref_clk = devm_clk_get(dev, "phy-ref-clk");
if (IS_ERR(usb2->ref_clk)) {
- dev_err(dev, "Clock not defined\n");
+ dev_err_probe(dev, PTR_ERR(usb2->ref_clk), "failed to get ref clk\n");
return PTR_ERR(usb2->ref_clk);
}
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index e93818e3991f..da24acd26666 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -23,6 +23,9 @@
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/phy/phy-cadence.h>
+#define NUM_SSC_MODE 3
+#define NUM_PHY_TYPE 4
+
/* PHY register offsets */
#define SIERRA_COMMON_CDB_OFFSET 0x0
#define SIERRA_MACRO_ID_REG 0x0
@@ -31,12 +34,21 @@
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
+#define SIERRA_CMN_PLLLC_CLK1_PREG 0x4D
#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
+#define SIERRA_CMN_PLLLC_DSMCORR_PREG 0x51
+#define SIERRA_CMN_PLLLC_SS_PREG 0x52
+#define SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG 0x53
+#define SIERRA_CMN_PLLLC_SSTWOPT_PREG 0x54
#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
+#define SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG 0x63
#define SIERRA_CMN_REFRCV_PREG 0x98
#define SIERRA_CMN_REFRCV1_PREG 0xB8
#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
+#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA
+#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0
+#define SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG 0xE2
#define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0x4000 << (block_offset)) + \
@@ -49,7 +61,11 @@
#define SIERRA_DET_STANDEC_E_PREG 0x004
#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008
#define SIERRA_PSM_A0IN_TMR_PREG 0x009
+#define SIERRA_PSM_A3IN_TMR_PREG 0x00C
#define SIERRA_PSM_DIAG_PREG 0x015
+#define SIERRA_PSC_LN_A3_PREG 0x023
+#define SIERRA_PSC_LN_A4_PREG 0x024
+#define SIERRA_PSC_LN_IDLE_PREG 0x026
#define SIERRA_PSC_TX_A0_PREG 0x028
#define SIERRA_PSC_TX_A1_PREG 0x029
#define SIERRA_PSC_TX_A2_PREG 0x02A
@@ -59,18 +75,22 @@
#define SIERRA_PSC_RX_A2_PREG 0x032
#define SIERRA_PSC_RX_A3_PREG 0x033
#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A
+#define SIERRA_PLLCTRL_GEN_A_PREG 0x03B
#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E
#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F
#define SIERRA_PLLCTRL_STATUS_PREG 0x044
#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B
#define SIERRA_DFE_BIASTRIM_PREG 0x04C
#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
+#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087
#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088
+#define SIERRA_CREQ_DCBIASATTEN_OVR_PREG 0x08C
#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E
+#define SIERRA_RX_CTLE_CAL_PREG 0x08F
#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091
#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092
#define SIERRA_CREQ_EQ_CTRL_PREG 0x093
@@ -120,15 +140,28 @@
#define SIERRA_DEQ_ALUT12 0x114
#define SIERRA_DEQ_ALUT13 0x115
#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
+#define SIERRA_DEQ_DFETAP0 0x129
+#define SIERRA_DEQ_DFETAP1 0x12B
+#define SIERRA_DEQ_DFETAP2 0x12D
+#define SIERRA_DEQ_DFETAP3 0x12F
+#define SIERRA_DEQ_DFETAP4 0x131
#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134
+#define SIERRA_DEQ_PRECUR_PREG 0x138
+#define SIERRA_DEQ_POSTCUR_PREG 0x140
+#define SIERRA_DEQ_POSTCUR_DECR_PREG 0x142
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
+#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152
+#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158
#define SIERRA_DEQ_PICTRL_PREG 0x161
#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174
#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C
+#define SIERRA_CPI_RESBIAS_BIN_PREG 0x17E
+#define SIERRA_CPI_TRIM_PREG 0x17F
#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
+#define SIERRA_EPI_CTRL_PREG 0x187
#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
#define SIERRA_LFPSFILT_NS_PREG 0x18A
#define SIERRA_LFPSFILT_RD_PREG 0x18B
@@ -142,15 +175,36 @@
#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
-#define SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset) \
- (0xc000 << (block_offset))
+/* PHY PCS common registers */
+#define SIERRA_PHY_PCS_COMMON_OFFSET(block_offset) \
+ (0xc000 << (block_offset))
+#define SIERRA_PHY_PIPE_CMN_CTRL1 0x0
#define SIERRA_PHY_PLL_CFG 0xe
+/* PHY PCS lane registers */
+#define SIERRA_PHY_PCS_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0xD000 << (block_offset)) + \
+ (((ln) << 8) << (reg_offset)))
+
+#define SIERRA_PHY_ISO_LINK_CTRL 0xB
+
+/* PHY PMA common registers */
+#define SIERRA_PHY_PMA_COMMON_OFFSET(block_offset) \
+ (0xE000 << (block_offset))
+#define SIERRA_PHY_PMA_CMN_CTRL 0x000
+
+/* PHY PMA lane registers */
+#define SIERRA_PHY_PMA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0xF000 << (block_offset)) + \
+ (((ln) << 8) << (reg_offset)))
+
+#define SIERRA_PHY_PMA_XCVR_CTRL 0x000
+
#define SIERRA_MACRO_ID 0x00007364
#define SIERRA_MAX_LANES 16
#define PLL_LOCK_TIME 100000
-#define CDNS_SIERRA_OUTPUT_CLOCKS 2
+#define CDNS_SIERRA_OUTPUT_CLOCKS 3
#define CDNS_SIERRA_INPUT_CLOCKS 5
enum cdns_sierra_clock_input {
PHY_CLK,
@@ -167,12 +221,21 @@ static const struct reg_field macro_id_type =
REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
static const struct reg_field phy_pll_cfg_1 =
REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
+static const struct reg_field pma_cmn_ready =
+ REG_FIELD(SIERRA_PHY_PMA_CMN_CTRL, 0, 0);
static const struct reg_field pllctrl_lock =
REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
+static const struct reg_field phy_iso_link_ctrl_1 =
+ REG_FIELD(SIERRA_PHY_ISO_LINK_CTRL, 1, 1);
+static const struct reg_field cmn_plllc_clk1outdiv_preg =
+ REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 0, 6);
+static const struct reg_field cmn_plllc_clk1_en_preg =
+ REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 12, 12);
static const char * const clk_names[] = {
[CDNS_SIERRA_PLL_CMNLC] = "pll_cmnlc",
[CDNS_SIERRA_PLL_CMNLC1] = "pll_cmnlc1",
+ [CDNS_SIERRA_DERIVED_REFCLK] = "refclk_der",
};
enum cdns_sierra_cmn_plllc {
@@ -215,14 +278,41 @@ static const int pll_mux_parent_index[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
[CMN_PLLLC1] = { PLL1_REFCLK, PLL0_REFCLK },
};
-static u32 cdns_sierra_pll_mux_table[] = { 0, 1 };
+static u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
+ [CMN_PLLLC] = { 0, 1 },
+ [CMN_PLLLC1] = { 1, 0 },
+};
+
+struct cdns_sierra_derived_refclk {
+ struct clk_hw hw;
+ struct regmap_field *cmn_plllc_clk1outdiv_preg;
+ struct regmap_field *cmn_plllc_clk1_en_preg;
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_sierra_derived_refclk(_hw) \
+ container_of(_hw, struct cdns_sierra_derived_refclk, hw)
+
+enum cdns_sierra_phy_type {
+ TYPE_NONE,
+ TYPE_PCIE,
+ TYPE_USB,
+ TYPE_QSGMII
+};
+
+enum cdns_sierra_ssc_mode {
+ NO_SSC,
+ EXTERNAL_SSC,
+ INTERNAL_SSC
+};
struct cdns_sierra_inst {
struct phy *phy;
- u32 phy_type;
+ enum cdns_sierra_phy_type phy_type;
u32 num_lanes;
u32 mlane;
struct reset_control *lnk_rst;
+ enum cdns_sierra_ssc_mode ssc_mode;
};
struct cdns_reg_pairs {
@@ -230,18 +320,23 @@ struct cdns_reg_pairs {
u32 off;
};
+struct cdns_sierra_vals {
+ const struct cdns_reg_pairs *reg_pairs;
+ u32 num_regs;
+};
+
struct cdns_sierra_data {
- u32 id_value;
- u8 block_offset_shift;
- u8 reg_offset_shift;
- u32 pcie_cmn_regs;
- u32 pcie_ln_regs;
- u32 usb_cmn_regs;
- u32 usb_ln_regs;
- const struct cdns_reg_pairs *pcie_cmn_vals;
- const struct cdns_reg_pairs *pcie_ln_vals;
- const struct cdns_reg_pairs *usb_cmn_vals;
- const struct cdns_reg_pairs *usb_ln_vals;
+ u32 id_value;
+ u8 block_offset_shift;
+ u8 reg_offset_shift;
+ struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
};
struct cdns_regmap_cdb_context {
@@ -253,16 +348,21 @@ struct cdns_regmap_cdb_context {
struct cdns_sierra_phy {
struct device *dev;
struct regmap *regmap;
- struct cdns_sierra_data *init_data;
+ const struct cdns_sierra_data *init_data;
struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
struct reset_control *phy_rst;
struct reset_control *apb_rst;
struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
- struct regmap *regmap_phy_config_ctrl;
+ struct regmap *regmap_phy_pcs_common_cdb;
+ struct regmap *regmap_phy_pcs_lane_cdb[SIERRA_MAX_LANES];
+ struct regmap *regmap_phy_pma_common_cdb;
+ struct regmap *regmap_phy_pma_lane_cdb[SIERRA_MAX_LANES];
struct regmap *regmap_common_cdb;
struct regmap_field *macro_id_type;
struct regmap_field *phy_pll_cfg_1;
+ struct regmap_field *pma_cmn_ready;
struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
+ struct regmap_field *phy_iso_link_ctrl_1[SIERRA_MAX_LANES];
struct regmap_field *cmn_refrcv_refclk_plllc1en_preg[SIERRA_NUM_CMN_PLLC];
struct regmap_field *cmn_refrcv_refclk_termen_preg[SIERRA_NUM_CMN_PLLC];
struct regmap_field *cmn_plllc_pfdclk1_sel_preg[SIERRA_NUM_CMN_PLLC];
@@ -329,51 +429,141 @@ static const struct regmap_config cdns_sierra_common_cdb_config = {
.reg_read = cdns_regmap_read,
};
-static const struct regmap_config cdns_sierra_phy_config_ctrl_config = {
- .name = "sierra_phy_config_ctrl",
+static const struct regmap_config cdns_sierra_phy_pcs_cmn_cdb_config = {
+ .name = "sierra_phy_pcs_cmn_cdb",
.reg_stride = 1,
.fast_io = true,
.reg_write = cdns_regmap_write,
.reg_read = cdns_regmap_read,
};
+#define SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_phy_pcs_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_sierra_phy_pcs_lane_cdb_config[] = {
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("15"),
+};
+
+static const struct regmap_config cdns_sierra_phy_pma_cmn_cdb_config = {
+ .name = "sierra_phy_pma_cmn_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+#define SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_phy_pma_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_sierra_phy_pma_lane_cdb_config[] = {
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("15"),
+};
+
static int cdns_sierra_phy_init(struct phy *gphy)
{
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
+ const struct cdns_sierra_data *init_data = phy->init_data;
+ struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ enum cdns_sierra_phy_type phy_type = ins->phy_type;
+ enum cdns_sierra_ssc_mode ssc = ins->ssc_mode;
+ struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_reg_pairs *reg_pairs;
+ struct cdns_sierra_vals *pcs_cmn_vals;
struct regmap *regmap;
+ u32 num_regs;
int i, j;
- const struct cdns_reg_pairs *cmn_vals, *ln_vals;
- u32 num_cmn_regs, num_ln_regs;
/* Initialise the PHY registers, unless auto configured */
- if (phy->autoconf)
+ if (phy->autoconf || phy->nsubnodes > 1)
return 0;
clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
clk_set_rate(phy->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
- if (ins->phy_type == PHY_TYPE_PCIE) {
- num_cmn_regs = phy->init_data->pcie_cmn_regs;
- num_ln_regs = phy->init_data->pcie_ln_regs;
- cmn_vals = phy->init_data->pcie_cmn_vals;
- ln_vals = phy->init_data->pcie_ln_vals;
- } else if (ins->phy_type == PHY_TYPE_USB3) {
- num_cmn_regs = phy->init_data->usb_cmn_regs;
- num_ln_regs = phy->init_data->usb_ln_regs;
- cmn_vals = phy->init_data->usb_cmn_vals;
- ln_vals = phy->init_data->usb_ln_vals;
- } else {
- return -EINVAL;
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PHY PMA lane registers configurations */
+ phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (phy_pma_ln_vals) {
+ reg_pairs = phy_pma_ln_vals->reg_pairs;
+ num_regs = phy_pma_ln_vals->num_regs;
+ for (i = 0; i < ins->num_lanes; i++) {
+ regmap = phy->regmap_phy_pma_lane_cdb[i + ins->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
}
- regmap = phy->regmap_common_cdb;
- for (j = 0; j < num_cmn_regs ; j++)
- regmap_write(regmap, cmn_vals[j].off, cmn_vals[j].val);
+ /* PMA common registers configurations */
+ pma_cmn_vals = init_data->pma_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pma_cmn_vals) {
+ reg_pairs = pma_cmn_vals->reg_pairs;
+ num_regs = pma_cmn_vals->num_regs;
+ regmap = phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
- for (i = 0; i < ins->num_lanes; i++) {
- for (j = 0; j < num_ln_regs ; j++) {
+ /* PMA lane registers configurations */
+ pma_ln_vals = init_data->pma_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (pma_ln_vals) {
+ reg_pairs = pma_ln_vals->reg_pairs;
+ num_regs = pma_ln_vals->num_regs;
+ for (i = 0; i < ins->num_lanes; i++) {
regmap = phy->regmap_lane_cdb[i + ins->mlane];
- regmap_write(regmap, ln_vals[j].off, ln_vals[j].val);
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}
@@ -388,10 +578,13 @@ static int cdns_sierra_phy_on(struct phy *gphy)
u32 val;
int ret;
- ret = reset_control_deassert(sp->phy_rst);
- if (ret) {
- dev_err(dev, "Failed to take the PHY out of reset\n");
- return ret;
+ if (sp->nsubnodes == 1) {
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret) {
+ dev_err(dev, "Failed to take the PHY out of reset\n");
+ return ret;
+ }
}
/* Take the PHY lane group out of reset */
@@ -401,6 +594,26 @@ static int cdns_sierra_phy_on(struct phy *gphy)
return ret;
}
+ if (ins->phy_type == TYPE_PCIE || ins->phy_type == TYPE_USB) {
+ ret = regmap_field_read_poll_timeout(sp->phy_iso_link_ctrl_1[ins->mlane],
+ val, !val, 1000, PLL_LOCK_TIME);
+ if (ret) {
+ dev_err(dev, "Timeout waiting for PHY status ready\n");
+ return ret;
+ }
+ }
+
+ /*
+ * Wait for cmn_ready assertion
+ * PHY_PMA_CMN_CTRL[0] == 1
+ */
+ ret = regmap_field_read_poll_timeout(sp->pma_cmn_ready, val, val,
+ 1000, PLL_LOCK_TIME);
+ if (ret) {
+ dev_err(dev, "Timeout waiting for CMN ready\n");
+ return ret;
+ }
+
ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
val, val, 1000, PLL_LOCK_TIME);
if (ret < 0)
@@ -436,11 +649,25 @@ static const struct phy_ops ops = {
static u8 cdns_sierra_pll_mux_get_parent(struct clk_hw *hw)
{
struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
+ struct regmap_field *plllc1en_field = mux->plllc1en_field;
+ struct regmap_field *termen_field = mux->termen_field;
struct regmap_field *field = mux->pfdclk_sel_preg;
unsigned int val;
+ int index;
regmap_field_read(field, &val);
- return clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table, 0, val);
+
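+ /*
+ * PLLLC1 uses an inverted mux encoding, and selecting one of its
+ * parents also requires enabling the PLLLC1 refclk receiver and
+ * its termination.
+ */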
+ if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1])) {
+ index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC1], 0, val);
+ if (index == 1) {
+ regmap_field_write(plllc1en_field, 1);
+ regmap_field_write(termen_field, 1);
+ }
+ } else {
+ index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC], 0, val);
+ }
+
+ return index;
}
static int cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index)
@@ -458,7 +685,11 @@ static int cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index)
ret |= regmap_field_write(termen_field, 1);
}
- val = cdns_sierra_pll_mux_table[index];
+ if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1]))
+ val = cdns_sierra_pll_mux_table[CMN_PLLLC1][index];
+ else
+ val = cdns_sierra_pll_mux_table[CMN_PLLLC][index];
+
ret |= regmap_field_write(field, val);
return ret;
@@ -496,8 +727,8 @@ static int cdns_sierra_pll_mux_register(struct cdns_sierra_phy *sp,
for (i = 0; i < num_parents; i++) {
clk = sp->input_clks[pll_mux_parent_index[clk_index][i]];
if (IS_ERR_OR_NULL(clk)) {
- dev_err(dev, "No parent clock for derived_refclk\n");
- return PTR_ERR(clk);
+ dev_err(dev, "No parent clock for PLL mux clocks\n");
+ return IS_ERR(clk) ? PTR_ERR(clk) : -ENOENT;
}
parent_names[i] = __clk_get_name(clk);
}
@@ -551,6 +782,91 @@ static int cdns_sierra_phy_register_pll_mux(struct cdns_sierra_phy *sp)
return 0;
}
+static int cdns_sierra_derived_refclk_enable(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0x1);
+
+ /* Program the divider for a 100MHz output on ref_der_clk_out: 5GHz VCO / 50 = 100MHz. */
+ regmap_field_write(derived_refclk->cmn_plllc_clk1outdiv_preg, 0x2E);
+
+ return 0;
+}
+
+static void cdns_sierra_derived_refclk_disable(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0);
+}
+
+static int cdns_sierra_derived_refclk_is_enabled(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+ int val;
+
+ regmap_field_read(derived_refclk->cmn_plllc_clk1_en_preg, &val);
+
+ return !!val;
+}
+
+static const struct clk_ops cdns_sierra_derived_refclk_ops = {
+ .enable = cdns_sierra_derived_refclk_enable,
+ .disable = cdns_sierra_derived_refclk_disable,
+ .is_enabled = cdns_sierra_derived_refclk_is_enabled,
+};
+
+static int cdns_sierra_derived_refclk_register(struct cdns_sierra_phy *sp)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk;
+ struct device *dev = sp->dev;
+ struct regmap_field *field;
+ struct clk_init_data *init;
+ struct regmap *regmap;
+ char clk_name[100];
+ struct clk *clk;
+
+ derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
+ if (!derived_refclk)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ clk_names[CDNS_SIERRA_DERIVED_REFCLK]);
+
+ init = &derived_refclk->clk_data;
+
+ init->ops = &cdns_sierra_derived_refclk_ops;
+ init->flags = 0;
+ init->name = clk_name;
+
+ regmap = sp->regmap_common_cdb;
+
+ field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1outdiv_preg);
+ if (IS_ERR(field)) {
+ dev_err(dev, "cmn_plllc_clk1outdiv_preg reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->cmn_plllc_clk1outdiv_preg = field;
+
+ field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1_en_preg);
+ if (IS_ERR(field)) {
+ dev_err(dev, "cmn_plllc_clk1_en_preg reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->cmn_plllc_clk1_en_preg = field;
+
+ derived_refclk->hw.init = init;
+
+ clk = devm_clk_register(dev, &derived_refclk->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ sp->output_clks[CDNS_SIERRA_DERIVED_REFCLK] = clk;
+
+ return 0;
+}
+
static void cdns_sierra_clk_unregister(struct cdns_sierra_phy *sp)
{
struct device *dev = sp->dev;
@@ -571,6 +887,12 @@ static int cdns_sierra_clk_register(struct cdns_sierra_phy *sp)
return ret;
}
+ ret = cdns_sierra_derived_refclk_register(sp);
+ if (ret) {
+ dev_err(dev, "Failed to register derived refclk\n");
+ return ret;
+ }
+
sp->clk_data.clks = sp->output_clks;
sp->clk_data.clk_num = CDNS_SIERRA_OUTPUT_CLOCKS;
ret = of_clk_add_provider(node, of_clk_src_onecell_get, &sp->clk_data);
@@ -583,20 +905,37 @@ static int cdns_sierra_clk_register(struct cdns_sierra_phy *sp)
static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
struct device_node *child)
{
+ u32 phy_type;
+
if (of_property_read_u32(child, "reg", &inst->mlane))
return -EINVAL;
if (of_property_read_u32(child, "cdns,num-lanes", &inst->num_lanes))
return -EINVAL;
- if (of_property_read_u32(child, "cdns,phy-type", &inst->phy_type))
+ if (of_property_read_u32(child, "cdns,phy-type", &phy_type))
return -EINVAL;
+ switch (phy_type) {
+ case PHY_TYPE_PCIE:
+ inst->phy_type = TYPE_PCIE;
+ break;
+ case PHY_TYPE_USB3:
+ inst->phy_type = TYPE_USB;
+ break;
+ case PHY_TYPE_QSGMII:
+ inst->phy_type = TYPE_QSGMII;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ inst->ssc_mode = EXTERNAL_SSC;
+ of_property_read_u32(child, "cdns,ssc-mode", &inst->ssc_mode);
+
return 0;
}
-static const struct of_device_id cdns_sierra_id_table[];
-
static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
u32 block_offset, u8 reg_offset_shift,
const struct regmap_config *config)
@@ -656,7 +995,7 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
sp->cmn_refrcv_refclk_termen_preg[i] = field;
}
- regmap = sp->regmap_phy_config_ctrl;
+ regmap = sp->regmap_phy_pcs_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
@@ -664,6 +1003,14 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
}
sp->phy_pll_cfg_1 = field;
+ regmap = sp->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, pma_cmn_ready);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_CMN_CTRL reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->pma_cmn_ready = field;
+
for (i = 0; i < SIERRA_MAX_LANES; i++) {
regmap = sp->regmap_lane_cdb[i];
field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
@@ -671,7 +1018,17 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
return PTR_ERR(field);
}
- sp->pllctrl_lock[i] = field;
+ sp->pllctrl_lock[i] = field;
+ }
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ regmap = sp->regmap_phy_pcs_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, phy_iso_link_ctrl_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_ISO_LINK_CTRL reg field init for lane %d failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->phy_iso_link_ctrl_1[i] = field;
}
return 0;
@@ -708,14 +1065,49 @@ static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
}
sp->regmap_common_cdb = regmap;
- block_offset = SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset_shift);
+ block_offset = SIERRA_PHY_PCS_COMMON_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
+ &cdns_sierra_phy_pcs_cmn_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pcs_common_cdb = regmap;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_PHY_PCS_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_phy_pcs_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pcs_lane_cdb[i] = regmap;
+ }
+
+ block_offset = SIERRA_PHY_PMA_COMMON_OFFSET(block_offset_shift);
regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
- &cdns_sierra_phy_config_ctrl_config);
+ &cdns_sierra_phy_pma_cmn_cdb_config);
if (IS_ERR(regmap)) {
- dev_err(dev, "Failed to init PHY config and control regmap\n");
+ dev_err(dev, "Failed to init PHY PMA common CDB regmap\n");
return PTR_ERR(regmap);
}
- sp->regmap_phy_config_ctrl = regmap;
+ sp->regmap_phy_pma_common_cdb = regmap;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_PHY_PMA_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_phy_pma_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PMA lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pma_lane_cdb[i] = regmap;
+ }
return 0;
}
@@ -824,13 +1216,127 @@ static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
return 0;
}
+static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp)
+{
+ const struct cdns_sierra_data *init_data = sp->init_data;
+ struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ enum cdns_sierra_phy_type phy_t1, phy_t2;
+ struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_reg_pairs *reg_pairs;
+ struct cdns_sierra_vals *pcs_cmn_vals;
+ int i, j, node, mlane, num_lanes, ret;
+ enum cdns_sierra_ssc_mode ssc;
+ struct regmap *regmap;
+ u32 num_regs;
+
+ /* Multilink configuration is supported only for exactly 2 links (subnodes) */
+ if (sp->nsubnodes != 2)
+ return -EINVAL;
+
+ clk_set_rate(sp->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
+ clk_set_rate(sp->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
+
+ /* The PHY is configured to use both PLLLC and PLLLC1 */
+ regmap_field_write(sp->phy_pll_cfg_1, 0x1);
+
+ phy_t1 = sp->phys[0].phy_type;
+ phy_t2 = sp->phys[1].phy_type;
+
+ /*
+ * PHY configuration for multi-link operation is done in two steps.
+ * E.g. consider a 4-lane PHY with PCIe using 2 lanes and QSGMII using the other 2 lanes.
+ * The Sierra PHY has 2 PLLs, viz. PLLLC and PLLLC1. In this case, PLLLC is used for PCIe
+ * and PLLLC1 is used for QSGMII. The PHY is configured in two steps as described below.
+ *
+ * [1] In the first step, phy_t1 = TYPE_PCIE and phy_t2 = TYPE_QSGMII,
+ * so the register values are selected as [TYPE_PCIE][TYPE_QSGMII][ssc].
+ * This configures the PHY registers associated with PCIe (i.e. the first protocol),
+ * involving the PLLLC registers and the registers for the first 2 lanes of the PHY.
+ * [2] In the second step, the variables phy_t1 and phy_t2 are swapped. So now
+ * phy_t1 = TYPE_QSGMII and phy_t2 = TYPE_PCIE, and the register values are selected as
+ * [TYPE_QSGMII][TYPE_PCIE][ssc].
+ * This configures the PHY registers associated with QSGMII (i.e. the second protocol),
+ * involving the PLLLC1 registers and the registers for the other 2 lanes of the PHY.
+ *
+ * This completes the PHY configuration for multilink operation. This approach makes it
+ * possible to divide the large number of PHY register configurations into smaller,
+ * protocol-specific groups.
+ */
+ for (node = 0; node < sp->nsubnodes; node++) {
+ if (node == 1) {
+ /*
+ * Once the first link with phy_t1 is configured, configure the PHY for
+ * the second link with phy_t2. Get the array values as [phy_t2][phy_t1][ssc].
+ */
+ swap(phy_t1, phy_t2);
+ }
+
+ mlane = sp->phys[node].mlane;
+ ssc = sp->phys[node].ssc_mode;
+ num_lanes = sp->phys[node].num_lanes;
+
+ /* PHY PCS common register configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = sp->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PHY PMA lane register configurations */
+ phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_t1][phy_t2][ssc];
+ if (phy_pma_ln_vals) {
+ reg_pairs = phy_pma_ln_vals->reg_pairs;
+ num_regs = phy_pma_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = sp->regmap_phy_pma_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ /* PMA common register configurations */
+ pma_cmn_vals = init_data->pma_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pma_cmn_vals) {
+ reg_pairs = pma_cmn_vals->reg_pairs;
+ num_regs = pma_cmn_vals->num_regs;
+ regmap = sp->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PMA lane register configurations */
+ pma_ln_vals = init_data->pma_ln_vals[phy_t1][phy_t2][ssc];
+ if (pma_ln_vals) {
+ reg_pairs = pma_ln_vals->reg_pairs;
+ num_regs = pma_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = sp->regmap_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ if (phy_t1 == TYPE_QSGMII)
+ reset_control_deassert(sp->phys[node].lnk_rst);
+ }
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
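The two-pass table lookup described in the comment inside cdns_sierra_phy_configure_multilink() can be seen in isolation in the following minimal userspace sketch. The enum and table names mirror the driver, but the table contents and the main() harness are hypothetical, for illustration only.

#include <stdio.h>

enum phy_type { TYPE_NONE, TYPE_PCIE, TYPE_QSGMII, TYPE_MAX };
enum ssc_mode { NO_SSC, EXTERNAL_SSC, INTERNAL_SSC, SSC_MAX };

/* Sparse [phy_t1][phy_t2][ssc] table: combinations that are not listed stay
 * NULL, which is why the driver checks each pointer before programming. */
static const char *pma_cmn_vals[TYPE_MAX][TYPE_MAX][SSC_MAX] = {
	[TYPE_PCIE][TYPE_QSGMII][NO_SSC] = "pcie_100_no_ssc_plllc_cmn_vals",
	[TYPE_QSGMII][TYPE_PCIE][NO_SSC] = "qsgmii_100_no_ssc_plllc1_cmn_vals",
};

#define swap(a, b) do { int tmp = (a); (a) = (b); (b) = tmp; } while (0)

int main(void)
{
	enum phy_type phy_t1 = TYPE_PCIE, phy_t2 = TYPE_QSGMII;

	for (int node = 0; node < 2; node++) {
		if (node == 1)
			swap(phy_t1, phy_t2); /* second pass configures the other protocol */

		const char *vals = pma_cmn_vals[phy_t1][phy_t2][NO_SSC];
		if (vals) /* NULL means nothing to program for this combination */
			printf("node %d -> %s\n", node, vals);
	}
	return 0;
}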
static int cdns_sierra_phy_probe(struct platform_device *pdev)
{
struct cdns_sierra_phy *sp;
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
- const struct of_device_id *match;
- struct cdns_sierra_data *data;
+ const struct cdns_sierra_data *data;
unsigned int id_value;
int i, ret, node = 0;
void __iomem *base;
@@ -840,12 +1346,10 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
return -ENODEV;
/* Get init data for this PHY */
- match = of_match_device(cdns_sierra_id_table, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct cdns_sierra_data *)match->data;
-
sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
if (!sp)
return -ENOMEM;
@@ -946,8 +1450,11 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
}
/* If more than one subnode, configure the PHY as multilink */
- if (!sp->autoconf && sp->nsubnodes > 1)
- regmap_field_write(sp->phy_pll_cfg_1, 0x1);
+ if (!sp->autoconf && sp->nsubnodes > 1) {
+ ret = cdns_sierra_phy_configure_multilink(sp);
+ if (ret)
+ goto put_child2;
+ }
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
@@ -991,6 +1498,449 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
return 0;
}
+/* QSGMII PHY PMA lane configuration */
+static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
+ {0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
+};
+
+static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
+ .reg_pairs = qsgmii_phy_pma_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs),
+};
+
+/* QSGMII refclk 100MHz, 20b, opt1, No BW cal, no ssc, PLL LC1 */
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_cmn_regs[] = {
+ {0x2085, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x0252, SIERRA_DET_STANDEC_E_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x0FFE, SIERRA_PSC_RX_A0_PREG},
+ {0x0011, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0001, SIERRA_PLLCTRL_GEN_A_PREG},
+ {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x0089, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x3C3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x3222, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x8422, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4111, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4111, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x9595, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x0186, SIERRA_DEQ_GLUT0},
+ {0x0186, SIERRA_DEQ_GLUT1},
+ {0x0186, SIERRA_DEQ_GLUT2},
+ {0x0186, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x0861, SIERRA_DEQ_ALUT0},
+ {0x07E0, SIERRA_DEQ_ALUT1},
+ {0x079E, SIERRA_DEQ_ALUT2},
+ {0x071D, SIERRA_DEQ_ALUT3},
+ {0x03F5, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C04, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0660, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+ {0x00D5, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x0B6D, SIERRA_CPI_RESBIAS_BIN_PREG},
+ {0x0102, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs),
+};
+
+static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs),
+};
+
+/* PCIE PHY PCS common configuration */
+static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
+ {0x0430, SIERRA_PHY_PIPE_CMN_CTRL1}
+};
+
+static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
+ .reg_pairs = pcie_phy_pcs_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_no_ssc_plllc_cmn_regs[] = {
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_no_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_no_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = {
+ {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
+ {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
+ {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
+ {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
+ {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_int_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_int_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = {
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_ext_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = {
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_no_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_no_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_no_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_int_ssc[] = {
+ {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
+ {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
+ {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
+ {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
+ {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_int_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_int_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_int_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc),
+};
+
/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
@@ -1002,13 +1952,62 @@ static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
/* refclk100MHz_32b_PCIe_ln_ext_ssc */
static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
- {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
};
/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
@@ -1118,32 +2117,167 @@ static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};
+static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
+ .reg_pairs = cdns_usb_cmn_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+};
+
+static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
+ .reg_pairs = cdns_usb_ln_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+};
+
static const struct cdns_sierra_data cdns_map_sierra = {
- SIERRA_MACRO_ID,
- 0x2,
- 0x2,
- ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
- cdns_pcie_cmn_regs_ext_ssc,
- cdns_pcie_ln_regs_ext_ssc,
- cdns_usb_cmn_regs_ext_ssc,
- cdns_usb_ln_regs_ext_ssc,
+ .id_value = SIERRA_MACRO_ID,
+ .block_offset_shift = 0x2,
+ .reg_offset_shift = 0x2,
+ .pcs_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .pma_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ },
+ },
+ },
+ .pma_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ },
+ },
+ },
};
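The hunks above convert cdns_sierra_data from positional to designated initializers. A small standalone example of why that matters, using a hypothetical three-member struct:

#include <stdio.h>

struct cfg {
	int id_value;
	int block_offset_shift;
	int reg_offset_shift;
};

/* Positional: correctness silently depends on the member order. */
static const struct cfg a = { 0x1234, 0x2, 0x2 };

/* Designated: self-documenting and robust if members are reordered or added;
 * any member left out is zero-initialized, giving the sparse tables above
 * their implicit NULL entries. */
static const struct cfg b = {
	.id_value = 0x1234,
	.block_offset_shift = 0x2,
	.reg_offset_shift = 0x2,
};

int main(void)
{
	printf("%#x %#x\n", a.id_value, b.id_value); /* 0x1234 0x1234 */
	return 0;
}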
static const struct cdns_sierra_data cdns_ti_map_sierra = {
- SIERRA_MACRO_ID,
- 0x0,
- 0x1,
- ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
- cdns_pcie_cmn_regs_ext_ssc,
- cdns_pcie_ln_regs_ext_ssc,
- cdns_usb_cmn_regs_ext_ssc,
- cdns_usb_ln_regs_ext_ssc,
+ .id_value = SIERRA_MACRO_ID,
+ .block_offset_shift = 0x0,
+ .reg_offset_shift = 0x1,
+ .pcs_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .phy_pma_ln_vals = {
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_phy_pma_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
+ },
+ },
+ },
+ .pma_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ },
+ },
+ },
+ .pma_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ },
+ },
+ },
};
static const struct of_device_id cdns_sierra_id_table[] = {
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 5786166133d3..7c4b8050485f 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -2278,7 +2278,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
- enum cdns_torrent_phy_type phy_t1, phy_t2, tmp_phy_type;
+ enum cdns_torrent_phy_type phy_t1, phy_t2;
struct cdns_torrent_vals *pcs_cmn_vals;
int i, j, node, mlane, num_lanes, ret;
struct cdns_reg_pairs *reg_pairs;
@@ -2304,9 +2304,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
* configure the PHY for second link with phy_t2.
* Get the array values as [phy_t2][phy_t1][ssc].
*/
- tmp_phy_type = phy_t1;
- phy_t1 = phy_t2;
- phy_t2 = tmp_phy_type;
+ swap(phy_t1, phy_t2);
}
mlane = cdns_phy->phys[node].mlane;
diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig
index 320630ffe3cd..c3669c28ea9f 100644
--- a/drivers/phy/freescale/Kconfig
+++ b/drivers/phy/freescale/Kconfig
@@ -14,3 +14,11 @@ config PHY_MIXEL_MIPI_DPHY
help
Enable this to add support for the Mixel DSI PHY as found
on NXP's i.MX8 family of SOCs.
+
+config PHY_FSL_IMX8M_PCIE
+ tristate "Freescale i.MX8M PCIE PHY"
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ help
+	  Enable this to add support for the PCIe PHY as found on the
+	  i.MX8M family of SoCs.
diff --git a/drivers/phy/freescale/Makefile b/drivers/phy/freescale/Makefile
index 1d02e3869b45..55d07c742ab0 100644
--- a/drivers/phy/freescale/Makefile
+++ b/drivers/phy/freescale/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PHY_FSL_IMX8MQ_USB) += phy-fsl-imx8mq-usb.o
obj-$(CONFIG_PHY_MIXEL_MIPI_DPHY) += phy-fsl-imx8-mipi-dphy.o
+obj-$(CONFIG_PHY_FSL_IMX8M_PCIE) += phy-fsl-imx8m-pcie.o
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
new file mode 100644
index 000000000000..04b1aafb29f4
--- /dev/null
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+
+#define IMX8MM_PCIE_PHY_CMN_REG061 0x184
+#define ANA_PLL_CLK_OUT_TO_EXT_IO_EN BIT(0)
+#define IMX8MM_PCIE_PHY_CMN_REG062 0x188
+#define ANA_PLL_CLK_OUT_TO_EXT_IO_SEL BIT(3)
+#define IMX8MM_PCIE_PHY_CMN_REG063 0x18C
+#define AUX_PLL_REFCLK_SEL_SYS_PLL GENMASK(7, 6)
+#define IMX8MM_PCIE_PHY_CMN_REG064 0x190
+#define ANA_AUX_RX_TX_SEL_TX BIT(7)
+#define ANA_AUX_RX_TERM_GND_EN BIT(3)
+#define ANA_AUX_TX_TERM BIT(2)
+#define IMX8MM_PCIE_PHY_CMN_REG065 0x194
+#define ANA_AUX_RX_TERM (BIT(7) | BIT(4))
+#define ANA_AUX_TX_LVL GENMASK(3, 0)
+#define IMX8MM_PCIE_PHY_CMN_REG75 0x1D4
+#define PCIE_PHY_CMN_REG75_PLL_DONE 0x3
+#define PCIE_PHY_TRSV_REG5 0x414
+#define PCIE_PHY_TRSV_REG5_GEN1_DEEMP 0x2D
+#define PCIE_PHY_TRSV_REG6 0x418
+#define PCIE_PHY_TRSV_REG6_GEN2_DEEMP 0xF
+
+#define IMX8MM_GPR_PCIE_REF_CLK_SEL GENMASK(25, 24)
+#define IMX8MM_GPR_PCIE_REF_CLK_PLL FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x3)
+#define IMX8MM_GPR_PCIE_REF_CLK_EXT FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x2)
+#define IMX8MM_GPR_PCIE_AUX_EN BIT(19)
+#define IMX8MM_GPR_PCIE_CMN_RST BIT(18)
+#define IMX8MM_GPR_PCIE_POWER_OFF BIT(17)
+#define IMX8MM_GPR_PCIE_SSC_EN BIT(16)
+#define IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE BIT(9)
+
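GENMASK()/FIELD_PREP() above build the 2-bit refclk selector in GPR14 bits 25:24. A userspace re-implementation of the same arithmetic; the macro stand-ins match the <linux/bits.h> and <linux/bitfield.h> semantics for these simple 32-bit cases, and the harness is illustrative only.

#include <stdio.h>

/* Minimal stand-ins for the kernel macros, valid for 32-bit fields. */
#define BIT(n)           (1u << (n))
#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))

#define IMX8MM_GPR_PCIE_REF_CLK_SEL GENMASK(25, 24)
#define IMX8MM_GPR_PCIE_REF_CLK_PLL FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x3)
#define IMX8MM_GPR_PCIE_REF_CLK_EXT FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x2)

int main(void)
{
	printf("SEL mask = 0x%08x\n", IMX8MM_GPR_PCIE_REF_CLK_SEL); /* 0x03000000 */
	printf("PLL      = 0x%08x\n", IMX8MM_GPR_PCIE_REF_CLK_PLL); /* 0x03000000 */
	printf("EXT      = 0x%08x\n", IMX8MM_GPR_PCIE_REF_CLK_EXT); /* 0x02000000 */
	return 0;
}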
+struct imx8_pcie_phy {
+ void __iomem *base;
+ struct clk *clk;
+ struct phy *phy;
+ struct regmap *iomuxc_gpr;
+ struct reset_control *reset;
+ u32 refclk_pad_mode;
+ u32 tx_deemph_gen1;
+ u32 tx_deemph_gen2;
+ bool clkreq_unused;
+};
+
+static int imx8_pcie_phy_init(struct phy *phy)
+{
+ int ret;
+ u32 val, pad_mode;
+ struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
+
+ reset_control_assert(imx8_phy->reset);
+
+ pad_mode = imx8_phy->refclk_pad_mode;
+ /* Set AUX_EN_OVERRIDE to 1'b0 when the CLKREQ# signal isn't hooked up */
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE,
+ imx8_phy->clkreq_unused ?
+ 0 : IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE);
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_AUX_EN,
+ IMX8MM_GPR_PCIE_AUX_EN);
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_POWER_OFF, 0);
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_SSC_EN, 0);
+
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ?
+ IMX8MM_GPR_PCIE_REF_CLK_EXT :
+ IMX8MM_GPR_PCIE_REF_CLK_PLL);
+ usleep_range(100, 200);
+
+ /* Do the PHY common block reset */
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_CMN_RST,
+ IMX8MM_GPR_PCIE_CMN_RST);
+ usleep_range(200, 500);
+
+ if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT) {
+ /* Configure the pad as input */
+ val = readl(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+ writel(val & ~ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+ } else if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT) {
+ /* Configure the PHY to output the refclock via pad */
+ writel(ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+ writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
+ writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
+ writel(val | ANA_AUX_RX_TERM_GND_EN,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
+ writel(ANA_AUX_RX_TERM | ANA_AUX_TX_LVL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG065);
+ }
+
+ /* Tune PHY de-emphasis setting to pass PCIe compliance. */
+ if (imx8_phy->tx_deemph_gen1)
+ writel(imx8_phy->tx_deemph_gen1,
+ imx8_phy->base + PCIE_PHY_TRSV_REG5);
+ if (imx8_phy->tx_deemph_gen2)
+ writel(imx8_phy->tx_deemph_gen2,
+ imx8_phy->base + PCIE_PHY_TRSV_REG6);
+
+ reset_control_deassert(imx8_phy->reset);
+
+ /* Poll to check whether the PHY is ready. */
+ ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG75,
+ val, val == PCIE_PHY_CMN_REG75_PLL_DONE,
+ 10, 20000);
+ return ret;
+}
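readl_poll_timeout() above polls CMN_REG75 every 10 us for up to 20 ms until the PLL reports done. Below is a simplified userspace model of the iopoll helper's contract, not the kernel implementation; the fake register and condition functions are hypothetical.

#include <stdio.h>
#include <errno.h>

/* Model: poll read_fn() every sleep_us until cond(val) holds or timeout_us
 * elapses; returns 0 on success, -ETIMEDOUT otherwise. */
static int poll_timeout(unsigned int (*read_fn)(void), int (*cond)(unsigned int),
			unsigned long sleep_us, unsigned long timeout_us)
{
	for (unsigned long waited = 0; waited <= timeout_us; waited += sleep_us) {
		if (cond(read_fn()))
			return 0;
		/* a real implementation would usleep(sleep_us) here */
	}
	return -ETIMEDOUT;
}

static unsigned int fake_reg75(void) { return 0x3; } /* pretend the PLL locked */
static int pll_done(unsigned int val) { return val == 0x3; } /* PLL_DONE */

int main(void)
{
	printf("ret = %d\n", poll_timeout(fake_reg75, pll_done, 10, 20000)); /* 0 */
	return 0;
}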
+
+static int imx8_pcie_phy_power_on(struct phy *phy)
+{
+ struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
+
+ return clk_prepare_enable(imx8_phy->clk);
+}
+
+static int imx8_pcie_phy_power_off(struct phy *phy)
+{
+ struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(imx8_phy->clk);
+
+ return 0;
+}
+
+static const struct phy_ops imx8_pcie_phy_ops = {
+ .init = imx8_pcie_phy_init,
+ .power_on = imx8_pcie_phy_power_on,
+ .power_off = imx8_pcie_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int imx8_pcie_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx8_pcie_phy *imx8_phy;
+ struct resource *res;
+
+ imx8_phy = devm_kzalloc(dev, sizeof(*imx8_phy), GFP_KERNEL);
+ if (!imx8_phy)
+ return -ENOMEM;
+
+ /* get PHY refclk pad mode */
+ of_property_read_u32(np, "fsl,refclk-pad-mode",
+ &imx8_phy->refclk_pad_mode);
+
+ if (of_property_read_u32(np, "fsl,tx-deemph-gen1",
+ &imx8_phy->tx_deemph_gen1))
+ imx8_phy->tx_deemph_gen1 = 0;
+
+ if (of_property_read_u32(np, "fsl,tx-deemph-gen2",
+ &imx8_phy->tx_deemph_gen2))
+ imx8_phy->tx_deemph_gen2 = 0;
+
+ if (of_property_read_bool(np, "fsl,clkreq-unsupported"))
+ imx8_phy->clkreq_unused = true;
+ else
+ imx8_phy->clkreq_unused = false;
+
+ imx8_phy->clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(imx8_phy->clk)) {
+ dev_err(dev, "failed to get imx pcie phy clock\n");
+ return PTR_ERR(imx8_phy->clk);
+ }
+
+ /* Grab GPR config register range */
+ imx8_phy->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imx8_phy->iomuxc_gpr)) {
+ dev_err(dev, "unable to find iomuxc registers\n");
+ return PTR_ERR(imx8_phy->iomuxc_gpr);
+ }
+
+ imx8_phy->reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx8_phy->reset)) {
+ dev_err(dev, "Failed to get PCIEPHY reset control\n");
+ return PTR_ERR(imx8_phy->reset);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ imx8_phy->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(imx8_phy->base))
+ return PTR_ERR(imx8_phy->base);
+
+ imx8_phy->phy = devm_phy_create(dev, NULL, &imx8_pcie_phy_ops);
+ if (IS_ERR(imx8_phy->phy))
+ return PTR_ERR(imx8_phy->phy);
+
+ phy_set_drvdata(imx8_phy->phy, imx8_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id imx8_pcie_phy_of_match[] = {
+ {.compatible = "fsl,imx8mm-pcie-phy",},
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match);
+
+static struct platform_driver imx8_pcie_phy_driver = {
+ .probe = imx8_pcie_phy_probe,
+ .driver = {
+ .name = "imx8-pcie-phy",
+ .of_match_table = imx8_pcie_phy_of_match,
+ }
+};
+module_platform_driver(imx8_pcie_phy_driver);
+
+MODULE_DESCRIPTION("FSL IMX8 PCIE PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig
index ac42bb2fb394..18a3cc5b98c0 100644
--- a/drivers/phy/intel/Kconfig
+++ b/drivers/phy/intel/Kconfig
@@ -46,3 +46,13 @@ config PHY_INTEL_LGM_EMMC
select GENERIC_PHY
help
Enable this to support the Intel EMMC PHY
+
+config PHY_INTEL_THUNDERBAY_EMMC
+ tristate "Intel Thunder Bay eMMC PHY driver"
+ depends on OF && (ARCH_THUNDERBAY || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+	  This option enables support for the Intel Thunder Bay SoC eMMC PHY.
+
+ To compile this driver as a module, choose M here: the module
+ will be called phy-intel-thunderbay-emmc.ko.
diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile
index 14550981a707..b7321d56b0bb 100644
--- a/drivers/phy/intel/Makefile
+++ b/drivers/phy/intel/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_PHY_INTEL_KEEMBAY_EMMC) += phy-intel-keembay-emmc.o
obj-$(CONFIG_PHY_INTEL_KEEMBAY_USB) += phy-intel-keembay-usb.o
obj-$(CONFIG_PHY_INTEL_LGM_COMBO) += phy-intel-lgm-combo.o
obj-$(CONFIG_PHY_INTEL_LGM_EMMC) += phy-intel-lgm-emmc.o
+obj-$(CONFIG_PHY_INTEL_THUNDERBAY_EMMC) += phy-intel-thunderbay-emmc.o
diff --git a/drivers/phy/intel/phy-intel-thunderbay-emmc.c b/drivers/phy/intel/phy-intel-thunderbay-emmc.c
new file mode 100644
index 000000000000..593f6970b81e
--- /dev/null
+++ b/drivers/phy/intel/phy-intel-thunderbay-emmc.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel ThunderBay eMMC PHY driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+/* eMMC/SD/SDIO core/phy configuration registers */
+#define CTRL_CFG_0 0x00
+#define CTRL_CFG_1 0x04
+#define CTRL_PRESET_0 0x08
+#define CTRL_PRESET_1 0x0c
+#define CTRL_PRESET_2 0x10
+#define CTRL_PRESET_3 0x14
+#define CTRL_PRESET_4 0x18
+#define CTRL_CFG_2 0x1c
+#define CTRL_CFG_3 0x20
+#define PHY_CFG_0 0x24
+#define PHY_CFG_1 0x28
+#define PHY_CFG_2 0x2c
+#define PHYBIST_CTRL 0x30
+#define SDHC_STAT3 0x34
+#define PHY_STAT 0x38
+#define PHYBIST_STAT_0 0x3c
+#define PHYBIST_STAT_1 0x40
+#define EMMC_AXI 0x44
+
+/* CTRL_PRESET_3 */
+#define CTRL_PRESET3_MASK GENMASK(31, 0)
+#define CTRL_PRESET3_SHIFT 0
+
+/* CTRL_CFG_0 bit fields */
+#define SUPPORT_HS_MASK BIT(26)
+#define SUPPORT_HS_SHIFT 26
+
+#define SUPPORT_8B_MASK BIT(24)
+#define SUPPORT_8B_SHIFT 24
+
+/* CTRL_CFG_1 bit fields */
+#define SUPPORT_SDR50_MASK BIT(28)
+#define SUPPORT_SDR50_SHIFT 28
+#define SLOT_TYPE_MASK GENMASK(27, 26)
+#define SLOT_TYPE_OFFSET 26
+#define SUPPORT_64B_MASK BIT(24)
+#define SUPPORT_64B_SHIFT 24
+#define SUPPORT_HS400_MASK BIT(2)
+#define SUPPORT_HS400_SHIFT 2
+#define SUPPORT_DDR50_MASK BIT(1)
+#define SUPPORT_DDR50_SHIFT 1
+#define SUPPORT_SDR104_MASK BIT(0)
+#define SUPPORT_SDR104_SHIFT 0
+
+/* PHY_CFG_0 bit fields */
+#define SEL_DLY_TXCLK_MASK BIT(29)
+#define SEL_DLY_TXCLK_SHIFT 29
+#define SEL_DLY_RXCLK_MASK BIT(28)
+#define SEL_DLY_RXCLK_SHIFT 28
+
+#define OTAP_DLY_ENA_MASK BIT(27)
+#define OTAP_DLY_ENA_SHIFT 27
+#define OTAP_DLY_SEL_MASK GENMASK(26, 23)
+#define OTAP_DLY_SEL_SHIFT 23
+#define ITAP_CHG_WIN_MASK BIT(22)
+#define ITAP_CHG_WIN_SHIFT 22
+#define ITAP_DLY_ENA_MASK BIT(21)
+#define ITAP_DLY_ENA_SHIFT 21
+#define ITAP_DLY_SEL_MASK GENMASK(20, 16)
+#define ITAP_DLY_SEL_SHIFT 16
+#define RET_ENB_MASK BIT(15)
+#define RET_ENB_SHIFT 15
+#define RET_EN_MASK BIT(14)
+#define RET_EN_SHIFT 14
+#define DLL_IFF_MASK GENMASK(13, 11)
+#define DLL_IFF_SHIFT 11
+#define DLL_EN_MASK BIT(10)
+#define DLL_EN_SHIFT 10
+#define DLL_TRIM_ICP_MASK GENMASK(9, 6)
+#define DLL_TRIM_ICP_SHIFT 6
+#define RETRIM_EN_MASK BIT(5)
+#define RETRIM_EN_SHIFT 5
+#define RETRIM_MASK BIT(4)
+#define RETRIM_SHIFT 4
+#define DR_TY_MASK GENMASK(3, 1)
+#define DR_TY_SHIFT 1
+#define PWR_DOWN_MASK BIT(0)
+#define PWR_DOWN_SHIFT 0
+
+/* PHY_CFG_1 bit fields */
+#define REN_DAT_MASK GENMASK(19, 12)
+#define REN_DAT_SHIFT 12
+#define REN_CMD_MASK BIT(11)
+#define REN_CMD_SHIFT 11
+#define REN_STRB_MASK BIT(10)
+#define REN_STRB_SHIFT 10
+#define PU_STRB_MASK BIT(20)
+#define PU_STRB_SHIFT 20
+
+/* PHY_CFG_2 bit fields */
+#define CLKBUF_MASK GENMASK(24, 21)
+#define CLKBUF_SHIFT 21
+#define SEL_STRB_MASK GENMASK(20, 13)
+#define SEL_STRB_SHIFT 13
+#define SEL_FREQ_MASK GENMASK(12, 10)
+#define SEL_FREQ_SHIFT 10
+
+/* PHY_STAT bit fields */
+#define CAL_DONE BIT(6)
+#define DLL_RDY BIT(5)
+
+#define OTAP_DLY 0x0
+#define ITAP_DLY 0x0
+#define STRB 0x33
+
+/* From ACS_eMMC51_16nFFC_RO1100_Userguide_v1p0.pdf p17 */
+#define FREQSEL_200M_170M 0x0
+#define FREQSEL_170M_140M 0x1
+#define FREQSEL_140M_110M 0x2
+#define FREQSEL_110M_80M 0x3
+#define FREQSEL_80M_50M 0x4
+#define FREQSEL_275M_250M 0x5
+#define FREQSEL_250M_225M 0x6
+#define FREQSEL_225M_200M 0x7
+
+/* Phy power status */
+#define PHY_UNINITIALIZED 0
+#define PHY_INITIALIZED 1
+
+/*
+ * During init (400 kHz), phy_settings will be called with a 200 MHz clock.
+ * To avoid incorrectly configuring the phy for init (400 kHz),
+ * "phy_power_sts" is used. Whenever the actual clock is set, the phy is
+ * always powered off once and then powered on again
+ * (sdhci_arasan_set_clock). That behaviour is used to identify whether the
+ * settings are for the init phy_power_on or for the actual-clock
+ * phy_power_on:
+ * 0 --> init settings
+ * 1 --> actual settings
+ */
+
+struct thunderbay_emmc_phy {
+ void __iomem *reg_base;
+ struct clk *emmcclk;
+ int phy_power_sts;
+};
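The init-vs-actual handshake described in the comment above reduces to a tiny state machine: power_off() marks the PHY as PHY_INITIALIZED, so the next power_on() knows it carries a real clock setting. A standalone model follows; the state names are the driver's, the harness is hypothetical.

#include <stdio.h>

#define PHY_UNINITIALIZED 0
#define PHY_INITIALIZED   1

static int phy_power_sts = PHY_UNINITIALIZED;

static void power_on(void)
{
	if (phy_power_sts == PHY_UNINITIALIZED) {
		printf("apply init (400 kHz) settings\n");
	} else {
		printf("apply actual-clock settings\n");
		phy_power_sts = PHY_UNINITIALIZED; /* re-arm for a future re-init */
	}
}

static void power_off(void)
{
	phy_power_sts = PHY_INITIALIZED; /* next power_on carries the real clock */
}

int main(void)
{
	power_on();              /* init settings */
	power_off(); power_on(); /* actual settings */
	return 0;
}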
+
+static inline void update_reg(struct thunderbay_emmc_phy *tbh_phy, u32 offset,
+ u32 mask, u32 shift, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(tbh_phy->reg_base + offset);
+ tmp &= ~mask;
+ tmp |= val << shift;
+ writel(tmp, tbh_phy->reg_base + offset);
+}
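update_reg() above is a classic read-modify-write on a memory-mapped register: clear the field's mask, then OR in the new value shifted into place. The same arithmetic on a plain variable, with the MMIO access replaced by a u32 for illustration:

#include <stdio.h>
#include <stdint.h>

#define DR_TY_MASK  0xEu /* GENMASK(3, 1) */
#define DR_TY_SHIFT 1

int main(void)
{
	uint32_t reg = 0xFFFFFFFF;  /* stands in for readl(reg_base + offset) */

	reg &= ~DR_TY_MASK;         /* clear the field */
	reg |= 0x1u << DR_TY_SHIFT; /* insert the new value */

	printf("0x%08x\n", reg);    /* 0xFFFFFFF3 */
	return 0;
}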
+
+static int thunderbay_emmc_phy_power(struct phy *phy, bool power_on)
+{
+ struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+ unsigned int freqsel = FREQSEL_200M_170M;
+ unsigned long rate;
+ static int lock;
+ u32 val;
+ int ret;
+
+ /* Disable DLL */
+ rate = clk_get_rate(tbh_phy->emmcclk);
+ switch (rate) {
+ case 200000000:
+ /* Lock the DLL only when it is used, i.e. only if SEL_DLY_TXCLK/RXCLK are 0 */
+ update_reg(tbh_phy, PHY_CFG_0, DLL_EN_MASK, DLL_EN_SHIFT, 0x0);
+ break;
+
+ /* dll lock not required for other frequencies */
+ case 50000000 ... 52000000:
+ case 400000:
+ default:
+ break;
+ }
+
+ if (!power_on)
+ return 0;
+
+ rate = clk_get_rate(tbh_phy->emmcclk);
+ switch (rate) {
+ case 170000001 ... 200000000:
+ freqsel = FREQSEL_200M_170M;
+ break;
+
+ case 140000001 ... 170000000:
+ freqsel = FREQSEL_170M_140M;
+ break;
+
+ case 110000001 ... 140000000:
+ freqsel = FREQSEL_140M_110M;
+ break;
+
+ case 80000001 ... 110000000:
+ freqsel = FREQSEL_110M_80M;
+ break;
+
+ case 50000000 ... 80000000:
+ freqsel = FREQSEL_80M_50M;
+ break;
+
+ case 250000001 ... 275000000:
+ freqsel = FREQSEL_275M_250M;
+ break;
+
+ case 225000001 ... 250000000:
+ freqsel = FREQSEL_250M_225M;
+ break;
+
+ case 200000001 ... 225000000:
+ freqsel = FREQSEL_225M_200M;
+ break;
+ default:
+ break;
+ }
+ /* The clock rate is only checked against the upper limit; it may drop below range during init. */
+ if (rate > 200000000)
+ dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
+
+ udelay(5);
+
+ if (lock == 0) {
+ /* PDB will be done only once per boot */
+ update_reg(tbh_phy, PHY_CFG_0, PWR_DOWN_MASK,
+ PWR_DOWN_SHIFT, 0x1);
+ lock = 1;
+ /*
+ * The user manual asks the driver to wait 5 us for calpad busy
+ * trimming. However, this value is documented to be PVT (i.e.
+ * process, voltage and temperature) dependent, and failure cases
+ * have been seen, which indicates we should be more tolerant of
+ * calpad busy trimming.
+ */
+ ret = readl_poll_timeout(tbh_phy->reg_base + PHY_STAT,
+ val, (val & CAL_DONE), 10, 50);
+ if (ret) {
+ dev_err(&phy->dev, "caldone failed, ret=%d\n", ret);
+ return ret;
+ }
+ }
+ rate = clk_get_rate(tbh_phy->emmcclk);
+ switch (rate) {
+ case 200000000:
+ /* Set frequency of the DLL operation */
+ update_reg(tbh_phy, PHY_CFG_2, SEL_FREQ_MASK, SEL_FREQ_SHIFT, freqsel);
+
+ /* Enable DLL */
+ update_reg(tbh_phy, PHY_CFG_0, DLL_EN_MASK, DLL_EN_SHIFT, 0x1);
+
+ /*
+ * After enabling the analog DLL circuits, the docs say we need
+ * 10.2 us if our source clock is at 50 MHz and that the lock time
+ * scales linearly with clock speed. If we are powering on the PHY
+ * and the card clock is super slow (like 100 kHz), this could take
+ * as long as 5.1 ms, as per the math:
+ * 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms.
+ * Hopefully we won't be running at 100 kHz, but we should still
+ * make sure we wait long enough.
+ *
+ * NOTE: There appear to be corner cases where the DLL seems to
+ * take extra long to lock for reasons that aren't understood. In
+ * some extreme cases we've seen it take over 10 ms (!). We'll be
+ * generous and give it 50 ms.
+ */
+ ret = readl_poll_timeout(tbh_phy->reg_base + PHY_STAT,
+ val, (val & DLL_RDY), 10, 50 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(&phy->dev, "dllrdy failed, ret=%d\n", ret);
+ return ret;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
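The DLL lock-time comment above scales a 10.2 us lock time at 50 MHz linearly with the inverse of the source clock; a one-liner verifying the 100 kHz worst case from the comment:

#include <stdio.h>

int main(void)
{
	double lock_us_at_50mhz = 10.2;
	double clk_hz = 100000.0; /* worst-case card clock from the comment */

	/* lock time scales as 1/clk: 10.2 us * (50 MHz / 100 kHz) */
	printf("%.1f ms\n", lock_us_at_50mhz * (50000000.0 / clk_hz) / 1000.0); /* 5.1 ms */
	return 0;
}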
+
+static int thunderbay_emmc_phy_init(struct phy *phy)
+{
+ struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+
+ tbh_phy->emmcclk = clk_get(&phy->dev, "emmcclk");
+
+ return PTR_ERR_OR_ZERO(tbh_phy->emmcclk);
+}
+
+static int thunderbay_emmc_phy_exit(struct phy *phy)
+{
+ struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+
+ clk_put(tbh_phy->emmcclk);
+
+ return 0;
+}
+
+static int thunderbay_emmc_phy_power_on(struct phy *phy)
+{
+ struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+ unsigned long rate;
+
+ /* Overwrite capability bits that are configurable in the bootloader */
+ update_reg(tbh_phy, CTRL_CFG_0,
+ SUPPORT_HS_MASK, SUPPORT_HS_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_0,
+ SUPPORT_8B_MASK, SUPPORT_8B_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_1,
+ SUPPORT_SDR50_MASK, SUPPORT_SDR50_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_1,
+ SUPPORT_DDR50_MASK, SUPPORT_DDR50_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_1,
+ SUPPORT_SDR104_MASK, SUPPORT_SDR104_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_1,
+ SUPPORT_HS400_MASK, SUPPORT_HS400_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_1,
+ SUPPORT_64B_MASK, SUPPORT_64B_SHIFT, 0x1);
+
+ if (tbh_phy->phy_power_sts == PHY_UNINITIALIZED) {
+ /* Indicates initialization: settings for init, same as the 400 kHz setting */
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK, SEL_DLY_TXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK, SEL_DLY_RXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK, ITAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK, ITAP_DLY_SEL_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK, OTAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK, OTAP_DLY_SEL_SHIFT, 0);
+ update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK, DLL_TRIM_ICP_SHIFT, 0);
+ update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK, DR_TY_SHIFT, 0x1);
+
+ } else if (tbh_phy->phy_power_sts == PHY_INITIALIZED) {
+ /* Indicates actual clock setting */
+ rate = clk_get_rate(tbh_phy->emmcclk);
+ switch (rate) {
+ case 200000000:
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK,
+ SEL_DLY_TXCLK_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK,
+ SEL_DLY_RXCLK_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK,
+ ITAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK,
+ ITAP_DLY_SEL_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK,
+ OTAP_DLY_ENA_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK,
+ OTAP_DLY_SEL_SHIFT, 2);
+ update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK,
+ DLL_TRIM_ICP_SHIFT, 0x8);
+ update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK,
+ DR_TY_SHIFT, 0x1);
+ /* For HS400 only */
+ update_reg(tbh_phy, PHY_CFG_2, SEL_STRB_MASK,
+ SEL_STRB_SHIFT, STRB);
+ break;
+
+ case 50000000 ... 52000000:
+ /* This setting works for both HS and DDR52 */
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK,
+ SEL_DLY_TXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK,
+ SEL_DLY_RXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK,
+ ITAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK,
+ ITAP_DLY_SEL_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK,
+ OTAP_DLY_ENA_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK,
+ OTAP_DLY_SEL_SHIFT, 4);
+ update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK,
+ DLL_TRIM_ICP_SHIFT, 0x8);
+ update_reg(tbh_phy, PHY_CFG_0,
+ DR_TY_MASK, DR_TY_SHIFT, 0x1);
+ break;
+
+ case 400000:
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK,
+ SEL_DLY_TXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK,
+ SEL_DLY_RXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK,
+ ITAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK,
+ ITAP_DLY_SEL_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK,
+ OTAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK,
+ OTAP_DLY_SEL_SHIFT, 0);
+ update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK,
+ DLL_TRIM_ICP_SHIFT, 0);
+ update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK, DR_TY_SHIFT, 0x1);
+ break;
+
+ default:
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK,
+ SEL_DLY_TXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK,
+ SEL_DLY_RXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK,
+ ITAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK,
+ ITAP_DLY_SEL_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK,
+ OTAP_DLY_ENA_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK,
+ OTAP_DLY_SEL_SHIFT, 2);
+ update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK,
+ DLL_TRIM_ICP_SHIFT, 0x8);
+ update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK,
+ DR_TY_SHIFT, 0x1);
+ break;
+ }
+ /* The init sequence is called without phy_power_off, so reset to the init state */
+ tbh_phy->phy_power_sts = PHY_UNINITIALIZED;
+ }
+
+ update_reg(tbh_phy, PHY_CFG_0, RETRIM_EN_MASK, RETRIM_EN_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, RETRIM_MASK, RETRIM_SHIFT, 0x0);
+
+ return thunderbay_emmc_phy_power(phy, 1);
+}
+
+static int thunderbay_emmc_phy_power_off(struct phy *phy)
+{
+ struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+
+ tbh_phy->phy_power_sts = PHY_INITIALIZED;
+
+ return thunderbay_emmc_phy_power(phy, 0);
+}
+
+static const struct phy_ops thunderbay_emmc_phy_ops = {
+ .init = thunderbay_emmc_phy_init,
+ .exit = thunderbay_emmc_phy_exit,
+ .power_on = thunderbay_emmc_phy_power_on,
+ .power_off = thunderbay_emmc_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id thunderbay_emmc_phy_of_match[] = {
+ { .compatible = "intel,thunderbay-emmc-phy",
+ (void *)&thunderbay_emmc_phy_ops },
+ {}
+};
+MODULE_DEVICE_TABLE(of, thunderbay_emmc_phy_of_match);
+
+static int thunderbay_emmc_phy_probe(struct platform_device *pdev)
+{
+ struct thunderbay_emmc_phy *tbh_phy;
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ struct phy *generic_phy;
+ struct resource *res;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ tbh_phy = devm_kzalloc(dev, sizeof(*tbh_phy), GFP_KERNEL);
+ if (!tbh_phy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tbh_phy->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tbh_phy->reg_base))
+ return PTR_ERR(tbh_phy->reg_base);
+
+ tbh_phy->phy_power_sts = PHY_UNINITIALIZED;
+ id = of_match_node(thunderbay_emmc_phy_of_match, pdev->dev.of_node);
+ if (!id) {
+ dev_err(dev, "failed to get match_node\n");
+ return -EINVAL;
+ }
+
+ generic_phy = devm_phy_create(dev, dev->of_node, id->data);
+ if (IS_ERR(generic_phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(generic_phy);
+ }
+
+ phy_set_drvdata(generic_phy, tbh_phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver thunderbay_emmc_phy_driver = {
+ .probe = thunderbay_emmc_phy_probe,
+ .driver = {
+ .name = "thunderbay-emmc-phy",
+ .of_match_table = thunderbay_emmc_phy_of_match,
+ },
+};
+module_platform_driver(thunderbay_emmc_phy_driver);
+
+MODULE_AUTHOR("Nandhini S <nandhini.srikandan@intel.com>");
+MODULE_AUTHOR("Rashmi A <rashmi.a@intel.com>");
+MODULE_DESCRIPTION("Intel Thunder Bay eMMC PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index bbd6f2ad6f24..34672e868a1e 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -141,6 +141,7 @@
#define COMPHY_FW_SPEED_1250 0
#define COMPHY_FW_SPEED_3125 2
#define COMPHY_FW_SPEED_5000 3
+#define COMPHY_FW_SPEED_515625 4
#define COMPHY_FW_SPEED_103125 6
#define COMPHY_FW_PORT_OFFSET 8
#define COMPHY_FW_PORT_MASK GENMASK(11, 8)
@@ -220,6 +221,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
ETH_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_2500BASEX),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_5GBASER, 0x1, COMPHY_FW_MODE_XFI),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GBASER, 0x1, COMPHY_FW_MODE_XFI),
GEN_CONF(2, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(2, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
@@ -234,6 +236,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
/* lane 4 */
ETH_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_2500BASEX),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_5GBASER, 0x2, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GBASER, 0x2, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
GEN_CONF(4, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
@@ -241,6 +244,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_2500BASEX),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GBASER, -1, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI),
/* lane 5 */
ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
@@ -790,6 +794,11 @@ static int mvebu_comphy_power_on(struct phy *phy)
lane->id);
fw_speed = COMPHY_FW_SPEED_3125;
break;
+ case PHY_INTERFACE_MODE_5GBASER:
+ dev_dbg(priv->dev, "set lane %d to 5GBASE-R mode\n",
+ lane->id);
+ fw_speed = COMPHY_FW_SPEED_515625;
+ break;
case PHY_INTERFACE_MODE_10GBASER:
dev_dbg(priv->dev, "set lane %d to 10GBASE-R mode\n",
lane->id);
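
The new COMPHY_FW_SPEED_515625 constant follows the existing naming scheme of encoding the baud rate with the decimal point dropped: 5GBASE-R uses 64b/66b line coding, so the lane runs at 5 Gbit/s x 66/64 = 5.15625 GBd, just as COMPHY_FW_SPEED_103125 denotes the 10.3125 GBd rate of 10GBASE-R.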
diff --git a/drivers/phy/mediatek/phy-mtk-io.h b/drivers/phy/mediatek/phy-mtk-io.h
new file mode 100644
index 000000000000..500fcdab165d
--- /dev/null
+++ b/drivers/phy/mediatek/phy-mtk-io.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#ifndef __PHY_MTK_H__
+#define __PHY_MTK_H__
+
+#include <linux/io.h>
+
+static inline void mtk_phy_clear_bits(void __iomem *reg, u32 bits)
+{
+ u32 tmp = readl(reg);
+
+ tmp &= ~bits;
+ writel(tmp, reg);
+}
+
+static inline void mtk_phy_set_bits(void __iomem *reg, u32 bits)
+{
+ u32 tmp = readl(reg);
+
+ tmp |= bits;
+ writel(tmp, reg);
+}
+
+static inline void mtk_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 tmp = readl(reg);
+
+ tmp &= ~mask;
+ tmp |= val & mask;
+ writel(tmp, reg);
+}
+
+#endif	/* __PHY_MTK_H__ */
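
These helpers capture the read-modify-write idiom that the remainder of the series converts to. With MASK and FIELD_VAL() standing in for the register-field macros used below, the open-coded sequence

	tmp = readl(reg);
	tmp &= ~MASK;
	tmp |= FIELD_VAL(x);
	writel(tmp, reg);

collapses into the single call

	mtk_phy_update_bits(reg, MASK, FIELD_VAL(x));

with mtk_phy_set_bits() and mtk_phy_clear_bits() covering the pure set and clear cases.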
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
index 28ad9403c441..67b005d5b9e3 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
@@ -146,6 +146,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return -ENOMEM;
mipi_tx->driver_data = of_device_get_match_data(dev);
+ if (!mipi_tx->driver_data)
+ return -ENODEV;
mipi_tx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mipi_tx->regs))
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index cdcef865fe9e..6d307102f4f6 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -8,16 +8,18 @@
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include "phy-mtk-io.h"
+
/* version V1 sub-banks offset base address */
/* banks shared by multiple phys */
#define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */
@@ -41,6 +43,9 @@
#define SSUSB_SIFSLV_V2_U3PHYD 0x200
#define SSUSB_SIFSLV_V2_U3PHYA 0x400
+#define U3P_MISC_REG1 0x04
+#define MR1_EFUSE_AUTO_LOAD_DIS BIT(6)
+
#define U3P_USBPHYACR0 0x000
#define PA0_RG_U2PLL_FORCE_ON BIT(15)
#define PA0_USB20_PLL_PREDIV GENMASK(7, 6)
@@ -133,6 +138,8 @@
#define P3C_RG_SWRST_U3_PHYD_FORCE_EN BIT(24)
#define U3P_U3_PHYA_REG0 0x000
+#define P3A_RG_IEXT_INTR GENMASK(15, 10)
+#define P3A_RG_IEXT_INTR_VAL(x) ((0x3f & (x)) << 10)
#define P3A_RG_CLKDRV_OFF GENMASK(3, 2)
#define P3A_RG_CLKDRV_OFF_VAL(x) ((0x3 & (x)) << 2)
@@ -187,6 +194,19 @@
#define P3D_RG_FWAKE_TH GENMASK(21, 16)
#define P3D_RG_FWAKE_TH_VAL(x) ((0x3f & (x)) << 16)
+#define U3P_U3_PHYD_IMPCAL0 0x010
+#define P3D_RG_FORCE_TX_IMPEL BIT(31)
+#define P3D_RG_TX_IMPEL GENMASK(28, 24)
+#define P3D_RG_TX_IMPEL_VAL(x) ((0x1f & (x)) << 24)
+
+#define U3P_U3_PHYD_IMPCAL1 0x014
+#define P3D_RG_FORCE_RX_IMPEL BIT(31)
+#define P3D_RG_RX_IMPEL GENMASK(28, 24)
+#define P3D_RG_RX_IMPEL_VAL(x) ((0x1f & (x)) << 24)
+
+#define U3P_U3_PHYD_RSV 0x054
+#define P3D_RG_EFUSE_AUTO_LOAD_DIS BIT(12)
+
#define U3P_U3_PHYD_CDR1 0x05c
#define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24)
#define P3D_RG_CDR_BIR_LTD1_VAL(x) ((0x1f & (x)) << 24)
@@ -307,6 +327,11 @@ struct mtk_phy_pdata {
* 48M PLL, fix it by switching PLL to 26M from default 48M
*/
bool sw_pll_48m_to_26m;
+ /*
+	 * Some SoCs (e.g. mt8195) may drop a bit when the efuse is
+	 * auto-loaded, so support loading it by software instead; this
+	 * can optionally be used for v2/v3 as well.
+ */
+ bool sw_efuse_supported;
enum mtk_phy_version version;
};
@@ -336,6 +361,10 @@ struct mtk_phy_instance {
struct regmap *type_sw;
u32 type_sw_reg;
u32 type_sw_index;
+ u32 efuse_sw_en;
+ u32 efuse_intr;
+ u32 efuse_tx_imp;
+ u32 efuse_rx_imp;
int eye_src;
int eye_vrt;
int eye_term;
@@ -373,15 +402,11 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
return;
/* enable USB ring oscillator */
- tmp = readl(com + U3P_USBPHYACR5);
- tmp |= PA5_RG_U2_HSTX_SRCAL_EN;
- writel(tmp, com + U3P_USBPHYACR5);
+ mtk_phy_set_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCAL_EN);
udelay(1);
/*enable free run clock */
- tmp = readl(fmreg + U3P_U2FREQ_FMMONR1);
- tmp |= P2F_RG_FRCK_EN;
- writel(tmp, fmreg + U3P_U2FREQ_FMMONR1);
+ mtk_phy_set_bits(fmreg + U3P_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
/* set cycle count as 1024, and select u2 channel */
tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
@@ -393,9 +418,7 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
/* enable frequency meter */
- tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
- tmp |= P2F_RG_FREQDET_EN;
- writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
+ mtk_phy_set_bits(fmreg + U3P_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/* ignore return value */
readl_poll_timeout(fmreg + U3P_U2FREQ_FMMONR1, tmp,
@@ -404,14 +427,10 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
fm_out = readl(fmreg + U3P_U2FREQ_VALUE);
/* disable frequency meter */
- tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
- tmp &= ~P2F_RG_FREQDET_EN;
- writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
+ mtk_phy_clear_bits(fmreg + U3P_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/*disable free run clock */
- tmp = readl(fmreg + U3P_U2FREQ_FMMONR1);
- tmp &= ~P2F_RG_FRCK_EN;
- writel(tmp, fmreg + U3P_U2FREQ_FMMONR1);
+ mtk_phy_clear_bits(fmreg + U3P_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
if (fm_out) {
/* ( 1024 / FM_OUT ) x reference clock frequency x coef */
@@ -427,63 +446,44 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
tphy->src_ref_clk, tphy->src_coef);
/* set HS slew rate */
- tmp = readl(com + U3P_USBPHYACR5);
- tmp &= ~PA5_RG_U2_HSTX_SRCTRL;
- tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val);
- writel(tmp, com + U3P_USBPHYACR5);
+ mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
+ PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val));
/* disable USB ring oscillator */
- tmp = readl(com + U3P_USBPHYACR5);
- tmp &= ~PA5_RG_U2_HSTX_SRCAL_EN;
- writel(tmp, com + U3P_USBPHYACR5);
+ mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCAL_EN);
}
static void u3_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
- u32 tmp;
/* gating PCIe Analog XTAL clock */
- tmp = readl(u3_banks->spllc + U3P_SPLLC_XTALCTL3);
- tmp |= XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD;
- writel(tmp, u3_banks->spllc + U3P_SPLLC_XTALCTL3);
+ mtk_phy_set_bits(u3_banks->spllc + U3P_SPLLC_XTALCTL3,
+ XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD);
/* gating XSQ */
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG0);
- tmp &= ~P3A_RG_XTAL_EXT_EN_U3;
- tmp |= P3A_RG_XTAL_EXT_EN_U3_VAL(2);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG0);
-
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG9);
- tmp &= ~P3A_RG_RX_DAC_MUX;
- tmp |= P3A_RG_RX_DAC_MUX_VAL(4);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG9);
-
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG6);
- tmp &= ~P3A_RG_TX_EIDLE_CM;
- tmp |= P3A_RG_TX_EIDLE_CM_VAL(0xe);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG6);
-
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_CDR1);
- tmp &= ~(P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1);
- tmp |= P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_CDR1);
-
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_LFPS1);
- tmp &= ~P3D_RG_FWAKE_TH;
- tmp |= P3D_RG_FWAKE_TH_VAL(0x34);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_LFPS1);
-
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET1);
- tmp &= ~P3D_RG_RXDET_STB2_SET;
- tmp |= P3D_RG_RXDET_STB2_SET_VAL(0x10);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET1);
-
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET2);
- tmp &= ~P3D_RG_RXDET_STB2_SET_P3;
- tmp |= P3D_RG_RXDET_STB2_SET_P3_VAL(0x10);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET2);
+ mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_DA_REG0,
+ P3A_RG_XTAL_EXT_EN_U3, P3A_RG_XTAL_EXT_EN_U3_VAL(2));
+
+ mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG9,
+ P3A_RG_RX_DAC_MUX, P3A_RG_RX_DAC_MUX_VAL(4));
+
+ mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG6,
+ P3A_RG_TX_EIDLE_CM, P3A_RG_TX_EIDLE_CM_VAL(0xe));
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_CDR1,
+ P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1,
+ P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3));
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_LFPS1,
+ P3D_RG_FWAKE_TH, P3D_RG_FWAKE_TH_VAL(0x34));
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
+ P3D_RG_RXDET_STB2_SET, P3D_RG_RXDET_STB2_SET_VAL(0x10));
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
+ P3D_RG_RXDET_STB2_SET_P3, P3D_RG_RXDET_STB2_SET_P3_VAL(0x10));
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
@@ -493,26 +493,20 @@ static void u2_phy_pll_26m_set(struct mtk_tphy *tphy,
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
- u32 tmp;
if (!tphy->pdata->sw_pll_48m_to_26m)
return;
- tmp = readl(com + U3P_USBPHYACR0);
- tmp &= ~PA0_USB20_PLL_PREDIV;
- tmp |= PA0_USB20_PLL_PREDIV_VAL(0);
- writel(tmp, com + U3P_USBPHYACR0);
+ mtk_phy_update_bits(com + U3P_USBPHYACR0, PA0_USB20_PLL_PREDIV,
+ PA0_USB20_PLL_PREDIV_VAL(0));
- tmp = readl(com + U3P_USBPHYACR2);
- tmp &= ~PA2_RG_U2PLL_BW;
- tmp |= PA2_RG_U2PLL_BW_VAL(3);
- writel(tmp, com + U3P_USBPHYACR2);
+ mtk_phy_update_bits(com + U3P_USBPHYACR2, PA2_RG_U2PLL_BW,
+ PA2_RG_U2PLL_BW_VAL(3));
writel(P2R_RG_U2PLL_FBDIV_26M, com + U3P_U2PHYA_RESV);
- tmp = readl(com + U3P_U2PHYA_RESV1);
- tmp |= P2R_RG_U2PLL_FRA_EN | P2R_RG_U2PLL_REFCLK_SEL;
- writel(tmp, com + U3P_U2PHYA_RESV1);
+ mtk_phy_set_bits(com + U3P_U2PHYA_RESV1,
+ P2R_RG_U2PLL_FRA_EN | P2R_RG_U2PLL_REFCLK_SEL);
}
static void u2_phy_instance_init(struct mtk_tphy *tphy,
@@ -521,58 +515,40 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy,
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- u32 tmp;
/* switch to USB function, and enable usb pll */
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~(P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM);
- tmp |= P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0);
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM);
+
+ mtk_phy_update_bits(com + U3P_U2PHYDTM0, P2C_RG_XCVRSEL | P2C_RG_DATAIN,
+ P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0));
- tmp = readl(com + U3P_U2PHYDTM1);
- tmp &= ~P2C_RG_UART_EN;
- writel(tmp, com + U3P_U2PHYDTM1);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_UART_EN);
- tmp = readl(com + U3P_USBPHYACR0);
- tmp |= PA0_RG_USB20_INTR_EN;
- writel(tmp, com + U3P_USBPHYACR0);
+ mtk_phy_set_bits(com + U3P_USBPHYACR0, PA0_RG_USB20_INTR_EN);
/* disable switch 100uA current to SSUSB */
- tmp = readl(com + U3P_USBPHYACR5);
- tmp &= ~PA5_RG_U2_HS_100U_U3_EN;
- writel(tmp, com + U3P_USBPHYACR5);
-
- if (!index) {
- tmp = readl(com + U3P_U2PHYACR4);
- tmp &= ~P2C_U2_GPIO_CTR_MSK;
- writel(tmp, com + U3P_U2PHYACR4);
- }
+ mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HS_100U_U3_EN);
+
+ if (!index)
+ mtk_phy_clear_bits(com + U3P_U2PHYACR4, P2C_U2_GPIO_CTR_MSK);
if (tphy->pdata->avoid_rx_sen_degradation) {
if (!index) {
- tmp = readl(com + U3P_USBPHYACR2);
- tmp |= PA2_RG_SIF_U2PLL_FORCE_EN;
- writel(tmp, com + U3P_USBPHYACR2);
+ mtk_phy_set_bits(com + U3P_USBPHYACR2, PA2_RG_SIF_U2PLL_FORCE_EN);
- tmp = readl(com + U3D_U2PHYDCR0);
- tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, com + U3D_U2PHYDCR0);
+ mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
} else {
- tmp = readl(com + U3D_U2PHYDCR0);
- tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, com + U3D_U2PHYDCR0);
+ mtk_phy_set_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM;
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_set_bits(com + U3P_U2PHYDTM0,
+ P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
}
}
- tmp = readl(com + U3P_USBPHYACR6);
- tmp &= ~PA6_RG_U2_BC11_SW_EN; /* DP/DM BC1.1 path Disable */
- tmp &= ~PA6_RG_U2_SQTH;
- tmp |= PA6_RG_U2_SQTH_VAL(2);
- writel(tmp, com + U3P_USBPHYACR6);
+ /* DP/DM BC1.1 path Disable */
+ mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_BC11_SW_EN);
+
+ mtk_phy_update_bits(com + U3P_USBPHYACR6, PA6_RG_U2_SQTH, PA6_RG_U2_SQTH_VAL(2));
/* Workaround only for mt8195, HW fix it for others (V3) */
u2_phy_pll_26m_set(tphy, instance);
@@ -586,30 +562,21 @@ static void u2_phy_instance_power_on(struct mtk_tphy *tphy,
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- u32 tmp;
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0,
+ P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
/* OTG Enable */
- tmp = readl(com + U3P_USBPHYACR6);
- tmp |= PA6_RG_U2_OTG_VBUSCMP_EN;
- writel(tmp, com + U3P_USBPHYACR6);
+ mtk_phy_set_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN);
+
+ mtk_phy_set_bits(com + U3P_U2PHYDTM1, P2C_RG_VBUSVALID | P2C_RG_AVALID);
- tmp = readl(com + U3P_U2PHYDTM1);
- tmp |= P2C_RG_VBUSVALID | P2C_RG_AVALID;
- tmp &= ~P2C_RG_SESSEND;
- writel(tmp, com + U3P_U2PHYDTM1);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_SESSEND);
if (tphy->pdata->avoid_rx_sen_degradation && index) {
- tmp = readl(com + U3D_U2PHYDCR0);
- tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, com + U3D_U2PHYDCR0);
+ mtk_phy_set_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM;
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_set_bits(com + U3P_U2PHYDTM0, P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
}
dev_dbg(tphy->dev, "%s(%d)\n", __func__, index);
}
@@ -620,30 +587,20 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy,
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- u32 tmp;
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN);
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_RG_XCVRSEL | P2C_RG_DATAIN);
/* OTG Disable */
- tmp = readl(com + U3P_USBPHYACR6);
- tmp &= ~PA6_RG_U2_OTG_VBUSCMP_EN;
- writel(tmp, com + U3P_USBPHYACR6);
+ mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN);
+
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_VBUSVALID | P2C_RG_AVALID);
- tmp = readl(com + U3P_U2PHYDTM1);
- tmp &= ~(P2C_RG_VBUSVALID | P2C_RG_AVALID);
- tmp |= P2C_RG_SESSEND;
- writel(tmp, com + U3P_U2PHYDTM1);
+ mtk_phy_set_bits(com + U3P_U2PHYDTM1, P2C_RG_SESSEND);
if (tphy->pdata->avoid_rx_sen_degradation && index) {
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~(P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
- tmp = readl(com + U3D_U2PHYDCR0);
- tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, com + U3D_U2PHYDCR0);
+ mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
}
dev_dbg(tphy->dev, "%s(%d)\n", __func__, index);
@@ -655,16 +612,11 @@ static void u2_phy_instance_exit(struct mtk_tphy *tphy,
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- u32 tmp;
if (tphy->pdata->avoid_rx_sen_degradation && index) {
- tmp = readl(com + U3D_U2PHYDCR0);
- tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, com + U3D_U2PHYDCR0);
+ mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~P2C_FORCE_SUSPENDM;
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_FORCE_SUSPENDM);
}
}
@@ -697,69 +649,50 @@ static void pcie_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
- u32 tmp;
+ void __iomem *phya = u3_banks->phya;
if (tphy->pdata->version != MTK_PHY_V1)
return;
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG0);
- tmp &= ~(P3A_RG_XTAL_EXT_PE1H | P3A_RG_XTAL_EXT_PE2H);
- tmp |= P3A_RG_XTAL_EXT_PE1H_VAL(0x2) | P3A_RG_XTAL_EXT_PE2H_VAL(0x2);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG0);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG0,
+ P3A_RG_XTAL_EXT_PE1H | P3A_RG_XTAL_EXT_PE2H,
+ P3A_RG_XTAL_EXT_PE1H_VAL(0x2) | P3A_RG_XTAL_EXT_PE2H_VAL(0x2));
/* ref clk drive */
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG1);
- tmp &= ~P3A_RG_CLKDRV_AMP;
- tmp |= P3A_RG_CLKDRV_AMP_VAL(0x4);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG1);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_REG1, P3A_RG_CLKDRV_AMP,
+ P3A_RG_CLKDRV_AMP_VAL(0x4));
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG0);
- tmp &= ~P3A_RG_CLKDRV_OFF;
- tmp |= P3A_RG_CLKDRV_OFF_VAL(0x1);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG0);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_REG0, P3A_RG_CLKDRV_OFF,
+ P3A_RG_CLKDRV_OFF_VAL(0x1));
/* SSC delta -5000ppm */
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG20);
- tmp &= ~P3A_RG_PLL_DELTA1_PE2H;
- tmp |= P3A_RG_PLL_DELTA1_PE2H_VAL(0x3c);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG20);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG20, P3A_RG_PLL_DELTA1_PE2H,
+ P3A_RG_PLL_DELTA1_PE2H_VAL(0x3c));
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG25);
- tmp &= ~P3A_RG_PLL_DELTA_PE2H;
- tmp |= P3A_RG_PLL_DELTA_PE2H_VAL(0x36);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG25);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG25, P3A_RG_PLL_DELTA_PE2H,
+ P3A_RG_PLL_DELTA_PE2H_VAL(0x36));
/* change pll BW 0.6M */
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG5);
- tmp &= ~(P3A_RG_PLL_BR_PE2H | P3A_RG_PLL_IC_PE2H);
- tmp |= P3A_RG_PLL_BR_PE2H_VAL(0x1) | P3A_RG_PLL_IC_PE2H_VAL(0x1);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG5);
-
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG4);
- tmp &= ~(P3A_RG_PLL_DIVEN_PE2H | P3A_RG_PLL_BC_PE2H);
- tmp |= P3A_RG_PLL_BC_PE2H_VAL(0x3);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG4);
-
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG6);
- tmp &= ~P3A_RG_PLL_IR_PE2H;
- tmp |= P3A_RG_PLL_IR_PE2H_VAL(0x2);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG6);
-
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG7);
- tmp &= ~P3A_RG_PLL_BP_PE2H;
- tmp |= P3A_RG_PLL_BP_PE2H_VAL(0xa);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG7);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG5,
+ P3A_RG_PLL_BR_PE2H | P3A_RG_PLL_IC_PE2H,
+ P3A_RG_PLL_BR_PE2H_VAL(0x1) | P3A_RG_PLL_IC_PE2H_VAL(0x1));
+
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG4,
+ P3A_RG_PLL_DIVEN_PE2H | P3A_RG_PLL_BC_PE2H,
+ P3A_RG_PLL_BC_PE2H_VAL(0x3));
+
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG6, P3A_RG_PLL_IR_PE2H,
+ P3A_RG_PLL_IR_PE2H_VAL(0x2));
+
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG7, P3A_RG_PLL_BP_PE2H,
+ P3A_RG_PLL_BP_PE2H_VAL(0xa));
/* Tx Detect Rx Timing: 10us -> 5us */
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET1);
- tmp &= ~P3D_RG_RXDET_STB2_SET;
- tmp |= P3D_RG_RXDET_STB2_SET_VAL(0x10);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET1);
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
+ P3D_RG_RXDET_STB2_SET, P3D_RG_RXDET_STB2_SET_VAL(0x10));
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET2);
- tmp &= ~P3D_RG_RXDET_STB2_SET_P3;
- tmp |= P3D_RG_RXDET_STB2_SET_P3_VAL(0x10);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET2);
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
+ P3D_RG_RXDET_STB2_SET_P3, P3D_RG_RXDET_STB2_SET_P3_VAL(0x10));
/* wait for PCIe subsys register to active */
usleep_range(2500, 3000);
@@ -770,15 +703,12 @@ static void pcie_phy_instance_power_on(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *bank = &instance->u3_banks;
- u32 tmp;
- tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLD);
- tmp &= ~(P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST);
- writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLD);
+ mtk_phy_clear_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLD,
+ P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST);
- tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLE);
- tmp &= ~(P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD);
- writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLE);
+ mtk_phy_clear_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLE,
+ P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD);
}
static void pcie_phy_instance_power_off(struct mtk_tphy *tphy,
@@ -786,15 +716,12 @@ static void pcie_phy_instance_power_off(struct mtk_tphy *tphy,
{
struct u3phy_banks *bank = &instance->u3_banks;
- u32 tmp;
- tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLD);
- tmp |= P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST;
- writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLD);
+ mtk_phy_set_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLD,
+ P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST);
- tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLE);
- tmp |= P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD;
- writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLE);
+ mtk_phy_set_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLE,
+ P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD);
}
static void sata_phy_instance_init(struct mtk_tphy *tphy,
@@ -802,55 +729,42 @@ static void sata_phy_instance_init(struct mtk_tphy *tphy,
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
void __iomem *phyd = u3_banks->phyd;
- u32 tmp;
/* charge current adjustment */
- tmp = readl(phyd + ANA_RG_CTRL_SIGNAL6);
- tmp &= ~(RG_CDR_BIRLTR_GEN1_MSK | RG_CDR_BC_GEN1_MSK);
- tmp |= RG_CDR_BIRLTR_GEN1_VAL(0x6) | RG_CDR_BC_GEN1_VAL(0x1a);
- writel(tmp, phyd + ANA_RG_CTRL_SIGNAL6);
-
- tmp = readl(phyd + ANA_EQ_EYE_CTRL_SIGNAL4);
- tmp &= ~RG_CDR_BIRLTD0_GEN1_MSK;
- tmp |= RG_CDR_BIRLTD0_GEN1_VAL(0x18);
- writel(tmp, phyd + ANA_EQ_EYE_CTRL_SIGNAL4);
-
- tmp = readl(phyd + ANA_EQ_EYE_CTRL_SIGNAL5);
- tmp &= ~RG_CDR_BIRLTD0_GEN3_MSK;
- tmp |= RG_CDR_BIRLTD0_GEN3_VAL(0x06);
- writel(tmp, phyd + ANA_EQ_EYE_CTRL_SIGNAL5);
-
- tmp = readl(phyd + ANA_RG_CTRL_SIGNAL4);
- tmp &= ~(RG_CDR_BICLTR_GEN1_MSK | RG_CDR_BR_GEN2_MSK);
- tmp |= RG_CDR_BICLTR_GEN1_VAL(0x0c) | RG_CDR_BR_GEN2_VAL(0x07);
- writel(tmp, phyd + ANA_RG_CTRL_SIGNAL4);
-
- tmp = readl(phyd + PHYD_CTRL_SIGNAL_MODE4);
- tmp &= ~(RG_CDR_BICLTD0_GEN1_MSK | RG_CDR_BICLTD1_GEN1_MSK);
- tmp |= RG_CDR_BICLTD0_GEN1_VAL(0x08) | RG_CDR_BICLTD1_GEN1_VAL(0x02);
- writel(tmp, phyd + PHYD_CTRL_SIGNAL_MODE4);
-
- tmp = readl(phyd + PHYD_DESIGN_OPTION2);
- tmp &= ~RG_LOCK_CNT_SEL_MSK;
- tmp |= RG_LOCK_CNT_SEL_VAL(0x02);
- writel(tmp, phyd + PHYD_DESIGN_OPTION2);
-
- tmp = readl(phyd + PHYD_DESIGN_OPTION9);
- tmp &= ~(RG_T2_MIN_MSK | RG_TG_MIN_MSK |
- RG_T2_MAX_MSK | RG_TG_MAX_MSK);
- tmp |= RG_T2_MIN_VAL(0x12) | RG_TG_MIN_VAL(0x04) |
- RG_T2_MAX_VAL(0x31) | RG_TG_MAX_VAL(0x0e);
- writel(tmp, phyd + PHYD_DESIGN_OPTION9);
-
- tmp = readl(phyd + ANA_RG_CTRL_SIGNAL1);
- tmp &= ~RG_IDRV_0DB_GEN1_MSK;
- tmp |= RG_IDRV_0DB_GEN1_VAL(0x20);
- writel(tmp, phyd + ANA_RG_CTRL_SIGNAL1);
-
- tmp = readl(phyd + ANA_EQ_EYE_CTRL_SIGNAL1);
- tmp &= ~RG_EQ_DLEQ_LFI_GEN1_MSK;
- tmp |= RG_EQ_DLEQ_LFI_GEN1_VAL(0x03);
- writel(tmp, phyd + ANA_EQ_EYE_CTRL_SIGNAL1);
+ mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL6,
+ RG_CDR_BIRLTR_GEN1_MSK | RG_CDR_BC_GEN1_MSK,
+ RG_CDR_BIRLTR_GEN1_VAL(0x6) | RG_CDR_BC_GEN1_VAL(0x1a));
+
+ mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL4, RG_CDR_BIRLTD0_GEN1_MSK,
+ RG_CDR_BIRLTD0_GEN1_VAL(0x18));
+
+ mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL5, RG_CDR_BIRLTD0_GEN3_MSK,
+ RG_CDR_BIRLTD0_GEN3_VAL(0x06));
+
+ mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL4,
+ RG_CDR_BICLTR_GEN1_MSK | RG_CDR_BR_GEN2_MSK,
+ RG_CDR_BICLTR_GEN1_VAL(0x0c) | RG_CDR_BR_GEN2_VAL(0x07));
+
+ mtk_phy_update_bits(phyd + PHYD_CTRL_SIGNAL_MODE4,
+ RG_CDR_BICLTD0_GEN1_MSK | RG_CDR_BICLTD1_GEN1_MSK,
+ RG_CDR_BICLTD0_GEN1_VAL(0x08) | RG_CDR_BICLTD1_GEN1_VAL(0x02));
+
+ mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION2, RG_LOCK_CNT_SEL_MSK,
+ RG_LOCK_CNT_SEL_VAL(0x02));
+
+ mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9,
+ RG_T2_MIN_MSK | RG_TG_MIN_MSK,
+ RG_T2_MIN_VAL(0x12) | RG_TG_MIN_VAL(0x04));
+
+ mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9,
+ RG_T2_MAX_MSK | RG_TG_MAX_MSK,
+ RG_T2_MAX_VAL(0x31) | RG_TG_MAX_VAL(0x0e));
+
+ mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL1, RG_IDRV_0DB_GEN1_MSK,
+ RG_IDRV_0DB_GEN1_VAL(0x20));
+
+ mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL1, RG_EQ_DLEQ_LFI_GEN1_MSK,
+ RG_EQ_DLEQ_LFI_GEN1_VAL(0x03));
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
@@ -938,48 +852,29 @@ static void u2_phy_props_set(struct mtk_tphy *tphy,
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
- u32 tmp;
- if (instance->bc12_en) {
- tmp = readl(com + U3P_U2PHYBC12C);
- tmp |= P2C_RG_CHGDT_EN; /* BC1.2 path Enable */
- writel(tmp, com + U3P_U2PHYBC12C);
- }
+ if (instance->bc12_en) /* BC1.2 path Enable */
+ mtk_phy_set_bits(com + U3P_U2PHYBC12C, P2C_RG_CHGDT_EN);
- if (tphy->pdata->version < MTK_PHY_V3 && instance->eye_src) {
- tmp = readl(com + U3P_USBPHYACR5);
- tmp &= ~PA5_RG_U2_HSTX_SRCTRL;
- tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(instance->eye_src);
- writel(tmp, com + U3P_USBPHYACR5);
- }
+ if (tphy->pdata->version < MTK_PHY_V3 && instance->eye_src)
+ mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
+ PA5_RG_U2_HSTX_SRCTRL_VAL(instance->eye_src));
- if (instance->eye_vrt) {
- tmp = readl(com + U3P_USBPHYACR1);
- tmp &= ~PA1_RG_VRT_SEL;
- tmp |= PA1_RG_VRT_SEL_VAL(instance->eye_vrt);
- writel(tmp, com + U3P_USBPHYACR1);
- }
+ if (instance->eye_vrt)
+ mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL,
+ PA1_RG_VRT_SEL_VAL(instance->eye_vrt));
- if (instance->eye_term) {
- tmp = readl(com + U3P_USBPHYACR1);
- tmp &= ~PA1_RG_TERM_SEL;
- tmp |= PA1_RG_TERM_SEL_VAL(instance->eye_term);
- writel(tmp, com + U3P_USBPHYACR1);
- }
+ if (instance->eye_term)
+ mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_TERM_SEL,
+ PA1_RG_TERM_SEL_VAL(instance->eye_term));
- if (instance->intr) {
- tmp = readl(com + U3P_USBPHYACR1);
- tmp &= ~PA1_RG_INTR_CAL;
- tmp |= PA1_RG_INTR_CAL_VAL(instance->intr);
- writel(tmp, com + U3P_USBPHYACR1);
- }
+ if (instance->intr)
+ mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
+ PA1_RG_INTR_CAL_VAL(instance->intr));
- if (instance->discth) {
- tmp = readl(com + U3P_USBPHYACR6);
- tmp &= ~PA6_RG_U2_DISCTH;
- tmp |= PA6_RG_U2_DISCTH_VAL(instance->discth);
- writel(tmp, com + U3P_USBPHYACR6);
- }
+ if (instance->discth)
+ mtk_phy_update_bits(com + U3P_USBPHYACR6, PA6_RG_U2_DISCTH,
+ PA6_RG_U2_DISCTH_VAL(instance->discth));
}
/* type switch for usb3/pcie/sgmii/sata */
@@ -1040,6 +935,117 @@ static int phy_type_set(struct mtk_phy_instance *instance)
return 0;
}
+static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instance)
+{
+ struct device *dev = &instance->phy->dev;
+ int ret = 0;
+
+ /* tphy v1 doesn't support sw efuse, skip it */
+ if (!tphy->pdata->sw_efuse_supported) {
+ instance->efuse_sw_en = 0;
+ return 0;
+ }
+
+ /* software efuse is optional */
+ instance->efuse_sw_en = device_property_read_bool(dev, "nvmem-cells");
+ if (!instance->efuse_sw_en)
+ return 0;
+
+ switch (instance->type) {
+ case PHY_TYPE_USB2:
+ ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr);
+ if (ret) {
+ dev_err(dev, "fail to get u2 intr efuse, %d\n", ret);
+ break;
+ }
+
+ /* no efuse, ignore it */
+ if (!instance->efuse_intr) {
+ dev_warn(dev, "no u2 intr efuse, but dts enable it\n");
+ instance->efuse_sw_en = 0;
+ break;
+ }
+
+ dev_dbg(dev, "u2 efuse - intr %x\n", instance->efuse_intr);
+ break;
+
+ case PHY_TYPE_USB3:
+ case PHY_TYPE_PCIE:
+ ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr);
+ if (ret) {
+ dev_err(dev, "fail to get u3 intr efuse, %d\n", ret);
+ break;
+ }
+
+ ret = nvmem_cell_read_variable_le_u32(dev, "rx_imp", &instance->efuse_rx_imp);
+ if (ret) {
+ dev_err(dev, "fail to get u3 rx_imp efuse, %d\n", ret);
+ break;
+ }
+
+ ret = nvmem_cell_read_variable_le_u32(dev, "tx_imp", &instance->efuse_tx_imp);
+ if (ret) {
+ dev_err(dev, "fail to get u3 tx_imp efuse, %d\n", ret);
+ break;
+ }
+
+ /* no efuse, ignore it */
+		if (!instance->efuse_intr &&
+		    !instance->efuse_rx_imp &&
+		    !instance->efuse_tx_imp) {
+			dev_warn(dev, "no u3 efuse values, but DT enables them\n");
+ instance->efuse_sw_en = 0;
+ break;
+ }
+
+ dev_dbg(dev, "u3 efuse - intr %x, rx_imp %x, tx_imp %x\n",
+			instance->efuse_intr, instance->efuse_rx_imp, instance->efuse_tx_imp);
+ break;
+ default:
+ dev_err(dev, "no sw efuse for type %d\n", instance->type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void phy_efuse_set(struct mtk_phy_instance *instance)
+{
+ struct device *dev = &instance->phy->dev;
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ struct u3phy_banks *u3_banks = &instance->u3_banks;
+
+ if (!instance->efuse_sw_en)
+ return;
+
+ switch (instance->type) {
+ case PHY_TYPE_USB2:
+ mtk_phy_set_bits(u2_banks->misc + U3P_MISC_REG1, MR1_EFUSE_AUTO_LOAD_DIS);
+
+ mtk_phy_update_bits(u2_banks->com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
+ PA1_RG_INTR_CAL_VAL(instance->efuse_intr));
+ break;
+ case PHY_TYPE_USB3:
+ case PHY_TYPE_PCIE:
+ mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_RSV, P3D_RG_EFUSE_AUTO_LOAD_DIS);
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_TX_IMPEL,
+ P3D_RG_TX_IMPEL_VAL(instance->efuse_tx_imp));
+ mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_FORCE_TX_IMPEL);
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_RX_IMPEL,
+ P3D_RG_RX_IMPEL_VAL(instance->efuse_rx_imp));
+ mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_FORCE_RX_IMPEL);
+
+ mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG0, P3A_RG_IEXT_INTR,
+ P3A_RG_IEXT_INTR_VAL(instance->efuse_intr));
+ break;
+ default:
+ dev_warn(dev, "no sw efuse for type %d\n", instance->type);
+ break;
+ }
+}
+
static int mtk_phy_init(struct phy *phy)
{
struct mtk_phy_instance *instance = phy_get_drvdata(phy);
@@ -1050,6 +1056,8 @@ static int mtk_phy_init(struct phy *phy)
if (ret)
return ret;
+ phy_efuse_set(instance);
+
switch (instance->type) {
case PHY_TYPE_USB2:
u2_phy_instance_init(tphy, instance);
@@ -1134,6 +1142,7 @@ static struct phy *mtk_phy_xlate(struct device *dev,
struct mtk_phy_instance *instance = NULL;
struct device_node *phy_np = args->np;
int index;
+ int ret;
if (args->args_count != 1) {
dev_err(dev, "invalid number of cells in 'phy' property\n");
@@ -1174,6 +1183,10 @@ static struct phy *mtk_phy_xlate(struct device *dev,
return ERR_PTR(-EINVAL);
}
+ ret = phy_efuse_get(tphy, instance);
+ if (ret)
+ return ERR_PTR(ret);
+
phy_parse_property(tphy, instance);
phy_type_set(instance);
@@ -1196,10 +1209,12 @@ static const struct mtk_phy_pdata tphy_v1_pdata = {
static const struct mtk_phy_pdata tphy_v2_pdata = {
.avoid_rx_sen_degradation = false,
+ .sw_efuse_supported = true,
.version = MTK_PHY_V2,
};
static const struct mtk_phy_pdata tphy_v3_pdata = {
+ .sw_efuse_supported = true,
.version = MTK_PHY_V3,
};
@@ -1210,6 +1225,7 @@ static const struct mtk_phy_pdata mt8173_pdata = {
static const struct mtk_phy_pdata mt8195_pdata = {
.sw_pll_48m_to_26m = true,
+ .sw_efuse_supported = true,
.version = MTK_PHY_V3,
};
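
Taken together, the efuse additions split the work into two stages: phy_efuse_get() runs once, at translate time, and caches the calibration words read from the nvmem cells, while phy_efuse_set() runs at phy_init() time, disables the hardware auto-load path (MR1_EFUSE_AUTO_LOAD_DIS / P3D_RG_EFUSE_AUTO_LOAD_DIS) and forces the cached values into the intr and impedance fields. A condensed view of the call order implied by the hunks above (not literal kernel code):

	phy = mtk_phy_xlate(dev, args);
		/* -> phy_efuse_get(): nvmem values cached in the instance */

	ret = phy_init(phy);
		/* -> mtk_phy_init() -> phy_efuse_set(): auto-load disabled,
		 *    cached efuse values written into the PHY registers */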
diff --git a/drivers/phy/mediatek/phy-mtk-xsphy.c b/drivers/phy/mediatek/phy-mtk-xsphy.c
index 8c51131945c0..c0cdb78f77fa 100644
--- a/drivers/phy/mediatek/phy-mtk-xsphy.c
+++ b/drivers/phy/mediatek/phy-mtk-xsphy.c
@@ -10,13 +10,14 @@
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include "phy-mtk-io.h"
+
/* u2 phy banks */
#define SSUSB_SIFSLV_MISC 0x000
#define SSUSB_SIFSLV_U2FREQ 0x100
@@ -126,26 +127,18 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
return;
/* enable USB ring oscillator */
- tmp = readl(pbase + XSP_USBPHYACR5);
- tmp |= P2A5_RG_HSTX_SRCAL_EN;
- writel(tmp, pbase + XSP_USBPHYACR5);
+ mtk_phy_set_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCAL_EN);
udelay(1); /* wait clock stable */
/* enable free run clock */
- tmp = readl(pbase + XSP_U2FREQ_FMMONR1);
- tmp |= P2F_RG_FRCK_EN;
- writel(tmp, pbase + XSP_U2FREQ_FMMONR1);
+ mtk_phy_set_bits(pbase + XSP_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
/* set cycle count as 1024 */
- tmp = readl(pbase + XSP_U2FREQ_FMCR0);
- tmp &= ~(P2F_RG_CYCLECNT);
- tmp |= P2F_RG_CYCLECNT_VAL(XSP_FM_DET_CYCLE_CNT);
- writel(tmp, pbase + XSP_U2FREQ_FMCR0);
+ mtk_phy_update_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_CYCLECNT,
+ P2F_RG_CYCLECNT_VAL(XSP_FM_DET_CYCLE_CNT));
/* enable frequency meter */
- tmp = readl(pbase + XSP_U2FREQ_FMCR0);
- tmp |= P2F_RG_FREQDET_EN;
- writel(tmp, pbase + XSP_U2FREQ_FMCR0);
+ mtk_phy_set_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/* ignore return value */
readl_poll_timeout(pbase + XSP_U2FREQ_FMMONR1, tmp,
@@ -154,14 +147,10 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
fm_out = readl(pbase + XSP_U2FREQ_MMONR0);
/* disable frequency meter */
- tmp = readl(pbase + XSP_U2FREQ_FMCR0);
- tmp &= ~P2F_RG_FREQDET_EN;
- writel(tmp, pbase + XSP_U2FREQ_FMCR0);
+ mtk_phy_clear_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/* disable free run clock */
- tmp = readl(pbase + XSP_U2FREQ_FMMONR1);
- tmp &= ~P2F_RG_FRCK_EN;
- writel(tmp, pbase + XSP_U2FREQ_FMMONR1);
+ mtk_phy_clear_bits(pbase + XSP_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
if (fm_out) {
/* (1024 / FM_OUT) x reference clock frequency x coefficient */
@@ -177,31 +166,22 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
xsphy->src_ref_clk, xsphy->src_coef);
/* set HS slew rate */
- tmp = readl(pbase + XSP_USBPHYACR5);
- tmp &= ~P2A5_RG_HSTX_SRCTRL;
- tmp |= P2A5_RG_HSTX_SRCTRL_VAL(calib_val);
- writel(tmp, pbase + XSP_USBPHYACR5);
+ mtk_phy_update_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
+ P2A5_RG_HSTX_SRCTRL_VAL(calib_val));
/* disable USB ring oscillator */
- tmp = readl(pbase + XSP_USBPHYACR5);
- tmp &= ~P2A5_RG_HSTX_SRCAL_EN;
- writel(tmp, pbase + XSP_USBPHYACR5);
+ mtk_phy_clear_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCAL_EN);
}
static void u2_phy_instance_init(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
- u32 tmp;
/* DP/DM BC1.1 path Disable */
- tmp = readl(pbase + XSP_USBPHYACR6);
- tmp &= ~P2A6_RG_BC11_SW_EN;
- writel(tmp, pbase + XSP_USBPHYACR6);
+ mtk_phy_clear_bits(pbase + XSP_USBPHYACR6, P2A6_RG_BC11_SW_EN);
- tmp = readl(pbase + XSP_USBPHYACR0);
- tmp |= P2A0_RG_INTR_EN;
- writel(tmp, pbase + XSP_USBPHYACR0);
+ mtk_phy_set_bits(pbase + XSP_USBPHYACR0, P2A0_RG_INTR_EN);
}
static void u2_phy_instance_power_on(struct mtk_xsphy *xsphy,
@@ -209,16 +189,12 @@ static void u2_phy_instance_power_on(struct mtk_xsphy *xsphy,
{
void __iomem *pbase = inst->port_base;
u32 index = inst->index;
- u32 tmp;
- tmp = readl(pbase + XSP_USBPHYACR6);
- tmp |= P2A6_RG_OTG_VBUSCMP_EN;
- writel(tmp, pbase + XSP_USBPHYACR6);
+ mtk_phy_set_bits(pbase + XSP_USBPHYACR6, P2A6_RG_OTG_VBUSCMP_EN);
- tmp = readl(pbase + XSP_U2PHYDTM1);
- tmp |= P2D_RG_VBUSVALID | P2D_RG_AVALID;
- tmp &= ~P2D_RG_SESSEND;
- writel(tmp, pbase + XSP_U2PHYDTM1);
+ mtk_phy_update_bits(pbase + XSP_U2PHYDTM1,
+ P2D_RG_VBUSVALID | P2D_RG_AVALID | P2D_RG_SESSEND,
+ P2D_RG_VBUSVALID | P2D_RG_AVALID);
dev_dbg(xsphy->dev, "%s(%d)\n", __func__, index);
}
@@ -228,16 +204,12 @@ static void u2_phy_instance_power_off(struct mtk_xsphy *xsphy,
{
void __iomem *pbase = inst->port_base;
u32 index = inst->index;
- u32 tmp;
- tmp = readl(pbase + XSP_USBPHYACR6);
- tmp &= ~P2A6_RG_OTG_VBUSCMP_EN;
- writel(tmp, pbase + XSP_USBPHYACR6);
+ mtk_phy_clear_bits(pbase + XSP_USBPHYACR6, P2A6_RG_OTG_VBUSCMP_EN);
- tmp = readl(pbase + XSP_U2PHYDTM1);
- tmp &= ~(P2D_RG_VBUSVALID | P2D_RG_AVALID);
- tmp |= P2D_RG_SESSEND;
- writel(tmp, pbase + XSP_U2PHYDTM1);
+ mtk_phy_update_bits(pbase + XSP_U2PHYDTM1,
+ P2D_RG_VBUSVALID | P2D_RG_AVALID | P2D_RG_SESSEND,
+ P2D_RG_SESSEND);
dev_dbg(xsphy->dev, "%s(%d)\n", __func__, index);
}
@@ -306,63 +278,43 @@ static void u2_phy_props_set(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
- u32 tmp;
- if (inst->efuse_intr) {
- tmp = readl(pbase + XSP_USBPHYACR1);
- tmp &= ~P2A1_RG_INTR_CAL;
- tmp |= P2A1_RG_INTR_CAL_VAL(inst->efuse_intr);
- writel(tmp, pbase + XSP_USBPHYACR1);
- }
+ if (inst->efuse_intr)
+ mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_INTR_CAL,
+ P2A1_RG_INTR_CAL_VAL(inst->efuse_intr));
- if (inst->eye_src) {
- tmp = readl(pbase + XSP_USBPHYACR5);
- tmp &= ~P2A5_RG_HSTX_SRCTRL;
- tmp |= P2A5_RG_HSTX_SRCTRL_VAL(inst->eye_src);
- writel(tmp, pbase + XSP_USBPHYACR5);
- }
+ if (inst->eye_src)
+ mtk_phy_update_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
+ P2A5_RG_HSTX_SRCTRL_VAL(inst->eye_src));
- if (inst->eye_vrt) {
- tmp = readl(pbase + XSP_USBPHYACR1);
- tmp &= ~P2A1_RG_VRT_SEL;
- tmp |= P2A1_RG_VRT_SEL_VAL(inst->eye_vrt);
- writel(tmp, pbase + XSP_USBPHYACR1);
- }
+ if (inst->eye_vrt)
+ mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_VRT_SEL,
+ P2A1_RG_VRT_SEL_VAL(inst->eye_vrt));
- if (inst->eye_term) {
- tmp = readl(pbase + XSP_USBPHYACR1);
- tmp &= ~P2A1_RG_TERM_SEL;
- tmp |= P2A1_RG_TERM_SEL_VAL(inst->eye_term);
- writel(tmp, pbase + XSP_USBPHYACR1);
- }
+ if (inst->eye_term)
+ mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_TERM_SEL,
+ P2A1_RG_TERM_SEL_VAL(inst->eye_term));
}
static void u3_phy_props_set(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
- u32 tmp;
- if (inst->efuse_intr) {
- tmp = readl(xsphy->glb_base + SSPXTP_PHYA_GLB_00);
- tmp &= ~RG_XTP_GLB_BIAS_INTR_CTRL;
- tmp |= RG_XTP_GLB_BIAS_INTR_CTRL_VAL(inst->efuse_intr);
- writel(tmp, xsphy->glb_base + SSPXTP_PHYA_GLB_00);
- }
+ if (inst->efuse_intr)
+ mtk_phy_update_bits(xsphy->glb_base + SSPXTP_PHYA_GLB_00,
+ RG_XTP_GLB_BIAS_INTR_CTRL,
+ RG_XTP_GLB_BIAS_INTR_CTRL_VAL(inst->efuse_intr));
- if (inst->efuse_tx_imp) {
- tmp = readl(pbase + SSPXTP_PHYA_LN_04);
- tmp &= ~RG_XTP_LN0_TX_IMPSEL;
- tmp |= RG_XTP_LN0_TX_IMPSEL_VAL(inst->efuse_tx_imp);
- writel(tmp, pbase + SSPXTP_PHYA_LN_04);
- }
+ if (inst->efuse_tx_imp)
+ mtk_phy_update_bits(pbase + SSPXTP_PHYA_LN_04,
+ RG_XTP_LN0_TX_IMPSEL,
+ RG_XTP_LN0_TX_IMPSEL_VAL(inst->efuse_tx_imp));
- if (inst->efuse_rx_imp) {
- tmp = readl(pbase + SSPXTP_PHYA_LN_14);
- tmp &= ~RG_XTP_LN0_RX_IMPSEL;
- tmp |= RG_XTP_LN0_RX_IMPSEL_VAL(inst->efuse_rx_imp);
- writel(tmp, pbase + SSPXTP_PHYA_LN_14);
- }
+ if (inst->efuse_rx_imp)
+ mtk_phy_update_bits(pbase + SSPXTP_PHYA_LN_14,
+ RG_XTP_LN0_RX_IMPSEL,
+ RG_XTP_LN0_RX_IMPSEL_VAL(inst->efuse_rx_imp));
}
static int mtk_phy_init(struct phy *phy)
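
One detail worth noting in the xsphy conversion: where the old code had to set some bits and clear others in the same register, the new code folds both into one mtk_phy_update_bits() call by putting all affected bits in the mask and only the bits to be set in the value, e.g. in u2_phy_instance_power_on():

	/* set VBUSVALID/AVALID and clear SESSEND in a single write */
	mtk_phy_update_bits(pbase + XSP_U2PHYDTM1,
			    P2D_RG_VBUSVALID | P2D_RG_AVALID | P2D_RG_SESSEND,
			    P2D_RG_VBUSVALID | P2D_RG_AVALID);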
diff --git a/drivers/phy/microchip/Kconfig b/drivers/phy/microchip/Kconfig
index 3728a284bf64..38039ed0754c 100644
--- a/drivers/phy/microchip/Kconfig
+++ b/drivers/phy/microchip/Kconfig
@@ -11,3 +11,11 @@ config PHY_SPARX5_SERDES
depends on HAS_IOMEM
help
Enable this for support of the 10G/25G SerDes on Microchip Sparx5.
+
+config PHY_LAN966X_SERDES
+ tristate "SerDes PHY driver for Microchip LAN966X"
+ select GENERIC_PHY
+ depends on OF
+ depends on MFD_SYSCON
+ help
+	  Enable this to support SerDes muxing on the Microchip LAN966X.
diff --git a/drivers/phy/microchip/Makefile b/drivers/phy/microchip/Makefile
index 7b98345712aa..fd73b87960a5 100644
--- a/drivers/phy/microchip/Makefile
+++ b/drivers/phy/microchip/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_PHY_SPARX5_SERDES) += sparx5_serdes.o
+obj-$(CONFIG_PHY_LAN966X_SERDES) += lan966x_serdes.o
diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c
new file mode 100644
index 000000000000..e86a879b92b5
--- /dev/null
+++ b/drivers/phy/microchip/lan966x_serdes.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/phy/phy-lan966x-serdes.h>
+#include "lan966x_serdes_regs.h"
+
+#define PLL_CONF_MASK GENMASK(4, 3)
+#define PLL_CONF_25MHZ 0
+#define PLL_CONF_125MHZ 1
+#define PLL_CONF_SERDES_125MHZ 2
+#define PLL_CONF_BYPASS 3
+
+#define lan_offset_(id, tinst, tcnt, \
+ gbase, ginst, gcnt, gwidth, \
+ raddr, rinst, rcnt, rwidth) \
+ (gbase + ((ginst) * gwidth) + raddr + ((rinst) * rwidth))
+#define lan_offset(...) lan_offset_(__VA_ARGS__)
+
+#define lan_rmw(val, mask, reg, off) \
+ lan_rmw_(val, mask, reg, lan_offset(off))
+
+#define SERDES_MUX(_idx, _port, _mode, _submode, _mask, _mux) { \
+ .idx = _idx, \
+ .port = _port, \
+ .mode = _mode, \
+ .submode = _submode, \
+ .mask = _mask, \
+ .mux = _mux, \
+}
+
+#define SERDES_MUX_GMII(i, p, m, c) \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_GMII, m, c)
+#define SERDES_MUX_SGMII(i, p, m, c) \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_SGMII, m, c)
+#define SERDES_MUX_QSGMII(i, p, m, c) \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_QSGMII, m, c)
+#define SERDES_MUX_RGMII(i, p, m, c) \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII, m, c)
+
+static void lan_rmw_(u32 val, u32 mask, void __iomem *mem, u32 offset)
+{
+ u32 v;
+
+ v = readl(mem + offset);
+ v = (v & ~mask) | (val & mask);
+ writel(v, mem + offset);
+}
+
+struct serdes_mux {
+ u8 idx;
+ u8 port;
+ enum phy_mode mode;
+ int submode;
+ u32 mask;
+ u32 mux;
+};
+
+static const struct serdes_mux lan966x_serdes_muxes[] = {
+ SERDES_MUX_QSGMII(SERDES6G(1), 0, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
+ SERDES_MUX_QSGMII(SERDES6G(1), 1, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
+ SERDES_MUX_QSGMII(SERDES6G(1), 2, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
+ SERDES_MUX_QSGMII(SERDES6G(1), 3, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
+
+ SERDES_MUX_QSGMII(SERDES6G(2), 4, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
+ SERDES_MUX_QSGMII(SERDES6G(2), 5, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
+ SERDES_MUX_QSGMII(SERDES6G(2), 6, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
+ SERDES_MUX_QSGMII(SERDES6G(2), 7, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
+
+ SERDES_MUX_GMII(CU(0), 0, HSIO_HW_CFG_GMII_ENA,
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(0))),
+ SERDES_MUX_GMII(CU(1), 1, HSIO_HW_CFG_GMII_ENA,
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(1))),
+
+ SERDES_MUX_SGMII(SERDES6G(0), 0, HSIO_HW_CFG_SD6G_0_CFG, 0),
+ SERDES_MUX_SGMII(SERDES6G(1), 1, HSIO_HW_CFG_SD6G_1_CFG, 0),
+ SERDES_MUX_SGMII(SERDES6G(0), 2, HSIO_HW_CFG_SD6G_0_CFG,
+ HSIO_HW_CFG_SD6G_0_CFG_SET(1)),
+ SERDES_MUX_SGMII(SERDES6G(1), 3, HSIO_HW_CFG_SD6G_1_CFG,
+ HSIO_HW_CFG_SD6G_1_CFG_SET(1)),
+
+ SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG |
+ HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(0))),
+ SERDES_MUX_RGMII(RGMII(1), 3, HSIO_HW_CFG_RGMII_1_CFG |
+ HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(1))),
+ SERDES_MUX_RGMII(RGMII(0), 5, HSIO_HW_CFG_RGMII_0_CFG |
+ HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(0))),
+ SERDES_MUX_RGMII(RGMII(1), 6, HSIO_HW_CFG_RGMII_1_CFG |
+ HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(1))),
+};
+
+struct serdes_ctrl {
+ void __iomem *regs;
+ struct device *dev;
+ struct phy *phys[SERDES_MAX];
+ int ref125;
+};
+
+struct serdes_macro {
+ u8 idx;
+ int port;
+ struct serdes_ctrl *ctrl;
+ int speed;
+ phy_interface_t mode;
+};
+
+enum lan966x_sd6g40_mode {
+ LAN966X_SD6G40_MODE_QSGMII,
+ LAN966X_SD6G40_MODE_SGMII,
+};
+
+enum lan966x_sd6g40_ltx2rx {
+ LAN966X_SD6G40_TX2RX_LOOP_NONE,
+ LAN966X_SD6G40_LTX2RX
+};
+
+struct lan966x_sd6g40_setup_args {
+ enum lan966x_sd6g40_mode mode;
+ enum lan966x_sd6g40_ltx2rx tx2rx_loop;
+ bool txinvert;
+ bool rxinvert;
+ bool refclk125M;
+ bool mute;
+};
+
+struct lan966x_sd6g40_mode_args {
+ enum lan966x_sd6g40_mode mode;
+ u8 lane_10bit_sel;
+ u8 mpll_multiplier;
+ u8 ref_clkdiv2;
+ u8 tx_rate;
+ u8 rx_rate;
+};
+
+struct lan966x_sd6g40_setup {
+ u8 rx_term_en;
+ u8 lane_10bit_sel;
+ u8 tx_invert;
+ u8 rx_invert;
+ u8 mpll_multiplier;
+ u8 lane_loopbk_en;
+ u8 ref_clkdiv2;
+ u8 tx_rate;
+ u8 rx_rate;
+};
+
+static int lan966x_sd6g40_reg_cfg(struct serdes_macro *macro,
+ struct lan966x_sd6g40_setup *res_struct,
+ u32 idx)
+{
+ u32 value;
+
+ /* Note: SerDes HSIO is configured in 1G_LAN mode */
+ lan_rmw(HSIO_SD_CFG_LANE_10BIT_SEL_SET(res_struct->lane_10bit_sel) |
+ HSIO_SD_CFG_RX_RATE_SET(res_struct->rx_rate) |
+ HSIO_SD_CFG_TX_RATE_SET(res_struct->tx_rate) |
+ HSIO_SD_CFG_TX_INVERT_SET(res_struct->tx_invert) |
+ HSIO_SD_CFG_RX_INVERT_SET(res_struct->rx_invert) |
+ HSIO_SD_CFG_LANE_LOOPBK_EN_SET(res_struct->lane_loopbk_en) |
+ HSIO_SD_CFG_RX_RESET_SET(0) |
+ HSIO_SD_CFG_TX_RESET_SET(0),
+ HSIO_SD_CFG_LANE_10BIT_SEL |
+ HSIO_SD_CFG_RX_RATE |
+ HSIO_SD_CFG_TX_RATE |
+ HSIO_SD_CFG_TX_INVERT |
+ HSIO_SD_CFG_RX_INVERT |
+ HSIO_SD_CFG_LANE_LOOPBK_EN |
+ HSIO_SD_CFG_RX_RESET |
+ HSIO_SD_CFG_TX_RESET,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ lan_rmw(HSIO_MPLL_CFG_MPLL_MULTIPLIER_SET(res_struct->mpll_multiplier) |
+ HSIO_MPLL_CFG_REF_CLKDIV2_SET(res_struct->ref_clkdiv2),
+ HSIO_MPLL_CFG_MPLL_MULTIPLIER |
+ HSIO_MPLL_CFG_REF_CLKDIV2,
+ macro->ctrl->regs, HSIO_MPLL_CFG(idx));
+
+ lan_rmw(HSIO_SD_CFG_RX_TERM_EN_SET(res_struct->rx_term_en),
+ HSIO_SD_CFG_RX_TERM_EN,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ lan_rmw(HSIO_MPLL_CFG_REF_SSP_EN_SET(1),
+ HSIO_MPLL_CFG_REF_SSP_EN,
+ macro->ctrl->regs, HSIO_MPLL_CFG(idx));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ lan_rmw(HSIO_SD_CFG_PHY_RESET_SET(0),
+ HSIO_SD_CFG_PHY_RESET,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ lan_rmw(HSIO_MPLL_CFG_MPLL_EN_SET(1),
+ HSIO_MPLL_CFG_MPLL_EN,
+ macro->ctrl->regs, HSIO_MPLL_CFG(idx));
+
+ usleep_range(7 * USEC_PER_MSEC, 8 * USEC_PER_MSEC);
+
+ value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
+ value = HSIO_SD_STAT_MPLL_STATE_GET(value);
+ if (value != 0x1) {
+ dev_err(macro->ctrl->dev,
+ "Unexpected sd_sd_stat[%u] mpll_state was 0x1 but is 0x%x\n",
+ idx, value);
+ return -EIO;
+ }
+
+ lan_rmw(HSIO_SD_CFG_TX_CM_EN_SET(1),
+ HSIO_SD_CFG_TX_CM_EN,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
+ value = HSIO_SD_STAT_TX_CM_STATE_GET(value);
+ if (value != 0x1) {
+ dev_err(macro->ctrl->dev,
+ "Unexpected sd_sd_stat[%u] tx_cm_state was 0x1 but is 0x%x\n",
+ idx, value);
+ return -EIO;
+ }
+
+ lan_rmw(HSIO_SD_CFG_RX_PLL_EN_SET(1) |
+ HSIO_SD_CFG_TX_EN_SET(1),
+ HSIO_SD_CFG_RX_PLL_EN |
+ HSIO_SD_CFG_TX_EN,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+	/* check that the serdes rx DPLL has locked */
+ value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
+ value = HSIO_SD_STAT_RX_PLL_STATE_GET(value);
+ if (value != 0x1) {
+ dev_err(macro->ctrl->dev,
+ "Unexpected sd_sd_stat[%u] rx_pll_state was 0x1 but is 0x%x\n",
+ idx, value);
+ return -EIO;
+ }
+
+	/* check that the serdes tx is operational */
+ value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
+ value = HSIO_SD_STAT_TX_STATE_GET(value);
+ if (value != 0x1) {
+ dev_err(macro->ctrl->dev,
+ "Unexpected sd_sd_stat[%u] tx_state was 0x1 but is 0x%x\n",
+ idx, value);
+ return -EIO;
+ }
+
+ lan_rmw(HSIO_SD_CFG_TX_DATA_EN_SET(1) |
+ HSIO_SD_CFG_RX_DATA_EN_SET(1),
+ HSIO_SD_CFG_TX_DATA_EN |
+ HSIO_SD_CFG_RX_DATA_EN,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ return 0;
+}
+
+static int lan966x_sd6g40_get_conf_from_mode(struct serdes_macro *macro,
+ enum lan966x_sd6g40_mode f_mode,
+ bool ref125M,
+ struct lan966x_sd6g40_mode_args *ret_val)
+{
+ switch (f_mode) {
+ case LAN966X_SD6G40_MODE_QSGMII:
+ ret_val->lane_10bit_sel = 0;
+ if (ref125M) {
+ ret_val->mpll_multiplier = 40;
+ ret_val->ref_clkdiv2 = 0x1;
+ ret_val->tx_rate = 0x0;
+ ret_val->rx_rate = 0x0;
+ } else {
+ ret_val->mpll_multiplier = 100;
+ ret_val->ref_clkdiv2 = 0x0;
+ ret_val->tx_rate = 0x0;
+ ret_val->rx_rate = 0x0;
+ }
+ break;
+
+ case LAN966X_SD6G40_MODE_SGMII:
+ ret_val->lane_10bit_sel = 1;
+ if (ref125M) {
+ ret_val->mpll_multiplier = macro->speed == SPEED_2500 ? 50 : 40;
+ ret_val->ref_clkdiv2 = 0x1;
+ ret_val->tx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
+ ret_val->rx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
+ } else {
+ ret_val->mpll_multiplier = macro->speed == SPEED_2500 ? 125 : 100;
+ ret_val->ref_clkdiv2 = 0x0;
+ ret_val->tx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
+ ret_val->rx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int lan966x_calc_sd6g40_setup_lane(struct serdes_macro *macro,
+ struct lan966x_sd6g40_setup_args config,
+ struct lan966x_sd6g40_setup *ret_val)
+{
+ struct lan966x_sd6g40_mode_args sd6g40_mode;
+ struct lan966x_sd6g40_mode_args *mode_args = &sd6g40_mode;
+ int ret;
+
+ ret = lan966x_sd6g40_get_conf_from_mode(macro, config.mode,
+ config.refclk125M, mode_args);
+ if (ret)
+ return ret;
+
+ ret_val->lane_10bit_sel = mode_args->lane_10bit_sel;
+ ret_val->rx_rate = mode_args->rx_rate;
+ ret_val->tx_rate = mode_args->tx_rate;
+ ret_val->mpll_multiplier = mode_args->mpll_multiplier;
+ ret_val->ref_clkdiv2 = mode_args->ref_clkdiv2;
+ ret_val->rx_term_en = 0;
+
+ if (config.tx2rx_loop == LAN966X_SD6G40_LTX2RX)
+ ret_val->lane_loopbk_en = 1;
+ else
+ ret_val->lane_loopbk_en = 0;
+
+ ret_val->tx_invert = !!config.txinvert;
+ ret_val->rx_invert = !!config.rxinvert;
+
+ return 0;
+}
+
+static int lan966x_sd6g40_setup_lane(struct serdes_macro *macro,
+ struct lan966x_sd6g40_setup_args config,
+ u32 idx)
+{
+ struct lan966x_sd6g40_setup calc_results = {};
+ int ret;
+
+ ret = lan966x_calc_sd6g40_setup_lane(macro, config, &calc_results);
+ if (ret)
+ return ret;
+
+ return lan966x_sd6g40_reg_cfg(macro, &calc_results, idx);
+}
+
+static int lan966x_sd6g40_setup(struct serdes_macro *macro, u32 idx, int mode)
+{
+ struct lan966x_sd6g40_setup_args conf = {};
+
+ conf.refclk125M = macro->ctrl->ref125;
+
+ if (mode == PHY_INTERFACE_MODE_QSGMII)
+ conf.mode = LAN966X_SD6G40_MODE_QSGMII;
+ else
+ conf.mode = LAN966X_SD6G40_MODE_SGMII;
+
+ return lan966x_sd6g40_setup_lane(macro, conf, idx);
+}
+
+static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct serdes_macro *macro = phy_get_drvdata(phy);
+ unsigned int i;
+ int val;
+
+ /* As of now only PHY_MODE_ETHERNET is supported */
+ if (mode != PHY_MODE_ETHERNET)
+ return -EOPNOTSUPP;
+
+ if (submode == PHY_INTERFACE_MODE_2500BASEX)
+ macro->speed = SPEED_2500;
+ else
+ macro->speed = SPEED_1000;
+
+ if (submode == PHY_INTERFACE_MODE_1000BASEX ||
+ submode == PHY_INTERFACE_MODE_2500BASEX)
+ submode = PHY_INTERFACE_MODE_SGMII;
+
+ for (i = 0; i < ARRAY_SIZE(lan966x_serdes_muxes); i++) {
+ if (macro->idx != lan966x_serdes_muxes[i].idx ||
+ mode != lan966x_serdes_muxes[i].mode ||
+ submode != lan966x_serdes_muxes[i].submode ||
+ macro->port != lan966x_serdes_muxes[i].port)
+ continue;
+
+ val = readl(macro->ctrl->regs + lan_offset(HSIO_HW_CFG));
+ val |= lan966x_serdes_muxes[i].mux;
+ lan_rmw(val, lan966x_serdes_muxes[i].mask,
+ macro->ctrl->regs, HSIO_HW_CFG);
+
+ macro->mode = lan966x_serdes_muxes[i].submode;
+
+ if (macro->idx < CU_MAX)
+ return 0;
+
+ if (macro->idx < SERDES6G_MAX)
+ return lan966x_sd6g40_setup(macro,
+ macro->idx - (CU_MAX + 1),
+ macro->mode);
+
+ if (macro->idx < RGMII_MAX)
+ return 0;
+
+ return -EOPNOTSUPP;
+ }
+
+ return -EINVAL;
+}
+
+static const struct phy_ops serdes_ops = {
+ .set_mode = serdes_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *serdes_simple_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct serdes_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned int port, idx, i;
+
+ if (args->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ port = args->args[0];
+ idx = args->args[1];
+
+ for (i = 0; i < SERDES_MAX; i++) {
+ struct serdes_macro *macro = phy_get_drvdata(ctrl->phys[i]);
+
+ if (idx != macro->idx)
+ continue;
+
+ macro->port = port;
+ return ctrl->phys[i];
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int serdes_phy_create(struct serdes_ctrl *ctrl, u8 idx, struct phy **phy)
+{
+ struct serdes_macro *macro;
+
+ *phy = devm_phy_create(ctrl->dev, NULL, &serdes_ops);
+ if (IS_ERR(*phy))
+ return PTR_ERR(*phy);
+
+ macro = devm_kzalloc(ctrl->dev, sizeof(*macro), GFP_KERNEL);
+ if (!macro)
+ return -ENOMEM;
+
+ macro->idx = idx;
+ macro->ctrl = ctrl;
+ macro->port = -1;
+
+ phy_set_drvdata(*phy, macro);
+
+ return 0;
+}
+
+static int serdes_probe(struct platform_device *pdev)
+{
+ struct phy_provider *provider;
+ struct serdes_ctrl *ctrl;
+ void __iomem *hw_stat;
+ unsigned int i;
+ u32 val;
+ int ret;
+
+ ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = &pdev->dev;
+ ctrl->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(ctrl->regs))
+ return PTR_ERR(ctrl->regs);
+
+ hw_stat = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(hw_stat))
+ return PTR_ERR(hw_stat);
+
+ for (i = 0; i < SERDES_MAX; i++) {
+ ret = serdes_phy_create(ctrl, i, &ctrl->phys[i]);
+ if (ret)
+ return ret;
+ }
+
+ val = readl(hw_stat);
+ val = FIELD_GET(PLL_CONF_MASK, val);
+ ctrl->ref125 = (val == PLL_CONF_125MHZ ||
+ val == PLL_CONF_SERDES_125MHZ);
+
+ dev_set_drvdata(&pdev->dev, ctrl);
+
+ provider = devm_of_phy_provider_register(ctrl->dev,
+ serdes_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id serdes_ids[] = {
+ { .compatible = "microchip,lan966x-serdes", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, serdes_ids);
+
+static struct platform_driver mscc_lan966x_serdes = {
+ .probe = serdes_probe,
+ .driver = {
+ .name = "microchip,lan966x-serdes",
+ .of_match_table = of_match_ptr(serdes_ids),
+ },
+};
+
+module_platform_driver(mscc_lan966x_serdes);
+
+MODULE_DESCRIPTION("Microchip lan966x switch serdes driver");
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/microchip/lan966x_serdes_regs.h b/drivers/phy/microchip/lan966x_serdes_regs.h
new file mode 100644
index 000000000000..ea30f64ffd5c
--- /dev/null
+++ b/drivers/phy/microchip/lan966x_serdes_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LAN966X_SERDES_REGS_H_
+#define _LAN966X_SERDES_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum lan966x_target {
+ TARGET_HSIO = 32,
+ NUM_TARGETS = 66
+};
+
+#define __REG(...) __VA_ARGS__
+
+/* HSIO:SD:SD_CFG */
+#define HSIO_SD_CFG(g) __REG(TARGET_HSIO, 0, 1, 8, g, 3, 32, 0, 0, 1, 4)
+
+#define HSIO_SD_CFG_PHY_RESET BIT(27)
+#define HSIO_SD_CFG_PHY_RESET_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_PHY_RESET, x)
+#define HSIO_SD_CFG_PHY_RESET_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_PHY_RESET, x)
+
+#define HSIO_SD_CFG_TX_RESET BIT(18)
+#define HSIO_SD_CFG_TX_RESET_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_RESET, x)
+#define HSIO_SD_CFG_TX_RESET_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_RESET, x)
+
+#define HSIO_SD_CFG_TX_RATE GENMASK(17, 16)
+#define HSIO_SD_CFG_TX_RATE_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_RATE, x)
+#define HSIO_SD_CFG_TX_RATE_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_RATE, x)
+
+#define HSIO_SD_CFG_TX_INVERT BIT(15)
+#define HSIO_SD_CFG_TX_INVERT_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_INVERT, x)
+#define HSIO_SD_CFG_TX_INVERT_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_INVERT, x)
+
+#define HSIO_SD_CFG_TX_EN BIT(14)
+#define HSIO_SD_CFG_TX_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_EN, x)
+#define HSIO_SD_CFG_TX_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_EN, x)
+
+#define HSIO_SD_CFG_TX_DATA_EN BIT(12)
+#define HSIO_SD_CFG_TX_DATA_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_DATA_EN, x)
+#define HSIO_SD_CFG_TX_DATA_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_DATA_EN, x)
+
+#define HSIO_SD_CFG_TX_CM_EN BIT(11)
+#define HSIO_SD_CFG_TX_CM_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_CM_EN, x)
+#define HSIO_SD_CFG_TX_CM_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_CM_EN, x)
+
+#define HSIO_SD_CFG_LANE_10BIT_SEL BIT(10)
+#define HSIO_SD_CFG_LANE_10BIT_SEL_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_LANE_10BIT_SEL, x)
+#define HSIO_SD_CFG_LANE_10BIT_SEL_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_LANE_10BIT_SEL, x)
+
+#define HSIO_SD_CFG_RX_TERM_EN BIT(9)
+#define HSIO_SD_CFG_RX_TERM_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_TERM_EN, x)
+#define HSIO_SD_CFG_RX_TERM_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_TERM_EN, x)
+
+#define HSIO_SD_CFG_RX_RESET BIT(8)
+#define HSIO_SD_CFG_RX_RESET_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_RESET, x)
+#define HSIO_SD_CFG_RX_RESET_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_RESET, x)
+
+#define HSIO_SD_CFG_RX_RATE GENMASK(7, 6)
+#define HSIO_SD_CFG_RX_RATE_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_RATE, x)
+#define HSIO_SD_CFG_RX_RATE_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_RATE, x)
+
+#define HSIO_SD_CFG_RX_PLL_EN BIT(5)
+#define HSIO_SD_CFG_RX_PLL_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_PLL_EN, x)
+#define HSIO_SD_CFG_RX_PLL_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_PLL_EN, x)
+
+#define HSIO_SD_CFG_RX_INVERT BIT(3)
+#define HSIO_SD_CFG_RX_INVERT_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_INVERT, x)
+#define HSIO_SD_CFG_RX_INVERT_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_INVERT, x)
+
+#define HSIO_SD_CFG_RX_DATA_EN BIT(2)
+#define HSIO_SD_CFG_RX_DATA_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_DATA_EN, x)
+#define HSIO_SD_CFG_RX_DATA_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_DATA_EN, x)
+
+#define HSIO_SD_CFG_LANE_LOOPBK_EN BIT(0)
+#define HSIO_SD_CFG_LANE_LOOPBK_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_LANE_LOOPBK_EN, x)
+#define HSIO_SD_CFG_LANE_LOOPBK_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_LANE_LOOPBK_EN, x)
+
+/* HSIO:SD:MPLL_CFG */
+#define HSIO_MPLL_CFG(g) __REG(TARGET_HSIO, 0, 1, 8, g, 3, 32, 8, 0, 1, 4)
+
+#define HSIO_MPLL_CFG_REF_SSP_EN BIT(18)
+#define HSIO_MPLL_CFG_REF_SSP_EN_SET(x)\
+ FIELD_PREP(HSIO_MPLL_CFG_REF_SSP_EN, x)
+#define HSIO_MPLL_CFG_REF_SSP_EN_GET(x)\
+ FIELD_GET(HSIO_MPLL_CFG_REF_SSP_EN, x)
+
+#define HSIO_MPLL_CFG_REF_CLKDIV2 BIT(17)
+#define HSIO_MPLL_CFG_REF_CLKDIV2_SET(x)\
+ FIELD_PREP(HSIO_MPLL_CFG_REF_CLKDIV2, x)
+#define HSIO_MPLL_CFG_REF_CLKDIV2_GET(x)\
+ FIELD_GET(HSIO_MPLL_CFG_REF_CLKDIV2, x)
+
+#define HSIO_MPLL_CFG_MPLL_EN BIT(16)
+#define HSIO_MPLL_CFG_MPLL_EN_SET(x)\
+ FIELD_PREP(HSIO_MPLL_CFG_MPLL_EN, x)
+#define HSIO_MPLL_CFG_MPLL_EN_GET(x)\
+ FIELD_GET(HSIO_MPLL_CFG_MPLL_EN, x)
+
+#define HSIO_MPLL_CFG_MPLL_MULTIPLIER GENMASK(6, 0)
+#define HSIO_MPLL_CFG_MPLL_MULTIPLIER_SET(x)\
+ FIELD_PREP(HSIO_MPLL_CFG_MPLL_MULTIPLIER, x)
+#define HSIO_MPLL_CFG_MPLL_MULTIPLIER_GET(x)\
+ FIELD_GET(HSIO_MPLL_CFG_MPLL_MULTIPLIER, x)
+
+/* HSIO:SD:SD_STAT */
+#define HSIO_SD_STAT(g) __REG(TARGET_HSIO, 0, 1, 8, g, 3, 32, 12, 0, 1, 4)
+
+#define HSIO_SD_STAT_MPLL_STATE BIT(6)
+#define HSIO_SD_STAT_MPLL_STATE_SET(x)\
+ FIELD_PREP(HSIO_SD_STAT_MPLL_STATE, x)
+#define HSIO_SD_STAT_MPLL_STATE_GET(x)\
+ FIELD_GET(HSIO_SD_STAT_MPLL_STATE, x)
+
+#define HSIO_SD_STAT_TX_STATE BIT(5)
+#define HSIO_SD_STAT_TX_STATE_SET(x)\
+ FIELD_PREP(HSIO_SD_STAT_TX_STATE, x)
+#define HSIO_SD_STAT_TX_STATE_GET(x)\
+ FIELD_GET(HSIO_SD_STAT_TX_STATE, x)
+
+#define HSIO_SD_STAT_TX_CM_STATE BIT(2)
+#define HSIO_SD_STAT_TX_CM_STATE_SET(x)\
+ FIELD_PREP(HSIO_SD_STAT_TX_CM_STATE, x)
+#define HSIO_SD_STAT_TX_CM_STATE_GET(x)\
+ FIELD_GET(HSIO_SD_STAT_TX_CM_STATE, x)
+
+#define HSIO_SD_STAT_RX_PLL_STATE BIT(0)
+#define HSIO_SD_STAT_RX_PLL_STATE_SET(x)\
+ FIELD_PREP(HSIO_SD_STAT_RX_PLL_STATE, x)
+#define HSIO_SD_STAT_RX_PLL_STATE_GET(x)\
+ FIELD_GET(HSIO_SD_STAT_RX_PLL_STATE, x)
+
+/* HSIO:HW_CFGSTAT:HW_CFG */
+#define HSIO_HW_CFG __REG(TARGET_HSIO, 0, 1, 104, 0, 1, 52, 0, 0, 1, 4)
+
+#define HSIO_HW_CFG_RGMII_1_CFG BIT(15)
+#define HSIO_HW_CFG_RGMII_1_CFG_SET(x)\
+	FIELD_PREP(HSIO_HW_CFG_RGMII_1_CFG, x)
+#define HSIO_HW_CFG_RGMII_1_CFG_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_RGMII_1_CFG, x)
+
+#define HSIO_HW_CFG_RGMII_0_CFG BIT(14)
+#define HSIO_HW_CFG_RGMII_0_CFG_SET(x)\
+	FIELD_PREP(HSIO_HW_CFG_RGMII_0_CFG, x)
+#define HSIO_HW_CFG_RGMII_0_CFG_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_RGMII_0_CFG, x)
+
+#define HSIO_HW_CFG_RGMII_ENA GENMASK(13, 12)
+#define HSIO_HW_CFG_RGMII_ENA_SET(x)\
+	FIELD_PREP(HSIO_HW_CFG_RGMII_ENA, x)
+#define HSIO_HW_CFG_RGMII_ENA_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_RGMII_ENA, x)
+
+#define HSIO_HW_CFG_SD6G_0_CFG BIT(11)
+#define HSIO_HW_CFG_SD6G_0_CFG_SET(x)\
+	FIELD_PREP(HSIO_HW_CFG_SD6G_0_CFG, x)
+#define HSIO_HW_CFG_SD6G_0_CFG_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_SD6G_0_CFG, x)
+
+#define HSIO_HW_CFG_SD6G_1_CFG BIT(10)
+#define HSIO_HW_CFG_SD6G_1_CFG_SET(x)\
+	FIELD_PREP(HSIO_HW_CFG_SD6G_1_CFG, x)
+#define HSIO_HW_CFG_SD6G_1_CFG_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_SD6G_1_CFG, x)
+
+#define HSIO_HW_CFG_GMII_ENA GENMASK(9, 2)
+#define HSIO_HW_CFG_GMII_ENA_SET(x)\
+	FIELD_PREP(HSIO_HW_CFG_GMII_ENA, x)
+#define HSIO_HW_CFG_GMII_ENA_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_GMII_ENA, x)
+
+#define HSIO_HW_CFG_QSGMII_ENA GENMASK(1, 0)
+#define HSIO_HW_CFG_QSGMII_ENA_SET(x)\
+	FIELD_PREP(HSIO_HW_CFG_QSGMII_ENA, x)
+#define HSIO_HW_CFG_QSGMII_ENA_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_QSGMII_ENA, x)
+
+#endif /* _LAN966X_SERDES_REGS_H_ */
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
index c2cb93b4df71..6f3fe37dee0e 100644
--- a/drivers/phy/phy-can-transceiver.c
+++ b/drivers/phy/phy-can-transceiver.c
@@ -110,14 +110,14 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
can_transceiver_phy->generic_phy = phy;
if (drvdata->flags & CAN_TRANSCEIVER_STB_PRESENT) {
- standby_gpio = devm_gpiod_get(dev, "standby", GPIOD_OUT_HIGH);
+ standby_gpio = devm_gpiod_get_optional(dev, "standby", GPIOD_OUT_HIGH);
if (IS_ERR(standby_gpio))
return PTR_ERR(standby_gpio);
can_transceiver_phy->standby_gpio = standby_gpio;
}
if (drvdata->flags & CAN_TRANSCEIVER_EN_PRESENT) {
- enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(enable_gpio))
return PTR_ERR(enable_gpio);
can_transceiver_phy->enable_gpio = enable_gpio;
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 7f6fcb8ec5ba..5c98850f5a36 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -18,6 +18,16 @@ config PHY_QCOM_APQ8064_SATA
depends on OF
select GENERIC_PHY
+config PHY_QCOM_EDP
+ tristate "Qualcomm eDP PHY driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on OF
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ help
+ Enable this driver to support the Qualcomm eDP PHY found in various
+ Qualcomm chipsets.
+
config PHY_QCOM_IPQ4019_USB
tristate "Qualcomm IPQ4019 USB PHY driver"
depends on OF && (ARCH_QCOM || COMPILE_TEST)
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 47acbd7daa3a..e9e3b1a4dbb0 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ATH79_USB) += phy-ath79-usb.o
obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
+obj-$(CONFIG_PHY_QCOM_EDP) += phy-qcom-edp.o
obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c
new file mode 100644
index 000000000000..a8ecd2e8442d
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-edp.c
@@ -0,0 +1,674 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* EDP_PHY registers */
+#define DP_PHY_CFG 0x0010
+#define DP_PHY_CFG_1 0x0014
+#define DP_PHY_PD_CTL 0x001c
+#define DP_PHY_MODE 0x0020
+
+#define DP_PHY_AUX_CFG0 0x0024
+#define DP_PHY_AUX_CFG1 0x0028
+#define DP_PHY_AUX_CFG2 0x002C
+#define DP_PHY_AUX_CFG3 0x0030
+#define DP_PHY_AUX_CFG4 0x0034
+#define DP_PHY_AUX_CFG5 0x0038
+#define DP_PHY_AUX_CFG6 0x003C
+#define DP_PHY_AUX_CFG7 0x0040
+#define DP_PHY_AUX_CFG8 0x0044
+#define DP_PHY_AUX_CFG9 0x0048
+
+#define DP_PHY_AUX_INTERRUPT_MASK 0x0058
+
+#define DP_PHY_VCO_DIV 0x0074
+#define DP_PHY_TX0_TX1_LANE_CTL 0x007c
+#define DP_PHY_TX2_TX3_LANE_CTL 0x00a0
+
+#define DP_PHY_STATUS 0x00e0
+
+/* LANE_TXn registers */
+#define TXn_CLKBUF_ENABLE 0x0000
+#define TXn_TX_EMP_POST1_LVL 0x0004
+
+#define TXn_TX_DRV_LVL 0x0014
+#define TXn_TX_DRV_LVL_OFFSET 0x0018
+#define TXn_RESET_TSYNC_EN 0x001c
+#define TXn_LDO_CONFIG 0x0084
+#define TXn_TX_BAND 0x0028
+
+#define TXn_RES_CODE_LANE_OFFSET_TX0 0x0044
+#define TXn_RES_CODE_LANE_OFFSET_TX1 0x0048
+
+#define TXn_TRANSCEIVER_BIAS_EN 0x0054
+#define TXn_HIGHZ_DRVR_EN 0x0058
+#define TXn_TX_POL_INV 0x005c
+#define TXn_LANE_MODE_1 0x0064
+
+#define TXn_TRAN_DRVR_EMP_EN 0x0078
+
+struct qcom_edp {
+ struct device *dev;
+
+ struct phy *phy;
+
+ void __iomem *edp;
+ void __iomem *tx0;
+ void __iomem *tx1;
+ void __iomem *pll;
+
+ struct clk_hw dp_link_hw;
+ struct clk_hw dp_pixel_hw;
+
+ struct phy_configure_opts_dp dp_opts;
+
+ struct clk_bulk_data clks[2];
+ struct regulator_bulk_data supplies[2];
+};
+
+static int qcom_edp_phy_init(struct phy *phy)
+{
+ struct qcom_edp *edp = phy_get_drvdata(phy);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(edp->supplies), edp->supplies);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(edp->clks), edp->clks);
+ if (ret)
+ goto out_disable_supplies;
+
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ edp->edp + DP_PHY_PD_CTL);
+
+ /* Turn on BIAS current for PHY/PLL */
+ writel(0x17, edp->pll + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
+
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, edp->edp + DP_PHY_PD_CTL);
+ msleep(20);
+
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ edp->edp + DP_PHY_PD_CTL);
+
+ writel(0x00, edp->edp + DP_PHY_AUX_CFG0);
+ writel(0x13, edp->edp + DP_PHY_AUX_CFG1);
+ writel(0x24, edp->edp + DP_PHY_AUX_CFG2);
+ writel(0x00, edp->edp + DP_PHY_AUX_CFG3);
+ writel(0x0a, edp->edp + DP_PHY_AUX_CFG4);
+ writel(0x26, edp->edp + DP_PHY_AUX_CFG5);
+ writel(0x0a, edp->edp + DP_PHY_AUX_CFG6);
+ writel(0x03, edp->edp + DP_PHY_AUX_CFG7);
+ writel(0x37, edp->edp + DP_PHY_AUX_CFG8);
+ writel(0x03, edp->edp + DP_PHY_AUX_CFG9);
+
+ writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
+ PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
+ PHY_AUX_REQ_ERR_MASK, edp->edp + DP_PHY_AUX_INTERRUPT_MASK);
+
+ msleep(20);
+
+ return 0;
+
+out_disable_supplies:
+ regulator_bulk_disable(ARRAY_SIZE(edp->supplies), edp->supplies);
+
+ return ret;
+}
+
+static int qcom_edp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ const struct phy_configure_opts_dp *dp_opts = &opts->dp;
+ struct qcom_edp *edp = phy_get_drvdata(phy);
+
+ memcpy(&edp->dp_opts, dp_opts, sizeof(*dp_opts));
+
+ return 0;
+}
+
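+/*
+ * Program the PLL's spread-spectrum clocking. Only the step-size words
+ * differ per link rate: 1.62/2.7/8.1 GHz share one value pair while
+ * 5.4 GHz uses another, mirroring the shared VCO settings in
+ * qcom_edp_configure_pll() below.
+ */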
+static int qcom_edp_configure_ssc(const struct qcom_edp *edp)
+{
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+ u32 step1;
+ u32 step2;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ case 2700:
+ case 8100:
+ step1 = 0x45;
+ step2 = 0x06;
+ break;
+
+ case 5400:
+ step1 = 0x5c;
+ step2 = 0x08;
+ break;
+
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+
+ writel(0x01, edp->pll + QSERDES_V4_COM_SSC_EN_CENTER);
+ writel(0x00, edp->pll + QSERDES_V4_COM_SSC_ADJ_PER1);
+ writel(0x36, edp->pll + QSERDES_V4_COM_SSC_PER1);
+ writel(0x01, edp->pll + QSERDES_V4_COM_SSC_PER2);
+ writel(step1, edp->pll + QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0);
+ writel(step2, edp->pll + QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0);
+
+ return 0;
+}
+
+static int qcom_edp_configure_pll(const struct qcom_edp *edp)
+{
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+ u32 div_frac_start2_mode0;
+ u32 div_frac_start3_mode0;
+ u32 dec_start_mode0;
+ u32 lock_cmp1_mode0;
+ u32 lock_cmp2_mode0;
+ u32 hsclk_sel;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ hsclk_sel = 0x5;
+ dec_start_mode0 = 0x69;
+ div_frac_start2_mode0 = 0x80;
+ div_frac_start3_mode0 = 0x07;
+ lock_cmp1_mode0 = 0x6f;
+ lock_cmp2_mode0 = 0x08;
+ break;
+
+ case 2700:
+ hsclk_sel = 0x3;
+ dec_start_mode0 = 0x69;
+ div_frac_start2_mode0 = 0x80;
+ div_frac_start3_mode0 = 0x07;
+ lock_cmp1_mode0 = 0x0f;
+ lock_cmp2_mode0 = 0x0e;
+ break;
+
+ case 5400:
+ hsclk_sel = 0x1;
+ dec_start_mode0 = 0x8c;
+ div_frac_start2_mode0 = 0x00;
+ div_frac_start3_mode0 = 0x0a;
+ lock_cmp1_mode0 = 0x1f;
+ lock_cmp2_mode0 = 0x1c;
+ break;
+
+ case 8100:
+ hsclk_sel = 0x0;
+ dec_start_mode0 = 0x69;
+ div_frac_start2_mode0 = 0x80;
+ div_frac_start3_mode0 = 0x07;
+ lock_cmp1_mode0 = 0x2f;
+ lock_cmp2_mode0 = 0x2a;
+ break;
+
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+
+ writel(0x01, edp->pll + QSERDES_V4_COM_SVS_MODE_CLK_SEL);
+ writel(0x0b, edp->pll + QSERDES_V4_COM_SYSCLK_EN_SEL);
+ writel(0x02, edp->pll + QSERDES_V4_COM_SYS_CLK_CTRL);
+ writel(0x0c, edp->pll + QSERDES_V4_COM_CLK_ENABLE1);
+ writel(0x06, edp->pll + QSERDES_V4_COM_SYSCLK_BUF_ENABLE);
+ writel(0x30, edp->pll + QSERDES_V4_COM_CLK_SELECT);
+ writel(hsclk_sel, edp->pll + QSERDES_V4_COM_HSCLK_SEL);
+ writel(0x0f, edp->pll + QSERDES_V4_COM_PLL_IVCO);
+ writel(0x08, edp->pll + QSERDES_V4_COM_LOCK_CMP_EN);
+ writel(0x36, edp->pll + QSERDES_V4_COM_PLL_CCTRL_MODE0);
+ writel(0x16, edp->pll + QSERDES_V4_COM_PLL_RCTRL_MODE0);
+ writel(0x06, edp->pll + QSERDES_V4_COM_CP_CTRL_MODE0);
+ writel(dec_start_mode0, edp->pll + QSERDES_V4_COM_DEC_START_MODE0);
+ writel(0x00, edp->pll + QSERDES_V4_COM_DIV_FRAC_START1_MODE0);
+ writel(div_frac_start2_mode0, edp->pll + QSERDES_V4_COM_DIV_FRAC_START2_MODE0);
+ writel(div_frac_start3_mode0, edp->pll + QSERDES_V4_COM_DIV_FRAC_START3_MODE0);
+ writel(0x02, edp->pll + QSERDES_V4_COM_CMN_CONFIG);
+ writel(0x3f, edp->pll + QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0);
+ writel(0x00, edp->pll + QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0);
+ writel(0x00, edp->pll + QSERDES_V4_COM_VCO_TUNE_MAP);
+ writel(lock_cmp1_mode0, edp->pll + QSERDES_V4_COM_LOCK_CMP1_MODE0);
+ writel(lock_cmp2_mode0, edp->pll + QSERDES_V4_COM_LOCK_CMP2_MODE0);
+
+ writel(0x0a, edp->pll + QSERDES_V4_COM_BG_TIMER);
+ writel(0x14, edp->pll + QSERDES_V4_COM_CORECLK_DIV_MODE0);
+ writel(0x00, edp->pll + QSERDES_V4_COM_VCO_TUNE_CTRL);
+ writel(0x17, edp->pll + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
+ writel(0x0f, edp->pll + QSERDES_V4_COM_CORE_CLK_EN);
+ writel(0xa0, edp->pll + QSERDES_V4_COM_VCO_TUNE1_MODE0);
+ writel(0x03, edp->pll + QSERDES_V4_COM_VCO_TUNE2_MODE0);
+
+ return 0;
+}
+
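+/*
+ * The pixel-clock parent runs at the link bit rate divided by 2, 4 or 6;
+ * DP_PHY_VCO_DIV encodes that divider (0x1 = div 2, 0x2 = div 4,
+ * 0x0 = div 6), e.g. 8.1 GHz / 6 = 1.35 GHz. The link clock is the symbol
+ * clock, i.e. the bit rate divided by 10, hence link_rate * 100000 Hz
+ * below (link_rate is given in Mb/s).
+ */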
+static int qcom_edp_set_vco_div(const struct qcom_edp *edp)
+{
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+ unsigned long pixel_freq;
+ u32 vco_div;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ vco_div = 0x1;
+ pixel_freq = 1620000000UL / 2;
+ break;
+
+ case 2700:
+ vco_div = 0x1;
+ pixel_freq = 2700000000UL / 2;
+ break;
+
+ case 5400:
+ vco_div = 0x2;
+ pixel_freq = 5400000000UL / 4;
+ break;
+
+ case 8100:
+ vco_div = 0x0;
+ pixel_freq = 8100000000UL / 6;
+ break;
+
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+
+ writel(vco_div, edp->edp + DP_PHY_VCO_DIV);
+
+ clk_set_rate(edp->dp_link_hw.clk, dp_opts->link_rate * 100000);
+ clk_set_rate(edp->dp_pixel_hw.clk, pixel_freq);
+
+ return 0;
+}
+
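+/*
+ * Bring-up order: power the lanes, wait for the common block to report
+ * ready, program SSC and the PLL, then step the DP_PHY_CFG state machine
+ * and poll for PLL lock and PHY ready. The exact meaning of the polled
+ * status bits (BIT(7), BIT(0), BIT(1)) is assumed from the downstream
+ * bring-up sequence this mirrors.
+ */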
+static int qcom_edp_phy_power_on(struct phy *phy)
+{
+ const struct qcom_edp *edp = phy_get_drvdata(phy);
+ int timeout;
+ int ret;
+ u32 val;
+
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ edp->edp + DP_PHY_PD_CTL);
+ writel(0xfc, edp->edp + DP_PHY_MODE);
+
+ timeout = readl_poll_timeout(edp->pll + QSERDES_V4_COM_CMN_STATUS,
+ val, val & BIT(7), 5, 200);
+ if (timeout)
+ return timeout;
+
+ writel(0x01, edp->tx0 + TXn_LDO_CONFIG);
+ writel(0x01, edp->tx1 + TXn_LDO_CONFIG);
+ writel(0x00, edp->tx0 + TXn_LANE_MODE_1);
+ writel(0x00, edp->tx1 + TXn_LANE_MODE_1);
+
+ ret = qcom_edp_configure_ssc(edp);
+ if (ret)
+ return ret;
+
+ ret = qcom_edp_configure_pll(edp);
+ if (ret)
+ return ret;
+
+ /* TX Lane configuration */
+ writel(0x05, edp->edp + DP_PHY_TX0_TX1_LANE_CTL);
+ writel(0x05, edp->edp + DP_PHY_TX2_TX3_LANE_CTL);
+
+ /* TX-0 register configuration */
+ writel(0x03, edp->tx0 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(0x0f, edp->tx0 + TXn_CLKBUF_ENABLE);
+ writel(0x03, edp->tx0 + TXn_RESET_TSYNC_EN);
+ writel(0x01, edp->tx0 + TXn_TRAN_DRVR_EMP_EN);
+ writel(0x04, edp->tx0 + TXn_TX_BAND);
+
+ /* TX-1 register configuration */
+ writel(0x03, edp->tx1 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(0x0f, edp->tx1 + TXn_CLKBUF_ENABLE);
+ writel(0x03, edp->tx1 + TXn_RESET_TSYNC_EN);
+ writel(0x01, edp->tx1 + TXn_TRAN_DRVR_EMP_EN);
+ writel(0x04, edp->tx1 + TXn_TX_BAND);
+
+ ret = qcom_edp_set_vco_div(edp);
+ if (ret)
+ return ret;
+
+ writel(0x01, edp->edp + DP_PHY_CFG);
+ writel(0x05, edp->edp + DP_PHY_CFG);
+ writel(0x01, edp->edp + DP_PHY_CFG);
+ writel(0x09, edp->edp + DP_PHY_CFG);
+
+ writel(0x20, edp->pll + QSERDES_V4_COM_RESETSM_CNTRL);
+
+ timeout = readl_poll_timeout(edp->pll + QSERDES_V4_COM_C_READY_STATUS,
+ val, val & BIT(0), 500, 10000);
+ if (timeout)
+ return timeout;
+
+ writel(0x19, edp->edp + DP_PHY_CFG);
+ writel(0x1f, edp->tx0 + TXn_HIGHZ_DRVR_EN);
+ writel(0x04, edp->tx0 + TXn_HIGHZ_DRVR_EN);
+ writel(0x00, edp->tx0 + TXn_TX_POL_INV);
+ writel(0x1f, edp->tx1 + TXn_HIGHZ_DRVR_EN);
+ writel(0x04, edp->tx1 + TXn_HIGHZ_DRVR_EN);
+ writel(0x00, edp->tx1 + TXn_TX_POL_INV);
+ writel(0x10, edp->tx0 + TXn_TX_DRV_LVL_OFFSET);
+ writel(0x10, edp->tx1 + TXn_TX_DRV_LVL_OFFSET);
+ writel(0x11, edp->tx0 + TXn_RES_CODE_LANE_OFFSET_TX0);
+ writel(0x11, edp->tx0 + TXn_RES_CODE_LANE_OFFSET_TX1);
+ writel(0x11, edp->tx1 + TXn_RES_CODE_LANE_OFFSET_TX0);
+ writel(0x11, edp->tx1 + TXn_RES_CODE_LANE_OFFSET_TX1);
+
+ writel(0x10, edp->tx0 + TXn_TX_EMP_POST1_LVL);
+ writel(0x10, edp->tx1 + TXn_TX_EMP_POST1_LVL);
+ writel(0x1f, edp->tx0 + TXn_TX_DRV_LVL);
+ writel(0x1f, edp->tx1 + TXn_TX_DRV_LVL);
+
+ writel(0x4, edp->tx0 + TXn_HIGHZ_DRVR_EN);
+ writel(0x3, edp->tx0 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(0x4, edp->tx1 + TXn_HIGHZ_DRVR_EN);
+ writel(0x0, edp->tx1 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(0x3, edp->edp + DP_PHY_CFG_1);
+
+ writel(0x18, edp->edp + DP_PHY_CFG);
+ usleep_range(100, 1000);
+
+ writel(0x19, edp->edp + DP_PHY_CFG);
+
+ return readl_poll_timeout(edp->edp + DP_PHY_STATUS,
+ val, val & BIT(1), 500, 10000);
+}
+
+static int qcom_edp_phy_power_off(struct phy *phy)
+{
+ const struct qcom_edp *edp = phy_get_drvdata(phy);
+
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, edp->edp + DP_PHY_PD_CTL);
+
+ return 0;
+}
+
+static int qcom_edp_phy_exit(struct phy *phy)
+{
+ struct qcom_edp *edp = phy_get_drvdata(phy);
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(edp->clks), edp->clks);
+ regulator_bulk_disable(ARRAY_SIZE(edp->supplies), edp->supplies);
+
+ return 0;
+}
+
+static const struct phy_ops qcom_edp_ops = {
+ .init = qcom_edp_phy_init,
+ .configure = qcom_edp_phy_configure,
+ .power_on = qcom_edp_phy_power_on,
+ .power_off = qcom_edp_phy_power_off,
+ .exit = qcom_edp_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Embedded Display Port PLL driver block diagram for branch clocks
+ *
+ * +------------------------------+
+ * | EDP_VCO_CLK |
+ * | |
+ * | +-------------------+ |
+ * | | (EDP PLL/VCO) | |
+ * | +---------+---------+ |
+ * | v |
+ * | +----------+-----------+ |
+ * | | hsclk_divsel_clk_src | |
+ * | +----------+-----------+ |
+ * +------------------------------+
+ * |
+ * +---------<---------v------------>----------+
+ * | |
+ * +--------v----------------+ |
+ * | edp_phy_pll_link_clk | |
+ * | link_clk | |
+ * +--------+----------------+ |
+ * | |
+ * | |
+ * v v
+ * Input to DISPCC block |
+ * for link clk, crypto clk |
+ * and interface clock |
+ * |
+ * |
+ * +--------<------------+-----------------+---<---+
+ * | | |
+ * +----v---------+ +--------v-----+ +--------v------+
+ * | vco_divided | | vco_divided | | vco_divided |
+ * | _clk_src | | _clk_src | | _clk_src |
+ * | | | | | |
+ * |divsel_six | | divsel_two | | divsel_four |
+ * +-------+------+ +-----+--------+ +--------+------+
+ * | | |
+ * v---->----------v-------------<------v
+ * |
+ * +----------+-----------------+
+ * | edp_phy_pll_vco_div_clk |
+ * +---------+------------------+
+ * |
+ * v
+ * Input to DISPCC block
+ * for EDP pixel clock
+ *
+ */
+static int qcom_edp_dp_pixel_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ switch (req->rate) {
+ case 1620000000UL / 2:
+ case 2700000000UL / 2:
+		/* 5.4 GHz (div 4) and 8.1 GHz (div 6) yield this same 1.35 GHz pixel rate as 2.7 GHz (div 2) */
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned long
+qcom_edp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ const struct qcom_edp *edp = container_of(hw, struct qcom_edp, dp_pixel_hw);
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ return 1620000000UL / 2;
+ case 2700:
+ return 2700000000UL / 2;
+ case 5400:
+ return 5400000000UL / 4;
+ case 8100:
+ return 8100000000UL / 6;
+ default:
+ return 0;
+ }
+}
+
+static const struct clk_ops qcom_edp_dp_pixel_clk_ops = {
+ .determine_rate = qcom_edp_dp_pixel_clk_determine_rate,
+ .recalc_rate = qcom_edp_dp_pixel_clk_recalc_rate,
+};
+
+static int qcom_edp_dp_link_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ switch (req->rate) {
+ case 162000000:
+ case 270000000:
+ case 540000000:
+ case 810000000:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned long
+qcom_edp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ const struct qcom_edp *edp = container_of(hw, struct qcom_edp, dp_link_hw);
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ case 2700:
+ case 5400:
+ case 8100:
+ return dp_opts->link_rate * 100000;
+
+ default:
+ return 0;
+ }
+}
+
+static const struct clk_ops qcom_edp_dp_link_clk_ops = {
+ .determine_rate = qcom_edp_dp_link_clk_determine_rate,
+ .recalc_rate = qcom_edp_dp_link_clk_recalc_rate,
+};
+
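+/*
+ * Export the link and pixel clocks to the display clock controller: cell 0
+ * is the link clock, cell 1 the VCO-divided pixel clock. Reusing the one
+ * clk_init_data is fine because the clk core consumes it during
+ * registration.
+ */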
+static int qcom_edp_clks_register(struct qcom_edp *edp, struct device_node *np)
+{
+ struct clk_hw_onecell_data *data;
+ struct clk_init_data init = { };
+ int ret;
+
+ data = devm_kzalloc(edp->dev, struct_size(data, hws, 2), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ init.ops = &qcom_edp_dp_link_clk_ops;
+ init.name = "edp_phy_pll_link_clk";
+ edp->dp_link_hw.init = &init;
+ ret = devm_clk_hw_register(edp->dev, &edp->dp_link_hw);
+ if (ret)
+ return ret;
+
+ init.ops = &qcom_edp_dp_pixel_clk_ops;
+ init.name = "edp_phy_pll_vco_div_clk";
+ edp->dp_pixel_hw.init = &init;
+ ret = devm_clk_hw_register(edp->dev, &edp->dp_pixel_hw);
+ if (ret)
+ return ret;
+
+ data->hws[0] = &edp->dp_link_hw;
+ data->hws[1] = &edp->dp_pixel_hw;
+ data->num = 2;
+
+ return devm_of_clk_add_hw_provider(edp->dev, of_clk_hw_onecell_get, data);
+}
+
+static int qcom_edp_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct qcom_edp *edp;
+ int ret;
+
+ edp = devm_kzalloc(dev, sizeof(*edp), GFP_KERNEL);
+ if (!edp)
+ return -ENOMEM;
+
+ edp->dev = dev;
+
+ edp->edp = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(edp->edp))
+ return PTR_ERR(edp->edp);
+
+ edp->tx0 = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(edp->tx0))
+ return PTR_ERR(edp->tx0);
+
+ edp->tx1 = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(edp->tx1))
+ return PTR_ERR(edp->tx1);
+
+ edp->pll = devm_platform_ioremap_resource(pdev, 3);
+ if (IS_ERR(edp->pll))
+ return PTR_ERR(edp->pll);
+
+ edp->clks[0].id = "aux";
+ edp->clks[1].id = "cfg_ahb";
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(edp->clks), edp->clks);
+ if (ret)
+ return ret;
+
+ edp->supplies[0].supply = "vdda-phy";
+ edp->supplies[1].supply = "vdda-pll";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(edp->supplies), edp->supplies);
+ if (ret)
+ return ret;
+
+ ret = qcom_edp_clks_register(edp, pdev->dev.of_node);
+ if (ret)
+ return ret;
+
+ edp->phy = devm_phy_create(dev, pdev->dev.of_node, &qcom_edp_ops);
+ if (IS_ERR(edp->phy)) {
+ dev_err(dev, "failed to register phy\n");
+ return PTR_ERR(edp->phy);
+ }
+
+ phy_set_drvdata(edp->phy, edp);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id qcom_edp_phy_match_table[] = {
+ { .compatible = "qcom,sc8180x-edp-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_edp_phy_match_table);
+
+static struct platform_driver qcom_edp_phy_driver = {
+ .probe = qcom_edp_phy_probe,
+ .driver = {
+ .name = "qcom-edp-phy",
+ .of_match_table = qcom_edp_phy_match_table,
+ },
+};
+
+module_platform_driver(qcom_edp_phy_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm eDP QMP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index c96639d5f581..8ea87c69f463 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -2866,6 +2866,215 @@ static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
};
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x04),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xd8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_TX_ADAPT_POST_THRESH, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RATE_SLEW_CNTRL1, 0x0b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xcc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xcc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0xc5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xad),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xc7),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xef),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x05),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
+};
+
+/* Register names should be validated; they might differ for this PHY */
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x22),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_G3S2_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x99),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+};
+
struct qmp_phy;
/* struct qmp_phy_cfg - per-PHY initialization config */
@@ -3094,6 +3303,10 @@ static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
"aux", "ref_clk_src", "com_aux"
};
+static const char * const sm8450_ufs_phy_clk_l[] = {
+ "qref", "ref", "ref_aux",
+};
+
static const char * const sdm845_ufs_phy_clk_l[] = {
"ref", "ref_aux",
};
@@ -4090,6 +4303,94 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
+static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8350_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
+ .tx_tbl = sm8350_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
+ .rx_tbl = sm8350_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
+ .pcs_tbl = sm8350_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
+ .clk_list = sm8450_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
+ .tx_tbl = sm8450_qmp_gen3x1_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
+ .rx_tbl = sm8450_qmp_gen3x1_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
+ .pcs_tbl = sm8450_qmp_gen3x1_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
+ .pcs_misc_tbl = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
+ .tx_tbl = sm8450_qmp_gen4x2_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl),
+ .rx_tbl = sm8450_qmp_gen4x2_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl),
+ .pcs_tbl = sm8450_qmp_gen4x2_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl),
+ .pcs_misc_tbl = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+
+ .is_dual_lane_phy = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
.nlanes = 1,
@@ -5749,6 +6050,18 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sm8350-qmp-usb3-uni-phy",
.data = &sm8350_usb3_uniphy_cfg,
}, {
+ .compatible = "qcom,sm8450-qmp-gen3x1-pcie-phy",
+ .data = &sm8450_qmp_gen3x1_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy",
+ .data = &sm8450_qmp_gen4x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-ufs-phy",
+ .data = &sm8450_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-usb3-phy",
+ .data = &sm8350_usb3phy_cfg,
+ }, {
.compatible = "qcom,qcm2290-qmp-usb3-phy",
.data = &qcm2290_usb3phy_cfg,
},
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index e15f461065bb..06b2556ed93a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -551,6 +551,7 @@
/* Only for QMP V4 PHY - QSERDES COM registers */
#define QSERDES_V4_COM_BG_TIMER 0x00c
#define QSERDES_V4_COM_SSC_EN_CENTER 0x010
+#define QSERDES_V4_COM_SSC_ADJ_PER1 0x014
#define QSERDES_V4_COM_SSC_PER1 0x01c
#define QSERDES_V4_COM_SSC_PER2 0x020
#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0 0x024
@@ -1069,6 +1070,16 @@
#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x828
/* Only for QMP V5 PHY - QSERDES COM registers */
+#define QSERDES_V5_COM_SSC_EN_CENTER 0x010
+#define QSERDES_V5_COM_SSC_PER1 0x01c
+#define QSERDES_V5_COM_SSC_PER2 0x020
+#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0 0x024
+#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0 0x028
+#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1 0x030
+#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1 0x034
+#define QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN 0x044
+#define QSERDES_V5_COM_CLK_ENABLE1 0x048
+#define QSERDES_V5_COM_SYSCLK_BUF_ENABLE 0x050
#define QSERDES_V5_COM_PLL_IVCO 0x058
#define QSERDES_V5_COM_CP_CTRL_MODE0 0x074
#define QSERDES_V5_COM_CP_CTRL_MODE1 0x078
@@ -1078,16 +1089,35 @@
#define QSERDES_V5_COM_PLL_CCTRL_MODE1 0x088
#define QSERDES_V5_COM_SYSCLK_EN_SEL 0x094
#define QSERDES_V5_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V5_COM_LOCK_CMP_CFG 0x0a8
#define QSERDES_V5_COM_LOCK_CMP1_MODE0 0x0ac
#define QSERDES_V5_COM_LOCK_CMP2_MODE0 0x0b0
#define QSERDES_V5_COM_LOCK_CMP1_MODE1 0x0b4
#define QSERDES_V5_COM_DEC_START_MODE0 0x0bc
#define QSERDES_V5_COM_LOCK_CMP2_MODE1 0x0b8
#define QSERDES_V5_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V5_COM_DIV_FRAC_START1_MODE0 0x0cc
+#define QSERDES_V5_COM_DIV_FRAC_START2_MODE0 0x0d0
+#define QSERDES_V5_COM_DIV_FRAC_START3_MODE0 0x0d4
+#define QSERDES_V5_COM_DIV_FRAC_START1_MODE1 0x0d8
+#define QSERDES_V5_COM_DIV_FRAC_START2_MODE1 0x0dc
+#define QSERDES_V5_COM_DIV_FRAC_START3_MODE1 0x0e0
#define QSERDES_V5_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V5_COM_VCO_TUNE1_MODE0 0x110
+#define QSERDES_V5_COM_VCO_TUNE2_MODE0 0x114
+#define QSERDES_V5_COM_VCO_TUNE1_MODE1 0x118
+#define QSERDES_V5_COM_VCO_TUNE2_MODE1 0x11c
#define QSERDES_V5_COM_VCO_TUNE_INITVAL2 0x124
+#define QSERDES_V5_COM_CLK_SELECT 0x154
#define QSERDES_V5_COM_HSCLK_SEL 0x158
#define QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V5_COM_CORECLK_DIV_MODE0 0x168
+#define QSERDES_V5_COM_CORECLK_DIV_MODE1 0x16c
+#define QSERDES_V5_COM_CORE_CLK_EN 0x174
+#define QSERDES_V5_COM_CMN_CONFIG 0x17c
+#define QSERDES_V5_COM_CMN_MISC1 0x19c
+#define QSERDES_V5_COM_CMN_MODE 0x1a4
+#define QSERDES_V5_COM_VCO_DC_LEVEL_CTRL 0x1a8
#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
@@ -1112,6 +1142,12 @@
#define QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x180
#define QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x184
+/* Only for QMP V5_20 PHY - TX registers */
+#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V5_20_TX_LANE_MODE_1 0x78
+#define QSERDES_V5_20_TX_LANE_MODE_2 0x7c
+
/* Only for QMP V5 PHY - RX registers */
#define QSERDES_V5_RX_UCDR_FO_GAIN 0x008
#define QSERDES_V5_RX_UCDR_SO_GAIN 0x014
@@ -1130,6 +1166,7 @@
#define QSERDES_V5_RX_AC_JTAG_ENABLE 0x068
#define QSERDES_V5_RX_AC_JTAG_MODE 0x078
#define QSERDES_V5_RX_RX_TERM_BW 0x080
+#define QSERDES_V5_RX_TX_ADAPT_POST_THRESH 0x0cc
#define QSERDES_V5_RX_VGA_CAL_CNTRL1 0x0d4
#define QSERDES_V5_RX_VGA_CAL_CNTRL2 0x0d8
#define QSERDES_V5_RX_GM_CAL 0x0dc
@@ -1167,6 +1204,73 @@
#define QSERDES_V5_RX_DCC_CTRL1 0x1a8
#define QSERDES_V5_RX_VTH_CODE 0x1b0
+/* Only for QMP V5_20 PHY - RX registers */
+#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2 0x008
+#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3 0x00c
+#define QSERDES_V5_20_RX_UCDR_PI_CONTROLS 0x020
+#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1 0x02c
+#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3 0x030
+#define QSERDES_V5_20_RX_RX_IDAC_SAOFFSET 0x07c
+#define QSERDES_V5_20_RX_DFE_3 0x090
+#define QSERDES_V5_20_RX_DFE_DAC_ENABLE1 0x0b4
+#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1 0x0c4
+#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2 0x0c8
+#define QSERDES_V5_20_RX_VGA_CAL_MAN_VAL 0x0dc
+#define QSERDES_V5_20_RX_GM_CAL 0x0ec
+#define QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4 0x108
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1 0x164
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2 0x168
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3 0x16c
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5 0x174
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6 0x178
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B0 0x17c
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B1 0x180
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B2 0x184
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B3 0x188
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B4 0x18c
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B5 0x190
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B6 0x194
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B0 0x198
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B1 0x19c
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B2 0x1a0
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B3 0x1a4
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B4 0x1a8
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B5 0x1ac
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B6 0x1b0
+#define QSERDES_V5_20_RX_PHPRE_CTRL 0x1b4
+#define QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x1c0
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210 0x1f4
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3 0x1f8
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210 0x1fc
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3 0x200
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210 0x204
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3 0x208
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3 0x210
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3 0x218
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3 0x220
+
+/* Only for QMP V5 PHY - USB/PCIe PCS registers */
+#define QPHY_V5_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V5_PCS_G3S2_PRE_GAIN 0x170
+#define QPHY_V5_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V5_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V5_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V5_PCS_EQ_CONFIG3 0x1e4
+
+/* Only for QMP V5 PHY - PCS_PCIE registers */
+#define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
+#define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94
+#define QPHY_V5_PCS_PCIE_EQ_CONFIG2 0xa8
+
+/* Only for QMP V5_20 PHY - PCIe PCS registers */
+#define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x01c
+#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090
+#define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1 0x0a0
+#define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
+#define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c
+#define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3 0x184
+
/* Only for QMP V5 PHY - UFS PCS registers */
#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 1938365abbb3..eca77e44a4c1 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -204,6 +204,7 @@ struct rockchip_usb2phy_port {
* @dcd_retries: The retry count used to track Data contact
* detection process.
* @edev: extcon device for notification registration
+ * @irq: muxed interrupt for single irq configuration
* @phy_cfg: phy register configuration, assigned by driver data.
* @ports: phy port instance.
*/
@@ -218,6 +219,7 @@ struct rockchip_usb2phy {
enum power_supply_type chg_type;
u8 dcd_retries;
struct extcon_dev *edev;
+ int irq;
const struct rockchip_usb2phy_cfg *phy_cfg;
struct rockchip_usb2phy_port ports[USB2PHY_NUM_PORTS];
};
@@ -750,7 +752,6 @@ static void rockchip_chg_detect_work(struct work_struct *work)
fallthrough;
case USB_CHG_STATE_SECONDARY_DONE:
rphy->chg_state = USB_CHG_STATE_DETECTED;
- delay = 0;
fallthrough;
case USB_CHG_STATE_DETECTED:
/* put the controller in normal mode */
@@ -927,6 +928,102 @@ static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
return IRQ_NONE;
}
+static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
+{
+ struct rockchip_usb2phy *rphy = data;
+ struct rockchip_usb2phy_port *rport;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int index;
+
+ for (index = 0; index < rphy->phy_cfg->num_ports; index++) {
+ rport = &rphy->ports[index];
+ if (!rport->phy)
+ continue;
+
+		/*
+		 * Handle the linestate irq for both the otg and host ports;
+		 * OR the results so the irq counts as handled if any port
+		 * consumed it.
+		 */
+		ret |= rockchip_usb2phy_linestate_irq(irq, rport);
+ }
+
+ return ret;
+}
+
+static int rockchip_usb2phy_port_irq_init(struct rockchip_usb2phy *rphy,
+ struct rockchip_usb2phy_port *rport,
+ struct device_node *child_np)
+{
+ int ret;
+
+ /*
+	 * If the usb2 phy uses a combined irq for the otg and host ports,
+	 * there is no need to request the per-port irqs separately.
+ */
+ if (rphy->irq > 0)
+ return 0;
+
+ switch (rport->port_id) {
+ case USB2PHY_PORT_HOST:
+ rport->ls_irq = of_irq_get_byname(child_np, "linestate");
+ if (rport->ls_irq < 0) {
+ dev_err(rphy->dev, "no linestate irq provided\n");
+ return rport->ls_irq;
+ }
+
+ ret = devm_request_threaded_irq(rphy->dev, rport->ls_irq, NULL,
+ rockchip_usb2phy_linestate_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy", rport);
+ if (ret) {
+ dev_err(rphy->dev, "failed to request linestate irq handle\n");
+ return ret;
+ }
+ break;
+ case USB2PHY_PORT_OTG:
+ /*
+ * Some SoCs use one interrupt with otg-id/otg-bvalid/linestate
+ * interrupts muxed together, so probe the otg-mux interrupt first,
+ * if not found, then look for the regular interrupts one by one.
+ */
+ rport->otg_mux_irq = of_irq_get_byname(child_np, "otg-mux");
+ if (rport->otg_mux_irq > 0) {
+ ret = devm_request_threaded_irq(rphy->dev, rport->otg_mux_irq,
+ NULL,
+ rockchip_usb2phy_otg_mux_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy_otg",
+ rport);
+ if (ret) {
+ dev_err(rphy->dev,
+ "failed to request otg-mux irq handle\n");
+ return ret;
+ }
+ } else {
+ rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid");
+ if (rport->bvalid_irq < 0) {
+ dev_err(rphy->dev, "no vbus valid irq provided\n");
+			return rport->bvalid_irq;
+ }
+
+ ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq,
+ NULL,
+ rockchip_usb2phy_bvalid_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy_bvalid",
+ rport);
+ if (ret) {
+ dev_err(rphy->dev,
+ "failed to request otg-bvalid irq handle\n");
+ return ret;
+ }
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
struct rockchip_usb2phy_port *rport,
struct device_node *child_np)
@@ -940,18 +1037,9 @@ static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
mutex_init(&rport->mutex);
INIT_DELAYED_WORK(&rport->sm_work, rockchip_usb2phy_sm_work);
- rport->ls_irq = of_irq_get_byname(child_np, "linestate");
- if (rport->ls_irq < 0) {
- dev_err(rphy->dev, "no linestate irq provided\n");
- return rport->ls_irq;
- }
-
- ret = devm_request_threaded_irq(rphy->dev, rport->ls_irq, NULL,
- rockchip_usb2phy_linestate_irq,
- IRQF_ONESHOT,
- "rockchip_usb2phy", rport);
+ ret = rockchip_usb2phy_port_irq_init(rphy, rport, child_np);
if (ret) {
- dev_err(rphy->dev, "failed to request linestate irq handle\n");
+ dev_err(rphy->dev, "failed to setup host irq\n");
return ret;
}
@@ -1000,43 +1088,10 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work);
INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work);
- /*
- * Some SoCs use one interrupt with otg-id/otg-bvalid/linestate
- * interrupts muxed together, so probe the otg-mux interrupt first,
- * if not found, then look for the regular interrupts one by one.
- */
- rport->otg_mux_irq = of_irq_get_byname(child_np, "otg-mux");
- if (rport->otg_mux_irq > 0) {
- ret = devm_request_threaded_irq(rphy->dev, rport->otg_mux_irq,
- NULL,
- rockchip_usb2phy_otg_mux_irq,
- IRQF_ONESHOT,
- "rockchip_usb2phy_otg",
- rport);
- if (ret) {
- dev_err(rphy->dev,
- "failed to request otg-mux irq handle\n");
- goto out;
- }
- } else {
- rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid");
- if (rport->bvalid_irq < 0) {
- dev_err(rphy->dev, "no vbus valid irq provided\n");
- ret = rport->bvalid_irq;
- goto out;
- }
-
- ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq,
- NULL,
- rockchip_usb2phy_bvalid_irq,
- IRQF_ONESHOT,
- "rockchip_usb2phy_bvalid",
- rport);
- if (ret) {
- dev_err(rphy->dev,
- "failed to request otg-bvalid irq handle\n");
- goto out;
- }
+ ret = rockchip_usb2phy_port_irq_init(rphy, rport, child_np);
+ if (ret) {
+		dev_err(rphy->dev, "failed to init irq for otg port\n");
+ goto out;
}
if (!IS_ERR(rphy->edev)) {
@@ -1074,12 +1129,19 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!dev->parent || !dev->parent->of_node)
- return -EINVAL;
+ if (!dev->parent || !dev->parent->of_node) {
+ rphy->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,usbgrf");
+ if (IS_ERR(rphy->grf)) {
+ dev_err(dev, "failed to locate usbgrf\n");
+ return PTR_ERR(rphy->grf);
+ }
+	} else {
- rphy->grf = syscon_node_to_regmap(dev->parent->of_node);
- if (IS_ERR(rphy->grf))
- return PTR_ERR(rphy->grf);
+ rphy->grf = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(rphy->grf))
+ return PTR_ERR(rphy->grf);
+ }
if (of_device_is_compatible(np, "rockchip,rv1108-usb2phy")) {
rphy->usbgrf =
@@ -1091,16 +1153,26 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
rphy->usbgrf = NULL;
}
- if (of_property_read_u32(np, "reg", &reg)) {
+ if (of_property_read_u32_index(np, "reg", 0, &reg)) {
dev_err(dev, "the reg property is not assigned in %pOFn node\n",
np);
return -EINVAL;
}
+	/*
+	 * Support #address-cells = <2>: the first cell then carries the high
+	 * 32 bits of the address, so fall back to the second cell when the
+	 * first reads as zero.
+	 */
+ if (reg == 0) {
+ if (of_property_read_u32_index(np, "reg", 1, &reg)) {
+ dev_err(dev, "the reg property is not assigned in %pOFn node\n",
+ np);
+ return -EINVAL;
+ }
+ }
+
rphy->dev = dev;
phy_cfgs = match->data;
rphy->chg_state = USB_CHG_STATE_UNDEFINED;
rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ rphy->irq = platform_get_irq_optional(pdev, 0);
platform_set_drvdata(pdev, rphy);
ret = rockchip_usb2phy_extcon_register(rphy);
@@ -1180,6 +1252,20 @@ next_child:
}
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ if (rphy->irq > 0) {
+ ret = devm_request_threaded_irq(rphy->dev, rphy->irq, NULL,
+ rockchip_usb2phy_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy",
+ rphy);
+ if (ret) {
+ dev_err(rphy->dev,
+ "failed to request usb2phy irq handle\n");
+ goto put_child;
+ }
+ }
+
return PTR_ERR_OR_ZERO(provider);
put_child:
@@ -1418,6 +1504,69 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
{ /* sentinel */ }
};
+static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
+ {
+ .reg = 0xfe8a0000,
+ .num_ports = 2,
+ .clkout_ctl = { 0x0008, 4, 4, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x0000, 8, 0, 0, 0x1d1 },
+ .bvalid_det_en = { 0x0080, 2, 2, 0, 1 },
+ .bvalid_det_st = { 0x0084, 2, 2, 0, 1 },
+ .bvalid_det_clr = { 0x0088, 2, 2, 0, 1 },
+ .utmi_avalid = { 0x00c0, 10, 10, 0, 1 },
+ .utmi_bvalid = { 0x00c0, 9, 9, 0, 1 },
+ },
+ [USB2PHY_PORT_HOST] = {
+ /* Select suspend control from controller */
+ .phy_sus = { 0x0004, 8, 0, 0x1d2, 0x1d2 },
+ .ls_det_en = { 0x0080, 1, 1, 0, 1 },
+ .ls_det_st = { 0x0084, 1, 1, 0, 1 },
+ .ls_det_clr = { 0x0088, 1, 1, 0, 1 },
+ .utmi_ls = { 0x00c0, 17, 16, 0, 1 },
+ .utmi_hstdet = { 0x00c0, 19, 19, 0, 1 }
+ }
+ },
+ .chg_det = {
+ .opmode = { 0x0000, 3, 0, 5, 1 },
+ .cp_det = { 0x00c0, 24, 24, 0, 1 },
+ .dcp_det = { 0x00c0, 23, 23, 0, 1 },
+ .dp_det = { 0x00c0, 25, 25, 0, 1 },
+ .idm_sink_en = { 0x0008, 8, 8, 0, 1 },
+ .idp_sink_en = { 0x0008, 7, 7, 0, 1 },
+ .idp_src_en = { 0x0008, 9, 9, 0, 1 },
+ .rdm_pdwn_en = { 0x0008, 10, 10, 0, 1 },
+ .vdm_src_en = { 0x0008, 12, 12, 0, 1 },
+ .vdp_src_en = { 0x0008, 11, 11, 0, 1 },
+ },
+ },
+ {
+ .reg = 0xfe8b0000,
+ .num_ports = 2,
+ .clkout_ctl = { 0x0008, 4, 4, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x0000, 8, 0, 0x1d2, 0x1d1 },
+ .ls_det_en = { 0x0080, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0084, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0088, 0, 0, 0, 1 },
+ .utmi_ls = { 0x00c0, 5, 4, 0, 1 },
+ .utmi_hstdet = { 0x00c0, 7, 7, 0, 1 }
+ },
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0004, 8, 0, 0x1d2, 0x1d1 },
+ .ls_det_en = { 0x0080, 1, 1, 0, 1 },
+ .ls_det_st = { 0x0084, 1, 1, 0, 1 },
+ .ls_det_clr = { 0x0088, 1, 1, 0, 1 },
+ .utmi_ls = { 0x00c0, 17, 16, 0, 1 },
+ .utmi_hstdet = { 0x00c0, 19, 19, 0, 1 }
+ }
+ },
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
{
.reg = 0x100,
@@ -1467,6 +1616,7 @@ static const struct of_device_id rockchip_usb2phy_dt_match[] = {
{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
{ .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs },
+ { .compatible = "rockchip,rk3568-usb2phy", .data = &rk3568_phy_cfgs },
{ .compatible = "rockchip,rv1108-usb2phy", .data = &rv1108_phy_cfgs },
{}
};
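On rk3568 every PHY event arrives on the single platform IRQ requested at the end of probe (rphy->irq), so the rockchip_usb2phy_irq handler presumably demultiplexes to the existing per-event handlers. A minimal sketch, assuming a ports array and a phy_cfg pointer on struct rockchip_usb2phy (the exact layout is not shown in these hunks):

	static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
	{
		struct rockchip_usb2phy *rphy = data;
		struct rockchip_usb2phy_port *rport;
		irqreturn_t ret = IRQ_NONE;
		unsigned int index;

		for (index = 0; index < rphy->phy_cfg->num_ports; index++) {
			rport = &rphy->ports[index];
			if (!rport->phy)
				continue;

			/* each handler checks and clears only its own status bit */
			if (rockchip_usb2phy_bvalid_irq(irq, rport) == IRQ_HANDLED)
				ret = IRQ_HANDLED;
			if (rockchip_usb2phy_linestate_irq(irq, rport) == IRQ_HANDLED)
				ret = IRQ_HANDLED;
		}

		return ret;
	}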
diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig
index a3970e0f89da..8ae644756352 100644
--- a/drivers/phy/socionext/Kconfig
+++ b/drivers/phy/socionext/Kconfig
@@ -43,4 +43,4 @@ config PHY_UNIPHIER_AHCI
select GENERIC_PHY
help
Enable this to support PHY implemented in AHCI controller
- on UniPhier SoCs. This driver supports PXs2 and PXs3 SoCs.
+ on UniPhier SoCs. This driver supports Pro4, PXs2 and PXs3 SoCs.
diff --git a/drivers/phy/socionext/phy-uniphier-ahci.c b/drivers/phy/socionext/phy-uniphier-ahci.c
index 7427c40bf4ae..28cf3efe0695 100644
--- a/drivers/phy/socionext/phy-uniphier-ahci.c
+++ b/drivers/phy/socionext/phy-uniphier-ahci.c
@@ -19,8 +19,9 @@
struct uniphier_ahciphy_priv {
struct device *dev;
void __iomem *base;
- struct clk *clk, *clk_parent;
- struct reset_control *rst, *rst_parent;
+ struct clk *clk, *clk_parent, *clk_parent_gio;
+ struct reset_control *rst, *rst_parent, *rst_parent_gio;
+ struct reset_control *rst_pm, *rst_tx, *rst_rx;
const struct uniphier_ahciphy_soc_data *data;
};
@@ -28,10 +29,30 @@ struct uniphier_ahciphy_soc_data {
int (*init)(struct uniphier_ahciphy_priv *priv);
int (*power_on)(struct uniphier_ahciphy_priv *priv);
int (*power_off)(struct uniphier_ahciphy_priv *priv);
+ bool is_legacy;
bool is_ready_high;
bool is_phy_clk;
};
+/* for Pro4 */
+#define CKCTRL0 0x0
+#define CKCTRL0_CK_OFF BIT(9)
+#define CKCTRL0_NCY_MASK GENMASK(8, 4)
+#define CKCTRL0_NCY5_MASK GENMASK(3, 2)
+#define CKCTRL0_PRESCALE_MASK GENMASK(1, 0)
+#define CKCTRL1 0x4
+#define CKCTRL1_LOS_LVL_MASK GENMASK(20, 16)
+#define CKCTRL1_TX_LVL_MASK GENMASK(12, 8)
+#define RXTXCTRL 0x8
+#define RXTXCTRL_RX_EQ_VALL_MASK GENMASK(31, 29)
+#define RXTXCTRL_RX_DPLL_MODE_MASK GENMASK(28, 26)
+#define RXTXCTRL_TX_ATTEN_MASK GENMASK(14, 12)
+#define RXTXCTRL_TX_BOOST_MASK GENMASK(11, 8)
+#define RXTXCTRL_TX_EDGERATE_MASK GENMASK(3, 2)
+#define RXTXCTRL_TX_CKO_EN BIT(0)
+#define RSTPWR 0x30
+#define RSTPWR_RX_EN_VAL BIT(18)
+
/* for PXs2/PXs3 */
#define CKCTRL 0x0
#define CKCTRL_P0_READY BIT(15)
@@ -50,6 +71,128 @@ struct uniphier_ahciphy_soc_data {
#define RXCTRL_LOS_BIAS_MASK GENMASK(10, 8)
#define RXCTRL_RX_EQ_MASK GENMASK(2, 0)
+static int uniphier_ahciphy_pro4_init(struct uniphier_ahciphy_priv *priv)
+{
+ u32 val;
+
+ /* set phy MPLL parameters */
+ val = readl(priv->base + CKCTRL0);
+ val &= ~CKCTRL0_NCY_MASK;
+ val |= FIELD_PREP(CKCTRL0_NCY_MASK, 0x6);
+ val &= ~CKCTRL0_NCY5_MASK;
+ val |= FIELD_PREP(CKCTRL0_NCY5_MASK, 0x2);
+ val &= ~CKCTRL0_PRESCALE_MASK;
+ val |= FIELD_PREP(CKCTRL0_PRESCALE_MASK, 0x1);
+ writel(val, priv->base + CKCTRL0);
+
+ /* setup phy control parameters */
+ val = readl(priv->base + CKCTRL1);
+ val &= ~CKCTRL1_LOS_LVL_MASK;
+ val |= FIELD_PREP(CKCTRL1_LOS_LVL_MASK, 0x10);
+ val &= ~CKCTRL1_TX_LVL_MASK;
+ val |= FIELD_PREP(CKCTRL1_TX_LVL_MASK, 0x06);
+ writel(val, priv->base + CKCTRL1);
+
+ val = readl(priv->base + RXTXCTRL);
+ val &= ~RXTXCTRL_RX_EQ_VALL_MASK;
+ val |= FIELD_PREP(RXTXCTRL_RX_EQ_VALL_MASK, 0x6);
+ val &= ~RXTXCTRL_RX_DPLL_MODE_MASK;
+ val |= FIELD_PREP(RXTXCTRL_RX_DPLL_MODE_MASK, 0x3);
+ val &= ~RXTXCTRL_TX_ATTEN_MASK;
+ val |= FIELD_PREP(RXTXCTRL_TX_ATTEN_MASK, 0x3);
+ val &= ~RXTXCTRL_TX_BOOST_MASK;
+ val |= FIELD_PREP(RXTXCTRL_TX_BOOST_MASK, 0x5);
+ val &= ~RXTXCTRL_TX_EDGERATE_MASK;
+ val |= FIELD_PREP(RXTXCTRL_TX_EDGERATE_MASK, 0x0);
+ writel(val, priv->base + RXTXCTRL);
+
+ return 0;
+}
+
+static int uniphier_ahciphy_pro4_power_on(struct uniphier_ahciphy_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ /* enable reference clock for phy */
+ val = readl(priv->base + CKCTRL0);
+ val &= ~CKCTRL0_CK_OFF;
+ writel(val, priv->base + CKCTRL0);
+
+ /* enable TX clock */
+ val = readl(priv->base + RXTXCTRL);
+ val |= RXTXCTRL_TX_CKO_EN;
+ writel(val, priv->base + RXTXCTRL);
+
+ /* wait until RX is ready */
+ ret = readl_poll_timeout(priv->base + RSTPWR, val,
+ !(val & RSTPWR_RX_EN_VAL), 200, 2000);
+ if (ret) {
+ dev_err(priv->dev, "Failed to check whether Rx is ready\n");
+ goto out_disable_clock;
+ }
+
+ /* release all reset */
+ ret = reset_control_deassert(priv->rst_pm);
+ if (ret) {
+ dev_err(priv->dev, "Failed to release PM reset\n");
+ goto out_disable_clock;
+ }
+
+ ret = reset_control_deassert(priv->rst_tx);
+ if (ret) {
+ dev_err(priv->dev, "Failed to release Tx reset\n");
+ goto out_reset_pm_assert;
+ }
+
+ ret = reset_control_deassert(priv->rst_rx);
+ if (ret) {
+ dev_err(priv->dev, "Failed to release Rx reset\n");
+ goto out_reset_tx_assert;
+ }
+
+ return 0;
+
+out_reset_tx_assert:
+ reset_control_assert(priv->rst_tx);
+out_reset_pm_assert:
+ reset_control_assert(priv->rst_pm);
+
+out_disable_clock:
+ /* disable TX clock */
+ val = readl(priv->base + RXTXCTRL);
+ val &= ~RXTXCTRL_TX_CKO_EN;
+ writel(val, priv->base + RXTXCTRL);
+
+ /* disable reference clock for phy */
+ val = readl(priv->base + CKCTRL0);
+ val |= CKCTRL0_CK_OFF;
+ writel(val, priv->base + CKCTRL0);
+
+ return ret;
+}
+
+static int uniphier_ahciphy_pro4_power_off(struct uniphier_ahciphy_priv *priv)
+{
+ u32 val;
+
+ reset_control_assert(priv->rst_rx);
+ reset_control_assert(priv->rst_tx);
+ reset_control_assert(priv->rst_pm);
+
+ /* disable TX clock */
+ val = readl(priv->base + RXTXCTRL);
+ val &= ~RXTXCTRL_TX_CKO_EN;
+ writel(val, priv->base + RXTXCTRL);
+
+ /* disable reference clock for phy */
+ val = readl(priv->base + CKCTRL0);
+ val |= CKCTRL0_CK_OFF;
+ writel(val, priv->base + CKCTRL0);
+
+ return 0;
+}
+
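The Pro4 power-on path waits for the RX power state with readl_poll_timeout() from <linux/iopoll.h>. For readers unfamiliar with the helper, it behaves roughly like the open-coded loop below; this is a simplified expansion (addr, val, cond, sleep_us and timeout_us are the macro's arguments), and the real macro also tolerates a zero sleep interval:

	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);

	for (;;) {
		val = readl(addr);
		if (cond)
			break;
		if (ktime_after(ktime_get(), deadline)) {
			val = readl(addr);	/* one last sample after timeout */
			break;
		}
		usleep_range(sleep_us / 2, sleep_us);
	}
	return cond ? 0 : -ETIMEDOUT;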
static void uniphier_ahciphy_pxs2_enable(struct uniphier_ahciphy_priv *priv,
bool enable)
{
@@ -142,14 +285,22 @@ static int uniphier_ahciphy_init(struct phy *phy)
struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy);
int ret;
- ret = clk_prepare_enable(priv->clk_parent);
+ ret = clk_prepare_enable(priv->clk_parent_gio);
if (ret)
return ret;
- ret = reset_control_deassert(priv->rst_parent);
+ ret = clk_prepare_enable(priv->clk_parent);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_parent_gio);
if (ret)
goto out_clk_disable;
+ ret = reset_control_deassert(priv->rst_parent);
+ if (ret)
+ goto out_rst_gio_assert;
+
if (priv->data->init) {
ret = priv->data->init(priv);
if (ret)
@@ -160,8 +311,12 @@ static int uniphier_ahciphy_init(struct phy *phy)
out_rst_assert:
reset_control_assert(priv->rst_parent);
+out_rst_gio_assert:
+ reset_control_assert(priv->rst_parent_gio);
out_clk_disable:
clk_disable_unprepare(priv->clk_parent);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_parent_gio);
return ret;
}
@@ -171,7 +326,9 @@ static int uniphier_ahciphy_exit(struct phy *phy)
struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy);
reset_control_assert(priv->rst_parent);
+ reset_control_assert(priv->rst_parent_gio);
clk_disable_unprepare(priv->clk_parent);
+ clk_disable_unprepare(priv->clk_parent_gio);
return 0;
}
@@ -265,6 +422,28 @@ static int uniphier_ahciphy_probe(struct platform_device *pdev)
if (IS_ERR(priv->rst))
return PTR_ERR(priv->rst);
+ if (priv->data->is_legacy) {
+ priv->clk_parent_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_parent_gio))
+ return PTR_ERR(priv->clk_parent_gio);
+ priv->rst_parent_gio =
+ devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_parent_gio))
+ return PTR_ERR(priv->rst_parent_gio);
+
+ priv->rst_pm = devm_reset_control_get_shared(dev, "pm");
+ if (IS_ERR(priv->rst_pm))
+ return PTR_ERR(priv->rst_pm);
+
+ priv->rst_tx = devm_reset_control_get_shared(dev, "tx");
+ if (IS_ERR(priv->rst_tx))
+ return PTR_ERR(priv->rst_tx);
+
+ priv->rst_rx = devm_reset_control_get_shared(dev, "rx");
+ if (IS_ERR(priv->rst_rx))
+ return PTR_ERR(priv->rst_rx);
+ }
+
phy = devm_phy_create(dev, dev->of_node, &uniphier_ahciphy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
@@ -279,9 +458,18 @@ static int uniphier_ahciphy_probe(struct platform_device *pdev)
return 0;
}
+static const struct uniphier_ahciphy_soc_data uniphier_pro4_data = {
+ .init = uniphier_ahciphy_pro4_init,
+ .power_on = uniphier_ahciphy_pro4_power_on,
+ .power_off = uniphier_ahciphy_pro4_power_off,
+ .is_legacy = true,
+ .is_phy_clk = false,
+};
+
static const struct uniphier_ahciphy_soc_data uniphier_pxs2_data = {
.power_on = uniphier_ahciphy_pxs2_power_on,
.power_off = uniphier_ahciphy_pxs2_power_off,
+ .is_legacy = false,
.is_ready_high = false,
.is_phy_clk = false,
};
@@ -290,12 +478,17 @@ static const struct uniphier_ahciphy_soc_data uniphier_pxs3_data = {
.init = uniphier_ahciphy_pxs3_init,
.power_on = uniphier_ahciphy_pxs2_power_on,
.power_off = uniphier_ahciphy_pxs2_power_off,
+ .is_legacy = false,
.is_ready_high = true,
.is_phy_clk = true,
};
static const struct of_device_id uniphier_ahciphy_match[] = {
{
+ .compatible = "socionext,uniphier-pro4-ahci-phy",
+ .data = &uniphier_pro4_data,
+ },
+ {
.compatible = "socionext,uniphier-pxs2-ahci-phy",
.data = &uniphier_pxs2_data,
},
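Each compatible entry carries a uniphier_ahciphy_soc_data pointer, so one probe path serves every SoC and the phy_ops simply dispatch through it. A condensed sketch of that dispatch; the wrapper shape is illustrative and the clock/reset handling and error unwinding are elided:

	static int uniphier_ahciphy_power_on(struct phy *phy)
	{
		struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy);
		int ret = 0;

		/* per-SoC callback selected via the of_device_id .data pointer */
		if (priv->data->power_on)
			ret = priv->data->power_on(priv);

		return ret;
	}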
diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
index 6bdbd1f214dd..ebca296ef123 100644
--- a/drivers/phy/socionext/phy-uniphier-pcie.c
+++ b/drivers/phy/socionext/phy-uniphier-pcie.c
@@ -27,6 +27,7 @@
#define TESTI_DAT_MASK GENMASK(13, 6)
#define TESTI_ADR_MASK GENMASK(5, 1)
#define TESTI_WR_EN BIT(0)
+#define TESTIO_PHY_SHIFT 16
#define PCL_PHY_TEST_O 0x2004
#define TESTO_DAT_MASK GENMASK(7, 0)
@@ -39,6 +40,10 @@
#define SG_USBPCIESEL 0x590
#define SG_USBPCIESEL_PCIE BIT(0)
+/* SC */
+#define SC_US3SRCSEL 0x2244
+#define SC_US3SRCSEL_2LANE GENMASK(9, 8)
+
#define PCL_PHY_R00 0
#define RX_EQ_ADJ_EN BIT(3) /* enable for EQ adjustment */
#define PCL_PHY_R06 6
@@ -47,6 +52,9 @@
#define PCL_PHY_R26 26
#define VCO_CTRL GENMASK(7, 4) /* Tx VCO adjustment value */
#define VCO_CTRL_INIT_VAL 5
+#define PCL_PHY_R28 28
+#define VCOPLL_CLMP GENMASK(3, 2) /* Tx VCOPLL clamp mode */
+#define VCOPLL_CLMP_VAL 0
struct uniphier_pciephy_priv {
void __iomem *base;
@@ -58,43 +66,57 @@ struct uniphier_pciephy_priv {
struct uniphier_pciephy_soc_data {
bool is_legacy;
+ bool is_dual_phy;
void (*set_phymode)(struct regmap *regmap);
};
static void uniphier_pciephy_testio_write(struct uniphier_pciephy_priv *priv,
- u32 data)
+ int id, u32 data)
{
+ if (id)
+ data <<= TESTIO_PHY_SHIFT;
+
/* need to read TESTO twice after accessing TESTI */
writel(data, priv->base + PCL_PHY_TEST_I);
readl(priv->base + PCL_PHY_TEST_O);
readl(priv->base + PCL_PHY_TEST_O);
}
+static u32 uniphier_pciephy_testio_read(struct uniphier_pciephy_priv *priv, int id)
+{
+ u32 val = readl(priv->base + PCL_PHY_TEST_O);
+
+ if (id)
+ val >>= TESTIO_PHY_SHIFT;
+
+ return val & TESTO_DAT_MASK;
+}
+
static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv,
- u32 reg, u32 mask, u32 param)
+ int id, u32 reg, u32 mask, u32 param)
{
u32 val;
/* read previous data */
val = FIELD_PREP(TESTI_DAT_MASK, 1);
val |= FIELD_PREP(TESTI_ADR_MASK, reg);
- uniphier_pciephy_testio_write(priv, val);
- val = readl(priv->base + PCL_PHY_TEST_O) & TESTO_DAT_MASK;
+ uniphier_pciephy_testio_write(priv, id, val);
+ val = uniphier_pciephy_testio_read(priv, id);
/* update value */
val &= ~mask;
val |= mask & param;
val = FIELD_PREP(TESTI_DAT_MASK, val);
val |= FIELD_PREP(TESTI_ADR_MASK, reg);
- uniphier_pciephy_testio_write(priv, val);
- uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN);
- uniphier_pciephy_testio_write(priv, val);
+ uniphier_pciephy_testio_write(priv, id, val);
+ uniphier_pciephy_testio_write(priv, id, val | TESTI_WR_EN);
+ uniphier_pciephy_testio_write(priv, id, val);
/* read current data as dummy */
val = FIELD_PREP(TESTI_DAT_MASK, 1);
val |= FIELD_PREP(TESTI_ADR_MASK, reg);
- uniphier_pciephy_testio_write(priv, val);
- readl(priv->base + PCL_PHY_TEST_O);
+ uniphier_pciephy_testio_write(priv, id, val);
+ uniphier_pciephy_testio_read(priv, id);
}
static void uniphier_pciephy_assert(struct uniphier_pciephy_priv *priv)
@@ -120,7 +142,7 @@ static int uniphier_pciephy_init(struct phy *phy)
{
struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
u32 val;
- int ret;
+ int ret, id;
ret = clk_prepare_enable(priv->clk);
if (ret)
@@ -148,12 +170,16 @@ static int uniphier_pciephy_init(struct phy *phy)
if (priv->data->is_legacy)
return 0;
- uniphier_pciephy_set_param(priv, PCL_PHY_R00,
+ for (id = 0; id < (priv->data->is_dual_phy ? 2 : 1); id++) {
+ uniphier_pciephy_set_param(priv, id, PCL_PHY_R00,
RX_EQ_ADJ_EN, RX_EQ_ADJ_EN);
- uniphier_pciephy_set_param(priv, PCL_PHY_R06, RX_EQ_ADJ,
+ uniphier_pciephy_set_param(priv, id, PCL_PHY_R06, RX_EQ_ADJ,
FIELD_PREP(RX_EQ_ADJ, RX_EQ_ADJ_VAL));
- uniphier_pciephy_set_param(priv, PCL_PHY_R26, VCO_CTRL,
+ uniphier_pciephy_set_param(priv, id, PCL_PHY_R26, VCO_CTRL,
FIELD_PREP(VCO_CTRL, VCO_CTRL_INIT_VAL));
+ uniphier_pciephy_set_param(priv, id, PCL_PHY_R28, VCOPLL_CLMP,
+ FIELD_PREP(VCOPLL_CLMP, VCOPLL_CLMP_VAL));
+ }
usleep_range(1, 10);
uniphier_pciephy_deassert(priv);
@@ -261,17 +287,31 @@ static void uniphier_pciephy_ld20_setmode(struct regmap *regmap)
SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE);
}
+static void uniphier_pciephy_nx1_setmode(struct regmap *regmap)
+{
+ regmap_update_bits(regmap, SC_US3SRCSEL,
+ SC_US3SRCSEL_2LANE, SC_US3SRCSEL_2LANE);
+}
+
static const struct uniphier_pciephy_soc_data uniphier_pro5_data = {
.is_legacy = true,
};
static const struct uniphier_pciephy_soc_data uniphier_ld20_data = {
.is_legacy = false,
+ .is_dual_phy = false,
.set_phymode = uniphier_pciephy_ld20_setmode,
};
static const struct uniphier_pciephy_soc_data uniphier_pxs3_data = {
.is_legacy = false,
+ .is_dual_phy = false,
+};
+
+static const struct uniphier_pciephy_soc_data uniphier_nx1_data = {
+ .is_legacy = false,
+ .is_dual_phy = true,
+ .set_phymode = uniphier_pciephy_nx1_setmode,
};
static const struct of_device_id uniphier_pciephy_match[] = {
@@ -287,6 +327,10 @@ static const struct of_device_id uniphier_pciephy_match[] = {
.compatible = "socionext,uniphier-pxs3-pcie-phy",
.data = &uniphier_pxs3_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-pcie-phy",
+ .data = &uniphier_nx1_data,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, uniphier_pciephy_match);
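These PHY patches manipulate register fields almost exclusively through GENMASK()/FIELD_PREP()/FIELD_GET() from <linux/bits.h> and <linux/bitfield.h>. For illustration only, a minimal userspace approximation of the idiom, mirroring the CKCTRL0 NCY update from the Pro4 init path (the kernel versions add compile-time sanity checks this sketch omits):

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(m, v)  (((uint32_t)(v) << __builtin_ctz(m)) & (m))
	#define FIELD_GET(m, r)   (((r) & (m)) >> __builtin_ctz(m))

	int main(void)
	{
		uint32_t ncy_mask = GENMASK(8, 4);	/* CKCTRL0_NCY_MASK */
		uint32_t reg = 0;

		/* mirrors: val &= ~CKCTRL0_NCY_MASK; val |= FIELD_PREP(..., 0x6); */
		reg &= ~ncy_mask;
		reg |= FIELD_PREP(ncy_mask, 0x6);

		/* prints reg=0x00000060 ncy=6 */
		printf("reg=0x%08x ncy=%u\n", reg, (unsigned)FIELD_GET(ncy_mask, reg));
		return 0;
	}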
diff --git a/drivers/phy/socionext/phy-uniphier-usb3hs.c b/drivers/phy/socionext/phy-uniphier-usb3hs.c
index a9bc74121f38..8c8673df0084 100644
--- a/drivers/phy/socionext/phy-uniphier-usb3hs.c
+++ b/drivers/phy/socionext/phy-uniphier-usb3hs.c
@@ -447,6 +447,10 @@ static const struct of_device_id uniphier_u3hsphy_match[] = {
.compatible = "socionext,uniphier-pxs3-usb3-hsphy",
.data = &uniphier_pxs3_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-usb3-hsphy",
+ .data = &uniphier_pxs3_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_u3hsphy_match);
diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c
index 6700645bcbe6..f402ed8732fd 100644
--- a/drivers/phy/socionext/phy-uniphier-usb3ss.c
+++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c
@@ -22,11 +22,13 @@
#include <linux/reset.h>
#define SSPHY_TESTI 0x0
-#define SSPHY_TESTO 0x4
#define TESTI_DAT_MASK GENMASK(13, 6)
#define TESTI_ADR_MASK GENMASK(5, 1)
#define TESTI_WR_EN BIT(0)
+#define SSPHY_TESTO 0x4
+#define TESTO_DAT_MASK GENMASK(7, 0)
+
#define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) }
#define CDR_CPD_TRIM PHY_F(7, 3, 0) /* RxPLL charge pump current */
@@ -84,12 +86,12 @@ static void uniphier_u3ssphy_set_param(struct uniphier_u3ssphy_priv *priv,
val = FIELD_PREP(TESTI_DAT_MASK, 1);
val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
uniphier_u3ssphy_testio_write(priv, val);
- val = readl(priv->base + SSPHY_TESTO);
+ val = readl(priv->base + SSPHY_TESTO) & TESTO_DAT_MASK;
/* update value */
- val &= ~FIELD_PREP(TESTI_DAT_MASK, field_mask);
+ val &= ~field_mask;
data = field_mask & (p->value << p->field.lsb);
- val = FIELD_PREP(TESTI_DAT_MASK, data);
+ val = FIELD_PREP(TESTI_DAT_MASK, data | val);
val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
uniphier_u3ssphy_testio_write(priv, val);
uniphier_u3ssphy_testio_write(priv, val | TESTI_WR_EN);
@@ -328,6 +330,10 @@ static const struct of_device_id uniphier_u3ssphy_match[] = {
.compatible = "socionext,uniphier-pxs3-usb3-ssphy",
.data = &uniphier_ld20_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-usb3-ssphy",
+ .data = &uniphier_ld20_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_u3ssphy_match);
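The usb3ss hunk above fixes a genuine read-modify-write bug: the old code masked the TESTO readback with the TESTI data mask (bits 13:6) instead of the TESTO data mask (bits 7:0), and then overwrote rather than merged the preserved bits when re-encoding. A small worked example of the corrected sequence, with illustrative values:

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(m, v)  (((uint32_t)(v) << __builtin_ctz(m)) & (m))

	#define TESTI_DAT_MASK    GENMASK(13, 6)
	#define TESTO_DAT_MASK    GENMASK(7, 0)

	int main(void)
	{
		uint32_t testo = 0xa5;		/* illustrative readback value */
		uint32_t field_mask = 0x0f;	/* illustrative field inside the byte */
		uint32_t value = 0x07;

		/* fixed sequence: keep only the 8 data bits, merge the new
		 * field, then place the whole byte into TESTI's data lane */
		uint32_t val = testo & TESTO_DAT_MASK;	/* 0xa5 */
		val &= ~field_mask;			/* 0xa0 */
		val = FIELD_PREP(TESTI_DAT_MASK, (field_mask & value) | val);

		printf("TESTI word: 0x%08x\n", val);	/* 0xa7 << 6 = 0x29c0 */
		return 0;
	}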
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index e4f4a9be5132..2ce9bfd783d4 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -672,17 +672,15 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
usbphyc->vdda1v1 = devm_regulator_get(dev, "vdda1v1");
if (IS_ERR(usbphyc->vdda1v1)) {
- ret = PTR_ERR(usbphyc->vdda1v1);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get vdda1v1 supply: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(usbphyc->vdda1v1),
+ "failed to get vdda1v1 supply\n");
goto clk_disable;
}
usbphyc->vdda1v8 = devm_regulator_get(dev, "vdda1v8");
if (IS_ERR(usbphyc->vdda1v8)) {
- ret = PTR_ERR(usbphyc->vdda1v8);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get vdda1v8 supply: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(usbphyc->vdda1v8),
+ "failed to get vdda1v8 supply\n");
goto clk_disable;
}
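dev_err_probe() returns the error passed to it and logs the message, except that -EPROBE_DEFER is demoted to debug level and recorded for the devices_deferred debugfs file, which is why the explicit -EPROBE_DEFER test can be dropped here. A sketch of the idiom, shown on a hypothetical "vbus" supply:

	supply = devm_regulator_get(dev, "vbus");
	if (IS_ERR(supply))
		return dev_err_probe(dev, PTR_ERR(supply),
				     "failed to get vbus supply\n");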
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 963de5913e50..aa5237eacd29 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -455,7 +455,7 @@ tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
if (!name) {
of_node_put(ports);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
np = of_get_child_by_name(ports, name);
kfree(name);
diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c
index 47482f106fab..76c5595f0859 100644
--- a/drivers/phy/ti/phy-omap-control.c
+++ b/drivers/phy/ti/phy-omap-control.c
@@ -26,7 +26,7 @@ void omap_control_pcie_pcs(struct device *dev, u8 delay)
u32 val;
struct omap_control_phy *control_phy;
- if (IS_ERR(dev) || !dev) {
+ if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: invalid device\n", __func__);
return;
}
@@ -61,7 +61,7 @@ void omap_control_phy_power(struct device *dev, int on)
unsigned long rate;
struct omap_control_phy *control_phy;
- if (IS_ERR(dev) || !dev) {
+ if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: invalid device\n", __func__);
return;
}
@@ -202,7 +202,7 @@ void omap_control_usb_set_mode(struct device *dev,
{
struct omap_control_phy *ctrl_phy;
- if (IS_ERR(dev) || !dev)
+ if (IS_ERR_OR_NULL(dev))
return;
ctrl_phy = dev_get_drvdata(dev);
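IS_ERR_OR_NULL() simply folds the two tests into one. Kernel error pointers encode small negative errnos at the very top of the address space, so both checks stay cheap; a simplified userspace model of that encoding, for illustration:

	#include <stdio.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static inline int IS_ERR_OR_NULL(const void *ptr)
	{
		return !ptr || IS_ERR(ptr);
	}

	int main(void)
	{
		void *p = ERR_PTR(-22);	/* -EINVAL */

		printf("IS_ERR_OR_NULL(NULL)=%d IS_ERR_OR_NULL(p)=%d err=%ld\n",
		       IS_ERR_OR_NULL(NULL), IS_ERR_OR_NULL(p), PTR_ERR(p));
		return 0;
	}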
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 6a961d5f8726..6fc56d6598e2 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -31,6 +31,24 @@ config DEBUG_PINCTRL
help
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
+config PINCTRL_AMD
+ tristate "AMD GPIO pin control"
+ depends on HAS_IOMEM
+ depends on ACPI || COMPILE_TEST
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ The driver for memory-mapped GPIO functionality on AMD platforms
+ (x86 or ARM). Most of the pins are usually muxed to some other
+ functionality by firmware, so only a small number are available
+ for GPIO use.
+
+ Requires ACPI/FDT device enumeration code to set up a platform
+ device.
+
config PINCTRL_APPLE_GPIO
tristate "Apple SoC GPIO pin controller driver"
depends on ARCH_APPLE
@@ -69,20 +87,6 @@ config PINCTRL_AS3722
open drain configuration for the GPIO pins of AS3722 devices. It also
supports the GPIO functionality through gpiolib.
-config PINCTRL_AXP209
- tristate "X-Powers AXP209 PMIC pinctrl and GPIO Support"
- depends on MFD_AXP20X
- depends on OF
- select PINMUX
- select GENERIC_PINCONF
- select GPIOLIB
- help
- AXP PMICs provides multiple GPIOs that can be muxed for different
- functions. This driver bundles a pinctrl driver to select the function
- muxing and a GPIO driver to handle the GPIO when the GPIO function is
- selected.
- Say yes to enable pinctrl and GPIO support for the AXP209 PMIC
-
config PINCTRL_AT91
bool "AT91 pinctrl driver"
depends on OF
@@ -109,23 +113,19 @@ config PINCTRL_AT91PIO4
Say Y here to enable the at91 pinctrl/gpio driver for Atmel PIO4
controller available on sama5d2 SoC.
-config PINCTRL_AMD
- tristate "AMD GPIO pin control"
- depends on HAS_IOMEM
- depends on ACPI || COMPILE_TEST
- select GPIOLIB
- select GPIOLIB_IRQCHIP
+config PINCTRL_AXP209
+ tristate "X-Powers AXP209 PMIC pinctrl and GPIO Support"
+ depends on MFD_AXP20X
+ depends on OF
select PINMUX
- select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
help
- driver for memory mapped GPIO functionality on AMD platforms
- (x86 or arm).Most pins are usually muxed to some other
- functionality by firmware,so only a small amount is available
- for gpio use.
-
- Requires ACPI/FDT device enumeration code to set up a platform
- device.
+ AXP PMICs provide multiple GPIOs that can be muxed for different
+ functions. This driver bundles a pinctrl driver to select the function
+ muxing and a GPIO driver to handle the GPIO when the GPIO function is
+ selected.
+ Say Y to enable pinctrl and GPIO support for the AXP209 PMIC.
config PINCTRL_BM1880
bool "Bitmain BM1880 Pinctrl driver"
@@ -136,13 +136,13 @@ config PINCTRL_BM1880
Pinctrl driver for Bitmain BM1880 SoC.
config PINCTRL_DA850_PUPD
- tristate "TI DA850/OMAP-L138/AM18XX pullup/pulldown groups"
+ tristate "TI DA850/OMAP-L138/AM18XX pull-up and pull-down groups"
depends on OF && (ARCH_DAVINCI_DA850 || COMPILE_TEST)
select PINCONF
select GENERIC_PINCONF
help
Driver for TI DA850/OMAP-L138/AM18XX pinconf. Used to control
- pullup/pulldown pin groups.
+ pull-up and pull-down pin groups.
config PINCTRL_DA9062
tristate "Dialog Semiconductor DA9062 PMIC pinctrl and GPIO Support"
@@ -154,7 +154,7 @@ config PINCTRL_DA9062
function muxing and a GPIO driver to handle the GPIO when the GPIO
function is selected.
- Say yes to enable pinctrl and GPIO support for the DA9062 PMIC.
+ Say Y to enable pinctrl and GPIO support for the DA9062 PMIC.
config PINCTRL_DIGICOLOR
bool
@@ -162,12 +162,93 @@ config PINCTRL_DIGICOLOR
select PINMUX
select GENERIC_PINCONF
+config PINCTRL_EQUILIBRIUM
+ tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
+ depends on OF && HAS_IOMEM
+ depends on X86 || COMPILE_TEST
+ select PINMUX
+ select PINCONF
+ select GPIOLIB
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ help
+ The Equilibrium driver is a pinctrl and GPIO driver for the Intel
+ Lightning Mountain network processor SoC that supports both the GPIO
+ and pin control frameworks. It provides interfaces to set up pin
+ muxing, assign the desired pin functions and configure GPIO attributes
+ for LGM SoC pins. Pin muxing and pin configuration settings are
+ retrieved from the device tree.
+
+config PINCTRL_GEMINI
+ bool
+ depends on ARCH_GEMINI
+ default ARCH_GEMINI
+ select PINMUX
+ select GENERIC_PINCONF
+ select MFD_SYSCON
+
+config PINCTRL_INGENIC
+ bool "Pinctrl driver for the Ingenic JZ47xx SoCs"
+ default MACH_INGENIC
+ depends on OF
+ depends on MIPS || COMPILE_TEST
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select REGMAP_MMIO
+
+config PINCTRL_K210
+ bool "Pinctrl driver for the Canaan Kendryte K210 SoC"
+ depends on RISCV && SOC_CANAAN && OF
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select OF_GPIO
+ select REGMAP_MMIO
+ default SOC_CANAAN
+ help
+ Add support for the Canaan Kendryte K210 RISC-V SoC Field
+ Programmable IO Array (FPIOA) controller.
+
+config PINCTRL_KEEMBAY
+ tristate "Pinctrl driver for Intel Keem Bay SoC"
+ depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
+ depends on HAS_IOMEM
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select GPIO_GENERIC
+ help
+ This selects the pin control driver for the Intel Keem Bay SoC.
+ It provides pin configuration functions such as pull-up, pull-down,
+ interrupt, drive strength, sec lock, Schmitt trigger, slew
+ rate control and direction control. If built as a module, it
+ will be called pinctrl-keembay.
+
config PINCTRL_LANTIQ
bool
depends on LANTIQ
select PINMUX
select PINCONF
+config PINCTRL_FALCON
+ bool
+ depends on SOC_FALCON
+ depends on PINCTRL_LANTIQ
+
+config PINCTRL_XWAY
+ bool
+ depends on SOC_TYPE_XWAY
+ depends on PINCTRL_LANTIQ
+
config PINCTRL_LPC18XX
bool "NXP LPC18XX/43XX SCU pinctrl driver"
depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
@@ -177,18 +258,16 @@ config PINCTRL_LPC18XX
help
Pinctrl driver for NXP LPC18xx/43xx System Control Unit (SCU).
-config PINCTRL_FALCON
- bool
- depends on SOC_FALCON
- depends on PINCTRL_LANTIQ
-
-config PINCTRL_GEMINI
- bool
- depends on ARCH_GEMINI
- default ARCH_GEMINI
+config PINCTRL_MAX77620
+ tristate "MAX77620/MAX20024 Pincontrol support"
+ depends on MFD_MAX77620 && OF
select PINMUX
select GENERIC_PINCONF
- select MFD_SYSCON
+ help
+ Say Y here to enable pin control support for the Maxim MAX77620 PMIC.
+ This PMIC has 8 GPIO pins that work as GPIOs as well as special
+ functions in alternate mode. This driver also configures push-pull,
+ open-drain, FPS slots, etc.
config PINCTRL_MCP23S08_I2C
tristate
@@ -212,99 +291,47 @@ config PINCTRL_MCP23S08
This provides a GPIO interface supporting inputs and outputs and a
corresponding interrupt-controller.
-config PINCTRL_OXNAS
- bool
+config PINCTRL_MICROCHIP_SGPIO
+ bool "Pinctrl driver for Microsemi/Microchip Serial GPIO"
depends on OF
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
+ depends on HAS_IOMEM
select GPIOLIB
- select OF_GPIO
select GPIOLIB_IRQCHIP
- select MFD_SYSCON
-
-config PINCTRL_ROCKCHIP
- tristate "Rockchip gpio and pinctrl driver"
- depends on ARCH_ROCKCHIP || COMPILE_TEST
- depends on OF
- select GPIOLIB
- select PINMUX
select GENERIC_PINCONF
- select GENERIC_IRQ_CHIP
- select MFD_SYSCON
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
select OF_GPIO
- default ARCH_ROCKCHIP
help
- This support pinctrl and gpio driver for Rockchip SoCs.
+ Support for the serial GPIO interface used on Microsemi and
+ Microchip SoCs. By using a serial interface, the SIO
+ controller significantly extends the number of available
+ GPIOs with a minimum number of additional pins on the
+ device. The primary purpose of the SIO controller is to
+ connect control signals from SFP modules and to act as an
+ LED controller.
-config PINCTRL_SINGLE
- tristate "One-register-per-pin type device tree based pinctrl driver"
+config PINCTRL_OCELOT
+ bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
depends on OF
depends on HAS_IOMEM
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select GENERIC_PINCONF
- help
- This selects the device tree based generic pinctrl driver.
-
-config PINCTRL_SX150X
- bool "Semtech SX150x I2C GPIO expander pinctrl driver"
- depends on I2C=y
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
select GPIOLIB
select GPIOLIB_IRQCHIP
- select REGMAP
- help
- Say yes here to provide support for Semtech SX150x-series I2C
- GPIO expanders as pinctrl module.
- Compatible models include:
- - 8 bits: sx1508q, sx1502q
- - 16 bits: sx1509q, sx1506q
-
-config PINCTRL_PISTACHIO
- bool "IMG Pistachio SoC pinctrl driver"
- depends on OF && (MIPS || COMPILE_TEST)
- depends on GPIOLIB
- select PINMUX
select GENERIC_PINCONF
- select GPIOLIB_IRQCHIP
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
select OF_GPIO
- help
- This support pinctrl and gpio driver for IMG Pistachio SoC.
+ select REGMAP_MMIO
-config PINCTRL_ST
+config PINCTRL_OXNAS
bool
depends on OF
select PINMUX
select PINCONF
- select GPIOLIB_IRQCHIP
-
-config PINCTRL_STMFX
- tristate "STMicroelectronics STMFX GPIO expander pinctrl driver"
- depends on I2C
- depends on OF_GPIO
select GENERIC_PINCONF
+ select GPIOLIB
+ select OF_GPIO
select GPIOLIB_IRQCHIP
- select MFD_STMFX
- help
- Driver for STMicroelectronics Multi-Function eXpander (STMFX)
- GPIO expander.
- This provides a GPIO interface supporting inputs and outputs,
- and configuring push-pull, open-drain, and can also be used as
- interrupt-controller.
-
-config PINCTRL_MAX77620
- tristate "MAX77620/MAX20024 Pincontrol support"
- depends on MFD_MAX77620 && OF
- select PINMUX
- select GENERIC_PINCONF
- help
- Say Yes here to enable Pin control support for Maxim PMIC MAX77620.
- This PMIC has 8 GPIO pins that work as GPIO as well as special
- function in alternate mode. This driver also configure push-pull,
- open drain, FPS slots etc.
+ select MFD_SYSCON
config PINCTRL_PALMAS
tristate "Pinctrl driver for the PALMAS Series MFD devices"
@@ -334,41 +361,16 @@ config PINCTRL_PIC32MZDA
def_bool y if PIC32MZDA
select PINCTRL_PIC32
-config PINCTRL_ZYNQ
- bool "Pinctrl driver for Xilinx Zynq"
- depends on ARCH_ZYNQ
- select PINMUX
- select GENERIC_PINCONF
- help
- This selects the pinctrl driver for Xilinx Zynq.
-
-config PINCTRL_ZYNQMP
- tristate "Pinctrl driver for Xilinx ZynqMP"
- depends on ZYNQMP_FIRMWARE
+config PINCTRL_PISTACHIO
+ bool "IMG Pistachio SoC pinctrl driver"
+ depends on OF && (MIPS || COMPILE_TEST)
+ depends on GPIOLIB
select PINMUX
select GENERIC_PINCONF
- default ZYNQMP_FIRMWARE
- help
- This selects the pinctrl driver for Xilinx ZynqMP platform.
- This driver will query the pin information from the firmware
- and allow configuring the pins.
- Configuration can include the mux function to select on those
- pin(s)/group(s), and various pin configuration parameters
- such as pull-up, slew rate, etc.
- This driver can also be built as a module. If so, the module
- will be called pinctrl-zynqmp.
-
-config PINCTRL_INGENIC
- bool "Pinctrl driver for the Ingenic JZ47xx SoCs"
- default MACH_INGENIC
- depends on OF
- depends on MIPS || COMPILE_TEST
- select GENERIC_PINCONF
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select GPIOLIB
select GPIOLIB_IRQCHIP
- select REGMAP_MMIO
+ select OF_GPIO
+ help
+ This supports the pinctrl and GPIO driver for the IMG Pistachio SoC.
config PINCTRL_RK805
tristate "Pinctrl and GPIO driver for RK805 PMIC"
@@ -379,53 +381,92 @@ config PINCTRL_RK805
help
This selects the pinctrl driver for RK805.
-config PINCTRL_OCELOT
- bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
+config PINCTRL_ROCKCHIP
+ tristate "Rockchip gpio and pinctrl driver"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
- depends on HAS_IOMEM
select GPIOLIB
- select GPIOLIB_IRQCHIP
+ select PINMUX
select GENERIC_PINCONF
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_IRQ_CHIP
+ select MFD_SYSCON
select OF_GPIO
- select REGMAP_MMIO
+ default ARCH_ROCKCHIP
+ help
+ This supports the pinctrl and GPIO driver for Rockchip SoCs.
-config PINCTRL_MICROCHIP_SGPIO
- bool "Pinctrl driver for Microsemi/Microchip Serial GPIO"
+config PINCTRL_SINGLE
+ tristate "One-register-per-pin type device tree based pinctrl driver"
depends on OF
depends on HAS_IOMEM
- select GPIOLIB
- select GPIOLIB_IRQCHIP
- select GENERIC_PINCONF
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
- select OF_GPIO
+ select GENERIC_PINCONF
help
- Support for the serial GPIO interface used on Microsemi and
- Microchip SoC's. By using a serial interface, the SIO
- controller significantly extends the number of available
- GPIOs with a minimum number of additional pins on the
- device. The primary purpose of the SIO controller is to
- connect control signals from SFP modules and to act as an
- LED controller.
+ This selects the device tree based generic pinctrl driver.
-config PINCTRL_K210
- bool "Pinctrl driver for the Canaan Kendryte K210 SoC"
- depends on RISCV && SOC_CANAAN && OF
+config PINCTRL_ST
+ bool
+ depends on OF
+ select PINMUX
+ select PINCONF
+ select GPIOLIB_IRQCHIP
+
+config PINCTRL_STARFIVE
+ tristate "Pinctrl and GPIO driver for the StarFive JH7100 SoC"
+ depends on SOC_STARFIVE || COMPILE_TEST
+ depends on OF
+ default SOC_STARFIVE
+ select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
select GPIOLIB
+ select GPIOLIB_IRQCHIP
select OF_GPIO
- select REGMAP_MMIO
- default SOC_CANAAN
help
- Add support for the Canaan Kendryte K210 RISC-V SOC Field
- Programmable IO Array (FPIOA) controller.
+ Say Y here to support pin control on the StarFive JH7100 SoC.
+ This also provides an interface to the GPIO pins not used by other
+ peripherals, supporting inputs, outputs, pull-up/pull-down
+ configuration and interrupts on input changes.
-config PINCTRL_KEEMBAY
- tristate "Pinctrl driver for Intel Keem Bay SoC"
- depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
+config PINCTRL_STMFX
+ tristate "STMicroelectronics STMFX GPIO expander pinctrl driver"
+ depends on I2C
+ depends on OF_GPIO
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select MFD_STMFX
+ help
+ Driver for STMicroelectronics Multi-Function eXpander (STMFX)
+ GPIO expander.
+ This provides a GPIO interface supporting inputs and outputs,
+ configurable as push-pull or open-drain, and it can also be
+ used as an interrupt controller.
+
+config PINCTRL_SX150X
+ bool "Semtech SX150x I2C GPIO expander pinctrl driver"
+ depends on I2C=y
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select REGMAP
+ help
+ Say Y here to provide support for Semtech SX150x-series I2C
+ GPIO expanders as a pinctrl module.
+ Compatible models include:
+ - 8 bits: sx1508q, sx1502q
+ - 16 bits: sx1509q, sx1506q
+
+config PINCTRL_TB10X
+ bool
+ depends on OF && ARC_PLAT_TB10X
+ select GPIOLIB
+
+config PINCTRL_THUNDERBAY
+ tristate "Generic pinctrl and GPIO driver for Intel Thunder Bay SoC"
+ depends on ARCH_THUNDERBAY || (ARM64 && COMPILE_TEST)
depends on HAS_IOMEM
select PINMUX
select PINCONF
@@ -436,18 +477,45 @@ config PINCTRL_KEEMBAY
select GPIOLIB_IRQCHIP
select GPIO_GENERIC
help
- This selects pin control driver for the Intel Keembay SoC.
- It provides pin config functions such as pullup, pulldown,
- interrupt, drive strength, sec lock, schmitt trigger, slew
+ This selects the pin control driver for the Intel Thunder Bay SoC.
+ It provides pin configuration functions such as pull-up, pull-down,
+ interrupt, drive strength, sec lock, Schmitt trigger, slew
rate control and direction control. This module will be
- called as pinctrl-keembay.
+ called pinctrl-thunderbay.
+
+config PINCTRL_ZYNQ
+ bool "Pinctrl driver for Xilinx Zynq"
+ depends on ARCH_ZYNQ
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This selects the pinctrl driver for Xilinx Zynq.
+
+config PINCTRL_ZYNQMP
+ tristate "Pinctrl driver for Xilinx ZynqMP"
+ depends on ZYNQMP_FIRMWARE
+ select PINMUX
+ select GENERIC_PINCONF
+ default ZYNQMP_FIRMWARE
+ help
+ This selects the pinctrl driver for the Xilinx ZynqMP platform.
+ This driver will query the pin information from the firmware
+ and allow configuring the pins.
+ Configuration can include the mux function to select on those
+ pin(s)/group(s), and various pin configuration parameters
+ such as pull-up, slew rate, etc.
+ This driver can also be built as a module. If so, the module
+ will be called pinctrl-zynqmp.
source "drivers/pinctrl/actions/Kconfig"
source "drivers/pinctrl/aspeed/Kconfig"
source "drivers/pinctrl/bcm/Kconfig"
source "drivers/pinctrl/berlin/Kconfig"
+source "drivers/pinctrl/cirrus/Kconfig"
source "drivers/pinctrl/freescale/Kconfig"
source "drivers/pinctrl/intel/Kconfig"
+source "drivers/pinctrl/mediatek/Kconfig"
+source "drivers/pinctrl/meson/Kconfig"
source "drivers/pinctrl/mvebu/Kconfig"
source "drivers/pinctrl/nomadik/Kconfig"
source "drivers/pinctrl/nuvoton/Kconfig"
@@ -463,40 +531,7 @@ source "drivers/pinctrl/sunxi/Kconfig"
source "drivers/pinctrl/tegra/Kconfig"
source "drivers/pinctrl/ti/Kconfig"
source "drivers/pinctrl/uniphier/Kconfig"
-source "drivers/pinctrl/vt8500/Kconfig"
-source "drivers/pinctrl/mediatek/Kconfig"
-source "drivers/pinctrl/meson/Kconfig"
-source "drivers/pinctrl/cirrus/Kconfig"
source "drivers/pinctrl/visconti/Kconfig"
-
-config PINCTRL_XWAY
- bool
- depends on SOC_TYPE_XWAY
- depends on PINCTRL_LANTIQ
-
-config PINCTRL_TB10X
- bool
- depends on OF && ARC_PLAT_TB10X
- select GPIOLIB
-
-config PINCTRL_EQUILIBRIUM
- tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
- depends on OF && HAS_IOMEM
- depends on X86 || COMPILE_TEST
- select PINMUX
- select PINCONF
- select GPIOLIB
- select GPIO_GENERIC
- select GPIOLIB_IRQCHIP
- select GENERIC_PINCONF
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
-
- help
- Equilibrium pinctrl driver is a pinctrl & GPIO driver for Intel Lightning
- Mountain network processor SoC that supports both the linux GPIO and pin
- control frameworks. It provides interfaces to setup pinmux, assign desired
- pin functions, configure GPIO attributes for LGM SoC pins. Pinmux and
- pinconf settings are retrieved from device tree.
+source "drivers/pinctrl/vt8500/Kconfig"
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 5e63de2ffcf4..08c364d611f5 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -6,56 +6,60 @@ subdir-ccflags-$(CONFIG_DEBUG_PINCTRL) += -DDEBUG
obj-y += core.o pinctrl-utils.o
obj-$(CONFIG_PINMUX) += pinmux.o
obj-$(CONFIG_PINCONF) += pinconf.o
-obj-$(CONFIG_OF) += devicetree.o
obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
+obj-$(CONFIG_OF) += devicetree.o
+
+obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
obj-$(CONFIG_PINCTRL_APPLE_GPIO) += pinctrl-apple-gpio.o
obj-$(CONFIG_PINCTRL_ARTPEC6) += pinctrl-artpec6.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
-obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
-obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
+obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_BM1880) += pinctrl-bm1880.o
obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
-obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
+obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o
+obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
+obj-$(CONFIG_PINCTRL_K210) += pinctrl-k210.o
+obj-$(CONFIG_PINCTRL_KEEMBAY) += pinctrl-keembay.o
+obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
+obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
+obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
+obj-$(CONFIG_PINCTRL_LPC18XX) += pinctrl-lpc18xx.o
obj-$(CONFIG_PINCTRL_MAX77620) += pinctrl-max77620.o
obj-$(CONFIG_PINCTRL_MCP23S08_I2C) += pinctrl-mcp23s08_i2c.o
obj-$(CONFIG_PINCTRL_MCP23S08_SPI) += pinctrl-mcp23s08_spi.o
obj-$(CONFIG_PINCTRL_MCP23S08) += pinctrl-mcp23s08.o
-obj-$(CONFIG_PINCTRL_MESON) += meson/
+obj-$(CONFIG_PINCTRL_MICROCHIP_SGPIO) += pinctrl-microchip-sgpio.o
+obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
obj-$(CONFIG_PINCTRL_OXNAS) += pinctrl-oxnas.o
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
+obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
+obj-$(CONFIG_PINCTRL_STARFIVE) += pinctrl-starfive.o
+obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
+obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
-obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
-obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
-obj-$(CONFIG_PINCTRL_LPC18XX) += pinctrl-lpc18xx.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
-obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
-obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
-obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
+obj-$(CONFIG_PINCTRL_THUNDERBAY) += pinctrl-thunderbay.o
obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o
-obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
-obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
-obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
-obj-$(CONFIG_PINCTRL_MICROCHIP_SGPIO) += pinctrl-microchip-sgpio.o
-obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
-obj-$(CONFIG_PINCTRL_K210) += pinctrl-k210.o
-obj-$(CONFIG_PINCTRL_KEEMBAY) += pinctrl-keembay.o
+obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-y += actions/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
obj-y += bcm/
obj-$(CONFIG_PINCTRL_BERLIN) += berlin/
+obj-y += cirrus/
obj-y += freescale/
obj-$(CONFIG_X86) += intel/
+obj-y += mediatek/
+obj-$(CONFIG_PINCTRL_MESON) += meson/
obj-y += mvebu/
obj-y += nomadik/
obj-$(CONFIG_ARCH_NPCM7XX) += nuvoton/
@@ -68,9 +72,8 @@ obj-$(CONFIG_PINCTRL_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_PINCTRL_STM32) += stm32/
obj-$(CONFIG_PINCTRL_SUNXI) += sunxi/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/
-obj-$(CONFIG_ARCH_VT8500) += vt8500/
-obj-y += mediatek/
-obj-y += cirrus/
obj-$(CONFIG_PINCTRL_VISCONTI) += visconti/
+obj-$(CONFIG_ARCH_VT8500) += vt8500/
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 781f2200ed58..ed46abc15d72 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -874,7 +874,6 @@ static int owl_gpio_init(struct owl_pinctrl *pctrl)
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->owner = THIS_MODULE;
- chip->of_node = pctrl->dev->of_node;
pctrl->irq_chip.name = chip->of_node->name;
pctrl->irq_chip.irq_ack = owl_gpio_irq_ack;
diff --git a/drivers/pinctrl/aspeed/Kconfig b/drivers/pinctrl/aspeed/Kconfig
index de8b185c4fee..1a4e5b9ed471 100644
--- a/drivers/pinctrl/aspeed/Kconfig
+++ b/drivers/pinctrl/aspeed/Kconfig
@@ -2,7 +2,7 @@
config PINCTRL_ASPEED
bool
depends on (ARCH_ASPEED || COMPILE_TEST) && OF
- depends on MFD_SYSCON
+ select MFD_SYSCON
select PINMUX
select PINCONF
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 8fc1feedd861..5123f4c33854 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -146,6 +146,8 @@ config PINCTRL_NS
depends on OF && (ARCH_BCM_5301X || COMPILE_TEST)
select PINMUX
select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
default ARCH_BCM_5301X
help
Say yes here to enable the Broadcom NS SoC pins driver.
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index b607d10e4cbd..c4ebfa852b42 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -313,7 +313,10 @@ static inline void bcm2835_pinctrl_fsel_set(
static int bcm2835_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_gpio_direction_input(chip->base + offset);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
+
+ bcm2835_pinctrl_fsel_set(pc, offset, BCM2835_FSEL_GPIO_IN);
+ return 0;
}
static int bcm2835_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -348,8 +351,11 @@ static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
- bcm2835_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
+
+ bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset);
+ bcm2835_pinctrl_fsel_set(pc, offset, BCM2835_FSEL_GPIO_OUT);
+ return 0;
}
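Both rewritten callbacks recover the driver context with gpiochip_get_data(), which pairs with the data pointer handed over at registration. A condensed sketch of that pairing, abbreviated from the probe path of this driver:

	/* probe side: register the chip together with its private data */
	pc->gpio_chip = bcm2835_gpio_chip;
	pc->gpio_chip.parent = dev;
	err = gpiochip_add_data(&pc->gpio_chip, pc);
	if (err)
		return err;

	/* ...and in any callback the same context comes back out: */
	struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);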
static const struct gpio_chip bcm2835_gpio_chip = {
@@ -407,7 +413,7 @@ static void bcm2835_gpio_irq_handler(struct irq_desc *desc)
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
struct irq_chip *host_chip = irq_desc_get_chip(desc);
int irq = irq_desc_get_irq(desc);
- int group;
+ int group = 0;
int i;
for (i = 0; i < BCM2835_NUM_IRQS; i++) {
@@ -1222,7 +1228,6 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
pc->gpio_chip = *pdata->gpio_chip;
pc->gpio_chip.parent = dev;
- pc->gpio_chip.of_node = np;
for (i = 0; i < BCM2835_NUM_BANKS; i++) {
unsigned long events;
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index a7a0dd638a26..52fa2f4cd618 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -836,7 +836,6 @@ static int iproc_gpio_probe(struct platform_device *pdev)
chip->num_banks = (ngpios + NGPIOS_PER_BANK - 1) / NGPIOS_PER_BANK;
gc->label = dev_name(dev);
gc->parent = dev;
- gc->of_node = dev->of_node;
gc->request = iproc_gpio_request;
gc->free = iproc_gpio_free;
gc->direction_input = iproc_gpio_direction_input;
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
index d7f8175d2c1c..65a86543c58c 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -14,6 +14,9 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../core.h"
+#include "../pinmux.h"
+
#define FLAG_BCM4708 BIT(1)
#define FLAG_BCM4709 BIT(2)
#define FLAG_BCM53012 BIT(3)
@@ -25,10 +28,6 @@ struct ns_pinctrl {
void __iomem *base;
struct pinctrl_desc pctldesc;
- struct ns_pinctrl_group *groups;
- unsigned int num_groups;
- struct ns_pinctrl_function *functions;
- unsigned int num_functions;
};
/*
@@ -65,22 +64,22 @@ static const struct pinctrl_pin_desc ns_pinctrl_pins[] = {
struct ns_pinctrl_group {
const char *name;
- const unsigned int *pins;
+ unsigned int *pins;
const unsigned int num_pins;
unsigned int chipsets;
};
-static const unsigned int spi_pins[] = { 0, 1, 2, 3 };
-static const unsigned int i2c_pins[] = { 4, 5 };
-static const unsigned int mdio_pins[] = { 6, 7 };
-static const unsigned int pwm0_pins[] = { 8 };
-static const unsigned int pwm1_pins[] = { 9 };
-static const unsigned int pwm2_pins[] = { 10 };
-static const unsigned int pwm3_pins[] = { 11 };
-static const unsigned int uart1_pins[] = { 12, 13, 14, 15 };
-static const unsigned int uart2_pins[] = { 16, 17 };
-static const unsigned int sdio_pwr_pins[] = { 22 };
-static const unsigned int sdio_1p8v_pins[] = { 23 };
+static unsigned int spi_pins[] = { 0, 1, 2, 3 };
+static unsigned int i2c_pins[] = { 4, 5 };
+static unsigned int mdio_pins[] = { 6, 7 };
+static unsigned int pwm0_pins[] = { 8 };
+static unsigned int pwm1_pins[] = { 9 };
+static unsigned int pwm2_pins[] = { 10 };
+static unsigned int pwm3_pins[] = { 11 };
+static unsigned int uart1_pins[] = { 12, 13, 14, 15 };
+static unsigned int uart2_pins[] = { 16, 17 };
+static unsigned int sdio_pwr_pins[] = { 22 };
+static unsigned int sdio_1p8v_pins[] = { 23 };
#define NS_GROUP(_name, _pins, _chipsets) \
{ \
@@ -146,38 +145,10 @@ static const struct ns_pinctrl_function ns_pinctrl_functions[] = {
* Groups code
*/
-static int ns_pinctrl_get_groups_count(struct pinctrl_dev *pctrl_dev)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- return ns_pinctrl->num_groups;
-}
-
-static const char *ns_pinctrl_get_group_name(struct pinctrl_dev *pctrl_dev,
- unsigned int selector)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- return ns_pinctrl->groups[selector].name;
-}
-
-static int ns_pinctrl_get_group_pins(struct pinctrl_dev *pctrl_dev,
- unsigned int selector,
- const unsigned int **pins,
- unsigned int *num_pins)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- *pins = ns_pinctrl->groups[selector].pins;
- *num_pins = ns_pinctrl->groups[selector].num_pins;
-
- return 0;
-}
-
static const struct pinctrl_ops ns_pinctrl_ops = {
- .get_groups_count = ns_pinctrl_get_groups_count,
- .get_group_name = ns_pinctrl_get_group_name,
- .get_group_pins = ns_pinctrl_get_group_pins,
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
.dt_free_map = pinconf_generic_dt_free_map,
};
@@ -186,48 +157,22 @@ static const struct pinctrl_ops ns_pinctrl_ops = {
* Functions code
*/
-static int ns_pinctrl_get_functions_count(struct pinctrl_dev *pctrl_dev)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- return ns_pinctrl->num_functions;
-}
-
-static const char *ns_pinctrl_get_function_name(struct pinctrl_dev *pctrl_dev,
- unsigned int selector)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- return ns_pinctrl->functions[selector].name;
-}
-
-static int ns_pinctrl_get_function_groups(struct pinctrl_dev *pctrl_dev,
- unsigned int selector,
- const char * const **groups,
- unsigned * const num_groups)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- *groups = ns_pinctrl->functions[selector].groups;
- *num_groups = ns_pinctrl->functions[selector].num_groups;
-
- return 0;
-}
-
static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
unsigned int func_select,
- unsigned int grp_select)
+ unsigned int group_selector)
{
struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+ struct group_desc *group;
u32 unset = 0;
u32 tmp;
int i;
- for (i = 0; i < ns_pinctrl->groups[grp_select].num_pins; i++) {
- int pin_number = ns_pinctrl->groups[grp_select].pins[i];
+ group = pinctrl_generic_get_group(pctrl_dev, group_selector);
+ if (!group)
+ return -EINVAL;
- unset |= BIT(pin_number);
- }
+ for (i = 0; i < group->num_pins; i++)
+ unset |= BIT(group->pins[i]);
tmp = readl(ns_pinctrl->base);
tmp &= ~unset;
@@ -237,9 +182,9 @@ static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
}
static const struct pinmux_ops ns_pinctrl_pmxops = {
- .get_functions_count = ns_pinctrl_get_functions_count,
- .get_function_name = ns_pinctrl_get_function_name,
- .get_function_groups = ns_pinctrl_get_function_groups,
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
.set_mux = ns_pinctrl_set_mux,
};
@@ -267,8 +212,6 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
struct ns_pinctrl *ns_pinctrl;
struct pinctrl_desc *pctldesc;
struct pinctrl_pin_desc *pin;
- struct ns_pinctrl_group *group;
- struct ns_pinctrl_function *function;
struct resource *res;
int i;
@@ -315,43 +258,33 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
}
}
- ns_pinctrl->groups = devm_kcalloc(dev, ARRAY_SIZE(ns_pinctrl_groups),
- sizeof(struct ns_pinctrl_group),
- GFP_KERNEL);
- if (!ns_pinctrl->groups)
- return -ENOMEM;
- for (i = 0, group = &ns_pinctrl->groups[0];
- i < ARRAY_SIZE(ns_pinctrl_groups); i++) {
- const struct ns_pinctrl_group *src = &ns_pinctrl_groups[i];
+ /* Register */
- if (src->chipsets & ns_pinctrl->chipset_flag) {
- memcpy(group++, src, sizeof(*src));
- ns_pinctrl->num_groups++;
- }
+ ns_pinctrl->pctldev = devm_pinctrl_register(dev, pctldesc, ns_pinctrl);
+ if (IS_ERR(ns_pinctrl->pctldev)) {
+ dev_err(dev, "Failed to register pinctrl\n");
+ return PTR_ERR(ns_pinctrl->pctldev);
}
- ns_pinctrl->functions = devm_kcalloc(dev,
- ARRAY_SIZE(ns_pinctrl_functions),
- sizeof(struct ns_pinctrl_function),
- GFP_KERNEL);
- if (!ns_pinctrl->functions)
- return -ENOMEM;
- for (i = 0, function = &ns_pinctrl->functions[0];
- i < ARRAY_SIZE(ns_pinctrl_functions); i++) {
- const struct ns_pinctrl_function *src = &ns_pinctrl_functions[i];
+ for (i = 0; i < ARRAY_SIZE(ns_pinctrl_groups); i++) {
+ const struct ns_pinctrl_group *group = &ns_pinctrl_groups[i];
- if (src->chipsets & ns_pinctrl->chipset_flag) {
- memcpy(function++, src, sizeof(*src));
- ns_pinctrl->num_functions++;
- }
+ if (!(group->chipsets & ns_pinctrl->chipset_flag))
+ continue;
+
+ pinctrl_generic_add_group(ns_pinctrl->pctldev, group->name,
+ group->pins, group->num_pins, NULL);
}
- /* Register */
+ for (i = 0; i < ARRAY_SIZE(ns_pinctrl_functions); i++) {
+ const struct ns_pinctrl_function *function = &ns_pinctrl_functions[i];
- ns_pinctrl->pctldev = devm_pinctrl_register(dev, pctldesc, ns_pinctrl);
- if (IS_ERR(ns_pinctrl->pctldev)) {
- dev_err(dev, "Failed to register pinctrl\n");
- return PTR_ERR(ns_pinctrl->pctldev);
+ if (!(function->chipsets & ns_pinctrl->chipset_flag))
+ continue;
+
+ pinmux_generic_add_function(ns_pinctrl->pctldev, function->name,
+ function->groups,
+ function->num_groups, NULL);
}
return 0;
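The groups and functions are registered against the pctldev, so devm_pinctrl_register() has to come first in the rewritten probe. The generic helpers populate a shared descriptor whose pins member is non-const, which is also why the pin arrays above lose their const qualifier; quoted from memory of drivers/pinctrl/core.h around this series, so treat the exact layout as indicative:

	/* indicative definition of the descriptor that
	 * pinctrl_generic_add_group() fills in */
	struct group_desc {
		const char *name;
		int *pins;
		int num_pins;
		void *data;
	};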
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index e03142895f61..643dbd315033 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -648,7 +648,6 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->ngpio = val;
gc->label = dev_name(dev);
gc->parent = dev;
- gc->of_node = dev->of_node;
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
gc->direction_input = nsp_gpio_direction_input;
diff --git a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
index 670ac53a3141..3fda4446d70b 100644
--- a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
+++ b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
@@ -1161,9 +1161,6 @@ static int lochnagar_pin_probe(struct platform_device *pdev)
priv->gpio_chip.can_sleep = true;
priv->gpio_chip.parent = dev;
priv->gpio_chip.base = -1;
-#ifdef CONFIG_OF_GPIO
- priv->gpio_chip.of_node = dev->of_node;
-#endif
switch (lochnagar->type) {
case LOCHNAGAR1:
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index dce2626384a9..e1cfbee3643a 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -8,8 +8,10 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
@@ -1004,13 +1006,14 @@ static int madera_pin_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s\n", __func__);
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = &pdev->dev;
priv->madera = madera;
- pdev->dev.of_node = madera->dev->of_node;
switch (madera->type) {
case CS47L15:
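
The two madera hunks above swap a raw pdev->dev.of_node assignment for device_set_node() plus dev_fwnode(), which covers OF and ACPI parents alike. A minimal sketch of that MFD-child idiom, assuming a platform driver probe (the function name is illustrative):

    #include <linux/platform_device.h>
    #include <linux/property.h>

    static int my_child_probe(struct platform_device *pdev)
    {
    	/*
    	 * Borrow the MFD parent's firmware node so that pinctrl and
    	 * GPIO lookups resolve against the parent device's node,
    	 * without poking of_node by hand.
    	 */
    	device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));

    	return 0;
    }
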
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 21fa21c6547b..8bdafaf40b29 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -173,6 +173,13 @@ config PINCTRL_IMX8ULP
help
Say Y here to enable the imx8ulp pinctrl driver
+config PINCTRL_IMXRT1050
+ bool "IMXRT1050 pinctrl driver"
+ depends on ARCH_MXC
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imxrt1050 pinctrl driver
+
config PINCTRL_VF610
bool "Freescale Vybrid VF610 pinctrl driver"
depends on SOC_VF610
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index c44930b1b362..565a0350bf09 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
obj-$(CONFIG_PINCTRL_IMX25) += pinctrl-imx25.o
obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o
+obj-$(CONFIG_PINCTRL_IMXRT1050) += pinctrl-imxrt1050.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index daf28bc5661d..fa3cc0b80ede 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -648,7 +648,8 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
struct device_node *child;
struct function_desc *func;
struct group_desc *grp;
- u32 i = 0;
+ const char **group_names;
+ u32 i;
dev_dbg(pctl->dev, "parse function(%d): %pOFn\n", index, np);
@@ -663,14 +664,18 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
dev_err(ipctl->dev, "no groups defined in %pOF\n", np);
return -EINVAL;
}
- func->group_names = devm_kcalloc(ipctl->dev, func->num_group_names,
- sizeof(char *), GFP_KERNEL);
- if (!func->group_names)
+
+ group_names = devm_kcalloc(ipctl->dev, func->num_group_names,
+ sizeof(char *), GFP_KERNEL);
+ if (!group_names)
return -ENOMEM;
+ i = 0;
+ for_each_child_of_node(np, child)
+ group_names[i++] = child->name;
+ func->group_names = group_names;
+ i = 0;
for_each_child_of_node(np, child) {
- func->group_names[i] = child->name;
-
grp = devm_kzalloc(ipctl->dev, sizeof(struct group_desc),
GFP_KERNEL);
if (!grp) {
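
The imx hunk above stages the group names in a local const char ** and only then publishes the pointer, rather than writing through func->group_names; that keeps the code legal once the core's function_desc constifies group_names. A standalone illustration of the underlying C rule (plain userspace code, not from the kernel):

    #include <stdlib.h>

    struct fdesc {
    	const char * const *group_names;	/* read-only once set */
    };

    static int fill_names(struct fdesc *f, const char **src, size_t n)
    {
    	const char **names = calloc(n, sizeof(*names));
    	size_t i;

    	if (!names)
    		return -1;
    	for (i = 0; i < n; i++)
    		names[i] = src[i];	/* writable while still local */
    	f->group_names = names;		/* publish via the const view */
    	return 0;
    }
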
diff --git a/drivers/pinctrl/freescale/pinctrl-imxrt1050.c b/drivers/pinctrl/freescale/pinctrl-imxrt1050.c
new file mode 100644
index 000000000000..11f31c90ad30
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imxrt1050.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020
+ * Author(s): Giulio Benetti <giulio.benetti@benettiengineering.com>
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-imx.h"
+
+enum imxrt1050_pads {
+ IMXRT1050_PAD_RESERVE0 = 0,
+ IMXRT1050_PAD_RESERVE1 = 1,
+ IMXRT1050_PAD_RESERVE2 = 2,
+ IMXRT1050_PAD_RESERVE3 = 3,
+ IMXRT1050_PAD_RESERVE4 = 4,
+ IMXRT1050_PAD_RESERVE5 = 5,
+ IMXRT1050_PAD_RESERVE6 = 6,
+ IMXRT1050_PAD_RESERVE7 = 7,
+ IMXRT1050_PAD_RESERVE8 = 8,
+ IMXRT1050_PAD_RESERVE9 = 9,
+ IMXRT1050_IOMUXC_GPIO1_IO00 = 10,
+ IMXRT1050_IOMUXC_GPIO1_IO01 = 11,
+ IMXRT1050_IOMUXC_GPIO1_IO02 = 12,
+ IMXRT1050_IOMUXC_GPIO1_IO03 = 13,
+ IMXRT1050_IOMUXC_GPIO1_IO04 = 14,
+ IMXRT1050_IOMUXC_GPIO1_IO05 = 15,
+ IMXRT1050_IOMUXC_GPIO1_IO06 = 16,
+ IMXRT1050_IOMUXC_GPIO1_IO07 = 17,
+ IMXRT1050_IOMUXC_GPIO1_IO08 = 18,
+ IMXRT1050_IOMUXC_GPIO1_IO09 = 19,
+ IMXRT1050_IOMUXC_GPIO1_IO10 = 20,
+ IMXRT1050_IOMUXC_GPIO1_IO11 = 21,
+ IMXRT1050_IOMUXC_GPIO1_IO12 = 22,
+ IMXRT1050_IOMUXC_GPIO1_IO13 = 23,
+ IMXRT1050_IOMUXC_GPIO1_IO14 = 24,
+ IMXRT1050_IOMUXC_GPIO1_IO15 = 25,
+ IMXRT1050_IOMUXC_ENET_MDC = 26,
+ IMXRT1050_IOMUXC_ENET_MDIO = 27,
+ IMXRT1050_IOMUXC_ENET_TD3 = 28,
+ IMXRT1050_IOMUXC_ENET_TD2 = 29,
+ IMXRT1050_IOMUXC_ENET_TD1 = 30,
+ IMXRT1050_IOMUXC_ENET_TD0 = 31,
+ IMXRT1050_IOMUXC_ENET_TX_CTL = 32,
+ IMXRT1050_IOMUXC_ENET_TXC = 33,
+ IMXRT1050_IOMUXC_ENET_RX_CTL = 34,
+ IMXRT1050_IOMUXC_ENET_RXC = 35,
+ IMXRT1050_IOMUXC_ENET_RD0 = 36,
+ IMXRT1050_IOMUXC_ENET_RD1 = 37,
+ IMXRT1050_IOMUXC_ENET_RD2 = 38,
+ IMXRT1050_IOMUXC_ENET_RD3 = 39,
+ IMXRT1050_IOMUXC_SD1_CLK = 40,
+ IMXRT1050_IOMUXC_SD1_CMD = 41,
+ IMXRT1050_IOMUXC_SD1_DATA0 = 42,
+ IMXRT1050_IOMUXC_SD1_DATA1 = 43,
+ IMXRT1050_IOMUXC_SD1_DATA2 = 44,
+ IMXRT1050_IOMUXC_SD1_DATA3 = 45,
+ IMXRT1050_IOMUXC_SD1_DATA4 = 46,
+ IMXRT1050_IOMUXC_SD1_DATA5 = 47,
+ IMXRT1050_IOMUXC_SD1_DATA6 = 48,
+ IMXRT1050_IOMUXC_SD1_DATA7 = 49,
+ IMXRT1050_IOMUXC_SD1_RESET_B = 50,
+ IMXRT1050_IOMUXC_SD1_STROBE = 51,
+ IMXRT1050_IOMUXC_SD2_CD_B = 52,
+ IMXRT1050_IOMUXC_SD2_CLK = 53,
+ IMXRT1050_IOMUXC_SD2_CMD = 54,
+ IMXRT1050_IOMUXC_SD2_DATA0 = 55,
+ IMXRT1050_IOMUXC_SD2_DATA1 = 56,
+ IMXRT1050_IOMUXC_SD2_DATA2 = 57,
+ IMXRT1050_IOMUXC_SD2_DATA3 = 58,
+ IMXRT1050_IOMUXC_SD2_RESET_B = 59,
+ IMXRT1050_IOMUXC_SD2_WP = 60,
+ IMXRT1050_IOMUXC_NAND_ALE = 61,
+ IMXRT1050_IOMUXC_NAND_CE0 = 62,
+ IMXRT1050_IOMUXC_NAND_CE1 = 63,
+ IMXRT1050_IOMUXC_NAND_CE2 = 64,
+ IMXRT1050_IOMUXC_NAND_CE3 = 65,
+ IMXRT1050_IOMUXC_NAND_CLE = 66,
+ IMXRT1050_IOMUXC_NAND_DATA00 = 67,
+ IMXRT1050_IOMUXC_NAND_DATA01 = 68,
+ IMXRT1050_IOMUXC_NAND_DATA02 = 69,
+ IMXRT1050_IOMUXC_NAND_DATA03 = 70,
+ IMXRT1050_IOMUXC_NAND_DATA04 = 71,
+ IMXRT1050_IOMUXC_NAND_DATA05 = 72,
+ IMXRT1050_IOMUXC_NAND_DATA06 = 73,
+ IMXRT1050_IOMUXC_NAND_DATA07 = 74,
+ IMXRT1050_IOMUXC_NAND_DQS = 75,
+ IMXRT1050_IOMUXC_NAND_RE_B = 76,
+ IMXRT1050_IOMUXC_NAND_READY_B = 77,
+ IMXRT1050_IOMUXC_NAND_WE_B = 78,
+ IMXRT1050_IOMUXC_NAND_WP_B = 79,
+ IMXRT1050_IOMUXC_SAI5_RXFS = 80,
+ IMXRT1050_IOMUXC_SAI5_RXC = 81,
+ IMXRT1050_IOMUXC_SAI5_RXD0 = 82,
+ IMXRT1050_IOMUXC_SAI5_RXD1 = 83,
+ IMXRT1050_IOMUXC_SAI5_RXD2 = 84,
+ IMXRT1050_IOMUXC_SAI5_RXD3 = 85,
+ IMXRT1050_IOMUXC_SAI5_MCLK = 86,
+ IMXRT1050_IOMUXC_SAI1_RXFS = 87,
+ IMXRT1050_IOMUXC_SAI1_RXC = 88,
+ IMXRT1050_IOMUXC_SAI1_RXD0 = 89,
+ IMXRT1050_IOMUXC_SAI1_RXD1 = 90,
+ IMXRT1050_IOMUXC_SAI1_RXD2 = 91,
+ IMXRT1050_IOMUXC_SAI1_RXD3 = 92,
+ IMXRT1050_IOMUXC_SAI1_RXD4 = 93,
+ IMXRT1050_IOMUXC_SAI1_RXD5 = 94,
+ IMXRT1050_IOMUXC_SAI1_RXD6 = 95,
+ IMXRT1050_IOMUXC_SAI1_RXD7 = 96,
+ IMXRT1050_IOMUXC_SAI1_TXFS = 97,
+ IMXRT1050_IOMUXC_SAI1_TXC = 98,
+ IMXRT1050_IOMUXC_SAI1_TXD0 = 99,
+ IMXRT1050_IOMUXC_SAI1_TXD1 = 100,
+ IMXRT1050_IOMUXC_SAI1_TXD2 = 101,
+ IMXRT1050_IOMUXC_SAI1_TXD3 = 102,
+ IMXRT1050_IOMUXC_SAI1_TXD4 = 103,
+ IMXRT1050_IOMUXC_SAI1_TXD5 = 104,
+ IMXRT1050_IOMUXC_SAI1_TXD6 = 105,
+ IMXRT1050_IOMUXC_SAI1_TXD7 = 106,
+ IMXRT1050_IOMUXC_SAI1_MCLK = 107,
+ IMXRT1050_IOMUXC_SAI2_RXFS = 108,
+ IMXRT1050_IOMUXC_SAI2_RXC = 109,
+ IMXRT1050_IOMUXC_SAI2_RXD0 = 110,
+ IMXRT1050_IOMUXC_SAI2_TXFS = 111,
+ IMXRT1050_IOMUXC_SAI2_TXC = 112,
+ IMXRT1050_IOMUXC_SAI2_TXD0 = 113,
+ IMXRT1050_IOMUXC_SAI2_MCLK = 114,
+ IMXRT1050_IOMUXC_SAI3_RXFS = 115,
+ IMXRT1050_IOMUXC_SAI3_RXC = 116,
+ IMXRT1050_IOMUXC_SAI3_RXD = 117,
+ IMXRT1050_IOMUXC_SAI3_TXFS = 118,
+ IMXRT1050_IOMUXC_SAI3_TXC = 119,
+ IMXRT1050_IOMUXC_SAI3_TXD = 120,
+ IMXRT1050_IOMUXC_SAI3_MCLK = 121,
+ IMXRT1050_IOMUXC_SPDIF_TX = 122,
+ IMXRT1050_IOMUXC_SPDIF_RX = 123,
+ IMXRT1050_IOMUXC_SPDIF_EXT_CLK = 124,
+ IMXRT1050_IOMUXC_ECSPI1_SCLK = 125,
+ IMXRT1050_IOMUXC_ECSPI1_MOSI = 126,
+ IMXRT1050_IOMUXC_ECSPI1_MISO = 127,
+ IMXRT1050_IOMUXC_ECSPI1_SS0 = 128,
+ IMXRT1050_IOMUXC_ECSPI2_SCLK = 129,
+ IMXRT1050_IOMUXC_ECSPI2_MOSI = 130,
+ IMXRT1050_IOMUXC_ECSPI2_MISO = 131,
+ IMXRT1050_IOMUXC_ECSPI2_SS0 = 132,
+ IMXRT1050_IOMUXC_I2C1_SCL = 133,
+ IMXRT1050_IOMUXC_I2C1_SDA = 134,
+ IMXRT1050_IOMUXC_I2C2_SCL = 135,
+ IMXRT1050_IOMUXC_I2C2_SDA = 136,
+ IMXRT1050_IOMUXC_I2C3_SCL = 137,
+ IMXRT1050_IOMUXC_I2C3_SDA = 138,
+ IMXRT1050_IOMUXC_I2C4_SCL = 139,
+ IMXRT1050_IOMUXC_I2C4_SDA = 140,
+ IMXRT1050_IOMUXC_UART1_RXD = 141,
+ IMXRT1050_IOMUXC_UART1_TXD = 142,
+ IMXRT1050_IOMUXC_UART2_RXD = 143,
+ IMXRT1050_IOMUXC_UART2_TXD = 144,
+ IMXRT1050_IOMUXC_UART3_RXD = 145,
+ IMXRT1050_IOMUXC_UART3_TXD = 146,
+ IMXRT1050_IOMUXC_UART4_RXD = 147,
+ IMXRT1050_IOMUXC_UART4_TXD = 148,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imxrt1050_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE7),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE8),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE9),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO00),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO01),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO02),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO03),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO04),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO05),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO06),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO07),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO08),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO09),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO10),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO11),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO12),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO13),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO14),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO15),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_MDC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_MDIO),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TD3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TD2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TD1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TX_CTL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RX_CTL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RD1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RD2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RD3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_CLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_CMD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA4),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA5),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA6),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA7),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_RESET_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_STROBE),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_CD_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_CLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_CMD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_DATA0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_DATA1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_DATA2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_DATA3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_RESET_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_WP),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_ALE),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_CE0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_CE1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_CE2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_CE3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_CLE),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA00),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA01),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA02),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA03),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA04),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA05),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA06),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA07),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DQS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_RE_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_READY_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_WE_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_WP_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXD1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXD2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXD3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_MCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD4),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD5),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD6),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD7),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD4),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD5),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD6),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD7),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_MCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_RXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_RXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_RXD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_TXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_TXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_TXD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_MCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_RXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_RXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_RXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_TXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_TXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_TXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_MCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SPDIF_TX),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SPDIF_RX),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SPDIF_EXT_CLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI1_SCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI1_MOSI),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI1_MISO),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI1_SS0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI2_SCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI2_MOSI),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI2_MISO),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI2_SS0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C1_SCL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C1_SDA),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C2_SCL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C2_SDA),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C3_SCL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C3_SDA),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C4_SCL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C4_SDA),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART1_RXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART1_TXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART2_RXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART2_TXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART3_RXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART3_TXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART4_RXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART4_TXD),
+};
+
+static const struct imx_pinctrl_soc_info imxrt1050_pinctrl_info = {
+ .pins = imxrt1050_pinctrl_pads,
+ .npins = ARRAY_SIZE(imxrt1050_pinctrl_pads),
+ .gpr_compatible = "fsl,imxrt1050-iomuxc-gpr",
+};
+
+static const struct of_device_id imxrt1050_pinctrl_of_match[] = {
+ { .compatible = "fsl,imxrt1050-iomuxc", .data = &imxrt1050_pinctrl_info, },
+ { /* sentinel */ }
+};
+
+static int imxrt1050_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imxrt1050_pinctrl_info);
+}
+
+static struct platform_driver imxrt1050_pinctrl_driver = {
+ .driver = {
+ .name = "imxrt1050-pinctrl",
+ .of_match_table = of_match_ptr(imxrt1050_pinctrl_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = imxrt1050_pinctrl_probe,
+};
+
+static int __init imxrt1050_pinctrl_init(void)
+{
+ return platform_driver_register(&imxrt1050_pinctrl_driver);
+}
+arch_initcall(imxrt1050_pinctrl_init);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 8f23d126c6a7..4c01333e1406 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1577,7 +1577,7 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
vg->irqchip.irq_mask = byt_irq_mask,
vg->irqchip.irq_unmask = byt_irq_unmask,
vg->irqchip.irq_set_type = byt_irq_type,
- vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE,
+ vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED,
girq = &gc->irq;
girq->chip = &vg->irqchip;
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 980099028cf8..abffda1fd51e 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -73,6 +73,8 @@ struct intel_pad_context {
u32 padctrl1;
};
+#define CHV_INVALID_HWIRQ ((unsigned int)INVALID_HWIRQ)
+
/**
* struct intel_community_context - community context for Cherryview
* @intr_lines: Mapping between 16 HW interrupt wires and GPIO offset (in GPIO number space)
@@ -709,6 +711,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int function, unsigned int group)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = pctrl->dev;
const struct intel_pingroup *grp;
unsigned long flags;
int i;
@@ -720,9 +723,8 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
/* Check first that the pad is not locked */
for (i = 0; i < grp->npins; i++) {
if (chv_pad_locked(pctrl, grp->pins[i])) {
- dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
- grp->pins[i]);
raw_spin_unlock_irqrestore(&chv_lock, flags);
+ dev_warn(dev, "unable to set mode for locked pin %u\n", grp->pins[i]);
return -EBUSY;
}
}
@@ -757,8 +759,8 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
value |= CHV_PADCTRL1_INVRXTX_TXENABLE;
chv_writel(pctrl, pin, CHV_PADCTRL1, value);
- dev_dbg(pctrl->dev, "configured pin %u mode %u OE %sinverted\n",
- pin, mode, invert_oe ? "" : "not ");
+ dev_dbg(dev, "configured pin %u mode %u OE %sinverted\n", pin, mode,
+ invert_oe ? "" : "not ");
}
raw_spin_unlock_irqrestore(&chv_lock, flags);
@@ -812,7 +814,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
/* Reset the interrupt mapping */
for (i = 0; i < ARRAY_SIZE(cctx->intr_lines); i++) {
if (cctx->intr_lines[i] == offset) {
- cctx->intr_lines[i] = 0;
+ cctx->intr_lines[i] = CHV_INVALID_HWIRQ;
break;
}
}
@@ -1058,6 +1060,7 @@ static int chv_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int nconfigs)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = pctrl->dev;
enum pin_config_param param;
int i, ret;
u32 arg;
@@ -1094,8 +1097,7 @@ static int chv_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -ENOTSUPP;
}
- dev_dbg(pctrl->dev, "pin %d set config %d arg %u\n", pin,
- param, arg);
+ dev_dbg(dev, "pin %d set config %d arg %u\n", pin, param, arg);
}
return 0;
@@ -1302,6 +1304,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) {
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct device *dev = pctrl->dev;
struct intel_community_context *cctx = &pctrl->context.communities[0];
unsigned int pin = irqd_to_hwirq(d);
irq_flow_handler_t handler;
@@ -1319,8 +1322,10 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
else
handler = handle_edge_irq;
- if (!cctx->intr_lines[intsel]) {
+ if (cctx->intr_lines[intsel] == CHV_INVALID_HWIRQ) {
irq_set_handler_locked(d, handler);
+ dev_dbg(dev, "using interrupt line %u for IRQ_TYPE_NONE on pin %u\n",
+ intsel, pin);
cctx->intr_lines[intsel] = pin;
}
raw_spin_unlock_irqrestore(&chv_lock, flags);
@@ -1330,17 +1335,74 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
return 0;
}
+static int chv_gpio_set_intr_line(struct intel_pinctrl *pctrl, unsigned int pin)
+{
+ struct device *dev = pctrl->dev;
+ struct intel_community_context *cctx = &pctrl->context.communities[0];
+ const struct intel_community *community = &pctrl->communities[0];
+ u32 value, intsel;
+ int i;
+
+ value = chv_readl(pctrl, pin, CHV_PADCTRL0);
+ intsel = (value & CHV_PADCTRL0_INTSEL_MASK) >> CHV_PADCTRL0_INTSEL_SHIFT;
+
+ if (cctx->intr_lines[intsel] == pin)
+ return 0;
+
+ if (cctx->intr_lines[intsel] == CHV_INVALID_HWIRQ) {
+ dev_dbg(dev, "using interrupt line %u for pin %u\n", intsel, pin);
+ cctx->intr_lines[intsel] = pin;
+ return 0;
+ }
+
+ /*
+ * The interrupt line selected by the BIOS is already in use by
+ * another pin; this is a known BIOS bug found on several models.
+ * But this may also be caused by Linux deciding to use a pin as an
+ * IRQ which was not expected to be used as such by the BIOS authors,
+ * so log this at info level only.
+ */
+ dev_info(dev, "interrupt line %u is used by both pin %u and pin %u\n", intsel,
+ cctx->intr_lines[intsel], pin);
+
+ if (chv_pad_locked(pctrl, pin))
+ return -EBUSY;
+
+ /*
+ * The BIOS fills the interrupt lines from 0 counting up; start at
+ * the other end to find a free interrupt line to work around this.
+ */
+ for (i = community->nirqs - 1; i >= 0; i--) {
+ if (cctx->intr_lines[i] == CHV_INVALID_HWIRQ)
+ break;
+ }
+ if (i < 0)
+ return -EBUSY;
+
+ dev_info(dev, "changing the interrupt line for pin %u to %d\n", pin, i);
+
+ value = (value & ~CHV_PADCTRL0_INTSEL_MASK) | (i << CHV_PADCTRL0_INTSEL_SHIFT);
+ chv_writel(pctrl, pin, CHV_PADCTRL0, value);
+ cctx->intr_lines[i] = pin;
+
+ return 0;
+}
+
static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- struct intel_community_context *cctx = &pctrl->context.communities[0];
unsigned int pin = irqd_to_hwirq(d);
unsigned long flags;
u32 value;
+ int ret;
raw_spin_lock_irqsave(&chv_lock, flags);
+ ret = chv_gpio_set_intr_line(pctrl, pin);
+ if (ret)
+ goto out_unlock;
+
/*
* Pins which can be used as shared interrupt are configured in
* BIOS. Driver trusts BIOS configurations and assigns different
@@ -1375,26 +1437,22 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
chv_writel(pctrl, pin, CHV_PADCTRL1, value);
}
- value = chv_readl(pctrl, pin, CHV_PADCTRL0);
- value &= CHV_PADCTRL0_INTSEL_MASK;
- value >>= CHV_PADCTRL0_INTSEL_SHIFT;
-
- cctx->intr_lines[value] = pin;
-
if (type & IRQ_TYPE_EDGE_BOTH)
irq_set_handler_locked(d, handle_edge_irq);
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
+out_unlock:
raw_spin_unlock_irqrestore(&chv_lock, flags);
- return 0;
+ return ret;
}
static void chv_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct device *dev = pctrl->dev;
const struct intel_community *community = &pctrl->communities[0];
struct intel_community_context *cctx = &pctrl->context.communities[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1412,6 +1470,11 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
unsigned int offset;
offset = cctx->intr_lines[intr_line];
+ if (offset == CHV_INVALID_HWIRQ) {
+ dev_err(dev, "interrupt on unused interrupt line %u\n", intr_line);
+ continue;
+ }
+
generic_handle_domain_irq(gc->irq.domain, offset);
}
@@ -1512,17 +1575,16 @@ static int chv_gpio_irq_init_hw(struct gpio_chip *chip)
static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
+ struct device *dev = pctrl->dev;
const struct intel_community *community = &pctrl->communities[0];
const struct intel_padgroup *gpp;
int ret, i;
for (i = 0; i < community->ngpps; i++) {
gpp = &community->gpps[i];
- ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev),
- gpp->base, gpp->base,
- gpp->size);
+ ret = gpiochip_add_pin_range(chip, dev_name(dev), gpp->base, gpp->base, gpp->size);
if (ret) {
- dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+ dev_err(dev, "failed to add GPIO pin range\n");
return ret;
}
}
@@ -1535,15 +1597,16 @@ static int chv_gpio_probe(struct intel_pinctrl *pctrl, int irq)
const struct intel_community *community = &pctrl->communities[0];
const struct intel_padgroup *gpp;
struct gpio_chip *chip = &pctrl->chip;
+ struct device *dev = pctrl->dev;
bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
int ret, i, irq_base;
*chip = chv_gpio_chip;
chip->ngpio = pctrl->soc->pins[pctrl->soc->npins - 1].number + 1;
- chip->label = dev_name(pctrl->dev);
+ chip->label = dev_name(dev);
chip->add_pin_ranges = chv_gpio_add_pin_ranges;
- chip->parent = pctrl->dev;
+ chip->parent = dev;
chip->base = -1;
pctrl->irq = irq;
@@ -1565,17 +1628,16 @@ static int chv_gpio_probe(struct intel_pinctrl *pctrl, int irq)
if (need_valid_mask) {
chip->irq.init_valid_mask = chv_init_irq_valid_mask;
} else {
- irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
- pctrl->soc->npins, NUMA_NO_NODE);
+ irq_base = devm_irq_alloc_descs(dev, -1, 0, pctrl->soc->npins, NUMA_NO_NODE);
if (irq_base < 0) {
- dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+ dev_err(dev, "Failed to allocate IRQ numbers\n");
return irq_base;
}
}
- ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
+ ret = devm_gpiochip_add_data(dev, chip, pctrl);
if (ret) {
- dev_err(pctrl->dev, "Failed to register gpiochip\n");
+ dev_err(dev, "Failed to register gpiochip\n");
return ret;
}
@@ -1617,11 +1679,13 @@ static acpi_status chv_pinctrl_mmio_access_handler(u32 function,
static int chv_pinctrl_probe(struct platform_device *pdev)
{
const struct intel_pinctrl_soc_data *soc_data;
+ struct intel_community_context *cctx;
struct intel_community *community;
struct device *dev = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(dev);
struct intel_pinctrl *pctrl;
acpi_status status;
+ unsigned int i;
int ret, irq;
soc_data = intel_pinctrl_get_soc_data(pdev);
@@ -1663,6 +1727,10 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
if (!pctrl->context.communities)
return -ENOMEM;
+ cctx = &pctrl->context.communities[0];
+ for (i = 0; i < ARRAY_SIZE(cctx->intr_lines); i++)
+ cctx->intr_lines[i] = CHV_INVALID_HWIRQ;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -1767,15 +1835,15 @@ static int chv_pinctrl_resume_noirq(struct device *dev)
val &= ~CHV_PADCTRL0_GPIORXSTATE;
if (ctx->padctrl0 != val) {
chv_writel(pctrl, desc->number, CHV_PADCTRL0, ctx->padctrl0);
- dev_dbg(pctrl->dev, "restored pin %2u ctrl0 0x%08x\n",
- desc->number, chv_readl(pctrl, desc->number, CHV_PADCTRL0));
+ dev_dbg(dev, "restored pin %2u ctrl0 0x%08x\n", desc->number,
+ chv_readl(pctrl, desc->number, CHV_PADCTRL0));
}
val = chv_readl(pctrl, desc->number, CHV_PADCTRL1);
if (ctx->padctrl1 != val) {
chv_writel(pctrl, desc->number, CHV_PADCTRL1, ctx->padctrl1);
- dev_dbg(pctrl->dev, "restored pin %2u ctrl1 0x%08x\n",
- desc->number, chv_readl(pctrl, desc->number, CHV_PADCTRL1));
+ dev_dbg(dev, "restored pin %2u ctrl1 0x%08x\n", desc->number,
+ chv_readl(pctrl, desc->number, CHV_PADCTRL1));
}
}
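
Taken together, the cherryview hunks implement one algorithm: each of the 16 shared interrupt wires maps to at most one pin, the wire chosen by the BIOS is honoured when free, and on a clash Linux searches downward from wire 15 (the BIOS allocates upward from 0) for a spare. A self-contained sketch of just that search — NR_LINES, INVALID_PIN and pick_intr_line() are illustrative, and the CHV_PADCTRL0 reprogramming is left out:

    #define NR_LINES	16
    #define INVALID_PIN	0xffffffffU

    /* intr_lines[i] holds the hwirq routed to wire i, INVALID_PIN if free. */
    static int pick_intr_line(unsigned int *intr_lines,
    			  unsigned int bios_line, unsigned int pin)
    {
    	int i;

    	if (intr_lines[bios_line] == pin ||
    	    intr_lines[bios_line] == INVALID_PIN)
    		return bios_line;	/* keep or claim the BIOS choice */

    	/* Clash: BIOS fills from 0 up, so scan from the top for a spare. */
    	for (i = NR_LINES - 1; i >= 0; i--)
    		if (intr_lines[i] == INVALID_PIN)
    			return i;

    	return -1;			/* all wires busy */
    }
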
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index ad3b67163973..5bfaa84839c7 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -519,7 +519,7 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return mtk_eint_set_debounce(hw->eint, desc->eint.eint_n, debounce);
}
-static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
+static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
{
struct gpio_chip *chip = &hw->chip;
int ret;
@@ -536,7 +536,6 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
chip->ngpio = hw->soc->npins;
- chip->of_node = np;
chip->of_gpio_n_cells = 2;
ret = gpiochip_add_data(chip, hw);
@@ -550,7 +549,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
* Documentation/devicetree/bindings/gpio/gpio.txt on how to
* bind pinctrl and gpio drivers via the "gpio-ranges" property.
*/
- if (!of_find_property(np, "gpio-ranges", NULL)) {
+ if (!of_find_property(hw->dev->of_node, "gpio-ranges", NULL)) {
ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
chip->ngpio);
if (ret < 0) {
@@ -691,7 +690,7 @@ int mtk_moore_pinctrl_probe(struct platform_device *pdev,
"Failed to add EINT, but pinctrl still can work\n");
/* Build gpiochip should be after pinctrl_enable is done */
- err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+ err = mtk_build_gpiochip(hw);
if (err) {
dev_err(&pdev->dev, "Failed to add gpio_chip\n");
return err;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 53779822348d..e1ae3beb9f72 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -815,6 +815,8 @@ static int mtk_pinconf_bias_get_rsel(struct mtk_pinctrl *hw,
goto out;
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &pd);
+ if (err)
+ goto out;
if (pu == 0 && pd == 0) {
*pullup = 0;
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index d4e02c5d74a8..f9f9110f2107 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -581,7 +581,7 @@ ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
{
int pinmux, pullup, pullen, len = 0, r1 = -1, r0 = -1, rsel = -1;
const struct mtk_pin_desc *desc;
- u32 try_all_type;
+ u32 try_all_type = 0;
if (gpio >= hw->soc->npins)
return -EINVAL;
@@ -895,7 +895,7 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return mtk_eint_set_debounce(hw->eint, desc->eint.eint_n, debounce);
}
-static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
+static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
{
struct gpio_chip *chip = &hw->chip;
int ret;
@@ -913,7 +913,6 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
chip->ngpio = hw->soc->npins;
- chip->of_node = np;
chip->of_gpio_n_cells = 2;
ret = gpiochip_add_data(chip, hw);
@@ -1037,7 +1036,7 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev,
"Failed to add EINT, but pinctrl still can work\n");
/* Build gpiochip should be after pinctrl_enable is done */
- err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+ err = mtk_build_gpiochip(hw);
if (err) {
dev_err(&pdev->dev, "Failed to add gpio_chip\n");
return err;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 5cb018f98800..08cad14042e2 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include "../pinctrl-utils.h"
@@ -341,12 +342,12 @@ static int armada_37xx_pmx_set_by_name(struct pinctrl_dev *pctldev,
struct armada_37xx_pin_group *grp)
{
struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = info->dev;
unsigned int reg = SELECTION;
unsigned int mask = grp->reg_mask;
int func, val;
- dev_dbg(info->dev, "enable function %s group %s\n",
- name, grp->name);
+ dev_dbg(dev, "enable function %s group %s\n", name, grp->name);
func = match_string(grp->funcs, NB_FUNCS, name);
if (func < 0)
@@ -722,25 +723,22 @@ static unsigned int armada_37xx_irq_startup(struct irq_data *d)
static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct armada_37xx_pinctrl *info)
{
- struct device_node *np = info->dev->of_node;
struct gpio_chip *gc = &info->gpio_chip;
struct irq_chip *irqchip = &info->irq_chip;
struct gpio_irq_chip *girq = &gc->irq;
struct device *dev = &pdev->dev;
- struct resource res;
+ struct device_node *np;
int ret = -ENODEV, i, nr_irq_parent;
/* Check if we have at least one gpio-controller child node */
- for_each_child_of_node(info->dev->of_node, np) {
+ for_each_child_of_node(dev->of_node, np) {
if (of_property_read_bool(np, "gpio-controller")) {
ret = 0;
break;
}
}
- if (ret) {
- dev_err(dev, "no gpio-controller child node\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "no gpio-controller child node\n");
nr_irq_parent = of_irq_count(np);
spin_lock_init(&info->irq_lock);
@@ -750,12 +748,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
return 0;
}
- if (of_address_to_resource(info->dev->of_node, 1, &res)) {
- dev_err(dev, "cannot find IO resource\n");
- return -ENOENT;
- }
-
- info->base = devm_ioremap_resource(info->dev, &res);
+ info->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(info->base))
return PTR_ERR(info->base);
@@ -774,8 +767,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
* the chained irq with all of them.
*/
girq->num_parents = nr_irq_parent;
- girq->parents = devm_kcalloc(&pdev->dev, nr_irq_parent,
- sizeof(*girq->parents), GFP_KERNEL);
+ girq->parents = devm_kcalloc(dev, nr_irq_parent, sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
@@ -794,11 +786,12 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
static int armada_37xx_gpiochip_register(struct platform_device *pdev,
struct armada_37xx_pinctrl *info)
{
+ struct device *dev = &pdev->dev;
struct device_node *np;
struct gpio_chip *gc;
int ret = -ENODEV;
- for_each_child_of_node(info->dev->of_node, np) {
+ for_each_child_of_node(dev->of_node, np) {
if (of_find_property(np, "gpio-controller", NULL)) {
ret = 0;
break;
@@ -811,7 +804,7 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
gc = &info->gpio_chip;
gc->ngpio = info->data->nr_pins;
- gc->parent = &pdev->dev;
+ gc->parent = dev;
gc->base = -1;
gc->of_node = np;
gc->label = info->data->name;
@@ -819,11 +812,8 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
ret = armada_37xx_irqchip_register(pdev, info);
if (ret)
return ret;
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
- if (ret)
- return ret;
- return 0;
+ return devm_gpiochip_add_data(dev, gc, info);
}
/**
@@ -874,13 +864,13 @@ static int armada_37xx_add_function(struct armada_37xx_pmx_func *funcs,
static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
{
int n, num = 0, funcsize = info->data->nr_pins;
+ struct device *dev = info->dev;
for (n = 0; n < info->ngroups; n++) {
struct armada_37xx_pin_group *grp = &info->groups[n];
int i, j, f;
- grp->pins = devm_kcalloc(info->dev,
- grp->npins + grp->extra_npins,
+ grp->pins = devm_kcalloc(dev, grp->npins + grp->extra_npins,
sizeof(*grp->pins),
GFP_KERNEL);
if (!grp->pins)
@@ -898,8 +888,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
ret = armada_37xx_add_function(info->funcs, &funcsize,
grp->funcs[f]);
if (ret == -EOVERFLOW)
- dev_err(info->dev,
- "More functions than pins(%d)\n",
+ dev_err(dev, "More functions than pins(%d)\n",
info->data->nr_pins);
if (ret < 0)
continue;
@@ -913,7 +902,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
}
/**
- * armada_37xx_fill_funcs() - complete the funcs array
+ * armada_37xx_fill_func() - complete the funcs array
* @info: info driver instance
*
* Based on the data available from the armada_37xx_pin_group array
@@ -925,6 +914,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
{
struct armada_37xx_pmx_func *funcs = info->funcs;
+ struct device *dev = info->dev;
int n;
for (n = 0; n < info->nfuncs; n++) {
@@ -932,8 +922,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
const char **groups;
int g;
- funcs[n].groups = devm_kcalloc(info->dev,
- funcs[n].ngroups,
+ funcs[n].groups = devm_kcalloc(dev, funcs[n].ngroups,
sizeof(*(funcs[n].groups)),
GFP_KERNEL);
if (!funcs[n].groups)
@@ -962,6 +951,8 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
const struct armada_37xx_pin_data *pin_data = info->data;
struct pinctrl_desc *ctrldesc = &info->pctl;
struct pinctrl_pin_desc *pindesc, *pdesc;
+ struct device *dev = &pdev->dev;
+ char **pin_names;
int pin, ret;
info->groups = pin_data->groups;
@@ -973,20 +964,21 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
ctrldesc->pmxops = &armada_37xx_pmx_ops;
ctrldesc->confops = &armada_37xx_pinconf_ops;
- pindesc = devm_kcalloc(&pdev->dev,
- pin_data->nr_pins, sizeof(*pindesc),
- GFP_KERNEL);
+ pindesc = devm_kcalloc(dev, pin_data->nr_pins, sizeof(*pindesc), GFP_KERNEL);
if (!pindesc)
return -ENOMEM;
ctrldesc->pins = pindesc;
ctrldesc->npins = pin_data->nr_pins;
+ pin_names = devm_kasprintf_strarray(dev, pin_data->name, pin_data->nr_pins);
+ if (IS_ERR(pin_names))
+ return PTR_ERR(pin_names);
+
pdesc = pindesc;
for (pin = 0; pin < pin_data->nr_pins; pin++) {
pdesc->number = pin;
- pdesc->name = kasprintf(GFP_KERNEL, "%s-%d",
- pin_data->name, pin);
+ pdesc->name = pin_names[pin];
pdesc++;
}
@@ -994,14 +986,10 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
* we allocate functions for number of pins and hope there are
* fewer unique functions than pins available
*/
- info->funcs = devm_kcalloc(&pdev->dev,
- pin_data->nr_pins,
- sizeof(struct armada_37xx_pmx_func),
- GFP_KERNEL);
+ info->funcs = devm_kcalloc(dev, pin_data->nr_pins, sizeof(*info->funcs), GFP_KERNEL);
if (!info->funcs)
return -ENOMEM;
-
ret = armada_37xx_fill_group(info);
if (ret)
return ret;
@@ -1010,11 +998,9 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
if (ret)
return ret;
- info->pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, info);
- if (IS_ERR(info->pctl_dev)) {
- dev_err(&pdev->dev, "could not register pinctrl driver\n");
- return PTR_ERR(info->pctl_dev);
- }
+ info->pctl_dev = devm_pinctrl_register(dev, ctrldesc, info);
+ if (IS_ERR(info->pctl_dev))
+ return dev_err_probe(dev, PTR_ERR(info->pctl_dev), "could not register pinctrl driver\n");
return 0;
}
@@ -1143,18 +1129,15 @@ static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev)
struct regmap *regmap;
int ret;
- info = devm_kzalloc(dev, sizeof(struct armada_37xx_pinctrl),
- GFP_KERNEL);
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
regmap = syscon_node_to_regmap(np);
- if (IS_ERR(regmap)) {
- dev_err(&pdev->dev, "cannot get regmap\n");
- return PTR_ERR(regmap);
- }
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n");
info->regmap = regmap;
info->data = of_device_get_match_data(dev);
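
The armada-37xx pin-name change above leans on devm_kasprintf_strarray() from <linux/string_helpers.h>, which builds a device-managed "<prefix>-0" … "<prefix>-N" array and so removes the per-pin kasprintf() along with its leak on driver removal. A sketch of the pattern, assuming a probe-time context ("mvebu" and name_pins() are illustrative):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/string_helpers.h>
    #include <linux/pinctrl/pinctrl.h>

    static int name_pins(struct device *dev, struct pinctrl_pin_desc *descs,
    		     unsigned int npins)
    {
    	char **names;
    	unsigned int i;

    	/* Produces "mvebu-0" .. "mvebu-<npins-1>", freed with the device. */
    	names = devm_kasprintf_strarray(dev, "mvebu", npins);
    	if (IS_ERR(names))
    		return PTR_ERR(names);

    	for (i = 0; i < npins; i++)
    		descs[i].name = names[i];

    	return 0;
    }
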
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 22e8d4c4040e..f8edcc88ac01 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -46,6 +46,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_MODE_LOW_POWER, "pin low power", "mode", true),
PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
+ PCONFDUMP(PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS, "output impedance", "ohms", true),
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
PCONFDUMP(PIN_CONFIG_SLEEP_HARDWARE_STATE, "sleep hardware state", NULL, false),
PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
@@ -179,6 +180,7 @@ static const struct pinconf_generic_params dt_params[] = {
{ "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 },
{ "output-enable", PIN_CONFIG_OUTPUT_ENABLE, 1 },
{ "output-high", PIN_CONFIG_OUTPUT, 1, },
+ { "output-impedance-ohms", PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS, 0 },
{ "output-low", PIN_CONFIG_OUTPUT, 0, },
{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
{ "sleep-hardware-state", PIN_CONFIG_SLEEP_HARDWARE_STATE, 0 },
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index ecab9064a845..1a7d686494ff 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1009,9 +1009,6 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->gc.owner = THIS_MODULE;
gpio_dev->gc.parent = &pdev->dev;
gpio_dev->gc.ngpio = resource_size(res) / 4;
-#if defined(CONFIG_OF_GPIO)
- gpio_dev->gc.of_node = pdev->dev.of_node;
-#endif
gpio_dev->hwbank_num = gpio_dev->gc.ngpio / 64;
gpio_dev->groups = kerncz_groups;
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index a7861079a650..72f4dd2466e1 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -11,6 +11,7 @@
*/
#include <dt-bindings/pinctrl/apple.h>
+#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -36,7 +37,7 @@ struct apple_gpio_pinctrl {
struct pinctrl_desc pinctrl_desc;
struct gpio_chip gpio_chip;
struct irq_chip irq_chip;
- u8 irqgrps[0];
+ u8 irqgrps[];
};
#define REG_GPIO(x) (4 * (x))
@@ -70,31 +71,35 @@ struct regmap_config regmap_config = {
.cache_type = REGCACHE_FLAT,
.max_register = 512 * sizeof(u32),
.num_reg_defaults_raw = 512,
- .use_relaxed_mmio = true
+ .use_relaxed_mmio = true,
};
-// No locking needed to mask/unmask IRQs as the interrupt mode is per pin-register.
+/* No locking needed to mask/unmask IRQs as the interrupt mode is per pin-register. */
static void apple_gpio_set_reg(struct apple_gpio_pinctrl *pctl,
- unsigned int pin, u32 mask, u32 value)
+ unsigned int pin, u32 mask, u32 value)
{
regmap_update_bits(pctl->map, REG_GPIO(pin), mask, value);
}
-static uint32_t apple_gpio_get_reg(struct apple_gpio_pinctrl *pctl,
- unsigned int pin)
+static u32 apple_gpio_get_reg(struct apple_gpio_pinctrl *pctl,
+ unsigned int pin)
{
- unsigned int val = 0;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(pctl->map, REG_GPIO(pin), &val);
+ if (ret)
+ return 0;
- regmap_read(pctl->map, REG_GPIO(pin), &val);
return val;
}
/* Pin controller functions */
static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *node,
- struct pinctrl_map **map,
- unsigned *num_maps)
+ struct device_node *node,
+ struct pinctrl_map **map,
+ unsigned *num_maps)
{
unsigned reserved_maps;
struct apple_gpio_pinctrl *pctl;
@@ -114,13 +119,12 @@ static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
dev_err(pctl->dev,
"missing or empty pinmux property in node %pOFn.\n",
node);
- return ret;
+ return ret ? ret : -EINVAL;
}
num_pins = ret;
- ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps, num_maps,
- num_pins);
+ ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps, num_maps, num_pins);
if (ret)
return ret;
@@ -138,11 +142,10 @@ static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
}
group_name = pinctrl_generic_get_group_name(pctldev, pin);
- function_name =
- pinmux_generic_get_function_name(pctl->pctldev, func);
+ function_name = pinmux_generic_get_function_name(pctl->pctldev, func);
ret = pinctrl_utils_add_map_mux(pctl->pctldev, map,
- &reserved_maps, num_maps,
- group_name, function_name);
+ &reserved_maps, num_maps,
+ group_name, function_name);
if (ret)
goto free_map;
}
@@ -165,7 +168,7 @@ static const struct pinctrl_ops apple_gpio_pinctrl_ops = {
/* Pin multiplexer functions */
static int apple_gpio_pinmux_set(struct pinctrl_dev *pctldev, unsigned func,
- unsigned group)
+ unsigned group)
{
struct apple_gpio_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
@@ -186,14 +189,14 @@ static const struct pinmux_ops apple_gpio_pinmux_ops = {
/* GPIO chip functions */
-static int apple_gpio_get_direction(struct gpio_chip *chip,
- unsigned int offset)
+static int apple_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
unsigned int reg = apple_gpio_get_reg(pctl, offset);
- return (FIELD_GET(REG_GPIOx_MODE, reg) == REG_GPIOx_OUT) ?
- GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+ if (FIELD_GET(REG_GPIOx_MODE, reg) == REG_GPIOx_OUT)
+ return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
}
static int apple_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -211,17 +214,14 @@ static int apple_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(reg & REG_GPIOx_DATA);
}
-static void apple_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static void apple_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
- apple_gpio_set_reg(pctl, offset, REG_GPIOx_DATA,
- value ? REG_GPIOx_DATA : 0);
+ apple_gpio_set_reg(pctl, offset, REG_GPIOx_DATA, value ? REG_GPIOx_DATA : 0);
}
-static int apple_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
+static int apple_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
@@ -234,7 +234,7 @@ static int apple_gpio_direction_input(struct gpio_chip *chip,
}
static int apple_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
+ unsigned int offset, int value)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
@@ -249,13 +249,10 @@ static int apple_gpio_direction_output(struct gpio_chip *chip,
static void apple_gpio_irq_ack(struct irq_data *data)
{
- struct apple_gpio_pinctrl *pctl =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned int irqgrp =
- FIELD_GET(REG_GPIOx_GRP, apple_gpio_get_reg(pctl, data->hwirq));
+ struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ unsigned int irqgrp = FIELD_GET(REG_GPIOx_GRP, apple_gpio_get_reg(pctl, data->hwirq));
- writel(BIT(data->hwirq & 31),
- pctl->base + REG_IRQ(irqgrp, data->hwirq));
+ writel(BIT(data->hwirq % 32), pctl->base + REG_IRQ(irqgrp, data->hwirq));
}
static unsigned int apple_gpio_irq_type(unsigned int type)
@@ -278,20 +275,19 @@ static unsigned int apple_gpio_irq_type(unsigned int type)
static void apple_gpio_irq_mask(struct irq_data *data)
{
- struct apple_gpio_pinctrl *pctl =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF));
+ FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF));
}
static void apple_gpio_irq_unmask(struct irq_data *data)
{
- struct apple_gpio_pinctrl *pctl =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data));
unsigned int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data));
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, irqtype));
+ FIELD_PREP(REG_GPIOx_MODE, irqtype));
}
static unsigned int apple_gpio_irq_startup(struct irq_data *data)
@@ -300,7 +296,7 @@ static unsigned int apple_gpio_irq_startup(struct irq_data *data)
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_GRP,
- FIELD_PREP(REG_GPIOx_GRP, 0));
+ FIELD_PREP(REG_GPIOx_GRP, 0));
apple_gpio_direction_input(chip, data->hwirq);
apple_gpio_irq_unmask(data);
@@ -308,18 +304,16 @@ static unsigned int apple_gpio_irq_startup(struct irq_data *data)
return 0;
}
-static int apple_gpio_irq_set_type(struct irq_data *data,
- unsigned int type)
+static int apple_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct apple_gpio_pinctrl *pctl =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data));
unsigned int irqtype = apple_gpio_irq_type(type);
if (irqtype == REG_GPIOx_IN_IRQ_OFF)
return -EINVAL;
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, irqtype));
+ FIELD_PREP(REG_GPIOx_MODE, irqtype));
if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(data, handle_level_irq);
@@ -366,10 +360,6 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
void **irq_data = NULL;
int ret;
- if (!of_property_read_bool(pctl->dev->of_node, "gpio-controller"))
- return dev_err_probe(pctl->dev, -ENODEV,
- "No gpio-controller property\n");
-
pctl->irq_chip = apple_gpio_irqchip;
pctl->gpio_chip.label = dev_name(pctl->dev);
@@ -383,7 +373,6 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
pctl->gpio_chip.base = -1;
pctl->gpio_chip.ngpio = pctl->pinctrl_desc.npins;
pctl->gpio_chip.parent = pctl->dev;
- pctl->gpio_chip.of_node = pctl->dev->of_node;
if (girq->num_parents) {
int i;
@@ -398,14 +387,13 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
GFP_KERNEL);
if (!girq->parents || !irq_data) {
ret = -ENOMEM;
- goto out;
+ goto out_free_irq_data;
}
for (i = 0; i < girq->num_parents; i++) {
- ret = platform_get_irq(to_platform_device(pctl->dev),
- i);
+ ret = platform_get_irq(to_platform_device(pctl->dev), i);
if (ret < 0)
- goto out;
+ goto out_free_irq_data;
girq->parents[i] = ret;
pctl->irqgrps[i] = i;
@@ -419,7 +407,8 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
}
ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
-out:
+
+out_free_irq_data:
kfree(girq->parents);
kfree(irq_data);
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 13c193156363..4313756b52e6 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -23,19 +23,20 @@
#include <linux/delay.h>
#include <linux/gpio/driver.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mfd/as3722.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
#include "core.h"
#include "pinconf.h"
@@ -551,12 +552,13 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
struct as3722_pctrl_info *as_pci;
int ret;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
as_pci = devm_kzalloc(&pdev->dev, sizeof(*as_pci), GFP_KERNEL);
if (!as_pci)
return -ENOMEM;
as_pci->dev = &pdev->dev;
- as_pci->dev->of_node = pdev->dev.parent->of_node;
as_pci->as3722 = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, as_pci);
@@ -578,7 +580,6 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
as_pci->gpio_chip = as3722_gpio_chip;
as_pci->gpio_chip.parent = &pdev->dev;
- as_pci->gpio_chip.of_node = pdev->dev.parent->of_node;
ret = gpiochip_add_data(&as_pci->gpio_chip, as_pci);
if (ret < 0) {
dev_err(&pdev->dev, "Couldn't register gpiochip, %d\n", ret);
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 03c32b2c5d30..fafd1f55cba7 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -1136,7 +1136,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
}
atmel_pioctrl->gpio_chip = &atmel_gpio_chip;
- atmel_pioctrl->gpio_chip->of_node = dev->of_node;
atmel_pioctrl->gpio_chip->ngpio = atmel_pioctrl->npins;
atmel_pioctrl->gpio_chip->label = dev_name(dev);
atmel_pioctrl->gpio_chip->parent = dev;
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 6022496bb6a9..d91a010e65f5 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1868,7 +1868,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
at91_chip->chip = at91_gpio_template;
chip = &at91_chip->chip;
- chip->of_node = np;
chip->label = dev_name(&pdev->dev);
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
diff --git a/drivers/pinctrl/pinctrl-da9062.c b/drivers/pinctrl/pinctrl-da9062.c
index 1c08579f0198..0e0ac3f3ffef 100644
--- a/drivers/pinctrl/pinctrl-da9062.c
+++ b/drivers/pinctrl/pinctrl-da9062.c
@@ -14,6 +14,7 @@
#include <linux/bits.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/gpio/driver.h>
@@ -256,6 +257,8 @@ static int da9062_pctl_probe(struct platform_device *pdev)
struct da9062_pctl *pctl;
int i;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
return -ENOMEM;
@@ -277,9 +280,6 @@ static int da9062_pctl_probe(struct platform_device *pdev)
pctl->gc = reference_gc;
pctl->gc.label = dev_name(&pdev->dev);
pctl->gc.parent = &pdev->dev;
-#ifdef CONFIG_OF_GPIO
- pctl->gc.of_node = parent->of_node;
-#endif
platform_set_drvdata(pdev, pctl);
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index ff702cfbaa28..cc3546fc4610 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -233,7 +233,7 @@ static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
spin_unlock_irqrestore(&pmap->lock, flags);
}
-static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
+static int dc_gpiochip_add(struct dc_pinmap *pmap)
{
struct gpio_chip *chip = &pmap->chip;
int ret;
@@ -248,7 +248,6 @@ static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
chip->set = dc_gpio_set;
chip->base = -1;
chip->ngpio = PINS_COUNT;
- chip->of_node = np;
chip->of_gpio_n_cells = 2;
spin_lock_init(&pmap->lock);
@@ -326,7 +325,7 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(pmap->pctl);
}
- return dc_gpiochip_add(pmap, pdev->dev.of_node);
+ return dc_gpiochip_add(pmap);
}
static const struct of_device_id dc_pinctrl_ids[] = {
diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c
index 2bce563d5b8b..152c35bce8ec 100644
--- a/drivers/pinctrl/pinctrl-keembay.c
+++ b/drivers/pinctrl/pinctrl-keembay.c
@@ -1555,58 +1555,42 @@ static int keembay_pinctrl_reg(struct keembay_pinctrl *kpc, struct device *dev)
}
static int keembay_add_functions(struct keembay_pinctrl *kpc,
- struct function_desc *function)
+ struct function_desc *functions)
{
unsigned int i;
/* Assign the groups for each function */
- for (i = 0; i < kpc->npins; i++) {
- const struct pinctrl_pin_desc *pdesc = keembay_pins + i;
- struct keembay_mux_desc *mux = pdesc->drv_data;
-
- while (mux->name) {
- struct function_desc *func;
- const char **grp;
- size_t grp_size;
- u32 j, grp_num;
-
- for (j = 0; j < kpc->nfuncs; j++) {
- if (!strcmp(mux->name, function[j].name))
- break;
- }
-
- if (j == kpc->nfuncs)
- return -EINVAL;
-
- func = function + j;
- grp_num = func->num_group_names;
- grp_size = sizeof(*func->group_names);
-
- if (!func->group_names) {
- func->group_names = devm_kcalloc(kpc->dev,
- grp_num,
- grp_size,
- GFP_KERNEL);
- if (!func->group_names)
- return -ENOMEM;
+ for (i = 0; i < kpc->nfuncs; i++) {
+ struct function_desc *func = &functions[i];
+ const char **group_names;
+ unsigned int grp_idx = 0;
+ int j;
+
+ group_names = devm_kcalloc(kpc->dev, func->num_group_names,
+ sizeof(*group_names), GFP_KERNEL);
+ if (!group_names)
+ return -ENOMEM;
+
+ for (j = 0; j < kpc->npins; j++) {
+ const struct pinctrl_pin_desc *pdesc = &keembay_pins[j];
+ struct keembay_mux_desc *mux;
+
+ for (mux = pdesc->drv_data; mux->name; mux++) {
+ if (!strcmp(mux->name, func->name))
+ group_names[grp_idx++] = pdesc->name;
}
-
- grp = func->group_names;
- while (*grp)
- grp++;
-
- *grp = pdesc->name;
- mux++;
}
+
+ func->group_names = group_names;
}
/* Add all functions */
for (i = 0; i < kpc->nfuncs; i++) {
pinmux_generic_add_function(kpc->pctrl,
- function[i].name,
- function[i].group_names,
- function[i].num_group_names,
- function[i].data);
+ functions[i].name,
+ functions[i].group_names,
+ functions[i].num_group_names,
+ functions[i].data);
}
return 0;
@@ -1617,37 +1601,38 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc)
struct function_desc *keembay_funcs, *new_funcs;
int i;
- /* Allocate total number of functions */
+ /*
+ * Allocate the maximum possible number of functions. Assume every pin
+ * can be part of up to 8 (the hw maximum) globally unique muxes.
+ */
kpc->nfuncs = 0;
keembay_funcs = kcalloc(kpc->npins * 8, sizeof(*keembay_funcs), GFP_KERNEL);
if (!keembay_funcs)
return -ENOMEM;
- /* Find total number of functions and each's properties */
+ /* Set up one function for each unique mux */
for (i = 0; i < kpc->npins; i++) {
const struct pinctrl_pin_desc *pdesc = keembay_pins + i;
- struct keembay_mux_desc *mux = pdesc->drv_data;
+ struct keembay_mux_desc *mux;
- while (mux->name) {
- struct function_desc *fdesc = keembay_funcs;
+ for (mux = pdesc->drv_data; mux->name; mux++) {
+ struct function_desc *fdesc;
- while (fdesc->name) {
+ /* Check if we already have a function for this mux */
+ for (fdesc = keembay_funcs; fdesc->name; fdesc++) {
if (!strcmp(mux->name, fdesc->name)) {
fdesc->num_group_names++;
break;
}
-
- fdesc++;
}
+ /* Set up a new function for a mux we haven't seen before */
if (!fdesc->name) {
fdesc->name = mux->name;
fdesc->num_group_names = 1;
fdesc->data = &mux->mode;
kpc->nfuncs++;
}
-
- mux++;
}
}
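The keembay rework above replaces the grow-as-you-scan group lists with two passes: keembay_build_functions() first counts num_group_names for every unique mux, then keembay_add_functions() allocates exactly that many slots per function and fills them. A minimal sketch of the same pattern with illustrative names (pin_uses_mux() is a hypothetical predicate, not a driver function):

    /* Pass 2: allocate the pre-counted slots, then collect matching groups. */
    static int fill_function_groups(struct device *dev, struct function_desc *func,
                                    const struct pinctrl_pin_desc *pins,
                                    unsigned int npins)
    {
            const char **group_names;
            unsigned int idx = 0, i;

            group_names = devm_kcalloc(dev, func->num_group_names,
                                       sizeof(*group_names), GFP_KERNEL);
            if (!group_names)
                    return -ENOMEM;

            for (i = 0; i < npins; i++)
                    if (pin_uses_mux(&pins[i], func->name)) /* hypothetical */
                            group_names[idx++] = pins[i].name;

            func->group_names = group_names;
            return 0;
    }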
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index c643ed43ebbf..1ee94574f0af 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -10,14 +10,16 @@
*/
#include <linux/mfd/max77620.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include "core.h"
#include "pinconf.h"
@@ -551,12 +553,13 @@ static int max77620_pinctrl_probe(struct platform_device *pdev)
struct max77620_pctrl_info *mpci;
int i;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
mpci = devm_kzalloc(&pdev->dev, sizeof(*mpci), GFP_KERNEL);
if (!mpci)
return -ENOMEM;
mpci->dev = &pdev->dev;
- mpci->dev->of_node = pdev->dev.parent->of_node;
mpci->rmap = max77620->rmap;
mpci->pins = max77620_pins_desc;
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index bccebe43dd6a..695236636d05 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -551,7 +551,6 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.set = mcp23s08_set;
#ifdef CONFIG_OF_GPIO
mcp->chip.of_gpio_n_cells = 2;
- mcp->chip.of_node = dev->of_node;
#endif
mcp->chip.base = base;
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 78765faa245a..8e081c90bdb2 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -17,6 +17,7 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "core.h"
@@ -113,7 +114,7 @@ struct sgpio_priv {
u32 bitcount;
u32 ports;
u32 clock;
- u32 __iomem *regs;
+ struct regmap *regs;
const struct sgpio_properties *properties;
};
@@ -134,31 +135,42 @@ static inline int sgpio_addr_to_pin(struct sgpio_priv *priv, int port, int bit)
return bit + port * priv->bitcount;
}
-static inline u32 sgpio_readl(struct sgpio_priv *priv, u32 rno, u32 off)
+static inline u32 sgpio_get_addr(struct sgpio_priv *priv, u32 rno, u32 off)
+{
+ return priv->properties->regoff[rno] + off;
+}
+
+static u32 sgpio_readl(struct sgpio_priv *priv, u32 rno, u32 off)
{
- u32 __iomem *reg = &priv->regs[priv->properties->regoff[rno] + off];
+ u32 addr = sgpio_get_addr(priv, rno, off);
+ u32 val = 0;
+ int ret;
- return readl(reg);
+ ret = regmap_read(priv->regs, addr, &val);
+ WARN_ONCE(ret, "error reading sgpio reg %d\n", ret);
+
+ return val;
}
-static inline void sgpio_writel(struct sgpio_priv *priv,
+static void sgpio_writel(struct sgpio_priv *priv,
u32 val, u32 rno, u32 off)
{
- u32 __iomem *reg = &priv->regs[priv->properties->regoff[rno] + off];
+ u32 addr = sgpio_get_addr(priv, rno, off);
+ int ret;
- writel(val, reg);
+ ret = regmap_write(priv->regs, addr, val);
+ WARN_ONCE(ret, "error writing sgpio reg %d\n", ret);
}
static inline void sgpio_clrsetbits(struct sgpio_priv *priv,
u32 rno, u32 off, u32 clear, u32 set)
{
- u32 __iomem *reg = &priv->regs[priv->properties->regoff[rno] + off];
- u32 val = readl(reg);
+ u32 val = sgpio_readl(priv, rno, off);
val &= ~clear;
val |= set;
- writel(val, reg);
+ sgpio_writel(priv, val, rno, off);
}
static inline void sgpio_configure_bitstream(struct sgpio_priv *priv)
@@ -807,7 +819,13 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
struct reset_control *reset;
struct sgpio_priv *priv;
struct clk *clk;
+ u32 __iomem *regs;
u32 val;
+ struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -832,9 +850,14 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv->regs = devm_regmap_init_mmio(dev, regs, &regmap_config);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
+
priv->properties = device_get_match_data(dev);
priv->in.is_input = true;
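The sgpio conversion above trades raw readl()/writel() on a u32 __iomem array for an MMIO regmap; unlike the raw accessors, regmap_read()/regmap_write() can fail, which is why the new sgpio_readl()/sgpio_writel() report errors via WARN_ONCE(). The general shape of such a conversion, as a sketch (the regmap_config values mirror the hunk; the probe function is illustrative):

    static const struct regmap_config example_regmap_config = {
            .reg_bits = 32,
            .val_bits = 32,
            .reg_stride = 4,
    };

    static int example_probe(struct platform_device *pdev)
    {
            void __iomem *base;
            struct regmap *map;
            u32 val;

            base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            map = devm_regmap_init_mmio(&pdev->dev, base, &example_regmap_config);
            if (IS_ERR(map))
                    return PTR_ERR(map);

            /* regmap offsets are byte addresses, multiples of .reg_stride */
            return regmap_read(map, 0x0, &val);
    }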
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 0a36ec8775a3..fc969208d904 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -57,16 +57,71 @@ enum {
#define OCELOT_FUNC_PER_PIN 4
enum {
+ FUNC_CAN0_a,
+ FUNC_CAN0_b,
+ FUNC_CAN1,
FUNC_NONE,
+ FUNC_FC0_a,
+ FUNC_FC0_b,
+ FUNC_FC0_c,
+ FUNC_FC1_a,
+ FUNC_FC1_b,
+ FUNC_FC1_c,
+ FUNC_FC2_a,
+ FUNC_FC2_b,
+ FUNC_FC3_a,
+ FUNC_FC3_b,
+ FUNC_FC3_c,
+ FUNC_FC4_a,
+ FUNC_FC4_b,
+ FUNC_FC4_c,
+ FUNC_FC_SHRD0,
+ FUNC_FC_SHRD1,
+ FUNC_FC_SHRD2,
+ FUNC_FC_SHRD3,
+ FUNC_FC_SHRD4,
+ FUNC_FC_SHRD5,
+ FUNC_FC_SHRD6,
+ FUNC_FC_SHRD7,
+ FUNC_FC_SHRD8,
+ FUNC_FC_SHRD9,
+ FUNC_FC_SHRD10,
+ FUNC_FC_SHRD11,
+ FUNC_FC_SHRD12,
+ FUNC_FC_SHRD13,
+ FUNC_FC_SHRD14,
+ FUNC_FC_SHRD15,
+ FUNC_FC_SHRD16,
+ FUNC_FC_SHRD17,
+ FUNC_FC_SHRD18,
+ FUNC_FC_SHRD19,
+ FUNC_FC_SHRD20,
FUNC_GPIO,
+ FUNC_IB_TRG_a,
+ FUNC_IB_TRG_b,
+ FUNC_IB_TRG_c,
FUNC_IRQ0,
+ FUNC_IRQ_IN_a,
+ FUNC_IRQ_IN_b,
+ FUNC_IRQ_IN_c,
FUNC_IRQ0_IN,
+ FUNC_IRQ_OUT_a,
+ FUNC_IRQ_OUT_b,
+ FUNC_IRQ_OUT_c,
FUNC_IRQ0_OUT,
FUNC_IRQ1,
FUNC_IRQ1_IN,
FUNC_IRQ1_OUT,
FUNC_EXT_IRQ,
FUNC_MIIM,
+ FUNC_MIIM_a,
+ FUNC_MIIM_b,
+ FUNC_MIIM_c,
+ FUNC_MIIM_Sa,
+ FUNC_MIIM_Sb,
+ FUNC_OB_TRG,
+ FUNC_OB_TRG_a,
+ FUNC_OB_TRG_b,
FUNC_PHY_LED,
FUNC_PCI_WAKE,
FUNC_MD,
@@ -74,65 +129,174 @@ enum {
FUNC_PTP1,
FUNC_PTP2,
FUNC_PTP3,
+ FUNC_PTPSYNC_1,
+ FUNC_PTPSYNC_2,
+ FUNC_PTPSYNC_3,
+ FUNC_PTPSYNC_4,
+ FUNC_PTPSYNC_5,
+ FUNC_PTPSYNC_6,
+ FUNC_PTPSYNC_7,
FUNC_PWM,
+ FUNC_QSPI1,
+ FUNC_QSPI2,
+ FUNC_R,
+ FUNC_RECO_a,
+ FUNC_RECO_b,
FUNC_RECO_CLK,
+ FUNC_SD,
FUNC_SFP,
+ FUNC_SFP_SD,
FUNC_SG0,
FUNC_SG1,
FUNC_SG2,
+ FUNC_SGPIO_a,
+ FUNC_SGPIO_b,
FUNC_SI,
FUNC_SI2,
FUNC_TACHO,
+ FUNC_TACHO_a,
+ FUNC_TACHO_b,
FUNC_TWI,
FUNC_TWI2,
FUNC_TWI3,
FUNC_TWI_SCL_M,
+ FUNC_TWI_SLC_GATE,
+ FUNC_TWI_SLC_GATE_AD,
FUNC_UART,
FUNC_UART2,
FUNC_UART3,
+ FUNC_USB_H_a,
+ FUNC_USB_H_b,
+ FUNC_USB_H_c,
+ FUNC_USB_S_a,
+ FUNC_USB_S_b,
+ FUNC_USB_S_c,
FUNC_PLL_STAT,
FUNC_EMMC,
+ FUNC_EMMC_SD,
FUNC_REF_CLK,
FUNC_RCVRD_CLK,
FUNC_MAX
};
static const char *const ocelot_function_names[] = {
+ [FUNC_CAN0_a] = "can0_a",
+ [FUNC_CAN0_b] = "can0_b",
+ [FUNC_CAN1] = "can1",
[FUNC_NONE] = "none",
+ [FUNC_FC0_a] = "fc0_a",
+ [FUNC_FC0_b] = "fc0_b",
+ [FUNC_FC0_c] = "fc0_c",
+ [FUNC_FC1_a] = "fc1_a",
+ [FUNC_FC1_b] = "fc1_b",
+ [FUNC_FC1_c] = "fc1_c",
+ [FUNC_FC2_a] = "fc2_a",
+ [FUNC_FC2_b] = "fc2_b",
+ [FUNC_FC3_a] = "fc3_a",
+ [FUNC_FC3_b] = "fc3_b",
+ [FUNC_FC3_c] = "fc3_c",
+ [FUNC_FC4_a] = "fc4_a",
+ [FUNC_FC4_b] = "fc4_b",
+ [FUNC_FC4_c] = "fc4_c",
+ [FUNC_FC_SHRD0] = "fc_shrd0",
+ [FUNC_FC_SHRD1] = "fc_shrd1",
+ [FUNC_FC_SHRD2] = "fc_shrd2",
+ [FUNC_FC_SHRD3] = "fc_shrd3",
+ [FUNC_FC_SHRD4] = "fc_shrd4",
+ [FUNC_FC_SHRD5] = "fc_shrd5",
+ [FUNC_FC_SHRD6] = "fc_shrd6",
+ [FUNC_FC_SHRD7] = "fc_shrd7",
+ [FUNC_FC_SHRD8] = "fc_shrd8",
+ [FUNC_FC_SHRD9] = "fc_shrd9",
+ [FUNC_FC_SHRD10] = "fc_shrd10",
+ [FUNC_FC_SHRD11] = "fc_shrd11",
+ [FUNC_FC_SHRD12] = "fc_shrd12",
+ [FUNC_FC_SHRD13] = "fc_shrd13",
+ [FUNC_FC_SHRD14] = "fc_shrd14",
+ [FUNC_FC_SHRD15] = "fc_shrd15",
+ [FUNC_FC_SHRD16] = "fc_shrd16",
+ [FUNC_FC_SHRD17] = "fc_shrd17",
+ [FUNC_FC_SHRD18] = "fc_shrd18",
+ [FUNC_FC_SHRD19] = "fc_shrd19",
+ [FUNC_FC_SHRD20] = "fc_shrd20",
[FUNC_GPIO] = "gpio",
+ [FUNC_IB_TRG_a] = "ib_trig_a",
+ [FUNC_IB_TRG_b] = "ib_trig_b",
+ [FUNC_IB_TRG_c] = "ib_trig_c",
[FUNC_IRQ0] = "irq0",
+ [FUNC_IRQ_IN_a] = "irq_in_a",
+ [FUNC_IRQ_IN_b] = "irq_in_b",
+ [FUNC_IRQ_IN_c] = "irq_in_c",
[FUNC_IRQ0_IN] = "irq0_in",
+ [FUNC_IRQ_OUT_a] = "irq_out_a",
+ [FUNC_IRQ_OUT_b] = "irq_out_b",
+ [FUNC_IRQ_OUT_c] = "irq_out_c",
[FUNC_IRQ0_OUT] = "irq0_out",
[FUNC_IRQ1] = "irq1",
[FUNC_IRQ1_IN] = "irq1_in",
[FUNC_IRQ1_OUT] = "irq1_out",
[FUNC_EXT_IRQ] = "ext_irq",
[FUNC_MIIM] = "miim",
+ [FUNC_MIIM_a] = "miim_a",
+ [FUNC_MIIM_b] = "miim_b",
+ [FUNC_MIIM_c] = "miim_c",
+ [FUNC_MIIM_Sa] = "miim_slave_a",
+ [FUNC_MIIM_Sb] = "miim_slave_b",
[FUNC_PHY_LED] = "phy_led",
[FUNC_PCI_WAKE] = "pci_wake",
[FUNC_MD] = "md",
+ [FUNC_OB_TRG] = "ob_trig",
+ [FUNC_OB_TRG_a] = "ob_trig_a",
+ [FUNC_OB_TRG_b] = "ob_trig_b",
[FUNC_PTP0] = "ptp0",
[FUNC_PTP1] = "ptp1",
[FUNC_PTP2] = "ptp2",
[FUNC_PTP3] = "ptp3",
+ [FUNC_PTPSYNC_1] = "ptpsync_1",
+ [FUNC_PTPSYNC_2] = "ptpsync_2",
+ [FUNC_PTPSYNC_3] = "ptpsync_3",
+ [FUNC_PTPSYNC_4] = "ptpsync_4",
+ [FUNC_PTPSYNC_5] = "ptpsync_5",
+ [FUNC_PTPSYNC_6] = "ptpsync_6",
+ [FUNC_PTPSYNC_7] = "ptpsync_7",
[FUNC_PWM] = "pwm",
+ [FUNC_QSPI1] = "qspi1",
+ [FUNC_QSPI2] = "qspi2",
+ [FUNC_R] = "reserved",
+ [FUNC_RECO_a] = "reco_a",
+ [FUNC_RECO_b] = "reco_b",
[FUNC_RECO_CLK] = "reco_clk",
+ [FUNC_SD] = "sd",
[FUNC_SFP] = "sfp",
+ [FUNC_SFP_SD] = "sfp_sd",
[FUNC_SG0] = "sg0",
[FUNC_SG1] = "sg1",
[FUNC_SG2] = "sg2",
+ [FUNC_SGPIO_a] = "sgpio_a",
+ [FUNC_SGPIO_b] = "sgpio_b",
[FUNC_SI] = "si",
[FUNC_SI2] = "si2",
[FUNC_TACHO] = "tacho",
+ [FUNC_TACHO_a] = "tacho_a",
+ [FUNC_TACHO_b] = "tacho_b",
[FUNC_TWI] = "twi",
[FUNC_TWI2] = "twi2",
[FUNC_TWI3] = "twi3",
[FUNC_TWI_SCL_M] = "twi_scl_m",
+ [FUNC_TWI_SLC_GATE] = "twi_slc_gate",
+ [FUNC_TWI_SLC_GATE_AD] = "twi_slc_gate_ad",
+ [FUNC_USB_H_a] = "usb_host_a",
+ [FUNC_USB_H_b] = "usb_host_b",
+ [FUNC_USB_H_c] = "usb_host_c",
+ [FUNC_USB_S_a] = "usb_slave_a",
+ [FUNC_USB_S_b] = "usb_slave_b",
+ [FUNC_USB_S_c] = "usb_slave_c",
[FUNC_UART] = "uart",
[FUNC_UART2] = "uart2",
[FUNC_UART3] = "uart3",
[FUNC_PLL_STAT] = "pll_stat",
[FUNC_EMMC] = "emmc",
+ [FUNC_EMMC_SD] = "emmc_sd",
[FUNC_REF_CLK] = "ref_clk",
[FUNC_RCVRD_CLK] = "rcvrd_clk",
};
@@ -145,6 +309,7 @@ struct ocelot_pmx_func {
struct ocelot_pin_caps {
unsigned int pin;
unsigned char functions[OCELOT_FUNC_PER_PIN];
+ unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */
};
struct ocelot_pinctrl {
@@ -152,7 +317,7 @@ struct ocelot_pinctrl {
struct pinctrl_dev *pctl;
struct gpio_chip gpio_chip;
struct regmap *map;
- void __iomem *pincfg;
+ struct regmap *pincfg;
struct pinctrl_desc *desc;
struct ocelot_pmx_func func[FUNC_MAX];
u8 stride;
@@ -676,6 +841,187 @@ static const struct pinctrl_pin_desc sparx5_pins[] = {
SPARX5_PIN(63),
};
+#define LAN966X_P(p, f0, f1, f2, f3, f4, f5, f6, f7) \
+static struct ocelot_pin_caps lan966x_pin_##p = { \
+ .pin = p, \
+ .functions = { \
+ FUNC_##f0, FUNC_##f1, FUNC_##f2, \
+ FUNC_##f3 \
+ }, \
+ .a_functions = { \
+ FUNC_##f4, FUNC_##f5, FUNC_##f6, \
+ FUNC_##f7 \
+ }, \
+}
+
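As an example of the expansion, LAN966X_P(8, GPIO, FC0_a, USB_H_b, NONE, USB_S_b, NONE, NONE, R) from the table below defines:

    static struct ocelot_pin_caps lan966x_pin_8 = {
            .pin = 8,
            .functions   = { FUNC_GPIO, FUNC_FC0_a, FUNC_USB_H_b, FUNC_NONE },
            .a_functions = { FUNC_USB_S_b, FUNC_NONE, FUNC_NONE, FUNC_R },
    };

so ocelot_pin_function_idx() returns 0-3 for a match in the primary set and 4-7 (i + OCELOT_FUNC_PER_PIN) for the additional one.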
+/* Pinmuxing table taken from data sheet */
+/* Pin FUNC0 FUNC1 FUNC2 FUNC3 FUNC4 FUNC5 FUNC6 FUNC7 */
+LAN966X_P(0, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(1, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(2, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(3, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(4, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(5, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(6, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(7, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(8, GPIO, FC0_a, USB_H_b, NONE, USB_S_b, NONE, NONE, R);
+LAN966X_P(9, GPIO, FC0_a, USB_H_b, NONE, NONE, NONE, NONE, R);
+LAN966X_P(10, GPIO, FC0_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(11, GPIO, FC1_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(12, GPIO, FC1_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(13, GPIO, FC1_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(14, GPIO, FC2_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(15, GPIO, FC2_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(16, GPIO, FC2_a, IB_TRG_a, NONE, OB_TRG_a, IRQ_IN_c, IRQ_OUT_c, R);
+LAN966X_P(17, GPIO, FC3_a, IB_TRG_a, NONE, OB_TRG_a, IRQ_IN_c, IRQ_OUT_c, R);
+LAN966X_P(18, GPIO, FC3_a, IB_TRG_a, NONE, OB_TRG_a, IRQ_IN_c, IRQ_OUT_c, R);
+LAN966X_P(19, GPIO, FC3_a, IB_TRG_a, NONE, OB_TRG_a, IRQ_IN_c, IRQ_OUT_c, R);
+LAN966X_P(20, GPIO, FC4_a, IB_TRG_a, NONE, OB_TRG_a, IRQ_IN_c, NONE, R);
+LAN966X_P(21, GPIO, FC4_a, NONE, NONE, OB_TRG_a, NONE, NONE, R);
+LAN966X_P(22, GPIO, FC4_a, NONE, NONE, OB_TRG_a, NONE, NONE, R);
+LAN966X_P(23, GPIO, NONE, NONE, NONE, OB_TRG_a, NONE, NONE, R);
+LAN966X_P(24, GPIO, FC0_b, IB_TRG_a, USB_H_c, OB_TRG_a, IRQ_IN_c, TACHO_a, R);
+LAN966X_P(25, GPIO, FC0_b, IB_TRG_a, USB_H_c, OB_TRG_a, IRQ_OUT_c, SFP_SD, R);
+LAN966X_P(26, GPIO, FC0_b, IB_TRG_a, USB_S_c, OB_TRG_a, CAN0_a, SFP_SD, R);
+LAN966X_P(27, GPIO, NONE, NONE, NONE, OB_TRG_a, CAN0_a, NONE, R);
+LAN966X_P(28, GPIO, MIIM_a, NONE, NONE, OB_TRG_a, IRQ_OUT_c, SFP_SD, R);
+LAN966X_P(29, GPIO, MIIM_a, NONE, NONE, OB_TRG_a, NONE, NONE, R);
+LAN966X_P(30, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b, NONE, R);
+LAN966X_P(31, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b, NONE, R);
+LAN966X_P(32, GPIO, FC3_c, NONE, SGPIO_a, NONE, MIIM_Sa, NONE, R);
+LAN966X_P(33, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
+LAN966X_P(34, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
+LAN966X_P(35, GPIO, FC1_b, NONE, SGPIO_a, CAN0_b, NONE, NONE, R);
+LAN966X_P(36, GPIO, NONE, PTPSYNC_1, NONE, CAN0_b, NONE, NONE, R);
+LAN966X_P(37, GPIO, FC_SHRD0, PTPSYNC_2, TWI_SLC_GATE_AD, NONE, NONE, NONE, R);
+LAN966X_P(38, GPIO, NONE, PTPSYNC_3, NONE, NONE, NONE, NONE, R);
+LAN966X_P(39, GPIO, NONE, PTPSYNC_4, NONE, NONE, NONE, NONE, R);
+LAN966X_P(40, GPIO, FC_SHRD1, PTPSYNC_5, NONE, NONE, NONE, NONE, R);
+LAN966X_P(41, GPIO, FC_SHRD2, PTPSYNC_6, TWI_SLC_GATE_AD, NONE, NONE, NONE, R);
+LAN966X_P(42, GPIO, FC_SHRD3, PTPSYNC_7, TWI_SLC_GATE_AD, NONE, NONE, NONE, R);
+LAN966X_P(43, GPIO, FC2_b, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, RECO_a, IRQ_IN_a, R);
+LAN966X_P(44, GPIO, FC2_b, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, RECO_a, IRQ_IN_a, R);
+LAN966X_P(45, GPIO, FC2_b, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, NONE, IRQ_IN_a, R);
+LAN966X_P(46, GPIO, FC1_c, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, FC_SHRD4, IRQ_IN_a, R);
+LAN966X_P(47, GPIO, FC1_c, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, FC_SHRD5, IRQ_IN_a, R);
+LAN966X_P(48, GPIO, FC1_c, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, FC_SHRD6, IRQ_IN_a, R);
+LAN966X_P(49, GPIO, FC_SHRD7, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, TWI_SLC_GATE, IRQ_IN_a, R);
+LAN966X_P(50, GPIO, FC_SHRD16, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, TWI_SLC_GATE, NONE, R);
+LAN966X_P(51, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, NONE, IRQ_IN_b, R);
+LAN966X_P(52, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, TACHO_b, IRQ_IN_b, R);
+LAN966X_P(53, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, NONE, IRQ_IN_b, R);
+LAN966X_P(54, GPIO, FC_SHRD8, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, TWI_SLC_GATE, IRQ_IN_b, R);
+LAN966X_P(55, GPIO, FC_SHRD9, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, TWI_SLC_GATE, IRQ_IN_b, R);
+LAN966X_P(56, GPIO, FC4_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, FC_SHRD10, IRQ_IN_b, R);
+LAN966X_P(57, GPIO, FC4_b, TWI_SLC_GATE, IB_TRG_c, IRQ_OUT_b, FC_SHRD11, IRQ_IN_b, R);
+LAN966X_P(58, GPIO, FC4_b, TWI_SLC_GATE, IB_TRG_c, IRQ_OUT_b, FC_SHRD12, IRQ_IN_b, R);
+LAN966X_P(59, GPIO, QSPI1, MIIM_c, NONE, NONE, MIIM_Sb, NONE, R);
+LAN966X_P(60, GPIO, QSPI1, MIIM_c, NONE, NONE, MIIM_Sb, NONE, R);
+LAN966X_P(61, GPIO, QSPI1, NONE, SGPIO_b, FC0_c, MIIM_Sb, NONE, R);
+LAN966X_P(62, GPIO, QSPI1, FC_SHRD13, SGPIO_b, FC0_c, TWI_SLC_GATE, SFP_SD, R);
+LAN966X_P(63, GPIO, QSPI1, FC_SHRD14, SGPIO_b, FC0_c, TWI_SLC_GATE, SFP_SD, R);
+LAN966X_P(64, GPIO, QSPI1, FC4_c, SGPIO_b, FC_SHRD15, TWI_SLC_GATE, SFP_SD, R);
+LAN966X_P(65, GPIO, USB_H_a, FC4_c, NONE, IRQ_OUT_c, TWI_SLC_GATE_AD, NONE, R);
+LAN966X_P(66, GPIO, USB_H_a, FC4_c, USB_S_a, IRQ_OUT_c, IRQ_IN_c, NONE, R);
+LAN966X_P(67, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(68, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(69, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(70, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(71, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(72, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(73, GPIO, EMMC, NONE, NONE, SD, NONE, NONE, R);
+LAN966X_P(74, GPIO, EMMC, NONE, FC_SHRD17, SD, TWI_SLC_GATE, NONE, R);
+LAN966X_P(75, GPIO, EMMC, NONE, FC_SHRD18, SD, TWI_SLC_GATE, NONE, R);
+LAN966X_P(76, GPIO, EMMC, NONE, FC_SHRD19, SD, TWI_SLC_GATE, NONE, R);
+LAN966X_P(77, GPIO, EMMC_SD, NONE, FC_SHRD20, NONE, TWI_SLC_GATE, NONE, R);
+
+#define LAN966X_PIN(n) { \
+ .number = n, \
+ .name = "GPIO_"#n, \
+ .drv_data = &lan966x_pin_##n \
+}
+
+static const struct pinctrl_pin_desc lan966x_pins[] = {
+ LAN966X_PIN(0),
+ LAN966X_PIN(1),
+ LAN966X_PIN(2),
+ LAN966X_PIN(3),
+ LAN966X_PIN(4),
+ LAN966X_PIN(5),
+ LAN966X_PIN(6),
+ LAN966X_PIN(7),
+ LAN966X_PIN(8),
+ LAN966X_PIN(9),
+ LAN966X_PIN(10),
+ LAN966X_PIN(11),
+ LAN966X_PIN(12),
+ LAN966X_PIN(13),
+ LAN966X_PIN(14),
+ LAN966X_PIN(15),
+ LAN966X_PIN(16),
+ LAN966X_PIN(17),
+ LAN966X_PIN(18),
+ LAN966X_PIN(19),
+ LAN966X_PIN(20),
+ LAN966X_PIN(21),
+ LAN966X_PIN(22),
+ LAN966X_PIN(23),
+ LAN966X_PIN(24),
+ LAN966X_PIN(25),
+ LAN966X_PIN(26),
+ LAN966X_PIN(27),
+ LAN966X_PIN(28),
+ LAN966X_PIN(29),
+ LAN966X_PIN(30),
+ LAN966X_PIN(31),
+ LAN966X_PIN(32),
+ LAN966X_PIN(33),
+ LAN966X_PIN(34),
+ LAN966X_PIN(35),
+ LAN966X_PIN(36),
+ LAN966X_PIN(37),
+ LAN966X_PIN(38),
+ LAN966X_PIN(39),
+ LAN966X_PIN(40),
+ LAN966X_PIN(41),
+ LAN966X_PIN(42),
+ LAN966X_PIN(43),
+ LAN966X_PIN(44),
+ LAN966X_PIN(45),
+ LAN966X_PIN(46),
+ LAN966X_PIN(47),
+ LAN966X_PIN(48),
+ LAN966X_PIN(49),
+ LAN966X_PIN(50),
+ LAN966X_PIN(51),
+ LAN966X_PIN(52),
+ LAN966X_PIN(53),
+ LAN966X_PIN(54),
+ LAN966X_PIN(55),
+ LAN966X_PIN(56),
+ LAN966X_PIN(57),
+ LAN966X_PIN(58),
+ LAN966X_PIN(59),
+ LAN966X_PIN(60),
+ LAN966X_PIN(61),
+ LAN966X_PIN(62),
+ LAN966X_PIN(63),
+ LAN966X_PIN(64),
+ LAN966X_PIN(65),
+ LAN966X_PIN(66),
+ LAN966X_PIN(67),
+ LAN966X_PIN(68),
+ LAN966X_PIN(69),
+ LAN966X_PIN(70),
+ LAN966X_PIN(71),
+ LAN966X_PIN(72),
+ LAN966X_PIN(73),
+ LAN966X_PIN(74),
+ LAN966X_PIN(75),
+ LAN966X_PIN(76),
+ LAN966X_PIN(77),
+};
+
static int ocelot_get_functions_count(struct pinctrl_dev *pctldev)
{
return ARRAY_SIZE(ocelot_function_names);
@@ -709,6 +1055,9 @@ static int ocelot_pin_function_idx(struct ocelot_pinctrl *info,
for (i = 0; i < OCELOT_FUNC_PER_PIN; i++) {
if (function == p->functions[i])
return i;
+
+ if (function == p->a_functions[i])
+ return i + OCELOT_FUNC_PER_PIN;
}
return -1;
@@ -744,6 +1093,36 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
+static int lan966x_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ struct ocelot_pin_caps *pin = info->desc->pins[group].drv_data;
+ unsigned int p = pin->pin % 32;
+ int f;
+
+ f = ocelot_pin_function_idx(info, group, selector);
+ if (f < 0)
+ return -EINVAL;
+
+ /*
+ * f is encoded on three bits.
+ * bit 0 of f goes in BIT(pin) of ALT[0], bit 1 of f goes in BIT(pin) of
+ * ALT[1], bit 2 of f goes in BIT(pin) of ALT[2]
+ * This is racy because three registers can't be updated at the same time
+ * but it doesn't matter much for now.
+ * Note: ALT0/ALT1/ALT2 are organized specially for 78 gpio targets
+ */
+ regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
+ BIT(p), f << p);
+ regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
+ BIT(p), (f >> 1) << p);
+ regmap_update_bits(info->map, REG_ALT(2, info, pin->pin),
+ BIT(p), (f >> 2) << p);
+
+ return 0;
+}
+
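To make the three-bit encoding concrete: selecting function index f = 5 (0b101) on pin 10 performs

    regmap_update_bits(info->map, REG_ALT(0, info, 10), BIT(10), (5 >> 0) << 10); /* bit 10 set */
    regmap_update_bits(info->map, REG_ALT(1, info, 10), BIT(10), (5 >> 1) << 10); /* bit 10 clear */
    regmap_update_bits(info->map, REG_ALT(2, info, 10), BIT(10), (5 >> 2) << 10); /* bit 10 set */

i.e. each of ALT0/ALT1/ALT2 holds one bit of the function index at the pin's bit position.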
#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32)))
static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -774,6 +1153,23 @@ static int ocelot_gpio_request_enable(struct pinctrl_dev *pctldev,
return 0;
}
+static int lan966x_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int p = offset % 32;
+
+ regmap_update_bits(info->map, REG_ALT(0, info, offset),
+ BIT(p), 0);
+ regmap_update_bits(info->map, REG_ALT(1, info, offset),
+ BIT(p), 0);
+ regmap_update_bits(info->map, REG_ALT(2, info, offset),
+ BIT(p), 0);
+
+ return 0;
+}
+
static const struct pinmux_ops ocelot_pmx_ops = {
.get_functions_count = ocelot_get_functions_count,
.get_function_name = ocelot_get_function_name,
@@ -783,6 +1179,15 @@ static const struct pinmux_ops ocelot_pmx_ops = {
.gpio_request_enable = ocelot_gpio_request_enable,
};
+static const struct pinmux_ops lan966x_pmx_ops = {
+ .get_functions_count = ocelot_get_functions_count,
+ .get_function_name = ocelot_get_function_name,
+ .get_function_groups = ocelot_get_function_groups,
+ .set_mux = lan966x_pinmux_set_mux,
+ .gpio_set_direction = ocelot_gpio_set_direction,
+ .gpio_request_enable = lan966x_gpio_request_enable,
+};
+
static int ocelot_pctl_get_groups_count(struct pinctrl_dev *pctldev)
{
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
@@ -819,7 +1224,11 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP;
if (info->pincfg) {
- u32 regcfg = readl(info->pincfg + (pin * sizeof(u32)));
+ u32 regcfg;
+
+ ret = regmap_read(info->pincfg, pin, &regcfg);
+ if (ret)
+ return ret;
ret = 0;
switch (reg) {
@@ -843,6 +1252,24 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
return ret;
}
+static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr,
+ u32 clrbits, u32 setbits)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(info->pincfg, regaddr, &val);
+ if (ret)
+ return ret;
+
+ val &= ~clrbits;
+ val |= setbits;
+
+ ret = regmap_write(info->pincfg, regaddr, val);
+
+ return ret;
+}
+
static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
unsigned int pin,
unsigned int reg,
@@ -851,21 +1278,23 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP;
if (info->pincfg) {
- void __iomem *regaddr = info->pincfg + (pin * sizeof(u32));
ret = 0;
switch (reg) {
case PINCONF_BIAS:
- ocelot_clrsetbits(regaddr, BIAS_BITS, val);
+ ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS,
+ val);
break;
case PINCONF_SCHMITT:
- ocelot_clrsetbits(regaddr, SCHMITT_BIT, val);
+ ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT,
+ val);
break;
case PINCONF_DRIVE_STRENGTH:
if (val <= 3)
- ocelot_clrsetbits(regaddr, DRIVE_BITS, val);
+ ret = ocelot_pincfg_clrsetbits(info, pin,
+ DRIVE_BITS, val);
else
ret = -EINVAL;
break;
@@ -1078,6 +1507,16 @@ static struct pinctrl_desc sparx5_desc = {
.owner = THIS_MODULE,
};
+static struct pinctrl_desc lan966x_desc = {
+ .name = "lan966x-pinctrl",
+ .pins = lan966x_pins,
+ .npins = ARRAY_SIZE(lan966x_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &lan966x_pmx_ops,
+ .confops = &ocelot_confops,
+ .owner = THIS_MODULE,
+};
+
static int ocelot_create_group_func_map(struct device *dev,
struct ocelot_pinctrl *info)
{
@@ -1308,8 +1747,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
gc = &info->gpio_chip;
gc->ngpio = info->desc->npins;
gc->parent = &pdev->dev;
- gc->base = 0;
- gc->of_node = info->dev->of_node;
+ gc->base = -1;
gc->label = "ocelot-gpio";
irq = irq_of_parse_and_map(gc->of_node, 0);
@@ -1337,15 +1775,36 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
{ .compatible = "mscc,ocelot-pinctrl", .data = &ocelot_desc },
{ .compatible = "mscc,jaguar2-pinctrl", .data = &jaguar2_desc },
{ .compatible = "microchip,sparx5-pinctrl", .data = &sparx5_desc },
+ { .compatible = "microchip,lan966x-pinctrl", .data = &lan966x_desc },
{},
};
+static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
+{
+ void __iomem *base;
+
+ const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 32,
+ };
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_dbg(&pdev->dev, "Failed to ioremap config registers (no extended pinconf)\n");
+ return NULL;
+ }
+
+ return devm_regmap_init_mmio(&pdev->dev, base, &regmap_config);
+}
+
static int ocelot_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocelot_pinctrl *info;
+ struct regmap *pincfg;
void __iomem *base;
- struct resource *res;
int ret;
struct regmap_config regmap_config = {
.reg_bits = 32,
@@ -1378,12 +1837,11 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
/* Pinconf registers */
if (info->desc->confops) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- dev_dbg(dev, "Failed to ioremap config registers (no extended pinconf)\n");
+ pincfg = ocelot_pinctrl_create_pincfg(pdev);
+ if (IS_ERR(pincfg))
+ dev_dbg(dev, "Failed to create pincfg regmap\n");
else
- info->pincfg = base;
+ info->pincfg = pincfg;
}
ret = ocelot_pinctrl_register(pdev, info);
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index cebd810bd6d1..fb10a8473ebe 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -1232,7 +1232,6 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
bank->id = id;
bank->gpio_chip.parent = &pdev->dev;
- bank->gpio_chip.of_node = np;
bank->gpio_chip.ngpio = ngpios;
girq = &bank->gpio_chip.irq;
girq->chip = &bank->irq_chip;
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index 748dabd8db6e..37acfdfc2cae 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -2241,7 +2241,7 @@ static int pic32_gpio_probe(struct platform_device *pdev)
}
bank->gpio_chip.parent = &pdev->dev;
- bank->gpio_chip.of_node = np;
+
girq = &bank->gpio_chip.irq;
girq->chip = &bank->irq_chip;
girq->parent_handler = pic32_gpio_irq_handler;
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index c6f4229eb106..7c1f7408fb9a 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -13,17 +13,17 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/rk808.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
#include "core.h"
#include "pinconf.h"
@@ -420,18 +420,18 @@ static int rk805_pinctrl_probe(struct platform_device *pdev)
struct rk805_pctrl_info *pci;
int ret;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
if (!pci)
return -ENOMEM;
pci->dev = &pdev->dev;
- pci->dev->of_node = pdev->dev.parent->of_node;
pci->rk808 = dev_get_drvdata(pdev->dev.parent);
pci->pinctrl_desc = rk805_pinctrl_desc;
pci->gpio_chip = rk805_gpio_chip;
pci->gpio_chip.parent = &pdev->dev;
- pci->gpio_chip.of_node = pdev->dev.parent->of_node;
platform_set_drvdata(pdev, pci);
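This is the same fwnode handover applied to da9062 and max77620 earlier in the series: instead of assigning the parent's of_node by hand, the MFD sub-device inherits the parent's firmware node. A minimal sketch of the pattern (the probe name is illustrative):

    static int example_mfd_cell_probe(struct platform_device *pdev)
    {
            /* GPIO/pinctrl properties live on the parent MFD device node */
            device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));

            /* ... rest of probe; gpiolib and property lookups now see the
             * parent's properties without touching of_node directly */
            return 0;
    }

One advantage of going through dev_fwnode() is that the same code covers ACPI-described systems as well as OF.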
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 5ce260f152ce..d8dd8415fa81 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -33,13 +33,15 @@
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include <linux/string_helpers.h>
+
#include <dt-bindings/pinctrl/rockchip.h>
#include "core.h"
#include "pinconf.h"
#include "pinctrl-rockchip.h"
-/**
+/*
* Generate a bitmask for setting a value (v) with a write mask bit in hiword
* register 31:16 area.
*/
@@ -285,6 +287,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
{
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
const struct rockchip_pin_group *grp;
+ struct device *dev = info->dev;
struct pinctrl_map *new_map;
struct device_node *parent;
int map_num = 1;
@@ -296,8 +299,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
*/
grp = pinctrl_name_to_group(info, np->name);
if (!grp) {
- dev_err(info->dev, "unable to find group for node %pOFn\n",
- np);
+ dev_err(dev, "unable to find group for node %pOFn\n", np);
return -EINVAL;
}
@@ -331,7 +333,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
new_map[i].data.configs.num_configs = grp->data[i].nconfigs;
}
- dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
+ dev_dbg(dev, "maps: function %s group %s num %d\n",
(*map)->data.mux.function, (*map)->data.mux.group, map_num);
return 0;
@@ -872,20 +874,20 @@ static int rockchip_verify_mux(struct rockchip_pin_bank *bank,
int pin, int mux)
{
struct rockchip_pinctrl *info = bank->drvdata;
+ struct device *dev = info->dev;
int iomux_num = (pin / 8);
if (iomux_num > 3)
return -EINVAL;
if (bank->iomux[iomux_num].type & IOMUX_UNROUTED) {
- dev_err(info->dev, "pin %d is unrouted\n", pin);
+ dev_err(dev, "pin %d is unrouted\n", pin);
return -EINVAL;
}
if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY) {
if (mux != RK_FUNC_GPIO) {
- dev_err(info->dev,
- "pin %d only supports a gpio mux\n", pin);
+ dev_err(dev, "pin %d only supports a gpio mux\n", pin);
return -ENOTSUPP;
}
}
@@ -909,6 +911,7 @@ static int rockchip_verify_mux(struct rockchip_pin_bank *bank,
static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
{
struct rockchip_pinctrl *info = bank->drvdata;
+ struct device *dev = info->dev;
int iomux_num = (pin / 8);
struct regmap *regmap;
int reg, ret, mask, mux_type;
@@ -922,8 +925,7 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY)
return 0;
- dev_dbg(info->dev, "setting mux of GPIO%d-%d to %d\n",
- bank->bank_num, pin, mux);
+ dev_dbg(dev, "setting mux of GPIO%d-%d to %d\n", bank->bank_num, pin, mux);
regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
? info->regmap_pmu : info->regmap_base;
@@ -1575,6 +1577,7 @@ static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank,
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct device *dev = info->dev;
struct regmap *regmap;
int reg, ret;
u32 data, temp, rmask_bits;
@@ -1620,7 +1623,7 @@ static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank,
bit -= 16;
break;
default:
- dev_err(info->dev, "unsupported bit: %d for pinctrl drive type: %d\n",
+ dev_err(dev, "unsupported bit: %d for pinctrl drive type: %d\n",
bit, drv_type);
return -EINVAL;
}
@@ -1632,8 +1635,7 @@ static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank,
rmask_bits = RK3288_DRV_BITS_PER_PIN;
break;
default:
- dev_err(info->dev, "unsupported pinctrl drive type: %d\n",
- drv_type);
+ dev_err(dev, "unsupported pinctrl drive type: %d\n", drv_type);
return -EINVAL;
}
@@ -1652,13 +1654,14 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct device *dev = info->dev;
struct regmap *regmap;
int reg, ret, i;
u32 data, rmask, rmask_bits, temp;
u8 bit;
int drv_type = bank->drv[pin_num / 8].drv_type;
- dev_dbg(info->dev, "setting drive of GPIO%d-%d to %d\n",
+ dev_dbg(dev, "setting drive of GPIO%d-%d to %d\n",
bank->bank_num, pin_num, strength);
ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
@@ -1680,8 +1683,7 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
}
if (ret < 0) {
- dev_err(info->dev, "unsupported driver strength %d\n",
- strength);
+ dev_err(dev, "unsupported driver strength %d\n", strength);
return ret;
}
@@ -1720,7 +1722,7 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
bit -= 16;
break;
default:
- dev_err(info->dev, "unsupported bit: %d for pinctrl drive type: %d\n",
+ dev_err(dev, "unsupported bit: %d for pinctrl drive type: %d\n",
bit, drv_type);
return -EINVAL;
}
@@ -1731,8 +1733,7 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
rmask_bits = RK3288_DRV_BITS_PER_PIN;
break;
default:
- dev_err(info->dev, "unsupported pinctrl drive type: %d\n",
- drv_type);
+ dev_err(dev, "unsupported pinctrl drive type: %d\n", drv_type);
return -EINVAL;
}
@@ -1766,6 +1767,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct device *dev = info->dev;
struct regmap *regmap;
int reg, ret, pull_type;
u8 bit;
@@ -1800,7 +1802,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
return rockchip_pull_list[pull_type][data];
default:
- dev_err(info->dev, "unsupported pinctrl type\n");
+ dev_err(dev, "unsupported pinctrl type\n");
return -EINVAL;
};
}
@@ -1810,13 +1812,13 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct device *dev = info->dev;
struct regmap *regmap;
int reg, ret, i, pull_type;
u8 bit;
u32 data, rmask;
- dev_dbg(info->dev, "setting pull of GPIO%d-%d to %d\n",
- bank->bank_num, pin_num, pull);
+ dev_dbg(dev, "setting pull of GPIO%d-%d to %d\n", bank->bank_num, pin_num, pull);
/* rk3066b doesn't support any pulls */
if (ctrl->type == RK3066B)
@@ -1859,8 +1861,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
}
if (ret < 0) {
- dev_err(info->dev, "unsupported pull setting %d\n",
- pull);
+ dev_err(dev, "unsupported pull setting %d\n", pull);
return ret;
}
@@ -1872,7 +1873,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
ret = regmap_update_bits(regmap, reg, rmask, data);
break;
default:
- dev_err(info->dev, "unsupported pinctrl type\n");
+ dev_err(dev, "unsupported pinctrl type\n");
return -EINVAL;
}
@@ -1963,12 +1964,13 @@ static int rockchip_set_schmitt(struct rockchip_pin_bank *bank,
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct device *dev = info->dev;
struct regmap *regmap;
int reg, ret;
u8 bit;
u32 data, rmask;
- dev_dbg(info->dev, "setting input schmitt of GPIO%d-%d to %d\n",
+ dev_dbg(dev, "setting input schmitt of GPIO%d-%d to %d\n",
bank->bank_num, pin_num, enable);
ret = ctrl->schmitt_calc_reg(bank, pin_num, &regmap, &reg, &bit);
@@ -2028,10 +2030,11 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
const unsigned int *pins = info->groups[group].pins;
const struct rockchip_pin_config *data = info->groups[group].data;
+ struct device *dev = info->dev;
struct rockchip_pin_bank *bank;
int cnt, ret = 0;
- dev_dbg(info->dev, "enable function %s group %s\n",
+ dev_dbg(dev, "enable function %s group %s\n",
info->functions[selector].name, info->groups[group].name);
/*
@@ -2310,6 +2313,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
struct rockchip_pinctrl *info,
u32 index)
{
+ struct device *dev = info->dev;
struct rockchip_pin_bank *bank;
int size;
const __be32 *list;
@@ -2317,7 +2321,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
int i, j;
int ret;
- dev_dbg(info->dev, "group(%d): %pOFn\n", index, np);
+ dev_dbg(dev, "group(%d): %pOFn\n", index, np);
/* Initialise group */
grp->name = np->name;
@@ -2329,19 +2333,13 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
list = of_get_property(np, "rockchip,pins", &size);
/* we do not check the return value since the node passed down is known to be safe */
size /= sizeof(*list);
- if (!size || size % 4) {
- dev_err(info->dev, "wrong pins number or pins and configs should be by 4\n");
- return -EINVAL;
- }
+ if (!size || size % 4)
+ return dev_err_probe(dev, -EINVAL, "wrong pins number or pins and configs should be by 4\n");
grp->npins = size / 4;
- grp->pins = devm_kcalloc(info->dev, grp->npins, sizeof(unsigned int),
- GFP_KERNEL);
- grp->data = devm_kcalloc(info->dev,
- grp->npins,
- sizeof(struct rockchip_pin_config),
- GFP_KERNEL);
+ grp->pins = devm_kcalloc(dev, grp->npins, sizeof(*grp->pins), GFP_KERNEL);
+ grp->data = devm_kcalloc(dev, grp->npins, sizeof(*grp->data), GFP_KERNEL);
if (!grp->pins || !grp->data)
return -ENOMEM;
@@ -2375,6 +2373,7 @@ static int rockchip_pinctrl_parse_functions(struct device_node *np,
struct rockchip_pinctrl *info,
u32 index)
{
+ struct device *dev = info->dev;
struct device_node *child;
struct rockchip_pmx_func *func;
struct rockchip_pin_group *grp;
@@ -2382,7 +2381,7 @@ static int rockchip_pinctrl_parse_functions(struct device_node *np,
static u32 grp_index;
u32 i = 0;
- dev_dbg(info->dev, "parse function(%d): %pOFn\n", index, np);
+ dev_dbg(dev, "parse function(%d): %pOFn\n", index, np);
func = &info->functions[index];
@@ -2392,8 +2391,7 @@ static int rockchip_pinctrl_parse_functions(struct device_node *np,
if (func->ngroups <= 0)
return 0;
- func->groups = devm_kcalloc(info->dev,
- func->ngroups, sizeof(char *), GFP_KERNEL);
+ func->groups = devm_kcalloc(dev, func->ngroups, sizeof(*func->groups), GFP_KERNEL);
if (!func->groups)
return -ENOMEM;
@@ -2421,20 +2419,14 @@ static int rockchip_pinctrl_parse_dt(struct platform_device *pdev,
rockchip_pinctrl_child_count(info, np);
- dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
- dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
+ dev_dbg(dev, "nfunctions = %d\n", info->nfunctions);
+ dev_dbg(dev, "ngroups = %d\n", info->ngroups);
- info->functions = devm_kcalloc(dev,
- info->nfunctions,
- sizeof(struct rockchip_pmx_func),
- GFP_KERNEL);
+ info->functions = devm_kcalloc(dev, info->nfunctions, sizeof(*info->functions), GFP_KERNEL);
if (!info->functions)
return -ENOMEM;
- info->groups = devm_kcalloc(dev,
- info->ngroups,
- sizeof(struct rockchip_pin_group),
- GFP_KERNEL);
+ info->groups = devm_kcalloc(dev, info->ngroups, sizeof(*info->groups), GFP_KERNEL);
if (!info->groups)
return -ENOMEM;
@@ -2446,7 +2438,7 @@ static int rockchip_pinctrl_parse_dt(struct platform_device *pdev,
ret = rockchip_pinctrl_parse_functions(child, info, i++);
if (ret) {
- dev_err(&pdev->dev, "failed to parse function\n");
+ dev_err(dev, "failed to parse function\n");
of_node_put(child);
return ret;
}
@@ -2461,6 +2453,8 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
struct pinctrl_desc *ctrldesc = &info->pctl;
struct pinctrl_pin_desc *pindesc, *pdesc;
struct rockchip_pin_bank *pin_bank;
+ struct device *dev = &pdev->dev;
+ char **pin_names;
int pin, bank, ret;
int k;
@@ -2470,9 +2464,7 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
ctrldesc->pmxops = &rockchip_pmx_ops;
ctrldesc->confops = &rockchip_pinconf_ops;
- pindesc = devm_kcalloc(&pdev->dev,
- info->ctrl->nr_pins, sizeof(*pindesc),
- GFP_KERNEL);
+ pindesc = devm_kcalloc(dev, info->ctrl->nr_pins, sizeof(*pindesc), GFP_KERNEL);
if (!pindesc)
return -ENOMEM;
@@ -2482,10 +2474,14 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
pdesc = pindesc;
for (bank = 0, k = 0; bank < info->ctrl->nr_banks; bank++) {
pin_bank = &info->ctrl->pin_banks[bank];
+
+ pin_names = devm_kasprintf_strarray(dev, pin_bank->name, pin_bank->nr_pins);
+ if (IS_ERR(pin_names))
+ return PTR_ERR(pin_names);
+
for (pin = 0; pin < pin_bank->nr_pins; pin++, k++) {
pdesc->number = k;
- pdesc->name = kasprintf(GFP_KERNEL, "%s-%d",
- pin_bank->name, pin);
+ pdesc->name = pin_names[pin];
pdesc++;
}
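devm_kasprintf_strarray() builds a device-managed array of "<prefix>-<index>" strings, which is what lets the per-pin kasprintf() calls above be dropped. A usage sketch, assuming the helper's generic behaviour:

    char **names = devm_kasprintf_strarray(dev, "gpio0", 4);
    if (IS_ERR(names))
            return PTR_ERR(names);
    /* names is { "gpio0-0", "gpio0-1", "gpio0-2", "gpio0-3" } and is
     * freed automatically when the device goes away */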
@@ -2497,11 +2493,9 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
if (ret)
return ret;
- info->pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, info);
- if (IS_ERR(info->pctl_dev)) {
- dev_err(&pdev->dev, "could not register pinctrl driver\n");
- return PTR_ERR(info->pctl_dev);
- }
+ info->pctl_dev = devm_pinctrl_register(dev, ctrldesc, info);
+ if (IS_ERR(info->pctl_dev))
+ return dev_err_probe(dev, PTR_ERR(info->pctl_dev), "could not register pinctrl driver\n");
return 0;
}
@@ -2513,8 +2507,9 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
struct rockchip_pinctrl *d,
struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
const struct of_device_id *match;
- struct device_node *node = pdev->dev.of_node;
struct rockchip_pin_ctrl *ctrl;
struct rockchip_pin_bank *bank;
int grf_offs, pmu_offs, drv_grf_offs, drv_pmu_offs, i, j;
@@ -2566,7 +2561,7 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
drv_pmu_offs : drv_grf_offs;
}
- dev_dbg(d->dev, "bank %d, iomux %d has iom_offset 0x%x drv_offset 0x%x\n",
+ dev_dbg(dev, "bank %d, iomux %d has iom_offset 0x%x drv_offset 0x%x\n",
i, j, iom->offset, drv->offset);
/*
@@ -2675,16 +2670,14 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
{
struct rockchip_pinctrl *info;
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node, *node;
struct rockchip_pin_ctrl *ctrl;
- struct device_node *np = pdev->dev.of_node, *node;
struct resource *res;
void __iomem *base;
int ret;
- if (!dev->of_node) {
- dev_err(dev, "device tree node not found\n");
- return -ENODEV;
- }
+ if (!dev->of_node)
+ return dev_err_probe(dev, -ENODEV, "device tree node not found\n");
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -2693,10 +2686,8 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
info->dev = dev;
ctrl = rockchip_pinctrl_get_soc_data(info, pdev);
- if (!ctrl) {
- dev_err(dev, "driver data not available\n");
- return -EINVAL;
- }
+ if (!ctrl)
+ return dev_err_probe(dev, -EINVAL, "driver data not available\n");
info->ctrl = ctrl;
node = of_parse_phandle(np, "rockchip,grf", 0);
@@ -2705,32 +2696,28 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(info->regmap_base))
return PTR_ERR(info->regmap_base);
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
rockchip_regmap_config.max_register = resource_size(res) - 4;
rockchip_regmap_config.name = "rockchip,pinctrl";
- info->regmap_base = devm_regmap_init_mmio(&pdev->dev, base,
- &rockchip_regmap_config);
+ info->regmap_base =
+ devm_regmap_init_mmio(dev, base, &rockchip_regmap_config);
/* to check for the old dt-bindings */
info->reg_size = resource_size(res);
/* Honor the old binding, with pull registers as 2nd resource */
if (ctrl->type == RK3188 && info->reg_size < 0x200) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (IS_ERR(base))
return PTR_ERR(base);
- rockchip_regmap_config.max_register =
- resource_size(res) - 4;
+ rockchip_regmap_config.max_register = resource_size(res) - 4;
rockchip_regmap_config.name = "rockchip,pinctrl-pull";
- info->regmap_pull = devm_regmap_init_mmio(&pdev->dev,
- base,
- &rockchip_regmap_config);
+ info->regmap_pull =
+ devm_regmap_init_mmio(dev, base, &rockchip_regmap_config);
}
}
@@ -2748,11 +2735,9 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- ret = of_platform_populate(np, rockchip_bank_match, NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to register gpio device\n");
- return ret;
- }
+ ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register gpio device\n");
return 0;
}
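Several error paths in this file are converted to dev_err_probe(), which logs and returns the error in one statement and stays quiet on probe deferral. The open-coded equivalent of the call above is roughly:

    if (ret) {
            if (ret != -EPROBE_DEFER)
                    dev_err(dev, "failed to register gpio device\n");
            return ret;
    }

with the addition that dev_err_probe() also records the deferral reason for later inspection.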
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1482a01dfec7..0fea71fd9a00 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -55,7 +55,7 @@
#define ST_GPIO_DIRECTION_OUT 0x2
#define ST_GPIO_DIRECTION_IN 0x4
-/**
+/*
* Packed style retime configuration.
* There are two registers cfg0 and cfg1 in this style for each bank.
* Each field in this register is 8 bit corresponding to 8 pins in the bank.
@@ -69,7 +69,7 @@
#define RT_P_CFG1_CLKNOTDATA_FIELD(reg) REG_FIELD(reg, 16, 23)
#define RT_P_CFG1_DOUBLE_EDGE_FIELD(reg) REG_FIELD(reg, 24, 31)
-/**
+/*
* Dedicated style retime Configuration register
* each register is dedicated per pin.
*/
@@ -814,26 +814,25 @@ static int st_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
{
struct st_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
const struct st_pctl_group *grp;
+ struct device *dev = info->dev;
struct pinctrl_map *new_map;
struct device_node *parent;
int map_num, i;
grp = st_pctl_find_group_by_name(info, np->name);
if (!grp) {
- dev_err(info->dev, "unable to find group for node %pOFn\n",
- np);
+ dev_err(dev, "unable to find group for node %pOFn\n", np);
return -EINVAL;
}
map_num = grp->npins + 1;
- new_map = devm_kcalloc(pctldev->dev,
- map_num, sizeof(*new_map), GFP_KERNEL);
+ new_map = devm_kcalloc(dev, map_num, sizeof(*new_map), GFP_KERNEL);
if (!new_map)
return -ENOMEM;
parent = of_get_parent(np);
if (!parent) {
- devm_kfree(pctldev->dev, new_map);
+ devm_kfree(dev, new_map);
return -EINVAL;
}
@@ -853,7 +852,7 @@ static int st_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
new_map[i].data.configs.configs = &grp->pin_conf[i].config;
new_map[i].data.configs.num_configs = 1;
}
- dev_info(pctldev->dev, "maps: function %s group %s num %d\n",
+ dev_info(dev, "maps: function %s group %s num %d\n",
(*map)->data.mux.function, grp->name, map_num);
return 0;
@@ -1173,6 +1172,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
/* bank pad direction val altfunction */
const __be32 *list;
struct property *pp;
+ struct device *dev = info->dev;
struct st_pinconf *conf;
struct device_node *pins;
int i = 0, npins = 0, nr_props, ret = 0;
@@ -1197,9 +1197,8 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
grp->npins = npins;
grp->name = np->name;
- grp->pins = devm_kcalloc(info->dev, npins, sizeof(u32), GFP_KERNEL);
- grp->pin_conf = devm_kcalloc(info->dev,
- npins, sizeof(*conf), GFP_KERNEL);
+ grp->pins = devm_kcalloc(dev, npins, sizeof(*grp->pins), GFP_KERNEL);
+ grp->pin_conf = devm_kcalloc(dev, npins, sizeof(*grp->pin_conf), GFP_KERNEL);
if (!grp->pins || !grp->pin_conf) {
ret = -ENOMEM;
@@ -1247,6 +1246,7 @@ out_put_node:
static int st_pctl_parse_functions(struct device_node *np,
struct st_pinctrl *info, u32 index, int *grp_index)
{
+ struct device *dev = info->dev;
struct device_node *child;
struct st_pmx_func *func;
struct st_pctl_group *grp;
@@ -1255,12 +1255,9 @@ static int st_pctl_parse_functions(struct device_node *np,
func = &info->functions[index];
func->name = np->name;
func->ngroups = of_get_child_count(np);
- if (func->ngroups == 0) {
- dev_err(info->dev, "No groups defined\n");
- return -EINVAL;
- }
- func->groups = devm_kcalloc(info->dev,
- func->ngroups, sizeof(char *), GFP_KERNEL);
+ if (func->ngroups == 0)
+ return dev_err_probe(dev, -EINVAL, "No groups defined\n");
+ func->groups = devm_kcalloc(dev, func->ngroups, sizeof(*func->groups), GFP_KERNEL);
if (!func->groups)
return -ENOMEM;
@@ -1275,8 +1272,7 @@ static int st_pctl_parse_functions(struct device_node *np,
return ret;
}
}
- dev_info(info->dev, "Function[%d\t name:%s,\tgroups:%d]\n",
- index, func->name, func->ngroups);
+ dev_info(dev, "Function[%d\t name:%s,\tgroups:%d]\n", index, func->name, func->ngroups);
return 0;
}
@@ -1557,10 +1553,8 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
skip_irq:
err = gpiochip_add_data(&bank->gpio_chip, bank);
- if (err) {
- dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_num);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to add gpiochip(%d)!\n", bank_num);
dev_info(dev, "%s bank added.\n", range->name);
return 0;
@@ -1577,63 +1571,50 @@ static const struct of_device_id st_pctl_of_match[] = {
static int st_pctl_probe_dt(struct platform_device *pdev,
struct pinctrl_desc *pctl_desc, struct st_pinctrl *info)
{
+ struct device *dev = &pdev->dev;
int ret = 0;
int i = 0, j = 0, k = 0, bank;
struct pinctrl_pin_desc *pdesc;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct device_node *child;
int grp_index = 0;
int irq = 0;
- struct resource *res;
st_pctl_dt_child_count(info, np);
- if (!info->nbanks) {
- dev_err(&pdev->dev, "you need at least one gpio bank\n");
- return -EINVAL;
- }
+ if (!info->nbanks)
+ return dev_err_probe(dev, -EINVAL, "you need at least one gpio bank\n");
- dev_info(&pdev->dev, "nbanks = %d\n", info->nbanks);
- dev_info(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
- dev_info(&pdev->dev, "ngroups = %d\n", info->ngroups);
+ dev_info(dev, "nbanks = %d\n", info->nbanks);
+ dev_info(dev, "nfunctions = %d\n", info->nfunctions);
+ dev_info(dev, "ngroups = %d\n", info->ngroups);
- info->functions = devm_kcalloc(&pdev->dev,
- info->nfunctions, sizeof(*info->functions), GFP_KERNEL);
+ info->functions = devm_kcalloc(dev, info->nfunctions, sizeof(*info->functions), GFP_KERNEL);
- info->groups = devm_kcalloc(&pdev->dev,
- info->ngroups, sizeof(*info->groups),
- GFP_KERNEL);
+ info->groups = devm_kcalloc(dev, info->ngroups, sizeof(*info->groups), GFP_KERNEL);
- info->banks = devm_kcalloc(&pdev->dev,
- info->nbanks, sizeof(*info->banks), GFP_KERNEL);
+ info->banks = devm_kcalloc(dev, info->nbanks, sizeof(*info->banks), GFP_KERNEL);
if (!info->functions || !info->groups || !info->banks)
return -ENOMEM;
info->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
- if (IS_ERR(info->regmap)) {
- dev_err(info->dev, "No syscfg phandle specified\n");
- return PTR_ERR(info->regmap);
- }
+ if (IS_ERR(info->regmap))
+ return dev_err_probe(dev, PTR_ERR(info->regmap), "No syscfg phandle specified\n");
info->data = of_match_node(st_pctl_of_match, np)->data;
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
- res = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, "irqmux");
- info->irqmux_base = devm_ioremap_resource(&pdev->dev, res);
-
+ info->irqmux_base = devm_platform_ioremap_resource_byname(pdev, "irqmux");
if (IS_ERR(info->irqmux_base))
return PTR_ERR(info->irqmux_base);
irq_set_chained_handler_and_data(irq, st_gpio_irqmux_handler,
info);
-
}
pctl_desc->npins = info->nbanks * ST_GPIO_PINS_PER_BANK;
- pdesc = devm_kcalloc(&pdev->dev,
- pctl_desc->npins, sizeof(*pdesc), GFP_KERNEL);
+ pdesc = devm_kcalloc(dev, pctl_desc->npins, sizeof(*pdesc), GFP_KERNEL);
if (!pdesc)
return -ENOMEM;
@@ -1643,6 +1624,8 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
for_each_child_of_node(np, child) {
if (of_property_read_bool(child, "gpio-controller")) {
const char *bank_name = NULL;
+ char **pin_names;
+
ret = st_gpiolib_register_bank(info, bank, child);
if (ret) {
of_node_put(child);
@@ -1651,10 +1634,16 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
k = info->banks[bank].range.pin_base;
bank_name = info->banks[bank].range.name;
+
+ pin_names = devm_kasprintf_strarray(dev, bank_name, ST_GPIO_PINS_PER_BANK);
+ if (IS_ERR(pin_names)) {
+ of_node_put(child);
+ return PTR_ERR(pin_names);
+ }
+
for (j = 0; j < ST_GPIO_PINS_PER_BANK; j++, k++) {
pdesc->number = k;
- pdesc->name = kasprintf(GFP_KERNEL, "%s[%d]",
- bank_name, j);
+ pdesc->name = pin_names[j];
pdesc++;
}
st_parse_syscfgs(info, bank, child);
@@ -1663,7 +1652,7 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
ret = st_pctl_parse_functions(child, info,
i++, &grp_index);
if (ret) {
- dev_err(&pdev->dev, "No functions found.\n");
+ dev_err(dev, "No functions found.\n");
of_node_put(child);
return ret;
}
@@ -1675,24 +1664,25 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
static int st_pctl_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct st_pinctrl *info;
struct pinctrl_desc *pctl_desc;
int ret, i;
- if (!pdev->dev.of_node) {
- dev_err(&pdev->dev, "device node not found.\n");
+ if (!dev->of_node) {
+ dev_err(dev, "device node not found.\n");
return -EINVAL;
}
- pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
+ pctl_desc = devm_kzalloc(dev, sizeof(*pctl_desc), GFP_KERNEL);
if (!pctl_desc)
return -ENOMEM;
- info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- info->dev = &pdev->dev;
+ info->dev = dev;
platform_set_drvdata(pdev, info);
ret = st_pctl_probe_dt(pdev, pctl_desc, info);
if (ret)
@@ -1702,13 +1692,11 @@ static int st_pctl_probe(struct platform_device *pdev)
pctl_desc->pctlops = &st_pctlops;
pctl_desc->pmxops = &st_pmxops;
pctl_desc->confops = &st_confops;
- pctl_desc->name = dev_name(&pdev->dev);
+ pctl_desc->name = dev_name(dev);
- info->pctl = devm_pinctrl_register(&pdev->dev, pctl_desc, info);
- if (IS_ERR(info->pctl)) {
- dev_err(&pdev->dev, "Failed pinctrl registration\n");
- return PTR_ERR(info->pctl);
- }
+ info->pctl = devm_pinctrl_register(dev, pctl_desc, info);
+ if (IS_ERR(info->pctl))
+ return dev_err_probe(dev, PTR_ERR(info->pctl), "Failed pinctrl registration\n");
for (i = 0; i < info->nbanks; i++)
pinctrl_add_gpio_range(info->pctl, &info->banks[i].range);
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c
new file mode 100644
index 000000000000..0b912152a405
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-starfive.c
@@ -0,0 +1,1354 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinctrl / GPIO driver for StarFive JH7100 SoC
+ *
+ * Copyright (C) 2020 Shanghai StarFive Technology Co., Ltd.
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-starfive.h>
+
+#include "core.h"
+#include "pinctrl-utils.h"
+#include "pinmux.h"
+#include "pinconf.h"
+
+#define DRIVER_NAME "pinctrl-starfive"
+
+/*
+ * Refer to Section 12. GPIO Registers in the JH7100 data sheet:
+ * https://github.com/starfive-tech/JH7100_Docs
+ */
+#define NR_GPIOS 64
+
+/*
+ * Global enable for GPIO interrupts. If bit 0 is set to 1 the GPIO interrupts
+ * are enabled. If set to 0 the GPIO interrupts are disabled.
+ */
+#define GPIOEN 0x000
+
+/*
+ * The following 32-bit registers come in pairs, but only the offset of the
+ * first register is defined. The first controls (interrupts for) GPIO 0-31 and
+ * the second GPIO 32-63.
+ */
+
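A small addressing sketch for these register pairs (the helpers are illustrative, not part of the driver):

    /* 32-bit word and bit position covering GPIO n for a pair at 'reg' */
    #define GPIO_PAIR_WORD(reg, n)  ((reg) + 4 * ((n) / 32))
    #define GPIO_PAIR_BIT(n)        BIT((n) % 32)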
+/*
+ * Interrupt Type. If set to 1 the interrupt is edge-triggered. If set to 0 the
+ * interrupt is level-triggered.
+ */
+#define GPIOIS 0x010
+
+/*
+ * Edge-Trigger Interrupt Type. If set to 1 the interrupt gets triggered on
+ * both positive and negative edges. If set to 0 the interrupt is triggered by a
+ * single edge.
+ */
+#define GPIOIBE 0x018
+
+/*
+ * Interrupt Trigger Polarity. If set to 1 the interrupt is triggered on a
+ * rising edge (edge-triggered) or high level (level-triggered). If set to 0 the
+ * interrupt is triggered on a falling edge (edge-triggered) or low level
+ * (level-triggered).
+ */
+#define GPIOIEV 0x020
+
+/*
+ * Interrupt Mask. If set to 1 the interrupt is enabled (unmasked). If set to 0
+ * the interrupt is disabled (masked). Note that the current documentation is
+ * wrong and says the exact opposite of this.
+ */
+#define GPIOIE 0x028
+
+/*
+ * Clear Edge-Triggered Interrupts. Write a 1 to clear the edge-triggered
+ * interrupt.
+ */
+#define GPIOIC 0x030
+
+/*
+ * Edge-Triggered Interrupt Status. A 1 means the configured edge was detected.
+ */
+#define GPIORIS 0x038
+
+/*
+ * Interrupt Status after Masking. A 1 means the configured edge or level was
+ * detected and not masked.
+ */
+#define GPIOMIS 0x040
+
+/*
+ * Data Value. Dynamically reflects the value of the GPIO pin. If 1 the pin is
+ * a digital 1 and if 0 the pin is a digital 0.
+ */
+#define GPIODIN 0x048
+
+/*
+ * From the data sheet section 12.2, there are 64 32-bit output data registers
+ * and 64 output enable registers. Output data and output enable registers for
+ * a given GPIO are contiguous. E.g. GPO0_DOUT_CFG is 0x50 and GPO0_DOEN_CFG is
+ * 0x54 while GPO1_DOUT_CFG is 0x58 and GPO1_DOEN_CFG is 0x5c. The stride
+ * between GPIO registers is effectively 8, thus: GPOn_DOUT_CFG is 0x50 + 8n
+ * and GPOn_DOEN_CFG is 0x54 + 8n.
+ */
+#define GPON_DOUT_CFG 0x050
+#define GPON_DOEN_CFG 0x054
+
+/*
+ * From Section 12.3, there are 75 input signal configuration registers which
+ * are 4 bytes wide starting with GPI_CPU_JTAG_TCK_CFG at 0x250 and ending with
+ * GPI_USB_OVER_CURRENT_CFG at 0x378.
+ */
+#define GPI_CFG_OFFSET 0x250
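+
+/*
+ * Sanity check on the range above: the last input config register,
+ * GPI_USB_OVER_CURRENT_CFG, is the 75th (index 74), and indeed
+ * 0x250 + 4 * 74 = 0x378.
+ */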
+
+/*
+ * Pad Control Bits. There are 16 pad control bits for each pin located in 103
+ * 32-bit registers controlling PAD_GPIO[0] to PAD_GPIO[63] followed by
+ * PAD_FUNC_SHARE[0] to PAD_FUNC_SHARE[141]. Odd-numbered pins use the upper 16
+ * bits of each register.
+ */
+#define PAD_SLEW_RATE_MASK GENMASK(11, 9)
+#define PAD_SLEW_RATE_POS 9
+#define PAD_BIAS_STRONG_PULL_UP BIT(8)
+#define PAD_INPUT_ENABLE BIT(7)
+#define PAD_INPUT_SCHMITT_ENABLE BIT(6)
+#define PAD_BIAS_DISABLE BIT(5)
+#define PAD_BIAS_PULL_DOWN BIT(4)
+#define PAD_BIAS_MASK \
+ (PAD_BIAS_STRONG_PULL_UP | \
+ PAD_BIAS_DISABLE | \
+ PAD_BIAS_PULL_DOWN)
+#define PAD_DRIVE_STRENGTH_MASK GENMASK(3, 0)
+#define PAD_DRIVE_STRENGTH_POS 0
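+
+/*
+ * For example, the pad control bits of pin 7 live in the 32-bit word at
+ * offset 4 * (7 / 2) = 0xc into the padctl region, in its upper half
+ * (shift 16 * (7 % 2) = 16), exactly as computed by starfive_padctl_get()
+ * and starfive_padctl_rmw() below.
+ */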
+
+/*
+ * From Section 11, the IO_PADSHARE_SEL register can be programmed to select
+ * one of seven pre-defined multiplexed signal groups on PAD_FUNC_SHARE and
+ * PAD_GPIO pads. This is a global setting.
+ */
+#define IO_PADSHARE_SEL 0x1a0
+
+/*
+ * This just needs to be some number such that when
+ * sfp->gpios.pin_base = PAD_INVALID_GPIO then
+ * starfive_pin_to_gpio(sfp, validpin) is never a valid GPIO number.
+ * That is, it should underflow and return something >= NR_GPIOS.
+ */
+#define PAD_INVALID_GPIO 0x10000
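+/*
+ * E.g. assuming PAD_GPIO(0) expands to 0 in the dt-bindings header,
+ * starfive_pin_to_gpio() then computes 0 - 0x10000, which wraps to
+ * 0xffff0000 as an unsigned int and is rejected by every gpio >= NR_GPIOS
+ * check.
+ */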
+
+/*
+ * The packed pinmux values from the device tree look like this:
+ *
+ * | 31 - 24 | 23 - 16 | 15 - 8 | 7 | 6 | 5 - 0 |
+ * | dout | doen | din | dout rev | doen rev | gpio nr |
+ *
+ * ...but the GPOn_DOUT_CFG and GPOn_DOEN_CFG registers look like this:
+ *
+ * | 31 | 30 - 8 | 7 - 0 |
+ * | dout/doen rev | unused | dout/doen |
+ */
+static unsigned int starfive_pinmux_to_gpio(u32 v)
+{
+ return v & (NR_GPIOS - 1);
+}
+
+static u32 starfive_pinmux_to_dout(u32 v)
+{
+ return ((v & BIT(7)) << (31 - 7)) | ((v >> 24) & GENMASK(7, 0));
+}
+
+static u32 starfive_pinmux_to_doen(u32 v)
+{
+ return ((v & BIT(6)) << (31 - 6)) | ((v >> 16) & GENMASK(7, 0));
+}
+
+static u32 starfive_pinmux_to_din(u32 v)
+{
+ return (v >> 8) & GENMASK(7, 0);
+}
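+
+/*
+ * Worked example with an illustrative (not board-derived) packed value
+ * v = 0x00552a41:
+ *   gpio nr = 0x41 & 0x3f = 1
+ *   dout    = 0x00 with rev bit 7 clear -> 0x00000000
+ *   doen    = 0x55 with rev bit 6 set   -> 0x80000055
+ *   din     = 0x2a
+ */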
+
+/*
+ * The maximum GPIO output current depends on the chosen drive strength:
+ *
+ * DS: 0 1 2 3 4 5 6 7
+ * mA: 14.2 21.2 28.2 35.2 42.2 49.1 56.0 62.8
+ *
+ * After rounding, that is 7 * DS + 14 mA.
+ */
+static u32 starfive_drive_strength_to_max_mA(u16 ds)
+{
+ return 7 * ds + 14;
+}
+
+static u16 starfive_drive_strength_from_max_mA(u32 i)
+{
+ return (clamp(i, 14U, 63U) - 14) / 7;
+}
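+
+/*
+ * E.g. ds = 3 allows at most 7 * 3 + 14 = 35 mA, and a request for 40 mA
+ * maps back to ds = (40 - 14) / 7 = 3.
+ */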
+
+struct starfive_pinctrl {
+ struct gpio_chip gc;
+ struct pinctrl_gpio_range gpios;
+ raw_spinlock_t lock;
+ void __iomem *base;
+ void __iomem *padctl;
+ struct pinctrl_dev *pctl;
+};
+
+static inline unsigned int starfive_pin_to_gpio(const struct starfive_pinctrl *sfp,
+ unsigned int pin)
+{
+ return pin - sfp->gpios.pin_base;
+}
+
+static inline unsigned int starfive_gpio_to_pin(const struct starfive_pinctrl *sfp,
+ unsigned int gpio)
+{
+ return sfp->gpios.pin_base + gpio;
+}
+
+static struct starfive_pinctrl *starfive_from_irq_data(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ return container_of(gc, struct starfive_pinctrl, gc);
+}
+
+static struct starfive_pinctrl *starfive_from_irq_desc(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+
+ return container_of(gc, struct starfive_pinctrl, gc);
+}
+
+static const struct pinctrl_pin_desc starfive_pins[] = {
+ PINCTRL_PIN(PAD_GPIO(0), "GPIO[0]"),
+ PINCTRL_PIN(PAD_GPIO(1), "GPIO[1]"),
+ PINCTRL_PIN(PAD_GPIO(2), "GPIO[2]"),
+ PINCTRL_PIN(PAD_GPIO(3), "GPIO[3]"),
+ PINCTRL_PIN(PAD_GPIO(4), "GPIO[4]"),
+ PINCTRL_PIN(PAD_GPIO(5), "GPIO[5]"),
+ PINCTRL_PIN(PAD_GPIO(6), "GPIO[6]"),
+ PINCTRL_PIN(PAD_GPIO(7), "GPIO[7]"),
+ PINCTRL_PIN(PAD_GPIO(8), "GPIO[8]"),
+ PINCTRL_PIN(PAD_GPIO(9), "GPIO[9]"),
+ PINCTRL_PIN(PAD_GPIO(10), "GPIO[10]"),
+ PINCTRL_PIN(PAD_GPIO(11), "GPIO[11]"),
+ PINCTRL_PIN(PAD_GPIO(12), "GPIO[12]"),
+ PINCTRL_PIN(PAD_GPIO(13), "GPIO[13]"),
+ PINCTRL_PIN(PAD_GPIO(14), "GPIO[14]"),
+ PINCTRL_PIN(PAD_GPIO(15), "GPIO[15]"),
+ PINCTRL_PIN(PAD_GPIO(16), "GPIO[16]"),
+ PINCTRL_PIN(PAD_GPIO(17), "GPIO[17]"),
+ PINCTRL_PIN(PAD_GPIO(18), "GPIO[18]"),
+ PINCTRL_PIN(PAD_GPIO(19), "GPIO[19]"),
+ PINCTRL_PIN(PAD_GPIO(20), "GPIO[20]"),
+ PINCTRL_PIN(PAD_GPIO(21), "GPIO[21]"),
+ PINCTRL_PIN(PAD_GPIO(22), "GPIO[22]"),
+ PINCTRL_PIN(PAD_GPIO(23), "GPIO[23]"),
+ PINCTRL_PIN(PAD_GPIO(24), "GPIO[24]"),
+ PINCTRL_PIN(PAD_GPIO(25), "GPIO[25]"),
+ PINCTRL_PIN(PAD_GPIO(26), "GPIO[26]"),
+ PINCTRL_PIN(PAD_GPIO(27), "GPIO[27]"),
+ PINCTRL_PIN(PAD_GPIO(28), "GPIO[28]"),
+ PINCTRL_PIN(PAD_GPIO(29), "GPIO[29]"),
+ PINCTRL_PIN(PAD_GPIO(30), "GPIO[30]"),
+ PINCTRL_PIN(PAD_GPIO(31), "GPIO[31]"),
+ PINCTRL_PIN(PAD_GPIO(32), "GPIO[32]"),
+ PINCTRL_PIN(PAD_GPIO(33), "GPIO[33]"),
+ PINCTRL_PIN(PAD_GPIO(34), "GPIO[34]"),
+ PINCTRL_PIN(PAD_GPIO(35), "GPIO[35]"),
+ PINCTRL_PIN(PAD_GPIO(36), "GPIO[36]"),
+ PINCTRL_PIN(PAD_GPIO(37), "GPIO[37]"),
+ PINCTRL_PIN(PAD_GPIO(38), "GPIO[38]"),
+ PINCTRL_PIN(PAD_GPIO(39), "GPIO[39]"),
+ PINCTRL_PIN(PAD_GPIO(40), "GPIO[40]"),
+ PINCTRL_PIN(PAD_GPIO(41), "GPIO[41]"),
+ PINCTRL_PIN(PAD_GPIO(42), "GPIO[42]"),
+ PINCTRL_PIN(PAD_GPIO(43), "GPIO[43]"),
+ PINCTRL_PIN(PAD_GPIO(44), "GPIO[44]"),
+ PINCTRL_PIN(PAD_GPIO(45), "GPIO[45]"),
+ PINCTRL_PIN(PAD_GPIO(46), "GPIO[46]"),
+ PINCTRL_PIN(PAD_GPIO(47), "GPIO[47]"),
+ PINCTRL_PIN(PAD_GPIO(48), "GPIO[48]"),
+ PINCTRL_PIN(PAD_GPIO(49), "GPIO[49]"),
+ PINCTRL_PIN(PAD_GPIO(50), "GPIO[50]"),
+ PINCTRL_PIN(PAD_GPIO(51), "GPIO[51]"),
+ PINCTRL_PIN(PAD_GPIO(52), "GPIO[52]"),
+ PINCTRL_PIN(PAD_GPIO(53), "GPIO[53]"),
+ PINCTRL_PIN(PAD_GPIO(54), "GPIO[54]"),
+ PINCTRL_PIN(PAD_GPIO(55), "GPIO[55]"),
+ PINCTRL_PIN(PAD_GPIO(56), "GPIO[56]"),
+ PINCTRL_PIN(PAD_GPIO(57), "GPIO[57]"),
+ PINCTRL_PIN(PAD_GPIO(58), "GPIO[58]"),
+ PINCTRL_PIN(PAD_GPIO(59), "GPIO[59]"),
+ PINCTRL_PIN(PAD_GPIO(60), "GPIO[60]"),
+ PINCTRL_PIN(PAD_GPIO(61), "GPIO[61]"),
+ PINCTRL_PIN(PAD_GPIO(62), "GPIO[62]"),
+ PINCTRL_PIN(PAD_GPIO(63), "GPIO[63]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(0), "FUNC_SHARE[0]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(1), "FUNC_SHARE[1]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(2), "FUNC_SHARE[2]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(3), "FUNC_SHARE[3]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(4), "FUNC_SHARE[4]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(5), "FUNC_SHARE[5]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(6), "FUNC_SHARE[6]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(7), "FUNC_SHARE[7]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(8), "FUNC_SHARE[8]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(9), "FUNC_SHARE[9]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(10), "FUNC_SHARE[10]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(11), "FUNC_SHARE[11]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(12), "FUNC_SHARE[12]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(13), "FUNC_SHARE[13]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(14), "FUNC_SHARE[14]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(15), "FUNC_SHARE[15]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(16), "FUNC_SHARE[16]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(17), "FUNC_SHARE[17]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(18), "FUNC_SHARE[18]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(19), "FUNC_SHARE[19]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(20), "FUNC_SHARE[20]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(21), "FUNC_SHARE[21]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(22), "FUNC_SHARE[22]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(23), "FUNC_SHARE[23]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(24), "FUNC_SHARE[24]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(25), "FUNC_SHARE[25]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(26), "FUNC_SHARE[26]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(27), "FUNC_SHARE[27]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(28), "FUNC_SHARE[28]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(29), "FUNC_SHARE[29]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(30), "FUNC_SHARE[30]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(31), "FUNC_SHARE[31]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(32), "FUNC_SHARE[32]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(33), "FUNC_SHARE[33]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(34), "FUNC_SHARE[34]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(35), "FUNC_SHARE[35]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(36), "FUNC_SHARE[36]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(37), "FUNC_SHARE[37]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(38), "FUNC_SHARE[38]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(39), "FUNC_SHARE[39]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(40), "FUNC_SHARE[40]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(41), "FUNC_SHARE[41]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(42), "FUNC_SHARE[42]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(43), "FUNC_SHARE[43]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(44), "FUNC_SHARE[44]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(45), "FUNC_SHARE[45]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(46), "FUNC_SHARE[46]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(47), "FUNC_SHARE[47]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(48), "FUNC_SHARE[48]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(49), "FUNC_SHARE[49]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(50), "FUNC_SHARE[50]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(51), "FUNC_SHARE[51]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(52), "FUNC_SHARE[52]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(53), "FUNC_SHARE[53]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(54), "FUNC_SHARE[54]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(55), "FUNC_SHARE[55]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(56), "FUNC_SHARE[56]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(57), "FUNC_SHARE[57]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(58), "FUNC_SHARE[58]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(59), "FUNC_SHARE[59]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(60), "FUNC_SHARE[60]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(61), "FUNC_SHARE[61]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(62), "FUNC_SHARE[62]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(63), "FUNC_SHARE[63]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(64), "FUNC_SHARE[64]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(65), "FUNC_SHARE[65]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(66), "FUNC_SHARE[66]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(67), "FUNC_SHARE[67]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(68), "FUNC_SHARE[68]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(69), "FUNC_SHARE[69]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(70), "FUNC_SHARE[70]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(71), "FUNC_SHARE[71]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(72), "FUNC_SHARE[72]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(73), "FUNC_SHARE[73]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(74), "FUNC_SHARE[74]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(75), "FUNC_SHARE[75]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(76), "FUNC_SHARE[76]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(77), "FUNC_SHARE[77]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(78), "FUNC_SHARE[78]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(79), "FUNC_SHARE[79]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(80), "FUNC_SHARE[80]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(81), "FUNC_SHARE[81]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(82), "FUNC_SHARE[82]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(83), "FUNC_SHARE[83]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(84), "FUNC_SHARE[84]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(85), "FUNC_SHARE[85]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(86), "FUNC_SHARE[86]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(87), "FUNC_SHARE[87]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(88), "FUNC_SHARE[88]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(89), "FUNC_SHARE[89]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(90), "FUNC_SHARE[90]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(91), "FUNC_SHARE[91]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(92), "FUNC_SHARE[92]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(93), "FUNC_SHARE[93]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(94), "FUNC_SHARE[94]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(95), "FUNC_SHARE[95]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(96), "FUNC_SHARE[96]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(97), "FUNC_SHARE[97]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(98), "FUNC_SHARE[98]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(99), "FUNC_SHARE[99]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(100), "FUNC_SHARE[100]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(101), "FUNC_SHARE[101]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(102), "FUNC_SHARE[102]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(103), "FUNC_SHARE[103]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(104), "FUNC_SHARE[104]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(105), "FUNC_SHARE[105]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(106), "FUNC_SHARE[106]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(107), "FUNC_SHARE[107]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(108), "FUNC_SHARE[108]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(109), "FUNC_SHARE[109]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(110), "FUNC_SHARE[110]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(111), "FUNC_SHARE[111]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(112), "FUNC_SHARE[112]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(113), "FUNC_SHARE[113]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(114), "FUNC_SHARE[114]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(115), "FUNC_SHARE[115]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(116), "FUNC_SHARE[116]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(117), "FUNC_SHARE[117]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(118), "FUNC_SHARE[118]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(119), "FUNC_SHARE[119]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(120), "FUNC_SHARE[120]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(121), "FUNC_SHARE[121]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(122), "FUNC_SHARE[122]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(123), "FUNC_SHARE[123]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(124), "FUNC_SHARE[124]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(125), "FUNC_SHARE[125]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(126), "FUNC_SHARE[126]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(127), "FUNC_SHARE[127]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(128), "FUNC_SHARE[128]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(129), "FUNC_SHARE[129]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(130), "FUNC_SHARE[130]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(131), "FUNC_SHARE[131]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(132), "FUNC_SHARE[132]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(133), "FUNC_SHARE[133]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(134), "FUNC_SHARE[134]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(135), "FUNC_SHARE[135]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(136), "FUNC_SHARE[136]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(137), "FUNC_SHARE[137]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(138), "FUNC_SHARE[138]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(139), "FUNC_SHARE[139]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(140), "FUNC_SHARE[140]"),
+ PINCTRL_PIN(PAD_FUNC_SHARE(141), "FUNC_SHARE[141]"),
+};
+
+#ifdef CONFIG_DEBUG_FS
+static void starfive_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned int pin)
+{
+ struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int gpio = starfive_pin_to_gpio(sfp, pin);
+ void __iomem *reg;
+ u32 dout, doen;
+
+ if (gpio >= NR_GPIOS)
+ return;
+
+ reg = sfp->base + GPON_DOUT_CFG + 8 * gpio;
+ dout = readl_relaxed(reg + 0x000);
+ doen = readl_relaxed(reg + 0x004);
+
+ seq_printf(s, "dout=%lu%s doen=%lu%s",
+ dout & GENMASK(7, 0), (dout & BIT(31)) ? "r" : "",
+ doen & GENMASK(7, 0), (doen & BIT(31)) ? "r" : "");
+}
+#else
+#define starfive_pin_dbg_show NULL
+#endif
+
+static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **maps,
+ unsigned int *num_maps)
+{
+ struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = sfp->gc.parent;
+ struct device_node *child;
+ struct pinctrl_map *map;
+ const char **pgnames;
+ const char *grpname;
+ u32 *pinmux;
+ int ngroups;
+ int *pins;
+ int nmaps;
+ int ret;
+
+ nmaps = 0;
+ ngroups = 0;
+ for_each_child_of_node(np, child) {
+ int npinmux = of_property_count_u32_elems(child, "pinmux");
+ int npins = of_property_count_u32_elems(child, "pins");
+
+ if (npinmux > 0 && npins > 0) {
+ dev_err(dev, "invalid pinctrl group %pOFn.%pOFn: both pinmux and pins set\n",
+ np, child);
+ of_node_put(child);
+ return -EINVAL;
+ }
+ if (npinmux == 0 && npins == 0) {
+ dev_err(dev, "invalid pinctrl group %pOFn.%pOFn: neither pinmux nor pins set\n",
+ np, child);
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (npinmux > 0)
+ nmaps += 2;
+ else
+ nmaps += 1;
+ ngroups += 1;
+ }
+
+ pgnames = devm_kcalloc(dev, ngroups, sizeof(*pgnames), GFP_KERNEL);
+ if (!pgnames)
+ return -ENOMEM;
+
+ map = kcalloc(nmaps, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ nmaps = 0;
+ ngroups = 0;
+ for_each_child_of_node(np, child) {
+ int npins;
+ int i;
+
+ grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn", np, child);
+ if (!grpname) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
+
+ pgnames[ngroups++] = grpname;
+
+ if ((npins = of_property_count_u32_elems(child, "pinmux")) > 0) {
+ pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
+
+ pinmux = devm_kcalloc(dev, npins, sizeof(*pinmux), GFP_KERNEL);
+ if (!pinmux) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
+
+ ret = of_property_read_u32_array(child, "pinmux", pinmux, npins);
+ if (ret)
+ goto put_child;
+
+ for (i = 0; i < npins; i++) {
+ unsigned int gpio = starfive_pinmux_to_gpio(pinmux[i]);
+
+ pins[i] = starfive_gpio_to_pin(sfp, gpio);
+ }
+
+ map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
+ map[nmaps].data.mux.function = np->name;
+ map[nmaps].data.mux.group = grpname;
+ nmaps += 1;
+ } else if ((npins = of_property_count_u32_elems(child, "pins")) > 0) {
+ pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
+
+ pinmux = NULL;
+
+ for (i = 0; i < npins; i++) {
+ u32 v;
+
+ ret = of_property_read_u32_index(child, "pins", i, &v);
+ if (ret)
+ goto put_child;
+ pins[i] = v;
+ }
+ } else {
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ ret = pinctrl_generic_add_group(pctldev, grpname, pins, npins, pinmux);
+ if (ret < 0) {
+ dev_err(dev, "error adding group %s: %d\n", grpname, ret);
+ goto put_child;
+ }
+
+ ret = pinconf_generic_parse_dt_config(child, pctldev,
+ &map[nmaps].data.configs.configs,
+ &map[nmaps].data.configs.num_configs);
+ if (ret) {
+ dev_err(dev, "error parsing pin config of group %s: %d\n",
+ grpname, ret);
+ goto put_child;
+ }
+
+ /* don't create a map if there are no pinconf settings */
+ if (map[nmaps].data.configs.num_configs == 0)
+ continue;
+
+ map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ map[nmaps].data.configs.group_or_pin = grpname;
+ nmaps += 1;
+ }
+
+ ret = pinmux_generic_add_function(pctldev, np->name, pgnames, ngroups, NULL);
+ if (ret < 0) {
+ dev_err(dev, "error adding function %s: %d\n", np->name, ret);
+ goto free_map;
+ }
+
+ *maps = map;
+ *num_maps = nmaps;
+ return 0;
+
+put_child:
+ of_node_put(child);
+free_map:
+ pinctrl_utils_free_map(pctldev, map, nmaps);
+ return ret;
+}
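+
+/*
+ * Illustrative device tree fragment consumed by starfive_dt_node_to_map()
+ * above (node names and the pinmux value are hypothetical, not taken from
+ * a real board):
+ *
+ *	uart-pins {
+ *		uart-mux {
+ *			pinmux = <0x00552a41>;
+ *			bias-disable;
+ *		};
+ *	};
+ *
+ * Each child node becomes one pin group; a "pinmux" child yields a mux map
+ * plus a configs map when pinconf properties are present, while a "pins"
+ * child yields only a configs map.
+ */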
+
+static const struct pinctrl_ops starfive_pinctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .pin_dbg_show = starfive_pin_dbg_show,
+ .dt_node_to_map = starfive_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int starfive_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int fsel, unsigned int gsel)
+{
+ struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = sfp->gc.parent;
+ const struct group_desc *group;
+ const u32 *pinmux;
+ unsigned int i;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ pinmux = group->data;
+ for (i = 0; i < group->num_pins; i++) {
+ u32 v = pinmux[i];
+ unsigned int gpio = starfive_pinmux_to_gpio(v);
+ u32 dout = starfive_pinmux_to_dout(v);
+ u32 doen = starfive_pinmux_to_doen(v);
+ u32 din = starfive_pinmux_to_din(v);
+ void __iomem *reg_dout;
+ void __iomem *reg_doen;
+ void __iomem *reg_din;
+ unsigned long flags;
+
+ dev_dbg(dev, "GPIO%u: dout=0x%x doen=0x%x din=0x%x\n",
+ gpio, dout, doen, din);
+
+ reg_dout = sfp->base + GPON_DOUT_CFG + 8 * gpio;
+ reg_doen = sfp->base + GPON_DOEN_CFG + 8 * gpio;
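+ /*
+ * Note: the GPI config write below programs gpio + 2, which suggests
+ * selector values 0 and 1 are reserved (presumably for constant
+ * low/high levels); this is an inference from the offset, not a
+ * statement from the data sheet.
+ */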
+ if (din != GPI_NONE)
+ reg_din = sfp->base + GPI_CFG_OFFSET + 4 * din;
+ else
+ reg_din = NULL;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ writel_relaxed(dout, reg_dout);
+ writel_relaxed(doen, reg_doen);
+ if (reg_din)
+ writel_relaxed(gpio + 2, reg_din);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops starfive_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = starfive_set_mux,
+ .strict = true,
+};
+
+static u16 starfive_padctl_get(struct starfive_pinctrl *sfp,
+ unsigned int pin)
+{
+ void __iomem *reg = sfp->padctl + 4 * (pin / 2);
+ int shift = 16 * (pin % 2);
+
+ return readl_relaxed(reg) >> shift;
+}
+
+static void starfive_padctl_rmw(struct starfive_pinctrl *sfp,
+ unsigned int pin,
+ u16 _mask, u16 _value)
+{
+ void __iomem *reg = sfp->padctl + 4 * (pin / 2);
+ int shift = 16 * (pin % 2);
+ u32 mask = (u32)_mask << shift;
+ u32 value = (u32)_value << shift;
+ unsigned long flags;
+
+ dev_dbg(sfp->gc.parent, "padctl_rmw(%u, 0x%03x, 0x%03x)\n", pin, _mask, _value);
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ value |= readl_relaxed(reg) & ~mask;
+ writel_relaxed(value, reg);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+#define PIN_CONFIG_STARFIVE_STRONG_PULL_UP (PIN_CONFIG_END + 1)
+
+static const struct pinconf_generic_params starfive_pinconf_custom_params[] = {
+ { "starfive,strong-pull-up", PIN_CONFIG_STARFIVE_STRONG_PULL_UP, 1 },
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item starfive_pinconf_custom_conf_items[] = {
+ PCONFDUMP(PIN_CONFIG_STARFIVE_STRONG_PULL_UP, "input bias strong pull-up", NULL, false),
+};
+
+static_assert(ARRAY_SIZE(starfive_pinconf_custom_conf_items) ==
+ ARRAY_SIZE(starfive_pinconf_custom_params));
+#else
+#define starfive_pinconf_custom_conf_items NULL
+#endif
+
+static int starfive_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ int param = pinconf_to_config_param(*config);
+ u16 value = starfive_padctl_get(sfp, pin);
+ bool enabled;
+ u32 arg;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ enabled = value & PAD_BIAS_DISABLE;
+ arg = 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ enabled = value & PAD_BIAS_PULL_DOWN;
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ enabled = !(value & PAD_BIAS_MASK);
+ arg = 1;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ enabled = value & PAD_DRIVE_STRENGTH_MASK;
+ arg = starfive_drive_strength_to_max_mA(value & PAD_DRIVE_STRENGTH_MASK);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ enabled = value & PAD_INPUT_ENABLE;
+ arg = enabled;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ enabled = value & PAD_INPUT_SCHMITT_ENABLE;
+ arg = enabled;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ enabled = value & PAD_SLEW_RATE_MASK;
+ arg = (value & PAD_SLEW_RATE_MASK) >> PAD_SLEW_RATE_POS;
+ break;
+ case PIN_CONFIG_STARFIVE_STRONG_PULL_UP:
+ enabled = value & PAD_BIAS_STRONG_PULL_UP;
+ arg = enabled;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return enabled ? 0 : -EINVAL;
+}
+
+static int starfive_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int gsel, unsigned long *config)
+{
+ const struct group_desc *group;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ return starfive_pinconf_get(pctldev, group->pins[0], config);
+}
+
+static int starfive_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int gsel,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ const struct group_desc *group;
+ u16 mask, value;
+ int i;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ mask = 0;
+ value = 0;
+ for (i = 0; i < num_configs; i++) {
+ int param = pinconf_to_config_param(configs[i]);
+ u32 arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ mask |= PAD_BIAS_MASK;
+ value = (value & ~PAD_BIAS_MASK) | PAD_BIAS_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (arg == 0)
+ return -ENOTSUPP;
+ mask |= PAD_BIAS_MASK;
+ value = (value & ~PAD_BIAS_MASK) | PAD_BIAS_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (arg == 0)
+ return -ENOTSUPP;
+ mask |= PAD_BIAS_MASK;
+ value = value & ~PAD_BIAS_MASK;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ mask |= PAD_DRIVE_STRENGTH_MASK;
+ value = (value & ~PAD_DRIVE_STRENGTH_MASK) |
+ starfive_drive_strength_from_max_mA(arg);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ mask |= PAD_INPUT_ENABLE;
+ if (arg)
+ value |= PAD_INPUT_ENABLE;
+ else
+ value &= ~PAD_INPUT_ENABLE;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ mask |= PAD_INPUT_SCHMITT_ENABLE;
+ if (arg)
+ value |= PAD_INPUT_SCHMITT_ENABLE;
+ else
+ value &= ~PAD_INPUT_SCHMITT_ENABLE;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ mask |= PAD_SLEW_RATE_MASK;
+ value = (value & ~PAD_SLEW_RATE_MASK) |
+ ((arg << PAD_SLEW_RATE_POS) & PAD_SLEW_RATE_MASK);
+ break;
+ case PIN_CONFIG_STARFIVE_STRONG_PULL_UP:
+ if (arg) {
+ mask |= PAD_BIAS_MASK;
+ value = (value & ~PAD_BIAS_MASK) |
+ PAD_BIAS_STRONG_PULL_UP;
+ } else {
+ mask |= PAD_BIAS_STRONG_PULL_UP;
+ value = value & ~PAD_BIAS_STRONG_PULL_UP;
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ for (i = 0; i < group->num_pins; i++)
+ starfive_padctl_rmw(sfp, group->pins[i], mask, value);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void starfive_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int pin)
+{
+ struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ u16 value = starfive_padctl_get(sfp, pin);
+
+ seq_printf(s, " (0x%03x)", value);
+}
+#else
+#define starfive_pinconf_dbg_show NULL
+#endif
+
+static const struct pinconf_ops starfive_pinconf_ops = {
+ .pin_config_get = starfive_pinconf_get,
+ .pin_config_group_get = starfive_pinconf_group_get,
+ .pin_config_group_set = starfive_pinconf_group_set,
+ .pin_config_dbg_show = starfive_pinconf_dbg_show,
+ .is_generic = true,
+};
+
+static struct pinctrl_desc starfive_desc = {
+ .name = DRIVER_NAME,
+ .pins = starfive_pins,
+ .npins = ARRAY_SIZE(starfive_pins),
+ .pctlops = &starfive_pinctrl_ops,
+ .pmxops = &starfive_pinmux_ops,
+ .confops = &starfive_pinconf_ops,
+ .owner = THIS_MODULE,
+ .num_custom_params = ARRAY_SIZE(starfive_pinconf_custom_params),
+ .custom_params = starfive_pinconf_custom_params,
+ .custom_conf_items = starfive_pinconf_custom_conf_items,
+};
+
+static int starfive_gpio_request(struct gpio_chip *gc, unsigned int gpio)
+{
+ return pinctrl_gpio_request(gc->base + gpio);
+}
+
+static void starfive_gpio_free(struct gpio_chip *gc, unsigned int gpio)
+{
+ pinctrl_gpio_free(gc->base + gpio);
+}
+
+static int starfive_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
+ void __iomem *doen = sfp->base + GPON_DOEN_CFG + 8 * gpio;
+
+ if (readl_relaxed(doen) == GPO_ENABLE)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int starfive_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int gpio)
+{
+ struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
+ void __iomem *doen = sfp->base + GPON_DOEN_CFG + 8 * gpio;
+ unsigned long flags;
+
+ /* enable input and schmitt trigger */
+ starfive_padctl_rmw(sfp, starfive_gpio_to_pin(sfp, gpio),
+ PAD_INPUT_ENABLE | PAD_INPUT_SCHMITT_ENABLE,
+ PAD_INPUT_ENABLE | PAD_INPUT_SCHMITT_ENABLE);
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ writel_relaxed(GPO_DISABLE, doen);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+ return 0;
+}
+
+static int starfive_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int gpio, int value)
+{
+ struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
+ void __iomem *dout = sfp->base + GPON_DOUT_CFG + 8 * gpio;
+ void __iomem *doen = sfp->base + GPON_DOEN_CFG + 8 * gpio;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ writel_relaxed(value, dout);
+ writel_relaxed(GPO_ENABLE, doen);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+
+ /* disable input, schmitt trigger and bias */
+ starfive_padctl_rmw(sfp, starfive_gpio_to_pin(sfp, gpio),
+ PAD_BIAS_MASK | PAD_INPUT_ENABLE | PAD_INPUT_SCHMITT_ENABLE,
+ PAD_BIAS_DISABLE);
+
+ return 0;
+}
+
+static int starfive_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
+ void __iomem *din = sfp->base + GPIODIN + 4 * (gpio / 32);
+
+ return !!(readl_relaxed(din) & BIT(gpio % 32));
+}
+
+static void starfive_gpio_set(struct gpio_chip *gc, unsigned int gpio,
+ int value)
+{
+ struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
+ void __iomem *dout = sfp->base + GPON_DOUT_CFG + 8 * gpio;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ writel_relaxed(value, dout);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static int starfive_gpio_set_config(struct gpio_chip *gc, unsigned int gpio,
+ unsigned long config)
+{
+ struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
+ u32 arg = pinconf_to_config_argument(config);
+ u16 value;
+ u16 mask;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ mask = PAD_BIAS_MASK;
+ value = PAD_BIAS_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (arg == 0)
+ return -ENOTSUPP;
+ mask = PAD_BIAS_MASK;
+ value = PAD_BIAS_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (arg == 0)
+ return -ENOTSUPP;
+ mask = PAD_BIAS_MASK;
+ value = 0;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return 0;
+ case PIN_CONFIG_INPUT_ENABLE:
+ mask = PAD_INPUT_ENABLE;
+ value = arg ? PAD_INPUT_ENABLE : 0;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ mask = PAD_INPUT_SCHMITT_ENABLE;
+ value = arg ? PAD_INPUT_SCHMITT_ENABLE : 0;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ starfive_padctl_rmw(sfp, starfive_gpio_to_pin(sfp, gpio), mask, value);
+ return 0;
+}
+
+static int starfive_gpio_add_pin_ranges(struct gpio_chip *gc)
+{
+ struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
+
+ sfp->gpios.name = sfp->gc.label;
+ sfp->gpios.base = sfp->gc.base;
+ /*
+ * sfp->gpios.pin_base depends on the chosen signal group
+ * and is set in starfive_probe()
+ */
+ sfp->gpios.npins = NR_GPIOS;
+ sfp->gpios.gc = &sfp->gc;
+ pinctrl_add_gpio_range(sfp->pctl, &sfp->gpios);
+ return 0;
+}
+
+static void starfive_irq_ack(struct irq_data *d)
+{
+ struct starfive_pinctrl *sfp = starfive_from_irq_data(d);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *ic = sfp->base + GPIOIC + 4 * (gpio / 32);
+ u32 mask = BIT(gpio % 32);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ writel_relaxed(mask, ic);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static void starfive_irq_mask(struct irq_data *d)
+{
+ struct starfive_pinctrl *sfp = starfive_from_irq_data(d);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *ie = sfp->base + GPIOIE + 4 * (gpio / 32);
+ u32 mask = BIT(gpio % 32);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ value = readl_relaxed(ie) & ~mask;
+ writel_relaxed(value, ie);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static void starfive_irq_mask_ack(struct irq_data *d)
+{
+ struct starfive_pinctrl *sfp = starfive_from_irq_data(d);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *ie = sfp->base + GPIOIE + 4 * (gpio / 32);
+ void __iomem *ic = sfp->base + GPIOIC + 4 * (gpio / 32);
+ u32 mask = BIT(gpio % 32);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ value = readl_relaxed(ie) & ~mask;
+ writel_relaxed(value, ie);
+ writel_relaxed(mask, ic);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static void starfive_irq_unmask(struct irq_data *d)
+{
+ struct starfive_pinctrl *sfp = starfive_from_irq_data(d);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *ie = sfp->base + GPIOIE + 4 * (gpio / 32);
+ u32 mask = BIT(gpio % 32);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ value = readl_relaxed(ie) | mask;
+ writel_relaxed(value, ie);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+ struct starfive_pinctrl *sfp = starfive_from_irq_data(d);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *base = sfp->base + 4 * (gpio / 32);
+ u32 mask = BIT(gpio % 32);
+ u32 irq_type, edge_both, polarity;
+ unsigned long flags;
+
+ switch (trigger) {
+ case IRQ_TYPE_EDGE_RISING:
+ irq_type = mask; /* 1: edge triggered */
+ edge_both = 0; /* 0: single edge */
+ polarity = mask; /* 1: rising edge */
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_type = mask; /* 1: edge triggered */
+ edge_both = 0; /* 0: single edge */
+ polarity = 0; /* 0: falling edge */
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_type = mask; /* 1: edge triggered */
+ edge_both = mask; /* 1: both edges */
+ polarity = 0; /* 0: ignored */
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_type = 0; /* 0: level triggered */
+ edge_both = 0; /* 0: ignored */
+ polarity = mask; /* 1: high level */
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_type = 0; /* 0: level triggered */
+ edge_both = 0; /* 0: ignored */
+ polarity = 0; /* 0: low level */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ irq_type |= readl_relaxed(base + GPIOIS) & ~mask;
+ writel_relaxed(irq_type, base + GPIOIS);
+ edge_both |= readl_relaxed(base + GPIOIBE) & ~mask;
+ writel_relaxed(edge_both, base + GPIOIBE);
+ polarity |= readl_relaxed(base + GPIOIEV) & ~mask;
+ writel_relaxed(polarity, base + GPIOIEV);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+ return 0;
+}
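+
+/*
+ * E.g. for GPIO 40 the code above computes base = sfp->base + 4, so the
+ * read-modify-writes hit GPIOIS + 4, GPIOIBE + 4 and GPIOIEV + 4, i.e.
+ * the second register of each pair covering GPIOs 32-63.
+ */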
+
+static struct irq_chip starfive_irq_chip = {
+ .irq_ack = starfive_irq_ack,
+ .irq_mask = starfive_irq_mask,
+ .irq_mask_ack = starfive_irq_mask_ack,
+ .irq_unmask = starfive_irq_unmask,
+ .irq_set_type = starfive_irq_set_type,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static void starfive_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct starfive_pinctrl *sfp = starfive_from_irq_desc(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long mis;
+ unsigned int pin;
+
+ chained_irq_enter(chip, desc);
+
+ mis = readl_relaxed(sfp->base + GPIOMIS + 0);
+ for_each_set_bit(pin, &mis, 32)
+ generic_handle_domain_irq(sfp->gc.irq.domain, pin);
+
+ mis = readl_relaxed(sfp->base + GPIOMIS + 4);
+ for_each_set_bit(pin, &mis, 32)
+ generic_handle_domain_irq(sfp->gc.irq.domain, pin + 32);
+
+ chained_irq_exit(chip, desc);
+}
+
+static int starfive_gpio_init_hw(struct gpio_chip *gc)
+{
+ struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
+
+ /* mask all GPIO interrupts */
+ writel(0, sfp->base + GPIOIE + 0);
+ writel(0, sfp->base + GPIOIE + 4);
+ /* clear edge interrupt flags */
+ writel(~0U, sfp->base + GPIOIC + 0);
+ writel(~0U, sfp->base + GPIOIC + 4);
+ /* enable GPIO interrupts */
+ writel(1, sfp->base + GPIOEN);
+ return 0;
+}
+
+static void starfive_disable_clock(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int starfive_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct starfive_pinctrl *sfp;
+ struct reset_control *rst;
+ struct clk *clk;
+ u32 value;
+ int ret;
+
+ sfp = devm_kzalloc(dev, sizeof(*sfp), GFP_KERNEL);
+ if (!sfp)
+ return -ENOMEM;
+
+ sfp->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
+ if (IS_ERR(sfp->base))
+ return PTR_ERR(sfp->base);
+
+ sfp->padctl = devm_platform_ioremap_resource_byname(pdev, "padctl");
+ if (IS_ERR(sfp->padctl))
+ return PTR_ERR(sfp->padctl);
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "could not get clock\n");
+
+ rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst), "could not get reset\n");
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not enable clock\n");
+
+ ret = devm_add_action_or_reset(dev, starfive_disable_clock, clk);
+ if (ret)
+ return ret;
+
+ /*
+ * We don't want to assert reset and risk undoing pin muxing for the
+ * early boot serial console, but let's make sure the reset line is
+ * deasserted in case someone runs a really minimal bootloader.
+ */
+ ret = reset_control_deassert(rst);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not deassert reset\n");
+
+ platform_set_drvdata(pdev, sfp);
+ sfp->gc.parent = dev;
+ raw_spin_lock_init(&sfp->lock);
+
+ ret = devm_pinctrl_register_and_init(dev, &starfive_desc, sfp, &sfp->pctl);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not register pinctrl driver\n");
+
+ if (!of_property_read_u32(dev->of_node, "starfive,signal-group", &value)) {
+ if (value > 6)
+ return dev_err_probe(dev, -EINVAL, "invalid signal group %u\n", value);
+ writel(value, sfp->padctl + IO_PADSHARE_SEL);
+ }
+
+ value = readl(sfp->padctl + IO_PADSHARE_SEL);
+ switch (value) {
+ case 0:
+ sfp->gpios.pin_base = PAD_INVALID_GPIO;
+ goto out_pinctrl_enable;
+ case 1:
+ sfp->gpios.pin_base = PAD_GPIO(0);
+ break;
+ case 2:
+ sfp->gpios.pin_base = PAD_FUNC_SHARE(72);
+ break;
+ case 3:
+ sfp->gpios.pin_base = PAD_FUNC_SHARE(70);
+ break;
+ case 4: case 5: case 6:
+ sfp->gpios.pin_base = PAD_FUNC_SHARE(0);
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "invalid signal group %u\n", value);
+ }
+
+ sfp->gc.label = dev_name(dev);
+ sfp->gc.owner = THIS_MODULE;
+ sfp->gc.request = starfive_gpio_request;
+ sfp->gc.free = starfive_gpio_free;
+ sfp->gc.get_direction = starfive_gpio_get_direction;
+ sfp->gc.direction_input = starfive_gpio_direction_input;
+ sfp->gc.direction_output = starfive_gpio_direction_output;
+ sfp->gc.get = starfive_gpio_get;
+ sfp->gc.set = starfive_gpio_set;
+ sfp->gc.set_config = starfive_gpio_set_config;
+ sfp->gc.add_pin_ranges = starfive_gpio_add_pin_ranges;
+ sfp->gc.base = -1;
+ sfp->gc.ngpio = NR_GPIOS;
+
+ starfive_irq_chip.parent_device = dev;
+ starfive_irq_chip.name = sfp->gc.label;
+
+ sfp->gc.irq.chip = &starfive_irq_chip;
+ sfp->gc.irq.parent_handler = starfive_gpio_irq_handler;
+ sfp->gc.irq.num_parents = 1;
+ sfp->gc.irq.parents = devm_kcalloc(dev, sfp->gc.irq.num_parents,
+ sizeof(*sfp->gc.irq.parents), GFP_KERNEL);
+ if (!sfp->gc.irq.parents)
+ return -ENOMEM;
+ sfp->gc.irq.default_type = IRQ_TYPE_NONE;
+ sfp->gc.irq.handler = handle_bad_irq;
+ sfp->gc.irq.init_hw = starfive_gpio_init_hw;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ sfp->gc.irq.parents[0] = ret;
+
+ ret = devm_gpiochip_add_data(dev, &sfp->gc, sfp);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not register gpiochip\n");
+
+out_pinctrl_enable:
+ return pinctrl_enable(sfp->pctl);
+}
+
+static const struct of_device_id starfive_of_match[] = {
+ { .compatible = "starfive,jh7100-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, starfive_of_match);
+
+static struct platform_driver starfive_pinctrl_driver = {
+ .probe = starfive_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = starfive_of_match,
+ },
+};
+module_platform_driver(starfive_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for StarFive SoCs");
+MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 5fa2488fae87..ab4dde40d3ed 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -675,7 +675,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->gpio_chip.base = -1;
pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
pctl->gpio_chip.can_sleep = true;
- pctl->gpio_chip.of_node = np;
pctl->irq_chip.name = dev_name(pctl->dev);
pctl->irq_chip.irq_mask = stmfx_pinctrl_irq_mask;
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 484a3b9e875c..a87ea3b95cf4 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -1163,9 +1163,6 @@ static int sx150x_probe(struct i2c_client *client,
pctl->gpio.set = sx150x_gpio_set;
pctl->gpio.set_config = gpiochip_generic_config;
pctl->gpio.parent = dev;
-#ifdef CONFIG_OF_GPIO
- pctl->gpio.of_node = dev->of_node;
-#endif
pctl->gpio.can_sleep = true;
pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
if (!pctl->gpio.label)
diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c
new file mode 100644
index 000000000000..b5b47f4dd774
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-thunderbay.c
@@ -0,0 +1,1322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Thunder Bay SoC pinctrl/GPIO driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-utils.h"
+#include "pinmux.h"
+
+/* Bits 0:2 and 4:6 are used for mode selection */
+#define THB_GPIO_PINMUX_MODE_0 0x00
+#define THB_GPIO_PINMUX_MODE_1 0x11
+#define THB_GPIO_PINMUX_MODE_2 0x22
+#define THB_GPIO_PINMUX_MODE_3 0x33
+#define THB_GPIO_PINMUX_MODE_4 0x44
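+
+/*
+ * Each value repeats the 3-bit mode in both fields noted above, e.g.
+ * mode 2 is encoded as 0x22 (bits 0:2 = 2 and bits 4:6 = 2).
+ */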
+
+#define THB_GPIO_PORT_SELECT_MASK BIT(8)
+#define THB_GPIO_PAD_DIRECTION_MASK BIT(10)
+#define THB_GPIO_SPU_MASK BIT(11)
+#define THB_GPIO_PULL_ENABLE_MASK BIT(12)
+#define THB_GPIO_PULL_UP_MASK BIT(13)
+#define THB_GPIO_PULL_DOWN_MASK BIT(14)
+#define THB_GPIO_ENAQ_MASK BIT(15)
+/* bit 16-19: Drive Strength for the Pad */
+#define THB_GPIO_DRIVE_STRENGTH_MASK (0xF0000)
+#define THB_GPIO_SLEW_RATE_MASK BIT(20)
+#define THB_GPIO_SCHMITT_TRIGGER_MASK BIT(21)
+
+#define THB_GPIO_REG_OFFSET(pin_num) ((pin_num) * (0x4))
+#define THB_MAX_MODE_SUPPORTED (5u)
+#define THB_MAX_NPINS_SUPPORTED (67u)
+
+/* store Pin status */
+static u32 thb_pinx_status[THB_MAX_NPINS_SUPPORTED];
+
+struct thunderbay_mux_desc {
+ u8 mode;
+ const char *name;
+};
+
+#define THUNDERBAY_PIN_DESC(pin_number, pin_name, ...) { \
+ .number = pin_number, \
+ .name = pin_name, \
+ .drv_data = &(struct thunderbay_mux_desc[]) { \
+ __VA_ARGS__, { } }, \
+}
+
+#define THUNDERBAY_MUX(pin_mode, pin_function) { \
+ .mode = pin_mode, \
+ .name = pin_function, \
+}
+
+struct thunderbay_pin_soc {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+};
+
+/**
+ * struct thunderbay_pinctrl - Intel Thunderbay pinctrl structure
+ * @pctrl: Pointer to the pin controller device
+ * @base0: First register base address
+ * @dev: Pointer to the device structure
+ * @chip: GPIO chip used by this pin controller
+ * @soc: Pin control configuration data based on SoC
+ * @ngroups: Number of pin groups available
+ * @nfuncs: Number of pin functions available
+ */
+struct thunderbay_pinctrl {
+ struct pinctrl_dev *pctrl;
+ void __iomem *base0;
+ struct device *dev;
+ struct gpio_chip chip;
+ const struct thunderbay_pin_soc *soc;
+ unsigned int ngroups;
+ unsigned int nfuncs;
+};
+
+static const struct pinctrl_pin_desc thunderbay_pins[] = {
+ THUNDERBAY_PIN_DESC(0, "GPIO0",
+ THUNDERBAY_MUX(0X0, "I2C0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(1, "GPIO1",
+ THUNDERBAY_MUX(0X0, "I2C0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(2, "GPIO2",
+ THUNDERBAY_MUX(0X0, "I2C1_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(3, "GPIO3",
+ THUNDERBAY_MUX(0X0, "I2C1_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(4, "GPIO4",
+ THUNDERBAY_MUX(0X0, "I2C2_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(5, "GPIO5",
+ THUNDERBAY_MUX(0X0, "I2C2_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(6, "GPIO6",
+ THUNDERBAY_MUX(0X0, "I2C3_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(7, "GPIO7",
+ THUNDERBAY_MUX(0X0, "I2C3_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(8, "GPIO8",
+ THUNDERBAY_MUX(0X0, "I2C4_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(9, "GPIO9",
+ THUNDERBAY_MUX(0X0, "I2C4_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(10, "GPIO10",
+ THUNDERBAY_MUX(0X0, "UART0_M0"),
+ THUNDERBAY_MUX(0X1, "RT0_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(11, "GPIO11",
+ THUNDERBAY_MUX(0X0, "UART0_M0"),
+ THUNDERBAY_MUX(0X1, "RT0_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(12, "GPIO12",
+ THUNDERBAY_MUX(0X0, "UART0_M0"),
+ THUNDERBAY_MUX(0X1, "RT1_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(13, "GPIO13",
+ THUNDERBAY_MUX(0X0, "UART0_M0"),
+ THUNDERBAY_MUX(0X1, "RT1_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(14, "GPIO14",
+ THUNDERBAY_MUX(0X0, "UART1_M0"),
+ THUNDERBAY_MUX(0X1, "RT2_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "TRIGGER_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(15, "GPIO15",
+ THUNDERBAY_MUX(0X0, "UART1_M0"),
+ THUNDERBAY_MUX(0X1, "RT2_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "TRIGGER_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(16, "GPIO16",
+ THUNDERBAY_MUX(0X0, "UART1_M0"),
+ THUNDERBAY_MUX(0X1, "RT3_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(17, "GPIO17",
+ THUNDERBAY_MUX(0X0, "UART1_M0"),
+ THUNDERBAY_MUX(0X1, "RT3_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(18, "GPIO18",
+ THUNDERBAY_MUX(0X0, "SPI0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(19, "GPIO19",
+ THUNDERBAY_MUX(0X0, "SPI0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(20, "GPIO20",
+ THUNDERBAY_MUX(0X0, "SPI0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_TRACE_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(21, "GPIO21",
+ THUNDERBAY_MUX(0X0, "SPI0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_TRACE_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(22, "GPIO22",
+ THUNDERBAY_MUX(0X0, "SPI1_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M0"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(23, "GPIO23",
+ THUNDERBAY_MUX(0X0, "SPI1_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(24, "GPIO24",
+ THUNDERBAY_MUX(0X0, "SPI1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_TRACE_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(25, "GPIO25",
+ THUNDERBAY_MUX(0X0, "SPI1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_TRACE_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(26, "GPIO26",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(27, "GPIO27",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(28, "GPIO28",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(29, "GPIO29",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(30, "GPIO30",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(31, "GPIO31",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(32, "GPIO32",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(33, "GPIO33",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(34, "GPIO34",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DIG_VIEW_0"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(35, "GPIO35",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DIG_VIEW_1"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(36, "GPIO36",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "CPR_IO_OUT_CLK_0"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(37, "GPIO37",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "CPR_IO_OUT_CLK_1"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(38, "GPIO38",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "CPR_IO_OUT_CLK_2"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(39, "GPIO39",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "CPR_IO_OUT_CLK_3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(40, "GPIO40",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(41, "GPIO41",
+ THUNDERBAY_MUX(0X0, "POWER_INTERRUPT_MAX_PLATFORM_POWER_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(42, "GPIO42",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(43, "GPIO43",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(44, "GPIO44",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(45, "GPIO45",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(46, "GPIO46",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(47, "GPIO47",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(48, "GPIO48",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(49, "GPIO49",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(50, "GPIO50",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DIG_VIEW_0"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(51, "GPIO51",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DIG_VIEW_1"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(52, "GPIO52",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "CPR_IO_OUT_CLK_0"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(53, "GPIO53",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "CPR_IO_OUT_CLK_1"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(54, "GPIO54",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "CPR_IO_OUT_CLK_2"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(55, "GPIO55",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "CPR_IO_OUT_CLK_3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(56, "GPIO56",
+ THUNDERBAY_MUX(0X0, "ETHER1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "POWER_INTERRUPT_ICCMAX_VDDD_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(57, "GPIO57",
+ THUNDERBAY_MUX(0X0, "POWER_INTERRUPT_ICCMAX_VPU_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(58, "GPIO58",
+ THUNDERBAY_MUX(0X0, "THERMTRIP_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(59, "GPIO59",
+ THUNDERBAY_MUX(0X0, "THERMTRIP_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(60, "GPIO60",
+ THUNDERBAY_MUX(0X0, "SMBUS_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(61, "GPIO61",
+ THUNDERBAY_MUX(0X0, "SMBUS_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "POWER_INTERRUPT_ICCMAX_VDDD_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(62, "GPIO62",
+ THUNDERBAY_MUX(0X0, "PLATFORM_RESET_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(63, "GPIO63",
+ THUNDERBAY_MUX(0X0, "PLATFORM_RESET_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(64, "GPIO64",
+ THUNDERBAY_MUX(0X0, "PLATFORM_SHUTDOWN_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(65, "GPIO65",
+ THUNDERBAY_MUX(0X0, "PLATFORM_SHUTDOWN_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(66, "GPIO66",
+ THUNDERBAY_MUX(0X0, "POWER_INTERRUPT_ICCMAX_MEDIA_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+};
+
+static const struct thunderbay_pin_soc thunderbay_data = {
+ .pins = thunderbay_pins,
+ .npins = ARRAY_SIZE(thunderbay_pins),
+};
+
+static u32 thb_gpio_read_reg(struct gpio_chip *chip, unsigned int pinnr)
+{
+ struct thunderbay_pinctrl *tpc = gpiochip_get_data(chip);
+
+ return readl(tpc->base0 + THB_GPIO_REG_OFFSET(pinnr));
+}
+
+static u32 thb_gpio_write_reg(struct gpio_chip *chip, unsigned int pinnr, u32 value)
+{
+ struct thunderbay_pinctrl *tpc = gpiochip_get_data(chip);
+
+ writel(value, (tpc->base0 + THB_GPIO_REG_OFFSET(pinnr)));
+ return 0;
+}
+
+static int thb_read_gpio_data(struct gpio_chip *chip, unsigned int offset, unsigned int pad_dir)
+{
+ int data_offset;
+ u32 data_reg;
+
+	/* Pad direction, per the GPIO spec: 0 = input, 1 = output */
+ data_offset = 0x2000u + (offset / 32);
+ if (!pad_dir)
+ data_offset += 4;
+ data_reg = thb_gpio_read_reg(chip, data_offset);
+
+ return data_reg & BIT(offset % 32);
+}
+
+static int thb_write_gpio_data(struct gpio_chip *chip, unsigned int offset, unsigned int value)
+{
+ int data_offset;
+ u32 data_reg;
+
+ data_offset = 0x2000u + (offset / 32);
+
+ data_reg = thb_gpio_read_reg(chip, data_offset);
+
+ if (value > 0)
+ data_reg |= BIT(offset % 32);
+ else
+ data_reg &= ~BIT(offset % 32);
+
+ return thb_gpio_write_reg(chip, data_offset, data_reg);
+}
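
The two helpers above hide the data-register layout: data registers start at index 0x2000, one 32-bit register covers 32 pins, and the input bank sits one step (index + 4) past the output bank, with the index then scaled through THB_GPIO_REG_OFFSET() by the register accessors. A minimal standalone sketch of that index math, for illustration only (not part of the patch):

    #include <stdio.h>

    /* Mirrors the index math in thb_read_gpio_data()/thb_write_gpio_data() */
    static void thb_data_index(unsigned int offset, int is_input)
    {
            unsigned int index = 0x2000u + (offset / 32) + (is_input ? 4 : 0);

            printf("pin %3u (%s): register index 0x%x, bit %u\n",
                   offset, is_input ? "in" : "out", index, offset % 32);
    }

    int main(void)
    {
            thb_data_index(5, 0);   /* output data for pin 5 */
            thb_data_index(37, 1);  /* input data for pin 37 */
            return 0;
    }
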
+
+static int thunderbay_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ u32 reg = thb_gpio_read_reg(chip, offset);
+
+	/* Return the direction only if the pin is configured as GPIO; otherwise fail */
+ if (reg & THB_GPIO_PORT_SELECT_MASK)
+ return !(reg & THB_GPIO_PAD_DIRECTION_MASK);
+ return -EINVAL;
+}
+
+static int thunderbay_gpio_set_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ u32 reg = thb_gpio_read_reg(chip, offset);
+
+	/* Set the pin as input only if it is configured as GPIO */
+ if (reg & THB_GPIO_PORT_SELECT_MASK) {
+ reg &= (~THB_GPIO_PAD_DIRECTION_MASK);
+ thb_gpio_write_reg(chip, offset, reg);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void thunderbay_gpio_set_value(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ u32 reg = thb_gpio_read_reg(chip, offset);
+
+	/* Update the pin value only if it is configured as GPIO output */
+ if ((reg & THB_GPIO_PORT_SELECT_MASK) && (reg & THB_GPIO_PAD_DIRECTION_MASK))
+ thb_write_gpio_data(chip, offset, value);
+}
+
+static int thunderbay_gpio_set_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ u32 reg = thb_gpio_read_reg(chip, offset);
+
+	/* Set the pin as output only if it is configured as GPIO */
+ if (reg & THB_GPIO_PORT_SELECT_MASK) {
+ reg |= THB_GPIO_PAD_DIRECTION_MASK;
+ thb_gpio_write_reg(chip, offset, reg);
+ thunderbay_gpio_set_value(chip, offset, value);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int thunderbay_gpio_get_value(struct gpio_chip *chip, unsigned int offset)
+{
+ u32 reg = thb_gpio_read_reg(chip, offset);
+ int gpio_dir = 0;
+
+	/* Read the pin value only if it is configured as GPIO */
+	if (reg & THB_GPIO_PORT_SELECT_MASK) {
+		/* Pad direction: 0 = input, 1 = output */
+		gpio_dir = (reg & THB_GPIO_PAD_DIRECTION_MASK) > 0;
+
+		/* Read from the input or output data bank accordingly */
+ return thb_read_gpio_data(chip, offset, gpio_dir);
+ }
+ return -EINVAL;
+}
+
+static int thunderbay_gpiochip_probe(struct thunderbay_pinctrl *tpc)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ int ret;
+
+ chip->label = dev_name(tpc->dev);
+ chip->parent = tpc->dev;
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
+ chip->get_direction = thunderbay_gpio_get_direction;
+ chip->direction_input = thunderbay_gpio_set_direction_input;
+ chip->direction_output = thunderbay_gpio_set_direction_output;
+ chip->get = thunderbay_gpio_get_value;
+ chip->set = thunderbay_gpio_set_value;
+ chip->set_config = gpiochip_generic_config;
+	/*
+	 * chip->base identifies the first GPIO number handled by this chip;
+	 * a negative value requests dynamic ID allocation. Pass -1 so that
+	 * gpiolib selects the base itself: the static GPIO number space is
+	 * being phased out.
+	 */
+	chip->base = -1;
+	/* Number of GPIOs handled; the last one is (base + ngpio - 1) */
+ chip->ngpio = THB_MAX_NPINS_SUPPORTED;
+
+ /* Register/add Thunder Bay GPIO chip with Linux framework */
+ ret = gpiochip_add_data(chip, tpc);
+ if (ret)
+ dev_err(tpc->dev, "Failed to add gpiochip\n");
+ return ret;
+}
+
+static int thunderbay_request_gpio(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct thunderbay_pinctrl *tpc = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg = 0;
+
+ if (thb_pinx_status[pin] == 0u) {
+ reg = thb_gpio_read_reg(chip, pin);
+		/* Configure the pin as GPIO and select mode 4 (the GPIO function) */
+ reg |= (THB_GPIO_PORT_SELECT_MASK | THB_GPIO_PINMUX_MODE_4);
+ thb_gpio_write_reg(chip, pin, reg);
+
+ /* update pin status as busy */
+ thb_pinx_status[pin] = 1u;
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void thunderbay_free_gpio(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct thunderbay_pinctrl *tpc = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg = 0;
+
+ if (thb_pinx_status[pin] == 1u) {
+ reg = thb_gpio_read_reg(chip, pin);
+
+		/* Revert the pin configuration from GPIO back to PORT */
+ reg &= (~THB_GPIO_PORT_SELECT_MASK);
+
+		/* Restore the default pinmux mode 0 */
+ reg &= (~THB_GPIO_PINMUX_MODE_4);
+
+ thb_gpio_write_reg(chip, pin, reg);
+
+ /* update pin status as free */
+ thb_pinx_status[pin] = 0u;
+ }
+}
+
+static int thb_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_select, unsigned int group_select)
+{
+ struct thunderbay_pinctrl *tpc = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *chip = &tpc->chip;
+ struct function_desc *function;
+ unsigned int i, pin_mode;
+ struct group_desc *group;
+ int ret = -EINVAL;
+ u32 reg = 0u;
+
+ group = pinctrl_generic_get_group(pctldev, group_select);
+ if (!group)
+ return -EINVAL;
+
+ function = pinmux_generic_get_function(pctldev, func_select);
+ if (!function)
+ return -EINVAL;
+
+ pin_mode = *(unsigned int *)(function->data);
+
+ /* Change modes for pins in the selected group */
+ for (i = 0; i < group->num_pins; i++) {
+ reg = thb_gpio_read_reg(chip, group->pins[i]);
+
+ switch (pin_mode) {
+ case 0u:
+ reg |= THB_GPIO_PINMUX_MODE_0;
+ break;
+ case 1u:
+ reg |= THB_GPIO_PINMUX_MODE_1;
+ break;
+ case 2u:
+ reg |= THB_GPIO_PINMUX_MODE_2;
+ break;
+ case 3u:
+ reg |= THB_GPIO_PINMUX_MODE_3;
+ break;
+ case 4u:
+ reg |= THB_GPIO_PINMUX_MODE_4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = thb_gpio_write_reg(chip, group->pins[i], reg);
+		if (!ret) {
+ /* update pin status as busy */
+ thb_pinx_status[group->pins[i]] = 1u;
+ }
+ }
+ return ret;
+}
+
+static int thunderbay_build_groups(struct thunderbay_pinctrl *tpc)
+{
+ struct group_desc *thunderbay_groups;
+ int i;
+
+ tpc->ngroups = tpc->soc->npins;
+ thunderbay_groups = devm_kcalloc(tpc->dev, tpc->ngroups,
+ sizeof(*thunderbay_groups), GFP_KERNEL);
+ if (!thunderbay_groups)
+ return -ENOMEM;
+
+ for (i = 0; i < tpc->ngroups; i++) {
+ struct group_desc *group = thunderbay_groups + i;
+ const struct pinctrl_pin_desc *pin_info = thunderbay_pins + i;
+
+ group->name = pin_info->name;
+ group->pins = (int *)&pin_info->number;
+ pinctrl_generic_add_group(tpc->pctrl, group->name,
+ group->pins, 1, NULL);
+ }
+ return 0;
+}
+
+static int thunderbay_add_functions(struct thunderbay_pinctrl *tpc, struct function_desc *funcs)
+{
+ struct function_desc *function = funcs;
+ int i;
+
+ /* Assign the groups for each function */
+ for (i = 0; i < tpc->soc->npins; i++) {
+ const struct pinctrl_pin_desc *pin_info = thunderbay_pins + i;
+ struct thunderbay_mux_desc *pin_mux = pin_info->drv_data;
+
+ while (pin_mux->name) {
+ const char **grp;
+ int j, grp_num, match = 0;
+ size_t grp_size;
+ struct function_desc *func;
+
+ for (j = 0; j < tpc->nfuncs; j++) {
+ if (!strcmp(pin_mux->name, function[j].name)) {
+ match = 1;
+ break;
+ }
+ }
+
+ if (!match)
+ return -EINVAL;
+
+ func = function + j;
+ grp_num = func->num_group_names;
+ grp_size = sizeof(*func->group_names);
+
+ if (!func->group_names) {
+ func->group_names = devm_kcalloc(tpc->dev,
+ grp_num,
+ grp_size,
+ GFP_KERNEL);
+ if (!func->group_names) {
+					kfree(function);
+ return -ENOMEM;
+ }
+ }
+
+ grp = func->group_names;
+ while (*grp)
+ grp++;
+
+ *grp = pin_info->name;
+ pin_mux++;
+ }
+ }
+
+ /* Add all functions */
+ for (i = 0; i < tpc->nfuncs; i++) {
+ pinmux_generic_add_function(tpc->pctrl,
+ function[i].name,
+ function[i].group_names,
+ function[i].num_group_names,
+ function[i].data);
+ }
+ kfree(function);
+ return 0;
+}
+
+static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc)
+{
+ struct function_desc *thunderbay_funcs;
+ void *ptr;
+ int pin;
+
+ /* Total number of functions is unknown at this point. Allocate first. */
+ tpc->nfuncs = 0;
+ thunderbay_funcs = kcalloc(tpc->soc->npins * 8,
+ sizeof(*thunderbay_funcs), GFP_KERNEL);
+ if (!thunderbay_funcs)
+ return -ENOMEM;
+
+	/* Find the total number of functions and each one's properties */
+ for (pin = 0; pin < tpc->soc->npins; pin++) {
+ const struct pinctrl_pin_desc *pin_info = thunderbay_pins + pin;
+ struct thunderbay_mux_desc *pin_mux = pin_info->drv_data;
+
+ while (pin_mux->name) {
+ struct function_desc *func = thunderbay_funcs;
+
+ while (func->name) {
+ if (!strcmp(pin_mux->name, func->name)) {
+ func->num_group_names++;
+ break;
+ }
+ func++;
+ }
+
+ if (!func->name) {
+ func->name = pin_mux->name;
+ func->num_group_names = 1;
+ func->data = (int *)&pin_mux->mode;
+ tpc->nfuncs++;
+ }
+
+ pin_mux++;
+ }
+ }
+
+ /* Reallocate memory based on actual number of functions */
+	ptr = krealloc(thunderbay_funcs,
+		       tpc->nfuncs * sizeof(*thunderbay_funcs), GFP_KERNEL);
+	if (!ptr) {
+		kfree(thunderbay_funcs);
+		return -ENOMEM;
+	}
+
+ thunderbay_funcs = ptr;
+ return thunderbay_add_functions(tpc, thunderbay_funcs);
+}
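
thunderbay_build_functions() uses a count-then-shrink idiom: allocate for the worst case (every pin exposing distinct functions), deduplicate function names while counting, then krealloc() the array down to the exact size. A small userspace sketch of the same idiom with invented mux names, shown only to make the shape of the pattern explicit:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            const char *muxes[] = { "spi", "uart", "spi", "i2c", "uart" };
            size_t max = sizeof(muxes) / sizeof(muxes[0]);
            const char **funcs = calloc(max, sizeof(*funcs)); /* worst case */
            const char **tmp;
            size_t nfuncs = 0, i, j;

            if (!funcs)
                    return 1;

            for (i = 0; i < max; i++) {
                    for (j = 0; j < nfuncs; j++)
                            if (!strcmp(muxes[i], funcs[j]))
                                    break;
                    if (j == nfuncs)        /* not seen before: new function */
                            funcs[nfuncs++] = muxes[i];
            }

            /* Shrink to the exact count, as the driver does with krealloc() */
            tmp = realloc(funcs, nfuncs * sizeof(*funcs));
            if (tmp)
                    funcs = tmp;

            for (i = 0; i < nfuncs; i++)
                    printf("function %zu: %s\n", i, funcs[i]);
            free(funcs);
            return 0;
    }
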
+
+static int thunderbay_pinconf_set_tristate(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg |= THB_GPIO_ENAQ_MASK;
+ else
+ reg &= ~THB_GPIO_ENAQ_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_tristate(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = (reg & THB_GPIO_ENAQ_MASK) > 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_pulldown(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg |= THB_GPIO_PULL_DOWN_MASK;
+ else
+ reg &= ~THB_GPIO_PULL_DOWN_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_pulldown(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg = 0;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_PULL_DOWN_MASK) > 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_pullup(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+	/* The pull-up control bit is active low: clear it to enable the pull-up */
+	if (config > 0)
+ reg &= ~THB_GPIO_PULL_UP_MASK;
+ else
+ reg |= THB_GPIO_PULL_UP_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_pullup(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_PULL_UP_MASK) == 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_opendrain(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+	/* The same bit selects the driver mode; cleared means open drain */
+	if (config > 0)
+ reg &= ~THB_GPIO_PULL_ENABLE_MASK;
+ else
+ reg |= THB_GPIO_PULL_ENABLE_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_opendrain(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_PULL_ENABLE_MASK) == 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_pushpull(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+	/* Setting the bit selects push-pull, the inverse of open drain above */
+	if (config > 0)
+ reg |= THB_GPIO_PULL_ENABLE_MASK;
+ else
+ reg &= ~THB_GPIO_PULL_ENABLE_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_pushpull(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_PULL_ENABLE_MASK) > 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_drivestrength(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+
+	/* Drive strength: 0x0 to 0xF, stored in the field at bit 16 */
+	if (config <= 0xF) {
+		reg &= ~THB_GPIO_DRIVE_STRENGTH_MASK;
+		reg |= (config << 16);
+		return thb_gpio_write_reg(chip, pin, reg);
+ }
+
+ return -EINVAL;
+}
+
+static int thunderbay_pinconf_get_drivestrength(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ reg = (reg & THB_GPIO_DRIVE_STRENGTH_MASK) >> 16;
+	*config = reg;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_schmitt(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg |= THB_GPIO_SCHMITT_TRIGGER_MASK;
+ else
+ reg &= ~THB_GPIO_SCHMITT_TRIGGER_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_schmitt(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_SCHMITT_TRIGGER_MASK) > 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_slew_rate(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg = 0;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg |= THB_GPIO_SLEW_RATE_MASK;
+ else
+ reg &= ~THB_GPIO_SLEW_RATE_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_slew_rate(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_SLEW_RATE_MASK) > 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct thunderbay_pinctrl *tpc = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 arg;
+ int ret;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ ret = thunderbay_pinconf_get_tristate(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = thunderbay_pinconf_get_pulldown(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = thunderbay_pinconf_get_pullup(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ ret = thunderbay_pinconf_get_opendrain(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ ret = thunderbay_pinconf_get_pushpull(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = thunderbay_pinconf_get_drivestrength(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ ret = thunderbay_pinconf_get_schmitt(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ ret = thunderbay_pinconf_get_slew_rate(tpc, pin, &arg);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return ret;
+}
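
Both the _get and _set paths rely on the generic pinconf encoding, which packs a (parameter, argument) pair into one unsigned long: the parameter in the low byte, the argument in the bits above it. A standalone sketch of that encoding, under the assumption that the packing is (argument << 8) | parameter as in the generic pinconf helpers; the parameter id below is arbitrary:

    #include <stdio.h>

    /* Parameter in the low byte, argument above it (masked to 24 bits here) */
    static unsigned long config_packed(unsigned int param, unsigned int arg)
    {
            return ((unsigned long)arg << 8) | (param & 0xffUL);
    }

    int main(void)
    {
            unsigned long cfg = config_packed(11 /* arbitrary parameter id */, 0xF);

            printf("packed:   0x%lx\n", cfg);
            printf("param:    %lu\n", cfg & 0xffUL);
            printf("argument: %lu\n", (cfg >> 8) & 0xffffffUL);
            return 0;
    }
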
+
+static int thunderbay_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct thunderbay_pinctrl *tpc = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ unsigned int pinconf;
+ int ret = 0;
+ u32 arg;
+
+ for (pinconf = 0; pinconf < num_configs; pinconf++) {
+ param = pinconf_to_config_param(configs[pinconf]);
+ arg = pinconf_to_config_argument(configs[pinconf]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ ret = thunderbay_pinconf_set_tristate(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = thunderbay_pinconf_set_pulldown(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = thunderbay_pinconf_set_pullup(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ ret = thunderbay_pinconf_set_opendrain(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ ret = thunderbay_pinconf_set_pushpull(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = thunderbay_pinconf_set_drivestrength(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ ret = thunderbay_pinconf_set_schmitt(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ ret = thunderbay_pinconf_set_slew_rate(tpc, pin, arg);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+ }
+ return ret;
+}
+
+static const struct pinctrl_ops thunderbay_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static const struct pinmux_ops thunderbay_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = thb_pinctrl_set_mux,
+ .gpio_request_enable = thunderbay_request_gpio,
+ .gpio_disable_free = thunderbay_free_gpio,
+};
+
+static const struct pinconf_ops thunderbay_confops = {
+ .is_generic = true,
+ .pin_config_get = thunderbay_pinconf_get,
+ .pin_config_set = thunderbay_pinconf_set,
+};
+
+static struct pinctrl_desc thunderbay_pinctrl_desc = {
+ .name = "thunderbay-pinmux",
+ .pctlops = &thunderbay_pctlops,
+ .pmxops = &thunderbay_pmxops,
+ .confops = &thunderbay_confops,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id thunderbay_pinctrl_match[] = {
+ {
+ .compatible = "intel,thunderbay-pinctrl",
+ .data = &thunderbay_data
+ },
+ {}
+};
+
+static int thunderbay_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct thunderbay_pinctrl *tpc;
+ struct resource *iomem;
+ int ret;
+
+ of_id = of_match_node(thunderbay_pinctrl_match, pdev->dev.of_node);
+ if (!of_id)
+ return -ENODEV;
+
+ tpc = devm_kzalloc(dev, sizeof(*tpc), GFP_KERNEL);
+ if (!tpc)
+ return -ENOMEM;
+
+ tpc->dev = dev;
+ tpc->soc = of_id->data;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem)
+ return -ENXIO;
+
+ tpc->base0 = devm_ioremap_resource(dev, iomem);
+ if (IS_ERR(tpc->base0))
+ return PTR_ERR(tpc->base0);
+
+ thunderbay_pinctrl_desc.pins = tpc->soc->pins;
+ thunderbay_pinctrl_desc.npins = tpc->soc->npins;
+
+ /* Register pinctrl */
+ tpc->pctrl = devm_pinctrl_register(dev, &thunderbay_pinctrl_desc, tpc);
+ if (IS_ERR(tpc->pctrl))
+ return PTR_ERR(tpc->pctrl);
+
+ /* Setup pinmux groups */
+ ret = thunderbay_build_groups(tpc);
+ if (ret)
+ return ret;
+
+ /* Setup pinmux functions */
+ ret = thunderbay_build_functions(tpc);
+ if (ret)
+ return ret;
+
+ /* Setup GPIO */
+ ret = thunderbay_gpiochip_probe(tpc);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, tpc);
+
+ return 0;
+}
+
+static int thunderbay_pinctrl_remove(struct platform_device *pdev)
+{
+	/* All resources are device-managed, so there is nothing to release here */
+ return 0;
+}
+
+static struct platform_driver thunderbay_pinctrl_driver = {
+ .driver = {
+ .name = "thunderbay-pinctrl",
+ .of_match_table = thunderbay_pinctrl_match,
+ },
+ .probe = thunderbay_pinctrl_probe,
+ .remove = thunderbay_pinctrl_remove,
+};
+
+builtin_platform_driver(thunderbay_pinctrl_driver);
+
+MODULE_AUTHOR("Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>");
+MODULE_AUTHOR("Kiran Kumar S <kiran.kumar1.s@intel.com>");
+MODULE_DESCRIPTION("Intel Thunder Bay Pinctrl/GPIO Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 5e3f31b55eb7..3a03beb8a755 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1763,7 +1763,6 @@ static int pinmux_xway_probe(struct platform_device *pdev)
/* register the gpio chip */
xway_chip.parent = &pdev->dev;
xway_chip.owner = THIS_MODULE;
- xway_chip.of_node = pdev->dev.of_node;
ret = devm_gpiochip_add_data(&pdev->dev, &xway_chip, NULL);
if (ret) {
dev_err(&pdev->dev, "Failed to register gpio chip\n");
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index e14012209992..42da6bd399ee 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -809,6 +809,7 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
unsigned int *npins)
{
struct pinctrl_pin_desc *pins, *pin;
+ char **pin_names;
int ret;
int i;
@@ -820,13 +821,14 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
if (!pins)
return -ENOMEM;
+ pin_names = devm_kasprintf_strarray(dev, ZYNQMP_PIN_PREFIX, *npins);
+ if (IS_ERR(pin_names))
+ return PTR_ERR(pin_names);
+
for (i = 0; i < *npins; i++) {
pin = &pins[i];
pin->number = i;
- pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d",
- ZYNQMP_PIN_PREFIX, i);
- if (!pin->name)
- return -ENOMEM;
+ pin->name = pin_names[i];
}
*zynqmp_pins = pins;
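
devm_kasprintf_strarray() replaces the open-coded loop with one managed allocation of *npins names built as "<prefix><index>", returning an ERR_PTR on failure (hence the IS_ERR() check above). A rough userspace analogue of what it hands back, assuming only the naming scheme visible in the removed lines; the error handling is deliberately simplified for the sketch:

    #include <stdio.h>
    #include <stdlib.h>

    /* Build an array of n strings "<prefix>0".."<prefix>{n-1}" */
    static char **strarray(const char *prefix, size_t n)
    {
            char **names = calloc(n, sizeof(*names));
            size_t i;

            if (!names)
                    return NULL;

            for (i = 0; i < n; i++) {
                    int len = snprintf(NULL, 0, "%s%zu", prefix, i);

                    names[i] = malloc(len + 1);
                    if (!names[i])
                            return NULL;    /* sketch: no cleanup on failure */
                    snprintf(names[i], len + 1, "%s%zu", prefix, i);
            }
            return names;
    }

    int main(void)
    {
            char **names = strarray("pin", 4);  /* "pin" is a made-up prefix */
            size_t i;

            if (!names)
                    return 1;
            for (i = 0; i < 4; i++)
                    printf("%s\n", names[i]);
            return 0;
    }
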
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 6cdbd9ccf2f0..f94d43b082d9 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -875,7 +875,7 @@ EXPORT_SYMBOL_GPL(pinmux_generic_get_function);
*/
int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
const char *name,
- const char **groups,
+ const char * const *groups,
const unsigned int num_groups,
void *data)
{
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index 78c3a31be882..72fcf03eaa43 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -129,7 +129,7 @@ static inline void pinmux_init_device_debugfs(struct dentry *devroot,
*/
struct function_desc {
const char *name;
- const char **group_names;
+ const char * const *group_names;
int num_group_names;
void *data;
};
@@ -150,7 +150,7 @@ struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
const char *name,
- const char **groups,
+ const char * const *groups,
unsigned const num_groups,
void *data);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 3e0c00766f59..ca6f68a061a8 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -302,6 +302,15 @@ config PINCTRL_SM6350
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM6350 platform.
+config PINCTRL_SDX65
+ tristate "Qualcomm Technologies Inc SDX65 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDX65 platform.
+
config PINCTRL_SM8150
tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
depends on OF
@@ -328,6 +337,15 @@ config PINCTRL_SM8350
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM8350 platform.
+config PINCTRL_SM8450
+ tristate "Qualcomm Technologies Inc SM8450 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8450 platform.
+
config PINCTRL_LPASS_LPI
tristate "Qualcomm Technologies Inc LPASS LPI pin controller driver"
select PINMUX
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 49b509080745..709882f54d25 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -35,7 +35,9 @@ obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
obj-$(CONFIG_PINCTRL_SM6115) += pinctrl-sm6115.o
obj-$(CONFIG_PINCTRL_SM6125) += pinctrl-sm6125.o
obj-$(CONFIG_PINCTRL_SM6350) += pinctrl-sm6350.o
+obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o
+obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o
obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 8476a8ac4451..780878dede9e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -185,6 +185,7 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int irq = irq_find_mapping(gc->irq.domain, group);
struct irq_data *d = irq_get_irq_data(irq);
unsigned int gpio_func = pctrl->soc->gpio_func;
+ unsigned int egpio_func = pctrl->soc->egpio_func;
const struct msm_pingroup *g;
unsigned long flags;
u32 val, mask;
@@ -218,8 +219,18 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = msm_readl_ctl(pctrl, g);
- val &= ~mask;
- val |= i << g->mux_bit;
+
+ if (egpio_func && i == egpio_func) {
+ if (val & BIT(g->egpio_present))
+ val &= ~BIT(g->egpio_enable);
+ } else {
+ val &= ~mask;
+ val |= i << g->mux_bit;
+ /* Claim ownership of pin if egpio capable */
+ if (egpio_func && val & BIT(g->egpio_present))
+ val |= BIT(g->egpio_enable);
+ }
+
msm_writel_ctl(val, pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
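
The new branch can be read as an ownership handshake: selecting the virtual egpio function clears the enable bit, handing an eGPIO-capable pad to the other subsystem, while selecting any real TLMM function programs the mux and sets the enable bit to claim the pad back. A standalone sketch of that control-register update, using the egpio_present/egpio_enable bit positions from the sc7280 PINGROUP change below (11 and 12) and assuming, purely for illustration, a 3-bit mux field at bit 2:

    #include <stdio.h>

    #define BIT(n)          (1u << (n))
    #define EGPIO_PRESENT   11      /* from the sc7280 PINGROUP values */
    #define EGPIO_ENABLE    12
    #define MUX_BIT         2       /* assumed field position and width */
    #define MUX_MASK        (0x7u << MUX_BIT)

    /* Mirrors the ctl-register update in msm_pinmux_set_mux() above */
    static unsigned int set_mux(unsigned int val, unsigned int func,
                                unsigned int egpio_func)
    {
            if (egpio_func && func == egpio_func) {
                    /* Virtual egpio function: release the pad, if capable */
                    if (val & BIT(EGPIO_PRESENT))
                            val &= ~BIT(EGPIO_ENABLE);
            } else {
                    val &= ~MUX_MASK;
                    val |= func << MUX_BIT;
                    /* Claim ownership of the pad if it is egpio capable */
                    if (egpio_func && (val & BIT(EGPIO_PRESENT)))
                            val |= BIT(EGPIO_ENABLE);
            }
            return val;
    }

    int main(void)
    {
            unsigned int val = BIT(EGPIO_PRESENT);

            printf("func 3: ctl=0x%x\n", set_mux(val, 3, 9));
            printf("egpio:  ctl=0x%x\n", set_mux(val, 9, 9));
            return 0;
    }
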
@@ -1253,7 +1264,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->owner = THIS_MODULE;
- chip->of_node = pctrl->dev->of_node;
if (msm_gpio_needs_valid_mask(pctrl))
chip->init_valid_mask = msm_gpio_init_valid_mask;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index e31a5167c91e..dd0d949f7a9e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -77,6 +77,8 @@ struct msm_pingroup {
unsigned drv_bit:5;
unsigned od_bit:5;
+ unsigned egpio_enable:5;
+ unsigned egpio_present:5;
unsigned oe_bit:5;
unsigned in_bit:5;
unsigned out_bit:5;
@@ -119,6 +121,13 @@ struct msm_gpio_wakeirq_map {
* to be aware that their parent can't handle dual
* edge interrupts.
* @gpio_func: Which function number is GPIO (usually 0).
+ * @egpio_func: If non-zero then this SoC supports eGPIO. Even though in
+ * hardware this is a mux 1-level above the TLMM, we'll treat
+ * it as if this is just another mux state of the TLMM. Since
+ * it doesn't really map to hardware, we'll allocate a virtual
+ * function number for eGPIO and any time we see that function
+ * number used we'll treat it as a request to mux away from
+ * our TLMM towards another owner.
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -136,6 +145,7 @@ struct msm_pinctrl_soc_data {
unsigned int nwakeirq_map;
bool wakeirq_dual_edge_errata;
unsigned int gpio_func;
+ unsigned int egpio_func;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 9017ede409c9..31df55c79cb3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -43,6 +43,8 @@
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
.oe_bit = 9, \
.in_bit = 0, \
.out_bit = 1, \
@@ -520,6 +522,7 @@ enum sc7280_functions {
msm_mux_dp_lcd,
msm_mux_edp_hot,
msm_mux_edp_lcd,
+ msm_mux_egpio,
msm_mux_gcc_gp1,
msm_mux_gcc_gp2,
msm_mux_gcc_gp3,
@@ -658,6 +661,14 @@ static const char * const gpio_groups[] = {
"gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
"gpio171", "gpio172", "gpio173", "gpio174",
};
+static const char * const egpio_groups[] = {
+ "gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149",
+ "gpio150", "gpio151", "gpio152", "gpio153", "gpio154", "gpio155",
+ "gpio156", "gpio157", "gpio158", "gpio159", "gpio160", "gpio161",
+ "gpio162", "gpio163", "gpio164", "gpio165", "gpio166", "gpio167",
+ "gpio168", "gpio169", "gpio170", "gpio171", "gpio172", "gpio173",
+ "gpio174",
+};
static const char * const atest_char_groups[] = {
"gpio81",
};
@@ -1150,6 +1161,7 @@ static const struct msm_function sc7280_functions[] = {
FUNCTION(dp_lcd),
FUNCTION(edp_hot),
FUNCTION(edp_lcd),
+ FUNCTION(egpio),
FUNCTION(gcc_gp1),
FUNCTION(gcc_gp2),
FUNCTION(gcc_gp3),
@@ -1408,37 +1420,37 @@ static const struct msm_pingroup sc7280_groups[] = {
[141] = PINGROUP(141, _, _, _, _, _, _, _, _, _),
[142] = PINGROUP(142, _, _, _, _, _, _, _, _, _),
[143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
- [144] = PINGROUP(144, _, _, _, _, _, _, _, _, _),
- [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
- [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
- [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
- [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
- [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
- [150] = PINGROUP(150, qdss, _, _, _, _, _, _, _, _),
- [151] = PINGROUP(151, qdss, _, _, _, _, _, _, _, _),
- [152] = PINGROUP(152, qdss, _, _, _, _, _, _, _, _),
- [153] = PINGROUP(153, qdss, _, _, _, _, _, _, _, _),
- [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _),
- [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _),
- [156] = PINGROUP(156, qdss_cti, _, _, _, _, _, _, _, _),
- [157] = PINGROUP(157, qdss_cti, _, _, _, _, _, _, _, _),
- [158] = PINGROUP(158, _, _, _, _, _, _, _, _, _),
- [159] = PINGROUP(159, _, _, _, _, _, _, _, _, _),
- [160] = PINGROUP(160, _, _, _, _, _, _, _, _, _),
- [161] = PINGROUP(161, _, _, _, _, _, _, _, _, _),
- [162] = PINGROUP(162, _, _, _, _, _, _, _, _, _),
- [163] = PINGROUP(163, _, _, _, _, _, _, _, _, _),
- [164] = PINGROUP(164, _, _, _, _, _, _, _, _, _),
- [165] = PINGROUP(165, qdss_cti, _, _, _, _, _, _, _, _),
- [166] = PINGROUP(166, qdss_cti, _, _, _, _, _, _, _, _),
- [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _),
- [168] = PINGROUP(168, _, _, _, _, _, _, _, _, _),
- [169] = PINGROUP(169, _, _, _, _, _, _, _, _, _),
- [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _),
- [171] = PINGROUP(171, qdss, _, _, _, _, _, _, _, _),
- [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _),
- [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _),
- [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, _, _, _, _, _, _, _, egpio),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, egpio),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, egpio),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, egpio),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, egpio),
+ [149] = PINGROUP(149, _, _, _, _, _, _, _, _, egpio),
+ [150] = PINGROUP(150, qdss, _, _, _, _, _, _, _, egpio),
+ [151] = PINGROUP(151, qdss, _, _, _, _, _, _, _, egpio),
+ [152] = PINGROUP(152, qdss, _, _, _, _, _, _, _, egpio),
+ [153] = PINGROUP(153, qdss, _, _, _, _, _, _, _, egpio),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, egpio),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, egpio),
+ [156] = PINGROUP(156, qdss_cti, _, _, _, _, _, _, _, egpio),
+ [157] = PINGROUP(157, qdss_cti, _, _, _, _, _, _, _, egpio),
+ [158] = PINGROUP(158, _, _, _, _, _, _, _, _, egpio),
+ [159] = PINGROUP(159, _, _, _, _, _, _, _, _, egpio),
+ [160] = PINGROUP(160, _, _, _, _, _, _, _, _, egpio),
+ [161] = PINGROUP(161, _, _, _, _, _, _, _, _, egpio),
+ [162] = PINGROUP(162, _, _, _, _, _, _, _, _, egpio),
+ [163] = PINGROUP(163, _, _, _, _, _, _, _, _, egpio),
+ [164] = PINGROUP(164, _, _, _, _, _, _, _, _, egpio),
+ [165] = PINGROUP(165, qdss_cti, _, _, _, _, _, _, _, egpio),
+ [166] = PINGROUP(166, qdss_cti, _, _, _, _, _, _, _, egpio),
+ [167] = PINGROUP(167, _, _, _, _, _, _, _, _, egpio),
+ [168] = PINGROUP(168, _, _, _, _, _, _, _, _, egpio),
+ [169] = PINGROUP(169, _, _, _, _, _, _, _, _, egpio),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, egpio),
+ [171] = PINGROUP(171, qdss, _, _, _, _, _, _, _, egpio),
+ [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, egpio),
+ [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, egpio),
+ [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, egpio),
[175] = UFS_RESET(ufs_reset, 0xbe000),
[176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6),
[177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6),
@@ -1481,6 +1493,7 @@ static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
.ngpios = 176,
.wakeirq_map = sc7280_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sc7280_pdc_map),
+ .egpio_func = 9,
};
static int sc7280_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx65.c b/drivers/pinctrl/qcom/pinctrl-sdx65.c
new file mode 100644
index 000000000000..e793ea713965
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdx65.c
@@ -0,0 +1,967 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x0
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_BASE + REG_SIZE * id, \
+ .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \
+ .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sdx65_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "UFS_RESET"),
+ PINCTRL_PIN(109, "SDC1_RCLK"),
+ PINCTRL_PIN(110, "SDC1_CLK"),
+ PINCTRL_PIN(111, "SDC1_CMD"),
+ PINCTRL_PIN(112, "SDC1_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+
+static const unsigned int ufs_reset_pins[] = { 108 };
+static const unsigned int sdc1_rclk_pins[] = { 109 };
+static const unsigned int sdc1_clk_pins[] = { 110 };
+static const unsigned int sdc1_cmd_pins[] = { 111 };
+static const unsigned int sdc1_data_pins[] = { 112 };
+
+enum sdx65_functions {
+ msm_mux_qlink0_wmss,
+ msm_mux_adsp_ext,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_audio_ref,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uart3,
+ msm_mux_blsp_uart4,
+ msm_mux_char_exec,
+ msm_mux_coex_uart,
+ msm_mux_coex_uart2,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ebi0_wrcdc,
+ msm_mux_ebi2_a,
+ msm_mux_ebi2_lcd,
+ msm_mux_ext_dbg,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gcc_plltest,
+ msm_mux_gpio,
+ msm_mux_i2s_mclk,
+ msm_mux_jitter_bist,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_m_voc,
+ msm_mux_mgpi_clk,
+ msm_mux_native_char,
+ msm_mux_native_tsens,
+ msm_mux_native_tsense,
+ msm_mux_nav_gpio,
+ msm_mux_pa_indicator,
+ msm_mux_pci_e,
+ msm_mux_pcie_clkreq,
+ msm_mux_pll_bist,
+ msm_mux_pll_ref,
+ msm_mux_pri_mi2s,
+ msm_mux_pri_mi2s_ws,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink0_en,
+ msm_mux_qlink0_req,
+ msm_mux_qlink1_en,
+ msm_mux_qlink1_req,
+ msm_mux_qlink1_wmss,
+ msm_mux_qlink2_en,
+ msm_mux_qlink2_req,
+ msm_mux_qlink2_wmss,
+ msm_mux_sdc1_tb,
+ msm_mux_sec_mi2s,
+ msm_mux_spmi_coex,
+ msm_mux_spmi_vgi,
+ msm_mux_tgu_ch0,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio0",
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio48", "gpio49", "gpio80",
+ "gpio81",
+};
+static const char * const ebi0_wrcdc_groups[] = {
+ "gpio0", "gpio2",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio1",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio2",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3", "gpio82", "gpio83",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio3",
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio23", "gpio47", "gpio62",
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio63", "gpio64", "gpio65",
+ "gpio66",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7", "gpio65", "gpio66",
+};
+static const char * const char_exec_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13",
+ "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19",
+ "gpio33", "gpio42", "gpio63", "gpio64", "gpio65", "gpio66",
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio23", "gpio47", "gpio62",
+};
+static const char * const blsp_uart3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const ext_dbg_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio8",
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio11",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+ "gpio12",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio13", "gpio14", "gpio15",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio13",
+};
+static const char * const native_tsens_groups[] = {
+ "gpio14",
+};
+static const char * const bimc_dte0_groups[] = {
+ "gpio14", "gpio59",
+};
+static const char * const bimc_dte1_groups[] = {
+ "gpio15", "gpio61",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio23", "gpio47", "gpio62",
+};
+static const char * const blsp_uart4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio22", "gpio23", "gpio48",
+ "gpio49",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio16", "gpio16", "gpio17", "gpio17", "gpio54", "gpio54", "gpio55",
+ "gpio55", "gpio59", "gpio60", "gpio65", "gpio65", "gpio66", "gpio66",
+ "gpio94", "gpio94", "gpio95", "gpio95",
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio18", "gpio19", "gpio84", "gpio85",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio18",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio19",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio19",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio22",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio23", "gpio47", "gpio62", "gpio80", "gpio81", "gpio82", "gpio83",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio24", "gpio25",
+};
+static const char * const qlink0_wmss_groups[] = {
+ "gpio28",
+};
+static const char * const native_tsense_groups[] = {
+ "gpio29", "gpio72",
+};
+static const char * const nav_gpio_groups[] = {
+ "gpio31", "gpio32",
+};
+static const char * const pll_ref_groups[] = {
+ "gpio32",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio33",
+};
+static const char * const qlink0_en_groups[] = {
+ "gpio34",
+};
+static const char * const qlink0_req_groups[] = {
+ "gpio35",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio35",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio36",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio38",
+};
+static const char * const cri_trng0_groups[] = {
+ "gpio40",
+};
+static const char * const cri_trng1_groups[] = {
+ "gpio41",
+};
+static const char * const coex_uart_groups[] = {
+ "gpio44", "gpio45",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio45", "gpio46",
+};
+static const char * const m_voc_groups[] = {
+ "gpio46", "gpio48", "gpio49", "gpio59", "gpio60",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio46", "gpio47", "gpio48", "gpio49",
+};
+static const char * const pci_e_groups[] = {
+ "gpio53",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio55",
+};
+static const char * const pcie_clkreq_groups[] = {
+ "gpio56",
+};
+static const char * const native_char_groups[] = {
+ "gpio26", "gpio29", "gpio33", "gpio42", "gpio57",
+};
+static const char * const mgpi_clk_groups[] = {
+ "gpio61", "gpio71",
+};
+static const char * const qlink2_wmss_groups[] = {
+ "gpio61",
+};
+static const char * const i2s_mclk_groups[] = {
+ "gpio62",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio62",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio62",
+};
+static const char * const atest_char_groups[] = {
+ "gpio63",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio64",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio65",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio66",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio67",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio67",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio68",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio69",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio70",
+};
+static const char * const qlink2_en_groups[] = {
+ "gpio71",
+};
+static const char * const qlink1_en_groups[] = {
+ "gpio72",
+};
+static const char * const qlink1_req_groups[] = {
+ "gpio73",
+};
+static const char * const qlink1_wmss_groups[] = {
+ "gpio74",
+};
+static const char * const coex_uart2_groups[] = {
+ "gpio75", "gpio76", "gpio102", "gpio103",
+};
+static const char * const spmi_coex_groups[] = {
+ "gpio75", "gpio76",
+};
+static const char * const qlink2_req_groups[] = {
+ "gpio77",
+};
+static const char * const spmi_vgi_groups[] = {
+ "gpio78", "gpio79",
+};
+static const char * const gcc_plltest_groups[] = {
+ "gpio81", "gpio82",
+};
+static const char * const ebi2_lcd_groups[] = {
+ "gpio84", "gpio85", "gpio90",
+};
+static const char * const ebi2_a_groups[] = {
+ "gpio89",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio93",
+};
+static const char * const sdc1_tb_groups[] = {
+ "gpio106",
+};
+
+static const struct msm_function sdx65_functions[] = {
+ FUNCTION(qlink0_wmss),
+ FUNCTION(adsp_ext),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(audio_ref),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart3),
+ FUNCTION(blsp_uart4),
+ FUNCTION(char_exec),
+ FUNCTION(coex_uart),
+ FUNCTION(coex_uart2),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ebi0_wrcdc),
+ FUNCTION(ebi2_a),
+ FUNCTION(ebi2_lcd),
+ FUNCTION(ext_dbg),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gpio),
+ FUNCTION(i2s_mclk),
+ FUNCTION(jitter_bist),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(m_voc),
+ FUNCTION(mgpi_clk),
+ FUNCTION(native_char),
+ FUNCTION(native_tsens),
+ FUNCTION(native_tsense),
+ FUNCTION(nav_gpio),
+ FUNCTION(pa_indicator),
+ FUNCTION(pci_e),
+ FUNCTION(pcie_clkreq),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_ref),
+ FUNCTION(pri_mi2s),
+ FUNCTION(pri_mi2s_ws),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qlink0_en),
+ FUNCTION(qlink0_req),
+ FUNCTION(qlink1_en),
+ FUNCTION(qlink1_req),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qlink2_en),
+ FUNCTION(qlink2_req),
+ FUNCTION(qlink2_wmss),
+ FUNCTION(sdc1_tb),
+ FUNCTION(sec_mi2s),
+ FUNCTION(spmi_coex),
+ FUNCTION(spmi_vgi),
+ FUNCTION(tgu_ch0),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(vsense_trigger),
+};
+
+/* Every pin is maintained as a single group; a missing or non-existent pin
+ * is represented by a dummy group, so that the pin group index stays in
+ * sync with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sdx65_groups[] = {
+ [0] = PINGROUP(0, uim2_data, blsp_uart1, ebi0_wrcdc, _, _, _, _, _, _),
+ [1] = PINGROUP(1, uim2_present, blsp_uart1, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, uim2_reset, blsp_uart1, blsp_i2c1, ebi0_wrcdc, _, _, _, _, _),
+ [3] = PINGROUP(3, uim2_clk, blsp_uart1, blsp_i2c1, _, _, _, _, _, _),
+ [4] = PINGROUP(4, blsp_spi2, blsp_uart2, _, qdss_gpio, _, _, _, _, _),
+ [5] = PINGROUP(5, blsp_spi2, blsp_uart2, _, qdss_gpio, _, _, _, _, _),
+ [6] = PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, char_exec, _, qdss_gpio, _, _, _),
+ [7] = PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, char_exec, _, qdss_gpio, _, _, _),
+ [8] = PINGROUP(8, blsp_spi3, blsp_uart3, ext_dbg, ldo_en, _, _, _, _, _),
+ [9] = PINGROUP(9, blsp_spi3, blsp_uart3, ext_dbg, _, _, _, _, _, _),
+ [10] = PINGROUP(10, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg, _, _, _, _, _),
+ [11] = PINGROUP(11, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg, gcc_gp3, _, _, _, _),
+ [12] = PINGROUP(12, pri_mi2s_ws, _, qdss_gpio, _, _, _, _, _, _),
+ [13] = PINGROUP(13, pri_mi2s, _, qdss_gpio, vsense_trigger, _, _, _, _, _),
+ [14] = PINGROUP(14, pri_mi2s, _, _, qdss_gpio, native_tsens, bimc_dte0, _, _, _),
+ [15] = PINGROUP(15, pri_mi2s, _, _, qdss_gpio, bimc_dte1, _, _, _, _),
+ [16] = PINGROUP(16, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, _, _, qdss_gpio, _),
+ [17] = PINGROUP(17, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, _, qdss_gpio, _, _),
+ [18] = PINGROUP(18, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, gcc_gp1, qdss_gpio, _, _, _),
+ [19] = PINGROUP(19, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, jitter_bist, gcc_gp2, _, qdss_gpio, _),
+ [20] = PINGROUP(20, _, _, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, _, _, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, blsp_uart4, pll_bist, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, blsp_uart4, blsp_spi2, blsp_spi1, blsp_spi3, blsp_spi4, _, _, _, _),
+ [24] = PINGROUP(24, adsp_ext, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, adsp_ext, _, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, _, _, _, native_char, _, _, _, _, _),
+ [27] = PINGROUP(27, _, _, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, _, _, _, native_tsense, native_char, _, _, _, _),
+ [30] = PINGROUP(30, _, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, nav_gpio, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, nav_gpio, pll_ref, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, _, pa_indicator, qdss_gpio, native_char, _, _, _, _, _),
+ [34] = PINGROUP(34, qlink0_en, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qlink0_req, dbg_out, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, _, _, cri_trng, _, _, _, _, _, _),
+ [37] = PINGROUP(37, _, _, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, _, _, prng_rosc, _, _, _, _, _, _),
+ [39] = PINGROUP(39, _, _, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, _, _, cri_trng0, _, _, _, _, _, _),
+ [41] = PINGROUP(41, _, _, cri_trng1, _, _, _, _, _, _),
+ [42] = PINGROUP(42, _, qdss_gpio, native_char, _, _, _, _, _, _),
+ [43] = PINGROUP(43, _, _, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, coex_uart, _, _, _, _, _, _, _, _),
+ [45] = PINGROUP(45, coex_uart, ddr_pxi0, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, m_voc, ddr_bist, ddr_pxi0, _, _, _, _, _, _),
+ [47] = PINGROUP(47, ddr_bist, blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4, _, _, _, _),
+ [48] = PINGROUP(48, m_voc, blsp_uart1, blsp_uart4, ddr_bist, _, _, _, _, _),
+ [49] = PINGROUP(49, m_voc, blsp_uart1, blsp_uart4, ddr_bist, _, _, _, _, _),
+ [50] = PINGROUP(50, _, _, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, _, _, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, _, _, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, pci_e, _, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, qdss_cti, qdss_cti, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qdss_cti, qdss_cti, tgu_ch0, _, _, _, _, _, _),
+ [56] = PINGROUP(56, pcie_clkreq, _, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, _, native_char, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, _, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qdss_cti, m_voc, bimc_dte0, _, _, _, _, _, _),
+ [60] = PINGROUP(60, qdss_cti, _, m_voc, _, _, _, _, _, _),
+ [61] = PINGROUP(61, mgpi_clk, qlink2_wmss, bimc_dte1, _, _, _, _, _, _),
+ [62] = PINGROUP(62, i2s_mclk, audio_ref, blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4, ldo_update, _, _),
+ [63] = PINGROUP(63, blsp_uart2, _, qdss_gpio, atest_char, _, _, _, _, _),
+ [64] = PINGROUP(64, blsp_uart2, qdss_gpio, atest_char3, _, _, _, _, _, _),
+ [65] = PINGROUP(65, blsp_uart2, blsp_i2c2, qdss_cti, qdss_cti, _, qdss_gpio, atest_char2, _, _),
+ [66] = PINGROUP(66, blsp_uart2, blsp_i2c2, qdss_cti, qdss_cti, qdss_gpio, atest_char1, _, _, _),
+ [67] = PINGROUP(67, uim1_data, atest_char0, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, uim1_present, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, uim1_reset, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, uim1_clk, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, mgpi_clk, qlink2_en, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, qlink1_en, _, native_tsense, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qlink1_req, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, coex_uart2, spmi_coex, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, coex_uart2, spmi_coex, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, _, qlink2_req, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, spmi_vgi, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, spmi_vgi, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, _, blsp_spi1, _, blsp_uart1, _, _, _, _, _),
+ [81] = PINGROUP(81, _, blsp_spi1, _, blsp_uart1, gcc_plltest, _, _, _, _),
+ [82] = PINGROUP(82, _, blsp_spi1, _, blsp_i2c1, gcc_plltest, _, _, _, _),
+ [83] = PINGROUP(83, _, blsp_spi1, _, blsp_i2c1, _, _, _, _, _),
+ [84] = PINGROUP(84, _, ebi2_lcd, _, blsp_i2c4, _, _, _, _, _),
+ [85] = PINGROUP(85, _, ebi2_lcd, _, blsp_i2c4, _, _, _, _, _),
+ [86] = PINGROUP(86, _, _, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, _, _, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, _, _, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, _, _, _, _, ebi2_a, _, _, _, _),
+ [90] = PINGROUP(90, _, _, _, _, ebi2_lcd, _, _, _, _),
+ [91] = PINGROUP(91, _, _, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, _, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, _, _, usb2phy_ac, _, _, _, _, _, _),
+ [94] = PINGROUP(94, qdss_cti, qdss_cti, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, qdss_cti, qdss_cti, _, _, _, _, _, _, _),
+ [96] = PINGROUP(96, _, _, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, _, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, _, _, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, _, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, _, _, coex_uart2, _, _, _, _, _, _),
+ [103] = PINGROUP(103, _, _, coex_uart2, _, _, _, _, _, _),
+ [104] = PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, sdc1_tb, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ [108] = UFS_RESET(ufs_reset, 0x0),
+ [109] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
+ [110] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+ [111] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+ [112] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map sdx65_pdc_map[] = {
+ {1, 20}, {2, 21}, {5, 22}, {6, 23}, {9, 24}, {10, 25},
+ {11, 26}, {12, 27}, {13, 28}, {14, 29}, {15, 30}, {16, 31},
+ {17, 32}, {18, 33}, {19, 34}, {21, 35}, {22, 36}, {23, 70},
+ {24, 37}, {25, 38}, {35, 40}, {43, 41}, {46, 44}, {48, 45},
+ {49, 57}, {50, 46}, {52, 47}, {54, 49}, {55, 50}, {60, 53},
+ {61, 54}, {64, 55}, {65, 81}, {68, 56}, {71, 58}, {73, 59},
+ {77, 77}, {81, 65}, {83, 63}, {84, 64}, {86, 66}, {88, 67},
+ {89, 68}, {90, 69}, {93, 71}, {94, 72}, {95, 73}, {96, 74},
+ {99, 75}, {103, 78}, {104, 79}
+};
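+
+/*
+ * Illustrative sketch, not part of this patch: each entry above pairs a
+ * TLMM GPIO number with its PDC wakeup interrupt, so e.g. {1, 20} routes
+ * gpio1 to PDC pin 20.  A lookup along these lines (helper name made up,
+ * compiled out here) shows how the table is consumed:
+ */
+#if 0
+static int sdx65_gpio_to_wakeirq(unsigned int gpio)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(sdx65_pdc_map); i++)
+		if (sdx65_pdc_map[i].gpio == gpio)
+			return sdx65_pdc_map[i].wakeirq;
+
+	return -EINVAL;
+}
+#endif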
+
+static const struct msm_pinctrl_soc_data sdx65_pinctrl = {
+ .pins = sdx65_pins,
+ .npins = ARRAY_SIZE(sdx65_pins),
+ .functions = sdx65_functions,
+ .nfunctions = ARRAY_SIZE(sdx65_functions),
+ .groups = sdx65_groups,
+ .ngroups = ARRAY_SIZE(sdx65_groups),
+ .ngpios = 109,
+ .wakeirq_map = sdx65_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sdx65_pdc_map),
+};
+
+static int sdx65_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sdx65_pinctrl);
+}
+
+static const struct of_device_id sdx65_pinctrl_of_match[] = {
+ { .compatible = "qcom,sdx65-tlmm", },
+ { },
+};
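+
+/*
+ * Minimal devicetree sketch (illustrative only, not part of this patch):
+ * a board would request one of the groups/functions defined above through
+ * the usual TLMM pin configuration nodes.  The label and node names below
+ * are made up.
+ *
+ *	&tlmm {
+ *		i2s_mclk_default: i2s-mclk-default {
+ *			pins = "gpio62";
+ *			function = "i2s_mclk";
+ *		};
+ *	};
+ */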
+
+static struct platform_driver sdx65_pinctrl_driver = {
+ .driver = {
+ .name = "sdx65-tlmm",
+ .of_match_table = sdx65_pinctrl_of_match,
+ },
+ .probe = sdx65_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sdx65_pinctrl_init(void)
+{
+ return platform_driver_register(&sdx65_pinctrl_driver);
+}
+arch_initcall(sdx65_pinctrl_init);
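+
+/*
+ * Editorial note: registering at arch_initcall() rather than the usual
+ * module_init()/device_initcall() level brings the TLMM up early in
+ * boot, before the drivers that depend on its pins start probing.
+ */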
+
+static void __exit sdx65_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sdx65_pinctrl_driver);
+}
+module_exit(sdx65_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sdx65 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sdx65_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450.c b/drivers/pinctrl/qcom/pinctrl-sm8450.c
new file mode 100644
index 000000000000..c6fa3dbc14a1
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450.c
@@ -0,0 +1,1689 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
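+
+/*
+ * For example, FUNCTION(gpio) expands to the initializer below, which is
+ * what keeps the msm_mux_* enum, the *_groups arrays and the function
+ * table index-aligned:
+ *
+ *	[msm_mux_gpio] = {
+ *		.name = "gpio",
+ *		.groups = gpio_groups,
+ *		.ngroups = ARRAY_SIZE(gpio_groups),
+ *	}
+ */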
+
+#define REG_SIZE 0x1000
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
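+
+/*
+ * Worked example (editorial): with REG_SIZE = 0x1000 every GPIO owns a
+ * 4 KiB register tile, so PINGROUP(1, ...) resolves to ctl_reg = 0x1000,
+ * io_reg = 0x1004, intr_cfg_reg = 0x1008 and intr_status_reg = 0x100c.
+ * intr_target_reg shares offset 0x8 with intr_cfg_reg; the routing
+ * target is a separate field of that register, selected through
+ * intr_target_bit and intr_target_kpss_val.
+ */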
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
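+
+/*
+ * Note (editorial): in the two macros above, every facility the pad does
+ * not support is flagged with a register offset/bit of 0 or -1, so the
+ * SDC and UFS_RESET pads expose only pull and drive-strength control
+ * (plus output level for ufs_reset) and cannot be remuxed or used as
+ * interrupts.
+ */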
+
+static const struct pinctrl_pin_desc sm8450_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "GPIO_180"),
+ PINCTRL_PIN(181, "GPIO_181"),
+ PINCTRL_PIN(182, "GPIO_182"),
+ PINCTRL_PIN(183, "GPIO_183"),
+ PINCTRL_PIN(184, "GPIO_184"),
+ PINCTRL_PIN(185, "GPIO_185"),
+ PINCTRL_PIN(186, "GPIO_186"),
+ PINCTRL_PIN(187, "GPIO_187"),
+ PINCTRL_PIN(188, "GPIO_188"),
+ PINCTRL_PIN(189, "GPIO_189"),
+ PINCTRL_PIN(190, "GPIO_190"),
+ PINCTRL_PIN(191, "GPIO_191"),
+ PINCTRL_PIN(192, "GPIO_192"),
+ PINCTRL_PIN(193, "GPIO_193"),
+ PINCTRL_PIN(194, "GPIO_194"),
+ PINCTRL_PIN(195, "GPIO_195"),
+ PINCTRL_PIN(196, "GPIO_196"),
+ PINCTRL_PIN(197, "GPIO_197"),
+ PINCTRL_PIN(198, "GPIO_198"),
+ PINCTRL_PIN(199, "GPIO_199"),
+ PINCTRL_PIN(200, "GPIO_200"),
+ PINCTRL_PIN(201, "GPIO_201"),
+ PINCTRL_PIN(202, "GPIO_202"),
+ PINCTRL_PIN(203, "GPIO_203"),
+ PINCTRL_PIN(204, "GPIO_204"),
+ PINCTRL_PIN(205, "GPIO_205"),
+ PINCTRL_PIN(206, "GPIO_206"),
+ PINCTRL_PIN(207, "GPIO_207"),
+ PINCTRL_PIN(208, "GPIO_208"),
+ PINCTRL_PIN(209, "GPIO_209"),
+ PINCTRL_PIN(210, "UFS_RESET"),
+ PINCTRL_PIN(211, "SDC2_CLK"),
+ PINCTRL_PIN(212, "SDC2_CMD"),
+ PINCTRL_PIN(213, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
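+
+/* For instance, DECLARE_MSM_GPIO_PINS(0) expands to:
+ *	static const unsigned int gpio0_pins[] = { 0 };
+ */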
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+DECLARE_MSM_GPIO_PINS(190);
+DECLARE_MSM_GPIO_PINS(191);
+DECLARE_MSM_GPIO_PINS(192);
+DECLARE_MSM_GPIO_PINS(193);
+DECLARE_MSM_GPIO_PINS(194);
+DECLARE_MSM_GPIO_PINS(195);
+DECLARE_MSM_GPIO_PINS(196);
+DECLARE_MSM_GPIO_PINS(197);
+DECLARE_MSM_GPIO_PINS(198);
+DECLARE_MSM_GPIO_PINS(199);
+DECLARE_MSM_GPIO_PINS(200);
+DECLARE_MSM_GPIO_PINS(201);
+DECLARE_MSM_GPIO_PINS(202);
+DECLARE_MSM_GPIO_PINS(203);
+DECLARE_MSM_GPIO_PINS(204);
+DECLARE_MSM_GPIO_PINS(205);
+DECLARE_MSM_GPIO_PINS(206);
+DECLARE_MSM_GPIO_PINS(207);
+DECLARE_MSM_GPIO_PINS(208);
+DECLARE_MSM_GPIO_PINS(209);
+
+static const unsigned int ufs_reset_pins[] = { 210 };
+static const unsigned int sdc2_clk_pins[] = { 211 };
+static const unsigned int sdc2_cmd_pins[] = { 212 };
+static const unsigned int sdc2_data_pins[] = { 213 };
+
+enum sm8450_functions {
+ msm_mux_gpio,
+ msm_mux_aon_cam,
+ msm_mux_atest_char,
+ msm_mux_atest_usb,
+ msm_mux_audio_ref,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer,
+ msm_mux_cmu_rng,
+ msm_mux_coex_uart1,
+ msm_mux_coex_uart2,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mi2s0_data0,
+ msm_mux_mi2s0_data1,
+ msm_mux_mi2s0_sck,
+ msm_mux_mi2s0_ws,
+ msm_mux_mi2s2_data0,
+ msm_mux_mi2s2_data1,
+ msm_mux_mi2s2_sck,
+ msm_mux_mi2s2_ws,
+ msm_mux_mss_grfc0,
+ msm_mux_mss_grfc1,
+ msm_mux_mss_grfc10,
+ msm_mux_mss_grfc11,
+ msm_mux_mss_grfc12,
+ msm_mux_mss_grfc2,
+ msm_mux_mss_grfc3,
+ msm_mux_mss_grfc4,
+ msm_mux_mss_grfc5,
+ msm_mux_mss_grfc6,
+ msm_mux_mss_grfc7,
+ msm_mux_mss_grfc8,
+ msm_mux_mss_grfc9,
+ msm_mux_nav,
+ msm_mux_pcie0_clkreqn,
+ msm_mux_pcie1_clkreqn,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_clk,
+ msm_mux_pri_mi2s,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qlink2_enable,
+ msm_mux_qlink2_request,
+ msm_mux_qlink2_wmss,
+ msm_mux_qspi0,
+ msm_mux_qspi1,
+ msm_mux_qspi2,
+ msm_mux_qspi3,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_qup0,
+ msm_mux_qup1,
+ msm_mux_qup10,
+ msm_mux_qup11,
+ msm_mux_qup12,
+ msm_mux_qup13,
+ msm_mux_qup14,
+ msm_mux_qup15,
+ msm_mux_qup16,
+ msm_mux_qup17,
+ msm_mux_qup18,
+ msm_mux_qup19,
+ msm_mux_qup2,
+ msm_mux_qup20,
+ msm_mux_qup21,
+ msm_mux_qup3,
+ msm_mux_qup4,
+ msm_mux_qup5,
+ msm_mux_qup6,
+ msm_mux_qup7,
+ msm_mux_qup8,
+ msm_mux_qup9,
+ msm_mux_qup_l4,
+ msm_mux_qup_l5,
+ msm_mux_qup_l6,
+ msm_mux_sd_write,
+ msm_mux_sdc40,
+ msm_mux_sdc41,
+ msm_mux_sdc42,
+ msm_mux_sdc43,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_sec_mi2s,
+ msm_mux_tb_trig,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tmess_prng0,
+ msm_mux_tmess_prng1,
+ msm_mux_tmess_prng2,
+ msm_mux_tmess_prng3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim0_clk,
+ msm_mux_uim0_data,
+ msm_mux_uim0_present,
+ msm_mux_uim0_reset,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vfr_0,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+ "gpio153", "gpio154", "gpio155", "gpio156", "gpio157", "gpio158",
+ "gpio159", "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+ "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
+ "gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
+ "gpio177", "gpio178", "gpio179", "gpio180", "gpio181", "gpio182",
+ "gpio183", "gpio184", "gpio185", "gpio186", "gpio187", "gpio188",
+ "gpio189", "gpio190", "gpio191", "gpio192", "gpio193", "gpio194",
+ "gpio195", "gpio196", "gpio197", "gpio198", "gpio199", "gpio200",
+ "gpio201", "gpio202", "gpio203", "gpio204", "gpio205", "gpio206",
+ "gpio207", "gpio208", "gpio209",
+};
+
+static const char * const aon_cam_groups[] = {
+ "gpio108",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio86", "gpio87", "gpio88", "gpio89", "gpio90",
+};
+
+static const char * const atest_usb_groups[] = {
+ "gpio37", "gpio39", "gpio55", "gpio148", "gpio149",
+};
+
+static const char * const audio_ref_groups[] = {
+ "gpio124",
+};
+
+static const char * const cam_mclk_groups[] = {
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+};
+
+static const char * const cci_async_groups[] = {
+ "gpio109", "gpio119", "gpio120",
+};
+
+static const char * const cci_i2c_groups[] = {
+ "gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio208", "gpio209",
+};
+
+static const char * const cci_timer_groups[] = {
+ "gpio116", "gpio117", "gpio118", "gpio119", "gpio120",
+};
+
+static const char * const cmu_rng_groups[] = {
+ "gpio94", "gpio95", "gpio96", "gpio97",
+};
+
+static const char * const coex_uart1_groups[] = {
+ "gpio148", "gpio149",
+};
+
+static const char * const coex_uart2_groups[] = {
+ "gpio150", "gpio151",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio99",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio71",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio72",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio9",
+};
+
+static const char * const ddr_bist_groups[] = {
+ "gpio36", "gpio37", "gpio40", "gpio41",
+};
+
+static const char * const ddr_pxi0_groups[] = {
+ "gpio51", "gpio52",
+};
+
+static const char * const ddr_pxi1_groups[] = {
+ "gpio40", "gpio41",
+};
+
+static const char * const ddr_pxi2_groups[] = {
+ "gpio45", "gpio47",
+};
+
+static const char * const ddr_pxi3_groups[] = {
+ "gpio43", "gpio44",
+};
+
+static const char * const dp_hot_groups[] = {
+ "gpio47",
+};
+
+static const char * const gcc_gp1_groups[] = {
+ "gpio86", "gpio134",
+};
+
+static const char * const gcc_gp2_groups[] = {
+ "gpio87", "gpio135",
+};
+
+static const char * const gcc_gp3_groups[] = {
+ "gpio88", "gpio136",
+};
+
+static const char * const ibi_i3c_groups[] = {
+ "gpio28", "gpio29", "gpio32", "gpio33", "gpio56", "gpio57", "gpio60", "gpio61",
+};
+
+static const char * const jitter_bist_groups[] = {
+ "gpio24",
+};
+
+static const char * const mdp_vsync_groups[] = {
+ "gpio46", "gpio47", "gpio86", "gpio87", "gpio88",
+};
+
+static const char * const mdp_vsync0_groups[] = {
+ "gpio86",
+};
+
+static const char * const mdp_vsync1_groups[] = {
+ "gpio86",
+};
+
+static const char * const mdp_vsync2_groups[] = {
+ "gpio87",
+};
+
+static const char * const mdp_vsync3_groups[] = {
+ "gpio87",
+};
+
+static const char * const mi2s0_data0_groups[] = {
+ "gpio127",
+};
+
+static const char * const mi2s0_data1_groups[] = {
+ "gpio128",
+};
+
+static const char * const mi2s0_sck_groups[] = {
+ "gpio126",
+};
+
+static const char * const mi2s0_ws_groups[] = {
+ "gpio129",
+};
+
+static const char * const mi2s2_data0_groups[] = {
+ "gpio122",
+};
+
+static const char * const mi2s2_data1_groups[] = {
+ "gpio124",
+};
+
+static const char * const mi2s2_sck_groups[] = {
+ "gpio121",
+};
+
+static const char * const mi2s2_ws_groups[] = {
+ "gpio123",
+};
+
+static const char * const mss_grfc0_groups[] = {
+ "gpio138", "gpio153",
+};
+
+static const char * const mss_grfc1_groups[] = {
+ "gpio139",
+};
+
+static const char * const mss_grfc10_groups[] = {
+ "gpio150",
+};
+
+static const char * const mss_grfc11_groups[] = {
+ "gpio151",
+};
+
+static const char * const mss_grfc12_groups[] = {
+ "gpio152",
+};
+
+static const char * const mss_grfc2_groups[] = {
+ "gpio140",
+};
+
+static const char * const mss_grfc3_groups[] = {
+ "gpio141",
+};
+
+static const char * const mss_grfc4_groups[] = {
+ "gpio142",
+};
+
+static const char * const mss_grfc5_groups[] = {
+ "gpio143",
+};
+
+static const char * const mss_grfc6_groups[] = {
+ "gpio144",
+};
+
+static const char * const mss_grfc7_groups[] = {
+ "gpio145",
+};
+
+static const char * const mss_grfc8_groups[] = {
+ "gpio146",
+};
+
+static const char * const mss_grfc9_groups[] = {
+ "gpio147",
+};
+
+static const char * const nav_groups[] = {
+ "gpio153", "gpio154", "gpio155",
+};
+
+static const char * const pcie0_clkreqn_groups[] = {
+ "gpio95",
+};
+
+static const char * const pcie1_clkreqn_groups[] = {
+ "gpio98",
+};
+
+static const char * const phase_flag_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio10", "gpio11", "gpio12", "gpio13",
+ "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio25", "gpio26",
+	"gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const pll_bist_groups[] = {
+ "gpio20",
+};
+
+static const char * const pll_clk_groups[] = {
+ "gpio107",
+};
+
+static const char * const pri_mi2s_groups[] = {
+ "gpio125",
+};
+
+static const char * const prng_rosc_groups[] = {
+	"gpio73", "gpio75", "gpio81", "gpio83",
+};
+
+static const char * const qdss_cti_groups[] = {
+ "gpio2", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", "gpio85", "gpio93",
+};
+
+static const char * const qdss_gpio_groups[] = {
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio117", "gpio118",
+ "gpio119", "gpio120", "gpio188", "gpio189", "gpio190", "gpio191", "gpio192", "gpio193",
+ "gpio194", "gpio195", "gpio196", "gpio197", "gpio198", "gpio199", "gpio200", "gpio201",
+ "gpio202", "gpio203", "gpio204", "gpio205",
+};
+
+static const char * const qlink0_enable_groups[] = {
+ "gpio157",
+};
+
+static const char * const qlink0_request_groups[] = {
+ "gpio156",
+};
+
+static const char * const qlink0_wmss_groups[] = {
+ "gpio158",
+};
+
+static const char * const qlink1_enable_groups[] = {
+ "gpio160",
+};
+
+static const char * const qlink1_request_groups[] = {
+ "gpio159",
+};
+
+static const char * const qlink1_wmss_groups[] = {
+ "gpio161",
+};
+
+static const char * const qlink2_enable_groups[] = {
+ "gpio163",
+};
+
+static const char * const qlink2_request_groups[] = {
+ "gpio162",
+};
+
+static const char * const qlink2_wmss_groups[] = {
+ "gpio164",
+};
+
+static const char * const qspi0_groups[] = {
+ "gpio52",
+};
+
+static const char * const qspi1_groups[] = {
+ "gpio53",
+};
+
+static const char * const qspi2_groups[] = {
+ "gpio48",
+};
+
+static const char * const qspi3_groups[] = {
+ "gpio49",
+};
+
+static const char * const qspi_clk_groups[] = {
+ "gpio50",
+};
+
+static const char * const qspi_cs_groups[] = {
+ "gpio51", "gpio54",
+};
+
+static const char * const qup0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const qup1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const qup10_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char * const qup11_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+};
+
+static const char * const qup12_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char * const qup13_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char * const qup14_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char * const qup15_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+
+static const char * const qup16_groups[] = {
+ "gpio60", "gpio61", "gpio62", "gpio63",
+};
+
+static const char * const qup17_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67",
+};
+
+static const char * const qup18_groups[] = {
+ "gpio68", "gpio69", "gpio70", "gpio71",
+};
+
+static const char * const qup19_groups[] = {
+ "gpio72", "gpio73", "gpio74", "gpio75",
+};
+
+static const char * const qup2_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const qup20_groups[] = {
+ "gpio76", "gpio77", "gpio78", "gpio79",
+};
+
+static const char * const qup21_groups[] = {
+ "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char * const qup3_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const qup4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const qup5_groups[] = {
+ "gpio84", "gpio85", "gpio206", "gpio207",
+};
+
+static const char * const qup6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qup7_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+
+static const char * const qup8_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+};
+
+static const char * const qup9_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char * const qup_l4_groups[] = {
+ "gpio24", "gpio40", "gpio58", "gpio63",
+};
+
+static const char * const qup_l5_groups[] = {
+ "gpio25", "gpio41", "gpio59", "gpio66",
+};
+
+static const char * const qup_l6_groups[] = {
+ "gpio26", "gpio42", "gpio62", "gpio67",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio93",
+};
+
+static const char * const sdc40_groups[] = {
+ "gpio52",
+};
+
+static const char * const sdc41_groups[] = {
+ "gpio53",
+};
+
+static const char * const sdc42_groups[] = {
+ "gpio48",
+};
+
+static const char * const sdc43_groups[] = {
+ "gpio49",
+};
+
+static const char * const sdc4_clk_groups[] = {
+ "gpio50",
+};
+
+static const char * const sdc4_cmd_groups[] = {
+ "gpio51",
+};
+
+static const char * const sec_mi2s_groups[] = {
+ "gpio124",
+};
+
+static const char * const tb_trig_groups[] = {
+ "gpio64", "gpio137",
+};
+
+static const char * const tgu_ch0_groups[] = {
+ "gpio64",
+};
+
+static const char * const tgu_ch1_groups[] = {
+ "gpio65",
+};
+
+static const char * const tgu_ch2_groups[] = {
+ "gpio66",
+};
+
+static const char * const tgu_ch3_groups[] = {
+ "gpio67",
+};
+
+static const char * const tmess_prng0_groups[] = {
+ "gpio80",
+};
+
+static const char * const tmess_prng1_groups[] = {
+ "gpio79",
+};
+
+static const char * const tmess_prng2_groups[] = {
+ "gpio77",
+};
+
+static const char * const tmess_prng3_groups[] = {
+ "gpio76",
+};
+
+static const char * const tsense_pwm1_groups[] = {
+ "gpio91",
+};
+
+static const char * const tsense_pwm2_groups[] = {
+ "gpio91",
+};
+
+static const char * const uim0_clk_groups[] = {
+ "gpio131",
+};
+
+static const char * const uim0_data_groups[] = {
+ "gpio130",
+};
+
+static const char * const uim0_present_groups[] = {
+ "gpio133",
+};
+
+static const char * const uim0_reset_groups[] = {
+ "gpio132",
+};
+
+static const char * const uim1_clk_groups[] = {
+ "gpio135",
+};
+
+static const char * const uim1_data_groups[] = {
+ "gpio134",
+};
+
+static const char * const uim1_present_groups[] = {
+ "gpio137",
+};
+
+static const char * const uim1_reset_groups[] = {
+ "gpio136",
+};
+
+static const char * const usb2phy_ac_groups[] = {
+ "gpio90",
+};
+
+static const char * const usb_phy_groups[] = {
+ "gpio91",
+};
+
+static const char * const vfr_0_groups[] = {
+ "gpio89",
+};
+
+static const char * const vfr_1_groups[] = {
+ "gpio155",
+};
+
+static const char * const vsense_trigger_groups[] = {
+ "gpio18",
+};
+
+static const struct msm_function sm8450_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(aon_cam),
+ FUNCTION(atest_char),
+ FUNCTION(atest_usb),
+ FUNCTION(audio_ref),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer),
+ FUNCTION(cmu_rng),
+ FUNCTION(coex_uart1),
+ FUNCTION(coex_uart2),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mi2s0_data0),
+ FUNCTION(mi2s0_data1),
+ FUNCTION(mi2s0_sck),
+ FUNCTION(mi2s0_ws),
+ FUNCTION(mi2s2_data0),
+ FUNCTION(mi2s2_data1),
+ FUNCTION(mi2s2_sck),
+ FUNCTION(mi2s2_ws),
+ FUNCTION(mss_grfc0),
+ FUNCTION(mss_grfc1),
+ FUNCTION(mss_grfc10),
+ FUNCTION(mss_grfc11),
+ FUNCTION(mss_grfc12),
+ FUNCTION(mss_grfc2),
+ FUNCTION(mss_grfc3),
+ FUNCTION(mss_grfc4),
+ FUNCTION(mss_grfc5),
+ FUNCTION(mss_grfc6),
+ FUNCTION(mss_grfc7),
+ FUNCTION(mss_grfc8),
+ FUNCTION(mss_grfc9),
+ FUNCTION(nav),
+ FUNCTION(pcie0_clkreqn),
+ FUNCTION(pcie1_clkreqn),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_clk),
+ FUNCTION(pri_mi2s),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qlink2_enable),
+ FUNCTION(qlink2_request),
+ FUNCTION(qlink2_wmss),
+ FUNCTION(qspi0),
+ FUNCTION(qspi1),
+ FUNCTION(qspi2),
+ FUNCTION(qspi3),
+ FUNCTION(qspi_clk),
+ FUNCTION(qspi_cs),
+ FUNCTION(qup0),
+ FUNCTION(qup1),
+ FUNCTION(qup10),
+ FUNCTION(qup11),
+ FUNCTION(qup12),
+ FUNCTION(qup13),
+ FUNCTION(qup14),
+ FUNCTION(qup15),
+ FUNCTION(qup16),
+ FUNCTION(qup17),
+ FUNCTION(qup18),
+ FUNCTION(qup19),
+ FUNCTION(qup2),
+ FUNCTION(qup20),
+ FUNCTION(qup21),
+ FUNCTION(qup3),
+ FUNCTION(qup4),
+ FUNCTION(qup5),
+ FUNCTION(qup6),
+ FUNCTION(qup7),
+ FUNCTION(qup8),
+ FUNCTION(qup9),
+ FUNCTION(qup_l4),
+ FUNCTION(qup_l5),
+ FUNCTION(qup_l6),
+ FUNCTION(sd_write),
+ FUNCTION(sdc40),
+ FUNCTION(sdc41),
+ FUNCTION(sdc42),
+ FUNCTION(sdc43),
+ FUNCTION(sdc4_clk),
+ FUNCTION(sdc4_cmd),
+ FUNCTION(sec_mi2s),
+ FUNCTION(tb_trig),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tmess_prng0),
+ FUNCTION(tmess_prng1),
+ FUNCTION(tmess_prng2),
+ FUNCTION(tmess_prng3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(uim0_clk),
+ FUNCTION(uim0_data),
+ FUNCTION(uim0_present),
+ FUNCTION(uim0_reset),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_0),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+};
+
+/* Every pin is maintained as a single group; a missing or non-existent pin
+ * is represented by a dummy group, so that the pin group index stays in
+ * sync with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm8450_groups[] = {
+ [0] = PINGROUP(0, qup0, _, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, qup0, _, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup0, qdss_cti, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, qup0, _, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, qup1, phase_flag, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup1, phase_flag, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, qup1, phase_flag, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qup1, phase_flag, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, qup2, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qup2, dbg_out, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, qup2, phase_flag, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, qup2, phase_flag, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup3, phase_flag, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, qup3, phase_flag, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup3, phase_flag, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup3, phase_flag, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup4, phase_flag, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, qup4, phase_flag, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, qup4, phase_flag, _, vsense_trigger, _, _, _, _, _),
+ [19] = PINGROUP(19, qup4, phase_flag, _, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup6, pll_bist, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, qup6, _, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, qup6, _, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, qup6, _, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, qup7, qup_l4, jitter_bist, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup7, qup_l5, phase_flag, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup7, qup_l6, phase_flag, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup7, _, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, qup8, ibi_i3c, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, qup8, ibi_i3c, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, qup8, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, qup8, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, qup9, ibi_i3c, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup9, ibi_i3c, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qup9, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qup9, _, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup10, ddr_bist, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup10, ddr_bist, atest_usb, _, _, _, _, _, _),
+ [38] = PINGROUP(38, qup10, _, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, qup10, atest_usb, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup11, qup_l4, ddr_bist, ddr_pxi1, _, _, _, _, _),
+ [41] = PINGROUP(41, qup11, qup_l5, ddr_bist, ddr_pxi1, _, _, _, _, _),
+ [42] = PINGROUP(42, qup11, qup_l6, _, _, _, _, _, _, _),
+ [43] = PINGROUP(43, qup11, ddr_pxi3, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, qup12, ddr_pxi3, _, _, _, _, _, _, _),
+ [45] = PINGROUP(45, qup12, ddr_pxi2, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, qup12, mdp_vsync, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, qup12, dp_hot, mdp_vsync, ddr_pxi2, _, _, _, _, _),
+ [48] = PINGROUP(48, qup13, qspi2, sdc42, _, _, _, _, _, _),
+ [49] = PINGROUP(49, qup13, qspi3, sdc43, _, _, _, _, _, _),
+ [50] = PINGROUP(50, qup13, qspi_clk, sdc4_clk, _, _, _, _, _, _),
+ [51] = PINGROUP(51, qup13, qspi_cs, sdc4_cmd, ddr_pxi0, _, _, _, _, _),
+ [52] = PINGROUP(52, qup14, qspi0, sdc40, ddr_pxi0, _, _, _, _, _),
+ [53] = PINGROUP(53, qup14, qspi1, sdc41, _, _, _, _, _, _),
+ [54] = PINGROUP(54, qup14, qspi_cs, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qup14, atest_usb, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup15, ibi_i3c, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, qup15, ibi_i3c, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, qup15, qup_l4, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qup15, qup_l5, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, qup16, ibi_i3c, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, qup16, ibi_i3c, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, qup16, qup_l6, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, qup16, qup_l4, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, qup17, tb_trig, tgu_ch0, _, _, _, _, _, _),
+ [65] = PINGROUP(65, qup17, tgu_ch1, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, qup17, qup_l5, tgu_ch2, _, _, _, _, _, _),
+ [67] = PINGROUP(67, qup17, qup_l6, tgu_ch3, _, _, _, _, _, _),
+ [68] = PINGROUP(68, qup18, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, qup18, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, qup18, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, qup18, cri_trng0, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, qup19, cri_trng1, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qup19, prng_rosc, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, qup19, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, qup19, prng_rosc, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, qup20, phase_flag, tmess_prng3, _, _, _, _, _, _),
+ [77] = PINGROUP(77, qup20, phase_flag, tmess_prng2, _, _, _, _, _, _),
+ [78] = PINGROUP(78, qup20, phase_flag, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, qup20, phase_flag, tmess_prng1, _, _, _, _, _, _),
+ [80] = PINGROUP(80, qup21, qdss_cti, phase_flag, tmess_prng0, _, _, _, _, _),
+ [81] = PINGROUP(81, qup21, qdss_cti, phase_flag, prng_rosc, _, _, _, _, _),
+ [82] = PINGROUP(82, qup21, qdss_cti, phase_flag, _, _, _, _, _, _),
+ [83] = PINGROUP(83, qup21, qdss_cti, phase_flag, prng_rosc, _, _, _, _, _),
+ [84] = PINGROUP(84, qup5, qdss_cti, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, qup5, qdss_cti, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, mdp_vsync, mdp_vsync0, mdp_vsync1, gcc_gp1, atest_char, _, _, _, _),
+ [87] = PINGROUP(87, mdp_vsync, mdp_vsync2, mdp_vsync3, gcc_gp2, atest_char, _, _, _, _),
+ [88] = PINGROUP(88, mdp_vsync, gcc_gp3, atest_char, _, _, _, _, _, _),
+ [89] = PINGROUP(89, vfr_0, atest_char, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, usb2phy_ac, atest_char, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, usb_phy, tsense_pwm1, tsense_pwm2, _, _, _, _, _, _),
+ [92] = PINGROUP(92, phase_flag, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, sd_write, qdss_cti, phase_flag, _, _, _, _, _, _),
+ [94] = PINGROUP(94, cmu_rng, phase_flag, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, pcie0_clkreqn, cmu_rng, phase_flag, _, _, _, _, _, _),
+ [96] = PINGROUP(96, cmu_rng, phase_flag, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, cmu_rng, phase_flag, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, pcie1_clkreqn, phase_flag, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, phase_flag, cri_trng, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, cam_mclk, qdss_gpio, pll_clk, _, _, _, _, _, _),
+ [108] = PINGROUP(108, aon_cam, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, cci_async, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, cci_timer, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, cci_timer, qdss_gpio, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, cci_timer, qdss_gpio, _, _, _, _, _, _, _),
+ [119] = PINGROUP(119, cci_timer, cci_async, qdss_gpio, _, _, _, _, _, _),
+ [120] = PINGROUP(120, cci_timer, cci_async, qdss_gpio, _, _, _, _, _, _),
+ [121] = PINGROUP(121, mi2s2_sck, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, mi2s2_data0, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, mi2s2_ws, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, mi2s2_data1, sec_mi2s, audio_ref, _, _, _, _, _, _),
+ [125] = PINGROUP(125, pri_mi2s, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, mi2s0_sck, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, mi2s0_data0, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, mi2s0_data1, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, mi2s0_ws, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, uim0_data, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, uim0_clk, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, uim0_reset, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, uim0_present, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, uim1_data, gcc_gp1, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, uim1_clk, gcc_gp2, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, uim1_reset, gcc_gp3, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, uim1_present, tb_trig, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, _, mss_grfc0, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, mss_grfc1, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, mss_grfc2, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, mss_grfc3, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, mss_grfc4, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, mss_grfc5, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, mss_grfc6, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, mss_grfc7, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, mss_grfc8, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, mss_grfc9, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, coex_uart1, atest_usb, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, coex_uart1, atest_usb, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, coex_uart2, mss_grfc10, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, coex_uart2, mss_grfc11, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, mss_grfc12, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, mss_grfc0, nav, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, nav, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, nav, vfr_1, _, _, _, _, _, _, _),
+ [156] = PINGROUP(156, qlink0_request, _, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, qlink0_enable, _, _, _, _, _, _, _, _),
+ [158] = PINGROUP(158, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, qlink1_request, _, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, qlink1_enable, _, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, qlink2_request, _, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, qlink2_enable, _, _, _, _, _, _, _, _),
+ [164] = PINGROUP(164, qlink2_wmss, _, _, _, _, _, _, _, _),
+ [165] = PINGROUP(165, _, _, _, _, _, _, _, _, _),
+ [166] = PINGROUP(166, _, _, _, _, _, _, _, _, _),
+ [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _),
+ [168] = PINGROUP(168, _, _, _, _, _, _, _, _, _),
+ [169] = PINGROUP(169, _, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, _, _, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, _, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, _, _, _, _, _, _, _, _, _),
+ [175] = PINGROUP(175, _, _, _, _, _, _, _, _, _),
+ [176] = PINGROUP(176, _, _, _, _, _, _, _, _, _),
+ [177] = PINGROUP(177, _, _, _, _, _, _, _, _, _),
+ [178] = PINGROUP(178, _, _, _, _, _, _, _, _, _),
+ [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _),
+ [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _),
+ [181] = PINGROUP(181, _, _, _, _, _, _, _, _, _),
+ [182] = PINGROUP(182, _, _, _, _, _, _, _, _, _),
+ [183] = PINGROUP(183, _, _, _, _, _, _, _, _, _),
+ [184] = PINGROUP(184, _, _, _, _, _, _, _, _, _),
+ [185] = PINGROUP(185, _, _, _, _, _, _, _, _, _),
+ [186] = PINGROUP(186, _, _, _, _, _, _, _, _, _),
+ [187] = PINGROUP(187, _, _, _, _, _, _, _, _, _),
+ [188] = PINGROUP(188, _, qdss_gpio, _, _, _, _, _, _, _),
+ [189] = PINGROUP(189, _, qdss_gpio, _, _, _, _, _, _, _),
+ [190] = PINGROUP(190, qdss_gpio, _, _, _, _, _, _, _, _),
+ [191] = PINGROUP(191, qdss_gpio, _, _, _, _, _, _, _, _),
+ [192] = PINGROUP(192, _, qdss_gpio, _, _, _, _, _, _, _),
+ [193] = PINGROUP(193, _, qdss_gpio, _, _, _, _, _, _, _),
+ [194] = PINGROUP(194, _, qdss_gpio, _, _, _, _, _, _, _),
+ [195] = PINGROUP(195, _, qdss_gpio, _, _, _, _, _, _, _),
+ [196] = PINGROUP(196, _, qdss_gpio, _, _, _, _, _, _, _),
+ [197] = PINGROUP(197, _, qdss_gpio, _, _, _, _, _, _, _),
+ [198] = PINGROUP(198, _, qdss_gpio, _, _, _, _, _, _, _),
+ [199] = PINGROUP(199, _, qdss_gpio, _, _, _, _, _, _, _),
+ [200] = PINGROUP(200, _, qdss_gpio, _, _, _, _, _, _, _),
+ [201] = PINGROUP(201, _, qdss_gpio, _, _, _, _, _, _, _),
+ [202] = PINGROUP(202, qdss_gpio, _, _, _, _, _, _, _, _),
+ [203] = PINGROUP(203, qdss_gpio, _, _, _, _, _, _, _, _),
+ [204] = PINGROUP(204, qdss_gpio, _, _, _, _, _, _, _, _),
+ [205] = PINGROUP(205, qdss_gpio, _, _, _, _, _, _, _, _),
+ [206] = PINGROUP(206, qup5, _, _, _, _, _, _, _, _),
+ [207] = PINGROUP(207, qup5, _, _, _, _, _, _, _, _),
+ [208] = PINGROUP(208, cci_i2c, _, _, _, _, _, _, _, _),
+ [209] = PINGROUP(209, cci_i2c, _, _, _, _, _, _, _, _),
+ [210] = UFS_RESET(ufs_reset, 0xde000),
+ [211] = SDC_QDSD_PINGROUP(sdc2_clk, 0xd6000, 14, 6),
+ [212] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xd6000, 11, 3),
+ [213] = SDC_QDSD_PINGROUP(sdc2_data, 0xd6000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map sm8450_pdc_map[] = {
+ { 2, 70 }, { 3, 77 }, { 7, 52 }, { 8, 108 }, { 10, 128 }, { 11, 53 },
+ { 12, 129 }, { 13, 130 }, { 14, 131 }, { 15, 67 }, { 19, 69 }, { 21, 132 },
+ { 23, 54 }, { 26, 56 }, { 27, 71 }, { 28, 57 }, { 31, 55 }, { 32, 58 },
+ { 34, 72 }, { 35, 43 }, { 36, 78 }, { 38, 79 }, { 39, 62 }, { 40, 80 },
+ { 41, 133 }, { 43, 81 }, { 44, 87 }, { 45, 134 }, { 46, 66 }, { 47, 63 },
+ { 50, 88 }, { 51, 89 }, { 55, 90 }, { 56, 59 }, { 59, 82 }, { 60, 60 },
+ { 62, 135 }, { 63, 91 }, { 66, 136 }, { 67, 44 }, { 69, 137 }, { 71, 97 },
+ { 75, 73 }, { 79, 74 }, { 80, 96 }, { 81, 98 }, { 82, 45 }, { 83, 99 },
+ { 84, 94 }, { 85, 100 }, { 86, 101 }, { 87, 102 }, { 88, 92 }, { 89, 83 },
+ { 90, 84 }, { 91, 85 }, { 92, 46 }, { 95, 103 }, { 96, 104 }, { 98, 105 },
+ { 99, 106 }, { 115, 95 }, { 116, 76 }, { 117, 75 }, { 118, 86 }, { 119, 93 },
+ { 133, 47 }, { 137, 42 }, { 148, 61 }, { 150, 68 }, { 153, 65 }, { 154, 48 },
+ { 155, 49 }, { 156, 64 }, { 159, 50 }, { 162, 51 }, { 166, 111 }, { 169, 114 },
+ { 171, 115 }, { 172, 116 }, { 174, 117 }, { 176, 107 }, { 181, 109 },
+ { 182, 110 }, { 185, 112 }, { 187, 113 }, { 188, 118 }, { 190, 122 },
+ { 192, 123 }, { 195, 124 }, { 201, 119 }, { 203, 120 }, { 205, 121 },
+};
+
+static const struct msm_pinctrl_soc_data sm8450_tlmm = {
+ .pins = sm8450_pins,
+ .npins = ARRAY_SIZE(sm8450_pins),
+ .functions = sm8450_functions,
+ .nfunctions = ARRAY_SIZE(sm8450_functions),
+ .groups = sm8450_groups,
+ .ngroups = ARRAY_SIZE(sm8450_groups),
+ .ngpios = 211,
+ .wakeirq_map = sm8450_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sm8450_pdc_map),
+};
+
+static int sm8450_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm8450_tlmm);
+}
+
+static const struct of_device_id sm8450_tlmm_of_match[] = {
+ { .compatible = "qcom,sm8450-tlmm", },
+ { },
+};
+
+static struct platform_driver sm8450_tlmm_driver = {
+ .driver = {
+ .name = "sm8450-tlmm",
+ .of_match_table = sm8450_tlmm_of_match,
+ },
+ .probe = sm8450_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm8450_tlmm_init(void)
+{
+ return platform_driver_register(&sm8450_tlmm_driver);
+}
+arch_initcall(sm8450_tlmm_init);
+
+static void __exit sm8450_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm8450_tlmm_driver);
+}
+module_exit(sm8450_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM8450 TLMM driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sm8450_tlmm_of_match);
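Each { gpio, wakeirq } pair in sm8450_pdc_map above routes a wake-capable TLMM GPIO to its PDC interrupt pin; GPIOs absent from the table cannot wake the system. A minimal lookup sketch, assuming the .gpio/.wakeirq field names of struct msm_gpio_wakeirq_map (the helper itself is hypothetical; the shared msm core does this internally):

/* Sketch: resolve the PDC wake pin for a TLMM GPIO. */
static int sm8450_gpio_to_pdc_pin(unsigned int gpio)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sm8450_pdc_map); i++)
		if (sm8450_pdc_map[i].gpio == gpio)
			return sm8450_pdc_map[i].wakeirq;

	return -ENOENT;	/* not wake-capable */
}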
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 5283d5e9e8bc..f2eac3b05d67 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1141,6 +1141,7 @@ static int pmic_gpio_remove(struct platform_device *pdev)
}
static const struct of_device_id pmic_gpio_of_match[] = {
+ { .compatible = "qcom,pm2250-gpio", .data = (void *) 10 },
/* pm660 has 13 GPIOs with holes on 1, 5, 6, 7, 8 and 10 */
{ .compatible = "qcom,pm660-gpio", .data = (void *) 13 },
/* pm660l has 12 GPIOs with holes on 1, 2, 10, 11 and 12 */
@@ -1151,6 +1152,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
+ { .compatible = "qcom,pm8019-gpio", .data = (void *) 6 },
/* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */
{ .compatible = "qcom,pm8150-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pmc8180-gpio", .data = (void *) 10 },
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index b5949f766a7a..1b41adda8129 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -773,7 +773,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
pctrl->chip = pm8xxx_gpio_template;
pctrl->chip.base = -1;
pctrl->chip.parent = &pdev->dev;
- pctrl->chip.of_node = pdev->dev.of_node;
pctrl->chip.of_gpio_n_cells = 2;
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.ngpio = pctrl->npins;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 842940594c4a..49893a5133a8 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -857,7 +857,6 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
pctrl->chip = pm8xxx_mpp_template;
pctrl->chip.base = -1;
pctrl->chip.parent = &pdev->dev;
- pctrl->chip.of_node = pdev->dev.of_node;
pctrl->chip.of_gpio_n_cells = 2;
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.ngpio = pctrl->npins;
diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c
index ad6532443a78..83580385c3ca 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779a0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c
@@ -3835,7 +3835,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(3, 12), 16, 3 }, /* CANFD5_RX */
{ RCAR_GP_PIN(3, 11), 12, 3 }, /* CANFD5_TX */
{ RCAR_GP_PIN(3, 10), 8, 3 }, /* CANFD4_RX */
- { RCAR_GP_PIN(3, 9), 4, 3 }, /* CANFD4_TX*/
+ { RCAR_GP_PIN(3, 9), 4, 3 }, /* CANFD4_TX */
{ RCAR_GP_PIN(3, 8), 0, 3 }, /* CANFD3_RX */
} },
{ PINMUX_DRIVE_REG("DRV2CTRL3", 0xe6058888) {
@@ -4305,7 +4305,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[11] = RCAR_GP_PIN(6, 11), /* AVB2_TD3 */
[12] = RCAR_GP_PIN(6, 12), /* AVB2_TXCREFCLK */
[13] = RCAR_GP_PIN(6, 13), /* AVB2_MDIO */
- [14] = RCAR_GP_PIN(6, 14), /* AVB2_MDC*/
+ [14] = RCAR_GP_PIN(6, 14), /* AVB2_MDC */
[15] = RCAR_GP_PIN(6, 15), /* AVB2_MAGIC */
[16] = RCAR_GP_PIN(6, 16), /* AVB2_PHY_INT */
[17] = RCAR_GP_PIN(6, 17), /* AVB2_LINK */
diff --git a/drivers/pinctrl/renesas/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c
index 10020fe302b8..c1d6e9512c7a 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza1.c
@@ -757,9 +757,9 @@ static int rza1_gpio_request(struct gpio_chip *chip, unsigned int gpio)
}
/**
- * rza1_gpio_disable_free() - reset a pin
+ * rza1_gpio_free() - reset a pin
*
- * Surprisingly, disable_free a gpio, is equivalent to request it.
+ * Surprisingly, freeing a gpio is equivalent to requesting it.
* Reset pin to port mode, with input buffer disabled. This overwrites all
* port direction settings applied with set_direction
*
@@ -875,7 +875,7 @@ static int rza1_dt_node_pin_count(struct device_node *np)
}
/**
- * rza1_parse_pmx_function() - parse a pin mux sub-node
+ * rza1_parse_pinmux_node() - parse a pin mux sub-node
*
* @rza1_pctl: RZ/A1 pin controller device
* @np: of pmx sub-node
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index 32829eb9656c..c0a04f1ee994 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -240,7 +240,6 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
int ret;
chip.label = devm_kasprintf(priv->dev, GFP_KERNEL, "%pOFn", np);
- chip.of_node = np;
chip.parent = priv->dev;
chip.ngpio = priv->npins;
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 20b2af889ca9..ccee9c9e2e22 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -35,20 +35,21 @@
#define MUX_FUNC(pinconf) (((pinconf) & MUX_FUNC_MASK) >> MUX_FUNC_OFFS)
/* PIN capabilities */
-#define PIN_CFG_IOLH BIT(0)
-#define PIN_CFG_SR BIT(1)
-#define PIN_CFG_IEN BIT(2)
-#define PIN_CFG_PUPD BIT(3)
-#define PIN_CFG_IOLH_SD0 BIT(4)
-#define PIN_CFG_IOLH_SD1 BIT(5)
-#define PIN_CFG_IOLH_QSPI BIT(6)
-#define PIN_CFG_IOLH_ETH0 BIT(7)
-#define PIN_CFG_IOLH_ETH1 BIT(8)
-#define PIN_CFG_FILONOFF BIT(9)
-#define PIN_CFG_FILNUM BIT(10)
-#define PIN_CFG_FILCLKSEL BIT(11)
-
-#define RZG2L_MPXED_PIN_FUNCS (PIN_CFG_IOLH | \
+#define PIN_CFG_IOLH_A BIT(0)
+#define PIN_CFG_IOLH_B BIT(1)
+#define PIN_CFG_SR BIT(2)
+#define PIN_CFG_IEN BIT(3)
+#define PIN_CFG_PUPD BIT(4)
+#define PIN_CFG_IO_VMC_SD0 BIT(5)
+#define PIN_CFG_IO_VMC_SD1 BIT(6)
+#define PIN_CFG_IO_VMC_QSPI BIT(7)
+#define PIN_CFG_IO_VMC_ETH0 BIT(8)
+#define PIN_CFG_IO_VMC_ETH1 BIT(9)
+#define PIN_CFG_FILONOFF BIT(10)
+#define PIN_CFG_FILNUM BIT(11)
+#define PIN_CFG_FILCLKSEL BIT(12)
+
+#define RZG2L_MPXED_PIN_FUNCS (PIN_CFG_IOLH_A | \
PIN_CFG_SR | \
PIN_CFG_PUPD | \
PIN_CFG_FILONOFF | \
@@ -77,7 +78,7 @@
#define RZG2L_SINGLE_PIN BIT(31)
#define RZG2L_SINGLE_PIN_PACK(p, b, f) (RZG2L_SINGLE_PIN | \
((p) << 24) | ((b) << 20) | (f))
-#define RZG2L_SINGLE_PIN_GET_PORT(x) (((x) & GENMASK(30, 24)) >> 24)
+#define RZG2L_SINGLE_PIN_GET_PORT_OFFSET(x) (((x) & GENMASK(30, 24)) >> 24)
#define RZG2L_SINGLE_PIN_GET_BIT(x) (((x) & GENMASK(22, 20)) >> 20)
#define RZG2L_SINGLE_PIN_GET_CFGS(x) ((x) & GENMASK(19, 0))
@@ -86,6 +87,7 @@
#define PMC(n) (0x0200 + 0x10 + (n))
#define PFC(n) (0x0400 + 0x40 + (n) * 4)
#define PIN(n) (0x0800 + 0x10 + (n))
+#define IOLH(n) (0x1000 + (n) * 8)
#define IEN(n) (0x1800 + (n) * 8)
#define PWPR (0x3014)
#define SD_CH(n) (0x3000 + (n) * 4)
@@ -101,11 +103,13 @@
#define PVDD_MASK 0x01
#define PFC_MASK 0x07
#define IEN_MASK 0x01
+#define IOLH_MASK 0x03
#define PM_INPUT 0x1
#define PM_OUTPUT 0x2
#define RZG2L_PIN_ID_TO_PORT(id) ((id) / RZG2L_PINS_PER_PORT)
+#define RZG2L_PIN_ID_TO_PORT_OFFSET(id) (RZG2L_PIN_ID_TO_PORT(id) + 0x10)
#define RZG2L_PIN_ID_TO_PIN(id) ((id) % RZG2L_PINS_PER_PORT)
struct rzg2l_dedicated_configs {
@@ -137,6 +141,9 @@ struct rzg2l_pinctrl {
spinlock_t lock;
};
+static const unsigned int iolh_groupa_mA[] = { 2, 4, 8, 12 };
+static const unsigned int iolh_groupb_oi[] = { 100, 66, 50, 33 };
+
static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl,
u8 port, u8 pin, u8 func)
{
@@ -424,6 +431,56 @@ done:
return ret;
}
+static int rzg2l_validate_gpio_pin(struct rzg2l_pinctrl *pctrl,
+ u32 cfg, u32 port, u8 bit)
+{
+ u8 pincount = RZG2L_GPIO_PORT_GET_PINCNT(cfg);
+ u32 port_index = RZG2L_GPIO_PORT_GET_INDEX(cfg);
+ u32 data;
+
+ if (bit >= pincount || port >= pctrl->data->n_port_pins)
+ return -EINVAL;
+
+ data = pctrl->data->port_pin_configs[port];
+ if (port_index != RZG2L_GPIO_PORT_GET_INDEX(data))
+ return -EINVAL;
+
+ return 0;
+}
+
+static u32 rzg2l_read_pin_config(struct rzg2l_pinctrl *pctrl, u32 offset,
+ u8 bit, u32 mask)
+{
+ void __iomem *addr = pctrl->base + offset;
+
+ /* handle _L/_H for 32-bit register read/write */
+ if (bit >= 4) {
+ bit -= 4;
+ addr += 4;
+ }
+
+ return (readl(addr) >> (bit * 8)) & mask;
+}
+
+static void rzg2l_rmw_pin_config(struct rzg2l_pinctrl *pctrl, u32 offset,
+ u8 bit, u32 mask, u32 val)
+{
+ void __iomem *addr = pctrl->base + offset;
+ unsigned long flags;
+ u32 reg;
+
+ /* handle _L/_H for 32-bit register read/write */
+ if (bit >= 4) {
+ bit -= 4;
+ addr += 4;
+ }
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ reg = readl(addr) & ~(mask << (bit * 8));
+ writel(reg | (val << (bit * 8)), addr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int _pin,
unsigned long *config)
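The two helpers added above centralize the _L/_H register split: each port's per-pin fields sit in a pair of 32-bit registers, four byte-wide fields apiece. A worked example under those assumptions, for bit = 5 and mask = IEN_MASK:

	/* bit >= 4: the pin lives in the _H half of the register pair */
	bit -= 4;					/* bit = 1 */
	addr += 4;					/* IEN_L -> IEN_H */
	val = (readl(addr) >> (1 * 8)) & IEN_MASK;	/* byte lane 1 */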
@@ -435,7 +492,7 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int arg = 0;
unsigned long flags;
void __iomem *addr;
- u32 port = 0, reg;
+ u32 port_offset;
u32 cfg = 0;
u8 bit = 0;
@@ -443,36 +500,33 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
return -EINVAL;
if (*pin_data & RZG2L_SINGLE_PIN) {
- port = RZG2L_SINGLE_PIN_GET_PORT(*pin_data);
+ port_offset = RZG2L_SINGLE_PIN_GET_PORT_OFFSET(*pin_data);
cfg = RZG2L_SINGLE_PIN_GET_CFGS(*pin_data);
bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZG2L_GPIO_PORT_GET_CFGS(*pin_data);
+ port_offset = RZG2L_PIN_ID_TO_PORT_OFFSET(_pin);
+ bit = RZG2L_PIN_ID_TO_PIN(_pin);
+
+ if (rzg2l_validate_gpio_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
}
switch (param) {
case PIN_CONFIG_INPUT_ENABLE:
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
- spin_lock_irqsave(&pctrl->lock, flags);
- /* handle _L/_H for 32-bit register read/write */
- addr = pctrl->base + IEN(port);
- if (bit >= 4) {
- bit -= 4;
- addr += 4;
- }
-
- reg = readl(addr) & (IEN_MASK << (bit * 8));
- arg = (reg >> (bit * 8)) & 0x1;
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ arg = rzg2l_read_pin_config(pctrl, IEN(port_offset), bit, IEN_MASK);
break;
case PIN_CONFIG_POWER_SOURCE: {
u32 pwr_reg = 0x0;
- if (cfg & PIN_CFG_IOLH_SD0)
+ if (cfg & PIN_CFG_IO_VMC_SD0)
pwr_reg = SD_CH(0);
- else if (cfg & PIN_CFG_IOLH_SD1)
+ else if (cfg & PIN_CFG_IO_VMC_SD1)
pwr_reg = SD_CH(1);
- else if (cfg & PIN_CFG_IOLH_QSPI)
+ else if (cfg & PIN_CFG_IO_VMC_QSPI)
pwr_reg = QSPI;
else
return -EINVAL;
@@ -484,6 +538,28 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
break;
}
+ case PIN_CONFIG_DRIVE_STRENGTH: {
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_IOLH_A))
+ return -EINVAL;
+
+ index = rzg2l_read_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK);
+ arg = iolh_groupa_mA[index];
+ break;
+ }
+
+ case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: {
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_IOLH_B))
+ return -EINVAL;
+
+ index = rzg2l_read_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK);
+ arg = iolh_groupb_oi[index];
+ break;
+ }
+
default:
return -ENOTSUPP;
}
@@ -504,7 +580,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
enum pin_config_param param;
unsigned long flags;
void __iomem *addr;
- u32 port = 0, reg;
+ u32 port_offset;
unsigned int i;
u32 cfg = 0;
u8 bit = 0;
@@ -513,9 +589,16 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
return -EINVAL;
if (*pin_data & RZG2L_SINGLE_PIN) {
- port = RZG2L_SINGLE_PIN_GET_PORT(*pin_data);
+ port_offset = RZG2L_SINGLE_PIN_GET_PORT_OFFSET(*pin_data);
cfg = RZG2L_SINGLE_PIN_GET_CFGS(*pin_data);
bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZG2L_GPIO_PORT_GET_CFGS(*pin_data);
+ port_offset = RZG2L_PIN_ID_TO_PORT_OFFSET(_pin);
+ bit = RZG2L_PIN_ID_TO_PIN(_pin);
+
+ if (rzg2l_validate_gpio_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
}
for (i = 0; i < num_configs; i++) {
@@ -528,17 +611,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
- /* handle _L/_H for 32-bit register read/write */
- addr = pctrl->base + IEN(port);
- if (bit >= 4) {
- bit -= 4;
- addr += 4;
- }
-
- spin_lock_irqsave(&pctrl->lock, flags);
- reg = readl(addr) & ~(IEN_MASK << (bit * 8));
- writel(reg | (arg << (bit * 8)), addr);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ rzg2l_rmw_pin_config(pctrl, IEN(port_offset), bit, IEN_MASK, !!arg);
break;
}
@@ -549,11 +622,11 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
if (mV != 1800 && mV != 3300)
return -EINVAL;
- if (cfg & PIN_CFG_IOLH_SD0)
+ if (cfg & PIN_CFG_IO_VMC_SD0)
pwr_reg = SD_CH(0);
- else if (cfg & PIN_CFG_IOLH_SD1)
+ else if (cfg & PIN_CFG_IO_VMC_SD1)
pwr_reg = SD_CH(1);
- else if (cfg & PIN_CFG_IOLH_QSPI)
+ else if (cfg & PIN_CFG_IO_VMC_QSPI)
pwr_reg = QSPI;
else
return -EINVAL;
@@ -564,6 +637,43 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
spin_unlock_irqrestore(&pctrl->lock, flags);
break;
}
+
+ case PIN_CONFIG_DRIVE_STRENGTH: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_IOLH_A))
+ return -EINVAL;
+
+ for (index = 0; index < ARRAY_SIZE(iolh_groupa_mA); index++) {
+ if (arg == iolh_groupa_mA[index])
+ break;
+ }
+ if (index >= ARRAY_SIZE(iolh_groupa_mA))
+ return -EINVAL;
+
+ rzg2l_rmw_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK, index);
+ break;
+ }
+
+ case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_IOLH_B))
+ return -EINVAL;
+
+ for (index = 0; index < ARRAY_SIZE(iolh_groupb_oi); index++) {
+ if (arg == iolh_groupb_oi[index])
+ break;
+ }
+ if (index >= ARRAY_SIZE(iolh_groupb_oi))
+ return -EINVAL;
+
+ rzg2l_rmw_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK, index);
+ break;
+ }
+
default:
return -EOPNOTSUPP;
}
@@ -855,24 +965,24 @@ static const u32 rzg2l_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(3, 0x21, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x22, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x23, RZG2L_MPXED_PIN_FUNCS),
- RZG2L_GPIO_PORT_PACK(3, 0x24, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x25, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x26, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x27, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x28, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x29, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x2a, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x2b, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x2c, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x2d, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x2e, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x2f, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x31, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x32, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x33, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x34, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(3, 0x35, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(3, 0x24, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x25, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x26, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x27, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x28, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x29, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2a, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2b, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2c, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2d, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2e, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2f, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x31, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x32, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x33, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x34, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(3, 0x35, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
RZG2L_GPIO_PORT_PACK(2, 0x36, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(3, 0x37, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(3, 0x38, RZG2L_MPXED_PIN_FUNCS),
@@ -890,75 +1000,75 @@ static struct rzg2l_dedicated_configs rzg2l_dedicated_pins[] = {
{ "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0,
(PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL)) },
{ "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x2, 0,
- (PIN_CFG_SR | PIN_CFG_IOLH | PIN_CFG_IEN)) },
+ (PIN_CFG_SR | PIN_CFG_IOLH_A | PIN_CFG_IEN)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN)) },
+ (PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) },
{ "AUDIO_CLK1", RZG2L_SINGLE_PIN_PACK(0x4, 0, PIN_CFG_IEN) },
{ "AUDIO_CLK2", RZG2L_SINGLE_PIN_PACK(0x4, 1, PIN_CFG_IEN) },
{ "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x6, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x6, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_RST#", RZG2L_SINGLE_PIN_PACK(0x6, 2,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA0", RZG2L_SINGLE_PIN_PACK(0x7, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA1", RZG2L_SINGLE_PIN_PACK(0x7, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA2", RZG2L_SINGLE_PIN_PACK(0x7, 2,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA3", RZG2L_SINGLE_PIN_PACK(0x7, 3,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA4", RZG2L_SINGLE_PIN_PACK(0x7, 4,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA5", RZG2L_SINGLE_PIN_PACK(0x7, 5,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA6", RZG2L_SINGLE_PIN_PACK(0x7, 6,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA7", RZG2L_SINGLE_PIN_PACK(0x7, 7,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD1_CLK", RZG2L_SINGLE_PIN_PACK(0x8, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_SD1))},
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD1)) },
{ "SD1_CMD", RZG2L_SINGLE_PIN_PACK(0x8, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
{ "SD1_DATA0", RZG2L_SINGLE_PIN_PACK(0x9, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
{ "SD1_DATA1", RZG2L_SINGLE_PIN_PACK(0x9, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
{ "SD1_DATA2", RZG2L_SINGLE_PIN_PACK(0x9, 2,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
{ "SD1_DATA3", RZG2L_SINGLE_PIN_PACK(0x9, 3,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
{ "QSPI0_SPCLK", RZG2L_SINGLE_PIN_PACK(0xa, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0xa, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0xa, 2,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0xa, 3,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0xa, 4,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI0_SSL", RZG2L_SINGLE_PIN_PACK(0xa, 5,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_SPCLK", RZG2L_SINGLE_PIN_PACK(0xb, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_IO0", RZG2L_SINGLE_PIN_PACK(0xb, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_IO1", RZG2L_SINGLE_PIN_PACK(0xb, 2,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_IO2", RZG2L_SINGLE_PIN_PACK(0xb, 3,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_IO3", RZG2L_SINGLE_PIN_PACK(0xb, 4,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_SSL", RZG2L_SINGLE_PIN_PACK(0xb, 5,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI_RESET#", RZG2L_SINGLE_PIN_PACK(0xc, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI_WP#", RZG2L_SINGLE_PIN_PACK(0xc, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
- { "QSPI_INT#", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
- { "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0xd, 0, (PIN_CFG_IOLH | PIN_CFG_SR)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI_INT#", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0xd, 0, (PIN_CFG_IOLH_A | PIN_CFG_SR)) },
{ "RIIC0_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 0, PIN_CFG_IEN) },
{ "RIIC0_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 1, PIN_CFG_IEN) },
{ "RIIC1_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 2, PIN_CFG_IEN) },
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index f3eecb20c086..96b9de974246 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -504,7 +504,6 @@ static u32 sh_pfc_pinconf_find_drive_strength_reg(struct sh_pfc *pfc,
static int sh_pfc_pinconf_get_drive_strength(struct sh_pfc *pfc,
unsigned int pin)
{
- unsigned long flags;
unsigned int offset;
unsigned int size;
u32 reg;
@@ -514,11 +513,7 @@ static int sh_pfc_pinconf_get_drive_strength(struct sh_pfc *pfc,
if (!reg)
return -EINVAL;
- spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read(pfc, reg);
- spin_unlock_irqrestore(&pfc->lock, flags);
-
- val = (val >> offset) & GENMASK(size - 1, 0);
+ val = (sh_pfc_read(pfc, reg) >> offset) & GENMASK(size - 1, 0);
/* Convert the value to mA based on a full drive strength value of 24mA.
* We can make the full value configurable later if needed.
@@ -648,9 +643,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
if (WARN(bit < 0, "invalid pin %#x", _pin))
return bit;
- spin_lock_irqsave(&pfc->lock, flags);
val = sh_pfc_read(pfc, pocctrl);
- spin_unlock_irqrestore(&pfc->lock, flags);
lower_voltage = (pin->configs & SH_PFC_PIN_VOLTAGE_25_33) ?
2500 : 1800;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index 6b77fd24571e..2e490e7696f4 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -441,6 +441,87 @@ const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = {
.num_ctrl = ARRAY_SIZE(exynos7_pin_ctrl),
};
+/* pin banks of exynos7885 pin-controller 0 (ALIVE) */
+static const struct samsung_pin_bank_data exynos7885_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTN(3, 0x000, "etc0"),
+ EXYNOS_PIN_BANK_EINTN(3, 0x020, "etc1"),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x040, "gpa0", 0x00),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x060, "gpa1", 0x04),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x080, "gpa2", 0x08),
+ EXYNOS850_PIN_BANK_EINTW(5, 0x0a0, "gpq0", 0x0c),
+};
+
+/* pin banks of exynos7885 pin-controller 1 (DISPAUD) */
+static const struct samsung_pin_bank_data exynos7885_pin_banks1[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(5, 0x000, "gpb0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x020, "gpb1", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(5, 0x040, "gpb2", 0x08),
+};
+
+/* pin banks of exynos7885 pin-controller 2 (FSYS) */
+static const struct samsung_pin_bank_data exynos7885_pin_banks2[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(4, 0x000, "gpf0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x020, "gpf2", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(6, 0x040, "gpf3", 0x08),
+ EXYNOS850_PIN_BANK_EINTG(6, 0x060, "gpf4", 0x0c),
+};
+
+/* pin banks of exynos7885 pin-controller 3 (TOP) */
+static const struct samsung_pin_bank_data exynos7885_pin_banks3[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(4, 0x000, "gpp0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(3, 0x020, "gpg0", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x040, "gpp1", 0x08),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x060, "gpp2", 0x0c),
+ EXYNOS850_PIN_BANK_EINTG(3, 0x080, "gpp3", 0x10),
+ EXYNOS850_PIN_BANK_EINTG(6, 0x0a0, "gpp4", 0x14),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x0c0, "gpp5", 0x18),
+ EXYNOS850_PIN_BANK_EINTG(5, 0x0e0, "gpp6", 0x1c),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x100, "gpp7", 0x20),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x120, "gpp8", 0x24),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x140, "gpg1", 0x28),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x160, "gpg2", 0x2c),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x180, "gpg3", 0x30),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x1a0, "gpg4", 0x34),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x1c0, "gpc0", 0x38),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x1e0, "gpc1", 0x3c),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x200, "gpc2", 0x40),
+};
+
+static const struct samsung_pin_ctrl exynos7885_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 Alive data */
+ .pin_banks = exynos7885_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos7885_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 1 DISPAUD data */
+ .pin_banks = exynos7885_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos7885_pin_banks1),
+ }, {
+ /* pin-controller instance 2 FSYS data */
+ .pin_banks = exynos7885_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos7885_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 3 TOP data */
+ .pin_banks = exynos7885_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos7885_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ },
+};
+
+const struct samsung_pinctrl_of_match_data exynos7885_of_data __initconst = {
+ .ctrl = exynos7885_pin_ctrl,
+ .num_ctrl = ARRAY_SIZE(exynos7885_pin_ctrl),
+};
+
/* pin banks of exynos850 pin-controller 0 (ALIVE) */
static const struct samsung_pin_bank_data exynos850_pin_banks0[] __initconst = {
/* Must start with EINTG banks, ordered by EINT group number. */
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 23f355ae9ca0..0f6e9305fec5 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1095,7 +1095,6 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
struct samsung_pinctrl_drv_data *drvdata;
const struct samsung_pin_ctrl *ctrl;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
@@ -1109,9 +1108,11 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
}
drvdata->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res)
- drvdata->irq = res->start;
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ if (ret > 0)
+ drvdata->irq = ret;
if (ctrl->retention_data) {
drvdata->retention_ctrl = ctrl->retention_data->init(drvdata,
@@ -1264,6 +1265,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = &exynos5433_of_data },
{ .compatible = "samsung,exynos7-pinctrl",
.data = &exynos7_of_data },
+ { .compatible = "samsung,exynos7885-pinctrl",
+ .data = &exynos7885_of_data },
{ .compatible = "samsung,exynos850-pinctrl",
.data = &exynos850_of_data },
{ .compatible = "samsung,exynosautov9-pinctrl",
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 547968a31aed..1f8d30ba05af 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -339,6 +339,7 @@ extern const struct samsung_pinctrl_of_match_data exynos5410_of_data;
extern const struct samsung_pinctrl_of_match_data exynos5420_of_data;
extern const struct samsung_pinctrl_of_match_data exynos5433_of_data;
extern const struct samsung_pinctrl_of_match_data exynos7_of_data;
+extern const struct samsung_pinctrl_of_match_data exynos7885_of_data;
extern const struct samsung_pinctrl_of_match_data exynos850_of_data;
extern const struct samsung_pinctrl_of_match_data exynosautov9_of_data;
extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 43bb334af1e1..ada401ef4342 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -14,11 +14,13 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/init.h>
+#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/regmap.h>
#include <linux/spinlock.h>
#define MAX_GPIO_PER_REG 32
@@ -64,7 +66,7 @@ struct plgpio_regs {
*/
struct plgpio {
spinlock_t lock;
- void __iomem *base;
+ struct regmap *regmap;
struct clk *clk;
struct gpio_chip chip;
int (*p2o)(int pin); /* pin_to_offset */
@@ -77,33 +79,38 @@ struct plgpio {
};
/* register manipulation inline functions */
-static inline u32 is_plgpio_set(void __iomem *base, u32 pin, u32 reg)
+static inline u32 is_plgpio_set(struct regmap *regmap, u32 pin, u32 reg)
{
u32 offset = PIN_OFFSET(pin);
- void __iomem *reg_off = REG_OFFSET(base, reg, pin);
- u32 val = readl_relaxed(reg_off);
+ u32 reg_off = REG_OFFSET(0, reg, pin);
+ u32 val;
+
+ regmap_read(regmap, reg_off, &val);
return !!(val & (1 << offset));
}
-static inline void plgpio_reg_set(void __iomem *base, u32 pin, u32 reg)
+static inline void plgpio_reg_set(struct regmap *regmap, u32 pin, u32 reg)
{
u32 offset = PIN_OFFSET(pin);
- void __iomem *reg_off = REG_OFFSET(base, reg, pin);
- u32 val = readl_relaxed(reg_off);
+ u32 reg_off = REG_OFFSET(0, reg, pin);
+ u32 mask;
- writel_relaxed(val | (1 << offset), reg_off);
+ mask = 1 << offset;
+ regmap_update_bits(regmap, reg_off, mask, mask);
}
-static inline void plgpio_reg_reset(void __iomem *base, u32 pin, u32 reg)
+static inline void plgpio_reg_reset(struct regmap *regmap, u32 pin, u32 reg)
{
u32 offset = PIN_OFFSET(pin);
- void __iomem *reg_off = REG_OFFSET(base, reg, pin);
- u32 val = readl_relaxed(reg_off);
+ u32 reg_off = REG_OFFSET(0, reg, pin);
+ u32 mask;
- writel_relaxed(val & ~(1 << offset), reg_off);
+ mask = 1 << offset;
+ regmap_update_bits(regmap, reg_off, mask, 0);
}
+
/* gpio framework specific routines */
static int plgpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
@@ -118,7 +125,7 @@ static int plgpio_direction_input(struct gpio_chip *chip, unsigned offset)
}
spin_lock_irqsave(&plgpio->lock, flags);
- plgpio_reg_set(plgpio->base, offset, plgpio->regs.dir);
+ plgpio_reg_set(plgpio->regmap, offset, plgpio->regs.dir);
spin_unlock_irqrestore(&plgpio->lock, flags);
return 0;
@@ -145,13 +152,13 @@ static int plgpio_direction_output(struct gpio_chip *chip, unsigned offset,
spin_lock_irqsave(&plgpio->lock, flags);
if (value)
- plgpio_reg_set(plgpio->base, wdata_offset,
+ plgpio_reg_set(plgpio->regmap, wdata_offset,
plgpio->regs.wdata);
else
- plgpio_reg_reset(plgpio->base, wdata_offset,
+ plgpio_reg_reset(plgpio->regmap, wdata_offset,
plgpio->regs.wdata);
- plgpio_reg_reset(plgpio->base, dir_offset, plgpio->regs.dir);
+ plgpio_reg_reset(plgpio->regmap, dir_offset, plgpio->regs.dir);
spin_unlock_irqrestore(&plgpio->lock, flags);
return 0;
@@ -171,7 +178,7 @@ static int plgpio_get_value(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
- return is_plgpio_set(plgpio->base, offset, plgpio->regs.rdata);
+ return is_plgpio_set(plgpio->regmap, offset, plgpio->regs.rdata);
}
static void plgpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
@@ -189,9 +196,9 @@ static void plgpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
}
if (value)
- plgpio_reg_set(plgpio->base, offset, plgpio->regs.wdata);
+ plgpio_reg_set(plgpio->regmap, offset, plgpio->regs.wdata);
else
- plgpio_reg_reset(plgpio->base, offset, plgpio->regs.wdata);
+ plgpio_reg_reset(plgpio->regmap, offset, plgpio->regs.wdata);
}
static int plgpio_request(struct gpio_chip *chip, unsigned offset)
@@ -234,7 +241,7 @@ static int plgpio_request(struct gpio_chip *chip, unsigned offset)
}
spin_lock_irqsave(&plgpio->lock, flags);
- plgpio_reg_set(plgpio->base, offset, plgpio->regs.enb);
+ plgpio_reg_set(plgpio->regmap, offset, plgpio->regs.enb);
spin_unlock_irqrestore(&plgpio->lock, flags);
return 0;
@@ -266,7 +273,7 @@ static void plgpio_free(struct gpio_chip *chip, unsigned offset)
}
spin_lock_irqsave(&plgpio->lock, flags);
- plgpio_reg_reset(plgpio->base, offset, plgpio->regs.enb);
+ plgpio_reg_reset(plgpio->regmap, offset, plgpio->regs.enb);
spin_unlock_irqrestore(&plgpio->lock, flags);
disable_clk:
@@ -292,7 +299,7 @@ static void plgpio_irq_disable(struct irq_data *d)
}
spin_lock_irqsave(&plgpio->lock, flags);
- plgpio_reg_set(plgpio->base, offset, plgpio->regs.ie);
+ plgpio_reg_set(plgpio->regmap, offset, plgpio->regs.ie);
spin_unlock_irqrestore(&plgpio->lock, flags);
}
@@ -311,7 +318,7 @@ static void plgpio_irq_enable(struct irq_data *d)
}
spin_lock_irqsave(&plgpio->lock, flags);
- plgpio_reg_reset(plgpio->base, offset, plgpio->regs.ie);
+ plgpio_reg_reset(plgpio->regmap, offset, plgpio->regs.ie);
spin_unlock_irqrestore(&plgpio->lock, flags);
}
@@ -320,7 +327,7 @@ static int plgpio_irq_set_type(struct irq_data *d, unsigned trigger)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct plgpio *plgpio = gpiochip_get_data(gc);
int offset = d->hwirq;
- void __iomem *reg_off;
+ u32 reg_off;
unsigned int supported_type = 0, val;
if (offset >= plgpio->chip.ngpio)
@@ -337,14 +344,14 @@ static int plgpio_irq_set_type(struct irq_data *d, unsigned trigger)
if (plgpio->regs.eit == -1)
return 0;
- reg_off = REG_OFFSET(plgpio->base, plgpio->regs.eit, offset);
- val = readl_relaxed(reg_off);
+ reg_off = REG_OFFSET(0, plgpio->regs.eit, offset);
+ regmap_read(plgpio->regmap, reg_off, &val);
offset = PIN_OFFSET(offset);
if (trigger & IRQ_TYPE_EDGE_RISING)
- writel_relaxed(val | (1 << offset), reg_off);
+ regmap_write(plgpio->regmap, reg_off, val | (1 << offset));
else
- writel_relaxed(val & ~(1 << offset), reg_off);
+ regmap_write(plgpio->regmap, reg_off, val & ~(1 << offset));
return 0;
}
@@ -362,7 +369,8 @@ static void plgpio_irq_handler(struct irq_desc *desc)
struct plgpio *plgpio = gpiochip_get_data(gc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
int regs_count, count, pin, offset, i = 0;
- unsigned long pending;
+ u32 pending;
+ unsigned long pendingl;
count = plgpio->chip.ngpio;
regs_count = DIV_ROUND_UP(count, MAX_GPIO_PER_REG);
@@ -370,14 +378,14 @@ static void plgpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
/* check all plgpio MIS registers for a possible interrupt */
for (; i < regs_count; i++) {
- pending = readl_relaxed(plgpio->base + plgpio->regs.mis +
- i * sizeof(int *));
+ regmap_read(plgpio->regmap, plgpio->regs.mis +
+ i * sizeof(int *), &pending);
if (!pending)
continue;
/* clear interrupts */
- writel_relaxed(~pending, plgpio->base + plgpio->regs.mis +
- i * sizeof(int *));
+ regmap_write(plgpio->regmap, plgpio->regs.mis +
+ i * sizeof(int *), ~pending);
/*
* clear extra bits in last register having gpios < MAX/REG
* ex: suppose there are at most 102 plgpios; then the last register
@@ -389,7 +397,8 @@ static void plgpio_irq_handler(struct irq_desc *desc)
if (count < MAX_GPIO_PER_REG)
pending &= (1 << count) - 1;
- for_each_set_bit(offset, &pending, MAX_GPIO_PER_REG) {
+ pendingl = pending;
+ for_each_set_bit(offset, &pendingl, MAX_GPIO_PER_REG) {
/* get correct pin for "offset" */
if (plgpio->o2p && (plgpio->p2o_regs & PTO_MIS_REG)) {
pin = plgpio->o2p(offset);
@@ -511,8 +520,10 @@ static int plgpio_probe_dt(struct platform_device *pdev, struct plgpio *plgpio)
end:
return ret;
}
+
static int plgpio_probe(struct platform_device *pdev)
{
+ struct device_node *regmap_np;
struct plgpio *plgpio;
int ret, irq;
@@ -520,9 +531,23 @@ static int plgpio_probe(struct platform_device *pdev)
if (!plgpio)
return -ENOMEM;
- plgpio->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(plgpio->base))
- return PTR_ERR(plgpio->base);
+ regmap_np = of_parse_phandle(pdev->dev.of_node, "regmap", 0);
+ if (regmap_np) {
+ plgpio->regmap = device_node_to_regmap(regmap_np);
+ of_node_put(regmap_np);
+ if (IS_ERR(plgpio->regmap)) {
+ dev_err(&pdev->dev, "Retrieve regmap failed (%pe)\n",
+ plgpio->regmap);
+ return PTR_ERR(plgpio->regmap);
+ }
+ } else {
+ plgpio->regmap = device_node_to_regmap(pdev->dev.of_node);
+ if (IS_ERR(plgpio->regmap)) {
+ dev_err(&pdev->dev, "Init regmap failed (%pe)\n",
+ plgpio->regmap);
+ return PTR_ERR(plgpio->regmap);
+ }
+ }
ret = plgpio_probe_dt(pdev, plgpio);
if (ret) {
@@ -556,7 +581,6 @@ static int plgpio_probe(struct platform_device *pdev)
plgpio->chip.label = dev_name(&pdev->dev);
plgpio->chip.parent = &pdev->dev;
plgpio->chip.owner = THIS_MODULE;
- plgpio->chip.of_node = pdev->dev.of_node;
if (!IS_ERR(plgpio->clk)) {
ret = clk_prepare(plgpio->clk);
@@ -607,22 +631,23 @@ static int plgpio_suspend(struct device *dev)
{
struct plgpio *plgpio = dev_get_drvdata(dev);
int i, reg_count = DIV_ROUND_UP(plgpio->chip.ngpio, MAX_GPIO_PER_REG);
- void __iomem *off;
+ u32 off;
for (i = 0; i < reg_count; i++) {
- off = plgpio->base + i * sizeof(int *);
+ off = i * sizeof(int *);
if (plgpio->regs.enb != -1)
- plgpio->csave_regs[i].enb =
- readl_relaxed(plgpio->regs.enb + off);
+ regmap_read(plgpio->regmap, plgpio->regs.enb + off,
+ &plgpio->csave_regs[i].enb);
if (plgpio->regs.eit != -1)
- plgpio->csave_regs[i].eit =
- readl_relaxed(plgpio->regs.eit + off);
- plgpio->csave_regs[i].wdata = readl_relaxed(plgpio->regs.wdata +
- off);
- plgpio->csave_regs[i].dir = readl_relaxed(plgpio->regs.dir +
- off);
- plgpio->csave_regs[i].ie = readl_relaxed(plgpio->regs.ie + off);
+ regmap_read(plgpio->regmap, plgpio->regs.eit + off,
+ &plgpio->csave_regs[i].eit);
+ regmap_read(plgpio->regmap, plgpio->regs.wdata + off,
+ &plgpio->csave_regs[i].wdata);
+ regmap_read(plgpio->regmap, plgpio->regs.dir + off,
+ &plgpio->csave_regs[i].dir);
+ regmap_read(plgpio->regmap, plgpio->regs.ie + off,
+ &plgpio->csave_regs[i].ie);
}
return 0;
@@ -636,7 +661,7 @@ static int plgpio_suspend(struct device *dev)
*/
#define plgpio_prepare_reg(__reg, _off, _mask, _tmp) \
{ \
- _tmp = readl_relaxed(plgpio->regs.__reg + _off); \
+ regmap_read(plgpio->regmap, plgpio->regs.__reg + _off, &_tmp); \
_tmp &= ~_mask; \
plgpio->csave_regs[i].__reg = \
_tmp | (plgpio->csave_regs[i].__reg & _mask); \
@@ -646,11 +671,11 @@ static int plgpio_resume(struct device *dev)
{
struct plgpio *plgpio = dev_get_drvdata(dev);
int i, reg_count = DIV_ROUND_UP(plgpio->chip.ngpio, MAX_GPIO_PER_REG);
- void __iomem *off;
+ u32 off;
u32 mask, tmp;
for (i = 0; i < reg_count; i++) {
- off = plgpio->base + i * sizeof(int *);
+ off = i * sizeof(int *);
if (i == reg_count - 1) {
mask = (1 << (plgpio->chip.ngpio - i *
@@ -667,20 +692,22 @@ static int plgpio_resume(struct device *dev)
plgpio_prepare_reg(ie, off, mask, tmp);
}
- writel_relaxed(plgpio->csave_regs[i].wdata, plgpio->regs.wdata +
- off);
- writel_relaxed(plgpio->csave_regs[i].dir, plgpio->regs.dir +
- off);
+ regmap_write(plgpio->regmap, plgpio->regs.wdata + off,
+ plgpio->csave_regs[i].wdata);
+
+ regmap_write(plgpio->regmap, plgpio->regs.dir + off,
+ plgpio->csave_regs[i].dir);
if (plgpio->regs.eit != -1)
- writel_relaxed(plgpio->csave_regs[i].eit,
- plgpio->regs.eit + off);
+ regmap_write(plgpio->regmap, plgpio->regs.eit + off,
+ plgpio->csave_regs[i].eit);
- writel_relaxed(plgpio->csave_regs[i].ie, plgpio->regs.ie + off);
+ regmap_write(plgpio->regmap, plgpio->regs.ie + off,
+ plgpio->csave_regs[i].ie);
if (plgpio->regs.enb != -1)
- writel_relaxed(plgpio->csave_regs[i].enb,
- plgpio->regs.enb + off);
+ regmap_write(plgpio->regmap, plgpio->regs.enb + off,
+ plgpio->csave_regs[i].enb);
}
return 0;
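The conversion pattern throughout this file: each open-coded read/modify/write on the shared misc block becomes a single regmap call, which also lets the regmap core serialize access against other users of the same syscon. Schematically:

	/* before: RMW on an ioremapped register */
	val = readl_relaxed(reg_off);
	writel_relaxed(val | BIT(offset), reg_off);

	/* after: one locked update through the shared regmap */
	regmap_update_bits(plgpio->regmap, reg_off, BIT(offset), BIT(offset));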
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 948f56abb9ae..e0543c1ad641 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -14,6 +14,7 @@
*/
#include <linux/err.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -367,9 +368,12 @@ int spear_pinctrl_probe(struct platform_device *pdev,
if (!pmx)
return -ENOMEM;
- pmx->vbase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pmx->vbase))
- return PTR_ERR(pmx->vbase);
+ pmx->regmap = device_node_to_regmap(np);
+ if (IS_ERR(pmx->regmap)) {
+ dev_err(&pdev->dev, "Init regmap failed (%pe).\n",
+ pmx->regmap);
+ return PTR_ERR(pmx->regmap);
+ }
pmx->dev = &pdev->dev;
pmx->machdata = machdata;
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index db029b148c87..63a0b5ea56ef 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -15,6 +15,7 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
#include <linux/types.h>
struct platform_device;
@@ -172,24 +173,27 @@ struct spear_pinctrl_machdata {
* @dev: pointer to struct dev of platform_device registered
* @pctl: pointer to struct pinctrl_dev
* @machdata: pointer to SoC or machine specific structure
- * @vbase: virtual base address of pinmux controller
+ * @regmap: regmap of pinmux controller
*/
struct spear_pmx {
struct device *dev;
struct pinctrl_dev *pctl;
struct spear_pinctrl_machdata *machdata;
- void __iomem *vbase;
+ struct regmap *regmap;
};
/* exported routines */
static inline u32 pmx_readl(struct spear_pmx *pmx, u32 reg)
{
- return readl_relaxed(pmx->vbase + reg);
+ u32 val;
+
+ regmap_read(pmx->regmap, reg, &val);
+ return val;
}
static inline void pmx_writel(struct spear_pmx *pmx, u32 val, u32 reg)
{
- writel_relaxed(val, pmx->vbase + reg);
+ regmap_write(pmx->regmap, reg, val);
}
void pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg);
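Since pmx_readl()/pmx_writel() were already the only register accessors, swapping their implementation to regmap leaves every machine-specific call site untouched. A representative mux sequence (field names per the driver's struct spear_muxreg):

	val = pmx_readl(pmx, muxreg->reg);
	val &= ~muxreg->mask;
	val |= muxreg->val & muxreg->mask;
	pmx_writel(pmx, val, muxreg->reg);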
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 862c84efb718..80d6750c74a6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -835,7 +835,9 @@ static const struct pinmux_ops sunxi_pmx_ops = {
static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
- return pinctrl_gpio_direction_input(chip->base + offset);
+ struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
+
+ return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, true);
}
static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -885,8 +887,10 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
+ struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
+
sunxi_pinctrl_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, false);
}
static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 65b97e240196..6fac30de1c6a 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -565,7 +565,6 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
data->gpio_chip = wmt_gpio_chip;
data->gpio_chip.parent = &pdev->dev;
- data->gpio_chip.of_node = pdev->dev.of_node;
data->gpio_chip.ngpio = data->nbanks * 32;
platform_set_drvdata(pdev, data);
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index 8ac149173c64..d421e1482395 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -30,4 +30,10 @@ config RS780E_ACPI
help
Loongson RS780E PCH ACPI Controller driver.
+config LS2K_RESET
+ bool "Loongson-2K1000 Reset Controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ help
+ Loongson-2K1000 Reset Controller driver.
+
endif # MIPS_PLATFORM_DEVICES
diff --git a/drivers/platform/mips/Makefile b/drivers/platform/mips/Makefile
index 178149098777..4c71444e453a 100644
--- a/drivers/platform/mips/Makefile
+++ b/drivers/platform/mips/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o
obj-$(CONFIG_RS780E_ACPI) += rs780e-acpi.o
+obj-$(CONFIG_LS2K_RESET) += ls2k-reset.o
diff --git a/drivers/platform/mips/ls2k-reset.c b/drivers/platform/mips/ls2k-reset.c
new file mode 100644
index 000000000000..8f42d5d16480
--- /dev/null
+++ b/drivers/platform/mips/ls2k-reset.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021, Qing Zhang <zhangqing@loongson.cn>
+ * Loongson-2K1000 reset support
+ */
+
+#include <linux/of_address.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+
+#define PM1_STS 0x0c /* Power Management 1 Status Register */
+#define PM1_CNT 0x14 /* Power Management 1 Control Register */
+#define RST_CNT 0x30 /* Reset Control Register */
+
+static void __iomem *base;
+
+static void ls2k_restart(char *command)
+{
+ writel(0x1, base + RST_CNT);
+}
+
+static void ls2k_poweroff(void)
+{
+ /* Clear all pending PM1 status bits; they are write-one-to-clear */
+ writel((readl(base + PM1_STS) & 0xffffffff), base + PM1_STS);
+ /* Sleep Enable | Soft Off */
+ writel(GENMASK(12, 10) | BIT(13), base + PM1_CNT);
+}
+
+static int ls2k_reset_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "loongson,ls2k-pm");
+ if (!np) {
+ pr_info("Failed to get PM node\n");
+ return -ENODEV;
+ }
+
+ base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!base) {
+ pr_info("Failed to map PM register base address\n");
+ return -ENOMEM;
+ }
+
+ _machine_restart = ls2k_restart;
+ pm_power_off = ls2k_poweroff;
+
+ return 0;
+}
+
+arch_initcall(ls2k_reset_init);
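For reference, the poweroff write decodes into the standard ACPI PM1 control fields; the SLP_TYP encoding is chipset-specific, and 7 is taken on trust from the driver as this PCH's soft-off value:

	/* PM1_CNT (ACPI spec): SLP_TYP = bits 12:10, SLP_EN = bit 13.
	 * GENMASK(12, 10) | BIT(13) = 0x1c00 | 0x2000 = 0x3c00
	 *  -> SLP_TYP = 7 (assumed soft-off encoding), SLP_EN = 1
	 */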
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index 3105f651614f..5f0578e25f71 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -5,7 +5,6 @@
menuconfig SURFACE_PLATFORMS
bool "Microsoft Surface Platform-Specific Device Drivers"
- depends on ACPI
default y
help
Say Y here to get to see options for platform-specific device drivers
@@ -30,12 +29,14 @@ config SURFACE3_WMI
config SURFACE_3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
+ depends on ACPI
depends on KEYBOARD_GPIO && I2C
help
This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
config SURFACE_3_POWER_OPREGION
tristate "Surface 3 battery platform operation region support"
+ depends on ACPI
depends on I2C
help
This driver provides support for ACPI operation
@@ -126,6 +127,7 @@ config SURFACE_DTX
config SURFACE_GPE
tristate "Surface GPE/Lid Support Driver"
+ depends on ACPI
depends on DMI
help
This driver marks the GPEs related to the ACPI lid device found on
@@ -135,6 +137,7 @@ config SURFACE_GPE
config SURFACE_HOTPLUG
tristate "Surface Hot-Plug Driver"
+ depends on ACPI
depends on GPIOLIB
help
Driver for out-of-band hot-plug event signaling on Microsoft Surface
@@ -154,6 +157,7 @@ config SURFACE_HOTPLUG
config SURFACE_PLATFORM_PROFILE
tristate "Surface Platform Profile Driver"
+ depends on ACPI
depends on SURFACE_AGGREGATOR_REGISTRY
select ACPI_PLATFORM_PROFILE
help
@@ -176,6 +180,7 @@ config SURFACE_PLATFORM_PROFILE
config SURFACE_PRO3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
+ depends on ACPI
depends on INPUT
help
This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
index fd6dc452f3e8..cab020324256 100644
--- a/drivers/platform/surface/aggregator/Kconfig
+++ b/drivers/platform/surface/aggregator/Kconfig
@@ -4,6 +4,7 @@
menuconfig SURFACE_AGGREGATOR
tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
depends on SERIAL_DEV_BUS
+ depends on ACPI
select CRC_CCITT
help
The Surface System Aggregator Module (Surface SAM or SSAM) is an
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
index 0a40dd9c94ed..abbbb5b08b07 100644
--- a/drivers/platform/surface/aggregator/bus.c
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -374,27 +374,19 @@ static int ssam_remove_device(struct device *dev, void *_data)
}
/**
- * ssam_controller_remove_clients() - Remove SSAM client devices registered as
- * direct children under the given controller.
- * @ctrl: The controller to remove all direct clients for.
+ * ssam_remove_clients() - Remove SSAM client devices registered as direct
+ * children under the given parent device.
+ * @dev: The (parent) device to remove all direct clients for.
*
- * Remove all SSAM client devices registered as direct children under the
- * given controller. Note that this only accounts for direct children of the
- * controller device. This does not take care of any client devices where the
- * parent device has been manually set before calling ssam_device_add. Refer
- * to ssam_device_add()/ssam_device_remove() for more details on those cases.
- *
- * To avoid new devices being added in parallel to this call, the main
- * controller lock (not statelock) must be held during this (and if
- * necessary, any subsequent deinitialization) call.
+ * Remove all SSAM client devices registered as direct children under the given
+ * device. Note that this only accounts for direct children of the device.
+ * Refer to ssam_device_add()/ssam_device_remove() for more details.
*/
-void ssam_controller_remove_clients(struct ssam_controller *ctrl)
+void ssam_remove_clients(struct device *dev)
{
- struct device *dev;
-
- dev = ssam_controller_device(ctrl);
device_for_each_child_reverse(dev, NULL, ssam_remove_device);
}
+EXPORT_SYMBOL_GPL(ssam_remove_clients);
/**
* ssam_bus_register() - Register and set-up the SSAM client device bus.
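With the helper now keyed on a plain struct device and exported, a hub-like parent can pair it symmetrically with its registration path. A sketch of the pattern the registry hunks below adopt:

	/* probe: register child devices, tear them down again on failure */
	status = ssam_hub_register_clients(&sdev->dev, sdev->ctrl, node);
	if (status)
		ssam_remove_clients(&sdev->dev);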
diff --git a/drivers/platform/surface/aggregator/bus.h b/drivers/platform/surface/aggregator/bus.h
index ed032c2cbdb2..6964ee84e79c 100644
--- a/drivers/platform/surface/aggregator/bus.h
+++ b/drivers/platform/surface/aggregator/bus.h
@@ -12,14 +12,11 @@
#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
-void ssam_controller_remove_clients(struct ssam_controller *ctrl);
-
int ssam_bus_register(void);
void ssam_bus_unregister(void);
#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
-static inline void ssam_controller_remove_clients(struct ssam_controller *ctrl) {}
static inline int ssam_bus_register(void) { return 0; }
static inline void ssam_bus_unregister(void) {}
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index c61bbeeec2df..d384d36098c2 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -22,6 +22,7 @@
#include <linux/sysfs.h>
#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
#include "bus.h"
#include "controller.h"
@@ -735,7 +736,7 @@ static void ssam_serial_hub_remove(struct serdev_device *serdev)
ssam_controller_lock(ctrl);
/* Remove all client devices. */
- ssam_controller_remove_clients(ctrl);
+ ssam_remove_clients(&serdev->dev);
/* Act as if suspending to silence events. */
status = ssam_ctrl_notif_display_off(ctrl);
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index e70f4c63554e..ce2bd88feeaa 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -258,20 +258,6 @@ static int ssam_uid_from_string(const char *str, struct ssam_device_uid *uid)
return 0;
}
-static int ssam_hub_remove_devices_fn(struct device *dev, void *data)
-{
- if (!is_ssam_device(dev))
- return 0;
-
- ssam_device_remove(to_ssam_device(dev));
- return 0;
-}
-
-static void ssam_hub_remove_devices(struct device *parent)
-{
- device_for_each_child_reverse(parent, NULL, ssam_hub_remove_devices_fn);
-}
-
static int ssam_hub_add_device(struct device *parent, struct ssam_controller *ctrl,
struct fwnode_handle *node)
{
@@ -297,8 +283,8 @@ static int ssam_hub_add_device(struct device *parent, struct ssam_controller *ct
return status;
}
-static int ssam_hub_add_devices(struct device *parent, struct ssam_controller *ctrl,
- struct fwnode_handle *node)
+static int ssam_hub_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
{
struct fwnode_handle *child;
int status;
@@ -317,7 +303,7 @@ static int ssam_hub_add_devices(struct device *parent, struct ssam_controller *c
return 0;
err:
- ssam_hub_remove_devices(parent);
+ ssam_remove_clients(parent);
return status;
}
@@ -412,9 +398,9 @@ static void ssam_base_hub_update_workfn(struct work_struct *work)
hub->state = state;
if (hub->state == SSAM_BASE_HUB_CONNECTED)
- status = ssam_hub_add_devices(&hub->sdev->dev, hub->sdev->ctrl, node);
+ status = ssam_hub_register_clients(&hub->sdev->dev, hub->sdev->ctrl, node);
else
- ssam_hub_remove_devices(&hub->sdev->dev);
+ ssam_remove_clients(&hub->sdev->dev);
if (status)
dev_err(&hub->sdev->dev, "failed to update base-hub devices: %d\n", status);
@@ -496,7 +482,7 @@ static int ssam_base_hub_probe(struct ssam_device *sdev)
err:
ssam_notifier_unregister(sdev->ctrl, &hub->notif);
cancel_delayed_work_sync(&hub->update_work);
- ssam_hub_remove_devices(&sdev->dev);
+ ssam_remove_clients(&sdev->dev);
return status;
}
@@ -508,7 +494,7 @@ static void ssam_base_hub_remove(struct ssam_device *sdev)
ssam_notifier_unregister(sdev->ctrl, &hub->notif);
cancel_delayed_work_sync(&hub->update_work);
- ssam_hub_remove_devices(&sdev->dev);
+ ssam_remove_clients(&sdev->dev);
}
static const struct ssam_device_id ssam_base_hub_match[] = {
@@ -611,7 +597,7 @@ static int ssam_platform_hub_probe(struct platform_device *pdev)
set_secondary_fwnode(&pdev->dev, root);
- status = ssam_hub_add_devices(&pdev->dev, ctrl, root);
+ status = ssam_hub_register_clients(&pdev->dev, ctrl, root);
if (status) {
set_secondary_fwnode(&pdev->dev, NULL);
software_node_unregister_node_group(nodes);
@@ -625,7 +611,7 @@ static int ssam_platform_hub_remove(struct platform_device *pdev)
{
const struct software_node **nodes = platform_get_drvdata(pdev);
- ssam_hub_remove_devices(&pdev->dev);
+ ssam_remove_clients(&pdev->dev);
set_secondary_fwnode(&pdev->dev, NULL);
software_node_unregister_node_group(nodes);
return 0;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7400bc5da5be..24deeeb29af2 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -127,6 +127,19 @@ config GIGABYTE_WMI
To compile this driver as a module, choose M here: the module will
be called gigabyte-wmi.
+config YOGABOOK_WMI
+ tristate "Lenovo Yoga Book tablet WMI key driver"
+ depends on ACPI_WMI
+ depends on INPUT
+ select LEDS_CLASS
+ select NEW_LEDS
+ help
+ Say Y here if you want to support the 'Pen' key and keyboard backlight
+ control on the Lenovo Yoga Book tablets.
+
+ To compile this driver as a module, choose M here: the module will
+ be called lenovo-yogabook-wmi.
+
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
depends on ACPI && THERMAL
@@ -296,6 +309,25 @@ config ASUS_NB_WMI
If you have an ACPI-WMI compatible Asus Notebook, say Y or M
here.
+config ASUS_TF103C_DOCK
+ tristate "Asus TF103C 2-in-1 keyboard dock"
+ depends on ACPI
+ depends on I2C
+ depends on INPUT
+ depends on HID
+ depends on GPIOLIB
+ help
+ This is a driver for the keyboard, touchpad and USB port of the
+ keyboard dock for the Asus TF103C 2-in-1 tablet.
+
+ This keyboard dock has its own I2C attached embedded controller
+ and the keyboard and touchpad are also connected over I2C,
+ instead of using the usual USB connection. This means that the
+ keyboard dock requires this special driver to function.
+
+	  If you have an Asus TF103C tablet, say Y or M here; for a generic
+	  x86 distro config, say M here.
+
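The dock's extra functions sit behind additional I2C addresses on the same adapter as the EC; the driver added later in this diff creates clients for them at probe time. A minimal sketch of that pattern, with hypothetical names (the 0x16 keyboard address is taken from the driver below):

#include <linux/i2c.h>
#include <linux/string.h>

/* Hypothetical sketch: instantiate a second client on the EC's adapter. */
static struct i2c_client *example_add_dock_client(struct i2c_client *ec)
{
	struct i2c_board_info info = { };

	strscpy(info.type, "example-dock-kbd", I2C_NAME_SIZE);
	info.addr = 0x16;	/* keyboard address, per asus-tf103c-dock.c */

	return i2c_new_client_device(ec->adapter, &info);
}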
config MERAKI_MX100
tristate "Cisco Meraki MX100 Platform Driver"
depends on GPIOLIB
@@ -517,7 +549,9 @@ config THINKPAD_ACPI
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on BACKLIGHT_CLASS_DEVICE
depends on I2C
+ depends on DRM
select ACPI_PLATFORM_PROFILE
+ select DRM_PRIVACY_SCREEN
select HWMON
select NVRAM
select NEW_LEDS
@@ -991,6 +1025,23 @@ config TOUCHSCREEN_DMI
the OS-image for the device. This option supplies the missing info.
Enable this for x86 tablets with Silead or Chipone touchscreens.
+config X86_ANDROID_TABLETS
+ tristate "X86 Android tablet support"
+ depends on I2C && SERIAL_DEV_BUS && ACPI && GPIOLIB
+ help
+	  X86 tablets which ship with Android as (part of) the factory image
+	  typically have various problems with their DSDTs. The factory kernels
+	  shipped on these devices typically have device addresses and GPIOs
+	  hardcoded in the kernel, rather than specified in their DSDT, with
+	  the DSDT containing a random collection of devices which may or may
+	  not actually be present.
+
+	  This driver contains various fixes for such tablets, including
+	  instantiating kernel devices for devices which are missing from the
+	  DSDT.
+
+	  If you have an x86 Android tablet, say Y or M here; for a generic
+	  x86 distro config, say M here.
+
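A minimal sketch of what "instantiating kernel devices" means in practice, with a made-up device name and address (illustration only; the real tables live in x86-android-tablets.c):

#include <linux/err.h>
#include <linux/i2c.h>

/* Hypothetical sketch: register a touchscreen the DSDT fails to describe. */
static int example_instantiate_missing_dev(struct i2c_adapter *adap)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("example_ts", 0x38),	/* hardcoded address */
	};

	return PTR_ERR_OR_ZERO(i2c_new_client_device(adap, &info));
}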
config FW_ATTR_CLASS
tristate
@@ -1075,6 +1126,18 @@ config INTEL_SCU_IPC_UTIL
low level access for debug work and updating the firmware. Say
N unless you will be doing this on an Intel MID platform.
+config SIEMENS_SIMATIC_IPC
+ tristate "Siemens Simatic IPC Class driver"
+ depends on PCI
+ help
+	  This Simatic IPC class driver is the central part of several drivers.
+	  It is mainly used for system identification, after which drivers in
+	  other classes take care of the machine specifics, e.g. LEDs and
+	  watchdogs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc.
+
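As a rough illustration of system identification (the mechanism actually used by simatic-ipc may differ), DMI-based matching looks like this:

#include <linux/dmi.h>
#include <linux/string.h>

/* Hypothetical sketch: identify the machine from its DMI vendor string. */
static bool example_is_simatic_ipc(void)
{
	const char *vendor = dmi_get_system_info(DMI_SYS_VENDOR);

	return vendor && strstr(vendor, "SIEMENS");
}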
endif # X86_PLATFORM_DEVICES
config PMC_ATOM
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 253a096b5dd8..c12a9b044fd8 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NVIDIA_WMI_EC_BACKLIGHT) += nvidia-wmi-ec-backlight.o
obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
obj-$(CONFIG_XIAOMI_WMI) += xiaomi-wmi.o
obj-$(CONFIG_GIGABYTE_WMI) += gigabyte-wmi.o
+obj-$(CONFIG_YOGABOOK_WMI) += lenovo-yogabook-wmi.o
# Acer
obj-$(CONFIG_ACERHDF) += acerhdf.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o
obj-$(CONFIG_ASUS_WMI) += asus-wmi.o
obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o
+obj-$(CONFIG_ASUS_TF103C_DOCK) += asus-tf103c-dock.o
obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
@@ -112,6 +114,7 @@ obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o
+obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets.o
# Intel uncore drivers
obj-$(CONFIG_INTEL_IPS) += intel_ips.o
@@ -123,3 +126,6 @@ obj-$(CONFIG_INTEL_SCU_PLATFORM) += intel_scu_pltdrv.o
obj-$(CONFIG_INTEL_SCU_WDT) += intel_scu_wdt.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
+
+# Siemens Simatic Industrial PCs
+obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += simatic-ipc.o
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index 230593ae5d6d..f794343d6aaa 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -35,6 +35,12 @@
#define AMD_PMC_SCRATCH_REG_CZN 0x94
#define AMD_PMC_SCRATCH_REG_YC 0xD14
+/* STB Registers */
+#define AMD_PMC_STB_INDEX_ADDRESS 0xF8
+#define AMD_PMC_STB_INDEX_DATA 0xFC
+#define AMD_PMC_STB_PMI_0 0x03E30600
+#define AMD_PMC_STB_PREDEF 0xC6000001
+
/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMC_SMU_INDEX_ADDRESS 0xB8
#define AMD_PMC_SMU_INDEX_DATA 0xBC
@@ -82,6 +88,7 @@
#define SOC_SUBSYSTEM_IP_MAX 12
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000
+#define FIFO_SIZE 4096
enum amd_pmc_def {
MSG_TEST = 0x01,
MSG_OS_HINT_PCO,
@@ -121,14 +128,21 @@ struct amd_pmc_dev {
u16 minor;
u16 rev;
struct device *dev;
+ struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
};
+static bool enable_stb;
+module_param(enable_stb, bool, 0644);
+MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
+
static struct amd_pmc_dev pmc;
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
+static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -175,6 +189,50 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
return 0;
}
+static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 size = FIFO_SIZE * sizeof(u32);
+ u32 *buf;
+ int rc;
+
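+	/* Snapshot the whole STB FIFO at open; reads are served from this copy. */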
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = amd_pmc_read_stb(dev, buf);
+ if (rc) {
+ kfree(buf);
+ return rc;
+ }
+
+ filp->private_data = buf;
+ return rc;
+}
+
+static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, filp->private_data,
+ FIFO_SIZE * sizeof(u32));
+}
+
+static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_pmc_stb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = amd_pmc_stb_debugfs_open,
+ .read = amd_pmc_stb_debugfs_read,
+ .release = amd_pmc_stb_debugfs_release,
+};
+
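The node dumps a raw snapshot of the FIFO; a hedged userspace example, assuming the driver's existing "amd_pmc" debugfs directory:

/* Hypothetical userspace reader for stb_read (not part of this patch). */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/amd_pmc/stb_read", "rb");
	unsigned int word;

	if (!f)
		return 1;
	while (fread(&word, sizeof(word), 1, f) == 1)
		printf("0x%08x\n", word);
	fclose(f);
	return 0;
}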
static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
struct seq_file *s)
{
@@ -288,6 +346,10 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
&s0ix_stats_fops);
debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
&amd_pmc_idlemask_fops);
+ /* Enable STB only when the module_param is set */
+ if (enable_stb)
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_pmc_stb_debugfs_fops);
}
#else
static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
@@ -484,6 +546,13 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
if (rc)
dev_err(pdev->dev, "suspend failed\n");
+	if (enable_stb) {
+		rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF);
+		if (rc) {
+			dev_err(pdev->dev, "error writing to STB\n");
+			return rc;
+		}
+	}
+
return rc;
}
@@ -504,6 +573,14 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
/* Dump the IdleMask to see the blockers */
amd_pmc_idlemask_read(pdev, dev, NULL);
+ /* Write data incremented by 1 to distinguish in stb_read */
+	if (enable_stb) {
+		rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
+		if (rc) {
+			dev_err(pdev->dev, "error writing to STB\n");
+			return rc;
+		}
+	}
+
return 0;
}
@@ -521,6 +598,50 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ }
};
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
+{
+ int err;
+
+ err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_ADDRESS, AMD_PMC_STB_PMI_0);
+ if (err) {
+ dev_err(dev->dev, "failed to write addr in stb: 0x%X\n",
+ AMD_PMC_STB_INDEX_ADDRESS);
+ return pcibios_err_to_errno(err);
+ }
+
+ err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_DATA, data);
+ if (err) {
+ dev_err(dev->dev, "failed to write data in stb: 0x%X\n",
+ AMD_PMC_STB_INDEX_DATA);
+ return pcibios_err_to_errno(err);
+ }
+
+ return 0;
+}
+
+static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
+{
+ int i, err;
+
+ err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_ADDRESS, AMD_PMC_STB_PMI_0);
+ if (err) {
+ dev_err(dev->dev, "error writing addr to stb: 0x%X\n",
+ AMD_PMC_STB_INDEX_ADDRESS);
+ return pcibios_err_to_errno(err);
+ }
+
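+	/*
+	 * The index is programmed once; repeated reads of the data register
+	 * then stream out the FIFO contents.
+	 */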
+ for (i = 0; i < FIFO_SIZE; i++) {
+ err = pci_read_config_dword(dev->rdev, AMD_PMC_STB_INDEX_DATA, buf++);
+ if (err) {
+ dev_err(dev->dev, "error reading data from stb: 0x%X\n",
+ AMD_PMC_STB_INDEX_DATA);
+ return pcibios_err_to_errno(err);
+ }
+ }
+
+ return 0;
+}
+
static int amd_pmc_probe(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = &pmc;
@@ -534,22 +655,23 @@ static int amd_pmc_probe(struct platform_device *pdev)
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) {
- pci_dev_put(rdev);
- return -ENODEV;
+ err = -ENODEV;
+ goto err_pci_dev_put;
}
dev->cpu_id = rdev->device;
+ dev->rdev = rdev;
err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_LO);
if (err) {
dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
- pci_dev_put(rdev);
- return pcibios_err_to_errno(err);
+ err = pcibios_err_to_errno(err);
+ goto err_pci_dev_put;
}
err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
if (err) {
- pci_dev_put(rdev);
- return pcibios_err_to_errno(err);
+ err = pcibios_err_to_errno(err);
+ goto err_pci_dev_put;
}
base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
@@ -557,24 +679,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_HI);
if (err) {
dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
- pci_dev_put(rdev);
- return pcibios_err_to_errno(err);
+ err = pcibios_err_to_errno(err);
+ goto err_pci_dev_put;
}
err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
if (err) {
- pci_dev_put(rdev);
- return pcibios_err_to_errno(err);
+ err = pcibios_err_to_errno(err);
+ goto err_pci_dev_put;
}
base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK;
- pci_dev_put(rdev);
base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
AMD_PMC_MAPPING_SIZE);
- if (!dev->regbase)
- return -ENOMEM;
+ if (!dev->regbase) {
+ err = -ENOMEM;
+ goto err_pci_dev_put;
+ }
mutex_init(&dev->lock);
@@ -583,8 +706,10 @@ static int amd_pmc_probe(struct platform_device *pdev)
base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
- if (!dev->fch_virt_addr)
- return -ENOMEM;
+ if (!dev->fch_virt_addr) {
+ err = -ENOMEM;
+ goto err_pci_dev_put;
+ }
/* Use SMU to get the s0i3 debug stats */
err = amd_pmc_setup_smu_logging(dev);
@@ -595,6 +720,10 @@ static int amd_pmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
amd_pmc_dbgfs_register(dev);
return 0;
+
+err_pci_dev_put:
+ pci_dev_put(rdev);
+ return err;
}
static int amd_pmc_remove(struct platform_device *pdev)
@@ -602,6 +731,7 @@ static int amd_pmc_remove(struct platform_device *pdev)
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
amd_pmc_dbgfs_unregister(dev);
+ pci_dev_put(dev->rdev);
mutex_destroy(&dev->lock);
return 0;
}
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
new file mode 100644
index 000000000000..d4ef8f362ee6
--- /dev/null
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -0,0 +1,945 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This is a driver for the keyboard, touchpad and USB port of the
+ * keyboard dock for the Asus TF103C 2-in-1 tablet.
+ *
+ * This keyboard dock has its own I2C attached embedded controller
+ * and the keyboard and touchpad are also connected over I2C,
+ * instead of using the usual USB connection. This means that the
+ * keyboard dock requires this special driver to function.
+ *
+ * Copyright (C) 2021 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/hid.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mod_devicetable.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+
+static bool fnlock;
+module_param(fnlock, bool, 0644);
+MODULE_PARM_DESC(fnlock,
+ "By default the kbd toprow sends multimedia key presses. AltGr "
+ "can be pressed to change this to F1-F12. Set this to 1 to "
+ "change the default. Press AltGr + Esc to toggle at runtime.");
+
+#define TF103C_DOCK_DEV_NAME "NPCE69A:00"
+
+#define TF103C_DOCK_HPD_DEBOUNCE msecs_to_jiffies(20)
+
+/*** Touchpad I2C device defines ***/
+#define TF103C_DOCK_TP_ADDR 0x15
+
+/*** Keyboard I2C device defines ***/
+#define TF103C_DOCK_KBD_ADDR 0x16
+
+#define TF103C_DOCK_KBD_DATA_REG 0x73
+#define TF103C_DOCK_KBD_DATA_MIN_LENGTH 4
+#define TF103C_DOCK_KBD_DATA_MAX_LENGTH 11
+#define TF103C_DOCK_KBD_DATA_MODIFIERS 3
+#define TF103C_DOCK_KBD_DATA_KEYS 5
+#define TF103C_DOCK_KBD_CMD_REG 0x75
+
+#define TF103C_DOCK_KBD_CMD_ENABLE 0x0800
+
+/*** EC interrupt data I2C device defines ***/
+#define TF103C_DOCK_INTR_ADDR 0x19
+#define TF103C_DOCK_INTR_DATA_REG 0x6a
+
+#define TF103C_DOCK_INTR_DATA1_OBF_MASK 0x01
+#define TF103C_DOCK_INTR_DATA1_KEY_MASK 0x04
+#define TF103C_DOCK_INTR_DATA1_KBC_MASK 0x08
+#define TF103C_DOCK_INTR_DATA1_AUX_MASK 0x20
+#define TF103C_DOCK_INTR_DATA1_SCI_MASK 0x40
+#define TF103C_DOCK_INTR_DATA1_SMI_MASK 0x80
+/* Special values for the OOB data on kbd_client / tp_client */
+#define TF103C_DOCK_INTR_DATA1_OOB_VALUE 0xc1
+#define TF103C_DOCK_INTR_DATA2_OOB_VALUE 0x04
+
+#define TF103C_DOCK_SMI_AC_EVENT 0x31
+#define TF103C_DOCK_SMI_HANDSHAKING 0x50
+#define TF103C_DOCK_SMI_EC_WAKEUP 0x53
+#define TF103C_DOCK_SMI_BOOTBLOCK_RESET 0x5e
+#define TF103C_DOCK_SMI_WATCHDOG_RESET 0x5f
+#define TF103C_DOCK_SMI_ADAPTER_CHANGE 0x60
+#define TF103C_DOCK_SMI_DOCK_INSERT 0x61
+#define TF103C_DOCK_SMI_DOCK_REMOVE 0x62
+#define TF103C_DOCK_SMI_PAD_BL_CHANGE 0x63
+#define TF103C_DOCK_SMI_HID_STATUS_CHANGED 0x64
+#define TF103C_DOCK_SMI_HID_WAKEUP 0x65
+#define TF103C_DOCK_SMI_S3 0x83
+#define TF103C_DOCK_SMI_S5 0x85
+#define TF103C_DOCK_SMI_NOTIFY_SHUTDOWN 0x90
+#define TF103C_DOCK_SMI_RESUME 0x91
+
+/*** EC (dockram) I2C device defines ***/
+#define TF103C_DOCK_EC_ADDR 0x1b
+
+#define TF103C_DOCK_EC_CMD_REG 0x0a
+#define TF103C_DOCK_EC_CMD_LEN 9
+
+enum {
+ TF103C_DOCK_FLAG_HID_OPEN,
+};
+
+struct tf103c_dock_data {
+ struct delayed_work hpd_work;
+ struct irq_chip tp_irqchip;
+ struct irq_domain *tp_irq_domain;
+ struct i2c_client *ec_client;
+ struct i2c_client *intr_client;
+ struct i2c_client *kbd_client;
+ struct i2c_client *tp_client;
+ struct gpio_desc *pwr_en;
+ struct gpio_desc *irq_gpio;
+ struct gpio_desc *hpd_gpio;
+ struct input_dev *input;
+ struct hid_device *hid;
+ unsigned long flags;
+ int board_rev;
+ int irq;
+ int hpd_irq;
+ int tp_irq;
+ int last_press_0x13;
+ int last_press_0x14;
+ bool enabled;
+ bool tp_enabled;
+ bool altgr_pressed;
+ bool esc_pressed;
+ bool filter_esc;
+ u8 kbd_buf[TF103C_DOCK_KBD_DATA_MAX_LENGTH];
+};
+
+static struct gpiod_lookup_table tf103c_dock_gpios = {
+ .dev_id = "i2c-" TF103C_DOCK_DEV_NAME,
+ .table = {
+ GPIO_LOOKUP("INT33FC:00", 55, "dock_pwr_en", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 1, "dock_irq", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 29, "dock_hpd", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio_crystalcove", 2, "board_rev", GPIO_ACTIVE_HIGH),
+ {}
+ },
+};
+
+/* Byte 0 is the length of the rest of the packet */
+static const u8 tf103c_dock_enable_cmd[9] = { 8, 0x20, 0, 0, 0, 0, 0x20, 0, 0 };
+static const u8 tf103c_dock_usb_enable_cmd[9] = { 8, 0, 0, 0, 0, 0, 0, 0x40, 0 };
+static const u8 tf103c_dock_suspend_cmd[9] = { 8, 0, 0x20, 0, 0, 0x22, 0, 0, 0 };
+
+/*** keyboard related code ***/
+
+static u8 tf103c_dock_kbd_hid_desc[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x11, /* Report ID (17), */
+ 0x95, 0x08, /* Report Count (8), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */
+ 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0x00, /* Usage Minimum (None), */
+ 0x2A, 0xFF, 0x00, /* Usage Maximum (FFh), */
+ 0x81, 0x00, /* Input, */
+ 0xC0 /* End Collection */
+};
+
+static int tf103c_dock_kbd_read(struct tf103c_dock_data *dock)
+{
+ struct i2c_client *client = dock->kbd_client;
+ struct device *dev = &dock->ec_client->dev;
+ struct i2c_msg msgs[2];
+ u8 reg[2];
+ int ret;
+
+ reg[0] = TF103C_DOCK_KBD_DATA_REG & 0xff;
+ reg[1] = TF103C_DOCK_KBD_DATA_REG >> 8;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(reg);
+ msgs[0].buf = reg;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = TF103C_DOCK_KBD_DATA_MAX_LENGTH;
+ msgs[1].buf = dock->kbd_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(dev, "error %d reading kbd data\n", ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void tf103c_dock_kbd_write(struct tf103c_dock_data *dock, u16 cmd)
+{
+ struct device *dev = &dock->ec_client->dev;
+ u8 buf[4];
+ int ret;
+
+ put_unaligned_le16(TF103C_DOCK_KBD_CMD_REG, &buf[0]);
+ put_unaligned_le16(cmd, &buf[2]);
+
+ ret = i2c_master_send(dock->kbd_client, buf, sizeof(buf));
+ if (ret != sizeof(buf))
+ dev_err(dev, "error %d writing kbd cmd\n", ret);
+}
+
+/* HID ll_driver functions for forwarding input-reports from the kbd_client */
+static int tf103c_dock_hid_parse(struct hid_device *hid)
+{
+ return hid_parse_report(hid, tf103c_dock_kbd_hid_desc,
+ sizeof(tf103c_dock_kbd_hid_desc));
+}
+
+static int tf103c_dock_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void tf103c_dock_hid_stop(struct hid_device *hid)
+{
+ hid->claimed = 0;
+}
+
+static int tf103c_dock_hid_open(struct hid_device *hid)
+{
+ struct tf103c_dock_data *dock = hid->driver_data;
+
+ set_bit(TF103C_DOCK_FLAG_HID_OPEN, &dock->flags);
+ return 0;
+}
+
+static void tf103c_dock_hid_close(struct hid_device *hid)
+{
+ struct tf103c_dock_data *dock = hid->driver_data;
+
+ clear_bit(TF103C_DOCK_FLAG_HID_OPEN, &dock->flags);
+}
+
+/* Mandatory, but not used */
+static int tf103c_dock_hid_raw_request(struct hid_device *hid, u8 reportnum,
+ u8 *buf, size_t len, u8 rtype, int reqtype)
+{
+ return 0;
+}
+
+static struct hid_ll_driver tf103c_dock_hid_ll_driver = {
+ .parse = tf103c_dock_hid_parse,
+ .start = tf103c_dock_hid_start,
+ .stop = tf103c_dock_hid_stop,
+ .open = tf103c_dock_hid_open,
+ .close = tf103c_dock_hid_close,
+ .raw_request = tf103c_dock_hid_raw_request,
+};
+
+static int tf103c_dock_toprow_codes[13][2] = {
+ /* Normal, AltGr pressed */
+ { KEY_POWER, KEY_F1 },
+ { KEY_RFKILL, KEY_F2 },
+ { KEY_F21, KEY_F3 }, /* Touchpad toggle, userspace expects F21 */
+ { KEY_BRIGHTNESSDOWN, KEY_F4 },
+ { KEY_BRIGHTNESSUP, KEY_F5 },
+ { KEY_CAMERA, KEY_F6 },
+ { KEY_CONFIG, KEY_F7 },
+ { KEY_PREVIOUSSONG, KEY_F8 },
+ { KEY_PLAYPAUSE, KEY_F9 },
+ { KEY_NEXTSONG, KEY_F10 },
+ { KEY_MUTE, KEY_F11 },
+ { KEY_VOLUMEDOWN, KEY_F12 },
+ { KEY_VOLUMEUP, KEY_SYSRQ },
+};
+
+static void tf103c_dock_report_toprow_kbd_hook(struct tf103c_dock_data *dock)
+{
+ u8 *esc, *buf = dock->kbd_buf;
+ int size;
+
+ /*
+	 * Filter AltGr out of the reports sent to the "Asus TF103C Dock
+	 * Keyboard" input_dev, since it is used as the "Fn" key for the toprow
+	 * keys. Instead, report it on the "Asus TF103C Dock Top Row Keys"
+	 * input_dev when it is not being used to modify the toprow keys.
+ */
+ dock->altgr_pressed = buf[TF103C_DOCK_KBD_DATA_MODIFIERS] & 0x40;
+ buf[TF103C_DOCK_KBD_DATA_MODIFIERS] &= ~0x40;
+
+ input_report_key(dock->input, KEY_RIGHTALT, dock->altgr_pressed);
+ input_sync(dock->input);
+
+ /* Toggle fnlock on AltGr + Esc press */
+ buf = buf + TF103C_DOCK_KBD_DATA_KEYS;
+ size = TF103C_DOCK_KBD_DATA_MAX_LENGTH - TF103C_DOCK_KBD_DATA_KEYS;
+ esc = memchr(buf, 0x29, size);
+ if (!dock->esc_pressed && esc) {
+ if (dock->altgr_pressed) {
+ fnlock = !fnlock;
+ dock->filter_esc = true;
+ }
+ }
+ if (esc && dock->filter_esc)
+ *esc = 0;
+ else
+ dock->filter_esc = false;
+
+ dock->esc_pressed = esc != NULL;
+}
+
+static void tf103c_dock_toprow_press(struct tf103c_dock_data *dock, int key_code)
+{
+ /*
+ * Release AltGr before reporting the toprow key, so that userspace
+ * sees e.g. just KEY_SUSPEND and not AltGr + KEY_SUSPEND.
+ */
+ if (dock->altgr_pressed) {
+ input_report_key(dock->input, KEY_RIGHTALT, false);
+ input_sync(dock->input);
+ }
+
+ input_report_key(dock->input, key_code, true);
+ input_sync(dock->input);
+}
+
+static void tf103c_dock_toprow_release(struct tf103c_dock_data *dock, int key_code)
+{
+ input_report_key(dock->input, key_code, false);
+ input_sync(dock->input);
+
+ if (dock->altgr_pressed) {
+ input_report_key(dock->input, KEY_RIGHTALT, true);
+ input_sync(dock->input);
+ }
+}
+
+static void tf103c_dock_toprow_event(struct tf103c_dock_data *dock,
+ int toprow_index, int *last_press)
+{
+ int key_code, fn = dock->altgr_pressed ^ fnlock;
+
+ if (last_press && *last_press) {
+ tf103c_dock_toprow_release(dock, *last_press);
+ *last_press = 0;
+ }
+
+ if (toprow_index < 0)
+ return;
+
+ key_code = tf103c_dock_toprow_codes[toprow_index][fn];
+ tf103c_dock_toprow_press(dock, key_code);
+
+ if (last_press)
+ *last_press = key_code;
+ else
+ tf103c_dock_toprow_release(dock, key_code);
+}
+
+/*
+ * The keyboard sends what appears to be standard I2C-HID input-reports,
+ * except that a 16-bit register address of where the I2C-HID format
+ * input-reports are stored must be sent before reading them, in a single
+ * (I2C repeated-start) I2C transaction.
+ *
+ * It's unknown how to get the HID descriptors, but they are easy to
+ * reconstruct:
+ *
+ * Input report id 0x11 is 8 bytes long and contains standard USB HID
+ * intf-class, Boot Interface Subclass reports.
+ * Input report id 0x13 is 2 bytes long and sends Consumer Control events
+ * Input report id 0x14 is 1 byte long and sends System Control events
+ *
+ * However the top row keys (where a normal keyboard has F1-F12 + Print-Screen)
+ * are a mess, using a mix of the 0x13 and 0x14 input reports as well as EC SCI
+ * events; and these need special handling to allow actually sending F1-F12,
+ * since the Fn key on the keyboard only works on the cursor keys and the top
+ * row keys always send their special "Multimedia hotkey" codes.
+ *
+ * So only forward the 0x11 reports to HID and handle the top-row keys here.
+ */
+static void tf103c_dock_kbd_interrupt(struct tf103c_dock_data *dock)
+{
+ struct device *dev = &dock->ec_client->dev;
+ u8 *buf = dock->kbd_buf;
+ int size;
+
+ if (tf103c_dock_kbd_read(dock))
+ return;
+
+ size = buf[0] | buf[1] << 8;
+ if (size < TF103C_DOCK_KBD_DATA_MIN_LENGTH ||
+ size > TF103C_DOCK_KBD_DATA_MAX_LENGTH) {
+ dev_err(dev, "error reported kbd pkt size %d is out of range %d-%d\n", size,
+ TF103C_DOCK_KBD_DATA_MIN_LENGTH,
+ TF103C_DOCK_KBD_DATA_MAX_LENGTH);
+ return;
+ }
+
+ switch (buf[2]) {
+ case 0x11:
+ if (size != 11)
+ break;
+
+ tf103c_dock_report_toprow_kbd_hook(dock);
+
+ if (test_bit(TF103C_DOCK_FLAG_HID_OPEN, &dock->flags))
+ hid_input_report(dock->hid, HID_INPUT_REPORT, buf + 2, size - 2, 1);
+ return;
+ case 0x13:
+ if (size != 5)
+ break;
+
+ switch (buf[3] | buf[4] << 8) {
+ case 0:
+ tf103c_dock_toprow_event(dock, -1, &dock->last_press_0x13);
+ return;
+ case 0x70:
+ tf103c_dock_toprow_event(dock, 3, &dock->last_press_0x13);
+ return;
+ case 0x6f:
+ tf103c_dock_toprow_event(dock, 4, &dock->last_press_0x13);
+ return;
+ case 0xb6:
+ tf103c_dock_toprow_event(dock, 7, &dock->last_press_0x13);
+ return;
+ case 0xcd:
+ tf103c_dock_toprow_event(dock, 8, &dock->last_press_0x13);
+ return;
+ case 0xb5:
+ tf103c_dock_toprow_event(dock, 9, &dock->last_press_0x13);
+ return;
+ case 0xe2:
+ tf103c_dock_toprow_event(dock, 10, &dock->last_press_0x13);
+ return;
+ case 0xea:
+ tf103c_dock_toprow_event(dock, 11, &dock->last_press_0x13);
+ return;
+ case 0xe9:
+ tf103c_dock_toprow_event(dock, 12, &dock->last_press_0x13);
+ return;
+ }
+ break;
+ case 0x14:
+ if (size != 4)
+ break;
+
+ switch (buf[3]) {
+ case 0:
+ tf103c_dock_toprow_event(dock, -1, &dock->last_press_0x14);
+ return;
+ case 1:
+ tf103c_dock_toprow_event(dock, 0, &dock->last_press_0x14);
+ return;
+ }
+ break;
+ }
+
+ dev_warn(dev, "warning unknown kbd data: %*ph\n", size, buf);
+}
+
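Derived from the size and report-id checks above (not a captured trace), a well-formed keyboard packet is framed as follows:

#include <linux/types.h>

/* Illustration only: an 11-byte report-0x11 packet as the checks above expect. */
static const u8 example_kbd_pkt[11] = {
	0x0b, 0x00,		/* size = 11 (LE16), counts the whole packet */
	0x11,			/* report id: boot-keyboard input report */
	0x00,			/* modifiers (0x40 = AltGr) */
	0x00,			/* reserved */
	0, 0, 0, 0, 0, 0,	/* up to six key usage codes */
};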
+/*** touchpad related code ***/
+
+static const struct property_entry tf103c_dock_touchpad_props[] = {
+ PROPERTY_ENTRY_BOOL("elan,clickpad"),
+ { }
+};
+
+static const struct software_node tf103c_dock_touchpad_sw_node = {
+ .properties = tf103c_dock_touchpad_props,
+};
+
+/*
+ * tf103c_dock_enable_touchpad() is only called from the threaded interrupt
+ * handler and tf103c_dock_disable_touchpad() is only called after the irq
+ * is disabled, so no locking is necessary.
+ */
+static void tf103c_dock_enable_touchpad(struct tf103c_dock_data *dock)
+{
+ struct i2c_board_info board_info = { };
+ struct device *dev = &dock->ec_client->dev;
+ int ret;
+
+ if (dock->tp_enabled) {
+		/* Happens after resume; the tp needs to be reinitialized */
+ ret = device_reprobe(&dock->tp_client->dev);
+ if (ret)
+ dev_err_probe(dev, ret, "reprobing tp-client\n");
+ return;
+ }
+
+ strscpy(board_info.type, "elan_i2c", I2C_NAME_SIZE);
+ board_info.addr = TF103C_DOCK_TP_ADDR;
+ board_info.dev_name = TF103C_DOCK_DEV_NAME "-tp";
+ board_info.irq = dock->tp_irq;
+ board_info.swnode = &tf103c_dock_touchpad_sw_node;
+
+ dock->tp_client = i2c_new_client_device(dock->ec_client->adapter, &board_info);
+ if (IS_ERR(dock->tp_client)) {
+ dev_err(dev, "error %ld creating tp client\n", PTR_ERR(dock->tp_client));
+ return;
+ }
+
+ dock->tp_enabled = true;
+}
+
+static void tf103c_dock_disable_touchpad(struct tf103c_dock_data *dock)
+{
+ if (!dock->tp_enabled)
+ return;
+
+ i2c_unregister_device(dock->tp_client);
+
+ dock->tp_enabled = false;
+}
+
+/*** interrupt handling code ***/
+static void tf103c_dock_ec_cmd(struct tf103c_dock_data *dock, const u8 *cmd)
+{
+ struct device *dev = &dock->ec_client->dev;
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(dock->ec_client, TF103C_DOCK_EC_CMD_REG,
+ TF103C_DOCK_EC_CMD_LEN, cmd);
+ if (ret)
+ dev_err(dev, "error %d sending %*ph cmd\n", ret,
+ TF103C_DOCK_EC_CMD_LEN, cmd);
+}
+
+static void tf103c_dock_sci(struct tf103c_dock_data *dock, u8 val)
+{
+ struct device *dev = &dock->ec_client->dev;
+
+ switch (val) {
+ case 2:
+ tf103c_dock_toprow_event(dock, 1, NULL);
+ return;
+ case 4:
+ tf103c_dock_toprow_event(dock, 2, NULL);
+ return;
+ case 8:
+ tf103c_dock_toprow_event(dock, 5, NULL);
+ return;
+ case 17:
+ tf103c_dock_toprow_event(dock, 6, NULL);
+ return;
+ }
+
+ dev_warn(dev, "warning unknown SCI value: 0x%02x\n", val);
+}
+
+static void tf103c_dock_smi(struct tf103c_dock_data *dock, u8 val)
+{
+ struct device *dev = &dock->ec_client->dev;
+
+ switch (val) {
+ case TF103C_DOCK_SMI_EC_WAKEUP:
+ tf103c_dock_ec_cmd(dock, tf103c_dock_enable_cmd);
+ tf103c_dock_ec_cmd(dock, tf103c_dock_usb_enable_cmd);
+ tf103c_dock_kbd_write(dock, TF103C_DOCK_KBD_CMD_ENABLE);
+ break;
+ case TF103C_DOCK_SMI_PAD_BL_CHANGE:
+ /* There is no backlight, but the EC still sends this */
+ break;
+ case TF103C_DOCK_SMI_HID_STATUS_CHANGED:
+ tf103c_dock_enable_touchpad(dock);
+ break;
+ default:
+ dev_warn(dev, "warning unknown SMI value: 0x%02x\n", val);
+ break;
+ }
+}
+
+static irqreturn_t tf103c_dock_irq(int irq, void *data)
+{
+ struct tf103c_dock_data *dock = data;
+ struct device *dev = &dock->ec_client->dev;
+ u8 intr_data[8];
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(dock->intr_client, TF103C_DOCK_INTR_DATA_REG,
+ sizeof(intr_data), intr_data);
+ if (ret != sizeof(intr_data)) {
+ dev_err(dev, "error %d reading intr data\n", ret);
+ return IRQ_NONE;
+ }
+
+ if (!(intr_data[1] & TF103C_DOCK_INTR_DATA1_OBF_MASK))
+ return IRQ_NONE;
+
+ /* intr_data[0] is the length of the rest of the packet */
+ if (intr_data[0] == 3 && intr_data[1] == TF103C_DOCK_INTR_DATA1_OOB_VALUE &&
+ intr_data[2] == TF103C_DOCK_INTR_DATA2_OOB_VALUE) {
+ /* intr_data[3] seems to contain a HID input report id */
+ switch (intr_data[3]) {
+ case 0x01:
+ handle_nested_irq(dock->tp_irq);
+ break;
+ case 0x11:
+ case 0x13:
+ case 0x14:
+ tf103c_dock_kbd_interrupt(dock);
+ break;
+ default:
+ dev_warn(dev, "warning unknown intr_data[3]: 0x%02x\n", intr_data[3]);
+ break;
+ }
+ return IRQ_HANDLED;
+ }
+
+ if (intr_data[1] & TF103C_DOCK_INTR_DATA1_SCI_MASK) {
+ tf103c_dock_sci(dock, intr_data[2]);
+ return IRQ_HANDLED;
+ }
+
+ if (intr_data[1] & TF103C_DOCK_INTR_DATA1_SMI_MASK) {
+ tf103c_dock_smi(dock, intr_data[2]);
+ return IRQ_HANDLED;
+ }
+
+ dev_warn(dev, "warning unknown intr data: %*ph\n", 8, intr_data);
+ return IRQ_NONE;
+}
+
+/*
+ * tf103c_dock_[dis|en]able only run from hpd_work or at times when
+ * hpd_work cannot run (hpd_irq disabled), so no locking is necessary.
+ */
+static void tf103c_dock_enable(struct tf103c_dock_data *dock)
+{
+ if (dock->enabled)
+ return;
+
+ if (dock->board_rev != 2)
+ gpiod_set_value(dock->pwr_en, 1);
+
+ msleep(500);
+ enable_irq(dock->irq);
+
+ dock->enabled = true;
+}
+
+static void tf103c_dock_disable(struct tf103c_dock_data *dock)
+{
+ if (!dock->enabled)
+ return;
+
+ disable_irq(dock->irq);
+ tf103c_dock_disable_touchpad(dock);
+ if (dock->board_rev != 2)
+ gpiod_set_value(dock->pwr_en, 0);
+
+ dock->enabled = false;
+}
+
+static void tf103c_dock_hpd_work(struct work_struct *work)
+{
+ struct tf103c_dock_data *dock =
+ container_of(work, struct tf103c_dock_data, hpd_work.work);
+
+ if (gpiod_get_value(dock->hpd_gpio))
+ tf103c_dock_enable(dock);
+ else
+ tf103c_dock_disable(dock);
+}
+
+static irqreturn_t tf103c_dock_hpd_irq(int irq, void *data)
+{
+ struct tf103c_dock_data *dock = data;
+
+ mod_delayed_work(system_long_wq, &dock->hpd_work, TF103C_DOCK_HPD_DEBOUNCE);
+ return IRQ_HANDLED;
+}
+
+static void tf103c_dock_start_hpd(struct tf103c_dock_data *dock)
+{
+ enable_irq(dock->hpd_irq);
+ /* Sync current HPD status */
+ queue_delayed_work(system_long_wq, &dock->hpd_work, TF103C_DOCK_HPD_DEBOUNCE);
+}
+
+static void tf103c_dock_stop_hpd(struct tf103c_dock_data *dock)
+{
+ disable_irq(dock->hpd_irq);
+ cancel_delayed_work_sync(&dock->hpd_work);
+}
+
+/*** probe ***/
+
+static const struct dmi_system_id tf103c_dock_dmi_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ },
+ },
+ { }
+};
+
+static void tf103c_dock_non_devm_cleanup(void *data)
+{
+ struct tf103c_dock_data *dock = data;
+
+ if (dock->tp_irq_domain)
+ irq_domain_remove(dock->tp_irq_domain);
+
+ if (!IS_ERR_OR_NULL(dock->hid))
+ hid_destroy_device(dock->hid);
+
+ i2c_unregister_device(dock->kbd_client);
+ i2c_unregister_device(dock->intr_client);
+ gpiod_remove_lookup_table(&tf103c_dock_gpios);
+}
+
+static int tf103c_dock_probe(struct i2c_client *client)
+{
+ struct i2c_board_info board_info = { };
+ struct device *dev = &client->dev;
+ struct gpio_desc *board_rev_gpio;
+ struct tf103c_dock_data *dock;
+ enum gpiod_flags flags;
+ int i, ret;
+
+ /* GPIOs are hardcoded for the Asus TF103C, don't bind on other devs */
+ if (!dmi_check_system(tf103c_dock_dmi_ids))
+ return -ENODEV;
+
+ dock = devm_kzalloc(dev, sizeof(*dock), GFP_KERNEL);
+ if (!dock)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&dock->hpd_work, tf103c_dock_hpd_work);
+
+ /* 1. Get GPIOs and their IRQs */
+ gpiod_add_lookup_table(&tf103c_dock_gpios);
+
+ ret = devm_add_action_or_reset(dev, tf103c_dock_non_devm_cleanup, dock);
+ if (ret)
+ return ret;
+
+ /*
+	 * The pin is configured as input by default; use ASIS because
+	 * otherwise the gpio-crystalcove.c driver switches off the internal
+	 * pull-down, replacing it with a pull-up.
+ */
+ board_rev_gpio = gpiod_get(dev, "board_rev", GPIOD_ASIS);
+ if (IS_ERR(board_rev_gpio))
+ return dev_err_probe(dev, PTR_ERR(board_rev_gpio), "requesting board_rev GPIO\n");
+ dock->board_rev = gpiod_get_value_cansleep(board_rev_gpio) + 1;
+ gpiod_put(board_rev_gpio);
+
+ /*
+ * The Android driver drives the dock-pwr-en pin high at probe for
+	 * revision 2 boards and then apparently never touches it again.
+	 * This code has only been tested on a revision 1 board, so for now
+	 * just mimic what Android does on revision 2 boards.
+ */
+ flags = (dock->board_rev == 2) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ dock->pwr_en = devm_gpiod_get(dev, "dock_pwr_en", flags);
+ if (IS_ERR(dock->pwr_en))
+ return dev_err_probe(dev, PTR_ERR(dock->pwr_en), "requesting pwr_en GPIO\n");
+
+ dock->irq_gpio = devm_gpiod_get(dev, "dock_irq", GPIOD_IN);
+ if (IS_ERR(dock->irq_gpio))
+ return dev_err_probe(dev, PTR_ERR(dock->irq_gpio), "requesting IRQ GPIO\n");
+
+ dock->irq = gpiod_to_irq(dock->irq_gpio);
+ if (dock->irq < 0)
+		return dev_err_probe(dev, dock->irq, "getting dock IRQ\n");
+
+ ret = devm_request_threaded_irq(dev, dock->irq, NULL, tf103c_dock_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ "dock_irq", dock);
+ if (ret)
+		return dev_err_probe(dev, ret, "requesting dock IRQ\n");
+
+ dock->hpd_gpio = devm_gpiod_get(dev, "dock_hpd", GPIOD_IN);
+ if (IS_ERR(dock->hpd_gpio))
+ return dev_err_probe(dev, PTR_ERR(dock->hpd_gpio), "requesting HPD GPIO\n");
+
+ dock->hpd_irq = gpiod_to_irq(dock->hpd_gpio);
+ if (dock->hpd_irq < 0)
+		return dev_err_probe(dev, dock->hpd_irq, "getting HPD IRQ\n");
+
+ ret = devm_request_irq(dev, dock->hpd_irq, tf103c_dock_hpd_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN,
+ "dock_hpd", dock);
+ if (ret)
+ return ret;
+
+ /*
+	 * 2. Create I2C clients. The dock uses 4 different i2c addresses;
+	 * the ACPI NPCE69A node being probed points to the EC address.
+ */
+ dock->ec_client = client;
+
+ strscpy(board_info.type, "tf103c-dock-intr", I2C_NAME_SIZE);
+ board_info.addr = TF103C_DOCK_INTR_ADDR;
+ board_info.dev_name = TF103C_DOCK_DEV_NAME "-intr";
+
+ dock->intr_client = i2c_new_client_device(client->adapter, &board_info);
+ if (IS_ERR(dock->intr_client))
+ return dev_err_probe(dev, PTR_ERR(dock->intr_client), "creating intr client\n");
+
+ strscpy(board_info.type, "tf103c-dock-kbd", I2C_NAME_SIZE);
+ board_info.addr = TF103C_DOCK_KBD_ADDR;
+ board_info.dev_name = TF103C_DOCK_DEV_NAME "-kbd";
+
+ dock->kbd_client = i2c_new_client_device(client->adapter, &board_info);
+ if (IS_ERR(dock->kbd_client))
+ return dev_err_probe(dev, PTR_ERR(dock->kbd_client), "creating kbd client\n");
+
+ /* 3. Create input_dev for the top row of the keyboard */
+ dock->input = devm_input_allocate_device(dev);
+ if (!dock->input)
+ return -ENOMEM;
+
+ dock->input->name = "Asus TF103C Dock Top Row Keys";
+ dock->input->phys = dev_name(dev);
+ dock->input->dev.parent = dev;
+ dock->input->id.bustype = BUS_I2C;
+	dock->input->id.vendor = 0x0b05; /* USB_VENDOR_ID_ASUSTEK */
+	dock->input->id.product = 0x0103; /* From TF-103-C */
+ dock->input->id.version = 0x0100; /* 1.0 */
+
+ for (i = 0; i < ARRAY_SIZE(tf103c_dock_toprow_codes); i++) {
+ input_set_capability(dock->input, EV_KEY, tf103c_dock_toprow_codes[i][0]);
+ input_set_capability(dock->input, EV_KEY, tf103c_dock_toprow_codes[i][1]);
+ }
+ input_set_capability(dock->input, EV_KEY, KEY_RIGHTALT);
+
+ ret = input_register_device(dock->input);
+ if (ret)
+ return ret;
+
+ /* 4. Create HID device for the keyboard */
+ dock->hid = hid_allocate_device();
+ if (IS_ERR(dock->hid))
+ return dev_err_probe(dev, PTR_ERR(dock->hid), "allocating hid dev\n");
+
+ dock->hid->driver_data = dock;
+ dock->hid->ll_driver = &tf103c_dock_hid_ll_driver;
+ dock->hid->dev.parent = &client->dev;
+ dock->hid->bus = BUS_I2C;
+ dock->hid->vendor = 0x0b05; /* USB_VENDOR_ID_ASUSTEK */
+ dock->hid->product = 0x0103; /* From TF-103-C */
+ dock->hid->version = 0x0100; /* 1.0 */
+ strscpy(dock->hid->name, "Asus TF103C Dock Keyboard", sizeof(dock->hid->name));
+ strscpy(dock->hid->phys, dev_name(dev), sizeof(dock->hid->phys));
+
+ ret = hid_add_device(dock->hid);
+ if (ret)
+ return dev_err_probe(dev, ret, "adding hid dev\n");
+
+ /* 5. Setup irqchip for touchpad IRQ pass-through */
+ dock->tp_irqchip.name = KBUILD_MODNAME;
+
+ dock->tp_irq_domain = irq_domain_add_linear(NULL, 1, &irq_domain_simple_ops, NULL);
+ if (!dock->tp_irq_domain)
+ return -ENOMEM;
+
+ dock->tp_irq = irq_create_mapping(dock->tp_irq_domain, 0);
+ if (!dock->tp_irq)
+ return -ENOMEM;
+
+ irq_set_chip_data(dock->tp_irq, dock);
+ irq_set_chip_and_handler(dock->tp_irq, &dock->tp_irqchip, handle_simple_irq);
+ irq_set_nested_thread(dock->tp_irq, true);
+ irq_set_noprobe(dock->tp_irq);
+
+ dev_info(dev, "Asus TF103C board-revision: %d\n", dock->board_rev);
+
+ tf103c_dock_start_hpd(dock);
+
+ device_init_wakeup(dev, true);
+ i2c_set_clientdata(client, dock);
+ return 0;
+}
+
+static int tf103c_dock_remove(struct i2c_client *client)
+{
+ struct tf103c_dock_data *dock = i2c_get_clientdata(client);
+
+ tf103c_dock_stop_hpd(dock);
+ tf103c_dock_disable(dock);
+
+ return 0;
+}
+
+static int __maybe_unused tf103c_dock_suspend(struct device *dev)
+{
+ struct tf103c_dock_data *dock = dev_get_drvdata(dev);
+
+ tf103c_dock_stop_hpd(dock);
+
+ if (dock->enabled) {
+ tf103c_dock_ec_cmd(dock, tf103c_dock_suspend_cmd);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(dock->irq);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tf103c_dock_resume(struct device *dev)
+{
+ struct tf103c_dock_data *dock = dev_get_drvdata(dev);
+
+ if (dock->enabled) {
+ if (device_may_wakeup(dev))
+ disable_irq_wake(dock->irq);
+
+ /* Don't try to resume if the dock was unplugged during suspend */
+ if (gpiod_get_value(dock->hpd_gpio))
+ tf103c_dock_ec_cmd(dock, tf103c_dock_enable_cmd);
+ }
+
+ tf103c_dock_start_hpd(dock);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume);
+
+static const struct acpi_device_id tf103c_dock_acpi_match[] = {
+ {"NPCE69A"},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, tf103c_dock_acpi_match);
+
+static struct i2c_driver tf103c_dock_driver = {
+ .driver = {
+ .name = "asus-tf103c-dock",
+ .pm = &tf103c_dock_pm_ops,
+ .acpi_match_table = tf103c_dock_acpi_match,
+ },
+ .probe_new = tf103c_dock_probe,
+ .remove = tf103c_dock_remove,
+};
+module_i2c_driver(tf103c_dock_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_DESCRIPTION("X86 Android tablets DSDT fixups driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 8f067ac4e952..a3b83b22a3b1 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -13,29 +13,29 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/debugfs.h>
+#include <linux/dmi.h>
+#include <linux/fb.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
+#include <linux/kernel.h>
#include <linux/leds.h>
-#include <linux/rfkill.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/platform_data/x86/asus-wmi.h>
+#include <linux/platform_device.h>
#include <linux/platform_profile.h>
#include <linux/power_supply.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/debugfs.h>
+#include <linux/rfkill.h>
#include <linux/seq_file.h>
-#include <linux/platform_data/x86/asus-wmi.h>
-#include <linux/platform_device.h>
-#include <linux/acpi.h>
-#include <linux/dmi.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include <linux/units.h>
#include <acpi/battery.h>
@@ -43,8 +43,8 @@
#include "asus-wmi.h"
-MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>, "
- "Yong Wang <yong.y.wang@intel.com>");
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
+MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_DESCRIPTION("Asus Generic WMI Driver");
MODULE_LICENSE("GPL");
@@ -106,8 +106,17 @@ module_param(fnlock_default, bool, 0444);
#define WMI_EVENT_MASK 0xFFFF
+#define FAN_CURVE_POINTS 8
+#define FAN_CURVE_BUF_LEN (FAN_CURVE_POINTS * 2)
+#define FAN_CURVE_DEV_CPU 0x00
+#define FAN_CURVE_DEV_GPU 0x01
+/* Mask to determine if setting temperature or percentage */
+#define FAN_CURVE_PWM_MASK 0x04
+
static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
+static int throttle_thermal_policy_write(struct asus_wmi *);
+
static bool ashs_present(void)
{
int i = 0;
@@ -122,7 +131,8 @@ struct bios_args {
u32 arg0;
u32 arg1;
u32 arg2; /* At least TUF Gaming series uses 3 dword input buffer. */
- u32 arg4;
+ u32 arg3;
+ u32 arg4; /* Some ROG laptops require a full 5 input args */
u32 arg5;
} __packed;
@@ -173,6 +183,13 @@ enum fan_type {
FAN_TYPE_SPEC83, /* starting in Spec 8.3, use CPU_FAN_CTRL */
};
+struct fan_curve_data {
+ bool enabled;
+ u32 device_id;
+ u8 temps[FAN_CURVE_POINTS];
+ u8 percents[FAN_CURVE_POINTS];
+};
+
struct asus_wmi {
int dsts_id;
int spec;
@@ -220,6 +237,10 @@ struct asus_wmi {
bool throttle_thermal_policy_available;
u8 throttle_thermal_policy_mode;
+ bool cpu_fan_curve_available;
+ bool gpu_fan_curve_available;
+ struct fan_curve_data custom_fan_curves[2];
+
struct platform_profile_handler platform_profile_handler;
bool platform_profile_support;
@@ -285,6 +306,103 @@ int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
}
EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method);
+static int asus_wmi_evaluate_method5(u32 method_id,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 *retval)
+{
+ struct bios_args args = {
+ .arg0 = arg0,
+ .arg1 = arg1,
+ .arg2 = arg2,
+ .arg3 = arg3,
+ .arg4 = arg4,
+ };
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ union acpi_object *obj;
+ u32 tmp = 0;
+
+ status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
+ &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ tmp = (u32) obj->integer.value;
+
+ if (retval)
+ *retval = tmp;
+
+ kfree(obj);
+
+ if (tmp == ASUS_WMI_UNSUPPORTED_METHOD)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Returns an error if the method output is not a buffer. Typically this
+ * means that the method called is unsupported.
+ */
+static int asus_wmi_evaluate_method_buf(u32 method_id,
+ u32 arg0, u32 arg1, u8 *ret_buffer, size_t size)
+{
+ struct bios_args args = {
+ .arg0 = arg0,
+ .arg1 = arg1,
+ .arg2 = 0,
+ };
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ union acpi_object *obj;
+ int err = 0;
+
+ status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
+ &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+	obj = (union acpi_object *)output.pointer;
+	if (!obj)
+		return -ENODATA;
+
+ switch (obj->type) {
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length > size)
+ err = -ENOSPC;
+ if (obj->buffer.length == 0)
+ err = -ENODATA;
+
+ memcpy(ret_buffer, obj->buffer.pointer, obj->buffer.length);
+ break;
+ case ACPI_TYPE_INTEGER:
+ err = (u32)obj->integer.value;
+
+ if (err == ASUS_WMI_UNSUPPORTED_METHOD)
+ err = -ENODEV;
+ /*
+ * At least one method returns a 0 with no buffer if no arg
+ * is provided, such as ASUS_WMI_DEVID_CPU_FAN_CURVE
+ */
+ if (err == 0)
+ err = -ENODATA;
+ break;
+ default:
+ err = -ENODATA;
+ break;
+ }
+
+ kfree(obj);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
{
struct acpi_buffer input;
@@ -1036,12 +1154,10 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
absent = (l == 0xffffffff);
if (blocked != absent) {
- pr_warn("BIOS says wireless lan is %s, "
- "but the pci device is %s\n",
+ pr_warn("BIOS says wireless lan is %s, but the pci device is %s\n",
blocked ? "blocked" : "unblocked",
absent ? "absent" : "present");
- pr_warn("skipped wireless hotplug as probably "
- "inappropriate for this model\n");
+ pr_warn("skipped wireless hotplug as probably inappropriate for this model\n");
goto out_unlock;
}
@@ -1806,6 +1922,13 @@ static ssize_t pwm1_enable_store(struct device *dev,
}
asus->fan_pwm_mode = state;
+
+	/* Must be set to disabled if the mode is toggled */
+ if (asus->cpu_fan_curve_available)
+ asus->custom_fan_curves[FAN_CURVE_DEV_CPU].enabled = false;
+ if (asus->gpu_fan_curve_available)
+ asus->custom_fan_curves[FAN_CURVE_DEV_GPU].enabled = false;
+
return count;
}
@@ -1953,9 +2076,9 @@ static int fan_boost_mode_check_present(struct asus_wmi *asus)
static int fan_boost_mode_write(struct asus_wmi *asus)
{
- int err;
- u8 value;
u32 retval;
+ u8 value;
+ int err;
value = asus->fan_boost_mode;
@@ -2013,10 +2136,10 @@ static ssize_t fan_boost_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int result;
- u8 new_mode;
struct asus_wmi *asus = dev_get_drvdata(dev);
u8 mask = asus->fan_boost_mode_mask;
+ u8 new_mode;
+ int result;
result = kstrtou8(buf, 10, &new_mode);
if (result < 0) {
@@ -2043,6 +2166,426 @@ static ssize_t fan_boost_mode_store(struct device *dev,
// Fan boost mode: 0 - normal, 1 - overboost, 2 - silent
static DEVICE_ATTR_RW(fan_boost_mode);
+/* Custom fan curves **********************************************************/
+
+static void fan_curve_copy_from_buf(struct fan_curve_data *data, u8 *buf)
+{
+ int i;
+
+ for (i = 0; i < FAN_CURVE_POINTS; i++) {
+ data->temps[i] = buf[i];
+ }
+
+ for (i = 0; i < FAN_CURVE_POINTS; i++) {
+ data->percents[i] =
+ 255 * buf[i + FAN_CURVE_POINTS] / 100;
+ }
+}
+
+static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
+{
+ struct fan_curve_data *curves;
+ u8 buf[FAN_CURVE_BUF_LEN];
+ int fan_idx = 0;
+ u8 mode = 0;
+ int err;
+
+ if (asus->throttle_thermal_policy_available)
+ mode = asus->throttle_thermal_policy_mode;
+ /* DEVID_<C/G>PU_FAN_CURVE is switched for OVERBOOST vs SILENT */
+ if (mode == 2)
+ mode = 1;
+ else if (mode == 1)
+ mode = 2;
+
+ if (fan_dev == ASUS_WMI_DEVID_GPU_FAN_CURVE)
+ fan_idx = FAN_CURVE_DEV_GPU;
+
+ curves = &asus->custom_fan_curves[fan_idx];
+ err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf,
+ FAN_CURVE_BUF_LEN);
+ if (err)
+ return err;
+
+ fan_curve_copy_from_buf(curves, buf);
+ curves->device_id = fan_dev;
+
+ return 0;
+}
+
+/* Check if capability exists, and populate defaults */
+static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
+ u32 fan_dev)
+{
+ int err;
+
+ *available = false;
+
+ err = fan_curve_get_factory_default(asus, fan_dev);
+ if (err) {
+ if (err == -ENODEV)
+ return 0;
+ return err;
+ }
+
+ *available = true;
+ return 0;
+}
+
+/* Determine which fan the attribute is for if SENSOR_ATTR */
+static struct fan_curve_data *fan_curve_attr_select(struct asus_wmi *asus,
+ struct device_attribute *attr)
+{
+ int index = to_sensor_dev_attr(attr)->index;
+
+ return &asus->custom_fan_curves[index & FAN_CURVE_DEV_GPU];
+}
+
+/* Determine which fan the attribute is for if SENSOR_ATTR_2 */
+static struct fan_curve_data *fan_curve_attr_2_select(struct asus_wmi *asus,
+ struct device_attribute *attr)
+{
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+
+ return &asus->custom_fan_curves[nr & FAN_CURVE_DEV_GPU];
+}
+
+static ssize_t fan_curve_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *dev_attr = to_sensor_dev_attr_2(attr);
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ struct fan_curve_data *data;
+ int value, index, nr;
+
+ data = fan_curve_attr_2_select(asus, attr);
+ index = dev_attr->index;
+ nr = dev_attr->nr;
+
+ if (nr & FAN_CURVE_PWM_MASK)
+ value = data->percents[index];
+ else
+ value = data->temps[index];
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
+/*
+ * "fan_dev" is the related WMI method such as ASUS_WMI_DEVID_CPU_FAN_CURVE.
+ */
+static int fan_curve_write(struct asus_wmi *asus,
+ struct fan_curve_data *data)
+{
+ u32 arg1 = 0, arg2 = 0, arg3 = 0, arg4 = 0;
+ u8 *percents = data->percents;
+ u8 *temps = data->temps;
+ int ret, i, shift = 0;
+
+ if (!data->enabled)
+ return 0;
+
+ for (i = 0; i < FAN_CURVE_POINTS / 2; i++) {
+ arg1 += (temps[i]) << shift;
+ arg2 += (temps[i + 4]) << shift;
+ /* Scale to percentage for device */
+ arg3 += (100 * percents[i] / 255) << shift;
+ arg4 += (100 * percents[i + 4] / 255) << shift;
+ shift += 8;
+ }
+
+ return asus_wmi_evaluate_method5(ASUS_WMI_METHODID_DEVS,
+ data->device_id,
+ arg1, arg2, arg3, arg4, &ret);
+}
+
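A worked example of the packing above (values assumed for illustration): with temps[0..3] = {20, 30, 40, 50}, arg1 comes out as 0x32281e14, one point per byte with the lowest-indexed point in the least significant byte:

#include <linux/types.h>

/* Illustration only: mirrors the shift/accumulate loop in fan_curve_write(). */
static u32 example_pack_points(void)
{
	const u8 temps[4] = { 20, 30, 40, 50 };
	u32 arg1 = 0;
	int i, shift = 0;

	for (i = 0; i < 4; i++) {
		arg1 += temps[i] << shift;
		shift += 8;
	}

	return arg1;	/* 0x32281e14 */
}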
+static ssize_t fan_curve_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct sensor_device_attribute_2 *dev_attr = to_sensor_dev_attr_2(attr);
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ struct fan_curve_data *data;
+ u8 value;
+ int err;
+
+ int pwm = dev_attr->nr & FAN_CURVE_PWM_MASK;
+ int index = dev_attr->index;
+
+ data = fan_curve_attr_2_select(asus, attr);
+
+ err = kstrtou8(buf, 10, &value);
+ if (err < 0)
+ return err;
+
+	if (pwm)
+		data->percents[index] = value;
+	else
+		data->temps[index] = value;
+
+ /*
+ * Mark as disabled so the user has to explicitly enable to apply a
+ * changed fan curve. This prevents potential lockups from writing out
+ * many changes as one-write-per-change.
+ */
+ data->enabled = false;
+
+ return count;
+}
+
+static ssize_t fan_curve_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ struct fan_curve_data *data;
+ int out = 2;
+
+ data = fan_curve_attr_select(asus, attr);
+
+ if (data->enabled)
+ out = 1;
+
+ return sysfs_emit(buf, "%d\n", out);
+}
+
+static ssize_t fan_curve_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ struct fan_curve_data *data;
+ int value, err;
+
+ data = fan_curve_attr_select(asus, attr);
+
+ err = kstrtoint(buf, 10, &value);
+ if (err < 0)
+ return err;
+
+ switch (value) {
+ case 1:
+ data->enabled = true;
+ break;
+ case 2:
+ data->enabled = false;
+ break;
+ /*
+ * Auto + reset the fan curve data to defaults. Make it an explicit
+ * option so that users don't accidentally overwrite a set fan curve.
+ */
+ case 3:
+ err = fan_curve_get_factory_default(asus, data->device_id);
+ if (err)
+ return err;
+ data->enabled = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (data->enabled) {
+ err = fan_curve_write(asus, data);
+ if (err)
+ return err;
+ } else {
+ /*
+		 * For machines with a throttle thermal policy this is the only
+		 * way to reset the fans to their default mode of operation
+		 * (it does not erase curve data).
+ */
+ if (asus->throttle_thermal_policy_available) {
+ err = throttle_thermal_policy_write(asus);
+ if (err)
+ return err;
+ /* The same is true for laptops with this fan type */
+ } else if (asus->fan_type == FAN_TYPE_SPEC83) {
+ err = asus_fan_set_auto(asus);
+ if (err)
+ return err;
+ } else {
+ /* Safeguard against faulty ACPI tables */
+ err = fan_curve_get_factory_default(asus, data->device_id);
+ if (err)
+ return err;
+ err = fan_curve_write(asus, data);
+ if (err)
+ return err;
+ }
+ }
+ return count;
+}
+
+/* CPU */
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, fan_curve_enable, FAN_CURVE_DEV_CPU);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 0);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 1);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 2);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 3);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 4);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 5);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 6);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_temp, fan_curve,
+ FAN_CURVE_DEV_CPU, 7);
+
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 0);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 1);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 2);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 3);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 4);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 5);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 6);
+static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_pwm, fan_curve,
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 7);
+
+/* GPU */
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, fan_curve_enable, FAN_CURVE_DEV_GPU);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 0);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 1);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 2);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 3);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 4);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 5);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 6);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_temp, fan_curve,
+ FAN_CURVE_DEV_GPU, 7);
+
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 0);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 1);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 2);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 3);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 4);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 5);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 6);
+static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_pwm, fan_curve,
+ FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 7);
+
+static struct attribute *asus_fan_curve_attr[] = {
+ /* CPU */
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point7_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point8_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point7_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point8_pwm.dev_attr.attr,
+ /* GPU */
+ &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point5_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point6_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point7_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point8_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point7_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point8_pwm.dev_attr.attr,
+ NULL
+};
+
+static umode_t asus_fan_curve_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct asus_wmi *asus = dev_get_drvdata(dev->parent);
+
+ /*
+ * Check the name's fourth character ("pwm1..." vs "pwm2...") instead of
+ * casting attr, as two attribute types (attr and attr_2) are involved
+ * here.
+ */
+ if (asus->cpu_fan_curve_available && attr->name[3] == '1')
+ return 0644;
+
+ if (asus->gpu_fan_curve_available && attr->name[3] == '2')
+ return 0644;
+
+ return 0;
+}
+
+static const struct attribute_group asus_fan_curve_attr_group = {
+ .is_visible = asus_fan_curve_is_visible,
+ .attrs = asus_fan_curve_attr,
+};
+__ATTRIBUTE_GROUPS(asus_fan_curve_attr);
+
+/*
+ * Must be initialised after throttle_thermal_policy_check_present() as
+ * we check the status of throttle_thermal_policy_available during init.
+ */
+static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus)
+{
+ struct device *dev = &asus->platform_device->dev;
+ struct device *hwmon;
+ int err;
+
+ err = fan_curve_check_present(asus, &asus->cpu_fan_curve_available,
+ ASUS_WMI_DEVID_CPU_FAN_CURVE);
+ if (err)
+ return err;
+
+ err = fan_curve_check_present(asus, &asus->gpu_fan_curve_available,
+ ASUS_WMI_DEVID_GPU_FAN_CURVE);
+ if (err)
+ return err;
+
+ if (!asus->cpu_fan_curve_available && !asus->gpu_fan_curve_available)
+ return 0;
+
+ hwmon = devm_hwmon_device_register_with_groups(
+ dev, "asus_custom_fan_curve", asus, asus_fan_curve_attr_groups);
+
+ if (IS_ERR(hwmon)) {
+ dev_err(dev,
+ "Could not register asus_custom_fan_curve device\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
/* Throttle thermal policy ****************************************************/
static int throttle_thermal_policy_check_present(struct asus_wmi *asus)
@@ -2092,6 +2635,12 @@ static int throttle_thermal_policy_write(struct asus_wmi *asus)
return -EIO;
}
+ /* Must set to disabled if mode is toggled */
+ if (asus->cpu_fan_curve_available)
+ asus->custom_fan_curves[FAN_CURVE_DEV_CPU].enabled = false;
+ if (asus->gpu_fan_curve_available)
+ asus->custom_fan_curves[FAN_CURVE_DEV_GPU].enabled = false;
+
return 0;
}
@@ -3035,6 +3584,10 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_hwmon;
+ err = asus_wmi_custom_fan_curve_init(asus);
+ if (err)
+ goto fail_custom_fan_curve;
+
err = asus_wmi_led_init(asus);
if (err)
goto fail_leds;
@@ -3106,6 +3659,7 @@ fail_input:
asus_wmi_sysfs_exit(asus->platform_device);
fail_sysfs:
fail_throttle_thermal_policy:
+fail_custom_fan_curve:
fail_platform_profile_setup:
if (asus->platform_profile_support)
platform_profile_remove();
@@ -3131,6 +3685,7 @@ static int asus_wmi_remove(struct platform_device *device)
asus_wmi_debugfs_exit(asus);
asus_wmi_sysfs_exit(asus->platform_device);
asus_fan_set_auto(asus);
+ throttle_thermal_policy_set_default(asus);
asus_wmi_battery_exit(asus);
if (asus->platform_profile_support)
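
The fan curve write above packs the eight curve points into four u32 WMI arguments, one byte per point: temps[0..3] into arg1, temps[4..7] into arg2, and the sysfs 0-255 PWM values rescaled to the device's 0-100 range into arg3 and arg4. A standalone userspace sketch of the same packing, with the curve values invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack four bytes into one 32-bit argument, lowest index in bits 7:0. */
    static uint32_t pack4(const uint8_t *v)
    {
            return v[0] | (v[1] << 8) | ((uint32_t)v[2] << 16) | ((uint32_t)v[3] << 24);
    }

    int main(void)
    {
            /* Example curve: temps in degrees C, PWM on the sysfs 0-255 scale. */
            uint8_t temps[8]    = { 30, 40, 50, 60, 70, 80, 90, 100 };
            uint8_t percents[8] = { 0, 26, 51, 77, 102, 153, 204, 255 };
            uint8_t scaled[8];
            int i;

            for (i = 0; i < 8; i++)
                    scaled[i] = 100 * percents[i] / 255; /* device expects 0-100 */

            printf("arg1=%#010x arg2=%#010x arg3=%#010x arg4=%#010x\n",
                   pack4(temps), pack4(temps + 4), pack4(scaled), pack4(scaled + 4));
            return 0;
    }
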
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 435a91fe2568..e9f852f7c27f 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -355,39 +355,20 @@ static int lis3lv02d_remove(struct platform_device *device)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int lis3lv02d_suspend(struct device *dev)
+static int __maybe_unused lis3lv02d_suspend(struct device *dev)
{
/* make sure the device is off when we suspend */
lis3lv02d_poweroff(&lis3_dev);
return 0;
}
-static int lis3lv02d_resume(struct device *dev)
+static int __maybe_unused lis3lv02d_resume(struct device *dev)
{
lis3lv02d_poweron(&lis3_dev);
return 0;
}
-static int lis3lv02d_restore(struct device *dev)
-{
- lis3lv02d_poweron(&lis3_dev);
- return 0;
-}
-
-static const struct dev_pm_ops hp_accel_pm = {
- .suspend = lis3lv02d_suspend,
- .resume = lis3lv02d_resume,
- .freeze = lis3lv02d_suspend,
- .thaw = lis3lv02d_resume,
- .poweroff = lis3lv02d_suspend,
- .restore = lis3lv02d_restore,
-};
-
-#define HP_ACCEL_PM (&hp_accel_pm)
-#else
-#define HP_ACCEL_PM NULL
-#endif
+static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
/* For the HP MDPS aka 3D Driveguard */
static struct platform_driver lis3lv02d_driver = {
@@ -395,7 +376,7 @@ static struct platform_driver lis3lv02d_driver = {
.remove = lis3lv02d_remove,
.driver = {
.name = "hp_accel",
- .pm = HP_ACCEL_PM,
+ .pm = &hp_accel_pm,
.acpi_match_table = lis3lv02d_device_ids,
},
};
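
The hp_accel change above is the standard conversion away from #ifdef CONFIG_PM_SLEEP guards: the callbacks are marked __maybe_unused so the compiler can silently discard them when sleep support is configured out, and SIMPLE_DEV_PM_OPS() wires one suspend/resume pair into all six system-sleep callbacks (the separate restore handler was redundant since it matched resume). A minimal sketch of the same pattern for a hypothetical "foo" platform driver:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static int __maybe_unused foo_suspend(struct device *dev)
    {
            /* power the device down; discarded when !CONFIG_PM_SLEEP */
            return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
            /* power the device back up */
            return 0;
    }

    /* Also covers .freeze/.thaw/.poweroff/.restore with the same pair. */
    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm = &foo_pm_ops,
            },
    };
    module_platform_driver(foo_driver);

    MODULE_LICENSE("GPL");
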
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index 40096b25994a..8e65086bb6c8 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -170,3 +170,14 @@ config INTEL_UNCORE_FREQ_CONTROL
To compile this driver as a module, choose M here: the module
will be called intel-uncore-frequency.
+
+config INTEL_VSEC
+ tristate "Intel Vendor Specific Extended Capabilities Driver"
+ depends on PCI
+ select AUXILIARY_BUS
+ help
+ Adds support for feature drivers exposed using Intel PCIe VSEC and
+ DVSEC.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vsec.
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
index 7c24be2423d8..35f2066578b2 100644
--- a/drivers/platform/x86/intel/Makefile
+++ b/drivers/platform/x86/intel/Makefile
@@ -26,10 +26,14 @@ intel_int0002_vgpio-y := int0002_vgpio.o
obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
intel_oaktrail-y := oaktrail.o
obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
+intel_vsec-y := vsec.o
+obj-$(CONFIG_INTEL_VSEC) += intel_vsec.o
# Intel PMIC / PMC / P-Unit drivers
intel_bxtwc_tmu-y := bxtwc_tmu.o
obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o
+intel_crystal_cove_charger-y := crystal_cove_charger.o
+obj-$(CONFIG_X86_ANDROID_TABLETS) += intel_crystal_cove_charger.o
intel_chtdc_ti_pwrbtn-y := chtdc_ti_pwrbtn.o
obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
intel_mrfld_pwrbtn-y := mrfld_pwrbtn.o
diff --git a/drivers/platform/x86/intel/crystal_cove_charger.c b/drivers/platform/x86/intel/crystal_cove_charger.c
new file mode 100644
index 000000000000..0374bc742513
--- /dev/null
+++ b/drivers/platform/x86/intel/crystal_cove_charger.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for the external-charger IRQ pass-through function of the
+ * Intel Bay Trail Crystal Cove PMIC.
+ *
+ * Note this is NOT a power_supply class driver; it only deals with IRQ
+ * pass-through. This requires a separate driver because the PMIC's
+ * level 2 interrupt for this must be explicitly acked.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define CHGRIRQ_REG 0x0a
+
+struct crystal_cove_charger_data {
+ struct mutex buslock; /* irq_bus_lock */
+ struct irq_chip irqchip;
+ struct regmap *regmap;
+ struct irq_domain *irq_domain;
+ int irq;
+ int charger_irq;
+ bool irq_enabled;
+ bool irq_is_enabled;
+};
+
+static irqreturn_t crystal_cove_charger_irq(int irq, void *data)
+{
+ struct crystal_cove_charger_data *charger = data;
+
+ /* No need to read CHGRIRQ_REG as there is only 1 IRQ */
+ handle_nested_irq(charger->charger_irq);
+
+ /* Ack CHGRIRQ 0 */
+ regmap_write(charger->regmap, CHGRIRQ_REG, BIT(0));
+
+ return IRQ_HANDLED;
+}
+
+static void crystal_cove_charger_irq_bus_lock(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&charger->buslock);
+}
+
+static void crystal_cove_charger_irq_bus_sync_unlock(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ if (charger->irq_is_enabled != charger->irq_enabled) {
+ if (charger->irq_enabled)
+ enable_irq(charger->irq);
+ else
+ disable_irq(charger->irq);
+
+ charger->irq_is_enabled = charger->irq_enabled;
+ }
+
+ mutex_unlock(&charger->buslock);
+}
+
+static void crystal_cove_charger_irq_unmask(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ charger->irq_enabled = true;
+}
+
+static void crystal_cove_charger_irq_mask(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ charger->irq_enabled = false;
+}
+
+static void crystal_cove_charger_rm_irq_domain(void *data)
+{
+ struct crystal_cove_charger_data *charger = data;
+
+ irq_domain_remove(charger->irq_domain);
+}
+
+static int crystal_cove_charger_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct crystal_cove_charger_data *charger;
+ int ret;
+
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->regmap = pmic->regmap;
+ mutex_init(&charger->buslock);
+
+ charger->irq = platform_get_irq(pdev, 0);
+ if (charger->irq < 0)
+ return charger->irq;
+
+ charger->irq_domain = irq_domain_create_linear(dev_fwnode(pdev->dev.parent), 1,
+ &irq_domain_simple_ops, NULL);
+ if (!charger->irq_domain)
+ return -ENOMEM;
+
+ /* Distinguish this IRQ domain from others sharing the same (MFD) fwnode */
+ irq_domain_update_bus_token(charger->irq_domain, DOMAIN_BUS_WAKEUP);
+
+ ret = devm_add_action_or_reset(&pdev->dev, crystal_cove_charger_rm_irq_domain, charger);
+ if (ret)
+ return ret;
+
+ charger->charger_irq = irq_create_mapping(charger->irq_domain, 0);
+ if (!charger->charger_irq)
+ return -ENOMEM;
+
+ charger->irqchip.name = KBUILD_MODNAME;
+ charger->irqchip.irq_unmask = crystal_cove_charger_irq_unmask;
+ charger->irqchip.irq_mask = crystal_cove_charger_irq_mask;
+ charger->irqchip.irq_bus_lock = crystal_cove_charger_irq_bus_lock;
+ charger->irqchip.irq_bus_sync_unlock = crystal_cove_charger_irq_bus_sync_unlock;
+
+ irq_set_chip_data(charger->charger_irq, charger);
+ irq_set_chip_and_handler(charger->charger_irq, &charger->irqchip, handle_simple_irq);
+ irq_set_nested_thread(charger->charger_irq, true);
+ irq_set_noprobe(charger->charger_irq);
+
+ ret = devm_request_threaded_irq(&pdev->dev, charger->irq, NULL,
+ crystal_cove_charger_irq,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ KBUILD_MODNAME, charger);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "requesting irq\n");
+
+ return 0;
+}
+
+static struct platform_driver crystal_cove_charger_driver = {
+ .probe = crystal_cove_charger_probe,
+ .driver = {
+ .name = "crystal_cove_charger",
+ },
+};
+module_platform_driver(crystal_cove_charger_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_DESCRIPTION("Intel Bay Trail Crystal Cove external charger IRQ pass-through");
+MODULE_LICENSE("GPL");
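
A driver consuming the pass-through IRQ would locate this domain on the PMIC fwnode via its DOMAIN_BUS_WAKEUP token, look up the single mapping, and request it as a nested thread. A hedged sketch under those assumptions; no such consumer is part of this patch, and the function and device names below are invented:

    #include <linux/device.h>
    #include <linux/interrupt.h>
    #include <linux/irqdomain.h>

    static irqreturn_t example_charger_event(int irq, void *data)
    {
            /* handle the external-charger event */
            return IRQ_HANDLED;
    }

    static int example_charger_get_irq(struct device *pmic_dev, void *priv)
    {
            struct irq_domain *domain;
            int virq;

            domain = irq_find_matching_fwnode(dev_fwnode(pmic_dev), DOMAIN_BUS_WAKEUP);
            if (!domain)
                    return -EPROBE_DEFER;

            virq = irq_find_mapping(domain, 0); /* hwirq 0 is the only line */
            if (!virq)
                    return -ENOENT;

            /* Nested IRQ: thread-only handler, no hard-IRQ context handler. */
            return request_threaded_irq(virq, NULL, example_charger_event,
                                        IRQF_ONESHOT, "example-charger", priv);
    }
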
diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile
index 2362e04db18d..cfec7784c5c9 100644
--- a/drivers/platform/x86/intel/int3472/Makefile
+++ b/drivers/platform/x86/intel/int3472/Makefile
@@ -1,5 +1,4 @@
-obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472.o
-intel_skl_int3472-y := intel_skl_int3472_common.o \
- intel_skl_int3472_discrete.o \
- intel_skl_int3472_tps68470.o \
- intel_skl_int3472_clk_and_regulator.o
+obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472_discrete.o \
+ intel_skl_int3472_tps68470.o
+intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o common.o
+intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o common.o
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
index 1700e7557a82..1cf958983e86 100644
--- a/drivers/platform/x86/intel/int3472/intel_skl_int3472_clk_and_regulator.c
+++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
@@ -9,7 +9,7 @@
#include <linux/regulator/driver.h>
#include <linux/slab.h>
-#include "intel_skl_int3472_common.h"
+#include "common.h"
/*
* The regulators have to have .ops to be valid, but the only ops we actually
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
new file mode 100644
index 000000000000..77cf058e4168
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *id)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle handle = adev->handle;
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = acpi_evaluate_object(handle, id, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENODEV);
+
+ obj = buffer.pointer;
+ if (!obj)
+ return ERR_PTR(-ENODEV);
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ acpi_handle_err(handle, "%s object is not an ACPI buffer\n", id);
+ kfree(obj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return obj;
+}
+
+int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb)
+{
+ union acpi_object *obj;
+ int ret;
+
+ obj = skl_int3472_get_acpi_buffer(adev, "CLDB");
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->buffer.length > sizeof(*cldb)) {
+ acpi_handle_err(adev->handle, "The CLDB buffer is too large\n");
+ ret = -EINVAL;
+ goto out_free_obj;
+ }
+
+ memcpy(cldb, obj->buffer.pointer, obj->buffer.length);
+ ret = 0;
+
+out_free_obj:
+ kfree(obj);
+ return ret;
+}
+
+/* sensor_adev_ret may be NULL, name_ret must not be NULL */
+int skl_int3472_get_sensor_adev_and_name(struct device *dev,
+ struct acpi_device **sensor_adev_ret,
+ const char **name_ret)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *sensor;
+ int ret = 0;
+
+ sensor = acpi_dev_get_first_consumer_dev(adev);
+ if (!sensor) {
+ dev_err(dev, "INT3472 seems to have no dependents.\n");
+ return -ENODEV;
+ }
+
+ *name_ret = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
+ acpi_dev_name(sensor));
+ if (!*name_ret)
+ ret = -ENOMEM;
+
+ if (ret == 0 && sensor_adev_ret)
+ *sensor_adev_ret = sensor;
+ else
+ acpi_dev_put(sensor);
+
+ return ret;
+}
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h b/drivers/platform/x86/intel/int3472/common.h
index 714fde73b524..53270d19c73a 100644
--- a/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h
+++ b/drivers/platform/x86/intel/int3472/common.h
@@ -105,12 +105,12 @@ struct int3472_discrete_device {
struct gpiod_lookup_table gpios;
};
-int skl_int3472_discrete_probe(struct platform_device *pdev);
-int skl_int3472_discrete_remove(struct platform_device *pdev);
-int skl_int3472_tps68470_probe(struct i2c_client *client);
union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev,
char *id);
int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb);
+int skl_int3472_get_sensor_adev_and_name(struct device *dev,
+ struct acpi_device **sensor_adev_ret,
+ const char **name_ret);
int skl_int3472_register_clock(struct int3472_discrete_device *int3472);
void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472);
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index e59d79c7e82f..5b514fa01a97 100644
--- a/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -14,7 +14,7 @@
#include <linux/platform_device.h>
#include <linux/uuid.h>
-#include "intel_skl_int3472_common.h"
+#include "common.h"
/*
* 79234640-9e10-4fea-a5c1-b5aa8b19756f
@@ -332,7 +332,9 @@ static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
return 0;
}
-int skl_int3472_discrete_probe(struct platform_device *pdev)
+static int skl_int3472_discrete_remove(struct platform_device *pdev);
+
+static int skl_int3472_discrete_probe(struct platform_device *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
struct int3472_discrete_device *int3472;
@@ -361,19 +363,10 @@ int skl_int3472_discrete_probe(struct platform_device *pdev)
int3472->dev = &pdev->dev;
platform_set_drvdata(pdev, int3472);
- int3472->sensor = acpi_dev_get_first_consumer_dev(adev);
- if (!int3472->sensor) {
- dev_err(&pdev->dev, "INT3472 seems to have no dependents.\n");
- return -ENODEV;
- }
-
- int3472->sensor_name = devm_kasprintf(int3472->dev, GFP_KERNEL,
- I2C_DEV_NAME_FORMAT,
- acpi_dev_name(int3472->sensor));
- if (!int3472->sensor_name) {
- ret = -ENOMEM;
- goto err_put_sensor;
- }
+ ret = skl_int3472_get_sensor_adev_and_name(&pdev->dev, &int3472->sensor,
+ &int3472->sensor_name);
+ if (ret)
+ return ret;
/*
* Initialising this list means we can call gpiod_remove_lookup_table()
@@ -387,15 +380,11 @@ int skl_int3472_discrete_probe(struct platform_device *pdev)
return ret;
}
+ acpi_dev_clear_dependencies(adev);
return 0;
-
-err_put_sensor:
- acpi_dev_put(int3472->sensor);
-
- return ret;
}
-int skl_int3472_discrete_remove(struct platform_device *pdev)
+static int skl_int3472_discrete_remove(struct platform_device *pdev)
{
struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev);
@@ -411,3 +400,23 @@ int skl_int3472_discrete_remove(struct platform_device *pdev)
return 0;
}
+
+static const struct acpi_device_id int3472_device_id[] = {
+ { "INT3472", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, int3472_device_id);
+
+static struct platform_driver int3472_discrete = {
+ .driver = {
+ .name = "int3472-discrete",
+ .acpi_match_table = int3472_device_id,
+ },
+ .probe = skl_int3472_discrete_probe,
+ .remove = skl_int3472_discrete_remove,
+};
+module_platform_driver(int3472_discrete);
+
+MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Discrete Device Driver");
+MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.c b/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.c
deleted file mode 100644
index 497e74fba75f..000000000000
--- a/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Author: Dan Scally <djrscally@gmail.com> */
-
-#include <linux/acpi.h>
-#include <linux/i2c.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "intel_skl_int3472_common.h"
-
-union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *id)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_handle handle = adev->handle;
- union acpi_object *obj;
- acpi_status status;
-
- status = acpi_evaluate_object(handle, id, NULL, &buffer);
- if (ACPI_FAILURE(status))
- return ERR_PTR(-ENODEV);
-
- obj = buffer.pointer;
- if (!obj)
- return ERR_PTR(-ENODEV);
-
- if (obj->type != ACPI_TYPE_BUFFER) {
- acpi_handle_err(handle, "%s object is not an ACPI buffer\n", id);
- kfree(obj);
- return ERR_PTR(-EINVAL);
- }
-
- return obj;
-}
-
-int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb)
-{
- union acpi_object *obj;
- int ret;
-
- obj = skl_int3472_get_acpi_buffer(adev, "CLDB");
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- if (obj->buffer.length > sizeof(*cldb)) {
- acpi_handle_err(adev->handle, "The CLDB buffer is too large\n");
- ret = -EINVAL;
- goto out_free_obj;
- }
-
- memcpy(cldb, obj->buffer.pointer, obj->buffer.length);
- ret = 0;
-
-out_free_obj:
- kfree(obj);
- return ret;
-}
-
-static const struct acpi_device_id int3472_device_id[] = {
- { "INT3472", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, int3472_device_id);
-
-static struct platform_driver int3472_discrete = {
- .driver = {
- .name = "int3472-discrete",
- .acpi_match_table = int3472_device_id,
- },
- .probe = skl_int3472_discrete_probe,
- .remove = skl_int3472_discrete_remove,
-};
-
-static struct i2c_driver int3472_tps68470 = {
- .driver = {
- .name = "int3472-tps68470",
- .acpi_match_table = int3472_device_id,
- },
- .probe_new = skl_int3472_tps68470_probe,
-};
-
-static int skl_int3472_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&int3472_discrete);
- if (ret)
- return ret;
-
- ret = i2c_register_driver(THIS_MODULE, &int3472_tps68470);
- if (ret)
- platform_driver_unregister(&int3472_discrete);
-
- return ret;
-}
-module_init(skl_int3472_init);
-
-static void skl_int3472_exit(void)
-{
- platform_driver_unregister(&int3472_discrete);
- i2c_del_driver(&int3472_tps68470);
-}
-module_exit(skl_int3472_exit);
-
-MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Device Driver");
-MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
index c05b4cf502fe..22f61b47f9e5 100644
--- a/drivers/platform/x86/intel/int3472/intel_skl_int3472_tps68470.c
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -2,27 +2,27 @@
/* Author: Dan Scally <djrscally@gmail.com> */
#include <linux/i2c.h>
+#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps68470.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/tps68470.h>
#include <linux/regmap.h>
+#include <linux/string.h>
-#include "intel_skl_int3472_common.h"
+#include "common.h"
+#include "tps68470.h"
#define DESIGNED_FOR_CHROMEOS 1
#define DESIGNED_FOR_WINDOWS 2
+#define TPS68470_WIN_MFD_CELL_COUNT 3
+
static const struct mfd_cell tps68470_cros[] = {
{ .name = "tps68470-gpio" },
{ .name = "tps68470_pmic_opregion" },
};
-static const struct mfd_cell tps68470_win[] = {
- { .name = "tps68470-gpio" },
- { .name = "tps68470-clk" },
- { .name = "tps68470-regulator" },
-};
-
static const struct regmap_config tps68470_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -95,13 +95,21 @@ static int skl_int3472_tps68470_calc_type(struct acpi_device *adev)
return DESIGNED_FOR_WINDOWS;
}
-int skl_int3472_tps68470_probe(struct i2c_client *client)
+static int skl_int3472_tps68470_probe(struct i2c_client *client)
{
struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+ const struct int3472_tps68470_board_data *board_data;
+ struct tps68470_clk_platform_data clk_pdata = {};
+ struct mfd_cell *cells;
struct regmap *regmap;
int device_type;
int ret;
+ ret = skl_int3472_get_sensor_adev_and_name(&client->dev, NULL,
+ &clk_pdata.consumer_dev_name);
+ if (ret)
+ return ret;
+
regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config);
if (IS_ERR(regmap)) {
dev_err(&client->dev, "Failed to create regmap: %ld\n", PTR_ERR(regmap));
@@ -119,9 +127,38 @@ int skl_int3472_tps68470_probe(struct i2c_client *client)
device_type = skl_int3472_tps68470_calc_type(adev);
switch (device_type) {
case DESIGNED_FOR_WINDOWS:
+ board_data = int3472_tps68470_get_board_data(dev_name(&client->dev));
+ if (!board_data)
+ return dev_err_probe(&client->dev, -ENODEV, "No board-data found for this model\n");
+
+ cells = kcalloc(TPS68470_WIN_MFD_CELL_COUNT, sizeof(*cells), GFP_KERNEL);
+ if (!cells)
+ return -ENOMEM;
+
+ /*
+ * The order of the cells matters here! The clk must be first
+ * because the regulator depends on it. The gpios must be last,
+ * acpi_gpiochip_add() calls acpi_dev_clear_dependencies() and
+ * the clk + regulators must be ready when this happens.
+ */
+ cells[0].name = "tps68470-clk";
+ cells[0].platform_data = &clk_pdata;
+ cells[0].pdata_size = sizeof(clk_pdata);
+ cells[1].name = "tps68470-regulator";
+ cells[1].platform_data = (void *)board_data->tps68470_regulator_pdata;
+ cells[1].pdata_size = sizeof(struct tps68470_regulator_platform_data);
+ cells[2].name = "tps68470-gpio";
+
+ gpiod_add_lookup_table(board_data->tps68470_gpio_lookup_table);
+
ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
- tps68470_win, ARRAY_SIZE(tps68470_win),
+ cells, TPS68470_WIN_MFD_CELL_COUNT,
NULL, 0, NULL);
+ kfree(cells);
+
+ if (ret)
+ gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_table);
+
break;
case DESIGNED_FOR_CHROMEOS:
ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
@@ -133,5 +170,42 @@ int skl_int3472_tps68470_probe(struct i2c_client *client)
return device_type;
}
+ /*
+ * No acpi_dev_clear_dependencies() here, since the acpi_gpiochip_add()
+ * for the GPIO cell already does this.
+ */
+
return ret;
}
+
+static int skl_int3472_tps68470_remove(struct i2c_client *client)
+{
+ const struct int3472_tps68470_board_data *board_data;
+
+ board_data = int3472_tps68470_get_board_data(dev_name(&client->dev));
+ if (board_data)
+ gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_table);
+
+ return 0;
+}
+
+static const struct acpi_device_id int3472_device_id[] = {
+ { "INT3472", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, int3472_device_id);
+
+static struct i2c_driver int3472_tps68470 = {
+ .driver = {
+ .name = "int3472-tps68470",
+ .acpi_match_table = int3472_device_id,
+ },
+ .probe_new = skl_int3472_tps68470_probe,
+ .remove = skl_int3472_tps68470_remove,
+};
+module_i2c_driver(int3472_tps68470);
+
+MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI TPS68470 Device Driver");
+MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_SOFTDEP("pre: clk-tps68470 tps68470-regulator");
diff --git a/drivers/platform/x86/intel/int3472/tps68470.h b/drivers/platform/x86/intel/int3472/tps68470.h
new file mode 100644
index 000000000000..cfd33eb62740
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/tps68470.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI TPS68470 PMIC platform data definition.
+ *
+ * Copyright (c) 2021 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef _INTEL_SKL_INT3472_TPS68470_H
+#define _INTEL_SKL_INT3472_TPS68470_H
+
+struct gpiod_lookup_table;
+struct tps68470_regulator_platform_data;
+
+struct int3472_tps68470_board_data {
+ const char *dev_name;
+ struct gpiod_lookup_table *tps68470_gpio_lookup_table;
+ const struct tps68470_regulator_platform_data *tps68470_regulator_pdata;
+};
+
+const struct int3472_tps68470_board_data *int3472_tps68470_get_board_data(const char *dev_name);
+
+#endif
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
new file mode 100644
index 000000000000..f93d437fd192
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI TPS68470 PMIC platform data definition.
+ *
+ * Copyright (c) 2021 Dan Scally <djrscally@gmail.com>
+ * Copyright (c) 2021 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/gpio/machine.h>
+#include <linux/platform_data/tps68470.h>
+#include <linux/regulator/machine.h>
+#include "tps68470.h"
+
+static struct regulator_consumer_supply int347a_core_consumer_supplies[] = {
+ REGULATOR_SUPPLY("dvdd", "i2c-INT347A:00"),
+};
+
+static struct regulator_consumer_supply int347a_ana_consumer_supplies[] = {
+ REGULATOR_SUPPLY("avdd", "i2c-INT347A:00"),
+};
+
+static struct regulator_consumer_supply int347a_vcm_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdd", "i2c-INT347A:00-VCM"),
+};
+
+static struct regulator_consumer_supply int347a_vsio_consumer_supplies[] = {
+ REGULATOR_SUPPLY("dovdd", "i2c-INT347A:00"),
+ REGULATOR_SUPPLY("vsio", "i2c-INT347A:00-VCM"),
+};
+
+static const struct regulator_init_data surface_go_tps68470_core_reg_init_data = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 1200000,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_core_consumer_supplies),
+ .consumer_supplies = int347a_core_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_ana_reg_init_data = {
+ .constraints = {
+ .min_uV = 2815200,
+ .max_uV = 2815200,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_ana_consumer_supplies),
+ .consumer_supplies = int347a_ana_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_vcm_reg_init_data = {
+ .constraints = {
+ .min_uV = 2815200,
+ .max_uV = 2815200,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_vcm_consumer_supplies),
+ .consumer_supplies = int347a_vcm_consumer_supplies,
+};
+
+/* Ensure the always-on VIO regulator has the same voltage as VSIO */
+static const struct regulator_init_data surface_go_tps68470_vio_reg_init_data = {
+ .constraints = {
+ .min_uV = 1800600,
+ .max_uV = 1800600,
+ .apply_uV = true,
+ .always_on = true,
+ },
+};
+
+static const struct regulator_init_data surface_go_tps68470_vsio_reg_init_data = {
+ .constraints = {
+ .min_uV = 1800600,
+ .max_uV = 1800600,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_vsio_consumer_supplies),
+ .consumer_supplies = int347a_vsio_consumer_supplies,
+};
+
+static const struct tps68470_regulator_platform_data surface_go_tps68470_pdata = {
+ .reg_init_data = {
+ [TPS68470_CORE] = &surface_go_tps68470_core_reg_init_data,
+ [TPS68470_ANA] = &surface_go_tps68470_ana_reg_init_data,
+ [TPS68470_VCM] = &surface_go_tps68470_vcm_reg_init_data,
+ [TPS68470_VIO] = &surface_go_tps68470_vio_reg_init_data,
+ [TPS68470_VSIO] = &surface_go_tps68470_vsio_reg_init_data,
+ },
+};
+
+static struct gpiod_lookup_table surface_go_tps68470_gpios = {
+ .dev_id = "i2c-INT347A:00",
+ .table = {
+ GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW)
+ }
+};
+
+static const struct int3472_tps68470_board_data surface_go_tps68470_board_data = {
+ .dev_name = "i2c-INT3472:05",
+ .tps68470_gpio_lookup_table = &surface_go_tps68470_gpios,
+ .tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+};
+
+static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = {
+ .dev_name = "i2c-INT3472:01",
+ .tps68470_gpio_lookup_table = &surface_go_tps68470_gpios,
+ .tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+};
+
+static const struct dmi_system_id int3472_tps68470_board_data_table[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
+ },
+ .driver_data = (void *)&surface_go_tps68470_board_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Go 2"),
+ },
+ .driver_data = (void *)&surface_go_tps68470_board_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ },
+ .driver_data = (void *)&surface_go3_tps68470_board_data,
+ },
+ { }
+};
+
+const struct int3472_tps68470_board_data *int3472_tps68470_get_board_data(const char *dev_name)
+{
+ const struct int3472_tps68470_board_data *board_data;
+ const struct dmi_system_id *match;
+
+ for (match = dmi_first_match(int3472_tps68470_board_data_table);
+ match;
+ match = dmi_first_match(match + 1)) {
+ board_data = match->driver_data;
+ if (strcmp(board_data->dev_name, dev_name) == 0)
+ return board_data;
+ }
+
+ return NULL;
+}
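
Adding support for a further model is a data-only change: one more int3472_tps68470_board_data instance (or reuse of an existing one when the I2C device name matches) plus a DMI match entry before the table terminator. A hypothetical example; the vendor string, product name, and ACPI device instance below are invented:

    static const struct int3472_tps68470_board_data example_tps68470_board_data = {
            .dev_name = "i2c-INT3472:03",       /* hypothetical ACPI instance */
            .tps68470_gpio_lookup_table = &surface_go_tps68470_gpios,
            .tps68470_regulator_pdata = &surface_go_tps68470_pdata,
    };

    /* ...added to int3472_tps68470_board_data_table[] before the { } terminator: */
    {
            .matches = {
                    DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                    DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Example Tablet"),
            },
            .driver_data = (void *)&example_tps68470_board_data,
    },
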
diff --git a/drivers/platform/x86/intel/pmt/Kconfig b/drivers/platform/x86/intel/pmt/Kconfig
index d630f883a717..e916fc966221 100644
--- a/drivers/platform/x86/intel/pmt/Kconfig
+++ b/drivers/platform/x86/intel/pmt/Kconfig
@@ -17,7 +17,7 @@ config INTEL_PMT_CLASS
config INTEL_PMT_TELEMETRY
tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver"
- depends on MFD_INTEL_PMT
+ depends on INTEL_VSEC
select INTEL_PMT_CLASS
help
The Intel Platform Monitoring Technology (PMT) Telemetry driver provides
@@ -29,7 +29,7 @@ config INTEL_PMT_TELEMETRY
config INTEL_PMT_CRASHLOG
tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver"
- depends on MFD_INTEL_PMT
+ depends on INTEL_VSEC
select INTEL_PMT_CLASS
help
The Intel Platform Monitoring Technology (PMT) crashlog driver provides
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 659b1073033c..1c9e3f3ea41c 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -13,6 +13,7 @@
#include <linux/mm.h>
#include <linux/pci.h>
+#include "../vsec.h"
#include "class.h"
#define PMT_XA_START 0
@@ -281,31 +282,29 @@ fail_dev_create:
return ret;
}
-int intel_pmt_dev_create(struct intel_pmt_entry *entry,
- struct intel_pmt_namespace *ns,
- struct platform_device *pdev, int idx)
+int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns,
+ struct intel_vsec_device *intel_vsec_dev, int idx)
{
+ struct device *dev = &intel_vsec_dev->auxdev.dev;
struct intel_pmt_header header;
struct resource *disc_res;
- int ret = -ENODEV;
+ int ret;
- disc_res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
- if (!disc_res)
- return ret;
+ disc_res = &intel_vsec_dev->resource[idx];
- entry->disc_table = devm_platform_ioremap_resource(pdev, idx);
+ entry->disc_table = devm_ioremap_resource(dev, disc_res);
if (IS_ERR(entry->disc_table))
return PTR_ERR(entry->disc_table);
- ret = ns->pmt_header_decode(entry, &header, &pdev->dev);
+ ret = ns->pmt_header_decode(entry, &header, dev);
if (ret)
return ret;
- ret = intel_pmt_populate_entry(entry, &header, &pdev->dev, disc_res);
+ ret = intel_pmt_populate_entry(entry, &header, dev, disc_res);
if (ret)
return ret;
- return intel_pmt_dev_register(entry, ns, &pdev->dev);
+ return intel_pmt_dev_register(entry, ns, dev);
}
EXPORT_SYMBOL_GPL(intel_pmt_dev_create);
diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h
index 1337019c2873..db11d58867ce 100644
--- a/drivers/platform/x86/intel/pmt/class.h
+++ b/drivers/platform/x86/intel/pmt/class.h
@@ -2,13 +2,14 @@
#ifndef _INTEL_PMT_CLASS_H
#define _INTEL_PMT_CLASS_H
-#include <linux/platform_device.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/io.h>
+#include "../vsec.h"
+
/* PMT access types */
#define ACCESS_BARID 2
#define ACCESS_LOCAL 3
@@ -47,7 +48,7 @@ struct intel_pmt_namespace {
bool intel_pmt_is_early_client_hw(struct device *dev);
int intel_pmt_dev_create(struct intel_pmt_entry *entry,
struct intel_pmt_namespace *ns,
- struct platform_device *pdev, int idx);
+ struct intel_vsec_device *dev, int idx);
void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
struct intel_pmt_namespace *ns);
#endif
diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c
index 1c1021f04d3c..34daf9df168b 100644
--- a/drivers/platform/x86/intel/pmt/crashlog.c
+++ b/drivers/platform/x86/intel/pmt/crashlog.c
@@ -8,6 +8,7 @@
* Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -15,10 +16,9 @@
#include <linux/uaccess.h>
#include <linux/overflow.h>
+#include "../vsec.h"
#include "class.h"
-#define DRV_NAME "pmt_crashlog"
-
/* Crashlog discovery header types */
#define CRASH_TYPE_OOBMSM 1
@@ -257,34 +257,34 @@ static struct intel_pmt_namespace pmt_crashlog_ns = {
/*
* initialization
*/
-static int pmt_crashlog_remove(struct platform_device *pdev)
+static void pmt_crashlog_remove(struct auxiliary_device *auxdev)
{
- struct pmt_crashlog_priv *priv = platform_get_drvdata(pdev);
+ struct pmt_crashlog_priv *priv = auxiliary_get_drvdata(auxdev);
int i;
for (i = 0; i < priv->num_entries; i++)
intel_pmt_dev_destroy(&priv->entry[i].entry, &pmt_crashlog_ns);
-
- return 0;
}
-static int pmt_crashlog_probe(struct platform_device *pdev)
+static int pmt_crashlog_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
{
+ struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
struct pmt_crashlog_priv *priv;
size_t size;
int i, ret;
- size = struct_size(priv, entry, pdev->num_resources);
- priv = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ size = struct_size(priv, entry, intel_vsec_dev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
if (!priv)
return -ENOMEM;
- platform_set_drvdata(pdev, priv);
+ auxiliary_set_drvdata(auxdev, priv);
- for (i = 0; i < pdev->num_resources; i++) {
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
struct intel_pmt_entry *entry = &priv->entry[i].entry;
- ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, pdev, i);
+ ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i);
if (ret < 0)
goto abort_probe;
if (ret)
@@ -295,26 +295,30 @@ static int pmt_crashlog_probe(struct platform_device *pdev)
return 0;
abort_probe:
- pmt_crashlog_remove(pdev);
+ pmt_crashlog_remove(auxdev);
return ret;
}
-static struct platform_driver pmt_crashlog_driver = {
- .driver = {
- .name = DRV_NAME,
- },
- .remove = pmt_crashlog_remove,
- .probe = pmt_crashlog_probe,
+static const struct auxiliary_device_id pmt_crashlog_id_table[] = {
+ { .name = "intel_vsec.crashlog" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_crashlog_id_table);
+
+static struct auxiliary_driver pmt_crashlog_aux_driver = {
+ .id_table = pmt_crashlog_id_table,
+ .remove = pmt_crashlog_remove,
+ .probe = pmt_crashlog_probe,
};
static int __init pmt_crashlog_init(void)
{
- return platform_driver_register(&pmt_crashlog_driver);
+ return auxiliary_driver_register(&pmt_crashlog_aux_driver);
}
static void __exit pmt_crashlog_exit(void)
{
- platform_driver_unregister(&pmt_crashlog_driver);
+ auxiliary_driver_unregister(&pmt_crashlog_aux_driver);
xa_destroy(&crashlog_array);
}
@@ -323,5 +327,4 @@ module_exit(pmt_crashlog_exit);
MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
MODULE_DESCRIPTION("Intel PMT Crashlog driver");
-MODULE_ALIAS("platform:" DRV_NAME);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index 38d52651c572..6b6f3e2a617a 100644
--- a/drivers/platform/x86/intel/pmt/telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -8,6 +8,7 @@
* Author: "David E. Box" <david.e.box@linux.intel.com>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -15,10 +16,9 @@
#include <linux/uaccess.h>
#include <linux/overflow.h>
+#include "../vsec.h"
#include "class.h"
-#define TELEM_DEV_NAME "pmt_telemetry"
-
#define TELEM_SIZE_OFFSET 0x0
#define TELEM_GUID_OFFSET 0x4
#define TELEM_BASE_OFFSET 0x8
@@ -79,34 +79,33 @@ static struct intel_pmt_namespace pmt_telem_ns = {
.pmt_header_decode = pmt_telem_header_decode,
};
-static int pmt_telem_remove(struct platform_device *pdev)
+static void pmt_telem_remove(struct auxiliary_device *auxdev)
{
- struct pmt_telem_priv *priv = platform_get_drvdata(pdev);
+ struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev);
int i;
for (i = 0; i < priv->num_entries; i++)
intel_pmt_dev_destroy(&priv->entry[i], &pmt_telem_ns);
-
- return 0;
}
-static int pmt_telem_probe(struct platform_device *pdev)
+static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
+ struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
struct pmt_telem_priv *priv;
size_t size;
int i, ret;
- size = struct_size(priv, entry, pdev->num_resources);
- priv = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ size = struct_size(priv, entry, intel_vsec_dev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
if (!priv)
return -ENOMEM;
- platform_set_drvdata(pdev, priv);
+ auxiliary_set_drvdata(auxdev, priv);
- for (i = 0; i < pdev->num_resources; i++) {
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
struct intel_pmt_entry *entry = &priv->entry[i];
- ret = intel_pmt_dev_create(entry, &pmt_telem_ns, pdev, i);
+ ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
if (ret < 0)
goto abort_probe;
if (ret)
@@ -117,32 +116,35 @@ static int pmt_telem_probe(struct platform_device *pdev)
return 0;
abort_probe:
- pmt_telem_remove(pdev);
+ pmt_telem_remove(auxdev);
return ret;
}
-static struct platform_driver pmt_telem_driver = {
- .driver = {
- .name = TELEM_DEV_NAME,
- },
- .remove = pmt_telem_remove,
- .probe = pmt_telem_probe,
+static const struct auxiliary_device_id pmt_telem_id_table[] = {
+ { .name = "intel_vsec.telemetry" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_telem_id_table);
+
+static struct auxiliary_driver pmt_telem_aux_driver = {
+ .id_table = pmt_telem_id_table,
+ .remove = pmt_telem_remove,
+ .probe = pmt_telem_probe,
};
static int __init pmt_telem_init(void)
{
- return platform_driver_register(&pmt_telem_driver);
+ return auxiliary_driver_register(&pmt_telem_aux_driver);
}
module_init(pmt_telem_init);
static void __exit pmt_telem_exit(void)
{
- platform_driver_unregister(&pmt_telem_driver);
+ auxiliary_driver_unregister(&pmt_telem_aux_driver);
xa_destroy(&telem_array);
}
module_exit(pmt_telem_exit);
MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel PMT Telemetry driver");
-MODULE_ALIAS("platform:" TELEM_DEV_NAME);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency.c
index 3ee4c5c8a64f..4cd8254f2e40 100644
--- a/drivers/platform/x86/intel/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency.c
@@ -225,6 +225,7 @@ static struct attribute *uncore_attrs[] = {
&min_freq_khz.attr,
NULL
};
+ATTRIBUTE_GROUPS(uncore);
static void uncore_sysfs_entry_release(struct kobject *kobj)
{
@@ -236,7 +237,7 @@ static void uncore_sysfs_entry_release(struct kobject *kobj)
static struct kobj_type uncore_ktype = {
.release = uncore_sysfs_entry_release,
.sysfs_ops = &kobj_sysfs_ops,
- .default_attrs = uncore_attrs,
+ .default_groups = uncore_groups,
};
/* Caller provides protection */
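
ATTRIBUTE_GROUPS(uncore) generates both an uncore_group wrapping uncore_attrs and a NULL-terminated uncore_groups[] array, which is the form kobj_type now consumes through .default_groups in place of the removed .default_attrs. The same conversion for a hypothetical ktype, assuming an existing example_show() callback:

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    static struct kobj_attribute example_attr = __ATTR_RO(example); /* needs example_show() */

    static struct attribute *example_attrs[] = {
            &example_attr.attr,
            NULL
    };
    /* Emits example_group and the NULL-terminated example_groups[]. */
    ATTRIBUTE_GROUPS(example);

    static struct kobj_type example_ktype = {
            .sysfs_ops = &kobj_sysfs_ops,
            .default_groups = example_groups,
    };
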
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
new file mode 100644
index 000000000000..c3bdd75ed690
--- /dev/null
+++ b/drivers/platform/x86/intel/vsec.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Vendor Specific Extended Capabilities auxiliary bus driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: David E. Box <david.e.box@linux.intel.com>
+ *
+ * This driver discovers and creates auxiliary devices for Intel defined PCIe
+ * "Vendor Specific" and "Designated Vendor Specific" Extended Capabilities,
+ * VSEC and DVSEC respectively. The driver supports features on specific PCIe
+ * endpoints whose primary purpose is to expose those features.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "vsec.h"
+
+/* Intel DVSEC offsets */
+#define INTEL_DVSEC_ENTRIES 0xA
+#define INTEL_DVSEC_SIZE 0xB
+#define INTEL_DVSEC_TABLE 0xC
+#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
+#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
+#define TABLE_OFFSET_SHIFT 3
+
+static DEFINE_IDA(intel_vsec_ida);
+
+/**
+ * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
+ * @rev: Revision ID of the VSEC/DVSEC register space
+ * @length: Length of the VSEC/DVSEC register space
+ * @id: ID of the feature
+ * @num_entries: Number of instances of the feature
+ * @entry_size: Size of the discovery table for each feature
+ * @tbir: BAR containing the discovery tables
+ * @offset: BAR offset of start of the first discovery table
+ */
+struct intel_vsec_header {
+ u8 rev;
+ u16 length;
+ u16 id;
+ u8 num_entries;
+ u8 entry_size;
+ u8 tbir;
+ u32 offset;
+};
+
+/* Platform specific data */
+struct intel_vsec_platform_info {
+ struct intel_vsec_header **capabilities;
+ unsigned long quirks;
+};
+
+enum intel_vsec_id {
+ VSEC_ID_TELEMETRY = 2,
+ VSEC_ID_WATCHER = 3,
+ VSEC_ID_CRASHLOG = 4,
+};
+
+static enum intel_vsec_id intel_vsec_allow_list[] = {
+ VSEC_ID_TELEMETRY,
+ VSEC_ID_WATCHER,
+ VSEC_ID_CRASHLOG,
+};
+
+static const char *intel_vsec_name(enum intel_vsec_id id)
+{
+ switch (id) {
+ case VSEC_ID_TELEMETRY:
+ return "telemetry";
+
+ case VSEC_ID_WATCHER:
+ return "watcher";
+
+ case VSEC_ID_CRASHLOG:
+ return "crashlog";
+
+ default:
+ return NULL;
+ }
+}
+
+static bool intel_vsec_allowed(u16 id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_vsec_allow_list); i++)
+ if (intel_vsec_allow_list[i] == id)
+ return true;
+
+ return false;
+}
+
+static bool intel_vsec_disabled(u16 id, unsigned long quirks)
+{
+ switch (id) {
+ case VSEC_ID_WATCHER:
+ return !!(quirks & VSEC_QUIRK_NO_WATCHER);
+
+ case VSEC_ID_CRASHLOG:
+ return !!(quirks & VSEC_QUIRK_NO_CRASHLOG);
+
+ default:
+ return false;
+ }
+}
+
+static void intel_vsec_remove_aux(void *data)
+{
+ auxiliary_device_delete(data);
+ auxiliary_device_uninit(data);
+}
+
+static void intel_vsec_dev_release(struct device *dev)
+{
+ struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);
+
+ ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+}
+
+static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *intel_vsec_dev,
+ const char *name)
+{
+ struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
+ int ret;
+
+ ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(intel_vsec_dev);
+ return ret;
+ }
+
+ auxdev->id = ret;
+ auxdev->name = name;
+ auxdev->dev.parent = &pdev->dev;
+ auxdev->dev.release = intel_vsec_dev_release;
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret < 0) {
+ ida_free(intel_vsec_dev->ida, auxdev->id);
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(auxdev);
+ if (ret < 0) {
+ auxiliary_device_uninit(auxdev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux, auxdev);
+}
+
+static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
+ unsigned long quirks)
+{
+ struct intel_vsec_device *intel_vsec_dev;
+ struct resource *res, *tmp;
+ int i;
+
+ if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks))
+ return -EINVAL;
+
+ if (!header->num_entries) {
+ dev_dbg(&pdev->dev, "Invalid 0 entry count for header id %d\n", header->id);
+ return -EINVAL;
+ }
+
+ if (!header->entry_size) {
+ dev_dbg(&pdev->dev, "Invalid 0 entry size for header id %d\n", header->id);
+ return -EINVAL;
+ }
+
+ intel_vsec_dev = kzalloc(sizeof(*intel_vsec_dev), GFP_KERNEL);
+ if (!intel_vsec_dev)
+ return -ENOMEM;
+
+ res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ kfree(intel_vsec_dev);
+ return -ENOMEM;
+ }
+
+ if (quirks & VSEC_QUIRK_TABLE_SHIFT)
+ header->offset >>= TABLE_OFFSET_SHIFT;
+
+ /*
+ * The DVSEC/VSEC contains the starting offset and count for a block of
+ * discovery tables. Create a resource array describing these tables and
+ * hand it to the auxiliary device driver.
+ */
+ for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) {
+ tmp->start = pdev->resource[header->tbir].start +
+ header->offset + i * (header->entry_size * sizeof(u32));
+ tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1;
+ tmp->flags = IORESOURCE_MEM;
+ }
+
+ intel_vsec_dev->pcidev = pdev;
+ intel_vsec_dev->resource = res;
+ intel_vsec_dev->num_resources = header->num_entries;
+ intel_vsec_dev->quirks = quirks;
+ intel_vsec_dev->ida = &intel_vsec_ida;
+
+ return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id));
+}
+
+static bool intel_vsec_walk_header(struct pci_dev *pdev, unsigned long quirks,
+ struct intel_vsec_header **header)
+{
+ bool have_devices = false;
+ int ret;
+
+ for ( ; *header; header++) {
+ ret = intel_vsec_add_dev(pdev, *header, quirks);
+ if (ret)
+ dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n",
+ (*header)->id);
+ else
+ have_devices = true;
+ }
+
+ return have_devices;
+}
+
+static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
+{
+ bool have_devices = false;
+ int pos = 0;
+
+ do {
+ struct intel_vsec_header header;
+ u32 table, hdr;
+ u16 vid;
+ int ret;
+
+ pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
+ if (!pos)
+ break;
+
+ pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
+ vid = PCI_DVSEC_HEADER1_VID(hdr);
+ if (vid != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ /* Support only revision 1 */
+ header.rev = PCI_DVSEC_HEADER1_REV(hdr);
+ if (header.rev != 1) {
+ dev_info(&pdev->dev, "Unsupported DVSEC revision %d\n", header.rev);
+ continue;
+ }
+
+ header.length = PCI_DVSEC_HEADER1_LEN(hdr);
+
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
+ pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);
+
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+ pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
+ header.id = PCI_DVSEC_HEADER2_ID(hdr);
+
+ ret = intel_vsec_add_dev(pdev, &header, quirks);
+ if (ret)
+ continue;
+
+ have_devices = true;
+ } while (true);
+
+ return have_devices;
+}
+
+static bool intel_vsec_walk_vsec(struct pci_dev *pdev, unsigned long quirks)
+{
+ bool have_devices = false;
+ int pos = 0;
+
+ do {
+ struct intel_vsec_header header;
+ u32 table, hdr;
+ int ret;
+
+ pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_VNDR);
+ if (!pos)
+ break;
+
+ pci_read_config_dword(pdev, pos + PCI_VNDR_HEADER, &hdr);
+
+ /* Support only revision 1 */
+ header.rev = PCI_VNDR_HEADER_REV(hdr);
+ if (header.rev != 1) {
+ dev_info(&pdev->dev, "Unsupported VSEC revision %d\n", header.rev);
+ continue;
+ }
+
+ header.id = PCI_VNDR_HEADER_ID(hdr);
+ header.length = PCI_VNDR_HEADER_LEN(hdr);
+
+ /* entry, size, and table offset are the same as DVSEC */
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
+ pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);
+
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+ ret = intel_vsec_add_dev(pdev, &header, quirks);
+ if (ret)
+ continue;
+
+ have_devices = true;
+ } while (true);
+
+ return have_devices;
+}
+
+static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct intel_vsec_platform_info *info;
+ bool have_devices = false;
+ unsigned long quirks = 0;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ info = (struct intel_vsec_platform_info *)id->driver_data;
+ if (info)
+ quirks = info->quirks;
+
+ if (intel_vsec_walk_dvsec(pdev, quirks))
+ have_devices = true;
+
+ if (intel_vsec_walk_vsec(pdev, quirks))
+ have_devices = true;
+
+ if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
+ intel_vsec_walk_header(pdev, quirks, info->capabilities))
+ have_devices = true;
+
+ if (!have_devices)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* TGL info */
+static const struct intel_vsec_platform_info tgl_info = {
+ .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG | VSEC_QUIRK_TABLE_SHIFT,
+};
+
+/* DG1 info */
+static struct intel_vsec_header dg1_telemetry = {
+ .length = 0x10,
+ .id = 2,
+ .num_entries = 1,
+ .entry_size = 3,
+ .tbir = 0,
+ .offset = 0x466000,
+};
+
+static struct intel_vsec_header *dg1_capabilities[] = {
+ &dg1_telemetry,
+ NULL
+};
+
+static const struct intel_vsec_platform_info dg1_info = {
+ .capabilities = dg1_capabilities,
+ .quirks = VSEC_QUIRK_NO_DVSEC,
+};
+
+#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
+#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
+#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
+static const struct pci_device_id intel_vsec_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, NULL) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
+
+static struct pci_driver intel_vsec_pci_driver = {
+ .name = "intel_vsec",
+ .id_table = intel_vsec_pci_ids,
+ .probe = intel_vsec_pci_probe,
+};
+module_pci_driver(intel_vsec_pci_driver);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Extended Capabilities auxiliary bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
new file mode 100644
index 000000000000..4cc36678e8c5
--- /dev/null
+++ b/drivers/platform/x86/intel/vsec.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VSEC_H
+#define _VSEC_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+
+struct pci_dev;
+struct resource;
+
+enum intel_vsec_quirks {
+ /* Watcher feature not supported */
+ VSEC_QUIRK_NO_WATCHER = BIT(0),
+
+ /* Crashlog feature not supported */
+ VSEC_QUIRK_NO_CRASHLOG = BIT(1),
+
+ /* Use shift instead of mask to read discovery table offset */
+ VSEC_QUIRK_TABLE_SHIFT = BIT(2),
+
+ /* DVSEC not present (provided in driver data) */
+ VSEC_QUIRK_NO_DVSEC = BIT(3),
+};
+
+struct intel_vsec_device {
+ struct auxiliary_device auxdev;
+ struct pci_dev *pcidev;
+ struct resource *resource;
+ struct ida *ida;
+ unsigned long quirks;
+ int num_resources;
+};
+
+static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
+{
+ return container_of(dev, struct intel_vsec_device, auxdev.dev);
+}
+
+static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev)
+{
+ return container_of(auxdev, struct intel_vsec_device, auxdev);
+}
+#endif
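/* A minimal sketch of a client on the auxiliary bus, recovering the parent
 * intel_vsec_device via the helper above; the driver and function names here
 * are illustrative, not part of this patch:
 */
static int example_probe(struct auxiliary_device *auxdev,
			 const struct auxiliary_device_id *id)
{
	struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev);

	/* resource[0..num_resources - 1] describe the discovery tables */
	dev_info(&auxdev->dev, "%d table(s), first at %pR, quirks 0x%lx\n",
		 ivdev->num_resources, ivdev->resource, ivdev->quirks);
	return 0;
}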
diff --git a/drivers/platform/x86/lenovo-yogabook-wmi.c b/drivers/platform/x86/lenovo-yogabook-wmi.c
new file mode 100644
index 000000000000..5f4bd1eec38a
--- /dev/null
+++ b/drivers/platform/x86/lenovo-yogabook-wmi.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/* WMI driver for Lenovo Yoga Book YB1-X90* / -X91* tablets */
+
+#include <linux/acpi.h>
+#include <linux/devm-helpers.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/leds.h>
+#include <linux/wmi.h>
+#include <linux/workqueue.h>
+
+#define YB_MBTN_EVENT_GUID "243FEC1D-1963-41C1-8100-06A9D82A94B4"
+#define YB_MBTN_METHOD_GUID "742B0CA1-0B20-404B-9CAA-AEFCABF30CE0"
+
+#define YB_PAD_ENABLE 1
+#define YB_PAD_DISABLE 2
+#define YB_LIGHTUP_BTN 3
+
+#define YB_KBD_BL_DEFAULT 128
+
+/* flags */
+enum {
+ YB_KBD_IS_ON,
+ YB_DIGITIZER_IS_ON,
+ YB_DIGITIZER_MODE,
+ YB_TABLET_MODE,
+ YB_SUSPENDED,
+};
+
+struct yogabook_wmi {
+ struct wmi_device *wdev;
+ struct acpi_device *kbd_adev;
+ struct acpi_device *dig_adev;
+ struct device *kbd_dev;
+ struct device *dig_dev;
+ struct gpio_desc *backside_hall_gpio;
+ int backside_hall_irq;
+ struct work_struct work;
+ struct led_classdev kbd_bl_led;
+ unsigned long flags;
+ uint8_t brightness;
+};
+
+static int yogabook_wmi_do_action(struct wmi_device *wdev, int action)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer input;
+ acpi_status status;
+ u32 dummy_arg = 0;
+
+ dev_dbg(&wdev->dev, "Do action: %d\n", action);
+
+ input.pointer = &dummy_arg;
+ input.length = sizeof(dummy_arg);
+
+ status = wmi_evaluate_method(YB_MBTN_METHOD_GUID, 0, action, &input,
+ &output);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&wdev->dev, "Calling WMI method failure: 0x%x\n",
+ status);
+ return status;
+ }
+
+ kfree(output.pointer);
+
+ return 0;
+}
+
+/*
+ * To control the keyboard backlight, call the KBLC() method of the TCS1 ACPI
+ * device (the Goodix touchpad acts as a virtual sensor keyboard).
+ */
+static int yogabook_wmi_set_kbd_backlight(struct wmi_device *wdev,
+ uint8_t level)
+{
+ struct yogabook_wmi *data = dev_get_drvdata(&wdev->dev);
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input;
+ union acpi_object param;
+ acpi_status status;
+
+ if (data->kbd_adev->power.state != ACPI_STATE_D0) {
+ dev_warn(&wdev->dev, "keyboard touchscreen not in D0, cannot set brightness\n");
+ return -ENXIO;
+ }
+
+ dev_dbg(&wdev->dev, "Set KBLC level to %u\n", level);
+
+ input.count = 1;
+ input.pointer = &param;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = 255 - level;
+
+ status = acpi_evaluate_object(acpi_device_handle(data->kbd_adev), "KBLC",
+ &input, &output);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&wdev->dev, "Failed to call KBLC method: 0x%x\n", status);
+ return status;
+ }
+
+ kfree(output.pointer);
+ return 0;
+}
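/* Note the inversion above: KBLC() is passed 255 - level, so maximum
 * brightness (level 255) is sent as 0 and backlight-off (level 0) as 255.
 * This is read off the code; the firmware contract itself is not documented
 * in this patch.
 */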
+
+static void yogabook_wmi_work(struct work_struct *work)
+{
+ struct yogabook_wmi *data = container_of(work, struct yogabook_wmi, work);
+ struct device *dev = &data->wdev->dev;
+ bool kbd_on, digitizer_on;
+ int r;
+
+ if (test_bit(YB_SUSPENDED, &data->flags))
+ return;
+
+ if (test_bit(YB_TABLET_MODE, &data->flags)) {
+ kbd_on = false;
+ digitizer_on = false;
+ } else if (test_bit(YB_DIGITIZER_MODE, &data->flags)) {
+ digitizer_on = true;
+ kbd_on = false;
+ } else {
+ kbd_on = true;
+ digitizer_on = false;
+ }
+
+ if (!kbd_on && test_bit(YB_KBD_IS_ON, &data->flags)) {
+ /*
+ * Must be done before releasing the keyboard touchscreen driver,
+ * so that the keyboard touchscreen dev is still in D0.
+ */
+ yogabook_wmi_set_kbd_backlight(data->wdev, 0);
+ device_release_driver(data->kbd_dev);
+ clear_bit(YB_KBD_IS_ON, &data->flags);
+ }
+
+ if (!digitizer_on && test_bit(YB_DIGITIZER_IS_ON, &data->flags)) {
+ yogabook_wmi_do_action(data->wdev, YB_PAD_DISABLE);
+ device_release_driver(data->dig_dev);
+ clear_bit(YB_DIGITIZER_IS_ON, &data->flags);
+ }
+
+ if (kbd_on && !test_bit(YB_KBD_IS_ON, &data->flags)) {
+ r = device_reprobe(data->kbd_dev);
+ if (r)
+ dev_warn(dev, "Reprobe of keyboard touchscreen failed: %d\n", r);
+
+ yogabook_wmi_set_kbd_backlight(data->wdev, data->brightness);
+ set_bit(YB_KBD_IS_ON, &data->flags);
+ }
+
+ if (digitizer_on && !test_bit(YB_DIGITIZER_IS_ON, &data->flags)) {
+ r = device_reprobe(data->dig_dev);
+ if (r)
+ dev_warn(dev, "Reprobe of digitizer failed: %d\n", r);
+
+ yogabook_wmi_do_action(data->wdev, YB_PAD_ENABLE);
+ set_bit(YB_DIGITIZER_IS_ON, &data->flags);
+ }
+}
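/* The three-way selection above encodes a simple priority: tablet mode
 * (backside hall sensor) powers both halves down; otherwise digitizer mode
 * keeps only the Wacom digitizer bound; the default is keyboard-only, with
 * the backlight restored once the keyboard touchscreen is reprobed.
 */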
+
+static void yogabook_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
+{
+ struct yogabook_wmi *data = dev_get_drvdata(&wdev->dev);
+
+ if (test_bit(YB_SUSPENDED, &data->flags))
+ return;
+
+ if (test_bit(YB_DIGITIZER_MODE, &data->flags))
+ clear_bit(YB_DIGITIZER_MODE, &data->flags);
+ else
+ set_bit(YB_DIGITIZER_MODE, &data->flags);
+
+ /*
+	 * We are called from the ACPI core, and the driver [un]binding we do
+	 * here also needs ACPI functions; use a workqueue to avoid deadlocking.
+ */
+ schedule_work(&data->work);
+}
+
+static irqreturn_t yogabook_backside_hall_irq(int irq, void *_data)
+{
+ struct yogabook_wmi *data = _data;
+
+ if (gpiod_get_value(data->backside_hall_gpio))
+ set_bit(YB_TABLET_MODE, &data->flags);
+ else
+ clear_bit(YB_TABLET_MODE, &data->flags);
+
+ schedule_work(&data->work);
+
+ return IRQ_HANDLED;
+}
+
+static enum led_brightness kbd_brightness_get(struct led_classdev *cdev)
+{
+ struct yogabook_wmi *data =
+ container_of(cdev, struct yogabook_wmi, kbd_bl_led);
+
+ return data->brightness;
+}
+
+static int kbd_brightness_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct yogabook_wmi *data =
+ container_of(cdev, struct yogabook_wmi, kbd_bl_led);
+ struct wmi_device *wdev = data->wdev;
+
+ if ((value < 0) || (value > 255))
+ return -EINVAL;
+
+ data->brightness = value;
+
+ if (data->kbd_adev->power.state != ACPI_STATE_D0)
+ return 0;
+
+ return yogabook_wmi_set_kbd_backlight(wdev, data->brightness);
+}
+
+static struct gpiod_lookup_table yogabook_wmi_gpios = {
+ .dev_id = "243FEC1D-1963-41C1-8100-06A9D82A94B4",
+ .table = {
+ GPIO_LOOKUP("INT33FF:02", 18, "backside_hall_sw", GPIO_ACTIVE_LOW),
+ {}
+ },
+};
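/* Note: gpiod lookup tables are matched against dev_name(); for WMI devices
 * that is the GUID string, which is why .dev_id above repeats the event GUID.
 * The devm_gpiod_get(&wdev->dev, "backside_hall_sw", GPIOD_IN) call in probe
 * below then resolves, via the con_id, to pin 18 of the "INT33FF:02" GPIO
 * controller, active low.
 */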
+
+static void yogabook_wmi_rm_gpio_lookup(void *unused)
+{
+ gpiod_remove_lookup_table(&yogabook_wmi_gpios);
+}
+
+static int yogabook_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct yogabook_wmi *data;
+ int r;
+
+ data = devm_kzalloc(&wdev->dev, sizeof(struct yogabook_wmi), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, data);
+
+ data->wdev = wdev;
+ data->brightness = YB_KBD_BL_DEFAULT;
+ set_bit(YB_KBD_IS_ON, &data->flags);
+ set_bit(YB_DIGITIZER_IS_ON, &data->flags);
+
+ r = devm_work_autocancel(&wdev->dev, &data->work, yogabook_wmi_work);
+ if (r)
+ return r;
+
+ data->kbd_adev = acpi_dev_get_first_match_dev("GDIX1001", NULL, -1);
+ if (!data->kbd_adev) {
+ dev_err(&wdev->dev, "Cannot find the touchpad device in ACPI tables\n");
+ return -ENODEV;
+ }
+
+ data->dig_adev = acpi_dev_get_first_match_dev("WCOM0019", NULL, -1);
+ if (!data->dig_adev) {
+ dev_err(&wdev->dev, "Cannot find the digitizer device in ACPI tables\n");
+ r = -ENODEV;
+ goto error_put_devs;
+ }
+
+ data->kbd_dev = get_device(acpi_get_first_physical_node(data->kbd_adev));
+ if (!data->kbd_dev || !data->kbd_dev->driver) {
+ r = -EPROBE_DEFER;
+ goto error_put_devs;
+ }
+
+ data->dig_dev = get_device(acpi_get_first_physical_node(data->dig_adev));
+ if (!data->dig_dev || !data->dig_dev->driver) {
+ r = -EPROBE_DEFER;
+ goto error_put_devs;
+ }
+
+ gpiod_add_lookup_table(&yogabook_wmi_gpios);
+
+ r = devm_add_action_or_reset(&wdev->dev, yogabook_wmi_rm_gpio_lookup, NULL);
+ if (r)
+ goto error_put_devs;
+
+ data->backside_hall_gpio =
+ devm_gpiod_get(&wdev->dev, "backside_hall_sw", GPIOD_IN);
+ if (IS_ERR(data->backside_hall_gpio)) {
+ r = PTR_ERR(data->backside_hall_gpio);
+ dev_err_probe(&wdev->dev, r, "Getting backside_hall_sw GPIO\n");
+ goto error_put_devs;
+ }
+
+ r = gpiod_to_irq(data->backside_hall_gpio);
+ if (r < 0) {
+ dev_err_probe(&wdev->dev, r, "Getting backside_hall_sw IRQ\n");
+ goto error_put_devs;
+ }
+ data->backside_hall_irq = r;
+
+ r = devm_request_irq(&wdev->dev, data->backside_hall_irq,
+ yogabook_backside_hall_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "backside_hall_sw", data);
+ if (r) {
+ dev_err_probe(&wdev->dev, r, "Requesting backside_hall_sw IRQ\n");
+ goto error_put_devs;
+ }
+
+ schedule_work(&data->work);
+
+ data->kbd_bl_led.name = "ybwmi::kbd_backlight";
+ data->kbd_bl_led.brightness_set_blocking = kbd_brightness_set;
+ data->kbd_bl_led.brightness_get = kbd_brightness_get;
+ data->kbd_bl_led.max_brightness = 255;
+
+ r = devm_led_classdev_register(&wdev->dev, &data->kbd_bl_led);
+ if (r < 0) {
+ dev_err_probe(&wdev->dev, r, "Registering backlight LED device\n");
+ goto error_put_devs;
+ }
+
+ return 0;
+
+error_put_devs:
+ put_device(data->dig_dev);
+ put_device(data->kbd_dev);
+ acpi_dev_put(data->dig_adev);
+ acpi_dev_put(data->kbd_adev);
+ return r;
+}
+
+static void yogabook_wmi_remove(struct wmi_device *wdev)
+{
+ struct yogabook_wmi *data = dev_get_drvdata(&wdev->dev);
+
+ put_device(data->dig_dev);
+ put_device(data->kbd_dev);
+ acpi_dev_put(data->dig_adev);
+ acpi_dev_put(data->kbd_adev);
+}
+
+static int __maybe_unused yogabook_wmi_suspend(struct device *dev)
+{
+ struct wmi_device *wdev = container_of(dev, struct wmi_device, dev);
+ struct yogabook_wmi *data = dev_get_drvdata(dev);
+
+ set_bit(YB_SUSPENDED, &data->flags);
+
+ flush_work(&data->work);
+
+	/* Turn off the pen button on suspend */
+ if (test_bit(YB_DIGITIZER_IS_ON, &data->flags))
+ yogabook_wmi_do_action(wdev, YB_PAD_DISABLE);
+
+ return 0;
+}
+
+static int __maybe_unused yogabook_wmi_resume(struct device *dev)
+{
+ struct wmi_device *wdev = container_of(dev, struct wmi_device, dev);
+ struct yogabook_wmi *data = dev_get_drvdata(dev);
+
+ if (test_bit(YB_KBD_IS_ON, &data->flags)) {
+ /* Ensure keyboard touchpad is on before we call KBLC() */
+ acpi_device_set_power(data->kbd_adev, ACPI_STATE_D0);
+ yogabook_wmi_set_kbd_backlight(wdev, data->brightness);
+ }
+
+ if (test_bit(YB_DIGITIZER_IS_ON, &data->flags))
+ yogabook_wmi_do_action(wdev, YB_PAD_ENABLE);
+
+ clear_bit(YB_SUSPENDED, &data->flags);
+
+ /* Check for YB_TABLET_MODE changes made during suspend */
+ schedule_work(&data->work);
+
+ return 0;
+}
+
+static const struct wmi_device_id yogabook_wmi_id_table[] = {
+ {
+ .guid_string = YB_MBTN_EVENT_GUID,
+ },
+ { } /* Terminating entry */
+};
+
+static SIMPLE_DEV_PM_OPS(yogabook_wmi_pm_ops,
+ yogabook_wmi_suspend, yogabook_wmi_resume);
+
+static struct wmi_driver yogabook_wmi_driver = {
+ .driver = {
+ .name = "yogabook-wmi",
+ .pm = &yogabook_wmi_pm_ops,
+ },
+ .no_notify_data = true,
+ .id_table = yogabook_wmi_id_table,
+ .probe = yogabook_wmi_probe,
+ .remove = yogabook_wmi_remove,
+ .notify = yogabook_wmi_notify,
+};
+module_wmi_driver(yogabook_wmi_driver);
+
+MODULE_DEVICE_TABLE(wmi, yogabook_wmi_id_table);
+MODULE_AUTHOR("Yauhen Kharuzhy");
+MODULE_DESCRIPTION("Lenovo Yoga Book WMI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index a9d2a4b98e57..a40fae6edc84 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_data/x86/pmc_atom.h>
+#include <linux/platform_data/x86/simatic-ipc.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
@@ -362,6 +363,30 @@ static void pmc_dbgfs_register(struct pmc_dev *pmc)
}
#endif /* CONFIG_DEBUG_FS */
+static bool pmc_clk_is_critical = true;
+
+static int dmi_callback(const struct dmi_system_id *d)
+{
+ pr_info("%s critclks quirk enabled\n", d->ident);
+
+ return 1;
+}
+
+static int dmi_callback_siemens(const struct dmi_system_id *d)
+{
+ u32 st_id;
+
+ if (dmi_walk(simatic_ipc_find_dmi_entry_helper, &st_id))
+ goto out;
+
+ if (st_id == SIMATIC_IPC_IPC227E || st_id == SIMATIC_IPC_IPC277E)
+ return dmi_callback(d);
+
+out:
+ pmc_clk_is_critical = false;
+ return 1;
+}
+
/*
* Some systems need one or more of their pmc_plt_clks to be
* marked as critical.
@@ -370,6 +395,7 @@ static const struct dmi_system_id critclk_systems[] = {
{
/* pmc_plt_clk0 is used for an external HSIC USB HUB */
.ident = "MPL CEC1x",
+ .callback = dmi_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
@@ -378,6 +404,7 @@ static const struct dmi_system_id critclk_systems[] = {
{
/* pmc_plt_clk0 - 3 are used for the 4 ethernet controllers */
.ident = "Lex 3I380D",
+ .callback = dmi_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"),
@@ -386,6 +413,7 @@ static const struct dmi_system_id critclk_systems[] = {
{
/* pmc_plt_clk* - are used for ethernet controllers */
.ident = "Lex 2I385SW",
+ .callback = dmi_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
DMI_MATCH(DMI_PRODUCT_NAME, "2I385SW"),
@@ -394,30 +422,17 @@ static const struct dmi_system_id critclk_systems[] = {
{
/* pmc_plt_clk* - are used for ethernet controllers */
.ident = "Beckhoff Baytrail",
+ .callback = dmi_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
},
},
{
- .ident = "SIMATIC IPC227E",
+ .ident = "SIEMENS AG",
+ .callback = dmi_callback_siemens,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "6ES7647-8B"),
- },
- },
- {
- .ident = "SIMATIC IPC277E",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "6AV7882-0"),
- },
- },
- {
- .ident = "CONNECT X300",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"),
},
},
@@ -429,7 +444,6 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
{
struct platform_device *clkdev;
struct pmc_clk_data *clk_data;
- const struct dmi_system_id *d = dmi_first_match(critclk_systems);
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
@@ -437,10 +451,8 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
clk_data->base = pmc_regmap; /* offset is added by client */
clk_data->clks = pmc_data->clks;
- if (d) {
- clk_data->critical = true;
- pr_info("%s critclks quirk enabled\n", d->ident);
- }
+ if (dmi_check_system(critclk_systems))
+ clk_data->critical = pmc_clk_is_critical;
clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
PLATFORM_DEVID_NONE,
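/* The rework above relies on the dmi_check_system() contract: every matching
 * entry's ->callback is invoked, a non-zero return stops the scan, and the
 * number of matches is returned. Roughly (a sketch, not verbatim kernel code):
 *
 *   for (d = table; !end_of_table(d); d++)
 *           if (dmi_matches(d)) {
 *                   count++;
 *                   if (d->callback && d->callback(d))
 *                           break;
 *           }
 *
 * dmi_callback() keeps pmc_clk_is_critical at its default of true, while
 * dmi_callback_siemens() clears it for Siemens machines whose station id is
 * not an IPC227E/IPC277E.
 */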
diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
new file mode 100644
index 000000000000..b599cda5ba3c
--- /dev/null
+++ b/drivers/platform/x86/simatic-ipc.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC platform driver
+ *
+ * Copyright (c) Siemens AG, 2018-2021
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/simatic-ipc.h>
+#include <linux/platform_device.h>
+
+static struct platform_device *ipc_led_platform_device;
+static struct platform_device *ipc_wdt_platform_device;
+
+static const struct dmi_system_id simatic_ipc_whitelist[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
+ },
+ },
+ {}
+};
+
+static struct simatic_ipc_platform platform_data;
+
+static struct {
+ u32 station_id;
+ u8 led_mode;
+ u8 wdt_mode;
+} device_modes[] = {
+ {SIMATIC_IPC_IPC127E, SIMATIC_IPC_DEVICE_127E, SIMATIC_IPC_DEVICE_NONE},
+ {SIMATIC_IPC_IPC227D, SIMATIC_IPC_DEVICE_227D, SIMATIC_IPC_DEVICE_NONE},
+ {SIMATIC_IPC_IPC227E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_227E},
+ {SIMATIC_IPC_IPC277E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227E},
+ {SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
+ {SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
+ {SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
+};
+
+static int register_platform_devices(u32 station_id)
+{
+ u8 ledmode = SIMATIC_IPC_DEVICE_NONE;
+ u8 wdtmode = SIMATIC_IPC_DEVICE_NONE;
+ int i;
+
+ platform_data.devmode = SIMATIC_IPC_DEVICE_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(device_modes); i++) {
+ if (device_modes[i].station_id == station_id) {
+ ledmode = device_modes[i].led_mode;
+ wdtmode = device_modes[i].wdt_mode;
+ break;
+ }
+ }
+
+ if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
+ platform_data.devmode = ledmode;
+ ipc_led_platform_device =
+ platform_device_register_data(NULL,
+ KBUILD_MODNAME "_leds", PLATFORM_DEVID_NONE,
+ &platform_data,
+ sizeof(struct simatic_ipc_platform));
+ if (IS_ERR(ipc_led_platform_device))
+ return PTR_ERR(ipc_led_platform_device);
+
+ pr_debug("device=%s created\n",
+ ipc_led_platform_device->name);
+ }
+
+ if (wdtmode != SIMATIC_IPC_DEVICE_NONE) {
+ platform_data.devmode = wdtmode;
+ ipc_wdt_platform_device =
+ platform_device_register_data(NULL,
+ KBUILD_MODNAME "_wdt", PLATFORM_DEVID_NONE,
+ &platform_data,
+ sizeof(struct simatic_ipc_platform));
+ if (IS_ERR(ipc_wdt_platform_device))
+ return PTR_ERR(ipc_wdt_platform_device);
+
+ pr_debug("device=%s created\n",
+ ipc_wdt_platform_device->name);
+ }
+
+ if (ledmode == SIMATIC_IPC_DEVICE_NONE &&
+ wdtmode == SIMATIC_IPC_DEVICE_NONE) {
+ pr_warn("unsupported IPC detected, station id=%08x\n",
+ station_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
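/* The devices registered above are named after this module; the LED and
 * watchdog drivers are thus expected to bind against "simatic_ipc_leds" and
 * "simatic_ipc_wdt" platform devices. A hedged sketch of such a consumer
 * (names are illustrative; the real drivers live outside this diff):
 */
static int example_leds_probe(struct platform_device *pdev)
{
	struct simatic_ipc_platform *plat = dev_get_platdata(&pdev->dev);

	/* plat->devmode selects the register layout for this IPC model */
	return plat->devmode != SIMATIC_IPC_DEVICE_NONE ? 0 : -ENODEV;
}

static struct platform_driver example_led_driver = {
	.probe = example_leds_probe,
	.driver = {
		.name = "simatic_ipc_leds",	/* matches the device name */
	},
};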
+
+/*
+ * FIXME: this should eventually be done with generic P2SB discovery code.
+ * The individual drivers for watchdogs and LEDs access memory that implements
+ * GPIO, but pinctrl will not come up because of missing ACPI entries.
+ *
+ * While there is no conflict, a cleaner solution would be to somehow bring up
+ * pinctrl even with these ACPI entries missing, and base the drivers on
+ * pinctrl. After that, the following function could be dropped, together with
+ * the code poking the memory.
+ */
+/*
+ * Get the memory base address from PCI, used by the leds and wdt modules.
+ * Here we read BAR0; the final address calculation is done in the respective
+ * modules.
+ */
+u32 simatic_ipc_get_membase0(unsigned int p2sb)
+{
+ struct pci_bus *bus;
+ u32 bar0 = 0;
+ /*
+	 * The GPIO memory is in BAR0 of the hidden P2SB device.
+	 * Unhide the device to have a quick look at it, before we hide it
+	 * again.
+	 * Also grab the PCI rescan lock so that the device does not get
+	 * discovered and remapped while it is visible.
+	 * This code is inspired by drivers/mfd/lpc_ich.c.
+ */
+ bus = pci_find_bus(0, 0);
+ pci_lock_rescan_remove();
+ pci_bus_write_config_byte(bus, p2sb, 0xE1, 0x0);
+ pci_bus_read_config_dword(bus, p2sb, PCI_BASE_ADDRESS_0, &bar0);
+
+ bar0 &= ~0xf;
+ pci_bus_write_config_byte(bus, p2sb, 0xE1, 0x1);
+ pci_unlock_rescan_remove();
+
+ return bar0;
+}
+EXPORT_SYMBOL(simatic_ipc_get_membase0);
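/* A consumer is expected to add its own device-specific offset to the
 * returned BAR0 base and ioremap the result. A hedged sketch; the devfn,
 * EXAMPLE_GPIO_OFFSET and EXAMPLE_GPIO_SIZE are illustrative assumptions,
 * not values taken from this patch:
 */
static void __iomem *example_map_gpio(void)
{
	u32 membase = simatic_ipc_get_membase0(PCI_DEVFN(0x0d, 0));

	if (!membase)
		return NULL;
	return ioremap(membase + EXAMPLE_GPIO_OFFSET, EXAMPLE_GPIO_SIZE);
}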
+
+static int __init simatic_ipc_init_module(void)
+{
+ const struct dmi_system_id *match;
+ u32 station_id;
+ int err;
+
+ match = dmi_first_match(simatic_ipc_whitelist);
+ if (!match)
+ return 0;
+
+ err = dmi_walk(simatic_ipc_find_dmi_entry_helper, &station_id);
+
+ if (err || station_id == SIMATIC_IPC_INVALID_STATION_ID) {
+ pr_warn("DMI entry %d not found\n", SIMATIC_IPC_DMI_ENTRY_OEM);
+ return 0;
+ }
+
+ return register_platform_devices(station_id);
+}
+
+static void __exit simatic_ipc_exit_module(void)
+{
+ platform_device_unregister(ipc_led_platform_device);
+ ipc_led_platform_device = NULL;
+
+ platform_device_unregister(ipc_wdt_platform_device);
+ ipc_wdt_platform_device = NULL;
+}
+
+module_init(simatic_ipc_init_module);
+module_exit(simatic_ipc_exit_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
+MODULE_ALIAS("dmi:*:svnSIEMENSAG:*");
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index c4d9c45350f7..0b73e16cccea 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -128,8 +128,23 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
*/
#define LENOVO_DEBUG_CMD_GUID "7FF47003-3B6C-4E5E-A227-E979824A85D1"
+/*
+ * Name:
+ * Lenovo_OpcodeIF
+ * Description:
+ * Opcode interface which provides the ability to set multiple
+ * parameters and then trigger an action with a final command.
+ * This is particularly useful for simplifying password setting.
+ * With this support comes the ability to set System, HDD and NVMe
+ * passwords.
+ * This is currently available on ThinkCentre and ThinkStation platforms.
+ */
+#define LENOVO_OPCODE_IF_GUID "DFDDEF2C-57D4-48ce-B196-0FB787D90836"
+
#define TLMI_POP_PWD (1 << 0)
#define TLMI_PAP_PWD (1 << 1)
+#define TLMI_HDD_PWD (1 << 2)
+#define TLMI_SYS_PWD (1 << 3)
#define to_tlmi_pwd_setting(kobj) container_of(kobj, struct tlmi_pwd_setting, kobj)
#define to_tlmi_attr_setting(kobj) container_of(kobj, struct tlmi_attr_setting, kobj)
@@ -145,6 +160,10 @@ static const char * const encoding_options[] = {
[TLMI_ENCODING_ASCII] = "ascii",
[TLMI_ENCODING_SCANCODE] = "scancode",
};
+static const char * const level_options[] = {
+ [TLMI_LEVEL_USER] = "user",
+ [TLMI_LEVEL_MASTER] = "master",
+};
static struct think_lmi tlmi_priv;
static struct class *fw_attr_class;
@@ -233,6 +252,7 @@ static int tlmi_get_pwd_settings(struct tlmi_pwdcfg *pwdcfg)
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
const union acpi_object *obj;
acpi_status status;
+ int copy_size;
if (!tlmi_priv.can_get_password_settings)
return -EOPNOTSUPP;
@@ -253,14 +273,21 @@ static int tlmi_get_pwd_settings(struct tlmi_pwdcfg *pwdcfg)
* The size of thinkpad_wmi_pcfg on ThinkStation is larger than ThinkPad.
* To make the driver compatible on different brands, we permit it to get
* the data in below case.
+	 * Settings must have at least the core fields available.
*/
- if (obj->buffer.length < sizeof(struct tlmi_pwdcfg)) {
+ if (obj->buffer.length < sizeof(struct tlmi_pwdcfg_core)) {
pr_warn("Unknown pwdcfg buffer length %d\n", obj->buffer.length);
kfree(obj);
return -EIO;
}
- memcpy(pwdcfg, obj->buffer.pointer, sizeof(struct tlmi_pwdcfg));
+
+ copy_size = obj->buffer.length < sizeof(struct tlmi_pwdcfg) ?
+ obj->buffer.length : sizeof(struct tlmi_pwdcfg);
+ memcpy(pwdcfg, obj->buffer.pointer, copy_size);
kfree(obj);
+
+ if (WARN_ON(pwdcfg->core.max_length >= TLMI_PWD_BUFSIZE))
+ pwdcfg->core.max_length = TLMI_PWD_BUFSIZE - 1;
return 0;
}
@@ -270,6 +297,20 @@ static int tlmi_save_bios_settings(const char *password)
password);
}
+static int tlmi_opcode_setting(char *setting, const char *value)
+{
+ char *opcode_str;
+ int ret;
+
+ opcode_str = kasprintf(GFP_KERNEL, "%s:%s;", setting, value);
+ if (!opcode_str)
+ return -ENOMEM;
+
+ ret = tlmi_simple_call(LENOVO_OPCODE_IF_GUID, opcode_str);
+ kfree(opcode_str);
+ return ret;
+}
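/* Taken together with the store handler below, changing e.g. the power-on
 * password through the opcode interface reduces to a fixed sequence (a sketch
 * derived from the calls that follow; old_pwd/new_pwd are placeholders):
 *
 *   tlmi_opcode_setting("WmiOpcodePasswordType", "pop");
 *   tlmi_opcode_setting("WmiOpcodePasswordAdmin", admin_pwd);   // if one is set
 *   tlmi_opcode_setting("WmiOpcodePasswordCurrent01", old_pwd);
 *   tlmi_opcode_setting("WmiOpcodePasswordNew01", new_pwd);
 *   tlmi_simple_call(LENOVO_OPCODE_IF_GUID, "WmiOpcodePasswordSetUpdate;");
 */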
+
static int tlmi_setting(int item, char **value, const char *guid_string)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -370,16 +411,54 @@ static ssize_t new_password_store(struct kobject *kobj,
goto out;
}
- /* Format: 'PasswordType,CurrentPw,NewPw,Encoding,KbdLang;' */
- auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s,%s,%s;",
- setting->pwd_type, setting->password, new_pwd,
- encoding_options[setting->encoding], setting->kbdlang);
- if (!auth_str) {
- ret = -ENOMEM;
- goto out;
+ /* If opcode support is present use that interface */
+ if (tlmi_priv.opcode_support) {
+ char pwd_type[8];
+
+ /* Special handling required for HDD and NVMe passwords */
+ if (setting == tlmi_priv.pwd_hdd) {
+ if (setting->level == TLMI_LEVEL_USER)
+ sprintf(pwd_type, "uhdp%d", setting->index);
+ else
+ sprintf(pwd_type, "mhdp%d", setting->index);
+ } else if (setting == tlmi_priv.pwd_nvme) {
+ if (setting->level == TLMI_LEVEL_USER)
+ sprintf(pwd_type, "unvp%d", setting->index);
+ else
+ sprintf(pwd_type, "mnvp%d", setting->index);
+ } else {
+ sprintf(pwd_type, "%s", setting->pwd_type);
+ }
+
+ ret = tlmi_opcode_setting("WmiOpcodePasswordType", pwd_type);
+ if (ret)
+ goto out;
+
+ if (tlmi_priv.pwd_admin->valid) {
+ ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
+ tlmi_priv.pwd_admin->password);
+ if (ret)
+ goto out;
+ }
+ ret = tlmi_opcode_setting("WmiOpcodePasswordCurrent01", setting->password);
+ if (ret)
+ goto out;
+ ret = tlmi_opcode_setting("WmiOpcodePasswordNew01", new_pwd);
+ if (ret)
+ goto out;
+ ret = tlmi_simple_call(LENOVO_OPCODE_IF_GUID, "WmiOpcodePasswordSetUpdate;");
+ } else {
+ /* Format: 'PasswordType,CurrentPw,NewPw,Encoding,KbdLang;' */
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s,%s,%s;",
+ setting->pwd_type, setting->password, new_pwd,
+ encoding_options[setting->encoding], setting->kbdlang);
+ if (!auth_str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = tlmi_simple_call(LENOVO_SET_BIOS_PASSWORD_GUID, auth_str);
+ kfree(auth_str);
}
- ret = tlmi_simple_call(LENOVO_SET_BIOS_PASSWORD_GUID, auth_str);
- kfree(auth_str);
out:
kfree(new_pwd);
return ret ?: count;
@@ -475,6 +554,75 @@ static ssize_t role_show(struct kobject *kobj, struct kobj_attribute *attr,
}
static struct kobj_attribute auth_role = __ATTR_RO(role);
+static ssize_t index_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%d\n", setting->index);
+}
+
+static ssize_t index_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ int err, val;
+
+ err = kstrtoint(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ if (val < 0 || val > TLMI_INDEX_MAX)
+ return -EINVAL;
+
+ setting->index = val;
+ return count;
+}
+
+static struct kobj_attribute auth_index = __ATTR_RW(index);
+
+static ssize_t level_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+ return sysfs_emit(buf, "%s\n", level_options[setting->level]);
+}
+
+static ssize_t level_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+ int i;
+
+	/* Scan for a matching level option */
+ i = sysfs_match_string(level_options, buf);
+ if (i < 0)
+ return -EINVAL;
+
+ setting->level = i;
+ return count;
+}
+
+static struct kobj_attribute auth_level = __ATTR_RW(level);
+
+static umode_t auth_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct tlmi_pwd_setting *setting = to_tlmi_pwd_setting(kobj);
+
+	/* We only want to display level and index settings on HDD/NVMe */
+ if ((attr == (struct attribute *)&auth_index) ||
+ (attr == (struct attribute *)&auth_level)) {
+ if ((setting == tlmi_priv.pwd_hdd) || (setting == tlmi_priv.pwd_nvme))
+ return attr->mode;
+ return 0;
+ }
+ return attr->mode;
+}
+
static struct attribute *auth_attrs[] = {
&auth_is_pass_set.attr,
&auth_min_pass_length.attr,
@@ -485,10 +633,13 @@ static struct attribute *auth_attrs[] = {
&auth_mechanism.attr,
&auth_encoding.attr,
&auth_kbdlang.attr,
+ &auth_index.attr,
+ &auth_level.attr,
NULL
};
static const struct attribute_group auth_attr_group = {
+ .is_visible = auth_attr_is_visible,
.attrs = auth_attrs,
};
@@ -752,6 +903,16 @@ static void tlmi_release_attr(void)
kobject_put(&tlmi_priv.pwd_admin->kobj);
sysfs_remove_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group);
kobject_put(&tlmi_priv.pwd_power->kobj);
+
+ if (tlmi_priv.opcode_support) {
+ sysfs_remove_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group);
+ kobject_put(&tlmi_priv.pwd_system->kobj);
+ sysfs_remove_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group);
+ kobject_put(&tlmi_priv.pwd_hdd->kobj);
+ sysfs_remove_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group);
+ kobject_put(&tlmi_priv.pwd_nvme->kobj);
+ }
+
kset_unregister(tlmi_priv.authentication_kset);
}
@@ -831,7 +992,7 @@ static int tlmi_sysfs_init(void)
goto fail_create_attr;
tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
- ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "System");
+ ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "Power-on");
if (ret)
goto fail_create_attr;
@@ -839,6 +1000,35 @@ static int tlmi_sysfs_init(void)
if (ret)
goto fail_create_attr;
+ if (tlmi_priv.opcode_support) {
+ tlmi_priv.pwd_system->kobj.kset = tlmi_priv.authentication_kset;
+ ret = kobject_add(&tlmi_priv.pwd_system->kobj, NULL, "%s", "System");
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group);
+ if (ret)
+ goto fail_create_attr;
+
+ tlmi_priv.pwd_hdd->kobj.kset = tlmi_priv.authentication_kset;
+ ret = kobject_add(&tlmi_priv.pwd_hdd->kobj, NULL, "%s", "HDD");
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group);
+ if (ret)
+ goto fail_create_attr;
+
+ tlmi_priv.pwd_nvme->kobj.kset = tlmi_priv.authentication_kset;
+ ret = kobject_add(&tlmi_priv.pwd_nvme->kobj, NULL, "%s", "NVMe");
+ if (ret)
+ goto fail_create_attr;
+
+ ret = sysfs_create_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group);
+ if (ret)
+ goto fail_create_attr;
+ }
+
return ret;
fail_create_attr:
@@ -851,9 +1041,30 @@ fail_class_created:
}
/* ---- Base Driver -------------------------------------------------------- */
+static struct tlmi_pwd_setting *tlmi_create_auth(const char *pwd_type,
+ const char *pwd_role)
+{
+ struct tlmi_pwd_setting *new_pwd;
+
+ new_pwd = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
+ if (!new_pwd)
+ return NULL;
+
+ strscpy(new_pwd->kbdlang, "us", TLMI_LANG_MAXLEN);
+ new_pwd->encoding = TLMI_ENCODING_ASCII;
+ new_pwd->pwd_type = pwd_type;
+ new_pwd->role = pwd_role;
+ new_pwd->minlen = tlmi_priv.pwdcfg.core.min_length;
+ new_pwd->maxlen = tlmi_priv.pwdcfg.core.max_length;
+ new_pwd->index = 0;
+
+ kobject_init(&new_pwd->kobj, &tlmi_pwd_setting_ktype);
+
+ return new_pwd;
+}
+
static int tlmi_analyze(void)
{
- struct tlmi_pwdcfg pwdcfg;
acpi_status status;
int i, ret;
@@ -873,6 +1084,9 @@ static int tlmi_analyze(void)
if (wmi_has_guid(LENOVO_DEBUG_CMD_GUID))
tlmi_priv.can_debug_cmd = true;
+ if (wmi_has_guid(LENOVO_OPCODE_IF_GUID))
+ tlmi_priv.opcode_support = true;
+
/*
* Try to find the number of valid settings of this machine
* and use it to create sysfs attributes.
@@ -923,49 +1137,69 @@ static int tlmi_analyze(void)
}
/* Create password setting structure */
- ret = tlmi_get_pwd_settings(&pwdcfg);
+ ret = tlmi_get_pwd_settings(&tlmi_priv.pwdcfg);
if (ret)
goto fail_clear_attr;
- tlmi_priv.pwd_admin = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
- if (!tlmi_priv.pwd_admin) {
- ret = -ENOMEM;
+ /* All failures below boil down to kmalloc failures */
+ ret = -ENOMEM;
+
+ tlmi_priv.pwd_admin = tlmi_create_auth("pap", "bios-admin");
+ if (!tlmi_priv.pwd_admin)
goto fail_clear_attr;
- }
- strscpy(tlmi_priv.pwd_admin->kbdlang, "us", TLMI_LANG_MAXLEN);
- tlmi_priv.pwd_admin->encoding = TLMI_ENCODING_ASCII;
- tlmi_priv.pwd_admin->pwd_type = "pap";
- tlmi_priv.pwd_admin->role = "bios-admin";
- tlmi_priv.pwd_admin->minlen = pwdcfg.min_length;
- if (WARN_ON(pwdcfg.max_length >= TLMI_PWD_BUFSIZE))
- pwdcfg.max_length = TLMI_PWD_BUFSIZE - 1;
- tlmi_priv.pwd_admin->maxlen = pwdcfg.max_length;
- if (pwdcfg.password_state & TLMI_PAP_PWD)
+
+ if (tlmi_priv.pwdcfg.core.password_state & TLMI_PAP_PWD)
tlmi_priv.pwd_admin->valid = true;
- kobject_init(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype);
+ tlmi_priv.pwd_power = tlmi_create_auth("pop", "power-on");
+ if (!tlmi_priv.pwd_power)
+ goto fail_clear_attr;
- tlmi_priv.pwd_power = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
- if (!tlmi_priv.pwd_power) {
- ret = -ENOMEM;
- goto fail_free_pwd_admin;
- }
- strscpy(tlmi_priv.pwd_power->kbdlang, "us", TLMI_LANG_MAXLEN);
- tlmi_priv.pwd_power->encoding = TLMI_ENCODING_ASCII;
- tlmi_priv.pwd_power->pwd_type = "pop";
- tlmi_priv.pwd_power->role = "power-on";
- tlmi_priv.pwd_power->minlen = pwdcfg.min_length;
- tlmi_priv.pwd_power->maxlen = pwdcfg.max_length;
-
- if (pwdcfg.password_state & TLMI_POP_PWD)
+ if (tlmi_priv.pwdcfg.core.password_state & TLMI_POP_PWD)
tlmi_priv.pwd_power->valid = true;
- kobject_init(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype);
+ if (tlmi_priv.opcode_support) {
+ tlmi_priv.pwd_system = tlmi_create_auth("sys", "system");
+ if (!tlmi_priv.pwd_system)
+ goto fail_clear_attr;
+
+ if (tlmi_priv.pwdcfg.core.password_state & TLMI_SYS_PWD)
+ tlmi_priv.pwd_system->valid = true;
+ tlmi_priv.pwd_hdd = tlmi_create_auth("hdd", "hdd");
+ if (!tlmi_priv.pwd_hdd)
+ goto fail_clear_attr;
+
+ tlmi_priv.pwd_nvme = tlmi_create_auth("nvm", "nvme");
+ if (!tlmi_priv.pwd_nvme)
+ goto fail_clear_attr;
+
+ if (tlmi_priv.pwdcfg.core.password_state & TLMI_HDD_PWD) {
+ /* Check if PWD is configured and set index to first drive found */
+ if (tlmi_priv.pwdcfg.ext.hdd_user_password ||
+ tlmi_priv.pwdcfg.ext.hdd_master_password) {
+ tlmi_priv.pwd_hdd->valid = true;
+ if (tlmi_priv.pwdcfg.ext.hdd_master_password)
+ tlmi_priv.pwd_hdd->index =
+ ffs(tlmi_priv.pwdcfg.ext.hdd_master_password) - 1;
+ else
+ tlmi_priv.pwd_hdd->index =
+ ffs(tlmi_priv.pwdcfg.ext.hdd_user_password) - 1;
+ }
+ if (tlmi_priv.pwdcfg.ext.nvme_user_password ||
+ tlmi_priv.pwdcfg.ext.nvme_master_password) {
+ tlmi_priv.pwd_nvme->valid = true;
+ if (tlmi_priv.pwdcfg.ext.nvme_master_password)
+ tlmi_priv.pwd_nvme->index =
+ ffs(tlmi_priv.pwdcfg.ext.nvme_master_password) - 1;
+ else
+ tlmi_priv.pwd_nvme->index =
+ ffs(tlmi_priv.pwdcfg.ext.nvme_user_password) - 1;
+ }
+ }
+ }
return 0;
-fail_free_pwd_admin:
- kfree(tlmi_priv.pwd_admin);
fail_clear_attr:
for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
if (tlmi_priv.setting[i]) {
@@ -973,6 +1207,11 @@ fail_clear_attr:
kfree(tlmi_priv.setting[i]);
}
}
+ kfree(tlmi_priv.pwd_admin);
+ kfree(tlmi_priv.pwd_power);
+ kfree(tlmi_priv.pwd_system);
+ kfree(tlmi_priv.pwd_hdd);
+ kfree(tlmi_priv.pwd_nvme);
return ret;
}
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index 2ce5086a5af2..e46c7f383353 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -9,6 +9,7 @@
#define TLMI_SETTINGS_MAXLEN 512
#define TLMI_PWD_BUFSIZE 129
#define TLMI_LANG_MAXLEN 4
+#define TLMI_INDEX_MAX 32
/* Possible error values */
struct tlmi_err_codes {
@@ -21,8 +22,13 @@ enum encoding_option {
TLMI_ENCODING_SCANCODE,
};
+enum level_option {
+ TLMI_LEVEL_USER,
+ TLMI_LEVEL_MASTER,
+};
+
/* password configuration details */
-struct tlmi_pwdcfg {
+struct tlmi_pwdcfg_core {
uint32_t password_mode;
uint32_t password_state;
uint32_t min_length;
@@ -31,6 +37,18 @@ struct tlmi_pwdcfg {
uint32_t supported_keyboard;
};
+struct tlmi_pwdcfg_ext {
+ uint32_t hdd_user_password;
+ uint32_t hdd_master_password;
+ uint32_t nvme_user_password;
+ uint32_t nvme_master_password;
+};
+
+struct tlmi_pwdcfg {
+ struct tlmi_pwdcfg_core core;
+ struct tlmi_pwdcfg_ext ext;
+};
+
/* password setting details */
struct tlmi_pwd_setting {
struct kobject kobj;
@@ -42,6 +60,8 @@ struct tlmi_pwd_setting {
int maxlen;
enum encoding_option encoding;
char kbdlang[TLMI_LANG_MAXLEN];
+	int index; /* Used for HDD and NVMe auth */
+ enum level_option level;
};
/* Attribute setting details */
@@ -61,13 +81,19 @@ struct think_lmi {
bool can_get_password_settings;
bool pending_changes;
bool can_debug_cmd;
+ bool opcode_support;
struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
struct device *class_dev;
struct kset *attribute_kset;
struct kset *authentication_kset;
+
+ struct tlmi_pwdcfg pwdcfg;
struct tlmi_pwd_setting *pwd_admin;
struct tlmi_pwd_setting *pwd_power;
+ struct tlmi_pwd_setting *pwd_system;
+ struct tlmi_pwd_setting *pwd_hdd;
+ struct tlmi_pwd_setting *pwd_nvme;
};
#endif /* !_THINK_LMI_H_ */
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index bb1abb947e1e..098180fb1cfc 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -73,6 +73,7 @@
#include <linux/uaccess.h>
#include <acpi/battery.h>
#include <acpi/video.h>
+#include <drm/drm_privacy_screen_driver.h>
#include "dual_accel_detect.h"
/* ThinkPad CMOS commands */
@@ -157,6 +158,7 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_VOL_UP = 0x1015, /* Volume up or unmute */
TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */
TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
+ TP_HKEY_EV_PRIVACYGUARD_TOGGLE = 0x130f, /* Toggle priv.guard on/off */
/* Reasons for waking up from S3/S4 */
TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
@@ -332,12 +334,10 @@ static struct {
u32 battery_force_primary:1;
u32 input_device_registered:1;
u32 platform_drv_registered:1;
- u32 platform_drv_attrs_registered:1;
u32 sensors_pdrv_registered:1;
- u32 sensors_pdrv_attrs_registered:1;
- u32 sensors_pdev_attrs_registered:1;
u32 hotkey_poll_active:1;
u32 has_adaptive_kbd:1;
+ u32 kbd_lang:1;
} tp_features;
static struct {
@@ -880,14 +880,14 @@ static int dispatch_proc_show(struct seq_file *m, void *v)
static int dispatch_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, dispatch_proc_show, PDE_DATA(inode));
+ return single_open(file, dispatch_proc_show, pde_data(inode));
}
static ssize_t dispatch_proc_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *pos)
{
- struct ibm_struct *ibm = PDE_DATA(file_inode(file));
+ struct ibm_struct *ibm = pde_data(file_inode(file));
char *kernbuf;
int ret;
@@ -983,20 +983,6 @@ static void tpacpi_shutdown_handler(struct platform_device *pdev)
}
}
-static struct platform_driver tpacpi_pdriver = {
- .driver = {
- .name = TPACPI_DRVR_NAME,
- .pm = &tpacpi_pm,
- },
- .shutdown = tpacpi_shutdown_handler,
-};
-
-static struct platform_driver tpacpi_hwmon_pdriver = {
- .driver = {
- .name = TPACPI_HWMON_DRVR_NAME,
- },
-};
-
/*************************************************************************
* sysfs support helpers
*/
@@ -1479,53 +1465,6 @@ static ssize_t uwb_emulstate_store(struct device_driver *drv, const char *buf,
static DRIVER_ATTR_RW(uwb_emulstate);
#endif
-/* --------------------------------------------------------------------- */
-
-static struct driver_attribute *tpacpi_driver_attributes[] = {
- &driver_attr_debug_level, &driver_attr_version,
- &driver_attr_interface_version,
-};
-
-static int __init tpacpi_create_driver_attributes(struct device_driver *drv)
-{
- int i, res;
-
- i = 0;
- res = 0;
- while (!res && i < ARRAY_SIZE(tpacpi_driver_attributes)) {
- res = driver_create_file(drv, tpacpi_driver_attributes[i]);
- i++;
- }
-
-#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
- if (!res && dbg_wlswemul)
- res = driver_create_file(drv, &driver_attr_wlsw_emulstate);
- if (!res && dbg_bluetoothemul)
- res = driver_create_file(drv, &driver_attr_bluetooth_emulstate);
- if (!res && dbg_wwanemul)
- res = driver_create_file(drv, &driver_attr_wwan_emulstate);
- if (!res && dbg_uwbemul)
- res = driver_create_file(drv, &driver_attr_uwb_emulstate);
-#endif
-
- return res;
-}
-
-static void tpacpi_remove_driver_attributes(struct device_driver *drv)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tpacpi_driver_attributes); i++)
- driver_remove_file(drv, tpacpi_driver_attributes[i]);
-
-#ifdef THINKPAD_ACPI_DEBUGFACILITIES
- driver_remove_file(drv, &driver_attr_wlsw_emulstate);
- driver_remove_file(drv, &driver_attr_bluetooth_emulstate);
- driver_remove_file(drv, &driver_attr_wwan_emulstate);
- driver_remove_file(drv, &driver_attr_uwb_emulstate);
-#endif
-}
-
/*************************************************************************
* Firmware Data
*/
@@ -2999,7 +2938,14 @@ static struct attribute *adaptive_kbd_attributes[] = {
NULL
};
+static umode_t hadaptive_kbd_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return tp_features.has_adaptive_kbd ? attr->mode : 0;
+}
+
static const struct attribute_group adaptive_kbd_attr_group = {
+ .is_visible = hadaptive_kbd_attr_is_visible,
.attrs = adaptive_kbd_attributes,
};
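/* This hunk is the first of several below (bluetooth, wan, cmos, thermal,
 * temp_label, fan) that replace manual sysfs_create_group() calls with static
 * attribute groups carrying an .is_visible callback: the driver core invokes
 * it when the group is registered, and returning 0 hides the attribute while
 * returning attr->mode exposes it unchanged.
 */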
@@ -3096,8 +3042,6 @@ static void hotkey_exit(void)
hotkey_poll_stop_sync();
mutex_unlock(&hotkey_mutex);
#endif
- sysfs_remove_group(&tpacpi_pdev->dev.kobj, &hotkey_attr_group);
-
dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
"restoring original HKEY status and mask\n");
/* yes, there is a bitwise or below, we want the
@@ -3436,7 +3380,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
str_supported(tp_features.hotkey));
if (!tp_features.hotkey)
- return 1;
+ return -ENODEV;
quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
ARRAY_SIZE(tpacpi_hotkey_qtable));
@@ -3492,14 +3436,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
*/
if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,
"MHKA", "dd", 2)) {
- if (hotkey_adaptive_all_mask != 0) {
+ if (hotkey_adaptive_all_mask != 0)
tp_features.has_adaptive_kbd = true;
- res = sysfs_create_group(
- &tpacpi_pdev->dev.kobj,
- &adaptive_kbd_attr_group);
- if (res)
- goto err_exit;
- }
} else {
tp_features.has_adaptive_kbd = false;
hotkey_adaptive_all_mask = 0x0U;
@@ -3529,7 +3467,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
* the first hotkey_mask_get to return hotkey_orig_mask */
res = hotkey_mask_get();
if (res)
- goto err_exit;
+ return res;
hotkey_orig_mask = hotkey_acpi_mask;
} else {
@@ -3553,9 +3491,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
}
tabletsw_state = hotkey_init_tablet_mode();
- res = sysfs_create_group(&tpacpi_pdev->dev.kobj, &hotkey_attr_group);
- if (res)
- goto err_exit;
/* Set up key map */
keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable,
@@ -3568,8 +3503,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
TPACPI_HOTKEY_MAP_SIZE, GFP_KERNEL);
if (!hotkey_keycode_map) {
pr_err("failed to allocate memory for key map\n");
- res = -ENOMEM;
- goto err_exit;
+ return -ENOMEM;
}
input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
@@ -3650,12 +3584,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
hotkey_poll_setup_safe(true);
return 0;
-
-err_exit:
- sysfs_remove_group(&tpacpi_pdev->dev.kobj, &hotkey_attr_group);
- sysfs_remove_group(&tpacpi_pdev->dev.kobj, &adaptive_kbd_attr_group);
-
- return (res < 0) ? res : 1;
}
/* Thinkpad X1 Carbon support 5 modes including Home mode, Web browser
@@ -3788,6 +3716,30 @@ static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
}
}
+static bool hotkey_notify_extended_hotkey(const u32 hkey)
+{
+ unsigned int scancode;
+
+ switch (hkey) {
+ case TP_HKEY_EV_PRIVACYGUARD_TOGGLE:
+ tpacpi_driver_event(hkey);
+ return true;
+ }
+
+	/* Extended keycodes start at 0x300, and our offset into the map
+	 * is TP_ACPI_HOTKEYSCAN_EXTENDED_START. The calculated scancode
+ * will be positive, but might not be in the correct range.
+ */
+ scancode = (hkey & 0xfff) - (0x300 - TP_ACPI_HOTKEYSCAN_EXTENDED_START);
+ if (scancode >= TP_ACPI_HOTKEYSCAN_EXTENDED_START &&
+ scancode < TPACPI_HOTKEY_MAP_LEN) {
+ tpacpi_input_send_key(scancode);
+ return true;
+ }
+
+ return false;
+}
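/* Worked example for the mapping above: hkey 0x1311 gives
 * scancode = (0x1311 & 0xfff) - (0x300 - TP_ACPI_HOTKEYSCAN_EXTENDED_START)
 *          = TP_ACPI_HOTKEYSCAN_EXTENDED_START + 0x11,
 * which is forwarded through tpacpi_input_send_key() only while it stays
 * below TPACPI_HOTKEY_MAP_LEN. 0x130f never reaches this arithmetic because
 * the TP_HKEY_EV_PRIVACYGUARD_TOGGLE case consumes it first.
 */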
+
static bool hotkey_notify_hotkey(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
@@ -3822,17 +3774,7 @@ static bool hotkey_notify_hotkey(const u32 hkey,
return adaptive_keyboard_hotkey_notify_hotkey(scancode);
case 3:
- /* Extended keycodes start at 0x300 and our offset into the map
- * TP_ACPI_HOTKEYSCAN_EXTENDED_START. The calculated scancode
- * will be positive, but might not be in the correct range.
- */
- scancode -= (0x300 - TP_ACPI_HOTKEYSCAN_EXTENDED_START);
- if (scancode >= TP_ACPI_HOTKEYSCAN_EXTENDED_START &&
- scancode < TPACPI_HOTKEY_MAP_LEN) {
- tpacpi_input_send_key(scancode);
- return true;
- }
- break;
+ return hotkey_notify_extended_hotkey(hkey);
}
return false;
@@ -4386,7 +4328,14 @@ static struct attribute *bluetooth_attributes[] = {
NULL
};
+static umode_t bluetooth_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return tp_features.bluetooth ? attr->mode : 0;
+}
+
static const struct attribute_group bluetooth_attr_group = {
+ .is_visible = bluetooth_attr_is_visible,
.attrs = bluetooth_attributes,
};
@@ -4408,11 +4357,7 @@ static void bluetooth_shutdown(void)
static void bluetooth_exit(void)
{
- sysfs_remove_group(&tpacpi_pdev->dev.kobj,
- &bluetooth_attr_group);
-
tpacpi_destroy_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID);
-
bluetooth_shutdown();
}
@@ -4519,24 +4464,14 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
}
if (!tp_features.bluetooth)
- return 1;
+ return -ENODEV;
res = tpacpi_new_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID,
&bluetooth_tprfk_ops,
RFKILL_TYPE_BLUETOOTH,
TPACPI_RFK_BLUETOOTH_SW_NAME,
true);
- if (res)
- return res;
-
- res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
- &bluetooth_attr_group);
- if (res) {
- tpacpi_destroy_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID);
- return res;
- }
-
- return 0;
+ return res;
}
/* procfs -------------------------------------------------------------- */
@@ -4643,7 +4578,14 @@ static struct attribute *wan_attributes[] = {
NULL
};
+static umode_t wan_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ return tp_features.wan ? attr->mode : 0;
+}
+
static const struct attribute_group wan_attr_group = {
+ .is_visible = wan_attr_is_visible,
.attrs = wan_attributes,
};
@@ -4665,11 +4607,7 @@ static void wan_shutdown(void)
static void wan_exit(void)
{
- sysfs_remove_group(&tpacpi_pdev->dev.kobj,
- &wan_attr_group);
-
tpacpi_destroy_rfkill(TPACPI_RFK_WWAN_SW_ID);
-
wan_shutdown();
}
@@ -4706,25 +4644,14 @@ static int __init wan_init(struct ibm_init_struct *iibm)
}
if (!tp_features.wan)
- return 1;
+ return -ENODEV;
res = tpacpi_new_rfkill(TPACPI_RFK_WWAN_SW_ID,
&wan_tprfk_ops,
RFKILL_TYPE_WWAN,
TPACPI_RFK_WWAN_SW_NAME,
true);
- if (res)
- return res;
-
- res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
- &wan_attr_group);
-
- if (res) {
- tpacpi_destroy_rfkill(TPACPI_RFK_WWAN_SW_ID);
- return res;
- }
-
- return 0;
+ return res;
}
/* procfs -------------------------------------------------------------- */
@@ -4846,7 +4773,7 @@ static int __init uwb_init(struct ibm_init_struct *iibm)
}
if (!tp_features.uwb)
- return 1;
+ return -ENODEV;
res = tpacpi_new_rfkill(TPACPI_RFK_UWB_SW_ID,
&uwb_tprfk_ops,
@@ -4939,7 +4866,7 @@ static int __init video_init(struct ibm_init_struct *iibm)
str_supported(video_supported != TPACPI_VIDEO_NONE),
video_supported);
- return (video_supported != TPACPI_VIDEO_NONE) ? 0 : 1;
+ return (video_supported != TPACPI_VIDEO_NONE) ? 0 : -ENODEV;
}
static void video_exit(void)
@@ -5347,7 +5274,7 @@ static int __init kbdlight_init(struct ibm_init_struct *iibm)
if (!kbdlight_is_supported()) {
tp_features.kbdlight = 0;
vdbg_printk(TPACPI_DBG_INIT, "kbdlight is unsupported\n");
- return 1;
+ return -ENODEV;
}
kbdlight_brightness = kbdlight_sysfs_get(NULL);
@@ -5537,7 +5464,7 @@ static int __init light_init(struct ibm_init_struct *iibm)
str_supported(tp_features.light_status));
if (!tp_features.light)
- return 1;
+ return -ENODEV;
rc = led_classdev_register(&tpacpi_pdev->dev,
&tpacpi_led_thinklight.led_classdev);
@@ -5625,30 +5552,35 @@ static ssize_t cmos_command_store(struct device *dev,
static DEVICE_ATTR_WO(cmos_command);
+static struct attribute *cmos_attributes[] = {
+ &dev_attr_cmos_command.attr,
+ NULL
+};
+
+static umode_t cmos_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return cmos_handle ? attr->mode : 0;
+}
+
+static const struct attribute_group cmos_attr_group = {
+ .is_visible = cmos_attr_is_visible,
+ .attrs = cmos_attributes,
+};
+
/* --------------------------------------------------------------------- */
static int __init cmos_init(struct ibm_init_struct *iibm)
{
- int res;
-
vdbg_printk(TPACPI_DBG_INIT,
- "initializing cmos commands subdriver\n");
+ "initializing cmos commands subdriver\n");
TPACPI_ACPIHANDLE_INIT(cmos);
vdbg_printk(TPACPI_DBG_INIT, "cmos commands are %s\n",
- str_supported(cmos_handle != NULL));
-
- res = device_create_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
- if (res)
- return res;
-
- return (cmos_handle) ? 0 : 1;
-}
+ str_supported(cmos_handle != NULL));
-static void cmos_exit(void)
-{
- device_remove_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
+ return cmos_handle ? 0 : -ENODEV;
}
static int cmos_read(struct seq_file *m)
@@ -5689,7 +5621,6 @@ static struct ibm_struct cmos_driver_data = {
.name = "cmos",
.read = cmos_read,
.write = cmos_write,
- .exit = cmos_exit,
};
/*************************************************************************
@@ -5894,6 +5825,7 @@ static int __init tpacpi_init_led(unsigned int led)
tpacpi_leds[led].led_classdev.brightness_get = &led_sysfs_get;
tpacpi_leds[led].led_classdev.name = tpacpi_led_names[led];
+ tpacpi_leds[led].led_classdev.flags = LED_RETAIN_AT_SHUTDOWN;
tpacpi_leds[led].led = led;
return led_classdev_register(&tpacpi_pdev->dev, &tpacpi_leds[led].led_classdev);
@@ -5994,7 +5926,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
str_supported(led_supported), led_supported);
if (led_supported == TPACPI_LED_NONE)
- return 1;
+ return -ENODEV;
tpacpi_leds = kcalloc(TPACPI_LED_NUMLEDS, sizeof(*tpacpi_leds),
GFP_KERNEL);
@@ -6123,7 +6055,7 @@ static int __init beep_init(struct ibm_init_struct *iibm)
tp_features.beep_needs_two_args = !!(quirks & TPACPI_BEEP_Q1);
- return (beep_handle) ? 0 : 1;
+ return (beep_handle) ? 0 : -ENODEV;
}
static int beep_read(struct seq_file *m)
@@ -6200,7 +6132,6 @@ struct ibm_thermal_sensors_struct {
};
static enum thermal_access_mode thermal_read_mode;
-static const struct attribute_group *thermal_attr_group;
static bool thermal_use_labels;
/* idx is zero-based */
@@ -6354,14 +6285,6 @@ static struct sensor_device_attribute sensor_dev_attr_thermal_temp_input[] = {
&sensor_dev_attr_thermal_temp_input[X].dev_attr.attr
static struct attribute *thermal_temp_input_attr[] = {
- THERMAL_ATTRS(8),
- THERMAL_ATTRS(9),
- THERMAL_ATTRS(10),
- THERMAL_ATTRS(11),
- THERMAL_ATTRS(12),
- THERMAL_ATTRS(13),
- THERMAL_ATTRS(14),
- THERMAL_ATTRS(15),
THERMAL_ATTRS(0),
THERMAL_ATTRS(1),
THERMAL_ATTRS(2),
@@ -6370,15 +6293,37 @@ static struct attribute *thermal_temp_input_attr[] = {
THERMAL_ATTRS(5),
THERMAL_ATTRS(6),
THERMAL_ATTRS(7),
+ THERMAL_ATTRS(8),
+ THERMAL_ATTRS(9),
+ THERMAL_ATTRS(10),
+ THERMAL_ATTRS(11),
+ THERMAL_ATTRS(12),
+ THERMAL_ATTRS(13),
+ THERMAL_ATTRS(14),
+ THERMAL_ATTRS(15),
NULL
};
-static const struct attribute_group thermal_temp_input16_group = {
- .attrs = thermal_temp_input_attr
-};
+static umode_t thermal_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (thermal_read_mode == TPACPI_THERMAL_NONE)
+ return 0;
+
+ if (attr == THERMAL_ATTRS(8) || attr == THERMAL_ATTRS(9) ||
+ attr == THERMAL_ATTRS(10) || attr == THERMAL_ATTRS(11) ||
+ attr == THERMAL_ATTRS(12) || attr == THERMAL_ATTRS(13) ||
+ attr == THERMAL_ATTRS(14) || attr == THERMAL_ATTRS(15)) {
+ if (thermal_read_mode != TPACPI_THERMAL_TPEC_16)
+ return 0;
+ }
+
+ return attr->mode;
+}
-static const struct attribute_group thermal_temp_input8_group = {
- .attrs = &thermal_temp_input_attr[8]
+static const struct attribute_group thermal_attr_group = {
+ .is_visible = thermal_attr_is_visible,
+ .attrs = thermal_temp_input_attr,
};
#undef THERMAL_SENSOR_ATTR_TEMP
@@ -6402,7 +6347,14 @@ static struct attribute *temp_label_attributes[] = {
NULL
};
+static umode_t temp_label_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return thermal_use_labels ? attr->mode : 0;
+}
+
static const struct attribute_group temp_label_attr_group = {
+ .is_visible = temp_label_attr_is_visible,
.attrs = temp_label_attributes,
};
@@ -6413,7 +6365,6 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
u8 t, ta1, ta2, ver = 0;
int i;
int acpi_tmp7;
- int res;
vdbg_printk(TPACPI_DBG_INIT, "initializing thermal subdriver\n");
@@ -6488,42 +6439,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
str_supported(thermal_read_mode != TPACPI_THERMAL_NONE),
thermal_read_mode);
- switch (thermal_read_mode) {
- case TPACPI_THERMAL_TPEC_16:
- thermal_attr_group = &thermal_temp_input16_group;
- break;
- case TPACPI_THERMAL_TPEC_8:
- case TPACPI_THERMAL_ACPI_TMP07:
- case TPACPI_THERMAL_ACPI_UPDT:
- thermal_attr_group = &thermal_temp_input8_group;
- break;
- case TPACPI_THERMAL_NONE:
- default:
- return 1;
- }
-
- res = sysfs_create_group(&tpacpi_hwmon->kobj, thermal_attr_group);
- if (res)
- return res;
-
- if (thermal_use_labels) {
- res = sysfs_create_group(&tpacpi_hwmon->kobj, &temp_label_attr_group);
- if (res) {
- sysfs_remove_group(&tpacpi_hwmon->kobj, thermal_attr_group);
- return res;
- }
- }
-
- return 0;
-}
-
-static void thermal_exit(void)
-{
- if (thermal_attr_group)
- sysfs_remove_group(&tpacpi_hwmon->kobj, thermal_attr_group);
-
- if (thermal_use_labels)
- sysfs_remove_group(&tpacpi_hwmon->kobj, &temp_label_attr_group);
+ return thermal_read_mode != TPACPI_THERMAL_NONE ? 0 : -ENODEV;
}
static int thermal_read(struct seq_file *m)
@@ -6550,7 +6466,6 @@ static int thermal_read(struct seq_file *m)
static struct ibm_struct thermal_driver_data = {
.name = "thermal",
.read = thermal_read,
- .exit = thermal_exit,
};
/*************************************************************************
@@ -6935,25 +6850,25 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
/* if it is unknown, we don't handle it: it wouldn't be safe */
if (tp_features.bright_unkfw)
- return 1;
+ return -ENODEV;
if (!brightness_enable) {
dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
"brightness support disabled by module parameter\n");
- return 1;
+ return -ENODEV;
}
if (acpi_video_get_backlight_type() != acpi_backlight_vendor) {
if (brightness_enable > 1) {
pr_info("Standard ACPI backlight interface available, not loading native one\n");
- return 1;
+ return -ENODEV;
} else if (brightness_enable == 1) {
pr_warn("Cannot enable backlight brightness support, ACPI is already handling it. Refer to the acpi_backlight kernel parameter.\n");
- return 1;
+ return -ENODEV;
}
} else if (!tp_features.bright_acpimode) {
pr_notice("ACPI backlight interface not available\n");
- return 1;
+ return -ENODEV;
}
pr_notice("ACPI native brightness control enabled\n");
@@ -6986,7 +6901,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
return -EINVAL;
if (tpacpi_brightness_get_raw(&b) < 0)
- return 1;
+ return -ENODEV;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
@@ -7576,7 +7491,7 @@ static int __init volume_create_alsa_mixer(void)
sizeof(struct tpacpi_alsa_data), &card);
if (rc < 0 || !card) {
pr_err("Failed to create ALSA card structures: %d\n", rc);
- return 1;
+ return -ENODEV;
}
BUG_ON(!card->private_data);
@@ -7635,7 +7550,7 @@ static int __init volume_create_alsa_mixer(void)
err_exit:
snd_card_free(card);
- return 1;
+ return -ENODEV;
}
#define TPACPI_VOL_Q_MUTEONLY 0x0001 /* Mute-only control available */
@@ -7684,7 +7599,7 @@ static int __init volume_init(struct ibm_init_struct *iibm)
if (volume_mode == TPACPI_VOL_MODE_UCMS_STEP) {
pr_err("UCMS step volume mode not implemented, please contact %s\n",
TPACPI_MAIL);
- return 1;
+ return -ENODEV;
}
if (volume_capabilities >= TPACPI_VOL_CAP_MAX)
@@ -7697,7 +7612,7 @@ static int __init volume_init(struct ibm_init_struct *iibm)
if (!alsa_enable) {
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
"ALSA mixer disabled by parameter, not loading volume subdriver...\n");
- return 1;
+ return -ENODEV;
}
quirks = tpacpi_check_quirks(volume_quirk_table,
@@ -7710,7 +7625,7 @@ static int __init volume_init(struct ibm_init_struct *iibm)
else if (quirks & TPACPI_VOL_Q_LEVEL)
tp_features.mixer_no_level_control = 0;
else
- return 1; /* no mixer */
+ return -ENODEV; /* no mixer */
break;
case TPACPI_VOL_CAP_VOLMUTE:
tp_features.mixer_no_level_control = 0;
@@ -7719,7 +7634,7 @@ static int __init volume_init(struct ibm_init_struct *iibm)
tp_features.mixer_no_level_control = 1;
break;
default:
- return 1;
+ return -ENODEV;
}
if (volume_capabilities != TPACPI_VOL_CAP_AUTO)
@@ -7891,7 +7806,7 @@ static int __init volume_init(struct ibm_init_struct *iibm)
{
pr_info("volume: disabled as there is no ALSA support in this kernel\n");
- return 1;
+ return -ENODEV;
}
static struct ibm_struct volume_driver_data = {
@@ -8725,17 +8640,45 @@ static ssize_t fan_watchdog_store(struct device_driver *drv, const char *buf,
static DRIVER_ATTR_RW(fan_watchdog);
/* --------------------------------------------------------------------- */
+
static struct attribute *fan_attributes[] = {
- &dev_attr_pwm1_enable.attr, &dev_attr_pwm1.attr,
+ &dev_attr_pwm1_enable.attr,
+ &dev_attr_pwm1.attr,
&dev_attr_fan1_input.attr,
- NULL, /* for fan2_input */
+ &dev_attr_fan2_input.attr,
NULL
};
+static umode_t fan_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ if (fan_status_access_mode == TPACPI_FAN_NONE &&
+ fan_control_access_mode == TPACPI_FAN_WR_NONE)
+ return 0;
+
+ if (attr == &dev_attr_fan2_input.attr) {
+ if (!tp_features.second_fan)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
static const struct attribute_group fan_attr_group = {
+ .is_visible = fan_attr_is_visible,
.attrs = fan_attributes,
};
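+/*
+ * fan_watchdog is a driver attribute; it reuses fan_attr_is_visible() so
+ * it is hidden whenever neither fan status nor fan control is accessible.
+ */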
+static struct attribute *fan_driver_attributes[] = {
+ &driver_attr_fan_watchdog.attr,
+ NULL
+};
+
+static const struct attribute_group fan_driver_attr_group = {
+ .is_visible = fan_attr_is_visible,
+ .attrs = fan_driver_attributes,
+};
+
#define TPACPI_FAN_Q1	0x0001		/* Uninitialized HFSP */
#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
@@ -8763,7 +8706,6 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
static int __init fan_init(struct ibm_init_struct *iibm)
{
- int rc;
unsigned long quirks;
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
@@ -8810,7 +8752,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
}
} else {
pr_err("ThinkPad ACPI EC access misbehaving, fan status and control unavailable\n");
- return 1;
+ return -ENODEV;
}
}
@@ -8859,28 +8801,11 @@ static int __init fan_init(struct ibm_init_struct *iibm)
if (fan_status_access_mode != TPACPI_FAN_NONE)
fan_get_status_safe(NULL);
- if (fan_status_access_mode != TPACPI_FAN_NONE ||
- fan_control_access_mode != TPACPI_FAN_WR_NONE) {
- if (tp_features.second_fan) {
- /* attach second fan tachometer */
- fan_attributes[ARRAY_SIZE(fan_attributes)-2] =
- &dev_attr_fan2_input.attr;
- }
- rc = sysfs_create_group(&tpacpi_hwmon->kobj,
- &fan_attr_group);
- if (rc < 0)
- return rc;
+ if (fan_status_access_mode == TPACPI_FAN_NONE &&
+ fan_control_access_mode == TPACPI_FAN_WR_NONE)
+ return -ENODEV;
- rc = driver_create_file(&tpacpi_hwmon_pdriver.driver,
- &driver_attr_fan_watchdog);
- if (rc < 0) {
- sysfs_remove_group(&tpacpi_hwmon->kobj,
- &fan_attr_group);
- return rc;
- }
- return 0;
- } else
- return 1;
+ return 0;
}
static void fan_exit(void)
@@ -8888,11 +8813,6 @@ static void fan_exit(void)
vdbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_FAN,
"cancelling any pending fan watchdog tasks\n");
- /* FIXME: can we really do this unconditionally? */
- sysfs_remove_group(&tpacpi_hwmon->kobj, &fan_attr_group);
- driver_remove_file(&tpacpi_hwmon_pdriver.driver,
- &driver_attr_fan_watchdog);
-
cancel_delayed_work(&fan_watchdog_task);
flush_workqueue(tpacpi_wq);
}
@@ -9310,6 +9230,10 @@ static struct ibm_struct mute_led_driver_data = {
#define SET_START "BCCS"
#define GET_STOP "BCSG"
#define SET_STOP "BCSS"
+#define GET_DISCHARGE "BDSG"
+#define SET_DISCHARGE "BDSS"
+#define GET_INHIBIT "BICG"
+#define SET_INHIBIT "BICS"
enum {
BAT_ANY = 0,
@@ -9326,6 +9250,8 @@ enum {
/* This is used in the get/set helpers */
THRESHOLD_START,
THRESHOLD_STOP,
+ FORCE_DISCHARGE,
+ INHIBIT_CHARGE,
};
struct tpacpi_battery_data {
@@ -9333,6 +9259,7 @@ struct tpacpi_battery_data {
int start_support;
int charge_stop;
int stop_support;
+ unsigned int charge_behaviours;
};
struct tpacpi_battery_driver_data {
@@ -9390,6 +9317,18 @@ static int tpacpi_battery_get(int what, int battery, int *ret)
if (*ret == 0)
*ret = 100;
return 0;
+ case FORCE_DISCHARGE:
+		if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_DISCHARGE, ret, battery)))
+ return -ENODEV;
+ /* The force discharge status is in bit 0 */
+ *ret = *ret & 0x01;
+ return 0;
+ case INHIBIT_CHARGE:
+		if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_INHIBIT, ret, battery)))
+ return -ENODEV;
+ /* The inhibit charge status is in bit 0 */
+ *ret = *ret & 0x01;
+ return 0;
default:
pr_crit("wrong parameter: %d", what);
return -EINVAL;
@@ -9418,12 +9357,65 @@ static int tpacpi_battery_set(int what, int battery, int value)
return -ENODEV;
}
return 0;
+ case FORCE_DISCHARGE:
+		/*
+		 * Force discharge is in bit 0; break on AC attach is in bit 1
+		 * (won't work on some ThinkPads); the battery ID is in
+		 * bits 8-9 (2 bits).
+		 */
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(SET_DISCHARGE, &ret, param))) {
+			pr_err("failed to set force discharge on battery %d\n", battery);
+ return -ENODEV;
+ }
+ return 0;
+ case INHIBIT_CHARGE:
+		/*
+		 * When setting inhibit charge, we default to always breaking
+		 * on AC detach, and the effective time is set to be permanent.
+		 * The battery ID is in bits 4-5 (2 bits), the effective time
+		 * is in bits 8-23 (2 bytes); a time of 0xFFFF means forever.
+		 */
+ param = value;
+ param |= battery << 4;
+ param |= 0xFFFF << 8;
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(SET_INHIBIT, &ret, param))) {
+			pr_err("failed to set inhibit charge on battery %d\n", battery);
+ return -ENODEV;
+ }
+ return 0;
default:
pr_crit("wrong parameter: %d", what);
return -EINVAL;
}
}
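+/*
+ * Set a behaviour value and read it back to verify it took effect; retry the
+ * read once after 500 ms, as the EC may apply the change with a small delay.
+ */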
+static int tpacpi_battery_set_validate(int what, int battery, int value)
+{
+ int ret, v;
+
+ ret = tpacpi_battery_set(what, battery, value);
+ if (ret < 0)
+ return ret;
+
+ ret = tpacpi_battery_get(what, battery, &v);
+ if (ret < 0)
+ return ret;
+
+ if (v == value)
+ return 0;
+
+ msleep(500);
+
+ ret = tpacpi_battery_get(what, battery, &v);
+ if (ret < 0)
+ return ret;
+
+ if (v == value)
+ return 0;
+
+ return -EIO;
+}
+
static int tpacpi_battery_probe(int battery)
{
int ret = 0;
@@ -9436,6 +9428,10 @@ static int tpacpi_battery_probe(int battery)
* 2) Check for support
* 3) Get the current stop threshold
* 4) Check for support
+ * 5) Get the current force discharge status
+ * 6) Check for support
+ * 7) Get the current inhibit charge status
+ * 8) Check for support
*/
if (acpi_has_method(hkey_handle, GET_START)) {
		if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, &ret, battery))) {
@@ -9472,10 +9468,35 @@ static int tpacpi_battery_probe(int battery)
return -ENODEV;
}
}
- pr_info("battery %d registered (start %d, stop %d)",
- battery,
- battery_info.batteries[battery].charge_start,
- battery_info.batteries[battery].charge_stop);
+ if (acpi_has_method(hkey_handle, GET_DISCHARGE)) {
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_DISCHARGE, &ret, battery))) {
+			pr_err("Error probing battery %d force discharge\n", battery);
+ return -ENODEV;
+ }
+ /* Support is marked in bit 8 */
+ if (ret & BIT(8))
+ battery_info.batteries[battery].charge_behaviours |=
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE);
+ }
+ if (acpi_has_method(hkey_handle, GET_INHIBIT)) {
+ if (ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_INHIBIT, &ret, battery))) {
+			pr_err("Error probing battery %d inhibit charge\n", battery);
+ return -ENODEV;
+ }
+ /* Support is marked in bit 5 */
+ if (ret & BIT(5))
+ battery_info.batteries[battery].charge_behaviours |=
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE);
+ }
+
+ battery_info.batteries[battery].charge_behaviours |=
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO);
+
+ pr_info("battery %d registered (start %d, stop %d, behaviours: 0x%x)\n",
+ battery,
+ battery_info.batteries[battery].charge_start,
+ battery_info.batteries[battery].charge_stop,
+ battery_info.batteries[battery].charge_behaviours);
return 0;
}
@@ -9610,6 +9631,40 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
return tpacpi_battery_show(THRESHOLD_STOP, device, buf);
}
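+/*
+ * Report the active charge behaviour; force-discharge is checked first and
+ * thus takes precedence over inhibit-charge, with "auto" as the fallback.
+ */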
+static ssize_t charge_behaviour_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum power_supply_charge_behaviour active = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+ struct power_supply *supply = to_power_supply(dev);
+ unsigned int available;
+ int ret, battery;
+
+ battery = tpacpi_battery_get_id(supply->desc->name);
+ available = battery_info.batteries[battery].charge_behaviours;
+
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE)) {
+ if (tpacpi_battery_get(FORCE_DISCHARGE, battery, &ret))
+ return -ENODEV;
+ if (ret) {
+ active = POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE;
+ goto out;
+ }
+ }
+
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE)) {
+ if (tpacpi_battery_get(INHIBIT_CHARGE, battery, &ret))
+ return -ENODEV;
+ if (ret) {
+ active = POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE;
+ goto out;
+ }
+ }
+
+out:
+ return power_supply_charge_behaviour_show(dev, available, active, buf);
+}
+
static ssize_t charge_control_start_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -9624,8 +9679,55 @@ static ssize_t charge_control_end_threshold_store(struct device *dev,
return tpacpi_battery_store(THRESHOLD_STOP, dev, buf, count);
}
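+/*
+ * When switching behaviour, clear whichever conflicting mode the battery
+ * supports before enabling the requested one, validating every step.
+ */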
+static ssize_t charge_behaviour_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct power_supply *supply = to_power_supply(dev);
+ int selected, battery, ret = 0;
+ unsigned int available;
+
+ battery = tpacpi_battery_get_id(supply->desc->name);
+ available = battery_info.batteries[battery].charge_behaviours;
+ selected = power_supply_charge_behaviour_parse(available, buf);
+
+ if (selected < 0)
+ return selected;
+
+ switch (selected) {
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO:
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE))
+ ret = tpacpi_battery_set_validate(FORCE_DISCHARGE, battery, 0);
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE))
+ ret = min(ret, tpacpi_battery_set_validate(INHIBIT_CHARGE, battery, 0));
+ if (ret < 0)
+ return ret;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE:
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE))
+ ret = tpacpi_battery_set_validate(INHIBIT_CHARGE, battery, 0);
+ ret = min(ret, tpacpi_battery_set_validate(FORCE_DISCHARGE, battery, 1));
+ if (ret < 0)
+ return ret;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE:
+ if (available & BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE))
+ ret = tpacpi_battery_set_validate(FORCE_DISCHARGE, battery, 0);
+ ret = min(ret, tpacpi_battery_set_validate(INHIBIT_CHARGE, battery, 1));
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ dev_err(dev, "Unexpected charge behaviour: %d\n", selected);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
static DEVICE_ATTR_RW(charge_control_start_threshold);
static DEVICE_ATTR_RW(charge_control_end_threshold);
+static DEVICE_ATTR_RW(charge_behaviour);
static struct device_attribute dev_attr_charge_start_threshold = __ATTR(
charge_start_threshold,
0644,
@@ -9644,6 +9746,7 @@ static struct attribute *tpacpi_battery_attrs[] = {
&dev_attr_charge_control_end_threshold.attr,
&dev_attr_charge_start_threshold.attr,
&dev_attr_charge_stop_threshold.attr,
+ &dev_attr_charge_behaviour.attr,
NULL,
};
@@ -9715,69 +9818,85 @@ static struct ibm_struct battery_driver_data = {
* LCD Shadow subdriver, for the Lenovo PrivacyGuard feature
*/
-static int lcdshadow_state;
+static struct drm_privacy_screen *lcdshadow_dev;
+static acpi_handle lcdshadow_get_handle;
+static acpi_handle lcdshadow_set_handle;
-static int lcdshadow_on_off(bool state)
+static int lcdshadow_set_sw_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status state)
{
- acpi_handle set_shadow_handle;
int output;
- if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "SSSS", &set_shadow_handle))) {
- pr_warn("Thinkpad ACPI has no %s interface.\n", "SSSS");
+ if (WARN_ON(!mutex_is_locked(&priv->lock)))
return -EIO;
- }
- if (!acpi_evalf(set_shadow_handle, &output, NULL, "dd", (int)state))
+ if (!acpi_evalf(lcdshadow_set_handle, &output, NULL, "dd", (int)state))
return -EIO;
- lcdshadow_state = state;
+ priv->hw_state = priv->sw_state = state;
return 0;
}
-static int lcdshadow_set(bool on)
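+/* Query the panel state via the GSSS ACPI method; the status is in bit 0. */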
+static void lcdshadow_get_hw_state(struct drm_privacy_screen *priv)
{
- if (lcdshadow_state < 0)
- return lcdshadow_state;
- if (lcdshadow_state == on)
- return 0;
- return lcdshadow_on_off(on);
+ int output;
+
+ if (!acpi_evalf(lcdshadow_get_handle, &output, NULL, "dd", 0))
+ return;
+
+ priv->hw_state = priv->sw_state = output & 0x1;
}
+static const struct drm_privacy_screen_ops lcdshadow_ops = {
+ .set_sw_state = lcdshadow_set_sw_state,
+ .get_hw_state = lcdshadow_get_hw_state,
+};
+
static int tpacpi_lcdshadow_init(struct ibm_init_struct *iibm)
{
- acpi_handle get_shadow_handle;
+ acpi_status status1, status2;
int output;
- if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "GSSS", &get_shadow_handle))) {
- lcdshadow_state = -ENODEV;
+ status1 = acpi_get_handle(hkey_handle, "GSSS", &lcdshadow_get_handle);
+ status2 = acpi_get_handle(hkey_handle, "SSSS", &lcdshadow_set_handle);
+ if (ACPI_FAILURE(status1) || ACPI_FAILURE(status2))
return 0;
- }
- if (!acpi_evalf(get_shadow_handle, &output, NULL, "dd", 0)) {
- lcdshadow_state = -EIO;
+ if (!acpi_evalf(lcdshadow_get_handle, &output, NULL, "dd", 0))
return -EIO;
- }
- if (!(output & 0x10000)) {
- lcdshadow_state = -ENODEV;
+
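+	/* Bit 16 of the GSSS result signals PrivacyGuard hw support */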
+ if (!(output & 0x10000))
return 0;
- }
- lcdshadow_state = output & 0x1;
+
+ lcdshadow_dev = drm_privacy_screen_register(&tpacpi_pdev->dev,
+ &lcdshadow_ops);
+ if (IS_ERR(lcdshadow_dev))
+ return PTR_ERR(lcdshadow_dev);
return 0;
}
+static void lcdshadow_exit(void)
+{
+ drm_privacy_screen_unregister(lcdshadow_dev);
+}
+
static void lcdshadow_resume(void)
{
- if (lcdshadow_state >= 0)
- lcdshadow_on_off(lcdshadow_state);
+ if (!lcdshadow_dev)
+ return;
+
+ mutex_lock(&lcdshadow_dev->lock);
+ lcdshadow_set_sw_state(lcdshadow_dev, lcdshadow_dev->sw_state);
+ mutex_unlock(&lcdshadow_dev->lock);
}
static int lcdshadow_read(struct seq_file *m)
{
- if (lcdshadow_state < 0) {
+ if (!lcdshadow_dev) {
seq_puts(m, "status:\t\tnot supported\n");
} else {
- seq_printf(m, "status:\t\t%d\n", lcdshadow_state);
+ seq_printf(m, "status:\t\t%d\n", lcdshadow_dev->hw_state);
seq_puts(m, "commands:\t0, 1\n");
}
@@ -9789,7 +9908,7 @@ static int lcdshadow_write(char *buf)
char *cmd;
int res, state = -EINVAL;
- if (lcdshadow_state < 0)
+ if (!lcdshadow_dev)
return -ENODEV;
while ((cmd = strsep(&buf, ","))) {
@@ -9801,11 +9920,18 @@ static int lcdshadow_write(char *buf)
if (state >= 2 || state < 0)
return -EINVAL;
- return lcdshadow_set(state);
+ mutex_lock(&lcdshadow_dev->lock);
+ res = lcdshadow_set_sw_state(lcdshadow_dev, state);
+ mutex_unlock(&lcdshadow_dev->lock);
+
+ drm_privacy_screen_call_notifier_chain(lcdshadow_dev);
+
+ return res;
}
static struct ibm_struct lcdshadow_driver_data = {
.name = "lcdshadow",
+ .exit = lcdshadow_exit,
.resume = lcdshadow_resume,
.read = lcdshadow_read,
.write = lcdshadow_write,
@@ -9845,33 +9971,6 @@ static int dytc_command(int command, int *output)
return 0;
}
-static int dytc_get_version(void)
-{
- int err, output;
-
- /* Check if we've been called before - and just return cached value */
- if (dytc_version)
- return dytc_version;
-
- /* Otherwise query DYTC and extract version information */
- err = dytc_command(DYTC_CMD_QUERY, &output);
- /*
- * If support isn't available (ENODEV) then don't return an error
- * and don't create the sysfs group
- */
- if (err == -ENODEV)
- return 0;
- /* For all other errors we can flag the failure */
- if (err)
- return err;
-
- /* Check DYTC is enabled and supports mode setting */
- if (output & BIT(DYTC_QUERY_ENABLE_BIT))
- dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
-
- return 0;
-}
-
static int lapsensor_get(bool *present, bool *state)
{
int output, err;
@@ -9954,59 +10053,55 @@ static ssize_t palmsensor_show(struct device *dev,
}
static DEVICE_ATTR_RO(palmsensor);
+static struct attribute *proxsensor_attributes[] = {
+ &dev_attr_dytc_lapmode.attr,
+ &dev_attr_palmsensor.attr,
+ NULL
+};
+
+static umode_t proxsensor_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (attr == &dev_attr_dytc_lapmode.attr) {
+ /*
+ * Platforms before DYTC version 5 claim to have a lap sensor,
+ * but it doesn't work, so we ignore them.
+ */
+ if (!has_lapsensor || dytc_version < 5)
+ return 0;
+ } else if (attr == &dev_attr_palmsensor.attr) {
+ if (!has_palmsensor)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static const struct attribute_group proxsensor_attr_group = {
+ .is_visible = proxsensor_attr_is_visible,
+ .attrs = proxsensor_attributes,
+};
+
static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
{
- int palm_err, lap_err, err;
+ int palm_err, lap_err;
palm_err = palmsensor_get(&has_palmsensor, &palm_state);
lap_err = lapsensor_get(&has_lapsensor, &lap_state);
- /*
- * If support isn't available (ENODEV) for both devices then quit, but
- * don't return an error.
- */
+	/* If neither sensor is supported, return -ENODEV */
if ((palm_err == -ENODEV) && (lap_err == -ENODEV))
- return 0;
+ return -ENODEV;
/* Otherwise, if there was an error return it */
if (palm_err && (palm_err != -ENODEV))
return palm_err;
if (lap_err && (lap_err != -ENODEV))
return lap_err;
- if (has_palmsensor) {
- err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_palmsensor.attr);
- if (err)
- return err;
- }
-
- /* Check if we know the DYTC version, if we don't then get it */
- if (!dytc_version) {
- err = dytc_get_version();
- if (err)
- return err;
- }
- /*
- * Platforms before DYTC version 5 claim to have a lap sensor, but it doesn't work, so we
- * ignore them
- */
- if (has_lapsensor && (dytc_version >= 5)) {
- err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_dytc_lapmode.attr);
- if (err)
- return err;
- }
return 0;
}
-static void proxsensor_exit(void)
-{
- if (has_lapsensor)
- sysfs_remove_file(&tpacpi_pdev->dev.kobj, &dev_attr_dytc_lapmode.attr);
- if (has_palmsensor)
- sysfs_remove_file(&tpacpi_pdev->dev.kobj, &dev_attr_palmsensor.attr);
-}
-
static struct ibm_struct proxsensor_driver_data = {
.name = "proximity-sensor",
- .exit = proxsensor_exit,
};
/*************************************************************************
@@ -10045,7 +10140,6 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_BALANCE, 1)
-static bool dytc_profile_available;
static enum platform_profile_option dytc_current_profile;
static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
static DEFINE_MUTEX(dytc_mutex);
@@ -10149,9 +10243,6 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
int output;
int err;
- if (!dytc_profile_available)
- return -ENODEV;
-
err = mutex_lock_interruptible(&dytc_mutex);
if (err)
return err;
@@ -10222,60 +10313,47 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
set_bit(PLATFORM_PROFILE_BALANCED, dytc_profile.choices);
set_bit(PLATFORM_PROFILE_PERFORMANCE, dytc_profile.choices);
- dytc_profile_available = false;
err = dytc_command(DYTC_CMD_QUERY, &output);
- /*
- * If support isn't available (ENODEV) then don't return an error
- * and don't create the sysfs group
- */
- if (err == -ENODEV)
- return 0;
- /* For all other errors we can flag the failure */
if (err)
return err;
- /* Check if we know the DYTC version, if we don't then get it */
- if (!dytc_version) {
- err = dytc_get_version();
- if (err)
- return err;
- }
+ if (output & BIT(DYTC_QUERY_ENABLE_BIT))
+ dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+
/* Check DYTC is enabled and supports mode setting */
- if (dytc_version >= 5) {
- dbg_printk(TPACPI_DBG_INIT,
- "DYTC version %d: thermal mode available\n", dytc_version);
- /*
- * Check if MMC_GET functionality available
- * Version > 6 and return success from MMC_GET command
- */
- dytc_mmc_get_available = false;
- if (dytc_version >= 6) {
- err = dytc_command(DYTC_CMD_MMC_GET, &output);
- if (!err && ((output & DYTC_ERR_MASK) == DYTC_ERR_SUCCESS))
- dytc_mmc_get_available = true;
- }
- /* Create platform_profile structure and register */
- err = platform_profile_register(&dytc_profile);
- /*
- * If for some reason platform_profiles aren't enabled
- * don't quit terminally.
- */
- if (err)
- return 0;
+ if (dytc_version < 5)
+ return -ENODEV;
- dytc_profile_available = true;
- /* Ensure initial values are correct */
- dytc_profile_refresh();
+ dbg_printk(TPACPI_DBG_INIT,
+ "DYTC version %d: thermal mode available\n", dytc_version);
+ /*
+	 * Check if MMC_GET functionality is available:
+	 * version >= 6 and the MMC_GET command returns success.
+ */
+ dytc_mmc_get_available = false;
+ if (dytc_version >= 6) {
+ err = dytc_command(DYTC_CMD_MMC_GET, &output);
+ if (!err && ((output & DYTC_ERR_MASK) == DYTC_ERR_SUCCESS))
+ dytc_mmc_get_available = true;
}
+ /* Create platform_profile structure and register */
+ err = platform_profile_register(&dytc_profile);
+ /*
+	 * If for some reason platform_profiles aren't enabled,
+	 * don't fail hard; return -ENODEV to mark the feature unavailable.
+ */
+ if (err)
+ return -ENODEV;
+
+ /* Ensure initial values are correct */
+ dytc_profile_refresh();
+
return 0;
}
static void dytc_profile_exit(void)
{
- if (dytc_profile_available) {
- dytc_profile_available = false;
- platform_profile_remove();
- }
+ platform_profile_remove();
}
static struct ibm_struct dytc_profile_driver_data = {
@@ -10423,7 +10501,14 @@ static struct attribute *kbdlang_attributes[] = {
NULL
};
+static umode_t kbdlang_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return tp_features.kbd_lang ? attr->mode : 0;
+}
+
static const struct attribute_group kbdlang_attr_group = {
+ .is_visible = kbdlang_attr_is_visible,
.attrs = kbdlang_attributes,
};
@@ -10432,28 +10517,12 @@ static int tpacpi_kbdlang_init(struct ibm_init_struct *iibm)
int err, output;
err = get_keyboard_lang(&output);
- /*
- * If support isn't available (ENODEV) then don't return an error
- * just don't create the sysfs group.
- */
- if (err == -ENODEV)
- return 0;
-
- if (err)
- return err;
-
- /* Platform supports this feature - create the sysfs file */
- return sysfs_create_group(&tpacpi_pdev->dev.kobj, &kbdlang_attr_group);
-}
-
-static void kbdlang_exit(void)
-{
- sysfs_remove_group(&tpacpi_pdev->dev.kobj, &kbdlang_attr_group);
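+	/*
+	 * Record availability for kbdlang_attr_is_visible(); a -ENODEV return
+	 * is treated as "feature not available" by ibm_init().
+	 */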
+ tp_features.kbd_lang = !err;
+ return err;
}
static struct ibm_struct kbdlang_driver_data = {
.name = "kbdlang",
- .exit = kbdlang_exit,
};
/*************************************************************************
@@ -10524,41 +10593,134 @@ static ssize_t wwan_antenna_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(wwan_antenna_type);
-static int tpacpi_dprc_init(struct ibm_init_struct *iibm)
+static struct attribute *dprc_attributes[] = {
+ &dev_attr_wwan_antenna_type.attr,
+ NULL
+};
+
+static umode_t dprc_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- int wwanantenna_err, err;
+ return has_antennatype ? attr->mode : 0;
+}
- wwanantenna_err = get_wwan_antenna(&wwan_antennatype);
- /*
- * If support isn't available (ENODEV) then quit, but don't
- * return an error.
- */
- if (wwanantenna_err == -ENODEV)
- return 0;
+static const struct attribute_group dprc_attr_group = {
+ .is_visible = dprc_attr_is_visible,
+ .attrs = dprc_attributes,
+};
+
+static int tpacpi_dprc_init(struct ibm_init_struct *iibm)
+{
+ int err;
- /* if there was an error return it */
- if (wwanantenna_err && (wwanantenna_err != -ENODEV))
- return wwanantenna_err;
- else if (!wwanantenna_err)
- has_antennatype = true;
+ err = get_wwan_antenna(&wwan_antennatype);
+ if (err)
+ return err;
- if (has_antennatype) {
- err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_wwan_antenna_type.attr);
- if (err)
- return err;
- }
+ has_antennatype = true;
return 0;
}
-static void dprc_exit(void)
+static struct ibm_struct dprc_driver_data = {
+ .name = "dprc",
+};
+
+/* --------------------------------------------------------------------- */
+
+static struct attribute *tpacpi_driver_attributes[] = {
+ &driver_attr_debug_level.attr,
+ &driver_attr_version.attr,
+ &driver_attr_interface_version.attr,
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ &driver_attr_wlsw_emulstate.attr,
+ &driver_attr_bluetooth_emulstate.attr,
+ &driver_attr_wwan_emulstate.attr,
+ &driver_attr_uwb_emulstate.attr,
+#endif
+ NULL
+};
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+static umode_t tpacpi_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- if (has_antennatype)
- sysfs_remove_file(&tpacpi_pdev->dev.kobj, &dev_attr_wwan_antenna_type.attr);
+ if (attr == &driver_attr_wlsw_emulstate.attr) {
+ if (!dbg_wlswemul)
+ return 0;
+ } else if (attr == &driver_attr_bluetooth_emulstate.attr) {
+ if (!dbg_bluetoothemul)
+ return 0;
+ } else if (attr == &driver_attr_wwan_emulstate.attr) {
+ if (!dbg_wwanemul)
+ return 0;
+ } else if (attr == &driver_attr_uwb_emulstate.attr) {
+ if (!dbg_uwbemul)
+ return 0;
+ }
+
+ return attr->mode;
}
+#endif
-static struct ibm_struct dprc_driver_data = {
- .name = "dprc",
- .exit = dprc_exit,
+static const struct attribute_group tpacpi_driver_attr_group = {
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+ .is_visible = tpacpi_attr_is_visible,
+#endif
+ .attrs = tpacpi_driver_attributes,
+};
+
+static const struct attribute_group *tpacpi_driver_groups[] = {
+ &tpacpi_driver_attr_group,
+ NULL,
+};
+
+static const struct attribute_group *tpacpi_groups[] = {
+ &adaptive_kbd_attr_group,
+ &hotkey_attr_group,
+ &bluetooth_attr_group,
+ &wan_attr_group,
+ &cmos_attr_group,
+ &proxsensor_attr_group,
+ &kbdlang_attr_group,
+ &dprc_attr_group,
+ NULL,
+};
+
+static const struct attribute_group *tpacpi_hwmon_groups[] = {
+ &thermal_attr_group,
+ &temp_label_attr_group,
+ &fan_attr_group,
+ NULL,
+};
+
+static const struct attribute_group *tpacpi_hwmon_driver_groups[] = {
+ &fan_driver_attr_group,
+ NULL,
+};
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Platform drivers
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+static struct platform_driver tpacpi_pdriver = {
+ .driver = {
+ .name = TPACPI_DRVR_NAME,
+ .pm = &tpacpi_pm,
+ .groups = tpacpi_driver_groups,
+ .dev_groups = tpacpi_groups,
+ },
+ .shutdown = tpacpi_shutdown_handler,
+};
+
+static struct platform_driver tpacpi_hwmon_pdriver = {
+ .driver = {
+ .name = TPACPI_HWMON_DRVR_NAME,
+ .groups = tpacpi_hwmon_driver_groups,
+ },
};
/****************************************************************************
@@ -10615,6 +10777,20 @@ static void tpacpi_driver_event(const unsigned int hkey_event)
if (!atomic_add_unless(&dytc_ignore_event, -1, 0))
dytc_profile_refresh();
}
+
+ if (lcdshadow_dev && hkey_event == TP_HKEY_EV_PRIVACYGUARD_TOGGLE) {
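+	/*
+	 * The hardware toggles PrivacyGuard by itself; refresh the cached
+	 * hw_state and notify DRM listeners only if it actually changed.
+	 */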
+ enum drm_privacy_screen_status old_hw_state;
+ bool changed;
+
+ mutex_lock(&lcdshadow_dev->lock);
+ old_hw_state = lcdshadow_dev->hw_state;
+ lcdshadow_get_hw_state(lcdshadow_dev);
+ changed = lcdshadow_dev->hw_state != old_hw_state;
+ mutex_unlock(&lcdshadow_dev->lock);
+
+ if (changed)
+ drm_privacy_screen_call_notifier_chain(lcdshadow_dev);
+ }
}
static void hotkey_driver_event(const unsigned int scancode)
@@ -10701,8 +10877,8 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
if (iibm->init) {
ret = iibm->init(iibm);
- if (ret > 0)
- return 0; /* probe failed */
+ if (ret > 0 || ret == -ENODEV)
+ return 0; /* subdriver functionality not available */
if (ret)
return ret;
@@ -11081,8 +11257,6 @@ static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
ibm = ibms_init[i].data;
- WARN_ON(ibm == NULL);
-
if (!ibm || !ibm->name)
continue;
@@ -11194,6 +11368,13 @@ static void thinkpad_acpi_module_exit(void)
tpacpi_lifecycle = TPACPI_LIFE_EXITING;
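+	/*
+	 * Unregister the hwmon device and the platform drivers first, so
+	 * their attribute groups are gone before the subdrivers backing
+	 * them are unloaded below.
+	 */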
+ if (tpacpi_hwmon)
+ hwmon_device_unregister(tpacpi_hwmon);
+ if (tp_features.sensors_pdrv_registered)
+ platform_driver_unregister(&tpacpi_hwmon_pdriver);
+ if (tp_features.platform_drv_registered)
+ platform_driver_unregister(&tpacpi_pdriver);
+
list_for_each_entry_safe_reverse(ibm, itmp,
&tpacpi_all_drivers,
all_drivers) {
@@ -11210,28 +11391,12 @@ static void thinkpad_acpi_module_exit(void)
kfree(hotkey_keycode_map);
}
- if (tpacpi_hwmon)
- hwmon_device_unregister(tpacpi_hwmon);
-
if (tpacpi_sensors_pdev)
platform_device_unregister(tpacpi_sensors_pdev);
if (tpacpi_pdev)
platform_device_unregister(tpacpi_pdev);
-
- if (tp_features.sensors_pdrv_attrs_registered)
- tpacpi_remove_driver_attributes(&tpacpi_hwmon_pdriver.driver);
- if (tp_features.platform_drv_attrs_registered)
- tpacpi_remove_driver_attributes(&tpacpi_pdriver.driver);
-
- if (tp_features.sensors_pdrv_registered)
- platform_driver_unregister(&tpacpi_hwmon_pdriver);
-
- if (tp_features.platform_drv_registered)
- platform_driver_unregister(&tpacpi_pdriver);
-
if (proc_dir)
remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
-
if (tpacpi_wq)
destroy_workqueue(tpacpi_wq);
@@ -11283,36 +11448,6 @@ static int __init thinkpad_acpi_module_init(void)
return -ENODEV;
}
- ret = platform_driver_register(&tpacpi_pdriver);
- if (ret) {
- pr_err("unable to register main platform driver\n");
- thinkpad_acpi_module_exit();
- return ret;
- }
- tp_features.platform_drv_registered = 1;
-
- ret = platform_driver_register(&tpacpi_hwmon_pdriver);
- if (ret) {
- pr_err("unable to register hwmon platform driver\n");
- thinkpad_acpi_module_exit();
- return ret;
- }
- tp_features.sensors_pdrv_registered = 1;
-
- ret = tpacpi_create_driver_attributes(&tpacpi_pdriver.driver);
- if (!ret) {
- tp_features.platform_drv_attrs_registered = 1;
- ret = tpacpi_create_driver_attributes(
- &tpacpi_hwmon_pdriver.driver);
- }
- if (ret) {
- pr_err("unable to create sysfs driver attributes\n");
- thinkpad_acpi_module_exit();
- return ret;
- }
- tp_features.sensors_pdrv_attrs_registered = 1;
-
-
/* Device initialization */
tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
NULL, 0);
@@ -11333,17 +11468,7 @@ static int __init thinkpad_acpi_module_init(void)
thinkpad_acpi_module_exit();
return ret;
}
- tp_features.sensors_pdev_attrs_registered = 1;
- tpacpi_hwmon = hwmon_device_register_with_groups(
- &tpacpi_sensors_pdev->dev, TPACPI_NAME, NULL, NULL);
- if (IS_ERR(tpacpi_hwmon)) {
- ret = PTR_ERR(tpacpi_hwmon);
- tpacpi_hwmon = NULL;
- pr_err("unable to register hwmon device\n");
- thinkpad_acpi_module_exit();
- return ret;
- }
mutex_init(&tpacpi_inputdev_send_mutex);
tpacpi_inputdev = input_allocate_device();
if (!tpacpi_inputdev) {
@@ -11376,6 +11501,32 @@ static int __init thinkpad_acpi_module_init(void)
tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
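+	/*
+	 * Register the platform drivers only now that subdriver init has
+	 * run, so the attribute-group is_visible() callbacks see the final
+	 * feature flags.
+	 */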
+ ret = platform_driver_register(&tpacpi_pdriver);
+ if (ret) {
+ pr_err("unable to register main platform driver\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+ tp_features.platform_drv_registered = 1;
+
+ ret = platform_driver_register(&tpacpi_hwmon_pdriver);
+ if (ret) {
+ pr_err("unable to register hwmon platform driver\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+ tp_features.sensors_pdrv_registered = 1;
+
+ tpacpi_hwmon = hwmon_device_register_with_groups(
+ &tpacpi_sensors_pdev->dev, TPACPI_NAME, NULL, tpacpi_hwmon_groups);
+ if (IS_ERR(tpacpi_hwmon)) {
+ ret = PTR_ERR(tpacpi_hwmon);
+ tpacpi_hwmon = NULL;
+ pr_err("unable to register hwmon device\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+
ret = input_register_device(tpacpi_inputdev);
if (ret < 0) {
pr_err("unable to register input device\n");
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 352508d30467..f113dec98e21 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1368,7 +1368,7 @@ static int lcd_proc_show(struct seq_file *m, void *v)
static int lcd_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, lcd_proc_show, PDE_DATA(inode));
+ return single_open(file, lcd_proc_show, pde_data(inode));
}
static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
@@ -1404,7 +1404,7 @@ static int set_lcd_status(struct backlight_device *bd)
static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
char cmd[42];
size_t len;
int levels;
@@ -1469,13 +1469,13 @@ static int video_proc_show(struct seq_file *m, void *v)
static int video_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, video_proc_show, PDE_DATA(inode));
+ return single_open(file, video_proc_show, pde_data(inode));
}
static ssize_t video_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
char *buffer;
char *cmd;
int lcd_out = -1, crt_out = -1, tv_out = -1;
@@ -1580,13 +1580,13 @@ static int fan_proc_show(struct seq_file *m, void *v)
static int fan_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, fan_proc_show, PDE_DATA(inode));
+ return single_open(file, fan_proc_show, pde_data(inode));
}
static ssize_t fan_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
char cmd[42];
size_t len;
int value;
@@ -1628,13 +1628,13 @@ static int keys_proc_show(struct seq_file *m, void *v)
static int keys_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, keys_proc_show, PDE_DATA(inode));
+ return single_open(file, keys_proc_show, pde_data(inode));
}
static ssize_t keys_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
char cmd[42];
size_t len;
int value;
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 17dd54d4b783..494f23052678 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -107,6 +107,9 @@ static const struct property_entry chuwi_hi10_plus_props[] = {
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_BOOL("silead,pen-supported"),
+ PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
+ PROPERTY_ENTRY_U32("silead,pen-resolution-y", 8),
{ }
};
@@ -124,15 +127,21 @@ static const struct ts_dmi_data chuwi_hi10_plus_data = {
.properties = chuwi_hi10_plus_props,
};
+static const u32 chuwi_hi10_pro_efi_min_max[] = { 8, 1911, 8, 1271 };
+
static const struct property_entry chuwi_hi10_pro_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
- PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1912),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1272),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 80),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 26),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1962),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1254),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
+ PROPERTY_ENTRY_U32_ARRAY("silead,efi-fw-min-max", chuwi_hi10_pro_efi_min_max),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_BOOL("silead,pen-supported"),
+ PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
+ PROPERTY_ENTRY_U32("silead,pen-resolution-y", 8),
{ }
};
@@ -352,18 +361,6 @@ static const struct ts_dmi_data gdix1001_01_upside_down_data = {
.properties = gdix1001_upside_down_props,
};
-static const struct property_entry glavey_tm800a550l_props[] = {
- PROPERTY_ENTRY_STRING("firmware-name", "gt912-glavey-tm800a550l.fw"),
- PROPERTY_ENTRY_STRING("goodix,config-name", "gt912-glavey-tm800a550l.cfg"),
- PROPERTY_ENTRY_U32("goodix,main-clk", 54),
- { }
-};
-
-static const struct ts_dmi_data glavey_tm800a550l_data = {
- .acpi_name = "GDIX1001:00",
- .properties = glavey_tm800a550l_props,
-};
-
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -1140,15 +1137,6 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
},
},
- { /* Glavey TM800A550L */
- .driver_data = (void *)&glavey_tm800a550l_data,
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
- DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
- /* Above strings are too generic, also match on BIOS version */
- DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
- },
- },
{
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
diff --git a/drivers/platform/x86/uv_sysfs.c b/drivers/platform/x86/uv_sysfs.c
index 956a354b57c1..625b0b79d185 100644
--- a/drivers/platform/x86/uv_sysfs.c
+++ b/drivers/platform/x86/uv_sysfs.c
@@ -175,6 +175,7 @@ static struct attribute *uv_hub_attrs[] = {
&cnode_attribute.attr,
NULL,
};
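+/* ATTRIBUTE_GROUPS() generates uv_hub_groups[] from uv_hub_attrs[] */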
+ATTRIBUTE_GROUPS(uv_hub);
static void hub_release(struct kobject *kobj)
{
@@ -205,7 +206,7 @@ static const struct sysfs_ops hub_sysfs_ops = {
static struct kobj_type hub_attr_type = {
.release = hub_release,
.sysfs_ops = &hub_sysfs_ops,
- .default_attrs = uv_hub_attrs,
+ .default_groups = uv_hub_groups,
};
static int uv_hubs_init(void)
@@ -327,6 +328,7 @@ static struct attribute *uv_port_attrs[] = {
&uv_port_conn_port_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(uv_port);
static void uv_port_release(struct kobject *kobj)
{
@@ -357,7 +359,7 @@ static const struct sysfs_ops uv_port_sysfs_ops = {
static struct kobj_type uv_port_attr_type = {
.release = uv_port_release,
.sysfs_ops = &uv_port_sysfs_ops,
- .default_attrs = uv_port_attrs,
+ .default_groups = uv_port_groups,
};
static int uv_ports_init(void)
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index c34341f4da76..58a23a9adbef 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -57,6 +57,11 @@ static_assert(sizeof(typeof_member(struct guid_block, guid)) == 16);
static_assert(sizeof(struct guid_block) == 20);
static_assert(__alignof__(struct guid_block) == 1);
+enum { /* wmi_block flags */
+ WMI_READ_TAKES_NO_ARGS,
+ WMI_PROBED,
+};
+
struct wmi_block {
struct wmi_device dev;
struct list_head list;
@@ -67,8 +72,7 @@ struct wmi_block {
wmi_notify_handler handler;
void *handler_data;
u64 req_buf_size;
-
- bool read_takes_no_args;
+ unsigned long flags;
};
@@ -367,7 +371,7 @@ static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
wq_params[0].type = ACPI_TYPE_INTEGER;
wq_params[0].integer.value = instance;
- if (instance == 0 && wblock->read_takes_no_args)
+ if (instance == 0 && test_bit(WMI_READ_TAKES_NO_ARGS, &wblock->flags))
input.count = 0;
/*
@@ -1005,6 +1009,7 @@ static int wmi_dev_probe(struct device *dev)
}
}
+ set_bit(WMI_PROBED, &wblock->flags);
return 0;
probe_misc_failure:
@@ -1022,6 +1027,8 @@ static void wmi_dev_remove(struct device *dev)
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
+ clear_bit(WMI_PROBED, &wblock->flags);
+
if (wdriver->filter_callback) {
misc_deregister(&wblock->char_dev);
kfree(wblock->char_dev.name);
@@ -1113,7 +1120,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
* laptops, WQxx may not be a method at all.)
*/
if (info->type != ACPI_TYPE_METHOD || info->param_count == 0)
- wblock->read_takes_no_args = true;
+ set_bit(WMI_READ_TAKES_NO_ARGS, &wblock->flags);
kfree(info);
@@ -1319,15 +1326,17 @@ static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
return;
/* If a driver is bound, then notify the driver. */
- if (wblock->dev.dev.driver) {
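+	/*
+	 * Only notify once probe has completed; WMI_PROBED is cleared
+	 * before the driver's remove callback runs.
+	 */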
+ if (test_bit(WMI_PROBED, &wblock->flags) && wblock->dev.dev.driver) {
struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
- status = get_event_data(wblock, &evdata);
- if (ACPI_FAILURE(status)) {
- dev_warn(&wblock->dev.dev, "failed to get event data\n");
- return;
+ if (!driver->no_notify_data) {
+ status = get_event_data(wblock, &evdata);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&wblock->dev.dev, "failed to get event data\n");
+ return;
+ }
}
if (driver->notify)
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
new file mode 100644
index 000000000000..3ba63ad91b28
--- /dev/null
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DMI based code to deal with broken DSDTs on X86 tablets which ship with
+ * Android as (part of) the factory image. The factory kernels shipped on these
+ * devices typically have a bunch of things hardcoded, rather than specified
+ * in their DSDT.
+ *
+ * Copyright (C) 2021 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/power/bq24190_charger.h>
+#include <linux/serdev.h>
+#include <linux/string.h>
+/* For gpiochip_get_desc() which is EXPORT_SYMBOL_GPL() */
+#include "../../gpio/gpiolib.h"
+
+/*
+ * Helper code to get Linux IRQ numbers given a description of the IRQ source
+ * (either IOAPIC index, or GPIO chip name + pin-number).
+ */
+enum x86_acpi_irq_type {
+ X86_ACPI_IRQ_TYPE_NONE,
+ X86_ACPI_IRQ_TYPE_APIC,
+ X86_ACPI_IRQ_TYPE_GPIOINT,
+ X86_ACPI_IRQ_TYPE_PMIC,
+};
+
+struct x86_acpi_irq_data {
+ char *chip; /* GPIO chip label (GPIOINT) or PMIC ACPI path (PMIC) */
+ enum x86_acpi_irq_type type;
+ enum irq_domain_bus_token domain;
+ int index;
+ int trigger; /* ACPI_EDGE_SENSITIVE / ACPI_LEVEL_SENSITIVE */
+ int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */
+};
+
+static int x86_acpi_irq_helper_gpiochip_find(struct gpio_chip *gc, void *data)
+{
+ return gc->label && !strcmp(gc->label, data);
+}
+
+static int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
+{
+ struct irq_fwspec fwspec = { };
+ struct irq_domain *domain;
+ struct acpi_device *adev;
+ struct gpio_desc *gpiod;
+ struct gpio_chip *chip;
+ unsigned int irq_type;
+ acpi_handle handle;
+ acpi_status status;
+ int irq, ret;
+
+ switch (data->type) {
+ case X86_ACPI_IRQ_TYPE_APIC:
+ irq = acpi_register_gsi(NULL, data->index, data->trigger, data->polarity);
+ if (irq < 0)
+ pr_err("error %d getting APIC IRQ %d\n", irq, data->index);
+
+ return irq;
+ case X86_ACPI_IRQ_TYPE_GPIOINT:
+ /* Like acpi_dev_gpio_irq_get(), but without parsing ACPI resources */
+ chip = gpiochip_find(data->chip, x86_acpi_irq_helper_gpiochip_find);
+ if (!chip) {
+ pr_err("error cannot find GPIO chip %s\n", data->chip);
+ return -ENODEV;
+ }
+
+ gpiod = gpiochip_get_desc(chip, data->index);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ pr_err("error %d getting GPIO %s %d\n", ret, data->chip, data->index);
+ return ret;
+ }
+
+ irq = gpiod_to_irq(gpiod);
+ if (irq < 0) {
+ pr_err("error %d getting IRQ %s %d\n", irq, data->chip, data->index);
+ return irq;
+ }
+
+ irq_type = acpi_dev_get_irq_type(data->trigger, data->polarity);
+ if (irq_type != IRQ_TYPE_NONE && irq_type != irq_get_trigger_type(irq))
+ irq_set_irq_type(irq, irq_type);
+
+ return irq;
+ case X86_ACPI_IRQ_TYPE_PMIC:
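+		/* Resolve the PMIC ACPI node to its IRQ domain and map the index */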
+ status = acpi_get_handle(NULL, data->chip, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("error could not get %s handle\n", data->chip);
+ return -ENODEV;
+ }
+
+ acpi_bus_get_device(handle, &adev);
+ if (!adev) {
+ pr_err("error could not get %s adev\n", data->chip);
+ return -ENODEV;
+ }
+
+ fwspec.fwnode = acpi_fwnode_handle(adev);
+ domain = irq_find_matching_fwspec(&fwspec, data->domain);
+ if (!domain) {
+ pr_err("error could not find IRQ domain for %s\n", data->chip);
+ return -ENODEV;
+ }
+
+ return irq_create_mapping(domain, data->index);
+ default:
+ return 0;
+ }
+}
+
+struct x86_i2c_client_info {
+ struct i2c_board_info board_info;
+ char *adapter_path;
+ struct x86_acpi_irq_data irq_data;
+};
+
+struct x86_serdev_info {
+ const char *ctrl_hid;
+ const char *ctrl_uid;
+ const char *ctrl_devname;
+ /*
+	 * ATM the serdev core only supports OF or ACPI matching; and so far
+	 * all Android x86 tablet DSDTs have usable serdev nodes, but sometimes
+ * under the wrong controller. So we just tie the existing serdev ACPI
+ * node to the right controller.
+ */
+ const char *serdev_hid;
+};
+
+struct x86_dev_info {
+ const char * const *modules;
+ struct gpiod_lookup_table **gpiod_lookup_tables;
+ const struct x86_i2c_client_info *i2c_client_info;
+ const struct platform_device_info *pdev_info;
+ const struct x86_serdev_info *serdev_info;
+ int i2c_client_count;
+ int pdev_count;
+ int serdev_count;
+};
+
+/* Generic / shared bq24190 settings */
+static const char * const bq24190_suppliers[] = { "tusb1210-psy" };
+
+static const struct property_entry bq24190_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
+ PROPERTY_ENTRY_BOOL("omit-battery-class"),
+ PROPERTY_ENTRY_BOOL("disable-reset"),
+ { }
+};
+
+static const struct software_node bq24190_node = {
+ .properties = bq24190_props,
+};
+
+/* For enabling the bq24190 5V boost based on the id-pin */
+static struct regulator_consumer_supply intel_int3496_consumer = {
+ .supply = "vbus",
+ .dev_name = "intel-int3496",
+};
+
+static const struct regulator_init_data bq24190_vbus_init_data = {
+ .constraints = {
+ .name = "bq24190_vbus",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = &intel_int3496_consumer,
+ .num_consumer_supplies = 1,
+};
+
+static struct bq24190_platform_data bq24190_pdata = {
+ .regulator_init_data = &bq24190_vbus_init_data,
+};
+
+static const char * const bq24190_modules[] __initconst = {
+ "crystal_cove_charger", /* For the bq24190 IRQ */
+ "bq24190_charger", /* For the Vbus regulator for intel-int3496 */
+ NULL
+};
+
+/* Generic pdevs array and gpio-lookups for micro USB ID pin handling */
+static const struct platform_device_info int3496_pdevs[] __initconst = {
+ {
+ /* For micro USB ID pin handling */
+ .name = "intel-int3496",
+ .id = PLATFORM_DEVID_NONE,
+ },
+};
+
+static struct gpiod_lookup_table int3496_gpo2_pin22_gpios = {
+ .dev_id = "intel-int3496",
+ .table = {
+ GPIO_LOOKUP("INT33FC:02", 22, "id", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+/* Asus ME176C tablets have an Android factory img with everything hardcoded */
+static const char * const asus_me176c_accel_mount_matrix[] = {
+ "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry asus_me176c_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", asus_me176c_accel_mount_matrix),
+ { }
+};
+
+static const struct software_node asus_me176c_accel_node = {
+ .properties = asus_me176c_accel_props,
+};
+
+static const struct x86_i2c_client_info asus_me176c_i2c_clients[] __initconst = {
+ {
+ /* bq24190 battery charger */
+ .board_info = {
+ .type = "bq24190",
+ .addr = 0x6b,
+ .dev_name = "bq24190",
+ .swnode = &bq24190_node,
+ .platform_data = &bq24190_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_PMIC,
+ .chip = "\\_SB_.I2C7.PMIC",
+ .domain = DOMAIN_BUS_WAKEUP,
+ .index = 0,
+ },
+ }, {
+ /* ug3105 battery monitor */
+ .board_info = {
+ .type = "ug3105",
+ .addr = 0x70,
+ .dev_name = "ug3105",
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ }, {
+ /* ak09911 compass */
+ .board_info = {
+ .type = "ak09911",
+ .addr = 0x0c,
+ .dev_name = "ak09911",
+ },
+ .adapter_path = "\\_SB_.I2C5",
+ }, {
+ /* kxtj21009 accel */
+ .board_info = {
+ .type = "kxtj21009",
+ .addr = 0x0f,
+ .dev_name = "kxtj21009",
+ .swnode = &asus_me176c_accel_node,
+ },
+ .adapter_path = "\\_SB_.I2C5",
+ }, {
+ /* goodix touchscreen */
+ .board_info = {
+ .type = "GDIX1001:00",
+ .addr = 0x14,
+ .dev_name = "goodix_ts",
+ },
+ .adapter_path = "\\_SB_.I2C6",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x45,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ },
+ },
+};
+
+static const struct x86_serdev_info asus_me176c_serdevs[] __initconst = {
+ {
+ .ctrl_hid = "80860F0A",
+ .ctrl_uid = "2",
+ .ctrl_devname = "serial0",
+ .serdev_hid = "BCM2E3A",
+ },
+};
+
+static struct gpiod_lookup_table asus_me176c_goodix_gpios = {
+ .dev_id = "i2c-goodix_ts",
+ .table = {
+ GPIO_LOOKUP("INT33FC:00", 60, "reset", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 28, "irq", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table *asus_me176c_gpios[] = {
+ &int3496_gpo2_pin22_gpios,
+ &asus_me176c_goodix_gpios,
+ NULL
+};
+
+static const struct x86_dev_info asus_me176c_info __initconst = {
+ .i2c_client_info = asus_me176c_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(asus_me176c_i2c_clients),
+ .pdev_info = int3496_pdevs,
+ .pdev_count = ARRAY_SIZE(int3496_pdevs),
+ .serdev_info = asus_me176c_serdevs,
+ .serdev_count = ARRAY_SIZE(asus_me176c_serdevs),
+ .gpiod_lookup_tables = asus_me176c_gpios,
+ .modules = bq24190_modules,
+};
+
+/* Asus TF103C tablets have an Android factory img with everything hardcoded */
+static const char * const asus_tf103c_accel_mount_matrix[] = {
+ "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry asus_tf103c_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", asus_tf103c_accel_mount_matrix),
+ { }
+};
+
+static const struct software_node asus_tf103c_accel_node = {
+ .properties = asus_tf103c_accel_props,
+};
+
+static const struct property_entry asus_tf103c_touchscreen_props[] = {
+ PROPERTY_ENTRY_STRING("compatible", "atmel,atmel_mxt_ts"),
+ { }
+};
+
+static const struct software_node asus_tf103c_touchscreen_node = {
+ .properties = asus_tf103c_touchscreen_props,
+};
+
+static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst = {
+ {
+ /* bq24190 battery charger */
+ .board_info = {
+ .type = "bq24190",
+ .addr = 0x6b,
+ .dev_name = "bq24190",
+ .swnode = &bq24190_node,
+ .platform_data = &bq24190_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_PMIC,
+ .chip = "\\_SB_.I2C7.PMIC",
+ .domain = DOMAIN_BUS_WAKEUP,
+ .index = 0,
+ },
+ }, {
+ /* ug3105 battery monitor */
+ .board_info = {
+ .type = "ug3105",
+ .addr = 0x70,
+ .dev_name = "ug3105",
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ }, {
+ /* ak09911 compass */
+ .board_info = {
+ .type = "ak09911",
+ .addr = 0x0c,
+ .dev_name = "ak09911",
+ },
+ .adapter_path = "\\_SB_.I2C5",
+ }, {
+ /* kxtj21009 accel */
+ .board_info = {
+ .type = "kxtj21009",
+ .addr = 0x0f,
+ .dev_name = "kxtj21009",
+ .swnode = &asus_tf103c_accel_node,
+ },
+ .adapter_path = "\\_SB_.I2C5",
+ }, {
+ /* atmel touchscreen */
+ .board_info = {
+ .type = "atmel_mxt_ts",
+ .addr = 0x4a,
+ .dev_name = "atmel_mxt_ts",
+ .swnode = &asus_tf103c_touchscreen_node,
+ },
+ .adapter_path = "\\_SB_.I2C6",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 28,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ },
+ },
+};
+
+static struct gpiod_lookup_table *asus_tf103c_gpios[] = {
+ &int3496_gpo2_pin22_gpios,
+ NULL
+};
+
+static const struct x86_dev_info asus_tf103c_info __initconst = {
+ .i2c_client_info = asus_tf103c_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(asus_tf103c_i2c_clients),
+ .pdev_info = int3496_pdevs,
+ .pdev_count = ARRAY_SIZE(int3496_pdevs),
+ .gpiod_lookup_tables = asus_tf103c_gpios,
+ .modules = bq24190_modules,
+};
+
+/*
+ * When booted with the BIOS set to Android mode the Chuwi Hi8 (CWI509) DSDT
+ * contains a whole bunch of bogus ACPI I2C devices and is missing entries
+ * for the touchscreen and the accelerometer.
+ */
+static const struct property_entry chuwi_hi8_gsl1680_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi8.fw"),
+ { }
+};
+
+static const struct software_node chuwi_hi8_gsl1680_node = {
+ .properties = chuwi_hi8_gsl1680_props,
+};
+
+static const char * const chuwi_hi8_mount_matrix[] = {
+ "1", "0", "0",
+ "0", "-1", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry chuwi_hi8_bma250e_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", chuwi_hi8_mount_matrix),
+ { }
+};
+
+static const struct software_node chuwi_hi8_bma250e_node = {
+ .properties = chuwi_hi8_bma250e_props,
+};
+
+static const struct x86_i2c_client_info chuwi_hi8_i2c_clients[] __initconst = {
+ {
+ /* Silead touchscreen */
+ .board_info = {
+ .type = "gsl1680",
+ .addr = 0x40,
+ .swnode = &chuwi_hi8_gsl1680_node,
+ },
+ .adapter_path = "\\_SB_.I2C4",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x44,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }, {
+ /* BMA250E accelerometer */
+ .board_info = {
+ .type = "bma250e",
+ .addr = 0x18,
+ .swnode = &chuwi_hi8_bma250e_node,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 23,
+ .trigger = ACPI_LEVEL_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ },
+};
+
+static const struct x86_dev_info chuwi_hi8_info __initconst = {
+ .i2c_client_info = chuwi_hi8_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients),
+};
+
+/*
+ * Whitelabel (sold as various brands) TM800A550L tablets.
+ * These tablets' DSDTs contain a whole bunch of bogus ACPI I2C devices
+ * (removed through acpi_quirk_skip_i2c_client_enumeration()) and
+ * the touchscreen fwnode has the wrong GPIOs.
+ */
+static const char * const whitelabel_tm800a550l_accel_mount_matrix[] = {
+ "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry whitelabel_tm800a550l_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", whitelabel_tm800a550l_accel_mount_matrix),
+ { }
+};
+
+static const struct software_node whitelabel_tm800a550l_accel_node = {
+ .properties = whitelabel_tm800a550l_accel_props,
+};
+
+static const struct property_entry whitelabel_tm800a550l_goodix_props[] = {
+ PROPERTY_ENTRY_STRING("firmware-name", "gt912-tm800a550l.fw"),
+ PROPERTY_ENTRY_STRING("goodix,config-name", "gt912-tm800a550l.cfg"),
+ PROPERTY_ENTRY_U32("goodix,main-clk", 54),
+ { }
+};
+
+static const struct software_node whitelabel_tm800a550l_goodix_node = {
+ .properties = whitelabel_tm800a550l_goodix_props,
+};
+
+static const struct x86_i2c_client_info whitelabel_tm800a550l_i2c_clients[] __initconst = {
+ {
+ /* goodix touchscreen */
+ .board_info = {
+ .type = "GDIX1001:00",
+ .addr = 0x14,
+ .dev_name = "goodix_ts",
+ .swnode = &whitelabel_tm800a550l_goodix_node,
+ },
+ .adapter_path = "\\_SB_.I2C2",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x44,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }, {
+ /* kxcj91008 accel */
+ .board_info = {
+ .type = "kxcj91008",
+ .addr = 0x0f,
+ .dev_name = "kxcj91008",
+ .swnode = &whitelabel_tm800a550l_accel_node,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ },
+};
+
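+/*
+ * The ACPI touchscreen fwnode points to the wrong GPIOs, so hand the
+ * correct reset and irq pins to the driver through a lookup table.
+ */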
+static struct gpiod_lookup_table whitelabel_tm800a550l_goodix_gpios = {
+ .dev_id = "i2c-goodix_ts",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 3, "irq", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table *whitelabel_tm800a550l_gpios[] = {
+ &whitelabel_tm800a550l_goodix_gpios,
+ NULL
+};
+
+static const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
+ .i2c_client_info = whitelabel_tm800a550l_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(whitelabel_tm800a550l_i2c_clients),
+ .gpiod_lookup_tables = whitelabel_tm800a550l_gpios,
+};
+
+/*
+ * If the EFI bootloader is not Xiaomi's own signed Android loader, then the
+ * Xiaomi Mi Pad 2 X86 tablet sets OSID in the DSDT to 1 (Windows), causing
+ * a bunch of devices to be hidden.
+ *
+ * This takes care of instantiating the hidden devices manually.
+ */
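+/* Tie the fuel-gauge to the bq25890 charger as its power supplier */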
+static const char * const bq27520_suppliers[] = { "bq25890-charger" };
+
+static const struct property_entry bq27520_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq27520_suppliers),
+ { }
+};
+
+static const struct software_node bq27520_node = {
+ .properties = bq27520_props,
+};
+
+static const struct x86_i2c_client_info xiaomi_mipad2_i2c_clients[] __initconst = {
+ {
+ /* BQ27520 fuel-gauge */
+ .board_info = {
+ .type = "bq27520",
+ .addr = 0x55,
+ .dev_name = "bq27520",
+ .swnode = &bq27520_node,
+ },
+ .adapter_path = "\\_SB_.PCI0.I2C1",
+ }, {
+ /* KTD2026 RGB notification LED controller */
+ .board_info = {
+ .type = "ktd2026",
+ .addr = 0x30,
+ .dev_name = "ktd2026",
+ },
+ .adapter_path = "\\_SB_.PCI0.I2C3",
+ },
+};
+
+static const struct x86_dev_info xiaomi_mipad2_info __initconst = {
+ .i2c_client_info = xiaomi_mipad2_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(xiaomi_mipad2_i2c_clients),
+};
+
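+/* Match the DMI data of the supported tablets to their board info above */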
+static const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
+ {
+ /* Asus MeMO Pad 7 ME176C */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
+ },
+ .driver_data = (void *)&asus_me176c_info,
+ },
+ {
+ /* Asus TF103C */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ },
+ .driver_data = (void *)&asus_tf103c_info,
+ },
+ {
+ /* Chuwi Hi8 (CWI509) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
+ },
+ .driver_data = (void *)&chuwi_hi8_info,
+ },
+ {
+ /* Whitelabel (sold as various brands) TM800A550L */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
+ },
+ .driver_data = (void *)&whitelabel_tm800a550l_info,
+ },
+ {
+ /* Xiaomi Mi Pad 2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
+ },
+ .driver_data = (void *)&xiaomi_mipad2_info,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, x86_android_tablet_ids);
+
+static int i2c_client_count;
+static int pdev_count;
+static int serdev_count;
+static struct i2c_client **i2c_clients;
+static struct platform_device **pdevs;
+static struct serdev_device **serdevs;
+static struct gpiod_lookup_table **gpiod_lookup_tables;
+
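+/*
+ * Resolve the client's IRQ, translate the ACPI adapter_path to an I2C
+ * adapter and instantiate the board_info client on that adapter.
+ */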
+static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info,
+ int idx)
+{
+ const struct x86_i2c_client_info *client_info = &dev_info->i2c_client_info[idx];
+ struct i2c_board_info board_info = client_info->board_info;
+ struct i2c_adapter *adap;
+ acpi_handle handle;
+ acpi_status status;
+
+ board_info.irq = x86_acpi_irq_helper_get(&client_info->irq_data);
+ if (board_info.irq < 0)
+ return board_info.irq;
+
+ status = acpi_get_handle(NULL, client_info->adapter_path, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("error could not get %s handle\n", client_info->adapter_path);
+ return -ENODEV;
+ }
+
+ adap = i2c_acpi_find_adapter_by_handle(handle);
+ if (!adap) {
+ pr_err("error could not get %s adapter\n", client_info->adapter_path);
+ return -ENODEV;
+ }
+
+ i2c_clients[idx] = i2c_new_client_device(adap, &board_info);
+ put_device(&adap->dev);
+ if (IS_ERR(i2c_clients[idx]))
+ return dev_err_probe(&adap->dev, PTR_ERR(i2c_clients[idx]),
+ "creating I2C-client %d\n", idx);
+
+ return 0;
+}
+
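+/*
+ * serdev devices hang off their UART controller, so find the controller
+ * through ACPI first, then attach a new serdev device to it with the
+ * hidden ACPI device as its firmware companion.
+ */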
+static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
+{
+ struct acpi_device *ctrl_adev, *serdev_adev;
+ struct serdev_device *serdev;
+ struct device *ctrl_dev;
+ int ret = -ENODEV;
+
+ ctrl_adev = acpi_dev_get_first_match_dev(info->ctrl_hid, info->ctrl_uid, -1);
+ if (!ctrl_adev) {
+ pr_err("error could not get %s/%s ctrl adev\n",
+ info->ctrl_hid, info->ctrl_uid);
+ return -ENODEV;
+ }
+
+ serdev_adev = acpi_dev_get_first_match_dev(info->serdev_hid, NULL, -1);
+ if (!serdev_adev) {
+ pr_err("error could not get %s serdev adev\n", info->serdev_hid);
+ goto put_ctrl_adev;
+ }
+
+ /* get_first_physical_node() returns a weak ref, no need to put() it */
+ ctrl_dev = acpi_get_first_physical_node(ctrl_adev);
+ if (!ctrl_dev) {
+ pr_err("error could not get %s/%s ctrl physical dev\n",
+ info->ctrl_hid, info->ctrl_uid);
+ goto put_serdev_adev;
+ }
+
+ /* ctrl_dev now points to the controller's parent; get the controller */
+ ctrl_dev = device_find_child_by_name(ctrl_dev, info->ctrl_devname);
+ if (!ctrl_dev) {
+ pr_err("error could not get %s/%s %s ctrl dev\n",
+ info->ctrl_hid, info->ctrl_uid, info->ctrl_devname);
+ goto put_serdev_adev;
+ }
+
+ serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev));
+ if (!serdev) {
+ ret = -ENOMEM;
+ goto put_serdev_adev;
+ }
+
+ ACPI_COMPANION_SET(&serdev->dev, serdev_adev);
+ acpi_device_set_enumerated(serdev_adev);
+
+ ret = serdev_device_add(serdev);
+ if (ret) {
+ dev_err(&serdev->dev, "error %d adding serdev\n", ret);
+ serdev_device_put(serdev);
+ goto put_serdev_adev;
+ }
+
+ serdevs[idx] = serdev;
+
+put_serdev_adev:
+ acpi_dev_put(serdev_adev);
+put_ctrl_adev:
+ acpi_dev_put(ctrl_adev);
+ return ret;
+}
+
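+/* Also used as the error-unwind path; the counts are 0 for unreached steps */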
+static void x86_android_tablet_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < serdev_count; i++) {
+ if (serdevs[i])
+ serdev_device_remove(serdevs[i]);
+ }
+
+ kfree(serdevs);
+
+ for (i = 0; i < pdev_count; i++)
+ platform_device_unregister(pdevs[i]);
+
+ kfree(pdevs);
+
+ for (i = 0; i < i2c_client_count; i++)
+ i2c_unregister_device(i2c_clients[i]);
+
+ kfree(i2c_clients);
+
+ for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
+ gpiod_remove_lookup_table(gpiod_lookup_tables[i]);
+}
+
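+/*
+ * Match the DMI table, pre-load the required modules and then register
+ * the GPIO lookups, I2C clients, platform devices and serdev devices.
+ */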
+static __init int x86_android_tablet_init(void)
+{
+ const struct x86_dev_info *dev_info;
+ const struct dmi_system_id *id;
+ int i, ret = 0;
+
+ id = dmi_first_match(x86_android_tablet_ids);
+ if (!id)
+ return -ENODEV;
+
+ dev_info = id->driver_data;
+
+ /*
+ * Since this runs from module_init() it cannot use -EPROBE_DEFER;
+ * instead, pre-load any modules which are listed as requirements.
+ */
+ for (i = 0; dev_info->modules && dev_info->modules[i]; i++)
+ request_module(dev_info->modules[i]);
+
+ gpiod_lookup_tables = dev_info->gpiod_lookup_tables;
+ for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
+ gpiod_add_lookup_table(gpiod_lookup_tables[i]);
+
+ i2c_clients = kcalloc(dev_info->i2c_client_count, sizeof(*i2c_clients), GFP_KERNEL);
+ if (!i2c_clients) {
+ x86_android_tablet_cleanup();
+ return -ENOMEM;
+ }
+
+ i2c_client_count = dev_info->i2c_client_count;
+ for (i = 0; i < i2c_client_count; i++) {
+ ret = x86_instantiate_i2c_client(dev_info, i);
+ if (ret < 0) {
+ x86_android_tablet_cleanup();
+ return ret;
+ }
+ }
+
+ pdevs = kcalloc(dev_info->pdev_count, sizeof(*pdevs), GFP_KERNEL);
+ if (!pdevs) {
+ x86_android_tablet_cleanup();
+ return -ENOMEM;
+ }
+
+ pdev_count = dev_info->pdev_count;
+ for (i = 0; i < pdev_count; i++) {
+ pdevs[i] = platform_device_register_full(&dev_info->pdev_info[i]);
+ if (IS_ERR(pdevs[i])) {
+ x86_android_tablet_cleanup();
+ return PTR_ERR(pdevs[i]);
+ }
+ }
+
+ serdevs = kcalloc(dev_info->serdev_count, sizeof(*serdevs), GFP_KERNEL);
+ if (!serdevs) {
+ x86_android_tablet_cleanup();
+ return -ENOMEM;
+ }
+
+ serdev_count = dev_info->serdev_count;
+ for (i = 0; i < serdev_count; i++) {
+ ret = x86_instantiate_serdev(&dev_info->serdev_info[i], i);
+ if (ret < 0) {
+ x86_android_tablet_cleanup();
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+module_init(x86_android_tablet_init);
+module_exit(x86_android_tablet_cleanup);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("X86 Android tablets DSDT fixups driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 1ae458c02656..55ae72a2818b 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -22,7 +22,7 @@ static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
size_t nbytes, loff_t * ppos)
{
- struct pnp_dev *dev = PDE_DATA(file_inode(file));
+ struct pnp_dev *dev = pde_data(file_inode(file));
int pos = *ppos;
int cnt, size = 256;
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 669ef4700c1a..f7e86ae9f72f 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -160,7 +160,7 @@ static int pnp_dock_thread(void *unused)
* No dock to manage
*/
case PNP_FUNCTION_NOT_SUPPORTED:
- complete_and_exit(&unload_sem, 0);
+ kthread_complete_and_exit(&unload_sem, 0);
case PNP_SYSTEM_NOT_DOCKED:
d = 0;
break;
@@ -170,7 +170,7 @@ static int pnp_dock_thread(void *unused)
default:
pnpbios_print_status("pnp_dock_thread", status);
printk(KERN_WARNING "PnPBIOS: disabling dock monitoring.\n");
- complete_and_exit(&unload_sem, 0);
+ kthread_complete_and_exit(&unload_sem, 0);
}
if (d != docked) {
if (pnp_dock_event(d, &now) == 0) {
@@ -183,7 +183,7 @@ static int pnp_dock_thread(void *unused)
}
}
}
- complete_and_exit(&unload_sem, 0);
+ kthread_complete_and_exit(&unload_sem, 0);
}
static int pnpbios_get_resources(struct pnp_dev *dev)
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index a806830e3a40..0f0d819b157f 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -173,13 +173,13 @@ static int pnpbios_proc_show(struct seq_file *m, void *v)
static int pnpbios_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, pnpbios_proc_show, PDE_DATA(inode));
+ return single_open(file, pnpbios_proc_show, pde_data(inode));
}
static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- void *data = PDE_DATA(file_inode(file));
+ void *data = pde_data(file_inode(file));
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c
index 0532803e6cbc..d90e76fcb938 100644
--- a/drivers/power/reset/mt6323-poweroff.c
+++ b/drivers/power/reset/mt6323-poweroff.c
@@ -57,6 +57,9 @@ static int mt6323_pwrc_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
pwrc->base = res->start;
pwrc->regmap = mt6397_chip->regmap;
pwrc->dev = &pdev->dev;
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 5cf5bb56d2e3..b366e2fd8e97 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -557,6 +557,18 @@ config CHARGER_MAX77693
help
Say Y to enable support for the Maxim MAX77693 battery charger.
+config CHARGER_MAX77976
+ tristate "Maxim MAX77976 battery charger driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ The Maxim MAX77976 is a 19 Vin, 5.5A 1-Cell Li+ Battery Charger
+ with USB OTG support. It has an I2C interface for configuration.
+
+ Say Y to enable support for the Maxim MAX77976 battery charger.
+ This driver can also be built as a module. If so, the module will be
+ called max77976_charger.
+
config CHARGER_MAX8997
tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
depends on MFD_MAX8997 && REGULATOR_MAX8997
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 4e55a11aab79..2c1b264b2046 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
obj-$(CONFIG_CHARGER_DETECTOR_MAX14656) += max14656_charger_detector.o
obj-$(CONFIG_CHARGER_MAX77650) += max77650-charger.o
obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
+obj-$(CONFIG_CHARGER_MAX77976) += max77976_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_MP2629) += mp2629_charger.o
diff --git a/drivers/power/supply/ab8500-bm.h b/drivers/power/supply/ab8500-bm.h
index d11405b7ee1a..56a5aaf9a27a 100644
--- a/drivers/power/supply/ab8500-bm.h
+++ b/drivers/power/supply/ab8500-bm.h
@@ -160,13 +160,6 @@
#define BTEMP_HIGH_TH_57_1 0x02
#define BTEMP_HIGH_TH_62 0x03
-/* current is mA */
-#define USB_0P1A 100
-#define USB_0P2A 200
-#define USB_0P3A 300
-#define USB_0P4A 400
-#define USB_0P5A 500
-
#define LOW_BAT_3P1V 0x20
#define LOW_BAT_2P3V 0x00
#define LOW_BAT_RESET 0x01
@@ -203,8 +196,8 @@ enum bup_vch_sel {
#define BATT_OVV_TH_3P7 0x00
#define BATT_OVV_TH_4P75 0x01
-/* A value to indicate over voltage */
-#define BATT_OVV_VALUE 4750
+/* A value to indicate over voltage (microvolts) */
+#define BATT_OVV_VALUE 4750000
/* VBUS OVV constants */
#define VBUS_OVV_SELECT_MASK 0x78
@@ -291,16 +284,6 @@ struct ab8500_res_to_temp {
int resist;
};
-/**
- * struct ab8500_v_to_cap - Table for translating voltage to capacity
- * @voltage: Voltage in mV
- * @capacity: Capacity in percent
- */
-struct ab8500_v_to_cap {
- int voltage;
- int capacity;
-};
-
/* Forward declaration */
struct ab8500_fg;
@@ -314,10 +297,9 @@ struct ab8500_fg;
* @init_total_time: Total init time during startup
* @high_curr_time: Time current has to be high to go to recovery
* @accu_charging: FG accumulation time while charging
- * @accu_high_curr: FG accumulation time in high current mode
- * @high_curr_threshold: High current threshold, in mA
- * @lowbat_threshold: Low battery threshold, in mV
- * @overbat_threshold: Over battery threshold, in mV
+ * @accu_high_curr: FG accumulation time in high current mode
+ * @high_curr_threshold_ua: High current threshold, in uA
+ * @lowbat_threshold_uv: Low battery threshold, in uV
* @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
* Resolution in 50 mV step.
* @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
@@ -342,9 +324,8 @@ struct ab8500_fg_parameters {
int high_curr_time;
int accu_charging;
int accu_high_curr;
- int high_curr_threshold;
- int lowbat_threshold;
- int overbat_threshold;
+ int high_curr_threshold_ua;
+ int lowbat_threshold_uv;
int battok_falling_th_sel0;
int battok_raising_th_sel1;
int user_cap_limit;
@@ -359,31 +340,21 @@ struct ab8500_fg_parameters {
/**
* struct ab8500_charger_maximization - struct used by the board config.
* @use_maxi: Enable maximization for this battery type
- * @maxi_chg_curr: Maximum charger current allowed
+ * @maxi_chg_curr_ua: Maximum charger current allowed in microampere
* @maxi_wait_cycles: cycles to wait before setting charger current
- * @charger_curr_step delta between two charger current settings (mA)
+ * @charger_curr_step_ua: delta between two charger current settings (uA)
*/
struct ab8500_maxim_parameters {
bool ena_maxi;
- int chg_curr;
+ int chg_curr_ua;
int wait_cycles;
- int charger_curr_step;
+ int charger_curr_step_ua;
};
/**
* struct ab8500_battery_type - different batteries supported
- * @name: battery technology
* @resis_high: battery upper resistance limit
* @resis_low: battery lower resistance limit
- * @charge_full_design: Maximum battery capacity in mAh
- * @nominal_voltage: Nominal voltage of the battery in mV
- * @termination_vol: max voltage upto which battery can be charged
- * @termination_curr battery charging termination current in mA
- * @recharge_cap battery capacity limit that will trigger a new
- * full charging cycle in the case where maintenan-
- * -ce charging has been disabled
- * @normal_cur_lvl: charger current in normal state in mA
- * @normal_vol_lvl: charger voltage in normal state in mV
* @maint_a_cur_lvl: charger current in maintenance A state in mA
* @maint_a_vol_lvl: charger voltage in maintenance A state in mV
* @maint_a_chg_timer_h: charge time in maintenance A state
@@ -392,25 +363,12 @@ struct ab8500_maxim_parameters {
* @maint_b_chg_timer_h: charge time in maintenance B state
* @low_high_cur_lvl: charger current in temp low/high state in mA
* @low_high_vol_lvl: charger voltage in temp low/high state in mV'
- * @battery_resistance: battery inner resistance in mOhm.
* @n_r_t_tbl_elements: number of elements in r_to_t_tbl
* @r_to_t_tbl: table containing resistance to temp points
- * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
- * @v_to_cap_tbl: Voltage to capacity (in %) table
- * @n_batres_tbl_elements number of elements in the batres_tbl
- * @batres_tbl battery internal resistance vs temperature table
*/
struct ab8500_battery_type {
- int name;
int resis_high;
int resis_low;
- int charge_full_design;
- int nominal_voltage;
- int termination_vol;
- int termination_curr;
- int recharge_cap;
- int normal_cur_lvl;
- int normal_vol_lvl;
int maint_a_cur_lvl;
int maint_a_vol_lvl;
int maint_a_chg_timer_h;
@@ -419,13 +377,8 @@ struct ab8500_battery_type {
int maint_b_chg_timer_h;
int low_high_cur_lvl;
int low_high_vol_lvl;
- int battery_resistance;
int n_temp_tbl_elements;
const struct ab8500_res_to_temp *r_to_t_tbl;
- int n_v_cap_tbl_elements;
- const struct ab8500_v_to_cap *v_to_cap_tbl;
- int n_batres_tbl_elements;
- const struct batres_vs_temp *batres_tbl;
};
/**
@@ -446,24 +399,21 @@ struct ab8500_bm_capacity_levels {
/**
* struct ab8500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max: maximum allowed USB charger voltage in mV
- * @usb_curr_max: maximum allowed USB charger current in mA
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
+ * @usb_volt_max_uv: maximum allowed USB charger voltage in uV
+ * @usb_curr_max_ua: maximum allowed USB charger current in uA
+ * @ac_volt_max_uv: maximum allowed AC charger voltage in uV
+ * @ac_curr_max_ua: maximum allowed AC charger current in uA
*/
struct ab8500_bm_charger_parameters {
- int usb_volt_max;
- int usb_curr_max;
- int ac_volt_max;
- int ac_curr_max;
+ int usb_volt_max_uv;
+ int usb_curr_max_ua;
+ int ac_volt_max_uv;
+ int ac_curr_max_ua;
};
/**
* struct ab8500_bm_data - ab8500 battery management data
- * @temp_under under this temp, charging is stopped
- * @temp_low between this temp and temp_under charging is reduced
- * @temp_high between this temp and temp_over charging is reduced
- * @temp_over over this temp, charging is stopped
+ * @bi battery info from device tree
* @temp_now present battery temperature
* @temp_interval_chg temperature measurement interval in s when charging
* @temp_interval_nochg temperature measurement interval in s when not charging
@@ -478,16 +428,10 @@ struct ab8500_bm_charger_parameters {
* @enable_overshoot flag to enable VBAT overshoot control
* @auto_trig flag to enable auto adc trigger
* @fg_res resistance of FG resistor in 0.1mOhm
- * @n_btypes number of elements in array bat_type
- * @batt_id index of the identified battery in array bat_type
* @interval_charging charge alg cycle period time when charging (sec)
* @interval_not_charging charge alg cycle period time when not charging (sec)
* @temp_hysteresis temperature hysteresis
* @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
- * @n_chg_out_curr number of elements in array chg_output_curr
- * @n_chg_in_curr number of elements in array chg_input_curr
- * @chg_output_curr charger output current level map
- * @chg_input_curr charger input current level map
* @maxi maximization parameters
* @cap_levels capacity in percent for the different capacity levels
* @bat_type table of supported battery types
@@ -495,10 +439,7 @@ struct ab8500_bm_charger_parameters {
* @fg_params fuel gauge parameters
*/
struct ab8500_bm_data {
- int temp_under;
- int temp_low;
- int temp_high;
- int temp_over;
+ struct power_supply_battery_info *bi;
int temp_now;
int temp_interval_chg;
int temp_interval_nochg;
@@ -513,16 +454,10 @@ struct ab8500_bm_data {
bool auto_trig;
enum ab8500_adc_therm adc_therm;
int fg_res;
- int n_btypes;
- int batt_id;
int interval_charging;
int interval_not_charging;
int temp_hysteresis;
int gnd_lift_resistance;
- int n_chg_out_curr;
- int n_chg_in_curr;
- int *chg_output_curr;
- int *chg_input_curr;
const struct ab8500_maxim_parameters *maxi;
const struct ab8500_bm_capacity_levels *cap_levels;
struct ab8500_battery_type *bat_type;
@@ -547,17 +482,6 @@ struct res_to_temp {
int resist;
};
-/**
- * struct batres_vs_temp - defines one point in a temp vs battery internal
- * resistance curve.
- * @temp: battery pack temperature in Celsius
- * @resist: battery internal reistance in mOhm
- */
-struct batres_vs_temp {
- int temp;
- int resist;
-};
-
/* Forward declaration */
struct ab8500_fg;
@@ -570,9 +494,10 @@ int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
-int ab8500_bm_of_probe(struct device *dev,
- struct device_node *np,
+int ab8500_bm_of_probe(struct power_supply *psy,
struct ab8500_bm_data *bm);
+void ab8500_bm_of_remove(struct power_supply *psy,
+ struct ab8500_bm_data *bm);
extern struct platform_driver ab8500_fg_driver;
extern struct platform_driver ab8500_btemp_driver;
diff --git a/drivers/power/supply/ab8500-chargalg.h b/drivers/power/supply/ab8500-chargalg.h
index 07e6ff50084f..f47a0061c36a 100644
--- a/drivers/power/supply/ab8500-chargalg.h
+++ b/drivers/power/supply/ab8500-chargalg.h
@@ -31,16 +31,16 @@ struct ux500_charger_ops {
* struct ux500_charger - power supply ux500 charger sub class
* @psy power supply base class
* @ops ux500 charger operations
- * @max_out_volt maximum output charger voltage in mV
- * @max_out_curr maximum output charger current in mA
+ * @max_out_volt_uv maximum output charger voltage in uV
+ * @max_out_curr_ua maximum output charger current in uA
* @enabled indicates if this charger is used or not
* @external external charger unit (pm2xxx)
*/
struct ux500_charger {
struct power_supply *psy;
struct ux500_charger_ops ops;
- int max_out_volt;
- int max_out_curr;
+ int max_out_volt_uv;
+ int max_out_curr_ua;
int wdt_refresh;
bool enabled;
bool external;
diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c
index bfc1245d7912..7ae95f537580 100644
--- a/drivers/power/supply/ab8500_bmdata.c
+++ b/drivers/power/supply/ab8500_bmdata.c
@@ -5,127 +5,42 @@
#include "ab8500-bm.h"
-/*
- * These are the defined batteries that uses a NTC and ID resistor placed
- * inside of the battery pack.
- * Note that the res_to_temp table must be strictly sorted by falling resistance
- * values to work.
- */
-const struct ab8500_res_to_temp ab8500_temp_tbl_a_thermistor[] = {
- {-5, 53407},
- { 0, 48594},
- { 5, 43804},
- {10, 39188},
- {15, 34870},
- {20, 30933},
- {25, 27422},
- {30, 24347},
- {35, 21694},
- {40, 19431},
- {45, 17517},
- {50, 15908},
- {55, 14561},
- {60, 13437},
- {65, 12500},
-};
-EXPORT_SYMBOL(ab8500_temp_tbl_a_thermistor);
-
-const int ab8500_temp_tbl_a_size = ARRAY_SIZE(ab8500_temp_tbl_a_thermistor);
-EXPORT_SYMBOL(ab8500_temp_tbl_a_size);
-
-const struct ab8500_res_to_temp ab8500_temp_tbl_b_thermistor[] = {
- {-5, 200000},
- { 0, 159024},
- { 5, 151921},
- {10, 144300},
- {15, 136424},
- {20, 128565},
- {25, 120978},
- {30, 113875},
- {35, 107397},
- {40, 101629},
- {45, 96592},
- {50, 92253},
- {55, 88569},
- {60, 85461},
- {65, 82869},
-};
-EXPORT_SYMBOL(ab8500_temp_tbl_b_thermistor);
-
-const int ab8500_temp_tbl_b_size = ARRAY_SIZE(ab8500_temp_tbl_b_thermistor);
-EXPORT_SYMBOL(ab8500_temp_tbl_b_size);
-
-static const struct ab8500_v_to_cap cap_tbl_a_thermistor[] = {
- {4171, 100},
- {4114, 95},
- {4009, 83},
- {3947, 74},
- {3907, 67},
- {3863, 59},
- {3830, 56},
- {3813, 53},
- {3791, 46},
- {3771, 33},
- {3754, 25},
- {3735, 20},
- {3717, 17},
- {3681, 13},
- {3664, 8},
- {3651, 6},
- {3635, 5},
- {3560, 3},
- {3408, 1},
- {3247, 0},
-};
-
-static const struct ab8500_v_to_cap cap_tbl_b_thermistor[] = {
- {4161, 100},
- {4124, 98},
- {4044, 90},
- {4003, 85},
- {3966, 80},
- {3933, 75},
- {3888, 67},
- {3849, 60},
- {3813, 55},
- {3787, 47},
- {3772, 30},
- {3751, 25},
- {3718, 20},
- {3681, 16},
- {3660, 14},
- {3589, 10},
- {3546, 7},
- {3495, 4},
- {3404, 2},
- {3250, 0},
-};
-
-static const struct ab8500_v_to_cap cap_tbl[] = {
- {4186, 100},
- {4163, 99},
- {4114, 95},
- {4068, 90},
- {3990, 80},
- {3926, 70},
- {3898, 65},
- {3866, 60},
- {3833, 55},
- {3812, 50},
- {3787, 40},
- {3768, 30},
- {3747, 25},
- {3730, 20},
- {3705, 15},
- {3699, 14},
- {3684, 12},
- {3672, 9},
- {3657, 7},
- {3638, 6},
- {3556, 4},
- {3424, 2},
- {3317, 1},
- {3094, 0},
+/* Default: under this temperature, charging is stopped */
+#define AB8500_TEMP_UNDER 3
+/* Default: between this temp and AB8500_TEMP_UNDER charging is reduced */
+#define AB8500_TEMP_LOW 8
+/* Default: between this temp and AB8500_TEMP_OVER charging is reduced */
+#define AB8500_TEMP_HIGH 43
+/* Default: over this temp, charging is stopped */
+#define AB8500_TEMP_OVER 48
+/* Default: temperature hysteresis */
+#define AB8500_TEMP_HYSTERESIS 3
+
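+/* Default open circuit voltage capacity table, sorted by falling voltage */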
+static struct power_supply_battery_ocv_table ocv_cap_tbl[] = {
+ { .ocv = 4186000, .capacity = 100},
+ { .ocv = 4163000, .capacity = 99},
+ { .ocv = 4114000, .capacity = 95},
+ { .ocv = 4068000, .capacity = 90},
+ { .ocv = 3990000, .capacity = 80},
+ { .ocv = 3926000, .capacity = 70},
+ { .ocv = 3898000, .capacity = 65},
+ { .ocv = 3866000, .capacity = 60},
+ { .ocv = 3833000, .capacity = 55},
+ { .ocv = 3812000, .capacity = 50},
+ { .ocv = 3787000, .capacity = 40},
+ { .ocv = 3768000, .capacity = 30},
+ { .ocv = 3747000, .capacity = 25},
+ { .ocv = 3730000, .capacity = 20},
+ { .ocv = 3705000, .capacity = 15},
+ { .ocv = 3699000, .capacity = 14},
+ { .ocv = 3684000, .capacity = 12},
+ { .ocv = 3672000, .capacity = 9},
+ { .ocv = 3657000, .capacity = 7},
+ { .ocv = 3638000, .capacity = 6},
+ { .ocv = 3556000, .capacity = 4},
+ { .ocv = 3424000, .capacity = 2},
+ { .ocv = 3317000, .capacity = 1},
+ { .ocv = 3094000, .capacity = 0},
};
/*
@@ -152,244 +67,33 @@ static const struct ab8500_res_to_temp temp_tbl[] = {
/*
* Note that the batres_vs_temp table must be strictly sorted by falling
- * temperature values to work.
- */
-static const struct batres_vs_temp temp_to_batres_tbl_thermistor[] = {
- { 40, 120},
- { 30, 135},
- { 20, 165},
- { 10, 230},
- { 00, 325},
- {-10, 445},
- {-20, 595},
-};
-
-/*
- * Note that the batres_vs_temp table must be strictly sorted by falling
- * temperature values to work.
- */
-static const struct batres_vs_temp temp_to_batres_tbl_ext_thermistor[] = {
- { 60, 300},
- { 30, 300},
- { 20, 300},
- { 10, 300},
- { 00, 300},
- {-10, 300},
- {-20, 300},
-};
-
-/* battery resistance table for LI ION 9100 battery */
-static const struct batres_vs_temp temp_to_batres_tbl_9100[] = {
- { 60, 180},
- { 30, 180},
- { 20, 180},
- { 10, 180},
- { 00, 180},
- {-10, 180},
- {-20, 180},
-};
-
-static struct ab8500_battery_type bat_type_thermistor[] = {
- [BATTERY_UNKNOWN] = {
- /* First element always represent the UNKNOWN battery */
- .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
- .resis_high = 0,
- .resis_low = 0,
- .battery_resistance = 300,
- .charge_full_design = 612,
- .nominal_voltage = 3700,
- .termination_vol = 4050,
- .termination_curr = 200,
- .recharge_cap = 95,
- .normal_cur_lvl = 400,
- .normal_vol_lvl = 4100,
- .maint_a_cur_lvl = 400,
- .maint_a_vol_lvl = 4050,
- .maint_a_chg_timer_h = 60,
- .maint_b_cur_lvl = 400,
- .maint_b_vol_lvl = 4000,
- .maint_b_chg_timer_h = 200,
- .low_high_cur_lvl = 300,
- .low_high_vol_lvl = 4000,
- .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
- .r_to_t_tbl = temp_tbl,
- .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
- .v_to_cap_tbl = cap_tbl,
- .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
- .batres_tbl = temp_to_batres_tbl_thermistor,
- },
- {
- .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
- .resis_high = 53407,
- .resis_low = 12500,
- .battery_resistance = 300,
- .charge_full_design = 900,
- .nominal_voltage = 3600,
- .termination_vol = 4150,
- .termination_curr = 80,
- .recharge_cap = 95,
- .normal_cur_lvl = 700,
- .normal_vol_lvl = 4200,
- .maint_a_cur_lvl = 600,
- .maint_a_vol_lvl = 4150,
- .maint_a_chg_timer_h = 60,
- .maint_b_cur_lvl = 600,
- .maint_b_vol_lvl = 4100,
- .maint_b_chg_timer_h = 200,
- .low_high_cur_lvl = 300,
- .low_high_vol_lvl = 4000,
- .n_temp_tbl_elements = ARRAY_SIZE(ab8500_temp_tbl_a_thermistor),
- .r_to_t_tbl = ab8500_temp_tbl_a_thermistor,
- .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_a_thermistor),
- .v_to_cap_tbl = cap_tbl_a_thermistor,
- .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
- .batres_tbl = temp_to_batres_tbl_thermistor,
-
- },
- {
- .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
- .resis_high = 200000,
- .resis_low = 82869,
- .battery_resistance = 300,
- .charge_full_design = 900,
- .nominal_voltage = 3600,
- .termination_vol = 4150,
- .termination_curr = 80,
- .recharge_cap = 95,
- .normal_cur_lvl = 700,
- .normal_vol_lvl = 4200,
- .maint_a_cur_lvl = 600,
- .maint_a_vol_lvl = 4150,
- .maint_a_chg_timer_h = 60,
- .maint_b_cur_lvl = 600,
- .maint_b_vol_lvl = 4100,
- .maint_b_chg_timer_h = 200,
- .low_high_cur_lvl = 300,
- .low_high_vol_lvl = 4000,
- .n_temp_tbl_elements = ARRAY_SIZE(ab8500_temp_tbl_b_thermistor),
- .r_to_t_tbl = ab8500_temp_tbl_b_thermistor,
- .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_b_thermistor),
- .v_to_cap_tbl = cap_tbl_b_thermistor,
- .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
- .batres_tbl = temp_to_batres_tbl_thermistor,
- },
-};
-
-static struct ab8500_battery_type bat_type_ext_thermistor[] = {
- [BATTERY_UNKNOWN] = {
- /* First element always represent the UNKNOWN battery */
- .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
- .resis_high = 0,
- .resis_low = 0,
- .battery_resistance = 300,
- .charge_full_design = 612,
- .nominal_voltage = 3700,
- .termination_vol = 4050,
- .termination_curr = 200,
- .recharge_cap = 95,
- .normal_cur_lvl = 400,
- .normal_vol_lvl = 4100,
- .maint_a_cur_lvl = 400,
- .maint_a_vol_lvl = 4050,
- .maint_a_chg_timer_h = 60,
- .maint_b_cur_lvl = 400,
- .maint_b_vol_lvl = 4000,
- .maint_b_chg_timer_h = 200,
- .low_high_cur_lvl = 300,
- .low_high_vol_lvl = 4000,
- .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
- .r_to_t_tbl = temp_tbl,
- .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
- .v_to_cap_tbl = cap_tbl,
- .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
- .batres_tbl = temp_to_batres_tbl_thermistor,
- },
-/*
- * These are the batteries that doesn't have an internal NTC resistor to measure
- * its temperature. The temperature in this case is measure with a NTC placed
- * near the battery but on the PCB.
+ * temperature values to work. Factory resistance is 300 mOhm and the
+ * resistance values to the right are percentages of 300 mOhm.
*/
- {
- .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
- .resis_high = 76000,
- .resis_low = 53000,
- .battery_resistance = 300,
- .charge_full_design = 900,
- .nominal_voltage = 3700,
- .termination_vol = 4150,
- .termination_curr = 100,
- .recharge_cap = 95,
- .normal_cur_lvl = 700,
- .normal_vol_lvl = 4200,
- .maint_a_cur_lvl = 600,
- .maint_a_vol_lvl = 4150,
- .maint_a_chg_timer_h = 60,
- .maint_b_cur_lvl = 600,
- .maint_b_vol_lvl = 4100,
- .maint_b_chg_timer_h = 200,
- .low_high_cur_lvl = 300,
- .low_high_vol_lvl = 4000,
- .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
- .r_to_t_tbl = temp_tbl,
- .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
- .v_to_cap_tbl = cap_tbl,
- .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
- .batres_tbl = temp_to_batres_tbl_thermistor,
- },
- {
- .name = POWER_SUPPLY_TECHNOLOGY_LION,
- .resis_high = 30000,
- .resis_low = 10000,
- .battery_resistance = 300,
- .charge_full_design = 950,
- .nominal_voltage = 3700,
- .termination_vol = 4150,
- .termination_curr = 100,
- .recharge_cap = 95,
- .normal_cur_lvl = 700,
- .normal_vol_lvl = 4200,
- .maint_a_cur_lvl = 600,
- .maint_a_vol_lvl = 4150,
- .maint_a_chg_timer_h = 60,
- .maint_b_cur_lvl = 600,
- .maint_b_vol_lvl = 4100,
- .maint_b_chg_timer_h = 200,
- .low_high_cur_lvl = 300,
- .low_high_vol_lvl = 4000,
- .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
- .r_to_t_tbl = temp_tbl,
- .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
- .v_to_cap_tbl = cap_tbl,
- .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
- .batres_tbl = temp_to_batres_tbl_thermistor,
- },
- {
- .name = POWER_SUPPLY_TECHNOLOGY_LION,
- .resis_high = 95000,
- .resis_low = 76001,
- .battery_resistance = 300,
- .charge_full_design = 950,
- .nominal_voltage = 3700,
- .termination_vol = 4150,
- .termination_curr = 100,
- .recharge_cap = 95,
- .normal_cur_lvl = 700,
- .normal_vol_lvl = 4200,
- .maint_a_cur_lvl = 600,
- .maint_a_vol_lvl = 4150,
- .maint_a_chg_timer_h = 60,
- .maint_b_cur_lvl = 600,
- .maint_b_vol_lvl = 4100,
- .maint_b_chg_timer_h = 200,
- .low_high_cur_lvl = 300,
- .low_high_vol_lvl = 4000,
- .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
- .r_to_t_tbl = temp_tbl,
- .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
- .v_to_cap_tbl = cap_tbl,
- .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
- .batres_tbl = temp_to_batres_tbl_thermistor,
- },
+static struct power_supply_resistance_temp_table temp_to_batres_tbl_thermistor[] = {
+ { .temp = 40, .resistance = 40 /* 120 mOhm */ },
+ { .temp = 30, .resistance = 45 /* 135 mOhm */ },
+ { .temp = 20, .resistance = 55 /* 165 mOhm */ },
+ { .temp = 10, .resistance = 77 /* 230 mOhm */ },
+ { .temp = 00, .resistance = 108 /* 325 mOhm */ },
+ { .temp = -10, .resistance = 158 /* 445 mOhm */ },
+ { .temp = -20, .resistance = 198 /* 595 mOhm */ },
+};
+
+/* Default battery type for reference designs is the unknown type */
+static struct ab8500_battery_type bat_type_thermistor_unknown = {
+ .resis_high = 0,
+ .resis_low = 0,
+ .maint_a_cur_lvl = 400,
+ .maint_a_vol_lvl = 4050,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 400,
+ .maint_b_vol_lvl = 4000,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
};
static const struct ab8500_bm_capacity_levels cap_levels = {
@@ -409,8 +113,8 @@ static const struct ab8500_fg_parameters fg = {
.high_curr_time = 60,
.accu_charging = 30,
.accu_high_curr = 30,
- .high_curr_threshold = 50,
- .lowbat_threshold = 3100,
+ .high_curr_threshold_ua = 50000,
+ .lowbat_threshold_uv = 3100000,
.battok_falling_th_sel0 = 2860,
.battok_raising_th_sel1 = 2860,
.maint_thres = 95,
@@ -424,41 +128,20 @@ static const struct ab8500_fg_parameters fg = {
static const struct ab8500_maxim_parameters ab8500_maxi_params = {
.ena_maxi = true,
- .chg_curr = 910,
+ .chg_curr_ua = 910000,
.wait_cycles = 10,
- .charger_curr_step = 100,
+ .charger_curr_step_ua = 100000,
};
static const struct ab8500_bm_charger_parameters chg = {
- .usb_volt_max = 5500,
- .usb_curr_max = 1500,
- .ac_volt_max = 7500,
- .ac_curr_max = 1500,
-};
-
-/*
- * This array maps the raw hex value to charger output current used by the
- * AB8500 values
- */
-static int ab8500_charge_output_curr_map[] = {
- 100, 200, 300, 400, 500, 600, 700, 800,
- 900, 1000, 1100, 1200, 1300, 1400, 1500, 1500,
-};
-
-/*
- * This array maps the raw hex value to charger input current used by the
- * AB8500 values
- */
-static int ab8500_charge_input_curr_map[] = {
- 50, 98, 193, 290, 380, 450, 500, 600,
- 700, 800, 900, 1000, 1100, 1300, 1400, 1500,
+ .usb_volt_max_uv = 5500000,
+ .usb_curr_max_ua = 1500000,
+ .ac_volt_max_uv = 7500000,
+ .ac_curr_max_ua = 1500000,
};
+/* This is referenced directly in the charger code */
struct ab8500_bm_data ab8500_bm_data = {
- .temp_under = 3,
- .temp_low = 8,
- .temp_high = 43,
- .temp_over = 48,
.main_safety_tmr_h = 4,
.temp_interval_chg = 20,
.temp_interval_nochg = 120,
@@ -472,71 +155,91 @@ struct ab8500_bm_data ab8500_bm_data = {
.enable_overshoot = false,
.fg_res = 100,
.cap_levels = &cap_levels,
- .bat_type = bat_type_thermistor,
- .n_btypes = ARRAY_SIZE(bat_type_thermistor),
- .batt_id = 0,
+ .bat_type = &bat_type_thermistor_unknown,
.interval_charging = 5,
.interval_not_charging = 120,
- .temp_hysteresis = 3,
.gnd_lift_resistance = 34,
- .chg_output_curr = ab8500_charge_output_curr_map,
- .n_chg_out_curr = ARRAY_SIZE(ab8500_charge_output_curr_map),
.maxi = &ab8500_maxi_params,
.chg_params = &chg,
.fg_params = &fg,
- .chg_input_curr = ab8500_charge_input_curr_map,
- .n_chg_in_curr = ARRAY_SIZE(ab8500_charge_input_curr_map),
};
-int ab8500_bm_of_probe(struct device *dev,
- struct device_node *np,
+int ab8500_bm_of_probe(struct power_supply *psy,
struct ab8500_bm_data *bm)
{
- const struct batres_vs_temp *tmp_batres_tbl;
- struct device_node *battery_node;
- const char *btech;
- int i;
-
- battery_node = of_parse_phandle(np, "monitored-battery", 0);
- if (!battery_node) {
- dev_err(dev, "battery node or reference missing\n");
- return -EINVAL;
+ struct power_supply_battery_info *bi;
+ struct device *dev = &psy->dev;
+ int ret;
+
+ ret = power_supply_get_battery_info(psy, &bm->bi);
+ if (ret) {
+ dev_err(dev, "cannot retrieve battery info\n");
+ return ret;
}
-
- btech = of_get_property(battery_node, "stericsson,battery-type", NULL);
- if (!btech) {
- dev_warn(dev, "missing property battery-name/type\n");
- of_node_put(battery_node);
- return -EINVAL;
+ bi = bm->bi;
+
+ /* Fill in defaults for any data missing from the device tree */
+ if (bi->charge_full_design_uah < 0)
+ /* The default capacity is 612 mAh for unknown batteries */
+ bi->charge_full_design_uah = 612000;
+
+ /*
+ * All of these voltages need to be specified or we will simply
+ * fall back to safe defaults.
+ */
+ if ((bi->voltage_min_design_uv < 0) ||
+ (bi->voltage_max_design_uv < 0) ||
+ (bi->overvoltage_limit_uv < 0)) {
+ /* Nominal voltage is 3.7V for unknown batteries */
+ bi->voltage_min_design_uv = 3700000;
+ bi->voltage_max_design_uv = 3700000;
+ /* Termination voltage (overcharge limit) 4.05V */
+ bi->overvoltage_limit_uv = 4050000;
}
- if (strncmp(btech, "LION", 4) == 0) {
- bm->no_maintenance = true;
- bm->chg_unknown_bat = true;
- bm->bat_type[BATTERY_UNKNOWN].charge_full_design = 2600;
- bm->bat_type[BATTERY_UNKNOWN].termination_vol = 4150;
- bm->bat_type[BATTERY_UNKNOWN].recharge_cap = 95;
- bm->bat_type[BATTERY_UNKNOWN].normal_cur_lvl = 520;
- bm->bat_type[BATTERY_UNKNOWN].normal_vol_lvl = 4200;
+ if (bi->constant_charge_current_max_ua < 0)
+ bi->constant_charge_current_max_ua = 400000;
+
+ if (bi->constant_charge_voltage_max_uv < 0)
+ bi->constant_charge_voltage_max_uv = 4100000;
+
+ if (bi->charge_term_current_ua < 0)
+ /* Charging stops when we drop below this current */
+ bi->charge_term_current_ua = 200000;
+
+ /*
+ * Internal resistance and factory resistance are tightly coupled
+ * so both MUST be defined or we fall back to defaults.
+ */
+ if ((bi->factory_internal_resistance_uohm < 0) ||
+ !bi->resist_table) {
+ bi->factory_internal_resistance_uohm = 300000;
+ bi->resist_table = temp_to_batres_tbl_thermistor;
+ bi->resist_table_size = ARRAY_SIZE(temp_to_batres_tbl_thermistor);
}
- if (of_property_read_bool(battery_node, "thermistor-on-batctrl")) {
- if (strncmp(btech, "LION", 4) == 0)
- tmp_batres_tbl = temp_to_batres_tbl_9100;
- else
- tmp_batres_tbl = temp_to_batres_tbl_thermistor;
- } else {
- bm->n_btypes = 4;
- bm->bat_type = bat_type_ext_thermistor;
- bm->adc_therm = AB8500_ADC_THERM_BATTEMP;
- tmp_batres_tbl = temp_to_batres_tbl_ext_thermistor;
+ if (!bi->ocv_table[0]) {
+ /* Default capacity table at, say, 25 degrees Celsius */
+ bi->ocv_temp[0] = 25;
+ bi->ocv_table[0] = ocv_cap_tbl;
+ bi->ocv_table_size[0] = ARRAY_SIZE(ocv_cap_tbl);
}
- /* select the battery resolution table */
- for (i = 0; i < bm->n_btypes; ++i)
- bm->bat_type[i].batres_tbl = tmp_batres_tbl;
-
- of_node_put(battery_node);
+ if (bi->temp_min == INT_MIN)
+ bi->temp_min = AB8500_TEMP_UNDER;
+ if (bi->temp_max == INT_MAX)
+ bi->temp_max = AB8500_TEMP_OVER;
+ if (bi->temp_alert_min == INT_MIN)
+ bi->temp_alert_min = AB8500_TEMP_LOW;
+ if (bi->temp_alert_max == INT_MAX)
+ bi->temp_alert_max = AB8500_TEMP_HIGH;
+ bm->temp_hysteresis = AB8500_TEMP_HYSTERESIS;
return 0;
}
+
+void ab8500_bm_of_remove(struct power_supply *psy,
+ struct ab8500_bm_data *bm)
+{
+ power_supply_put_battery_info(psy, bm->bi);
+}
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index b6c9111d77d7..cc33c5187fbb 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -451,15 +451,13 @@ static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
*/
static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
{
+ struct power_supply_battery_info *bi = di->bm->bi;
int temp, ret;
static int prev;
int rbat, rntc, vntc;
- u8 id;
- id = di->bm->batt_id;
-
- if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL &&
- id != BATTERY_UNKNOWN) {
+ if ((di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL) &&
+ (bi && (bi->technology == POWER_SUPPLY_TECHNOLOGY_UNKNOWN))) {
rbat = ab8500_btemp_get_batctrl_res(di);
if (rbat < 0) {
@@ -473,8 +471,8 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
}
temp = ab8500_btemp_res_to_temp(di,
- di->bm->bat_type[id].r_to_t_tbl,
- di->bm->bat_type[id].n_temp_tbl_elements, rbat);
+ di->bm->bat_type->r_to_t_tbl,
+ di->bm->bat_type->n_temp_tbl_elements, rbat);
} else {
ret = iio_read_channel_processed(di->btemp_ball, &vntc);
if (ret < 0) {
@@ -490,8 +488,8 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
rntc = 230000 * vntc / (VTVOUT_V - vntc);
temp = ab8500_btemp_res_to_temp(di,
- di->bm->bat_type[id].r_to_t_tbl,
- di->bm->bat_type[id].n_temp_tbl_elements, rntc);
+ di->bm->bat_type->r_to_t_tbl,
+ di->bm->bat_type->n_temp_tbl_elements, rntc);
prev = temp;
}
dev_dbg(di->dev, "Battery temperature is %d\n", temp);
@@ -512,7 +510,6 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
u8 i;
di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
- di->bm->batt_id = BATTERY_UNKNOWN;
res = ab8500_btemp_get_batctrl_res(di);
if (res < 0) {
@@ -520,40 +517,37 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
return -ENXIO;
}
- /* BATTERY_UNKNOWN is defined on position 0, skip it! */
- for (i = BATTERY_UNKNOWN + 1; i < di->bm->n_btypes; i++) {
- if ((res <= di->bm->bat_type[i].resis_high) &&
- (res >= di->bm->bat_type[i].resis_low)) {
- dev_dbg(di->dev, "Battery detected on %s"
- " low %d < res %d < high: %d"
- " index: %d\n",
- di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL ?
- "BATCTRL" : "BATTEMP",
- di->bm->bat_type[i].resis_low, res,
- di->bm->bat_type[i].resis_high, i);
-
- di->bm->batt_id = i;
- break;
- }
- }
-
- if (di->bm->batt_id == BATTERY_UNKNOWN) {
+ if ((res <= di->bm->bat_type->resis_high) &&
+ (res >= di->bm->bat_type->resis_low)) {
+ dev_info(di->dev, "Battery detected on %s"
+ " low %d < res %d < high: %d\n",
+ di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL ?
+ "BATCTRL" : "BATTEMP",
+ di->bm->bat_type->resis_low, res,
+ di->bm->bat_type->resis_high);
+ } else {
dev_warn(di->dev, "Battery identified as unknown"
- ", resistance %d Ohm\n", res);
+ ", resistance %d Ohm\n", res);
return -ENXIO;
}
/*
* We only have to change current source if the
- * detected type is Type 1.
+ * detected type is Type 1 (LIPO), resis_high = 53407,
+ * resis_low = 12500, if someone hacks this in.
+ *
+ * FIXME: make sure this is done automatically for the batteries
+ * that need it.
*/
- if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL &&
- di->bm->batt_id == 1) {
+ if ((di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL) &&
+ (di->bm->bi && (di->bm->bi->technology == POWER_SUPPLY_TECHNOLOGY_LIPO)) &&
+ (res <= 53407) && (res >= 12500)) {
dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
}
- return di->bm->batt_id;
+ return 0;
}
/**
@@ -814,7 +808,10 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
val->intval = 1;
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = di->bm->bat_type[di->bm->batt_id].name;
+ if (di->bm->bi)
+ val->intval = di->bm->bi->technology;
+ else
+ val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
break;
case POWER_SUPPLY_PROP_TEMP:
val->intval = ab8500_btemp_get_temp(di);
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index ff4b26b1ceca..c4a2fe07126c 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -46,8 +46,15 @@
/* Five minutes expressed in seconds */
#define FIVE_MINUTES_IN_SECONDS 300
-#define CHARGALG_CURR_STEP_LOW 0
-#define CHARGALG_CURR_STEP_HIGH 100
+#define CHARGALG_CURR_STEP_LOW_UA 0
+#define CHARGALG_CURR_STEP_HIGH_UA 100000
+
+/*
+ * This is the battery capacity limit that will trigger a new
+ * full charging cycle in the case where maintenance charging
+ * has been disabled
+ */
+#define AB8500_RECHARGE_CAP 95
enum ab8500_chargers {
NO_CHG,
@@ -63,14 +70,14 @@ struct ab8500_chargalg_charger_info {
enum ab8500_chargers charger_type;
bool usb_chg_ok;
bool ac_chg_ok;
- int usb_volt;
- int usb_curr;
- int ac_volt;
- int ac_curr;
- int usb_vset;
- int usb_iset;
- int ac_vset;
- int ac_iset;
+ int usb_volt_uv;
+ int usb_curr_ua;
+ int ac_volt_uv;
+ int ac_curr_ua;
+ int usb_vset_uv;
+ int usb_iset_ua;
+ int ac_vset_uv;
+ int ac_iset_ua;
};
struct ab8500_chargalg_suspension_status {
@@ -81,14 +88,14 @@ struct ab8500_chargalg_suspension_status {
struct ab8500_chargalg_current_step_status {
bool curr_step_change;
- int curr_step;
+ int curr_step_ua;
};
struct ab8500_chargalg_battery_data {
int temp;
- int volt;
- int avg_curr;
- int inst_curr;
+ int volt_uv;
+ int avg_curr_ua;
+ int inst_curr_ua;
int percent;
};
@@ -177,13 +184,13 @@ struct ab8500_chargalg_events {
/**
* struct ab8500_charge_curr_maximization - Charger maximization parameters
- * @original_iset: the non optimized/maximised charger current
- * @current_iset: the charging current used at this moment
- * @test_delta_i: the delta between the current we want to charge and the
+ * @original_iset_ua: the non-optimized/maximized charger current
+ * @current_iset_ua: the charging current used at this moment
+ * @test_delta_i_ua: the delta between the current we want to charge and the
current that is really going into the battery
* @condition_cnt: number of iterations needed before a new charger current
is set
- * @max_current: maximum charger current
+ * @max_current_ua: maximum charger current
* @wait_cnt: to avoid too fast current step down in case of charger
* voltage collapse, we insert this delay between step
* down
@@ -191,11 +198,11 @@ struct ab8500_chargalg_events {
increased
*/
struct ab8500_charge_curr_maximization {
- int original_iset;
- int current_iset;
- int test_delta_i;
+ int original_iset_ua;
+ int current_iset_ua;
+ int test_delta_i_ua;
int condition_cnt;
- int max_current;
+ int max_current_ua;
int wait_cnt;
u8 level;
};
@@ -345,6 +352,8 @@ static void ab8500_chargalg_state_to(struct ab8500_chargalg *di,
static int ab8500_chargalg_check_charger_enable(struct ab8500_chargalg *di)
{
+ struct power_supply_battery_info *bi = di->bm->bi;
+
switch (di->charge_state) {
case STATE_NORMAL:
case STATE_MAINTENANCE_A:
@@ -356,13 +365,13 @@ static int ab8500_chargalg_check_charger_enable(struct ab8500_chargalg *di)
if (di->chg_info.charger_type & USB_CHG) {
return di->usb_chg->ops.check_enable(di->usb_chg,
- di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ bi->constant_charge_voltage_max_uv,
+ bi->constant_charge_current_max_ua);
} else if ((di->chg_info.charger_type & AC_CHG) &&
!(di->ac_chg->external)) {
return di->ac_chg->ops.check_enable(di->ac_chg,
- di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ bi->constant_charge_voltage_max_uv,
+ bi->constant_charge_current_max_ua);
}
return 0;
}
@@ -537,14 +546,14 @@ static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
* ab8500_chargalg_ac_en() - Turn on/off the AC charger
* @di: pointer to the ab8500_chargalg structure
* @enable: charger on/off
- * @vset: requested charger output voltage
- * @iset: requested charger output current
+ * @vset_uv: requested charger output voltage in microvolt
+ * @iset_ua: requested charger output current in microampere
*
* The AC charger will be turned on/off with the requested charge voltage and
* current
*/
static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
- int vset, int iset)
+ int vset_uv, int iset_ua)
{
static int ab8500_chargalg_ex_ac_enable_toggle;
@@ -552,13 +561,13 @@ static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
return -ENXIO;
/* Select maximum of what both the charger and the battery supports */
- if (di->ac_chg->max_out_volt)
- vset = min(vset, di->ac_chg->max_out_volt);
- if (di->ac_chg->max_out_curr)
- iset = min(iset, di->ac_chg->max_out_curr);
+ if (di->ac_chg->max_out_volt_uv)
+ vset_uv = min(vset_uv, di->ac_chg->max_out_volt_uv);
+ if (di->ac_chg->max_out_curr_ua)
+ iset_ua = min(iset_ua, di->ac_chg->max_out_curr_ua);
- di->chg_info.ac_iset = iset;
- di->chg_info.ac_vset = vset;
+ di->chg_info.ac_iset_ua = iset_ua;
+ di->chg_info.ac_vset_uv = vset_uv;
/* Enable external charger */
if (enable && di->ac_chg->external &&
@@ -568,47 +577,47 @@ static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
ab8500_chargalg_ex_ac_enable_toggle++;
}
- return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
+ return di->ac_chg->ops.enable(di->ac_chg, enable, vset_uv, iset_ua);
}
/**
* ab8500_chargalg_usb_en() - Turn on/off the USB charger
* @di: pointer to the ab8500_chargalg structure
* @enable: charger on/off
- * @vset: requested charger output voltage
- * @iset: requested charger output current
+ * @vset_uv: requested charger output voltage in microvolt
+ * @iset_ua: requested charger output current in microampere
*
* The USB charger will be turned on/off with the requested charge voltage and
* current
*/
static int ab8500_chargalg_usb_en(struct ab8500_chargalg *di, int enable,
- int vset, int iset)
+ int vset_uv, int iset_ua)
{
if (!di->usb_chg || !di->usb_chg->ops.enable)
return -ENXIO;
/* Select maximum of what both the charger and the battery supports */
- if (di->usb_chg->max_out_volt)
- vset = min(vset, di->usb_chg->max_out_volt);
- if (di->usb_chg->max_out_curr)
- iset = min(iset, di->usb_chg->max_out_curr);
+ if (di->usb_chg->max_out_volt_uv)
+ vset_uv = min(vset_uv, di->usb_chg->max_out_volt_uv);
+ if (di->usb_chg->max_out_curr_ua)
+ iset_ua = min(iset_ua, di->usb_chg->max_out_curr_ua);
- di->chg_info.usb_iset = iset;
- di->chg_info.usb_vset = vset;
+ di->chg_info.usb_iset_ua = iset_ua;
+ di->chg_info.usb_vset_uv = vset_uv;
- return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
+ return di->usb_chg->ops.enable(di->usb_chg, enable, vset_uv, iset_ua);
}
/**
* ab8500_chargalg_update_chg_curr() - Update charger current
* @di: pointer to the ab8500_chargalg structure
- * @iset: requested charger output current
+ * @iset_ua: requested charger output current in microampere
*
* The charger output current will be updated for the charger
* that is currently in use
*/
static int ab8500_chargalg_update_chg_curr(struct ab8500_chargalg *di,
- int iset)
+ int iset_ua)
{
/* Check if charger exists and update current if charging */
if (di->ac_chg && di->ac_chg->ops.update_curr &&
@@ -617,24 +626,24 @@ static int ab8500_chargalg_update_chg_curr(struct ab8500_chargalg *di,
* Select maximum of what both the charger
* and the battery supports
*/
- if (di->ac_chg->max_out_curr)
- iset = min(iset, di->ac_chg->max_out_curr);
+ if (di->ac_chg->max_out_curr_ua)
+ iset_ua = min(iset_ua, di->ac_chg->max_out_curr_ua);
- di->chg_info.ac_iset = iset;
+ di->chg_info.ac_iset_ua = iset_ua;
- return di->ac_chg->ops.update_curr(di->ac_chg, iset);
+ return di->ac_chg->ops.update_curr(di->ac_chg, iset_ua);
} else if (di->usb_chg && di->usb_chg->ops.update_curr &&
di->chg_info.charger_type & USB_CHG) {
/*
* Select maximum of what both the charger
* and the battery supports
*/
- if (di->usb_chg->max_out_curr)
- iset = min(iset, di->usb_chg->max_out_curr);
+ if (di->usb_chg->max_out_curr_ua)
+ iset_ua = min(iset_ua, di->usb_chg->max_out_curr_ua);
- di->chg_info.usb_iset = iset;
+ di->chg_info.usb_iset_ua = iset_ua;
- return di->usb_chg->ops.update_curr(di->usb_chg, iset);
+ return di->usb_chg->ops.update_curr(di->usb_chg, iset_ua);
}
return -ENXIO;
@@ -683,28 +692,28 @@ static void ab8500_chargalg_hold_charging(struct ab8500_chargalg *di)
/**
* ab8500_chargalg_start_charging() - Start the charger
* @di: pointer to the ab8500_chargalg structure
- * @vset: requested charger output voltage
- * @iset: requested charger output current
+ * @vset_uv: requested charger output voltage in microvolt
+ * @iset_ua: requested charger output current in microampere
*
* A charger will be enabled depending on the requested charger type that was
* detected previously.
*/
static void ab8500_chargalg_start_charging(struct ab8500_chargalg *di,
- int vset, int iset)
+ int vset_uv, int iset_ua)
{
switch (di->chg_info.charger_type) {
case AC_CHG:
dev_dbg(di->dev,
- "AC parameters: Vset %d, Ich %d\n", vset, iset);
+ "AC parameters: Vset %d, Ich %d\n", vset_uv, iset_ua);
ab8500_chargalg_usb_en(di, false, 0, 0);
- ab8500_chargalg_ac_en(di, true, vset, iset);
+ ab8500_chargalg_ac_en(di, true, vset_uv, iset_ua);
break;
case USB_CHG:
dev_dbg(di->dev,
- "USB parameters: Vset %d, Ich %d\n", vset, iset);
+ "USB parameters: Vset %d, Ich %d\n", vset_uv, iset_ua);
ab8500_chargalg_ac_en(di, false, 0, 0);
- ab8500_chargalg_usb_en(di, true, vset, iset);
+ ab8500_chargalg_usb_en(di, true, vset_uv, iset_ua);
break;
default:
@@ -722,27 +731,29 @@ static void ab8500_chargalg_start_charging(struct ab8500_chargalg *di,
*/
static void ab8500_chargalg_check_temp(struct ab8500_chargalg *di)
{
- if (di->batt_data.temp > (di->bm->temp_low + di->t_hyst_norm) &&
- di->batt_data.temp < (di->bm->temp_high - di->t_hyst_norm)) {
+ struct power_supply_battery_info *bi = di->bm->bi;
+
+ if (di->batt_data.temp > (bi->temp_alert_min + di->t_hyst_norm) &&
+ di->batt_data.temp < (bi->temp_alert_max - di->t_hyst_norm)) {
/* Temp OK! */
di->events.btemp_underover = false;
di->events.btemp_lowhigh = false;
di->t_hyst_norm = 0;
di->t_hyst_lowhigh = 0;
} else {
- if (((di->batt_data.temp >= di->bm->temp_high) &&
+ if (((di->batt_data.temp >= bi->temp_alert_max) &&
(di->batt_data.temp <
- (di->bm->temp_over - di->t_hyst_lowhigh))) ||
+ (bi->temp_max - di->t_hyst_lowhigh))) ||
((di->batt_data.temp >
- (di->bm->temp_under + di->t_hyst_lowhigh)) &&
- (di->batt_data.temp <= di->bm->temp_low))) {
+ (bi->temp_min + di->t_hyst_lowhigh)) &&
+ (di->batt_data.temp <= bi->temp_alert_min))) {
/* TEMP minor!!!!! */
di->events.btemp_underover = false;
di->events.btemp_lowhigh = true;
di->t_hyst_norm = di->bm->temp_hysteresis;
di->t_hyst_lowhigh = 0;
- } else if (di->batt_data.temp <= di->bm->temp_under ||
- di->batt_data.temp >= di->bm->temp_over) {
+ } else if (di->batt_data.temp <= bi->temp_min ||
+ di->batt_data.temp >= bi->temp_max) {
/* TEMP major!!!!! */
di->events.btemp_underover = true;
di->events.btemp_lowhigh = false;
@@ -766,12 +777,12 @@ static void ab8500_chargalg_check_temp(struct ab8500_chargalg *di)
*/
static void ab8500_chargalg_check_charger_voltage(struct ab8500_chargalg *di)
{
- if (di->chg_info.usb_volt > di->bm->chg_params->usb_volt_max)
+ if (di->chg_info.usb_volt_uv > di->bm->chg_params->usb_volt_max_uv)
di->chg_info.usb_chg_ok = false;
else
di->chg_info.usb_chg_ok = true;
- if (di->chg_info.ac_volt > di->bm->chg_params->ac_volt_max)
+ if (di->chg_info.ac_volt_uv > di->bm->chg_params->ac_volt_max_uv)
di->chg_info.ac_chg_ok = false;
else
di->chg_info.ac_chg_ok = true;
@@ -790,12 +801,12 @@ static void ab8500_chargalg_end_of_charge(struct ab8500_chargalg *di)
{
if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
di->charge_state == STATE_NORMAL &&
- !di->maintenance_chg && (di->batt_data.volt >=
- di->bm->bat_type[di->bm->batt_id].termination_vol ||
+ !di->maintenance_chg && (di->batt_data.volt_uv >=
+ di->bm->bi->overvoltage_limit_uv ||
di->events.usb_cv_active || di->events.ac_cv_active) &&
- di->batt_data.avg_curr <
- di->bm->bat_type[di->bm->batt_id].termination_curr &&
- di->batt_data.avg_curr > 0) {
+ di->batt_data.avg_curr_ua <
+ di->bm->bi->charge_term_current_ua &&
+ di->batt_data.avg_curr_ua > 0) {
if (++di->eoc_cnt >= EOC_COND_CNT) {
di->eoc_cnt = 0;
di->charge_status = POWER_SUPPLY_STATUS_FULL;
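End-of-charge detection above requires every condition to hold for EOC_COND_CNT consecutive evaluations: actively charging in the normal state, not already in maintenance mode, voltage at or past overvoltage_limit_uv (or either charger in constant-voltage mode), and a positive average current that has dropped below charge_term_current_ua. As a condensed predicate (illustrative only, not the driver's exact code):

/* One EOC evaluation; the caller counts EOC_COND_CNT consecutive hits */
static bool eoc_condition_met(bool charging, bool normal_state,
			      bool maintenance, int volt_uv, int limit_uv,
			      bool cv_active, int avg_curr_ua,
			      int term_curr_ua)
{
	return charging && normal_state && !maintenance &&
	       (volt_uv >= limit_uv || cv_active) &&
	       avg_curr_ua > 0 && avg_curr_ua < term_curr_ua;
}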
@@ -816,12 +827,12 @@ static void ab8500_chargalg_end_of_charge(struct ab8500_chargalg *di)
static void init_maxim_chg_curr(struct ab8500_chargalg *di)
{
- di->ccm.original_iset =
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
- di->ccm.current_iset =
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
- di->ccm.test_delta_i = di->bm->maxi->charger_curr_step;
- di->ccm.max_current = di->bm->maxi->chg_curr;
+ struct power_supply_battery_info *bi = di->bm->bi;
+
+ di->ccm.original_iset_ua = bi->constant_charge_current_max_ua;
+ di->ccm.current_iset_ua = bi->constant_charge_current_max_ua;
+ di->ccm.test_delta_i_ua = di->bm->maxi->charger_curr_step_ua;
+ di->ccm.max_current_ua = di->bm->maxi->chg_curr_ua;
di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
di->ccm.level = 0;
}
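The values seeded here feed the maximization loop in ab8500_chargalg_chg_curr_maxim() just below: the set point starts at the battery's constant-charge maximum and is raised by test_delta_i_ua once per wait_cycles evaluations while the measured battery current leaves headroom, and both the ceiling and the set point retreat one step when VBUS collapses. A compressed sketch of that policy (standalone, all values in microamperes; not the driver's exact code):

/* Hypothetical condensed state, mirroring the di->ccm fields above */
struct maxim_state {
	int original_iset_ua;	/* battery constant-charge maximum */
	int current_iset_ua;	/* set point programmed right now */
	int test_delta_i_ua;	/* step size */
	int max_current_ua;	/* ceiling, lowered on VBUS collapse */
	int condition_cnt;	/* evaluations left before a step up */
	int wait_cycles;
};

static int maxim_step(struct maxim_state *m, int inst_ua, int vbus_collapsed)
{
	if (vbus_collapsed) {
		/* Source overloaded: lower the ceiling and retreat to it */
		m->max_current_ua = m->current_iset_ua - m->test_delta_i_ua;
		m->current_iset_ua = m->max_current_ua;
		m->condition_cnt = m->wait_cycles;
	} else if (inst_ua > m->original_iset_ua) {
		/* Battery draws more than allowed: back to the original */
		m->current_iset_ua = m->original_iset_ua;
		m->condition_cnt = m->wait_cycles;
	} else if (m->original_iset_ua - inst_ua > m->test_delta_i_ua &&
		   m->current_iset_ua + m->test_delta_i_ua <
		   m->max_current_ua && m->condition_cnt-- == 0) {
		/* Headroom held for wait_cycles evaluations: step up */
		m->current_iset_ua += m->test_delta_i_ua;
		m->condition_cnt = m->wait_cycles;
	}
	return m->current_iset_ua;
}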
@@ -837,12 +848,12 @@ static void init_maxim_chg_curr(struct ab8500_chargalg *di)
*/
static enum maxim_ret ab8500_chargalg_chg_curr_maxim(struct ab8500_chargalg *di)
{
- int delta_i;
+ int delta_i_ua;
if (!di->bm->maxi->ena_maxi)
return MAXIM_RET_NOACTION;
- delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
+ delta_i_ua = di->ccm.original_iset_ua - di->batt_data.inst_curr_ua;
if (di->events.vbus_collapsed) {
dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
@@ -851,9 +862,9 @@ static enum maxim_ret ab8500_chargalg_chg_curr_maxim(struct ab8500_chargalg *di)
dev_dbg(di->dev, "lowering current\n");
di->ccm.wait_cnt++;
di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
- di->ccm.max_current =
- di->ccm.current_iset - di->ccm.test_delta_i;
- di->ccm.current_iset = di->ccm.max_current;
+ di->ccm.max_current_ua =
+ di->ccm.current_iset_ua - di->ccm.test_delta_i_ua;
+ di->ccm.current_iset_ua = di->ccm.max_current_ua;
di->ccm.level--;
return MAXIM_RET_CHANGE;
} else {
@@ -866,36 +877,36 @@ static enum maxim_ret ab8500_chargalg_chg_curr_maxim(struct ab8500_chargalg *di)
di->ccm.wait_cnt = 0;
- if (di->batt_data.inst_curr > di->ccm.original_iset) {
- dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
- " (limit %dmA) (current iset: %dmA)!\n",
- di->batt_data.inst_curr, di->ccm.original_iset,
- di->ccm.current_iset);
+ if (di->batt_data.inst_curr_ua > di->ccm.original_iset_ua) {
+ dev_dbg(di->dev, " Maximization Ibat (%duA) too high"
+ " (limit %duA) (current iset: %duA)!\n",
+ di->batt_data.inst_curr_ua, di->ccm.original_iset_ua,
+ di->ccm.current_iset_ua);
- if (di->ccm.current_iset == di->ccm.original_iset)
+ if (di->ccm.current_iset_ua == di->ccm.original_iset_ua)
return MAXIM_RET_NOACTION;
di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
- di->ccm.current_iset = di->ccm.original_iset;
+ di->ccm.current_iset_ua = di->ccm.original_iset_ua;
di->ccm.level = 0;
return MAXIM_RET_IBAT_TOO_HIGH;
}
- if (delta_i > di->ccm.test_delta_i &&
- (di->ccm.current_iset + di->ccm.test_delta_i) <
- di->ccm.max_current) {
+ if (delta_i_ua > di->ccm.test_delta_i_ua &&
+ (di->ccm.current_iset_ua + di->ccm.test_delta_i_ua) <
+ di->ccm.max_current_ua) {
if (di->ccm.condition_cnt-- == 0) {
/* Increase the iset with ccm.test_delta_i */
di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
- di->ccm.current_iset += di->ccm.test_delta_i;
+ di->ccm.current_iset_ua += di->ccm.test_delta_i_ua;
di->ccm.level++;
dev_dbg(di->dev, " Maximization needed, increase"
- " with %d mA to %dmA (Optimal ibat: %d)"
+ " with %d uA to %duA (Optimal ibat: %d uA)"
" Level %d\n",
- di->ccm.test_delta_i,
- di->ccm.current_iset,
- di->ccm.original_iset,
+ di->ccm.test_delta_i_ua,
+ di->ccm.current_iset_ua,
+ di->ccm.original_iset_ua,
di->ccm.level);
return MAXIM_RET_CHANGE;
} else {
@@ -909,6 +920,7 @@ static enum maxim_ret ab8500_chargalg_chg_curr_maxim(struct ab8500_chargalg *di)
static void handle_maxim_chg_curr(struct ab8500_chargalg *di)
{
+ struct power_supply_battery_info *bi = di->bm->bi;
enum maxim_ret ret;
int result;
@@ -916,13 +928,13 @@ static void handle_maxim_chg_curr(struct ab8500_chargalg *di)
switch (ret) {
case MAXIM_RET_CHANGE:
result = ab8500_chargalg_update_chg_curr(di,
- di->ccm.current_iset);
+ di->ccm.current_iset_ua);
if (result)
dev_err(di->dev, "failed to set chg curr\n");
break;
case MAXIM_RET_IBAT_TOO_HIGH:
result = ab8500_chargalg_update_chg_curr(di,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ bi->constant_charge_current_max_ua);
if (result)
dev_err(di->dev, "failed to set chg curr\n");
break;
@@ -1158,13 +1170,13 @@ static int ab8500_chargalg_get_ext_psy_data(struct device *dev, void *data)
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
switch (ext->desc->type) {
case POWER_SUPPLY_TYPE_BATTERY:
- di->batt_data.volt = ret.intval / 1000;
+ di->batt_data.volt_uv = ret.intval;
break;
case POWER_SUPPLY_TYPE_MAINS:
- di->chg_info.ac_volt = ret.intval / 1000;
+ di->chg_info.ac_volt_uv = ret.intval;
break;
case POWER_SUPPLY_TYPE_USB:
- di->chg_info.usb_volt = ret.intval / 1000;
+ di->chg_info.usb_volt_uv = ret.intval;
break;
default:
break;
@@ -1217,15 +1229,13 @@ static int ab8500_chargalg_get_ext_psy_data(struct device *dev, void *data)
case POWER_SUPPLY_PROP_CURRENT_NOW:
switch (ext->desc->type) {
case POWER_SUPPLY_TYPE_MAINS:
- di->chg_info.ac_curr =
- ret.intval / 1000;
- break;
+ di->chg_info.ac_curr_ua = ret.intval;
+ break;
case POWER_SUPPLY_TYPE_USB:
- di->chg_info.usb_curr =
- ret.intval / 1000;
+ di->chg_info.usb_curr_ua = ret.intval;
break;
case POWER_SUPPLY_TYPE_BATTERY:
- di->batt_data.inst_curr = ret.intval / 1000;
+ di->batt_data.inst_curr_ua = ret.intval;
break;
default:
break;
@@ -1235,7 +1245,7 @@ static int ab8500_chargalg_get_ext_psy_data(struct device *dev, void *data)
case POWER_SUPPLY_PROP_CURRENT_AVG:
switch (ext->desc->type) {
case POWER_SUPPLY_TYPE_BATTERY:
- di->batt_data.avg_curr = ret.intval / 1000;
+ di->batt_data.avg_curr_ua = ret.intval;
break;
case POWER_SUPPLY_TYPE_USB:
if (ret.intval)
@@ -1289,9 +1299,10 @@ static void ab8500_chargalg_external_power_changed(struct power_supply *psy)
*/
static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
{
+ struct power_supply_battery_info *bi = di->bm->bi;
int charger_status;
int ret;
- int curr_step_lvl;
+ int curr_step_lvl_ua;
/* Collect data from all power_supply class devices */
class_for_each_device(power_supply_class, NULL,
@@ -1395,9 +1406,9 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
"State %s Active_chg %d Chg_status %d AC %d USB %d "
"AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
"USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
- di->batt_data.volt,
- di->batt_data.avg_curr,
- di->batt_data.inst_curr,
+ di->batt_data.volt_uv,
+ di->batt_data.avg_curr_ua,
+ di->batt_data.inst_curr_ua,
di->batt_data.temp,
di->batt_data.percent,
di->maintenance_chg,
@@ -1410,12 +1421,12 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
di->chg_info.online_chg & USB_CHG,
di->events.ac_cv_active,
di->events.usb_cv_active,
- di->chg_info.ac_curr,
- di->chg_info.usb_curr,
- di->chg_info.ac_vset,
- di->chg_info.ac_iset,
- di->chg_info.usb_vset,
- di->chg_info.usb_iset);
+ di->chg_info.ac_curr_ua,
+ di->chg_info.usb_curr_ua,
+ di->chg_info.ac_vset_uv,
+ di->chg_info.ac_iset_ua,
+ di->chg_info.usb_vset_uv,
+ di->chg_info.usb_iset_ua);
switch (di->charge_state) {
case STATE_HANDHELD_INIT:
@@ -1500,16 +1511,15 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
break;
case STATE_NORMAL_INIT:
- if (di->curr_status.curr_step == CHARGALG_CURR_STEP_LOW)
+ if (di->curr_status.curr_step_ua == CHARGALG_CURR_STEP_LOW_UA)
ab8500_chargalg_stop_charging(di);
else {
- curr_step_lvl = di->bm->bat_type[
- di->bm->batt_id].normal_cur_lvl
- * di->curr_status.curr_step
- / CHARGALG_CURR_STEP_HIGH;
+ curr_step_lvl_ua = bi->constant_charge_current_max_ua
+ * di->curr_status.curr_step_ua
+ / CHARGALG_CURR_STEP_HIGH_UA;
ab8500_chargalg_start_charging(di,
- di->bm->bat_type[di->bm->batt_id]
- .normal_vol_lvl, curr_step_lvl);
+ bi->constant_charge_voltage_max_uv,
+ curr_step_lvl_ua);
}
ab8500_chargalg_state_to(di, STATE_NORMAL);
@@ -1543,21 +1553,17 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
fallthrough;
case STATE_WAIT_FOR_RECHARGE:
- if (di->batt_data.percent <=
- di->bm->bat_type[di->bm->batt_id].recharge_cap)
+ if (di->batt_data.percent <= AB8500_RECHARGE_CAP)
ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
break;
case STATE_MAINTENANCE_A_INIT:
ab8500_chargalg_stop_safety_timer(di);
ab8500_chargalg_start_maintenance_timer(di,
- di->bm->bat_type[
- di->bm->batt_id].maint_a_chg_timer_h);
+ di->bm->bat_type->maint_a_chg_timer_h);
ab8500_chargalg_start_charging(di,
- di->bm->bat_type[
- di->bm->batt_id].maint_a_vol_lvl,
- di->bm->bat_type[
- di->bm->batt_id].maint_a_cur_lvl);
+ di->bm->bat_type->maint_a_vol_lvl,
+ di->bm->bat_type->maint_a_cur_lvl);
ab8500_chargalg_state_to(di, STATE_MAINTENANCE_A);
power_supply_changed(di->chargalg_psy);
fallthrough;
@@ -1571,13 +1577,10 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
case STATE_MAINTENANCE_B_INIT:
ab8500_chargalg_start_maintenance_timer(di,
- di->bm->bat_type[
- di->bm->batt_id].maint_b_chg_timer_h);
+ di->bm->bat_type->maint_b_chg_timer_h);
ab8500_chargalg_start_charging(di,
- di->bm->bat_type[
- di->bm->batt_id].maint_b_vol_lvl,
- di->bm->bat_type[
- di->bm->batt_id].maint_b_cur_lvl);
+ di->bm->bat_type->maint_b_vol_lvl,
+ di->bm->bat_type->maint_b_cur_lvl);
ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B);
power_supply_changed(di->chargalg_psy);
fallthrough;
@@ -1591,10 +1594,8 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
case STATE_TEMP_LOWHIGH_INIT:
ab8500_chargalg_start_charging(di,
- di->bm->bat_type[
- di->bm->batt_id].low_high_vol_lvl,
- di->bm->bat_type[
- di->bm->batt_id].low_high_cur_lvl);
+ di->bm->bat_type->low_high_vol_lvl,
+ di->bm->bat_type->low_high_cur_lvl);
ab8500_chargalg_stop_maintenance_timer(di);
di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
@@ -1722,7 +1723,7 @@ static int ab8500_chargalg_get_property(struct power_supply *psy,
if (di->events.batt_ovv) {
val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
} else if (di->events.btemp_underover) {
- if (di->batt_data.temp <= di->bm->temp_under)
+ if (di->batt_data.temp <= di->bm->bi->temp_min)
val->intval = POWER_SUPPLY_HEALTH_COLD;
else
val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
@@ -1744,7 +1745,7 @@ static int ab8500_chargalg_get_property(struct power_supply *psy,
static ssize_t ab8500_chargalg_curr_step_show(struct ab8500_chargalg *di,
char *buf)
{
- return sprintf(buf, "%d\n", di->curr_status.curr_step);
+ return sprintf(buf, "%d\n", di->curr_status.curr_step_ua);
}
static ssize_t ab8500_chargalg_curr_step_store(struct ab8500_chargalg *di,
@@ -1757,9 +1758,9 @@ static ssize_t ab8500_chargalg_curr_step_store(struct ab8500_chargalg *di,
if (ret < 0)
return ret;
- di->curr_status.curr_step = param;
- if (di->curr_status.curr_step >= CHARGALG_CURR_STEP_LOW &&
- di->curr_status.curr_step <= CHARGALG_CURR_STEP_HIGH) {
+ di->curr_status.curr_step_ua = param;
+ if (di->curr_status.curr_step_ua >= CHARGALG_CURR_STEP_LOW_UA &&
+ di->curr_status.curr_step_ua <= CHARGALG_CURR_STEP_HIGH_UA) {
di->curr_status.curr_step_change = true;
queue_work(di->chargalg_wq, &di->chargalg_work);
} else
@@ -2056,7 +2057,7 @@ static int ab8500_chargalg_probe(struct platform_device *pdev)
dev_err(di->dev, "failed to create sysfs entry\n");
return ret;
}
- di->curr_status.curr_step = CHARGALG_CURR_STEP_HIGH;
+ di->curr_status.curr_step_ua = CHARGALG_CURR_STEP_HIGH_UA;
dev_info(di->dev, "probe success\n");
return component_add(dev, &ab8500_chargalg_component_ops);
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 15eadaf46f14..ce074c018dcb 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -145,23 +145,23 @@ enum ab8500_usb_state {
AB8500_BM_USB_STATE_MAX,
};
-/* VBUS input current limits supported in AB8500 in mA */
-#define USB_CH_IP_CUR_LVL_0P05 50
-#define USB_CH_IP_CUR_LVL_0P09 98
-#define USB_CH_IP_CUR_LVL_0P19 193
-#define USB_CH_IP_CUR_LVL_0P29 290
-#define USB_CH_IP_CUR_LVL_0P38 380
-#define USB_CH_IP_CUR_LVL_0P45 450
-#define USB_CH_IP_CUR_LVL_0P5 500
-#define USB_CH_IP_CUR_LVL_0P6 600
-#define USB_CH_IP_CUR_LVL_0P7 700
-#define USB_CH_IP_CUR_LVL_0P8 800
-#define USB_CH_IP_CUR_LVL_0P9 900
-#define USB_CH_IP_CUR_LVL_1P0 1000
-#define USB_CH_IP_CUR_LVL_1P1 1100
-#define USB_CH_IP_CUR_LVL_1P3 1300
-#define USB_CH_IP_CUR_LVL_1P4 1400
-#define USB_CH_IP_CUR_LVL_1P5 1500
+/* VBUS input current limits supported in AB8500 in uA */
+#define USB_CH_IP_CUR_LVL_0P05 50000
+#define USB_CH_IP_CUR_LVL_0P09 98000
+#define USB_CH_IP_CUR_LVL_0P19 193000
+#define USB_CH_IP_CUR_LVL_0P29 290000
+#define USB_CH_IP_CUR_LVL_0P38 380000
+#define USB_CH_IP_CUR_LVL_0P45 450000
+#define USB_CH_IP_CUR_LVL_0P5 500000
+#define USB_CH_IP_CUR_LVL_0P6 600000
+#define USB_CH_IP_CUR_LVL_0P7 700000
+#define USB_CH_IP_CUR_LVL_0P8 800000
+#define USB_CH_IP_CUR_LVL_0P9 900000
+#define USB_CH_IP_CUR_LVL_1P0 1000000
+#define USB_CH_IP_CUR_LVL_1P1 1100000
+#define USB_CH_IP_CUR_LVL_1P3 1300000
+#define USB_CH_IP_CUR_LVL_1P4 1400000
+#define USB_CH_IP_CUR_LVL_1P5 1500000
#define VBAT_TRESH_IP_CUR_RED 3800
@@ -183,10 +183,10 @@ struct ab8500_charger_interrupts {
struct ab8500_charger_info {
int charger_connected;
int charger_online;
- int charger_voltage;
+ int charger_voltage_uv;
int cv_active;
bool wd_expired;
- int charger_current;
+ int charger_current_ua;
};
struct ab8500_charger_event_flags {
@@ -201,17 +201,17 @@ struct ab8500_charger_event_flags {
};
struct ab8500_charger_usb_state {
- int usb_current;
- int usb_current_tmp;
+ int usb_current_ua;
+ int usb_current_tmp_ua;
enum ab8500_usb_state state;
enum ab8500_usb_state state_tmp;
spinlock_t usb_lock;
};
struct ab8500_charger_max_usb_in_curr {
- int usb_type_max;
- int set_max;
- int calculated_max;
+ int usb_type_max_ua;
+ int set_max_ua;
+ int calculated_max_ua;
};
/**
@@ -479,7 +479,7 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
* ab8500_charger_get_ac_voltage() - get ac charger voltage
* @di: pointer to the ab8500_charger structure
*
- * Returns ac charger voltage (on success)
+ * Returns ac charger voltage in microvolt (on success)
*/
static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
{
@@ -493,7 +493,8 @@ static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
} else {
vch = 0;
}
- return vch;
+ /* Convert to microvolt, IIO returns millivolt */
+ return vch * 1000;
}
/**
@@ -530,7 +531,7 @@ static int ab8500_charger_ac_cv(struct ab8500_charger *di)
* @di: pointer to the ab8500_charger structure
*
* This function returns the vbus voltage.
- * Returns vbus voltage (on success)
+ * Returns vbus voltage in microvolt (on success)
*/
static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
{
@@ -544,7 +545,8 @@ static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
} else {
vch = 0;
}
- return vch;
+ /* Convert to microvolt, IIO returns millivolt */
+ return vch * 1000;
}
/**
@@ -552,7 +554,7 @@ static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
* @di: pointer to the ab8500_charger structure
*
* This function returns the usb charger current.
- * Returns usb current (on success) and error code on failure
+ * Returns usb current in microamperes (on success) and error code on failure
*/
static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
{
@@ -566,7 +568,8 @@ static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
} else {
ich = 0;
}
- return ich;
+ /* Return microamperes */
+ return ich * 1000;
}
/**
@@ -574,7 +577,7 @@ static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
* @di: pointer to the ab8500_charger structure
*
* This function returns the ac charger current.
- * Returns ac current (on success) and error code on failure.
+ * Returns ac current in microamperes (on success) and error code on failure.
*/
static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
{
@@ -588,7 +591,8 @@ static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
} else {
ich = 0;
}
- return ich;
+ /* Return microamperes */
+ return ich * 1000;
}
/**
@@ -711,19 +715,19 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
case USB_STAT_STD_HOST_C_S:
dev_dbg(di->dev, "USB Type - Standard host is "
"detected through USB driver\n");
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P5;
di->is_aca_rid = 0;
break;
case USB_STAT_HOST_CHG_HS_CHIRP:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P5;
di->is_aca_rid = 0;
break;
case USB_STAT_HOST_CHG_HS:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P5;
di->is_aca_rid = 0;
break;
case USB_STAT_ACA_RID_C_HS:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P9;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P9;
di->is_aca_rid = 0;
break;
case USB_STAT_ACA_RID_A:
@@ -732,7 +736,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
* can consume (900mA). Closest level is 500mA
*/
dev_dbg(di->dev, "USB_STAT_ACA_RID_A detected\n");
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P5;
di->is_aca_rid = 1;
break;
case USB_STAT_ACA_RID_B:
@@ -740,36 +744,36 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
* Dedicated charger level minus 120mA (20mA for ACA and
* 100mA for potential accessory). Closest level is 1300mA
*/
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_1P3;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_1P3;
dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
- di->max_usb_in_curr.usb_type_max);
+ di->max_usb_in_curr.usb_type_max_ua);
di->is_aca_rid = 1;
break;
case USB_STAT_HOST_CHG_NM:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P5;
di->is_aca_rid = 0;
break;
case USB_STAT_DEDICATED_CHG:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_1P5;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_1P5;
di->is_aca_rid = 0;
break;
case USB_STAT_ACA_RID_C_HS_CHIRP:
case USB_STAT_ACA_RID_C_NM:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_1P5;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_1P5;
di->is_aca_rid = 1;
break;
case USB_STAT_NOT_CONFIGURED:
if (di->vbus_detected) {
di->usb_device_is_unrecognised = true;
dev_dbg(di->dev, "USB Type - Legacy charger.\n");
- di->max_usb_in_curr.usb_type_max =
+ di->max_usb_in_curr.usb_type_max_ua =
USB_CH_IP_CUR_LVL_1P5;
break;
}
fallthrough;
case USB_STAT_HM_IDGND:
dev_err(di->dev, "USB Type - Charging not allowed\n");
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P05;
ret = -ENXIO;
break;
case USB_STAT_RESERVED:
@@ -781,11 +785,11 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
break;
} else {
dev_dbg(di->dev, "USB Type - Charging not allowed\n");
- di->max_usb_in_curr.usb_type_max =
+ di->max_usb_in_curr.usb_type_max_ua =
USB_CH_IP_CUR_LVL_0P05;
dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
link_status,
- di->max_usb_in_curr.usb_type_max);
+ di->max_usb_in_curr.usb_type_max_ua);
ret = -ENXIO;
break;
}
@@ -793,25 +797,25 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
case USB_STAT_CARKIT_2:
case USB_STAT_ACA_DOCK_CHARGER:
case USB_STAT_CHARGER_LINE_1:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P5;
dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
- di->max_usb_in_curr.usb_type_max);
+ di->max_usb_in_curr.usb_type_max_ua);
break;
case USB_STAT_NOT_VALID_LINK:
dev_err(di->dev, "USB Type invalid - try charging anyway\n");
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P5;
break;
default:
dev_err(di->dev, "USB Type - Unknown\n");
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P05;
ret = -ENXIO;
break;
}
- di->max_usb_in_curr.set_max = di->max_usb_in_curr.usb_type_max;
+ di->max_usb_in_curr.set_max_ua = di->max_usb_in_curr.usb_type_max_ua;
dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
- link_status, di->max_usb_in_curr.set_max);
+ link_status, di->max_usb_in_curr.set_max_ua);
return ret;
}
@@ -921,145 +925,157 @@ static int ab8500_charger_detect_usb_type(struct ab8500_charger *di)
/*
* This array maps the raw hex value to charger voltage used by the AB8500
- * Values taken from the UM0836
+ * Values taken from the UM0836, in microvolts.
*/
static int ab8500_charger_voltage_map[] = {
- 3500 ,
- 3525 ,
- 3550 ,
- 3575 ,
- 3600 ,
- 3625 ,
- 3650 ,
- 3675 ,
- 3700 ,
- 3725 ,
- 3750 ,
- 3775 ,
- 3800 ,
- 3825 ,
- 3850 ,
- 3875 ,
- 3900 ,
- 3925 ,
- 3950 ,
- 3975 ,
- 4000 ,
- 4025 ,
- 4050 ,
- 4060 ,
- 4070 ,
- 4080 ,
- 4090 ,
- 4100 ,
- 4110 ,
- 4120 ,
- 4130 ,
- 4140 ,
- 4150 ,
- 4160 ,
- 4170 ,
- 4180 ,
- 4190 ,
- 4200 ,
- 4210 ,
- 4220 ,
- 4230 ,
- 4240 ,
- 4250 ,
- 4260 ,
- 4270 ,
- 4280 ,
- 4290 ,
- 4300 ,
- 4310 ,
- 4320 ,
- 4330 ,
- 4340 ,
- 4350 ,
- 4360 ,
- 4370 ,
- 4380 ,
- 4390 ,
- 4400 ,
- 4410 ,
- 4420 ,
- 4430 ,
- 4440 ,
- 4450 ,
- 4460 ,
- 4470 ,
- 4480 ,
- 4490 ,
- 4500 ,
- 4510 ,
- 4520 ,
- 4530 ,
- 4540 ,
- 4550 ,
- 4560 ,
- 4570 ,
- 4580 ,
- 4590 ,
- 4600 ,
+ 3500000,
+ 3525000,
+ 3550000,
+ 3575000,
+ 3600000,
+ 3625000,
+ 3650000,
+ 3675000,
+ 3700000,
+ 3725000,
+ 3750000,
+ 3775000,
+ 3800000,
+ 3825000,
+ 3850000,
+ 3875000,
+ 3900000,
+ 3925000,
+ 3950000,
+ 3975000,
+ 4000000,
+ 4025000,
+ 4050000,
+ 4060000,
+ 4070000,
+ 4080000,
+ 4090000,
+ 4100000,
+ 4110000,
+ 4120000,
+ 4130000,
+ 4140000,
+ 4150000,
+ 4160000,
+ 4170000,
+ 4180000,
+ 4190000,
+ 4200000,
+ 4210000,
+ 4220000,
+ 4230000,
+ 4240000,
+ 4250000,
+ 4260000,
+ 4270000,
+ 4280000,
+ 4290000,
+ 4300000,
+ 4310000,
+ 4320000,
+ 4330000,
+ 4340000,
+ 4350000,
+ 4360000,
+ 4370000,
+ 4380000,
+ 4390000,
+ 4400000,
+ 4410000,
+ 4420000,
+ 4430000,
+ 4440000,
+ 4450000,
+ 4460000,
+ 4470000,
+ 4480000,
+ 4490000,
+ 4500000,
+ 4510000,
+ 4520000,
+ 4530000,
+ 4540000,
+ 4550000,
+ 4560000,
+ 4570000,
+ 4580000,
+ 4590000,
+ 4600000,
};
-static int ab8500_voltage_to_regval(int voltage)
+static int ab8500_voltage_to_regval(int voltage_uv)
{
int i;
/* Special case for voltage below 3.5V */
- if (voltage < ab8500_charger_voltage_map[0])
+ if (voltage_uv < ab8500_charger_voltage_map[0])
return LOW_VOLT_REG;
for (i = 1; i < ARRAY_SIZE(ab8500_charger_voltage_map); i++) {
- if (voltage < ab8500_charger_voltage_map[i])
+ if (voltage_uv < ab8500_charger_voltage_map[i])
return i - 1;
}
/* If not last element, return error */
i = ARRAY_SIZE(ab8500_charger_voltage_map) - 1;
- if (voltage == ab8500_charger_voltage_map[i])
+ if (voltage_uv == ab8500_charger_voltage_map[i])
return i;
else
return -1;
}
-static int ab8500_current_to_regval(struct ab8500_charger *di, int curr)
+/* This array maps the raw register value to charger input current */
+static int ab8500_charge_input_curr_map[] = {
+ 50000, 98000, 193000, 290000, 380000, 450000, 500000, 600000,
+ 700000, 800000, 900000, 1000000, 1100000, 1300000, 1400000, 1500000,
+};
+
+/* This array maps the raw register value to charger output current */
+static int ab8500_charge_output_curr_map[] = {
+ 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000,
+ 900000, 1000000, 1100000, 1200000, 1300000, 1400000, 1500000, 1500000,
+};
+
+static int ab8500_current_to_regval(struct ab8500_charger *di, int curr_ua)
{
int i;
- if (curr < di->bm->chg_output_curr[0])
+ if (curr_ua < ab8500_charge_output_curr_map[0])
return 0;
- for (i = 0; i < di->bm->n_chg_out_curr; i++) {
- if (curr < di->bm->chg_output_curr[i])
+ for (i = 0; i < ARRAY_SIZE(ab8500_charge_output_curr_map); i++) {
+ if (curr_ua < ab8500_charge_output_curr_map[i])
return i - 1;
}
/* If not last element, return error */
- i = di->bm->n_chg_out_curr - 1;
- if (curr == di->bm->chg_output_curr[i])
+ i = ARRAY_SIZE(ab8500_charge_output_curr_map) - 1;
+ if (curr_ua == ab8500_charge_output_curr_map[i])
return i;
else
return -1;
}
-static int ab8500_vbus_in_curr_to_regval(struct ab8500_charger *di, int curr)
+static int ab8500_vbus_in_curr_to_regval(struct ab8500_charger *di, int curr_ua)
{
int i;
- if (curr < di->bm->chg_input_curr[0])
+ if (curr_ua < ab8500_charge_input_curr_map[0])
return 0;
- for (i = 0; i < di->bm->n_chg_in_curr; i++) {
- if (curr < di->bm->chg_input_curr[i])
+ for (i = 0; i < ARRAY_SIZE(ab8500_charge_input_curr_map); i++) {
+ if (curr_ua < ab8500_charge_input_curr_map[i])
return i - 1;
}
/* If not last element, return error */
- i = di->bm->n_chg_in_curr - 1;
- if (curr == di->bm->chg_input_curr[i])
+ i = ARRAY_SIZE(ab8500_charge_input_curr_map) - 1;
+ if (curr_ua == ab8500_charge_input_curr_map[i])
return i;
else
return -1;
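Both lookup helpers above (like ab8500_voltage_to_regval() before them) share one convention: an input below the first table entry selects the lowest code, an in-range input selects the largest entry not exceeding it, an exact match on the last entry selects the last index, and anything larger is rejected with -1. A hypothetical request against the input-current table added above:

/* Ask for a 320 mA (320000 uA) VBUS input limit: it falls between
 * 290000 (index 3) and 380000 (index 4) in ab8500_charge_input_curr_map,
 * so the helper returns 3 and the hardware is programmed to the lower
 * 290 mA step; the driver always rounds down to protect the source.
 */
int regval = ab8500_vbus_in_curr_to_regval(di, 320000);	/* regval == 3 */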
@@ -1070,35 +1086,35 @@ static int ab8500_vbus_in_curr_to_regval(struct ab8500_charger *di, int curr)
* @di: pointer to the ab8500_charger structure
*
* The usb stack provides the maximum current that can be drawn from
- * the standard usb host. This will be in mA.
- * This function converts current in mA to a value that can be written
+ * the standard usb host. This will be in uA.
+ * This function converts current in uA to a value that can be written
* to the register. Returns -1 if charging is not allowed
*/
static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
{
int ret = 0;
- switch (di->usb_state.usb_current) {
- case 100:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P09;
+ switch (di->usb_state.usb_current_ua) {
+ case 100000:
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P09;
break;
- case 200:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P19;
+ case 200000:
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P19;
break;
- case 300:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P29;
+ case 300000:
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P29;
break;
- case 400:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P38;
+ case 400000:
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P38;
break;
- case 500:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
+ case 500000:
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P5;
break;
default:
- di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
+ di->max_usb_in_curr.usb_type_max_ua = USB_CH_IP_CUR_LVL_0P05;
ret = -EPERM;
break;
}
- di->max_usb_in_curr.set_max = di->max_usb_in_curr.usb_type_max;
+ di->max_usb_in_curr.set_max_ua = di->max_usb_in_curr.usb_type_max_ua;
return ret;
}
@@ -1123,7 +1139,7 @@ static bool ab8500_charger_check_continue_stepping(struct ab8500_charger *di,
/**
* ab8500_charger_set_current() - set charger current
* @di: pointer to the ab8500_charger structure
- * @ich: charger current, in mA
+ * @ich_ua: charger current, in uA
* @reg: select what charger register to set
*
* Set charger current.
@@ -1134,7 +1150,7 @@ static bool ab8500_charger_check_continue_stepping(struct ab8500_charger *di,
* Returns error code in case of failure else 0(on success)
*/
static int ab8500_charger_set_current(struct ab8500_charger *di,
- int ich, int reg)
+ int ich_ua, int reg)
{
int ret = 0;
int curr_index, prev_curr_index, shift_value, i;
@@ -1155,7 +1171,7 @@ static int ab8500_charger_set_current(struct ab8500_charger *di,
case AB8500_MCH_IPT_CURLVL_REG:
shift_value = MAIN_CH_INPUT_CURR_SHIFT;
prev_curr_index = (reg_value >> shift_value);
- curr_index = ab8500_current_to_regval(di, ich);
+ curr_index = ab8500_current_to_regval(di, ich_ua);
step_udelay = STEP_UDELAY;
if (!di->ac.charger_connected)
no_stepping = true;
@@ -1163,7 +1179,7 @@ static int ab8500_charger_set_current(struct ab8500_charger *di,
case AB8500_USBCH_IPT_CRNTLVL_REG:
shift_value = VBUS_IN_CURR_LIM_SHIFT;
prev_curr_index = (reg_value >> shift_value);
- curr_index = ab8500_vbus_in_curr_to_regval(di, ich);
+ curr_index = ab8500_vbus_in_curr_to_regval(di, ich_ua);
step_udelay = STEP_UDELAY * 100;
if (!di->usb.charger_connected)
@@ -1172,7 +1188,7 @@ static int ab8500_charger_set_current(struct ab8500_charger *di,
case AB8500_CH_OPT_CRNTLVL_REG:
shift_value = 0;
prev_curr_index = (reg_value >> shift_value);
- curr_index = ab8500_current_to_regval(di, ich);
+ curr_index = ab8500_current_to_regval(di, ich_ua);
step_udelay = STEP_UDELAY;
if (curr_index && (curr_index - prev_curr_index) > 1)
step_udelay *= 100;
@@ -1201,8 +1217,8 @@ static int ab8500_charger_set_current(struct ab8500_charger *di,
goto exit_set_current;
}
- dev_dbg(di->dev, "%s set charger current: %d mA for reg: 0x%02x\n",
- __func__, ich, reg);
+ dev_dbg(di->dev, "%s set charger current: %d uA for reg: 0x%02x\n",
+ __func__, ich_ua, reg);
if (no_stepping) {
ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
@@ -1249,31 +1265,31 @@ exit_set_current:
/**
* ab8500_charger_set_vbus_in_curr() - set VBUS input current limit
* @di: pointer to the ab8500_charger structure
- * @ich_in: charger input current limit
+ * @ich_in_ua: charger input current limit in microampere
*
* Sets the current that can be drawn from the USB host
* Returns error code in case of failure else 0(on success)
*/
static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
- int ich_in)
+ int ich_in_ua)
{
int min_value;
int ret;
/* We should always use the lowest current limit */
- min_value = min(di->bm->chg_params->usb_curr_max, ich_in);
- if (di->max_usb_in_curr.set_max > 0)
- min_value = min(di->max_usb_in_curr.set_max, min_value);
+ min_value = min(di->bm->chg_params->usb_curr_max_ua, ich_in_ua);
+ if (di->max_usb_in_curr.set_max_ua > 0)
+ min_value = min(di->max_usb_in_curr.set_max_ua, min_value);
- if (di->usb_state.usb_current >= 0)
- min_value = min(di->usb_state.usb_current, min_value);
+ if (di->usb_state.usb_current_ua >= 0)
+ min_value = min(di->usb_state.usb_current_ua, min_value);
switch (min_value) {
- case 100:
+ case 100000:
if (di->vbat < VBAT_TRESH_IP_CUR_RED)
min_value = USB_CH_IP_CUR_LVL_0P05;
break;
- case 500:
+ case 500000:
if (di->vbat < VBAT_TRESH_IP_CUR_RED)
min_value = USB_CH_IP_CUR_LVL_0P45;
break;
@@ -1281,7 +1297,7 @@ static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
break;
}
- dev_info(di->dev, "VBUS input current limit set to %d mA\n", min_value);
+ dev_info(di->dev, "VBUS input current limit set to %d uA\n", min_value);
mutex_lock(&di->usb_ipt_crnt_lock);
ret = ab8500_charger_set_current(di, min_value,
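In short, the lowest limit wins: the board limit (usb_curr_max_ua), the limit derived from the detected charger type (set_max_ua) and the USB stack's request (usb_current_ua) are folded together with min(), and the legacy 100 mA and 500 mA host levels are additionally stepped down one level while VBAT is under VBAT_TRESH_IP_CUR_RED. A standalone sketch of the selection (illustrative names; not the driver's exact code):

#define MIN_UA(a, b)	((a) < (b) ? (a) : (b))

static int pick_vbus_limit_ua(int board_max_ua, int type_max_ua,
			      int usb_stack_ua, int vbat_is_low)
{
	int limit_ua = board_max_ua;

	if (type_max_ua > 0)
		limit_ua = MIN_UA(limit_ua, type_max_ua);
	if (usb_stack_ua >= 0)
		limit_ua = MIN_UA(limit_ua, usb_stack_ua);
	/* Legacy hosts are derated one level while the battery is low */
	if (vbat_is_low && limit_ua == 100000)
		limit_ua = USB_CH_IP_CUR_LVL_0P05;	/* 50000 uA */
	else if (vbat_is_low && limit_ua == 500000)
		limit_ua = USB_CH_IP_CUR_LVL_0P45;	/* 450000 uA */
	return limit_ua;
}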
@@ -1294,30 +1310,30 @@ static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
/**
* ab8500_charger_set_main_in_curr() - set main charger input current
* @di: pointer to the ab8500_charger structure
- * @ich_in: input charger current, in mA
+ * @ich_in_ua: input charger current, in uA
*
* Set main charger input current.
* Returns error code in case of failure else 0(on success)
*/
static int ab8500_charger_set_main_in_curr(struct ab8500_charger *di,
- int ich_in)
+ int ich_in_ua)
{
- return ab8500_charger_set_current(di, ich_in,
+ return ab8500_charger_set_current(di, ich_in_ua,
AB8500_MCH_IPT_CURLVL_REG);
}
/**
* ab8500_charger_set_output_curr() - set charger output current
* @di: pointer to the ab8500_charger structure
- * @ich_out: output charger current, in mA
+ * @ich_out_ua: output charger current, in uA
*
* Set charger output current.
* Returns error code in case of failure else 0(on success)
*/
static int ab8500_charger_set_output_curr(struct ab8500_charger *di,
- int ich_out)
+ int ich_out_ua)
{
- return ab8500_charger_set_current(di, ich_out,
+ return ab8500_charger_set_current(di, ich_out_ua,
AB8500_CH_OPT_CRNTLVL_REG);
}
@@ -1368,14 +1384,14 @@ static int ab8500_charger_led_en(struct ab8500_charger *di, int on)
* ab8500_charger_ac_en() - enable or disable ac charging
* @di: pointer to the ab8500_charger structure
* @enable: enable/disable flag
- * @vset: charging voltage
- * @iset: charging current
+ * @vset_uv: charging voltage in microvolt
+ * @iset_ua: charging current in microampere
*
* Enable/Disable AC/Mains charging and turns on/off the charging led
* respectively.
**/
static int ab8500_charger_ac_en(struct ux500_charger *charger,
- int enable, int vset, int iset)
+ int enable, int vset_uv, int iset_ua)
{
int ret;
int volt_index;
@@ -1393,7 +1409,7 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
}
/* Enable AC charging */
- dev_dbg(di->dev, "Enable AC: %dmV %dmA\n", vset, iset);
+ dev_dbg(di->dev, "Enable AC: %duV %duA\n", vset_uv, iset_ua);
/*
* Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
@@ -1415,10 +1431,10 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
}
/* Check if the requested voltage or current is valid */
- volt_index = ab8500_voltage_to_regval(vset);
- curr_index = ab8500_current_to_regval(di, iset);
+ volt_index = ab8500_voltage_to_regval(vset_uv);
+ curr_index = ab8500_current_to_regval(di, iset_ua);
input_curr_index = ab8500_current_to_regval(di,
- di->bm->chg_params->ac_curr_max);
+ di->bm->chg_params->ac_curr_max_ua);
if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) {
dev_err(di->dev,
"Charger voltage or current too high, "
@@ -1435,14 +1451,14 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
}
/* MainChInputCurr: current that can be drawn from the charger*/
ret = ab8500_charger_set_main_in_curr(di,
- di->bm->chg_params->ac_curr_max);
+ di->bm->chg_params->ac_curr_max_ua);
if (ret) {
dev_err(di->dev, "%s Failed to set MainChInputCurr\n",
__func__);
return ret;
}
/* ChOutputCurentLevel: protected output current */
- ret = ab8500_charger_set_output_curr(di, iset);
+ ret = ab8500_charger_set_output_curr(di, iset_ua);
if (ret) {
dev_err(di->dev, "%s "
"Failed to set ChOutputCurentLevel\n",
@@ -1545,14 +1561,14 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
* ab8500_charger_usb_en() - enable usb charging
* @di: pointer to the ab8500_charger structure
* @enable: enable/disable flag
- * @vset: charging voltage
- * @ich_out: charger output current
+ * @vset_uv: charging voltage in microvolt
+ * @ich_out_ua: charger output current in microampere
*
* Enable/Disable USB charging and turns on/off the charging led respectively.
* Returns error code in case of failure else 0(on success)
*/
static int ab8500_charger_usb_en(struct ux500_charger *charger,
- int enable, int vset, int ich_out)
+ int enable, int vset_uv, int ich_out_ua)
{
int ret;
int volt_index;
@@ -1588,11 +1604,11 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
}
/* Enable USB charging */
- dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out);
+ dev_dbg(di->dev, "Enable USB: %d uV %d uA\n", vset_uv, ich_out_ua);
/* Check if the requested voltage or current is valid */
- volt_index = ab8500_voltage_to_regval(vset);
- curr_index = ab8500_current_to_regval(di, ich_out);
+ volt_index = ab8500_voltage_to_regval(vset_uv);
+ curr_index = ab8500_current_to_regval(di, ich_out_ua);
if (volt_index < 0 || curr_index < 0) {
dev_err(di->dev,
"Charger voltage or current too high, "
@@ -1633,14 +1649,14 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
/* USBChInputCurr: current that can be drawn from the usb */
ret = ab8500_charger_set_vbus_in_curr(di,
- di->max_usb_in_curr.usb_type_max);
+ di->max_usb_in_curr.usb_type_max_ua);
if (ret) {
dev_err(di->dev, "setting USBChInputCurr failed\n");
return ret;
}
/* ChOutputCurentLevel: protected output current */
- ret = ab8500_charger_set_output_curr(di, ich_out);
+ ret = ab8500_charger_set_output_curr(di, ich_out_ua);
if (ret) {
dev_err(di->dev, "%s "
"Failed to set ChOutputCurentLevel\n",
@@ -1726,14 +1742,14 @@ out:
/**
* ab8500_charger_usb_check_enable() - enable usb charging
* @charger: pointer to the ux500_charger structure
- * @vset: charging voltage
- * @iset: charger output current
+ * @vset_uv: charging voltage in microvolt
+ * @iset_ua: charger output current in microampere
*
* Check if the VBUS charger has been disconnected and reconnected without
* AB8500 raising an interrupt. Returns 0 on success.
*/
static int ab8500_charger_usb_check_enable(struct ux500_charger *charger,
- int vset, int iset)
+ int vset_uv, int iset_ua)
{
u8 usbch_ctrl1 = 0;
int ret = 0;
@@ -1762,7 +1778,7 @@ static int ab8500_charger_usb_check_enable(struct ux500_charger *charger,
return ret;
}
- ret = ab8500_charger_usb_en(&di->usb_chg, true, vset, iset);
+ ret = ab8500_charger_usb_en(&di->usb_chg, true, vset_uv, iset_ua);
if (ret < 0) {
dev_err(di->dev, "Failed to enable VBUS charger %d\n",
__LINE__);
@@ -1775,14 +1791,14 @@ static int ab8500_charger_usb_check_enable(struct ux500_charger *charger,
/**
* ab8500_charger_ac_check_enable() - enable ac charging
* @charger: pointer to the ux500_charger structure
- * @vset: charging voltage
- * @iset: charger output current
+ * @vset_uv: charging voltage in microvolt
+ * @iset_ua: charger output current in microampere
*
* Check if the AC charger has been disconnected and reconnected without
* AB8500 raising an interrupt. Returns 0 on success.
*/
static int ab8500_charger_ac_check_enable(struct ux500_charger *charger,
- int vset, int iset)
+ int vset_uv, int iset_ua)
{
u8 mainch_ctrl1 = 0;
int ret = 0;
@@ -1812,7 +1828,7 @@ static int ab8500_charger_ac_check_enable(struct ux500_charger *charger,
return ret;
}
- ret = ab8500_charger_ac_en(&di->usb_chg, true, vset, iset);
+ ret = ab8500_charger_ac_en(&di->usb_chg, true, vset_uv, iset_ua);
if (ret < 0) {
dev_err(di->dev, "failed to enable AC charger %d\n",
__LINE__);
@@ -1851,13 +1867,14 @@ static int ab8500_charger_watchdog_kick(struct ux500_charger *charger)
/**
* ab8500_charger_update_charger_current() - update charger current
- * @di: pointer to the ab8500_charger structure
+ * @charger: pointer to the ab8500_charger structure
+ * @ich_out_ua: desired output current in microampere
*
* Update the charger output current for the specified charger
* Returns error code in case of failure else 0(on success)
*/
static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
- int ich_out)
+ int ich_out_ua)
{
int ret;
struct ab8500_charger *di;
@@ -1869,7 +1886,7 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
else
return -ENXIO;
- ret = ab8500_charger_set_output_curr(di, ich_out);
+ ret = ab8500_charger_set_output_curr(di, ich_out_ua);
if (ret) {
dev_err(di->dev, "%s "
"Failed to set ChOutputCurentLevel\n",
@@ -1961,10 +1978,10 @@ static void ab8500_charger_check_vbat_work(struct work_struct *work)
di->vbat > VBAT_TRESH_IP_CUR_RED))) {
dev_dbg(di->dev, "Vbat did cross threshold, curr: %d, new: %d,"
- " old: %d\n", di->max_usb_in_curr.usb_type_max,
+ " old: %d\n", di->max_usb_in_curr.usb_type_max_ua,
di->vbat, di->old_vbat);
ab8500_charger_set_vbus_in_curr(di,
- di->max_usb_in_curr.usb_type_max);
+ di->max_usb_in_curr.usb_type_max_ua);
power_supply_changed(di->usb_chg.psy);
}
@@ -2245,7 +2262,7 @@ static void ab8500_charger_usb_link_attach_work(struct work_struct *work)
/* Update maximum input current if USB enumeration is not detected */
if (!di->usb.charger_online) {
ret = ab8500_charger_set_vbus_in_curr(di,
- di->max_usb_in_curr.usb_type_max);
+ di->max_usb_in_curr.usb_type_max_ua);
if (ret)
return;
}
@@ -2407,11 +2424,11 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
spin_lock_irqsave(&di->usb_state.usb_lock, flags);
di->usb_state.state = di->usb_state.state_tmp;
- di->usb_state.usb_current = di->usb_state.usb_current_tmp;
+ di->usb_state.usb_current_ua = di->usb_state.usb_current_tmp_ua;
spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
- dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
- __func__, di->usb_state.state, di->usb_state.usb_current);
+ dev_dbg(di->dev, "%s USB state: 0x%02x uA: %d\n",
+ __func__, di->usb_state.state, di->usb_state.usb_current_ua);
switch (di->usb_state.state) {
case AB8500_BM_USB_STATE_RESET_HS:
@@ -2437,7 +2454,7 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
if (!ab8500_charger_get_usb_cur(di)) {
/* Update maximum input current */
ret = ab8500_charger_set_vbus_in_curr(di,
- di->max_usb_in_curr.usb_type_max);
+ di->max_usb_in_curr.usb_type_max_ua);
if (ret)
return;
@@ -2657,7 +2674,7 @@ static void ab8500_charger_vbus_drop_end_work(struct work_struct *work)
{
struct ab8500_charger *di = container_of(work,
struct ab8500_charger, vbus_drop_end_work.work);
- int ret, curr;
+ int ret, curr_ua;
u8 reg_value;
di->flags.vbus_drop_end = false;
@@ -2673,30 +2690,30 @@ static void ab8500_charger_vbus_drop_end_work(struct work_struct *work)
return;
}
- curr = di->bm->chg_input_curr[
+ curr_ua = ab8500_charge_input_curr_map[
reg_value >> AUTO_VBUS_IN_CURR_LIM_SHIFT];
- if (di->max_usb_in_curr.calculated_max != curr) {
+ if (di->max_usb_in_curr.calculated_max_ua != curr_ua) {
/* USB source is collapsing */
- di->max_usb_in_curr.calculated_max = curr;
+ di->max_usb_in_curr.calculated_max_ua = curr_ua;
dev_dbg(di->dev,
- "VBUS input current limiting to %d mA\n",
- di->max_usb_in_curr.calculated_max);
+ "VBUS input current limiting to %d uA\n",
+ di->max_usb_in_curr.calculated_max_ua);
} else {
/*
* USB source can not give more than this amount.
* Taking more will collapse the source.
*/
- di->max_usb_in_curr.set_max =
- di->max_usb_in_curr.calculated_max;
+ di->max_usb_in_curr.set_max_ua =
+ di->max_usb_in_curr.calculated_max_ua;
dev_dbg(di->dev,
- "VBUS input current limited to %d mA\n",
- di->max_usb_in_curr.set_max);
+ "VBUS input current limited to %d uA\n",
+ di->max_usb_in_curr.set_max_ua);
}
if (di->usb.charger_connected)
ab8500_charger_set_vbus_in_curr(di,
- di->max_usb_in_curr.usb_type_max);
+ di->max_usb_in_curr.usb_type_max_ua);
}
/**
@@ -2926,9 +2943,9 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
ret = ab8500_charger_get_ac_voltage(di);
if (ret >= 0)
- di->ac.charger_voltage = ret;
+ di->ac.charger_voltage_uv = ret;
/* On error, use previous value */
- val->intval = di->ac.charger_voltage * 1000;
+ val->intval = di->ac.charger_voltage_uv;
break;
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
/*
@@ -2941,8 +2958,8 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CURRENT_NOW:
ret = ab8500_charger_get_ac_current(di);
if (ret >= 0)
- di->ac.charger_current = ret;
- val->intval = di->ac.charger_current * 1000;
+ di->ac.charger_current_ua = ret;
+ val->intval = di->ac.charger_current_ua;
break;
default:
return -EINVAL;
@@ -2995,8 +3012,8 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
ret = ab8500_charger_get_vbus_voltage(di);
if (ret >= 0)
- di->usb.charger_voltage = ret;
- val->intval = di->usb.charger_voltage * 1000;
+ di->usb.charger_voltage_uv = ret;
+ val->intval = di->usb.charger_voltage_uv;
break;
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
/*
@@ -3009,8 +3026,8 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CURRENT_NOW:
ret = ab8500_charger_get_usb_current(di);
if (ret >= 0)
- di->usb.charger_current = ret;
- val->intval = di->usb.charger_current * 1000;
+ di->usb.charger_current_ua = ret;
+ val->intval = di->usb.charger_current_ua;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
/*
@@ -3186,6 +3203,11 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
struct ab8500_charger *di =
container_of(nb, struct ab8500_charger, nb);
enum ab8500_usb_state bm_usb_state;
+ /*
+ * FIXME: it appears the AB8500 PHY never sends what it should here.
+ * Fix the PHY driver to properly notify the desired current.
+ * Also broadcast microampere and not milliampere.
+ */
unsigned mA = *((unsigned *)power);
if (event != USB_EVENT_VBUS) {
@@ -3196,7 +3218,7 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
/* TODO: State is fabricated here. See if charger really needs USB
* state or if mA is enough
*/
- if ((di->usb_state.usb_current == 2) && (mA > 2))
+ if ((di->usb_state.usb_current_ua == 2000) && (mA > 2))
bm_usb_state = AB8500_BM_USB_STATE_RESUME;
else if (mA == 0)
bm_usb_state = AB8500_BM_USB_STATE_RESET_HS;
@@ -3212,7 +3234,8 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
spin_lock(&di->usb_state.usb_lock);
di->usb_state.state_tmp = bm_usb_state;
- di->usb_state.usb_current_tmp = mA;
+ /* FIXME: broadcast ua instead, see above */
+ di->usb_state.usb_current_tmp_ua = mA * 1000;
spin_unlock(&di->usb_state.usb_lock);
/*
@@ -3413,11 +3436,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->bm = &ab8500_bm_data;
- ret = ab8500_bm_of_probe(dev, np, di->bm);
- if (ret) {
- dev_err(dev, "failed to get battery information\n");
- return ret;
- }
di->autopower_cfg = of_property_read_bool(np, "autopower_cfg");
/* get parent data */
@@ -3490,9 +3508,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->invalid_charger_detect_state = 0;
/* AC and USB supply config */
+ ac_psy_cfg.of_node = np;
ac_psy_cfg.supplied_to = supply_interface;
ac_psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
ac_psy_cfg.drv_data = &di->ac_chg;
+ usb_psy_cfg.of_node = np;
usb_psy_cfg.supplied_to = supply_interface;
usb_psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
usb_psy_cfg.drv_data = &di->usb_chg;
@@ -3503,10 +3523,10 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->ac_chg.ops.check_enable = &ab8500_charger_ac_check_enable;
di->ac_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
di->ac_chg.ops.update_curr = &ab8500_charger_update_charger_current;
- di->ac_chg.max_out_volt = ab8500_charger_voltage_map[
+ di->ac_chg.max_out_volt_uv = ab8500_charger_voltage_map[
ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
- di->ac_chg.max_out_curr =
- di->bm->chg_output_curr[di->bm->n_chg_out_curr - 1];
+ di->ac_chg.max_out_curr_ua =
+ ab8500_charge_output_curr_map[ARRAY_SIZE(ab8500_charge_output_curr_map) - 1];
di->ac_chg.wdt_refresh = CHG_WD_INTERVAL;
/*
* The AB8505 only supports USB charging. If we are not the
@@ -3524,13 +3544,13 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_chg.ops.check_enable = &ab8500_charger_usb_check_enable;
di->usb_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
di->usb_chg.ops.update_curr = &ab8500_charger_update_charger_current;
- di->usb_chg.max_out_volt = ab8500_charger_voltage_map[
+ di->usb_chg.max_out_volt_uv = ab8500_charger_voltage_map[
ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
- di->usb_chg.max_out_curr =
- di->bm->chg_output_curr[di->bm->n_chg_out_curr - 1];
+ di->usb_chg.max_out_curr_ua =
+ ab8500_charge_output_curr_map[ARRAY_SIZE(ab8500_charge_output_curr_map) - 1];
di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
di->usb_chg.external = false;
- di->usb_state.usb_current = -1;
+ di->usb_state.usb_current_ua = -1;
mutex_init(&di->charger_attached_mutex);
@@ -3610,6 +3630,15 @@ static int ab8500_charger_probe(struct platform_device *pdev)
return PTR_ERR(di->usb_chg.psy);
}
+ /*
+ * Check what battery we have, since we always have the USB
+ * psy, use that as a handle.
+ */
+ ret = ab8500_bm_of_probe(di->usb_chg.psy, di->bm);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get battery information\n");
+
/* Identify the connected charger types during startup */
charger_status = ab8500_charger_detect_chargers(di, true);
if (charger_status & AC_PW_CONN) {
@@ -3636,11 +3665,13 @@ static int ab8500_charger_probe(struct platform_device *pdev)
}
if (!match) {
dev_err(dev, "no matching components\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto remove_ab8500_bm;
}
if (IS_ERR(match)) {
dev_err(dev, "could not create component match\n");
- return PTR_ERR(match);
+ ret = PTR_ERR(match);
+ goto remove_ab8500_bm;
}
/* Notifier for external charger enabling */
@@ -3681,6 +3712,8 @@ out_charger_notifier:
if (!di->ac_chg.enabled)
blocking_notifier_chain_unregister(
&charger_notifier_list, &charger_nb);
+remove_ab8500_bm:
+ ab8500_bm_of_remove(di->usb_chg.psy, di->bm);
return ret;
}
@@ -3691,6 +3724,7 @@ static int ab8500_charger_remove(struct platform_device *pdev)
component_master_del(&pdev->dev, &ab8500_charger_comp_ops);
usb_unregister_notifier(di->usb_phy, &di->nb);
+ ab8500_bm_of_remove(di->usb_chg.psy, di->bm);
usb_put_phy(di->usb_phy);
if (!di->ac_chg.enabled)
blocking_notifier_chain_unregister(
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 05fe9724ba50..b0919a6a6587 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -38,7 +38,6 @@
#include "ab8500-bm.h"
-#define MILLI_TO_MICRO 1000
#define FG_LSB_IN_MA 1627
#define QLSB_NANO_AMP_HOURS_X10 1071
#define INS_CURR_TIMEOUT (3 * HZ)
@@ -157,10 +156,10 @@ struct inst_curr_result_list {
* @dev: Pointer to the structure device
* @node: a list of AB8500 FGs, hence prepared for reentrance
* @irq holds the CCEOC interrupt number
- * @vbat: Battery voltage in mV
- * @vbat_nom: Nominal battery voltage in mV
- * @inst_curr: Instantenous battery current in mA
- * @avg_curr: Average battery current in mA
+ * @vbat_uv: Battery voltage in uV
+ * @vbat_nom_uv: Nominal battery voltage in uV
+ * @inst_curr_ua: Instantaneous battery current in uA
+ * @avg_curr_ua: Average battery current in uA
* @bat_temp battery temperature
* @fg_samples: Number of samples used in the FG accumulation
* @accu_charge: Accumulated charge from the last conversion
@@ -199,10 +198,10 @@ struct ab8500_fg {
struct device *dev;
struct list_head node;
int irq;
- int vbat;
- int vbat_nom;
- int inst_curr;
- int avg_curr;
+ int vbat_uv;
+ int vbat_nom_uv;
+ int inst_curr_ua;
+ int avg_curr_ua;
int bat_temp;
int fg_samples;
int accu_charge;
@@ -266,84 +265,84 @@ static enum power_supply_property ab8500_fg_props[] = {
/*
* This array maps the raw hex value to lowbat voltage used by the AB8500
- * Values taken from the UM0836
+ * Values taken from the UM0836, in microvolts.
*/
static int ab8500_fg_lowbat_voltage_map[] = {
- 2300 ,
- 2325 ,
- 2350 ,
- 2375 ,
- 2400 ,
- 2425 ,
- 2450 ,
- 2475 ,
- 2500 ,
- 2525 ,
- 2550 ,
- 2575 ,
- 2600 ,
- 2625 ,
- 2650 ,
- 2675 ,
- 2700 ,
- 2725 ,
- 2750 ,
- 2775 ,
- 2800 ,
- 2825 ,
- 2850 ,
- 2875 ,
- 2900 ,
- 2925 ,
- 2950 ,
- 2975 ,
- 3000 ,
- 3025 ,
- 3050 ,
- 3075 ,
- 3100 ,
- 3125 ,
- 3150 ,
- 3175 ,
- 3200 ,
- 3225 ,
- 3250 ,
- 3275 ,
- 3300 ,
- 3325 ,
- 3350 ,
- 3375 ,
- 3400 ,
- 3425 ,
- 3450 ,
- 3475 ,
- 3500 ,
- 3525 ,
- 3550 ,
- 3575 ,
- 3600 ,
- 3625 ,
- 3650 ,
- 3675 ,
- 3700 ,
- 3725 ,
- 3750 ,
- 3775 ,
- 3800 ,
- 3825 ,
- 3850 ,
- 3850 ,
+ 2300000,
+ 2325000,
+ 2350000,
+ 2375000,
+ 2400000,
+ 2425000,
+ 2450000,
+ 2475000,
+ 2500000,
+ 2525000,
+ 2550000,
+ 2575000,
+ 2600000,
+ 2625000,
+ 2650000,
+ 2675000,
+ 2700000,
+ 2725000,
+ 2750000,
+ 2775000,
+ 2800000,
+ 2825000,
+ 2850000,
+ 2875000,
+ 2900000,
+ 2925000,
+ 2950000,
+ 2975000,
+ 3000000,
+ 3025000,
+ 3050000,
+ 3075000,
+ 3100000,
+ 3125000,
+ 3150000,
+ 3175000,
+ 3200000,
+ 3225000,
+ 3250000,
+ 3275000,
+ 3300000,
+ 3325000,
+ 3350000,
+ 3375000,
+ 3400000,
+ 3425000,
+ 3450000,
+ 3475000,
+ 3500000,
+ 3525000,
+ 3550000,
+ 3575000,
+ 3600000,
+ 3625000,
+ 3650000,
+ 3675000,
+ 3700000,
+ 3725000,
+ 3750000,
+ 3775000,
+ 3800000,
+ 3825000,
+ 3850000,
+ 3850000,
};
-static u8 ab8500_volt_to_regval(int voltage)
+static u8 ab8500_volt_to_regval(int voltage_uv)
{
int i;
- if (voltage < ab8500_fg_lowbat_voltage_map[0])
+ if (voltage_uv < ab8500_fg_lowbat_voltage_map[0])
return 0;
for (i = 0; i < ARRAY_SIZE(ab8500_fg_lowbat_voltage_map); i++) {
- if (voltage < ab8500_fg_lowbat_voltage_map[i])
+ if (voltage_uv < ab8500_fg_lowbat_voltage_map[i])
return (u8) i - 1;
}
@@ -354,16 +353,16 @@ static u8 ab8500_volt_to_regval(int voltage)
/**
* ab8500_fg_is_low_curr() - Low or high current mode
* @di: pointer to the ab8500_fg structure
- * @curr: the current to base or our decision on
+ * @curr_ua: the current to base our decision on, in microampere
*
* Low current mode if the current consumption is below a certain threshold
*/
-static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
+static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr_ua)
{
/*
* We want to know if we're in low current mode
*/
- if (curr > -di->bm->fg_params->high_curr_threshold)
+ if (curr_ua > -di->bm->fg_params->high_curr_threshold_ua)
return true;
else
return false;
@@ -601,13 +600,13 @@ int ab8500_fg_inst_curr_done(struct ab8500_fg *di)
/**
* ab8500_fg_inst_curr_finalize() - battery instantaneous current
* @di: pointer to the ab8500_fg structure
- * @res: battery instantenous current(on success)
+ * @curr_ua: battery instantaneous current in microampere (on success)
*
* Returns 0 or an error code
* Note: This is part "two" and has to be called at earliest 250 ms
* after ab8500_fg_inst_curr_start()
*/
-int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
+int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *curr_ua)
{
u8 low, high;
int val;
@@ -663,14 +662,13 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
/*
* Convert to unit value in uA
* Full scale input voltage is
- * 63.160mV => LSB = 63.160mV/(4096*res) = 1.542mA
+ * 63.160mV => LSB = 63.160mV/(4096*res) = 1542 uA
* Given a 250ms conversion cycle time the LSB corresponds
* to 107.1 nAh. Convert to current by dividing by the conversion
* time in hours (250ms = 1 / (3600 * 4)h)
* 107.1nAh assumes 10mOhm, but fg_res is in 0.1mOhm
*/
- val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) /
- (1000 * di->bm->fg_res);
+ val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) / di->bm->fg_res;
if (di->turn_off_fg) {
dev_dbg(di->dev, "%s Disable FG\n", __func__);
@@ -688,7 +686,7 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
goto fail;
}
mutex_unlock(&di->cc_lock);
- (*res) = val;
+ *curr_ua = val;
return 0;
fail:
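The constants above check out: one coulomb-counter LSB is 107.1 nAh (QLSB_NANO_AMP_HOURS_X10 = 1071) gathered over 250 ms = 1/(3600 * 4) h, i.e. roughly 107.1 nAh * 14400/h, about 1542 uA per LSB at the nominal 10 mOhm sense resistor, and since fg_res is expressed in 0.1 mOhm units (100 for 10 mOhm), dropping the old division by 1000 yields microamperes directly. A worked check with a hypothetical raw sample:

int raw = 10;		/* hypothetical coulomb-counter reading */
int fg_res = 100;	/* 10 mOhm expressed in 0.1 mOhm units */
int curr_ua = (raw * 1071 * 36 * 4) / fg_res;	/* 15422 uA, ~15.4 mA */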
@@ -699,15 +697,15 @@ fail:
/**
* ab8500_fg_inst_curr_blocking() - battery instantaneous current
* @di: pointer to the ab8500_fg structure
- * @res: battery instantenous current(on success)
*
- * Returns 0 else error code
+ * Returns battery instantaneous current in microampere (on success)
+ * else error code
*/
int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
{
int ret;
unsigned long timeout;
- int res = 0;
+ int curr_ua = 0;
ret = ab8500_fg_inst_curr_start(di);
if (ret) {
@@ -730,14 +728,14 @@ int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
}
}
- ret = ab8500_fg_inst_curr_finalize(di, &res);
+ ret = ab8500_fg_inst_curr_finalize(di, &curr_ua);
if (ret) {
dev_err(di->dev, "Failed to finalize fg_inst\n");
return 0;
}
- dev_dbg(di->dev, "%s instant current: %d", __func__, res);
- return res;
+ dev_dbg(di->dev, "%s instant current: %d uA", __func__, curr_ua);
+ return curr_ua;
fail:
disable_irq(di->irq);
mutex_unlock(&di->cc_lock);
@@ -797,13 +795,12 @@ static void ab8500_fg_acc_cur_work(struct work_struct *work)
(100 * di->bm->fg_res);
/*
- * Convert to unit value in mA
+ * Convert to unit value in uA
* by dividing by the conversion
* time in hours (= samples / (3600 * 4)h)
- * and multiply with 1000
*/
- di->avg_curr = (val * QLSB_NANO_AMP_HOURS_X10 * 36) /
- (1000 * di->bm->fg_res * (di->fg_samples / 4));
+ di->avg_curr_ua = (val * QLSB_NANO_AMP_HOURS_X10 * 36) /
+ (di->bm->fg_res * (di->fg_samples / 4));
di->flags.conv_done = true;
@@ -825,7 +822,7 @@ exit:
* ab8500_fg_bat_voltage() - get battery voltage
* @di: pointer to the ab8500_fg structure
*
- * Returns battery voltage(on success) else error code
+ * Returns battery voltage in microvolts (on success) else error code
*/
static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
{
@@ -840,6 +837,8 @@ static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
return prev;
}
+ /* IIO returns millivolts but we want microvolts */
+ vbat *= 1000;
prev = vbat;
return vbat;
}
@@ -847,41 +846,16 @@ static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
/**
* ab8500_fg_volt_to_capacity() - Voltage based capacity
* @di: pointer to the ab8500_fg structure
- * @voltage: The voltage to convert to a capacity
+ * @voltage_uv: The voltage, in microvolts, to convert to a capacity
*
* Returns battery capacity in per mille based on voltage
*/
-static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
+static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage_uv)
{
- int i, tbl_size;
- const struct ab8500_v_to_cap *tbl;
- int cap = 0;
-
- tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl;
- tbl_size = di->bm->bat_type[di->bm->batt_id].n_v_cap_tbl_elements;
-
- for (i = 0; i < tbl_size; ++i) {
- if (voltage > tbl[i].voltage)
- break;
- }
-
- if ((i > 0) && (i < tbl_size)) {
- cap = fixp_linear_interpolate(
- tbl[i].voltage,
- tbl[i].capacity * 10,
- tbl[i-1].voltage,
- tbl[i-1].capacity * 10,
- voltage);
- } else if (i == 0) {
- cap = 1000;
- } else {
- cap = 0;
- }
-
- dev_dbg(di->dev, "%s Vbat: %d, Cap: %d per mille",
- __func__, voltage, cap);
+ struct power_supply_battery_info *bi = di->bm->bi;
- return cap;
+ /* Multiply by 10 because the capacity is tracked in per mille */
+ return power_supply_batinfo_ocv2cap(bi, voltage_uv, di->bat_temp) * 10;
}
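
The helper interpolates between OCV/capacity pairs from the battery info. A toy standalone sketch (made-up table, and the same interpolation shape as power_supply_ocv2cap_simple() further down this patch) shows the per-mille scaling:

	#include <stdio.h>

	/* Toy OCV(uV) -> capacity(percent) table, descending OCV */
	static const struct { int ocv_uv, cap_pct; } tbl[] = {
		{ 4200000, 100 }, { 3800000, 50 }, { 3300000, 0 },
	};

	int main(void)
	{
		int ocv = 4000000, i, cap;	/* 4.0 V */

		for (i = 0; i < 3; i++)
			if (ocv > tbl[i].ocv_uv)
				break;
		if (i == 0)
			cap = tbl[0].cap_pct;	/* clamp above the table */
		else if (i == 3)
			cap = tbl[2].cap_pct;	/* clamp below the table */
		else
			cap = tbl[i].cap_pct +
			      (int)((long long)(ocv - tbl[i].ocv_uv) *
			      (tbl[i - 1].cap_pct - tbl[i].cap_pct) /
			      (tbl[i - 1].ocv_uv - tbl[i].ocv_uv));

		printf("%d uV -> %d%% -> %d per mille\n", ocv, cap, cap * 10);
		return 0;
	}
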
/**
@@ -893,8 +867,8 @@ static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
*/
static int ab8500_fg_uncomp_volt_to_capacity(struct ab8500_fg *di)
{
- di->vbat = ab8500_fg_bat_voltage(di);
- return ab8500_fg_volt_to_capacity(di, di->vbat);
+ di->vbat_uv = ab8500_fg_bat_voltage(di);
+ return ab8500_fg_volt_to_capacity(di, di->vbat_uv);
}
/**
@@ -902,44 +876,35 @@ static int ab8500_fg_uncomp_volt_to_capacity(struct ab8500_fg *di)
* @di: pointer to the ab8500_fg structure
*
* Returns battery inner resistance added with the fuel gauge resistor value
- * to get the total resistance in the whole link from gnd to bat+ node.
+ * to get the total resistance in the whole link from gnd to bat+ node
+ * in milliohms.
*/
static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
{
- int i, tbl_size;
- const struct batres_vs_temp *tbl;
- int resist = 0;
+ struct power_supply_battery_info *bi = di->bm->bi;
+ int resistance_percent = 0;
+ int resistance;
- tbl = di->bm->bat_type[di->bm->batt_id].batres_tbl;
- tbl_size = di->bm->bat_type[di->bm->batt_id].n_batres_tbl_elements;
-
- for (i = 0; i < tbl_size; ++i) {
- if (di->bat_temp / 10 > tbl[i].temp)
- break;
- }
-
- if ((i > 0) && (i < tbl_size)) {
- resist = fixp_linear_interpolate(
- tbl[i].temp,
- tbl[i].resist,
- tbl[i-1].temp,
- tbl[i-1].resist,
- di->bat_temp / 10);
- } else if (i == 0) {
- resist = tbl[0].resist;
- } else {
- resist = tbl[tbl_size - 1].resist;
- }
+ resistance_percent = power_supply_temp2resist_simple(bi->resist_table,
+ bi->resist_table_size,
+ di->bat_temp / 10);
+ /*
+ * We get a percentage of the factory resistance here, so first
+ * get the factory resistance in milliohms, then calculate how
+ * much resistance we have at this temperature.
+ */
+ resistance = (bi->factory_internal_resistance_uohm / 1000);
+ resistance = resistance * resistance_percent / 100;
dev_dbg(di->dev, "%s Temp: %d battery internal resistance: %d"
" fg resistance %d, total: %d (mOhm)\n",
- __func__, di->bat_temp, resist, di->bm->fg_res / 10,
- (di->bm->fg_res / 10) + resist);
+ __func__, di->bat_temp, resistance, di->bm->fg_res / 10,
+ (di->bm->fg_res / 10) + resistance);
/* fg_res variable is in 0.1mOhm */
- resist += di->bm->fg_res / 10;
+ resistance += di->bm->fg_res / 10;
- return resist;
+ return resistance;
}
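
With made-up figures (a hypothetical 180 mOhm factory resistance, a 120 percent table hit at the current temperature, and the 10 mOhm sense resistor), the math above gives:

	#include <stdio.h>

	int main(void)
	{
		int factory_uohm = 180000;	/* hypothetical 180 mOhm pack */
		int percent = 120;		/* hypothetical temp2resist result */
		int fg_res = 100;		/* 10 mOhm in 0.1 mOhm units */
		int r_mohm = (factory_uohm / 1000) * percent / 100;	/* 216 */

		r_mohm += fg_res / 10;		/* + 10 mOhm sense resistor */
		printf("total link resistance: %d mOhm\n", r_mohm);	/* 226 */
		return 0;
	}
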
/**
@@ -951,31 +916,34 @@ static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
*/
static int ab8500_fg_load_comp_volt_to_capacity(struct ab8500_fg *di)
{
- int vbat_comp, res;
+ int vbat_comp_uv, res;
int i = 0;
- int vbat = 0;
+ int vbat_uv = 0;
ab8500_fg_inst_curr_start(di);
do {
- vbat += ab8500_fg_bat_voltage(di);
+ vbat_uv += ab8500_fg_bat_voltage(di);
i++;
usleep_range(5000, 6000);
} while (!ab8500_fg_inst_curr_done(di));
- ab8500_fg_inst_curr_finalize(di, &di->inst_curr);
+ ab8500_fg_inst_curr_finalize(di, &di->inst_curr_ua);
- di->vbat = vbat / i;
+ di->vbat_uv = vbat_uv / i;
res = ab8500_fg_battery_resistance(di);
- /* Use Ohms law to get the load compensated voltage */
- vbat_comp = di->vbat - (di->inst_curr * res) / 1000;
+ /*
+ * Use Ohm's law to get the load-compensated voltage.
+ * Divide by 1000 to get from milliohms to ohms.
+ */
+ vbat_comp_uv = di->vbat_uv - (di->inst_curr_ua * res) / 1000;
- dev_dbg(di->dev, "%s Measured Vbat: %dmV,Compensated Vbat %dmV, "
- "R: %dmOhm, Current: %dmA Vbat Samples: %d\n",
- __func__, di->vbat, vbat_comp, res, di->inst_curr, i);
+ dev_dbg(di->dev, "%s Measured Vbat: %d uV,Compensated Vbat %d uV, "
+ "R: %d mOhm, Current: %d uA Vbat Samples: %d\n",
+ __func__, di->vbat_uv, vbat_comp_uv, res, di->inst_curr_ua, i);
- return ab8500_fg_volt_to_capacity(di, vbat_comp);
+ return ab8500_fg_volt_to_capacity(di, vbat_comp_uv);
}
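
A worked standalone sketch of the compensation (made-up figures; note the sign convention makes discharge current negative, so compensation raises the measured voltage back toward the open-circuit value):

	#include <stdio.h>

	int main(void)
	{
		int vbat_uv = 3700000;		/* measured 3.7 V under load */
		int inst_curr_ua = -200000;	/* 200 mA discharge (negative) */
		int res_mohm = 226;		/* total link resistance */
		/* uA * mOhm products are in nV, so divide by 1000 for uV */
		int vbat_comp_uv = vbat_uv - (inst_curr_ua * res_mohm) / 1000;

		printf("compensated: %d uV\n", vbat_comp_uv);	/* 3745200 */
		return 0;
	}
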
/**
@@ -1014,11 +982,16 @@ static int ab8500_fg_convert_mah_to_uwh(struct ab8500_fg *di, int cap_mah)
u64 div_res;
u32 div_rem;
- div_res = ((u64) cap_mah) * ((u64) di->vbat_nom);
- div_rem = do_div(div_res, 1000);
+ /*
+ * Capacity is stored in microampere hours (10^-6)Ah
+ * Nominal voltage is in microvolts (10^-6)V
+ * so divide by 1000000 after multiplication to get to uWh
+ */
+ div_res = ((u64) cap_mah) * ((u64) di->vbat_nom_uv);
+ div_rem = do_div(div_res, 1000000);
/* Make sure to round upwards if necessary */
- if (div_rem >= 1000 / 2)
+ if (div_rem >= 1000000 / 2)
div_res++;
return (int) div_res;
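
A standalone sketch of the same arithmetic with made-up pack figures (bat_cap.mah holds microampere hours, as assigned from charge_full_design_uah elsewhere in this patch):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical 2600 mAh pack stored as uAh, 4.35 V max design */
		unsigned long long cap_uah = 2600000ULL;
		unsigned long long vnom_uv = 4350000ULL;
		unsigned long long prod = cap_uah * vnom_uv;	/* picowatt-hours */
		unsigned long long uwh = prod / 1000000;

		if (prod % 1000000 >= 1000000 / 2)	/* round to nearest */
			uwh++;
		printf("%llu uWh (= %.2f Wh)\n", uwh, uwh / 1e6);
		return 0;
	}
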
@@ -1057,8 +1030,8 @@ static int ab8500_fg_calc_cap_charging(struct ab8500_fg *di)
ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
/* We need to update battery voltage and inst current when charging */
- di->vbat = ab8500_fg_bat_voltage(di);
- di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+ di->vbat_uv = ab8500_fg_bat_voltage(di);
+ di->inst_curr_ua = ab8500_fg_inst_curr_blocking(di);
return di->bat_cap.mah;
}
@@ -1585,9 +1558,9 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
* RECOVERY_SLEEP if time left.
* If high, go to READOUT
*/
- di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+ di->inst_curr_ua = ab8500_fg_inst_curr_blocking(di);
- if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
+ if (ab8500_fg_is_low_curr(di, di->inst_curr_ua)) {
if (di->recovery_cnt >
di->bm->fg_params->recovery_total_time) {
di->fg_samples = SEC_TO_SAMPLE(
@@ -1620,9 +1593,9 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
break;
case AB8500_FG_DISCHARGE_READOUT:
- di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+ di->inst_curr_ua = ab8500_fg_inst_curr_blocking(di);
- if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
+ if (ab8500_fg_is_low_curr(di, di->inst_curr_ua)) {
/* Detect mode change */
if (di->high_curr_mode) {
di->high_curr_mode = false;
@@ -1768,9 +1741,9 @@ static void ab8500_fg_algorithm(struct ab8500_fg *di)
di->bat_cap.prev_mah,
di->bat_cap.prev_percent,
di->bat_cap.prev_level,
- di->vbat,
- di->inst_curr,
- di->avg_curr,
+ di->vbat_uv,
+ di->inst_curr_ua,
+ di->avg_curr_ua,
di->accu_charge,
di->flags.charging,
di->charge_state,
@@ -1863,15 +1836,15 @@ static void ab8500_fg_check_hw_failure_work(struct work_struct *work)
*/
static void ab8500_fg_low_bat_work(struct work_struct *work)
{
- int vbat;
+ int vbat_uv;
struct ab8500_fg *di = container_of(work, struct ab8500_fg,
fg_low_bat_work.work);
- vbat = ab8500_fg_bat_voltage(di);
+ vbat_uv = ab8500_fg_bat_voltage(di);
/* Check if LOW_BAT still fulfilled */
- if (vbat < di->bm->fg_params->lowbat_threshold) {
+ if (vbat_uv < di->bm->fg_params->lowbat_threshold_uv) {
/* Is it time to shut down? */
if (di->low_bat_cnt < 1) {
di->flags.low_bat = true;
@@ -2101,15 +2074,15 @@ static int ab8500_fg_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
if (di->flags.bat_ovv)
- val->intval = BATT_OVV_VALUE * 1000;
+ val->intval = BATT_OVV_VALUE;
else
- val->intval = di->vbat * 1000;
+ val->intval = di->vbat_uv;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- val->intval = di->inst_curr * 1000;
+ val->intval = di->inst_curr_ua;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
- val->intval = di->avg_curr * 1000;
+ val->intval = di->avg_curr_ua;
break;
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
val->intval = ab8500_fg_convert_mah_to_uwh(di,
@@ -2167,11 +2140,13 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
struct power_supply *ext = dev_get_drvdata(dev);
const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_fg *di;
+ struct power_supply_battery_info *bi;
union power_supply_propval ret;
int j;
psy = (struct power_supply *)data;
di = power_supply_get_drvdata(psy);
+ bi = di->bm->bi;
/*
* For all psy where the name of your driver
@@ -2234,21 +2209,22 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
switch (ext->desc->type) {
case POWER_SUPPLY_TYPE_BATTERY:
if (!di->flags.batt_id_received &&
- di->bm->batt_id != BATTERY_UNKNOWN) {
+ (bi && (bi->technology !=
+ POWER_SUPPLY_TECHNOLOGY_UNKNOWN))) {
const struct ab8500_battery_type *b;
- b = &(di->bm->bat_type[di->bm->batt_id]);
+ b = di->bm->bat_type;
di->flags.batt_id_received = true;
di->bat_cap.max_mah_design =
- MILLI_TO_MICRO *
- b->charge_full_design;
+ di->bm->bi->charge_full_design_uah;
di->bat_cap.max_mah =
di->bat_cap.max_mah_design;
- di->vbat_nom = b->nominal_voltage;
+ di->vbat_nom_uv =
+ di->bm->bi->voltage_max_design_uv;
}
if (ret.intval)
@@ -2314,7 +2290,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
AB8500_SYS_CTRL2_BLOCK,
AB8500_LOW_BAT_REG,
ab8500_volt_to_regval(
- di->bm->fg_params->lowbat_threshold) << 1 |
+ di->bm->fg_params->lowbat_threshold_uv) << 1 |
LOW_BAT_ENABLE);
if (ret) {
dev_err(di->dev, "%s write failed\n", __func__);
@@ -3018,6 +2994,10 @@ static int ab8500_fg_bind(struct device *dev, struct device *master,
return -ENOMEM;
}
+ di->bat_cap.max_mah_design = di->bm->bi->charge_full_design_uah;
+ di->bat_cap.max_mah = di->bat_cap.max_mah_design;
+ di->vbat_nom_uv = di->bm->bi->voltage_max_design_uv;
+
/* Start the coulomb counter */
ab8500_fg_coulomb_counter(di, true);
/* Run the FG algorithm */
@@ -3077,13 +3057,6 @@ static int ab8500_fg_probe(struct platform_device *pdev)
psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
psy_cfg.drv_data = di;
- di->bat_cap.max_mah_design = MILLI_TO_MICRO *
- di->bm->bat_type[di->bm->batt_id].charge_full_design;
-
- di->bat_cap.max_mah = di->bat_cap.max_mah_design;
-
- di->vbat_nom = di->bm->bat_type[di->bm->batt_id].nominal_voltage;
-
di->init_capacity = true;
ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index 18a9db0df4b1..5d197141f476 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -561,7 +561,7 @@ static int axp20x_power_probe(struct platform_device *pdev)
{
struct axp20x_batt_ps *axp20x_batt;
struct power_supply_config psy_cfg = {};
- struct power_supply_battery_info info;
+ struct power_supply_battery_info *info;
struct device *dev = &pdev->dev;
if (!of_device_is_available(pdev->dev.of_node))
@@ -615,8 +615,8 @@ static int axp20x_power_probe(struct platform_device *pdev)
}
if (!power_supply_get_battery_info(axp20x_batt->batt, &info)) {
- int vmin = info.voltage_min_design_uv;
- int ccc = info.constant_charge_current_max_ua;
+ int vmin = info->voltage_min_design_uv;
+ int ccc = info->constant_charge_current_max_ua;
if (vmin > 0 && axp20x_set_voltage_min_design(axp20x_batt,
vmin))
diff --git a/drivers/power/supply/bd99954-charger.c b/drivers/power/supply/bd99954-charger.c
index ffd8bfa08179..96e93e1b8094 100644
--- a/drivers/power/supply/bd99954-charger.c
+++ b/drivers/power/supply/bd99954-charger.c
@@ -882,7 +882,7 @@ struct dt_init {
static int bd9995x_fw_probe(struct bd9995x_device *bd)
{
int ret;
- struct power_supply_battery_info info;
+ struct power_supply_battery_info *info;
u32 property;
int i;
int regval;
@@ -891,49 +891,41 @@ static int bd9995x_fw_probe(struct bd9995x_device *bd)
struct battery_init battery_inits[] = {
{
.name = "trickle-charging current",
- .info_data = &info.tricklecharge_current_ua,
.range = &charging_current_ranges[0],
.ranges = 2,
.data = &init->itrich_set,
}, {
.name = "pre-charging current",
- .info_data = &info.precharge_current_ua,
.range = &charging_current_ranges[0],
.ranges = 2,
.data = &init->iprech_set,
}, {
.name = "pre-to-trickle charge voltage threshold",
- .info_data = &info.precharge_voltage_max_uv,
.range = &trickle_to_pre_threshold_ranges[0],
.ranges = 2,
.data = &init->vprechg_th_set,
}, {
.name = "charging termination current",
- .info_data = &info.charge_term_current_ua,
.range = &charging_current_ranges[0],
.ranges = 2,
.data = &init->iterm_set,
}, {
.name = "charging re-start voltage",
- .info_data = &info.charge_restart_voltage_uv,
.range = &charge_voltage_regulation_ranges[0],
.ranges = 2,
.data = &init->vrechg_set,
}, {
.name = "battery overvoltage limit",
- .info_data = &info.overvoltage_limit_uv,
.range = &charge_voltage_regulation_ranges[0],
.ranges = 2,
.data = &init->vbatovp_set,
}, {
.name = "fast-charging max current",
- .info_data = &info.constant_charge_current_max_ua,
.range = &fast_charge_current_ranges[0],
.ranges = 1,
.data = &init->ichg_set,
}, {
.name = "fast-charging voltage",
- .info_data = &info.constant_charge_voltage_max_uv,
.range = &charge_voltage_regulation_ranges[0],
.ranges = 2,
.data = &init->vfastchg_reg_set1,
@@ -966,6 +958,16 @@ static int bd9995x_fw_probe(struct bd9995x_device *bd)
if (ret < 0)
return ret;
+ /* Point the init table entries at the generic battery info */
+ battery_inits[0].info_data = &info->tricklecharge_current_ua;
+ battery_inits[1].info_data = &info->precharge_current_ua;
+ battery_inits[2].info_data = &info->precharge_voltage_max_uv;
+ battery_inits[3].info_data = &info->charge_term_current_ua;
+ battery_inits[4].info_data = &info->charge_restart_voltage_uv;
+ battery_inits[5].info_data = &info->overvoltage_limit_uv;
+ battery_inits[6].info_data = &info->constant_charge_current_max_ua;
+ battery_inits[7].info_data = &info->constant_charge_voltage_max_uv;
+
for (i = 0; i < ARRAY_SIZE(battery_inits); i++) {
int val = *battery_inits[i].info_data;
const struct linear_range *range = battery_inits[i].range;
@@ -980,7 +982,7 @@ static int bd9995x_fw_probe(struct bd9995x_device *bd)
dev_err(bd->dev, "Unsupported value for %s\n",
battery_inits[i].name);
- power_supply_put_battery_info(bd->charger, &info);
+ power_supply_put_battery_info(bd->charger, info);
return -EINVAL;
}
if (!found) {
@@ -991,7 +993,7 @@ static int bd9995x_fw_probe(struct bd9995x_device *bd)
*(battery_inits[i].data) = regval;
}
- power_supply_put_battery_info(bd->charger, &info);
+ power_supply_put_battery_info(bd->charger, info);
for (i = 0; i < ARRAY_SIZE(props); i++) {
ret = device_property_read_u32(bd->dev, props[i].prop,
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 35ff0c8fe96f..06c34b09349c 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1670,7 +1670,7 @@ static int bq24190_hw_init(struct bq24190_dev_info *bdi)
static int bq24190_get_config(struct bq24190_dev_info *bdi)
{
const char * const s = "ti,system-minimum-microvolt";
- struct power_supply_battery_info info = {};
+ struct power_supply_battery_info *info;
int v;
if (device_property_read_u32(bdi->dev, s, &v) == 0) {
@@ -1684,7 +1684,7 @@ static int bq24190_get_config(struct bq24190_dev_info *bdi)
if (bdi->dev->of_node &&
!power_supply_get_battery_info(bdi->charger, &info)) {
- v = info.precharge_current_ua / 1000;
+ v = info->precharge_current_ua / 1000;
if (v >= BQ24190_REG_PCTCC_IPRECHG_MIN
&& v <= BQ24190_REG_PCTCC_IPRECHG_MAX)
bdi->iprechg = v;
@@ -1692,7 +1692,7 @@ static int bq24190_get_config(struct bq24190_dev_info *bdi)
dev_warn(bdi->dev, "invalid value for battery:precharge-current-microamp: %d\n",
v);
- v = info.charge_term_current_ua / 1000;
+ v = info->charge_term_current_ua / 1000;
if (v >= BQ24190_REG_PCTCC_ITERM_MIN
&& v <= BQ24190_REG_PCTCC_ITERM_MAX)
bdi->iterm = v;
diff --git a/drivers/power/supply/bq2515x_charger.c b/drivers/power/supply/bq2515x_charger.c
index 374b112f712a..4f76ad9c2f18 100644
--- a/drivers/power/supply/bq2515x_charger.c
+++ b/drivers/power/supply/bq2515x_charger.c
@@ -945,7 +945,7 @@ static int bq2515x_power_supply_register(struct bq2515x_device *bq2515x,
static int bq2515x_hw_init(struct bq2515x_device *bq2515x)
{
int ret;
- struct power_supply_battery_info bat_info = { };
+ struct power_supply_battery_info *bat_info;
ret = bq2515x_disable_watchdog_timers(bq2515x);
if (ret)
@@ -969,13 +969,13 @@ static int bq2515x_hw_init(struct bq2515x_device *bq2515x)
} else {
bq2515x->init_data.ichg =
- bat_info.constant_charge_current_max_ua;
+ bat_info->constant_charge_current_max_ua;
bq2515x->init_data.vbatreg =
- bat_info.constant_charge_voltage_max_uv;
+ bat_info->constant_charge_voltage_max_uv;
bq2515x->init_data.iprechg =
- bat_info.precharge_current_ua;
+ bat_info->precharge_current_ua;
}
ret = bq2515x_set_const_charge_current(bq2515x,
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index f501ecd49202..b274942dc46a 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -1504,7 +1504,7 @@ static int bq256xx_power_supply_init(struct bq256xx_device *bq,
static int bq256xx_hw_init(struct bq256xx_device *bq)
{
- struct power_supply_battery_info bat_info = { };
+ struct power_supply_battery_info *bat_info;
int wd_reg_val = BQ256XX_WATCHDOG_DIS;
int ret = 0;
int i;
@@ -1526,16 +1526,16 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
if (ret) {
dev_warn(bq->dev, "battery info missing, default values will be applied\n");
+ /* Allocate an empty info to carry the defaults below */
+ bat_info = devm_kzalloc(&bq->charger->dev, sizeof(*bat_info),
+ GFP_KERNEL);
+ if (!bat_info)
+ return -ENOMEM;
+
- bat_info.constant_charge_current_max_ua =
+ bat_info->constant_charge_current_max_ua =
bq->chip_info->bq256xx_def_ichg;
- bat_info.constant_charge_voltage_max_uv =
+ bat_info->constant_charge_voltage_max_uv =
bq->chip_info->bq256xx_def_vbatreg;
- bat_info.precharge_current_ua =
+ bat_info->precharge_current_ua =
bq->chip_info->bq256xx_def_iprechg;
- bat_info.charge_term_current_ua =
+ bat_info->charge_term_current_ua =
bq->chip_info->bq256xx_def_iterm;
bq->init_data.ichg_max =
@@ -1545,10 +1545,10 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
bq->chip_info->bq256xx_max_vbatreg;
} else {
bq->init_data.ichg_max =
- bat_info.constant_charge_current_max_ua;
+ bat_info->constant_charge_current_max_ua;
bq->init_data.vbatreg_max =
- bat_info.constant_charge_voltage_max_uv;
+ bat_info->constant_charge_voltage_max_uv;
}
ret = bq->chip_info->bq256xx_set_vindpm(bq, bq->init_data.vindpm);
@@ -1560,26 +1560,26 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
return ret;
ret = bq->chip_info->bq256xx_set_ichg(bq,
- bat_info.constant_charge_current_max_ua);
+ bat_info->constant_charge_current_max_ua);
if (ret)
return ret;
ret = bq->chip_info->bq256xx_set_iprechg(bq,
- bat_info.precharge_current_ua);
+ bat_info->precharge_current_ua);
if (ret)
return ret;
ret = bq->chip_info->bq256xx_set_vbatreg(bq,
- bat_info.constant_charge_voltage_max_uv);
+ bat_info->constant_charge_voltage_max_uv);
if (ret)
return ret;
ret = bq->chip_info->bq256xx_set_iterm(bq,
- bat_info.charge_term_current_ua);
+ bat_info->charge_term_current_ua);
if (ret)
return ret;
- power_supply_put_battery_info(bq->charger, &bat_info);
+ power_supply_put_battery_info(bq->charger, bat_info);
return 0;
}
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index b7eac5428083..e62da9dc4f35 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -266,6 +266,7 @@ enum bq25890_table_ids {
/* lookup tables */
TBL_TREG,
TBL_BOOSTI,
+ TBL_TSPCT,
};
/* Thermal Regulation Threshold lookup table, in degrees Celsius */
@@ -280,6 +281,28 @@ static const u32 bq25890_boosti_tbl[] = {
#define BQ25890_BOOSTI_TBL_SIZE ARRAY_SIZE(bq25890_boosti_tbl)
+/* NTC 10K temperature lookup table in tenths of a degree */
+static const u32 bq25890_tspct_tbl[] = {
+ 850, 840, 830, 820, 810, 800, 790, 780,
+ 770, 760, 750, 740, 730, 720, 710, 700,
+ 690, 685, 680, 675, 670, 660, 650, 645,
+ 640, 630, 620, 615, 610, 600, 590, 585,
+ 580, 570, 565, 560, 550, 540, 535, 530,
+ 520, 515, 510, 500, 495, 490, 480, 475,
+ 470, 460, 455, 450, 440, 435, 430, 425,
+ 420, 410, 405, 400, 390, 385, 380, 370,
+ 365, 360, 355, 350, 340, 335, 330, 320,
+ 310, 305, 300, 290, 285, 280, 275, 270,
+ 260, 250, 245, 240, 230, 225, 220, 210,
+ 205, 200, 190, 180, 175, 170, 160, 150,
+ 145, 140, 130, 120, 115, 110, 100, 90,
+ 80, 70, 60, 50, 40, 30, 20, 10,
+ 0, -10, -20, -30, -40, -60, -70, -80,
+ -90, -110, -120, -140, -150, -170, -190, -210,
+};
+
+#define BQ25890_TSPCT_TBL_SIZE ARRAY_SIZE(bq25890_tspct_tbl)
+
struct bq25890_range {
u32 min;
u32 max;
@@ -308,7 +331,8 @@ static const union {
/* lookup tables */
[TBL_TREG] = { .lt = {bq25890_treg_tbl, BQ25890_TREG_TBL_SIZE} },
- [TBL_BOOSTI] = { .lt = {bq25890_boosti_tbl, BQ25890_BOOSTI_TBL_SIZE} }
+ [TBL_BOOSTI] = { .lt = {bq25890_boosti_tbl, BQ25890_BOOSTI_TBL_SIZE} },
+ [TBL_TSPCT] = { .lt = {bq25890_tspct_tbl, BQ25890_TSPCT_TBL_SIZE} }
};
static int bq25890_field_read(struct bq25890_device *bq,
@@ -388,6 +412,7 @@ static bool bq25890_is_adc_property(enum power_supply_property psp)
switch (psp) {
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_TEMP:
return true;
default:
@@ -528,6 +553,15 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
val->intval = ret * -50000;
break;
+ case POWER_SUPPLY_PROP_TEMP:
+ ret = bq25890_field_read(bq, F_TSPCT);
+ if (ret < 0)
+ return ret;
+
+ /* convert TS percentage into rough temperature */
+ val->intval = bq25890_find_val(ret, TBL_TSPCT);
+ break;
+
default:
return -EINVAL;
}
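
As a spot check of the table (assuming F_TSPCT yields the raw index that bq25890_find_val() feeds into the lookup): index 56 selects entry 420, i.e. 42.0 degrees in the tenths-of-a-degree unit POWER_SUPPLY_PROP_TEMP uses. A trimmed standalone sketch:

	#include <stdio.h>

	/* Trimmed head of the TSPCT table above (tenths of a degree C) */
	static const int tspct[] = { 850, 840, 830, 820, 810 };

	int main(void)
	{
		int idx = 2;	/* hypothetical F_TSPCT reading */

		printf("TS index %d -> %d (i.e. %d.%d C)\n",
		       idx, tspct[idx], tspct[idx] / 10, tspct[idx] % 10);
		return 0;
	}
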
@@ -713,6 +747,7 @@ static const enum power_supply_property bq25890_power_supply_props[] = {
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
};
static char *bq25890_charger_supplied_to[] = {
diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
index 0008c229fd9c..9daa6d14db4d 100644
--- a/drivers/power/supply/bq25980_charger.c
+++ b/drivers/power/supply/bq25980_charger.c
@@ -1079,7 +1079,7 @@ static int bq25980_power_supply_init(struct bq25980_device *bq,
static int bq25980_hw_init(struct bq25980_device *bq)
{
- struct power_supply_battery_info bat_info = { };
+ struct power_supply_battery_info *bat_info;
int wd_reg_val = BQ25980_WATCHDOG_DIS;
int wd_max_val = BQ25980_NUM_WD_VAL - 1;
int ret = 0;
@@ -1112,8 +1112,8 @@ static int bq25980_hw_init(struct bq25980_device *bq)
return -EINVAL;
}
- bq->init_data.ichg_max = bat_info.constant_charge_current_max_ua;
- bq->init_data.vreg_max = bat_info.constant_charge_voltage_max_uv;
+ bq->init_data.ichg_max = bat_info->constant_charge_current_max_ua;
+ bq->init_data.vreg_max = bat_info->constant_charge_voltage_max_uv;
if (bq->state.bypass) {
ret = regmap_update_bits(bq->regmap, BQ25980_CHRGR_CTRL_2,
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 7e5e24b585d8..72e727cd31e8 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1474,7 +1474,7 @@ static void bq27xxx_battery_set_config(struct bq27xxx_device_info *di,
static void bq27xxx_battery_settings(struct bq27xxx_device_info *di)
{
- struct power_supply_battery_info info = {};
+ struct power_supply_battery_info *info;
unsigned int min, max;
if (power_supply_get_battery_info(di->bat, &info) < 0)
@@ -1485,43 +1485,43 @@ static void bq27xxx_battery_settings(struct bq27xxx_device_info *di)
return;
}
- if (info.energy_full_design_uwh != info.charge_full_design_uah) {
- if (info.energy_full_design_uwh == -EINVAL)
+ if (info->energy_full_design_uwh != info->charge_full_design_uah) {
+ if (info->energy_full_design_uwh == -EINVAL)
dev_warn(di->dev, "missing battery:energy-full-design-microwatt-hours\n");
- else if (info.charge_full_design_uah == -EINVAL)
+ else if (info->charge_full_design_uah == -EINVAL)
dev_warn(di->dev, "missing battery:charge-full-design-microamp-hours\n");
}
/* assume min == 0 */
max = di->dm_regs[BQ27XXX_DM_DESIGN_ENERGY].max;
- if (info.energy_full_design_uwh > max * 1000) {
+ if (info->energy_full_design_uwh > max * 1000) {
dev_err(di->dev, "invalid battery:energy-full-design-microwatt-hours %d\n",
- info.energy_full_design_uwh);
- info.energy_full_design_uwh = -EINVAL;
+ info->energy_full_design_uwh);
+ info->energy_full_design_uwh = -EINVAL;
}
/* assume min == 0 */
max = di->dm_regs[BQ27XXX_DM_DESIGN_CAPACITY].max;
- if (info.charge_full_design_uah > max * 1000) {
+ if (info->charge_full_design_uah > max * 1000) {
dev_err(di->dev, "invalid battery:charge-full-design-microamp-hours %d\n",
- info.charge_full_design_uah);
- info.charge_full_design_uah = -EINVAL;
+ info->charge_full_design_uah);
+ info->charge_full_design_uah = -EINVAL;
}
min = di->dm_regs[BQ27XXX_DM_TERMINATE_VOLTAGE].min;
max = di->dm_regs[BQ27XXX_DM_TERMINATE_VOLTAGE].max;
- if ((info.voltage_min_design_uv < min * 1000 ||
- info.voltage_min_design_uv > max * 1000) &&
- info.voltage_min_design_uv != -EINVAL) {
+ if ((info->voltage_min_design_uv < min * 1000 ||
+ info->voltage_min_design_uv > max * 1000) &&
+ info->voltage_min_design_uv != -EINVAL) {
dev_err(di->dev, "invalid battery:voltage-min-design-microvolt %d\n",
- info.voltage_min_design_uv);
- info.voltage_min_design_uv = -EINVAL;
+ info->voltage_min_design_uv);
+ info->voltage_min_design_uv = -EINVAL;
}
- if ((info.energy_full_design_uwh != -EINVAL &&
- info.charge_full_design_uah != -EINVAL) ||
- info.voltage_min_design_uv != -EINVAL)
- bq27xxx_battery_set_config(di, &info);
+ if ((info->energy_full_design_uwh != -EINVAL &&
+ info->charge_full_design_uah != -EINVAL) ||
+ info->voltage_min_design_uv != -EINVAL)
+ bq27xxx_battery_set_config(di, info);
}
/*
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 091868e9e9e8..0c87ad0dbf71 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -61,7 +61,7 @@ struct cw_battery {
struct delayed_work battery_delay_work;
struct regmap *regmap;
struct power_supply *rk_bat;
- struct power_supply_battery_info battery;
+ struct power_supply_battery_info *battery;
u8 *bat_profile;
bool charger_attached;
@@ -505,22 +505,22 @@ static int cw_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- if (cw_bat->battery.charge_full_design_uah > 0)
- val->intval = cw_bat->battery.charge_full_design_uah;
+ if (cw_bat->battery->charge_full_design_uah > 0)
+ val->intval = cw_bat->battery->charge_full_design_uah;
else
val->intval = 0;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
- val->intval = cw_bat->battery.charge_full_design_uah;
+ val->intval = cw_bat->battery->charge_full_design_uah;
val->intval = val->intval * cw_bat->soc / 100;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (cw_battery_valid_time_to_empty(cw_bat) &&
- cw_bat->battery.charge_full_design_uah > 0) {
+ cw_bat->battery->charge_full_design_uah > 0) {
/* calculate remaining capacity */
- val->intval = cw_bat->battery.charge_full_design_uah;
+ val->intval = cw_bat->battery->charge_full_design_uah;
val->intval = val->intval * cw_bat->soc / 100;
/* estimate current based on time to empty */
@@ -687,6 +687,12 @@ static int cw_bat_probe(struct i2c_client *client)
ret = power_supply_get_battery_info(cw_bat->rk_bat, &cw_bat->battery);
if (ret) {
+ /* Allocate an empty battery */
+ cw_bat->battery = devm_kzalloc(&client->dev,
+ sizeof(*cw_bat->battery),
+ GFP_KERNEL);
+ if (!cw_bat->battery)
+ return -ENOMEM;
dev_warn(cw_bat->dev,
"No monitored battery, some properties will be missing\n");
}
@@ -724,7 +730,7 @@ static int cw_bat_remove(struct i2c_client *client)
struct cw_battery *cw_bat = i2c_get_clientdata(client);
cancel_delayed_work_sync(&cw_bat->battery_delay_work);
- power_supply_put_battery_info(cw_bat->rk_bat, &cw_bat->battery);
+ power_supply_put_battery_info(cw_bat->rk_bat, cw_bat->battery);
return 0;
}
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index 8b18219ebe90..2e7fdfde47ec 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -18,7 +18,7 @@ struct ingenic_battery {
struct iio_channel *channel;
struct power_supply_desc desc;
struct power_supply *battery;
- struct power_supply_battery_info info;
+ struct power_supply_battery_info *info;
};
static int ingenic_battery_get_property(struct power_supply *psy,
@@ -26,7 +26,7 @@ static int ingenic_battery_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct ingenic_battery *bat = power_supply_get_drvdata(psy);
- struct power_supply_battery_info *info = &bat->info;
+ struct power_supply_battery_info *info = bat->info;
int ret;
switch (psp) {
@@ -80,7 +80,7 @@ static int ingenic_battery_set_scale(struct ingenic_battery *bat)
if (ret != IIO_AVAIL_LIST || scale_type != IIO_VAL_FRACTIONAL_LOG2)
return -EINVAL;
- max_mV = bat->info.voltage_max_design_uv / 1000;
+ max_mV = bat->info->voltage_max_design_uv / 1000;
for (i = 0; i < scale_len; i += 2) {
u64 scale_mV = (max_raw * scale_raw[i]) >> scale_raw[i + 1];
@@ -156,13 +156,13 @@ static int ingenic_battery_probe(struct platform_device *pdev)
dev_err(dev, "Unable to get battery info: %d\n", ret);
return ret;
}
- if (bat->info.voltage_min_design_uv < 0) {
+ if (bat->info->voltage_min_design_uv < 0) {
dev_err(dev, "Unable to get voltage min design\n");
- return bat->info.voltage_min_design_uv;
+ return bat->info->voltage_min_design_uv;
}
- if (bat->info.voltage_max_design_uv < 0) {
+ if (bat->info->voltage_max_design_uv < 0) {
dev_err(dev, "Unable to get voltage max design\n");
- return bat->info.voltage_max_design_uv;
+ return bat->info->voltage_max_design_uv;
}
return ingenic_battery_set_scale(bat);
diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
new file mode 100644
index 000000000000..8b6c8cfa7503
--- /dev/null
+++ b/drivers/power/supply/max77976_charger.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * max77976_charger.c - Driver for the Maxim MAX77976 battery charger
+ *
+ * Copyright (C) 2021 Luca Ceresoli
+ * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+#define MAX77976_DRIVER_NAME "max77976-charger"
+#define MAX77976_CHIP_ID 0x76
+
+static const char *max77976_manufacturer = "Maxim Integrated";
+static const char *max77976_model = "MAX77976";
+
+/* --------------------------------------------------------------------------
+ * Register map
+ */
+
+#define MAX77976_REG_CHIP_ID 0x00
+#define MAX77976_REG_CHIP_REVISION 0x01
+#define MAX77976_REG_CHG_INT_OK 0x12
+#define MAX77976_REG_CHG_DETAILS_01 0x14
+#define MAX77976_REG_CHG_CNFG_00 0x16
+#define MAX77976_REG_CHG_CNFG_02 0x18
+#define MAX77976_REG_CHG_CNFG_06 0x1c
+#define MAX77976_REG_CHG_CNFG_09 0x1f
+
+/* CHG_DETAILS_01.CHG_DTLS values */
+enum max77976_charging_state {
+ MAX77976_CHARGING_PREQUALIFICATION = 0x0,
+ MAX77976_CHARGING_FAST_CONST_CURRENT,
+ MAX77976_CHARGING_FAST_CONST_VOLTAGE,
+ MAX77976_CHARGING_TOP_OFF,
+ MAX77976_CHARGING_DONE,
+ MAX77976_CHARGING_RESERVED_05,
+ MAX77976_CHARGING_TIMER_FAULT,
+ MAX77976_CHARGING_SUSPENDED_QBATT_OFF,
+ MAX77976_CHARGING_OFF,
+ MAX77976_CHARGING_RESERVED_09,
+ MAX77976_CHARGING_THERMAL_SHUTDOWN,
+ MAX77976_CHARGING_WATCHDOG_EXPIRED,
+ MAX77976_CHARGING_SUSPENDED_JEITA,
+ MAX77976_CHARGING_SUSPENDED_THM_REMOVAL,
+ MAX77976_CHARGING_SUSPENDED_PIN,
+ MAX77976_CHARGING_RESERVED_0F,
+};
+
+/* CHG_DETAILS_01.BAT_DTLS values */
+enum max77976_battery_state {
+ MAX77976_BATTERY_BATTERY_REMOVAL = 0x0,
+ MAX77976_BATTERY_PREQUALIFICATION,
+ MAX77976_BATTERY_TIMER_FAULT,
+ MAX77976_BATTERY_REGULAR_VOLTAGE,
+ MAX77976_BATTERY_LOW_VOLTAGE,
+ MAX77976_BATTERY_OVERVOLTAGE,
+ MAX77976_BATTERY_RESERVED,
+ MAX77976_BATTERY_BATTERY_ONLY, // No valid adapter is present
+};
+
+/* CHG_CNFG_00.MODE values */
+enum max77976_mode {
+ MAX77976_MODE_CHARGER_BUCK = 0x5,
+ MAX77976_MODE_BOOST = 0x9,
+};
+
+/* CHG_CNFG_02.CHG_CC: charge current limit, 100..5500 mA, 50 mA steps */
+#define MAX77976_CHG_CC_STEP 50000U
+#define MAX77976_CHG_CC_MIN 100000U
+#define MAX77976_CHG_CC_MAX 5500000U
+
+/* CHG_CNFG_09.CHGIN_ILIM: input current limit, 100..3200 mA, 100 mA steps */
+#define MAX77976_CHGIN_ILIM_STEP 100000U
+#define MAX77976_CHGIN_ILIM_MIN 100000U
+#define MAX77976_CHGIN_ILIM_MAX 3200000U
+
+enum max77976_field_idx {
+ VERSION, REVISION, /* CHIP_REVISION */
+ CHGIN_OK, /* CHG_INT_OK */
+ BAT_DTLS, CHG_DTLS, /* CHG_DETAILS_01 */
+ MODE, /* CHG_CNFG_00 */
+ CHG_CC, /* CHG_CNFG_02 */
+ CHGPROT, /* CHG_CNFG_06 */
+ CHGIN_ILIM, /* CHG_CNFG_09 */
+ MAX77976_N_REGMAP_FIELDS
+};
+
+static const struct reg_field max77976_reg_field[MAX77976_N_REGMAP_FIELDS] = {
+ [VERSION] = REG_FIELD(MAX77976_REG_CHIP_REVISION, 4, 7),
+ [REVISION] = REG_FIELD(MAX77976_REG_CHIP_REVISION, 0, 3),
+ [CHGIN_OK] = REG_FIELD(MAX77976_REG_CHG_INT_OK, 6, 6),
+ [CHG_DTLS] = REG_FIELD(MAX77976_REG_CHG_DETAILS_01, 0, 3),
+ [BAT_DTLS] = REG_FIELD(MAX77976_REG_CHG_DETAILS_01, 4, 6),
+ [MODE] = REG_FIELD(MAX77976_REG_CHG_CNFG_00, 0, 3),
+ [CHG_CC] = REG_FIELD(MAX77976_REG_CHG_CNFG_02, 0, 6),
+ [CHGPROT] = REG_FIELD(MAX77976_REG_CHG_CNFG_06, 2, 3),
+ [CHGIN_ILIM] = REG_FIELD(MAX77976_REG_CHG_CNFG_09, 0, 5),
+};
+
+static const struct regmap_config max77976_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x24,
+};
+
+/* --------------------------------------------------------------------------
+ * Data structures
+ */
+
+struct max77976 {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct regmap_field *rfield[MAX77976_N_REGMAP_FIELDS];
+};
+
+/* --------------------------------------------------------------------------
+ * power_supply properties
+ */
+
+static int max77976_get_status(struct max77976 *chg, int *val)
+{
+ unsigned int regval;
+ int err;
+
+ err = regmap_field_read(chg->rfield[CHG_DTLS], &regval);
+ if (err < 0)
+ return err;
+
+ switch (regval) {
+ case MAX77976_CHARGING_PREQUALIFICATION:
+ case MAX77976_CHARGING_FAST_CONST_CURRENT:
+ case MAX77976_CHARGING_FAST_CONST_VOLTAGE:
+ case MAX77976_CHARGING_TOP_OFF:
+ *val = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case MAX77976_CHARGING_DONE:
+ *val = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case MAX77976_CHARGING_TIMER_FAULT:
+ case MAX77976_CHARGING_SUSPENDED_QBATT_OFF:
+ case MAX77976_CHARGING_SUSPENDED_JEITA:
+ case MAX77976_CHARGING_SUSPENDED_THM_REMOVAL:
+ case MAX77976_CHARGING_SUSPENDED_PIN:
+ *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case MAX77976_CHARGING_OFF:
+ case MAX77976_CHARGING_THERMAL_SHUTDOWN:
+ case MAX77976_CHARGING_WATCHDOG_EXPIRED:
+ *val = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ default:
+ *val = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int max77976_get_charge_type(struct max77976 *chg, int *val)
+{
+ unsigned int regval;
+ int err;
+
+ err = regmap_field_read(chg->rfield[CHG_DTLS], &regval);
+ if (err < 0)
+ return err;
+
+ switch (regval) {
+ case MAX77976_CHARGING_PREQUALIFICATION:
+ *val = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case MAX77976_CHARGING_FAST_CONST_CURRENT:
+ case MAX77976_CHARGING_FAST_CONST_VOLTAGE:
+ *val = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case MAX77976_CHARGING_TOP_OFF:
+ *val = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ break;
+ case MAX77976_CHARGING_DONE:
+ case MAX77976_CHARGING_TIMER_FAULT:
+ case MAX77976_CHARGING_SUSPENDED_QBATT_OFF:
+ case MAX77976_CHARGING_OFF:
+ case MAX77976_CHARGING_THERMAL_SHUTDOWN:
+ case MAX77976_CHARGING_WATCHDOG_EXPIRED:
+ case MAX77976_CHARGING_SUSPENDED_JEITA:
+ case MAX77976_CHARGING_SUSPENDED_THM_REMOVAL:
+ case MAX77976_CHARGING_SUSPENDED_PIN:
+ *val = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ default:
+ *val = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int max77976_get_health(struct max77976 *chg, int *val)
+{
+ unsigned int regval;
+ int err;
+
+ err = regmap_field_read(chg->rfield[BAT_DTLS], &regval);
+ if (err < 0)
+ return err;
+
+ switch (regval) {
+ case MAX77976_BATTERY_BATTERY_REMOVAL:
+ *val = POWER_SUPPLY_HEALTH_NO_BATTERY;
+ break;
+ case MAX77976_BATTERY_LOW_VOLTAGE:
+ case MAX77976_BATTERY_REGULAR_VOLTAGE:
+ *val = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case MAX77976_BATTERY_TIMER_FAULT:
+ *val = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
+ break;
+ case MAX77976_BATTERY_OVERVOLTAGE:
+ *val = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ break;
+ case MAX77976_BATTERY_PREQUALIFICATION:
+ case MAX77976_BATTERY_BATTERY_ONLY:
+ *val = POWER_SUPPLY_HEALTH_UNKNOWN;
+ break;
+ default:
+ *val = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int max77976_get_online(struct max77976 *chg, int *val)
+{
+ unsigned int regval;
+ int err;
+
+ err = regmap_field_read(chg->rfield[CHGIN_OK], &regval);
+ if (err < 0)
+ return err;
+
+ *val = (regval ? 1 : 0);
+
+ return 0;
+}
+
+static int max77976_get_integer(struct max77976 *chg, enum max77976_field_idx fidx,
+ unsigned int clamp_min, unsigned int clamp_max,
+ unsigned int mult, int *val)
+{
+ unsigned int regval;
+ int err;
+
+ err = regmap_field_read(chg->rfield[fidx], &regval);
+ if (err < 0)
+ return err;
+
+ *val = clamp_val(regval * mult, clamp_min, clamp_max);
+
+ return 0;
+}
+
+static int max77976_set_integer(struct max77976 *chg, enum max77976_field_idx fidx,
+ unsigned int clamp_min, unsigned int clamp_max,
+ unsigned int div, int val)
+{
+ unsigned int regval;
+
+ regval = clamp_val(val, clamp_min, clamp_max) / div;
+
+ return regmap_field_write(chg->rfield[fidx], regval);
+}
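
Following the CHG_CC constants above, the clamp-and-divide in max77976_set_integer() maps microamperes onto 50 mA register steps; a standalone sketch of the round trip:

	#include <stdio.h>

	#define STEP 50000U	/* CHG_CC: 50 mA per LSB */
	#define MIN  100000U
	#define MAX  5500000U

	int main(void)
	{
		unsigned int ua = 1500000;	/* request 1.5 A */
		unsigned int reg = (ua < MIN ? MIN : ua > MAX ? MAX : ua) / STEP;

		printf("%u uA -> regval %u -> %u uA back\n", ua, reg, reg * STEP);
		return 0;
	}
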
+
+static int max77976_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max77976 *chg = power_supply_get_drvdata(psy);
+ int err = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ err = max77976_get_status(chg, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ err = max77976_get_charge_type(chg, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ err = max77976_get_health(chg, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ err = max77976_get_online(chg, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ val->intval = MAX77976_CHG_CC_MAX;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ err = max77976_get_integer(chg, CHG_CC,
+ MAX77976_CHG_CC_MIN,
+ MAX77976_CHG_CC_MAX,
+ MAX77976_CHG_CC_STEP,
+ &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ err = max77976_get_integer(chg, CHGIN_ILIM,
+ MAX77976_CHGIN_ILIM_MIN,
+ MAX77976_CHGIN_ILIM_MAX,
+ MAX77976_CHGIN_ILIM_STEP,
+ &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = max77976_model;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = max77976_manufacturer;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int max77976_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct max77976 *chg = power_supply_get_drvdata(psy);
+ int err = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ err = max77976_set_integer(chg, CHG_CC,
+ MAX77976_CHG_CC_MIN,
+ MAX77976_CHG_CC_MAX,
+ MAX77976_CHG_CC_STEP,
+ val->intval);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ err = max77976_set_integer(chg, CHGIN_ILIM,
+ MAX77976_CHGIN_ILIM_MIN,
+ MAX77976_CHGIN_ILIM_MAX,
+ MAX77976_CHGIN_ILIM_STEP,
+ val->intval);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int max77976_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static enum power_supply_property max77976_psy_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static const struct power_supply_desc max77976_psy_desc = {
+ .name = MAX77976_DRIVER_NAME,
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = max77976_psy_props,
+ .num_properties = ARRAY_SIZE(max77976_psy_props),
+ .get_property = max77976_get_property,
+ .set_property = max77976_set_property,
+ .property_is_writeable = max77976_property_is_writeable,
+};
+
+/* --------------------------------------------------------------------------
+ * Entry point
+ */
+
+static int max77976_detect(struct max77976 *chg)
+{
+ struct device *dev = &chg->client->dev;
+ unsigned int id, ver, rev;
+ int err;
+
+ err = regmap_read(chg->regmap, MAX77976_REG_CHIP_ID, &id);
+ if (err)
+ return dev_err_probe(dev, err, "cannot read chip ID\n");
+
+ if (id != MAX77976_CHIP_ID)
+ return dev_err_probe(dev, -ENXIO, "unknown model ID 0x%02x\n", id);
+
+ err = regmap_field_read(chg->rfield[VERSION], &ver);
+ if (!err)
+ err = regmap_field_read(chg->rfield[REVISION], &rev);
+ if (err)
+ return dev_err_probe(dev, -ENXIO, "cannot read version/revision\n");
+
+ dev_info(dev, "detected model MAX779%02x ver %u rev %u", id, ver, rev);
+
+ return 0;
+}
+
+static int max77976_configure(struct max77976 *chg)
+{
+ struct device *dev = &chg->client->dev;
+ int err;
+
+ /* Magic value to unlock writing to some registers */
+ err = regmap_field_write(chg->rfield[CHGPROT], 0x3);
+ if (err)
+ goto err;
+
+ /*
+ * Mode 5 = Charger ON, OTG OFF, buck ON, boost OFF.
+ * Other modes are not implemented by this driver.
+ */
+ err = regmap_field_write(chg->rfield[MODE], MAX77976_MODE_CHARGER_BUCK);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ return dev_err_probe(dev, err, "error while configuring");
+}
+
+static int max77976_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct power_supply_config psy_cfg = {};
+ struct power_supply *psy;
+ struct max77976 *chg;
+ int err;
+ int i;
+
+ chg = devm_kzalloc(dev, sizeof(*chg), GFP_KERNEL);
+ if (!chg)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chg);
+ psy_cfg.drv_data = chg;
+ chg->client = client;
+
+ chg->regmap = devm_regmap_init_i2c(client, &max77976_regmap_config);
+ if (IS_ERR(chg->regmap))
+ return dev_err_probe(dev, PTR_ERR(chg->regmap),
+ "cannot allocate regmap\n");
+
+ for (i = 0; i < MAX77976_N_REGMAP_FIELDS; i++) {
+ chg->rfield[i] = devm_regmap_field_alloc(dev, chg->regmap,
+ max77976_reg_field[i]);
+ if (IS_ERR(chg->rfield[i]))
+ return dev_err_probe(dev, PTR_ERR(chg->rfield[i]),
+ "cannot allocate regmap field\n");
+ }
+
+ err = max77976_detect(chg);
+ if (err)
+ return err;
+
+ err = max77976_configure(chg);
+ if (err)
+ return err;
+
+ psy = devm_power_supply_register_no_ws(dev, &max77976_psy_desc, &psy_cfg);
+ if (IS_ERR(psy))
+ return dev_err_probe(dev, PTR_ERR(psy), "cannot register\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id max77976_i2c_id[] = {
+ { MAX77976_DRIVER_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, max77976_i2c_id);
+
+static const struct of_device_id max77976_of_id[] = {
+ { .compatible = "maxim,max77976" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, max77976_of_id);
+
+static struct i2c_driver max77976_driver = {
+ .driver = {
+ .name = MAX77976_DRIVER_NAME,
+ .of_match_table = max77976_of_id,
+ },
+ .probe_new = max77976_probe,
+ .id_table = max77976_i2c_id,
+};
+module_i2c_driver(max77976_driver);
+
+MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_DESCRIPTION("Maxim MAX77976 charger driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 6093754cebd5..ec838c9bcc0a 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -21,6 +21,7 @@
#include <linux/power_supply.h>
#include <linux/property.h>
#include <linux/thermal.h>
+#include <linux/fixp-arith.h>
#include "power_supply.h"
/* exported for the APM Power driver, APM emulation */
@@ -563,14 +564,19 @@ EXPORT_SYMBOL_GPL(devm_power_supply_get_by_phandle);
#endif /* CONFIG_OF */
int power_supply_get_battery_info(struct power_supply *psy,
- struct power_supply_battery_info *info)
+ struct power_supply_battery_info **info_out)
{
struct power_supply_resistance_temp_table *resist_table;
+ struct power_supply_battery_info *info;
struct device_node *battery_np;
const char *value;
int err, len, index;
const __be32 *list;
+ info = devm_kmalloc(&psy->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
info->technology = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
info->energy_full_design_uwh = -EINVAL;
info->charge_full_design_uah = -EINVAL;
@@ -580,6 +586,10 @@ int power_supply_get_battery_info(struct power_supply *psy,
info->charge_term_current_ua = -EINVAL;
info->constant_charge_current_max_ua = -EINVAL;
info->constant_charge_voltage_max_uv = -EINVAL;
+ info->tricklecharge_current_ua = -EINVAL;
+ info->precharge_voltage_max_uv = -EINVAL;
+ info->charge_restart_voltage_uv = -EINVAL;
+ info->overvoltage_limit_uv = -EINVAL;
info->temp_ambient_alert_min = INT_MIN;
info->temp_ambient_alert_max = INT_MAX;
info->temp_alert_min = INT_MIN;
@@ -727,7 +737,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
list = of_get_property(battery_np, "resistance-temp-table", &len);
if (!list || !len)
- goto out_put_node;
+ goto out_ret_pointer;
info->resist_table_size = len / (2 * sizeof(__be32));
resist_table = info->resist_table = devm_kcalloc(&psy->dev,
@@ -745,6 +755,10 @@ int power_supply_get_battery_info(struct power_supply *psy,
resist_table[index].resistance = be32_to_cpu(*list++);
}
+out_ret_pointer:
+ /* Finally return the whole thing */
+ *info_out = info;
+
out_put_node:
of_node_put(battery_np);
return err;
@@ -763,6 +777,8 @@ void power_supply_put_battery_info(struct power_supply *psy,
if (info->resist_table)
devm_kfree(&psy->dev, info->resist_table);
+
+ devm_kfree(&psy->dev, info);
}
EXPORT_SYMBOL_GPL(power_supply_put_battery_info);
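
The new calling convention for drivers, in sketch form (a hypothetical call site; psy and my_hw_set_capacity() are placeholders, and the info struct now lives in devres between get and put):

	struct power_supply_battery_info *info;
	int err;

	err = power_supply_get_battery_info(psy, &info);
	if (err)
		return err;

	if (info->charge_full_design_uah != -EINVAL)
		my_hw_set_capacity(info->charge_full_design_uah);

	power_supply_put_battery_info(psy, info);
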
@@ -783,26 +799,25 @@ EXPORT_SYMBOL_GPL(power_supply_put_battery_info);
int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
int table_len, int temp)
{
- int i, resist;
+ int i, high, low;
- for (i = 0; i < table_len; i++)
+ /* Break loop at table_len - 1 because that is the highest index */
+ for (i = 0; i < table_len - 1; i++)
if (temp > table[i].temp)
break;
- if (i > 0 && i < table_len) {
- int tmp;
-
- tmp = (table[i - 1].resistance - table[i].resistance) *
- (temp - table[i].temp);
- tmp /= table[i - 1].temp - table[i].temp;
- resist = tmp + table[i].resistance;
- } else if (i == 0) {
- resist = table[0].resistance;
- } else {
- resist = table[table_len - 1].resistance;
- }
-
- return resist;
+ /* The library function will deal with high == low */
+ if ((i == 0) || (i == (table_len - 1)))
+ high = i;
+ else
+ high = i - 1;
+ low = i;
+
+ return fixp_linear_interpolate(table[low].temp,
+ table[low].resistance,
+ table[high].temp,
+ table[high].resistance,
+ temp);
}
EXPORT_SYMBOL_GPL(power_supply_temp2resist_simple);
@@ -821,24 +836,25 @@ EXPORT_SYMBOL_GPL(power_supply_temp2resist_simple);
int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table,
int table_len, int ocv)
{
- int i, cap, tmp;
+ int i, high, low;
- for (i = 0; i < table_len; i++)
+ /* Break loop at table_len - 1 because that is the highest index */
+ for (i = 0; i < table_len - 1; i++)
if (ocv > table[i].ocv)
break;
- if (i > 0 && i < table_len) {
- tmp = (table[i - 1].capacity - table[i].capacity) *
- (ocv - table[i].ocv);
- tmp /= table[i - 1].ocv - table[i].ocv;
- cap = tmp + table[i].capacity;
- } else if (i == 0) {
- cap = table[0].capacity;
- } else {
- cap = table[table_len - 1].capacity;
- }
-
- return cap;
+ /* The library function will deal with high == low */
+ if ((i == 0) || (i == (table_len - 1)))
+ high = i;
+ else
+ high = i - 1;
+ low = i;
+
+ return fixp_linear_interpolate(table[low].ocv,
+ table[low].capacity,
+ table[high].ocv,
+ table[high].capacity,
+ ocv);
}
EXPORT_SYMBOL_GPL(power_supply_ocv2cap_simple);
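
The boundary handling above collapses to high == low at either end of the table, which the library interpolation turns into a clamp. A standalone sketch (interp() mirrors what fixp_linear_interpolate() is expected to do when both points coincide):

	#include <stdio.h>

	static int interp(int x0, int y0, int x1, int y1, int x)
	{
		if (y0 == y1 || x == x0)
			return y0;
		if (x1 == x0 || x == x1)
			return y1;
		return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
	}

	int main(void)
	{
		/* descending OCV table in mV for brevity */
		int ocv[] = { 4200, 3800, 3300 };
		int cap[] = { 100, 50, 0 };
		int len = 3, x = 4300, i, high, low;

		for (i = 0; i < len - 1; i++)
			if (x > ocv[i])
				break;
		/* boundaries collapse to high == low, clamping the result */
		high = (i == 0 || i == len - 1) ? i : i - 1;
		low = i;
		printf("%d mV -> %d%%\n", x, interp(ocv[low], cap[low],
						    ocv[high], cap[high], x));
		return 0;
	}
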
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index c3d7cbcd4fad..c0dfcfa33206 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -106,6 +106,7 @@ static const char * const POWER_SUPPLY_HEALTH_TEXT[] = {
[POWER_SUPPLY_HEALTH_WARM] = "Warm",
[POWER_SUPPLY_HEALTH_COOL] = "Cool",
[POWER_SUPPLY_HEALTH_HOT] = "Hot",
+ [POWER_SUPPLY_HEALTH_NO_BATTERY] = "No battery",
};
static const char * const POWER_SUPPLY_TECHNOLOGY_TEXT[] = {
@@ -133,6 +134,12 @@ static const char * const POWER_SUPPLY_SCOPE_TEXT[] = {
[POWER_SUPPLY_SCOPE_DEVICE] = "Device",
};
+static const char * const POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT[] = {
+ [POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO] = "auto",
+ [POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE] = "inhibit-charge",
+ [POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE] = "force-discharge",
+};
+
static struct power_supply_attr power_supply_attrs[] = {
/* Properties of type `int' */
POWER_SUPPLY_ENUM_ATTR(STATUS),
@@ -172,6 +179,7 @@ static struct power_supply_attr power_supply_attrs[] = {
POWER_SUPPLY_ATTR(CHARGE_CONTROL_LIMIT_MAX),
POWER_SUPPLY_ATTR(CHARGE_CONTROL_START_THRESHOLD),
POWER_SUPPLY_ATTR(CHARGE_CONTROL_END_THRESHOLD),
+ POWER_SUPPLY_ENUM_ATTR(CHARGE_BEHAVIOUR),
POWER_SUPPLY_ATTR(INPUT_CURRENT_LIMIT),
POWER_SUPPLY_ATTR(INPUT_VOLTAGE_LIMIT),
POWER_SUPPLY_ATTR(INPUT_POWER_LIMIT),
@@ -484,3 +492,52 @@ out:
return ret;
}
+
+ssize_t power_supply_charge_behaviour_show(struct device *dev,
+ unsigned int available_behaviours,
+ enum power_supply_charge_behaviour current_behaviour,
+ char *buf)
+{
+ bool match = false, available, active;
+ ssize_t count = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT); i++) {
+ available = available_behaviours & BIT(i);
+ active = i == current_behaviour;
+
+ if (available && active) {
+ count += sysfs_emit_at(buf, count, "[%s] ",
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT[i]);
+ match = true;
+ } else if (available) {
+ count += sysfs_emit_at(buf, count, "%s ",
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT[i]);
+ }
+ }
+
+ if (!match) {
+ dev_warn(dev, "driver reporting unsupported charge behaviour\n");
+ return -EINVAL;
+ }
+
+ if (count)
+ buf[count - 1] = '\n';
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(power_supply_charge_behaviour_show);
+
+int power_supply_charge_behaviour_parse(unsigned int available_behaviours, const char *buf)
+{
+ int i = sysfs_match_string(POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT, buf);
+
+ if (i < 0)
+ return i;
+
+ if (available_behaviours & BIT(i))
+ return i;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(power_supply_charge_behaviour_parse);
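
A sketch of how a charger driver might consume these helpers (names here are hypothetical; the BIT() mask follows the available_behaviours convention above, and a read of the resulting attribute would render e.g. "[auto] inhibit-charge"):

	static const unsigned int my_behaviours =
		BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO) |
		BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE);

	static ssize_t charge_behaviour_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
	{
		/* current behaviour would come from driver state; AUTO here */
		return power_supply_charge_behaviour_show(dev, my_behaviours,
				POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO, buf);
	}

	static ssize_t charge_behaviour_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
	{
		int ret = power_supply_charge_behaviour_parse(my_behaviours, buf);

		if (ret < 0)
			return ret;
		/* ...apply the chosen behaviour to the hardware... */
		return count;
	}
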
diff --git a/drivers/power/supply/qcom_smbb.c b/drivers/power/supply/qcom_smbb.c
index 84cc9fba029d..bd50124eef9f 100644
--- a/drivers/power/supply/qcom_smbb.c
+++ b/drivers/power/supply/qcom_smbb.c
@@ -863,8 +863,8 @@ static int smbb_charger_probe(struct platform_device *pdev)
}
chg->revision += 1;
- if (chg->revision != 2 && chg->revision != 3) {
- dev_err(&pdev->dev, "v1 hardware not supported\n");
+ if (chg->revision != 1 && chg->revision != 2 && chg->revision != 3) {
+ dev_err(&pdev->dev, "v%d hardware not supported\n", chg->revision);
return -ENODEV;
}
dev_info(&pdev->dev, "Initializing SMBB rev %u", chg->revision);
@@ -1012,6 +1012,7 @@ static int smbb_charger_remove(struct platform_device *pdev)
}
static const struct of_device_id smbb_charger_id_table[] = {
+ { .compatible = "qcom,pm8226-charger" },
{ .compatible = "qcom,pm8941-charger" },
{ }
};
diff --git a/drivers/power/supply/sc2731_charger.c b/drivers/power/supply/sc2731_charger.c
index 288b79836c13..9ac17cf7a126 100644
--- a/drivers/power/supply/sc2731_charger.c
+++ b/drivers/power/supply/sc2731_charger.c
@@ -368,7 +368,7 @@ static int sc2731_charger_usb_change(struct notifier_block *nb,
static int sc2731_charger_hw_init(struct sc2731_charger_info *info)
{
- struct power_supply_battery_info bat_info = { };
+ struct power_supply_battery_info *bat_info;
u32 term_currrent, term_voltage, cur_val, vol_val;
int ret;
@@ -390,7 +390,7 @@ static int sc2731_charger_hw_init(struct sc2731_charger_info *info)
cur_val = 0x2;
vol_val = 0x1;
} else {
- term_currrent = bat_info.charge_term_current_ua / 1000;
+ term_currrent = bat_info->charge_term_current_ua / 1000;
if (term_currrent <= 90)
cur_val = 0;
@@ -399,7 +399,7 @@ static int sc2731_charger_hw_init(struct sc2731_charger_info *info)
else
cur_val = ((term_currrent - 90) / 25) + 1;
- term_voltage = bat_info.constant_charge_voltage_max_uv / 1000;
+ term_voltage = bat_info->constant_charge_voltage_max_uv / 1000;
if (term_voltage > 4500)
term_voltage = 4500;
@@ -409,7 +409,7 @@ static int sc2731_charger_hw_init(struct sc2731_charger_info *info)
else
vol_val = 0;
- power_supply_put_battery_info(info->psy_usb, &bat_info);
+ power_supply_put_battery_info(info->psy_usb, bat_info);
}
/* Set charge termination current */
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index ae45069bd5e1..632977f84b95 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -998,7 +998,7 @@ static int sc27xx_fgu_calibration(struct sc27xx_fgu_data *data)
static int sc27xx_fgu_hw_init(struct sc27xx_fgu_data *data)
{
- struct power_supply_battery_info info = { };
+ struct power_supply_battery_info *info;
struct power_supply_battery_ocv_table *table;
int ret, delta_clbcnt, alarm_adc;
@@ -1008,16 +1008,16 @@ static int sc27xx_fgu_hw_init(struct sc27xx_fgu_data *data)
return ret;
}
- data->total_cap = info.charge_full_design_uah / 1000;
- data->max_volt = info.constant_charge_voltage_max_uv / 1000;
- data->internal_resist = info.factory_internal_resistance_uohm / 1000;
- data->min_volt = info.voltage_min_design_uv;
+ data->total_cap = info->charge_full_design_uah / 1000;
+ data->max_volt = info->constant_charge_voltage_max_uv / 1000;
+ data->internal_resist = info->factory_internal_resistance_uohm / 1000;
+ data->min_volt = info->voltage_min_design_uv;
/*
* For SC27XX fuel gauge device, we only use one ocv-capacity
* table in normal temperature 20 Celsius.
*/
- table = power_supply_find_ocv2cap_table(&info, 20, &data->table_len);
+ table = power_supply_find_ocv2cap_table(info, 20, &data->table_len);
if (!table)
return -EINVAL;
@@ -1025,7 +1025,7 @@ static int sc27xx_fgu_hw_init(struct sc27xx_fgu_data *data)
data->table_len * sizeof(*table),
GFP_KERNEL);
if (!data->cap_table) {
- power_supply_put_battery_info(data->battery, &info);
+ power_supply_put_battery_info(data->battery, info);
return -ENOMEM;
}
@@ -1035,19 +1035,19 @@ static int sc27xx_fgu_hw_init(struct sc27xx_fgu_data *data)
if (!data->alarm_cap)
data->alarm_cap += 1;
- data->resist_table_len = info.resist_table_size;
+ data->resist_table_len = info->resist_table_size;
if (data->resist_table_len > 0) {
- data->resist_table = devm_kmemdup(data->dev, info.resist_table,
+ data->resist_table = devm_kmemdup(data->dev, info->resist_table,
data->resist_table_len *
sizeof(struct power_supply_resistance_temp_table),
GFP_KERNEL);
if (!data->resist_table) {
- power_supply_put_battery_info(data->battery, &info);
+ power_supply_put_battery_info(data->battery, info);
return -ENOMEM;
}
}
- power_supply_put_battery_info(data->battery, &info);
+ power_supply_put_battery_info(data->battery, info);
ret = sc27xx_fgu_calibration(data);
if (ret)
diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index 753944e774c4..d56e469043bb 100644
--- a/drivers/power/supply/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
@@ -1281,7 +1281,7 @@ static void smb347_dt_parse_dev_info(struct smb347_charger *smb)
static int smb347_get_battery_info(struct smb347_charger *smb)
{
- struct power_supply_battery_info info = {};
+ struct power_supply_battery_info *info;
struct power_supply *supply;
int err;
@@ -1296,29 +1296,29 @@ static int smb347_get_battery_info(struct smb347_charger *smb)
if (err)
return err;
- if (info.constant_charge_current_max_ua != -EINVAL)
- smb->max_charge_current = info.constant_charge_current_max_ua;
+ if (info->constant_charge_current_max_ua != -EINVAL)
+ smb->max_charge_current = info->constant_charge_current_max_ua;
- if (info.constant_charge_voltage_max_uv != -EINVAL)
- smb->max_charge_voltage = info.constant_charge_voltage_max_uv;
+ if (info->constant_charge_voltage_max_uv != -EINVAL)
+ smb->max_charge_voltage = info->constant_charge_voltage_max_uv;
- if (info.precharge_current_ua != -EINVAL)
- smb->pre_charge_current = info.precharge_current_ua;
+ if (info->precharge_current_ua != -EINVAL)
+ smb->pre_charge_current = info->precharge_current_ua;
- if (info.charge_term_current_ua != -EINVAL)
- smb->termination_current = info.charge_term_current_ua;
+ if (info->charge_term_current_ua != -EINVAL)
+ smb->termination_current = info->charge_term_current_ua;
- if (info.temp_alert_min != INT_MIN)
- smb->soft_cold_temp_limit = info.temp_alert_min;
+ if (info->temp_alert_min != INT_MIN)
+ smb->soft_cold_temp_limit = info->temp_alert_min;
- if (info.temp_alert_max != INT_MAX)
- smb->soft_hot_temp_limit = info.temp_alert_max;
+ if (info->temp_alert_max != INT_MAX)
+ smb->soft_hot_temp_limit = info->temp_alert_max;
- if (info.temp_min != INT_MIN)
- smb->hard_cold_temp_limit = info.temp_min;
+ if (info->temp_min != INT_MIN)
+ smb->hard_cold_temp_limit = info->temp_min;
- if (info.temp_max != INT_MAX)
- smb->hard_hot_temp_limit = info.temp_max;
+ if (info->temp_max != INT_MAX)
+ smb->hard_hot_temp_limit = info->temp_max;
/* Suspend when battery temperature is outside hard limits */
if (smb->hard_cold_temp_limit != SMB3XX_TEMP_USE_DEFAULT ||
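The sc2731, sc27xx and smb347 hunks above all adopt the same reworked battery-info API: power_supply_get_battery_info() now allocates struct power_supply_battery_info and returns it through a pointer, which the caller releases when done. A minimal caller sketch under that assumption (the helper name is illustrative, not from the patch):

#include <linux/power_supply.h>

/* Read the charge termination current using the pointer-based API. */
static int example_read_term_current(struct power_supply *psy,
				     int *term_current_ma)
{
	struct power_supply_battery_info *info;
	int err;

	err = power_supply_get_battery_info(psy, &info);
	if (err)
		return err;

	*term_current_ma = info->charge_term_current_ua / 1000;

	power_supply_put_battery_info(psy, info);
	return 0;
}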
diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c
index 2a5c1829aab7..8cb45f2d3d78 100644
--- a/drivers/powercap/dtpm.c
+++ b/drivers/powercap/dtpm.c
@@ -382,7 +382,7 @@ void dtpm_unregister(struct dtpm *dtpm)
{
powercap_unregister_zone(pct, &dtpm->zone);
- pr_info("Unregistered dtpm node '%s'\n", dtpm->zone.name);
+ pr_debug("Unregistered dtpm node '%s'\n", dtpm->zone.name);
}
/**
@@ -453,8 +453,8 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
dtpm->power_limit = dtpm->power_max;
}
- pr_info("Registered dtpm node '%s' / %llu-%llu uW, \n",
- dtpm->zone.name, dtpm->power_min, dtpm->power_max);
+ pr_debug("Registered dtpm node '%s' / %llu-%llu uW, \n",
+ dtpm->zone.name, dtpm->power_min, dtpm->power_max);
mutex_unlock(&dtpm_lock);
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index 6e1a0043c411..a20bf12f3ce3 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -12,7 +12,7 @@
*
* All of the kthreads used for idle injection are created at init time.
*
- * Next, the users of the the idle injection framework provide a cpumask via
+ * Next, the users of the idle injection framework provide a cpumask via
* its register function. The kthreads will be synchronized with respect to
* this cpumask.
*
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 7c0099e7a6d7..07611a00b78f 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -61,6 +61,20 @@
#define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff
#define PP_POLICY_MASK 0x1F
+/*
+ * SPR has different layout for Psys Domain PowerLimit registers.
+ * There are 17 bits of PL1 and PL2 instead of 15 bits.
+ * The Enable bits and TimeWindow bits are also shifted as a result.
+ */
+#define PSYS_POWER_LIMIT1_MASK 0x1FFFF
+#define PSYS_POWER_LIMIT1_ENABLE BIT(17)
+
+#define PSYS_POWER_LIMIT2_MASK (0x1FFFFULL<<32)
+#define PSYS_POWER_LIMIT2_ENABLE BIT_ULL(49)
+
+#define PSYS_TIME_WINDOW1_MASK (0x7FULL<<19)
+#define PSYS_TIME_WINDOW2_MASK (0x7FULL<<51)
+
/* Non HW constants */
#define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */
#define RAPL_PRIMITIVE_DUMMY BIT(2)
@@ -97,6 +111,7 @@ struct rapl_defaults {
bool to_raw);
unsigned int dram_domain_energy_unit;
unsigned int psys_domain_energy_unit;
+ bool spr_psys_bits;
};
static struct rapl_defaults *rapl_defaults;
@@ -669,12 +684,51 @@ static struct rapl_primitive_info rpi[] = {
RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0),
PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
RAPL_DOMAIN_REG_POLICY, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT1, PSYS_POWER_LIMIT1_MASK, 0,
+ RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT2, PSYS_POWER_LIMIT2_MASK, 32,
+ RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PSYS_PL1_ENABLE, PSYS_POWER_LIMIT1_ENABLE, 17,
+ RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PSYS_PL2_ENABLE, PSYS_POWER_LIMIT2_ENABLE, 49,
+ RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW1, PSYS_TIME_WINDOW1_MASK, 19,
+ RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW2, PSYS_TIME_WINDOW2_MASK, 51,
+ RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
/* non-hardware */
PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
RAPL_PRIMITIVE_DERIVED),
{NULL, 0, 0, 0},
};
+static enum rapl_primitives
+prim_fixups(struct rapl_domain *rd, enum rapl_primitives prim)
+{
+ if (!rapl_defaults->spr_psys_bits)
+ return prim;
+
+ if (rd->id != RAPL_DOMAIN_PLATFORM)
+ return prim;
+
+ switch (prim) {
+ case POWER_LIMIT1:
+ return PSYS_POWER_LIMIT1;
+ case POWER_LIMIT2:
+ return PSYS_POWER_LIMIT2;
+ case PL1_ENABLE:
+ return PSYS_PL1_ENABLE;
+ case PL2_ENABLE:
+ return PSYS_PL2_ENABLE;
+ case TIME_WINDOW1:
+ return PSYS_TIME_WINDOW1;
+ case TIME_WINDOW2:
+ return PSYS_TIME_WINDOW2;
+ default:
+ return prim;
+ }
+}
+
/* Read primitive data based on its related struct rapl_primitive_info.
* if xlate flag is set, return translated data based on data units, i.e.
* time, energy, and power.
@@ -692,7 +746,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
enum rapl_primitives prim, bool xlate, u64 *data)
{
u64 value;
- struct rapl_primitive_info *rp = &rpi[prim];
+ enum rapl_primitives prim_fixed = prim_fixups(rd, prim);
+ struct rapl_primitive_info *rp = &rpi[prim_fixed];
struct reg_action ra;
int cpu;
@@ -738,7 +793,8 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
enum rapl_primitives prim,
unsigned long long value)
{
- struct rapl_primitive_info *rp = &rpi[prim];
+ enum rapl_primitives prim_fixed = prim_fixups(rd, prim);
+ struct rapl_primitive_info *rp = &rpi[prim_fixed];
int cpu;
u64 bits;
struct reg_action ra;
@@ -981,6 +1037,7 @@ static const struct rapl_defaults rapl_defaults_spr_server = {
.compute_time_window = rapl_compute_time_window_core,
.dram_domain_energy_unit = 15300,
.psys_domain_energy_unit = 1000000000,
+ .spr_psys_bits = true,
};
static const struct rapl_defaults rapl_defaults_byt = {
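The new PSYS_* primitives above encode the wider SPR layout: PL1 occupies bits 0-16 and PL2 bits 32-48 (17 bits instead of the legacy 15), with the Enable bits pushed to 17/49 and the time windows to 19/51. A standalone sketch, with a made-up register value, showing how the masks isolate the two limits:

#include <stdint.h>
#include <stdio.h>

#define PSYS_POWER_LIMIT1_MASK 0x1FFFFULL
#define PSYS_POWER_LIMIT2_MASK (0x1FFFFULL << 32)

int main(void)
{
	uint64_t msr = (250ULL << 32) | 180ULL;	/* made-up register value */

	printf("PL1 = %llu, PL2 = %llu (raw power units)\n",
	       (unsigned long long)(msr & PSYS_POWER_LIMIT1_MASK),
	       (unsigned long long)((msr & PSYS_POWER_LIMIT2_MASK) >> 32));
	return 0;
}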
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index 6c7c2843ba0b..61f47fb9d997 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -338,10 +338,6 @@ static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
ts_stat_tx = 0;
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
index baee0379482b..ab1d233173e1 100644
--- a/drivers/ptp/ptp_vclock.c
+++ b/drivers/ptp/ptp_vclock.c
@@ -185,8 +185,8 @@ out:
}
EXPORT_SYMBOL(ptp_get_vclocks_index);
-void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
- int vclock_index)
+ktime_t ptp_convert_timestamp(const struct skb_shared_hwtstamps *hwtstamps,
+ int vclock_index)
{
char name[PTP_CLOCK_NAME_LEN] = "";
struct ptp_vclock *vclock;
@@ -198,12 +198,12 @@ void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", vclock_index);
dev = class_find_device_by_name(ptp_class, name);
if (!dev)
- return;
+ return 0;
ptp = dev_get_drvdata(dev);
if (!ptp->is_virtual_clock) {
put_device(dev);
- return;
+ return 0;
}
vclock = info_to_vclock(ptp->info);
@@ -215,7 +215,7 @@ void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
spin_unlock_irqrestore(&vclock->lock, flags);
put_device(dev);
- hwtstamps->hwtstamp = ns_to_ktime(ns);
+ return ns_to_ktime(ns);
}
EXPORT_SYMBOL(ptp_convert_timestamp);
#endif
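With the signature change above, ptp_convert_timestamp() no longer writes into the passed hwtstamps but returns the converted ktime_t (0 on failure), so callers assign the result themselves. A hedged caller sketch (the function name is illustrative):

#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>

/* Convert an skb's hardware timestamp to the given virtual clock. */
static void example_rx_vclock_tstamp(struct sk_buff *skb, int vclock_index)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	if (vclock_index >= 0)
		hwtstamps->hwtstamp = ptp_convert_timestamp(hwtstamps,
							    vclock_index);
}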
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index fb04a439462c..c7552df32082 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -152,6 +152,32 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
}
EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
+struct pwm_device *
+of_pwm_single_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ if (pc->of_pwm_n_cells < 1)
+ return ERR_PTR(-EINVAL);
+
+ /* validate that one cell is specified, optionally with flags */
+ if (args->args_count != 1 && args->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ pwm = pwm_request_from_chip(pc, 0, NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ pwm->args.period = args->args[0];
+ pwm->args.polarity = PWM_POLARITY_NORMAL;
+
+ if (args->args_count == 2 && args->args[1] & PWM_POLARITY_INVERTED)
+ pwm->args.polarity = PWM_POLARITY_INVERSED;
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(of_pwm_single_xlate);
+
static void of_pwmchip_add(struct pwm_chip *chip)
{
if (!chip->dev || !chip->dev->of_node)
@@ -522,6 +548,73 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
}
}
+static int pwm_apply_legacy(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+ struct pwm_state initial_state = pwm->state;
+
+ if (state->polarity != pwm->state.polarity) {
+ if (!chip->ops->set_polarity)
+ return -EINVAL;
+
+ /*
+ * Changing the polarity of a running PWM is only allowed when
+ * the PWM driver implements ->apply().
+ */
+ if (pwm->state.enabled) {
+ chip->ops->disable(chip, pwm);
+
+ /*
+ * Update pwm->state already here in case
+ * .set_polarity() or another callback depend on that.
+ */
+ pwm->state.enabled = false;
+ }
+
+ err = chip->ops->set_polarity(chip, pwm, state->polarity);
+ if (err)
+ goto rollback;
+
+ pwm->state.polarity = state->polarity;
+ }
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ chip->ops->disable(chip, pwm);
+
+ return 0;
+ }
+
+ /*
+ * We cannot skip calling ->config even if state->period ==
+ * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
+ * because we might have exited early in the last call to
+ * pwm_apply_state because of !state->enabled and so the two values in
+ * pwm->state might not be configured in hardware.
+ */
+ err = chip->ops->config(pwm->chip, pwm,
+ state->duty_cycle,
+ state->period);
+ if (err)
+ goto rollback;
+
+ pwm->state.period = state->period;
+ pwm->state.duty_cycle = state->duty_cycle;
+
+ if (!pwm->state.enabled) {
+ err = chip->ops->enable(chip, pwm);
+ if (err)
+ goto rollback;
+ }
+
+ return 0;
+
+rollback:
+ pwm->state = initial_state;
+ return err;
+}
+
/**
* pwm_apply_state() - atomically apply a new state to a PWM device
* @pwm: PWM device
@@ -554,70 +647,22 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
state->usage_power == pwm->state.usage_power)
return 0;
- if (chip->ops->apply) {
+ if (chip->ops->apply)
err = chip->ops->apply(chip, pwm, state);
- if (err)
- return err;
+ else
+ err = pwm_apply_legacy(chip, pwm, state);
+ if (err)
+ return err;
- trace_pwm_apply(pwm, state);
+ trace_pwm_apply(pwm, state);
- pwm->state = *state;
+ pwm->state = *state;
- /*
- * only do this after pwm->state was applied as some
- * implementations of .get_state depend on this
- */
- pwm_apply_state_debug(pwm, state);
- } else {
- /*
- * FIXME: restore the initial state in case of error.
- */
- if (state->polarity != pwm->state.polarity) {
- if (!chip->ops->set_polarity)
- return -EINVAL;
-
- /*
- * Changing the polarity of a running PWM is
- * only allowed when the PWM driver implements
- * ->apply().
- */
- if (pwm->state.enabled) {
- chip->ops->disable(chip, pwm);
- pwm->state.enabled = false;
- }
-
- err = chip->ops->set_polarity(chip, pwm,
- state->polarity);
- if (err)
- return err;
-
- pwm->state.polarity = state->polarity;
- }
-
- if (state->period != pwm->state.period ||
- state->duty_cycle != pwm->state.duty_cycle) {
- err = chip->ops->config(pwm->chip, pwm,
- state->duty_cycle,
- state->period);
- if (err)
- return err;
-
- pwm->state.duty_cycle = state->duty_cycle;
- pwm->state.period = state->period;
- }
-
- if (state->enabled != pwm->state.enabled) {
- if (state->enabled) {
- err = chip->ops->enable(chip, pwm);
- if (err)
- return err;
- } else {
- chip->ops->disable(chip, pwm);
- }
-
- pwm->state.enabled = state->enabled;
- }
- }
+ /*
+ * only do this after pwm->state was applied as some
+ * implementations of .get_state depend on this
+ */
+ pwm_apply_state_debug(pwm, state);
return 0;
}
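After this refactor a consumer cannot tell whether the driver implements .apply or only the legacy callbacks: pwm_apply_state() routes to pwm_apply_legacy() in the latter case and rolls the driver state back on error. A minimal consumer sketch, assuming an already requested PWM device (the function is illustrative):

#include <linux/pwm.h>

/* Drive the PWM at a 1 ms period with 50% duty. */
static int example_set_50_percent(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);	/* start from the current state */
	state.period = 1000000;		/* ns */
	state.duty_cycle = 500000;	/* ns */
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}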
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index f97f82548293..5996049f66ec 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -128,11 +128,9 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
- ret = pm_runtime_get_sync(chip->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(chip->dev);
+ ret = pm_runtime_resume_and_get(chip->dev);
+ if (ret < 0)
return ret;
- }
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
@@ -184,10 +182,33 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
pm_runtime_put_autosuspend(chip->dev);
}
+static int img_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ img_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = img_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = img_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops img_pwm_ops = {
- .config = img_pwm_config,
- .enable = img_pwm_enable,
- .disable = img_pwm_disable,
+ .apply = img_pwm_apply,
.owner = THIS_MODULE,
};
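The img_pwm_config() hunk above also switches to pm_runtime_resume_and_get(), which, unlike pm_runtime_get_sync(), drops its usage count itself on failure, so the error path needs no explicit put. The general pattern, as a hedged sketch:

#include <linux/pm_runtime.h>

/* Resume the device, touch the hardware, and drop the reference. */
static int example_do_io(struct device *dev)
{
	int err;

	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		return err;	/* usage count already dropped */

	/* ... register accesses go here ... */

	pm_runtime_put(dev);
	return 0;
}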
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index a9efdcf839ae..238ec88c130b 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -148,20 +148,6 @@ static const struct platform_device_id *pxa_pwm_get_id_dt(struct device *dev)
return id ? id->data : NULL;
}
-static struct pwm_device *
-pxa_pwm_of_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
-{
- struct pwm_device *pwm;
-
- pwm = pwm_request_from_chip(pc, 0, NULL);
- if (IS_ERR(pwm))
- return pwm;
-
- pwm->args.period = args->args[0];
-
- return pwm;
-}
-
static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
@@ -187,7 +173,7 @@ static int pwm_probe(struct platform_device *pdev)
pc->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
if (IS_ENABLED(CONFIG_OF)) {
- pc->chip.of_xlate = pxa_pwm_of_xlate;
+ pc->chip.of_xlate = of_pwm_single_xlate;
pc->chip.of_pwm_n_cells = 1;
}
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 11a10b575ace..18cf974ac776 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -42,12 +42,16 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pm_opp.h>
#include <linux/pwm.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
+
#define PWM_ENABLE (1 << 31)
#define PWM_DUTY_WIDTH 8
#define PWM_DUTY_SHIFT 16
@@ -145,7 +149,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
required_clk_rate =
(NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH;
- err = clk_set_rate(pc->clk, required_clk_rate);
+ err = dev_pm_opp_set_rate(pc->dev, required_clk_rate);
if (err < 0)
return -EINVAL;
@@ -181,8 +185,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* before writing the register. Otherwise, keep it enabled.
*/
if (!pwm_is_enabled(pwm)) {
- err = clk_prepare_enable(pc->clk);
- if (err < 0)
+ err = pm_runtime_resume_and_get(pc->dev);
+ if (err)
return err;
} else
val |= PWM_ENABLE;
@@ -193,7 +197,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* If the PWM is not enabled, turn the clock off again to save power.
*/
if (!pwm_is_enabled(pwm))
- clk_disable_unprepare(pc->clk);
+ pm_runtime_put(pc->dev);
return 0;
}
@@ -204,8 +208,8 @@ static int tegra_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
int rc = 0;
u32 val;
- rc = clk_prepare_enable(pc->clk);
- if (rc < 0)
+ rc = pm_runtime_resume_and_get(pc->dev);
+ if (rc)
return rc;
val = pwm_readl(pc, pwm->hwpwm);
@@ -224,7 +228,7 @@ static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val &= ~PWM_ENABLE;
pwm_writel(pc, pwm->hwpwm, val);
- clk_disable_unprepare(pc->clk);
+ pm_runtime_put_sync(pc->dev);
}
static const struct pwm_ops tegra_pwm_ops = {
@@ -256,11 +260,20 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
+ ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
/* Set maximum frequency of the IP */
- ret = clk_set_rate(pwm->clk, pwm->soc->max_frequency);
+ ret = dev_pm_opp_set_rate(pwm->dev, pwm->soc->max_frequency);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret);
- return ret;
+ goto put_pm;
}
/*
@@ -278,7 +291,7 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->rst)) {
ret = PTR_ERR(pwm->rst);
dev_err(&pdev->dev, "Reset control is not found: %d\n", ret);
- return ret;
+ goto put_pm;
}
reset_control_deassert(pwm->rst);
@@ -291,10 +304,16 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
reset_control_assert(pwm->rst);
- return ret;
+ goto put_pm;
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
+put_pm:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
+ return ret;
}
static int tegra_pwm_remove(struct platform_device *pdev)
@@ -305,20 +324,44 @@ static int tegra_pwm_remove(struct platform_device *pdev)
reset_control_assert(pc->rst);
+ pm_runtime_force_suspend(&pdev->dev);
+
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tegra_pwm_suspend(struct device *dev)
+static int __maybe_unused tegra_pwm_runtime_suspend(struct device *dev)
{
- return pinctrl_pm_select_sleep_state(dev);
+ struct tegra_pwm_chip *pc = dev_get_drvdata(dev);
+ int err;
+
+ clk_disable_unprepare(pc->clk);
+
+ err = pinctrl_pm_select_sleep_state(dev);
+ if (err) {
+ clk_prepare_enable(pc->clk);
+ return err;
+ }
+
+ return 0;
}
-static int tegra_pwm_resume(struct device *dev)
+static int __maybe_unused tegra_pwm_runtime_resume(struct device *dev)
{
- return pinctrl_pm_select_default_state(dev);
+ struct tegra_pwm_chip *pc = dev_get_drvdata(dev);
+ int err;
+
+ err = pinctrl_pm_select_default_state(dev);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(pc->clk);
+ if (err) {
+ pinctrl_pm_select_sleep_state(dev);
+ return err;
+ }
+
+ return 0;
}
-#endif
static const struct tegra_pwm_soc tegra20_pwm_soc = {
.num_channels = 4,
@@ -344,7 +387,10 @@ static const struct of_device_id tegra_pwm_of_match[] = {
MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
static const struct dev_pm_ops tegra_pwm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_pwm_suspend, tegra_pwm_resume)
+ SET_RUNTIME_PM_OPS(tegra_pwm_runtime_suspend, tegra_pwm_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra_pwm_driver = {
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index 203194f2c92e..86567add79db 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -58,9 +58,9 @@ static inline struct twl_pwm_chip *to_twl(struct pwm_chip *chip)
}
static int twl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+ u64 duty_ns, u64 period_ns)
{
- int duty_cycle = DIV_ROUND_UP(duty_ns * TWL_PWM_MAX, period_ns) + 1;
+ int duty_cycle = DIV64_U64_ROUND_UP(duty_ns * TWL_PWM_MAX, period_ns) + 1;
u8 pwm_config[2] = { 1, 0 };
int base, ret;
@@ -279,19 +279,65 @@ out:
mutex_unlock(&twl->mutex);
}
+static int twl4030_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ twl4030_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = twl_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = twl4030_pwm_enable(chip, pwm);
+
+ return err;
+}
+
+static int twl6030_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ twl6030_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = twl_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = twl6030_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops twl4030_pwm_ops = {
- .config = twl_pwm_config,
- .enable = twl4030_pwm_enable,
- .disable = twl4030_pwm_disable,
+ .apply = twl4030_pwm_apply,
.request = twl4030_pwm_request,
.free = twl4030_pwm_free,
.owner = THIS_MODULE,
};
static const struct pwm_ops twl6030_pwm_ops = {
- .config = twl_pwm_config,
- .enable = twl6030_pwm_enable,
- .disable = twl6030_pwm_disable,
+ .apply = twl6030_pwm_apply,
.owner = THIS_MODULE,
};
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 480bfc29782f..7170a315535b 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -70,7 +70,7 @@ static inline void vt8500_pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 b
}
static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+ u64 duty_ns, u64 period_ns)
{
struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
unsigned long long c;
@@ -102,8 +102,8 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
c = (unsigned long long)pv * duty_ns;
- do_div(c, period_ns);
- dc = c;
+
+ dc = div64_u64(c, period_ns);
writel(prescale, vt8500->base + REG_SCALAR(pwm->hwpwm));
vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE);
@@ -176,11 +176,54 @@ static int vt8500_pwm_set_polarity(struct pwm_chip *chip,
return 0;
}
+static int vt8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+ bool enabled = pwm->state.enabled;
+
+ if (state->polarity != pwm->state.polarity) {
+ /*
+ * Changing the polarity of a running PWM is only allowed when
+ * the PWM driver implements ->apply().
+ */
+ if (enabled) {
+ vt8500_pwm_disable(chip, pwm);
+
+ enabled = false;
+ }
+
+ err = vt8500_pwm_set_polarity(chip, pwm, state->polarity);
+ if (err)
+ return err;
+ }
+
+ if (!state->enabled) {
+ if (enabled)
+ vt8500_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ /*
+ * We cannot skip calling ->config even if state->period ==
+ * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
+ * because we might have exited early in the last call to
+ * pwm_apply_state because of !state->enabled and so the two values in
+ * pwm->state might not be configured in hardware.
+ */
+ err = vt8500_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!enabled)
+ err = vt8500_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops vt8500_pwm_ops = {
- .enable = vt8500_pwm_enable,
- .disable = vt8500_pwm_disable,
- .config = vt8500_pwm_config,
- .set_polarity = vt8500_pwm_set_polarity,
+ .apply = vt8500_pwm_apply,
.owner = THIS_MODULE,
};
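The vt8500_pwm_config() hunk replaces do_div() with div64_u64() because both operands are now u64: do_div() only divides by a 32-bit divisor, and on 32-bit kernels a plain '/' between two u64 values does not link. A standalone userspace illustration of the arithmetic (values made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pv = 4095;			/* period register value */
	uint64_t duty_ns = 3000000000ULL;	/* 3 s, exceeds 32 bits */
	uint64_t period_ns = 6000000000ULL;	/* 6 s */
	uint64_t dc = pv * duty_ns / period_ns;	/* 2047; userspace can use '/' */

	printf("duty register value: %llu\n", (unsigned long long)dc);
	return 0;
}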
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
index 3e18f9c51e29..02771ba3e54f 100644
--- a/drivers/rapidio/switches/Kconfig
+++ b/drivers/rapidio/switches/Kconfig
@@ -2,22 +2,11 @@
#
# RapidIO switches configuration
#
-config RAPIDIO_TSI57X
- tristate "IDT Tsi57x SRIO switches support"
- help
- Includes support for IDT Tsi57x family of serial RapidIO switches.
-
config RAPIDIO_CPS_XX
tristate "IDT CPS-xx SRIO switches support"
help
Includes support for IDT CPS-16/12/10/8 serial RapidIO switches.
-config RAPIDIO_TSI568
- tristate "Tsi568 SRIO switch support"
- default n
- help
- Includes support for IDT Tsi568 serial RapidIO switch.
-
config RAPIDIO_CPS_GEN2
tristate "IDT CPS Gen.2 SRIO switch support"
default n
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index 69e7de31e41c..ef1749a79c2b 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -3,8 +3,6 @@
# Makefile for RIO switches
#
-obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
-obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o
obj-$(CONFIG_RAPIDIO_RXS_GEN3) += idt_gen3.o
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
deleted file mode 100644
index 103b48a24980..000000000000
--- a/drivers/rapidio/switches/tsi568.c
+++ /dev/null
@@ -1,195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RapidIO Tsi568 switch support
- *
- * Copyright 2009-2010 Integrated Device Technology, Inc.
- * Alexandre Bounine <alexandre.bounine@idt.com>
- * - Added EM support
- * - Modified switch operations initialization.
- *
- * Copyright 2005 MontaVista Software, Inc.
- * Matt Porter <mporter@kernel.crashing.org>
- */
-
-#include <linux/rio.h>
-#include <linux/rio_drv.h>
-#include <linux/rio_ids.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include "../rio.h"
-
-/* Global (broadcast) route registers */
-#define SPBC_ROUTE_CFG_DESTID 0x10070
-#define SPBC_ROUTE_CFG_PORT 0x10074
-
-/* Per port route registers */
-#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
-#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
-
-#define TSI568_SP_MODE(n) (0x11004 + 0x100*n)
-#define TSI568_SP_MODE_PW_DIS 0x08000000
-
-static int
-tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table, u16 route_destid, u8 route_port)
-{
- if (table == RIO_GLOBAL_TABLE) {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_DESTID, route_destid);
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_PORT, route_port);
- } else {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table),
- route_destid);
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table), route_port);
- }
-
- udelay(10);
-
- return 0;
-}
-
-static int
-tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table, u16 route_destid, u8 *route_port)
-{
- int ret = 0;
- u32 result;
-
- if (table == RIO_GLOBAL_TABLE) {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_DESTID, route_destid);
- rio_mport_read_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_PORT, &result);
- } else {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table),
- route_destid);
- rio_mport_read_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table), &result);
- }
-
- *route_port = result;
- if (*route_port > 15)
- ret = -1;
-
- return ret;
-}
-
-static int
-tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table)
-{
- u32 route_idx;
- u32 lut_size;
-
- lut_size = (mport->sys_size) ? 0x1ff : 0xff;
-
- if (table == RIO_GLOBAL_TABLE) {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_DESTID, 0x80000000);
- for (route_idx = 0; route_idx <= lut_size; route_idx++)
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_PORT,
- RIO_INVALID_ROUTE);
- } else {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table),
- 0x80000000);
- for (route_idx = 0; route_idx <= lut_size; route_idx++)
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table),
- RIO_INVALID_ROUTE);
- }
-
- return 0;
-}
-
-static int
-tsi568_em_init(struct rio_dev *rdev)
-{
- u32 regval;
- int portnum;
-
- pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
-
- /* Make sure that Port-Writes are disabled (for all ports) */
- for (portnum = 0;
- portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
- rio_read_config_32(rdev, TSI568_SP_MODE(portnum), &regval);
- rio_write_config_32(rdev, TSI568_SP_MODE(portnum),
- regval | TSI568_SP_MODE_PW_DIS);
- }
-
- return 0;
-}
-
-static struct rio_switch_ops tsi568_switch_ops = {
- .owner = THIS_MODULE,
- .add_entry = tsi568_route_add_entry,
- .get_entry = tsi568_route_get_entry,
- .clr_table = tsi568_route_clr_table,
- .set_domain = NULL,
- .get_domain = NULL,
- .em_init = tsi568_em_init,
- .em_handle = NULL,
-};
-
-static int tsi568_probe(struct rio_dev *rdev, const struct rio_device_id *id)
-{
- pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
-
- spin_lock(&rdev->rswitch->lock);
-
- if (rdev->rswitch->ops) {
- spin_unlock(&rdev->rswitch->lock);
- return -EINVAL;
- }
-
- rdev->rswitch->ops = &tsi568_switch_ops;
- spin_unlock(&rdev->rswitch->lock);
- return 0;
-}
-
-static void tsi568_remove(struct rio_dev *rdev)
-{
- pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
- spin_lock(&rdev->rswitch->lock);
- if (rdev->rswitch->ops != &tsi568_switch_ops) {
- spin_unlock(&rdev->rswitch->lock);
- return;
- }
- rdev->rswitch->ops = NULL;
- spin_unlock(&rdev->rswitch->lock);
-}
-
-static const struct rio_device_id tsi568_id_table[] = {
- {RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)},
- { 0, } /* terminate list */
-};
-
-static struct rio_driver tsi568_driver = {
- .name = "tsi568",
- .id_table = tsi568_id_table,
- .probe = tsi568_probe,
- .remove = tsi568_remove,
-};
-
-static int __init tsi568_init(void)
-{
- return rio_register_driver(&tsi568_driver);
-}
-
-static void __exit tsi568_exit(void)
-{
- rio_unregister_driver(&tsi568_driver);
-}
-
-device_initcall(tsi568_init);
-module_exit(tsi568_exit);
-
-MODULE_DESCRIPTION("IDT Tsi568 Serial RapidIO switch driver");
-MODULE_AUTHOR("Integrated Device Technology, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
deleted file mode 100644
index 271762046f8c..000000000000
--- a/drivers/rapidio/switches/tsi57x.c
+++ /dev/null
@@ -1,365 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RapidIO Tsi57x switch family support
- *
- * Copyright 2009-2010 Integrated Device Technology, Inc.
- * Alexandre Bounine <alexandre.bounine@idt.com>
- * - Added EM support
- * - Modified switch operations initialization.
- *
- * Copyright 2005 MontaVista Software, Inc.
- * Matt Porter <mporter@kernel.crashing.org>
- */
-
-#include <linux/rio.h>
-#include <linux/rio_drv.h>
-#include <linux/rio_ids.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include "../rio.h"
-
-/* Global (broadcast) route registers */
-#define SPBC_ROUTE_CFG_DESTID 0x10070
-#define SPBC_ROUTE_CFG_PORT 0x10074
-
-/* Per port route registers */
-#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
-#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
-
-#define TSI578_SP_MODE(n) (0x11004 + n*0x100)
-#define TSI578_SP_MODE_GLBL 0x10004
-#define TSI578_SP_MODE_PW_DIS 0x08000000
-#define TSI578_SP_MODE_LUT_512 0x01000000
-
-#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100)
-#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100)
-#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100)
-#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100)
-
-#define TSI578_GLBL_ROUTE_BASE 0x10078
-
-static int
-tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table, u16 route_destid, u8 route_port)
-{
- if (table == RIO_GLOBAL_TABLE) {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_DESTID, route_destid);
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_PORT, route_port);
- } else {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table), route_destid);
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table), route_port);
- }
-
- udelay(10);
-
- return 0;
-}
-
-static int
-tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table, u16 route_destid, u8 *route_port)
-{
- int ret = 0;
- u32 result;
-
- if (table == RIO_GLOBAL_TABLE) {
- /* Use local RT of the ingress port to avoid possible
- race condition */
- rio_mport_read_config_32(mport, destid, hopcount,
- RIO_SWP_INFO_CAR, &result);
- table = (result & RIO_SWP_INFO_PORT_NUM_MASK);
- }
-
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table), route_destid);
- rio_mport_read_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table), &result);
-
- *route_port = (u8)result;
- if (*route_port > 15)
- ret = -1;
-
- return ret;
-}
-
-static int
-tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table)
-{
- u32 route_idx;
- u32 lut_size;
-
- lut_size = (mport->sys_size) ? 0x1ff : 0xff;
-
- if (table == RIO_GLOBAL_TABLE) {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_DESTID, 0x80000000);
- for (route_idx = 0; route_idx <= lut_size; route_idx++)
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_PORT,
- RIO_INVALID_ROUTE);
- } else {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table), 0x80000000);
- for (route_idx = 0; route_idx <= lut_size; route_idx++)
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table) , RIO_INVALID_ROUTE);
- }
-
- return 0;
-}
-
-static int
-tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
- u8 sw_domain)
-{
- u32 regval;
-
- /*
- * Switch domain configuration operates only at global level
- */
-
- /* Turn off flat (LUT_512) mode */
- rio_mport_read_config_32(mport, destid, hopcount,
- TSI578_SP_MODE_GLBL, &regval);
- rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL,
- regval & ~TSI578_SP_MODE_LUT_512);
- /* Set switch domain base */
- rio_mport_write_config_32(mport, destid, hopcount,
- TSI578_GLBL_ROUTE_BASE,
- (u32)(sw_domain << 24));
- return 0;
-}
-
-static int
-tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
- u8 *sw_domain)
-{
- u32 regval;
-
- /*
- * Switch domain configuration operates only at global level
- */
- rio_mport_read_config_32(mport, destid, hopcount,
- TSI578_GLBL_ROUTE_BASE, &regval);
-
- *sw_domain = (u8)(regval >> 24);
-
- return 0;
-}
-
-static int
-tsi57x_em_init(struct rio_dev *rdev)
-{
- u32 regval;
- int portnum;
-
- pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
-
- for (portnum = 0;
- portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
- /* Make sure that Port-Writes are enabled (for all ports) */
- rio_read_config_32(rdev,
- TSI578_SP_MODE(portnum), &regval);
- rio_write_config_32(rdev,
- TSI578_SP_MODE(portnum),
- regval & ~TSI578_SP_MODE_PW_DIS);
-
- /* Clear all pending interrupts */
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
- &regval);
- rio_write_config_32(rdev,
- RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
- regval & 0x07120214);
-
- rio_read_config_32(rdev,
- TSI578_SP_INT_STATUS(portnum), &regval);
- rio_write_config_32(rdev,
- TSI578_SP_INT_STATUS(portnum),
- regval & 0x000700bd);
-
- /* Enable all interrupts to allow ports to send a port-write */
- rio_read_config_32(rdev,
- TSI578_SP_CTL_INDEP(portnum), &regval);
- rio_write_config_32(rdev,
- TSI578_SP_CTL_INDEP(portnum),
- regval | 0x000b0000);
-
- /* Skip next (odd) port if the current port is in x4 mode */
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
- &regval);
- if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
- portnum++;
- }
-
- /* set TVAL = ~50us */
- rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
-
- return 0;
-}
-
-static int
-tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
-{
- struct rio_mport *mport = rdev->net->hport;
- u32 intstat, err_status;
- int sendcount, checkcount;
- u8 route_port;
- u32 regval;
-
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
- &err_status);
-
- if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
- (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
- RIO_PORT_N_ERR_STS_INP_ES))) {
- /* Remove any queued packets by locking/unlocking port */
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
- &regval);
- if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
- rio_write_config_32(rdev,
- RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
- regval | RIO_PORT_N_CTL_LOCKOUT);
- udelay(50);
- rio_write_config_32(rdev,
- RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
- regval);
- }
-
- /* Read from link maintenance response register to clear
- * valid bit
- */
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, portnum),
- &regval);
-
- /* Send a Packet-Not-Accepted/Link-Request-Input-Status control
- * symbol to recover from IES/OES
- */
- sendcount = 3;
- while (sendcount) {
- rio_write_config_32(rdev,
- TSI578_SP_CS_TX(portnum), 0x40fc8000);
- checkcount = 3;
- while (checkcount--) {
- udelay(50);
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_MNT_RSP_CSR(rdev,
- portnum),
- &regval);
- if (regval & RIO_PORT_N_MNT_RSP_RVAL)
- goto exit_es;
- }
-
- sendcount--;
- }
- }
-
-exit_es:
- /* Clear implementation specific error status bits */
- rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat);
- pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
- rdev->destid, rdev->hopcount, portnum, intstat);
-
- if (intstat & 0x10000) {
- rio_read_config_32(rdev,
- TSI578_SP_LUT_PEINF(portnum), &regval);
- regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
- route_port = rdev->rswitch->route_table[regval];
- pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
- rio_name(rdev), portnum, regval);
- tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount,
- RIO_GLOBAL_TABLE, regval, route_port);
- }
-
- rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum),
- intstat & 0x000700bd);
-
- return 0;
-}
-
-static struct rio_switch_ops tsi57x_switch_ops = {
- .owner = THIS_MODULE,
- .add_entry = tsi57x_route_add_entry,
- .get_entry = tsi57x_route_get_entry,
- .clr_table = tsi57x_route_clr_table,
- .set_domain = tsi57x_set_domain,
- .get_domain = tsi57x_get_domain,
- .em_init = tsi57x_em_init,
- .em_handle = tsi57x_em_handler,
-};
-
-static int tsi57x_probe(struct rio_dev *rdev, const struct rio_device_id *id)
-{
- pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
-
- spin_lock(&rdev->rswitch->lock);
-
- if (rdev->rswitch->ops) {
- spin_unlock(&rdev->rswitch->lock);
- return -EINVAL;
- }
- rdev->rswitch->ops = &tsi57x_switch_ops;
-
- if (rdev->do_enum) {
- /* Ensure that default routing is disabled on startup */
- rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT,
- RIO_INVALID_ROUTE);
- }
-
- spin_unlock(&rdev->rswitch->lock);
- return 0;
-}
-
-static void tsi57x_remove(struct rio_dev *rdev)
-{
- pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
- spin_lock(&rdev->rswitch->lock);
- if (rdev->rswitch->ops != &tsi57x_switch_ops) {
- spin_unlock(&rdev->rswitch->lock);
- return;
- }
- rdev->rswitch->ops = NULL;
- spin_unlock(&rdev->rswitch->lock);
-}
-
-static const struct rio_device_id tsi57x_id_table[] = {
- {RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)},
- {RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)},
- {RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)},
- {RIO_DEVICE(RIO_DID_TSI578, RIO_VID_TUNDRA)},
- { 0, } /* terminate list */
-};
-
-static struct rio_driver tsi57x_driver = {
- .name = "tsi57x",
- .id_table = tsi57x_id_table,
- .probe = tsi57x_probe,
- .remove = tsi57x_remove,
-};
-
-static int __init tsi57x_init(void)
-{
- return rio_register_driver(&tsi57x_driver);
-}
-
-static void __exit tsi57x_exit(void)
-{
- rio_unregister_driver(&tsi57x_driver);
-}
-
-device_initcall(tsi57x_init);
-module_exit(tsi57x_exit);
-
-MODULE_DESCRIPTION("IDT Tsi57x Serial RapidIO switch family driver");
-MODULE_AUTHOR("Integrated Device Technology, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index d7894f178bd4..42f2fc0bc8a9 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -38,7 +38,7 @@
* elements entered into the array, during which, we're decaying all elements.
* If, after decay, an element gets inserted again, its generation is set to 11b
* to make sure it has higher numerical count than other, older elements and
- * thus emulate an an LRU-like behavior when deleting elements to free up space
+ * thus emulate an LRU-like behavior when deleting elements to free up space
* in the page.
*
* When an element reaches its max count of action_threshold, we try to poison
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 6be9b1c8a615..1c35fed20d34 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -636,6 +636,15 @@ config REGULATOR_MAX8998
via I2C bus. The provided regulator is suitable for S3C6410
and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages.
+config REGULATOR_MAX20086
+ tristate "Maxim MAX20086-MAX20089 Camera Power Protectors"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver controls a Maxim MAX20086-MAX20089 camera power
+ protector via I2C bus. The regulator has 2 or 4 outputs depending on
+ the device model. This driver can only switch the outputs on and off.
+
config REGULATOR_MAX77686
tristate "Maxim 77686 regulator"
depends on MFD_MAX77686 || COMPILE_TEST
@@ -1339,6 +1348,15 @@ config REGULATOR_TPS65912
help
This driver supports TPS65912 voltage regulator chip.
+config REGULATOR_TPS68470
+ tristate "TI TPS68470 PMIC Regulators Driver"
+ depends on INTEL_SKL_INT3472 || COMPILE_TEST
+ help
+ This driver adds support for the TPS68470 PMIC to register
+ regulators against the usual framework.
+
+ The module will be called "tps68470-regulator".
+
config REGULATOR_TWL4030
tristate "TI TWL4030/TWL5030/TWL6030/TPS659x0 PMIC"
depends on TWL4030_CORE
@@ -1415,4 +1433,3 @@ config REGULATOR_QCOM_LABIBB
for LCD display panel.
endif
-
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b07d2a22df0b..2e1b087489fa 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
obj-$(CONFIG_REGULATOR_MAX8973) += max8973-regulator.o
obj-$(CONFIG_REGULATOR_MAX8997) += max8997-regulator.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
+obj-$(CONFIG_REGULATOR_MAX20086) += max20086-regulator.o
obj-$(CONFIG_REGULATOR_MAX77686) += max77686-regulator.o
obj-$(CONFIG_REGULATOR_MAX77693) += max77693-regulator.o
obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
@@ -159,6 +160,7 @@ obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_TPS65132) += tps65132-regulator.o
+obj-$(CONFIG_REGULATOR_TPS68470) += tps68470-regulator.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o
obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o
obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index d60fccedb250..00efb18a836c 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -125,27 +125,6 @@ static int bd71837_get_buck34_enable_hwctrl(struct regulator_dev *rdev)
return !!(BD718XX_BUCK_RUN_ON & val);
}
-/*
- * On BD71837 (not on BD71847, BD71850, ...)
- * Bucks 1 to 4 support DVS. PWM mode is used when voltage is changed.
- * Bucks 5 to 8 and LDOs can use PFM and must be disabled when voltage
- * is changed. Hence we return -EBUSY for these if voltage is changed
- * when BUCK/LDO is enabled.
- *
- * On BD71847, BD71850, ... The LDO voltage can be changed when LDO is
- * enabled. But if voltage is increased the LDO power-good monitoring
- * must be disabled for the duration of changing + 1mS to ensure voltage
- * has reached the higher level before HW does next under voltage detection
- * cycle.
- */
-static int bd71837_set_voltage_sel_restricted(struct regulator_dev *rdev,
- unsigned int sel)
-{
- if (rdev->desc->ops->is_enabled(rdev))
- return -EBUSY;
-
- return regulator_set_voltage_sel_regmap(rdev, sel);
-}
static void voltage_change_done(struct regulator_dev *rdev, unsigned int sel,
unsigned int *mask)
@@ -642,22 +621,22 @@ BD718XX_OPS(bd71837_pickable_range_buck_ops,
bd718x7_set_buck_ovp);
BD718XX_OPS(bd71837_ldo_regulator_ops, regulator_list_voltage_linear_range,
- NULL, bd71837_set_voltage_sel_restricted,
+ NULL, rohm_regulator_set_voltage_sel_restricted,
regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
NULL);
BD718XX_OPS(bd71837_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
- NULL, bd71837_set_voltage_sel_restricted,
+ NULL, rohm_regulator_set_voltage_sel_restricted,
regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
NULL);
BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
- NULL, bd71837_set_voltage_sel_restricted,
+ NULL, rohm_regulator_set_voltage_sel_restricted,
regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
- regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
+ regulator_map_voltage_ascend, rohm_regulator_set_voltage_sel_restricted,
regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
/*
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index e66925090258..6f21223a488e 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -86,6 +86,22 @@ static struct da9121_range da9121_3A_1phase_current = {
.reg_max = 6,
};
+static struct da9121_range da914x_40A_4phase_current = {
+ .val_min = 14000000,
+ .val_max = 80000000,
+ .val_stp = 2000000,
+ .reg_min = 1,
+ .reg_max = 14,
+};
+
+static struct da9121_range da914x_20A_2phase_current = {
+ .val_min = 7000000,
+ .val_max = 40000000,
+ .val_stp = 2000000,
+ .reg_min = 1,
+ .reg_max = 14,
+};
+
struct da9121_variant_info {
int num_bucks;
int num_phases;
@@ -97,6 +113,8 @@ static const struct da9121_variant_info variant_parameters[] = {
{ 2, 1, &da9121_3A_1phase_current }, //DA9121_TYPE_DA9220_DA9132
{ 2, 1, &da9121_5A_1phase_current }, //DA9121_TYPE_DA9122_DA9131
{ 1, 2, &da9121_6A_2phase_current }, //DA9121_TYPE_DA9217
+ { 1, 4, &da914x_40A_4phase_current }, //DA9121_TYPE_DA9141
+ { 1, 2, &da914x_20A_2phase_current }, //DA9121_TYPE_DA9142
};
struct da9121_field {
@@ -253,6 +271,11 @@ static int da9121_set_current_limit(struct regulator_dev *rdev,
goto error;
}
+ if (rdev->desc->ops->is_enabled(rdev)) {
+ ret = -EBUSY;
+ goto error;
+ }
+
ret = da9121_ceiling_selector(rdev, min_ua, max_ua, &sel);
if (ret < 0)
goto error;
@@ -537,11 +560,65 @@ static const struct regulator_desc da9217_reg = {
.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
};
+#define DA914X_MIN_MV 500
+#define DA914X_MAX_MV 1000
+#define DA914X_STEP_MV 10
+#define DA914X_MIN_SEL (DA914X_MIN_MV / DA914X_STEP_MV)
+#define DA914X_N_VOLTAGES (((DA914X_MAX_MV - DA914X_MIN_MV) / DA914X_STEP_MV) \
+ + 1 + DA914X_MIN_SEL)
+
+static const struct regulator_desc da9141_reg = {
+ .id = DA9121_IDX_BUCK1,
+ .name = "DA9141",
+ .of_match = "buck1",
+ .of_parse_cb = da9121_of_parse_cb,
+ .owner = THIS_MODULE,
+ .regulators_node = of_match_ptr("regulators"),
+ .of_map_mode = da9121_map_mode,
+ .ops = &da9121_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = DA914X_N_VOLTAGES,
+ .min_uV = DA914X_MIN_MV * 1000,
+ .uV_step = DA914X_STEP_MV * 1000,
+ .linear_min_sel = DA914X_MIN_SEL,
+ .vsel_reg = DA9121_REG_BUCK_BUCK1_5,
+ .vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
+ .enable_reg = DA9121_REG_BUCK_BUCK1_0,
+ .enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
+ /* Default value of BUCK_BUCK1_0.CH1_SRC_DVC_UP */
+ .ramp_delay = 20000,
+ /* tBUCK_EN */
+ .enable_time = 20,
+};
+
+static const struct regulator_desc da9142_reg = {
+ .id = DA9121_IDX_BUCK1,
+ .name = "DA9142 BUCK1",
+ .of_match = "buck1",
+ .of_parse_cb = da9121_of_parse_cb,
+ .owner = THIS_MODULE,
+ .regulators_node = of_match_ptr("regulators"),
+ .of_map_mode = da9121_map_mode,
+ .ops = &da9121_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = DA914X_N_VOLTAGES,
+ .min_uV = DA914X_MIN_MV * 1000,
+ .uV_step = DA914X_STEP_MV * 1000,
+ .linear_min_sel = DA914X_MIN_SEL,
+ .enable_reg = DA9121_REG_BUCK_BUCK1_0,
+ .enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
+ .vsel_reg = DA9121_REG_BUCK_BUCK1_5,
+ .vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
+};
+
+
static const struct regulator_desc *local_da9121_regulators[][DA9121_IDX_MAX] = {
[DA9121_TYPE_DA9121_DA9130] = { &da9121_reg, NULL },
[DA9121_TYPE_DA9220_DA9132] = { &da9220_reg[0], &da9220_reg[1] },
[DA9121_TYPE_DA9122_DA9131] = { &da9122_reg[0], &da9122_reg[1] },
[DA9121_TYPE_DA9217] = { &da9217_reg, NULL },
+ [DA9121_TYPE_DA9141] = { &da9141_reg, NULL },
+ [DA9121_TYPE_DA9142] = { &da9142_reg, NULL },
};
static void da9121_status_poll_on(struct work_struct *work)
@@ -835,7 +912,7 @@ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
goto error;
}
- if (device_id != DA9121_DEVICE_ID) {
+ if ((device_id != DA9121_DEVICE_ID) && (device_id != DA914x_DEVICE_ID)) {
dev_err(chip->dev, "Invalid device ID: 0x%02x\n", device_id);
ret = -ENODEV;
goto error;
@@ -877,6 +954,22 @@ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
break;
}
+ if (device_id == DA914x_DEVICE_ID) {
+ switch (chip->subvariant_id) {
+ case DA9121_SUBTYPE_DA9141:
+ type = "DA9141";
+ config_match = (variant_vrc == DA9141_VARIANT_VRC);
+ break;
+ case DA9121_SUBTYPE_DA9142:
+ type = "DA9142";
+ config_match = (variant_vrc == DA9142_VARIANT_VRC);
+ break;
+ default:
+ type = "Unknown";
+ break;
+ }
+ }
+
dev_info(chip->dev,
"Device detected (device-ID: 0x%02X, var-ID: 0x%02X, %s)\n",
device_id, variant_id, type);
@@ -890,8 +983,10 @@ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
variant_mrc = (variant_id & DA9121_MASK_OTP_VARIANT_ID_MRC)
>> DA9121_SHIFT_OTP_VARIANT_ID_MRC;
- if ((device_id == DA9121_DEVICE_ID) &&
- (variant_mrc < DA9121_VARIANT_MRC_BASE)) {
+ if (((device_id == DA9121_DEVICE_ID) &&
+ (variant_mrc < DA9121_VARIANT_MRC_BASE)) ||
+ ((device_id == DA914x_DEVICE_ID) &&
+ (variant_mrc != DA914x_VARIANT_MRC_BASE))) {
dev_err(chip->dev,
"Cannot support variant MRC: 0x%02X\n", variant_mrc);
ret = -EINVAL;
@@ -931,6 +1026,14 @@ static int da9121_assign_chip_model(struct i2c_client *i2c,
chip->variant_id = DA9121_TYPE_DA9220_DA9132;
regmap = &da9121_2ch_regmap_config;
break;
+ case DA9121_SUBTYPE_DA9141:
+ chip->variant_id = DA9121_TYPE_DA9141;
+ regmap = &da9121_1ch_regmap_config;
+ break;
+ case DA9121_SUBTYPE_DA9142:
+ chip->variant_id = DA9121_TYPE_DA9142;
+ regmap = &da9121_2ch_regmap_config;
+ break;
}
/* Set these up for of_regulator_match call which may want .of_map_modes */
@@ -1010,6 +1113,8 @@ static const struct of_device_id da9121_dt_ids[] = {
{ .compatible = "dlg,da9131", .data = (void *) DA9121_SUBTYPE_DA9131 },
{ .compatible = "dlg,da9220", .data = (void *) DA9121_SUBTYPE_DA9220 },
{ .compatible = "dlg,da9132", .data = (void *) DA9121_SUBTYPE_DA9132 },
+ { .compatible = "dlg,da9141", .data = (void *) DA9121_SUBTYPE_DA9141 },
+ { .compatible = "dlg,da9142", .data = (void *) DA9121_SUBTYPE_DA9142 },
{ }
};
MODULE_DEVICE_TABLE(of, da9121_dt_ids);
@@ -1065,7 +1170,7 @@ static int da9121_i2c_remove(struct i2c_client *i2c)
{
struct da9121 *chip = i2c_get_clientdata(i2c);
const int mask_all[4] = { 0xFF, 0xFF, 0xFF, 0xFF };
- int ret = 0;
+ int ret;
free_irq(chip->chip_irq, chip);
cancel_delayed_work_sync(&chip->work);
@@ -1073,7 +1178,7 @@ static int da9121_i2c_remove(struct i2c_client *i2c)
ret = regmap_bulk_write(chip->regmap, DA9121_REG_SYS_MASK_0, mask_all, 4);
if (ret != 0)
dev_err(chip->dev, "Failed to set IRQ masks: %d\n", ret);
- return ret;
+ return 0;
}
static const struct i2c_device_id da9121_i2c_id[] = {
@@ -1084,6 +1189,8 @@ static const struct i2c_device_id da9121_i2c_id[] = {
{"da9131", DA9121_TYPE_DA9122_DA9131},
{"da9220", DA9121_TYPE_DA9220_DA9132},
{"da9132", DA9121_TYPE_DA9220_DA9132},
+ {"da9141", DA9121_TYPE_DA9141},
+ {"da9142", DA9121_TYPE_DA9142},
{},
};
MODULE_DEVICE_TABLE(i2c, da9121_i2c_id);
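The DA914x selector math above reserves selectors below DA914X_MIN_SEL and maps the rest linearly, following the regulator framework's voltage = min_uV + (sel - linear_min_sel) * uV_step, which gives 500-1000 mV in 10 mV steps. A standalone check of that arithmetic:

#include <stdio.h>

#define DA914X_MIN_MV	500
#define DA914X_MAX_MV	1000
#define DA914X_STEP_MV	10
#define DA914X_MIN_SEL	(DA914X_MIN_MV / DA914X_STEP_MV)
#define DA914X_N_VOLTAGES (((DA914X_MAX_MV - DA914X_MIN_MV) / DA914X_STEP_MV) \
			   + 1 + DA914X_MIN_SEL)

int main(void)
{
	int sel;

	for (sel = DA914X_MIN_SEL; sel < DA914X_N_VOLTAGES; sel++)
		printf("sel %3d -> %4d mV\n", sel,
		       DA914X_MIN_MV + (sel - DA914X_MIN_SEL) * DA914X_STEP_MV);

	return 0;
}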
diff --git a/drivers/regulator/da9121-regulator.h b/drivers/regulator/da9121-regulator.h
index 357f416e17c1..a328a0bdfa29 100644
--- a/drivers/regulator/da9121-regulator.h
+++ b/drivers/regulator/da9121-regulator.h
@@ -26,7 +26,9 @@ enum da9121_variant {
DA9121_TYPE_DA9121_DA9130,
DA9121_TYPE_DA9220_DA9132,
DA9121_TYPE_DA9122_DA9131,
- DA9121_TYPE_DA9217
+ DA9121_TYPE_DA9217,
+ DA9121_TYPE_DA9141,
+ DA9121_TYPE_DA9142
};
enum da9121_subvariant {
@@ -36,7 +38,9 @@ enum da9121_subvariant {
DA9121_SUBTYPE_DA9132,
DA9121_SUBTYPE_DA9122,
DA9121_SUBTYPE_DA9131,
- DA9121_SUBTYPE_DA9217
+ DA9121_SUBTYPE_DA9217,
+ DA9121_SUBTYPE_DA9141,
+ DA9121_SUBTYPE_DA9142
};
/* Minimum, maximum and default polling millisecond periods are provided
@@ -70,6 +74,14 @@ enum da9121_subvariant {
#define DA9121_REG_SYS_GPIO1_1 0x13
#define DA9121_REG_SYS_GPIO2_0 0x14
#define DA9121_REG_SYS_GPIO2_1 0x15
+#define DA914x_REG_SYS_GPIO3_0 0x16
+#define DA914x_REG_SYS_GPIO3_1 0x17
+#define DA914x_REG_SYS_GPIO4_0 0x18
+#define DA914x_REG_SYS_GPIO4_1 0x19
+#define DA914x_REG_SYS_ADMUX1_0 0x1A
+#define DA914x_REG_SYS_ADMUX1_1 0x1B
+#define DA914x_REG_SYS_ADMUX2_0 0x1C
+#define DA914x_REG_SYS_ADMUX2_1 0x1D
#define DA9121_REG_BUCK_BUCK1_0 0x20
#define DA9121_REG_BUCK_BUCK1_1 0x21
#define DA9121_REG_BUCK_BUCK1_2 0x22
@@ -276,6 +288,7 @@ enum da9121_subvariant {
#define DA9121_MASK_OTP_DEVICE_ID_DEV_ID 0xFF
#define DA9121_DEVICE_ID 0x05
+#define DA914x_DEVICE_ID 0x26
/* DA9121_REG_OTP_VARIANT_ID */
@@ -293,6 +306,10 @@ enum da9121_subvariant {
#define DA9131_VARIANT_VRC 0x1
#define DA9132_VARIANT_VRC 0x2
+#define DA914x_VARIANT_MRC_BASE 0x0
+#define DA9141_VARIANT_VRC 0x1
+#define DA9142_VARIANT_VRC 0x2
+
/* DA9121_REG_OTP_CUSTOMER_ID */
#define DA9121_MASK_OTP_CUSTOMER_ID_CUST_ID 0xFF
diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
index 522764435575..fe7ae0f3f46a 100644
--- a/drivers/regulator/irq_helpers.c
+++ b/drivers/regulator/irq_helpers.c
@@ -320,7 +320,9 @@ static void init_rdev_errors(struct regulator_irq *h)
* IRQF_ONESHOT when requesting the (threaded) irq.
* @common_errs: Errors which can be flagged by this IRQ for all rdevs.
* When IRQ is re-enabled these errors will be cleared
- * from all associated regulators
+ * from all associated regulators. Use this instead of the
+ * per_rdev_errs if you use
+ * regulator_irq_map_event_simple() for event mapping.
* @per_rdev_errs: Optional error flag array describing errors specific
* for only some of the regulators. These errors will be
* or'ed with common errors. If this is given the array
@@ -395,3 +397,40 @@ void regulator_irq_helper_cancel(void **handle)
}
}
EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel);
+
+/**
+ * regulator_irq_map_event_simple - regulator IRQ notification for trivial IRQs
+ *
+ * @irq: Number of IRQ that occurred
+ * @rid: Information about the event the IRQ indicates
+ * @dev_mask: mask indicating the regulator originating the IRQ
+ *
+ * Regulators whose IRQ has a single, well-defined purpose (it always
+ * indicates exactly one event and is relevant to exactly one regulator
+ * device) can use this function as the map_event callback for their
+ * regulator IRQ notification helper. Exactly one rdev and exactly one
+ * error (in the "common_errs" field) can be given at IRQ helper
+ * registration for regulator_irq_map_event_simple() to be viable.
+ */
+int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ int err = rid->states[0].possible_errs;
+
+ *dev_mask = 1;
+ /*
+ * This helper should only be used in a situation where the IRQ
+ * can indicate only one type of problem for one specific rdev.
+ * Something fishy is going on if we are having multiple rdevs or ERROR
+ * flags here.
+ */
+ if (WARN_ON(rid->num_states != 1 || hweight32(err) != 1))
+ return 0;
+
+ rid->states[0].errors = err;
+ rid->states[0].notifs = regulator_err2notif(err);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regulator_irq_map_event_simple);
+
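
The helper's contract (one rdev, one error bit) is easiest to see from a usage sketch. The descriptor name, IRQ name, and error bit below are illustrative, not from this patch; the returned handle would normally be kept for regulator_irq_helper_cancel():

    #include <linux/regulator/driver.h>

    static const struct regulator_irq_desc my_uv_desc = {
            .name = "my-pmic-uvlo",                 /* hypothetical */
            .map_event = regulator_irq_map_event_simple,
    };

    static int my_pmic_init_irq(struct device *dev, int irq,
                                struct regulator_dev *rdev)
    {
            void *helper;

            /* Exactly one rdev and one bit in common_errs, as required. */
            helper = regulator_irq_helper(dev, &my_uv_desc, irq, 0,
                                          REGULATOR_ERROR_UNDER_VOLTAGE,
                                          NULL, &rdev, 1);
            return PTR_ERR_OR_ZERO(helper);
    }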
diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
new file mode 100644
index 000000000000..fbc56b043071
--- /dev/null
+++ b/drivers/regulator/max20086-regulator.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// max20086-regulator.c - MAX20086-MAX20089 camera power protector driver
+//
+// Copyright (C) 2022 Laurent Pinchart <laurent.pinchart@idesonboard.com>
+// Copyright (C) 2018 Avnet, Inc.
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+/* Register Offset */
+#define MAX20086_REG_MASK 0x00
+#define MAX20086_REG_CONFIG 0x01
+#define MAX20086_REG_ID 0x02
+#define MAX20086_REG_STAT1 0x03
+#define MAX20086_REG_STAT2_L 0x04
+#define MAX20086_REG_STAT2_H 0x05
+#define MAX20086_REG_ADC1 0x06
+#define MAX20086_REG_ADC2 0x07
+#define MAX20086_REG_ADC3 0x08
+#define MAX20086_REG_ADC4 0x09
+
+/* DEVICE IDs */
+#define MAX20086_DEVICE_ID_MAX20086 0x40
+#define MAX20086_DEVICE_ID_MAX20087 0x20
+#define MAX20086_DEVICE_ID_MAX20088 0x10
+#define MAX20086_DEVICE_ID_MAX20089 0x00
+#define DEVICE_ID_MASK 0xf0
+
+/* Register bits */
+#define MAX20086_EN_MASK 0x0f
+#define MAX20086_EN_OUT1 0x01
+#define MAX20086_EN_OUT2 0x02
+#define MAX20086_EN_OUT3 0x04
+#define MAX20086_EN_OUT4 0x08
+#define MAX20086_INT_DISABLE_ALL 0x3f
+
+#define MAX20086_MAX_REGULATORS 4
+
+struct max20086_chip_info {
+ u8 id;
+ unsigned int num_outputs;
+};
+
+struct max20086_regulator {
+ struct device_node *of_node;
+ struct regulator_init_data *init_data;
+ const struct regulator_desc *desc;
+ struct regulator_dev *rdev;
+};
+
+struct max20086 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *ena_gpiod;
+
+ const struct max20086_chip_info *info;
+
+ struct max20086_regulator regulators[MAX20086_MAX_REGULATORS];
+};
+
+static const struct regulator_ops max20086_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+#define MAX20086_REGULATOR_DESC(n) \
+{ \
+ .name = "OUT"#n, \
+ .supply_name = "in", \
+ .id = (n) - 1, \
+ .ops = &max20086_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .enable_reg = MAX20086_REG_CONFIG, \
+ .enable_mask = 1 << ((n) - 1), \
+ .enable_val = 1 << ((n) - 1), \
+ .disable_val = 0, \
+}
+
+static const char * const max20086_output_names[] = {
+ "OUT1",
+ "OUT2",
+ "OUT3",
+ "OUT4",
+};
+
+static const struct regulator_desc max20086_regulators[] = {
+ MAX20086_REGULATOR_DESC(1),
+ MAX20086_REGULATOR_DESC(2),
+ MAX20086_REGULATOR_DESC(3),
+ MAX20086_REGULATOR_DESC(4),
+};
+
+static int max20086_regulators_register(struct max20086 *chip)
+{
+ unsigned int i;
+
+ for (i = 0; i < chip->info->num_outputs; i++) {
+ struct max20086_regulator *reg = &chip->regulators[i];
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ config.dev = chip->dev;
+ config.init_data = reg->init_data;
+ config.driver_data = chip;
+ config.of_node = reg->of_node;
+ config.regmap = chip->regmap;
+ config.ena_gpiod = chip->ena_gpiod;
+
+ rdev = devm_regulator_register(chip->dev, reg->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(chip->dev,
+ "Failed to register regulator output %s\n",
+ reg->desc->name);
+ return PTR_ERR(rdev);
+ }
+
+ reg->rdev = rdev;
+ }
+
+ return 0;
+}
+
+static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
+{
+ struct of_regulator_match matches[MAX20086_MAX_REGULATORS] = { };
+ struct device_node *node;
+ unsigned int i;
+ int ret;
+
+ node = of_get_child_by_name(chip->dev->of_node, "regulators");
+ if (!node) {
+ dev_err(chip->dev, "regulators node not found\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < chip->info->num_outputs; ++i)
+ matches[i].name = max20086_output_names[i];
+
+ ret = of_regulator_match(chip->dev, node, matches,
+ chip->info->num_outputs);
+ of_node_put(node);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to match regulators\n");
+ return -EINVAL;
+ }
+
+ *boot_on = false;
+
+ for (i = 0; i < chip->info->num_outputs; i++) {
+ struct max20086_regulator *reg = &chip->regulators[i];
+
+ reg->init_data = matches[i].init_data;
+ reg->of_node = matches[i].of_node;
+ reg->desc = &max20086_regulators[i];
+
+ if (reg->init_data) {
+ if (reg->init_data->constraints.always_on ||
+ reg->init_data->constraints.boot_on)
+ *boot_on = true;
+ }
+ }
+
+ return 0;
+}
+
+static int max20086_detect(struct max20086 *chip)
+{
+ unsigned int data;
+ int ret;
+
+ ret = regmap_read(chip->regmap, MAX20086_REG_ID, &data);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read DEVICE_ID reg: %d\n", ret);
+ return ret;
+ }
+
+ if ((data & DEVICE_ID_MASK) != chip->info->id) {
+ dev_err(chip->dev, "Invalid device ID 0x%02x\n", data);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static bool max20086_gen_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX20086_REG_MASK:
+ case MAX20086_REG_CONFIG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config max20086_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = max20086_gen_is_writeable_reg,
+ .max_register = 0x9,
+ .cache_type = REGCACHE_NONE,
+};
+
+static int max20086_i2c_probe(struct i2c_client *i2c)
+{
+ struct max20086 *chip;
+ enum gpiod_flags flags;
+ bool boot_on;
+ int ret;
+
+ chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &i2c->dev;
+ chip->info = device_get_match_data(chip->dev);
+
+ i2c_set_clientdata(i2c, chip);
+
+ chip->regmap = devm_regmap_init_i2c(i2c, &max20086_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err(chip->dev, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ ret = max20086_parse_regulators_dt(chip, &boot_on);
+ if (ret < 0)
+ return ret;
+
+ ret = max20086_detect(chip);
+ if (ret < 0)
+ return ret;
+
+ /* Until IRQ support is added, just disable all interrupts. */
+ ret = regmap_update_bits(chip->regmap, MAX20086_REG_MASK,
+ MAX20086_INT_DISABLE_ALL,
+ MAX20086_INT_DISABLE_ALL);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to disable interrupts: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Get the enable GPIO. If any of the outputs is marked as being
+ * enabled at boot, request the GPIO with an initial high state to
+ * avoid disabling outputs that may have been turned on by the boot
+ * loader. Otherwise, request it with a low state to enter lower-power
+ * shutdown.
+ */
+ flags = boot_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ chip->ena_gpiod = devm_gpiod_get(chip->dev, "enable", flags);
+ if (IS_ERR(chip->ena_gpiod)) {
+ ret = PTR_ERR(chip->ena_gpiod);
+ dev_err(chip->dev, "Failed to get enable GPIO: %d\n", ret);
+ return ret;
+ }
+
+ ret = max20086_regulators_register(chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to register regulators: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id max20086_i2c_id[] = {
+ { "max20086" },
+ { "max20087" },
+ { "max20088" },
+ { "max20089" },
+ { /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(i2c, max20086_i2c_id);
+
+static const struct of_device_id max20086_dt_ids[] = {
+ {
+ .compatible = "maxim,max20086",
+ .data = &(const struct max20086_chip_info) {
+ .id = MAX20086_DEVICE_ID_MAX20086,
+ .num_outputs = 4,
+ }
+ }, {
+ .compatible = "maxim,max20087",
+ .data = &(const struct max20086_chip_info) {
+ .id = MAX20086_DEVICE_ID_MAX20087,
+ .num_outputs = 4,
+ }
+ }, {
+ .compatible = "maxim,max20088",
+ .data = &(const struct max20086_chip_info) {
+ .id = MAX20086_DEVICE_ID_MAX20088,
+ .num_outputs = 2,
+ }
+ }, {
+ .compatible = "maxim,max20089",
+ .data = &(const struct max20086_chip_info) {
+ .id = MAX20086_DEVICE_ID_MAX20089,
+ .num_outputs = 2,
+ }
+ },
+ { /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, max20086_dt_ids);
+
+static struct i2c_driver max20086_regulator_driver = {
+ .driver = {
+ .name = "max20086",
+ .of_match_table = of_match_ptr(max20086_dt_ids),
+ },
+ .probe_new = max20086_i2c_probe,
+ .id_table = max20086_i2c_id,
+};
+
+module_i2c_driver(max20086_regulator_driver);
+
+MODULE_AUTHOR("Watson Chow <watson.chow@avnet.com>");
+MODULE_DESCRIPTION("MAX20086-MAX20089 Camera Power Protector Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mt6380-regulator.c b/drivers/regulator/mt6380-regulator.c
index 9efd8710a6f3..2e6b61d3b0cf 100644
--- a/drivers/regulator/mt6380-regulator.c
+++ b/drivers/regulator/mt6380-regulator.c
@@ -183,7 +183,7 @@ static const unsigned int ldo_volt_table4[] = {
static int mt6380_regulator_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
- int ret, val = 0;
+ int val = 0;
struct mt6380_regulator_info *info = rdev_get_drvdata(rdev);
switch (mode) {
@@ -199,10 +199,8 @@ static int mt6380_regulator_set_mode(struct regulator_dev *rdev,
val <<= ffs(info->modeset_mask) - 1;
- ret = regmap_update_bits(rdev->regmap, info->modeset_reg,
+ return regmap_update_bits(rdev->regmap, info->modeset_reg,
info->modeset_mask, val);
-
- return ret;
}
static unsigned int mt6380_regulator_get_mode(struct regulator_dev *rdev)
diff --git a/drivers/regulator/qcom-labibb-regulator.c b/drivers/regulator/qcom-labibb-regulator.c
index b3da0dc58782..639b71eb41ff 100644
--- a/drivers/regulator/qcom-labibb-regulator.c
+++ b/drivers/regulator/qcom-labibb-regulator.c
@@ -260,7 +260,7 @@ static irqreturn_t qcom_labibb_ocp_isr(int irq, void *chip)
/* If the regulator is not enabled, this is a fake event */
if (!ops->is_enabled(vreg->rdev))
- return 0;
+ return IRQ_HANDLED;
/* If we tried to recover for too many times it's not getting better */
if (vreg->ocp_irq_count > LABIBB_MAX_OCP_COUNT)
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 12425f667c00..a3bc0eb6ceb8 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -814,6 +814,11 @@ static const struct rpmh_vreg_init_data pm8998_vreg_data[] = {
{}
};
+static const struct rpmh_vreg_init_data pmg1110_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ {}
+};
+
static const struct rpmh_vreg_init_data pmi8998_vreg_data[] = {
RPMH_VREG("bob", "bob%s1", &pmic4_bob, "vdd-bob"),
{}
@@ -969,6 +974,20 @@ static const struct rpmh_vreg_init_data pm8350c_vreg_data[] = {
{}
};
+static const struct rpmh_vreg_init_data pm8450_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps520, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps520, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps520, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps520, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps520, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps520, "vdd-s6"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_pldo_lv, "vdd-l4"),
+ {}
+};
+
static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps510, "vdd-s1"),
RPMH_VREG("smps2", "smp%s2", &pmic5_hfsmps515, "vdd-s2"),
@@ -1214,10 +1233,18 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pm8350c_vreg_data,
},
{
+ .compatible = "qcom,pm8450-rpmh-regulators",
+ .data = pm8450_vreg_data,
+ },
+ {
.compatible = "qcom,pm8998-rpmh-regulators",
.data = pm8998_vreg_data,
},
{
+ .compatible = "qcom,pmg1110-rpmh-regulators",
+ .data = pmg1110_vreg_data,
+ },
+ {
.compatible = "qcom,pmi8998-rpmh-regulators",
.data = pmi8998_vreg_data,
},
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 8bac024dde8b..9fc666107a06 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/soc/qcom/smd-rpm.h>
struct qcom_rpm_reg {
@@ -1239,52 +1240,91 @@ static const struct of_device_id rpm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rpm_of_match);
-static int rpm_reg_probe(struct platform_device *pdev)
+/**
+ * rpm_regulator_init_vreg() - initialize all attributes of a qcom_smd-regulator
+ * @vreg: Pointer to the individual qcom_smd-regulator resource
+ * @dev: Pointer to the top level qcom_smd-regulator PMIC device
+ * @node: Pointer to the individual qcom_smd-regulator resource
+ * device node
+ * @rpm: Pointer to the rpm bus node
+ * @pmic_rpm_data: Pointer to a null-terminated array of qcom_smd-regulator
+ * resources defined for the top level PMIC device
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpm_regulator_init_vreg(struct qcom_rpm_reg *vreg, struct device *dev,
+ struct device_node *node, struct qcom_smd_rpm *rpm,
+ const struct rpm_regulator_data *pmic_rpm_data)
{
- const struct rpm_regulator_data *reg;
- const struct of_device_id *match;
- struct regulator_config config = { };
+ struct regulator_config config = {};
+ const struct rpm_regulator_data *rpm_data;
struct regulator_dev *rdev;
+ int ret;
+
+ for (rpm_data = pmic_rpm_data; rpm_data->name; rpm_data++)
+ if (of_node_name_eq(node, rpm_data->name))
+ break;
+
+ if (!rpm_data->name) {
+ dev_err(dev, "Unknown regulator %pOFn\n", node);
+ return -EINVAL;
+ }
+
+ vreg->dev = dev;
+ vreg->rpm = rpm;
+ vreg->type = rpm_data->type;
+ vreg->id = rpm_data->id;
+
+ memcpy(&vreg->desc, rpm_data->desc, sizeof(vreg->desc));
+ vreg->desc.name = rpm_data->name;
+ vreg->desc.supply_name = rpm_data->supply;
+ vreg->desc.owner = THIS_MODULE;
+ vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.of_match = rpm_data->name;
+
+ config.dev = dev;
+ config.of_node = node;
+ config.driver_data = vreg;
+
+ rdev = devm_regulator_register(dev, &vreg->desc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(dev, "%pOFn: devm_regulator_register() failed, ret=%d\n", node, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rpm_reg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct rpm_regulator_data *vreg_data;
+ struct device_node *node;
struct qcom_rpm_reg *vreg;
struct qcom_smd_rpm *rpm;
+ int ret;
rpm = dev_get_drvdata(pdev->dev.parent);
if (!rpm) {
- dev_err(&pdev->dev, "unable to retrieve handle to rpm\n");
+ dev_err(&pdev->dev, "Unable to retrieve handle to rpm\n");
return -ENODEV;
}
- match = of_match_device(rpm_of_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "failed to match device\n");
+ vreg_data = of_device_get_match_data(dev);
+ if (!vreg_data)
return -ENODEV;
- }
- for (reg = match->data; reg->name; reg++) {
+ for_each_available_child_of_node(dev->of_node, node) {
vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
if (!vreg)
return -ENOMEM;
- vreg->dev = &pdev->dev;
- vreg->type = reg->type;
- vreg->id = reg->id;
- vreg->rpm = rpm;
-
- memcpy(&vreg->desc, reg->desc, sizeof(vreg->desc));
-
- vreg->desc.id = -1;
- vreg->desc.owner = THIS_MODULE;
- vreg->desc.type = REGULATOR_VOLTAGE;
- vreg->desc.name = reg->name;
- vreg->desc.supply_name = reg->supply;
- vreg->desc.of_match = reg->name;
-
- config.dev = &pdev->dev;
- config.driver_data = vreg;
- rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config);
- if (IS_ERR(rdev)) {
- dev_err(&pdev->dev, "failed to register %s\n", reg->name);
- return PTR_ERR(rdev);
+ ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data);
+
+ if (ret < 0) {
+ of_node_put(node);
+ return ret;
}
}
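
One detail of the reworked probe loop is worth calling out: breaking out of for_each_available_child_of_node() early leaves a reference held on the current child, which is why the error path calls of_node_put(node). The general shape, with init_one() a hypothetical per-child helper:

    #include <linux/of.h>

    static int walk_children(struct device_node *parent)
    {
            struct device_node *child;
            int ret;

            for_each_available_child_of_node(parent, child) {
                    ret = init_one(child);
                    if (ret) {
                            of_node_put(child);     /* drop the iterator's ref */
                            return ret;
                    }
            }

            return 0;
    }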
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 41424a3366d0..02bfce981150 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -1895,6 +1895,44 @@ static const struct spmi_regulator_data pm8941_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8226_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "l1", 0x4000, "vdd_l1_l2_l4_l5", },
+ { "l2", 0x4100, "vdd_l1_l2_l4_l5", },
+ { "l3", 0x4200, "vdd_l3_l24_l26", },
+ { "l4", 0x4300, "vdd_l1_l2_l4_l5", },
+ { "l5", 0x4400, "vdd_l1_l2_l4_l5", },
+ { "l6", 0x4500, "vdd_l6_l7_l8_l9_l27", },
+ { "l7", 0x4600, "vdd_l6_l7_l8_l9_l27", },
+ { "l8", 0x4700, "vdd_l6_l7_l8_l9_l27", },
+ { "l9", 0x4800, "vdd_l6_l7_l8_l9_l27", },
+ { "l10", 0x4900, "vdd_l10_l11_l13", },
+ { "l11", 0x4a00, "vdd_l10_l11_l13", },
+ { "l12", 0x4b00, "vdd_l12_l14", },
+ { "l13", 0x4c00, "vdd_l10_l11_l13", },
+ { "l14", 0x4d00, "vdd_l12_l14", },
+ { "l15", 0x4e00, "vdd_l15_l16_l17_l18", },
+ { "l16", 0x4f00, "vdd_l15_l16_l17_l18", },
+ { "l17", 0x5000, "vdd_l15_l16_l17_l18", },
+ { "l18", 0x5100, "vdd_l15_l16_l17_l18", },
+ { "l19", 0x5200, "vdd_l19_l20_l21_l22_l23_l28", },
+ { "l20", 0x5300, "vdd_l19_l20_l21_l22_l23_l28", },
+ { "l21", 0x5400, "vdd_l19_l20_l21_l22_l23_l28", },
+ { "l22", 0x5500, "vdd_l19_l20_l21_l22_l23_l28", },
+ { "l23", 0x5600, "vdd_l19_l20_l21_l22_l23_l28", },
+ { "l24", 0x5700, "vdd_l3_l24_l26", },
+ { "l25", 0x5800, "vdd_l25", },
+ { "l26", 0x5900, "vdd_l3_l24_l26", },
+ { "l27", 0x5a00, "vdd_l6_l7_l8_l9_l27", },
+ { "l28", 0x5b00, "vdd_l19_l20_l21_l22_l23_l28", },
+ { "lvs1", 0x8000, "vdd_lvs1", },
+ { }
+};
+
static const struct spmi_regulator_data pm8841_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", NULL, 0x1c08 },
@@ -2095,6 +2133,7 @@ static const struct spmi_regulator_data pms405_regulators[] = {
static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8004-regulators", .data = &pm8004_regulators },
{ .compatible = "qcom,pm8005-regulators", .data = &pm8005_regulators },
+ { .compatible = "qcom,pm8226-regulators", .data = &pm8226_regulators },
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
diff --git a/drivers/regulator/rohm-regulator.c b/drivers/regulator/rohm-regulator.c
index 6e0d9c08ec1c..f97a9a51ee76 100644
--- a/drivers/regulator/rohm-regulator.c
+++ b/drivers/regulator/rohm-regulator.c
@@ -112,6 +112,22 @@ int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
}
EXPORT_SYMBOL(rohm_regulator_set_dvs_levels);
+/*
+ * A few ROHM PMIC ICs have constraints on voltage changing:
+ * BD71837 - only buck 1-4 voltages can be changed while they are enabled.
+ * Other bucks and all LDOs must be disabled when their voltage is changed.
+ * BD96801 - LDO voltage levels can only be changed while the LDOs are disabled.
+ */
+int rohm_regulator_set_voltage_sel_restricted(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ if (rdev->desc->ops->is_enabled(rdev))
+ return -EBUSY;
+
+ return regulator_set_voltage_sel_regmap(rdev, sel);
+}
+EXPORT_SYMBOL_GPL(rohm_regulator_set_voltage_sel_restricted);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("Generic helpers for ROHM PMIC regulator drivers");
diff --git a/drivers/regulator/tps68470-regulator.c b/drivers/regulator/tps68470-regulator.c
new file mode 100644
index 000000000000..4bca7c4128ab
--- /dev/null
+++ b/drivers/regulator/tps68470-regulator.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Regulator driver for TPS68470 PMIC
+//
+// Copyright (c) 2021 Red Hat Inc.
+// Copyright (C) 2018 Intel Corporation
+//
+// Authors:
+// Hans de Goede <hdegoede@redhat.com>
+// Zaikuo Wang <zaikuo.wang@intel.com>
+// Tianshu Qiu <tian.shu.qiu@intel.com>
+// Jian Xu Zheng <jian.xu.zheng@intel.com>
+// Yuning Pu <yuning.pu@intel.com>
+// Rajmohan Mani <rajmohan.mani@intel.com>
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/module.h>
+#include <linux/platform_data/tps68470.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+struct tps68470_regulator_data {
+ struct clk *clk;
+};
+
+#define TPS68470_REGULATOR(_name, _id, _ops, _n, \
+ _vr, _vm, _er, _em, _lr, _nlr) \
+ [TPS68470_ ## _name] = { \
+ .name = # _name, \
+ .id = _id, \
+ .ops = &_ops, \
+ .n_voltages = _n, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = _vm, \
+ .enable_reg = _er, \
+ .enable_mask = _em, \
+ .linear_ranges = _lr, \
+ .n_linear_ranges = _nlr, \
+ }
+
+static const struct linear_range tps68470_ldo_ranges[] = {
+ REGULATOR_LINEAR_RANGE(875000, 0, 125, 17800),
+};
+
+static const struct linear_range tps68470_core_ranges[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0, 42, 25000),
+};
+
+static int tps68470_regulator_enable(struct regulator_dev *rdev)
+{
+ struct tps68470_regulator_data *data = rdev->reg_data;
+ int ret;
+
+ /* The Core buck regulator needs the PMIC's PLL to be enabled */
+ if (rdev->desc->id == TPS68470_CORE) {
+ ret = clk_prepare_enable(data->clk);
+ if (ret) {
+ dev_err(&rdev->dev, "Error enabling TPS68470 clock\n");
+ return ret;
+ }
+ }
+
+ return regulator_enable_regmap(rdev);
+}
+
+static int tps68470_regulator_disable(struct regulator_dev *rdev)
+{
+ struct tps68470_regulator_data *data = rdev->reg_data;
+
+ if (rdev->desc->id == TPS68470_CORE)
+ clk_disable_unprepare(data->clk);
+
+ return regulator_disable_regmap(rdev);
+}
+
+/* Operations permitted on DCDCx, LDO2, LDO3 and LDO4 */
+static const struct regulator_ops tps68470_regulator_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = tps68470_regulator_enable,
+ .disable = tps68470_regulator_disable,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_ops tps68470_always_on_reg_ops = {
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_desc regulators[] = {
+ TPS68470_REGULATOR(CORE, TPS68470_CORE, tps68470_regulator_ops, 43,
+ TPS68470_REG_VDVAL, TPS68470_VDVAL_DVOLT_MASK,
+ TPS68470_REG_VDCTL, TPS68470_VDCTL_EN_MASK,
+ tps68470_core_ranges, ARRAY_SIZE(tps68470_core_ranges)),
+ TPS68470_REGULATOR(ANA, TPS68470_ANA, tps68470_regulator_ops, 126,
+ TPS68470_REG_VAVAL, TPS68470_VAVAL_AVOLT_MASK,
+ TPS68470_REG_VACTL, TPS68470_VACTL_EN_MASK,
+ tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
+ TPS68470_REGULATOR(VCM, TPS68470_VCM, tps68470_regulator_ops, 126,
+ TPS68470_REG_VCMVAL, TPS68470_VCMVAL_VCVOLT_MASK,
+ TPS68470_REG_VCMCTL, TPS68470_VCMCTL_EN_MASK,
+ tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
+ TPS68470_REGULATOR(VIO, TPS68470_VIO, tps68470_always_on_reg_ops, 126,
+ TPS68470_REG_VIOVAL, TPS68470_VIOVAL_IOVOLT_MASK,
+ 0, 0,
+ tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
+/*
+ * (1) This regulator must have the same voltage as VIO if S_IO LDO is used to
+ * power a sensor/VCM whose I2C is daisy-chained behind the PMIC.
+ * (2) If there is no I2C daisy chain it can be set freely.
+ */
+ TPS68470_REGULATOR(VSIO, TPS68470_VSIO, tps68470_regulator_ops, 126,
+ TPS68470_REG_VSIOVAL, TPS68470_VSIOVAL_IOVOLT_MASK,
+ TPS68470_REG_S_I2C_CTL, TPS68470_S_I2C_CTL_EN_MASK,
+ tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
+ TPS68470_REGULATOR(AUX1, TPS68470_AUX1, tps68470_regulator_ops, 126,
+ TPS68470_REG_VAUX1VAL, TPS68470_VAUX1VAL_AUX1VOLT_MASK,
+ TPS68470_REG_VAUX1CTL, TPS68470_VAUX1CTL_EN_MASK,
+ tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
+ TPS68470_REGULATOR(AUX2, TPS68470_AUX2, tps68470_regulator_ops, 126,
+ TPS68470_REG_VAUX2VAL, TPS68470_VAUX2VAL_AUX2VOLT_MASK,
+ TPS68470_REG_VAUX2CTL, TPS68470_VAUX2CTL_EN_MASK,
+ tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
+};
+
+static int tps68470_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tps68470_regulator_platform_data *pdata = dev_get_platdata(dev);
+ struct tps68470_regulator_data *data;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ int i;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->clk = devm_clk_get(dev, "tps68470-clk");
+ if (IS_ERR(data->clk))
+ return dev_err_probe(dev, PTR_ERR(data->clk), "getting tps68470-clk\n");
+
+ config.dev = dev->parent;
+ config.regmap = dev_get_drvdata(dev->parent);
+ config.driver_data = data;
+
+ for (i = 0; i < TPS68470_NUM_REGULATORS; i++) {
+ if (pdata)
+ config.init_data = pdata->reg_init_data[i];
+ else
+ config.init_data = NULL;
+
+ rdev = devm_regulator_register(dev, &regulators[i], &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "registering %s regulator\n",
+ regulators[i].name);
+ }
+
+ return 0;
+}
+
+static struct platform_driver tps68470_regulator_driver = {
+ .driver = {
+ .name = "tps68470-regulator",
+ },
+ .probe = tps68470_regulator_probe,
+};
+
+/*
+ * The ACPI tps68470 probe-ordering depends on the clk/gpio/regulator drivers
+ * registering before the drivers for the camera sensors that use them bind.
+ * subsys_initcall() ensures this when the drivers are builtin.
+ */
+static int __init tps68470_regulator_init(void)
+{
+ return platform_driver_register(&tps68470_regulator_driver);
+}
+subsys_initcall(tps68470_regulator_init);
+
+static void __exit tps68470_regulator_exit(void)
+{
+ platform_driver_unregister(&tps68470_regulator_driver);
+}
+module_exit(tps68470_regulator_exit);
+
+MODULE_ALIAS("platform:tps68470-regulator");
+MODULE_DESCRIPTION("TPS68470 voltage regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 4a51cfea45ac..e2a20d512152 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -196,7 +196,6 @@ static int twl4030reg_enable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
- int ret;
grp = twlreg_grp(rdev);
if (grp < 0)
@@ -204,16 +203,13 @@ static int twl4030reg_enable(struct regulator_dev *rdev)
grp |= P1_GRP_4030;
- ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
-
- return ret;
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
}
static int twl4030reg_disable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
- int ret;
grp = twlreg_grp(rdev);
if (grp < 0)
@@ -221,9 +217,7 @@ static int twl4030reg_disable(struct regulator_dev *rdev)
grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
- ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
-
- return ret;
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
}
static int twl4030reg_get_status(struct regulator_dev *rdev)
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index f2e961f998ca..166019786653 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -180,6 +180,7 @@ config QCOM_Q6V5_ADSP
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_PIL_INFO
select QCOM_MDT_LOADER
@@ -199,6 +200,7 @@ config QCOM_Q6V5_MSS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_PIL_INFO
@@ -218,6 +220,7 @@ config QCOM_Q6V5_PAS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_PIL_INFO
select QCOM_MDT_LOADER
@@ -239,6 +242,7 @@ config QCOM_Q6V5_WCSS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_PIL_INFO
@@ -283,6 +287,17 @@ config QCOM_WCNSS_PIL
verified and booted with the help of the Peripheral Authentication
System (PAS) in TrustZone.
+config RCAR_REMOTEPROC
+ tristate "Renesas R-Car Gen3 remoteproc support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Say y here to support the R-Car realtime processor via the
+ remote processor framework. ELF firmware can be loaded via
+ the sysfs remoteproc entries, and the remote processor can
+ be started and stopped.
+ This can be either built-in or a loadable module.
+ If compiled as a module (M), the module name is rcar_rproc.
+
config ST_REMOTEPROC
tristate "ST remoteproc support"
depends on ARCH_STI
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 0ac256b6c977..5478c7cb9e07 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
qcom_wcnss_pil-y += qcom_wcnss_iris.o
+obj-$(CONFIG_RCAR_REMOTEPROC) += rcar_rproc.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index ff8170dbbc3c..7a096f1891e6 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -34,7 +34,8 @@
#define IMX7D_M4_START (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \
| IMX7D_SW_M4C_RST)
-#define IMX7D_M4_STOP IMX7D_SW_M4C_NON_SCLR_RST
+#define IMX7D_M4_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST | \
+ IMX7D_SW_M4C_NON_SCLR_RST)
/* Address: 0x020D8000 */
#define IMX6SX_SRC_SCR 0x00
@@ -45,7 +46,8 @@
#define IMX6SX_M4_START (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
| IMX6SX_SW_M4C_RST)
-#define IMX6SX_M4_STOP IMX6SX_SW_M4C_NON_SCLR_RST
+#define IMX6SX_M4_STOP (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4C_RST | \
+ IMX6SX_SW_M4C_NON_SCLR_RST)
#define IMX6SX_M4_RST_MASK (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
| IMX6SX_SW_M4C_NON_SCLR_RST \
| IMX6SX_SW_M4C_RST)
@@ -684,7 +686,7 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
return ret;
}
- if (!(val & dcfg->src_stop))
+ if ((val & dcfg->src_mask) != dcfg->src_stop)
priv->rproc->state = RPROC_DETACHED;
return 0;
@@ -804,6 +806,7 @@ static int imx_rproc_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
rproc_del(rproc);
imx_rproc_free_mbox(rproc);
+ destroy_workqueue(priv->workqueue);
rproc_free(rproc);
return 0;
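
The stop-pattern and detection changes belong together: once the STOP value spans several control bits, a single-bit test can no longer classify the core. A hedged restatement of the fixed logic:

    /* Illustration only: the core counts as held in reset exactly when
     * the whole masked control field equals the stop pattern; any other
     * bit mix means the bootloader left firmware running, and the rproc
     * core should attach to it rather than reload. */
    static bool imx_core_is_stopped(u32 val, const struct imx_rproc_dcfg *dcfg)
    {
            return (val & dcfg->src_mask) == dcfg->src_stop;
    }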
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index a356738160a4..9902cce28692 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -218,14 +218,13 @@ static int ingenic_rproc_probe(struct platform_device *pdev)
if (vpu->irq < 0)
return vpu->irq;
- ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, 0, "VPU", rproc);
+ ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, IRQF_NO_AUTOEN,
+ "VPU", rproc);
if (ret < 0) {
dev_err(dev, "Failed to request IRQ\n");
return ret;
}
- disable_irq(vpu->irq);
-
ret = devm_rproc_add(dev, rproc);
if (ret) {
dev_err(dev, "Failed to register remote processor\n");
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
index 6dc955ecab80..00f041ebcde6 100644
--- a/drivers/remoteproc/mtk_scp_ipi.c
+++ b/drivers/remoteproc/mtk_scp_ipi.c
@@ -23,7 +23,7 @@
*
* Register an ipi function to receive ipi interrupt from SCP.
*
- * Returns 0 if ipi registers successfully, -error on error.
+ * Return: 0 if the ipi is registered successfully, a negative errno on error.
*/
int scp_ipi_register(struct mtk_scp *scp,
u32 id,
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(scp_ipi_unlock);
* When the processing completes, IPI handler registered
* by scp_ipi_register will be called in interrupt context.
*
- * Returns 0 if sending data successfully, -error on error.
+ * Return: 0 if the data is sent successfully, a negative errno on error.
**/
int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
unsigned int wait)
diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
index 7c007dd7b200..aca21560e20b 100644
--- a/drivers/remoteproc/qcom_pil_info.c
+++ b/drivers/remoteproc/qcom_pil_info.c
@@ -104,7 +104,7 @@ int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
return -ENOMEM;
found_unused:
- memcpy_toio(entry, image, PIL_RELOC_NAME_LEN);
+ memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN));
found_existing:
/* Use two writel() as base is only aligned to 4 bytes on odd entries */
writel(base, entry + PIL_RELOC_NAME_LEN);
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index eada7e34f3af..442a388f8102 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -10,6 +10,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/soc/qcom/qcom_aoss.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/remoteproc.h>
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 03857dc9cdc1..184bb7cdf95a 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -524,6 +524,23 @@ static const struct adsp_data sdm845_adsp_resource_init = {
.ssctl_id = 0x14,
};
+static const struct adsp_data sm6350_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
static const struct adsp_data sm8150_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
@@ -612,6 +629,23 @@ static const struct adsp_data sdm845_cdsp_resource_init = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sm6350_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
static const struct adsp_data sm8150_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
@@ -652,6 +686,7 @@ static const struct adsp_data sm8350_cdsp_resource = {
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
+ "mxc",
NULL
},
.load_state = "cdsp",
@@ -804,6 +839,9 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
{ .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
+ { .compatible = "qcom,sm6350-adsp-pas", .data = &sm6350_adsp_resource},
+ { .compatible = "qcom,sm6350-cdsp-pas", .data = &sm6350_cdsp_resource},
+ { .compatible = "qcom,sm6350-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
new file mode 100644
index 000000000000..aa86154109c7
--- /dev/null
+++ b/drivers/remoteproc/rcar_rproc.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) IoT.bzh 2021
+ */
+
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include "remoteproc_internal.h"
+
+struct rcar_rproc {
+ struct reset_control *rst;
+};
+
+static int rcar_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = &rproc->dev;
+ void *va;
+
+ dev_dbg(dev, "map memory: %pa+%zx\n", &mem->dma, mem->len);
+ va = ioremap_wc(mem->dma, mem->len);
+ if (!va) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int rcar_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ dev_dbg(&rproc->dev, "unmap memory: %pa\n", &mem->dma);
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int rcar_rproc_prepare(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ u32 da;
+
+ /* Register associated reserved memory regions */
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ dev_err(&rproc->dev,
+ "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ if (rmem->base > U32_MAX)
+ return -EINVAL;
+
+ /* No need to translate pa to da, R-Car uses the same map */
+ da = rmem->base;
+ mem = rproc_mem_entry_init(dev, NULL,
+ rmem->base,
+ rmem->size, da,
+ rcar_rproc_mem_alloc,
+ rcar_rproc_mem_release,
+ it.node->name);
+
+ if (!mem)
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ return 0;
+}
+
+static int rcar_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret;
+
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret)
+ dev_info(&rproc->dev, "No resource table in elf\n");
+
+ return 0;
+}
+
+static int rcar_rproc_start(struct rproc *rproc)
+{
+ struct rcar_rproc *priv = rproc->priv;
+ int err;
+
+ if (!rproc->bootaddr)
+ return -EINVAL;
+
+ err = rcar_rst_set_rproc_boot_addr(rproc->bootaddr);
+ if (err) {
+ dev_err(&rproc->dev, "failed to set rproc boot addr\n");
+ return err;
+ }
+
+ err = reset_control_deassert(priv->rst);
+ if (err)
+ dev_err(&rproc->dev, "failed to deassert reset\n");
+
+ return err;
+}
+
+static int rcar_rproc_stop(struct rproc *rproc)
+{
+ struct rcar_rproc *priv = rproc->priv;
+ int err;
+
+ err = reset_control_assert(priv->rst);
+ if (err)
+ dev_err(&rproc->dev, "failed to assert reset\n");
+
+ return err;
+}
+
+static struct rproc_ops rcar_rproc_ops = {
+ .prepare = rcar_rproc_prepare,
+ .start = rcar_rproc_start,
+ .stop = rcar_rproc_stop,
+ .load = rproc_elf_load_segments,
+ .parse_fw = rcar_rproc_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+
+};
+
+static int rcar_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct rcar_rproc *priv;
+ struct rproc *rproc;
+ int ret;
+
+ rproc = devm_rproc_alloc(dev, np->name, &rcar_rproc_ops,
+ NULL, sizeof(*priv));
+ if (!rproc)
+ return -ENOMEM;
+
+ priv = rproc->priv;
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ ret = PTR_ERR(priv->rst);
+ dev_err_probe(dev, ret, "failed to acquire rproc reset\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "failed to power up\n");
+ goto pm_disable;
+ }
+
+ dev_set_drvdata(dev, rproc);
+
+ /* Manually start the rproc */
+ rproc->auto_boot = false;
+
+ ret = devm_rproc_add(dev, rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed\n");
+ goto pm_disable;
+ }
+
+ return 0;
+
+pm_disable:
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int rcar_rproc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_rproc_of_match[] = {
+ { .compatible = "renesas,rcar-cr7" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, rcar_rproc_of_match);
+
+static struct platform_driver rcar_rproc_driver = {
+ .probe = rcar_rproc_probe,
+ .remove = rcar_rproc_remove,
+ .driver = {
+ .name = "rcar-rproc",
+ .of_match_table = rcar_rproc_of_match,
+ },
+};
+
+module_platform_driver(rcar_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 remote processor control driver");
+MODULE_AUTHOR("Julien Massot <julien.massot@iot.bzh>");
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 775df165eb45..69f51acf235e 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -577,8 +577,8 @@ static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
dma_get_mask(rproc->dev.parent));
if (ret) {
dev_warn(dev,
- "Failed to set DMA mask %llx. Trying to continue... %x\n",
- dma_get_mask(rproc->dev.parent), ret);
+ "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
+ dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
}
/* parse the vrings */
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
index c892f433a323..4b093420d98a 100644
--- a/drivers/remoteproc/remoteproc_coredump.c
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -166,7 +166,7 @@ static void rproc_copy_segment(struct rproc *rproc, void *dest,
memset(dest, 0xff, size);
} else {
if (is_iomem)
- memcpy_fromio(dest, ptr, size);
+ memcpy_fromio(dest, (void const __iomem *)ptr, size);
else
memcpy(dest, ptr, size);
}
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 22096adc1ad3..4ed9467897e5 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -216,7 +216,7 @@ static const struct rproc_ops slim_rproc_ops = {
* obtains and enables any clocks required by the SLIM core and also
* ioremaps the various IO.
*
- * Returns st_slim_rproc pointer or PTR_ERR() on error.
+ * Return: st_slim_rproc pointer or PTR_ERR() on error.
*/
struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index b643efcf995a..7d782ed9e589 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -494,7 +494,7 @@ static int stm32_rproc_stop(struct rproc *rproc)
int err, idx;
/* request shutdown of the remote processor */
- if (rproc->state != RPROC_OFFLINE) {
+ if (rproc->state != RPROC_OFFLINE && rproc->state != RPROC_CRASHED) {
idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN);
if (idx >= 0 && ddata->mb[idx].chan) {
err = mbox_send_message(ddata->mb[idx].chan, "detach");
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index c352fa277c8d..939c5d90b562 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -767,6 +767,7 @@ static const struct k3_dsp_dev_data c71_data = {
static const struct of_device_id k3_dsp_of_match[] = {
{ .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
{ .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
+ { .compatible = "ti,j721s2-c71-dsp", .data = &c71_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 6499302d00c3..969531c05b13 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1535,7 +1535,7 @@ static const struct k3_r5_soc_data am65_j721e_soc_data = {
.single_cpu_mode = false,
};
-static const struct k3_r5_soc_data j7200_soc_data = {
+static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
.tcm_is_double = true,
.tcm_ecc_autoinit = true,
.single_cpu_mode = false,
@@ -1550,8 +1550,9 @@ static const struct k3_r5_soc_data am64_soc_data = {
static const struct of_device_id k3_r5_of_match[] = {
{ .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
{ .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
- { .compatible = "ti,j7200-r5fss", .data = &j7200_soc_data, },
+ { .compatible = "ti,j7200-r5fss", .data = &j7200_j721s2_soc_data, },
{ .compatible = "ti,am64-r5fss", .data = &am64_soc_data, },
+ { .compatible = "ti,j721s2-r5fss", .data = &j7200_j721s2_soc_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_r5_of_match);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 85024eb1d2ea..6f8ba0ddc05f 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -224,6 +224,13 @@ config RESET_SOCFPGA
This enables the reset driver for the SoCFPGA ARMv7 platforms. This
driver gets initialized early during platform init calls.
+config RESET_STARFIVE_JH7100
+ bool "StarFive JH7100 Reset Driver"
+ depends on SOC_STARFIVE || COMPILE_TEST
+ default SOC_STARFIVE
+ help
+ This enables the reset controller driver for the StarFive JH7100 SoC.
+
config RESET_SUNXI
bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
default ARCH_SUNXI
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 21d46d8869ff..bd0a97be18b5 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_RESET_RZG2L_USBPHY_CTRL) += reset-rzg2l-usbphy-ctrl.o
obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
+obj-$(CONFIG_RESET_STARFIVE_JH7100) += reset-starfive-jh7100.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
diff --git a/drivers/reset/reset-starfive-jh7100.c b/drivers/reset/reset-starfive-jh7100.c
new file mode 100644
index 000000000000..fc44b2fb3e03
--- /dev/null
+++ b/drivers/reset/reset-starfive-jh7100.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Reset driver for the StarFive JH7100 SoC
+ *
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#include <linux/bitmap.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/reset/starfive-jh7100.h>
+
+/* register offsets */
+#define JH7100_RESET_ASSERT0 0x00
+#define JH7100_RESET_ASSERT1 0x04
+#define JH7100_RESET_ASSERT2 0x08
+#define JH7100_RESET_ASSERT3 0x0c
+#define JH7100_RESET_STATUS0 0x10
+#define JH7100_RESET_STATUS1 0x14
+#define JH7100_RESET_STATUS2 0x18
+#define JH7100_RESET_STATUS3 0x1c
+
+/*
+ * Writing a 1 to the n'th bit of the m'th ASSERT register asserts
+ * line 32m + n, and writing a 0 deasserts the same line.
+ * Most reset lines have their status inverted so a 0 bit in the STATUS
+ * register means the line is asserted and a 1 means it's deasserted. A few
+ * lines don't though, so store the expected value of the status registers when
+ * all lines are asserted.
+ */
+static const u64 jh7100_reset_asserted[2] = {
+ /* STATUS0 */
+ BIT_ULL_MASK(JH7100_RST_U74) |
+ BIT_ULL_MASK(JH7100_RST_VP6_DRESET) |
+ BIT_ULL_MASK(JH7100_RST_VP6_BRESET) |
+ /* STATUS1 */
+ BIT_ULL_MASK(JH7100_RST_HIFI4_DRESET) |
+ BIT_ULL_MASK(JH7100_RST_HIFI4_BRESET),
+ /* STATUS2 */
+ BIT_ULL_MASK(JH7100_RST_E24) |
+ /* STATUS3 */
+ 0,
+};
+
+struct jh7100_reset {
+ struct reset_controller_dev rcdev;
+ /* protect registers against concurrent read-modify-write */
+ spinlock_t lock;
+ void __iomem *base;
+};
+
+static inline struct jh7100_reset *
+jh7100_reset_from(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct jh7100_reset, rcdev);
+}
+
+static int jh7100_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct jh7100_reset *data = jh7100_reset_from(rcdev);
+ unsigned long offset = BIT_ULL_WORD(id);
+ u64 mask = BIT_ULL_MASK(id);
+ void __iomem *reg_assert = data->base + JH7100_RESET_ASSERT0 + offset * sizeof(u64);
+ void __iomem *reg_status = data->base + JH7100_RESET_STATUS0 + offset * sizeof(u64);
+ u64 done = jh7100_reset_asserted[offset] & mask;
+ u64 value;
+ unsigned long flags;
+ int ret;
+
+ if (!assert)
+ done ^= mask;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ value = readq(reg_assert);
+ if (assert)
+ value |= mask;
+ else
+ value &= ~mask;
+ writeq(value, reg_assert);
+
+ /* poll with a timeout; if the associated clock is gated, waiting for the status bit could otherwise hang forever */
+ ret = readq_poll_timeout_atomic(reg_status, value, (value & mask) == done, 0, 1000);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+ return ret;
+}
+
+static int jh7100_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return jh7100_reset_update(rcdev, id, true);
+}
+
+static int jh7100_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return jh7100_reset_update(rcdev, id, false);
+}
+
+static int jh7100_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ int ret;
+
+ ret = jh7100_reset_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ return jh7100_reset_deassert(rcdev, id);
+}
+
+static int jh7100_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct jh7100_reset *data = jh7100_reset_from(rcdev);
+ unsigned long offset = BIT_ULL_WORD(id);
+ u64 mask = BIT_ULL_MASK(id);
+ void __iomem *reg_status = data->base + JH7100_RESET_STATUS0 + offset * sizeof(u64);
+ u64 value = readq(reg_status);
+
+ return !((value ^ jh7100_reset_asserted[offset]) & mask);
+}
+
+static const struct reset_control_ops jh7100_reset_ops = {
+ .assert = jh7100_reset_assert,
+ .deassert = jh7100_reset_deassert,
+ .reset = jh7100_reset_reset,
+ .status = jh7100_reset_status,
+};
+
+static int __init jh7100_reset_probe(struct platform_device *pdev)
+{
+ struct jh7100_reset *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->rcdev.ops = &jh7100_reset_ops;
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = JH7100_RSTN_END;
+ data->rcdev.dev = &pdev->dev;
+ data->rcdev.of_node = pdev->dev.of_node;
+ spin_lock_init(&data->lock);
+
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
+}
+
+static const struct of_device_id jh7100_reset_dt_ids[] = {
+ { .compatible = "starfive,jh7100-reset" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver jh7100_reset_driver = {
+ .driver = {
+ .name = "jh7100-reset",
+ .of_match_table = jh7100_reset_dt_ids,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(jh7100_reset_driver, jh7100_reset_probe);
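
The register math in jh7100_reset_update() is compact, so a worked example may help (illustration, not driver code): the four 32-bit ASSERT/STATUS registers are accessed as two little-endian 64-bit pairs, and a line id picks its pair and bit as follows.

    #include <linux/bits.h>

    /* Example: reset line id 70. BIT_ULL_WORD(70) == 70 / 64 == 1, so it
     * lives in the second pair (ASSERT2/ASSERT3, one readq() at offset
     * 0x08); BIT_ULL_MASK(70) == BIT_ULL(70 % 64) == BIT_ULL(6). */
    static u64 jh7100_line_mask(unsigned long id, unsigned long *byte_offset)
    {
            *byte_offset = BIT_ULL_WORD(id) * sizeof(u64);
            return BIT_ULL_MASK(id);
    }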
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 3f377a795b33..1030cfa80e04 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -427,7 +427,7 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
* Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote.
* Will return with refcount held, regardless of outcome.
*
- * Returns 0 on success, negative errno otherwise.
+ * Return: 0 on success, negative errno otherwise.
*/
static int qcom_glink_send_open_req(struct qcom_glink *glink,
struct glink_channel *channel)
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 8da1b5cb31b3..540e027f08c4 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1467,7 +1467,7 @@ ATTRIBUTE_GROUPS(qcom_smd_edge);
* @parent: parent device for the edge
* @node: device_node describing the edge
*
- * Returns an edge reference, or negative ERR_PTR() on failure.
+ * Return: an edge reference, or negative ERR_PTR() on failure.
*/
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
struct device_node *node)
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index b5907b80727c..5663cf799c95 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -9,6 +9,9 @@
* Based on rpmsg performance statistics driver by Michal Simek, which in turn
* was based on TI & Google OMX rpmsg driver.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
@@ -90,7 +93,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
/* wake up any blocked readers */
wake_up_interruptible(&eptdev->readq);
- device_del(&eptdev->dev);
+ cdev_device_del(&eptdev->cdev, &eptdev->dev);
put_device(&eptdev->dev);
return 0;
@@ -333,7 +336,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
ida_simple_remove(&rpmsg_ept_ida, dev->id);
ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
- cdev_del(&eptdev->cdev);
kfree(eptdev);
}
@@ -378,19 +380,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
dev->id = ret;
dev_set_name(dev, "rpmsg%d", ret);
- ret = cdev_add(&eptdev->cdev, dev->devt, 1);
+ ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
if (ret)
goto free_ept_ida;
/* We can now rely on the release function for cleanup */
dev->release = rpmsg_eptdev_release_device;
- ret = device_add(dev);
- if (ret) {
- dev_err(dev, "device_add failed: %d\n", ret);
- put_device(dev);
- }
-
return ret;
free_ept_ida:
@@ -459,7 +455,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
- cdev_del(&ctrldev->cdev);
kfree(ctrldev);
}
@@ -494,19 +489,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
dev->id = ret;
dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
- ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
+ ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
if (ret)
goto free_ctrl_ida;
/* We can now rely on the release function for cleanup */
dev->release = rpmsg_ctrldev_release_device;
- ret = device_add(dev);
- if (ret) {
- dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
- put_device(dev);
- }
-
dev_set_drvdata(&rpdev->dev, ctrldev);
return ret;
@@ -532,7 +521,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
if (ret)
dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
- device_del(&ctrldev->dev);
+ cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
put_device(&ctrldev->dev);
}
@@ -550,7 +539,7 @@ static int rpmsg_chrdev_init(void)
ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg");
if (ret < 0) {
- pr_err("rpmsg: failed to allocate char dev region\n");
+ pr_err("failed to allocate char dev region\n");
return ret;
}
@@ -563,7 +552,7 @@ static int rpmsg_chrdev_init(void)
ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
if (ret < 0) {
- pr_err("rpmsgchr: failed to register rpmsg driver\n");
+ pr_err("failed to register rpmsg driver\n");
class_destroy(rpmsg_class);
unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
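
The rpmsg_char rework above is an instance of a general pattern: cdev_device_add()/cdev_device_del() register and tear down the cdev and its struct device as one unit, so the separate cdev_add() + device_add() calls, and the error handling between them, collapse. A sketch (struct my_priv and my_fops are hypothetical):

    #include <linux/cdev.h>
    #include <linux/device.h>

    static int my_chardev_register(struct my_priv *priv)
    {
            cdev_init(&priv->cdev, &my_fops);

            /* Adds the cdev and the device together, or neither. */
            return cdev_device_add(&priv->cdev, &priv->dev);
    }

    static void my_chardev_unregister(struct my_priv *priv)
    {
            /* Mirror image: removes both; ->release() then frees priv. */
            cdev_device_del(&priv->cdev, &priv->dev);
            put_device(&priv->dev);
    }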
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index d3eb60059ef1..d9e612f4f0f2 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -26,7 +26,7 @@
* @rpdev: rpmsg device
* @chinfo: channel_info to bind
*
- * Returns a pointer to the new rpmsg device on success, or NULL on error.
+ * Return: a pointer to the new rpmsg device on success, or NULL on error.
*/
struct rpmsg_device *rpmsg_create_channel(struct rpmsg_device *rpdev,
struct rpmsg_channel_info *chinfo)
@@ -48,7 +48,7 @@ EXPORT_SYMBOL(rpmsg_create_channel);
* @rpdev: rpmsg device
* @chinfo: channel_info to bind
*
- * Returns 0 on success or an appropriate error value.
+ * Return: 0 on success or an appropriate error value.
*/
int rpmsg_release_channel(struct rpmsg_device *rpdev,
struct rpmsg_channel_info *chinfo)
@@ -102,7 +102,7 @@ EXPORT_SYMBOL(rpmsg_release_channel);
* dynamically assign them an available rpmsg address (drivers should have
* a very good reason why not to always use RPMSG_ADDR_ANY here).
*
- * Returns a pointer to the endpoint on success, or NULL on error.
+ * Return: a pointer to the endpoint on success, or NULL on error.
*/
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev,
rpmsg_rx_cb_t cb, void *priv,
@@ -146,7 +146,7 @@ EXPORT_SYMBOL(rpmsg_destroy_ept);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
{
@@ -175,7 +175,7 @@ EXPORT_SYMBOL(rpmsg_send);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
@@ -206,7 +206,7 @@ EXPORT_SYMBOL(rpmsg_sendto);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len)
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(rpmsg_send_offchannel);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
@@ -263,7 +263,7 @@ EXPORT_SYMBOL(rpmsg_trysend);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
@@ -282,7 +282,7 @@ EXPORT_SYMBOL(rpmsg_trysendto);
* @filp: file for poll_wait()
* @wait: poll_table for poll_wait()
*
- * Returns mask representing the current state of the endpoint's send buffers
+ * Return: mask representing the current state of the endpoint's send buffers
*/
__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait)
@@ -313,7 +313,7 @@ EXPORT_SYMBOL(rpmsg_poll);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len)
@@ -540,13 +540,25 @@ static int rpmsg_dev_probe(struct device *dev)
err = rpdrv->probe(rpdev);
if (err) {
dev_err(dev, "%s: failed: %d\n", __func__, err);
- if (ept)
- rpmsg_destroy_ept(ept);
- goto out;
+ goto destroy_ept;
}
- if (ept && rpdev->ops->announce_create)
+ if (ept && rpdev->ops->announce_create) {
err = rpdev->ops->announce_create(rpdev);
+ if (err) {
+ dev_err(dev, "failed to announce creation\n");
+ goto remove_rpdev;
+ }
+ }
+
+ return 0;
+
+remove_rpdev:
+ if (rpdrv->remove)
+ rpdrv->remove(rpdev);
+destroy_ept:
+ if (ept)
+ rpmsg_destroy_ept(ept);
out:
return err;
}
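
The reworked probe above is the standard kernel unwind ladder: every setup step gets a label that undoes it, a failure jumps to the label covering everything done so far, and the labels run in reverse order. A generic sketch of the idiom (setup_*/teardown_* are hypothetical):

static int probe_sketch(void)
{
	int err;

	err = setup_a();
	if (err)
		goto out;

	err = setup_b();
	if (err)
		goto undo_a;

	err = setup_c();
	if (err)
		goto undo_b;

	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
out:
	return err;
}
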
@@ -623,7 +635,7 @@ EXPORT_SYMBOL(rpmsg_unregister_device);
* @rpdrv: pointer to a struct rpmsg_driver
* @owner: owning module/driver
*
- * Returns 0 on success, and an appropriate error value on failure.
+ * Return: 0 on success, and an appropriate error value on failure.
*/
int __register_rpmsg_driver(struct rpmsg_driver *rpdrv, struct module *owner)
{
@@ -637,7 +649,7 @@ EXPORT_SYMBOL(__register_rpmsg_driver);
* unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
* @rpdrv: pointer to a struct rpmsg_driver
*
- * Returns 0 on success, and an appropriate error value on failure.
+ * Return: 0 on success, and an appropriate error value on failure.
*/
void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv)
{
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 9c112aa65040..ac764e04c898 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -547,7 +547,7 @@ static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
* should use the appropriate rpmsg_{try}send{to, _offchannel} API
* (see include/linux/rpmsg.h).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
u32 src, u32 dst,
@@ -1024,7 +1024,7 @@ static void rpmsg_remove(struct virtio_device *vdev)
size_t total_buf_space = vrp->num_bufs * vrp->buf_size;
int ret;
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
if (ret)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 058e56a10ab8..d85a3c31347c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1216,6 +1216,17 @@ config RTC_DRV_V3020
This driver can also be built as a module. If so, the module
will be called rtc-v3020.
+config RTC_DRV_GAMECUBE
+ tristate "Nintendo GameCube, Wii and Wii U RTC"
+ depends on GAMECUBE || WII || COMPILE_TEST
+ select REGMAP
+ help
+ If you say yes here you will get support for the RTC subsystem
+ of the Nintendo GameCube, Wii and Wii U.
+
+ This driver can also be built as a module. If so, the module
+ will be called "rtc-gamecube".
+
config RTC_DRV_WM831X
tristate "Wolfson Microelectronics WM831x RTC"
depends on MFD_WM831X
@@ -1444,6 +1455,19 @@ config RTC_DRV_SH
To compile this driver as a module, choose M here: the
module will be called rtc-sh.
+config RTC_DRV_SUNPLUS
+ tristate "Sunplus SP7021 RTC"
+ depends on SOC_SP7021
+ help
+ Say 'yes' to get support for the real-time clock present in
+ Sunplus SP7021 - a SoC for industrial applications. It provides
+ RTC status checks, timer/alarm functionality, user data
+ retention with a battery above 2.5V, RTC power status checks
+ and battery charging.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-sunplus.
+
config RTC_DRV_VR41XX
tristate "NEC VR41XX"
depends on CPU_VR41XX || COMPILE_TEST
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 678a8ef4abae..e92f3e943245 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -111,6 +111,7 @@ obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MXC_V2) += rtc-mxc_v2.o
+obj-$(CONFIG_RTC_DRV_GAMECUBE) += rtc-gamecube.o
obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
@@ -165,6 +166,7 @@ obj-$(CONFIG_RTC_DRV_STM32) += rtc-stm32.o
obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
obj-$(CONFIG_RTC_DRV_SUN6I) += rtc-sun6i.o
+obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o
obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index e104972a28fd..69325aeede1a 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -391,14 +391,14 @@ static long rtc_dev_ioctl(struct file *file,
}
switch(param.param) {
- long offset;
case RTC_PARAM_FEATURES:
if (param.index != 0)
err = -EINVAL;
param.uvalue = rtc->features[0];
break;
- case RTC_PARAM_CORRECTION:
+ case RTC_PARAM_CORRECTION: {
+ long offset;
mutex_unlock(&rtc->ops_lock);
if (param.index != 0)
return -EINVAL;
@@ -407,7 +407,7 @@ static long rtc_dev_ioctl(struct file *file,
if (err == 0)
param.svalue = offset;
break;
-
+ }
default:
if (rtc->ops->param_get)
err = rtc->ops->param_get(rtc->dev.parent, &param);
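
The dev.c change above also sidesteps a C pitfall: a declaration placed between the switch's opening brace and the first case label is legal, but control jumps straight over it, so any initialiser would never run and several compilers warn about such statements. Scoping the variable inside a braced case, as the patch does, keeps its lifetime and the jump behaviour obvious. A minimal sketch (SOME_CMD, compute_value() and consume() are placeholders):

	switch (cmd) {
	case SOME_CMD: {
		long tmp;	/* visible only within this case */

		tmp = compute_value();
		consume(tmp);
		break;
	}
	default:
		break;
	}
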
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 4eb53412b808..7c006c2b125f 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -222,6 +222,8 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
static int cmos_read_time(struct device *dev, struct rtc_time *t)
{
+ int ret;
+
/*
* If pm_trace abused the RTC for storage, set the timespec to 0,
* which tells the caller that this RTC value is unusable.
@@ -229,7 +231,12 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
if (!pm_trace_rtc_valid())
return -EIO;
- mc146818_get_time(t);
+ ret = mc146818_get_time(t);
+ if (ret < 0) {
+ dev_err_ratelimited(dev, "unable to read current time\n");
+ return ret;
+ }
+
return 0;
}
@@ -242,10 +249,46 @@ static int cmos_set_time(struct device *dev, struct rtc_time *t)
return mc146818_set_time(t);
}
+struct cmos_read_alarm_callback_param {
+ struct cmos_rtc *cmos;
+ struct rtc_time *time;
+ unsigned char rtc_control;
+};
+
+static void cmos_read_alarm_callback(unsigned char __always_unused seconds,
+ void *param_in)
+{
+ struct cmos_read_alarm_callback_param *p =
+ (struct cmos_read_alarm_callback_param *)param_in;
+ struct rtc_time *time = p->time;
+
+ time->tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
+ time->tm_min = CMOS_READ(RTC_MINUTES_ALARM);
+ time->tm_hour = CMOS_READ(RTC_HOURS_ALARM);
+
+ if (p->cmos->day_alrm) {
+ /* ignore upper bits on readback per ACPI spec */
+ time->tm_mday = CMOS_READ(p->cmos->day_alrm) & 0x3f;
+ if (!time->tm_mday)
+ time->tm_mday = -1;
+
+ if (p->cmos->mon_alrm) {
+ time->tm_mon = CMOS_READ(p->cmos->mon_alrm);
+ if (!time->tm_mon)
+ time->tm_mon = -1;
+ }
+ }
+
+ p->rtc_control = CMOS_READ(RTC_CONTROL);
+}
+
static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
- unsigned char rtc_control;
+ struct cmos_read_alarm_callback_param p = {
+ .cmos = cmos,
+ .time = &t->time,
+ };
/* This is not only an rtc_op, but is also called directly */
if (!is_valid_irq(cmos->irq))
@@ -256,28 +299,18 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
* the future.
*/
- spin_lock_irq(&rtc_lock);
- t->time.tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
- t->time.tm_min = CMOS_READ(RTC_MINUTES_ALARM);
- t->time.tm_hour = CMOS_READ(RTC_HOURS_ALARM);
-
- if (cmos->day_alrm) {
- /* ignore upper bits on readback per ACPI spec */
- t->time.tm_mday = CMOS_READ(cmos->day_alrm) & 0x3f;
- if (!t->time.tm_mday)
- t->time.tm_mday = -1;
-
- if (cmos->mon_alrm) {
- t->time.tm_mon = CMOS_READ(cmos->mon_alrm);
- if (!t->time.tm_mon)
- t->time.tm_mon = -1;
- }
- }
-
- rtc_control = CMOS_READ(RTC_CONTROL);
- spin_unlock_irq(&rtc_lock);
+ /* Some Intel chipsets disconnect the alarm registers when the clock
+ * update is in progress - during this time reads return bogus values
+ * and writes may fail silently. See for example "7th Generation Intel®
+ * Processor Family I/O for U/Y Platforms [...] Datasheet", section
+ * 27.7.1
+ *
+ * Use the mc146818_avoid_UIP() function to avoid this.
+ */
+ if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p))
+ return -EIO;
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
if (((unsigned)t->time.tm_sec) < 0x60)
t->time.tm_sec = bcd2bin(t->time.tm_sec);
else
@@ -306,7 +339,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
}
}
- t->enabled = !!(rtc_control & RTC_AIE);
+ t->enabled = !!(p.rtc_control & RTC_AIE);
t->pending = 0;
return 0;
@@ -437,10 +470,57 @@ static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+struct cmos_set_alarm_callback_param {
+ struct cmos_rtc *cmos;
+ unsigned char mon, mday, hrs, min, sec;
+ struct rtc_wkalrm *t;
+};
+
+/* Note: this function may be executed by mc146818_avoid_UIP() more than
+ * once
+ */
+static void cmos_set_alarm_callback(unsigned char __always_unused seconds,
+ void *param_in)
+{
+ struct cmos_set_alarm_callback_param *p =
+ (struct cmos_set_alarm_callback_param *)param_in;
+
+ /* next rtc irq must not be from previous alarm setting */
+ cmos_irq_disable(p->cmos, RTC_AIE);
+
+ /* update alarm */
+ CMOS_WRITE(p->hrs, RTC_HOURS_ALARM);
+ CMOS_WRITE(p->min, RTC_MINUTES_ALARM);
+ CMOS_WRITE(p->sec, RTC_SECONDS_ALARM);
+
+ /* the system may support an "enhanced" alarm */
+ if (p->cmos->day_alrm) {
+ CMOS_WRITE(p->mday, p->cmos->day_alrm);
+ if (p->cmos->mon_alrm)
+ CMOS_WRITE(p->mon, p->cmos->mon_alrm);
+ }
+
+ if (use_hpet_alarm()) {
+ /*
+ * FIXME the HPET alarm glue currently ignores day_alrm
+ * and mon_alrm ...
+ */
+ hpet_set_alarm_time(p->t->time.tm_hour, p->t->time.tm_min,
+ p->t->time.tm_sec);
+ }
+
+ if (p->t->enabled)
+ cmos_irq_enable(p->cmos, RTC_AIE);
+}
+
static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
- unsigned char mon, mday, hrs, min, sec, rtc_control;
+ struct cmos_set_alarm_callback_param p = {
+ .cmos = cmos,
+ .t = t
+ };
+ unsigned char rtc_control;
int ret;
/* This is not only an rtc_op, but is also called directly */
@@ -451,52 +531,33 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
if (ret < 0)
return ret;
- mon = t->time.tm_mon + 1;
- mday = t->time.tm_mday;
- hrs = t->time.tm_hour;
- min = t->time.tm_min;
- sec = t->time.tm_sec;
+ p.mon = t->time.tm_mon + 1;
+ p.mday = t->time.tm_mday;
+ p.hrs = t->time.tm_hour;
+ p.min = t->time.tm_min;
+ p.sec = t->time.tm_sec;
+ spin_lock_irq(&rtc_lock);
rtc_control = CMOS_READ(RTC_CONTROL);
+ spin_unlock_irq(&rtc_lock);
+
if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
/* Writing 0xff means "don't care" or "match all". */
- mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
- mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
- hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
- min = (min < 60) ? bin2bcd(min) : 0xff;
- sec = (sec < 60) ? bin2bcd(sec) : 0xff;
- }
-
- spin_lock_irq(&rtc_lock);
-
- /* next rtc irq must not be from previous alarm setting */
- cmos_irq_disable(cmos, RTC_AIE);
-
- /* update alarm */
- CMOS_WRITE(hrs, RTC_HOURS_ALARM);
- CMOS_WRITE(min, RTC_MINUTES_ALARM);
- CMOS_WRITE(sec, RTC_SECONDS_ALARM);
-
- /* the system may support an "enhanced" alarm */
- if (cmos->day_alrm) {
- CMOS_WRITE(mday, cmos->day_alrm);
- if (cmos->mon_alrm)
- CMOS_WRITE(mon, cmos->mon_alrm);
+ p.mon = (p.mon <= 12) ? bin2bcd(p.mon) : 0xff;
+ p.mday = (p.mday >= 1 && p.mday <= 31) ? bin2bcd(p.mday) : 0xff;
+ p.hrs = (p.hrs < 24) ? bin2bcd(p.hrs) : 0xff;
+ p.min = (p.min < 60) ? bin2bcd(p.min) : 0xff;
+ p.sec = (p.sec < 60) ? bin2bcd(p.sec) : 0xff;
}
- if (use_hpet_alarm()) {
- /*
- * FIXME the HPET alarm glue currently ignores day_alrm
- * and mon_alrm ...
- */
- hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min,
- t->time.tm_sec);
- }
-
- if (t->enabled)
- cmos_irq_enable(cmos, RTC_AIE);
-
- spin_unlock_irq(&rtc_lock);
+ /*
+ * Some Intel chipsets disconnect the alarm registers when the clock
+ * update is in progress - during this time writes fail silently.
+ *
+ * Use mc146818_avoid_UIP() to avoid this.
+ */
+ if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p))
+ return -EIO;
cmos->alarm_expires = rtc_tm_to_time64(&t->time);
@@ -790,16 +851,14 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
- spin_lock_irq(&rtc_lock);
-
- /* Ensure that the RTC is accessible. Bit 6 must be 0! */
- if ((CMOS_READ(RTC_VALID) & 0x40) != 0) {
- spin_unlock_irq(&rtc_lock);
- dev_warn(dev, "not accessible\n");
+ if (!mc146818_does_rtc_work()) {
+ dev_warn(dev, "broken or not accessible\n");
retval = -ENXIO;
goto cleanup1;
}
+ spin_lock_irq(&rtc_lock);
+
if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) {
/* force periodic irq to CMOS reset default of 1024Hz;
*
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index d4b72a9fa2ba..ee2efb496174 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -475,12 +475,14 @@ static int da9063_rtc_probe(struct platform_device *pdev)
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;
- /*
- * TODO: some models have alarms on a minute boundary but still support
- * real hardware interrupts. Add this once the core supports it.
- */
- if (config->rtc_data_start != RTC_SEC)
- rtc->rtc_dev->uie_unsupported = 1;
+ if (config->rtc_data_start != RTC_SEC) {
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtc_dev->features);
+ /*
+ * TODO: some models have alarms on a minute boundary but still
+ * support real hardware interrupts.
+ */
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtc_dev->features);
+ }
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
if (irq_alarm < 0)
@@ -494,6 +496,8 @@ static int da9063_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
irq_alarm, ret);
+ device_init_wakeup(&pdev->dev, true);
+
return devm_rtc_register_device(rtc->rtc_dev);
}
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index ad3add5db4c8..53bb08fe1cd4 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -141,11 +141,9 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
}
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res)
- return -ENODEV;
-
- rtc->rtc_irq = res->start;
+ rtc->rtc_irq = platform_get_irq(pdev, 0);
+ if (rtc->rtc_irq < 0)
+ return rtc->rtc_irq;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c
new file mode 100644
index 000000000000..f717b36f4738
--- /dev/null
+++ b/drivers/rtc/rtc-gamecube.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nintendo GameCube, Wii and Wii U RTC driver
+ *
+ * This driver is for the MX23L4005, more specifically its real-time clock and
+ * SRAM storage. The value returned by the RTC counter must be added with the
+ * offset stored in a bias register in SRAM (on the GameCube and Wii) or in
+ * /config/rtc.xml (on the Wii U). The latter being very impractical to access
+ * from Linux, this driver assumes the bootloader has read it and stored it in
+ * SRAM like for the other two consoles.
+ *
+ * This device sits on a bus named EXI (which is similar to SPI), channel 0,
+ * device 1. This driver assumes no other user of the EXI bus, which is
+ * currently the case but would have to be reworked to add support for other
+ * GameCube hardware exposed on this bus.
+ *
+ * References:
+ * - https://wiiubrew.org/wiki/Hardware/RTC
+ * - https://wiibrew.org/wiki/MX23L4005
+ *
+ * Copyright (C) 2018 rw-r-r-0644
+ * Copyright (C) 2021 Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>
+ *
+ * Based on rtc-gcn.c
+ * Copyright (C) 2004-2009 The GameCube Linux Team
+ * Copyright (C) 2005,2008,2009 Albert Herranz
+ * Based on gamecube_time.c from Torben Nielsen.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/time.h>
+
+/* EXI registers */
+#define EXICSR 0
+#define EXICR 12
+#define EXIDATA 16
+
+/* EXI register values */
+#define EXICSR_DEV 0x380
+ #define EXICSR_DEV1 0x100
+#define EXICSR_CLK 0x070
+ #define EXICSR_CLK_1MHZ 0x000
+ #define EXICSR_CLK_2MHZ 0x010
+ #define EXICSR_CLK_4MHZ 0x020
+ #define EXICSR_CLK_8MHZ 0x030
+ #define EXICSR_CLK_16MHZ 0x040
+ #define EXICSR_CLK_32MHZ 0x050
+#define EXICSR_INT 0x008
+ #define EXICSR_INTSET 0x008
+
+#define EXICR_TSTART 0x001
+#define EXICR_TRSMODE 0x002
+ #define EXICR_TRSMODE_IMM 0x000
+#define EXICR_TRSTYPE 0x00C
+ #define EXICR_TRSTYPE_R 0x000
+ #define EXICR_TRSTYPE_W 0x004
+#define EXICR_TLEN 0x030
+ #define EXICR_TLEN32 0x030
+
+/* EXI registers values to access the RTC */
+#define RTC_EXICSR (EXICSR_DEV1 | EXICSR_CLK_8MHZ | EXICSR_INTSET)
+#define RTC_EXICR_W (EXICR_TSTART | EXICR_TRSMODE_IMM | EXICR_TRSTYPE_W | EXICR_TLEN32)
+#define RTC_EXICR_R (EXICR_TSTART | EXICR_TRSMODE_IMM | EXICR_TRSTYPE_R | EXICR_TLEN32)
+#define RTC_EXIDATA_W 0x80000000
+
+/* RTC registers */
+#define RTC_COUNTER 0x200000
+#define RTC_SRAM 0x200001
+#define RTC_SRAM_BIAS 0x200004
+#define RTC_SNAPSHOT 0x204000
+#define RTC_ONTMR 0x210000
+#define RTC_OFFTMR 0x210001
+#define RTC_TEST0 0x210004
+#define RTC_TEST1 0x210005
+#define RTC_TEST2 0x210006
+#define RTC_TEST3 0x210007
+#define RTC_CONTROL0 0x21000c
+#define RTC_CONTROL1 0x21000d
+
+/* RTC flags */
+#define RTC_CONTROL0_UNSTABLE_POWER 0x00000800
+#define RTC_CONTROL0_LOW_BATTERY 0x00000200
+
+struct priv {
+ struct regmap *regmap;
+ void __iomem *iob;
+ u32 rtc_bias;
+};
+
+static int exi_read(void *context, u32 reg, u32 *data)
+{
+ struct priv *d = (struct priv *)context;
+ void __iomem *iob = d->iob;
+
+ /* The spin loops here loop about 15~16 times each, so there is no need
+ * to use a more expensive sleep method.
+ */
+
+ /* Write register offset */
+ iowrite32be(RTC_EXICSR, iob + EXICSR);
+ iowrite32be(reg << 8, iob + EXIDATA);
+ iowrite32be(RTC_EXICR_W, iob + EXICR);
+ while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET))
+ cpu_relax();
+
+ /* Read data */
+ iowrite32be(RTC_EXICSR, iob + EXICSR);
+ iowrite32be(RTC_EXICR_R, iob + EXICR);
+ while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET))
+ cpu_relax();
+ *data = ioread32be(iob + EXIDATA);
+
+ /* Clear channel parameters */
+ iowrite32be(0, iob + EXICSR);
+
+ return 0;
+}
+
+static int exi_write(void *context, u32 reg, u32 data)
+{
+ struct priv *d = (struct priv *)context;
+ void __iomem *iob = d->iob;
+
+ /* The spin loops here loop about 15~16 times each, so there is no need
+ * to use a more expensive sleep method.
+ */
+
+ /* Write register offset */
+ iowrite32be(RTC_EXICSR, iob + EXICSR);
+ iowrite32be(RTC_EXIDATA_W | (reg << 8), iob + EXIDATA);
+ iowrite32be(RTC_EXICR_W, iob + EXICR);
+ while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET))
+ cpu_relax();
+
+ /* Write data */
+ iowrite32be(RTC_EXICSR, iob + EXICSR);
+ iowrite32be(data, iob + EXIDATA);
+ iowrite32be(RTC_EXICR_W, iob + EXICR);
+ while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET))
+ cpu_relax();
+
+ /* Clear channel parameters */
+ iowrite32be(0, iob + EXICSR);
+
+ return 0;
+}
+
+static const struct regmap_bus exi_bus = {
+ /* TODO: is that true? Not that it matters here, but still. */
+ .fast_io = true,
+ .reg_read = exi_read,
+ .reg_write = exi_write,
+};
+
+static int gamecube_rtc_read_time(struct device *dev, struct rtc_time *t)
+{
+ struct priv *d = dev_get_drvdata(dev);
+ int ret;
+ u32 counter;
+ time64_t timestamp;
+
+ ret = regmap_read(d->regmap, RTC_COUNTER, &counter);
+ if (ret)
+ return ret;
+
+ /* Add the counter and the bias to obtain the timestamp */
+ timestamp = (time64_t)d->rtc_bias + counter;
+ rtc_time64_to_tm(timestamp, t);
+
+ return 0;
+}
+
+static int gamecube_rtc_set_time(struct device *dev, struct rtc_time *t)
+{
+ struct priv *d = dev_get_drvdata(dev);
+ time64_t timestamp;
+
+ /* Subtract the timestamp and the bias to obtain the counter value */
+ timestamp = rtc_tm_to_time64(t);
+ return regmap_write(d->regmap, RTC_COUNTER, timestamp - d->rtc_bias);
+}
+
+static int gamecube_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct priv *d = dev_get_drvdata(dev);
+ int value;
+ int control0;
+ int ret;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ ret = regmap_read(d->regmap, RTC_CONTROL0, &control0);
+ if (ret)
+ return ret;
+
+ value = 0;
+ if (control0 & RTC_CONTROL0_UNSTABLE_POWER)
+ value |= RTC_VL_DATA_INVALID;
+ if (control0 & RTC_CONTROL0_LOW_BATTERY)
+ value |= RTC_VL_BACKUP_LOW;
+ return put_user(value, (unsigned int __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct rtc_class_ops gamecube_rtc_ops = {
+ .read_time = gamecube_rtc_read_time,
+ .set_time = gamecube_rtc_set_time,
+ .ioctl = gamecube_rtc_ioctl,
+};
+
+static int gamecube_rtc_read_offset_from_sram(struct priv *d)
+{
+ struct device_node *np;
+ int ret;
+ struct resource res;
+ void __iomem *hw_srnprot;
+ u32 old;
+
+ np = of_find_compatible_node(NULL, NULL, "nintendo,latte-srnprot");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "nintendo,hollywood-srnprot");
+ if (!np) {
+ pr_info("HW_SRNPROT not found, assuming a GameCube\n");
+ return regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias);
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ pr_err("no io memory range found\n");
+ return -1;
+ }
+
+ hw_srnprot = ioremap(res.start, resource_size(&res));
+ old = ioread32be(hw_srnprot);
+
+ /* TODO: figure out why we use this magic constant. I obtained it by
+ * reading the leftover value after boot, after IOSU already ran.
+ *
+ * On my Wii U, setting this register to 1 prevents the console from
+ * rebooting properly, so wiiubrew.org must be missing something.
+ *
+ * See https://wiiubrew.org/wiki/Hardware/Latte_registers
+ */
+ if (old != 0x7bf)
+ iowrite32be(0x7bf, hw_srnprot);
+
+ /* Get the offset from RTC SRAM.
+ *
+ * Its default location on the GameCube and on the Wii is in the SRAM,
+ * while on the Wii U the bootloader needs to fill it with the contents
+ * of /config/rtc.xml on the SLC (the eMMC). We don’t do that from
+ * Linux since it would require implementing a proprietary filesystem
+ * and doing file decryption; instead we require the bootloader to fill
+ * the same
+ * SRAM address as on previous consoles.
+ */
+ ret = regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias);
+ if (ret) {
+ pr_err("failed to get the RTC bias\n");
+ return -1;
+ }
+
+ /* Reset SRAM access to how it was before, our job here is done. */
+ if (old != 0x7bf)
+ iowrite32be(old, hw_srnprot);
+ iounmap(hw_srnprot);
+
+ return 0;
+}
+
+static const struct regmap_range rtc_rd_ranges[] = {
+ regmap_reg_range(0x200000, 0x200010),
+ regmap_reg_range(0x204000, 0x204000),
+ regmap_reg_range(0x210000, 0x210001),
+ regmap_reg_range(0x210004, 0x210007),
+ regmap_reg_range(0x21000c, 0x21000d),
+};
+
+static const struct regmap_access_table rtc_rd_regs = {
+ .yes_ranges = rtc_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rtc_rd_ranges),
+};
+
+static const struct regmap_range rtc_wr_ranges[] = {
+ regmap_reg_range(0x200000, 0x200010),
+ regmap_reg_range(0x204000, 0x204000),
+ regmap_reg_range(0x210000, 0x210001),
+ regmap_reg_range(0x21000d, 0x21000d),
+};
+
+static const struct regmap_access_table rtc_wr_regs = {
+ .yes_ranges = rtc_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rtc_wr_ranges),
+};
+
+static const struct regmap_config gamecube_rtc_regmap_config = {
+ .reg_bits = 24,
+ .val_bits = 32,
+ .rd_table = &rtc_rd_regs,
+ .wr_table = &rtc_wr_regs,
+ .max_register = 0x21000d,
+ .name = "gamecube-rtc",
+};
+
+static int gamecube_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rtc_device *rtc;
+ struct priv *d;
+ int ret;
+
+ d = devm_kzalloc(dev, sizeof(struct priv), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->iob = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(d->iob))
+ return PTR_ERR(d->iob);
+
+ d->regmap = devm_regmap_init(dev, &exi_bus, d,
+ &gamecube_rtc_regmap_config);
+ if (IS_ERR(d->regmap))
+ return PTR_ERR(d->regmap);
+
+ ret = gamecube_rtc_read_offset_from_sram(d);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "SRAM bias: 0x%x", d->rtc_bias);
+
+ dev_set_drvdata(dev, d);
+
+ rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ /* We could represent times further than that, but it depends on the
+ * stored bias and we can’t modify it persistently on all supported
+ * consoles, so here we pretend to be limited to 2106.
+ */
+ rtc->range_min = 0;
+ rtc->range_max = U32_MAX;
+ rtc->ops = &gamecube_rtc_ops;
+
+ devm_rtc_register_device(rtc);
+
+ return 0;
+}
+
+static const struct of_device_id gamecube_rtc_of_match[] = {
+ {.compatible = "nintendo,latte-exi" },
+ {.compatible = "nintendo,hollywood-exi" },
+ {.compatible = "nintendo,flipper-exi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gamecube_rtc_of_match);
+
+static struct platform_driver gamecube_rtc_driver = {
+ .probe = gamecube_rtc_probe,
+ .driver = {
+ .name = "rtc-gamecube",
+ .of_match_table = gamecube_rtc_of_match,
+ },
+};
+module_platform_driver(gamecube_rtc_driver);
+
+MODULE_AUTHOR("Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>");
+MODULE_DESCRIPTION("Nintendo GameCube, Wii and Wii U RTC driver");
+MODULE_LICENSE("GPL");
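
For completeness, the RTC_VL_READ handler in this new driver is reachable through the standard RTC character-device ABI. A hedged userspace sketch (the node name /dev/rtc0 is an assumption; error handling trimmed):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned int flags = 0;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_VL_READ, &flags) < 0)
		return 1;
	if (flags & RTC_VL_DATA_INVALID)
		printf("stored time may be invalid (unstable power)\n");
	if (flags & RTC_VL_BACKUP_LOW)
		printf("backup battery low\n");
	close(fd);
	return 0;
}
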
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index dcfaf09946ee..ae9f131b43c0 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -8,48 +8,100 @@
#include <linux/acpi.h>
#endif
-unsigned int mc146818_get_time(struct rtc_time *time)
+/*
+ * Execute a function while the UIP (Update-in-progress) bit of the RTC is
+ * unset.
+ *
+ * Warning: callback may be executed more than once.
+ */
+bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ void *param)
{
- unsigned char ctrl;
+ int i;
unsigned long flags;
- unsigned char century = 0;
- bool retry;
+ unsigned char seconds;
-#ifdef CONFIG_MACH_DECSTATION
- unsigned int real_year;
-#endif
+ for (i = 0; i < 10; i++) {
+ spin_lock_irqsave(&rtc_lock, flags);
-again:
- spin_lock_irqsave(&rtc_lock, flags);
- /* Ensure that the RTC is accessible. Bit 6 must be 0! */
- if (WARN_ON_ONCE((CMOS_READ(RTC_VALID) & 0x40) != 0)) {
- spin_unlock_irqrestore(&rtc_lock, flags);
- memset(time, 0xff, sizeof(*time));
- return 0;
- }
+ /*
+ * Check whether there is an update in progress during which the
+ * readout is unspecified. The maximum update time is ~2ms. Poll
+ * every msec for completion.
+ *
+ * Store the second value before checking UIP so a long lasting
+ * NMI which happens to hit after the UIP check cannot make
+ * an update cycle invisible.
+ */
+ seconds = CMOS_READ(RTC_SECONDS);
- /*
- * Check whether there is an update in progress during which the
- * readout is unspecified. The maximum update time is ~2ms. Poll
- * every msec for completion.
- *
- * Store the second value before checking UIP so a long lasting NMI
- * which happens to hit after the UIP check cannot make an update
- * cycle invisible.
- */
- time->tm_sec = CMOS_READ(RTC_SECONDS);
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ mdelay(1);
+ continue;
+ }
- if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
- spin_unlock_irqrestore(&rtc_lock, flags);
- mdelay(1);
- goto again;
- }
+ /* Revalidate the above readout */
+ if (seconds != CMOS_READ(RTC_SECONDS)) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ continue;
+ }
- /* Revalidate the above readout */
- if (time->tm_sec != CMOS_READ(RTC_SECONDS)) {
+ if (callback)
+ callback(seconds, param);
+
+ /*
+ * Check for the UIP bit again. If it is set now then
+ * the above values may contain garbage.
+ */
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ mdelay(1);
+ continue;
+ }
+
+ /*
+ * A NMI might have interrupted the above sequence so check
+ * whether the seconds value has changed which indicates that
+ * the NMI took longer than the UIP bit was set. Unlikely, but
+ * possible and there is also virt...
+ */
+ if (seconds != CMOS_READ(RTC_SECONDS)) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ continue;
+ }
spin_unlock_irqrestore(&rtc_lock, flags);
- goto again;
+
+ return true;
}
+ return false;
+}
+EXPORT_SYMBOL_GPL(mc146818_avoid_UIP);
+
+/*
+ * If the UIP (Update-in-progress) bit of the RTC is set for more than
+ * 10ms, the RTC is apparently broken or not present.
+ */
+bool mc146818_does_rtc_work(void)
+{
+ return mc146818_avoid_UIP(NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(mc146818_does_rtc_work);
+
+struct mc146818_get_time_callback_param {
+ struct rtc_time *time;
+ unsigned char ctrl;
+#ifdef CONFIG_ACPI
+ unsigned char century;
+#endif
+#ifdef CONFIG_MACH_DECSTATION
+ unsigned int real_year;
+#endif
+};
+
+static void mc146818_get_time_callback(unsigned char seconds, void *param_in)
+{
+ struct mc146818_get_time_callback_param *p = param_in;
/*
* Only the values that we read from the RTC are set. We leave
@@ -57,39 +109,39 @@ again:
* RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
* by the RTC when initially set to a non-zero value.
*/
- time->tm_min = CMOS_READ(RTC_MINUTES);
- time->tm_hour = CMOS_READ(RTC_HOURS);
- time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
- time->tm_mon = CMOS_READ(RTC_MONTH);
- time->tm_year = CMOS_READ(RTC_YEAR);
+ p->time->tm_sec = seconds;
+ p->time->tm_min = CMOS_READ(RTC_MINUTES);
+ p->time->tm_hour = CMOS_READ(RTC_HOURS);
+ p->time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
+ p->time->tm_mon = CMOS_READ(RTC_MONTH);
+ p->time->tm_year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_MACH_DECSTATION
- real_year = CMOS_READ(RTC_DEC_YEAR);
+ p->real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
#ifdef CONFIG_ACPI
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
- acpi_gbl_FADT.century)
- century = CMOS_READ(acpi_gbl_FADT.century);
+ acpi_gbl_FADT.century) {
+ p->century = CMOS_READ(acpi_gbl_FADT.century);
+ } else {
+ p->century = 0;
+ }
#endif
- ctrl = CMOS_READ(RTC_CONTROL);
- /*
- * Check for the UIP bit again. If it is set now then
- * the above values may contain garbage.
- */
- retry = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
- /*
- * A NMI might have interrupted the above sequence so check whether
- * the seconds value has changed which indicates that the NMI took
- * longer than the UIP bit was set. Unlikely, but possible and
- * there is also virt...
- */
- retry |= time->tm_sec != CMOS_READ(RTC_SECONDS);
- spin_unlock_irqrestore(&rtc_lock, flags);
+ p->ctrl = CMOS_READ(RTC_CONTROL);
+}
- if (retry)
- goto again;
+int mc146818_get_time(struct rtc_time *time)
+{
+ struct mc146818_get_time_callback_param p = {
+ .time = time
+ };
+
+ if (!mc146818_avoid_UIP(mc146818_get_time_callback, &p)) {
+ memset(time, 0, sizeof(*time));
+ return -EIO;
+ }
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ if (!(p.ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
{
time->tm_sec = bcd2bin(time->tm_sec);
time->tm_min = bcd2bin(time->tm_min);
@@ -97,15 +149,19 @@ again:
time->tm_mday = bcd2bin(time->tm_mday);
time->tm_mon = bcd2bin(time->tm_mon);
time->tm_year = bcd2bin(time->tm_year);
- century = bcd2bin(century);
+#ifdef CONFIG_ACPI
+ p.century = bcd2bin(p.century);
+#endif
}
#ifdef CONFIG_MACH_DECSTATION
- time->tm_year += real_year - 72;
+ time->tm_year += p.real_year - 72;
#endif
- if (century > 20)
- time->tm_year += (century - 19) * 100;
+#ifdef CONFIG_ACPI
+ if (p.century > 19)
+ time->tm_year += (p.century - 19) * 100;
+#endif
/*
* Account for differences between how the RTC uses the values
@@ -116,7 +172,7 @@ again:
time->tm_mon--;
- return RTC_24H;
+ return 0;
}
EXPORT_SYMBOL_GPL(mc146818_get_time);
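
The helper above is the piece the rtc-cmos conversion builds on: the callback runs with rtc_lock held in a window where UIP is clear, it may be invoked more than once, and after ten failed attempts the whole call returns false (mc146818_does_rtc_work() is just that probe with a NULL callback). A sketch of a caller, with hypothetical names:

struct my_param {
	unsigned char minutes;
};

static void my_callback(unsigned char seconds, void *data)
{
	struct my_param *p = data;

	/* UIP is clear and rtc_lock is held while this runs. */
	p->minutes = CMOS_READ(RTC_MINUTES);
}

static int my_read(void)
{
	struct my_param p;

	if (!mc146818_avoid_UIP(my_callback, &p))
		return -EIO;	/* RTC broken or not present */
	return p.minutes;
}
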
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 56c58b055dff..81a5b1f2e68c 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -748,7 +748,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
/*
* Enable timestamp function and store timestamp of first trigger
- * event until TSF1 and TFS2 interrupt flags are cleared.
+ * event until TSF1 and TSF2 interrupt flags are cleared.
*/
ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_TS_CTRL,
PCF2127_BIT_TS_CTRL_TSOFF |
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 15e50bb10cf0..df2b072c394d 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -514,21 +514,56 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
}
#endif
-static const struct pcf85063_config pcf85063tp_config = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x0a,
+enum pcf85063_type {
+ PCF85063,
+ PCF85063TP,
+ PCF85063A,
+ RV8263,
+ PCF85063_LAST_ID
+};
+
+static struct pcf85063_config pcf85063_cfg[] = {
+ [PCF85063] = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
+ },
+ },
+ [PCF85063TP] = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
+ },
+ },
+ [PCF85063A] = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+ },
+ [RV8263] = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+ .force_cap_7000 = 1,
},
};
+static const struct i2c_device_id pcf85063_ids[];
+
static int pcf85063_probe(struct i2c_client *client)
{
struct pcf85063 *pcf85063;
unsigned int tmp;
int err;
- const struct pcf85063_config *config = &pcf85063tp_config;
- const void *data = of_device_get_match_data(&client->dev);
+ const struct pcf85063_config *config;
struct nvmem_config nvmem_cfg = {
.name = "pcf85063_nvram",
.reg_read = pcf85063_nvmem_read,
@@ -544,8 +579,17 @@ static int pcf85063_probe(struct i2c_client *client)
if (!pcf85063)
return -ENOMEM;
- if (data)
- config = data;
+ if (client->dev.of_node) {
+ config = of_device_get_match_data(&client->dev);
+ if (!config)
+ return -ENODEV;
+ } else {
+ enum pcf85063_type type =
+ i2c_match_id(pcf85063_ids, client)->driver_data;
+ if (type >= PCF85063_LAST_ID)
+ return -ENODEV;
+ config = &pcf85063_cfg[type];
+ }
pcf85063->regmap = devm_regmap_init_i2c(client, &config->regmap);
if (IS_ERR(pcf85063->regmap))
@@ -604,31 +648,21 @@ static int pcf85063_probe(struct i2c_client *client)
return devm_rtc_register_device(pcf85063->rtc);
}
-#ifdef CONFIG_OF
-static const struct pcf85063_config pcf85063a_config = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
-};
-
-static const struct pcf85063_config rv8263_config = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
- .force_cap_7000 = 1,
+static const struct i2c_device_id pcf85063_ids[] = {
+ { "pcf85063", PCF85063 },
+ { "pcf85063tp", PCF85063TP },
+ { "pcf85063a", PCF85063A },
+ { "rv8263", RV8263 },
+ {}
};
+MODULE_DEVICE_TABLE(i2c, pcf85063_ids);
+#ifdef CONFIG_OF
static const struct of_device_id pcf85063_of_match[] = {
- { .compatible = "nxp,pcf85063", .data = &pcf85063tp_config },
- { .compatible = "nxp,pcf85063tp", .data = &pcf85063tp_config },
- { .compatible = "nxp,pcf85063a", .data = &pcf85063a_config },
- { .compatible = "microcrystal,rv8263", .data = &rv8263_config },
+ { .compatible = "nxp,pcf85063", .data = &pcf85063_cfg[PCF85063] },
+ { .compatible = "nxp,pcf85063tp", .data = &pcf85063_cfg[PCF85063TP] },
+ { .compatible = "nxp,pcf85063a", .data = &pcf85063_cfg[PCF85063A] },
+ { .compatible = "microcrystal,rv8263", .data = &pcf85063_cfg[RV8263] },
{}
};
MODULE_DEVICE_TABLE(of, pcf85063_of_match);
@@ -640,6 +674,7 @@ static struct i2c_driver pcf85063_driver = {
.of_match_table = of_match_ptr(pcf85063_of_match),
},
.probe_new = pcf85063_probe,
+ .id_table = pcf85063_ids,
};
module_i2c_driver(pcf85063_driver);
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index d2f1d8f754bf..cf8119b6d320 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -330,6 +330,10 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
if (sa1100_rtc->irq_alarm < 0)
return -ENXIO;
+ sa1100_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(sa1100_rtc->rtc))
+ return PTR_ERR(sa1100_rtc->rtc);
+
pxa_rtc->base = devm_ioremap(dev, pxa_rtc->ress->start,
resource_size(pxa_rtc->ress));
if (!pxa_rtc->base) {
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 80980414890c..cb15983383f5 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -28,8 +28,10 @@
#define RS5C372_REG_MONTH 5
#define RS5C372_REG_YEAR 6
#define RS5C372_REG_TRIM 7
-# define RS5C372_TRIM_XSL 0x80
+# define RS5C372_TRIM_XSL 0x80 /* only if RS5C372[a|b] */
# define RS5C372_TRIM_MASK 0x7F
+# define R2221TL_TRIM_DEV (1 << 7) /* only if R2221TL */
+# define RS5C372_TRIM_DECR (1 << 6)
#define RS5C_REG_ALARM_A_MIN 8 /* or ALARM_W */
#define RS5C_REG_ALARM_A_HOURS 9
@@ -324,8 +326,12 @@ static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
u8 tmp = rs5c372->regs[RS5C372_REG_TRIM];
- if (osc)
- *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
+ if (osc) {
+ if (rs5c372->type == rtc_rs5c372a || rs5c372->type == rtc_rs5c372b)
+ *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
+ else
+ *osc = 32768;
+ }
if (trim) {
dev_dbg(&client->dev, "%s: raw trim=%x\n", __func__, tmp);
@@ -485,6 +491,176 @@ static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq)
#define rs5c372_rtc_proc NULL
#endif
+#ifdef CONFIG_RTC_INTF_DEV
+static int rs5c372_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev));
+ unsigned char ctrl2;
+ int addr;
+ unsigned int flags;
+
+ dev_dbg(dev, "%s: cmd=%x\n", __func__, cmd);
+
+ addr = RS5C_ADDR(RS5C_REG_CTRL2);
+ ctrl2 = i2c_smbus_read_byte_data(rs5c->client, addr);
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ flags = 0;
+
+ switch (rs5c->type) {
+ case rtc_r2025sd:
+ case rtc_r2221tl:
+ if ((rs5c->type == rtc_r2025sd && !(ctrl2 & R2x2x_CTRL2_XSTP)) ||
+ (rs5c->type == rtc_r2221tl && (ctrl2 & R2x2x_CTRL2_XSTP))) {
+ flags |= RTC_VL_DATA_INVALID;
+ }
+ if (ctrl2 & R2x2x_CTRL2_VDET)
+ flags |= RTC_VL_BACKUP_LOW;
+ break;
+ default:
+ if (ctrl2 & RS5C_CTRL2_XSTP)
+ flags |= RTC_VL_DATA_INVALID;
+ break;
+ }
+
+ return put_user(flags, (unsigned int __user *)arg);
+ case RTC_VL_CLR:
+ /* clear VDET bit */
+ if (rs5c->type == rtc_r2025sd || rs5c->type == rtc_r2221tl) {
+ ctrl2 &= ~R2x2x_CTRL2_VDET;
+ if (i2c_smbus_write_byte_data(rs5c->client, addr, ctrl2) < 0) {
+ dev_dbg(&rs5c->client->dev, "%s: write error in line %i\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+ }
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+#else
+#define rs5c372_ioctl NULL
+#endif
+
+static int rs5c372_read_offset(struct device *dev, long *offset)
+{
+ struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev));
+ u8 val = rs5c->regs[RS5C372_REG_TRIM];
+ long ppb_per_step = 0;
+ bool decr = val & RS5C372_TRIM_DECR;
+
+ switch (rs5c->type) {
+ case rtc_r2221tl:
+ ppb_per_step = val & R2221TL_TRIM_DEV ? 1017 : 3051;
+ break;
+ case rtc_rs5c372a:
+ case rtc_rs5c372b:
+ ppb_per_step = val & RS5C372_TRIM_XSL ? 3125 : 3051;
+ break;
+ default:
+ ppb_per_step = 3051;
+ break;
+ }
+
+ /* Only bits[0:5] represent the time counts */
+ val &= 0x3F;
+
+ /* If bits[1:5] are all 0, it means no increment or decrement */
+ if (!(val & 0x3E)) {
+ *offset = 0;
+ } else {
+ if (decr)
+ *offset = -(((~val) & 0x3F) + 1) * ppb_per_step;
+ else
+ *offset = (val - 1) * ppb_per_step;
+ }
+
+ return 0;
+}
+
+static int rs5c372_set_offset(struct device *dev, long offset)
+{
+ struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev));
+ int addr = RS5C_ADDR(RS5C372_REG_TRIM);
+ u8 val = 0;
+ u8 tmp = 0;
+ long ppb_per_step = 3051;
+ long steps = LONG_MIN;
+
+ switch (rs5c->type) {
+ case rtc_rs5c372a:
+ case rtc_rs5c372b:
+ tmp = rs5c->regs[RS5C372_REG_TRIM];
+ if (tmp & RS5C372_TRIM_XSL) {
+ ppb_per_step = 3125;
+ val |= RS5C372_TRIM_XSL;
+ }
+ break;
+ case rtc_r2221tl:
+ /*
+ * Check if it is possible to use high resolution mode (DEV=1).
+ * In this mode, the minimum resolution is 2 / (32768 * 20 * 3),
+ * which is about 1017 ppb.
+ */
+ steps = DIV_ROUND_CLOSEST(offset, 1017);
+ if (steps >= -0x3E && steps <= 0x3E) {
+ ppb_per_step = 1017;
+ val |= R2221TL_TRIM_DEV;
+ } else {
+ /*
+ * offset is out of the range of high resolution mode.
+ * Try to use low resolution mode (DEV=0). In this mode,
+ * the minimum resolution is 2 / (32768 * 20), which is
+ * about 3051 ppb.
+ */
+ steps = LONG_MIN;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (steps == LONG_MIN) {
+ steps = DIV_ROUND_CLOSEST(offset, ppb_per_step);
+ if (steps > 0x3E || steps < -0x3E)
+ return -ERANGE;
+ }
+
+ if (steps > 0) {
+ val |= steps + 1;
+ } else {
+ val |= RS5C372_TRIM_DECR;
+ val |= (~(-steps - 1)) & 0x3F;
+ }
+
+ if (!steps || !(val & 0x3E)) {
+ /*
+ * if offset is too small, set oscillation adjustment register
+ * or time trimming register with its default value which means
+ * no increment or decrement. But for rs5c372[a|b], the XSL bit
+ * should be kept unchanged.
+ */
+ if (rs5c->type == rtc_rs5c372a || rs5c->type == rtc_rs5c372b)
+ val &= RS5C372_TRIM_XSL;
+ else
+ val = 0;
+ }
+
+ dev_dbg(&rs5c->client->dev, "write 0x%x for offset %ld\n", val, offset);
+
+ if (i2c_smbus_write_byte_data(rs5c->client, addr, val) < 0) {
+ dev_err(&rs5c->client->dev, "failed to write 0x%x to reg %d\n", val, addr);
+ return -EIO;
+ }
+
+ rs5c->regs[RS5C372_REG_TRIM] = val;
+
+ return 0;
+}
+
static const struct rtc_class_ops rs5c372_rtc_ops = {
.proc = rs5c372_rtc_proc,
.read_time = rs5c372_rtc_read_time,
@@ -492,6 +668,9 @@ static const struct rtc_class_ops rs5c372_rtc_ops = {
.read_alarm = rs5c_read_alarm,
.set_alarm = rs5c_set_alarm,
.alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
+ .ioctl = rs5c372_ioctl,
+ .read_offset = rs5c372_read_offset,
+ .set_offset = rs5c372_set_offset,
};
#if IS_ENABLED(CONFIG_RTC_INTF_SYSFS)
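
The per-step figures in the offset code above (3051, 1017 and 3125 ppb) all follow from the formula quoted in the R2221TL comment: two oscillator pulses per trim step, applied over what is apparently a 20-second adjustment window, divided by a further 3 in high-resolution (DEV=1) mode, with the XSL variants running a 32.000 kHz crystal. A standalone check of the arithmetic (illustration only, not kernel code):

#include <stdio.h>

int main(void)
{
	printf("%.1f ppb\n", 2.0 / (32768 * 20) * 1e9);		/* ~3051.8 */
	printf("%.1f ppb\n", 2.0 / (32768 * 20 * 3) * 1e9);	/* ~1017.3 */
	printf("%.1f ppb\n", 2.0 / (32000 * 20) * 1e9);		/* 3125.0 (XSL) */
	return 0;
}
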
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 0d5ed38bf60c..f69e0b1137cd 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -55,6 +55,7 @@
enum rv8803_type {
rv_8803,
+ rx_8804,
rx_8900
};
@@ -601,6 +602,7 @@ static int rv8803_probe(struct i2c_client *client,
static const struct i2c_device_id rv8803_id[] = {
{ "rv8803", rv_8803 },
+ { "rv8804", rx_8804 },
{ "rx8803", rv_8803 },
{ "rx8900", rx_8900 },
{ }
@@ -617,6 +619,10 @@ static const __maybe_unused struct of_device_id rv8803_of_match[] = {
.data = (void *)rv_8803
},
{
+ .compatible = "epson,rx8804",
+ .data = (void *)rx_8804
+ },
+ {
.compatible = "epson,rx8900",
.data = (void *)rx_8900
},
diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c
new file mode 100644
index 000000000000..e8e2ab1103fc
--- /dev/null
+++ b/drivers/rtc/rtc-sunplus.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * The RTC driver for Sunplus SP7021
+ *
+ * Copyright (C) 2019 Sunplus Technology Inc., All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/rtc.h>
+
+#define RTC_REG_NAME "rtc"
+
+#define RTC_CTRL 0x40
+#define TIMER_FREEZE_MASK_BIT BIT(5 + 16)
+#define TIMER_FREEZE BIT(5)
+#define DIS_SYS_RST_RTC_MASK_BIT BIT(4 + 16)
+#define DIS_SYS_RST_RTC BIT(4)
+#define RTC32K_MODE_RESET_MASK_BIT BIT(3 + 16)
+#define RTC32K_MODE_RESET BIT(3)
+#define ALARM_EN_OVERDUE_MASK_BIT BIT(2 + 16)
+#define ALARM_EN_OVERDUE BIT(2)
+#define ALARM_EN_PMC_MASK_BIT BIT(1 + 16)
+#define ALARM_EN_PMC BIT(1)
+#define ALARM_EN_MASK_BIT BIT(0 + 16)
+#define ALARM_EN BIT(0)
+#define RTC_TIMER_OUT 0x44
+#define RTC_DIVIDER 0x48
+#define RTC_TIMER_SET 0x4c
+#define RTC_ALARM_SET 0x50
+#define RTC_USER_DATA 0x54
+#define RTC_RESET_RECORD 0x58
+#define RTC_BATT_CHARGE_CTRL 0x5c
+#define BAT_CHARGE_RSEL_MASK_BIT GENMASK(3 + 16, 2 + 16)
+#define BAT_CHARGE_RSEL_MASK GENMASK(3, 2)
+#define BAT_CHARGE_RSEL_2K_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 0)
+#define BAT_CHARGE_RSEL_250_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 1)
+#define BAT_CHARGE_RSEL_50_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 2)
+#define BAT_CHARGE_RSEL_0_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 3)
+#define BAT_CHARGE_DSEL_MASK_BIT BIT(1 + 16)
+#define BAT_CHARGE_DSEL_MASK GENMASK(1, 1)
+#define BAT_CHARGE_DSEL_ON FIELD_PREP(BAT_CHARGE_DSEL_MASK, 0)
+#define BAT_CHARGE_DSEL_OFF FIELD_PREP(BAT_CHARGE_DSEL_MASK, 1)
+#define BAT_CHARGE_EN_MASK_BIT BIT(0 + 16)
+#define BAT_CHARGE_EN BIT(0)
+#define RTC_TRIM_CTRL 0x60
+
+struct sunplus_rtc {
+ struct rtc_device *rtc;
+ struct resource *res;
+ struct clk *rtcclk;
+ struct reset_control *rstc;
+ void __iomem *reg_base;
+ int irq;
+};
+
+static void sp_get_seconds(struct device *dev, unsigned long *secs)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+
+ *secs = (unsigned long)readl(sp_rtc->reg_base + RTC_TIMER_OUT);
+}
+
+static void sp_set_seconds(struct device *dev, unsigned long secs)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+
+ writel((u32)secs, sp_rtc->reg_base + RTC_TIMER_SET);
+}
+
+static int sp_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned long secs;
+
+ sp_get_seconds(dev, &secs);
+ rtc_time64_to_tm(secs, tm);
+
+ return 0;
+}
+
+static int sp_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned long secs;
+
+ secs = rtc_tm_to_time64(tm);
+ dev_dbg(dev, "%s, secs = %lu\n", __func__, secs);
+ sp_set_seconds(dev, secs);
+
+ return 0;
+}
+
+static int sp_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+ unsigned long alarm_time;
+
+ alarm_time = rtc_tm_to_time64(&alrm->time);
+ dev_dbg(dev, "%s, alarm_time: %u\n", __func__, (u32)(alarm_time));
+ writel((u32)alarm_time, sp_rtc->reg_base + RTC_ALARM_SET);
+
+ return 0;
+}
+
+static int sp_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+ unsigned int alarm_time;
+
+ alarm_time = readl(sp_rtc->reg_base + RTC_ALARM_SET);
+ dev_dbg(dev, "%s, alarm_time: %u\n", __func__, alarm_time);
+
+ if (alarm_time == 0)
+ alrm->enabled = 0;
+ else
+ alrm->enabled = 1;
+
+ rtc_time64_to_tm((unsigned long)(alarm_time), &alrm->time);
+
+ return 0;
+}
+
+static int sp_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+
+ if (enabled)
+ writel((TIMER_FREEZE_MASK_BIT | DIS_SYS_RST_RTC_MASK_BIT |
+ RTC32K_MODE_RESET_MASK_BIT | ALARM_EN_OVERDUE_MASK_BIT |
+ ALARM_EN_PMC_MASK_BIT | ALARM_EN_MASK_BIT) |
+ (DIS_SYS_RST_RTC | ALARM_EN_OVERDUE | ALARM_EN_PMC | ALARM_EN),
+ sp_rtc->reg_base + RTC_CTRL);
+ else
+ writel((ALARM_EN_OVERDUE_MASK_BIT | ALARM_EN_PMC_MASK_BIT | ALARM_EN_MASK_BIT) |
+ 0x0, sp_rtc->reg_base + RTC_CTRL);
+
+ return 0;
+}
+
+static const struct rtc_class_ops sp_rtc_ops = {
+ .read_time = sp_rtc_read_time,
+ .set_time = sp_rtc_set_time,
+ .set_alarm = sp_rtc_set_alarm,
+ .read_alarm = sp_rtc_read_alarm,
+ .alarm_irq_enable = sp_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t sp_rtc_irq_handler(int irq, void *dev_id)
+{
+ struct platform_device *plat_dev = dev_id;
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(&plat_dev->dev);
+
+ rtc_update_irq(sp_rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ dev_dbg(&plat_dev->dev, "[RTC] ALARM INT\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * -------------------------------------------------------------------------------------
+ * bat_charge_rsel bat_charge_dsel bat_charge_en Remarks
+ * x x 0 Disable
+ * 0 0 1 0.86mA (2K Ohm with diode)
+ * 1 0 1 1.81mA (250 Ohm with diode)
+ * 2 0 1 2.07mA (50 Ohm with diode)
+ * 3 0 1 16.0mA (0 Ohm with diode)
+ * 0 1 1 1.36mA (2K Ohm without diode)
+ * 1 1 1 3.99mA (250 Ohm without diode)
+ * 2 1 1 4.41mA (50 Ohm without diode)
+ * 3 1 1 16.0mA (0 Ohm without diode)
+ * -------------------------------------------------------------------------------------
+ */
+static void sp_rtc_set_trickle_charger(struct device dev)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(&dev);
+ u32 ohms, rsel;
+ u32 chargeable;
+
+ if (of_property_read_u32(dev.of_node, "trickle-resistor-ohms", &ohms) ||
+ of_property_read_u32(dev.of_node, "aux-voltage-chargeable", &chargeable)) {
+ dev_warn(&dev, "battery charger disabled\n");
+ return;
+ }
+
+ switch (ohms) {
+ case 2000:
+ rsel = BAT_CHARGE_RSEL_2K_OHM;
+ break;
+ case 250:
+ rsel = BAT_CHARGE_RSEL_250_OHM;
+ break;
+ case 50:
+ rsel = BAT_CHARGE_RSEL_50_OHM;
+ break;
+ case 0:
+ rsel = BAT_CHARGE_RSEL_0_OHM;
+ break;
+ default:
+ dev_err(&dev, "invalid charger resistor value (%d)\n", ohms);
+ return;
+ }
+
+ writel(BAT_CHARGE_RSEL_MASK_BIT | rsel, sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
+
+ switch (chargeable) {
+ case 0:
+ writel(BAT_CHARGE_DSEL_MASK_BIT | BAT_CHARGE_DSEL_OFF,
+ sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
+ break;
+ case 1:
+ writel(BAT_CHARGE_DSEL_MASK_BIT | BAT_CHARGE_DSEL_ON,
+ sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
+ break;
+ default:
+ dev_err(&dev, "invalid aux-voltage-chargeable value (%d)\n", chargeable);
+ return;
+ }
+
+ writel(BAT_CHARGE_EN_MASK_BIT | BAT_CHARGE_EN, sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
+}
+
+static int sp_rtc_probe(struct platform_device *plat_dev)
+{
+ struct sunplus_rtc *sp_rtc;
+ int ret;
+
+ sp_rtc = devm_kzalloc(&plat_dev->dev, sizeof(*sp_rtc), GFP_KERNEL);
+ if (!sp_rtc)
+ return -ENOMEM;
+
+ sp_rtc->res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, RTC_REG_NAME);
+ sp_rtc->reg_base = devm_ioremap_resource(&plat_dev->dev, sp_rtc->res);
+ if (IS_ERR(sp_rtc->reg_base))
+ return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->reg_base),
+ "%s devm_ioremap_resource fail\n", RTC_REG_NAME);
+ dev_dbg(&plat_dev->dev, "res = 0x%x, reg_base = 0x%lx\n",
+ sp_rtc->res->start, (unsigned long)sp_rtc->reg_base);
+
+ sp_rtc->irq = platform_get_irq(plat_dev, 0);
+ if (sp_rtc->irq < 0)
+ return dev_err_probe(&plat_dev->dev, sp_rtc->irq, "platform_get_irq failed\n");
+
+ ret = devm_request_irq(&plat_dev->dev, sp_rtc->irq, sp_rtc_irq_handler,
+ IRQF_TRIGGER_RISING, "rtc irq", plat_dev);
+ if (ret)
+ return dev_err_probe(&plat_dev->dev, ret, "devm_request_irq failed\n");
+
+ sp_rtc->rtcclk = devm_clk_get(&plat_dev->dev, NULL);
+ if (IS_ERR(sp_rtc->rtcclk))
+ return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->rtcclk),
+ "devm_clk_get fail\n");
+
+ sp_rtc->rstc = devm_reset_control_get_exclusive(&plat_dev->dev, NULL);
+ if (IS_ERR(sp_rtc->rstc))
+ return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->rstc),
+ "failed to retrieve reset controller\n");
+
+ ret = clk_prepare_enable(sp_rtc->rtcclk);
+ if (ret)
+ goto free_clk;
+
+ ret = reset_control_deassert(sp_rtc->rstc);
+ if (ret)
+ goto free_reset_assert;
+
+ device_init_wakeup(&plat_dev->dev, 1);
+ dev_set_drvdata(&plat_dev->dev, sp_rtc);
+
+ sp_rtc->rtc = devm_rtc_allocate_device(&plat_dev->dev);
+ if (IS_ERR(sp_rtc->rtc)) {
+ ret = PTR_ERR(sp_rtc->rtc);
+ goto free_reset_assert;
+ }
+
+ sp_rtc->rtc->range_max = U32_MAX;
+ sp_rtc->rtc->range_min = 0;
+ sp_rtc->rtc->ops = &sp_rtc_ops;
+
+ ret = devm_rtc_register_device(sp_rtc->rtc);
+ if (ret)
+ goto free_reset_assert;
+
+ /* Setup trickle charger */
+ if (plat_dev->dev.of_node)
+ sp_rtc_set_trickle_charger(plat_dev->dev);
+
+ /* Keep RTC from system reset */
+ writel(DIS_SYS_RST_RTC_MASK_BIT | DIS_SYS_RST_RTC, sp_rtc->reg_base + RTC_CTRL);
+
+ return 0;
+
+free_reset_assert:
+ reset_control_assert(sp_rtc->rstc);
+free_clk:
+ clk_disable_unprepare(sp_rtc->rtcclk);
+
+ return ret;
+}
+
+static int sp_rtc_remove(struct platform_device *plat_dev)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(&plat_dev->dev);
+
+ device_init_wakeup(&plat_dev->dev, 0);
+ reset_control_assert(sp_rtc->rstc);
+ clk_disable_unprepare(sp_rtc->rtcclk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sp_rtc_suspend(struct device *dev)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(sp_rtc->irq);
+
+ return 0;
+}
+
+static int sp_rtc_resume(struct device *dev)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(sp_rtc->irq);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id sp_rtc_of_match[] = {
+ { .compatible = "sunplus,sp7021-rtc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sp_rtc_of_match);
+
+static SIMPLE_DEV_PM_OPS(sp_rtc_pm_ops, sp_rtc_suspend, sp_rtc_resume);
+
+static struct platform_driver sp_rtc_driver = {
+ .probe = sp_rtc_probe,
+ .remove = sp_rtc_remove,
+ .driver = {
+ .name = "sp7021-rtc",
+ .of_match_table = sp_rtc_of_match,
+ .pm = &sp_rtc_pm_ops,
+ },
+};
+module_platform_driver(sp_rtc_driver);
+
+MODULE_AUTHOR("Vincent Shih <vincent.sunplus@gmail.com>");
+MODULE_DESCRIPTION("Sunplus RTC driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index d0416dbd0cd8..e3710a762aba 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -5,7 +5,7 @@ comment "S/390 block device drivers"
config DCSSBLK
def_tristate m
select FS_DAX_LIMITED
- select DAX_DRIVER
+ select DAX
prompt "DCSSBLK support"
depends on S390 && BLOCK
help
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 6043c832d09e..811e79c9f59c 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1824,10 +1824,11 @@ static struct attribute *paths_info_attrs[] = {
&path_fcs_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(paths_info);
static struct kobj_type path_attr_type = {
.release = dasd_path_release,
- .default_attrs = paths_info_attrs,
+ .default_groups = paths_info_groups,
.sysfs_ops = &kobj_sysfs_ops,
};
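The default_attrs to default_groups conversions here and in sclp_sd.c below rely on the ATTRIBUTE_GROUPS() helper from <linux/sysfs.h>. A simplified sketch of what the macro expands to (approximate, not verbatim):

    /* ATTRIBUTE_GROUPS(paths_info) generates, roughly: */
    static const struct attribute_group paths_info_group = {
    	.attrs = paths_info_attrs,
    };
    static const struct attribute_group *paths_info_groups[] = {
    	&paths_info_group,
    	NULL,
    };

so the kobj_type can point at a NULL-terminated array of attribute groups instead of a flat attribute list.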
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 27ab888b44d0..d614843caf6c 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -44,18 +44,6 @@ static const struct block_device_operations dcssblk_devops = {
.release = dcssblk_release,
};
-static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
-{
- return copy_from_iter(addr, bytes, i);
-}
-
-static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
-{
- return copy_to_iter(addr, bytes, i);
-}
-
static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
pgoff_t pgoff, size_t nr_pages)
{
@@ -72,9 +60,6 @@ static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
static const struct dax_operations dcssblk_dax_ops = {
.direct_access = dcssblk_dax_direct_access,
- .dax_supported = generic_fsdax_supported,
- .copy_from_iter = dcssblk_dax_copy_from_iter,
- .copy_to_iter = dcssblk_dax_copy_to_iter,
.zero_page_range = dcssblk_dax_zero_page_range,
};
@@ -687,18 +672,21 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto put_dev;
- dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name,
- &dcssblk_dax_ops, DAXDEV_F_SYNC);
+ dev_info->dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
if (IS_ERR(dev_info->dax_dev)) {
rc = PTR_ERR(dev_info->dax_dev);
dev_info->dax_dev = NULL;
goto put_dev;
}
+ set_dax_synchronous(dev_info->dax_dev);
+ rc = dax_add_host(dev_info->dax_dev, dev_info->gd);
+ if (rc)
+ goto out_dax;
get_device(&dev_info->dev);
rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
if (rc)
- goto out_dax;
+ goto out_dax_host;
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
@@ -714,6 +702,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
rc = count;
goto out;
+out_dax_host:
+ dax_remove_host(dev_info->gd);
out_dax:
put_device(&dev_info->dev);
kill_dax(dev_info->dax_dev);
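With the copy/dax_supported ops removed, alloc_dax() no longer takes a disk name or flags; marking the device synchronous and binding it to the gendisk become separate calls, and the new unwind label mirrors that. A hedged sketch of the resulting pairing (put_dax() is assumed from the usual DAX teardown and sits outside this hunk):

    dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
    set_dax_synchronous(dax_dev);
    rc = dax_add_host(dax_dev, dev_info->gd);   /* new, separate step */

    /* teardown runs in reverse order of setup */
    dax_remove_host(dev_info->gd);
    kill_dax(dax_dev);
    put_dax(dax_dev);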
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index c467589c7f45..c06d399b9b1f 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -56,7 +56,7 @@ static inline void
kbd_put_queue(struct tty_port *port, int ch)
{
tty_insert_flip_char(port, ch, 0);
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
}
static inline void
@@ -64,5 +64,5 @@ kbd_puts_queue(struct tty_port *port, char *cp)
{
while (*cp)
tty_insert_flip_char(port, *cp++, 0);
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
}
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index b64feab62caa..e9943a86c361 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -139,7 +139,7 @@ int __init sclp_early_get_core_info(struct sclp_core_info *info)
}
sclp_fill_core_info(info, sccb);
out:
- memblock_phys_free((unsigned long)sccb, length);
+ memblock_free(sccb, length);
return rc;
}
diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c
index 25c2d760f6e6..f9e164be7568 100644
--- a/drivers/s390/char/sclp_sd.c
+++ b/drivers/s390/char/sclp_sd.c
@@ -438,11 +438,12 @@ static struct attribute *sclp_sd_file_default_attrs[] = {
&reload_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(sclp_sd_file_default);
static struct kobj_type sclp_sd_file_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = sclp_sd_file_release,
- .default_attrs = sclp_sd_file_default_attrs,
+ .default_groups = sclp_sd_file_default_groups,
};
/**
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 9e066281e2d0..4cebfaaa22b4 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -72,7 +72,7 @@ static void vmcp_response_alloc(struct vmcp_session *session)
if (order > 2)
page = cma_alloc(vmcp_cma, nr_pages, 0, false);
if (page) {
- session->response = (char *)page_to_phys(page);
+ session->response = (char *)page_to_virt(page);
session->cma_alloc = 1;
return;
}
@@ -89,7 +89,7 @@ static void vmcp_response_free(struct vmcp_session *session)
order = get_order(session->bufsize);
nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
if (session->cma_alloc) {
- page = phys_to_page((unsigned long)session->response);
+ page = virt_to_page((unsigned long)session->response);
cma_release(vmcp_cma, page, nr_pages);
session->cma_alloc = 0;
} else {
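The vmcp change above corrects a type confusion: session->response is dereferenced as a kernel pointer, so it must hold a virtual address; page_to_phys() only appeared to work because s390 identity-maps kernel memory. A small illustrative helper (hypothetical, not in the patch):

    static char *buf_from_page(struct page *page)
    {
    	/*
    	 * page_to_phys() yields a physical address, which is not a
    	 * dereferenceable pointer in general; page_to_virt() returns
    	 * the kernel virtual address that a char * must hold.
    	 */
    	return page_to_virt(page);
    }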
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 684348d82f08..962dfa25a310 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -91,11 +91,6 @@ static int chsc_subchannel_probe(struct subchannel *sch)
sch->schid.ssid, sch->schid.sch_no, ret);
dev_set_drvdata(&sch->dev, NULL);
kfree(private);
- } else {
- if (dev_get_uevent_suppress(&sch->dev)) {
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
- }
}
return ret;
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index ce9e7517430f..fa8293335077 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -470,16 +470,6 @@ int css_register_subchannel(struct subchannel *sch)
if (sch->st == SUBCHANNEL_TYPE_IO)
sch->dev.type = &io_subchannel_type;
- /*
- * We don't want to generate uevents for I/O subchannels that don't
- * have a working ccw device behind them since they will be
- * unregistered before they can be used anyway, so we delay the add
- * uevent until after device recognition was successful.
- * Note that we suppress the uevent for all subchannel types;
- * the subchannel driver can decide itself when it wants to inform
- * userspace of its existence.
- */
- dev_set_uevent_suppress(&sch->dev, 1);
css_update_ssd_info(sch);
/* make it known to the system */
ret = css_sch_device_register(sch);
@@ -488,15 +478,6 @@ int css_register_subchannel(struct subchannel *sch)
sch->schid.ssid, sch->schid.sch_no, ret);
return ret;
}
- if (!sch->driver) {
- /*
- * No driver matched. Generate the uevent now so that
- * a fitting driver module may be loaded based on the
- * modalias.
- */
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
- }
return ret;
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 07a17613fab5..cd938a26b76c 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -838,14 +838,6 @@ static void io_subchannel_register(struct ccw_device *cdev)
adjust_init_count = 0;
goto out;
}
- /*
- * Now we know this subchannel will stay, we can throw
- * our delayed uevent.
- */
- if (dev_get_uevent_suppress(&sch->dev)) {
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
- }
/* make it known to the system */
ret = device_add(&cdev->dev);
if (ret) {
@@ -1036,15 +1028,9 @@ static int io_subchannel_probe(struct subchannel *sch)
"0.%x.%04x (rc=%d)\n",
sch->schid.ssid, sch->schid.sch_no, rc);
/*
- * The console subchannel already has an associated ccw_device.
- * Throw the delayed uevent for the subchannel, register
- * the ccw_device and exit.
- */
- if (dev_get_uevent_suppress(&sch->dev)) {
- /* should always be the case for the console */
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
- }
+ * The console subchannel already has an associated ccw_device.
+ * Register it and exit.
+ */
cdev = sch_get_cdev(sch);
rc = device_add(&cdev->dev);
if (rc) {
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 15bdae5981ca..8b463681a149 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -243,11 +243,6 @@ static int eadm_subchannel_probe(struct subchannel *sch)
spin_lock_irq(&list_lock);
list_add(&private->head, &eadm_list);
spin_unlock_irq(&list_lock);
-
- if (dev_get_uevent_suppress(&sch->dev)) {
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
- }
out:
return ret;
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 99c2212dc6a6..5ea6249d8180 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -236,12 +236,11 @@ struct qdio_irq {
int nr_input_qs;
int nr_output_qs;
- struct ccw1 ccw;
- struct ciw equeue;
- struct ciw aqueue;
+ struct ccw1 *ccw;
struct qdio_ssqd_desc ssqd_desc;
void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+ qdio_handler_t (*error_handler);
int perf_stat_enabled;
@@ -338,7 +337,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
struct subchannel_id *schid,
struct qdio_ssqd_desc *data);
-int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
+void qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
void qdio_shutdown_irq(struct qdio_irq *irq);
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
void qdio_free_queues(struct qdio_irq *irq_ptr);
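Turning the embedded struct ccw1 into a pointer lets the large qdio_irq live in ordinary memory: only the channel program itself must be 31-bit addressable (ccw1.cda is a u32), so qdio_allocate() confines GFP_DMA to one small allocation. Condensed from the qdio_main.c hunk that follows:

    irq_ptr = (void *)get_zeroed_page(GFP_KERNEL);        /* no GFP_DMA needed */
    irq_ptr->ccw = kmalloc(sizeof(*irq_ptr->ccw), GFP_KERNEL | GFP_DMA);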
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 45e810c6ea3b..9cde55730b65 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/kmemleak.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
@@ -169,8 +170,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
int tmp_count = count, tmp_start = start;
int nr = q->nr;
- if (!count)
- return 0;
qperf_inc(q, sqbs);
if (!q->is_input_q)
@@ -499,6 +498,31 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
}
}
+int qdio_inspect_input_queue(struct ccw_device *cdev, unsigned int nr,
+ unsigned int *bufnr, unsigned int *error)
+{
+ struct qdio_irq *irq = cdev->private->qdio_data;
+ unsigned int start;
+ struct qdio_q *q;
+ int count;
+
+ if (!irq)
+ return -ENODEV;
+
+ q = irq->input_qs[nr];
+ start = q->first_to_check;
+ *error = 0;
+
+ count = get_inbound_buffer_frontier(q, start, error);
+ if (count == 0)
+ return 0;
+
+ *bufnr = start;
+ q->first_to_check = add_buf(start, count);
+ return count;
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_input_queue);
+
static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
@@ -578,6 +602,31 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
}
}
+int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr,
+ unsigned int *bufnr, unsigned int *error)
+{
+ struct qdio_irq *irq = cdev->private->qdio_data;
+ unsigned int start;
+ struct qdio_q *q;
+ int count;
+
+ if (!irq)
+ return -ENODEV;
+
+ q = irq->output_qs[nr];
+ start = q->first_to_check;
+ *error = 0;
+
+ count = get_outbound_buffer_frontier(q, start, error);
+ if (count == 0)
+ return 0;
+
+ *bufnr = start;
+ q->first_to_check = add_buf(start, count);
+ return count;
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_output_queue);
+
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
unsigned long aob)
{
@@ -653,24 +702,18 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
unsigned long intparm, int cstat,
int dstat)
{
- struct qdio_q *q;
+ unsigned int first_to_check = 0;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
DBF_ERROR("intp :%lx", intparm);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
- if (irq_ptr->nr_input_qs) {
- q = irq_ptr->input_qs[0];
- } else if (irq_ptr->nr_output_qs) {
- q = irq_ptr->output_qs[0];
- } else {
- dump_stack();
- goto no_handler;
- }
+ /* zfcp wants this: */
+ if (irq_ptr->nr_input_qs)
+ first_to_check = irq_ptr->input_qs[0]->first_to_check;
- q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
- q->nr, q->first_to_check, 0, irq_ptr->int_parm);
-no_handler:
+ irq_ptr->error_handler(irq_ptr->cdev, QDIO_ERROR_ACTIVATE, 0,
+ first_to_check, 0, irq_ptr->int_parm);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
/*
* In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
@@ -874,6 +917,7 @@ int qdio_free(struct ccw_device *cdev)
qdio_free_queues(irq_ptr);
free_page((unsigned long) irq_ptr->qdr);
free_page(irq_ptr->chsc_page);
+ kfree(irq_ptr->ccw);
free_page((unsigned long) irq_ptr);
return 0;
}
@@ -899,11 +943,17 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
return -EINVAL;
- /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
- irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ irq_ptr = (void *) get_zeroed_page(GFP_KERNEL);
if (!irq_ptr)
return -ENOMEM;
+ irq_ptr->ccw = kmalloc(sizeof(*irq_ptr->ccw), GFP_KERNEL | GFP_DMA);
+ if (!irq_ptr->ccw)
+ goto err_ccw;
+
+ /* kmemleak doesn't scan the page-allocated irq_ptr: */
+ kmemleak_not_leak(irq_ptr->ccw);
+
irq_ptr->cdev = cdev;
mutex_init(&irq_ptr->setup_mutex);
if (qdio_allocate_dbf(irq_ptr))
@@ -941,6 +991,8 @@ err_qdr:
free_page(irq_ptr->chsc_page);
err_chsc:
err_dbf:
+ kfree(irq_ptr->ccw);
+err_ccw:
free_page((unsigned long) irq_ptr);
return rc;
}
@@ -972,6 +1024,7 @@ int qdio_establish(struct ccw_device *cdev,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
+ struct ciw *ciw;
long timeout;
int rc;
@@ -985,8 +1038,11 @@ int qdio_establish(struct ccw_device *cdev,
init_data->no_output_qs > irq_ptr->max_output_qs)
return -EINVAL;
- if ((init_data->no_input_qs && !init_data->input_handler) ||
- (init_data->no_output_qs && !init_data->output_handler))
+ /* Needed as error_handler: */
+ if (!init_data->input_handler)
+ return -EINVAL;
+
+ if (init_data->no_output_qs && !init_data->output_handler)
return -EINVAL;
if (!init_data->input_sbal_addr_array ||
@@ -996,6 +1052,12 @@ int qdio_establish(struct ccw_device *cdev,
if (!init_data->irq_poll)
return -EINVAL;
+ ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
+ if (!ciw) {
+ DBF_ERROR("%4x NO EQ", schid.sch_no);
+ return -EIO;
+ }
+
mutex_lock(&irq_ptr->setup_mutex);
qdio_trace_init_data(irq_ptr, init_data);
qdio_setup_irq(irq_ptr, init_data);
@@ -1005,15 +1067,15 @@ int qdio_establish(struct ccw_device *cdev,
goto err_thinint;
/* establish q */
- irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
- irq_ptr->ccw.flags = CCW_FLAG_SLI;
- irq_ptr->ccw.count = irq_ptr->equeue.count;
- irq_ptr->ccw.cda = (u32) virt_to_phys(irq_ptr->qdr);
+ irq_ptr->ccw->cmd_code = ciw->cmd;
+ irq_ptr->ccw->flags = CCW_FLAG_SLI;
+ irq_ptr->ccw->count = ciw->count;
+ irq_ptr->ccw->cda = (u32) virt_to_phys(irq_ptr->qdr);
spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options_mask(cdev, 0);
- rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
+ rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
@@ -1065,6 +1127,7 @@ int qdio_activate(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
+ struct ciw *ciw;
int rc;
ccw_device_get_schid(cdev, &schid);
@@ -1073,21 +1136,27 @@ int qdio_activate(struct ccw_device *cdev)
if (!irq_ptr)
return -ENODEV;
+ ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
+ if (!ciw) {
+ DBF_ERROR("%4x NO AQ", schid.sch_no);
+ return -EIO;
+ }
+
mutex_lock(&irq_ptr->setup_mutex);
if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
rc = -EBUSY;
goto out;
}
- irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
- irq_ptr->ccw.flags = CCW_FLAG_SLI;
- irq_ptr->ccw.count = irq_ptr->aqueue.count;
- irq_ptr->ccw.cda = 0;
+ irq_ptr->ccw->cmd_code = ciw->cmd;
+ irq_ptr->ccw->flags = CCW_FLAG_SLI;
+ irq_ptr->ccw->count = ciw->count;
+ irq_ptr->ccw->cda = 0;
spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
- rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
+ rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ACTIVATE,
0, DOIO_DENY_PREFETCH);
spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
@@ -1144,6 +1213,35 @@ static int handle_inbound(struct qdio_q *q, int bufnr, int count)
}
/**
+ * qdio_add_bufs_to_input_queue - process buffers on an Input Queue
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @q_nr: queue number
+ * @bufnr: buffer number
+ * @count: how many buffers to process
+ */
+int qdio_add_bufs_to_input_queue(struct ccw_device *cdev, unsigned int q_nr,
+ unsigned int bufnr, unsigned int count)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
+ return -EINVAL;
+
+ if (!irq_ptr)
+ return -ENODEV;
+
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addi b:%02x c:%02x", bufnr, count);
+
+ if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
+ return -EIO;
+ if (!count)
+ return 0;
+
+ return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
+}
+EXPORT_SYMBOL_GPL(qdio_add_bufs_to_input_queue);
+
+/**
* handle_outbound - process filled outbound buffers
* @q: queue containing the buffers
* @bufnr: first buffer to process
@@ -1184,16 +1282,16 @@ static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int co
}
/**
- * do_QDIO - process input or output buffers
+ * qdio_add_bufs_to_output_queue - process buffers on an Output Queue
* @cdev: associated ccw_device for the qdio subchannel
- * @callflags: input or output and special flags from the program
* @q_nr: queue number
* @bufnr: buffer number
* @count: how many buffers to process
- * @aob: asynchronous operation block (outbound only)
+ * @aob: asynchronous operation block
*/
-int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
- int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob)
+int qdio_add_bufs_to_output_queue(struct ccw_device *cdev, unsigned int q_nr,
+ unsigned int bufnr, unsigned int count,
+ struct qaob *aob)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
@@ -1203,20 +1301,16 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
if (!irq_ptr)
return -ENODEV;
- DBF_DEV_EVENT(DBF_INFO, irq_ptr,
- "do%02x b:%02x c:%02x", callflags, bufnr, count);
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addo b:%02x c:%02x", bufnr, count);
if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
return -EIO;
if (!count)
return 0;
- if (callflags & QDIO_FLAG_SYNC_INPUT)
- return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
- else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
- return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
- return -EINVAL;
+
+ return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
}
-EXPORT_SYMBOL_GPL(do_QDIO);
+EXPORT_SYMBOL_GPL(qdio_add_bufs_to_output_queue);
/**
* qdio_start_irq - enable interrupt processing for the device
@@ -1263,40 +1357,6 @@ rescan:
}
EXPORT_SYMBOL(qdio_start_irq);
-static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
- unsigned int *error)
-{
- unsigned int start = q->first_to_check;
- int count;
-
- *error = 0;
- count = q->is_input_q ? get_inbound_buffer_frontier(q, start, error) :
- get_outbound_buffer_frontier(q, start, error);
- if (count == 0)
- return 0;
-
- *bufnr = start;
-
- /* for the next time */
- q->first_to_check = add_buf(start, count);
-
- return count;
-}
-
-int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
- unsigned int *bufnr, unsigned int *error)
-{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
- struct qdio_q *q;
-
- if (!irq_ptr)
- return -ENODEV;
- q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
-
- return __qdio_inspect_queue(q, bufnr, error);
-}
-EXPORT_SYMBOL_GPL(qdio_inspect_queue);
-
/**
* qdio_stop_irq - disable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
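Taken together, these hunks replace the direction-flag interfaces (do_QDIO(), qdio_inspect_queue()) with direction-specific entry points; the qeth and zfcp conversions below follow from that. A hedged sketch of a driver's RX loop under the new API (process_rx() and recover() are stand-ins for driver-specific code):

    unsigned int start, error;
    int completed;

    /* fetch completed inbound buffers on queue 0 */
    completed = qdio_inspect_input_queue(cdev, 0, &start, &error);
    if (completed > 0 && !error)
    	process_rx(start, completed);

    /* hand the processed buffers back to the device */
    if (qdio_add_bufs_to_input_queue(cdev, 0, start, completed))
    	recover();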
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index efbb5e5eca05..714878e2acc4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -351,19 +351,18 @@ static void setup_qib(struct qdio_irq *irq_ptr,
sizeof(irq_ptr->qib.parm));
}
-int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
+void qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
{
struct ccw_device *cdev = irq_ptr->cdev;
- struct ciw *ciw;
irq_ptr->qdioac1 = 0;
- memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
irq_ptr->debugfs_dev = NULL;
irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
+ irq_ptr->error_handler = init_data->input_handler;
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
@@ -386,23 +385,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
irq_ptr->orig_handler = cdev->handler;
cdev->handler = qdio_int_handler;
spin_unlock_irq(get_ccwdev_lock(cdev));
-
- /* get qdio commands */
- ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
- if (!ciw) {
- DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
- return -EINVAL;
- }
- irq_ptr->equeue = *ciw;
-
- ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
- if (!ciw) {
- DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
- return -EINVAL;
- }
- irq_ptr->aqueue = *ciw;
-
- return 0;
}
void qdio_shutdown_irq(struct qdio_irq *irq)
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 040742777095..ee182cfb467d 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -244,11 +244,6 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (ret)
goto out_disable;
- if (dev_get_uevent_suppress(&sch->dev)) {
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
- }
-
VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 03311a476366..e043ae236630 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -17,6 +17,9 @@
#define VFIO_AP_ROOT_NAME "vfio_ap"
#define VFIO_AP_DEV_NAME "matrix"
+#define AP_QUEUE_ASSIGNED "assigned"
+#define AP_QUEUE_UNASSIGNED "unassigned"
+#define AP_QUEUE_IN_USE "in use"
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
@@ -41,26 +44,95 @@ static struct ap_device_id ap_queue_ids[] = {
MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
+static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long apid = AP_QID_CARD(q->apqn);
+ unsigned long apqi = AP_QID_QUEUE(q->apqn);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
+ test_bit_inv(apqi, matrix_mdev->matrix.aqm))
+ return matrix_mdev;
+ }
+
+ return NULL;
+}
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars = 0;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+ struct ap_device *apdev = to_ap_dev(dev);
+
+ mutex_lock(&matrix_dev->lock);
+ q = dev_get_drvdata(&apdev->device);
+ matrix_mdev = vfio_ap_mdev_for_queue(q);
+
+ if (matrix_mdev) {
+ if (matrix_mdev->kvm)
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_IN_USE);
+ else
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_ASSIGNED);
+ } else {
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_UNASSIGNED);
+ }
+
+ mutex_unlock(&matrix_dev->lock);
+
+ return nchars;
+}
+
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *vfio_queue_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static const struct attribute_group vfio_queue_attr_group = {
+ .attrs = vfio_queue_attrs,
+};
+
/**
* vfio_ap_queue_dev_probe: Allocate a vfio_ap_queue structure and associate it
* with the device as driver_data.
*
* @apdev: the AP device being probed
*
- * Return: returns 0 if the probe succeeded; otherwise, returns -ENOMEM if
- * storage could not be allocated for a vfio_ap_queue object.
+ * Return: returns 0 if the probe succeeded; otherwise, returns an error if
+ * storage could not be allocated for a vfio_ap_queue object or the
+ * sysfs 'status' attribute could not be created for the queue device.
*/
static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
{
+ int ret;
struct vfio_ap_queue *q;
q = kzalloc(sizeof(*q), GFP_KERNEL);
if (!q)
return -ENOMEM;
+
+ mutex_lock(&matrix_dev->lock);
dev_set_drvdata(&apdev->device, q);
q->apqn = to_ap_queue(&apdev->device)->qid;
q->saved_isc = VFIO_AP_ISC_INVALID;
- return 0;
+
+ ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ if (ret) {
+ dev_set_drvdata(&apdev->device, NULL);
+ kfree(q);
+ }
+
+ mutex_unlock(&matrix_dev->lock);
+
+ return ret;
}
/**
@@ -75,6 +147,7 @@ static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
struct vfio_ap_queue *q;
mutex_lock(&matrix_dev->lock);
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
q = dev_get_drvdata(&apdev->device);
vfio_ap_mdev_reset_queue(q, 1);
dev_set_drvdata(&apdev->device, NULL);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 4c3dcc435e83..9811ab81f3c4 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -878,14 +878,13 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
/*
* If a valid target domain is set and this domain is NOT a usage
- * domain but a control only domain, use the default domain as target.
+ * domain but a control only domain, autoselect target domain.
*/
tdom = *domain;
if (tdom < AP_DOMAINS &&
!ap_test_config_usage_domain(tdom) &&
- ap_test_config_ctrl_domain(tdom) &&
- ap_domain_index >= 0)
- tdom = ap_domain_index;
+ ap_test_config_ctrl_domain(tdom))
+ tdom = AUTOSEL_DOM;
pref_zc = NULL;
pref_zq = NULL;
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
index 675575ef162e..cce11daf3245 100644
--- a/drivers/s390/net/ctcm_dbug.h
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -65,6 +65,7 @@ extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS];
int ctcm_register_dbf_views(void);
void ctcm_unregister_dbf_views(void);
+__printf(3, 4)
void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *text, ...);
static inline const char *strtail(const char *s, int n)
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index de2423c72b02..5db591cf7215 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1406,7 +1406,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
if (new_skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
- "%s(%d): skb allocation failed",
+ "%s(%s): skb allocation failed",
CTCM_FUNTAIL, dev->name);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
goto again;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 2a6479740600..a61d38a1b4ed 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1808,19 +1808,20 @@ lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
return;
}
/* What kind of frame is it? */
- if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
+ if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL) {
/* Control frame. */
lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
- else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
- lcs_hdr->type == LCS_FRAME_TYPE_TR ||
- lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
+ } else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
+ lcs_hdr->type == LCS_FRAME_TYPE_TR ||
+ lcs_hdr->type == LCS_FRAME_TYPE_FDDI) {
/* Normal network packet. */
lcs_get_skb(card, (char *)(lcs_hdr + 1),
lcs_hdr->offset - offset -
sizeof(struct lcs_header));
- else
+ } else {
/* Unknown frame type. */
; // FIXME: error message ?
+ }
/* Proceed to next frame. */
offset = lcs_hdr->offset;
lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 20dca4c0384a..de25d7ac41da 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1030,8 +1030,6 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
data, QETH_PROT_IPV6);
}
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
-
extern const struct qeth_discipline qeth_l2_discipline;
extern const struct qeth_discipline qeth_l3_discipline;
extern const struct ethtool_ops qeth_ethtool_ops;
@@ -1099,6 +1097,8 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count);
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
u8 cast_type, struct net_device *sb_dev);
+u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
int qeth_open(struct net_device *dev);
int qeth_stop(struct net_device *dev);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 26c55f67289f..29f0111f8e11 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -194,9 +194,6 @@ static void qeth_clear_working_pool_list(struct qeth_card *card)
&card->qdio.in_buf_pool.entry_list, list)
list_del(&pool_entry->list);
- if (!queue)
- return;
-
for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
queue->bufs[i].pool_entry = NULL;
}
@@ -275,8 +272,8 @@ int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
QETH_CARD_TEXT(card, 2, "realcbp");
- /* Defer until queue is allocated: */
- if (!card->qdio.in_q)
+ /* Defer until pool is allocated: */
+ if (list_empty(&pool->entry_list))
goto out;
/* Remove entries from the pool: */
@@ -355,8 +352,8 @@ static int qeth_cq_init(struct qeth_card *card)
qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
QDIO_MAX_BUFFERS_PER_Q);
card->qdio.c_q->next_buf_to_init = 127;
- rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 1, 0, 127,
- NULL);
+
+ rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 1, 0, 127);
if (rc) {
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
goto out;
@@ -2557,14 +2554,9 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
- QETH_CARD_TEXT(card, 2, "inq");
- card->qdio.in_q = qeth_alloc_qdio_queue();
- if (!card->qdio.in_q)
- goto out_nomem;
-
/* inbound buffer pool */
if (qeth_alloc_buffer_pool(card))
- goto out_freeinq;
+ goto out_buffer_pool;
/* outbound */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
@@ -2605,10 +2597,7 @@ out_freeoutq:
card->qdio.out_qs[i] = NULL;
}
qeth_free_buffer_pool(card);
-out_freeinq:
- qeth_free_qdio_queue(card->qdio.in_q);
- card->qdio.in_q = NULL;
-out_nomem:
+out_buffer_pool:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
return -ENOMEM;
}
@@ -2623,11 +2612,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
qeth_free_cq(card);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
- if (card->qdio.in_q->bufs[j].rx_skb)
+ if (card->qdio.in_q->bufs[j].rx_skb) {
consume_skb(card->qdio.in_q->bufs[j].rx_skb);
+ card->qdio.in_q->bufs[j].rx_skb = NULL;
+ }
}
- qeth_free_qdio_queue(card->qdio.in_q);
- card->qdio.in_q = NULL;
+
/* inbound buffer pool */
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
@@ -2926,8 +2916,7 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
}
card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
- rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
- NULL);
+ rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, 0, rx_bufs);
if (rc) {
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
return rc;
@@ -3415,8 +3404,9 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
return 0;
}
- rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
- queue->next_buf_to_init, count, NULL);
+ rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0,
+ queue->next_buf_to_init,
+ count);
if (rc) {
QETH_CARD_TEXT(card, 2, "qinberr");
}
@@ -3588,8 +3578,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
QETH_TXQ_STAT_INC(queue, doorbell);
- rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
- index, count, aob);
+ rc = qdio_add_bufs_to_output_queue(CARD_DDEV(card), queue->queue_no,
+ index, count, aob);
switch (rc) {
case 0:
@@ -3645,12 +3635,10 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
!atomic_read(&queue->set_pci_flags_count)) {
unsigned int index, flush_cnt;
- bool q_was_packing;
spin_lock(&queue->lock);
index = queue->next_buf_to_fill;
- q_was_packing = queue->do_pack;
flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
@@ -3658,8 +3646,7 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
if (flush_cnt) {
qeth_flush_buffers(queue, index, flush_cnt);
- if (q_was_packing)
- QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
+ QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
}
spin_unlock(&queue->lock);
@@ -3739,8 +3726,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
}
qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
}
- rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
- cq->next_buf_to_init, count, NULL);
+ rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), queue,
+ cq->next_buf_to_init, count);
if (rc) {
dev_warn(&card->gdev->dev,
"QDIO reported an error, rc=%i\n", rc);
@@ -3779,7 +3766,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
/*
* Note: Function assumes that we have 4 outbound queues.
*/
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
u8 tos;
@@ -3824,7 +3811,6 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
}
return card->qdio.default_out_queue;
}
-EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
/**
* qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
@@ -5585,29 +5571,9 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
#endif
static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr *hdr, bool uses_frags)
+ bool uses_frags, bool is_cso)
{
struct napi_struct *napi = &card->napi;
- bool is_cso;
-
- switch (hdr->hdr.l2.id) {
-#if IS_ENABLED(CONFIG_QETH_L3)
- case QETH_HEADER_TYPE_LAYER3:
- qeth_l3_rebuild_skb(card, skb, hdr);
- is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
- break;
-#endif
- case QETH_HEADER_TYPE_LAYER2:
- is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
- break;
- default:
- /* never happens */
- if (uses_frags)
- napi_free_frags(napi);
- else
- kfree_skb(skb);
- return;
- }
if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5664,6 +5630,7 @@ static int qeth_extract_skb(struct qeth_card *card,
struct qeth_hdr *hdr;
struct sk_buff *skb;
int skb_len = 0;
+ bool is_cso;
element = &buffer->element[*element_no];
@@ -5683,11 +5650,15 @@ next_packet:
switch (hdr->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
skb_len = hdr->hdr.l2.pkt_length;
+ is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
+
linear_len = ETH_HLEN;
headroom = 0;
break;
case QETH_HEADER_TYPE_LAYER3:
skb_len = hdr->hdr.l3.length;
+ is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
+
if (!IS_LAYER3(card)) {
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
goto walk_packet;
@@ -5814,7 +5785,12 @@ walk_packet:
*element_no = element - &buffer->element[0];
*__offset = offset;
- qeth_receive_skb(card, skb, hdr, uses_frags);
+#if IS_ENABLED(CONFIG_QETH_L3)
+ if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER3)
+ qeth_l3_rebuild_skb(card, skb, hdr);
+#endif
+
+ qeth_receive_skb(card, skb, uses_frags, is_cso);
return 0;
}
@@ -5850,10 +5826,10 @@ static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
/* Fetch completed RX buffers: */
if (!card->rx.b_count) {
card->rx.qdio_err = 0;
- card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
- 0, true,
- &card->rx.b_index,
- &card->rx.qdio_err);
+ card->rx.b_count =
+ qdio_inspect_input_queue(CARD_DDEV(card), 0,
+ &card->rx.b_index,
+ &card->rx.qdio_err);
if (card->rx.b_count <= 0) {
card->rx.b_count = 0;
break;
@@ -5900,8 +5876,8 @@ static void qeth_cq_poll(struct qeth_card *card)
unsigned int start, error;
int completed;
- completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
- &error);
+ completed = qdio_inspect_input_queue(CARD_DDEV(card), 1, &start,
+ &error);
if (completed <= 0)
return;
@@ -6038,8 +6014,8 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
return 0;
}
- completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
- &start, &error);
+ completed = qdio_inspect_output_queue(CARD_DDEV(card), queue_no,
+ &start, &error);
if (completed <= 0) {
/* Ensure we see TX completion for pending work: */
if (napi_complete_done(napi, 0) &&
@@ -6447,6 +6423,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
qeth_determine_capabilities(card);
qeth_set_blkt_defaults(card);
+ card->qdio.in_q = qeth_alloc_qdio_queue();
+ if (!card->qdio.in_q) {
+ rc = -ENOMEM;
+ goto err_rx_queue;
+ }
+
card->qdio.no_out_queues = card->dev->num_tx_queues;
rc = qeth_update_from_chp_desc(card);
if (rc)
@@ -6473,6 +6455,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
err_setup_disc:
err_chp_desc:
+ qeth_free_qdio_queue(card->qdio.in_q);
+err_rx_queue:
free_netdev(card->dev);
err_card:
qeth_core_free_card(card);
@@ -6494,6 +6478,7 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
qeth_free_qdio_queues(card);
+ qeth_free_qdio_queue(card->qdio.in_q);
free_netdev(card->dev);
qeth_core_free_card(card);
put_device(&gdev->dev);
@@ -7089,6 +7074,18 @@ u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
+u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (qeth_uses_tx_prio_queueing(card))
+ return qeth_get_priority_queue(card, skb);
+
+ return netdev_pick_tx(dev, skb, sb_dev);
+}
+EXPORT_SYMBOL_GPL(qeth_osa_select_queue);
+
int qeth_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 46d0fe0d0e8a..b0b36b2132fe 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -144,7 +144,9 @@ static int qeth_set_coalesce(struct net_device *dev,
}
static void qeth_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct qeth_card *card = dev->ml_priv;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0347fc184786..303461d70af3 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -519,19 +519,11 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
+static u16 qeth_l2_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
{
- struct qeth_card *card = dev->ml_priv;
-
- if (IS_IQD(card))
- return qeth_iqd_select_queue(dev, skb,
- qeth_get_ether_cast_type(skb),
- sb_dev);
- if (qeth_uses_tx_prio_queueing(card))
- return qeth_get_priority_queue(card, skb);
-
- return netdev_pick_tx(dev, skb, sb_dev);
+ return qeth_iqd_select_queue(dev, skb, qeth_get_ether_cast_type(skb),
+ sb_dev);
}
static void qeth_l2_set_rx_mode(struct net_device *dev)
@@ -726,7 +718,8 @@ struct qeth_l2_br2dev_event_work {
unsigned char addr[ETH_ALEN];
};
-static const struct net_device_ops qeth_l2_netdev_ops;
+static const struct net_device_ops qeth_l2_iqd_netdev_ops;
+static const struct net_device_ops qeth_l2_osa_netdev_ops;
static bool qeth_l2_must_learn(struct net_device *netdev,
struct net_device *dstdev)
@@ -738,7 +731,8 @@ static bool qeth_l2_must_learn(struct net_device *netdev,
(priv->brport_features & BR_LEARNING_SYNC) &&
!(br_port_flag_is_set(netdev, BR_ISOLATED) &&
br_port_flag_is_set(dstdev, BR_ISOLATED)) &&
- netdev->netdev_ops == &qeth_l2_netdev_ops);
+ (netdev->netdev_ops == &qeth_l2_iqd_netdev_ops ||
+ netdev->netdev_ops == &qeth_l2_osa_netdev_ops));
}
/**
@@ -1051,20 +1045,20 @@ static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
return rc;
}
-static const struct net_device_ops qeth_l2_netdev_ops = {
+static const struct net_device_ops qeth_l2_iqd_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
- .ndo_select_queue = qeth_l2_select_queue,
+ .ndo_select_queue = qeth_l2_iqd_select_queue,
.ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_eth_ioctl = qeth_do_ioctl,
.ndo_siocdevprivate = qeth_siocdevprivate,
- .ndo_set_mac_address = qeth_l2_set_mac_address,
+ .ndo_set_mac_address = qeth_l2_set_mac_address,
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
+ .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
@@ -1072,10 +1066,30 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_bridge_setlink = qeth_l2_bridge_setlink,
};
+static const struct net_device_ops qeth_l2_osa_netdev_ops = {
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
+ .ndo_get_stats64 = qeth_get_stats64,
+ .ndo_start_xmit = qeth_l2_hard_start_xmit,
+ .ndo_features_check = qeth_features_check,
+ .ndo_select_queue = qeth_osa_select_queue,
+ .ndo_validate_addr = qeth_l2_validate_addr,
+ .ndo_set_rx_mode = qeth_l2_set_rx_mode,
+ .ndo_eth_ioctl = qeth_do_ioctl,
+ .ndo_siocdevprivate = qeth_siocdevprivate,
+ .ndo_set_mac_address = qeth_l2_set_mac_address,
+ .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
+ .ndo_tx_timeout = qeth_tx_timeout,
+ .ndo_fix_features = qeth_fix_features,
+ .ndo_set_features = qeth_set_features,
+};
+
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
+ card->dev->netdev_ops = IS_IQD(card) ? &qeth_l2_iqd_netdev_ops :
+ &qeth_l2_osa_netdev_ops;
card->dev->needed_headroom = sizeof(struct qeth_hdr);
- card->dev->netdev_ops = &qeth_l2_netdev_ops;
card->dev->priv_flags |= IFF_UNICAST_FLT;
if (IS_OSM(card)) {
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 48a886f7af62..9251ad276ee8 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1822,17 +1822,6 @@ static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
qeth_l3_get_cast_type(skb, proto), sb_dev);
}
-static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- if (qeth_uses_tx_prio_queueing(card))
- return qeth_get_priority_queue(card, skb);
-
- return netdev_pick_tx(dev, skb, sb_dev);
-}
-
static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
@@ -1854,7 +1843,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_l3_osa_features_check,
- .ndo_select_queue = qeth_l3_osa_select_queue,
+ .ndo_select_queue = qeth_osa_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_eth_ioctl = qeth_do_ioctl,
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index d24cafe02708..511bf8e0a436 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
goto out;
}
+ /* re-init to undo drop from zfcp_fc_adisc() */
+ port->d_id = ntoh24(adisc_resp->adisc_port_id);
/* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port);
out:
@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
struct Scsi_Host *shost = adapter->scsi_host;
+ u32 d_id;
int ret;
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
- ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
+ d_id = port->d_id; /* remember as destination for send els below */
+ /*
+ * Force fresh GID_PN lookup on next port recovery.
+ * Must happen after request setup and before sending request,
+ * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
+ */
+ port->d_id = 0;
+
+ ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
ZFCP_FC_CTELS_TMO);
if (ret)
kmem_cache_free(zfcp_fc_req_cache, fc_req);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6a2720105138..f54f506b02d6 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -79,7 +79,7 @@ static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
unsigned int start, error;
int completed;
- completed = qdio_inspect_queue(cdev, 0, false, &start, &error);
+ completed = qdio_inspect_output_queue(cdev, 0, &start, &error);
if (completed > 0) {
if (error) {
zfcp_qdio_handler_error(qdio, "qdreqt1", error);
@@ -154,7 +154,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
/*
* put SBALs back to response queue
*/
- if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL))
+ if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count))
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
@@ -169,7 +169,7 @@ static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
tasklet_schedule(&qdio->request_tasklet);
/* Check the Response Queue: */
- completed = qdio_inspect_queue(cdev, 0, true, &start, &error);
+ completed = qdio_inspect_input_queue(cdev, 0, &start, &error);
if (completed < 0)
return;
if (completed > 0)
@@ -326,8 +326,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
atomic_sub(sbal_number, &qdio->req_q_free);
- retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
- q_req->sbal_first, sbal_number, NULL);
+ retval = qdio_add_bufs_to_output_queue(qdio->adapter->ccw_device, 0,
+ q_req->sbal_first, sbal_number,
+ NULL);
if (unlikely(retval)) {
/* Failed to submit the IO, roll back our modifications. */
@@ -395,7 +396,10 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return;
- /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
+ /*
+ * Clear QDIOUP flag, thus qdio_add_bufs_to_output_queue() is not called
+ * during qdio_shutdown().
+ */
spin_lock_irq(&qdio->req_q_lock);
atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_irq(&qdio->req_q_lock);
@@ -498,8 +502,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
sbale->addr = 0;
}
- if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q,
- NULL))
+ if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
goto failed_qdio;
/* set index of first available SBALS / number of available SBALS */
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b9482da79512..3ebe66151dcb 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1567,8 +1567,6 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (retval)
- retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
retval = -ENODEV;
@@ -1786,8 +1784,6 @@ static int __maybe_unused twl_resume(struct device *dev)
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (retval)
- retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
retval = -ENODEV;
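This hunk (and the matching ones in bfa and efct below) drops the 32-bit retry because the DMA API documents that if dma_set_mask_and_coherent(DMA_BIT_MASK(64)) fails, a 32-bit mask would fail for the same reason; the fallback was dead code. The simplified pattern, as a sketch:

    retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (retval) {
    	/* a 32-bit retry cannot succeed here; just give up */
    	return -ENODEV;
    }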
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 3ad3ebaca8e9..ad4972c0fc53 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1507,7 +1507,6 @@ NCR_700_intr(int irq, void *dev_id)
struct scsi_cmnd *SCp = hostdata->cmd;
handled = 1;
- SCp = hostdata->cmd;
if(istat & SCSI_INT_PENDING) {
udelay(10);
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 564ade03b530..d02eb5b213d0 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -904,13 +904,11 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
/**
* inia100_queue_lck - queue command with host
* @cmd: Command block
- * @done: Completion function
*
* Called by the mid layer to queue a command. Process the command
* block, build the host specific scb structures and if there is room
* queue the command down to the controller
*/
-
static int inia100_queue_lck(struct scsi_cmnd *cmd)
{
struct orc_scb *scb;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 59f6b7b2a70a..b04d039da276 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -271,7 +271,7 @@ MODULE_PARM_DESC(msi, "IRQ handling."
" 0=PIC(default), 1=MSI, 2=MSI-X)");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
- " adapter to have it's kernel up and\n"
+ " adapter to have its kernel up and\n"
"running. This is typically adjusted for large systems that do not"
" have a BIOS.");
module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 5d566d2b2997..928099163f0f 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -755,11 +755,7 @@ ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
static int
ahd_linux_abort(struct scsi_cmnd *cmd)
{
- int error;
-
- error = ahd_linux_queue_abort_cmd(cmd);
-
- return error;
+ return ahd_linux_queue_abort_cmd(cmd);
}
/*
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index dcd6fae65a88..7143418d690f 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -614,7 +614,6 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
/**
* atp870u_queuecommand_lck - Queue SCSI command
* @req_p: request block
- * @done: completion function
*
* Queue a command to the ATP queue. Called with the host lock held.
*/
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 440ef32be048..e5aa982ffedc 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -732,9 +732,6 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_master(pdev);
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-
if (rc) {
rc = -ENODEV;
printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
@@ -1560,9 +1557,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
if (rc)
- rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
- DMA_BIT_MASK(32));
- if (rc)
goto out_disable_device;
if (restart_bfa(bfad) == -1)
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index c8b947c16069..f46989bd083c 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -981,7 +981,7 @@ const struct attribute_group *bfad_im_host_groups[] = {
NULL
};
-struct attribute *bfad_im_vport_attrs[] = {
+static struct attribute *bfad_im_vport_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_model.attr,
&dev_attr_model_description.attr,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 71fa62bd3083..9be273c320e2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -82,7 +82,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv);
-static void bnx2fc_destroy_work(struct work_struct *work);
+static void bnx2fc_port_destroy(struct fcoe_port *port);
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
@@ -907,9 +907,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
__bnx2fc_destroy(interface);
}
mutex_unlock(&bnx2fc_dev_lock);
-
- /* Ensure ALL destroy work has been completed before return */
- flush_workqueue(bnx2fc_wq);
return;
default:
@@ -1215,8 +1212,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
mutex_unlock(&n_port->lp_mutex);
bnx2fc_free_vport(interface->hba, port->lport);
bnx2fc_port_shutdown(port->lport);
+ bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
- queue_work(bnx2fc_wq, &port->destroy_work);
return 0;
}
@@ -1525,7 +1522,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
port->lport = lport;
port->priv = interface;
port->get_netdev = bnx2fc_netdev;
- INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
rc = bnx2fc_lport_config(lport);
@@ -1653,8 +1649,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
bnx2fc_interface_cleanup(interface);
bnx2fc_stop(interface);
list_del(&interface->list);
+ bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
- queue_work(bnx2fc_wq, &port->destroy_work);
}
/**
@@ -1694,15 +1690,12 @@ netdev_err:
return rc;
}
-static void bnx2fc_destroy_work(struct work_struct *work)
+static void bnx2fc_port_destroy(struct fcoe_port *port)
{
- struct fcoe_port *port;
struct fc_lport *lport;
- port = container_of(work, struct fcoe_port, destroy_work);
lport = port->lport;
-
- BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+ BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
bnx2fc_if_destroy(lport);
}
@@ -2556,9 +2549,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
__bnx2fc_destroy(interface);
mutex_unlock(&bnx2fc_dev_lock);
- /* Ensure ALL destroy work has been completed before return */
- flush_workqueue(bnx2fc_wq);
-
bnx2fc_ulp_stop(hba);
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 27012908b586..908854869864 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -239,7 +239,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
u_char *buffer;
int result;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if(!buffer)
return -ENOMEM;
@@ -297,7 +297,7 @@ ch_readconfig(scsi_changer *ch)
int result,id,lun,i;
u_int elem;
- buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kzalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -783,7 +783,7 @@ static long ch_ioctl(struct file *file,
return -EINVAL;
elem = ch->firsts[cge.cge_type] + cge.cge_unit;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
mutex_lock(&ch->lock);
@@ -877,7 +877,7 @@ static long ch_ioctl(struct file *file,
}
default:
- return scsi_ioctl(ch->device, NULL, file->f_mode, cmd, argp);
+ return scsi_ioctl(ch->device, file->f_mode, cmd, argp);
}
}
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 9b8796c9e634..c11916b8ae00 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -946,7 +946,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* layer, invoke 'done' on completion
*
* @cmd: pointer to scsi command object
- * @done: function pointer to be invoked on completion
*
* Returns 1 if the adapter (host) is busy, else returns 0. One
* reason for an adapter to be busy is that the number
@@ -959,7 +958,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
* and is expected to be held on return.
*
- **/
+ */
static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index b2b61bc45f12..b08fc8839808 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -261,7 +261,7 @@ efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
dma.size = FW_WRITE_BUFSIZE;
dma.virt = dma_alloc_coherent(&efct->pci->dev,
- dma.size, &dma.phys, GFP_DMA);
+ dma.size, &dma.phys, GFP_KERNEL);
if (!dma.virt)
return -ENOMEM;
@@ -541,13 +541,10 @@ efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, efct);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
- dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n");
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
- dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
- rc = -1;
- goto dma_mask_out;
- }
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
+ goto dma_mask_out;
}
num_interrupts = efct_device_interrupts_required(efct);
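Dropping the 32-bit retry in efct_pci_probe() relies on a documented DMA-API property: on modern kernels, a request for a 64-bit mask only fails if no supported mask would work, so falling back to DMA_BIT_MASK(32) after a 64-bit failure is dead code. The simplified shape, as a sketch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int my_probe_dma(struct pci_dev *pdev)
{
	int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (rc)
		dev_err(&pdev->dev, "no usable DMA addressing\n");
	return rc;	/* a 32-bit retry could not succeed either */
}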
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
index ba8256b4c782..d4bb37960a3c 100644
--- a/drivers/scsi/elx/efct/efct_hw.c
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -516,7 +516,7 @@ efct_hw_setup_io(struct efct_hw *hw)
dma = &hw->xfer_rdy;
dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
dma->virt = dma_alloc_coherent(&efct->pci->dev,
- dma->size, &dma->phys, GFP_DMA);
+ dma->size, &dma->phys, GFP_KERNEL);
if (!dma->virt)
return -ENOMEM;
}
@@ -562,7 +562,7 @@ efct_hw_setup_io(struct efct_hw *hw)
sizeof(struct sli4_sge);
dma->virt = dma_alloc_coherent(&efct->pci->dev,
dma->size, &dma->phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!dma->virt) {
efc_log_err(hw->os, "dma_alloc fail %d\n", i);
memset(&io->def_sgl, 0,
@@ -618,7 +618,7 @@ efct_hw_init_prereg_io(struct efct_hw *hw)
memset(&req, 0, sizeof(struct efc_dma));
req.size = 32 + sgls_per_request * 16;
req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!req.virt) {
kfree(sgls);
return -ENOMEM;
@@ -1063,7 +1063,7 @@ efct_hw_init(struct efct_hw *hw)
dma = &hw->loop_map;
dma->size = SLI4_MIN_LOOP_MAP_BYTES;
dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!dma->virt)
return -EIO;
@@ -1192,7 +1192,7 @@ efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
prq->dma.size,
&prq->dma.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!prq->dma.virt) {
efc_log_err(hw->os, "DMA allocation failed\n");
kfree(rq_buf);
diff --git a/drivers/scsi/elx/efct/efct_io.c b/drivers/scsi/elx/efct/efct_io.c
index 71e21655916a..c3247b951a76 100644
--- a/drivers/scsi/elx/efct/efct_io.c
+++ b/drivers/scsi/elx/efct/efct_io.c
@@ -48,7 +48,7 @@ efct_io_pool_create(struct efct *efct, u32 num_sgl)
io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
io->rspbuf.size,
- &io->rspbuf.phys, GFP_DMA);
+ &io->rspbuf.phys, GFP_KERNEL);
if (!io->rspbuf.virt) {
efc_log_err(efct, "dma_alloc rspbuf failed\n");
efct_io_pool_free(io_pool);
diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c
index f8665d48904a..da4ac8a4ce12 100644
--- a/drivers/scsi/elx/libefc/efc_cmds.c
+++ b/drivers/scsi/elx/libefc/efc_cmds.c
@@ -179,7 +179,7 @@ efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
nport->dma.size = EFC_SPARAM_DMA_SZ;
nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
nport->dma.size, &nport->dma.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!nport->dma.virt) {
efc_log_err(efc, "Failed to allocate DMA memory\n");
efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
@@ -466,7 +466,7 @@ efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
domain->dma.size = EFC_SPARAM_DMA_SZ;
domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
domain->dma.size,
- &domain->dma.phys, GFP_DMA);
+ &domain->dma.phys, GFP_KERNEL);
if (!domain->dma.virt) {
efc_log_err(efc, "Failed to allocate DMA memory\n");
return -EIO;
diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c
index 24db0accb256..84bc81d7ce76 100644
--- a/drivers/scsi/elx/libefc/efc_els.c
+++ b/drivers/scsi/elx/libefc/efc_els.c
@@ -46,18 +46,14 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
efc = node->efc;
- spin_lock_irqsave(&node->els_ios_lock, flags);
-
if (!node->els_io_enabled) {
efc_log_err(efc, "els io alloc disabled\n");
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
if (!els) {
atomic_add_return(1, &efc->els_io_alloc_failed_count);
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
@@ -71,16 +67,15 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
/* now allocate DMA for request and response */
els->io.req.size = reqlen;
els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
- &els->io.req.phys, GFP_DMA);
+ &els->io.req.phys, GFP_KERNEL);
if (!els->io.req.virt) {
mempool_free(els, efc->els_io_pool);
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
els->io.rsp.size = rsplen;
els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
- &els->io.rsp.phys, GFP_DMA);
+ &els->io.rsp.phys, GFP_KERNEL);
if (!els->io.rsp.virt) {
dma_free_coherent(&efc->pci->dev, els->io.req.size,
els->io.req.virt, els->io.req.phys);
@@ -94,10 +89,11 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
/* add els structure to ELS IO list */
INIT_LIST_HEAD(&els->list_entry);
+ spin_lock_irqsave(&node->els_ios_lock, flags);
list_add_tail(&els->list_entry, &node->els_ios_list);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
}
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return els;
}
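The efc_els_io_alloc_size() rework is a correctness fix as much as a cleanup: dma_alloc_coherent() with a sleeping GFP flag must not run under the els_ios_lock spinlock. The patch performs all allocations first and takes the lock only around the shared-list insertion. A sketch of the pattern, where my_els, my_node, and their members are hypothetical stand-ins:

static struct my_els *my_els_alloc(struct my_node *node, size_t req_len)
{
	unsigned long flags;
	struct my_els *els = mempool_alloc(node->pool, GFP_ATOMIC);

	if (!els)
		return NULL;

	/* dma_alloc_coherent(GFP_KERNEL) may sleep: no spinlock held. */
	els->req = dma_alloc_coherent(node->dev, req_len,
				      &els->req_phys, GFP_KERNEL);
	if (!els->req) {
		mempool_free(els, node->pool);
		return NULL;
	}

	/* Lock only the list update. */
	spin_lock_irqsave(&node->lock, flags);
	list_add_tail(&els->entry, &node->els_list);
	spin_unlock_irqrestore(&node->lock, flags);
	return els;
}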
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index 907d67aeac23..3ea57bd6fb0a 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -445,7 +445,7 @@ sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
dma->size = payload_size;
dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
- &dma->phys, GFP_DMA);
+ &dma->phys, GFP_KERNEL);
if (!dma->virt)
return -EIO;
@@ -508,7 +508,7 @@ __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
q->dma.size = size * n_entries;
q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
- &q->dma.phys, GFP_DMA);
+ &q->dma.phys, GFP_KERNEL);
if (!q->dma.virt) {
memset(&q->dma, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
@@ -849,7 +849,7 @@ static int sli_cmd_cq_set_create(struct sli4 *sli4,
dma->size = payload_size;
dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
- &dma->phys, GFP_DMA);
+ &dma->phys, GFP_KERNEL);
if (!dma->virt)
return -EIO;
@@ -4413,7 +4413,7 @@ sli_get_ctrl_attributes(struct sli4 *sli4)
psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
data.size = psize;
data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
- &data.phys, GFP_DMA);
+ &data.phys, GFP_KERNEL);
if (!data.virt) {
memset(&data, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
@@ -4653,7 +4653,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
*/
sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
- &sli4->bmbx.phys, GFP_DMA);
+ &sli4->bmbx.phys, GFP_KERNEL);
if (!sli4->bmbx.virt) {
memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
@@ -4674,7 +4674,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
sli4->vpd_data.size,
&sli4->vpd_data.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!sli4->vpd_data.virt) {
memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
/* Note that failure isn't fatal in this specific case */
@@ -5070,7 +5070,7 @@ sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
payload_dma->size = payload_size;
payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
payload_dma->size,
- &payload_dma->phys, GFP_DMA);
+ &payload_dma->phys, GFP_KERNEL);
if (!payload_dma->virt) {
memset(payload_dma, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "mbox payload memory allocation fail\n");
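All of the efct/libefc/sli4 hunks above make the same substitution, and for the same reason: the gfp argument to dma_alloc_coherent() only steers the allocator's behaviour (whether it may sleep, use reserves, and so on); the addressable range of the returned buffer is bounded by the device's DMA mask, not by the GFP zone. Passing GFP_DMA there pointlessly confined coherent buffers to the tiny ZONE_DMA. A sketch of the corrected call:

#include <linux/dma-mapping.h>

static void *my_alloc_coherent(struct device *dev, size_t size,
			       dma_addr_t *phys)
{
	/* The device's DMA mask, not the GFP zone, bounds the address. */
	return dma_alloc_coherent(dev, size, phys, GFP_KERNEL);
}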
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2213a91923a5..15a58c955516 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -8,7 +8,6 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
-#include <linux/async.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
@@ -134,6 +133,11 @@ struct hisi_sas_rst {
bool done;
};
+struct hisi_sas_internal_abort {
+ unsigned int flag;
+ unsigned int tag;
+};
+
#define HISI_SAS_RST_WORK_INIT(r, c) \
{ .hisi_hba = hisi_hba, \
.completion = &c, \
@@ -154,6 +158,7 @@ enum hisi_sas_bit_err_type {
enum hisi_sas_phy_event {
HISI_PHYE_PHY_UP = 0U,
HISI_PHYE_LINK_RESET,
+ HISI_PHYE_PHY_UP_PM,
HISI_PHYES_NUM,
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index f206c433de32..2f53a2ee024a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -158,7 +158,7 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
- clear_bit(slot_idx, bitmap);
+ __clear_bit(slot_idx, bitmap);
}
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
@@ -175,7 +175,7 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
- set_bit(slot_idx, bitmap);
+ __set_bit(slot_idx, bitmap);
}
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
@@ -206,14 +206,6 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
return index;
}
-static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
-{
- int i;
-
- for (i = 0; i < hisi_hba->slot_index_count; ++i)
- hisi_sas_slot_index_clear(hisi_hba, i);
-}
-
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
@@ -273,11 +265,11 @@ static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
}
static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort)
+ struct hisi_sas_internal_abort *abort,
+ struct hisi_sas_slot *slot, int device_id)
{
hisi_hba->hw->prep_abort(hisi_hba, slot,
- device_id, abort_flag, tag_to_abort);
+ device_id, abort->flag, abort->tag);
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
@@ -403,95 +395,20 @@ err_out_dif_dma_unmap:
return rc;
}
-static int hisi_sas_task_prep(struct sas_task *task,
- struct hisi_sas_dq **dq_pointer,
- bool is_tmf, struct hisi_sas_tmf_task *tmf,
- int *pass)
+static
+void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ struct hisi_sas_dq *dq,
+ struct hisi_sas_device *sas_dev,
+ struct hisi_sas_internal_abort *abort,
+ struct hisi_sas_tmf_task *tmf)
{
- struct domain_device *device = task->dev;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct hisi_sas_port *port;
- struct hisi_sas_slot *slot;
- struct hisi_sas_cmd_hdr *cmd_hdr_base;
- struct asd_sas_port *sas_port = device->port;
- struct device *dev = hisi_hba->dev;
- int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
- int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
- struct scsi_cmnd *scmd = NULL;
- struct hisi_sas_dq *dq;
+ struct hisi_sas_cmd_hdr *cmd_hdr_base;
+ int dlvry_queue_slot, dlvry_queue;
+ struct sas_task *task = slot->task;
unsigned long flags;
int wr_q_index;
- if (DEV_IS_GONE(sas_dev)) {
- if (sas_dev)
- dev_info(dev, "task prep: device %d not ready\n",
- sas_dev->device_id);
- else
- dev_info(dev, "task prep: device %016llx not ready\n",
- SAS_ADDR(device->sas_addr));
-
- return -ECOMM;
- }
-
- if (task->uldd_task) {
- struct ata_queued_cmd *qc;
-
- if (dev_is_sata(device)) {
- qc = task->uldd_task;
- scmd = qc->scsicmd;
- } else {
- scmd = task->uldd_task;
- }
- }
-
- if (scmd) {
- unsigned int dq_index;
- u32 blk_tag;
-
- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
- dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
- *dq_pointer = dq = &hisi_hba->dq[dq_index];
- } else {
- struct Scsi_Host *shost = hisi_hba->shost;
- struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- int queue = qmap->mq_map[raw_smp_processor_id()];
-
- *dq_pointer = dq = &hisi_hba->dq[queue];
- }
-
- port = to_hisi_sas_port(sas_port);
- if (port && !port->port_attached) {
- dev_info(dev, "task prep: %s port%d not attach device\n",
- (dev_is_sata(device)) ?
- "SATA/STP" : "SAS",
- device->port->id);
-
- return -ECOMM;
- }
-
- rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
- &n_elem_req);
- if (rc < 0)
- goto prep_out;
-
- if (!sas_protocol_ata(task->task_proto)) {
- rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
- if (rc < 0)
- goto err_out_dma_unmap;
- }
-
- if (hisi_hba->hw->slot_index_alloc)
- rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
- else
- rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
-
- if (rc < 0)
- goto err_out_dif_dma_unmap;
-
- slot_idx = rc;
- slot = &hisi_hba->slot_info[slot_idx];
-
spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
@@ -505,16 +422,13 @@ static int hisi_sas_task_prep(struct sas_task *task,
dlvry_queue_slot = wr_q_index;
slot->device_id = sas_dev->device_id;
- slot->n_elem = n_elem;
- slot->n_elem_dif = n_elem_dif;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
- slot->task = task;
- slot->port = port;
+
slot->tmf = tmf;
- slot->is_internal = is_tmf;
+ slot->is_internal = tmf;
task->lldd_task = slot;
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
@@ -534,8 +448,14 @@ static int hisi_sas_task_prep(struct sas_task *task,
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
hisi_sas_task_prep_ata(hisi_hba, slot);
break;
+ case SAS_PROTOCOL_NONE:
+ if (abort) {
+ hisi_sas_task_prep_abort(hisi_hba, abort, slot, sas_dev->device_id);
+ break;
+ }
+ fallthrough;
default:
- dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
+ dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
task->task_proto);
break;
}
@@ -544,32 +464,27 @@ static int hisi_sas_task_prep(struct sas_task *task,
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- ++(*pass);
WRITE_ONCE(slot->ready, 1);
- return 0;
-
-err_out_dif_dma_unmap:
- if (!sas_protocol_ata(task->task_proto))
- hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
-err_out_dma_unmap:
- hisi_sas_dma_unmap(hisi_hba, task, n_elem,
- n_elem_req);
-prep_out:
- dev_err(dev, "task prep: failed[%d]!\n", rc);
- return rc;
+ spin_lock(&dq->lock);
+ hisi_hba->hw->start_delivery(dq);
+ spin_unlock(&dq->lock);
}
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
- bool is_tmf, struct hisi_sas_tmf_task *tmf)
+ struct hisi_sas_tmf_task *tmf)
{
- u32 rc;
- u32 pass = 0;
- struct hisi_hba *hisi_hba;
- struct device *dev;
+ int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct scsi_cmnd *scmd = NULL;
struct hisi_sas_dq *dq = NULL;
+ struct hisi_sas_port *port;
+ struct hisi_hba *hisi_hba;
+ struct hisi_sas_slot *slot;
+ struct device *dev;
+ int rc;
if (!sas_port) {
struct task_status_struct *ts = &task->task_status;
@@ -596,17 +511,94 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
up(&hisi_hba->sem);
}
- /* protect task_prep and start_delivery sequence */
- rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
- if (rc)
- dev_err(dev, "task exec: failed[%d]!\n", rc);
+ if (DEV_IS_GONE(sas_dev)) {
+ if (sas_dev)
+ dev_info(dev, "task prep: device %d not ready\n",
+ sas_dev->device_id);
+ else
+ dev_info(dev, "task prep: device %016llx not ready\n",
+ SAS_ADDR(device->sas_addr));
+
+ return -ECOMM;
+ }
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(device)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
+ }
- if (likely(pass)) {
- spin_lock(&dq->lock);
- hisi_hba->hw->start_delivery(dq);
- spin_unlock(&dq->lock);
+ if (scmd) {
+ unsigned int dq_index;
+ u32 blk_tag;
+
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
+ dq = &hisi_hba->dq[dq_index];
+ } else {
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ int queue = qmap->mq_map[raw_smp_processor_id()];
+
+ dq = &hisi_hba->dq[queue];
+ }
+
+ port = to_hisi_sas_port(sas_port);
+ if (port && !port->port_attached) {
+ dev_info(dev, "task prep: %s port%d not attach device\n",
+ (dev_is_sata(device)) ?
+ "SATA/STP" : "SAS",
+ device->port->id);
+
+ return -ECOMM;
}
+ rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
+ &n_elem_req);
+ if (rc < 0)
+ goto prep_out;
+
+ if (!sas_protocol_ata(task->task_proto)) {
+ rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
+ if (rc < 0)
+ goto err_out_dma_unmap;
+ }
+
+ if (hisi_hba->hw->slot_index_alloc)
+ rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+ else
+ rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
+
+ if (rc < 0)
+ goto err_out_dif_dma_unmap;
+
+ slot = &hisi_hba->slot_info[rc];
+ slot->n_elem = n_elem;
+ slot->n_elem_dif = n_elem_dif;
+ slot->task = task;
+ slot->port = port;
+
+ slot->tmf = tmf;
+ slot->is_internal = tmf;
+
+ /* protect task_prep and start_delivery sequence */
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL, tmf);
+
+ return 0;
+
+err_out_dif_dma_unmap:
+ if (!sas_protocol_ata(task->task_proto))
+ hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
+err_out_dma_unmap:
+ hisi_sas_dma_unmap(hisi_hba, task, n_elem,
+ n_elem_req);
+prep_out:
+ dev_err(dev, "task exec: failed[%d]!\n", rc);
return rc;
}
@@ -619,12 +611,6 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
if (!phy->phy_attached)
return;
- if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) &&
- !sas_phy->suspended) {
- dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no);
- return;
- }
-
sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);
if (sas_phy->phy) {
@@ -860,10 +846,11 @@ int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
-static void hisi_sas_phyup_work(struct work_struct *work)
+static void hisi_sas_phyup_work_common(struct work_struct *work,
+ enum hisi_sas_phy_event event)
{
struct hisi_sas_phy *phy =
- container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
+ container_of(work, typeof(*phy), works[event]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
@@ -874,6 +861,11 @@ static void hisi_sas_phyup_work(struct work_struct *work)
hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}
+static void hisi_sas_phyup_work(struct work_struct *work)
+{
+ hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
+}
+
static void hisi_sas_linkreset_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
@@ -883,9 +875,21 @@ static void hisi_sas_linkreset_work(struct work_struct *work)
hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
+static void hisi_sas_phyup_pm_work(struct work_struct *work)
+{
+ struct hisi_sas_phy *phy =
+ container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct device *dev = hisi_hba->dev;
+
+ hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
+ pm_runtime_put_sync(dev);
+}
+
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
+ [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
@@ -917,10 +921,14 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct device *dev = hisi_hba->dev;
+ unsigned long flags;
dev_dbg(dev, "phy%d OOB ready\n", phy_no);
- if (phy->phy_attached)
+ spin_lock_irqsave(&phy->lock, flags);
+ if (phy->phy_attached) {
+ spin_unlock_irqrestore(&phy->lock, flags);
return;
+ }
if (!timer_pending(&phy->timer)) {
if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
@@ -928,13 +936,17 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
phy->timer.expires = jiffies +
HISI_SAS_WAIT_PHYUP_TIMEOUT;
add_timer(&phy->timer);
- } else {
- dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
- phy_no, phy->wait_phyup_cnt);
- phy->wait_phyup_cnt = 0;
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return;
}
+
+ dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
+ phy_no, phy->wait_phyup_cnt);
+ phy->wait_phyup_cnt = 0;
}
+ spin_unlock_irqrestore(&phy->lock, flags);
}
+
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
@@ -1105,7 +1117,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
- return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
+ return hisi_sas_task_exec(task, gfp_flags, NULL);
}
static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
@@ -1156,6 +1168,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
u8 sts = phy->phy_attached;
int ret = 0;
+ down(&hisi_hba->sem);
phy->reset_completion = &completion;
switch (func) {
@@ -1199,6 +1212,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
out:
phy->reset_completion = NULL;
+ up(&hisi_hba->sem);
return ret;
}
@@ -1259,8 +1273,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
add_timer(&task->slow_task->timer);
- res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
-
+ res = hisi_sas_task_exec(task, GFP_KERNEL, tmf);
if (res) {
del_timer_sync(&task->slow_task->timer);
dev_err(dev, "abort tmf: executing internal task failed: %d\n",
@@ -1433,11 +1446,13 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
sas_port = device->port;
port = to_hisi_sas_port(sas_port);
+ spin_lock(&sas_port->phy_list_lock);
list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
if (state & BIT(sas_phy->id)) {
phy = sas_phy->lldd_phy;
break;
}
+ spin_unlock(&sas_port->phy_list_lock);
if (phy) {
port->id = phy->port_id;
@@ -1510,26 +1525,24 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int s = sizeof(struct host_to_dev_fis);
int rc = TMF_RESP_FUNC_FAILED;
- struct asd_sas_phy *sas_phy;
struct ata_link *link;
u8 fis[20] = {0};
- u32 state;
+ int i;
- state = hisi_hba->hw->get_phys_state(hisi_hba);
- list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
- if (!(state & BIT(sas_phy->id)))
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ if (!(sas_port->phy_mask & BIT(i)))
continue;
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
- tmf_task.phy_id = sas_phy->id;
+ tmf_task.phy_id = i;
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
&tmf_task);
if (rc != TMF_RESP_FUNC_COMPLETE) {
dev_err(dev, "phy%d ata reset failed rc=%d\n",
- sas_phy->id, rc);
+ i, rc);
break;
}
}
@@ -1581,7 +1594,6 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
struct Scsi_Host *shost = hisi_hba->shost;
- down(&hisi_hba->sem);
hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
@@ -1606,9 +1618,9 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
if (hisi_hba->reject_stp_links_msk)
hisi_sas_terminate_stp_reject(hisi_hba);
hisi_sas_reset_init_all_devices(hisi_hba);
- up(&hisi_hba->sem);
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
@@ -1619,8 +1631,11 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
if (!hisi_hba->hw->soft_reset)
return -1;
- if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+ down(&hisi_hba->sem);
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
+ up(&hisi_hba->sem);
return -1;
+ }
if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
@@ -2021,19 +2036,17 @@ static int hisi_sas_query_task(struct sas_task *task)
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
- struct sas_task *task, int abort_flag,
- int task_tag, struct hisi_sas_dq *dq)
+ struct hisi_sas_internal_abort *abort,
+ struct sas_task *task,
+ struct hisi_sas_dq *dq)
{
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port;
- struct hisi_sas_slot *slot;
struct asd_sas_port *sas_port = device->port;
- struct hisi_sas_cmd_hdr *cmd_hdr_base;
- int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
- unsigned long flags;
- int wr_q_index;
+ struct hisi_sas_slot *slot;
+ int slot_idx;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
return -EINVAL;
@@ -2044,59 +2057,24 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
port = to_hisi_sas_port(sas_port);
/* simply get a slot and send abort command */
- rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
- if (rc < 0)
+ slot_idx = hisi_sas_slot_index_alloc(hisi_hba, NULL);
+ if (slot_idx < 0)
goto err_out;
- slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
-
- spin_lock(&dq->lock);
- wr_q_index = dq->wr_point;
- dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
- list_add_tail(&slot->delivery, &dq->list);
- spin_unlock(&dq->lock);
- spin_lock(&sas_dev->lock);
- list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock(&sas_dev->lock);
-
- dlvry_queue = dq->id;
- dlvry_queue_slot = wr_q_index;
-
- slot->device_id = sas_dev->device_id;
- slot->n_elem = n_elem;
- slot->dlvry_queue = dlvry_queue;
- slot->dlvry_queue_slot = dlvry_queue_slot;
- cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
- slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
+ slot->n_elem = 0;
slot->task = task;
slot->port = port;
slot->is_internal = true;
- task->lldd_task = slot;
- memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
- memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
- memset(hisi_sas_status_buf_addr_mem(slot), 0,
- sizeof(struct hisi_sas_err_record));
-
- hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
- abort_flag, task_tag);
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- WRITE_ONCE(slot->ready, 1);
- /* send abort command to the chip */
- spin_lock(&dq->lock);
- hisi_hba->hw->start_delivery(dq);
- spin_unlock(&dq->lock);
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort, NULL);
return 0;
err_out:
- dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
+ dev_err(dev, "internal abort task prep: failed[%d]!\n", slot_idx);
- return rc;
+ return slot_idx;
}
/**
@@ -2118,9 +2096,12 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
{
struct sas_task *task;
struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_internal_abort abort = {
+ .flag = abort_flag,
+ .tag = tag,
+ };
struct device *dev = hisi_hba->dev;
int res;
-
/*
* The interface is not realized means this HW don't support internal
* abort, or don't need to do internal abort. Then here, we return
@@ -2138,14 +2119,14 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
return -ENOMEM;
task->dev = device;
- task->task_proto = device->tproto;
+ task->task_proto = SAS_PROTOCOL_NONE;
task->task_done = hisi_sas_task_done;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT;
add_timer(&task->slow_task->timer);
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
- task, abort_flag, tag, dq);
+ &abort, task, dq);
if (res) {
del_timer_sync(&task->slow_task->timer);
dev_err(dev, "internal task abort: executing internal task failed: %d\n",
@@ -2516,9 +2497,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
if (!hisi_hba->breakpoint)
goto err_out;
- hisi_hba->slot_index_count = max_command_entries;
- s = hisi_hba->slot_index_count / BITS_PER_BYTE;
- hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
+ s = hisi_hba->slot_index_count = max_command_entries;
+ hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
if (!hisi_hba->slot_index_tags)
goto err_out;
@@ -2536,7 +2516,6 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
if (!hisi_hba->sata_breakpoint)
goto err_out;
- hisi_sas_slot_index_init(hisi_hba);
hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;
hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
@@ -2687,9 +2666,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (error)
- error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
if (error) {
dev_err(dev, "No usable DMA addressing method\n");
goto err_out;
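Beyond the large task_prep/task_deliver split, two smaller hisi_sas_main.c changes are easy to miss. The tag-bitmap updates switch to the non-atomic __set_bit()/__clear_bit(), which is valid on the patch's premise that a host lock already serializes these updates, so the bus-locked RMW of the atomic variants bought nothing. And devm_bitmap_zalloc() is sized in bits and returns the bitmap zeroed, which is what made hisi_sas_slot_index_init() deletable. A sketch under those assumptions:

#include <linux/bitmap.h>

static int my_init_tags(struct device *dev, unsigned int nr_slots,
			unsigned long **tags)
{
	*tags = devm_bitmap_zalloc(dev, nr_slots, GFP_KERNEL);
	if (!*tags)
		return -ENOMEM;	/* returned zeroed: no init loop needed */
	return 0;
}

/* Under the host lock, the non-atomic helpers suffice:
 *	__set_bit(idx, tags);
 *	__clear_bit(idx, tags);
 */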
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 0ef6c21bf081..a01a3a7b706b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1484,7 +1484,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = hisi_hba->dev;
- del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
@@ -1561,9 +1560,18 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
}
phy->port_id = port_id;
- phy->phy_attached = 1;
- hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
+
+ /* This get pairs with pm_runtime_put_sync() in hisi_sas_phyup_pm_work() */
+ pm_runtime_get_noresume(dev);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
+
res = IRQ_HANDLED;
+
+ spin_lock(&phy->lock);
+ /* Delete timer and set phy_attached atomically */
+ del_timer(&phy->timer);
+ phy->phy_attached = 1;
+ spin_unlock(&phy->lock);
end:
if (phy->reset_completion)
complete(phy->reset_completion);
@@ -4687,8 +4695,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
rc = -ENODEV;
@@ -4775,6 +4781,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scsi_scan_host(shost);
+ pm_runtime_set_autosuspend_delay(dev, 5000);
+ pm_runtime_use_autosuspend(dev);
/*
* For the situation that there are ATA disks connected with SAS
* controller, it additionally creates ata_port which will affect the
@@ -4848,6 +4856,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
int rc;
dev_info(dev, "FLR prepare\n");
+ down(&hisi_hba->sem);
set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_prepare(hisi_hba);
@@ -4897,6 +4906,8 @@ static int _suspend_v3_hw(struct device *device)
if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
+ dev_warn(dev, "entering suspend state\n");
+
scsi_block_requests(shost);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
flush_workqueue(hisi_hba->wq);
@@ -4912,11 +4923,11 @@ static int _suspend_v3_hw(struct device *device)
hisi_sas_init_mem(hisi_hba);
- dev_warn(dev, "entering suspend state\n");
-
hisi_sas_release_tasks(hisi_hba);
sas_suspend_ha(sha);
+
+ dev_warn(dev, "end of suspending controller\n");
return 0;
}
@@ -4943,9 +4954,19 @@ static int _resume_v3_hw(struct device *device)
return rc;
}
phys_init_v3_hw(hisi_hba);
- sas_resume_ha(sha);
+
+ /*
+ * If a directly-attached disk is removed during suspend, a deadlock
+ * may occur, as the PHYE_RESUME_TIMEOUT processing will require the
+ * hisi_hba->device to be active, which can only happen when resume
+ * completes. So don't wait for the HA event workqueue to drain upon
+ * resume.
+ */
+ sas_resume_ha_no_sync(sha);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ dev_warn(dev, "end of resuming controller\n");
+
return 0;
}
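The v3_hw phy-up path now takes a runtime-PM reference in hard-IRQ context with pm_runtime_get_noresume(), which never sleeps, and releases it from the deferred phy-up work, so the controller cannot autosuspend between the interrupt and the completion of bring-up. A sketch of that handoff with hypothetical names; surrounding declarations are omitted:

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

static irqreturn_t my_phy_up_irq(int irq, void *data)
{
	struct my_phy *phy = data;

	pm_runtime_get_noresume(phy->dev);	/* safe in IRQ context */
	queue_work(my_wq, &phy->up_work);	/* put happens in the work */
	return IRQ_HANDLED;
}

static void my_phy_up_work(struct work_struct *work)
{
	struct my_phy *phy = container_of(work, struct my_phy, up_work);

	/* ...notify libsas, attach the phy... */
	pm_runtime_put_sync(phy->dev);		/* pairs with the ISR's get */
}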
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 8049b00b6766..f69b77cbf538 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -61,6 +61,7 @@ static void scsi_host_cls_release(struct device *dev)
static struct class shost_class = {
.name = "scsi_host",
.dev_release = scsi_host_cls_release,
+ .dev_groups = scsi_shost_groups,
};
/**
@@ -377,7 +378,7 @@ static struct device_type scsi_host_type = {
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
- int index, i, j = 0;
+ int index;
shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
if (!shost)
@@ -483,17 +484,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->shost_dev.parent = &shost->shost_gendev;
shost->shost_dev.class = &shost_class;
dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
- shost->shost_dev.groups = shost->shost_dev_attr_groups;
- shost->shost_dev_attr_groups[j++] = &scsi_shost_attr_group;
- if (sht->shost_groups) {
- for (i = 0; sht->shost_groups[i] &&
- j < ARRAY_SIZE(shost->shost_dev_attr_groups);
- i++, j++) {
- shost->shost_dev_attr_groups[j] =
- sht->shost_groups[i];
- }
- }
- WARN_ON_ONCE(j >= ARRAY_SIZE(shost->shost_dev_attr_groups));
+ shost->shost_dev.groups = sht->shost_groups;
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index cdf3328cc065..a47bcce3c9c7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4354,7 +4354,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
int i, ndevs_to_allocate;
int raid_ctlr_position;
bool physical_device;
- DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
@@ -4368,7 +4367,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
dev_err(&h->pdev->dev, "out of memory\n");
goto out;
}
- memset(lunzerobits, 0, sizeof(lunzerobits));
h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index fd6da96bc51a..5f96ac47d7fd 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2602,13 +2602,11 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
/**
* i91u_queuecommand_lck - Queue a new command if possible
* @cmd: SCSI command block from the mid layer
- * @done: Completion handler
*
* Attempts to queue a new command with the host adapter. Will return
* zero if successful or indicate a host busy condition if not (which
* will cause the mid layer to call us again later with the command)
*/
-
static int i91u_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
@@ -2849,7 +2847,8 @@ static int initio_probe_one(struct pci_dev *pdev,
for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
i = num_scb * sizeof(struct scsi_ctrl_blk);
- if ((scb = kzalloc(i, GFP_DMA)) != NULL)
+ scb = kzalloc(i, GFP_KERNEL);
+ if (scb)
break;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 12e1e36d7c04..758213694091 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -8,7 +8,6 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <linux/async.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "sas_internal.h"
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index f703115e7a25..3613b9b315bc 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -41,12 +41,25 @@ static int sas_queue_event(int event, struct sas_work *work,
return rc;
}
-
-void __sas_drain_work(struct sas_ha_struct *ha)
+void sas_queue_deferred_work(struct sas_ha_struct *ha)
{
struct sas_work *sw, *_sw;
int ret;
+ spin_lock_irq(&ha->lock);
+ list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
+ list_del_init(&sw->drain_node);
+ ret = sas_queue_work(ha, sw);
+ if (ret != 1) {
+ pm_runtime_put(ha->dev);
+ sas_free_event(to_asd_sas_event(&sw->work));
+ }
+ }
+ spin_unlock_irq(&ha->lock);
+}
+
+void __sas_drain_work(struct sas_ha_struct *ha)
+{
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
@@ -55,16 +68,8 @@ void __sas_drain_work(struct sas_ha_struct *ha)
drain_workqueue(ha->event_q);
drain_workqueue(ha->disco_q);
- spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
- list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
- list_del_init(&sw->drain_node);
- ret = sas_queue_work(ha, sw);
- if (ret != 1)
- sas_free_event(to_asd_sas_event(&sw->work));
-
- }
- spin_unlock_irq(&ha->lock);
+ sas_queue_deferred_work(ha);
}
int sas_drain_work(struct sas_ha_struct *ha)
@@ -104,11 +109,15 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
if (!test_and_clear_bit(ev, &d->pending))
continue;
- if (list_empty(&port->phy_list))
+ spin_lock(&port->phy_list_lock);
+ if (list_empty(&port->phy_list)) {
+ spin_unlock(&port->phy_list_lock);
continue;
+ }
sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
port_phy_el);
+ spin_unlock(&port->phy_list_lock);
sas_notify_port_event(sas_phy,
PORTE_BROADCAST_RCVD, GFP_KERNEL);
}
@@ -119,19 +128,43 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
static void sas_port_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *ha = phy->ha;
sas_port_event_fns[ev->event](work);
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
}
static void sas_phy_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *ha = phy->ha;
sas_phy_event_fns[ev->event](work);
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
}
+/* Defer events for new phys while the HA is resuming */
+static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev)
+{
+ struct sas_ha_struct *ha = phy->ha;
+ unsigned long flags;
+ bool deferred = false;
+
+ spin_lock_irqsave(&ha->lock, flags);
+ if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) {
+ struct sas_work *sw = &ev->work;
+
+ list_add_tail(&sw->drain_node, &ha->defer_q);
+ deferred = true;
+ }
+ spin_unlock_irqrestore(&ha->lock, flags);
+ return deferred;
+}
+
int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
gfp_t gfp_flags)
{
@@ -145,11 +178,19 @@ int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
if (!ev)
return -ENOMEM;
+ /* This get pairs with pm_runtime_put() in sas_port_event_worker() */
+ pm_runtime_get_noresume(ha->dev);
+
INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
+ if (sas_defer_event(phy, ev))
+ return 0;
+
ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1)
+ if (ret != 1) {
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
+ }
return ret;
}
@@ -168,11 +209,19 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
if (!ev)
return -ENOMEM;
+ /* This get pairs with pm_runtime_put() in sas_phy_event_worker() */
+ pm_runtime_get_noresume(ha->dev);
+
INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
+ if (sas_defer_event(phy, ev))
+ return 0;
+
ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1)
+ if (ret != 1) {
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
+ }
return ret;
}
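sas_event.c gains two coupled mechanisms here: every queued port/phy event now pins the HA with pm_runtime_get_noresume() until its worker finishes (or until queueing fails, in which case the reference is dropped immediately), and events raised for a not-yet-suspended phy while SAS_HA_RESUMING is set are parked on defer_q and replayed by sas_queue_deferred_work() once resume completes. A sketch of the defer decision, with my_* names standing in for the libsas types:

struct my_ha {
	unsigned long state;		/* MY_HA_RESUMING lives here */
	spinlock_t lock;
	struct list_head defer_q;	/* parked events */
};

static bool my_defer_event(struct my_ha *ha, struct my_event *ev)
{
	unsigned long flags;
	bool deferred = false;

	spin_lock_irqsave(&ha->lock, flags);
	if (test_bit(MY_HA_RESUMING, &ha->state)) {
		list_add_tail(&ev->node, &ha->defer_q);	/* replay later */
		deferred = true;
	}
	spin_unlock_irqrestore(&ha->lock, flags);
	return deferred;
}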
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index c2150a818423..6abce9dfc17b 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -58,7 +58,9 @@ static int smp_execute_task_sg(struct domain_device *dev,
struct sas_task *task = NULL;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
+ struct sas_ha_struct *ha = dev->port->ha;
+ pm_runtime_get_sync(ha->dev);
mutex_lock(&dev->ex_dev.cmd_mutex);
for (retry = 0; retry < 3; retry++) {
if (test_bit(SAS_DEV_GONE, &dev->state)) {
@@ -131,6 +133,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
}
}
mutex_unlock(&dev->ex_dev.cmd_mutex);
+ pm_runtime_put_sync(ha->dev);
BUG_ON(retry == 3 && task != NULL);
sas_free_task(task);
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index b640e09af6a4..dc35f0f8eae3 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -362,6 +362,7 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
int i;
set_bit(SAS_HA_REGISTERED, &ha->state);
+ set_bit(SAS_HA_RESUMING, &ha->state);
/* clear out any stale link events/data from the suspension path */
for (i = 0; i < ha->num_phys; i++) {
@@ -387,7 +388,31 @@ static int phys_suspended(struct sas_ha_struct *ha)
return rc;
}
-void sas_resume_ha(struct sas_ha_struct *ha)
+static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha)
+{
+ int i;
+
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_port *port = ha->sas_port[i];
+ struct domain_device *dev = port->port_dev;
+
+ if (dev && dev_is_expander(dev->dev_type)) {
+ struct asd_sas_phy *first_phy;
+
+ spin_lock(&port->phy_list_lock);
+ first_phy = list_first_entry_or_null(
+ &port->phy_list, struct asd_sas_phy,
+ port_phy_el);
+ spin_unlock(&port->phy_list_lock);
+
+ if (first_phy)
+ sas_notify_port_event(first_phy,
+ PORTE_BROADCAST_RCVD, GFP_KERNEL);
+ }
+ }
+}
+
+static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
{
const unsigned long tmo = msecs_to_jiffies(25000);
int i;
@@ -417,10 +442,30 @@ void sas_resume_ha(struct sas_ha_struct *ha)
* flush out disks that did not return
*/
scsi_unblock_requests(ha->core.shost);
- sas_drain_work(ha);
+ if (drain)
+ sas_drain_work(ha);
+ clear_bit(SAS_HA_RESUMING, &ha->state);
+
+ sas_queue_deferred_work(ha);
+ /* Send PORTE_BROADCAST_RCVD to discover disks newly inserted
+ * behind an expander while the HA was suspended.
+ */
+ sas_resume_insert_broadcast_ha(ha);
+}
+
+void sas_resume_ha(struct sas_ha_struct *ha)
+{
+ _sas_resume_ha(ha, true);
}
EXPORT_SYMBOL(sas_resume_ha);
+/* A no-sync variant, which does not call sas_drain_work(). */
+void sas_resume_ha_no_sync(struct sas_ha_struct *ha)
+{
+ _sas_resume_ha(ha, false);
+}
+EXPORT_SYMBOL(sas_resume_ha_no_sync);
+
void sas_suspend_ha(struct sas_ha_struct *ha)
{
int i;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index d7a1fb5c10c6..acd515c01861 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -14,6 +14,7 @@
#include <scsi/scsi_transport_sas.h>
#include <scsi/libsas.h>
#include <scsi/sas_ata.h>
+#include <linux/pm_runtime.h>
#ifdef pr_fmt
#undef pr_fmt
@@ -56,6 +57,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha);
void sas_disable_revalidation(struct sas_ha_struct *ha);
void sas_enable_revalidation(struct sas_ha_struct *ha);
+void sas_queue_deferred_work(struct sas_ha_struct *ha);
void __sas_drain_work(struct sas_ha_struct *ha);
void sas_deform_port(struct asd_sas_phy *phy, int gone);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index d337fdf1b9ca..fb19e739a39c 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -37,7 +37,8 @@
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
- int hs = 0, stat = 0;
+ enum scsi_host_status hs = DID_OK;
+ enum exec_status stat = SAS_SAM_STAT_GOOD;
if (ts->resp == SAS_TASK_UNDELIVERED) {
/* transport error */
@@ -82,10 +83,10 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
case SAS_ABORTED_TASK:
hs = DID_ABORT;
break;
- case SAM_STAT_CHECK_CONDITION:
+ case SAS_SAM_STAT_CHECK_CONDITION:
memcpy(sc->sense_buffer, ts->buf,
min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
- stat = SAM_STAT_CHECK_CONDITION;
+ stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
default:
stat = ts->stat;
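The sas_end_task() hunk replaces untyped zero initialisers with the proper enum constants, so hs and stat carry their meaning in the type system instead of relying on 0 happening to equal DID_OK and GOOD. As a fragment:

	enum scsi_host_status hs = DID_OK;		/* was: int hs = 0 */
	enum exec_status stat = SAS_SAM_STAT_GOOD;	/* was: int stat = 0 */

	/* Later combined the classic way: host byte in bits 23:16,
	 * SAM status in the low byte. */
	sc->result = (hs << 16) | stat;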
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2f8e6d0a926f..4878c94761f9 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -496,52 +496,50 @@ struct lpfc_cgn_info {
__le32 cgn_alarm_hr[24];
__le32 cgn_alarm_day[LPFC_MAX_CGN_DAYS];
- /* Start of congestion statistics */
- uint8_t cgn_stat_npm; /* Notifications per minute */
-
- /* Start Time */
- uint8_t cgn_stat_month;
- uint8_t cgn_stat_day;
- uint8_t cgn_stat_year;
- uint8_t cgn_stat_hour;
- uint8_t cgn_stat_minute;
- uint8_t cgn_pad2[2];
-
- __le32 cgn_notification;
- __le32 cgn_peer_notification;
- __le32 link_integ_notification;
- __le32 delivery_notification;
-
- uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
- uint8_t cgn_stat_cgn_day;
- uint8_t cgn_stat_cgn_year;
- uint8_t cgn_stat_cgn_hour;
- uint8_t cgn_stat_cgn_min;
- uint8_t cgn_stat_cgn_sec;
-
- uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
- uint8_t cgn_stat_peer_day;
- uint8_t cgn_stat_peer_year;
- uint8_t cgn_stat_peer_hour;
- uint8_t cgn_stat_peer_min;
- uint8_t cgn_stat_peer_sec;
-
- uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
- uint8_t cgn_stat_lnk_day;
- uint8_t cgn_stat_lnk_year;
- uint8_t cgn_stat_lnk_hour;
- uint8_t cgn_stat_lnk_min;
- uint8_t cgn_stat_lnk_sec;
-
- uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
- uint8_t cgn_stat_del_day;
- uint8_t cgn_stat_del_year;
- uint8_t cgn_stat_del_hour;
- uint8_t cgn_stat_del_min;
- uint8_t cgn_stat_del_sec;
-#define LPFC_CGN_STAT_SIZE 48
-#define LPFC_CGN_DATA_SIZE (sizeof(struct lpfc_cgn_info) - \
- LPFC_CGN_STAT_SIZE - sizeof(uint32_t))
+ struct_group(cgn_stat,
+ uint8_t cgn_stat_npm; /* Notifications per minute */
+
+ /* Start Time */
+ uint8_t cgn_stat_month;
+ uint8_t cgn_stat_day;
+ uint8_t cgn_stat_year;
+ uint8_t cgn_stat_hour;
+ uint8_t cgn_stat_minute;
+ uint8_t cgn_pad2[2];
+
+ __le32 cgn_notification;
+ __le32 cgn_peer_notification;
+ __le32 link_integ_notification;
+ __le32 delivery_notification;
+
+ uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
+ uint8_t cgn_stat_cgn_day;
+ uint8_t cgn_stat_cgn_year;
+ uint8_t cgn_stat_cgn_hour;
+ uint8_t cgn_stat_cgn_min;
+ uint8_t cgn_stat_cgn_sec;
+
+ uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
+ uint8_t cgn_stat_peer_day;
+ uint8_t cgn_stat_peer_year;
+ uint8_t cgn_stat_peer_hour;
+ uint8_t cgn_stat_peer_min;
+ uint8_t cgn_stat_peer_sec;
+
+ uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
+ uint8_t cgn_stat_lnk_day;
+ uint8_t cgn_stat_lnk_year;
+ uint8_t cgn_stat_lnk_hour;
+ uint8_t cgn_stat_lnk_min;
+ uint8_t cgn_stat_lnk_sec;
+
+ uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
+ uint8_t cgn_stat_del_day;
+ uint8_t cgn_stat_del_year;
+ uint8_t cgn_stat_del_hour;
+ uint8_t cgn_stat_del_min;
+ uint8_t cgn_stat_del_sec;
+ );
__le32 cgn_info_crc;
#define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41
@@ -669,8 +667,6 @@ struct lpfc_vport {
struct timer_list els_tmofunc;
struct timer_list delayed_disc_tmo;
- int unreg_vpi_cmpl;
-
uint8_t load_flag;
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
@@ -1023,7 +1019,6 @@ struct lpfc_hba {
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
-#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed
@@ -1031,7 +1026,7 @@ struct lpfc_hba {
*/
#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
-#define HBA_CGN_RSVD1 0x200000 /* Reserved CGN flag */
+#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */
#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */
@@ -1040,6 +1035,7 @@ struct lpfc_hba {
#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */
+ struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -1604,6 +1600,7 @@ struct lpfc_hba {
#define LPFC_MAX_RXMONITOR_ENTRY 800
#define LPFC_MAX_RXMONITOR_DUMP 32
struct rxtable_entry {
+ uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
uint64_t total_bytes; /* Total no of read bytes requested */
uint64_t rcv_bytes; /* Total no of read bytes completed */
uint64_t avg_io_size;
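Wrapping the congestion-statistics members in struct_group() replaces the hand-counted LPFC_CGN_STAT_SIZE/LPFC_CGN_DATA_SIZE macros: the block can now be sized and copied as sizeof() of the group member, and fortified memcpy() can verify the bounds. A reduced sketch of the idiom, with a made-up struct:

#include <linux/stddef.h>	/* struct_group() */
#include <linux/types.h>

struct my_info {
	__le32 hdr;
	struct_group(stat,	/* members addressable as one unit */
		u8 npm;
		u8 month, day, year, hour, minute;
	);
	__le32 crc;
};

/* e.g. memset(&info->stat, 0, sizeof(info->stat)); */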
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index dd4c51b6ef4e..7a7f17d71811 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1709,25 +1709,25 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
before_fc_flag = phba->pport->fc_flag;
sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
- /* Disable SR-IOV virtual functions if enabled */
- if (phba->cfg_sriov_nr_virtfn) {
- pci_disable_sriov(pdev);
- phba->cfg_sriov_nr_virtfn = 0;
- }
+ if (opcode == LPFC_FW_DUMP) {
+ init_completion(&online_compl);
+ phba->fw_dump_cmpl = &online_compl;
+ } else {
+ /* Disable SR-IOV virtual functions if enabled */
+ if (phba->cfg_sriov_nr_virtfn) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
- if (opcode == LPFC_FW_DUMP)
- phba->hba_flag |= HBA_FW_DUMP_OP;
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+ if (status != 0)
+ return status;
- if (status != 0) {
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
- return status;
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
}
- /* wait for the device to be quiesced before firmware reset */
- msleep(100);
-
reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PDEV_CTL_OFFSET);
@@ -1756,24 +1756,42 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3153 Fail to perform the requested "
"access: x%x\n", reg_val);
+ if (phba->fw_dump_cmpl)
+ phba->fw_dump_cmpl = NULL;
return rc;
}
/* keep the original port state */
- if (before_fc_flag & FC_OFFLINE_MODE)
- goto out;
-
- init_completion(&online_compl);
- job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
- LPFC_EVT_ONLINE);
- if (!job_posted)
+ if (before_fc_flag & FC_OFFLINE_MODE) {
+ if (phba->fw_dump_cmpl)
+ phba->fw_dump_cmpl = NULL;
goto out;
+ }
- wait_for_completion(&online_compl);
+ /* Firmware dump will trigger an HA_ERATT event, and
+ * lpfc_handle_eratt_s4 routine already handles bringing the port back
+ * online.
+ */
+ if (opcode == LPFC_FW_DUMP) {
+ wait_for_completion(phba->fw_dump_cmpl);
+ } else {
+ init_completion(&online_compl);
+ job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (!job_posted)
+ goto out;
+ wait_for_completion(&online_compl);
+ }
out:
/* in any case, restore the virtual functions enabled as before */
if (sriov_nr_virtfn) {
+ /* If fw_dump was performed, first disable to clean up */
+ if (opcode == LPFC_FW_DUMP) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+
sriov_err =
lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
if (!sriov_err)
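The reworked lpfc_sli4_pdev_reg_request() no longer offlines the port before a firmware dump; instead it publishes a completion in phba->fw_dump_cmpl and blocks on it, and (in the lpfc_hbadisc.c hunk further down) the worker that handles the resulting HA_ERATT error attention fires that completion once recovery brings the port back. A sketch of the handshake; my_adapter and its dump_cmpl pointer are hypothetical:

#include <linux/completion.h>

/* Requesting side: */
static void my_request_dump(struct my_adapter *ad)
{
	DECLARE_COMPLETION_ONSTACK(done);

	ad->dump_cmpl = &done;		/* publish before triggering */
	/* ...write the dump request to the hardware... */
	wait_for_completion(&done);	/* parked until recovery finishes */
}

/* Error-attention worker: */
static void my_handle_eratt(struct my_adapter *ad)
{
	/* ...recover the port... */
	if (ad->dump_cmpl) {
		complete(ad->dump_cmpl);
		ad->dump_cmpl = NULL;
	}
}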
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 08b2e85dcd7d..30fac2f6fb06 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -5484,7 +5484,7 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"Truncated . . .\n");
- break;
+ goto out;
}
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"%03x: %08x %08x %08x %08x "
@@ -5495,6 +5495,17 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
cnt += 32;
ptr += 8;
}
+ if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Truncated . . .\n");
+ goto out;
+ }
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Parameter Data\n");
+ ptr = (uint32_t *)&phba->cgn_p;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "%08x %08x %08x %08x\n",
+ *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3));
out:
return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
}
@@ -5561,22 +5572,24 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
start = tail;
len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- " MaxBPI\t Total Data Cmd Total Data Cmpl "
- " Latency(us) Avg IO Size\tMax IO Size IO cnt "
- "Info BWutil(ms)\n");
+ " MaxBPI Tot_Data_CMF Tot_Data_Cmd "
+ "Tot_Data_Cmpl Lat(us) Avg_IO Max_IO "
+ "Bsy IO_cnt Info BWutil(ms)\n");
get_table:
for (i = start; i < last; i++) {
entry = &phba->rxtable[i];
len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- "%3d:%12lld %12lld\t%12lld\t"
- "%8lldus\t%8lld\t%10lld "
- "%8d %2d %2d(%2d)\n",
+ "%3d:%12lld %12lld %12lld %12lld "
+ "%7lldus %8lld %7lld "
+ "%2d %4d %2d %2d(%2d)\n",
i, entry->max_bytes_per_interval,
+ entry->cmf_bytes,
entry->total_bytes,
entry->rcv_bytes,
entry->avg_io_latency,
entry->avg_io_size,
entry->max_read_cnt,
+ entry->cmf_busy,
entry->io_cnt,
entry->cmf_info,
entry->timer_utilization,
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index a5bf71b34972..6dd361c1fd31 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -282,7 +282,7 @@ struct lpfc_idiag {
void *ptr_private;
};
-#define MAX_DEBUGFS_RX_TABLE_SIZE (100 * LPFC_MAX_RXMONITOR_ENTRY)
+#define MAX_DEBUGFS_RX_TABLE_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
struct lpfc_rx_monitor_debug {
char *i_private;
char *buffer;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e83453bea2ae..db5ccae1b63d 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3538,11 +3538,6 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
return 1;
}
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of node.
- */
- if (!(vport->fc_flag & FC_PT2PT))
- lpfc_nlp_put(ndlp);
return 0;
}
@@ -6899,6 +6894,7 @@ static int
lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
{
LPFC_MBOXQ_t *mbox = NULL;
+ struct lpfc_dmabuf *mp;
int rc;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -6914,8 +6910,11 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
goto issue_mbox_fail;
+ }
return 0;
@@ -10974,10 +10973,19 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_can_disctmo(vport);
}
+ if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ /* Wake up lpfc_vport_delete if waiting...*/
+ if (ndlp->logo_waitq)
+ wake_up(ndlp->logo_waitq);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ spin_unlock_irq(&ndlp->lock);
+ }
+
/* Safe to release resources now. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9fe6e5b386ce..816fc406135b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -869,10 +869,16 @@ lpfc_work_done(struct lpfc_hba *phba)
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
lpfc_sli4_post_async_mbox(phba);
- if (ha_copy & HA_ERATT)
+ if (ha_copy & HA_ERATT) {
/* Handle the error attention event */
lpfc_handle_eratt(phba);
+ if (phba->fw_dump_cmpl) {
+ complete(phba->fw_dump_cmpl);
+ phba->fw_dump_cmpl = NULL;
+ }
+ }
+
if (ha_copy & HA_MBATT)
lpfc_sli_handle_mb_event(phba);
@@ -3928,7 +3934,6 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_cleanup_vports_rrqs(vport, NULL);
/*
@@ -3958,7 +3963,6 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
return rc;
}
return 0;
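The fw_dump_cmpl hunk above converts the firmware-dump handshake to a struct completion: the reset path blocks on it and the error-attention worker signals it once the ERATT has been handled. A hedged sketch of the same pattern; everything except the fw_dump_cmpl field name is illustrative:

#include <linux/completion.h>

struct demo_hba {
	struct completion *fw_dump_cmpl;
};

static void demo_trigger_fw_dump(struct demo_hba *hba)
{
	DECLARE_COMPLETION_ONSTACK(done);

	hba->fw_dump_cmpl = &done;
	/* ... write the dump request to the HBA ... */
	wait_for_completion(&done);	/* released by the ERATT worker */
}

static void demo_eratt_worker(struct demo_hba *hba)
{
	/* Mirrors the hunk: signal only if someone is waiting. */
	if (hba->fw_dump_cmpl) {
		complete(hba->fw_dump_cmpl);
		hba->fw_dump_cmpl = NULL;
	}
}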
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 634f8fff7425..4461c3d6fc4f 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -3675,19 +3675,26 @@ union sli_var {
};
typedef struct {
+ struct_group_tagged(MAILBOX_word0, bits,
+ union {
+ struct {
#ifdef __BIG_ENDIAN_BITFIELD
- uint16_t mbxStatus;
- uint8_t mbxCommand;
- uint8_t mbxReserved:6;
- uint8_t mbxHc:1;
- uint8_t mbxOwner:1; /* Low order bit first word */
+ uint16_t mbxStatus;
+ uint8_t mbxCommand;
+ uint8_t mbxReserved:6;
+ uint8_t mbxHc:1;
+ uint8_t mbxOwner:1; /* Low order bit first word */
#else /* __LITTLE_ENDIAN_BITFIELD */
- uint8_t mbxOwner:1; /* Low order bit first word */
- uint8_t mbxHc:1;
- uint8_t mbxReserved:6;
- uint8_t mbxCommand;
- uint16_t mbxStatus;
+ uint8_t mbxOwner:1; /* Low order bit first word */
+ uint8_t mbxHc:1;
+ uint8_t mbxReserved:6;
+ uint8_t mbxCommand;
+ uint16_t mbxStatus;
#endif
+ };
+ u32 word0;
+ };
+ );
MAILVARIANTS un;
union sli_var us;
@@ -3746,7 +3753,7 @@ typedef struct {
#define IOERR_ILLEGAL_COMMAND 0x06
#define IOERR_XCHG_DROPPED 0x07
#define IOERR_ILLEGAL_FIELD 0x08
-#define IOERR_BAD_CONTINUE 0x09
+#define IOERR_RPI_SUSPENDED 0x09
#define IOERR_TOO_MANY_BUFFERS 0x0A
#define IOERR_RCV_BUFFER_WAITING 0x0B
#define IOERR_NO_CONNECTION 0x0C
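The MAILBOX_t rework above wraps the first mailbox word in struct_group_tagged() from <linux/stddef.h>: the bitfields stay addressable inside MAILBOX_t, while the tagged mirror (struct MAILBOX_word0) doubles as a standalone type with a raw word0 view, removing the (MAILBOX_t *)&word casts that the lpfc_sli.c hunks below get rid of. A reduced sketch, little-endian layout only; the demo_* names are illustrative:

#include <linux/stddef.h>
#include <linux/types.h>

struct demo_mailbox {
	struct_group_tagged(demo_word0, bits,
		union {
			struct {	/* little-endian layout only */
				u8  owner:1;
				u8  hc:1;
				u8  rsvd:6;
				u8  cmd;
				u16 status;
			};
			u32 word0;	/* whole-register view */
		};
	);
	/* ... the rest of the mailbox follows, as MAILVARIANTS does ... */
};

static void demo_kill_board(void)
{
	struct demo_word0 mbox;	/* the tagged mirror stands alone */

	mbox.word0 = 0;		/* clear the whole word at once */
	mbox.cmd = 0x24;	/* illustrative opcode */
	mbox.owner = 1;
}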
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ba17a8f740a9..a56f01f659f8 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5373,8 +5373,10 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
*/
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
goto out_free_dmabuf;
+ }
return;
}
/*
@@ -5925,7 +5927,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
uint32_t io_cnt;
uint32_t head, tail;
uint32_t busy, max_read;
- uint64_t total, rcv, lat, mbpi, extra;
+ uint64_t total, rcv, lat, mbpi, extra, cnt;
int timer_interval = LPFC_CMF_INTERVAL;
uint32_t ms;
struct lpfc_cgn_stat *cgs;
@@ -5996,20 +5998,28 @@ lpfc_cmf_timer(struct hrtimer *timer)
/* Calculate any extra bytes needed to account for the
* timer accuracy. If we are less than LPFC_CMF_INTERVAL
- * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL
- * add an extra 2%. The goal is to equalize total with a
- * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1
+ * calculate the adjustment needed for total to reflect
+ * a full LPFC_CMF_INTERVAL.
*/
- if (ms == LPFC_CMF_INTERVAL)
- extra = div_u64(total, 50);
- else if (ms < LPFC_CMF_INTERVAL)
- extra = div_u64(total, 33);
+ if (ms && ms < LPFC_CMF_INTERVAL) {
+ cnt = div_u64(total, ms); /* bytes per ms */
+ cnt *= LPFC_CMF_INTERVAL; /* what total should be */
+
+ /* If the timeout is scheduled to be shorter,
+ * this value may skew the data, so cap it at mbpi.
+ */
+ if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
+ cnt = mbpi;
+
+ extra = cnt - total;
+ }
lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
} else {
/* For Monitor mode or link down we want mbpi
* to be the full link speed
*/
mbpi = phba->cmf_link_byte_count;
+ extra = 0;
}
phba->cmf_timer_cnt++;
@@ -6040,6 +6050,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
LPFC_RXMONITOR_TABLE_IN_USE);
entry = &phba->rxtable[head];
entry->total_bytes = total;
+ entry->cmf_bytes = total + extra;
entry->rcv_bytes = rcv;
entry->cmf_busy = busy;
entry->cmf_info = phba->cmf_active_info;
@@ -6082,6 +6093,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
/* Each minute save Fabric and Driver congestion information */
lpfc_cgn_save_evt_cnt(phba);
+ phba->hba_flag &= ~HBA_SHORT_CMF;
+
/* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
* minute, adjust our next timer interval, if needed, to ensure a
* 1 minute granularity when we get the next timer interrupt.
@@ -6092,6 +6105,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
jiffies);
if (timer_interval <= 0)
timer_interval = LPFC_CMF_INTERVAL;
+ else
+ phba->hba_flag |= HBA_SHORT_CMF;
/* If we adjust timer_interval, max_bytes_per_interval
* needs to be adjusted as well.
@@ -6337,8 +6352,10 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
}
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
goto out_free_dmabuf;
+ }
return;
out_free_dmabuf:
@@ -12709,7 +12726,7 @@ lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
cpumask_clear(&eqhdl->aff_mask);
cpumask_set_cpu(cpu, &eqhdl->aff_mask);
irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
- irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+ irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
}
/**
@@ -12998,7 +13015,6 @@ cfg_fail_out:
for (--index; index >= 0; index--) {
eqhdl = lpfc_get_eq_hdl(index);
lpfc_irq_clear_aff(eqhdl);
- irq_set_affinity_hint(eqhdl->irq, NULL);
free_irq(eqhdl->irq, eqhdl);
}
@@ -13159,7 +13175,6 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
for (index = 0; index < phba->cfg_irq_chann; index++) {
eqhdl = lpfc_get_eq_hdl(index);
lpfc_irq_clear_aff(eqhdl);
- irq_set_affinity_hint(eqhdl->irq, NULL);
free_irq(eqhdl->irq, eqhdl);
}
} else {
@@ -13466,7 +13481,7 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
phba->cgn_evt_minute = 0;
phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
- memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
+ memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
cp->cgn_info_version = LPFC_CGN_INFO_V3;
@@ -13525,7 +13540,7 @@ lpfc_init_congestion_stat(struct lpfc_hba *phba)
return;
cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
- memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
+ memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
ktime_get_real_ts64(&cmpl_time);
time64_to_tm(cmpl_time.tv_sec, 0, &broken);
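The lpfc_cmf_timer() hunk replaces the fixed 2%/3% slop with a proper extrapolation: when the timer fires early, scale the observed byte count up to a full interval and, if the next interval is already flagged short (HBA_SHORT_CMF), cap the projection at max_bytes_per_interval. A sketch of that arithmetic; the underflow guard on the last line is an addition for safety, not in the hunk:

#include <linux/math64.h>
#include <linux/types.h>

static u64 cmf_projected_extra(u64 total, u32 ms, u32 interval_ms,
			       u64 mbpi, bool short_cmf)
{
	u64 cnt;

	if (!ms || ms >= interval_ms)
		return 0;		/* timer ran full length: no slop */

	cnt = div_u64(total, ms);	/* observed bytes per ms */
	cnt *= interval_ms;		/* projected full-interval total */

	/* A short next timeout would skew the data; cap at mbpi. */
	if (short_cmf && cnt > mbpi)
		cnt = mbpi;

	return cnt > total ? cnt - total : 0;	/* guard added here */
}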
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 27263f02ab9f..7d717a4ac14d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -322,6 +322,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
+ struct lpfc_dmabuf *mp;
uint64_t nlp_portwwn = 0;
uint32_t *lp;
IOCB_t *icmd;
@@ -571,6 +572,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* a default RPI.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
+ mp = (struct lpfc_dmabuf *)login_mbox->ctx_buf;
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
mempool_free(login_mbox, phba->mbox_mem_pool);
login_mbox = NULL;
} else {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 6ccf573acdec..5a3da38a9067 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4393,6 +4393,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
+ lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
cmd->result = DID_REQUEUE << 16;
break;
@@ -4448,10 +4449,11 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"9039 Iodone <%d/%llu> cmd x%px, error "
- "x%x SNS x%x x%x Data: x%x x%x\n",
+ "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
- cmd->result, *lp, *(lp + 3), cmd->retries,
- scsi_get_resid(cmd));
+ cmd->result, *lp, *(lp + 3),
+ (u64)scsi_get_lba(cmd),
+ cmd->retries, scsi_get_resid(cmd));
}
lpfc_update_stats(vport, lpfc_cmd);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 5dedb3de271d..1bc0db572d9e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4749,7 +4749,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
- volatile uint32_t mbox;
+ volatile struct MAILBOX_word0 mbox;
uint32_t hc_copy, ha_copy, resp_data;
int i;
uint8_t hdrtype;
@@ -4783,13 +4783,13 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
phba->pport->stopped = 1;
}
- mbox = 0;
- ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
- ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
+ mbox.word0 = 0;
+ mbox.mbxCommand = MBX_KILL_BOARD;
+ mbox.mbxOwner = OWN_CHIP;
writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
mbox_buf = phba->MBslimaddr;
- writel(mbox, mbox_buf);
+ writel(mbox.word0, mbox_buf);
for (i = 0; i < 50; i++) {
if (lpfc_readl((resp_buf + 1), &resp_data))
@@ -4810,12 +4810,12 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
goto clear_errat;
}
- ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
+ mbox.mbxOwner = OWN_HOST;
resp_data = 0;
for (i = 0; i < 500; i++) {
if (lpfc_readl(resp_buf, &resp_data))
return;
- if (resp_data != mbox)
+ if (resp_data != mbox.word0)
mdelay(1);
else
break;
@@ -5046,12 +5046,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->fcf.fcf_flag = 0;
spin_unlock_irq(&phba->hbalock);
- /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
- if (phba->hba_flag & HBA_FW_DUMP_OP) {
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
- return rc;
- }
-
/* Now physically reset the device */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0389 Performing PCI function reset!\n");
@@ -5091,9 +5085,8 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
- MAILBOX_t *mb;
+ volatile struct MAILBOX_word0 mb;
struct lpfc_sli *psli;
- volatile uint32_t word0;
void __iomem *to_slim;
uint32_t hba_aer_enabled;
@@ -5110,24 +5103,23 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
(phba->pport) ? phba->pport->port_state : 0,
psli->sli_flag);
- word0 = 0;
- mb = (MAILBOX_t *) &word0;
- mb->mbxCommand = MBX_RESTART;
- mb->mbxHc = 1;
+ mb.word0 = 0;
+ mb.mbxCommand = MBX_RESTART;
+ mb.mbxHc = 1;
lpfc_reset_barrier(phba);
to_slim = phba->MBslimaddr;
- writel(*(uint32_t *) mb, to_slim);
+ writel(mb.word0, to_slim);
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
if (phba->pport && phba->pport->port_state)
- word0 = 1; /* This is really setting up word1 */
+ mb.word0 = 1; /* This is really setting up word1 */
else
- word0 = 0; /* This is really setting up word1 */
+ mb.word0 = 0; /* This is really setting up word1 */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- writel(*(uint32_t *) mb, to_slim);
+ writel(mb.word0, to_slim);
readl(to_slim); /* flush */
lpfc_sli_brdreset(phba);
@@ -17990,8 +17982,8 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
* the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
- xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
- phba->sli4_hba.max_cfg_param.max_xri, 0);
+ xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri);
if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
spin_unlock_irq(&phba->hbalock);
return NO_XRI;
@@ -19668,7 +19660,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
- rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
@@ -20311,8 +20303,8 @@ next_priority:
* have been tested so that we can detect when we should
* change the priority level.
*/
- next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
- LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
}
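Three allocator hunks above switch find_next_zero_bit(map, size, 0) to find_first_zero_bit(map, size), the dedicated entry point for a zero offset. A sketch of the resulting allocation shape under a lock, with illustrative names:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static int demo_alloc_id(unsigned long *bmask, unsigned int max,
			 spinlock_t *lock)
{
	unsigned int id;

	spin_lock_irq(lock);
	id = find_first_zero_bit(bmask, max);	/* was find_next_..., 0 */
	if (id >= max) {
		spin_unlock_irq(lock);
		return -ENOSPC;			/* pool exhausted */
	}
	set_bit(id, bmask);
	spin_unlock_irq(lock);
	return id;
}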
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 5a4d3b24fbce..2e9348a6897c 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.0.0.3"
+#define LPFC_DRIVER_VERSION "14.0.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index da9a1f72d938..d694d0cff5a5 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -486,22 +486,67 @@ error_out:
}
static int
+lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ int rc;
+ struct lpfc_hba *phba = vport->phba;
+
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
+ !ndlp->logo_waitq) {
+ ndlp->logo_waitq = &waitq;
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+ ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
+ }
+ spin_unlock_irq(&ndlp->lock);
+ rc = lpfc_issue_els_npiv_logo(vport, ndlp);
+ if (!rc) {
+ wait_event_timeout(waitq,
+ (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
+ msecs_to_jiffies(phba->fc_ratov * 2000));
+
+ if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
+ goto logo_cmpl;
+ /* LOGO wait failed. Correct status. */
+ rc = -EINTR;
+ } else {
+ rc = -EIO;
+ }
+
+ /* Error - clean up node flags. */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ spin_unlock_irq(&ndlp->lock);
+
+ logo_cmpl:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+ "1824 Issue LOGO completes with status %d\n",
+ rc);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->logo_waitq = NULL;
+ spin_unlock_irq(&ndlp->lock);
+ return rc;
+}
+
+static int
disable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
- long timeout;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ /* Can't disable during an outstanding delete. */
+ if (vport->load_flag & FC_UNLOADING)
+ return 0;
+
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (ndlp && phba->link_state >= LPFC_LINK_UP) {
- vport->unreg_vpi_cmpl = VPORT_INVAL;
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_issue_els_npiv_logo(vport, ndlp))
- while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
- timeout = schedule_timeout(timeout);
- }
+ if (ndlp && phba->link_state >= LPFC_LINK_UP)
+ (void)lpfc_send_npiv_logo(vport, ndlp);
lpfc_sli_host_down(vport);
@@ -600,7 +645,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- long timeout;
+ int rc;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -665,15 +710,14 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
/* Send DA_ID and wait for a completion. */
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
- while (vport->ct_flags && timeout)
- timeout = schedule_timeout(timeout);
- else
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
+ if (rc) {
lpfc_printf_log(vport->phba, KERN_WARNING,
LOG_VPORT,
"1829 CT command failed to "
- "delete objects on fabric\n");
+ "delete objects on fabric, "
+ "rc %d\n", rc);
+ }
}
/*
@@ -688,11 +732,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
goto skip_logo;
- vport->unreg_vpi_cmpl = VPORT_INVAL;
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_issue_els_npiv_logo(vport, ndlp))
- while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
- timeout = schedule_timeout(timeout);
+
+ rc = lpfc_send_npiv_logo(vport, ndlp);
+ if (rc)
+ goto skip_logo;
}
if (!(phba->pport->load_flag & FC_UNLOADING))
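disable_vport() and lpfc_vport_delete() previously polled unreg_vpi_cmpl with schedule_timeout(); the new lpfc_send_npiv_logo() instead parks on an on-stack waitqueue that the LOGO completion (lpfc_cmpl_els_npiv_logo, earlier in this diff) wakes. A sketch of that handshake; struct demo_node and its members are illustrative:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#define DEMO_WAIT_FOR_LOGO	BIT(0)

struct demo_node {
	spinlock_t lock;
	unsigned long save_flags;
	wait_queue_head_t *logo_waitq;
};

static int demo_wait_for_logo(struct demo_node *n, unsigned long timeout_ms)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	spin_lock_irq(&n->lock);
	n->logo_waitq = &waitq;
	n->save_flags |= DEMO_WAIT_FOR_LOGO;
	spin_unlock_irq(&n->lock);

	/* ... issue the LOGO ELS here ... */

	wait_event_timeout(waitq, !(n->save_flags & DEMO_WAIT_FOR_LOGO),
			   msecs_to_jiffies(timeout_ms));

	spin_lock_irq(&n->lock);
	n->logo_waitq = NULL;	/* the waitq dies with this stack frame */
	spin_unlock_irq(&n->lock);

	return (n->save_flags & DEMO_WAIT_FOR_LOGO) ? -ETIMEDOUT : 0;
}

/* Completion side, run from the ELS callback:
 *	n->save_flags &= ~DEMO_WAIT_FOR_LOGO;
 *	if (n->logo_waitq)
 *		wake_up(n->logo_waitq);
 */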
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 0d31d7a5e335..bf987f3a7f3f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -192,23 +192,21 @@ mega_query_adapter(adapter_t *adapter)
{
dma_addr_t prod_info_dma_handle;
mega_inquiry3 *inquiry3;
- u8 raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ u8 *raw_mbox = (u8 *)&mbox;
int retval;
/* Initialize adapter inquiry mailbox */
- mbox = (mbox_t *)raw_mbox;
-
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* Try to issue Inquiry3 command
* if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and
* update enquiry3 structure
*/
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
@@ -232,10 +230,10 @@ mega_query_adapter(adapter_t *adapter)
inq = &ext_inq->raid_inq;
- mbox->m_out.xferaddr = (u32)dma_handle;
+ mbox.xferaddr = (u32)dma_handle;
/*issue old 0x04 command to adapter */
- mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;
+ mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ;
issue_scb_block(adapter, raw_mbox);
@@ -262,7 +260,7 @@ mega_query_adapter(adapter_t *adapter)
sizeof(mega_product_info),
DMA_FROM_DEVICE);
- mbox->m_out.xferaddr = prod_info_dma_handle;
+ mbox.xferaddr = prod_info_dma_handle;
raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
@@ -3569,16 +3567,14 @@ mega_n_to_m(void __user *arg, megacmd_t *mc)
static int
mega_is_bios_enabled(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
-
- mbox = (mbox_t *)raw_mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
raw_mbox[0] = IS_BIOS_ENABLED;
raw_mbox[2] = GET_BIOS;
@@ -3600,13 +3596,11 @@ mega_is_bios_enabled(adapter_t *adapter)
static void
mega_enum_raid_scsi(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int i;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command to find out what channels are raid/scsi
@@ -3616,7 +3610,7 @@ mega_enum_raid_scsi(adapter_t *adapter)
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
/*
* Non-ROMB firmware fail this command, so all channels
@@ -3655,23 +3649,21 @@ static void
mega_get_boot_drv(adapter_t *adapter)
{
struct private_bios_data *prv_bios_data;
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
u16 cksum = 0;
u8 *cksum_p;
u8 boot_pdrv;
int i;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
raw_mbox[0] = BIOS_PVT_DATA;
raw_mbox[2] = GET_BIOS_PVT_DATA;
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
adapter->boot_ldrv_enabled = 0;
adapter->boot_ldrv = 0;
@@ -3721,13 +3713,11 @@ mega_get_boot_drv(adapter_t *adapter)
static int
mega_support_random_del(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int rval;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command
@@ -3750,13 +3740,11 @@ mega_support_random_del(adapter_t *adapter)
static int
mega_support_ext_cdb(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int rval;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command to find out if controller supports extended CDBs.
*/
@@ -3865,16 +3853,14 @@ mega_do_del_logdrv(adapter_t *adapter, int logdrv)
static void
mega_get_max_sgl(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- mbox = (mbox_t *)raw_mbox;
-
- memset(mbox, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
raw_mbox[0] = MAIN_MISC_OPCODE;
raw_mbox[2] = GET_MAX_SG_SUPPORT;
@@ -3888,7 +3874,7 @@ mega_get_max_sgl(adapter_t *adapter)
}
else {
adapter->sglen = *((char *)adapter->mega_buffer);
-
+
/*
* Make sure this is not more than the resources we are
* planning to allocate
@@ -3910,16 +3896,14 @@ mega_get_max_sgl(adapter_t *adapter)
static int
mega_support_cluster(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
-
- mbox = (mbox_t *)raw_mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- memset(mbox, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
/*
* Try to get the initiator id. This command will succeed iff the
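Every converted megaraid.c helper above used to zero a mbox_t through a pointer aliasing a byte array sized by struct mbox_out; declaring the struct directly and deriving the byte view from it makes the memset() size match the object by construction. A sketch of the converted shape; struct demo_mbox and the opcode are illustrative stand-ins for struct mbox_out:

#include <linux/string.h>
#include <linux/types.h>

struct demo_mbox {
	u8  cmd;
	u8  cmdid;
	u16 numsectors;
	u32 xferaddr;
};

static void demo_build_inquiry(u32 dma_handle)
{
	struct demo_mbox mbox;
	u8 *raw_mbox = (u8 *)&mbox;	/* byte view for opcode pokes */

	memset(&mbox, 0, sizeof(mbox));	/* size matches the object */
	mbox.xferaddr = dma_handle;
	raw_mbox[0] = 0xA1;		/* opcode byte, as in the driver */
}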
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 14f930d27ca1..2a339d4a7e9d 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1431,7 +1431,6 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
/**
* megaraid_queue_command_lck - generic queue entry point for all LLDs
* @scp : pointer to the scsi command to be executed
- * @done : callback routine to be called after the cmd has be completed
*
* Queue entry point for mailbox based controllers.
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index aeb95f409826..82e1e24257bc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5720,7 +5720,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
"Failed to register IRQ for vector %d.\n", i);
for (j = 0; j < i; j++) {
if (j < instance->low_latency_index_start)
- irq_set_affinity_hint(
+ irq_update_affinity_hint(
pci_irq_vector(pdev, j), NULL);
free_irq(pci_irq_vector(pdev, j),
&instance->irq_context[j]);
@@ -5763,7 +5763,7 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
if (i < instance->low_latency_index_start)
- irq_set_affinity_hint(
+ irq_update_affinity_hint(
pci_irq_vector(instance->pdev, i), NULL);
free_irq(pci_irq_vector(instance->pdev, i),
&instance->irq_context[i]);
@@ -5894,22 +5894,25 @@ int megasas_get_device_list(struct megasas_instance *instance)
}
/**
- * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
- * @instance: Adapter soft state
- * return: void
+ * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint
+ * for high IOPS queues
+ * @instance: Adapter soft state
+ * return: void
*/
static inline void
-megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
+megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
{
int i;
- int local_numa_node;
+ unsigned int irq;
+ const struct cpumask *mask;
if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
- local_numa_node = dev_to_node(&instance->pdev->dev);
+ mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
- for (i = 0; i < instance->low_latency_index_start; i++)
- irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
- cpumask_of_node(local_numa_node));
+ for (i = 0; i < instance->low_latency_index_start; i++) {
+ irq = pci_irq_vector(instance->pdev, i);
+ irq_set_affinity_and_hint(irq, mask);
+ }
}
}
@@ -5998,7 +6001,7 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
instance->msix_vectors = 0;
if (instance->smp_affinity_enable)
- megasas_set_high_iops_queue_affinity_hint(instance);
+ megasas_set_high_iops_queue_affinity_and_hint(instance);
}
/**
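The megaraid_sas hunks move from irq_set_affinity_hint() to the split helpers added for 5.17: irq_set_affinity_and_hint() applies a mask and publishes it as the hint, while irq_update_affinity_hint() only updates the published hint (NULL clears it) without forcing the affinity, so drivers stop fighting userspace irqbalance. A sketch of their intended use; the demo_* wrappers are illustrative:

#include <linux/interrupt.h>
#include <linux/topology.h>

static void demo_irq_setup(unsigned int irq, int node)
{
	/* Pin the IRQ to the device's NUMA node and publish the hint. */
	irq_set_affinity_and_hint(irq, cpumask_of_node(node));
}

static void demo_irq_teardown(unsigned int irq)
{
	/* Clear the published hint before free_irq(). */
	irq_update_affinity_hint(irq, NULL);
}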
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index d43bbecef651..5e1f6ced0e71 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -8,7 +8,7 @@
#define MPI3_CONFIG_PAGETYPE_IO_UNIT (0x00)
#define MPI3_CONFIG_PAGETYPE_MANUFACTURING (0x01)
#define MPI3_CONFIG_PAGETYPE_IOC (0x02)
-#define MPI3_CONFIG_PAGETYPE_UEFI_BSD (0x03)
+#define MPI3_CONFIG_PAGETYPE_DRIVER (0x03)
#define MPI3_CONFIG_PAGETYPE_SECURITY (0x04)
#define MPI3_CONFIG_PAGETYPE_ENCLOSURE (0x11)
#define MPI3_CONFIG_PAGETYPE_DEVICE (0x12)
@@ -181,8 +181,17 @@ struct mpi3_config_page_header {
#define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0a)
#define MPI3_SAS_HWRATE_MIN_RATE_12_0 (0x0b)
#define MPI3_SAS_HWRATE_MIN_RATE_22_5 (0x0c)
-#define MPI3_SLOT_INVALID (0xffff)
-#define MPI3_SLOT_INDEX_INVALID (0xffff)
+#define MPI3_SLOT_INVALID (0xffff)
+#define MPI3_SLOT_INDEX_INVALID (0xffff)
+#define MPI3_LINK_CHANGE_COUNT_INVALID (0xffff)
+#define MPI3_RATE_CHANGE_COUNT_INVALID (0xffff)
+#define MPI3_TEMP_SENSOR_LOCATION_INTERNAL (0x0)
+#define MPI3_TEMP_SENSOR_LOCATION_INLET (0x1)
+#define MPI3_TEMP_SENSOR_LOCATION_OUTLET (0x2)
+#define MPI3_TEMP_SENSOR_LOCATION_DRAM (0x3)
+#define MPI3_MFGPAGE_VENDORID_BROADCOM (0x1000)
+#define MPI3_MFGPAGE_DEVID_SAS4116 (0x00a5)
+#define MPI3_MFGPAGE_DEVID_SAS4016 (0x00a7)
struct mpi3_man_page0 {
struct mpi3_config_page_header header;
u8 chip_revision[8];
@@ -195,7 +204,7 @@ struct mpi3_man_page0 {
__le32 reserved98;
u8 oem;
u8 sub_oem;
- __le16 reserved9e;
+ __le16 flags;
u8 board_mfg_day;
u8 board_mfg_month;
__le16 board_mfg_year;
@@ -208,6 +217,8 @@ struct mpi3_man_page0 {
};
#define MPI3_MAN0_PAGEVERSION (0x00)
+#define MPI3_MAN0_FLAGS_SWITCH_PRESENT (0x0002)
+#define MPI3_MAN0_FLAGS_EXPANDER_PRESENT (0x0001)
#define MPI3_MAN1_VPD_SIZE (512)
struct mpi3_man_page1 {
struct mpi3_config_page_header header;
@@ -236,7 +247,7 @@ struct mpi3_man_page5 {
#define MPI3_MAN5_PAGEVERSION (0x00)
struct mpi3_man6_gpio_entry {
u8 function_code;
- u8 reserved01;
+ u8 function_flags;
__le16 flags;
u8 param1;
u8 param2;
@@ -253,7 +264,6 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_YELLOW (0x06)
#define MPI3_MAN6_GPIO_FUNCTION_CABLE_MANAGEMENT (0x07)
#define MPI3_MAN6_GPIO_FUNCTION_BKPLANE_MGMT_TYPE (0x08)
-#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_MUX_RESET (0x09)
#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a)
#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b)
#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c)
@@ -263,6 +273,10 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10)
#define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11)
#define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12)
+#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET_CLAMP (0x13)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_MASK (0x01)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_ISTWI (0x00)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_RECEPTACLEID (0x01)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xf0)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10)
@@ -275,8 +289,6 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_ACTIVE_CABLE_ENABLE (0x01)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_CABLE_MGMT_ENABLE (0x02)
-#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_SPEC_MUX (0x00)
-#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_ALL_MUXES (0x01)
#define MPI3_MAN6_GPIO_LICENSE_PARAM1_TYPE_IBUTTON (0x00)
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100)
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100)
@@ -353,6 +365,7 @@ struct mpi3_man8_phy_info {
__le32 reserved0c;
};
+#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_HOST_PHY (0xff)
#ifndef MPI3_MAN8_PHY_INFO_MAX
#define MPI3_MAN8_PHY_INFO_MAX (1)
#endif
@@ -373,20 +386,22 @@ struct mpi3_man9_rsrc_entry {
};
enum mpi3_man9_resources {
- MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
- MPI3_MAN9_RSRC_TARGET_CMDS = 1,
- MPI3_MAN9_RSRC_SAS_TARGETS = 2,
- MPI3_MAN9_RSRC_PCIE_TARGETS = 3,
- MPI3_MAN9_RSRC_INITIATORS = 4,
- MPI3_MAN9_RSRC_VDS = 5,
- MPI3_MAN9_RSRC_ENCLOSURES = 6,
- MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
- MPI3_MAN9_RSRC_EXPANDERS = 8,
- MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
- MPI3_MAN9_RSRC_PDS = 10,
- MPI3_MAN9_RSRC_HOST_PDS = 11,
- MPI3_MAN9_RSRC_ADV_HOST_PDS = 12,
- MPI3_MAN9_RSRC_RAID_PDS = 13,
+ MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
+ MPI3_MAN9_RSRC_TARGET_CMDS = 1,
+ MPI3_MAN9_RSRC_RESERVED02 = 2,
+ MPI3_MAN9_RSRC_NVME = 3,
+ MPI3_MAN9_RSRC_INITIATORS = 4,
+ MPI3_MAN9_RSRC_VDS = 5,
+ MPI3_MAN9_RSRC_ENCLOSURES = 6,
+ MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
+ MPI3_MAN9_RSRC_EXPANDERS = 8,
+ MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
+ MPI3_MAN9_RSRC_RESERVED10 = 10,
+ MPI3_MAN9_RSRC_HOST_PD_DRIVES = 11,
+ MPI3_MAN9_RSRC_ADV_HOST_PD_DRIVES = 12,
+ MPI3_MAN9_RSRC_RAID_PD_DRIVES = 13,
+ MPI3_MAN9_RSRC_DRV_DIAG_BUF = 14,
+ MPI3_MAN9_RSRC_NAMESPACE_COUNT = 15,
MPI3_MAN9_RSRC_NUM_RESOURCES
};
@@ -402,6 +417,7 @@ enum mpi3_man9_resources {
#define MPI3_MAN9_MIN_ENCLOSURES (0)
#define MPI3_MAN9_MAX_ENCLOSURES (65535)
#define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0)
+#define MPI3_MAN9_MIN_NAMESPACE_COUNT (1)
#define MPI3_MAN9_MIN_EXPANDERS (0)
#define MPI3_MAN9_MAX_EXPANDERS (65535)
#define MPI3_MAN9_MIN_PCIE_SWITCHES (0)
@@ -422,9 +438,14 @@ struct mpi3_man_page9 {
struct mpi3_man10_istwi_ctrlr_entry {
__le16 slave_address;
__le16 flags;
- __le32 reserved04;
+ u8 scl_low_override;
+ u8 scl_high_override;
+ __le16 reserved06;
};
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_MASK (0x000c)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_100K (0x0000)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_400K (0x0004)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_SLAVE_ENABLED (0x0002)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_MASTER_ENABLED (0x0001)
#ifndef MPI3_MAN10_ISTWI_CTRLR_MAX
@@ -451,10 +472,13 @@ struct mpi3_man11_temp_sensor_device_format {
u8 temp_channel[4];
};
-#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
-#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
-#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
-#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_SE97B (0x03)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_MASK (0xe0)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_SHIFT (5)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
struct mpi3_man11_seeprom_device_format {
u8 size;
u8 page_write_size;
@@ -495,31 +519,40 @@ struct mpi3_man11_bkplane_spec_ubm_format {
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_SHIFT (4)
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
-struct mpi3_man11_bkplane_spec_vpp_format {
+struct mpi3_man11_bkplane_spec_non_ubm_format {
__le16 flags;
- __le16 reserved02;
+ u8 reserved02;
+ u8 type;
};
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0040)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_MASK (0x0030)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_REG (0x0010)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_MASK (0x000f)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_SHIFT (0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_MASK (0xf000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_SHIFT (12)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_MASK (0x0030)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_REG (0x0010)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_TYPE_VPP (0x00)
union mpi3_man11_bkplane_spec_format {
- struct mpi3_man11_bkplane_spec_ubm_format ubm;
- struct mpi3_man11_bkplane_spec_vpp_format vpp;
+ struct mpi3_man11_bkplane_spec_ubm_format ubm;
+ struct mpi3_man11_bkplane_spec_non_ubm_format non_ubm;
};
struct mpi3_man11_bkplane_mgmt_device_format {
u8 type;
u8 receptacle_id;
- __le16 reserved02;
+ u8 reset_info;
+ u8 reserved03;
union mpi3_man11_bkplane_spec_format backplane_mgmt_specific;
};
#define MPI3_MAN11_BKPLANE_MGMT_TYPE_UBM (0x00)
-#define MPI3_MAN11_BKPLANE_MGMT_TYPE_VPP (0x01)
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_NON_UBM (0x01)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_MASK (0xf0)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_SHIFT (4)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_MASK (0x0f)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_SHIFT (0)
struct mpi3_man11_gas_gauge_device_format {
u8 type;
u8 reserved01[3];
@@ -527,6 +560,11 @@ struct mpi3_man11_gas_gauge_device_format {
};
#define MPI3_MAN11_GAS_GAUGE_TYPE_STANDARD (0x00)
+struct mpi3_man11_mgmt_ctrlr_device_format {
+ __le32 reserved00;
+ __le32 reserved04;
+};
+
union mpi3_man11_device_specific_format {
struct mpi3_man11_mux_device_format mux;
struct mpi3_man11_temp_sensor_device_format temp_sensor;
@@ -535,6 +573,7 @@ union mpi3_man11_device_specific_format {
struct mpi3_man11_cable_mgmt_device_format cable_mgmt;
struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt;
struct mpi3_man11_gas_gauge_device_format gas_gauge;
+ struct mpi3_man11_mgmt_ctrlr_device_format mgmt_controller;
__le32 words[2];
};
@@ -556,10 +595,8 @@ struct mpi3_man11_istwi_device_format {
#define MPI3_MAN11_ISTWI_DEVTYPE_CABLE_MGMT (0x04)
#define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05)
#define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE (0x06)
+#define MPI3_MAN11_ISTWI_DEVTYPE_MGMT_CONTROLLER (0x07)
#define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_MASK (0x06)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_100KHZ (0x00)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_400KHZ (0x02)
#ifndef MPI3_MAN11_ISTWI_DEVICE_MAX
#define MPI3_MAN11_ISTWI_DEVICE_MAX (1)
#endif
@@ -692,8 +729,8 @@ struct mpi3_man_page14 {
#define MPI3_MAN14_FLAGS_AUTH_SESSION_REQ (0x01)
#define MPI3_MAN14_FLAGS_AUTH_API_MASK (0x0e)
#define MPI3_MAN14_FLAGS_AUTH_API_NONE (0x00)
-#define MPI3_MAN14_FLAGS_AUTH_API_CEREBUS (0x02)
-#define MPI3_MAN14_FLAGS_AUTH_API_DMTF_PMCI (0x04)
+#define MPI3_MAN14_FLAGS_AUTH_API_CERBERUS (0x02)
+#define MPI3_MAN14_FLAGS_AUTH_API_SPDM (0x04)
#ifndef MPI3_MAN15_VERSION_RECORD_MAX
#define MPI3_MAN15_VERSION_RECORD_MAX 1
#endif
@@ -808,7 +845,7 @@ struct mpi3_io_unit_page1 {
struct mpi3_config_page_header header;
__le32 flags;
u8 dmd_io_delay;
- u8 dmd_report_pc_ie;
+ u8 dmd_report_pcie;
u8 dmd_report_sata;
u8 dmd_report_sas;
};
@@ -844,26 +881,30 @@ struct mpi3_io_unit_page2 {
#define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001)
struct mpi3_io_unit3_sensor {
__le16 flags;
- __le16 reserved02;
- __le16 threshold[4];
+ u8 threshold_margin;
+ u8 reserved03;
+ __le16 threshold[3];
+ __le16 reserved0a;
__le32 reserved0c;
__le32 reserved10;
__le32 reserved14;
};
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T3_ENABLE (0x0008)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T2_ENABLE (0x0004)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T1_ENABLE (0x0002)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T0_ENABLE (0x0001)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_EVENT_ENABLED (0x0010)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_ACTION_ENABLED (0x0008)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_EVENT_ENABLED (0x0004)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_ACTION_ENABLED (0x0002)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_WARNING_EVENT_ENABLED (0x0001)
#ifndef MPI3_IO_UNIT3_SENSOR_MAX
-#define MPI3_IO_UNIT3_SENSOR_MAX (1)
+#define MPI3_IO_UNIT3_SENSOR_MAX (1)
#endif
struct mpi3_io_unit_page3 {
struct mpi3_config_page_header header;
__le32 reserved08;
u8 num_sensors;
- u8 polling_interval;
- __le16 reserved0e;
+ u8 nominal_poll_interval;
+ u8 warning_poll_interval;
+ u8 reserved0f;
struct mpi3_io_unit3_sensor sensor[MPI3_IO_UNIT3_SENSOR_MAX];
};
@@ -873,13 +914,19 @@ struct mpi3_io_unit4_sensor {
__le16 reserved02;
u8 flags;
u8 reserved05[3];
- __le32 reserved08;
+ __le16 istwi_index;
+ u8 channel;
+ u8 reserved0b;
__le32 reserved0c;
};
+#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_MASK (0xe0)
+#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_SHIFT (5)
#define MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID (0x01)
+#define MPI3_IOUNIT4_SENSOR_ISTWI_INDEX_INTERNAL (0xffff)
+#define MPI3_IOUNIT4_SENSOR_CHANNEL_RESERVED (0xff)
#ifndef MPI3_IO_UNIT4_SENSOR_MAX
-#define MPI3_IO_UNIT4_SENSOR_MAX (1)
+#define MPI3_IO_UNIT4_SENSOR_MAX (1)
#endif
struct mpi3_io_unit_page4 {
struct mpi3_config_page_header header;
@@ -906,8 +953,9 @@ struct mpi3_io_unit_page5 {
struct mpi3_io_unit5_spinup_group spinup_group_parameters[4];
__le32 reserved18;
__le32 reserved1c;
- __le32 reserved20;
- u8 reserved24;
+ __le16 device_shutdown;
+ __le16 reserved22;
+ u8 pcie_device_wait_time;
u8 sata_device_wait_time;
u8 spinup_encl_drive_count;
u8 spinup_encl_delay;
@@ -919,6 +967,22 @@ struct mpi3_io_unit_page5 {
};
#define MPI3_IOUNIT5_PAGEVERSION (0x00)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NO_ACTION (0x00)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_ATTACHED (0x01)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_EXPANDER_ATTACHED (0x02)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SWITCH_ATTACHED (0x02)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_EXPANDER (0x03)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_SWITCH (0x03)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_MASK (0x0300)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_SHIFT (8)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_MASK (0x00c0)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_SHIFT (6)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_MASK (0x0030)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_SHIFT (4)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_MASK (0x000c)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_SHIFT (0)
#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
@@ -1012,7 +1076,52 @@ struct mpi3_ioc_page2 {
};
#define MPI3_IOC2_PAGEVERSION (0x00)
-struct mpi3_uefibsd_page0 {
+#define MPI3_DRIVER_FLAGS_ADMINRAIDPD_BLOCKED (0x0010)
+#define MPI3_DRIVER_FLAGS_OOBRAIDPD_BLOCKED (0x0008)
+#define MPI3_DRIVER_FLAGS_OOBRAIDVD_BLOCKED (0x0004)
+#define MPI3_DRIVER_FLAGS_OOBADVHOSTPD_BLOCKED (0x0002)
+#define MPI3_DRIVER_FLAGS_OOBHOSTPD_BLOCKED (0x0001)
+struct mpi3_allowed_cmd_scsi {
+ __le16 service_action;
+ u8 operation_code;
+ u8 command_flags;
+};
+
+struct mpi3_allowed_cmd_ata {
+ u8 subcommand;
+ u8 reserved01;
+ u8 command;
+ u8 command_flags;
+};
+
+struct mpi3_allowed_cmd_nvme {
+ u8 reserved00;
+ u8 nvme_cmd_flags;
+ u8 op_code;
+ u8 command_flags;
+};
+
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_MASK (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_IO (0x00)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_ADMIN (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_MASK (0x3f)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_NVM (0x00)
+union mpi3_allowed_cmd {
+ struct mpi3_allowed_cmd_scsi scsi;
+ struct mpi3_allowed_cmd_ata ata;
+ struct mpi3_allowed_cmd_nvme nvme;
+};
+
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_ADMINRAIDPD_BLOCKED (0x20)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDPD_BLOCKED (0x10)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDVD_BLOCKED (0x08)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBADVHOSTPD_BLOCKED (0x04)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBHOSTPD_BLOCKED (0x02)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_CHECKSUBCMD_ENABLED (0x01)
+#ifndef MPI3_ALLOWED_CMDS_MAX
+#define MPI3_ALLOWED_CMDS_MAX (1)
+#endif
+struct mpi3_driver_page0 {
struct mpi3_config_page_header header;
__le32 bsd_options;
u8 ssu_timeout;
@@ -1026,13 +1135,122 @@ struct mpi3_uefibsd_page0 {
__le32 reserved18;
};
-#define MPI3_UEFIBSD_PAGEVERSION (0x00)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_MASK (0x00000003)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_NONE (0x00000002)
-#define MPI3_UEFIBSD_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
-#define MPI3_UEFIBSD_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008)
+#define MPI3_DRIVER0_PAGEVERSION (0x00)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
+#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
+#define MPI3_DRIVER0_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008)
+struct mpi3_driver_page1 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 reserved0c;
+ __le16 host_diag_trace_max_size;
+ __le16 host_diag_trace_min_size;
+ __le16 host_diag_trace_decrement_size;
+ __le16 reserved16;
+ __le16 host_diag_fw_max_size;
+ __le16 host_diag_fw_min_size;
+ __le16 host_diag_fw_decrement_size;
+ __le16 reserved1e;
+ __le16 host_diag_driver_max_size;
+ __le16 host_diag_driver_min_size;
+ __le16 host_diag_driver_decrement_size;
+ __le16 reserved26;
+};
+
+#define MPI3_DRIVER1_PAGEVERSION (0x00)
+#ifndef MPI3_DRIVER2_TRIGGER_MAX
+#define MPI3_DRIVER2_TRIGGER_MAX (1)
+#endif
+struct mpi3_driver2_trigger_event {
+ u8 type;
+ u8 flags;
+ u8 reserved02;
+ u8 event;
+ __le32 reserved04[3];
+};
+
+struct mpi3_driver2_trigger_scsi_sense {
+ u8 type;
+ u8 flags;
+ __le16 reserved02;
+ u8 ascq;
+ u8 asc;
+ u8 sense_key;
+ u8 reserved07;
+ __le32 reserved08[2];
+};
+
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL (0xff)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL (0xff)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL (0xff)
+struct mpi3_driver2_trigger_reply {
+ u8 type;
+ u8 flags;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 ioc_log_info_mask;
+ __le32 reserved0c;
+};
+
+#define MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL (0xffff)
+union mpi3_driver2_trigger_element {
+ struct mpi3_driver2_trigger_event event;
+ struct mpi3_driver2_trigger_scsi_sense scsi_sense;
+ struct mpi3_driver2_trigger_reply reply;
+};
+
+#define MPI3_DRIVER2_TRIGGER_TYPE_EVENT (0x00)
+#define MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE (0x01)
+#define MPI3_DRIVER2_TRIGGER_TYPE_REPLY (0x02)
+#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE (0x02)
+#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE (0x01)
+struct mpi3_driver_page2 {
+ struct mpi3_config_page_header header;
+ __le64 master_trigger;
+ __le32 reserved10[3];
+ u8 num_triggers;
+ u8 reserved1d[3];
+ union mpi3_driver2_trigger_element trigger[MPI3_DRIVER2_TRIGGER_MAX];
+};
+
+#define MPI3_DRIVER2_PAGEVERSION (0x00)
+#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_TRACE_RELEASE (0x8000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_FW_RELEASE (0x4000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_SNAPDUMP (0x2000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_DEVICE_REMOVAL_ENABLED (0x0000000000000004ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_TASK_MANAGEMENT_ENABLED (0x0000000000000002ULL)
+struct mpi3_driver_page10 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER10_PAGEVERSION (0x00)
+struct mpi3_driver_page20 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER20_PAGEVERSION (0x00)
+struct mpi3_driver_page30 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER30_PAGEVERSION (0x00)
union mpi3_security_mac {
__le32 dword[16];
__le16 word[32];
@@ -1102,7 +1320,7 @@ struct mpi3_security1_key_record {
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_NOT_VALID (0x00)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_SAFESTORE (0x01)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CERT_CHAIN (0x02)
-#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_AUTH_DEV_KEY (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_DEVICE_KEY (0x03)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CACHE_OFFLOAD (0x04)
struct mpi3_security_page1 {
struct mpi3_config_page_header header;
@@ -1137,16 +1355,30 @@ struct mpi3_sas_io_unit_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
u8 num_phys;
- u8 reserved0d[3];
+ u8 init_status;
+ __le16 reserved0e;
struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
};
-#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
-#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
-#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
-#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
-#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
-#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_ERRORS (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02)
+#define MPI3_SASIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04)
+#define MPI3_SASIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05)
+#define MPI3_SASIOUNIT0_INITSTATUS_HOST_PHYS_ENABLED (0x06)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MIN (0xf0)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MAX (0xff)
+#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_MASK (0x03)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_IOUNIT1 (0x00)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_DYNAMIC (0x01)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_BACKPLANE (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY (0x01)
struct mpi3_sas_io_unit1_phy_data {
u8 io_unit_port;
u8 port_flags;
@@ -1343,6 +1575,26 @@ struct mpi3_sas_expander_page1 {
#define MPI3_SASEXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
#define MPI3_SASEXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
#define MPI3_SASEXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+#ifndef MPI3_SASEXPANDER2_MAX_NUM_PHYS
+#define MPI3_SASEXPANDER2_MAX_NUM_PHYS (1)
+#endif
+struct mpi3_sasexpander2_phy_element {
+ u8 link_change_count;
+ u8 reserved01;
+ __le16 rate_change_count;
+ __le32 reserved04;
+};
+
+struct mpi3_sas_expander_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09;
+ __le16 dev_handle;
+ __le32 reserved0c;
+ struct mpi3_sasexpander2_phy_element phy[MPI3_SASEXPANDER2_MAX_NUM_PHYS];
+};
+
+#define MPI3_SASEXPANDER2_PAGEVERSION (0x00)
struct mpi3_sas_port_page0 {
struct mpi3_config_page_header header;
u8 port_number;
@@ -1510,6 +1762,14 @@ struct mpi3_sas_phy_page4 {
#define MPI3_PCIE_NEG_LINK_RATE_8_0 (0x04)
#define MPI3_PCIE_NEG_LINK_RATE_16_0 (0x05)
#define MPI3_PCIE_NEG_LINK_RATE_32_0 (0x06)
+#define MPI3_PCIE_ASPM_ENABLE_NONE (0x0)
+#define MPI3_PCIE_ASPM_ENABLE_L0S (0x1)
+#define MPI3_PCIE_ASPM_ENABLE_L1 (0x2)
+#define MPI3_PCIE_ASPM_ENABLE_L0S_L1 (0x3)
+#define MPI3_PCIE_ASPM_SUPPORT_NONE (0x0)
+#define MPI3_PCIE_ASPM_SUPPORT_L0S (0x1)
+#define MPI3_PCIE_ASPM_SUPPORT_L1 (0x2)
+#define MPI3_PCIE_ASPM_SUPPORT_L0S_L1 (0x3)
struct mpi3_pcie_io_unit0_phy_data {
u8 link;
u8 link_flags;
@@ -1540,7 +1800,8 @@ struct mpi3_pcie_io_unit_page0 {
__le32 reserved08;
u8 num_phys;
u8 init_status;
- __le16 reserved0e;
+ u8 aspm;
+ u8 reserved0f;
struct mpi3_pcie_io_unit0_phy_data phy_data[MPI3_PCIE_IO_UNIT0_PHY_MAX];
};
@@ -1556,6 +1817,14 @@ struct mpi3_pcie_io_unit_page0 {
#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_CLOCKING_MODE (0x08)
#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_START (0xf0)
#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_END (0xff)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_MASK (0xc0)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_SHIFT (6)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_MASK (0x30)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_SHIFT (4)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_MASK (0x0c)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_SHIFT (2)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_MASK (0x03)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_SHIFT (0)
struct mpi3_pcie_io_unit1_phy_data {
u8 link;
u8 link_flags;
@@ -1569,16 +1838,16 @@ struct mpi3_pcie_io_unit1_phy_data {
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02)
-#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
+#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
#ifndef MPI3_PCIE_IO_UNIT1_PHY_MAX
-#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
+#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
#endif
struct mpi3_pcie_io_unit_page1 {
struct mpi3_config_page_header header;
@@ -1586,21 +1855,66 @@ struct mpi3_pcie_io_unit_page1 {
__le32 reserved0c;
u8 num_phys;
u8 reserved11;
- __le16 reserved12;
+ u8 aspm;
+ u8 reserved13;
struct mpi3_pcie_io_unit1_phy_data phy_data[MPI3_PCIE_IO_UNIT1_PHY_MAX];
};
-#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x80)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x40)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x30)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x10)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x20)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0f)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x02)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x03)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x04)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x05)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x06)
+#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_MASK (0x0c)
+#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_SHIFT (2)
+#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_MASK (0x03)
+#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_SHIFT (0)
struct mpi3_pcie_io_unit_page2 {
struct mpi3_config_page_header header;
- __le16 nv_me_max_queue_depth;
- __le16 reserved0a;
- u8 nv_me_abort_to;
+ __le16 nvme_max_q_dx1;
+ __le16 nvme_max_q_dx2;
+ u8 nvme_abort_to;
u8 reserved0d;
- __le16 reserved0e;
+ __le16 nvme_max_q_dx4;
};
#define MPI3_PCIEIOUNIT2_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT3_ERROR_RECEIVER_ERROR (0)
+#define MPI3_PCIEIOUNIT3_ERROR_RECOVERY (1)
+#define MPI3_PCIEIOUNIT3_ERROR_CORRECTABLE_ERROR_MSG (2)
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_DLLP (3)
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_TLP (4)
+#define MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX (5)
+struct mpi3_pcie_io_unit3_error {
+ __le16 threshold_count;
+ __le16 reserved02;
+};
+
+struct mpi3_pcie_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ u8 threshold_window;
+ u8 threshold_action;
+ u8 escalation_count;
+ u8 escalation_action;
+ u8 num_errors;
+ u8 reserved0d[3];
+ struct mpi3_pcie_io_unit3_error error[MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX];
+};
+
+#define MPI3_PCIEIOUNIT3_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT3_ACTION_NO_ACTION (0x00)
+#define MPI3_PCIEIOUNIT3_ACTION_HOT_RESET (0x01)
+#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_ONLY (0x02)
+#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_NO_ACCESS (0x03)
struct mpi3_pcie_switch_page0 {
struct mpi3_config_page_header header;
u8 io_unit_port;
@@ -1609,7 +1923,7 @@ struct mpi3_pcie_switch_page0 {
__le16 dev_handle;
__le16 parent_dev_handle;
u8 num_ports;
- u8 pc_ie_level;
+ u8 pcie_level;
__le16 reserved12;
__le32 reserved14;
__le32 reserved18;
@@ -1623,7 +1937,8 @@ struct mpi3_pcie_switch_page0 {
struct mpi3_pcie_switch_page1 {
struct mpi3_config_page_header header;
u8 io_unit_port;
- u8 reserved09[3];
+ u8 flags;
+ __le16 reserved0a;
u8 num_ports;
u8 port_num;
__le16 attached_dev_handle;
@@ -1636,15 +1951,43 @@ struct mpi3_pcie_switch_page1 {
};
#define MPI3_PCIESWITCH1_PAGEVERSION (0x00)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_MASK (0x0c)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_SHIFT (2)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_MASK (0x03)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_SHIFT (0)
+#ifndef MPI3_PCIESWITCH2_MAX_NUM_PORTS
+#define MPI3_PCIESWITCH2_MAX_NUM_PORTS (1)
+#endif
+struct mpi3_pcieswitch2_port_element {
+ __le16 link_change_count;
+ __le16 rate_change_count;
+ __le32 reserved04;
+};
+
+struct mpi3_pcie_switch_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_ports;
+ u8 reserved09;
+ __le16 dev_handle;
+ __le32 reserved0c;
+ struct mpi3_pcieswitch2_port_element port[MPI3_PCIESWITCH2_MAX_NUM_PORTS];
+};
+
+#define MPI3_PCIESWITCH2_PAGEVERSION (0x00)
struct mpi3_pcie_link_page0 {
struct mpi3_config_page_header header;
u8 link;
u8 reserved09[3];
- __le32 correctable_error_count;
- __le16 n_fatal_error_count;
- __le16 reserved12;
- __le16 fatal_error_count;
- __le16 reserved16;
+ __le32 reserved0c;
+ __le32 receiver_error_count;
+ __le32 recovery_count;
+ __le32 corr_error_msg_count;
+ __le32 non_fatal_error_msg_count;
+ __le32 fatal_error_msg_count;
+ __le32 non_fatal_error_count;
+ __le32 fatal_error_count;
+ __le32 bad_dllp_count;
+ __le32 bad_tlp_count;
};
#define MPI3_PCIELINK0_PAGEVERSION (0x00)
@@ -1654,11 +1997,12 @@ struct mpi3_enclosure_page0 {
__le16 flags;
__le16 enclosure_handle;
__le16 num_slots;
- __le16 start_slot;
+ __le16 reserved16;
u8 io_unit_port;
u8 enclosure_level;
__le16 sep_dev_handle;
- __le32 reserved1c;
+ u8 chassis_slot;
+ u8 reserved1d[3];
};
#define MPI3_ENCLOSURE0_PAGEVERSION (0x00)
@@ -1666,6 +2010,7 @@ struct mpi3_enclosure_page0 {
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000)
+#define MPI3_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010)
@@ -1686,6 +2031,7 @@ struct mpi3_device0_sas_sata_format {
u8 zone_group;
};
+#define MPI3_DEVICE0_SASSATA_FLAGS_WRITE_SAME_UNMAP_NCQ (0x0400)
#define MPI3_DEVICE0_SASSATA_FLAGS_SLUMBER_CAP (0x0200)
#define MPI3_DEVICE0_SASSATA_FLAGS_PARTIAL_CAP (0x0100)
#define MPI3_DEVICE0_SASSATA_FLAGS_ASYNC_NOTIFY (0x0080)
@@ -1707,10 +2053,11 @@ struct mpi3_device0_pcie_format {
__le32 maximum_data_transfer_size;
__le32 capabilities;
__le16 noiob;
- u8 nv_me_abort_to;
+ u8 nvme_abort_to;
u8 page_size;
__le16 shutdown_latency;
- __le16 reserved16;
+ u8 recovery_info;
+ u8 reserved17;
};
#define MPI3_DEVICE0_PCIE_LINK_RATE_32_0_SUPP (0x10)
@@ -1718,16 +2065,38 @@ struct mpi3_device0_pcie_format {
#define MPI3_DEVICE0_PCIE_LINK_RATE_8_0_SUPP (0x04)
#define MPI3_DEVICE0_PCIE_LINK_RATE_5_0_SUPP (0x02)
#define MPI3_DEVICE0_PCIE_LINK_RATE_2_5_SUPP (0x01)
-#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0007)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_MASK (0x0030)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_SHIFT (4)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK (0x00c0)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_SHIFT (6)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0 (0x0000)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_1 (0x0040)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_2 (0x0080)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_3 (0x00c0)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_EXTRA_LENGTH_SUPPORTED (0x00000020)
#define MPI3_DEVICE0_PCIE_CAP_METADATA_SEPARATED (0x00000010)
#define MPI3_DEVICE0_PCIE_CAP_SGL_DWORD_ALIGN_REQUIRED (0x00000008)
-#define MPI3_DEVICE0_PCIE_CAP_NVME_SGL_ENABLED (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_SGL (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_PRP (0x00000000)
#define MPI3_DEVICE0_PCIE_CAP_BIT_BUCKET_SGL_SUPP (0x00000002)
#define MPI3_DEVICE0_PCIE_CAP_SGL_SUPP (0x00000001)
+#define MPI3_DEVICE0_PCIE_CAP_ASPM_MASK (0x000000c0)
+#define MPI3_DEVICE0_PCIE_CAP_ASPM_SHIFT (6)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_MASK (0xe0)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_NS_MGMT (0x00)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_FORMAT (0x20)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_MASK (0x1f)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NS (0x00)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NSID_1 (0x01)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_TOO_MANY_NS (0x02)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_PROTECTION (0x03)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_METADATA_SZ (0x04)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_LBA_DATA_SZ (0x05)
struct mpi3_device0_vd_format {
u8 vd_state;
u8 raid_level;
@@ -1783,6 +2152,8 @@ struct mpi3_device_page0 {
};
#define MPI3_DEVICE0_PAGEVERSION (0x00)
+#define MPI3_DEVICE0_PARENT_INVALID (0xffff)
+#define MPI3_DEVICE0_ENCLOSURE_HANDLE_NO_ENCLOSURE (0x0000)
#define MPI3_DEVICE0_WWID_INVALID (0xffffffffffffffff)
#define MPI3_DEVICE0_PERSISTENTID_INVALID (0xffff)
#define MPI3_DEVICE0_IOUNITPORT_INVALID (0xff)
@@ -1792,9 +2163,13 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x03)
#define MPI3_DEVICE0_ASTATUS_UNAUTHORIZED (0x04)
#define MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY (0x05)
+#define MPI3_DEVICE0_ASTATUS_PREPARE (0x06)
+#define MPI3_DEVICE0_ASTATUS_SAFE_MODE (0x07)
+#define MPI3_DEVICE0_ASTATUS_GENERIC_MAX (0x0f)
#define MPI3_DEVICE0_ASTATUS_SAS_UNKNOWN (0x10)
#define MPI3_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x11)
#define MPI3_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x12)
+#define MPI3_DEVICE0_ASTATUS_SAS_MAX (0x1f)
#define MPI3_DEVICE0_ASTATUS_SIF_UNKNOWN (0x20)
#define MPI3_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x21)
#define MPI3_DEVICE0_ASTATUS_SIF_DIAG (0x22)
@@ -1810,6 +2185,8 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_PCIE_MEM_SPACE_ACCESS (0x31)
#define MPI3_DEVICE0_ASTATUS_PCIE_UNSUPPORTED (0x32)
#define MPI3_DEVICE0_ASTATUS_PCIE_MSIX_REQUIRED (0x33)
+#define MPI3_DEVICE0_ASTATUS_PCIE_ECRC_REQUIRED (0x34)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MAX (0x3f)
#define MPI3_DEVICE0_ASTATUS_NVME_UNKNOWN (0x40)
#define MPI3_DEVICE0_ASTATUS_NVME_READY_TIMEOUT (0x41)
#define MPI3_DEVICE0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x42)
@@ -1820,7 +2197,17 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x47)
#define MPI3_DEVICE0_ASTATUS_NVME_IDLE_TIMEOUT (0x48)
#define MPI3_DEVICE0_ASTATUS_NVME_CTRL_FAILURE_STATUS (0x49)
-#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x50)
+#define MPI3_DEVICE0_ASTATUS_NVME_INSUFFICIENT_POWER (0x4a)
+#define MPI3_DEVICE0_ASTATUS_NVME_DOORBELL_STRIDE (0x4b)
+#define MPI3_DEVICE0_ASTATUS_NVME_MEM_PAGE_MIN_SIZE (0x4c)
+#define MPI3_DEVICE0_ASTATUS_NVME_MEMORY_ALLOCATION (0x4d)
+#define MPI3_DEVICE0_ASTATUS_NVME_COMPLETION_TIME (0x4e)
+#define MPI3_DEVICE0_ASTATUS_NVME_BAR (0x4f)
+#define MPI3_DEVICE0_ASTATUS_NVME_NS_DESCRIPTOR (0x50)
+#define MPI3_DEVICE0_ASTATUS_NVME_INCOMPATIBLE_SETTINGS (0x51)
+#define MPI3_DEVICE0_ASTATUS_NVME_MAX (0x5f)
+#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x80)
+#define MPI3_DEVICE0_ASTATUS_VD_MAX (0x8f)
#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080)
#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008)
#define MPI3_DEVICE0_FLAGS_ATT_METHOD_MASK (0x0006)
@@ -1870,11 +2257,17 @@ struct mpi3_device_page1 {
struct mpi3_config_page_header header;
__le16 dev_handle;
__le16 reserved0a;
- __le32 reserved0c[12];
+ __le16 link_change_count;
+ __le16 rate_change_count;
+ __le16 tm_count;
+ __le16 reserved12;
+ __le32 reserved14[10];
u8 reserved3c[3];
u8 device_form;
union mpi3_device1_dev_spec_format device_specific;
};
#define MPI3_DEVICE1_PAGEVERSION (0x00)
+#define MPI3_DEVICE1_COUNTER_MAX (0xfffe)
+#define MPI3_DEVICE1_COUNTER_INVALID (0xffff)
#endif
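For context on the widened device-info decode above (the TYPE mask grew from 0x0003 to 0x0007, and ASPM/PITYPE bits were added), a minimal consumer might look like the sketch below. The device_info word itself lives in the full mpi3_device0_pcie_format definition, which this excerpt truncates, so treat the helpers as illustrative only.

static const char *mpi3_pcie_dev_type_name(u16 device_info)
{
	/* TYPE is now a 3-bit field; older headers masked only 2 bits */
	switch (device_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) {
	case MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE:
		return "NVMe device";
	case MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE:
		return "PCIe switch";
	case MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE:
		return "PCIe SCSI device";
	default:
		return "no device";
	}
}

static u16 mpi3_pcie_dev_aspm(u16 device_info)
{
	/* new in this revision: ASPM support bits at [5:4] */
	return (device_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_MASK) >>
		MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_SHIFT;
}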
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 169e4f9b7b7c..c29b87de8e18 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -61,6 +61,8 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
#define MPI3_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST (0x464e414d)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_OEM (0x204d454f)
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
@@ -94,6 +96,61 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5c)
#define MPI3_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7c)
#define MPI3_IMAGE_HEADER_SIZE (0x100)
+#ifndef MPI3_CI_MANIFEST_MPI_MAX
+#define MPI3_CI_MANIFEST_MPI_MAX (1)
+#endif
+struct mpi3_ci_manifest_mpi_comp_image_ref {
+ __le32 signature1;
+ __le32 reserved04[3];
+ struct mpi3_comp_image_version component_image_version;
+ __le32 component_image_version_string_offset;
+ __le32 crc;
+};
+
+struct mpi3_ci_manifest_mpi {
+ u8 manifest_type;
+ u8 reserved01[3];
+ __le32 reserved04[3];
+ u8 num_image_references;
+ u8 release_level;
+ __le16 reserved12;
+ __le16 reserved14;
+ __le16 flags;
+ __le32 reserved18[2];
+ __le16 vendor_id;
+ __le16 device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+ __le32 reserved28[2];
+ union mpi3_version_union package_security_version;
+ __le32 reserved34;
+ struct mpi3_comp_image_version package_version;
+ __le32 package_version_string_offset;
+ __le32 package_build_date_string_offset;
+ __le32 package_build_time_string_offset;
+ __le32 reserved4c;
+ __le32 diag_authorization_identifier[16];
+ struct mpi3_ci_manifest_mpi_comp_image_ref component_image_ref[MPI3_CI_MANIFEST_MPI_MAX];
+};
+
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_DEV (0x00)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_PREALPHA (0x10)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_ALPHA (0x20)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_BETA (0x30)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_RC (0x40)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_GCA (0x50)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_POINT (0x60)
+#define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTHORIZATION (0x01)
+#define MPI3_CI_MANIFEST_MPI_SUBSYSTEMID_IGNORED (0xffff)
+#define MPI3_CI_MANIFEST_MPI_PKG_VER_STR_OFF_UNSPECIFIED (0x00000000)
+#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_DATE_STR_OFF_UNSPECIFIED (0x00000000)
+#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_TIME_STR_OFF_UNSPECIFIED (0x00000000)
+union mpi3_ci_manifest {
+ struct mpi3_ci_manifest_mpi mpi;
+ __le32 dword[1];
+};
+
+#define MPI3_CI_MANIFEST_TYPE_MPI (0x00)
struct mpi3_extended_image_header {
u8 image_type;
u8 reserved01[3];
@@ -161,6 +218,7 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA384 (0x03)
#define MPI3_ENCRYPTION_ALGORITHM_UNUSED (0x00)
#define MPI3_ENCRYPTION_ALGORITHM_RSA256 (0x01)
#define MPI3_ENCRYPTION_ALGORITHM_RSA512 (0x02)
@@ -178,7 +236,6 @@ struct mpi3_encrypted_key_with_hash_entry {
u8 reserved03;
__le32 reserved04;
__le32 public_key[MPI3_PUBLIC_KEY_MAX];
- __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
};
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
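The new manifest structures above describe the contents of a firmware package. As an illustrative sketch only (assuming this header is included and the caller sized the buffer from the image header, since component_image_ref[] is declared with a default bound of 1), a walker over the image references could look like:

static void mpi3_show_manifest(const union mpi3_ci_manifest *manifest)
{
	const struct mpi3_ci_manifest_mpi *m = &manifest->mpi;
	u8 i;

	if (m->manifest_type != MPI3_CI_MANIFEST_TYPE_MPI)
		return;	/* only the MPI manifest format is defined so far */

	for (i = 0; i < m->num_image_references; i++)
		pr_info("image %u: signature1 0x%08x crc 0x%08x\n", i,
			le32_to_cpu(m->component_image_ref[i].signature1),
			le32_to_cpu(m->component_image_ref[i].crc));
}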
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
index e02b6d3cfba2..7a208dc81d49 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -13,7 +13,7 @@ struct mpi3_scsi_io_cdb_eedp32 {
__le32 transfer_length;
};
-union mpi3_scso_io_cdb_union {
+union mpi3_scsi_io_cdb_union {
u8 cdb32[32];
struct mpi3_scsi_io_cdb_eedp32 eedp32;
struct mpi3_sge_common sge;
@@ -32,11 +32,12 @@ struct mpi3_scsi_io_request {
__le32 skip_count;
__le32 data_length;
u8 lun[8];
- union mpi3_scso_io_cdb_union cdb;
+ union mpi3_scsi_io_cdb_union cdb;
union mpi3_sge_union sgl[4];
};
#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
+#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
@@ -155,5 +156,13 @@ struct mpi3_scsi_task_mgmt_reply {
__le32 reserved18;
};
-#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE (0x00)
+#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME (0x02)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED (0x04)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED (0x05)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED (0x08)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN (0x09)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG (0x0a)
+#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED (0x81)
#endif
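The task-management response codes are now fully enumerated in this header instead of being private to the driver (the mpi3mr.h hunk below drops the old MPI3MR_RSP_* copies). A sketch of the obvious decode helper, assuming this header is included:

static const char *mpi3_tm_rspcode_name(u8 rsp_code)
{
	switch (rsp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		return "TM complete";
	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
		return "invalid frame";
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
		return "TM function not supported";
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
		return "TM failed";
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
		return "TM succeeded";
	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
		return "invalid LUN";
	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
		return "overlapped tag";
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		return "I/O queued on IOC";
	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
		return "denied by NVMe device";
	default:
		return "unknown response code";
	}
}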
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index 1af99a5382d5..bc56273778d3 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -29,10 +29,15 @@ struct mpi3_ioc_init_request {
__le64 driver_information_address;
};
-#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
-#define MPI3_WHOINIT_ROM_BIOS (0x02)
-#define MPI3_WHOINIT_HOST_DRIVER (0x03)
-#define MPI3_WHOINIT_MANUFACTURER (0x04)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_BOTH (0x03)
+#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI3_WHOINIT_ROM_BIOS (0x02)
+#define MPI3_WHOINIT_HOST_DRIVER (0x03)
+#define MPI3_WHOINIT_MANUFACTURER (0x04)
struct mpi3_driver_info_layout {
__le32 information_length;
u8 driver_signature[12];
@@ -77,17 +82,17 @@ struct mpi3_ioc_facts_data {
u8 sge_modifier_shift;
u8 protocol_flags;
__le16 max_sas_initiators;
- __le16 max_sas_targets;
+ __le16 reserved2a;
__le16 max_sas_expanders;
__le16 max_enclosures;
__le16 min_dev_handle;
__le16 max_dev_handle;
- __le16 max_pc_ie_switches;
+ __le16 max_pcie_switches;
__le16 max_nvme;
- __le16 max_pds;
+ __le16 reserved38;
__le16 max_vds;
__le16 max_host_pds;
- __le16 max_advanced_host_pds;
+ __le16 max_adv_host_pds;
__le16 max_raid_pds;
__le16 max_posted_cmd_buffers;
__le32 flags;
@@ -97,26 +102,41 @@ struct mpi3_ioc_facts_data {
__le16 reserved4e;
__le32 diag_trace_size;
__le32 diag_fw_size;
+ __le32 diag_driver_size;
+ u8 max_host_pd_ns_count;
+ u8 max_adv_host_pd_ns_count;
+ u8 max_raidpd_ns_count;
+ u8 reserved5f;
};
-#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD (0x00000010)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x10000000)
+#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020)
+#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010)
#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_GRAN_MASK (0x00000001)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_IOC_GRAN (0x00000000)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_REPLY_Q_GRAN (0x00000001)
+#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001)
#define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000)
#define MPI3_IOCFACTS_PID_TYPE_SHIFT (12)
#define MPI3_IOCFACTS_PID_PRODUCT_MASK (0x0f00)
#define MPI3_IOCFACTS_PID_PRODUCT_SHIFT (8)
#define MPI3_IOCFACTS_PID_FAMILY_MASK (0x00ff)
#define MPI3_IOCFACTS_PID_FAMILY_SHIFT (0)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_REKEY (0x2000)
+#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_RAID (0x0100)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0200)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_RAID (0x0300)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0400)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_MGMT (0x0300)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0400)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0500)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_OOB (0x0600)
#define MPI3_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0080)
#define MPI3_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0040)
#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
@@ -175,6 +195,7 @@ struct mpi3_create_request_queue_request {
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2)
struct mpi3_delete_request_queue_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -210,6 +231,7 @@ struct mpi3_create_reply_queue_request {
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2)
struct mpi3_delete_reply_queue_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -255,7 +277,9 @@ struct mpi3_port_enable_request {
#define MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x19)
#define MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x20)
#define MPI3_EVENT_PCIE_ENUMERATION (0x22)
+#define MPI3_EVENT_PCIE_ERROR_THRESHOLD (0x23)
#define MPI3_EVENT_HARD_RESET_RECEIVED (0x40)
+#define MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE (0x50)
#define MPI3_EVENT_MIN_PRODUCT_SPECIFIC (0x60)
#define MPI3_EVENT_MAX_PRODUCT_SPECIFIC (0x7f)
#define MPI3_EVENT_NOTIFY_EVENTMASK_WORDS (4)
@@ -311,10 +335,9 @@ struct mpi3_event_data_temp_threshold {
__le32 reserved0c;
};
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD3_EXCEEDED (0x0008)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD2_EXCEEDED (0x0004)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD1_EXCEEDED (0x0002)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD0_EXCEEDED (0x0001)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_FATAL_THRESHOLD_EXCEEDED (0x0004)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_CRITICAL_THRESHOLD_EXCEEDED (0x0002)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_WARNING_THRESHOLD_EXCEEDED (0x0001)
struct mpi3_event_data_cable_management {
__le32 active_cable_power_requirement;
u8 status;
@@ -398,8 +421,10 @@ struct mpi3_event_data_sas_discovery {
#define MPI3_SAS_DISC_STATUS_MAX_EXPANDERS_EXCEED (0x40000000)
#define MPI3_SAS_DISC_STATUS_MAX_DEVICES_EXCEED (0x20000000)
#define MPI3_SAS_DISC_STATUS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI3_SAS_DISC_STATUS_INVALID_CEI (0x00010000)
+#define MPI3_SAS_DISC_STATUS_FECEI_MISMATCH (0x00008000)
#define MPI3_SAS_DISC_STATUS_MULTIPLE_DEVICES_IN_SLOT (0x00004000)
-#define MPI3_SAS_DISC_STATUS_SLOT_COUNT_MISMATCH (0x00002000)
+#define MPI3_SAS_DISC_STATUS_NECEI_MISMATCH (0x00002000)
#define MPI3_SAS_DISC_STATUS_TOO_MANY_SLOTS (0x00001000)
#define MPI3_SAS_DISC_STATUS_EXP_MULTI_SUBTRACTIVE (0x00000800)
#define MPI3_SAS_DISC_STATUS_MULTI_PORT_DOMAIN (0x00000400)
@@ -581,6 +606,20 @@ struct mpi3_event_data_pcie_topology_change_list {
#define MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
#define MPI3_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
#define MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_pcie_error_threshold {
+ __le64 timestamp;
+ u8 reason_code;
+ u8 port;
+ __le16 switch_dev_handle;
+ u8 error;
+ u8 action;
+ __le16 threshold_count;
+ __le16 attached_dev_handle;
+ __le16 reserved12;
+};
+
+#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00)
+#define MPI3_EVENT_PCI_ERROR_RC_ESCALATION (0x01)
struct mpi3_event_data_sas_init_dev_status_change {
u8 reason_code;
u8 io_unit_port;
@@ -604,6 +643,16 @@ struct mpi3_event_data_hard_reset_received {
__le16 reserved02;
};
+struct mpi3_event_data_diag_buffer_status_change {
+ u8 type;
+ u8 reason_code;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01)
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02)
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03)
#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
@@ -645,21 +694,23 @@ struct mpi3_pel_seq {
};
struct mpi3_pel_entry {
+ __le64 time_stamp;
__le32 sequence_number;
- __le32 time_stamp[2];
__le16 log_code;
__le16 arg_type;
__le16 locale;
u8 class;
- u8 reserved13;
+ u8 flags;
u8 ext_num;
u8 num_exts;
u8 arg_data_size;
- u8 fixed_format_size;
+ u8 fixed_format_strings_size;
__le32 reserved18[2];
__le32 pel_info[24];
};
+#define MPI3_PEL_FLAGS_COMPLETE_RESET_NEEDED (0x02)
+#define MPI3_PEL_FLAGS_ACK_NEEDED (0x01)
struct mpi3_pel_list {
__le32 log_count;
__le32 reserved04;
@@ -837,7 +888,10 @@ struct mpi3_pel_req_action_acknowledge {
__le32 reserved10;
};
-#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT (0x01)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
struct mpi3_pel_reply {
__le16 host_tag;
u8 ioc_use_only02;
@@ -885,6 +939,7 @@ struct mpi3_ci_download_request {
#define MPI3_CI_DOWNLOAD_ACTION_ONLINE_ACTIVATION (0x02)
#define MPI3_CI_DOWNLOAD_ACTION_OFFLINE_ACTIVATION (0x03)
#define MPI3_CI_DOWNLOAD_ACTION_GET_STATUS (0x04)
+#define MPI3_CI_DOWNLOAD_ACTION_CANCEL_OFFLINE_ACTIVATION (0x05)
struct mpi3_ci_download_reply {
__le16 host_tag;
u8 ioc_use_only02;
@@ -902,6 +957,7 @@ struct mpi3_ci_download_reply {
};
#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80)
+#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
@@ -939,19 +995,28 @@ struct mpi3_ci_upload_request {
#define MPI3_CTRL_OP_REMOVE_DEVICE (0x10)
#define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11)
#define MPI3_CTRL_OP_HIDDEN_ACK (0x12)
+#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS (0x13)
#define MPI3_CTRL_OP_SAS_SEND_PRIMITIVE (0x20)
-#define MPI3_CTRL_OP_SAS_CLEAR_ERROR_LOG (0x21)
-#define MPI3_CTRL_OP_PCIE_CLEAR_ERROR_LOG (0x22)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL (0x21)
+#define MPI3_CTRL_OP_READ_INTERNAL_BUS (0x23)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS (0x24)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL (0x30)
#define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00)
#define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00)
#define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PHY_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
-#define MPI3_CTRL_OP_SAS_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
-#define MPI3_CTRL_OP_PCIE_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX (0x01)
+#define MPI3_CTRL_OP_READ_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM32_VALUE_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_ACTION_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_LINK_INDEX (0x01)
#define MPI3_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01)
#define MPI3_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02)
#define MPI3_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
@@ -966,9 +1031,14 @@ struct mpi3_ci_upload_request {
#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1)
#define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0)
#define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0)
+#define MPI3_CTRL_READ_INTERNAL_BUS_VALUE32_VALUE_INDEX (0)
#define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01)
#define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03)
#define MPI3_CTRL_PRIMFLAGS_REDUNDANT (0x06)
+#define MPI3_CTRL_ACTION_NOP (0x00)
+#define MPI3_CTRL_ACTION_LINK_RESET (0x01)
+#define MPI3_CTRL_ACTION_HARD_RESET (0x02)
+#define MPI3_CTRL_ACTION_CLEAR_ERROR_LOG (0x05)
struct mpi3_iounit_control_request {
__le16 host_tag;
u8 ioc_use_only02;
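The reworked IO unit control operations above pair each opcode with *_PARAM*_INDEX macros naming the parameter slot the operation consumes. A hedged sketch of that convention for the new CLEAR_DEVICE_COUNTERS op follows; the 'operation' selector and the param16[] array belong to the full mpi3_iounit_control_request definition, which this hunk truncates, so those field names are assumptions here:

static void mpi3_fill_clear_dev_counters(struct mpi3_iounit_control_request *req,
					 u16 dev_handle)
{
	/* assumed fields: 'operation' selector and a small param16[] array */
	req->operation = MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS;
	req->param16[MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX] =
		cpu_to_le16(dev_handle);
}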
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
new file mode 100644
index 000000000000..dbfaf4137560
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_PCI_H
+#define MPI30_PCI_H 1
+#ifndef MPI3_NVME_ENCAP_CMD_MAX
+#define MPI3_NVME_ENCAP_CMD_MAX (1)
+#endif
+struct mpi3_nvme_encapsulated_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le16 encapsulated_command_length;
+ __le16 flags;
+ __le32 reserved10[4];
+ __le32 command[MPI3_NVME_ENCAP_CMD_MAX];
+};
+
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)
+struct mpi3_nvme_encapsulated_error_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 nvme_completion_entry[4];
+};
+#endif
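The flag bits in this new header choose which submission queue an encapsulated NVMe command lands on. A sketch, assuming the caller allocated the request large enough for the SQE (command[] is declared with a default bound of MPI3_NVME_ENCAP_CMD_MAX, i.e. one dword):

static void mpi3_fill_nvme_encap(struct mpi3_nvme_encapsulated_request *req,
				 u16 dev_handle, const void *sqe, u16 sqe_len)
{
	req->dev_handle = cpu_to_le16(dev_handle);
	req->encapsulated_command_length = cpu_to_le16(sqe_len);
	/* route to the admin queue; the default (0) selects an I/O queue */
	req->flags = cpu_to_le16(MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN);
	memcpy(req->command, sqe, sqe_len);	/* SQE is already little-endian */
}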
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
index ba5018702960..298d895e374b 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -30,4 +30,18 @@ struct mpi3_smp_passthrough_request {
struct mpi3_sge_common request_sge;
struct mpi3_sge_common response_sge;
};
+
+struct mpi3_smp_passthrough_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le16 response_data_length;
+ __le16 reserved12;
+};
#endif
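The new reply format hands the host the IOC status and the amount of SMP response data actually returned. Something along these lines (illustrative; ioc_info is the wrapper from mpi3mr_debug.h further down) would log it:

static void mpi3mr_log_smp_reply(struct mpi3mr_ioc *mrioc,
				 const struct mpi3_smp_passthrough_reply *reply)
{
	ioc_info(mrioc, "SMP reply: ioc_status 0x%04x loginfo 0x%08x resp_len %u\n",
		 le16_to_cpu(reply->ioc_status),
		 le32_to_cpu(reply->ioc_log_info),
		 le16_to_cpu(reply->response_data_length));
}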
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index 63e4e81d5397..6d550117ec2e 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -19,8 +19,8 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (0)
-#define MPI3_VERSION_DEV (18)
+#define MPI3_VERSION_UNIT (22)
+#define MPI3_VERSION_DEV (0)
struct mpi3_sysif_oper_queue_indexes {
__le16 producer_index;
__le16 reserved02;
@@ -74,6 +74,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_INFO_HIGH_OFFSET (0x00000004)
#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK (0xff000000)
#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT (24)
+#define MPI3_SYSIF_IOC_INFO_LOW_HCB_DISABLED (0x00000001)
#define MPI3_SYSIF_IOC_CONFIG_OFFSET (0x00000014)
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ (0x00f00000)
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
@@ -82,12 +83,13 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
-#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN (0x00002000)
+#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ (0x00002000)
#define MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE (0x00000010)
#define MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC (0x00000001)
#define MPI3_SYSIF_IOC_STATUS_OFFSET (0x0000001c)
#define MPI3_SYSIF_IOC_STATUS_RESET_HISTORY (0x00000010)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK (0x0000000c)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_SHIFT (0x00000002)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_NONE (0x00000000)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS (0x00000004)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE (0x00000008)
@@ -107,9 +109,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
-#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x30000000)
-#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_MASK (0x00ff0000)
-#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_SHIFT (16)
+#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x20000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_MASK (0x01ff0000)
+#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_SHIFT (16)
#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_MASK (0x0000ff00)
#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_SHIFT (8)
#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_MASK (0x000000ff)
@@ -117,9 +119,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET (0x00001000)
#define MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET (0x00001004)
#define MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET (0x00001008)
-#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(n) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((n) - 1) * 8))
+#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(N) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET (0x0000100c)
-#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(n) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((n) - 1) * 8))
+#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
@@ -133,7 +135,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
-#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_FLASH_RCVRY_RESET (0x00000200)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_COMPLETE_RESET (0x00000300)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT (0x00000700)
#define MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS (0x00000080)
@@ -153,8 +155,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
#define MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED (0x0000f003)
-#define MPI3_SYSIF_FAULT_CODE_SAFE_MODE_EXIT (0x0000f004)
-#define MPI3_SYSIF_FAULT_CODE_FACTORY_RESET (0x0000f005)
+#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004)
+#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005)
+#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006)
#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
#define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
@@ -409,6 +412,8 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
+#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c)
+#define MPI3_IOCSTATUS_SUPERVISOR_ONLY (0x000d)
#define MPI3_IOCSTATUS_FAILURE (0x001f)
#define MPI3_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
#define MPI3_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
@@ -448,8 +453,10 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_CI_UNSUPPORTED (0x00b0)
#define MPI3_IOCSTATUS_CI_UPDATE_SEQUENCE (0x00b1)
#define MPI3_IOCSTATUS_CI_VALIDATION_FAILED (0x00b2)
-#define MPI3_IOCSTATUS_CI_UPDATE_PENDING (0x00b3)
+#define MPI3_IOCSTATUS_CI_KEY_UPDATE_PENDING (0x00b3)
+#define MPI3_IOCSTATUS_CI_KEY_UPDATE_NOT_POSSIBLE (0x00b4)
#define MPI3_IOCSTATUS_SECURITY_KEY_REQUIRED (0x00c0)
+#define MPI3_IOCSTATUS_SECURITY_VIOLATION (0x00c1)
#define MPI3_IOCSTATUS_INVALID_QUEUE_ID (0x0f00)
#define MPI3_IOCSTATUS_INVALID_QUEUE_SIZE (0x0f01)
#define MPI3_IOCSTATUS_INVALID_MSIX_VECTOR (0x0f02)
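Earlier in this file the per-queue doorbell macros gained uppercase parameters; they compute a register offset from the queue number, with operational queues numbered from 1. A minimal sketch of their use, assuming 'sysif' is the ioremapped system-interface base:

static void mpi3mr_ring_req_q_pi(void __iomem *sysif, u16 qid, u16 pi)
{
	/* qid 1 maps to MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET itself */
	writel(pi, sysif + MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(qid));
}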
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 9787b53a2b59..fc4eaf6d1e47 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -45,6 +45,7 @@
#include "mpi/mpi30_init.h"
#include "mpi/mpi30_ioc.h"
#include "mpi/mpi30_sas.h"
+#include "mpi/mpi30_pci.h"
#include "mpi3mr_debug.h"
/* Global list and lock for storing multiple adapters managed by the driver */
@@ -52,8 +53,8 @@ extern spinlock_t mrioc_list_lock;
extern struct list_head mrioc_list;
extern int prot_mask;
-#define MPI3MR_DRIVER_VERSION "00.255.45.01"
-#define MPI3MR_DRIVER_RELDATE "12-December-2020"
+#define MPI3MR_DRIVER_VERSION "8.0.0.61.0"
+#define MPI3MR_DRIVER_RELDATE "20-December-2021"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -79,7 +80,8 @@ extern int prot_mask;
/* Operational queue management definitions */
#define MPI3MR_OP_REQ_Q_QD 512
-#define MPI3MR_OP_REP_Q_QD 4096
+#define MPI3MR_OP_REP_Q_QD 1024
+#define MPI3MR_OP_REP_Q_QD4K 4096
#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
#define MPI3MR_MAX_SEG_LIST_SIZE 4096
@@ -90,25 +92,31 @@ extern int prot_mask;
#define MPI3MR_HOSTTAG_IOCTLCMDS 2
#define MPI3MR_HOSTTAG_BLK_TMS 5
-#define MPI3MR_NUM_DEVRMCMD 1
+#define MPI3MR_NUM_DEVRMCMD 16
#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_BLK_TMS + 1)
#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
MPI3MR_NUM_DEVRMCMD - 1)
-#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
+#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
+#define MPI3MR_NUM_EVTACKCMD 4
+#define MPI3MR_HOSTTAG_EVTACKCMD_MIN (MPI3MR_HOSTTAG_DEVRMCMD_MAX + 1)
+#define MPI3MR_HOSTTAG_EVTACKCMD_MAX (MPI3MR_HOSTTAG_EVTACKCMD_MIN + \
+ MPI3MR_NUM_EVTACKCMD - 1)
/* Reduced resource count definition for crash kernel */
#define MPI3MR_HOST_IOS_KDUMP 128
/* command/controller interaction timeout definitions in seconds */
-#define MPI3MR_INTADMCMD_TIMEOUT 10
+#define MPI3MR_INTADMCMD_TIMEOUT 60
#define MPI3MR_PORTENABLE_TIMEOUT 300
-#define MPI3MR_ABORTTM_TIMEOUT 30
-#define MPI3MR_RESETTM_TIMEOUT 30
+#define MPI3MR_ABORTTM_TIMEOUT 60
+#define MPI3MR_RESETTM_TIMEOUT 60
#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5
#define MPI3MR_TSUPDATE_INTERVAL 900
#define MPI3MR_DEFAULT_SHUTDOWN_TIME 120
#define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180
+#define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180
+#define MPI3MR_RESET_ACK_TIMEOUT 30
#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milliseconds */
@@ -121,7 +129,7 @@ extern int prot_mask;
/* Definitions for Event replies and sense buffer allocated per controller */
#define MPI3MR_NUM_EVT_REPLIES 64
-#define MPI3MR_SENSEBUF_SZ 256
+#define MPI3MR_SENSE_BUF_SZ 256
#define MPI3MR_SENSEBUF_FACTOR 3
#define MPI3MR_CHAINBUF_FACTOR 3
#define MPI3MR_CHAINBUFDIX_FACTOR 2
@@ -135,17 +143,11 @@ extern int prot_mask;
/* ResponseCode definitions */
#define MPI3MR_RI_MASK_RESPCODE (0x000000FF)
-#define MPI3MR_RSP_TM_COMPLETE 0x00
-#define MPI3MR_RSP_INVALID_FRAME 0x02
-#define MPI3MR_RSP_TM_NOT_SUPPORTED 0x04
-#define MPI3MR_RSP_TM_FAILED 0x05
-#define MPI3MR_RSP_TM_SUCCEEDED 0x08
-#define MPI3MR_RSP_TM_INVALID_LUN 0x09
-#define MPI3MR_RSP_TM_OVERLAPPED_TAG 0x0A
#define MPI3MR_RSP_IO_QUEUED_ON_IOC \
MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC
#define MPI3MR_DEFAULT_MDTS (128 * 1024)
+#define MPI3MR_DEFAULT_PGSZEXP (12)
/* Command retry count definitions */
#define MPI3MR_DEV_RMHS_RETRY_COUNT 3
@@ -183,20 +185,6 @@ enum mpi3mr_iocstate {
MRIOC_STATE_UNRECOVERABLE,
};
-/* Init type definitions */
-enum mpi3mr_init_type {
- MPI3MR_IT_INIT = 0,
- MPI3MR_IT_RESET,
- MPI3MR_IT_RESUME,
-};
-
-/* Cleanup reason definitions */
-enum mpi3mr_cleanup_reason {
- MPI3MR_COMPLETE_CLEANUP = 0,
- MPI3MR_REINIT_FAILURE,
- MPI3MR_SUSPEND,
-};
-
/* Reset reason code definitions*/
enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_BRINGUP = 1,
@@ -222,7 +210,14 @@ enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT = 21,
MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22,
MPI3MR_RESET_FROM_SYSFS = 23,
- MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24
+ MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24,
+ MPI3MR_RESET_FROM_FIRMWARE = 27,
+};
+
+/* Queue type definitions */
+enum queue_type {
+ MPI3MR_DEFAULT_QUEUE = 0,
+ MPI3MR_POLL_QUEUE,
};
/**
@@ -263,7 +258,7 @@ struct mpi3mr_ioc_facts {
u16 max_vds;
u16 max_hpds;
u16 max_advhpds;
- u16 max_raidpds;
+ u16 max_raid_pds;
u16 min_devhandle;
u16 max_devhandle;
u16 max_op_req_q;
@@ -336,6 +331,7 @@ struct op_req_qinfo {
* @pend_ios: Number of IOs pending in HW for this queue
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
+ * @qtype: Type of queue (types defined in enum queue_type)
*/
struct op_reply_qinfo {
u16 ci;
@@ -350,6 +346,7 @@ struct op_reply_qinfo {
atomic_t pend_ios;
bool enable_irq_poll;
atomic_t in_use;
+ enum queue_type qtype;
};
/**
@@ -388,6 +385,7 @@ struct tgt_dev_sas_sata {
* @pgsz: Device page size
* @abort_to: Timeout for abort TM
* @reset_to: Timeout for Target/LUN reset TM
+ * @dev_info: Device information bits
*/
struct tgt_dev_pcie {
u32 mdts;
@@ -395,6 +393,7 @@ struct tgt_dev_pcie {
u8 pgsz;
u8 abort_to;
u8 reset_to;
+ u16 dev_info;
};
/**
@@ -499,6 +498,8 @@ static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
* @dev_removedelay: Device is waiting to be removed in FW
* @dev_type: Device type
* @tgt_dev: Internal target device pointer
+ * @pend_count: Counter to track pending I/Os during error
+ * handling
*/
struct mpi3mr_stgt_priv_data {
struct scsi_target *starget;
@@ -510,6 +511,7 @@ struct mpi3mr_stgt_priv_data {
u8 dev_removedelay;
u8 dev_type;
struct mpi3mr_tgt_dev *tgt_dev;
+ u32 pend_count;
};
/**
@@ -518,11 +520,14 @@ struct mpi3mr_stgt_priv_data {
* @tgt_priv_data: Scsi_target private data pointer
* @lun_id: LUN ID of the device
* @ncq_prio_enable: NCQ priority enable for SATA device
+ * @pend_count: Counter to track pending I/Os during error
+ * handling
*/
struct mpi3mr_sdev_priv_data {
struct mpi3mr_stgt_priv_data *tgt_priv_data;
u32 lun_id;
u8 ncq_prio_enable;
+ u32 pend_count;
};
/**
@@ -630,6 +635,7 @@ struct scmd_priv {
* @ready_timeout: Controller ready timeout
* @intr_info: Interrupt cookie pointer
* @intr_info_count: Number of interrupt cookies
+ * @is_intr_info_set: Flag to indicate intr info is set up
* @num_queues: Number of operational queues
* @num_op_req_q: Number of operational request queues
* @req_qinfo: Operational request queue info pointer
@@ -681,17 +687,23 @@ struct scmd_priv {
* @chain_buf_lock: Chain buffer list lock
* @host_tm_cmds: Command tracker for task management commands
* @dev_rmhs_cmds: Command tracker for device removal commands
+ * @evtack_cmds: Command tracker for event ack commands
* @devrem_bitmap_sz: Device removal bitmap size
* @devrem_bitmap: Device removal bitmap
* @dev_handle_bitmap_sz: Device handle bitmap size
* @removepend_bitmap: Remove pending bitmap
* @delayed_rmhs_list: Delayed device removal list
+ * @evtack_cmds_bitmap_sz: Event Ack bitmap size
+ * @evtack_cmds_bitmap: Event Ack bitmap
+ * @delayed_evtack_cmds_list: Delayed event acknowledgment list
* @ts_update_counter: Timestamp update counter
- * @fault_dbg: Fault debug flag
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
+ * @prev_reset_result: Result of previous reset
* @reset_mutex: Controller reset mutex
* @reset_waitq: Controller reset wait queue
+ * @prepare_for_reset: Prepare for reset event received
+ * @prepare_for_reset_timeout_counter: Prepare for reset timeout
* @diagsave_timeout: Diagnostic information save timeout
* @logging_level: Controller debug logging level
* @flush_io_count: I/O count to flush after reset
@@ -699,6 +711,9 @@ struct scmd_priv {
* @driver_info: Driver, Kernel, OS information to firmware
* @change_count: Topology change count
* @op_reply_q_offset: Operational reply queue offset with MSIx
+ * @default_qcount: Total default queues
+ * @active_poll_qcount: Currently active poll queue count
+ * @requested_poll_qcount: User requested poll queue count
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -739,6 +754,7 @@ struct mpi3mr_ioc {
struct mpi3mr_intr_info *intr_info;
u16 intr_info_count;
+ bool is_intr_info_set;
u16 num_queues;
u16 num_op_req_q;
@@ -758,6 +774,7 @@ struct mpi3mr_ioc {
dma_addr_t reply_buf_dma_max_address;
u16 reply_free_qsz;
+ u16 reply_sz;
struct dma_pool *reply_free_q_pool;
__le64 *reply_free_q;
dma_addr_t reply_free_q_dma;
@@ -805,19 +822,26 @@ struct mpi3mr_ioc {
struct mpi3mr_drv_cmd host_tm_cmds;
struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
+ struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
u16 devrem_bitmap_sz;
void *devrem_bitmap;
u16 dev_handle_bitmap_sz;
void *removepend_bitmap;
struct list_head delayed_rmhs_list;
+ u16 evtack_cmds_bitmap_sz;
+ void *evtack_cmds_bitmap;
+ struct list_head delayed_evtack_cmds_list;
u32 ts_update_counter;
- u8 fault_dbg;
u8 reset_in_progress;
u8 unrecoverable;
+ int prev_reset_result;
struct mutex reset_mutex;
wait_queue_head_t reset_waitq;
+ u8 prepare_for_reset;
+ u16 prepare_for_reset_timeout_counter;
+
u16 diagsave_timeout;
int logging_level;
u16 flush_io_count;
@@ -826,6 +850,10 @@ struct mpi3mr_ioc {
struct mpi3_driver_info_layout driver_info;
u16 change_count;
u16 op_reply_q_offset;
+
+ u16 default_qcount;
+ u16 active_poll_qcount;
+ u16 requested_poll_qcount;
};
/**
@@ -867,10 +895,23 @@ struct delayed_dev_rmhs_node {
u8 iou_rc;
};
+/**
+ * struct delayed_evt_ack_node - Delayed event ack node
+ * @list: list head
+ * @event: MPI3 event ID
+ * @event_ctx: event context
+ */
+struct delayed_evt_ack_node {
+ struct list_head list;
+ u8 event;
+ u32 event_ctx;
+};
+
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type);
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason);
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc);
+int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume);
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc);
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
u16 admin_req_sz, u8 ignore_reset);
@@ -887,6 +928,7 @@ void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
u64 sense_buf_dma);
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc);
+void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc);
void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
struct mpi3_event_notification_reply *event_reply);
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
@@ -897,13 +939,11 @@ void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc);
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
u32 reset_reason, u8 snapdump);
-int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
- u32 reset_reason);
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc);
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc);
enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc);
-int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
u32 event_ctx);
void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout);
@@ -911,6 +951,12 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc);
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc);
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc);
-void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc);
+void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
+void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct op_reply_qinfo *op_reply_q);
+int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
#endif /*MPI3MR_H_INCLUDED*/
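The reserved host-tag ranges above partition the tag space so a completing reply can be mapped straight back to its driver command tracker; this is what mpi3mr_get_drv_cmd() in the mpi3mr_fw.c hunk below does for the new event-ack range. A standalone sketch of that mapping:

static struct mpi3mr_drv_cmd *
mpi3mr_evtack_cmd_from_tag(struct mpi3mr_ioc *mrioc, u16 host_tag)
{
	if (host_tag < MPI3MR_HOSTTAG_EVTACKCMD_MIN ||
	    host_tag > MPI3MR_HOSTTAG_EVTACKCMD_MAX)
		return NULL;
	return &mrioc->evtack_cmds[host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN];
}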
diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h
index c085bb048d41..cef61c5d59d3 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_debug.h
+++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h
@@ -14,27 +14,20 @@
/*
* debug levels
*/
-#define MPI3_DEBUG 0x00000001
-#define MPI3_DEBUG_MSG_FRAME 0x00000002
-#define MPI3_DEBUG_SG 0x00000004
-#define MPI3_DEBUG_EVENTS 0x00000008
-#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000010
-#define MPI3_DEBUG_INIT 0x00000020
-#define MPI3_DEBUG_EXIT 0x00000040
-#define MPI3_DEBUG_FAIL 0x00000080
-#define MPI3_DEBUG_TM 0x00000100
-#define MPI3_DEBUG_REPLY 0x00000200
-#define MPI3_DEBUG_HANDSHAKE 0x00000400
-#define MPI3_DEBUG_CONFIG 0x00000800
-#define MPI3_DEBUG_DL 0x00001000
-#define MPI3_DEBUG_RESET 0x00002000
-#define MPI3_DEBUG_SCSI 0x00004000
-#define MPI3_DEBUG_IOCTL 0x00008000
-#define MPI3_DEBUG_CSMISAS 0x00010000
-#define MPI3_DEBUG_SAS 0x00020000
-#define MPI3_DEBUG_TRANSPORT 0x00040000
-#define MPI3_DEBUG_TASK_SET_FULL 0x00080000
-#define MPI3_DEBUG_TRIGGER_DIAG 0x00200000
+
+#define MPI3_DEBUG_EVENT 0x00000001
+#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000002
+#define MPI3_DEBUG_INIT 0x00000004
+#define MPI3_DEBUG_EXIT 0x00000008
+#define MPI3_DEBUG_TM 0x00000010
+#define MPI3_DEBUG_RESET 0x00000020
+#define MPI3_DEBUG_SCSI_ERROR 0x00000040
+#define MPI3_DEBUG_REPLY 0x00000080
+#define MPI3_DEBUG_IOCTL_ERROR 0x00008000
+#define MPI3_DEBUG_IOCTL_INFO 0x00010000
+#define MPI3_DEBUG_SCSI_INFO 0x00020000
+#define MPI3_DEBUG 0x01000000
+#define MPI3_DEBUG_SG 0x02000000
/*
@@ -50,11 +43,103 @@
#define ioc_info(ioc, fmt, ...) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define dprint(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_event_th(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EVENT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_event_bh(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_init(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_INIT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_exit(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EXIT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_tm(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TM) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_reply(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_REPLY) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_reset(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_RESET) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_SCSI_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_SCSI_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_command(ioc, SCMD, LOG_LEVEL) \
+ do { \
+ if (ioc->logging_level & LOG_LEVEL) \
+ scsi_print_command(SCMD); \
+ } while (0)
+
+
+#define dprint_ioctl_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_IOCTL_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
-#define dbgprint(IOC, FMT, ...) \
+#define dprint_ioctl_err(ioc, fmt, ...) \
do { \
- if (IOC->logging_level & MPI3_DEBUG) \
- pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \
+ if (ioc->logging_level & MPI3_DEBUG_IOCTL_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#endif /* MPT3SAS_DEBUG_H_INCLUDED */
+
+/**
+ * dprint_dump_req - print message frame contents
+ * @req: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+dprint_dump_req(void *req, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)req;
+
+ pr_info("request:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
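Each of the reworked dprint_* macros above gates on a single MPI3_DEBUG_* bit in mrioc->logging_level, so callers pick the macro matching the subsystem they are tracing. A usage sketch (logging_level is normally configured via the driver's module parameter or sysfs rather than set inline like this):

static void mpi3mr_example_tm_trace(struct mpi3mr_ioc *mrioc, u16 handle)
{
	mrioc->logging_level |= MPI3_DEBUG_TM;	/* for illustration only */
	dprint_tm(mrioc, "sending target reset to handle 0x%04x\n", handle);
}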
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index aa5d877df6f8..15bdc21ead66 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -10,6 +10,16 @@
#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>
+static int
+mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
+static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
+static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data);
+
+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
+
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
@@ -78,6 +88,7 @@ static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
kfree(mrioc->intr_info);
mrioc->intr_info = NULL;
mrioc->intr_info_count = 0;
+ mrioc->is_intr_info_set = false;
pci_free_irq_vectors(mrioc->pdev);
}
@@ -124,8 +135,9 @@ static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
u64 reply_dma)
{
u32 old_idx = 0;
+ unsigned long flags;
- spin_lock(&mrioc->reply_free_queue_lock);
+ spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
old_idx = mrioc->reply_free_queue_host_index;
mrioc->reply_free_queue_host_index = (
(mrioc->reply_free_queue_host_index ==
@@ -134,15 +146,16 @@ static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
writel(mrioc->reply_free_queue_host_index,
&mrioc->sysif_regs->reply_free_host_index);
- spin_unlock(&mrioc->reply_free_queue_lock);
+ spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
u64 sense_buf_dma)
{
u32 old_idx = 0;
+ unsigned long flags;
- spin_lock(&mrioc->sbq_lock);
+ spin_lock_irqsave(&mrioc->sbq_lock, flags);
old_idx = mrioc->sbq_host_index;
mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
(mrioc->sense_buf_q_sz - 1)) ? 0 :
@@ -150,7 +163,7 @@ void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
writel(mrioc->sbq_host_index,
&mrioc->sysif_regs->sense_buffer_free_host_index);
- spin_unlock(&mrioc->sbq_lock);
+ spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
@@ -303,6 +316,12 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
return &mrioc->dev_rmhs_cmds[idx];
}
+ if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
+ host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
+ idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+ return &mrioc->evtack_cmds[idx];
+ }
+
return NULL;
}
@@ -369,7 +388,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
if (def_reply) {
cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
- mrioc->facts.reply_sz);
+ mrioc->reply_sz);
}
if (cmdptr->is_waiting) {
complete(&cmdptr->done);
@@ -446,10 +465,21 @@ mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
return reply_desc;
}
-static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_intr_info *intr_info)
+/**
+ * mpi3mr_process_op_reply_q - Operational reply queue handler
+ * @mrioc: Adapter instance reference
+ * @op_reply_q: Operational reply queue info
+ *
+ * Checks the specific operational reply queue and drains the
+ * reply queue entries until the queue is empty, processing the
+ * individual reply descriptors.
+ *
+ * Return: 0 if the queue is already processed, or the number of
+ * reply descriptors processed.
+ */
+int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct op_reply_qinfo *op_reply_q)
{
- struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
struct op_req_qinfo *op_req_q;
u32 exp_phase;
u32 reply_ci;
@@ -500,7 +530,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
* Ensure remaining completion happens from threaded ISR.
*/
if (num_op_reply > mrioc->max_host_ios) {
- intr_info->op_reply_q->enable_irq_poll = true;
+ op_reply_q->enable_irq_poll = true;
break;
}
@@ -515,6 +545,34 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
return num_op_reply;
}
+/**
+ * mpi3mr_blk_mq_poll - Operational reply queue handler
+ * @shost: SCSI Host reference
+ * @queue_num: Request queue number (the hardware context number, from
+ * the OS point of view)
+ *
+ * Checks the specific operational reply queue and drains the
+ * reply queue entries until the queue is empty, processing the
+ * individual reply descriptors.
+ *
+ * Return: 0 if the queue is already processed, or the number of
+ * reply descriptors processed.
+ */
+int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ int num_entries = 0;
+ struct mpi3mr_ioc *mrioc;
+
+ mrioc = (struct mpi3mr_ioc *)shost->hostdata;
+
+ if ((mrioc->reset_in_progress || mrioc->prepare_for_reset))
+ return 0;
+
+ num_entries = mpi3mr_process_op_reply_q(mrioc,
+ &mrioc->op_reply_qinfo[queue_num]);
+
+ return num_entries;
+}
+
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
struct mpi3mr_intr_info *intr_info = privdata;
@@ -535,7 +593,8 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
if (!midx)
num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
if (intr_info->op_reply_q)
- num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);
+ num_op_reply = mpi3mr_process_op_reply_q(mrioc,
+ intr_info->op_reply_q);
if (num_admin_replies || num_op_reply)
return IRQ_HANDLED;
@@ -606,9 +665,10 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
mpi3mr_process_admin_reply_q(mrioc);
if (intr_info->op_reply_q)
num_op_reply +=
- mpi3mr_process_op_reply_q(mrioc, intr_info);
+ mpi3mr_process_op_reply_q(mrioc,
+ intr_info->op_reply_q);
- usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);
+ usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
(num_op_reply < mrioc->max_host_ios));
@@ -652,6 +712,25 @@ static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
return retval;
}
+static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
+{
+ if (!mrioc->requested_poll_qcount)
+ return;
+
+ /* Reserved for Admin and Default Queue */
+ if (max_vectors > 2 &&
+ (mrioc->requested_poll_qcount < max_vectors - 2)) {
+ ioc_info(mrioc,
+ "enabled polled queues (%d) msix (%d)\n",
+ mrioc->requested_poll_qcount, max_vectors);
+ } else {
+ ioc_info(mrioc,
+ "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
+ mrioc->requested_poll_qcount, max_vectors);
+ mrioc->requested_poll_qcount = 0;
+ }
+}
+
/**
* mpi3mr_setup_isr - Setup ISR for the controller
* @mrioc: Adapter instance reference
@@ -664,48 +743,72 @@ static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
{
unsigned int irq_flags = PCI_IRQ_MSIX;
- int max_vectors;
+ int max_vectors, min_vec;
int retval;
int i;
- struct irq_affinity desc = { .pre_vectors = 1};
+ struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };
+
+ if (mrioc->is_intr_info_set)
+ return 0;
mpi3mr_cleanup_isr(mrioc);
- if (setup_one || reset_devices)
+ if (setup_one || reset_devices) {
max_vectors = 1;
- else {
+ retval = pci_alloc_irq_vectors(mrioc->pdev,
+ 1, max_vectors, irq_flags);
+ if (retval < 0) {
+ ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
+ retval);
+ goto out_failed;
+ }
+ } else {
max_vectors =
- min_t(int, mrioc->cpu_count + 1, mrioc->msix_count);
+ min_t(int, mrioc->cpu_count + 1 +
+ mrioc->requested_poll_qcount, mrioc->msix_count);
+
+ mpi3mr_calc_poll_queues(mrioc, max_vectors);
ioc_info(mrioc,
"MSI-X vectors supported: %d, no of cores: %d,",
mrioc->msix_count, mrioc->cpu_count);
ioc_info(mrioc,
- "MSI-x vectors requested: %d\n", max_vectors);
- }
+ "MSI-x vectors requested: %d poll_queues %d\n",
+ max_vectors, mrioc->requested_poll_qcount);
+
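+ /*
+ * The admin vector (pre_vectors) and the poll vectors
+ * (post_vectors) are excluded from the managed affinity spread;
+ * only the default operational queues get spread across CPUs.
+ */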
+ desc.post_vectors = mrioc->requested_poll_qcount;
+ min_vec = desc.pre_vectors + desc.post_vectors;
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
+
+ retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
+ min_vec, max_vectors, irq_flags, &desc);
+
+ if (retval < 0) {
+ ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
+ retval);
+ goto out_failed;
+ }
- irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
- mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
- retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
- 1, max_vectors, irq_flags, &desc);
- if (retval < 0) {
- ioc_err(mrioc, "Cannot alloc irq vectors\n");
- goto out_failed;
- }
- if (retval != max_vectors) {
- ioc_info(mrioc,
- "allocated vectors (%d) are less than configured (%d)\n",
- retval, max_vectors);
/*
* If only one MSI-x is allocated, then MSI-x 0 will be shared
* between Admin queue and operational queue
*/
- if (retval == 1)
+ if (retval == min_vec)
mrioc->op_reply_q_offset = 0;
+ else if (retval != (max_vectors)) {
+ ioc_info(mrioc,
+ "allocated vectors (%d) are less than configured (%d)\n",
+ retval, max_vectors);
+ }
max_vectors = retval;
+ mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
+
+ mpi3mr_calc_poll_queues(mrioc, max_vectors);
+
}
+
mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
GFP_KERNEL);
if (!mrioc->intr_info) {
@@ -720,6 +823,8 @@ static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
goto out_failed;
}
}
+ if (reset_devices || !setup_one)
+ mrioc->is_intr_info_set = true;
mrioc->intr_info_count = max_vectors;
mpi3mr_ioc_enable_intr(mrioc);
return 0;
@@ -796,6 +901,7 @@ static const struct {
},
{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
+ { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
};
/**
@@ -860,7 +966,7 @@ static const char *mpi3mr_reset_type_name(u16 reset_type)
*
* Return: Nothing.
*/
-static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
+void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
{
u32 ioc_status, code, code1, code2, code3;
@@ -958,25 +1064,25 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
- timeout = mrioc->ready_timeout * 10;
+ timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
do {
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
mpi3mr_clear_reset_history(mrioc);
- ioc_config =
- readl(&mrioc->sysif_regs->ioc_configuration);
- if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
- (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
- (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
- retval = 0;
- break;
- }
+ break;
+ }
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+ mpi3mr_print_fault_info(mrioc);
+ break;
}
msleep(100);
} while (--timeout);
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
+ (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
+ retval = 0;
ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
(!retval) ? "successful" : "failed", ioc_status, ioc_config);
@@ -984,32 +1090,171 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
+ * during reset/resume
+ * @mrioc: Adapter instance reference
+ *
+ * Return: zero if the new IOCFacts parameters are compatible with
+ * the older values, -EPERM otherwise.
+ */
+static int
+mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+{
+ u16 dev_handle_bitmap_sz;
+ void *removepend_bitmap;
+
+ if (mrioc->facts.reply_sz > mrioc->reply_sz) {
+ ioc_err(mrioc,
+ "cannot increase reply size from %d to %d\n",
+ mrioc->reply_sz, mrioc->facts.reply_sz);
+ return -EPERM;
+ }
+
+ if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
+ ioc_err(mrioc,
+ "cannot reduce number of operational reply queues from %d to %d\n",
+ mrioc->num_op_reply_q,
+ mrioc->facts.max_op_reply_q);
+ return -EPERM;
+ }
+
+ if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
+ ioc_err(mrioc,
+ "cannot reduce number of operational request queues from %d to %d\n",
+ mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
+ return -EPERM;
+ }
+
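+ /* One bit per device handle, rounded up to whole bytes */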
+ dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+ if (mrioc->facts.max_devhandle % 8)
+ dev_handle_bitmap_sz++;
+ if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
+ removepend_bitmap = krealloc(mrioc->removepend_bitmap,
+ dev_handle_bitmap_sz, GFP_KERNEL);
+ if (!removepend_bitmap) {
+ ioc_err(mrioc,
+ "failed to increase removepend_bitmap sz from: %d to %d\n",
+ mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+ return -EPERM;
+ }
+ memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
+ dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
+ mrioc->removepend_bitmap = removepend_bitmap;
+ ioc_info(mrioc,
+ "increased dev_handle_bitmap_sz from %d to %d\n",
+ mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+ mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
+ }
+
+ return 0;
+}
+
+/**
* mpi3mr_bring_ioc_ready - Bring controller to ready state
* @mrioc: Adapter instance reference
*
* Set Enable IOC bit in IOC configuration register and wait for
* the controller to become ready.
*
- * Return: 0 on success, -1 on failure.
+ * Return: 0 on success, appropriate error on failure.
*/
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
- u32 ioc_config, timeout;
- enum mpi3mr_iocstate current_state;
+ u32 ioc_config, ioc_status, timeout;
+ int retval = 0;
+ enum mpi3mr_iocstate ioc_state;
+ u64 base_info;
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
+ ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
+ ioc_status, ioc_config, base_info);
+
+ /* The timeout value is in units of 2 seconds; convert it to seconds */
+ mrioc->ready_timeout =
+ ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
+ MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
+
+ ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc, "controller is in %s state during detection\n",
+ mpi3mr_iocstate_name(ioc_state));
+
+ if (ioc_state == MRIOC_STATE_BECOMING_READY ||
+ ioc_state == MRIOC_STATE_RESET_REQUESTED) {
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ msleep(100);
+ } while (--timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc,
+ "controller is in %s state after waiting to reset\n",
+ mpi3mr_iocstate_name(ioc_state));
+ }
+
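+ /*
+ * Bring the controller down to the RESET state first: a READY
+ * controller gets a message unit reset, and anything still not in
+ * RESET after that falls back to a soft reset.
+ */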
+ if (ioc_state == MRIOC_STATE_READY) {
+ ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
+ retval = mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_BRINGUP);
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (retval)
+ ioc_err(mrioc,
+ "message unit reset failed with error %d current state %s\n",
+ retval, mpi3mr_iocstate_name(ioc_state));
+ }
+ if (ioc_state != MRIOC_STATE_RESET) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ ioc_err(mrioc,
+ "soft reset failed with error %d\n", retval);
+ goto out_failed;
+ }
+ }
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state != MRIOC_STATE_RESET) {
+ ioc_err(mrioc,
+ "cannot bring controller to reset state, current state: %s\n",
+ mpi3mr_iocstate_name(ioc_state));
+ goto out_failed;
+ }
+ mpi3mr_clear_reset_history(mrioc);
+ retval = mpi3mr_setup_admin_qpair(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to setup admin queues: error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ ioc_info(mrioc, "bringing controller to ready state\n");
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
timeout = mrioc->ready_timeout * 10;
do {
- current_state = mpi3mr_get_iocstate(mrioc);
- if (current_state == MRIOC_STATE_READY)
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_READY) {
+ ioc_info(mrioc,
+ "successfully transitioned to %s state\n",
+ mpi3mr_iocstate_name(ioc_state));
return 0;
+ }
msleep(100);
} while (--timeout);
- return -1;
+out_failed:
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_err(mrioc,
+ "failed to bring to ready state, current state: %s\n",
+ mpi3mr_iocstate_name(ioc_state));
+ return retval;
}
/**
@@ -1026,7 +1271,6 @@ static inline bool
mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
{
if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
- (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
(ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
return true;
return false;
@@ -1049,8 +1293,10 @@ static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
return false;
fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
- if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
+ if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
+ mpi3mr_print_fault_info(mrioc);
return true;
+ }
return false;
}
@@ -1089,26 +1335,36 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
u32 reset_reason)
{
int retval = -1;
- u8 unlock_retry_count, reset_retry_count = 0;
- u32 host_diagnostic, timeout, ioc_status, ioc_config;
+ u8 unlock_retry_count = 0;
+ u32 host_diagnostic, ioc_status, ioc_config;
+ u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
- pci_cfg_access_lock(mrioc->pdev);
if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
(reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
- goto out;
+ return retval;
if (mrioc->unrecoverable)
- goto out;
-retry_reset:
- unlock_retry_count = 0;
+ return retval;
+ if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
+ retval = 0;
+ return retval;
+ }
+
+ ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ mpi3mr_reset_rc_name(reset_reason), reset_reason);
+
mpi3mr_clear_reset_history(mrioc);
do {
ioc_info(mrioc,
"Write magic sequence to unlock host diag register (retry=%d)\n",
++unlock_retry_count);
if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
- writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ ioc_err(mrioc,
+ "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ host_diagnostic);
mrioc->unrecoverable = 1;
- goto out;
+ return retval;
}
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
@@ -1133,31 +1389,26 @@ retry_reset:
} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
- ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
- mpi3mr_reset_type_name(reset_type),
- mpi3mr_reset_rc_name(reset_reason), reset_reason);
writel(host_diagnostic | reset_type,
&mrioc->sysif_regs->host_diagnostic);
- timeout = mrioc->ready_timeout * 10;
- if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
+ switch (reset_type) {
+ case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
do {
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- if (ioc_status &
- MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
+ ioc_config =
+ readl(&mrioc->sysif_regs->ioc_configuration);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) &&
+ mpi3mr_soft_reset_success(ioc_status, ioc_config)) {
mpi3mr_clear_reset_history(mrioc);
- ioc_config =
- readl(&mrioc->sysif_regs->ioc_configuration);
- if (mpi3mr_soft_reset_success(ioc_status,
- ioc_config)) {
- retval = 0;
- break;
- }
+ retval = 0;
+ break;
}
msleep(100);
} while (--timeout);
- writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
- &mrioc->sysif_regs->write_sequence);
- } else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
+ mpi3mr_print_fault_info(mrioc);
+ break;
+ case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
do {
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
@@ -1166,28 +1417,22 @@ retry_reset:
}
msleep(100);
} while (--timeout);
- mpi3mr_clear_reset_history(mrioc);
- writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
- &mrioc->sysif_regs->write_sequence);
- }
- if (retval && ((++reset_retry_count) < MPI3MR_MAX_RESET_RETRY_COUNT)) {
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
- ioc_info(mrioc,
- "Base IOC Sts/Config after reset try %d is (0x%x)/(0x%x)\n",
- reset_retry_count, ioc_status, ioc_config);
- goto retry_reset;
+ break;
+ default:
+ break;
}
-out:
- pci_cfg_access_unlock(mrioc->pdev);
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_info(mrioc,
- "Base IOC Sts/Config after %s reset is (0x%x)/(0x%x)\n",
- (!retval) ? "successful" : "failed", ioc_status,
+ "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
+ (!retval)?"successful":"failed", ioc_status,
ioc_config);
+ if (retval)
+ mrioc->unrecoverable = 1;
return retval;
}
@@ -1278,7 +1523,7 @@ static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
}
} else
- size = mrioc->req_qinfo[q_idx].num_requests *
+ size = mrioc->req_qinfo[q_idx].segment_qd *
mrioc->facts.op_req_sz;
for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
@@ -1351,10 +1596,11 @@ static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
struct mpi3_delete_reply_queue_request delq_req;
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
int retval = 0;
u16 reply_qid = 0, midx;
- reply_qid = mrioc->op_reply_qinfo[qidx].qid;
+ reply_qid = op_reply_q->qid;
midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
@@ -1364,6 +1610,9 @@ static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
goto out;
}
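+ /* Keep the per-type (default/poll) queue accounting in sync */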
+ (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
+ mrioc->active_poll_qcount--;
+
memset(&delq_req, 0, sizeof(delq_req));
mutex_lock(&mrioc->init_cmds.mutex);
if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
@@ -1389,13 +1638,9 @@ static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue DelRepQ: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "delete reply queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
- mrioc->unrecoverable = 1;
-
retval = -1;
goto out_unlock;
}
@@ -1565,6 +1810,8 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
reply_qid = qidx + 1;
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ if (!mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
atomic_set(&op_reply_q->pend_ios, 0);
@@ -1592,8 +1839,26 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
create_req.queue_id = cpu_to_le16(reply_qid);
- create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
- create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index);
+
+ if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
+ op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
+ else
+ op_reply_q->qtype = MPI3MR_POLL_QUEUE;
+
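+ /*
+ * Default queues are interrupt driven; all poll queues share the
+ * last MSI-X vector, which is left disabled because poll queue
+ * completions are reaped through mpi3mr_blk_mq_poll().
+ */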
+ if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
+ create_req.flags =
+ MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
+ create_req.msix_index =
+ cpu_to_le16(mrioc->intr_info[midx].msix_index);
+ } else {
+ create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
+ ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
+ reply_qid, midx);
+ if (!mrioc->active_poll_qcount)
+ disable_irq_nosync(pci_irq_vector(mrioc->pdev,
+ mrioc->intr_info_count - 1));
+ }
+
if (mrioc->enable_segqueue) {
create_req.flags |=
MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
@@ -1615,12 +1880,9 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "CreateRepQ: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "create reply queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
- mrioc->unrecoverable = 1;
retval = -1;
goto out_unlock;
}
@@ -1634,7 +1896,11 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
goto out_unlock;
}
op_reply_q->qid = reply_qid;
- mrioc->intr_info[midx].op_reply_q = op_reply_q;
+ if (midx < mrioc->intr_info_count)
+ mrioc->intr_info[midx].op_reply_q = op_reply_q;
+
+ (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
+ mrioc->active_poll_qcount++;
out_unlock:
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
@@ -1722,12 +1988,9 @@ static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "CreateReqQ: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- if (mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
- MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT))
- mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "create request queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
retval = -1;
goto out_unlock;
}
@@ -1771,8 +2034,13 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
mrioc->intr_info_count - mrioc->op_reply_q_offset;
if (!mrioc->num_queues)
mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
- num_queues = mrioc->num_queues;
- ioc_info(mrioc, "Trying to create %d Operational Q pairs\n",
+ /*
+ * During a reset, set num_queues to the number of queues
+ * that were configured before the reset.
+ */
+ num_queues = mrioc->num_op_reply_q ?
+ mrioc->num_op_reply_q : mrioc->num_queues;
+ ioc_info(mrioc, "trying to create %d operational queue pairs\n",
num_queues);
if (!mrioc->req_qinfo) {
@@ -1814,8 +2082,10 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
goto out_failed;
}
mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
- ioc_info(mrioc, "Successfully created %d Operational Q pairs\n",
- mrioc->num_op_reply_q);
+ ioc_info(mrioc,
+ "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
+ mrioc->num_op_reply_q, mrioc->default_qcount,
+ mrioc->active_poll_qcount);
return retval;
out_failed:
@@ -1863,7 +2133,7 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
if (mpi3mr_check_req_qfull(op_req_q)) {
midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
reply_qidx, mrioc->op_reply_q_offset);
- mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]);
+ mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
if (mpi3mr_check_req_qfull(op_req_q)) {
retval = -EAGAIN;
@@ -1901,6 +2171,42 @@ out:
}
/**
+ * mpi3mr_check_rh_fault_ioc - check reset history and fault
+ * controller
+ * @mrioc: Adapter instance reference
+ * @reason_code: reason code for the fault.
+ *
+ * This routine saves a snapdump and faults the controller with
+ * the given reason code if it is not already faulted or
+ * asynchronously reset. It is used to handle initialization-time
+ * faults/resets/timeouts, where an immediate soft reset
+ * invocation is not required.
+ *
+ * Return: None.
+ */
+void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
+{
+ u32 ioc_status, host_diagnostic, timeout;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ return;
+ }
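+ /*
+ * Trigger a diag fault so the firmware captures a snapdump, then
+ * wait for the diag save to complete.
+ */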
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ reason_code);
+ timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+ do {
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ msleep(100);
+ } while (--timeout);
+}
+
+/**
* mpi3mr_sync_timestamp - Issue time stamp sync request
* @mrioc: Adapter reference
*
@@ -1945,8 +2251,9 @@ static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
mrioc->init_cmds.is_waiting = 0;
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
retval = -1;
goto out_unlock;
}
@@ -1969,6 +2276,91 @@ out:
}
/**
+ * mpi3mr_print_pkg_ver - display controller fw package version
+ * @mrioc: Adapter reference
+ *
+ * Retrieve firmware package version from the component image
+ * header of the controller flash and display it.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_ci_upload_request ci_upload;
+ int retval = -1;
+ void *data = NULL;
+ dma_addr_t data_dma;
+ struct mpi3_ci_manifest_mpi *manifest;
+ u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memset(&ci_upload, 0, sizeof(ci_upload));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ ioc_err(mrioc, "sending get package version failed due to command in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
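+ /*
+ * Request a component image upload of just the manifest from the
+ * primary flash location; the manifest carries the firmware
+ * package version reported below.
+ */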
+ ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
+ ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
+ ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
+ ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
+ ci_upload.segment_size = cpu_to_le32(data_len);
+
+ mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
+ data_dma);
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
+ sizeof(ci_upload), 1);
+ if (retval) {
+ ioc_err(mrioc, "posting get package version failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "get package version timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ == MPI3_IOCSTATUS_SUCCESS) {
+ manifest = (struct mpi3_ci_manifest_mpi *) data;
+ if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
+ ioc_info(mrioc,
+ "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
+ manifest->package_version.gen_major,
+ manifest->package_version.gen_minor,
+ manifest->package_version.phase_major,
+ manifest->package_version.phase_minor,
+ manifest->package_version.customer_id,
+ manifest->package_version.build_num);
+ }
+ }
+ retval = 0;
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (data)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, data,
+ data_dma);
+ return retval;
+}
+
+/**
* mpi3mr_watchdog_work - watchdog thread to monitor faults
* @work: work struct
*
@@ -1984,50 +2376,66 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
container_of(work, struct mpi3mr_ioc, watchdog_work.work);
unsigned long flags;
enum mpi3mr_iocstate ioc_state;
- u32 fault, host_diagnostic;
+ u32 fault, host_diagnostic, ioc_status;
+ u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
+
+ if (mrioc->reset_in_progress || mrioc->unrecoverable)
+ return;
if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
}
+ if ((mrioc->prepare_for_reset) &&
+ ((mrioc->prepare_for_reset_timeout_counter++) >=
+ MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
+ return;
+ }
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
+ mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
+ return;
+ }
+
/*Check for fault state every one second and issue Soft reset*/
ioc_state = mpi3mr_get_iocstate(mrioc);
- if (ioc_state == MRIOC_STATE_FAULT) {
- fault = readl(&mrioc->sysif_regs->fault) &
- MPI3_SYSIF_FAULT_CODE_MASK;
- host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
- if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
- if (!mrioc->diagsave_timeout) {
- mpi3mr_print_fault_info(mrioc);
- ioc_warn(mrioc, "Diag save in progress\n");
- }
- if ((mrioc->diagsave_timeout++) <=
- MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
- goto schedule_work;
- } else
- mpi3mr_print_fault_info(mrioc);
- mrioc->diagsave_timeout = 0;
+ if (ioc_state != MRIOC_STATE_FAULT)
+ goto schedule_work;
- if (fault == MPI3_SYSIF_FAULT_CODE_FACTORY_RESET) {
- ioc_info(mrioc,
- "Factory Reset fault occurred marking controller as unrecoverable"
- );
- mrioc->unrecoverable = 1;
- goto out;
+ fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
+ if (!mrioc->diagsave_timeout) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_warn(mrioc, "diag save in progress\n");
}
+ if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
+ goto schedule_work;
+ }
- if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) ||
- (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) ||
- (mrioc->reset_in_progress))
- goto out;
- if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
- else
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_FAULT_WATCH, 0);
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->diagsave_timeout = 0;
+
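+ /* Map the fault code to a reset reason; some fault codes are terminal */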
+ switch (fault) {
+ case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
+ ioc_info(mrioc,
+ "controller requires system power cycle, marking controller as unrecoverable\n");
+ mrioc->unrecoverable = 1;
+ return;
+ case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
+ return;
+ case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
+ reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
+ break;
+ default:
+ break;
}
+ mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
+ return;
schedule_work:
spin_lock_irqsave(&mrioc->watchdog_lock, flags);
@@ -2036,7 +2444,6 @@ schedule_work:
&mrioc->watchdog_work,
msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
-out:
return;
}
@@ -2097,41 +2504,6 @@ void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
}
/**
- * mpi3mr_kill_ioc - Kill the controller
- * @mrioc: Adapter instance reference
- * @reason: reason for the failure.
- *
- * If fault debug is enabled, display the fault info else issue
- * diag fault and freeze the system for controller debug
- * purpose.
- *
- * Return: Nothing.
- */
-static void mpi3mr_kill_ioc(struct mpi3mr_ioc *mrioc, u32 reason)
-{
- enum mpi3mr_iocstate ioc_state;
-
- if (!mrioc->fault_dbg)
- return;
-
- dump_stack();
-
- ioc_state = mpi3mr_get_iocstate(mrioc);
- if (ioc_state == MRIOC_STATE_FAULT)
- mpi3mr_print_fault_info(mrioc);
- else {
- ioc_err(mrioc, "Firmware is halted due to the reason %d\n",
- reason);
- mpi3mr_diagfault_reset_handler(mrioc, reason);
- }
- if (mrioc->fault_dbg == 2)
- for (;;)
- ;
- else
- panic("panic in %s\n", __func__);
-}
-
-/**
* mpi3mr_setup_admin_qpair - Setup admin queue pair
* @mrioc: Adapter instance reference
*
@@ -2258,12 +2630,9 @@ static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue IOCFacts: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "ioc_facts timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
- mrioc->unrecoverable = 1;
retval = -1;
goto out_unlock;
}
@@ -2277,6 +2646,7 @@ static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
goto out_unlock;
}
memcpy(facts_data, (u8 *)data, data_len);
+ mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
mutex_unlock(&mrioc->init_cmds.mutex);
@@ -2374,14 +2744,13 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
- mrioc->facts.max_pds = le16_to_cpu(facts_data->max_pds);
mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
- mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_advanced_host_pds);
- mrioc->facts.max_raidpds = le16_to_cpu(facts_data->max_raid_pds);
+ mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
+ mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
mrioc->facts.max_pcie_switches =
- le16_to_cpu(facts_data->max_pc_ie_switches);
+ le16_to_cpu(facts_data->max_pcie_switches);
mrioc->facts.max_sasexpanders =
le16_to_cpu(facts_data->max_sas_expanders);
mrioc->facts.max_sasinitiators =
@@ -2415,22 +2784,15 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
ioc_info(mrioc,
- "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
+ "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
- mrioc->facts.max_pds, mrioc->facts.max_msix_vectors,
- mrioc->facts.max_perids);
+ mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
mrioc->facts.sge_mod_shift);
ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
mrioc->facts.dma_mask, (facts_flags &
MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
-
- mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
-
- if (reset_devices)
- mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
- MPI3MR_HOST_IOS_KDUMP);
}
/**
@@ -2446,23 +2808,29 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
int retval = 0;
u32 sz, i;
- dma_addr_t phy_addr;
if (mrioc->init_cmds.reply)
- goto post_reply_sbuf;
+ return retval;
- mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
+ mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
if (!mrioc->init_cmds.reply)
goto out_failed;
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
- mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz,
+ mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
GFP_KERNEL);
if (!mrioc->dev_rmhs_cmds[i].reply)
goto out_failed;
}
- mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
+ GFP_KERNEL);
+ if (!mrioc->evtack_cmds[i].reply)
+ goto out_failed;
+ }
+
+ mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
if (!mrioc->host_tm_cmds.reply)
goto out_failed;
@@ -2482,13 +2850,21 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->devrem_bitmap)
goto out_failed;
+ mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
+ if (MPI3MR_NUM_EVTACKCMD % 8)
+ mrioc->evtack_cmds_bitmap_sz++;
+ mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
+ GFP_KERNEL);
+ if (!mrioc->evtack_cmds_bitmap)
+ goto out_failed;
+
mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
/* reply buffer pool, 16 byte align */
- sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
+ sz = mrioc->num_reply_bufs * mrioc->reply_sz;
mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
&mrioc->pdev->dev, sz, 16, 0);
if (!mrioc->reply_buf_pool) {
@@ -2517,7 +2893,7 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
goto out_failed;
/* sense buffer pool, 4 byte align */
- sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
&mrioc->pdev->dev, sz, 4, 0);
if (!mrioc->sense_buf_pool) {
@@ -2542,21 +2918,42 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->sense_buf_q)
goto out_failed;
-post_reply_sbuf:
- sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
+ return retval;
+
+out_failed:
+ retval = -1;
+ return retval;
+}
+
+/**
+ * mpimr_initialize_reply_sbuf_queues - initialize reply sense
+ * buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Helper function to initialize reply and sense buffers along
+ * with some debug prints.
+ *
+ * Return: None.
+ */
+static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
+{
+ u32 sz, i;
+ dma_addr_t phy_addr;
+
+ sz = mrioc->num_reply_bufs * mrioc->reply_sz;
ioc_info(mrioc,
"reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
- mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
+ mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
(sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
sz = mrioc->reply_free_qsz * 8;
ioc_info(mrioc,
"reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
(unsigned long long)mrioc->reply_free_q_dma);
- sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
ioc_info(mrioc,
"sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
- mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
+ mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
(sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
sz = mrioc->sense_buf_q_sz * 8;
ioc_info(mrioc,
@@ -2566,20 +2963,15 @@ post_reply_sbuf:
/* initialize Reply buffer Queue */
for (i = 0, phy_addr = mrioc->reply_buf_dma;
- i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
+ i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
mrioc->reply_free_q[i] = cpu_to_le64(0);
/* initialize Sense Buffer Queue */
for (i = 0, phy_addr = mrioc->sense_buf_dma;
- i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSEBUF_SZ)
+ i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
mrioc->sense_buf_q[i] = cpu_to_le64(0);
- return retval;
-
-out_failed:
- retval = -1;
- return retval;
}
/**
@@ -2606,6 +2998,8 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
retval = -1;
goto out;
}
+ mpimr_initialize_reply_sbuf_queues(mrioc);
+
drv_info->information_length = cpu_to_le32(data_len);
strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
@@ -2639,7 +3033,7 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
iocinit_req.reply_free_queue_address =
cpu_to_le64(mrioc->reply_free_q_dma);
- iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSEBUF_SZ);
+ iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
iocinit_req.sense_buffer_free_queue_depth =
cpu_to_le16(mrioc->sense_buf_q_sz);
iocinit_req.sense_buffer_free_queue_address =
@@ -2659,12 +3053,9 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
- mrioc->unrecoverable = 1;
- ioc_err(mrioc, "Issue IOCInit: command timed out\n");
+ ioc_err(mrioc, "ioc_init timed out\n");
retval = -1;
goto out_unlock;
}
@@ -2678,6 +3069,13 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
goto out_unlock;
}
+ mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+
+ mrioc->sbq_host_index = mrioc->num_sense_bufs;
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
mutex_unlock(&mrioc->init_cmds.mutex);
@@ -2755,12 +3153,9 @@ static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "event notification timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
- mrioc->unrecoverable = 1;
retval = -1;
goto out_unlock;
}
@@ -2782,17 +3177,17 @@ out:
}
/**
- * mpi3mr_send_event_ack - Send event acknowledgment
+ * mpi3mr_process_event_ack - Process event acknowledgment
* @mrioc: Adapter instance reference
* @event: MPI3 event ID
- * @event_ctx: Event context
+ * @event_ctx: event context
*
* Send event acknowledgment through admin queue and wait for
* it to complete.
*
* Return: 0 on success, non-zero on failures.
*/
-int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
u32 event_ctx)
{
struct mpi3_event_ack_request evtack_req;
@@ -2825,8 +3220,9 @@ int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
retval = -1;
goto out_unlock;
}
@@ -2863,6 +3259,9 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
u32 sz, i;
u16 num_chains;
+ if (mrioc->chain_sgl_list)
+ return retval;
+
num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
@@ -2966,29 +3365,28 @@ int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
goto out_unlock;
}
- if (!async) {
- wait_for_completion_timeout(&mrioc->init_cmds.done,
- (pe_timeout * HZ));
- if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue PortEnable: command timed out\n");
- retval = -1;
- mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
- MPI3MR_RESET_FROM_PE_TIMEOUT);
- mrioc->unrecoverable = 1;
- goto out_unlock;
- }
- mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
+ if (async) {
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
}
+
+ wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "port enable timed out\n");
+ retval = -1;
+ mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
+ goto out_unlock;
+ }
+ mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
+
out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
mutex_unlock(&mrioc->init_cmds.mutex);
out:
return retval;
}
-/* Protocol type to name mapper structure*/
+/* Protocol type to name mapper structure */
static const struct {
u8 protocol;
char *name;
@@ -3181,6 +3579,10 @@ int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
mrioc->sysif_regs, memap_sz);
ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
mrioc->msix_count);
+
+ if (!reset_devices && poll_queues > 0)
+ mrioc->requested_poll_qcount = min_t(int, poll_queues,
+ mrioc->msix_count - 2);
return retval;
out_failed:
@@ -3189,9 +3591,48 @@ out_failed:
}
/**
+ * mpi3mr_enable_events - Enable required events
+ * @mrioc: Adapter instance reference
+ *
+ * This routine unmasks the events required by the driver by
+ * sending the appropriate event mask bitmap through an event
+ * notification request.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 i;
+
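+ /* Mask all events first, then unmask only those the driver handles */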
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_TEMP_THRESHOLD);
+
+ retval = mpi3mr_issue_event_notification(mrioc);
+ if (retval)
+ ioc_err(mrioc, "failed to issue event notification %d\n",
+ retval);
+ return retval;
+}
+
+/**
* mpi3mr_init_ioc - Initialize the controller
* @mrioc: Adapter instance reference
- * @init_type: Flag to indicate is the init_type
*
* This the controller initialization routine, executed either
* after soft reset or from pci probe callback.
@@ -3204,227 +3645,240 @@ out_failed:
*
* Return: 0 on success and non-zero on failure.
*/
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
int retval = 0;
- enum mpi3mr_iocstate ioc_state;
- u64 base_info;
- u32 timeout;
- u32 ioc_status, ioc_config, i;
+ u8 retry = 0;
struct mpi3_ioc_facts_data facts_data;
- mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
- mrioc->change_count = 0;
- if (init_type == MPI3MR_IT_INIT) {
- mrioc->cpu_count = num_online_cpus();
- retval = mpi3mr_setup_resources(mrioc);
- if (retval) {
- ioc_err(mrioc, "Failed to setup resources:error %d\n",
- retval);
- goto out_nocleanup;
- }
+retry_init:
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
+ retval);
+ goto out_failed_noretry;
}
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ retval = mpi3mr_setup_isr(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup ISR error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
- ioc_info(mrioc, "SOD status %x configuration %x\n",
- ioc_status, ioc_config);
+ retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
+ retval);
+ goto out_failed;
+ }
- base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
- ioc_info(mrioc, "SOD base_info %llx\n", base_info);
+ mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
- /*The timeout value is in 2sec unit, changing it to seconds*/
- mrioc->ready_timeout =
- ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
- MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
+ if (reset_devices)
+ mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
+ MPI3MR_HOST_IOS_KDUMP);
- ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout);
+ mrioc->reply_sz = mrioc->facts.reply_sz;
- ioc_state = mpi3mr_get_iocstate(mrioc);
- ioc_info(mrioc, "IOC in %s state during detection\n",
- mpi3mr_iocstate_name(ioc_state));
+ retval = mpi3mr_check_reset_dma_mask(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Resetting dma mask failed %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
- if (ioc_state == MRIOC_STATE_BECOMING_READY ||
- ioc_state == MRIOC_STATE_RESET_REQUESTED) {
- timeout = mrioc->ready_timeout * 10;
- do {
- msleep(100);
- } while (--timeout);
+ mpi3mr_print_ioc_info(mrioc);
- ioc_state = mpi3mr_get_iocstate(mrioc);
- ioc_info(mrioc,
- "IOC in %s state after waiting for reset time\n",
- mpi3mr_iocstate_name(ioc_state));
+ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to allocated reply sense buffers %d\n",
+ __func__, retval);
+ goto out_failed_noretry;
}
- if (ioc_state == MRIOC_STATE_READY) {
- retval = mpi3mr_issue_and_process_mur(mrioc,
- MPI3MR_RESET_FROM_BRINGUP);
- if (retval) {
- ioc_err(mrioc, "Failed to MU reset IOC error %d\n",
- retval);
- }
- ioc_state = mpi3mr_get_iocstate(mrioc);
+ retval = mpi3mr_alloc_chain_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+ retval);
+ goto out_failed_noretry;
}
- if (ioc_state != MRIOC_STATE_RESET) {
- mpi3mr_print_fault_info(mrioc);
- retval = mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
- MPI3MR_RESET_FROM_BRINGUP);
- if (retval) {
- ioc_err(mrioc,
- "%s :Failed to soft reset IOC error %d\n",
- __func__, retval);
- goto out_failed;
- }
+
+ retval = mpi3mr_issue_iocinit(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
+ retval);
+ goto out_failed;
}
- ioc_state = mpi3mr_get_iocstate(mrioc);
- if (ioc_state != MRIOC_STATE_RESET) {
- retval = -1;
- ioc_err(mrioc, "Cannot bring IOC to reset state\n");
+
+ retval = mpi3mr_print_pkg_ver(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to get package version\n");
goto out_failed;
}
- retval = mpi3mr_setup_admin_qpair(mrioc);
+ retval = mpi3mr_setup_isr(mrioc, 0);
if (retval) {
- ioc_err(mrioc, "Failed to setup admin Qs: error %d\n",
+ ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_create_op_queues(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to create OpQueues error %d\n",
retval);
goto out_failed;
}
- retval = mpi3mr_bring_ioc_ready(mrioc);
+ retval = mpi3mr_enable_events(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
+ ioc_err(mrioc, "failed to enable events %d\n",
retval);
goto out_failed;
}
- if (init_type != MPI3MR_IT_RESET) {
+ ioc_info(mrioc, "controller initialization completed successfully\n");
+ return retval;
+out_failed:
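+ /*
+ * Retry initialization up to two times after scrubbing the driver
+ * buffers; persistent failures fall through and mark the
+ * controller unrecoverable via a diag fault.
+ */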
+ if (retry < 2) {
+ retry++;
+ ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
+ retry);
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
+out_failed_noretry:
+ ioc_err(mrioc, "controller initialization failed\n");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP);
+ mrioc->unrecoverable = 1;
+ return retval;
+}
+
+/**
+ * mpi3mr_reinit_ioc - Re-Initialize the controller
+ * @mrioc: Adapter instance reference
+ * @is_resume: Set when called from the resume path, clear otherwise
+ *
+ * This is the controller re-initialization routine, executed from
+ * the soft reset handler or the resume callback. It creates
+ * operational reply queue pairs, allocates the required memory for
+ * the reply pool and sense buffer pool, issues an IOC init request
+ * to the firmware, unmasks the events and issues port enable to
+ * discover SAS/SATA/NVMe devices and RAID volumes.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
+{
+ int retval = 0;
+ u8 retry = 0;
+ struct mpi3_ioc_facts_data facts_data;
+
+retry_init:
+ dprint_reset(mrioc, "bringing up the controller to ready state\n");
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to bring to ready state\n");
+ goto out_failed_noretry;
+ }
+
+ if (is_resume) {
+ dprint_reset(mrioc, "setting up single ISR\n");
retval = mpi3mr_setup_isr(mrioc, 1);
if (retval) {
- ioc_err(mrioc, "Failed to setup ISR error %d\n",
- retval);
- goto out_failed;
+ ioc_err(mrioc, "failed to setup ISR\n");
+ goto out_failed_noretry;
}
} else
mpi3mr_ioc_enable_intr(mrioc);
+ dprint_reset(mrioc, "getting ioc_facts\n");
retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
if (retval) {
- ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
- retval);
+ ioc_err(mrioc, "failed to get ioc_facts\n");
goto out_failed;
}
- mpi3mr_process_factsdata(mrioc, &facts_data);
- if (init_type == MPI3MR_IT_INIT) {
- retval = mpi3mr_check_reset_dma_mask(mrioc);
- if (retval) {
- ioc_err(mrioc, "Resetting dma mask failed %d\n",
- retval);
- goto out_failed;
- }
+ dprint_reset(mrioc, "validating ioc_facts\n");
+ retval = mpi3mr_revalidate_factsdata(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
+ goto out_failed_noretry;
}
mpi3mr_print_ioc_info(mrioc);
- retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ dprint_reset(mrioc, "sending ioc_init\n");
+ retval = mpi3mr_issue_iocinit(mrioc);
if (retval) {
- ioc_err(mrioc,
- "%s :Failed to allocated reply sense buffers %d\n",
- __func__, retval);
+ ioc_err(mrioc, "failed to send ioc_init\n");
goto out_failed;
}
- if (init_type == MPI3MR_IT_INIT) {
- retval = mpi3mr_alloc_chain_bufs(mrioc);
- if (retval) {
- ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
- retval);
- goto out_failed;
- }
- }
-
- retval = mpi3mr_issue_iocinit(mrioc);
+ dprint_reset(mrioc, "getting package version\n");
+ retval = mpi3mr_print_pkg_ver(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
- retval);
+ ioc_err(mrioc, "failed to get package version\n");
goto out_failed;
}
- mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
- writel(mrioc->reply_free_queue_host_index,
- &mrioc->sysif_regs->reply_free_host_index);
- mrioc->sbq_host_index = mrioc->num_sense_bufs;
- writel(mrioc->sbq_host_index,
- &mrioc->sysif_regs->sense_buffer_free_host_index);
-
- if (init_type != MPI3MR_IT_RESET) {
+ if (is_resume) {
+ dprint_reset(mrioc, "setting up multiple ISR\n");
retval = mpi3mr_setup_isr(mrioc, 0);
if (retval) {
- ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
- retval);
- goto out_failed;
+ ioc_err(mrioc, "failed to re-setup ISR\n");
+ goto out_failed_noretry;
}
}
+ dprint_reset(mrioc, "creating operational queue pairs\n");
retval = mpi3mr_create_op_queues(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to create OpQueues error %d\n",
- retval);
+ ioc_err(mrioc, "failed to create operational queue pairs\n");
goto out_failed;
}
- if ((init_type != MPI3MR_IT_INIT) &&
- (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
- retval = -1;
+ if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
ioc_err(mrioc,
- "Cannot create minimum number of OpQueues expected:%d created:%d\n",
+ "cannot create minimum number of operational queues expected:%d created:%d\n",
mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
- goto out_failed;
+ goto out_failed_noretry;
}
- for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
- mrioc->event_masks[i] = -1;
-
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
-
- retval = mpi3mr_issue_event_notification(mrioc);
+ dprint_reset(mrioc, "enabling events\n");
+ retval = mpi3mr_enable_events(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to issue event notification %d\n",
- retval);
+ ioc_err(mrioc, "failed to enable events\n");
goto out_failed;
}
- if (init_type != MPI3MR_IT_INIT) {
- ioc_info(mrioc, "Issuing Port Enable\n");
- retval = mpi3mr_issue_port_enable(mrioc, 0);
- if (retval) {
- ioc_err(mrioc, "Failed to issue port enable %d\n",
- retval);
- goto out_failed;
- }
+ ioc_info(mrioc, "sending port enable\n");
+ retval = mpi3mr_issue_port_enable(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "failed to issue port enable\n");
+ goto out_failed;
}
- return retval;
+ ioc_info(mrioc, "controller %s completed successfully\n",
+ (is_resume)?"resume":"re-initialization");
+ return retval;
out_failed:
- if (init_type == MPI3MR_IT_INIT)
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
- else
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_REINIT_FAILURE);
-out_nocleanup:
+ if (retry < 2) {
+ retry++;
+ ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
+ (is_resume)?"resume":"re-initialization", retry);
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
+out_failed_noretry:
+ ioc_err(mrioc, "controller %s is failed\n",
+ (is_resume)?"resume":"re-initialization");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP);
+ mrioc->unrecoverable = 1;
return retval;
}
@@ -3488,17 +3942,29 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
u16 i;
- memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
- memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
-
- memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
- memset(mrioc->host_tm_cmds.reply, 0,
- sizeof(*mrioc->host_tm_cmds.reply));
- for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
- memset(mrioc->dev_rmhs_cmds[i].reply, 0,
- sizeof(*mrioc->dev_rmhs_cmds[i].reply));
- memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
- memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ mrioc->change_count = 0;
+ mrioc->active_poll_qcount = 0;
+ mrioc->default_qcount = 0;
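+ /* Buffers may not exist yet on early failure paths; check before clearing */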
+ if (mrioc->admin_req_base)
+ memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
+ if (mrioc->admin_reply_base)
+ memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
+
+ if (mrioc->init_cmds.reply) {
+ memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+ memset(mrioc->host_tm_cmds.reply, 0,
+ sizeof(*mrioc->host_tm_cmds.reply));
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ memset(mrioc->dev_rmhs_cmds[i].reply, 0,
+ sizeof(*mrioc->dev_rmhs_cmds[i].reply));
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
+ memset(mrioc->evtack_cmds[i].reply, 0,
+ sizeof(*mrioc->evtack_cmds[i].reply));
+ memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ memset(mrioc->evtack_cmds_bitmap, 0,
+ mrioc->evtack_cmds_bitmap_sz);
+ }
for (i = 0; i < mrioc->num_queues; i++) {
mrioc->op_reply_qinfo[i].qid = 0;
@@ -3527,7 +3993,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
*
* Return: Nothing.
*/
-static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
u16 i;
struct mpi3mr_intr_info *intr_info;
@@ -3591,12 +4057,20 @@ static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
kfree(mrioc->host_tm_cmds.reply);
mrioc->host_tm_cmds.reply = NULL;
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ kfree(mrioc->evtack_cmds[i].reply);
+ mrioc->evtack_cmds[i].reply = NULL;
+ }
+
kfree(mrioc->removepend_bitmap);
mrioc->removepend_bitmap = NULL;
kfree(mrioc->devrem_bitmap);
mrioc->devrem_bitmap = NULL;
+ kfree(mrioc->evtack_cmds_bitmap);
+ mrioc->evtack_cmds_bitmap = NULL;
+
kfree(mrioc->chain_bitmap);
mrioc->chain_bitmap = NULL;
@@ -3663,7 +4137,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
- ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN;
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
@@ -3699,21 +4173,17 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
/**
* mpi3mr_cleanup_ioc - Cleanup controller
* @mrioc: Adapter instance reference
- * @reason: Cleanup reason
*
* controller cleanup handler, Message unit reset or soft reset
- * and shutdown notification is issued to the controller and the
- * associated memory resources are freed.
+ * and shutdown notification is issued to the controller.
*
* Return: Nothing.
*/
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
enum mpi3mr_iocstate ioc_state;
- if (reason == MPI3MR_COMPLETE_CLEANUP)
- mpi3mr_stop_watchdog(mrioc);
-
+ dprint_exit(mrioc, "cleaning up the controller\n");
mpi3mr_ioc_disable_intr(mrioc);
ioc_state = mpi3mr_get_iocstate(mrioc);
@@ -3725,15 +4195,9 @@ void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
mpi3mr_issue_reset(mrioc,
MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
MPI3MR_RESET_FROM_MUR_FAILURE);
-
- if (reason != MPI3MR_REINIT_FAILURE)
- mpi3mr_issue_ioc_shutdown(mrioc);
- }
-
- if (reason == MPI3MR_COMPLETE_CLEANUP) {
- mpi3mr_free_mem(mrioc);
- mpi3mr_cleanup_resources(mrioc);
+ mpi3mr_issue_ioc_shutdown(mrioc);
}
+ dprint_exit(mrioc, "controller cleanup completed\n");
}
/**
@@ -3782,41 +4246,11 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
cmdptr = &mrioc->dev_rmhs_cmds[i];
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
}
-}
-/**
- * mpi3mr_diagfault_reset_handler - Diag fault reset handler
- * @mrioc: Adapter instance reference
- * @reset_reason: Reset reason code
- *
- * This is an handler for issuing diag fault reset from the
- * applications through IOCTL path to stop the execution of the
- * controller
- *
- * Return: 0 on success, non-zero on failure.
- */
-int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
- u32 reset_reason)
-{
- int retval = 0;
-
- ioc_info(mrioc, "Entry: reason code: %s\n",
- mpi3mr_reset_rc_name(reset_reason));
- mrioc->reset_in_progress = 1;
-
- mpi3mr_ioc_disable_intr(mrioc);
-
- retval = mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
-
- if (retval) {
- ioc_err(mrioc, "The diag fault reset failed: reason %d\n",
- reset_reason);
- mpi3mr_ioc_enable_intr(mrioc);
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ cmdptr = &mrioc->evtack_cmds[i];
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
}
- ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
- mrioc->reset_in_progress = 0;
- return retval;
}
/**
@@ -3847,34 +4281,44 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
unsigned long flags;
u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
- if (mrioc->fault_dbg) {
- if (snapdump)
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_kill_ioc(mrioc, reset_reason);
- }
-
+ /* Block the reset handler until diag save is in progress */
+ dprint_reset(mrioc,
+ "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
+ mrioc->diagsave_timeout);
+ while (mrioc->diagsave_timeout)
+ ssleep(1);
/*
* Block new resets until the currently executing one is finished and
* return the status of the existing reset for all blocked resets
*/
+ dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
if (!mutex_trylock(&mrioc->reset_mutex)) {
- ioc_info(mrioc, "Another reset in progress\n");
- return -1;
+ ioc_info(mrioc,
+ "controller reset triggered by %s is blocked due to another reset in progress\n",
+ mpi3mr_reset_rc_name(reset_reason));
+ do {
+ ssleep(1);
+ } while (mrioc->reset_in_progress == 1);
+ ioc_info(mrioc,
+ "returning previous reset result(%d) for the reset triggered by %s\n",
+ mrioc->prev_reset_result,
+ mpi3mr_reset_rc_name(reset_reason));
+ return mrioc->prev_reset_result;
}
+ ioc_info(mrioc, "controller reset is triggered by %s\n",
+ mpi3mr_reset_rc_name(reset_reason));
+
mrioc->reset_in_progress = 1;
+ mrioc->prev_reset_result = -1;
if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
+ (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
(reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
mrioc->event_masks[i] = -1;
- retval = mpi3mr_issue_event_notification(mrioc);
-
- if (retval) {
- ioc_err(mrioc,
- "Failed to turn off events prior to reset %d\n",
- retval);
- }
+ dprint_reset(mrioc, "soft_reset_handler: masking events\n");
+ mpi3mr_issue_event_notification(mrioc);
}
mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
@@ -3904,15 +4348,20 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
goto out;
}
- mpi3mr_flush_delayed_rmhs_list(mrioc);
+ mpi3mr_flush_delayed_cmd_lists(mrioc);
mpi3mr_flush_drv_cmds(mrioc);
memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
mpi3mr_cleanup_fwevt_list(mrioc);
mpi3mr_flush_host_io(mrioc);
mpi3mr_invalidate_devhandles(mrioc);
+ if (mrioc->prepare_for_reset) {
+ mrioc->prepare_for_reset = 0;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ }
mpi3mr_memset_buffers(mrioc);
- retval = mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESET);
+ retval = mpi3mr_reinit_ioc(mrioc, 0);
if (retval) {
pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
mrioc->name, reset_reason);
@@ -3922,8 +4371,8 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
out:
if (!retval) {
+ mrioc->diagsave_timeout = 0;
mrioc->reset_in_progress = 0;
- scsi_unblock_requests(mrioc->shost);
mpi3mr_rfresh_tgtdevs(mrioc);
mrioc->ts_update_counter = 0;
spin_lock_irqsave(&mrioc->watchdog_lock, flags);
@@ -3939,8 +4388,9 @@ out:
mrioc->reset_in_progress = 0;
retval = -1;
}
-
+ mrioc->prev_reset_result = retval;
mutex_unlock(&mrioc->reset_mutex);
- ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
+ ioc_info(mrioc, "controller reset is %s\n",
+ ((retval == 0) ? "successful" : "failed"));
return retval;
}
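The trylock dance above serializes concurrent resets: a caller that loses the race simply waits for the in-flight reset to finish and reuses its recorded result. A rough pthreads analog (names invented; note the kernel's mutex_trylock() returns nonzero on success while pthread_mutex_trylock() returns 0):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t reset_mutex = PTHREAD_MUTEX_INITIALIZER;
static volatile int reset_in_progress;
static volatile int prev_reset_result = -1;

static int soft_reset(void)
{
	if (pthread_mutex_trylock(&reset_mutex)) {
		/* Lost the race: wait out the in-flight reset. */
		while (reset_in_progress)
			usleep(1000);
		return prev_reset_result;
	}
	reset_in_progress = 1;
	/* ... the actual reset work would run here ... */
	prev_reset_result = 0;
	reset_in_progress = 0;
	pthread_mutex_unlock(&reset_mutex);
	return prev_reset_result;
}

int main(void)
{
	printf("reset result: %d\n", soft_reset());
	return 0;
}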
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index fe10f257b5a4..284117da9086 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -34,6 +34,9 @@ MODULE_PARM_DESC(logging_level,
" bits for enabling additional logging info (default=0)");
/* Forward declarations*/
+static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
+
/**
* mpi3mr_host_tag_for_scmd - Get host tag for a scmd
* @mrioc: Adapter instance reference
@@ -418,6 +421,74 @@ out:
}
/**
+ * mpi3mr_count_dev_pending - Count commands pending for a LUN
+ * @rq: Block request
+ * @data: SCSI device reference
+ * @reserved: Unused
+ *
+ * This is an iterator function called for each SCSI command in
+ * a host; if the command is pending in the LLD for the specific
+ * device (LUN), the device-specific pending I/O counter in the
+ * device structure is incremented.
+ *
+ * Return: true always.
+ */
+
+static bool mpi3mr_count_dev_pending(struct request *rq,
+ void *data, bool reserved)
+{
+ struct scsi_device *sdev = (struct scsi_device *)data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+ if (scmd->device == sdev)
+ sdev_priv_data->pend_count++;
+ }
+
+out:
+ return true;
+}
+
+/**
+ * mpi3mr_count_tgt_pending - Count commands pending for a target
+ * @rq: Block request
+ * @data: SCSI target reference
+ * @reserved: Unused
+ *
+ * This is an iterator function called for each SCSI command in
+ * a host; if the command is pending in the LLD for the specific
+ * target, the target-specific pending I/O counter in the target
+ * structure is incremented.
+ *
+ * Return: true always.
+ */
+
+static bool mpi3mr_count_tgt_pending(struct request *rq,
+ void *data, bool reserved)
+{
+ struct scsi_target *starget = (struct scsi_target *)data;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+ if (scmd->device && (scsi_target(scmd->device) == starget))
+ stgt_priv_data->pend_count++;
+ }
+
+out:
+ return true;
+}
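Both iterators above follow the same shape: visit every outstanding request, skip the ones not owned by the LLD, and bump a counter scoped to the object passed in via @data. A rough userspace analog (the struct layouts are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct cmd {
	int device_id;
	bool in_lld_scope;
};

struct count_ctx {
	int device_id;
	int pend_count;
};

static bool count_dev_pending(struct cmd *c, void *data)
{
	struct count_ctx *ctx = data;

	if (c->in_lld_scope && c->device_id == ctx->device_id)
		ctx->pend_count++;
	return true; /* keep iterating, as the kernel iterator does */
}

int main(void)
{
	struct cmd cmds[] = {
		{ 1, true }, { 2, true }, { 1, false }, { 1, true },
	};
	struct count_ctx ctx = { .device_id = 1, .pend_count = 0 };

	for (unsigned int i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
		count_dev_pending(&cmds[i], &ctx);
	printf("pending for device %d: %d\n", ctx.device_id, ctx.pend_count);
	return 0;
}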
+
+/**
* mpi3mr_flush_host_io - Flush host I/Os
* @mrioc: Adapter instance reference
*
@@ -742,11 +813,18 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
switch (tgtdev->dev_type) {
case MPI3_DEVICE_DEVFORM_PCIE:
/*The block layer hw sector size = 512*/
- blk_queue_max_hw_sectors(sdev->request_queue,
- tgtdev->dev_spec.pcie_inf.mdts / 512);
- blk_queue_virt_boundary(sdev->request_queue,
- ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
-
+ if ((tgtdev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgtdev->dev_spec.pcie_inf.mdts / 512);
+ if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
+ else
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
+ }
break;
default:
break;
@@ -824,6 +902,17 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
}
+ switch (dev_pg0->access_status) {
+ case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI3_DEVICE0_ASTATUS_PREPARE:
+ case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
+ case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
+ break;
+ default:
+ tgtdev->is_hidden = 1;
+ break;
+ }
+
switch (tgtdev->dev_type) {
case MPI3_DEVICE_DEVFORM_SAS_SATA:
{
@@ -848,6 +937,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
&dev_pg0->device_specific.pcie_format;
u16 dev_info = le16_to_cpu(pcieinf->device_info);
+ tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
tgtdev->dev_spec.pcie_inf.capb =
le32_to_cpu(pcieinf->capabilities);
tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
@@ -858,14 +948,18 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
le32_to_cpu(pcieinf->maximum_data_transfer_size);
tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
tgtdev->dev_spec.pcie_inf.reset_to =
- pcieinf->controller_reset_to;
+ max_t(u8, pcieinf->controller_reset_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
tgtdev->dev_spec.pcie_inf.abort_to =
- pcieinf->nv_me_abort_to;
+ max_t(u8, pcieinf->nvme_abort_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
}
if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
- if ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
- MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
+ if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
+ ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
tgtdev->is_hidden = 1;
if (!mrioc->shost)
break;
@@ -1313,7 +1407,7 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
evt_ack:
if (fwevt->send_ack)
- mpi3mr_send_event_ack(mrioc, fwevt->event_id,
+ mpi3mr_process_event_ack(mrioc, fwevt->event_id,
fwevt->evt_ctx);
out:
/* Put fwevt reference count to neutralize kref_init increment */
@@ -1377,24 +1471,33 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
}
/**
- * mpi3mr_flush_delayed_rmhs_list - Flush pending commands
+ * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
* @mrioc: Adapter instance reference
*
- * Flush pending commands in the delayed removal handshake list
- * due to a controller reset or driver removal as a cleanup.
+ * Flush pending commands in the delayed lists due to a
+ * controller reset or driver removal as a cleanup.
*
* Return: Nothing
*/
-void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc)
+void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
{
struct delayed_dev_rmhs_node *_rmhs_node;
+ struct delayed_evt_ack_node *_evtack_node;
+ dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
while (!list_empty(&mrioc->delayed_rmhs_list)) {
_rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
struct delayed_dev_rmhs_node, list);
list_del(&_rmhs_node->list);
kfree(_rmhs_node);
}
+ dprint_reset(mrioc, "flushing delayed event ack commands\n");
+ while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
+ _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
+ struct delayed_evt_ack_node, list);
+ list_del(&_evtack_node->list);
+ kfree(_evtack_node);
+ }
}
/**
@@ -1611,6 +1714,141 @@ out_failed:
}
/**
+ * mpi3mr_complete_evt_ack - event ack request completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is the completion handler for a non-blocking event
+ * acknowledgment sent to the firmware; it issues any pending
+ * event acknowledgment request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+ struct delayed_evt_ack_node *delayed_evtack = NULL;
+
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_event_th(mrioc,
+ "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
+ (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ drv_cmd->ioc_loginfo);
+ }
+
+ if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
+ delayed_evtack =
+ list_entry(mrioc->delayed_evtack_cmds_list.next,
+ struct delayed_evt_ack_node, list);
+ mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
+ delayed_evtack->event_ctx);
+ list_del(&delayed_evtack->list);
+ kfree(delayed_evtack);
+ return;
+ }
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
+}
+
+/**
+ * mpi3mr_send_event_ack - Issue event acknowledgment request
+ * @mrioc: Adapter instance reference
+ * @event: MPI3 event id
+ * @cmdparam: Internal command tracker
+ * @event_ctx: event context
+ *
+ * Issues an event acknowledgment request to the firmware if
+ * there is a free command to send the event ack; otherwise the
+ * request is added to a pend list so that it is processed on
+ * completion of a prior event acknowledgment.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
+{
+ struct mpi3_event_ack_request evtack_req;
+ int retval = 0;
+ u8 retrycount = 5;
+ u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
+ struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
+ struct delayed_evt_ack_node *delayed_evtack = NULL;
+
+ if (drv_cmd) {
+ dprint_event_th(mrioc,
+ "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
+ event, event_ctx);
+ goto issue_cmd;
+ }
+ dprint_event_th(mrioc,
+ "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
+ event, event_ctx);
+ do {
+ cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
+ MPI3MR_NUM_EVTACKCMD);
+ if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
+ if (!test_and_set_bit(cmd_idx,
+ mrioc->evtack_cmds_bitmap))
+ break;
+ cmd_idx = MPI3MR_NUM_EVTACKCMD;
+ }
+ } while (retrycount--);
+
+ if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
+ delayed_evtack = kzalloc(sizeof(*delayed_evtack),
+ GFP_ATOMIC);
+ if (!delayed_evtack)
+ return;
+ INIT_LIST_HEAD(&delayed_evtack->list);
+ delayed_evtack->event = event;
+ delayed_evtack->event_ctx = event_ctx;
+ list_add_tail(&delayed_evtack->list,
+ &mrioc->delayed_evtack_cmds_list);
+ dprint_event_th(mrioc,
+ "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
+ event, event_ctx);
+ return;
+ }
+ drv_cmd = &mrioc->evtack_cmds[cmd_idx];
+
+issue_cmd:
+ cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+
+ memset(&evtack_req, 0, sizeof(evtack_req));
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ dprint_event_th(mrioc,
+ "sending event ack failed due to command in use\n");
+ goto out;
+ }
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_complete_evt_ack;
+ evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
+ evtack_req.event = event;
+ evtack_req.event_context = cpu_to_le32(event_ctx);
+ retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
+ sizeof(evtack_req), 1);
+ if (retval) {
+ dprint_event_th(mrioc,
+ "posting event ack request is failed\n");
+ goto out_failed;
+ }
+
+ dprint_event_th(mrioc,
+ "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
+ event, event_ctx);
+out:
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
+}
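The slot-reservation loop above is a lock-free claim on a command bitmap: scan for a clear bit, atomically test-and-set it, and retry a bounded number of times before deferring the request to a delayed list. A rough userspace analog using GCC atomics in place of the kernel bitops (sizes and names invented):

#include <stdio.h>

#define NUM_SLOTS 8

static unsigned long slot_bitmap;

static int reserve_slot(void)
{
	int retry = 5;

	do {
		for (int i = 0; i < NUM_SLOTS; i++) {
			if (slot_bitmap & (1UL << i))
				continue;
			/* Atomically claim the bit; loop again on a race. */
			if (!(__atomic_fetch_or(&slot_bitmap, 1UL << i,
						__ATOMIC_SEQ_CST) & (1UL << i)))
				return i;
		}
	} while (retry--);

	return -1; /* caller queues the request on a delayed list */
}

int main(void)
{
	for (int i = 0; i < NUM_SLOTS + 1; i++)
		printf("reserved slot: %d\n", reserve_slot());
	return 0;
}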
+
+/**
* mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -1819,6 +2057,40 @@ out:
}
/**
+ * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Blocks and unblocks host level I/O based on the reason code
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_prepare_for_reset *evtdata =
+ (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
+
+ if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
+ dprint_event_th(mrioc,
+ "prepare for reset event top half with rc=start\n");
+ if (mrioc->prepare_for_reset)
+ return;
+ mrioc->prepare_for_reset = 1;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
+ dprint_event_th(mrioc,
+ "prepare for reset top half with rc=abort\n");
+ mrioc->prepare_for_reset = 0;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ }
+ if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
+ == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
+ mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
+ le32_to_cpu(event_reply->event_context));
+}
+
+/**
* mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -1848,6 +2120,66 @@ static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_tempthreshold_evt_th - Temp threshold event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Displays temperature threshold event details and fault code
+ * if any is hit due to temperature exceeding threshold.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_tempthreshold_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_temp_threshold *evtdata =
+ (struct mpi3_event_data_temp_threshold *)event_reply->event_data;
+
+ ioc_err(mrioc, "Temperature threshold levels %s%s%s exceeded for sensor: %d !!! Current temperature in Celsius: %d\n",
+ (le16_to_cpu(evtdata->status) & 0x1) ? "Warning " : " ",
+ (le16_to_cpu(evtdata->status) & 0x2) ? "Critical " : " ",
+ (le16_to_cpu(evtdata->status) & 0x4) ? "Fatal " : " ", evtdata->sensor_num,
+ le16_to_cpu(evtdata->current_temperature));
+ mpi3mr_print_fault_info(mrioc);
+}
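The three status bits above map to warning/critical/fatal. A quick standalone check of that decoding (the sample value is made up):

#include <stdio.h>

int main(void)
{
	unsigned short status = 0x6; /* critical + fatal set */

	printf("levels %s%s%s exceeded\n",
	       (status & 0x1) ? "Warning " : "",
	       (status & 0x2) ? "Critical " : "",
	       (status & 0x4) ? "Fatal " : "");
	return 0;
}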
+
+/**
+ * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Displays cable management event details.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_cable_management *evtdata =
+ (struct mpi3_event_data_cable_management *)event_reply->event_data;
+
+ switch (evtdata->status) {
+ case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
+ {
+ ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
+ "Devices connected to this cable are not detected.\n"
+ "This cable requires %d mW of power.\n",
+ evtdata->receptacle_id,
+ le32_to_cpu(evtdata->active_cable_power_requirement));
+ break;
+ }
+ case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
+ {
+ ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
+ evtdata->receptacle_id);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
* mpi3mr_os_handle_events - Firmware event handler
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -1905,6 +2237,12 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
break;
}
+ case MPI3_EVENT_PREPARE_FOR_RESET:
+ {
+ mpi3mr_preparereset_evt_th(mrioc, event_reply);
+ ack_req = 0;
+ break;
+ }
case MPI3_EVENT_DEVICE_INFO_CHANGED:
{
process_evt_bh = 1;
@@ -1915,9 +2253,18 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
mpi3mr_energypackchg_evt_th(mrioc, event_reply);
break;
}
+ case MPI3_EVENT_TEMP_THRESHOLD:
+ {
+ mpi3mr_tempthreshold_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_CABLE_MGMT:
+ {
+ mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
+ break;
+ }
case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
case MPI3_EVENT_SAS_DISCOVERY:
- case MPI3_EVENT_CABLE_MGMT:
case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
case MPI3_EVENT_PCIE_ENUMERATION:
@@ -2520,49 +2867,63 @@ static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
}
/**
- * mpi3mr_print_response_code - print TM response as a string
- * @mrioc: Adapter instance reference
+ * mpi3mr_tm_response_name - get TM response as a string
* @resp_code: TM response code
*
- * Print TM response code as a readable string.
+ * Convert a known task management response code to a readable
+ * string.
*
- * Return: Nothing.
+ * Return: response code string.
*/
-static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
+static const char *mpi3mr_tm_response_name(u8 resp_code)
{
char *desc;
switch (resp_code) {
- case MPI3MR_RSP_TM_COMPLETE:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
desc = "task management request completed";
break;
- case MPI3MR_RSP_INVALID_FRAME:
+ case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
desc = "invalid frame";
break;
- case MPI3MR_RSP_TM_NOT_SUPPORTED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
desc = "task management request not supported";
break;
- case MPI3MR_RSP_TM_FAILED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
desc = "task management request failed";
break;
- case MPI3MR_RSP_TM_SUCCEEDED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
desc = "task management request succeeded";
break;
- case MPI3MR_RSP_TM_INVALID_LUN:
- desc = "invalid lun";
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
+ desc = "invalid LUN";
break;
- case MPI3MR_RSP_TM_OVERLAPPED_TAG:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
desc = "overlapped tag attempted";
break;
- case MPI3MR_RSP_IO_QUEUED_ON_IOC:
+ case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
desc = "task queued, however not sent to target";
break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
+ desc = "task management request denied by NVMe device";
+ break;
default:
desc = "unknown";
break;
}
- ioc_info(mrioc, "%s :response_code(0x%01x): %s\n", __func__,
- resp_code, desc);
+
+ return desc;
+}
+
+inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ int num_of_reply_queues =
+ mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
+
+ for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
+ mpi3mr_process_op_reply_q(mrioc,
+ mrioc->intr_info[i].op_reply_q);
}
/**
@@ -2572,9 +2933,10 @@ static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
* @handle: Device handle
* @lun: lun ID
* @htag: Host tag of the TM request
+ * @timeout: TM timeout value
* @drv_cmd: Internal command tracker
* @resp_code: Response code place holder
- * @cmd_priv: SCSI command private data
+ * @scmd: SCSI command
*
* Issues a Task Management Request to the controller for a
* specified target, lun and command and wait for its completion
@@ -2586,14 +2948,16 @@ static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
u16 handle, uint lun, u16 htag, ulong timeout,
struct mpi3mr_drv_cmd *drv_cmd,
- u8 *resp_code, struct scmd_priv *cmd_priv)
+ u8 *resp_code, struct scsi_cmnd *scmd)
{
struct mpi3_scsi_task_mgmt_request tm_req;
struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
int retval = 0;
struct mpi3mr_tgt_dev *tgtdev = NULL;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
- struct op_req_qinfo *op_req_q = NULL;
+ struct scmd_priv *cmd_priv = NULL;
+ struct scsi_device *sdev = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
__func__, tm_type, handle);
@@ -2630,16 +2994,21 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
- if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
- scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
- tgtdev->starget->hostdata;
- atomic_inc(&scsi_tgt_priv_data->block_io);
- }
- if (cmd_priv) {
- op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
- tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
- tm_req.task_request_queue_id = cpu_to_le16(op_req_q->qid);
+
+ if (scmd) {
+ sdev = scmd->device;
+ sdev_priv_data = sdev->hostdata;
+ scsi_tgt_priv_data = ((sdev_priv_data) ?
+ sdev_priv_data->tgt_priv_data : NULL);
+ } else {
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
}
+
+ if (scsi_tgt_priv_data)
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+
if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
timeout = tgtdev->dev_spec.pcie_inf.abort_to;
@@ -2656,39 +3025,49 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "%s :Issue TM: command timed out\n", __func__);
drv_cmd->is_waiting = 0;
retval = -1;
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
+ if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
+ dprint_tm(mrioc,
+ "task management request timed out after %ld seconds\n",
+ timeout);
+ if (mrioc->logging_level & MPI3_DEBUG_TM)
+ dprint_dump_req(&tm_req, sizeof(tm_req)/4);
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
+ }
goto out_unlock;
}
- if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
- tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
-
- if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
- ioc_err(mrioc,
- "%s :Issue TM: handle(0x%04x) Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
- __func__, handle, drv_cmd->ioc_status,
- drv_cmd->ioc_loginfo);
+ if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
+ dprint_tm(mrioc, "invalid task management reply message\n");
retval = -1;
goto out_unlock;
}
- if (!tm_reply) {
- ioc_err(mrioc, "%s :Issue TM: No TM Reply message\n", __func__);
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ switch (drv_cmd->ioc_status) {
+ case MPI3_IOCSTATUS_SUCCESS:
+ *resp_code = le32_to_cpu(tm_reply->response_data) &
+ MPI3MR_RI_MASK_RESPCODE;
+ break;
+ case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
+ *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
+ break;
+ default:
+ dprint_tm(mrioc,
+ "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
+ handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
retval = -1;
goto out_unlock;
}
- *resp_code = le32_to_cpu(tm_reply->response_data) &
- MPI3MR_RI_MASK_RESPCODE;
switch (*resp_code) {
- case MPI3MR_RSP_TM_SUCCEEDED:
- case MPI3MR_RSP_TM_COMPLETE:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
break;
- case MPI3MR_RSP_IO_QUEUED_ON_IOC:
+ case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
retval = -1;
break;
@@ -2697,14 +3076,37 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
break;
}
- ioc_info(mrioc,
- "%s :Issue TM: Completed TM type (0x%x) handle(0x%04x) ",
- __func__, tm_type, handle);
- ioc_info(mrioc,
- "with ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
- drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
- le32_to_cpu(tm_reply->termination_count));
- mpi3mr_print_response_code(mrioc, *resp_code);
+ dprint_tm(mrioc,
+ "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
+ tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count),
+ mpi3mr_tm_response_name(*resp_code), *resp_code);
+
+ if (!retval) {
+ mpi3mr_ioc_disable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
+ mpi3mr_ioc_enable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
+ }
+ switch (tm_type) {
+ case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (!scsi_tgt_priv_data)
+ break;
+ scsi_tgt_priv_data->pend_count = 0;
+ blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
+ mpi3mr_count_tgt_pending,
+ (void *)scsi_tgt_priv_data->starget);
+ break;
+ case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!sdev_priv_data)
+ break;
+ sdev_priv_data->pend_count = 0;
+ blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
+ mpi3mr_count_dev_pending, (void *)sdev);
+ break;
+ default:
+ break;
+ }
out_unlock:
drv_cmd->state = MPI3MR_CMD_NOTUSED;
@@ -2713,14 +3115,6 @@ out_unlock:
atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
if (tgtdev)
mpi3mr_tgtdev_put(tgtdev);
- if (!retval) {
- /*
- * Flush all IRQ handlers by calling synchronize_irq().
- * mpi3mr_ioc_disable_intr() takes care of it.
- */
- mpi3mr_ioc_disable_intr(mrioc);
- mpi3mr_ioc_enable_intr(mrioc);
- }
out:
return retval;
}
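The TM path above waits on a completion with a timeout and escalates to a soft reset when the firmware never answers. A rough pthreads sketch of that wait-with-timeout shape (the timeout value and firmware_sim() are invented):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int completed;

static void *firmware_sim(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	completed = 1;		/* the reply arrived */
	pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec ts;
	int rc = 0;

	pthread_create(&t, NULL, firmware_sim, NULL);
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 30;	/* TM timeout, in seconds */

	pthread_mutex_lock(&lock);
	while (!completed && rc == 0)
		rc = pthread_cond_timedwait(&done, &lock, &ts);
	pthread_mutex_unlock(&lock);

	if (!completed)
		printf("TM timed out -> escalate to soft reset\n");
	else
		printf("TM completed -> decode response code\n");
	pthread_join(t, NULL);
	return 0;
}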
@@ -2769,17 +3163,49 @@ static int mpi3mr_bios_param(struct scsi_device *sdev,
* mpi3mr_map_queues - Map queues callback handler
* @shost: SCSI host reference
*
- * Call the blk_mq_pci_map_queues with from which operational
- * queue the mapping has to be done
+ * Maps default and poll queues.
*
- * Return: return of blk_mq_pci_map_queues
+ * Return: zero.
*/
static int mpi3mr_map_queues(struct Scsi_Host *shost)
{
struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ int i, qoff, offset;
+ struct blk_mq_queue_map *map = NULL;
+
+ offset = mrioc->op_reply_q_offset;
+
+ for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
+ map = &shost->tag_set.map[i];
+
+ map->nr_queues = 0;
+
+ if (i == HCTX_TYPE_DEFAULT)
+ map->nr_queues = mrioc->default_qcount;
+ else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = mrioc->active_poll_qcount;
+
+ if (!map->nr_queues) {
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /*
+ * The poll queues don't have an IRQ (and hence no IRQ
+ * affinity), so use the regular blk-mq CPU mapping.
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL)
+ blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+ else
+ blk_mq_map_queues(map);
+
+ qoff += map->nr_queues;
+ offset += map->nr_queues;
+ }
+
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- mrioc->pdev, mrioc->op_reply_q_offset);
+ return 0;
}
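The remapped map_queues above hands out queue offsets in order: default queues first (PCI-affinity mapped), then poll queues (plain CPU mapped). A tiny standalone walk-through of that offset bookkeeping (counts invented):

#include <stdio.h>

enum { TYPE_DEFAULT, TYPE_POLL, TYPE_MAX };

int main(void)
{
	int default_qcount = 4, poll_qcount = 2;
	int qoff = 0;

	for (int i = 0; i < TYPE_MAX; i++) {
		int nr = (i == TYPE_DEFAULT) ? default_qcount : poll_qcount;

		if (!nr)
			continue;
		printf("map %d: queue_offset=%d nr_queues=%d (%s mapping)\n",
		       i, qoff, nr,
		       (i == TYPE_POLL) ? "cpu" : "pci-affinity");
		qoff += nr;
	}
	return 0;
}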
/**
@@ -2938,6 +3364,13 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
stgt_priv_data = sdev_priv_data->tgt_priv_data;
dev_handle = stgt_priv_data->dev_handle;
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
sdev_printk(KERN_INFO, scmd->device,
"Target Reset is issued to handle(0x%04x)\n",
dev_handle);
@@ -2945,15 +3378,22 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
ret = mpi3mr_issue_tm(mrioc,
MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
- MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL);
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
if (ret)
goto out;
+ if (stgt_priv_data->pend_count) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: target has %d pending commands, target reset is failed\n",
+ mrioc->name, sdev_priv_data->pend_count);
+ goto out;
+ }
+
retval = SUCCESS;
out:
sdev_printk(KERN_INFO, scmd->device,
- "Target reset is %s for scmd(%p)\n",
+ "%s: target reset is %s for scmd(%p)\n", mrioc->name,
((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
return retval;
@@ -2992,21 +3432,34 @@ static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
stgt_priv_data = sdev_priv_data->tgt_priv_data;
dev_handle = stgt_priv_data->dev_handle;
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
sdev_printk(KERN_INFO, scmd->device,
"Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
ret = mpi3mr_issue_tm(mrioc,
MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
- MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL);
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
if (ret)
goto out;
+ if (sdev_priv_data->pend_count) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device has %d pending commands, device(LUN) reset is failed\n",
+ mrioc->name, sdev_priv_data->pend_count);
+ goto out;
+ }
retval = SUCCESS;
out:
sdev_printk(KERN_INFO, scmd->device,
- "Device(lun) reset is %s for scmd(%p)\n",
+ "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
return retval;
@@ -3049,32 +3502,42 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost,
{
struct mpi3mr_ioc *mrioc = shost_priv(shost);
u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+ u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- if (time >= (pe_timeout * HZ)) {
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ ioc_err(mrioc, "port enable failed due to fault or reset\n");
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mrioc->scan_started = 0;
mrioc->init_cmds.is_waiting = 0;
mrioc->init_cmds.callback = NULL;
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
- ioc_err(mrioc, "%s :port enable request timed out\n", __func__);
- mrioc->is_driver_loading = 0;
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_PE_TIMEOUT, 1);
}
- if (mrioc->scan_failed) {
- ioc_err(mrioc,
- "%s :port enable failed with (ioc_status=0x%08x)\n",
- __func__, mrioc->scan_failed);
- mrioc->is_driver_loading = 0;
- mrioc->stop_drv_processing = 1;
- return 1;
+ if (time >= (pe_timeout * HZ)) {
+ ioc_err(mrioc, "port enable failed due to time out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mrioc->scan_started = 0;
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
}
if (mrioc->scan_started)
return 0;
- ioc_info(mrioc, "%s :port enable: SUCCESS\n", __func__);
+
+ if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "port enable failed with status=0x%04x\n",
+ mrioc->scan_failed);
+ } else
+ ioc_info(mrioc, "port enable is successfully completed\n");
+
mpi3mr_start_watchdog(mrioc);
mrioc->is_driver_loading = 0;
-
return 1;
}
@@ -3189,10 +3652,18 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev)
switch (tgt_dev->dev_type) {
case MPI3_DEVICE_DEVFORM_PCIE:
/*The block layer hw sector size = 512*/
- blk_queue_max_hw_sectors(sdev->request_queue,
- tgt_dev->dev_spec.pcie_inf.mdts / 512);
- blk_queue_virt_boundary(sdev->request_queue,
- ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
+ if ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgt_dev->dev_spec.pcie_inf.mdts / 512);
+ if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
+ else
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
+ }
break;
default:
break;
@@ -3312,9 +3783,22 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
struct scsi_cmnd *scmd)
{
unsigned char *buf;
- u16 param_len, desc_len;
-
- param_len = get_unaligned_be16(scmd->cmnd + 7);
+ u16 param_len, desc_len, trunc_param_len;
+
+ trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
+
+ if (mrioc->pdev->revision) {
+ if ((param_len > 24) && ((param_len - 8) & 0xF)) {
+ trunc_param_len -= (param_len - 8) & 0xF;
+ dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
+ dprint_scsi_err(mrioc,
+ "truncating param_len from (%d) to (%d)\n",
+ param_len, trunc_param_len);
+ put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
+ dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
+ }
+ return false;
+ }
if (!param_len) {
ioc_warn(mrioc,
@@ -3374,12 +3858,12 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
}
if (param_len > (desc_len + 8)) {
+ trunc_param_len = desc_len + 8;
scsi_print_command(scmd);
- ioc_warn(mrioc,
- "%s: Truncating param_len(%d) to desc_len+8(%d)\n",
- __func__, param_len, (desc_len + 8));
- param_len = desc_len + 8;
- put_unaligned_be16(param_len, scmd->cmnd + 7);
+ dprint_scsi_err(mrioc,
+ "truncating param_len(%d) to desc_len+8(%d)\n",
+ param_len, trunc_param_len);
+ put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
scsi_print_command(scmd);
}
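The truncation above rounds an UNMAP parameter-list length down so the part after the 8-byte header is a whole number of 16-byte block descriptors. The same arithmetic checked in isolation (values invented):

#include <stdio.h>

int main(void)
{
	unsigned short param_len = 56 + 6;	/* header + descriptors + misaligned tail */
	unsigned short trunc = param_len;

	if (param_len > 24 && ((param_len - 8) & 0xF))
		trunc -= (param_len - 8) & 0xF;
	printf("param_len %u -> %u\n", param_len, trunc);
	return 0;
}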
@@ -3434,6 +3918,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
u32 scsiio_flags = 0;
struct request *rq = scsi_cmd_to_rq(scmd);
int iprio_class;
+ u8 is_pcie_dev = 0;
sdev_priv_data = scmd->device->hostdata;
if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
@@ -3478,8 +3963,10 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
- if ((scmd->cmnd[0] == UNMAP) &&
- (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
+ if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
+ is_pcie_dev = 1;
+ if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
+ (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
mpi3mr_check_return_unmap(mrioc, scmd))
goto out;
@@ -3559,6 +4046,7 @@ static struct scsi_host_template mpi3mr_driver_template = {
.eh_host_reset_handler = mpi3mr_eh_host_reset,
.bios_param = mpi3mr_bios_param,
.map_queues = mpi3mr_map_queues,
+ .mq_poll = mpi3mr_blk_mq_poll,
.no_write_same = 1,
.can_queue = 1,
.this_id = -1,
@@ -3567,6 +4055,7 @@ static struct scsi_host_template mpi3mr_driver_template = {
*/
.max_sectors = 2048,
.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
+ .max_segment_size = 0xffffffff,
.track_queue_depth = 1,
.cmd_size = sizeof(struct scmd_priv),
};
@@ -3714,6 +4203,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&mrioc->fwevt_list);
INIT_LIST_HEAD(&mrioc->tgtdev_list);
INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
+ INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
mutex_init(&mrioc->reset_mutex);
mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
@@ -3772,21 +4262,29 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
retval = -ENODEV;
- goto out_fwevtthread_failed;
+ goto fwevtthread_failed;
}
mrioc->is_driver_loading = 1;
- if (mpi3mr_init_ioc(mrioc, MPI3MR_IT_INIT)) {
- ioc_err(mrioc, "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
+ mrioc->cpu_count = num_online_cpus();
+ if (mpi3mr_setup_resources(mrioc)) {
+ ioc_err(mrioc, "setup resources failed\n");
retval = -ENODEV;
- goto out_iocinit_failed;
+ goto resource_alloc_failed;
+ }
+ if (mpi3mr_init_ioc(mrioc)) {
+ ioc_err(mrioc, "initializing IOC failed\n");
+ retval = -ENODEV;
+ goto init_ioc_failed;
}
shost->nr_hw_queues = mrioc->num_op_reply_q;
+ if (mrioc->active_poll_qcount)
+ shost->nr_maps = 3;
+
shost->can_queue = mrioc->max_host_ios;
shost->sg_tablesize = MPI3MR_SG_DEPTH;
- shost->max_id = mrioc->facts.max_perids;
+ shost->max_id = mrioc->facts.max_perids + 1;
retval = scsi_add_host(shost, &pdev->dev);
if (retval) {
@@ -3799,10 +4297,14 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return retval;
addhost_failed:
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
-out_iocinit_failed:
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+init_ioc_failed:
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+resource_alloc_failed:
destroy_workqueue(mrioc->fwevt_worker_thread);
-out_fwevtthread_failed:
+fwevtthread_failed:
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
spin_unlock(&mrioc_list_lock);
@@ -3815,6 +4317,7 @@ shost_failed:
* mpi3mr_remove - PCI remove callback
* @pdev: PCI device instance
*
+ * Cleanup the IOC by issuing MUR and shutdown notification.
* Free up all memory and resources associated with the
* controller and target devices, unregister the shost.
*
@@ -3851,7 +4354,10 @@ static void mpi3mr_remove(struct pci_dev *pdev)
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
mpi3mr_tgtdev_put(tgtdev);
}
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
@@ -3891,7 +4397,10 @@ static void mpi3mr_shutdown(struct pci_dev *pdev)
spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
if (wq)
destroy_workqueue(wq);
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
+
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
}
#ifdef CONFIG_PM
@@ -3921,7 +4430,7 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
mpi3mr_cleanup_fwevt_list(mrioc);
scsi_block_requests(shost);
mpi3mr_stop_watchdog(mrioc);
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_SUSPEND);
+ mpi3mr_cleanup_ioc(mrioc);
device_state = pci_choose_state(pdev, state);
ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
@@ -3970,7 +4479,11 @@ static int mpi3mr_resume(struct pci_dev *pdev)
mrioc->stop_drv_processing = 0;
mpi3mr_memset_buffers(mrioc);
- mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESUME);
+ r = mpi3mr_reinit_ioc(mrioc, 1);
+ if (r) {
+ ioc_err(mrioc, "resuming controller failed[%d]\n", r);
+ return r;
+ }
scsi_unblock_requests(shost);
mpi3mr_start_watchdog(mrioc);
@@ -3980,8 +4493,8 @@ static int mpi3mr_resume(struct pci_dev *pdev)
static const struct pci_device_id mpi3mr_pci_id_table[] = {
{
- PCI_DEVICE_SUB(PCI_VENDOR_ID_LSI_LOGIC, 0x00A5,
- PCI_ANY_ID, PCI_ANY_ID)
+ PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
+ MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 81dab9b82f79..511726f92d9a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3086,6 +3086,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
+ unsigned int irq;
struct adapter_reply_queue *reply_q, *next;
if (list_empty(&ioc->reply_queue_list))
@@ -3098,9 +3099,10 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
continue;
}
- if (ioc->smp_affinity_enable)
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- reply_q->msix_index), NULL);
+ if (ioc->smp_affinity_enable) {
+ irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
+ irq_update_affinity_hint(irq, NULL);
+ }
free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
reply_q);
kfree(reply_q);
@@ -3167,18 +3169,15 @@ out:
* @ioc: per adapter object
*
* The enduser would need to set the affinity via /proc/irq/#/smp_affinity
- *
- * It would nice if we could call irq_set_affinity, however it is not
- * an exported symbol
*/
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
- unsigned int cpu, nr_cpus, nr_msix, index = 0;
+ unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
struct adapter_reply_queue *reply_q;
- int local_numa_node;
int iopoll_q_count = ioc->reply_queue_count -
ioc->iopoll_q_start_index;
+ const struct cpumask *mask;
if (!_base_is_controller_msix_enabled(ioc))
return;
@@ -3201,11 +3200,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
* corresponding to high iops queues.
*/
if (ioc->high_iops_queues) {
- local_numa_node = dev_to_node(&ioc->pdev->dev);
+ mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
for (index = 0; index < ioc->high_iops_queues;
index++) {
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- index), cpumask_of_node(local_numa_node));
+ irq = pci_irq_vector(ioc->pdev, index);
+ irq_set_affinity_and_hint(irq, mask);
}
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index a0af986633d2..949e98d523e2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "39.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 39
+#define MPT3SAS_DRIVER_VERSION "40.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 40
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 05b6c6a073c3..d92ca140d298 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3533,11 +3533,31 @@ diag_trigger_master_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_MASTER_TRIGGER_T *master_tg;
unsigned long flags;
ssize_t rc;
+ bool set = 1;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
+
+ if (ioc->supports_trigger_pages) {
+ master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T),
+ GFP_KERNEL);
+ if (!master_tg)
+ return -ENOMEM;
+
+ memcpy(master_tg, buf, rc);
+ if (!master_tg->MasterData)
+ set = 0;
+ if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg,
+ set)) {
+ kfree(master_tg);
+ return -EFAULT;
+ }
+ kfree(master_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
memset(&ioc->diag_trigger_master, 0,
sizeof(struct SL_WH_MASTER_TRIGGER_T));
memcpy(&ioc->diag_trigger_master, buf, rc);
@@ -3589,11 +3609,31 @@ diag_trigger_event_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_EVENT_TRIGGERS_T *event_tg;
unsigned long flags;
ssize_t sz;
+ bool set = 1;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!event_tg)
+ return -ENOMEM;
+
+ memcpy(event_tg, buf, sz);
+ if (!event_tg->ValidEntries)
+ set = 0;
+ if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg,
+ set)) {
+ kfree(event_tg);
+ return -EFAULT;
+ }
+ kfree(event_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
memset(&ioc->diag_trigger_event, 0,
sizeof(struct SL_WH_EVENT_TRIGGERS_T));
memcpy(&ioc->diag_trigger_event, buf, sz);
@@ -3644,11 +3684,31 @@ diag_trigger_scsi_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_SCSI_TRIGGERS_T *scsi_tg;
unsigned long flags;
ssize_t sz;
+ bool set = 1;
+
+ sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!scsi_tg)
+ return -ENOMEM;
+
+ memcpy(scsi_tg, buf, sz);
+ if (!scsi_tg->ValidEntries)
+ set = 0;
+ if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg,
+ set)) {
+ kfree(scsi_tg);
+ return -EFAULT;
+ }
+ kfree(scsi_tg);
+ }
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
- sz = min(sizeof(ioc->diag_trigger_scsi), count);
+
memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
memcpy(&ioc->diag_trigger_scsi, buf, sz);
if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
@@ -3698,11 +3758,30 @@ diag_trigger_mpi_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_MPI_TRIGGERS_T *mpi_tg;
unsigned long flags;
ssize_t sz;
+ bool set = 1;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!mpi_tg)
+ return -ENOMEM;
+
+ memcpy(mpi_tg, buf, sz);
+ if (!mpi_tg->ValidEntries)
+ set = 0;
+ if (mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg,
+ set)) {
+ kfree(mpi_tg);
+ return -EFAULT;
+ }
+ kfree(mpi_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
memset(&ioc->diag_trigger_mpi, 0,
sizeof(ioc->diag_trigger_mpi));
memcpy(&ioc->diag_trigger_mpi, buf, sz);
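All four diag_trigger stores above share one shape: bound the copy length, stage the user buffer in a scratch allocation, validate it, and only then commit it to the live copy under the lock. A rough userspace analog (the struct layout and the validity rule are invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct trigger { unsigned int valid_entries; char data[32]; };

static struct trigger live_trigger;
static pthread_mutex_t trigger_lock = PTHREAD_MUTEX_INITIALIZER;

static long trigger_store(const char *buf, size_t count)
{
	size_t sz = count < sizeof(struct trigger) ?
		    count : sizeof(struct trigger);
	struct trigger *tg = calloc(1, sizeof(*tg));

	if (!tg)
		return -1;
	memcpy(tg, buf, sz);
	if (tg->valid_entries > 8) {	/* reject bogus input early */
		free(tg);
		return -1;
	}
	pthread_mutex_lock(&trigger_lock);
	memset(&live_trigger, 0, sizeof(live_trigger));
	memcpy(&live_trigger, buf, sz);
	pthread_mutex_unlock(&trigger_lock);
	free(tg);
	return (long)sz;
}

int main(void)
{
	struct trigger in = { .valid_entries = 2 };

	printf("stored %ld bytes\n", trigger_store((char *)&in, sizeof(in)));
	return 0;
}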
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 31d1ea5a5dd2..1e52bc7febfa 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -67,8 +67,10 @@ static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
while (sha->sas_port[i]) {
if (sha->sas_port[i] == dev->port) {
+ spin_lock(&sha->sas_port[i]->phy_list_lock);
phy = container_of(sha->sas_port[i]->phy_list.next,
struct asd_sas_phy, port_phy_el);
+ spin_unlock(&sha->sas_port[i]->phy_list_lock);
j = 0;
while (sha->sas_phy[j]) {
if (sha->sas_phy[j] == phy)
@@ -96,6 +98,8 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
while (sha->sas_port[i]) {
if (sha->sas_port[i] == dev->port) {
struct asd_sas_phy *phy;
+
+ spin_lock(&sha->sas_port[i]->phy_list_lock);
list_for_each_entry(phy,
&sha->sas_port[i]->phy_list, port_phy_el) {
j = 0;
@@ -109,6 +113,7 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
num++;
n++;
}
+ spin_unlock(&sha->sas_port[i]->phy_list_lock);
break;
}
i++;
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 2a4506a5083e..71585528e8db 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1674,7 +1674,7 @@ static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
if (sdev->id > MYRB_MAX_TARGETS)
return -ENXIO;
- pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
if (!pdev_info)
return -ENOMEM;
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 6ea323e9a2e3..7eb8c39da366 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -538,13 +538,11 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->fwstat_buf = NULL;
goto out_free;
}
- cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
- GFP_KERNEL | GFP_DMA);
+ cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL);
if (!cs->ctlr_info)
goto out_free;
- cs->event_buf = kzalloc(sizeof(struct myrs_event),
- GFP_KERNEL | GFP_DMA);
+ cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL);
if (!cs->event_buf)
goto out_free;
@@ -1805,7 +1803,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
ldev_num = myrs_translate_ldev(cs, sdev);
- ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
+ ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
if (!ldev_info)
return -ENOMEM;
@@ -1867,7 +1865,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
} else {
struct myrs_pdev_info *pdev_info;
- pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
if (!pdev_info)
return -ENOMEM;
@@ -2269,7 +2267,8 @@ static void myrs_cleanup(struct myrs_hba *cs)
myrs_unmap(cs);
if (cs->mmio_base) {
- cs->disable_intr(cs);
+ if (cs->disable_intr)
+ cs->disable_intr(cs);
iounmap(cs->mmio_base);
cs->mmio_base = NULL;
}
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 8b9e889bc306..92c818a8a84a 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1557,6 +1557,9 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
data->MmioAddress = (unsigned long)
ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
+ if (!data->MmioAddress)
+ goto next_entry;
+
data->MmioLength = resource_size(p_dev->resource[2]);
}
/* If we got this far, we're cool! */
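The added NULL check above guards an ioremap() that can fail. A userspace analog of the same discipline using mmap() (whose failure sentinel is MAP_FAILED rather than NULL):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {	/* always test the mapping before using it */
		perror("mmap");
		return 1;
	}
	printf("mapped 4096 bytes at %p\n", p);
	munmap(p, 4096);
	return 0;
}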
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
index 02b7338999cc..bbb51b7312f1 100644
--- a/drivers/scsi/pm8001/Makefile
+++ b/drivers/scsi/pm8001/Makefile
@@ -6,9 +6,12 @@
obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
+
+CFLAGS_pm80xx_tracepoints.o := -I$(src)
+
pm80xx-y += pm8001_init.o \
pm8001_sas.o \
pm8001_ctl.o \
pm8001_hwi.o \
- pm80xx_hwi.o
-
+ pm80xx_hwi.o \
+ pm80xx_tracepoints.o
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 397eb9f6a1dd..41a63c9b719b 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -889,14 +889,6 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,
static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
-/**
- * ctl_mpi_state_show - controller MPI state check
- * @cdev: pointer to embedded class device
- * @buf: the buffer returned
- *
- * A sysfs 'read-only' shost attribute.
- */
-
static const char *const mpiStateText[] = {
"MPI is not initialized",
"MPI is successfully initialized",
@@ -904,6 +896,14 @@ static const char *const mpiStateText[] = {
"MPI initialization failed with error in [31:16]"
};
+/**
+ * ctl_mpi_state_show - controller MPI state check
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
static ssize_t ctl_mpi_state_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -920,11 +920,11 @@ static DEVICE_ATTR_RO(ctl_mpi_state);
/**
* ctl_hmi_error_show - controller MPI initialization fails
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_hmi_error_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -941,11 +941,11 @@ static DEVICE_ATTR_RO(ctl_hmi_error);
/**
* ctl_raae_count_show - controller raae count check
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_raae_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -962,11 +962,11 @@ static DEVICE_ATTR_RO(ctl_raae_count);
/**
* ctl_iop0_count_show - controller iop0 count check
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_iop0_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -983,11 +983,11 @@ static DEVICE_ATTR_RO(ctl_iop0_count);
/**
* ctl_iop1_count_show - controller iop1 count check
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_iop1_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 124cb69740c6..c814e5071712 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -42,6 +42,7 @@
#include "pm8001_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
+#include "pm80xx_tracepoints.h"
/**
* read_main_config_table - read the configure table and save it.
@@ -1324,8 +1325,14 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
unsigned long flags;
int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
int rv;
+ u32 htag = le32_to_cpu(*(__le32 *)payload);
+
+ trace_pm80xx_mpi_build_cmd(pm8001_ha->id, opCode, htag, q_index,
+ circularQ->producer_idx, le32_to_cpu(circularQ->consumer_index));
+
+ if (WARN_ON(q_index >= pm8001_ha->max_q_num))
+ return -EINVAL;
- WARN_ON(q_index >= PM8001_MAX_INB_NUM);
spin_lock_irqsave(&circularQ->iq_lock, flags);
rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
&pMessage);
@@ -2304,21 +2311,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
+ param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
if (!tag) {
pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
return;
}
+
ccb = &pm8001_ha->ccb_info[tag];
- param = le32_to_cpu(psataPayload->param);
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
- return;
- }
+ t = ccb->task;
+ pm8001_dev = ccb->device;
if (t) {
if (t->dev && (t->dev->lldd_dev))
@@ -2335,10 +2338,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
ts = &t->task_status;
- if (!ts) {
- pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
- return;
- }
if (status)
pm8001_dbg(pm8001_ha, IOERR,
@@ -2695,14 +2694,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 dev_id = le32_to_cpu(psataPayload->device_id);
unsigned long flags;
- ccb = &pm8001_ha->ccb_info[tag];
-
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
- }
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index fbfeb0b046dd..d8a2121cb8d9 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -179,7 +179,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
}
PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
flush_workqueue(pm8001_wq);
- kfree(pm8001_ha->tags);
+ bitmap_free(pm8001_ha->tags);
kfree(pm8001_ha);
}
@@ -1192,7 +1192,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
can_queue = ccb_count - PM8001_RESERVE_SLOT;
shost->can_queue = can_queue;
- pm8001_ha->tags = kzalloc(ccb_count, GFP_KERNEL);
+ pm8001_ha->tags = bitmap_zalloc(ccb_count, GFP_KERNEL);
if (!pm8001_ha->tags)
goto err_out;
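
pm8001_ha->tags is used as a bitmap of ccb_count bits, so kzalloc(ccb_count, ...) over-allocated one byte per tag; bitmap_zalloc() sizes the allocation in bits and pairs with the bitmap_free() now used in pm8001_free(). A hedged sketch of the pairing, with an illustrative tag allocator that is not the driver's code:

/* Hedged sketch of the bitmap_zalloc()/bitmap_free() pairing. */
#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *tags;

static int tags_init(unsigned int nr_tags)
{
	tags = bitmap_zalloc(nr_tags, GFP_KERNEL);	/* nr_tags bits, zeroed */
	return tags ? 0 : -ENOMEM;
}

static int tag_alloc(unsigned int nr_tags)
{
	unsigned int tag = find_first_zero_bit(tags, nr_tags);

	if (tag >= nr_tags)
		return -EBUSY;
	set_bit(tag, tags);
	return tag;
}

static void tags_exit(void)
{
	bitmap_free(tags);	/* matches bitmap_zalloc(), not kfree() */
}
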
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 83e73009db5c..160ee8b228c9 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
+#include "pm80xx_tracepoints.h"
/**
* pm8001_find_tag - from sas task to find out tag that belongs to this task
@@ -527,6 +528,9 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
{
+ struct ata_queued_cmd *qc;
+ struct pm8001_device *pm8001_dev;
+
if (!ccb->task)
return;
if (!sas_protocol_ata(task->task_proto))
@@ -549,6 +553,18 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
/* do nothing */
break;
}
+
+ if (sas_protocol_ata(task->task_proto)) {
+ // For SCSI/ATA commands uldd_task points to ata_queued_cmd
+ qc = task->uldd_task;
+ pm8001_dev = ccb->device;
+ trace_pm80xx_request_complete(pm8001_ha->id,
+ pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
+ ccb_idx, 0 /* ctlr_opcode not known */,
+ qc ? qc->tf.command : 0, // ata opcode
+ pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
+ }
+
task->lldd_task = NULL;
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
@@ -1183,7 +1199,7 @@ int pm8001_abort_task(struct sas_task *task)
struct pm8001_device *pm8001_dev;
struct pm8001_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED, ret;
- u32 phy_id;
+ u32 phy_id, port_id;
struct sas_task_slow slow_task;
if (unlikely(!task || !task->lldd_task || !task->dev))
@@ -1230,6 +1246,7 @@ int pm8001_abort_task(struct sas_task *task)
DECLARE_COMPLETION_ONSTACK(completion_reset);
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
+ port_id = phy->port->port_id;
/* 1. Set Device state as Recovery */
pm8001_dev->setds_completion = &completion;
@@ -1281,6 +1298,10 @@ int pm8001_abort_task(struct sas_task *task)
PORT_RESET_TMO);
if (phy->port_reset_status == PORT_RESET_TMO) {
pm8001_dev_gone_notify(dev);
+ PM8001_CHIP_DISP->hw_event_ack_req(
+ pm8001_ha, 0,
+ 0x07, /*HW_EVENT_PHY_DOWN ack*/
+ port_id, phy_id, 0, 0);
goto out;
}
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 83eec16d021d..a17da1cebce1 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -216,6 +216,9 @@ struct pm8001_dispatch {
u32 state);
int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
int (*fatal_errors)(struct pm8001_hba_info *pm8001_ha);
+ void (*hw_event_ack_req)(struct pm8001_hba_info *pm8001_ha,
+ u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0,
+ u32 param1);
};
struct pm8001_chip_info {
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 2101fc5761c3..2530d1365556 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -42,6 +42,7 @@
#include "pm80xx_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
+#include "pm80xx_tracepoints.h"
#define SMP_DIRECT 1
#define SMP_INDIRECT 2
@@ -2400,21 +2401,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
+ param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
if (!tag) {
pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
return;
}
+
ccb = &pm8001_ha->ccb_info[tag];
- param = le32_to_cpu(psataPayload->param);
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
- return;
- }
+ t = ccb->task;
+ pm8001_dev = ccb->device;
if (t) {
if (t->dev && (t->dev->lldd_dev))
@@ -2431,10 +2428,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
}
ts = &t->task_status;
- if (!ts) {
- pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
- return;
- }
if (status != IO_SUCCESS) {
pm8001_dbg(pm8001_ha, FAIL,
@@ -2830,15 +2823,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
u32 dev_id = le32_to_cpu(psataPayload->device_id);
unsigned long flags;
- ccb = &pm8001_ha->ccb_info[tag];
-
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
- return;
- }
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@@ -2852,6 +2836,10 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
return;
}
+ ccb = &pm8001_ha->ccb_info[tag];
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+
if (unlikely(!t || !t->lldd_task || !t->dev)) {
pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
@@ -3522,7 +3510,7 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 status =
le32_to_cpu(pPayload->status);
u32 phy_id =
- le32_to_cpu(pPayload->phyid);
+ le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
pm8001_dbg(pm8001_ha, INIT,
@@ -3724,8 +3712,10 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
- pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
- port_id, phy_id, 0, 0);
+ if (!pm8001_ha->phy[phy_id].reset_completion) {
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
+ }
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
@@ -4161,10 +4151,22 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
u32 ret = MPI_IO_STATUS_FAIL;
u32 regval;
+ /*
+ * Fatal errors are programmed to be signalled in irq vector
+ * pm8001_ha->max_q_num - 1 through pm8001_ha->main_cfg_tbl.pm80xx_tbl.
+ * fatal_err_interrupt
+ */
if (vec == (pm8001_ha->max_q_num - 1)) {
+ u32 mipsall_ready;
+
+ if (pm8001_ha->chip_id == chip_8008 ||
+ pm8001_ha->chip_id == chip_8009)
+ mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT;
+ else
+ mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT;
+
regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
- SCRATCH_PAD_MIPSALL_READY) {
+ if ((regval & mipsall_ready) != mipsall_ready) {
pm8001_ha->controller_fatal_error = true;
pm8001_dbg(pm8001_ha, FAIL,
"Firmware Fatal error! Regval:0x%x\n",
@@ -4547,6 +4549,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+ struct ata_queued_cmd *qc = task->uldd_task;
u32 tag = ccb->ccb_tag;
int ret;
u32 q_index, cpu_id;
@@ -4766,6 +4769,11 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
}
+ trace_pm80xx_request_issue(pm8001_ha->id,
+ ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS,
+ ccb->ccb_tag, opc,
+ qc ? qc->tf.command : 0, // ata opcode
+ ccb->device ? atomic_read(&ccb->device->running_req) : 0);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
&sata_cmd, sizeof(sata_cmd), q_index);
return ret;
@@ -5061,4 +5069,5 @@ const struct pm8001_dispatch pm8001_80xx_dispatch = {
.fw_flash_update_req = pm8001_chip_fw_flash_update_req,
.set_dev_state_req = pm8001_chip_set_dev_state_req,
.fatal_errors = pm80xx_fatal_errors,
+ .hw_event_ack_req = pm80xx_hw_event_ack_req,
};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index c7e5d93bea92..c41ed039c92a 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -1405,8 +1405,12 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000
-#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \
+#define SCRATCH_PAD_MIPSALL_READY_16PORT (SCRATCH_PAD_IOP1_READY | \
SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_ILA_READY | \
+ SCRATCH_PAD_RAAE_READY)
+#define SCRATCH_PAD_MIPSALL_READY_8PORT (SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_ILA_READY | \
SCRATCH_PAD_RAAE_READY)
/* boot loader state */
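
With the split above, the fatal-error poll in process_oq() picks the "all MIPS cores ready" mask per chip family, since 8-port parts have no IOP1 and must not require its ready bit. A hedged sketch of the selection; the bit values here are placeholders, not the real scratchpad layout:

#include <linux/bits.h>
#include <linux/types.h>

#define RAAE_READY BIT(0)	/* placeholder bit assignments */
#define ILA_READY  BIT(1)
#define IOP0_READY BIT(2)
#define IOP1_READY BIT(3)

#define MIPSALL_READY_8PORT  (RAAE_READY | ILA_READY | IOP0_READY)
#define MIPSALL_READY_16PORT (MIPSALL_READY_8PORT | IOP1_READY)

static bool mips_all_ready(u32 scratch_pad1, bool is_8port)
{
	u32 mask = is_8port ? MIPSALL_READY_8PORT : MIPSALL_READY_16PORT;

	return (scratch_pad1 & mask) == mask;
}
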
diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.c b/drivers/scsi/pm8001/pm80xx_tracepoints.c
new file mode 100644
index 000000000000..344aface9cdb
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_tracepoints.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Trace events in pm8001 driver.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Akshat Jain <akshatzen@google.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "pm80xx_tracepoints.h"
diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.h b/drivers/scsi/pm8001/pm80xx_tracepoints.h
new file mode 100644
index 000000000000..5e669a8a9344
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_tracepoints.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events in pm8001 driver.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Akshat Jain <akshatzen@google.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pm80xx
+
+#if !defined(_TRACE_PM80XX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PM80XX_H
+
+#include <linux/tracepoint.h>
+#include "pm8001_sas.h"
+
+TRACE_EVENT(pm80xx_request_issue,
+ TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode,
+ u16 ata_opcode, int running_req),
+
+ TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, phy_id)
+ __field(u32, htag)
+ __field(u32, ctlr_opcode)
+ __field(u16, ata_opcode)
+ __field(int, running_req)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->phy_id = phy_id;
+ __entry->htag = htag;
+ __entry->ctlr_opcode = ctlr_opcode;
+ __entry->ata_opcode = ata_opcode;
+ __entry->running_req = running_req;
+ ),
+
+ TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d",
+ __entry->id, __entry->phy_id, __entry->htag,
+ __entry->ctlr_opcode, __entry->ata_opcode,
+ __entry->running_req)
+);
+
+TRACE_EVENT(pm80xx_request_complete,
+ TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode,
+ u16 ata_opcode, int running_req),
+
+ TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, phy_id)
+ __field(u32, htag)
+ __field(u32, ctlr_opcode)
+ __field(u16, ata_opcode)
+ __field(int, running_req)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->phy_id = phy_id;
+ __entry->htag = htag;
+ __entry->ctlr_opcode = ctlr_opcode;
+ __entry->ata_opcode = ata_opcode;
+ __entry->running_req = running_req;
+ ),
+
+ TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d",
+ __entry->id, __entry->phy_id, __entry->htag,
+ __entry->ctlr_opcode, __entry->ata_opcode,
+ __entry->running_req)
+);
+
+TRACE_EVENT(pm80xx_mpi_build_cmd,
+ TP_PROTO(u32 id, u32 opc, u32 htag, u32 qi, u32 pi, u32 ci),
+
+ TP_ARGS(id, opc, htag, qi, pi, ci),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, opc)
+ __field(u32, htag)
+ __field(u32, qi)
+ __field(u32, pi)
+ __field(u32, ci)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->opc = opc;
+ __entry->htag = htag;
+ __entry->qi = qi;
+ __entry->pi = pi;
+ __entry->ci = ci;
+ ),
+
+ TP_printk("ctlr_id = %u opc = %#x htag = %#x QI = %u PI = %u CI = %u",
+ __entry->id, __entry->opc, __entry->htag, __entry->qi,
+ __entry->pi, __entry->ci)
+);
+
+#endif /* _TRACE_PM80XX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE pm80xx_tracepoints
+
+#include <trace/define_trace.h>
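
The new header follows the standard kernel tracepoint layout: the events are declared once, pm80xx_tracepoints.c defines CREATE_TRACE_POINTS before including it to emit the event bodies, and the Makefile's -I$(src) (added above) lets <trace/define_trace.h> re-include the header by its TRACE_INCLUDE_FILE name. A hedged usage sketch; the argument values are placeholders:

/* Hedged usage sketch: how a compilation unit emits one of the
 * events declared above. Compiles to a no-op unless the tracepoint
 * is enabled at runtime.
 */
#include "pm80xx_tracepoints.h"

static void example_issue_path(void)
{
	trace_pm80xx_request_issue(0 /* ctlr id */, 1 /* phy */,
				   0xbeef /* htag */, 0x01 /* ctlr opcode */,
				   0x25 /* ATA READ DMA EXT */,
				   1 /* running_req */);
}
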
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 88046a793767..928532180d32 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3221,8 +3221,8 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
return NULL;
sglist->order = order;
- sgl_alloc_order(buflen, order, false,
- GFP_KERNEL | GFP_DMA | __GFP_ZERO, &sglist->num_sg);
+ sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO,
+ &sglist->num_sg);
return sglist;
}
@@ -3302,7 +3302,6 @@ static int pmcraid_copy_sglist(
/**
* pmcraid_queuecommand_lck - Queue a mid-layer request
* @scsi_cmd: scsi command struct
- * @done: done function
*
* This function queues a request generated by the mid-layer. Midlayer calls
* this routine within host->lock. Some of the functions called by queuecommand
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 99a56ca1fb16..fab43dabe5b3 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -2250,6 +2250,7 @@ process_els:
io_req->tm_flags == FCP_TMF_TGT_RESET) {
clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
io_req->sc_cmd = NULL;
+ kref_put(&io_req->refcount, qedf_release_cmd);
complete(&io_req->tm_done);
}
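
The qedf_io.c hunk drops the reference taken when the task-management command was issued, so completing tm_done no longer leaks the io_req. A hedged stand-alone sketch of the kref pattern involved; the struct layout is illustrative, not qedf's:

#include <linux/kref.h>
#include <linux/slab.h>

struct io_req {
	struct kref refcount;	/* kref_init() at allocation time */
	/* ... */
};

static void io_req_release(struct kref *ref)
{
	kfree(container_of(ref, struct io_req, refcount));
}

static void tmf_complete(struct io_req *req)
{
	/* balance the kref_get() taken when the TMF was queued */
	kref_put(&req->refcount, io_req_release);
}
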
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 1bf7a22d4948..6ad28bc8e948 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -911,7 +911,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
struct qed_link_output if_link;
if (lport->vport) {
- QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
+ printk_ratelimited("Cannot issue host reset on NPIV port.\n");
return;
}
@@ -1415,6 +1415,8 @@ static void qedf_upload_connection(struct qedf_ctx *qedf,
*/
term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
&term_params_dma, GFP_KERNEL);
+ if (!term_params)
+ return;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
"port_id=%06x.\n", fcport->rdata->ids.port_id);
@@ -1862,6 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
init_completion(&vport_qedf->flogi_compl);
INIT_LIST_HEAD(&vport_qedf->fcports);
+ INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
rc = qedf_vport_libfc_config(vport, vn_port);
if (rc) {
@@ -3978,7 +3981,9 @@ void qedf_stag_change_work(struct work_struct *work)
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, stag_work.work);
- QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+ printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.\n",
+ dev_name(&qedf->pdev->dev), __func__, __LINE__,
+ qedf->dbg_ctx.host_no);
qedf_ctx_soft_reset(qedf->lport);
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 1dec814d8788..832a856dd367 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1538,7 +1538,6 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
int i;
struct scsi_bd *pbl;
u64 *list;
- dma_addr_t page;
/* Alloc dma memory for BDQ buffers */
for (i = 0; i < QEDI_BDQ_NUM; i++) {
@@ -1608,11 +1607,9 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size /
QEDI_PAGE_SIZE;
list = (u64 *)qedi->bdq_pbl_list;
- page = qedi->bdq_pbl_list_dma;
for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
*list = qedi->bdq_pbl_dma;
list++;
- page += QEDI_PAGE_SIZE;
}
return 0;
@@ -2089,8 +2086,7 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
rc = snprintf(buf, ip_len, fmt, gw);
break;
case ISCSI_BOOT_ETH_FLAGS:
- rc = snprintf(buf, 3, "%hhd\n",
- SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = snprintf(buf, 3, "0\n");
@@ -2257,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = snprintf(buf, 3, "0\n");
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 032efb294ee5..db55737000ab 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2700,7 +2700,13 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
static inline void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
rport->dev_loss_tmo = timeout ? timeout : 1;
+
+ if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port)
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
+ rport->dev_loss_tmo);
}
static void
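
The qla_attr.c hunk mirrors the transport's dev_loss_tmo into the NVMe-FC remote port, using IS_ENABLED() so the call compiles away when CONFIG_NVME_FC is off. A hedged sketch of the guard pattern; the struct is a stand-in, not the qla2xxx or nvme-fc one:

#include <linux/kconfig.h>

struct remote_port { unsigned int dev_loss_tmo; };

static void sync_devloss(struct remote_port *nvme_rport, unsigned int tmo)
{
	if (!IS_ENABLED(CONFIG_NVME_FC) || !nvme_rport)
		return;		/* folds away when NVMe-FC is disabled */
	nvme_rport->dev_loss_tmo = tmo;
}
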
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 070b636802d0..1fe4966fc2f6 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5828,13 +5828,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_dfs_create_rport(vha, fcport);
- if (NVME_TARGET(vha->hw, fcport)) {
- qla_nvme_register_remote(vha, fcport);
- qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- return;
- }
-
qla24xx_update_fcport_fcp_prio(vha, fcport);
switch (vha->host->active_mode) {
@@ -5856,6 +5849,9 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ if (NVME_TARGET(vha->hw, fcport))
+ qla_nvme_register_remote(vha, fcport);
+
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 138ffdb5c92c..e22ec7cb65db 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -43,7 +43,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
req.port_name = wwn_to_u64(fcport->port_name);
req.node_name = wwn_to_u64(fcport->node_name);
req.port_role = 0;
- req.dev_loss_tmo = 0;
+ req.dev_loss_tmo = fcport->dev_loss_tmo;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
@@ -70,6 +70,9 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return ret;
}
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
+ fcport->dev_loss_tmo);
+
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
ql_log(ql_log_info, vha, 0x212a,
"PortID:%06x Supports SLER\n", req.port_id);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 8987acc24dac..0ae936d839f1 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -5734,7 +5734,7 @@ static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
- rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = sprintf(str, "0\n");
@@ -5843,7 +5843,7 @@ qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
(char *)&boot_conn->chap.intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = sprintf(str, "0\n");
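
The %hhd-to-%d changes in qedi and qla4xxx silence -Wformat: SYSFS_FLAG_FW_SEL_BOOT is an int-typed macro, so %hhd mismatches the promoted argument, while %d with an explicit (char) cast keeps the old truncating behavior. A hedged userspace illustration; the flag value is made up:

#include <stdio.h>

#define FW_SEL_BOOT 2	/* int-typed macro, like SYSFS_FLAG_FW_SEL_BOOT */

int main(void)
{
	char buf[3];

	/* snprintf(buf, 3, "%hhd\n", FW_SEL_BOOT);  would warn: -Wformat */
	snprintf(buf, sizeof(buf), "%d\n", (char)FW_SEL_BOOT);
	return 0;
}
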
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index f6af1562cba4..211aace69c22 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -55,7 +55,6 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
-#include <linux/async.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -201,11 +200,11 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
/*
- * 1024 is big enough for saturating the fast scsi LUN now
+ * 1024 is big enough for saturating fast SCSI LUNs.
*/
int scsi_device_max_queue_depth(struct scsi_device *sdev)
{
- return max_t(int, sdev->host->can_queue, 1024);
+ return min_t(int, sdev->host->can_queue, 1024);
}
/**
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
index 081b84bb7985..b7a464383cc0 100644
--- a/drivers/scsi/scsi_bsg.c
+++ b/drivers/scsi/scsi_bsg.c
@@ -60,7 +60,7 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
goto out_free_cmd;
bio = rq->bio;
- blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
+ blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
/*
* fill in all the output members
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index d9109771f274..db8517f1a485 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -9,6 +9,7 @@
static const char *const scsi_cmd_flags[] = {
SCSI_CMD_FLAG_NAME(TAGGED),
SCSI_CMD_FLAG_NAME(INITIALIZED),
+ SCSI_CMD_FLAG_NAME(LAST),
};
#undef SCSI_CMD_FLAG_NAME
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2371edbc3af4..60a6ae9d1219 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -133,23 +133,6 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
return true;
}
-static void scsi_eh_complete_abort(struct scsi_cmnd *scmd, struct Scsi_Host *shost)
-{
- unsigned long flags;
-
- spin_lock_irqsave(shost->host_lock, flags);
- list_del_init(&scmd->eh_entry);
- /*
- * If the abort succeeds, and there is no further
- * EH action, clear the ->last_reset time.
- */
- if (list_empty(&shost->eh_abort_list) &&
- list_empty(&shost->eh_cmd_q))
- if (shost->eh_deadline != -1)
- shost->last_reset = 0;
- spin_unlock_irqrestore(shost->host_lock, flags);
-}
-
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
@@ -166,54 +149,72 @@ scmd_eh_abort_handler(struct work_struct *work)
struct scsi_cmnd *scmd =
container_of(work, struct scsi_cmnd, abort_work.work);
struct scsi_device *sdev = scmd->device;
+ struct Scsi_Host *shost = sdev->host;
enum scsi_disposition rtn;
unsigned long flags;
- if (scsi_host_eh_past_deadline(sdev->host)) {
+ if (scsi_host_eh_past_deadline(shost)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"eh timeout, not aborting\n"));
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
+ goto out;
+ }
+
+ SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"aborting command\n"));
- rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
- if (rtn == SUCCESS) {
- set_host_byte(scmd, DID_TIME_OUT);
- if (scsi_host_eh_past_deadline(sdev->host)) {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "eh timeout, not retrying "
- "aborted command\n"));
- } else if (!scsi_noretry_cmd(scmd) &&
- scsi_cmd_retry_allowed(scmd) &&
- scsi_eh_should_retry_cmd(scmd)) {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_WARNING, scmd,
- "retry aborted command\n"));
- scsi_eh_complete_abort(scmd, sdev->host);
- scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
- return;
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_WARNING, scmd,
- "finish aborted command\n"));
- scsi_eh_complete_abort(scmd, sdev->host);
- scsi_finish_command(scmd);
- return;
- }
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "cmd abort %s\n",
- (rtn == FAST_IO_FAIL) ?
- "not send" : "failed"));
- }
+ rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
+ if (rtn != SUCCESS) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "cmd abort %s\n",
+ (rtn == FAST_IO_FAIL) ?
+ "not sent" : "failed"));
+ goto out;
+ }
+ set_host_byte(scmd, DID_TIME_OUT);
+ if (scsi_host_eh_past_deadline(shost)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "eh timeout, not retrying "
+ "aborted command\n"));
+ goto out;
}
- spin_lock_irqsave(sdev->host->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
list_del_init(&scmd->eh_entry);
- spin_unlock_irqrestore(sdev->host->host_lock, flags);
+
+ /*
+ * If the abort succeeds, and there is no further
+ * EH action, clear the ->last_reset time.
+ */
+ if (list_empty(&shost->eh_abort_list) &&
+ list_empty(&shost->eh_cmd_q))
+ if (shost->eh_deadline != -1)
+ shost->last_reset = 0;
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (!scsi_noretry_cmd(scmd) &&
+ scsi_cmd_retry_allowed(scmd) &&
+ scsi_eh_should_retry_cmd(scmd)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "retry aborted command\n"));
+ scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "finish aborted command\n"));
+ scsi_finish_command(scmd);
+ }
+ return;
+
+out:
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del_init(&scmd->eh_entry);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
scsi_eh_scmd_add(scmd);
}
@@ -1429,7 +1430,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
enum scsi_disposition rtn = NEEDS_RETRY;
for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
- rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
+ rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
+ scmd->device->eh_timeout, 0);
if (rtn == SUCCESS)
return 0;
@@ -2040,7 +2042,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
req->timeout = 10 * HZ;
rq->retries = 5;
- blk_execute_rq_nowait(NULL, req, 1, eh_lock_door_done);
+ blk_execute_rq_nowait(req, true, eh_lock_door_done);
}
/**
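
The scmd_eh_abort_handler() rewrite above replaces the nested else ladder with early exits to a single out: label, so the eh_entry removal and the scsi_eh_scmd_add() escalation each appear exactly once. A hedged stand-alone model of the resulting control flow; all names are placeholders:

#include <stdbool.h>

struct cmd { int state; };

static bool past_deadline(struct cmd *c) { return c->state < 0; }
static bool try_abort(struct cmd *c) { return c->state == 1; }
static void finish_or_retry(struct cmd *c) { c->state = 2; }
static void remove_from_eh_list(struct cmd *c) { (void)c; }
static void escalate_to_eh(struct cmd *c) { c->state = 3; }

static void abort_handler(struct cmd *scmd)
{
	if (past_deadline(scmd))
		goto out;
	if (!try_abort(scmd))
		goto out;

	finish_or_retry(scmd);		/* success path returns directly */
	return;
out:
	remove_from_eh_list(scmd);
	escalate_to_eh(scmd);		/* scsi_eh_scmd_add() in the real code */
}
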
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 400df3354cd6..e13fd380deb6 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -408,8 +408,7 @@ static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
return ret;
}
-static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
- struct sg_io_hdr *hdr, fmode_t mode)
+static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
{
unsigned long start_time;
ssize_t ret = 0;
@@ -483,7 +482,7 @@ static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
start_time = jiffies;
- blk_execute_rq(disk, rq, at_head);
+ blk_execute_rq(rq, at_head);
hdr->duration = jiffies_to_msecs(jiffies - start_time);
@@ -499,19 +498,12 @@ out_put_request:
/**
* sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
* @q: request queue to send scsi commands down
- * @disk: gendisk to operate on (option)
* @mode: mode used to open the file through which the ioctl has been
* submitted
* @sic: userspace structure describing the command to perform
*
* Send down the scsi command described by @sic to the device below
- * the request queue @q. If @file is non-NULL it's used to perform
- * fine-grained permission checks that allow users to send down
- * non-destructive SCSI commands. If the caller has a struct gendisk
- * available it should be passed in as @disk to allow the low level
- * driver to use the information contained in it. A non-NULL @disk
- * is only allowed if the caller knows that the low level driver doesn't
- * need it (e.g. in the scsi subsystem).
+ * the request queue @q.
*
* Notes:
* - This interface is deprecated - users should use the SG_IO
@@ -530,8 +522,8 @@ out_put_request:
* Positive numbers returned are the compacted SCSI error codes (4
* bytes in one int) where the lowest byte is the SCSI status.
*/
-static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
- fmode_t mode, struct scsi_ioctl_command __user *sic)
+static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode,
+ struct scsi_ioctl_command __user *sic)
{
enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
struct request *rq;
@@ -620,7 +612,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
goto error;
}
- blk_execute_rq(disk, rq, 0);
+ blk_execute_rq(rq, false);
err = req->result & 0xff; /* only 8 bit SCSI status */
if (err) {
@@ -806,8 +798,8 @@ static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
return 0;
}
-static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk,
- fmode_t mode, void __user *arg)
+static int scsi_cdrom_send_packet(struct scsi_device *sdev, fmode_t mode,
+ void __user *arg)
{
struct cdrom_generic_command cgc;
struct sg_io_hdr hdr;
@@ -847,7 +839,7 @@ static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk
hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
hdr.cmd_len = sizeof(cgc.cmd);
- err = sg_io(sdev, disk, &hdr, mode);
+ err = sg_io(sdev, &hdr, mode);
if (err == -EFAULT)
return -EFAULT;
@@ -862,8 +854,8 @@ static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk
return err;
}
-static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
- fmode_t mode, void __user *argp)
+static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode,
+ void __user *argp)
{
struct sg_io_hdr hdr;
int error;
@@ -871,7 +863,7 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
error = get_sg_io_hdr(&hdr, argp);
if (error)
return error;
- error = sg_io(sdev, disk, &hdr, mode);
+ error = sg_io(sdev, &hdr, mode);
if (error == -EFAULT)
return error;
if (put_sg_io_hdr(&hdr, argp))
@@ -882,7 +874,6 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
/**
* scsi_ioctl - Dispatch ioctl to scsi device
* @sdev: scsi device receiving ioctl
- * @disk: disk receiving the ioctl
* @mode: mode the block/char device is opened with
* @cmd: which ioctl is it
* @arg: data associated with ioctl
@@ -891,8 +882,8 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
* does not take a major/minor number as the dev field. Rather, it takes
* a pointer to a &struct scsi_device.
*/
-int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
- int cmd, void __user *arg)
+int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd,
+ void __user *arg)
{
struct request_queue *q = sdev->request_queue;
struct scsi_sense_hdr sense_hdr;
@@ -927,11 +918,11 @@ int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
case SG_EMULATED_HOST:
return sg_emulated_host(q, arg);
case SG_IO:
- return scsi_ioctl_sg_io(sdev, disk, mode, arg);
+ return scsi_ioctl_sg_io(sdev, mode, arg);
case SCSI_IOCTL_SEND_COMMAND:
- return sg_scsi_ioctl(q, disk, mode, arg);
+ return sg_scsi_ioctl(q, mode, arg);
case CDROM_SEND_PACKET:
- return scsi_cdrom_send_packet(sdev, disk, mode, arg);
+ return scsi_cdrom_send_packet(sdev, mode, arg);
case CDROMCLOSETRAY:
return scsi_send_start_stop(sdev, 3);
case CDROMEJECT:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 621d841d819a..0a70aa763a96 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -241,7 +241,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
/*
* head injection *required* here otherwise quiesce won't work
*/
- blk_execute_rq(NULL, req, 1);
+ blk_execute_rq(req, true);
/*
* Some devices (USB mass-storage in particular) may transfer
@@ -543,8 +543,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_update_request(req, error, bytes))
return true;
+ // XXX:
if (blk_queue_add_random(q))
- add_disk_randomness(req->rq_disk);
+ add_disk_randomness(req->q->disk);
if (!blk_rq_is_passthrough(req)) {
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
@@ -617,6 +618,46 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
}
}
+/**
+ * scsi_rq_err_bytes - determine the number of bytes until the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ * A request may be a merge of I/Os that require different failure
+ * handling. This function determines the number of bytes that
+ * can be failed from the beginning of the request without
+ * crossing into an area that needs to be retried further.
+ *
+ * Return:
+ * The number of bytes to fail.
+ */
+static unsigned int scsi_rq_err_bytes(const struct request *rq)
+{
+ unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ unsigned int bytes = 0;
+ struct bio *bio;
+
+ if (!(rq->rq_flags & RQF_MIXED_MERGE))
+ return blk_rq_bytes(rq);
+
+ /*
+ * Currently the only 'mixing' which can happen is between
+ * different failfast types. We can safely fail portions
+ * which have all the failfast bits that the first one has -
+ * the ones which are at least as eager to fail as the first
+ * one.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ if ((bio->bi_opf & ff) != ff)
+ break;
+ bytes += bio->bi_iter.bi_size;
+ }
+
+ /* this could otherwise lead to an infinite loop */
+ BUG_ON(blk_rq_bytes(rq) && !bytes);
+ return bytes;
+}
+
/* Helper for scsi_io_completion() when "reprep" action required. */
static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
struct request_queue *q)
@@ -794,7 +835,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
scsi_print_command(cmd);
}
}
- if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
+ if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
return;
fallthrough;
case ACTION_REPREP:
@@ -2026,7 +2067,6 @@ void scsi_exit_queue(void)
* @sdev: SCSI device to be queried
* @pf: Page format bit (1 == standard, 0 == vendor specific)
* @sp: Save page bit (0 == don't save, 1 == save)
- * @modepage: mode page being requested
* @buffer: request buffer (may not be smaller than eight bytes)
* @len: length of request buffer.
* @timeout: command timeout
@@ -2039,10 +2079,9 @@ void scsi_exit_queue(void)
* status on error
*
*/
-int
-scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
- unsigned char *buffer, int len, int timeout, int retries,
- struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
+int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
+ unsigned char *buffer, int len, int timeout, int retries,
+ struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
unsigned char cmd[10];
unsigned char *real_buffer;
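
scsi_rq_err_bytes(), added above, fails only the leading bios of a mixed merge whose failfast bits are a superset of the first bio's. A hedged userspace model of that walk; the types are simplified stand-ins, not kernel code:

#include <stdio.h>

struct seg { unsigned flags; unsigned size; struct seg *next; };

static unsigned err_bytes(struct seg *head, unsigned failfast_mask)
{
	unsigned ff = head->flags & failfast_mask;
	unsigned bytes = 0;
	struct seg *s;

	for (s = head; s; s = s->next) {
		if ((s->flags & ff) != ff)
			break;		/* less eager to fail: stop here */
		bytes += s->size;
	}
	return bytes;
}

int main(void)
{
	struct seg c = { 0x0, 512, 0 };
	struct seg b = { 0x3, 1024, &c };
	struct seg a = { 0x3, 512, &b };

	printf("%u\n", err_bytes(&a, 0x3));	/* 1536: a and b fail, c is retried */
	return 0;
}
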
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index ed9572252a42..1f8f80b2dbfc 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -30,7 +30,9 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
{
struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
- return rq->rq_disk ? rq->rq_disk->disk_name : NULL;
+ if (!rq->q->disk)
+ return NULL;
+ return rq->q->disk->disk_name;
}
static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index b5a858c29488..d581613d87c7 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -8,7 +8,6 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#include <linux/async.h>
#include <linux/blk-pm.h>
#include <scsi/scsi.h>
@@ -181,7 +180,7 @@ static int sdev_runtime_resume(struct device *dev)
blk_pre_runtime_resume(sdev->request_queue);
if (pm && pm->runtime_resume)
err = pm->runtime_resume(dev);
- blk_post_runtime_resume(sdev->request_queue, err);
+ blk_post_runtime_resume(sdev->request_queue);
return err;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a278fc8948f4..5c4786310a31 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -3,7 +3,6 @@
#define _SCSI_PRIV_H
#include <linux/device.h>
-#include <linux/async.h>
#include <scsi/scsi_device.h>
#include <linux/sbitmap.h>
@@ -144,7 +143,7 @@ extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
extern struct bus_type scsi_bus_type;
-extern const struct attribute_group scsi_shost_attr_group;
+extern const struct attribute_group *scsi_shost_groups[];
/* scsi_netlink.c */
#ifdef CONFIG_SCSI_NETLINK
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index d6982d355739..95aee1ad1383 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -49,7 +49,7 @@ static DEFINE_MUTEX(global_host_template_mutex);
static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct Scsi_Host *shost = PDE_DATA(file_inode(file));
+ struct Scsi_Host *shost = pde_data(file_inode(file));
ssize_t ret = -ENOMEM;
char *page;
@@ -79,7 +79,7 @@ static int proc_scsi_show(struct seq_file *m, void *v)
static int proc_scsi_host_open(struct inode *inode, struct file *file)
{
- return single_open_size(file, proc_scsi_show, PDE_DATA(inode),
+ return single_open_size(file, proc_scsi_show, pde_data(inode),
4 * PAGE_SIZE);
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 23e1c0acdeae..3520b9384428 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif
-char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
+static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
S_IRUGO|S_IWUSR);
@@ -122,7 +122,7 @@ struct async_scan_data {
struct completion prev_finished;
};
-/**
+/*
* scsi_enable_async_suspend - Enable async suspend and resume
*/
void scsi_enable_async_suspend(struct device *dev)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d4edce930a4a..f1e0c131b77c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -424,10 +424,15 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
NULL
};
-const struct attribute_group scsi_shost_attr_group = {
+static const struct attribute_group scsi_shost_attr_group = {
.attrs = scsi_sysfs_shost_attrs,
};
+const struct attribute_group *scsi_shost_groups[] = {
+ &scsi_shost_attr_group,
+ NULL
+};
+
static void scsi_device_cls_release(struct device *class_dev)
{
struct scsi_device *sdev;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 65875a598d62..62eb9921cc94 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -51,7 +51,6 @@
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
-#include <linux/async.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
@@ -210,7 +209,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
*/
data.device_specific = 0;
- if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
+ if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
sdkp->max_retries, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
@@ -872,7 +871,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
unsigned int data_len = 24;
@@ -908,7 +907,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -940,7 +939,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -971,7 +970,7 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -1068,7 +1067,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
struct bio *bio = rq->bio;
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -1116,7 +1115,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
/* flush requests don't perform I/O, zero the S/G table */
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
@@ -1215,7 +1214,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sector_t threshold;
unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -1236,7 +1235,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
goto fail;
}
- if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
goto fail;
}
@@ -1331,7 +1330,7 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
switch (req_op(rq)) {
case REQ_OP_DISCARD:
- switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
+ switch (scsi_disk(rq->q->disk)->provisioning_mode) {
case SD_LBP_UNMAP:
return sd_setup_unmap_cmnd(cmd);
case SD_LBP_WS16:
@@ -1574,7 +1573,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
if (is_sed_ioctl(cmd))
return sed_ioctl(sdkp->opal_dev, cmd, p);
- return scsi_ioctl(sdp, disk, mode, cmd, p);
+ return scsi_ioctl(sdp, mode, cmd, p);
}
static void set_media_not_present(struct scsi_disk *sdkp)
@@ -1917,7 +1916,7 @@ static const struct block_device_operations sd_fops = {
**/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
- struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
/* New SCSI EH run, reset gate variable */
sdkp->ignore_medium_access_errors = false;
@@ -1937,7 +1936,7 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
**/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
- struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
struct scsi_device *sdev = scmd->device;
if (!scsi_device_online(sdev) ||
@@ -2034,7 +2033,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned int resid;
struct scsi_sense_hdr sshdr;
struct request *req = scsi_cmd_to_rq(SCpnt);
- struct scsi_disk *sdkp = scsi_disk(req->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(req->q->disk);
int sense_valid = 0;
int sense_deferred = 0;
@@ -3566,7 +3565,6 @@ static int sd_probe(struct device *dev)
sd_revalidate_disk(gd);
- gd->flags = GENHD_FL_EXT_DEVT;
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
gd->events |= DISK_EVENT_MEDIA_CHANGE;
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index ed06798983f8..378d071e47cb 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -61,10 +61,10 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
zone.capacity = zone.len;
zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
- zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
- if (zone.type != ZBC_ZONE_TYPE_CONV &&
- zone.cond == ZBC_ZONE_COND_FULL)
+ if (zone.cond == ZBC_ZONE_COND_FULL)
zone.wp = zone.start + zone.len;
+ else
+ zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
ret = cb(&zone, idx, data);
if (ret)
@@ -244,7 +244,7 @@ out:
static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t sector = blk_rq_pos(rq);
if (!sd_is_zoned(sdkp))
@@ -322,7 +322,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
unsigned int nr_blocks)
{
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
unsigned int wp_offset, zno = blk_rq_zone_no(rq);
unsigned long flags;
blk_status_t ret;
@@ -388,7 +388,7 @@ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
{
struct request *rq = scsi_cmd_to_rq(cmd);
sector_t sector = blk_rq_pos(rq);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t block = sectors_to_logical(sdkp->device, sector);
blk_status_t ret;
@@ -443,7 +443,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
{
int result = cmd->result;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
unsigned int zno = blk_rq_zone_no(rq);
enum req_opf op = req_op(rq);
unsigned long flags;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 141099ab9092..6b43e97bd417 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -77,7 +77,7 @@ static int sg_proc_init(void);
#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
-int sg_big_buff = SG_DEF_RESERVED_SIZE;
+static int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
/proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
of this size (or less if there is not enough memory) will be reserved
@@ -833,7 +833,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
- blk_execute_rq_nowait(NULL, srp->rq, at_head, sg_rq_end_io);
+ blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
return 0;
}
@@ -1109,7 +1109,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
- return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
+ return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
@@ -1165,7 +1165,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
if (ret != -ENOIOCTLCMD)
return ret;
- return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
+ return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p);
}
static __poll_t
@@ -1634,6 +1634,37 @@ MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+
+static struct ctl_table sg_sysctls[] = {
+ {
+ .procname = "sg-big-buff",
+ .data = &sg_big_buff,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+
+static struct ctl_table_header *hdr;
+static void register_sg_sysctls(void)
+{
+ if (!hdr)
+ hdr = register_sysctl("kernel", sg_sysctls);
+}
+
+static void unregister_sg_sysctls(void)
+{
+ if (hdr)
+ unregister_sysctl_table(hdr);
+}
+#else
+#define register_sg_sysctls() do { } while (0)
+#define unregister_sg_sysctls() do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
static int __init
init_sg(void)
{
@@ -1666,6 +1697,7 @@ init_sg(void)
return 0;
}
class_destroy(sg_sysfs_class);
+ register_sg_sysctls();
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
return rc;
@@ -1674,6 +1706,7 @@ err_out:
static void __exit
exit_sg(void)
{
+ unregister_sg_sysctls();
#ifdef CONFIG_SCSI_PROC_FS
remove_proc_subtree("scsi/sg", NULL);
#endif /* CONFIG_SCSI_PROC_FS */
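
The sg change registers sg-big-buff under /proc/sys/kernel through register_sysctl() instead of the old kernel-wide sysctl table. A hedged module-style sketch of the same registration pattern; the knob name and backing variable are illustrative:

#include <linux/module.h>
#include <linux/sysctl.h>

static int example_value = 32768;

static struct ctl_table example_sysctls[] = {
	{
		.procname	= "example-knob",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0444,			/* read-only */
		.proc_handler	= proc_dointvec,
	},
	{}					/* sentinel terminates the table */
};

static struct ctl_table_header *example_hdr;

static int __init example_init(void)
{
	example_hdr = register_sysctl("kernel", example_sysctls);
	return example_hdr ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	unregister_sysctl_table(example_hdr);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
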
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index e9ccfb97773f..27e98df83b31 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -100,7 +100,7 @@ snic_queue_report_tgt_req(struct snic *snic)
SNIC_BUG_ON(ntgts == 0);
buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;
- buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
+ buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) {
snic_req_free(snic, rqi);
SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 8e4af111c078..f925b1f1f9ad 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -335,7 +335,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
int block_sectors = 0;
long error_sector;
struct request *rq = scsi_cmd_to_rq(SCpnt);
- struct scsi_cd *cd = scsi_cd(rq->rq_disk);
+ struct scsi_cd *cd = scsi_cd(rq->q->disk);
#ifdef DEBUG
scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result);
@@ -402,7 +402,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
ret = scsi_alloc_sgtables(SCpnt);
if (ret != BLK_STS_OK)
return ret;
- cd = scsi_cd(rq->rq_disk);
+ cd = scsi_cd(rq->q->disk);
SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
"Doing sr request, block = %d\n", block));
@@ -561,8 +561,7 @@ static void sr_block_release(struct gendisk *disk, fmode_t mode)
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
{
- struct gendisk *disk = bdev->bd_disk;
- struct scsi_cd *cd = scsi_cd(disk);
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
struct scsi_device *sdev = cd->device;
void __user *argp = (void __user *)arg;
int ret;
@@ -584,7 +583,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (ret != -ENOSYS)
goto put;
}
- ret = scsi_ioctl(sdev, disk, mode, cmd, argp);
+ ret = scsi_ioctl(sdev, mode, cmd, argp);
put:
scsi_autopm_put_device(sdev);
@@ -684,9 +683,10 @@ static int sr_probe(struct device *dev)
disk->minors = 1;
sprintf(disk->disk_name, "sr%d", minor);
disk->fops = &sr_bdops;
- disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
- disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
+ disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT |
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE;
blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
@@ -725,7 +725,6 @@ static int sr_probe(struct device *dev)
blk_pm_runtime_init(sdev->request_queue, dev);
dev_set_drvdata(dev, cd);
- disk->flags |= GENHD_FL_REMOVABLE;
sr_revalidate_disk(cd);
error = device_add_disk(&sdev->sdev_gendev, disk, NULL);
@@ -856,7 +855,7 @@ static void get_capabilities(struct scsi_cd *cd)
/* allocate transfer buffer */
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer) {
sr_printk(KERN_ERR, cd, "out of memory.\n");
return;
@@ -994,7 +993,7 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
rq->timeout = 60 * HZ;
bio = rq->bio;
- blk_execute_rq(disk, rq, 0);
+ blk_execute_rq(rq, false);
if (scsi_req(rq)->result) {
struct scsi_sense_hdr sshdr;
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 1f988a1b9166..a61635326ae0 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -131,7 +131,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
if (cd->vendor == VENDOR_TOSHIBA)
density = (blocklength > 2048) ? 0x81 : 0x83;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -179,7 +179,7 @@ int sr_cd_check(struct cdrom_device_info *cdi)
if (cd->cdi.mask & CDC_MULTI_SESSION)
return 0;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c2d5608f6b1a..e869e90e05af 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -581,7 +581,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
rq->retries = retries;
req->end_io_data = SRpnt;
- blk_execute_rq_nowait(NULL, req, 1, st_scsi_execute_end);
+ blk_execute_rq_nowait(req, true, st_scsi_execute_end);
return 0;
}
@@ -3829,7 +3829,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
break;
}
- retval = scsi_ioctl(STp->device, NULL, file->f_mode, cmd_in, p);
+ retval = scsi_ioctl(STp->device, file->f_mode, cmd_in, p);
if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
/* unload */
STp->rew_at_close = 0;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 20595c0ba0ae..9a0bba5a51a7 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -21,6 +21,8 @@
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -1336,6 +1338,7 @@ static void storvsc_on_channel_callback(void *context)
continue;
}
request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd);
+ scsi_dma_unmap(scmnd);
}
storvsc_on_receive(stor_device, packet, request);
@@ -1749,9 +1752,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct hv_host_device *host_dev = shost_priv(host);
struct hv_device *dev = host_dev->dev;
struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
- int i;
struct scatterlist *sgl;
- unsigned int sg_count;
struct vmscsi_request *vm_srb;
struct vmbus_packet_mpb_array *payload;
u32 payload_sz;
@@ -1824,17 +1825,17 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
sgl = (struct scatterlist *)scsi_sglist(scmnd);
- sg_count = scsi_sg_count(scmnd);
length = scsi_bufflen(scmnd);
payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
payload_sz = sizeof(cmd_request->mpb);
- if (sg_count) {
- unsigned int hvpgoff, hvpfns_to_add;
+ if (scsi_sg_count(scmnd)) {
unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
- u64 hvpfn;
+ struct scatterlist *sg;
+ unsigned long hvpfn, hvpfns_to_add;
+ int j, i = 0, sg_count;
if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
@@ -1848,21 +1849,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
payload->range.len = length;
payload->range.offset = offset_in_hvpg;
+ sg_count = scsi_dma_map(scmnd);
+ if (sg_count < 0) {
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto err_free_payload;
+ }
- for (i = 0; sgl != NULL; sgl = sg_next(sgl)) {
+ for_each_sg(sgl, sg, sg_count, j) {
/*
- * Init values for the current sgl entry. hvpgoff
- * and hvpfns_to_add are in units of Hyper-V size
- * pages. Handling the PAGE_SIZE != HV_HYP_PAGE_SIZE
- * case also handles values of sgl->offset that are
- * larger than PAGE_SIZE. Such offsets are handled
- * even on other than the first sgl entry, provided
- * they are a multiple of PAGE_SIZE.
+ * Init values for the current sgl entry. hvpfns_to_add
+ * is in units of Hyper-V size pages. Handling the
+ * PAGE_SIZE != HV_HYP_PAGE_SIZE case also handles
+ * values of sgl->offset that are larger than PAGE_SIZE.
+ * Such offsets are handled even on other than the first
+ * sgl entry, provided they are a multiple of PAGE_SIZE.
*/
- hvpgoff = HVPFN_DOWN(sgl->offset);
- hvpfn = page_to_hvpfn(sg_page(sgl)) + hvpgoff;
- hvpfns_to_add = HVPFN_UP(sgl->offset + sgl->length) -
- hvpgoff;
+ hvpfn = HVPFN_DOWN(sg_dma_address(sg));
+ hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) +
+ sg_dma_len(sg)) - hvpfn;
/*
* Fill the next portion of the PFN array with
@@ -1872,7 +1876,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
* the PFN array is filled.
*/
while (hvpfns_to_add--)
- payload->range.pfn_array[i++] = hvpfn++;
+ payload->range.pfn_array[i++] = hvpfn++;
}
}
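/*
 * Editor's note: a minimal, compilable userspace sketch of the PFN
 * arithmetic in the loop above, assuming 4 KiB Hyper-V pages. The
 * dma_addr/dma_len values are made up for illustration.
 */
#include <stdio.h>

#define HV_HYP_PAGE_SHIFT 12
#define HV_HYP_PAGE_SIZE  (1UL << HV_HYP_PAGE_SHIFT)
#define HVPFN_DOWN(x)     ((x) >> HV_HYP_PAGE_SHIFT)
#define HVPFN_UP(x)       (((x) + HV_HYP_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT)

int main(void)
{
	unsigned long dma_addr = 0x12345678; /* stand-in for sg_dma_address(sg) */
	unsigned long dma_len  = 0x2100;     /* stand-in for sg_dma_len(sg) */
	unsigned long hvpfn = HVPFN_DOWN(dma_addr);
	unsigned long hvpfns_to_add = HVPFN_UP(dma_addr + dma_len) - hvpfn;

	/* The run starts mid-page, so 0x2100 bytes span three pages. */
	printf("first pfn 0x%lx, count %lu\n", hvpfn, hvpfns_to_add);
	return 0;
}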
@@ -1884,13 +1888,18 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
put_cpu();
if (ret == -EAGAIN) {
- if (payload_sz > sizeof(cmd_request->mpb))
- kfree(payload);
/* no more space */
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto err_free_payload;
}
return 0;
+
+err_free_payload:
+ if (payload_sz > sizeof(cmd_request->mpb))
+ kfree(payload);
+
+ return ret;
}
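/*
 * Editor's note: the change above funnels every failure path through a
 * single cleanup label that frees the payload only when it was
 * separately allocated. A runnable userspace sketch of the idiom
 * (submit() and the sizes are hypothetical):
 */
#include <stdio.h>
#include <stdlib.h>

struct req { char embedded[16]; };

/* Hypothetical submit() that always reports "queue full". */
static int submit(void *payload) { (void)payload; return -1; }

static int queue_cmd(struct req *r, size_t payload_sz)
{
	void *payload = r->embedded;
	int ret;

	if (payload_sz > sizeof(r->embedded)) {
		payload = malloc(payload_sz);
		if (!payload)
			return -1;
	}

	if (submit(payload)) {
		ret = -1;
		goto err_free_payload;
	}
	return 0;

err_free_payload:
	if (payload_sz > sizeof(r->embedded)) /* free only the big case */
		free(payload);
	return ret;
}

int main(void)
{
	struct req r;

	printf("queue_cmd: %d\n", queue_cmd(&r, 64));
	return 0;
}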
static struct scsi_host_template scsi_driver = {
@@ -2016,6 +2025,7 @@ static int storvsc_probe(struct hv_device *device,
stor_device->vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
spin_lock_init(&stor_device->lock);
hv_set_drvdata(device, stor_device);
+ dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
stor_device->port_number = host->host_no;
ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index b2521b830be7..9fe27b01904e 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -50,9 +50,11 @@ config SCSI_UFSHCD
However, do not compile this as a module if your root file system
(the one containing the directory /) is located on a UFS device.
+if SCSI_UFSHCD
+
config SCSI_UFSHCD_PCI
tristate "PCI bus based UFS Controller support"
- depends on SCSI_UFSHCD && PCI
+ depends on PCI
help
This selects the PCI UFS Host Controller Interface. Select this if
you have UFS Host Controller with PCI Interface.
@@ -71,7 +73,6 @@ config SCSI_UFS_DWC_TC_PCI
config SCSI_UFSHCD_PLATFORM
tristate "Platform bus based UFS Controller support"
- depends on SCSI_UFSHCD
depends on HAS_IOMEM
help
This selects the UFS host controller support. Select this if
@@ -147,7 +148,6 @@ config SCSI_UFS_TI_J721E
config SCSI_UFS_BSG
bool "Universal Flash Storage BSG device node"
- depends on SCSI_UFSHCD
select BLK_DEV_BSGLIB
help
Universal Flash Storage (UFS) is SCSI transport specification for
@@ -177,7 +177,7 @@ config SCSI_UFS_EXYNOS
config SCSI_UFS_CRYPTO
bool "UFS Crypto Engine Support"
- depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION
+ depends on BLK_INLINE_ENCRYPTION
help
Enable Crypto Engine Support in UFS.
Enabling this makes it possible for the kernel to use the crypto
@@ -186,7 +186,6 @@ config SCSI_UFS_CRYPTO
config SCSI_UFS_HPB
bool "Support UFS Host Performance Booster"
- depends on SCSI_UFSHCD
help
The UFS HPB feature improves random read performance. It caches
L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB
@@ -195,16 +194,18 @@ config SCSI_UFS_HPB
config SCSI_UFS_FAULT_INJECTION
bool "UFS Fault Injection Support"
- depends on SCSI_UFSHCD && FAULT_INJECTION
+ depends on FAULT_INJECTION
help
Enable fault injection support in the UFS driver. This makes it easier
to test the UFS error handler and abort handler.
config SCSI_UFS_HWMON
- bool "UFS Temperature Notification"
+ bool "UFS Temperature Notification"
depends on SCSI_UFSHCD=HWMON || HWMON=y
help
This provides support for UFS hardware monitoring. If enabled,
a hardware monitoring device will be created for the UFS device.
If unsure, say N.
+
+endif
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
index 679289e1a78e..7b08e2e07cc5 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -110,7 +110,6 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- pci_set_drvdata(pdev, hba);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index cd26bc82462e..474a4a064a68 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -853,14 +853,14 @@ static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
}
static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
- int tag, bool op)
+ int tag, bool is_scsi_cmd)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
u32 type;
type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);
- if (op)
+ if (is_scsi_cmd)
hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE);
else
hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE);
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 8c7e8d321746..ab1a7ebd89b1 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -396,6 +396,12 @@ out:
return ret;
}
+static int ufs_hisi_suspend_prepare(struct device *dev)
+{
+ /* RPM and SPM are different. Refer to ufs_hisi_suspend() */
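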
+ return __ufshcd_suspend_prepare(dev, false);
+}
+
static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -578,7 +584,7 @@ static int ufs_hisi_remove(struct platform_device *pdev)
static const struct dev_pm_ops ufs_hisi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
- .prepare = ufshcd_suspend_prepare,
+ .prepare = ufs_hisi_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 5393b5c9dd9c..86a938075f30 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -557,7 +557,7 @@ static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
host->reg_va09 = regulator_get(hba->dev, "va09");
- if (!host->reg_va09)
+ if (IS_ERR(host->reg_va09))
dev_info(hba->dev, "failed to get va09");
else
host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index f725248ba57f..f76692053ca1 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -538,8 +538,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- pci_set_drvdata(pdev, hba);
-
hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
err = ufshcd_init(hba, mmio_base, pdev->irq);
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index eaeae83b999f..87975d1a21c8 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -92,6 +92,11 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
clki->min_freq = clkfreq[i];
clki->max_freq = clkfreq[i+1];
clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!clki->name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
if (!strcmp(name, "ref_clk"))
clki->keep_link_active = true;
dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
@@ -127,6 +132,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
return -ENOMEM;
vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!vreg->name)
+ return -ENOMEM;
snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
@@ -361,8 +368,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
goto dealloc_host;
}
- platform_set_drvdata(pdev, hba);
-
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 13c09dbd99b9..50b12d60dc1b 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -128,8 +128,9 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
- UFSHCD_CMD_PER_LUN = 32,
- UFSHCD_CAN_QUEUE = 32,
+ UFSHCD_NUM_RESERVED = 1,
+ UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
+ UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
};
static const char *const ufshcd_state_name[] = {
@@ -1069,13 +1070,32 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
return false;
}
+/*
+ * Determine the number of pending commands by counting the bits in the SCSI
+ * device budget maps. This approach has been selected because a bit is set in
+ * the budget map before scsi_host_queue_ready() checks the host_self_blocked
+ * flag. The host_self_blocked flag can be modified by calling
+ * scsi_block_requests() or scsi_unblock_requests().
+ */
+static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
+{
+ struct scsi_device *sdev;
+ u32 pending = 0;
+
+ lockdep_assert_held(hba->host->host_lock);
+ __shost_for_each_device(sdev, hba->host)
+ pending += sbitmap_weight(&sdev->budget_map);
+
+ return pending;
+}
+
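/*
 * Editor's note: a runnable userspace analogy for the counting above.
 * sbitmap_weight() boils down to a popcount over the budget bitmap
 * words; summing it per device yields the pending-command total.
 */
#include <stdio.h>

static unsigned int weight(const unsigned long *map, int nwords)
{
	unsigned int bits = 0;

	for (int i = 0; i < nwords; i++)
		bits += __builtin_popcountl(map[i]);
	return bits;
}

int main(void)
{
	/* Two hypothetical devices with 64-slot budget maps. */
	unsigned long dev0[1] = { 0x5 };	/* slots 0 and 2 busy */
	unsigned long dev1[1] = { 0x8 };	/* slot 3 busy */

	printf("pending = %u\n", weight(dev0, 1) + weight(dev1, 1)); /* 3 */
	return 0;
}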
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
unsigned long flags;
int ret = 0;
u32 tm_doorbell;
- u32 tr_doorbell;
+ u32 tr_pending;
bool timeout = false, do_last_check = false;
ktime_t start;
@@ -1093,8 +1113,8 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
}
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (!tm_doorbell && !tr_doorbell) {
+ tr_pending = ufshcd_pending_cmds(hba);
+ if (!tm_doorbell && !tr_pending) {
timeout = false;
break;
} else if (do_last_check) {
@@ -1114,12 +1134,12 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
do_last_check = true;
}
spin_lock_irqsave(hba->host->host_lock, flags);
- } while (tm_doorbell || tr_doorbell);
+ } while (tm_doorbell || tr_pending);
if (timeout) {
dev_err(hba->dev,
"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
- __func__, tm_doorbell, tr_doorbell);
+ __func__, tm_doorbell, tr_pending);
ret = -EBUSY;
}
out:
@@ -1352,25 +1372,6 @@ out:
return ret;
}
-static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
-{
- int *busy = priv;
-
- WARN_ON_ONCE(reserved);
- (*busy)++;
- return false;
-}
-
-/* Whether or not any tag is in use by a request that is in progress. */
-static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
-{
- struct request_queue *q = hba->cmd_queue;
- int busy = 0;
-
- blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
- return busy;
-}
-
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
{
@@ -1666,7 +1667,8 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
bool flush_result;
unsigned long flags;
- if (!ufshcd_is_clkgating_allowed(hba))
+ if (!ufshcd_is_clkgating_allowed(hba) ||
+ !hba->clk_gating.is_initialized)
goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
@@ -1769,7 +1771,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
+ || hba->outstanding_reqs || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
@@ -1826,7 +1828,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- hba->outstanding_tasks ||
+ hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
hba->active_uic_cmd || hba->uic_async_done ||
hba->clk_gating.state == CLKS_OFF)
return;
@@ -1961,11 +1963,15 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
if (!hba->clk_gating.is_initialized)
return;
+
ufshcd_remove_clk_gating_sysfs(hba);
- cancel_work_sync(&hba->clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- destroy_workqueue(hba->clk_gating.clk_gating_workq);
+
+ /* Ungate the clock if necessary. */
+ ufshcd_hold(hba, false);
hba->clk_gating.is_initialized = false;
+ ufshcd_release(hba);
+
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
/* Must be called with host lock acquired */
@@ -2189,6 +2195,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+ hba->reserved_slot = hba->nutrs - 1;
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
@@ -2650,17 +2657,42 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
-static inline bool is_rpmb_wlun(struct scsi_device *sdev)
-{
- return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
-}
-
static inline bool is_device_wlun(struct scsi_device *sdev)
{
return sdev->lun ==
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}
+/*
+ * Associate the UFS controller queue with the default and poll HCTX types.
+ * Initialize the mq_map[] arrays.
+ */
+static int ufshcd_map_queues(struct Scsi_Host *shost)
+{
+ int i, ret;
+
+ for (i = 0; i < shost->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &shost->tag_set.map[i];
+
+ switch (i) {
+ case HCTX_TYPE_DEFAULT:
+ case HCTX_TYPE_POLL:
+ map->nr_queues = 1;
+ break;
+ case HCTX_TYPE_READ:
+ map->nr_queues = 0;
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ }
+ map->queue_offset = 0;
+ ret = blk_mq_map_queues(map);
+ WARN_ON_ONCE(ret);
+ }
+
+ return 0;
+}
+
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
@@ -2696,10 +2728,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp;
int err = 0;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
+ WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
- if (!down_read_trylock(&hba->clk_scaling_lock))
- return SCSI_MLQUEUE_HOST_BUSY;
+ /*
+ * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
+ * calls.
+ */
+ rcu_read_lock();
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
@@ -2779,8 +2814,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
ufshcd_send_command(hba, tag);
+
out:
- up_read(&hba->clk_scaling_lock);
+ rcu_read_unlock();
if (ufs_trigger_eh()) {
unsigned long flags;
@@ -2936,30 +2972,15 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
- struct request_queue *q = hba->cmd_queue;
DECLARE_COMPLETION_ONSTACK(wait);
- struct request *req;
+ const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err;
- int tag;
- down_read(&hba->clk_scaling_lock);
+ /* Protects use of hba->reserved_slot. */
+ lockdep_assert_held(&hba->dev_cmd.lock);
- /*
- * Get free slot, sleep if slots are unavailable.
- * Even though we use wait_event() which sleeps indefinitely,
- * the maximum wait time is bounded by SCSI request timeout.
- */
- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out_unlock;
- }
- tag = req->tag;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
- /* Set the timeout such that the SCSI error handler is not activated. */
- req->timeout = msecs_to_jiffies(2 * timeout);
- blk_mq_start_request(req);
+ down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -2977,8 +2998,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
- blk_mq_free_request(req);
-out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
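/*
 * Editor's note: a runnable userspace analogy for the reserved-slot
 * scheme above. One tag at the top of the queue is set aside for
 * device commands and serialized by a lock, mirroring
 * hba->reserved_slot under hba->dev_cmd.lock. Names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

#define NUTRS		32
#define NUM_RESERVED	1

static const unsigned int reserved_slot = NUTRS - NUM_RESERVED; /* tag 31 */
static pthread_mutex_t dev_cmd_lock = PTHREAD_MUTEX_INITIALIZER;

static void exec_dev_cmd(void)
{
	pthread_mutex_lock(&dev_cmd_lock);	/* protects reserved_slot use */
	printf("device command on tag %u\n", reserved_slot);
	pthread_mutex_unlock(&dev_cmd_lock);
}

int main(void)
{
	exec_dev_cmd();
	return 0;
}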
@@ -4960,11 +4979,7 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
*/
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
- struct ufs_hba *hba = shost_priv(sdev->host);
-
- if (depth > hba->nutrs)
- depth = hba->nutrs;
- return scsi_change_queue_depth(sdev, depth);
+ return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}
static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
@@ -5256,6 +5271,18 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
return retval;
}
+/* Release the resources allocated for processing a SCSI command. */
+static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
+{
+ struct scsi_cmnd *cmd = lrbp->cmd;
+
+ scsi_dma_unmap(cmd);
+ lrbp->cmd = NULL; /* Mark the command as completed. */
+ ufshcd_release(hba);
+ ufshcd_clk_scaling_update_busy(hba);
+}
+
/**
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
@@ -5266,9 +5293,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
- int result;
int index;
- bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
@@ -5278,29 +5303,47 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- result = ufshcd_transfer_rsp_status(hba, lrbp);
- scsi_dma_unmap(cmd);
- cmd->result = result;
- /* Mark completed command as NULL in LRB */
- lrbp->cmd = NULL;
+ cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
+ ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- ufshcd_release(hba);
- update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
UFS_DEV_COMP);
complete(hba->dev_cmd.complete);
- update_scaling = true;
+ ufshcd_clk_scaling_update_busy(hba);
}
}
- if (update_scaling)
- ufshcd_clk_scaling_update_busy(hba);
}
}
+/*
+ * Returns > 0 if one or more commands have been completed or 0 if no
+ * requests have been completed.
+ */
+static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ struct ufs_hba *hba = shost_priv(shost);
+ unsigned long completed_reqs, flags;
+ u32 tr_doorbell;
+
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
+ WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
+ "completed: %#lx; outstanding: %#lx\n", completed_reqs,
+ hba->outstanding_reqs);
+ hba->outstanding_reqs &= ~completed_reqs;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (completed_reqs)
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+
+ return completed_reqs;
+}
+
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
@@ -5311,9 +5354,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
*/
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
- unsigned long completed_reqs, flags;
- u32 tr_doorbell;
-
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
* In order to prevent other interrupts starvation the DB is read once
@@ -5328,21 +5368,13 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
if (ufs_fail_completion())
return IRQ_HANDLED;
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
- WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
- "completed: %#lx; outstanding: %#lx\n", completed_reqs,
- hba->outstanding_reqs);
- hba->outstanding_reqs &= ~completed_reqs;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ /*
+ * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
+ * do not want polling to trigger spurious interrupt complaints.
+ */
+ ufshcd_poll(hba->host, 0);
- if (completed_reqs) {
- __ufshcd_transfer_req_compl(hba, completed_reqs);
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
+ return IRQ_HANDLED;
}
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
@@ -5986,8 +6018,7 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
}
ufshcd_scsi_block_requests(hba);
/* Drain ufshcd_queuecommand() */
- down_write(&hba->clk_scaling_lock);
- up_write(&hba->clk_scaling_lock);
+ synchronize_rcu();
cancel_work_sync(&hba->eeh_work);
}
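/*
 * Editor's note: a kernel-style sketch (not buildable standalone) of
 * the drain idiom introduced above. Submitters run under an RCU
 * read-side critical section, so once new submissions are blocked, a
 * single synchronize_rcu() waits out every call already in flight.
 */
#include <linux/rcupdate.h>

static void submit_path(void)
{
	rcu_read_lock();
	/* ... check adapter state and issue the command ... */
	rcu_read_unlock();
}

static void error_handler_prepare(void)
{
	/* scsi_block_requests() has already stopped new submitters. */
	synchronize_rcu();	/* wait for readers already inside */
	/* From here on, no submitter can observe pre-handler state. */
}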
@@ -6594,6 +6625,8 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_lock_irqsave(host->host_lock, flags);
task_tag = req->tag;
+ WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
+ task_tag);
hba->tmf_rqs[req->tag] = req;
treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
@@ -6711,28 +6744,16 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
- struct request_queue *q = hba->cmd_queue;
DECLARE_COMPLETION_ONSTACK(wait);
- struct request *req;
+ const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err = 0;
- int tag;
u8 upiu_flags;
- down_read(&hba->clk_scaling_lock);
-
- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out_unlock;
- }
- tag = req->tag;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
+ /* Protects use of hba->reserved_slot. */
+ lockdep_assert_held(&hba->dev_cmd.lock);
- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
- err = -EBUSY;
- goto out;
- }
+ down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -6801,9 +6822,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
-out:
- blk_mq_free_request(req);
-out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -7033,6 +7051,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
unsigned long flags;
int err = FAILED;
+ bool outstanding;
u32 reg;
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
@@ -7110,7 +7129,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
- lrbp->cmd = NULL;
+ /*
+ * Clear the corresponding bit from outstanding_reqs since the command
+ * has been aborted successfully.
+ */
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (outstanding)
+ ufshcd_release_scsi_cmd(hba, lrbp);
+
err = SUCCESS;
release:
@@ -7412,7 +7441,7 @@ static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
int ret = 0;
- struct scsi_device *sdev_boot;
+ struct scsi_device *sdev_boot, *sdev_rpmb;
hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
@@ -7423,14 +7452,14 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
}
scsi_device_put(hba->sdev_ufs_device);
- hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
- if (IS_ERR(hba->sdev_rpmb)) {
- ret = PTR_ERR(hba->sdev_rpmb);
+ if (IS_ERR(sdev_rpmb)) {
+ ret = PTR_ERR(sdev_rpmb);
goto remove_sdev_ufs_device;
}
- ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
- scsi_device_put(hba->sdev_rpmb);
+ ufshcd_blk_pm_runtime_init(sdev_rpmb);
+ scsi_device_put(sdev_rpmb);
sdev_boot = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
@@ -7786,7 +7815,7 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
peer_pa_tactivate_us = peer_pa_tactivate *
gran_to_us_table[peer_granularity - 1];
- if (pa_tactivate_us > peer_pa_tactivate_us) {
+ if (pa_tactivate_us >= peer_pa_tactivate_us) {
u32 new_peer_pa_tactivate;
new_peer_pa_tactivate = pa_tactivate_us /
@@ -8156,7 +8185,9 @@ static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
.proc_name = UFSHCD,
+ .map_queues = ufshcd_map_queues,
.queuecommand = ufshcd_queuecommand,
+ .mq_poll = ufshcd_poll,
.slave_alloc = ufshcd_slave_alloc,
.slave_configure = ufshcd_slave_configure,
.slave_destroy = ufshcd_slave_destroy,
@@ -8582,7 +8613,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
* @pwr_mode: device power mode to set
*
* Returns 0 if requested power mode is set successfully
- * Returns non-zero if failed to set the requested power mode
+ * Returns < 0 if it fails to set the requested power mode
*/
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
enum ufs_dev_pwr_mode pwr_mode)
@@ -8636,8 +8667,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (ret > 0 && scsi_sense_valid(&sshdr))
- scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ if (ret > 0) {
+ if (scsi_sense_valid(&sshdr))
+ scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ ret = -EIO;
+ }
}
if (!ret)
@@ -9384,7 +9418,6 @@ void ufshcd_remove(struct ufs_hba *hba)
ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
- blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
@@ -9445,6 +9478,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+ host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
@@ -9486,6 +9520,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
struct device *dev = hba->dev;
char eh_wq_name[sizeof("ufs_eh_wq_00")];
+ /*
+ * dev_set_drvdata() must be called before any callbacks are registered
+ * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
+ * sysfs).
+ */
+ dev_set_drvdata(dev, hba);
+
if (!mmio_base) {
dev_err(hba->dev,
"Invalid memory reference for mmio_base is NULL\n");
@@ -9528,8 +9569,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Configure LRB */
ufshcd_host_memory_configure(hba);
- host->can_queue = hba->nutrs;
- host->cmd_per_lun = hba->nutrs;
+ host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+ host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
host->max_id = UFSHCD_MAX_ID;
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
@@ -9597,12 +9638,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_disable;
}
- hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
- if (IS_ERR(hba->cmd_queue)) {
- err = PTR_ERR(hba->cmd_queue);
- goto out_remove_scsi_host;
- }
-
hba->tmf_tag_set = (struct blk_mq_tag_set) {
.nr_hw_queues = 1,
.queue_depth = hba->nutmrs,
@@ -9611,7 +9646,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
};
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
- goto free_cmd_queue;
+ goto out_remove_scsi_host;
hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
if (IS_ERR(hba->tmf_queue)) {
err = PTR_ERR(hba->tmf_queue);
@@ -9680,8 +9715,6 @@ free_tmf_queue:
blk_cleanup_queue(hba->tmf_queue);
free_tmf_tag_set:
blk_mq_free_tag_set(&hba->tmf_tag_set);
-free_cmd_queue:
- blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
out_disable:
@@ -9703,7 +9736,27 @@ void ufshcd_resume_complete(struct device *dev)
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
-int ufshcd_suspend_prepare(struct device *dev)
+static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
+{
+ struct device *dev = &hba->sdev_ufs_device->sdev_gendev;
+ enum ufs_dev_pwr_mode dev_pwr_mode;
+ enum uic_link_state link_state;
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
+ link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
+ res = pm_runtime_suspended(dev) &&
+ hba->curr_dev_pwr_mode == dev_pwr_mode &&
+ hba->uic_link_state == link_state &&
+ !hba->dev_info.b_rpm_dev_flush_capable;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return res;
+}
+
+int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
@@ -9715,15 +9768,30 @@ int ufshcd_suspend_prepare(struct device *dev)
* Refer ufshcd_resume_complete()
*/
if (hba->sdev_ufs_device) {
- ret = ufshcd_rpm_get_sync(hba);
- if (ret < 0 && ret != -EACCES) {
- ufshcd_rpm_put(hba);
- return ret;
+ /* Prevent runtime suspend */
+ ufshcd_rpm_get_noresume(hba);
+ /*
+ * Check if already runtime suspended in same state as system
+ * suspend would be.
+ */
+ if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
+ /* RPM state is not ok for SPM, so runtime resume */
+ ret = ufshcd_rpm_resume(hba);
+ if (ret < 0 && ret != -EACCES) {
+ ufshcd_rpm_put(hba);
+ return ret;
+ }
}
hba->complete_put = true;
}
return 0;
}
+EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
+
+int ufshcd_suspend_prepare(struct device *dev)
+{
+ return __ufshcd_suspend_prepare(dev, true);
+}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 54750d72c8fb..88c20f3608c2 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -338,7 +338,8 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *,
struct ufs_pa_layer_attr *);
- void (*setup_xfer_req)(struct ufs_hba *, int, bool);
+ void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
+ bool is_scsi_cmd);
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
@@ -737,13 +738,13 @@ struct ufs_hba_monitor {
* @host: Scsi_Host instance of the driver
* @dev: device handle
* @lrb: local reference block
- * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
* @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_lock: Protects @outstanding_reqs.
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
* @nutrs: Transfer Request Queue depth supported by controller
* @nutmrs: Task Management Queue depth supported by controller
+ * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
* @ufs_version: UFS Version to which controller complies
* @vops: pointer to variant specific operations
* @priv: pointer to variant specific private data
@@ -777,6 +778,7 @@ struct ufs_hba_monitor {
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device max valid pwm
+ * @clk_scaling_lock: used to serialize device commands and clock scaling
* @desc_size: descriptor sizes reported by device
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
@@ -802,13 +804,11 @@ struct ufs_hba {
struct Scsi_Host *host;
struct device *dev;
- struct request_queue *cmd_queue;
/*
* This field is to keep a reference to "scsi_device" corresponding to
* "UFS device" W-LU.
*/
struct scsi_device *sdev_ufs_device;
- struct scsi_device *sdev_rpmb;
#ifdef CONFIG_SCSI_UFS_HWMON
struct device *hwmon_device;
@@ -836,6 +836,7 @@ struct ufs_hba {
u32 capabilities;
int nutrs;
int nutmrs;
+ u32 reserved_slot;
u32 ufs_version;
const struct ufs_hba_variant_ops *vops;
struct ufs_hba_variant_params *vps;
@@ -1211,6 +1212,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
+int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
/* Wrapper functions for safely calling variant operations */
@@ -1420,6 +1422,16 @@ static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
}
+static inline void ufshcd_rpm_get_noresume(struct ufs_hba *hba)
+{
+ pm_runtime_get_noresume(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_resume(struct ufs_hba *hba)
+{
+ return pm_runtime_resume(&hba->sdev_ufs_device->sdev_gendev);
+}
+
static inline int ufshcd_rpm_put(struct ufs_hba *hba)
{
return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6a295c88d850..a7ff0e5b5494 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -142,7 +142,8 @@ static inline u32 ufshci_version(u32 major, u32 minor)
#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
CONTROLLER_FATAL_ERROR |\
SYSTEM_BUS_FATAL_ERROR |\
- CRYPTO_ENGINE_FATAL_ERROR)
+ CRYPTO_ENGINE_FATAL_ERROR |\
+ UIC_LINK_LOST)
/* HCS - Host Controller Status 30h */
#define DEVICE_PRESENT 0x1
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index ded5ba9b1466..2d36a0715fca 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -10,7 +10,6 @@
*/
#include <asm/unaligned.h>
-#include <linux/async.h>
#include "ufshcd.h"
#include "ufshpb.h"
@@ -677,7 +676,7 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
ufshpb_set_unmap_cmd(rq->cmd, rgn);
rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
- blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
+ blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
hpb->stats.umap_req_cnt++;
}
@@ -719,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
map_req->rb.srgn_idx, mem_size);
rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
- blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
+ blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
hpb->stats.map_req_cnt++;
return 0;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 28e1d98ae102..0e6110da69e7 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -528,7 +528,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
if (!rq || !scsi_prot_sg_count(sc))
return;
- bi = blk_get_integrity(rq->rq_disk);
+ bi = blk_get_integrity(rq->q->disk);
if (sc->sc_data_direction == DMA_TO_DEVICE)
cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
@@ -778,7 +778,7 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
/* Stop all the virtqueues. */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index e8a30c4c5aec..a8562678c437 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -3,6 +3,7 @@ menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/actions/Kconfig"
source "drivers/soc/amlogic/Kconfig"
+source "drivers/soc/apple/Kconfig"
source "drivers/soc/aspeed/Kconfig"
source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index a05e9fbcd3e0..adb30c2d4fea 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_ARCH_ACTIONS) += actions/
+obj-$(CONFIG_ARCH_APPLE) += apple/
obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
new file mode 100644
index 000000000000..9b8de31d6a8f
--- /dev/null
+++ b/drivers/soc/apple/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+if ARCH_APPLE || COMPILE_TEST
+
+menu "Apple SoC drivers"
+
+config APPLE_PMGR_PWRSTATE
+ bool "Apple SoC PMGR power state control"
+ depends on PM
+ select REGMAP
+ select MFD_SYSCON
+ select PM_GENERIC_DOMAINS
+ select RESET_CONTROLLER
+ default ARCH_APPLE
+ help
+ The PMGR block in Apple SoCs provides high-level power state
+ controls for SoC devices. This driver manages them through the
+ generic power domain framework, and also provides reset support.
+
+endmenu
+
+endif
diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile
new file mode 100644
index 000000000000..c114e84667e4
--- /dev/null
+++ b/drivers/soc/apple/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_APPLE_PMGR_PWRSTATE) += apple-pmgr-pwrstate.o
diff --git a/drivers/soc/apple/apple-pmgr-pwrstate.c b/drivers/soc/apple/apple-pmgr-pwrstate.c
new file mode 100644
index 000000000000..e1122288409a
--- /dev/null
+++ b/drivers/soc/apple/apple-pmgr-pwrstate.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SoC PMGR device power state driver
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset-controller.h>
+#include <linux/module.h>
+
+#define APPLE_PMGR_RESET BIT(31)
+#define APPLE_PMGR_AUTO_ENABLE BIT(28)
+#define APPLE_PMGR_PS_AUTO GENMASK(27, 24)
+#define APPLE_PMGR_PS_MIN GENMASK(19, 16)
+#define APPLE_PMGR_PARENT_OFF BIT(11)
+#define APPLE_PMGR_DEV_DISABLE BIT(10)
+#define APPLE_PMGR_WAS_CLKGATED BIT(9)
+#define APPLE_PMGR_WAS_PWRGATED BIT(8)
+#define APPLE_PMGR_PS_ACTUAL GENMASK(7, 4)
+#define APPLE_PMGR_PS_TARGET GENMASK(3, 0)
+
+#define APPLE_PMGR_FLAGS (APPLE_PMGR_WAS_CLKGATED | APPLE_PMGR_WAS_PWRGATED)
+
+#define APPLE_PMGR_PS_ACTIVE 0xf
+#define APPLE_PMGR_PS_CLKGATE 0x4
+#define APPLE_PMGR_PS_PWRGATE 0x0
+
+#define APPLE_PMGR_PS_SET_TIMEOUT 100
+#define APPLE_PMGR_RESET_TIME 1
+
+struct apple_pmgr_ps {
+ struct device *dev;
+ struct generic_pm_domain genpd;
+ struct reset_controller_dev rcdev;
+ struct regmap *regmap;
+ u32 offset;
+ u32 min_state;
+};
+
+#define genpd_to_apple_pmgr_ps(_genpd) container_of(_genpd, struct apple_pmgr_ps, genpd)
+#define rcdev_to_apple_pmgr_ps(_rcdev) container_of(_rcdev, struct apple_pmgr_ps, rcdev)
+
+static int apple_pmgr_ps_set(struct generic_pm_domain *genpd, u32 pstate, bool auto_enable)
+{
+ int ret;
+ struct apple_pmgr_ps *ps = genpd_to_apple_pmgr_ps(genpd);
+ u32 reg;
+
+ ret = regmap_read(ps->regmap, ps->offset, &reg);
+ if (ret < 0)
+ return ret;
+
+ /* Resets are synchronous, and only work if the device is powered and clocked. */
+ if (reg & APPLE_PMGR_RESET && pstate != APPLE_PMGR_PS_ACTIVE)
+ dev_err(ps->dev, "PS %s: powering off with RESET active\n",
+ genpd->name);
+
+ reg &= ~(APPLE_PMGR_AUTO_ENABLE | APPLE_PMGR_FLAGS | APPLE_PMGR_PS_TARGET);
+ reg |= FIELD_PREP(APPLE_PMGR_PS_TARGET, pstate);
+
+ dev_dbg(ps->dev, "PS %s: pwrstate = 0x%x: 0x%x\n", genpd->name, pstate, reg);
+
+ regmap_write(ps->regmap, ps->offset, reg);
+
+ ret = regmap_read_poll_timeout_atomic(
+ ps->regmap, ps->offset, reg,
+ (FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == pstate), 1,
+ APPLE_PMGR_PS_SET_TIMEOUT);
+ if (ret < 0)
+ dev_err(ps->dev, "PS %s: Failed to reach power state 0x%x (now: 0x%x)\n",
+ genpd->name, pstate, reg);
+
+ if (auto_enable) {
+ /* Not all devices implement this; this is a no-op where not implemented. */
+ reg &= ~APPLE_PMGR_FLAGS;
+ reg |= APPLE_PMGR_AUTO_ENABLE;
+ regmap_write(ps->regmap, ps->offset, reg);
+ }
+
+ return ret;
+}
+
+static bool apple_pmgr_ps_is_active(struct apple_pmgr_ps *ps)
+{
+ u32 reg = 0;
+
+ regmap_read(ps->regmap, ps->offset, &reg);
+ /*
+ * We consider domains as active if they are actually on, or if they have auto-PM
+ * enabled and the intended target is on.
+ */
+ return (FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == APPLE_PMGR_PS_ACTIVE ||
+ (FIELD_GET(APPLE_PMGR_PS_TARGET, reg) == APPLE_PMGR_PS_ACTIVE &&
+ reg & APPLE_PMGR_AUTO_ENABLE));
+}
+
+static int apple_pmgr_ps_power_on(struct generic_pm_domain *genpd)
+{
+ return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_ACTIVE, true);
+}
+
+static int apple_pmgr_ps_power_off(struct generic_pm_domain *genpd)
+{
+ return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_PWRGATE, false);
+}
+
+static int apple_pmgr_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
+
+ mutex_lock(&ps->genpd.mlock);
+
+ if (ps->genpd.status == GENPD_STATE_OFF)
+ dev_err(ps->dev, "PS 0x%x: asserting RESET while powered down\n", ps->offset);
+
+ dev_dbg(ps->dev, "PS 0x%x: assert reset\n", ps->offset);
+ /* Quiesce device before asserting reset */
+ regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE,
+ APPLE_PMGR_DEV_DISABLE);
+ regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET,
+ APPLE_PMGR_RESET);
+
+ mutex_unlock(&ps->genpd.mlock);
+
+ return 0;
+}
+
+static int apple_pmgr_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
+
+ mutex_lock(&ps->genpd.mlock);
+
+ dev_dbg(ps->dev, "PS 0x%x: deassert reset\n", ps->offset);
+ regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET, 0);
+ regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE, 0);
+
+ if (ps->genpd.status == GENPD_STATE_OFF)
+ dev_err(ps->dev, "PS 0x%x: RESET was deasserted while powered down\n", ps->offset);
+
+ mutex_unlock(&ps->genpd.mlock);
+
+ return 0;
+}
+
+static int apple_pmgr_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ int ret;
+
+ ret = apple_pmgr_reset_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ usleep_range(APPLE_PMGR_RESET_TIME, 2 * APPLE_PMGR_RESET_TIME);
+
+ return apple_pmgr_reset_deassert(rcdev, id);
+}
+
+static int apple_pmgr_reset_status(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
+ u32 reg = 0;
+
+ regmap_read(ps->regmap, ps->offset, &reg);
+
+ return !!(reg & APPLE_PMGR_RESET);
+}
+
+const struct reset_control_ops apple_pmgr_reset_ops = {
+ .assert = apple_pmgr_reset_assert,
+ .deassert = apple_pmgr_reset_deassert,
+ .reset = apple_pmgr_reset_reset,
+ .status = apple_pmgr_reset_status,
+};
+
+static int apple_pmgr_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ return 0;
+}
+
+static int apple_pmgr_ps_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct apple_pmgr_ps *ps;
+ struct regmap *regmap;
+ struct of_phandle_iterator it;
+ int ret;
+ const char *name;
+ bool active;
+
+ regmap = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ps = devm_kzalloc(dev, sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ return -ENOMEM;
+
+ ps->dev = dev;
+ ps->regmap = regmap;
+
+ ret = of_property_read_string(node, "label", &name);
+ if (ret < 0) {
+ dev_err(dev, "missing label property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "reg", &ps->offset);
+ if (ret < 0) {
+ dev_err(dev, "missing reg property\n");
+ return ret;
+ }
+
+ ps->genpd.name = name;
+ ps->genpd.power_on = apple_pmgr_ps_power_on;
+ ps->genpd.power_off = apple_pmgr_ps_power_off;
+
+ ret = of_property_read_u32(node, "apple,min-state", &ps->min_state);
+ if (ret == 0 && ps->min_state <= APPLE_PMGR_PS_ACTIVE)
+ regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_PS_MIN,
+ FIELD_PREP(APPLE_PMGR_PS_MIN, ps->min_state));
+
+ active = apple_pmgr_ps_is_active(ps);
+ if (of_property_read_bool(node, "apple,always-on")) {
+ ps->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
+ if (!active) {
+ dev_warn(dev, "always-on domain %s is not on at boot\n", name);
+ /* Turn it on so pm_genpd_init does not fail */
+ active = apple_pmgr_ps_power_on(&ps->genpd) == 0;
+ }
+ }
+
+ /* Turn on auto-PM if the domain is already on */
+ if (active)
+ regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_AUTO_ENABLE,
+ APPLE_PMGR_AUTO_ENABLE);
+
+ ret = pm_genpd_init(&ps->genpd, NULL, !active);
+ if (ret < 0) {
+ dev_err(dev, "pm_genpd_init failed\n");
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_simple(node, &ps->genpd);
+ if (ret < 0) {
+ dev_err(dev, "of_genpd_add_provider_simple failed\n");
+ return ret;
+ }
+
+ of_for_each_phandle(&it, ret, node, "power-domains", "#power-domain-cells", -1) {
+ struct of_phandle_args parent, child;
+
+ parent.np = it.node;
+ parent.args_count = of_phandle_iterator_args(&it, parent.args, MAX_PHANDLE_ARGS);
+ child.np = node;
+ child.args_count = 0;
+ ret = of_genpd_add_subdomain(&parent, &child);
+
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(parent.np);
+ goto err_remove;
+ } else if (ret < 0) {
+ dev_err(dev, "failed to add to parent domain: %d (%s -> %s)\n",
+ ret, it.node->name, node->name);
+ of_node_put(parent.np);
+ goto err_remove;
+ }
+ }
+
+ /*
+ * Do not participate in regular PM; parent power domains are handled via the
+ * genpd hierarchy.
+ */
+ pm_genpd_remove_device(dev);
+
+ ps->rcdev.owner = THIS_MODULE;
+ ps->rcdev.nr_resets = 1;
+ ps->rcdev.ops = &apple_pmgr_reset_ops;
+ ps->rcdev.of_node = dev->of_node;
+ ps->rcdev.of_reset_n_cells = 0;
+ ps->rcdev.of_xlate = apple_pmgr_reset_xlate;
+
+ ret = devm_reset_controller_register(dev, &ps->rcdev);
+ if (ret < 0)
+ goto err_remove;
+
+ return 0;
+err_remove:
+ of_genpd_del_provider(node);
+ pm_genpd_remove(&ps->genpd);
+ return ret;
+}
+
+static const struct of_device_id apple_pmgr_ps_of_match[] = {
+ { .compatible = "apple,pmgr-pwrstate" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, apple_pmgr_ps_of_match);
+
+static struct platform_driver apple_pmgr_ps_driver = {
+ .probe = apple_pmgr_ps_probe,
+ .driver = {
+ .name = "apple-pmgr-pwrstate",
+ .of_match_table = apple_pmgr_ps_of_match,
+ },
+};
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_DESCRIPTION("PMGR power state driver for Apple SoCs");
+MODULE_LICENSE("GPL v2");
+
+module_platform_driver(apple_pmgr_ps_driver);
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-mips.c b/drivers/soc/bcm/brcmstb/pm/pm-mips.c
index cdc3e387f049..4dfb5a85032b 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-mips.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-mips.c
@@ -405,11 +405,14 @@ static int brcmstb_pm_init(void)
i = ctrl.num_memc;
if (i >= MAX_NUM_MEMC) {
pr_warn("Too many MEMCs (max %d)\n", MAX_NUM_MEMC);
+ of_node_put(dn);
break;
}
base = brcmstb_ioremap_node(dn, 0);
- if (IS_ERR(base))
+ if (IS_ERR(base)) {
+ of_node_put(dn);
goto ddr_err;
+ }
ctrl.memcs[i].ddr_phy_base = base;
ctrl.num_memc++;
diff --git a/drivers/soc/canaan/Kconfig b/drivers/soc/canaan/Kconfig
index 853096b7e84c..2527cf5757ec 100644
--- a/drivers/soc/canaan/Kconfig
+++ b/drivers/soc/canaan/Kconfig
@@ -5,7 +5,6 @@ config SOC_K210_SYSCTL
depends on RISCV && SOC_CANAAN && OF
default SOC_CANAAN
select PM
- select SYSCON
select MFD_SYSCON
help
Canaan Kendryte K210 SoC system controller driver.
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index dd948889eeab..5a2edc48dd79 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -88,7 +88,7 @@ static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
irq = dpio_dev->irqs[0];
/* clear the affinity hint */
- irq_set_affinity_hint(irq->msi_desc->irq, NULL);
+ irq_set_affinity_hint(irq->virq, NULL);
}
static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
@@ -98,7 +98,7 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
irq = dpio_dev->irqs[0];
error = devm_request_irq(&dpio_dev->dev,
- irq->msi_desc->irq,
+ irq->virq,
dpio_irq_handler,
0,
dev_name(&dpio_dev->dev),
@@ -111,10 +111,10 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
}
/* set the affinity hint */
- if (irq_set_affinity_hint(irq->msi_desc->irq, cpumask_of(cpu)))
+ if (irq_set_affinity_hint(irq->virq, cpumask_of(cpu)))
dev_err(&dpio_dev->dev,
"irq_set_affinity failed irq %d cpu %d\n",
- irq->msi_desc->irq, cpu);
+ irq->virq, cpu);
return 0;
}
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index acda8a5637c5..4d7b9caee1c4 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -155,7 +155,7 @@ static int bman_portal_probe(struct platform_device *pdev)
}
spin_lock(&bman_lock);
- cpu = cpumask_next_zero(-1, &portal_cpus);
+ cpu = cpumask_first_zero(&portal_cpus);
if (cpu >= nr_cpu_ids) {
__bman_portals_probed = 1;
/* unassigned portal, skip init */
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 96f74a1dc603..e23b60618c1a 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -248,7 +248,7 @@ static int qman_portal_probe(struct platform_device *pdev)
pcfg->pools = qm_get_pools_sdqcr();
spin_lock(&qman_lock);
- cpu = cpumask_next_zero(-1, &portal_cpus);
+ cpu = cpumask_first_zero(&portal_cpus);
if (cpu >= nr_cpu_ids) {
__qman_portals_probed = 1;
/* unassigned portal, skip init */
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index b8d52d8d29db..3e59d479d001 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -377,7 +377,7 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
}
}
- pm_runtime_put(domain->dev);
+ pm_runtime_put_sync_suspend(domain->dev);
return 0;
@@ -734,6 +734,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.map = IMX8MM_VPUH1_A53_DOMAIN,
},
.pgc = BIT(IMX8MM_PGC_VPUH1),
+ .keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_DISPMIX] = {
@@ -840,6 +841,32 @@ static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
.hskack = IMX8MN_GPUMIX_HSK_PWRDNACKN,
},
.pgc = BIT(IMX8MN_PGC_GPUMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MN_POWER_DOMAIN_DISPMIX] = {
+ .genpd = {
+ .name = "dispmix",
+ },
+ .bits = {
+ .pxx = IMX8MN_DISPMIX_SW_Pxx_REQ,
+ .map = IMX8MN_DISPMIX_A53_DOMAIN,
+ .hskreq = IMX8MN_DISPMIX_HSK_PWRDNREQN,
+ .hskack = IMX8MN_DISPMIX_HSK_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MN_PGC_DISPMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MN_POWER_DOMAIN_MIPI] = {
+ .genpd = {
+ .name = "mipi",
+ },
+ .bits = {
+ .pxx = IMX8MN_MIPI_SW_Pxx_REQ,
+ .map = IMX8MN_MIPI_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MN_PGC_MIPI),
},
};
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index c2f076b56e24..511e74f0db8a 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <dt-bindings/power/imx8mm-power.h>
+#include <dt-bindings/power/imx8mn-power.h>
#define BLK_SFT_RSTN 0x0
#define BLK_CLK_EN 0x4
@@ -517,6 +518,77 @@ static const struct imx8m_blk_ctrl_data imx8mm_disp_blk_ctl_dev_data = {
.num_domains = ARRAY_SIZE(imx8mm_disp_blk_ctl_domain_data),
};
+
+static int imx8mn_disp_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
+ power_nb);
+
+ if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
+ return NOTIFY_OK;
+
+ /* Enable bus clock and deassert bus reset */
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(8));
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(8));
+
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
+ if (action == GENPD_NOTIFY_ON)
+ udelay(5);
+
+ return NOTIFY_OK;
+}
+
+static const struct imx8m_blk_ctrl_domain_data imx8mn_disp_blk_ctl_domain_data[] = {
+ [IMX8MN_DISPBLK_PD_MIPI_DSI] = {
+ .name = "dispblk-mipi-dsi",
+ .clk_names = (const char *[]){ "dsi-pclk", "dsi-ref", },
+ .num_clks = 2,
+ .gpc_name = "mipi-dsi",
+ .rst_mask = BIT(0) | BIT(1),
+ .clk_mask = BIT(0) | BIT(1),
+ .mipi_phy_rst_mask = BIT(17),
+ },
+ [IMX8MN_DISPBLK_PD_MIPI_CSI] = {
+ .name = "dispblk-mipi-csi",
+ .clk_names = (const char *[]){ "csi-aclk", "csi-pclk" },
+ .num_clks = 2,
+ .gpc_name = "mipi-csi",
+ .rst_mask = BIT(2) | BIT(3),
+ .clk_mask = BIT(2) | BIT(3),
+ .mipi_phy_rst_mask = BIT(16),
+ },
+ [IMX8MN_DISPBLK_PD_LCDIF] = {
+ .name = "dispblk-lcdif",
+ .clk_names = (const char *[]){ "lcdif-axi", "lcdif-apb", "lcdif-pix", },
+ .num_clks = 3,
+ .gpc_name = "lcdif",
+ .rst_mask = BIT(4) | BIT(5),
+ .clk_mask = BIT(4) | BIT(5),
+ },
+ [IMX8MN_DISPBLK_PD_ISI] = {
+ .name = "dispblk-isi",
+ .clk_names = (const char *[]){ "disp_axi", "disp_apb", "disp_axi_root",
+ "disp_apb_root"},
+ .num_clks = 4,
+ .gpc_name = "isi",
+ .rst_mask = BIT(6) | BIT(7),
+ .clk_mask = BIT(6) | BIT(7),
+ },
+};
+
+static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = {
+ .max_reg = 0x84,
+ .power_notifier_fn = imx8mn_disp_power_notifier,
+ .domains = imx8mn_disp_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data),
+};
+
static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
{
.compatible = "fsl,imx8mm-vpu-blk-ctrl",
@@ -524,7 +596,10 @@ static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
}, {
.compatible = "fsl,imx8mm-disp-blk-ctrl",
.data = &imx8mm_disp_blk_ctl_dev_data
- } ,{
+ }, {
+ .compatible = "fsl,imx8mn-disp-blk-ctrl",
+ .data = &imx8mn_disp_blk_ctl_dev_data
+ }, {
/* Sentinel */
}
};
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index ca75b14931ec..670cc82d17dc 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -411,12 +411,17 @@ out:
return ret;
}
-static void init_clks(struct platform_device *pdev, struct clk **clk)
+static int init_clks(struct platform_device *pdev, struct clk **clk)
{
int i;
- for (i = CLK_NONE + 1; i < CLK_MAX; i++)
+ for (i = CLK_NONE + 1; i < CLK_MAX; i++) {
clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+ if (IS_ERR(clk[i]))
+ return PTR_ERR(clk[i]);
+ }
+
+ return 0;
}
static struct scp *init_scp(struct platform_device *pdev,
@@ -426,7 +431,7 @@ static struct scp *init_scp(struct platform_device *pdev,
{
struct genpd_onecell_data *pd_data;
struct resource *res;
- int i, j;
+ int i, j, ret;
struct scp *scp;
struct clk *clk[CLK_MAX];
@@ -481,7 +486,9 @@ static struct scp *init_scp(struct platform_device *pdev,
pd_data->num_domains = num;
- init_clks(pdev, clk);
+ ret = init_clks(pdev, clk);
+ if (ret)
+ return ERR_PTR(ret);
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c
index 1d818a8ba208..e9b854ed1bdf 100644
--- a/drivers/soc/qcom/cpr.c
+++ b/drivers/soc/qcom/cpr.c
@@ -1010,7 +1010,7 @@ static int cpr_interpolate(const struct corner *corner, int step_volt,
return corner->uV;
temp = f_diff * (uV_high - uV_low);
- do_div(temp, f_high - f_low);
+ temp = div64_ul(temp, f_high - f_low);
/*
* max_volt_scale has units of uV/MHz while freq values
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 6bf2f1d1f2c5..ec52f29c8867 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -195,6 +195,28 @@ static const struct llcc_slice_config sm8250_data[] = {
{ LLCC_WRCACHE, 31, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
};
+static const struct llcc_slice_config sm8350_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+ { LLCC_DISP, 16, 3072, 2, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 256, 1, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+ { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CPUSS1, 3, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+};
+
static const struct qcom_llcc_config sc7180_cfg = {
.sct_data = sc7180_data,
.size = ARRAY_SIZE(sc7180_data),
@@ -228,6 +250,11 @@ static const struct qcom_llcc_config sm8250_cfg = {
.size = ARRAY_SIZE(sm8250_data),
};
+static const struct qcom_llcc_config sm8350_cfg = {
+ .sct_data = sm8350_data,
+ .size = ARRAY_SIZE(sm8350_data),
+};
+
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
/**
@@ -644,6 +671,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg },
{ .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
{ .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
+ { .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg },
{ }
};
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 34acf58bbb0d..cbe5e39fdaeb 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -352,7 +352,7 @@ static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
return ret;
}
-static struct thermal_cooling_device_ops qmp_cooling_device_ops = {
+static const struct thermal_cooling_device_ops qmp_cooling_device_ops = {
.get_max_state = qmp_cdev_get_max_state,
.get_cur_state = qmp_cdev_get_cur_state,
.set_cur_state = qmp_cdev_set_cur_state,
diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
index 131d24caabf8..d6bfd1bbdc2a 100644
--- a/drivers/soc/qcom/qcom_stats.c
+++ b/drivers/soc/qcom/qcom_stats.c
@@ -237,6 +237,15 @@ static const struct stats_config rpm_data = {
.subsystem_stats_in_smem = false,
};
+/* Older RPM firmware versions have the stats at a fixed offset instead */
+static const struct stats_config rpm_data_dba0 = {
+ .stats_offset = 0xdba0,
+ .num_records = 2,
+ .appended_stats_avail = true,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = false,
+};
+
static const struct stats_config rpmh_data = {
.stats_offset = 0x48,
.num_records = 3,
@@ -246,6 +255,10 @@ static const struct stats_config rpmh_data = {
};
static const struct of_device_id qcom_stats_table[] = {
+ { .compatible = "qcom,apq8084-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,msm8226-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,msm8916-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,msm8974-rpm-stats", .data = &rpm_data_dba0 },
{ .compatible = "qcom,rpm-stats", .data = &rpm_data },
{ .compatible = "qcom,rpmh-stats", .data = &rpmh_data },
{ }
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 1a03eaa38c46..c8c4c730b135 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -96,7 +96,7 @@ static void qmi_recv_del_server(struct qmi_handle *qmi,
* @node: id of the dying node
*
* Signals the client that all previously registered services on this node are
- * now gone and then calls the bye callback to allow the client client further
+ * now gone and then calls the bye callback to allow the client further
* cleaning up resources associated with this remote.
*/
static void qmi_recv_bye(struct qmi_handle *qmi,
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 3a12a482f6b2..01c2f50cb97e 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -691,7 +691,7 @@ static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
* @drv: The controller.
* @msg: The data to be written to the controller.
*
- * This should only be called for for sleep/wake state, never active-only
+ * This should only be called for sleep/wake state, never active-only
* state.
*
* The caller must ensure that no other RPMH actions are happening and the
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 1118345d8824..58f1dc9b9cb7 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -63,73 +63,134 @@ struct rpmhpd_desc {
static DEFINE_MUTEX(rpmhpd_lock);
-/* SDM845 RPMH powerdomains */
+/* RPMH powerdomains */
+
+static struct rpmhpd cx_ao;
+static struct rpmhpd mx;
+static struct rpmhpd mx_ao;
+static struct rpmhpd cx = {
+ .pd = { .name = "cx", },
+ .peer = &cx_ao,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd cx_ao = {
+ .pd = { .name = "cx_ao", },
+ .active_only = true,
+ .peer = &cx,
+ .res_name = "cx.lvl",
+};
-static struct rpmhpd sdm845_ebi = {
+static struct rpmhpd cx_ao_w_mx_parent;
+static struct rpmhpd cx_w_mx_parent = {
+ .pd = { .name = "cx", },
+ .peer = &cx_ao_w_mx_parent,
+ .parent = &mx.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd cx_ao_w_mx_parent = {
+ .pd = { .name = "cx_ao", },
+ .active_only = true,
+ .peer = &cx_w_mx_parent,
+ .parent = &mx_ao.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd ebi = {
.pd = { .name = "ebi", },
.res_name = "ebi.lvl",
};
-static struct rpmhpd sdm845_lmx = {
- .pd = { .name = "lmx", },
- .res_name = "lmx.lvl",
+static struct rpmhpd gfx = {
+ .pd = { .name = "gfx", },
+ .res_name = "gfx.lvl",
};
-static struct rpmhpd sdm845_lcx = {
+static struct rpmhpd lcx = {
.pd = { .name = "lcx", },
.res_name = "lcx.lvl",
};
-static struct rpmhpd sdm845_gfx = {
- .pd = { .name = "gfx", },
- .res_name = "gfx.lvl",
+static struct rpmhpd lmx = {
+ .pd = { .name = "lmx", },
+ .res_name = "lmx.lvl",
};
-static struct rpmhpd sdm845_mss = {
+static struct rpmhpd mmcx_ao;
+static struct rpmhpd mmcx = {
+ .pd = { .name = "mmcx", },
+ .peer = &mmcx_ao,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd mmcx_ao = {
+ .pd = { .name = "mmcx_ao", },
+ .active_only = true,
+ .peer = &mmcx,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd mmcx_ao_w_cx_parent;
+static struct rpmhpd mmcx_w_cx_parent = {
+ .pd = { .name = "mmcx", },
+ .peer = &mmcx_ao_w_cx_parent,
+ .parent = &cx.pd,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd mmcx_ao_w_cx_parent = {
+ .pd = { .name = "mmcx_ao", },
+ .active_only = true,
+ .peer = &mmcx_w_cx_parent,
+ .parent = &cx_ao.pd,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd mss = {
.pd = { .name = "mss", },
.res_name = "mss.lvl",
};
-static struct rpmhpd sdm845_mx_ao;
-static struct rpmhpd sdm845_mx = {
+static struct rpmhpd mx_ao;
+static struct rpmhpd mx = {
.pd = { .name = "mx", },
- .peer = &sdm845_mx_ao,
+ .peer = &mx_ao,
.res_name = "mx.lvl",
};
-static struct rpmhpd sdm845_mx_ao = {
+static struct rpmhpd mx_ao = {
.pd = { .name = "mx_ao", },
.active_only = true,
- .peer = &sdm845_mx,
+ .peer = &mx,
.res_name = "mx.lvl",
};
-static struct rpmhpd sdm845_cx_ao;
-static struct rpmhpd sdm845_cx = {
- .pd = { .name = "cx", },
- .peer = &sdm845_cx_ao,
- .parent = &sdm845_mx.pd,
- .res_name = "cx.lvl",
+static struct rpmhpd mxc_ao;
+static struct rpmhpd mxc = {
+ .pd = { .name = "mxc", },
+ .peer = &mxc_ao,
+ .res_name = "mxc.lvl",
};
-static struct rpmhpd sdm845_cx_ao = {
- .pd = { .name = "cx_ao", },
+static struct rpmhpd mxc_ao = {
+ .pd = { .name = "mxc_ao", },
.active_only = true,
- .peer = &sdm845_cx,
- .parent = &sdm845_mx_ao.pd,
- .res_name = "cx.lvl",
+ .peer = &mxc,
+ .res_name = "mxc.lvl",
};
+/* SDM845 RPMH powerdomains */
static struct rpmhpd *sdm845_rpmhpds[] = {
- [SDM845_EBI] = &sdm845_ebi,
- [SDM845_MX] = &sdm845_mx,
- [SDM845_MX_AO] = &sdm845_mx_ao,
- [SDM845_CX] = &sdm845_cx,
- [SDM845_CX_AO] = &sdm845_cx_ao,
- [SDM845_LMX] = &sdm845_lmx,
- [SDM845_LCX] = &sdm845_lcx,
- [SDM845_GFX] = &sdm845_gfx,
- [SDM845_MSS] = &sdm845_mss,
+ [SDM845_CX] = &cx_w_mx_parent,
+ [SDM845_CX_AO] = &cx_ao_w_mx_parent,
+ [SDM845_EBI] = &ebi,
+ [SDM845_GFX] = &gfx,
+ [SDM845_LCX] = &lcx,
+ [SDM845_LMX] = &lmx,
+ [SDM845_MSS] = &mss,
+ [SDM845_MX] = &mx,
+ [SDM845_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sdm845_desc = {
@@ -139,9 +200,9 @@ static const struct rpmhpd_desc sdm845_desc = {
/* SDX55 RPMH powerdomains */
static struct rpmhpd *sdx55_rpmhpds[] = {
- [SDX55_MSS] = &sdm845_mss,
- [SDX55_MX] = &sdm845_mx,
- [SDX55_CX] = &sdm845_cx,
+ [SDX55_CX] = &cx_w_mx_parent,
+ [SDX55_MSS] = &mss,
+ [SDX55_MX] = &mx,
};
static const struct rpmhpd_desc sdx55_desc = {
@@ -151,12 +212,12 @@ static const struct rpmhpd_desc sdx55_desc = {
/* SM6350 RPMH powerdomains */
static struct rpmhpd *sm6350_rpmhpds[] = {
- [SM6350_CX] = &sdm845_cx,
- [SM6350_GFX] = &sdm845_gfx,
- [SM6350_LCX] = &sdm845_lcx,
- [SM6350_LMX] = &sdm845_lmx,
- [SM6350_MSS] = &sdm845_mss,
- [SM6350_MX] = &sdm845_mx,
+ [SM6350_CX] = &cx_w_mx_parent,
+ [SM6350_GFX] = &gfx,
+ [SM6350_LCX] = &lcx,
+ [SM6350_LMX] = &lmx,
+ [SM6350_MSS] = &mss,
+ [SM6350_MX] = &mx,
};
static const struct rpmhpd_desc sm6350_desc = {
@@ -165,33 +226,18 @@ static const struct rpmhpd_desc sm6350_desc = {
};
/* SM8150 RPMH powerdomains */
-
-static struct rpmhpd sm8150_mmcx_ao;
-static struct rpmhpd sm8150_mmcx = {
- .pd = { .name = "mmcx", },
- .peer = &sm8150_mmcx_ao,
- .res_name = "mmcx.lvl",
-};
-
-static struct rpmhpd sm8150_mmcx_ao = {
- .pd = { .name = "mmcx_ao", },
- .active_only = true,
- .peer = &sm8150_mmcx,
- .res_name = "mmcx.lvl",
-};
-
static struct rpmhpd *sm8150_rpmhpds[] = {
- [SM8150_MSS] = &sdm845_mss,
- [SM8150_EBI] = &sdm845_ebi,
- [SM8150_LMX] = &sdm845_lmx,
- [SM8150_LCX] = &sdm845_lcx,
- [SM8150_GFX] = &sdm845_gfx,
- [SM8150_MX] = &sdm845_mx,
- [SM8150_MX_AO] = &sdm845_mx_ao,
- [SM8150_CX] = &sdm845_cx,
- [SM8150_CX_AO] = &sdm845_cx_ao,
- [SM8150_MMCX] = &sm8150_mmcx,
- [SM8150_MMCX_AO] = &sm8150_mmcx_ao,
+ [SM8150_CX] = &cx_w_mx_parent,
+ [SM8150_CX_AO] = &cx_ao_w_mx_parent,
+ [SM8150_EBI] = &ebi,
+ [SM8150_GFX] = &gfx,
+ [SM8150_LCX] = &lcx,
+ [SM8150_LMX] = &lmx,
+ [SM8150_MMCX] = &mmcx,
+ [SM8150_MMCX_AO] = &mmcx_ao,
+ [SM8150_MSS] = &mss,
+ [SM8150_MX] = &mx,
+ [SM8150_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sm8150_desc = {
@@ -199,17 +245,18 @@ static const struct rpmhpd_desc sm8150_desc = {
.num_pds = ARRAY_SIZE(sm8150_rpmhpds),
};
+/* SM8250 RPMH powerdomains */
static struct rpmhpd *sm8250_rpmhpds[] = {
- [SM8250_CX] = &sdm845_cx,
- [SM8250_CX_AO] = &sdm845_cx_ao,
- [SM8250_EBI] = &sdm845_ebi,
- [SM8250_GFX] = &sdm845_gfx,
- [SM8250_LCX] = &sdm845_lcx,
- [SM8250_LMX] = &sdm845_lmx,
- [SM8250_MMCX] = &sm8150_mmcx,
- [SM8250_MMCX_AO] = &sm8150_mmcx_ao,
- [SM8250_MX] = &sdm845_mx,
- [SM8250_MX_AO] = &sdm845_mx_ao,
+ [SM8250_CX] = &cx_w_mx_parent,
+ [SM8250_CX_AO] = &cx_ao_w_mx_parent,
+ [SM8250_EBI] = &ebi,
+ [SM8250_GFX] = &gfx,
+ [SM8250_LCX] = &lcx,
+ [SM8250_LMX] = &lmx,
+ [SM8250_MMCX] = &mmcx,
+ [SM8250_MMCX_AO] = &mmcx_ao,
+ [SM8250_MX] = &mx,
+ [SM8250_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sm8250_desc = {
@@ -218,34 +265,20 @@ static const struct rpmhpd_desc sm8250_desc = {
};
/* SM8350 Power domains */
-static struct rpmhpd sm8350_mxc_ao;
-static struct rpmhpd sm8350_mxc = {
- .pd = { .name = "mxc", },
- .peer = &sm8350_mxc_ao,
- .res_name = "mxc.lvl",
-};
-
-static struct rpmhpd sm8350_mxc_ao = {
- .pd = { .name = "mxc_ao", },
- .active_only = true,
- .peer = &sm8350_mxc,
- .res_name = "mxc.lvl",
-};
-
static struct rpmhpd *sm8350_rpmhpds[] = {
- [SM8350_CX] = &sdm845_cx,
- [SM8350_CX_AO] = &sdm845_cx_ao,
- [SM8350_EBI] = &sdm845_ebi,
- [SM8350_GFX] = &sdm845_gfx,
- [SM8350_LCX] = &sdm845_lcx,
- [SM8350_LMX] = &sdm845_lmx,
- [SM8350_MMCX] = &sm8150_mmcx,
- [SM8350_MMCX_AO] = &sm8150_mmcx_ao,
- [SM8350_MX] = &sdm845_mx,
- [SM8350_MX_AO] = &sdm845_mx_ao,
- [SM8350_MXC] = &sm8350_mxc,
- [SM8350_MXC_AO] = &sm8350_mxc_ao,
- [SM8350_MSS] = &sdm845_mss,
+ [SM8350_CX] = &cx_w_mx_parent,
+ [SM8350_CX_AO] = &cx_ao_w_mx_parent,
+ [SM8350_EBI] = &ebi,
+ [SM8350_GFX] = &gfx,
+ [SM8350_LCX] = &lcx,
+ [SM8350_LMX] = &lmx,
+ [SM8350_MMCX] = &mmcx,
+ [SM8350_MMCX_AO] = &mmcx_ao,
+ [SM8350_MSS] = &mss,
+ [SM8350_MX] = &mx,
+ [SM8350_MX_AO] = &mx_ao,
+ [SM8350_MXC] = &mxc,
+ [SM8350_MXC_AO] = &mxc_ao,
};
static const struct rpmhpd_desc sm8350_desc = {
@@ -253,16 +286,38 @@ static const struct rpmhpd_desc sm8350_desc = {
.num_pds = ARRAY_SIZE(sm8350_rpmhpds),
};
+/* SM8450 RPMH powerdomains */
+static struct rpmhpd *sm8450_rpmhpds[] = {
+ [SM8450_CX] = &cx,
+ [SM8450_CX_AO] = &cx_ao,
+ [SM8450_EBI] = &ebi,
+ [SM8450_GFX] = &gfx,
+ [SM8450_LCX] = &lcx,
+ [SM8450_LMX] = &lmx,
+ [SM8450_MMCX] = &mmcx_w_cx_parent,
+ [SM8450_MMCX_AO] = &mmcx_ao_w_cx_parent,
+ [SM8450_MSS] = &mss,
+ [SM8450_MX] = &mx,
+ [SM8450_MX_AO] = &mx_ao,
+ [SM8450_MXC] = &mxc,
+ [SM8450_MXC_AO] = &mxc_ao,
+};
+
+static const struct rpmhpd_desc sm8450_desc = {
+ .rpmhpds = sm8450_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8450_rpmhpds),
+};
+
/* SC7180 RPMH powerdomains */
static struct rpmhpd *sc7180_rpmhpds[] = {
- [SC7180_CX] = &sdm845_cx,
- [SC7180_CX_AO] = &sdm845_cx_ao,
- [SC7180_GFX] = &sdm845_gfx,
- [SC7180_MX] = &sdm845_mx,
- [SC7180_MX_AO] = &sdm845_mx_ao,
- [SC7180_LMX] = &sdm845_lmx,
- [SC7180_LCX] = &sdm845_lcx,
- [SC7180_MSS] = &sdm845_mss,
+ [SC7180_CX] = &cx_w_mx_parent,
+ [SC7180_CX_AO] = &cx_ao_w_mx_parent,
+ [SC7180_GFX] = &gfx,
+ [SC7180_LCX] = &lcx,
+ [SC7180_LMX] = &lmx,
+ [SC7180_MSS] = &mss,
+ [SC7180_MX] = &mx,
+ [SC7180_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sc7180_desc = {
@@ -272,15 +327,15 @@ static const struct rpmhpd_desc sc7180_desc = {
/* SC7280 RPMH powerdomains */
static struct rpmhpd *sc7280_rpmhpds[] = {
- [SC7280_CX] = &sdm845_cx,
- [SC7280_CX_AO] = &sdm845_cx_ao,
- [SC7280_EBI] = &sdm845_ebi,
- [SC7280_GFX] = &sdm845_gfx,
- [SC7280_MX] = &sdm845_mx,
- [SC7280_MX_AO] = &sdm845_mx_ao,
- [SC7280_LMX] = &sdm845_lmx,
- [SC7280_LCX] = &sdm845_lcx,
- [SC7280_MSS] = &sdm845_mss,
+ [SC7280_CX] = &cx,
+ [SC7280_CX_AO] = &cx_ao,
+ [SC7280_EBI] = &ebi,
+ [SC7280_GFX] = &gfx,
+ [SC7280_LCX] = &lcx,
+ [SC7280_LMX] = &lmx,
+ [SC7280_MSS] = &mss,
+ [SC7280_MX] = &mx,
+ [SC7280_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sc7280_desc = {
@@ -290,17 +345,17 @@ static const struct rpmhpd_desc sc7280_desc = {
/* SC8180x RPMH powerdomains */
static struct rpmhpd *sc8180x_rpmhpds[] = {
- [SC8180X_CX] = &sdm845_cx,
- [SC8180X_CX_AO] = &sdm845_cx_ao,
- [SC8180X_EBI] = &sdm845_ebi,
- [SC8180X_GFX] = &sdm845_gfx,
- [SC8180X_LCX] = &sdm845_lcx,
- [SC8180X_LMX] = &sdm845_lmx,
- [SC8180X_MMCX] = &sm8150_mmcx,
- [SC8180X_MMCX_AO] = &sm8150_mmcx_ao,
- [SC8180X_MSS] = &sdm845_mss,
- [SC8180X_MX] = &sdm845_mx,
- [SC8180X_MX_AO] = &sdm845_mx_ao,
+ [SC8180X_CX] = &cx_w_mx_parent,
+ [SC8180X_CX_AO] = &cx_ao_w_mx_parent,
+ [SC8180X_EBI] = &ebi,
+ [SC8180X_GFX] = &gfx,
+ [SC8180X_LCX] = &lcx,
+ [SC8180X_LMX] = &lmx,
+ [SC8180X_MMCX] = &mmcx,
+ [SC8180X_MMCX_AO] = &mmcx_ao,
+ [SC8180X_MSS] = &mss,
+ [SC8180X_MX] = &mx,
+ [SC8180X_MX_AO] = &mx_ao,
};
static const struct rpmhpd_desc sc8180x_desc = {
@@ -318,6 +373,7 @@ static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc },
+ { .compatible = "qcom,sm8450-rpmhpd", .data = &sm8450_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmhpd_match_table);
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 4f69fb9b2e0e..0a8d8d24bfb7 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -102,7 +102,6 @@ struct rpmpd {
const bool active_only;
unsigned int corner;
bool enabled;
- const char *res_name;
const int res_type;
const int res_id;
struct qcom_smd_rpm *rpm;
@@ -396,6 +395,45 @@ static const struct rpmpd_desc sm6115_desc = {
.max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
};
+/* sm6125 RPM Power domains */
+DEFINE_RPMPD_PAIR(sm6125, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sm6125, vddcx_vfl, RWCX, 0);
+
+DEFINE_RPMPD_PAIR(sm6125, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sm6125, vddmx_vfl, RWMX, 0);
+
+static struct rpmpd *sm6125_rpmpds[] = {
+ [SM6125_VDDCX] = &sm6125_vddcx,
+ [SM6125_VDDCX_AO] = &sm6125_vddcx_ao,
+ [SM6125_VDDCX_VFL] = &sm6125_vddcx_vfl,
+ [SM6125_VDDMX] = &sm6125_vddmx,
+ [SM6125_VDDMX_AO] = &sm6125_vddmx_ao,
+ [SM6125_VDDMX_VFL] = &sm6125_vddmx_vfl,
+};
+
+static const struct rpmpd_desc sm6125_desc = {
+ .rpmpds = sm6125_rpmpds,
+ .num_pds = ARRAY_SIZE(sm6125_rpmpds),
+ .max_state = RPM_SMD_LEVEL_BINNING,
+};
+
+static struct rpmpd *qcm2290_rpmpds[] = {
+ [QCM2290_VDDCX] = &sm6115_vddcx,
+ [QCM2290_VDDCX_AO] = &sm6115_vddcx_ao,
+ [QCM2290_VDDCX_VFL] = &sm6115_vddcx_vfl,
+ [QCM2290_VDDMX] = &sm6115_vddmx,
+ [QCM2290_VDDMX_AO] = &sm6115_vddmx_ao,
+ [QCM2290_VDDMX_VFL] = &sm6115_vddmx_vfl,
+ [QCM2290_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
+ [QCM2290_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
+};
+
+static const struct rpmpd_desc qcm2290_desc = {
+ .rpmpds = qcm2290_rpmpds,
+ .num_pds = ARRAY_SIZE(qcm2290_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
+};
+
static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
{ .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
@@ -405,9 +443,11 @@ static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
{ .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
+ { .compatible = "qcom,qcm2290-rpmpd", .data = &qcm2290_desc },
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
{ .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc },
{ .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc },
+ { .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmpd_match_table);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index c7e519bfdc8a..e2057d8f1eff 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -85,7 +85,7 @@
#define SMEM_GLOBAL_HOST 0xfffe
/* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT 14
+#define SMEM_HOST_COUNT 15
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 9a0eb59405e8..6dc0f39c0ec3 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -313,8 +313,11 @@ static const struct soc_id soc_id[] = {
{ 421, "IPQ6000" },
{ 422, "IPQ6010" },
{ 425, "SC7180" },
+ { 434, "SM6350" },
{ 453, "IPQ6005" },
{ 455, "QRB5165" },
+ { 457, "SM8450" },
+ { 459, "SM7225" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index ce16ef5c939c..2cbd03db2cc7 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -235,6 +235,13 @@ config ARCH_R8A77961
This enables support for the Renesas R-Car M3-W+ SoC.
This includes different gradings like R-Car M3e and M3e-2G.
+config ARCH_R8A779F0
+ bool "ARM64 Platform support for R-Car S4-8"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A779F0
+ help
+ This enables support for the Renesas R-Car S4-8 SoC.
+
config ARCH_R8A77980
bool "ARM64 Platform support for R-Car V3H"
select ARCH_RCAR_GEN3
@@ -297,6 +304,9 @@ config RST_RCAR
config SYSC_RCAR
bool "System Controller support for R-Car" if COMPILE_TEST
+config SYSC_RCAR_GEN4
+ bool "System Controller support for R-Car Gen4" if COMPILE_TEST
+
config SYSC_R8A77995
bool "System Controller support for R-Car D3" if COMPILE_TEST
select SYSC_RCAR
@@ -337,6 +347,10 @@ config SYSC_R8A77961
bool "System Controller support for R-Car M3-W+" if COMPILE_TEST
select SYSC_RCAR
+config SYSC_R8A779F0
+ bool "System Controller support for R-Car S4-8" if COMPILE_TEST
+ select SYSC_RCAR_GEN4
+
config SYSC_R8A7792
bool "System Controller support for R-Car V2H" if COMPILE_TEST
select SYSC_RCAR
@@ -351,6 +365,7 @@ config SYSC_R8A77970
config SYSC_R8A779A0
bool "System Controller support for R-Car V3U" if COMPILE_TEST
+ select SYSC_RCAR_GEN4
config SYSC_RMOBILE
bool "System Controller support for R-Mobile" if COMPILE_TEST
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 9b29bed2a597..deeb41f84f01 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o
obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o
obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o
+obj-$(CONFIG_SYSC_R8A779F0) += r8a779f0-sysc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
@@ -32,4 +33,5 @@ endif
# Family
obj-$(CONFIG_RST_RCAR) += rcar-rst.o
obj-$(CONFIG_SYSC_RCAR) += rcar-sysc.o
+obj-$(CONFIG_SYSC_RCAR_GEN4) += rcar-gen4-sysc.o
obj-$(CONFIG_SYSC_RMOBILE) += rmobile-sysc.o
diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c
index 7410b9fa9846..fdfc857df334 100644
--- a/drivers/soc/renesas/r8a779a0-sysc.c
+++ b/drivers/soc/renesas/r8a779a0-sysc.c
@@ -21,35 +21,9 @@
#include <dt-bindings/power/r8a779a0-sysc.h>
-/*
- * Power Domain flags
- */
-#define PD_CPU BIT(0) /* Area contains main CPU core */
-#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
-#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */
-
-#define PD_CPU_NOCR PD_CPU | PD_NO_CR /* CPU area lacks CR */
-#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
-
-/*
- * Description of a Power Area
- */
-struct r8a779a0_sysc_area {
- const char *name;
- u8 pdr; /* PDRn */
- int parent; /* -1 if none */
- unsigned int flags; /* See PD_* */
-};
-
-/*
- * SoC-specific Power Area Description
- */
-struct r8a779a0_sysc_info {
- const struct r8a779a0_sysc_area *areas;
- unsigned int num_areas;
-};
+#include "rcar-gen4-sysc.h"
-static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = {
+static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = {
{ "always-on", R8A779A0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "a3e0", R8A779A0_PD_A3E0, R8A779A0_PD_ALWAYS_ON, PD_SCU },
{ "a3e1", R8A779A0_PD_A3E1, R8A779A0_PD_ALWAYS_ON, PD_SCU },
@@ -96,355 +70,7 @@ static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = {
{ "a1dsp1", R8A779A0_PD_A1DSP1, R8A779A0_PD_A2CN1 },
};
-static const struct r8a779a0_sysc_info r8a779a0_sysc_info __initconst = {
+const struct rcar_gen4_sysc_info r8a779a0_sysc_info __initconst = {
.areas = r8a779a0_areas,
.num_areas = ARRAY_SIZE(r8a779a0_areas),
};
-
-/* SYSC Common */
-#define SYSCSR 0x000 /* SYSC Status Register */
-#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */
-#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
-#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
-#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
-#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
-
-/* Power Domain Registers */
-#define PDRSR(n) (0x1000 + ((n) * 0x40))
-#define PDRONCR(n) (0x1004 + ((n) * 0x40))
-#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
-#define PDRESR(n) (0x100C + ((n) * 0x40))
-
-/* PWRON/PWROFF */
-#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */
-
-/* PDRESR */
-#define PDRESR_ERR BIT(0)
-
-/* PDRSR */
-#define PDRSR_OFF BIT(0) /* Power-OFF state */
-#define PDRSR_ON BIT(4) /* Power-ON state */
-#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
-#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
-
-#define SYSCSR_BUSY GENMASK(1, 0) /* All bit sets is not busy */
-
-#define SYSCSR_TIMEOUT 10000
-#define SYSCSR_DELAY_US 10
-
-#define PDRESR_RETRIES 1000
-#define PDRESR_DELAY_US 10
-
-#define SYSCISR_TIMEOUT 10000
-#define SYSCISR_DELAY_US 10
-
-#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
-
-static void __iomem *r8a779a0_sysc_base;
-static DEFINE_SPINLOCK(r8a779a0_sysc_lock); /* SMP CPUs + I/O devices */
-
-static int r8a779a0_sysc_pwr_on_off(u8 pdr, bool on)
-{
- unsigned int reg_offs;
- u32 val;
- int ret;
-
- if (on)
- reg_offs = PDRONCR(pdr);
- else
- reg_offs = PDROFFCR(pdr);
-
- /* Wait until SYSC is ready to accept a power request */
- ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCSR, val,
- (val & SYSCSR_BUSY) == SYSCSR_BUSY,
- SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
- if (ret < 0)
- return -EAGAIN;
-
- /* Submit power shutoff or power resume request */
- iowrite32(PWRON_PWROFF, r8a779a0_sysc_base + reg_offs);
-
- return 0;
-}
-
-static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
-{
- u32 val;
- int ret;
-
- iowrite32(isr_mask, r8a779a0_sysc_base + SYSCISCR(reg_idx));
-
- ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
- val, !(val & isr_mask),
- SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
- if (ret < 0) {
- pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int r8a779a0_sysc_power(u8 pdr, bool on)
-{
- unsigned int isr_mask;
- unsigned int reg_idx, bit_idx;
- unsigned int status;
- unsigned long flags;
- int ret = 0;
- u32 val;
- int k;
-
- spin_lock_irqsave(&r8a779a0_sysc_lock, flags);
-
- reg_idx = pdr / NUM_DOMAINS_EACH_REG;
- bit_idx = pdr % NUM_DOMAINS_EACH_REG;
-
- isr_mask = BIT(bit_idx);
-
- /*
- * The interrupt source needs to be enabled, but masked, to prevent the
- * CPU from receiving it.
- */
- iowrite32(ioread32(r8a779a0_sysc_base + SYSCIER(reg_idx)) | isr_mask,
- r8a779a0_sysc_base + SYSCIER(reg_idx));
- iowrite32(ioread32(r8a779a0_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
- r8a779a0_sysc_base + SYSCIMR(reg_idx));
-
- ret = clear_irq_flags(reg_idx, isr_mask);
- if (ret)
- goto out;
-
- /* Submit power shutoff or resume request until it was accepted */
- for (k = 0; k < PDRESR_RETRIES; k++) {
- ret = r8a779a0_sysc_pwr_on_off(pdr, on);
- if (ret)
- goto out;
-
- status = ioread32(r8a779a0_sysc_base + PDRESR(pdr));
- if (!(status & PDRESR_ERR))
- break;
-
- udelay(PDRESR_DELAY_US);
- }
-
- if (k == PDRESR_RETRIES) {
- ret = -EIO;
- goto out;
- }
-
- /* Wait until the power shutoff or resume request has completed * */
- ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
- val, (val & isr_mask),
- SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
- if (ret < 0) {
- ret = -EIO;
- goto out;
- }
-
- /* Clear interrupt flags */
- ret = clear_irq_flags(reg_idx, isr_mask);
- if (ret)
- goto out;
-
- out:
- spin_unlock_irqrestore(&r8a779a0_sysc_lock, flags);
-
- pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
- pdr, ioread32(r8a779a0_sysc_base + SYSCISCR(reg_idx)), ret);
- return ret;
-}
-
-static bool r8a779a0_sysc_power_is_off(u8 pdr)
-{
- unsigned int st;
-
- st = ioread32(r8a779a0_sysc_base + PDRSR(pdr));
-
- if (st & PDRSR_OFF)
- return true;
-
- return false;
-}
-
-struct r8a779a0_sysc_pd {
- struct generic_pm_domain genpd;
- u8 pdr;
- unsigned int flags;
- char name[];
-};
-
-static inline struct r8a779a0_sysc_pd *to_r8a779a0_pd(struct generic_pm_domain *d)
-{
- return container_of(d, struct r8a779a0_sysc_pd, genpd);
-}
-
-static int r8a779a0_sysc_pd_power_off(struct generic_pm_domain *genpd)
-{
- struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);
-
- pr_debug("%s: %s\n", __func__, genpd->name);
- return r8a779a0_sysc_power(pd->pdr, false);
-}
-
-static int r8a779a0_sysc_pd_power_on(struct generic_pm_domain *genpd)
-{
- struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);
-
- pr_debug("%s: %s\n", __func__, genpd->name);
- return r8a779a0_sysc_power(pd->pdr, true);
-}
-
-static int __init r8a779a0_sysc_pd_setup(struct r8a779a0_sysc_pd *pd)
-{
- struct generic_pm_domain *genpd = &pd->genpd;
- const char *name = pd->genpd.name;
- int error;
-
- if (pd->flags & PD_CPU) {
- /*
- * This domain contains a CPU core and therefore it should
- * only be turned off if the CPU is not in use.
- */
- pr_debug("PM domain %s contains %s\n", name, "CPU");
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- } else if (pd->flags & PD_SCU) {
- /*
- * This domain contains an SCU and cache-controller, and
- * therefore it should only be turned off if the CPU cores are
- * not in use.
- */
- pr_debug("PM domain %s contains %s\n", name, "SCU");
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- } else if (pd->flags & PD_NO_CR) {
- /*
- * This domain cannot be turned off.
- */
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- }
-
- if (!(pd->flags & (PD_CPU | PD_SCU))) {
- /* Enable Clock Domain for I/O devices */
- genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
- genpd->attach_dev = cpg_mssr_attach_dev;
- genpd->detach_dev = cpg_mssr_detach_dev;
- }
-
- genpd->power_off = r8a779a0_sysc_pd_power_off;
- genpd->power_on = r8a779a0_sysc_pd_power_on;
-
- if (pd->flags & (PD_CPU | PD_NO_CR)) {
- /* Skip CPUs (handled by SMP code) and areas without control */
- pr_debug("%s: Not touching %s\n", __func__, genpd->name);
- goto finalize;
- }
-
- if (!r8a779a0_sysc_power_is_off(pd->pdr)) {
- pr_debug("%s: %s is already powered\n", __func__, genpd->name);
- goto finalize;
- }
-
- r8a779a0_sysc_power(pd->pdr, true);
-
-finalize:
- error = pm_genpd_init(genpd, &simple_qos_governor, false);
- if (error)
- pr_err("Failed to init PM domain %s: %d\n", name, error);
-
- return error;
-}
-
-static const struct of_device_id r8a779a0_sysc_matches[] __initconst = {
- { .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
- { /* sentinel */ }
-};
-
-struct r8a779a0_pm_domains {
- struct genpd_onecell_data onecell_data;
- struct generic_pm_domain *domains[R8A779A0_PD_ALWAYS_ON + 1];
-};
-
-static struct genpd_onecell_data *r8a779a0_sysc_onecell_data;
-
-static int __init r8a779a0_sysc_pd_init(void)
-{
- const struct r8a779a0_sysc_info *info;
- const struct of_device_id *match;
- struct r8a779a0_pm_domains *domains;
- struct device_node *np;
- void __iomem *base;
- unsigned int i;
- int error;
-
- np = of_find_matching_node_and_match(NULL, r8a779a0_sysc_matches, &match);
- if (!np)
- return -ENODEV;
-
- info = match->data;
-
- base = of_iomap(np, 0);
- if (!base) {
- pr_warn("%pOF: Cannot map regs\n", np);
- error = -ENOMEM;
- goto out_put;
- }
-
- r8a779a0_sysc_base = base;
-
- domains = kzalloc(sizeof(*domains), GFP_KERNEL);
- if (!domains) {
- error = -ENOMEM;
- goto out_put;
- }
-
- domains->onecell_data.domains = domains->domains;
- domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
- r8a779a0_sysc_onecell_data = &domains->onecell_data;
-
- for (i = 0; i < info->num_areas; i++) {
- const struct r8a779a0_sysc_area *area = &info->areas[i];
- struct r8a779a0_sysc_pd *pd;
- size_t n;
-
- if (!area->name) {
- /* Skip NULLified area */
- continue;
- }
-
- n = strlen(area->name) + 1;
- pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
- if (!pd) {
- error = -ENOMEM;
- goto out_put;
- }
-
- memcpy(pd->name, area->name, n);
- pd->genpd.name = pd->name;
- pd->pdr = area->pdr;
- pd->flags = area->flags;
-
- error = r8a779a0_sysc_pd_setup(pd);
- if (error)
- goto out_put;
-
- domains->domains[area->pdr] = &pd->genpd;
-
- if (area->parent < 0)
- continue;
-
- error = pm_genpd_add_subdomain(domains->domains[area->parent],
- &pd->genpd);
- if (error) {
- pr_warn("Failed to add PM subdomain %s to parent %u\n",
- area->name, area->parent);
- goto out_put;
- }
- }
-
- error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
-
-out_put:
- of_node_put(np);
- return error;
-}
-early_initcall(r8a779a0_sysc_pd_init);
diff --git a/drivers/soc/renesas/r8a779f0-sysc.c b/drivers/soc/renesas/r8a779f0-sysc.c
new file mode 100644
index 000000000000..5602aa6bd7ed
--- /dev/null
+++ b/drivers/soc/renesas/r8a779f0-sysc.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car S4-8 System Controller
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <dt-bindings/power/r8a779f0-sysc.h>
+
+#include "rcar-gen4-sysc.h"
+
+static struct rcar_gen4_sysc_area r8a779f0_areas[] __initdata = {
+ { "always-on", R8A779F0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "a3e0", R8A779F0_PD_A3E0, R8A779F0_PD_ALWAYS_ON, PD_SCU },
+ { "a3e1", R8A779F0_PD_A3E1, R8A779F0_PD_ALWAYS_ON, PD_SCU },
+ { "a2e0d0", R8A779F0_PD_A2E0D0, R8A779F0_PD_A3E0, PD_SCU },
+ { "a2e0d1", R8A779F0_PD_A2E0D1, R8A779F0_PD_A3E0, PD_SCU },
+ { "a2e1d0", R8A779F0_PD_A2E1D0, R8A779F0_PD_A3E1, PD_SCU },
+ { "a2e1d1", R8A779F0_PD_A2E1D1, R8A779F0_PD_A3E1, PD_SCU },
+ { "a1e0d0c0", R8A779F0_PD_A1E0D0C0, R8A779F0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d0c1", R8A779F0_PD_A1E0D0C1, R8A779F0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d1c0", R8A779F0_PD_A1E0D1C0, R8A779F0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a1e0d1c1", R8A779F0_PD_A1E0D1C1, R8A779F0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a1e1d0c0", R8A779F0_PD_A1E1D0C0, R8A779F0_PD_A2E1D0, PD_CPU_NOCR },
+ { "a1e1d0c1", R8A779F0_PD_A1E1D0C1, R8A779F0_PD_A2E1D0, PD_CPU_NOCR },
+ { "a1e1d1c0", R8A779F0_PD_A1E1D1C0, R8A779F0_PD_A2E1D1, PD_CPU_NOCR },
+ { "a1e1d1c1", R8A779F0_PD_A1E1D1C1, R8A779F0_PD_A2E1D1, PD_CPU_NOCR },
+};
+
+const struct rcar_gen4_sysc_info r8a779f0_sysc_info __initconst = {
+ .areas = r8a779f0_areas,
+ .num_areas = ARRAY_SIZE(r8a779f0_areas),
+};
diff --git a/drivers/soc/renesas/rcar-gen4-sysc.c b/drivers/soc/renesas/rcar-gen4-sysc.c
new file mode 100644
index 000000000000..831162a57f9a
--- /dev/null
+++ b/drivers/soc/renesas/rcar-gen4-sysc.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R-Car Gen4 SYSC Power management support
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "rcar-gen4-sysc.h"
+
+/* SYSC Common */
+#define SYSCSR 0x000 /* SYSC Status Register */
+#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */
+#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
+#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
+#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
+#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
+
+/* Power Domain Registers */
+#define PDRSR(n) (0x1000 + ((n) * 0x40))
+#define PDRONCR(n) (0x1004 + ((n) * 0x40))
+#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
+#define PDRESR(n) (0x100C + ((n) * 0x40))
+
+/* PWRON/PWROFF */
+#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */
+
+/* PDRESR */
+#define PDRESR_ERR BIT(0)
+
+/* PDRSR */
+#define PDRSR_OFF BIT(0) /* Power-OFF state */
+#define PDRSR_ON BIT(4) /* Power-ON state */
+#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
+#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
+
+#define SYSCSR_BUSY		GENMASK(1, 0)	/* All bits set means not busy */
+
+#define SYSCSR_TIMEOUT 10000
+#define SYSCSR_DELAY_US 10
+
+#define PDRESR_RETRIES 1000
+#define PDRESR_DELAY_US 10
+
+#define SYSCISR_TIMEOUT 10000
+#define SYSCISR_DELAY_US 10
+
+#define RCAR_GEN4_PD_ALWAYS_ON 64
+#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
+
+static void __iomem *rcar_gen4_sysc_base;
+static DEFINE_SPINLOCK(rcar_gen4_sysc_lock); /* SMP CPUs + I/O devices */
+
+static int rcar_gen4_sysc_pwr_on_off(u8 pdr, bool on)
+{
+ unsigned int reg_offs;
+ u32 val;
+ int ret;
+
+ if (on)
+ reg_offs = PDRONCR(pdr);
+ else
+ reg_offs = PDROFFCR(pdr);
+
+ /* Wait until SYSC is ready to accept a power request */
+ ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCSR, val,
+ (val & SYSCSR_BUSY) == SYSCSR_BUSY,
+ SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
+ if (ret < 0)
+ return -EAGAIN;
+
+ /* Submit power shutoff or power resume request */
+ iowrite32(PWRON_PWROFF, rcar_gen4_sysc_base + reg_offs);
+
+ return 0;
+}
+
+static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
+{
+ u32 val;
+ int ret;
+
+ iowrite32(isr_mask, rcar_gen4_sysc_base + SYSCISCR(reg_idx));
+
+ ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
+ val, !(val & isr_mask),
+ SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
+ if (ret < 0) {
+ pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rcar_gen4_sysc_power(u8 pdr, bool on)
+{
+ unsigned int isr_mask;
+ unsigned int reg_idx, bit_idx;
+ unsigned int status;
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+ int k;
+
+ spin_lock_irqsave(&rcar_gen4_sysc_lock, flags);
+
+ reg_idx = pdr / NUM_DOMAINS_EACH_REG;
+ bit_idx = pdr % NUM_DOMAINS_EACH_REG;
+
+ isr_mask = BIT(bit_idx);
+
+ /*
+ * The interrupt source needs to be enabled, but masked, to prevent the
+ * CPU from receiving it.
+ */
+ iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIER(reg_idx)) | isr_mask,
+ rcar_gen4_sysc_base + SYSCIER(reg_idx));
+ iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
+ rcar_gen4_sysc_base + SYSCIMR(reg_idx));
+
+ ret = clear_irq_flags(reg_idx, isr_mask);
+ if (ret)
+ goto out;
+
+ /* Submit power shutoff or resume request until it was accepted */
+ for (k = 0; k < PDRESR_RETRIES; k++) {
+ ret = rcar_gen4_sysc_pwr_on_off(pdr, on);
+ if (ret)
+ goto out;
+
+ status = ioread32(rcar_gen4_sysc_base + PDRESR(pdr));
+ if (!(status & PDRESR_ERR))
+ break;
+
+ udelay(PDRESR_DELAY_US);
+ }
+
+ if (k == PDRESR_RETRIES) {
+ ret = -EIO;
+ goto out;
+ }
+
+	/* Wait until the power shutoff or resume request has completed */
+ ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
+ val, (val & isr_mask),
+ SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
+ if (ret < 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Clear interrupt flags */
+ ret = clear_irq_flags(reg_idx, isr_mask);
+ if (ret)
+ goto out;
+
+ out:
+ spin_unlock_irqrestore(&rcar_gen4_sysc_lock, flags);
+
+ pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
+ pdr, ioread32(rcar_gen4_sysc_base + SYSCISCR(reg_idx)), ret);
+ return ret;
+}
+
+static bool rcar_gen4_sysc_power_is_off(u8 pdr)
+{
+ unsigned int st;
+
+ st = ioread32(rcar_gen4_sysc_base + PDRSR(pdr));
+
+ if (st & PDRSR_OFF)
+ return true;
+
+ return false;
+}
+
+struct rcar_gen4_sysc_pd {
+ struct generic_pm_domain genpd;
+ u8 pdr;
+ unsigned int flags;
+ char name[];
+};
+
+static inline struct rcar_gen4_sysc_pd *to_rcar_gen4_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct rcar_gen4_sysc_pd, genpd);
+}
+
+static int rcar_gen4_sysc_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+ return rcar_gen4_sysc_power(pd->pdr, false);
+}
+
+static int rcar_gen4_sysc_pd_power_on(struct generic_pm_domain *genpd)
+{
+ struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+ return rcar_gen4_sysc_power(pd->pdr, true);
+}
+
+static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd)
+{
+ struct generic_pm_domain *genpd = &pd->genpd;
+ const char *name = pd->genpd.name;
+ int error;
+
+ if (pd->flags & PD_CPU) {
+ /*
+ * This domain contains a CPU core and therefore it should
+ * only be turned off if the CPU is not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "CPU");
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ } else if (pd->flags & PD_SCU) {
+ /*
+ * This domain contains an SCU and cache-controller, and
+ * therefore it should only be turned off if the CPU cores are
+ * not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "SCU");
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ } else if (pd->flags & PD_NO_CR) {
+ /*
+ * This domain cannot be turned off.
+ */
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ }
+
+ if (!(pd->flags & (PD_CPU | PD_SCU))) {
+ /* Enable Clock Domain for I/O devices */
+ genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->attach_dev = cpg_mssr_attach_dev;
+ genpd->detach_dev = cpg_mssr_detach_dev;
+ }
+
+ genpd->power_off = rcar_gen4_sysc_pd_power_off;
+ genpd->power_on = rcar_gen4_sysc_pd_power_on;
+
+ if (pd->flags & (PD_CPU | PD_NO_CR)) {
+ /* Skip CPUs (handled by SMP code) and areas without control */
+ pr_debug("%s: Not touching %s\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ if (!rcar_gen4_sysc_power_is_off(pd->pdr)) {
+ pr_debug("%s: %s is already powered\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ rcar_gen4_sysc_power(pd->pdr, true);
+
+finalize:
+ error = pm_genpd_init(genpd, &simple_qos_governor, false);
+ if (error)
+ pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+ return error;
+}
+
+static const struct of_device_id rcar_gen4_sysc_matches[] __initconst = {
+#ifdef CONFIG_SYSC_R8A779A0
+ { .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A779F0
+ { .compatible = "renesas,r8a779f0-sysc", .data = &r8a779f0_sysc_info },
+#endif
+ { /* sentinel */ }
+};
+
+struct rcar_gen4_pm_domains {
+ struct genpd_onecell_data onecell_data;
+ struct generic_pm_domain *domains[RCAR_GEN4_PD_ALWAYS_ON + 1];
+};
+
+static struct genpd_onecell_data *rcar_gen4_sysc_onecell_data;
+
+static int __init rcar_gen4_sysc_pd_init(void)
+{
+ const struct rcar_gen4_sysc_info *info;
+ const struct of_device_id *match;
+ struct rcar_gen4_pm_domains *domains;
+ struct device_node *np;
+ void __iomem *base;
+ unsigned int i;
+ int error;
+
+ np = of_find_matching_node_and_match(NULL, rcar_gen4_sysc_matches, &match);
+ if (!np)
+ return -ENODEV;
+
+ info = match->data;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("%pOF: Cannot map regs\n", np);
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ rcar_gen4_sysc_base = base;
+
+ domains = kzalloc(sizeof(*domains), GFP_KERNEL);
+ if (!domains) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ domains->onecell_data.domains = domains->domains;
+ domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
+ rcar_gen4_sysc_onecell_data = &domains->onecell_data;
+
+ for (i = 0; i < info->num_areas; i++) {
+ const struct rcar_gen4_sysc_area *area = &info->areas[i];
+ struct rcar_gen4_sysc_pd *pd;
+ size_t n;
+
+ if (!area->name) {
+ /* Skip NULLified area */
+ continue;
+ }
+
+ n = strlen(area->name) + 1;
+ pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
+ if (!pd) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ memcpy(pd->name, area->name, n);
+ pd->genpd.name = pd->name;
+ pd->pdr = area->pdr;
+ pd->flags = area->flags;
+
+ error = rcar_gen4_sysc_pd_setup(pd);
+ if (error)
+ goto out_put;
+
+ domains->domains[area->pdr] = &pd->genpd;
+
+ if (area->parent < 0)
+ continue;
+
+ error = pm_genpd_add_subdomain(domains->domains[area->parent],
+ &pd->genpd);
+ if (error) {
+ pr_warn("Failed to add PM subdomain %s to parent %u\n",
+ area->name, area->parent);
+ goto out_put;
+ }
+ }
+
+ error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
+
+out_put:
+ of_node_put(np);
+ return error;
+}
+early_initcall(rcar_gen4_sysc_pd_init);
diff --git a/drivers/soc/renesas/rcar-gen4-sysc.h b/drivers/soc/renesas/rcar-gen4-sysc.h
new file mode 100644
index 000000000000..0e0bd102b1f9
--- /dev/null
+++ b/drivers/soc/renesas/rcar-gen4-sysc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R-Car Gen4 System Controller
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __SOC_RENESAS_RCAR_GEN4_SYSC_H__
+#define __SOC_RENESAS_RCAR_GEN4_SYSC_H__
+
+#include <linux/types.h>
+
+/*
+ * Power Domain flags
+ */
+#define PD_CPU BIT(0) /* Area contains main CPU core */
+#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
+#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */
+
+#define PD_CPU_NOCR (PD_CPU | PD_NO_CR) /* CPU area lacks CR */
+#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
+
+/*
+ * Description of a Power Area
+ */
+struct rcar_gen4_sysc_area {
+ const char *name;
+ u8 pdr; /* PDRn */
+ int parent; /* -1 if none */
+ unsigned int flags; /* See PD_* */
+};
+
+/*
+ * SoC-specific Power Area Description
+ */
+struct rcar_gen4_sysc_info {
+ const struct rcar_gen4_sysc_area *areas;
+ unsigned int num_areas;
+};
+
+extern const struct rcar_gen4_sysc_info r8a779a0_sysc_info;
+extern const struct rcar_gen4_sysc_info r8a779f0_sysc_info;
+
+#endif /* __SOC_RENESAS_RCAR_GEN4_SYSC_H__ */
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 8a1e402ea799..4d293eb2d8f3 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -13,15 +13,43 @@
#define WDTRSTCR_RESET 0xA55A0002
#define WDTRSTCR 0x0054
+#define CR7BAR 0x0070
+#define CR7BAREN BIT(4)
+#define CR7BAR_MASK 0xFFFC0000
+
+static void __iomem *rcar_rst_base;
+static u32 saved_mode __initdata;
+static int (*rcar_rst_set_rproc_boot_addr_func)(u64 boot_addr);
+
static int rcar_rst_enable_wdt_reset(void __iomem *base)
{
iowrite32(WDTRSTCR_RESET, base + WDTRSTCR);
return 0;
}
+/*
+ * Most R-Car Gen3 SoCs have an Arm realtime core (CR7). The firmware
+ * boot address has to be programmed into CR7BAR before the realtime
+ * core is started, and it must be aligned on a 256 KiB boundary.
+ */
+static int rcar_rst_set_gen3_rproc_boot_addr(u64 boot_addr)
+{
+ if (boot_addr & ~(u64)CR7BAR_MASK) {
+ pr_err("Invalid boot address got %llx\n", boot_addr);
+ return -EINVAL;
+ }
+
+ iowrite32(boot_addr, rcar_rst_base + CR7BAR);
+ iowrite32(boot_addr | CR7BAREN, rcar_rst_base + CR7BAR);
+
+ return 0;
+}
+
struct rst_config {
unsigned int modemr; /* Mode Monitoring Register Offset */
int (*configure)(void __iomem *base); /* Platform specific config */
+ int (*set_rproc_boot_addr)(u64 boot_addr);
};
static const struct rst_config rcar_rst_gen1 __initconst = {
@@ -35,9 +63,10 @@ static const struct rst_config rcar_rst_gen2 __initconst = {
static const struct rst_config rcar_rst_gen3 __initconst = {
.modemr = 0x60,
+ .set_rproc_boot_addr = rcar_rst_set_gen3_rproc_boot_addr,
};
-static const struct rst_config rcar_rst_r8a779a0 __initconst = {
+static const struct rst_config rcar_rst_gen4 __initconst = {
.modemr = 0x00, /* MODEMR0 and it has CPG related bits */
};
@@ -71,14 +100,12 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
{ .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77990-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 },
- /* R-Car V3U */
- { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_r8a779a0 },
+ /* R-Car Gen4 */
+ { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_gen4 },
+ { .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 },
{ /* sentinel */ }
};
-static void __iomem *rcar_rst_base __initdata;
-static u32 saved_mode __initdata;
-
static int __init rcar_rst_init(void)
{
const struct of_device_id *match;
@@ -100,6 +127,8 @@ static int __init rcar_rst_init(void)
rcar_rst_base = base;
cfg = match->data;
+ rcar_rst_set_rproc_boot_addr_func = cfg->set_rproc_boot_addr;
+
saved_mode = ioread32(base + cfg->modemr);
if (cfg->configure) {
error = cfg->configure(base);
@@ -130,3 +159,12 @@ int __init rcar_rst_read_mode_pins(u32 *mode)
*mode = saved_mode;
return 0;
}
+
+int rcar_rst_set_rproc_boot_addr(u64 boot_addr)
+{
+ if (!rcar_rst_set_rproc_boot_addr_func)
+ return -EIO;
+
+ return rcar_rst_set_rproc_boot_addr_func(boot_addr);
+}
+EXPORT_SYMBOL_GPL(rcar_rst_set_rproc_boot_addr);
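
The new export gives remoteproc-style users a way to program the realtime core's firmware base before releasing it from reset. A hypothetical caller, assuming the declaration lands in <linux/soc/renesas/rcar-rst.h> next to rcar_rst_read_mode_pins() (the function below is illustrative, not part of this patch):

#include <linux/soc/renesas/rcar-rst.h>

static int example_start_cr7(u64 fw_base)
{
	int ret;

	/* fw_base must be 256 KiB aligned, per the CR7BAR_MASK check. */
	ret = rcar_rst_set_rproc_boot_addr(fw_base);
	if (ret)
		return ret;

	/* ... then deassert the CR7 reset via the reset controller ... */
	return 0;
}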
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 7961b0be1850..62540ffc581a 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -33,6 +33,10 @@ static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = {
.reg = 0xfff00044, /* PRR (Product Register) */
};
+static const struct renesas_family fam_rcar_gen4 __initconst __maybe_unused = {
+ .name = "R-Car Gen4",
+};
+
static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
.name = "R-Mobile",
.reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
@@ -214,6 +218,11 @@ static const struct renesas_soc soc_rcar_v3u __initconst __maybe_unused = {
.id = 0x59,
};
+static const struct renesas_soc soc_rcar_s4 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen4,
+ .id = 0x5a,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@@ -319,6 +328,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A779A0
{ .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u },
#endif
+#ifdef CONFIG_ARCH_R8A779F0
+ { .compatible = "renesas,r8a779f0", .data = &soc_rcar_s4 },
+#endif
#if defined(CONFIG_ARCH_R9A07G044)
{ .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
#endif
@@ -328,94 +340,92 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ /* sentinel */ }
};
+struct renesas_id {
+ unsigned int offset;
+ u32 mask;
+};
+
+static const struct renesas_id id_bsid __initconst = {
+ .offset = 0,
+ .mask = 0xff0000,
+ /*
+ * TODO: Upper 4 bits of BSID are for chip version, but the format is
+ * not known at this time so we don't know how to specify eshi and eslo
+ */
+};
+
+static const struct renesas_id id_rzg2l __initconst = {
+ .offset = 0xa04,
+ .mask = 0xfffffff,
+};
+
+static const struct renesas_id id_prr __initconst = {
+ .offset = 0,
+ .mask = 0xff00,
+};
+
+static const struct of_device_id renesas_ids[] __initconst = {
+ { .compatible = "renesas,bsid", .data = &id_bsid },
+ { .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l },
+ { .compatible = "renesas,prr", .data = &id_prr },
+ { /* sentinel */ }
+};
+
static int __init renesas_soc_init(void)
{
struct soc_device_attribute *soc_dev_attr;
+ unsigned int product, eshi = 0, eslo;
const struct renesas_family *family;
const struct of_device_id *match;
const struct renesas_soc *soc;
+ const struct renesas_id *id;
void __iomem *chipid = NULL;
struct soc_device *soc_dev;
struct device_node *np;
- unsigned int product, eshi = 0, eslo;
+ const char *soc_id;
match = of_match_node(renesas_socs, of_root);
if (!match)
return -ENODEV;
+ soc_id = strchr(match->compatible, ',') + 1;
soc = match->data;
family = soc->family;
- np = of_find_compatible_node(NULL, NULL, "renesas,bsid");
+ np = of_find_matching_node_and_match(NULL, renesas_ids, &match);
if (np) {
+ id = match->data;
chipid = of_iomap(np, 0);
of_node_put(np);
-
- if (chipid) {
- product = readl(chipid);
- iounmap(chipid);
-
- if (soc->id && ((product >> 16) & 0xff) != soc->id) {
- pr_warn("SoC mismatch (product = 0x%x)\n",
- product);
- return -ENODEV;
- }
- }
-
- /*
- * TODO: Upper 4 bits of BSID are for chip version, but the
- * format is not known at this time so we don't know how to
- * specify eshi and eslo
- */
-
- goto done;
+ } else if (soc->id && family->reg) {
+ /* Try hardcoded CCCR/PRR fallback */
+ id = &id_prr;
+ chipid = ioremap(family->reg, 4);
}
- np = of_find_compatible_node(NULL, NULL, "renesas,r9a07g044-sysc");
- if (np) {
- chipid = of_iomap(np, 0);
- of_node_put(np);
+ if (chipid) {
+ product = readl(chipid + id->offset);
+ iounmap(chipid);
- if (chipid) {
- product = readl(chipid + 0x0a04);
- iounmap(chipid);
+ if (id == &id_prr) {
+ /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
+ if ((product & 0x7fff) == 0x5210)
+ product ^= 0x11;
+ /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
+ if ((product & 0x7fff) == 0x5211)
+ product ^= 0x12;
- if (soc->id && (product & 0xfffffff) != soc->id) {
- pr_warn("SoC mismatch (product = 0x%x)\n",
- product);
- return -ENODEV;
- }
+ eshi = ((product >> 4) & 0x0f) + 1;
+ eslo = product & 0xf;
}
- goto done;
- }
-
- /* Try PRR first, then hardcoded fallback */
- np = of_find_compatible_node(NULL, NULL, "renesas,prr");
- if (np) {
- chipid = of_iomap(np, 0);
- of_node_put(np);
- } else if (soc->id && family->reg) {
- chipid = ioremap(family->reg, 4);
- }
- if (chipid) {
- product = readl(chipid);
- iounmap(chipid);
- /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
- if ((product & 0x7fff) == 0x5210)
- product ^= 0x11;
- /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
- if ((product & 0x7fff) == 0x5211)
- product ^= 0x12;
- if (soc->id && ((product >> 8) & 0xff) != soc->id) {
+ if (soc->id &&
+ ((product & id->mask) >> __ffs(id->mask)) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
return -ENODEV;
}
- eshi = ((product >> 4) & 0x0f) + 1;
- eslo = product & 0xf;
}
-done:
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
@@ -425,8 +435,7 @@ done:
of_node_put(np);
soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
- soc_dev_attr->soc_id = kstrdup_const(strchr(match->compatible, ',') + 1,
- GFP_KERNEL);
+ soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL);
if (eshi)
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u", eshi,
eslo);
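
The renesas-soc.c refactor collapses three near-identical ID probes into one table walk keyed on { offset, mask }, and the extraction expression does the remaining work: __ffs(mask) yields the bit position of the field's least significant set bit, so shifting by it right-aligns the field regardless of where it sits in the register. In isolation (a minimal sketch):

#include <linux/bitops.h>

/* Extract a register field described only by its mask. */
static unsigned int example_field_get(u32 reg, u32 mask)
{
	return (reg & mask) >> __ffs(mask);
}

/* e.g. for PRR: example_field_get(product, 0xff00) yields the product ID. */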
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index e2cedef1e8d1..a9f8b224322e 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -23,6 +23,20 @@ config EXYNOS_CHIPID
Support for Samsung Exynos SoC ChipID and Adaptive Supply Voltage.
This driver can also be built as module (exynos_chipid).
+config EXYNOS_USI
+ tristate "Exynos USI (Universal Serial Interface) driver"
+ default ARCH_EXYNOS && ARM64
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ select MFD_SYSCON
+ help
+	  Enable support for the USI block. USI (Universal Serial Interface) is
+	  an IP core found in modern Samsung Exynos SoCs, like Exynos850 and
+	  ExynosAutoV9. The USI block can be configured to provide one of the
+	  following serial protocols: UART, SPI or High Speed I2C.
+
+	  This driver allows one to configure USI for the desired protocol,
+	  which is usually done in the USI node in the Device Tree.
+
config EXYNOS_PMU
bool "Exynos PMU controller driver" if COMPILE_TEST
depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
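
Behind the EXYNOS_USI help text, protocol selection boils down to a syscon write into the SW_CONF field of the system-register block (the full driver is added later in this patch). A rough sketch of that core idea, assuming a regmap handle and register offset obtained from the device tree (not the driver's exact code):

#include <linux/regmap.h>

/* Hypothetical: route one USI instance to a single serial protocol. */
static int example_usi_select(struct regmap *sysreg, unsigned int sw_conf,
			      unsigned int mask, unsigned int mode_val)
{
	return regmap_update_bits(sysreg, sw_conf, mask, mode_val);
}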
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 2ae4bea804cf..9f59d1905ab0 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -4,6 +4,8 @@ obj-$(CONFIG_EXYNOS_ASV_ARM) += exynos5422-asv.o
obj-$(CONFIG_EXYNOS_CHIPID) += exynos_chipid.o
exynos_chipid-y += exynos-chipid.o exynos-asv.o
+obj-$(CONFIG_EXYNOS_USI) += exynos-usi.o
+
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index a28053ec7e6a..2746d05936d3 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -42,6 +42,7 @@ static const struct exynos_soc_id {
unsigned int id;
} soc_ids[] = {
/* List ordered by SoC name */
+ /* Compatible with: samsung,exynos4210-chipid */
{ "EXYNOS3250", 0xE3472000 },
{ "EXYNOS4210", 0x43200000 }, /* EVT0 revision */
{ "EXYNOS4210", 0x43210000 },
@@ -55,6 +56,8 @@ static const struct exynos_soc_id {
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
+ /* Compatible with: samsung,exynos850-chipid */
+ { "EXYNOS7885", 0xE7885000 },
{ "EXYNOS850", 0xE3830000 },
{ "EXYNOSAUTOV9", 0xAAA80000 },
};
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index a18c93a4646c..732c86ce2be8 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -94,6 +94,8 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = {
.compatible = "samsung,exynos5433-pmu",
}, {
.compatible = "samsung,exynos7-pmu",
+ }, {
+ .compatible = "samsung,exynos850-pmu",
},
{ /*sentinel*/ },
};
diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c
new file mode 100644
index 000000000000..114352695ac2
--- /dev/null
+++ b/drivers/soc/samsung/exynos-usi.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Samsung Exynos USI driver (Universal Serial Interface).
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/soc/samsung,exynos-usi.h>
+
+/* USIv2: System Register: SW_CONF register bits */
+#define USI_V2_SW_CONF_NONE 0x0
+#define USI_V2_SW_CONF_UART BIT(0)
+#define USI_V2_SW_CONF_SPI BIT(1)
+#define USI_V2_SW_CONF_I2C BIT(2)
+#define USI_V2_SW_CONF_MASK (USI_V2_SW_CONF_UART | USI_V2_SW_CONF_SPI | \
+ USI_V2_SW_CONF_I2C)
+
+/* USIv2: USI register offsets */
+#define USI_CON 0x04
+#define USI_OPTION 0x08
+
+/* USIv2: USI register bits */
+#define USI_CON_RESET BIT(0)
+#define USI_OPTION_CLKREQ_ON BIT(1)
+#define USI_OPTION_CLKSTOP_ON BIT(2)
+
+enum exynos_usi_ver {
+ USI_VER2 = 2,
+};
+
+struct exynos_usi_variant {
+ enum exynos_usi_ver ver; /* USI IP-core version */
+ unsigned int sw_conf_mask; /* SW_CONF mask for all protocols */
+ size_t min_mode; /* first index in exynos_usi_modes[] */
+ size_t max_mode; /* last index in exynos_usi_modes[] */
+ size_t num_clks; /* number of clocks to assert */
+ const char * const *clk_names; /* clock names to assert */
+};
+
+struct exynos_usi {
+ struct device *dev;
+ void __iomem *regs; /* USI register map */
+ struct clk_bulk_data *clks; /* USI clocks */
+
+ size_t mode; /* current USI SW_CONF mode index */
+ bool clkreq_on; /* always provide clock to IP */
+
+ /* System Register */
+ struct regmap *sysreg; /* System Register map */
+ unsigned int sw_conf; /* SW_CONF register offset in sysreg */
+
+ const struct exynos_usi_variant *data;
+};
+
+struct exynos_usi_mode {
+ const char *name; /* mode name */
+ unsigned int val; /* mode register value */
+};
+
+static const struct exynos_usi_mode exynos_usi_modes[] = {
+ [USI_V2_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
+ [USI_V2_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
+ [USI_V2_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
+ [USI_V2_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
+};
+
+static const char * const exynos850_usi_clk_names[] = { "pclk", "ipclk" };
+static const struct exynos_usi_variant exynos850_usi_data = {
+ .ver = USI_VER2,
+ .sw_conf_mask = USI_V2_SW_CONF_MASK,
+ .min_mode = USI_V2_NONE,
+ .max_mode = USI_V2_I2C,
+ .num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
+ .clk_names = exynos850_usi_clk_names,
+};
+
+static const struct of_device_id exynos_usi_dt_match[] = {
+ {
+ .compatible = "samsung,exynos850-usi",
+ .data = &exynos850_usi_data,
+ },
+ { } /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, exynos_usi_dt_match);
+
+/**
+ * exynos_usi_set_sw_conf - Set USI block configuration mode
+ * @usi: USI driver object
+ * @mode: Mode index
+ *
+ * Select underlying serial protocol (UART/SPI/I2C) in USI IP-core.
+ *
+ * Return: 0 on success, or negative error code on failure.
+ */
+static int exynos_usi_set_sw_conf(struct exynos_usi *usi, size_t mode)
+{
+ unsigned int val;
+ int ret;
+
+ if (mode < usi->data->min_mode || mode > usi->data->max_mode)
+ return -EINVAL;
+
+ val = exynos_usi_modes[mode].val;
+ ret = regmap_update_bits(usi->sysreg, usi->sw_conf,
+ usi->data->sw_conf_mask, val);
+ if (ret)
+ return ret;
+
+ usi->mode = mode;
+ dev_dbg(usi->dev, "protocol: %s\n", exynos_usi_modes[usi->mode].name);
+
+ return 0;
+}
+
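+/*
+ * Editor's sketch (illustrative only, not part of this patch): switching
+ * a probed USI instance to UART mode reduces to a single call; "usi" is
+ * assumed to be a valid, already-initialized driver object.
+ */
+static int __maybe_unused exynos_usi_example_select_uart(struct exynos_usi *usi)
+{
+	/* Writes USI_V2_SW_CONF_UART into the SW_CONF field via regmap */
+	return exynos_usi_set_sw_conf(usi, USI_V2_UART);
+}
+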
+/**
+ * exynos_usi_enable - Initialize USI block
+ * @usi: USI driver object
+ *
+ * USI IP-core start state is "reset" (on startup and after CPU resume). This
+ * routine enables the USI block by clearing the reset flag. It also configures
+ * HWACG behavior (needed e.g. for UART Rx). It should be performed before the
+ * underlying protocol becomes functional.
+ *
+ * Return: 0 on success, or negative error code on failure.
+ */
+static int exynos_usi_enable(const struct exynos_usi *usi)
+{
+ u32 val;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(usi->data->num_clks, usi->clks);
+ if (ret)
+ return ret;
+
+ /* Enable USI block */
+ val = readl(usi->regs + USI_CON);
+ val &= ~USI_CON_RESET;
+ writel(val, usi->regs + USI_CON);
+ udelay(1);
+
+ /* Continuously provide the clock to USI IP w/o gating */
+ if (usi->clkreq_on) {
+ val = readl(usi->regs + USI_OPTION);
+ val &= ~USI_OPTION_CLKSTOP_ON;
+ val |= USI_OPTION_CLKREQ_ON;
+ writel(val, usi->regs + USI_OPTION);
+ }
+
+ clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
+
+ return ret;
+}
+
+static int exynos_usi_configure(struct exynos_usi *usi)
+{
+ int ret;
+
+ ret = exynos_usi_set_sw_conf(usi, usi->mode);
+ if (ret)
+ return ret;
+
+ if (usi->data->ver == USI_VER2)
+ return exynos_usi_enable(usi);
+
+ return 0;
+}
+
+static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
+{
+ int ret;
+ u32 mode;
+
+ ret = of_property_read_u32(np, "samsung,mode", &mode);
+ if (ret)
+ return ret;
+ if (mode < usi->data->min_mode || mode > usi->data->max_mode)
+ return -EINVAL;
+ usi->mode = mode;
+
+ usi->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
+ if (IS_ERR(usi->sysreg))
+ return PTR_ERR(usi->sysreg);
+
+ ret = of_property_read_u32_index(np, "samsung,sysreg", 1,
+ &usi->sw_conf);
+ if (ret)
+ return ret;
+
+ usi->clkreq_on = of_property_read_bool(np, "samsung,clkreq-on");
+
+ return 0;
+}
+
+static int exynos_usi_get_clocks(struct exynos_usi *usi)
+{
+ const size_t num = usi->data->num_clks;
+ struct device *dev = usi->dev;
+ size_t i;
+
+ if (num == 0)
+ return 0;
+
+ usi->clks = devm_kcalloc(dev, num, sizeof(*usi->clks), GFP_KERNEL);
+ if (!usi->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; ++i)
+ usi->clks[i].id = usi->data->clk_names[i];
+
+ return devm_clk_bulk_get(dev, num, usi->clks);
+}
+
+static int exynos_usi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct exynos_usi *usi;
+ int ret;
+
+ usi = devm_kzalloc(dev, sizeof(*usi), GFP_KERNEL);
+ if (!usi)
+ return -ENOMEM;
+
+ usi->dev = dev;
+ platform_set_drvdata(pdev, usi);
+
+ usi->data = of_device_get_match_data(dev);
+ if (!usi->data)
+ return -EINVAL;
+
+ ret = exynos_usi_parse_dt(np, usi);
+ if (ret)
+ return ret;
+
+ ret = exynos_usi_get_clocks(usi);
+ if (ret)
+ return ret;
+
+ if (usi->data->ver == USI_VER2) {
+ usi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(usi->regs))
+ return PTR_ERR(usi->regs);
+ }
+
+ ret = exynos_usi_configure(usi);
+ if (ret)
+ return ret;
+
+ /* Make it possible to embed protocol nodes into USI np */
+ return of_platform_populate(np, NULL, NULL, dev);
+}
+
+static int __maybe_unused exynos_usi_resume_noirq(struct device *dev)
+{
+ struct exynos_usi *usi = dev_get_drvdata(dev);
+
+ return exynos_usi_configure(usi);
+}
+
+static const struct dev_pm_ops exynos_usi_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, exynos_usi_resume_noirq)
+};
+
+static struct platform_driver exynos_usi_driver = {
+ .driver = {
+ .name = "exynos-usi",
+ .pm = &exynos_usi_pm,
+ .of_match_table = exynos_usi_dt_match,
+ },
+ .probe = exynos_usi_probe,
+};
+module_platform_driver(exynos_usi_driver);
+
+MODULE_DESCRIPTION("Samsung USI driver");
+MODULE_AUTHOR("Sam Protsenko <semen.protsenko@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index cd33e99249c3..32c346b72635 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
@@ -43,6 +44,7 @@ static int tegra_core_dev_init_opp_state(struct device *dev)
{
unsigned long rate;
struct clk *clk;
+ bool rpm_enabled;
int err;
clk = devm_clk_get(dev, NULL);
@@ -57,8 +59,31 @@ static int tegra_core_dev_init_opp_state(struct device *dev)
return -EINVAL;
}
+	/*
+	 * Runtime PM of the device must be enabled in order to set up
+	 * GENPD's performance state properly, because the GENPD core checks
+	 * whether the device is suspended and that check doesn't work while
+	 * RPM is disabled. This makes sure the OPP vote below gets cached in
+	 * GENPD for the device; otherwise the vote is postponed until the
+	 * next time the device gets runtime-resumed.
+	 */
+ rpm_enabled = pm_runtime_enabled(dev);
+ if (!rpm_enabled)
+ pm_runtime_enable(dev);
+
+ /* should never happen in practice */
+ if (!pm_runtime_enabled(dev)) {
+ dev_WARN(dev, "failed to enable runtime PM\n");
+ pm_runtime_disable(dev);
+ return -EINVAL;
+ }
+
/* first dummy rate-setting initializes voltage vote */
err = dev_pm_opp_set_rate(dev, rate);
+
+ if (!rpm_enabled)
+ pm_runtime_disable(dev);
+
if (err) {
dev_err(dev, "failed to initialize OPP clock: %d\n", err);
return err;
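
The pattern in the hunk above (enable runtime PM transiently so the GENPD core caches the OPP vote) generalizes beyond Tegra. A minimal sketch, assuming a generic device pointer; the helper name is hypothetical:

#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>

/* Vote for an OPP rate with runtime PM transiently enabled. */
static int example_opp_vote(struct device *dev, unsigned long rate)
{
	bool was_enabled = pm_runtime_enabled(dev);
	int err;

	if (!was_enabled)
		pm_runtime_enable(dev);

	err = dev_pm_opp_set_rate(dev, rate);

	if (!was_enabled)
		pm_runtime_disable(dev);

	return err;
}
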
@@ -111,9 +136,7 @@ int devm_tegra_core_dev_init_opp_table(struct device *dev,
*/
err = devm_pm_opp_of_add_table(dev);
if (err) {
- if (err == -ENODEV)
- dev_err_once(dev, "OPP table not found, please update device-tree\n");
- else
+ if (err != -ENODEV)
dev_err(dev, "failed to add OPP table: %d\n", err);
return err;
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index e714ed3b61bc..913103ee5432 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -14,6 +14,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -181,6 +182,12 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = {
},
};
+static void tegra_fuse_restore(void *base)
+{
+ fuse->clk = NULL;
+ fuse->base = base;
+}
+
static int tegra_fuse_probe(struct platform_device *pdev)
{
void __iomem *base = fuse->base;
@@ -188,13 +195,16 @@ static int tegra_fuse_probe(struct platform_device *pdev)
struct resource *res;
int err;
+ err = devm_add_action(&pdev->dev, tegra_fuse_restore, base);
+ if (err)
+ return err;
+
/* take over the memory region from the early initialization */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fuse->phys = res->start;
fuse->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(fuse->base)) {
err = PTR_ERR(fuse->base);
- fuse->base = base;
return err;
}
@@ -204,19 +214,20 @@ static int tegra_fuse_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
PTR_ERR(fuse->clk));
- fuse->base = base;
return PTR_ERR(fuse->clk);
}
platform_set_drvdata(pdev, fuse);
fuse->dev = &pdev->dev;
- pm_runtime_enable(&pdev->dev);
+ err = devm_pm_runtime_enable(&pdev->dev);
+ if (err)
+ return err;
if (fuse->soc->probe) {
err = fuse->soc->probe(fuse);
if (err < 0)
- goto restore;
+ return err;
}
memset(&nvmem, 0, sizeof(nvmem));
@@ -240,19 +251,37 @@ static int tegra_fuse_probe(struct platform_device *pdev)
err = PTR_ERR(fuse->nvmem);
dev_err(&pdev->dev, "failed to register NVMEM device: %d\n",
err);
- goto restore;
+ return err;
+ }
+
+ fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse");
+ if (IS_ERR(fuse->rst)) {
+ err = PTR_ERR(fuse->rst);
+ dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n",
+ fuse->rst);
+ return err;
+ }
+
+	/*
+	 * The FUSE clock is enabled at boot time, hence this resume/suspend
+	 * cycle disables the clock in addition to resetting the hardware.
+	 */
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+ return err;
+
+ err = reset_control_reset(fuse->rst);
+ pm_runtime_put(&pdev->dev);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to reset FUSE: %d\n", err);
+ return err;
}
/* release the early I/O memory mapping */
iounmap(base);
return 0;
-
-restore:
- fuse->clk = NULL;
- fuse->base = base;
- pm_runtime_disable(&pdev->dev);
- return err;
}
static int __maybe_unused tegra_fuse_runtime_resume(struct device *dev)
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
index 8ec9fc5e5e4b..12503f563e36 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra20.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -94,9 +94,28 @@ static bool dma_filter(struct dma_chan *chan, void *filter_param)
return of_device_is_compatible(np, "nvidia,tegra20-apbdma");
}
+static void tegra20_fuse_release_channel(void *data)
+{
+ struct tegra_fuse *fuse = data;
+
+ dma_release_channel(fuse->apbdma.chan);
+ fuse->apbdma.chan = NULL;
+}
+
+static void tegra20_fuse_free_coherent(void *data)
+{
+ struct tegra_fuse *fuse = data;
+
+ dma_free_coherent(fuse->dev, sizeof(u32), fuse->apbdma.virt,
+ fuse->apbdma.phys);
+ fuse->apbdma.virt = NULL;
+ fuse->apbdma.phys = 0x0;
+}
+
static int tegra20_fuse_probe(struct tegra_fuse *fuse)
{
dma_cap_mask_t mask;
+ int err;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -105,13 +124,21 @@ static int tegra20_fuse_probe(struct tegra_fuse *fuse)
if (!fuse->apbdma.chan)
return -EPROBE_DEFER;
+ err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_release_channel,
+ fuse);
+ if (err)
+ return err;
+
fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32),
&fuse->apbdma.phys,
GFP_KERNEL);
- if (!fuse->apbdma.virt) {
- dma_release_channel(fuse->apbdma.chan);
+ if (!fuse->apbdma.virt)
return -ENOMEM;
- }
+
+ err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_free_coherent,
+ fuse);
+ if (err)
+ return err;
fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index ecff0c08e959..2bb1f9d6a6e6 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -43,6 +43,7 @@ struct tegra_fuse {
void __iomem *base;
phys_addr_t phys;
struct clk *clk;
+ struct reset_control *rst;
u32 (*read_early)(struct tegra_fuse *fuse, unsigned int offset);
u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 575d6d5b4294..5aceacbd8ce0 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -1064,10 +1064,8 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
return tegra_powergate_remove_clamping(id);
}
-static int tegra_pmc_restart_notify(struct notifier_block *this,
- unsigned long action, void *data)
+static void tegra_pmc_program_reboot_reason(const char *cmd)
{
- const char *cmd = data;
u32 value;
value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0);
@@ -1085,6 +1083,25 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
}
tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0);
+}
+
+static int tegra_pmc_reboot_notify(struct notifier_block *this,
+ unsigned long action, void *data)
+{
+ if (action == SYS_RESTART)
+ tegra_pmc_program_reboot_reason(data);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tegra_pmc_reboot_notifier = {
+ .notifier_call = tegra_pmc_reboot_notify,
+};
+
+static int tegra_pmc_restart_notify(struct notifier_block *this,
+ unsigned long action, void *data)
+{
+ u32 value;
/* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
value = tegra_pmc_readl(pmc, PMC_CNTRL);
@@ -1353,7 +1370,7 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
if (!genpd)
return -ENOMEM;
- genpd->name = np->name;
+ genpd->name = "core";
genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state;
genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state;
@@ -2890,6 +2907,14 @@ static int tegra_pmc_probe(struct platform_device *pdev)
goto cleanup_sysfs;
}
+ err = devm_register_reboot_notifier(&pdev->dev,
+ &tegra_pmc_reboot_notifier);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
+ err);
+ goto cleanup_debugfs;
+ }
+
err = register_restart_handler(&tegra_pmc_restart_handler);
if (err) {
dev_err(&pdev->dev, "unable to register restart handler, %d\n",
@@ -2963,7 +2988,7 @@ static SIMPLE_DEV_PM_OPS(tegra_pmc_pm_ops, tegra_pmc_suspend, tegra_pmc_resume);
static const char * const tegra20_powergates[] = {
[TEGRA_POWERGATE_CPU] = "cpu",
- [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_3D] = "td",
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_VDEC] = "vdec",
[TEGRA_POWERGATE_PCIE] = "pcie",
@@ -3071,7 +3096,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
static const char * const tegra30_powergates[] = {
[TEGRA_POWERGATE_CPU] = "cpu0",
- [TEGRA_POWERGATE_3D] = "3d0",
+ [TEGRA_POWERGATE_3D] = "td",
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_VDEC] = "vdec",
[TEGRA_POWERGATE_PCIE] = "pcie",
@@ -3083,7 +3108,7 @@ static const char * const tegra30_powergates[] = {
[TEGRA_POWERGATE_CPU2] = "cpu2",
[TEGRA_POWERGATE_CPU3] = "cpu3",
[TEGRA_POWERGATE_CELP] = "celp",
- [TEGRA_POWERGATE_3D1] = "3d1",
+ [TEGRA_POWERGATE_3D1] = "td2",
};
static const u8 tegra30_cpu_powergates[] = {
@@ -3132,7 +3157,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
static const char * const tegra114_powergates[] = {
[TEGRA_POWERGATE_CPU] = "crail",
- [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_3D] = "td",
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_VDEC] = "vdec",
[TEGRA_POWERGATE_MPE] = "mpe",
diff --git a/drivers/soc/tegra/regulators-tegra20.c b/drivers/soc/tegra/regulators-tegra20.c
index b8ce9fd0650d..6a2f90ab9d3e 100644
--- a/drivers/soc/tegra/regulators-tegra20.c
+++ b/drivers/soc/tegra/regulators-tegra20.c
@@ -16,7 +16,9 @@
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/suspend.h>
+#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
struct tegra_regulator_coupler {
@@ -25,9 +27,12 @@ struct tegra_regulator_coupler {
struct regulator_dev *cpu_rdev;
struct regulator_dev *rtc_rdev;
struct notifier_block reboot_notifier;
+ struct notifier_block suspend_notifier;
int core_min_uV, cpu_min_uV;
bool sys_reboot_mode_req;
bool sys_reboot_mode;
+ bool sys_suspend_mode_req;
+ bool sys_suspend_mode;
};
static inline struct tegra_regulator_coupler *
@@ -105,6 +110,28 @@ static int tegra20_core_rtc_max_spread(struct regulator_dev *core_rdev,
return 150000;
}
+static int tegra20_cpu_nominal_uV(void)
+{
+ switch (tegra_sku_info.soc_speedo_id) {
+ case 0:
+ return 1100000;
+ case 1:
+ return 1025000;
+ default:
+ return 1125000;
+ }
+}
+
+static int tegra20_core_nominal_uV(void)
+{
+ switch (tegra_sku_info.soc_speedo_id) {
+ default:
+ return 1225000;
+ case 2:
+ return 1300000;
+ }
+}
+
static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
struct regulator_dev *core_rdev,
struct regulator_dev *rtc_rdev,
@@ -144,6 +171,11 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
if (err)
return err;
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ core_min_uV = clamp(tegra20_core_nominal_uV(),
+ core_min_uV, core_max_uV);
+
core_uV = regulator_get_voltage_rdev(core_rdev);
if (core_uV < 0)
return core_uV;
@@ -279,6 +311,11 @@ static int tegra20_cpu_voltage_update(struct tegra_regulator_coupler *tegra,
if (tegra->sys_reboot_mode)
cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV);
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ cpu_min_uV = clamp(tegra20_cpu_nominal_uV(),
+ cpu_min_uV, cpu_max_uV);
+
if (cpu_min_uV > cpu_uV) {
err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
cpu_uV, cpu_min_uV);
@@ -320,6 +357,7 @@ static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
}
tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req);
+ tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req);
if (rdev == cpu_rdev)
return tegra20_cpu_voltage_update(tegra, cpu_rdev,
@@ -334,6 +372,63 @@ static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
return -EPERM;
}
+static int tegra20_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra,
+ bool sys_suspend_mode)
+{
+ int err;
+
+ if (!tegra->core_rdev || !tegra->rtc_rdev || !tegra->cpu_rdev)
+ return 0;
+
+ /*
+ * All power domains are enabled early during resume from suspend
+ * by GENPD core. Domains like VENC may require a higher voltage
+ * when enabled during resume from suspend. This also prepares
+ * hardware for resuming from LP0.
+ */
+
+ WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode);
+
+ err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
+ if (err)
+ return err;
+
+ err = regulator_sync_voltage_rdev(tegra->core_rdev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra20_regulator_suspend(struct notifier_block *notifier,
+ unsigned long mode, void *arg)
+{
+ struct tegra_regulator_coupler *tegra;
+ int ret = 0;
+
+ tegra = container_of(notifier, struct tegra_regulator_coupler,
+ suspend_notifier);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = tegra20_regulator_prepare_suspend(tegra, true);
+ break;
+
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ ret = tegra20_regulator_prepare_suspend(tegra, false);
+ break;
+ }
+
+ if (ret)
+ pr_err("failed to prepare regulators: %d\n", ret);
+
+ return notifier_from_errno(ret);
+}
+
static int tegra20_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra,
bool sys_reboot_mode)
{
@@ -444,6 +539,7 @@ static struct tegra_regulator_coupler tegra20_coupler = {
.balance_voltage = tegra20_regulator_balance_voltage,
},
.reboot_notifier.notifier_call = tegra20_regulator_reboot,
+ .suspend_notifier.notifier_call = tegra20_regulator_suspend,
};
static int __init tegra_regulator_coupler_init(void)
@@ -456,6 +552,9 @@ static int __init tegra_regulator_coupler_init(void)
err = register_reboot_notifier(&tegra20_coupler.reboot_notifier);
WARN_ON(err);
+ err = register_pm_notifier(&tegra20_coupler.suspend_notifier);
+ WARN_ON(err);
+
return regulator_coupler_register(&tegra20_coupler.coupler);
}
arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
index e74bbc9c7859..8fd43c689134 100644
--- a/drivers/soc/tegra/regulators-tegra30.c
+++ b/drivers/soc/tegra/regulators-tegra30.c
@@ -16,6 +16,7 @@
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/suspend.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
@@ -25,9 +26,12 @@ struct tegra_regulator_coupler {
struct regulator_dev *core_rdev;
struct regulator_dev *cpu_rdev;
struct notifier_block reboot_notifier;
+ struct notifier_block suspend_notifier;
int core_min_uV, cpu_min_uV;
bool sys_reboot_mode_req;
bool sys_reboot_mode;
+ bool sys_suspend_mode_req;
+ bool sys_suspend_mode;
};
static inline struct tegra_regulator_coupler *
@@ -113,6 +117,52 @@ static int tegra30_core_cpu_limit(int cpu_uV)
return -EINVAL;
}
+static int tegra30_cpu_nominal_uV(void)
+{
+ switch (tegra_sku_info.cpu_speedo_id) {
+ case 10 ... 11:
+ return 850000;
+
+ case 9:
+ return 912000;
+
+ case 1 ... 3:
+ case 7 ... 8:
+ return 1050000;
+
+ default:
+ return 1125000;
+
+ case 4 ... 6:
+ case 12 ... 13:
+ return 1237000;
+ }
+}
+
+static int tegra30_core_nominal_uV(void)
+{
+ switch (tegra_sku_info.soc_speedo_id) {
+ case 0:
+ return 1200000;
+
+ case 1:
+ if (tegra_sku_info.cpu_speedo_id != 7 &&
+ tegra_sku_info.cpu_speedo_id != 8)
+ return 1200000;
+
+ fallthrough;
+
+ case 2:
+ if (tegra_sku_info.cpu_speedo_id != 13)
+ return 1300000;
+
+ return 1350000;
+
+ default:
+ return 1250000;
+ }
+}
+
static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
struct regulator_dev *cpu_rdev,
struct regulator_dev *core_rdev)
@@ -168,6 +218,11 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
if (err)
return err;
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ core_min_uV = clamp(tegra30_core_nominal_uV(),
+ core_min_uV, core_max_uV);
+
core_uV = regulator_get_voltage_rdev(core_rdev);
if (core_uV < 0)
return core_uV;
@@ -223,6 +278,11 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
if (tegra->sys_reboot_mode)
cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV);
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ cpu_min_uV = clamp(tegra30_cpu_nominal_uV(),
+ cpu_min_uV, cpu_max_uV);
+
if (core_min_limited_uV > core_uV) {
pr_err("core voltage constraint violated: %d %d %d\n",
core_uV, core_min_limited_uV, cpu_uV);
@@ -292,10 +352,68 @@ static int tegra30_regulator_balance_voltage(struct regulator_coupler *coupler,
}
tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req);
+ tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req);
return tegra30_voltage_update(tegra, cpu_rdev, core_rdev);
}
+static int tegra30_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra,
+ bool sys_suspend_mode)
+{
+ int err;
+
+ if (!tegra->core_rdev || !tegra->cpu_rdev)
+ return 0;
+
+ /*
+ * All power domains are enabled early during resume from suspend
+ * by GENPD core. Domains like VENC may require a higher voltage
+ * when enabled during resume from suspend. This also prepares
+ * hardware for resuming from LP0.
+ */
+
+ WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode);
+
+ err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
+ if (err)
+ return err;
+
+ err = regulator_sync_voltage_rdev(tegra->core_rdev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra30_regulator_suspend(struct notifier_block *notifier,
+ unsigned long mode, void *arg)
+{
+ struct tegra_regulator_coupler *tegra;
+ int ret = 0;
+
+ tegra = container_of(notifier, struct tegra_regulator_coupler,
+ suspend_notifier);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = tegra30_regulator_prepare_suspend(tegra, true);
+ break;
+
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ ret = tegra30_regulator_prepare_suspend(tegra, false);
+ break;
+ }
+
+ if (ret)
+ pr_err("failed to prepare regulators: %d\n", ret);
+
+ return notifier_from_errno(ret);
+}
+
static int tegra30_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra,
bool sys_reboot_mode)
{
@@ -395,6 +513,7 @@ static struct tegra_regulator_coupler tegra30_coupler = {
.balance_voltage = tegra30_regulator_balance_voltage,
},
.reboot_notifier.notifier_call = tegra30_regulator_reboot,
+ .suspend_notifier.notifier_call = tegra30_regulator_suspend,
};
static int __init tegra_regulator_coupler_init(void)
@@ -407,6 +526,9 @@ static int __init tegra_regulator_coupler_init(void)
err = register_reboot_notifier(&tegra30_coupler.reboot_notifier);
WARN_ON(err);
+ err = register_pm_notifier(&tegra30_coupler.suspend_notifier);
+ WARN_ON(err);
+
return regulator_coupler_register(&tegra30_coupler.coupler);
}
arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 312ba0f98ad7..31ab6c657fec 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -358,8 +358,8 @@ struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
goto out;
if (flags & K3_RINGACC_RING_USE_PROXY) {
- proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
- ringacc->num_proxies, 0);
+ proxy_id = find_first_zero_bit(ringacc->proxy_inuse,
+ ringacc->num_proxies);
if (proxy_id == ringacc->num_proxies)
goto error;
}
@@ -647,7 +647,7 @@ int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
if (!ring)
return -EINVAL;
- irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
+ irq_num = msi_get_virq(ring->parent->dev, ring->ring_id);
if (irq_num <= 0)
irq_num = -EINVAL;
return irq_num;
@@ -1356,9 +1356,9 @@ static int k3_ringacc_init(struct platform_device *pdev,
struct resource *res;
int ret, i;
- dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
- if (!dev->msi_domain) {
+ if (!dev->msi.domain) {
dev_err(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index fd91129de6e5..b6b2150aca4e 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -40,7 +40,8 @@ static const struct k3_soc_id {
{ 0xBB5A, "AM65X" },
{ 0xBB64, "J721E" },
{ 0xBB6D, "J7200" },
- { 0xBB38, "AM64X" }
+ { 0xBB38, "AM64X" },
+ { 0xBB75, "J721S2"},
};
static int
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 591d14ebcb11..700d8eecd8c4 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -646,31 +646,31 @@ static int dma_init(struct device_node *cloud, struct device_node *dma_node)
}
dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
- if (!dma->reg_global)
- return -ENODEV;
+ if (IS_ERR(dma->reg_global))
+ return PTR_ERR(dma->reg_global);
if (size < sizeof(struct reg_global)) {
dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
return -ENODEV;
}
dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
- if (!dma->reg_tx_chan)
- return -ENODEV;
+ if (IS_ERR(dma->reg_tx_chan))
+ return PTR_ERR(dma->reg_tx_chan);
max_tx_chan = size / sizeof(struct reg_chan);
dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
- if (!dma->reg_rx_chan)
- return -ENODEV;
+ if (IS_ERR(dma->reg_rx_chan))
+ return PTR_ERR(dma->reg_rx_chan);
max_rx_chan = size / sizeof(struct reg_chan);
dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
- if (!dma->reg_tx_sched)
- return -ENODEV;
+ if (IS_ERR(dma->reg_tx_sched))
+ return PTR_ERR(dma->reg_tx_sched);
max_tx_sched = size / sizeof(struct reg_tx_sched);
dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
- if (!dma->reg_rx_flow)
- return -ENODEV;
+ if (IS_ERR(dma->reg_rx_flow))
+ return PTR_ERR(dma->reg_rx_flow);
max_rx_flow = size / sizeof(struct reg_rx_flow);
dma->rx_priority = DMA_PRIO_DEFAULT;
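
The NULL-to-IS_ERR() conversion above follows the kernel's ERR_PTR convention from <linux/err.h>: a pointer-returning helper encodes the errno in the pointer itself, so callers can propagate the precise error instead of a blanket -ENODEV. A minimal sketch of the idiom; the helper names are hypothetical:

#include <linux/err.h>
#include <linux/ioport.h>

/* Hypothetical helper: return the resource or an encoded errno. */
static struct resource *example_get_region(struct resource *res)
{
	if (!res)
		return ERR_PTR(-ENOENT);
	return res;
}

static int example_use_region(struct resource *res)
{
	struct resource *r = example_get_region(res);

	if (IS_ERR(r))
		return PTR_ERR(r);	/* propagate the real error */
	return 0;
}
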
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index 49da387d7749..b36779309e49 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -129,7 +129,7 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
clks_np = of_get_child_by_name(cfg_node, "clocks");
if (!clks_np) {
- dev_err(dev, "%pOF is missing its 'clocks' node\n", clks_np);
+ dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node);
return -ENODEV;
}
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index a1d9c027022a..991c78b34745 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -51,6 +51,7 @@ struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnod
struct irq_domain *domain;
ti_sci_inta_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
@@ -60,50 +61,32 @@ struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnod
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_create_irq_domain);
-static void ti_sci_inta_msi_free_descs(struct device *dev)
-{
- struct msi_desc *desc, *tmp;
-
- list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
- list_del(&desc->list);
- free_msi_entry(desc);
- }
-}
-
static int ti_sci_inta_msi_alloc_descs(struct device *dev,
struct ti_sci_resource *res)
{
- struct msi_desc *msi_desc;
+ struct msi_desc msi_desc;
int set, i, count = 0;
+ memset(&msi_desc, 0, sizeof(msi_desc));
+ msi_desc.nvec_used = 1;
+
for (set = 0; set < res->sets; set++) {
- for (i = 0; i < res->desc[set].num; i++) {
- msi_desc = alloc_msi_entry(dev, 1, NULL);
- if (!msi_desc) {
- ti_sci_inta_msi_free_descs(dev);
- return -ENOMEM;
- }
-
- msi_desc->inta.dev_index = res->desc[set].start + i;
- INIT_LIST_HEAD(&msi_desc->list);
- list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
- count++;
+ for (i = 0; i < res->desc[set].num; i++, count++) {
+ msi_desc.msi_index = res->desc[set].start + i;
+ if (msi_add_msi_desc(dev, &msi_desc))
+ goto fail;
}
- for (i = 0; i < res->desc[set].num_sec; i++) {
- msi_desc = alloc_msi_entry(dev, 1, NULL);
- if (!msi_desc) {
- ti_sci_inta_msi_free_descs(dev);
- return -ENOMEM;
- }
-
- msi_desc->inta.dev_index = res->desc[set].start_sec + i;
- INIT_LIST_HEAD(&msi_desc->list);
- list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
- count++;
+
+ for (i = 0; i < res->desc[set].num_sec; i++, count++) {
+ msi_desc.msi_index = res->desc[set].start_sec + i;
+ if (msi_add_msi_desc(dev, &msi_desc))
+ goto fail;
}
}
-
return count;
+fail:
+ msi_free_msi_descs(dev);
+ return -ENOMEM;
}
int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
@@ -120,39 +103,22 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
if (pdev->id < 0)
return -ENODEV;
- nvec = ti_sci_inta_msi_alloc_descs(dev, res);
- if (nvec <= 0)
- return nvec;
+ ret = msi_setup_device_data(dev);
+ if (ret)
+ return ret;
- ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
- if (ret) {
- dev_err(dev, "Failed to allocate IRQs %d\n", ret);
- goto cleanup;
+ msi_lock_descs(dev);
+ nvec = ti_sci_inta_msi_alloc_descs(dev, res);
+ if (nvec <= 0) {
+ ret = nvec;
+ goto unlock;
}
- return 0;
-
-cleanup:
- ti_sci_inta_msi_free_descs(&pdev->dev);
+ ret = msi_domain_alloc_irqs_descs_locked(msi_domain, dev, nvec);
+ if (ret)
+ dev_err(dev, "Failed to allocate IRQs %d\n", ret);
+unlock:
+ msi_unlock_descs(dev);
return ret;
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
-
-void ti_sci_inta_msi_domain_free_irqs(struct device *dev)
-{
- msi_domain_free_irqs(dev->msi_domain, dev);
- ti_sci_inta_msi_free_descs(dev);
-}
-EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_free_irqs);
-
-unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 dev_index)
-{
- struct msi_desc *desc;
-
- for_each_msi_entry(desc, dev)
- if (desc->inta.dev_index == dev_index)
- return desc->irq;
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(ti_sci_inta_msi_get_virq);
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 53af9115dc31..8a755a5c8836 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -25,4 +25,14 @@ config ZYNQMP_PM_DOMAINS
Say yes to enable device power management through PM domains
If in doubt, say N.
+config XLNX_EVENT_MANAGER
+ bool "Enable Xilinx Event Management Driver"
+ depends on ZYNQMP_FIRMWARE
+ default ZYNQMP_FIRMWARE
+ help
+	  Say yes to enable event management support for Xilinx.
+	  This driver uses the firmware driver as an interface for event/power
+	  management requests to the firmware.
+
+ If in doubt, say N.
endmenu
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
index 9854e6f6086b..41e585bc9c67 100644
--- a/drivers/soc/xilinx/Makefile
+++ b/drivers/soc/xilinx/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o
obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o
+obj-$(CONFIG_XLNX_EVENT_MANAGER) += xlnx_event_manager.o
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
new file mode 100644
index 000000000000..b27f8853508e
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Event Management Driver
+ *
+ * Copyright (C) 2021 Xilinx, Inc.
+ *
+ * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/firmware/xlnx-event-manager.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/hashtable.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);
+
+static int virq_sgi;
+static int event_manager_availability = -EACCES;
+
+/* SGI number used for Event management driver */
+#define XLNX_EVENT_SGI_NUM (15)
+
+/* Max number of drivers that can register for the same event */
+#define MAX_DRIVER_PER_EVENT (10U)
+
+/* Max hashmap order for the registered-driver map (1<<7 = 128) */
+#define REGISTERED_DRIVER_MAX_ORDER (7)
+
+#define MAX_BITS (32U) /* Number of bits available for error mask */
+
+#define FIRMWARE_VERSION_MASK (0xFFFFU)
+#define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U)
+
+static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
+static int sgi_num = XLNX_EVENT_SGI_NUM;
+
+/**
+ * struct registered_event_data - Registered Event Data.
+ * @key:	Combined ID (Node-Id | Event-Id) of type u64, with the
+ *		upper u32 holding the Node-Id and the lower u32 the
+ *		Event-Id; used as the key to index into the hashmap.
+ * @agent_data:	Data passed back to the handler function.
+ * @cb_type:	Type of API callback, like PM_NOTIFY_CB, etc.
+ * @eve_cb:	Function pointer to store the callback function.
+ * @wake:	If this flag is set, the firmware will wake up the
+ *		processor if it is in a sleep or power-down state.
+ * @hentry: hlist_node that hooks this entry into hashtable.
+ */
+struct registered_event_data {
+ u64 key;
+ enum pm_api_cb_id cb_type;
+ void *agent_data;
+
+ event_cb_func_t eve_cb;
+ bool wake;
+ struct hlist_node hentry;
+};
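+
+/*
+ * Editor's sketch (not part of this patch): how the 64-bit hash key is
+ * packed from, and unpacked into, the Node-Id/Event-Id pair described
+ * above.
+ */
+static inline u64 example_pack_key(u32 node_id, u32 event)
+{
+	return ((u64)node_id << 32U) | (u64)event;
+}
+
+static inline u32 example_key_to_node_id(u64 key)
+{
+	return (u32)(key >> 32U);	/* upper u32: Node-Id */
+}
+
+static inline u32 example_key_to_event(u64 key)
+{
+	return (u32)key;		/* lower u32: Event-Id */
+}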
+
+static bool xlnx_is_error_event(const u32 node_id)
+{
+ if (node_id == EVENT_ERROR_PMC_ERR1 ||
+ node_id == EVENT_ERROR_PMC_ERR2 ||
+ node_id == EVENT_ERROR_PSM_ERR1 ||
+ node_id == EVENT_ERROR_PSM_ERR2)
+ return true;
+
+ return false;
+}
+
+static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data)
+{
+ u64 key = 0;
+ struct registered_event_data *eve_data;
+
+ key = ((u64)node_id << 32U) | (u64)event;
+ /* Check for existing entry in hash table for given key id */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
+ if (eve_data->key == key) {
+ pr_err("Found as already registered\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Add new entry if not present */
+ eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
+ if (!eve_data)
+ return -ENOMEM;
+
+ eve_data->key = key;
+ eve_data->cb_type = PM_NOTIFY_CB;
+ eve_data->eve_cb = cb_fun;
+ eve_data->wake = wake;
+ eve_data->agent_data = data;
+
+ hash_add(reg_driver_map, &eve_data->hentry, key);
+
+ return 0;
+}
+
+static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
+{
+ struct registered_event_data *eve_data;
+
+ /* Check for existing entry in hash table for given cb_type */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
+ if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
+ pr_err("Found as already registered\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Add new entry if not present */
+ eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
+ if (!eve_data)
+ return -ENOMEM;
+
+ eve_data->key = 0;
+ eve_data->cb_type = PM_INIT_SUSPEND_CB;
+ eve_data->eve_cb = cb_fun;
+ eve_data->agent_data = data;
+
+ hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);
+
+ return 0;
+}
+
+static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+
+ /* Check for existing entry in hash table for given cb_type */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
+ if (eve_data->cb_type == PM_INIT_SUSPEND_CB &&
+ eve_data->eve_cb == cb_fun) {
+ is_callback_found = true;
+ /* remove an object from a hashtable */
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ }
+ }
+ if (!is_callback_found) {
+ pr_warn("Didn't find any registered callback for suspend event\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
+ event_cb_func_t cb_fun)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+ u64 key = ((u64)node_id << 32U) | (u64)event;
+
+ /* Check for existing entry in hash table for given key id */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
+ if (eve_data->key == key &&
+ eve_data->eve_cb == cb_fun) {
+ is_callback_found = true;
+ /* remove an object from a hashtable */
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ }
+ }
+ if (!is_callback_found) {
+ pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
+ node_id, event);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * xlnx_register_event() - Register for the event.
+ * @cb_type: Type of callback from pm_api_cb_id,
+ * PM_NOTIFY_CB - for Error Events,
+ * PM_INIT_SUSPEND_CB - for suspend callback.
+ * @node_id: Node-Id related to event.
+ * @event: Event Mask for the Error Event.
+ * @wake: Flag specifying whether the subsystem should be woken upon
+ * event notification.
+ * @cb_fun: Function pointer to store the callback function.
+ * @data: Pointer for the driver instance.
+ *
+ * Return: 0 on successful registration, or an error code otherwise.
+ */
+int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
+ const bool wake, event_cb_func_t cb_fun, void *data)
+{
+ int ret = 0;
+ u32 eve;
+ int pos;
+
+ if (event_manager_availability)
+ return event_manager_availability;
+
+ if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
+ pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
+ return -EINVAL;
+ }
+
+ if (!cb_fun)
+ return -EFAULT;
+
+ if (cb_type == PM_INIT_SUSPEND_CB) {
+ ret = xlnx_add_cb_for_suspend(cb_fun, data);
+ } else {
+ if (!xlnx_is_error_event(node_id)) {
+ /* Add entry for Node-Id/Event in hash table */
+ ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data);
+ } else {
+ /* Add into Hash table */
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+
+ /* Add entry for Node-Id/Eve in hash table */
+ ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun,
+ data);
+				/* Break the loop on error */
+ if (ret)
+ break;
+ }
+ if (ret) {
+				/* Skip the event that caused the error */
+				pos--;
+				/* Remove events registered during this call from the hash table */
+ for ( ; pos >= 0; pos--) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ }
+ }
+ }
+
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
+ event, ret);
+ return ret;
+ }
+
+ /* Register for Node-Id/Event combination in firmware */
+ ret = zynqmp_pm_register_notifier(node_id, event, wake, true);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
+ event, ret);
+ /* Remove already registered event from hash table */
+ if (xlnx_is_error_event(node_id)) {
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ }
+ } else {
+ xlnx_remove_cb_for_notify_event(node_id, event, cb_fun);
+ }
+ return ret;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xlnx_register_event);
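+
+/*
+ * Editor's sketch (hypothetical, not part of this patch): registering a
+ * callback for a single PMC error event. The callback signature follows
+ * event_cb_func_t as invoked above; "example_err_cb" is made up.
+ */
+static void example_err_cb(const u32 *payload, void *data)
+{
+	/* payload[0] is the callback type; payload[1]/[2] node id/event */
+	pr_info("error event 0x%x from node 0x%x\n", payload[2], payload[1]);
+}
+
+static int __maybe_unused example_register(void)
+{
+	return xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
+				   BIT(0), false, example_err_cb, NULL);
+}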
+
+/**
+ * xlnx_unregister_event() - Unregister for the event.
+ * @cb_type: Type of callback from pm_api_cb_id,
+ * PM_NOTIFY_CB - for Error Events,
+ * PM_INIT_SUSPEND_CB - for suspend callback.
+ * @node_id: Node-Id related to event.
+ * @event: Event Mask for the Error Event.
+ * @cb_fun: Function pointer of callback function.
+ *
+ * Return: 0 on successful unregistration, or an error code otherwise.
+ */
+int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
+ event_cb_func_t cb_fun)
+{
+ int ret;
+ u32 eve, pos;
+
+ if (event_manager_availability)
+ return event_manager_availability;
+
+ if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
+ pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
+ return -EINVAL;
+ }
+
+ if (!cb_fun)
+ return -EFAULT;
+
+ if (cb_type == PM_INIT_SUSPEND_CB) {
+ ret = xlnx_remove_cb_for_suspend(cb_fun);
+ } else {
+ /* Remove Node-Id/Event from hash table */
+ if (!xlnx_is_error_event(node_id)) {
+ xlnx_remove_cb_for_notify_event(node_id, event, cb_fun);
+ } else {
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ }
+ }
+
+ /* Un-register for Node-Id/Event combination */
+ ret = zynqmp_pm_register_notifier(node_id, event, false, false);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\n",
+ __func__, node_id, event, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xlnx_unregister_event);
+
+static void xlnx_call_suspend_cb_handler(const u32 *payload)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+ u32 cb_type = payload[0];
+
+ /* Check for existing entry in hash table for given cb_type */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
+ if (eve_data->cb_type == cb_type) {
+ eve_data->eve_cb(&payload[0], eve_data->agent_data);
+ is_callback_found = true;
+ }
+ }
+ if (!is_callback_found)
+ pr_warn("Didn't find any registered callback for suspend event\n");
+}
+
+static void xlnx_call_notify_cb_handler(const u32 *payload)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+ u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
+ int ret;
+
+ /* Check for existing entry in hash table for given key id */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
+ if (eve_data->key == key) {
+ eve_data->eve_cb(&payload[0], eve_data->agent_data);
+ is_callback_found = true;
+
+			/* re-register with firmware to get future events */
+ ret = zynqmp_pm_register_notifier(payload[1], payload[2],
+ eve_data->wake, true);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
+ payload[1], payload[2], ret);
+ /* Remove already registered event from hash table */
+ xlnx_remove_cb_for_notify_event(payload[1], payload[2],
+ eve_data->eve_cb);
+ }
+ }
+ }
+ if (!is_callback_found)
+ pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
+ payload[1], payload[2]);
+}
+
+static void xlnx_get_event_callback_data(u32 *buf)
+{
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+}
+
+static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
+{
+ u32 cb_type, node_id, event, pos;
+ u32 payload[CB_MAX_PAYLOAD_SIZE] = {0};
+ u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0};
+
+ /* Get event data */
+ xlnx_get_event_callback_data(payload);
+
+ /* First element is callback type, others are callback arguments */
+ cb_type = payload[0];
+
+ if (cb_type == PM_NOTIFY_CB) {
+ node_id = payload[1];
+ event = payload[2];
+ if (!xlnx_is_error_event(node_id)) {
+ xlnx_call_notify_cb_handler(payload);
+ } else {
+ /*
+			 * Each callback function expects the payload as an input argument.
+			 * One callback can report multiple error events through the error
+			 * mask, so payload[2] may contain multiple error events.
+			 * The reg_driver_map database stores one entry per single
+			 * node_id/error combination.
+			 * Therefore, copy the payload message into event_data, update
+			 * event_data[2] with the error mask of a single error event, and
+			 * use event_data as the input argument for the registered callback
+			 * function.
+			 */
+ memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE));
+ /* Support Multiple Error Event */
+ for (pos = 0; pos < MAX_BITS; pos++) {
+				if (!(event & (1 << pos)))
+ continue;
+ event_data[2] = (event & (1 << pos));
+ xlnx_call_notify_cb_handler(event_data);
+ }
+ }
+ } else if (cb_type == PM_INIT_SUSPEND_CB) {
+ xlnx_call_suspend_cb_handler(payload);
+ } else {
+ pr_err("%s() Unsupported Callback %d\n", __func__, cb_type);
+ }
+
+ return IRQ_HANDLED;
+}
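+
+/*
+ * Editor's note: the per-bit scan above could equivalently use
+ * for_each_set_bit() from <linux/bitops.h>; a hedged sketch:
+ */
+static void __maybe_unused example_fan_out(u32 event, u32 *event_data)
+{
+	unsigned long mask = event;
+	unsigned int pos;
+
+	for_each_set_bit(pos, &mask, MAX_BITS) {
+		event_data[2] = BIT(pos);	/* one error event at a time */
+		xlnx_call_notify_cb_handler(event_data);
+	}
+}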
+
+static int xlnx_event_cpuhp_start(unsigned int cpu)
+{
+ enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int xlnx_event_cpuhp_down(unsigned int cpu)
+{
+ disable_percpu_irq(virq_sgi);
+
+ return 0;
+}
+
+static void xlnx_disable_percpu_irq(void *data)
+{
+ disable_percpu_irq(virq_sgi);
+}
+
+static int xlnx_event_init_sgi(struct platform_device *pdev)
+{
+ int ret = 0;
+ int cpu = smp_processor_id();
+ /*
+ * IRQ related structures are used for the following:
+	 * for each SGI interrupt, ensure it is mapped by the GIC IRQ domain
+	 * and that the corresponding Linux IRQ for the HW IRQ has a
+	 * handler for interrupts received from the remote processor.
+ */
+ struct irq_domain *domain;
+ struct irq_fwspec sgi_fwspec;
+ struct device_node *interrupt_parent = NULL;
+ struct device *parent = pdev->dev.parent;
+
+ /* Find GIC controller to map SGIs. */
+ interrupt_parent = of_irq_find_parent(parent->of_node);
+ if (!interrupt_parent) {
+ dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
+ return -EINVAL;
+ }
+
+ /* Each SGI needs to be associated with GIC's IRQ domain. */
+ domain = irq_find_host(interrupt_parent);
+ of_node_put(interrupt_parent);
+
+ /* Each mapping needs GIC domain when finding IRQ mapping. */
+ sgi_fwspec.fwnode = domain->fwnode;
+
+ /*
+	 * The mapping takes a single fwspec argument: the SGI number
+	 * (hwirq), which is set below.
+ */
+ sgi_fwspec.param_count = 1;
+
+ /* Set SGI's hwirq */
+ sgi_fwspec.param[0] = sgi_num;
+ virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
+
+ per_cpu(cpu_number1, cpu) = cpu;
+ ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
+ &cpu_number1);
+ WARN_ON(ret);
+ if (ret) {
+ irq_dispose_mapping(virq_sgi);
+ return ret;
+ }
+
+ irq_to_desc(virq_sgi);
+ irq_set_status_flags(virq_sgi, IRQ_PER_CPU);
+
+ return ret;
+}
+
+static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(cpu_number1, cpu) = cpu;
+
+ cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
+
+ on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
+
+ irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
+ free_percpu_irq(virq_sgi, &cpu_number1);
+ irq_dispose_mapping(virq_sgi);
+}
+
+static int xlnx_event_manager_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Feature check failed with %d\n", ret);
+ return ret;
+ }
+
+ if ((ret & FIRMWARE_VERSION_MASK) <
+ REGISTER_NOTIFIER_FIRMWARE_VERSION) {
+ dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n",
+ REGISTER_NOTIFIER_FIRMWARE_VERSION,
+ ret & FIRMWARE_VERSION_MASK);
+ return -EOPNOTSUPP;
+ }
+
+ /* Initialize the SGI */
+ ret = xlnx_event_init_sgi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "SGI Init has been failed with %d\n", ret);
+ return ret;
+ }
+
+ /* Setup function for the CPU hot-plug cases */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
+ xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);
+
+ ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
+ 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
+ xlnx_event_cleanup_sgi(pdev);
+ return ret;
+ }
+
+ event_manager_availability = 0;
+
+ dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num);
+ dev_info(&pdev->dev, "Xilinx Event Management driver probed\n");
+
+ return ret;
+}
+
+static int xlnx_event_manager_remove(struct platform_device *pdev)
+{
+ int i;
+ struct registered_event_data *eve_data;
+ struct hlist_node *tmp;
+ int ret;
+
+ hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ }
+
+ ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, 0, 1, NULL);
+ if (ret)
+ dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);
+
+ xlnx_event_cleanup_sgi(pdev);
+
+ event_manager_availability = -EACCES;
+
+ return ret;
+}
+
+static struct platform_driver xlnx_event_manager_driver = {
+ .probe = xlnx_event_manager_probe,
+ .remove = xlnx_event_manager_remove,
+ .driver = {
+ .name = "xlnx_event_manager",
+ },
+};
+module_param(sgi_num, uint, 0);
+module_platform_driver(xlnx_event_manager_driver);
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index 226d343f0a6a..fcce2433bd6d 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -20,8 +20,6 @@
#include <linux/firmware/xlnx-zynqmp.h>
#define ZYNQMP_NUM_DOMAINS (100)
-/* Flag stating if PM nodes mapped to the PM domain has been requested */
-#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
static int min_capability;
@@ -29,14 +27,17 @@ static int min_capability;
* struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
* @gpd: Generic power domain
* @node_id: PM node ID corresponding to device inside PM domain
- * @flags: ZynqMP PM domain flags
+ * @requested: The PM node mapped to the PM domain has been requested
*/
struct zynqmp_pm_domain {
struct generic_pm_domain gpd;
u32 node_id;
- u8 flags;
+ bool requested;
};
+#define to_zynqmp_pm_domain(pm_domain) \
+ container_of(pm_domain, struct zynqmp_pm_domain, gpd)
+
/**
* zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source
* path
@@ -71,21 +72,23 @@ static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used)
*/
static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
{
+ struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
- struct zynqmp_pm_domain *pd;
- pd = container_of(domain, struct zynqmp_pm_domain, gpd);
ret = zynqmp_pm_set_requirement(pd->node_id,
ZYNQMP_PM_CAPABILITY_ACCESS,
ZYNQMP_PM_MAX_QOS,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret) {
- pr_err("%s() %s set requirement for node %d failed: %d\n",
- __func__, domain->name, pd->node_id, ret);
+ dev_err(&domain->dev,
+ "failed to set requirement to 0x%x for PM node id %d: %d\n",
+ ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id, ret);
return ret;
}
- pr_debug("%s() Powered on %s domain\n", __func__, domain->name);
+ dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n",
+ ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id);
+
return 0;
}
@@ -100,18 +103,16 @@ static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
*/
static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
{
+ struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
struct pm_domain_data *pdd, *tmp;
- struct zynqmp_pm_domain *pd;
u32 capabilities = min_capability;
bool may_wakeup;
- pd = container_of(domain, struct zynqmp_pm_domain, gpd);
-
/* If domain is already released there is nothing to be done */
- if (!(pd->flags & ZYNQMP_PM_DOMAIN_REQUESTED)) {
- pr_debug("%s() %s domain is already released\n",
- __func__, domain->name);
+ if (!pd->requested) {
+ dev_dbg(&domain->dev, "PM node id %d is already released\n",
+ pd->node_id);
return 0;
}
@@ -128,17 +129,16 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0,
ZYNQMP_PM_REQUEST_ACK_NO);
- /**
- * If powering down of any node inside this domain fails,
- * report and return the error
- */
if (ret) {
- pr_err("%s() %s set requirement for node %d failed: %d\n",
- __func__, domain->name, pd->node_id, ret);
+ dev_err(&domain->dev,
+ "failed to set requirement to 0x%x for PM node id %d: %d\n",
+ capabilities, pd->node_id, ret);
return ret;
}
- pr_debug("%s() Powered off %s domain\n", __func__, domain->name);
+ dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n",
+ capabilities, pd->node_id);
+
return 0;
}
@@ -152,10 +152,14 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
+ struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
+ struct device_link *link;
int ret;
- struct zynqmp_pm_domain *pd;
- pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+ link = device_link_add(dev, &domain->dev, DL_FLAG_SYNC_STATE_ONLY);
+ if (!link)
+ dev_dbg(&domain->dev, "failed to create device link for %s\n",
+ dev_name(dev));
/* If this is not the first device to attach there is nothing to do */
if (domain->device_count)
@@ -163,17 +167,17 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
ret = zynqmp_pm_request_node(pd->node_id, 0, 0,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
- /* If requesting a node fails print and return the error */
if (ret) {
- pr_err("%s() %s request failed for node %d: %d\n",
- __func__, domain->name, pd->node_id, ret);
+ dev_err(&domain->dev, "%s request failed for node %d: %d\n",
+ domain->name, pd->node_id, ret);
return ret;
}
- pd->flags |= ZYNQMP_PM_DOMAIN_REQUESTED;
+ pd->requested = true;
+
+ dev_dbg(&domain->dev, "%s requested PM node id %d\n",
+ dev_name(dev), pd->node_id);
- pr_debug("%s() %s attached to %s domain\n", __func__,
- dev_name(dev), domain->name);
return 0;
}
@@ -185,27 +189,24 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
+ struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
- struct zynqmp_pm_domain *pd;
-
- pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If this is not the last device to detach there is nothing to do */
if (domain->device_count)
return;
ret = zynqmp_pm_release_node(pd->node_id);
- /* If releasing a node fails print the error and return */
if (ret) {
- pr_err("%s() %s release failed for node %d: %d\n",
- __func__, domain->name, pd->node_id, ret);
+ dev_err(&domain->dev, "failed to release PM node id %d: %d\n",
+ pd->node_id, ret);
return;
}
- pd->flags &= ~ZYNQMP_PM_DOMAIN_REQUESTED;
+ pd->requested = false;
- pr_debug("%s() %s detached from %s domain\n", __func__,
- dev_name(dev), domain->name);
+ dev_dbg(&domain->dev, "%s released PM node id %d\n",
+ dev_name(dev), pd->node_id);
}
static struct generic_pm_domain *zynqmp_gpd_xlate
@@ -215,7 +216,7 @@ static struct generic_pm_domain *zynqmp_gpd_xlate
unsigned int i, idx = genpdspec->args[0];
struct zynqmp_pm_domain *pd;
- pd = container_of(genpd_data->domains[0], struct zynqmp_pm_domain, gpd);
+ pd = to_zynqmp_pm_domain(genpd_data->domains[0]);
if (genpdspec->args_count != 1)
return ERR_PTR(-EINVAL);
@@ -299,9 +300,19 @@ static int zynqmp_gpd_remove(struct platform_device *pdev)
return 0;
}
+static void zynqmp_gpd_sync_state(struct device *dev)
+{
+ int ret;
+
+ ret = zynqmp_pm_init_finalize();
+ if (ret)
+ dev_warn(dev, "failed to release power management to firmware\n");
+}
+
static struct platform_driver zynqmp_power_domain_driver = {
.driver = {
.name = "zynqmp_power_controller",
+ .sync_state = zynqmp_gpd_sync_state,
},
.probe = zynqmp_gpd_probe,
.remove = zynqmp_gpd_remove,
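The device_link_add()/.sync_state pairing used above is the generic driver-core pattern for deferring a firmware handoff until every linked consumer has probed. A minimal sketch of that pattern, with hypothetical names throughout (example_fw_handoff() stands in for zynqmp_pm_init_finalize()):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical stand-in for zynqmp_pm_init_finalize() */
static int example_fw_handoff(void)
{
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	/*
	 * Consumers attached later call device_link_add(consumer,
	 * &pdev->dev, DL_FLAG_SYNC_STATE_ONLY); the driver core then
	 * defers .sync_state until all of them have probed.
	 */
	return 0;
}

static void example_sync_state(struct device *dev)
{
	/* Every linked consumer has probed: hand control to firmware */
	if (example_fw_handoff())
		dev_warn(dev, "firmware handoff failed\n");
}

static struct platform_driver example_pd_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-pd",
		.sync_state = example_sync_state,
	},
};
module_platform_driver(example_pd_driver);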
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index c556623dae02..859dd31b6eff 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -16,6 +16,7 @@
#include <linux/suspend.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/firmware/xlnx-event-manager.h>
#include <linux/mailbox/zynqmp-ipi-message.h>
/**
@@ -30,6 +31,7 @@ struct zynqmp_pm_work_struct {
static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
static struct mbox_chan *rx_chan;
+static bool event_registered;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -46,17 +48,24 @@ static const char *const suspend_modes[] = {
static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
-enum pm_api_cb_id {
- PM_INIT_SUSPEND_CB = 30,
- PM_ACKNOWLEDGE_CB,
- PM_NOTIFY_CB,
-};
-
static void zynqmp_pm_get_callback_data(u32 *buf)
{
zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
}
+static void suspend_event_callback(const u32 *payload, void *data)
+{
+ /* First element is callback API ID, others are callback arguments */
+ if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
+ return;
+
+ /* Copy callback arguments into work's structure */
+ memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
+ sizeof(zynqmp_pm_init_suspend_work->args));
+
+ queue_work(system_unbound_wq, &zynqmp_pm_init_suspend_work->callback_work);
+}
+
static irqreturn_t zynqmp_pm_isr(int irq, void *data)
{
u32 payload[CB_PAYLOAD_SIZE];
@@ -178,14 +187,38 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
u32 pm_api_version;
struct mbox_client *client;
- zynqmp_pm_init_finalize();
zynqmp_pm_get_api_version(&pm_api_version);
/* Check PM API version number */
if (pm_api_version < ZYNQMP_PM_VERSION)
return -ENODEV;
- if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
+ /*
+ * First try to use the Xilinx Event Manager by registering
+ * suspend_event_callback for the suspend/shutdown event.
+ * If xlnx_register_event() returns -EACCES (the Xilinx Event Manager is
+ * not available to use) or -ENODEV (the Xilinx Event Manager is not
+ * compiled in), then fall back to the IPI-mailbox or interrupt method.
+ */
+ ret = xlnx_register_event(PM_INIT_SUSPEND_CB, 0, 0, false,
+ suspend_event_callback, NULL);
+ if (!ret) {
+ zynqmp_pm_init_suspend_work = devm_kzalloc(&pdev->dev,
+ sizeof(struct zynqmp_pm_work_struct),
+ GFP_KERNEL);
+ if (!zynqmp_pm_init_suspend_work) {
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0,
+ suspend_event_callback);
+ return -ENOMEM;
+ }
+ event_registered = true;
+
+ INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
+ zynqmp_pm_init_suspend_work_fn);
+ } else if (ret != -EACCES && ret != -ENODEV) {
+ dev_err(&pdev->dev, "failed to register with the Xilinx Event Manager: %d\n", ret);
+ return ret;
+ } else if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
zynqmp_pm_init_suspend_work =
devm_kzalloc(&pdev->dev,
sizeof(struct zynqmp_pm_work_struct),
@@ -229,6 +262,10 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
if (ret) {
+ if (event_registered) {
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback);
+ event_registered = false;
+ }
dev_err(&pdev->dev, "unable to create sysfs interface\n");
return ret;
}
@@ -239,6 +276,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
static int zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+ if (event_registered)
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback);
if (rx_chan)
mbox_free_channel(rx_chan);
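The probe flow above amounts to a three-way transport selection. A condensed sketch of the decision, assuming only the xlnx_register_event() return-code convention described in the comment; the try_* helpers are hypothetical stubs standing in for the real registration calls:

#include <linux/errno.h>

static int try_event_manager(void) { return -ENODEV; }	/* xlnx_register_event() path */
static int try_mailbox(void)       { return -ENODEV; }	/* "mboxes" DT property path */
static int try_interrupt(void)     { return 0; }	/* plain IRQ path */

static int pm_notifier_setup(void)
{
	int ret = try_event_manager();

	if (!ret)
		return 0;			/* event manager in use */
	if (ret != -EACCES && ret != -ENODEV)
		return ret;			/* hard error: propagate */

	/* Event manager unavailable: fall back to the legacy transports */
	ret = try_mailbox();
	if (ret)
		ret = try_interrupt();
	return ret;
}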
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 4fcc3ba93004..558390af44b6 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -1178,9 +1178,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
cdns->pcm.num_bd = config.pcm_bd;
cdns->pcm.num_in = config.pcm_in;
cdns->pcm.num_out = config.pcm_out;
- cdns->pdm.num_bd = config.pdm_bd;
- cdns->pdm.num_in = config.pdm_in;
- cdns->pdm.num_out = config.pdm_out;
/* Allocate PDIs for PCMs */
stream = &cdns->pcm;
@@ -1211,32 +1208,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
cdns->num_ports = stream->num_pdi;
- /* Allocate PDIs for PDMs */
- stream = &cdns->pdm;
- ret = cdns_allocate_pdi(cdns, &stream->bd,
- stream->num_bd, offset);
- if (ret)
- return ret;
-
- offset += stream->num_bd;
-
- ret = cdns_allocate_pdi(cdns, &stream->in,
- stream->num_in, offset);
- if (ret)
- return ret;
-
- offset += stream->num_in;
-
- ret = cdns_allocate_pdi(cdns, &stream->out,
- stream->num_out, offset);
-
- if (ret)
- return ret;
-
- /* Update total number of PDM PDIs */
- stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
- cdns->num_ports += stream->num_pdi;
-
return 0;
}
EXPORT_SYMBOL(sdw_cdns_pdi_init);
@@ -1681,7 +1652,7 @@ int sdw_cdns_probe(struct sdw_cdns *cdns)
EXPORT_SYMBOL(sdw_cdns_probe);
int cdns_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, bool pcm, int direction)
+ void *stream, int direction)
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_cdns_dma_data *dma;
@@ -1705,10 +1676,7 @@ int cdns_set_sdw_stream(struct snd_soc_dai *dai,
if (!dma)
return -ENOMEM;
- if (pcm)
- dma->stream_type = SDW_STREAM_PCM;
- else
- dma->stream_type = SDW_STREAM_PDM;
+ dma->stream_type = SDW_STREAM_PCM;
dma->bus = &cdns->bus;
dma->link_id = cdns->instance;
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index e587aede63bf..595d72c15d97 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -17,7 +17,7 @@
* @h_ch_num: high channel for PDI
* @ch_count: total channel count for PDI
* @dir: data direction
- * @type: stream type, PDM or PCM
+ * @type: stream type (only PCM is supported)
*/
struct sdw_cdns_pdi {
int num;
@@ -62,17 +62,11 @@ struct sdw_cdns_streams {
* @pcm_bd: number of bidirectional PCM streams supported
* @pcm_in: number of input PCM streams supported
* @pcm_out: number of output PCM streams supported
- * @pdm_bd: number of bidirectional PDM streams supported
- * @pdm_in: number of input PDM streams supported
- * @pdm_out: number of output PDM streams supported
*/
struct sdw_cdns_stream_config {
unsigned int pcm_bd;
unsigned int pcm_in;
unsigned int pcm_out;
- unsigned int pdm_bd;
- unsigned int pdm_in;
- unsigned int pdm_out;
};
/**
@@ -86,6 +80,7 @@ struct sdw_cdns_stream_config {
* @link_id: Master link id
* @hw_params: hw_params to be applied in .prepare step
* @suspended: status set when suspended, to be used in .prepare
+ * @paused: status set in .trigger, to be used in suspend
*/
struct sdw_cdns_dma_data {
char *name;
@@ -96,6 +91,7 @@ struct sdw_cdns_dma_data {
int link_id;
struct snd_pcm_hw_params *hw_params;
bool suspended;
+ bool paused;
};
/**
@@ -109,7 +105,6 @@ struct sdw_cdns_dma_data {
* @ports: Data ports
* @num_ports: Total number of data ports
* @pcm: PCM streams
- * @pdm: PDM streams
* @registers: Cadence registers
* @link_up: Link status
* @msg_count: Messages sent on bus
@@ -127,7 +122,6 @@ struct sdw_cdns {
int num_ports;
struct sdw_cdns_streams pcm;
- struct sdw_cdns_streams pdm;
int pdi_loopback_source;
int pdi_loopback_target;
@@ -186,7 +180,7 @@ cdns_xfer_msg_defer(struct sdw_bus *bus,
int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params);
int cdns_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, bool pcm, int direction);
+ void *stream, int direction);
void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string,
bool initial_delay, int reset_iterations);
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 78037ffdb09b..122f7a29d8ca 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -564,7 +564,7 @@ static void intel_pdi_init(struct sdw_intel *sdw,
{
void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
- int pcm_cap, pdm_cap;
+ int pcm_cap;
/* PCM Stream Capability */
pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
@@ -575,41 +575,25 @@ static void intel_pdi_init(struct sdw_intel *sdw,
dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
config->pcm_bd, config->pcm_in, config->pcm_out);
-
- /* PDM Stream Capability */
- pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
-
- config->pdm_bd = FIELD_GET(SDW_SHIM_PDMSCAP_BSS, pdm_cap);
- config->pdm_in = FIELD_GET(SDW_SHIM_PDMSCAP_ISS, pdm_cap);
- config->pdm_out = FIELD_GET(SDW_SHIM_PDMSCAP_OSS, pdm_cap);
-
- dev_dbg(sdw->cdns.dev, "PDM cap bd:%d in:%d out:%d\n",
- config->pdm_bd, config->pdm_in, config->pdm_out);
}
static int
-intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
+intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
{
void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
int count;
- if (pcm) {
- count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
+ count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
- /*
- * WORKAROUND: on all existing Intel controllers, pdi
- * number 2 reports channel count as 1 even though it
- * supports 8 channels. Performing hardcoding for pdi
- * number 2.
- */
- if (pdi_num == 2)
- count = 7;
-
- } else {
- count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
- count = FIELD_GET(SDW_SHIM_PDMSCAP_CPSS, count);
- }
+ /*
+ * WORKAROUND: on all existing Intel controllers, PDI number 2
+ * reports a channel count of 1 even though it supports 8 channels.
+ * Hard-code the value for PDI number 2; the register is zero-based,
+ * so the 7 set here becomes 8 after the increment below.
+ */
+ if (pdi_num == 2)
+ count = 7;
/* zero based values for channel count in register */
count++;
@@ -620,12 +604,12 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
struct sdw_cdns_pdi *pdi,
unsigned int num_pdi,
- unsigned int *num_ch, bool pcm)
+ unsigned int *num_ch)
{
int i, ch_count = 0;
for (i = 0; i < num_pdi; i++) {
- pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num, pcm);
+ pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
ch_count += pdi->ch_count;
pdi++;
}
@@ -635,25 +619,23 @@ static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
}
static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
- struct sdw_cdns_streams *stream, bool pcm)
+ struct sdw_cdns_streams *stream)
{
intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
- &stream->num_ch_bd, pcm);
+ &stream->num_ch_bd);
intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
- &stream->num_ch_in, pcm);
+ &stream->num_ch_in);
intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
- &stream->num_ch_out, pcm);
+ &stream->num_ch_out);
return 0;
}
static int intel_pdi_ch_update(struct sdw_intel *sdw)
{
- /* First update PCM streams followed by PDM streams */
- intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm, true);
- intel_pdi_stream_ch_update(sdw, &sdw->cdns.pdm, false);
+ intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
return 0;
}
@@ -711,7 +693,7 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
}
static int intel_params_stream(struct sdw_intel *sdw,
- struct snd_pcm_substream *substream,
+ int stream,
struct snd_soc_dai *dai,
struct snd_pcm_hw_params *hw_params,
int link_id, int alh_stream_id)
@@ -719,7 +701,7 @@ static int intel_params_stream(struct sdw_intel *sdw,
struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_intel_stream_params_data params_data;
- params_data.substream = substream;
+ params_data.stream = stream; /* direction */
params_data.dai = dai;
params_data.hw_params = hw_params;
params_data.link_id = link_id;
@@ -732,14 +714,14 @@ static int intel_params_stream(struct sdw_intel *sdw,
}
static int intel_free_stream(struct sdw_intel *sdw,
- struct snd_pcm_substream *substream,
+ int stream,
struct snd_soc_dai *dai,
int link_id)
{
struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_intel_stream_free_data free_data;
- free_data.substream = substream;
+ free_data.stream = stream; /* direction */
free_data.dai = dai;
free_data.link_id = link_id;
@@ -840,7 +822,6 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
struct sdw_port_config *pconfig;
int ch, dir;
int ret;
- bool pcm = true;
dma = snd_soc_dai_get_dma_data(dai, substream);
if (!dma)
@@ -852,13 +833,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
else
dir = SDW_DATA_DIR_TX;
- if (dma->stream_type == SDW_STREAM_PDM)
- pcm = false;
-
- if (pcm)
- pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
- else
- pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id);
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
if (!pdi) {
ret = -EINVAL;
@@ -871,12 +846,13 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
sdw_cdns_config_stream(cdns, ch, dir, pdi);
/* store pdi and hw_params, may be needed in prepare step */
+ dma->paused = false;
dma->suspended = false;
dma->pdi = pdi;
dma->hw_params = params;
/* Inform DSP about PDI stream number */
- ret = intel_params_stream(sdw, substream, dai, params,
+ ret = intel_params_stream(sdw, substream->stream, dai, params,
sdw->instance,
pdi->intel_alh_id);
if (ret)
@@ -887,12 +863,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
sconfig.frame_rate = params_rate(params);
sconfig.type = dma->stream_type;
- if (dma->stream_type == SDW_STREAM_PDM) {
- sconfig.frame_rate *= 50;
- sconfig.bps = 1;
- } else {
- sconfig.bps = snd_pcm_format_width(params_format(params));
- }
+ sconfig.bps = snd_pcm_format_width(params_format(params));
/* Port configuration */
pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
@@ -953,7 +924,7 @@ static int intel_prepare(struct snd_pcm_substream *substream,
sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
/* Inform DSP about PDI stream number */
- ret = intel_params_stream(sdw, substream, dai,
+ ret = intel_params_stream(sdw, substream->stream, dai,
dma->hw_params,
sdw->instance,
dma->pdi->intel_alh_id);
@@ -987,7 +958,7 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
return ret;
}
- ret = intel_free_stream(sdw, substream, dai, sdw->instance);
+ ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
if (ret < 0) {
dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
return ret;
@@ -1008,39 +979,10 @@ static void intel_shutdown(struct snd_pcm_substream *substream,
pm_runtime_put_autosuspend(cdns->dev);
}
-static int intel_component_dais_suspend(struct snd_soc_component *component)
-{
- struct sdw_cdns_dma_data *dma;
- struct snd_soc_dai *dai;
-
- for_each_component_dais(component, dai) {
- /*
- * we don't have a .suspend dai_ops, and we don't have access
- * to the substream, so let's mark both capture and playback
- * DMA contexts as suspended
- */
- dma = dai->playback_dma_data;
- if (dma)
- dma->suspended = true;
-
- dma = dai->capture_dma_data;
- if (dma)
- dma->suspended = true;
- }
-
- return 0;
-}
-
static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
void *stream, int direction)
{
- return cdns_set_sdw_stream(dai, stream, true, direction);
-}
-
-static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, int direction)
-{
- return cdns_set_sdw_stream(dai, stream, false, direction);
+ return cdns_set_sdw_stream(dai, stream, direction);
}
static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
@@ -1059,24 +1001,100 @@ static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
return dma->stream;
}
-static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
- .startup = intel_startup,
- .hw_params = intel_hw_params,
- .prepare = intel_prepare,
- .hw_free = intel_hw_free,
- .shutdown = intel_shutdown,
- .set_sdw_stream = intel_pcm_set_sdw_stream,
- .get_sdw_stream = intel_get_sdw_stream,
-};
+static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_cdns_dma_data *dma;
+ int ret = 0;
-static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
+ dma = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dma) {
+ dev_err(dai->dev, "failed to get dma data in %s\n",
+ __func__);
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+
+ /*
+ * The .prepare callback is used to deal with xruns and resume
+ * operations. In the case of xruns, the DMAs and SHIM registers
+ * cannot be touched, but for resume operations they need to be
+ * reinitialized. The .trigger callback is therefore used to track
+ * the suspend case only.
+ */
+
+ dma->suspended = true;
+
+ ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dma->paused = true;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dma->paused = false;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int intel_component_dais_suspend(struct snd_soc_component *component)
+{
+ struct snd_soc_dai *dai;
+
+ /*
+ * In the corner case where a SUSPEND happens during a PAUSE, the
+ * ALSA core does not send TRIGGER_SUSPEND, which leaves the DAIs in
+ * an unbalanced state. Since the component suspend is called last,
+ * we can trap this corner case and force the DAIs to release their
+ * resources.
+ */
+ for_each_component_dais(component, dai) {
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_cdns_dma_data *dma;
+ int stream;
+ int ret;
+
+ dma = dai->playback_dma_data;
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+ if (!dma) {
+ dma = dai->capture_dma_data;
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+ }
+
+ if (!dma)
+ continue;
+
+ if (dma->suspended)
+ continue;
+
+ if (dma->paused) {
+ dma->suspended = true;
+
+ ret = intel_free_stream(sdw, stream, dai, sdw->instance);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
.startup = intel_startup,
.hw_params = intel_hw_params,
.prepare = intel_prepare,
.hw_free = intel_hw_free,
+ .trigger = intel_trigger,
.shutdown = intel_shutdown,
- .set_sdw_stream = intel_pdm_set_sdw_stream,
- .get_sdw_stream = intel_get_sdw_stream,
+ .set_stream = intel_pcm_set_sdw_stream,
+ .get_stream = intel_get_sdw_stream,
};
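The interplay between .trigger and the component-level suspend above reduces to a small flag protocol. A toy model of it (the struct and helper are illustrative only, not part of the driver):

#include <linux/types.h>

struct dma_flags {
	bool suspended;	/* set on TRIGGER_SUSPEND, cleared in hw_params */
	bool paused;	/* tracks PAUSE_PUSH / PAUSE_RELEASE */
};

/*
 * SUSPEND during PAUSE: the ALSA core sends no TRIGGER_SUSPEND, so the
 * stream was never freed. The component suspend, which runs last, must
 * detect that combination and release the stream itself.
 */
static bool must_free_in_component_suspend(const struct dma_flags *f)
{
	return !f->suspended && f->paused;
}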
static const struct snd_soc_component_driver dai_component = {
@@ -1087,7 +1105,7 @@ static const struct snd_soc_component_driver dai_component = {
static int intel_create_dai(struct sdw_cdns *cdns,
struct snd_soc_dai_driver *dais,
enum intel_pdi_type type,
- u32 num, u32 off, u32 max_ch, bool pcm)
+ u32 num, u32 off, u32 max_ch)
{
int i;
@@ -1116,10 +1134,7 @@ static int intel_create_dai(struct sdw_cdns *cdns,
dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
}
- if (pcm)
- dais[i].ops = &intel_pcm_dai_ops;
- else
- dais[i].ops = &intel_pdm_dai_ops;
+ dais[i].ops = &intel_pcm_dai_ops;
}
return 0;
@@ -1133,7 +1148,7 @@ static int intel_register_dai(struct sdw_intel *sdw)
int num_dai, ret, off = 0;
/* DAIs are created based on total number of PDIs supported */
- num_dai = cdns->pcm.num_pdi + cdns->pdm.num_pdi;
+ num_dai = cdns->pcm.num_pdi;
dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
if (!dais)
@@ -1143,39 +1158,19 @@ static int intel_register_dai(struct sdw_intel *sdw)
stream = &cdns->pcm;
ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
- off, stream->num_ch_in, true);
+ off, stream->num_ch_in);
if (ret)
return ret;
off += cdns->pcm.num_in;
ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
- off, stream->num_ch_out, true);
+ off, stream->num_ch_out);
if (ret)
return ret;
off += cdns->pcm.num_out;
ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
- off, stream->num_ch_bd, true);
- if (ret)
- return ret;
-
- /* Create PDM DAIs */
- stream = &cdns->pdm;
- off += cdns->pcm.num_bd;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pdm.num_in,
- off, stream->num_ch_in, false);
- if (ret)
- return ret;
-
- off += cdns->pdm.num_in;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pdm.num_out,
- off, stream->num_ch_out, false);
- if (ret)
- return ret;
-
- off += cdns->pdm.num_out;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
- off, stream->num_ch_bd, false);
+ off, stream->num_ch_bd);
if (ret)
return ret;
@@ -1293,7 +1288,7 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
bus->ops = &sdw_intel_ops;
/* set driver data, accessed by snd_soc_dai_get_drvdata() */
- dev_set_drvdata(dev, cdns);
+ auxiliary_set_drvdata(auxdev, cdns);
/* use generic bandwidth allocation algorithm */
sdw->cdns.bus.compute_params = sdw_compute_params;
@@ -1321,7 +1316,7 @@ int intel_link_startup(struct auxiliary_device *auxdev)
{
struct sdw_cdns_stream_config config;
struct device *dev = &auxdev->dev;
- struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
int link_flags;
@@ -1463,7 +1458,7 @@ err_init:
static void intel_link_remove(struct auxiliary_device *auxdev)
{
struct device *dev = &auxdev->dev;
- struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
@@ -1488,7 +1483,7 @@ int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
void __iomem *shim;
u16 wake_sts;
- sdw = dev_get_drvdata(dev);
+ sdw = auxiliary_get_drvdata(auxdev);
bus = &sdw->cdns.bus;
if (bus->prop.hw_disabled || !sdw->startup_done) {
@@ -1549,7 +1544,7 @@ static int __maybe_unused intel_pm_prepare(struct device *dev)
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
u32 clock_stop_quirks;
- int ret = 0;
+ int ret;
if (bus->prop.hw_disabled || !sdw->startup_done) {
dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index e329022e1669..d99807765dfe 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -244,7 +244,7 @@ static struct sdw_intel_ctx
goto err;
link = &ldev->link_res;
- link->cdns = dev_get_drvdata(&ldev->auxdev.dev);
+ link->cdns = auxiliary_get_drvdata(&ldev->auxdev);
if (!link->cdns) {
dev_err(&adev->dev, "failed to get link->cdns\n");
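auxiliary_set_drvdata() and auxiliary_get_drvdata() are thin wrappers around dev_set_drvdata()/dev_get_drvdata() on the embedded struct device, so the conversions in this series are behavior-preserving. Usage in brief, with a hypothetical example_priv context:

#include <linux/auxiliary_bus.h>
#include <linux/slab.h>

struct example_priv { int dummy; };	/* hypothetical per-device context */

static int example_aux_probe(struct auxiliary_device *auxdev,
			     const struct auxiliary_device_id *id)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&auxdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	auxiliary_set_drvdata(auxdev, priv);	/* wraps dev_set_drvdata() */
	return 0;
}

static void example_aux_remove(struct auxiliary_device *auxdev)
{
	struct example_priv *priv = auxiliary_get_drvdata(auxdev);

	/* tear down priv without touching auxdev->dev.driver_data directly */
	(void)priv;
}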
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index a317bea2d42d..54813417ef8e 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -1024,8 +1024,8 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
ctrl->sruntime[dai->id] = sruntime;
for_each_rtd_codec_dais(rtd, i, codec_dai) {
- ret = snd_soc_dai_set_sdw_stream(codec_dai, sruntime,
- substream->stream);
+ ret = snd_soc_dai_set_stream(codec_dai, sruntime,
+ substream->stream);
if (ret < 0 && ret != -ENOTSUPP) {
dev_err(dai->dev, "Failed to set sdw stream on %s\n",
codec_dai->name);
@@ -1051,8 +1051,8 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
.hw_free = qcom_swrm_hw_free,
.startup = qcom_swrm_startup,
.shutdown = qcom_swrm_shutdown,
- .set_sdw_stream = qcom_swrm_set_sdw_stream,
- .get_sdw_stream = qcom_swrm_get_sdw_stream,
+ .set_stream = qcom_swrm_set_sdw_stream,
+ .get_stream = qcom_swrm_get_sdw_stream,
};
static const struct snd_soc_component_driver qcom_swrm_dai_component = {
@@ -1156,11 +1156,7 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
bp_mode, nports);
if (ret) {
- u32 version;
-
- ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &version);
-
- if (version <= 0x01030000)
+ if (ctrl->version <= 0x01030000)
memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
else
return ret;
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 5d4f6b308ef7..980f26d49b66 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1863,7 +1863,7 @@ static int set_stream(struct snd_pcm_substream *substream,
/* Set stream pointer on all DAIs */
for_each_rtd_dais(rtd, i, dai) {
- ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream);
+ ret = snd_soc_dai_set_stream(dai, sdw_stream, substream->stream);
if (ret < 0) {
dev_err(rtd->dev, "failed to set stream pointer on dai %s\n", dai->name);
break;
@@ -1934,7 +1934,7 @@ void sdw_shutdown_stream(void *sdw_substream)
/* Find stream from first CPU DAI */
dai = asoc_rtd_to_cpu(rtd, 0);
- sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
+ sdw_stream = snd_soc_dai_get_stream(dai, substream->stream);
if (IS_ERR(sdw_stream)) {
dev_err(rtd->dev, "no stream found for DAI %s\n", dai->name);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 596705d24400..b2a8821971e1 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -974,14 +974,13 @@ config SPI_XILINX
Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
config SPI_XLP
- tristate "Netlogic XLP SPI controller driver"
- depends on CPU_XLP || ARCH_THUNDER2 || COMPILE_TEST
+ tristate "Cavium ThunderX2 SPI controller driver"
+ depends on ARCH_THUNDER2 || COMPILE_TEST
help
- Enable support for the SPI controller on the Netlogic XLP SoCs.
- Currently supported XLP variants are XLP8XX, XLP3XX, XLP2XX, XLP9XX
- and XLP5XX.
+ Enable support for the SPI controller on the Cavium ThunderX2.
+ (Originally on Netlogic XLP SoCs.)
- If you have a Netlogic XLP platform say Y here.
+ If you have a Cavium ThunderX2 platform, say Y here.
If unsure, say N.
config SPI_XTENSA_XTFPGA
diff --git a/drivers/spi/spi-ar934x.c b/drivers/spi/spi-ar934x.c
index def32e0aaefe..ec7250c4c810 100644
--- a/drivers/spi/spi-ar934x.c
+++ b/drivers/spi/spi-ar934x.c
@@ -82,7 +82,7 @@ static int ar934x_spi_transfer_one_message(struct spi_controller *master,
struct spi_device *spi = m->spi;
unsigned long trx_done, trx_cur;
int stat = 0;
- u8 term = 0;
+ u8 bpw, term = 0;
int div, i;
u32 reg;
const u8 *tx_buf;
@@ -90,6 +90,11 @@ static int ar934x_spi_transfer_one_message(struct spi_controller *master,
m->actual_length = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word >= 8 && t->bits_per_word < 32)
+ bpw = t->bits_per_word >> 3;
+ else
+ bpw = 4;
+
if (t->speed_hz)
div = ar934x_spi_clk_div(sp, t->speed_hz);
else
@@ -105,10 +110,10 @@ static int ar934x_spi_transfer_one_message(struct spi_controller *master,
iowrite32(reg, sp->base + AR934X_SPI_REG_CTRL);
iowrite32(0, sp->base + AR934X_SPI_DATAOUT);
- for (trx_done = 0; trx_done < t->len; trx_done += 4) {
+ for (trx_done = 0; trx_done < t->len; trx_done += bpw) {
trx_cur = t->len - trx_done;
- if (trx_cur > 4)
- trx_cur = 4;
+ if (trx_cur > bpw)
+ trx_cur = bpw;
else if (list_is_last(&t->transfer_list, &m->transfers))
term = 1;
@@ -137,8 +142,10 @@ static int ar934x_spi_transfer_one_message(struct spi_controller *master,
reg >>= 8;
}
}
+ spi_delay_exec(&t->word_delay, t);
}
m->actual_length += t->len;
+ spi_transfer_delay_exec(t);
}
msg_done:
@@ -191,7 +198,8 @@ static int ar934x_spi_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_LSB_FIRST;
ctlr->setup = ar934x_spi_setup;
ctlr->transfer_one_message = ar934x_spi_transfer_one_message;
- ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->num_chipselect = 3;
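The new bpw value maps the transfer's bits_per_word onto the number of bytes consumed per FIFO word; spelled out as a helper mirroring the arithmetic in the loop above (a sketch, not driver code):

/* 8 -> 1 byte, 16 -> 2, 24 -> 3; 32 bits (or an unset value) -> 4 bytes */
static unsigned int ar934x_bytes_per_word(unsigned int bits)
{
	if (bits >= 8 && bits < 32)
		return bits >> 3;
	return 4;
}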
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index f872cf196c2f..9e300a932699 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -433,26 +433,25 @@ static bool atmel_spi_can_dma(struct spi_master *master,
}
-static int atmel_spi_dma_slave_config(struct atmel_spi *as,
- struct dma_slave_config *slave_config,
- u8 bits_per_word)
+static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
{
struct spi_master *master = platform_get_drvdata(as->pdev);
+ struct dma_slave_config slave_config;
int err = 0;
if (bits_per_word > 8) {
- slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
} else {
- slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
}
- slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
- slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
- slave_config->src_maxburst = 1;
- slave_config->dst_maxburst = 1;
- slave_config->device_fc = false;
+ slave_config.dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
+ slave_config.src_addr = (dma_addr_t)as->phybase + SPI_RDR;
+ slave_config.src_maxburst = 1;
+ slave_config.dst_maxburst = 1;
+ slave_config.device_fc = false;
/*
* This driver uses fixed peripheral select mode (PS bit set to '0' in
@@ -464,12 +463,11 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
* However, the first data has to be written into the lowest 16 bits and
* the second data into the highest 16 bits of the Transmit
* Data Register. For 8bit data (the most frequent case), it would
- * require to rework tx_buf so each data would actualy fit 16 bits.
+ * require to rework tx_buf so each data would actually fit 16 bits.
* So we'd rather write only one data at the time. Hence the transmit
* path works the same whether FIFOs are available (and enabled) or not.
*/
- slave_config->direction = DMA_MEM_TO_DEV;
- if (dmaengine_slave_config(master->dma_tx, slave_config)) {
+ if (dmaengine_slave_config(master->dma_tx, &slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure tx dma channel\n");
err = -EINVAL;
@@ -483,8 +481,7 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
* So the receive path works the same whether FIFOs are available (and
* enabled) or not.
*/
- slave_config->direction = DMA_DEV_TO_MEM;
- if (dmaengine_slave_config(master->dma_rx, slave_config)) {
+ if (dmaengine_slave_config(master->dma_rx, &slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure rx dma channel\n");
err = -EINVAL;
@@ -496,7 +493,6 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
static int atmel_spi_configure_dma(struct spi_master *master,
struct atmel_spi *as)
{
- struct dma_slave_config slave_config;
struct device *dev = &as->pdev->dev;
int err;
@@ -518,7 +514,7 @@ static int atmel_spi_configure_dma(struct spi_master *master,
goto error;
}
- err = atmel_spi_dma_slave_config(as, &slave_config, 8);
+ err = atmel_spi_dma_slave_config(as, 8);
if (err)
goto error;
@@ -700,7 +696,6 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
struct dma_chan *txchan = master->dma_tx;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
- struct dma_slave_config slave_config;
dma_cookie_t cookie;
dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
@@ -712,8 +707,7 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
*plen = xfer->len;
- if (atmel_spi_dma_slave_config(as, &slave_config,
- xfer->bits_per_word))
+ if (atmel_spi_dma_slave_config(as, xfer->bits_per_word))
goto err_exit;
/* Send both scatterlists */
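Moving dma_slave_config onto the stack is safe because dmaengine_slave_config() copies the parameters into the channel before returning; the per-call .direction assignments, deprecated in the dmaengine API, are dropped along the way. A minimal sketch of the resulting shape (the function and parameter names are illustrative; tdr/rdr stand for the FIFO register addresses):

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_cfg_dma(struct dma_chan *tx, struct dma_chan *rx,
			   dma_addr_t tdr, dma_addr_t rdr, u8 bits)
{
	struct dma_slave_config cfg = {
		.dst_addr = tdr,	/* TX targets the Transmit Data Register */
		.src_addr = rdr,	/* RX drains the Receive Data Register */
		.src_maxburst = 1,
		.dst_maxburst = 1,
	};

	cfg.dst_addr_width = cfg.src_addr_width = (bits > 8) ?
		DMA_SLAVE_BUSWIDTH_2_BYTES : DMA_SLAVE_BUSWIDTH_1_BYTE;

	if (dmaengine_slave_config(tx, &cfg))
		return -EINVAL;

	return dmaengine_slave_config(rx, &cfg) ? -EINVAL : 0;
}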
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index f3de3305d0f5..c9a769b8594b 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -287,6 +287,18 @@ static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
return 8;
}
+static u32 bcm_qspi_calc_spbr(u32 clk_speed_hz,
+ const struct bcm_qspi_parms *xp)
+{
+ u32 spbr = 0;
+
+ /* SPBR = System Clock/(2 * SCK Baud Rate) */
+ if (xp->speed_hz)
+ spbr = clk_speed_hz / (xp->speed_hz * 2);
+
+ return spbr;
+}
+
/* Read qspi controller register */
static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
unsigned int offset)
@@ -586,12 +598,24 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
qspi->curr_cs = cs;
}
+static bool bcmspi_parms_did_change(const struct bcm_qspi_parms * const cur,
+ const struct bcm_qspi_parms * const prev)
+{
+ return (cur->speed_hz != prev->speed_hz) ||
+ (cur->mode != prev->mode) ||
+ (cur->bits_per_word != prev->bits_per_word);
+}
+
/* MSPI helpers */
static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
const struct bcm_qspi_parms *xp)
{
u32 spcr, spbr = 0;
+ if (!bcmspi_parms_did_change(xp, &qspi->last_parms))
+ return;
+
if (!qspi->mspi_maj_rev)
/* legacy controller */
spcr = MSPI_MASTER_BIT;
@@ -621,9 +645,17 @@ static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
spcr |= MSPI_SPCR3_HALFDUPLEX | MSPI_SPCR3_HDOUTTYPE;
if (bcm_qspi_has_sysclk_108(qspi)) {
- /* SYSCLK_108 */
- spcr |= MSPI_SPCR3_SYSCLKSEL_108;
- qspi->base_clk = MSPI_BASE_FREQ * 4;
+ /* check the requested baud rate before moving to 108 MHz */
+ spbr = bcm_qspi_calc_spbr(MSPI_BASE_FREQ * 4, xp);
+ if (spbr > QSPI_SPBR_MAX) {
+ /* use the 27 MHz SYSCLK for slower baud rates */
+ spcr &= ~MSPI_SPCR3_SYSCLKSEL_MASK;
+ qspi->base_clk = MSPI_BASE_FREQ;
+ } else {
+ /* SYSCLK @ 108 MHz */
+ spcr |= MSPI_SPCR3_SYSCLKSEL_108;
+ qspi->base_clk = MSPI_BASE_FREQ * 4;
+ }
}
if (xp->bits_per_word > 16) {
@@ -649,9 +681,9 @@ static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
}
- if (xp->speed_hz)
- spbr = qspi->base_clk / (2 * xp->speed_hz);
-
+ /* SCK Baud Rate = System Clock/(2 * SPBR) */
+ qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
+ spbr = bcm_qspi_calc_spbr(qspi->base_clk, xp);
spbr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spbr);
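A worked example of the clock selection, assuming MSPI_BASE_FREQ is 27 MHz (so the fast clock is 108 MHz) and an 8-bit SPBR field capped at QSPI_SPBR_MAX = 255 — both assumptions, matching the driver's usual constants:

/* SPBR = System Clock / (2 * SCK Baud Rate), as in bcm_qspi_calc_spbr() */
static unsigned int calc_spbr(unsigned int clk_hz, unsigned int speed_hz)
{
	return speed_hz ? clk_hz / (2U * speed_hz) : 0U;
}

/*
 * For a 100 kHz request:
 *   calc_spbr(108000000, 100000) == 540 -> overflows QSPI_SPBR_MAX,
 *                                          so stay on the 27 MHz clock;
 *   calc_spbr( 27000000, 100000) == 135 -> fits, SPBR = 135.
 */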
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
index 3ff63ab82f4f..0a1fb2bc9e54 100644
--- a/drivers/spi/spi-dln2.c
+++ b/drivers/spi/spi-dln2.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/mfd/dln2.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
@@ -688,6 +689,8 @@ static int dln2_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ device_set_node(&master->dev, dev_fwnode(dev));
+
platform_set_drvdata(pdev, master);
dln2 = spi_master_get_devdata(master);
@@ -699,7 +702,6 @@ static int dln2_spi_probe(struct platform_device *pdev)
}
dln2->master = master;
- dln2->master->dev.of_node = dev->of_node;
dln2->pdev = pdev;
dln2->port = pdata->port;
/* cs/mode can never be 0xff, so the first transfer will set them */
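device_set_node() assigns the controller's fwnode and, through it, the of_node in a single call, which is why the explicit of_node copy above can go. The pattern, as a one-line sketch:

#include <linux/property.h>

/* Replaces: master->dev.of_node = dev->of_node; (and any fwnode copy) */
device_set_node(&master->dev, dev_fwnode(dev));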
diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c
index 5be6b7b80c21..c06553416123 100644
--- a/drivers/spi/spi-dw-bt1.c
+++ b/drivers/spi/spi-dw-bt1.c
@@ -123,7 +123,7 @@ static ssize_t dw_spi_bt1_dirmap_read(struct spi_mem_dirmap_desc *desc,
len = min_t(size_t, len, dwsbt1->map_len - offs);
/* Collect the controller configuration required by the operation */
- cfg.tmode = SPI_TMOD_EPROMREAD;
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
cfg.dfs = 8;
cfg.ndf = 4;
cfg.freq = mem->spi->max_speed_hz;
@@ -131,13 +131,13 @@ static ssize_t dw_spi_bt1_dirmap_read(struct spi_mem_dirmap_desc *desc,
/* Make sure the corresponding CS is de-asserted on transmission */
dw_spi_set_cs(mem->spi, false);
- spi_enable_chip(dws, 0);
+ dw_spi_enable_chip(dws, 0);
dw_spi_update_config(dws, mem->spi, &cfg);
- spi_umask_intr(dws, SPI_INT_RXFI);
+ dw_spi_umask_intr(dws, DW_SPI_INT_RXFI);
- spi_enable_chip(dws, 1);
+ dw_spi_enable_chip(dws, 1);
/*
* Enable the transparent mode of the System Boot Controller.
@@ -339,3 +339,4 @@ module_platform_driver(dw_spi_bt1_driver);
MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
MODULE_DESCRIPTION("Baikal-T1 System Boot SPI Controller driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(SPI_DW_CORE);
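EXPORT_SYMBOL_NS_GPL() places a symbol in a named export namespace; any module referencing it must opt in with MODULE_IMPORT_NS(), or modpost refuses to link it. The shape of the pairing, with a hypothetical symbol:

/* In the core module */
int dw_example_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(dw_example_helper, SPI_DW_CORE);

/* In a consumer module, such as this glue driver */
MODULE_IMPORT_NS(SPI_DW_CORE);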
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index a305074c482e..ecea471ff42c 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -5,6 +5,7 @@
* Copyright (c) 2009, Intel Corporation.
*/
+#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -24,7 +25,7 @@
#endif
/* Slave spi_device related */
-struct chip_data {
+struct dw_spi_chip_data {
u32 cr0;
u32 rx_sample_dly; /* RX sample delay */
};
@@ -106,10 +107,10 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
else
dw_writel(dws, DW_SPI_SER, 0);
}
-EXPORT_SYMBOL_GPL(dw_spi_set_cs);
+EXPORT_SYMBOL_NS_GPL(dw_spi_set_cs, SPI_DW_CORE);
/* Return the max entries we can fill into tx fifo */
-static inline u32 tx_max(struct dw_spi *dws)
+static inline u32 dw_spi_tx_max(struct dw_spi *dws)
{
u32 tx_room, rxtx_gap;
@@ -129,14 +130,14 @@ static inline u32 tx_max(struct dw_spi *dws)
}
/* Return the max entries we should read out of rx fifo */
-static inline u32 rx_max(struct dw_spi *dws)
+static inline u32 dw_spi_rx_max(struct dw_spi *dws)
{
return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}
static void dw_writer(struct dw_spi *dws)
{
- u32 max = tx_max(dws);
+ u32 max = dw_spi_tx_max(dws);
u32 txw = 0;
while (max--) {
@@ -157,7 +158,7 @@ static void dw_writer(struct dw_spi *dws)
static void dw_reader(struct dw_spi *dws)
{
- u32 max = rx_max(dws);
+ u32 max = dw_spi_rx_max(dws);
u32 rxw;
while (max--) {
@@ -186,31 +187,31 @@ int dw_spi_check_status(struct dw_spi *dws, bool raw)
else
irq_status = dw_readl(dws, DW_SPI_ISR);
- if (irq_status & SPI_INT_RXOI) {
+ if (irq_status & DW_SPI_INT_RXOI) {
dev_err(&dws->master->dev, "RX FIFO overflow detected\n");
ret = -EIO;
}
- if (irq_status & SPI_INT_RXUI) {
+ if (irq_status & DW_SPI_INT_RXUI) {
dev_err(&dws->master->dev, "RX FIFO underflow detected\n");
ret = -EIO;
}
- if (irq_status & SPI_INT_TXOI) {
+ if (irq_status & DW_SPI_INT_TXOI) {
dev_err(&dws->master->dev, "TX FIFO overflow detected\n");
ret = -EIO;
}
/* Generically handle the erroneous situation */
if (ret) {
- spi_reset_chip(dws);
+ dw_spi_reset_chip(dws);
if (dws->master->cur_msg)
dws->master->cur_msg->status = ret;
}
return ret;
}
-EXPORT_SYMBOL_GPL(dw_spi_check_status);
+EXPORT_SYMBOL_NS_GPL(dw_spi_check_status, SPI_DW_CORE);
static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
@@ -230,7 +231,7 @@ static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
*/
dw_reader(dws);
if (!dws->rx_len) {
- spi_mask_intr(dws, 0xff);
+ dw_spi_mask_intr(dws, 0xff);
spi_finalize_current_transfer(dws->master);
} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
@@ -241,10 +242,10 @@ static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
* disabled after the data transmission is finished so not to
* have the TXE IRQ flood at the final stage of the transfer.
*/
- if (irq_status & SPI_INT_TXEI) {
+ if (irq_status & DW_SPI_INT_TXEI) {
dw_writer(dws);
if (!dws->tx_len)
- spi_mask_intr(dws, SPI_INT_TXEI);
+ dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
}
return IRQ_HANDLED;
@@ -254,13 +255,13 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
struct spi_controller *master = dev_id;
struct dw_spi *dws = spi_controller_get_devdata(master);
- u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;
+ u16 irq_status = dw_readl(dws, DW_SPI_ISR) & DW_SPI_INT_MASK;
if (!irq_status)
return IRQ_NONE;
if (!master->cur_msg) {
- spi_mask_intr(dws, 0xff);
+ dw_spi_mask_intr(dws, 0xff);
return IRQ_HANDLED;
}
@@ -271,37 +272,43 @@ static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
u32 cr0 = 0;
- if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) {
+ if (dw_spi_ip_is(dws, PSSI)) {
/* CTRLR0[ 5: 4] Frame Format */
- cr0 |= SSI_MOTO_SPI << SPI_FRF_OFFSET;
+ cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);
/*
* SPI mode (SCPOL|SCPH)
* CTRLR0[ 6] Serial Clock Phase
* CTRLR0[ 7] Serial Clock Polarity
*/
- cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET;
- cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET;
+ if (spi->mode & SPI_CPOL)
+ cr0 |= DW_PSSI_CTRLR0_SCPOL;
+ if (spi->mode & SPI_CPHA)
+ cr0 |= DW_PSSI_CTRLR0_SCPHA;
/* CTRLR0[11] Shift Register Loop */
- cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET;
+ if (spi->mode & SPI_LOOP)
+ cr0 |= DW_PSSI_CTRLR0_SRL;
} else {
/* CTRLR0[ 7: 6] Frame Format */
- cr0 |= SSI_MOTO_SPI << DWC_SSI_CTRLR0_FRF_OFFSET;
+ cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_FRF_MASK, DW_SPI_CTRLR0_FRF_MOTO_SPI);
/*
* SPI mode (SCPOL|SCPH)
* CTRLR0[ 8] Serial Clock Phase
* CTRLR0[ 9] Serial Clock Polarity
*/
- cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET;
- cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET;
+ if (spi->mode & SPI_CPOL)
+ cr0 |= DW_HSSI_CTRLR0_SCPOL;
+ if (spi->mode & SPI_CPHA)
+ cr0 |= DW_HSSI_CTRLR0_SCPHA;
/* CTRLR0[13] Shift Register Loop */
- cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET;
+ if (spi->mode & SPI_LOOP)
+ cr0 |= DW_HSSI_CTRLR0_SRL;
if (dws->caps & DW_SPI_CAP_KEEMBAY_MST)
- cr0 |= DWC_SSI_CTRLR0_KEEMBAY_MST;
+ cr0 |= DW_HSSI_CTRLR0_KEEMBAY_MST;
}
return cr0;
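FIELD_PREP() from <linux/bitfield.h> shifts a value into place according to a mask, replacing the hand-maintained *_OFFSET shifts above; FIELD_GET() is its inverse. For a hypothetical two-bit field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_CTRL0_MODE_MASK	GENMASK(7, 6)	/* hypothetical field */

static u32 ex_pack_mode(u32 mode)
{
	return FIELD_PREP(EX_CTRL0_MODE_MASK, mode);	/* 2 -> 0x80 */
}

static u32 ex_unpack_mode(u32 reg)
{
	return FIELD_GET(EX_CTRL0_MODE_MASK, reg);	/* 0x80 -> 2 */
}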
@@ -310,7 +317,7 @@ static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
struct dw_spi_cfg *cfg)
{
- struct chip_data *chip = spi_get_ctldata(spi);
+ struct dw_spi_chip_data *chip = spi_get_ctldata(spi);
u32 cr0 = chip->cr0;
u32 speed_hz;
u16 clk_div;
@@ -318,16 +325,17 @@ void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
/* CTRLR0[ 4/3: 0] or CTRLR0[ 20: 16] Data Frame Size */
cr0 |= (cfg->dfs - 1) << dws->dfs_offset;
- if (!(dws->caps & DW_SPI_CAP_DWC_SSI))
+ if (dw_spi_ip_is(dws, PSSI))
/* CTRLR0[ 9:8] Transfer Mode */
- cr0 |= cfg->tmode << SPI_TMOD_OFFSET;
+ cr0 |= FIELD_PREP(DW_PSSI_CTRLR0_TMOD_MASK, cfg->tmode);
else
/* CTRLR0[11:10] Transfer Mode */
- cr0 |= cfg->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET;
+ cr0 |= FIELD_PREP(DW_HSSI_CTRLR0_TMOD_MASK, cfg->tmode);
dw_writel(dws, DW_SPI_CTRLR0, cr0);
- if (cfg->tmode == SPI_TMOD_EPROMREAD || cfg->tmode == SPI_TMOD_RO)
+ if (cfg->tmode == DW_SPI_CTRLR0_TMOD_EPROMREAD ||
+ cfg->tmode == DW_SPI_CTRLR0_TMOD_RO)
dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);
/* Note DW APB SSI clock divider doesn't support odd numbers */
@@ -335,7 +343,7 @@ void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
speed_hz = dws->max_freq / clk_div;
if (dws->current_freq != speed_hz) {
- spi_set_clk(dws, clk_div);
+ dw_spi_set_clk(dws, clk_div);
dws->current_freq = speed_hz;
}
@@ -345,7 +353,7 @@ void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
dws->cur_rx_sample_dly = chip->rx_sample_dly;
}
}
-EXPORT_SYMBOL_GPL(dw_spi_update_config);
+EXPORT_SYMBOL_NS_GPL(dw_spi_update_config, SPI_DW_CORE);
static void dw_spi_irq_setup(struct dw_spi *dws)
{
@@ -363,9 +371,9 @@ static void dw_spi_irq_setup(struct dw_spi *dws)
dws->transfer_handler = dw_spi_transfer_handler;
- imask = SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI |
- SPI_INT_RXFI;
- spi_umask_intr(dws, imask);
+ imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI |
+ DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
+ dw_spi_umask_intr(dws, imask);
}
/*
@@ -405,11 +413,12 @@ static int dw_spi_poll_transfer(struct dw_spi *dws,
}
static int dw_spi_transfer_one(struct spi_controller *master,
- struct spi_device *spi, struct spi_transfer *transfer)
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
{
struct dw_spi *dws = spi_controller_get_devdata(master);
struct dw_spi_cfg cfg = {
- .tmode = SPI_TMOD_TR,
+ .tmode = DW_SPI_CTRLR0_TMOD_TR,
.dfs = transfer->bits_per_word,
.freq = transfer->speed_hz,
};
@@ -425,7 +434,7 @@ static int dw_spi_transfer_one(struct spi_controller *master,
/* Ensure the data above is visible for all CPUs */
smp_mb();
- spi_enable_chip(dws, 0);
+ dw_spi_enable_chip(dws, 0);
dw_spi_update_config(dws, spi, &cfg);
@@ -436,7 +445,7 @@ static int dw_spi_transfer_one(struct spi_controller *master,
dws->dma_mapped = master->cur_msg_mapped;
/* For poll mode just disable all interrupts */
- spi_mask_intr(dws, 0xff);
+ dw_spi_mask_intr(dws, 0xff);
if (dws->dma_mapped) {
ret = dws->dma_ops->dma_setup(dws, transfer);
@@ -444,7 +453,7 @@ static int dw_spi_transfer_one(struct spi_controller *master,
return ret;
}
- spi_enable_chip(dws, 1);
+ dw_spi_enable_chip(dws, 1);
if (dws->dma_mapped)
return dws->dma_ops->dma_transfer(dws, transfer);
@@ -457,20 +466,20 @@ static int dw_spi_transfer_one(struct spi_controller *master,
}
static void dw_spi_handle_err(struct spi_controller *master,
- struct spi_message *msg)
+ struct spi_message *msg)
{
struct dw_spi *dws = spi_controller_get_devdata(master);
if (dws->dma_mapped)
dws->dma_ops->dma_stop(dws);
- spi_reset_chip(dws);
+ dw_spi_reset_chip(dws);
}
static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
if (op->data.dir == SPI_MEM_DATA_IN)
- op->data.nbytes = clamp_val(op->data.nbytes, 0, SPI_NDF_MASK + 1);
+ op->data.nbytes = clamp_val(op->data.nbytes, 0, DW_SPI_NDF_MASK + 1);
return 0;
}
@@ -498,7 +507,7 @@ static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
if (op->data.dir == SPI_MEM_DATA_OUT)
len += op->data.nbytes;
- if (len <= SPI_BUF_SIZE) {
+ if (len <= DW_SPI_BUF_SIZE) {
out = dws->buf;
} else {
out = kzalloc(len, GFP_KERNEL);
@@ -512,9 +521,9 @@ static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
* single buffer in order to speed the data transmission up.
*/
for (i = 0; i < op->cmd.nbytes; ++i)
- out[i] = SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
+ out[i] = DW_SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
for (j = 0; j < op->addr.nbytes; ++i, ++j)
- out[i] = SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
+ out[i] = DW_SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
for (j = 0; j < op->dummy.nbytes; ++i, ++j)
out[i] = 0x0;
@@ -587,7 +596,7 @@ static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
if (!entries) {
sts = readl_relaxed(dws->regs + DW_SPI_RISR);
- if (sts & SPI_INT_RXOI) {
+ if (sts & DW_SPI_INT_RXOI) {
dev_err(&dws->master->dev, "FIFO overflow on Rx\n");
return -EIO;
}
@@ -603,12 +612,12 @@ static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
- return dw_readl(dws, DW_SPI_SR) & SR_BUSY;
+ return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
}
static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
- int retry = SPI_WAIT_RETRIES;
+ int retry = DW_SPI_WAIT_RETRIES;
struct spi_delay delay;
unsigned long ns, us;
u32 nents;
@@ -638,9 +647,9 @@ static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
{
- spi_enable_chip(dws, 0);
+ dw_spi_enable_chip(dws, 0);
dw_spi_set_cs(spi, true);
- spi_enable_chip(dws, 1);
+ dw_spi_enable_chip(dws, 1);
}
/*
@@ -673,19 +682,19 @@ static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
cfg.dfs = 8;
cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
if (op->data.dir == SPI_MEM_DATA_IN) {
- cfg.tmode = SPI_TMOD_EPROMREAD;
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
cfg.ndf = op->data.nbytes;
} else {
- cfg.tmode = SPI_TMOD_TO;
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
}
- spi_enable_chip(dws, 0);
+ dw_spi_enable_chip(dws, 0);
dw_spi_update_config(dws, mem->spi, &cfg);
- spi_mask_intr(dws, 0xff);
+ dw_spi_mask_intr(dws, 0xff);
- spi_enable_chip(dws, 1);
+ dw_spi_enable_chip(dws, 1);
/*
* DW APB SSI controller has very nasty peculiarities. First originally
@@ -768,7 +777,7 @@ static void dw_spi_init_mem_ops(struct dw_spi *dws)
static int dw_spi_setup(struct spi_device *spi)
{
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
- struct chip_data *chip;
+ struct dw_spi_chip_data *chip;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
@@ -776,7 +785,7 @@ static int dw_spi_setup(struct spi_device *spi)
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
u32 rx_sample_dly_ns;
- chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
spi_set_ctldata(spi, chip);
@@ -803,16 +812,30 @@ static int dw_spi_setup(struct spi_device *spi)
static void dw_spi_cleanup(struct spi_device *spi)
{
- struct chip_data *chip = spi_get_ctldata(spi);
+ struct dw_spi_chip_data *chip = spi_get_ctldata(spi);
kfree(chip);
spi_set_ctldata(spi, NULL);
}
/* Restart the controller, disable all interrupts, clean rx fifo */
-static void spi_hw_init(struct device *dev, struct dw_spi *dws)
+static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
{
- spi_reset_chip(dws);
+ dw_spi_reset_chip(dws);
+
+ /*
+ * Retrieve the Synopsys component version if it hasn't been specified
+ * by the platform. The CoreKit version ID is encoded as a
+ * three-character ASCII code followed by '*' (typical for most
+ * Synopsys IP cores); a readback of 0x3130322A, for instance, holds
+ * "102*" and is reported below as v1.02.
+ */
+ if (!dws->ver) {
+ dws->ver = dw_readl(dws, DW_SPI_VERSION);
+
+ dev_dbg(dev, "Synopsys DWC%sSSI v%c.%c%c\n",
+ dw_spi_ip_is(dws, PSSI) ? " APB " : " ",
+ DW_SPI_GET_BYTE(dws->ver, 3), DW_SPI_GET_BYTE(dws->ver, 2),
+ DW_SPI_GET_BYTE(dws->ver, 1));
+ }
/*
* Try to detect the FIFO depth if not set by interface driver,
@@ -837,18 +860,18 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
* writability. Note DWC SSI controller also has the extended DFS, but
* with zero offset.
*/
- if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) {
+ if (dw_spi_ip_is(dws, PSSI)) {
u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0);
- spi_enable_chip(dws, 0);
+ dw_spi_enable_chip(dws, 0);
dw_writel(dws, DW_SPI_CTRLR0, 0xffffffff);
cr0 = dw_readl(dws, DW_SPI_CTRLR0);
dw_writel(dws, DW_SPI_CTRLR0, tmp);
- spi_enable_chip(dws, 1);
+ dw_spi_enable_chip(dws, 1);
- if (!(cr0 & SPI_DFS_MASK)) {
+ if (!(cr0 & DW_PSSI_CTRLR0_DFS_MASK)) {
dws->caps |= DW_SPI_CAP_DFS32;
- dws->dfs_offset = SPI_DFS32_OFFSET;
+ dws->dfs_offset = __bf_shf(DW_PSSI_CTRLR0_DFS32_MASK);
dev_dbg(dev, "Detected 32-bits max data frame size\n");
}
} else {
@@ -872,13 +895,15 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
if (!master)
return -ENOMEM;
+ device_set_node(&master->dev, dev_fwnode(dev));
+
dws->master = master;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
spi_controller_set_devdata(master, dws);
/* Basic HW init */
- spi_hw_init(dev, dws);
+ dw_spi_hw_init(dev, dws);
ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
master);
@@ -908,8 +933,6 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
if (dws->mem_ops.exec_op)
master->mem_ops = &dws->mem_ops;
master->max_speed_hz = dws->max_freq;
- master->dev.of_node = dev->of_node;
- master->dev.fwnode = dev->fwnode;
master->flags = SPI_MASTER_GPIO_SS;
master->auto_runtime_pm = true;
@@ -939,13 +962,13 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
err_dma_exit:
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
- spi_enable_chip(dws, 0);
+ dw_spi_enable_chip(dws, 0);
free_irq(dws->irq, master);
err_free_master:
spi_controller_put(master);
return ret;
}
-EXPORT_SYMBOL_GPL(dw_spi_add_host);
+EXPORT_SYMBOL_NS_GPL(dw_spi_add_host, SPI_DW_CORE);
void dw_spi_remove_host(struct dw_spi *dws)
{
@@ -956,11 +979,11 @@ void dw_spi_remove_host(struct dw_spi *dws)
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
- spi_shutdown_chip(dws);
+ dw_spi_shutdown_chip(dws);
free_irq(dws->irq, dws->master);
}
-EXPORT_SYMBOL_GPL(dw_spi_remove_host);
+EXPORT_SYMBOL_NS_GPL(dw_spi_remove_host, SPI_DW_CORE);
int dw_spi_suspend_host(struct dw_spi *dws)
{
@@ -970,17 +993,17 @@ int dw_spi_suspend_host(struct dw_spi *dws)
if (ret)
return ret;
- spi_shutdown_chip(dws);
+ dw_spi_shutdown_chip(dws);
return 0;
}
-EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
+EXPORT_SYMBOL_NS_GPL(dw_spi_suspend_host, SPI_DW_CORE);
int dw_spi_resume_host(struct dw_spi *dws)
{
- spi_hw_init(&dws->master->dev, dws);
+ dw_spi_hw_init(&dws->master->dev, dws);
return spi_controller_resume(dws->master);
}
-EXPORT_SYMBOL_GPL(dw_spi_resume_host);
+EXPORT_SYMBOL_NS_GPL(dw_spi_resume_host, SPI_DW_CORE);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index a09831c62192..63e5260100ec 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -10,6 +10,7 @@
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
@@ -17,10 +18,10 @@
#include "spi-dw.h"
-#define RX_BUSY 0
-#define RX_BURST_LEVEL 16
-#define TX_BUSY 1
-#define TX_BURST_LEVEL 16
+#define DW_SPI_RX_BUSY 0
+#define DW_SPI_RX_BURST_LEVEL 16
+#define DW_SPI_TX_BUSY 1
+#define DW_SPI_TX_BURST_LEVEL 16
static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
@@ -45,7 +46,7 @@ static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
if (!ret && caps.max_burst)
max_burst = caps.max_burst;
else
- max_burst = RX_BURST_LEVEL;
+ max_burst = DW_SPI_RX_BURST_LEVEL;
dws->rxburst = min(max_burst, def_burst);
dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
@@ -54,7 +55,7 @@ static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
if (!ret && caps.max_burst)
max_burst = caps.max_burst;
else
- max_burst = TX_BURST_LEVEL;
+ max_burst = DW_SPI_TX_BURST_LEVEL;
/*
* Having a Rx DMA channel serviced with higher priority than a Tx DMA
@@ -226,13 +227,13 @@ static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
- return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT);
+ return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
}
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
struct spi_transfer *xfer)
{
- int retry = SPI_WAIT_RETRIES;
+ int retry = DW_SPI_WAIT_RETRIES;
struct spi_delay delay;
u32 nents;
@@ -259,8 +260,8 @@ static void dw_spi_dma_tx_done(void *arg)
{
struct dw_spi *dws = arg;
- clear_bit(TX_BUSY, &dws->dma_chan_busy);
- if (test_bit(RX_BUSY, &dws->dma_chan_busy))
+ clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
return;
complete(&dws->dma_completion);
@@ -304,19 +305,19 @@ static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
return ret;
}
- set_bit(TX_BUSY, &dws->dma_chan_busy);
+ set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
return 0;
}
static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
- return !!(dw_readl(dws, DW_SPI_SR) & SR_RF_NOT_EMPT);
+ return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
}
static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
- int retry = SPI_WAIT_RETRIES;
+ int retry = DW_SPI_WAIT_RETRIES;
struct spi_delay delay;
unsigned long ns, us;
u32 nents;
@@ -360,8 +361,8 @@ static void dw_spi_dma_rx_done(void *arg)
{
struct dw_spi *dws = arg;
- clear_bit(RX_BUSY, &dws->dma_chan_busy);
- if (test_bit(TX_BUSY, &dws->dma_chan_busy))
+ clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
return;
complete(&dws->dma_completion);
@@ -405,7 +406,7 @@ static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
return ret;
}
- set_bit(RX_BUSY, &dws->dma_chan_busy);
+ set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
return 0;
}
@@ -430,16 +431,16 @@ static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
}
/* Set the DMA handshaking interface */
- dma_ctrl = SPI_DMA_TDMAE;
+ dma_ctrl = DW_SPI_DMACR_TDMAE;
if (xfer->rx_buf)
- dma_ctrl |= SPI_DMA_RDMAE;
+ dma_ctrl |= DW_SPI_DMACR_RDMAE;
dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
/* Set the interrupt mask */
- imr = SPI_INT_TXOI;
+ imr = DW_SPI_INT_TXOI;
if (xfer->rx_buf)
- imr |= SPI_INT_RXUI | SPI_INT_RXOI;
- spi_umask_intr(dws, imr);
+ imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
+ dw_spi_umask_intr(dws, imr);
reinit_completion(&dws->dma_completion);
@@ -615,13 +616,13 @@ static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
static void dw_spi_dma_stop(struct dw_spi *dws)
{
- if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
+ if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
dmaengine_terminate_sync(dws->txchan);
- clear_bit(TX_BUSY, &dws->dma_chan_busy);
+ clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
}
- if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
+ if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
dmaengine_terminate_sync(dws->rxchan);
- clear_bit(RX_BUSY, &dws->dma_chan_busy);
+ clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
}
}
@@ -638,7 +639,7 @@ void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
dws->dma_ops = &dw_spi_dma_mfld_ops;
}
-EXPORT_SYMBOL_GPL(dw_spi_dma_setup_mfld);
+EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);
static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
.dma_init = dw_spi_dma_init_generic,
@@ -653,4 +654,4 @@ void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
dws->dma_ops = &dw_spi_dma_generic_ops;
}
-EXPORT_SYMBOL_GPL(dw_spi_dma_setup_generic);
+EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);
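The Rx/Tx completion callbacks above implement a two-flag rendezvous on dws->dma_chan_busy: each direction clears its own busy bit first, and only the side that finds the other bit already clear completes dws->dma_completion. A minimal sketch of the same pattern, assuming a shared unsigned long bitmap and a struct completion (the example_* names are placeholders, not driver API):

	/* Needs <linux/bitops.h> and <linux/completion.h>. */
	static void example_dir_done(unsigned long *busy, unsigned int my_bit,
				     unsigned int other_bit,
				     struct completion *done)
	{
		clear_bit(my_bit, busy);	/* mark this direction idle */
		if (test_bit(other_bit, busy))	/* other direction still running */
			return;
		complete(done);			/* last one out signals the waiter */
	}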
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 3379720cfcb8..5101c4c6017b 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -196,18 +196,18 @@ static int dw_spi_alpine_init(struct platform_device *pdev,
return 0;
}
-static int dw_spi_dw_apb_init(struct platform_device *pdev,
- struct dw_spi_mmio *dwsmmio)
+static int dw_spi_pssi_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
{
dw_spi_dma_setup_generic(&dwsmmio->dws);
return 0;
}
-static int dw_spi_dwc_ssi_init(struct platform_device *pdev,
- struct dw_spi_mmio *dwsmmio)
+static int dw_spi_hssi_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
{
- dwsmmio->dws.caps = DW_SPI_CAP_DWC_SSI;
+ dwsmmio->dws.ip = DW_HSSI_ID;
dw_spi_dma_setup_generic(&dwsmmio->dws);
@@ -217,7 +217,8 @@ static int dw_spi_dwc_ssi_init(struct platform_device *pdev,
static int dw_spi_keembay_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
- dwsmmio->dws.caps = DW_SPI_CAP_KEEMBAY_MST | DW_SPI_CAP_DWC_SSI;
+ dwsmmio->dws.ip = DW_HSSI_ID;
+ dwsmmio->dws.caps = DW_SPI_CAP_KEEMBAY_MST;
return 0;
}
@@ -342,12 +343,12 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
}
static const struct of_device_id dw_spi_mmio_of_match[] = {
- { .compatible = "snps,dw-apb-ssi", .data = dw_spi_dw_apb_init},
+ { .compatible = "snps,dw-apb-ssi", .data = dw_spi_pssi_init},
{ .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_ocelot_init},
{ .compatible = "mscc,jaguar2-spi", .data = dw_spi_mscc_jaguar2_init},
{ .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init},
- { .compatible = "renesas,rzn1-spi", .data = dw_spi_dw_apb_init},
- { .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_dwc_ssi_init},
+ { .compatible = "renesas,rzn1-spi", .data = dw_spi_pssi_init},
+ { .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init},
{ .compatible = "intel,keembay-ssi", .data = dw_spi_keembay_init},
{ .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init},
{ .compatible = "canaan,k210-spi", dw_spi_canaan_k210_init},
@@ -357,7 +358,7 @@ MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_spi_mmio_acpi_match[] = {
- {"HISI0173", (kernel_ulong_t)dw_spi_dw_apb_init},
+ {"HISI0173", (kernel_ulong_t)dw_spi_pssi_init},
{},
};
MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match);
@@ -377,3 +378,4 @@ module_platform_driver(dw_spi_mmio_driver);
MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>");
MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(SPI_DW_CORE);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 8a91cd58102f..7c8279d13f31 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -24,14 +24,14 @@
#define CLK_SPI_CDIV_MASK 0x00000e00
#define CLK_SPI_DISABLE_OFFSET 8
-struct spi_pci_desc {
+struct dw_spi_pci_desc {
int (*setup)(struct dw_spi *);
u16 num_cs;
u16 bus_num;
u32 max_freq;
};
-static int spi_mid_init(struct dw_spi *dws)
+static int dw_spi_pci_mid_init(struct dw_spi *dws)
{
void __iomem *clk_reg;
u32 clk_cdiv;
@@ -53,36 +53,36 @@ static int spi_mid_init(struct dw_spi *dws)
return 0;
}
-static int spi_generic_init(struct dw_spi *dws)
+static int dw_spi_pci_generic_init(struct dw_spi *dws)
{
dw_spi_dma_setup_generic(dws);
return 0;
}
-static struct spi_pci_desc spi_pci_mid_desc_1 = {
- .setup = spi_mid_init,
+static struct dw_spi_pci_desc dw_spi_pci_mid_desc_1 = {
+ .setup = dw_spi_pci_mid_init,
.num_cs = 5,
.bus_num = 0,
};
-static struct spi_pci_desc spi_pci_mid_desc_2 = {
- .setup = spi_mid_init,
+static struct dw_spi_pci_desc dw_spi_pci_mid_desc_2 = {
+ .setup = dw_spi_pci_mid_init,
.num_cs = 2,
.bus_num = 1,
};
-static struct spi_pci_desc spi_pci_ehl_desc = {
- .setup = spi_generic_init,
+static struct dw_spi_pci_desc dw_spi_pci_ehl_desc = {
+ .setup = dw_spi_pci_generic_init,
.num_cs = 2,
.bus_num = -1,
.max_freq = 100000000,
};
-static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int dw_spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct dw_spi_pci_desc *desc = (struct dw_spi_pci_desc *)ent->driver_data;
struct dw_spi *dws;
- struct spi_pci_desc *desc = (struct spi_pci_desc *)ent->driver_data;
int pci_bar = 0;
int ret;
@@ -150,7 +150,7 @@ err_free_irq_vectors:
return ret;
}
-static void spi_pci_remove(struct pci_dev *pdev)
+static void dw_spi_pci_remove(struct pci_dev *pdev)
{
struct dw_spi *dws = pci_get_drvdata(pdev);
@@ -162,14 +162,14 @@ static void spi_pci_remove(struct pci_dev *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int spi_suspend(struct device *dev)
+static int dw_spi_pci_suspend(struct device *dev)
{
struct dw_spi *dws = dev_get_drvdata(dev);
return dw_spi_suspend_host(dws);
}
-static int spi_resume(struct device *dev)
+static int dw_spi_pci_resume(struct device *dev)
{
struct dw_spi *dws = dev_get_drvdata(dev);
@@ -177,39 +177,39 @@ static int spi_resume(struct device *dev)
}
#endif
-static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume);
+static SIMPLE_DEV_PM_OPS(dw_spi_pci_pm_ops, dw_spi_pci_suspend, dw_spi_pci_resume);
-static const struct pci_device_id pci_ids[] = {
+static const struct pci_device_id dw_spi_pci_ids[] = {
/* Intel MID platform SPI controller 0 */
/*
* The access to the device 8086:0801 is disabled by HW, since it's
* exclusively used by SCU to communicate with MSIC.
*/
/* Intel MID platform SPI controller 1 */
- { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1},
+ { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&dw_spi_pci_mid_desc_1},
/* Intel MID platform SPI controller 2 */
- { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2},
+ { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&dw_spi_pci_mid_desc_2},
/* Intel Elkhart Lake PSE SPI controllers */
- { PCI_VDEVICE(INTEL, 0x4b84), (kernel_ulong_t)&spi_pci_ehl_desc},
- { PCI_VDEVICE(INTEL, 0x4b85), (kernel_ulong_t)&spi_pci_ehl_desc},
- { PCI_VDEVICE(INTEL, 0x4b86), (kernel_ulong_t)&spi_pci_ehl_desc},
- { PCI_VDEVICE(INTEL, 0x4b87), (kernel_ulong_t)&spi_pci_ehl_desc},
+ { PCI_VDEVICE(INTEL, 0x4b84), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
+ { PCI_VDEVICE(INTEL, 0x4b85), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
+ { PCI_VDEVICE(INTEL, 0x4b86), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
+ { PCI_VDEVICE(INTEL, 0x4b87), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
{},
};
-MODULE_DEVICE_TABLE(pci, pci_ids);
+MODULE_DEVICE_TABLE(pci, dw_spi_pci_ids);
-static struct pci_driver dw_spi_driver = {
+static struct pci_driver dw_spi_pci_driver = {
.name = DRIVER_NAME,
- .id_table = pci_ids,
- .probe = spi_pci_probe,
- .remove = spi_pci_remove,
+ .id_table = dw_spi_pci_ids,
+ .probe = dw_spi_pci_probe,
+ .remove = dw_spi_pci_remove,
.driver = {
- .pm = &dw_spi_pm_ops,
+ .pm = &dw_spi_pci_pm_ops,
},
};
-
-module_pci_driver(dw_spi_driver);
+module_pci_driver(dw_spi_pci_driver);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(SPI_DW_CORE);
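The EXPORT_SYMBOL_NS_GPL()/MODULE_IMPORT_NS() pairs above move the DW SPI core exports into the SPI_DW_CORE symbol namespace, so only modules that explicitly import that namespace may link against them. A minimal sketch of the mechanism, with a hypothetical exported helper:

	/* Provider side: export into a namespace instead of the global one. */
	#include <linux/module.h>

	int example_helper(void)	/* hypothetical symbol */
	{
		return 0;
	}
	EXPORT_SYMBOL_NS_GPL(example_helper, SPI_DW_CORE);

	/* Consumer side: without this line modpost fails the build, reporting
	 * that the module uses a symbol from namespace SPI_DW_CORE without
	 * importing it. */
	MODULE_IMPORT_NS(SPI_DW_CORE);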
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index b665e040862c..d5ee5130601e 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef DW_SPI_HEADER_H
-#define DW_SPI_HEADER_H
+#ifndef __SPI_DW_H__
+#define __SPI_DW_H__
#include <linux/bits.h>
#include <linux/completion.h>
@@ -11,7 +11,30 @@
#include <linux/spi/spi-mem.h>
#include <linux/bitfield.h>
-/* Register offsets */
+/* Synopsys DW SSI IP-core virtual IDs */
+#define DW_PSSI_ID 0
+#define DW_HSSI_ID 1
+
+/* Synopsys DW SSI component versions (FourCC sequence) */
+#define DW_HSSI_102A 0x3130322a
+
+/* DW SSI IP-core ID and version check helpers */
+#define dw_spi_ip_is(_dws, _ip) \
+ ((_dws)->ip == DW_ ## _ip ## _ID)
+
+#define __dw_spi_ver_cmp(_dws, _ip, _ver, _op) \
+ (dw_spi_ip_is(_dws, _ip) && (_dws)->ver _op DW_ ## _ip ## _ver)
+
+#define dw_spi_ver_is(_dws, _ip, _ver) __dw_spi_ver_cmp(_dws, _ip, _ver, ==)
+
+#define dw_spi_ver_is_ge(_dws, _ip, _ver) __dw_spi_ver_cmp(_dws, _ip, _ver, >=)
+
+/* DW SPI controller capabilities */
+#define DW_SPI_CAP_CS_OVERRIDE BIT(0)
+#define DW_SPI_CAP_KEEMBAY_MST BIT(1)
+#define DW_SPI_CAP_DFS32 BIT(2)
+
+/* Register offsets (Generic for both DWC APB SSI and DWC SSI IP-cores) */
#define DW_SPI_CTRLR0 0x00
#define DW_SPI_CTRLR1 0x04
#define DW_SPI_SSIENR 0x08
@@ -40,92 +63,79 @@
#define DW_SPI_RX_SAMPLE_DLY 0xf0
#define DW_SPI_CS_OVERRIDE 0xf4
-/* Bit fields in CTRLR0 */
-#define SPI_DFS_OFFSET 0
-#define SPI_DFS_MASK GENMASK(3, 0)
-#define SPI_DFS32_OFFSET 16
-
-#define SPI_FRF_OFFSET 4
-#define SPI_FRF_SPI 0x0
-#define SPI_FRF_SSP 0x1
-#define SPI_FRF_MICROWIRE 0x2
-#define SPI_FRF_RESV 0x3
-
-#define SPI_MODE_OFFSET 6
-#define SPI_SCPH_OFFSET 6
-#define SPI_SCOL_OFFSET 7
-
-#define SPI_TMOD_OFFSET 8
-#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
-#define SPI_TMOD_TR 0x0 /* xmit & recv */
-#define SPI_TMOD_TO 0x1 /* xmit only */
-#define SPI_TMOD_RO 0x2 /* recv only */
-#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
-
-#define SPI_SLVOE_OFFSET 10
-#define SPI_SRL_OFFSET 11
-#define SPI_CFS_OFFSET 12
-
-/* Bit fields in CTRLR0 based on DWC_ssi_databook.pdf v1.01a */
-#define DWC_SSI_CTRLR0_SRL_OFFSET 13
-#define DWC_SSI_CTRLR0_TMOD_OFFSET 10
-#define DWC_SSI_CTRLR0_TMOD_MASK GENMASK(11, 10)
-#define DWC_SSI_CTRLR0_SCPOL_OFFSET 9
-#define DWC_SSI_CTRLR0_SCPH_OFFSET 8
-#define DWC_SSI_CTRLR0_FRF_OFFSET 6
-#define DWC_SSI_CTRLR0_DFS_OFFSET 0
+/* Bit fields in CTRLR0 (DWC APB SSI) */
+#define DW_PSSI_CTRLR0_DFS_MASK GENMASK(3, 0)
+#define DW_PSSI_CTRLR0_DFS32_MASK GENMASK(20, 16)
+
+#define DW_PSSI_CTRLR0_FRF_MASK GENMASK(5, 4)
+#define DW_SPI_CTRLR0_FRF_MOTO_SPI 0x0
+#define DW_SPI_CTRLR0_FRF_TI_SSP 0x1
+#define DW_SPI_CTRLR0_FRF_NS_MICROWIRE 0x2
+#define DW_SPI_CTRLR0_FRF_RESV 0x3
+
+#define DW_PSSI_CTRLR0_MODE_MASK GENMASK(7, 6)
+#define DW_PSSI_CTRLR0_SCPHA BIT(6)
+#define DW_PSSI_CTRLR0_SCPOL BIT(7)
+
+#define DW_PSSI_CTRLR0_TMOD_MASK GENMASK(9, 8)
+#define DW_SPI_CTRLR0_TMOD_TR 0x0 /* xmit & recv */
+#define DW_SPI_CTRLR0_TMOD_TO 0x1 /* xmit only */
+#define DW_SPI_CTRLR0_TMOD_RO 0x2 /* recv only */
+#define DW_SPI_CTRLR0_TMOD_EPROMREAD 0x3 /* eeprom read mode */
+
+#define DW_PSSI_CTRLR0_SLV_OE BIT(10)
+#define DW_PSSI_CTRLR0_SRL BIT(11)
+#define DW_PSSI_CTRLR0_CFS BIT(12)
+
+/* Bit fields in CTRLR0 (DWC SSI with AHB interface) */
+#define DW_HSSI_CTRLR0_DFS_MASK GENMASK(4, 0)
+#define DW_HSSI_CTRLR0_FRF_MASK GENMASK(7, 6)
+#define DW_HSSI_CTRLR0_SCPHA BIT(8)
+#define DW_HSSI_CTRLR0_SCPOL BIT(9)
+#define DW_HSSI_CTRLR0_TMOD_MASK GENMASK(11, 10)
+#define DW_HSSI_CTRLR0_SRL BIT(13)
/*
* For Keem Bay, CTRLR0[31] is used to select controller mode.
* 0: SSI is slave
* 1: SSI is master
*/
-#define DWC_SSI_CTRLR0_KEEMBAY_MST BIT(31)
+#define DW_HSSI_CTRLR0_KEEMBAY_MST BIT(31)
/* Bit fields in CTRLR1 */
-#define SPI_NDF_MASK GENMASK(15, 0)
+#define DW_SPI_NDF_MASK GENMASK(15, 0)
/* Bit fields in SR, 7 bits */
-#define SR_MASK 0x7f /* cover 7 bits */
-#define SR_BUSY (1 << 0)
-#define SR_TF_NOT_FULL (1 << 1)
-#define SR_TF_EMPT (1 << 2)
-#define SR_RF_NOT_EMPT (1 << 3)
-#define SR_RF_FULL (1 << 4)
-#define SR_TX_ERR (1 << 5)
-#define SR_DCOL (1 << 6)
+#define DW_SPI_SR_MASK GENMASK(6, 0)
+#define DW_SPI_SR_BUSY BIT(0)
+#define DW_SPI_SR_TF_NOT_FULL BIT(1)
+#define DW_SPI_SR_TF_EMPT BIT(2)
+#define DW_SPI_SR_RF_NOT_EMPT BIT(3)
+#define DW_SPI_SR_RF_FULL BIT(4)
+#define DW_SPI_SR_TX_ERR BIT(5)
+#define DW_SPI_SR_DCOL BIT(6)
/* Bit fields in ISR, IMR, RISR, 7 bits */
-#define SPI_INT_TXEI (1 << 0)
-#define SPI_INT_TXOI (1 << 1)
-#define SPI_INT_RXUI (1 << 2)
-#define SPI_INT_RXOI (1 << 3)
-#define SPI_INT_RXFI (1 << 4)
-#define SPI_INT_MSTI (1 << 5)
+#define DW_SPI_INT_MASK GENMASK(5, 0)
+#define DW_SPI_INT_TXEI BIT(0)
+#define DW_SPI_INT_TXOI BIT(1)
+#define DW_SPI_INT_RXUI BIT(2)
+#define DW_SPI_INT_RXOI BIT(3)
+#define DW_SPI_INT_RXFI BIT(4)
+#define DW_SPI_INT_MSTI BIT(5)
/* Bit fields in DMACR */
-#define SPI_DMA_RDMAE (1 << 0)
-#define SPI_DMA_TDMAE (1 << 1)
+#define DW_SPI_DMACR_RDMAE BIT(0)
+#define DW_SPI_DMACR_TDMAE BIT(1)
-#define SPI_WAIT_RETRIES 5
-#define SPI_BUF_SIZE \
+/* Mem/DMA operations helpers */
+#define DW_SPI_WAIT_RETRIES 5
+#define DW_SPI_BUF_SIZE \
(sizeof_field(struct spi_mem_op, cmd.opcode) + \
sizeof_field(struct spi_mem_op, addr.val) + 256)
-#define SPI_GET_BYTE(_val, _idx) \
+#define DW_SPI_GET_BYTE(_val, _idx) \
((_val) >> (BITS_PER_BYTE * (_idx)) & 0xff)
-enum dw_ssi_type {
- SSI_MOTO_SPI = 0,
- SSI_TI_SSP,
- SSI_NS_MICROWIRE,
-};
-
-/* DW SPI capabilities */
-#define DW_SPI_CAP_CS_OVERRIDE BIT(0)
-#define DW_SPI_CAP_KEEMBAY_MST BIT(1)
-#define DW_SPI_CAP_DWC_SSI BIT(2)
-#define DW_SPI_CAP_DFS32 BIT(3)
-
/* Slave spi_transfer/spi_mem_op related */
struct dw_spi_cfg {
u8 tmode;
@@ -148,6 +158,10 @@ struct dw_spi_dma_ops {
struct dw_spi {
struct spi_controller *master;
+ u32 ip; /* Synopsys DW SSI IP-core ID */
+ u32 ver; /* Synopsys component version */
+ u32 caps; /* DW SPI capabilities */
+
void __iomem *regs;
unsigned long paddr;
int irq;
@@ -156,8 +170,6 @@ struct dw_spi {
u32 max_mem_freq; /* max mem-ops bus freq */
u32 max_freq; /* max bus freq supported */
- u32 caps; /* DW SPI capabilities */
-
u32 reg_io_width; /* DR I/O width in bytes */
u16 bus_num;
u16 num_cs; /* supported slave numbers */
@@ -168,7 +180,7 @@ struct dw_spi {
unsigned int tx_len;
void *rx;
unsigned int rx_len;
- u8 buf[SPI_BUF_SIZE];
+ u8 buf[DW_SPI_BUF_SIZE];
int dma_mapped;
u8 n_bytes; /* current is a 1/2 bytes op */
irqreturn_t (*transfer_handler)(struct dw_spi *dws);
@@ -230,18 +242,18 @@ static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
}
}
-static inline void spi_enable_chip(struct dw_spi *dws, int enable)
+static inline void dw_spi_enable_chip(struct dw_spi *dws, int enable)
{
dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
}
-static inline void spi_set_clk(struct dw_spi *dws, u16 div)
+static inline void dw_spi_set_clk(struct dw_spi *dws, u16 div)
{
dw_writel(dws, DW_SPI_BAUDR, div);
}
/* Disable IRQ bits */
-static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
+static inline void dw_spi_mask_intr(struct dw_spi *dws, u32 mask)
{
u32 new_mask;
@@ -250,7 +262,7 @@ static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
}
/* Enable IRQ bits */
-static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
+static inline void dw_spi_umask_intr(struct dw_spi *dws, u32 mask)
{
u32 new_mask;
@@ -263,19 +275,19 @@ static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
* and CS, then re-enables the controller back. Transmit and receive FIFO
* buffers are cleared when the device is disabled.
*/
-static inline void spi_reset_chip(struct dw_spi *dws)
+static inline void dw_spi_reset_chip(struct dw_spi *dws)
{
- spi_enable_chip(dws, 0);
- spi_mask_intr(dws, 0xff);
+ dw_spi_enable_chip(dws, 0);
+ dw_spi_mask_intr(dws, 0xff);
dw_readl(dws, DW_SPI_ICR);
dw_writel(dws, DW_SPI_SER, 0);
- spi_enable_chip(dws, 1);
+ dw_spi_enable_chip(dws, 1);
}
-static inline void spi_shutdown_chip(struct dw_spi *dws)
+static inline void dw_spi_shutdown_chip(struct dw_spi *dws)
{
- spi_enable_chip(dws, 0);
- spi_set_clk(dws, 0);
+ dw_spi_enable_chip(dws, 0);
+ dw_spi_set_clk(dws, 0);
}
extern void dw_spi_set_cs(struct spi_device *spi, bool enable);
@@ -299,4 +311,4 @@ static inline void dw_spi_dma_setup_generic(struct dw_spi *dws) {}
#endif /* !CONFIG_SPI_DW_DMA */
-#endif /* DW_SPI_HEADER_H */
+#endif /* __SPI_DW_H__ */
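The new dw_spi_ip_is()/dw_spi_ver_is_ge() helpers token-paste their arguments onto the DW_ prefix, so call sites pass bare fragments (PSSI, HSSI, _102A) rather than full macro names. A usage sketch, assuming dws->ip and dws->ver were already detected at probe time; only the macros come from the header above, the function is hypothetical:

	static void example_apply_quirks(struct dw_spi *dws)
	{
		if (dw_spi_ip_is(dws, PSSI)) {
			/* Legacy DW APB SSI layout: dws->ip == DW_PSSI_ID. */
		} else if (dw_spi_ver_is_ge(dws, HSSI, _102A)) {
			/* DWC SSI (AHB) at component version >= 1.02a:
			 * expands to dws->ver >= DW_HSSI_102A. */
		}
	}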
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index c72e501c270f..4c601294f8fa 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -913,7 +913,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "spi_register_controller error: %i\n", ret);
- goto out_pm_get;
+ goto free_dma;
}
pm_runtime_mark_last_busy(fsl_lpspi->dev);
@@ -921,6 +921,8 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
return 0;
+free_dma:
+ fsl_lpspi_dma_exit(controller);
out_pm_get:
pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
pm_runtime_put_sync(fsl_lpspi->dev);
@@ -937,6 +939,8 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
+ fsl_lpspi_dma_exit(controller);
+
pm_runtime_disable(fsl_lpspi->dev);
return 0;
}
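The fsl-lpspi fix above is an instance of the kernel's stacked-goto unwind idiom: the new free_dma label sits above out_pm_get, so a late registration failure releases the DMA channels before runtime PM is torn down, in reverse order of acquisition, and remove() now performs the matching release. A generic sketch of the shape (the acquire_*/release_*/register_it names are placeholders, not driver functions):

	static int example_probe(struct device *dev)
	{
		int ret;

		ret = acquire_a(dev);		/* first resource */
		if (ret)
			return ret;

		ret = acquire_b(dev);		/* second resource */
		if (ret)
			goto err_release_a;

		ret = register_it(dev);		/* last, most failure-prone step */
		if (ret)
			goto err_release_b;

		return 0;

	err_release_b:
		release_b(dev);
	err_release_a:
		release_a(dev);
		return ret;
	}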
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index e2affaee4e76..f7d905d2a90f 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -71,10 +71,6 @@
#define GSI_CPHA BIT(4)
#define GSI_CPOL BIT(5)
-#define MAX_TX_SG 3
-#define NUM_SPI_XFER 8
-#define SPI_XFER_TIMEOUT_MS 250
-
struct spi_geni_master {
struct geni_se se;
struct device *dev;
@@ -168,6 +164,30 @@ static void handle_fifo_timeout(struct spi_master *spi,
}
}
+static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ dmaengine_terminate_sync(mas->tx);
+ dmaengine_terminate_sync(mas->rx);
+}
+
+static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ switch (mas->cur_xfer_mode) {
+ case GENI_SE_FIFO:
+ handle_fifo_timeout(spi, msg);
+ break;
+ case GENI_GPI_DMA:
+ handle_gpi_timeout(spi, msg);
+ break;
+ default:
+ dev_err(mas->dev, "Abort on Mode:%d not supported\n", mas->cur_xfer_mode);
+ }
+}
+
static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
@@ -350,17 +370,21 @@ spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
{
struct spi_master *spi = cb;
+ spi->cur_msg->status = -EIO;
if (result->result != DMA_TRANS_NOERROR) {
dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
+ spi_finalize_current_transfer(spi);
return;
}
if (!result->residue) {
+ spi->cur_msg->status = 0;
dev_dbg(&spi->dev, "DMA txn completed\n");
- spi_finalize_current_transfer(spi);
} else {
dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
}
+
+ spi_finalize_current_transfer(spi);
}
static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
@@ -922,7 +946,7 @@ static int spi_geni_probe(struct platform_device *pdev)
spi->can_dma = geni_can_dma;
spi->dma_map_dev = dev->parent;
spi->auto_runtime_pm = true;
- spi->handle_err = handle_fifo_timeout;
+ spi->handle_err = spi_geni_handle_err;
spi->use_gpio_descriptors = true;
init_completion(&mas->cs_done);
diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
index 58b823a16fc4..525cc0143a30 100644
--- a/drivers/spi/spi-hisi-kunpeng.c
+++ b/drivers/spi/spi-hisi-kunpeng.c
@@ -127,7 +127,6 @@ struct hisi_spi {
void __iomem *regs;
int irq;
u32 fifo_len; /* depth of the FIFO buffer */
- u16 bus_num;
/* Current message transfer state info */
const void *tx;
@@ -165,7 +164,10 @@ static int hisi_spi_debugfs_init(struct hisi_spi *hs)
{
char name[32];
- snprintf(name, 32, "hisi_spi%d", hs->bus_num);
+ struct spi_controller *master;
+
+ master = container_of(hs->dev, struct spi_controller, dev);
+ snprintf(name, 32, "hisi_spi%d", master->bus_num);
hs->debugfs = debugfs_create_dir(name, NULL);
if (!hs->debugfs)
return -ENOMEM;
@@ -467,7 +469,6 @@ static int hisi_spi_probe(struct platform_device *pdev)
hs = spi_controller_get_devdata(master);
hs->dev = dev;
hs->irq = irq;
- hs->bus_num = pdev->id;
hs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hs->regs))
@@ -490,7 +491,7 @@ static int hisi_spi_probe(struct platform_device *pdev)
master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
- master->bus_num = hs->bus_num;
+ master->bus_num = pdev->id;
master->setup = hisi_spi_setup;
master->cleanup = hisi_spi_cleanup;
master->transfer_one = hisi_spi_transfer_one;
@@ -506,15 +507,15 @@ static int hisi_spi_probe(struct platform_device *pdev)
return ret;
}
- if (hisi_spi_debugfs_init(hs))
- dev_info(dev, "failed to create debugfs dir\n");
-
ret = spi_register_controller(master);
if (ret) {
dev_err(dev, "failed to register spi master, ret=%d\n", ret);
return ret;
}
+ if (hisi_spi_debugfs_init(hs))
+ dev_info(dev, "failed to create debugfs dir\n");
+
dev_info(dev, "hw version:0x%x max-freq:%u kHz\n",
readl(hs->regs + HISI_SPI_VERSION),
master->max_speed_hz / 1000);
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 8eca6f24cb79..c8ed7815c4ba 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -349,6 +349,7 @@ static int meson_spifc_probe(struct platform_device *pdev)
return 0;
out_clk:
clk_disable_unprepare(spifc->clk);
+ pm_runtime_disable(spifc->dev);
out_err:
spi_master_put(master);
return ret;
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index 5eb7b61bbb4d..f86433b29260 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -370,7 +370,6 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
cfg.src_addr_width = dma_width;
cfg.dst_addr_width = dma_width;
/* tx channel */
- cfg.slave_id = pic32s->tx_irq;
cfg.direction = DMA_MEM_TO_DEV;
ret = dmaengine_slave_config(master->dma_tx, &cfg);
if (ret) {
@@ -378,7 +377,6 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
return ret;
}
/* rx channel */
- cfg.slave_id = pic32s->rx_irq;
cfg.direction = DMA_DEV_TO_MEM;
ret = dmaengine_slave_config(master->dma_rx, &cfg);
if (ret)
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 1573f6d8eb48..e88f86274eeb 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -427,7 +427,6 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
static void cs_assert(struct spi_device *spi)
{
- struct chip_data *chip = spi_get_ctldata(spi);
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
@@ -436,18 +435,12 @@ static void cs_assert(struct spi_device *spi)
return;
}
- if (chip->cs_control) {
- chip->cs_control(PXA2XX_CS_ASSERT);
- return;
- }
-
if (is_lpss_ssp(drv_data))
lpss_ssp_cs_control(spi, true);
}
static void cs_deassert(struct spi_device *spi)
{
- struct chip_data *chip = spi_get_ctldata(spi);
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
unsigned long timeout;
@@ -461,11 +454,6 @@ static void cs_deassert(struct spi_device *spi)
!time_after(jiffies, timeout))
cpu_relax();
- if (chip->cs_control) {
- chip->cs_control(PXA2XX_CS_DEASSERT);
- return;
- }
-
if (is_lpss_ssp(drv_data))
lpss_ssp_cs_control(spi, false);
}
@@ -994,13 +982,10 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
dev_err(&spi->dev, "Flush failed\n");
return -EIO;
}
- drv_data->n_bytes = chip->n_bytes;
drv_data->tx = (void *)transfer->tx_buf;
drv_data->tx_end = drv_data->tx + transfer->len;
drv_data->rx = transfer->rx_buf;
drv_data->rx_end = drv_data->rx + transfer->len;
- drv_data->write = drv_data->tx ? chip->write : null_writer;
- drv_data->read = drv_data->rx ? chip->read : null_reader;
/* Change speed and bit per word on a per transfer */
bits = transfer->bits_per_word;
@@ -1010,22 +995,16 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
if (bits <= 8) {
drv_data->n_bytes = 1;
- drv_data->read = drv_data->read != null_reader ?
- u8_reader : null_reader;
- drv_data->write = drv_data->write != null_writer ?
- u8_writer : null_writer;
+ drv_data->read = drv_data->rx ? u8_reader : null_reader;
+ drv_data->write = drv_data->tx ? u8_writer : null_writer;
} else if (bits <= 16) {
drv_data->n_bytes = 2;
- drv_data->read = drv_data->read != null_reader ?
- u16_reader : null_reader;
- drv_data->write = drv_data->write != null_writer ?
- u16_writer : null_writer;
+ drv_data->read = drv_data->rx ? u16_reader : null_reader;
+ drv_data->write = drv_data->tx ? u16_writer : null_writer;
} else if (bits <= 32) {
drv_data->n_bytes = 4;
- drv_data->read = drv_data->read != null_reader ?
- u32_reader : null_reader;
- drv_data->write = drv_data->write != null_writer ?
- u32_writer : null_writer;
+ drv_data->read = drv_data->rx ? u32_reader : null_reader;
+ drv_data->write = drv_data->tx ? u32_writer : null_writer;
}
/*
* If bits per word is changed in DMA mode, then must check
@@ -1213,12 +1192,6 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
*/
cleanup_cs(spi);
- /* If ->cs_control() is provided, ignore GPIO chip select */
- if (chip_info->cs_control) {
- chip->cs_control = chip_info->cs_control;
- return 0;
- }
-
if (gpio_is_valid(chip_info->gpio_cs)) {
int gpio = chip_info->gpio_cs;
int err;
@@ -1316,7 +1289,6 @@ static int setup(struct spi_device *spi)
chip_info = spi->controller_data;
/* chip_info isn't always needed */
- chip->cr1 = 0;
if (chip_info) {
if (chip_info->timeout)
chip->timeout = chip_info->timeout;
@@ -1327,9 +1299,9 @@ static int setup(struct spi_device *spi)
if (chip_info->rx_threshold)
rx_thres = chip_info->rx_threshold;
chip->dma_threshold = 0;
- if (chip_info->enable_loopback)
- chip->cr1 = SSCR1_LBM;
}
+
+ chip->cr1 = 0;
if (spi_controller_is_slave(drv_data->controller)) {
chip->cr1 |= SSCR1_SCFR;
chip->cr1 |= SSCR1_SCLKDIR;
@@ -1391,20 +1363,6 @@ static int setup(struct spi_device *spi)
if (spi->mode & SPI_LOOP)
chip->cr1 |= SSCR1_LBM;
- if (spi->bits_per_word <= 8) {
- chip->n_bytes = 1;
- chip->read = u8_reader;
- chip->write = u8_writer;
- } else if (spi->bits_per_word <= 16) {
- chip->n_bytes = 2;
- chip->read = u16_reader;
- chip->write = u16_writer;
- } else if (spi->bits_per_word <= 32) {
- chip->n_bytes = 4;
- chip->read = u32_reader;
- chip->write = u32_writer;
- }
-
spi_set_ctldata(spi, chip);
if (drv_data->ssp_type == CE4100_SSP)
@@ -1706,8 +1664,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->controller_info = platform_info;
drv_data->ssp = ssp;
- controller->dev.of_node = dev->of_node;
- controller->dev.fwnode = dev->fwnode;
+ device_set_node(&controller->dev, dev_fwnode(dev));
/* The spi->mode bits understood by this driver: */
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 9a20fb88e50f..45cdbbc71c4b 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -49,7 +49,6 @@ struct driver_data {
int (*write)(struct driver_data *drv_data);
int (*read)(struct driver_data *drv_data);
irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
- void (*cs_control)(u32 command);
void __iomem *lpss_base;
@@ -61,18 +60,12 @@ struct chip_data {
u32 cr1;
u32 dds_rate;
u32 timeout;
- u8 n_bytes;
u8 enable_dma;
u32 dma_burst_size;
u32 dma_threshold;
u32 threshold;
u16 lpss_rx_threshold;
u16 lpss_tx_threshold;
-
- int (*write)(struct driver_data *drv_data);
- int (*read)(struct driver_data *drv_data);
-
- void (*cs_control)(u32 command);
};
static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data, u32 reg)
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index 83796a4ead34..fe82f3575df4 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -156,7 +156,9 @@ static int rpcif_spi_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
- rpcif_hw_init(rpc, false);
+ error = rpcif_hw_init(rpc, false);
+ if (error)
+ return error;
error = spi_register_controller(ctlr);
if (error) {
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 41761f0d892a..bd5708d7e5a1 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
@@ -834,7 +835,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
int ret;
if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
- int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
+ ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
if (ret != -EAGAIN)
return ret;
}
@@ -1225,8 +1226,14 @@ static const struct of_device_id rspi_of_match[] = {
MODULE_DEVICE_TABLE(of, rspi_of_match);
+static void rspi_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
+ struct reset_control *rstc;
u32 num_cs;
int error;
@@ -1238,6 +1245,24 @@ static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
}
ctlr->num_chipselect = num_cs;
+
+ rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(rstc))
+ return dev_err_probe(dev, PTR_ERR(rstc),
+ "failed to get reset ctrl\n");
+
+ error = reset_control_deassert(rstc);
+ if (error) {
+ dev_err(dev, "failed to deassert reset %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(dev, rspi_reset_control_assert, rstc);
+ if (error) {
+ dev_err(dev, "failed to register assert devm action, %d\n", error);
+ return error;
+ }
+
return 0;
}
#else
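rspi_parse_dt() above pairs the manual reset_control_deassert() with devm_add_action_or_reset(): if registering the action fails, the callback runs immediately; otherwise it runs automatically on probe failure or unbind, so the reset line is never left deasserted without an owner. A minimal sketch of the same pattern for an arbitrary resource (do_thing()/undo_thing() are placeholders):

	static void example_undo(void *data)
	{
		undo_thing(data);	/* e.g. reset_control_assert() */
	}

	static int example_setup(struct device *dev, struct thing *t)
	{
		int ret;

		ret = do_thing(t);	/* e.g. reset_control_deassert() */
		if (ret)
			return ret;

		/* Runs example_undo(t) right away on failure, or later at
		 * driver detach - no explicit cleanup in remove() needed. */
		return devm_add_action_or_reset(dev, example_undo, t);
	}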
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index e8204e155484..2a03739a0c60 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -18,12 +18,15 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
+#include <soc/tegra/common.h>
+
#define SLINK_COMMAND 0x000
#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
@@ -680,7 +683,7 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
bits_per_word = t->bits_per_word;
speed = t->speed_hz;
if (speed != tspi->cur_speed) {
- clk_set_rate(tspi->clk, speed * 4);
+ dev_pm_opp_set_rate(tspi->dev, speed * 4);
tspi->cur_speed = speed;
}
@@ -1066,6 +1069,10 @@ static int tegra_slink_probe(struct platform_device *pdev)
goto exit_free_master;
}
+ ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (ret)
+ goto exit_free_master;
+
tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index c0f9a75b44b5..ce1bdb4767ea 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -877,7 +877,7 @@ static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_devic
struct tegra_qspi_client_data *cdata;
struct device_node *slave_np = spi->dev.of_node;
- cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
+ cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL);
if (!cdata)
return NULL;
@@ -888,14 +888,6 @@ static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_devic
return cdata;
}
-static void tegra_qspi_cleanup(struct spi_device *spi)
-{
- struct tegra_qspi_client_data *cdata = spi->controller_data;
-
- spi->controller_data = NULL;
- kfree(cdata);
-}
-
static int tegra_qspi_setup(struct spi_device *spi)
{
struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
@@ -1229,7 +1221,6 @@ static int tegra_qspi_probe(struct platform_device *pdev)
SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
master->setup = tegra_qspi_setup;
- master->cleanup = tegra_qspi_cleanup;
master->transfer_one_message = tegra_qspi_transfer_one_message;
master->num_chipselect = 1;
master->auto_runtime_pm = true;
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index 8900e51e1a1c..342ee8d2c476 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -767,12 +767,13 @@ out_master_put:
static int uniphier_spi_remove(struct platform_device *pdev)
{
- struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
- if (priv->master->dma_tx)
- dma_release_channel(priv->master->dma_tx);
- if (priv->master->dma_rx)
- dma_release_channel(priv->master->dma_rx);
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
clk_disable_unprepare(priv->clk);
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
index 797ac0ea8fa3..e5707fe5c8f1 100644
--- a/drivers/spi/spi-xlp.c
+++ b/drivers/spi/spi-xlp.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
-#include <linux/of.h>
#include <linux/interrupt.h>
/* SPI Configuration Register */
@@ -436,17 +435,10 @@ static const struct acpi_device_id xlp_spi_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, xlp_spi_acpi_match);
#endif
-static const struct of_device_id xlp_spi_dt_id[] = {
- { .compatible = "netlogic,xlp832-spi" },
- { },
-};
-MODULE_DEVICE_TABLE(of, xlp_spi_dt_id);
-
static struct platform_driver xlp_spi_driver = {
.probe = xlp_spi_probe,
.driver = {
.name = "xlp-spi",
- .of_match_table = xlp_spi_dt_id,
.acpi_match_table = ACPI_PTR(xlp_spi_acpi_match),
},
};
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index fdd530b150a7..4599b121d744 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -33,6 +33,7 @@
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
+#include <linux/ptp_clock_kernel.h>
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
@@ -311,15 +312,14 @@ static void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
spin_unlock_irqrestore(&stats->lock, flags);
}
-/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
+/*
+ * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
* and the sysfs version makes coldplug work too.
*/
-
-static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
- const struct spi_device *sdev)
+static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
while (id->name[0]) {
- if (!strcmp(sdev->modalias, id->name))
+ if (!strcmp(name, id->name))
return id;
id++;
}
@@ -330,7 +330,7 @@ const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
- return spi_match_id(sdrv->id_table, sdev);
+ return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
@@ -352,7 +352,7 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
return 1;
if (sdrv->id_table)
- return !!spi_match_id(sdrv->id_table, spi);
+ return !!spi_match_id(sdrv->id_table, spi->modalias);
return strcmp(spi->modalias, drv->name) == 0;
}
@@ -474,12 +474,8 @@ int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
if (sdrv->id_table) {
const struct spi_device_id *spi_id;
- for (spi_id = sdrv->id_table; spi_id->name[0];
- spi_id++)
- if (strcmp(spi_id->name, of_name) == 0)
- break;
-
- if (spi_id->name[0])
+ spi_id = spi_match_id(sdrv->id_table, of_name);
+ if (spi_id)
continue;
} else {
if (strcmp(sdrv->driver.name, of_name) == 0)
@@ -497,7 +493,8 @@ EXPORT_SYMBOL_GPL(__spi_register_driver);
/*-------------------------------------------------------------------------*/
-/* SPI devices should normally not be created by SPI device drivers; that
+/*
+ * SPI devices should normally not be created by SPI device drivers; that
* would make them board-specific. Similarly with SPI controller drivers.
* Device registration normally goes into like arch/.../mach.../board-YYY.c
* with other readonly (flashable) information about mainboard devices.
@@ -513,8 +510,8 @@ static LIST_HEAD(spi_controller_list);
/*
* Used to protect add/del operation for board_info list and
- * spi_controller list, and their matching process
- * also used to protect object of type struct idr
+ * spi_controller list, and their matching process. It is also
+ * used to protect objects of type struct idr.
*/
static DEFINE_MUTEX(board_lock);
@@ -621,7 +618,8 @@ static int __spi_add_device(struct spi_device *spi)
else if (ctlr->cs_gpios)
spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
- /* Drivers may modify this initial i/o setup, but will
+ /*
+ * Drivers may modify this initial i/o setup, but will
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
*/
@@ -715,7 +713,8 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
struct spi_device *proxy;
int status;
- /* NOTE: caller did any chip->bus_num checks necessary.
+ /*
+ * NOTE: caller did any chip->bus_num checks necessary.
*
* Also, unless we change the return value convention to use
* error-or-pointer (not NULL-or-pointer), troubleshootability
@@ -883,7 +882,6 @@ static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
/**
* spi_res_free - free an spi resource
* @res: pointer to the custom data of a resource
- *
*/
static void spi_res_free(void *res)
{
@@ -947,12 +945,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi->controller->last_cs_enable = enable;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
- if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
- !spi->controller->set_cs_timing) {
- if (activate)
- spi_delay_exec(&spi->cs_setup, NULL);
- else
- spi_delay_exec(&spi->cs_hold, NULL);
+ if ((spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
+ !spi->controller->set_cs_timing) && !activate) {
+ spi_delay_exec(&spi->cs_hold, NULL);
}
if (spi->mode & SPI_CS_HIGH)
@@ -978,7 +973,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
gpiod_set_value_cansleep(spi->cs_gpiod, activate);
} else {
/*
- * invert the enable line, as active low is
+ * Invert the enable line, as active low is
* default for SPI.
*/
gpio_set_value_cansleep(spi->cs_gpio, !enable);
@@ -994,7 +989,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
!spi->controller->set_cs_timing) {
- if (!activate)
+ if (activate)
+ spi_delay_exec(&spi->cs_setup, NULL);
+ else
spi_delay_exec(&spi->cs_inactive, NULL);
}
}
@@ -1227,11 +1224,10 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (max_tx) {
tmp = krealloc(ctlr->dummy_tx, max_tx,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL | GFP_DMA | __GFP_ZERO);
if (!tmp)
return -ENOMEM;
ctlr->dummy_tx = tmp;
- memset(tmp, 0, max_tx);
}
if (max_rx) {
@@ -1717,16 +1713,7 @@ static void spi_pump_messages(struct kthread_work *work)
}
/**
- * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
- * TX timestamp for the requested byte from the SPI
- * transfer. The frequency with which this function
- * must be called (once per word, once for the whole
- * transfer, once per batch of words etc) is arbitrary
- * as long as the @tx buffer offset is greater than or
- * equal to the requested byte at the time of the
- * call. The timestamp is only taken once, at the
- * first such call. It is assumed that the driver
- * advances its @tx buffer pointer monotonically.
+ * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
* @ctlr: Pointer to the spi_controller structure of the driver
* @xfer: Pointer to the transfer being timestamped
* @progress: How many words (not bytes) have been transferred so far
@@ -1736,6 +1723,14 @@ static void spi_pump_messages(struct kthread_work *work)
* spi_take_timestamp_post or otherwise system will crash.
* WARNING: for fully predictable results, the CPU frequency must
* also be under control (governor).
+ *
+ * This is a helper for drivers to collect the beginning of the TX timestamp
+ * for the requested byte from the SPI transfer. The frequency with which this
+ * function must be called (once per word, once for the whole transfer, once
+ * per batch of words etc) is arbitrary as long as the @tx buffer offset is
+ * greater than or equal to the requested byte at the time of the call. The
+ * timestamp is only taken once, at the first such call. It is assumed that
+ * the driver advances its @tx buffer pointer monotonically.
*/
void spi_take_timestamp_pre(struct spi_controller *ctlr,
struct spi_transfer *xfer,
@@ -1763,16 +1758,16 @@ void spi_take_timestamp_pre(struct spi_controller *ctlr,
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
/**
- * spi_take_timestamp_post - helper for drivers to collect the end of the
- * TX timestamp for the requested byte from the SPI
- * transfer. Can be called with an arbitrary
- * frequency: only the first call where @tx exceeds
- * or is equal to the requested word will be
- * timestamped.
+ * spi_take_timestamp_post - helper to collect the end of the TX timestamp
* @ctlr: Pointer to the spi_controller structure of the driver
* @xfer: Pointer to the transfer being timestamped
* @progress: How many words (not bytes) have been transferred so far
* @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
+ *
+ * This is a helper for drivers to collect the end of the TX timestamp for
+ * the requested byte from the SPI transfer. Can be called with an arbitrary
+ * frequency: only the first call where @tx exceeds or is equal to the
+ * requested word will be timestamped.
*/
void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
@@ -1905,10 +1900,12 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
spi_unmap_msg(ctlr, mesg);
- /* In the prepare_messages callback the spi bus has the opportunity to
- * split a transfer to smaller chunks.
- * Release splited transfers here since spi_map_msg is done on the
- * splited transfers.
+ /*
+ * In the prepare_messages callback the SPI bus has the opportunity
+ * to split a transfer to smaller chunks.
+ *
+ * Release the split transfers here since spi_map_msg() is done on
+ * the split transfers.
*/
spi_res_release(ctlr, mesg);
@@ -2950,8 +2947,9 @@ int spi_register_controller(struct spi_controller *ctlr)
if (!ctlr->max_dma_len)
ctlr->max_dma_len = INT_MAX;
- /* register the device, then userspace will see it.
- * registration fails if the bus ID is in use.
+ /*
+ * Register the device, then userspace will see it.
+ * Registration fails if the bus ID is in use.
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
@@ -3217,16 +3215,18 @@ static struct spi_replaced_transfers *spi_replace_transfers(
/* init the replaced_transfers list */
INIT_LIST_HEAD(&rxfer->replaced_transfers);
- /* assign the list_entry after which we should reinsert
+ /*
+ * Assign the list_entry after which we should reinsert
* the @replaced_transfers - it may be spi_message.messages!
*/
rxfer->replaced_after = xfer_first->transfer_list.prev;
/* remove the requested number of transfers */
for (i = 0; i < remove; i++) {
- /* if the entry after replaced_after it is msg->transfers
+ /*
+ * If the entry after replaced_after it is msg->transfers
* then we have been requested to remove more transfers
- * than are in the list
+ * than are in the list.
*/
if (rxfer->replaced_after->next == &msg->transfers) {
dev_err(&msg->spi->dev,
@@ -3242,15 +3242,17 @@ static struct spi_replaced_transfers *spi_replace_transfers(
return ERR_PTR(-EINVAL);
}
- /* remove the entry after replaced_after from list of
- * transfers and add it to list of replaced_transfers
+ /*
+ * Remove the entry after replaced_after from list of
+ * transfers and add it to list of replaced_transfers.
*/
list_move_tail(rxfer->replaced_after->next,
&rxfer->replaced_transfers);
}
- /* create copy of the given xfer with identical settings
- * based on the first transfer to get removed
+ /*
+ * Create copy of the given xfer with identical settings
+ * based on the first transfer to get removed.
*/
for (i = 0; i < insert; i++) {
/* we need to run in reverse order */
@@ -3298,18 +3300,20 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
return PTR_ERR(srt);
xfers = srt->inserted_transfers;
- /* now handle each of those newly inserted spi_transfers
- * note that the replacements spi_transfers all are preset
+ /*
+ * Now handle each of those newly inserted spi_transfers.
+ * Note that the replacements spi_transfers all are preset
* to the same values as *xferp, so tx_buf, rx_buf and len
* are all identical (as well as most others)
* so we just have to fix up len and the pointers.
*
- * this also includes support for the depreciated
- * spi_message.is_dma_mapped interface
+ * This also includes support for the deprecated
+ * spi_message.is_dma_mapped interface.
*/
- /* the first transfer just needs the length modified, so we
- * run it outside the loop
+ /*
+ * The first transfer just needs the length modified, so we
+ * run it outside the loop.
*/
xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
@@ -3329,8 +3333,9 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
xfers[i].len = min(maxsize, xfers[i].len - offset);
}
- /* we set up xferp to the last entry we have inserted,
- * so that we skip those already split transfers
+ /*
+ * We set up xferp to the last entry we have inserted,
+ * so that we skip those already split transfers.
*/
*xferp = &xfers[count - 1];
@@ -3362,11 +3367,12 @@ int spi_split_transfers_maxsize(struct spi_controller *ctlr,
struct spi_transfer *xfer;
int ret;
- /* iterate over the transfer_list,
+ /*
+ * Iterate over the transfer_list,
* but note that xfer is advanced to the last transfer inserted
* to avoid checking sizes again unnecessarily (also xfer does
- * potentiall belong to a different list by the time the
- * replacement has happened
+ * potentially belong to a different list by the time the
+ * replacement has happened).
*/
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->len > maxsize) {
@@ -3427,8 +3433,8 @@ int spi_setup(struct spi_device *spi)
int status;
/*
- * check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
- * are set at the same time
+ * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
+ * are set at the same time.
*/
if ((hweight_long(spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
@@ -3438,20 +3444,21 @@ int spi_setup(struct spi_device *spi)
"setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
return -EINVAL;
}
- /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
- */
+ /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
if ((spi->mode & SPI_3WIRE) && (spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
return -EINVAL;
- /* help drivers fail *cleanly* when they need options
- * that aren't supported with their current controller
+ /*
+ * Help drivers fail *cleanly* when they need options
+ * that aren't supported with their current controller.
* SPI_CS_WORD has a fallback software implementation,
* so it is ignored here.
*/
bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
SPI_NO_TX | SPI_NO_RX);
- /* nothing prevents from working with active-high CS in case if it
+ /*
+ * Nothing prevents working with active-high CS if it
* is driven by GPIO.
*/
if (gpio_is_valid(spi->cs_gpio))
@@ -3573,7 +3580,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (list_empty(&message->transfers))
return -EINVAL;
- /* If an SPI controller does not support toggling the CS line on each
+ /*
+ * If an SPI controller does not support toggling the CS line on each
* transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
* for the CS line, we can emulate the CS-per-word hardware function by
* splitting transfers into one-word transfers and ensuring that
@@ -3603,7 +3611,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
}
}
- /* Half-duplex links include original MicroWire, and ones with
+ /*
+ * Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
* either MOSI or MISO is missing. They can also be caused by
* software limitations.
@@ -3622,7 +3631,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
}
}
- /**
+ /*
* Set transfer bits_per_word and max speed as spi device default if
* it is not set for this transfer.
* Set transfer tx_nbits and rx_nbits as single transfer default
@@ -3648,7 +3657,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
/*
* SPI transfer length should be multiple of SPI word size
- * where SPI word size should be power-of-two multiple
+ * where SPI word size should be a power-of-two multiple.
*/
if (xfer->bits_per_word <= 8)
w_size = 1;
@@ -3669,7 +3678,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
xfer->tx_nbits = SPI_NBITS_SINGLE;
if (xfer->rx_buf && !xfer->rx_nbits)
xfer->rx_nbits = SPI_NBITS_SINGLE;
- /* check transfer tx/rx_nbits:
+ /*
+ * Check transfer tx/rx_nbits:
* 1. check the value matches one of single, dual and quad
* 2. check tx/rx_nbits match the mode in spi_device
*/
@@ -3848,7 +3858,8 @@ static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
/*-------------------------------------------------------------------------*/
-/* Utility methods for SPI protocol drivers, layered on
+/*
+ * Utility methods for SPI protocol drivers, layered on
* top of the core. Some other utility methods are defined as
* inline functions.
*/
@@ -3876,7 +3887,8 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
- /* If we're not using the legacy transfer method then we will
+ /*
+ * If we're not using the legacy transfer method then we will
* try to transfer in the calling context so special case.
* This code would be less tricky if we could remove the
* support for driver implemented message queues.
@@ -3894,9 +3906,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
}
if (status == 0) {
- /* Push out the messages in the calling context if we
- * can.
- */
+ /* Push out the messages in the calling context if we can */
if (ctlr->transfer == spi_queued_transfer) {
SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
spi_sync_immediate);
@@ -4057,7 +4067,8 @@ int spi_write_then_read(struct spi_device *spi,
struct spi_transfer x[2];
u8 *local_buf;
- /* Use preallocated DMA-safe buffer if we can. We can't avoid
+ /*
+ * Use preallocated DMA-safe buffer if we can. We can't avoid
* copying here, (as a pure convenience thing), but we can
* keep heap costs out of the hot path unless someone else is
* using the pre-allocated buffer or the transfer is too large.
@@ -4293,11 +4304,12 @@ err0:
return status;
}
-/* board_info is normally registered in arch_initcall(),
- * but even essential drivers wait till later
+/*
+ * A board_info is normally registered in arch_initcall(),
+ * but even essential drivers wait till later.
*
- * REVISIT only boardinfo really needs static linking. the rest (device and
- * driver registration) _could_ be dynamically linked (modular) ... costs
+ * REVISIT only boardinfo really needs static linking. The rest (device and
+ * driver registration) _could_ be dynamically linked (modular) ... Costs
* include needing to have boardinfo data structures be much more public.
*/
postcore_initcall(spi_init);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 1bd73e322b7b..a5cceca8b82b 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -415,7 +415,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
tmp |= SPI_CS_HIGH;
tmp |= spi->mode & ~SPI_MODE_MASK;
- spi->mode = (u16)tmp;
+ spi->mode = tmp & SPI_MODE_USER_MASK;
retval = spi_setup(spi);
if (retval < 0)
spi->mode = save;
@@ -751,9 +751,10 @@ static int spidev_probe(struct spi_device *spi)
* compatible string, it is a Linux implementation thing
* rather than a description of the hardware.
*/
- WARN(spi->dev.of_node &&
- of_device_is_compatible(spi->dev.of_node, "spidev"),
- "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);
+ if (spi->dev.of_node && of_device_is_compatible(spi->dev.of_node, "spidev")) {
+ dev_err(&spi->dev, "spidev listed directly in DT is not supported\n");
+ return -EINVAL;
+ }
spidev_probe_acpi(spi);
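The mode ioctl change above replaces the blind (u16) cast with a mask: SPI_MODE_USER_MASK (from include/uapi/linux/spi/spi.h) covers exactly the mode bits userspace may set, so kernel-internal flags can no longer leak in through the mode ioctls. A sketch of the save/mask/restore sequence, with a hypothetical requested mode:

	u32 requested = SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;	/* hypothetical */
	u32 save = spi->mode;

	spi->mode = requested & SPI_MODE_USER_MASK;	/* drop non-uapi bits */
	if (spi_setup(spi) < 0)
		spi->mode = save;			/* roll back on failure */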
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 2874b6c26028..737802046314 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -34,4 +34,15 @@ config SPMI_MSM_PMIC_ARB
This is required for communicating with Qualcomm PMICs and
other devices that have the SPMI interface.
+config SPMI_MTK_PMIF
+ tristate "Mediatek SPMI Controller (PMIC Arbiter)"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ built-in SPMI PMIC Arbiter interface on Mediatek family
+ processors.
+
+ This is required for communicating with Mediatek PMICs and
+ other devices that have the SPMI interface.
+
endif
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index 6e092e6f290c..9d974424c8c1 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_SPMI) += spmi.o
obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
+obj-$(CONFIG_SPMI_MTK_PMIF) += spmi-mtk-pmif.o
diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c
new file mode 100644
index 000000000000..ad511f2c3324
--- /dev/null
+++ b/drivers/spmi/spmi-mtk-pmif.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2021 MediaTek Inc.
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/spmi.h>
+
+#define SWINF_IDLE 0x00
+#define SWINF_WFVLDCLR 0x06
+
+#define GET_SWINF(x) (((x) >> 1) & 0x7)
+
+#define PMIF_CMD_REG_0 0
+#define PMIF_CMD_REG 1
+#define PMIF_CMD_EXT_REG 2
+#define PMIF_CMD_EXT_REG_LONG 3
+
+#define PMIF_DELAY_US 10
+#define PMIF_TIMEOUT_US (10 * 1000)
+
+#define PMIF_CHAN_OFFSET 0x5
+
+#define PMIF_MAX_CLKS 3
+
+#define SPMI_OP_ST_BUSY 1
+
+struct ch_reg {
+ u32 ch_sta;
+ u32 wdata;
+ u32 rdata;
+ u32 ch_send;
+ u32 ch_rdy;
+};
+
+struct pmif_data {
+ const u32 *regs;
+ const u32 *spmimst_regs;
+ u32 soc_chan;
+};
+
+struct pmif {
+ void __iomem *base;
+ void __iomem *spmimst_base;
+ struct ch_reg chan;
+ struct clk_bulk_data clks[PMIF_MAX_CLKS];
+ size_t nclks;
+ const struct pmif_data *data;
+};
+
+static const char * const pmif_clock_names[] = {
+ "pmif_sys_ck", "pmif_tmr_ck", "spmimst_clk_mux",
+};
+
+enum pmif_regs {
+ PMIF_INIT_DONE,
+ PMIF_INF_EN,
+ PMIF_ARB_EN,
+ PMIF_CMDISSUE_EN,
+ PMIF_TIMER_CTRL,
+ PMIF_SPI_MODE_CTRL,
+ PMIF_IRQ_EVENT_EN_0,
+ PMIF_IRQ_FLAG_0,
+ PMIF_IRQ_CLR_0,
+ PMIF_IRQ_EVENT_EN_1,
+ PMIF_IRQ_FLAG_1,
+ PMIF_IRQ_CLR_1,
+ PMIF_IRQ_EVENT_EN_2,
+ PMIF_IRQ_FLAG_2,
+ PMIF_IRQ_CLR_2,
+ PMIF_IRQ_EVENT_EN_3,
+ PMIF_IRQ_FLAG_3,
+ PMIF_IRQ_CLR_3,
+ PMIF_IRQ_EVENT_EN_4,
+ PMIF_IRQ_FLAG_4,
+ PMIF_IRQ_CLR_4,
+ PMIF_WDT_EVENT_EN_0,
+ PMIF_WDT_FLAG_0,
+ PMIF_WDT_EVENT_EN_1,
+ PMIF_WDT_FLAG_1,
+ PMIF_SWINF_0_STA,
+ PMIF_SWINF_0_WDATA_31_0,
+ PMIF_SWINF_0_RDATA_31_0,
+ PMIF_SWINF_0_ACC,
+ PMIF_SWINF_0_VLD_CLR,
+ PMIF_SWINF_1_STA,
+ PMIF_SWINF_1_WDATA_31_0,
+ PMIF_SWINF_1_RDATA_31_0,
+ PMIF_SWINF_1_ACC,
+ PMIF_SWINF_1_VLD_CLR,
+ PMIF_SWINF_2_STA,
+ PMIF_SWINF_2_WDATA_31_0,
+ PMIF_SWINF_2_RDATA_31_0,
+ PMIF_SWINF_2_ACC,
+ PMIF_SWINF_2_VLD_CLR,
+ PMIF_SWINF_3_STA,
+ PMIF_SWINF_3_WDATA_31_0,
+ PMIF_SWINF_3_RDATA_31_0,
+ PMIF_SWINF_3_ACC,
+ PMIF_SWINF_3_VLD_CLR,
+};
+
+static const u32 mt6873_regs[] = {
+ [PMIF_INIT_DONE] = 0x0000,
+ [PMIF_INF_EN] = 0x0024,
+ [PMIF_ARB_EN] = 0x0150,
+ [PMIF_CMDISSUE_EN] = 0x03B4,
+ [PMIF_TIMER_CTRL] = 0x03E0,
+ [PMIF_SPI_MODE_CTRL] = 0x0400,
+ [PMIF_IRQ_EVENT_EN_0] = 0x0418,
+ [PMIF_IRQ_FLAG_0] = 0x0420,
+ [PMIF_IRQ_CLR_0] = 0x0424,
+ [PMIF_IRQ_EVENT_EN_1] = 0x0428,
+ [PMIF_IRQ_FLAG_1] = 0x0430,
+ [PMIF_IRQ_CLR_1] = 0x0434,
+ [PMIF_IRQ_EVENT_EN_2] = 0x0438,
+ [PMIF_IRQ_FLAG_2] = 0x0440,
+ [PMIF_IRQ_CLR_2] = 0x0444,
+ [PMIF_IRQ_EVENT_EN_3] = 0x0448,
+ [PMIF_IRQ_FLAG_3] = 0x0450,
+ [PMIF_IRQ_CLR_3] = 0x0454,
+ [PMIF_IRQ_EVENT_EN_4] = 0x0458,
+ [PMIF_IRQ_FLAG_4] = 0x0460,
+ [PMIF_IRQ_CLR_4] = 0x0464,
+ [PMIF_WDT_EVENT_EN_0] = 0x046C,
+ [PMIF_WDT_FLAG_0] = 0x0470,
+ [PMIF_WDT_EVENT_EN_1] = 0x0474,
+ [PMIF_WDT_FLAG_1] = 0x0478,
+ [PMIF_SWINF_0_ACC] = 0x0C00,
+ [PMIF_SWINF_0_WDATA_31_0] = 0x0C04,
+ [PMIF_SWINF_0_RDATA_31_0] = 0x0C14,
+ [PMIF_SWINF_0_VLD_CLR] = 0x0C24,
+ [PMIF_SWINF_0_STA] = 0x0C28,
+ [PMIF_SWINF_1_ACC] = 0x0C40,
+ [PMIF_SWINF_1_WDATA_31_0] = 0x0C44,
+ [PMIF_SWINF_1_RDATA_31_0] = 0x0C54,
+ [PMIF_SWINF_1_VLD_CLR] = 0x0C64,
+ [PMIF_SWINF_1_STA] = 0x0C68,
+ [PMIF_SWINF_2_ACC] = 0x0C80,
+ [PMIF_SWINF_2_WDATA_31_0] = 0x0C84,
+ [PMIF_SWINF_2_RDATA_31_0] = 0x0C94,
+ [PMIF_SWINF_2_VLD_CLR] = 0x0CA4,
+ [PMIF_SWINF_2_STA] = 0x0CA8,
+ [PMIF_SWINF_3_ACC] = 0x0CC0,
+ [PMIF_SWINF_3_WDATA_31_0] = 0x0CC4,
+ [PMIF_SWINF_3_RDATA_31_0] = 0x0CD4,
+ [PMIF_SWINF_3_VLD_CLR] = 0x0CE4,
+ [PMIF_SWINF_3_STA] = 0x0CE8,
+};
+
+static const u32 mt8195_regs[] = {
+ [PMIF_INIT_DONE] = 0x0000,
+ [PMIF_INF_EN] = 0x0024,
+ [PMIF_ARB_EN] = 0x0150,
+ [PMIF_CMDISSUE_EN] = 0x03B8,
+ [PMIF_TIMER_CTRL] = 0x03E4,
+ [PMIF_SPI_MODE_CTRL] = 0x0408,
+ [PMIF_IRQ_EVENT_EN_0] = 0x0420,
+ [PMIF_IRQ_FLAG_0] = 0x0428,
+ [PMIF_IRQ_CLR_0] = 0x042C,
+ [PMIF_IRQ_EVENT_EN_1] = 0x0430,
+ [PMIF_IRQ_FLAG_1] = 0x0438,
+ [PMIF_IRQ_CLR_1] = 0x043C,
+ [PMIF_IRQ_EVENT_EN_2] = 0x0440,
+ [PMIF_IRQ_FLAG_2] = 0x0448,
+ [PMIF_IRQ_CLR_2] = 0x044C,
+ [PMIF_IRQ_EVENT_EN_3] = 0x0450,
+ [PMIF_IRQ_FLAG_3] = 0x0458,
+ [PMIF_IRQ_CLR_3] = 0x045C,
+ [PMIF_IRQ_EVENT_EN_4] = 0x0460,
+ [PMIF_IRQ_FLAG_4] = 0x0468,
+ [PMIF_IRQ_CLR_4] = 0x046C,
+ [PMIF_WDT_EVENT_EN_0] = 0x0474,
+ [PMIF_WDT_FLAG_0] = 0x0478,
+ [PMIF_WDT_EVENT_EN_1] = 0x047C,
+ [PMIF_WDT_FLAG_1] = 0x0480,
+ [PMIF_SWINF_0_ACC] = 0x0800,
+ [PMIF_SWINF_0_WDATA_31_0] = 0x0804,
+ [PMIF_SWINF_0_RDATA_31_0] = 0x0814,
+ [PMIF_SWINF_0_VLD_CLR] = 0x0824,
+ [PMIF_SWINF_0_STA] = 0x0828,
+ [PMIF_SWINF_1_ACC] = 0x0840,
+ [PMIF_SWINF_1_WDATA_31_0] = 0x0844,
+ [PMIF_SWINF_1_RDATA_31_0] = 0x0854,
+ [PMIF_SWINF_1_VLD_CLR] = 0x0864,
+ [PMIF_SWINF_1_STA] = 0x0868,
+ [PMIF_SWINF_2_ACC] = 0x0880,
+ [PMIF_SWINF_2_WDATA_31_0] = 0x0884,
+ [PMIF_SWINF_2_RDATA_31_0] = 0x0894,
+ [PMIF_SWINF_2_VLD_CLR] = 0x08A4,
+ [PMIF_SWINF_2_STA] = 0x08A8,
+ [PMIF_SWINF_3_ACC] = 0x08C0,
+ [PMIF_SWINF_3_WDATA_31_0] = 0x08C4,
+ [PMIF_SWINF_3_RDATA_31_0] = 0x08D4,
+ [PMIF_SWINF_3_VLD_CLR] = 0x08E4,
+ [PMIF_SWINF_3_STA] = 0x08E8,
+};
+
+enum spmi_regs {
+ SPMI_OP_ST_CTRL,
+ SPMI_GRP_ID_EN,
+ SPMI_OP_ST_STA,
+ SPMI_MST_SAMPL,
+ SPMI_MST_REQ_EN,
+ SPMI_REC_CTRL,
+ SPMI_REC0,
+ SPMI_REC1,
+ SPMI_REC2,
+ SPMI_REC3,
+ SPMI_REC4,
+ SPMI_MST_DBG,
+
+ /* MT8195 spmi regs */
+ SPMI_MST_RCS_CTRL,
+ SPMI_SLV_3_0_EINT,
+ SPMI_SLV_7_4_EINT,
+ SPMI_SLV_B_8_EINT,
+ SPMI_SLV_F_C_EINT,
+ SPMI_REC_CMD_DEC,
+ SPMI_DEC_DBG,
+};
+
+static const u32 mt6873_spmi_regs[] = {
+ [SPMI_OP_ST_CTRL] = 0x0000,
+ [SPMI_GRP_ID_EN] = 0x0004,
+ [SPMI_OP_ST_STA] = 0x0008,
+ [SPMI_MST_SAMPL] = 0x000c,
+ [SPMI_MST_REQ_EN] = 0x0010,
+ [SPMI_REC_CTRL] = 0x0040,
+ [SPMI_REC0] = 0x0044,
+ [SPMI_REC1] = 0x0048,
+ [SPMI_REC2] = 0x004c,
+ [SPMI_REC3] = 0x0050,
+ [SPMI_REC4] = 0x0054,
+ [SPMI_MST_DBG] = 0x00fc,
+};
+
+static const u32 mt8195_spmi_regs[] = {
+ [SPMI_OP_ST_CTRL] = 0x0000,
+ [SPMI_GRP_ID_EN] = 0x0004,
+ [SPMI_OP_ST_STA] = 0x0008,
+ [SPMI_MST_SAMPL] = 0x000C,
+ [SPMI_MST_REQ_EN] = 0x0010,
+ [SPMI_MST_RCS_CTRL] = 0x0014,
+ [SPMI_SLV_3_0_EINT] = 0x0020,
+ [SPMI_SLV_7_4_EINT] = 0x0024,
+ [SPMI_SLV_B_8_EINT] = 0x0028,
+ [SPMI_SLV_F_C_EINT] = 0x002C,
+ [SPMI_REC_CTRL] = 0x0040,
+ [SPMI_REC0] = 0x0044,
+ [SPMI_REC1] = 0x0048,
+ [SPMI_REC2] = 0x004C,
+ [SPMI_REC3] = 0x0050,
+ [SPMI_REC4] = 0x0054,
+ [SPMI_REC_CMD_DEC] = 0x005C,
+ [SPMI_DEC_DBG] = 0x00F8,
+ [SPMI_MST_DBG] = 0x00FC,
+};
+
+static u32 pmif_readl(struct pmif *arb, enum pmif_regs reg)
+{
+ return readl(arb->base + arb->data->regs[reg]);
+}
+
+static void pmif_writel(struct pmif *arb, u32 val, enum pmif_regs reg)
+{
+ writel(val, arb->base + arb->data->regs[reg]);
+}
+
+static void mtk_spmi_writel(struct pmif *arb, u32 val, enum spmi_regs reg)
+{
+ writel(val, arb->spmimst_base + arb->data->spmimst_regs[reg]);
+}
+
+static bool pmif_is_fsm_vldclr(struct pmif *arb)
+{
+ u32 reg_rdata;
+
+ reg_rdata = pmif_readl(arb, arb->chan.ch_sta);
+
+ return GET_SWINF(reg_rdata) == SWINF_WFVLDCLR;
+}
+
+static int pmif_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
+{
+ struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+ u32 rdata, cmd;
+ int ret;
+
+ /* Check the opcode */
+ if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
+ return -EINVAL;
+
+ cmd = opc - SPMI_CMD_RESET;
+
+ mtk_spmi_writel(arb, (cmd << 0x4) | sid, SPMI_OP_ST_CTRL);
+ ret = readl_poll_timeout_atomic(arb->spmimst_base + arb->data->spmimst_regs[SPMI_OP_ST_STA],
+ rdata, (rdata & SPMI_OP_ST_BUSY) == SPMI_OP_ST_BUSY,
+ PMIF_DELAY_US, PMIF_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(&ctrl->dev, "timeout, err = %d\n", ret);
+
+ return ret;
+}
+
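Both pmif_arb_cmd() above and the read/write paths below poll controller status with readl_poll_timeout_atomic() from <linux/iopoll.h>. A hedged sketch of its semantics (STA_OFFSET is an illustrative placeholder, not a register in this driver):

    /*
     * readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us)
     * repeatedly does val = readl(addr), spinning delay_us between
     * reads, until cond is true (returns 0) or timeout_us elapses
     * (returns -ETIMEDOUT).
     */
    u32 sta;
    int ret = readl_poll_timeout_atomic(base + STA_OFFSET, sta,
    					GET_SWINF(sta) == SWINF_IDLE,
    					PMIF_DELAY_US, PMIF_TIMEOUT_US);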
+static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, u8 *buf, size_t len)
+{
+ struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+ struct ch_reg *inf_reg;
+ int ret;
+ u32 data, cmd;
+
+	/* Validate the arguments. */
+ if (sid & ~0xf) {
+ dev_err(&ctrl->dev, "exceed the max slv id\n");
+ return -EINVAL;
+ }
+
+ if (len > 4) {
+ dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len);
+
+ return -EINVAL;
+ }
+
+ if (opc >= 0x60 && opc <= 0x7f)
+ opc = PMIF_CMD_REG;
+ else if ((opc >= 0x20 && opc <= 0x2f) || (opc >= 0x38 && opc <= 0x3f))
+ opc = PMIF_CMD_EXT_REG_LONG;
+ else
+ return -EINVAL;
+
+ /* Wait for Software Interface FSM state to be IDLE. */
+ inf_reg = &arb->chan;
+ ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
+ data, GET_SWINF(data) == SWINF_IDLE,
+ PMIF_DELAY_US, PMIF_TIMEOUT_US);
+ if (ret < 0) {
+		/* set channel ready if the data has been transferred */
+ if (pmif_is_fsm_vldclr(arb))
+ pmif_writel(arb, 1, inf_reg->ch_rdy);
+ dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
+ return ret;
+ }
+
+ /* Send the command. */
+ cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr;
+ pmif_writel(arb, cmd, inf_reg->ch_send);
+
+ /*
+ * Wait for Software Interface FSM state to be WFVLDCLR,
+ * read the data and clear the valid flag.
+ */
+ ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
+ data, GET_SWINF(data) == SWINF_WFVLDCLR,
+ PMIF_DELAY_US, PMIF_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(&ctrl->dev, "failed to wait for SWINF_WFVLDCLR\n");
+ return ret;
+ }
+
+ data = pmif_readl(arb, inf_reg->rdata);
+ memcpy(buf, &data, len);
+ pmif_writel(arb, 1, inf_reg->ch_rdy);
+
+ return 0;
+}
+
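The 32-bit command word written to the channel's ACC register packs several fields. The layout below is inferred from the shifts in this driver, not from MediaTek documentation, and the helper is purely illustrative:

    /*
     * Inferred SWINF command-word layout:
     *   [31:30] opcode class (PMIF_CMD_*)
     *   [29]    write flag (set by the write path)
     *   [27:24] slave id
     *   [17:16] byte count - 1
     *   [15:0]  register address
     */
    static u32 pmif_encode_cmd(u8 opc, u8 sid, u16 addr, size_t len, bool wr)
    {
    	return (opc << 30) | (wr ? BIT(29) : 0) | (sid << 24) |
    	       ((len - 1) << 16) | addr;
    }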
+static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, const u8 *buf, size_t len)
+{
+ struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+ struct ch_reg *inf_reg;
+ int ret;
+ u32 data, cmd;
+
+ if (len > 4) {
+ dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len);
+
+ return -EINVAL;
+ }
+
+ /* Check the opcode */
+ if (opc >= 0x40 && opc <= 0x5F)
+ opc = PMIF_CMD_REG;
+ else if ((opc <= 0xF) || (opc >= 0x30 && opc <= 0x37))
+ opc = PMIF_CMD_EXT_REG_LONG;
+ else if (opc >= 0x80)
+ opc = PMIF_CMD_REG_0;
+ else
+ return -EINVAL;
+
+ /* Wait for Software Interface FSM state to be IDLE. */
+ inf_reg = &arb->chan;
+ ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
+ data, GET_SWINF(data) == SWINF_IDLE,
+ PMIF_DELAY_US, PMIF_TIMEOUT_US);
+ if (ret < 0) {
+		/* set channel ready if the data has been transferred */
+ if (pmif_is_fsm_vldclr(arb))
+ pmif_writel(arb, 1, inf_reg->ch_rdy);
+ dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
+ return ret;
+ }
+
+ /* Set the write data. */
+ memcpy(&data, buf, len);
+ pmif_writel(arb, data, inf_reg->wdata);
+
+ /* Send the command. */
+ cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr;
+ pmif_writel(arb, cmd, inf_reg->ch_send);
+
+ return 0;
+}
+
+static const struct pmif_data mt6873_pmif_arb = {
+ .regs = mt6873_regs,
+ .spmimst_regs = mt6873_spmi_regs,
+ .soc_chan = 2,
+};
+
+static const struct pmif_data mt8195_pmif_arb = {
+ .regs = mt8195_regs,
+ .spmimst_regs = mt8195_spmi_regs,
+ .soc_chan = 2,
+};
+
+static int mtk_spmi_probe(struct platform_device *pdev)
+{
+ struct pmif *arb;
+ struct spmi_controller *ctrl;
+ int err, i;
+ u32 chan_offset;
+
+ ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*arb));
+ if (!ctrl)
+ return -ENOMEM;
+
+ arb = spmi_controller_get_drvdata(ctrl);
+ arb->data = device_get_match_data(&pdev->dev);
+ if (!arb->data) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Cannot get drv_data\n");
+ goto err_put_ctrl;
+ }
+
+ arb->base = devm_platform_ioremap_resource_byname(pdev, "pmif");
+ if (IS_ERR(arb->base)) {
+ err = PTR_ERR(arb->base);
+ goto err_put_ctrl;
+ }
+
+ arb->spmimst_base = devm_platform_ioremap_resource_byname(pdev, "spmimst");
+ if (IS_ERR(arb->spmimst_base)) {
+ err = PTR_ERR(arb->spmimst_base);
+ goto err_put_ctrl;
+ }
+
+ arb->nclks = ARRAY_SIZE(pmif_clock_names);
+ for (i = 0; i < arb->nclks; i++)
+ arb->clks[i].id = pmif_clock_names[i];
+
+ err = devm_clk_bulk_get(&pdev->dev, arb->nclks, arb->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get clocks: %d\n", err);
+ goto err_put_ctrl;
+ }
+
+ err = clk_bulk_prepare_enable(arb->nclks, arb->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable clocks: %d\n", err);
+ goto err_put_ctrl;
+ }
+
+ ctrl->cmd = pmif_arb_cmd;
+ ctrl->read_cmd = pmif_spmi_read_cmd;
+ ctrl->write_cmd = pmif_spmi_write_cmd;
+
+ chan_offset = PMIF_CHAN_OFFSET * arb->data->soc_chan;
+ arb->chan.ch_sta = PMIF_SWINF_0_STA + chan_offset;
+ arb->chan.wdata = PMIF_SWINF_0_WDATA_31_0 + chan_offset;
+ arb->chan.rdata = PMIF_SWINF_0_RDATA_31_0 + chan_offset;
+ arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset;
+ arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset;
+
+ platform_set_drvdata(pdev, ctrl);
+
+ err = spmi_controller_add(ctrl);
+ if (err)
+		goto err_disable_clks;
+
+ return 0;
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(arb->nclks, arb->clks);
+err_put_ctrl:
+ spmi_controller_put(ctrl);
+ return err;
+}
+
+static int mtk_spmi_remove(struct platform_device *pdev)
+{
+ struct spmi_controller *ctrl = platform_get_drvdata(pdev);
+ struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+
+ clk_bulk_disable_unprepare(arb->nclks, arb->clks);
+ spmi_controller_remove(ctrl);
+ spmi_controller_put(ctrl);
+ return 0;
+}
+
+static const struct of_device_id mtk_spmi_match_table[] = {
+ {
+ .compatible = "mediatek,mt6873-spmi",
+ .data = &mt6873_pmif_arb,
+ }, {
+ .compatible = "mediatek,mt8195-spmi",
+ .data = &mt8195_pmif_arb,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, mtk_spmi_match_table);
+
+static struct platform_driver mtk_spmi_driver = {
+ .driver = {
+ .name = "spmi-mtk",
+ .of_match_table = of_match_ptr(mtk_spmi_match_table),
+ },
+ .probe = mtk_spmi_probe,
+ .remove = mtk_spmi_remove,
+};
+module_platform_driver(mtk_spmi_driver);
+
+MODULE_AUTHOR("Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SPMI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index bbbd311eda03..2113be40b5a9 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -261,20 +261,21 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
if (status & PMIC_ARB_STATUS_DONE) {
if (status & PMIC_ARB_STATUS_DENIED) {
- dev_err(&ctrl->dev, "%s: transaction denied (0x%x)\n",
- __func__, status);
+ dev_err(&ctrl->dev, "%s: %#x %#x: transaction denied (%#x)\n",
+ __func__, sid, addr, status);
return -EPERM;
}
if (status & PMIC_ARB_STATUS_FAILURE) {
- dev_err(&ctrl->dev, "%s: transaction failed (0x%x)\n",
- __func__, status);
+ dev_err(&ctrl->dev, "%s: %#x %#x: transaction failed (%#x)\n",
+ __func__, sid, addr, status);
+ WARN_ON(1);
return -EIO;
}
if (status & PMIC_ARB_STATUS_DROPPED) {
- dev_err(&ctrl->dev, "%s: transaction dropped (0x%x)\n",
- __func__, status);
+ dev_err(&ctrl->dev, "%s: %#x %#x: transaction dropped (%#x)\n",
+ __func__, sid, addr, status);
return -EIO;
}
@@ -283,8 +284,8 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
udelay(1);
}
- dev_err(&ctrl->dev, "%s: timeout, status 0x%x\n",
- __func__, status);
+ dev_err(&ctrl->dev, "%s: %#x %#x: timeout, status %#x\n",
+ __func__, sid, addr, status);
return -ETIMEDOUT;
}
@@ -333,24 +334,20 @@ static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
}
-static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
- u16 addr, u8 *buf, size_t len)
+static int pmic_arb_fmt_read_cmd(struct spmi_pmic_arb *pmic_arb, u8 opc, u8 sid,
+ u16 addr, size_t len, u32 *cmd, u32 *offset)
{
- struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
- unsigned long flags;
u8 bc = len - 1;
- u32 cmd;
int rc;
- u32 offset;
rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr,
PMIC_ARB_CHANNEL_OBS);
if (rc < 0)
return rc;
- offset = rc;
+ *offset = rc;
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
- dev_err(&ctrl->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
+ dev_err(&pmic_arb->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
@@ -365,14 +362,24 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
- cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+ *cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+
+ return 0;
+}
+
+static int pmic_arb_read_cmd_unlocked(struct spmi_controller *ctrl, u32 cmd,
+ u32 offset, u8 sid, u16 addr, u8 *buf,
+ size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ u8 bc = len - 1;
+ int rc;
- raw_spin_lock_irqsave(&pmic_arb->lock, flags);
pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr,
PMIC_ARB_CHANNEL_OBS);
if (rc)
- goto done;
+ return rc;
pmic_arb_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
min_t(u8, bc, 3));
@@ -380,30 +387,44 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
if (bc > 3)
pmic_arb_read_data(pmic_arb, buf + 4, offset + PMIC_ARB_RDATA1,
bc - 4);
+ return 0;
+}
-done:
+static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ u32 cmd, offset;
+ int rc;
+
+ rc = pmic_arb_fmt_read_cmd(pmic_arb, opc, sid, addr, len, &cmd,
+ &offset);
+ if (rc)
+ return rc;
+
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ rc = pmic_arb_read_cmd_unlocked(ctrl, cmd, offset, sid, addr, buf, len);
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+
return rc;
}
-static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
- u16 addr, const u8 *buf, size_t len)
+static int pmic_arb_fmt_write_cmd(struct spmi_pmic_arb *pmic_arb, u8 opc,
+ u8 sid, u16 addr, size_t len, u32 *cmd,
+ u32 *offset)
{
- struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
- unsigned long flags;
u8 bc = len - 1;
- u32 cmd;
int rc;
- u32 offset;
rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr,
PMIC_ARB_CHANNEL_RW);
if (rc < 0)
return rc;
- offset = rc;
+ *offset = rc;
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
- dev_err(&ctrl->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
+ dev_err(&pmic_arb->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
@@ -420,10 +441,19 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
- cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+ *cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+
+ return 0;
+}
+
+static int pmic_arb_write_cmd_unlocked(struct spmi_controller *ctrl, u32 cmd,
+ u32 offset, u8 sid, u16 addr,
+ const u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ u8 bc = len - 1;
/* Write data to FIFOs */
- raw_spin_lock_irqsave(&pmic_arb->lock, flags);
pmic_arb_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
min_t(u8, bc, 3));
if (bc > 3)
@@ -432,8 +462,62 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
/* Start the transaction */
pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
- rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr,
- PMIC_ARB_CHANNEL_RW);
+ return pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr,
+ PMIC_ARB_CHANNEL_RW);
+}
+
+static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, const u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ u32 cmd, offset;
+ int rc;
+
+ rc = pmic_arb_fmt_write_cmd(pmic_arb, opc, sid, addr, len, &cmd,
+ &offset);
+ if (rc)
+ return rc;
+
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ rc = pmic_arb_write_cmd_unlocked(ctrl, cmd, offset, sid, addr, buf,
+ len);
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+
+ return rc;
+}
+
+static int pmic_arb_masked_write(struct spmi_controller *ctrl, u8 sid, u16 addr,
+ const u8 *buf, const u8 *mask, size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ u32 read_cmd, read_offset, write_cmd, write_offset;
+ u8 temp[PMIC_ARB_MAX_TRANS_BYTES];
+ unsigned long flags;
+ int rc, i;
+
+ rc = pmic_arb_fmt_read_cmd(pmic_arb, SPMI_CMD_EXT_READL, sid, addr, len,
+ &read_cmd, &read_offset);
+ if (rc)
+ return rc;
+
+ rc = pmic_arb_fmt_write_cmd(pmic_arb, SPMI_CMD_EXT_WRITEL, sid, addr,
+ len, &write_cmd, &write_offset);
+ if (rc)
+ return rc;
+
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ rc = pmic_arb_read_cmd_unlocked(ctrl, read_cmd, read_offset, sid, addr,
+ temp, len);
+ if (rc)
+ goto done;
+
+ for (i = 0; i < len; i++)
+ temp[i] = (temp[i] & ~mask[i]) | (buf[i] & mask[i]);
+
+ rc = pmic_arb_write_cmd_unlocked(ctrl, write_cmd, write_offset, sid,
+ addr, temp, len);
+done:
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
@@ -482,6 +566,23 @@ static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
d->irq);
}
+static int qpnpint_spmi_masked_write(struct irq_data *d, u8 reg,
+ const void *buf, const void *mask,
+ size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
+ u8 sid = hwirq_to_sid(d->hwirq);
+ u8 per = hwirq_to_per(d->hwirq);
+ int rc;
+
+ rc = pmic_arb_masked_write(pmic_arb->spmic, sid, (per << 8) + reg, buf,
+ mask, len);
+ if (rc)
+ dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x rc=%d\n",
+ d->irq, rc);
+ return rc;
+}
+
static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
{
u16 ppid = pmic_arb->apid_data[apid].ppid;
@@ -600,18 +701,18 @@ static void qpnpint_irq_unmask(struct irq_data *d)
static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
- struct spmi_pmic_arb_qpnpint_type type;
+ struct spmi_pmic_arb_qpnpint_type type = {0};
+ struct spmi_pmic_arb_qpnpint_type mask;
irq_flow_handler_t flow_handler;
- u8 irq = hwirq_to_irq(d->hwirq);
-
- qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+ u8 irq_bit = BIT(hwirq_to_irq(d->hwirq));
+ int rc;
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- type.type |= BIT(irq);
+ type.type = irq_bit;
if (flow_type & IRQF_TRIGGER_RISING)
- type.polarity_high |= BIT(irq);
+ type.polarity_high = irq_bit;
if (flow_type & IRQF_TRIGGER_FALLING)
- type.polarity_low |= BIT(irq);
+ type.polarity_low = irq_bit;
flow_handler = handle_edge_irq;
} else {
@@ -619,19 +720,23 @@ static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
(flow_type & (IRQF_TRIGGER_LOW)))
return -EINVAL;
- type.type &= ~BIT(irq); /* level trig */
if (flow_type & IRQF_TRIGGER_HIGH)
- type.polarity_high |= BIT(irq);
+ type.polarity_high = irq_bit;
else
- type.polarity_low |= BIT(irq);
+ type.polarity_low = irq_bit;
flow_handler = handle_level_irq;
}
- qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+ mask.type = irq_bit;
+ mask.polarity_high = irq_bit;
+ mask.polarity_low = irq_bit;
+
+ rc = qpnpint_spmi_masked_write(d, QPNPINT_REG_SET_TYPE, &type, &mask,
+ sizeof(type));
irq_set_handler_locked(d, flow_handler);
- return 0;
+ return rc;
}
static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on)
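With this rework, qpnpint_irq_set_type() no longer reads QPNPINT_REG_SET_TYPE in one bus transaction and writes it back in another; the masked write performs the read-modify-write of the shared type/polarity registers atomically under the arbiter lock. Per register byte, the effect is the usual update-under-mask (illustrative helper, not driver code):

    static inline u8 qpnpint_apply_masked(u8 old, u8 mask, u8 val)
    {
    	/* only the bits selected by mask take the new value */
    	return (old & ~mask) | (val & mask);
    }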
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7fec86946131..8d41fdd40657 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -84,10 +84,6 @@ source "drivers/staging/vc04_services/Kconfig"
source "drivers/staging/pi433/Kconfig"
-source "drivers/staging/mt7621-dma/Kconfig"
-
-source "drivers/staging/ralink-gdma/Kconfig"
-
source "drivers/staging/mt7621-dts/Kconfig"
source "drivers/staging/axis-fifo/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index e66e19c45425..02b01949b94e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -32,8 +32,6 @@ obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_PI433) += pi433/
-obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
-obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 632f140dddbc..dfd2b357f484 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -809,7 +809,6 @@ end:
static int axis_fifo_probe(struct platform_device *pdev)
{
- struct resource *r_irq; /* interrupt resources */
struct resource *r_mem; /* IO mem resources */
struct device *dev = &pdev->dev; /* OS device (from device tree) */
struct axis_fifo *fifo = NULL;
@@ -882,16 +881,12 @@ static int axis_fifo_probe(struct platform_device *pdev)
*/
/* get IRQ resource */
- r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!r_irq) {
- dev_err(fifo->dt_device, "no IRQ found for 0x%pa\n",
- &r_mem->start);
- rc = -EIO;
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0)
goto err_initial;
- }
/* request IRQ */
- fifo->irq = r_irq->start;
+ fifo->irq = rc;
rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0,
DRIVER_NAME, fifo);
if (rc) {
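This hunk follows the current kernel convention: platform_get_irq() replaces digging the IRQ out of a struct resource, and it logs its own message on failure. A minimal sketch of the pattern in a probe routine (example_probe and example_irq_handler are hypothetical names):

    static int example_probe(struct platform_device *pdev)
    {
    	int irq;

    	irq = platform_get_irq(pdev, 0);
    	if (irq < 0)
    		return irq;	/* platform_get_irq() already logged it */

    	return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
    				dev_name(&pdev->dev), NULL);
    }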
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index dad1ddcd7b0c..4d29e8c1014e 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -200,9 +200,3 @@ config FB_TFT_UPD161704
depends on FB_TFT
help
Generic Framebuffer support for uPD161704
-
-config FB_TFT_WATTEROTT
- tristate "FB driver for the WATTEROTT LCD Controller"
- depends on FB_TFT
- help
- Generic Framebuffer support for WATTEROTT
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index e87193f7df14..e9cdf0f0a7da 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -36,4 +36,3 @@ obj-$(CONFIG_FB_TFT_TLS8204) += fb_tls8204.o
obj-$(CONFIG_FB_TFT_UC1611) += fb_uc1611.o
obj-$(CONFIG_FB_TFT_UC1701) += fb_uc1701.o
obj-$(CONFIG_FB_TFT_UPD161704) += fb_upd161704.o
-obj-$(CONFIG_FB_TFT_WATTEROTT) += fb_watterott.o
diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
index 7b9ab39e1c1a..9685ca516a0e 100644
--- a/drivers/staging/fbtft/fb_sh1106.c
+++ b/drivers/staging/fbtft/fb_sh1106.c
@@ -173,12 +173,7 @@ static struct fbtft_display display = {
},
};
-FBTFT_REGISTER_DRIVER(DRVNAME, "sinowealth,sh1106", &display);
-
-MODULE_ALIAS("spi:" DRVNAME);
-MODULE_ALIAS("platform:" DRVNAME);
-MODULE_ALIAS("spi:sh1106");
-MODULE_ALIAS("platform:sh1106");
+FBTFT_REGISTER_SPI_DRIVER(DRVNAME, "sinowealth", "sh1106", &display);
MODULE_DESCRIPTION("SH1106 OLED Driver");
MODULE_AUTHOR("Heiner Kallweit");
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
deleted file mode 100644
index a57e1f4feef3..000000000000
--- a/drivers/staging/fbtft/fb_watterott.c
+++ /dev/null
@@ -1,302 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * FB driver for the Watterott LCD Controller
- *
- * Copyright (C) 2013 Noralf Tronnes
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-
-#include "fbtft.h"
-
-#define DRVNAME "fb_watterott"
-#define WIDTH 320
-#define HEIGHT 240
-#define FPS 5
-#define TXBUFLEN 1024
-#define DEFAULT_BRIGHTNESS 50
-
-#define CMD_VERSION 0x01
-#define CMD_LCD_LED 0x10
-#define CMD_LCD_RESET 0x11
-#define CMD_LCD_ORIENTATION 0x20
-#define CMD_LCD_DRAWIMAGE 0x27
-#define COLOR_RGB323 8
-#define COLOR_RGB332 9
-#define COLOR_RGB233 10
-#define COLOR_RGB565 16
-
-static short mode = 565;
-module_param(mode, short, 0000);
-MODULE_PARM_DESC(mode, "RGB color transfer mode: 332, 565 (default)");
-
-static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
-{
- va_list args;
- int i, ret;
- u8 *buf = par->buf;
-
- va_start(args, len);
- for (i = 0; i < len; i++)
- *buf++ = (u8)va_arg(args, unsigned int);
- va_end(args);
-
- fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
- par->info->device, u8, par->buf,
- len, "%s: ", __func__);
-
- ret = par->fbtftops.write(par, par->buf, len);
- if (ret < 0) {
- dev_err(par->info->device,
- "write() failed and returned %d\n", ret);
- return;
- }
-}
-
-static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
-{
- unsigned int start_line, end_line;
- u16 *vmem16 = (u16 *)(par->info->screen_buffer + offset);
- __be16 *pos = par->txbuf.buf + 1;
- __be16 *buf16 = par->txbuf.buf + 10;
- int i, j;
- int ret = 0;
-
- start_line = offset / par->info->fix.line_length;
- end_line = start_line + (len / par->info->fix.line_length) - 1;
-
- /* Set command header. pos: x, y, w, h */
- ((u8 *)par->txbuf.buf)[0] = CMD_LCD_DRAWIMAGE;
- pos[0] = 0;
- pos[2] = cpu_to_be16(par->info->var.xres);
- pos[3] = cpu_to_be16(1);
- ((u8 *)par->txbuf.buf)[9] = COLOR_RGB565;
-
- for (i = start_line; i <= end_line; i++) {
- pos[1] = cpu_to_be16(i);
- for (j = 0; j < par->info->var.xres; j++)
- buf16[j] = cpu_to_be16(*vmem16++);
- ret = par->fbtftops.write(par,
- par->txbuf.buf, 10 + par->info->fix.line_length);
- if (ret < 0)
- return ret;
- udelay(300);
- }
-
- return 0;
-}
-
-static inline int rgb565_to_rgb332(u16 c)
-{
- return ((c & 0xE000) >> 8) | ((c & 000700) >> 6) | ((c & 0x0018) >> 3);
-}
-
-static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
-{
- unsigned int start_line, end_line;
- u16 *vmem16 = (u16 *)(par->info->screen_buffer + offset);
- __be16 *pos = par->txbuf.buf + 1;
- u8 *buf8 = par->txbuf.buf + 10;
- int i, j;
- int ret = 0;
-
- start_line = offset / par->info->fix.line_length;
- end_line = start_line + (len / par->info->fix.line_length) - 1;
-
- /* Set command header. pos: x, y, w, h */
- ((u8 *)par->txbuf.buf)[0] = CMD_LCD_DRAWIMAGE;
- pos[0] = 0;
- pos[2] = cpu_to_be16(par->info->var.xres);
- pos[3] = cpu_to_be16(1);
- ((u8 *)par->txbuf.buf)[9] = COLOR_RGB332;
-
- for (i = start_line; i <= end_line; i++) {
- pos[1] = cpu_to_be16(i);
- for (j = 0; j < par->info->var.xres; j++) {
- buf8[j] = rgb565_to_rgb332(*vmem16);
- vmem16++;
- }
- ret = par->fbtftops.write(par,
- par->txbuf.buf, 10 + par->info->var.xres);
- if (ret < 0)
- return ret;
- udelay(700);
- }
-
- return 0;
-}
-
-static unsigned int firmware_version(struct fbtft_par *par)
-{
- u8 rxbuf[4] = {0, };
-
- write_reg(par, CMD_VERSION);
- par->fbtftops.read(par, rxbuf, 4);
- if (rxbuf[1] != '.')
- return 0;
-
- return (rxbuf[0] - '0') << 8 | (rxbuf[2] - '0') << 4 | (rxbuf[3] - '0');
-}
-
-static int init_display(struct fbtft_par *par)
-{
- int ret;
- unsigned int version;
- u8 save_mode;
-
- /* enable SPI interface by having CS and MOSI low during reset */
- save_mode = par->spi->mode;
- /*
- * Set CS active inverse polarity: just setting SPI_CS_HIGH does not
- * work with GPIO based chip selects that are logically active high
- * but inverted inside the GPIO library, so enforce inverted
- * semantics.
- */
- par->spi->mode ^= SPI_CS_HIGH;
- ret = spi_setup(par->spi);
- if (ret) {
- dev_err(par->info->device,
- "Could not set inverse CS polarity\n");
- return ret;
- }
- write_reg(par, 0x00); /* make sure mode is set */
-
- mdelay(50);
- par->fbtftops.reset(par);
- mdelay(1000);
- par->spi->mode = save_mode;
- ret = spi_setup(par->spi);
- if (ret) {
- dev_err(par->info->device, "Could not restore SPI mode\n");
- return ret;
- }
- write_reg(par, 0x00);
-
- version = firmware_version(par);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Firmware version: %x.%02x\n",
- version >> 8, version & 0xFF);
-
- if (mode == 332)
- par->fbtftops.write_vmem = write_vmem_8bit;
- return 0;
-}
-
-static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
-{
- /* not used on this controller */
-}
-
-static int set_var(struct fbtft_par *par)
-{
- u8 rotate;
-
- /* this controller rotates clock wise */
- switch (par->info->var.rotate) {
- case 90:
- rotate = 27;
- break;
- case 180:
- rotate = 18;
- break;
- case 270:
- rotate = 9;
- break;
- default:
- rotate = 0;
- }
- write_reg(par, CMD_LCD_ORIENTATION, rotate);
-
- return 0;
-}
-
-static int verify_gpios(struct fbtft_par *par)
-{
- if (!par->gpio.reset) {
- dev_err(par->info->device, "Missing 'reset' gpio. Aborting.\n");
- return -EINVAL;
- }
- return 0;
-}
-
-#ifdef CONFIG_FB_BACKLIGHT
-static int backlight_chip_update_status(struct backlight_device *bd)
-{
- struct fbtft_par *par = bl_get_data(bd);
- int brightness = bd->props.brightness;
-
- fbtft_par_dbg(DEBUG_BACKLIGHT, par,
- "%s: brightness=%d, power=%d, fb_blank=%d\n", __func__,
- bd->props.brightness, bd->props.power,
- bd->props.fb_blank);
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- write_reg(par, CMD_LCD_LED, brightness);
-
- return 0;
-}
-
-static const struct backlight_ops bl_ops = {
- .update_status = backlight_chip_update_status,
-};
-
-static void register_chip_backlight(struct fbtft_par *par)
-{
- struct backlight_device *bd;
- struct backlight_properties bl_props = { 0, };
-
- bl_props.type = BACKLIGHT_RAW;
- bl_props.power = FB_BLANK_POWERDOWN;
- bl_props.max_brightness = 100;
- bl_props.brightness = DEFAULT_BRIGHTNESS;
-
- bd = backlight_device_register(dev_driver_string(par->info->device),
- par->info->device, par, &bl_ops,
- &bl_props);
- if (IS_ERR(bd)) {
- dev_err(par->info->device,
- "cannot register backlight device (%ld)\n",
- PTR_ERR(bd));
- return;
- }
- par->info->bl_dev = bd;
-
- if (!par->fbtftops.unregister_backlight)
- par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
-}
-#else
-#define register_chip_backlight NULL
-#endif
-
-static struct fbtft_display display = {
- .regwidth = 8,
- .buswidth = 8,
- .width = WIDTH,
- .height = HEIGHT,
- .fps = FPS,
- .txbuflen = TXBUFLEN,
- .fbtftops = {
- .write_register = write_reg8_bus8,
- .write_vmem = write_vmem,
- .init_display = init_display,
- .set_addr_win = set_addr_win,
- .set_var = set_var,
- .verify_gpios = verify_gpios,
- .register_backlight = register_chip_backlight,
- },
-};
-
-FBTFT_REGISTER_DRIVER(DRVNAME, "watterott,openlcd", &display);
-
-MODULE_ALIAS("spi:" DRVNAME);
-
-MODULE_DESCRIPTION("FB driver for the Watterott LCD Controller");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 6869f3603b0e..4cdec34e23d2 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -346,6 +346,47 @@ static void __exit fbtft_driver_module_exit(void) \
module_init(fbtft_driver_module_init); \
module_exit(fbtft_driver_module_exit);
+#define FBTFT_REGISTER_SPI_DRIVER(_name, _comp_vend, _comp_dev, _display) \
+ \
+static int fbtft_driver_probe_spi(struct spi_device *spi) \
+{ \
+ return fbtft_probe_common(_display, spi, NULL); \
+} \
+ \
+static int fbtft_driver_remove_spi(struct spi_device *spi) \
+{ \
+ struct fb_info *info = spi_get_drvdata(spi); \
+ \
+ fbtft_remove_common(&spi->dev, info); \
+ return 0; \
+} \
+ \
+static const struct of_device_id dt_ids[] = { \
+ { .compatible = _comp_vend "," _comp_dev }, \
+ {}, \
+}; \
+ \
+MODULE_DEVICE_TABLE(of, dt_ids); \
+ \
+static const struct spi_device_id spi_ids[] = { \
+ { .name = _comp_dev }, \
+ {}, \
+}; \
+ \
+MODULE_DEVICE_TABLE(spi, spi_ids); \
+ \
+static struct spi_driver fbtft_driver_spi_driver = { \
+ .driver = { \
+ .name = _name, \
+ .of_match_table = dt_ids, \
+ }, \
+ .id_table = spi_ids, \
+ .probe = fbtft_driver_probe_spi, \
+ .remove = fbtft_driver_remove_spi, \
+}; \
+ \
+module_spi_driver(fbtft_driver_spi_driver);
+
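Usage of the new macro can be seen in the fb_sh1106.c hunk earlier in this diff: the vendor and device parts of the compatible string are passed separately so that the device part can also seed the SPI ID table, replacing the hand-rolled MODULE_ALIAS list:

    /* matches "sinowealth,sh1106" in DT and "sh1106" on the SPI bus */
    FBTFT_REGISTER_SPI_DRIVER(DRVNAME, "sinowealth", "sh1106", &display);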
/* Debug macros */
/* shorthand debug levels */
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
index 525cf8f8394f..0a0f0a394c84 100644
--- a/drivers/staging/greybus/audio_manager_module.c
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -142,11 +142,12 @@ static struct attribute *gb_audio_module_default_attrs[] = {
&gb_audio_module_op_devices_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
+ATTRIBUTE_GROUPS(gb_audio_module_default);
static struct kobj_type gb_audio_module_type = {
.sysfs_ops = &gb_audio_module_sysfs_ops,
.release = gb_audio_module_release,
- .default_attrs = gb_audio_module_default_attrs,
+ .default_groups = gb_audio_module_default_groups,
};
static void send_add_uevent(struct gb_audio_manager_module *module)
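For reference, ATTRIBUTE_GROUPS(gb_audio_module_default) builds the *_groups array from the existing *_attrs list; it expands to roughly the following (simplified sketch of the macro in <linux/sysfs.h>):

    static const struct attribute_group gb_audio_module_default_group = {
    	.attrs = gb_audio_module_default_attrs,
    };

    static const struct attribute_group *gb_audio_module_default_groups[] = {
    	&gb_audio_module_default_group,
    	NULL,
    };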
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 1e613d42d823..62d7674852be 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -147,6 +147,9 @@ static const char **gb_generate_enum_strings(struct gbaudio_module_info *gb,
items = le32_to_cpu(gbenum->items);
strings = devm_kcalloc(gb->dev, items, sizeof(char *), GFP_KERNEL);
+ if (!strings)
+ return NULL;
+
data = gbenum->names;
for (i = 0; i < items; i++) {
@@ -655,6 +658,8 @@ static int gbaudio_tplg_create_enum_kctl(struct gbaudio_module_info *gb,
/* since count=1, and reg is dummy */
gbe->items = le32_to_cpu(gb_enum->items);
gbe->texts = gb_generate_enum_strings(gb, gb_enum);
+ if (!gbe->texts)
+ return -ENOMEM;
/* debug enum info */
dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gbe->items,
@@ -862,6 +867,8 @@ static int gbaudio_tplg_create_enum_ctl(struct gbaudio_module_info *gb,
/* since count=1, and reg is dummy */
gbe->items = le32_to_cpu(gb_enum->items);
gbe->texts = gb_generate_enum_strings(gb, gb_enum);
+ if (!gbe->texts)
+ return -ENOMEM;
/* debug enum info */
dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gbe->items,
@@ -974,6 +981,44 @@ static int gbaudio_widget_event(struct snd_soc_dapm_widget *w,
return ret;
}
+static const struct snd_soc_dapm_widget gbaudio_widgets[] = {
+ [snd_soc_dapm_spk] = SND_SOC_DAPM_SPK(NULL, gbcodec_event_spk),
+ [snd_soc_dapm_hp] = SND_SOC_DAPM_HP(NULL, gbcodec_event_hp),
+ [snd_soc_dapm_mic] = SND_SOC_DAPM_MIC(NULL, gbcodec_event_int_mic),
+ [snd_soc_dapm_output] = SND_SOC_DAPM_OUTPUT(NULL),
+ [snd_soc_dapm_input] = SND_SOC_DAPM_INPUT(NULL),
+ [snd_soc_dapm_switch] = SND_SOC_DAPM_SWITCH_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_pga] = SND_SOC_DAPM_PGA_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_mixer] = SND_SOC_DAPM_MIXER_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_mux] = SND_SOC_DAPM_MUX_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_aif_in] = SND_SOC_DAPM_AIF_IN_E(NULL, NULL, 0,
+ SND_SOC_NOPM, 0, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_aif_out] = SND_SOC_DAPM_AIF_OUT_E(NULL, NULL, 0,
+ SND_SOC_NOPM, 0, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+};
+
static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
struct snd_soc_dapm_widget *dw,
struct gb_audio_widget *w, int *w_size)
@@ -1034,6 +1079,10 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
csize += le16_to_cpu(gbenum->names_length);
control->texts = (const char * const *)
gb_generate_enum_strings(module, gbenum);
+ if (!control->texts) {
+ ret = -ENOMEM;
+ goto error;
+ }
control->items = le32_to_cpu(gbenum->items);
} else {
csize = sizeof(struct gb_audio_control);
@@ -1052,77 +1101,37 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
switch (w->type) {
case snd_soc_dapm_spk:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_SPK(w->name, gbcodec_event_spk);
+ *dw = gbaudio_widgets[w->type];
module->op_devices |= GBAUDIO_DEVICE_OUT_SPEAKER;
break;
case snd_soc_dapm_hp:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_HP(w->name, gbcodec_event_hp);
+ *dw = gbaudio_widgets[w->type];
module->op_devices |= (GBAUDIO_DEVICE_OUT_WIRED_HEADSET
| GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE);
module->ip_devices |= GBAUDIO_DEVICE_IN_WIRED_HEADSET;
break;
case snd_soc_dapm_mic:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_MIC(w->name, gbcodec_event_int_mic);
+ *dw = gbaudio_widgets[w->type];
module->ip_devices |= GBAUDIO_DEVICE_IN_BUILTIN_MIC;
break;
case snd_soc_dapm_output:
- *dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_OUTPUT(w->name);
- break;
case snd_soc_dapm_input:
- *dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_INPUT(w->name);
- break;
case snd_soc_dapm_switch:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_SWITCH_E(w->name, SND_SOC_NOPM, 0, 0,
- widget_kctls,
- gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
- break;
case snd_soc_dapm_pga:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_PGA_E(w->name, SND_SOC_NOPM, 0, 0, NULL, 0,
- gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
- break;
case snd_soc_dapm_mixer:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_MIXER_E(w->name, SND_SOC_NOPM, 0, 0, NULL,
- 0, gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
- break;
case snd_soc_dapm_mux:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_MUX_E(w->name, SND_SOC_NOPM, 0, 0,
- widget_kctls, gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
+ *dw = gbaudio_widgets[w->type];
break;
case snd_soc_dapm_aif_in:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_AIF_IN_E(w->name, w->sname, 0,
- SND_SOC_NOPM,
- 0, 0, gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
- break;
case snd_soc_dapm_aif_out:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_AIF_OUT_E(w->name, w->sname, 0,
- SND_SOC_NOPM,
- 0, 0, gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
+ *dw = gbaudio_widgets[w->type];
+ dw->sname = w->sname;
break;
default:
ret = -EINVAL;
goto error;
}
+ dw->name = w->name;
dev_dbg(module->dev, "%s: widget of type %d created\n", dw->name,
dw->id);
@@ -1183,6 +1192,10 @@ static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
csize += le16_to_cpu(gbenum->names_length);
control->texts = (const char * const *)
gb_generate_enum_strings(module, gbenum);
+ if (!control->texts) {
+ ret = -ENOMEM;
+ goto error;
+ }
control->items = le32_to_cpu(gbenum->items);
} else {
csize = sizeof(struct gb_audio_control);
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index e3aaae920847..b81cfa74edb7 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -26,6 +26,8 @@ source "drivers/staging/media/hantro/Kconfig"
source "drivers/staging/media/imx/Kconfig"
+source "drivers/staging/media/max96712/Kconfig"
+
source "drivers/staging/media/meson/vdec/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 5b5afc5b03a0..7e2c86e3695d 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INTEL_ATOMISP) += atomisp/
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
+obj-$(CONFIG_VIDEO_MAX96712) += max96712/
obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rkvdec/
diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile
index 606b7754fdfd..2485d7b3fee2 100644
--- a/drivers/staging/media/atomisp/Makefile
+++ b/drivers/staging/media/atomisp/Makefile
@@ -27,20 +27,16 @@ atomisp-objs += \
pci/sh_css_firmware.o \
pci/sh_css_host_data.o \
pci/sh_css_hrt.o \
- pci/sh_css_metadata.o \
pci/sh_css_metrics.o \
pci/sh_css_mipi.o \
pci/sh_css_mmu.o \
- pci/sh_css_morph.o \
pci/sh_css.o \
pci/sh_css_param_dvs.o \
pci/sh_css_param_shading.o \
pci/sh_css_params.o \
pci/sh_css_properties.o \
- pci/sh_css_shading.o \
pci/sh_css_sp.o \
pci/sh_css_stream_format.o \
- pci/sh_css_stream.o \
pci/sh_css_version.o \
pci/base/circbuf/src/circbuf.o \
pci/base/refcount/src/refcount.o \
@@ -53,6 +49,9 @@ atomisp-objs += \
pci/hmm/hmm.o \
pci/hmm/hmm_reserved_pool.o \
pci/ia_css_device_access.o \
+ pci/ia_css_isp_configs.o \
+ pci/ia_css_isp_states.o \
+ pci/ia_css_isp_params.o \
pci/isp/kernels/aa/aa_2/ia_css_aa2.host.o \
pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.o \
pci/isp/kernels/anr/anr_2/ia_css_anr2.host.o \
@@ -154,21 +153,13 @@ atomisp-objs += \
pci/hive_isp_css_common/host/timed_ctrl.o \
pci/hive_isp_css_common/host/vmem.o \
pci/hive_isp_css_shared/host/tag.o \
- pci/system_local.o \
-
-obj-byt = \
- pci/css_2400_system/hive/ia_css_isp_configs.o \
- pci/css_2400_system/hive/ia_css_isp_params.o \
- pci/css_2400_system/hive/ia_css_isp_states.o \
+ pci/system_local.o
# These will be needed when clean merge CHT support nicely into the driver
# Keep them here handy for when we get to that point
#
obj-cht = \
- pci/css_2401_system/hive/ia_css_isp_configs.o \
- pci/css_2401_system/hive/ia_css_isp_params.o \
- pci/css_2401_system/hive/ia_css_isp_states.o \
pci/css_2401_system/host/csi_rx.o \
pci/css_2401_system/host/ibuf_ctrl.o \
pci/css_2401_system/host/isys_dma.o \
@@ -306,10 +297,8 @@ INCLUDES += \
-I$(atomisp)/pci/runtime/rmgr/interface/ \
-I$(atomisp)/pci/runtime/spctrl/interface/ \
-I$(atomisp)/pci/runtime/tagger/interface/ \
- -I$(atomisp)/pci/css_2400_system/hive/ \
-I$(atomisp)/pci/css_2401_system/ \
-I$(atomisp)/pci/css_2401_system/host/ \
- -I$(atomisp)/pci/css_2401_system/hive/ \
-I$(atomisp)/pci/css_2401_system/hrt/
DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
@@ -324,10 +313,7 @@ ifeq ($(CONFIG_VIDEO_ATOMISP_ISP2401),y)
atomisp-objs += \
$(obj-cht) \
pci/runtime/isys/src/ibuf_ctrl_rmgr.o
-DEFINES += -DISP2401 -DISP2401_NEW_INPUT_SYSTEM -DSYSTEM_hive_isp_css_2401_system
-else
-atomisp-objs += $(obj-byt)
-DEFINES += -DISP2400 -DSYSTEM_hive_isp_css_2400_system
+DEFINES += -DISP2401
endif
ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
index 2d1ef9eb262a..43b842043f29 100644
--- a/drivers/staging/media/atomisp/TODO
+++ b/drivers/staging/media/atomisp/TODO
@@ -1,92 +1,161 @@
+For both Cherrytrail (CHT) and Baytrail (BYT), the driver
+requires the "candrpv_0415_20150521_0458" firmware version.
+Note that the firmware file differs depending on the ISP
+model, so the files are stored under different names:
+
+- for BYT: /lib/firmware/shisp_2400b0_v21.bin
+
+  Warning: The driver has not yet been tested on BYT.
+
+- for CHT: /lib/firmware/shisp_2401a0_v21.bin
+
+ https://github.com/intel-aero/meta-intel-aero-base/blob/master/recipes-kernel/linux/linux-yocto/shisp_2401a0_v21.bin
+
NOTE:
=====
-While the driver probes the hardware and reports itself as a
-V4L2 driver, there are still some issues preventing it to
-stream (at least it doesn't with the standard V4L2 applications.
-Didn't test yet with some custom-made app for this driver).
-Solving the related bugs and issues preventing it to work is
-needed (items 6 and 7 from the list below).
+This driver currently doesn't work with most V4L2 applications,
+as there are still some issues with implementing certain APIs
+in the standard way.
+
+Also, currently only the USERPTR streaming mode works.
+
+In order to test, you need to know the sensor's resolution.
+This can be checked with:
+
+$ v4l2-ctl --get-fmt-video
+ Format Video Capture:
+ Width/Height : 1600/1200
+ ...
+
+It is known to work with:
+
+- v4l2grab, in the contrib/test directory of https://git.linuxtv.org/v4l-utils.git/
+
+  The requested resolution must not exceed the maximum resolution
+  supported by the sensor, or the capture will fail. So, for the
+  1600x1200 sensor reported above, the driver can be tested with:
+
+ v4l2grab -f YUYV -x 1600 -y 1200 -d /dev/video2 -u
+
+- NVT at https://github.com/intel/nvt
+
+ $ ./v4l2n -o testimage_@.raw \
+ --device /dev/video2 \
+ --input 0 \
+ --exposure=30000,30000,30000,30000 \
+ --parm type=1,capturemode=CI_MODE_PREVIEW \
+ --fmt type=1,width=1600,height=1200,pixelformat=YUYV \
+ --reqbufs count=2,memory=USERPTR \
+ --parameters=wb_config.r=32768,wb_config.gr=21043,wb_config.gb=21043,wb_config.b=30863 \
+ --capture=20
+
+  As the output is in raw format, images need to be converted with
+  the following (WIDTH, HEIGHT and FORMAT must match the capture):
+
+ $ for i in $(seq 0 19); do
+ name="testimage_$(printf "%03i" $i)"
+ ./raw2pnm -x$WIDTH -y$HEIGHT -f$FORMAT $name.raw $name.pnm
+ rm $name.raw
+ done
TODO
====
-1. The atomisp doesn't rely at the usual i2c stuff to discover the
- sensors. Instead, it calls a function from atomisp_gmin_platform.c.
- There are some hacks added there for it to wait for sensors to be
- probed (with a timeout of 2 seconds or so).
- This should be converted to the usual way, using V4L2 async subdev
- framework to wait for cameras to be probed;
+1. Fix support for MMAP streaming mode. This is required for most
+ V4L2 applications;
-2. Use ACPI _DSM table - DONE!
+2. Implement and/or fix V4L2 ioctls in order to allow a normal app to
+ use it;
-3. Switch the driver to use pm_runtime stuff. Right now, it probes the
- existing PMIC code and sensors call it directly.
+3. Ensure that the driver will pass v4l2-compliance tests;
-4. There's a problem at the sensor drivers: when trying to set a video
- format, the atomisp main driver calls the sensor drivers with the
- sensor turned off. This causes them to fail.
+4. Get manufacturer's authorization to redistribute the binaries for
+ the firmware files;
- The only exception is the atomisp-ov2880, which has a hack inside it
- to turn it on when VIDIOC_S_FMT is called.
+5. Remove VIDEO_ATOMISP_ISP2401, making the driver auto-detect the
+ register address differences between ISP2400 and ISP2401;
- The right fix seems to power on the sensor when a video device is
- opened (or at the first VIDIOC_ ioctl - except for VIDIOC_QUERYCAP),
- powering it down at close() syscall.
+6. Clean up the driver code, removing the abstraction layers inside it;
- Such kind of control would need to be done inside the atomisp driver,
- not at the sensors code.
+7. The atomisp driver doesn't rely on the usual I2C machinery to discover the
+ sensors. Instead, it calls a function from atomisp_gmin_platform.c.
+ There are some hacks added there for it to wait for sensors to be
+ probed (with a timeout of 2 seconds or so). This should be converted
+ to the usual way, using V4L2 async subdev framework to wait for
+ cameras to be probed;
-5. There are several issues related to memory management, causing
- crashes. The atomisp splits the memory management on three separate
- regions:
+8. Switch to standard V4L2 sub-device API for sensor and lens. In
+ particular, the user space API needs to support V4L2 controls as
+ defined in the V4L2 spec and references to atomisp must be removed from
+ these drivers.
+
+9. Use LED flash API for flash LED drivers such as LM3554 (which already
+ has a LED class driver).
+
+10. Migrate the sensor drivers out of staging, or re-use existing
+    drivers;
+
+11. Switch the driver to use pm_runtime stuff. Right now, it probes the
+ existing PMIC code and sensors call it directly.
+
+12. There's a problem in the sensor drivers: when trying to set a video
+ format, the atomisp main driver calls the sensor drivers with the
+ sensor turned off. This causes them to fail.
+
+    This was fixed in atomisp-ov2880, which has a hack inside it
+    to turn the sensor on when VIDIOC_S_FMT is called, but this has
+    to be checked in the other drivers as well.
+
+ The right fix seems to power on the sensor when a video device is
+ opened (or at the first VIDIOC_ ioctl - except for VIDIOC_QUERYCAP),
+ powering it down at close() syscall.
+
+ Such kind of control would need to be done inside the atomisp driver,
+ not at the sensors code.
+
+13. There are several issues related to memory management that can
+    cause crashes and/or memory leaks. The atomisp driver splits its
+    memory management into three separate regions:
- dynamic pool;
- reserved pool;
- generic pool
- The code implementing it is at:
+ The code implementing it is at:
drivers/staging/media/atomisp/pci/hmm/
- It also has a separate code for managing DMA buffers at:
+ It also has a separate code for managing DMA buffers at:
drivers/staging/media/atomisp/pci/mmu/
- The code there is really dirty, ugly and probably wrong. I fixed
- one bug there already, but the best would be to just trash it and use
- something else. Maybe the code from the newer intel driver could
- serve as a model:
+ The code there is really dirty, ugly and probably wrong. I fixed
+ one bug there already, but the best would be to just trash it and use
+ something else. Maybe the code from the newer intel driver could
+ serve as a model:
drivers/staging/media/ipu3/ipu3-mmu.c
- But converting it to use something like that is painful and may
- cause some breakages.
-
-6. There is some issues at the frame receive logic, causing the
- DQBUF ioctls to fail.
-
-7. A single AtomISP driver needs to be implemented to support both
- Baytrail (BYT) and Cherrytail (CHT) platforms at the same time.
- The current driver is a mechanical and hand combined merge of the
- two using several runtime macros, plus some ifdef ISP2401 to select the
- CHT version. Yet, there are some ISP-specific headers that change the
- driver's behavior during compile time.
+ But converting it to use something like that is painful and may
+ cause some breakages.
-8. The file structure needs to get tidied up to resemble a normal Linux
- driver.
+14. The file structure needs to get tidied up to resemble a normal Linux
+ driver.
-9. Lots of the midlayer glue. unused code and abstraction needs removing.
+15. Lots of midlayer glue, unused code and abstractions need removing.
-10. The AtomISP driver includes some special IOCTLS (ATOMISP_IOC_XXXX_XXXX)
+16. The AtomISP driver includes some special IOCTLS (ATOMISP_IOC_XXXX_XXXX)
and controls that require some cleanup. Some of those code may have
been removed during the cleanups. They could be needed in order to
- properly support 3A algorithms
+ properly support 3A algorithms.
Such IOCTL interface needs more documentation. The better would
be to use something close to the interface used by the IPU3 IMGU driver.
-11. The ISP code has some dependencies of the exact FW version.
+17. The ISP code has some dependencies of the exact FW version.
The version defined in pci/sh_css_firmware.c:
BYT (isp2400): "irci_stable_candrpv_0415_20150521_0458"
@@ -106,24 +175,16 @@ TODO
there are any specific things that can be done to fold in support for
multiple firmware versions.
-12. Switch to standard V4L2 sub-device API for sensor and lens. In
- particular, the user space API needs to support V4L2 controls as
- defined in the V4L2 spec and references to atomisp must be removed from
- these drivers.
-
-13. Use LED flash API for flash LED drivers such as LM3554 (which already
- has a LED class driver).
-14. Switch from videobuf1 to videobuf2. Videobuf1 is being removed!
+18. Switch from videobuf1 to videobuf2. Videobuf1 is being removed!
-15. Correct Coding Style. Please refrain sending coding style patches
+19. Correct Coding Style. Please refrain sending coding style patches
for this driver until the other work is done, as there will be a lot
of code churn until this driver becomes functional again.
-16. Fix private ioctls to not need a compat_ioctl handler for running
- 32-bit tasks. The compat code has been removed because of bugs,
- and should not be needed for modern drivers. Fixing this properly
- unfortunately means an incompatible ABI change.
+20. Remove the pipeline-setup logic from inside the driver, moving it
+    to libcamera, and implement MC support.
+
Limitations
===========
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index 687888d643df..cbc8b1d91995 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -266,7 +266,7 @@ static int gc0310_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
{
struct gc0310_device *dev = to_gc0310_sensor(sd);
- *val = gc0310_res[dev->fmt_idx].bin_factor_x;
+ *val = dev->res->bin_factor_x;
return 0;
}
@@ -275,7 +275,7 @@ static int gc0310_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
{
struct gc0310_device *dev = to_gc0310_sensor(sd);
- *val = gc0310_res[dev->fmt_idx].bin_factor_y;
+ *val = dev->res->bin_factor_y;
return 0;
}
@@ -878,76 +878,6 @@ static int gc0310_s_power(struct v4l2_subdev *sd, int on)
return gc0310_init(sd);
}
-/*
- * distance - calculate the distance
- * @res: resolution
- * @w: width
- * @h: height
- *
- * Get the gap between resolution and w/h.
- * res->width/height smaller than w/h wouldn't be considered.
- * Returns the value of gap or -1 if fail.
- */
-#define LARGEST_ALLOWED_RATIO_MISMATCH 800
-static int distance(struct gc0310_resolution *res, u32 w, u32 h)
-{
- unsigned int w_ratio = (res->width << 13) / w;
- unsigned int h_ratio;
- int match;
-
- if (h == 0)
- return -1;
- h_ratio = (res->height << 13) / h;
- if (h_ratio == 0)
- return -1;
- match = abs(((w_ratio << 13) / h_ratio) - 8192);
-
- if ((w_ratio < 8192) || (h_ratio < 8192) ||
- (match > LARGEST_ALLOWED_RATIO_MISMATCH))
- return -1;
-
- return w_ratio + h_ratio;
-}
-
-/* Return the nearest higher resolution index */
-static int nearest_resolution_index(int w, int h)
-{
- int i;
- int idx = -1;
- int dist;
- int min_dist = INT_MAX;
- struct gc0310_resolution *tmp_res = NULL;
-
- for (i = 0; i < N_RES; i++) {
- tmp_res = &gc0310_res[i];
- dist = distance(tmp_res, w, h);
- if (dist == -1)
- continue;
- if (dist < min_dist) {
- min_dist = dist;
- idx = i;
- }
- }
-
- return idx;
-}
-
-static int get_resolution_index(int w, int h)
-{
- int i;
-
- for (i = 0; i < N_RES; i++) {
- if (w != gc0310_res[i].width)
- continue;
- if (h != gc0310_res[i].height)
- continue;
-
- return i;
- }
-
- return -1;
-}
-
/* TODO: remove it. */
static int startup(struct v4l2_subdev *sd)
{
@@ -955,7 +885,7 @@ static int startup(struct v4l2_subdev *sd)
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
- ret = gc0310_write_reg_array(client, gc0310_res[dev->fmt_idx].regs);
+ ret = gc0310_write_reg_array(client, dev->res->regs);
if (ret) {
dev_err(&client->dev, "gc0310 write register err.\n");
return ret;
@@ -972,8 +902,8 @@ static int gc0310_set_fmt(struct v4l2_subdev *sd,
struct gc0310_device *dev = to_gc0310_sensor(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct camera_mipi_info *gc0310_info = NULL;
+ struct gc0310_resolution *res;
int ret = 0;
- int idx = 0;
if (format->pad)
return -EINVAL;
@@ -987,15 +917,16 @@ static int gc0310_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&dev->input_lock);
- idx = nearest_resolution_index(fmt->width, fmt->height);
- if (idx == -1) {
- /* return the largest resolution */
- fmt->width = gc0310_res[N_RES - 1].width;
- fmt->height = gc0310_res[N_RES - 1].height;
- } else {
- fmt->width = gc0310_res[idx].width;
- fmt->height = gc0310_res[idx].height;
- }
+ res = v4l2_find_nearest_size(gc0310_res_preview,
+ ARRAY_SIZE(gc0310_res_preview), width,
+ height, fmt->width, fmt->height);
+ if (!res)
+ res = &gc0310_res_preview[N_RES - 1];
+
+ fmt->width = res->width;
+ fmt->height = res->height;
+ dev->res = res;
+
fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
@@ -1004,23 +935,15 @@ static int gc0310_set_fmt(struct v4l2_subdev *sd,
return 0;
}
- dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
- if (dev->fmt_idx == -1) {
- dev_err(&client->dev, "get resolution fail\n");
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
-
dev_dbg(&client->dev, "%s: before gc0310_write_reg_array %s\n",
- __func__, gc0310_res[dev->fmt_idx].desc);
+ __func__, dev->res->desc);
ret = startup(sd);
if (ret) {
dev_err(&client->dev, "gc0310 startup err\n");
goto err;
}
- ret = gc0310_get_intg_factor(client, gc0310_info,
- &gc0310_res[dev->fmt_idx]);
+ ret = gc0310_get_intg_factor(client, gc0310_info, dev->res);
if (ret) {
dev_err(&client->dev, "failed to get integration_factor\n");
goto err;
@@ -1044,8 +967,8 @@ static int gc0310_get_fmt(struct v4l2_subdev *sd,
if (!fmt)
return -EINVAL;
- fmt->width = gc0310_res[dev->fmt_idx].width;
- fmt->height = gc0310_res[dev->fmt_idx].height;
+ fmt->width = dev->res->width;
+ fmt->height = dev->res->height;
fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
return 0;
@@ -1199,7 +1122,7 @@ static int gc0310_g_frame_interval(struct v4l2_subdev *sd,
struct gc0310_device *dev = to_gc0310_sensor(sd);
interval->interval.numerator = 1;
- interval->interval.denominator = gc0310_res[dev->fmt_idx].fps;
+ interval->interval.denominator = dev->res->fps;
return 0;
}
@@ -1237,7 +1160,7 @@ static int gc0310_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
struct gc0310_device *dev = to_gc0310_sensor(sd);
mutex_lock(&dev->input_lock);
- *frames = gc0310_res[dev->fmt_idx].skip_frames;
+ *frames = dev->res->skip_frames;
mutex_unlock(&dev->input_lock);
return 0;
@@ -1301,7 +1224,7 @@ static int gc0310_probe(struct i2c_client *client)
mutex_init(&dev->input_lock);
- dev->fmt_idx = 0;
+ dev->res = &gc0310_res_preview[0];
v4l2_i2c_subdev_init(&dev->sd, client, &gc0310_ops);
pdata = gmin_camera_platform_data(&dev->sd,
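The gc0310 hunks above replace the driver-local distance(),
nearest_resolution_index() and get_resolution_index() trio with the common
v4l2_find_nearest_size() helper plus a cached dev->res pointer. A user-space
model of that selection is sketched below; the squared-error metric is an
assumption (the in-tree helper's exact weighting differs), but the shape is
the same.

#include <limits.h>
#include <stddef.h>

struct mode { int width, height; };

static const struct mode *find_nearest(const struct mode *modes, size_t n,
				       int w, int h)
{
	const struct mode *best = NULL;
	unsigned long best_err = ULONG_MAX;

	for (size_t i = 0; i < n; i++) {
		long dw = modes[i].width - w;
		long dh = modes[i].height - h;
		unsigned long err = dw * dw + dh * dh;

		if (err < best_err) {
			best_err = err;
			best = &modes[i];
		}
	}
	return best;	/* NULL only for an empty table, hence the fallback */
}

Caching the chosen entry in dev->res also lets the g_frame_interval and
g_skip_frames paths drop the gc0310_res[dev->fmt_idx] indexing seen above.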
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
index 4d769590f2d3..0e6b2e6100d1 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -570,14 +570,16 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
{
struct gc2235_device *dev = to_gc2235_sensor(sd);
- int ret = -1;
+ int ret;
if (!dev || !dev->platform_data)
return -ENODEV;
- ret |= dev->platform_data->gpio1_ctrl(sd, !flag);
+ ret = dev->platform_data->gpio1_ctrl(sd, !flag);
usleep_range(60, 90);
- return dev->platform_data->gpio0_ctrl(sd, flag);
+ ret |= dev->platform_data->gpio0_ctrl(sd, flag);
+
+ return ret;
}
static int power_up(struct v4l2_subdev *sd)
@@ -670,76 +672,6 @@ static int gc2235_s_power(struct v4l2_subdev *sd, int on)
return ret;
}
-/*
- * distance - calculate the distance
- * @res: resolution
- * @w: width
- * @h: height
- *
- * Get the gap between resolution and w/h.
- * res->width/height smaller than w/h wouldn't be considered.
- * Returns the value of gap or -1 if fail.
- */
-#define LARGEST_ALLOWED_RATIO_MISMATCH 800
-static int distance(struct gc2235_resolution *res, u32 w, u32 h)
-{
- unsigned int w_ratio = (res->width << 13) / w;
- unsigned int h_ratio;
- int match;
-
- if (h == 0)
- return -1;
- h_ratio = (res->height << 13) / h;
- if (h_ratio == 0)
- return -1;
- match = abs(((w_ratio << 13) / h_ratio) - 8192);
-
- if ((w_ratio < 8192) || (h_ratio < 8192) ||
- (match > LARGEST_ALLOWED_RATIO_MISMATCH))
- return -1;
-
- return w_ratio + h_ratio;
-}
-
-/* Return the nearest higher resolution index */
-static int nearest_resolution_index(int w, int h)
-{
- int i;
- int idx = -1;
- int dist;
- int min_dist = INT_MAX;
- struct gc2235_resolution *tmp_res = NULL;
-
- for (i = 0; i < N_RES; i++) {
- tmp_res = &gc2235_res[i];
- dist = distance(tmp_res, w, h);
- if (dist == -1)
- continue;
- if (dist < min_dist) {
- min_dist = dist;
- idx = i;
- }
- }
-
- return idx;
-}
-
-static int get_resolution_index(int w, int h)
-{
- int i;
-
- for (i = 0; i < N_RES; i++) {
- if (w != gc2235_res[i].width)
- continue;
- if (h != gc2235_res[i].height)
- continue;
-
- return i;
- }
-
- return -1;
-}
-
static int startup(struct v4l2_subdev *sd)
{
struct gc2235_device *dev = to_gc2235_sensor(sd);
@@ -758,7 +690,7 @@ static int startup(struct v4l2_subdev *sd)
gc2235_write_reg_array(client, gc2235_init_settings);
}
- ret = gc2235_write_reg_array(client, gc2235_res[dev->fmt_idx].regs);
+ ret = gc2235_write_reg_array(client, dev->res->regs);
if (ret) {
dev_err(&client->dev, "gc2235 write register err.\n");
return ret;
@@ -776,8 +708,8 @@ static int gc2235_set_fmt(struct v4l2_subdev *sd,
struct gc2235_device *dev = to_gc2235_sensor(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct camera_mipi_info *gc2235_info = NULL;
+ struct gc2235_resolution *res;
int ret = 0;
- int idx;
gc2235_info = v4l2_get_subdev_hostdata(sd);
if (!gc2235_info)
@@ -786,16 +718,18 @@ static int gc2235_set_fmt(struct v4l2_subdev *sd,
return -EINVAL;
if (!fmt)
return -EINVAL;
+
mutex_lock(&dev->input_lock);
- idx = nearest_resolution_index(fmt->width, fmt->height);
- if (idx == -1) {
- /* return the largest resolution */
- fmt->width = gc2235_res[N_RES - 1].width;
- fmt->height = gc2235_res[N_RES - 1].height;
- } else {
- fmt->width = gc2235_res[idx].width;
- fmt->height = gc2235_res[idx].height;
- }
+ res = v4l2_find_nearest_size(gc2235_res_preview,
+ ARRAY_SIZE(gc2235_res_preview), width,
+ height, fmt->width, fmt->height);
+ if (!res)
+ res = &gc2235_res_preview[N_RES - 1];
+
+ fmt->width = res->width;
+ fmt->height = res->height;
+ dev->res = res;
+
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
sd_state->pads->try_fmt = *fmt;
@@ -803,13 +737,6 @@ static int gc2235_set_fmt(struct v4l2_subdev *sd,
return 0;
}
- dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
- if (dev->fmt_idx == -1) {
- dev_err(&client->dev, "get resolution fail\n");
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
-
ret = startup(sd);
if (ret) {
dev_err(&client->dev, "gc2235 startup err\n");
@@ -817,7 +744,7 @@ static int gc2235_set_fmt(struct v4l2_subdev *sd,
}
ret = gc2235_get_intg_factor(client, gc2235_info,
- &gc2235_res[dev->fmt_idx]);
+ dev->res);
if (ret)
dev_err(&client->dev, "failed to get integration_factor\n");
@@ -839,8 +766,8 @@ static int gc2235_get_fmt(struct v4l2_subdev *sd,
if (!fmt)
return -EINVAL;
- fmt->width = gc2235_res[dev->fmt_idx].width;
- fmt->height = gc2235_res[dev->fmt_idx].height;
+ fmt->width = dev->res->width;
+ fmt->height = dev->res->height;
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
return 0;
@@ -953,7 +880,7 @@ static int gc2235_g_frame_interval(struct v4l2_subdev *sd,
struct gc2235_device *dev = to_gc2235_sensor(sd);
interval->interval.numerator = 1;
- interval->interval.denominator = gc2235_res[dev->fmt_idx].fps;
+ interval->interval.denominator = dev->res->fps;
return 0;
}
@@ -991,7 +918,7 @@ static int gc2235_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
struct gc2235_device *dev = to_gc2235_sensor(sd);
mutex_lock(&dev->input_lock);
- *frames = gc2235_res[dev->fmt_idx].skip_frames;
+ *frames = dev->res->skip_frames;
mutex_unlock(&dev->input_lock);
return 0;
@@ -1055,7 +982,7 @@ static int gc2235_probe(struct i2c_client *client)
mutex_init(&dev->input_lock);
- dev->fmt_idx = 0;
+ dev->res = &gc2235_res_preview[0];
v4l2_i2c_subdev_init(&dev->sd, client, &gc2235_ops);
gcpdev = gmin_camera_platform_data(&dev->sd,
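The gc2235 gpio_ctrl() hunk above fixes two problems at once: ret was
initialized to -1, so the OR could never report success, and the gpio1
result was then thrown away by returning gpio0_ctrl() directly, leaving a
gpio1 failure unreported. The fixed shape, with invented stand-ins for the
platform callbacks:

static int set_gpio1(int on) { return 0; }	/* stand-in: gpio1_ctrl */
static int set_gpio0(int on) { return 0; }	/* stand-in: gpio0_ctrl */

static int gpio_pair_ctrl(int flag)
{
	int ret;

	ret = set_gpio1(!flag);		/* keep the first result... */
	/* usleep_range(60, 90) sits between the two lines in the driver */
	ret |= set_gpio0(flag);		/* ...and fold in the second */

	return ret;			/* non-zero if either call failed */
}

OR-ing two error codes can mangle the exact errno, but it stays non-zero on
any failure, which is all the callers here check for.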
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 49f4090856d3..00d6842c07d6 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -579,107 +579,6 @@ static int mt9m114_s_power(struct v4l2_subdev *sd, int power)
return mt9m114_init_common(sd);
}
-/*
- * distance - calculate the distance
- * @res: resolution
- * @w: width
- * @h: height
- *
- * Get the gap between resolution and w/h.
- * res->width/height smaller than w/h wouldn't be considered.
- * Returns the value of gap or -1 if fail.
- */
-#define LARGEST_ALLOWED_RATIO_MISMATCH 600
-static int distance(struct mt9m114_res_struct const *res, u32 w, u32 h)
-{
- unsigned int w_ratio;
- unsigned int h_ratio;
- int match;
-
- if (w == 0)
- return -1;
- w_ratio = (res->width << 13) / w;
- if (h == 0)
- return -1;
- h_ratio = (res->height << 13) / h;
- if (h_ratio == 0)
- return -1;
- match = abs(((w_ratio << 13) / h_ratio) - 8192);
-
- if ((w_ratio < 8192) || (h_ratio < 8192) ||
- (match > LARGEST_ALLOWED_RATIO_MISMATCH))
- return -1;
-
- return w_ratio + h_ratio;
-}
-
-/* Return the nearest higher resolution index */
-static int nearest_resolution_index(int w, int h)
-{
- int i;
- int idx = -1;
- int dist;
- int min_dist = INT_MAX;
- const struct mt9m114_res_struct *tmp_res = NULL;
-
- for (i = 0; i < ARRAY_SIZE(mt9m114_res); i++) {
- tmp_res = &mt9m114_res[i];
- dist = distance(tmp_res, w, h);
- if (dist == -1)
- continue;
- if (dist < min_dist) {
- min_dist = dist;
- idx = i;
- }
- }
-
- return idx;
-}
-
-static int mt9m114_try_res(u32 *w, u32 *h)
-{
- int idx = 0;
-
- if ((*w > MT9M114_RES_960P_SIZE_H)
- || (*h > MT9M114_RES_960P_SIZE_V)) {
- *w = MT9M114_RES_960P_SIZE_H;
- *h = MT9M114_RES_960P_SIZE_V;
- } else {
- idx = nearest_resolution_index(*w, *h);
-
- /*
- * nearest_resolution_index() doesn't return smaller
- * resolutions. If it fails, it means the requested
- * resolution is higher than wecan support. Fallback
- * to highest possible resolution in this case.
- */
- if (idx == -1)
- idx = ARRAY_SIZE(mt9m114_res) - 1;
-
- *w = mt9m114_res[idx].width;
- *h = mt9m114_res[idx].height;
- }
-
- return 0;
-}
-
-static struct mt9m114_res_struct *mt9m114_to_res(u32 w, u32 h)
-{
- int index;
-
- for (index = 0; index < N_RES; index++) {
- if ((mt9m114_res[index].width == w) &&
- (mt9m114_res[index].height == h))
- break;
- }
-
- /* No mode found */
- if (index >= N_RES)
- return NULL;
-
- return &mt9m114_res[index];
-}
-
static int mt9m114_res2size(struct v4l2_subdev *sd, int *h_size, int *v_size)
{
struct mt9m114_device *dev = to_mt9m114_sensor(sd);
@@ -829,7 +728,7 @@ static int mt9m114_set_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *fmt = &format->format;
struct i2c_client *c = v4l2_get_subdevdata(sd);
struct mt9m114_device *dev = to_mt9m114_sensor(sd);
- struct mt9m114_res_struct *res_index;
+ struct mt9m114_res_struct *res;
u32 width = fmt->width;
u32 height = fmt->height;
struct camera_mipi_info *mt9m114_info = NULL;
@@ -845,20 +744,21 @@ static int mt9m114_set_fmt(struct v4l2_subdev *sd,
if (!mt9m114_info)
return -EINVAL;
- mt9m114_try_res(&width, &height);
+ res = v4l2_find_nearest_size(mt9m114_res,
+ ARRAY_SIZE(mt9m114_res), width,
+ height, fmt->width, fmt->height);
+ if (!res)
+ res = &mt9m114_res[N_RES - 1];
+
+ fmt->width = res->width;
+ fmt->height = res->height;
+
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
sd_state->pads->try_fmt = *fmt;
return 0;
}
- res_index = mt9m114_to_res(width, height);
-
- /* Sanity check */
- if (unlikely(!res_index)) {
- WARN_ON(1);
- return -EINVAL;
- }
- switch (res_index->res) {
+ switch (res->res) {
case MT9M114_RES_736P:
ret = mt9m114_write_reg_array(c, mt9m114_736P_init, NO_POLLING);
ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
@@ -876,7 +776,7 @@ static int mt9m114_set_fmt(struct v4l2_subdev *sd,
MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET);
break;
default:
- v4l2_err(sd, "set resolution: %d failed!\n", res_index->res);
+ v4l2_err(sd, "set resolution: %d failed!\n", res->res);
return -EINVAL;
}
@@ -890,7 +790,7 @@ static int mt9m114_set_fmt(struct v4l2_subdev *sd,
if (mt9m114_set_suspend(sd))
return -EINVAL;
- if (dev->res != res_index->res) {
+ if (dev->res != res->res) {
int index;
/* Switch to different size */
@@ -922,7 +822,7 @@ static int mt9m114_set_fmt(struct v4l2_subdev *sd,
}
}
ret = mt9m114_get_intg_factor(c, mt9m114_info,
- &mt9m114_res[res_index->res]);
+ &mt9m114_res[res->res]);
if (ret) {
dev_err(&c->dev, "failed to get integration_factor\n");
return -EINVAL;
@@ -931,7 +831,7 @@ static int mt9m114_set_fmt(struct v4l2_subdev *sd,
* mt9m114 - we don't poll for context switch
* because it does not happen with streaming disabled.
*/
- dev->res = res_index->res;
+ dev->res = res->res;
fmt->width = width;
fmt->height = height;
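The mt9m114 conversion folds mt9m114_try_res() and mt9m114_to_res() into the
same v4l2_find_nearest_size() pattern and makes the TRY/ACTIVE split
explicit. The overall shape of set_fmt after these conversions, as a sketch
(find_nearest_mode(), program_mode() and struct mode are placeholders, not
driver symbols):

#include <media/v4l2-subdev.h>

struct mode { int width, height; };
extern const struct mode *find_nearest_mode(int w, int h);
extern int program_mode(struct v4l2_subdev *sd, const struct mode *m);

static int sketch_set_fmt(struct v4l2_subdev *sd,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt = &format->format;
	const struct mode *res;

	/* 1. snap the request to the closest supported mode */
	res = find_nearest_mode(fmt->width, fmt->height);
	fmt->width = res->width;
	fmt->height = res->height;

	/* 2. TRY formats only touch pad state, never the hardware */
	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
		sd_state->pads->try_fmt = *fmt;
		return 0;
	}

	/* 3. ACTIVE formats actually program the sensor */
	return program_mode(sd, res);
}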
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index 2111e4a478c1..4ba99c660681 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -48,7 +48,7 @@ static enum atomisp_bayer_order ov2680_bayer_order_mapping[] = {
/* i2c read/write stuff */
static int ov2680_read_reg(struct i2c_client *client,
- int len, u16 reg, u16 *val)
+ int len, u16 reg, u32 *val)
{
struct i2c_msg msgs[2];
u8 addr_buf[2] = { reg >> 8, reg & 0xff };
@@ -86,7 +86,7 @@ static int ov2680_write_reg(struct i2c_client *client, unsigned int len,
int ret;
if (len == 2)
- put_unaligned_be16(val << (8 * (4 - len)), buf + 2);
+ put_unaligned_be16(val, buf + 2);
else if (len == 1)
buf[2] = val;
else
@@ -147,7 +147,7 @@ static int ov2680_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
struct i2c_client *client = v4l2_get_subdevdata(sd);
dev_dbg(&client->dev, "++++ov2680_g_bin_factor_x\n");
- *val = ov2680_res[dev->fmt_idx].bin_factor_x;
+ *val = dev->res->bin_factor_x;
return 0;
}
@@ -157,7 +157,7 @@ static int ov2680_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
struct ov2680_device *dev = to_ov2680_sensor(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- *val = ov2680_res[dev->fmt_idx].bin_factor_y;
+ *val = dev->res->bin_factor_y;
dev_dbg(&client->dev, "++++ov2680_g_bin_factor_y\n");
return 0;
}
@@ -166,11 +166,9 @@ static int ov2680_get_intg_factor(struct i2c_client *client,
struct camera_mipi_info *info,
const struct ov2680_resolution *res)
{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov2680_device *dev = to_ov2680_sensor(sd);
struct atomisp_sensor_mode_data *buf = &info->data;
unsigned int pix_clk_freq_hz;
- u16 reg_val;
+ u32 reg_val;
int ret;
dev_dbg(&client->dev, "++++ov2680_get_intg_factor\n");
@@ -180,7 +178,6 @@ static int ov2680_get_intg_factor(struct i2c_client *client,
/* pixel clock */
pix_clk_freq_hz = res->pix_clk_freq * 1000000;
- dev->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
buf->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
/* get integration time */
@@ -254,7 +251,7 @@ static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
"+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",
coarse_itg, gain, digitgain);
- vts = ov2680_res[dev->fmt_idx].lines_per_frame;
+ vts = dev->res->lines_per_frame;
/* group hold */
ret = ov2680_write_reg(client, 1,
@@ -359,10 +356,17 @@ static int ov2680_set_exposure(struct v4l2_subdev *sd, int exposure,
int gain, int digitgain)
{
struct ov2680_device *dev = to_ov2680_sensor(sd);
- int ret;
+ int ret = 0;
mutex_lock(&dev->input_lock);
- ret = __ov2680_set_exposure(sd, exposure, gain, digitgain);
+
+ dev->exposure = exposure;
+ dev->gain = gain;
+ dev->digitgain = digitgain;
+
+ if (dev->power_on)
+ ret = __ov2680_set_exposure(sd, exposure, gain, digitgain);
+
mutex_unlock(&dev->input_lock);
return ret;
@@ -383,7 +387,6 @@ static long ov2680_s_exposure(struct v4l2_subdev *sd,
return -EINVAL;
}
- // EXPOSURE CONTROL DISABLED FOR INITIAL CHECKIN, TUNING DOESN'T WORK
return ov2680_set_exposure(sd, coarse_itg, analog_gain, digital_gain);
}
@@ -406,56 +409,25 @@ static long ov2680_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
static int ov2680_q_exposure(struct v4l2_subdev *sd, s32 *value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 reg_v, reg_v2;
+ u32 reg_val;
int ret;
/* get exposure */
- ret = ov2680_read_reg(client, 1,
- OV2680_EXPOSURE_L,
- &reg_v);
+ ret = ov2680_read_reg(client, 3, OV2680_EXPOSURE_H, &reg_val);
if (ret)
- goto err;
-
- ret = ov2680_read_reg(client, 1,
- OV2680_EXPOSURE_M,
- &reg_v2);
- if (ret)
- goto err;
-
- reg_v += reg_v2 << 8;
- ret = ov2680_read_reg(client, 1,
- OV2680_EXPOSURE_H,
- &reg_v2);
- if (ret)
- goto err;
-
- *value = reg_v + (((u32)reg_v2 << 16));
-err:
- return ret;
-}
+ return ret;
-static u32 ov2680_translate_bayer_order(enum atomisp_bayer_order code)
-{
- switch (code) {
- case atomisp_bayer_order_rggb:
- return MEDIA_BUS_FMT_SRGGB10_1X10;
- case atomisp_bayer_order_grbg:
- return MEDIA_BUS_FMT_SGRBG10_1X10;
- case atomisp_bayer_order_bggr:
- return MEDIA_BUS_FMT_SBGGR10_1X10;
- case atomisp_bayer_order_gbrg:
- return MEDIA_BUS_FMT_SGBRG10_1X10;
- }
+ /* Lower four bits are not part of the exposure val (always 0) */
+ *value = reg_val >> 4;
return 0;
}
static int ov2680_v_flip(struct v4l2_subdev *sd, s32 value)
{
- struct ov2680_device *dev = to_ov2680_sensor(sd);
struct camera_mipi_info *ov2680_info = NULL;
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
- u16 val;
+ u32 val;
u8 index;
dev_dbg(&client->dev, "@%s: value:%d\n", __func__, value);
@@ -476,19 +448,16 @@ static int ov2680_v_flip(struct v4l2_subdev *sd, s32 value)
ov2680_info = v4l2_get_subdev_hostdata(sd);
if (ov2680_info) {
ov2680_info->raw_bayer_order = ov2680_bayer_order_mapping[index];
- dev->format.code = ov2680_translate_bayer_order(
- ov2680_info->raw_bayer_order);
}
return ret;
}
static int ov2680_h_flip(struct v4l2_subdev *sd, s32 value)
{
- struct ov2680_device *dev = to_ov2680_sensor(sd);
struct camera_mipi_info *ov2680_info = NULL;
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
- u16 val;
+ u32 val;
u8 index;
dev_dbg(&client->dev, "@%s: value:%d\n", __func__, value);
@@ -510,8 +479,6 @@ static int ov2680_h_flip(struct v4l2_subdev *sd, s32 value)
ov2680_info = v4l2_get_subdev_hostdata(sd);
if (ov2680_info) {
ov2680_info->raw_bayer_order = ov2680_bayer_order_mapping[index];
- dev->format.code = ov2680_translate_bayer_order(
- ov2680_info->raw_bayer_order);
}
return ret;
}
@@ -677,25 +644,6 @@ static int ov2680_init_registers(struct v4l2_subdev *sd)
return ret;
}
-static int ov2680_init(struct v4l2_subdev *sd)
-{
- struct ov2680_device *dev = to_ov2680_sensor(sd);
-
- int ret;
-
- mutex_lock(&dev->input_lock);
-
- /* restore settings */
- ov2680_res = ov2680_res_preview;
- N_RES = N_RES_PREVIEW;
-
- ret = ov2680_init_registers(sd);
-
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
static int power_ctrl(struct v4l2_subdev *sd, bool flag)
{
int ret = 0;
@@ -760,6 +708,9 @@ static int power_up(struct v4l2_subdev *sd)
return -ENODEV;
}
+ if (dev->power_on)
+ return 0; /* Already on */
+
/* power control */
ret = power_ctrl(sd, 1);
if (ret)
@@ -784,8 +735,19 @@ static int power_up(struct v4l2_subdev *sd)
/* according to DS, 20ms is needed between PWDN and i2c access */
msleep(20);
+ ret = ov2680_init_registers(sd);
+ if (ret)
+ goto fail_init_registers;
+
+ ret = __ov2680_set_exposure(sd, dev->exposure, dev->gain, dev->digitgain);
+ if (ret)
+ goto fail_init_registers;
+
+ dev->power_on = true;
return 0;
+fail_init_registers:
+ dev->platform_data->flisclk_ctrl(sd, 0);
fail_clk:
gpio_ctrl(sd, 0);
fail_power:
@@ -809,6 +771,9 @@ static int power_down(struct v4l2_subdev *sd)
return -ENODEV;
}
+ if (!dev->power_on)
+ return 0; /* Already off */
+
ret = dev->platform_data->flisclk_ctrl(sd, 0);
if (ret)
dev_err(&client->dev, "flisclk failed\n");
@@ -823,94 +788,31 @@ static int power_down(struct v4l2_subdev *sd)
/* power control */
ret = power_ctrl(sd, 0);
- if (ret)
+ if (ret) {
dev_err(&client->dev, "vprog failed.\n");
+ return ret;
+ }
- return ret;
+ dev->power_on = false;
+ return 0;
}
static int ov2680_s_power(struct v4l2_subdev *sd, int on)
{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
int ret;
+ mutex_lock(&dev->input_lock);
+
if (on == 0) {
ret = power_down(sd);
} else {
ret = power_up(sd);
- if (!ret)
- return ov2680_init(sd);
}
- return ret;
-}
-
-/*
- * distance - calculate the distance
- * @res: resolution
- * @w: width
- * @h: height
- *
- * Get the gap between resolution and w/h.
- * res->width/height smaller than w/h wouldn't be considered.
- * Returns the value of gap or -1 if fail.
- */
-#define LARGEST_ALLOWED_RATIO_MISMATCH 600
-static int distance(struct ov2680_resolution *res, u32 w, u32 h)
-{
- unsigned int w_ratio = (res->width << 13) / w;
- unsigned int h_ratio;
- int match;
-
- if (h == 0)
- return -1;
- h_ratio = (res->height << 13) / h;
- if (h_ratio == 0)
- return -1;
- match = abs(((w_ratio << 13) / h_ratio) - 8192);
-
- if ((w_ratio < 8192) || (h_ratio < 8192) ||
- (match > LARGEST_ALLOWED_RATIO_MISMATCH))
- return -1;
-
- return w_ratio + h_ratio;
-}
-/* Return the nearest higher resolution index */
-static int nearest_resolution_index(int w, int h)
-{
- int i;
- int idx = -1;
- int dist;
- int min_dist = INT_MAX;
- struct ov2680_resolution *tmp_res = NULL;
-
- for (i = 0; i < N_RES; i++) {
- tmp_res = &ov2680_res[i];
- dist = distance(tmp_res, w, h);
- if (dist == -1)
- continue;
- if (dist < min_dist) {
- min_dist = dist;
- idx = i;
- }
- }
-
- return idx;
-}
-
-static int get_resolution_index(int w, int h)
-{
- int i;
-
- for (i = 0; i < N_RES; i++) {
- if (w != ov2680_res[i].width)
- continue;
- if (h != ov2680_res[i].height)
- continue;
-
- return i;
- }
+ mutex_unlock(&dev->input_lock);
- return -1;
+ return ret;
}
static int ov2680_set_fmt(struct v4l2_subdev *sd,
@@ -921,8 +823,8 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
struct ov2680_device *dev = to_ov2680_sensor(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct camera_mipi_info *ov2680_info = NULL;
- int ret = 0;
- int idx = 0;
+ struct ov2680_resolution *res;
+ int vts, ret = 0;
dev_dbg(&client->dev, "%s: %s: pad: %d, fmt: %p\n",
__func__,
@@ -940,41 +842,44 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
return -EINVAL;
mutex_lock(&dev->input_lock);
- idx = nearest_resolution_index(fmt->width, fmt->height);
- if (idx == -1) {
- /* return the largest resolution */
- fmt->width = ov2680_res[N_RES - 1].width;
- fmt->height = ov2680_res[N_RES - 1].height;
- } else {
- fmt->width = ov2680_res[idx].width;
- fmt->height = ov2680_res[idx].height;
- }
+
+ res = v4l2_find_nearest_size(ov2680_res_preview,
+ ARRAY_SIZE(ov2680_res_preview), width,
+ height, fmt->width, fmt->height);
+ if (!res)
+ res = &ov2680_res_preview[N_RES_PREVIEW - 1];
+
+ fmt->width = res->width;
+ fmt->height = res->height;
+
fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
- dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
- dev_dbg(&client->dev, "%s: Resolution index: %d\n",
- __func__, dev->fmt_idx);
- if (dev->fmt_idx == -1) {
- dev_err(&client->dev, "get resolution fail\n");
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
- dev_dbg(&client->dev, "%s: i=%d, w=%d, h=%d\n",
- __func__, dev->fmt_idx, fmt->width, fmt->height);
- // IS IT NEEDED?
+ dev_dbg(&client->dev, "%s: %dx%d\n",
+ __func__, fmt->width, fmt->height);
+
+ /* s_power has not been called yet for std v4l2 clients (camorama) */
power_up(sd);
- ret = ov2680_write_reg_array(client, ov2680_res[dev->fmt_idx].regs);
+ ret = ov2680_write_reg_array(client, dev->res->regs);
if (ret)
dev_err(&client->dev,
"ov2680 write resolution register err: %d\n", ret);
- ret = ov2680_get_intg_factor(client, ov2680_info,
- &ov2680_res[dev->fmt_idx]);
+ vts = dev->res->lines_per_frame;
+
+ /* If necessary increase the VTS to match exposure + MARGIN */
+ if (dev->exposure > vts - OV2680_INTEGRATION_TIME_MARGIN)
+ vts = dev->exposure + OV2680_INTEGRATION_TIME_MARGIN;
+
+ ret = ov2680_write_reg(client, 2, OV2680_TIMING_VTS_H, vts);
+ if (ret)
+ dev_err(&client->dev, "ov2680 write vts err: %d\n", ret);
+
+ ret = ov2680_get_intg_factor(client, ov2680_info, res);
if (ret) {
dev_err(&client->dev, "failed to get integration factor\n");
goto err;
@@ -989,8 +894,6 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
if (v_flag)
ov2680_v_flip(sd, v_flag);
- v4l2_info(client, "\n%s idx %d\n", __func__, dev->fmt_idx);
-
/*
* ret = startup(sd);
* if (ret)
@@ -1014,8 +917,8 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
if (!fmt)
return -EINVAL;
- fmt->width = ov2680_res[dev->fmt_idx].width;
- fmt->height = ov2680_res[dev->fmt_idx].height;
+ fmt->width = dev->res->width;
+ fmt->height = dev->res->height;
fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
return 0;
@@ -1024,7 +927,7 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
static int ov2680_detect(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
- u16 high, low;
+ u32 high, low;
int ret;
u16 id;
u8 revision;
@@ -1072,11 +975,6 @@ static int ov2680_s_stream(struct v4l2_subdev *sd, int enable)
ret = ov2680_write_reg(client, 1, OV2680_SW_STREAM,
enable ? OV2680_START_STREAMING :
OV2680_STOP_STREAMING);
-#if 0
- /* restore settings */
- ov2680_res = ov2680_res_preview;
- N_RES = N_RES_PREVIEW;
-#endif
//otp valid at stream on state
//if(!dev->otp_data)
@@ -1101,16 +999,6 @@ static int ov2680_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- /*
- * power off the module, then power on it in future
- * as first power on by board may not fulfill the
- * power on sequqence needed by the module
- */
- ret = power_down(sd);
- if (ret) {
- dev_err(&client->dev, "ov2680 power-off err.\n");
- goto fail_power_off;
- }
ret = power_up(sd);
if (ret) {
@@ -1144,7 +1032,6 @@ fail_csi_cfg:
fail_power_on:
power_down(sd);
dev_err(&client->dev, "sensor power-gating failed\n");
-fail_power_off:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -1155,7 +1042,7 @@ static int ov2680_g_frame_interval(struct v4l2_subdev *sd,
struct ov2680_device *dev = to_ov2680_sensor(sd);
interval->interval.numerator = 1;
- interval->interval.denominator = ov2680_res[dev->fmt_idx].fps;
+ interval->interval.denominator = dev->res->fps;
return 0;
}
@@ -1177,13 +1064,33 @@ static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
{
int index = fse->index;
- if (index >= N_RES)
+ if (index >= N_RES_PREVIEW)
return -EINVAL;
- fse->min_width = ov2680_res[index].width;
- fse->min_height = ov2680_res[index].height;
- fse->max_width = ov2680_res[index].width;
- fse->max_height = ov2680_res[index].height;
+ fse->min_width = ov2680_res_preview[index].width;
+ fse->min_height = ov2680_res_preview[index].height;
+ fse->max_width = ov2680_res_preview[index].width;
+ fse->max_height = ov2680_res_preview[index].height;
+
+ return 0;
+}
+
+static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct v4l2_fract fract;
+
+ if (fie->index >= N_RES_PREVIEW ||
+ fie->width > ov2680_res_preview[0].width ||
+ fie->height > ov2680_res_preview[0].height ||
+ fie->which > V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ fract.denominator = ov2680_res_preview[fie->index].fps;
+ fract.numerator = 1;
+
+ fie->interval = fract;
return 0;
}
@@ -1193,7 +1100,7 @@ static int ov2680_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
struct ov2680_device *dev = to_ov2680_sensor(sd);
mutex_lock(&dev->input_lock);
- *frames = ov2680_res[dev->fmt_idx].skip_frames;
+ *frames = dev->res->skip_frames;
mutex_unlock(&dev->input_lock);
return 0;
@@ -1216,6 +1123,7 @@ static const struct v4l2_subdev_core_ops ov2680_core_ops = {
static const struct v4l2_subdev_pad_ops ov2680_pad_ops = {
.enum_mbus_code = ov2680_enum_mbus_code,
.enum_frame_size = ov2680_enum_frame_size,
+ .enum_frame_interval = ov2680_enum_frame_interval,
.get_fmt = ov2680_get_fmt,
.set_fmt = ov2680_set_fmt,
};
@@ -1257,7 +1165,9 @@ static int ov2680_probe(struct i2c_client *client)
mutex_init(&dev->input_lock);
- dev->fmt_idx = 0;
+ dev->res = &ov2680_res_preview[0];
+ dev->exposure = dev->res->lines_per_frame - OV2680_INTEGRATION_TIME_MARGIN;
+ dev->gain = 250; /* 0-2047 */
v4l2_i2c_subdev_init(&dev->sd, client, &ov2680_ops);
pdata = gmin_camera_platform_data(&dev->sd,
@@ -1278,7 +1188,6 @@ static int ov2680_probe(struct i2c_client *client)
dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret =
v4l2_ctrl_handler_init(&dev->ctrl_handler,
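Among the ov2680 changes above, the three single-byte exposure reads
collapse into one three-byte read starting at OV2680_EXPOSURE_H, after which
the low nibble is discarded. A small user-space model of the byte handling
(the I2C transfer itself is elided):

#include <stdint.h>

static uint32_t exposure_from_regs(const uint8_t buf[3])
{
	/* 0x3500..0x3502, big-endian: high, mid, low bytes */
	uint32_t reg_val = ((uint32_t)buf[0] << 16) |
			   ((uint32_t)buf[1] << 8) |
			    (uint32_t)buf[2];

	/* the low 4 bits are not part of the exposure value (always 0) */
	return reg_val >> 4;
}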
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index 90d0871a78a3..da98094d7094 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -557,7 +557,7 @@ static int ov2722_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
ret = ov2722_g_fnumber_range(&dev->sd, &ctrl->val);
break;
case V4L2_CID_LINK_FREQ:
- val = ov2722_res[dev->fmt_idx].mipi_freq;
+ val = dev->res->mipi_freq;
if (val == 0)
return -EINVAL;
@@ -782,76 +782,6 @@ static int ov2722_s_power(struct v4l2_subdev *sd, int on)
return ret;
}
-/*
- * distance - calculate the distance
- * @res: resolution
- * @w: width
- * @h: height
- *
- * Get the gap between resolution and w/h.
- * res->width/height smaller than w/h wouldn't be considered.
- * Returns the value of gap or -1 if fail.
- */
-#define LARGEST_ALLOWED_RATIO_MISMATCH 800
-static int distance(struct ov2722_resolution *res, u32 w, u32 h)
-{
- unsigned int w_ratio = (res->width << 13) / w;
- unsigned int h_ratio;
- int match;
-
- if (h == 0)
- return -1;
- h_ratio = (res->height << 13) / h;
- if (h_ratio == 0)
- return -1;
- match = abs(((w_ratio << 13) / h_ratio) - 8192);
-
- if ((w_ratio < 8192) || (h_ratio < 8192) ||
- (match > LARGEST_ALLOWED_RATIO_MISMATCH))
- return -1;
-
- return w_ratio + h_ratio;
-}
-
-/* Return the nearest higher resolution index */
-static int nearest_resolution_index(int w, int h)
-{
- int i;
- int idx = -1;
- int dist;
- int min_dist = INT_MAX;
- struct ov2722_resolution *tmp_res = NULL;
-
- for (i = 0; i < N_RES; i++) {
- tmp_res = &ov2722_res[i];
- dist = distance(tmp_res, w, h);
- if (dist == -1)
- continue;
- if (dist < min_dist) {
- min_dist = dist;
- idx = i;
- }
- }
-
- return idx;
-}
-
-static int get_resolution_index(int w, int h)
-{
- int i;
-
- for (i = 0; i < N_RES; i++) {
- if (w != ov2722_res[i].width)
- continue;
- if (h != ov2722_res[i].height)
- continue;
-
- return i;
- }
-
- return -1;
-}
-
/* TODO: remove it. */
static int startup(struct v4l2_subdev *sd)
{
@@ -866,7 +796,7 @@ static int startup(struct v4l2_subdev *sd)
return ret;
}
- ret = ov2722_write_reg_array(client, ov2722_res[dev->fmt_idx].regs);
+ ret = ov2722_write_reg_array(client, dev->res->regs);
if (ret) {
dev_err(&client->dev, "ov2722 write register err.\n");
return ret;
@@ -882,9 +812,9 @@ static int ov2722_set_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *fmt = &format->format;
struct ov2722_device *dev = to_ov2722_sensor(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2722_resolution *res;
struct camera_mipi_info *ov2722_info = NULL;
int ret = 0;
- int idx;
if (format->pad)
return -EINVAL;
@@ -895,15 +825,16 @@ static int ov2722_set_fmt(struct v4l2_subdev *sd,
return -EINVAL;
mutex_lock(&dev->input_lock);
- idx = nearest_resolution_index(fmt->width, fmt->height);
- if (idx == -1) {
- /* return the largest resolution */
- fmt->width = ov2722_res[N_RES - 1].width;
- fmt->height = ov2722_res[N_RES - 1].height;
- } else {
- fmt->width = ov2722_res[idx].width;
- fmt->height = ov2722_res[idx].height;
- }
+ res = v4l2_find_nearest_size(ov2722_res_preview,
+ ARRAY_SIZE(ov2722_res_preview), width,
+ height, fmt->width, fmt->height);
+ if (!res)
+ res = &ov2722_res_preview[N_RES - 1];
+
+ fmt->width = res->width;
+ fmt->height = res->height;
+ dev->res = res;
+
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
sd_state->pads->try_fmt = *fmt;
@@ -911,15 +842,9 @@ static int ov2722_set_fmt(struct v4l2_subdev *sd,
return 0;
}
- dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
- if (dev->fmt_idx == -1) {
- dev_err(&client->dev, "get resolution fail\n");
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
- dev->pixels_per_line = ov2722_res[dev->fmt_idx].pixels_per_line;
- dev->lines_per_frame = ov2722_res[dev->fmt_idx].lines_per_frame;
+ dev->pixels_per_line = dev->res->pixels_per_line;
+ dev->lines_per_frame = dev->res->lines_per_frame;
ret = startup(sd);
if (ret) {
@@ -950,8 +875,7 @@ static int ov2722_set_fmt(struct v4l2_subdev *sd,
}
}
- ret = ov2722_get_intg_factor(client, ov2722_info,
- &ov2722_res[dev->fmt_idx]);
+ ret = ov2722_get_intg_factor(client, ov2722_info, dev->res);
if (ret)
dev_err(&client->dev, "failed to get integration_factor\n");
@@ -972,8 +896,8 @@ static int ov2722_get_fmt(struct v4l2_subdev *sd,
if (!fmt)
return -EINVAL;
- fmt->width = ov2722_res[dev->fmt_idx].width;
- fmt->height = ov2722_res[dev->fmt_idx].height;
+ fmt->width = dev->res->width;
+ fmt->height = dev->res->height;
fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
return 0;
@@ -1098,7 +1022,7 @@ static int ov2722_g_frame_interval(struct v4l2_subdev *sd,
struct ov2722_device *dev = to_ov2722_sensor(sd);
interval->interval.numerator = 1;
- interval->interval.denominator = ov2722_res[dev->fmt_idx].fps;
+ interval->interval.denominator = dev->res->fps;
return 0;
}
@@ -1136,7 +1060,7 @@ static int ov2722_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
struct ov2722_device *dev = to_ov2722_sensor(sd);
mutex_lock(&dev->input_lock);
- *frames = ov2722_res[dev->fmt_idx].skip_frames;
+ *frames = dev->res->skip_frames;
mutex_unlock(&dev->input_lock);
return 0;
@@ -1220,7 +1144,7 @@ static int ov2722_probe(struct i2c_client *client)
mutex_init(&dev->input_lock);
- dev->fmt_idx = 0;
+ dev->res = &ov2722_res_preview[0];
v4l2_i2c_subdev_init(&dev->sd, client, &ov2722_ops);
ovpdev = gmin_camera_platform_data(&dev->sd,
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h
index 2fe3de115083..db643ebc3909 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.h
+++ b/drivers/staging/media/atomisp/i2c/gc0310.h
@@ -150,8 +150,7 @@ struct gc0310_device {
struct camera_sensor_platform_data *platform_data;
int vt_pix_clk_freq_mhz;
- int fmt_idx;
- u8 res;
+ struct gc0310_resolution *res;
u8 type;
};
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
index ba5db1230033..806be5dff7a5 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.h
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -158,11 +158,10 @@ struct gc2235_device {
struct v4l2_mbus_framefmt format;
struct mutex input_lock;
struct v4l2_ctrl_handler ctrl_handler;
+ struct gc2235_resolution *res;
struct camera_sensor_platform_data *platform_data;
int vt_pix_clk_freq_mhz;
- int fmt_idx;
- u8 res;
u8 type;
};
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
index 874115f35fca..4e351196fe34 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.h
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -141,7 +141,6 @@ struct regval_list {
};
struct ov2680_resolution {
- u8 *desc;
const struct ov2680_reg *regs;
int res;
int width;
@@ -154,7 +153,6 @@ struct ov2680_resolution {
u8 bin_factor_x;
u8 bin_factor_y;
u8 bin_mode;
- bool used;
};
struct ov2680_format {
@@ -169,15 +167,14 @@ struct ov2680_format {
struct ov2680_device {
struct v4l2_subdev sd;
struct media_pad pad;
- struct v4l2_mbus_framefmt format;
struct mutex input_lock;
struct v4l2_ctrl_handler ctrl_handler;
+ struct ov2680_resolution *res;
struct camera_sensor_platform_data *platform_data;
- int vt_pix_clk_freq_mhz;
- int fmt_idx;
- int run_mode;
- u8 res;
- u8 type;
+ bool power_on;
+ u16 exposure;
+ u16 gain;
+ u16 digitgain;
};
/**
@@ -283,14 +280,11 @@ static struct ov2680_reg const ov2680_global_setting[] = {
{}
};
-#if 0 /* None of the definitions below are used currently */
/*
* 176x144 30fps VBlanking 1lane 10Bit (binning)
*/
static struct ov2680_reg const ov2680_QCIF_30fps[] = {
{0x3086, 0x01},
- {0x3501, 0x24},
- {0x3502, 0x40},
{0x370a, 0x23},
{0x3801, 0xa0},
{0x3802, 0x00},
@@ -305,8 +299,6 @@ static struct ov2680_reg const ov2680_QCIF_30fps[] = {
{0x380b, 0xa0},
{0x380c, 0x06},
{0x380d, 0xb0},
- {0x380e, 0x02},
- {0x380f, 0x84},
{0x3810, 0x00},
{0x3811, 0x04},
{0x3812, 0x00},
@@ -334,8 +326,6 @@ static struct ov2680_reg const ov2680_QCIF_30fps[] = {
*/
static struct ov2680_reg const ov2680_CIF_30fps[] = {
{0x3086, 0x01},
- {0x3501, 0x24},
- {0x3502, 0x40},
{0x370a, 0x23},
{0x3801, 0xa0},
{0x3802, 0x00},
@@ -350,8 +340,6 @@ static struct ov2680_reg const ov2680_CIF_30fps[] = {
{0x380b, 0x30},
{0x380c, 0x06},
{0x380d, 0xb0},
- {0x380e, 0x02},
- {0x380f, 0x84},
{0x3810, 0x00},
{0x3811, 0x04},
{0x3812, 0x00},
@@ -377,8 +365,6 @@ static struct ov2680_reg const ov2680_CIF_30fps[] = {
*/
static struct ov2680_reg const ov2680_QVGA_30fps[] = {
{0x3086, 0x01},
- {0x3501, 0x24},
- {0x3502, 0x40},
{0x370a, 0x23},
{0x3801, 0xa0},
{0x3802, 0x00},
@@ -393,8 +379,6 @@ static struct ov2680_reg const ov2680_QVGA_30fps[] = {
{0x380b, 0x00},
{0x380c, 0x06},
{0x380d, 0xb0},
- {0x380e, 0x02},
- {0x380f, 0x84},
{0x3810, 0x00},
{0x3811, 0x04},
{0x3812, 0x00},
@@ -420,8 +404,6 @@ static struct ov2680_reg const ov2680_QVGA_30fps[] = {
*/
static struct ov2680_reg const ov2680_656x496_30fps[] = {
{0x3086, 0x01},
- {0x3501, 0x24},
- {0x3502, 0x40},
{0x370a, 0x23},
{0x3801, 0xa0},
{0x3802, 0x00},
@@ -436,8 +418,6 @@ static struct ov2680_reg const ov2680_656x496_30fps[] = {
{0x380b, 0xf0},
{0x380c, 0x06},
{0x380d, 0xb0},
- {0x380e, 0x02},
- {0x380f, 0x84},
{0x3810, 0x00},
{0x3811, 0x04},
{0x3812, 0x00},
@@ -459,12 +439,10 @@ static struct ov2680_reg const ov2680_656x496_30fps[] = {
};
/*
- * 800x600 30fps VBlanking 1lane 10Bit (binning)
+ * 720x592 30fps VBlanking 1lane 10Bit (binning)
*/
static struct ov2680_reg const ov2680_720x592_30fps[] = {
{0x3086, 0x01},
- {0x3501, 0x26},
- {0x3502, 0x40},
{0x370a, 0x23},
{0x3801, 0x00}, // X_ADDR_START;
{0x3802, 0x00},
@@ -479,8 +457,6 @@ static struct ov2680_reg const ov2680_720x592_30fps[] = {
{0x380b, 0x50}, // Y_OUTPUT_SIZE;
{0x380c, 0x06},
{0x380d, 0xac}, // HTS;
- {0x380e, 0x02},
- {0x380f, 0x84}, // VTS;
{0x3810, 0x00},
{0x3811, 0x00},
{0x3812, 0x00},
@@ -508,8 +484,6 @@ static struct ov2680_reg const ov2680_720x592_30fps[] = {
*/
static struct ov2680_reg const ov2680_800x600_30fps[] = {
{0x3086, 0x01},
- {0x3501, 0x26},
- {0x3502, 0x40},
{0x370a, 0x23},
{0x3801, 0x00},
{0x3802, 0x00},
@@ -524,8 +498,6 @@ static struct ov2680_reg const ov2680_800x600_30fps[] = {
{0x380b, 0x58},
{0x380c, 0x06},
{0x380d, 0xac},
- {0x380e, 0x02},
- {0x380f, 0x84},
{0x3810, 0x00},
{0x3811, 0x00},
{0x3812, 0x00},
@@ -551,8 +523,6 @@ static struct ov2680_reg const ov2680_800x600_30fps[] = {
*/
static struct ov2680_reg const ov2680_720p_30fps[] = {
{0x3086, 0x00},
- {0x3501, 0x48},
- {0x3502, 0xe0},
{0x370a, 0x21},
{0x3801, 0xa0},
{0x3802, 0x00},
@@ -567,8 +537,6 @@ static struct ov2680_reg const ov2680_720p_30fps[] = {
{0x380b, 0xe0},
{0x380c, 0x06},
{0x380d, 0xa8},
- {0x380e, 0x05},
- {0x380f, 0x0e},
{0x3810, 0x00},
{0x3811, 0x08},
{0x3812, 0x00},
@@ -594,8 +562,6 @@ static struct ov2680_reg const ov2680_720p_30fps[] = {
*/
static struct ov2680_reg const ov2680_1296x976_30fps[] = {
{0x3086, 0x00},
- {0x3501, 0x48},
- {0x3502, 0xe0},
{0x370a, 0x21},
{0x3801, 0xa0},
{0x3802, 0x00},
@@ -610,8 +576,6 @@ static struct ov2680_reg const ov2680_1296x976_30fps[] = {
{0x380b, 0xd0},
{0x380c, 0x06},
{0x380d, 0xa8},
- {0x380e, 0x05},
- {0x380f, 0x0e},
{0x3810, 0x00},
{0x3811, 0x08},
{0x3812, 0x00},
@@ -637,8 +601,6 @@ static struct ov2680_reg const ov2680_1296x976_30fps[] = {
*/
static struct ov2680_reg const ov2680_1456x1096_30fps[] = {
{0x3086, 0x00},
- {0x3501, 0x48},
- {0x3502, 0xe0},
{0x370a, 0x21},
{0x3801, 0x90},
{0x3802, 0x00},
@@ -653,8 +615,6 @@ static struct ov2680_reg const ov2680_1456x1096_30fps[] = {
{0x380b, 0x48},
{0x380c, 0x06},
{0x380d, 0xa8},
- {0x380e, 0x05},
- {0x380f, 0x0e},
{0x3810, 0x00},
{0x3811, 0x08},
{0x3812, 0x00},
@@ -674,7 +634,6 @@ static struct ov2680_reg const ov2680_1456x1096_30fps[] = {
// {0x5090, 0x0c},
{}
};
-#endif
/*
*1616x916 30fps VBlanking 1lane 10bit
@@ -682,8 +641,6 @@ static struct ov2680_reg const ov2680_1456x1096_30fps[] = {
static struct ov2680_reg const ov2680_1616x916_30fps[] = {
{0x3086, 0x00},
- {0x3501, 0x48},
- {0x3502, 0xe0},
{0x370a, 0x21},
{0x3801, 0x00},
{0x3802, 0x00},
@@ -698,8 +655,6 @@ static struct ov2680_reg const ov2680_1616x916_30fps[] = {
{0x380b, 0x94},
{0x380c, 0x06},
{0x380d, 0xa8},
- {0x380e, 0x05},
- {0x380f, 0x0e},
{0x3810, 0x00},
{0x3811, 0x00},
{0x3812, 0x00},
@@ -721,13 +676,10 @@ static struct ov2680_reg const ov2680_1616x916_30fps[] = {
};
/*
- * 1612x1212 30fps VBlanking 1lane 10Bit
+ * 1616x1082 30fps VBlanking 1lane 10Bit
*/
-#if 0
static struct ov2680_reg const ov2680_1616x1082_30fps[] = {
{0x3086, 0x00},
- {0x3501, 0x48},
- {0x3502, 0xe0},
{0x370a, 0x21},
{0x3801, 0x00},
{0x3802, 0x00},
@@ -742,8 +694,6 @@ static struct ov2680_reg const ov2680_1616x1082_30fps[] = {
{0x380b, 0x3a},
{0x380c, 0x06},
{0x380d, 0xa8},
- {0x380e, 0x05},
- {0x380f, 0x0e},
{0x3810, 0x00},
{0x3811, 0x00},
{0x3812, 0x00},
@@ -763,14 +713,12 @@ static struct ov2680_reg const ov2680_1616x1082_30fps[] = {
{0x5081, 0x41},
{}
};
-#endif
+
/*
* 1616x1216 30fps VBlanking 1lane 10Bit
*/
static struct ov2680_reg const ov2680_1616x1216_30fps[] = {
{0x3086, 0x00},
- {0x3501, 0x48},
- {0x3502, 0xe0},
{0x370a, 0x21},
{0x3801, 0x00},
{0x3802, 0x00},
@@ -785,8 +733,6 @@ static struct ov2680_reg const ov2680_1616x1216_30fps[] = {
{0x380b, 0xc0},//c0},
{0x380c, 0x06},
{0x380d, 0xa8},
- {0x380e, 0x05},
- {0x380f, 0x0e},
{0x3810, 0x00},
{0x3811, 0x00},
{0x3812, 0x00},
@@ -809,12 +755,10 @@ static struct ov2680_reg const ov2680_1616x1216_30fps[] = {
static struct ov2680_resolution ov2680_res_preview[] = {
{
- .desc = "ov2680_1616x1216_30fps",
.width = 1616,
.height = 1216,
.pix_clk_freq = 66,
.fps = 30,
- .used = 0,
.pixels_per_line = 1698,//1704,
.lines_per_frame = 1294,
.bin_factor_x = 0,
@@ -824,12 +768,23 @@ static struct ov2680_resolution ov2680_res_preview[] = {
.regs = ov2680_1616x1216_30fps,
},
{
- .desc = "ov2680_1616x916_30fps",
+ .width = 1616,
+ .height = 1082,
+ .pix_clk_freq = 66,
+ .fps = 30,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1616x1082_30fps,
+ },
+ {
.width = 1616,
.height = 916,
.fps = 30,
.pix_clk_freq = 66,
- .used = 0,
.pixels_per_line = 1698,//1704,
.lines_per_frame = 1294,
.bin_factor_x = 0,
@@ -838,11 +793,125 @@ static struct ov2680_resolution ov2680_res_preview[] = {
.skip_frames = 3,
.regs = ov2680_1616x916_30fps,
},
+ {
+ .width = 1456,
+ .height = 1096,
+ .fps = 30,
+ .pix_clk_freq = 66,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1456x1096_30fps,
+ },
+ {
+ .width = 1296,
+ .height = 976,
+ .fps = 30,
+ .pix_clk_freq = 66,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1296x976_30fps,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .fps = 60,
+ .pix_clk_freq = 66,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_720p_30fps,
+ },
+ {
+ .width = 800,
+ .height = 600,
+ .fps = 60,
+ .pix_clk_freq = 66,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_800x600_30fps,
+ },
+ {
+ .width = 720,
+ .height = 592,
+ .fps = 60,
+ .pix_clk_freq = 66,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_720x592_30fps,
+ },
+ {
+ .width = 656,
+ .height = 496,
+ .fps = 60,
+ .pix_clk_freq = 66,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_656x496_30fps,
+ },
+ {
+ .width = 336,
+ .height = 256,
+ .fps = 60,
+ .pix_clk_freq = 66,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_QVGA_30fps,
+ },
+ {
+ .width = 352,
+ .height = 288,
+ .fps = 60,
+ .pix_clk_freq = 66,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_CIF_30fps,
+ },
+ {
+ .width = 176,
+ .height = 144,
+ .fps = 60,
+ .pix_clk_freq = 66,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_QCIF_30fps,
+ },
};
#define N_RES_PREVIEW (ARRAY_SIZE(ov2680_res_preview))
-static struct ov2680_resolution *ov2680_res = ov2680_res_preview;
-static unsigned long N_RES = N_RES_PREVIEW;
-
#endif
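A side effect of the ov2680.h table edits above: with the 0x380e/0x380f
(VTS) writes removed from every mode list, frame length is no longer baked
into the modes. ov2680_set_fmt() now derives it from lines_per_frame and
stretches it when the cached exposure would not otherwise fit. Roughly, as a
sketch (register and constant names are the driver's; the helper itself is
invented):

static int sketch_program_vts(struct i2c_client *client,
			      const struct ov2680_resolution *res,
			      int exposure)
{
	int vts = res->lines_per_frame;

	/* keep exposure plus margin inside the frame */
	if (exposure > vts - OV2680_INTEGRATION_TIME_MARGIN)
		vts = exposure + OV2680_INTEGRATION_TIME_MARGIN;

	return ov2680_write_reg(client, 2, OV2680_TIMING_VTS_H, vts);
}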
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h
index 7b0debb6c53d..d6e2510bc01c 100644
--- a/drivers/staging/media/atomisp/i2c/ov2722.h
+++ b/drivers/staging/media/atomisp/i2c/ov2722.h
@@ -201,14 +201,13 @@ struct ov2722_device {
struct media_pad pad;
struct v4l2_mbus_framefmt format;
struct mutex input_lock;
+ struct ov2722_resolution *res;
struct camera_sensor_platform_data *platform_data;
int vt_pix_clk_freq_mhz;
- int fmt_idx;
int run_mode;
u16 pixels_per_line;
u16 lines_per_frame;
- u8 res;
u8 type;
struct v4l2_ctrl_handler ctrl_handler;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index 366161cff560..97d5a528969b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -857,7 +857,8 @@ static struct atomisp_video_pipe *__atomisp_get_pipe(
} else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
/* For online video or SDV video pipe. */
if (css_pipe_id == IA_CSS_PIPE_ID_VIDEO ||
- css_pipe_id == IA_CSS_PIPE_ID_COPY) {
+ css_pipe_id == IA_CSS_PIPE_ID_COPY ||
+ css_pipe_id == IA_CSS_PIPE_ID_YUVPP) {
if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
return &asd->video_out_video_capture;
return &asd->video_out_preview;
@@ -865,7 +866,8 @@ static struct atomisp_video_pipe *__atomisp_get_pipe(
} else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
/* For online preview or ZSL preview pipe. */
if (css_pipe_id == IA_CSS_PIPE_ID_PREVIEW ||
- css_pipe_id == IA_CSS_PIPE_ID_COPY)
+ css_pipe_id == IA_CSS_PIPE_ID_COPY ||
+ css_pipe_id == IA_CSS_PIPE_ID_YUVPP)
return &asd->video_out_preview;
}
/* For capture pipe. */
@@ -1046,13 +1048,8 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
asd->pending_capture_request--;
- if (IS_ISP2401)
- asd->re_trigger_capture = false;
-
dev_dbg(isp->dev, "Trigger capture again for new buffer. err=%d\n",
err);
- } else if (IS_ISP2401) {
- asd->re_trigger_capture = true;
}
break;
case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME:
@@ -1474,7 +1471,7 @@ void atomisp_wdt_work(struct work_struct *work)
wdt_work);
int i;
unsigned int pipe_wdt_cnt[MAX_STREAM_NUM][4] = { {0} };
- bool css_recover = false;
+ bool css_recover = true;
rt_mutex_lock(&isp->mutex);
if (!atomisp_streaming_count(isp)) {
@@ -1487,12 +1484,7 @@ void atomisp_wdt_work(struct work_struct *work)
dev_err(isp->dev, "timeout %d of %d\n",
atomic_read(&isp->wdt_count) + 1,
ATOMISP_ISP_MAX_TIMEOUT_COUNT);
-
- if (atomic_inc_return(&isp->wdt_count) < ATOMISP_ISP_MAX_TIMEOUT_COUNT)
- css_recover = true;
} else {
- css_recover = true;
-
for (i = 0; i < isp->num_of_streams; i++) {
struct atomisp_sub_device *asd = &isp->asd[i];
@@ -1715,6 +1707,12 @@ void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe,
{
unsigned long next;
+ if (!pipe->asd) {
+ dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, pipe->vdev.name);
+ return;
+ }
+
if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY)
pipe->wdt_duration = delay;
@@ -1777,6 +1775,12 @@ void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay)
/* ISP2401 */
void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync)
{
+ if (!pipe->asd) {
+ dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, pipe->vdev.name);
+ return;
+ }
+
if (!atomisp_is_wdt_running(pipe))
return;
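The pipe->asd NULL guard added here recurs throughout atomisp_cmd.c below.
The patch open-codes it at each site; a hypothetical helper shows the
pattern in isolation:

static bool pipe_has_asd(struct atomisp_video_pipe *pipe, const char *func)
{
	if (pipe->asd)
		return true;

	dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
		func, pipe->vdev.name);
	return false;
}

A caller would then bail out early with
if (!pipe_has_asd(pipe, __func__)) return;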
@@ -1869,7 +1873,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr)
/*
* The standard CSS2.0 API tells the following calling sequence of
* dequeue ready buffers:
- * while (ia_css_dequeue_event(...)) {
+ * while (ia_css_dequeue_psys_event(...)) {
* switch (event.type) {
* ...
* ia_css_pipe_dequeue_buffer()
@@ -1920,22 +1924,6 @@ out:
}
/*
- * utils for buffer allocation/free
- */
-
-int atomisp_get_frame_pgnr(struct atomisp_device *isp,
- const struct ia_css_frame *frame, u32 *p_pgnr)
-{
- if (!frame) {
- dev_err(isp->dev, "%s: NULL frame pointer ERROR.\n", __func__);
- return -EINVAL;
- }
-
- *p_pgnr = DIV_ROUND_UP(frame->data_bytes, PAGE_SIZE);
- return 0;
-}
-
-/*
* Get internal fmt according to V4L2 fmt
*/
static enum ia_css_frame_format
@@ -1968,9 +1956,11 @@ v4l2_fmt_to_sh_fmt(u32 fmt)
return IA_CSS_FRAME_FORMAT_RGBA888;
case V4L2_PIX_FMT_RGB565:
return IA_CSS_FRAME_FORMAT_RGB565;
+#if 0
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_CUSTOM_M10MO_RAW:
return IA_CSS_FRAME_FORMAT_BINARY_8;
+#endif
case V4L2_PIX_FMT_SBGGR16:
case V4L2_PIX_FMT_SBGGR10:
case V4L2_PIX_FMT_SGBRG10:
@@ -2022,7 +2012,7 @@ static int raw_output_format_match_input(u32 input, u32 output)
return -EINVAL;
}
-static u32 get_pixel_depth(u32 pixelformat)
+u32 atomisp_get_pixel_depth(u32 pixelformat)
{
switch (pixelformat) {
case V4L2_PIX_FMT_YUV420:
@@ -3619,18 +3609,10 @@ int atomisp_cp_lsc_table(struct atomisp_sub_device *asd,
}
/* Shading table size per color */
- if (!IS_ISP2401) {
- if (st->width > ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
- st->height > ISP2400_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) {
- dev_err(asd->isp->dev, "shading table w/h validate failed!");
- return -EINVAL;
- }
- } else {
- if (st->width > ISP2401_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
- st->height > ISP2401_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) {
- dev_err(asd->isp->dev, "shading table w/h validate failed!");
- return -EINVAL;
- }
+ if (st->width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ st->height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) {
+ dev_err(asd->isp->dev, "shading table w/h validate failed!");
+ return -EINVAL;
}
shading_table = atomisp_css_shading_table_alloc(st->width, st->height);
@@ -4109,6 +4091,12 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe)
unsigned long irqflags;
bool need_to_enqueue_buffer = false;
+ if (!asd) {
+ dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, pipe->vdev.name);
+ return;
+ }
+
if (atomisp_is_vf_pipe(pipe))
return;
@@ -4196,6 +4184,12 @@ int atomisp_set_parameters(struct video_device *vdev,
struct atomisp_css_params *css_param = &asd->params.css_param;
int ret;
+ if (!asd) {
+ dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
dev_err(asd->isp->dev, "%s: internal error!\n", __func__);
return -EINVAL;
@@ -4792,15 +4786,6 @@ int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd,
return 0;
}
-int atomisp_get_fmt(struct video_device *vdev, struct v4l2_format *f)
-{
- struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
-
- f->fmt.pix = pipe->pix;
-
- return 0;
-}
-
static void __atomisp_update_stream_env(struct atomisp_sub_device *asd,
u16 stream_index, struct atomisp_input_stream_info *stream_info)
{
@@ -4857,6 +4842,12 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
int source_pad = atomisp_subdev_source_pad(vdev);
int ret;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
if (!isp->inputs[asd->input_curr].camera)
return -EINVAL;
@@ -4943,7 +4934,7 @@ atomisp_try_fmt_file(struct atomisp_device *isp, struct v4l2_format *f)
return -EINVAL;
}
- depth = get_pixel_depth(pixelformat);
+ depth = atomisp_get_pixel_depth(pixelformat);
if (field == V4L2_FIELD_ANY) {
field = V4L2_FIELD_NONE;
@@ -5133,11 +5124,11 @@ static int css_input_resolution_changed(struct atomisp_sub_device *asd,
dev_dbg(asd->isp->dev, "css_input_resolution_changed to %ux%u\n",
ffmt->width, ffmt->height);
-#if defined(ISP2401_NEW_INPUT_SYSTEM)
- atomisp_css_input_set_two_pixels_per_clock(asd, false);
-#else
- atomisp_css_input_set_two_pixels_per_clock(asd, true);
-#endif
+ if (IS_ISP2401)
+ atomisp_css_input_set_two_pixels_per_clock(asd, false);
+ else
+ atomisp_css_input_set_two_pixels_per_clock(asd, true);
+
if (asd->continuous_mode->val) {
/* Note for all checks: ffmt includes pad_w+pad_h */
if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
@@ -5194,10 +5185,17 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
int (*configure_pp_input)(struct atomisp_sub_device *asd,
unsigned int width, unsigned int height) =
configure_pp_input_nop;
- u16 stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+ u16 stream_index;
const struct atomisp_in_fmt_conv *fc;
int ret, i;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+ stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+
v4l2_fh_init(&fh.vfh, vdev);
isp_sink_crop = atomisp_subdev_get_rect(
@@ -5417,9 +5415,9 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
else
ret = get_frame_info(asd, output_info);
if (ret) {
- dev_err(isp->dev, "get_frame_info %ux%u (padded to %u)\n",
- pix->width, pix->height, pix->bytesperline);
- return -EINVAL;
+ dev_err(isp->dev, "__get_frame_info %ux%u (padded to %u) returned %d\n",
+ pix->width, pix->height, pix->bytesperline, ret);
+ return ret;
}
atomisp_update_grid_info(asd, pipe_id, source_pad);
@@ -5465,9 +5463,14 @@ static void atomisp_get_dis_envelop(struct atomisp_sub_device *asd,
static void atomisp_check_copy_mode(struct atomisp_sub_device *asd,
int source_pad, struct v4l2_pix_format *f)
{
-#if defined(ISP2401_NEW_INPUT_SYSTEM)
struct v4l2_mbus_framefmt *sink, *src;
+ if (!IS_ISP2401) {
+ /* Only used for the new input system */
+ asd->copy_mode = false;
+ return;
+ }
+
sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK);
src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
@@ -5481,8 +5484,6 @@ static void atomisp_check_copy_mode(struct atomisp_sub_device *asd,
sensor[asd->sensor_curr].stream_num > 1)))
asd->copy_mode = true;
else
-#endif
- /* Only used for the new input system */
asd->copy_mode = false;
dev_dbg(asd->isp->dev, "copy_mode: %d\n", asd->copy_mode);
@@ -5493,7 +5494,8 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
unsigned int padding_w, unsigned int padding_h,
unsigned int dvs_env_w, unsigned int dvs_env_h)
{
- struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
const struct atomisp_format_bridge *format;
struct v4l2_subdev_pad_config pad_cfg;
struct v4l2_subdev_state pad_state = {
@@ -5504,7 +5506,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
};
struct v4l2_mbus_framefmt *ffmt = &vformat.format;
struct v4l2_mbus_framefmt *req_ffmt;
- struct atomisp_device *isp = asd->isp;
+ struct atomisp_device *isp;
struct atomisp_input_stream_info *stream_info =
(struct atomisp_input_stream_info *)ffmt->reserved;
u16 stream_index = ATOMISP_INPUT_STREAM_GENERAL;
@@ -5512,6 +5514,14 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
struct v4l2_subdev_fh fh;
int ret;
+ if (!asd) {
+ dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
+ isp = asd->isp;
+
v4l2_fh_init(&fh.vfh, vdev);
stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
@@ -5540,6 +5550,10 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
pad, set_fmt, &pad_state, &vformat);
if (ret)
return ret;
+
+ dev_dbg(isp->dev, "video dis: sensor width: %d, height: %d\n",
+ ffmt->width, ffmt->height);
+
if (ffmt->width < req_ffmt->width ||
ffmt->height < req_ffmt->height) {
req_ffmt->height -= dvs_env_h;
@@ -5550,8 +5564,6 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
asd->params.video_dis_en = false;
}
}
- dev_dbg(isp->dev, "sensor width: %d, height: %d\n",
- ffmt->width, ffmt->height);
vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad,
set_fmt, NULL, &vformat);
@@ -5590,18 +5602,28 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
const struct atomisp_format_bridge *format_bridge;
const struct atomisp_format_bridge *snr_format_bridge;
struct ia_css_frame_info output_info, raw_output_info;
- struct v4l2_pix_format snr_fmt = f->fmt.pix;
- struct v4l2_pix_format backup_fmt = snr_fmt, s_fmt;
+ struct v4l2_pix_format snr_fmt;
+ struct v4l2_pix_format backup_fmt, s_fmt;
unsigned int dvs_env_w = 0, dvs_env_h = 0;
unsigned int padding_w = pad_w, padding_h = pad_h;
bool res_overflow = false, crop_needs_override = false;
struct v4l2_mbus_framefmt *isp_sink_fmt;
struct v4l2_mbus_framefmt isp_source_fmt = {0};
+ struct v4l2_subdev_format vformat = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *ffmt = &vformat.format;
struct v4l2_rect isp_sink_crop;
u16 source_pad = atomisp_subdev_source_pad(vdev);
struct v4l2_subdev_fh fh;
int ret;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
if (source_pad >= ATOMISP_SUBDEV_PADS_NUM)
return -EINVAL;
@@ -5621,9 +5643,38 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
if (!format_bridge)
return -EINVAL;
+ /* Currently, raw formats are broken!!! */
+
+ if (format_bridge->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
+
+ format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
+ if (!format_bridge)
+ return -EINVAL;
+ }
pipe->sh_fmt = format_bridge->sh_fmt;
pipe->pix.pixelformat = f->fmt.pix.pixelformat;
+ /* Ensure that the resolution is equal or below the maximum supported */
+
+ vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ v4l2_fill_mbus_format(ffmt, &f->fmt.pix, format_bridge->mbus_code);
+ ffmt->height += padding_h;
+ ffmt->width += padding_w;
+
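+ /* Ask the sensor for the padded size; the padding is subtracted again below */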
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad,
+ set_fmt, NULL, &vformat);
+ if (ret)
+ return ret;
+
+ f->fmt.pix.width = ffmt->width - padding_w;
+ f->fmt.pix.height = ffmt->height - padding_h;
+
+ snr_fmt = f->fmt.pix;
+ backup_fmt = snr_fmt;
+
+ /**********************************************************************/
+
if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VF ||
(source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW
&& asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)) {
@@ -5945,7 +5996,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
* which appears to be related by a hardware
* performance limitation. It's unclear why this
* particular code triggers the issue. */
- if (!IS_ISP2401 || crop_needs_override) {
+ if (crop_needs_override) {
if (isp_sink_crop.width * main_compose.height >
isp_sink_crop.height * main_compose.width) {
sink_crop.height = isp_sink_crop.height;
@@ -5987,6 +6038,14 @@ done:
pipe->pix.width = f->fmt.pix.width;
pipe->pix.height = f->fmt.pix.height;
pipe->pix.pixelformat = f->fmt.pix.pixelformat;
+ /*
+ * FIXME: do we need to setup this differently, depending on the
+ * sensor or the pipeline?
+ */
+ pipe->pix.colorspace = V4L2_COLORSPACE_REC709;
+ pipe->pix.ycbcr_enc = V4L2_YCBCR_ENC_709;
+ pipe->pix.xfer_func = V4L2_XFER_FUNC_709;
+
if (format_bridge->planar) {
pipe->pix.bytesperline = output_info.padded_width;
pipe->pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height *
@@ -6021,6 +6080,15 @@ done:
else
isp->need_gfx_throttle = true;
+ /* Report the needed sizes */
+ f->fmt.pix.sizeimage = pipe->pix.sizeimage;
+ f->fmt.pix.bytesperline = pipe->pix.bytesperline;
+
+ dev_dbg(isp->dev, "%s: %dx%d, image size: %d, %d bytes per line\n",
+ __func__,
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.sizeimage, f->fmt.pix.bytesperline);
+
return 0;
}
@@ -6034,6 +6102,12 @@ int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f)
struct v4l2_subdev_fh fh;
int ret;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
v4l2_fh_init(&fh.vfh, vdev);
dev_dbg(isp->dev, "setting fmt %ux%u 0x%x for file inject\n",
@@ -6092,15 +6166,9 @@ int atomisp_set_shading_table(struct atomisp_sub_device *asd,
}
/* Shading table size per color */
- if (!IS_ISP2401) {
- if (user_shading_table->width > ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
- user_shading_table->height > ISP2400_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR)
- return -EINVAL;
- } else {
- if (user_shading_table->width > ISP2401_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
- user_shading_table->height > ISP2401_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR)
- return -EINVAL;
- }
+ if (user_shading_table->width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ user_shading_table->height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR)
+ return -EINVAL;
shading_table = atomisp_css_shading_table_alloc(
user_shading_table->width, user_shading_table->height);
@@ -6359,6 +6427,12 @@ bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe)
{
struct atomisp_sub_device *asd = pipe->asd;
+ if (!asd) {
+ dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, pipe->vdev.name);
+ return false;
+ }
+
if (pipe == &asd->video_out_vf)
return true;
@@ -6572,6 +6646,12 @@ static int atomisp_get_pipe_id(struct atomisp_video_pipe *pipe)
{
struct atomisp_sub_device *asd = pipe->asd;
+ if (!asd) {
+ dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, pipe->vdev.name);
+ return -EINVAL;
+ }
+
if (ATOMISP_USE_YUVPP(asd)) {
return IA_CSS_PIPE_ID_YUVPP;
} else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
@@ -6609,6 +6689,12 @@ int atomisp_get_invalid_frame_num(struct video_device *vdev,
struct ia_css_pipe_info p_info;
int ret;
+ if (!asd) {
+ dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
if (asd->isp->inputs[asd->input_curr].camera_caps->
sensor[asd->sensor_curr].stream_num > 1) {
/* External ISP */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
index e8bdd264d31b..ebc729468f87 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
@@ -74,8 +74,6 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr);
const struct atomisp_format_bridge *get_atomisp_format_bridge_from_mbus(
u32 mbus_code);
bool atomisp_is_mbuscode_raw(uint32_t code);
-int atomisp_get_frame_pgnr(struct atomisp_device *isp,
- const struct ia_css_frame *frame, u32 *p_pgnr);
void atomisp_delayed_init_work(struct work_struct *work);
/* Get internal fmt according to V4L2 fmt */
@@ -266,8 +264,6 @@ int atomisp_compare_grid(struct atomisp_sub_device *asd,
int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd,
struct atomisp_sensor_mode_data *config);
-int atomisp_get_fmt(struct video_device *vdev, struct v4l2_format *f);
-
/* This function looks up the closest available resolution. */
int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f,
bool *res_overflow);
@@ -341,6 +337,8 @@ enum atomisp_metadata_type
atomisp_get_metadata_type(struct atomisp_sub_device *asd,
enum ia_css_pipe_id pipe_id);
+u32 atomisp_get_pixel_depth(u32 pixelformat);
+
/* Function for HAL to inject a fake event to wake up poll thread */
int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp_compat.h
index c16eaf3d126f..64c1bf0943e6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat.h
@@ -133,8 +133,6 @@ void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd,
struct atomisp_css_buffer *isp_css_buffer,
struct ia_css_isp_dvs_statistics_map *dvs_map);
-int atomisp_css_dequeue_event(struct atomisp_css_event *current_event);
-
void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
struct atomisp_css_event *current_event);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index 99a632f33d2d..1173be0e72b0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -611,15 +611,6 @@ static void __apply_additional_pipe_config(
if (stream_env->pipe_configs[pipe_id].
default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW)
stream_env->pipe_configs[pipe_id].enable_dz = false;
-
- if (IS_ISP2401) {
- /* the isp default to use ISP2.2 and the camera hal will
- * control whether use isp2.7 */
- if (asd->select_isp_version->val == ATOMISP_CSS_ISP_PIPE_VERSION_2_7)
- stream_env->pipe_configs[pipe_id].isp_pipe_version = SH_CSS_ISP_PIPE_VERSION_2_7;
- else
- stream_env->pipe_configs[pipe_id].isp_pipe_version = SH_CSS_ISP_PIPE_VERSION_2_2;
- }
break;
case IA_CSS_PIPE_ID_VIDEO:
/* enable reduced pipe to have binary
@@ -972,7 +963,7 @@ int atomisp_css_irq_translate(struct atomisp_device *isp,
void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
unsigned int *infos)
{
-#ifndef ISP2401_NEW_INPUT_SYSTEM
+#if !IS_ISP2401
ia_css_isys_rx_get_irq_info(port, infos);
#else
*infos = 0;
@@ -982,7 +973,7 @@ void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
void atomisp_css_rx_clear_irq_info(enum mipi_port_id port,
unsigned int infos)
{
-#ifndef ISP2401_NEW_INPUT_SYSTEM
+#if !IS_ISP2401
ia_css_isys_rx_clear_irq_info(port, infos);
#endif
}
@@ -1633,14 +1624,6 @@ void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd,
}
}
-int atomisp_css_dequeue_event(struct atomisp_css_event *current_event)
-{
- if (ia_css_dequeue_event(&current_event->event))
- return -EINVAL;
-
- return 0;
-}
-
void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
struct atomisp_css_event *current_event)
{
@@ -2657,41 +2640,50 @@ static int __get_frame_info(struct atomisp_sub_device *asd,
if (__destroy_pipes(asd, true))
dev_warn(isp->dev, "destroy pipe failed.\n");
- if (__create_pipes(asd))
+ if (__create_pipes(asd)) {
+ dev_err(isp->dev, "can't create pipes\n");
return -EINVAL;
+ }
- if (__create_streams(asd))
+ if (__create_streams(asd)) {
+ dev_err(isp->dev, "can't create streams\n");
goto stream_err;
+ }
- ret = ia_css_pipe_get_info(
- asd->stream_env[stream_index]
- .pipes[pipe_id], &p_info);
- if (!ret) {
- switch (type) {
- case ATOMISP_CSS_VF_FRAME:
- *info = p_info.vf_output_info[0];
- dev_dbg(isp->dev, "getting vf frame info.\n");
- break;
- case ATOMISP_CSS_SECOND_VF_FRAME:
- *info = p_info.vf_output_info[1];
- dev_dbg(isp->dev, "getting second vf frame info.\n");
- break;
- case ATOMISP_CSS_OUTPUT_FRAME:
- *info = p_info.output_info[0];
- dev_dbg(isp->dev, "getting main frame info.\n");
- break;
- case ATOMISP_CSS_SECOND_OUTPUT_FRAME:
- *info = p_info.output_info[1];
- dev_dbg(isp->dev, "getting second main frame info.\n");
- break;
- case ATOMISP_CSS_RAW_FRAME:
- *info = p_info.raw_output_info;
- dev_dbg(isp->dev, "getting raw frame info.\n");
- }
- dev_dbg(isp->dev, "get frame info: w=%d, h=%d, num_invalid_frames %d.\n",
- info->res.width, info->res.height, p_info.num_invalid_frames);
- return 0;
+ ret = ia_css_pipe_get_info(asd->stream_env[stream_index].pipes[pipe_id],
+ &p_info);
+ if (ret) {
+ dev_err(isp->dev, "can't get info from pipe\n");
+ goto stream_err;
+ }
+
+ switch (type) {
+ case ATOMISP_CSS_VF_FRAME:
+ *info = p_info.vf_output_info[0];
+ dev_dbg(isp->dev, "getting vf frame info.\n");
+ break;
+ case ATOMISP_CSS_SECOND_VF_FRAME:
+ *info = p_info.vf_output_info[1];
+ dev_dbg(isp->dev, "getting second vf frame info.\n");
+ break;
+ case ATOMISP_CSS_OUTPUT_FRAME:
+ *info = p_info.output_info[0];
+ dev_dbg(isp->dev, "getting main frame info.\n");
+ break;
+ case ATOMISP_CSS_SECOND_OUTPUT_FRAME:
+ *info = p_info.output_info[1];
+ dev_dbg(isp->dev, "getting second main frame info.\n");
+ break;
+ default:
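+ /* Unknown frame types are reported as raw frame info */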
+ case ATOMISP_CSS_RAW_FRAME:
+ *info = p_info.raw_output_info;
+ dev_dbg(isp->dev, "getting raw frame info.\n");
+ break;
}
+ dev_dbg(isp->dev, "get frame info: w=%d, h=%d, num_invalid_frames %d.\n",
+ info->res.width, info->res.height, p_info.num_invalid_frames);
+
+ return 0;
stream_err:
__destroy_pipes(asd, true);
@@ -4111,7 +4103,7 @@ int atomisp_css_isr_thread(struct atomisp_device *isp,
bool reset_wdt_timer[MAX_STREAM_NUM] = {false};
int i;
- while (!atomisp_css_dequeue_event(&current_event)) {
+ while (!ia_css_dequeue_psys_event(&current_event.event)) {
if (current_event.event.type ==
IA_CSS_EVENT_TYPE_FW_ASSERT) {
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
index f82bf082aa79..be6a74d5ac19 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -464,13 +464,11 @@ int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd)
css_capture_pipe_id = IA_CSS_PIPE_ID_CAPTURE;
}
-#ifdef ISP2401_NEW_INPUT_SYSTEM
- if (asd->copy_mode) {
+ if (IS_ISP2401 && asd->copy_mode) {
css_capture_pipe_id = IA_CSS_PIPE_ID_COPY;
css_preview_pipe_id = IA_CSS_PIPE_ID_COPY;
css_video_pipe_id = IA_CSS_PIPE_ID_COPY;
}
-#endif
if (asd->yuvpp_mode) {
capture_pipe = &asd->video_out_capture;
@@ -772,6 +770,30 @@ static int atomisp_open(struct file *file)
dev_dbg(isp->dev, "open device %s\n", vdev->name);
+ /*
+ * Block while the driver is still loading and proceed once loading is
+ * over. We can't blindly hold the lock until then, as a failed load
+ * would deadlock the unload path.
+ */
+ rt_mutex_lock(&isp->loading);
+ /*
+ * FIXME: revisit this with a better check once the code structure
+ * is cleaned up a bit more
+ */
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ dev_err(isp->dev,
+ "%s: v4l2_fh_open() returned error %d\n",
+ __func__, ret);
+ rt_mutex_unlock(&isp->loading);
+ return ret;
+ }
+ if (!isp->ready) {
+ rt_mutex_unlock(&isp->loading);
+ return -ENXIO;
+ }
+ rt_mutex_unlock(&isp->loading);
+
rt_mutex_lock(&isp->mutex);
acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC");
@@ -877,6 +899,11 @@ done:
else
pipe->users++;
rt_mutex_unlock(&isp->mutex);
+
+ /* Ensure that a mode is set */
+ if (!acc_node)
+ v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode);
+
return 0;
css_error:
@@ -1025,7 +1052,7 @@ done:
rt_mutex_unlock(&isp->mutex);
mutex_unlock(&isp->streamoff_mutex);
- return 0;
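+ /* Balance the v4l2_fh_open() performed in atomisp_open() */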
+ return v4l2_fh_release(file);
}
/*
@@ -1067,7 +1094,7 @@ static int frame_mmap(struct atomisp_device *isp,
host_virt = vma->vm_start;
isp_virt = frame->data;
- atomisp_get_frame_pgnr(isp, frame, &pgnr);
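+ /* Equivalent page count: round the frame size up to whole pages */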
+ pgnr = DIV_ROUND_UP(frame->data_bytes, PAGE_SIZE);
if (do_isp_mm_remap(isp, vma, isp_virt, host_virt, pgnr))
return -EAGAIN;
@@ -1171,6 +1198,12 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma)
u32 origin_size, new_size;
int ret;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
if (!(vma->vm_flags & (VM_WRITE | VM_READ)))
return -EACCES;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
index d8c9e31314b2..1cc581074ba7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
@@ -321,6 +321,18 @@ static struct gmin_cfg_var i8880_vars[] = {
{},
};
+/*
+ * Surface 3 does not describe CsiPort/CsiLanes in either DSDT or EFI.
+ */
+static struct gmin_cfg_var surface3_vars[] = {
+ {"APTA0330:00_CsiPort", "0"},
+ {"APTA0330:00_CsiLanes", "2"},
+
+ {"OVTI8835:00_CsiPort", "1"},
+ {"OVTI8835:00_CsiLanes", "4"},
+ {},
+};
+
static const struct dmi_system_id gmin_vars[] = {
{
.ident = "BYT-T FFD8",
@@ -358,6 +370,13 @@ static const struct dmi_system_id gmin_vars[] = {
},
.driver_data = i8880_vars,
},
+ {
+ .ident = "Surface 3",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "Surface 3"),
+ },
+ .driver_data = surface3_vars,
+ },
{}
};
@@ -481,7 +500,7 @@ fail:
static u8 gmin_get_pmic_id_and_addr(struct device *dev)
{
- struct i2c_client *power;
+ struct i2c_client *power = NULL;
static u8 pmic_i2c_addr;
if (pmic_id)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
index c01db10bb735..f71ab1ee6e19 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -247,6 +247,13 @@ struct atomisp_device {
* structures and css API calls. */
struct rt_mutex mutex;
/*
+ * This mutex ensures that we don't allow an open to succeed while
+ * the initialization process is incomplete
+ */
+ struct rt_mutex loading;
+ /* Set once the ISP is ready to allow opens */
+ bool ready;
+ /*
* Serialise streamoff: mutex is dropped during streamoff to
* cancel the watchdog queue. MUST be acquired BEFORE
* "mutex".
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index c8a625667e81..8fd470efd658 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -492,23 +492,22 @@ const struct atomisp_format_bridge atomisp_output_fmts[] = {
.mbus_code = MEDIA_BUS_FMT_BGR565_2X8_LE,
.sh_fmt = IA_CSS_FRAME_FORMAT_RGB565,
.description = "16 RGB 5-6-5"
+#if 0
}, {
.pixelformat = V4L2_PIX_FMT_JPEG,
.depth = 8,
.mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
.sh_fmt = IA_CSS_FRAME_FORMAT_BINARY_8,
.description = "JPEG"
- },
-#if 0
- {
+ }, {
/* This is a custom format being used by M10MO to send the RAW data */
.pixelformat = V4L2_PIX_FMT_CUSTOM_M10MO_RAW,
.depth = 8,
.mbus_code = V4L2_MBUS_FMT_CUSTOM_M10MO_RAW,
.sh_fmt = IA_CSS_FRAME_FORMAT_BINARY_8,
.description = "Custom RAW for M10MO"
- },
#endif
+ },
};
const struct atomisp_format_bridge *
@@ -646,6 +645,12 @@ static int atomisp_g_input(struct file *file, void *fh, unsigned int *input)
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
rt_mutex_lock(&isp->mutex);
*input = asd->input_curr;
rt_mutex_unlock(&isp->mutex);
@@ -665,6 +670,12 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
struct v4l2_subdev *motor;
int ret;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
rt_mutex_lock(&isp->mutex);
if (input >= ATOM_ISP_MAX_INPUTS || input >= isp->input_cnt) {
dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt);
@@ -755,24 +766,91 @@ error:
return ret;
}
+static int atomisp_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+
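+ /* Subtract the sensor padding so userspace sees the usable resolution */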
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width - pad_w;
+ fsize->discrete.height = fse.max_height - pad_h;
+
+ return 0;
+}
+
+static int atomisp_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .code = atomisp_in_fmt_conv[0].code,
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ pad, enum_frame_interval, NULL,
+ &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return ret;
+}
+
static int atomisp_enum_fmt_cap(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
- struct v4l2_subdev_mbus_code_enum code = { 0 };
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ const struct atomisp_format_bridge *format;
+ struct v4l2_subdev *camera;
unsigned int i, fi = 0;
int rval;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
+ camera = isp->inputs[asd->input_curr].camera;
+ if (!camera) {
+ dev_err(isp->dev, "%s(): camera is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
rt_mutex_lock(&isp->mutex);
- rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad,
- enum_mbus_code, NULL, &code);
+
+ rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code);
if (rval == -ENOIOCTLCMD) {
dev_warn(isp->dev,
- "enum_mbus_code pad op not supported. Please fix your sensor driver!\n");
- // rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- // video, enum_mbus_fmt, 0, &code.code);
+ "enum_mbus_code pad op not supported by %s. Please fix your sensor driver!\n",
+ camera->name);
}
rt_mutex_unlock(&isp->mutex);
@@ -780,15 +858,15 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
return rval;
for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
- const struct atomisp_format_bridge *format =
- &atomisp_output_fmts[i];
+ format = &atomisp_output_fmts[i];
/*
* Is the atomisp-supported format valid for the
* sensor (configuration)? If not, skip it.
+ *
+ * FIXME: fix the pipeline to allow sensor format too.
*/
- if (format->sh_fmt == IA_CSS_FRAME_FORMAT_RAW
- && format->mbus_code != code.code)
+ if (format->sh_fmt == IA_CSS_FRAME_FORMAT_RAW)
continue;
/* Found a match. Now let's pick f->index'th one. */
@@ -806,20 +884,6 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
return -EINVAL;
}
-static int atomisp_g_fmt_cap(struct file *file, void *fh,
- struct v4l2_format *f)
-{
- struct video_device *vdev = video_devdata(file);
- struct atomisp_device *isp = video_get_drvdata(vdev);
-
- int ret;
-
- rt_mutex_lock(&isp->mutex);
- ret = atomisp_get_fmt(vdev, f);
- rt_mutex_unlock(&isp->mutex);
- return ret;
-}
-
static int atomisp_g_fmt_file(struct file *file, void *fh,
struct v4l2_format *f)
{
@@ -834,6 +898,72 @@ static int atomisp_g_fmt_file(struct file *file, void *fh,
return 0;
}
+static int atomisp_adjust_fmt(struct v4l2_format *f)
+{
+ const struct atomisp_format_bridge *format_bridge;
+ u32 padded_width;
+
+ format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
+
+ padded_width = f->fmt.pix.width + pad_w;
+
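+ /* Planar: bytesperline covers a single plane; packed: depth bits per pixel */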
+ if (format_bridge->planar) {
+ f->fmt.pix.bytesperline = padded_width;
+ f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height *
+ DIV_ROUND_UP(format_bridge->depth *
+ padded_width, 8));
+ } else {
+ f->fmt.pix.bytesperline = DIV_ROUND_UP(format_bridge->depth *
+ padded_width, 8);
+ f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * f->fmt.pix.bytesperline);
+ }
+
+ if (f->fmt.pix.field == V4L2_FIELD_ANY)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
+ if (!format_bridge)
+ return -EINVAL;
+
+ /* Currently, raw formats are broken!!! */
+ if (format_bridge->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
+
+ format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
+ if (!format_bridge)
+ return -EINVAL;
+ }
+
+ padded_width = f->fmt.pix.width + pad_w;
+
+ if (format_bridge->planar) {
+ f->fmt.pix.bytesperline = padded_width;
+ f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height *
+ DIV_ROUND_UP(format_bridge->depth *
+ padded_width, 8));
+ } else {
+ f->fmt.pix.bytesperline = DIV_ROUND_UP(format_bridge->depth *
+ padded_width, 8);
+ f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * f->fmt.pix.bytesperline);
+ }
+
+ if (f->fmt.pix.field == V4L2_FIELD_ANY)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ /*
+ * FIXME: do we need to setup this differently, depending on the
+ * sensor or the pipeline?
+ */
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_709;
+ f->fmt.pix.xfer_func = V4L2_XFER_FUNC_709;
+
+ f->fmt.pix.width -= pad_w;
+ f->fmt.pix.height -= pad_h;
+
+ return 0;
+}
+
/* This function looks up the closest available resolution. */
static int atomisp_try_fmt_cap(struct file *file, void *fh,
struct v4l2_format *f)
@@ -845,7 +975,35 @@ static int atomisp_try_fmt_cap(struct file *file, void *fh,
rt_mutex_lock(&isp->mutex);
ret = atomisp_try_fmt(vdev, &f->fmt.pix, NULL);
rt_mutex_unlock(&isp->mutex);
- return ret;
+
+ if (ret)
+ return ret;
+
+ return atomisp_adjust_fmt(f);
+}
+
+static int atomisp_g_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe;
+
+ rt_mutex_lock(&isp->mutex);
+ pipe = atomisp_to_video_pipe(vdev);
+ rt_mutex_unlock(&isp->mutex);
+
+ f->fmt.pix = pipe->pix;
+
+ /* If s_fmt was issued, just return whatever was previously set */
+ if (f->fmt.pix.sizeimage)
+ return 0;
+
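+ /* No format set yet: request an oversized frame so try_fmt returns the maximum */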
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+ f->fmt.pix.width = 10000;
+ f->fmt.pix.height = 10000;
+
+ return atomisp_try_fmt_cap(file, fh, f);
}
static int atomisp_s_fmt_cap(struct file *file, void *fh,
@@ -1024,9 +1182,16 @@ int __atomisp_reqbufs(struct file *file, void *fh,
struct ia_css_frame *frame;
struct videobuf_vmalloc_memory *vm_mem;
u16 source_pad = atomisp_subdev_source_pad(vdev);
- u16 stream_id = atomisp_source_pad_to_stream_id(asd, source_pad);
+ u16 stream_id;
int ret = 0, i = 0;
+ if (!asd) {
+ dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+ stream_id = atomisp_source_pad_to_stream_id(asd, source_pad);
+
if (req->count == 0) {
mutex_lock(&pipe->capq.vb_lock);
if (!list_empty(&pipe->capq.stream))
@@ -1154,6 +1319,12 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
u32 pgnr;
int ret = 0;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
rt_mutex_lock(&isp->mutex);
if (isp->isp_fatal_error) {
ret = -EIO;
@@ -1287,25 +1458,8 @@ done:
pipe->capq.streaming &&
!asd->enable_raw_buffer_lock->val &&
asd->params.offline_parm.num_captures == 1) {
- if (!IS_ISP2401) {
asd->pending_capture_request++;
dev_dbg(isp->dev, "Add one pending capture request.\n");
- } else {
- if (asd->re_trigger_capture) {
- ret = atomisp_css_offline_capture_configure(asd,
- asd->params.offline_parm.num_captures,
- asd->params.offline_parm.skip_frames,
- asd->params.offline_parm.offset);
- asd->re_trigger_capture = false;
- dev_dbg(isp->dev, "%s Trigger capture again ret=%d\n",
- __func__, ret);
-
- } else {
- asd->pending_capture_request++;
- asd->re_trigger_capture = false;
- dev_dbg(isp->dev, "Add one pending capture request.\n");
- }
- }
}
rt_mutex_unlock(&isp->mutex);
@@ -1389,6 +1543,12 @@ static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
struct atomisp_device *isp = video_get_drvdata(vdev);
int ret = 0;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
rt_mutex_lock(&isp->mutex);
if (isp->isp_fatal_error) {
@@ -1640,6 +1800,12 @@ static int atomisp_streamon(struct file *file, void *fh,
int ret = 0;
unsigned long irqflags;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
dev_dbg(isp->dev, "Start stream on pad %d for asd%d\n",
atomisp_subdev_source_pad(vdev), asd->index);
@@ -1686,8 +1852,6 @@ static int atomisp_streamon(struct file *file, void *fh,
/* Reset pending capture request count. */
asd->pending_capture_request = 0;
- if (IS_ISP2401)
- asd->re_trigger_capture = false;
if ((atomisp_subdev_streaming_count(asd) > sensor_start_stream) &&
(!isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl)) {
@@ -1901,6 +2065,12 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
unsigned long flags;
bool first_streamoff = false;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n",
atomisp_subdev_source_pad(vdev), asd->index);
@@ -2150,6 +2320,12 @@ static int atomisp_g_ctrl(struct file *file, void *fh,
struct atomisp_device *isp = video_get_drvdata(vdev);
int i, ret = -EINVAL;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
for (i = 0; i < ctrls_num; i++) {
if (ci_v4l2_controls[i].id == control->id) {
ret = 0;
@@ -2229,6 +2405,12 @@ static int atomisp_s_ctrl(struct file *file, void *fh,
struct atomisp_device *isp = video_get_drvdata(vdev);
int i, ret = -EINVAL;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
for (i = 0; i < ctrls_num; i++) {
if (ci_v4l2_controls[i].id == control->id) {
ret = 0;
@@ -2310,6 +2492,12 @@ static int atomisp_queryctl(struct file *file, void *fh,
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
struct atomisp_device *isp = video_get_drvdata(vdev);
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
switch (qc->id) {
case V4L2_CID_FOCUS_ABSOLUTE:
case V4L2_CID_FOCUS_RELATIVE:
@@ -2355,6 +2543,12 @@ static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh,
int i;
int ret = 0;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
if (!IS_ISP2401)
motor = isp->inputs[asd->input_curr].motor;
else
@@ -2466,6 +2660,12 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh,
int i;
int ret = 0;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
if (!IS_ISP2401)
motor = isp->inputs[asd->input_curr].motor;
else
@@ -2591,6 +2791,12 @@ static int atomisp_g_parm(struct file *file, void *fh,
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
struct atomisp_device *isp = video_get_drvdata(vdev);
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
dev_err(isp->dev, "unsupported v4l2 buf type\n");
return -EINVAL;
@@ -2613,6 +2819,12 @@ static int atomisp_s_parm(struct file *file, void *fh,
int rval;
int fps;
+ if (!asd) {
+ dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
+ __func__, vdev->name);
+ return -EINVAL;
+ }
+
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
dev_err(isp->dev, "unsupported v4l2 buf type\n");
return -EINVAL;
@@ -3035,6 +3247,8 @@ const struct v4l2_ioctl_ops atomisp_ioctl_ops = {
.vidioc_g_ctrl = atomisp_g_ctrl,
.vidioc_s_ext_ctrls = atomisp_s_ext_ctrls,
.vidioc_g_ext_ctrls = atomisp_g_ext_ctrls,
+ .vidioc_enum_framesizes = atomisp_enum_framesizes,
+ .vidioc_enum_frameintervals = atomisp_enum_frameintervals,
.vidioc_enum_fmt_vid_cap = atomisp_enum_fmt_cap,
.vidioc_try_fmt_vid_cap = atomisp_try_fmt_cap,
.vidioc_g_fmt_vid_cap = atomisp_g_fmt_cap,
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 12f22ad007c7..1807cfa786a7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -1058,24 +1058,6 @@ static const struct v4l2_ctrl_config ctrl_depth_mode = {
.def = 0,
};
-/*
- * Control for selectting ISP version
- *
- * When enabled, that means ISP version will be used ISP2.7. when disable, the
- * isp will default to use ISP2.2.
- * Note: Make sure set this configuration before creating stream.
- */
-static const struct v4l2_ctrl_config ctrl_select_isp_version = {
- .ops = &ctrl_ops,
- .id = V4L2_CID_ATOMISP_SELECT_ISP_VERSION,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Select Isp version",
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
-};
-
static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
{
@@ -1164,23 +1146,28 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
atomisp_init_acc_pipe(asd, &asd->video_acc);
- ret = atomisp_video_init(&asd->video_in, "MEMORY");
+ ret = atomisp_video_init(&asd->video_in, "MEMORY",
+ ATOMISP_RUN_MODE_SDV);
if (ret < 0)
return ret;
- ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE");
+ ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE",
+ ATOMISP_RUN_MODE_STILL_CAPTURE);
if (ret < 0)
return ret;
- ret = atomisp_video_init(&asd->video_out_vf, "VIEWFINDER");
+ ret = atomisp_video_init(&asd->video_out_vf, "VIEWFINDER",
+ ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE);
if (ret < 0)
return ret;
- ret = atomisp_video_init(&asd->video_out_preview, "PREVIEW");
+ ret = atomisp_video_init(&asd->video_out_preview, "PREVIEW",
+ ATOMISP_RUN_MODE_PREVIEW);
if (ret < 0)
return ret;
- ret = atomisp_video_init(&asd->video_out_video_capture, "VIDEO");
+ ret = atomisp_video_init(&asd->video_out_video_capture, "VIDEO",
+ ATOMISP_RUN_MODE_VIDEO);
if (ret < 0)
return ret;
@@ -1218,11 +1205,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
v4l2_ctrl_new_custom(&asd->ctrl_handler,
&ctrl_disable_dz,
NULL);
- if (IS_ISP2401) {
- asd->select_isp_version = v4l2_ctrl_new_custom(&asd->ctrl_handler,
- &ctrl_select_isp_version,
- NULL);
- }
/* Make controls visible on subdev as well. */
asd->subdev.ctrl_handler = &asd->ctrl_handler;
@@ -1351,6 +1333,14 @@ int atomisp_subdev_register_entities(struct atomisp_sub_device *asd,
if (ret < 0)
goto error;
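+ /* Register the preview node first so it gets the lowest video minor */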
+ asd->video_out_preview.vdev.v4l2_dev = vdev;
+ asd->video_out_preview.vdev.device_caps = device_caps |
+ V4L2_CAP_VIDEO_OUTPUT;
+ ret = video_register_device(&asd->video_out_preview.vdev,
+ VFL_TYPE_VIDEO, -1);
+ if (ret < 0)
+ goto error;
+
asd->video_out_capture.vdev.v4l2_dev = vdev;
asd->video_out_capture.vdev.device_caps = device_caps |
V4L2_CAP_VIDEO_OUTPUT;
@@ -1366,13 +1356,7 @@ int atomisp_subdev_register_entities(struct atomisp_sub_device *asd,
VFL_TYPE_VIDEO, -1);
if (ret < 0)
goto error;
- asd->video_out_preview.vdev.v4l2_dev = vdev;
- asd->video_out_preview.vdev.device_caps = device_caps |
- V4L2_CAP_VIDEO_OUTPUT;
- ret = video_register_device(&asd->video_out_preview.vdev,
- VFL_TYPE_VIDEO, -1);
- if (ret < 0)
- goto error;
+
asd->video_out_video_capture.vdev.v4l2_dev = vdev;
asd->video_out_video_capture.vdev.device_caps = device_caps |
V4L2_CAP_VIDEO_OUTPUT;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
index d6fcfab6352d..7d731f1fee72 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
@@ -81,6 +81,9 @@ struct atomisp_video_pipe {
/* the link list to store per_frame parameters */
struct list_head per_frame_params;
+ /* Store here the initial run mode */
+ unsigned int default_run_mode;
+
unsigned int buffers_in_css;
/* irq_lock is used to protect video buffer state change operations and
@@ -316,7 +319,6 @@ struct atomisp_sub_device {
/* ISP2401 */
struct v4l2_ctrl *ion_dev_fd;
- struct v4l2_ctrl *select_isp_version;
struct v4l2_ctrl *disable_dz;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index 1e324f1f656e..1b240891a6e2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -447,7 +447,8 @@ const struct atomisp_dfs_config dfs_config_cht_soc = {
.dfs_table_size = ARRAY_SIZE(dfs_rules_cht_soc),
};
-int atomisp_video_init(struct atomisp_video_pipe *video, const char *name)
+int atomisp_video_init(struct atomisp_video_pipe *video, const char *name,
+ unsigned int run_mode)
{
int ret;
const char *direction;
@@ -478,6 +479,7 @@ int atomisp_video_init(struct atomisp_video_pipe *video, const char *name)
"ATOMISP ISP %s %s", name, direction);
video->vdev.release = video_device_release_empty;
video_set_drvdata(&video->vdev, video->isp);
+ video->default_run_mode = run_mode;
return 0;
}
@@ -711,15 +713,15 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
dev_dbg(isp->dev, "IUNIT power-%s.\n", enable ? "on" : "off");
- /*WA:Enable DVFS*/
+ /* WA for P-Unit, if DVFS enabled, ISP timeout observed */
if (IS_CHT && enable)
- punit_ddr_dvfs_enable(true);
+ punit_ddr_dvfs_enable(false);
/*
* FIXME:WA for ECS28A, with this sleep, CTS
* android.hardware.camera2.cts.CameraDeviceTest#testCameraDeviceAbort
* PASS, no impact on other platforms
- */
+ */
if (IS_BYT && enable)
msleep(10);
@@ -727,7 +729,7 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0,
val, MRFLD_ISPSSPM0_ISPSSC_MASK);
- /*WA:Enable DVFS*/
+ /* WA:Enable DVFS */
if (IS_CHT && !enable)
punit_ddr_dvfs_enable(true);
@@ -1182,6 +1184,7 @@ static void atomisp_unregister_entities(struct atomisp_device *isp)
v4l2_device_unregister(&isp->v4l2_dev);
media_device_unregister(&isp->media_dev);
+ media_device_cleanup(&isp->media_dev);
}
static int atomisp_register_entities(struct atomisp_device *isp)
@@ -1566,6 +1569,7 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
dev_dbg(&pdev->dev, "atomisp mmio base: %p\n", isp->base);
rt_mutex_init(&isp->mutex);
+ rt_mutex_init(&isp->loading);
mutex_init(&isp->streamoff_mutex);
spin_lock_init(&isp->lock);
@@ -1633,12 +1637,7 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
pdev->d3cold_delay = 0;
break;
case ATOMISP_PCI_DEVICE_SOC_ANN:
- isp->media_dev.hw_revision = (
-#ifdef ISP2401_NEW_INPUT_SYSTEM
- ATOMISP_HW_REVISION_ISP2401
-#else
- ATOMISP_HW_REVISION_ISP2401_LEGACY
-#endif
+ isp->media_dev.hw_revision = (ATOMISP_HW_REVISION_ISP2401
<< ATOMISP_HW_REVISION_SHIFT);
isp->media_dev.hw_revision |= pdev->revision < 2 ?
ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
@@ -1646,12 +1645,7 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
isp->hpll_freq = HPLL_FREQ_1600MHZ;
break;
case ATOMISP_PCI_DEVICE_SOC_CHT:
- isp->media_dev.hw_revision = (
-#ifdef ISP2401_NEW_INPUT_SYSTEM
- ATOMISP_HW_REVISION_ISP2401
-#else
- ATOMISP_HW_REVISION_ISP2401_LEGACY
-#endif
+ isp->media_dev.hw_revision = (ATOMISP_HW_REVISION_ISP2401
<< ATOMISP_HW_REVISION_SHIFT);
isp->media_dev.hw_revision |= pdev->revision < 2 ?
ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
@@ -1748,6 +1742,8 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, csi_afe_trim);
}
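+ /* Hold the loading mutex until the ISP is fully initialized or probe fails */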
+ rt_mutex_lock(&isp->loading);
+
err = atomisp_initialize_modules(isp);
if (err < 0) {
dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err);
@@ -1805,6 +1801,8 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
release_firmware(isp->firmware);
isp->firmware = NULL;
isp->css_env.isp_css_fw.data = NULL;
+ isp->ready = true;
+ rt_mutex_unlock(&isp->loading);
atomisp_drvfs_init(isp);
@@ -1824,6 +1822,7 @@ wdt_work_queue_fail:
register_entities_fail:
atomisp_uninitialize_modules(isp);
initialize_modules_fail:
+ rt_mutex_unlock(&isp->loading);
cpu_latency_qos_remove_request(&isp->pm_qos);
atomisp_msi_irq_uninit(isp);
pci_free_irq_vectors(pdev);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h
index 81bb356b8172..72611b8286a4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h
@@ -27,7 +27,8 @@ struct v4l2_device;
struct atomisp_device;
struct firmware;
-int atomisp_video_init(struct atomisp_video_pipe *video, const char *name);
+int atomisp_video_init(struct atomisp_video_pipe *video, const char *name,
+ unsigned int run_mode);
void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name);
void atomisp_video_unregister(struct atomisp_video_pipe *video);
void atomisp_acc_unregister(struct atomisp_acc_pipe *video);
diff --git a/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c
index f20c9b02fbe0..7dd0e4a53c8b 100644
--- a/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c
+++ b/drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c
@@ -58,7 +58,6 @@ static void pipe_binarydesc_get_offline(
descr->enable_dz = true;
descr->enable_xnr = false;
descr->enable_dpc = false;
- descr->enable_luma_only = false;
descr->enable_tnr = false;
descr->enable_capture_pp_bli = false;
descr->enable_fractional_ds = false;
@@ -390,8 +389,6 @@ int ia_css_pipe_get_video_binarydesc(
pipe->extra_config.enable_fractional_ds;
video_descr->enable_dpc =
pipe->config.enable_dpc;
- video_descr->enable_luma_only =
- pipe->config.enable_luma_only;
video_descr->enable_tnr =
pipe->config.enable_tnr;
@@ -574,11 +571,9 @@ void ia_css_pipe_get_primary_binarydesc(
in_info->res = pipe->config.input_effective_res;
in_info->padded_width = in_info->res.width;
-#if !defined(HAS_NO_PACKED_RAW_PIXELS)
if (pipe->stream->config.pack_raw_pixels)
in_info->format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
else
-#endif
in_info->format = IA_CSS_FRAME_FORMAT_RAW;
in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
@@ -600,24 +595,15 @@ void ia_css_pipe_get_primary_binarydesc(
prim_descr->isp_pipe_version = pipe->config.isp_pipe_version;
prim_descr->enable_fractional_ds =
pipe->extra_config.enable_fractional_ds;
- prim_descr->enable_luma_only =
- pipe->config.enable_luma_only;
/* We have both striped and non-striped primary binaries,
* if continuous viewfinder is required, then we must select
* a striped one. Otherwise we prefer to use a non-striped
* since it has better performance. */
if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1)
prim_descr->striped = false;
- else if (!IS_ISP2401) {
+ else
prim_descr->striped = prim_descr->continuous &&
(!pipe->stream->stop_copy_preview || !pipe->stream->disable_cont_vf);
- } else {
- prim_descr->striped = prim_descr->continuous && !pipe->stream->disable_cont_vf;
-
- if ((pipe->config.default_capture_config.enable_xnr != 0) &&
- (pipe->extra_config.enable_dvs_6axis == true))
- prim_descr->enable_xnr = true;
- }
}
IA_CSS_LEAVE_PRIVATE("");
}
@@ -849,14 +835,7 @@ void ia_css_pipe_get_ldc_binarydesc(
assert(out_info);
IA_CSS_ENTER_PRIVATE("");
- if (!IS_ISP2401) {
- *in_info = *out_info;
- } else {
- if (pipe->out_yuv_ds_input_info.res.width)
- *in_info = pipe->out_yuv_ds_input_info;
- else
- *in_info = *out_info;
- }
+ *in_info = *out_info;
in_info->format = IA_CSS_FRAME_FORMAT_YUV420;
in_info->raw_bit_depth = 0;
diff --git a/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_configs.c
deleted file mode 100644
index 1a021ae841fe..000000000000
--- a/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_configs.c
+++ /dev/null
@@ -1,386 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-/* Generated code: do not edit or commmit. */
-
-#define IA_CSS_INCLUDE_CONFIGURATIONS
-#include "ia_css_pipeline.h"
-#include "ia_css_isp_configs.h"
-#include "ia_css_debug.h"
-#include "assert_support.h"
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_iterator(
- const struct ia_css_binary *binary,
- const struct ia_css_iterator_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_iterator() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.iterator.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset;
- }
- if (size) {
- ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_iterator() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_copy_output(
- const struct ia_css_binary *binary,
- const struct ia_css_copy_output_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_copy_output() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset;
- }
- if (size) {
- ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_copy_output() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_crop(
- const struct ia_css_binary *binary,
- const struct ia_css_crop_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_crop() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.crop.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset;
- }
- if (size) {
- ia_css_crop_config((struct sh_css_isp_crop_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_crop() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_fpn(
- const struct ia_css_binary *binary,
- const struct ia_css_fpn_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_fpn() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.fpn.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset;
- }
- if (size) {
- ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_fpn() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_dvs(
- const struct ia_css_binary *binary,
- const struct ia_css_dvs_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_dvs() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.dvs.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset;
- }
- if (size) {
- ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_dvs() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_qplane(
- const struct ia_css_binary *binary,
- const struct ia_css_qplane_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_qplane() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.qplane.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset;
- }
- if (size) {
- ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_qplane() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_output0(
- const struct ia_css_binary *binary,
- const struct ia_css_output0_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output0() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.output0.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset;
- }
- if (size) {
- ia_css_output0_config((struct sh_css_isp_output_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output0() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_output1(
- const struct ia_css_binary *binary,
- const struct ia_css_output1_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output1() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.output1.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset;
- }
- if (size) {
- ia_css_output1_config((struct sh_css_isp_output_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output1() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_output(
- const struct ia_css_binary *binary,
- const struct ia_css_output_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.output.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.output.offset;
- }
- if (size) {
- ia_css_output_config((struct sh_css_isp_output_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_raw(
- const struct ia_css_binary *binary,
- const struct ia_css_raw_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_raw() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.raw.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset;
- }
- if (size) {
- ia_css_raw_config((struct sh_css_isp_raw_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_raw() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_tnr(
- const struct ia_css_binary *binary,
- const struct ia_css_tnr_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_tnr() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.tnr.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset;
- }
- if (size) {
- ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_tnr() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_ref(
- const struct ia_css_binary *binary,
- const struct ia_css_ref_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_ref() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.ref.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset;
- }
- if (size) {
- ia_css_ref_config((struct sh_css_isp_ref_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_ref() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_vf(
- const struct ia_css_binary *binary,
- const struct ia_css_vf_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_vf() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.vf.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset;
- }
- if (size) {
- ia_css_vf_config((struct sh_css_isp_vf_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_vf() leave:\n");
-}
diff --git a/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_params.c
deleted file mode 100644
index b786247b322b..000000000000
--- a/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_params.c
+++ /dev/null
@@ -1,3420 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-#define IA_CSS_INCLUDE_PARAMETERS
-#include "sh_css_params.h"
-#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
-#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h"
-#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h"
-#include "isp/kernels/bh/bh_2/ia_css_bh.host.h"
-#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h"
-#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h"
-#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
-#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
-#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h"
-#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h"
-#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h"
-#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h"
-#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
-#include "isp/kernels/de/de_2/ia_css_de2.host.h"
-#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
-#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
-#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
-#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h"
-#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h"
-#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h"
-#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h"
-#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
-#include "isp/kernels/ob/ob2/ia_css_ob2.host.h"
-#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
-#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h"
-#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
-#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
-#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h"
-#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h"
-#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
-#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h"
-#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h"
-#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h"
-#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h"
-#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
-#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h"
-#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h"
-#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h"
-#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
-#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
-#include "isp/kernels/bnlm/ia_css_bnlm.host.h"
-#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h"
-/* Generated code: do not edit or commit. */
-
-#include "ia_css_pipeline.h"
-#include "ia_css_isp_params.h"
-#include "ia_css_debug.h"
-#include "assert_support.h"
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_aa(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.aa.size;
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset;
-
- if (size) {
- struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
- t->strength = params->aa_config.strength;
- }
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_anr(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.anr.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_anr() enter:\n");
-
- ia_css_anr_encode((struct sh_css_isp_anr_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->anr_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_anr() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_anr2(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_anr2() enter:\n");
-
- ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
- &params->anr_thres,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_anr2() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_bh(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.bh.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
-
- ia_css_bh_encode((struct sh_css_isp_bh_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->s3a_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
- }
- }
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
-
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_cnr(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_cnr() enter:\n");
-
- ia_css_cnr_encode((struct sh_css_isp_cnr_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->cnr_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_cnr() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_crop(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.crop.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_crop() enter:\n");
-
- ia_css_crop_encode((struct sh_css_isp_crop_isp_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->crop_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_crop() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_csc(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.csc.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_csc() enter:\n");
-
- ia_css_csc_encode((struct sh_css_isp_csc_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->cc_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_csc() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_dp(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n");
-
- ia_css_dp_encode((struct sh_css_isp_dp_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->dp_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_bnr(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_bnr() enter:\n");
-
- ia_css_bnr_encode((struct sh_css_isp_bnr_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->nr_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_bnr() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_de(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.de.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.de.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n");
-
- ia_css_de_encode((struct sh_css_isp_de_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->de_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_ecd(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_ecd() enter:\n");
-
- ia_css_ecd_encode((struct sh_css_isp_ecd_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->ecd_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_ecd() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_formats(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.formats.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_formats() enter:\n");
-
- ia_css_formats_encode((struct sh_css_isp_formats_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->formats_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_formats() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_fpn(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_fpn() enter:\n");
-
- ia_css_fpn_encode((struct sh_css_isp_fpn_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->fpn_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_fpn() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_gc(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.gc.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
-
- ia_css_gc_encode((struct sh_css_isp_gc_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->gc_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
- }
- }
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
-
- ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
- &params->gc_table,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_ce(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.ce.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n");
-
- ia_css_ce_encode((struct sh_css_isp_ce_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->ce_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_yuv2rgb(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_yuv2rgb() enter:\n");
-
- ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->yuv2rgb_cc_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_yuv2rgb() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_rgb2yuv(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_rgb2yuv() enter:\n");
-
- ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->rgb2yuv_cc_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_rgb2yuv() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_r_gamma(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_r_gamma() enter:\n");
-
- ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
- &params->r_gamma_table,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_r_gamma() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_g_gamma(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_g_gamma() enter:\n");
-
- ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
- &params->g_gamma_table,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_g_gamma() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_b_gamma(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_b_gamma() enter:\n");
-
- ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset],
- &params->b_gamma_table,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_b_gamma() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_uds(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.uds.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset;
-
- if (size) {
- struct sh_css_sp_uds_params *p;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_uds() enter:\n");
-
- p = (struct sh_css_sp_uds_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
- p->crop_pos = params->uds_config.crop_pos;
- p->uds = params->uds_config.uds;
-
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_uds() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_raa(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.raa.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_raa() enter:\n");
-
- ia_css_raa_encode((struct sh_css_isp_aa_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->raa_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_raa() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_s3a(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_s3a() enter:\n");
-
- ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->s3a_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_s3a() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_ob(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.ob.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
-
- ia_css_ob_encode((struct sh_css_isp_ob_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->ob_config,
- &params->stream_configs.ob, size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
- }
- }
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vmem.ob.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
-
- ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
- &params->ob_config,
- &params->stream_configs.ob, size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_output(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.output.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.output.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_output() enter:\n");
-
- ia_css_output_encode((struct sh_css_isp_output_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->output_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_output() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_sc(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.sc.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n");
-
- ia_css_sc_encode((struct sh_css_isp_sc_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->sc_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_bds(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.bds.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset;
-
- if (size) {
- struct sh_css_isp_bds_params *p;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_bds() enter:\n");
-
- p = (struct sh_css_isp_bds_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
- p->baf_strength = params->bds_config.strength;
-
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_bds() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_tnr(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_tnr() enter:\n");
-
- ia_css_tnr_encode((struct sh_css_isp_tnr_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->tnr_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_tnr() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_macc(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.macc.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_macc() enter:\n");
-
- ia_css_macc_encode((struct sh_css_isp_macc_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->macc_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_macc() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_sdis_horicoef(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis_horicoef() enter:\n");
-
- ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
- &params->dvs_coefs,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis_horicoef() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_sdis_vertcoef(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis_vertcoef() enter:\n");
-
- ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
- &params->dvs_coefs,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis_vertcoef() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_sdis_horiproj(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis_horiproj() enter:\n");
-
- ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->dvs_coefs,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis_horiproj() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_sdis_vertproj(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis_vertproj() enter:\n");
-
- ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->dvs_coefs,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis_vertproj() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_sdis2_horicoef(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis2_horicoef() enter:\n");
-
- ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
- &params->dvs2_coefs,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis2_horicoef() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_sdis2_vertcoef(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis2_vertcoef() enter:\n");
-
- ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
- &params->dvs2_coefs,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis2_vertcoef() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_sdis2_horiproj(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis2_horiproj() enter:\n");
-
- ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->dvs2_coefs,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis2_horiproj() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_sdis2_vertproj(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis2_vertproj() enter:\n");
-
- ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->dvs2_coefs,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_sdis2_vertproj() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_wb(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.wb.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n");
-
- ia_css_wb_encode((struct sh_css_isp_wb_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->wb_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_nr(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.nr.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n");
-
- ia_css_nr_encode((struct sh_css_isp_ynr_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->nr_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_yee(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.yee.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_yee() enter:\n");
-
- ia_css_yee_encode((struct sh_css_isp_yee_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->yee_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_yee() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_ynr(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_ynr() enter:\n");
-
- ia_css_ynr_encode((struct sh_css_isp_yee2_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->ynr_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_ynr() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_fc(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.fc.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n");
-
- ia_css_fc_encode((struct sh_css_isp_fc_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->fc_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_ctc(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_ctc() enter:\n");
-
- ia_css_ctc_encode((struct sh_css_isp_ctc_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->ctc_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_ctc() leave:\n");
- }
- }
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_ctc() enter:\n");
-
- ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
- &params->ctc_table,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_ctc() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_xnr_table(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_xnr_table() enter:\n");
-
- ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
- &params->xnr_table,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_xnr_table() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_xnr(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_xnr() enter:\n");
-
- ia_css_xnr_encode((struct sh_css_isp_xnr_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->xnr_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_xnr() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_process_function() */
-
-static void
-ia_css_process_xnr3(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params)
-{
- assert(params);
-
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_xnr3() enter:\n");
-
- ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->xnr3_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_xnr3() leave:\n");
- }
- }
-}
-
-/* Code generated by genparam/gencode.c:gen_param_process_table() */
-
-void (*ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
- unsigned int pipe_id,
- const struct ia_css_pipeline_stage *stage,
- struct ia_css_isp_parameters *params) = {
- ia_css_process_aa,
- ia_css_process_anr,
- ia_css_process_anr2,
- ia_css_process_bh,
- ia_css_process_cnr,
- ia_css_process_crop,
- ia_css_process_csc,
- ia_css_process_dp,
- ia_css_process_bnr,
- ia_css_process_de,
- ia_css_process_ecd,
- ia_css_process_formats,
- ia_css_process_fpn,
- ia_css_process_gc,
- ia_css_process_ce,
- ia_css_process_yuv2rgb,
- ia_css_process_rgb2yuv,
- ia_css_process_r_gamma,
- ia_css_process_g_gamma,
- ia_css_process_b_gamma,
- ia_css_process_uds,
- ia_css_process_raa,
- ia_css_process_s3a,
- ia_css_process_ob,
- ia_css_process_output,
- ia_css_process_sc,
- ia_css_process_bds,
- ia_css_process_tnr,
- ia_css_process_macc,
- ia_css_process_sdis_horicoef,
- ia_css_process_sdis_vertcoef,
- ia_css_process_sdis_horiproj,
- ia_css_process_sdis_vertproj,
- ia_css_process_sdis2_horicoef,
- ia_css_process_sdis2_vertcoef,
- ia_css_process_sdis2_horiproj,
- ia_css_process_sdis2_vertproj,
- ia_css_process_wb,
- ia_css_process_nr,
- ia_css_process_yee,
- ia_css_process_ynr,
- ia_css_process_fc,
- ia_css_process_ctc,
- ia_css_process_xnr_table,
- ia_css_process_xnr,
- ia_css_process_xnr3,
-};
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_dp_config(const struct ia_css_isp_parameters *params,
- struct ia_css_dp_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_dp_config() enter: config=%p\n",
- config);
-
- *config = params->dp_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_dp_config() leave\n");
- ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_dp_config(struct ia_css_isp_parameters *params,
- const struct ia_css_dp_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n");
- ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->dp_config = *config;
- params->config_changed[IA_CSS_DP_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_dp_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_wb_config(const struct ia_css_isp_parameters *params,
- struct ia_css_wb_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_wb_config() enter: config=%p\n",
- config);
-
- *config = params->wb_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_wb_config() leave\n");
- ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_wb_config(struct ia_css_isp_parameters *params,
- const struct ia_css_wb_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n");
- ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->wb_config = *config;
- params->config_changed[IA_CSS_WB_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_wb_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_tnr_config(const struct ia_css_isp_parameters *params,
- struct ia_css_tnr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_tnr_config() enter: config=%p\n",
- config);
-
- *config = params->tnr_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_tnr_config() leave\n");
- ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
- const struct ia_css_tnr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n");
- ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->tnr_config = *config;
- params->config_changed[IA_CSS_TNR_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_tnr_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_ob_config(const struct ia_css_isp_parameters *params,
- struct ia_css_ob_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_ob_config() enter: config=%p\n",
- config);
-
- *config = params->ob_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_ob_config() leave\n");
- ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_ob_config(struct ia_css_isp_parameters *params,
- const struct ia_css_ob_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n");
- ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->ob_config = *config;
- params->config_changed[IA_CSS_OB_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_ob_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_de_config(const struct ia_css_isp_parameters *params,
- struct ia_css_de_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_de_config() enter: config=%p\n",
- config);
-
- *config = params->de_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_de_config() leave\n");
- ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_de_config(struct ia_css_isp_parameters *params,
- const struct ia_css_de_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n");
- ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->de_config = *config;
-	params->config_changed[IA_CSS_DE_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_de_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_anr_config(const struct ia_css_isp_parameters *params,
- struct ia_css_anr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_anr_config() enter: config=%p\n",
- config);
-
- *config = params->anr_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_anr_config() leave\n");
- ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_anr_config(struct ia_css_isp_parameters *params,
- const struct ia_css_anr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n");
- ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->anr_config = *config;
-	params->config_changed[IA_CSS_ANR_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_anr_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_anr2_config(const struct ia_css_isp_parameters *params,
- struct ia_css_anr_thres *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_anr2_config() enter: config=%p\n",
- config);
-
- *config = params->anr_thres;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_anr2_config() leave\n");
- ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
- const struct ia_css_anr_thres *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n");
- ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->anr_thres = *config;
-	params->config_changed[IA_CSS_ANR2_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_anr2_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_ce_config(const struct ia_css_isp_parameters *params,
- struct ia_css_ce_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_ce_config() enter: config=%p\n",
- config);
-
- *config = params->ce_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_ce_config() leave\n");
- ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_ce_config(struct ia_css_isp_parameters *params,
- const struct ia_css_ce_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n");
- ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->ce_config = *config;
-	params->config_changed[IA_CSS_CE_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_ce_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_ecd_config(const struct ia_css_isp_parameters *params,
- struct ia_css_ecd_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_ecd_config() enter: config=%p\n",
- config);
-
- *config = params->ecd_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_ecd_config() leave\n");
- ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
- const struct ia_css_ecd_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n");
- ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->ecd_config = *config;
-	params->config_changed[IA_CSS_ECD_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_ecd_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_ynr_config(const struct ia_css_isp_parameters *params,
- struct ia_css_ynr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_ynr_config() enter: config=%p\n",
- config);
-
- *config = params->ynr_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_ynr_config() leave\n");
- ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
- const struct ia_css_ynr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n");
- ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->ynr_config = *config;
-	params->config_changed[IA_CSS_YNR_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_ynr_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_fc_config(const struct ia_css_isp_parameters *params,
- struct ia_css_fc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_fc_config() enter: config=%p\n",
- config);
-
- *config = params->fc_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_fc_config() leave\n");
- ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_fc_config(struct ia_css_isp_parameters *params,
- const struct ia_css_fc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n");
- ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->fc_config = *config;
-	params->config_changed[IA_CSS_FC_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_fc_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_cnr_config(const struct ia_css_isp_parameters *params,
- struct ia_css_cnr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_cnr_config() enter: config=%p\n",
- config);
-
- *config = params->cnr_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_cnr_config() leave\n");
- ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
- const struct ia_css_cnr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n");
- ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->cnr_config = *config;
-	params->config_changed[IA_CSS_CNR_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_cnr_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_macc_config(const struct ia_css_isp_parameters *params,
- struct ia_css_macc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_macc_config() enter: config=%p\n",
- config);
-
- *config = params->macc_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_macc_config() leave\n");
- ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_macc_config(struct ia_css_isp_parameters *params,
- const struct ia_css_macc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n");
- ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->macc_config = *config;
-	params->config_changed[IA_CSS_MACC_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_macc_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_ctc_config(const struct ia_css_isp_parameters *params,
- struct ia_css_ctc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_ctc_config() enter: config=%p\n",
- config);
-
- *config = params->ctc_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_ctc_config() leave\n");
- ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
- const struct ia_css_ctc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n");
- ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->ctc_config = *config;
-	params->config_changed[IA_CSS_CTC_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_ctc_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_aa_config(const struct ia_css_isp_parameters *params,
- struct ia_css_aa_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_aa_config() enter: config=%p\n",
- config);
-
- *config = params->aa_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_aa_config() leave\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_aa_config(struct ia_css_isp_parameters *params,
- const struct ia_css_aa_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n");
- params->aa_config = *config;
-	params->config_changed[IA_CSS_AA_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_aa_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params,
- struct ia_css_cc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_yuv2rgb_config() enter: config=%p\n",
- config);
-
- *config = params->yuv2rgb_cc_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_yuv2rgb_config() leave\n");
- ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
- const struct ia_css_cc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n");
- ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->yuv2rgb_cc_config = *config;
-	params->config_changed[IA_CSS_YUV2RGB_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_yuv2rgb_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params,
- struct ia_css_cc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_rgb2yuv_config() enter: config=%p\n",
- config);
-
- *config = params->rgb2yuv_cc_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_rgb2yuv_config() leave\n");
- ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
- const struct ia_css_cc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n");
- ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->rgb2yuv_cc_config = *config;
-	params->config_changed[IA_CSS_RGB2YUV_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_rgb2yuv_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_csc_config(const struct ia_css_isp_parameters *params,
- struct ia_css_cc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_csc_config() enter: config=%p\n",
- config);
-
- *config = params->cc_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_csc_config() leave\n");
- ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_csc_config(struct ia_css_isp_parameters *params,
- const struct ia_css_cc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n");
- ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->cc_config = *config;
-	params->config_changed[IA_CSS_CSC_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_csc_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_nr_config(const struct ia_css_isp_parameters *params,
- struct ia_css_nr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_nr_config() enter: config=%p\n",
- config);
-
- *config = params->nr_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_nr_config() leave\n");
- ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_nr_config(struct ia_css_isp_parameters *params,
- const struct ia_css_nr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n");
- ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->nr_config = *config;
- params->config_changed[IA_CSS_BNR_ID] = true;
-	params->config_changed[IA_CSS_NR_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_nr_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_gc_config(const struct ia_css_isp_parameters *params,
- struct ia_css_gc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_gc_config() enter: config=%p\n",
- config);
-
- *config = params->gc_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_gc_config() leave\n");
- ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_gc_config(struct ia_css_isp_parameters *params,
- const struct ia_css_gc_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_gc_config() enter:\n");
- ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->gc_config = *config;
-	params->config_changed[IA_CSS_GC_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_gc_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params,
- struct ia_css_dvs_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis_horicoef_config() enter: config=%p\n",
- config);
-
- *config = params->dvs_coefs;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis_horicoef_config() leave\n");
- ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
- const struct ia_css_dvs_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_set_sdis_horicoef_config() enter:\n");
- ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->dvs_coefs = *config;
- params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
- params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_sdis_horicoef_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params,
- struct ia_css_dvs_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis_vertcoef_config() enter: config=%p\n",
- config);
-
- *config = params->dvs_coefs;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis_vertcoef_config() leave\n");
- ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
- const struct ia_css_dvs_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_set_sdis_vertcoef_config() enter:\n");
- ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->dvs_coefs = *config;
- params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
- params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_sdis_vertcoef_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params,
- struct ia_css_dvs_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis_horiproj_config() enter: config=%p\n",
- config);
-
- *config = params->dvs_coefs;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis_horiproj_config() leave\n");
- ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
- const struct ia_css_dvs_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_set_sdis_horiproj_config() enter:\n");
- ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->dvs_coefs = *config;
- params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
- params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_sdis_horiproj_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params,
- struct ia_css_dvs_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis_vertproj_config() enter: config=%p\n",
- config);
-
- *config = params->dvs_coefs;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis_vertproj_config() leave\n");
- ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
- const struct ia_css_dvs_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_set_sdis_vertproj_config() enter:\n");
- ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->dvs_coefs = *config;
- params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
- params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_sdis_vertproj_config() leave: return_void\n");
-}
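Note that the four sdis_* setters above all write the same backing field, params->dvs_coefs: the DVS coefficient and projection kernels share one coefficient table, so updating it through any one accessor must raise the dirty flags of all four SDIS kernels (the sdis2_* accessors below do the same with params->dvs2_coefs). A hedged illustration of that aliasing, with ex_* placeholder names rather than the driver's real types:

    #include <stdbool.h>

    enum ex_sdis_id {
            EX_SDIS_HORICOEF, EX_SDIS_VERTCOEF,
            EX_SDIS_HORIPROJ, EX_SDIS_VERTPROJ, EX_SDIS_NUM
    };

    struct ex_dvs_coefficients { const short *hor; const short *ver; };

    struct ex_params {
            struct ex_dvs_coefficients dvs_coefs;   /* one shared copy */
            bool config_changed[EX_SDIS_NUM];
    };

    /* Whichever SDIS accessor runs, the effect is the same. */
    static void
    ex_set_any_sdis_config(struct ex_params *p,
                           const struct ex_dvs_coefficients *coefs)
    {
            p->dvs_coefs = *coefs;
            /* every kernel that reads dvs_coefs must be re-encoded */
            p->config_changed[EX_SDIS_HORICOEF] = true;
            p->config_changed[EX_SDIS_VERTCOEF] = true;
            p->config_changed[EX_SDIS_HORIPROJ] = true;
            p->config_changed[EX_SDIS_VERTPROJ] = true;
    }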
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params,
- struct ia_css_dvs2_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis2_horicoef_config() enter: config=%p\n",
- config);
-
- *config = params->dvs2_coefs;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis2_horicoef_config() leave\n");
- ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
- const struct ia_css_dvs2_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_set_sdis2_horicoef_config() enter:\n");
- ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->dvs2_coefs = *config;
- params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
- params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_sdis2_horicoef_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params,
- struct ia_css_dvs2_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis2_vertcoef_config() enter: config=%p\n",
- config);
-
- *config = params->dvs2_coefs;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis2_vertcoef_config() leave\n");
- ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
- const struct ia_css_dvs2_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_set_sdis2_vertcoef_config() enter:\n");
- ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->dvs2_coefs = *config;
- params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
- params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_sdis2_vertcoef_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params,
- struct ia_css_dvs2_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis2_horiproj_config() enter: config=%p\n",
- config);
-
- *config = params->dvs2_coefs;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis2_horiproj_config() leave\n");
- ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
- const struct ia_css_dvs2_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_set_sdis2_horiproj_config() enter:\n");
- ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->dvs2_coefs = *config;
- params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
- params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_sdis2_horiproj_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params,
- struct ia_css_dvs2_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis2_vertproj_config() enter: config=%p\n",
- config);
-
- *config = params->dvs2_coefs;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_sdis2_vertproj_config() leave\n");
- ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
- const struct ia_css_dvs2_coefficients *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_set_sdis2_vertproj_config() enter:\n");
- ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->dvs2_coefs = *config;
- params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
- params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
- params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_sdis2_vertproj_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params,
- struct ia_css_rgb_gamma_table *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_r_gamma_config() enter: config=%p\n",
- config);
-
- *config = params->r_gamma_table;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_r_gamma_config() leave\n");
- ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
- const struct ia_css_rgb_gamma_table *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n");
- ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->r_gamma_table = *config;
-	params->config_changed[IA_CSS_R_GAMMA_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_r_gamma_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params,
- struct ia_css_rgb_gamma_table *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_g_gamma_config() enter: config=%p\n",
- config);
-
- *config = params->g_gamma_table;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_g_gamma_config() leave\n");
- ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
- const struct ia_css_rgb_gamma_table *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n");
- ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->g_gamma_table = *config;
-	params->config_changed[IA_CSS_G_GAMMA_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_g_gamma_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params,
- struct ia_css_rgb_gamma_table *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_b_gamma_config() enter: config=%p\n",
- config);
-
- *config = params->b_gamma_table;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_b_gamma_config() leave\n");
- ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
- const struct ia_css_rgb_gamma_table *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n");
- ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->b_gamma_table = *config;
-	params->config_changed[IA_CSS_B_GAMMA_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_b_gamma_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params,
- struct ia_css_xnr_table *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_xnr_table_config() enter: config=%p\n",
- config);
-
- *config = params->xnr_table;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_xnr_table_config() leave\n");
- ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
- const struct ia_css_xnr_table *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_set_xnr_table_config() enter:\n");
- ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->xnr_table = *config;
-	params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_xnr_table_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_formats_config(const struct ia_css_isp_parameters *params,
- struct ia_css_formats_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_formats_config() enter: config=%p\n",
- config);
-
- *config = params->formats_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_formats_config() leave\n");
- ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_formats_config(struct ia_css_isp_parameters *params,
- const struct ia_css_formats_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n");
- ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->formats_config = *config;
-	params->config_changed[IA_CSS_FORMATS_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_formats_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_xnr_config(const struct ia_css_isp_parameters *params,
- struct ia_css_xnr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_xnr_config() enter: config=%p\n",
- config);
-
- *config = params->xnr_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_xnr_config() leave\n");
- ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
- const struct ia_css_xnr_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n");
- ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->xnr_config = *config;
-	params->config_changed[IA_CSS_XNR_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_xnr_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params,
- struct ia_css_xnr3_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_xnr3_config() enter: config=%p\n",
- config);
-
- *config = params->xnr3_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_xnr3_config() leave\n");
- ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
- const struct ia_css_xnr3_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n");
- ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->xnr3_config = *config;
-	params->config_changed[IA_CSS_XNR3_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_xnr3_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_s3a_config(const struct ia_css_isp_parameters *params,
- struct ia_css_3a_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_s3a_config() enter: config=%p\n",
- config);
-
- *config = params->s3a_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_s3a_config() leave\n");
- ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
- const struct ia_css_3a_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n");
- ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->s3a_config = *config;
- params->config_changed[IA_CSS_BH_ID] = true;
-	params->config_changed[IA_CSS_S3A_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_s3a_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_get_function() */
-
-static void
-ia_css_get_output_config(const struct ia_css_isp_parameters *params,
- struct ia_css_output_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_output_config() enter: config=%p\n",
- config);
-
- *config = params->output_config;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_get_output_config() leave\n");
- ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
-}
-
-/* Code generated by genparam/gencode.c:gen_set_function() */
-
-void
-ia_css_set_output_config(struct ia_css_isp_parameters *params,
- const struct ia_css_output_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n");
- ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
- params->output_config = *config;
-	params->config_changed[IA_CSS_OUTPUT_ID] = true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_set_output_config() leave: return_void\n");
-}
-
-/* Code generated by genparam/gencode.c:gen_global_access_function() */
-
-void
-ia_css_get_configs(struct ia_css_isp_parameters *params,
- const struct ia_css_isp_config *config)
-{
- ia_css_get_dp_config(params, config->dp_config);
- ia_css_get_wb_config(params, config->wb_config);
- ia_css_get_tnr_config(params, config->tnr_config);
- ia_css_get_ob_config(params, config->ob_config);
- ia_css_get_de_config(params, config->de_config);
- ia_css_get_anr_config(params, config->anr_config);
- ia_css_get_anr2_config(params, config->anr_thres);
- ia_css_get_ce_config(params, config->ce_config);
- ia_css_get_ecd_config(params, config->ecd_config);
- ia_css_get_ynr_config(params, config->ynr_config);
- ia_css_get_fc_config(params, config->fc_config);
- ia_css_get_cnr_config(params, config->cnr_config);
- ia_css_get_macc_config(params, config->macc_config);
- ia_css_get_ctc_config(params, config->ctc_config);
- ia_css_get_aa_config(params, config->aa_config);
- ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config);
- ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config);
- ia_css_get_csc_config(params, config->cc_config);
- ia_css_get_nr_config(params, config->nr_config);
- ia_css_get_gc_config(params, config->gc_config);
- ia_css_get_sdis_horicoef_config(params, config->dvs_coefs);
- ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs);
- ia_css_get_sdis_horiproj_config(params, config->dvs_coefs);
- ia_css_get_sdis_vertproj_config(params, config->dvs_coefs);
- ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs);
- ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs);
- ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs);
- ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs);
- ia_css_get_r_gamma_config(params, config->r_gamma_table);
- ia_css_get_g_gamma_config(params, config->g_gamma_table);
- ia_css_get_b_gamma_config(params, config->b_gamma_table);
- ia_css_get_xnr_table_config(params, config->xnr_table);
- ia_css_get_formats_config(params, config->formats_config);
- ia_css_get_xnr_config(params, config->xnr_config);
- ia_css_get_xnr3_config(params, config->xnr3_config);
- ia_css_get_s3a_config(params, config->s3a_config);
- ia_css_get_output_config(params, config->output_config);
-}
-
-/* Code generated by genparam/gencode.c:gen_global_access_function() */
-
-void
-ia_css_set_configs(struct ia_css_isp_parameters *params,
- const struct ia_css_isp_config *config)
-{
- ia_css_set_dp_config(params, config->dp_config);
- ia_css_set_wb_config(params, config->wb_config);
- ia_css_set_tnr_config(params, config->tnr_config);
- ia_css_set_ob_config(params, config->ob_config);
- ia_css_set_de_config(params, config->de_config);
- ia_css_set_anr_config(params, config->anr_config);
- ia_css_set_anr2_config(params, config->anr_thres);
- ia_css_set_ce_config(params, config->ce_config);
- ia_css_set_ecd_config(params, config->ecd_config);
- ia_css_set_ynr_config(params, config->ynr_config);
- ia_css_set_fc_config(params, config->fc_config);
- ia_css_set_cnr_config(params, config->cnr_config);
- ia_css_set_macc_config(params, config->macc_config);
- ia_css_set_ctc_config(params, config->ctc_config);
- ia_css_set_aa_config(params, config->aa_config);
- ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config);
- ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config);
- ia_css_set_csc_config(params, config->cc_config);
- ia_css_set_nr_config(params, config->nr_config);
- ia_css_set_gc_config(params, config->gc_config);
- ia_css_set_sdis_horicoef_config(params, config->dvs_coefs);
- ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs);
- ia_css_set_sdis_horiproj_config(params, config->dvs_coefs);
- ia_css_set_sdis_vertproj_config(params, config->dvs_coefs);
- ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs);
- ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs);
- ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs);
- ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs);
- ia_css_set_r_gamma_config(params, config->r_gamma_table);
- ia_css_set_g_gamma_config(params, config->g_gamma_table);
- ia_css_set_b_gamma_config(params, config->b_gamma_table);
- ia_css_set_xnr_table_config(params, config->xnr_table);
- ia_css_set_formats_config(params, config->formats_config);
- ia_css_set_xnr_config(params, config->xnr_config);
- ia_css_set_xnr3_config(params, config->xnr3_config);
- ia_css_set_s3a_config(params, config->s3a_config);
- ia_css_set_output_config(params, config->output_config);
-}
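The two global access functions fan a single struct ia_css_isp_config out over every per-kernel accessor. Because each setter returns early on a NULL pointer, a caller can update a subset of kernels by leaving the other members of the config struct NULL. A hedged usage sketch (assumes the headers that declare these types are included; example_update_wb is an illustrative helper, not a driver function):

    /* Update only the white-balance kernel; all other members of the
     * zero-initialized config stay NULL and are skipped by the setters. */
    static void
    example_update_wb(struct ia_css_isp_parameters *params,
                      struct ia_css_wb_config *wb)
    {
            struct ia_css_isp_config cfg = { 0 };

            cfg.wb_config = wb;             /* the only non-NULL member */
            ia_css_set_configs(params, &cfg);
    }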
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_configs.c
deleted file mode 100644
index 1a021ae841fe..000000000000
--- a/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_configs.c
+++ /dev/null
@@ -1,386 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-/* Generated code: do not edit or commit. */
-
-#define IA_CSS_INCLUDE_CONFIGURATIONS
-#include "ia_css_pipeline.h"
-#include "ia_css_isp_configs.h"
-#include "ia_css_debug.h"
-#include "assert_support.h"
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_iterator(
- const struct ia_css_binary *binary,
- const struct ia_css_iterator_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_iterator() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.iterator.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset;
- }
- if (size) {
- ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_iterator() leave:\n");
-}
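Every ia_css_configure_<kernel>() in this file repeats the same lookup-then-copy pattern: if the binary was linked with a per-kernel config offsets table, fetch this kernel's size and offset from it; a non-zero size means the kernel is present in the binary, and the per-kernel *_config() helper then encodes config_dmem into that slot of the binary's CONFIG/DMEM parameter block. A hedged restatement with illustrative ex_* names, modelling the encode step as a plain copy:

    #include <string.h>

    struct ex_offsets { struct { unsigned int size, offset; } iterator; };

    struct ex_binary {
            const struct ex_offsets *config_offsets; /* NULL if not linked in */
            char *dmem_config;                       /* CONFIG/DMEM block */
    };

    static void
    ex_configure_iterator(const struct ex_binary *binary,
                          const void *config_dmem)
    {
            unsigned int offset = 0;
            unsigned int size = 0;

            if (binary->config_offsets) {
                    size = binary->config_offsets->iterator.size;
                    offset = binary->config_offsets->iterator.offset;
            }
            if (size)       /* size == 0: kernel absent from this binary */
                    memcpy(&binary->dmem_config[offset], config_dmem, size);
    }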
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_copy_output(
- const struct ia_css_binary *binary,
- const struct ia_css_copy_output_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_copy_output() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset;
- }
- if (size) {
- ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_copy_output() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_crop(
- const struct ia_css_binary *binary,
- const struct ia_css_crop_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_crop() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.crop.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset;
- }
- if (size) {
- ia_css_crop_config((struct sh_css_isp_crop_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_crop() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_fpn(
- const struct ia_css_binary *binary,
- const struct ia_css_fpn_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_fpn() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.fpn.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset;
- }
- if (size) {
- ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_fpn() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_dvs(
- const struct ia_css_binary *binary,
- const struct ia_css_dvs_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_dvs() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.dvs.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset;
- }
- if (size) {
- ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_dvs() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_qplane(
- const struct ia_css_binary *binary,
- const struct ia_css_qplane_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_qplane() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.qplane.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset;
- }
- if (size) {
- ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_qplane() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_output0(
- const struct ia_css_binary *binary,
- const struct ia_css_output0_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output0() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.output0.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset;
- }
- if (size) {
- ia_css_output0_config((struct sh_css_isp_output_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output0() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_output1(
- const struct ia_css_binary *binary,
- const struct ia_css_output1_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output1() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.output1.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset;
- }
- if (size) {
- ia_css_output1_config((struct sh_css_isp_output_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output1() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_output(
- const struct ia_css_binary *binary,
- const struct ia_css_output_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.output.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.output.offset;
- }
- if (size) {
- ia_css_output_config((struct sh_css_isp_output_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_output() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_raw(
- const struct ia_css_binary *binary,
- const struct ia_css_raw_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_raw() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.raw.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset;
- }
- if (size) {
- ia_css_raw_config((struct sh_css_isp_raw_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_raw() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_tnr(
- const struct ia_css_binary *binary,
- const struct ia_css_tnr_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_tnr() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.tnr.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset;
- }
- if (size) {
- ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_tnr() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_ref(
- const struct ia_css_binary *binary,
- const struct ia_css_ref_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_ref() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.ref.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset;
- }
- if (size) {
- ia_css_ref_config((struct sh_css_isp_ref_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_ref() leave:\n");
-}
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_vf(
- const struct ia_css_binary *binary,
- const struct ia_css_vf_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_vf() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.vf.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset;
- }
- if (size) {
- ia_css_vf_config((struct sh_css_isp_vf_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_vf() leave:\n");
-}
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_states.c b/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_states.c
deleted file mode 100644
index 514ffe0303cb..000000000000
--- a/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_states.c
+++ /dev/null
@@ -1,224 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-/* Generated code: do not edit or commit. */
-
-#include "ia_css_pipeline.h"
-#include "ia_css_isp_states.h"
-#include "ia_css_debug.h"
-#include "assert_support.h"
-
-/* Code generated by genparam/genstate.c:gen_init_function() */
-
-static void
-ia_css_initialize_aa_state(
- const struct ia_css_binary *binary)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_aa_state() enter:\n");
-
- {
- unsigned int size = binary->info->mem_offsets.offsets.state->vmem.aa.size;
- unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset;
-
- if (size)
- memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
- 0, size);
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_aa_state() leave:\n");
-}
-
-/* Code generated by genparam/genstate.c:gen_init_function() */
-
-static void
-ia_css_initialize_cnr_state(
- const struct ia_css_binary *binary)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_cnr_state() enter:\n");
-
- {
- unsigned int size = binary->info->mem_offsets.offsets.state->vmem.cnr.size;
-
- unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset;
-
- if (size) {
- ia_css_init_cnr_state(
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
- size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_cnr_state() leave:\n");
-}
-
-/* Code generated by genparam/genstate.c:gen_init_function() */
-
-static void
-ia_css_initialize_cnr2_state(
- const struct ia_css_binary *binary)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_cnr2_state() enter:\n");
-
- {
- unsigned int size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size;
-
- unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset;
-
- if (size) {
- ia_css_init_cnr2_state(
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
- size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_cnr2_state() leave:\n");
-}
-
-/* Code generated by genparam/genstate.c:gen_init_function() */
-
-static void
-ia_css_initialize_dp_state(
- const struct ia_css_binary *binary)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_dp_state() enter:\n");
-
- {
- unsigned int size = binary->info->mem_offsets.offsets.state->vmem.dp.size;
-
- unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset;
-
- if (size) {
- ia_css_init_dp_state(
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
- size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_dp_state() leave:\n");
-}
-
-/* Code generated by genparam/genstate.c:gen_init_function() */
-
-static void
-ia_css_initialize_de_state(
- const struct ia_css_binary *binary)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_de_state() enter:\n");
-
- {
- unsigned int size = binary->info->mem_offsets.offsets.state->vmem.de.size;
-
- unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.de.offset;
-
- if (size) {
- ia_css_init_de_state(
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
- size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_de_state() leave:\n");
-}
-
-/* Code generated by genparam/genstate.c:gen_init_function() */
-
-static void
-ia_css_initialize_tnr_state(
- const struct ia_css_binary *binary)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_tnr_state() enter:\n");
-
- {
- unsigned int size = binary->info->mem_offsets.offsets.state->dmem.tnr.size;
-
- unsigned int offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset;
-
- if (size) {
- ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
- size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_tnr_state() leave:\n");
-}
-
-/* Code generated by genparam/genstate.c:gen_init_function() */
-
-static void
-ia_css_initialize_ref_state(
- const struct ia_css_binary *binary)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_ref_state() enter:\n");
-
- {
- unsigned int size = binary->info->mem_offsets.offsets.state->dmem.ref.size;
-
- unsigned int offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset;
-
- if (size) {
- ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
- size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_ref_state() leave:\n");
-}
-
-/* Code generated by genparam/genstate.c:gen_init_function() */
-
-static void
-ia_css_initialize_ynr_state(
- const struct ia_css_binary *binary)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_ynr_state() enter:\n");
-
- {
- unsigned int size = binary->info->mem_offsets.offsets.state->vmem.ynr.size;
-
- unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset;
-
- if (size) {
- ia_css_init_ynr_state(
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
- size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_initialize_ynr_state() leave:\n");
-}
-
-/* Code generated by genparam/genstate.c:gen_state_init_table() */
-
-void (*ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(
- const struct ia_css_binary *binary) = {
- ia_css_initialize_aa_state,
- ia_css_initialize_cnr_state,
- ia_css_initialize_cnr2_state,
- ia_css_initialize_dp_state,
- ia_css_initialize_de_state,
- ia_css_initialize_tnr_state,
- ia_css_initialize_ref_state,
- ia_css_initialize_ynr_state,
-};
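
[Editor's note: the table deleted above is the driver's standard dispatch idiom: one init routine per kernel, indexed by a state-ID enum, so a single loop can initialize every kernel's state. A minimal self-contained sketch of the pattern, with hypothetical names (kernel_id_t, init_a, init_b) standing in for the real IDs and routines:]

#include <stdio.h>

struct binary { int dummy; };                /* stand-in for struct ia_css_binary */

typedef enum { KERNEL_A, KERNEL_B, N_KERNELS } kernel_id_t;

static void init_a(const struct binary *b) { (void)b; printf("init A\n"); }
static void init_b(const struct binary *b) { (void)b; printf("init B\n"); }

/* one entry per kernel, in enum order, like ia_css_kernel_init_state[] */
static void (*const init_state[N_KERNELS])(const struct binary *) = {
	init_a,
	init_b,
};

int main(void)
{
	struct binary b = { 0 };
	int i;

	for (i = 0; i < N_KERNELS; i++)      /* init every kernel's state */
		init_state[i](&b);
	return 0;
}
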
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c
index 8ed1cffc5384..25e082d6a94a 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c
@@ -28,10 +28,6 @@ static inline void gdc_reg_store(
const unsigned int reg,
const hrt_data value);
-static inline hrt_data gdc_reg_load(
- const gdc_ID_t ID,
- const unsigned int reg);
-
#ifndef __INLINE_GDC__
#include "gdc_private.h"
#endif /* __INLINE_GDC__ */
@@ -117,10 +113,3 @@ static inline void gdc_reg_store(
ia_css_device_store_uint32(GDC_BASE[ID] + reg * sizeof(hrt_data), value);
return;
}
-
-static inline hrt_data gdc_reg_load(
- const gdc_ID_t ID,
- const unsigned int reg)
-{
- return ia_css_device_load_uint32(GDC_BASE[ID] + reg * sizeof(hrt_data));
-}
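
[Editor's note: gdc_reg_store(), like the unused gdc_reg_load() deleted here, addresses the GDC register file as base + index * register width. A small sketch of that offset arithmetic, assuming hrt_data is a 32-bit register word; reg_addr() is a hypothetical helper:]

#include <stdint.h>
#include <stdio.h>

typedef uint32_t hrt_data;                   /* assumed 32-bit register word */

/* byte address of register 'reg' in a block based at 'base' */
static uintptr_t reg_addr(uintptr_t base, unsigned int reg)
{
	return base + reg * sizeof(hrt_data);
}

int main(void)
{
	/* register 3 of a block at 0x1000 lives at 0x100c */
	printf("0x%lx\n", (unsigned long)reg_addr(0x1000, 3));
	return 0;
}
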
diff --git a/drivers/staging/media/atomisp/pci/ia_css_acc_types.h b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
index 36583ab12e3f..d0ce2f8ba653 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_acc_types.h
@@ -222,11 +222,6 @@ struct ia_css_binary_info {
struct ia_css_isp_param_isp_segments mem_initializers;
/* MW: Packing (related) bools in an integer ?? */
struct {
- /* ISP2401 */
- u8 luma_only;
- u8 input_yuv;
- u8 input_raw;
-
u8 reduced_pipe;
u8 vf_veceven;
u8 dis;
diff --git a/drivers/staging/media/atomisp/pci/ia_css_event_public.h b/drivers/staging/media/atomisp/pci/ia_css_event_public.h
index 08ea801dd5ac..76219d741d2e 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_event_public.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_event_public.h
@@ -157,21 +157,6 @@ struct ia_css_event {
int
ia_css_dequeue_psys_event(struct ia_css_event *event);
-/* @brief Dequeue an event from the CSS system.
- *
- * @param[out] event Pointer to the event struct which will be filled by
- * this function if an event is available.
- * @return -ENODATA if no events are
- * available or
- * 0 otherwise.
- *
- * deprecated{Use ia_css_dequeue_psys_event instead}.
- * Unless the isys event queue is explicitly enabled, this function will
- * dequeue both isys (EOF) and psys events (all others).
- */
-int
-ia_css_dequeue_event(struct ia_css_event *event);
-
/* @brief Dequeue an ISYS event from the CSS system.
*
* @param[out] event Pointer to the event struct which will be filled by
@@ -182,7 +167,7 @@ ia_css_dequeue_event(struct ia_css_event *event);
*
* This function dequeues an event from the ISYS event queue. The queue is
* between host and the CSS system.
- * Unlike the ia_css_dequeue_event() function, this function can be called
+ * Unlike the ia_css_dequeue_psys_event() function, this function can be called
* directly from an interrupt service routine (ISR) and it is safe to call
* this function in parallel with other CSS API functions (but only one
* call to this function should be in flight at any point in time).
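
[Editor's note: with the deprecated wrapper gone, callers poll the psys queue directly. A hedged sketch of the usual drain loop, assuming ia_css_dequeue_psys_event() keeps the contract documented for the removed wrapper (0 per dequeued event, -ENODATA once empty); handle_event() is a hypothetical consumer:]

#include "ia_css_event_public.h"

void handle_event(const struct ia_css_event *event);   /* hypothetical */

static void drain_psys_events(void)
{
	struct ia_css_event event;

	/* 0 means 'event' was filled in; -ENODATA means the queue is empty */
	while (ia_css_dequeue_psys_event(&event) == 0)
		handle_event(&event);
}
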
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/ia_css_isp_configs.c
new file mode 100644
index 000000000000..d28a76a68e43
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_configs.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_configs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+int ia_css_configure_iterator(const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.iterator.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset;
+
+ ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_copy_output(const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset;
+
+ ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+int ia_css_configure_crop(const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.crop.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset;
+
+ ia_css_crop_config((struct sh_css_isp_crop_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_fpn(const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.fpn.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset;
+ ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_dvs(const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.dvs.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset;
+ ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_qplane(const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.qplane.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset;
+ ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+
+ return 0;
+}
+
+int ia_css_configure_output0(const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.output0.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset;
+
+ ia_css_output0_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_output1(const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.output1.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset;
+
+ ia_css_output1_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_output(const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.output.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.output.offset;
+
+ ia_css_output_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_raw(const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.raw.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset;
+
+ ia_css_raw_config((struct sh_css_isp_raw_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_tnr(const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.tnr.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset;
+
+ ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_ref(const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.ref.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset;
+
+ ia_css_ref_config((struct sh_css_isp_ref_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
+
+int ia_css_configure_vf(const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem)
+{
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);
+
+ if (!binary->info->mem_offsets.offsets.config)
+ return 0;
+
+ size = binary->info->mem_offsets.offsets.config->dmem.vf.size;
+ if (!size)
+ return 0;
+
+ offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset;
+
+ ia_css_vf_config((struct sh_css_isp_vf_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size);
+ return 0;
+}
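
[Editor's note: every function in this new file has the same shape: return 0 early when the binary carries no config section or when this kernel's slot has zero size, otherwise copy the host-side configuration into the binary's DMEM parameter image at the recorded offset. A sketch of that shape, with hypothetical my_kernel names standing in for the per-kernel types and slot:]

int ia_css_configure_my_kernel(const struct ia_css_binary *binary,
			       const struct my_kernel_configuration *config_dmem)
{
	unsigned int offset, size;

	if (!binary->info->mem_offsets.offsets.config)
		return 0;       /* binary has no config section at all */

	size = binary->info->mem_offsets.offsets.config->dmem.my_kernel.size;
	if (!size)
		return 0;       /* this kernel is not part of the binary */

	offset = binary->info->mem_offsets.offsets.config->dmem.my_kernel.offset;

	/* copy the host configuration into the DMEM parameter image */
	my_kernel_config((struct my_kernel_isp_config *)
	    &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
	    config_dmem, size);
	return 0;
}
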
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_configs.h b/drivers/staging/media/atomisp/pci/ia_css_isp_configs.h
index 1abb2fd6a913..fffcfc871bd2 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_isp_configs.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_configs.h
@@ -23,22 +23,15 @@
#include "isp/kernels/raw/raw_1.0/ia_css_raw.host.h"
#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
-
-/* ISP2401 */
-#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
-
#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
#include "isp/kernels/vf/vf_1.0/ia_css_vf.host.h"
#include "isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h"
#include "isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h"
-#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */
-/* Generated code: do not edit or commmit. */
+#endif
#ifndef _IA_CSS_ISP_CONFIG_H
#define _IA_CSS_ISP_CONFIG_H
-/* Code generated by genparam/gencode.c:gen_param_enum() */
-
enum ia_css_configuration_ids {
IA_CSS_ITERATOR_CONFIG_ID,
IA_CSS_COPY_OUTPUT_CONFIG_ID,
@@ -60,8 +53,6 @@ enum ia_css_configuration_ids {
IA_CSS_NUM_CONFIGURATION_IDS
};
-/* Code generated by genparam/gencode.c:gen_param_offsets() */
-
struct ia_css_config_memory_offsets {
struct {
struct ia_css_isp_parameter iterator;
@@ -73,10 +64,6 @@ struct ia_css_config_memory_offsets {
struct ia_css_isp_parameter output0;
struct ia_css_isp_parameter output1;
struct ia_css_isp_parameter output;
-
- /* ISP2401 */
- struct ia_css_isp_parameter sc;
-
struct ia_css_isp_parameter raw;
struct ia_css_isp_parameter tnr;
struct ia_css_isp_parameter ref;
@@ -88,96 +75,44 @@ struct ia_css_config_memory_offsets {
#include "ia_css_stream.h" /* struct ia_css_stream */
#include "ia_css_binary.h" /* struct ia_css_binary */
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_iterator(
- const struct ia_css_binary *binary,
- const struct ia_css_iterator_configuration *config_dmem);
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_copy_output(
- const struct ia_css_binary *binary,
- const struct ia_css_copy_output_configuration *config_dmem);
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_crop(
- const struct ia_css_binary *binary,
- const struct ia_css_crop_configuration *config_dmem);
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_fpn(
- const struct ia_css_binary *binary,
- const struct ia_css_fpn_configuration *config_dmem);
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_dvs(
- const struct ia_css_binary *binary,
- const struct ia_css_dvs_configuration *config_dmem);
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_qplane(
- const struct ia_css_binary *binary,
- const struct ia_css_qplane_configuration *config_dmem);
-
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-void
-ia_css_configure_output0(
- const struct ia_css_binary *binary,
- const struct ia_css_output0_configuration *config_dmem);
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
+int ia_css_configure_iterator(const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem);
-void
-ia_css_configure_output1(
- const struct ia_css_binary *binary,
- const struct ia_css_output1_configuration *config_dmem);
+int ia_css_configure_copy_output(const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem);
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
+int ia_css_configure_crop(const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem);
-void
-ia_css_configure_output(
- const struct ia_css_binary *binary,
- const struct ia_css_output_configuration *config_dmem);
+int ia_css_configure_fpn(const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem);
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
+int ia_css_configure_dvs(const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem);
-void
-ia_css_configure_raw(
- const struct ia_css_binary *binary,
- const struct ia_css_raw_configuration *config_dmem);
+int ia_css_configure_qplane(const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem);
+int ia_css_configure_output0(const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem);
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
+int ia_css_configure_output1(const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem);
-void
-ia_css_configure_tnr(
- const struct ia_css_binary *binary,
- const struct ia_css_tnr_configuration *config_dmem);
+int ia_css_configure_output(const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem);
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
+int ia_css_configure_raw(const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem);
-void
-ia_css_configure_ref(
- const struct ia_css_binary *binary,
- const struct ia_css_ref_configuration *config_dmem);
+int ia_css_configure_tnr(const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem);
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
+int ia_css_configure_ref(const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem);
-void
-ia_css_configure_vf(
- const struct ia_css_binary *binary,
- const struct ia_css_vf_configuration *config_dmem);
+int ia_css_configure_vf(const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem);
#endif /* IA_CSS_INCLUDE_CONFIGURATION */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/ia_css_isp_params.c
index d9c672d8904e..503ac65da69b 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/hive/ia_css_isp_params.c
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_params.c
@@ -1721,29 +1721,6 @@ ia_css_process_xnr3(
"ia_css_process_xnr3() leave:\n");
}
}
- {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.offset;
-
- if (size) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_xnr3() enter:\n");
-
- ia_css_xnr3_vmem_encode((struct sh_css_isp_xnr3_vmem_params *)
- &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
- &params->xnr3_config,
- size);
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
- true;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_process_xnr3() leave:\n");
- }
- }
}
/* Code generated by genparam/gencode.c:gen_param_process_table() */
diff --git a/drivers/staging/media/atomisp/pci/ia_css_isp_params.h b/drivers/staging/media/atomisp/pci/ia_css_isp_params.h
index 6e3082b39ed6..c2de689877d1 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_isp_params.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_params.h
@@ -121,9 +121,6 @@ struct ia_css_memory_offsets {
struct ia_css_isp_parameter sdis_vertcoef;
struct ia_css_isp_parameter sdis2_horicoef;
struct ia_css_isp_parameter sdis2_vertcoef;
-
- /* ISP2401 */
- struct ia_css_isp_parameter xnr3;
} vmem;
struct {
struct ia_css_isp_parameter bh;
diff --git a/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_states.c b/drivers/staging/media/atomisp/pci/ia_css_isp_states.c
index a6bc2e9eddea..a6bc2e9eddea 100644
--- a/drivers/staging/media/atomisp/pci/css_2400_system/hive/ia_css_isp_states.c
+++ b/drivers/staging/media/atomisp/pci/ia_css_isp_states.c
diff --git a/drivers/staging/media/atomisp/pci/ia_css_pipe.h b/drivers/staging/media/atomisp/pci/ia_css_pipe.h
index bb0abf9bffb1..fb58535bff40 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_pipe.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_pipe.h
@@ -33,7 +33,7 @@ struct ia_css_preview_settings {
/* 2401 only for these two - do we in fact use them for anything real */
struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
- struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES];
+ struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES];
struct ia_css_pipe *copy_pipe;
struct ia_css_pipe *capture_pipe;
@@ -81,7 +81,7 @@ struct ia_css_video_settings {
struct ia_css_binary vf_pp_binary;
struct ia_css_binary *yuv_scaler_binary;
struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
- struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES];
+ struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES];
struct ia_css_frame *vf_pp_in_frame;
struct ia_css_pipe *copy_pipe;
struct ia_css_pipe *capture_pipe;
diff --git a/drivers/staging/media/atomisp/pci/ia_css_pipe_public.h b/drivers/staging/media/atomisp/pci/ia_css_pipe_public.h
index 4affd21f9e3f..7352cbf779fb 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_pipe_public.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_pipe_public.h
@@ -123,9 +123,6 @@ struct ia_css_pipe_config {
processing stages. */
/* ISP2401 */
- bool enable_luma_only;
- /** Enabling of monochrome mode for a pipeline. If enabled only luma processing
- will be done. */
bool enable_tnr;
/** Enabling of TNR (temporal noise reduction). This is only applicable to video
pipes. Non video-pipes should always set this parameter to false. */
@@ -482,29 +479,6 @@ ia_css_pipe_get_qos_ext_state(struct ia_css_pipe *pipe,
u32 fw_handle,
bool *enable);
-/* ISP2401 */
-/* @brief Update mapped CSS and ISP arguments for QoS pipe during SP runtime.
- * @param[in] pipe Pipe handle.
- * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle).
- * @param[in] css_seg Parameter memory descriptors for CSS segments.
- * @param[in] isp_seg Parameter memory descriptors for ISP segments.
- *
- * @return
- * 0 : Success
- * -EINVAL : Invalid Parameters
- * -EBUSY : Inactive QOS Pipe
- * (No active stream with this pipe)
- *
- * \deprecated{This interface is used to temporarily support a late-developed,
- * specific use-case on a specific IPU2 platform. It will not be supported or
- * maintained on IPU3 or further.}
- */
-int
-ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe,
- u32 fw_handle,
- struct ia_css_isp_param_css_segments *css_seg,
- struct ia_css_isp_param_isp_segments *isp_seg);
-
/* @brief Get selected configuration settings
* @param[in] pipe The pipe.
* @param[out] config Configuration settings.
diff --git a/drivers/staging/media/atomisp/pci/ia_css_stream.h b/drivers/staging/media/atomisp/pci/ia_css_stream.h
index 70b0378748f1..cf847586dc61 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_stream.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_stream.h
@@ -18,9 +18,7 @@
#include <type_support.h>
#include <system_local.h>
-#if !defined(ISP2401)
#include <input_system.h>
-#endif
#include "ia_css_types.h"
#include "ia_css_stream_public.h"
@@ -30,9 +28,7 @@
struct ia_css_stream {
struct ia_css_stream_config config;
struct ia_css_stream_info info;
-#if !defined(ISP2401)
rx_cfg_t csi_rx_config;
-#endif
bool reconfigure_css_rx;
struct ia_css_pipe *last_pipe;
int num_pipes;
diff --git a/drivers/staging/media/atomisp/pci/ia_css_stream_public.h b/drivers/staging/media/atomisp/pci/ia_css_stream_public.h
index 83846e417ae5..47846ece8d64 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_stream_public.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_stream_public.h
@@ -102,12 +102,10 @@ struct ia_css_stream_config {
isys_config[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH];
struct ia_css_stream_input_config input_config;
- /* Currently, Android and Windows platforms interpret the binning_factor parameter
- * differently. In Android, the binning factor is expressed in the form
- * 2^N * 2^N, whereas in Windows platform, the binning factor is N*N
- * To use the Windows method of specification, the caller has to define
- * macro USE_WINDOWS_BINNING_FACTOR. This is for backward compatibility only
- * and will be deprecated. In the future,all platforms will use the N*N method
+ /*
+ * Currently, Linux and Windows platforms interpret the binning_factor
+ * parameter differently. In Linux, the binning factor is expressed
+	 * in the form 2^N * 2^N, whereas on Windows it is N * N.
*/
/* ISP2401 */
unsigned int sensor_binning_factor; /** Binning factor used by sensor
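
[Editor's note: to make the rewritten comment concrete under one reading: N is an exponent in the Linux form, so 2x2 binning is N = 1 and gives a factor of (1 << 1) * (1 << 1) == 4, which the Windows N * N form would instead encode as N = 2. A hypothetical helper for the stated Linux formula:]

static unsigned int linux_binning_factor(unsigned int n)
{
	return (1u << n) * (1u << n);        /* 2^N * 2^N */
}
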
@@ -202,15 +200,6 @@ int
ia_css_stream_get_info(const struct ia_css_stream *stream,
struct ia_css_stream_info *stream_info);
-/* @brief load (rebuild) a stream that was unloaded.
- * @param[in] stream The stream
- * @return 0 or the error code
- *
- * Rebuild a stream, including allocating structs, setting configuration and
- * building the required pipes.
- */
-int
-ia_css_stream_load(struct ia_css_stream *stream);
/* @brief Starts the stream.
* @param[in] stream The stream.
diff --git a/drivers/staging/media/atomisp/pci/input_system_local.h b/drivers/staging/media/atomisp/pci/input_system_local.h
index b33aa2838290..357987d629cd 100644
--- a/drivers/staging/media/atomisp/pci/input_system_local.h
+++ b/drivers/staging/media/atomisp/pci/input_system_local.h
@@ -4,6 +4,140 @@
* (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
*/
+#include "type_support.h"
+#include "input_system_global.h"
+
+typedef enum {
+ INPUT_SYSTEM_PORT_A = 0,
+ INPUT_SYSTEM_PORT_B,
+ INPUT_SYSTEM_PORT_C,
+ N_INPUT_SYSTEM_PORTS
+} input_system_csi_port_t;
+
+typedef struct ctrl_unit_cfg_s ctrl_unit_cfg_t;
+typedef struct input_system_network_cfg_s input_system_network_cfg_t;
+typedef struct target_cfg2400_s target_cfg2400_t;
+typedef struct channel_cfg_s channel_cfg_t;
+typedef struct backend_channel_cfg_s backend_channel_cfg_t;
+typedef struct input_system_cfg2400_s input_system_cfg2400_t;
+typedef struct mipi_port_state_s mipi_port_state_t;
+typedef struct rx_channel_state_s rx_channel_state_t;
+typedef struct input_switch_cfg_channel_s input_switch_cfg_channel_t;
+typedef struct input_switch_cfg_s input_switch_cfg_t;
+
+struct ctrl_unit_cfg_s {
+ isp2400_ib_buffer_t buffer_mipi[N_CAPTURE_UNIT_ID];
+ isp2400_ib_buffer_t buffer_acquire[N_ACQUISITION_UNIT_ID];
+};
+
+struct input_system_network_cfg_s {
+ input_system_connection_t multicast_cfg[N_CAPTURE_UNIT_ID];
+ input_system_multiplex_t mux_cfg;
+ ctrl_unit_cfg_t ctrl_unit_cfg[N_CTRL_UNIT_ID];
+};
+
+typedef struct {
+// TBD.
+ u32 dummy_parameter;
+} target_isp_cfg_t;
+
+typedef struct {
+// TBD.
+ u32 dummy_parameter;
+} target_sp_cfg_t;
+
+typedef struct {
+// TBD.
+ u32 dummy_parameter;
+} target_strm2mem_cfg_t;
+
+struct input_switch_cfg_channel_s {
+ u32 hsync_data_reg[2];
+ u32 vsync_data_reg;
+};
+
+struct backend_channel_cfg_s {
+ u32 fmt_control_word_1; // Format config.
+ u32 fmt_control_word_2;
+ u32 no_side_band;
+};
+
+typedef union {
+ csi_cfg_t csi_cfg;
+ tpg_cfg_t tpg_cfg;
+ prbs_cfg_t prbs_cfg;
+ gpfifo_cfg_t gpfifo_cfg;
+} source_cfg_t;
+
+struct input_switch_cfg_s {
+ u32 hsync_data_reg[N_RX_CHANNEL_ID * 2];
+ u32 vsync_data_reg;
+};
+
+/*
+ * In 2300 ports can be configured independently and stream
+ * formats need to be specified. In 2400, there are only 8
+ * supported configurations but the HW is fused to support
+ * only a single one.
+ *
+ * In 2300 the compressed format types are programmed by the
+ * user. In 2400 all stream formats are encoded on the stream.
+ *
+ * Use the enum to check validity of a user configuration
+ */
+typedef enum {
+ MONO_4L_1L_0L = 0,
+ MONO_3L_1L_0L,
+ MONO_2L_1L_0L,
+ MONO_1L_1L_0L,
+ STEREO_2L_1L_2L,
+ STEREO_3L_1L_1L,
+ STEREO_2L_1L_1L,
+ STEREO_1L_1L_1L,
+ N_RX_MODE
+} rx_mode_t;
+
+#define UNCOMPRESSED_BITS_PER_PIXEL_10 10
+#define UNCOMPRESSED_BITS_PER_PIXEL_12 12
+#define COMPRESSED_BITS_PER_PIXEL_6 6
+#define COMPRESSED_BITS_PER_PIXEL_7 7
+#define COMPRESSED_BITS_PER_PIXEL_8 8
+enum mipi_compressor {
+ MIPI_COMPRESSOR_NONE = 0,
+ MIPI_COMPRESSOR_10_6_10,
+ MIPI_COMPRESSOR_10_7_10,
+ MIPI_COMPRESSOR_10_8_10,
+ MIPI_COMPRESSOR_12_6_12,
+ MIPI_COMPRESSOR_12_7_12,
+ MIPI_COMPRESSOR_12_8_12,
+ N_MIPI_COMPRESSOR_METHODS
+};
+
+typedef enum mipi_compressor mipi_compressor_t;
+
+typedef enum {
+ MIPI_PREDICTOR_NONE = 0,
+ MIPI_PREDICTOR_TYPE1,
+ MIPI_PREDICTOR_TYPE2,
+ N_MIPI_PREDICTOR_TYPES
+} mipi_predictor_t;
+
+typedef struct rx_cfg_s rx_cfg_t;
+
+/*
+ * Applied per port
+ */
+struct rx_cfg_s {
+ rx_mode_t mode; /* The HW config */
+ enum mipi_port_id port; /* The port ID to apply the control on */
+ unsigned int timeout;
+ unsigned int initcount;
+ unsigned int synccount;
+ unsigned int rxcount;
+ mipi_predictor_t comp; /* Just for backward compatibility */
+ bool is_two_ppc;
+};
+
#ifdef ISP2401
# include "isp2401_input_system_local.h"
#else
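
[Editor's note: the compressor names encode uncompressed-compressed-uncompressed bit widths, e.g. 10-8-10 carries 8 bits per pixel on the link and expands back to 10. A sketch mapping each scheme to its on-link width using the defines introduced above, as implied by the naming; the helper itself is hypothetical:]

static unsigned int compressed_bpp(mipi_compressor_t c)
{
	switch (c) {
	case MIPI_COMPRESSOR_10_6_10:
	case MIPI_COMPRESSOR_12_6_12:
		return COMPRESSED_BITS_PER_PIXEL_6;
	case MIPI_COMPRESSOR_10_7_10:
	case MIPI_COMPRESSOR_12_7_12:
		return COMPRESSED_BITS_PER_PIXEL_7;
	case MIPI_COMPRESSOR_10_8_10:
	case MIPI_COMPRESSOR_12_8_12:
		return COMPRESSED_BITS_PER_PIXEL_8;
	default:
		return 0;       /* MIPI_COMPRESSOR_NONE: link width = source width */
	}
}
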
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c
index 82aa69b74677..2091f001502d 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c
@@ -13,7 +13,6 @@
* more details.
*/
-#if !defined(HAS_NO_HMEM)
#include "ia_css_types.h"
#include "sh_css_internal.h"
@@ -63,4 +62,3 @@ ia_css_bh_encode(
uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT);
}
-#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c
index 5d34f3256a43..cc415c72ad8f 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c
@@ -34,14 +34,12 @@ ia_css_copy_output_config(
to->enable = from->enable;
}
-void
-ia_css_copy_output_configure(
- const struct ia_css_binary *binary,
- bool enable)
+int ia_css_copy_output_configure(const struct ia_css_binary *binary,
+ bool enable)
{
struct ia_css_copy_output_configuration config = default_config;
config.enable = enable;
- ia_css_configure_copy_output(binary, &config);
+ return ia_css_configure_copy_output(binary, &config);
}
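
[Editor's note: this conversion from void to int runs through the whole series: configuration failures now propagate to the caller instead of being asserted away. A hypothetical call site checking the new return value:]

int ret;

ret = ia_css_copy_output_configure(binary, true);
if (ret)                        /* e.g. -EINVAL from a divisibility failure */
	return ret;
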
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h
index 615cb6771884..44e3e45b0ec3 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h
@@ -27,9 +27,7 @@ ia_css_copy_output_config(
const struct ia_css_copy_output_configuration *from,
unsigned int size);
-void
-ia_css_copy_output_configure(
- const struct ia_css_binary *binary,
- bool enable);
+int ia_css_copy_output_configure(const struct ia_css_binary *binary,
+ bool enable);
#endif /* __IA_CSS_COPY_OUTPUT_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c
index 38912062edd4..8c1d50f7aae4 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c
@@ -36,30 +36,32 @@ ia_css_crop_encode(
to->crop_pos = from->crop_pos;
}
-void
-ia_css_crop_config(
- struct sh_css_isp_crop_isp_config *to,
- const struct ia_css_crop_configuration *from,
- unsigned int size)
+int ia_css_crop_config(struct sh_css_isp_crop_isp_config *to,
+ const struct ia_css_crop_configuration *from,
+ unsigned int size)
{
unsigned int elems_a = ISP_VEC_NELEMS;
+ int ret;
+
+ ret = ia_css_dma_configure_from_info(&to->port_b, from->info);
+ if (ret)
+ return ret;
- (void)size;
- ia_css_dma_configure_from_info(&to->port_b, from->info);
to->width_a_over_b = elems_a / to->port_b.elems;
/* Assume divisiblity here, may need to generalize to fixed point. */
- assert(elems_a % to->port_b.elems == 0);
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ return 0;
}
-void
-ia_css_crop_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *info)
+int ia_css_crop_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
{
struct ia_css_crop_configuration config = default_config;
config.info = info;
- ia_css_configure_crop(binary, &config);
+ return ia_css_configure_crop(binary, &config);
}
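
[Editor's note: the check that replaces the assert above is the driver-wide divisibility rule: the ISP vector width must be an exact multiple of the DMA port-B element count. The same rule in isolation, as a self-contained sketch (width_ratio() is hypothetical):]

#include <errno.h>

static int width_ratio(unsigned int elems_a, unsigned int elems_b,
		       unsigned int *ratio)
{
	if (!elems_b || elems_a % elems_b)
		return -EINVAL; /* was: assert(elems_a % elems_b == 0) */

	*ratio = elems_a / elems_b;     /* width_a_over_b */
	return 0;
}
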
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h
index 21a259d33256..e700149c1e95 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.h
@@ -28,15 +28,11 @@ ia_css_crop_encode(
const struct ia_css_crop_config *from,
unsigned int size);
-void
-ia_css_crop_config(
- struct sh_css_isp_crop_isp_config *to,
- const struct ia_css_crop_configuration *from,
- unsigned int size);
+int ia_css_crop_config(struct sh_css_isp_crop_isp_config *to,
+ const struct ia_css_crop_configuration *from,
+ unsigned int size);
-void
-ia_css_crop_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *from);
+int ia_css_crop_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
#endif /* __IA_CSS_CROP_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
index 67f5540b48b5..07ce5b4f0816 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
@@ -46,16 +46,14 @@ ia_css_dvs_config(
DVS_NUM_BLOCKS_Y(from->info->res.height);
}
-void
-ia_css_dvs_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *info)
+int ia_css_dvs_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
{
struct ia_css_dvs_configuration config = default_config;
config.info = info;
- ia_css_configure_dvs(binary, &config);
+ return ia_css_configure_dvs(binary, &config);
}
static void
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h
index f9bc17ee0f86..332aa5496c04 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h
@@ -35,10 +35,8 @@ ia_css_dvs_config(
const struct ia_css_dvs_configuration *from,
unsigned int size);
-void
-ia_css_dvs_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *from);
+int ia_css_dvs_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
void
convert_dvs_6axis_config(
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
index 47b5c7956fbd..57b5e11e1cfe 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
@@ -51,26 +51,28 @@ ia_css_fpn_dump(
"fpn_enabled", fpn->enabled);
}
-void
-ia_css_fpn_config(
- struct sh_css_isp_fpn_isp_config *to,
- const struct ia_css_fpn_configuration *from,
- unsigned int size)
+int ia_css_fpn_config(struct sh_css_isp_fpn_isp_config *to,
+ const struct ia_css_fpn_configuration *from,
+ unsigned int size)
{
unsigned int elems_a = ISP_VEC_NELEMS;
+ int ret;
+
+ ret = ia_css_dma_configure_from_info(&to->port_b, from->info);
+ if (ret)
+ return ret;
- (void)size;
- ia_css_dma_configure_from_info(&to->port_b, from->info);
to->width_a_over_b = elems_a / to->port_b.elems;
/* Assume divisiblity here, may need to generalize to fixed point. */
- assert(elems_a % to->port_b.elems == 0);
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ return 0;
}
-void
-ia_css_fpn_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *info)
+int ia_css_fpn_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
{
struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
const struct ia_css_fpn_configuration config = {
@@ -85,5 +87,5 @@ ia_css_fpn_configure(
my_info.raw_bayer_order = info->raw_bayer_order;
my_info.crop_info = info->crop_info;
- ia_css_configure_fpn(binary, &config);
+ return ia_css_configure_fpn(binary, &config);
}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h
index 12187d213d90..bd341fa287fe 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h
@@ -31,15 +31,11 @@ ia_css_fpn_dump(
const struct sh_css_isp_fpn_params *fpn,
unsigned int level);
-void
-ia_css_fpn_config(
- struct sh_css_isp_fpn_isp_config *to,
- const struct ia_css_fpn_configuration *from,
- unsigned int size);
+int ia_css_fpn_config(struct sh_css_isp_fpn_isp_config *to,
+ const struct ia_css_fpn_configuration *from,
+ unsigned int size);
-void
-ia_css_fpn_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *from);
+int ia_css_fpn_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
#endif /* __IA_CSS_FPN_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c
index 698550cc2fcc..85a02b6adb52 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
-/* Release Version: irci_ecr-master_20150911_0724 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h
index 04599ab590cd..83277b683c47 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
-/* Release Version: irci_ecr-master_20150911_0724 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h
index 97a89fd3cfda..998c6d801756 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_param.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
-/* Release Version: irci_ecr-master_20150911_0724 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h
index 1b4090880201..175c301ee96a 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr_types.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
-/* Release Version: irci_ecr-master_20150911_0724 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
index ea8055148fb3..c7d88552dfde 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
@@ -22,10 +22,8 @@
#include "ia_css_isp_params.h"
#include "ia_css_frame.h"
-void
-ia_css_bayer_io_config(
- const struct ia_css_binary *binary,
- const struct sh_css_binary_args *args)
+int ia_css_bayer_io_config(const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args)
{
const struct ia_css_frame *in_frame = args->in_frame;
const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
@@ -38,6 +36,7 @@ ia_css_bayer_io_config(
ddr_bits_per_element);
unsigned int size_get = 0, size_put = 0;
unsigned int offset = 0;
+ int ret;
if (binary->info->mem_offsets.offsets.param) {
size_get = binary->info->mem_offsets.offsets.param->dmem.get.size;
@@ -53,7 +52,9 @@ ia_css_bayer_io_config(
"ia_css_bayer_io_config() get part enter:\n");
#endif
- ia_css_dma_configure_from_info(&config, in_frame_info);
+ ret = ia_css_dma_configure_from_info(&config, in_frame_info);
+ if (ret)
+ return ret;
// The base_address of the input frame will be set in the ISP
to->width = in_frame_info->res.width;
to->height = in_frame_info->res.height;
@@ -79,7 +80,9 @@ ia_css_bayer_io_config(
"ia_css_bayer_io_config() put part enter:\n");
#endif
- ia_css_dma_configure_from_info(&config, &out_frames[0]->info);
+ ret = ia_css_dma_configure_from_info(&config, &out_frames[0]->info);
+ if (ret)
+ return ret;
to->base_address = out_frames[0]->data;
to->width = out_frames[0]->info.res.width;
to->height = out_frames[0]->info.res.height;
@@ -91,4 +94,5 @@ ia_css_bayer_io_config(
"ia_css_bayer_io_config() put part leave:\n");
#endif
}
+ return 0;
}
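
[Editor's note: the I/O config functions set up two DMA transfers, a "get" for the input frame and a "put" for the output frames, and with the int return type the first failing step now short-circuits the rest. A minimal sketch of that chain with hypothetical step functions:]

static int configure_get_part(void) { return 0; }    /* input-frame DMA, hypothetical */
static int configure_put_part(void) { return 0; }    /* output-frame DMA, hypothetical */

static int io_config_sketch(void)
{
	int ret;

	ret = configure_get_part();
	if (ret)
		return ret;

	return configure_put_part();
}
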
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h
index 635ccb1b27d0..9c7e5a1ad57b 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h
@@ -21,9 +21,7 @@
#include "ia_css_binary.h"
#include "sh_css_internal.h"
-void
-ia_css_bayer_io_config(
- const struct ia_css_binary *binary,
- const struct sh_css_binary_args *args);
+int ia_css_bayer_io_config(const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args);
#endif /*__BAYER_IO_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
index f8bd207b28e1..7d2ef6e26ee6 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
@@ -22,10 +22,8 @@ more details.
#include "ia_css_isp_params.h"
#include "ia_css_frame.h"
-void
-ia_css_yuv444_io_config(
- const struct ia_css_binary *binary,
- const struct sh_css_binary_args *args)
+int ia_css_yuv444_io_config(const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args)
{
const struct ia_css_frame *in_frame = args->in_frame;
const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
@@ -38,6 +36,7 @@ ia_css_yuv444_io_config(
ddr_bits_per_element);
unsigned int size_get = 0, size_put = 0;
unsigned int offset = 0;
+ int ret;
if (binary->info->mem_offsets.offsets.param) {
size_get = binary->info->mem_offsets.offsets.param->dmem.get.size;
@@ -53,7 +52,10 @@ ia_css_yuv444_io_config(
"ia_css_yuv444_io_config() get part enter:\n");
#endif
- ia_css_dma_configure_from_info(&config, in_frame_info);
+ ret = ia_css_dma_configure_from_info(&config, in_frame_info);
+ if (ret)
+ return ret;
+
// The base_address of the input frame will be set in the ISP
to->width = in_frame_info->res.width;
to->height = in_frame_info->res.height;
@@ -79,7 +81,10 @@ ia_css_yuv444_io_config(
"ia_css_yuv444_io_config() put part enter:\n");
#endif
- ia_css_dma_configure_from_info(&config, &out_frames[0]->info);
+ ret = ia_css_dma_configure_from_info(&config, &out_frames[0]->info);
+ if (ret)
+ return ret;
+
to->base_address = out_frames[0]->data;
to->width = out_frames[0]->info.res.width;
to->height = out_frames[0]->info.res.height;
@@ -91,4 +96,5 @@ ia_css_yuv444_io_config(
"ia_css_yuv444_io_config() put part leave:\n");
#endif
}
+ return 0;
}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h
index e7cfd380e108..13e50590f91e 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h
@@ -21,9 +21,7 @@ more details.
#include "ia_css_binary.h"
#include "sh_css_internal.h"
-void
-ia_css_yuv444_io_config(
- const struct ia_css_binary *binary,
- const struct sh_css_binary_args *args);
+int ia_css_yuv444_io_config(const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args);
#endif /*__YUV44_IO_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
index 6d8a35a73750..5f186fb03642 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
@@ -38,10 +38,9 @@ ia_css_iterator_config(
ia_css_resolution_to_sp_resolution(&to->dvs_envelope, from->dvs_envelope);
}
-int
-ia_css_iterator_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *in_info) {
+int ia_css_iterator_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info)
+{
struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
struct ia_css_iterator_configuration config = default_config;
@@ -75,7 +74,5 @@ ia_css_iterator_configure(
my_info.res.height <<= binary->vf_downscale_log2;
}
- ia_css_configure_iterator(binary, &config);
-
- return 0;
+ return ia_css_configure_iterator(binary, &config);
}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c
index c8e074f42353..be9e4ef29fce 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c
@@ -52,49 +52,45 @@ ia_css_output_encode(
to->enable_vflip = from->enable_vflip;
}
-void
-ia_css_output_config(
- struct sh_css_isp_output_isp_config *to,
- const struct ia_css_output_configuration *from,
- unsigned int size)
+int ia_css_output_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output_configuration *from,
+ unsigned int size)
{
unsigned int elems_a = ISP_VEC_NELEMS;
+ int ret;
+
+ ret = ia_css_dma_configure_from_info(&to->port_b, from->info);
+ if (ret)
+ return ret;
- (void)size;
- ia_css_dma_configure_from_info(&to->port_b, from->info);
to->width_a_over_b = elems_a / to->port_b.elems;
to->height = from->info ? from->info->res.height : 0;
to->enable = from->info != NULL;
ia_css_frame_info_to_frame_sp_info(&to->info, from->info);
/* Assume divisiblity here, may need to generalize to fixed point. */
- assert(elems_a % to->port_b.elems == 0);
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ return 0;
}
-void
-ia_css_output0_config(
- struct sh_css_isp_output_isp_config *to,
- const struct ia_css_output0_configuration *from,
- unsigned int size)
+int ia_css_output0_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output0_configuration *from,
+ unsigned int size)
{
- ia_css_output_config(
- to, (const struct ia_css_output_configuration *)from, size);
+ return ia_css_output_config(to, (const struct ia_css_output_configuration *)from, size);
}
-void
-ia_css_output1_config(
- struct sh_css_isp_output_isp_config *to,
- const struct ia_css_output1_configuration *from,
- unsigned int size)
+int ia_css_output1_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output1_configuration *from,
+ unsigned int size)
{
- ia_css_output_config(
- to, (const struct ia_css_output_configuration *)from, size);
+ return ia_css_output_config(to, (const struct ia_css_output_configuration *)from, size);
}
-void
-ia_css_output_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *info)
+int ia_css_output_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
{
if (info) {
struct ia_css_output_configuration config =
@@ -102,14 +98,13 @@ ia_css_output_configure(
config.info = info;
- ia_css_configure_output(binary, &config);
+ return ia_css_configure_output(binary, &config);
}
+ return 0;
}
-void
-ia_css_output0_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *info)
+int ia_css_output0_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
{
if (info) {
struct ia_css_output0_configuration config =
@@ -117,14 +112,13 @@ ia_css_output0_configure(
config.info = info;
- ia_css_configure_output0(binary, &config);
+ return ia_css_configure_output0(binary, &config);
}
+ return 0;
}
-void
-ia_css_output1_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *info)
+int ia_css_output1_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
{
if (info) {
struct ia_css_output1_configuration config =
@@ -132,8 +126,9 @@ ia_css_output1_configure(
config.info = info;
- ia_css_configure_output1(binary, &config);
+ return ia_css_configure_output1(binary, &config);
}
+ return 0;
}
void
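The change above is the template for this whole series: host-side configuration helpers that used to return void and assert() on bad geometry now return int, propagating the status of ia_css_dma_configure_from_info() and turning the vector-divisibility assertion into -EINVAL. A minimal caller-side sketch of the resulting error flow (setup_output_stage() is a hypothetical name, not a driver function):

    /* Hypothetical caller showing how the new int returns propagate. */
    static int setup_output_stage(const struct ia_css_binary *binary,
                                  const struct ia_css_frame_info *info)
    {
            int ret;

            ret = ia_css_output_configure(binary, info);
            if (ret)        /* e.g. -EINVAL when elems_a % port_b.elems != 0 */
                    return ret;

            return 0;
    }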
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h
index 1f5a2242640e..c8523e95a394 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.h
@@ -30,38 +30,26 @@ ia_css_output_encode(
const struct ia_css_output_config *from,
unsigned int size);
-void
-ia_css_output_config(
- struct sh_css_isp_output_isp_config *to,
- const struct ia_css_output_configuration *from,
- unsigned int size);
+int ia_css_output_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output_configuration *from,
+ unsigned int size);
-void
-ia_css_output0_config(
- struct sh_css_isp_output_isp_config *to,
- const struct ia_css_output0_configuration *from,
- unsigned int size);
+int ia_css_output0_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output0_configuration *from,
+ unsigned int size);
-void
-ia_css_output1_config(
- struct sh_css_isp_output_isp_config *to,
- const struct ia_css_output1_configuration *from,
- unsigned int size);
+int ia_css_output1_config(struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output1_configuration *from,
+ unsigned int size);
-void
-ia_css_output_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *from);
+int ia_css_output_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
-void
-ia_css_output0_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *from);
+int ia_css_output0_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
-void
-ia_css_output1_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *from);
+int ia_css_output1_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
void
ia_css_output_dump(
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c
index 1603fd44ece3..9fd4435e96b0 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c
@@ -28,35 +28,37 @@ static const struct ia_css_qplane_configuration default_config = {
.pipe = (struct sh_css_sp_pipeline *)NULL,
};
-void
-ia_css_qplane_config(
- struct sh_css_isp_qplane_isp_config *to,
- const struct ia_css_qplane_configuration *from,
- unsigned int size)
+int ia_css_qplane_config(struct sh_css_isp_qplane_isp_config *to,
+ const struct ia_css_qplane_configuration *from,
+ unsigned int size)
{
unsigned int elems_a = ISP_VEC_NELEMS;
+ int ret;
+
+ ret = ia_css_dma_configure_from_info(&to->port_b, from->info);
+ if (ret)
+ return ret;
- (void)size;
- ia_css_dma_configure_from_info(&to->port_b, from->info);
to->width_a_over_b = elems_a / to->port_b.elems;
/* Assume divisibility here, may need to generalize to fixed point. */
- assert(elems_a % to->port_b.elems == 0);
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
to->inout_port_config = from->pipe->inout_port_config;
to->format = from->info->format;
+
+ return 0;
}
-void
-ia_css_qplane_configure(
- const struct sh_css_sp_pipeline *pipe,
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *info)
+int ia_css_qplane_configure(const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
{
struct ia_css_qplane_configuration config = default_config;
config.pipe = pipe;
config.info = info;
- ia_css_configure_qplane(binary, &config);
+ return ia_css_configure_qplane(binary, &config);
}
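The qplane kernel gets the same conversion; the width_a_over_b ratio is only meaningful when the vector width divides evenly by the DMA port element count, hence the hard -EINVAL where an assert() used to sit. A small sketch of that check with illustrative numbers (actual element counts depend on the configured frame format):

    /* Sketch only, not driver code: the exact-ratio check behind -EINVAL. */
    static int vec_width_ratio(unsigned int elems_a, unsigned int port_elems,
                               unsigned int *ratio)
    {
            if (port_elems == 0 || elems_a % port_elems != 0)
                    return -EINVAL;         /* e.g. 64 % 24 != 0 */

            *ratio = elems_a / port_elems;  /* 64 / 32 == 2 */
            return 0;
    }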
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h
index 8d940959f40a..b3f8fa30c8ce 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h
@@ -29,16 +29,12 @@
#include "ia_css_qplane_types.h"
#include "ia_css_qplane_param.h"
-void
-ia_css_qplane_config(
- struct sh_css_isp_qplane_isp_config *to,
- const struct ia_css_qplane_configuration *from,
- unsigned int size);
+int ia_css_qplane_config(struct sh_css_isp_qplane_isp_config *to,
+ const struct ia_css_qplane_configuration *from,
+ unsigned int size);
-void
-ia_css_qplane_configure(
- const struct sh_css_sp_pipeline *pipe,
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *from);
+int ia_css_qplane_configure(const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
#endif /* __IA_CSS_QPLANE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
index c505c94a7241..646d6e39c1e5 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
@@ -29,12 +29,6 @@ static const struct ia_css_raw_configuration default_config = {
.pipe = (struct sh_css_sp_pipeline *)NULL,
};
-static inline unsigned
-sh_css_elems_bytes_from_info(unsigned int raw_bit_depth)
-{
- return CEIL_DIV(raw_bit_depth, 8);
-}
-
/* MW: These are MIPI / ISYS properties, not camera function properties */
static enum sh_stream_format
css2isp_stream_format(enum atomisp_input_format from) {
@@ -70,17 +64,15 @@ css2isp_stream_format(enum atomisp_input_format from) {
}
}
-void
-ia_css_raw_config(
- struct sh_css_isp_raw_isp_config *to,
- const struct ia_css_raw_configuration *from,
- unsigned int size)
+int ia_css_raw_config(struct sh_css_isp_raw_isp_config *to,
+ const struct ia_css_raw_configuration *from,
+ unsigned int size)
{
unsigned int elems_a = ISP_VEC_NELEMS;
const struct ia_css_frame_info *in_info = from->in_info;
const struct ia_css_frame_info *internal_info = from->internal_info;
+ int ret;
- (void)size;
#if !defined(ISP2401)
/* 2401 input system uses input width */
in_info = internal_info;
@@ -92,7 +84,9 @@ ia_css_raw_config(
in_info = internal_info;
#endif
- ia_css_dma_configure_from_info(&to->port_b, in_info);
+ ret = ia_css_dma_configure_from_info(&to->port_b, in_info);
+ if (ret)
+ return ret;
/* Assume divisibility here, may need to generalize to fixed point. */
assert((in_info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) ||
@@ -110,16 +104,16 @@ ia_css_raw_config(
to->start_line = in_info->crop_info.start_line;
to->enable_left_padding = from->enable_left_padding;
#endif
+
+ return 0;
}
-void
-ia_css_raw_configure(
- const struct sh_css_sp_pipeline *pipe,
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *in_info,
- const struct ia_css_frame_info *internal_info,
- bool two_ppc,
- bool deinterleaved)
+int ia_css_raw_configure(const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *internal_info,
+ bool two_ppc,
+ bool deinterleaved)
{
u8 enable_left_padding = (uint8_t)((binary->left_padding) ? 1 : 0);
struct ia_css_raw_configuration config = default_config;
@@ -132,5 +126,5 @@ ia_css_raw_configure(
config.deinterleaved = deinterleaved;
config.enable_left_padding = enable_left_padding;
- ia_css_configure_raw(binary, &config);
+ return ia_css_configure_raw(binary, &config);
}
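The deleted sh_css_elems_bytes_from_info() helper was a one-line wrapper rounding a raw bit depth up to whole bytes. For reference, the arithmetic it performed, assuming the usual CEIL_DIV definition from the driver's math helpers:

    #define CEIL_DIV(a, b)  (((b) != 0) ? ((a) + (b) - 1) / (b) : 0)

    /* Bytes per element for a given raw bit depth:
     *   8-bit raw -> CEIL_DIV(8, 8)  == 1
     *  10-bit raw -> CEIL_DIV(10, 8) == 2
     *  12-bit raw -> CEIL_DIV(12, 8) == 2
     */
    static inline unsigned int elems_bytes(unsigned int raw_bit_depth)
    {
            return CEIL_DIV(raw_bit_depth, 8);
    }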
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h
index 346928435a8b..23da51aabc8d 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.h
@@ -21,19 +21,15 @@
#include "ia_css_raw_types.h"
#include "ia_css_raw_param.h"
-void
-ia_css_raw_config(
- struct sh_css_isp_raw_isp_config *to,
- const struct ia_css_raw_configuration *from,
- unsigned int size);
+int ia_css_raw_config(struct sh_css_isp_raw_isp_config *to,
+ const struct ia_css_raw_configuration *from,
+ unsigned int size);
-void
-ia_css_raw_configure(
- const struct sh_css_sp_pipeline *pipe,
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *in_info,
- const struct ia_css_frame_info *internal_info,
- bool two_ppc,
- bool deinterleaved);
+int ia_css_raw_configure(const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *internal_info,
+ bool two_ppc,
+ bool deinterleaved);
#endif /* __IA_CSS_RAW_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c
index 29c707ecf9f3..9b756daddee0 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c
@@ -13,7 +13,6 @@
* more details.
*/
-#if !defined(HAS_NO_HMEM)
#include "ia_css_types.h"
#include "sh_css_internal.h"
@@ -32,4 +31,3 @@ ia_css_raa_encode(
(void)from;
}
-#endif
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c
index 061558fbe329..08ed916a7eb8 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c
@@ -22,16 +22,17 @@
#include "isp.h"
#include "ia_css_ref.host.h"
-void
-ia_css_ref_config(
- struct sh_css_isp_ref_isp_config *to,
- const struct ia_css_ref_configuration *from,
- unsigned int size)
+int ia_css_ref_config(struct sh_css_isp_ref_isp_config *to,
+ const struct ia_css_ref_configuration *from,
+ unsigned int size)
{
unsigned int elems_a = ISP_VEC_NELEMS, i;
+ int ret;
if (from->ref_frames[0]) {
- ia_css_dma_configure_from_info(&to->port_b, &from->ref_frames[0]->info);
+ ret = ia_css_dma_configure_from_info(&to->port_b, &from->ref_frames[0]->info);
+ if (ret)
+ return ret;
to->width_a_over_b = elems_a / to->port_b.elems;
to->dvs_frame_delay = from->dvs_frame_delay;
} else {
@@ -52,22 +53,25 @@ ia_css_ref_config(
}
/* Assume divisibility here, may need to generalize to fixed point. */
- assert(elems_a % to->port_b.elems == 0);
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ return 0;
}
-void
-ia_css_ref_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame * const *ref_frames,
- const uint32_t dvs_frame_delay)
+int ia_css_ref_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame * const *ref_frames,
+ const uint32_t dvs_frame_delay)
{
struct ia_css_ref_configuration config;
unsigned int i;
for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++)
config.ref_frames[i] = ref_frames[i];
+
config.dvs_frame_delay = dvs_frame_delay;
- ia_css_configure_ref(binary, &config);
+
+ return ia_css_configure_ref(binary, &config);
}
void
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h
index 3ce590b436a1..388cd4c367ba 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.h
@@ -23,17 +23,13 @@
#include "ia_css_ref_param.h"
#include "ia_css_ref_state.h"
-void
-ia_css_ref_config(
- struct sh_css_isp_ref_isp_config *to,
- const struct ia_css_ref_configuration *from,
- unsigned int size);
+int ia_css_ref_config(struct sh_css_isp_ref_isp_config *to,
+ const struct ia_css_ref_configuration *from,
+ unsigned int size);
-void
-ia_css_ref_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame * const *ref_frames,
- const uint32_t dvs_frame_delay);
+int ia_css_ref_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame * const *ref_frames,
+ const uint32_t dvs_frame_delay);
void
ia_css_init_ref_state(
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
index ba52c80df4a5..bd7b89d9475b 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
@@ -227,10 +227,6 @@ ia_css_s3a_hmem_decode(
struct ia_css_3a_statistics *host_stats,
const struct ia_css_bh_table *hmem_buf)
{
-#if defined(HAS_NO_HMEM)
- (void)host_stats;
- (void)hmem_buf;
-#else
struct ia_css_3a_rgby_output *out_ptr;
int i;
@@ -291,7 +287,6 @@ ia_css_s3a_hmem_decode(
out_ptr[0].g -= diff;
out_ptr[0].b -= diff;
out_ptr[0].y -= diff;
-#endif
}
void
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c
index f3fb4b9b3c82..6974b3424d91 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c
@@ -23,35 +23,6 @@
#include "ia_css_sc.host.h"
-/* Code generated by genparam/genconfig.c:gen_configure_function() */
-
-/* ISP2401 */
-static void
-ia_css_configure_sc(
- const struct ia_css_binary *binary,
- const struct ia_css_sc_configuration *config_dmem)
-{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_sc() enter:\n");
-
- {
- unsigned int offset = 0;
- unsigned int size = 0;
-
- if (binary->info->mem_offsets.offsets.config) {
- size = binary->info->mem_offsets.offsets.config->dmem.sc.size;
- offset = binary->info->mem_offsets.offsets.config->dmem.sc.offset;
- }
- if (size) {
- ia_css_sc_config((struct sh_css_isp_sc_isp_config *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
- config_dmem, size);
- }
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "ia_css_configure_sc() leave:\n");
-}
-
void
ia_css_sc_encode(
struct sh_css_isp_sc_params *to,
@@ -73,45 +44,6 @@ ia_css_sc_dump(
"sc_gain_shift", sc->gain_shift);
}
-/* ISP2401 */
-void
-ia_css_sc_config(
- struct sh_css_isp_sc_isp_config *to,
- const struct ia_css_sc_configuration *from,
- unsigned int size)
-{
- u32 internal_org_x_bqs = from->internal_frame_origin_x_bqs_on_sctbl;
- u32 internal_org_y_bqs = from->internal_frame_origin_y_bqs_on_sctbl;
- u32 slice, rest, i;
-
- (void)size;
-
- /* The internal_frame_origin_x_bqs_on_sctbl is separated to 8 times of slice_vec. */
- rest = internal_org_x_bqs;
- for (i = 0; i < SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES; i++) {
- slice = min(rest, ((uint32_t)ISP_SLICE_NELEMS));
- rest = rest - slice;
- to->interped_gain_hor_slice_bqs[i] = slice;
- }
-
- to->internal_frame_origin_y_bqs_on_sctbl = internal_org_y_bqs;
-}
-
-/* ISP2401 */
-void
-ia_css_sc_configure(
- const struct ia_css_binary *binary,
- u32 internal_frame_origin_x_bqs_on_sctbl,
- uint32_t internal_frame_origin_y_bqs_on_sctbl)
-{
- const struct ia_css_sc_configuration config = {
- internal_frame_origin_x_bqs_on_sctbl,
- internal_frame_origin_y_bqs_on_sctbl
- };
-
- ia_css_configure_sc(binary, &config);
-}
-
/* ------ deprecated(bz675) : from ------ */
/* It looks like @parameter{} (in *.pipe) is used to generate the process/get/set functions,
for parameters which should be used in the isp kernels.
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
index f1eb568f23d4..d103103c9a87 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
@@ -32,39 +32,6 @@ ia_css_sc_dump(
const struct sh_css_isp_sc_params *sc,
unsigned int level);
-/* @brief Configure the shading correction.
- * @param[out] to Parameters used in the shading correction kernel in the isp.
- * @param[in] from Parameters passed from the host.
- * @param[in] size Size of the sh_css_isp_sc_isp_config structure.
- *
- * This function passes the parameters for the shading correction from the host to the isp.
- */
-/* ISP2401 */
-void
-ia_css_sc_config(
- struct sh_css_isp_sc_isp_config *to,
- const struct ia_css_sc_configuration *from,
- unsigned int size);
-
-/* @brief Configure the shading correction.
- * @param[in] binary The binary, which has the shading correction.
- * @param[in] internal_frame_origin_x_bqs_on_sctbl
- * X coordinate (in bqs) of the origin of the internal frame on the shading table.
- * @param[in] internal_frame_origin_y_bqs_on_sctbl
- * Y coordinate (in bqs) of the origin of the internal frame on the shading table.
- *
- * This function calls the ia_css_configure_sc() function.
- * (The ia_css_configure_sc() function is automatically generated in ia_css_isp.configs.c.)
- * The ia_css_configure_sc() function calls the ia_css_sc_config() function
- * to pass the parameters for the shading correction from the host to the isp.
- */
-/* ISP2401 */
-void
-ia_css_sc_configure(
- const struct ia_css_binary *binary,
- u32 internal_frame_origin_x_bqs_on_sctbl,
- uint32_t internal_frame_origin_y_bqs_on_sctbl);
-
/* ------ deprecated(bz675) : from ------ */
void
sh_css_get_shading_settings(const struct ia_css_isp_parameters *params,
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
index aae534521b7b..1d70f6b9a0ec 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
@@ -118,18 +118,4 @@ struct ia_css_shading_settings {
/* ------ deprecated(bz675) : to ------ */
-/* Shading Correction configuration.
- *
- * NOTE: The shading table size is larger than or equal to the internal frame size.
- */
-/* ISP2401 */
-struct ia_css_sc_configuration {
- u32 internal_frame_origin_x_bqs_on_sctbl; /** Origin X (in bqs) of internal frame on shading table. */
- u32 internal_frame_origin_y_bqs_on_sctbl; /** Origin Y (in bqs) of internal frame on shading table. */
- /** NOTE: bqs = size in BQ(Bayer Quad) unit.
- 1BQ means {Gr,R,B,Gb}(2x2 pixels).
- Horizontal 1 bqs corresponds to horizontal 2 pixels.
- Vertical 1 bqs corresponds to vertical 2 pixels. */
-};
-
#endif /* __IA_CSS_SC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
index ac80e6c6e67e..53050c0c49fc 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
@@ -71,40 +71,41 @@ ia_css_tnr_debug_dtrace(
config->threshold_y, config->threshold_uv);
}
-void
-ia_css_tnr_config(
- struct sh_css_isp_tnr_isp_config *to,
- const struct ia_css_tnr_configuration *from,
- unsigned int size)
+int ia_css_tnr_config(struct sh_css_isp_tnr_isp_config *to,
+ const struct ia_css_tnr_configuration *from,
+ unsigned int size)
{
unsigned int elems_a = ISP_VEC_NELEMS;
unsigned int i;
+ int ret;
- (void)size;
- ia_css_dma_configure_from_info(&to->port_b, &from->tnr_frames[0]->info);
+ ret = ia_css_dma_configure_from_info(&to->port_b, &from->tnr_frames[0]->info);
+ if (ret)
+ return ret;
to->width_a_over_b = elems_a / to->port_b.elems;
to->frame_height = from->tnr_frames[0]->info.res.height;
- for (i = 0; i < NUM_TNR_FRAMES; i++) {
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
to->tnr_frame_addr[i] = from->tnr_frames[i]->data +
from->tnr_frames[i]->planes.yuyv.offset;
}
/* Assume divisibility here, may need to generalize to fixed point. */
- assert(elems_a % to->port_b.elems == 0);
+ if (elems_a % to->port_b.elems != 0)
+ return -EINVAL;
+
+ return 0;
}
-void
-ia_css_tnr_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame * const *frames)
+int ia_css_tnr_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame * const *frames)
{
struct ia_css_tnr_configuration config;
unsigned int i;
- for (i = 0; i < NUM_TNR_FRAMES; i++)
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++)
config.tnr_frames[i] = frames[i];
- ia_css_configure_tnr(binary, &config);
+ return ia_css_configure_tnr(binary, &config);
}
void
@@ -114,7 +115,7 @@ ia_css_init_tnr_state(
{
(void)size;
- assert(NUM_TNR_FRAMES >= 2);
+ assert(NUM_VIDEO_TNR_FRAMES >= 2);
assert(sizeof(*state) == size);
state->tnr_in_buf_idx = 0;
state->tnr_out_buf_idx = 1;
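The init above seeds tnr_in_buf_idx/tnr_out_buf_idx to 0 and 1, and the assert demands at least two frames because temporal noise reduction ping-pongs between an input and an output reference buffer. A sketch of the implied rotation (the struct and function here are illustrative; the real buffer advance happens in the ISP firmware, not in this host code):

    struct tnr_state_sketch {
            unsigned int tnr_in_buf_idx;
            unsigned int tnr_out_buf_idx;
    };

    static void tnr_advance(struct tnr_state_sketch *s)
    {
            unsigned int tmp = s->tnr_in_buf_idx;

            s->tnr_in_buf_idx  = s->tnr_out_buf_idx; /* old output is next input */
            s->tnr_out_buf_idx = tmp;                /* 0 <-> 1 with two frames  */
    }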
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h
index 90d6e6b44a8d..acf92052b442 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h
@@ -39,16 +39,12 @@ ia_css_tnr_debug_dtrace(
const struct ia_css_tnr_config *config,
unsigned int level);
-void
-ia_css_tnr_config(
- struct sh_css_isp_tnr_isp_config *to,
- const struct ia_css_tnr_configuration *from,
- unsigned int size);
+int ia_css_tnr_config(struct sh_css_isp_tnr_isp_config *to,
+ const struct ia_css_tnr_configuration *from,
+ unsigned int size);
-void
-ia_css_tnr_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame * const *frames);
+int ia_css_tnr_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame * const *frames);
void
ia_css_init_tnr_state(
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h
index 60a2542cf685..551dd5cfa9f1 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h
@@ -28,14 +28,14 @@ struct sh_css_isp_tnr_params {
};
struct ia_css_tnr_configuration {
- const struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES];
+ const struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES];
};
struct sh_css_isp_tnr_isp_config {
u32 width_a_over_b;
u32 frame_height;
struct dma_port_config port_b;
- ia_css_ptr tnr_frame_addr[NUM_TNR_FRAMES];
+ ia_css_ptr tnr_frame_addr[NUM_VIDEO_TNR_FRAMES];
};
#endif /* __IA_CSS_TNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
index dd3670972936..aecdcbe04ce1 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
@@ -26,26 +26,28 @@
#include "isp.h"
-void
-ia_css_vf_config(
- struct sh_css_isp_vf_isp_config *to,
- const struct ia_css_vf_configuration *from,
- unsigned int size)
+int ia_css_vf_config(struct sh_css_isp_vf_isp_config *to,
+ const struct ia_css_vf_configuration *from,
+ unsigned int size)
{
unsigned int elems_a = ISP_VEC_NELEMS;
+ int ret;
- (void)size;
to->vf_downscale_bits = from->vf_downscale_bits;
to->enable = from->info != NULL;
if (from->info) {
ia_css_frame_info_to_frame_sp_info(&to->info, from->info);
- ia_css_dma_configure_from_info(&to->dma.port_b, from->info);
+ ret = ia_css_dma_configure_from_info(&to->dma.port_b, from->info);
+ if (ret)
+ return ret;
to->dma.width_a_over_b = elems_a / to->dma.port_b.elems;
/* Assume divisibility here, may need to generalize to fixed point. */
- assert(elems_a % to->dma.port_b.elems == 0);
+ if (elems_a % to->dma.port_b.elems != 0)
+ return -EINVAL;
}
+ return 0;
}
/* compute the log2 of the downscale factor needed to get closest
@@ -120,12 +122,11 @@ configure_dma(
config->info = vf_info;
}
-int
-ia_css_vf_configure(
- const struct ia_css_binary *binary,
- const struct ia_css_frame_info *out_info,
- struct ia_css_frame_info *vf_info,
- unsigned int *downscale_log2) {
+int ia_css_vf_configure(const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2)
+{
int err;
struct ia_css_vf_configuration config;
const struct ia_css_binary_info *info = &binary->info->sp;
@@ -138,7 +139,6 @@ ia_css_vf_configure(
if (vf_info)
vf_info->raw_bit_depth = info->dma.vfdec_bits_per_pixel;
- ia_css_configure_vf(binary, &config);
- return 0;
+ return ia_css_configure_vf(binary, &config);
}
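The viewfinder kernel expresses its scaling as vf_downscale_bits, a power-of-two factor. A hedged sketch of deriving such a log2 factor from output versus viewfinder width — the real sh_css_vf_downscale_log2() additionally validates against the binary's maximum supported factor:

    /* Sketch: largest power-of-two downscale that keeps the scaled
     * width at or above the requested viewfinder width.
     */
    static int vf_downscale_log2_sketch(unsigned int out_width,
                                        unsigned int vf_width,
                                        unsigned int *ds_log2)
    {
            unsigned int log2 = 0;

            if (!out_width || !vf_width || vf_width > out_width)
                    return -EINVAL;

            while ((out_width >> (log2 + 1)) >= vf_width)
                    log2++;

            *ds_log2 = log2;  /* e.g. 1920 -> 480 gives log2 == 2 (factor 4) */
            return 0;
    }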
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h
index 0e8de034a00e..d6b45d3754b0 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.h
@@ -32,11 +32,9 @@ sh_css_vf_downscale_log2(
const struct ia_css_frame_info *vf_info,
unsigned int *downscale_log2);
-void
-ia_css_vf_config(
- struct sh_css_isp_vf_isp_config *to,
- const struct ia_css_vf_configuration *from,
- unsigned int size);
+int ia_css_vf_config(struct sh_css_isp_vf_isp_config *to,
+ const struct ia_css_vf_configuration *from,
+ unsigned int size);
int
ia_css_vf_configure(
diff --git a/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h
index 11e439d838ae..bfe4f5976771 100644
--- a/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h
+++ b/drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h
@@ -36,17 +36,7 @@ more details.
#define BITS8_ELEMENTS_PER_XMEM_ADDR CEIL_DIV(XMEM_WIDTH_BITS, 8)
#define BITS16_ELEMENTS_PER_XMEM_ADDR CEIL_DIV(XMEM_WIDTH_BITS, 16)
-#if ISP_VEC_NELEMS == 64
#define ISP_NWAY_LOG2 6
-#elif ISP_VEC_NELEMS == 32
-#define ISP_NWAY_LOG2 5
-#elif ISP_VEC_NELEMS == 16
-#define ISP_NWAY_LOG2 4
-#elif ISP_VEC_NELEMS == 8
-#define ISP_NWAY_LOG2 3
-#else
-#error "isp_const.h ISP_VEC_NELEMS must be one of {8, 16, 32, 64}"
-#endif
/* *****************************
* ISP input/output buffer sizes
@@ -164,9 +154,9 @@ more details.
/* [isp vmem] table size[vectors] per line per color (GR,R,B,GB),
multiples of NWAY */
#define ISP2400_SCTBL_VECTORS_PER_LINE_PER_COLOR \
- CEIL_DIV(ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
+ CEIL_DIV(SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
#define ISP2401_SCTBL_VECTORS_PER_LINE_PER_COLOR \
- CEIL_DIV(ISP2401_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
+ CEIL_DIV(SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
/* [isp vmem] table size[vectors] per line for 4colors (GR,R,B,GB),
multiples of NWAY */
#define SCTBL_VECTORS_PER_LINE \
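With the driver now targeting a single configuration, ISP_VEC_NELEMS is always 64 and the #if ladder collapses to a plain ISP_NWAY_LOG2 of 6. A compile-time guard in this spirit would catch any future drift (sketch; BUILD_BUG_ON() would be the kernel idiom for the same check):

    /* Keep the hard-coded log2 honest now that the #if ladder is gone. */
    _Static_assert(ISP_VEC_NELEMS == (1 << ISP_NWAY_LOG2),
                   "ISP_NWAY_LOG2 must match ISP_VEC_NELEMS");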
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
index 2614b89b8e34..c3ae5014a039 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
@@ -16,10 +16,6 @@
#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__
#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__
-#include <type_support.h>
-
-#include "input_system_global.h"
-
#include "input_system_defs.h" /* HIVE_ISYS_GPREG_MULTICAST_A_IDX,... */
/*
@@ -33,55 +29,6 @@
#include "isp_acquisition_defs.h"
#include "input_system_ctrl_defs.h"
-typedef enum {
- INPUT_SYSTEM_PORT_A = 0,
- INPUT_SYSTEM_PORT_B,
- INPUT_SYSTEM_PORT_C,
- N_INPUT_SYSTEM_PORTS
-} input_system_csi_port_t;
-
-typedef struct ctrl_unit_cfg_s ctrl_unit_cfg_t;
-typedef struct input_system_network_cfg_s input_system_network_cfg_t;
-typedef struct target_cfg2400_s target_cfg2400_t;
-typedef struct channel_cfg_s channel_cfg_t;
-typedef struct backend_channel_cfg_s backend_channel_cfg_t;
-typedef struct input_system_cfg2400_s input_system_cfg2400_t;
-typedef struct mipi_port_state_s mipi_port_state_t;
-typedef struct rx_channel_state_s rx_channel_state_t;
-typedef struct input_switch_cfg_channel_s input_switch_cfg_channel_t;
-typedef struct input_switch_cfg_s input_switch_cfg_t;
-
-struct ctrl_unit_cfg_s {
- isp2400_ib_buffer_t buffer_mipi[N_CAPTURE_UNIT_ID];
- isp2400_ib_buffer_t buffer_acquire[N_ACQUISITION_UNIT_ID];
-};
-
-struct input_system_network_cfg_s {
- input_system_connection_t multicast_cfg[N_CAPTURE_UNIT_ID];
- input_system_multiplex_t mux_cfg;
- ctrl_unit_cfg_t ctrl_unit_cfg[N_CTRL_UNIT_ID];
-};
-
-typedef struct {
-// TBD.
- u32 dummy_parameter;
-} target_isp_cfg_t;
-
-typedef struct {
-// TBD.
- u32 dummy_parameter;
-} target_sp_cfg_t;
-
-typedef struct {
-// TBD.
- u32 dummy_parameter;
-} target_strm2mem_cfg_t;
-
-struct input_switch_cfg_channel_s {
- u32 hsync_data_reg[2];
- u32 vsync_data_reg;
-};
-
struct target_cfg2400_s {
input_switch_cfg_channel_t input_switch_channel_cfg;
target_isp_cfg_t target_isp_cfg;
@@ -89,24 +36,6 @@ struct target_cfg2400_s {
target_strm2mem_cfg_t target_strm2mem_cfg;
};
-struct backend_channel_cfg_s {
- u32 fmt_control_word_1; // Format config.
- u32 fmt_control_word_2;
- u32 no_side_band;
-};
-
-typedef union {
- csi_cfg_t csi_cfg;
- tpg_cfg_t tpg_cfg;
- prbs_cfg_t prbs_cfg;
- gpfifo_cfg_t gpfifo_cfg;
-} source_cfg_t;
-
-struct input_switch_cfg_s {
- u32 hsync_data_reg[N_RX_CHANNEL_ID * 2];
- u32 vsync_data_reg;
-};
-
// Configuration of a channel.
struct channel_cfg_s {
u32 ch_id;
@@ -238,47 +167,6 @@ typedef struct capture_unit_state_s capture_unit_state_t;
typedef struct acquisition_unit_state_s acquisition_unit_state_t;
typedef struct ctrl_unit_state_s ctrl_unit_state_t;
-/*
- * In 2300 ports can be configured independently and stream
- * formats need to be specified. In 2400, there are only 8
- * supported configurations but the HW is fused to support
- * only a single one.
- *
- * In 2300 the compressed format types are programmed by the
- * user. In 2400 all stream formats are encoded on the stream.
- *
- * Use the enum to check validity of a user configuration
- */
-typedef enum {
- MONO_4L_1L_0L = 0,
- MONO_3L_1L_0L,
- MONO_2L_1L_0L,
- MONO_1L_1L_0L,
- STEREO_2L_1L_2L,
- STEREO_3L_1L_1L,
- STEREO_2L_1L_1L,
- STEREO_1L_1L_1L,
- N_RX_MODE
-} rx_mode_t;
-
-typedef enum {
- MIPI_PREDICTOR_NONE = 0,
- MIPI_PREDICTOR_TYPE1,
- MIPI_PREDICTOR_TYPE2,
- N_MIPI_PREDICTOR_TYPES
-} mipi_predictor_t;
-
-typedef enum {
- MIPI_COMPRESSOR_NONE = 0,
- MIPI_COMPRESSOR_10_6_10,
- MIPI_COMPRESSOR_10_7_10,
- MIPI_COMPRESSOR_10_8_10,
- MIPI_COMPRESSOR_12_6_12,
- MIPI_COMPRESSOR_12_7_12,
- MIPI_COMPRESSOR_12_8_12,
- N_MIPI_COMPRESSOR_METHODS
-} mipi_compressor_t;
-
typedef enum {
MIPI_FORMAT_RGB888 = 0,
MIPI_FORMAT_RGB555,
@@ -339,22 +227,6 @@ typedef enum {
RX_IRQ_INFO_ERR_LINE_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT,
} rx_irq_info_t;
-typedef struct rx_cfg_s rx_cfg_t;
-
-/*
- * Applied per port
- */
-struct rx_cfg_s {
- rx_mode_t mode; /* The HW config */
- enum mipi_port_id port; /* The port ID to apply the control on */
- unsigned int timeout;
- unsigned int initcount;
- unsigned int synccount;
- unsigned int rxcount;
- mipi_predictor_t comp; /* Just for backward compatibility */
- bool is_two_ppc;
-};
-
/* NOTE: The base has already an offset of 0x0100 */
static const hrt_address __maybe_unused MIPI_PORT_OFFSET[N_MIPI_PORT_ID] = {
0x00000000UL,
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h
index f38773842646..e3c86069b390 100644
--- a/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h
@@ -44,11 +44,6 @@ typedef enum {
N_INPUT_SYSTEM_SOURCE_TYPE
} input_system_source_type_t;
-typedef enum {
- INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME,
- INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST,
-} input_system_polling_mode_t;
-
typedef struct input_system_channel_s input_system_channel_t;
struct input_system_channel_s {
stream2mmio_ID_t stream2mmio_id;
@@ -111,9 +106,6 @@ struct isp2401_input_system_cfg_s {
input_system_source_type_t mode;
- /* ISP2401 */
- input_system_polling_mode_t polling_mode;
-
bool online;
bool raw_packed;
s8 linked_isys_stream_id;
@@ -165,10 +157,6 @@ struct virtual_input_system_stream_s {
u8 online;
s8 linked_isys_stream_id;
u8 valid;
-
- /* ISP2401 */
- input_system_polling_mode_t polling_mode;
- s32 subscr_index;
};
typedef struct virtual_input_system_stream_cfg_s
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
index 24026090cd35..74bfa10e670e 100644
--- a/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
@@ -16,9 +16,6 @@
#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__
#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__
-#include "type_support.h"
-#include "input_system_global.h"
-
#include "csi_rx.h"
#include "pixelgen.h"
#include "isys_stream2mmio.h"
@@ -69,29 +66,6 @@ typedef enum {
/* The number of stores for compressed format types */
#define N_MIPI_COMPRESSOR_CONTEXT (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM)
-#define UNCOMPRESSED_BITS_PER_PIXEL_10 10
-#define UNCOMPRESSED_BITS_PER_PIXEL_12 12
-#define COMPRESSED_BITS_PER_PIXEL_6 6
-#define COMPRESSED_BITS_PER_PIXEL_7 7
-#define COMPRESSED_BITS_PER_PIXEL_8 8
-enum mipi_compressor {
- MIPI_COMPRESSOR_NONE = 0,
- MIPI_COMPRESSOR_10_6_10,
- MIPI_COMPRESSOR_10_7_10,
- MIPI_COMPRESSOR_10_8_10,
- MIPI_COMPRESSOR_12_6_12,
- MIPI_COMPRESSOR_12_7_12,
- MIPI_COMPRESSOR_12_8_12,
- N_MIPI_COMPRESSOR_METHODS
-};
-
-typedef enum {
- MIPI_PREDICTOR_NONE = 0,
- MIPI_PREDICTOR_TYPE1,
- MIPI_PREDICTOR_TYPE2,
- N_MIPI_PREDICTOR_TYPES
-} mipi_predictor_t;
-
typedef struct input_system_state_s input_system_state_t;
struct input_system_state_s {
ibuf_ctrl_state_t ibuf_ctrl_state[N_IBUF_CTRL_ID];
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h b/drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h
index b44099dbdacd..9935ac860bc2 100644
--- a/drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/interface/ia_css_binary.h
@@ -94,7 +94,6 @@ struct ia_css_binary_descr {
bool enable_dpc;
/* ISP2401 */
- bool enable_luma_only;
bool enable_tnr;
bool enable_capture_pp_bli;
@@ -131,8 +130,6 @@ struct ia_css_binary {
int sctbl_width_per_color;
int sctbl_aligned_width_per_color;
int sctbl_height;
- int sctbl_legacy_width_per_color;
- int sctbl_legacy_height;
struct ia_css_sdis_info dis;
struct ia_css_resolution dvs_envelope;
bool online;
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
index 060d38749570..406ed5fb4c6a 100644
--- a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
@@ -108,7 +108,6 @@ ia_css_binary_internal_res(const struct ia_css_frame_info *in_info,
binary_dvs_env.height);
}
-/* ISP2400 */
/* Computation results of the origin coordinate of bayer on the shading table. */
struct sh_css_shading_table_bayer_origin_compute_results {
u32 bayer_scale_hor_ratio_in; /* Horizontal ratio (in) of bayer scaling. */
@@ -119,23 +118,7 @@ struct sh_css_shading_table_bayer_origin_compute_results {
u32 sc_bayer_origin_y_bqs_on_shading_table; /* Y coordinate (in bqs) of bayer origin on shading table. */
};
-/* ISP2401 */
-/* Requirements for the shading correction. */
-struct sh_css_binary_sc_requirements {
- /* Bayer scaling factor, for the scaling which is applied before shading correction. */
- u32 bayer_scale_hor_ratio_in; /* Horizontal ratio (in) of scaling applied BEFORE shading correction. */
- u32 bayer_scale_hor_ratio_out; /* Horizontal ratio (out) of scaling applied BEFORE shading correction. */
- u32 bayer_scale_ver_ratio_in; /* Vertical ratio (in) of scaling applied BEFORE shading correction. */
- u32 bayer_scale_ver_ratio_out; /* Vertical ratio (out) of scaling applied BEFORE shading correction. */
-
- /* ISP internal frame is composed of the real sensor data and the padding data. */
- u32 sensor_data_origin_x_bqs_on_internal; /* X origin (in bqs) of sensor data on internal frame
- at shading correction. */
- u32 sensor_data_origin_y_bqs_on_internal; /* Y origin (in bqs) of sensor data on internal frame
- at shading correction. */
-};
-
-/* ISP2400: Get the requirements for the shading correction. */
+/* Get the requirements for the shading correction. */
static int
ia_css_binary_compute_shading_table_bayer_origin(
const struct ia_css_binary *binary, /* [in] */
@@ -261,227 +244,12 @@ ia_css_binary_compute_shading_table_bayer_origin(
return err;
}
-/* ISP2401: Get the requirements for the shading correction. */
-static int
-sh_css_binary_get_sc_requirements(const struct ia_css_binary *binary, /* [in] */
- unsigned int required_bds_factor, /* [in] */
- const struct ia_css_stream_config *stream_config, /* [in] */
- struct sh_css_binary_sc_requirements *scr) /* [out] */
-{
- int err;
-
- /* Numerator and denominator of the fixed bayer downscaling factor. (numerator >= denominator) */
- unsigned int bds_num, bds_den;
-
- /* Horizontal/Vertical ratio of bayer scaling between input area and output area. */
- unsigned int bs_hor_ratio_in, bs_hor_ratio_out, bs_ver_ratio_in, bs_ver_ratio_out;
-
- /* Left padding set by InputFormatter. */
- unsigned int left_padding_bqs;
-
- /* Flags corresponding to NEED_BDS_FACTOR_2_00/NEED_BDS_FACTOR_1_50/NEED_BDS_FACTOR_1_25 macros
- * defined in isp kernels. */
- unsigned int need_bds_factor_2_00, need_bds_factor_1_50, need_bds_factor_1_25;
-
- /* Left padding adjusted inside the isp kernels. */
- unsigned int left_padding_adjusted_bqs;
-
- /* Top padding padded inside the isp kernel for bayer downscaling binaries. */
- unsigned int top_padding_bqs;
-
- /* Bayer downscaling factor 1.0 by fixed-point. */
- int bds_frac_acc = FRAC_ACC; /* FRAC_ACC is defined in ia_css_fixedbds_param.h. */
-
- /* Right/Down shift amount caused by filters applied BEFORE shading corrertion. */
- unsigned int right_shift_bqs_before_bs; /* right shift before bayer scaling */
- unsigned int right_shift_bqs_after_bs; /* right shift after bayer scaling */
- unsigned int down_shift_bqs_before_bs; /* down shift before bayer scaling */
- unsigned int down_shift_bqs_after_bs; /* down shift after bayer scaling */
-
- /* Origin of the real sensor data area on the internal frame at shading correction. */
- unsigned int sensor_data_origin_x_bqs_on_internal;
- unsigned int sensor_data_origin_y_bqs_on_internal;
-
- unsigned int bs_frac = bds_frac_acc; /* scaling factor 1.0 in fixed point */
- unsigned int bs_out, bs_in; /* scaling ratio in fixed point */
-
- IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p",
- binary, required_bds_factor, stream_config);
-
- /* Get the numerator and denominator of the required bayer downscaling factor. */
- err = sh_css_bds_factor_get_numerator_denominator(required_bds_factor,
- &bds_num, &bds_den);
- if (err) {
- IA_CSS_LEAVE_ERR_PRIVATE(err);
- return err;
- }
-
- IA_CSS_LOG("bds_num=%d, bds_den=%d", bds_num, bds_den);
-
- /* Set the horizontal/vertical ratio of bayer scaling between input area and output area. */
- bs_hor_ratio_in = bds_num;
- bs_hor_ratio_out = bds_den;
- bs_ver_ratio_in = bds_num;
- bs_ver_ratio_out = bds_den;
-
- /* Set the left padding set by InputFormatter. (ia_css_ifmtr_configure() in ifmtr.c) */
- if (stream_config->left_padding == -1)
- left_padding_bqs = _ISP_BQS(binary->left_padding);
- else
- left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS - _ISP_BQS(stream_config->left_padding));
-
- IA_CSS_LOG("stream.left_padding=%d, binary.left_padding=%d, left_padding_bqs=%d",
- stream_config->left_padding, binary->left_padding,
- left_padding_bqs);
-
- /* Set the left padding adjusted inside the isp kernels.
- * When the bds_factor isn't 1.00, the left padding size is adjusted inside the isp,
- * before bayer downscaling. (scaled_hor_plane_index(), raw_compute_hphase() in raw.isp.c)
- */
- need_bds_factor_2_00 = ((binary->info->sp.bds.supported_bds_factors &
- (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_8_00))) != 0);
-
- need_bds_factor_1_50 = ((binary->info->sp.bds.supported_bds_factors &
- (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_50) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_25) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00))) != 0);
-
- need_bds_factor_1_25 = ((binary->info->sp.bds.supported_bds_factors &
- (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_25) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00))) != 0);
-
- if (binary->info->sp.pipeline.left_cropping > 0 &&
- (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25)) {
- /*
- * downscale 2.0 -> first_vec_adjusted_bqs = 128
- * downscale 1.5 -> first_vec_adjusted_bqs = 96
- * downscale 1.25 -> first_vec_adjusted_bqs = 80
- */
- unsigned int first_vec_adjusted_bqs = ISP_VEC_NELEMS * bs_hor_ratio_in / bs_hor_ratio_out;
- left_padding_adjusted_bqs = first_vec_adjusted_bqs
- - _ISP_BQS(binary->info->sp.pipeline.left_cropping);
- } else {
- left_padding_adjusted_bqs = left_padding_bqs;
- }
-
- IA_CSS_LOG("supported_bds_factors=%d, need_bds_factor:2_00=%d, 1_50=%d, 1_25=%d",
- binary->info->sp.bds.supported_bds_factors,
- need_bds_factor_2_00, need_bds_factor_1_50,
- need_bds_factor_1_25);
- IA_CSS_LOG("left_cropping=%d, left_padding_adjusted_bqs=%d",
- binary->info->sp.pipeline.left_cropping,
- left_padding_adjusted_bqs);
-
- /* Set the top padding padded inside the isp kernel for bayer downscaling binaries.
- * When the bds_factor isn't 1.00, the top padding is padded inside the isp
- * before bayer downscaling, because the top cropping size (input margin) is not enough.
- * (calculate_input_line(), raw_compute_vphase(), dma_read_raw() in raw.isp.c)
- * NOTE: In dma_read_raw(), the factor passed to raw_compute_vphase() is got by get_bds_factor_for_dma_read().
- * This factor is BDS_FPVAL_100/BDS_FPVAL_125/BDS_FPVAL_150/BDS_FPVAL_200.
- */
- top_padding_bqs = 0;
- if (binary->info->sp.pipeline.top_cropping > 0 &&
- (required_bds_factor == SH_CSS_BDS_FACTOR_1_25 ||
- required_bds_factor == SH_CSS_BDS_FACTOR_1_50 ||
- required_bds_factor == SH_CSS_BDS_FACTOR_2_00)) {
- /* Calculation from calculate_input_line() and raw_compute_vphase() in raw.isp.c. */
- int top_cropping_bqs = _ISP_BQS(binary->info->sp.pipeline.top_cropping);
- /* top cropping (in bqs) */
- int factor = bds_num * bds_frac_acc /
- bds_den; /* downscaling factor by fixed-point */
- int top_padding_bqsxfrac_acc = (top_cropping_bqs * factor - top_cropping_bqs *
- bds_frac_acc)
- + (2 * bds_frac_acc - factor); /* top padding by fixed-point (in bqs) */
-
- top_padding_bqs = (unsigned int)((top_padding_bqsxfrac_acc + bds_frac_acc / 2 -
- 1) / bds_frac_acc);
- }
-
- IA_CSS_LOG("top_cropping=%d, top_padding_bqs=%d",
- binary->info->sp.pipeline.top_cropping, top_padding_bqs);
-
- /* Set the right/down shift amount caused by filters applied BEFORE bayer scaling,
- * which scaling is applied BEFORE shading corrertion.
- *
- * When the bds_factor isn't 1.00, 3x3 anti-alias filter is applied to each color plane(Gr/R/B/Gb)
- * before bayer downscaling.
- * This filter shifts each color plane (Gr/R/B/Gb) to right/down directions by 1 pixel.
- */
- right_shift_bqs_before_bs = 0;
- down_shift_bqs_before_bs = 0;
-
- if (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25) {
- right_shift_bqs_before_bs = 1;
- down_shift_bqs_before_bs = 1;
- }
-
- IA_CSS_LOG("right_shift_bqs_before_bs=%d, down_shift_bqs_before_bs=%d",
- right_shift_bqs_before_bs, down_shift_bqs_before_bs);
-
- /* Set the right/down shift amount caused by filters applied AFTER bayer scaling,
- * which scaling is applied BEFORE shading corrertion.
- *
- * When DPC&BNR is processed between bayer scaling and shading correction,
- * DPC&BNR moves each color plane (Gr/R/B/Gb) to right/down directions by 1 pixel.
- */
- right_shift_bqs_after_bs = 0;
- down_shift_bqs_after_bs = 0;
-
- /* if DPC&BNR is enabled in the binary */
- if (binary->info->mem_offsets.offsets.param->dmem.dp.size != 0) {
- right_shift_bqs_after_bs = 1;
- down_shift_bqs_after_bs = 1;
- }
-
- IA_CSS_LOG("right_shift_bqs_after_bs=%d, down_shift_bqs_after_bs=%d",
- right_shift_bqs_after_bs, down_shift_bqs_after_bs);
-
- bs_out = bs_hor_ratio_out * bs_frac;
- bs_in = bs_hor_ratio_in * bs_frac;
- sensor_data_origin_x_bqs_on_internal =
- ((left_padding_adjusted_bqs + right_shift_bqs_before_bs) * bs_out + bs_in / 2) / bs_in
- + right_shift_bqs_after_bs; /* "+ bs_in/2": rounding */
-
- bs_out = bs_ver_ratio_out * bs_frac;
- bs_in = bs_ver_ratio_in * bs_frac;
- sensor_data_origin_y_bqs_on_internal =
- ((top_padding_bqs + down_shift_bqs_before_bs) * bs_out + bs_in / 2) / bs_in
- + down_shift_bqs_after_bs; /* "+ bs_in/2": rounding */
-
- scr->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in;
- scr->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out;
- scr->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in;
- scr->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out;
- scr->sensor_data_origin_x_bqs_on_internal = (uint32_t)sensor_data_origin_x_bqs_on_internal;
- scr->sensor_data_origin_y_bqs_on_internal = (uint32_t)sensor_data_origin_y_bqs_on_internal;
-
- IA_CSS_LOG("sc_requirements: %d, %d, %d, %d, %d, %d",
- scr->bayer_scale_hor_ratio_in,
- scr->bayer_scale_hor_ratio_out,
- scr->bayer_scale_ver_ratio_in, scr->bayer_scale_ver_ratio_out,
- scr->sensor_data_origin_x_bqs_on_internal,
- scr->sensor_data_origin_y_bqs_on_internal);
-
- IA_CSS_LEAVE_ERR_PRIVATE(err);
- return err;
-}
-
/* Get the shading information of Shading Correction Type 1. */
static int
-isp2400_binary_get_shading_info_type_1(const struct ia_css_binary *binary, /* [in] */
- unsigned int required_bds_factor, /* [in] */
- const struct ia_css_stream_config *stream_config, /* [in] */
- struct ia_css_shading_info *info) /* [out] */
+binary_get_shading_info_type_1(const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct ia_css_shading_info *info) /* [out] */
{
int err;
struct sh_css_shading_table_bayer_origin_compute_results res;
@@ -522,173 +290,6 @@ isp2400_binary_get_shading_info_type_1(const struct ia_css_binary *binary, /* [i
return err;
}
-/* Get the shading information of Shading Correction Type 1. */
-static int
-isp2401_binary_get_shading_info_type_1(const struct ia_css_binary *binary, /* [in] */
- unsigned int required_bds_factor, /* [in] */
- const struct ia_css_stream_config *stream_config, /* [in] */
- struct ia_css_shading_info *shading_info, /* [out] */
- struct ia_css_pipe_config *pipe_config) /* [out] */
-{
- int err;
- struct sh_css_binary_sc_requirements scr;
-
- u32 in_width_bqs, in_height_bqs, internal_width_bqs, internal_height_bqs;
- u32 num_hor_grids, num_ver_grids, bqs_per_grid_cell, tbl_width_bqs, tbl_height_bqs;
- u32 sensor_org_x_bqs_on_internal, sensor_org_y_bqs_on_internal, sensor_width_bqs, sensor_height_bqs;
- u32 sensor_center_x_bqs_on_internal, sensor_center_y_bqs_on_internal;
- u32 left, right, upper, lower;
- u32 adjust_left, adjust_right, adjust_upper, adjust_lower, adjust_width_bqs, adjust_height_bqs;
- u32 internal_org_x_bqs_on_tbl, internal_org_y_bqs_on_tbl;
- u32 sensor_org_x_bqs_on_tbl, sensor_org_y_bqs_on_tbl;
-
- assert(binary);
- assert(stream_config);
- assert(shading_info);
- assert(pipe_config);
-
- IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p",
- binary, required_bds_factor, stream_config);
-
- /* Initialize by default values. */
- *shading_info = DEFAULT_SHADING_INFO_TYPE_1;
-
- err = sh_css_binary_get_sc_requirements(binary, required_bds_factor, stream_config, &scr);
- if (err) {
- IA_CSS_LEAVE_ERR_PRIVATE(err);
- return err;
- }
-
- IA_CSS_LOG("binary: id=%d, sctbl=%dx%d, deci=%d",
- binary->info->sp.id, binary->sctbl_width_per_color, binary->sctbl_height, binary->deci_factor_log2);
- IA_CSS_LOG("binary: in=%dx%d, in_padded_w=%d, int=%dx%d, int_padded_w=%d, out=%dx%d, out_padded_w=%d",
- binary->in_frame_info.res.width, binary->in_frame_info.res.height, binary->in_frame_info.padded_width,
- binary->internal_frame_info.res.width, binary->internal_frame_info.res.height,
- binary->internal_frame_info.padded_width,
- binary->out_frame_info[0].res.width, binary->out_frame_info[0].res.height,
- binary->out_frame_info[0].padded_width);
-
- /* Set the input size from sensor, which includes left/top crop size. */
- in_width_bqs = _ISP_BQS(binary->in_frame_info.res.width);
- in_height_bqs = _ISP_BQS(binary->in_frame_info.res.height);
-
- /*
- * Frame size internally used in ISP, including sensor data and padding.
- * This is the frame size, to which the shading correction is applied.
- */
- internal_width_bqs = _ISP_BQS(binary->internal_frame_info.res.width);
- internal_height_bqs = _ISP_BQS(binary->internal_frame_info.res.height);
-
- /* Shading table. */
- num_hor_grids = binary->sctbl_width_per_color;
- num_ver_grids = binary->sctbl_height;
- bqs_per_grid_cell = (1 << binary->deci_factor_log2);
- tbl_width_bqs = (num_hor_grids - 1) * bqs_per_grid_cell;
- tbl_height_bqs = (num_ver_grids - 1) * bqs_per_grid_cell;
-
- IA_CSS_LOG("tbl_width_bqs=%d, tbl_height_bqs=%d", tbl_width_bqs, tbl_height_bqs);
-
- /*
- * Real sensor data area on the internal frame at shading correction.
- * Filters and scaling are applied to the internal frame before
- * shading correction, depending on the binary.
- */
- sensor_org_x_bqs_on_internal = scr.sensor_data_origin_x_bqs_on_internal;
- sensor_org_y_bqs_on_internal = scr.sensor_data_origin_y_bqs_on_internal;
- {
- unsigned int bs_frac = 8; /* scaling factor 1.0 in fixed point (8 == FRAC_ACC macro in ISP) */
- unsigned int bs_out, bs_in; /* scaling ratio in fixed point */
-
- bs_out = scr.bayer_scale_hor_ratio_out * bs_frac;
- bs_in = scr.bayer_scale_hor_ratio_in * bs_frac;
- sensor_width_bqs = (in_width_bqs * bs_out + bs_in / 2) / bs_in; /* "+ bs_in/2": rounding */
-
- bs_out = scr.bayer_scale_ver_ratio_out * bs_frac;
- bs_in = scr.bayer_scale_ver_ratio_in * bs_frac;
- sensor_height_bqs = (in_height_bqs * bs_out + bs_in / 2) / bs_in; /* "+ bs_in/2": rounding */
- }
-
- /* Center of the sensor data on the internal frame at shading correction. */
- sensor_center_x_bqs_on_internal = sensor_org_x_bqs_on_internal + sensor_width_bqs / 2;
- sensor_center_y_bqs_on_internal = sensor_org_y_bqs_on_internal + sensor_height_bqs / 2;
-
- /* Size of left/right/upper/lower sides of the sensor center on the internal frame. */
- left = sensor_center_x_bqs_on_internal;
- right = internal_width_bqs - sensor_center_x_bqs_on_internal;
- upper = sensor_center_y_bqs_on_internal;
- lower = internal_height_bqs - sensor_center_y_bqs_on_internal;
-
- /* Align the size of left/right/upper/lower sides to a multiple of the grid cell size. */
- adjust_left = CEIL_MUL(left, bqs_per_grid_cell);
- adjust_right = CEIL_MUL(right, bqs_per_grid_cell);
- adjust_upper = CEIL_MUL(upper, bqs_per_grid_cell);
- adjust_lower = CEIL_MUL(lower, bqs_per_grid_cell);
-
- /* Shading table should cover the adjusted frame size. */
- adjust_width_bqs = adjust_left + adjust_right;
- adjust_height_bqs = adjust_upper + adjust_lower;
-
- IA_CSS_LOG("adjust_width_bqs=%d, adjust_height_bqs=%d", adjust_width_bqs, adjust_height_bqs);
-
- if (adjust_width_bqs > tbl_width_bqs || adjust_height_bqs > tbl_height_bqs) {
- IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
- return -EINVAL;
- }
-
- /* Origin of the internal frame on the shading table. */
- internal_org_x_bqs_on_tbl = adjust_left - left;
- internal_org_y_bqs_on_tbl = adjust_upper - upper;
-
- /* Origin of the real sensor data area on the shading table. */
- sensor_org_x_bqs_on_tbl = internal_org_x_bqs_on_tbl + sensor_org_x_bqs_on_internal;
- sensor_org_y_bqs_on_tbl = internal_org_y_bqs_on_tbl + sensor_org_y_bqs_on_internal;
-
- /* The shading information necessary as API is stored in the shading_info. */
- shading_info->info.type_1.num_hor_grids = num_hor_grids;
- shading_info->info.type_1.num_ver_grids = num_ver_grids;
- shading_info->info.type_1.bqs_per_grid_cell = bqs_per_grid_cell;
-
- shading_info->info.type_1.bayer_scale_hor_ratio_in = scr.bayer_scale_hor_ratio_in;
- shading_info->info.type_1.bayer_scale_hor_ratio_out = scr.bayer_scale_hor_ratio_out;
- shading_info->info.type_1.bayer_scale_ver_ratio_in = scr.bayer_scale_ver_ratio_in;
- shading_info->info.type_1.bayer_scale_ver_ratio_out = scr.bayer_scale_ver_ratio_out;
-
- shading_info->info.type_1.isp_input_sensor_data_res_bqs.width = in_width_bqs;
- shading_info->info.type_1.isp_input_sensor_data_res_bqs.height = in_height_bqs;
-
- shading_info->info.type_1.sensor_data_res_bqs.width = sensor_width_bqs;
- shading_info->info.type_1.sensor_data_res_bqs.height = sensor_height_bqs;
-
- shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x = (int32_t)sensor_org_x_bqs_on_tbl;
- shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y = (int32_t)sensor_org_y_bqs_on_tbl;
-
- /* The shading information related to ISP (but, not necessary as API) is stored in the pipe_config. */
- pipe_config->internal_frame_origin_bqs_on_sctbl.x = (int32_t)internal_org_x_bqs_on_tbl;
- pipe_config->internal_frame_origin_bqs_on_sctbl.y = (int32_t)internal_org_y_bqs_on_tbl;
-
- IA_CSS_LOG("shading_info: grids=%dx%d, cell=%d, scale=%d,%d,%d,%d, input=%dx%d, data=%dx%d, origin=(%d,%d)",
- shading_info->info.type_1.num_hor_grids,
- shading_info->info.type_1.num_ver_grids,
- shading_info->info.type_1.bqs_per_grid_cell,
- shading_info->info.type_1.bayer_scale_hor_ratio_in,
- shading_info->info.type_1.bayer_scale_hor_ratio_out,
- shading_info->info.type_1.bayer_scale_ver_ratio_in,
- shading_info->info.type_1.bayer_scale_ver_ratio_out,
- shading_info->info.type_1.isp_input_sensor_data_res_bqs.width,
- shading_info->info.type_1.isp_input_sensor_data_res_bqs.height,
- shading_info->info.type_1.sensor_data_res_bqs.width,
- shading_info->info.type_1.sensor_data_res_bqs.height,
- shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x,
- shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y);
-
- IA_CSS_LOG("pipe_config: origin=(%d,%d)",
- pipe_config->internal_frame_origin_bqs_on_sctbl.x,
- pipe_config->internal_frame_origin_bqs_on_sctbl.y);
-
- IA_CSS_LEAVE_ERR_PRIVATE(err);
- return err;
-}
-
int
ia_css_binary_get_shading_info(const struct ia_css_binary *binary, /* [in] */
@@ -706,24 +307,13 @@ ia_css_binary_get_shading_info(const struct ia_css_binary *binary, /* [in] */
IA_CSS_ENTER_PRIVATE("binary=%p, type=%d, required_bds_factor=%d, stream_config=%p",
binary, type, required_bds_factor, stream_config);
- if (type != IA_CSS_SHADING_CORRECTION_TYPE_1) {
- err = -ENOTSUPP;
-
- IA_CSS_LEAVE_ERR_PRIVATE(err);
- return err;
- }
-
- if (!IS_ISP2401)
- err = isp2400_binary_get_shading_info_type_1(binary,
- required_bds_factor,
- stream_config,
- shading_info);
+ if (type == IA_CSS_SHADING_CORRECTION_TYPE_1)
+ err = binary_get_shading_info_type_1(binary,
+ required_bds_factor,
+ stream_config,
+ shading_info);
else
- err = isp2401_binary_get_shading_info_type_1(binary,
- required_bds_factor,
- stream_config,
- shading_info,
- pipe_config);
+ err = -ENOTSUPP;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
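After this simplification ia_css_binary_get_shading_info() serves only Shading Correction Type 1 and reports every other type as -ENOTSUPP, instead of dispatching between per-generation variants. A hypothetical caller sketch of the resulting contract (the trailing parameter order is assumed from the calls shown above):

    /* Hypothetical wrapper; get_shading() is not a real driver function. */
    static int get_shading(const struct ia_css_binary *binary,
                           unsigned int bds_factor,
                           const struct ia_css_stream_config *cfg,
                           struct ia_css_shading_info *info,
                           struct ia_css_pipe_config *pipe_cfg)
    {
            /* Anything but TYPE_1 now yields -ENOTSUPP. */
            return ia_css_binary_get_shading_info(binary,
                                                  IA_CSS_SHADING_CORRECTION_TYPE_1,
                                                  bds_factor, cfg, info, pipe_cfg);
    }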
@@ -805,11 +395,7 @@ ia_css_binary_3a_grid_info(const struct ia_css_binary *binary,
s3a_info->deci_factor_log2 = binary->deci_factor_log2;
s3a_info->elem_bit_depth = SH_CSS_BAYER_BITS;
s3a_info->use_dmem = binary->info->sp.s3a.s3atbl_use_dmem;
-#if defined(HAS_NO_HMEM)
- s3a_info->has_histogram = 1;
-#else
s3a_info->has_histogram = 0;
-#endif
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
@@ -965,15 +551,9 @@ binary_grid_deci_factor_log2(int width, int height)
/* 3A/Shading decimation factor specification (at August 2008)
* ------------------------------------------------------------------
* [Image Width (BQ)] [Decimation Factor (BQ)] [Resulting grid cells]
- #ifndef ISP2401
* 1280 ~ 32 40 ~
* 640 ~ 1279 16 40 ~ 80
* ~ 639 8 ~ 80
- #else
- * from 1280 32 from 40
- * from 640 to 1279 16 from 40 to 80
- * to 639 8 to 80
- #endif
* ------------------------------------------------------------------
*/
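
Read as code, the table above amounts to the following lookup (an
illustrative sketch only; the surrounding binary_grid_deci_factor_log2()
deals in the log2 of these factors):

	/* hypothetical helper: decimation factor in BQ for a width in BQ */
	static int deci_factor_bq(int width_bq)
	{
		if (width_bq >= 1280)
			return 32;	/* 40 or more resulting grid cells */
		if (width_bq >= 640)
			return 16;	/* 40 to 80 resulting grid cells */
		return 8;		/* up to 80 resulting grid cells */
	}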
/* Maximum and minimum decimation factor by the specification */
@@ -1335,26 +915,14 @@ ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
if (info->enable.sc)
{
- if (!IS_ISP2401) {
- binary->sctbl_width_per_color = _ISP2400_SCTBL_WIDTH_PER_COLOR(sc_3a_dis_padded_width, s3a_log_deci);
- binary->sctbl_aligned_width_per_color = ISP2400_SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR;
- binary->sctbl_height = _ISP2400_SCTBL_HEIGHT(sc_3a_dis_height, s3a_log_deci);
- } else {
- binary->sctbl_width_per_color = _ISP2401_SCTBL_WIDTH_PER_COLOR(isp_internal_width, s3a_log_deci);
- binary->sctbl_aligned_width_per_color = ISP2401_SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR;
- binary->sctbl_height = _ISP2401_SCTBL_HEIGHT(isp_internal_height, s3a_log_deci);
- binary->sctbl_legacy_width_per_color = _ISP_SCTBL_LEGACY_WIDTH_PER_COLOR(sc_3a_dis_padded_width, s3a_log_deci);
- binary->sctbl_legacy_height = _ISP_SCTBL_LEGACY_HEIGHT(sc_3a_dis_height, s3a_log_deci);
- }
+ binary->sctbl_width_per_color = _ISP_SCTBL_WIDTH_PER_COLOR(sc_3a_dis_padded_width, s3a_log_deci);
+ binary->sctbl_aligned_width_per_color = SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR;
+ binary->sctbl_height = _ISP_SCTBL_HEIGHT(sc_3a_dis_height, s3a_log_deci);
} else
{
binary->sctbl_width_per_color = 0;
binary->sctbl_aligned_width_per_color = 0;
binary->sctbl_height = 0;
- if (IS_ISP2401) {
- binary->sctbl_legacy_width_per_color = 0;
- binary->sctbl_legacy_height = 0;
- }
}
ia_css_sdis_init_info(&binary->dis,
sc_3a_dis_width,
@@ -1383,20 +951,13 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
*req_vf_info;
struct ia_css_binary_xinfo *xcandidate;
-#ifndef ISP2401
bool need_ds, need_dz, need_dvs, need_xnr, need_dpc;
-#else
- bool need_ds, need_dz, need_dvs, need_xnr, need_dpc, need_tnr;
-#endif
bool striped;
bool enable_yuv_ds;
bool enable_high_speed;
bool enable_dvs_6axis;
bool enable_reduced_pipe;
bool enable_capture_pp_bli;
-#ifdef ISP2401
- bool enable_luma_only;
-#endif
int err = -EINVAL;
bool continuous;
unsigned int isp_pipe_version;
@@ -1418,41 +979,26 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
stream_format = descr->stream_format;
req_in_info = descr->in_info;
req_bds_out_info = descr->bds_out_info;
- for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
- {
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
req_out_info[i] = descr->out_info[i];
if (req_out_info[i] && (req_out_info[i]->res.width != 0))
req_bin_out_info = req_out_info[i];
}
if (!req_bin_out_info)
return -EINVAL;
-#ifndef ISP2401
req_vf_info = descr->vf_info;
-#else
-
- if ((descr->vf_info) && (descr->vf_info->res.width == 0))
- /* width==0 means that there is no vf pin (e.g. in SkyCam preview case) */
- req_vf_info = NULL;
- else
- req_vf_info = descr->vf_info;
-#endif
need_xnr = descr->enable_xnr;
need_ds = descr->enable_fractional_ds;
need_dz = false;
need_dvs = false;
need_dpc = descr->enable_dpc;
-#ifdef ISP2401
- need_tnr = descr->enable_tnr;
-#endif
+
enable_yuv_ds = descr->enable_yuv_ds;
enable_high_speed = descr->enable_high_speed;
enable_dvs_6axis = descr->enable_dvs_6axis;
enable_reduced_pipe = descr->enable_reduced_pipe;
enable_capture_pp_bli = descr->enable_capture_pp_bli;
-#ifdef ISP2401
- enable_luma_only = descr->enable_luma_only;
-#endif
continuous = descr->continuous;
striped = descr->striped;
isp_pipe_version = descr->isp_pipe_version;
@@ -1462,8 +1008,7 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
internal_res.width = 0;
internal_res.height = 0;
- if (mode == IA_CSS_BINARY_MODE_VIDEO)
- {
+ if (mode == IA_CSS_BINARY_MODE_VIDEO) {
dvs_env = descr->dvs_env;
need_dz = descr->enable_dz;
/* Video is the only mode that has a nodz variant. */
@@ -1472,8 +1017,7 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
/* print a map of the binary file */
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "BINARY INFO:\n");
- for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++)
- {
+ for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) {
xcandidate = binary_infos[i];
if (xcandidate) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%d:\n", i);
@@ -1488,8 +1032,7 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
/* printf("sh_css_binary_find: pipe version %d\n", isp_pipe_version); */
for (xcandidate = binary_infos[mode]; xcandidate;
- xcandidate = xcandidate->next)
- {
+ xcandidate = xcandidate->next) {
struct ia_css_binary_info *candidate = &xcandidate->sp;
/* printf("sh_css_binary_find: evaluating candidate:
* %d\n",candidate->id); */
@@ -1747,24 +1290,6 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
continue;
}
-#ifdef ISP2401
- if (candidate->enable.luma_only != enable_luma_only) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: %d != %d\n",
- __LINE__, candidate->enable.luma_only,
- descr->enable_luma_only);
- continue;
- }
-
- if (!candidate->enable.tnr && need_tnr) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__, candidate->enable.tnr,
- descr->enable_tnr);
- continue;
- }
-
-#endif
/* reconfigure any variable properties of the binary */
err = ia_css_binary_fill_info(xcandidate, online, two_ppc,
stream_format, req_in_info,
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
index 05ce0f73f5ae..f46238725eea 100644
--- a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
@@ -2821,7 +2821,7 @@ ia_css_debug_pipe_graph_dump_stage(
"in", true);
}
- for (i = 0; i < NUM_TNR_FRAMES; i++) {
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
if (stage->args.tnr_frames[i]) {
ia_css_debug_pipe_graph_dump_frame(
stage->args.tnr_frames[i], id,
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
index 31f01e0f58aa..c756a134efc3 100644
--- a/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/interface/ia_css_frame.h
@@ -138,27 +138,7 @@ bool ia_css_frame_is_same_type(
* @param[in] info The frame info
* @return
*/
-void ia_css_dma_configure_from_info(
- struct dma_port_config *config,
- const struct ia_css_frame_info *info);
-
-/* ISP2401 */
-/* @brief Finds the cropping resolution
- * This function finds the maximum cropping resolution in an input image keeping
- * the aspect ratio for the given output resolution.Calculates the coordinates
- * for cropping from the center and returns the starting pixel location of the
- * region in the input image. Also returns the dimension of the cropping
- * resolution.
- *
- * @param
- * @param[in] in_res Resolution of input image
- * @param[in] out_res Resolution of output image
- * @param[out] crop_res Crop resolution of input image
- * @return Returns 0 or -EINVAL on error
- */
-int
-ia_css_frame_find_crop_resolution(const struct ia_css_resolution *in_res,
- const struct ia_css_resolution *out_res,
- struct ia_css_resolution *crop_res);
+int ia_css_dma_configure_from_info(struct dma_port_config *config,
+ const struct ia_css_frame_info *info);
#endif /* __IA_CSS_FRAME_H__ */
diff --git a/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
index 10c4907187d9..a3aae638b0bf 100644
--- a/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
+++ b/drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c
@@ -168,25 +168,23 @@ int ia_css_frame_map(struct ia_css_frame **frame,
if (err)
return err;
- if (!err) {
- if (pgnr < ((PAGE_ALIGN(me->data_bytes)) >> PAGE_SHIFT)) {
- dev_err(atomisp_dev,
- "user space memory size is less than the expected size..\n");
- err = -ENOMEM;
- goto error;
- } else if (pgnr > ((PAGE_ALIGN(me->data_bytes)) >> PAGE_SHIFT)) {
- dev_err(atomisp_dev,
- "user space memory size is large than the expected size..\n");
- err = -ENOMEM;
- goto error;
- }
+ if (pgnr < ((PAGE_ALIGN(me->data_bytes)) >> PAGE_SHIFT)) {
+ dev_err(atomisp_dev,
+ "user space memory size is less than the expected size..\n");
+ err = -ENOMEM;
+ goto error;
+ } else if (pgnr > ((PAGE_ALIGN(me->data_bytes)) >> PAGE_SHIFT)) {
+ dev_err(atomisp_dev,
+ "user space memory size is large than the expected size..\n");
+ err = -ENOMEM;
+ goto error;
+ }
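
For illustration of the two checks above: with a (hypothetical) 4 KiB page
size and me->data_bytes = 10000, PAGE_ALIGN(10000) is 12288, so the expected
page count is 12288 >> 12 = 3; a caller passing any other pgnr gets -ENOMEM.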
- me->data = hmm_alloc(me->data_bytes, HMM_BO_USER, 0, data,
- attribute & ATOMISP_MAP_FLAG_CACHED);
+ me->data = hmm_alloc(me->data_bytes, HMM_BO_USER, 0, data,
+ attribute & ATOMISP_MAP_FLAG_CACHED);
- if (me->data == mmgr_NULL)
- err = -EINVAL;
- }
+ if (me->data == mmgr_NULL)
+ err = -EINVAL;
error:
if (err) {
@@ -594,10 +592,8 @@ bool ia_css_frame_is_same_type(const struct ia_css_frame *frame_a,
return is_equal;
}
-void
-ia_css_dma_configure_from_info(
- struct dma_port_config *config,
- const struct ia_css_frame_info *info)
+int ia_css_dma_configure_from_info(struct dma_port_config *config,
+ const struct ia_css_frame_info *info)
{
unsigned int is_raw_packed = info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED;
unsigned int bits_per_pixel = is_raw_packed ? info->raw_bit_depth :
@@ -610,7 +606,13 @@ ia_css_dma_configure_from_info(
config->elems = (uint8_t)elems_b;
config->width = (uint16_t)info->res.width;
config->crop = 0;
- assert(config->width <= info->padded_width);
+
+ if (config->width > info->padded_width) {
+ dev_err(atomisp_dev, "internal error: padded_width is too small!\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
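
With the assert() gone, callers are expected to check the new return value.
A minimal sketch (the surrounding caller is hypothetical):

	struct dma_port_config config;
	int ret;

	ret = ia_css_dma_configure_from_info(&config, &frame->info);
	if (ret)
		return ret;	/* padded_width too small: propagate, don't crash */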
/**************************************************************************
@@ -931,73 +933,3 @@ void ia_css_resolution_to_sp_resolution(
to->width = (uint16_t)from->width;
to->height = (uint16_t)from->height;
}
-
-/* ISP2401 */
-int
-ia_css_frame_find_crop_resolution(const struct ia_css_resolution *in_res,
- const struct ia_css_resolution *out_res,
- struct ia_css_resolution *crop_res) {
- u32 wd_even_ceil, ht_even_ceil;
- u32 in_ratio, out_ratio;
-
- if ((!in_res) || (!out_res) || (!crop_res))
- return -EINVAL;
-
- IA_CSS_ENTER_PRIVATE("in(%ux%u) -> out(%ux%u)", in_res->width,
- in_res->height, out_res->width, out_res->height);
-
- if ((in_res->width == 0)
- || (in_res->height == 0)
- || (out_res->width == 0)
- || (out_res->height == 0))
- return -EINVAL;
-
- if ((out_res->width > in_res->width) ||
- (out_res->height > in_res->height))
- return -EINVAL;
-
- /* If aspect ratio (width/height) of out_res is higher than the aspect
- * ratio of the in_res, then we crop vertically, otherwise we crop
- * horizontally.
- */
- in_ratio = in_res->width * out_res->height;
- out_ratio = out_res->width * in_res->height;
-
- if (in_ratio == out_ratio)
- {
- crop_res->width = in_res->width;
- crop_res->height = in_res->height;
- } else if (out_ratio > in_ratio)
- {
- crop_res->width = in_res->width;
- crop_res->height = ROUND_DIV(out_res->height * crop_res->width,
- out_res->width);
- } else
- {
- crop_res->height = in_res->height;
- crop_res->width = ROUND_DIV(out_res->width * crop_res->height,
- out_res->height);
- }
-
- /* Round new (cropped) width and height to an even number.
- * binarydesc_calculate_bds_factor is such that we should consider as
- * much of the input as possible. This is different only when we end up
- * with an odd number in the last step. So, we take the next even number
- * if it falls within the input, otherwise take the previous even no.
- */
- wd_even_ceil = EVEN_CEIL(crop_res->width);
- ht_even_ceil = EVEN_CEIL(crop_res->height);
- if ((wd_even_ceil > in_res->width) || (ht_even_ceil > in_res->height))
- {
- crop_res->width = EVEN_FLOOR(crop_res->width);
- crop_res->height = EVEN_FLOOR(crop_res->height);
- } else
- {
- crop_res->width = wd_even_ceil;
- crop_res->height = ht_even_ceil;
- }
-
- IA_CSS_LEAVE_PRIVATE("in(%ux%u) -> out(%ux%u)", crop_res->width,
- crop_res->height, out_res->width, out_res->height);
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
index 82f3c19dc455..8fc7746f8639 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
@@ -189,17 +189,6 @@ ia_css_isys_error_t ia_css_isys_stream_create(
return false;
}
-#ifdef ISP2401
- /*
- * Early polling is required for timestamp accuracy in certain cause.
- * The ISYS HW polling is started on
- * ia_css_isys_stream_capture_indication() instead of
- * ia_css_pipeline_sp_wait_for_isys_stream_N() as isp processing of
- * capture takes longer than getting an ISYS frame
- */
- isys_stream->polling_mode = isys_stream_descr->polling_mode;
-
-#endif
/* create metadata channel */
if (isys_stream_descr->metadata.enable) {
rc = create_input_system_channel(isys_stream_descr, true,
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
index d03957d1ecf4..dfc50247ea8e 100644
--- a/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
@@ -140,8 +140,7 @@ void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
false, false, false, true, SH_CSS_BDS_FACTOR_1_00,
SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD,
IA_CSS_INPUT_MODE_MEMORY, NULL, NULL,
- (enum mipi_port_id)0,
- NULL, NULL);
+ (enum mipi_port_id)0);
ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
if (!sh_css_sp_is_running()) {
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index c4b35cbab373..1d605e533e29 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -20,6 +20,8 @@
#include "hmm.h"
+#include "atomisp_internal.h"
+
#include "ia_css.h"
#include "sh_css_hrt.h" /* only for file 2 MIPI */
#include "ia_css_buffer.h"
@@ -62,9 +64,9 @@
#include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */
#include "gdc_device.h" /* HRT_GDC_N */
#include "dma.h" /* dma_set_max_burst_size() */
-#include "irq.h" /* virq */
-#include "sp.h" /* cnd_sp_irq_enable() */
-#include "isp.h" /* cnd_isp_irq_enable, ISP_VEC_NELEMS */
+#include "irq.h" /* virq */
+#include "sp.h" /* cnd_sp_irq_enable() */
+#include "isp.h" /* cnd_isp_irq_enable, ISP_VEC_NELEMS */
#include "gp_device.h" /* gp_device_reg_store() */
#define __INLINE_GPIO__
#include "gpio.h"
@@ -74,15 +76,6 @@
#define SH_CSS_VIDEO_BUFFER_ALIGNMENT 0
-#if WITH_PC_MONITORING
-#define MULTIPLE_SAMPLES 1
-#define NOF_SAMPLES 60
-#include "linux/kthread.h"
-#include "linux/sched.h"
-#include "linux/delay.h"
-#include "sh_css_metrics.h"
-static int thread_alive;
-#endif /* WITH_PC_MONITORING */
#include "ia_css_spctrl.h"
#include "ia_css_version_data.h"
@@ -97,8 +90,9 @@ static int thread_alive;
/* Size of Refcount List */
#define REFCOUNT_SIZE 1000
-/* for JPEG, we don't know the length of the image upfront,
- * but since we support sensor upto 16MP, we take this as
+/*
+ * for JPEG, we don't know the length of the image upfront,
+ * but since we support sensors up to 16MP, we take this as the
 * upper limit.
*/
#define JPEG_BYTES (16 * 1024 * 1024)
@@ -110,9 +104,10 @@ struct sh_css my_css;
int __printf(1, 0) (*sh_css_printf)(const char *fmt, va_list args) = NULL;
-/* modes of work: stream_create and stream_destroy will update the save/restore data
- only when in working mode, not suspend/resume
-*/
+/*
+ * modes of work: stream_create and stream_destroy will update the save/restore
+ * data only when in working mode, not suspend/resume
+ */
enum ia_sh_css_modes {
sh_css_mode_none = 0,
sh_css_mode_working,
@@ -120,40 +115,55 @@ enum ia_sh_css_modes {
sh_css_mode_resume
};
-/* a stream seed, to save and restore the stream data.
- the stream seed contains all the data required to "grow" the seed again after it was closed.
+/**
+ * struct sh_css_stream_seed - a stream seed, to save and restore the
+ * stream data.
+ *
+ * @orig_stream: pointer to restore the original handle
+ * @stream: handle, used as ID too.
+ * @stream_config: stream config struct
+ * @num_pipes: number of pipes
+ * @pipes: pipe handles
+ * @orig_pipes: pointer to restore original handle
+ * @pipe_config: pipe config structs
+ *
+ * the stream seed contains all the data required to "grow" the seed again
+ * after it was closed.
*/
struct sh_css_stream_seed {
- struct ia_css_stream
- **orig_stream; /* pointer to restore the original handle */
- struct ia_css_stream *stream; /* handle, used as ID too.*/
- struct ia_css_stream_config stream_config; /* stream config struct */
+ struct ia_css_stream **orig_stream;
+ struct ia_css_stream *stream;
+ struct ia_css_stream_config stream_config;
int num_pipes;
- struct ia_css_pipe *pipes[IA_CSS_PIPE_ID_NUM]; /* pipe handles */
- struct ia_css_pipe
- **orig_pipes[IA_CSS_PIPE_ID_NUM]; /* pointer to restore original handle */
- struct ia_css_pipe_config
- pipe_config[IA_CSS_PIPE_ID_NUM]; /* pipe config structs */
+ struct ia_css_pipe *pipes[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe **orig_pipes[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe_config pipe_config[IA_CSS_PIPE_ID_NUM];
};
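
Because the seed stores both the handles and the locations they were
originally returned through, a restore path can simply re-plant them. An
illustrative sketch (not the actual resume code):

	/* hand the saved handles back to their owners */
	*seed->orig_stream = seed->stream;
	for (i = 0; i < seed->num_pipes; i++)
		*seed->orig_pipes[i] = seed->pipes[i];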
#define MAX_ACTIVE_STREAMS 5
-/* A global struct for save/restore to hold all the data that should sustain power-down:
- MMU base, IRQ type, env for routines, binary loaded FW and the stream seeds.
-*/
+/*
+ * A global struct for save/restore to hold all the data that should
+ * sustain power-down: MMU base, IRQ type, env for routines, binary loaded FW
+ * and the stream seeds.
+ */
struct sh_css_save {
enum ia_sh_css_modes mode;
- u32 mmu_base; /* the last mmu_base */
+ u32 mmu_base; /* the last mmu_base */
enum ia_css_irq_type irq_type;
struct sh_css_stream_seed stream_seeds[MAX_ACTIVE_STREAMS];
- struct ia_css_fw *loaded_fw; /* fw struct previously loaded */
- struct ia_css_env driver_env; /* driver-supplied env copy */
+ struct ia_css_fw *loaded_fw; /* fw struct previously loaded */
+ struct ia_css_env driver_env; /* driver-supplied env copy */
};
static bool my_css_save_initialized; /* if my_css_save was initialized */
static struct sh_css_save my_css_save;
-/* pqiao NOTICE: this is for css internal buffer recycling when stopping pipeline,
- this array is temporary and will be replaced by resource manager*/
+/*
+ * pqiao NOTICE: this is for css internal buffer recycling when stopping
+ * the pipeline; this array is temporary and will be replaced by the
+ * resource manager
+ */
+
/* Taking the biggest Size for number of Elements */
#define MAX_HMM_BUFFER_NUM \
(SH_CSS_MAX_NUM_QUEUES * (IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE + 2))
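
For a sense of scale: assuming, purely for illustration, SH_CSS_MAX_NUM_QUEUES
is 16 and IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE is 6, MAX_HMM_BUFFER_NUM works
out to 16 * (6 + 2) = 128 tracked buffers.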
@@ -181,27 +191,6 @@ allocate_delay_frames(struct ia_css_pipe *pipe);
static int
sh_css_pipe_start(struct ia_css_stream *stream);
-/* ISP 2401 */
-/*
- * @brief Stop all "ia_css_pipe" instances in the target
- * "ia_css_stream" instance.
- *
- * @param[in] stream Point to the target "ia_css_stream" instance.
- *
- * @return
- * - 0, if the "stop" requests have been successfully sent out.
- * - CSS error code, otherwise.
- *
- *
- * NOTE
- * This API sends the "stop" requests to the "ia_css_pipe"
- * instances in the same "ia_css_stream" instance. It will
- * return without waiting for all "ia_css_pipe" instatnces
- * being stopped.
- */
-static int
-sh_css_pipes_stop(struct ia_css_stream *stream);
-
/*
* @brief Check if all "ia_css_pipe" instances in the target
* "ia_css_stream" instance have stopped.
@@ -213,9 +202,6 @@ sh_css_pipes_stop(struct ia_css_stream *stream);
* instance have been stopped.
* - false, otherwise.
*/
-/* ISP 2401 */
-static bool
-sh_css_pipes_have_stopped(struct ia_css_stream *stream);
/* ISP 2401 */
static int
@@ -224,9 +210,6 @@ ia_css_pipe_check_format(struct ia_css_pipe *pipe,
/* ISP 2401 */
static int
-check_pipe_resolutions(const struct ia_css_pipe *pipe);
-
-static int
ia_css_pipe_load_extension(struct ia_css_pipe *pipe,
struct ia_css_fw_info *firmware);
@@ -384,10 +367,6 @@ ia_css_get_acc_configs(
struct ia_css_pipe *pipe,
struct ia_css_isp_config *config);
-#if CONFIG_ON_FRAME_ENQUEUE()
-static int set_config_on_frame_enqueue(struct ia_css_frame_info
- *info, struct frame_data_wrapper *frame);
-#endif
#ifdef ISP2401
static unsigned int get_crop_lines_for_bayer_order(const struct
@@ -396,17 +375,7 @@ static unsigned int get_crop_columns_for_bayer_order(const struct
ia_css_stream_config *config);
static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
unsigned int *extra_row, unsigned int *extra_column);
-static int
-aspect_ratio_crop_init(struct ia_css_stream *curr_stream,
- struct ia_css_pipe *pipes[],
- bool *do_crop_status);
-
-static bool
-aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe);
-static int
-aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
- struct ia_css_resolution *effective_res);
#endif
static void
@@ -447,9 +416,10 @@ static enum ia_css_frame_format yuv422_copy_formats[] = {
IA_CSS_FRAME_FORMAT_YUYV
};
-/* Verify whether the selected output format is can be produced
+/*
+ * Verify whether the selected output format can be produced
* by the copy binary given the stream format.
- * */
+ */
static int
verify_copy_out_frame_format(struct ia_css_pipe *pipe)
{
@@ -522,6 +492,7 @@ ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream)
return bpp;
}
+/* TODO: move define to proper file in tools */
#define GP_ISEL_TPG_MODE 0x90058
#if !defined(ISP2401)
@@ -573,12 +544,8 @@ sh_css_config_input_network(struct ia_css_stream *stream)
vblank_cycles = vblank_lines * (width + hblank_cycles);
sh_css_sp_configure_sync_gen(width, height, hblank_cycles,
vblank_cycles);
- if (!IS_ISP2401) {
- if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG) {
- /* TODO: move define to proper file in tools */
- ia_css_device_store_uint32(GP_ISEL_TPG_MODE, 0);
- }
- }
+ if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG)
+ ia_css_device_store_uint32(GP_ISEL_TPG_MODE, 0);
}
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"sh_css_config_input_network() leave:\n");
@@ -689,13 +656,13 @@ static unsigned int csi2_protocol_calculate_max_subpixels_per_line(
/*
* The frame format layout is shown below.
*
- * Line 0: Pixel Pixel ... Pixel
- * Line 1: Pixel Pixel ... Pixel
- * Line 2: Pixel Pixel ... Pixel
- * Line 3: Pixel Pixel ... Pixel
+ * Line 0: Pixel ... Pixel
+ * Line 1: Pixel ... Pixel
+ * Line 2: Pixel ... Pixel
+ * Line 3: Pixel ... Pixel
* ...
- * Line (n-2): Pixel Pixel ... Pixel
- * Line (n-1): Pixel Pixel ... Pixel
+ * Line (n-2): Pixel ... Pixel
+ * Line (n-1): Pixel ... Pixel
*
* In this frame format, the even-line is
* as wide as the odd-line.
@@ -906,8 +873,10 @@ static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
isys_stream_descr->metadata.lines_per_frame =
stream_cfg->metadata_config.resolution.height;
#ifdef ISP2401
- /* For new input system, number of str2mmio requests must be even.
- * So we round up number of metadata lines to be even. */
+ /*
+ * For new input system, number of str2mmio requests must be even.
+ * So we round up number of metadata lines to be even.
+ */
if (isys_stream_descr->metadata.lines_per_frame > 0)
isys_stream_descr->metadata.lines_per_frame +=
(isys_stream_descr->metadata.lines_per_frame & 1);
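
With this adjustment, an odd line count is bumped by one, e.g. 5 lines become
5 + (5 & 1) = 6, while an even count such as 4 is left unchanged.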
@@ -1003,22 +972,10 @@ static bool sh_css_translate_stream_cfg_to_isys_stream_descr(
isys_stream_descr->raw_packed = stream_cfg->pack_raw_pixels;
isys_stream_descr->linked_isys_stream_id = (int8_t)
stream_cfg->isys_config[isys_stream_idx].linked_isys_stream_id;
- /*
- * Early polling is required for timestamp accuracy in certain case.
- * The ISYS HW polling is started on
- * ia_css_isys_stream_capture_indication() instead of
- * ia_css_pipeline_sp_wait_for_isys_stream_N() as isp processing of
- * capture takes longer than getting an ISYS frame
- *
- * Only 2401 relevant ??
- */
-#if 0 // FIXME: NOT USED on Yocto Aero
- isys_stream_descr->polling_mode
- = early_polling ? INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST
- : INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME;
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n");
-#endif
+
+ if (IS_ISP2401)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n");
return rc;
}
@@ -1065,17 +1022,6 @@ sh_css_config_input_network(struct ia_css_stream *stream)
pipe = stream->last_pipe->pipe_settings.video.copy_pipe;
} else {
pipe = stream->last_pipe;
- if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE) {
- /*
- * We need to poll the ISYS HW in capture_indication itself
- * for "non-continuous" capture usecase for getting accurate
- * isys frame capture timestamps.
- * This is because the capturepipe propcessing takes longer
- * to execute than the input system frame capture.
- * 2401 specific
- */
- early_polling = true;
- }
}
if (!pipe)
@@ -1086,7 +1032,8 @@ sh_css_config_input_network(struct ia_css_stream *stream)
binary = pipe->pipeline.stages->binary;
if (binary) {
- /* this was being done in ifmtr in 2400.
+ /*
+ * this was being done in ifmtr in 2400.
* online and cont bypass the init_in_frameinfo_memory_defaults
* so need to do it here
*/
@@ -1259,89 +1206,6 @@ static inline int stream_unregister_with_csi_rx(
}
#endif
-#if WITH_PC_MONITORING
-static struct task_struct *my_kthread; /* Handle for the monitoring thread */
-static int sh_binary_running; /* Enable sampling in the thread */
-
-static void print_pc_histo(char *core_name, struct sh_css_pc_histogram *hist)
-{
- unsigned int i;
- unsigned int cnt_run = 0;
- unsigned int cnt_stall = 0;
-
- if (!hist)
- return;
-
- sh_css_print("%s histogram length = %d\n", core_name, hist->length);
- sh_css_print("%s PC\turn\tstall\n", core_name);
-
- for (i = 0; i < hist->length; i++) {
- if ((hist->run[i] == 0) && (hist->run[i] == hist->stall[i]))
- continue;
- sh_css_print("%s %d\t%d\t%d\n",
- core_name, i, hist->run[i], hist->stall[i]);
- cnt_run += hist->run[i];
- cnt_stall += hist->stall[i];
- }
-
- sh_css_print(" Statistics for %s, cnt_run = %d, cnt_stall = %d, hist->length = %d\n",
- core_name, cnt_run, cnt_stall, hist->length);
-}
-
-static void print_pc_histogram(void)
-{
- struct ia_css_binary_metrics *metrics;
-
- for (metrics = sh_css_metrics.binary_metrics;
- metrics;
- metrics = metrics->next) {
- if (metrics->mode == IA_CSS_BINARY_MODE_PREVIEW ||
- metrics->mode == IA_CSS_BINARY_MODE_VF_PP) {
- sh_css_print("pc_histogram for binary %d is SKIPPED\n",
- metrics->id);
- continue;
- }
-
- sh_css_print(" pc_histogram for binary %d\n", metrics->id);
- print_pc_histo(" ISP", &metrics->isp_histogram);
- print_pc_histo(" SP", &metrics->sp_histogram);
- sh_css_print("print_pc_histogram() done for binary->id = %d, done.\n",
- metrics->id);
- }
-
- sh_css_print("PC_MONITORING:print_pc_histogram() -- DONE\n");
-}
-
-static int pc_monitoring(void *data)
-{
- int i = 0;
-
- (void)data;
- while (true) {
- if (sh_binary_running) {
- sh_css_metrics_sample_pcs();
-#if MULTIPLE_SAMPLES
- for (i = 0; i < NOF_SAMPLES; i++)
- sh_css_metrics_sample_pcs();
-#endif
- }
- usleep_range(10, 50);
- }
- return 0;
-}
-
-static void spying_thread_create(void)
-{
- my_kthread = kthread_run(pc_monitoring, NULL, "sh_pc_monitor");
- sh_css_metrics_enable_pc_histogram(1);
-}
-
-static void input_frame_info(struct ia_css_frame_info frame_info)
-{
- sh_css_print("SH_CSS:input_frame_info() -- frame->info.res.width = %d, frame->info.res.height = %d, format = %d\n",
- frame_info.res.width, frame_info.res.height, frame_info.format);
-}
-#endif /* WITH_PC_MONITORING */
static void
start_binary(struct ia_css_pipe *pipe,
@@ -1353,15 +1217,6 @@ start_binary(struct ia_css_pipe *pipe,
if (binary)
sh_css_metrics_start_binary(&binary->metrics);
-#if WITH_PC_MONITORING
- sh_css_print("PC_MONITORING: %s() -- binary id = %d , enable_dvs_envelope = %d\n",
- __func__, binary->info->sp.id,
- binary->info->sp.enable.dvs_envelope);
- input_frame_info(binary->in_frame_info);
-
- if (binary && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_VIDEO)
- sh_binary_running = true;
-#endif
#if !defined(ISP2401)
if (pipe->stream->reconfigure_css_rx) {
@@ -1406,7 +1261,7 @@ void sh_css_binary_args_reset(struct sh_css_binary_args *args)
{
unsigned int i;
- for (i = 0; i < NUM_TNR_FRAMES; i++)
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++)
args->tnr_frames[i] = NULL;
for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++)
args->delay_frames[i] = NULL;
@@ -1424,20 +1279,11 @@ static void start_pipe(
enum sh_css_pipe_config_override copy_ovrd,
enum ia_css_input_mode input_mode)
{
- const struct ia_css_coordinate *coord = NULL;
- const struct ia_css_isp_parameters *params = NULL;
-
-
IA_CSS_ENTER_PRIVATE("me = %p, copy_ovrd = %d, input_mode = %d",
me, copy_ovrd, input_mode);
assert(me); /* all callers are in this file and call with non null argument */
- if (!IS_ISP2401) {
- coord = &me->config.internal_frame_origin_bqs_on_sctbl;
- params = me->stream->isp_params_configs;
- }
-
sh_css_sp_init_pipeline(&me->pipeline,
me->mode,
(uint8_t)ia_css_pipe_get_pipe_num(me),
@@ -1452,9 +1298,7 @@ static void start_pipe(
&me->stream->info.metadata_info
, (input_mode == IA_CSS_INPUT_MODE_MEMORY) ?
(enum mipi_port_id)0 :
- me->stream->config.source.port.port,
- coord,
- params);
+ me->stream->config.source.port.port);
if (me->config.mode != IA_CSS_PIPE_MODE_COPY) {
struct ia_css_pipeline_stage *stage;
@@ -1565,17 +1409,18 @@ ia_css_reset_defaults(struct sh_css *css)
/* Reset everything to zero */
memset(&default_css, 0, sizeof(default_css));
- /* Initialize the non zero values*/
+ /* Initialize the non zero values */
default_css.check_system_idle = true;
default_css.num_cont_raw_frames = NUM_CONTINUOUS_FRAMES;
- /* All should be 0: but memset does it already.
+ /*
+ * All should be 0: but memset does it already.
* default_css.num_mipi_frames[N_CSI_PORTS] = 0;
*/
default_css.irq_type = IA_CSS_IRQ_TYPE_EDGE;
- /*Set the defaults to the output */
+ /* Set the defaults to the output */
*css = default_css;
}
@@ -1638,6 +1483,7 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
* the SIZE_OF_XXX macro of the corresponding struct. If they are not
* equal, functionality will break.
*/
+
/* Check struct sh_css_ddr_address_map */
COMPILATION_ERROR_IF(sizeof(struct sh_css_ddr_address_map) != SIZE_OF_SH_CSS_DDR_ADDRESS_MAP_STRUCT);
/* Check struct host_sp_queues */
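
COMPILATION_ERROR_IF() turns a host/SP struct-layout mismatch into a build
failure rather than a runtime one. A common way to implement such a check
(shown for illustration; the driver's actual macro may differ) is the
negative-array-size trick:

	#define COMPILATION_ERROR_IF(cond) \
		((void)sizeof(char[1 - 2 * !!(cond)]))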
@@ -1707,8 +1553,11 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
mipi_init();
#ifndef ISP2401
- /* In case this has been programmed already, update internal
- data structure ... DEPRECATED */
+ /*
+ * In case this has been programmed already, update internal
+ * data structure ...
+ * DEPRECATED
+ */
my_css.page_table_base_index = mmu_get_page_table_base_index(MMU0_ID);
#endif
@@ -1746,9 +1595,8 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
return err;
}
fw_explicitly_loaded = false;
-#ifndef ISP2401
+
my_css_save.loaded_fw = (struct ia_css_fw *)fw;
-#endif
}
if (!sh_css_setup_spctrl_config(&sh_css_sp_fw, SP_PROG_NAME, &spctrl_cfg))
return -EINVAL;
@@ -1759,24 +1607,17 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
return err;
}
-#if WITH_PC_MONITORING
- if (!thread_alive) {
- thread_alive++;
- sh_css_print("PC_MONITORING: %s() -- create thread DISABLED\n",
- __func__);
- spying_thread_create();
- }
-#endif
if (!sh_css_hrt_system_is_idle()) {
IA_CSS_LEAVE_ERR(-EBUSY);
return -EBUSY;
}
- /* can be called here, queuing works, but:
- - when sp is started later, it will wipe queued items
- so for now we leave it for later and make sure
- updates are not called to frequently.
- sh_css_init_buffer_queues();
- */
+ /*
+ * can be called here, queuing works, but:
+ * - when sp is started later, it will wipe queued items
+ * so for now we leave it for later and make sure
+	 * updates are not called too frequently.
+ * sh_css_init_buffer_queues();
+ */
#if defined(ISP2401)
gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 1);
@@ -1817,10 +1658,12 @@ sh_css_flush(struct ia_css_acc_fw *fw)
my_css.flush(fw);
}
-/* Mapping sp threads. Currently, this is done when a stream is created and
+/*
+ * Mapping sp threads. Currently, this is done when a stream is created and
* pipelines are ready to be converted to sp pipelines. Be careful if you are
* doing it from stream_create since we could run out of sp threads due to
- * allocation on inactive pipelines. */
+ * allocation on inactive pipelines.
+ */
static int
map_sp_threads(struct ia_css_stream *stream, bool map)
{
@@ -1884,8 +1727,10 @@ map_sp_threads(struct ia_css_stream *stream, bool map)
return err;
}
-/* creates a host pipeline skeleton for all pipes in a stream. Called during
- * stream_create. */
+/*
+ * creates a host pipeline skeleton for all pipes in a stream. Called during
+ * stream_create.
+ */
static int
create_host_pipeline_structure(struct ia_css_stream *stream)
{
@@ -1984,8 +1829,10 @@ create_host_pipeline_structure(struct ia_css_stream *stream)
return err;
}
-/* creates a host pipeline for all pipes in a stream. Called during
- * stream_start. */
+/*
+ * creates a host pipeline for all pipes in a stream. Called during
+ * stream_start.
+ */
static int
create_host_pipeline(struct ia_css_stream *stream)
{
@@ -2005,15 +1852,25 @@ create_host_pipeline(struct ia_css_stream *stream)
main_pipe = stream->last_pipe;
pipe_id = main_pipe->mode;
- /* No continuous frame allocation for capture pipe. It uses the
- * "main" pipe's frames. */
+ /*
+ * No continuous frame allocation for capture pipe. It uses the
+ * "main" pipe's frames.
+ */
if ((pipe_id == IA_CSS_PIPE_ID_PREVIEW) ||
(pipe_id == IA_CSS_PIPE_ID_VIDEO)) {
- /* About pipe_id == IA_CSS_PIPE_ID_PREVIEW && stream->config.mode != IA_CSS_INPUT_MODE_MEMORY:
- * The original condition pipe_id == IA_CSS_PIPE_ID_PREVIEW is too strong. E.g. in SkyCam (with memory
- * based input frames) there is no continuous mode and thus no need for allocated continuous frames
- * This is not only for SkyCam but for all preview cases that use DDR based input frames. For this
- * reason the stream->config.mode != IA_CSS_INPUT_MODE_MEMORY has beed added.
+ /*
+ * About
+ * pipe_id == IA_CSS_PIPE_ID_PREVIEW &&
+ * stream->config.mode != IA_CSS_INPUT_MODE_MEMORY:
+ *
+ * The original condition pipe_id == IA_CSS_PIPE_ID_PREVIEW is
+ * too strong. E.g. in SkyCam (with memory based input frames)
+ * there is no continuous mode and thus no need for allocated
+ * continuous frames.
+ * This is not only for SkyCam but for all preview cases that
+ * use DDR based input frames. For this reason the
+	 * stream->config.mode != IA_CSS_INPUT_MODE_MEMORY has been
+ * added.
*/
if (stream->config.continuous ||
(pipe_id == IA_CSS_PIPE_ID_PREVIEW &&
@@ -2223,7 +2080,7 @@ pipe_generate_pipe_num(const struct ia_css_pipe *pipe,
/* Assign a new pipe_num .... search for empty place */
for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
if (!my_css.all_pipes[i]) {
- /*position is reserved */
+ /* position is reserved */
my_css.all_pipes[i] = (struct ia_css_pipe *)pipe;
pipe_num = i;
break;
@@ -2340,8 +2197,10 @@ ia_css_pipe_destroy(struct ia_css_pipe *pipe)
switch (pipe->config.mode) {
case IA_CSS_PIPE_MODE_PREVIEW:
- /* need to take into account that this function is also called
- on the internal copy pipe */
+ /*
+ * need to take into account that this function is also called
+ * on the internal copy pipe
+ */
if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) {
ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES,
pipe->continuous_frames);
@@ -2369,10 +2228,10 @@ ia_css_pipe_destroy(struct ia_css_pipe *pipe)
}
}
#ifndef ISP2401
- ia_css_frame_free_multiple(NUM_TNR_FRAMES,
+ ia_css_frame_free_multiple(NUM_VIDEO_TNR_FRAMES,
pipe->pipe_settings.video.tnr_frames);
#else
- ia_css_frame_free_multiple(NUM_TNR_FRAMES,
+ ia_css_frame_free_multiple(NUM_VIDEO_TNR_FRAMES,
pipe->pipe_settings.video.tnr_frames);
#endif
ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES,
@@ -2391,8 +2250,10 @@ ia_css_pipe_destroy(struct ia_css_pipe *pipe)
break;
}
- sh_css_params_free_gdc_lut(pipe->scaler_pp_lut);
- pipe->scaler_pp_lut = mmgr_NULL;
+ if (pipe->scaler_pp_lut != mmgr_NULL) {
+ hmm_free(pipe->scaler_pp_lut);
+ pipe->scaler_pp_lut = mmgr_NULL;
+ }
my_css.active_pipes[ia_css_pipe_get_pipe_num(pipe)] = NULL;
sh_css_pipe_free_shading_table(pipe);
@@ -2413,15 +2274,13 @@ void
ia_css_uninit(void)
{
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() enter: void\n");
-#if WITH_PC_MONITORING
- sh_css_print("PC_MONITORING: %s() -- started\n", __func__);
- print_pc_histogram();
-#endif
sh_css_params_free_default_gdc_lut();
/* TODO: JB: implement decent check and handling of freeing mipi frames */
- //assert(ref_count_mipi_allocation == 0); //mipi frames are not freed
+ if (!mipi_is_free())
+ dev_warn(atomisp_dev, "mipi frames are not freed.\n");
+
/* cleanup generic data */
sh_css_params_uninit();
ia_css_refcount_uninit();
@@ -2465,16 +2324,13 @@ int ia_css_irq_translate(
if (status == hrt_isp_css_irq_status_error)
return -EINVAL;
-#if WITH_PC_MONITORING
- sh_css_print("PC_MONITORING: %s() irq = %d, sh_binary_running set to 0\n",
- __func__, irq);
- sh_binary_running = 0;
-#endif
switch (irq) {
case virq_sp:
- /* When SP goes to idle, info is available in the
- * event queue. */
+ /*
+ * When SP goes to idle, info is available in the
+ * event queue.
+ */
infos |= IA_CSS_IRQ_INFO_EVENTS_READY;
break;
case virq_isp:
@@ -2582,8 +2438,10 @@ sh_css_get_sw_interrupt_value(unsigned int irq)
return irq_value;
}
-/* configure and load the copy binary, the next binary is used to
- determine whether the copy binary needs to do left padding. */
+/*
+ * configure and load the copy binary, the next binary is used to
+ * determine whether the copy binary needs to do left padding.
+ */
static int load_copy_binary(
struct ia_css_pipe *pipe,
struct ia_css_binary *copy_binary,
@@ -2670,13 +2528,11 @@ alloc_continuous_frames(struct ia_css_pipe *pipe, bool init_time)
ref_info.padded_width = CEIL_MUL(ref_info.res.width, 2 * ISP_VEC_NELEMS);
#endif
-#if !defined(HAS_NO_PACKED_RAW_PIXELS)
if (pipe->stream->config.pack_raw_pixels) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW_PACKED\n");
ref_info.format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
} else
-#endif
{
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW\n");
@@ -2780,7 +2636,8 @@ load_preview_binaries(struct ia_css_pipe *pipe)
if (err)
return err;
- /* Note: the current selection of vf_pp binary and
+ /*
+ * Note: the current selection of vf_pp binary and
* parameterization of the preview binary contains a few pieces
* of hardcoded knowledge. This needs to be cleaned up such that
* the binary selection becomes more generic.
@@ -2793,7 +2650,7 @@ load_preview_binaries(struct ia_css_pipe *pipe)
* The decision if the vf_pp binary is needed for YUV downscaling is
* made after the preview binary selection, since some preview binaries
* can perform the requested YUV downscaling.
- * */
+ */
need_vf_pp = pipe->config.enable_dz;
need_vf_pp |= pipe_out_info->format != IA_CSS_FRAME_FORMAT_YUV_LINE &&
!(pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12 ||
@@ -2805,11 +2662,12 @@ load_preview_binaries(struct ia_css_pipe *pipe)
prev_vf_info = pipe->vf_yuv_ds_input_info;
else
prev_vf_info = *pipe_out_info;
- /* If vf_pp is needed, then preview must output yuv_line.
+ /*
+ * If vf_pp is needed, then preview must output yuv_line.
* The exception is when vf_pp is manually disabled, which is only
* used in combination with a pipeline extension that requires
* yuv_line as input.
- * */
+ */
if (need_vf_pp)
ia_css_frame_info_set_format(&prev_vf_info,
IA_CSS_FRAME_FORMAT_YUV_LINE);
@@ -2827,22 +2685,12 @@ load_preview_binaries(struct ia_css_pipe *pipe)
if (err)
return err;
- if (IS_ISP2401) {
- /* The delay latency determines the number of invalid frames after
- * a stream is started. */
- pipe->num_invalid_frames = pipe->dvs_frame_delay;
- pipe->info.num_invalid_frames = pipe->num_invalid_frames;
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "load_preview_binaries() num_invalid_frames=%d dvs_frame_delay=%d\n",
- pipe->num_invalid_frames, pipe->dvs_frame_delay);
- }
-
/* The vf_pp binary is needed when (further) YUV downscaling is required */
need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.width != pipe_out_info->res.width;
need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.height != pipe_out_info->res.height;
- /* When vf_pp is needed, then the output format of the selected
+ /*
+ * When vf_pp is needed, then the output format of the selected
* preview binary must be yuv_line. If this is not the case,
* then the preview binary selection is done again.
*/
@@ -2886,12 +2734,14 @@ load_preview_binaries(struct ia_css_pipe *pipe)
}
#ifdef ISP2401
- /* When the input system is 2401, only the Direct Sensor Mode
+ /*
+ * When the input system is 2401, only the Direct Sensor Mode
* Offline Preview uses the ISP copy binary.
*/
need_isp_copy_binary = !online && sensor;
#else
- /* About pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY:
+ /*
+ * About pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY:
* This is typically the case with SkyCam (which has no input system) but it also applies to all cases
* where the driver chooses memory based input frames. In these cases, a copy binary (which typically
* copies sensor data to DDR) does not have much use.
@@ -3245,8 +3095,10 @@ get_crop_columns_for_bayer_order(const struct ia_css_stream_config *config)
return 0;
}
-/* This function is to get the sum of all extra pixels in addition to the effective
- * input, it includes dvs envelop and filter run-in */
+/*
+ * This function gets the sum of all extra pixels in addition to the
+ * effective input; it includes the dvs envelope and filter run-in.
+ */
static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
unsigned int *extra_row, unsigned int *extra_column)
{
@@ -3255,7 +3107,8 @@ static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
unsigned int i;
struct ia_css_resolution dvs_env = pipe->config.dvs_envelope;
- /* The dvs envelope info may not be correctly sent down via pipe config
+ /*
+ * The dvs envelope info may not be correctly sent down via pipe config.
 * The check is made and the correct value is populated in the binary info.
 * Use this value when computing crop, else excess lines may get trimmed.
*/
@@ -3349,7 +3202,8 @@ ia_css_get_crop_offsets(
* 2. Require the special support for the online use cases.
*/
- /* ISP expects GRBG bayer order, we skip one line and/or one row
+ /*
+ * ISP expects GRBG bayer order; we skip one line and/or one row
* to correct in case the input bayer order is different.
*/
column += get_crop_columns_for_bayer_order(&pipe->stream->config);
@@ -3467,7 +3321,8 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
me->dvs_frame_delay = pipe->dvs_frame_delay;
#ifdef ISP2401
- /* When the input system is 2401, always enable 'in_frameinfo_memory'
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
* except for the following: online or continuous
*/
need_in_frameinfo_memory = !(pipe->stream->config.online ||
@@ -3523,7 +3378,8 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
in_frame = me->stages->args.out_frame[0];
} else if (pipe->stream->config.continuous) {
#ifdef ISP2401
- /* When continuous is enabled, configure in_frame with the
+ /*
+ * When continuous is enabled, configure in_frame with the
* last pipe, which is the copy pipe.
*/
in_frame = pipe->stream->last_pipe->continuous_frames[0];
@@ -3535,8 +3391,10 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
ia_css_pipe_util_set_output_frames(out_frames, 0,
need_yuv_pp ? NULL : out_frame);
- /* when the video binary supports a second output pin,
- it can directly produce the vf_frame. */
+ /*
+ * when the video binary supports a second output pin,
+ * it can directly produce the vf_frame.
+ */
if (need_vf_pp) {
ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary,
out_frames, in_frame, NULL);
@@ -3568,7 +3426,7 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
if (video_stage) {
int frm;
- for (frm = 0; frm < NUM_TNR_FRAMES; frm++) {
+ for (frm = 0; frm < NUM_VIDEO_TNR_FRAMES; frm++) {
video_stage->args.tnr_frames[frm] =
pipe->pipe_settings.video.tnr_frames[frm];
}
@@ -3708,7 +3566,8 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe)
ia_css_pipeline_clean(me);
#ifdef ISP2401
- /* When the input system is 2401, always enable 'in_frameinfo_memory'
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
* except for the following:
* - Direct Sensor Mode Online Preview
* - Buffered Sensor Mode Online Preview
@@ -3757,7 +3616,8 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe)
in_frame = me->stages->args.out_frame[0];
} else if (pipe->stream->config.continuous) {
#ifdef ISP2401
- /* When continuous is enabled, configure in_frame with the
+ /*
+ * When continuous is enabled, configure in_frame with the
* last pipe, which is the copy pipe.
*/
if (continuous || !online)
@@ -3837,8 +3697,7 @@ preview_start(struct ia_css_pipe *pipe)
struct ia_css_pipe *acc_pipe;
enum sh_css_pipe_config_override copy_ovrd;
enum ia_css_input_mode preview_pipe_input_mode;
- const struct ia_css_coordinate *coord = NULL;
- const struct ia_css_isp_parameters *params = NULL;
+ unsigned int thread_id;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
@@ -3862,22 +3721,13 @@ preview_start(struct ia_css_pipe *pipe)
}
send_raw_frames(pipe);
- {
- unsigned int thread_id;
-
- ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
- copy_ovrd = 1 << thread_id;
-
- if (pipe->stream->cont_capt) {
- ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
- &thread_id);
- copy_ovrd |= 1 << thread_id;
- }
- }
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
- if (IS_ISP2401) {
- coord = &pipe->config.internal_frame_origin_bqs_on_sctbl;
- params = pipe->stream->isp_params_configs;
+ if (pipe->stream->cont_capt) {
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
+ &thread_id);
+ copy_ovrd |= 1 << thread_id;
}
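
A worked example with assumed thread ids: if the preview pipe maps to SP
thread 1, copy_ovrd starts as 1 << 1 = 0x2; if continuous capture runs on
thread 3, it becomes 0x2 | (1 << 3) = 0xa, i.e. one override bit per SP
thread.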
/* Construct and load the copy pipe */
@@ -3892,12 +3742,12 @@ preview_start(struct ia_css_pipe *pipe)
pipe->stream->config.mode,
&pipe->stream->config.metadata_config,
&pipe->stream->info.metadata_info,
- pipe->stream->config.source.port.port,
- coord,
- params);
+ pipe->stream->config.source.port.port);
- /* make the preview pipe start with mem mode input, copy handles
- the actual mode */
+ /*
+	 * make the preview pipe start with mem mode input; the copy pipe
+	 * handles the actual mode
+ */
preview_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;
}
@@ -3915,9 +3765,7 @@ preview_start(struct ia_css_pipe *pipe)
IA_CSS_INPUT_MODE_MEMORY,
&pipe->stream->config.metadata_config,
&pipe->stream->info.metadata_info,
- (enum mipi_port_id)0,
- coord,
- params);
+ (enum mipi_port_id)0);
}
if (acc_pipe) {
@@ -3933,9 +3781,7 @@ preview_start(struct ia_css_pipe *pipe)
IA_CSS_INPUT_MODE_MEMORY,
NULL,
NULL,
- (enum mipi_port_id)0,
- coord,
- params);
+ (enum mipi_port_id)0);
}
start_pipe(pipe, copy_ovrd, preview_pipe_input_mode);
@@ -3968,38 +3814,7 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
}
buf_type = buffer->type;
- /* following code will be enabled when IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME
- is removed */
-#if 0
- if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
- bool found_pipe = false;
-
- for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
- if ((buffer->data.frame->info.res.width == pipe->output_info[i].res.width) &&
- (buffer->data.frame->info.res.height == pipe->output_info[i].res.height)) {
- buf_type += i;
- found_pipe = true;
- break;
- }
- }
- if (!found_pipe)
- return -EINVAL;
- }
- if (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
- bool found_pipe = false;
- for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
- if ((buffer->data.frame->info.res.width == pipe->vf_output_info[i].res.width) &&
- (buffer->data.frame->info.res.height == pipe->vf_output_info[i].res.height)) {
- buf_type += i;
- found_pipe = true;
- break;
- }
- }
- if (!found_pipe)
- return -EINVAL;
- }
-#endif
pipe_id = pipe->mode;
IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type);
@@ -4086,15 +3901,6 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
"ia_css_pipe_enqueue_buffer() buf_type=%d, data(DDR address)=0x%x\n",
buf_type, buffer->data.frame->data);
-#if CONFIG_ON_FRAME_ENQUEUE()
- return_err = set_config_on_frame_enqueue(
- &buffer->data.frame->info,
- &ddr_buffer.payload.frame);
- if (return_err) {
- IA_CSS_LEAVE_ERR(return_err);
- return return_err;
- }
-#endif
}
/* start of test for using rmgr for acq/rel memory */
@@ -4124,8 +3930,10 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
}
for (stage = pipeline->stages; stage; stage = stage->next) {
- /* The SP will read the params
- after it got empty 3a and dis */
+ /*
+ * The SP will read the params after it got
+ * empty 3a and dis
+ */
if (STATS_ENABLED(stage)) {
/* there is a stage that needs it */
return_err = ia_css_bufq_enqueue_buffer(thread_id,
@@ -4142,14 +3950,12 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
return_err = ia_css_bufq_enqueue_buffer(thread_id,
queue_id,
(uint32_t)h_vbuf->vptr);
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
if (!return_err &&
buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
IA_CSS_LOG("pfp: enqueued OF %d to q %d thread %d",
ddr_buffer.payload.frame.frame_data,
queue_id, thread_id);
}
-#endif
}
if (!return_err) {
@@ -4191,7 +3997,7 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
/*
* TODO: Free up the hmm memory space.
- */
+ */
int
ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
struct ia_css_buffer *buffer)
@@ -4258,7 +4064,8 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
hmm_buffer_record = sh_css_hmm_buffer_record_validate(
ddr_buffer_addr, buf_type);
if (hmm_buffer_record) {
- /* valid hmm_buffer_record found. Save the kernel_ptr
+ /*
+ * valid hmm_buffer_record found. Save the kernel_ptr
* for validation after performing hmm_load. The
* vbuf handle and buffer_record can be released.
*/
@@ -4276,7 +4083,8 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
&ddr_buffer,
sizeof(struct sh_css_hmm_buffer));
- /* if the kernel_ptr is 0 or an invalid, return an error.
+ /*
+	 * if the kernel_ptr is 0 or invalid, return an error;
	 * do not access the buffer via the kernel_ptr.
*/
if ((ddr_buffer.kernel_ptr == 0) ||
@@ -4290,8 +4098,11 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
}
if (ddr_buffer.kernel_ptr != 0) {
- /* buffer->exp_id : all instances to be removed later once the driver change
- * is completed. See patch #5758 for reference */
+ /*
+ * buffer->exp_id : all instances to be removed later
+ * once the driver change is completed. See patch #5758
+ * for reference
+ */
buffer->exp_id = 0;
buffer->driver_cookie = ddr_buffer.cookie_ptr;
buffer->timing_data = ddr_buffer.timing_data;
@@ -4307,8 +4118,10 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
if (pipe && pipe->stop_requested) {
#if !defined(ISP2401)
- /* free mipi frames only for old input system
- * for 2401 it is done in ia_css_stream_destroy call
+ /*
+				 * free mipi frames only for the old input
+				 * system; for 2401 it is done in the
+				 * ia_css_stream_destroy call
*/
return_err = free_mipi_frames(pipe);
if (return_err) {
@@ -4345,12 +4158,10 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
sh_css_sp_get_binary_copy_size();
#endif
}
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
IA_CSS_LOG("pfp: dequeued OF %d with config id %d thread %d",
frame->data, frame->isp_config_id, thread_id);
}
-#endif
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_pipe_dequeue_buffer() buf_type=%d, data(DDR address)=0x%x\n",
@@ -4419,45 +4230,41 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
* 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
*/
static enum ia_css_event_type convert_event_sp_to_host_domain[] = {
- IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, /** Output frame ready. */
- IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE, /** Second output frame ready. */
- IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /** Viewfinder Output frame ready. */
- IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE, /** Second viewfinder Output frame ready. */
- IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE, /** Indication that 3A statistics are available. */
- IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE, /** Indication that DIS statistics are available. */
- IA_CSS_EVENT_TYPE_PIPELINE_DONE, /** Pipeline Done event, sent after last pipeline stage. */
- IA_CSS_EVENT_TYPE_FRAME_TAGGED, /** Frame tagged. */
- IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE, /** Input frame ready. */
- IA_CSS_EVENT_TYPE_METADATA_DONE, /** Metadata ready. */
- IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /** Indication that LACE statistics are available. */
- IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE, /** Extension stage executed. */
- IA_CSS_EVENT_TYPE_TIMER, /** Timing measurement data. */
- IA_CSS_EVENT_TYPE_PORT_EOF, /** End Of Frame event, sent when in buffered sensor mode. */
- IA_CSS_EVENT_TYPE_FW_WARNING, /** Performance warning encountered by FW */
- IA_CSS_EVENT_TYPE_FW_ASSERT, /** Assertion hit by FW */
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, /* Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE, /* Second output frame ready. */
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /* Viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE, /* Second viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE, /* Indication that 3A statistics are available. */
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE, /* Indication that DIS statistics are available. */
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE, /* Pipeline Done event, sent after last pipeline stage. */
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED, /* Frame tagged. */
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE, /* Input frame ready. */
+ IA_CSS_EVENT_TYPE_METADATA_DONE, /* Metadata ready. */
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /* Indication that LACE statistics are available. */
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE, /* Extension stage executed. */
+ IA_CSS_EVENT_TYPE_TIMER, /* Timing measurement data. */
+ IA_CSS_EVENT_TYPE_PORT_EOF, /* End Of Frame event, sent when in buffered sensor mode. */
+ IA_CSS_EVENT_TYPE_FW_WARNING, /* Performance warning encountered by FW */
+ IA_CSS_EVENT_TYPE_FW_ASSERT, /* Assertion hit by FW */
0, /* error if sp passes SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
};
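
The table is indexed with the raw SP event type, i.e. the first byte of the
dequeued payload, exactly as the dequeue path below does. A condensed sketch
of that use:

	u8 payload[4] = {0, 0, 0, 0};

	if (!ia_css_bufq_dequeue_psys_event(payload))
		event->type = convert_event_sp_to_host_domain[payload[0]];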
int
-ia_css_dequeue_event(struct ia_css_event *event)
-{
- return ia_css_dequeue_psys_event(event);
-}
-
-int
ia_css_dequeue_psys_event(struct ia_css_event *event)
{
enum ia_css_pipe_id pipe_id = 0;
u8 payload[4] = {0, 0, 0, 0};
int ret_err;
- /*TODO:
+ /*
+ * TODO:
* a) use generic decoding function , same as the one used by sp.
* b) group decode and dequeue into eventQueue module
*
* We skip the IA_CSS_ENTER logging call
* to avoid flooding the logs when the host application
- * uses polling. */
+ * uses polling.
+ */
if (!event)
return -EINVAL;
@@ -4476,9 +4283,11 @@ ia_css_dequeue_psys_event(struct ia_css_event *event)
ia_css_bufq_enqueue_psys_event(
IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0);
- /* Events are decoded into 4 bytes of payload, the first byte
+ /*
+ * Events are decoded into 4 bytes of payload, the first byte
* contains the sp event type. This is converted to a host enum.
- * TODO: can this enum conversion be eliminated */
+ * TODO: can this enum conversion be eliminated
+ */
event->type = convert_event_sp_to_host_domain[payload[0]];
/* Some sane default values since not all events use all fields. */
event->pipe = NULL;
@@ -4491,7 +4300,10 @@ ia_css_dequeue_psys_event(struct ia_css_event *event)
event->timer_subcode = 0;
if (event->type == IA_CSS_EVENT_TYPE_TIMER) {
- /* timer event ??? get the 2nd event and decode the data into the event struct */
+ /*
+ * timer event ??? get the 2nd event and decode the data
+ * into the event struct
+ */
u32 tmp_data;
/* 1st event: LSB 16-bit timer data and code */
event->timer_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8));
@@ -4500,8 +4312,10 @@ ia_css_dequeue_psys_event(struct ia_css_event *event)
ret_err = ia_css_bufq_dequeue_psys_event(payload);
if (ret_err) {
/* no 2nd event ??? an error */
- /* Putting IA_CSS_ERROR is resulting in failures in
- * Merrifield smoke testing */
+ /*
+		 * Using IA_CSS_ERROR here results in failures in
+		 * Merrifield smoke testing
+ */
IA_CSS_WARNING("Timer: Error de-queuing the 2nd TIMER event!!!\n");
return ret_err;
}
@@ -4515,11 +4329,15 @@ ia_css_dequeue_psys_event(struct ia_css_event *event)
event->timer_data |= (tmp_data << 16);
event->timer_subcode = payload[2];
} else {
- /* It's a non timer event. So clear first half of the timer event data.
- * If the second part of the TIMER event is not received, we discard
- * the first half of the timer data and process the non timer event without
- * affecting the flow. So the non timer event falls through
- * the code. */
+ /*
+ * It's a non timer event. So clear first half of the
+ * timer event data.
+ * If the second part of the TIMER event is not
+ * received, we discard the first half of the timer
+ * data and process the non timer event without
+ * affecting the flow. So the non timer event falls
+ * through the code.
+ */
event->timer_data = 0;
event->timer_code = 0;
event->timer_subcode = 0;
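
To make the two-part TIMER protocol above concrete: the first SP event carries the LSB 16 bits of the timer value and the second carries the MSB 16 bits. A minimal sketch of the reassembly, assuming the byte positions shown in this hunk (the helper name is illustrative):

    static u32 assemble_timer_data(const u8 first[4], const u8 second[4])
    {
            u32 data;

            /* 1st event: LSB 16 bits of the timer value */
            data = (first[1] & 0xFF) | ((first[3] & 0xFF) << 8);
            /* 2nd event: MSB 16 bits shifted into the upper half */
            data |= ((second[1] & 0xFF) | ((second[3] & 0xFF) << 8)) << 16;
            return data;
    }
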
@@ -4540,9 +4358,11 @@ ia_css_dequeue_psys_event(struct ia_css_event *event)
event->fw_assert_line_no = (payload[2] << 8) + payload[3];
/* payload[2] is line_no>>8, payload[3] is line_no&0xff */
} else if (event->type != IA_CSS_EVENT_TYPE_TIMER) {
- /* pipe related events.
+ /*
+ * pipe related events.
* payload[1] contains the pipe_num,
- * payload[2] contains the pipe_id. These are different. */
+ * payload[2] contains the pipe_id. These are different.
+ */
event->pipe = find_pipe_by_num(payload[1]);
pipe_id = (enum ia_css_pipe_id)payload[2];
/* Check to see if pipe still exists */
@@ -4594,9 +4414,11 @@ ia_css_dequeue_isys_event(struct ia_css_event *event)
u8 payload[4] = {0, 0, 0, 0};
int err = 0;
- /* We skip the IA_CSS_ENTER logging call
+ /*
+ * We skip the IA_CSS_ENTER logging call
* to avoid flooding the logs when the host application
- * uses polling. */
+ * uses polling.
+ */
if (!event)
return -EINVAL;
@@ -4720,7 +4542,8 @@ sh_css_pipe_start(struct ia_css_stream *stream)
return err;
}
- /* Force ISP parameter calculation after a mode change
+ /*
+ * Force ISP parameter calculation after a mode change
* Acceleration API examples pass NULL for stream but they
* don't use ISP parameters anyway. So this should be okay.
* The SP binary (jpeg) copy does not use any parameters.
@@ -4889,185 +4712,6 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream,
return 0;
}
-/*
- * @brief Stop all "ia_css_pipe" instances in the target
- * "ia_css_stream" instance.
- *
- * Refer to "Local prototypes" for more info.
- */
-/* ISP2401 */
-static int
-sh_css_pipes_stop(struct ia_css_stream *stream)
-{
- int err = 0;
- struct ia_css_pipe *main_pipe;
- enum ia_css_pipe_id main_pipe_id;
- int i;
-
- if (!stream) {
- IA_CSS_LOG("stream does NOT exist!");
- err = -EINVAL;
- goto ERR;
- }
-
- main_pipe = stream->last_pipe;
- if (!main_pipe) {
- IA_CSS_LOG("main_pipe does NOT exist!");
- err = -EINVAL;
- goto ERR;
- }
-
- main_pipe_id = main_pipe->mode;
- IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id);
-
- /*
- * Stop all "ia_css_pipe" instances in this target
- * "ia_css_stream" instance.
- */
- for (i = 0; i < stream->num_pipes; i++) {
- /* send the "stop" request to the "ia_css_pipe" instance */
- IA_CSS_LOG("Send the stop-request to the pipe: pipe_id=%d",
- stream->pipes[i]->pipeline.pipe_id);
- err = ia_css_pipeline_request_stop(&stream->pipes[i]->pipeline);
-
- /*
- * Exit this loop if "ia_css_pipeline_request_stop()"
- * returns the error code.
- *
- * The error code would be generated in the following
- * two cases:
- * (1) The Scalar Processor has already been stopped.
- * (2) The "Host->SP" event queue is full.
- *
- * As the convention of using CSS API 2.0/2.1, such CSS
- * error code would be propogated from the CSS-internal
- * API returned value to the CSS API returned value. Then
- * the CSS driver should capture these error code and
- * handle it in the driver exception handling mechanism.
- */
- if (err)
- goto ERR;
- }
-
- /*
- * In the CSS firmware use scenario "Continuous Preview"
- * as well as "Continuous Video", the "ia_css_pipe" instance
- * "Copy Pipe" is activated. This "Copy Pipe" is private to
- * the CSS firmware so that it is not listed in the target
- * "ia_css_stream" instance.
- *
- * We need to stop this "Copy Pipe", as well.
- */
- if (main_pipe->stream->config.continuous) {
- struct ia_css_pipe *copy_pipe = NULL;
-
- /* get the reference to "Copy Pipe" */
- if (main_pipe_id == IA_CSS_PIPE_ID_PREVIEW)
- copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
- else if (main_pipe_id == IA_CSS_PIPE_ID_VIDEO)
- copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
-
- /* return the error code if "Copy Pipe" does NOT exist */
- if (!copy_pipe) {
- IA_CSS_LOG("Copy Pipe does NOT exist!");
- err = -EINVAL;
- goto ERR;
- }
-
- /* send the "stop" request to "Copy Pipe" */
- IA_CSS_LOG("Send the stop-request to the pipe: pipe_id=%d",
- copy_pipe->pipeline.pipe_id);
- err = ia_css_pipeline_request_stop(&copy_pipe->pipeline);
- }
-
-ERR:
- IA_CSS_LEAVE_ERR_PRIVATE(err);
- return err;
-}
-
-/*
- * @brief Check if all "ia_css_pipe" instances in the target
- * "ia_css_stream" instance have stopped.
- *
- * Refer to "Local prototypes" for more info.
- */
-/* ISP2401 */
-static bool
-sh_css_pipes_have_stopped(struct ia_css_stream *stream)
-{
- bool rval = true;
-
- struct ia_css_pipe *main_pipe;
- enum ia_css_pipe_id main_pipe_id;
-
- int i;
-
- if (!stream) {
- IA_CSS_LOG("stream does NOT exist!");
- rval = false;
- goto RET;
- }
-
- main_pipe = stream->last_pipe;
-
- if (!main_pipe) {
- IA_CSS_LOG("main_pipe does NOT exist!");
- rval = false;
- goto RET;
- }
-
- main_pipe_id = main_pipe->mode;
- IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id);
-
- /*
- * Check if every "ia_css_pipe" instance in this target
- * "ia_css_stream" instance has stopped.
- */
- for (i = 0; i < stream->num_pipes; i++) {
- rval = rval && ia_css_pipeline_has_stopped(&stream->pipes[i]->pipeline);
- IA_CSS_LOG("Pipe has stopped: pipe_id=%d, stopped=%d",
- stream->pipes[i]->pipeline.pipe_id,
- rval);
- }
-
- /*
- * In the CSS firmware use scenario "Continuous Preview"
- * as well as "Continuous Video", the "ia_css_pipe" instance
- * "Copy Pipe" is activated. This "Copy Pipe" is private to
- * the CSS firmware so that it is not listed in the target
- * "ia_css_stream" instance.
- *
- * We need to check if this "Copy Pipe" has stopped, as well.
- */
- if (main_pipe->stream->config.continuous) {
- struct ia_css_pipe *copy_pipe = NULL;
-
- /* get the reference to "Copy Pipe" */
- if (main_pipe_id == IA_CSS_PIPE_ID_PREVIEW)
- copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
- else if (main_pipe_id == IA_CSS_PIPE_ID_VIDEO)
- copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
-
- /* return if "Copy Pipe" does NOT exist */
- if (!copy_pipe) {
- IA_CSS_LOG("Copy Pipe does NOT exist!");
-
- rval = false;
- goto RET;
- }
-
- /* check if "Copy Pipe" has stopped or not */
- rval = rval && ia_css_pipeline_has_stopped(&copy_pipe->pipeline);
- IA_CSS_LOG("Pipe has stopped: pipe_id=%d, stopped=%d",
- copy_pipe->pipeline.pipe_id,
- rval);
- }
-
-RET:
- IA_CSS_LEAVE_PRIVATE("rval=%d", rval);
- return rval;
-}
-
#if !defined(ISP2401)
unsigned int
sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx)
@@ -5137,11 +4781,13 @@ sh_css_pipe_get_shading_info(struct ia_css_pipe *pipe,
(const struct ia_css_stream_config *)&pipe->stream->config,
shading_info, pipe_config);
- /* Other function calls can be added here when other shading correction types will be added
- * in the future.
+ /*
+ * Other function calls can be added here when other shading
+	 * correction types are added in the future.
*/
} else {
- /* When the pipe does not have a binary which has the shading
+ /*
+ * When the pipe does not have a binary which has the shading
* correction, this function does not need to fill the shading
 * information. It is not an error case, and then
* this function should return 0.
@@ -5180,6 +4826,8 @@ sh_css_pipe_get_grid_info(struct ia_css_pipe *pipe,
ia_css_binary_dvs_stat_grid_info(binary, info, pipe);
} else {
memset(&info->dvs_grid, 0, sizeof(info->dvs_grid));
+ memset(&info->dvs_grid.dvs_stat_grid_info, 0,
+ sizeof(info->dvs_grid.dvs_stat_grid_info));
}
if (binary) {
@@ -5254,7 +4902,8 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
IA_CSS_ENTER_PRIVATE("");
assert(pipe);
assert(pipe->mode == IA_CSS_PIPE_ID_VIDEO);
- /* we only test the video_binary because offline video doesn't need a
+ /*
+ * we only test the video_binary because offline video doesn't need a
* vf_pp binary and online does not (always use) the copy_binary.
* All are always reset at the same time anyway.
*/
@@ -5367,7 +5016,8 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
if (err)
return err;
- /* In the case where video_vf_info is not NULL, this allows
+ /*
+ * In the case where video_vf_info is not NULL, this allows
* us to find a potential video library with desired vf format.
* If success, no vf_pp binary is needed.
* If failed, we will look up video binary with YUV_LINE vf format
@@ -5382,17 +5032,23 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
else
return err;
} else if (video_vf_info) {
- /* The first video binary lookup is successful, but we may
- * still need vf_pp binary based on additiona check */
+ /*
+ * The first video binary lookup is successful, but we
+ * may still need vf_pp binary based on additional check
+ */
num_output_pins = mycs->video_binary.info->num_output_pins;
vf_ds_log2 = mycs->video_binary.vf_downscale_log2;
- /* If the binary has dual output pins, we need vf_pp if the resolution
- * is different. */
+ /*
+ * If the binary has dual output pins, we need vf_pp
+ * if the resolution is different.
+ */
need_vf_pp |= ((num_output_pins == 2) && vf_res_different_than_output);
- /* If the binary has single output pin, we need vf_pp if additional
- * scaling is needed for vf */
+ /*
+ * If the binary has single output pin, we need vf_pp
+ * if additional scaling is needed for vf
+ */
need_vf_pp |= ((num_output_pins == 1) &&
((video_vf_info->res.width << vf_ds_log2 != pipe_out_info->res.width) ||
(video_vf_info->res.height << vf_ds_log2 != pipe_out_info->res.height)));
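
The two need_vf_pp conditions above can be read as one predicate: with dual output pins, vf_pp is needed only when the viewfinder resolution differs from the second pin; with a single pin, it is needed when the binary's power-of-two downscaler (vf_ds_log2) cannot produce the requested viewfinder size. A hedged sketch, with an illustrative helper name:

    static bool vf_pp_needed(int num_output_pins, unsigned int vf_ds_log2,
                             const struct ia_css_frame_info *vf,
                             const struct ia_css_frame_info *out,
                             bool vf_res_different_than_output)
    {
            if (num_output_pins == 2)
                    return vf_res_different_than_output;
            /* single pin: the downscaler only scales by 2^vf_ds_log2 */
            return (vf->res.width << vf_ds_log2) != out->res.width ||
                   (vf->res.height << vf_ds_log2) != out->res.height;
    }
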
@@ -5422,19 +5078,25 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
}
}
- /* If a video binary does not use a ref_frame, we set the frame delay
- * to 0. This is the case for the 1-stage low-power video binary. */
+ /*
+ * If a video binary does not use a ref_frame, we set the frame delay
+ * to 0. This is the case for the 1-stage low-power video binary.
+ */
if (!mycs->video_binary.info->sp.enable.ref_frame)
pipe->dvs_frame_delay = 0;
- /* The delay latency determines the number of invalid frames after
- * a stream is started. */
+ /*
+ * The delay latency determines the number of invalid frames after
+ * a stream is started.
+ */
pipe->num_invalid_frames = pipe->dvs_frame_delay;
pipe->info.num_invalid_frames = pipe->num_invalid_frames;
- /* Viewfinder frames also decrement num_invalid_frames. If the pipe
+ /*
+ * Viewfinder frames also decrement num_invalid_frames. If the pipe
* outputs a viewfinder output, then we need double the number of
- * invalid frames */
+ * invalid frames
+ */
if (video_vf_info)
pipe->num_invalid_frames *= 2;
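
In other words, the invalid-frame count is derived from the DVS frame delay and doubled when a viewfinder output is present, since every capture then produces a vf frame as well. A minimal sketch of that accounting:

    static unsigned int invalid_frames(unsigned int dvs_frame_delay,
                                       bool has_vf_output)
    {
            unsigned int n = dvs_frame_delay;   /* 0 without ref_frame */

            if (has_vf_output)
                    n *= 2;
            return n;
    }
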
@@ -5446,7 +5108,8 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
#if !defined(ISP2401)
/* Copy */
if (!online && !continuous) {
- /* TODO: what exactly needs doing, prepend the copy binary to
+ /*
+	 * TODO: what exactly needs doing? Prepend the copy binary to
+	 * video; base this only on !online?
*/
err = load_copy_binary(pipe,
@@ -5459,7 +5122,6 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
(void)continuous;
#endif
-#if !defined(HAS_OUTPUT_SYSTEM)
if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] && need_vf_pp) {
struct ia_css_binary_descr vf_pp_descr;
@@ -5469,8 +5131,11 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
&mycs->video_binary.vf_frame_info,
pipe_vf_out_info);
} else {
- /* output from main binary is not yuv line. currently this is
- * possible only when bci is enabled on vfpp output */
+ /*
+			 * output from the main binary is not yuv line; currently
+			 * this is possible only when bci is enabled on the vfpp
+			 * output
+ */
assert(pipe->config.enable_vfpp_bci);
ia_css_pipe_get_yuvscaler_binarydesc(pipe, &vf_pp_descr,
&mycs->video_binary.vf_frame_info,
@@ -5482,7 +5147,6 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
if (err)
return err;
}
-#endif
err = allocate_delay_frames(pipe);
@@ -5490,35 +5154,10 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
return err;
if (mycs->video_binary.info->sp.enable.block_output) {
- unsigned int tnr_width;
- unsigned int tnr_height;
-
tnr_info = mycs->video_binary.out_frame_info[0];
- if (IS_ISP2401) {
- /* Select resolution for TNR. If
- * output_system_in_resolution(GDC_out_resolution) is
- * being used, then select that as it will also be in resolution for
- * TNR. At present, it only make sense for Skycam */
- if (pipe->config.output_system_in_res.width &&
- pipe->config.output_system_in_res.height) {
- tnr_width = pipe->config.output_system_in_res.width;
- tnr_height = pipe->config.output_system_in_res.height;
- } else {
- tnr_width = tnr_info.res.width;
- tnr_height = tnr_info.res.height;
- }
-
- /* Make tnr reference buffers output block width(in pix) align */
- tnr_info.res.width = CEIL_MUL(tnr_width,
- (mycs->video_binary.info->sp.block.block_width * ISP_NWAY));
- tnr_info.padded_width = tnr_info.res.width;
- } else {
- tnr_height = tnr_info.res.height;
- }
-
/* Make tnr reference buffers output block height align */
- tnr_info.res.height = CEIL_MUL(tnr_height,
+ tnr_info.res.height = CEIL_MUL(tnr_info.res.height,
mycs->video_binary.info->sp.block.output_block_height);
} else {
tnr_info = mycs->video_binary.internal_frame_info;
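
CEIL_MUL above rounds the TNR buffer height up to the next multiple of the binary's output block height. A local equivalent of the arithmetic, with a worked case (the block height of 32 is only an example value):

    static unsigned int ceil_mul(unsigned int val, unsigned int mul)
    {
            return ((val + mul - 1) / mul) * mul;
    }

    /* e.g. ceil_mul(1080, 32) == 1088: a 1080-line frame gets 8 padding
     * lines so the TNR reference buffers are block aligned.
     */
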
@@ -5526,7 +5165,7 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
tnr_info.format = IA_CSS_FRAME_FORMAT_YUV_LINE;
tnr_info.raw_bit_depth = SH_CSS_TNR_BIT_DEPTH;
- for (i = 0; i < NUM_TNR_FRAMES; i++) {
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
if (mycs->tnr_frames[i]) {
ia_css_frame_free(mycs->tnr_frames[i]);
mycs->tnr_frames[i] = NULL;
@@ -5574,9 +5213,7 @@ static int video_start(struct ia_css_pipe *pipe)
struct ia_css_pipe *copy_pipe, *capture_pipe;
enum sh_css_pipe_config_override copy_ovrd;
enum ia_css_input_mode video_pipe_input_mode;
-
- const struct ia_css_coordinate *coord = NULL;
- const struct ia_css_isp_parameters *params = NULL;
+ unsigned int thread_id;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
@@ -5598,22 +5235,14 @@ static int video_start(struct ia_css_pipe *pipe)
return err;
send_raw_frames(pipe);
- {
- unsigned int thread_id;
-
- ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
- copy_ovrd = 1 << thread_id;
- if (pipe->stream->cont_capt) {
- ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
- &thread_id);
- copy_ovrd |= 1 << thread_id;
- }
- }
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
- if (IS_ISP2401) {
- coord = &pipe->config.internal_frame_origin_bqs_on_sctbl;
- params = pipe->stream->isp_params_configs;
+ if (pipe->stream->cont_capt) {
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
+ &thread_id);
+ copy_ovrd |= 1 << thread_id;
}
/* Construct and load the copy pipe */
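
copy_ovrd, built just above, is a per-SP-thread override bitmask: one bit for the video pipe's thread, plus one for the capture pipe's thread in continuous capture. A hedged sketch of the same construction (the thread ids in the comment are only an example):

    static unsigned int build_copy_ovrd(unsigned int main_thread,
                                        bool cont_capt,
                                        unsigned int capture_thread)
    {
            unsigned int ovrd = 1u << main_thread;

            if (cont_capt)
                    ovrd |= 1u << capture_thread;
            /* e.g. threads 1 and 3 give (1 << 1) | (1 << 3) == 0x0a */
            return ovrd;
    }
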
@@ -5628,12 +5257,12 @@ static int video_start(struct ia_css_pipe *pipe)
pipe->stream->config.mode,
&pipe->stream->config.metadata_config,
&pipe->stream->info.metadata_info,
- pipe->stream->config.source.port.port,
- coord,
- params);
+ pipe->stream->config.source.port.port);
- /* make the video pipe start with mem mode input, copy handles
- the actual mode */
+ /*
+ * make the video pipe start with mem mode input, copy handles
+ * the actual mode
+ */
video_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;
}
@@ -5651,9 +5280,7 @@ static int video_start(struct ia_css_pipe *pipe)
IA_CSS_INPUT_MODE_MEMORY,
&pipe->stream->config.metadata_config,
&pipe->stream->info.metadata_info,
- (enum mipi_port_id)0,
- coord,
- params);
+ (enum mipi_port_id)0);
}
start_pipe(pipe, copy_ovrd, video_pipe_input_mode);
@@ -5763,12 +5390,6 @@ static bool need_capture_pp(
assert(pipe);
assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
- if (IS_ISP2401) {
- /* ldc and capture_pp are not supported in the same pipeline */
- if (need_capt_ldc(pipe))
- return false;
- }
-
/* determine whether we need to use the capture_pp binary.
* This is needed for:
* 1. XNR or
@@ -5891,9 +5512,11 @@ static int load_primary_binaries(
}
need_pp = need_capture_pp(pipe);
- /* we use the vf output info to get the primary/capture_pp binary
- configured for vf_veceven. It will select the closest downscaling
- factor. */
+ /*
+ * we use the vf output info to get the primary/capture_pp binary
+ * configured for vf_veceven. It will select the closest downscaling
+ * factor.
+ */
vf_info = *pipe_vf_out_info;
/*
@@ -5905,13 +5528,15 @@ static int load_primary_binaries(
* required. This should not be considered as a clean solution.
* Proper investigation should be done to come up with the clean
* solution.
- * */
+ */
ia_css_frame_info_set_format(&vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE);
- /* TODO: All this yuv_scaler and capturepp calculation logic
+ /*
+ * TODO: All this yuv_scaler and capturepp calculation logic
* can be shared later. Capture_pp is also a yuv_scale binary
* with extra XNR funcionality. Therefore, it can be made as the
- * first step of the cascade. */
+ * first step of the cascade.
+ */
capt_pp_out_info = pipe->out_yuv_ds_input_info;
capt_pp_out_info.format = IA_CSS_FRAME_FORMAT_YUV420;
capt_pp_out_info.res.width /= MAX_PREFERRED_YUV_DS_PER_STEP;
@@ -5973,33 +5598,13 @@ static int load_primary_binaries(
/* TODO Do we disable ldc for skycam */
need_ldc = need_capt_ldc(pipe);
- if (IS_ISP2401 && need_ldc) {
- /* ldc and capt_pp are not supported in the same pipeline */
- struct ia_css_binary_descr capt_ldc_descr;
-
- ia_css_pipe_get_ldc_binarydesc(pipe,
- &capt_ldc_descr, &prim_out_info,
- &capt_pp_out_info);
-
- err = ia_css_binary_find(&capt_ldc_descr,
- &mycs->capture_ldc_binary);
- if (err) {
- IA_CSS_LEAVE_ERR_PRIVATE(err);
- return err;
- }
- need_pp = false;
- need_ldc = false;
- }
/* we build up the pipeline starting at the end */
/* Capture post-processing */
if (need_pp) {
struct ia_css_binary_descr capture_pp_descr;
- if (!IS_ISP2401)
- capt_pp_in_info = need_ldc ? &capt_ldc_out_info : &prim_out_info;
- else
- capt_pp_in_info = &prim_out_info;
+ capt_pp_in_info = need_ldc ? &capt_ldc_out_info : &prim_out_info;
ia_css_pipe_get_capturepp_binarydesc(pipe,
&capture_pp_descr,
@@ -6057,15 +5662,15 @@ static int load_primary_binaries(
vf_pp_in_info = &mycs->primary_binary[mycs->num_primary_stage - 1].vf_frame_info;
/*
- * WARNING: The #if def flag has been added below as a
- * temporary solution to solve the problem of enabling the
- * view finder in a single binary in a capture flow. The
- * vf-pp stage has been removed for Skycam in the solution
- * provided. The vf-pp stage should be re-introduced when
- * required. Thisshould not be considered as a clean solution.
- * Proper * investigation should be done to come up with the clean
- * solution.
- * */
+ * WARNING: The #if def flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The
+ * vf-pp stage has been removed for Skycam in the solution
+ * provided. The vf-pp stage should be re-introduced when
+	 * required. This should not be considered as a clean solution.
+	 * Proper investigation should be done to come up with the clean
+ * solution.
+ */
if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
struct ia_css_binary_descr vf_pp_descr;
@@ -6083,9 +5688,10 @@ static int load_primary_binaries(
return err;
#ifdef ISP2401
- /* When the input system is 2401, only the Direct Sensor Mode
- * Offline Capture uses the ISP copy binary.
- */
+ /*
+ * When the input system is 2401, only the Direct Sensor Mode
+ * Offline Capture uses the ISP copy binary.
+ */
need_isp_copy_binary = !online && sensor;
#else
need_isp_copy_binary = !online && !continuous && !memory;
@@ -6139,17 +5745,19 @@ allocate_delay_frames(struct ia_css_pipe *pipe)
struct ia_css_video_settings *mycs_video = &pipe->pipe_settings.video;
ref_info = mycs_video->video_binary.internal_frame_info;
- /*The ref frame expects
- * 1. Y plane
- * 2. UV plane with line interleaving, like below
- * UUUUUU(width/2 times) VVVVVVVV..(width/2 times)
- *
- * This format is not YUV420(which has Y, U and V planes).
- * Its closer to NV12, except that the UV plane has UV
- * interleaving, like UVUVUVUVUVUVUVUVU...
- *
- * TODO: make this ref_frame format as a separate frame format
- */
+
+ /*
+ * The ref frame expects
+ * 1. Y plane
+ * 2. UV plane with line interleaving, like below
+ * UUUUUU(width/2 times) VVVVVVVV..(width/2 times)
+ *
+		 * This format is not YUV420 (which has Y, U and V planes).
+		 * It's closer to NV12, except that the UV plane has UV
+ * interleaving, like UVUVUVUVUVUVUVUVU...
+ *
+ * TODO: make this ref_frame format as a separate frame format
+ */
ref_info.format = IA_CSS_FRAME_FORMAT_NV12;
delay_frames = mycs_video->delay_frames;
}
@@ -6158,17 +5766,19 @@ allocate_delay_frames(struct ia_css_pipe *pipe)
struct ia_css_preview_settings *mycs_preview = &pipe->pipe_settings.preview;
ref_info = mycs_preview->preview_binary.internal_frame_info;
- /*The ref frame expects
- * 1. Y plane
- * 2. UV plane with line interleaving, like below
- * UUUUUU(width/2 times) VVVVVVVV..(width/2 times)
- *
- * This format is not YUV420(which has Y, U and V planes).
- * Its closer to NV12, except that the UV plane has UV
- * interleaving, like UVUVUVUVUVUVUVUVU...
- *
- * TODO: make this ref_frame format as a separate frame format
- */
+
+ /*
+ * The ref frame expects
+ * 1. Y plane
+ * 2. UV plane with line interleaving, like below
+ * UUUUUU(width/2 times) VVVVVVVV..(width/2 times)
+ *
+		 * This format is not YUV420 (which has Y, U and V planes).
+		 * It's closer to NV12, except that the UV plane has UV
+ * interleaving, like UVUVUVUVUVUVUVUVU...
+ *
+ * TODO: make this ref_frame format as a separate frame format
+ */
ref_info.format = IA_CSS_FRAME_FORMAT_NV12;
delay_frames = mycs_preview->delay_frames;
}
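
The ref-frame layout described in the comments above is NV12-sized: a full-resolution Y plane plus a half-height chroma plane whose lines carry width/2 U samples followed by width/2 V samples. A hedged sketch of the size arithmetic only; the driver allocates the actual frames through its own frame-info helpers:

    static size_t ref_frame_bytes(unsigned int width, unsigned int height)
    {
            size_t y_plane  = (size_t)width * height;        /* Y */
            size_t uv_plane = (size_t)width * (height / 2);  /* U+V lines */

            return y_plane + uv_plane;      /* same 3/2 total as NV12 */
    }
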
@@ -6625,9 +6235,11 @@ need_yuv_scaler_stage(const struct ia_css_pipe *pipe)
return false;
}
-/* TODO: it is temporarily created from ia_css_pipe_create_cas_scaler_desc */
-/* which has some hard-coded knowledge which prevents reuse of the function. */
-/* Later, merge this with ia_css_pipe_create_cas_scaler_desc */
+/*
+ * TODO: it is temporarily created from ia_css_pipe_create_cas_scaler_desc
+ * which has some hard-coded knowledge which prevents reuse of the function.
+ * Later, merge this with ia_css_pipe_create_cas_scaler_desc
+ */
static int ia_css_pipe_create_cas_scaler_desc_single_output(
struct ia_css_frame_info *cas_scaler_in_info,
struct ia_css_frame_info *cas_scaler_out_info,
@@ -7024,22 +6636,22 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe)
#if defined(ISP2401)
/*
- * NOTES
- * - Why does the "yuvpp" pipe needs "isp_copy_binary" (i.e. ISP Copy) when
- * its input is "ATOMISP_INPUT_FORMAT_YUV422_8"?
- *
- * In most use cases, the first stage in the "yuvpp" pipe is the "yuv_scale_
- * binary". However, the "yuv_scale_binary" does NOT support the input-frame
- * format as "IA_CSS_STREAM _FORMAT_YUV422_8".
- *
- * Hence, the "isp_copy_binary" is required to be present in front of the "yuv
- * _scale_binary". It would translate the input-frame to the frame formats that
- * are supported by the "yuv_scale_binary".
- *
- * Please refer to "FrameWork/css/isp/pipes/capture_pp/capture_pp_1.0/capture_
- * pp_defs.h" for the list of input-frame formats that are supported by the
- * "yuv_scale_binary".
- */
+ * NOTES
+	 * - Why does the "yuvpp" pipe need "isp_copy_binary" (i.e. ISP Copy) when
+ * its input is "ATOMISP_INPUT_FORMAT_YUV422_8"?
+ *
+ * In most use cases, the first stage in the "yuvpp" pipe is the "yuv_scale_
+ * binary". However, the "yuv_scale_binary" does NOT support the input-frame
+	 * format as "IA_CSS_STREAM_FORMAT_YUV422_8".
+ *
+ * Hence, the "isp_copy_binary" is required to be present in front of the "yuv
+ * _scale_binary". It would translate the input-frame to the frame formats that
+ * are supported by the "yuv_scale_binary".
+ *
+ * Please refer to "FrameWork/css/isp/pipes/capture_pp/capture_pp_1.0/capture_
+ * pp_defs.h" for the list of input-frame formats that are supported by the
+ * "yuv_scale_binary".
+ */
need_isp_copy_binary =
(pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV422_8);
#else /* !ISP2401 */
@@ -7055,23 +6667,23 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe)
goto ERR;
/*
- * NOTES
- * - Why is "pipe->pipe_settings.capture.copy_binary.online" specified?
- *
- * In some use cases, the first stage in the "yuvpp" pipe is the
- * "isp_copy_binary". The "isp_copy_binary" is designed to process
- * the input from either the system DDR or from the IPU internal VMEM.
- * So it provides the flag "online" to specify where its input is from,
- * i.e.:
- *
- * (1) "online <= true", the input is from the IPU internal VMEM.
- * (2) "online <= false", the input is from the system DDR.
- *
- * In other use cases, the first stage in the "yuvpp" pipe is the
- * "yuv_scale_binary". "The "yuv_scale_binary" is designed to process the
- * input ONLY from the system DDR. So it does not provide the flag "online"
- * to specify where its input is from.
- */
+ * NOTES
+ * - Why is "pipe->pipe_settings.capture.copy_binary.online" specified?
+ *
+ * In some use cases, the first stage in the "yuvpp" pipe is the
+ * "isp_copy_binary". The "isp_copy_binary" is designed to process
+ * the input from either the system DDR or from the IPU internal VMEM.
+ * So it provides the flag "online" to specify where its input is from,
+ * i.e.:
+ *
+ * (1) "online <= true", the input is from the IPU internal VMEM.
+ * (2) "online <= false", the input is from the system DDR.
+ *
+ * In other use cases, the first stage in the "yuvpp" pipe is the
+		 * "yuv_scale_binary". The "yuv_scale_binary" is designed to process the
+ * input ONLY from the system DDR. So it does not provide the flag "online"
+ * to specify where its input is from.
+ */
pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
}
@@ -7162,6 +6774,7 @@ static int yuvpp_start(struct ia_css_pipe *pipe)
int err = 0;
enum sh_css_pipe_config_override copy_ovrd;
enum ia_css_input_mode yuvpp_pipe_input_mode;
+ unsigned int thread_id;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
@@ -7181,12 +6794,8 @@ static int yuvpp_start(struct ia_css_pipe *pipe)
return err;
}
- {
- unsigned int thread_id;
-
- ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
- copy_ovrd = 1 << thread_id;
- }
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
start_pipe(pipe, copy_ovrd, yuvpp_pipe_input_mode);
@@ -7264,8 +6873,11 @@ sh_css_pipe_load_binaries(struct ia_css_pipe *pipe)
}
if (err) {
if (sh_css_pipe_unload_binaries(pipe)) {
- /* currently css does not support multiple error returns in a single function,
- * using -EINVAL in this case */
+ /*
+ * currently css does not support multiple error
+ * returns in a single function, using -EINVAL in
+ * this case
+ */
err = -EINVAL;
}
}
@@ -7316,12 +6928,13 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe)
num_output_stage = pipe->pipe_settings.yuvpp.num_output;
#ifdef ISP2401
- /* When the input system is 2401, always enable 'in_frameinfo_memory'
- * except for the following:
- * - Direct Sensor Mode Online Capture
- * - Direct Sensor Mode Continuous Capture
- * - Buffered Sensor Mode Continuous Capture
- */
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Capture
+ * - Direct Sensor Mode Continuous Capture
+ * - Buffered Sensor Mode Continuous Capture
+ */
sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
buffered_sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
online = pipe->stream->config.online;
@@ -7332,19 +6945,23 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe)
/* Construct in_frame info (only in case we have dynamic input */
need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
#endif
- /* the input frame can come from:
- * a) memory: connect yuvscaler to me->in_frame
- * b) sensor, via copy binary: connect yuvscaler to copy binary later on */
+ /*
+ * the input frame can come from:
+ *
+ * a) memory: connect yuvscaler to me->in_frame
+ * b) sensor, via copy binary: connect yuvscaler to copy binary later
+ * on
+ */
if (need_in_frameinfo_memory) {
/* TODO: improve for different input formats. */
/*
- * "pipe->stream->config.input_config.format" represents the sensor output
- * frame format, e.g. YUV422 8-bit.
- *
- * "in_frame_format" represents the imaging pipe's input frame format, e.g.
- * Bayer-Quad RAW.
- */
+ * "pipe->stream->config.input_config.format" represents the sensor output
+ * frame format, e.g. YUV422 8-bit.
+ *
+ * "in_frame_format" represents the imaging pipe's input frame format, e.g.
+ * Bayer-Quad RAW.
+ */
int in_frame_format;
if (pipe->stream->config.input_config.format ==
@@ -7353,22 +6970,22 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe)
} else if (pipe->stream->config.input_config.format ==
ATOMISP_INPUT_FORMAT_YUV422_8) {
/*
- * When the sensor output frame format is "ATOMISP_INPUT_FORMAT_YUV422_8",
- * the "isp_copy_var" binary is selected as the first stage in the yuvpp
- * pipe.
- *
- * For the "isp_copy_var" binary, it reads the YUV422-8 pixels from
- * the frame buffer (at DDR) to the frame-line buffer (at VMEM).
- *
- * By now, the "isp_copy_var" binary does NOT provide a separated
- * frame-line buffer to store the YUV422-8 pixels. Instead, it stores
- * the YUV422-8 pixels in the frame-line buffer which is designed to
- * store the Bayer-Quad RAW pixels.
- *
- * To direct the "isp_copy_var" binary reading from the RAW frame-line
- * buffer, its input frame format must be specified as "IA_CSS_FRAME_
- * FORMAT_RAW".
- */
+ * When the sensor output frame format is "ATOMISP_INPUT_FORMAT_YUV422_8",
+ * the "isp_copy_var" binary is selected as the first stage in the yuvpp
+ * pipe.
+ *
+ * For the "isp_copy_var" binary, it reads the YUV422-8 pixels from
+ * the frame buffer (at DDR) to the frame-line buffer (at VMEM).
+ *
+ * By now, the "isp_copy_var" binary does NOT provide a separated
+ * frame-line buffer to store the YUV422-8 pixels. Instead, it stores
+ * the YUV422-8 pixels in the frame-line buffer which is designed to
+ * store the Bayer-Quad RAW pixels.
+ *
+ * To direct the "isp_copy_var" binary reading from the RAW frame-line
+ * buffer, its input frame format must be specified as "IA_CSS_FRAME_
+ * FORMAT_RAW".
+ */
in_frame_format = IA_CSS_FRAME_FORMAT_RAW;
} else {
in_frame_format = IA_CSS_FRAME_FORMAT_NV12;
@@ -7667,13 +7284,14 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
ia_css_pipe_util_create_output_frames(out_frames);
#ifdef ISP2401
- /* When the input system is 2401, always enable 'in_frameinfo_memory'
- * except for the following:
- * - Direct Sensor Mode Online Capture
- * - Direct Sensor Mode Online Capture
- * - Direct Sensor Mode Continuous Capture
- * - Buffered Sensor Mode Continuous Capture
- */
+ /*
+ * When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Capture
+ * - Direct Sensor Mode Online Capture
+ * - Direct Sensor Mode Continuous Capture
+ * - Buffered Sensor Mode Continuous Capture
+ */
sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
online = pipe->stream->config.online;
@@ -7806,15 +7424,15 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
local_out_frame = NULL;
ia_css_pipe_util_set_output_frames(out_frames, 0, local_out_frame);
/*
- * WARNING: The #if def flag has been added below as a
- * temporary solution to solve the problem of enabling the
- * view finder in a single binary in a capture flow. The
- * vf-pp stage has been removed from Skycam in the solution
- * provided. The vf-pp stage should be re-introduced when
- * required. This * should not be considered as a clean solution.
- * Proper investigation should be done to come up with the clean
- * solution.
- * */
+ * WARNING: The #if def flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The
+ * vf-pp stage has been removed from Skycam in the solution
+ * provided. The vf-pp stage should be re-introduced when
+			 * required. This should not be considered as a clean solution.
+ * Proper investigation should be done to come up with the clean
+ * solution.
+ */
ia_css_pipe_get_generic_stage_desc(&stage_desc,
primary_binary[i],
out_frames,
@@ -7828,8 +7446,7 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
return err;
}
}
- /* If we use copy iso primary,
- the input must be yuv iso raw */
+ /* If we use copy iso primary, the input must be yuv iso raw */
current_stage->args.copy_vf =
primary_binary[0]->info->sp.pipeline.mode ==
IA_CSS_BINARY_MODE_COPY;
@@ -7888,7 +7505,6 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
}
}
-#ifndef ISP2401
if (need_pp && current_stage) {
struct ia_css_frame *local_in_frame = NULL;
@@ -7908,20 +7524,6 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
}
err = add_capture_pp_stage(pipe, me, local_in_frame,
need_yuv_pp ? NULL : out_frame,
-#else
- /* ldc and capture_pp not supported in same pipeline */
- if (need_ldc && current_stage) {
- in_frame = current_stage->args.out_frame[0];
- ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
- ia_css_pipe_get_generic_stage_desc(&stage_desc, capture_ldc_binary,
- out_frames, in_frame, NULL);
- err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
- NULL);
- } else if (need_pp && current_stage) {
- in_frame = current_stage->args.out_frame[0];
- err = add_capture_pp_stage(pipe, me, in_frame,
- need_yuv_pp ? NULL : out_frame,
-#endif
capture_pp_binary,
&current_stage);
if (err) {
@@ -7954,14 +7556,14 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
}
/*
- * WARNING: The #if def flag has been added below as a
- * temporary solution to solve the problem of enabling the
- * view finder in a single binary in a capture flow. The vf-pp
- * stage has been removed from Skycam in the solution provided.
- * The vf-pp stage should be re-introduced when required. This
- * should not be considered as a clean solution. Proper
- * investigation should be done to come up with the clean solution.
- * */
+ * WARNING: The #if def flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The vf-pp
+ * stage has been removed from Skycam in the solution provided.
+ * The vf-pp stage should be re-introduced when required. This
+ * should not be considered as a clean solution. Proper
+ * investigation should be done to come up with the clean solution.
+ */
if (mode != IA_CSS_CAPTURE_MODE_RAW &&
mode != IA_CSS_CAPTURE_MODE_BAYER &&
current_stage && vf_frame) {
@@ -8005,6 +7607,7 @@ create_host_capture_pipeline(struct ia_css_pipe *pipe)
static int capture_start(struct ia_css_pipe *pipe)
{
struct ia_css_pipeline *me;
+ unsigned int thread_id;
int err = 0;
enum sh_css_pipe_config_override copy_ovrd;
@@ -8034,7 +7637,7 @@ static int capture_start(struct ia_css_pipe *pipe)
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
-#elif defined(ISP2401)
+#else
if (pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
err = send_mipi_frames(pipe);
if (err) {
@@ -8042,23 +7645,19 @@ static int capture_start(struct ia_css_pipe *pipe)
return err;
}
}
-
#endif
- {
- unsigned int thread_id;
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
- ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
- copy_ovrd = 1 << thread_id;
- }
start_pipe(pipe, copy_ovrd, pipe->stream->config.mode);
#if !defined(ISP2401)
/*
- * old isys: for IA_CSS_PIPE_MODE_COPY pipe, isys rx has to be configured,
- * which is currently done in start_binary(); but COPY pipe contains no binary,
- * and does not call start_binary(); so we need to configure the rx here.
- */
+ * old isys: for IA_CSS_PIPE_MODE_COPY pipe, isys rx has to be configured,
+ * which is currently done in start_binary(); but COPY pipe contains no binary,
+ * and does not call start_binary(); so we need to configure the rx here.
+ */
if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY &&
pipe->stream->reconfigure_css_rx) {
ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
@@ -8174,7 +7773,8 @@ append_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware)
while (*l)
l = &(*l)->next;
*l = firmware;
- /*firmware->next = NULL;*/ /* when multiple acc extensions are loaded, 'next' can be not NULL */
+ /* when multiple acc extensions are loaded, 'next' can be not NULL */
+ /*firmware->next = NULL;*/
IA_CSS_LEAVE_PRIVATE("");
}
@@ -8360,9 +7960,9 @@ sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
}
/*
- * @brief Tag a specific frame in continuous capture.
- * Refer to "sh_css_internal.h" for details.
- */
+ * @brief Tag a specific frame in continuous capture.
+ * Refer to "sh_css_internal.h" for details.
+ */
int ia_css_stream_capture_frame(struct ia_css_stream *stream,
unsigned int exp_id)
{
@@ -8389,10 +7989,12 @@ int ia_css_stream_capture_frame(struct ia_css_stream *stream,
sh_css_create_tag_descr(0, 0, 0, exp_id, &tag_descr);
/* Encode the tag descriptor into a 32-bit value */
encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr);
- /* Enqueue the encoded tag to the host2sp queue.
- * Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0
- * on both host and the SP side.
- * It is mainly because it is enough to have only one tag_cmd queue */
+ /*
+ * Enqueue the encoded tag to the host2sp queue.
+ * Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0
+ * on both host and the SP side.
+	 * This is mainly because a single tag_cmd queue is enough.
+ */
err = ia_css_bufq_enqueue_tag_cmd(encoded_tag_descr);
IA_CSS_LEAVE_ERR(err);
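
The actual bit layout of the encoded tag lives in sh_css_encode_tag_descr() and is not reproduced here; the sketch below only illustrates the general idea of packing a small capture descriptor into the single 32-bit word that the host2sp tag_cmd queue carries. All field names and widths are invented for illustration:

    struct tag_descr_sketch {
            u8 num_captures;
            u8 skip;
            u8 offset;
            u8 exp_id;
    };

    static u32 encode_tag_sketch(const struct tag_descr_sketch *t)
    {
            return t->num_captures | (t->skip << 8) |
                   (t->offset << 16) | ((u32)t->exp_id << 24);
    }
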
@@ -8400,9 +8002,9 @@ int ia_css_stream_capture_frame(struct ia_css_stream *stream,
}
/*
- * @brief Configure the continuous capture.
- * Refer to "sh_css_internal.h" for details.
- */
+ * @brief Configure the continuous capture.
+ * Refer to "sh_css_internal.h" for details.
+ */
int ia_css_stream_capture(struct ia_css_stream *stream, int num_captures,
unsigned int skip, int offset)
{
@@ -8438,10 +8040,12 @@ int ia_css_stream_capture(struct ia_css_stream *stream, int num_captures,
return -EBUSY;
}
- /* Enqueue the encoded tag to the host2sp queue.
- * Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0
- * on both host and the SP side.
- * It is mainly because it is enough to have only one tag_cmd queue */
+ /*
+ * Enqueue the encoded tag to the host2sp queue.
+ * Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0
+ * on both host and the SP side.
+	 * This is mainly because a single tag_cmd queue is enough.
+ */
return_err = ia_css_bufq_enqueue_tag_cmd((uint32_t)encoded_tag_descr);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
@@ -8486,9 +8090,7 @@ sh_css_init_host_sp_control_vars(void)
unsigned int HIVE_ADDR_host_sp_queues_initialized;
unsigned int HIVE_ADDR_sp_sleep_mode;
unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
-#ifndef ISP2401
unsigned int HIVE_ADDR_sp_stop_copy_preview;
-#endif
unsigned int HIVE_ADDR_host_sp_com;
unsigned int o = offsetof(struct host_sp_communication, host2sp_command)
/ sizeof(int);
@@ -8505,20 +8107,9 @@ sh_css_init_host_sp_control_vars(void)
fw->info.sp.host_sp_queues_initialized;
HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode;
HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb;
-#ifndef ISP2401
HIVE_ADDR_sp_stop_copy_preview = fw->info.sp.stop_copy_preview;
-#endif
HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com;
- (void)HIVE_ADDR_ia_css_ispctrl_sp_isp_started; /* Suppres warnings in CRUN */
-
- (void)HIVE_ADDR_sp_sleep_mode;
- (void)HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
-#ifndef ISP2401
- (void)HIVE_ADDR_sp_stop_copy_preview;
-#endif
- (void)HIVE_ADDR_host_sp_com;
-
sp_dmem_store_uint32(SP0_ID,
(unsigned int)sp_address_of(ia_css_ispctrl_sp_isp_started),
(uint32_t)(0));
@@ -8532,11 +8123,9 @@ sh_css_init_host_sp_control_vars(void)
sp_dmem_store_uint32(SP0_ID,
(unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
(uint32_t)(false));
-#ifndef ISP2401
sp_dmem_store_uint32(SP0_ID,
(unsigned int)sp_address_of(sp_stop_copy_preview),
my_css.stop_copy_preview ? (uint32_t)(1) : (uint32_t)(0));
-#endif
store_sp_array_uint(host_sp_com, o, host2sp_cmd_ready);
for (i = 0; i < N_CSI_PORTS; i++) {
@@ -8586,9 +8175,11 @@ void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config)
stream_config->online = true;
stream_config->left_padding = -1;
stream_config->pixels_per_clock = 1;
- /* temporary default value for backwards compatibility.
- * This field used to be hardcoded within CSS but this has now
- * been moved to the stream_config struct. */
+ /*
+ * temporary default value for backwards compatibility.
+ * This field used to be hardcoded within CSS but this has now
+ * been moved to the stream_config struct.
+ */
stream_config->source.port.rxcount = 0x04040404;
}
@@ -8602,7 +8193,7 @@ ia_css_acc_pipe_create(struct ia_css_pipe *pipe)
return -EINVAL;
}
- /* There is not meaning for num_execs = 0 semantically. Run atleast once. */
+	/* There is no meaning for num_execs = 0 semantically. Run at least once. */
if (pipe->config.acc_num_execs == 0)
pipe->config.acc_num_execs = 1;
@@ -8673,9 +8264,11 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
ia_css_pipe_extra_config_defaults(&internal_pipe->extra_config);
if (config->mode == IA_CSS_PIPE_MODE_ACC) {
- /* Temporary hack to migrate acceleration to CSS 2.0.
- * In the future the code for all pipe types should be
- * unified. */
+ /*
+ * Temporary hack to migrate acceleration to CSS 2.0.
+ * In the future the code for all pipe types should be
+ * unified.
+ */
*pipe = internal_pipe;
if (!internal_pipe->config.acc_extension &&
internal_pipe->config.num_acc_stages ==
@@ -8687,21 +8280,23 @@ ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
return ia_css_acc_pipe_create(internal_pipe);
}
- /* Use config value when dvs_frame_delay setting equal to 2, otherwise always 1 by default */
+ /*
+	 * Use the config value when the dvs_frame_delay setting equals 2,
+	 * otherwise always default to 1
+ */
if (internal_pipe->config.dvs_frame_delay == IA_CSS_FRAME_DELAY_2)
internal_pipe->dvs_frame_delay = 2;
else
internal_pipe->dvs_frame_delay = 1;
- /* we still keep enable_raw_binning for backward compatibility, for any new
- fractional bayer downscaling, we should use bayer_ds_out_res. if both are
- specified, bayer_ds_out_res will take precedence.if none is specified, we
- set bayer_ds_out_res equal to IF output resolution(IF may do cropping on
- sensor output) or use default decimation factor 1. */
- if (internal_pipe->extra_config.enable_raw_binning &&
- internal_pipe->config.bayer_ds_out_res.width) {
- /* fill some code here, if no code is needed, please remove it during integration */
- }
+ /*
+ * we still keep enable_raw_binning for backward compatibility,
+ * for any new fractional bayer downscaling, we should use
+	 * bayer_ds_out_res. If both are specified, bayer_ds_out_res will
+	 * take precedence. If none is specified, we set bayer_ds_out_res
+	 * equal to the IF output resolution (IF may do cropping on the
+	 * sensor output) or use the default decimation factor of 1.
+ */
/* YUV downscaling */
if ((internal_pipe->config.vf_pp_in_res.width ||
@@ -8905,8 +8500,10 @@ ia_css_stream_configure_rx(struct ia_css_stream *stream)
if (config->compression.type == IA_CSS_CSI2_COMPRESSION_TYPE_NONE)
stream->csi_rx_config.comp = MIPI_PREDICTOR_NONE;
else
- /* not implemented yet, requires extension of the rx_cfg_t
- * struct */
+ /*
+ * not implemented yet, requires extension of the rx_cfg_t
+ * struct
+ */
return -EINVAL;
stream->csi_rx_config.is_two_ppc = (stream->config.pixels_per_clock == 2);
@@ -8993,56 +8590,15 @@ metadata_info_init(const struct ia_css_metadata_config *mdc,
return -EINVAL;
md->resolution = mdc->resolution;
- /* We round up the stride to a multiple of the width
- * of the port going to DDR, this is a HW requirements (DMA). */
+ /*
+ * We round up the stride to a multiple of the width
+	 * of the port going to DDR; this is a HW requirement (DMA).
+ */
md->stride = CEIL_MUL(mdc->resolution.width, HIVE_ISP_DDR_WORD_BYTES);
md->size = mdc->resolution.height * md->stride;
return 0;
}
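
As a worked example of the stride rounding above, with 64 standing in for HIVE_ISP_DDR_WORD_BYTES (whose real value is platform defined): a 100-byte-wide metadata line rounds up to a 128-byte stride, so a 32-line buffer occupies 4096 bytes and the DMA always moves whole DDR words.

    static void metadata_stride_example(void)
    {
            unsigned int width = 100, height = 32, ddr_word = 64;
            unsigned int stride = ((width + ddr_word - 1) / ddr_word) * ddr_word;
            unsigned int size = height * stride;    /* 128 and 4096 here */

            (void)size;
    }
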
-/* ISP2401 */
-static int check_pipe_resolutions(const struct ia_css_pipe *pipe)
-{
- int err = 0;
-
- IA_CSS_ENTER_PRIVATE("");
-
- if (!pipe || !pipe->stream) {
- IA_CSS_ERROR("null arguments");
- err = -EINVAL;
- goto EXIT;
- }
-
- if (ia_css_util_check_res(pipe->config.input_effective_res.width,
- pipe->config.input_effective_res.height) != 0) {
- IA_CSS_ERROR("effective resolution not supported");
- err = -EINVAL;
- goto EXIT;
- }
- if (!ia_css_util_resolution_is_zero(
- pipe->stream->config.input_config.input_res)) {
- if (!ia_css_util_res_leq(pipe->config.input_effective_res,
- pipe->stream->config.input_config.input_res)) {
- IA_CSS_ERROR("effective resolution is larger than input resolution");
- err = -EINVAL;
- goto EXIT;
- }
- }
- if (!ia_css_util_resolution_is_even(pipe->config.output_info[0].res)) {
- IA_CSS_ERROR("output resolution must be even");
- err = -EINVAL;
- goto EXIT;
- }
- if (!ia_css_util_resolution_is_even(pipe->config.vf_output_info[0].res)) {
- IA_CSS_ERROR("VF resolution must be even");
- err = -EINVAL;
- goto EXIT;
- }
-EXIT:
- IA_CSS_LEAVE_ERR_PRIVATE(err);
- return err;
-}
-
int
ia_css_stream_create(const struct ia_css_stream_config *stream_config,
int num_pipes,
@@ -9057,9 +8613,6 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
int err = -EINVAL;
struct ia_css_metadata_info md_info;
struct ia_css_resolution effective_res;
-#ifdef ISP2401
- bool aspect_ratio_crop_enabled = false;
-#endif
IA_CSS_ENTER("num_pipes=%d", num_pipes);
ia_css_debug_dump_stream_config(stream_config, num_pipes);
@@ -9236,14 +8789,6 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
IA_CSS_LOG("mode sensor/default");
}
-#ifdef ISP2401
- err = aspect_ratio_crop_init(curr_stream, pipes,
- &aspect_ratio_crop_enabled);
- if (err) {
- IA_CSS_LEAVE_ERR(err);
- goto ERR;
- }
-#endif
for (i = 0; i < num_pipes; i++) {
struct ia_css_resolution effective_res;
@@ -9256,22 +8801,6 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
if (effective_res.height == 0 || effective_res.width == 0) {
effective_res = curr_pipe->stream->config.input_config.effective_res;
-#if defined(ISP2401)
- /* The aspect ratio cropping is currently only
- * supported on the new input system. */
- if (aspect_ratio_crop_check(aspect_ratio_crop_enabled, curr_pipe)) {
- struct ia_css_resolution crop_res;
-
- err = aspect_ratio_crop(curr_pipe, &crop_res);
- if (!err) {
- effective_res = crop_res;
- } else {
- /* in case of error fallback to default
- * effective resolution from driver. */
- IA_CSS_LOG("aspect_ratio_crop() failed with err(%d)", err);
- }
- }
-#endif
curr_pipe->config.input_effective_res = effective_res;
}
IA_CSS_LOG("effective_res=%dx%d",
@@ -9279,17 +8808,6 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
effective_res.height);
}
- if (IS_ISP2401) {
- for (i = 0; i < num_pipes; i++) {
- if (pipes[i]->config.mode != IA_CSS_PIPE_MODE_ACC &&
- pipes[i]->config.mode != IA_CSS_PIPE_MODE_COPY) {
- err = check_pipe_resolutions(pipes[i]);
- if (err)
- goto ERR;
- }
- }
- }
-
err = ia_css_stream_isp_parameters_init(curr_stream);
if (err)
goto ERR;
@@ -9327,9 +8845,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
if (num_pipes >= 2) {
curr_stream->cont_capt = true;
curr_stream->disable_cont_vf = curr_stream->config.disable_cont_viewfinder;
-
- if (!IS_ISP2401)
- curr_stream->stop_copy_preview = my_css.stop_copy_preview;
+ curr_stream->stop_copy_preview = my_css.stop_copy_preview;
}
/* Create copy pipe here, since it may not be exposed to the driver */
@@ -9387,16 +8903,15 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* set current stream */
curr_pipe->stream = curr_stream;
- if (!IS_ISP2401) {
- /* take over effective info */
+ /* take over effective info */
+
+ effective_res = curr_pipe->config.input_effective_res;
+ err = ia_css_util_check_res(
+ effective_res.width,
+ effective_res.height);
+ if (err)
+ goto ERR;
- effective_res = curr_pipe->config.input_effective_res;
- err = ia_css_util_check_res(
- effective_res.width,
- effective_res.height);
- if (err)
- goto ERR;
- }
/* sensor binning per pipe */
if (sensor_binning_changed)
sh_css_pipe_free_shading_table(curr_pipe);
@@ -9421,9 +8936,6 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
goto ERR;
}
- if (IS_ISP2401)
- pipe_info->output_system_in_res_info = curr_pipe->config.output_system_in_res;
-
if (!spcopyonly) {
if (!IS_ISP2401)
err = sh_css_pipe_get_shading_info(curr_pipe,
@@ -9523,8 +9035,6 @@ ia_css_stream_destroy(struct ia_css_stream *stream)
if ((stream->last_pipe) &&
ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num)) {
#if defined(ISP2401)
- bool free_mpi;
-
for (i = 0; i < stream->num_pipes; i++) {
struct ia_css_pipe *entry = stream->pipes[i];
unsigned int sp_thread_id;
@@ -9548,19 +9058,16 @@ ia_css_stream_destroy(struct ia_css_stream *stream)
}
}
}
- free_mpi = stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
- if (IS_ISP2401) {
- free_mpi |= stream->config.mode == IA_CSS_INPUT_MODE_TPG;
- free_mpi |= stream->config.mode == IA_CSS_INPUT_MODE_PRBS;
- }
-
- if (free_mpi) {
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
for (i = 0; i < stream->num_pipes; i++) {
struct ia_css_pipe *entry = stream->pipes[i];
- /* free any mipi frames that are remaining:
- * some test stream create-destroy cycles do not generate output frames
- * and the mipi buffer is not freed in the deque function
- */
+ /*
+				 * Free any remaining mipi frames: some test
+				 * stream create-destroy cycles do not generate
+				 * output frames, and the mipi buffer is then
+				 * not freed in the dequeue function.
+ */
if (entry)
free_mipi_frames(entry);
}
@@ -9638,58 +9145,6 @@ ia_css_stream_get_info(const struct ia_css_stream *stream,
return 0;
}
-/*
- * Rebuild a stream, including allocating structs, setting configuration and
- * building the required pipes.
- * The data is taken from the css_save struct updated upon stream creation.
- * The stream handle is used to identify the correct entry in the css_save struct
- */
-int
-ia_css_stream_load(struct ia_css_stream *stream)
-{
- int i, j, err;
-
- if (IS_ISP2401) {
- /* TODO remove function - DEPRECATED */
- (void)stream;
- return -ENOTSUPP;
- }
-
- assert(stream);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() enter,\n");
- for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
- if (my_css_save.stream_seeds[i].stream != stream)
- continue;
-
- for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++) {
- int k;
-
- err = ia_css_pipe_create(&my_css_save.stream_seeds[i].pipe_config[j],
- &my_css_save.stream_seeds[i].pipes[j]);
- if (!err)
- continue;
-
- for (k = 0; k < j; k++)
- ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[k]);
- return err;
- }
- err = ia_css_stream_create(&my_css_save.stream_seeds[i].stream_config,
- my_css_save.stream_seeds[i].num_pipes,
- my_css_save.stream_seeds[i].pipes,
- &my_css_save.stream_seeds[i].stream);
- if (!err)
- break;
-
- ia_css_stream_destroy(stream);
- for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
- ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
- return err;
- }
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() exit,\n");
- return 0;
-}
-
int
ia_css_stream_start(struct ia_css_stream *stream)
{
@@ -9763,16 +9218,14 @@ ia_css_stream_stop(struct ia_css_stream *stream)
}
#endif
- if (!IS_ISP2401)
- err = ia_css_pipeline_request_stop(&stream->last_pipe->pipeline);
- else
- err = sh_css_pipes_stop(stream);
-
+ err = ia_css_pipeline_request_stop(&stream->last_pipe->pipeline);
if (err)
return err;
- /* Ideally, unmapping should happen after pipeline_stop, but current
- * semantics do not allow that. */
+ /*
+ * Ideally, unmapping should happen after pipeline_stop, but current
+ * semantics do not allow that.
+ */
/* err = map_sp_threads(stream, false); */
return err;
@@ -9785,19 +9238,16 @@ ia_css_stream_has_stopped(struct ia_css_stream *stream)
assert(stream);
- if (!IS_ISP2401)
- stopped = ia_css_pipeline_has_stopped(&stream->last_pipe->pipeline);
- else
- stopped = sh_css_pipes_have_stopped(stream);
+ stopped = ia_css_pipeline_has_stopped(&stream->last_pipe->pipeline);
return stopped;
}
/* ISP2400 */
/*
- * Destroy the stream and all the pipes related to it.
- * The stream handle is used to identify the correct entry in the css_save struct
- */
+ * Destroy the stream and all the pipes related to it.
+ * The stream handle is used to identify the correct entry in the css_save struct
+ */
int
ia_css_stream_unload(struct ia_css_stream *stream)
{
@@ -10060,11 +9510,12 @@ ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe)
{
assert(pipe);
- /* KW was not sure this function was not returning a value
- that was out of range; so added an assert, and, for the
- case when asserts are not enabled, clip to the largest
- value; pipe_num is unsigned so the value cannot be too small
- */
+ /*
+ * KW was not sure this function was not returning a value
+ * that was out of range; so added an assert, and, for the
+ * case when asserts are not enabled, clip to the largest
+ * value; pipe_num is unsigned so the value cannot be too small
+ */
assert(pipe->pipe_num < IA_CSS_PIPELINE_NUM_MAX);
if (pipe->pipe_num >= IA_CSS_PIPELINE_NUM_MAX)
@@ -10119,10 +9570,10 @@ ia_css_start_sp(void)
}
/*
- * Time to wait SP for termincate. Only condition when this can happen
- * is a fatal hw failure, but we must be able to detect this and emit
- * a proper error trace.
- */
+ * Time to wait for the SP to terminate. The only condition when this can happen
+ * is a fatal hw failure, but we must be able to detect this and emit
+ * a proper error trace.
+ */
#define SP_SHUTDOWN_TIMEOUT_US 200000
int
@@ -10142,14 +9593,10 @@ ia_css_stop_sp(void)
}
/* For now, stop whole SP */
- if (!IS_ISP2401) {
- sh_css_write_host2sp_command(host2sp_cmd_terminate);
- } else {
- if (!sh_css_write_host2sp_command(host2sp_cmd_terminate)) {
- IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
- ia_css_debug_dump_sp_sw_debug_info();
- ia_css_debug_dump_debug_info(NULL);
- }
+ if (!sh_css_write_host2sp_command(host2sp_cmd_terminate)) {
+ IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
+ ia_css_debug_dump_sp_sw_debug_info();
+ ia_css_debug_dump_debug_info(NULL);
}
sh_css_sp_set_sp_running(false);
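
SP_SHUTDOWN_TIMEOUT_US, defined above, bounds how long the host waits for the SP to acknowledge the terminate command before declaring a fatal hw failure. A hedged sketch of such a wait loop; the state-polling helper sp_has_terminated() is illustrative, not the driver's actual API:

    static int wait_for_sp_shutdown(void)
    {
            unsigned long waited_us = 0;

            while (!sp_has_terminated()) {
                    if (waited_us++ >= SP_SHUTDOWN_TIMEOUT_US) {
                            IA_CSS_ERROR("SP did not terminate in time");
                            return -EBUSY;
                    }
                    udelay(1);
            }
            return 0;
    }
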
@@ -10245,9 +9692,7 @@ void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
-#if defined SH_CSS_ENABLE_METADATA
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
-#endif
if (pipe->pipe_settings.preview.preview_binary.info &&
pipe->pipe_settings.preview.preview_binary.info->sp.enable.s3a)
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
@@ -10260,9 +9705,7 @@ void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
-#if defined SH_CSS_ENABLE_METADATA
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
-#endif
if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
if (pipe->pipe_settings.capture.primary_binary[i].info &&
@@ -10287,9 +9730,7 @@ void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
-#if defined SH_CSS_ENABLE_METADATA
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
-#endif
if (pipe->pipe_settings.video.video_binary.info &&
pipe->pipe_settings.video.video_binary.info->sp.enable.s3a)
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
@@ -10302,18 +9743,14 @@ void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
if (!pipe->stream->config.continuous)
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
-#if defined SH_CSS_ENABLE_METADATA
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
-#endif
} else if (pipe->mode == IA_CSS_PIPE_ID_ACC) {
if (need_input_queue)
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
-#if defined SH_CSS_ENABLE_METADATA
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
-#endif
} else if (pipe->mode == IA_CSS_PIPE_ID_YUVPP) {
unsigned int idx;
@@ -10325,38 +9762,11 @@ void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
if (need_input_queue)
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
-#if defined SH_CSS_ENABLE_METADATA
ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
-#endif
}
IA_CSS_LEAVE("");
}
-#if CONFIG_ON_FRAME_ENQUEUE()
-static int set_config_on_frame_enqueue(struct ia_css_frame_info
- *info, struct frame_data_wrapper *frame)
-{
- frame->config_on_frame_enqueue.padded_width = 0;
-
- /* currently we support configuration on frame enqueue only on YUV formats */
- /* on other formats the padded_width is zeroed for no configuration override */
- switch (info->format) {
- case IA_CSS_FRAME_FORMAT_YUV420:
- case IA_CSS_FRAME_FORMAT_NV12:
- if (info->padded_width > info->res.width)
- frame->config_on_frame_enqueue.padded_width = info->padded_width;
- else if ((info->padded_width < info->res.width) && (info->padded_width > 0))
- return -EINVAL;
-
- /* nothing to do if width == padded width or padded width is zeroed (the same) */
- break;
- default:
- break;
- }
-
- return 0;
-}
-#endif
int
ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
@@ -10365,8 +9775,10 @@ ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
IA_CSS_ENTER("");
- /* Only continuous streams have a tagger to which we can send the
- * unlock message. */
+ /*
+ * Only continuous streams have a tagger to which we can send the
+ * unlock message.
+ */
if (!stream || !stream->config.continuous) {
IA_CSS_ERROR("invalid stream pointer");
return -EINVAL;
@@ -10378,8 +9790,10 @@ ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
return -EINVAL;
}
- /* Send the event. Since we verified that the exp_id is valid,
- * we can safely assign it to an 8-bit argument here. */
+ /*
+ * Send the event. Since we verified that the exp_id is valid,
+ * we can safely assign it to an 8-bit argument here.
+ */
ret = ia_css_bufq_enqueue_psys_event(
IA_CSS_PSYS_SW_EVENT_UNLOCK_RAW_BUFFER, exp_id, 0, 0);
@@ -10387,9 +9801,10 @@ ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
return ret;
}
-/* @brief Set the state (Enable or Disable) of the Extension stage in the
- * given pipe.
- */
+/*
+ * @brief Set the state (Enable or Disable) of the Extension stage in the
+ * given pipe.
+ */
int
ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
bool enable)
@@ -10433,9 +9848,10 @@ ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
return err;
}
-/* @brief Get the state (Enable or Disable) of the Extension stage in the
- * given pipe.
- */
+/*
+ * @brief Get the state (Enable or Disable) of the Extension stage in the
+ * given pipe.
+ */
int
ia_css_pipe_get_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
bool *enable)
@@ -10471,215 +9887,6 @@ ia_css_pipe_get_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle,
return err;
}
-/* ISP2401 */
-int
-ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe,
- u32 fw_handle,
- struct ia_css_isp_param_css_segments *css_seg,
- struct ia_css_isp_param_isp_segments *isp_seg)
-{
- unsigned int HIVE_ADDR_sp_group;
- static struct sh_css_sp_group sp_group;
- static struct sh_css_sp_stage sp_stage;
- static struct sh_css_isp_stage isp_stage;
- const struct ia_css_fw_info *fw;
- unsigned int thread_id;
- struct ia_css_pipeline_stage *stage;
- int err = 0;
- int stage_num = 0;
- enum ia_css_isp_memories mem;
- bool enabled;
-
- IA_CSS_ENTER("");
-
- fw = &sh_css_sp_fw;
-
- /* Parameter Check */
- if (!pipe || !pipe->stream) {
- IA_CSS_ERROR("Invalid Pipe.");
- err = -EINVAL;
- } else if (!(pipe->config.acc_extension)) {
- IA_CSS_ERROR("Invalid Pipe (No Extension Firmware).");
- err = -EINVAL;
- } else if (!sh_css_sp_is_running()) {
- IA_CSS_ERROR("Leaving: queue unavailable.");
- err = -EBUSY;
- } else {
- /* Query the thread_id and stage_num corresponding to the Extension firmware */
- ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
- err = ia_css_pipeline_get_stage_from_fw(&pipe->pipeline, fw_handle, &stage);
- if (!err) {
- /* Get the Extension State */
- enabled = (SH_CSS_QOS_STAGE_IS_ENABLED(&sh_css_sp_group.pipe[thread_id],
- stage->stage_num)) ? true : false;
- /* Update mapped arg only when extension stage is not enabled */
- if (enabled) {
- IA_CSS_ERROR("Leaving: cannot update when stage is enabled.");
- err = -EBUSY;
- } else {
- stage_num = stage->stage_num;
-
- HIVE_ADDR_sp_group = fw->info.sp.group;
- sp_dmem_load(SP0_ID,
- (unsigned int)sp_address_of(sp_group),
- &sp_group,
- sizeof(struct sh_css_sp_group));
- hmm_load(sp_group.pipe[thread_id].sp_stage_addr[stage_num],
- &sp_stage, sizeof(struct sh_css_sp_stage));
-
- hmm_load(sp_stage.isp_stage_addr,
- &isp_stage, sizeof(struct sh_css_isp_stage));
-
- for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++) {
- isp_stage.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].address =
- css_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].address;
- isp_stage.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].size =
- css_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].size;
- isp_stage.binary_info.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].address
- =
- isp_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].address;
- isp_stage.binary_info.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].size
- =
- isp_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].size;
- }
-
- hmm_store(sp_stage.isp_stage_addr,
- &isp_stage,
- sizeof(struct sh_css_isp_stage));
- }
- }
- }
- IA_CSS_LEAVE("err:%d handle:%u", err, fw_handle);
- return err;
-}
-
-#ifdef ISP2401
-static int
-aspect_ratio_crop_init(struct ia_css_stream *curr_stream,
- struct ia_css_pipe *pipes[],
- bool *do_crop_status)
-{
- int err = 0;
- int i;
- struct ia_css_pipe *curr_pipe;
- u32 pipe_mask = 0;
-
- if ((!curr_stream) ||
- (curr_stream->num_pipes == 0) ||
- (!pipes) ||
- (!do_crop_status)) {
- err = -EINVAL;
- IA_CSS_LEAVE_ERR(err);
- return err;
- }
-
- for (i = 0; i < curr_stream->num_pipes; i++) {
- curr_pipe = pipes[i];
- pipe_mask |= (1 << curr_pipe->config.mode);
- }
-
- *do_crop_status =
- (((pipe_mask & (1 << IA_CSS_PIPE_MODE_PREVIEW)) ||
- (pipe_mask & (1 << IA_CSS_PIPE_MODE_VIDEO))) &&
- (pipe_mask & (1 << IA_CSS_PIPE_MODE_CAPTURE)) &&
- curr_stream->config.continuous);
- return 0;
-}
-
-static bool
-aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe)
-{
- bool status = false;
-
- if ((curr_pipe) && enabled) {
- if ((curr_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) ||
- (curr_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) ||
- (curr_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE))
- status = true;
- }
-
- return status;
-}
-
-static int
-aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
- struct ia_css_resolution *effective_res)
-{
- int err = 0;
- struct ia_css_resolution crop_res;
- struct ia_css_resolution *in_res = NULL;
- struct ia_css_resolution *out_res = NULL;
- bool use_bds_output_info = false;
- bool use_vf_pp_in_res = false;
- bool use_capt_pp_in_res = false;
-
- if ((!curr_pipe) ||
- (!effective_res)) {
- err = -EINVAL;
- IA_CSS_LEAVE_ERR(err);
- return err;
- }
-
- if ((curr_pipe->config.mode != IA_CSS_PIPE_MODE_PREVIEW) &&
- (curr_pipe->config.mode != IA_CSS_PIPE_MODE_VIDEO) &&
- (curr_pipe->config.mode != IA_CSS_PIPE_MODE_CAPTURE)) {
- err = -EINVAL;
- IA_CSS_LEAVE_ERR(err);
- return err;
- }
-
- use_bds_output_info =
- ((curr_pipe->bds_output_info.res.width != 0) &&
- (curr_pipe->bds_output_info.res.height != 0));
-
- use_vf_pp_in_res =
- ((curr_pipe->config.vf_pp_in_res.width != 0) &&
- (curr_pipe->config.vf_pp_in_res.height != 0));
-
- use_capt_pp_in_res =
- ((curr_pipe->config.capt_pp_in_res.width != 0) &&
- (curr_pipe->config.capt_pp_in_res.height != 0));
-
- in_res = &curr_pipe->stream->config.input_config.effective_res;
- out_res = &curr_pipe->output_info[0].res;
-
- switch (curr_pipe->config.mode) {
- case IA_CSS_PIPE_MODE_PREVIEW:
- if (use_bds_output_info)
- out_res = &curr_pipe->bds_output_info.res;
- else if (use_vf_pp_in_res)
- out_res = &curr_pipe->config.vf_pp_in_res;
- break;
- case IA_CSS_PIPE_MODE_VIDEO:
- if (use_bds_output_info)
- out_res = &curr_pipe->bds_output_info.res;
- break;
- case IA_CSS_PIPE_MODE_CAPTURE:
- if (use_capt_pp_in_res)
- out_res = &curr_pipe->config.capt_pp_in_res;
- break;
- case IA_CSS_PIPE_MODE_ACC:
- case IA_CSS_PIPE_MODE_COPY:
- case IA_CSS_PIPE_MODE_YUVPP:
- default:
- IA_CSS_ERROR("aspect ratio cropping invalid args: mode[%d]\n",
- curr_pipe->config.mode);
- assert(0);
- break;
- }
-
- err = ia_css_frame_find_crop_resolution(in_res, out_res, &crop_res);
- if (!err)
- *effective_res = crop_res;
- else
- /* in case of error fallback to default
- * effective resolution from driver. */
- IA_CSS_LOG("ia_css_frame_find_crop_resolution() failed with err(%d)", err);
-
- return err;
-}
-#endif
-
static void
sh_css_hmm_buffer_record_init(void)
{
diff --git a/drivers/staging/media/atomisp/pci/sh_css_defs.h b/drivers/staging/media/atomisp/pci/sh_css_defs.h
index 30a84a587b2a..7eb10b226f0a 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_defs.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_defs.h
@@ -117,13 +117,8 @@ RGB[0,8191],coef[-8192,8191] -> RGB[0,8191]
#define SH_CSS_NUM_INPUT_BUF_LINES 4
/* Left cropping only applicable for sufficiently large nway */
-#if ISP_VEC_NELEMS == 16
-#define SH_CSS_MAX_LEFT_CROPPING 0
-#define SH_CSS_MAX_TOP_CROPPING 0
-#else
#define SH_CSS_MAX_LEFT_CROPPING 12
#define SH_CSS_MAX_TOP_CROPPING 12
-#endif
#define SH_CSS_SP_MAX_WIDTH 1280
@@ -137,13 +132,8 @@ RGB[0,8191],coef[-8192,8191] -> RGB[0,8191]
#define SH_CSS_MIN_DVS_ENVELOPE 12U
/* The FPGA system (vec_nelems == 16) only supports upto 5MP */
-#if ISP_VEC_NELEMS == 16
-#define SH_CSS_MAX_SENSOR_WIDTH 2560
-#define SH_CSS_MAX_SENSOR_HEIGHT 1920
-#else
#define SH_CSS_MAX_SENSOR_WIDTH 4608
#define SH_CSS_MAX_SENSOR_HEIGHT 3450
-#endif
/* Limited to reduce vmem pressure */
#if ISP_VMEM_DEPTH >= 3072
@@ -178,50 +168,20 @@ RGB[0,8191],coef[-8192,8191] -> RGB[0,8191]
#define SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD \
(HIVE_ISP_DDR_WORD_BYTES / SH_CSS_MORPH_TABLE_ELEM_BYTES)
-#define ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR (SH_CSS_MAX_BQ_GRID_WIDTH + 1)
-#define ISP2400_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR (SH_CSS_MAX_BQ_GRID_HEIGHT + 1)
-
-#define ISP2400_SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR \
- CEIL_MUL(ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
-
-/* TODO: I will move macros of "*_SCTBL_*" to SC kernel.
- "+ 2" should be "+ SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT". (michie, Sep/23/2014) */
-#define ISP2401_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR (SH_CSS_MAX_BQ_GRID_WIDTH + 2)
-#define ISP2401_SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR (SH_CSS_MAX_BQ_GRID_HEIGHT + 2)
+#define SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR (SH_CSS_MAX_BQ_GRID_WIDTH + 1)
+#define SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR (SH_CSS_MAX_BQ_GRID_HEIGHT + 1)
-#define ISP2401_SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR \
- CEIL_MUL(ISP2400_SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
+#define SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR \
+ CEIL_MUL(SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
/* Each line of this table is aligned to the maximum line width. */
#define SH_CSS_MAX_S3ATBL_WIDTH SH_CSS_MAX_BQ_GRID_WIDTH
/* Video mode specific DVS define */
/* The video binary supports a delay of 1 or 2 frames */
-#define VIDEO_FRAME_DELAY 2
+#define MAX_DVS_FRAME_DELAY 2
/* +1 because DVS reads the previous and writes the current frame concurrently */
-#define MAX_NUM_VIDEO_DELAY_FRAMES (VIDEO_FRAME_DELAY + 1)
-
-/* Preview mode specific DVS define. */
-/* In preview we only need GDC functionality (and not the DVS functionality) */
-/* The minimum number of DVS frames you need is 2, one were GDC reads from and another where GDC writes into */
-#define NUM_PREVIEW_DVS_FRAMES (2)
-
-/* TNR is no longer exclusive to video, SkyCam preview has TNR too (same kernel as video).
- * All uses the generic define NUM_TNR_FRAMES. The define NUM_VIDEO_TNR_FRAMES has been deprecated.
- *
- * Notes
- * 1) The value depends on the used TNR kernel and is not something that depends on the mode
- * and it is not something you just could choice.
- * 2) For the luma only pipeline a version that supports two different sets of TNR reference frames
- * is being used.
- *.
- */
-#define NUM_VALID_TNR_REF_FRAMES (1) /* At least one valid TNR reference frame is required */
-#define NUM_TNR_FRAMES_PER_REF_BUF_SET (2)
-/* In luma-only mode alternate illuminated frames are supported, that requires two double buffers */
-#define NUM_TNR_REF_BUF_SETS (1)
-
-#define NUM_TNR_FRAMES (NUM_TNR_FRAMES_PER_REF_BUF_SET * NUM_TNR_REF_BUF_SETS)
+#define MAX_NUM_VIDEO_DELAY_FRAMES (MAX_DVS_FRAME_DELAY + 1)
#define NUM_VIDEO_TNR_FRAMES 2
@@ -250,11 +210,11 @@ RGB[0,8191],coef[-8192,8191] -> RGB[0,8191]
CEIL_MUL(_ISP_MORPH_TABLE_WIDTH(width), \
SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD)
-#define _ISP2400_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+#define _ISP_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
(ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + 1)
-#define _ISP2400_SCTBL_HEIGHT(input_height, deci_factor_log2) \
+#define _ISP_SCTBL_HEIGHT(input_height, deci_factor_log2) \
(ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + 1)
-#define _ISP2400_SCTBL_ALIGNED_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+#define _ISP_SCTBL_ALIGNED_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
CEIL_MUL(_ISP_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2), \
ISP_VEC_NELEMS)
diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
index e1a16a50e588..94149647b98b 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_firmware.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
@@ -56,8 +56,7 @@ static struct firmware_header *firmware_header;
* which will be replaced with the actual RELEASE_VERSION
* during package generation. Please do not modify
*/
-static const char *isp2400_release_version = STR(irci_stable_candrpv_0415_20150521_0458);
-static const char *isp2401_release_version = STR(irci_ecr - master_20150911_0724);
+static const char *release_version = STR(irci_stable_candrpv_0415_20150521_0458);
#define MAX_FW_REL_VER_NAME 300
static char FW_rel_ver_name[MAX_FW_REL_VER_NAME] = "---";
@@ -190,13 +189,6 @@ sh_css_check_firmware_version(struct device *dev, const char *fw_data)
{
struct sh_css_fw_bi_file_h *file_header;
- const char *release_version;
-
- if (!IS_ISP2401)
- release_version = isp2400_release_version;
- else
- release_version = isp2401_release_version;
-
firmware_header = (struct firmware_header *)fw_data;
file_header = &firmware_header->file_header;
@@ -232,12 +224,6 @@ sh_css_load_firmware(struct device *dev, const char *fw_data,
struct ia_css_fw_info *binaries;
struct sh_css_fw_bi_file_h *file_header;
int ret;
- const char *release_version;
-
- if (!IS_ISP2401)
- release_version = isp2400_release_version;
- else
- release_version = isp2401_release_version;
firmware_header = (struct firmware_header *)fw_data;
file_header = &firmware_header->file_header;
diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.h b/drivers/staging/media/atomisp/pci/sh_css_firmware.h
index 66cd38f08f71..a73ce703adfb 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_firmware.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.h
@@ -29,9 +29,6 @@ struct sh_css_fw_bi_file_h {
};
extern struct ia_css_fw_info sh_css_sp_fw;
-#if defined(HAS_BL)
-extern struct ia_css_fw_info sh_css_bl_fw;
-#endif /* HAS_BL */
extern struct ia_css_blob_descr *sh_css_blob_info;
extern unsigned int sh_css_num_binaries;
diff --git a/drivers/staging/media/atomisp/pci/sh_css_internal.h b/drivers/staging/media/atomisp/pci/sh_css_internal.h
index 496faa7297a5..435b3cedd1c3 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_internal.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_internal.h
@@ -86,12 +86,6 @@
#define SH_CSS_MAX_IF_CONFIGS 3 /* Must match with IA_CSS_NR_OF_CONFIGS (not defined yet).*/
#define SH_CSS_IF_CONFIG_NOT_NEEDED 0xFF
-#define SH_CSS_ENABLE_METADATA
-
-#if defined(SH_CSS_ENABLE_METADATA) && !defined(ISP2401)
-#define SH_CSS_ENABLE_METADATA_THREAD
-#endif
-
/*
* SH_CSS_MAX_SP_THREADS:
* sp threads visible to host with connected communication queues
@@ -101,7 +95,7 @@
* these threads can't be used as image pipe
*/
-#if defined(SH_CSS_ENABLE_METADATA_THREAD)
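+/* The SP-internal metadata thread exists only on ISP2400, not on ISP2401 */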
+#if !defined(ISP2401)
#define SH_CSS_SP_INTERNAL_METADATA_THREAD 1
#else
#define SH_CSS_SP_INTERNAL_METADATA_THREAD 0
@@ -276,7 +270,7 @@ struct sh_css_binary_args {
struct ia_css_frame *in_frame; /* input frame */
const struct ia_css_frame
*delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES]; /* reference input frame */
- const struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES]; /* tnr frames */
+ const struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES]; /* tnr frames */
struct ia_css_frame
*out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; /* output frame */
struct ia_css_frame *out_vf_frame; /* viewfinder output frame */
@@ -526,7 +520,6 @@ struct sh_css_sp_pipeline {
this struct; needs cleanup */
s32 num_execs; /* number of times to run if this is
an acceleration pipe. */
-#if defined(SH_CSS_ENABLE_METADATA)
struct {
u32 format; /* Metadata format in hrt format */
u32 width; /* Width of a line */
@@ -535,10 +528,7 @@ struct sh_css_sp_pipeline {
u32 size; /* Total size (in bytes) */
ia_css_ptr cont_buf; /* Address of continuous buffer */
} metadata;
-#endif
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
u32 output_frame_queue_id;
-#endif
union {
struct {
u32 bytes_available;
@@ -551,14 +541,6 @@ struct sh_css_sp_pipeline {
u32 raw_bit_depth;
} raw;
} copy;
-
-/* ISP2401 */
-
- /* Parameters passed to Shading Correction kernel. */
- struct {
- u32 internal_frame_origin_x_bqs_on_sctbl; /* Origin X (bqs) of internal frame on shading table */
- u32 internal_frame_origin_y_bqs_on_sctbl; /* Origin Y (bqs) of internal frame on shading table */
- } shading;
};
/*
@@ -580,9 +562,7 @@ struct ia_css_frames_sp {
struct ia_css_frame_sp_info internal_frame_info;
struct ia_css_buffer_sp s3a_buf;
struct ia_css_buffer_sp dvs_buf;
-#if defined SH_CSS_ENABLE_METADATA
struct ia_css_buffer_sp metadata_buf;
-#endif
};
/* Information for a single pipeline stage for an ISP */
@@ -695,8 +675,6 @@ struct sh_css_sp_output {
unsigned int sw_interrupt_value[SH_CSS_NUM_SDW_IRQS];
};
-#define CONFIG_ON_FRAME_ENQUEUE() 0
-
/**
* @brief Data structure for the circular buffer.
* The circular buffer is empty if "start == end". The
@@ -734,9 +712,6 @@ struct sh_css_hmm_buffer {
u32 exp_id;
u32 isp_parameters_id; /** Unique ID to track which config was
actually applied to a particular frame */
-#if CONFIG_ON_FRAME_ENQUEUE()
- struct sh_css_config_on_frame_enqueue config_on_frame_enqueue;
-#endif
} frame;
ia_css_ptr ddr_ptrs;
} payload;
@@ -752,16 +727,9 @@ struct sh_css_hmm_buffer {
clock_value_t isys_eof_clock_tick;
};
-#if CONFIG_ON_FRAME_ENQUEUE()
-#define SIZE_OF_FRAME_STRUCT \
- (SIZE_OF_HRT_VADDRESS + \
- (3 * sizeof(uint32_t)) + \
- sizeof(uint32_t))
-#else
#define SIZE_OF_FRAME_STRUCT \
(SIZE_OF_HRT_VADDRESS + \
(3 * sizeof(uint32_t)))
-#endif
#define SIZE_OF_PAYLOAD_UNION \
(MAX(MAX(MAX(MAX( \
diff --git a/drivers/staging/media/atomisp/pci/sh_css_metadata.c b/drivers/staging/media/atomisp/pci/sh_css_metadata.c
deleted file mode 100644
index 04a4b7da85e2..000000000000
--- a/drivers/staging/media/atomisp/pci/sh_css_metadata.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-/* This file will contain the code to implement the functions declared in ia_css_metadata.h
- and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_metrics.c b/drivers/staging/media/atomisp/pci/sh_css_metrics.c
index 9744bbebe1bc..8ded6cdd1575 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_metrics.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_metrics.c
@@ -123,40 +123,15 @@ sh_css_metrics_sample_pcs(void)
unsigned int pc;
unsigned int msink;
-#if SUSPEND
- unsigned int sc = 0;
- unsigned int stopped_sc = 0;
- unsigned int resume_sc = 0;
-#endif
-#if MULTIPLE_PCS
- int i;
- unsigned int pc_tab[NOF_PCS];
-
- for (i = 0; i < NOF_PCS; i++)
- pc_tab[i] = 0;
-#endif
if (!pc_histogram_enabled)
return;
if (isp_histogram) {
-#if SUSPEND
- /* STOP the ISP */
- isp_ctrl_store(ISP0_ID, ISP_SC_REG, STOP_MASK);
-#endif
msink = isp_ctrl_load(ISP0_ID, ISP_CTRL_SINK_REG);
-#if MULTIPLE_PCS
- for (i = 0; i < NOF_PCS; i++)
- pc_tab[i] = isp_ctrl_load(ISP0_ID, ISP_PC_REG);
-#else
pc = isp_ctrl_load(ISP0_ID, ISP_PC_REG);
-#endif
-#if SUSPEND
- /* RESUME the ISP */
- isp_ctrl_store(ISP0_ID, ISP_SC_REG, RESUME_MASK);
-#endif
isp_histogram->msink[pc] &= msink;
stall = (msink != 0x7FF);
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
index 75489f7d75ee..0acf75497ae7 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
@@ -33,47 +33,6 @@
static u32
ref_count_mipi_allocation[N_CSI_PORTS]; /* Initialized in mipi_init */
-/*
- * Check if a source port or TPG/PRBS ID is valid
- */
-static bool ia_css_mipi_is_source_port_valid(struct ia_css_pipe *pipe,
- unsigned int *pport)
-{
- bool ret = true;
- unsigned int port = 0;
- unsigned int max_ports = 0;
-
- switch (pipe->stream->config.mode) {
- case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
- port = (unsigned int)pipe->stream->config.source.port.port;
- max_ports = N_CSI_PORTS;
- break;
- case IA_CSS_INPUT_MODE_TPG:
- port = (unsigned int)pipe->stream->config.source.tpg.id;
- max_ports = N_CSS_TPG_IDS;
- break;
- case IA_CSS_INPUT_MODE_PRBS:
- port = (unsigned int)pipe->stream->config.source.prbs.id;
- max_ports = N_CSS_PRBS_IDS;
- break;
- default:
- assert(false);
- ret = false;
- break;
- }
-
- if (ret) {
- assert(port < max_ports);
-
- if (port >= max_ports)
- ret = false;
- }
-
- *pport = port;
-
- return ret;
-}
-
/* Assumptions:
* - A line is multiple of 4 bytes = 1 word.
* - Each frame has SOF and EOF (each 1 word).
@@ -133,15 +92,11 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
break;
case ATOMISP_INPUT_FORMAT_YUV420_10: /* odd 4p, 5B, 40bits, even 4p, 10B, 80bits */
case ATOMISP_INPUT_FORMAT_RAW_10: /* 4p, 5B, 40bits */
-#if !defined(HAS_NO_PACKED_RAW_PIXELS)
/* The changes will be reverted as soon as RAW
* Buffers are deployed by the 2401 Input System
* in the non-continuous use scenario.
*/
bits_per_pixel = 10;
-#else
- bits_per_pixel = 16;
-#endif
break;
case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: /* 2p, 3B, 24bits */
case ATOMISP_INPUT_FORMAT_RAW_12: /* 2p, 3B, 24bits */
@@ -231,6 +186,10 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
return err;
}
#if !defined(ISP2401)
int
ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
@@ -265,16 +224,31 @@ mipi_init(void)
ref_count_mipi_allocation[i] = 0;
}
-int
-calculate_mipi_buff_size(
- struct ia_css_stream_config *stream_cfg,
- unsigned int *size_mem_words)
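+/* Report whether all CSI ports have released their MIPI frame buffers */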
+bool mipi_is_free(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < N_CSI_PORTS; i++)
+ if (ref_count_mipi_allocation[i])
+ return false;
+
+ return true;
+}
+
+#if defined(ISP2401)
+/*
+ * @brief Calculate the required MIPI buffer sizes.
+ * Based on the stream configuration, calculate the
+ * required MIPI buffer sizes (in DDR words).
+ *
+ * @param[in]	stream_cfg	Pointer to the target stream configuration
+ * @param[out] size_mem_words MIPI buffer size in DDR words.
+ *
+ * @return	0 on success; a negative error code on failure.
+ */
+static int calculate_mipi_buff_size(struct ia_css_stream_config *stream_cfg,
+ unsigned int *size_mem_words)
{
-#if !defined(ISP2401)
- int err = -EINVAL;
- (void)stream_cfg;
- (void)size_mem_words;
-#else
unsigned int width;
unsigned int height;
enum atomisp_input_format format;
@@ -366,26 +340,9 @@ calculate_mipi_buff_size(
*size_mem_words = mem_words_per_buff;
IA_CSS_LEAVE_ERR(err);
-#endif
return err;
}
-
-static bool buffers_needed(struct ia_css_pipe *pipe)
-{
- if (!IS_ISP2401) {
- if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
- return false;
- else
- return true;
- }
-
- if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR ||
- pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG ||
- pipe->stream->config.mode == IA_CSS_INPUT_MODE_PRBS)
- return false;
-
- return true;
-}
+#endif
int
allocate_mipi_frames(struct ia_css_pipe *pipe,
@@ -415,43 +372,25 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
}
#endif
-
- if (!buffers_needed(pipe)) {
+ if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) exit: no buffers needed for pipe mode.\n",
pipe);
return 0; /* AM TODO: Check */
}
- if (!IS_ISP2401)
- port = (unsigned int)pipe->stream->config.source.port.port;
- else
- err = ia_css_mipi_is_source_port_valid(pipe, &port);
-
- assert(port < N_CSI_PORTS);
-
- if (port >= N_CSI_PORTS || err) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "allocate_mipi_frames(%p) exit: error: port is not correct (port=%d).\n",
- pipe, port);
+ port = (unsigned int)pipe->stream->config.source.port.port;
+ if (port >= N_CSI_PORTS) {
+ IA_CSS_ERROR("allocate_mipi_frames(%p) exit: port is not correct (port=%d).",
+ pipe, port);
return -EINVAL;
}
#ifdef ISP2401
- err = calculate_mipi_buff_size(
- &pipe->stream->config,
- &my_css.mipi_frame_size[port]);
-#endif
-
-#if !defined(ISP2401)
- if (ref_count_mipi_allocation[port] != 0) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "allocate_mipi_frames(%p) exit: already allocated for this port (port=%d).\n",
- pipe, port);
- return 0;
- }
-#else
- /* 2401 system allows multiple streams to use same physical port. This is not
+ err = calculate_mipi_buff_size(&pipe->stream->config,
+ &my_css.mipi_frame_size[port]);
+ /*
+	 * 2401 system allows multiple streams to use the same physical port. This is not
* true for 2400 system. Currently 2401 uses MIPI buffers as a temporary solution.
* TODO AM: Once that is changed (removed) this code should be removed as well.
* In that case only 2400 related code should remain.
@@ -463,6 +402,13 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
pipe, port);
return 0;
}
+#else
+ if (ref_count_mipi_allocation[port] != 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: already allocated for this port (port=%d).\n",
+ pipe, port);
+ return 0;
+ }
#endif
ref_count_mipi_allocation[port]++;
@@ -494,9 +440,8 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
my_css.mipi_frames[port][j] = NULL;
}
}
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "allocate_mipi_frames(%p, %d) exit: error: allocation failed.\n",
- pipe, port);
+ IA_CSS_ERROR("allocate_mipi_frames(%p, %d) exit: allocation failed.",
+ pipe, port);
return err;
}
}
@@ -539,30 +484,22 @@ free_mipi_frames(struct ia_css_pipe *pipe)
if (pipe) {
assert(pipe->stream);
if ((!pipe) || (!pipe->stream)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "free_mipi_frames(%p) exit: error: pipe or stream is null.\n",
- pipe);
+ IA_CSS_ERROR("free_mipi_frames(%p) exit: pipe or stream is null.",
+ pipe);
return -EINVAL;
}
- if (!buffers_needed(pipe)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "free_mipi_frames(%p) exit: error: wrong mode.\n",
- pipe);
+ if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ IA_CSS_ERROR("free_mipi_frames(%p) exit: wrong mode.",
+ pipe);
return err;
}
- if (!IS_ISP2401)
- port = (unsigned int)pipe->stream->config.source.port.port;
- else
- err = ia_css_mipi_is_source_port_valid(pipe, &port);
-
- assert(port < N_CSI_PORTS);
+ port = (unsigned int)pipe->stream->config.source.port.port;
- if (port >= N_CSI_PORTS || err) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "free_mipi_frames(%p, %d) exit: error: pipe port is not correct.\n",
- pipe, port);
+ if (port >= N_CSI_PORTS) {
+ IA_CSS_ERROR("free_mipi_frames(%p, %d) exit: pipe port is not correct.",
+ pipe, port);
return err;
}
@@ -570,9 +507,8 @@ free_mipi_frames(struct ia_css_pipe *pipe)
#if !defined(ISP2401)
assert(ref_count_mipi_allocation[port] == 1);
if (ref_count_mipi_allocation[port] != 1) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "free_mipi_frames(%p) exit: error: wrong ref_count (ref_count=%d).\n",
- pipe, ref_count_mipi_allocation[port]);
+ IA_CSS_ERROR("free_mipi_frames(%p) exit: wrong ref_count (ref_count=%d).",
+ pipe, ref_count_mipi_allocation[port]);
return err;
}
#endif
@@ -640,11 +576,7 @@ send_mipi_frames(struct ia_css_pipe *pipe)
{
int err = -EINVAL;
unsigned int i;
-#ifndef ISP2401
unsigned int port;
-#else
- unsigned int port = 0;
-#endif
IA_CSS_ENTER_PRIVATE("pipe=%p", pipe);
@@ -657,21 +589,16 @@ send_mipi_frames(struct ia_css_pipe *pipe)
/* multi stream video needs mipi buffers */
/* nothing to be done in other cases. */
- if (!buffers_needed(pipe)) {
+ if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
IA_CSS_LOG("nothing to be done for this mode");
return 0;
/* TODO: AM: maybe this should be returning an error. */
}
- if (!IS_ISP2401)
- port = (unsigned int)pipe->stream->config.source.port.port;
- else
- err = ia_css_mipi_is_source_port_valid(pipe, &port);
-
- assert(port < N_CSI_PORTS);
+ port = (unsigned int)pipe->stream->config.source.port.port;
- if (port >= N_CSI_PORTS || err) {
- IA_CSS_ERROR("send_mipi_frames(%p) exit: invalid port specified (port=%d).\n",
+ if (port >= N_CSI_PORTS) {
+ IA_CSS_ERROR("send_mipi_frames(%p) exit: invalid port specified (port=%d).",
pipe, port);
return err;
}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.h b/drivers/staging/media/atomisp/pci/sh_css_mipi.h
index 52f08a103883..e6c86d0ac483 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_mipi.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.h
@@ -23,6 +23,8 @@
void
mipi_init(void);
+bool mipi_is_free(void);
+
int
allocate_mipi_frames(struct ia_css_pipe *pipe, struct ia_css_stream_info *info);
@@ -32,19 +34,4 @@ free_mipi_frames(struct ia_css_pipe *pipe);
int
send_mipi_frames(struct ia_css_pipe *pipe);
-/**
- * @brief Calculate the required MIPI buffer sizes.
- * Based on the stream configuration, calculate the
- * required MIPI buffer sizes (in DDR words).
- *
- * @param[in] stream_cfg Point to the target stream configuration
- * @param[out] size_mem_words MIPI buffer size in DDR words.
- *
- * @return
- */
-int
-calculate_mipi_buff_size(
- struct ia_css_stream_config *stream_cfg,
- unsigned int *size_mem_words);
-
#endif /* __SH_CSS_MIPI_H */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_morph.c b/drivers/staging/media/atomisp/pci/sh_css_morph.c
deleted file mode 100644
index edd1da941ccb..000000000000
--- a/drivers/staging/media/atomisp/pci/sh_css_morph.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-/* This file will contain the code to implement the functions declared in ia_css_morph.h
- and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_shading.c b/drivers/staging/media/atomisp/pci/sh_css_param_shading.c
index 69cc4e423d8b..41a4c9162319 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_param_shading.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_shading.c
@@ -118,8 +118,10 @@ crop_and_interpolate(unsigned int cropped_width,
int ty, src_y0, src_y1;
unsigned int sy0, sy1, dy0, dy1, divy;
- /* calculate target point and make sure it falls within
- the table */
+ /*
+ * calculate target point and make sure it falls within
+ * the table
+ */
ty = out_start_row + i * out_cell_size;
/* calculate closest source points in shading table and
@@ -166,19 +168,24 @@ crop_and_interpolate(unsigned int cropped_width,
src_x1 = clamp(src_x1, 0, (int)table_width - 1);
tx = min(clamp(tx, 0, (int)sensor_width - 1),
(int)table_cell_w);
- /* calculate closest source points for distance
- computation */
+ /*
+ * calculate closest source points for distance
+ * computation
+ */
sx0 = min(src_x0 * in_cell_size, sensor_width - 1);
sx1 = min(src_x1 * in_cell_size, sensor_width - 1);
- /* calculate distances between source and target
- pixels */
+ /*
+ * calculate distances between source and target
+ * pixels
+ */
dx0 = tx - sx0;
dx1 = sx1 - tx;
divx = sx1 - sx0;
/* if we're at the edge, we just use the closest
- point still in the grid. We make up for the divider
- in this case by setting the distance to
- out_cell_size, since it's actually 0. */
+		 * point still in the grid. We make up for the divider
+		 * in this case by setting the distance to
+		 * out_cell_size, since it's actually 0.
+ */
if (divx == 0) {
dx0 = 1;
divx = 1;
@@ -242,8 +249,8 @@ prepare_shading_table(const struct ia_css_shading_table *in_table,
if (!in_table) {
sh_css_params_shading_id_table_generate(target_table,
- binary->sctbl_legacy_width_per_color,
- binary->sctbl_legacy_height);
+ binary->sctbl_width_per_color,
+ binary->sctbl_height);
return;
}
@@ -271,43 +278,33 @@ prepare_shading_table(const struct ia_css_shading_table *in_table,
bds_denominator -
binary->info->sp.pipeline.top_cropping;
-#if !defined(USE_WINDOWS_BINNING_FACTOR)
- /* @deprecated{This part of the code will be replaced by the code
- * in the #else section below to make the calculation same across
- * all platforms.
- * Android and Windows platforms interpret the binning_factor parameter
- * differently. In Android, the binning factor is expressed in the form
- * 2^N * 2^N, whereas in Windows platform, the binning factor is N*N}
+ /*
+ * We take into account the binning done by the sensor. We do this
+ * by cropping the non-binned part of the shading table and then
+ * increasing the size of a grid cell with this same binning factor.
*/
-
- /* We take into account the binning done by the sensor. We do this
- by cropping the non-binned part of the shading table and then
- increasing the size of a grid cell with this same binning factor. */
input_width <<= sensor_binning;
input_height <<= sensor_binning;
- /* We also scale the padding by the same binning factor. This will
- make it much easier later on to calculate the padding of the
- shading table. */
+ /*
+ * We also scale the padding by the same binning factor. This will
+ * make it much easier later on to calculate the padding of the
+ * shading table.
+ */
left_padding <<= sensor_binning;
right_padding <<= sensor_binning;
top_padding <<= sensor_binning;
-#else
- input_width *= sensor_binning;
- input_height *= sensor_binning;
- left_padding *= sensor_binning;
- right_padding *= sensor_binning;
- top_padding *= sensor_binning;
-#endif /*USE_WINDOWS_BINNING_FACTOR*/
-
- /* during simulation, the used resolution can exceed the sensor
- resolution, so we clip it. */
+
+ /*
+ * during simulation, the used resolution can exceed the sensor
+ * resolution, so we clip it.
+ */
input_width = min(input_width, in_table->sensor_width);
input_height = min(input_height, in_table->sensor_height);
/* This prepare_shading_table() function is called only in legacy API (not in new API).
Then, the legacy shading table width and height should be used. */
- table_width = binary->sctbl_legacy_width_per_color;
- table_height = binary->sctbl_legacy_height;
+ table_width = binary->sctbl_width_per_color;
+ table_height = binary->sctbl_height;
result = ia_css_shading_table_alloc(table_width, table_height);
if (!result) {
@@ -318,8 +315,10 @@ prepare_shading_table(const struct ia_css_shading_table *in_table,
result->sensor_height = in_table->sensor_height;
result->fraction_bits = in_table->fraction_bits;
- /* now we crop the original shading table and then interpolate to the
- requested resolution and decimation factor. */
+ /*
+ * now we crop the original shading table and then interpolate to the
+ * requested resolution and decimation factor.
+ */
for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
crop_and_interpolate(input_width, input_height,
left_padding, right_padding, top_padding,
@@ -376,9 +375,10 @@ ia_css_shading_table_free(struct ia_css_shading_table *table)
if (!table)
return;
- /* We only output logging when the table is not NULL, otherwise
+ /*
+ * We only output logging when the table is not NULL, otherwise
* logs will give the impression that a table was freed.
- * */
+ */
IA_CSS_ENTER("");
for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c
index dbd3bfe3d343..09f87c285b8d 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_params.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_params.c
@@ -16,12 +16,10 @@
#include "gdc_device.h" /* gdc_lut_store(), ... */
#include "isp.h" /* ISP_VEC_ELEMBITS */
#include "vamem.h"
-#if !defined(HAS_NO_HMEM)
#ifndef __INLINE_HMEM__
#define __INLINE_HMEM__
#endif
#include "hmem.h"
-#endif /* !defined(HAS_NO_HMEM) */
#define IA_CSS_INCLUDE_PARAMETERS
#define IA_CSS_INCLUDE_ACC_PARAMETERS
@@ -96,9 +94,6 @@
#include "xnr/xnr_3.0/ia_css_xnr3.host.h"
-#if defined(HAS_OUTPUT_SYSTEM)
-#include <components/output_system/sc_output_system_1.0/host/output_system.host.h>
-#endif
#include "sh_css_frac.h"
#include "ia_css_bufq.h"
@@ -107,15 +102,10 @@
(sizeof(char) * (binary)->in_frame_info.res.height * \
(binary)->in_frame_info.padded_width)
-#define ISP2400_SCTBL_BYTES(binary) \
+#define SCTBL_BYTES(binary) \
(sizeof(unsigned short) * (binary)->sctbl_height * \
(binary)->sctbl_aligned_width_per_color * IA_CSS_SC_NUM_COLORS)
-#define ISP2401_SCTBL_BYTES(binary) \
- (sizeof(unsigned short) * max((binary)->sctbl_height, (binary)->sctbl_legacy_height) * \
- /* height should be the larger height between new api and legacy api */ \
- (binary)->sctbl_aligned_width_per_color * IA_CSS_SC_NUM_COLORS)
-
#define MORPH_PLANE_BYTES(binary) \
(SH_CSS_MORPH_TABLE_ELEM_BYTES * (binary)->morph_tbl_aligned_width * \
(binary)->morph_tbl_height)
@@ -734,13 +724,11 @@ sh_css_set_global_isp_config_on_pipe(
const struct ia_css_isp_config *config,
struct ia_css_pipe *pipe);
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
static int
sh_css_set_per_frame_isp_config_on_pipe(
struct ia_css_stream *stream,
const struct ia_css_isp_config *config,
struct ia_css_pipe *pipe);
-#endif
static int
sh_css_update_uds_and_crop_info_based_on_zoom_region(
@@ -1031,16 +1019,6 @@ sh_css_params_set_binning_factor(struct ia_css_stream *stream,
}
static void
-sh_css_update_shading_table_status(struct ia_css_pipe *pipe,
- struct ia_css_isp_parameters *params)
-{
- if (params && pipe && (pipe->pipe_num != params->sc_table_last_pipe_num)) {
- params->sc_table_dirty = true;
- params->sc_table_last_pipe_num = pipe->pipe_num;
- }
-}
-
-static void
sh_css_set_shading_table(struct ia_css_stream *stream,
struct ia_css_isp_parameters *params,
const struct ia_css_shading_table *table)
@@ -1053,10 +1031,9 @@ sh_css_set_shading_table(struct ia_css_stream *stream,
if (!table->enable)
table = NULL;
- if ((table != params->sc_table) || params->sc_table_dirty) {
+ if (table != params->sc_table) {
params->sc_table = table;
params->sc_table_changed = true;
- params->sc_table_dirty = false;
/* Not very clean, this goes to sh_css.c to invalidate the
* shading table for all pipes. Should replaced by a loop
* and a pipe-specific call.
@@ -1510,10 +1487,8 @@ ia_css_translate_3a_statistics(
ia_css_s3a_vmem_decode(host_stats, isp_stats->vmem_stats_hi,
isp_stats->vmem_stats_lo);
}
-#if !defined(HAS_NO_HMEM)
IA_CSS_LOG("3A: HMEM");
ia_css_s3a_hmem_decode(host_stats, isp_stats->hmem_stats);
-#endif
IA_CSS_LEAVE("void");
}
@@ -1616,57 +1591,6 @@ ia_css_set_param_exceptions(const struct ia_css_pipe *pipe,
params->dp_config.r = params->wb_config.r;
params->dp_config.b = params->wb_config.b;
params->dp_config.gb = params->wb_config.gb;
-
- if (IS_ISP2401) {
- assert(pipe);
- assert(pipe->mode < IA_CSS_PIPE_ID_NUM);
-
- if (pipe->mode < IA_CSS_PIPE_ID_NUM) {
- params->pipe_dp_config[pipe->mode].gr = params->wb_config.gr;
- params->pipe_dp_config[pipe->mode].r = params->wb_config.r;
- params->pipe_dp_config[pipe->mode].b = params->wb_config.b;
- params->pipe_dp_config[pipe->mode].gb = params->wb_config.gb;
- }
- }
-}
-
-/* ISP2401 */
-static void
-sh_css_set_dp_config(const struct ia_css_pipe *pipe,
- struct ia_css_isp_parameters *params,
- const struct ia_css_dp_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- assert(pipe);
- assert(pipe->mode < IA_CSS_PIPE_ID_NUM);
-
- IA_CSS_ENTER_PRIVATE("config=%p", config);
- ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
- if (pipe->mode < IA_CSS_PIPE_ID_NUM) {
- params->pipe_dp_config[pipe->mode] = *config;
- params->pipe_dpc_config_changed[pipe->mode] = true;
- }
- IA_CSS_LEAVE_PRIVATE("void");
-}
-
-static void
-sh_css_get_dp_config(const struct ia_css_pipe *pipe,
- const struct ia_css_isp_parameters *params,
- struct ia_css_dp_config *config)
-{
- if (!config)
- return;
-
- assert(params);
- assert(pipe);
- IA_CSS_ENTER_PRIVATE("config=%p", config);
-
- *config = params->pipe_dp_config[pipe->mode];
-
- IA_CSS_LEAVE_PRIVATE("void");
}
static void
@@ -1740,9 +1664,7 @@ sh_css_set_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe,
copy_dvs_6axis_table(params->pipe_dvs_6axis_config[pipe->mode], dvs_config);
-#if !defined(HAS_NO_DVS_6AXIS_CONFIG_UPDATE)
params->pipe_dvs_6axis_config_changed[pipe->mode] = true;
-#endif
IA_CSS_LEAVE_PRIVATE("void");
}
@@ -1908,11 +1830,9 @@ ia_css_stream_set_isp_config_on_pipe(
IA_CSS_ENTER("stream=%p, config=%p, pipe=%p", stream, config, pipe);
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
if (config->output_frame)
err = sh_css_set_per_frame_isp_config_on_pipe(stream, config, pipe);
else
-#endif
err = sh_css_set_global_isp_config_on_pipe(stream->pipes[0], config, pipe);
IA_CSS_LEAVE_ERR(err);
@@ -1933,11 +1853,9 @@ ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "config=%p\n", config);
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
if (config->output_frame)
err = sh_css_set_per_frame_isp_config_on_pipe(pipe->stream, config, pipe);
else
-#endif
err = sh_css_set_global_isp_config_on_pipe(pipe, config, pipe_in);
IA_CSS_LEAVE_ERR(err);
return err;
@@ -1972,7 +1890,6 @@ sh_css_set_global_isp_config_on_pipe(
return err;
}
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
static int
sh_css_set_per_frame_isp_config_on_pipe(
struct ia_css_stream *stream,
@@ -2042,7 +1959,6 @@ exit:
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
-#endif
static int
sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
@@ -2067,7 +1983,6 @@ sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
sh_css_set_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config);
sh_css_set_dz_config(params, config->dz_config);
sh_css_set_motion_vector(params, config->motion_vector);
- sh_css_update_shading_table_status(pipe_in, params);
sh_css_set_shading_table(pipe->stream, params, config->shading_table);
sh_css_set_morph_table(params, config->morph_table);
sh_css_set_macc_table(params, config->macc_table);
@@ -2083,16 +1998,6 @@ sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
params->output_frame = config->output_frame;
params->isp_parameters_id = config->isp_config_id;
- /* Currently we do not offer CSS interface to set different
- * configurations for DPC, i.e. depending on DPC being enabled
- * before (NORM+OBC) or after. The folllowing code to set the
- * DPC configuration should be updated when this interface is made
- * available */
- if (IS_ISP2401) {
- sh_css_set_dp_config(pipe, params, config->dp_config);
- ia_css_set_param_exceptions(pipe, params);
- }
-
if (0 ==
sh_css_select_dp_10bpp_config(pipe, &is_dp_10bpp)) {
/* return an error when both DPC and BDS is enabled by the
@@ -2107,8 +2012,7 @@ sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
goto exit;
}
- if (!IS_ISP2401)
- ia_css_set_param_exceptions(pipe, params);
+ ia_css_set_param_exceptions(pipe, params);
exit:
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -2143,7 +2047,6 @@ ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
sh_css_get_ee_config(params, config->ee_config);
sh_css_get_baa_config(params, config->baa_config);
sh_css_get_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config);
- sh_css_get_dp_config(pipe, params, config->dp_config);
sh_css_get_macc_table(params, config->macc_table);
sh_css_get_gamma_table(params, config->gamma_table);
sh_css_get_ctc_table(params, config->ctc_table);
@@ -2250,9 +2153,7 @@ ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
me->vmem_size = ISP_S3ATBL_HI_LO_STRIDE_BYTES *
grid->aligned_height;
}
-#if !defined(HAS_NO_HMEM)
me->hmem_size = sizeof_hmem(HMEM0_ID);
-#endif
/* All subsections need to be aligned to the system bus width */
me->dmem_size = CEIL_MUL(me->dmem_size, HIVE_ISP_DDR_WORD_BYTES);
@@ -2431,7 +2332,7 @@ sh_css_create_isp_params(struct ia_css_stream *stream,
unsigned int i;
struct sh_css_ddr_address_map *ddr_ptrs;
struct sh_css_ddr_address_map_size *ddr_ptrs_size;
- int err = 0;
+ int err;
size_t params_size;
struct ia_css_isp_parameters *params =
kvmalloc(sizeof(struct ia_css_isp_parameters), GFP_KERNEL);
@@ -2473,7 +2374,11 @@ sh_css_create_isp_params(struct ia_css_stream *stream,
succ &= (ddr_ptrs->macc_tbl != mmgr_NULL);
*isp_params_out = params;
- return err;
+
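+	/* succ is false if any of the DDR allocations above failed */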
+ if (!succ)
+ return -ENOMEM;
+
+ return 0;
}
static bool
@@ -2521,29 +2426,7 @@ sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
ia_css_set_ob_config(params, &default_ob_config);
ia_css_set_dp_config(params, &default_dp_config);
- if (!IS_ISP2401) {
- ia_css_set_param_exceptions(pipe_in, params);
- } else {
- for (i = 0; i < stream->num_pipes; i++) {
- if (sh_css_select_dp_10bpp_config(stream->pipes[i],
- &is_dp_10bpp) == 0) {
- /* set the return value as false if both DPC and
- * BDS is enabled by the user. But we do not return
- * the value immediately to enable internal firmware
- * feature testing. */
- if (is_dp_10bpp) {
- sh_css_set_dp_config(stream->pipes[i], params, &default_dp_10bpp_config);
- } else {
- sh_css_set_dp_config(stream->pipes[i], params, &default_dp_config);
- }
- } else {
- retval = false;
- goto exit;
- }
-
- ia_css_set_param_exceptions(stream->pipes[i], params);
- }
- }
+ ia_css_set_param_exceptions(pipe_in, params);
ia_css_set_de_config(params, &default_de_config);
ia_css_set_gc_config(params, &default_gc_config);
@@ -2580,8 +2463,6 @@ sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
params->sc_table = NULL;
params->sc_table_changed = true;
- params->sc_table_dirty = false;
- params->sc_table_last_pipe_num = 0;
ia_css_sdis2_clear_coefficients(&params->dvs2_coefs);
params->dvs2_coef_table_changed = true;
@@ -2639,29 +2520,15 @@ sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
* BDS is enabled by the user. But we do not return
* the value immediately to enable internal firmware
* feature testing. */
-
- if (is_dp_10bpp) {
- retval = false;
- /* FIXME: should it ignore this error? */
- }
+ retval = !is_dp_10bpp;
+ /* FIXME: should it ignore this error? */
} else {
retval = false;
goto exit;
}
- if (IS_ISP2401) {
- if (stream->pipes[i]->mode < IA_CSS_PIPE_ID_NUM) {
- sh_css_set_dp_config(stream->pipes[i], params,
- &stream_params->pipe_dp_config[stream->pipes[i]->mode]);
- ia_css_set_param_exceptions(stream->pipes[i], params);
- } else {
- retval = false;
- goto exit;
- }
- }
}
- if (!IS_ISP2401)
- ia_css_set_param_exceptions(pipe_in, params);
+ ia_css_set_param_exceptions(pipe_in, params);
params->fpn_config.data = stream_params->fpn_config.data;
params->config_changed[IA_CSS_FPN_ID] =
@@ -2672,13 +2539,10 @@ sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
sh_css_set_morph_table(params, stream_params->morph_table);
if (stream_params->sc_table) {
- sh_css_update_shading_table_status(pipe_in, params);
sh_css_set_shading_table(stream, params, stream_params->sc_table);
} else {
params->sc_table = NULL;
params->sc_table_changed = true;
- params->sc_table_dirty = false;
- params->sc_table_last_pipe_num = 0;
}
/* Only IA_CSS_PIPE_ID_VIDEO & IA_CSS_PIPE_ID_CAPTURE will support dvs_6axis_config*/
@@ -2771,18 +2635,6 @@ static void host_lut_store(const void *lut)
gdc_lut_store((gdc_ID_t)i, (const int (*)[HRT_GDC_N]) lut);
}
-/* Note that allocation is in ipu address space. */
-inline ia_css_ptr sh_css_params_alloc_gdc_lut(void)
-{
- return hmm_alloc(sizeof(zoom_table), HMM_BO_PRIVATE, 0, NULL, 0);
-}
-
-inline void sh_css_params_free_gdc_lut(ia_css_ptr addr)
-{
- if (addr != mmgr_NULL)
- hmm_free(addr);
-}
-
int ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
const void *lut)
{
@@ -2809,14 +2661,13 @@ int ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
}
/* Free any existing tables. */
- sh_css_params_free_gdc_lut(pipe->scaler_pp_lut);
- pipe->scaler_pp_lut = mmgr_NULL;
+ if (pipe->scaler_pp_lut != mmgr_NULL) {
+ hmm_free(pipe->scaler_pp_lut);
+ pipe->scaler_pp_lut = mmgr_NULL;
+ }
if (!stream_started) {
- if (!IS_ISP2401)
- pipe->scaler_pp_lut = hmm_alloc(sizeof(zoom_table), HMM_BO_PRIVATE, 0, NULL, 0);
- else
- pipe->scaler_pp_lut = sh_css_params_alloc_gdc_lut();
+ pipe->scaler_pp_lut = hmm_alloc(sizeof(zoom_table), HMM_BO_PRIVATE, 0, NULL, 0);
if (pipe->scaler_pp_lut == mmgr_NULL) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
@@ -2858,10 +2709,7 @@ int sh_css_params_map_and_store_default_gdc_lut(void)
host_lut_store((void *)zoom_table);
- if (!IS_ISP2401)
- default_gdc_lut = hmm_alloc(sizeof(zoom_table), HMM_BO_PRIVATE, 0, NULL, 0);
- else
- default_gdc_lut = sh_css_params_alloc_gdc_lut();
+ default_gdc_lut = hmm_alloc(sizeof(zoom_table), HMM_BO_PRIVATE, 0, NULL, 0);
if (default_gdc_lut == mmgr_NULL)
return -ENOMEM;
@@ -2879,8 +2727,10 @@ void sh_css_params_free_default_gdc_lut(void)
{
IA_CSS_ENTER_PRIVATE("void");
- sh_css_params_free_gdc_lut(default_gdc_lut);
- default_gdc_lut = mmgr_NULL;
+ if (default_gdc_lut != mmgr_NULL) {
+ hmm_free(default_gdc_lut);
+ default_gdc_lut = mmgr_NULL;
+ }
IA_CSS_LEAVE_PRIVATE("void");
}
@@ -3252,15 +3102,10 @@ sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
isp_pipe_version = ia_css_pipe_get_isp_pipe_version(pipe);
ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
ia_css_query_internal_queue_id(params->output_frame
? IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET
: IA_CSS_BUFFER_TYPE_PARAMETER_SET,
thread_id, &queue_id);
-#else
- ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_PARAMETER_SET, thread_id,
- &queue_id);
-#endif
if (!sh_css_sp_is_running()) {
/* SP is not running. The queues are not valid */
err = -EBUSY;
@@ -3356,12 +3201,10 @@ sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
err = ia_css_bufq_enqueue_buffer(thread_id, queue_id, (uint32_t)cpy);
if (err) {
free_ia_css_isp_parameter_set_info(cpy);
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
IA_CSS_LOG("pfp: FAILED to add config id %d for OF %d to q %d on thread %d",
isp_params_info.isp_parameters_id,
isp_params_info.output_frame_ptr,
queue_id, thread_id);
-#endif
break;
} else {
/* TMP: check discrepancy between nr of enqueued
@@ -3383,12 +3226,10 @@ sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
(uint8_t)thread_id,
(uint8_t)queue_id,
0);
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
IA_CSS_LOG("pfp: added config id %d for OF %d to q %d on thread %d",
isp_params_info.isp_parameters_id,
isp_params_info.output_frame_ptr,
queue_id, thread_id);
-#endif
}
/* clean-up old copy */
ia_css_dequeue_param_buffers(/*pipe_num*/);
@@ -3466,18 +3307,12 @@ sh_css_params_write_to_ddr_internal(
if (binary->info->sp.enable.sc) {
u32 enable_conv;
- size_t bytes;
-
- if (!IS_ISP2401)
- bytes = ISP2400_SCTBL_BYTES(binary);
- else
- bytes = ISP2401_SCTBL_BYTES(binary);
enable_conv = params->shading_settings.enable_shading_table_conversion;
buff_realloced = reallocate_buffer(&ddr_map->sc_tbl,
&ddr_map_size->sc_tbl,
- bytes,
+ SCTBL_BYTES(binary),
params->sc_table_changed,
&err);
if (err) {
@@ -3562,31 +3397,9 @@ sh_css_params_write_to_ddr_internal(
}
}
- /* DPC configuration is made pipe specific to allow flexibility in positioning of the
- * DPC kernel. The code below sets the pipe specific configuration to
- * individual binaries. */
- if (IS_ISP2401 &&
- params->pipe_dpc_config_changed[pipe_id] && binary->info->sp.enable.dpc) {
- unsigned int size =
- stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
-
- unsigned int offset =
- stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
-
- if (size) {
- ia_css_dp_encode((struct sh_css_isp_dp_params *)
- &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
- &params->pipe_dp_config[pipe_id], size);
-
- params->isp_params_changed = true;
- params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
- true;
- }
- }
-
if (params->config_changed[IA_CSS_MACC_ID] && binary->info->sp.enable.macc) {
unsigned int i, j, idx;
- unsigned int idx_map[] = {
+ static const unsigned int idx_map[] = {
0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8
};
@@ -3665,13 +3478,7 @@ sh_css_params_write_to_ddr_internal(
if (!params->pipe_dvs_6axis_config[pipe_id]) {
struct ia_css_resolution dvs_offset = {0};
- if (!IS_ISP2401) {
- dvs_offset.width = (PIX_SHIFT_FILTER_RUN_IN_X + binary->dvs_envelope.width) / 2;
- } else {
- if (binary->dvs_envelope.width || binary->dvs_envelope.height) {
- dvs_offset.width = (PIX_SHIFT_FILTER_RUN_IN_X + binary->dvs_envelope.width) / 2;
- }
- }
+ dvs_offset.width = (PIX_SHIFT_FILTER_RUN_IN_X + binary->dvs_envelope.width) / 2;
dvs_offset.height = (PIX_SHIFT_FILTER_RUN_IN_Y + binary->dvs_envelope.height) / 2;
params->pipe_dvs_6axis_config[pipe_id] =
@@ -4335,12 +4142,8 @@ ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
me->data = kvmalloc(grid_size * sizeof(*me->data), GFP_KERNEL);
if (!me->data)
goto err;
-#if !defined(HAS_NO_HMEM)
/* No weighted histogram, no structure, treat the histogram data as a byte dump in a byte array */
me->rgby_data = kvmalloc(sizeof_hmem(HMEM0_ID), GFP_KERNEL);
-#else
- me->rgby_data = NULL;
-#endif
IA_CSS_LEAVE("return=%p", me);
return me;
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.h b/drivers/staging/media/atomisp/pci/sh_css_params.h
index 62a7b6ada237..bbca19d0e8fc 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_params.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_params.h
@@ -121,8 +121,6 @@ struct ia_css_isp_parameters {
bool dvs2_coef_table_changed;
bool morph_table_changed;
bool sc_table_changed;
- bool sc_table_dirty;
- unsigned int sc_table_last_pipe_num;
bool anr_thres_changed;
/* ---- deprecated: replaced with pipe_dvs_6axis_config_changed ---- */
bool dvs_6axis_config_changed;
@@ -168,12 +166,6 @@ ia_css_params_alloc_convert_sctbl(
struct ia_css_isp_config *
sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe);
-/* ipu address allocation/free for gdc lut */
-ia_css_ptr
-sh_css_params_alloc_gdc_lut(void);
-void
-sh_css_params_free_gdc_lut(ia_css_ptr addr);
-
int
sh_css_params_map_and_store_default_gdc_lut(void);
diff --git a/drivers/staging/media/atomisp/pci/sh_css_shading.c b/drivers/staging/media/atomisp/pci/sh_css_shading.c
deleted file mode 100644
index 462caf9cb571..000000000000
--- a/drivers/staging/media/atomisp/pci/sh_css_shading.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-/* This file will contain the code to implement the functions declared in ia_css_shading.h
- and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.c b/drivers/staging/media/atomisp/pci/sh_css_sp.c
index a73e8ca1e225..615500a7d3c4 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_sp.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.c
@@ -330,9 +330,7 @@ sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
unsigned int thread_id;
u8 stage_num = 0;
struct sh_css_sp_pipeline *pipe;
-#if defined SH_CSS_ENABLE_METADATA
enum sh_css_queue_id queue_id;
-#endif
assert(out_frame);
@@ -372,7 +370,6 @@ sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
set_output_frame_buffer(out_frame, 0);
-#if defined SH_CSS_ENABLE_METADATA
if (pipe->metadata.height > 0) {
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id,
&queue_id);
@@ -380,7 +377,6 @@ sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
queue_id, mmgr_EXCEPTION,
IA_CSS_BUFFER_TYPE_METADATA);
}
-#endif
ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame);
}
@@ -812,29 +808,47 @@ is_sp_stage(struct ia_css_pipeline_stage *stage)
return stage->sp_func != IA_CSS_PIPELINE_NO_FUNC;
}
-static int
-configure_isp_from_args(
- const struct sh_css_sp_pipeline *pipeline,
- const struct ia_css_binary *binary,
- const struct sh_css_binary_args *args,
- bool two_ppc,
- bool deinterleaved)
+static int configure_isp_from_args(const struct sh_css_sp_pipeline *pipeline,
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args,
+ bool two_ppc,
+ bool deinterleaved)
{
- ia_css_fpn_configure(binary, &binary->in_frame_info);
- ia_css_crop_configure(binary, &args->delay_frames[0]->info);
- ia_css_qplane_configure(pipeline, binary, &binary->in_frame_info);
- ia_css_output0_configure(binary, &args->out_frame[0]->info);
- ia_css_output1_configure(binary, &args->out_vf_frame->info);
- ia_css_copy_output_configure(binary, args->copy_output);
- ia_css_output0_configure(binary, &args->out_frame[0]->info);
-#ifdef ISP2401
- ia_css_sc_configure(binary, pipeline->shading.internal_frame_origin_x_bqs_on_sctbl,
- pipeline->shading.internal_frame_origin_y_bqs_on_sctbl);
-#endif
- ia_css_iterator_configure(binary, &args->in_frame->info);
- ia_css_dvs_configure(binary, &args->out_frame[0]->info);
- ia_css_output_configure(binary, &args->out_frame[0]->info);
- ia_css_raw_configure(pipeline, binary, &args->in_frame->info, &binary->in_frame_info, two_ppc, deinterleaved);
+ int ret;
+
+ ret = ia_css_fpn_configure(binary, &binary->in_frame_info);
+ if (ret)
+ return ret;
+ ret = ia_css_crop_configure(binary, &args->delay_frames[0]->info);
+ if (ret)
+ return ret;
+ ret = ia_css_qplane_configure(pipeline, binary, &binary->in_frame_info);
+ if (ret)
+ return ret;
+ ret = ia_css_output0_configure(binary, &args->out_frame[0]->info);
+ if (ret)
+ return ret;
+ ret = ia_css_output1_configure(binary, &args->out_vf_frame->info);
+ if (ret)
+ return ret;
+ ret = ia_css_copy_output_configure(binary, args->copy_output);
+ if (ret)
+ return ret;
+ ret = ia_css_output0_configure(binary, &args->out_frame[0]->info);
+ if (ret)
+ return ret;
+ ret = ia_css_iterator_configure(binary, &args->in_frame->info);
+ if (ret)
+ return ret;
+ ret = ia_css_dvs_configure(binary, &args->out_frame[0]->info);
+ if (ret)
+ return ret;
+ ret = ia_css_output_configure(binary, &args->out_frame[0]->info);
+ if (ret)
+ return ret;
+ ret = ia_css_raw_configure(pipeline, binary, &args->in_frame->info, &binary->in_frame_info, two_ppc, deinterleaved);
+ if (ret)
+ return ret;
/*
* FIXME: args->delay_frames can be NULL here
@@ -846,10 +860,13 @@ configure_isp_from_args(
* without crashing, but the pipeline should likely be built without
* adding it in the first place (or there is a hidden bug somewhere)
*/
- ia_css_ref_configure(binary, args->delay_frames, pipeline->dvs_frame_delay);
- ia_css_tnr_configure(binary, args->tnr_frames);
- ia_css_bayer_io_config(binary, args);
- return 0;
+ ret = ia_css_ref_configure(binary, args->delay_frames, pipeline->dvs_frame_delay);
+ if (ret)
+ return ret;
+ ret = ia_css_tnr_configure(binary, args->tnr_frames);
+ if (ret)
+ return ret;
+ return ia_css_bayer_io_config(binary, args);
}
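
The hunk above converts a chain of fire-and-forget configure calls into ones whose errors are propagated to the caller. A minimal sketch, assuming a hypothetical local helper macro (not part of the patch), of how such a repetitive ladder could otherwise be collapsed:

    /* Illustrative helper only -- not in this patch. */
    #define CFG_OR_RETURN(call)			\
            do {				\
                    int __ret = (call);	\
                    if (__ret)			\
                            return __ret;	\
            } while (0)

    CFG_OR_RETURN(ia_css_fpn_configure(binary, &binary->in_frame_info));
    CFG_OR_RETURN(ia_css_crop_configure(binary, &args->delay_frames[0]->info));

The explicit form used by the patch is the more idiomatic kernel style, since a macro hiding a return statement obscures control flow.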
static void
@@ -883,9 +900,7 @@ initialize_stage_frames(struct ia_css_frames_sp *frames)
initialize_frame_buffer_attribute(&frames->out_vf.buf_attr);
initialize_frame_buffer_attribute(&frames->s3a_buf);
initialize_frame_buffer_attribute(&frames->dvs_buf);
-#if defined SH_CSS_ENABLE_METADATA
initialize_frame_buffer_attribute(&frames->metadata_buf);
-#endif
}
static int
@@ -1011,24 +1026,20 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
mmgr_EXCEPTION,
IA_CSS_BUFFER_TYPE_DIS_STATISTICS);
}
-#if defined SH_CSS_ENABLE_METADATA
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id, &queue_id);
sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_METADATA);
-#endif
if (err)
return err;
#ifdef ISP2401
- if (stage == 0) {
- pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
- if (!pipe)
- return -EINVAL;
-
- if (args->in_frame)
- ia_css_get_crop_offsets(pipe, &args->in_frame->info);
- else
- ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
- }
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (!pipe)
+ return -EINVAL;
+
+ if (args->in_frame)
+ ia_css_get_crop_offsets(pipe, &args->in_frame->info);
+ else
+ ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
#else
(void)pipe; /* avoid build warning */
#endif
@@ -1196,12 +1207,7 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
enum ia_css_input_mode input_mode,
const struct ia_css_metadata_config *md_config,
const struct ia_css_metadata_info *md_info,
- const enum mipi_port_id port_id,
- const struct ia_css_coordinate
- *internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame
- positioned on shading table at shading correction in ISP. */
- const struct ia_css_isp_parameters *params
- )
+ const enum mipi_port_id port_id)
{
/* Get first stage */
struct ia_css_pipeline_stage *stage = NULL;
@@ -1288,7 +1294,6 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
}
sh_css_sp_group.pipe[thread_id].scaler_pp_lut = sh_css_pipe_get_pp_gdc_lut(pipe);
-#if defined(SH_CSS_ENABLE_METADATA)
if (md_info && md_info->size > 0) {
sh_css_sp_group.pipe[thread_id].metadata.width = md_info->resolution.width;
sh_css_sp_group.pipe[thread_id].metadata.height = md_info->resolution.height;
@@ -1298,37 +1303,13 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
md_config->data_type, MIPI_PREDICTOR_NONE,
&sh_css_sp_group.pipe[thread_id].metadata.format);
}
-#else
- (void)md_config;
- (void)md_info;
-#endif
-#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
sh_css_sp_group.pipe[thread_id].output_frame_queue_id = (uint32_t)SH_CSS_INVALID_QUEUE_ID;
if (pipe_id != IA_CSS_PIPE_ID_COPY) {
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id,
(enum sh_css_queue_id *)(
&sh_css_sp_group.pipe[thread_id].output_frame_queue_id));
}
-#endif
-
- if (IS_ISP2401) {
- /* For the shading correction type 1 (the legacy shading table conversion in css is not used),
- * the parameters are passed to the isp for the shading table centering.
- */
- if (internal_frame_origin_bqs_on_sctbl &&
- params && params->shading_settings.enable_shading_table_conversion == 0) {
- sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_x_bqs_on_sctbl
- = (uint32_t)internal_frame_origin_bqs_on_sctbl->x;
- sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_y_bqs_on_sctbl
- = (uint32_t)internal_frame_origin_bqs_on_sctbl->y;
- } else {
- sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_x_bqs_on_sctbl =
- 0;
- sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_y_bqs_on_sctbl =
- 0;
- }
- }
IA_CSS_LOG("pipe_id %d port_config %08x",
pipe_id, sh_css_sp_group.pipe[thread_id].inout_port_config);
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.h b/drivers/staging/media/atomisp/pci/sh_css_sp.h
index 832eed711525..f69a79b0b0da 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_sp.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.h
@@ -66,12 +66,7 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
enum ia_css_input_mode input_mode,
const struct ia_css_metadata_config *md_config,
const struct ia_css_metadata_info *md_info,
- const enum mipi_port_id port_id,
- const struct ia_css_coordinate
- *internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame
- positioned on shading table at shading correction in ISP. */
- const struct ia_css_isp_parameters *params
- );
+ const enum mipi_port_id port_id);
void
sh_css_sp_uninit_pipeline(unsigned int pipe_num);
diff --git a/drivers/staging/media/atomisp/pci/sh_css_stream.c b/drivers/staging/media/atomisp/pci/sh_css_stream.c
deleted file mode 100644
index a768ce90f51c..000000000000
--- a/drivers/staging/media/atomisp/pci/sh_css_stream.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-/* This file will contain the code to implement the functions declared in ia_css_stream.h
- and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/system_global.h b/drivers/staging/media/atomisp/pci/system_global.h
index 9b22b8c168be..060b924023ec 100644
--- a/drivers/staging/media/atomisp/pci/system_global.h
+++ b/drivers/staging/media/atomisp/pci/system_global.h
@@ -25,9 +25,6 @@
* N.B. the 3 input formatters are of 2 different classes
*/
-/* per-frame parameter handling support */
-#define SH_CSS_ENABLE_PER_FRAME_PARAMS
-
#define DMA_DDR_TO_VAMEM_WORKAROUND
#define DMA_DDR_TO_HMEM_WORKAROUND
diff --git a/drivers/staging/media/hantro/Kconfig b/drivers/staging/media/hantro/Kconfig
index 20b1f6d7b69c..3c5d833322c8 100644
--- a/drivers/staging/media/hantro/Kconfig
+++ b/drivers/staging/media/hantro/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config VIDEO_HANTRO
tristate "Hantro VPU driver"
- depends on ARCH_MXC || ARCH_ROCKCHIP || ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_MXC || ARCH_ROCKCHIP || ARCH_AT91 || ARCH_SUNXI || COMPILE_TEST
depends on VIDEO_DEV && VIDEO_V4L2
select MEDIA_CONTROLLER
select MEDIA_CONTROLLER_REQUEST_API
@@ -9,6 +9,7 @@ config VIDEO_HANTRO
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
select V4L2_H264
+ select V4L2_VP9
help
Support for the Hantro IP based Video Processing Units present on
Rockchip and NXP i.MX8M SoCs, which accelerate video and image
@@ -39,3 +40,11 @@ config VIDEO_HANTRO_ROCKCHIP
default y
help
Enable support for RK3288, RK3328, and RK3399 SoCs.
+
+config VIDEO_HANTRO_SUNXI
+ bool "Hantro VPU Allwinner support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default y
+ help
+	  Enable support for the Allwinner H6 SoC.
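
VIDEO_HANTRO is a tristate while the per-SoC options are booleans, so each enabled backend is compiled into the single hantro-vpu module rather than built separately. A minimal .config fragment enabling the new Allwinner backend (assuming ARCH_SUNXI is set) would be:

    CONFIG_VIDEO_HANTRO=m
    CONFIG_VIDEO_HANTRO_SUNXI=y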
diff --git a/drivers/staging/media/hantro/Makefile b/drivers/staging/media/hantro/Makefile
index 90036831fec4..ebd5ede7bef7 100644
--- a/drivers/staging/media/hantro/Makefile
+++ b/drivers/staging/media/hantro/Makefile
@@ -10,8 +10,10 @@ hantro-vpu-y += \
hantro_g1.o \
hantro_g1_h264_dec.o \
hantro_g1_mpeg2_dec.o \
- hantro_g2_hevc_dec.o \
hantro_g1_vp8_dec.o \
+ hantro_g2.o \
+ hantro_g2_hevc_dec.o \
+ hantro_g2_vp9_dec.o \
rockchip_vpu2_hw_jpeg_enc.o \
rockchip_vpu2_hw_h264_dec.o \
rockchip_vpu2_hw_mpeg2_dec.o \
@@ -20,7 +22,8 @@ hantro-vpu-y += \
hantro_h264.o \
hantro_hevc.o \
hantro_mpeg2.o \
- hantro_vp8.o
+ hantro_vp8.o \
+ hantro_vp9.o
hantro-vpu-$(CONFIG_VIDEO_HANTRO_IMX8M) += \
imx8m_vpu_hw.o
@@ -30,3 +33,6 @@ hantro-vpu-$(CONFIG_VIDEO_HANTRO_SAMA5D4) += \
hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
rockchip_vpu_hw.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_SUNXI) += \
+ sunxi_vpu_hw.o
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index c2e2dca38628..06d0f3597694 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -16,6 +16,7 @@
#include <linux/videodev2.h>
#include <linux/wait.h>
#include <linux/clk.h>
+#include <linux/reset.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -28,6 +29,7 @@
struct hantro_ctx;
struct hantro_codec_ops;
+struct hantro_postproc_ops;
#define HANTRO_JPEG_ENCODER BIT(0)
#define HANTRO_ENCODERS 0x0000ffff
@@ -35,6 +37,7 @@ struct hantro_codec_ops;
#define HANTRO_VP8_DECODER BIT(17)
#define HANTRO_H264_DECODER BIT(18)
#define HANTRO_HEVC_DECODER BIT(19)
+#define HANTRO_VP9_DECODER BIT(20)
#define HANTRO_DECODERS 0xffff0000
/**
@@ -59,6 +62,7 @@ struct hantro_irq {
* @num_dec_fmts: Number of decoder formats.
* @postproc_fmts: Post-processor formats.
* @num_postproc_fmts: Number of post-processor formats.
+ * @postproc_ops: Post-processor ops.
* @codec: Supported codecs
* @codec_ops: Codec ops.
* @init: Initialize hardware, optional.
@@ -69,7 +73,9 @@ struct hantro_irq {
* @num_clocks: number of clocks in the array
* @reg_names: array of register range names
* @num_regs: number of register range names in the array
- * @postproc_regs: &struct hantro_postproc_regs pointer
+ * @double_buffer: core needs double buffering
+ * @legacy_regs: core uses legacy register set
+ * @late_postproc: postproc must be set up at the end of the job
*/
struct hantro_variant {
unsigned int enc_offset;
@@ -80,6 +86,7 @@ struct hantro_variant {
unsigned int num_dec_fmts;
const struct hantro_fmt *postproc_fmts;
unsigned int num_postproc_fmts;
+ const struct hantro_postproc_ops *postproc_ops;
unsigned int codec;
const struct hantro_codec_ops *codec_ops;
int (*init)(struct hantro_dev *vpu);
@@ -90,7 +97,9 @@ struct hantro_variant {
int num_clocks;
const char * const *reg_names;
int num_regs;
- const struct hantro_postproc_regs *postproc_regs;
+ unsigned int double_buffer : 1;
+ unsigned int legacy_regs : 1;
+ unsigned int late_postproc : 1;
};
/**
@@ -101,6 +110,7 @@ struct hantro_variant {
* @HANTRO_MODE_MPEG2_DEC: MPEG-2 decoder.
* @HANTRO_MODE_VP8_DEC: VP8 decoder.
* @HANTRO_MODE_HEVC_DEC: HEVC decoder.
+ * @HANTRO_MODE_VP9_DEC: VP9 decoder.
*/
enum hantro_codec_mode {
HANTRO_MODE_NONE = -1,
@@ -109,6 +119,7 @@ enum hantro_codec_mode {
HANTRO_MODE_MPEG2_DEC,
HANTRO_MODE_VP8_DEC,
HANTRO_MODE_HEVC_DEC,
+ HANTRO_MODE_VP9_DEC,
};
/*
@@ -167,6 +178,7 @@ hantro_vdev_to_func(struct video_device *vdev)
* @dev: Pointer to device for convenient logging using
* dev_ macros.
* @clocks: Array of clock handles.
+ * @resets: Array of reset handles.
* @reg_bases: Mapped addresses of VPU registers.
* @enc_base: Mapped address of VPU encoder register for convenience.
* @dec_base: Mapped address of VPU decoder register for convenience.
@@ -186,6 +198,7 @@ struct hantro_dev {
struct platform_device *pdev;
struct device *dev;
struct clk_bulk_data *clocks;
+ struct reset_control *resets;
void __iomem **reg_bases;
void __iomem *enc_base;
void __iomem *dec_base;
@@ -222,6 +235,7 @@ struct hantro_dev {
* @mpeg2_dec: MPEG-2-decoding context.
* @vp8_dec: VP8-decoding context.
* @hevc_dec: HEVC-decoding context.
+ * @vp9_dec: VP9-decoding context.
*/
struct hantro_ctx {
struct hantro_dev *dev;
@@ -249,6 +263,7 @@ struct hantro_ctx {
struct hantro_mpeg2_dec_hw_ctx mpeg2_dec;
struct hantro_vp8_dec_hw_ctx vp8_dec;
struct hantro_hevc_dec_hw_ctx hevc_dec;
+ struct hantro_vp9_dec_hw_ctx vp9_dec;
};
};
@@ -262,6 +277,7 @@ struct hantro_ctx {
* @max_depth: Maximum depth, for bitstream formats
* @enc_fmt: Format identifier for encoder registers.
* @frmsize: Supported range of frame sizes (only for bitstream formats).
+ * @postprocessed: Indicates if this format needs the post-processor.
*/
struct hantro_fmt {
char *name;
@@ -271,6 +287,7 @@ struct hantro_fmt {
int max_depth;
enum hantro_enc_fmt enc_fmt;
struct v4l2_frmsize_stepwise frmsize;
+ bool postprocessed;
};
struct hantro_reg {
@@ -296,6 +313,22 @@ struct hantro_postproc_regs {
struct hantro_reg display_width;
};
+struct hantro_vp9_decoded_buffer_info {
+ /* Info needed when the decoded frame serves as a reference frame. */
+ unsigned short width;
+ unsigned short height;
+ u32 bit_depth : 4;
+};
+
+struct hantro_decoded_buffer {
+ /* Must be the first field in this struct. */
+ struct v4l2_m2m_buffer base;
+
+ union {
+ struct hantro_vp9_decoded_buffer_info vp9;
+ };
+};
+
/* Logging helpers */
/**
@@ -366,6 +399,13 @@ static inline void vdpu_write(struct hantro_dev *vpu, u32 val, u32 reg)
writel(val, vpu->dec_base + reg);
}
+static inline void hantro_write_addr(struct hantro_dev *vpu,
+ unsigned long offset,
+ dma_addr_t addr)
+{
+ vdpu_write(vpu, addr & 0xffffffff, offset);
+}
+
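
The helper deliberately truncates dma_addr_t to its low 32 bits; this is only safe because probe() sets a 32-bit DMA coherent mask (see the TODO comment added in hantro_drv.c below). Typical usage, taken from the G2 paths in this series:

    hantro_write_addr(vpu, G2_OUT_LUMA_ADDR, luma_addr);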
static inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg)
{
u32 val = readl(vpu->dec_base + reg);
@@ -426,6 +466,12 @@ hantro_get_dec_buf_addr(struct hantro_ctx *ctx, struct vb2_buffer *vb)
return vb2_dma_contig_plane_dma_addr(vb, 0);
}
+static inline struct hantro_decoded_buffer *
+vb2_to_hantro_decoded_buf(struct vb2_buffer *buf)
+{
+ return container_of(buf, struct hantro_decoded_buffer, base.vb.vb2_buf);
+}
+
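
Since struct hantro_decoded_buffer embeds struct v4l2_m2m_buffer as its first member, any CAPTURE-queue vb2 buffer can be converted back with container_of(); the queue's buf_struct_size is enlarged to match in hantro_drv.c. The HEVC and VP9 paths use it to attach per-frame reference metadata, for example (names as in set_ref() later in this series):

    struct hantro_decoded_buffer *dst =
            vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
    unsigned int ref_width = dst->vp9.width;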
void hantro_postproc_disable(struct hantro_ctx *ctx);
void hantro_postproc_enable(struct hantro_ctx *ctx);
void hantro_postproc_free(struct hantro_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index fb82b9297a2b..6a51f39dde56 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -130,7 +130,7 @@ void hantro_start_prepare_run(struct hantro_ctx *ctx)
v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
&ctx->ctrl_handler);
- if (!ctx->is_encoder) {
+ if (!ctx->is_encoder && !ctx->dev->variant->late_postproc) {
if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
hantro_postproc_enable(ctx);
else
@@ -142,6 +142,13 @@ void hantro_end_prepare_run(struct hantro_ctx *ctx)
{
struct vb2_v4l2_buffer *src_buf;
+ if (!ctx->is_encoder && ctx->dev->variant->late_postproc) {
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+ hantro_postproc_enable(ctx);
+ else
+ hantro_postproc_disable(ctx);
+ }
+
src_buf = hantro_get_src_buf(ctx);
v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
&ctx->ctrl_handler);
@@ -232,7 +239,7 @@ queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->ops = &hantro_queue_ops;
- dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->buf_struct_size = sizeof(struct hantro_decoded_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->vpu_mutex;
dst_vq->dev = ctx->dev->v4l2_dev.dev;
@@ -263,6 +270,12 @@ static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
if (sps->bit_depth_luma_minus8 != 0)
/* Only 8-bit is supported */
return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) {
+ const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame;
+
+ /* We only support profile 0 */
+ if (dec_params->profile != 0)
+ return -EINVAL;
}
return 0;
}
@@ -461,6 +474,16 @@ static const struct hantro_ctrl controls[] = {
.step = 1,
.ops = &hantro_hevc_ctrl_ops,
},
+ }, {
+ .codec = HANTRO_VP9_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_VP9_FRAME,
+ },
+ }, {
+ .codec = HANTRO_VP9_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
+ },
},
};
@@ -598,6 +621,9 @@ static const struct of_device_id of_hantro_match[] = {
#ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
{ .compatible = "microchip,sama5d4-vdec", .data = &sama5d4_vdec_variant, },
#endif
+#ifdef CONFIG_VIDEO_HANTRO_SUNXI
+ { .compatible = "allwinner,sun50i-h6-vpu-g2", .data = &sunxi_vpu_variant, },
+#endif
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_hantro_match);
@@ -889,6 +915,10 @@ static int hantro_probe(struct platform_device *pdev)
return PTR_ERR(vpu->clocks[0].clk);
}
+ vpu->resets = devm_reset_control_array_get(&pdev->dev, false, true);
+ if (IS_ERR(vpu->resets))
+ return PTR_ERR(vpu->resets);
+
num_bases = vpu->variant->num_regs ?: 1;
vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
sizeof(*vpu->reg_bases), GFP_KERNEL);
@@ -907,6 +937,11 @@ static int hantro_probe(struct platform_device *pdev)
vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;
+	/*
+ * TODO: Eventually allow taking advantage of full 64-bit address space.
+ * Until then we assume the MSB portion of buffers' base addresses is
+ * always 0 due to this masking operation.
+ */
ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
@@ -957,10 +992,16 @@ static int hantro_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(vpu->dev);
pm_runtime_enable(vpu->dev);
+ ret = reset_control_deassert(vpu->resets);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to deassert resets\n");
+ goto err_pm_disable;
+ }
+
ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
if (ret) {
dev_err(&pdev->dev, "Failed to prepare clocks\n");
- return ret;
+ goto err_rst_assert;
}
ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
@@ -1016,6 +1057,9 @@ err_v4l2_unreg:
v4l2_device_unregister(&vpu->v4l2_dev);
err_clk_unprepare:
clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+err_rst_assert:
+ reset_control_assert(vpu->resets);
+err_pm_disable:
pm_runtime_dont_use_autosuspend(vpu->dev);
pm_runtime_disable(vpu->dev);
return ret;
@@ -1034,6 +1078,7 @@ static int hantro_remove(struct platform_device *pdev)
v4l2_m2m_release(vpu->m2m_dev);
v4l2_device_unregister(&vpu->v4l2_dev);
clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ reset_control_assert(vpu->resets);
pm_runtime_dont_use_autosuspend(vpu->dev);
pm_runtime_disable(vpu->dev);
return 0;
diff --git a/drivers/staging/media/hantro/hantro_g2.c b/drivers/staging/media/hantro/hantro_g2.c
new file mode 100644
index 000000000000..ee5f14c5f8f2
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g2.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd, Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+ */
+
+#include "hantro_hw.h"
+#include "hantro_g2_regs.h"
+
+void hantro_g2_check_idle(struct hantro_dev *vpu)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ u32 status;
+
+ /* Make sure the VPU is idle */
+ status = vdpu_read(vpu, G2_REG_INTERRUPT);
+ if (status & G2_REG_INTERRUPT_DEC_E) {
+ dev_warn(vpu->dev, "device still running, aborting");
+ status |= G2_REG_INTERRUPT_DEC_ABORT_E | G2_REG_INTERRUPT_DEC_IRQ_DIS;
+ vdpu_write(vpu, status, G2_REG_INTERRUPT);
+ }
+ }
+}
+
+irqreturn_t hantro_g2_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G2_REG_INTERRUPT);
+ state = (status & G2_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G2_REG_INTERRUPT);
+ vdpu_write(vpu, G2_REG_CONFIG_DEC_CLK_GATE_E, G2_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
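
hantro_g2.c gives the G2 core a shared idle check and interrupt handler, so the HEVC and VP9 backends (and the new Allwinner variant) no longer need private copies. A sketch of how a variant would wire the shared handler into its IRQ table, following the pattern of the existing variants (the actual sunxi table lives in sunxi_vpu_hw.c, outside this diff):

    static const struct hantro_irq sunxi_irqs[] = {
            { NULL, hantro_g2_irq },
    };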
diff --git a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
index 76a921163b9a..99d8ea7543da 100644
--- a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
+++ b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
@@ -8,20 +8,6 @@
#include "hantro_hw.h"
#include "hantro_g2_regs.h"
-#define HEVC_DEC_MODE 0xC
-
-#define BUS_WIDTH_32 0
-#define BUS_WIDTH_64 1
-#define BUS_WIDTH_128 2
-#define BUS_WIDTH_256 3
-
-static inline void hantro_write_addr(struct hantro_dev *vpu,
- unsigned long offset,
- dma_addr_t addr)
-{
- vdpu_write(vpu, addr & 0xffffffff, offset);
-}
-
static void prepare_tile_info_buffer(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
@@ -368,6 +354,8 @@ static int set_ref(struct hantro_ctx *ctx)
const struct v4l2_hevc_dpb_entry *dpb = decode_params->dpb;
dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ struct hantro_decoded_buffer *dst;
size_t cr_offset = hantro_hevc_chroma_offset(sps);
size_t mv_offset = hantro_hevc_motion_vectors_offset(sps);
u32 max_ref_frames;
@@ -448,32 +436,37 @@ static int set_ref(struct hantro_ctx *ctx)
if (dpb[i].rps == V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR)
dpb_longterm_e |= BIT(V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1 - i);
- hantro_write_addr(vpu, G2_REG_ADDR_REF(i), luma_addr);
- hantro_write_addr(vpu, G2_REG_CHR_REF(i), chroma_addr);
- hantro_write_addr(vpu, G2_REG_DMV_REF(i), mv_addr);
+ hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
+ hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i), mv_addr);
}
- luma_addr = hantro_hevc_get_ref_buf(ctx, decode_params->pic_order_cnt_val);
+ vb2_dst = hantro_get_dst_buf(ctx);
+ dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
+ luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
if (!luma_addr)
return -ENOMEM;
+ if (hantro_hevc_add_ref_buf(ctx, decode_params->pic_order_cnt_val, luma_addr))
+ return -EINVAL;
+
chroma_addr = luma_addr + cr_offset;
mv_addr = luma_addr + mv_offset;
- hantro_write_addr(vpu, G2_REG_ADDR_REF(i), luma_addr);
- hantro_write_addr(vpu, G2_REG_CHR_REF(i), chroma_addr);
- hantro_write_addr(vpu, G2_REG_DMV_REF(i++), mv_addr);
+ hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
+ hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i++), mv_addr);
- hantro_write_addr(vpu, G2_ADDR_DST, luma_addr);
- hantro_write_addr(vpu, G2_ADDR_DST_CHR, chroma_addr);
- hantro_write_addr(vpu, G2_ADDR_DST_MV, mv_addr);
+ hantro_write_addr(vpu, G2_OUT_LUMA_ADDR, luma_addr);
+ hantro_write_addr(vpu, G2_OUT_CHROMA_ADDR, chroma_addr);
+ hantro_write_addr(vpu, G2_OUT_MV_ADDR, mv_addr);
hantro_hevc_ref_remove_unused(ctx);
for (; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
- hantro_write_addr(vpu, G2_REG_ADDR_REF(i), 0);
- hantro_write_addr(vpu, G2_REG_CHR_REF(i), 0);
- hantro_write_addr(vpu, G2_REG_DMV_REF(i), 0);
+ hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), 0);
+ hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), 0);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i), 0);
}
hantro_reg_write(vpu, &g2_refer_lterm_e, dpb_longterm_e);
@@ -483,37 +476,28 @@ static int set_ref(struct hantro_ctx *ctx)
static void set_buffers(struct hantro_ctx *ctx)
{
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf;
struct hantro_dev *vpu = ctx->dev;
- const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
- const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
- size_t cr_offset = hantro_hevc_chroma_offset(sps);
- dma_addr_t src_dma, dst_dma;
+ dma_addr_t src_dma;
u32 src_len, src_buf_len;
src_buf = hantro_get_src_buf(ctx);
- dst_buf = hantro_get_dst_buf(ctx);
/* Source (stream) buffer. */
src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
src_len = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
src_buf_len = vb2_plane_size(&src_buf->vb2_buf, 0);
- hantro_write_addr(vpu, G2_ADDR_STR, src_dma);
+ hantro_write_addr(vpu, G2_STREAM_ADDR, src_dma);
hantro_reg_write(vpu, &g2_stream_len, src_len);
hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len);
hantro_reg_write(vpu, &g2_strm_start_offset, 0);
hantro_reg_write(vpu, &g2_write_mvs_e, 1);
- /* Destination (decoded frame) buffer. */
- dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
-
- hantro_write_addr(vpu, G2_RASTER_SCAN, dst_dma);
- hantro_write_addr(vpu, G2_RASTER_SCAN_CHR, dst_dma + cr_offset);
- hantro_write_addr(vpu, G2_ADDR_TILE_SIZE, ctx->hevc_dec.tile_sizes.dma);
- hantro_write_addr(vpu, G2_TILE_FILTER, ctx->hevc_dec.tile_filter.dma);
- hantro_write_addr(vpu, G2_TILE_SAO, ctx->hevc_dec.tile_sao.dma);
- hantro_write_addr(vpu, G2_TILE_BSD, ctx->hevc_dec.tile_bsd.dma);
+ hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma);
+ hantro_write_addr(vpu, G2_TILE_FILTER_ADDR, ctx->hevc_dec.tile_filter.dma);
+ hantro_write_addr(vpu, G2_TILE_SAO_ADDR, ctx->hevc_dec.tile_sao.dma);
+ hantro_write_addr(vpu, G2_TILE_BSD_ADDR, ctx->hevc_dec.tile_bsd.dma);
}
static void prepare_scaling_list_buffer(struct hantro_ctx *ctx)
@@ -563,24 +547,7 @@ static void prepare_scaling_list_buffer(struct hantro_ctx *ctx)
for (k = 0; k < 8; k++)
*p++ = sc->scaling_list_32x32[i][8 * k + j];
- hantro_write_addr(vpu, HEVC_SCALING_LIST, ctx->hevc_dec.scaling_lists.dma);
-}
-
-static void hantro_g2_check_idle(struct hantro_dev *vpu)
-{
- int i;
-
- for (i = 0; i < 3; i++) {
- u32 status;
-
- /* Make sure the VPU is idle */
- status = vdpu_read(vpu, G2_REG_INTERRUPT);
- if (status & G2_REG_INTERRUPT_DEC_E) {
- dev_warn(vpu->dev, "device still running, aborting");
- status |= G2_REG_INTERRUPT_DEC_ABORT_E | G2_REG_INTERRUPT_DEC_IRQ_DIS;
- vdpu_write(vpu, status, G2_REG_INTERRUPT);
- }
- }
+ hantro_write_addr(vpu, G2_HEVC_SCALING_LIST_ADDR, ctx->hevc_dec.scaling_lists.dma);
}
int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx)
@@ -619,9 +586,6 @@ int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx)
/* Don't compress buffers */
hantro_reg_write(vpu, &g2_ref_compress_bypass, 1);
- /* use NV12 as output format */
- hantro_reg_write(vpu, &g2_out_rs_e, 1);
-
/* Bus width and max burst */
hantro_reg_write(vpu, &g2_buswidth, BUS_WIDTH_128);
hantro_reg_write(vpu, &g2_max_burst, 16);
diff --git a/drivers/staging/media/hantro/hantro_g2_regs.h b/drivers/staging/media/hantro/hantro_g2_regs.h
index bb22fa921914..b7c6f9877b9d 100644
--- a/drivers/staging/media/hantro/hantro_g2_regs.h
+++ b/drivers/staging/media/hantro/hantro_g2_regs.h
@@ -27,8 +27,22 @@
#define G2_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
#define G2_REG_INTERRUPT_DEC_E BIT(0)
+#define HEVC_DEC_MODE 0xc
+#define VP9_DEC_MODE 0xd
+
+#define BUS_WIDTH_32 0
+#define BUS_WIDTH_64 1
+#define BUS_WIDTH_128 2
+#define BUS_WIDTH_256 3
+
#define g2_strm_swap G2_DEC_REG(2, 28, 0xf)
+#define g2_strm_swap_old G2_DEC_REG(2, 27, 0x1f)
+#define g2_pic_swap G2_DEC_REG(2, 22, 0x1f)
#define g2_dirmv_swap G2_DEC_REG(2, 20, 0xf)
+#define g2_dirmv_swap_old G2_DEC_REG(2, 17, 0x1f)
+#define g2_tab0_swap_old G2_DEC_REG(2, 12, 0x1f)
+#define g2_tab1_swap_old G2_DEC_REG(2, 7, 0x1f)
+#define g2_tab2_swap_old G2_DEC_REG(2, 2, 0x1f)
#define g2_mode G2_DEC_REG(3, 27, 0x1f)
#define g2_compress_swap G2_DEC_REG(3, 20, 0xf)
@@ -37,11 +51,14 @@
#define g2_out_dis G2_DEC_REG(3, 15, 0x1)
#define g2_out_filtering_dis G2_DEC_REG(3, 14, 0x1)
#define g2_write_mvs_e G2_DEC_REG(3, 12, 0x1)
+#define g2_tab3_swap_old G2_DEC_REG(3, 7, 0x1f)
+#define g2_rscan_swap G2_DEC_REG(3, 2, 0x1f)
#define g2_pic_width_in_cbs G2_DEC_REG(4, 19, 0x1fff)
#define g2_pic_height_in_cbs G2_DEC_REG(4, 6, 0x1fff)
#define g2_num_ref_frames G2_DEC_REG(4, 0, 0x1f)
+#define g2_start_bit G2_DEC_REG(5, 25, 0x7f)
#define g2_scaling_list_e G2_DEC_REG(5, 24, 0x1)
#define g2_cb_qp_offset G2_DEC_REG(5, 19, 0x1f)
#define g2_cr_qp_offset G2_DEC_REG(5, 14, 0x1f)
@@ -49,6 +66,7 @@
#define g2_tempor_mvp_e G2_DEC_REG(5, 11, 0x1)
#define g2_max_cu_qpd_depth G2_DEC_REG(5, 5, 0x3f)
#define g2_cu_qpd_e G2_DEC_REG(5, 4, 0x1)
+#define g2_pix_shift G2_DEC_REG(5, 0, 0xf)
#define g2_stream_len G2_DEC_REG(6, 0, 0xffffffff)
@@ -71,24 +89,40 @@
#define g2_const_intra_e G2_DEC_REG(8, 31, 0x1)
#define g2_filt_ctrl_pres G2_DEC_REG(8, 30, 0x1)
+#define g2_bit_depth_y G2_DEC_REG(8, 21, 0xf)
+#define g2_bit_depth_c G2_DEC_REG(8, 17, 0xf)
#define g2_idr_pic_e G2_DEC_REG(8, 16, 0x1)
#define g2_bit_depth_pcm_y G2_DEC_REG(8, 12, 0xf)
#define g2_bit_depth_pcm_c G2_DEC_REG(8, 8, 0xf)
#define g2_bit_depth_y_minus8 G2_DEC_REG(8, 6, 0x3)
#define g2_bit_depth_c_minus8 G2_DEC_REG(8, 4, 0x3)
+#define g2_rs_out_bit_depth G2_DEC_REG(8, 4, 0xf)
#define g2_output_8_bits G2_DEC_REG(8, 3, 0x1)
+#define g2_output_format G2_DEC_REG(8, 0, 0x7)
+#define g2_pp_pix_shift G2_DEC_REG(8, 0, 0xf)
#define g2_refidx1_active G2_DEC_REG(9, 19, 0x1f)
#define g2_refidx0_active G2_DEC_REG(9, 14, 0x1f)
#define g2_hdr_skip_length G2_DEC_REG(9, 0, 0x3fff)
#define g2_start_code_e G2_DEC_REG(10, 31, 0x1)
+#define g2_init_qp_old G2_DEC_REG(10, 25, 0x3f)
#define g2_init_qp G2_DEC_REG(10, 24, 0x3f)
+#define g2_num_tile_cols_old G2_DEC_REG(10, 20, 0x1f)
#define g2_num_tile_cols G2_DEC_REG(10, 19, 0x1f)
+#define g2_num_tile_rows_old G2_DEC_REG(10, 15, 0x1f)
#define g2_num_tile_rows G2_DEC_REG(10, 14, 0x1f)
#define g2_tile_e G2_DEC_REG(10, 1, 0x1)
#define g2_entropy_sync_e G2_DEC_REG(10, 0, 0x1)
+#define vp9_transform_mode G2_DEC_REG(11, 27, 0x7)
+#define vp9_filt_sharpness G2_DEC_REG(11, 21, 0x7)
+#define vp9_mcomp_filt_type G2_DEC_REG(11, 8, 0x7)
+#define vp9_high_prec_mv_e G2_DEC_REG(11, 7, 0x1)
+#define vp9_comp_pred_mode G2_DEC_REG(11, 4, 0x3)
+#define vp9_gref_sign_bias G2_DEC_REG(11, 2, 0x1)
+#define vp9_aref_sign_bias G2_DEC_REG(11, 0, 0x1)
+
#define g2_refer_lterm_e G2_DEC_REG(12, 16, 0xffff)
#define g2_min_cb_size G2_DEC_REG(12, 13, 0x7)
#define g2_max_cb_size G2_DEC_REG(12, 10, 0x7)
@@ -147,6 +181,50 @@
#define g2_partial_ctb_y G2_DEC_REG(20, 30, 0x1)
#define g2_pic_width_4x4 G2_DEC_REG(20, 16, 0xfff)
#define g2_pic_height_4x4 G2_DEC_REG(20, 0, 0xfff)
+
+#define vp9_qp_delta_y_dc G2_DEC_REG(13, 23, 0x3f)
+#define vp9_qp_delta_ch_dc G2_DEC_REG(13, 17, 0x3f)
+#define vp9_qp_delta_ch_ac G2_DEC_REG(13, 11, 0x3f)
+#define vp9_last_sign_bias G2_DEC_REG(13, 10, 0x1)
+#define vp9_lossless_e G2_DEC_REG(13, 9, 0x1)
+#define vp9_comp_pred_var_ref1 G2_DEC_REG(13, 7, 0x3)
+#define vp9_comp_pred_var_ref0 G2_DEC_REG(13, 5, 0x3)
+#define vp9_comp_pred_fixed_ref G2_DEC_REG(13, 3, 0x3)
+#define vp9_segment_temp_upd_e G2_DEC_REG(13, 2, 0x1)
+#define vp9_segment_upd_e G2_DEC_REG(13, 1, 0x1)
+#define vp9_segment_e G2_DEC_REG(13, 0, 0x1)
+
+#define vp9_filt_level G2_DEC_REG(14, 18, 0x3f)
+#define vp9_refpic_seg0 G2_DEC_REG(14, 15, 0x7)
+#define vp9_skip_seg0 G2_DEC_REG(14, 14, 0x1)
+#define vp9_filt_level_seg0 G2_DEC_REG(14, 8, 0x3f)
+#define vp9_quant_seg0 G2_DEC_REG(14, 0, 0xff)
+
+#define vp9_refpic_seg1 G2_DEC_REG(15, 15, 0x7)
+#define vp9_skip_seg1 G2_DEC_REG(15, 14, 0x1)
+#define vp9_filt_level_seg1 G2_DEC_REG(15, 8, 0x3f)
+#define vp9_quant_seg1 G2_DEC_REG(15, 0, 0xff)
+
+#define vp9_refpic_seg2 G2_DEC_REG(16, 15, 0x7)
+#define vp9_skip_seg2 G2_DEC_REG(16, 14, 0x1)
+#define vp9_filt_level_seg2 G2_DEC_REG(16, 8, 0x3f)
+#define vp9_quant_seg2 G2_DEC_REG(16, 0, 0xff)
+
+#define vp9_refpic_seg3 G2_DEC_REG(17, 15, 0x7)
+#define vp9_skip_seg3 G2_DEC_REG(17, 14, 0x1)
+#define vp9_filt_level_seg3 G2_DEC_REG(17, 8, 0x3f)
+#define vp9_quant_seg3 G2_DEC_REG(17, 0, 0xff)
+
+#define vp9_refpic_seg4 G2_DEC_REG(18, 15, 0x7)
+#define vp9_skip_seg4 G2_DEC_REG(18, 14, 0x1)
+#define vp9_filt_level_seg4 G2_DEC_REG(18, 8, 0x3f)
+#define vp9_quant_seg4 G2_DEC_REG(18, 0, 0xff)
+
+#define vp9_refpic_seg5 G2_DEC_REG(19, 15, 0x7)
+#define vp9_skip_seg5 G2_DEC_REG(19, 14, 0x1)
+#define vp9_filt_level_seg5 G2_DEC_REG(19, 8, 0x3f)
+#define vp9_quant_seg5 G2_DEC_REG(19, 0, 0xff)
+
#define hevc_cur_poc_00 G2_DEC_REG(46, 24, 0xff)
#define hevc_cur_poc_01 G2_DEC_REG(46, 16, 0xff)
#define hevc_cur_poc_02 G2_DEC_REG(46, 8, 0xff)
@@ -167,9 +245,48 @@
#define hevc_cur_poc_14 G2_DEC_REG(49, 8, 0xff)
#define hevc_cur_poc_15 G2_DEC_REG(49, 0, 0xff)
+#define vp9_refpic_seg6 G2_DEC_REG(31, 15, 0x7)
+#define vp9_skip_seg6 G2_DEC_REG(31, 14, 0x1)
+#define vp9_filt_level_seg6 G2_DEC_REG(31, 8, 0x3f)
+#define vp9_quant_seg6 G2_DEC_REG(31, 0, 0xff)
+
+#define vp9_refpic_seg7 G2_DEC_REG(32, 15, 0x7)
+#define vp9_skip_seg7 G2_DEC_REG(32, 14, 0x1)
+#define vp9_filt_level_seg7 G2_DEC_REG(32, 8, 0x3f)
+#define vp9_quant_seg7 G2_DEC_REG(32, 0, 0xff)
+
+#define vp9_lref_width G2_DEC_REG(33, 16, 0xffff)
+#define vp9_lref_height G2_DEC_REG(33, 0, 0xffff)
+
+#define vp9_gref_width G2_DEC_REG(34, 16, 0xffff)
+#define vp9_gref_height G2_DEC_REG(34, 0, 0xffff)
+
+#define vp9_aref_width G2_DEC_REG(35, 16, 0xffff)
+#define vp9_aref_height G2_DEC_REG(35, 0, 0xffff)
+
+#define vp9_lref_hor_scale G2_DEC_REG(36, 16, 0xffff)
+#define vp9_lref_ver_scale G2_DEC_REG(36, 0, 0xffff)
+
+#define vp9_gref_hor_scale G2_DEC_REG(37, 16, 0xffff)
+#define vp9_gref_ver_scale G2_DEC_REG(37, 0, 0xffff)
+
+#define vp9_aref_hor_scale G2_DEC_REG(38, 16, 0xffff)
+#define vp9_aref_ver_scale G2_DEC_REG(38, 0, 0xffff)
+
+#define vp9_filt_ref_adj_0 G2_DEC_REG(46, 24, 0x7f)
+#define vp9_filt_ref_adj_1 G2_DEC_REG(46, 16, 0x7f)
+#define vp9_filt_ref_adj_2 G2_DEC_REG(46, 8, 0x7f)
+#define vp9_filt_ref_adj_3 G2_DEC_REG(46, 0, 0x7f)
+
+#define vp9_filt_mb_adj_0 G2_DEC_REG(47, 24, 0x7f)
+#define vp9_filt_mb_adj_1 G2_DEC_REG(47, 16, 0x7f)
+#define vp9_filt_mb_adj_2 G2_DEC_REG(47, 8, 0x7f)
+#define vp9_filt_mb_adj_3 G2_DEC_REG(47, 0, 0x7f)
+
#define g2_apf_threshold G2_DEC_REG(55, 0, 0xffff)
#define g2_clk_gate_e G2_DEC_REG(58, 16, 0x1)
+#define g2_double_buffer_e G2_DEC_REG(58, 15, 0x1)
#define g2_buswidth G2_DEC_REG(58, 8, 0x7)
#define g2_max_burst G2_DEC_REG(58, 0, 0xff)
@@ -177,20 +294,24 @@
#define G2_REG_CONFIG_DEC_CLK_GATE_E BIT(16)
#define G2_REG_CONFIG_DEC_CLK_GATE_IDLE_E BIT(17)
-#define G2_ADDR_DST (G2_SWREG(65))
-#define G2_REG_ADDR_REF(i) (G2_SWREG(67) + ((i) * 0x8))
-#define G2_ADDR_DST_CHR (G2_SWREG(99))
-#define G2_REG_CHR_REF(i) (G2_SWREG(101) + ((i) * 0x8))
-#define G2_ADDR_DST_MV (G2_SWREG(133))
-#define G2_REG_DMV_REF(i) (G2_SWREG(135) + ((i) * 0x8))
-#define G2_ADDR_TILE_SIZE (G2_SWREG(167))
-#define G2_ADDR_STR (G2_SWREG(169))
-#define HEVC_SCALING_LIST (G2_SWREG(171))
-#define G2_RASTER_SCAN (G2_SWREG(175))
-#define G2_RASTER_SCAN_CHR (G2_SWREG(177))
-#define G2_TILE_FILTER (G2_SWREG(179))
-#define G2_TILE_SAO (G2_SWREG(181))
-#define G2_TILE_BSD (G2_SWREG(183))
+#define G2_OUT_LUMA_ADDR (G2_SWREG(65))
+#define G2_REF_LUMA_ADDR(i) (G2_SWREG(67) + ((i) * 0x8))
+#define G2_VP9_SEGMENT_WRITE_ADDR (G2_SWREG(79))
+#define G2_VP9_SEGMENT_READ_ADDR (G2_SWREG(81))
+#define G2_OUT_CHROMA_ADDR (G2_SWREG(99))
+#define G2_REF_CHROMA_ADDR(i) (G2_SWREG(101) + ((i) * 0x8))
+#define G2_OUT_MV_ADDR (G2_SWREG(133))
+#define G2_REF_MV_ADDR(i) (G2_SWREG(135) + ((i) * 0x8))
+#define G2_TILE_SIZES_ADDR (G2_SWREG(167))
+#define G2_STREAM_ADDR (G2_SWREG(169))
+#define G2_HEVC_SCALING_LIST_ADDR (G2_SWREG(171))
+#define G2_VP9_CTX_COUNT_ADDR (G2_SWREG(171))
+#define G2_VP9_PROBS_ADDR (G2_SWREG(173))
+#define G2_RS_OUT_LUMA_ADDR (G2_SWREG(175))
+#define G2_RS_OUT_CHROMA_ADDR (G2_SWREG(177))
+#define G2_TILE_FILTER_ADDR (G2_SWREG(179))
+#define G2_TILE_SAO_ADDR (G2_SWREG(181))
+#define G2_TILE_BSD_ADDR (G2_SWREG(183))
#define g2_strm_buffer_len G2_DEC_REG(258, 0, 0xffffffff)
#define g2_strm_start_offset G2_DEC_REG(259, 0, 0xffffffff)
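
Each G2_DEC_REG(reg, shift, mask) triple names a bit field within software register "reg"; hantro_reg_write() shifts and masks the value into place. For example, selecting the VP9 decode mode would write the 5-bit field at bits 31:27 of swreg3:

    hantro_reg_write(vpu, &g2_mode, VP9_DEC_MODE);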
diff --git a/drivers/staging/media/hantro/hantro_g2_vp9_dec.c b/drivers/staging/media/hantro/hantro_g2_vp9_dec.c
new file mode 100644
index 000000000000..91c21b634fab
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g2_vp9_dec.c
@@ -0,0 +1,1022 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VP9 codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd.
+ */
+#include "media/videobuf2-core.h"
+#include "media/videobuf2-dma-contig.h"
+#include "media/videobuf2-v4l2.h"
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-vp9.h>
+
+#include "hantro.h"
+#include "hantro_vp9.h"
+#include "hantro_g2_regs.h"
+
+#define G2_ALIGN 16
+
+enum hantro_ref_frames {
+ INTRA_FRAME = 0,
+ LAST_FRAME = 1,
+ GOLDEN_FRAME = 2,
+ ALTREF_FRAME = 3,
+ MAX_REF_FRAMES = 4
+};
+
+static int start_prepare_run(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame **dec_params)
+{
+ const struct v4l2_ctrl_vp9_compressed_hdr *prob_updates;
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct v4l2_ctrl *ctrl;
+ unsigned int fctx_idx;
+
+ /* v4l2-specific stuff */
+ hantro_start_prepare_run(ctx);
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, V4L2_CID_STATELESS_VP9_FRAME);
+ if (WARN_ON(!ctrl))
+ return -EINVAL;
+ *dec_params = ctrl->p_cur.p;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, V4L2_CID_STATELESS_VP9_COMPRESSED_HDR);
+ if (WARN_ON(!ctrl))
+ return -EINVAL;
+ prob_updates = ctrl->p_cur.p;
+ vp9_ctx->cur.tx_mode = prob_updates->tx_mode;
+
+ /*
+	 * VP9-specific setup
+	 *
+	 * By this point userspace has performed all parts of 6.2 uncompressed_header()
+ * except this fragment:
+ * if ( FrameIsIntra || error_resilient_mode ) {
+ * setup_past_independence ( )
+ * if ( frame_type == KEY_FRAME || error_resilient_mode == 1 ||
+ * reset_frame_context == 3 ) {
+ * for ( i = 0; i < 4; i ++ ) {
+ * save_probs( i )
+ * }
+ * } else if ( reset_frame_context == 2 ) {
+ * save_probs( frame_context_idx )
+ * }
+ * frame_context_idx = 0
+ * }
+ */
+ fctx_idx = v4l2_vp9_reset_frame_ctx(*dec_params, vp9_ctx->frame_context);
+ vp9_ctx->cur.frame_context_idx = fctx_idx;
+
+ /* 6.1 frame(sz): load_probs() and load_probs2() */
+ vp9_ctx->probability_tables = vp9_ctx->frame_context[fctx_idx];
+
+ /*
+ * The userspace has also performed 6.3 compressed_header(), but handling the
+ * probs in a special way. All probs which need updating, except MV-related,
+ * have been read from the bitstream and translated through inv_map_table[],
+ * but no 6.3.6 inv_recenter_nonneg(v, m) has been performed. The values passed
+ * by userspace are either translated values (there are no 0 values in
+ * inv_map_table[]), or zero to indicate no update. All MV-related probs which need
+ * updating have been read from the bitstream and (mv_prob << 1) | 1 has been
+ * performed. The values passed by userspace are either new values
+	 * to replace old ones (the above-mentioned shift and bitwise OR never result in
+ * a zero) or zero to indicate no update.
+ * fw_update_probs() performs actual probs updates or leaves probs as-is
+ * for values for which a zero was passed from userspace.
+ */
+ v4l2_vp9_fw_update_probs(&vp9_ctx->probability_tables, prob_updates, *dec_params);
+
+ return 0;
+}
+
+static size_t chroma_offset(const struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ int bytes_per_pixel = dec_params->bit_depth == 8 ? 1 : 2;
+
+ return ctx->src_fmt.width * ctx->src_fmt.height * bytes_per_pixel;
+}
+
+static size_t mv_offset(const struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ size_t cr_offset = chroma_offset(ctx, dec_params);
+
+ return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
+}
+
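
chroma_offset() and mv_offset() lay the planes out back to back: the chroma plane follows the luma plane, and the motion-vector buffer follows the complete 4:2:0 frame (luma plus half-sized chroma, i.e. 1.5x the luma size) rounded up to G2_ALIGN. For an 8-bit 1920x1080 frame that gives chroma_offset = 1920 * 1080 = 2073600 bytes and mv_offset = ALIGN(2073600 * 3 / 2, 16) = 3110400 bytes.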
+static struct hantro_decoded_buffer *
+get_ref_buf(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
+ int buf_idx;
+
+ /*
+	 * If a ref is unused or invalid, the address of the current
+	 * destination buffer is returned.
+ */
+ buf_idx = vb2_find_timestamp(cap_q, timestamp, 0);
+ if (buf_idx < 0)
+ return vb2_to_hantro_decoded_buf(&dst->vb2_buf);
+
+ return vb2_to_hantro_decoded_buf(vb2_get_buffer(cap_q, buf_idx));
+}
+
+static void update_dec_buf_info(struct hantro_decoded_buffer *buf,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ buf->vp9.width = dec_params->frame_width_minus_1 + 1;
+ buf->vp9.height = dec_params->frame_height_minus_1 + 1;
+ buf->vp9.bit_depth = dec_params->bit_depth;
+}
+
+static void update_ctx_cur_info(struct hantro_vp9_dec_hw_ctx *vp9_ctx,
+ struct hantro_decoded_buffer *buf,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ vp9_ctx->cur.valid = true;
+ vp9_ctx->cur.reference_mode = dec_params->reference_mode;
+ vp9_ctx->cur.interpolation_filter = dec_params->interpolation_filter;
+ vp9_ctx->cur.flags = dec_params->flags;
+ vp9_ctx->cur.timestamp = buf->base.vb.vb2_buf.timestamp;
+}
+
+static void config_output(struct hantro_ctx *ctx,
+ struct hantro_decoded_buffer *dst,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ dma_addr_t luma_addr, chroma_addr, mv_addr;
+
+ hantro_reg_write(ctx->dev, &g2_out_dis, 0);
+ if (!ctx->dev->variant->legacy_regs)
+ hantro_reg_write(ctx->dev, &g2_output_format, 0);
+
+ luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
+ hantro_write_addr(ctx->dev, G2_OUT_LUMA_ADDR, luma_addr);
+
+ chroma_addr = luma_addr + chroma_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, G2_OUT_CHROMA_ADDR, chroma_addr);
+
+ mv_addr = luma_addr + mv_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, G2_OUT_MV_ADDR, mv_addr);
+}
+
+struct hantro_vp9_ref_reg {
+ const struct hantro_reg width;
+ const struct hantro_reg height;
+ const struct hantro_reg hor_scale;
+ const struct hantro_reg ver_scale;
+ u32 y_base;
+ u32 c_base;
+};
+
+static void config_ref(struct hantro_ctx *ctx,
+ struct hantro_decoded_buffer *dst,
+ const struct hantro_vp9_ref_reg *ref_reg,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ u64 ref_ts)
+{
+ struct hantro_decoded_buffer *buf;
+ dma_addr_t luma_addr, chroma_addr;
+ u32 refw, refh;
+
+ buf = get_ref_buf(ctx, &dst->base.vb, ref_ts);
+ refw = buf->vp9.width;
+ refh = buf->vp9.height;
+
+ hantro_reg_write(ctx->dev, &ref_reg->width, refw);
+ hantro_reg_write(ctx->dev, &ref_reg->height, refh);
+
+ hantro_reg_write(ctx->dev, &ref_reg->hor_scale, (refw << 14) / dst->vp9.width);
+ hantro_reg_write(ctx->dev, &ref_reg->ver_scale, (refh << 14) / dst->vp9.height);
+
+ luma_addr = hantro_get_dec_buf_addr(ctx, &buf->base.vb.vb2_buf);
+ hantro_write_addr(ctx->dev, ref_reg->y_base, luma_addr);
+
+ chroma_addr = luma_addr + chroma_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, ref_reg->c_base, chroma_addr);
+}
+
+static void config_ref_registers(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct hantro_decoded_buffer *dst,
+ struct hantro_decoded_buffer *mv_ref)
+{
+ static const struct hantro_vp9_ref_reg ref_regs[] = {
+ {
+ /* Last */
+ .width = vp9_lref_width,
+ .height = vp9_lref_height,
+ .hor_scale = vp9_lref_hor_scale,
+ .ver_scale = vp9_lref_ver_scale,
+ .y_base = G2_REF_LUMA_ADDR(0),
+ .c_base = G2_REF_CHROMA_ADDR(0),
+ }, {
+ /* Golden */
+ .width = vp9_gref_width,
+ .height = vp9_gref_height,
+ .hor_scale = vp9_gref_hor_scale,
+ .ver_scale = vp9_gref_ver_scale,
+ .y_base = G2_REF_LUMA_ADDR(4),
+ .c_base = G2_REF_CHROMA_ADDR(4),
+ }, {
+ /* Altref */
+ .width = vp9_aref_width,
+ .height = vp9_aref_height,
+ .hor_scale = vp9_aref_hor_scale,
+ .ver_scale = vp9_aref_ver_scale,
+ .y_base = G2_REF_LUMA_ADDR(5),
+ .c_base = G2_REF_CHROMA_ADDR(5),
+ },
+ };
+ dma_addr_t mv_addr;
+
+ config_ref(ctx, dst, &ref_regs[0], dec_params, dec_params->last_frame_ts);
+ config_ref(ctx, dst, &ref_regs[1], dec_params, dec_params->golden_frame_ts);
+ config_ref(ctx, dst, &ref_regs[2], dec_params, dec_params->alt_frame_ts);
+
+ mv_addr = hantro_get_dec_buf_addr(ctx, &mv_ref->base.vb.vb2_buf) +
+ mv_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, G2_REF_MV_ADDR(0), mv_addr);
+
+ hantro_reg_write(ctx->dev, &vp9_last_sign_bias,
+ dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_LAST ? 1 : 0);
+
+ hantro_reg_write(ctx->dev, &vp9_gref_sign_bias,
+ dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_GOLDEN ? 1 : 0);
+
+ hantro_reg_write(ctx->dev, &vp9_aref_sign_bias,
+ dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_ALT ? 1 : 0);
+}
+
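
The hor_scale/ver_scale values written by config_ref() are fixed-point ratios with 14 fractional bits: (reference dimension << 14) / destination dimension. A reference twice as wide as the destination, e.g. 1920 over 960, yields (1920 << 14) / 960 = 32768, i.e. 2.0 in Q2.14.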
+static void recompute_tile_info(unsigned short *tile_info, unsigned int tiles, unsigned int sbs)
+{
+ int i;
+ unsigned int accumulated = 0;
+ unsigned int next_accumulated;
+
+ for (i = 1; i <= tiles; ++i) {
+ next_accumulated = i * sbs / tiles;
+ *tile_info++ = next_accumulated - accumulated;
+ accumulated = next_accumulated;
+ }
+}
+
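
recompute_tile_info() distributes sbs superblocks over tiles as evenly as integer division allows, storing per-tile sizes as differences of the cumulative cut points i * sbs / tiles. For tiles = 4 and sbs = 10 the cuts are 2, 5, 7, 10, giving tile sizes {2, 3, 2, 3}.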
+static void
+recompute_tile_rc_info(struct hantro_ctx *ctx,
+ unsigned int tile_r, unsigned int tile_c,
+ unsigned int sbs_r, unsigned int sbs_c)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+
+ recompute_tile_info(vp9_ctx->tile_r_info, tile_r, sbs_r);
+ recompute_tile_info(vp9_ctx->tile_c_info, tile_c, sbs_c);
+
+ vp9_ctx->last_tile_r = tile_r;
+ vp9_ctx->last_tile_c = tile_c;
+ vp9_ctx->last_sbs_r = sbs_r;
+ vp9_ctx->last_sbs_c = sbs_c;
+}
+
+static inline unsigned int first_tile_row(unsigned int tile_r, unsigned int sbs_r)
+{
+ if (tile_r == sbs_r + 1)
+ return 1;
+
+ if (tile_r == sbs_r + 2)
+ return 2;
+
+ return 0;
+}
+
+static void
+fill_tile_info(struct hantro_ctx *ctx,
+ unsigned int tile_r, unsigned int tile_c,
+ unsigned int sbs_r, unsigned int sbs_c,
+ unsigned short *tile_mem)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ unsigned int i, j;
+ bool first = true;
+
+ for (i = first_tile_row(tile_r, sbs_r); i < tile_r; ++i) {
+ unsigned short r_info = vp9_ctx->tile_r_info[i];
+
+ if (first) {
+ if (i > 0)
+ r_info += vp9_ctx->tile_r_info[0];
+ if (i == 2)
+ r_info += vp9_ctx->tile_r_info[1];
+ first = false;
+ }
+ for (j = 0; j < tile_c; ++j) {
+ *tile_mem++ = vp9_ctx->tile_c_info[j];
+ *tile_mem++ = r_info;
+ }
+ }
+}
+
+static void
+config_tiles(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct hantro_decoded_buffer *dst)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct hantro_aux_buf *misc = &vp9_ctx->misc;
+ struct hantro_aux_buf *tile_edge = &vp9_ctx->tile_edge;
+ dma_addr_t addr;
+ unsigned short *tile_mem;
+ unsigned int rows, cols;
+
+ addr = misc->dma + vp9_ctx->tile_info_offset;
+ hantro_write_addr(ctx->dev, G2_TILE_SIZES_ADDR, addr);
+
+ tile_mem = misc->cpu + vp9_ctx->tile_info_offset;
+ if (dec_params->tile_cols_log2 || dec_params->tile_rows_log2) {
+ unsigned int tile_r = (1 << dec_params->tile_rows_log2);
+ unsigned int tile_c = (1 << dec_params->tile_cols_log2);
+ unsigned int sbs_r = hantro_vp9_num_sbs(dst->vp9.height);
+ unsigned int sbs_c = hantro_vp9_num_sbs(dst->vp9.width);
+
+ if (tile_r != vp9_ctx->last_tile_r || tile_c != vp9_ctx->last_tile_c ||
+ sbs_r != vp9_ctx->last_sbs_r || sbs_c != vp9_ctx->last_sbs_c)
+ recompute_tile_rc_info(ctx, tile_r, tile_c, sbs_r, sbs_c);
+
+ fill_tile_info(ctx, tile_r, tile_c, sbs_r, sbs_c, tile_mem);
+
+ cols = tile_c;
+ rows = tile_r;
+ hantro_reg_write(ctx->dev, &g2_tile_e, 1);
+ } else {
+ tile_mem[0] = hantro_vp9_num_sbs(dst->vp9.width);
+ tile_mem[1] = hantro_vp9_num_sbs(dst->vp9.height);
+
+ cols = 1;
+ rows = 1;
+ hantro_reg_write(ctx->dev, &g2_tile_e, 0);
+ }
+
+ if (ctx->dev->variant->legacy_regs) {
+ hantro_reg_write(ctx->dev, &g2_num_tile_cols_old, cols);
+ hantro_reg_write(ctx->dev, &g2_num_tile_rows_old, rows);
+ } else {
+ hantro_reg_write(ctx->dev, &g2_num_tile_cols, cols);
+ hantro_reg_write(ctx->dev, &g2_num_tile_rows, rows);
+ }
+
+ /* provide aux buffers even if no tiles are used */
+ addr = tile_edge->dma;
+ hantro_write_addr(ctx->dev, G2_TILE_FILTER_ADDR, addr);
+
+ addr = tile_edge->dma + vp9_ctx->bsd_ctrl_offset;
+ hantro_write_addr(ctx->dev, G2_TILE_BSD_ADDR, addr);
+}
+
+static void
+update_feat_and_flag(struct hantro_vp9_dec_hw_ctx *vp9_ctx,
+ const struct v4l2_vp9_segmentation *seg,
+ unsigned int feature,
+ unsigned int segid)
+{
+ u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature);
+
+ vp9_ctx->feature_data[segid][feature] = seg->feature_data[segid][feature];
+ vp9_ctx->feature_enabled[segid] &= ~mask;
+ vp9_ctx->feature_enabled[segid] |= (seg->feature_enabled[segid] & mask);
+}
+
+static inline s16 clip3(s16 x, s16 y, s16 z)
+{
+ return (z < x) ? x : (z > y) ? y : z;
+}
+
+static s16 feat_val_clip3(s16 feat_val, s16 feature_data, bool absolute, u8 clip)
+{
+ if (absolute)
+ return feature_data;
+
+	return clip3(0, clip, feat_val + feature_data);
+}
+
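
The clip argument bounds the delta-adjusted feature value; the two call sites below pass 255 for the quantizer feature and 63 for the loop-filter level, matching the ranges of the corresponding VP9 segmentation feature data.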
+static void config_segment(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ const struct v4l2_vp9_segmentation *seg;
+ s16 feat_val;
+ unsigned char feat_id;
+ unsigned int segid;
+ bool segment_enabled, absolute, update_data;
+
+ static const struct hantro_reg seg_regs[8][V4L2_VP9_SEG_LVL_MAX] = {
+ { vp9_quant_seg0, vp9_filt_level_seg0, vp9_refpic_seg0, vp9_skip_seg0 },
+ { vp9_quant_seg1, vp9_filt_level_seg1, vp9_refpic_seg1, vp9_skip_seg1 },
+ { vp9_quant_seg2, vp9_filt_level_seg2, vp9_refpic_seg2, vp9_skip_seg2 },
+ { vp9_quant_seg3, vp9_filt_level_seg3, vp9_refpic_seg3, vp9_skip_seg3 },
+ { vp9_quant_seg4, vp9_filt_level_seg4, vp9_refpic_seg4, vp9_skip_seg4 },
+ { vp9_quant_seg5, vp9_filt_level_seg5, vp9_refpic_seg5, vp9_skip_seg5 },
+ { vp9_quant_seg6, vp9_filt_level_seg6, vp9_refpic_seg6, vp9_skip_seg6 },
+ { vp9_quant_seg7, vp9_filt_level_seg7, vp9_refpic_seg7, vp9_skip_seg7 },
+ };
+
+ segment_enabled = !!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_ENABLED);
+ hantro_reg_write(ctx->dev, &vp9_segment_e, segment_enabled);
+ hantro_reg_write(ctx->dev, &vp9_segment_upd_e,
+ !!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP));
+ hantro_reg_write(ctx->dev, &vp9_segment_temp_upd_e,
+ !!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE));
+
+ seg = &dec_params->seg;
+ absolute = !!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE);
+ update_data = !!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA);
+
+ for (segid = 0; segid < 8; ++segid) {
+ /* Quantizer segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_ALT_Q;
+ feat_val = dec_params->quant.base_q_idx;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ if (v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
+ feat_val = feat_val_clip3(feat_val,
+ vp9_ctx->feature_data[segid][feat_id],
+ absolute, 255);
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+
+ /* Loop filter segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_ALT_L;
+ feat_val = dec_params->lf.level;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ if (v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
+ feat_val = feat_val_clip3(feat_val,
+ vp9_ctx->feature_data[segid][feat_id],
+ absolute, 63);
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+
+ /* Reference frame segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_REF_FRAME;
+ feat_val = 0;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ if (!(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
+ v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
+ feat_val = vp9_ctx->feature_data[segid][feat_id] + 1;
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+
+ /* Skip segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_SKIP;
+ feat_val = 0;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ feat_val = v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled,
+ feat_id, segid) ? 1 : 0;
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+ }
+}
+
+static void config_loop_filter(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ bool d = dec_params->lf.flags & V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED;
+
+ hantro_reg_write(ctx->dev, &vp9_filt_level, dec_params->lf.level);
+ hantro_reg_write(ctx->dev, &g2_out_filtering_dis, dec_params->lf.level == 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_sharpness, dec_params->lf.sharpness);
+
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_0, d ? dec_params->lf.ref_deltas[0] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_1, d ? dec_params->lf.ref_deltas[1] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_2, d ? dec_params->lf.ref_deltas[2] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_3, d ? dec_params->lf.ref_deltas[3] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_mb_adj_0, d ? dec_params->lf.mode_deltas[0] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_mb_adj_1, d ? dec_params->lf.mode_deltas[1] : 0);
+}
+
+static void config_picture_dimensions(struct hantro_ctx *ctx, struct hantro_decoded_buffer *dst)
+{
+ u32 pic_w_4x4, pic_h_4x4;
+
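+ /* The G2 core takes dimensions both in 8-pixel coding blocks and in 4x4 units. */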
+ hantro_reg_write(ctx->dev, &g2_pic_width_in_cbs, (dst->vp9.width + 7) / 8);
+ hantro_reg_write(ctx->dev, &g2_pic_height_in_cbs, (dst->vp9.height + 7) / 8);
+ pic_w_4x4 = roundup(dst->vp9.width, 8) >> 2;
+ pic_h_4x4 = roundup(dst->vp9.height, 8) >> 2;
+ hantro_reg_write(ctx->dev, &g2_pic_width_4x4, pic_w_4x4);
+ hantro_reg_write(ctx->dev, &g2_pic_height_4x4, pic_h_4x4);
+}
+
+static void
+config_bit_depth(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ if (ctx->dev->variant->legacy_regs) {
+ u8 pp_shift = 0;
+
+ hantro_reg_write(ctx->dev, &g2_bit_depth_y, dec_params->bit_depth);
+ hantro_reg_write(ctx->dev, &g2_bit_depth_c, dec_params->bit_depth);
+ hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, dec_params->bit_depth);
+
+ if (dec_params->bit_depth > 8)
+ pp_shift = 16 - dec_params->bit_depth;
+
+ hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift);
+ hantro_reg_write(ctx->dev, &g2_pix_shift, 0);
+ } else {
+ hantro_reg_write(ctx->dev, &g2_bit_depth_y_minus8, dec_params->bit_depth - 8);
+ hantro_reg_write(ctx->dev, &g2_bit_depth_c_minus8, dec_params->bit_depth - 8);
+ }
+}
+
+static inline bool is_lossless(const struct v4l2_vp9_quantization *quant)
+{
+ return quant->base_q_idx == 0 && quant->delta_q_uv_ac == 0 &&
+ quant->delta_q_uv_dc == 0 && quant->delta_q_y_dc == 0;
+}
+
+static void
+config_quant(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ hantro_reg_write(ctx->dev, &vp9_qp_delta_y_dc, dec_params->quant.delta_q_y_dc);
+ hantro_reg_write(ctx->dev, &vp9_qp_delta_ch_dc, dec_params->quant.delta_q_uv_dc);
+ hantro_reg_write(ctx->dev, &vp9_qp_delta_ch_ac, dec_params->quant.delta_q_uv_ac);
+ hantro_reg_write(ctx->dev, &vp9_lossless_e, is_lossless(&dec_params->quant));
+}
+
+static u32
+hantro_interp_filter_from_v4l2(unsigned int interpolation_filter)
+{
+ switch (interpolation_filter) {
+ case V4L2_VP9_INTERP_FILTER_EIGHTTAP:
+ return 0x1;
+ case V4L2_VP9_INTERP_FILTER_EIGHTTAP_SMOOTH:
+ return 0;
+ case V4L2_VP9_INTERP_FILTER_EIGHTTAP_SHARP:
+ return 0x2;
+ case V4L2_VP9_INTERP_FILTER_BILINEAR:
+ return 0x3;
+ case V4L2_VP9_INTERP_FILTER_SWITCHABLE:
+ return 0x4;
+ }
+
+ return 0;
+}
+
+static void
+config_others(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
+ bool intra_only, bool resolution_change)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+
+ hantro_reg_write(ctx->dev, &g2_idr_pic_e, intra_only);
+
+ hantro_reg_write(ctx->dev, &vp9_transform_mode, vp9_ctx->cur.tx_mode);
+
+ hantro_reg_write(ctx->dev, &vp9_mcomp_filt_type, intra_only ?
+ 0 : hantro_interp_filter_from_v4l2(dec_params->interpolation_filter));
+
+ hantro_reg_write(ctx->dev, &vp9_high_prec_mv_e,
+ !!(dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV));
+
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_mode, dec_params->reference_mode);
+
+ hantro_reg_write(ctx->dev, &g2_tempor_mvp_e,
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
+ !(vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY) &&
+ !resolution_change &&
+ vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_SHOW_FRAME
+ );
+
+ hantro_reg_write(ctx->dev, &g2_write_mvs_e,
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME));
+}
+
+static void
+config_compound_reference(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ u32 comp_fixed_ref, comp_var_ref[2];
+ bool last_ref_frame_sign_bias;
+ bool golden_ref_frame_sign_bias;
+ bool alt_ref_frame_sign_bias;
+ bool comp_ref_allowed = false;
+
+ comp_fixed_ref = 0;
+ comp_var_ref[0] = 0;
+ comp_var_ref[1] = 0;
+
+ last_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_LAST;
+ golden_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_GOLDEN;
+ alt_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_ALT;
+
+ /* 6.3.12 Frame reference mode syntax */
+ comp_ref_allowed |= golden_ref_frame_sign_bias != last_ref_frame_sign_bias;
+ comp_ref_allowed |= alt_ref_frame_sign_bias != last_ref_frame_sign_bias;
+
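+ /*
+ * Compound prediction is only possible when the reference frames do
+ * not all share the same sign bias, i.e. when they lie on both
+ * temporal sides of the current frame.
+ */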
+ if (comp_ref_allowed) {
+ if (last_ref_frame_sign_bias ==
+ golden_ref_frame_sign_bias) {
+ comp_fixed_ref = ALTREF_FRAME;
+ comp_var_ref[0] = LAST_FRAME;
+ comp_var_ref[1] = GOLDEN_FRAME;
+ } else if (last_ref_frame_sign_bias ==
+ alt_ref_frame_sign_bias) {
+ comp_fixed_ref = GOLDEN_FRAME;
+ comp_var_ref[0] = LAST_FRAME;
+ comp_var_ref[1] = ALTREF_FRAME;
+ } else {
+ comp_fixed_ref = LAST_FRAME;
+ comp_var_ref[0] = GOLDEN_FRAME;
+ comp_var_ref[1] = ALTREF_FRAME;
+ }
+ }
+
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_fixed_ref, comp_fixed_ref);
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_var_ref0, comp_var_ref[0]);
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_var_ref1, comp_var_ref[1]);
+}
+
+#define INNER_LOOP \
+do { \
+ for (m = 0; m < ARRAY_SIZE(adaptive->coef[0][0][0][0]); ++m) { \
+ memcpy(adaptive->coef[i][j][k][l][m], \
+ probs->coef[i][j][k][l][m], \
+ sizeof(probs->coef[i][j][k][l][m])); \
+ \
+ adaptive->coef[i][j][k][l][m][3] = 0; \
+ } \
+} while (0)
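+
+/*
+ * The fourth byte of each coefficient probability group appears to be
+ * padding in the G2 layout (VP9 defines three probabilities per node),
+ * which is why element [3] is cleared by INNER_LOOP above.
+ */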
+
+static void config_probs(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct hantro_aux_buf *misc = &vp9_ctx->misc;
+ struct hantro_g2_all_probs *all_probs = misc->cpu;
+ struct hantro_g2_probs *adaptive;
+ struct hantro_g2_mv_probs *mv;
+ const struct v4l2_vp9_segmentation *seg = &dec_params->seg;
+ const struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
+ int i, j, k, l, m;
+
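+ /*
+ * The hardware layout keeps only eight of the nine mode probabilities
+ * inline; the ninth lives in a separate *_tail array, which is why
+ * element [8] is copied out separately below.
+ */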
+ for (i = 0; i < ARRAY_SIZE(all_probs->kf_y_mode_prob); ++i)
+ for (j = 0; j < ARRAY_SIZE(all_probs->kf_y_mode_prob[0]); ++j) {
+ memcpy(all_probs->kf_y_mode_prob[i][j],
+ v4l2_vp9_kf_y_mode_prob[i][j],
+ ARRAY_SIZE(all_probs->kf_y_mode_prob[i][j]));
+
+ all_probs->kf_y_mode_prob_tail[i][j][0] =
+ v4l2_vp9_kf_y_mode_prob[i][j][8];
+ }
+
+ memcpy(all_probs->mb_segment_tree_probs, seg->tree_probs,
+ sizeof(all_probs->mb_segment_tree_probs));
+
+ memcpy(all_probs->segment_pred_probs, seg->pred_probs,
+ sizeof(all_probs->segment_pred_probs));
+
+ for (i = 0; i < ARRAY_SIZE(all_probs->kf_uv_mode_prob); ++i) {
+ memcpy(all_probs->kf_uv_mode_prob[i], v4l2_vp9_kf_uv_mode_prob[i],
+ ARRAY_SIZE(all_probs->kf_uv_mode_prob[i]));
+
+ all_probs->kf_uv_mode_prob_tail[i][0] = v4l2_vp9_kf_uv_mode_prob[i][8];
+ }
+
+ adaptive = &all_probs->probs;
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->inter_mode); ++i) {
+ memcpy(adaptive->inter_mode[i], probs->inter_mode[i],
+ ARRAY_SIZE(probs->inter_mode[i]));
+
+ adaptive->inter_mode[i][3] = 0;
+ }
+
+ memcpy(adaptive->is_inter, probs->is_inter, sizeof(adaptive->is_inter));
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->uv_mode); ++i) {
+ memcpy(adaptive->uv_mode[i], probs->uv_mode[i],
+ sizeof(adaptive->uv_mode[i]));
+ adaptive->uv_mode_tail[i][0] = probs->uv_mode[i][8];
+ }
+
+ memcpy(adaptive->tx8, probs->tx8, sizeof(adaptive->tx8));
+ memcpy(adaptive->tx16, probs->tx16, sizeof(adaptive->tx16));
+ memcpy(adaptive->tx32, probs->tx32, sizeof(adaptive->tx32));
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->y_mode); ++i) {
+ memcpy(adaptive->y_mode[i], probs->y_mode[i],
+ ARRAY_SIZE(adaptive->y_mode[i]));
+
+ adaptive->y_mode_tail[i][0] = probs->y_mode[i][8];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->partition[0]); ++i) {
+ memcpy(adaptive->partition[0][i], v4l2_vp9_kf_partition_probs[i],
+ sizeof(v4l2_vp9_kf_partition_probs[i]));
+
+ adaptive->partition[0][i][3] = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->partition[1]); ++i) {
+ memcpy(adaptive->partition[1][i], probs->partition[i],
+ sizeof(probs->partition[i]));
+
+ adaptive->partition[1][i][3] = 0;
+ }
+
+ memcpy(adaptive->interp_filter, probs->interp_filter,
+ sizeof(adaptive->interp_filter));
+
+ memcpy(adaptive->comp_mode, probs->comp_mode, sizeof(adaptive->comp_mode));
+
+ memcpy(adaptive->skip, probs->skip, sizeof(adaptive->skip));
+
+ mv = &adaptive->mv;
+
+ memcpy(mv->joint, probs->mv.joint, sizeof(mv->joint));
+ memcpy(mv->sign, probs->mv.sign, sizeof(mv->sign));
+ memcpy(mv->class0_bit, probs->mv.class0_bit, sizeof(mv->class0_bit));
+ memcpy(mv->fr, probs->mv.fr, sizeof(mv->fr));
+ memcpy(mv->class0_hp, probs->mv.class0_hp, sizeof(mv->class0_hp));
+ memcpy(mv->hp, probs->mv.hp, sizeof(mv->hp));
+ memcpy(mv->classes, probs->mv.classes, sizeof(mv->classes));
+ memcpy(mv->class0_fr, probs->mv.class0_fr, sizeof(mv->class0_fr));
+ memcpy(mv->bits, probs->mv.bits, sizeof(mv->bits));
+
+ memcpy(adaptive->single_ref, probs->single_ref, sizeof(adaptive->single_ref));
+
+ memcpy(adaptive->comp_ref, probs->comp_ref, sizeof(adaptive->comp_ref));
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->coef); ++i)
+ for (j = 0; j < ARRAY_SIZE(adaptive->coef[0]); ++j)
+ for (k = 0; k < ARRAY_SIZE(adaptive->coef[0][0]); ++k)
+ for (l = 0; l < ARRAY_SIZE(adaptive->coef[0][0][0]); ++l)
+ INNER_LOOP;
+
+ hantro_write_addr(ctx->dev, G2_VP9_PROBS_ADDR, misc->dma);
+}
+
+static void config_counts(struct hantro_ctx *ctx)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
+ struct hantro_aux_buf *misc = &vp9_dec->misc;
+ dma_addr_t addr = misc->dma + vp9_dec->ctx_counters_offset;
+
+ hantro_write_addr(ctx->dev, G2_VP9_CTX_COUNT_ADDR, addr);
+}
+
+static void config_seg_map(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ bool intra_only, bool update_map)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct hantro_aux_buf *segment_map = &vp9_ctx->segment_map;
+ dma_addr_t addr;
+
+ if (intra_only ||
+ (dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT)) {
+ memset(segment_map->cpu, 0, segment_map->size);
+ memset(vp9_ctx->feature_data, 0, sizeof(vp9_ctx->feature_data));
+ memset(vp9_ctx->feature_enabled, 0, sizeof(vp9_ctx->feature_enabled));
+ }
+
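+ /*
+ * The segment map is double buffered: the hardware reads the previous
+ * frame's map from one half and writes the updated map to the other,
+ * and active_segment flips whenever the map is updated.
+ */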
+ addr = segment_map->dma + vp9_ctx->active_segment * vp9_ctx->segment_map_size;
+ hantro_write_addr(ctx->dev, G2_VP9_SEGMENT_READ_ADDR, addr);
+
+ addr = segment_map->dma + (1 - vp9_ctx->active_segment) * vp9_ctx->segment_map_size;
+ hantro_write_addr(ctx->dev, G2_VP9_SEGMENT_WRITE_ADDR, addr);
+
+ if (update_map)
+ vp9_ctx->active_segment = 1 - vp9_ctx->active_segment;
+}
+
+static void
+config_source(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct vb2_v4l2_buffer *vb2_src)
+{
+ dma_addr_t stream_base, tmp_addr;
+ unsigned int headers_size;
+ u32 src_len, start_bit, src_buf_len;
+
+ headers_size = dec_params->uncompressed_header_size
+ + dec_params->compressed_header_size;
+
+ stream_base = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+
+ tmp_addr = stream_base + headers_size;
+ if (ctx->dev->variant->legacy_regs)
+ hantro_write_addr(ctx->dev, G2_STREAM_ADDR, (tmp_addr & ~0xf));
+ else
+ hantro_write_addr(ctx->dev, G2_STREAM_ADDR, stream_base);
+
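+ /*
+ * The stream address is aligned down to 16 bytes for the hardware;
+ * the discarded bytes are skipped by programming the matching bit
+ * offset instead.
+ */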
+ start_bit = (tmp_addr & 0xf) * 8;
+ hantro_reg_write(ctx->dev, &g2_start_bit, start_bit);
+
+ src_len = vb2_get_plane_payload(&vb2_src->vb2_buf, 0);
+ src_len += start_bit / 8 - headers_size;
+ hantro_reg_write(ctx->dev, &g2_stream_len, src_len);
+
+ if (!ctx->dev->variant->legacy_regs) {
+ tmp_addr &= ~0xf;
+ hantro_reg_write(ctx->dev, &g2_strm_start_offset, tmp_addr - stream_base);
+ src_buf_len = vb2_plane_size(&vb2_src->vb2_buf, 0);
+ hantro_reg_write(ctx->dev, &g2_strm_buffer_len, src_buf_len);
+ }
+}
+
+static void
+config_registers(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct vb2_v4l2_buffer *vb2_src, struct vb2_v4l2_buffer *vb2_dst)
+{
+ struct hantro_decoded_buffer *dst, *last, *mv_ref;
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ const struct v4l2_vp9_segmentation *seg;
+ bool intra_only, resolution_change;
+
+ /* Resolve the destination and last-frame reference buffers. */
+ dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
+
+ if (vp9_ctx->last.valid)
+ last = get_ref_buf(ctx, &dst->base.vb, vp9_ctx->last.timestamp);
+ else
+ last = dst;
+
+ update_dec_buf_info(dst, dec_params);
+ update_ctx_cur_info(vp9_ctx, dst, dec_params);
+ seg = &dec_params->seg;
+
+ intra_only = !!(dec_params->flags &
+ (V4L2_VP9_FRAME_FLAG_KEY_FRAME |
+ V4L2_VP9_FRAME_FLAG_INTRA_ONLY));
+
+ if (!intra_only &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
+ vp9_ctx->last.valid)
+ mv_ref = last;
+ else
+ mv_ref = dst;
+
+ resolution_change = dst->vp9.width != last->vp9.width ||
+ dst->vp9.height != last->vp9.height;
+
+ /* configure basic registers */
+ hantro_reg_write(ctx->dev, &g2_mode, VP9_DEC_MODE);
+ if (!ctx->dev->variant->legacy_regs) {
+ hantro_reg_write(ctx->dev, &g2_strm_swap, 0xf);
+ hantro_reg_write(ctx->dev, &g2_dirmv_swap, 0xf);
+ hantro_reg_write(ctx->dev, &g2_compress_swap, 0xf);
+ hantro_reg_write(ctx->dev, &g2_ref_compress_bypass, 1);
+ } else {
+ hantro_reg_write(ctx->dev, &g2_strm_swap_old, 0x1f);
+ hantro_reg_write(ctx->dev, &g2_pic_swap, 0x10);
+ hantro_reg_write(ctx->dev, &g2_dirmv_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab0_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab1_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab2_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab3_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_rscan_swap, 0x10);
+ }
+ hantro_reg_write(ctx->dev, &g2_buswidth, BUS_WIDTH_128);
+ hantro_reg_write(ctx->dev, &g2_max_burst, 16);
+ hantro_reg_write(ctx->dev, &g2_apf_threshold, 8);
+ hantro_reg_write(ctx->dev, &g2_clk_gate_e, 1);
+ hantro_reg_write(ctx->dev, &g2_max_cb_size, 6);
+ hantro_reg_write(ctx->dev, &g2_min_cb_size, 3);
+ if (ctx->dev->variant->double_buffer)
+ hantro_reg_write(ctx->dev, &g2_double_buffer_e, 1);
+
+ config_output(ctx, dst, dec_params);
+
+ if (!intra_only)
+ config_ref_registers(ctx, dec_params, dst, mv_ref);
+
+ config_tiles(ctx, dec_params, dst);
+ config_segment(ctx, dec_params);
+ config_loop_filter(ctx, dec_params);
+ config_picture_dimensions(ctx, dst);
+ config_bit_depth(ctx, dec_params);
+ config_quant(ctx, dec_params);
+ config_others(ctx, dec_params, intra_only, resolution_change);
+ config_compound_reference(ctx, dec_params);
+ config_probs(ctx, dec_params);
+ config_counts(ctx);
+ config_seg_map(ctx, dec_params, intra_only,
+ seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP);
+ config_source(ctx, dec_params, vb2_src);
+}
+
+int hantro_g2_vp9_dec_run(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_vp9_frame *decode_params;
+ struct vb2_v4l2_buffer *src;
+ struct vb2_v4l2_buffer *dst;
+ int ret;
+
+ hantro_g2_check_idle(ctx->dev);
+
+ ret = start_prepare_run(ctx, &decode_params);
+ if (ret) {
+ hantro_end_prepare_run(ctx);
+ return ret;
+ }
+
+ src = hantro_get_src_buf(ctx);
+ dst = hantro_get_dst_buf(ctx);
+
+ config_registers(ctx, decode_params, src, dst);
+
+ hantro_end_prepare_run(ctx);
+
+ vdpu_write(ctx->dev, G2_REG_INTERRUPT_DEC_E, G2_REG_INTERRUPT);
+
+ return 0;
+}
+
+#define copy_tx_and_skip(p1, p2) \
+do { \
+ memcpy((p1)->tx8, (p2)->tx8, sizeof((p1)->tx8)); \
+ memcpy((p1)->tx16, (p2)->tx16, sizeof((p1)->tx16)); \
+ memcpy((p1)->tx32, (p2)->tx32, sizeof((p1)->tx32)); \
+ memcpy((p1)->skip, (p2)->skip, sizeof((p1)->skip)); \
+} while (0)
+
+void hantro_g2_vp9_dec_done(struct hantro_ctx *ctx)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ unsigned int fctx_idx;
+
+ if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX))
+ goto out_update_last;
+
+ fctx_idx = vp9_ctx->cur.frame_context_idx;
+
+ if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE)) {
+ /* error_resilient_mode == 0 && frame_parallel_decoding_mode == 0 */
+ struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
+ bool frame_is_intra = vp9_ctx->cur.flags &
+ (V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_INTRA_ONLY);
+ struct tx_and_skip {
+ u8 tx8[2][1];
+ u8 tx16[2][2];
+ u8 tx32[2][3];
+ u8 skip[3];
+ } _tx_skip, *tx_skip = &_tx_skip;
+ struct v4l2_vp9_frame_symbol_counts *counts;
+ struct symbol_counts *hantro_cnts;
+ u32 tx16p[2][4];
+ int i;
+
+ /* buffer the forward-updated TX and skip probs */
+ if (frame_is_intra)
+ copy_tx_and_skip(tx_skip, probs);
+
+ /* 6.1.2 refresh_probs(): load_probs() and load_probs2() */
+ *probs = vp9_ctx->frame_context[fctx_idx];
+
+ /* if FrameIsIntra then undo the effect of load_probs2() */
+ if (frame_is_intra)
+ copy_tx_and_skip(probs, tx_skip);
+
+ counts = &vp9_ctx->cnts;
+ hantro_cnts = vp9_ctx->misc.cpu + vp9_ctx->ctx_counters_offset;
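+ /* Widen the hardware's tx16x16_count[2][3] into the tx16p[2][4] layout. */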
+ for (i = 0; i < ARRAY_SIZE(tx16p); ++i) {
+ memcpy(tx16p[i],
+ hantro_cnts->tx16x16_count[i],
+ sizeof(hantro_cnts->tx16x16_count[0]));
+ tx16p[i][3] = 0;
+ }
+ counts->tx16p = &tx16p;
+
+ v4l2_vp9_adapt_coef_probs(probs, counts,
+ !vp9_ctx->last.valid ||
+ vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME,
+ frame_is_intra);
+
+ if (!frame_is_intra) {
+ /* load_probs2() already done */
+ u32 mv_mode[7][4];
+
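+ /*
+ * The hardware counts the inter-mode bins in a different order than
+ * the V4L2 VP9 helpers expect, hence the index shuffle below.
+ */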
+ for (i = 0; i < ARRAY_SIZE(mv_mode); ++i) {
+ mv_mode[i][0] = hantro_cnts->inter_mode_counts[i][1][0];
+ mv_mode[i][1] = hantro_cnts->inter_mode_counts[i][2][0];
+ mv_mode[i][2] = hantro_cnts->inter_mode_counts[i][0][0];
+ mv_mode[i][3] = hantro_cnts->inter_mode_counts[i][2][1];
+ }
+ counts->mv_mode = &mv_mode;
+ v4l2_vp9_adapt_noncoef_probs(&vp9_ctx->probability_tables, counts,
+ vp9_ctx->cur.reference_mode,
+ vp9_ctx->cur.interpolation_filter,
+ vp9_ctx->cur.tx_mode, vp9_ctx->cur.flags);
+ }
+ }
+
+ vp9_ctx->frame_context[fctx_idx] = vp9_ctx->probability_tables;
+
+out_update_last:
+ vp9_ctx->last = vp9_ctx->cur;
+}
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index 56cf261a8e95..1450013d3685 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -113,9 +113,8 @@ int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
hantro_h1_set_src_img_ctrl(vpu, ctx);
hantro_h1_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
- hantro_h1_jpeg_enc_set_qtable(vpu,
- hantro_jpeg_get_qtable(0),
- hantro_jpeg_get_qtable(1));
+ hantro_h1_jpeg_enc_set_qtable(vpu, jpeg_ctx.hw_luma_qtable,
+ jpeg_ctx.hw_chroma_qtable);
reg = H1_REG_AXI_CTRL_OUTPUT_SWAP16
| H1_REG_AXI_CTRL_INPUT_SWAP16
@@ -140,7 +139,7 @@ int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
return 0;
}
-void hantro_jpeg_enc_done(struct hantro_ctx *ctx)
+void hantro_h1_jpeg_enc_done(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
u32 bytesused = vepu_read(vpu, H1_REG_STR_BUF_LIMIT) / 8;
diff --git a/drivers/staging/media/hantro/hantro_hevc.c b/drivers/staging/media/hantro/hantro_hevc.c
index ee03123e7704..b49a41d7ae91 100644
--- a/drivers/staging/media/hantro/hantro_hevc.c
+++ b/drivers/staging/media/hantro/hantro_hevc.c
@@ -44,47 +44,6 @@ size_t hantro_hevc_motion_vectors_offset(const struct v4l2_ctrl_hevc_sps *sps)
return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
}
-static size_t hantro_hevc_mv_size(const struct v4l2_ctrl_hevc_sps *sps)
-{
- u32 min_cb_log2_size_y = sps->log2_min_luma_coding_block_size_minus3 + 3;
- u32 ctb_log2_size_y = min_cb_log2_size_y + sps->log2_diff_max_min_luma_coding_block_size;
- u32 pic_width_in_ctbs_y = (sps->pic_width_in_luma_samples + (1 << ctb_log2_size_y) - 1)
- >> ctb_log2_size_y;
- u32 pic_height_in_ctbs_y = (sps->pic_height_in_luma_samples + (1 << ctb_log2_size_y) - 1)
- >> ctb_log2_size_y;
- size_t mv_size;
-
- mv_size = pic_width_in_ctbs_y * pic_height_in_ctbs_y *
- (1 << (2 * (ctb_log2_size_y - 4))) * 16;
-
- vpu_debug(4, "%dx%d (CTBs) %zu MV bytes\n",
- pic_width_in_ctbs_y, pic_height_in_ctbs_y, mv_size);
-
- return mv_size;
-}
-
-static size_t hantro_hevc_ref_size(struct hantro_ctx *ctx)
-{
- const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
- const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
-
- return hantro_hevc_motion_vectors_offset(sps) + hantro_hevc_mv_size(sps);
-}
-
-static void hantro_hevc_ref_free(struct hantro_ctx *ctx)
-{
- struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
- struct hantro_dev *vpu = ctx->dev;
- int i;
-
- for (i = 0; i < NUM_REF_PICTURES; i++) {
- if (hevc_dec->ref_bufs[i].cpu)
- dma_free_coherent(vpu->dev, hevc_dec->ref_bufs[i].size,
- hevc_dec->ref_bufs[i].cpu,
- hevc_dec->ref_bufs[i].dma);
- }
-}
-
static void hantro_hevc_ref_init(struct hantro_ctx *ctx)
{
struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
@@ -108,37 +67,25 @@ dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx,
}
}
- /* Allocate a new reference buffer */
+ return 0;
+}
+
+int hantro_hevc_add_ref_buf(struct hantro_ctx *ctx, int poc, dma_addr_t addr)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ int i;
+
+ /* Add a new reference buffer */
for (i = 0; i < NUM_REF_PICTURES; i++) {
if (hevc_dec->ref_bufs_poc[i] == UNUSED_REF) {
- if (!hevc_dec->ref_bufs[i].cpu) {
- struct hantro_dev *vpu = ctx->dev;
-
- /*
- * Allocate the space needed for the raw data +
- * motion vector data. Optimizations could be to
- * allocate raw data in non coherent memory and only
- * clear the motion vector data.
- */
- hevc_dec->ref_bufs[i].cpu =
- dma_alloc_coherent(vpu->dev,
- hantro_hevc_ref_size(ctx),
- &hevc_dec->ref_bufs[i].dma,
- GFP_KERNEL);
- if (!hevc_dec->ref_bufs[i].cpu)
- return 0;
-
- hevc_dec->ref_bufs[i].size = hantro_hevc_ref_size(ctx);
- }
hevc_dec->ref_bufs_used |= 1 << i;
- memset(hevc_dec->ref_bufs[i].cpu, 0, hantro_hevc_ref_size(ctx));
hevc_dec->ref_bufs_poc[i] = poc;
-
- return hevc_dec->ref_bufs[i].dma;
+ hevc_dec->ref_bufs[i].dma = addr;
+ return 0;
}
}
- return 0;
+ return -EINVAL;
}
void hantro_hevc_ref_remove_unused(struct hantro_ctx *ctx)
@@ -314,8 +261,6 @@ void hantro_hevc_dec_exit(struct hantro_ctx *ctx)
hevc_dec->tile_bsd.cpu,
hevc_dec->tile_bsd.dma);
hevc_dec->tile_bsd.cpu = NULL;
-
- hantro_hevc_ref_free(ctx);
}
int hantro_hevc_dec_init(struct hantro_ctx *ctx)
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 267a6d33a47b..4a19ae8940b9 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/v4l2-controls.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-vp9.h>
#include <media/videobuf2-core.h>
#define DEC_8190_ALIGN_MASK 0x07U
@@ -166,6 +167,82 @@ struct hantro_vp8_dec_hw_ctx {
};
/**
+ * struct hantro_vp9_frame_info - VP9 frame decoding state
+ *
+ * @valid: frame info valid flag
+ * @frame_context_idx: index of frame context
+ * @reference_mode: inter prediction type
+ * @tx_mode: transform mode
+ * @interpolation_filter: filter selection for inter prediction
+ * @flags: frame flags
+ * @timestamp: frame timestamp
+ */
+struct hantro_vp9_frame_info {
+ u32 valid : 1;
+ u32 frame_context_idx : 2;
+ u32 reference_mode : 2;
+ u32 tx_mode : 3;
+ u32 interpolation_filter : 3;
+ u32 flags;
+ u64 timestamp;
+};
+
+#define MAX_SB_COLS 64
+#define MAX_SB_ROWS 34
+
+/**
+ * struct hantro_vp9_dec_hw_ctx - VP9 decoder hardware context
+ *
+ * @tile_edge: auxiliary DMA buffer for tile edge processing
+ * @segment_map: auxiliary DMA buffer for segment map
+ * @misc: auxiliary DMA buffer for tile info, probabilities and hw counters
+ * @cnts: V4L2 VP9 library struct abstracting access to the hardware counters
+ * @probability_tables: VP9 probability tables implied by the spec
+ * @frame_context: VP9 frame contexts
+ * @cur: current frame information
+ * @last: last frame information
+ * @bsd_ctrl_offset: bsd offset into tile_edge
+ * @segment_map_size: size of segment map
+ * @ctx_counters_offset: hw counters offset into misc
+ * @tile_info_offset: tile info offset into misc
+ * @tile_r_info: per-tile information array
+ * @tile_c_info: per-tile information array
+ * @last_tile_r: last number of tile rows
+ * @last_tile_c: last number of tile cols
+ * @last_sbs_r: last number of superblock rows
+ * @last_sbs_c: last number of superblock cols
+ * @active_segment: index of the active segment map (alternates between 0 and 1)
+ * @feature_enabled: segmentation feature enabled flags
+ * @feature_data: segmentation feature data
+ */
+struct hantro_vp9_dec_hw_ctx {
+ struct hantro_aux_buf tile_edge;
+ struct hantro_aux_buf segment_map;
+ struct hantro_aux_buf misc;
+ struct v4l2_vp9_frame_symbol_counts cnts;
+ struct v4l2_vp9_frame_context probability_tables;
+ struct v4l2_vp9_frame_context frame_context[4];
+ struct hantro_vp9_frame_info cur;
+ struct hantro_vp9_frame_info last;
+
+ unsigned int bsd_ctrl_offset;
+ unsigned int segment_map_size;
+ unsigned int ctx_counters_offset;
+ unsigned int tile_info_offset;
+
+ unsigned short tile_r_info[MAX_SB_ROWS];
+ unsigned short tile_c_info[MAX_SB_COLS];
+ unsigned int last_tile_r;
+ unsigned int last_tile_c;
+ unsigned int last_sbs_r;
+ unsigned int last_sbs_c;
+
+ unsigned int active_segment;
+ u8 feature_enabled[8];
+ s16 feature_data[8][4];
+};
+
+/**
* struct hantro_postproc_ctx
*
* @dec_q: References buffers, in decoder format.
@@ -175,6 +252,17 @@ struct hantro_postproc_ctx {
};
/**
+ * struct hantro_postproc_ops - post-processor operations
+ *
+ * @enable: Enable the post-processor block. Optional.
+ * @disable: Disable the post-processor block. Optional.
+ */
+struct hantro_postproc_ops {
+ void (*enable)(struct hantro_ctx *ctx);
+ void (*disable)(struct hantro_ctx *ctx);
+};
+
+/**
* struct hantro_codec_ops - codec mode specific operations
*
* @init: If needed, can be used for initialization.
@@ -220,8 +308,10 @@ extern const struct hantro_variant rk3288_vpu_variant;
extern const struct hantro_variant rk3328_vpu_variant;
extern const struct hantro_variant rk3399_vpu_variant;
extern const struct hantro_variant sama5d4_vdec_variant;
+extern const struct hantro_variant sunxi_vpu_variant;
-extern const struct hantro_postproc_regs hantro_g1_postproc_regs;
+extern const struct hantro_postproc_ops hantro_g1_postproc_ops;
+extern const struct hantro_postproc_ops hantro_g2_postproc_ops;
extern const u32 hantro_vp8_dec_mc_filter[8][6];
@@ -239,7 +329,8 @@ int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx);
int hantro_jpeg_enc_init(struct hantro_ctx *ctx);
void hantro_jpeg_enc_exit(struct hantro_ctx *ctx);
-void hantro_jpeg_enc_done(struct hantro_ctx *ctx);
+void hantro_h1_jpeg_enc_done(struct hantro_ctx *ctx);
+void rockchip_vpu2_jpeg_enc_done(struct hantro_ctx *ctx);
dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
unsigned int dpb_idx);
@@ -256,10 +347,29 @@ void hantro_hevc_dec_exit(struct hantro_ctx *ctx);
int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx);
int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx);
dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx, int poc);
+int hantro_hevc_add_ref_buf(struct hantro_ctx *ctx, int poc, dma_addr_t addr);
void hantro_hevc_ref_remove_unused(struct hantro_ctx *ctx);
size_t hantro_hevc_chroma_offset(const struct v4l2_ctrl_hevc_sps *sps);
size_t hantro_hevc_motion_vectors_offset(const struct v4l2_ctrl_hevc_sps *sps);
+static inline unsigned short hantro_vp9_num_sbs(unsigned short dimension)
+{
+ return (dimension + 63) / 64;
+}
+
+static inline size_t
+hantro_vp9_mv_size(unsigned int width, unsigned int height)
+{
+ int num_ctbs;
+
+ /*
+ * There can be up to 64 blocks per CTB (superblock),
+ * and the motion vector for each block needs 16 bytes.
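+ * Worked example (illustrative): a 3840x2160 frame spans
+ * 60 * 34 = 2040 superblocks, giving 2040 * 64 * 16 = 2088960
+ * bytes, roughly 2 MiB per frame.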
+ */
+ num_ctbs = hantro_vp9_num_sbs(width) * hantro_vp9_num_sbs(height);
+ return (num_ctbs * 64) * 16;
+}
+
static inline size_t
hantro_h264_mv_size(unsigned int width, unsigned int height)
{
@@ -287,6 +397,16 @@ hantro_h264_mv_size(unsigned int width, unsigned int height)
return 64 * MB_WIDTH(width) * MB_WIDTH(height) + 32;
}
+static inline size_t
+hantro_hevc_mv_size(unsigned int width, unsigned int height)
+{
+ /*
+ * A CTB can be 64x64, 32x32 or 16x16.
+ * Allocate memory for the worst case: 16x16 CTBs.
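+ * Assuming 16 bytes of motion-vector state per 16x16 block (an
+ * inference from the formula, not a documented figure), this is
+ * (width / 16) * (height / 16) * 16 = width * height / 16 bytes.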
+ */
+ return width * height / 16;
+}
+
int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx);
void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
@@ -301,4 +421,11 @@ void hantro_vp8_dec_exit(struct hantro_ctx *ctx);
void hantro_vp8_prob_update(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr);
+int hantro_g2_vp9_dec_run(struct hantro_ctx *ctx);
+void hantro_g2_vp9_dec_done(struct hantro_ctx *ctx);
+int hantro_vp9_dec_init(struct hantro_ctx *ctx);
+void hantro_vp9_dec_exit(struct hantro_ctx *ctx);
+void hantro_g2_check_idle(struct hantro_dev *vpu);
+irqreturn_t hantro_g2_irq(int irq, void *dev_id);
+
#endif /* HANTRO_HW_H_ */
diff --git a/drivers/staging/media/hantro/hantro_jpeg.c b/drivers/staging/media/hantro/hantro_jpeg.c
index 36c140fc6a36..df62fbdff7c9 100644
--- a/drivers/staging/media/hantro/hantro_jpeg.c
+++ b/drivers/staging/media/hantro/hantro_jpeg.c
@@ -36,8 +36,6 @@ static const unsigned char luma_q_table[] = {
0x48, 0x5c, 0x5f, 0x62, 0x70, 0x64, 0x67, 0x63
};
-static unsigned char luma_q_table_reordered[ARRAY_SIZE(luma_q_table)];
-
static const unsigned char chroma_q_table[] = {
0x11, 0x12, 0x18, 0x2f, 0x63, 0x63, 0x63, 0x63,
0x12, 0x15, 0x1a, 0x42, 0x63, 0x63, 0x63, 0x63,
@@ -49,8 +47,6 @@ static const unsigned char chroma_q_table[] = {
0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
};
-static unsigned char chroma_q_table_reordered[ARRAY_SIZE(chroma_q_table)];
-
static const unsigned char zigzag[64] = {
0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
@@ -277,7 +273,7 @@ jpeg_scale_quant_table(unsigned char *file_q_tab,
}
}
-static void jpeg_set_quality(unsigned char *buffer, int quality)
+static void jpeg_set_quality(struct hantro_jpeg_ctx *ctx)
{
int scale;
@@ -285,24 +281,15 @@ static void jpeg_set_quality(unsigned char *buffer, int quality)
* Non-linear scaling factor:
* [5,50] -> [1000..100], [51,100] -> [98..0]
*/
- if (quality < 50)
- scale = 5000 / quality;
+ if (ctx->quality < 50)
+ scale = 5000 / ctx->quality;
else
- scale = 200 - 2 * quality;
-
- jpeg_scale_quant_table(buffer + LUMA_QUANT_OFF,
- luma_q_table_reordered,
- luma_q_table, scale);
- jpeg_scale_quant_table(buffer + CHROMA_QUANT_OFF,
- chroma_q_table_reordered,
- chroma_q_table, scale);
-}
+ scale = 200 - 2 * ctx->quality;
-unsigned char *hantro_jpeg_get_qtable(int index)
-{
- if (index == 0)
- return luma_q_table_reordered;
- return chroma_q_table_reordered;
+ jpeg_scale_quant_table(ctx->buffer + LUMA_QUANT_OFF,
+ ctx->hw_luma_qtable, luma_q_table, scale);
+ jpeg_scale_quant_table(ctx->buffer + CHROMA_QUANT_OFF,
+ ctx->hw_chroma_qtable, chroma_q_table, scale);
}
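+
+/*
+ * Scale arithmetic example for jpeg_set_quality() above (illustrative):
+ * quality 10 gives scale = 5000 / 10 = 500, quality 50 gives
+ * 200 - 2 * 50 = 100, and quality 100 gives scale 0, matching the
+ * ranges quoted in the comment above.
+ */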
void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx)
@@ -324,7 +311,7 @@ void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx)
memcpy(buf + HUFF_CHROMA_AC_OFF, chroma_ac_table,
sizeof(chroma_ac_table));
- jpeg_set_quality(buf, ctx->quality);
+ jpeg_set_quality(ctx);
}
int hantro_jpeg_enc_init(struct hantro_ctx *ctx)
diff --git a/drivers/staging/media/hantro/hantro_jpeg.h b/drivers/staging/media/hantro/hantro_jpeg.h
index 9474a00277f8..035ab25b803f 100644
--- a/drivers/staging/media/hantro/hantro_jpeg.h
+++ b/drivers/staging/media/hantro/hantro_jpeg.h
@@ -1,13 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#define JPEG_HEADER_SIZE 601
+#define JPEG_QUANT_SIZE 64
struct hantro_jpeg_ctx {
int width;
int height;
int quality;
unsigned char *buffer;
+ unsigned char hw_luma_qtable[JPEG_QUANT_SIZE];
+ unsigned char hw_chroma_qtable[JPEG_QUANT_SIZE];
};
-unsigned char *hantro_jpeg_get_qtable(int index);
void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
index ed8916c950a4..248abe5423f0 100644
--- a/drivers/staging/media/hantro/hantro_postproc.c
+++ b/drivers/staging/media/hantro/hantro_postproc.c
@@ -11,18 +11,19 @@
#include "hantro.h"
#include "hantro_hw.h"
#include "hantro_g1_regs.h"
+#include "hantro_g2_regs.h"
#define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \
{ \
hantro_reg_write(vpu, \
- &(vpu)->variant->postproc_regs->reg_name, \
+ &hantro_g1_postproc_regs.reg_name, \
val); \
}
#define HANTRO_PP_REG_WRITE_S(vpu, reg_name, val) \
{ \
hantro_reg_write_s(vpu, \
- &(vpu)->variant->postproc_regs->reg_name, \
+ &hantro_g1_postproc_regs.reg_name, \
val); \
}
@@ -33,7 +34,7 @@
#define VPU_PP_OUT_RGB 0x0
#define VPU_PP_OUT_YUYV 0x3
-const struct hantro_postproc_regs hantro_g1_postproc_regs = {
+static const struct hantro_postproc_regs hantro_g1_postproc_regs = {
.pipeline_en = {G1_REG_PP_INTERRUPT, 1, 0x1},
.max_burst = {G1_REG_PP_DEV_CONFIG, 0, 0x1f},
.clk_gate = {G1_REG_PP_DEV_CONFIG, 1, 0x1},
@@ -53,27 +54,18 @@ const struct hantro_postproc_regs hantro_g1_postproc_regs = {
bool hantro_needs_postproc(const struct hantro_ctx *ctx,
const struct hantro_fmt *fmt)
{
- struct hantro_dev *vpu = ctx->dev;
-
if (ctx->is_encoder)
return false;
-
- if (!vpu->variant->postproc_fmts)
- return false;
-
- return fmt->fourcc != V4L2_PIX_FMT_NV12;
+ return fmt->postprocessed;
}
-void hantro_postproc_enable(struct hantro_ctx *ctx)
+static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *dst_buf;
u32 src_pp_fmt, dst_pp_fmt;
dma_addr_t dst_dma;
- if (!vpu->variant->postproc_regs)
- return;
-
/* Turn on pipeline mode. Must be done first. */
HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x1);
@@ -108,6 +100,21 @@ void hantro_postproc_enable(struct hantro_ctx *ctx)
HANTRO_PP_REG_WRITE(vpu, display_width, ctx->dst_fmt.width);
}
+static void hantro_postproc_g2_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *dst_buf;
+ size_t chroma_offset = ctx->dst_fmt.width * ctx->dst_fmt.height;
+ dma_addr_t dst_dma;
+
+ dst_buf = hantro_get_dst_buf(ctx);
+ dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+ hantro_write_addr(vpu, G2_RS_OUT_LUMA_ADDR, dst_dma);
+ hantro_write_addr(vpu, G2_RS_OUT_CHROMA_ADDR, dst_dma + chroma_offset);
+ hantro_reg_write(vpu, &g2_out_rs_e, 1);
+}
+
void hantro_postproc_free(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
@@ -132,9 +139,16 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
unsigned int num_buffers = cap_queue->num_buffers;
unsigned int i, buf_size;
- buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage +
- hantro_h264_mv_size(ctx->dst_fmt.width,
- ctx->dst_fmt.height);
+ buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage;
+ if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
+ buf_size += hantro_h264_mv_size(ctx->dst_fmt.width,
+ ctx->dst_fmt.height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME)
+ buf_size += hantro_vp9_mv_size(ctx->dst_fmt.width,
+ ctx->dst_fmt.height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE)
+ buf_size += hantro_hevc_mv_size(ctx->dst_fmt.width,
+ ctx->dst_fmt.height);
for (i = 0; i < num_buffers; ++i) {
struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
@@ -153,12 +167,42 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
return 0;
}
+static void hantro_postproc_g1_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x0);
+}
+
+static void hantro_postproc_g2_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ hantro_reg_write(vpu, &g2_out_rs_e, 0);
+}
+
void hantro_postproc_disable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
- if (!vpu->variant->postproc_regs)
- return;
+ if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->disable)
+ vpu->variant->postproc_ops->disable(ctx);
+}
+
+void hantro_postproc_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
- HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x0);
+ if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->enable)
+ vpu->variant->postproc_ops->enable(ctx);
}
+
+const struct hantro_postproc_ops hantro_g1_postproc_ops = {
+ .enable = hantro_postproc_g1_enable,
+ .disable = hantro_postproc_g1_disable,
+};
+
+const struct hantro_postproc_ops hantro_g2_postproc_ops = {
+ .enable = hantro_postproc_g2_enable,
+ .disable = hantro_postproc_g2_disable,
+};
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index bcb0bdff4a9a..e595905b3bd7 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -23,8 +23,6 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-dma-sg.h>
#include "hantro.h"
#include "hantro_hw.h"
@@ -285,6 +283,16 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
pix_mp->plane_fmt[0].sizeimage +=
hantro_h264_mv_size(pix_mp->width,
pix_mp->height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME &&
+ !hantro_needs_postproc(ctx, fmt))
+ pix_mp->plane_fmt[0].sizeimage +=
+ hantro_vp9_mv_size(pix_mp->width,
+ pix_mp->height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE &&
+ !hantro_needs_postproc(ctx, fmt))
+ pix_mp->plane_fmt[0].sizeimage +=
+ hantro_hevc_mv_size(pix_mp->width,
+ pix_mp->height);
} else if (!pix_mp->plane_fmt[0].sizeimage) {
/*
* For coded formats the application can specify
@@ -393,6 +401,7 @@ hantro_update_requires_request(struct hantro_ctx *ctx, u32 fourcc)
case V4L2_PIX_FMT_VP8_FRAME:
case V4L2_PIX_FMT_H264_SLICE:
case V4L2_PIX_FMT_HEVC_SLICE:
+ case V4L2_PIX_FMT_VP9_FRAME:
ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = true;
break;
default:
diff --git a/drivers/staging/media/hantro/hantro_vp9.c b/drivers/staging/media/hantro/hantro_vp9.c
new file mode 100644
index 000000000000..566cd376c097
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_vp9.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VP9 codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd.
+ */
+
+#include <linux/types.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_vp9.h"
+
+#define POW2(x) (1 << (x))
+
+#define MAX_LOG2_TILE_COLUMNS 6
+#define MAX_NUM_TILE_COLS POW2(MAX_LOG2_TILE_COLUMNS)
+#define MAX_TILE_COLS 20
+#define MAX_TILE_ROWS 22
+
+static size_t hantro_vp9_tile_filter_size(unsigned int height)
+{
+ u32 h, height32, size;
+
+ h = roundup(height, 8);
+
+ height32 = roundup(h, 64);
+ size = 24 * height32 * (MAX_NUM_TILE_COLS - 1); /* luma: 8, chroma: 8 + 8 */
+
+ return size;
+}
+
+static size_t hantro_vp9_bsd_control_size(unsigned int height)
+{
+ u32 h, height32;
+
+ h = roundup(height, 8);
+ height32 = roundup(h, 64);
+
+ return 16 * (height32 / 4) * (MAX_NUM_TILE_COLS - 1);
+}
+
+static size_t hantro_vp9_segment_map_size(unsigned int width, unsigned int height)
+{
+ u32 w, h;
+ int num_ctbs;
+
+ w = roundup(width, 8);
+ h = roundup(height, 8);
+ num_ctbs = ((w + 63) / 64) * ((h + 63) / 64);
+
+ return num_ctbs * 32;
+}
+
+static inline size_t hantro_vp9_prob_tab_size(void)
+{
+ return roundup(sizeof(struct hantro_g2_all_probs), 16);
+}
+
+static inline size_t hantro_vp9_count_tab_size(void)
+{
+ return roundup(sizeof(struct symbol_counts), 16);
+}
+
+static inline size_t hantro_vp9_tile_info_size(void)
+{
+ return roundup((MAX_TILE_COLS * MAX_TILE_ROWS * 4 * sizeof(u16) + 15 + 16) & ~0xf, 16);
+}
+
+static void *get_coeffs_arr(struct symbol_counts *cnts, int i, int j, int k, int l, int m)
+{
+ if (i == 0)
+ return &cnts->count_coeffs[j][k][l][m];
+
+ if (i == 1)
+ return &cnts->count_coeffs8x8[j][k][l][m];
+
+ if (i == 2)
+ return &cnts->count_coeffs16x16[j][k][l][m];
+
+ if (i == 3)
+ return &cnts->count_coeffs32x32[j][k][l][m];
+
+ return NULL;
+}
+
+static void *get_eobs1(struct symbol_counts *cnts, int i, int j, int k, int l, int m)
+{
+ if (i == 0)
+ return &cnts->count_coeffs[j][k][l][m][3];
+
+ if (i == 1)
+ return &cnts->count_coeffs8x8[j][k][l][m][3];
+
+ if (i == 2)
+ return &cnts->count_coeffs16x16[j][k][l][m][3];
+
+ if (i == 3)
+ return &cnts->count_coeffs32x32[j][k][l][m][3];
+
+ return NULL;
+}
+
+#define INNER_LOOP \
+ do { \
+ for (m = 0; m < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0][0][0]); ++m) { \
+ vp9_ctx->cnts.coeff[i][j][k][l][m] = \
+ get_coeffs_arr(cnts, i, j, k, l, m); \
+ vp9_ctx->cnts.eob[i][j][k][l][m][0] = \
+ &cnts->count_eobs[i][j][k][l][m]; \
+ vp9_ctx->cnts.eob[i][j][k][l][m][1] = \
+ get_eobs1(cnts, i, j, k, l, m); \
+ } \
+ } while (0)
+
+static void init_v4l2_vp9_count_tbl(struct hantro_ctx *ctx)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct symbol_counts *cnts = vp9_ctx->misc.cpu + vp9_ctx->ctx_counters_offset;
+ int i, j, k, l, m;
+
+ vp9_ctx->cnts.partition = &cnts->partition_counts;
+ vp9_ctx->cnts.skip = &cnts->mbskip_count;
+ vp9_ctx->cnts.intra_inter = &cnts->intra_inter_count;
+ vp9_ctx->cnts.tx32p = &cnts->tx32x32_count;
+ /*
+ * The G2 hardware uses tx16x16_count[2][3], while the API
+ * expects tx16p[2][4], so the counts must be explicitly copied
+ * into vp9_ctx->cnts.tx16p when the data is passed to the
+ * V4L2 VP9 library functions.
+ */
+ vp9_ctx->cnts.tx8p = &cnts->tx8x8_count;
+
+ vp9_ctx->cnts.y_mode = &cnts->sb_ymode_counts;
+ vp9_ctx->cnts.uv_mode = &cnts->uv_mode_counts;
+ vp9_ctx->cnts.comp = &cnts->comp_inter_count;
+ vp9_ctx->cnts.comp_ref = &cnts->comp_ref_count;
+ vp9_ctx->cnts.single_ref = &cnts->single_ref_count;
+ vp9_ctx->cnts.filter = &cnts->switchable_interp_counts;
+ vp9_ctx->cnts.mv_joint = &cnts->mv_counts.joints;
+ vp9_ctx->cnts.sign = &cnts->mv_counts.sign;
+ vp9_ctx->cnts.classes = &cnts->mv_counts.classes;
+ vp9_ctx->cnts.class0 = &cnts->mv_counts.class0;
+ vp9_ctx->cnts.bits = &cnts->mv_counts.bits;
+ vp9_ctx->cnts.class0_fp = &cnts->mv_counts.class0_fp;
+ vp9_ctx->cnts.fp = &cnts->mv_counts.fp;
+ vp9_ctx->cnts.class0_hp = &cnts->mv_counts.class0_hp;
+ vp9_ctx->cnts.hp = &cnts->mv_counts.hp;
+
+ for (i = 0; i < ARRAY_SIZE(vp9_ctx->cnts.coeff); ++i)
+ for (j = 0; j < ARRAY_SIZE(vp9_ctx->cnts.coeff[i]); ++j)
+ for (k = 0; k < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0]); ++k)
+ for (l = 0; l < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0][0]); ++l)
+ INNER_LOOP;
+}
+
+int hantro_vp9_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_variant *variant = vpu->variant;
+ struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
+ struct hantro_aux_buf *tile_edge = &vp9_dec->tile_edge;
+ struct hantro_aux_buf *segment_map = &vp9_dec->segment_map;
+ struct hantro_aux_buf *misc = &vp9_dec->misc;
+ u32 i, max_width, max_height, size;
+
+ if (variant->num_dec_fmts < 1)
+ return -EINVAL;
+
+ for (i = 0; i < variant->num_dec_fmts; ++i)
+ if (variant->dec_fmts[i].fourcc == V4L2_PIX_FMT_VP9_FRAME)
+ break;
+
+ if (i == variant->num_dec_fmts)
+ return -EINVAL;
+
+ max_width = vpu->variant->dec_fmts[i].frmsize.max_width;
+ max_height = vpu->variant->dec_fmts[i].frmsize.max_height;
+
+ size = hantro_vp9_tile_filter_size(max_height);
+ vp9_dec->bsd_ctrl_offset = size;
+ size += hantro_vp9_bsd_control_size(max_height);
+
+ tile_edge->cpu = dma_alloc_coherent(vpu->dev, size, &tile_edge->dma, GFP_KERNEL);
+ if (!tile_edge->cpu)
+ return -ENOMEM;
+
+ tile_edge->size = size;
+ memset(tile_edge->cpu, 0, size);
+
+ size = hantro_vp9_segment_map_size(max_width, max_height);
+ vp9_dec->segment_map_size = size;
+ size *= 2; /* we need two areas of this size, used alternately */
+
+ segment_map->cpu = dma_alloc_coherent(vpu->dev, size, &segment_map->dma, GFP_KERNEL);
+ if (!segment_map->cpu)
+ goto err_segment_map;
+
+ segment_map->size = size;
+ memset(segment_map->cpu, 0, size);
+
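+ /*
+ * The misc buffer packs three regions back to back: the probability
+ * tables at offset 0, the symbol counters at ctx_counters_offset and
+ * the tile info at tile_info_offset, each rounded up to 16 bytes.
+ */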
+ size = hantro_vp9_prob_tab_size();
+ vp9_dec->ctx_counters_offset = size;
+ size += hantro_vp9_count_tab_size();
+ vp9_dec->tile_info_offset = size;
+ size += hantro_vp9_tile_info_size();
+
+ misc->cpu = dma_alloc_coherent(vpu->dev, size, &misc->dma, GFP_KERNEL);
+ if (!misc->cpu)
+ goto err_misc;
+
+ misc->size = size;
+ memset(misc->cpu, 0, size);
+
+ init_v4l2_vp9_count_tbl(ctx);
+
+ return 0;
+
+err_misc:
+ dma_free_coherent(vpu->dev, segment_map->size, segment_map->cpu, segment_map->dma);
+
+err_segment_map:
+ dma_free_coherent(vpu->dev, tile_edge->size, tile_edge->cpu, tile_edge->dma);
+
+ return -ENOMEM;
+}
+
+void hantro_vp9_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
+ struct hantro_aux_buf *tile_edge = &vp9_dec->tile_edge;
+ struct hantro_aux_buf *segment_map = &vp9_dec->segment_map;
+ struct hantro_aux_buf *misc = &vp9_dec->misc;
+
+ dma_free_coherent(vpu->dev, misc->size, misc->cpu, misc->dma);
+ dma_free_coherent(vpu->dev, segment_map->size, segment_map->cpu, segment_map->dma);
+ dma_free_coherent(vpu->dev, tile_edge->size, tile_edge->cpu, tile_edge->dma);
+}
diff --git a/drivers/staging/media/hantro/hantro_vp9.h b/drivers/staging/media/hantro/hantro_vp9.h
new file mode 100644
index 000000000000..26b69275f098
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_vp9.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VP9 codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd.
+ */
+
+struct hantro_g2_mv_probs {
+ u8 joint[3];
+ u8 sign[2];
+ u8 class0_bit[2][1];
+ u8 fr[2][3];
+ u8 class0_hp[2];
+ u8 hp[2];
+ u8 classes[2][10];
+ u8 class0_fr[2][2][3];
+ u8 bits[2][10];
+};
+
+struct hantro_g2_probs {
+ u8 inter_mode[7][4];
+ u8 is_inter[4];
+ u8 uv_mode[10][8];
+ u8 tx8[2][1];
+ u8 tx16[2][2];
+ u8 tx32[2][3];
+ u8 y_mode_tail[4][1];
+ u8 y_mode[4][8];
+ u8 partition[2][16][4]; /* [keyframe][][], [inter][][] */
+ u8 uv_mode_tail[10][1];
+ u8 interp_filter[4][2];
+ u8 comp_mode[5];
+ u8 skip[3];
+
+ u8 pad1[1];
+
+ struct hantro_g2_mv_probs mv;
+
+ u8 single_ref[5][2];
+ u8 comp_ref[5];
+
+ u8 pad2[17];
+
+ u8 coef[4][2][2][6][6][4];
+};
+
+struct hantro_g2_all_probs {
+ u8 kf_y_mode_prob[10][10][8];
+
+ u8 kf_y_mode_prob_tail[10][10][1];
+ u8 ref_pred_probs[3];
+ u8 mb_segment_tree_probs[7];
+ u8 segment_pred_probs[3];
+ u8 ref_scores[4];
+ u8 prob_comppred[2];
+
+ u8 pad1[9];
+
+ u8 kf_uv_mode_prob[10][8];
+ u8 kf_uv_mode_prob_tail[10][1];
+
+ u8 pad2[6];
+
+ struct hantro_g2_probs probs;
+};
+
+struct mv_counts {
+ u32 joints[4];
+ u32 sign[2][2];
+ u32 classes[2][11];
+ u32 class0[2][2];
+ u32 bits[2][10][2];
+ u32 class0_fp[2][2][4];
+ u32 fp[2][4];
+ u32 class0_hp[2][2];
+ u32 hp[2][2];
+};
+
+struct symbol_counts {
+ u32 inter_mode_counts[7][3][2];
+ u32 sb_ymode_counts[4][10];
+ u32 uv_mode_counts[10][10];
+ u32 partition_counts[16][4];
+ u32 switchable_interp_counts[4][3];
+ u32 intra_inter_count[4][2];
+ u32 comp_inter_count[5][2];
+ u32 single_ref_count[5][2][2];
+ u32 comp_ref_count[5][2];
+ u32 tx32x32_count[2][4];
+ u32 tx16x16_count[2][3];
+ u32 tx8x8_count[2][2];
+ u32 mbskip_count[3][2];
+
+ struct mv_counts mv_counts;
+
+ u32 count_coeffs[2][2][6][6][4];
+ u32 count_coeffs8x8[2][2][6][6][4];
+ u32 count_coeffs16x16[2][2][6][6][4];
+ u32 count_coeffs32x32[2][2][6][6][4];
+
+ u32 count_eobs[4][2][2][6][6];
+};
diff --git a/drivers/staging/media/hantro/imx8m_vpu_hw.c b/drivers/staging/media/hantro/imx8m_vpu_hw.c
index ea919bfb9891..f5991b8e553a 100644
--- a/drivers/staging/media/hantro/imx8m_vpu_hw.c
+++ b/drivers/staging/media/hantro/imx8m_vpu_hw.c
@@ -82,6 +82,7 @@ static const struct hantro_fmt imx8m_vpu_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
},
};
@@ -131,10 +132,18 @@ static const struct hantro_fmt imx8m_vpu_dec_fmts[] = {
},
};
-static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
+static const struct hantro_fmt imx8m_vpu_g2_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ },
+};
+
+static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
},
{
.fourcc = V4L2_PIX_FMT_HEVC_SLICE,
@@ -149,6 +158,19 @@ static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
.step_height = MB_DIM,
},
},
+ {
+ .fourcc = V4L2_PIX_FMT_VP9_FRAME,
+ .codec_mode = HANTRO_MODE_VP9_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 3840,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 2160,
+ .step_height = MB_DIM,
+ },
+ },
};
static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id)
@@ -169,24 +191,6 @@ static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static irqreturn_t imx8m_vpu_g2_irq(int irq, void *dev_id)
-{
- struct hantro_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status;
-
- status = vdpu_read(vpu, G2_REG_INTERRUPT);
- state = (status & G2_REG_INTERRUPT_DEC_RDY_INT) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vdpu_write(vpu, 0, G2_REG_INTERRUPT);
- vdpu_write(vpu, G2_REG_CONFIG_DEC_CLK_GATE_E, G2_REG_CONFIG);
-
- hantro_irq_done(vpu, state);
-
- return IRQ_HANDLED;
-}
-
static int imx8mq_vpu_hw_init(struct hantro_dev *vpu)
{
vpu->ctrl_base = vpu->reg_bases[vpu->variant->num_regs - 1];
@@ -240,6 +244,13 @@ static const struct hantro_codec_ops imx8mq_vpu_g2_codec_ops[] = {
.init = hantro_hevc_dec_init,
.exit = hantro_hevc_dec_exit,
},
+ [HANTRO_MODE_VP9_DEC] = {
+ .run = hantro_g2_vp9_dec_run,
+ .done = hantro_g2_vp9_dec_done,
+ .reset = imx8m_vpu_g2_reset,
+ .init = hantro_vp9_dec_init,
+ .exit = hantro_vp9_dec_exit,
+ },
};
/*
@@ -251,7 +262,7 @@ static const struct hantro_irq imx8mq_irqs[] = {
};
static const struct hantro_irq imx8mq_g2_irqs[] = {
- { "g2", imx8m_vpu_g2_irq },
+ { "g2", hantro_g2_irq },
};
static const char * const imx8mq_clk_names[] = { "g1", "g2", "bus" };
@@ -262,7 +273,7 @@ const struct hantro_variant imx8mq_vpu_variant = {
.num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
.postproc_fmts = imx8m_vpu_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_postproc_fmts),
- .postproc_regs = &hantro_g1_postproc_regs,
+ .postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
HANTRO_H264_DECODER,
.codec_ops = imx8mq_vpu_codec_ops,
@@ -280,7 +291,10 @@ const struct hantro_variant imx8mq_vpu_g2_variant = {
.dec_offset = 0x0,
.dec_fmts = imx8m_vpu_g2_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(imx8m_vpu_g2_dec_fmts),
- .codec = HANTRO_HEVC_DECODER,
+ .postproc_fmts = imx8m_vpu_g2_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_g2_postproc_fmts),
+ .postproc_ops = &hantro_g2_postproc_ops,
+ .codec = HANTRO_HEVC_DECODER | HANTRO_VP9_DECODER,
.codec_ops = imx8mq_vpu_g2_codec_ops,
.init = imx8mq_vpu_hw_init,
.runtime_resume = imx8mq_runtime_resume,
diff --git a/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c b/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c
index 991213ce1610..4df16f59fb97 100644
--- a/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c
@@ -143,9 +143,8 @@ int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx)
rockchip_vpu2_set_src_img_ctrl(vpu, ctx);
rockchip_vpu2_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
- rockchip_vpu2_jpeg_enc_set_qtable(vpu,
- hantro_jpeg_get_qtable(0),
- hantro_jpeg_get_qtable(1));
+ rockchip_vpu2_jpeg_enc_set_qtable(vpu, jpeg_ctx.hw_luma_qtable,
+ jpeg_ctx.hw_chroma_qtable);
reg = VEPU_REG_OUTPUT_SWAP32
| VEPU_REG_OUTPUT_SWAP16
@@ -171,3 +170,20 @@ int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx)
return 0;
}
+
+void rockchip_vpu2_jpeg_enc_done(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ u32 bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
+ struct vb2_v4l2_buffer *dst_buf = hantro_get_dst_buf(ctx);
+
+ /*
+ * TODO: Rework the JPEG encoder to eliminate the need
+ * for a bounce buffer.
+ */
+ memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0) +
+ ctx->vpu_dst_fmt->header_size,
+ ctx->jpeg_enc.bounce_buffer.cpu, bytesused);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ ctx->vpu_dst_fmt->header_size + bytesused);
+}
diff --git a/drivers/staging/media/hantro/rockchip_vpu_hw.c b/drivers/staging/media/hantro/rockchip_vpu_hw.c
index d4f52957cc53..c203b606e6e7 100644
--- a/drivers/staging/media/hantro/rockchip_vpu_hw.c
+++ b/drivers/staging/media/hantro/rockchip_vpu_hw.c
@@ -62,6 +62,7 @@ static const struct hantro_fmt rockchip_vpu1_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
},
};
@@ -343,7 +344,7 @@ static const struct hantro_codec_ops rk3066_vpu_codec_ops[] = {
.run = hantro_h1_jpeg_enc_run,
.reset = rockchip_vpu1_enc_reset,
.init = hantro_jpeg_enc_init,
- .done = hantro_jpeg_enc_done,
+ .done = hantro_h1_jpeg_enc_done,
.exit = hantro_jpeg_enc_exit,
},
[HANTRO_MODE_H264_DEC] = {
@@ -371,7 +372,7 @@ static const struct hantro_codec_ops rk3288_vpu_codec_ops[] = {
.run = hantro_h1_jpeg_enc_run,
.reset = rockchip_vpu1_enc_reset,
.init = hantro_jpeg_enc_init,
- .done = hantro_jpeg_enc_done,
+ .done = hantro_h1_jpeg_enc_done,
.exit = hantro_jpeg_enc_exit,
},
[HANTRO_MODE_H264_DEC] = {
@@ -399,6 +400,7 @@ static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
.run = rockchip_vpu2_jpeg_enc_run,
.reset = rockchip_vpu2_enc_reset,
.init = hantro_jpeg_enc_init,
+ .done = rockchip_vpu2_jpeg_enc_done,
.exit = hantro_jpeg_enc_exit,
},
[HANTRO_MODE_H264_DEC] = {
@@ -460,7 +462,7 @@ const struct hantro_variant rk3036_vpu_variant = {
.num_dec_fmts = ARRAY_SIZE(rk3066_vpu_dec_fmts),
.postproc_fmts = rockchip_vpu1_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
- .postproc_regs = &hantro_g1_postproc_regs,
+ .postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
HANTRO_H264_DECODER,
.codec_ops = rk3036_vpu_codec_ops,
@@ -485,7 +487,7 @@ const struct hantro_variant rk3066_vpu_variant = {
.num_dec_fmts = ARRAY_SIZE(rk3066_vpu_dec_fmts),
.postproc_fmts = rockchip_vpu1_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
- .postproc_regs = &hantro_g1_postproc_regs,
+ .postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
.codec_ops = rk3066_vpu_codec_ops,
@@ -505,7 +507,7 @@ const struct hantro_variant rk3288_vpu_variant = {
.num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
.postproc_fmts = rockchip_vpu1_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
- .postproc_regs = &hantro_g1_postproc_regs,
+ .postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
.codec_ops = rk3288_vpu_codec_ops,
diff --git a/drivers/staging/media/hantro/sama5d4_vdec_hw.c b/drivers/staging/media/hantro/sama5d4_vdec_hw.c
index 9c3b8cd0b239..b2fc1c5613e1 100644
--- a/drivers/staging/media/hantro/sama5d4_vdec_hw.c
+++ b/drivers/staging/media/hantro/sama5d4_vdec_hw.c
@@ -15,6 +15,7 @@ static const struct hantro_fmt sama5d4_vdec_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
},
};
@@ -100,7 +101,7 @@ const struct hantro_variant sama5d4_vdec_variant = {
.num_dec_fmts = ARRAY_SIZE(sama5d4_vdec_fmts),
.postproc_fmts = sama5d4_vdec_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(sama5d4_vdec_postproc_fmts),
- .postproc_regs = &hantro_g1_postproc_regs,
+ .postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
HANTRO_H264_DECODER,
.codec_ops = sama5d4_vdec_codec_ops,
diff --git a/drivers/staging/media/hantro/sunxi_vpu_hw.c b/drivers/staging/media/hantro/sunxi_vpu_hw.c
new file mode 100644
index 000000000000..90633406c4eb
--- /dev/null
+++ b/drivers/staging/media/hantro/sunxi_vpu_hw.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner Hantro G2 VPU codec driver
+ *
+ * Copyright (C) 2021 Jernej Skrabec <jernej.skrabec@gmail.com>
+ */
+
+#include <linux/clk.h>
+
+#include "hantro.h"
+
+static const struct hantro_fmt sunxi_vpu_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ },
+};
+
+static const struct hantro_fmt sunxi_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9_FRAME,
+ .codec_mode = HANTRO_MODE_VP9_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 3840,
+ .step_width = MB_DIM,
+ .min_height = 48,
+ .max_height = 2160,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static int sunxi_vpu_hw_init(struct hantro_dev *vpu)
+{
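+	/* Run the "mod" clock (clocks[0]) at 300 MHz for decoding. */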
+ clk_set_rate(vpu->clocks[0].clk, 300000000);
+
+ return 0;
+}
+
+static void sunxi_vpu_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ reset_control_reset(vpu->resets);
+}
+
+static const struct hantro_codec_ops sunxi_vpu_codec_ops[] = {
+ [HANTRO_MODE_VP9_DEC] = {
+ .run = hantro_g2_vp9_dec_run,
+ .done = hantro_g2_vp9_dec_done,
+ .reset = sunxi_vpu_reset,
+ .init = hantro_vp9_dec_init,
+ .exit = hantro_vp9_dec_exit,
+ },
+};
+
+static const struct hantro_irq sunxi_irqs[] = {
+ { NULL, hantro_g2_irq },
+};
+
+static const char * const sunxi_clk_names[] = { "mod", "bus" };
+
+const struct hantro_variant sunxi_vpu_variant = {
+ .dec_fmts = sunxi_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(sunxi_vpu_dec_fmts),
+ .postproc_fmts = sunxi_vpu_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(sunxi_vpu_postproc_fmts),
+ .postproc_ops = &hantro_g2_postproc_ops,
+ .codec = HANTRO_VP9_DECODER,
+ .codec_ops = sunxi_vpu_codec_ops,
+ .init = sunxi_vpu_hw_init,
+ .irqs = sunxi_irqs,
+ .num_irqs = ARRAY_SIZE(sunxi_irqs),
+ .clk_names = sunxi_clk_names,
+ .num_clocks = ARRAY_SIZE(sunxi_clk_names),
+ .double_buffer = 1,
+ .legacy_regs = 1,
+ .late_postproc = 1,
+};
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
index eb6da9b9d8ba..1fd39a2fca98 100644
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -820,7 +820,7 @@ static const struct v4l2_file_operations ipu_csc_scaler_fops = {
.mmap = v4l2_m2m_fop_mmap,
};
-static struct v4l2_m2m_ops m2m_ops = {
+static const struct v4l2_m2m_ops m2m_ops = {
.device_run = device_run,
.job_abort = job_abort,
};
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 1caa100be33d..bd7f156f2d52 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -150,7 +150,7 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
const struct imx_media_pixfmt *incc)
{
if (ep->bus_type == V4L2_MBUS_BT656) // including BT.1120
- return 0;
+ return false;
return incc->bayer || is_parallel_16bit_bus(ep) ||
(is_parallel_bus(ep) &&
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 6f90acf9c725..94bc866ca28c 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -569,48 +569,6 @@ int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
}
EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_pix_fmt);
-int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
- const struct v4l2_mbus_framefmt *mbus)
-{
- int ret;
-
- memset(image, 0, sizeof(*image));
-
- ret = imx_media_mbus_fmt_to_pix_fmt(&image->pix, mbus, NULL);
- if (ret)
- return ret;
-
- image->rect.width = mbus->width;
- image->rect.height = mbus->height;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_ipu_image);
-
-int imx_media_ipu_image_to_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
- const struct ipu_image *image)
-{
- const struct imx_media_pixfmt *fmt;
-
- fmt = imx_media_find_pixel_format(image->pix.pixelformat,
- PIXFMT_SEL_ANY);
- if (!fmt || !fmt->codes || !fmt->codes[0])
- return -EINVAL;
-
- memset(mbus, 0, sizeof(*mbus));
- mbus->width = image->pix.width;
- mbus->height = image->pix.height;
- mbus->code = fmt->codes[0];
- mbus->field = image->pix.field;
- mbus->colorspace = image->pix.colorspace;
- mbus->xfer_func = image->pix.xfer_func;
- mbus->ycbcr_enc = image->pix.ycbcr_enc;
- mbus->quantization = image->pix.quantization;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(imx_media_ipu_image_to_mbus_fmt);
-
void imx_media_free_dma_buf(struct device *dev,
struct imx_media_dma_buf *buf)
{
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index 6740e7917458..f263fc3adbb9 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -199,10 +199,6 @@ void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
const struct v4l2_mbus_framefmt *mbus,
const struct imx_media_pixfmt *cc);
-int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
- const struct v4l2_mbus_framefmt *mbus);
-int imx_media_ipu_image_to_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
- const struct ipu_image *image);
void imx_media_grp_id_to_sd_name(char *sd_name, int sz,
u32 grp_id, int ipu_id);
struct v4l2_subdev *
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index a0941fc2907b..558b256ac935 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -382,13 +382,17 @@ static int csi2_start(struct csi2_dev *csi2)
csi2_enable(csi2, true);
/* Step 5 */
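+	/*
+	 * Ask the transmitter to put its lanes into the LP-11 state manually
+	 * before checking the D-PHY stop state; -ENOIOCTLCMD only means the
+	 * source subdev does not implement pre_streamon.
+	 */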
+ ret = v4l2_subdev_call(csi2->src_sd, video, pre_streamon,
+ V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP);
+ if (ret && ret != -ENOIOCTLCMD)
+ goto err_assert_reset;
csi2_dphy_wait_stopstate(csi2, lanes);
/* Step 6 */
ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
if (ret)
- goto err_assert_reset;
+ goto err_stop_lp11;
/* Step 7 */
ret = csi2_dphy_wait_clock_lane(csi2);
@@ -399,6 +403,8 @@ static int csi2_start(struct csi2_dev *csi2)
err_stop_upstream:
v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);
+err_stop_lp11:
+ v4l2_subdev_call(csi2->src_sd, video, post_streamoff);
err_assert_reset:
csi2_enable(csi2, false);
err_disable_clk:
@@ -410,6 +416,7 @@ static void csi2_stop(struct csi2_dev *csi2)
{
/* stop upstream */
v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);
+ v4l2_subdev_call(csi2->src_sd, video, post_streamoff);
csi2_enable(csi2, false);
clk_disable_unprepare(csi2->pix_clk);
diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
index 585f55981c86..dbdd015ce220 100644
--- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -34,11 +34,17 @@
* struct ipu3_uapi_grid_config - Grid plane config
*
* @width: Grid horizontal dimensions, in number of grid blocks(cells).
+ * For AWB, the range is (16, 80).
+ * For AF/AE, the range is (16, 32).
* @height: Grid vertical dimensions, in number of grid cells.
+ * For AWB, the range is (16, 60).
+ * For AF/AE, the range is (16, 24).
* @block_width_log2: Log2 of the width of each cell in pixels.
- * for (2^3, 2^4, 2^5, 2^6, 2^7), values [3, 7].
+ * For AWB, the range is [3, 6].
+ * For AF/AE, the range is [3, 7].
* @block_height_log2: Log2 of the height of each cell in pixels.
- * for (2^3, 2^4, 2^5, 2^6, 2^7), values [3, 7].
+ * For AWB, the range is [3, 6].
+ * For AF/AE, the range is [3, 7].
* @height_per_slice: The number of blocks in vertical axis per slice.
* Default 2.
* @x_start: X value of top left corner of Region of Interest(ROI).
@@ -61,17 +67,39 @@ struct ipu3_uapi_grid_config {
__u16 y_end;
} __packed;
+/**
+ * struct ipu3_uapi_awb_set_item - Memory layout for each cell in AWB
+ *
+ * @Gr_avg: Green average for red lines in the cell.
+ * @R_avg: Red average in the cell.
+ * @B_avg: Blue average in the cell.
+ * @Gb_avg: Green average for blue lines in the cell.
+ * @sat_ratio: Percentage of pixels over the thresholds specified in
+ * ipu3_uapi_awb_config_s, coded from 0 to 255.
+ * @padding0: Unused byte for padding.
+ * @padding1: Unused byte for padding.
+ * @padding2: Unused byte for padding.
+ */
+struct ipu3_uapi_awb_set_item {
+ __u8 Gr_avg;
+ __u8 R_avg;
+ __u8 B_avg;
+ __u8 Gb_avg;
+ __u8 sat_ratio;
+ __u8 padding0;
+ __u8 padding1;
+ __u8 padding2;
+} __attribute__((packed));
+
/*
* The grid based data is divided into "slices" called set, each slice of setX
* refers to ipu3_uapi_grid_config width * height_per_slice.
*/
#define IPU3_UAPI_AWB_MAX_SETS 60
/* Based on grid size 80 * 60 and cell size 16 x 16 */
-#define IPU3_UAPI_AWB_SET_SIZE 1280
-#define IPU3_UAPI_AWB_MD_ITEM_SIZE 8
+#define IPU3_UAPI_AWB_SET_SIZE 160
#define IPU3_UAPI_AWB_SPARE_FOR_BUBBLES \
- (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
- IPU3_UAPI_AWB_MD_ITEM_SIZE)
+ (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES)
#define IPU3_UAPI_AWB_MAX_BUFFER_SIZE \
(IPU3_UAPI_AWB_MAX_SETS * \
(IPU3_UAPI_AWB_SET_SIZE + IPU3_UAPI_AWB_SPARE_FOR_BUBBLES))
@@ -83,7 +111,7 @@ struct ipu3_uapi_grid_config {
* the average values for each color channel.
*/
struct ipu3_uapi_awb_raw_buffer {
- __u8 meta_data[IPU3_UAPI_AWB_MAX_BUFFER_SIZE]
+ struct ipu3_uapi_awb_set_item meta_data[IPU3_UAPI_AWB_MAX_BUFFER_SIZE]
__attribute__((aligned(32)));
} __packed;
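
The AWB set size is now counted in struct ipu3_uapi_awb_set_item entries
rather than raw bytes. As a quick sanity check (hypothetical snippet, not
part of the patch), the byte size of one set is unchanged:

/* Each ipu3_uapi_awb_set_item packs 8 bytes: 4 averages, sat_ratio, 3 pads. */
_Static_assert(sizeof(struct ipu3_uapi_awb_set_item) == 8,
	       "AWB item must stay 8 bytes");
/* 160 items x 8 bytes == the old 1280-byte IPU3_UAPI_AWB_SET_SIZE. */
_Static_assert(160 * sizeof(struct ipu3_uapi_awb_set_item) == 1280,
	       "AWB set byte size is unchanged");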
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
index e9d6bd9e9332..d9e3c3785075 100644
--- a/drivers/staging/media/ipu3/ipu3-css-params.c
+++ b/drivers/staging/media/ipu3/ipu3-css-params.c
@@ -771,7 +771,6 @@ static int imgu_css_osys_calc_frame_and_stripe_params(
*/
{
unsigned int i;
- int pin_scale = 0;
/*Input resolution */
stripe_params[s].input_width = stripe_input_width_y;
@@ -791,8 +790,6 @@ static int imgu_css_osys_calc_frame_and_stripe_params(
reso.pin_height[i];
stripe_params[s].output_offset[i] =
stripe_offset_out_y;
-
- pin_scale += frame_params[i].scaled;
} else {
/* Unscaled pin */
stripe_params[s].output_width[i] =
diff --git a/drivers/staging/media/max96712/Kconfig b/drivers/staging/media/max96712/Kconfig
new file mode 100644
index 000000000000..acde14fd5c4d
--- /dev/null
+++ b/drivers/staging/media/max96712/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_MAX96712
+ tristate "Maxim MAX96712 Quad GMSL2 Deserializer support"
+ depends on I2C
+ depends on OF_GPIO
+ depends on VIDEO_V4L2
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ select MEDIA_CONTROLLER
+ help
+ This driver supports the Maxim MAX96712 Quad GMSL2 Deserializer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max96712.
diff --git a/drivers/staging/media/max96712/Makefile b/drivers/staging/media/max96712/Makefile
new file mode 100644
index 000000000000..70c1974ce3f0
--- /dev/null
+++ b/drivers/staging/media/max96712/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_MAX96712) += max96712.o
diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c
new file mode 100644
index 000000000000..9bc72d9a858b
--- /dev/null
+++ b/drivers/staging/media/max96712/max96712.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Maxim MAX96712 Quad GMSL2 Deserializer Driver
+ *
+ * Copyright (C) 2021 Renesas Electronics Corporation
+ * Copyright (C) 2021 Niklas Söderlund
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define MAX96712_ID 0x20
+
+#define MAX96712_DPLL_FREQ 1000
+
+enum max96712_pattern {
+ MAX96712_PATTERN_CHECKERBOARD = 0,
+ MAX96712_PATTERN_GRADIENT,
+};
+
+struct max96712_priv {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct gpio_desc *gpiod_pwdn;
+
+ struct v4l2_fwnode_bus_mipi_csi2 mipi;
+
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct media_pad pads[1];
+
+ enum max96712_pattern pattern;
+};
+
+static int max96712_read(struct max96712_priv *priv, int reg)
+{
+ int ret, val;
+
+ ret = regmap_read(priv->regmap, reg, &val);
+ if (ret) {
+ dev_err(&priv->client->dev, "read 0x%04x failed\n", reg);
+ return ret;
+ }
+
+ return val;
+}
+
+static int max96712_write(struct max96712_priv *priv, unsigned int reg, u8 val)
+{
+ int ret;
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret)
+ dev_err(&priv->client->dev, "write 0x%04x failed\n", reg);
+
+ return ret;
+}
+
+static int max96712_update_bits(struct max96712_priv *priv, unsigned int reg,
+ u8 mask, u8 val)
+{
+ int ret;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+ if (ret)
+ dev_err(&priv->client->dev, "update 0x%04x failed\n", reg);
+
+ return ret;
+}
+
+static int max96712_write_bulk(struct max96712_priv *priv, unsigned int reg,
+ const void *val, size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_write(priv->regmap, reg, val, val_count);
+ if (ret)
+ dev_err(&priv->client->dev, "bulk write 0x%04x failed\n", reg);
+
+ return ret;
+}
+
+static int max96712_write_bulk_value(struct max96712_priv *priv,
+ unsigned int reg, unsigned int val,
+ size_t val_count)
+{
+ unsigned int i;
+ u8 values[4];
+
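+	/* Serialize val into val_count bytes, most significant byte first. */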
+ for (i = 1; i <= val_count; i++)
+ values[i - 1] = (val >> ((val_count - i) * 8)) & 0xff;
+
+ return max96712_write_bulk(priv, reg, &values, val_count);
+}
+
+static void max96712_reset(struct max96712_priv *priv)
+{
+ max96712_update_bits(priv, 0x13, 0x40, 0x40);
+ msleep(20);
+}
+
+static void max96712_mipi_enable(struct max96712_priv *priv, bool enable)
+{
+ if (enable) {
+ max96712_update_bits(priv, 0x40b, 0x02, 0x02);
+ max96712_update_bits(priv, 0x8a0, 0x80, 0x80);
+ } else {
+ max96712_update_bits(priv, 0x8a0, 0x80, 0x00);
+ max96712_update_bits(priv, 0x40b, 0x02, 0x00);
+ }
+}
+
+static void max96712_mipi_configure(struct max96712_priv *priv)
+{
+ unsigned int i;
+ u8 phy5 = 0;
+
+ max96712_mipi_enable(priv, false);
+
+ /* Select 2x4 mode. */
+ max96712_write(priv, 0x8a0, 0x04);
+
+ /* Configure a 4-lane DPHY using PHY0 and PHY1. */
+ /* TODO: Add support for 2-lane and 1-lane configurations. */
+	/* TODO: Add support for CPHY mode. */
+ max96712_write(priv, 0x94a, 0xc0);
+
+ /* Configure lane mapping for PHY0 and PHY1. */
+ /* TODO: Add support for lane swapping. */
+ max96712_write(priv, 0x8a3, 0xe4);
+
+ /* Configure lane polarity for PHY0 and PHY1. */
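+	/*
+	 * lane_polarities[0] is the clock lane (bit 5); data lanes 1-4 map
+	 * to bits 0, 1, 3 and 4 respectively.
+	 */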
+ for (i = 0; i < priv->mipi.num_data_lanes + 1; i++)
+ if (priv->mipi.lane_polarities[i])
+ phy5 |= BIT(i == 0 ? 5 : i < 3 ? i - 1 : i);
+ max96712_write(priv, 0x8a5, phy5);
+
+ /* Set link frequency for PHY0 and PHY1. */
+ max96712_update_bits(priv, 0x415, 0x3f,
+ ((MAX96712_DPLL_FREQ / 100) & 0x1f) | BIT(5));
+ max96712_update_bits(priv, 0x418, 0x3f,
+ ((MAX96712_DPLL_FREQ / 100) & 0x1f) | BIT(5));
+
+ /* Enable PHY0 and PHY1 */
+ max96712_update_bits(priv, 0x8a2, 0xf0, 0x30);
+}
+
+static void max96712_pattern_enable(struct max96712_priv *priv, bool enable)
+{
+ const u32 h_active = 1920;
+ const u32 h_fp = 88;
+ const u32 h_sw = 44;
+ const u32 h_bp = 148;
+ const u32 h_tot = h_active + h_fp + h_sw + h_bp;
+
+ const u32 v_active = 1080;
+ const u32 v_fp = 4;
+ const u32 v_sw = 5;
+ const u32 v_bp = 36;
+ const u32 v_tot = v_active + v_fp + v_sw + v_bp;
+
+ if (!enable) {
+ max96712_write(priv, 0x1051, 0x00);
+ return;
+ }
+
+ /* PCLK 75MHz. */
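+	/* 2200 x 1125 total timing at 30 fps needs 74.25 MHz. */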
+ max96712_write(priv, 0x0009, 0x01);
+
+ /* Configure Video Timing Generator for 1920x1080 @ 30 fps. */
+ max96712_write_bulk_value(priv, 0x1052, 0, 3);
+ max96712_write_bulk_value(priv, 0x1055, v_sw * h_tot, 3);
+ max96712_write_bulk_value(priv, 0x1058,
+				  (v_active + v_fp + v_bp) * h_tot, 3);
+ max96712_write_bulk_value(priv, 0x105b, 0, 3);
+ max96712_write_bulk_value(priv, 0x105e, h_sw, 2);
+ max96712_write_bulk_value(priv, 0x1060, h_active + h_fp + h_bp, 2);
+ max96712_write_bulk_value(priv, 0x1062, v_tot, 2);
+ max96712_write_bulk_value(priv, 0x1064,
+ h_tot * (v_sw + v_bp) + (h_sw + h_bp), 3);
+ max96712_write_bulk_value(priv, 0x1067, h_active, 2);
+ max96712_write_bulk_value(priv, 0x1069, h_fp + h_sw + h_bp, 2);
+ max96712_write_bulk_value(priv, 0x106b, v_active, 2);
+
+ /* Generate VS, HS and DE in free-running mode. */
+ max96712_write(priv, 0x1050, 0xfb);
+
+ /* Configure Video Pattern Generator. */
+ if (priv->pattern == MAX96712_PATTERN_CHECKERBOARD) {
+ /* Set checkerboard pattern size. */
+ max96712_write(priv, 0x1074, 0x3c);
+ max96712_write(priv, 0x1075, 0x3c);
+ max96712_write(priv, 0x1076, 0x3c);
+
+ /* Set checkerboard pattern colors. */
+ max96712_write_bulk_value(priv, 0x106e, 0xfecc00, 3);
+ max96712_write_bulk_value(priv, 0x1071, 0x006aa7, 3);
+
+ /* Generate checkerboard pattern. */
+ max96712_write(priv, 0x1051, 0x10);
+ } else {
+ /* Set gradient increment. */
+ max96712_write(priv, 0x106d, 0x10);
+
+ /* Generate gradient pattern. */
+ max96712_write(priv, 0x1051, 0x20);
+ }
+}
+
+static int max96712_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct max96712_priv *priv = v4l2_get_subdevdata(sd);
+
+ if (enable) {
+ max96712_pattern_enable(priv, true);
+ max96712_mipi_enable(priv, true);
+ } else {
+ max96712_mipi_enable(priv, false);
+ max96712_pattern_enable(priv, false);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops max96712_video_ops = {
+ .s_stream = max96712_s_stream,
+};
+
+static int max96712_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ format->format.width = 1920;
+ format->format.height = 1080;
+ format->format.code = MEDIA_BUS_FMT_RGB888_1X24;
+ format->format.field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops max96712_pad_ops = {
+ .get_fmt = max96712_get_pad_format,
+ .set_fmt = max96712_get_pad_format,
+};
+
+static const struct v4l2_subdev_ops max96712_subdev_ops = {
+ .video = &max96712_video_ops,
+ .pad = &max96712_pad_ops,
+};
+
+static const char * const max96712_test_pattern[] = {
+ "Checkerboard",
+ "Gradient",
+};
+
+static int max96712_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct max96712_priv *priv =
+ container_of(ctrl->handler, struct max96712_priv, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ priv->pattern = ctrl->val ?
+ MAX96712_PATTERN_GRADIENT :
+ MAX96712_PATTERN_CHECKERBOARD;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops max96712_ctrl_ops = {
+ .s_ctrl = max96712_s_ctrl,
+};
+
+static int max96712_v4l2_register(struct max96712_priv *priv)
+{
+ long pixel_rate;
+ int ret;
+
+ v4l2_i2c_subdev_init(&priv->sd, priv->client, &max96712_subdev_ops);
+ priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+ v4l2_ctrl_handler_init(&priv->ctrl_handler, 2);
+
+ /*
+ * TODO: Once V4L2_CID_LINK_FREQ is changed from a menu control to an
+ * INT64 control it should be used here instead of V4L2_CID_PIXEL_RATE.
+ */
+ pixel_rate = MAX96712_DPLL_FREQ / priv->mipi.num_data_lanes * 1000000;
+ v4l2_ctrl_new_std(&priv->ctrl_handler, NULL, V4L2_CID_PIXEL_RATE,
+ pixel_rate, pixel_rate, 1, pixel_rate);
+
+ v4l2_ctrl_new_std_menu_items(&priv->ctrl_handler, &max96712_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(max96712_test_pattern) - 1,
+ 0, 0, max96712_test_pattern);
+
+ priv->sd.ctrl_handler = &priv->ctrl_handler;
+ ret = priv->ctrl_handler.error;
+ if (ret)
+ goto error;
+
+ priv->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&priv->sd.entity, 1, priv->pads);
+ if (ret)
+ goto error;
+
+ v4l2_set_subdevdata(&priv->sd, priv);
+
+ ret = v4l2_async_register_subdev(&priv->sd);
+ if (ret < 0) {
+ dev_err(&priv->client->dev, "Unable to register subdevice\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
+
+ return ret;
+}
+
+static int max96712_parse_dt(struct max96712_priv *priv)
+{
+ struct fwnode_handle *ep;
+ struct v4l2_fwnode_endpoint v4l2_ep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ int ret;
+
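+	/* The CSI-2 output endpoint is on port 4; ports 0-3 are the GMSL2 inputs. */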
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(&priv->client->dev), 4,
+ 0, 0);
+ if (!ep) {
+ dev_err(&priv->client->dev, "Not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
+ fwnode_handle_put(ep);
+ if (ret) {
+ dev_err(&priv->client->dev, "Could not parse v4l2 endpoint\n");
+ return -EINVAL;
+ }
+
+ if (v4l2_ep.bus.mipi_csi2.num_data_lanes != 4) {
+ dev_err(&priv->client->dev, "Only 4 data lanes supported\n");
+ return -EINVAL;
+ }
+
+ priv->mipi = v4l2_ep.bus.mipi_csi2;
+
+ return 0;
+}
+
+static const struct regmap_config max96712_i2c_regmap = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x1f00,
+};
+
+static int max96712_probe(struct i2c_client *client)
+{
+ struct max96712_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+
+ priv->regmap = devm_regmap_init_i2c(client, &max96712_i2c_regmap);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->gpiod_pwdn = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->gpiod_pwdn))
+ return PTR_ERR(priv->gpiod_pwdn);
+
+ gpiod_set_consumer_name(priv->gpiod_pwdn, "max96712-pwdn");
+ gpiod_set_value_cansleep(priv->gpiod_pwdn, 1);
+
+ if (priv->gpiod_pwdn)
+ usleep_range(4000, 5000);
+
+ if (max96712_read(priv, 0x4a) != MAX96712_ID)
+ return -ENODEV;
+
+ max96712_reset(priv);
+
+ ret = max96712_parse_dt(priv);
+ if (ret)
+ return ret;
+
+ max96712_mipi_configure(priv);
+
+ return max96712_v4l2_register(priv);
+}
+
+static int max96712_remove(struct i2c_client *client)
+{
+ struct max96712_priv *priv = i2c_get_clientdata(client);
+
+ v4l2_async_unregister_subdev(&priv->sd);
+
+ gpiod_set_value_cansleep(priv->gpiod_pwdn, 0);
+
+ return 0;
+}
+
+static const struct of_device_id max96712_of_table[] = {
+ { .compatible = "maxim,max96712" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, max96712_of_table);
+
+static struct i2c_driver max96712_i2c_driver = {
+ .driver = {
+ .name = "max96712",
+ .of_match_table = of_match_ptr(max96712_of_table),
+ },
+ .probe_new = max96712_probe,
+ .remove = max96712_remove,
+};
+
+module_i2c_driver(max96712_i2c_driver);
+
+MODULE_DESCRIPTION("Maxim MAX96712 Quad GMSL2 Deserializer Driver");
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c
index b9125c295d1d..203d7afa085d 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.c
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.c
@@ -276,13 +276,13 @@ static void dst_buf_done(struct amvdec_session *sess,
switch (sess->pixfmt_cap) {
case V4L2_PIX_FMT_NV12M:
- vbuf->vb2_buf.planes[0].bytesused = output_size;
- vbuf->vb2_buf.planes[1].bytesused = output_size / 2;
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, output_size);
+ vb2_set_plane_payload(&vbuf->vb2_buf, 1, output_size / 2);
break;
case V4L2_PIX_FMT_YUV420M:
- vbuf->vb2_buf.planes[0].bytesused = output_size;
- vbuf->vb2_buf.planes[1].bytesused = output_size / 4;
- vbuf->vb2_buf.planes[2].bytesused = output_size / 4;
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, output_size);
+ vb2_set_plane_payload(&vbuf->vb2_buf, 1, output_size / 4);
+ vb2_set_plane_payload(&vbuf->vb2_buf, 2, output_size / 4);
break;
}
diff --git a/drivers/staging/media/rkvdec/Kconfig b/drivers/staging/media/rkvdec/Kconfig
index c02199b5e0fd..dc7292f346fa 100644
--- a/drivers/staging/media/rkvdec/Kconfig
+++ b/drivers/staging/media/rkvdec/Kconfig
@@ -9,6 +9,7 @@ config VIDEO_ROCKCHIP_VDEC
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
select V4L2_H264
+ select V4L2_VP9
help
Support for the Rockchip Video Decoder IP present on Rockchip SoCs,
which accelerates video decoding.
diff --git a/drivers/staging/media/rkvdec/Makefile b/drivers/staging/media/rkvdec/Makefile
index c08fed0a39f9..cb86b429cfaa 100644
--- a/drivers/staging/media/rkvdec/Makefile
+++ b/drivers/staging/media/rkvdec/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rockchip-vdec.o
-rockchip-vdec-y += rkvdec.o rkvdec-h264.o
+rockchip-vdec-y += rkvdec.o rkvdec-h264.o rkvdec-vp9.o
diff --git a/drivers/staging/media/rkvdec/rkvdec-vp9.c b/drivers/staging/media/rkvdec/rkvdec-vp9.c
new file mode 100644
index 000000000000..311a12656072
--- /dev/null
+++ b/drivers/staging/media/rkvdec/rkvdec-vp9.c
@@ -0,0 +1,1072 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip Video Decoder VP9 backend
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ * Boris Brezillon <boris.brezillon@collabora.com>
+ * Copyright (C) 2021 Collabora, Ltd.
+ * Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+ *
+ * Copyright (C) 2016 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ */
+
+/*
+ * To follow the VP9 spec, start reading this driver code
+ * from rkvdec_vp9_run(), followed by rkvdec_vp9_done().
+ */
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-vp9.h>
+
+#include "rkvdec.h"
+#include "rkvdec-regs.h"
+
+#define RKVDEC_VP9_PROBE_SIZE 4864
+#define RKVDEC_VP9_COUNT_SIZE 13232
+#define RKVDEC_VP9_MAX_SEGMAP_SIZE 73728
+
+struct rkvdec_vp9_intra_mode_probs {
+ u8 y_mode[105];
+ u8 uv_mode[23];
+};
+
+struct rkvdec_vp9_intra_only_frame_probs {
+ u8 coef_intra[4][2][128];
+ struct rkvdec_vp9_intra_mode_probs intra_mode[10];
+};
+
+struct rkvdec_vp9_inter_frame_probs {
+ u8 y_mode[4][9];
+ u8 comp_mode[5];
+ u8 comp_ref[5];
+ u8 single_ref[5][2];
+ u8 inter_mode[7][3];
+ u8 interp_filter[4][2];
+ u8 padding0[11];
+ u8 coef[2][4][2][128];
+ u8 uv_mode_0_2[3][9];
+ u8 padding1[5];
+ u8 uv_mode_3_5[3][9];
+ u8 padding2[5];
+ u8 uv_mode_6_8[3][9];
+ u8 padding3[5];
+ u8 uv_mode_9[9];
+ u8 padding4[7];
+ u8 padding5[16];
+ struct {
+ u8 joint[3];
+ u8 sign[2];
+ u8 classes[2][10];
+ u8 class0_bit[2];
+ u8 bits[2][10];
+ u8 class0_fr[2][2][3];
+ u8 fr[2][3];
+ u8 class0_hp[2];
+ u8 hp[2];
+ } mv;
+};
+
+struct rkvdec_vp9_probs {
+ u8 partition[16][3];
+ u8 pred[3];
+ u8 tree[7];
+ u8 skip[3];
+ u8 tx32[2][3];
+ u8 tx16[2][2];
+ u8 tx8[2][1];
+ u8 is_inter[4];
+ /* 128 bit alignment */
+ u8 padding0[3];
+ union {
+ struct rkvdec_vp9_inter_frame_probs inter;
+ struct rkvdec_vp9_intra_only_frame_probs intra_only;
+ };
+};
+
+/* Data structure describing auxiliary buffer format. */
+struct rkvdec_vp9_priv_tbl {
+ struct rkvdec_vp9_probs probs;
+ u8 segmap[2][RKVDEC_VP9_MAX_SEGMAP_SIZE];
+};
+
+struct rkvdec_vp9_refs_counts {
+ u32 eob[2];
+ u32 coeff[3];
+};
+
+struct rkvdec_vp9_inter_frame_symbol_counts {
+ u32 partition[16][4];
+ u32 skip[3][2];
+ u32 inter[4][2];
+ u32 tx32p[2][4];
+ u32 tx16p[2][4];
+ u32 tx8p[2][2];
+ u32 y_mode[4][10];
+ u32 uv_mode[10][10];
+ u32 comp[5][2];
+ u32 comp_ref[5][2];
+ u32 single_ref[5][2][2];
+ u32 mv_mode[7][4];
+ u32 filter[4][3];
+ u32 mv_joint[4];
+ u32 sign[2][2];
+	/* add 1 element for alignment */
+ u32 classes[2][11 + 1];
+ u32 class0[2][2];
+ u32 bits[2][10][2];
+ u32 class0_fp[2][2][4];
+ u32 fp[2][4];
+ u32 class0_hp[2][2];
+ u32 hp[2][2];
+ struct rkvdec_vp9_refs_counts ref_cnt[2][4][2][6][6];
+};
+
+struct rkvdec_vp9_intra_frame_symbol_counts {
+ u32 partition[4][4][4];
+ u32 skip[3][2];
+ u32 intra[4][2];
+ u32 tx32p[2][4];
+ u32 tx16p[2][4];
+ u32 tx8p[2][2];
+ struct rkvdec_vp9_refs_counts ref_cnt[2][4][2][6][6];
+};
+
+struct rkvdec_vp9_run {
+ struct rkvdec_run base;
+ const struct v4l2_ctrl_vp9_frame *decode_params;
+};
+
+struct rkvdec_vp9_frame_info {
+ u32 valid : 1;
+ u32 segmapid : 1;
+ u32 frame_context_idx : 2;
+ u32 reference_mode : 2;
+ u32 tx_mode : 3;
+ u32 interpolation_filter : 3;
+ u32 flags;
+ u64 timestamp;
+ struct v4l2_vp9_segmentation seg;
+ struct v4l2_vp9_loop_filter lf;
+};
+
+struct rkvdec_vp9_ctx {
+ struct rkvdec_aux_buf priv_tbl;
+ struct rkvdec_aux_buf count_tbl;
+ struct v4l2_vp9_frame_symbol_counts inter_cnts;
+ struct v4l2_vp9_frame_symbol_counts intra_cnts;
+ struct v4l2_vp9_frame_context probability_tables;
+ struct v4l2_vp9_frame_context frame_context[4];
+ struct rkvdec_vp9_frame_info cur;
+ struct rkvdec_vp9_frame_info last;
+};
+
+static void write_coeff_plane(const u8 coef[6][6][3], u8 *coeff_plane)
+{
+ unsigned int idx = 0, byte_count = 0;
+ int k, m, n;
+ u8 p;
+
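+	/*
+	 * The layout packs 27 coefficient bytes per 32-byte group; skip
+	 * 5 padding bytes after every 27 written.
+	 */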
+ for (k = 0; k < 6; k++) {
+ for (m = 0; m < 6; m++) {
+ for (n = 0; n < 3; n++) {
+ p = coef[k][m][n];
+ coeff_plane[idx++] = p;
+ byte_count++;
+ if (byte_count == 27) {
+ idx += 5;
+ byte_count = 0;
+ }
+ }
+ }
+ }
+}
+
+static void init_intra_only_probs(struct rkvdec_ctx *ctx,
+ const struct rkvdec_vp9_run *run)
+{
+ struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
+ struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
+ struct rkvdec_vp9_intra_only_frame_probs *rkprobs;
+ const struct v4l2_vp9_frame_context *probs;
+ unsigned int i, j, k;
+
+ rkprobs = &tbl->probs.intra_only;
+ probs = &vp9_ctx->probability_tables;
+
+ /*
+	 * Intra-only probs: 149 x 128 bits, aligned to 152 x 128 bits.
+	 * Coeff-related probs: 64 x 128 bits.
+ */
+ for (i = 0; i < ARRAY_SIZE(probs->coef); i++) {
+ for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
+ write_coeff_plane(probs->coef[i][j][0],
+ rkprobs->coef_intra[i][j]);
+ }
+
+ /* intra mode prob 80 x 128 bits */
+ for (i = 0; i < ARRAY_SIZE(v4l2_vp9_kf_y_mode_prob); i++) {
+ unsigned int byte_count = 0;
+ int idx = 0;
+
+ /* vp9_kf_y_mode_prob */
+ for (j = 0; j < ARRAY_SIZE(v4l2_vp9_kf_y_mode_prob[0]); j++) {
+ for (k = 0; k < ARRAY_SIZE(v4l2_vp9_kf_y_mode_prob[0][0]);
+ k++) {
+ u8 val = v4l2_vp9_kf_y_mode_prob[i][j][k];
+
+ rkprobs->intra_mode[i].y_mode[idx++] = val;
+ byte_count++;
+ if (byte_count == 27) {
+ byte_count = 0;
+ idx += 5;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < sizeof(v4l2_vp9_kf_uv_mode_prob); ++i) {
+ const u8 *ptr = (const u8 *)v4l2_vp9_kf_uv_mode_prob;
+
+ rkprobs->intra_mode[i / 23].uv_mode[i % 23] = ptr[i];
+ }
+}
+
+static void init_inter_probs(struct rkvdec_ctx *ctx,
+ const struct rkvdec_vp9_run *run)
+{
+ struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
+ struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
+ struct rkvdec_vp9_inter_frame_probs *rkprobs;
+ const struct v4l2_vp9_frame_context *probs;
+ unsigned int i, j, k;
+
+ rkprobs = &tbl->probs.inter;
+ probs = &vp9_ctx->probability_tables;
+
+ /*
+	 * Inter probs: 151 x 128 bits, aligned to 152 x 128 bits.
+	 * Inter-only part: intra_y_mode & inter_block info, 6 x 128 bits.
+ */
+
+ memcpy(rkprobs->y_mode, probs->y_mode, sizeof(rkprobs->y_mode));
+ memcpy(rkprobs->comp_mode, probs->comp_mode,
+ sizeof(rkprobs->comp_mode));
+ memcpy(rkprobs->comp_ref, probs->comp_ref,
+ sizeof(rkprobs->comp_ref));
+ memcpy(rkprobs->single_ref, probs->single_ref,
+ sizeof(rkprobs->single_ref));
+ memcpy(rkprobs->inter_mode, probs->inter_mode,
+ sizeof(rkprobs->inter_mode));
+ memcpy(rkprobs->interp_filter, probs->interp_filter,
+ sizeof(rkprobs->interp_filter));
+
+ /* 128 x 128 bits coeff related */
+ for (i = 0; i < ARRAY_SIZE(probs->coef); i++) {
+ for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++) {
+ for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
+ write_coeff_plane(probs->coef[i][j][k],
+ rkprobs->coef[k][i][j]);
+ }
+ }
+
+ /* intra uv mode 6 x 128 */
+ memcpy(rkprobs->uv_mode_0_2, &probs->uv_mode[0],
+ sizeof(rkprobs->uv_mode_0_2));
+ memcpy(rkprobs->uv_mode_3_5, &probs->uv_mode[3],
+ sizeof(rkprobs->uv_mode_3_5));
+ memcpy(rkprobs->uv_mode_6_8, &probs->uv_mode[6],
+ sizeof(rkprobs->uv_mode_6_8));
+ memcpy(rkprobs->uv_mode_9, &probs->uv_mode[9],
+ sizeof(rkprobs->uv_mode_9));
+
+ /* mv related 6 x 128 */
+ memcpy(rkprobs->mv.joint, probs->mv.joint,
+ sizeof(rkprobs->mv.joint));
+ memcpy(rkprobs->mv.sign, probs->mv.sign,
+ sizeof(rkprobs->mv.sign));
+ memcpy(rkprobs->mv.classes, probs->mv.classes,
+ sizeof(rkprobs->mv.classes));
+ memcpy(rkprobs->mv.class0_bit, probs->mv.class0_bit,
+ sizeof(rkprobs->mv.class0_bit));
+ memcpy(rkprobs->mv.bits, probs->mv.bits,
+ sizeof(rkprobs->mv.bits));
+ memcpy(rkprobs->mv.class0_fr, probs->mv.class0_fr,
+ sizeof(rkprobs->mv.class0_fr));
+ memcpy(rkprobs->mv.fr, probs->mv.fr,
+ sizeof(rkprobs->mv.fr));
+ memcpy(rkprobs->mv.class0_hp, probs->mv.class0_hp,
+ sizeof(rkprobs->mv.class0_hp));
+ memcpy(rkprobs->mv.hp, probs->mv.hp,
+ sizeof(rkprobs->mv.hp));
+}
+
+static void init_probs(struct rkvdec_ctx *ctx,
+ const struct rkvdec_vp9_run *run)
+{
+ const struct v4l2_ctrl_vp9_frame *dec_params;
+ struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
+ struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
+ struct rkvdec_vp9_probs *rkprobs = &tbl->probs;
+ const struct v4l2_vp9_segmentation *seg;
+ const struct v4l2_vp9_frame_context *probs;
+ bool intra_only;
+
+ dec_params = run->decode_params;
+ probs = &vp9_ctx->probability_tables;
+ seg = &dec_params->seg;
+
+ memset(rkprobs, 0, sizeof(*rkprobs));
+
+ intra_only = !!(dec_params->flags &
+ (V4L2_VP9_FRAME_FLAG_KEY_FRAME |
+ V4L2_VP9_FRAME_FLAG_INTRA_ONLY));
+
+ /* sb info 5 x 128 bit */
+ memcpy(rkprobs->partition,
+ intra_only ? v4l2_vp9_kf_partition_probs : probs->partition,
+ sizeof(rkprobs->partition));
+
+ memcpy(rkprobs->pred, seg->pred_probs, sizeof(rkprobs->pred));
+ memcpy(rkprobs->tree, seg->tree_probs, sizeof(rkprobs->tree));
+ memcpy(rkprobs->skip, probs->skip, sizeof(rkprobs->skip));
+ memcpy(rkprobs->tx32, probs->tx32, sizeof(rkprobs->tx32));
+ memcpy(rkprobs->tx16, probs->tx16, sizeof(rkprobs->tx16));
+ memcpy(rkprobs->tx8, probs->tx8, sizeof(rkprobs->tx8));
+ memcpy(rkprobs->is_inter, probs->is_inter, sizeof(rkprobs->is_inter));
+
+ if (intra_only)
+ init_intra_only_probs(ctx, run);
+ else
+ init_inter_probs(ctx, run);
+}
+
+struct rkvdec_vp9_ref_reg {
+ u32 reg_frm_size;
+ u32 reg_hor_stride;
+ u32 reg_y_stride;
+ u32 reg_yuv_stride;
+ u32 reg_ref_base;
+};
+
+static struct rkvdec_vp9_ref_reg ref_regs[] = {
+ {
+ .reg_frm_size = RKVDEC_REG_VP9_FRAME_SIZE(0),
+ .reg_hor_stride = RKVDEC_VP9_HOR_VIRSTRIDE(0),
+ .reg_y_stride = RKVDEC_VP9_LAST_FRAME_YSTRIDE,
+ .reg_yuv_stride = RKVDEC_VP9_LAST_FRAME_YUVSTRIDE,
+ .reg_ref_base = RKVDEC_REG_VP9_LAST_FRAME_BASE,
+ },
+ {
+ .reg_frm_size = RKVDEC_REG_VP9_FRAME_SIZE(1),
+ .reg_hor_stride = RKVDEC_VP9_HOR_VIRSTRIDE(1),
+ .reg_y_stride = RKVDEC_VP9_GOLDEN_FRAME_YSTRIDE,
+ .reg_yuv_stride = 0,
+ .reg_ref_base = RKVDEC_REG_VP9_GOLDEN_FRAME_BASE,
+ },
+ {
+ .reg_frm_size = RKVDEC_REG_VP9_FRAME_SIZE(2),
+ .reg_hor_stride = RKVDEC_VP9_HOR_VIRSTRIDE(2),
+ .reg_y_stride = RKVDEC_VP9_ALTREF_FRAME_YSTRIDE,
+ .reg_yuv_stride = 0,
+ .reg_ref_base = RKVDEC_REG_VP9_ALTREF_FRAME_BASE,
+ }
+};
+
+static struct rkvdec_decoded_buffer *
+get_ref_buf(struct rkvdec_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
+ int buf_idx;
+
+ /*
+	 * If a ref is unused or invalid, the address of the current
+	 * destination buffer is returned.
+ */
+ buf_idx = vb2_find_timestamp(cap_q, timestamp, 0);
+ if (buf_idx < 0)
+ return vb2_to_rkvdec_decoded_buf(&dst->vb2_buf);
+
+ return vb2_to_rkvdec_decoded_buf(vb2_get_buffer(cap_q, buf_idx));
+}
+
+static dma_addr_t get_mv_base_addr(struct rkvdec_decoded_buffer *buf)
+{
+ unsigned int aligned_pitch, aligned_height, yuv_len;
+
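+	/* Motion vectors are stored right after the aligned YUV planes. */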
+ aligned_height = round_up(buf->vp9.height, 64);
+ aligned_pitch = round_up(buf->vp9.width * buf->vp9.bit_depth, 512) / 8;
+ yuv_len = (aligned_height * aligned_pitch * 3) / 2;
+
+ return vb2_dma_contig_plane_dma_addr(&buf->base.vb.vb2_buf, 0) +
+ yuv_len;
+}
+
+static void config_ref_registers(struct rkvdec_ctx *ctx,
+ const struct rkvdec_vp9_run *run,
+ struct rkvdec_decoded_buffer *ref_buf,
+ struct rkvdec_vp9_ref_reg *ref_reg)
+{
+ unsigned int aligned_pitch, aligned_height, y_len, yuv_len;
+ struct rkvdec_dev *rkvdec = ctx->dev;
+
+ aligned_height = round_up(ref_buf->vp9.height, 64);
+ writel_relaxed(RKVDEC_VP9_FRAMEWIDTH(ref_buf->vp9.width) |
+ RKVDEC_VP9_FRAMEHEIGHT(ref_buf->vp9.height),
+ rkvdec->regs + ref_reg->reg_frm_size);
+
+ writel_relaxed(vb2_dma_contig_plane_dma_addr(&ref_buf->base.vb.vb2_buf, 0),
+ rkvdec->regs + ref_reg->reg_ref_base);
+
+ if (&ref_buf->base.vb == run->base.bufs.dst)
+ return;
+
+ aligned_pitch = round_up(ref_buf->vp9.width * ref_buf->vp9.bit_depth, 512) / 8;
+ y_len = aligned_height * aligned_pitch;
+ yuv_len = (y_len * 3) / 2;
+
+ writel_relaxed(RKVDEC_HOR_Y_VIRSTRIDE(aligned_pitch / 16) |
+ RKVDEC_HOR_UV_VIRSTRIDE(aligned_pitch / 16),
+ rkvdec->regs + ref_reg->reg_hor_stride);
+ writel_relaxed(RKVDEC_VP9_REF_YSTRIDE(y_len / 16),
+ rkvdec->regs + ref_reg->reg_y_stride);
+
+ if (!ref_reg->reg_yuv_stride)
+ return;
+
+ writel_relaxed(RKVDEC_VP9_REF_YUVSTRIDE(yuv_len / 16),
+ rkvdec->regs + ref_reg->reg_yuv_stride);
+}
+
+static void config_seg_registers(struct rkvdec_ctx *ctx, unsigned int segid)
+{
+ struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
+ const struct v4l2_vp9_segmentation *seg;
+ struct rkvdec_dev *rkvdec = ctx->dev;
+ s16 feature_val;
+ int feature_id;
+ u32 val = 0;
+
+ seg = vp9_ctx->last.valid ? &vp9_ctx->last.seg : &vp9_ctx->cur.seg;
+ feature_id = V4L2_VP9_SEG_LVL_ALT_Q;
+ if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {
+ feature_val = seg->feature_data[segid][feature_id];
+ val |= RKVDEC_SEGID_FRAME_QP_DELTA_EN(1) |
+ RKVDEC_SEGID_FRAME_QP_DELTA(feature_val);
+ }
+
+ feature_id = V4L2_VP9_SEG_LVL_ALT_L;
+ if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {
+ feature_val = seg->feature_data[segid][feature_id];
+ val |= RKVDEC_SEGID_FRAME_LOOPFILTER_VALUE_EN(1) |
+ RKVDEC_SEGID_FRAME_LOOPFILTER_VALUE(feature_val);
+ }
+
+ feature_id = V4L2_VP9_SEG_LVL_REF_FRAME;
+ if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {
+ feature_val = seg->feature_data[segid][feature_id];
+ val |= RKVDEC_SEGID_REFERINFO_EN(1) |
+ RKVDEC_SEGID_REFERINFO(feature_val);
+ }
+
+ feature_id = V4L2_VP9_SEG_LVL_SKIP;
+ if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid))
+ val |= RKVDEC_SEGID_FRAME_SKIP_EN(1);
+
+ if (!segid &&
+ (seg->flags & V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE))
+ val |= RKVDEC_SEGID_ABS_DELTA(1);
+
+ writel_relaxed(val, rkvdec->regs + RKVDEC_VP9_SEGID_GRP(segid));
+}
+
+static void update_dec_buf_info(struct rkvdec_decoded_buffer *buf,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ buf->vp9.width = dec_params->frame_width_minus_1 + 1;
+ buf->vp9.height = dec_params->frame_height_minus_1 + 1;
+ buf->vp9.bit_depth = dec_params->bit_depth;
+}
+
+static void update_ctx_cur_info(struct rkvdec_vp9_ctx *vp9_ctx,
+ struct rkvdec_decoded_buffer *buf,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ vp9_ctx->cur.valid = true;
+ vp9_ctx->cur.reference_mode = dec_params->reference_mode;
+ vp9_ctx->cur.interpolation_filter = dec_params->interpolation_filter;
+ vp9_ctx->cur.flags = dec_params->flags;
+ vp9_ctx->cur.timestamp = buf->base.vb.vb2_buf.timestamp;
+ vp9_ctx->cur.seg = dec_params->seg;
+ vp9_ctx->cur.lf = dec_params->lf;
+}
+
+static void update_ctx_last_info(struct rkvdec_vp9_ctx *vp9_ctx)
+{
+ vp9_ctx->last = vp9_ctx->cur;
+}
+
+static void config_registers(struct rkvdec_ctx *ctx,
+ const struct rkvdec_vp9_run *run)
+{
+ unsigned int y_len, uv_len, yuv_len, bit_depth, aligned_height, aligned_pitch, stream_len;
+ const struct v4l2_ctrl_vp9_frame *dec_params;
+ struct rkvdec_decoded_buffer *ref_bufs[3];
+ struct rkvdec_decoded_buffer *dst, *last, *mv_ref;
+ struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
+ u32 val, last_frame_info = 0;
+ const struct v4l2_vp9_segmentation *seg;
+ struct rkvdec_dev *rkvdec = ctx->dev;
+ dma_addr_t addr;
+ bool intra_only;
+ unsigned int i;
+
+ dec_params = run->decode_params;
+ dst = vb2_to_rkvdec_decoded_buf(&run->base.bufs.dst->vb2_buf);
+ ref_bufs[0] = get_ref_buf(ctx, &dst->base.vb, dec_params->last_frame_ts);
+ ref_bufs[1] = get_ref_buf(ctx, &dst->base.vb, dec_params->golden_frame_ts);
+ ref_bufs[2] = get_ref_buf(ctx, &dst->base.vb, dec_params->alt_frame_ts);
+
+ if (vp9_ctx->last.valid)
+ last = get_ref_buf(ctx, &dst->base.vb, vp9_ctx->last.timestamp);
+ else
+ last = dst;
+
+ update_dec_buf_info(dst, dec_params);
+ update_ctx_cur_info(vp9_ctx, dst, dec_params);
+ seg = &dec_params->seg;
+
+ intra_only = !!(dec_params->flags &
+ (V4L2_VP9_FRAME_FLAG_KEY_FRAME |
+ V4L2_VP9_FRAME_FLAG_INTRA_ONLY));
+
+ writel_relaxed(RKVDEC_MODE(RKVDEC_MODE_VP9),
+ rkvdec->regs + RKVDEC_REG_SYSCTRL);
+
+ bit_depth = dec_params->bit_depth;
+ aligned_height = round_up(ctx->decoded_fmt.fmt.pix_mp.height, 64);
+
+ aligned_pitch = round_up(ctx->decoded_fmt.fmt.pix_mp.width *
+ bit_depth,
+ 512) / 8;
+ y_len = aligned_height * aligned_pitch;
+ uv_len = y_len / 2;
+ yuv_len = y_len + uv_len;
+
+ writel_relaxed(RKVDEC_Y_HOR_VIRSTRIDE(aligned_pitch / 16) |
+ RKVDEC_UV_HOR_VIRSTRIDE(aligned_pitch / 16),
+ rkvdec->regs + RKVDEC_REG_PICPAR);
+ writel_relaxed(RKVDEC_Y_VIRSTRIDE(y_len / 16),
+ rkvdec->regs + RKVDEC_REG_Y_VIRSTRIDE);
+ writel_relaxed(RKVDEC_YUV_VIRSTRIDE(yuv_len / 16),
+ rkvdec->regs + RKVDEC_REG_YUV_VIRSTRIDE);
+
+ stream_len = vb2_get_plane_payload(&run->base.bufs.src->vb2_buf, 0);
+ writel_relaxed(RKVDEC_STRM_LEN(stream_len),
+ rkvdec->regs + RKVDEC_REG_STRM_LEN);
+
+ /*
+	 * Reset the count buffer: the decoder only outputs intra-related
+	 * syntax counts when decoding an intra frame, but the entropy update
+	 * needs to update all the probabilities.
+ */
+ if (intra_only)
+ memset(vp9_ctx->count_tbl.cpu, 0, vp9_ctx->count_tbl.size);
+
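+	/*
+	 * segmapid is a 1-bit field, so the increment below toggles between
+	 * the two segmentation maps stored in the aux buffer.
+	 */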
+ vp9_ctx->cur.segmapid = vp9_ctx->last.segmapid;
+ if (!intra_only &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
+ (!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_ENABLED) ||
+ (seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP)))
+ vp9_ctx->cur.segmapid++;
+
+ for (i = 0; i < ARRAY_SIZE(ref_bufs); i++)
+ config_ref_registers(ctx, run, ref_bufs[i], &ref_regs[i]);
+
+ for (i = 0; i < 8; i++)
+ config_seg_registers(ctx, i);
+
+ writel_relaxed(RKVDEC_VP9_TX_MODE(vp9_ctx->cur.tx_mode) |
+ RKVDEC_VP9_FRAME_REF_MODE(dec_params->reference_mode),
+ rkvdec->regs + RKVDEC_VP9_CPRHEADER_CONFIG);
+
+ if (!intra_only) {
+ const struct v4l2_vp9_loop_filter *lf;
+ s8 delta;
+
+ if (vp9_ctx->last.valid)
+ lf = &vp9_ctx->last.lf;
+ else
+ lf = &vp9_ctx->cur.lf;
+
+ val = 0;
+ for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) {
+ delta = lf->ref_deltas[i];
+ val |= RKVDEC_REF_DELTAS_LASTFRAME(i, delta);
+ }
+
+ writel_relaxed(val,
+ rkvdec->regs + RKVDEC_VP9_REF_DELTAS_LASTFRAME);
+
+ for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) {
+ delta = lf->mode_deltas[i];
+ last_frame_info |= RKVDEC_MODE_DELTAS_LASTFRAME(i,
+ delta);
+ }
+ }
+
+ if (vp9_ctx->last.valid && !intra_only &&
+ vp9_ctx->last.seg.flags & V4L2_VP9_SEGMENTATION_FLAG_ENABLED)
+ last_frame_info |= RKVDEC_SEG_EN_LASTFRAME;
+
+ if (vp9_ctx->last.valid &&
+ vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_SHOW_FRAME)
+ last_frame_info |= RKVDEC_LAST_SHOW_FRAME;
+
+ if (vp9_ctx->last.valid &&
+ vp9_ctx->last.flags &
+ (V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_INTRA_ONLY))
+ last_frame_info |= RKVDEC_LAST_INTRA_ONLY;
+
+ if (vp9_ctx->last.valid &&
+ last->vp9.width == dst->vp9.width &&
+ last->vp9.height == dst->vp9.height)
+ last_frame_info |= RKVDEC_LAST_WIDHHEIGHT_EQCUR;
+
+ writel_relaxed(last_frame_info,
+ rkvdec->regs + RKVDEC_VP9_INFO_LASTFRAME);
+
+ writel_relaxed(stream_len - dec_params->compressed_header_size -
+ dec_params->uncompressed_header_size,
+ rkvdec->regs + RKVDEC_VP9_LASTTILE_SIZE);
+
+ for (i = 0; !intra_only && i < ARRAY_SIZE(ref_bufs); i++) {
+ unsigned int refw = ref_bufs[i]->vp9.width;
+ unsigned int refh = ref_bufs[i]->vp9.height;
+ u32 hscale, vscale;
+
+ hscale = (refw << 14) / dst->vp9.width;
+ vscale = (refh << 14) / dst->vp9.height;
+ writel_relaxed(RKVDEC_VP9_REF_HOR_SCALE(hscale) |
+ RKVDEC_VP9_REF_VER_SCALE(vscale),
+ rkvdec->regs + RKVDEC_VP9_REF_SCALE(i));
+ }
+
+ addr = vb2_dma_contig_plane_dma_addr(&dst->base.vb.vb2_buf, 0);
+ writel_relaxed(addr, rkvdec->regs + RKVDEC_REG_DECOUT_BASE);
+ addr = vb2_dma_contig_plane_dma_addr(&run->base.bufs.src->vb2_buf, 0);
+ writel_relaxed(addr, rkvdec->regs + RKVDEC_REG_STRM_RLC_BASE);
+ writel_relaxed(vp9_ctx->priv_tbl.dma +
+ offsetof(struct rkvdec_vp9_priv_tbl, probs),
+ rkvdec->regs + RKVDEC_REG_CABACTBL_PROB_BASE);
+ writel_relaxed(vp9_ctx->count_tbl.dma,
+ rkvdec->regs + RKVDEC_REG_VP9COUNT_BASE);
+
+ writel_relaxed(vp9_ctx->priv_tbl.dma +
+ offsetof(struct rkvdec_vp9_priv_tbl, segmap) +
+ (RKVDEC_VP9_MAX_SEGMAP_SIZE * vp9_ctx->cur.segmapid),
+ rkvdec->regs + RKVDEC_REG_VP9_SEGIDCUR_BASE);
+ writel_relaxed(vp9_ctx->priv_tbl.dma +
+ offsetof(struct rkvdec_vp9_priv_tbl, segmap) +
+ (RKVDEC_VP9_MAX_SEGMAP_SIZE * (!vp9_ctx->cur.segmapid)),
+ rkvdec->regs + RKVDEC_REG_VP9_SEGIDLAST_BASE);
+
+ if (!intra_only &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
+ vp9_ctx->last.valid)
+ mv_ref = last;
+ else
+ mv_ref = dst;
+
+ writel_relaxed(get_mv_base_addr(mv_ref),
+ rkvdec->regs + RKVDEC_VP9_REF_COLMV_BASE);
+
+ writel_relaxed(ctx->decoded_fmt.fmt.pix_mp.width |
+ (ctx->decoded_fmt.fmt.pix_mp.height << 16),
+ rkvdec->regs + RKVDEC_REG_PERFORMANCE_CYCLE);
+}
+
+static int validate_dec_params(struct rkvdec_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ unsigned int aligned_width, aligned_height;
+
+ /* We only support profile 0. */
+ if (dec_params->profile != 0) {
+ dev_err(ctx->dev->dev, "unsupported profile %d\n",
+ dec_params->profile);
+ return -EINVAL;
+ }
+
+ aligned_width = round_up(dec_params->frame_width_minus_1 + 1, 64);
+ aligned_height = round_up(dec_params->frame_height_minus_1 + 1, 64);
+
+ /*
+ * Userspace should update the capture/decoded format when the
+ * resolution changes.
+ */
+ if (aligned_width != ctx->decoded_fmt.fmt.pix_mp.width ||
+ aligned_height != ctx->decoded_fmt.fmt.pix_mp.height) {
+ dev_err(ctx->dev->dev,
+ "unexpected bitstream resolution %dx%d\n",
+ dec_params->frame_width_minus_1 + 1,
+ dec_params->frame_height_minus_1 + 1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rkvdec_vp9_run_preamble(struct rkvdec_ctx *ctx,
+ struct rkvdec_vp9_run *run)
+{
+ const struct v4l2_ctrl_vp9_frame *dec_params;
+ const struct v4l2_ctrl_vp9_compressed_hdr *prob_updates;
+ struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
+ struct v4l2_ctrl *ctrl;
+ unsigned int fctx_idx;
+ int ret;
+
+ /* v4l2-specific stuff */
+ rkvdec_run_preamble(ctx, &run->base);
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
+ V4L2_CID_STATELESS_VP9_FRAME);
+ if (WARN_ON(!ctrl))
+ return -EINVAL;
+ dec_params = ctrl->p_cur.p;
+
+ ret = validate_dec_params(ctx, dec_params);
+ if (ret)
+ return ret;
+
+ run->decode_params = dec_params;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, V4L2_CID_STATELESS_VP9_COMPRESSED_HDR);
+ if (WARN_ON(!ctrl))
+ return -EINVAL;
+ prob_updates = ctrl->p_cur.p;
+ vp9_ctx->cur.tx_mode = prob_updates->tx_mode;
+
+ /*
+ * vp9 stuff
+ *
+	 * By this point userspace has done all parts of 6.2 uncompressed_header()
+ * except this fragment:
+ * if ( FrameIsIntra || error_resilient_mode ) {
+ * setup_past_independence ( )
+ * if ( frame_type == KEY_FRAME || error_resilient_mode == 1 ||
+ * reset_frame_context == 3 ) {
+ * for ( i = 0; i < 4; i ++ ) {
+ * save_probs( i )
+ * }
+ * } else if ( reset_frame_context == 2 ) {
+ * save_probs( frame_context_idx )
+ * }
+ * frame_context_idx = 0
+ * }
+ */
+ fctx_idx = v4l2_vp9_reset_frame_ctx(dec_params, vp9_ctx->frame_context);
+ vp9_ctx->cur.frame_context_idx = fctx_idx;
+
+ /* 6.1 frame(sz): load_probs() and load_probs2() */
+ vp9_ctx->probability_tables = vp9_ctx->frame_context[fctx_idx];
+
+ /*
+	 * Userspace has also performed 6.3 compressed_header(), but handled the
+	 * probs in a special way. All probs which need updating, except MV-related,
+ * have been read from the bitstream and translated through inv_map_table[],
+ * but no 6.3.6 inv_recenter_nonneg(v, m) has been performed. The values passed
+ * by userspace are either translated values (there are no 0 values in
+ * inv_map_table[]), or zero to indicate no update. All MV-related probs which need
+ * updating have been read from the bitstream and (mv_prob << 1) | 1 has been
+ * performed. The values passed by userspace are either new values
+	 * to replace old ones (the above-mentioned shift and bitwise OR never result in
+ * a zero) or zero to indicate no update.
+ * fw_update_probs() performs actual probs updates or leaves probs as-is
+ * for values for which a zero was passed from userspace.
+ */
+ v4l2_vp9_fw_update_probs(&vp9_ctx->probability_tables, prob_updates, dec_params);
+
+ return 0;
+}
+
+static int rkvdec_vp9_run(struct rkvdec_ctx *ctx)
+{
+ struct rkvdec_dev *rkvdec = ctx->dev;
+ struct rkvdec_vp9_run run = { };
+ int ret;
+
+ ret = rkvdec_vp9_run_preamble(ctx, &run);
+ if (ret) {
+ rkvdec_run_postamble(ctx, &run.base);
+ return ret;
+ }
+
+ /* Prepare probs. */
+ init_probs(ctx, &run);
+
+ /* Configure hardware registers. */
+ config_registers(ctx, &run);
+
+ rkvdec_run_postamble(ctx, &run.base);
+
+ schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000));
+
+ writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
+ writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND);
+
+ writel(0xe, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
+ /* Start decoding! */
+ writel(RKVDEC_INTERRUPT_DEC_E | RKVDEC_CONFIG_DEC_CLK_GATE_E |
+ RKVDEC_TIMEOUT_E | RKVDEC_BUF_EMPTY_E,
+ rkvdec->regs + RKVDEC_REG_INTERRUPT);
+
+ return 0;
+}
+
+#define copy_tx_and_skip(p1, p2) \
+do { \
+ memcpy((p1)->tx8, (p2)->tx8, sizeof((p1)->tx8)); \
+ memcpy((p1)->tx16, (p2)->tx16, sizeof((p1)->tx16)); \
+ memcpy((p1)->tx32, (p2)->tx32, sizeof((p1)->tx32)); \
+ memcpy((p1)->skip, (p2)->skip, sizeof((p1)->skip)); \
+} while (0)
+
+static void rkvdec_vp9_done(struct rkvdec_ctx *ctx,
+ struct vb2_v4l2_buffer *src_buf,
+ struct vb2_v4l2_buffer *dst_buf,
+ enum vb2_buffer_state result)
+{
+ struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
+ unsigned int fctx_idx;
+
+ /* v4l2-specific stuff */
+ if (result == VB2_BUF_STATE_ERROR)
+ goto out_update_last;
+
+ /*
+ * vp9 stuff
+ *
+ * 6.1.2 refresh_probs()
+ *
+	 * In the spec, the complementary condition comes last in 6.1.2
+	 * refresh_probs(), but it makes no sense to perform all the activities
+	 * of the first "if" when we are not actually refreshing the frame
+	 * context. On top of that, per 6.2 uncompressed_header(),
+	 * refresh_frame_context == 0 whenever error_resilient_mode == 1.
+	 * Consequently, if we don't jump to out_update_last,
+	 * error_resilient_mode must be 0.
+ */
+ if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX))
+ goto out_update_last;
+
+ fctx_idx = vp9_ctx->cur.frame_context_idx;
+
+ if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE)) {
+ /* error_resilient_mode == 0 && frame_parallel_decoding_mode == 0 */
+ struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
+ bool frame_is_intra = vp9_ctx->cur.flags &
+ (V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_INTRA_ONLY);
+ struct tx_and_skip {
+ u8 tx8[2][1];
+ u8 tx16[2][2];
+ u8 tx32[2][3];
+ u8 skip[3];
+ } _tx_skip, *tx_skip = &_tx_skip;
+ struct v4l2_vp9_frame_symbol_counts *counts;
+
+ /* buffer the forward-updated TX and skip probs */
+ if (frame_is_intra)
+ copy_tx_and_skip(tx_skip, probs);
+
+ /* 6.1.2 refresh_probs(): load_probs() and load_probs2() */
+ *probs = vp9_ctx->frame_context[fctx_idx];
+
+ /* if FrameIsIntra then undo the effect of load_probs2() */
+ if (frame_is_intra)
+ copy_tx_and_skip(probs, tx_skip);
+
+ counts = frame_is_intra ? &vp9_ctx->intra_cnts : &vp9_ctx->inter_cnts;
+ v4l2_vp9_adapt_coef_probs(probs, counts,
+ !vp9_ctx->last.valid ||
+ vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME,
+ frame_is_intra);
+ if (!frame_is_intra) {
+ const struct rkvdec_vp9_inter_frame_symbol_counts *inter_cnts;
+ u32 classes[2][11];
+ int i;
+
+ inter_cnts = vp9_ctx->count_tbl.cpu;
+ for (i = 0; i < ARRAY_SIZE(classes); ++i)
+ memcpy(classes[i], inter_cnts->classes[i], sizeof(classes[0]));
+ counts->classes = &classes;
+
+ /* load_probs2() already done */
+ v4l2_vp9_adapt_noncoef_probs(&vp9_ctx->probability_tables, counts,
+ vp9_ctx->cur.reference_mode,
+ vp9_ctx->cur.interpolation_filter,
+ vp9_ctx->cur.tx_mode, vp9_ctx->cur.flags);
+ }
+ }
+
+ /* 6.1.2 refresh_probs(): save_probs(fctx_idx) */
+ vp9_ctx->frame_context[fctx_idx] = vp9_ctx->probability_tables;
+
+out_update_last:
+ update_ctx_last_info(vp9_ctx);
+}
+
+static void rkvdec_init_v4l2_vp9_count_tbl(struct rkvdec_ctx *ctx)
+{
+ struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
+ struct rkvdec_vp9_intra_frame_symbol_counts *intra_cnts = vp9_ctx->count_tbl.cpu;
+ struct rkvdec_vp9_inter_frame_symbol_counts *inter_cnts = vp9_ctx->count_tbl.cpu;
+ int i, j, k, l, m;
+
+ vp9_ctx->inter_cnts.partition = &inter_cnts->partition;
+ vp9_ctx->inter_cnts.skip = &inter_cnts->skip;
+ vp9_ctx->inter_cnts.intra_inter = &inter_cnts->inter;
+ vp9_ctx->inter_cnts.tx32p = &inter_cnts->tx32p;
+ vp9_ctx->inter_cnts.tx16p = &inter_cnts->tx16p;
+ vp9_ctx->inter_cnts.tx8p = &inter_cnts->tx8p;
+
+ vp9_ctx->intra_cnts.partition = (u32 (*)[16][4])(&intra_cnts->partition);
+ vp9_ctx->intra_cnts.skip = &intra_cnts->skip;
+ vp9_ctx->intra_cnts.intra_inter = &intra_cnts->intra;
+ vp9_ctx->intra_cnts.tx32p = &intra_cnts->tx32p;
+ vp9_ctx->intra_cnts.tx16p = &intra_cnts->tx16p;
+ vp9_ctx->intra_cnts.tx8p = &intra_cnts->tx8p;
+
+ vp9_ctx->inter_cnts.y_mode = &inter_cnts->y_mode;
+ vp9_ctx->inter_cnts.uv_mode = &inter_cnts->uv_mode;
+ vp9_ctx->inter_cnts.comp = &inter_cnts->comp;
+ vp9_ctx->inter_cnts.comp_ref = &inter_cnts->comp_ref;
+ vp9_ctx->inter_cnts.single_ref = &inter_cnts->single_ref;
+ vp9_ctx->inter_cnts.mv_mode = &inter_cnts->mv_mode;
+ vp9_ctx->inter_cnts.filter = &inter_cnts->filter;
+ vp9_ctx->inter_cnts.mv_joint = &inter_cnts->mv_joint;
+ vp9_ctx->inter_cnts.sign = &inter_cnts->sign;
+ /*
+	 * The Rockchip hardware actually uses "u32 classes[2][11 + 1];"
+	 * instead of "u32 classes[2][11];", so the counts must be copied
+	 * into a properly sized local array before being passed to the
+	 * VP9 library functions (see rkvdec_vp9_done()).
+ */
+ vp9_ctx->inter_cnts.class0 = &inter_cnts->class0;
+ vp9_ctx->inter_cnts.bits = &inter_cnts->bits;
+ vp9_ctx->inter_cnts.class0_fp = &inter_cnts->class0_fp;
+ vp9_ctx->inter_cnts.fp = &inter_cnts->fp;
+ vp9_ctx->inter_cnts.class0_hp = &inter_cnts->class0_hp;
+ vp9_ctx->inter_cnts.hp = &inter_cnts->hp;
+
+#define INNERMOST_LOOP \
+ do { \
+ for (m = 0; m < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0][0][0][0]); ++m) {\
+ vp9_ctx->inter_cnts.coeff[i][j][k][l][m] = \
+ &inter_cnts->ref_cnt[k][i][j][l][m].coeff; \
+ vp9_ctx->inter_cnts.eob[i][j][k][l][m][0] = \
+ &inter_cnts->ref_cnt[k][i][j][l][m].eob[0]; \
+ vp9_ctx->inter_cnts.eob[i][j][k][l][m][1] = \
+ &inter_cnts->ref_cnt[k][i][j][l][m].eob[1]; \
+ \
+ vp9_ctx->intra_cnts.coeff[i][j][k][l][m] = \
+ &intra_cnts->ref_cnt[k][i][j][l][m].coeff; \
+ vp9_ctx->intra_cnts.eob[i][j][k][l][m][0] = \
+ &intra_cnts->ref_cnt[k][i][j][l][m].eob[0]; \
+ vp9_ctx->intra_cnts.eob[i][j][k][l][m][1] = \
+ &intra_cnts->ref_cnt[k][i][j][l][m].eob[1]; \
+ } \
+ } while (0)
+
+ for (i = 0; i < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff); ++i)
+ for (j = 0; j < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0]); ++j)
+ for (k = 0; k < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0][0]); ++k)
+ for (l = 0; l < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0][0][0]); ++l)
+ INNERMOST_LOOP;
+#undef INNERMOST_LOOP
+}
+
+static int rkvdec_vp9_start(struct rkvdec_ctx *ctx)
+{
+ struct rkvdec_dev *rkvdec = ctx->dev;
+ struct rkvdec_vp9_priv_tbl *priv_tbl;
+ struct rkvdec_vp9_ctx *vp9_ctx;
+ unsigned char *count_tbl;
+ int ret;
+
+ vp9_ctx = kzalloc(sizeof(*vp9_ctx), GFP_KERNEL);
+ if (!vp9_ctx)
+ return -ENOMEM;
+
+ ctx->priv = vp9_ctx;
+
+ priv_tbl = dma_alloc_coherent(rkvdec->dev, sizeof(*priv_tbl),
+ &vp9_ctx->priv_tbl.dma, GFP_KERNEL);
+ if (!priv_tbl) {
+ ret = -ENOMEM;
+ goto err_free_ctx;
+ }
+
+ vp9_ctx->priv_tbl.size = sizeof(*priv_tbl);
+ vp9_ctx->priv_tbl.cpu = priv_tbl;
+ memset(priv_tbl, 0, sizeof(*priv_tbl));
+
+ count_tbl = dma_alloc_coherent(rkvdec->dev, RKVDEC_VP9_COUNT_SIZE,
+ &vp9_ctx->count_tbl.dma, GFP_KERNEL);
+ if (!count_tbl) {
+ ret = -ENOMEM;
+ goto err_free_priv_tbl;
+ }
+
+ vp9_ctx->count_tbl.size = RKVDEC_VP9_COUNT_SIZE;
+ vp9_ctx->count_tbl.cpu = count_tbl;
+ memset(count_tbl, 0, sizeof(*count_tbl));
+ rkvdec_init_v4l2_vp9_count_tbl(ctx);
+
+ return 0;
+
+err_free_priv_tbl:
+ dma_free_coherent(rkvdec->dev, vp9_ctx->priv_tbl.size,
+ vp9_ctx->priv_tbl.cpu, vp9_ctx->priv_tbl.dma);
+
+err_free_ctx:
+ kfree(vp9_ctx);
+ return ret;
+}
+
+static void rkvdec_vp9_stop(struct rkvdec_ctx *ctx)
+{
+ struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
+ struct rkvdec_dev *rkvdec = ctx->dev;
+
+ dma_free_coherent(rkvdec->dev, vp9_ctx->count_tbl.size,
+ vp9_ctx->count_tbl.cpu, vp9_ctx->count_tbl.dma);
+ dma_free_coherent(rkvdec->dev, vp9_ctx->priv_tbl.size,
+ vp9_ctx->priv_tbl.cpu, vp9_ctx->priv_tbl.dma);
+ kfree(vp9_ctx);
+}
+
+static int rkvdec_vp9_adjust_fmt(struct rkvdec_ctx *ctx,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *fmt = &f->fmt.pix_mp;
+
+ fmt->num_planes = 1;
+ if (!fmt->plane_fmt[0].sizeimage)
+ fmt->plane_fmt[0].sizeimage = fmt->width * fmt->height * 2;
+ return 0;
+}
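
The width * height * 2 fallback above is a worst-case bound for the coded bitstream, not a value mandated by the VP9 format. A quick worked example, assuming userspace left sizeimage at 0 for a 1920x1088 stream:

/* 1920 * 1088 * 2 = 4177920 bytes (~4 MiB) reserved for one coded frame */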
+
+const struct rkvdec_coded_fmt_ops rkvdec_vp9_fmt_ops = {
+ .adjust_fmt = rkvdec_vp9_adjust_fmt,
+ .start = rkvdec_vp9_start,
+ .stop = rkvdec_vp9_stop,
+ .run = rkvdec_vp9_run,
+ .done = rkvdec_vp9_done,
+};
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index 4fd4a2907da7..c0cf3488f970 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -99,10 +99,30 @@ static const struct rkvdec_ctrls rkvdec_h264_ctrls = {
.num_ctrls = ARRAY_SIZE(rkvdec_h264_ctrl_descs),
};
-static const u32 rkvdec_h264_decoded_fmts[] = {
+static const u32 rkvdec_h264_vp9_decoded_fmts[] = {
V4L2_PIX_FMT_NV12,
};
+static const struct rkvdec_ctrl_desc rkvdec_vp9_ctrl_descs[] = {
+ {
+ .cfg.id = V4L2_CID_STATELESS_VP9_FRAME,
+ },
+ {
+ .cfg.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
+ },
+ {
+ .cfg.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
+ .cfg.min = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ .cfg.max = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ .cfg.def = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ },
+};
+
+static const struct rkvdec_ctrls rkvdec_vp9_ctrls = {
+ .ctrls = rkvdec_vp9_ctrl_descs,
+ .num_ctrls = ARRAY_SIZE(rkvdec_vp9_ctrl_descs),
+};
+
static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
@@ -116,8 +136,23 @@ static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
},
.ctrls = &rkvdec_h264_ctrls,
.ops = &rkvdec_h264_fmt_ops,
- .num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_decoded_fmts),
- .decoded_fmts = rkvdec_h264_decoded_fmts,
+ .num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_vp9_decoded_fmts),
+ .decoded_fmts = rkvdec_h264_vp9_decoded_fmts,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9_FRAME,
+ .frmsize = {
+ .min_width = 64,
+ .max_width = 4096,
+ .step_width = 64,
+ .min_height = 64,
+ .max_height = 2304,
+ .step_height = 64,
+ },
+ .ctrls = &rkvdec_vp9_ctrls,
+ .ops = &rkvdec_vp9_fmt_ops,
+ .num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_vp9_decoded_fmts),
+ .decoded_fmts = rkvdec_h264_vp9_decoded_fmts,
}
};
@@ -677,7 +712,7 @@ static void rkvdec_device_run(void *priv)
rkvdec_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
-static struct v4l2_m2m_ops rkvdec_m2m_ops = {
+static const struct v4l2_m2m_ops rkvdec_m2m_ops = {
.device_run = rkvdec_device_run,
};
diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/staging/media/rkvdec/rkvdec.h
index 52ac3874c5e5..2f4ea1786b93 100644
--- a/drivers/staging/media/rkvdec/rkvdec.h
+++ b/drivers/staging/media/rkvdec/rkvdec.h
@@ -42,14 +42,18 @@ struct rkvdec_run {
struct rkvdec_vp9_decoded_buffer_info {
/* Info needed when the decoded frame serves as a reference frame. */
- u16 width;
- u16 height;
- u32 bit_depth : 4;
+ unsigned short width;
+ unsigned short height;
+ unsigned int bit_depth : 4;
};
struct rkvdec_decoded_buffer {
/* Must be the first field in this struct. */
struct v4l2_m2m_buffer base;
+
+ union {
+ struct rkvdec_vp9_decoded_buffer_info vp9;
+ };
};
static inline struct rkvdec_decoded_buffer *
@@ -116,4 +120,6 @@ void rkvdec_run_preamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run);
void rkvdec_run_postamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run);
extern const struct rkvdec_coded_fmt_ops rkvdec_h264_fmt_ops;
+extern const struct rkvdec_coded_fmt_ops rkvdec_vp9_fmt_ops;
+
#endif /* RKVDEC_H_ */
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index c76fc97d97a0..4a4b714b0f26 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -580,6 +580,14 @@ static const struct cedrus_variant sun8i_r40_cedrus_variant = {
.mod_rate = 297000000,
};
+static const struct cedrus_variant sun20i_d1_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_MPEG2_DEC |
+ CEDRUS_CAPABILITY_H264_DEC |
+ CEDRUS_CAPABILITY_H265_DEC,
+ .mod_rate = 432000000,
+};
+
static const struct cedrus_variant sun50i_a64_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED |
CEDRUS_CAPABILITY_MPEG2_DEC |
@@ -638,6 +646,10 @@ static const struct of_device_id cedrus_dt_match[] = {
.data = &sun8i_r40_cedrus_variant,
},
{
+ .compatible = "allwinner,sun20i-d1-video-engine",
+ .data = &sun20i_d1_cedrus_variant,
+ },
+ {
.compatible = "allwinner,sun50i-a64-video-engine",
.data = &sun50i_a64_cedrus_variant,
},
diff --git a/drivers/staging/media/tegra-vde/vde.c b/drivers/staging/media/tegra-vde/vde.c
index ed4c1250b303..a8f1a024c343 100644
--- a/drivers/staging/media/tegra-vde/vde.c
+++ b/drivers/staging/media/tegra-vde/vde.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>
#include "uapi.h"
@@ -84,6 +85,96 @@ static int tegra_vde_wait_mbe(struct tegra_vde *vde)
(tmp >= 0x10), 1, 100);
}
+static int tegra_vde_alloc_bo(struct tegra_vde *vde,
+ struct tegra_vde_bo **ret_bo,
+ enum dma_data_direction dma_dir,
+ size_t size)
+{
+ struct device *dev = vde->miscdev.parent;
+ struct tegra_vde_bo *bo;
+ int err;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return -ENOMEM;
+
+ bo->vde = vde;
+ bo->size = size;
+ bo->dma_dir = dma_dir;
+ bo->dma_attrs = DMA_ATTR_WRITE_COMBINE |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+
+ if (!vde->domain)
+ bo->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
+
+ bo->dma_cookie = dma_alloc_attrs(dev, bo->size, &bo->dma_handle,
+ GFP_KERNEL, bo->dma_attrs);
+ if (!bo->dma_cookie) {
+ dev_err(dev, "Failed to allocate DMA buffer of size: %zu\n",
+ bo->size);
+ err = -ENOMEM;
+ goto free_bo;
+ }
+
+ err = dma_get_sgtable_attrs(dev, &bo->sgt, bo->dma_cookie,
+ bo->dma_handle, bo->size, bo->dma_attrs);
+ if (err) {
+ dev_err(dev, "Failed to get DMA buffer SG table: %d\n", err);
+ goto free_attrs;
+ }
+
+ err = dma_map_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
+ if (err) {
+ dev_err(dev, "Failed to map DMA buffer SG table: %d\n", err);
+ goto free_table;
+ }
+
+ if (vde->domain) {
+ err = tegra_vde_iommu_map(vde, &bo->sgt, &bo->iova, bo->size);
+ if (err) {
+ dev_err(dev, "Failed to map DMA buffer IOVA: %d\n", err);
+ goto unmap_sgtable;
+ }
+
+ bo->dma_addr = iova_dma_addr(&vde->iova, bo->iova);
+ } else {
+ bo->dma_addr = sg_dma_address(bo->sgt.sgl);
+ }
+
+ *ret_bo = bo;
+
+ return 0;
+
+unmap_sgtable:
+ dma_unmap_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
+free_table:
+ sg_free_table(&bo->sgt);
+free_attrs:
+ dma_free_attrs(dev, bo->size, bo->dma_cookie, bo->dma_handle,
+ bo->dma_attrs);
+free_bo:
+ kfree(bo);
+
+ return err;
+}
+
+static void tegra_vde_free_bo(struct tegra_vde_bo *bo)
+{
+ struct tegra_vde *vde = bo->vde;
+ struct device *dev = vde->miscdev.parent;
+
+ if (vde->domain)
+ tegra_vde_iommu_unmap(vde, bo->iova);
+
+ dma_unmap_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
+
+ sg_free_table(&bo->sgt);
+
+ dma_free_attrs(dev, bo->size, bo->dma_cookie, bo->dma_handle,
+ bo->dma_attrs);
+ kfree(bo);
+}
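
The DMA_ATTR_FORCE_CONTIGUOUS fallback in tegra_vde_alloc_bo() is what lets one allocator serve both configurations: with an IOMMU domain the scatter-gather list is stitched into a single contiguous IOVA range, while without one the buffer itself must be physically contiguous. A hedged call-site sketch, assuming a struct tegra_vde *vde is in scope (this mirrors the secure-BO allocation added to probe below):

struct tegra_vde_bo *bo;
int err;

/* 4 KiB buffer the hardware only writes to, hence DMA_FROM_DEVICE;
 * tegra_vde_free_bo() unwinds IOVA, SG table and attrs in reverse order */
err = tegra_vde_alloc_bo(vde, &bo, DMA_FROM_DEVICE, SZ_4K);
if (!err)
	tegra_vde_free_bo(bo);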
+
static int tegra_vde_setup_mbe_frame_idx(struct tegra_vde *vde,
unsigned int refs_nb,
bool setup_refs)
@@ -249,7 +340,7 @@ static void tegra_vde_setup_iram_tables(struct tegra_vde *vde,
value |= frame->frame_num;
} else {
aux_addr = 0x6ADEAD00;
- value = 0;
+ value = 0x3f;
}
tegra_vde_setup_iram_entry(vde, 0, i, value, aux_addr);
@@ -424,6 +515,9 @@ static int tegra_vde_setup_hw_context(struct tegra_vde *vde,
tegra_vde_writel(vde, bitstream_data_addr, vde->sxe, 0x6C);
+ if (vde->soc->supports_ref_pic_marking)
+ tegra_vde_writel(vde, vde->secure_bo->dma_addr, vde->sxe, 0x7c);
+
value = 0x10000005;
value |= ctx->pic_width_in_mbs << 11;
value |= ctx->pic_height_in_mbs << 3;
@@ -920,13 +1014,17 @@ static __maybe_unused int tegra_vde_runtime_suspend(struct device *dev)
struct tegra_vde *vde = dev_get_drvdata(dev);
int err;
- err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC);
- if (err) {
- dev_err(dev, "Failed to power down HW: %d\n", err);
- return err;
+ if (!dev->pm_domain) {
+ err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC);
+ if (err) {
+ dev_err(dev, "Failed to power down HW: %d\n", err);
+ return err;
+ }
}
clk_disable_unprepare(vde->clk);
+ reset_control_release(vde->rst);
+ reset_control_release(vde->rst_mc);
return 0;
}
@@ -936,14 +1034,45 @@ static __maybe_unused int tegra_vde_runtime_resume(struct device *dev)
struct tegra_vde *vde = dev_get_drvdata(dev);
int err;
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC,
- vde->clk, vde->rst);
+ err = reset_control_acquire(vde->rst_mc);
if (err) {
- dev_err(dev, "Failed to power up HW : %d\n", err);
+ dev_err(dev, "Failed to acquire mc reset: %d\n", err);
return err;
}
+ err = reset_control_acquire(vde->rst);
+ if (err) {
+ dev_err(dev, "Failed to acquire reset: %d\n", err);
+ goto release_mc_reset;
+ }
+
+ if (!dev->pm_domain) {
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC,
+ vde->clk, vde->rst);
+ if (err) {
+ dev_err(dev, "Failed to power up HW : %d\n", err);
+ goto release_reset;
+ }
+ } else {
+ /*
+ * tegra_powergate_sequence_power_up() leaves clocks enabled,
+ * while GENPD does not.
+ */
+ err = clk_prepare_enable(vde->clk);
+ if (err) {
+ dev_err(dev, "Failed to enable clock: %d\n", err);
+ goto release_reset;
+ }
+ }
+
return 0;
+
+release_reset:
+ reset_control_release(vde->rst);
+release_mc_reset:
+ reset_control_release(vde->rst_mc);
+
+ return err;
}
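
The acquire/release calls above pair with the switch to exclusive-released reset controls in probe below: the reset lines stay released while the VDE is powered down, so another driver sharing them can acquire them in the meantime. A minimal usage sketch of that API, assuming an already-obtained rst and an illustrative settle delay:

int err;

err = reset_control_acquire(rst);	/* claim exclusive ownership */
if (!err) {
	reset_control_assert(rst);
	usleep_range(10, 20);		/* hypothetical settle time */
	reset_control_deassert(rst);
	reset_control_release(rst);	/* allow other users to acquire it */
}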
static int tegra_vde_probe(struct platform_device *pdev)
@@ -958,6 +1087,8 @@ static int tegra_vde_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vde);
+ vde->soc = of_device_get_match_data(&pdev->dev);
+
vde->sxe = devm_platform_ioremap_resource_byname(pdev, "sxe");
if (IS_ERR(vde->sxe))
return PTR_ERR(vde->sxe);
@@ -1001,14 +1132,14 @@ static int tegra_vde_probe(struct platform_device *pdev)
return err;
}
- vde->rst = devm_reset_control_get(dev, NULL);
+ vde->rst = devm_reset_control_get_exclusive_released(dev, NULL);
if (IS_ERR(vde->rst)) {
err = PTR_ERR(vde->rst);
dev_err(dev, "Could not get VDE reset %d\n", err);
return err;
}
- vde->rst_mc = devm_reset_control_get_optional(dev, "mc");
+ vde->rst_mc = devm_reset_control_get_optional_exclusive_released(dev, "mc");
if (IS_ERR(vde->rst_mc)) {
err = PTR_ERR(vde->rst_mc);
dev_err(dev, "Could not get MC reset %d\n", err);
@@ -1026,6 +1157,12 @@ static int tegra_vde_probe(struct platform_device *pdev)
return err;
}
+ err = devm_tegra_core_dev_init_opp_table_common(dev);
+ if (err) {
+ dev_err(dev, "Could initialize OPP table %d\n", err);
+ return err;
+ }
+
vde->iram_pool = of_gen_pool_get(dev->of_node, "iram", 0);
if (!vde->iram_pool) {
dev_err(dev, "Could not get IRAM pool\n");
@@ -1056,12 +1193,6 @@ static int tegra_vde_probe(struct platform_device *pdev)
goto err_gen_free;
}
- err = misc_register(&vde->miscdev);
- if (err) {
- dev_err(dev, "Failed to register misc device: %d\n", err);
- goto err_deinit_iommu;
- }
-
pm_runtime_enable(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 300);
@@ -1077,15 +1208,26 @@ static int tegra_vde_probe(struct platform_device *pdev)
pm_runtime_put(dev);
+ err = tegra_vde_alloc_bo(vde, &vde->secure_bo, DMA_FROM_DEVICE, 4096);
+ if (err) {
+ dev_err(dev, "Failed to allocate secure BO: %d\n", err);
+ goto err_pm_runtime;
+ }
+
+ err = misc_register(&vde->miscdev);
+ if (err) {
+ dev_err(dev, "Failed to register misc device: %d\n", err);
+ goto err_free_secure_bo;
+ }
+
return 0;
+err_free_secure_bo:
+ tegra_vde_free_bo(vde->secure_bo);
err_pm_runtime:
- misc_deregister(&vde->miscdev);
-
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-err_deinit_iommu:
tegra_vde_iommu_deinit(vde);
err_gen_free:
@@ -1100,6 +1242,10 @@ static int tegra_vde_remove(struct platform_device *pdev)
struct tegra_vde *vde = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ misc_deregister(&vde->miscdev);
+
+ tegra_vde_free_bo(vde->secure_bo);
+
/*
* As it increments RPM usage_count even on errors, we don't need to
* check the returned code here.
@@ -1116,8 +1262,6 @@ static int tegra_vde_remove(struct platform_device *pdev)
pm_runtime_put_noidle(dev);
clk_disable_unprepare(vde->clk);
- misc_deregister(&vde->miscdev);
-
tegra_vde_dmabuf_cache_unmap_all(vde);
tegra_vde_iommu_deinit(vde);
@@ -1133,8 +1277,7 @@ static void tegra_vde_shutdown(struct platform_device *pdev)
 * On some devices the bootloader isn't prepared to handle a power-gated
 * VDE across a warm reboot; the machine will hang in that case.
*/
- if (pm_runtime_status_suspended(&pdev->dev))
- tegra_vde_runtime_resume(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
}
static __maybe_unused int tegra_vde_pm_suspend(struct device *dev)
@@ -1173,8 +1316,27 @@ static const struct dev_pm_ops tegra_vde_pm_ops = {
tegra_vde_pm_resume)
};
+static const struct tegra_vde_soc tegra124_vde_soc = {
+ .supports_ref_pic_marking = true,
+};
+
+static const struct tegra_vde_soc tegra114_vde_soc = {
+ .supports_ref_pic_marking = true,
+};
+
+static const struct tegra_vde_soc tegra30_vde_soc = {
+ .supports_ref_pic_marking = false,
+};
+
+static const struct tegra_vde_soc tegra20_vde_soc = {
+ .supports_ref_pic_marking = false,
+};
+
static const struct of_device_id tegra_vde_of_match[] = {
- { .compatible = "nvidia,tegra20-vde", },
+ { .compatible = "nvidia,tegra124-vde", .data = &tegra124_vde_soc },
+ { .compatible = "nvidia,tegra114-vde", .data = &tegra114_vde_soc },
+ { .compatible = "nvidia,tegra30-vde", .data = &tegra30_vde_soc },
+ { .compatible = "nvidia,tegra20-vde", .data = &tegra20_vde_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_vde_of_match);
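
The per-SoC .data pointers above are retrieved in probe via of_device_get_match_data(); a short sketch of how the resulting flag is consulted (condensed from the setup_hw_context hunk earlier in this patch):

const struct tegra_vde_soc *soc = of_device_get_match_data(&pdev->dev);

/* only Tegra114/124 implement reference picture marking, so only they
 * get the secure BO address programmed into the SXE register file */
if (soc && soc->supports_ref_pic_marking)
	tegra_vde_writel(vde, vde->secure_bo->dma_addr, vde->sxe, 0x7c);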
diff --git a/drivers/staging/media/tegra-vde/vde.h b/drivers/staging/media/tegra-vde/vde.h
index 5561291b0c88..bbd42b8d9991 100644
--- a/drivers/staging/media/tegra-vde/vde.h
+++ b/drivers/staging/media/tegra-vde/vde.h
@@ -24,6 +24,22 @@ struct iommu_domain;
struct reset_control;
struct dma_buf_attachment;
+struct tegra_vde_soc {
+ bool supports_ref_pic_marking;
+};
+
+struct tegra_vde_bo {
+ struct iova *iova;
+ struct sg_table sgt;
+ struct tegra_vde *vde;
+ enum dma_data_direction dma_dir;
+ unsigned long dma_attrs;
+ dma_addr_t dma_handle;
+ dma_addr_t dma_addr;
+ void *dma_cookie;
+ size_t size;
+};
+
struct tegra_vde {
void __iomem *sxe;
void __iomem *bsev;
@@ -48,6 +64,8 @@ struct tegra_vde {
struct iova_domain iova;
struct iova *iova_resv_static_addresses;
struct iova *iova_resv_last_page;
+ const struct tegra_vde_soc *soc;
+ struct tegra_vde_bo *secure_bo;
dma_addr_t iram_lists_addr;
u32 *iram;
};
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index 69d9787d5338..d1f43f465c22 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -1845,7 +1845,6 @@ static int tegra_vi_graph_init(struct tegra_vi *vi)
struct tegra_vi_channel *chan;
struct fwnode_handle *fwnode = dev_fwnode(vi->dev);
int ret;
- struct fwnode_handle *remote = NULL;
/*
* Walk the links to parse the full graph. Each channel will have
@@ -1857,11 +1856,16 @@ static int tegra_vi_graph_init(struct tegra_vi *vi)
* next channels.
*/
list_for_each_entry(chan, &vi->vi_chans, list) {
- remote = fwnode_graph_get_remote_node(fwnode, chan->portnos[0],
- 0);
- if (!remote)
+ struct fwnode_handle *ep, *remote;
+
+ ep = fwnode_graph_get_endpoint_by_id(fwnode,
+ chan->portnos[0], 0, 0);
+ if (!ep)
continue;
+ remote = fwnode_graph_get_remote_port_parent(ep);
+ fwnode_handle_put(ep);
+
ret = tegra_vi_graph_parse_one(chan, remote);
fwnode_handle_put(remote);
if (ret < 0 || list_empty(&chan->notifier.asd_list))
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index bd102329d8c8..29f8ce2a47f5 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -971,7 +971,7 @@ static void fsl_mx6_disable(struct platform_device *pdev)
clk_disable_unprepare(dev->clk);
}
-static int rcar_h2_enable(struct platform_device *pdev)
+static int rcar_gen2_enable(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
int ret;
@@ -1006,7 +1006,7 @@ static int rcar_h2_enable(struct platform_device *pdev)
return 0;
}
-static void rcar_h2_disable(struct platform_device *pdev)
+static void rcar_gen2_disable(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
@@ -1016,7 +1016,7 @@ static void rcar_h2_disable(struct platform_device *pdev)
writel(0x0, dev->io_base + 0x600);
}
-static int rcar_m3_enable(struct platform_device *pdev)
+static int rcar_gen3_enable(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
u32 enable_512fs = dev->clk_speed == CLK_512FS;
@@ -1046,7 +1046,7 @@ static int rcar_m3_enable(struct platform_device *pdev)
return 0;
}
-static void rcar_m3_disable(struct platform_device *pdev)
+static void rcar_gen3_disable(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
@@ -1058,20 +1058,20 @@ static void rcar_m3_disable(struct platform_device *pdev)
/* ]] platform specific functions */
-enum dim2_platforms { FSL_MX6, RCAR_H2, RCAR_M3 };
+enum dim2_platforms { FSL_MX6, RCAR_GEN2, RCAR_GEN3 };
static struct dim2_platform_data plat_data[] = {
[FSL_MX6] = {
.enable = fsl_mx6_enable,
.disable = fsl_mx6_disable,
},
- [RCAR_H2] = {
- .enable = rcar_h2_enable,
- .disable = rcar_h2_disable,
+ [RCAR_GEN2] = {
+ .enable = rcar_gen2_enable,
+ .disable = rcar_gen2_disable,
},
- [RCAR_M3] = {
- .enable = rcar_m3_enable,
- .disable = rcar_m3_disable,
+ [RCAR_GEN3] = {
+ .enable = rcar_gen3_enable,
+ .disable = rcar_gen3_disable,
.fcnt = 3,
},
};
@@ -1083,11 +1083,11 @@ static const struct of_device_id dim2_of_match[] = {
},
{
.compatible = "renesas,mlp",
- .data = plat_data + RCAR_H2
+ .data = plat_data + RCAR_GEN2
},
{
- .compatible = "rcar,medialb-dim2",
- .data = plat_data + RCAR_M3
+ .compatible = "renesas,rcar-gen3-mlp",
+ .data = plat_data + RCAR_GEN3
},
{
.compatible = "xlnx,axi4-os62420_3pin-1.00.a",
diff --git a/drivers/staging/mt7621-dma/Kconfig b/drivers/staging/mt7621-dma/Kconfig
deleted file mode 100644
index 54a110288f92..000000000000
--- a/drivers/staging/mt7621-dma/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config MTK_HSDMA
- tristate "MTK HSDMA support"
- depends on RALINK && SOC_MT7621
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
-
diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile
deleted file mode 100644
index 23256d1286f3..000000000000
--- a/drivers/staging/mt7621-dma/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MTK_HSDMA) += hsdma-mt7621.o
-
-ccflags-y += -I$(srctree)/drivers/dma
diff --git a/drivers/staging/mt7621-dma/TODO b/drivers/staging/mt7621-dma/TODO
deleted file mode 100644
index fdbc5002c32a..000000000000
--- a/drivers/staging/mt7621-dma/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-
-- general code review and clean up
-- ensure device-tree requirements are documented
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-dma/hsdma-mt7621.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c
deleted file mode 100644
index 1424d01d434b..000000000000
--- a/drivers/staging/mt7621-dma/hsdma-mt7621.c
+++ /dev/null
@@ -1,758 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
- * MTK HSDMA support
- */
-
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/irq.h>
-#include <linux/of_dma.h>
-#include <linux/reset.h>
-#include <linux/of_device.h>
-
-#include "virt-dma.h"
-
-#define HSDMA_BASE_OFFSET 0x800
-
-#define HSDMA_REG_TX_BASE 0x00
-#define HSDMA_REG_TX_CNT 0x04
-#define HSDMA_REG_TX_CTX 0x08
-#define HSDMA_REG_TX_DTX 0x0c
-#define HSDMA_REG_RX_BASE 0x100
-#define HSDMA_REG_RX_CNT 0x104
-#define HSDMA_REG_RX_CRX 0x108
-#define HSDMA_REG_RX_DRX 0x10c
-#define HSDMA_REG_INFO 0x200
-#define HSDMA_REG_GLO_CFG 0x204
-#define HSDMA_REG_RST_CFG 0x208
-#define HSDMA_REG_DELAY_INT 0x20c
-#define HSDMA_REG_FREEQ_THRES 0x210
-#define HSDMA_REG_INT_STATUS 0x220
-#define HSDMA_REG_INT_MASK 0x228
-#define HSDMA_REG_SCH_Q01 0x280
-#define HSDMA_REG_SCH_Q23 0x284
-
-#define HSDMA_DESCS_MAX 0xfff
-#define HSDMA_DESCS_NUM 8
-#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
-#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
-
-/* HSDMA_REG_INFO */
-#define HSDMA_INFO_INDEX_MASK 0xf
-#define HSDMA_INFO_INDEX_SHIFT 24
-#define HSDMA_INFO_BASE_MASK 0xff
-#define HSDMA_INFO_BASE_SHIFT 16
-#define HSDMA_INFO_RX_MASK 0xff
-#define HSDMA_INFO_RX_SHIFT 8
-#define HSDMA_INFO_TX_MASK 0xff
-#define HSDMA_INFO_TX_SHIFT 0
-
-/* HSDMA_REG_GLO_CFG */
-#define HSDMA_GLO_TX_2B_OFFSET BIT(31)
-#define HSDMA_GLO_CLK_GATE BIT(30)
-#define HSDMA_GLO_BYTE_SWAP BIT(29)
-#define HSDMA_GLO_MULTI_DMA BIT(10)
-#define HSDMA_GLO_TWO_BUF BIT(9)
-#define HSDMA_GLO_32B_DESC BIT(8)
-#define HSDMA_GLO_BIG_ENDIAN BIT(7)
-#define HSDMA_GLO_TX_DONE BIT(6)
-#define HSDMA_GLO_BT_MASK 0x3
-#define HSDMA_GLO_BT_SHIFT 4
-#define HSDMA_GLO_RX_BUSY BIT(3)
-#define HSDMA_GLO_RX_DMA BIT(2)
-#define HSDMA_GLO_TX_BUSY BIT(1)
-#define HSDMA_GLO_TX_DMA BIT(0)
-
-#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
-#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
-#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
-#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
-
-#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
- HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
-
-/* HSDMA_REG_RST_CFG */
-#define HSDMA_RST_RX_SHIFT 16
-#define HSDMA_RST_TX_SHIFT 0
-
-/* HSDMA_REG_DELAY_INT */
-#define HSDMA_DELAY_INT_EN BIT(15)
-#define HSDMA_DELAY_PEND_OFFSET 8
-#define HSDMA_DELAY_TIME_OFFSET 0
-#define HSDMA_DELAY_TX_OFFSET 16
-#define HSDMA_DELAY_RX_OFFSET 0
-
-#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
- ((x) << HSDMA_DELAY_PEND_OFFSET))
-#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
- HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
-
-/* HSDMA_REG_INT_STATUS */
-#define HSDMA_INT_DELAY_RX_COH BIT(31)
-#define HSDMA_INT_DELAY_RX_INT BIT(30)
-#define HSDMA_INT_DELAY_TX_COH BIT(29)
-#define HSDMA_INT_DELAY_TX_INT BIT(28)
-#define HSDMA_INT_RX_MASK 0x3
-#define HSDMA_INT_RX_SHIFT 16
-#define HSDMA_INT_RX_Q0 BIT(16)
-#define HSDMA_INT_TX_MASK 0xf
-#define HSDMA_INT_TX_SHIFT 0
-#define HSDMA_INT_TX_Q0 BIT(0)
-
-/* tx/rx dma desc flags */
-#define HSDMA_PLEN_MASK 0x3fff
-#define HSDMA_DESC_DONE BIT(31)
-#define HSDMA_DESC_LS0 BIT(30)
-#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
-#define HSDMA_DESC_TAG BIT(15)
-#define HSDMA_DESC_LS1 BIT(14)
-#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
-
-/* align 4 bytes */
-#define HSDMA_ALIGN_SIZE 3
-/* align size 128bytes */
-#define HSDMA_MAX_PLEN 0x3f80
-
-struct hsdma_desc {
- u32 addr0;
- u32 flags;
- u32 addr1;
- u32 unused;
-};
-
-struct mtk_hsdma_sg {
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- u32 len;
-};
-
-struct mtk_hsdma_desc {
- struct virt_dma_desc vdesc;
- unsigned int num_sgs;
- struct mtk_hsdma_sg sg[1];
-};
-
-struct mtk_hsdma_chan {
- struct virt_dma_chan vchan;
- unsigned int id;
- dma_addr_t desc_addr;
- int tx_idx;
- int rx_idx;
- struct hsdma_desc *tx_ring;
- struct hsdma_desc *rx_ring;
- struct mtk_hsdma_desc *desc;
- unsigned int next_sg;
-};
-
-struct mtk_hsdam_engine {
- struct dma_device ddev;
- struct device_dma_parameters dma_parms;
- void __iomem *base;
- struct tasklet_struct task;
- volatile unsigned long chan_issued;
-
- struct mtk_hsdma_chan chan[1];
-};
-
-static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(struct mtk_hsdma_chan *chan)
-{
- return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
- ddev);
-}
-
-static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
-{
- return container_of(c, struct mtk_hsdma_chan, vchan.chan);
-}
-
-static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(struct virt_dma_desc *vdesc)
-{
- return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
-}
-
-static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
-{
- return readl(hsdma->base + reg);
-}
-
-static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
- unsigned int reg, u32 val)
-{
- writel(val, hsdma->base + reg);
-}
-
-static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- chan->tx_idx = 0;
- chan->rx_idx = HSDMA_DESCS_NUM - 1;
-
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
-
- mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
- 0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
- mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
- 0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
-}
-
-static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
-{
- dev_dbg(hsdma->ddev.dev,
- "tbase %08x, tcnt %08x, tctx %08x, tdtx: %08x, rbase %08x, rcnt %08x, rctx %08x, rdtx %08x\n",
- mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
- mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
- mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
- mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
- mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
- mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
- mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
- mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
-
- dev_dbg(hsdma->ddev.dev,
- "info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n",
- mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
- mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
- mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
- mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
- mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
-}
-
-static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- struct hsdma_desc *tx_desc;
- struct hsdma_desc *rx_desc;
- int i;
-
- dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
- chan->tx_idx, chan->rx_idx);
-
- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
- tx_desc = &chan->tx_ring[i];
- rx_desc = &chan->rx_ring[i];
-
- dev_dbg(hsdma->ddev.dev,
- "%d tx addr0: %08x, flags %08x, tx addr1: %08x, rx addr0 %08x, flags %08x\n",
- i, tx_desc->addr0, tx_desc->flags,
- tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
- }
-}
-
-static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- int i;
-
- /* disable dma */
- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
-
- /* disable intr */
- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
-
- /* init desc value */
- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
- chan->tx_ring[i].addr0 = 0;
- chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
- }
- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
- chan->rx_ring[i].addr0 = 0;
- chan->rx_ring[i].flags = 0;
- }
-
- /* reset */
- mtk_hsdma_reset_chan(hsdma, chan);
-
- /* enable intr */
- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
-
- /* enable dma */
- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
-}
-
-static int mtk_hsdma_terminate_all(struct dma_chan *c)
-{
- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
- struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
- unsigned long timeout;
- LIST_HEAD(head);
-
- spin_lock_bh(&chan->vchan.lock);
- chan->desc = NULL;
- clear_bit(chan->id, &hsdma->chan_issued);
- vchan_get_all_descriptors(&chan->vchan, &head);
- spin_unlock_bh(&chan->vchan.lock);
-
- vchan_dma_desc_free_list(&chan->vchan, &head);
-
- /* wait dma transfer complete */
- timeout = jiffies + msecs_to_jiffies(2000);
- while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
- (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
- if (time_after_eq(jiffies, timeout)) {
- hsdma_dump_desc(hsdma, chan);
- mtk_hsdma_reset(hsdma, chan);
- dev_err(hsdma->ddev.dev, "timeout, reset it\n");
- break;
- }
- cpu_relax();
- }
-
- return 0;
-}
-
-static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- dma_addr_t src, dst;
- size_t len, tlen;
- struct hsdma_desc *tx_desc, *rx_desc;
- struct mtk_hsdma_sg *sg;
- unsigned int i;
- int rx_idx;
-
- sg = &chan->desc->sg[0];
- len = sg->len;
- chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
-
- /* tx desc */
- src = sg->src_addr;
- for (i = 0; i < chan->desc->num_sgs; i++) {
- tx_desc = &chan->tx_ring[chan->tx_idx];
-
- if (len > HSDMA_MAX_PLEN)
- tlen = HSDMA_MAX_PLEN;
- else
- tlen = len;
-
- if (i & 0x1) {
- tx_desc->addr1 = src;
- tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
- } else {
- tx_desc->addr0 = src;
- tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
-
- /* update index */
- chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
- }
-
- src += tlen;
- len -= tlen;
- }
- if (i & 0x1)
- tx_desc->flags |= HSDMA_DESC_LS0;
- else
- tx_desc->flags |= HSDMA_DESC_LS1;
-
- /* rx desc */
- rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
- len = sg->len;
- dst = sg->dst_addr;
- for (i = 0; i < chan->desc->num_sgs; i++) {
- rx_desc = &chan->rx_ring[rx_idx];
- if (len > HSDMA_MAX_PLEN)
- tlen = HSDMA_MAX_PLEN;
- else
- tlen = len;
-
- rx_desc->addr0 = dst;
- rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
-
- dst += tlen;
- len -= tlen;
-
- /* update index */
- rx_idx = HSDMA_NEXT_DESC(rx_idx);
- }
-
- /* make sure desc and index all up to date */
- wmb();
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
-
- return 0;
-}
-
-static int gdma_next_desc(struct mtk_hsdma_chan *chan)
-{
- struct virt_dma_desc *vdesc;
-
- vdesc = vchan_next_desc(&chan->vchan);
- if (!vdesc) {
- chan->desc = NULL;
- return 0;
- }
- chan->desc = to_mtk_hsdma_desc(vdesc);
- chan->next_sg = 0;
-
- return 1;
-}
-
-static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- struct mtk_hsdma_desc *desc;
- int chan_issued;
-
- chan_issued = 0;
- spin_lock_bh(&chan->vchan.lock);
- desc = chan->desc;
- if (likely(desc)) {
- if (chan->next_sg == desc->num_sgs) {
- list_del(&desc->vdesc.node);
- vchan_cookie_complete(&desc->vdesc);
- chan_issued = gdma_next_desc(chan);
- }
- } else {
- dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
- }
-
- if (chan_issued)
- set_bit(chan->id, &hsdma->chan_issued);
- spin_unlock_bh(&chan->vchan.lock);
-}
-
-static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
-{
- struct mtk_hsdam_engine *hsdma = devid;
- u32 status;
-
- status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
- if (unlikely(!status))
- return IRQ_NONE;
-
- if (likely(status & HSDMA_INT_RX_Q0))
- tasklet_schedule(&hsdma->task);
- else
- dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", status);
- /* clean intr bits */
- mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
-
- return IRQ_HANDLED;
-}
-
-static void mtk_hsdma_issue_pending(struct dma_chan *c)
-{
- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
- struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
-
- spin_lock_bh(&chan->vchan.lock);
- if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
- if (gdma_next_desc(chan)) {
- set_bit(chan->id, &hsdma->chan_issued);
- tasklet_schedule(&hsdma->task);
- } else {
- dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
- }
- }
- spin_unlock_bh(&chan->vchan.lock);
-}
-
-static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
- struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
- struct mtk_hsdma_desc *desc;
-
- if (len <= 0)
- return NULL;
-
- desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
- if (!desc) {
- dev_err(c->device->dev, "alloc memcpy decs error\n");
- return NULL;
- }
-
- desc->sg[0].src_addr = src;
- desc->sg[0].dst_addr = dest;
- desc->sg[0].len = len;
-
- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-}
-
-static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
- dma_cookie_t cookie,
- struct dma_tx_state *state)
-{
- return dma_cookie_status(c, cookie, state);
-}
-
-static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
-{
- vchan_free_chan_resources(to_virt_chan(c));
-}
-
-static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
-{
- kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
-}
-
-static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
-{
- struct mtk_hsdma_chan *chan;
-
- if (test_and_clear_bit(0, &hsdma->chan_issued)) {
- chan = &hsdma->chan[0];
- if (chan->desc)
- mtk_hsdma_start_transfer(hsdma, chan);
- else
- dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
- }
-}
-
-static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
-{
- struct mtk_hsdma_chan *chan;
- int next_idx, drx_idx, cnt;
-
- chan = &hsdma->chan[0];
- next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
- drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
-
- cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
- if (!cnt)
- return;
-
- chan->next_sg += cnt;
- chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
-
- /* update rx crx */
- wmb();
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
-
- mtk_hsdma_chan_done(hsdma, chan);
-}
-
-static void mtk_hsdma_tasklet(struct tasklet_struct *t)
-{
- struct mtk_hsdam_engine *hsdma = from_tasklet(hsdma, t, task);
-
- mtk_hsdma_rx(hsdma);
- mtk_hsdma_tx(hsdma);
-}
-
-static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- int i;
-
- chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
- 2 * HSDMA_DESCS_NUM *
- sizeof(*chan->tx_ring),
- &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
- if (!chan->tx_ring)
- goto no_mem;
-
- chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
-
- /* init tx ring value */
- for (i = 0; i < HSDMA_DESCS_NUM; i++)
- chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
-
- return 0;
-no_mem:
- return -ENOMEM;
-}
-
-static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- if (chan->tx_ring) {
- dma_free_coherent(hsdma->ddev.dev,
- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
- chan->tx_ring, chan->desc_addr);
- chan->tx_ring = NULL;
- chan->rx_ring = NULL;
- }
-}
-
-static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
-{
- struct mtk_hsdma_chan *chan;
- int ret;
- u32 reg;
-
- /* init desc */
- chan = &hsdma->chan[0];
- ret = mtk_hsdam_alloc_desc(hsdma, chan);
- if (ret)
- return ret;
-
- /* tx */
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
- /* rx */
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
- (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
- /* reset */
- mtk_hsdma_reset_chan(hsdma, chan);
-
- /* enable rx intr */
- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
-
- /* enable dma */
- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
-
- /* hardware info */
- reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
- dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
- (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
- (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
-
- hsdma_dump_reg(hsdma);
-
- return ret;
-}
-
-static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
-{
- struct mtk_hsdma_chan *chan;
-
- /* disable dma */
- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
-
- /* disable intr */
- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
-
- /* free desc */
- chan = &hsdma->chan[0];
- mtk_hsdam_free_desc(hsdma, chan);
-
- /* tx */
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
- /* rx */
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
- /* reset */
- mtk_hsdma_reset_chan(hsdma, chan);
-}
-
-static const struct of_device_id mtk_hsdma_of_match[] = {
- { .compatible = "mediatek,mt7621-hsdma" },
- { },
-};
-
-static int mtk_hsdma_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- struct mtk_hsdma_chan *chan;
- struct mtk_hsdam_engine *hsdma;
- struct dma_device *dd;
- int ret;
- int irq;
- void __iomem *base;
-
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
- if (!match)
- return -EINVAL;
-
- hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
- if (!hsdma)
- return -EINVAL;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
- hsdma->base = base + HSDMA_BASE_OFFSET;
- tasklet_setup(&hsdma->task, mtk_hsdma_tasklet);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -EINVAL;
- ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
- 0, dev_name(&pdev->dev), hsdma);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return ret;
- }
-
- device_reset(&pdev->dev);
-
- dd = &hsdma->ddev;
- dma_cap_set(DMA_MEMCPY, dd->cap_mask);
- dd->copy_align = HSDMA_ALIGN_SIZE;
- dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
- dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
- dd->device_terminate_all = mtk_hsdma_terminate_all;
- dd->device_tx_status = mtk_hsdma_tx_status;
- dd->device_issue_pending = mtk_hsdma_issue_pending;
- dd->dev = &pdev->dev;
- dd->dev->dma_parms = &hsdma->dma_parms;
- dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
- INIT_LIST_HEAD(&dd->channels);
-
- chan = &hsdma->chan[0];
- chan->id = 0;
- chan->vchan.desc_free = mtk_hsdma_desc_free;
- vchan_init(&chan->vchan, dd);
-
- /* init hardware */
- ret = mtk_hsdma_init(hsdma);
- if (ret) {
- dev_err(&pdev->dev, "failed to alloc ring descs\n");
- return ret;
- }
-
- ret = dma_async_device_register(dd);
- if (ret) {
- dev_err(&pdev->dev, "failed to register dma device\n");
- goto err_uninit_hsdma;
- }
-
- ret = of_dma_controller_register(pdev->dev.of_node,
- of_dma_xlate_by_chan_id, hsdma);
- if (ret) {
- dev_err(&pdev->dev, "failed to register of dma controller\n");
- goto err_unregister;
- }
-
- platform_set_drvdata(pdev, hsdma);
-
- return 0;
-
-err_unregister:
- dma_async_device_unregister(dd);
-err_uninit_hsdma:
- mtk_hsdma_uninit(hsdma);
- return ret;
-}
-
-static int mtk_hsdma_remove(struct platform_device *pdev)
-{
- struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
-
- mtk_hsdma_uninit(hsdma);
-
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&hsdma->ddev);
-
- return 0;
-}
-
-static struct platform_driver mtk_hsdma_driver = {
- .probe = mtk_hsdma_probe,
- .remove = mtk_hsdma_remove,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = mtk_hsdma_of_match,
- },
-};
-module_platform_driver(mtk_hsdma_driver);
-
-MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
-MODULE_DESCRIPTION("MTK HSDMA driver");
-MODULE_LICENSE("GPL v2");
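
One idiom from the removed driver worth noting: HSDMA_NEXT_DESC() relied on the descriptor count being a power of two, so wrapping a ring index is a mask instead of a modulo. A standalone sketch of the pattern:

#define RING_SIZE	8			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)
#define RING_NEXT(x)	(((x) + 1) & RING_MASK)

/* RING_NEXT(7) == 0: the AND replaces the more expensive (x + 1) % RING_SIZE */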
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 6d158e4f4b8c..644a65d1a6a1 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -146,44 +146,6 @@
pinctrl-names = "default";
pinctrl-0 = <&spi_pins>;
};
-
- gdma: gdma@2800 {
- compatible = "ralink,rt3883-gdma";
- reg = <0x2800 0x800>;
-
- clocks = <&sysc MT7621_CLK_GDMA>;
- clock-names = "gdma";
- resets = <&rstctrl 14>;
- reset-names = "dma";
-
- interrupt-parent = <&gic>;
- interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>;
-
- #dma-cells = <1>;
- #dma-channels = <16>;
- #dma-requests = <16>;
-
- status = "disabled";
- };
-
- hsdma: hsdma@7000 {
- compatible = "mediatek,mt7621-hsdma";
- reg = <0x7000 0x1000>;
-
- clocks = <&sysc MT7621_CLK_HSDMA>;
- clock-names = "hsdma";
- resets = <&rstctrl 5>;
- reset-names = "hsdma";
-
- interrupt-parent = <&gic>;
- interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
-
- #dma-cells = <1>;
- #dma-channels = <1>;
- #dma-requests = <1>;
-
- status = "disabled";
- };
};
pinctrl: pinctrl {
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 29bd37669059..68c09fa016ed 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -92,7 +92,7 @@ struct pi433_device {
u32 rx_bytes_to_drop;
u32 rx_bytes_dropped;
unsigned int rx_position;
- struct mutex rx_lock;
+ struct mutex rx_lock; /* protects rx_* variable accesses */
wait_queue_head_t rx_wait_queue;
/* fifo wait queue */
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 7d86bb8be245..d64df072d8e8 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -113,7 +113,7 @@ int rf69_set_mode(struct spi_device *spi, enum mode mode)
};
if (unlikely(mode >= ARRAY_SIZE(mode_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal mode %u", mode);
return -EINVAL;
}
@@ -143,7 +143,7 @@ int rf69_set_modulation(struct spi_device *spi, enum modulation modulation)
};
if (unlikely(modulation >= ARRAY_SIZE(modulation_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal modulation %u", modulation);
return -EINVAL;
}
@@ -191,7 +191,7 @@ int rf69_set_modulation_shaping(struct spi_device *spi,
MASK_DATAMODUL_MODULATION_SHAPE,
DATAMODUL_MODULATION_SHAPE_0_3);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal mod shaping for FSK %u", mod_shaping);
return -EINVAL;
}
case OOK:
@@ -209,7 +209,7 @@ int rf69_set_modulation_shaping(struct spi_device *spi,
MASK_DATAMODUL_MODULATION_SHAPE,
DATAMODUL_MODULATION_SHAPE_2BR);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal mod shaping for OOK %u", mod_shaping);
return -EINVAL;
}
default:
@@ -255,13 +255,25 @@ int rf69_set_deviation(struct spi_device *spi, u32 deviation)
int retval;
u64 f_reg;
u64 f_step;
+ u32 bit_rate_reg;
+ u32 bit_rate;
u8 msb;
u8 lsb;
u64 factor = 1000000; // to improve precision of calculation
- // TODO: Dependency to bitrate
- if (deviation < 600 || deviation > 500000) {
- dev_dbg(&spi->dev, "set_deviation: illegal input param");
+ // calculate bit rate
+ bit_rate_reg = rf69_read_reg(spi, REG_BITRATE_MSB) << 8;
+ bit_rate_reg |= rf69_read_reg(spi, REG_BITRATE_LSB);
+ bit_rate = F_OSC / bit_rate_reg;
+
+ /*
+ * The frequency deviation must exceed 600 Hz, and must not exceed
+ * 500 kHz once the bitrate dependency is taken into account, to
+ * ensure proper modulation.
+ */
+ if (deviation < 600 || (deviation + (bit_rate / 2)) > 500000) {
+ dev_dbg(&spi->dev,
+ "set_deviation: illegal input param: %u", deviation);
return -EINVAL;
}
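
A worked example of the new check, assuming the RFM69's 32 MHz crystal for F_OSC and a 4800 bps link:

/* bit_rate_reg = 32000000 / 4800 = 6666, so bit_rate reads back as
 * 32000000 / 6666 = 4800 bps; valid deviations are then
 * 600 <= deviation <= 500000 - 4800 / 2 = 497600 Hz */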
@@ -392,7 +404,7 @@ int rf69_set_output_power_level(struct spi_device *spi, u8 power_level)
return rf69_read_mod_write(spi, REG_PALEVEL, MASK_PALEVEL_OUTPUT_POWER,
power_level);
failed:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal power level %u", power_level);
return -EINVAL;
}
@@ -417,7 +429,7 @@ int rf69_set_pa_ramp(struct spi_device *spi, enum pa_ramp pa_ramp)
};
if (unlikely(pa_ramp >= ARRAY_SIZE(pa_ramp_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal pa_ramp %u", pa_ramp);
return -EINVAL;
}
@@ -433,7 +445,7 @@ int rf69_set_antenna_impedance(struct spi_device *spi,
case two_hundred_ohm:
return rf69_set_bit(spi, REG_LNA, MASK_LNA_ZIN);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal antenna impedance %u", antenna_impedance);
return -EINVAL;
}
}
@@ -451,7 +463,7 @@ int rf69_set_lna_gain(struct spi_device *spi, enum lna_gain lna_gain)
};
if (unlikely(lna_gain >= ARRAY_SIZE(lna_gain_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal lna gain %u", lna_gain);
return -EINVAL;
}
@@ -466,14 +478,14 @@ static int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg,
// check value for mantisse and exponent
if (exponent > 7) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal bandwidth exponent %u", exponent);
return -EINVAL;
}
- if ((mantisse != mantisse16) &&
- (mantisse != mantisse20) &&
- (mantisse != mantisse24)) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ if (mantisse != mantisse16 &&
+ mantisse != mantisse20 &&
+ mantisse != mantisse24) {
+ dev_dbg(&spi->dev, "set: illegal bandwidth mantisse %u", mantisse);
return -EINVAL;
}
@@ -531,7 +543,7 @@ int rf69_set_ook_threshold_dec(struct spi_device *spi,
};
if (unlikely(threshold_decrement >= ARRAY_SIZE(td_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal OOK threshold decrement %u", threshold_decrement);
return -EINVAL;
}
@@ -578,7 +590,7 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 dio_number, u8 value)
dio_addr = REG_DIOMAPPING2;
break;
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal dio number %u", dio_number);
return -EINVAL;
}
@@ -681,7 +693,7 @@ int rf69_set_fifo_fill_condition(struct spi_device *spi,
return rf69_clear_bit(spi, REG_SYNC_CONFIG,
MASK_SYNC_CONFIG_FIFO_FILL_CONDITION);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal fifo fill condition %u", fifo_fill_condition);
return -EINVAL;
}
}
@@ -690,7 +702,7 @@ int rf69_set_sync_size(struct spi_device *spi, u8 sync_size)
{
// check input value
if (sync_size > 0x07) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal sync size %u", sync_size);
return -EINVAL;
}
@@ -727,7 +739,7 @@ int rf69_set_packet_format(struct spi_device *spi,
return rf69_clear_bit(spi, REG_PACKETCONFIG1,
MASK_PACKETCONFIG1_PACKET_FORMAT_VARIABLE);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal packet format %u", packet_format);
return -EINVAL;
}
}
@@ -753,7 +765,7 @@ int rf69_set_address_filtering(struct spi_device *spi,
};
if (unlikely(address_filtering >= ARRAY_SIZE(af_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal address filtering %u", address_filtering);
return -EINVAL;
}
@@ -788,7 +800,7 @@ int rf69_set_tx_start_condition(struct spi_device *spi,
return rf69_set_bit(spi, REG_FIFO_THRESH,
MASK_FIFO_THRESH_TXSTART);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal tx start condition %u", tx_start_condition);
return -EINVAL;
}
}
@@ -799,7 +811,7 @@ int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold)
/* check input value */
if (threshold & 0x80) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal fifo threshold %u", threshold);
return -EINVAL;
}
@@ -826,7 +838,7 @@ int rf69_set_dagc(struct spi_device *spi, enum dagc dagc)
};
if (unlikely(dagc >= ARRAY_SIZE(dagc_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal dagc %u", dagc);
return -EINVAL;
}
diff --git a/drivers/staging/pi433/rf69_enum.h b/drivers/staging/pi433/rf69_enum.h
index fbf56fcf5fe8..b33a33a85d3b 100644
--- a/drivers/staging/pi433/rf69_enum.h
+++ b/drivers/staging/pi433/rf69_enum.h
@@ -110,12 +110,24 @@ enum fifo_fill_condition {
};
enum packet_format {
+ /*
+ * Used when the payload size is fixed in advance. This mode of
+ * operation saves 1 byte of RF overhead, since no length byte
+ * field is required.
+ */
packet_length_fix,
+ /*
+ * Used when the payload size isn't known in advance. The transmitter
+ * sends a length byte in each packet so the receiver knows how many
+ * bytes to read.
+ */
packet_length_var
};
enum tx_start_condition {
+ /* the number of bytes in the FIFO exceeds FIFO_THRESHOLD */
fifo_level,
+ /* at least one byte in the FIFO */
fifo_not_empty
};
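
To make the new comments concrete, illustrative on-air layouts for the two packet formats (sync word, address and CRC fields omitted):

/*
 * packet_length_fix:	| payload[N]         |	N fixed, known to both sides
 * packet_length_var:	| len | payload[len] |	len transmitted in every packet
 */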
diff --git a/drivers/staging/r8188eu/Makefile b/drivers/staging/r8188eu/Makefile
index 62933b0f29b5..a7a486cc16dd 100644
--- a/drivers/staging/r8188eu/Makefile
+++ b/drivers/staging/r8188eu/Makefile
@@ -11,7 +11,6 @@ r8188eu-y = \
hal/hal_com.o \
hal/odm.o \
hal/odm_debug.o \
- hal/odm_interface.o \
hal/odm_HWConfig.o \
hal/odm_RegConfig8188E.o \
hal/odm_RTL8188E.o \
@@ -21,9 +20,7 @@ r8188eu-y = \
hal/rtl8188e_phycfg.o \
hal/rtl8188e_rf6052.o \
hal/rtl8188e_rxdesc.o \
- hal/rtl8188e_sreset.o \
hal/rtl8188e_xmit.o \
- hal/rtl8188eu_led.o \
hal/rtl8188eu_recv.o \
hal/rtl8188eu_xmit.o \
hal/usb_halinit.o \
diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c
index c78feeb9c862..1675e2e8439c 100644
--- a/drivers/staging/r8188eu/core/rtw_ap.c
+++ b/drivers/staging/r8188eu/core/rtw_ap.c
@@ -318,7 +318,6 @@ void expire_timeout_chk(struct adapter *padapter)
void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
{
int i;
- u8 rf_type;
u32 init_rate = 0;
unsigned char sta_band = 0, raid, shortGIrate = false;
unsigned char limit;
@@ -342,11 +341,7 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
}
/* n mode ra_bitmap */
if (psta_ht->ht_option) {
- GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- if (rf_type == RF_2T2R)
- limit = 16;/* 2R */
- else
- limit = 8;/* 1R */
+ limit = 8; /* 1R */
for (i = 0; i < limit; i++) {
if (psta_ht->ht_cap.mcs.rx_mask[i / 8] & BIT(i % 8))
@@ -452,7 +447,7 @@ void update_bmc_sta(struct adapter *padapter)
init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
/* ap mode */
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(padapter, psta, true);
{
u8 arg = 0;
@@ -504,7 +499,7 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
DBG_88E("%s\n", __func__);
/* ap mode */
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(padapter, psta, true);
if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->ieee8021x_blocked = true;
diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c
index bcd0f9dd64b1..4951f835feaf 100644
--- a/drivers/staging/r8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_br_ext.c
@@ -105,19 +105,10 @@ static int skb_pull_and_merge(struct sk_buff *skb, unsigned char *src, int len)
return 0;
}
-static unsigned long __nat25_timeout(struct adapter *priv)
-{
- unsigned long timeout;
-
- timeout = jiffies - NAT25_AGEING_TIME*HZ;
-
- return timeout;
-}
-
static int __nat25_has_expired(struct adapter *priv,
struct nat25_network_db_entry *fdb)
{
- if (time_before_eq(fdb->ageing_timer, __nat25_timeout(priv)))
+ if (time_before_eq(fdb->ageing_timer, jiffies - NAT25_AGEING_TIME * HZ))
return 1;
return 0;
diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c
index 48869a7056fd..8bfb01c2ebb5 100644
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ b/drivers/staging/r8188eu/core/rtw_cmd.c
@@ -10,7 +10,6 @@
#include "../include/rtw_br_ext.h"
#include "../include/rtw_mlme_ext.h"
#include "../include/rtl8188e_dm.h"
-#include "../include/rtl8188e_sreset.h"
/*
Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
@@ -51,7 +50,6 @@ static int _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((size_t)(pcmdpriv->rsp_allocated_buf) & 3);
- pcmdpriv->cmd_issued_cnt = 0;
pcmdpriv->cmd_done_cnt = 0;
pcmdpriv->rsp_cnt = 0;
exit:
@@ -69,7 +67,7 @@ static int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
atomic_set(&pevtpriv->event_seq, 0);
pevtpriv->evt_done_cnt = 0;
- _init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
+ INIT_WORK(&pevtpriv->c2h_wk, c2h_wk_callback);
pevtpriv->c2h_wk_alive = false;
pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN + 1);
@@ -78,7 +76,7 @@ static int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
void rtw_free_evt_priv(struct evt_priv *pevtpriv)
{
- _cancel_workitem_sync(&pevtpriv->c2h_wk);
+ cancel_work_sync(&pevtpriv->c2h_wk);
while (pevtpriv->c2h_wk_alive)
msleep(10);
@@ -255,8 +253,9 @@ int rtw_cmd_thread(void *context)
_next:
if (padapter->bDriverStopped ||
padapter->bSurpriseRemoved) {
- DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
- __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
+ netdev_dbg(padapter->pnetdev,
+ "DriverStopped(%d) SurpriseRemoved(%d) break\n",
+ padapter->bDriverStopped, padapter->bSurpriseRemoved);
break;
}
@@ -269,8 +268,6 @@ _next:
goto post_process;
}
- pcmdpriv->cmd_issued_cnt++;
-
pcmd->cmdsz = _RND4((pcmd->cmdsz));/* _RND4 */
memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
@@ -316,8 +313,6 @@ post_process:
if (!pcmd)
break;
- /* DBG_88E("%s: leaving... drop cmdcode:%u\n", __func__, pcmd->cmdcode); */
-
rtw_free_cmd_obj(pcmd);
} while (1);
@@ -579,7 +574,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
else
padapter->pwrctrlpriv.smart_ps = padapter->registrypriv.smart_ps;
- DBG_88E("%s: smart_ps =%d\n", __func__, padapter->pwrctrlpriv.smart_ps);
+ netdev_dbg(padapter->pnetdev, "smart_ps = %d\n", padapter->pwrctrlpriv.smart_ps);
pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);/* get cmdsz before endian conversion */
@@ -800,8 +795,6 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq));
- /* DBG_88E("rtw_addbareq_cmd, tid =%d\n", tid); */
-
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
@@ -953,7 +946,19 @@ static void traffic_status_watchdog(struct adapter *padapter)
pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;
}
-static void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
+static void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
+{
+ u32 txdma_status;
+
+ txdma_status = rtw_read32(padapter, REG_TXDMA_STATUS);
+ if (txdma_status != 0x00) {
+ DBG_88E("%s REG_TXDMA_STATUS:0x%08x\n", __func__, txdma_status);
+ rtw_write32(padapter, REG_TXDMA_STATUS, txdma_status);
+ }
+ /* total xmit irp = 4 */
+}
+
+static void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf)
{
struct mlme_priv *pmlmepriv;
@@ -1003,7 +1008,6 @@ static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
break;
case LPS_CTRL_SPECIAL_PACKET:
- /* DBG_88E("LPS_CTRL_SPECIAL_PACKET\n"); */
pwrpriv->DelayLPSLastTimeStamp = jiffies;
LPS_Leave(padapter);
break;
@@ -1374,7 +1378,7 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
switch (pdrvextra_cmd->ec_id) {
case DYNAMIC_CHK_WK_CID:
- dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type_size);
+ dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf);
break;
case POWER_SAVING_CTRL_WK_CID:
rtw_ps_processor(padapter);
diff --git a/drivers/staging/r8188eu/core/rtw_efuse.c b/drivers/staging/r8188eu/core/rtw_efuse.c
index 03c8431b2ed3..0e0e60638880 100644
--- a/drivers/staging/r8188eu/core/rtw_efuse.c
+++ b/drivers/staging/r8188eu/core/rtw_efuse.c
@@ -6,70 +6,7 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
#include "../include/rtw_efuse.h"
-
-/*------------------------Define local variable------------------------------*/
-u8 fakeEfuseBank;
-u32 fakeEfuseUsedBytes;
-u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE] = {0};
-u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN] = {0};
-u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN] = {0};
-
-u32 BTEfuseUsedBytes;
-u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
-u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
-u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
-
-u32 fakeBTEfuseUsedBytes;
-u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
-u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
-u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
-/*------------------------Define local variable------------------------------*/
-
-#define REG_EFUSE_CTRL 0x0030
-#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
-
-static bool Efuse_Read1ByteFromFakeContent(u16 Offset, u8 *Value)
-{
- if (Offset >= EFUSE_MAX_HW_SIZE)
- return false;
- if (fakeEfuseBank == 0)
- *Value = fakeEfuseContent[Offset];
- else
- *Value = fakeBTEfuseContent[fakeEfuseBank - 1][Offset];
- return true;
-}
-
-static bool
-Efuse_Write1ByteToFakeContent(
- struct adapter *pAdapter,
- u16 Offset,
- u8 Value)
-{
- if (Offset >= EFUSE_MAX_HW_SIZE)
- return false;
- if (fakeEfuseBank == 0) {
- fakeEfuseContent[Offset] = Value;
- } else {
- fakeBTEfuseContent[fakeEfuseBank - 1][Offset] = Value;
- }
- return true;
-}
-
-/* 11/16/2008 MH Add description. Get current efuse area enabled word!!. */
-u8
-Efuse_CalculateWordCnts(u8 word_en)
-{
- u8 word_cnts = 0;
- if (!(word_en & BIT(0)))
- word_cnts++; /* 0 : write enable */
- if (!(word_en & BIT(1)))
- word_cnts++;
- if (!(word_en & BIT(2)))
- word_cnts++;
- if (!(word_en & BIT(3)))
- word_cnts++;
- return word_cnts;
-}
+#include "../include/rtl8188e_hal.h"
/* */
/* Description: */
@@ -86,18 +23,12 @@ void
ReadEFuseByte(
struct adapter *Adapter,
u16 _offset,
- u8 *pbuf,
- bool pseudo)
+ u8 *pbuf)
{
u32 value32;
u8 readbyte;
u16 retry;
- if (pseudo) {
- Efuse_Read1ByteFromFakeContent(_offset, pbuf);
- return;
- }
-
/* Write Address */
rtw_write8(Adapter, EFUSE_CTRL + 1, (_offset & 0xff));
readbyte = rtw_read8(Adapter, EFUSE_CTRL + 2);
@@ -125,134 +56,6 @@ ReadEFuseByte(
*pbuf = (u8)(value32 & 0xff);
}
-/* 11/16/2008 MH Read one byte from real Efuse. */
-u8 efuse_OneByteRead(struct adapter *pAdapter, u16 addr, u8 *data, bool pseudo)
-{
- u8 tmpidx = 0;
- u8 result;
-
- if (pseudo) {
- result = Efuse_Read1ByteFromFakeContent(addr, data);
- return result;
- }
- /* -----------------e-fuse reg ctrl --------------------------------- */
- /* address */
- rtw_write8(pAdapter, EFUSE_CTRL + 1, (u8)(addr & 0xff));
- rtw_write8(pAdapter, EFUSE_CTRL + 2, ((u8)((addr >> 8) & 0x03)) |
- (rtw_read8(pAdapter, EFUSE_CTRL + 2) & 0xFC));
-
- rtw_write8(pAdapter, EFUSE_CTRL + 3, 0x72);/* read cmd */
-
- while (!(0x80 & rtw_read8(pAdapter, EFUSE_CTRL + 3)) && (tmpidx < 100))
- tmpidx++;
- if (tmpidx < 100) {
- *data = rtw_read8(pAdapter, EFUSE_CTRL);
- result = true;
- } else {
- *data = 0xff;
- result = false;
- }
- return result;
-}
-
-/* 11/16/2008 MH Write one byte to reald Efuse. */
-u8 efuse_OneByteWrite(struct adapter *pAdapter, u16 addr, u8 data, bool pseudo)
-{
- u8 tmpidx = 0;
- u8 result;
-
- if (pseudo) {
- result = Efuse_Write1ByteToFakeContent(pAdapter, addr, data);
- return result;
- }
-
- /* -----------------e-fuse reg ctrl --------------------------------- */
- /* address */
- rtw_write8(pAdapter, EFUSE_CTRL + 1, (u8)(addr & 0xff));
- rtw_write8(pAdapter, EFUSE_CTRL + 2,
- (rtw_read8(pAdapter, EFUSE_CTRL + 2) & 0xFC) |
- (u8)((addr >> 8) & 0x03));
- rtw_write8(pAdapter, EFUSE_CTRL, data);/* data */
-
- rtw_write8(pAdapter, EFUSE_CTRL + 3, 0xF2);/* write cmd */
-
- while ((0x80 & rtw_read8(pAdapter, EFUSE_CTRL + 3)) && (tmpidx < 100))
- tmpidx++;
-
- if (tmpidx < 100)
- result = true;
- else
- result = false;
-
- return result;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: efuse_WordEnableDataRead
- *
- * Overview: Read allowed word in current efuse section data.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/16/2008 MHC Create Version 0.
- * 11/21/2008 MHC Fix Write bug when we only enable late word.
- *
- *---------------------------------------------------------------------------*/
-void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata)
-{
- if (!(word_en & BIT(0))) {
- targetdata[0] = sourdata[0];
- targetdata[1] = sourdata[1];
- }
- if (!(word_en & BIT(1))) {
- targetdata[2] = sourdata[2];
- targetdata[3] = sourdata[3];
- }
- if (!(word_en & BIT(2))) {
- targetdata[4] = sourdata[4];
- targetdata[5] = sourdata[5];
- }
- if (!(word_en & BIT(3))) {
- targetdata[6] = sourdata[6];
- targetdata[7] = sourdata[7];
- }
-}
-
-/*-----------------------------------------------------------------------------
- * Function: Efuse_ReadAllMap
- *
- * Overview: Read All Efuse content
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/11/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 efuseType, u8 *Efuse, bool pseudo)
-{
- u16 mapLen = 0;
-
- rtl8188e_EfusePowerSwitch(pAdapter, false, true);
-
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
-
- rtl8188e_ReadEFuse(pAdapter, efuseType, 0, mapLen, Efuse, pseudo);
-
- rtl8188e_EfusePowerSwitch(pAdapter, false, false);
-}
-
/*-----------------------------------------------------------------------------
* Function: EFUSE_ShadowMapUpdate
*
@@ -269,18 +72,16 @@ static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 efuseType, u8 *Efuse,
* 11/13/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
-void EFUSE_ShadowMapUpdate(
- struct adapter *pAdapter,
- u8 efuseType,
- bool pseudo)
+void EFUSE_ShadowMapUpdate(struct adapter *pAdapter)
{
struct eeprom_priv *pEEPROM = &pAdapter->eeprompriv;
- u16 mapLen = 0;
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
+ if (pEEPROM->bautoload_fail_flag) {
+ memset(pEEPROM->efuse_eeprom_data, 0xFF, EFUSE_MAP_LEN_88E);
+ return;
+ }
- if (pEEPROM->bautoload_fail_flag)
- memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
- else
- Efuse_ReadAllMap(pAdapter, efuseType, pEEPROM->efuse_eeprom_data, pseudo);
-} /* EFUSE_ShadowMapUpdate */
+ rtl8188e_EfusePowerSwitch(pAdapter, true);
+ rtl8188e_ReadEFuse(pAdapter, 0, EFUSE_MAP_LEN_88E, pEEPROM->efuse_eeprom_data);
+ rtl8188e_EfusePowerSwitch(pAdapter, false);
+}
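
The deleted efuse_OneByteRead() polls a ready flag in EFUSE_CTRL for at most 100 iterations and reports 0xff on timeout; the surviving ReadEFuseByte() keeps a similar bounded retry. A self-contained sketch of that bounded-poll shape, where fake_efuse_ready() is an invented stand-in for the EFUSE_CTRL read-back:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFUSE_POLL_LIMIT 100

/* Invented ready flag: reports "done" after a few polls to simulate
 * the hardware completing the read. */
static int poll_count;

static bool fake_efuse_ready(void)
{
	return ++poll_count >= 3;
}

static bool efuse_read_byte(uint16_t addr, uint8_t *data)
{
	unsigned int tries = 0;

	(void)addr;	/* a real implementation programs this into EFUSE_CTRL */

	while (!fake_efuse_ready() && tries < EFUSE_POLL_LIMIT)
		tries++;

	if (tries >= EFUSE_POLL_LIMIT) {
		*data = 0xff;	/* all-ones on timeout, as the driver does */
		return false;
	}
	*data = 0xa5;		/* invented payload */
	return true;
}

int main(void)
{
	uint8_t b;

	printf("ok=%d byte=0x%02x\n", efuse_read_byte(0x10, &b), b);
	return 0;
}
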
diff --git a/drivers/staging/r8188eu/core/rtw_ieee80211.c b/drivers/staging/r8188eu/core/rtw_ieee80211.c
index 343c2f9a4ce8..ad87954bdeb4 100644
--- a/drivers/staging/r8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/r8188eu/core/rtw_ieee80211.c
@@ -1160,63 +1160,26 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
}
/* show MCS rate, unit: 100Kbps */
-u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate)
+u16 rtw_mcs_rate(u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate)
{
u16 max_rate = 0;
- if (rf_type == RF_1T1R) {
- if (MCS_rate[0] & BIT(7))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
- else if (MCS_rate[0] & BIT(6))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
- else if (MCS_rate[0] & BIT(5))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
- else if (MCS_rate[0] & BIT(4))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
- else if (MCS_rate[0] & BIT(3))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
- else if (MCS_rate[0] & BIT(2))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
- else if (MCS_rate[0] & BIT(1))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
- else if (MCS_rate[0] & BIT(0))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
- } else {
- if (MCS_rate[1]) {
- if (MCS_rate[1] & BIT(7))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 3000 : 2700) : ((short_GI_20) ? 1444 : 1300);
- else if (MCS_rate[1] & BIT(6))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 2700 : 2430) : ((short_GI_20) ? 1300 : 1170);
- else if (MCS_rate[1] & BIT(5))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 2400 : 2160) : ((short_GI_20) ? 1156 : 1040);
- else if (MCS_rate[1] & BIT(4))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1800 : 1620) : ((short_GI_20) ? 867 : 780);
- else if (MCS_rate[1] & BIT(3))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
- else if (MCS_rate[1] & BIT(2))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
- else if (MCS_rate[1] & BIT(1))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
- else if (MCS_rate[1] & BIT(0))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
- } else {
- if (MCS_rate[0] & BIT(7))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
- else if (MCS_rate[0] & BIT(6))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
- else if (MCS_rate[0] & BIT(5))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
- else if (MCS_rate[0] & BIT(4))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
- else if (MCS_rate[0] & BIT(3))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
- else if (MCS_rate[0] & BIT(2))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
- else if (MCS_rate[0] & BIT(1))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
- else if (MCS_rate[0] & BIT(0))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
- }
- }
+ if (MCS_rate[0] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
+ else if (MCS_rate[0] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
+ else if (MCS_rate[0] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
+ else if (MCS_rate[0] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
+ else if (MCS_rate[0] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
+ else if (MCS_rate[0] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
+ else if (MCS_rate[0] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
+ else if (MCS_rate[0] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
+
return max_rate;
}
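
The rewritten rtw_mcs_rate() keeps only the former 1T1R branch: the highest bit set in MCS_rate[0] picks the peak rate for MCS 0-7, scaled by channel width and short guard interval. The same mapping can be written as a table walk from MCS 7 down to MCS 0; a standalone sketch, with rates in units of 100 kbps as in the driver:

#include <stdint.h>
#include <stdio.h>

/* {40 MHz short GI, 40 MHz long GI, 20 MHz short GI, 20 MHz long GI},
 * MCS 7 in the first row, values taken from the function above. */
static const uint16_t mcs_rate_100kbps[8][4] = {
	{1500, 1350, 722, 650},	/* MCS7 */
	{1350, 1215, 650, 585},	/* MCS6 */
	{1200, 1080, 578, 520},	/* MCS5 */
	{ 900,  810, 433, 390},	/* MCS4 */
	{ 600,  540, 289, 260},	/* MCS3 */
	{ 450,  405, 217, 195},	/* MCS2 */
	{ 300,  270, 144, 130},	/* MCS1 */
	{ 150,  135,  72,  65},	/* MCS0 */
};

static uint16_t mcs_max_rate(uint8_t mcs_map0, int bw40, int sgi20, int sgi40)
{
	int mcs;

	for (mcs = 7; mcs >= 0; mcs--) {
		if (mcs_map0 & (1u << mcs)) {
			const uint16_t *r = mcs_rate_100kbps[7 - mcs];

			return bw40 ? (sgi40 ? r[0] : r[1])
				    : (sgi20 ? r[2] : r[3]);
		}
	}
	return 0;	/* no MCS advertised */
}

int main(void)
{
	/* MCS 0-7 supported, 40 MHz with short GI: 1500 = 150 Mbps. */
	printf("%u\n", mcs_max_rate(0xff, 1, 0, 1));
	return 0;
}
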
diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
index 411b06e135be..eadfbdb94dd5 100644
--- a/drivers/staging/r8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
@@ -110,7 +110,7 @@ u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
u32 cur_time = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- DBG_88E_LEVEL(_drv_info_, "set bssid:%pM\n", bssid);
+ netdev_dbg(padapter->pnetdev, "set bssid:%pM\n", bssid);
if ((bssid[0] == 0x00 && bssid[1] == 0x00 && bssid[2] == 0x00 &&
bssid[3] == 0x00 && bssid[4] == 0x00 && bssid[5] == 0x00) ||
@@ -185,8 +185,8 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pnetwork = &pmlmepriv->cur_network;
- DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n",
- ssid->Ssid, get_fwstate(pmlmepriv));
+ netdev_dbg(padapter->pnetdev, "set ssid [%s] fw_state=0x%08x\n",
+ ssid->Ssid, get_fwstate(pmlmepriv));
if (!padapter->hw_init_completed) {
status = _FAIL;
@@ -458,7 +458,6 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
struct ieee80211_ht_cap *pht_capie;
- u8 rf_type = 0;
u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0;
u16 mcs_rate = 0;
u32 ht_ielen = 0;
@@ -480,14 +479,10 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
- GetHwReg8188EU(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- max_rate = rtw_mcs_rate(
- rf_type,
- bw_40MHz & (pregistrypriv->cbw40_enable),
- short_GI_20,
- short_GI_40,
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate
- );
+ max_rate = rtw_mcs_rate(bw_40MHz & (pregistrypriv->cbw40_enable),
+ short_GI_20,
+ short_GI_40,
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate);
}
} else {
while ((pcur_bss->SupportedRates[i] != 0) && (pcur_bss->SupportedRates[i] != 0xFF)) {
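
The DBG_88E_LEVEL() calls replaced above become netdev_dbg(), which prefixes the interface name automatically and is controlled by the kernel's dynamic debug facility instead of a driver-private verbosity level. A small kernel-context sketch, assuming a driver that already holds its struct net_device:

#include <linux/netdevice.h>

/* With CONFIG_DYNAMIC_DEBUG, each of these call sites can be switched
 * on and off at runtime through /sys/kernel/debug/dynamic_debug/control. */
static void example_report_bssid(struct net_device *ndev, const u8 *bssid)
{
	/* %pM prints a 6-byte MAC address; no driver tag is needed
	 * because netdev_dbg() already emits the device name. */
	netdev_dbg(ndev, "set bssid:%pM\n", bssid);
}
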
diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
index 0e3453639a8b..ccd43accb7dc 100644
--- a/drivers/staging/r8188eu/core/rtw_led.c
+++ b/drivers/staging/r8188eu/core/rtw_led.c
@@ -3,25 +3,20 @@
#include "../include/drv_types.h"
#include "../include/rtw_led.h"
+#include "../include/rtl8188e_spec.h"
-void BlinkTimerCallback(struct timer_list *t)
-{
- struct LED_871x *pLed = from_timer(pLed, t, BlinkTimer);
- struct adapter *padapter = pLed->padapter;
+#define LED_BLINK_NO_LINK_INTVL msecs_to_jiffies(1000)
+#define LED_BLINK_LINK_INTVL msecs_to_jiffies(500)
+#define LED_BLINK_SCAN_INTVL msecs_to_jiffies(180)
+#define LED_BLINK_FASTER_INTVL msecs_to_jiffies(50)
+#define LED_BLINK_WPS_SUCESS_INTVL msecs_to_jiffies(5000)
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
- return;
+#define IS_LED_WPS_BLINKING(l) \
+ ((l)->CurrLedState == LED_BLINK_WPS || \
+ (l)->CurrLedState == LED_BLINK_WPS_STOP || \
+ (l)->bLedWPSBlinkInProgress)
- _set_workitem(&pLed->BlinkWorkItem);
-}
-
-void BlinkWorkItemCallback(struct work_struct *work)
-{
- struct LED_871x *pLed = container_of(work, struct LED_871x, BlinkWorkItem);
- BlinkHandler(pLed);
-}
-
-void ResetLedStatus(struct LED_871x *pLed)
+static void ResetLedStatus(struct LED_871x *pLed)
{
pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */
@@ -34,39 +29,48 @@ void ResetLedStatus(struct LED_871x *pLed)
pLed->bLedNoLinkBlinkInProgress = false;
pLed->bLedLinkBlinkInProgress = false;
- pLed->bLedStartToLinkBlinkInProgress = false;
pLed->bLedScanBlinkInProgress = false;
}
-void InitLed871x(struct adapter *padapter, struct LED_871x *pLed, enum LED_PIN_871x LedPin)
+static void SwLedOn(struct adapter *padapter, struct LED_871x *pLed)
{
- pLed->padapter = padapter;
- pLed->LedPin = LedPin;
+ u8 LedCfg;
- ResetLedStatus(pLed);
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return;
- timer_setup(&pLed->BlinkTimer, BlinkTimerCallback, 0);
- _init_workitem(&pLed->BlinkWorkItem, BlinkWorkItemCallback, pLed);
+ LedCfg = rtw_read8(padapter, REG_LEDCFG2);
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0xf0) | BIT(5) | BIT(6)); /* SW control led0 on. */
+ pLed->bLedOn = true;
}
-void DeInitLed871x(struct LED_871x *pLed)
+static void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
{
- _cancel_workitem_sync(&pLed->BlinkWorkItem);
- _cancel_timer_ex(&pLed->BlinkTimer);
- ResetLedStatus(pLed);
+ u8 LedCfg;
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ goto exit;
+
+ LedCfg = rtw_read8(padapter, REG_LEDCFG2);/* 0x4E */
+
+ LedCfg &= 0x90; /* Set to software control. */
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
+ LedCfg = rtw_read8(padapter, REG_MAC_PINMUX_CFG);
+ LedCfg &= 0xFE;
+ rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
+exit:
+ pLed->bLedOn = false;
}
-static void SwLedBlink1(struct LED_871x *pLed)
+static void blink_work(struct work_struct *work)
{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct LED_871x *pLed = container_of(dwork, struct LED_871x, blink_work);
struct adapter *padapter = pLed->padapter;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- SwLedOn(padapter, pLed);
- else
- SwLedOff(padapter, pLed);
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
+ return;
if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
SwLedOff(padapter, pLed);
@@ -74,81 +78,67 @@ static void SwLedBlink1(struct LED_871x *pLed)
return;
}
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ SwLedOn(padapter, pLed);
+ else
+ SwLedOff(padapter, pLed);
+
switch (pLed->CurrLedState) {
case LED_BLINK_SLOWLY:
if (pLed->bLedOn)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
break;
case LED_BLINK_NORMAL:
if (pLed->bLedOn)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
break;
case LED_BLINK_SCAN:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
+ if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
}
pLed->bLedScanBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
}
break;
case LED_BLINK_TXRX:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
+ if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
}
pLed->bLedBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_FASTER_INTVL);
}
break;
case LED_BLINK_WPS:
@@ -156,27 +146,22 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
break;
case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- bStopBlinking = false;
- else
- bStopBlinking = true;
-
- if (bStopBlinking) {
+ if (pLed->BlinkingLedState != RTW_LED_ON) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
if (pLed->bLedOn)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
pLed->bLedWPSBlinkInProgress = false;
} else {
pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_WPS_SUCESS_INTVL);
}
break;
default:
@@ -184,25 +169,56 @@ static void SwLedBlink1(struct LED_871x *pLed)
}
}
-static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+void rtl8188eu_InitSwLeds(struct adapter *padapter)
+{
+ struct led_priv *pledpriv = &padapter->ledpriv;
+ struct LED_871x *pLed = &pledpriv->SwLed0;
+
+ pLed->padapter = padapter;
+ ResetLedStatus(pLed);
+ INIT_DELAYED_WORK(&pLed->blink_work, blink_work);
+}
+
+void rtl8188eu_DeInitSwLeds(struct adapter *padapter)
+{
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct LED_871x *pLed = &ledpriv->SwLed0;
+
+ cancel_delayed_work_sync(&pLed->blink_work);
+ ResetLedStatus(pLed);
+ SwLedOff(padapter, pLed);
+}
+
+void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
{
struct led_priv *ledpriv = &padapter->ledpriv;
+ struct registry_priv *registry_par;
struct LED_871x *pLed = &ledpriv->SwLed0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
+ (!padapter->hw_init_completed))
+ return;
+
+ if (!ledpriv->bRegUseLed)
+ return;
+
+ registry_par = &padapter->registrypriv;
+ if (!registry_par->led_enable)
+ return;
+
switch (LedAction) {
- case LED_CTL_POWER_ON:
case LED_CTL_START_TO_LINK:
case LED_CTL_NO_LINK:
if (!pLed->bLedNoLinkBlinkInProgress) {
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
@@ -212,7 +228,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
}
break;
case LED_CTL_LINK:
@@ -220,11 +236,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedLinkBlinkInProgress = true;
@@ -233,7 +249,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
}
break;
case LED_CTL_SITE_SURVEY:
@@ -243,15 +259,15 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedScanBlinkInProgress = true;
@@ -261,7 +277,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
}
break;
case LED_CTL_TX:
@@ -270,11 +286,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
pLed->bLedBlinkInProgress = true;
@@ -284,26 +300,25 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_FASTER_INTVL);
}
break;
case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
if (!pLed->bLedWPSBlinkInProgress) {
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedScanBlinkInProgress = false;
}
pLed->bLedWPSBlinkInProgress = true;
@@ -312,42 +327,42 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
}
break;
case LED_CTL_STOP_WPS:
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedScanBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress)
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
else
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS_STOP;
if (pLed->bLedOn) {
pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_WPS_SUCESS_INTVL);
} else {
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, 0);
+ schedule_delayed_work(&pLed->blink_work, 0);
}
break;
case LED_CTL_STOP_WPS_FAIL:
if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedWPSBlinkInProgress = false;
}
pLed->bLedNoLinkBlinkInProgress = true;
@@ -356,29 +371,29 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
break;
case LED_CTL_POWER_OFF:
pLed->CurrLedState = RTW_LED_OFF;
pLed->BlinkingLedState = RTW_LED_OFF;
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedWPSBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedScanBlinkInProgress = false;
}
SwLedOff(padapter, pLed);
@@ -387,41 +402,3 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
break;
}
}
-
-void BlinkHandler(struct LED_871x *pLed)
-{
- struct adapter *padapter = pLed->padapter;
-
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
- return;
-
- SwLedBlink1(pLed);
-}
-
-void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct registry_priv *registry_par;
-
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
- (!padapter->hw_init_completed))
- return;
-
- if (!ledpriv->bRegUseLed)
- return;
-
- registry_par = &padapter->registrypriv;
- if (!registry_par->led_enable)
- return;
-
- if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
- padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
- (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
- LedAction == LED_CTL_SITE_SURVEY ||
- LedAction == LED_CTL_LINK ||
- LedAction == LED_CTL_NO_LINK ||
- LedAction == LED_CTL_POWER_ON))
- return;
-
- SwLedControlMode1(padapter, LedAction);
-}
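
The LED rework above folds the old timer plus workitem pair into a single struct delayed_work: the work function re-arms itself with schedule_delayed_work(), and teardown shrinks to one cancel_delayed_work_sync() call. A condensed kernel-context sketch of that lifecycle, with the structure reduced to what the example needs:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct blink_led {
	struct delayed_work blink_work;
};

static void blink_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct blink_led *led = container_of(dwork, struct blink_led, blink_work);

	/* ... toggle the LED here, then re-arm the work: */
	schedule_delayed_work(&led->blink_work, msecs_to_jiffies(500));
}

static void blink_init(struct blink_led *led)
{
	INIT_DELAYED_WORK(&led->blink_work, blink_fn);
	schedule_delayed_work(&led->blink_work, 0);
}

static void blink_exit(struct blink_led *led)
{
	/* One call replaces the old cancel-workitem + cancel-timer pair
	 * and also waits for a running callback to finish. */
	cancel_delayed_work_sync(&led->blink_work);
}
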
diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c
index 8d14aff32f61..394e8a5ce03c 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme.c
@@ -913,7 +913,7 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
psta->aid = pnetwork->join_res;
psta->mac_id = 0;
/* sta mode */
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(padapter, psta, true);
/* security related */
if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
padapter->securitypriv.binstallGrpkey = false;
@@ -1198,7 +1198,7 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
psta->mac_id = (uint)pstassoc->cam_id;
DBG_88E("%s\n", __func__);
/* for ad-hoc mode */
- rtl8188e_SetHalODMVar(adapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(adapter, psta, true);
rtw_sta_media_status_rpt(adapter, psta, 1);
if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
@@ -1999,17 +1999,11 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
(le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & BIT(1)) &&
(pmlmeinfo->HT_info.infos[0] & BIT(2))) {
int i;
- u8 rf_type;
-
- GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
/* update the MCS rates */
- for (i = 0; i < 16; i++) {
- if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
- else
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
- }
+ for (i = 0; i < 16; i++)
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
+
/* switch to the 40M Hz mode according to the AP */
pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
switch ((pmlmeinfo->HT_info.infos[0] & 0x3)) {
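
With the RF type now hard-wired to 1T1R, the MCS update above reduces to AND-ing the peer's 16-byte MCS map with MCS_rate_1R so that only single-stream rates survive. A standalone illustration; the mask bytes are copied from MCS_rate_1R as far as the diff shows them, and the trailing bytes are assumed to be zero:

#include <stdint.h>
#include <stdio.h>

/* 0xff in byte 0 keeps MCS 0-7; bytes not listed initialize to zero. */
static const uint8_t mcs_rate_1r[16] = { 0xff, 0x00, 0x00, 0x00, 0x01 };

int main(void)
{
	uint8_t peer_mcs[16] = { 0xff, 0xff, 0, 0, 0x01 }; /* peer offers two streams */
	int i;

	for (i = 0; i < 16; i++)
		peer_mcs[i] &= mcs_rate_1r[i];	/* clamp to what 1T1R can receive */

	printf("byte0=0x%02x byte1=0x%02x\n", peer_mcs[0], peer_mcs[1]);
	return 0;
}
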
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index b4820ad2cee7..a9141ab1690e 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -10,7 +10,6 @@
#include "../include/wlan_bssdef.h"
#include "../include/mlme_osdep.h"
#include "../include/recv_osdep.h"
-#include "../include/rtl8188e_sreset.h"
#include "../include/rtl8188e_xmit.h"
#include "../include/rtl8188e_dm.h"
@@ -77,7 +76,7 @@ unsigned char MCS_rate_1R[16] = {0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0,
/********************************************************
ChannelPlan definitions
*********************************************************/
-static struct rt_channel_plan_2g RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
+static struct rt_channel_plan RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x00, RT_CHANNEL_DOMAIN_2G_WORLD , Passive scan CH 12, 13 */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x01, RT_CHANNEL_DOMAIN_2G_ETSI1 */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* 0x02, RT_CHANNEL_DOMAIN_2G_FCC1 */
@@ -252,7 +251,8 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
static int has_channel(struct rt_channel_info *channel_set,
u8 chanset_size,
- u8 chan) {
+ u8 chan)
+{
int i;
for (i = 0; i < chanset_size; i++) {
@@ -264,7 +264,8 @@ static int has_channel(struct rt_channel_info *channel_set,
static void init_channel_list(struct adapter *padapter, struct rt_channel_info *channel_set,
u8 chanset_size,
- struct p2p_channels *channel_list) {
+ struct p2p_channels *channel_list)
+{
struct p2p_oper_class_map op_class[] = {
{ IEEE80211G, 81, 1, 13, 1, BW20 },
{ IEEE80211G, 82, 14, 14, 1, BW20 },
@@ -655,9 +656,11 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
if (psta) {
ret = rtw_check_bcn_info(padapter, pframe, len);
if (!ret) {
- DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
- receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 0);
- return _SUCCESS;
+ netdev_dbg(padapter->pnetdev,
+ "ap has changed, disconnect now\n");
+ receive_disconnect(padapter,
+ pmlmeinfo->network.MacAddress, 0);
+ return _SUCCESS;
}
/* update WMM, ERP in the beacon */
/* todo: the timer is used instead of the number of the beacon received */
@@ -931,7 +934,7 @@ unsigned int OnAuthClient(struct adapter *padapter, struct recv_frame *precv_fra
}
if (go2asoc) {
- DBG_88E_LEVEL(_drv_info_, "auth success, start assoc\n");
+ netdev_dbg(padapter->pnetdev, "auth success, start assoc\n");
start_clnt_assoc(padapter);
return _SUCCESS;
}
@@ -1503,8 +1506,9 @@ unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E_LEVEL(_drv_always_, "ap recv deauth reason code(%d) sta:%pM\n",
- reason, GetAddr2Ptr(pframe));
+ netdev_dbg(padapter->pnetdev,
+ "ap recv deauth reason code(%d) sta:%pM\n",
+ reason, GetAddr2Ptr(pframe));
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (psta) {
@@ -1540,8 +1544,9 @@ unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
}
}
- DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
- reason, GetAddr3Ptr(pframe), ignore_received_deauth);
+ netdev_dbg(padapter->pnetdev,
+ "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
+ reason, GetAddr3Ptr(pframe), ignore_received_deauth);
if (!ignore_received_deauth)
receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
@@ -1576,8 +1581,9 @@ unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
- reason, GetAddr2Ptr(pframe));
+ netdev_dbg(padapter->pnetdev,
+ "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr2Ptr(pframe));
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (psta) {
@@ -1596,8 +1602,9 @@ unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame
return _SUCCESS;
} else {
- DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
- reason, GetAddr3Ptr(pframe));
+ netdev_dbg(padapter->pnetdev,
+ "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
}
@@ -5062,7 +5069,7 @@ void issue_assocreq(struct adapter *padapter)
__le16 *fctrl;
__le16 le_tmp;
unsigned int i, j, ie_len, index = 0;
- unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates];
+ unsigned char bssrate[NumRates], sta_bssrate[NumRates];
struct ndis_802_11_var_ie *pIE;
struct registry_priv *pregpriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -5187,25 +5194,10 @@ void issue_assocreq(struct adapter *padapter)
/* todo: disable SM power save mode */
pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x000c);
- GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- switch (rf_type) {
- case RF_1T1R:
- if (pregpriv->rx_stbc)
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
- memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_1R, 16);
- break;
- case RF_2T2R:
- case RF_1T2R:
- default:
- if ((pregpriv->rx_stbc == 0x3) ||/* enable for 2.4/5 GHz */
- ((pmlmeext->cur_wireless_mode & WIRELESS_11_24N) && (pregpriv->rx_stbc == 0x1)) || /* enable for 2.4GHz */
- (pregpriv->wifi_spec == 1)) {
- DBG_88E("declare supporting RX STBC\n");
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0200);/* RX STBC two spatial stream */
- }
- memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_2R, 16);
- break;
- }
+ if (pregpriv->rx_stbc)
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
+ memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_1R, 16);
+
pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len, (u8 *)(&pmlmeinfo->HT_caps), &pattrib->pktlen);
}
}
@@ -6493,7 +6485,7 @@ void start_clnt_auth(struct adapter *padapter)
/* For the Win8 P2P connection, it will be hard to have a successful connection if this Wi-Fi doesn't connect to it. */
issue_deauth(padapter, (&pmlmeinfo->network)->MacAddress, WLAN_REASON_DEAUTH_LEAVING);
- DBG_88E_LEVEL(_drv_info_, "start auth\n");
+ netdev_dbg(padapter->pnetdev, "start auth\n");
issue_auth(padapter, NULL, 0);
set_link_timer(pmlmeext, REAUTH_TO);
@@ -7132,8 +7124,7 @@ void mlmeext_sta_del_event_callback(struct adapter *padapter)
Following are the functions for the timer handlers
*****************************************************************************/
-void _linked_rx_signal_strehgth_display(struct adapter *padapter);
-void _linked_rx_signal_strehgth_display(struct adapter *padapter)
+static void _linked_rx_signal_strength_display(struct adapter *padapter)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -7167,6 +7158,23 @@ static u8 chk_ap_is_alive(struct adapter *padapter, struct sta_info *psta)
return ret;
}
+static void rtl8188e_sreset_linked_status_check(struct adapter *padapter)
+{
+ u32 rx_dma_status = rtw_read32(padapter, REG_RXDMA_STATUS);
+ u8 fw_status;
+
+ if (rx_dma_status != 0x00) {
+ DBG_88E("%s REG_RXDMA_STATUS:0x%08x\n", __func__, rx_dma_status);
+ rtw_write32(padapter, REG_RXDMA_STATUS, rx_dma_status);
+ }
+
+ fw_status = rtw_read8(padapter, REG_FMETHR);
+ if (fw_status == 1)
+ DBG_88E("%s REG_FW_STATUS (0x%02x), Read_Efuse_Fail !!\n", __func__, fw_status);
+ else if (fw_status == 2)
+ DBG_88E("%s REG_FW_STATUS (0x%02x), Condition_No_Match !!\n", __func__, fw_status);
+}
+
void linked_status_chk(struct adapter *padapter)
{
u32 i;
@@ -7177,7 +7185,7 @@ void linked_status_chk(struct adapter *padapter)
struct sta_priv *pstapriv = &padapter->stapriv;
if (padapter->bRxRSSIDisplay)
- _linked_rx_signal_strehgth_display(padapter);
+ _linked_rx_signal_strength_display(padapter);
rtl8188e_sreset_linked_status_check(padapter);
@@ -7238,8 +7246,8 @@ void linked_status_chk(struct adapter *padapter)
if (rx_chk == _FAIL) {
pmlmeext->retry++;
if (pmlmeext->retry > rx_chk_limit) {
- DBG_88E_LEVEL(_drv_always_, FUNC_ADPT_FMT" disconnect or roaming\n",
- FUNC_ADPT_ARG(padapter));
+ netdev_dbg(padapter->pnetdev,
+ "disconnect or roaming\n");
receive_disconnect(padapter, pmlmeinfo->network.MacAddress,
WLAN_REASON_EXPIRATION_CHK);
return;
@@ -7764,8 +7772,9 @@ u8 setkey_hdl(struct adapter *padapter, u8 *pbuf)
/* write cam */
ctrl = BIT(15) | ((pparm->algorithm) << 2) | pparm->keyid;
- DBG_88E_LEVEL(_drv_info_, "set group key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) "
- "keyid:%d\n", pparm->algorithm, pparm->keyid);
+ netdev_dbg(padapter->pnetdev,
+ "set group key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) keyid:%d\n",
+ pparm->algorithm, pparm->keyid);
write_cam(padapter, pparm->keyid, ctrl, null_sta, pparm->key);
return H2C_SUCCESS;
@@ -7794,8 +7803,9 @@ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
cam_id = 4;
- DBG_88E_LEVEL(_drv_info_, "set pairwise key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) camid:%d\n",
- pparm->algorithm, cam_id);
+ netdev_dbg(padapter->pnetdev,
+ "set pairwise key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) camid:%d\n",
+ pparm->algorithm, cam_id);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c
index b265b5e46851..7b30b9b64b41 100644
--- a/drivers/staging/r8188eu/core/rtw_p2p.c
+++ b/drivers/staging/r8188eu/core/rtw_p2p.c
@@ -1806,8 +1806,6 @@ void reset_global_wifidirect_info(struct adapter *padapter)
pwdinfo = &padapter->wdinfo;
pwdinfo->persistent_supported = 0;
pwdinfo->session_available = true;
- pwdinfo->wfd_tdls_enable = 0;
- pwdinfo->wfd_tdls_weaksec = 0;
}
void rtw_init_wifidirect_timers(struct adapter *padapter)
@@ -1912,7 +1910,6 @@ void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role)
memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, 0x00, 4);
memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, '0', 3);
memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
- pwdinfo->wfd_tdls_enable = 0;
memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
memset(pwdinfo->p2p_peer_device_addr, 0x00, ETH_ALEN);
@@ -1944,7 +1941,6 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
/* Enable P2P function */
init_wifidirect_info(padapter, role);
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_P2P_STATE, NULL, true);
} else if (role == P2P_ROLE_DISABLE) {
if (_FAIL == rtw_pwr_wakeup(padapter)) {
ret = _FAIL;
@@ -1963,8 +1959,6 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
memset(&pwdinfo->rx_prov_disc_info, 0x00, sizeof(struct rx_provdisc_req_info));
}
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_P2P_STATE, NULL, false);
-
/* Restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
}
diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
index 5d595cf2a47e..46e44aee587f 100644
--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
@@ -15,9 +15,12 @@ void ips_enter(struct adapter *padapter)
if (pxmit_priv->free_xmitbuf_cnt != NR_XMITBUFF ||
pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF) {
- DBG_88E_LEVEL(_drv_info_, "There are some pkts to transmit\n");
- DBG_88E_LEVEL(_drv_info_, "free_xmitbuf_cnt: %d, free_xmit_extbuf_cnt: %d\n",
- pxmit_priv->free_xmitbuf_cnt, pxmit_priv->free_xmit_extbuf_cnt);
+ netdev_dbg(padapter->pnetdev,
+ "There are some pkts to transmit\n");
+ netdev_dbg(padapter->pnetdev,
+ "free_xmitbuf_cnt: %d, free_xmit_extbuf_cnt: %d\n",
+ pxmit_priv->free_xmitbuf_cnt,
+ pxmit_priv->free_xmit_extbuf_cnt);
return;
}
@@ -32,7 +35,7 @@ void ips_enter(struct adapter *padapter)
DBG_88E("==>ips_enter cnts:%d\n", pwrpriv->ips_enter_cnts);
if (rf_off == pwrpriv->change_rfpwrstate) {
pwrpriv->bpower_saving = true;
- DBG_88E_LEVEL(_drv_info_, "nolinked power save enter\n");
+ netdev_dbg(padapter->pnetdev, "nolinked power save enter\n");
if (pwrpriv->ips_mode == IPS_LEVEL_2)
pwrpriv->bkeepfwalive = true;
@@ -65,7 +68,7 @@ int ips_leave(struct adapter *padapter)
if (result == _SUCCESS) {
pwrpriv->rf_pwrstate = rf_on;
}
- DBG_88E_LEVEL(_drv_info_, "nolinked power save leave\n");
+ netdev_dbg(padapter->pnetdev, "nolinked power save leave\n");
if ((_WEP40_ == psecuritypriv->dot11PrivacyAlgrthm) || (_WEP104_ == psecuritypriv->dot11PrivacyAlgrthm)) {
DBG_88E("==>%s, channel(%d), processing(%x)\n", __func__, padapter->mlmeextpriv.cur_channel, pwrpriv->bips_processing);
@@ -348,7 +351,6 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
pwrctrlpriv->pwr_state_check_cnts = 0;
- pwrctrlpriv->bInternalAutoSuspend = false;
pwrctrlpriv->bInSuspend = false;
pwrctrlpriv->bkeepfwalive = false;
@@ -393,7 +395,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
}
/* System suspend is not allowed to wakeup */
- if ((!pwrpriv->bInternalAutoSuspend) && pwrpriv->bInSuspend) {
+ if (pwrpriv->bInSuspend) {
while (pwrpriv->bInSuspend &&
(rtw_get_passing_time_ms(start) <= 3000 ||
(rtw_get_passing_time_ms(start) <= 500)))
@@ -404,12 +406,6 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
DBG_88E("%s wait bInSuspend done\n", __func__);
}
- /* block??? */
- if ((pwrpriv->bInternalAutoSuspend) && (padapter->net_closed)) {
- ret = _FAIL;
- goto exit;
- }
-
/* I think this should be check in IPS, LPS, autosuspend functions... */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
ret = _SUCCESS;
diff --git a/drivers/staging/r8188eu/core/rtw_rf.c b/drivers/staging/r8188eu/core/rtw_rf.c
index 2ec56012516e..e704092d31d0 100644
--- a/drivers/staging/r8188eu/core/rtw_rf.c
+++ b/drivers/staging/r8188eu/core/rtw_rf.c
@@ -35,7 +35,7 @@ static struct ch_freq ch_freq_map[] = {
{216, 5080},/* Japan, means J16 */
};
-static int ch_freq_map_num = (sizeof(ch_freq_map) / sizeof(struct ch_freq));
+static int ch_freq_map_num = ARRAY_SIZE(ch_freq_map);
u32 rtw_ch2freq(u32 channel)
{
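
The ARRAY_SIZE() change above is a drop-in replacement for the open-coded sizeof division; the kernel macro additionally rejects pointers at compile time via __must_be_array(). A userspace equivalent to show the arithmetic:

#include <stdio.h>

/* Userspace version, without the kernel's pointer check. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct ch_freq { int channel; int freq; };

static const struct ch_freq map[] = {
	{ 1, 2412 }, { 6, 2437 }, { 11, 2462 },
};

int main(void)
{
	printf("%zu entries\n", ARRAY_SIZE(map));	/* prints "3 entries" */
	return 0;
}
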
diff --git a/drivers/staging/r8188eu/core/rtw_security.c b/drivers/staging/r8188eu/core/rtw_security.c
index db35f326bbb1..4e93c720c1b6 100644
--- a/drivers/staging/r8188eu/core/rtw_security.c
+++ b/drivers/staging/r8188eu/core/rtw_security.c
@@ -545,7 +545,8 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
if (is_multicast_ether_addr(prxattrib->ra)) {
if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
- DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
+ netdev_dbg(padapter->pnetdev,
+ "rx bc/mc packets, but didn't install group key!\n");
goto exit;
}
prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
@@ -1145,7 +1146,7 @@ u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
return res;
}
-static int aes_decipher(u8 *key, uint hdrlen,
+static int aes_decipher(struct adapter *padapter, u8 *key, uint hdrlen,
u8 *pframe, uint plen)
{
static u8 message[MAX_MSG_SIZE];
@@ -1329,8 +1330,10 @@ static int aes_decipher(u8 *key, uint hdrlen,
/* compare the mic */
for (i = 0; i < 8; i++) {
if (pframe[hdrlen + 8 + plen - 8 + i] != message[hdrlen + 8 + plen - 8 + i]) {
- DBG_88E("aes_decipher:mic check error mic[%d]: pframe(%x)!=message(%x)\n",
- i, pframe[hdrlen + 8 + plen - 8 + i], message[hdrlen + 8 + plen - 8 + i]);
+ netdev_dbg(padapter->pnetdev,
+ "mic check error mic[%d]: pframe(%x)!=message(%x)\n",
+ i, pframe[hdrlen + 8 + plen - 8 + i],
+ message[hdrlen + 8 + plen - 8 + i]);
res = _FAIL;
}
}
@@ -1358,13 +1361,16 @@ u32 rtw_aes_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
/* in concurrent we should use sw descrypt in group key, so we remove this message */
if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
- DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
+ netdev_dbg(padapter->pnetdev,
+ "rx bc/mc packets, but didn't install group key!\n");
goto exit;
}
prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
if (psecuritypriv->dot118021XGrpKeyid != prxattrib->key_index) {
- DBG_88E("not match packet_index=%d, install_index=%d\n",
- prxattrib->key_index, psecuritypriv->dot118021XGrpKeyid);
+ netdev_dbg(padapter->pnetdev,
+ "not match packet_index=%d, install_index=%d\n",
+ prxattrib->key_index,
+ psecuritypriv->dot118021XGrpKeyid);
res = _FAIL;
goto exit;
}
@@ -1372,7 +1378,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
}
length = precvframe->len - prxattrib->hdrlen - prxattrib->iv_len;
- res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
+ res = aes_decipher(padapter, prwskey, prxattrib->hdrlen, pframe, length);
} else {
res = _FAIL;
}
diff --git a/drivers/staging/r8188eu/core/rtw_sta_mgt.c b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
index a3d4d5d8a785..54561ff239a0 100644
--- a/drivers/staging/r8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
@@ -310,7 +310,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
}
if (!(psta->state & WIFI_AP_STATE))
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, false);
+ rtl8188e_SetHalODMVar(padapter, psta, false);
spin_lock_bh(&pstapriv->auth_list_lock);
if (!list_empty(&psta->auth_list)) {
diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c
index 6d4e21a16783..d40669c21fc1 100644
--- a/drivers/staging/r8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/r8188eu/core/rtw_wlan_util.c
@@ -695,7 +695,6 @@ static void bwmode_update_check(struct adapter *padapter, struct ndis_802_11_var
void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
{
unsigned int i;
- u8 rf_type;
u8 max_AMPDU_len, min_MPDU_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -716,29 +715,19 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]);
} else {
/* modify from fw by Thomas 2010/11/17 */
- if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3) > (pIE->data[i] & 0x3))
- max_AMPDU_len = (pIE->data[i] & 0x3);
- else
- max_AMPDU_len = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3);
+ max_AMPDU_len = min(pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3,
+ pIE->data[i] & 0x3);
- if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) > (pIE->data[i] & 0x1c))
- min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c);
- else
- min_MPDU_spacing = (pIE->data[i] & 0x1c);
+ min_MPDU_spacing = max(pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c,
+ pIE->data[i] & 0x1c);
pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para = max_AMPDU_len | min_MPDU_spacing;
}
}
- GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
-
/* update the MCS rates */
- for (i = 0; i < 16; i++) {
- if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
- else
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
- }
+ for (i = 0; i < 16; i++)
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
}
void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
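
The AMPDU-parameter merge above swaps two open-coded compare-and-assign blocks for the kernel's min()/max() helpers: the negotiated maximum A-MPDU length is the smaller of the two advertised values, and the minimum MPDU spacing is the larger, more conservative one. The same arithmetic in standalone form, with plain C macros standing in for the type-checked kernel versions:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	uint8_t ours = 0x1f, theirs = 0x0e;	/* invented AMPDU_para bytes */
	uint8_t max_ampdu_len  = MIN(ours & 0x3,  theirs & 0x3);
	uint8_t min_mpdu_space = MAX(ours & 0x1c, theirs & 0x1c);

	printf("AMPDU_para = 0x%02x\n", max_ampdu_len | min_mpdu_space);
	return 0;
}
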
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index 0c033a077bf9..8503059edc46 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -461,7 +461,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
}
}
} else if (0x888e == pattrib->ether_type) {
- DBG_88E_LEVEL(_drv_info_, "send eapol packet\n");
+ netdev_dbg(padapter->pnetdev, "send eapol packet\n");
}
if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
diff --git a/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c b/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c
index 48ede610cd28..6505e1fcb070 100644
--- a/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c
+++ b/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c
@@ -4,66 +4,42 @@
#include "../include/Hal8188EPwrSeq.h"
#include "../include/rtl8188e_hal.h"
-/*
- drivers should parse below arrays and do the corresponding actions
-*/
-/* 3 Power on Array */
-struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_CARDEMU_TO_ACT
- RTL8188E_TRANS_END
-};
-
-/* 3Radio off Array */
-struct wl_pwr_cfg rtl8188E_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_ACT_TO_CARDEMU
- RTL8188E_TRANS_END
-};
-
-/* 3Card Disable Array */
-struct wl_pwr_cfg rtl8188E_card_disable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_ACT_TO_CARDEMU
- RTL8188E_TRANS_CARDEMU_TO_CARDDIS
- RTL8188E_TRANS_END
-};
-
-/* 3 Card Enable Array */
-struct wl_pwr_cfg rtl8188E_card_enable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_CARDDIS_TO_CARDEMU
- RTL8188E_TRANS_CARDEMU_TO_ACT
- RTL8188E_TRANS_END
-};
-
-/* 3Suspend Array */
-struct wl_pwr_cfg rtl8188E_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_ACT_TO_CARDEMU
- RTL8188E_TRANS_CARDEMU_TO_SUS
- RTL8188E_TRANS_END
-};
-
-/* 3 Resume Array */
-struct wl_pwr_cfg rtl8188E_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_SUS_TO_CARDEMU
- RTL8188E_TRANS_CARDEMU_TO_ACT
- RTL8188E_TRANS_END
-};
-
-/* 3HWPDN Array */
-struct wl_pwr_cfg rtl8188E_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_ACT_TO_CARDEMU
- RTL8188E_TRANS_CARDEMU_TO_PDN
- RTL8188E_TRANS_END
-};
-
-/* 3 Enter LPS */
-struct wl_pwr_cfg rtl8188E_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS + RTL8188E_TRANS_END_STEPS] = {
- /* FW behavior */
- RTL8188E_TRANS_ACT_TO_LPS
- RTL8188E_TRANS_END
-};
-
-/* 3 Leave LPS */
-struct wl_pwr_cfg rtl8188E_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS + RTL8188E_TRANS_END_STEPS] = {
- /* FW behavior */
- RTL8188E_TRANS_LPS_TO_ACT
- RTL8188E_TRANS_END
+struct wl_pwr_cfg rtl8188E_power_on_flow[] = {
+ { 0x0006, PWR_CMD_POLLING, BIT(1), BIT(1) },
+ { 0x0002, PWR_CMD_WRITE, BIT(0) | BIT(1), 0 }, /* reset BB */
+ { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */
+ { 0x0005, PWR_CMD_WRITE, BIT(7), 0 }, /* disable HWPDN (control by DRV)*/
+ { 0x0005, PWR_CMD_WRITE, BIT(4) | BIT(3), 0 }, /* disable WL suspend*/
+ { 0x0005, PWR_CMD_WRITE, BIT(0), BIT(0) },
+ { 0x0005, PWR_CMD_POLLING, BIT(0), 0 },
+ { 0x0023, PWR_CMD_WRITE, BIT(4), 0 },
+ { 0xFFFF, PWR_CMD_END, 0, 0 },
+};
+
+struct wl_pwr_cfg rtl8188E_card_disable_flow[] = {
+ { 0x001F, PWR_CMD_WRITE, 0xFF, 0 }, /* turn off RF */
+ { 0x0023, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* LDO Sleep mode */
+ { 0x0005, PWR_CMD_WRITE, BIT(1), BIT(1) }, /* turn off MAC by HW state machine */
+ { 0x0005, PWR_CMD_POLLING, BIT(1), 0 },
+ { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */
+ { 0x0005, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) }, /* enable WL suspend */
+ { 0x0007, PWR_CMD_WRITE, 0xFF, 0 }, /* enable bandgap mbias in suspend */
+ { 0x0041, PWR_CMD_WRITE, BIT(4), 0 }, /* Clear SIC_EN register */
+ { 0xfe10, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* Set USB suspend enable local register */
+ { 0xFFFF, PWR_CMD_END, 0, 0 },
+};
+
+/* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */
+struct wl_pwr_cfg rtl8188E_enter_lps_flow[] = {
+ { 0x0522, PWR_CMD_WRITE, 0xFF, 0x7F },/* Tx Pause */
+ { 0x05F8, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x05F9, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x05FA, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x05FB, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x0002, PWR_CMD_WRITE, BIT(0), 0 }, /* CCK and OFDM are disabled, clocks are gated */
+ { 0x0002, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US },
+ { 0x0100, PWR_CMD_WRITE, 0xFF, 0x3F }, /* Reset MAC TRX */
+ { 0x0101, PWR_CMD_WRITE, BIT(1), 0 }, /* check if removed later */
+ { 0x0553, PWR_CMD_WRITE, BIT(5), BIT(5) }, /* Respond TxOK to scheduler */
+ { 0xFFFF, PWR_CMD_END, 0, 0 },
};
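
The rewritten arrays above flatten the old macro-expanded transition tables into literal { offset, command, mask, value } rows terminated by a PWR_CMD_END sentinel, and the consumer walks the rows and dispatches on the command. A reduced standalone model of that walk; the enum and struct here are simplified stand-ins for the driver's wl_pwr_cfg:

#include <stdint.h>
#include <stdio.h>

enum { PWR_CMD_WRITE, PWR_CMD_POLLING, PWR_CMD_DELAY, PWR_CMD_END };

struct pwr_step {
	uint16_t offset;
	uint8_t cmd;
	uint8_t mask;
	uint8_t value;
};

static const struct pwr_step demo_flow[] = {
	{ 0x0005, PWR_CMD_WRITE,   0x01, 0x01 },	/* set power-on bit */
	{ 0x0005, PWR_CMD_POLLING, 0x01, 0x00 },	/* wait for HW to clear it */
	{ 0xFFFF, PWR_CMD_END,     0,    0    },
};

static void run_flow(const struct pwr_step *step)
{
	for (; step->cmd != PWR_CMD_END; step++) {
		switch (step->cmd) {
		case PWR_CMD_WRITE:
			printf("write 0x%04x: (reg & ~0x%02x) | 0x%02x\n",
			       step->offset, step->mask, step->value);
			break;
		case PWR_CMD_POLLING:
			printf("poll  0x%04x until (reg & 0x%02x) == 0x%02x\n",
			       step->offset, step->mask, step->value);
			break;
		case PWR_CMD_DELAY:
			printf("delay\n");
			break;
		}
	}
}

int main(void)
{
	run_flow(demo_flow);
	return 0;
}
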
diff --git a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
index 2d351f831289..57e8f5573846 100644
--- a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
@@ -1,20 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) Realtek Semiconductor Corp.
-Module Name:
- RateAdaptive.c
+/* Copyright (c) Realtek Semiconductor Corp. */
-Abstract:
- Implement Rate Adaptive functions for common operations.
-
-Major Change History:
- When Who What
- ---------- --------------- -------------------------------
- 2011-08-12 Page Create.
-
---*/
-#include "../include/odm_precomp.h"
-
-/* Rate adaptive parameters */
+#include "../include/drv_types.h"
static u8 RETRY_PENALTY[PERENTRY][RETRYSIZE + 1] = {
{5, 4, 3, 2, 0, 3}, /* 92 , idx = 0 */
@@ -316,19 +303,19 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x0000000d;
break;
case 12:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR0);
+ MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR0);
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
case 13:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR1);
+ MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR1);
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
case 14:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR2);
+ MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR2);
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
case 15:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR3);
+ MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR3);
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
default:
@@ -590,7 +577,7 @@ void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rssi)
void ODM_RA_Set_TxRPT_Time(struct odm_dm_struct *dm_odm, u16 minRptTime)
{
- ODM_Write2Byte(dm_odm, REG_TX_RPT_TIME, minRptTime);
+ rtw_write16(dm_odm->Adapter, REG_TX_RPT_TIME, minRptTime);
}
void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16 TxRPT_Len, u32 macid_entry0, u32 macid_entry1)
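The pattern repeated through this file replaces the ODM I/O wrappers (ODM_Read4Byte, ODM_Write2Byte) with the rtw_* accessors applied to the adapter stored in dm_odm->Adapter. Expressed as shims — hypothetical helpers for illustration only; the patch inlines the calls rather than defining anything like this:

```c
/* Hypothetical shims showing the accessor mapping used throughout
 * this file; the patch inlines these calls instead. */
static inline u32 odm_read32(struct odm_dm_struct *dm, u32 reg)
{
	return rtw_read32(dm->Adapter, reg);	/* was ODM_Read4Byte(dm, reg) */
}

static inline void odm_write16(struct odm_dm_struct *dm, u32 reg, u16 val)
{
	rtw_write16(dm->Adapter, reg, val);	/* was ODM_Write2Byte(dm, reg, val) */
}
```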
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
index f6e4243e0c7b..e7f834b02567 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
#include "../include/rtw_iol.h"
#define read_next_pair(array, v1, v2, i) \
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
index b4c55863d3fb..20ce1571fc26 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
#include "../include/rtw_iol.h"
static bool Checkcondition(const u32 condition, const u32 hex)
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
index 5e0a96200078..9dc888a66d09 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
#include "../include/rtw_iol.h"
static bool CheckCondition(const u32 Condition, const u32 Hex)
diff --git a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
index 60d4ba275196..21ecc90a558c 100644
--- a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
+++ b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
+#include "../include/drv_types.h"
/*---------------------------Define Local Constant---------------------------*/
/* 2010/04/25 MH Define the max tx power tracking tx agc power. */
@@ -98,7 +98,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
struct adapter *Adapter
)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u8 ThermalValue = 0, delta, delta_LCK, delta_IQK, offset;
u8 ThermalValue_AVG_count = 0;
u32 ThermalValue_AVG = 0;
@@ -137,7 +137,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
/* <Kordan> RFCalibrateInfo.RegA24 is initialized during ODM HW configuration, but MP configures it from parameter files. */
dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317;
- ThermalValue = (u8)ODM_GetRFReg(dm_odm, RF_PATH_A, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
+ ThermalValue = (u8)rtl8188e_PHY_QueryRFReg(Adapter, RF_PATH_A, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
if (is2t)
rf = 2;
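The thermal reading above pulls a 6-bit field (RF reg 0x42 bits [15:10]) through the 0xfc00 mask; the u8 cast only works if the query helper returns the field shifted down to bit 0, which is the usual mask-to-shift convention in these PHY query routines. The arithmetic, as a sketch assuming a non-zero mask:

```c
/* Mask-to-field arithmetic assumed inside rtl8188e_PHY_QueryRFReg:
 * return the bits selected by mask, shifted down to bit 0.
 * Caller must pass a non-zero mask. */
static u32 extract_rf_field(u32 raw, u32 mask)
{
	u32 shift = 0;

	while (!(mask & BIT(shift)))	/* find the lowest set bit */
		shift++;
	return (raw & mask) >> shift;
}
/* e.g. extract_rf_field(raw, 0xfc00) yields reg[15:10], the 6-bit
 * thermal meter value on the 88E. */
```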
@@ -146,7 +146,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
if (ThermalValue) {
/* Query OFDM path A default setting */
- ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord) & bMaskOFDM_D;
+ ele_D = rtl8188e_PHY_QueryBBReg(Adapter, rOFDM0_XATxIQImbalance, bMaskDWord) & bMaskOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
if (ele_D == (OFDMSwingTable[i] & bMaskOFDM_D)) {
OFDM_index_old[0] = (u8)i;
@@ -157,7 +157,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
/* Query OFDM path B default setting */
if (is2t) {
- ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord) & bMaskOFDM_D;
+ ele_D = rtl8188e_PHY_QueryBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord) & bMaskOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
if (ele_D == (OFDMSwingTable[i] & bMaskOFDM_D)) {
OFDM_index_old[1] = (u8)i;
@@ -171,13 +171,13 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
for (i = 0; i < CCK_TABLE_SIZE; i++) {
if (dm_odm->RFCalibrateInfo.bCCKinCH14) {
- if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4) == 0) {
+ if (memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4)) {
CCK_index_old = (u8)i;
dm_odm->BbSwingIdxCckBase = (u8)i;
break;
}
} else {
- if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4) == 0) {
+ if (memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4)) {
CCK_index_old = (u8)i;
dm_odm->BbSwingIdxCckBase = (u8)i;
break;
@@ -329,17 +329,17 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
/* write new elements A, C, D to regC88 and regC9C, element B is always 0 */
value32 = (ele_D << 22) | ((ele_C & 0x3F) << 16) | ele_A;
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
value32 = (ele_C & 0x000003C0) >> 6;
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
value32 = ((X * ele_D) >> 7) & 0x01;
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(28), value32);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT(28), value32);
} else {
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[(u8)OFDM_index[1]]);
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(28), 0x00);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[(u8)OFDM_index[1]]);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT(28), 0x00);
}
}
}
@@ -361,35 +361,33 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
#define IQK_DELAY_TIME 1 /* ms */
static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
-phy_PathA_IQK_8188E(struct adapter *adapt, bool configPathB)
+phy_PathA_IQK_8188E(struct adapter *adapt)
{
u32 regeac, regE94, regE9C;
u8 result = 0x00;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
/* 1 Tx IQK */
/* path-A IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
/* LO calibration setting */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
/* One shot, path A LOK & IQK */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ regeac = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
if (!(regeac & BIT(28)) &&
(((regE94 & 0x03FF0000) >> 16) != 0x142) &&
@@ -399,51 +397,49 @@ phy_PathA_IQK_8188E(struct adapter *adapt, bool configPathB)
}
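The pass test just above reads three result registers and declares Tx IQK good only when the failure flag (bit 28 of 0xeac) is clear and the 10-bit power fields sit away from the known-bad values 0x142 and 0x42; on success the caller's result gains bit 0. Restated as a standalone predicate:

```c
/* Restatement of the Tx IQK pass condition checked above. */
static bool patha_tx_iqk_ok(u32 regeac, u32 regE94, u32 regE9C)
{
	return !(regeac & BIT(28)) &&			/* one-shot did not fail */
	       (((regE94 & 0x03FF0000) >> 16) != 0x142) &&
	       (((regE9C & 0x03FF0000) >> 16) != 0x42);
}
```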
static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
-phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
+phy_PathA_RxIQK(struct adapter *adapt)
{
u32 regeac, regE94, regE9C, regEA4, u4tmp;
u8 result = 0x00;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
/* 1 Get TXIMR setting */
/* modify RXIQK mode table */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
/* PA,PAD off */
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980);
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
/* IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, 0x01007c00);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x81004800);
/* path-A IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
/* LO calibration setting */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
/* One shot, path A LOK & IQK */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ regeac = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
if (!(regeac & BIT(28)) &&
(((regE94 & 0x03FF0000) >> 16) != 0x142) &&
@@ -453,46 +449,46 @@ phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
return result;
u4tmp = 0x80007C00 | (regE94 & 0x3FF0000) | ((regE9C & 0x3FF0000) >> 16);
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, u4tmp);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, u4tmp);
/* 1 RX IQK */
/* modify RXIQK mode table */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
/* IQK setting */
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x01004800);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x01004800);
/* path-A IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
/* LO calibration setting */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
/* One shot, path A LOK & IQK */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
- regEA4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ regeac = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
+ regEA4 = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord);
/* reload RF 0xdf */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
if (!(regeac & BIT(27)) && /* if Tx is OK, check whether Rx is OK */
(((regEA4 & 0x03FF0000) >> 16) != 0x132) &&
@@ -502,95 +498,54 @@ phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
return result;
}
-static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
-phy_PathB_IQK_8188E(struct adapter *adapt)
-{
- u32 regeac, regeb4, regebc, regec4, regecc;
- u8 result = 0x00;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
- /* One shot, path B LOK & IQK */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000000);
-
- /* delay x ms */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
-
- /* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
- regeb4 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord);
- regebc = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord);
- regec4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord);
- regecc = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord);
-
- if (!(regeac & BIT(31)) &&
- (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
- (((regebc & 0x03FF0000) >> 16) != 0x42))
- result |= 0x01;
- else
- return result;
-
- if (!(regeac & BIT(30)) &&
- (((regec4 & 0x03FF0000) >> 16) != 0x132) &&
- (((regecc & 0x03FF0000) >> 16) != 0x36))
- result |= 0x02;
-
- return result;
-}
-
static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u8 final_candidate, bool txonly)
{
u32 Oldval_0, X, TX0_A, reg;
s32 Y, TX0_C;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
if (final_candidate == 0xFF) {
return;
} else if (iqkok) {
- Oldval_0 = (ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+ Oldval_0 = (rtl8188e_PHY_QueryBBReg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
X = result[final_candidate][0];
if ((X & 0x00000200) != 0)
X = X | 0xFFFFFC00;
TX0_A = (X * Oldval_0) >> 8;
- ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0 >> 7) & 0x1));
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0 >> 7) & 0x1));
Y = result[final_candidate][1];
if ((Y & 0x00000200) != 0)
Y = Y | 0xFFFFFC00;
TX0_C = (Y * Oldval_0) >> 8;
- ODM_SetBBReg(dm_odm, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C & 0x3C0) >> 6));
- ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C & 0x3F));
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C & 0x3C0) >> 6));
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C & 0x3F));
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0 >> 7) & 0x1));
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0 >> 7) & 0x1));
if (txonly)
return;
reg = result[final_candidate][2];
- ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0x3FF, reg);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XARxIQImbalance, 0x3FF, reg);
reg = result[final_candidate][3] & 0x3F;
- ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0xFC00, reg);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XARxIQImbalance, 0xFC00, reg);
reg = (result[final_candidate][3] >> 6) & 0xF;
- ODM_SetBBReg(dm_odm, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
}
}
void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegisterNum)
{
u32 i;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
for (i = 0; i < RegisterNum; i++) {
- ADDABackup[i] = ODM_GetBBReg(dm_odm, ADDAReg[i], bMaskDWord);
+ ADDABackup[i] = rtl8188e_PHY_QueryBBReg(adapt, ADDAReg[i], bMaskDWord);
}
}
@@ -601,22 +556,19 @@ static void _PHY_SaveMACRegisters(
)
{
u32 i;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
- MACBackup[i] = ODM_Read1Byte(dm_odm, MACReg[i]);
- }
- MACBackup[i] = ODM_Read4Byte(dm_odm, MACReg[i]);
+
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ MACBackup[i] = rtw_read8(adapt, MACReg[i]);
+
+ MACBackup[i] = rtw_read32(adapt, MACReg[i]);
}
static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegiesterNum)
{
u32 i;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
for (i = 0; i < RegiesterNum; i++)
- ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, ADDABackup[i]);
+ rtl8188e_PHY_SetBBReg(adapt, ADDAReg[i], bMaskDWord, ADDABackup[i]);
}
static void
@@ -627,38 +579,24 @@ _PHY_ReloadMACRegisters(
)
{
u32 i;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)MACBackup[i]);
- }
- ODM_Write4Byte(dm_odm, MACReg[i], MACBackup[i]);
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtw_write8(adapt, MACReg[i], (u8)MACBackup[i]);
+
+ rtw_write32(adapt, MACReg[i], MACBackup[i]);
}
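The simplified save/restore pair keeps the original asymmetry: the first IQK_MAC_REG_NUM - 1 MAC registers are byte-wide, the last is a full dword, and the backup array mirrors that. A condensed sketch — the count and register list are placeholders, not the driver's values:

```c
/* Sketch of the asymmetric MAC backup above; N_MAC_REGS stands in
 * for IQK_MAC_REG_NUM, and regs[] for the real register list. */
#define N_MAC_REGS 4

static void save_mac_regs(struct adapter *adapt, const u32 *regs, u32 *backup)
{
	u32 i;

	for (i = 0; i < N_MAC_REGS - 1; i++)
		backup[i] = rtw_read8(adapt, regs[i]);	/* byte-wide regs */
	backup[i] = rtw_read32(adapt, regs[i]);		/* last one is a dword */
}
```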
-void
+static void
_PHY_PathADDAOn(
struct adapter *adapt,
- u32 *ADDAReg,
- bool isPathAOn,
- bool is2t
- )
+ u32 *ADDAReg)
{
- u32 pathOn;
u32 i;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
- if (!is2t) {
- pathOn = 0x0bdb25a0;
- ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
- } else {
- ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, pathOn);
- }
+ rtl8188e_PHY_SetBBReg(adapt, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, pathOn);
+ rtl8188e_PHY_SetBBReg(adapt, ADDAReg[i], bMaskDWord, 0x0bdb25a0);
}
void
@@ -669,28 +607,13 @@ _PHY_MACSettingCalibration(
)
{
u32 i = 0;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
- ODM_Write1Byte(dm_odm, MACReg[i], 0x3F);
- for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) {
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i] & (~BIT(3))));
- }
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i] & (~BIT(5))));
-}
+ rtw_write8(adapt, MACReg[i], 0x3F);
-void
-_PHY_PathAStandBy(
- struct adapter *adapt
- )
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtw_write8(adapt, MACReg[i], (u8)(MACBackup[i] & (~BIT(3))));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x0);
- ODM_SetBBReg(dm_odm, 0x840, bMaskDWord, 0x00010000);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ rtw_write8(adapt, MACReg[i], (u8)(MACBackup[i] & (~BIT(5))));
}
static void _PHY_PIModeSwitch(
@@ -699,12 +622,10 @@ static void _PHY_PIModeSwitch(
)
{
u32 mode;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
mode = PIMode ? 0x01000100 : 0x01000000;
- ODM_SetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
}
static bool phy_SimularityCompare_8188E(
@@ -786,12 +707,12 @@ static bool phy_SimularityCompare_8188E(
}
}
-static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t, bool is2t)
+static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *pHalData = &adapt->haldata;
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
u32 i;
- u8 PathAOK, PathBOK;
+ u8 PathAOK;
u32 ADDA_REG[IQK_ADDA_REG_NUM] = {
rFPGA0_XCD_SwitchControl, rBlue_Tooth,
rRx_Wait_CCA, rTx_CCK_RFON,
@@ -823,9 +744,9 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
_PHY_SaveADDARegisters(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
}
- _PHY_PathADDAOn(adapt, ADDA_REG, true, is2t);
+ _PHY_PathADDAOn(adapt, ADDA_REG);
if (t == 0)
- dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)ODM_GetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, BIT(8));
+ dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)rtl8188e_PHY_QueryBBReg(adapt, rFPGA0_XA_HSSIParameter1, BIT(8));
if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
/* Switch BB to PI mode to do IQ Calibration. */
@@ -833,77 +754,49 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
}
/* BB setting */
- ODM_SetBBReg(dm_odm, rFPGA0_RFMOD, BIT(24), 0x00);
- ODM_SetBBReg(dm_odm, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
- ODM_SetBBReg(dm_odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
- ODM_SetBBReg(dm_odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
-
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT(10), 0x01);
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT(26), 0x01);
- ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT(10), 0x00);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT(10), 0x00);
-
- if (is2t) {
- ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00010000);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00010000);
- }
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_RFMOD, BIT(24), 0x00);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
+
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XAB_RFInterfaceSW, BIT(10), 0x01);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XAB_RFInterfaceSW, BIT(26), 0x01);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XA_RFInterfaceOE, BIT(10), 0x00);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XB_RFInterfaceOE, BIT(10), 0x00);
/* MAC settings */
_PHY_MACSettingCalibration(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
/* Page B init */
/* AP or IQK */
- ODM_SetBBReg(dm_odm, rConfig_AntA, bMaskDWord, 0x0f600000);
+ rtl8188e_PHY_SetBBReg(adapt, rConfig_AntA, bMaskDWord, 0x0f600000);
- if (is2t)
- ODM_SetBBReg(dm_odm, rConfig_AntB, bMaskDWord, 0x0f600000);
/* IQ calibration setting */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, 0x01007c00);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x81004800);
for (i = 0; i < retryCount; i++) {
- PathAOK = phy_PathA_IQK_8188E(adapt, is2t);
+ PathAOK = phy_PathA_IQK_8188E(adapt);
if (PathAOK == 0x01) {
- result[t][0] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][1] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][0] = (rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][1] = (rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord) & 0x3FF0000) >> 16;
break;
}
}
for (i = 0; i < retryCount; i++) {
- PathAOK = phy_PathA_RxIQK(adapt, is2t);
+ PathAOK = phy_PathA_RxIQK(adapt);
if (PathAOK == 0x03) {
- result[t][2] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][3] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][2] = (rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][3] = (rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord) & 0x3FF0000) >> 16;
break;
}
}
- if (is2t) {
- _PHY_PathAStandBy(adapt);
-
- /* Turn Path B ADDA on */
- _PHY_PathADDAOn(adapt, ADDA_REG, false, is2t);
-
- for (i = 0; i < retryCount; i++) {
- PathBOK = phy_PathB_IQK_8188E(adapt);
- if (PathBOK == 0x03) {
- result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][6] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][7] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord) & 0x3FF0000) >> 16;
- break;
- } else if (i == (retryCount - 1) && PathBOK == 0x01) { /* Tx IQK OK */
- result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord) & 0x3FF0000) >> 16;
- }
- }
- }
-
/* Back to BB mode, load original value */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0);
if (t != 0) {
if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
@@ -920,13 +813,11 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
reload_adda_reg(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
/* Restore RX initial gain */
- ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
- if (is2t)
- ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00032ed3);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
/* load 0xe30 IQC default value */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
}
}
@@ -934,62 +825,60 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
{
u8 tmpreg;
u32 RF_Amode = 0, RF_Bmode = 0, LC_Cal;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
/* Check continuous TX and Packet TX */
- tmpreg = ODM_Read1Byte(dm_odm, 0xd03);
+ tmpreg = rtw_read8(adapt, 0xd03);
if ((tmpreg & 0x70) != 0) /* Deal with continuous TX case */
- ODM_Write1Byte(dm_odm, 0xd03, tmpreg & 0x8F); /* disable all continuous TX */
+ rtw_write8(adapt, 0xd03, tmpreg & 0x8F); /* disable all continuous TX */
else /* Deal with Packet TX case */
- ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0xFF); /* block all queues */
+ rtw_write8(adapt, REG_TXPAUSE, 0xFF); /* block all queues */
if ((tmpreg & 0x70) != 0) {
/* 1. Read original RF mode */
/* Path-A */
- RF_Amode = PHY_QueryRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits);
+ RF_Amode = rtl8188e_PHY_QueryRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits);
/* Path-B */
if (is2t)
- RF_Bmode = PHY_QueryRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits);
+ RF_Bmode = rtl8188e_PHY_QueryRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits);
/* 2. Set RF mode = standby mode */
/* Path-A */
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode & 0x8FFFF) | 0x10000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode & 0x8FFFF) | 0x10000);
/* Path-B */
if (is2t)
- ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode & 0x8FFFF) | 0x10000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode & 0x8FFFF) | 0x10000);
}
/* 3. Read RF reg18 */
- LC_Cal = PHY_QueryRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
+ LC_Cal = rtl8188e_PHY_QueryRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
/* 4. Set LC calibration begin bit15 */
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal | 0x08000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal | 0x08000);
- ODM_sleep_ms(100);
+ msleep(100);
/* Restore original situation */
if ((tmpreg & 0x70) != 0) {
/* Deal with continuous TX case */
/* Path-A */
- ODM_Write1Byte(dm_odm, 0xd03, tmpreg);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
+ rtw_write8(adapt, 0xd03, tmpreg);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
/* Path-B */
if (is2t)
- ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
} else {
/* Deal with Packet TX case */
- ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0x00);
+ rtw_write8(adapt, REG_TXPAUSE, 0x00);
}
}
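Before touching RF reg18, phy_LCCalibrate_8188E quiesces the transmitter: if any continuous-TX bit in 0xd03 is set it clears those bits, otherwise it blocks every TX queue through REG_TXPAUSE, and the same 0x70 test later selects the matching restore path. The decision in isolation, as a sketch:

```c
/* Condensed restatement of the TX-quiesce step above; returns true
 * when the continuous-TX path was taken so the caller knows how to
 * restore. *saved keeps the original 0xd03 value. */
static bool lc_cal_quiesce(struct adapter *adapt, u8 *saved)
{
	u8 tmpreg = rtw_read8(adapt, 0xd03);

	*saved = tmpreg;
	if (tmpreg & 0x70) {
		/* continuous TX active: disable it, leave queues alone */
		rtw_write8(adapt, 0xd03, tmpreg & 0x8F);
		return true;
	}
	/* packet TX: block all queues instead */
	rtw_write8(adapt, REG_TXPAUSE, 0xFF);
	return false;
}
```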
void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *pHalData = &adapt->haldata;
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
s32 result[4][8]; /* last is final result */
u8 i, final_candidate;
@@ -1032,7 +921,7 @@ void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
is13simular = false;
for (i = 0; i < 3; i++) {
- phy_IQCalibrate_8188E(adapt, result, i, false);
+ phy_IQCalibrate_8188E(adapt, result, i);
if (i == 1) {
is12simular = phy_SimularityCompare_8188E(adapt, result, 0, 1);
@@ -1101,7 +990,7 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
{
bool singletone = false, carrier_sup = false;
u32 timeout = 2000, timecount = 0;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *pHalData = &adapt->haldata;
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
@@ -1111,7 +1000,7 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
return;
while (*dm_odm->pbScanInProcess && timecount < timeout) {
- ODM_delay_ms(50);
+ mdelay(50);
timecount += 50;
}
diff --git a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
index 0fd11aca7ac7..47ad4ea273cc 100644
--- a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
@@ -25,8 +25,7 @@ Major Change History:
* Assumption:
* We should follow the specific format released by HW SD.
*/
-u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
- u8 ifacetype, struct wl_pwr_cfg pwrseqcmd[])
+u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[])
{
struct wl_pwr_cfg pwrcfgcmd = {0};
u8 poll_bit = false;
@@ -39,54 +38,49 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
do {
pwrcfgcmd = pwrseqcmd[aryidx];
- /* 2 Only Handle the command whose FAB, CUT, and Interface are matched */
- if ((GET_PWR_CFG_FAB_MASK(pwrcfgcmd) & fab_vers) &&
- (GET_PWR_CFG_CUT_MASK(pwrcfgcmd) & cut_vers) &&
- (GET_PWR_CFG_INTF_MASK(pwrcfgcmd) & ifacetype)) {
- switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
- case PWR_CMD_WRITE:
- offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
+ switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
+ case PWR_CMD_WRITE:
+ offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
- /* Read the value from system register */
- value = rtw_read8(padapter, offset);
-
- value &= ~(GET_PWR_CFG_MASK(pwrcfgcmd));
- value |= (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd));
+ /* Read the value from system register */
+ value = rtw_read8(padapter, offset);
- /* Write the value back to system register */
- rtw_write8(padapter, offset, value);
- break;
- case PWR_CMD_POLLING:
- poll_bit = false;
- offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
- do {
- value = rtw_read8(padapter, offset);
+ value &= ~(GET_PWR_CFG_MASK(pwrcfgcmd));
+ value |= (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd));
- value &= GET_PWR_CFG_MASK(pwrcfgcmd);
- if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)))
- poll_bit = true;
- else
- udelay(10);
+ /* Write the value back to system register */
+ rtw_write8(padapter, offset, value);
+ break;
+ case PWR_CMD_POLLING:
+ poll_bit = false;
+ offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
+ do {
+ value = rtw_read8(padapter, offset);
- if (poll_count++ > max_poll_count) {
- DBG_88E("Fail to polling Offset[%#x]\n", offset);
- return false;
- }
- } while (!poll_bit);
- break;
- case PWR_CMD_DELAY:
- if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
- udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
+ value &= GET_PWR_CFG_MASK(pwrcfgcmd);
+ if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)))
+ poll_bit = true;
else
- udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd) * 1000);
- break;
- case PWR_CMD_END:
- /* When this command is parsed, end the process */
- return true;
- break;
- default:
- break;
- }
+ udelay(10);
+
+ if (poll_count++ > max_poll_count) {
+ DBG_88E("Fail to polling Offset[%#x]\n", offset);
+ return false;
+ }
+ } while (!poll_bit);
+ break;
+ case PWR_CMD_DELAY:
+ if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
+ udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
+ else
+ udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd) * 1000);
+ break;
+ case PWR_CMD_END:
+ /* When this command is parsed, end the process */
+ return true;
+ break;
+ default:
+ break;
}
aryidx++; /* Add Array Index */
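With the FAB/CUT/interface filter removed, the parser is one flat switch per entry; PWR_CMD_POLLING is the only case with a failure mode, re-reading the register at 10 µs intervals until the masked value matches or max_poll_count is exceeded. That loop as a self-contained helper:

```c
/* Poll-with-timeout sketch mirroring the PWR_CMD_POLLING case above;
 * the 10 us step and the max_poll_count bail-out match the code. */
static bool poll_reg_bits(struct adapter *adapt, u32 offset, u8 mask,
			  u8 expect, u32 max_poll_count)
{
	u32 polls = 0;

	do {
		if ((rtw_read8(adapt, offset) & mask) == (expect & mask))
			return true;
		udelay(10);
	} while (polls++ <= max_poll_count);

	return false;	/* register never reached the expected state */
}
```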
diff --git a/drivers/staging/r8188eu/hal/odm.c b/drivers/staging/r8188eu/hal/odm.c
index 21f115194df8..d8fa587ff286 100644
--- a/drivers/staging/r8188eu/hal/odm.c
+++ b/drivers/staging/r8188eu/hal/odm.c
@@ -1,9 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-/* include files */
-
-#include "../include/odm_precomp.h"
+#include "../include/drv_types.h"
/* avoid warnings in FreeBSD ==> TODO: modify */
static u32 EDCAParam[HT_IOT_PEER_MAX][3] = {
@@ -142,222 +140,26 @@ u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8] = {
#define RxDefaultAnt1 0x65a9
#define RxDefaultAnt2 0x569a
-/* 3 Export Interface */
-
-/* 2011/09/21 MH Add to describe different team necessary resource allocate?? */
-void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
-{
- /* 2012.05.03 Luke: For all IC series */
- odm_CommonInfoSelfInit(pDM_Odm);
- odm_DIGInit(pDM_Odm);
- odm_RateAdaptiveMaskInit(pDM_Odm);
-
- odm_PrimaryCCA_Init(pDM_Odm); /* Gary */
- odm_DynamicBBPowerSavingInit(pDM_Odm);
- odm_TXPowerTrackingInit(pDM_Odm);
- ODM_EdcaTurboInit(pDM_Odm);
- ODM_RAInfo_Init_all(pDM_Odm);
- if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
- odm_InitHybridAntDiv(pDM_Odm);
-}
-
-/* 2011/09/20 MH This is the entry pointer for all team to execute HW out source DM. */
-/* You can not add any dummy function here, be care, you can only use DM structure */
-/* to perform any new ODM_DM. */
-void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
-{
- /* 2012.05.03 Luke: For all IC series */
- odm_CommonInfoSelfUpdate(pDM_Odm);
- odm_FalseAlarmCounterStatistics(pDM_Odm);
- odm_RSSIMonitorCheck(pDM_Odm);
-
- odm_DIG(pDM_Odm);
- odm_CCKPacketDetectionThresh(pDM_Odm);
-
- if (*pDM_Odm->pbPowerSaving)
- return;
-
- odm_RefreshRateAdaptiveMask(pDM_Odm);
-
- if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
- odm_HwAntDiv(pDM_Odm);
-
- ODM_TXPowerTrackingCheck(pDM_Odm);
- odm_EdcaTurboCheck(pDM_Odm);
-}
-
-/* Init /.. Fixed HW value. Only init time. */
-void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u32 Value)
-{
- /* This section is used for init value */
- switch (CmnInfo) {
- /* Fixed ODM value. */
- case ODM_CMNINFO_ABILITY:
- pDM_Odm->SupportAbility = (u32)Value;
- break;
- case ODM_CMNINFO_MP_TEST_CHIP:
- pDM_Odm->bIsMPChip = (u8)Value;
- break;
- case ODM_CMNINFO_RF_ANTENNA_TYPE:
- pDM_Odm->AntDivType = (u8)Value;
- break;
- /* To remove the compiler warning, must add an empty default statement to handle the other values. */
- default:
- /* do nothing */
- break;
- }
-
- /* Tx power tracking BB swing table. */
- /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
- pDM_Odm->BbSwingIdxOfdm = 12; /* Set defalut value as index 12. */
- pDM_Odm->BbSwingIdxOfdmCurrent = 12;
- pDM_Odm->BbSwingFlagOfdm = false;
-}
-
-void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, void *pValue)
-{
- /* */
- /* Hook call by reference pointer. */
- /* */
- switch (CmnInfo) {
- /* Dynamic call by reference pointer. */
- case ODM_CMNINFO_TX_UNI:
- pDM_Odm->pNumTxBytesUnicast = (u64 *)pValue;
- break;
- case ODM_CMNINFO_RX_UNI:
- pDM_Odm->pNumRxBytesUnicast = (u64 *)pValue;
- break;
- case ODM_CMNINFO_WM_MODE:
- pDM_Odm->pWirelessMode = (u8 *)pValue;
- break;
- case ODM_CMNINFO_SEC_CHNL_OFFSET:
- pDM_Odm->pSecChOffset = (u8 *)pValue;
- break;
- case ODM_CMNINFO_SEC_MODE:
- pDM_Odm->pSecurity = (u8 *)pValue;
- break;
- case ODM_CMNINFO_BW:
- pDM_Odm->pBandWidth = (u8 *)pValue;
- break;
- case ODM_CMNINFO_CHNL:
- pDM_Odm->pChannel = (u8 *)pValue;
- break;
- case ODM_CMNINFO_SCAN:
- pDM_Odm->pbScanInProcess = (bool *)pValue;
- break;
- case ODM_CMNINFO_POWER_SAVING:
- pDM_Odm->pbPowerSaving = (bool *)pValue;
- break;
- case ODM_CMNINFO_NET_CLOSED:
- pDM_Odm->pbNet_closed = (bool *)pValue;
- break;
- /* To remove the compiler warning, must add an empty default statement to handle the other values. */
- default:
- /* do nothing */
- break;
- }
-}
-
-/* Update Band/CHannel/.. The values are dynamic but non-per-packet. */
-void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
-{
- /* */
- /* This init variable may be changed in run time. */
- /* */
- switch (CmnInfo) {
- case ODM_CMNINFO_ABILITY:
- pDM_Odm->SupportAbility = (u32)Value;
- break;
- case ODM_CMNINFO_WIFI_DIRECT:
- pDM_Odm->bWIFI_Direct = (bool)Value;
- break;
- case ODM_CMNINFO_WIFI_DISPLAY:
- pDM_Odm->bWIFI_Display = (bool)Value;
- break;
- case ODM_CMNINFO_LINK:
- pDM_Odm->bLinked = (bool)Value;
- break;
- case ODM_CMNINFO_RSSI_MIN:
- pDM_Odm->RSSI_Min = (u8)Value;
- break;
- }
-}
-
-void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
-{
- pDM_Odm->bCckHighPower = (bool)ODM_GetBBReg(pDM_Odm, 0x824, BIT(9));
- pDM_Odm->RFPathRxEnable = (u8)ODM_GetBBReg(pDM_Odm, 0xc04, 0x0F);
-}
-
-void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
-{
- u8 EntryCnt = 0;
- u8 i;
- struct sta_info *pEntry;
-
- if (*pDM_Odm->pBandWidth == ODM_BW40M) {
- if (*pDM_Odm->pSecChOffset == 1)
- pDM_Odm->ControlChannel = *pDM_Odm->pChannel - 2;
- else if (*pDM_Odm->pSecChOffset == 2)
- pDM_Odm->ControlChannel = *pDM_Odm->pChannel + 2;
- } else {
- pDM_Odm->ControlChannel = *pDM_Odm->pChannel;
- }
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- pEntry = pDM_Odm->pODM_StaInfo[i];
- if (IS_STA_VALID(pEntry))
- EntryCnt++;
- }
- if (EntryCnt == 1)
- pDM_Odm->bOneEntryOnly = true;
- else
- pDM_Odm->bOneEntryOnly = false;
-}
-
-void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
+static void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct adapter *adapter = pDM_Odm->Adapter;
- if (pDM_DigTable->CurIGValue != CurrentIGI) {
- ODM_SetBBReg(pDM_Odm, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N, CurrentIGI);
- pDM_DigTable->CurIGValue = CurrentIGI;
- }
-}
-
-void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
-{
- struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
-
- pDM_DigTable->CurIGValue = (u8)ODM_GetBBReg(pDM_Odm, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N);
- pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
- pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
- pDM_DigTable->FALowThresh = DM_false_ALARM_THRESH_LOW;
- pDM_DigTable->FAHighThresh = DM_false_ALARM_THRESH_HIGH;
+ pDM_DigTable->CurIGValue = (u8)rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N);
pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
- pDM_DigTable->BackoffVal = DM_DIG_BACKOFF_DEFAULT;
- pDM_DigTable->BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
- pDM_DigTable->BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
- pDM_DigTable->PreCCK_CCAThres = 0xFF;
pDM_DigTable->CurCCK_CCAThres = 0x83;
pDM_DigTable->ForbiddenIGI = DM_DIG_MIN_NIC;
pDM_DigTable->LargeFAHit = 0;
pDM_DigTable->Recover_cnt = 0;
pDM_DigTable->DIG_Dynamic_MIN_0 = DM_DIG_MIN_NIC;
- pDM_DigTable->DIG_Dynamic_MIN_1 = DM_DIG_MIN_NIC;
pDM_DigTable->bMediaConnect_0 = false;
- pDM_DigTable->bMediaConnect_1 = false;
/* Initialize pDM_Odm->bDMInitialGainEnable to avoid DIG errors */
pDM_Odm->bDMInitialGainEnable = true;
}
-void odm_DIG(struct odm_dm_struct *pDM_Odm)
+static void odm_DIG(struct odm_dm_struct *pDM_Odm)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
@@ -367,7 +169,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
u8 dm_dig_max, dm_dig_min;
u8 CurrentIGI = pDM_DigTable->CurIGValue;
- if ((!(pDM_Odm->SupportAbility & ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT)))
+ if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
return;
if (*pDM_Odm->pbScanInProcess)
@@ -457,11 +259,11 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
CurrentIGI = pDM_Odm->RSSI_Min;
} else {
if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
- CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
- CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
+ CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
}
} else {
if (FirstDisConnect) {
@@ -489,52 +291,119 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
pDM_DigTable->DIG_Dynamic_MIN_0 = DIG_Dynamic_MIN;
}
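While associated, odm_DIG moves the initial gain index by false-alarm pressure: above DM_DIG_FA_TH2 it steps up by 4, above TH1 by 2, and only below TH0 does it back off by 2, so the gain rises quickly under noise and relaxes slowly. The step in isolation:

```c
/* FA-count-driven IGI step as used above; the DM_DIG_FA_TH* macros
 * come from the driver headers and are not shown in this hunk. */
static u8 dig_adjust_igi(u8 igi, u32 fa_cnt)
{
	if (fa_cnt > DM_DIG_FA_TH2)
		return igi + 4;
	if (fa_cnt > DM_DIG_FA_TH1)
		return igi + 2;
	if (fa_cnt < DM_DIG_FA_TH0)
		return igi - 2;
	return igi;		/* inside the dead band: hold */
}
```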
-/* 3============================================================ */
-/* 3 FASLE ALARM CHECK */
-/* 3============================================================ */
+static void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *adapter = pDM_Odm->Adapter;
+
+ pDM_Odm->bCckHighPower = (bool)rtl8188e_PHY_QueryBBReg(adapter, 0x824, BIT(9));
+ pDM_Odm->RFPathRxEnable = (u8)rtl8188e_PHY_QueryBBReg(adapter, 0xc04, 0x0F);
+}
-void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
+static void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
+{
+ u8 EntryCnt = 0;
+ u8 i;
+ struct sta_info *pEntry;
+
+ if (*pDM_Odm->pBandWidth == ODM_BW40M) {
+ if (*pDM_Odm->pSecChOffset == 1)
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel - 2;
+ else if (*pDM_Odm->pSecChOffset == 2)
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel + 2;
+ } else {
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel;
+ }
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ pEntry = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pEntry))
+ EntryCnt++;
+ }
+ if (EntryCnt == 1)
+ pDM_Odm->bOneEntryOnly = true;
+ else
+ pDM_Odm->bOneEntryOnly = false;
+}
+
+static void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct odm_rate_adapt *pOdmRA = &pDM_Odm->RateAdaptive;
+
+ pOdmRA->RATRState = DM_RATR_STA_INIT;
+ pOdmRA->HighRSSIThresh = 50;
+ pOdmRA->LowRSSIThresh = 20;
+}
+
+static void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm)
+{
+ u8 i;
+ struct adapter *pAdapter = pDM_Odm->Adapter;
+
+ if (pAdapter->bDriverStopped)
+ return;
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
+
+ if (IS_STA_VALID(pstat)) {
+ if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level))
+ rtw_hal_update_ra_mask(pAdapter, i, pstat->rssi_level);
+ }
+ }
+}
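The refresh loop leans on ODM_RAStateCheck (whose body is removed from this file further down) to decide when a station's RSSI crossed a level; the check biases the up-going thresholds by a small gap so a station sitting on a boundary does not flap. A simplified sketch using the 50/20 thresholds set in odm_RateAdaptiveMaskInit and the 5 dB GoUpGap from the removed body — the driver's version also carries an INIT state and a force-update flag, omitted here:

```c
/* Simplified RA level check with up-going hysteresis; thresholds and
 * gap taken from the code in this file, INIT state omitted. */
enum { RA_HIGH, RA_MIDDLE, RA_LOW };

static bool ra_state_check(s32 rssi, u8 *state)
{
	u8 high = 50, low = 20;		/* HighRSSIThresh / LowRSSIThresh */
	const u8 go_up_gap = 5;
	u8 next;

	/* raise the bars when trending upward to avoid flapping */
	if (*state == RA_MIDDLE) {
		high += go_up_gap;
	} else if (*state == RA_LOW) {
		high += go_up_gap;
		low += go_up_gap;
	}

	if (rssi > high)
		next = RA_HIGH;
	else if (rssi > low)
		next = RA_MIDDLE;
	else
		next = RA_LOW;

	if (next != *state) {
		*state = next;
		return true;	/* caller refreshes the RA mask */
	}
	return false;
}
```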
+
+static void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
+
+ pDM_PSTable->pre_rf_state = RF_MAX;
+ pDM_PSTable->cur_rf_state = RF_MAX;
+ pDM_PSTable->initialize = 0;
+}
+
+static void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
{
u32 ret_value;
struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+ struct adapter *adapter = pDM_Odm->Adapter;
if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
return;
/* hold ofdm counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1); /* hold page C counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); /* hold page D counter */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1); /* hold page C counter */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); /* hold page D counter */
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
FalseAlmCnt->Cnt_Fast_Fsync = (ret_value & 0xffff);
FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
FalseAlmCnt->Cnt_OFDM_CCA = (ret_value & 0xffff);
FalseAlmCnt->Cnt_Parity_Fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
FalseAlmCnt->Cnt_Rate_Illegal = (ret_value & 0xffff);
FalseAlmCnt->Cnt_Crc8_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
FalseAlmCnt->Cnt_Mcs_fail = (ret_value & 0xffff);
FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_SC_CNT_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_SC_CNT_11N, bMaskDWord);
FalseAlmCnt->Cnt_BW_LSC = (ret_value & 0xffff);
FalseAlmCnt->Cnt_BW_USC = ((ret_value & 0xffff0000) >> 16);
/* hold cck counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(12), 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(14), 1);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_FA_RST_11N, BIT(12), 1);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_FA_RST_11N, BIT(14), 1);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
FalseAlmCnt->Cnt_Cck_fail = ret_value;
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff) << 8;
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
FalseAlmCnt->Cnt_CCK_CCA = ((ret_value & 0xFF) << 8) | ((ret_value & 0xFF00) >> 8);
FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
@@ -548,11 +417,7 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
}
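Most of these reads pack two 16-bit counters into one 32-bit register, and the CCK CCA count additionally arrives byte-swapped; the unpacking is mechanical:

```c
/* Counter unpacking as done above: two u16 halves per dword read,
 * plus the byte-swapped CCK CCA count. */
static void unpack_fa_pair(u32 reg, u16 *lo, u16 *hi)
{
	*lo = reg & 0xffff;
	*hi = (reg & 0xffff0000) >> 16;
}

static u16 unpack_cck_cca(u32 reg)
{
	return ((reg & 0xFF) << 8) | ((reg & 0xFF00) >> 8);
}
```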
-/* 3============================================================ */
-/* 3 CCK Packet Detect Threshold */
-/* 3============================================================ */
-
-void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
+static void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
{
u8 CurCCK_CCAThres;
struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
@@ -579,235 +444,9 @@ void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
ODM_Write_CCK_CCA_Thres(pDM_Odm, CurCCK_CCAThres);
}
-void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres)
-{
- struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
-
- if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres) /* modify by Guo.Mingzhi 2012-01-03 */
- ODM_Write1Byte(pDM_Odm, ODM_REG_CCK_CCA_11N, CurCCK_CCAThres);
- pDM_DigTable->PreCCK_CCAThres = pDM_DigTable->CurCCK_CCAThres;
- pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
-}
-
-/* 3============================================================ */
-/* 3 BB Power Save */
-/* 3============================================================ */
-void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm)
-{
- struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
-
- pDM_PSTable->pre_cca_state = CCA_MAX;
- pDM_PSTable->cur_cca_state = CCA_MAX;
- pDM_PSTable->pre_rf_state = RF_MAX;
- pDM_PSTable->cur_rf_state = RF_MAX;
- pDM_PSTable->rssi_val_min = 0;
- pDM_PSTable->initialize = 0;
-}
-
-void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
-{
- struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
- u8 Rssi_Up_bound = 30;
- u8 Rssi_Low_bound = 25;
-
- if (pDM_PSTable->initialize == 0) {
- pDM_PSTable->reg_874 = (ODM_GetBBReg(pDM_Odm, 0x874, bMaskDWord) & 0x1CC000) >> 14;
- pDM_PSTable->reg_c70 = (ODM_GetBBReg(pDM_Odm, 0xc70, bMaskDWord) & BIT(3)) >> 3;
- pDM_PSTable->reg_85c = (ODM_GetBBReg(pDM_Odm, 0x85c, bMaskDWord) & 0xFF000000) >> 24;
- pDM_PSTable->reg_a74 = (ODM_GetBBReg(pDM_Odm, 0xa74, bMaskDWord) & 0xF000) >> 12;
- pDM_PSTable->initialize = 1;
- }
-
- if (!bForceInNormal) {
- if (pDM_Odm->RSSI_Min != 0xFF) {
- if (pDM_PSTable->pre_rf_state == RF_Normal) {
- if (pDM_Odm->RSSI_Min >= Rssi_Up_bound)
- pDM_PSTable->cur_rf_state = RF_Save;
- else
- pDM_PSTable->cur_rf_state = RF_Normal;
- } else {
- if (pDM_Odm->RSSI_Min <= Rssi_Low_bound)
- pDM_PSTable->cur_rf_state = RF_Normal;
- else
- pDM_PSTable->cur_rf_state = RF_Save;
- }
- } else {
- pDM_PSTable->cur_rf_state = RF_MAX;
- }
- } else {
- pDM_PSTable->cur_rf_state = RF_Normal;
- }
-
- if (pDM_PSTable->pre_rf_state != pDM_PSTable->cur_rf_state) {
- if (pDM_PSTable->cur_rf_state == RF_Save) {
- ODM_SetBBReg(pDM_Odm, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
- ODM_SetBBReg(pDM_Odm, 0xc70, BIT(3), 0); /* RegC70[3]=1'b0 */
- ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
- ODM_SetBBReg(pDM_Odm, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
- ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
- ODM_SetBBReg(pDM_Odm, 0x818, BIT(28), 0x0); /* Reg818[28]=1'b0 */
- ODM_SetBBReg(pDM_Odm, 0x818, BIT(28), 0x1); /* Reg818[28]=1'b1 */
- } else {
- ODM_SetBBReg(pDM_Odm, 0x874, 0x1CC000, pDM_PSTable->reg_874);
- ODM_SetBBReg(pDM_Odm, 0xc70, BIT(3), pDM_PSTable->reg_c70);
- ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, pDM_PSTable->reg_85c);
- ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, pDM_PSTable->reg_a74);
- ODM_SetBBReg(pDM_Odm, 0x818, BIT(28), 0x0);
- }
- pDM_PSTable->pre_rf_state = pDM_PSTable->cur_rf_state;
- }
-}
-
-/* 3============================================================ */
-/* 3 RATR MASK */
-/* 3============================================================ */
-/* 3============================================================ */
-/* 3 Rate Adaptive */
-/* 3============================================================ */
-
-void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm)
-{
- struct odm_rate_adapt *pOdmRA = &pDM_Odm->RateAdaptive;
-
- pOdmRA->RATRState = DM_RATR_STA_INIT;
- pOdmRA->HighRSSIThresh = 50;
- pOdmRA->LowRSSIThresh = 20;
-}
-
-u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u8 rssi_level)
-{
- struct sta_info *pEntry;
- u32 rate_bitmap = 0x0fffffff;
- u8 WirelessMode;
-
- pEntry = pDM_Odm->pODM_StaInfo[macid];
- if (!IS_STA_VALID(pEntry))
- return ra_mask;
-
- WirelessMode = pEntry->wireless_mode;
-
- switch (WirelessMode) {
- case ODM_WM_B:
- if (ra_mask & 0x0000000c) /* 11M or 5.5M enable */
- rate_bitmap = 0x0000000d;
- else
- rate_bitmap = 0x0000000f;
- break;
- case (ODM_WM_B | ODM_WM_G):
- if (rssi_level == DM_RATR_STA_HIGH)
- rate_bitmap = 0x00000f00;
- else if (rssi_level == DM_RATR_STA_MIDDLE)
- rate_bitmap = 0x00000ff0;
- else
- rate_bitmap = 0x00000ff5;
- break;
- case (ODM_WM_B | ODM_WM_G | ODM_WM_N24G):
- if (rssi_level == DM_RATR_STA_HIGH) {
- rate_bitmap = 0x000f0000;
- } else if (rssi_level == DM_RATR_STA_MIDDLE) {
- rate_bitmap = 0x000ff000;
- } else {
- if (*pDM_Odm->pBandWidth == ODM_BW40M)
- rate_bitmap = 0x000ff015;
- else
- rate_bitmap = 0x000ff005;
- }
- break;
- default:
- /* case WIRELESS_11_24N: */
- rate_bitmap = 0x0fffffff;
- break;
- }
-
- return rate_bitmap;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: odm_RefreshRateAdaptiveMask()
- *
- * Overview: Update rate table mask according to rssi
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/27/2009 hpfan Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm)
-{
- u8 i;
- struct adapter *pAdapter = pDM_Odm->Adapter;
-
- if (!(pDM_Odm->SupportAbility & ODM_BB_RA_MASK))
- return;
-
- if (pAdapter->bDriverStopped)
- return;
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
- if (IS_STA_VALID(pstat)) {
- if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level))
- rtw_hal_update_ra_mask(pAdapter, i, pstat->rssi_level);
- }
- }
-}
-
-/* Return Value: bool */
-/* - true: RATRState is changed. */
-bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate, u8 *pRATRState)
-{
- struct odm_rate_adapt *pRA = &pDM_Odm->RateAdaptive;
- const u8 GoUpGap = 5;
- u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
- u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
- u8 RATRState;
-
- /* Threshold Adjustment: */
- /* when RSSI state trends to go up one or two levels, make sure RSSI is high enough. */
- /* Here GoUpGap is added to solve the boundary's level alternation issue. */
- switch (*pRATRState) {
- case DM_RATR_STA_INIT:
- case DM_RATR_STA_HIGH:
- break;
- case DM_RATR_STA_MIDDLE:
- HighRSSIThreshForRA += GoUpGap;
- break;
- case DM_RATR_STA_LOW:
- HighRSSIThreshForRA += GoUpGap;
- LowRSSIThreshForRA += GoUpGap;
- break;
- default:
- break;
- }
-
- /* Decide RATRState by RSSI. */
- if (RSSI > HighRSSIThreshForRA)
- RATRState = DM_RATR_STA_HIGH;
- else if (RSSI > LowRSSIThreshForRA)
- RATRState = DM_RATR_STA_MIDDLE;
- else
- RATRState = DM_RATR_STA_LOW;
-
- if (*pRATRState != RATRState || bForceUpdate) {
- *pRATRState = RATRState;
- return true;
- }
- return false;
-}
-
-/* 3============================================================ */
-/* 3 RSSI Monitor */
-/* 3============================================================ */
-
static void FindMinimumRSSI(struct adapter *pAdapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *pHalData = &pAdapter->haldata;
struct dm_priv *pdmpriv = &pHalData->dmpriv;
struct mlme_priv *pmlmepriv = &pAdapter->mlmepriv;
@@ -819,10 +458,10 @@ static void FindMinimumRSSI(struct adapter *pAdapter)
pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
}
-void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
+static void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct dm_priv *pdmpriv = &pHalData->dmpriv;
int i;
int tmpEntryMaxPWDB = 0, tmpEntryMinPWDB = 0xff;
@@ -877,44 +516,15 @@ void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM);
}
-/* 3============================================================ */
-/* 3 Tx Power Tracking */
-/* 3============================================================ */
-
-void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm)
-{
- odm_TXPowerTrackingThermalMeterInit(pDM_Odm);
-}
-
-void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
+static void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
{
pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
- MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
-
pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
}
-void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
-
- if (!(pDM_Odm->SupportAbility & ODM_RF_TX_PWR_TRACK))
- return;
-
- if (!pDM_Odm->RFCalibrateInfo.TM_Trigger) { /* at least delay 1 sec */
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_T_METER_88E, BIT(17) | BIT(16), 0x03);
-
- pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
- return;
- } else {
- odm_TXPowerTrackingCallback_ThermalMeter_8188E(Adapter);
- pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
- }
-}
-
-void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
+static void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
{
if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
return;
@@ -922,7 +532,7 @@ void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
ODM_AntennaDiversityInit_88E(pDM_Odm);
}
-void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
+static void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
{
if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
return;
@@ -930,17 +540,15 @@ void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
ODM_AntennaDiversity_88E(pDM_Odm);
}
-/* EDCA Turbo */
-void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
+static void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
Adapter->recvpriv.bIsAnyNonBEPkts = false;
+}
-} /* ODM_InitEdcaTurbo */
-
-void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
+static void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
u32 trafficIndex;
@@ -948,7 +556,7 @@ void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
u64 cur_tx_bytes = 0;
u64 cur_rx_bytes = 0;
u8 bbtchange = false;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct xmit_priv *pxmitpriv = &Adapter->xmitpriv;
struct recv_priv *precvpriv = &Adapter->recvpriv;
struct registry_priv *pregpriv = &Adapter->registrypriv;
@@ -1004,7 +612,7 @@ void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
} else {
/* Turn Off EDCA turbo here. */
/* Restore original EDCA according to the declaration of AP. */
- if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
+ if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
rtw_write32(Adapter, REG_EDCA_BE_PARAM, pHalData->AcParam_BE);
pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
}
@@ -1016,3 +624,306 @@ dm_CheckEdcaTurbo_EXIT:
pxmitpriv->last_tx_bytes = pxmitpriv->tx_bytes;
precvpriv->last_rx_bytes = precvpriv->rx_bytes;
}
+
+/* 2011/09/21 MH Added to describe the resources that different teams need to allocate. */
+void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
+{
+ /* 2012.05.03 Luke: For all IC series */
+ odm_CommonInfoSelfInit(pDM_Odm);
+ odm_DIGInit(pDM_Odm);
+ odm_RateAdaptiveMaskInit(pDM_Odm);
+
+ odm_DynamicBBPowerSavingInit(pDM_Odm);
+ odm_TXPowerTrackingThermalMeterInit(pDM_Odm);
+ ODM_EdcaTurboInit(pDM_Odm);
+ ODM_RAInfo_Init_all(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_InitHybridAntDiv(pDM_Odm);
+}
+
+/* 2011/09/20 MH This is the entry point for all teams to execute HW outsourced DM. */
+/* Do not add any dummy functions here; only use the DM structure */
+/* to perform any new ODM_DM. */
+void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
+{
+ /* 2012.05.03 Luke: For all IC series */
+ odm_CommonInfoSelfUpdate(pDM_Odm);
+ odm_FalseAlarmCounterStatistics(pDM_Odm);
+ odm_RSSIMonitorCheck(pDM_Odm);
+
+ odm_DIG(pDM_Odm);
+ odm_CCKPacketDetectionThresh(pDM_Odm);
+
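+	/* In power-saving mode, skip rate adaptation, antenna diversity and Tx power tracking below. */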
+ if (*pDM_Odm->pbPowerSaving)
+ return;
+
+ odm_RefreshRateAdaptiveMask(pDM_Odm);
+
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_HwAntDiv(pDM_Odm);
+
+ ODM_TXPowerTrackingCheck(pDM_Odm);
+ odm_EdcaTurboCheck(pDM_Odm);
+}
+
+/* Init fixed HW values. Used only at init time. */
+void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u32 Value)
+{
+	/* This section is used for init values. */
+ switch (CmnInfo) {
+ /* Fixed ODM value. */
+ case ODM_CMNINFO_MP_TEST_CHIP:
+ pDM_Odm->bIsMPChip = (u8)Value;
+ break;
+ case ODM_CMNINFO_RF_ANTENNA_TYPE:
+ pDM_Odm->AntDivType = (u8)Value;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ /* Tx power tracking BB swing table. */
+	/* Base index = 12: index n < 12 raises tx pwr by ((12-n)/2) dB; index n = 13~?? lowers it by ((n-12)/2) dB. */
+	pDM_Odm->BbSwingIdxOfdm = 12; /* Set default value as index 12. */
+ pDM_Odm->BbSwingIdxOfdmCurrent = 12;
+ pDM_Odm->BbSwingFlagOfdm = false;
+}
+
+void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, void *pValue)
+{
+	/* Hook values by reference pointer so the ODM always reads the live driver state. */
+	switch (CmnInfo) {
+ case ODM_CMNINFO_WM_MODE:
+ pDM_Odm->pWirelessMode = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_SEC_CHNL_OFFSET:
+ pDM_Odm->pSecChOffset = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_BW:
+ pDM_Odm->pBandWidth = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_CHNL:
+ pDM_Odm->pChannel = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_SCAN:
+ pDM_Odm->pbScanInProcess = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_POWER_SAVING:
+ pDM_Odm->pbPowerSaving = (bool *)pValue;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
+/* Update Band/Channel/... The values are dynamic but not per-packet. */
+void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
+{
+	/* These values may change at run time. */
+ switch (CmnInfo) {
+ case ODM_CMNINFO_ABILITY:
+ pDM_Odm->SupportAbility = (u32)Value;
+ break;
+ case ODM_CMNINFO_LINK:
+ pDM_Odm->bLinked = (bool)Value;
+ break;
+ case ODM_CMNINFO_RSSI_MIN:
+ pDM_Odm->RSSI_Min = (u8)Value;
+ break;
+ }
+}
+
+void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct adapter *adapter = pDM_Odm->Adapter;
+
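+	/* Write the IGI to hardware only when it actually changes, to avoid redundant BB register accesses. */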
+ if (pDM_DigTable->CurIGValue != CurrentIGI) {
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N, CurrentIGI);
+ pDM_DigTable->CurIGValue = CurrentIGI;
+ }
+}
+
+void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
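+	/* The register write is skipped when the threshold is unchanged; the cached value is refreshed either way. */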
+ if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres) /* modify by Guo.Mingzhi 2012-01-03 */
+ rtw_write8(pDM_Odm->Adapter, ODM_REG_CCK_CCA_11N, CurCCK_CCAThres);
+ pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
+}
+
+void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
+{
+ struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
+ struct adapter *adapter = pDM_Odm->Adapter;
+ u8 Rssi_Up_bound = 30;
+ u8 Rssi_Low_bound = 25;
+
+ if (pDM_PSTable->initialize == 0) {
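+		/* Cache the original register fields once so they can be restored when leaving the save state. */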
+ pDM_PSTable->reg_874 = (rtl8188e_PHY_QueryBBReg(adapter, 0x874, bMaskDWord) & 0x1CC000) >> 14;
+ pDM_PSTable->reg_c70 = (rtl8188e_PHY_QueryBBReg(adapter, 0xc70, bMaskDWord) & BIT(3)) >> 3;
+ pDM_PSTable->reg_85c = (rtl8188e_PHY_QueryBBReg(adapter, 0x85c, bMaskDWord) & 0xFF000000) >> 24;
+ pDM_PSTable->reg_a74 = (rtl8188e_PHY_QueryBBReg(adapter, 0xa74, bMaskDWord) & 0xF000) >> 12;
+ pDM_PSTable->initialize = 1;
+ }
+
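+	/* Hysteresis: enter the RF save state at RSSI_Min >= 30 and return to normal only at RSSI_Min <= 25. */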
+ if (!bForceInNormal) {
+ if (pDM_Odm->RSSI_Min != 0xFF) {
+ if (pDM_PSTable->pre_rf_state == RF_Normal) {
+ if (pDM_Odm->RSSI_Min >= Rssi_Up_bound)
+ pDM_PSTable->cur_rf_state = RF_Save;
+ else
+ pDM_PSTable->cur_rf_state = RF_Normal;
+ } else {
+ if (pDM_Odm->RSSI_Min <= Rssi_Low_bound)
+ pDM_PSTable->cur_rf_state = RF_Normal;
+ else
+ pDM_PSTable->cur_rf_state = RF_Save;
+ }
+ } else {
+ pDM_PSTable->cur_rf_state = RF_MAX;
+ }
+ } else {
+ pDM_PSTable->cur_rf_state = RF_Normal;
+ }
+
+ if (pDM_PSTable->pre_rf_state != pDM_PSTable->cur_rf_state) {
+ if (pDM_PSTable->cur_rf_state == RF_Save) {
+ rtl8188e_PHY_SetBBReg(adapter, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
+ rtl8188e_PHY_SetBBReg(adapter, 0xc70, BIT(3), 0); /* RegC70[3]=1'b0 */
+ rtl8188e_PHY_SetBBReg(adapter, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
+ rtl8188e_PHY_SetBBReg(adapter, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
+ rtl8188e_PHY_SetBBReg(adapter, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
+ rtl8188e_PHY_SetBBReg(adapter, 0x818, BIT(28), 0x0); /* Reg818[28]=1'b0 */
+ rtl8188e_PHY_SetBBReg(adapter, 0x818, BIT(28), 0x1); /* Reg818[28]=1'b1 */
+ } else {
+ rtl8188e_PHY_SetBBReg(adapter, 0x874, 0x1CC000, pDM_PSTable->reg_874);
+ rtl8188e_PHY_SetBBReg(adapter, 0xc70, BIT(3), pDM_PSTable->reg_c70);
+ rtl8188e_PHY_SetBBReg(adapter, 0x85c, 0xFF000000, pDM_PSTable->reg_85c);
+ rtl8188e_PHY_SetBBReg(adapter, 0xa74, 0xF000, pDM_PSTable->reg_a74);
+ rtl8188e_PHY_SetBBReg(adapter, 0x818, BIT(28), 0x0);
+ }
+ pDM_PSTable->pre_rf_state = pDM_PSTable->cur_rf_state;
+ }
+}
+
+u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u8 rssi_level)
+{
+ struct sta_info *pEntry;
+ u32 rate_bitmap = 0x0fffffff;
+ u8 WirelessMode;
+
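+	/* Rate bitmap layout: bits 0-3 = CCK 1M/2M/5.5M/11M, bits 4-11 = OFDM 6M-54M, bits 12-27 = MCS0-15. */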
+ pEntry = pDM_Odm->pODM_StaInfo[macid];
+ if (!IS_STA_VALID(pEntry))
+ return ra_mask;
+
+ WirelessMode = pEntry->wireless_mode;
+
+ switch (WirelessMode) {
+ case ODM_WM_B:
+ if (ra_mask & 0x0000000c) /* 11M or 5.5M enable */
+ rate_bitmap = 0x0000000d;
+ else
+ rate_bitmap = 0x0000000f;
+ break;
+ case (ODM_WM_B | ODM_WM_G):
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x00000f00;
+ else if (rssi_level == DM_RATR_STA_MIDDLE)
+ rate_bitmap = 0x00000ff0;
+ else
+ rate_bitmap = 0x00000ff5;
+ break;
+ case (ODM_WM_B | ODM_WM_G | ODM_WM_N24G):
+ if (rssi_level == DM_RATR_STA_HIGH) {
+ rate_bitmap = 0x000f0000;
+ } else if (rssi_level == DM_RATR_STA_MIDDLE) {
+ rate_bitmap = 0x000ff000;
+ } else {
+ if (*pDM_Odm->pBandWidth == ODM_BW40M)
+ rate_bitmap = 0x000ff015;
+ else
+ rate_bitmap = 0x000ff005;
+ }
+ break;
+ default:
+ /* case WIRELESS_11_24N: */
+ rate_bitmap = 0x0fffffff;
+ break;
+ }
+
+ return rate_bitmap;
+}
+
+/* Return Value: bool */
+/* - true: RATRState is changed. */
+bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate, u8 *pRATRState)
+{
+ struct odm_rate_adapt *pRA = &pDM_Odm->RateAdaptive;
+ const u8 GoUpGap = 5;
+ u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
+ u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
+ u8 RATRState;
+
+	/* Threshold adjustment: */
+	/* before the RSSI state may move up one or two levels, make sure the RSSI is actually high enough. */
+	/* GoUpGap adds hysteresis so the state does not oscillate around a level boundary. */
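+	/* Example: with HighRSSIThresh = 50 and GoUpGap = 5, a station in the MIDDLE state moves up to HIGH only once RSSI exceeds 55. */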
+ switch (*pRATRState) {
+ case DM_RATR_STA_INIT:
+ case DM_RATR_STA_HIGH:
+ break;
+ case DM_RATR_STA_MIDDLE:
+ HighRSSIThreshForRA += GoUpGap;
+ break;
+ case DM_RATR_STA_LOW:
+ HighRSSIThreshForRA += GoUpGap;
+ LowRSSIThreshForRA += GoUpGap;
+ break;
+ default:
+ break;
+ }
+
+ /* Decide RATRState by RSSI. */
+ if (RSSI > HighRSSIThreshForRA)
+ RATRState = DM_RATR_STA_HIGH;
+ else if (RSSI > LowRSSIThreshForRA)
+ RATRState = DM_RATR_STA_MIDDLE;
+ else
+ RATRState = DM_RATR_STA_LOW;
+
+ if (*pRATRState != RATRState || bForceUpdate) {
+ *pRATRState = RATRState;
+ return true;
+ }
+ return false;
+}
+
+void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+
+ if (!(pDM_Odm->SupportAbility & ODM_RF_TX_PWR_TRACK))
+ return;
+
+ if (!pDM_Odm->RFCalibrateInfo.TM_Trigger) { /* at least delay 1 sec */
+ rtl8188e_PHY_SetRFReg(Adapter, RF_PATH_A, RF_T_METER_88E, BIT(17) | BIT(16), 0x03);
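+		/* The thermal meter measurement just started is consumed on the next watchdog pass. */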
+
+ pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
+ return;
+	}
+
+	odm_TXPowerTrackingCallback_ThermalMeter_8188E(Adapter);
+	pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
+}
diff --git a/drivers/staging/r8188eu/hal/odm_HWConfig.c b/drivers/staging/r8188eu/hal/odm_HWConfig.c
index 3125886e6731..d5212a166dd2 100644
--- a/drivers/staging/r8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/r8188eu/hal/odm_HWConfig.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
+#include "../include/drv_types.h"
#define READ_AND_CONFIG READ_AND_CONFIG_MP
@@ -251,17 +251,7 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
/* Smart Antenna Debug Message------------------ */
- if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
- if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
- if (pPktinfo->bPacketToSelf) {
- antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) |
- (pDM_FatTable->antsel_rx_keep_1 << 1) |
- pDM_FatTable->antsel_rx_keep_0;
- pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
- pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
- }
- }
- } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
+ if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) |
(pDM_FatTable->antsel_rx_keep_1 << 1) | pDM_FatTable->antsel_rx_keep_0;
@@ -368,10 +358,8 @@ void ODM_PhyStatusQuery(struct odm_dm_struct *dm_odm,
struct odm_per_pkt_info *pPktinfo,
struct adapter *adapt)
{
- odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus,
- pPktinfo, adapt);
- if (!dm_odm->RSSI_test)
- odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
+ odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus, pPktinfo, adapt);
+ odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
}
enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm,
diff --git a/drivers/staging/r8188eu/hal/odm_RTL8188E.c b/drivers/staging/r8188eu/hal/odm_RTL8188E.c
index e7a765f375d6..c8a3c521bd60 100644
--- a/drivers/staging/r8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/r8188eu/hal/odm_RTL8188E.c
@@ -1,130 +1,98 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
+#include "../include/drv_types.h"
static void odm_RX_HWAntDivInit(struct odm_dm_struct *dm_odm)
{
+ struct adapter *adapter = dm_odm->Adapter;
u32 value32;
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
- ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
/* Pin Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT(22), 1); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(22), 1); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
/* OFDM Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
/* CCK Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT(7), 1); /* Fix CCK PHY status report issue */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /* CCK complete HW AntDiv within 64 samples */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_BB_PWR_SAV4_11N, BIT(7), 1); /* Fix CCK PHY status report issue */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /* CCK complete HW AntDiv within 64 samples */
ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
- ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
}
static void odm_TRX_HWAntDivInit(struct odm_dm_struct *dm_odm)
{
+ struct adapter *adapter = dm_odm->Adapter;
u32 value32;
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
- ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
/* Pin Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT(22), 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(22), 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
/* OFDM Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
/* CCK Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT(7), 1); /* Fix CCK PHY status report issue */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /* CCK complete HW AntDiv within 64 samples */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_BB_PWR_SAV4_11N, BIT(7), 1); /* Fix CCK PHY status report issue */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /* CCK complete HW AntDiv within 64 samples */
/* Tx Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 0); /* Reg80c[21]=1'b0 from TX Reg */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 0); /* Reg80c[21]=1'b0 from TX Reg */
ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
/* antenna mapping table */
if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT(10) | BIT(9) | BIT(8), 1); /* Reg858[10:8]=3'b001 */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT(13) | BIT(12) | BIT(11), 2); /* Reg858[13:11]=3'b010 */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_DEFUALT_A_11N, BIT(10) | BIT(9) | BIT(8), 1); /* Reg858[10:8]=3'b001 */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_DEFUALT_A_11N, BIT(13) | BIT(12) | BIT(11), 2); /* Reg858[13:11]=3'b010 */
} else { /* MPchip */
- ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
}
}
static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
{
- u32 value32, i;
- struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
- u32 AntCombination = 2;
-
- for (i = 0; i < 6; i++) {
- dm_fat_tbl->Bssid[i] = 0;
- dm_fat_tbl->antSumRSSI[i] = 0;
- dm_fat_tbl->antRSSIcnt[i] = 0;
- dm_fat_tbl->antAveRSSI[i] = 0;
- }
- dm_fat_tbl->TrainIdx = 0;
- dm_fat_tbl->FAT_State = FAT_NORMAL_STATE;
+ struct adapter *adapter = dm_odm->Adapter;
+ u32 value32;
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, 0x4c, bMaskDWord);
- ODM_SetMACReg(dm_odm, 0x4c, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
- value32 = ODM_GetMACReg(dm_odm, 0x7B4, bMaskDWord);
- ODM_SetMACReg(dm_odm, 0x7b4, bMaskDWord, value32 | (BIT(16) | BIT(17))); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
+ value32 = rtl8188e_PHY_QueryBBReg(adapter, 0x4c, bMaskDWord);
+ rtl8188e_PHY_SetBBReg(adapter, 0x4c, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = rtl8188e_PHY_QueryBBReg(adapter, 0x7B4, bMaskDWord);
+ rtl8188e_PHY_SetBBReg(adapter, 0x7b4, bMaskDWord, value32 | (BIT(16) | BIT(17))); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
/* Match MAC ADDR */
- ODM_SetMACReg(dm_odm, 0x7b4, 0xFFFF, 0);
- ODM_SetMACReg(dm_odm, 0x7b0, bMaskDWord, 0);
+ rtl8188e_PHY_SetBBReg(adapter, 0x7b4, 0xFFFF, 0);
+ rtl8188e_PHY_SetBBReg(adapter, 0x7b0, bMaskDWord, 0);
- ODM_SetBBReg(dm_odm, 0x870, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, 0x864, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, 0xb2c, BIT(22), 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, 0xb2c, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
- ODM_SetBBReg(dm_odm, 0xca4, bMaskDWord, 0x000000a0);
+ rtl8188e_PHY_SetBBReg(adapter, 0x870, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ rtl8188e_PHY_SetBBReg(adapter, 0xb2c, BIT(22), 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ rtl8188e_PHY_SetBBReg(adapter, 0xb2c, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
+ rtl8188e_PHY_SetBBReg(adapter, 0xca4, bMaskDWord, 0x000000a0);
- /* antenna mapping table */
- if (AntCombination == 2) {
- if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, 0x858, BIT(10) | BIT(9) | BIT(8), 1); /* Reg858[10:8]=3'b001 */
- ODM_SetBBReg(dm_odm, 0x858, BIT(13) | BIT(12) | BIT(11), 2); /* Reg858[13:11]=3'b010 */
- } else { /* MPchip */
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 1);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 2);
- }
- } else if (AntCombination == 7) {
- if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, 0x858, BIT(10) | BIT(9) | BIT(8), 0); /* Reg858[10:8]=3'b000 */
- ODM_SetBBReg(dm_odm, 0x858, BIT(13) | BIT(12) | BIT(11), 1); /* Reg858[13:11]=3'b001 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(16), 0);
- ODM_SetBBReg(dm_odm, 0x858, BIT(15) | BIT(14), 2); /* Reg878[0],Reg858[14:15])=3'b010 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(19) | BIT(18) | BIT(17), 3);/* Reg878[3:1]=3b'011 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(22) | BIT(21) | BIT(20), 4);/* Reg878[6:4]=3b'100 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(25) | BIT(24) | BIT(23), 5);/* Reg878[9:7]=3b'101 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(28) | BIT(27) | BIT(26), 6);/* Reg878[12:10]=3b'110 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(31) | BIT(30) | BIT(29), 7);/* Reg878[15:13]=3b'111 */
- } else { /* MPchip */
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 0);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 1);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte2, 2);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte3, 3);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte0, 4);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte1, 5);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte2, 6);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte3, 7);
- }
+ if (!dm_odm->bIsMPChip) { /* testchip */
+ rtl8188e_PHY_SetBBReg(adapter, 0x858, BIT(10) | BIT(9) | BIT(8), 1); /* Reg858[10:8]=3'b001 */
+ rtl8188e_PHY_SetBBReg(adapter, 0x858, BIT(13) | BIT(12) | BIT(11), 2); /* Reg858[13:11]=3'b010 */
+ } else { /* MPchip */
+ rtl8188e_PHY_SetBBReg(adapter, 0x914, bMaskByte0, 1);
+ rtl8188e_PHY_SetBBReg(adapter, 0x914, bMaskByte1, 2);
}
/* Default Ant Setting when no fast training */
- ODM_SetBBReg(dm_odm, 0x80c, BIT(21), 1); /* Reg80c[21]=1'b1 from TX Info */
- ODM_SetBBReg(dm_odm, 0x864, BIT(5) | BIT(4) | BIT(3), 0); /* Default RX */
- ODM_SetBBReg(dm_odm, 0x864, BIT(8) | BIT(7) | BIT(6), 1); /* Optional RX */
+ rtl8188e_PHY_SetBBReg(adapter, 0x80c, BIT(21), 1); /* Reg80c[21]=1'b1 from TX Info */
+ rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(5) | BIT(4) | BIT(3), 0); /* Default RX */
+ rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(8) | BIT(7) | BIT(6), 1); /* Optional RX */
- /* Enter Traing state */
- ODM_SetBBReg(dm_odm, 0x864, BIT(2) | BIT(1) | BIT(0), (AntCombination - 1)); /* Reg864[2:0]=3'd6 ant combination=reg864[2:0]+1 */
- ODM_SetBBReg(dm_odm, 0xc50, BIT(7), 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+ /* Enter Training state */
+ rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(2) | BIT(1) | BIT(0), 1);
+ rtl8188e_PHY_SetBBReg(adapter, 0xc50, BIT(7), 1); /* RegC50[7]=1'b1 enable HW AntDiv */
}
void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
@@ -140,6 +108,7 @@ void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
{
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ struct adapter *adapter = dm_odm->Adapter;
u32 DefaultAnt, OptionalAnt;
if (dm_fat_tbl->RxIdleAnt != Ant) {
@@ -152,13 +121,13 @@ void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
}
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), DefaultAnt); /* Default RX */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), OptionalAnt); /* Optional RX */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTSEL_CTRL_11N, BIT(14) | BIT(13) | BIT(12), DefaultAnt); /* Default TX */
- ODM_SetMACReg(dm_odm, ODM_REG_RESP_TX_11N, BIT(6) | BIT(7), DefaultAnt); /* Resp Tx */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), DefaultAnt); /* Default RX */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), OptionalAnt); /* Optional RX */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTSEL_CTRL_11N, BIT(14) | BIT(13) | BIT(12), DefaultAnt); /* Default TX */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RESP_TX_11N, BIT(6) | BIT(7), DefaultAnt); /* Resp Tx */
} else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), DefaultAnt); /* Default RX */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), OptionalAnt); /* Optional RX */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), DefaultAnt); /* Default RX */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), OptionalAnt); /* Optional RX */
}
}
dm_fat_tbl->RxIdleAnt = Ant;
@@ -267,42 +236,29 @@ static void odm_HWAntDiv(struct odm_dm_struct *dm_odm)
void ODM_AntennaDiversity_88E(struct odm_dm_struct *dm_odm)
{
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ struct adapter *adapter = dm_odm->Adapter;
+
if (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV))
return;
if (!dm_odm->bLinked) {
if (dm_fat_tbl->bBecomeLinked) {
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT(7), 0); /* RegC50[7]=1'b1 enable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 0); /* Enable CCK AntDiv */
+			rtl8188e_PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT(7), 0); /* RegC50[7]=1'b0 disable HW AntDiv */
+			rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 0); /* Disable CCK AntDiv */
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 0); /* Reg80c[21]=1'b0 from TX Reg */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 0); /* Reg80c[21]=1'b0 from TX Reg */
dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
}
return;
} else {
if (!dm_fat_tbl->bBecomeLinked) {
/* Because HW AntDiv is disabled before Link, we enable HW AntDiv after link */
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT(7), 1); /* RegC50[7]=1'b1 enable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 1); /* Enable CCK AntDiv */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT(7), 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 1); /* Enable CCK AntDiv */
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 1); /* Reg80c[21]=1'b1 from TX Info */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 1); /* Reg80c[21]=1'b1 from TX Info */
dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
}
}
if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV))
odm_HWAntDiv(dm_odm);
}
-
-/* 3============================================================ */
-/* 3 Dynamic Primary CCA */
-/* 3============================================================ */
-
-void odm_PrimaryCCA_Init(struct odm_dm_struct *dm_odm)
-{
- struct dyn_primary_cca *PrimaryCCA = &dm_odm->DM_PriCCA;
-
- PrimaryCCA->dup_rts_flag = 0;
- PrimaryCCA->intf_flag = 0;
- PrimaryCCA->intf_type = 0;
- PrimaryCCA->monitor_flag = 0;
- PrimaryCCA->pri_cca_flag = 0;
-}
diff --git a/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
index 5f6f0ae5196e..5fb5a88314ed 100644
--- a/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
+++ b/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
@@ -1,28 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
+#include "../include/drv_types.h"
-void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Data, enum rf_radio_path RF_PATH,
- u32 RegAddr)
+static void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Data, enum rf_radio_path RF_PATH,
+ u32 RegAddr)
{
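+	/* Addresses 0xf9-0xffe in the RF config tables are pseudo entries that encode delays rather than register writes. */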
- if (Addr == 0xffe) {
- ODM_sleep_ms(50);
+ if (Addr == 0xffe) {
+ msleep(50);
} else if (Addr == 0xfd) {
- ODM_delay_ms(5);
+ mdelay(5);
} else if (Addr == 0xfc) {
- ODM_delay_ms(1);
+ mdelay(1);
} else if (Addr == 0xfb) {
- ODM_delay_us(50);
+ udelay(50);
} else if (Addr == 0xfa) {
- ODM_delay_us(5);
+ udelay(5);
} else if (Addr == 0xf9) {
- ODM_delay_us(1);
+ udelay(1);
} else {
- ODM_SetRFReg(pDM_Odm, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
+ rtl8188e_PHY_SetRFReg(pDM_Odm->Adapter, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
}
}
@@ -36,31 +36,31 @@ void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data
void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
{
- ODM_Write1Byte(pDM_Odm, Addr, Data);
+ rtw_write8(pDM_Odm->Adapter, Addr, Data);
}
void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
{
- ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
}
void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
u32 Bitmask, u32 Data)
{
if (Addr == 0xfe)
- ODM_sleep_ms(50);
+ msleep(50);
else if (Addr == 0xfd)
- ODM_delay_ms(5);
+ mdelay(5);
else if (Addr == 0xfc)
- ODM_delay_ms(1);
+ mdelay(1);
else if (Addr == 0xfb)
- ODM_delay_us(50);
+ udelay(50);
else if (Addr == 0xfa)
- ODM_delay_us(5);
+ udelay(5);
else if (Addr == 0xf9)
- ODM_delay_us(1);
+ udelay(1);
else
storePwrIndexDiffRateOffset(pDM_Odm->Adapter, Addr, Bitmask, Data);
}
@@ -68,23 +68,23 @@ void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
{
if (Addr == 0xfe) {
- ODM_sleep_ms(50);
+ msleep(50);
} else if (Addr == 0xfd) {
- ODM_delay_ms(5);
+ mdelay(5);
} else if (Addr == 0xfc) {
- ODM_delay_ms(1);
+ mdelay(1);
} else if (Addr == 0xfb) {
- ODM_delay_us(50);
+ udelay(50);
} else if (Addr == 0xfa) {
- ODM_delay_us(5);
+ udelay(5);
} else if (Addr == 0xf9) {
- ODM_delay_us(1);
+ udelay(1);
} else {
if (Addr == 0xa24)
pDM_Odm->RFCalibrateInfo.RegA24 = Data;
- ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
}
}
diff --git a/drivers/staging/r8188eu/hal/odm_debug.c b/drivers/staging/r8188eu/hal/odm_debug.c
index 7029ec4f771e..7a134229fe39 100644
--- a/drivers/staging/r8188eu/hal/odm_debug.c
+++ b/drivers/staging/r8188eu/hal/odm_debug.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
+#include "../include/rtw_debug.h"
u32 GlobalDebugLevel;
diff --git a/drivers/staging/r8188eu/hal/odm_interface.c b/drivers/staging/r8188eu/hal/odm_interface.c
deleted file mode 100644
index 7ddba39a0f4b..000000000000
--- a/drivers/staging/r8188eu/hal/odm_interface.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/odm_precomp.h"
-/* ODM IO Relative API. */
-
-u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read8(Adapter, RegAddr);
-}
-
-u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read32(Adapter, RegAddr);
-}
-
-void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write8(Adapter, RegAddr, Data);
-}
-
-void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write16(Adapter, RegAddr, Data);
-}
-
-void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write32(Adapter, RegAddr, Data);
-}
-
-void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
-}
-
-void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
-}
-
-void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum rf_radio_path eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum rf_radio_path eRFPath, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask);
-}
-
-/* ODM Memory relative API. */
-s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2, u32 length)
-{
- return !memcmp(pBuf1, pBuf2, length);
-}
-
-/* ODM Timer relative API. */
-void ODM_delay_ms(u32 ms)
-{
- mdelay(ms);
-}
-
-void ODM_delay_us(u32 us)
-{
- udelay(us);
-}
-
-void ODM_sleep_ms(u32 ms)
-{
- msleep(ms);
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
index e44bcde92cc3..a491c37777df 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
@@ -49,7 +49,7 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
u8 h2c_box_num;
u32 msgbox_addr;
u32 msgbox_ex_addr;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
u8 cmd_idx, ext_cmd_len;
u32 h2c_cmd = 0;
u32 h2c_cmd_ex = 0;
@@ -104,7 +104,7 @@ u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
{
u8 buf[3];
u8 res = _SUCCESS;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
if (haldata->fw_ractrl) {
__le32 lmask;
@@ -128,7 +128,7 @@ u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
/* arg[5] = Short GI */
void rtl8188e_Add_RateATid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_level)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *haldata = &pAdapter->haldata;
u8 macid, raid, short_gi_rate = false;
@@ -440,7 +440,6 @@ void CheckFwRsvdPageContent(struct adapter *Adapter)
/* 2009.10.15 by tynli. */
static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
{
- struct hal_data_8188e *haldata;
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
struct xmit_priv *pxmitpriv;
@@ -461,7 +460,6 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
return;
}
- haldata = GET_HAL_DATA(adapt);
pxmitpriv = &adapt->xmitpriv;
pmlmeext = &adapt->mlmeextpriv;
pmlmeinfo = &pmlmeext->mlmext_info;
@@ -480,7 +478,6 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
if (PageNeed == 1)
PageNeed += 1;
PageNum += PageNeed;
- haldata->FwRsvdPageStartOffset = PageNum;
BufIndex += PageNeed * 128;
@@ -547,7 +544,7 @@ exit:
void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
bool bSendBeacon = false;
@@ -642,7 +639,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct wifidirect_info *pwdinfo = &adapt->wdinfo;
struct P2P_PS_Offload_t *p2p_ps_offload = &haldata->p2p_ps_offload;
u8 i;
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_dm.c b/drivers/staging/r8188eu/hal/rtl8188e_dm.c
index 5d76f6ea91c4..bd6eb3878060 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_dm.c
@@ -24,7 +24,7 @@ static void dm_InitGPIOSetting(struct adapter *Adapter)
/* */
static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct dm_priv *pdmpriv = &hal_data->dmpriv;
struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
@@ -48,18 +48,14 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
struct dm_priv *pdmpriv = &hal_data->dmpriv;
int i;
- pdmpriv->InitODMFlag = ODM_BB_DIG |
- ODM_BB_RA_MASK |
- ODM_BB_DYNAMIC_TXPWR |
- ODM_BB_FA_CNT |
+ pdmpriv->InitODMFlag = ODM_BB_FA_CNT |
ODM_BB_RSSI_MONITOR |
ODM_BB_CCK_PD |
- ODM_BB_PWR_SAVE |
ODM_MAC_EDCA_TURBO |
ODM_RF_CALIBRATION |
ODM_RF_TX_PWR_TRACK;
@@ -68,14 +64,10 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_TX_UNI, &Adapter->xmitpriv.tx_bytes);
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_RX_UNI, &Adapter->recvpriv.rx_bytes);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_WM_MODE, &pmlmeext->cur_wireless_mode);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SEC_CHNL_OFFSET, &hal_data->nCur40MhzPrimeSC);
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SEC_MODE, &Adapter->securitypriv.dot11PrivacyAlgrthm);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_BW, &hal_data->CurrentChannelBW);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_CHNL, &hal_data->CurrentChannel);
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_NET_CLOSED, &Adapter->net_closed);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SCAN, &pmlmepriv->bScanInProcess);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_POWER_SAVING, &pwrctrlpriv->bpower_saving);
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
@@ -86,19 +78,18 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
void rtl8188e_InitHalDm(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
dm_InitGPIOSetting(Adapter);
Update_ODM_ComInfo_88E(Adapter);
ODM_DMInit(dm_odm);
- Adapter->fix_rate = 0xFF;
}
void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
{
u8 hw_init_completed = Adapter->hw_init_completed;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
u8 bLinked = false;
@@ -120,7 +111,7 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
void rtl8188e_init_dm_priv(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct dm_priv *pdmpriv = &hal_data->dmpriv;
memset(pdmpriv, 0, sizeof(struct dm_priv));
@@ -131,7 +122,7 @@ void rtl8188e_init_dm_priv(struct adapter *Adapter)
/* Compare RSSI for deciding antenna */
void AntDivCompare8188E(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
if (0 != hal_data->AntDivCfg) {
/* select optimum_antenna for before linked =>For antenna diversity */
@@ -145,7 +136,7 @@ void AntDivCompare8188E(struct adapter *Adapter, struct wlan_bssid_ex *dst, stru
/* Add new function to reset the state of antenna diversity before link. */
u8 AntDivBeforeLink8188E(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
struct sw_ant_switch *dm_swat_tbl = &dm_odm->DM_SWAT_Table;
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
index 8c00f2dd67da..b818872e0d19 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
@@ -508,8 +508,6 @@ static s32 _FWFreeToGo(struct adapter *padapter)
return _FAIL;
}
-#define IS_FW_81xxC(padapter) (((GET_HAL_DATA(padapter))->FirmwareSignature & 0xFFF0) == 0x88C0)
-
static int load_firmware(struct rt_firmware *pFirmware, struct device *device)
{
s32 rtStatus = _SUCCESS;
@@ -541,7 +539,7 @@ static int load_firmware(struct rt_firmware *pFirmware, struct device *device)
memcpy(pFirmware->szFwBuffer, fw->data, fw->size);
pFirmware->ulFwLength = fw->size;
release_firmware(fw);
- DBG_88E_LEVEL(_drv_info_, "+%s: !bUsedWoWLANFw, FmrmwareLen:%d+\n", __func__, pFirmware->ulFwLength);
+	dev_dbg(device, "!bUsedWoWLANFw, FirmwareLen:%u\n", pFirmware->ulFwLength);
Exit:
return rtStatus;
@@ -552,7 +550,7 @@ s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
s32 rtStatus = _SUCCESS;
u8 writeFW_retry = 0;
u32 fwdl_start_time;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = &padapter->haldata;
struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
struct device *device = dvobj_to_dev(dvobj);
struct rt_firmware_hdr *pFwHdr = NULL;
@@ -628,7 +626,7 @@ Exit:
void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = &padapter->haldata;
/* Init Fw LPS related. */
padapter->pwrctrlpriv.bFwCurrentInPSMode = false;
@@ -637,12 +635,6 @@ void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
pHalData->LastHMEBoxNum = 0;
}
-void rtl8188e_free_hal_data(struct adapter *padapter)
-{
- kfree(padapter->HalData);
- padapter->HalData = NULL;
-}
-
/* */
/* Efuse related code */
/* */
@@ -651,31 +643,8 @@ enum{
LDOE25_SHIFT = 28,
};
-static bool
-hal_EfusePgPacketWrite2ByteHeader(
- struct adapter *pAdapter,
- u8 efuseType,
- u16 *pAddr,
- struct pgpkt *pTargetPkt,
- bool bPseudoTest);
-static bool
-hal_EfusePgPacketWrite1ByteHeader(
- struct adapter *pAdapter,
- u8 efuseType,
- u16 *pAddr,
- struct pgpkt *pTargetPkt,
- bool bPseudoTest);
-static bool
-hal_EfusePgPacketWriteData(
- struct adapter *pAdapter,
- u8 efuseType,
- u16 *pAddr,
- struct pgpkt *pTargetPkt,
- bool bPseudoTest);
-
-void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 bWrite, u8 PwrState)
+void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState)
{
- u8 tempval;
u16 tmpV16;
if (PwrState) {
@@ -700,31 +669,15 @@ void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 bWrite, u8 PwrState)
tmpV16 |= (LOADER_CLK_EN | ANA8M);
rtw_write16(pAdapter, REG_SYS_CLKR, tmpV16);
}
-
- if (bWrite) {
- /* Enable LDO 2.5V before read/write action */
- tempval = rtw_read8(pAdapter, EFUSE_TEST + 3);
- tempval &= 0x0F;
- tempval |= (VOLTAGE_V25 << 4);
- rtw_write8(pAdapter, EFUSE_TEST + 3, (tempval | 0x80));
- }
} else {
rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
-
- if (bWrite) {
- /* Disable LDO 2.5V after read/write action */
- tempval = rtw_read8(pAdapter, EFUSE_TEST + 3);
- rtw_write8(pAdapter, EFUSE_TEST + 3, (tempval & 0x7F));
- }
}
}
static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
u16 _offset,
u16 _size_byte,
- u8 *pbuf,
- bool bPseudoTest
- )
+ u8 *pbuf)
{
u8 *efuseTbl = NULL;
u8 rtemp8[1];
@@ -764,7 +717,7 @@ static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
/* 1. Read the first byte to check if efuse is empty!!! */
/* */
/* */
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
if (*rtemp8 != 0xFF) {
efuse_utilized++;
eFuse_Addr++;
@@ -781,11 +734,11 @@ static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
if ((*rtemp8 & 0x1F) == 0x0F) { /* extended header */
u1temp = ((*rtemp8 & 0xE0) >> 5);
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
if ((*rtemp8 & 0x0F) == 0x0F) {
eFuse_Addr++;
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
if (*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E))
eFuse_Addr++;
@@ -806,13 +759,13 @@ static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
/* Check word enable condition in the section */
if (!(wren & 0x01)) {
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
eFuse_Addr++;
efuse_utilized++;
eFuseWord[offset][i] = (*rtemp8 & 0xff);
if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
break;
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
eFuse_Addr++;
efuse_utilized++;
eFuseWord[offset][i] |= (((u16)*rtemp8 << 8) & 0xff00);
@@ -824,7 +777,7 @@ static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
}
/* Read next PG header */
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
if (*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
efuse_utilized++;
@@ -852,804 +805,37 @@ exit:
kfree(eFuseWord);
}
-static void ReadEFuseByIC(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool bPseudoTest)
-{
- if (!bPseudoTest) {
- int ret = _FAIL;
- if (rtw_IOL_applied(Adapter)) {
- rtl8188eu_InitPowerOn(Adapter);
-
- iol_mode_enable(Adapter, 1);
- ret = iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
- iol_mode_enable(Adapter, 0);
-
- if (_SUCCESS == ret)
- goto exit;
- }
- }
- Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf, bPseudoTest);
-
-exit:
- return;
-}
-
-static void ReadEFuse_Pseudo(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool bPseudoTest)
+static void ReadEFuseByIC(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf)
{
- Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf, bPseudoTest);
-}
-
-void rtl8188e_ReadEFuse(struct adapter *Adapter, u8 efuseType,
- u16 _offset, u16 _size_byte, u8 *pbuf,
- bool bPseudoTest)
-{
- if (bPseudoTest)
- ReadEFuse_Pseudo(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
- else
- ReadEFuseByIC(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
-}
-
-/* Do not support BT */
-static void Hal_EFUSEGetEfuseDefinition88E(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut)
-{
- switch (type) {
- case TYPE_EFUSE_MAX_SECTION:
- {
- u8 *pMax_section;
- pMax_section = (u8 *)pOut;
- *pMax_section = EFUSE_MAX_SECTION_88E;
- }
- break;
- case TYPE_EFUSE_REAL_CONTENT_LEN:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
- }
- break;
- case TYPE_EFUSE_CONTENT_LEN_BANK:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
- }
- break;
- case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- case TYPE_EFUSE_MAP_LEN:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
- }
- break;
- case TYPE_EFUSE_PROTECT_BYTES_BANK:
- {
- u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
- *pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- default:
- {
- u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
- *pu1Tmp = 0;
- }
- break;
- }
-}
-
-static void Hal_EFUSEGetEfuseDefinition_Pseudo88E(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut)
-{
- switch (type) {
- case TYPE_EFUSE_MAX_SECTION:
- {
- u8 *pMax_section;
- pMax_section = (u8 *)pOut;
- *pMax_section = EFUSE_MAX_SECTION_88E;
- }
- break;
- case TYPE_EFUSE_REAL_CONTENT_LEN:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
- }
- break;
- case TYPE_EFUSE_CONTENT_LEN_BANK:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
- }
- break;
- case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- case TYPE_EFUSE_MAP_LEN:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
- }
- break;
- case TYPE_EFUSE_PROTECT_BYTES_BANK:
- {
- u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
- *pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- default:
- {
- u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
- *pu1Tmp = 0;
- }
- break;
- }
-}
-
-void rtl8188e_EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut, bool bPseudoTest)
-{
- if (bPseudoTest)
- Hal_EFUSEGetEfuseDefinition_Pseudo88E(pAdapter, efuseType, type, pOut);
- else
- Hal_EFUSEGetEfuseDefinition88E(pAdapter, efuseType, type, pOut);
-}
-
-static u8 Hal_EfuseWordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
-{
- u16 tmpaddr = 0;
- u16 start_addr = efuse_addr;
- u8 badworden = 0x0F;
- u8 tmpdata[8];
-
- memset((void *)tmpdata, 0xff, PGPKT_DATA_SIZE);
-
- if (!(word_en & BIT(0))) {
- tmpaddr = start_addr;
- efuse_OneByteWrite(pAdapter, start_addr++, data[0], bPseudoTest);
- efuse_OneByteWrite(pAdapter, start_addr++, data[1], bPseudoTest);
-
- efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[0], bPseudoTest);
- efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[1], bPseudoTest);
- if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
- badworden &= (~BIT(0));
- }
- if (!(word_en & BIT(1))) {
- tmpaddr = start_addr;
- efuse_OneByteWrite(pAdapter, start_addr++, data[2], bPseudoTest);
- efuse_OneByteWrite(pAdapter, start_addr++, data[3], bPseudoTest);
-
- efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[2], bPseudoTest);
- efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[3], bPseudoTest);
- if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
- badworden &= (~BIT(1));
- }
- if (!(word_en & BIT(2))) {
- tmpaddr = start_addr;
- efuse_OneByteWrite(pAdapter, start_addr++, data[4], bPseudoTest);
- efuse_OneByteWrite(pAdapter, start_addr++, data[5], bPseudoTest);
-
- efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[4], bPseudoTest);
- efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[5], bPseudoTest);
- if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
- badworden &= (~BIT(2));
- }
- if (!(word_en & BIT(3))) {
- tmpaddr = start_addr;
- efuse_OneByteWrite(pAdapter, start_addr++, data[6], bPseudoTest);
- efuse_OneByteWrite(pAdapter, start_addr++, data[7], bPseudoTest);
-
- efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[6], bPseudoTest);
- efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[7], bPseudoTest);
- if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
- badworden &= (~BIT(3));
- }
- return badworden;
-}
-
-static u8 Hal_EfuseWordEnableDataWrite_Pseudo(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
-{
- u8 ret;
-
- ret = Hal_EfuseWordEnableDataWrite(pAdapter, efuse_addr, word_en, data, bPseudoTest);
- return ret;
-}
-
-static u8 rtl8188e_Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
-{
- u8 ret = 0;
-
- if (bPseudoTest)
- ret = Hal_EfuseWordEnableDataWrite_Pseudo(pAdapter, efuse_addr, word_en, data, bPseudoTest);
- else
- ret = Hal_EfuseWordEnableDataWrite(pAdapter, efuse_addr, word_en, data, bPseudoTest);
- return ret;
-}
-
-static u16 hal_EfuseGetCurrentSize_8188e(struct adapter *pAdapter, bool bPseudoTest)
-{
- int bContinual = true;
- u16 efuse_addr = 0;
- u8 hworden = 0;
- u8 efuse_data, word_cnts = 0;
-
- if (bPseudoTest)
- efuse_addr = (u16)(fakeEfuseUsedBytes);
- else
- GetHwReg8188EU(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
-
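-	/*
-	 * Walk the packet headers from the last known used offset: a
-	 * header whose low five bits are 0x0F is extended and followed by
-	 * a second header byte, otherwise its low nibble is word_en.
-	 * Each packet occupies (word_cnts * 2) data bytes plus its
-	 * header; the scan stops at the first 0xFF (unwritten) byte.
-	 */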
- while (bContinual &&
- efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest) &&
- AVAILABLE_EFUSE_ADDR(efuse_addr)) {
- if (efuse_data != 0xFF) {
- if ((efuse_data & 0x1F) == 0x0F) { /* extended header */
- efuse_addr++;
- efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest);
- if ((efuse_data & 0x0F) == 0x0F) {
- efuse_addr++;
- continue;
- } else {
- hworden = efuse_data & 0x0F;
- }
- } else {
- hworden = efuse_data & 0x0F;
- }
- word_cnts = Efuse_CalculateWordCnts(hworden);
- /* read next header */
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- } else {
- bContinual = false;
- }
- }
-
- if (bPseudoTest)
- fakeEfuseUsedBytes = efuse_addr;
- else
- SetHwReg8188EU(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
-
- return efuse_addr;
-}
-
-static u16 Hal_EfuseGetCurrentSize_Pseudo(struct adapter *pAdapter, bool bPseudoTest)
-{
- u16 ret = 0;
-
- ret = hal_EfuseGetCurrentSize_8188e(pAdapter, bPseudoTest);
- return ret;
-}
-
-u16 rtl8188e_EfuseGetCurrentSize(struct adapter *pAdapter, u8 efuseType, bool bPseudoTest)
-{
- u16 ret = 0;
-
- if (bPseudoTest)
- ret = Hal_EfuseGetCurrentSize_Pseudo(pAdapter, bPseudoTest);
- else
- ret = hal_EfuseGetCurrentSize_8188e(pAdapter, bPseudoTest);
- return ret;
-}
-
-static int hal_EfusePgPacketRead_8188e(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
-{
- u8 ReadState = PG_STATE_HEADER;
- int bContinual = true;
- int bDataEmpty = true;
- u8 efuse_data, word_cnts = 0;
- u16 efuse_addr = 0;
- u8 hoffset = 0, hworden = 0;
- u8 tmpidx = 0;
- u8 tmpdata[8];
- u8 max_section = 0;
- u8 tmp_header = 0;
-
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, (void *)&max_section, bPseudoTest);
-
- if (!data)
- return false;
- if (offset > max_section)
- return false;
-
- memset((void *)data, 0xff, sizeof(u8) * PGPKT_DATA_SIZE);
- memset((void *)tmpdata, 0xff, sizeof(u8) * PGPKT_DATA_SIZE);
-
-	/* <Roger_TODO> The efuse has been pre-programmed with 5 dummy bytes at its end by CP. */
-	/* Skip these dummy parts to prevent unexpected data from being read from the efuse. */
-	/* Bypassed for now. 2009.02.19. */
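-	/*
-	 * Two-state reader: PG_STATE_HEADER parses each packet header
-	 * and, when the packet's offset matches the requested section,
-	 * buffers its data bytes; PG_STATE_DATA then expands them into
-	 * the output map according to word_en and resumes header parsing.
-	 */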
- while (bContinual && AVAILABLE_EFUSE_ADDR(efuse_addr)) {
- /* Header Read ------------- */
- if (ReadState & PG_STATE_HEADER) {
- if (efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest) && (efuse_data != 0xFF)) {
- if (EXT_HEADER(efuse_data)) {
- tmp_header = efuse_data;
- efuse_addr++;
- efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest);
- if (!ALL_WORDS_DISABLED(efuse_data)) {
- hoffset = ((tmp_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
- hworden = efuse_data & 0x0F;
- } else {
- DBG_88E("Error, All words disabled\n");
- efuse_addr++;
- continue;
- }
- } else {
- hoffset = (efuse_data >> 4) & 0x0F;
- hworden = efuse_data & 0x0F;
- }
- word_cnts = Efuse_CalculateWordCnts(hworden);
- bDataEmpty = true;
-
- if (hoffset == offset) {
- for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) {
- if (efuse_OneByteRead(pAdapter, efuse_addr + 1 + tmpidx, &efuse_data, bPseudoTest)) {
- tmpdata[tmpidx] = efuse_data;
- if (efuse_data != 0xff)
- bDataEmpty = false;
- }
- }
- if (!bDataEmpty) {
- ReadState = PG_STATE_DATA;
- } else {/* read next header */
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- ReadState = PG_STATE_HEADER;
- }
- } else {/* read next header */
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- ReadState = PG_STATE_HEADER;
- }
- } else {
- bContinual = false;
- }
- } else if (ReadState & PG_STATE_DATA) {
- /* Data section Read ------------- */
- efuse_WordEnableDataRead(hworden, tmpdata, data);
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- ReadState = PG_STATE_HEADER;
- }
-
- }
-
- if ((data[0] == 0xff) && (data[1] == 0xff) && (data[2] == 0xff) && (data[3] == 0xff) &&
- (data[4] == 0xff) && (data[5] == 0xff) && (data[6] == 0xff) && (data[7] == 0xff))
- return false;
- else
- return true;
-}
-
-static int Hal_EfusePgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
-{
- int ret;
-
- ret = hal_EfusePgPacketRead_8188e(pAdapter, offset, data, bPseudoTest);
- return ret;
-}
-
-static int Hal_EfusePgPacketRead_Pseudo(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
-{
- int ret;
-
- ret = hal_EfusePgPacketRead_8188e(pAdapter, offset, data, bPseudoTest);
- return ret;
-}
-
-int rtl8188e_Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
-{
- int ret;
-
- if (bPseudoTest)
- ret = Hal_EfusePgPacketRead_Pseudo(pAdapter, offset, data, bPseudoTest);
- else
- ret = Hal_EfusePgPacketRead(pAdapter, offset, data, bPseudoTest);
- return ret;
-}
-
-static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, struct pgpkt *pFixPkt, u16 *pAddr, bool bPseudoTest)
-{
- u8 originaldata[8], badworden = 0;
- u16 efuse_addr = *pAddr;
- u32 PgWriteSuccess = 0;
-
- memset((void *)originaldata, 0xff, 8);
-
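-	/*
-	 * A header was burned with the wrong value: re-read the logical
-	 * data for the offset the bad header now claims, program it into
-	 * the words following that header so the stray header still
-	 * describes valid data, then advance past the packet.
-	 */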
- if (rtl8188e_Efuse_PgPacketRead(pAdapter, pFixPkt->offset, originaldata, bPseudoTest)) {
-		/* check if data exists */
- badworden = rtl8188e_Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pFixPkt->word_en, originaldata, bPseudoTest);
-
- if (badworden != 0xf) { /* write fail */
- PgWriteSuccess = rtl8188e_Efuse_PgPacketWrite(pAdapter, pFixPkt->offset, badworden, originaldata, bPseudoTest);
-
- if (!PgWriteSuccess)
- return false;
- else
- efuse_addr = rtl8188e_EfuseGetCurrentSize(pAdapter, efuseType, bPseudoTest);
- } else {
- efuse_addr = efuse_addr + (pFixPkt->word_cnts * 2) + 1;
- }
- } else {
- efuse_addr = efuse_addr + (pFixPkt->word_cnts * 2) + 1;
- }
- *pAddr = efuse_addr;
- return true;
-}
-
-static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
-{
- bool bRet = false;
- u16 efuse_addr = *pAddr, efuse_max_available_len = 0;
- u8 pg_header = 0, tmp_header = 0, pg_header_temp = 0;
- u8 repeatcnt = 0;
-
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
-
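-	/*
-	 * Two-byte (extended) header, used when the section offset no
-	 * longer fits a one-byte header: byte 0 carries offset[2:0] in
-	 * its top three bits with the low nibble fixed at 0xF; byte 1
-	 * carries offset[6:3] in its top nibble and word_en in its low
-	 * nibble. Each byte is written and read back, retrying up to
-	 * EFUSE_REPEAT_THRESHOLD_ times.
-	 */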
- while (efuse_addr < efuse_max_available_len) {
- pg_header = ((pTargetPkt->offset & 0x07) << 5) | 0x0F;
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
-
- while (tmp_header == 0xFF) {
- if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
- return false;
-
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
- }
-
- /* to write ext_header */
- if (tmp_header == pg_header) {
- efuse_addr++;
- pg_header_temp = pg_header;
- pg_header = ((pTargetPkt->offset & 0x78) << 1) | pTargetPkt->word_en;
-
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
-
- while (tmp_header == 0xFF) {
- if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
- return false;
-
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
- }
-
- if ((tmp_header & 0x0F) == 0x0F) { /* word_en PG fail */
- if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
- return false;
- } else {
- efuse_addr++;
- continue;
- }
- } else if (pg_header != tmp_header) { /* offset PG fail */
- struct pgpkt fixPkt;
- fixPkt.offset = ((pg_header_temp & 0xE0) >> 5) | ((tmp_header & 0xF0) >> 1);
- fixPkt.word_en = tmp_header & 0x0F;
- fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
- if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr, bPseudoTest))
- return false;
- } else {
- bRet = true;
- break;
- }
- } else if ((tmp_header & 0x1F) == 0x0F) { /* wrong extended header */
- efuse_addr += 2;
- continue;
- }
- }
-
- *pAddr = efuse_addr;
- return bRet;
-}
-
-static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
-{
- bool bRet = false;
- u8 pg_header = 0, tmp_header = 0;
- u16 efuse_addr = *pAddr;
- u8 repeatcnt = 0;
-
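-	/* One-byte header: section offset in the high nibble, word_en in
-	 * the low nibble. Write it and read it back; if the read-back
-	 * header disagrees, repair via hal_EfuseFixHeaderProcess(). */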
- pg_header = ((pTargetPkt->offset << 4) & 0xf0) | pTargetPkt->word_en;
-
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
-
- while (tmp_header == 0xFF) {
- if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
- return false;
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
- }
-
- if (pg_header == tmp_header) {
- bRet = true;
- } else {
- struct pgpkt fixPkt;
- fixPkt.offset = (tmp_header >> 4) & 0x0F;
- fixPkt.word_en = tmp_header & 0x0F;
- fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
- if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr, bPseudoTest))
- return false;
- }
-
- *pAddr = efuse_addr;
- return bRet;
-}
-
-static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
-{
- u16 efuse_addr = *pAddr;
- u8 badworden;
- u32 PgWriteSuccess = 0;
-
- badworden = rtl8188e_Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pTargetPkt->word_en, pTargetPkt->data, bPseudoTest);
- if (badworden == 0x0F) {
- /* write ok */
- return true;
- } else {
- /* reorganize other pg packet */
- PgWriteSuccess = rtl8188e_Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
- if (!PgWriteSuccess)
- return false;
- else
- return true;
- }
-}
-
-static bool
-hal_EfusePgPacketWriteHeader(
- struct adapter *pAdapter,
- u8 efuseType,
- u16 *pAddr,
- struct pgpkt *pTargetPkt,
- bool bPseudoTest)
-{
- bool bRet = false;
-
- if (pTargetPkt->offset >= EFUSE_MAX_SECTION_BASE)
- bRet = hal_EfusePgPacketWrite2ByteHeader(pAdapter, efuseType, pAddr, pTargetPkt, bPseudoTest);
- else
- bRet = hal_EfusePgPacketWrite1ByteHeader(pAdapter, efuseType, pAddr, pTargetPkt, bPseudoTest);
-
- return bRet;
-}
-
-static bool wordEnMatched(struct pgpkt *pTargetPkt, struct pgpkt *pCurPkt,
- u8 *pWden)
-{
- u8 match_word_en = 0x0F; /* default all words are disabled */
-
-	/* check if the same words are enabled in both the target and the current PG packet */
- if (((pTargetPkt->word_en & BIT(0)) == 0) &&
- ((pCurPkt->word_en & BIT(0)) == 0))
- match_word_en &= ~BIT(0); /* enable word 0 */
- if (((pTargetPkt->word_en & BIT(1)) == 0) &&
- ((pCurPkt->word_en & BIT(1)) == 0))
- match_word_en &= ~BIT(1); /* enable word 1 */
- if (((pTargetPkt->word_en & BIT(2)) == 0) &&
- ((pCurPkt->word_en & BIT(2)) == 0))
- match_word_en &= ~BIT(2); /* enable word 2 */
- if (((pTargetPkt->word_en & BIT(3)) == 0) &&
- ((pCurPkt->word_en & BIT(3)) == 0))
- match_word_en &= ~BIT(3); /* enable word 3 */
-
- *pWden = match_word_en;
-
- if (match_word_en != 0xf)
- return true;
- else
- return false;
-}
-
-static bool hal_EfuseCheckIfDatafollowed(struct adapter *pAdapter, u8 word_cnts, u16 startAddr, bool bPseudoTest)
-{
- bool bRet = false;
- u8 i, efuse_data;
-
- for (i = 0; i < (word_cnts * 2); i++) {
- if (efuse_OneByteRead(pAdapter, (startAddr + i), &efuse_data, bPseudoTest) && (efuse_data != 0xFF))
- bRet = true;
- }
- return bRet;
-}
-
-static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
-{
- bool bRet = false;
- u8 i, efuse_data = 0, cur_header = 0;
- u8 matched_wden = 0, badworden = 0;
- u16 startAddr = 0, efuse_max_available_len = 0, efuse_max = 0;
- struct pgpkt curPkt;
-
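-	/*
-	 * Walk the existing packets from the current used size. If a
-	 * packet with the target offset has a header but no data yet and
-	 * enables some of the same words, write those words now and
-	 * shrink the target packet; otherwise stop at the first free
-	 * (0xFF) byte and report it as the write address.
-	 */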
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&efuse_max, bPseudoTest);
-
- if (efuseType == EFUSE_WIFI) {
- if (bPseudoTest) {
- startAddr = (u16)(fakeEfuseUsedBytes % EFUSE_REAL_CONTENT_LEN);
- } else {
- GetHwReg8188EU(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&startAddr);
- startAddr %= EFUSE_REAL_CONTENT_LEN;
- }
- } else {
- if (bPseudoTest)
- startAddr = (u16)(fakeBTEfuseUsedBytes % EFUSE_REAL_CONTENT_LEN);
- else
- startAddr = (u16)(BTEfuseUsedBytes % EFUSE_REAL_CONTENT_LEN);
- }
+ int ret = _FAIL;
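+	/* Try the IOL-assisted efuse read first; fall back to the direct
+	 * read below if it is unavailable or fails. */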
+ if (rtw_IOL_applied(Adapter)) {
+ rtl8188eu_InitPowerOn(Adapter);
- while (1) {
- if (startAddr >= efuse_max_available_len) {
- bRet = false;
- break;
- }
+ iol_mode_enable(Adapter, 1);
+ ret = iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
+ iol_mode_enable(Adapter, 0);
- if (efuse_OneByteRead(pAdapter, startAddr, &efuse_data, bPseudoTest) && (efuse_data != 0xFF)) {
- if (EXT_HEADER(efuse_data)) {
- cur_header = efuse_data;
- startAddr++;
- efuse_OneByteRead(pAdapter, startAddr, &efuse_data, bPseudoTest);
- if (ALL_WORDS_DISABLED(efuse_data)) {
- bRet = false;
- break;
- } else {
- curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
- curPkt.word_en = efuse_data & 0x0F;
- }
- } else {
- cur_header = efuse_data;
- curPkt.offset = (cur_header >> 4) & 0x0F;
- curPkt.word_en = cur_header & 0x0F;
- }
-
- curPkt.word_cnts = Efuse_CalculateWordCnts(curPkt.word_en);
-			/* If the same header is found but no data follows it, */
-			/* write part of the data right after that header. */
- if ((curPkt.offset == pTargetPkt->offset) &&
- (!hal_EfuseCheckIfDatafollowed(pAdapter, curPkt.word_cnts, startAddr + 1, bPseudoTest)) &&
- wordEnMatched(pTargetPkt, &curPkt, &matched_wden)) {
- /* Here to write partial data */
- badworden = rtl8188e_Efuse_WordEnableDataWrite(pAdapter, startAddr + 1, matched_wden, pTargetPkt->data, bPseudoTest);
- if (badworden != 0x0F) {
- u32 PgWriteSuccess = 0;
-				/* if the write failed on some words, write those bad words again */
-
- PgWriteSuccess = rtl8188e_Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
-
- if (!PgWriteSuccess) {
- bRet = false; /* write fail, return */
- break;
- }
- }
- /* partial write ok, update the target packet for later use */
- for (i = 0; i < 4; i++) {
- if ((matched_wden & (0x1 << i)) == 0) /* this word has been written */
- pTargetPkt->word_en |= (0x1 << i); /* disable the word */
- }
- pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
- }
- /* read from next header */
- startAddr = startAddr + (curPkt.word_cnts * 2) + 1;
- } else {
- /* not used header, 0xff */
- *pAddr = startAddr;
- bRet = true;
- break;
- }
+ if (_SUCCESS == ret)
+ return;
}
- return bRet;
-}
-
-static bool
-hal_EfusePgCheckAvailableAddr(
- struct adapter *pAdapter,
- u8 efuseType,
- bool bPseudoTest
- )
-{
- u16 efuse_max_available_len = 0;
-	/* Check TYPE_EFUSE_MAP_LEN instead: the 8188E raw efuse is 256 bytes while the logical map is larger than 256. */
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&efuse_max_available_len, false);
-
- if (rtl8188e_EfuseGetCurrentSize(pAdapter, efuseType, bPseudoTest) >= efuse_max_available_len)
- return false;
- return true;
-}
-
-static void hal_EfuseConstructPGPkt(u8 offset, u8 word_en, u8 *pData, struct pgpkt *pTargetPkt)
-{
- memset((void *)pTargetPkt->data, 0xFF, sizeof(u8) * 8);
- pTargetPkt->offset = offset;
- pTargetPkt->word_en = word_en;
- efuse_WordEnableDataRead(word_en, pData, pTargetPkt->data);
- pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
+ Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf);
}
-static bool hal_EfusePgPacketWrite_8188e(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *pData, bool bPseudoTest)
+void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf)
{
- struct pgpkt targetPkt;
- u16 startAddr = 0;
- u8 efuseType = EFUSE_WIFI;
-
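-	/*
-	 * PG write pipeline: verify space remains, build the packet from
-	 * the caller's data, resolve any partial overlap with an existing
-	 * packet, then burn the header and the data words.
-	 */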
- if (!hal_EfusePgCheckAvailableAddr(pAdapter, efuseType, bPseudoTest))
- return false;
-
- hal_EfuseConstructPGPkt(offset, word_en, pData, &targetPkt);
-
- if (!hal_EfusePartialWriteCheck(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
- return false;
-
- if (!hal_EfusePgPacketWriteHeader(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
- return false;
-
- if (!hal_EfusePgPacketWriteData(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
- return false;
-
- return true;
-}
-
-static int Hal_EfusePgPacketWrite_Pseudo(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
-{
- int ret;
-
- ret = hal_EfusePgPacketWrite_8188e(pAdapter, offset, word_en, data, bPseudoTest);
- return ret;
-}
-
-static int Hal_EfusePgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
-{
- int ret = 0;
- ret = hal_EfusePgPacketWrite_8188e(pAdapter, offset, word_en, data, bPseudoTest);
-
- return ret;
-}
-
-int rtl8188e_Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
-{
- int ret;
-
- if (bPseudoTest)
- ret = Hal_EfusePgPacketWrite_Pseudo(pAdapter, offset, word_en, data, bPseudoTest);
- else
- ret = Hal_EfusePgPacketWrite(pAdapter, offset, word_en, data, bPseudoTest);
- return ret;
+ ReadEFuseByIC(Adapter, _offset, _size_byte, pbuf);
}
void rtl8188e_read_chip_version(struct adapter *padapter)
{
u32 value32;
struct HAL_VERSION ChipVersion;
- struct hal_data_8188e *pHalData;
-
- pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = &padapter->haldata;
value32 = rtw_read32(padapter, REG_SYS_CFG);
ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
- ChipVersion.RFType = RF_TYPE_1T1R;
ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
ChipVersion.ROMVer = 0; /* ROM code version. */
@@ -1657,39 +843,21 @@ void rtl8188e_read_chip_version(struct adapter *padapter)
dump_chip_info(ChipVersion);
pHalData->VersionID = ChipVersion;
-
- pHalData->rf_type = RF_1T1R;
-
- MSG_88E("RF_Type is %x!!\n", pHalData->rf_type);
}
-void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
+void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct odm_dm_struct *podmpriv = &pHalData->odmpriv;
- switch (eVariable) {
- case HAL_ODM_STA_INFO:
- {
- struct sta_info *psta = (struct sta_info *)pValue1;
-
- if (bSet) {
- DBG_88E("### Set STA_(%d) info\n", psta->mac_id);
- podmpriv->pODM_StaInfo[psta->mac_id] = psta;
- ODM_RAInfo_Init(podmpriv, psta->mac_id);
- } else {
- DBG_88E("### Clean STA_(%d) info\n", psta->mac_id);
- podmpriv->pODM_StaInfo[psta->mac_id] = NULL;
- }
- }
- break;
- case HAL_ODM_P2P_STATE:
- ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DIRECT, bSet);
- break;
- case HAL_ODM_WIFI_DISPLAY_STATE:
- ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DISPLAY, bSet);
- break;
- default:
- break;
+ struct sta_info *psta = (struct sta_info *)pValue1;
+
+ if (bSet) {
+ DBG_88E("### Set STA_(%d) info\n", psta->mac_id);
+ podmpriv->pODM_StaInfo[psta->mac_id] = psta;
+ ODM_RAInfo_Init(podmpriv, psta->mac_id);
+ } else {
+ DBG_88E("### Clean STA_(%d) info\n", psta->mac_id);
+ podmpriv->pODM_StaInfo[psta->mac_id] = NULL;
}
}
@@ -1713,7 +881,8 @@ u8 GetEEPROMSize8188E(struct adapter *padapter)
/* 6: the EEPROM in use is a 93C46; 4: boot from E-Fuse. */
size = (cr & BOOT_FROM_EEPROM) ? 6 : 4;
- MSG_88E("EEPROM type is %s\n", size == 4 ? "E-FUSE" : "93C46");
+ netdev_dbg(padapter->pnetdev, "EEPROM type is %s\n",
+ size == 4 ? "E-FUSE" : "93C46");
return size;
}
@@ -1787,23 +956,6 @@ s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
}
void
-Hal_InitPGData88E(struct adapter *padapter)
-{
- struct eeprom_priv *pEEPROM = &padapter->eeprompriv;
-
- if (!pEEPROM->bautoload_fail_flag) { /* autoload OK. */
- if (!is_boot_from_eeprom(padapter)) {
- /* Read EFUSE real map to shadow. */
- EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI, false);
- }
- } else {/* autoload fail */
- /* update to default value 0xFF */
- if (!is_boot_from_eeprom(padapter))
- EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI, false);
- }
-}
-
-void
Hal_EfuseParseIDCode88E(
struct adapter *padapter,
u8 *hwinfo
@@ -1942,22 +1094,16 @@ static void hal_get_chnl_group_88e(u8 chnl, u8 *group)
void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
{
if (AutoLoadFail) {
- padapter->pwrctrlpriv.bHWPowerdown = false;
padapter->pwrctrlpriv.bSupportRemoteWakeup = false;
} else {
/* hw power down mode selection, 0: rf-off / 1: power down */
- if (padapter->registrypriv.hwpdn_mode == 2)
- padapter->pwrctrlpriv.bHWPowerdown = (hwinfo[EEPROM_RF_FEATURE_OPTION_88E] & BIT(4));
- else
- padapter->pwrctrlpriv.bHWPowerdown = padapter->registrypriv.hwpdn_mode;
-
/* decide hw if support remote wakeup function */
/* if the hw supports it, the 8051 (SIE) will generate a WakeUp signal (D+/D- toggle) on autoresume */
padapter->pwrctrlpriv.bSupportRemoteWakeup = (hwinfo[EEPROM_USB_OPTIONAL_FUNCTION0] & BIT(1)) ? true : false;
- DBG_88E("%s...bHWPowerdown(%x) , bSupportRemoteWakeup(%x)\n", __func__,
- padapter->pwrctrlpriv.bHWPowerdown, padapter->pwrctrlpriv.bSupportRemoteWakeup);
+ DBG_88E("%s , bSupportRemoteWakeup(%x)\n", __func__,
+ padapter->pwrctrlpriv.bSupportRemoteWakeup);
DBG_88E("### PS params => power_mgnt(%x), usbss_enable(%x) ###\n", padapter->registrypriv.power_mgnt, padapter->registrypriv.usbss_enable);
}
@@ -1965,40 +1111,32 @@ void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoL
void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = &padapter->haldata;
struct txpowerinfo24g pwrInfo24G;
- u8 rfPath = 0;
u8 ch, group;
u8 TxCount;
Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail);
- if (!AutoLoadFail)
- pHalData->bTXPowerDataReadFromEEPORM = true;
-
for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
hal_get_chnl_group_88e(ch, &group);
- pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
+ pHalData->Index24G_CCK_Base[ch] = pwrInfo24G.IndexCCK_Base[0][group];
if (ch == 14)
- pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4];
+ pHalData->Index24G_BW40_Base[ch] = pwrInfo24G.IndexBW40_Base[0][4];
else
- pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
+ pHalData->Index24G_BW40_Base[ch] = pwrInfo24G.IndexBW40_Base[0][group];
- DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch);
- DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_CCK_Base[rfPath][ch]);
- DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_BW40_Base[rfPath][ch]);
+ DBG_88E("======= Path 0, Channel %d =======\n", ch);
+ DBG_88E("Index24G_CCK_Base[%d] = 0x%x\n", ch, pHalData->Index24G_CCK_Base[ch]);
+ DBG_88E("Index24G_BW40_Base[%d] = 0x%x\n", ch, pHalData->Index24G_BW40_Base[ch]);
}
for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount];
- pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount];
- pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount];
- pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
+ pHalData->OFDM_24G_Diff[TxCount] = pwrInfo24G.OFDM_Diff[0][TxCount];
+ pHalData->BW20_24G_Diff[TxCount] = pwrInfo24G.BW20_Diff[0][TxCount];
DBG_88E("======= TxCount %d =======\n", TxCount);
- DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]);
- DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]);
- DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]);
- DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]);
+ DBG_88E("OFDM_24G_Diff[%d] = %d\n", TxCount, pHalData->OFDM_24G_Diff[TxCount]);
+ DBG_88E("BW20_24G_Diff[%d] = %d\n", TxCount, pHalData->BW20_24G_Diff[TxCount]);
}
/* 2010/10/19 MH Add regulator recognition for CU. */
@@ -2014,7 +1152,7 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *pHalData = &pAdapter->haldata;
if (!AutoLoadFail) {
pHalData->CrystalCap = hwinfo[EEPROM_XTAL_88E];
@@ -2026,30 +1164,6 @@ void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoa
DBG_88E("CrystalCap: 0x%2x\n", pHalData->CrystalCap);
}
-void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
-
- if (!AutoLoadFail)
- pHalData->BoardType = ((hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0xE0) >> 5);
- else
- pHalData->BoardType = 0;
- DBG_88E("Board Type: 0x%2x\n", pHalData->BoardType);
-}
-
-void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
-
- if (!AutoLoadFail) {
- pHalData->EEPROMVersion = hwinfo[EEPROM_VERSION_88E];
- if (pHalData->EEPROMVersion == 0xFF)
- pHalData->EEPROMVersion = EEPROM_Default_Version;
- } else {
- pHalData->EEPROMVersion = 1;
- }
-}
-
void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
{
padapter->mlmepriv.ChannelPlan =
@@ -2061,22 +1175,9 @@ void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool Auto
DBG_88E("mlmepriv.ChannelPlan = 0x%02x\n", padapter->mlmepriv.ChannelPlan);
}
-void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
-
- if (!AutoLoadFail) {
- pHalData->EEPROMCustomerID = hwinfo[EEPROM_CUSTOMERID_88E];
- } else {
- pHalData->EEPROMCustomerID = 0;
- pHalData->EEPROMSubCustomerID = 0;
- }
- DBG_88E("EEPROM Customer ID: 0x%2x\n", pHalData->EEPROMCustomerID);
-}
-
void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *pHalData = &pAdapter->haldata;
struct registry_priv *registry_par = &pAdapter->registrypriv;
if (!AutoLoadFail) {
@@ -2108,7 +1209,7 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
/* ThermalMeter from EEPROM */
if (!AutoloadFail)
@@ -2116,9 +1217,8 @@ void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool Aut
else
pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
- if (pHalData->EEPROMThermalMeter == 0xff || AutoloadFail) {
- pHalData->bAPKThermalMeterIgnore = true;
+ if (pHalData->EEPROMThermalMeter == 0xff || AutoloadFail)
pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
- }
+
DBG_88E("ThermalMeter = 0x%x\n", pHalData->EEPROMThermalMeter);
}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
index bb0cda0c16a0..302b15b2874d 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
@@ -143,7 +143,7 @@ phy_RFSerialRead(
)
{
u32 retValue = 0;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct bb_reg_def *pPhyReg = &pHalData->PHYRegDef[eRFPath];
u32 NewOffset;
u32 tmplong, tmplong2;
@@ -161,31 +161,31 @@ phy_RFSerialRead(
/* For 92S LSSI Read RFLSSIRead */
/* For RF A/B, write 0x824/0x82c (does not work in the future) */
/* We must use 0x824 for both RF A and B to execute the read trigger */
- tmplong = PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord);
+ tmplong = rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord);
if (eRFPath == RF_PATH_A)
tmplong2 = tmplong;
else
- tmplong2 = PHY_QueryBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord);
+ tmplong2 = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord);
tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | (NewOffset << 23) | bLSSIReadEdge; /* T65 RF */
- PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord, tmplong & (~bLSSIReadEdge));
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord, tmplong & (~bLSSIReadEdge));
udelay(10);/* PlatformStallExecution(10); */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord, tmplong2);
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord, tmplong2);
udelay(100);/* PlatformStallExecution(100); */
udelay(10);/* PlatformStallExecution(10); */
if (eRFPath == RF_PATH_A)
- RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter1, BIT(8));
+ RfPiEnable = (u8)rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter1, BIT(8));
else if (eRFPath == RF_PATH_B)
- RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XB_HSSIParameter1, BIT(8));
+ RfPiEnable = (u8)rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XB_HSSIParameter1, BIT(8));
if (RfPiEnable) {	/* Read from BBreg8b8, 12 bits for 8190, 20 bits for T65 RF */
- retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi, bLSSIReadBackData);
+ retValue = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi, bLSSIReadBackData);
} else { /* Read from BBreg8a0, 12 bits for 8190, 20 bits for T65 RF */
- retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack, bLSSIReadBackData);
+ retValue = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack, bLSSIReadBackData);
}
return retValue;
}
@@ -242,7 +242,7 @@ phy_RFSerialWrite(
)
{
u32 DataAndAddr = 0;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct bb_reg_def *pPhyReg = &pHalData->PHYRegDef[eRFPath];
u32 NewOffset;
@@ -263,7 +263,7 @@ phy_RFSerialWrite(
/* */
/* Write Operation */
/* */
- PHY_SetBBReg(Adapter, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
}
/**
@@ -355,7 +355,7 @@ rtl8188e_PHY_SetRFReg(
*---------------------------------------------------------------------------*/
s32 PHY_MACConfig8188E(struct adapter *Adapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
int rtStatus = _SUCCESS;
/* */
@@ -387,19 +387,15 @@ phy_InitBBRFRegisterDefinition(
struct adapter *Adapter
)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
/* RF Interface Software Control */
pHalData->PHYRegDef[RF_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 LSBs if read 32-bit from 0x870 */
pHalData->PHYRegDef[RF_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
- pHalData->PHYRegDef[RF_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;/* 16 LSBs if read 32-bit from 0x874 */
- pHalData->PHYRegDef[RF_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;/* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
/* RF Interface Readback Value */
pHalData->PHYRegDef[RF_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB; /* 16 LSBs if read 32-bit from 0x8E0 */
pHalData->PHYRegDef[RF_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;/* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
- pHalData->PHYRegDef[RF_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;/* 16 LSBs if read 32-bit from 0x8E4 */
- pHalData->PHYRegDef[RF_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;/* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
/* RF Interface Output (and Enable) */
pHalData->PHYRegDef[RF_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; /* 16 LSBs if read 32-bit from 0x860 */
@@ -416,14 +412,10 @@ phy_InitBBRFRegisterDefinition(
/* RF parameter */
pHalData->PHYRegDef[RF_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter; /* BB Band Select */
pHalData->PHYRegDef[RF_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
- pHalData->PHYRegDef[RF_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
- pHalData->PHYRegDef[RF_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
/* Tx AGC Gain Stage (same for all path. Should we remove this?) */
pHalData->PHYRegDef[RF_PATH_A].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
pHalData->PHYRegDef[RF_PATH_B].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
- pHalData->PHYRegDef[RF_PATH_C].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
- pHalData->PHYRegDef[RF_PATH_D].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
/* Transceiver A~D HSSI Parameter-1 */
pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; /* wire control parameter1 */
@@ -436,50 +428,34 @@ phy_InitBBRFRegisterDefinition(
/* RF switch Control */
pHalData->PHYRegDef[RF_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl; /* TR/Ant switch control */
pHalData->PHYRegDef[RF_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
- pHalData->PHYRegDef[RF_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
- pHalData->PHYRegDef[RF_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
/* AGC control 1 */
pHalData->PHYRegDef[RF_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
pHalData->PHYRegDef[RF_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
- pHalData->PHYRegDef[RF_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
- pHalData->PHYRegDef[RF_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
/* AGC control 2 */
pHalData->PHYRegDef[RF_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
pHalData->PHYRegDef[RF_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
- pHalData->PHYRegDef[RF_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
- pHalData->PHYRegDef[RF_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
/* RX AFE control 1 */
pHalData->PHYRegDef[RF_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
pHalData->PHYRegDef[RF_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
- pHalData->PHYRegDef[RF_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
- pHalData->PHYRegDef[RF_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
/* RX AFE control 1 */
pHalData->PHYRegDef[RF_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
pHalData->PHYRegDef[RF_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
- pHalData->PHYRegDef[RF_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
- pHalData->PHYRegDef[RF_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
/* Tx AFE control 1 */
pHalData->PHYRegDef[RF_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
pHalData->PHYRegDef[RF_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
- pHalData->PHYRegDef[RF_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
- pHalData->PHYRegDef[RF_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
/* Tx AFE control 2 */
pHalData->PHYRegDef[RF_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
pHalData->PHYRegDef[RF_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
- pHalData->PHYRegDef[RF_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
- pHalData->PHYRegDef[RF_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
/* Transceiver LSSI Readback SI mode */
pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
- pHalData->PHYRegDef[RF_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
- pHalData->PHYRegDef[RF_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
/* Transceiver LSSI Readback PI mode */
pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
@@ -488,7 +464,7 @@ phy_InitBBRFRegisterDefinition(
void storePwrIndexDiffRateOffset(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
if (RegAddr == rTxAGC_A_Rate18_06)
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][0] = Data;
@@ -506,8 +482,7 @@ void storePwrIndexDiffRateOffset(struct adapter *Adapter, u32 RegAddr, u32 BitMa
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][4] = Data;
if (RegAddr == rTxAGC_A_Mcs15_Mcs12) {
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][5] = Data;
- if (pHalData->rf_type == RF_1T1R)
- pHalData->pwrGroupCnt++;
+ pHalData->pwrGroupCnt++;
}
if (RegAddr == rTxAGC_B_Rate18_06)
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][8] = Data;
@@ -523,17 +498,14 @@ void storePwrIndexDiffRateOffset(struct adapter *Adapter, u32 RegAddr, u32 BitMa
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][11] = Data;
if (RegAddr == rTxAGC_B_Mcs11_Mcs08)
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][12] = Data;
- if (RegAddr == rTxAGC_B_Mcs15_Mcs12) {
+ if (RegAddr == rTxAGC_B_Mcs15_Mcs12)
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][13] = Data;
- if (pHalData->rf_type != RF_1T1R)
- pHalData->pwrGroupCnt++;
- }
}
static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
{
struct eeprom_priv *pEEPROM = &Adapter->eeprompriv;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
int rtStatus = _SUCCESS;
/* */
@@ -574,7 +546,7 @@ PHY_BBConfig8188E(
)
{
int rtStatus = _SUCCESS;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u32 RegVal;
u8 CrystalCap;
@@ -595,7 +567,7 @@ PHY_BBConfig8188E(
/* write 0x24[16:11] = 0x24[22:17] = CrystalCap */
CrystalCap = pHalData->CrystalCap & 0x3F;
- PHY_SetBBReg(Adapter, REG_AFE_XTAL_CTRL, 0x7ff800, (CrystalCap | (CrystalCap << 6)));
+ rtl8188e_PHY_SetBBReg(Adapter, REG_AFE_XTAL_CTRL, 0x7ff800, (CrystalCap | (CrystalCap << 6)));
return rtStatus;
}
@@ -613,82 +585,25 @@ static void getTxPowerIndex88E(struct adapter *Adapter, u8 channel, u8 *cckPower
u8 *ofdmPowerLevel, u8 *BW20PowerLevel,
u8 *BW40PowerLevel)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u8 index = (channel - 1);
- u8 TxCount = 0, path_nums;
- if ((RF_1T2R == pHalData->rf_type) || (RF_1T1R == pHalData->rf_type))
- path_nums = 1;
- else
- path_nums = 2;
-
- for (TxCount = 0; TxCount < path_nums; TxCount++) {
- if (TxCount == RF_PATH_A) {
- /* 1. CCK */
- cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
- /* 2. OFDM */
- ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->OFDM_24G_Diff[TxCount][RF_PATH_A];
- /* 1. BW20 */
- BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[TxCount][RF_PATH_A];
- /* 2. BW40 */
- BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
- } else if (TxCount == RF_PATH_B) {
- /* 1. CCK */
- cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
- /* 2. OFDM */
- ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[TxCount][index];
- /* 1. BW20 */
- BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[TxCount][RF_PATH_A] +
- pHalData->BW20_24G_Diff[TxCount][index];
- /* 2. BW40 */
- BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
- } else if (TxCount == RF_PATH_C) {
- /* 1. CCK */
- cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
- /* 2. OFDM */
- ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_B][index] +
- pHalData->BW20_24G_Diff[TxCount][index];
- /* 1. BW20 */
- BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_B][index] +
- pHalData->BW20_24G_Diff[TxCount][index];
- /* 2. BW40 */
- BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
- } else if (TxCount == RF_PATH_D) {
- /* 1. CCK */
- cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
- /* 2. OFDM */
- ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_B][index] +
- pHalData->BW20_24G_Diff[RF_PATH_C][index] +
- pHalData->BW20_24G_Diff[TxCount][index];
-
- /* 1. BW20 */
- BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_B][index] +
- pHalData->BW20_24G_Diff[RF_PATH_C][index] +
- pHalData->BW20_24G_Diff[TxCount][index];
-
- /* 2. BW40 */
- BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
- }
- }
+ /* 1. CCK */
+ cckPowerLevel[RF_PATH_A] = pHalData->Index24G_CCK_Base[index];
+ /* 2. OFDM */
+ ofdmPowerLevel[RF_PATH_A] = pHalData->Index24G_BW40_Base[index] +
+ pHalData->OFDM_24G_Diff[RF_PATH_A];
+ /* 1. BW20 */
+ BW20PowerLevel[RF_PATH_A] = pHalData->Index24G_BW40_Base[index] +
+ pHalData->BW20_24G_Diff[RF_PATH_A];
+ /* 2. BW40 */
+ BW40PowerLevel[RF_PATH_A] = pHalData->Index24G_BW40_Base[index];
}
static void phy_PowerIndexCheck88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
u8 *ofdmPowerLevel, u8 *BW20PowerLevel, u8 *BW40PowerLevel)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
pHalData->CurrentCckTxPwrIdx = cckPowerLevel[0];
pHalData->CurrentOfdm24GTxPwrIdx = ofdmPowerLevel[0];
@@ -752,17 +667,10 @@ _PHY_SetBWMode92C(
struct adapter *Adapter
)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u8 regBwOpMode;
u8 regRRSR_RSC;
- if (pHalData->rf_chip == RF_PSEUDO_11N)
- return;
-
- /* There is no 40MHz mode in RF_8225. */
- if (pHalData->rf_chip == RF_8225)
- return;
-
if (Adapter->bDriverStopped)
return;
@@ -796,17 +704,17 @@ _PHY_SetBWMode92C(
switch (pHalData->CurrentChannelBW) {
/* 20 MHz channel*/
case HT_CHANNEL_WIDTH_20:
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x0);
- PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x0);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
break;
/* 40 MHz channel*/
case HT_CHANNEL_WIDTH_40:
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x1);
- PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x1);
/* Set Control channel to upper or lower. These settings are required only for 40MHz */
- PHY_SetBBReg(Adapter, rCCK0_System, bCCKSideBand, (pHalData->nCur40MhzPrimeSC >> 1));
- PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00, pHalData->nCur40MhzPrimeSC);
- PHY_SetBBReg(Adapter, 0x818, (BIT(26) | BIT(27)),
+ rtl8188e_PHY_SetBBReg(Adapter, rCCK0_System, bCCKSideBand, (pHalData->nCur40MhzPrimeSC >> 1));
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00, pHalData->nCur40MhzPrimeSC);
+ rtl8188e_PHY_SetBBReg(Adapter, 0x818, (BIT(26) | BIT(27)),
(pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
break;
default:
@@ -814,21 +722,7 @@ _PHY_SetBWMode92C(
}
/* Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315 */
- /* 3<3>Set RF related register */
- switch (pHalData->rf_chip) {
- case RF_8225:
- break;
- case RF_8256:
- /* Please implement this function in Hal8190PciPhy8256.c */
- break;
- case RF_PSEUDO_11N:
- break;
- case RF_6052:
- rtl8188e_PHY_RF6052SetBandwidth(Adapter, pHalData->CurrentChannelBW);
- break;
- default:
- break;
- }
+ rtl8188e_PHY_RF6052SetBandwidth(Adapter, pHalData->CurrentChannelBW);
}
/*-----------------------------------------------------------------------------
@@ -848,7 +742,7 @@ _PHY_SetBWMode92C(
void PHY_SetBWMode8188E(struct adapter *Adapter, enum ht_channel_width Bandwidth, /* 20M or 40M */
unsigned char Offset) /* Upper, Lower, or Don't care */
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
enum ht_channel_width tmpBW = pHalData->CurrentChannelBW;
pHalData->CurrentChannelBW = Bandwidth;
@@ -865,7 +759,7 @@ static void _PHY_SwChnl8192C(struct adapter *Adapter, u8 channel)
{
u8 eRFPath = 0;
u32 param1, param2;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
if (Adapter->bNotifyChannelChange)
DBG_88E("[%s] ch = %d\n", __func__, channel);
@@ -877,16 +771,13 @@ static void _PHY_SwChnl8192C(struct adapter *Adapter, u8 channel)
param1 = RF_CHNLBW;
param2 = channel;
pHalData->RfRegChnlVal[eRFPath] = ((pHalData->RfRegChnlVal[eRFPath] & 0xfffffc00) | param2);
- PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, param1, bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
+ rtl8188e_PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, param1, bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
}
void PHY_SwChnl8188E(struct adapter *Adapter, u8 channel)
{
/* Call after initialization */
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
-
- if (pHalData->rf_chip == RF_PSEUDO_11N)
- return; /* return immediately if it is peudo-phy */
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
if (channel == 0)
channel = 1;
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
index 946a1b97d96f..6e0231099986 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
@@ -46,16 +46,16 @@
void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
enum ht_channel_width Bandwidth)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
switch (Bandwidth) {
case HT_CHANNEL_WIDTH_20:
pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT(10) | BIT(11));
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ rtl8188e_PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
break;
case HT_CHANNEL_WIDTH_40:
pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT(10));
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ rtl8188e_PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
break;
default:
break;
@@ -84,31 +84,24 @@ rtl8188e_PHY_RF6052SetCckTxPower(
struct adapter *Adapter,
u8 *pPowerlevel)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
u32 TxAGC[2] = {0, 0}, tmpval = 0, pwrtrac_value;
- bool TurboScanOff = false;
u8 idx1, idx2;
u8 *ptr;
u8 direction;
-	/* For CE, turbo scan must be disabled */
- TurboScanOff = true;
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
TxAGC[RF_PATH_A] = 0x3f3f3f3f;
TxAGC[RF_PATH_B] = 0x3f3f3f3f;
- TurboScanOff = true;/* disable turbo scan */
-
- if (TurboScanOff) {
- for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] =
- pPowerlevel[idx1] | (pPowerlevel[idx1] << 8) |
- (pPowerlevel[idx1] << 16) | (pPowerlevel[idx1] << 24);
- /* 2010/10/18 MH For external PA module. We need to limit power index to be less than 0x20. */
- if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA)
- TxAGC[idx1] = 0x20;
- }
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ TxAGC[idx1] =
+ pPowerlevel[idx1] | (pPowerlevel[idx1] << 8) |
+ (pPowerlevel[idx1] << 16) | (pPowerlevel[idx1] << 24);
+ /* 2010/10/18 MH For external PA module. We need to limit power index to be less than 0x20. */
+ if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA)
+ TxAGC[idx1] = 0x20;
}
} else {
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
@@ -148,15 +141,15 @@ rtl8188e_PHY_RF6052SetCckTxPower(
/* rf-A cck tx power */
tmpval = TxAGC[RF_PATH_A] & 0xff;
- PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
+ rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
tmpval = TxAGC[RF_PATH_A] >> 8;
- PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+ rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
/* rf-B cck tx power */
tmpval = TxAGC[RF_PATH_B] >> 24;
- PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
+ rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
tmpval = TxAGC[RF_PATH_B] & 0x00ffffff;
- PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
+ rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
} /* PHY_RF6052SetCckTxPower */
/* */
@@ -166,7 +159,7 @@ rtl8188e_PHY_RF6052SetCckTxPower(
static void getpowerbase88e(struct adapter *Adapter, u8 *pPowerLevelOFDM,
u8 *pPowerLevelBW20, u8 *pPowerLevelBW40, u8 Channel, u32 *OfdmBase, u32 *MCSBase)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u32 powerBase0, powerBase1;
u8 i;
@@ -190,7 +183,7 @@ static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
u8 index, u32 *powerBase0, u32 *powerBase1,
u32 *pOutWriteVal)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u8 i, chnlGroup = 0, pwr_diff_limit[4], customer_pwr_limit;
s8 pwr_diff = 0;
u32 writeVal, customer_limit, rf;
@@ -272,7 +265,6 @@ static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
}
static void writeOFDMPowerReg88E(struct adapter *Adapter, u8 index, u32 *pValue)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
u16 regoffset_a[6] = {
rTxAGC_A_Rate18_06, rTxAGC_A_Rate54_24,
rTxAGC_A_Mcs03_Mcs00, rTxAGC_A_Mcs07_Mcs04,
@@ -299,13 +291,10 @@ static void writeOFDMPowerReg88E(struct adapter *Adapter, u8 index, u32 *pValue)
else
regoffset = regoffset_b[index];
- PHY_SetBBReg(Adapter, regoffset, bMaskDWord, writeVal);
+ rtl8188e_PHY_SetBBReg(Adapter, regoffset, bMaskDWord, writeVal);
/* 201005115 Joseph: Set Tx Power diff for Tx power training mechanism. */
- if (((pHalData->rf_type == RF_2T2R) &&
- (regoffset == rTxAGC_A_Mcs15_Mcs12 || regoffset == rTxAGC_B_Mcs15_Mcs12)) ||
- ((pHalData->rf_type != RF_2T2R) &&
- (regoffset == rTxAGC_A_Mcs07_Mcs04 || regoffset == rTxAGC_B_Mcs07_Mcs04))) {
+ if (regoffset == rTxAGC_A_Mcs07_Mcs04 || regoffset == rTxAGC_B_Mcs07_Mcs04) {
writeVal = pwr_val[3];
if (regoffset == rTxAGC_A_Mcs15_Mcs12 || regoffset == rTxAGC_A_Mcs07_Mcs04)
regoffset = 0xc90;
@@ -353,7 +342,7 @@ rtl8188e_PHY_RF6052SetOFDMTxPower(
u8 *pPowerLevelBW40,
u8 Channel)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u32 writeVal[2], powerBase0[2], powerBase1[2], pwrtrac_value;
u8 direction;
u8 index = 0;
@@ -383,7 +372,7 @@ rtl8188e_PHY_RF6052SetOFDMTxPower(
static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
{
struct bb_reg_def *pPhyReg;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u32 u4RegValue = 0;
u8 eRFPath = 0;
int rtStatus = _SUCCESS;
@@ -393,21 +382,21 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
pPhyReg = &pHalData->PHYRegDef[eRFPath];
/*----Store original RFENV control type----*/
- u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
+ u4RegValue = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
/*----Set RF_ENV enable----*/
- PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
udelay(1);/* PlatformStallExecution(1); */
/*----Set RF_ENV output high----*/
- PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
udelay(1);/* PlatformStallExecution(1); */
/* Set bit number of Address and Data for RF register */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
udelay(1);/* PlatformStallExecution(1); */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
udelay(1);/* PlatformStallExecution(1); */
/*----Initialize RF from configuration file----*/
@@ -415,7 +404,7 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
rtStatus = _FAIL;
/*----Restore RFENV control type----*/;
- PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
if (rtStatus != _SUCCESS)
goto phy_RF6052_Config_ParaFile_Fail;
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
index 053d9549873d..90d426199f52 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
@@ -126,7 +126,7 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat
{
struct adapter *padapter = precvframe->adapter;
struct rx_pkt_attrib *pattrib = &precvframe->attrib;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = &padapter->haldata;
struct phy_info *pPHYInfo = &pattrib->phy_info;
u8 *wlanhdr;
struct odm_per_pkt_info pkt_info;
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_sreset.c b/drivers/staging/r8188eu/hal/rtl8188e_sreset.c
deleted file mode 100644
index 7b3ac6e306ce..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_sreset.c
+++ /dev/null
@@ -1,37 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188E_SRESET_C_
-
-#include "../include/rtl8188e_sreset.h"
-#include "../include/rtl8188e_hal.h"
-
-void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
-{
- u32 txdma_status;
-
- txdma_status = rtw_read32(padapter, REG_TXDMA_STATUS);
- if (txdma_status != 0x00) {
- DBG_88E("%s REG_TXDMA_STATUS:0x%08x\n", __func__, txdma_status);
- rtw_write32(padapter, REG_TXDMA_STATUS, txdma_status);
- }
- /* total xmit irp = 4 */
-}
-
-void rtl8188e_sreset_linked_status_check(struct adapter *padapter)
-{
- u32 rx_dma_status = 0;
- u8 fw_status = 0;
- rx_dma_status = rtw_read32(padapter, REG_RXDMA_STATUS);
- if (rx_dma_status != 0x00) {
- DBG_88E("%s REG_RXDMA_STATUS:0x%08x\n", __func__, rx_dma_status);
- rtw_write32(padapter, REG_RXDMA_STATUS, rx_dma_status);
- }
- fw_status = rtw_read8(padapter, REG_FMETHR);
- if (fw_status != 0x00) {
- if (fw_status == 1)
- DBG_88E("%s REG_FW_STATUS (0x%02x), Read_Efuse_Fail !!\n", __func__, fw_status);
- else if (fw_status == 2)
- DBG_88E("%s REG_FW_STATUS (0x%02x), Condition_No_Match !!\n", __func__, fw_status);
- }
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_led.c b/drivers/staging/r8188eu/hal/rtl8188eu_led.c
deleted file mode 100644
index 452d4bb87aba..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188eu_led.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtl8188e_hal.h"
-#include "../include/rtl8188e_led.h"
-
-/* LED object. */
-
-/* LED_819xUsb routines. */
-/* Description: */
-/* Turn on LED according to LedPin specified. */
-void SwLedOn(struct adapter *padapter, struct LED_871x *pLed)
-{
- u8 LedCfg;
-
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
- return;
- LedCfg = rtw_read8(padapter, REG_LEDCFG2);
- switch (pLed->LedPin) {
- case LED_PIN_LED0:
- rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0xf0) | BIT(5) | BIT(6)); /* SW control led0 on. */
- break;
- case LED_PIN_LED1:
- rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0x0f) | BIT(5)); /* SW control led1 on. */
- break;
- default:
- break;
- }
- pLed->bLedOn = true;
-}
-
-/* Description: */
-/* Turn off LED according to LedPin specified. */
-void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
-{
- u8 LedCfg;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
-
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
- goto exit;
-
- LedCfg = rtw_read8(padapter, REG_LEDCFG2);/* 0x4E */
-
- switch (pLed->LedPin) {
- case LED_PIN_LED0:
- if (pHalData->bLedOpenDrain) {
- /* Open-drain arrangement for controlling the LED) */
- LedCfg &= 0x90; /* Set to software control. */
- rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
- LedCfg = rtw_read8(padapter, REG_MAC_PINMUX_CFG);
- LedCfg &= 0xFE;
- rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
- } else {
- rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3) | BIT(5) | BIT(6)));
- }
- break;
- case LED_PIN_LED1:
- LedCfg &= 0x0f; /* Set to software control. */
- rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
- break;
- default:
- break;
- }
-exit:
- pLed->bLedOn = false;
-}
-
-/* Interface to manipulate LED objects. */
-/* Default LED behavior. */
-
-/* Description: */
-/* Initialize all LED_871x objects. */
-void rtl8188eu_InitSwLeds(struct adapter *padapter)
-{
- struct led_priv *pledpriv = &padapter->ledpriv;
-
- pledpriv->LedControlHandler = LedControl8188eu;
-
- InitLed871x(padapter, &pledpriv->SwLed0, LED_PIN_LED0);
-
- InitLed871x(padapter, &pledpriv->SwLed1, LED_PIN_LED1);
-}
-
-/* Description: */
-/* DeInitialize all LED_819xUsb objects. */
-void rtl8188eu_DeInitSwLeds(struct adapter *padapter)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
-
- DeInitLed871x(&ledpriv->SwLed0);
- DeInitLed871x(&ledpriv->SwLed1);
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
index b7feb4d8c8aa..293541db597d 100644
--- a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
@@ -154,7 +154,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
u8 data_rate, pwr_status, offset;
struct adapter *adapt = pxmitframe->padapter;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -382,7 +382,7 @@ static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct xmit_frame *pxmitframe = NULL;
struct xmit_frame *pfirstframe = NULL;
diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c
index ef1ae95d7db0..96db9a8e7667 100644
--- a/drivers/staging/r8188eu/hal/usb_halinit.c
+++ b/drivers/staging/r8188eu/hal/usb_halinit.c
@@ -8,10 +8,10 @@
#include "../include/rtw_efuse.h"
#include "../include/rtl8188e_hal.h"
-#include "../include/rtl8188e_led.h"
#include "../include/rtw_iol.h"
#include "../include/usb_ops.h"
#include "../include/usb_osintf.h"
+#include "../include/Hal8188EPwrSeq.h"
#define HAL_MAC_ENABLE 1
#define HAL_BB_ENABLE 1
@@ -19,7 +19,7 @@
static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
switch (NumOutPipe) {
case 3:
@@ -40,29 +40,16 @@ static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
DBG_88E("%s OutEpQueueSel(0x%02x), OutEpNumber(%d)\n", __func__, haldata->OutEpQueueSel, haldata->OutEpNumber);
}
-static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPipe, u8 NumOutPipe)
+static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumOutPipe)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
- bool result = false;
_ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
-
- /* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
- if (1 == haldata->OutEpNumber) {
- if (1 != NumInPipe)
- return result;
- }
-
- /* All config other than above support one Bulk IN and one Interrupt IN. */
-
- result = Hal_MappingOutPipe(adapt, NumOutPipe);
-
- return result;
+ return Hal_MappingOutPipe(adapt, NumOutPipe);
}
void rtl8188eu_interface_configure(struct adapter *adapt)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
if (pdvobjpriv->ishighspeed)
@@ -70,8 +57,6 @@ void rtl8188eu_interface_configure(struct adapter *adapt)
else
haldata->UsbBulkOutSize = USB_FULL_SPEED_BULK_SIZE;/* 64 bytes */
- haldata->interfaceIndex = pdvobjpriv->InterfaceNumber;
-
haldata->UsbTxAggMode = 1;
haldata->UsbTxAggDescNum = 0x6; /* only 4 bits */
@@ -81,19 +66,18 @@ void rtl8188eu_interface_configure(struct adapter *adapt)
	haldata->UsbRxAggPageCount = 48; /* unit: 128 B; 0x0A (10) = MAX_RX_DMA_BUFFER_SIZE/2/haldata->UsbBulkOutSize */
haldata->UsbRxAggPageTimeout = 0x4; /* 6, absolute time = 34ms/(2^6) */
- HalUsbSetQueuePipeMapping8188EUsb(adapt,
- pdvobjpriv->RtNumInPipes, pdvobjpriv->RtNumOutPipes);
+ HalUsbSetQueuePipeMapping8188EUsb(adapt, pdvobjpriv->RtNumOutPipes);
}
u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
{
u16 value16;
/* HW Power on sequence */
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
if (haldata->bMacPwrCtrlOn)
return _SUCCESS;
- if (!HalPwrSeqCmdParsing(adapt, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_PWR_ON_FLOW)) {
+ if (!HalPwrSeqCmdParsing(adapt, Rtl8188E_NIC_PWR_ON_FLOW)) {
DBG_88E(KERN_ERR "%s: run power on flow fail\n", __func__);
return _FAIL;
}
@@ -144,7 +128,7 @@ static void _InitInterrupt(struct adapter *Adapter)
static void _InitQueueReservedPage(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
u32 numHQ = 0;
u32 numLQ = 0;
@@ -212,7 +196,7 @@ static void _InitNormalChipRegPriority(struct adapter *Adapter, u16 beQ,
static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
u16 value = 0;
switch (haldata->OutEpQueueSel) {
@@ -234,7 +218,7 @@ static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
static void _InitNormalChipTwoOutEpPriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
u16 valueHi = 0;
@@ -300,7 +284,7 @@ static void _InitNormalChipThreeOutEpPriority(struct adapter *Adapter)
static void _InitQueuePriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
switch (haldata->OutEpNumber) {
case 1:
@@ -344,7 +328,7 @@ static void _InitDriverInfoSize(struct adapter *Adapter, u8 drvInfoSize)
static void _InitWMACSetting(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
haldata->ReceiveConfig = RCR_AAP | RCR_APM | RCR_AM | RCR_AB |
RCR_CBSSID_DATA | RCR_CBSSID_BCN |
@@ -400,13 +384,6 @@ static void _InitEDCA(struct adapter *Adapter)
rtw_write32(Adapter, REG_EDCA_VO_PARAM, 0x002FA226);
}
-static void _InitRDGSetting(struct adapter *Adapter)
-{
- rtw_write8(Adapter, REG_RD_CTRL, 0xFF);
- rtw_write16(Adapter, REG_RD_NAV_NXT, 0x200);
- rtw_write8(Adapter, REG_RD_RESP_PKT_TH, 0x05);
-}
-
static void _InitRetryFunction(struct adapter *Adapter)
{
u8 value8;
@@ -436,7 +413,7 @@ static void _InitRetryFunction(struct adapter *Adapter)
*---------------------------------------------------------------------------*/
static void usb_AggSettingTxUpdate(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
u32 value32;
if (Adapter->registrypriv.wifi_spec)
@@ -471,7 +448,7 @@ usb_AggSettingRxUpdate(
struct adapter *Adapter
)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
u8 valueDMA;
u8 valueUSB;
@@ -525,16 +502,11 @@ usb_AggSettingRxUpdate(
static void InitUsbAggregationSetting(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
-
/* Tx aggregation setting */
usb_AggSettingTxUpdate(Adapter);
/* Rx aggregation setting */
usb_AggSettingRxUpdate(Adapter);
-
- /* 201/12/10 MH Add for USB agg mode dynamic switch. */
- haldata->UsbRxHighSpeedMode = false;
}
static void _InitOperationMode(struct adapter *Adapter)
@@ -543,7 +515,7 @@ static void _InitOperationMode(struct adapter *Adapter)
static void _InitBeaconParameters(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
rtw_write16(Adapter, REG_BCN_CTRL, 0x1010);
@@ -572,8 +544,8 @@ static void _BeaconFunctionEnable(struct adapter *Adapter,
/* Set CCK and OFDM Block "ON" */
static void _BBTurnOnBlock(struct adapter *Adapter)
{
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bCCKEn, 0x1);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bCCKEn, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
}
enum {
@@ -583,16 +555,16 @@ enum {
static void _InitAntenna_Selection(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
if (haldata->AntDivCfg == 0)
return;
DBG_88E("==> %s ....\n", __func__);
rtw_write32(Adapter, REG_LEDCFG0, rtw_read32(Adapter, REG_LEDCFG0) | BIT(23));
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, BIT(13), 0x01);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, BIT(13), 0x01);
- if (PHY_QueryBBReg(Adapter, rFPGA0_XA_RFInterfaceOE, 0x300) == Antenna_A)
+ if (rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_RFInterfaceOE, 0x300) == Antenna_A)
haldata->CurAntenna = Antenna_A;
else
haldata->CurAntenna = Antenna_B;
@@ -605,18 +577,12 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
u16 value16;
u8 txpktbuf_bndy;
u32 status = _SUCCESS;
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
u32 init_start_time = jiffies;
- #define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
-
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
-
if (Adapter->pwrctrlpriv.bkeepfwalive) {
- _ps_open_RF(Adapter);
-
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
PHY_IQCalibrate_8188E(Adapter, true);
} else {
@@ -630,7 +596,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
goto exit;
}
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PW_ON);
status = rtl8188eu_InitPowerOn(Adapter);
if (status == _FAIL)
goto exit;
@@ -653,7 +618,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
txpktbuf_bndy = WMM_NORMAL_TX_PAGE_BOUNDARY_88E;
}
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC01);
_InitQueueReservedPage(Adapter);
_InitQueuePriority(Adapter);
_InitPageBoundary(Adapter);
@@ -661,7 +625,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
_InitTxBufferBoundary(Adapter, 0);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_DOWNLOAD_FW);
status = rtl8188e_FirmwareDownload(Adapter);
if (status != _SUCCESS) {
@@ -675,7 +638,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
}
rtl8188e_InitializeFirmwareVars(Adapter);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MAC);
#if (HAL_MAC_ENABLE == 1)
status = PHY_MACConfig8188E(Adapter);
if (status == _FAIL) {
@@ -687,7 +649,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* */
/* d. Initialize BB related configurations. */
/* */
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BB);
#if (HAL_BB_ENABLE == 1)
status = PHY_BBConfig8188E(Adapter);
if (status == _FAIL) {
@@ -696,7 +657,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
}
#endif
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_RF);
#if (HAL_RF_ENABLE == 1)
status = PHY_RFConfig8188E(Adapter);
if (status == _FAIL) {
@@ -705,7 +665,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
}
#endif
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_EFUSE_PATCH);
status = rtl8188e_iol_efuse_patch(Adapter);
if (status == _FAIL) {
DBG_88E("%s rtl8188e_iol_efuse_patch failed\n", __func__);
@@ -714,12 +673,10 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
_InitTxBufferBoundary(Adapter, txpktbuf_bndy);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_LLTT);
status = InitLLTTable(Adapter, txpktbuf_bndy);
if (status == _FAIL)
goto exit;
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC02);
/* Get Rx PHY status in order to report RSSI and others. */
_InitDriverInfoSize(Adapter, DRVINFO_SZ);
@@ -743,9 +700,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
value16 |= (MACTXEN | MACRXEN);
rtw_write8(Adapter, REG_CR, value16);
- if (haldata->bRDGEnable)
- _InitRDGSetting(Adapter);
-
/* Enable TX Report */
/* Enable Tx Report Timer */
value8 = rtw_read8(Adapter, REG_TX_RPT_CTRL);
@@ -761,16 +715,13 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
rtw_write16(Adapter, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
/* Keep RfRegChnlVal for later use. */
- haldata->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, (enum rf_radio_path)0, RF_CHNLBW, bRFRegOffsetMask);
- haldata->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, (enum rf_radio_path)1, RF_CHNLBW, bRFRegOffsetMask);
+ haldata->RfRegChnlVal[0] = rtl8188e_PHY_QueryRFReg(Adapter, (enum rf_radio_path)0, RF_CHNLBW, bRFRegOffsetMask);
+ haldata->RfRegChnlVal[1] = rtl8188e_PHY_QueryRFReg(Adapter, (enum rf_radio_path)1, RF_CHNLBW, bRFRegOffsetMask);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_TURN_ON_BLOCK);
_BBTurnOnBlock(Adapter);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_SECURITY);
invalidate_cam_all(Adapter);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC11);
/* 2010/12/17 MH We need to set TX power according to EFUSE content at first. */
PHY_SetTxPowerLevel8188E(Adapter, haldata->CurrentChannel);
@@ -795,7 +746,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
	/* NAV limit, suggested by scott */
rtw_write8(Adapter, 0x652, 0x0);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_HAL_DM);
rtl8188e_InitHalDm(Adapter);
/* 2010/08/11 MH Merge from 8192SE for Minicard init. We need to confirm current radio status */
@@ -819,7 +769,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
	/* enable tx DMA to drop redundant packet data */
rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK) | DROP_DATA_EN));
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
/* 2010/08/26 MH Merge from 8192CE. */
if (pwrctrlpriv->rf_pwrstate == rf_on) {
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
@@ -829,15 +778,11 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
}
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_PW_TRACK);
-
ODM_TXPowerTrackingCheck(&haldata->odmpriv);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_LCK);
PHY_LCCalibrate_8188E(Adapter);
}
-/* HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PABIAS); */
/* _InitPABias(Adapter); */
rtw_write8(Adapter, REG_USB_HRPWM, 0);
@@ -845,29 +790,15 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
rtw_write32(Adapter, REG_FWHW_TXQ_CTRL, rtw_read32(Adapter, REG_FWHW_TXQ_CTRL) | BIT(12));
exit:
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
-
DBG_88E("%s in %dms\n", __func__, rtw_get_passing_time_ms(init_start_time));
return status;
}
-void _ps_open_RF(struct adapter *adapt)
-{
- /* here call with bRegSSPwrLvl 1, bRegSSPwrLvl 2 needs to be verified */
- /* phy_SsPwrSwitch92CU(adapt, rf_on, 1); */
-}
-
-static void _ps_close_RF(struct adapter *adapt)
-{
- /* here call with bRegSSPwrLvl 1, bRegSSPwrLvl 2 needs to be verified */
- /* phy_SsPwrSwitch92CU(adapt, rf_off, 1); */
-}
-
static void CardDisableRTL8188EU(struct adapter *Adapter)
{
u8 val8;
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
/* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */
val8 = rtw_read8(Adapter, REG_TX_RPT_CTRL);
@@ -877,7 +808,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
rtw_write8(Adapter, REG_CR, 0x0);
/* Run LPS WL RFOFF flow */
- HalPwrSeqCmdParsing(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_LPS_ENTER_FLOW);
+ HalPwrSeqCmdParsing(Adapter, Rtl8188E_NIC_LPS_ENTER_FLOW);
/* 2. 0x1F[7:0] = 0 turn off RF */
@@ -898,7 +829,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
rtw_write8(Adapter, REG_32K_CTRL, val8 & (~BIT(0)));
/* Card disable power action flow */
- HalPwrSeqCmdParsing(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_DISABLE_FLOW);
+ HalPwrSeqCmdParsing(Adapter, Rtl8188E_NIC_DISABLE_FLOW);
/* Reset MCU IO Wrapper */
val8 = rtw_read8(Adapter, REG_RSV_CTRL + 1);
@@ -929,9 +860,7 @@ u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
rtw_write32(Adapter, REG_HIMRE_88E, IMR_DISABLED_88E);
DBG_88E("bkeepfwalive(%x)\n", Adapter->pwrctrlpriv.bkeepfwalive);
- if (Adapter->pwrctrlpriv.bkeepfwalive) {
- _ps_close_RF(Adapter);
- } else {
+ if (!Adapter->pwrctrlpriv.bkeepfwalive) {
if (Adapter->hw_init_completed) {
CardDisableRTL8188EU(Adapter);
}
@@ -948,12 +877,10 @@ unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
status = _SUCCESS;
- precvpriv->ff_hwaddr = RECV_BULK_IN_ADDR;
-
/* issue Rx irp to receive data */
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
- if (!rtw_read_port(Adapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf)) {
+ if (!rtw_read_port(Adapter, (unsigned char *)precvbuf)) {
status = _FAIL;
goto exit;
}
@@ -971,39 +898,6 @@ exit:
/* EEPROM/EFUSE Content Parsing */
/* */
/* */
-static void _ReadLEDSetting(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
-{
- struct led_priv *pledpriv = &Adapter->ledpriv;
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
-
- pledpriv->bRegUseLed = true;
- haldata->bLedOpenDrain = true;/* Support Open-drain arrangement for controlling the LED. */
-}
-
-static void Hal_EfuseParsePIDVID_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-
- if (!AutoLoadFail) {
- /* VID, PID */
- haldata->EEPROMVID = EF2BYTE(*(__le16 *)&hwinfo[EEPROM_VID_88EU]);
- haldata->EEPROMPID = EF2BYTE(*(__le16 *)&hwinfo[EEPROM_PID_88EU]);
-
- /* Customer ID, 0x00 and 0xff are reserved for Realtek. */
- haldata->EEPROMCustomerID = *(u8 *)&hwinfo[EEPROM_CUSTOMERID_88E];
- haldata->EEPROMSubCustomerID = EEPROM_Default_SubCustomerID;
- } else {
- haldata->EEPROMVID = EEPROM_Default_VID;
- haldata->EEPROMPID = EEPROM_Default_PID;
-
- /* Customer ID, 0x00 and 0xff are reserved for Realtek. */
- haldata->EEPROMCustomerID = EEPROM_Default_CustomerID;
- haldata->EEPROMSubCustomerID = EEPROM_Default_SubCustomerID;
- }
-
- DBG_88E("VID = 0x%04X, PID = 0x%04X\n", haldata->EEPROMVID, haldata->EEPROMPID);
- DBG_88E("Customer ID: 0x%02X, SubCustomer ID: 0x%02X\n", haldata->EEPROMCustomerID, haldata->EEPROMSubCustomerID);
-}
static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
{
@@ -1020,76 +914,43 @@ static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool
}
}
-static void
-readAdapterInfo_8188EU(
- struct adapter *adapt
- )
-{
- struct eeprom_priv *eeprom = &adapt->eeprompriv;
-
- /* parse the eeprom/efuse content */
- Hal_EfuseParseIDCode88E(adapt, eeprom->efuse_eeprom_data);
- Hal_EfuseParsePIDVID_8188EU(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseMACAddr_8188EU(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-
- Hal_ReadPowerSavingMode88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_ReadTxPowerInfo88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseEEPROMVer88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- rtl8188e_EfuseParseChnlPlan(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseXtal_8188E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseCustomerID88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_ReadAntennaDiversity88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseBoardType88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_ReadThermalMeter_88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-
- _ReadLEDSetting(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-}
-
-static void _ReadPROMContent(
- struct adapter *Adapter
- )
+void ReadAdapterInfo8188EU(struct adapter *Adapter)
{
struct eeprom_priv *eeprom = &Adapter->eeprompriv;
+ struct led_priv *ledpriv = &Adapter->ledpriv;
u8 eeValue;
+	/* Read EEPROM size before calling any EEPROM function */
+ Adapter->EepromAddressSize = GetEEPROMSize8188E(Adapter);
+
/* check system boot selection */
eeValue = rtw_read8(Adapter, REG_9346CR);
- eeprom->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM) ? true : false;
- eeprom->bautoload_fail_flag = (eeValue & EEPROM_EN) ? false : true;
+ eeprom->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM);
+ eeprom->bautoload_fail_flag = !(eeValue & EEPROM_EN);
DBG_88E("Boot from %s, Autoload %s !\n", (eeprom->EepromOrEfuse ? "EEPROM" : "EFUSE"),
(eeprom->bautoload_fail_flag ? "Fail" : "OK"));
- Hal_InitPGData88E(Adapter);
- readAdapterInfo_8188EU(Adapter);
-}
-
-static void _ReadRFType(struct adapter *Adapter)
-{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ if (!is_boot_from_eeprom(Adapter))
+ EFUSE_ShadowMapUpdate(Adapter);
- haldata->rf_chip = RF_6052;
-}
+ /* parse the eeprom/efuse content */
+ Hal_EfuseParseIDCode88E(Adapter, eeprom->efuse_eeprom_data);
+ Hal_EfuseParseMACAddr_8188EU(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-static int _ReadAdapterInfo8188EU(struct adapter *Adapter)
-{
- _ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
- _ReadPROMContent(Adapter);
+ Hal_ReadPowerSavingMode88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadTxPowerInfo88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ rtl8188e_EfuseParseChnlPlan(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseXtal_8188E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadAntennaDiversity88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadThermalMeter_88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- return _SUCCESS;
-}
-
-void ReadAdapterInfo8188EU(struct adapter *Adapter)
-{
- /* Read EEPROM size before call any EEPROM function */
- Adapter->EepromAddressSize = GetEEPROMSize8188E(Adapter);
-
- _ReadAdapterInfo8188EU(Adapter);
+ ledpriv->bRegUseLed = true;
}
static void ResumeTxBeacon(struct adapter *adapt)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
	/* 2010.03.01. Marked by tynli. No need to call workitem because we record the value */
/* which should be read from register to a global variable. */
@@ -1103,7 +964,7 @@ static void ResumeTxBeacon(struct adapter *adapt)
static void StopTxBeacon(struct adapter *adapt)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
	/* 2010.03.01. Marked by tynli. No need to call workitem because we record the value */
/* which should be read from register to a global variable. */
@@ -1210,7 +1071,7 @@ static void hw_var_set_bcn_func(struct adapter *Adapter, u8 variable, u8 *val)
void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
struct dm_priv *pdmpriv = &haldata->dmpriv;
struct odm_dm_struct *podmpriv = &haldata->odmpriv;
@@ -1727,7 +1588,7 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
struct odm_dm_struct *podmpriv = &haldata->odmpriv;
switch (variable) {
@@ -1744,9 +1605,6 @@ void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_DM_FLAG:
val[0] = podmpriv->SupportAbility;
break;
- case HW_VAR_RF_TYPE:
- val[0] = haldata->rf_type;
- break;
case HW_VAR_FWLPS_RF_ON:
{
/* When we halt NIC, we should check if FW LPS is leave. */
@@ -1786,7 +1644,7 @@ void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
/* Query setting of specified variable. */
u8 GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
u8 bResult = _SUCCESS;
switch (eVariable) {
@@ -1871,7 +1729,7 @@ u8 GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable
/* Change default setting of specified variable. */
u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
u8 bResult = _SUCCESS;
switch (eVariable) {
@@ -1925,7 +1783,7 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
u8 shortGIrate = false;
int supportRateNum = 0;
struct sta_info *psta;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
@@ -2036,11 +1894,10 @@ void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
void rtl8188eu_init_default_value(struct adapter *adapt)
{
- struct hal_data_8188e *haldata;
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct pwrctrl_priv *pwrctrlpriv;
u8 i;
- haldata = GET_HAL_DATA(adapt);
pwrctrlpriv = &adapt->pwrctrlpriv;
/* init default value */
@@ -2057,11 +1914,3 @@ void rtl8188eu_init_default_value(struct adapter *adapt)
for (i = 0; i < HP_THERMAL_NUM; i++)
haldata->odmpriv.RFCalibrateInfo.ThermalValue_HP[i] = 0;
}
-
-void rtl8188eu_alloc_haldata(struct adapter *adapt)
-{
- adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
- if (!adapt->HalData)
- DBG_88E("cant not alloc memory for HAL DATA\n");
- adapt->hal_data_sz = sizeof(struct hal_data_8188e);
-}
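
rtl8188eu_hal_init() above keeps its timing probe: it latches jiffies in init_start_time on entry and prints the elapsed milliseconds at the exit label. A sketch of that measurement, assuming rtw_get_passing_time_ms() wraps the standard jiffies helpers:

#include <linux/jiffies.h>

/* Elapsed wall time since 'start' in ms; jiffies_to_msecs() does the
 * HZ conversion, and the unsigned subtraction is wrap-safe. */
static inline u32 passing_time_ms(unsigned long start)
{
	return jiffies_to_msecs(jiffies - start);
}
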
diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c
index e4a9350376bf..4a0ab4053e90 100644
--- a/drivers/staging/r8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/hal/usb_ops_linux.c
@@ -183,24 +183,6 @@ int rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *data)
return RTW_STATUS_CODE(ret);
}
-static void interrupt_handler_8188eu(struct adapter *adapt, u16 pkt_len, u8 *pbuf)
-{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-
- if (pkt_len != INTERRUPT_MSG_FORMAT_LEN) {
- DBG_88E("%s Invalid interrupt content length (%d)!\n", __func__, pkt_len);
- return;
- }
-
- /* HISR */
- memcpy(&haldata->IntArray[0], &pbuf[USB_INTR_CONTENT_HISR_OFFSET], 4);
- memcpy(&haldata->IntArray[1], &pbuf[USB_INTR_CONTENT_HISRE_OFFSET], 4);
-
- /* C2H Event */
- if (pbuf[0] != 0)
- memcpy(&haldata->C2hArray[0], &pbuf[USB_INTR_CONTENT_C2H_OFFSET], 16);
-}
-
static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
{
u8 *pbuf;
@@ -213,7 +195,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
struct sk_buff *pkt_copy = NULL;
struct recv_frame *precvframe = NULL;
struct rx_pkt_attrib *pattrib = NULL;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct recv_priv *precvpriv = &adapt->recvpriv;
struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
@@ -342,8 +324,6 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
pattrib->MacIDValidEntry[0],
pattrib->MacIDValidEntry[1]
);
- } else if (pattrib->pkt_rpt_type == HIS_REPORT) {
- interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
}
rtw_free_recvframe(precvframe, pfree_recv_queue);
}
@@ -401,7 +381,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
if (purb->status == 0) { /* SUCCESS */
if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)) {
precvbuf->reuse = true;
- rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ rtw_read_port(adapt, (unsigned char *)precvbuf);
DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__);
} else {
rtw_reset_continual_urb_error(adapter_to_dvobj(adapt));
@@ -415,7 +395,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
precvbuf->pskb = NULL;
precvbuf->reuse = false;
- rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ rtw_read_port(adapt, (unsigned char *)precvbuf);
}
} else {
DBG_88E("###=> usb_read_port_complete => urb status(%d)\n", purb->status);
@@ -436,7 +416,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
case -EPROTO:
case -EOVERFLOW:
precvbuf->reuse = true;
- rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ rtw_read_port(adapt, (unsigned char *)precvbuf);
break;
case -EINPROGRESS:
DBG_88E("ERROR: URB IS IN PROGRESS!/n");
@@ -447,7 +427,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
}
}
-u32 rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
+u32 rtw_read_port(struct adapter *adapter, u8 *rmem)
{
struct urb *purb = NULL;
struct recv_buf *precvbuf = (struct recv_buf *)rmem;
@@ -507,7 +487,7 @@ u32 rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
purb = precvbuf->purb;
	/* derive the pipe handle for the single bulk-in endpoint */
- pipe = ffaddr2pipehdl(pdvobj, addr);
+ pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe);
usb_fill_bulk_urb(purb, pusbd, pipe,
precvbuf->pbuf,
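
With the IN-pipe array collapsed to a single endpoint, rtw_read_port() no longer needs a DMA FIFO address: the pipe is derived directly from RtInPipe. A trimmed sketch of the URB submission it performs (the recv_buf bookkeeping and error handling of the real function are omitted; the names here are placeholders, not the driver's):

#include <linux/usb.h>

/* Submit one bulk-IN URB on the adapter's single RX endpoint. */
static int submit_rx_urb(struct usb_device *udev, int in_ep,
			 struct urb *urb, void *buf, int len,
			 usb_complete_t complete_fn, void *context)
{
	unsigned int pipe = usb_rcvbulkpipe(udev, in_ep);

	usb_fill_bulk_urb(urb, udev, pipe, buf, len, complete_fn, context);
	return usb_submit_urb(urb, GFP_ATOMIC);
}
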
diff --git a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
index 6f901ce607e8..2517a08bc95a 100644
--- a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
@@ -4,37 +4,11 @@
#ifndef __INC_HAL8188EPHYCFG_H__
#define __INC_HAL8188EPHYCFG_H__
-/*--------------------------Define Parameters-------------------------------*/
-#define LOOP_LIMIT 5
-#define MAX_STALL_TIME 50 /* us */
-#define AntennaDiversityValue 0x80
-#define MAX_TXPWR_IDX_NMODE_92S 63
-#define Reset_Cnt_Limit 3
-
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-
#define MAX_AGGR_NUM 0x07
-/*--------------------------Define Parameters-------------------------------*/
-
-/*------------------------------Define structure----------------------------*/
-
-enum hw90_block {
- HW90_BLOCK_MAC = 0,
- HW90_BLOCK_PHY0 = 1,
- HW90_BLOCK_PHY1 = 2,
- HW90_BLOCK_RF = 3,
- HW90_BLOCK_MAXIMUM = 4, /* Never use this */
-};
-
enum rf_radio_path {
RF_PATH_A = 0, /* Radio Path A */
RF_PATH_B = 1, /* Radio Path B */
- RF_PATH_C = 2, /* Radio Path C */
- RF_PATH_D = 3, /* Radio Path D */
};
#define MAX_PG_GROUP 13
@@ -46,17 +20,6 @@ enum rf_radio_path {
#define MAX_CHNL_GROUP_24G 6 /* ch1~2, ch3~5, ch6~8,
*ch9~11, ch12~13, CH 14
					 * total six groups */
-#define CHANNEL_GROUP_MAX_88E 6
-
-/* BB/RF related */
-enum RF_TYPE_8190P {
- RF_TYPE_MIN, /* 0 */
- RF_8225 = 1, /* 1 11b/g RF for verification only */
- RF_8256 = 2, /* 2 11b/g/n */
- RF_6052 = 4, /* 4 11b/g/n RF */
- /* TODO: We should remove this psudo PHY RF after we get new RF. */
- RF_PSEUDO_11N = 5, /* 5, It is a temporality RF. */
-};
struct bb_reg_def {
u32 rfintfs; /* set software control: */
@@ -106,18 +69,7 @@ struct bb_reg_def {
* Path A and B */
};
-/*------------------------------Define structure----------------------------*/
-
-/*------------------------Export global variable----------------------------*/
-/*------------------------Export global variable----------------------------*/
-
-/*------------------------Export Marco Definition---------------------------*/
-/*------------------------Export Marco Definition---------------------------*/
-
-/*--------------------------Exported Function prototype---------------------*/
-/* */
/* BB and RF register read/write */
-/* */
u32 rtl8188e_PHY_QueryBBReg(struct adapter *adapter, u32 regaddr, u32 mask);
void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr,
u32 mask, u32 data);
@@ -144,15 +96,5 @@ void PHY_SwChnl8188E(struct adapter *adapter, u8 channel);
void storePwrIndexDiffRateOffset(struct adapter *adapter, u32 regaddr,
u32 mask, u32 data);
-/*--------------------------Exported Function prototype---------------------*/
-
-#define PHY_QueryBBReg(adapt, regaddr, mask) \
- rtl8188e_PHY_QueryBBReg((adapt), (regaddr), (mask))
-#define PHY_SetBBReg(adapt, regaddr, bitmask, data) \
- rtl8188e_PHY_SetBBReg((adapt), (regaddr), (bitmask), (data))
-#define PHY_QueryRFReg(adapt, rfpath, regaddr, bitmask) \
- rtl8188e_PHY_QueryRFReg((adapt), (rfpath), (regaddr), (bitmask))
-#define PHY_SetRFReg(adapt, rfpath, regaddr, bitmask, data) \
- rtl8188e_PHY_SetRFReg((adapt), (rfpath), (regaddr), (bitmask), (data))
-#endif /* __INC_HAL8192CPHYCFG_H */
+#endif
diff --git a/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h b/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h
index a73bd1a5d57b..e4c5b5d23cb4 100644
--- a/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h
+++ b/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h
@@ -6,150 +6,8 @@
#include "HalPwrSeqCmd.h"
-/*
- Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
- There are 6 HW Power States:
- 0: POFF--Power Off
- 1: PDN--Power Down
- 2: CARDEMU--Card Emulation
- 3: ACT--Active Mode
- 4: LPS--Low Power State
- 5: SUS--Suspend
-
- The transision from different states are defined below
- TRANS_CARDEMU_TO_ACT
- TRANS_ACT_TO_CARDEMU
- TRANS_CARDEMU_TO_SUS
- TRANS_SUS_TO_CARDEMU
- TRANS_CARDEMU_TO_PDN
- TRANS_ACT_TO_LPS
- TRANS_LPS_TO_ACT
-
- TRANS_END
-
- PWR SEQ Version: rtl8188E_PwrSeq_V09.h
-*/
-#define RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS 10
-#define RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS 10
-#define RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS 10
-#define RTL8188E_TRANS_SUS_TO_CARDEMU_STEPS 10
-#define RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS 10
-#define RTL8188E_TRANS_PDN_TO_CARDEMU_STEPS 10
-#define RTL8188E_TRANS_ACT_TO_LPS_STEPS 15
-#define RTL8188E_TRANS_LPS_TO_ACT_STEPS 15
-#define RTL8188E_TRANS_END_STEPS 1
-
-#define RTL8188E_TRANS_CARDEMU_TO_ACT \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)},/* wait till 0x04[17] = 1 power ready*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0)|BIT(1), 0}, /* 0x02[1:0] = 0 reset BB*/ \
- {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, /*0x24[23] = 2b'01 schmit trigger */ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, /* 0x04[15] = 0 disable HWPDN (control by DRV)*/\
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4)|BIT(3), 0}, /*0x04[12:11] = 2b'00 disable WL suspend*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*0x04[8] = 1 polling until return 0*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, /*wait till 0x04[8] = 0*/ \
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*LDO normal mode*/ \
- {0x0074, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*SDIO Driving*/ \
-
-#define RTL8188E_TRANS_ACT_TO_CARDEMU \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/ \
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*LDO Sleep mode*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \
-
-#define RTL8188E_TRANS_CARDEMU_TO_SUS \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, /*0x04[12:11] = 2b'01enable WL suspend*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)|BIT(4)}, /*0x04[12:11] = 2b'11enable WL suspend for PCIe*/ \
- {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, BIT(7)}, /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */ \
- {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*Clear SIC_EN register 0x40[12] = 1'b0 */ \
- {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*Set USB suspend enable local register 0xfe10[4]=1 */ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, /*wait power state to suspend*/
-
-#define RTL8188E_TRANS_SUS_TO_CARDEMU \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, /*wait power state to suspend*/\
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
-
-#define RTL8188E_TRANS_CARDEMU_TO_CARDDIS \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, /*0x24[23] = 2b'01 schmit trigger */ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
- {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */ \
- {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*Clear SIC_EN register 0x40[12] = 1'b0 */ \
- {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*Set USB suspend enable local register 0xfe10[4]=1 */ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, /*wait power state to suspend*/
-
-#define RTL8188E_TRANS_CARDDIS_TO_CARDEMU \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, /*wait power state to suspend*/\
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
-
-#define RTL8188E_TRANS_CARDEMU_TO_PDN \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},/* 0x04[16] = 0*/\
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)},/* 0x04[15] = 1*/
-
-#define RTL8188E_TRANS_PDN_TO_CARDEMU \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},/* 0x04[15] = 0*/
-
-/* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */
-#define RTL8188E_TRANS_ACT_TO_LPS \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
- {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \
- {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},/*CCK and OFDM are disabled,and clock are gated*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
- {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},/*Reset MAC TRX*/ \
- {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},/*check if removed later*/ \
- {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)},/*Respond TxOK to scheduler*/ \
-
-#define RTL8188E_TRANS_LPS_TO_ACT \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
- {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\
- {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
- {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
- {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*. 0x08[4] = 0 switch TSF to 40M*/\
- {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, /*Polling 0x109[7]=0 TSF in 40M*/\
- {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/\
- {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, /*. 0x101[1] = 1*/\
- {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1)|BIT(0), BIT(1)|BIT(0)}, /*. 0x02[1:0] = 2b'11 enable BB macro*/\
- {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
-
-#define RTL8188E_TRANS_END \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0, PWR_CMD_END, 0, 0}, /* */
-
-extern struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_card_disable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_card_enable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_power_on_flow[];
+extern struct wl_pwr_cfg rtl8188E_card_disable_flow[];
+extern struct wl_pwr_cfg rtl8188E_enter_lps_flow[];
#endif /* __HAL8188EPWRSEQ_H__ */
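
With the step-count macros gone, each exported flow is an unsized array whose end is found at run time by a PWR_CMD_END entry. An illustrative fragment under the trimmed four-field layout, reusing two steps visible in the deleted macros above (a subset, not the full sequence):

/* Layout after the trim: { offset, cmd, msk, value }. */
struct wl_pwr_cfg example_power_on_flow[] = {
	{ 0x0006, PWR_CMD_POLLING, BIT(1), BIT(1) }, /* wait for power ready */
	{ 0x0005, PWR_CMD_WRITE,   BIT(7), 0 },      /* disable HWPDN */
	/* ... remaining steps ... */
	{ 0xFFFF, PWR_CMD_END,     0,      0 },      /* run-time terminator */
};
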
diff --git a/drivers/staging/r8188eu/include/HalPhyRf_8188e.h b/drivers/staging/r8188eu/include/HalPhyRf_8188e.h
index d4a27662309f..b75a5d869c56 100644
--- a/drivers/staging/r8188eu/include/HalPhyRf_8188e.h
+++ b/drivers/staging/r8188eu/include/HalPhyRf_8188e.h
@@ -30,12 +30,7 @@ void PHY_DigitalPredistortion_8188E(struct adapter *pAdapter);
void _PHY_SaveADDARegisters(struct adapter *pAdapter, u32 *ADDAReg,
u32 *ADDABackup, u32 RegisterNum);
-void _PHY_PathADDAOn(struct adapter *pAdapter, u32 *ADDAReg,
- bool isPathAOn, bool is2T);
-
void _PHY_MACSettingCalibration(struct adapter *pAdapter, u32 *MACReg,
u32 *MACBackup);
-void _PHY_PathAStandBy(struct adapter *pAdapter);
-
#endif /* #ifndef __HAL_PHY_RF_8188E_H__ */
diff --git a/drivers/staging/r8188eu/include/HalPwrSeqCmd.h b/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
index fe7ac910beb8..49c02cce569e 100644
--- a/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
+++ b/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
@@ -9,11 +9,6 @@
/*---------------------------------------------*/
/* 3 The value of cmd: 4 bits */
/*---------------------------------------------*/
-#define PWR_CMD_READ 0x00
- /* offset: the read register offset */
- /* msk: the mask of the read value */
- /* value: N/A, left by 0 */
- /* note: dirver shall implement this function by read & msk */
#define PWR_CMD_WRITE 0x01
/* offset: the read register offset */
@@ -41,43 +36,6 @@
/* msk: N/A */
/* value: N/A */
-/*---------------------------------------------*/
-/* 3 The value of base: 4 bits */
-/*---------------------------------------------*/
- /* define the base address of each block */
-#define PWR_BASEADDR_MAC 0x00
-#define PWR_BASEADDR_USB 0x01
-#define PWR_BASEADDR_PCIE 0x02
-#define PWR_BASEADDR_SDIO 0x03
-
-/*---------------------------------------------*/
-/* 3 The value of interface_msk: 4 bits */
-/*---------------------------------------------*/
-#define PWR_INTF_SDIO_MSK BIT(0)
-#define PWR_INTF_USB_MSK BIT(1)
-#define PWR_INTF_PCI_MSK BIT(2)
-#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
-
-/*---------------------------------------------*/
-/* 3 The value of fab_msk: 4 bits */
-/*---------------------------------------------*/
-#define PWR_FAB_TSMC_MSK BIT(0)
-#define PWR_FAB_UMC_MSK BIT(1)
-#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
-
-/*---------------------------------------------*/
-/* 3 The value of cut_msk: 8 bits */
-/*---------------------------------------------*/
-#define PWR_CUT_TESTCHIP_MSK BIT(0)
-#define PWR_CUT_A_MSK BIT(1)
-#define PWR_CUT_B_MSK BIT(2)
-#define PWR_CUT_C_MSK BIT(3)
-#define PWR_CUT_D_MSK BIT(4)
-#define PWR_CUT_E_MSK BIT(5)
-#define PWR_CUT_F_MSK BIT(6)
-#define PWR_CUT_G_MSK BIT(7)
-#define PWR_CUT_ALL_MSK 0xFF
-
enum pwrseq_cmd_delat_unit {
PWRSEQ_DELAY_US,
PWRSEQ_DELAY_MS,
@@ -85,26 +43,17 @@ enum pwrseq_cmd_delat_unit {
struct wl_pwr_cfg {
u16 offset;
- u8 cut_msk;
- u8 fab_msk:4;
- u8 interface_msk:4;
- u8 base:4;
u8 cmd:4;
u8 msk;
u8 value;
};
#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
-#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
-#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
-#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
-#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
/* Prototype of protected function. */
-u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 CutVersion, u8 FabVersion,
- u8 InterfaceType, struct wl_pwr_cfg PwrCfgCmd[]);
+u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg PwrCfgCmd[]);
#endif
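
With the cut/fab/interface masks removed from struct wl_pwr_cfg, the two-argument HalPwrSeqCmdParsing() reduces to a plain interpreter: every entry applies unconditionally until PWR_CMD_END. A sketch under this header's conventions (PWR_CMD_DELAY keeps its amount in offset and its unit in value; the real function also bounds polling with a retry limit, elided here):

u8 example_pwr_seq_parsing(struct adapter *padapter, struct wl_pwr_cfg cfg[])
{
	u32 i = 0;

	for (;;) {
		struct wl_pwr_cfg cmd = cfg[i++];
		u16 off = GET_PWR_CFG_OFFSET(cmd);
		u8 msk = GET_PWR_CFG_MASK(cmd);
		u8 val = GET_PWR_CFG_VALUE(cmd);

		switch (GET_PWR_CFG_CMD(cmd)) {
		case PWR_CMD_WRITE: {
			u8 v = rtw_read8(padapter, off);

			/* clear the field, then set the requested bits */
			rtw_write8(padapter, off, (v & ~msk) | (val & msk));
			break;
		}
		case PWR_CMD_POLLING:
			while ((rtw_read8(padapter, off) & msk) != (val & msk))
				udelay(10); /* real code gives up after N tries */
			break;
		case PWR_CMD_DELAY:
			if (val == PWRSEQ_DELAY_US)
				udelay(off);
			else
				mdelay(off);
			break;
		case PWR_CMD_END:
		default:
			return _SUCCESS;
		}
	}
}
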
diff --git a/drivers/staging/r8188eu/include/HalVerDef.h b/drivers/staging/r8188eu/include/HalVerDef.h
index 796a44a1e697..62b94c993f0d 100644
--- a/drivers/staging/r8188eu/include/HalVerDef.h
+++ b/drivers/staging/r8188eu/include/HalVerDef.h
@@ -24,22 +24,10 @@ enum HAL_VENDOR {
CHIP_VENDOR_UMC = 1,
};
-enum HAL_RF_TYPE {
- RF_TYPE_1T1R = 0,
- RF_TYPE_1T2R = 1,
- RF_TYPE_2T2R = 2,
- RF_TYPE_2T3R = 3,
- RF_TYPE_2T4R = 4,
- RF_TYPE_3T3R = 5,
- RF_TYPE_3T4R = 6,
- RF_TYPE_4T4R = 7,
-};
-
struct HAL_VERSION {
enum HAL_CHIP_TYPE ChipType;
enum HAL_CUT_VERSION CUTVersion;
enum HAL_VENDOR VendorType;
- enum HAL_RF_TYPE RFType;
u8 ROMVer;
};
@@ -47,7 +35,6 @@ struct HAL_VERSION {
#define GET_CVID_CHIP_TYPE(version) (((version).ChipType))
#define GET_CVID_MANUFACTUER(version) (((version).VendorType))
#define GET_CVID_CUT_VERSION(version) (((version).CUTVersion))
-#define GET_CVID_ROM_VERSION(version) (((version).ROMVer) & ROM_VERSION_MASK)
/* Common Macro. -- */
/* HAL_VERSION VersionID */
diff --git a/drivers/staging/r8188eu/include/drv_types.h b/drivers/staging/r8188eu/include/drv_types.h
index 3e4928320f17..2dd5ebaaa921 100644
--- a/drivers/staging/r8188eu/include/drv_types.h
+++ b/drivers/staging/r8188eu/include/drv_types.h
@@ -34,6 +34,7 @@
#include "rtw_p2p.h"
#include "rtw_ap.h"
#include "rtw_br_ext.h"
+#include "rtl8188e_hal.h"
#define DRIVERVERSION "v4.1.4_6773.20130222"
@@ -85,7 +86,6 @@ struct registry_priv {
u8 ampdu_amsdu;/* A-MPDU Supports A-MSDU is permitted */
u8 lowrate_two_xmit;
- u8 rf_config;
u8 low_power;
u8 wifi_spec;/* !turbo_mode */
@@ -114,12 +114,6 @@ struct registry_priv {
u8 notch_filter;
};
-/* For registry parameters */
-#define RGTRY_OFT(field) ((u32)FIELD_OFFSET(struct registry_priv, field))
-#define RGTRY_SZ(field) sizeof(((struct registry_priv *)0)->field)
-#define BSSID_OFT(field) ((u32)FIELD_OFFSET(struct wlan_bssid_ex, field))
-#define BSSID_SZ(field) sizeof(((struct wlan_bssid_ex *)0)->field)
-
#define MAX_CONTINUAL_URB_ERR 4
struct rt_firmware {
@@ -129,14 +123,13 @@ struct rt_firmware {
struct dvobj_priv {
struct adapter *if1;
- struct adapter *if2;
/* For 92D, DMDP have 2 interface. */
u8 InterfaceNumber;
u8 NumInterfaces;
/* In /Out Pipe information */
- int RtInPipe[2];
+ int RtInPipe;
int RtOutPipe[3];
u8 Queue2Pipe[HW_QUEUE_ENTRY];/* for out pipe mapping */
@@ -146,11 +139,8 @@ struct dvobj_priv {
/*-------- below is for USB INTERFACE --------*/
- u8 nr_endpoint;
u8 ishighspeed;
- u8 RtNumInPipes;
u8 RtNumOutPipes;
- int ep_num[5]; /* endpoint number */
int RegUsbSS;
struct semaphore usb_suspend_sema;
struct mutex usb_vendor_req_mutex;
@@ -210,8 +200,7 @@ struct adapter {
struct hostapd_priv *phostapdpriv;
struct wifidirect_info wdinfo;
- void *HalData;
- u32 hal_data_sz;
+ struct hal_data_8188e haldata;
s32 bDriverStopped;
s32 bSurpriseRemoved;
@@ -275,8 +264,6 @@ struct adapter {
unsigned char br_ip[4];
struct br_ext_info ethBrExtInfo;
- u8 fix_rate;
-
unsigned char in_cta_test;
};
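
Embedding the HAL state is the change that ripples through every GET_HAL_DATA() site above: the structure now shares the adapter's lifetime, so the kzalloc()/NULL-check path (rtl8188eu_alloc_haldata(), removed in usb_halinit.c) has no equivalent. A sketch of the resulting access pattern, with illustrative names:

struct adapter_example {
	struct hal_data_8188e haldata;	/* was: void *HalData + hal_data_sz */
	/* ... remaining members as in struct adapter ... */
};

/* Plain member access; no allocation, so this cannot fail. */
static inline struct hal_data_8188e *
example_hal_data(struct adapter_example *adapt)
{
	return &adapt->haldata;
}
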
diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h
index d777ad9071e2..b4a7e0ce3116 100644
--- a/drivers/staging/r8188eu/include/hal_intf.h
+++ b/drivers/staging/r8188eu/include/hal_intf.h
@@ -29,7 +29,6 @@ enum hw_variables {
HW_VAR_ACK_PREAMBLE,
HW_VAR_SEC_CFG,
HW_VAR_BCN_VALID,
- HW_VAR_RF_TYPE,
HW_VAR_DM_FLAG,
HW_VAR_DM_FUNC_OP,
HW_VAR_DM_FUNC_SET,
@@ -95,40 +94,17 @@ enum hal_def_variable {
HAL_DEF_DBG_DUMP_TXPKT,
};
-enum hal_odm_variable {
- HAL_ODM_STA_INFO,
- HAL_ODM_P2P_STATE,
- HAL_ODM_WIFI_DISPLAY_STATE,
-};
-
typedef s32 (*c2h_id_filter)(u8 id);
-#define RF_CHANGE_BY_INIT 0
-#define RF_CHANGE_BY_IPS BIT(28)
-#define RF_CHANGE_BY_PS BIT(29)
-#define RF_CHANGE_BY_HW BIT(30)
-#define RF_CHANGE_BY_SW BIT(31)
-
#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
-void rtl8188eu_alloc_haldata(struct adapter *adapt);
-
void rtl8188eu_interface_configure(struct adapter *adapt);
void ReadAdapterInfo8188EU(struct adapter *Adapter);
void rtl8188eu_init_default_value(struct adapter *adapt);
-void rtl8188e_SetHalODMVar(struct adapter *Adapter,
- enum hal_odm_variable eVariable, void *pValue1, bool bSet);
+void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet);
u32 rtl8188eu_InitPowerOn(struct adapter *adapt);
-void rtl8188e_free_hal_data(struct adapter *padapter);
-void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 bWrite, u8 PwrState);
-void rtl8188e_ReadEFuse(struct adapter *Adapter, u8 efuseType,
- u16 _offset, u16 _size_byte, u8 *pbuf,
- bool bPseudoTest);
-void rtl8188e_EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType,
- u8 type, void *pOut, bool bPseudoTest);
-u16 rtl8188e_EfuseGetCurrentSize(struct adapter *pAdapter, u8 efuseType, bool bPseudoTest);
-int rtl8188e_Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest);
-int rtl8188e_Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest);
+void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState);
+void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf);
void hal_notch_filter_8188e(struct adapter *adapter, bool enable);
diff --git a/drivers/staging/r8188eu/include/ieee80211.h b/drivers/staging/r8188eu/include/ieee80211.h
index 6c8206bd5466..3a23d5299314 100644
--- a/drivers/staging/r8188eu/include/ieee80211.h
+++ b/drivers/staging/r8188eu/include/ieee80211.h
@@ -1149,7 +1149,6 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork);
void rtw_macaddr_cfg(u8 *mac_addr);
-u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40,
- unsigned char *MCS_rate);
+u16 rtw_mcs_rate(u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate);
#endif /* IEEE80211_H */
diff --git a/drivers/staging/r8188eu/include/ieee80211_ext.h b/drivers/staging/r8188eu/include/ieee80211_ext.h
deleted file mode 100644
index e7ade835d478..000000000000
--- a/drivers/staging/r8188eu/include/ieee80211_ext.h
+++ /dev/null
@@ -1,271 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __IEEE80211_EXT_H
-#define __IEEE80211_EXT_H
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-#define WMM_OUI_TYPE 2
-#define WMM_OUI_SUBTYPE_INFORMATION_ELEMENT 0
-#define WMM_OUI_SUBTYPE_PARAMETER_ELEMENT 1
-#define WMM_OUI_SUBTYPE_TSPEC_ELEMENT 2
-#define WMM_VERSION 1
-
-#define WPA_PROTO_WPA BIT(0)
-#define WPA_PROTO_RSN BIT(1)
-
-#define WPA_KEY_MGMT_IEEE8021X BIT(0)
-#define WPA_KEY_MGMT_PSK BIT(1)
-#define WPA_KEY_MGMT_NONE BIT(2)
-#define WPA_KEY_MGMT_IEEE8021X_NO_WPA BIT(3)
-#define WPA_KEY_MGMT_WPA_NONE BIT(4)
-
-#define WPA_CAPABILITY_PREAUTH BIT(0)
-#define WPA_CAPABILITY_MGMT_FRAME_PROTECTION BIT(6)
-#define WPA_CAPABILITY_PEERKEY_ENABLED BIT(9)
-
-#define PMKID_LEN 16
-
-struct wpa_ie_hdr {
- u8 elem_id;
- u8 len;
- u8 oui[4]; /* 24-bit OUI followed by 8-bit OUI type */
- u8 version[2]; /* little endian */
-} __packed;
-
-struct rsn_ie_hdr {
- u8 elem_id; /* WLAN_EID_RSN */
- u8 len;
- u8 version[2]; /* little endian */
-} __packed;
-
-struct wme_ac_parameter {
-#if defined(__LITTLE_ENDIAN)
- /* byte 1 */
- u8 aifsn:4,
- acm:1,
- aci:2,
- reserved:1;
-
- /* byte 2 */
- u8 eCWmin:4,
- eCWmax:4;
-#elif defined(__BIG_ENDIAN)
- /* byte 1 */
- u8 reserved:1,
- aci:2,
- acm:1,
- aifsn:4;
-
- /* byte 2 */
- u8 eCWmax:4,
- eCWmin:4;
-#else
-#error "Please fix <endian.h>"
-#endif
-
- /* bytes 3 & 4 */
- u16 txopLimit;
-} __packed;
-
-struct wme_parameter_element {
- /* required fields for WME version 1 */
- u8 oui[3];
- u8 oui_type;
- u8 oui_subtype;
- u8 version;
- u8 acInfo;
- u8 reserved;
- struct wme_ac_parameter ac[4];
-
-} __packed;
-
-#define WPA_PUT_LE16(a, val) \
- do { \
- (a)[1] = ((u16) (val)) >> 8; \
- (a)[0] = ((u16) (val)) & 0xff; \
- } while (0)
-
-#define WPA_PUT_BE32(a, val) \
- do { \
- (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[3] = (u8) (((u32) (val)) & 0xff); \
- } while (0)
-
-#define WPA_PUT_LE32(a, val) \
- do { \
- (a)[3] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[0] = (u8) (((u32) (val)) & 0xff); \
- } while (0)
-
-#define RSN_SELECTOR_PUT(a, val) WPA_PUT_BE32((u8 *)(a), (val))
-
-/* Action category code */
-enum ieee80211_category {
- WLAN_CATEGORY_SPECTRUM_MGMT = 0,
- WLAN_CATEGORY_QOS = 1,
- WLAN_CATEGORY_DLS = 2,
- WLAN_CATEGORY_BACK = 3,
- WLAN_CATEGORY_HT = 7,
- WLAN_CATEGORY_WMM = 17,
-};
-
-/* SPECTRUM_MGMT action code */
-enum ieee80211_spectrum_mgmt_actioncode {
- WLAN_ACTION_SPCT_MSR_REQ = 0,
- WLAN_ACTION_SPCT_MSR_RPRT = 1,
- WLAN_ACTION_SPCT_TPC_REQ = 2,
- WLAN_ACTION_SPCT_TPC_RPRT = 3,
- WLAN_ACTION_SPCT_CHL_SWITCH = 4,
- WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
-};
-
-/* BACK action code */
-enum ieee80211_back_actioncode {
- WLAN_ACTION_ADDBA_REQ = 0,
- WLAN_ACTION_ADDBA_RESP = 1,
- WLAN_ACTION_DELBA = 2,
-};
-
-/* HT features action code */
-enum ieee80211_ht_actioncode {
- WLAN_ACTION_NOTIFY_CH_WIDTH = 0,
- WLAN_ACTION_SM_PS = 1,
- WLAN_ACTION_PSPM = 2,
- WLAN_ACTION_PCO_PHASE = 3,
- WLAN_ACTION_MIMO_CSI_MX = 4,
- WLAN_ACTION_MIMO_NONCP_BF = 5,
- WLAN_ACTION_MIMP_CP_BF = 6,
- WLAN_ACTION_ASEL_INDICATES_FB = 7,
- WLAN_ACTION_HI_INFO_EXCHG = 8,
-};
-
-/* BACK (block-ack) parties */
-enum ieee80211_back_parties {
- WLAN_BACK_RECIPIENT = 0,
- WLAN_BACK_INITIATOR = 1,
- WLAN_BACK_TIMER = 2,
-};
-
-struct ieee80211_mgmt {
- u16 frame_control;
- u16 duration;
- u8 da[6];
- u8 sa[6];
- u8 bssid[6];
- u16 seq_ctrl;
- union {
- struct {
- u16 auth_alg;
- u16 auth_transaction;
- u16 status_code;
- /* possibly followed by Challenge text */
- u8 variable[0];
- } __packed auth;
- struct {
- u16 reason_code;
- } __packed deauth;
- struct {
- u16 capab_info;
- u16 listen_interval;
- /* followed by SSID and Supported rates */
- u8 variable[0];
- } __packed assoc_req;
- struct {
- u16 capab_info;
- u16 status_code;
- u16 aid;
- /* followed by Supported rates */
- u8 variable[0];
- } __packed assoc_resp, reassoc_resp;
- struct {
- u16 capab_info;
- u16 listen_interval;
- u8 current_ap[6];
- /* followed by SSID and Supported rates */
- u8 variable[0];
- } __packed reassoc_req;
- struct {
- u16 reason_code;
- } __packed disassoc;
- struct {
- __le64 timestamp;
- u16 beacon_int;
- u16 capab_info;
- /* followed by some of SSID, Supported rates,
- * FH Params, DS Params, CF Params, IBSS Params, TIM */
- u8 variable[0];
- } __packed beacon;
- struct {
- /* only variable items: SSID, Supported rates */
- u8 variable[0];
- } __packed probe_req;
- struct {
- __le64 timestamp;
- u16 beacon_int;
- u16 capab_info;
- /* followed by some of SSID, Supported rates,
- * FH Params, DS Params, CF Params, IBSS Params */
- u8 variable[0];
- } __packed probe_resp;
- struct {
- u8 category;
- union {
- struct {
- u8 action_code;
- u8 dialog_token;
- u8 status_code;
- u8 variable[0];
- } __packed wme_action;
- struct {
- u8 action_code;
- u8 dialog_token;
- u16 capab;
- u16 timeout;
- u16 start_seq_num;
- } __packed addba_req;
- struct {
- u8 action_code;
- u8 dialog_token;
- u16 status;
- u16 capab;
- u16 timeout;
- } __packed addba_resp;
- struct {
- u8 action_code;
- u16 params;
- u16 reason_code;
- } __packed delba;
- struct {
- u8 action_code;
- /* capab_info for open and confirm,
- * reason for close
- */
- u16 aux;
- /* Followed in plink_confirm by status
- * code, AID and supported rates,
- * and directly by supported rates in
- * plink_open and plink_close
- */
- u8 variable[0];
- } __packed plink_action;
- struct{
- u8 action_code;
- u8 variable[0];
- } __packed mesh_action;
- } __packed u;
- } __packed action;
- } __packed u;
-} __packed;
-
-/* mgmt header + 1 byte category code */
-#define IEEE80211_MIN_ACTION_SIZE \
- FIELD_OFFSET(struct ieee80211_mgmt, u.action.u)
-
-#endif
diff --git a/drivers/staging/r8188eu/include/odm.h b/drivers/staging/r8188eu/include/odm.h
index f08655208b32..23a151c558dc 100644
--- a/drivers/staging/r8188eu/include/odm.h
+++ b/drivers/staging/r8188eu/include/odm.h
@@ -4,64 +4,30 @@
#ifndef __HALDMOUTSRC_H__
#define __HALDMOUTSRC_H__
-/* Add for AP/ADSLpseudo DM structuer requirement. */
-/* We need to remove to other position??? */
-struct rtl8192cd_priv {
- u8 temp;
-};
-
struct rtw_dig {
- u8 Dig_Enable_Flag;
- u8 Dig_Ext_Port_Stage;
-
- int RssiLowThresh;
- int RssiHighThresh;
-
- u32 FALowThresh;
- u32 FAHighThresh;
-
- u8 CurSTAConnectState;
- u8 PreSTAConnectState;
- u8 CurMultiSTAConnectState;
-
u8 PreIGValue;
u8 CurIGValue;
u8 BackupIGValue;
- s8 BackoffVal;
- s8 BackoffVal_range_max;
- s8 BackoffVal_range_min;
u8 rx_gain_range_max;
u8 rx_gain_range_min;
- u8 Rssi_val_min;
- u8 PreCCK_CCAThres;
u8 CurCCK_CCAThres;
- u8 PreCCKPDState;
- u8 CurCCKPDState;
u8 LargeFAHit;
u8 ForbiddenIGI;
u32 Recover_cnt;
u8 DIG_Dynamic_MIN_0;
- u8 DIG_Dynamic_MIN_1;
bool bMediaConnect_0;
- bool bMediaConnect_1;
u32 AntDiv_RSSI_max;
u32 RSSI_max;
};
struct rtl_ps {
- u8 pre_cca_state;
- u8 cur_cca_state;
-
u8 pre_rf_state;
u8 cur_rf_state;
-
- int rssi_val_min;
-
u8 initialize;
u32 reg_874;
u32 reg_c70;
@@ -87,59 +53,11 @@ struct false_alarm_stats {
u32 Cnt_BW_LSC; /* Gary */
};
-struct dyn_primary_cca {
- u8 pri_cca_flag;
- u8 intf_flag;
- u8 intf_type;
- u8 dup_rts_flag;
- u8 monitor_flag;
-};
-
-struct rx_hpc {
- u8 RXHP_flag;
- u8 PSD_func_trigger;
- u8 PSD_bitmap_RXHP[80];
- u8 Pre_IGI;
- u8 Cur_IGI;
- u8 Pre_pw_th;
- u8 Cur_pw_th;
- bool First_time_enter;
- bool RXHP_enable;
- u8 TP_Mode;
- struct timer_list PSDTimer;
-};
-
#define ODM_ASSOCIATE_ENTRY_NUM 32 /* Max size of AsocEntry[]. */
struct sw_ant_switch {
- u8 try_flag;
- s32 PreRSSI;
u8 CurAntenna;
- u8 PreAntenna;
- u8 RSSI_Trying;
- u8 TestMode;
- u8 bTriggerAntennaSwitch;
- u8 SelectAntennaMap;
- u8 RSSI_target;
-
- /* Before link Antenna Switch check */
- u8 SWAS_NoLink_State;
- u32 SWAS_NoLink_BK_Reg860;
-
- s32 RSSI_sum_A;
- s32 RSSI_sum_B;
- s32 RSSI_cnt_A;
- s32 RSSI_cnt_B;
- u64 lastTxOkCnt;
- u64 lastRxOkCnt;
- u64 TXByteCnt_A;
- u64 TXByteCnt_B;
- u64 RXByteCnt_A;
- u64 RXByteCnt_B;
- u8 TrafficLoad;
- struct timer_list SwAntennaSwitchTimer;
- u8 TxAnt[ODM_ASSOCIATE_ENTRY_NUM];
- u8 TargetSTA;
+ u8 SWAS_NoLink_State; /* Before link Antenna Switch check */
u8 RxIdleAnt;
};
@@ -182,10 +100,6 @@ struct odm_per_pkt_info {
bool bPacketBeacon;
};
-struct odm_mac_status_info {
- u8 test;
-};
-
enum odm_ability {
/* BB Team */
ODM_DIG = 0x00000001,
@@ -202,22 +116,6 @@ enum odm_ability {
ODM_PSD2AFH = 0x00000800
};
-/* 2011/20/20 MH For MP driver RT_WLAN_STA = struct sta_info */
-/* Please declare below ODM relative info in your STA info structure. */
-
-struct odm_sta_info {
- /* Driver Write */
- bool bUsed; /* record the sta status link or not? */
- u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
-
- /* ODM Write */
- /* 1 PHY_STATUS_INFO */
- u8 RSSI_Path[4]; /* */
- u8 RSSI_Ave;
- u8 RXEVM[4];
- u8 RXSNR[4];
-};
-
/* 2011/10/20 MH Define Common info enum for all team. */
enum odm_common_info_def {
@@ -230,22 +128,16 @@ enum odm_common_info_def {
/* Dynamic value: */
/* POINTER REFERENCE----------- */
- ODM_CMNINFO_TX_UNI,
- ODM_CMNINFO_RX_UNI,
ODM_CMNINFO_WM_MODE, /* ODM_WIRELESS_MODE_E */
ODM_CMNINFO_SEC_CHNL_OFFSET, /* ODM_SEC_CHNL_OFFSET_E */
- ODM_CMNINFO_SEC_MODE, /* ODM_SECURITY_E */
ODM_CMNINFO_BW, /* ODM_BW_E */
ODM_CMNINFO_CHNL,
ODM_CMNINFO_SCAN,
ODM_CMNINFO_POWER_SAVING,
- ODM_CMNINFO_NET_CLOSED,
/* POINTER REFERENCE----------- */
/* CALL BY VALUE------------- */
- ODM_CMNINFO_WIFI_DIRECT,
- ODM_CMNINFO_WIFI_DISPLAY,
ODM_CMNINFO_LINK,
ODM_CMNINFO_RSSI_MIN,
ODM_CMNINFO_RF_ANTENNA_TYPE, /* u8 */
@@ -256,27 +148,17 @@ enum odm_common_info_def {
enum odm_ability_def {
/* BB ODM section BIT 0-15 */
- ODM_BB_DIG = BIT(0),
- ODM_BB_RA_MASK = BIT(1),
- ODM_BB_DYNAMIC_TXPWR = BIT(2),
ODM_BB_FA_CNT = BIT(3),
ODM_BB_RSSI_MONITOR = BIT(4),
ODM_BB_CCK_PD = BIT(5),
ODM_BB_ANT_DIV = BIT(6),
- ODM_BB_PWR_SAVE = BIT(7),
ODM_BB_PWR_TRA = BIT(8),
- ODM_BB_RATE_ADAPTIVE = BIT(9),
- ODM_BB_PATH_DIV = BIT(10),
- ODM_BB_PSD = BIT(11),
- ODM_BB_RXHP = BIT(12),
/* MAC DM section BIT 16-23 */
ODM_MAC_EDCA_TURBO = BIT(16),
- ODM_MAC_EARLY_MODE = BIT(17),
/* RF ODM section BIT 24-31 */
ODM_RF_TX_PWR_TRACK = BIT(24),
- ODM_RF_RX_GAIN_TRACK = BIT(25),
ODM_RF_CALIBRATION = BIT(26),
};
@@ -424,15 +306,9 @@ struct odm_rf_cal {
/* ODM Dynamic common info value definition */
struct fast_ant_train {
- u8 Bssid[6];
u8 antsel_rx_keep_0;
u8 antsel_rx_keep_1;
u8 antsel_rx_keep_2;
- u32 antSumRSSI[7];
- u32 antRSSIcnt[7];
- u32 antAveRSSI[7];
- u8 FAT_State;
- u32 TrainIdx;
u8 antsel_a[ODM_ASSOCIATE_ENTRY_NUM];
u8 antsel_b[ODM_ASSOCIATE_ENTRY_NUM];
u8 antsel_c[ODM_ASSOCIATE_ENTRY_NUM];
@@ -444,11 +320,6 @@ struct fast_ant_train {
bool bBecomeLinked;
};
-enum fat_state {
- FAT_NORMAL_STATE = 0,
- FAT_TRAINING_STATE = 1,
-};
-
enum ant_div_type {
NO_ANTDIV = 0xFF,
CG_TRX_HW_ANTDIV = 0x01,
@@ -459,13 +330,7 @@ enum ant_div_type {
/* Copy from SD4 defined structure. We use to support PHY DM integration. */
struct odm_dm_struct {
- /* Add for different team use temporarily */
struct adapter *Adapter; /* For CE/NIC team */
- struct rtl8192cd_priv *priv; /* For AP/ADSL team */
- /* WHen you use above pointers, they must be initialized. */
- bool odm_ready;
-
- struct rtl8192cd_priv *fake_priv;
/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
bool bCckHighPower;
@@ -485,55 +350,25 @@ struct odm_dm_struct {
/* Dynamic Value */
/* POINTER REFERENCE----------- */
-
- u8 u8_temp;
- bool bool_temp;
- struct adapter *adapter_temp;
-
- /* TX Unicast byte count */
- u64 *pNumTxBytesUnicast;
- /* RX Unicast byte count */
- u64 *pNumRxBytesUnicast;
/* Wireless mode B/G/A/N = BIT(0)/BIT(1)/BIT(2)/BIT(3) */
u8 *pWirelessMode; /* ODM_WIRELESS_MODE_E */
/* Secondary channel offset don't_care/below/above = 0/1/2 */
u8 *pSecChOffset;
- /* Security mode Open/WEP/AES/TKIP = 0/1/2/3 */
- u8 *pSecurity;
/* BW info 20M/40M/80M = 0/1/2 */
u8 *pBandWidth;
/* Central channel location Ch1/Ch2/.... */
u8 *pChannel; /* central channel number */
- /* Common info for 92D DMSP */
- bool *pbGetValueFromOtherMac;
- struct adapter **pBuddyAdapter;
- bool *pbMasterOfDMSP; /* MAC0: master, MAC1: slave */
/* Common info for Status */
bool *pbScanInProcess;
bool *pbPowerSaving;
- /* CCA Path 2-path/path-A/path-B = 0/1/2; using ODM_CCA_PATH_E. */
- u8 *pOnePathCCA;
- /* pMgntInfo->AntennaTest */
- u8 *pAntennaTest;
- bool *pbNet_closed;
/* POINTER REFERENCE----------- */
/* */
/* CALL BY VALUE------------- */
- bool bWIFI_Direct;
- bool bWIFI_Display;
bool bLinked;
u8 RSSI_Min;
- u8 InterfaceIndex; /* Add for 92D dual MAC: 0--Mac0 1--Mac1 */
bool bIsMPChip;
bool bOneEntryOnly;
- /* Common info for BTDM */
- bool bBtDisabled; /* BT is disabled */
- bool bBtHsOperation; /* BT HS mode is under progress */
- u8 btHsDigVal; /* use BT rssi to decide the DIG value */
- bool bBtDisableEdcaTurbo;/* Under some condition, don't enable the
- * EDCA Turbo */
- bool bBtBusy; /* BT is busy. */
/* CALL BY VALUE------------- */
/* 2 Define STA info. */
@@ -549,38 +384,16 @@ struct odm_dm_struct {
/* Latest packet phy info (ODM write) */
struct odm_phy_dbg_info PhyDbgInfo;
- /* Latest packet phy info (ODM write) */
- struct odm_mac_status_info *pMacInfo;
-
- /* Different Team independt structure?? */
-
/* ODM Structure */
struct fast_ant_train DM_FatTable;
struct rtw_dig DM_DigTable;
struct rtl_ps DM_PSTable;
- struct dyn_primary_cca DM_PriCCA;
- struct rx_hpc DM_RXHP_Table;
struct false_alarm_stats FalseAlmCnt;
- struct false_alarm_stats FlaseAlmCntBuddyAdapter;
struct sw_ant_switch DM_SWAT_Table;
- bool RSSI_test;
struct edca_turbo DM_EDCA_Table;
- u32 WMMEDCA_BE;
- /* Copy from SD4 structure */
- /* */
- /* ================================================== */
- /* */
-
- bool *pbDriverStopped;
- bool *pbDriverIsGoingToPnpSetPowerSleep;
- bool *pinit_adpt_in_progress;
/* PSD */
- bool bUserAssignLevel;
- struct timer_list PSDTimer;
- u8 RSSI_BT; /* come from BT */
- bool bPSDinProcess;
bool bDMInitialGainEnable;
struct odm_rate_adapt RateAdaptive;
@@ -596,14 +409,7 @@ struct odm_dm_struct {
u8 BbSwingIdxCckCurrent;
u8 BbSwingIdxCckBase;
bool BbSwingFlagCck;
- /* ODM system resource. */
-
- /* ODM relative time. */
- struct timer_list PathDivSwitchTimer;
- /* 2011.09.27 add for Path Diversity */
- struct timer_list CCKPathDiversityTimer;
- struct timer_list FastAntTrainingTimer;
-}; /* DM_Dynamic_Mechanism_Structure */
+};
enum odm_bb_config_type {
CONFIG_BB_PHY_REG,
@@ -612,12 +418,6 @@ enum odm_bb_config_type {
CONFIG_BB_PHY_REG_PG,
};
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_false_ALARM_THRESH_LOW 400
-#define DM_false_ALARM_THRESH_HIGH 1000
-
#define DM_DIG_MAX_NIC 0x4e
#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */
@@ -629,10 +429,6 @@ enum odm_bb_config_type {
#define DM_DIG_FA_TH1 0x300/* 0x100 */
#define DM_DIG_FA_TH2 0x400/* 0x200 */
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
/* 3=========================================================== */
/* 3 Rate Adaptive */
/* 3=========================================================== */
@@ -645,12 +441,6 @@ enum odm_bb_config_type {
/* 3 BB Power Save */
/* 3=========================================================== */
-enum dm_1r_cca {
- CCA_1R = 0,
- CCA_2R = 1,
- CCA_MAX = 2,
-};
-
enum dm_rf {
RF_Save = 0,
RF_Normal = 1,
@@ -680,8 +470,6 @@ extern u8 CCKSwingTable_Ch14 [CCK_TABLE_SIZE][8];
void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI);
void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
-void ODM_SetAntenna(struct odm_dm_struct *pDM_Odm, u8 Antenna);
-
void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm);
diff --git a/drivers/staging/r8188eu/include/odm_RTL8188E.h b/drivers/staging/r8188eu/include/odm_RTL8188E.h
index 96e50c9224aa..3c6471f1a893 100644
--- a/drivers/staging/r8188eu/include/odm_RTL8188E.h
+++ b/drivers/staging/r8188eu/include/odm_RTL8188E.h
@@ -25,10 +25,4 @@ void ODM_AntselStatistics_88E(struct odm_dm_struct *pDM_Odm, u8 antsel_tr_mux,
void odm_FastAntTraining(struct odm_dm_struct *pDM_Odm);
-void odm_FastAntTrainingCallback(struct odm_dm_struct *pDM_Odm);
-
-void odm_FastAntTrainingWorkItemCallback(struct odm_dm_struct *pDM_Odm);
-
-void odm_PrimaryCCA_Init(struct odm_dm_struct *pDM_Odm);
-
#endif
diff --git a/drivers/staging/r8188eu/include/odm_RegConfig8188E.h b/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
index 634454bffdb6..683fa4a07956 100644
--- a/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
+++ b/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
@@ -4,9 +4,6 @@
#ifndef __INC_ODM_REGCONFIG_H_8188E
#define __INC_ODM_REGCONFIG_H_8188E
-void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data,
- enum rf_radio_path RF_PATH, u32 RegAddr);
-
void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm,
u32 Addr, u32 Data);
diff --git a/drivers/staging/r8188eu/include/odm_interface.h b/drivers/staging/r8188eu/include/odm_interface.h
deleted file mode 100644
index 17a315d19a50..000000000000
--- a/drivers/staging/r8188eu/include/odm_interface.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __ODM_INTERFACE_H__
-#define __ODM_INTERFACE_H__
-
-enum odm_h2c_cmd {
- ODM_H2C_RSSI_REPORT = 0,
- ODM_H2C_PSD_RESULT= 1,
- ODM_H2C_PathDiv = 2,
- ODM_MAX_H2CCMD
-};
-
-/* 2012/02/17 MH For non-MP compile pass only. Linux does not support workitem. */
-/* Suggest HW team to use thread instead of workitem. Windows also support the feature. */
-typedef void (*RT_WORKITEM_CALL_BACK)(void *pContext);
-
-/* =========== Extern Variable ??? It should be forbidden. */
-
-/* =========== EXtern Function Prototype */
-
-u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
-u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
-void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data);
-
-void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data);
-
-void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data);
-
-void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
-
-void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
-
-void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum rf_radio_path eRFPath,
- u32 RegAddr, u32 BitMask, u32 Data);
-
-u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum rf_radio_path eRFPath,
- u32 RegAddr, u32 BitMask);
-
-/* Memory Relative Function. */
-s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2,
- u32 length);
-
-/* ODM Timer relative API. */
-void ODM_delay_ms(u32 ms);
-
-void ODM_delay_us(u32 us);
-
-void ODM_sleep_ms(u32 ms);
-
-#endif /* __ODM_INTERFACE_H__ */
diff --git a/drivers/staging/r8188eu/include/odm_precomp.h b/drivers/staging/r8188eu/include/odm_precomp.h
deleted file mode 100644
index 22299f167af8..000000000000
--- a/drivers/staging/r8188eu/include/odm_precomp.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __ODM_PRECOMP_H__
-#define __ODM_PRECOMP_H__
-
-#include "odm_types.h"
-
-#define TEST_FALG___ 1
-
-/* 2 Config Flags and Structs - defined by each ODM Type */
-
-#include "osdep_service.h"
-#include "drv_types.h"
-#include "hal_intf.h"
-
-/* 2 OutSrc Header Files */
-
-#include "odm.h"
-#include "odm_HWConfig.h"
-#include "odm_RegDefine11N.h"
-
-#include "HalPhyRf_8188e.h"/* for IQK,LCK,Power-tracking */
-#include "Hal8188ERateAdaptive.h"/* for RA,Power training */
-#include "rtl8188e_hal.h"
-
-#include "odm_interface.h"
-
-#include "HalHWImg8188E_MAC.h"
-#include "HalHWImg8188E_RF.h"
-#include "HalHWImg8188E_BB.h"
-
-#include "odm_RegConfig8188E.h"
-#include "odm_RTL8188E.h"
-
-void odm_DIGInit(struct odm_dm_struct *pDM_Odm);
-void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm);
-void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm);
-void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm);
-void odm_SwAntDivInit_NIC(struct odm_dm_struct *pDM_Odm);
-void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm);
-void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm);
-void odm_DIG(struct odm_dm_struct *pDM_Odm);
-void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm);
-void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm);
-void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm);
-void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm);
-void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm);
-void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm);
-void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm);
-
-#endif /* __ODM_PRECOMP_H__ */
diff --git a/drivers/staging/r8188eu/include/osdep_service.h b/drivers/staging/r8188eu/include/osdep_service.h
index f6f5e4581212..6c8241372a06 100644
--- a/drivers/staging/r8188eu/include/osdep_service.h
+++ b/drivers/staging/r8188eu/include/osdep_service.h
@@ -74,38 +74,6 @@ static inline void _cancel_timer(struct timer_list *ptimer,u8 *bcancelled)
#define RTW_TIMER_HDL_NAME(name) rtw_##name##_timer_hdl
#define RTW_DECLARE_TIMER_HDL(name) void RTW_TIMER_HDL_NAME(name)(RTW_TIMER_HDL_ARGS)
-static inline void _init_workitem(struct work_struct *pwork, void *pfunc, void * cntx)
-{
- INIT_WORK(pwork, pfunc);
-}
-
-static inline void _set_workitem(struct work_struct *pwork)
-{
- schedule_work(pwork);
-}
-
-static inline void _cancel_workitem_sync(struct work_struct *pwork)
-{
- cancel_work_sync(pwork);
-}
-/* */
-/* Global Mutex: can only be used at PASSIVE level. */
-/* */
-
-#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter) \
-{ \
- while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1)\
- { \
- atomic_dec((atomic_t *)&(_MutexCounter)); \
- msleep(10); \
- } \
-}
-
-#define RELEASE_GLOBAL_MUTEX(_MutexCounter) \
-{ \
- atomic_dec((atomic_t *)&(_MutexCounter)); \
-}
-
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
{
return netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
diff --git a/drivers/staging/r8188eu/include/rtl8188e_dm.h b/drivers/staging/r8188eu/include/rtl8188e_dm.h
index 208bea050f6f..0b3a9a1a4e5c 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_dm.h
@@ -8,12 +8,7 @@ enum{
UP_LINK,
DOWN_LINK,
};
-/* duplicate code,will move to ODM ######### */
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-/* duplicate code,will move to ODM ######### */
+
struct dm_priv {
u32 InitODMFlag;
diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h
index d7db1dfc39d0..8134a173ea07 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_hal.h
@@ -13,11 +13,18 @@
#include "rtl8188e_recv.h"
#include "rtl8188e_xmit.h"
#include "rtl8188e_cmd.h"
-#include "Hal8188EPwrSeq.h"
-#include "rtl8188e_sreset.h"
#include "rtw_efuse.h"
-
-#include "odm_precomp.h"
+#include "odm_types.h"
+#include "odm.h"
+#include "odm_HWConfig.h"
+#include "odm_RegDefine11N.h"
+#include "HalPhyRf_8188e.h"
+#include "Hal8188ERateAdaptive.h"
+#include "HalHWImg8188E_MAC.h"
+#include "HalHWImg8188E_RF.h"
+#include "HalHWImg8188E_BB.h"
+#include "odm_RegConfig8188E.h"
+#include "odm_RTL8188E.h"
/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188E_power_on_flow
@@ -168,42 +175,15 @@ struct hal_data_8188e {
u16 BasicRateSet;
- /* rf_ctrl */
- u8 rf_chip;
- u8 rf_type;
-
- u8 BoardType;
-
- /* EEPROM setting. */
- u16 EEPROMVID;
- u16 EEPROMPID;
- u16 EEPROMSVID;
- u16 EEPROMSDID;
- u8 EEPROMCustomerID;
- u8 EEPROMSubCustomerID;
- u8 EEPROMVersion;
u8 EEPROMRegulatory;
-
- u8 bTXPowerDataReadFromEEPORM;
u8 EEPROMThermalMeter;
- u8 bAPKThermalMeterIgnore;
-
- bool EepromOrEfuse;
- struct efuse_hal EfuseHal;
- u8 Index24G_CCK_Base[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- u8 Index24G_BW40_Base[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ u8 Index24G_CCK_Base[CHANNEL_MAX_NUMBER];
+ u8 Index24G_BW40_Base[CHANNEL_MAX_NUMBER];
/* If only one tx, only BW20 and OFDM are used. */
- s8 CCK_24G_Diff[RF_PATH_MAX][MAX_TX_COUNT];
- s8 OFDM_24G_Diff[RF_PATH_MAX][MAX_TX_COUNT];
- s8 BW20_24G_Diff[RF_PATH_MAX][MAX_TX_COUNT];
- s8 BW40_24G_Diff[RF_PATH_MAX][MAX_TX_COUNT];
-
- u8 TxPwrLevelCck[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- /* For HT 40MHZ pwr */
- u8 TxPwrLevelHT40_1S[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- /* For HT 40MHZ pwr */
- u8 TxPwrLevelHT40_2S[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ s8 OFDM_24G_Diff[MAX_TX_COUNT];
+ s8 BW20_24G_Diff[MAX_TX_COUNT];
+
/* HT 20<->40 Pwr diff */
u8 TxPwrHt20Diff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
/* For HT<->legacy pwr diff */
@@ -212,7 +192,6 @@ struct hal_data_8188e {
u8 PwrGroupHT20[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
u8 PwrGroupHT40[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- u8 LegacyHTTxPowerDiff;/* Legacy to HT rate power diff */
/* The current Tx Power Level */
u8 CurrentCckTxPwrIdx;
u8 CurrentOfdm24GTxPwrIdx;
@@ -220,32 +199,18 @@ struct hal_data_8188e {
u8 CurrentBW4024GTxPwrIdx;
/* Read/write are allow for following hardware information variables */
- u8 framesync;
- u32 framesyncC34;
- u8 framesyncMonitor;
- u8 DefaultInitialGain[4];
u8 pwrGroupCnt;
u32 MCSTxPowerLevelOriginalOffset[MAX_PG_GROUP][16];
- u32 CCKTxPowerLevelOriginalOffset;
u8 CrystalCap;
- u32 AntennaTxPath; /* Antenna path Tx */
- u32 AntennaRxPath; /* Antenna path Rx */
u8 ExternalPA;
- u8 bLedOpenDrain; /* Open-drain support for controlling the LED.*/
-
- u8 b1x1RecvCombine; /* for 1T1R receive combining */
-
u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
- struct bb_reg_def PHYRegDef[4]; /* Radio A/B/C/D */
+ struct bb_reg_def PHYRegDef[2]; /* Radio A/B */
u32 RfRegChnlVal[2];
- /* RDG enable */
- bool bRDGEnable;
-
/* for host message to fw */
u8 LastHMEBoxNum;
@@ -263,26 +228,10 @@ struct hal_data_8188e {
u8 bDumpRxPkt;/* for debug */
u8 bDumpTxPkt;/* for debug */
- u8 FwRsvdPageStartOffset; /* Reserve page start offset except
- * beacon in TxQ. */
-
- /* 2010/08/09 MH Add CU power down mode. */
- bool pwrdown;
-
- /* Add for dual MAC 0--Mac0 1--Mac1 */
- u32 interfaceIndex;
u8 OutEpQueueSel;
u8 OutEpNumber;
- /* Add for USB aggreation mode dynamic shceme. */
- bool UsbRxHighSpeedMode;
-
- /* 2010/11/22 MH Add for slim combo debug mode selective. */
- /* This is used for fix the drawback of CU TSMC-A/UMC-A cut.
- * HW auto suspend ability. Close BT clock. */
- bool SlimComboDbg;
-
u16 EfuseUsedBytes;
struct P2P_PS_Offload_t p2p_ps_offload;
@@ -293,12 +242,8 @@ struct hal_data_8188e {
u32 UsbBulkOutSize;
- /* Interrupt relatd register information. */
- u32 IntArray[3];/* HISR0,HISR1,HSISR */
- u8 C2hArray[16];
u8 UsbTxAggMode;
u8 UsbTxAggDescNum;
- u32 MaxUsbRxAggBlock;
enum usb_rx_agg_mode UsbRxAggMode;
u8 UsbRxAggBlockCount; /* USB Block count. Block size is
@@ -309,9 +254,6 @@ struct hal_data_8188e {
u8 UsbRxAggPageTimeout;
};
-#define GET_HAL_DATA(__pAdapter) \
- ((struct hal_data_8188e *)((__pAdapter)->HalData))
-
/* rtl8188e_hal_init.c */
s32 rtl8188e_FirmwareDownload(struct adapter *padapter);
void _8051Reset88E(struct adapter *padapter);
@@ -321,25 +263,18 @@ s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy);
/* EFuse */
u8 GetEEPROMSize8188E(struct adapter *padapter);
-void Hal_InitPGData88E(struct adapter *padapter);
void Hal_EfuseParseIDCode88E(struct adapter *padapter, u8 *hwinfo);
void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *hwinfo,
bool AutoLoadFail);
-void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo,
- bool AutoLoadFail);
void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo,
bool AutoLoadFail);
-void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo,
- bool AutoLoadFail);
void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter,u8 *PROMContent,
bool AutoLoadFail);
void Hal_ReadThermalMeter_88E(struct adapter * dapter, u8 *PROMContent,
bool AutoloadFail);
void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo,
bool AutoLoadFail);
-void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo,
- bool AutoLoadFail);
void Hal_ReadPowerSavingMode88E(struct adapter *pAdapter, u8 *hwinfo,
bool AutoLoadFail);
@@ -347,6 +282,5 @@ void rtl8188e_read_chip_version(struct adapter *padapter);
s32 rtl8188e_iol_efuse_patch(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
-void _ps_open_RF(struct adapter *adapt);
#endif /* __RTL8188E_HAL_H__ */
diff --git a/drivers/staging/r8188eu/include/rtl8188e_led.h b/drivers/staging/r8188eu/include/rtl8188e_led.h
deleted file mode 100644
index 02cdc970bb17..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_led.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTL8188E_LED_H__
-#define __RTL8188E_LED_H__
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-/* */
-/* Interface to manipulate LED objects. */
-/* */
-void rtl8188eu_InitSwLeds(struct adapter *padapter);
-void rtl8188eu_DeInitSwLeds(struct adapter *padapter);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtl8188e_recv.h b/drivers/staging/r8188eu/include/rtl8188e_recv.h
index 2ab395ef579b..0be9896eaf0f 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_recv.h
@@ -9,8 +9,6 @@
#define RECV_BLK_SZ 512
#define RECV_BLK_CNT 16
#define RECV_BLK_TH RECV_BLK_CNT
-#define RECV_BULK_IN_ADDR 0x80
-#define RECV_INT_IN_ADDR 0x81
#define NR_PREALLOC_RECV_SKB (8)
@@ -39,7 +37,6 @@ enum rx_packet_type {
HIS_REPORT,/* USB HISR RPT */
};
-#define INTERRUPT_MSG_FORMAT_LEN 60
void rtl8188eu_init_recvbuf(struct recv_buf *buf);
s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
void rtl8188eu_free_recv_priv(struct adapter * padapter);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h
index 01aeaa4ac605..009222b4a95d 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h
@@ -475,13 +475,6 @@ Default: 00b.
#define MSR_INFRA 0x02
#define MSR_AP 0x03
-/* 88EU (MSR) Media Status Register (Offset 0x4C, 8 bits) */
-#define USB_INTR_CONTENT_C2H_OFFSET 0
-#define USB_INTR_CONTENT_CPWM1_OFFSET 16
-#define USB_INTR_CONTENT_CPWM2_OFFSET 20
-#define USB_INTR_CONTENT_HISR_OFFSET 48
-#define USB_INTR_CONTENT_HISRE_OFFSET 52
-
/* 88E Driver Initialization Offload REG_FDHM0(Offset 0x88, 8 bits) */
/* IOL config for REG_FDHM0(Reg0x88) */
#define CMD_INIT_LLT BIT(0)
@@ -1283,53 +1276,15 @@ Current IOREG MAP
#define EEPROM_RF_BOARD_OPTION_88E 0xC1
#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
-#define EEPROM_RF_BT_SETTING_88E 0xC3
-#define EEPROM_VERSION_88E 0xC4
-#define EEPROM_CUSTOMERID_88E 0xC5
#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
-/* RTL88EE */
-#define EEPROM_MAC_ADDR_88EE 0xD0
-#define EEPROM_VID_88EE 0xD6
-#define EEPROM_DID_88EE 0xD8
-#define EEPROM_SVID_88EE 0xDA
-#define EEPROM_SMID_88EE 0xDC
-
/* RTL88EU */
#define EEPROM_MAC_ADDR_88EU 0xD7
-#define EEPROM_VID_88EU 0xD0
-#define EEPROM_PID_88EU 0xD2
#define EEPROM_USB_OPTIONAL_FUNCTION0 0xD4
/* RTL88ES */
#define EEPROM_MAC_ADDR_88ES 0x11A
-/* EEPROM/Efuse Value Type */
-#define EETYPE_TX_PWR 0x0
-
-/* Default Value for EEPROM or EFUSE!!! */
-#define EEPROM_Default_TSSI 0x0
-#define EEPROM_Default_TxPowerDiff 0x0
-#define EEPROM_Default_CrystalCap 0x5
-/* Default: 2X2, RTL8192CE(QFPN68) */
-#define EEPROM_Default_BoardType 0x02
-#define EEPROM_Default_TxPower 0x1010
-#define EEPROM_Default_HT2T_TxPwr 0x10
-
-#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
-#define EEPROM_Default_ThermalMeter 0x12
-
-#define EEPROM_Default_AntTxPowerDiff 0x0
-#define EEPROM_Default_TxPwDiff_CrystalCap 0x5
-#define EEPROM_Default_TxPowerLevel 0x2A
-
-#define EEPROM_Default_HT40_2SDiff 0x0
-/* HT20<->40 default Tx Power Index Difference */
-#define EEPROM_Default_HT20_Diff 2
-#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
-#define EEPROM_Default_HT40_PwrMaxOffset 0
-#define EEPROM_Default_HT20_PwrMaxOffset 0
-
#define EEPROM_Default_CrystalCap_88E 0x20
#define EEPROM_Default_ThermalMeter_88E 0x18
@@ -1339,18 +1294,7 @@ Current IOREG MAP
#define EEPROM_DEFAULT_24G_OFDM_DIFF 0X04
#define EEPROM_DEFAULT_DIFF 0XFE
-#define EEPROM_DEFAULT_CHANNEL_PLAN 0x7F
#define EEPROM_DEFAULT_BOARD_OPTION 0x00
-#define EEPROM_DEFAULT_FEATURE_OPTION 0x00
-#define EEPROM_DEFAULT_BT_OPTION 0x10
-
-/* For debug */
-#define EEPROM_Default_PID 0x1234
-#define EEPROM_Default_VID 0x5678
-#define EEPROM_Default_CustomerID 0xAB
-#define EEPROM_Default_CustomerID_8188E 0x00
-#define EEPROM_Default_SubCustomerID 0xCD
-#define EEPROM_Default_Version 0
#define EEPROM_CHANNEL_PLAN_FCC 0x0
#define EEPROM_CHANNEL_PLAN_IC 0x1
@@ -1367,11 +1311,6 @@ Current IOREG MAP
#define EEPROM_USB_OPTIONAL1 0xE
#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
-#define EEPROM_CID_DEFAULT 0x0
-#define EEPROM_CID_TOSHIBA 0x4
-#define EEPROM_CID_CCX 0x10 /* CCX test. */
-#define EEPROM_CID_QMI 0x0D
-#define EEPROM_CID_WHQL 0xFE
#define RTL_EEPROM_ID 0x8129
#endif /* __RTL8188E_SPEC_H__ */
diff --git a/drivers/staging/r8188eu/include/rtl8188e_sreset.h b/drivers/staging/r8188eu/include/rtl8188e_sreset.h
deleted file mode 100644
index bb8b0048fbf9..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_sreset.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTL8188E_SRESET_H_
-#define _RTL8188E_SRESET_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-void rtl8188e_sreset_xmit_status_check(struct adapter *padapter);
-void rtl8188e_sreset_linked_status_check(struct adapter *padapter);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_cmd.h b/drivers/staging/r8188eu/include/rtw_cmd.h
index 47c3c80cc24a..cf0945ae11c1 100644
--- a/drivers/staging/r8188eu/include/rtw_cmd.h
+++ b/drivers/staging/r8188eu/include/rtw_cmd.h
@@ -42,7 +42,6 @@ struct cmd_priv {
u8 *cmd_allocated_buf;
u8 *rsp_buf; /* shall be non-paged, and 4 bytes aligned */
u8 *rsp_allocated_buf;
- u32 cmd_issued_cnt;
u32 cmd_done_cnt;
u32 rsp_cnt;
u8 cmdthd_running;
diff --git a/drivers/staging/r8188eu/include/rtw_debug.h b/drivers/staging/r8188eu/include/rtw_debug.h
index 0a77e3e73a45..311051757715 100644
--- a/drivers/staging/r8188eu/include/rtw_debug.h
+++ b/drivers/staging/r8188eu/include/rtw_debug.h
@@ -54,22 +54,10 @@
extern u32 GlobalDebugLevel;
-#define DBG_88E_LEVEL(_level, fmt, arg...) \
- do { \
- if (_level <= GlobalDebugLevel) \
- pr_info(DRIVER_PREFIX"INFO " fmt, ##arg); \
- } while (0)
-
#define DBG_88E(...) \
do { \
if (_drv_err_ <= GlobalDebugLevel) \
pr_info(DRIVER_PREFIX __VA_ARGS__); \
} while (0)
-#define MSG_88E(...) \
- do { \
- if (_drv_err_ <= GlobalDebugLevel) \
- pr_info(DRIVER_PREFIX __VA_ARGS__); \
- } while (0)
-
#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/r8188eu/include/rtw_eeprom.h b/drivers/staging/r8188eu/include/rtw_eeprom.h
index e517239bd75e..3e8d3bb48903 100644
--- a/drivers/staging/r8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/r8188eu/include/rtw_eeprom.h
@@ -7,38 +7,7 @@
#include "osdep_service.h"
#include "drv_types.h"
-#define RTL8712_EEPROM_ID 0x8712
-
#define HWSET_MAX_SIZE_512 512
-#define EEPROM_MAX_SIZE HWSET_MAX_SIZE_512
-
-#define CLOCK_RATE 50 /* 100us */
-
-/* EEPROM opcodes */
-#define EEPROM_READ_OPCODE 06
-#define EEPROM_WRITE_OPCODE 05
-#define EEPROM_ERASE_OPCODE 07
-#define EEPROM_EWEN_OPCODE 19 /* Erase/write enable */
-#define EEPROM_EWDS_OPCODE 16 /* Erase/write disable */
-
-/* Country codes */
-#define USA 0x555320
-#define EUROPE 0x1 /* temp, should be provided later */
-#define JAPAN 0x2 /* temp, should be provided later */
-
-#define EEPROM_CID_DEFAULT 0x0
-#define EEPROM_CID_ALPHA 0x1
-#define EEPROM_CID_Senao 0x3
-#define EEPROM_CID_NetCore 0x5
-#define EEPROM_CID_CAMEO 0X8
-#define EEPROM_CID_SITECOM 0x9
-#define EEPROM_CID_COREGA 0xB
-#define EEPROM_CID_EDIMAX_BELK 0xC
-#define EEPROM_CID_SERCOMM_BELK 0xE
-#define EEPROM_CID_CAMEO1 0xF
-#define EEPROM_CID_WNC_COREGA 0x12
-#define EEPROM_CID_CLEVO 0x13
-#define EEPROM_CID_WHQL 0xFE
struct eeprom_priv {
u8 bautoload_fail_flag;
diff --git a/drivers/staging/r8188eu/include/rtw_efuse.h b/drivers/staging/r8188eu/include/rtw_efuse.h
index 2e19b7be1075..2daf69f554d5 100644
--- a/drivers/staging/r8188eu/include/rtw_efuse.h
+++ b/drivers/staging/r8188eu/include/rtw_efuse.h
@@ -4,110 +4,10 @@
#ifndef __RTW_EFUSE_H__
#define __RTW_EFUSE_H__
-#include "osdep_service.h"
-
-#define EFUSE_ERROE_HANDLE 1
-
-#define PG_STATE_HEADER 0x01
-#define PG_STATE_WORD_0 0x02
-#define PG_STATE_WORD_1 0x04
-#define PG_STATE_WORD_2 0x08
-#define PG_STATE_WORD_3 0x10
-#define PG_STATE_DATA 0x20
-
-#define PG_SWBYTE_H 0x01
-#define PG_SWBYTE_L 0x02
-
-#define PGPKT_DATA_SIZE 8
-
-#define EFUSE_WIFI 0
-#define EFUSE_BT 1
-
-enum _EFUSE_DEF_TYPE {
- TYPE_EFUSE_MAX_SECTION = 0,
- TYPE_EFUSE_REAL_CONTENT_LEN = 1,
- TYPE_AVAILABLE_EFUSE_BYTES_BANK = 2,
- TYPE_AVAILABLE_EFUSE_BYTES_TOTAL = 3,
- TYPE_EFUSE_MAP_LEN = 4,
- TYPE_EFUSE_PROTECT_BYTES_BANK = 5,
- TYPE_EFUSE_CONTENT_LEN_BANK = 6,
-};
-
-/* E-Fuse */
-#define EFUSE_MAP_SIZE 512
-#define EFUSE_MAX_SIZE 256
-/* end of E-Fuse */
-
-#define EFUSE_MAX_MAP_LEN 512
-#define EFUSE_MAX_HW_SIZE 512
-#define EFUSE_MAX_SECTION_BASE 16
-
-#define EXT_HEADER(header) ((header & 0x1F) == 0x0F)
-#define ALL_WORDS_DISABLED(wde) ((wde & 0x0F) == 0x0F)
-#define GET_HDR_OFFSET_2_0(header) ((header & 0xE0) >> 5)
-
-#define EFUSE_REPEAT_THRESHOLD_ 3
-
-/* The following is for BT Efuse definition */
-#define EFUSE_BT_MAX_MAP_LEN 1024
-#define EFUSE_MAX_BANK 4
-#define EFUSE_MAX_BT_BANK (EFUSE_MAX_BANK-1)
-/*--------------------------Define Parameters-------------------------------*/
#define EFUSE_MAX_WORD_UNIT 4
-/*------------------------------Define structure----------------------------*/
-struct pgpkt {
- u8 offset;
- u8 word_en;
- u8 data[8];
- u8 word_cnts;
-};
-
-/*------------------------------Define structure----------------------------*/
-struct efuse_hal {
- u8 fakeEfuseBank;
- u32 fakeEfuseUsedBytes;
- u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE];
- u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN];
- u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN];
-
- u16 BTEfuseUsedBytes;
- u8 BTEfuseUsedPercentage;
- u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
- u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
- u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
-
- u16 fakeBTEfuseUsedBytes;
- u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
- u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
- u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
-};
-
-/*------------------------Export global variable----------------------------*/
-extern u8 fakeEfuseBank;
-extern u32 fakeEfuseUsedBytes;
-extern u8 fakeEfuseContent[];
-extern u8 fakeEfuseInitMap[];
-extern u8 fakeEfuseModifiedMap[];
-
-extern u32 BTEfuseUsedBytes;
-extern u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
-extern u8 BTEfuseInitMap[];
-extern u8 BTEfuseModifiedMap[];
-
-extern u32 fakeBTEfuseUsedBytes;
-extern u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
-extern u8 fakeBTEfuseInitMap[];
-extern u8 fakeBTEfuseModifiedMap[];
-/*------------------------Export global variable----------------------------*/
-
-u8 Efuse_CalculateWordCnts(u8 word_en);
-void ReadEFuseByte(struct adapter *adapter, u16 _offset, u8 *pbuf, bool test);
-u8 efuse_OneByteRead(struct adapter *adapter, u16 addr, u8 *data, bool test);
-u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data, bool test);
-
-void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
+void ReadEFuseByte(struct adapter *adapter, u16 _offset, u8 *pbuf);
-void EFUSE_ShadowMapUpdate(struct adapter *adapter, u8 efusetype, bool test);
+void EFUSE_ShadowMapUpdate(struct adapter *adapter);
#endif
diff --git a/drivers/staging/r8188eu/include/rtw_io.h b/drivers/staging/r8188eu/include/rtw_io.h
index c6a078210eeb..6910e2b430e2 100644
--- a/drivers/staging/r8188eu/include/rtw_io.h
+++ b/drivers/staging/r8188eu/include/rtw_io.h
@@ -224,7 +224,7 @@ u8 rtw_read8(struct adapter *adapter, u32 addr);
u16 rtw_read16(struct adapter *adapter, u32 addr);
u32 rtw_read32(struct adapter *adapter, u32 addr);
void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-u32 rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 rtw_read_port(struct adapter *adapter, u8 *pmem);
void rtw_read_port_cancel(struct adapter *adapter);
int rtw_write8(struct adapter *adapter, u32 addr, u8 val);
diff --git a/drivers/staging/r8188eu/include/rtw_led.h b/drivers/staging/r8188eu/include/rtw_led.h
index c035fe267635..2c14cb23d9ad 100644
--- a/drivers/staging/r8188eu/include/rtw_led.h
+++ b/drivers/staging/r8188eu/include/rtw_led.h
@@ -7,41 +7,7 @@
#include "osdep_service.h"
#include "drv_types.h"
-#define MSECS(t) (HZ * ((t) / 1000) + (HZ * ((t) % 1000)) / 1000)
-
-#define LED_BLINK_NORMAL_INTERVAL 100
-#define LED_BLINK_SLOWLY_INTERVAL 200
-#define LED_BLINK_LONG_INTERVAL 400
-
-#define LED_BLINK_NO_LINK_INTERVAL_ALPHA 1000
-#define LED_BLINK_LINK_INTERVAL_ALPHA 500 /* 500 */
-#define LED_BLINK_SCAN_INTERVAL_ALPHA 180 /* 150 */
-#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
-#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
-
-#define LED_BLINK_NORMAL_INTERVAL_NETTRONIX 100
-#define LED_BLINK_SLOWLY_INTERVAL_NETTRONIX 2000
-
-#define LED_BLINK_SLOWLY_INTERVAL_PORNET 1000
-#define LED_BLINK_NORMAL_INTERVAL_PORNET 100
-
-#define LED_BLINK_FAST_INTERVAL_BITLAND 30
-
-/* 060403, rcnjko: Customized for AzWave. */
-#define LED_CM2_BLINK_ON_INTERVAL 250
-#define LED_CM2_BLINK_OFF_INTERVAL 4750
-
-#define LED_CM8_BLINK_INTERVAL 500 /* for QMI */
-#define LED_CM8_BLINK_OFF_INTERVAL 3750 /* for QMI */
-
-/* 080124, lanhsin: Customized for RunTop */
-#define LED_RunTop_BLINK_INTERVAL 300
-
-/* 060421, rcnjko: Customized for Sercomm Printer Server case. */
-#define LED_CM3_BLINK_INTERVAL 1500
-
enum LED_CTL_MODE {
- LED_CTL_POWER_ON = 1,
LED_CTL_LINK = 2,
LED_CTL_NO_LINK = 3,
LED_CTL_TX = 4,
@@ -51,10 +17,7 @@ enum LED_CTL_MODE {
LED_CTL_START_TO_LINK = 8,
LED_CTL_START_WPS = 9,
LED_CTL_STOP_WPS = 10,
- LED_CTL_START_WPS_BOTTON = 11, /* added for runtop */
- LED_CTL_STOP_WPS_FAIL = 12, /* added for ALPHA */
- LED_CTL_STOP_WPS_FAIL_OVERLAP = 13, /* added for BELKIN */
- LED_CTL_CONNECTION_NO_TRANSFER = 14,
+ LED_CTL_STOP_WPS_FAIL = 12,
};
enum LED_STATE_871x {
@@ -63,99 +26,46 @@ enum LED_STATE_871x {
RTW_LED_OFF = 2,
LED_BLINK_NORMAL = 3,
LED_BLINK_SLOWLY = 4,
- LED_BLINK_POWER_ON = 5,
LED_BLINK_SCAN = 6, /* LED is blinking during scanning period,
* the # of times to blink is depend on time
* for scanning. */
- LED_BLINK_NO_LINK = 7, /* LED is blinking during no link state. */
LED_BLINK_StartToBlink = 8,/* Customzied for Sercomm Printer
* Server case */
LED_BLINK_TXRX = 9,
LED_BLINK_WPS = 10, /* LED is blinkg during WPS communication */
- LED_BLINK_WPS_STOP = 11, /* for ALPHA */
- LED_BLINK_WPS_STOP_OVERLAP = 12, /* for BELKIN */
+ LED_BLINK_WPS_STOP = 11,
LED_BLINK_RUNTOP = 13, /* Customized for RunTop */
- LED_BLINK_CAMEO = 14,
- LED_BLINK_XAVI = 15,
- LED_BLINK_ALWAYS_ON = 16,
-};
-
-enum LED_PIN_871x {
- LED_PIN_NULL = 0,
- LED_PIN_LED0 = 1,
- LED_PIN_LED1 = 2,
- LED_PIN_LED2 = 3,
- LED_PIN_GPIO0 = 4,
};
struct LED_871x {
struct adapter *padapter;
- enum LED_PIN_871x LedPin; /* Identify how to implement this
- * SW led. */
enum LED_STATE_871x CurrLedState; /* Current LED state. */
enum LED_STATE_871x BlinkingLedState; /* Next state for blinking,
* either RTW_LED_ON or RTW_LED_OFF are. */
- u8 bLedOn; /* true if LED is ON, false if LED is OFF. */
+ bool bLedOn; /* true if LED is ON, false if LED is OFF. */
- u8 bLedBlinkInProgress; /* true if it is blinking, false o.w.. */
+ bool bLedBlinkInProgress; /* true if it is blinking, false o.w.. */
- u8 bLedWPSBlinkInProgress;
+ bool bLedWPSBlinkInProgress;
u32 BlinkTimes; /* Number of times to toggle led state for blinking. */
- struct timer_list BlinkTimer; /* Timer object for led blinking. */
-
- /* ALPHA, added by chiyoko, 20090106 */
- u8 bLedNoLinkBlinkInProgress;
- u8 bLedLinkBlinkInProgress;
- u8 bLedStartToLinkBlinkInProgress;
- u8 bLedScanBlinkInProgress;
- struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to
- * manipulate H/W to blink LED. */
+ bool bLedNoLinkBlinkInProgress;
+ bool bLedLinkBlinkInProgress;
+ bool bLedScanBlinkInProgress;
+ struct delayed_work blink_work;
};
-#define IS_LED_WPS_BLINKING(_LED_871x) \
- (((struct LED_871x *)_LED_871x)->CurrLedState == LED_BLINK_WPS || \
- ((struct LED_871x *)_LED_871x)->CurrLedState == LED_BLINK_WPS_STOP || \
- ((struct LED_871x *)_LED_871x)->bLedWPSBlinkInProgress)
-
-#define IS_LED_BLINKING(_LED_871x) \
- (((struct LED_871x *)_LED_871x)->bLedWPSBlinkInProgress || \
- ((struct LED_871x *)_LED_871x)->bLedScanBlinkInProgress)
-
-void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
-
struct led_priv{
- /* add for led control */
struct LED_871x SwLed0;
- struct LED_871x SwLed1;
- u8 bRegUseLed;
- void (*LedControlHandler)(struct adapter *padapter,
- enum LED_CTL_MODE LedAction);
- /* add for led control */
+ bool bRegUseLed;
};
-#define rtw_led_control(adapt, action) \
- do { \
- if ((adapt)->ledpriv.LedControlHandler) \
- (adapt)->ledpriv.LedControlHandler((adapt), (action)); \
- } while (0)
-
-void BlinkTimerCallback(struct timer_list *t);
-void BlinkWorkItemCallback(struct work_struct *work);
-
-void ResetLedStatus(struct LED_871x * pLed);
-
-void InitLed871x(struct adapter *padapter, struct LED_871x *pLed,
- enum LED_PIN_871x LedPin);
-
-void DeInitLed871x(struct LED_871x *pLed);
+void rtl8188eu_InitSwLeds(struct adapter *padapter);
+void rtl8188eu_DeInitSwLeds(struct adapter *padapter);
-/* hal... */
-void BlinkHandler(struct LED_871x * pLed);
-void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
-void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
+void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction);
#endif /* __RTW_LED_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_mlme.h b/drivers/staging/r8188eu/include/rtw_mlme.h
index e8d51f495702..77169c15080a 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme.h
@@ -243,18 +243,6 @@ struct wifidirect_info {
* by using the sta_preset CAPI. */
/* 0: disable */
/* 1: enable */
- u8 wfd_tdls_enable; /* Flag to enable or disable the TDLS by WFD Sigma*/
- /* 0: disable */
- /* 1: enable */
- u8 wfd_tdls_weaksec; /* Flag to enable or disable the weak security
- * function for TDLS by WFD Sigma */
- /* 0: disable */
- /* In this case, the driver can't issue the tdsl
- * setup request frame. */
- /* 1: enable */
- /* In this case, the driver can issue the tdls
- * setup request frame */
- /* even the current security is weak security. */
/* This field will store the WPS value (PIN value or PBC) that UI had
* got from the user. */
diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
index 5b307ad3afa5..26f31f20e428 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
@@ -162,11 +162,6 @@ struct rt_channel_plan {
unsigned char Len;
};
-struct rt_channel_plan_2g {
- unsigned char Channel[MAX_CHANNEL_NUM_2G];
- unsigned char Len;
-};
-
struct rt_channel_plan_map {
unsigned char Index2G;
};
diff --git a/drivers/staging/r8188eu/include/rtw_pwrctrl.h b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
index b19ef796ab54..2d5298373d74 100644
--- a/drivers/staging/r8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
@@ -19,10 +19,6 @@ enum power_mgnt {
PS_MODE_DTIM,
PS_MODE_VOIP,
PS_MODE_UAPSD_WMM,
- PS_MODE_UAPSD,
- PS_MODE_IBSS,
- PS_MODE_WWLAN,
- PM_Radio_Off,
PM_Card_Disable,
PS_MODE_NUM
};
@@ -57,7 +53,6 @@ struct pwrctrl_priv {
u8 reg_rfoff;
u8 reg_pdnmode; /* powerdown mode */
- u32 rfoff_reason;
/* RF OFF Level */
u32 cur_ps_level;
@@ -80,7 +75,6 @@ struct pwrctrl_priv {
s32 pnp_current_pwr_state;
u8 pnp_bstop_trx;
- u8 bInternalAutoSuspend;
u8 bInSuspend;
u8 bSupportRemoteWakeup;
struct timer_list pwr_state_check_timer;
@@ -93,7 +87,6 @@ struct pwrctrl_priv {
enum rt_rf_power_state change_rfpwrstate;
u8 wepkeymask;
- u8 bHWPowerdown;/* if support hw power down */
u8 bkeepfwalive;
};
diff --git a/drivers/staging/r8188eu/include/rtw_recv.h b/drivers/staging/r8188eu/include/rtw_recv.h
index 1e28ec731547..b43a46887343 100644
--- a/drivers/staging/r8188eu/include/rtw_recv.h
+++ b/drivers/staging/r8188eu/include/rtw_recv.h
@@ -177,7 +177,6 @@ struct recv_priv {
uint rx_smallpacket_crcerr;
uint rx_middlepacket_crcerr;
struct semaphore allrxreturnevt;
- uint ff_hwaddr;
u8 rx_pending_cnt;
struct tasklet_struct irq_prepare_beacon_tasklet;
diff --git a/drivers/staging/r8188eu/include/rtw_rf.h b/drivers/staging/r8188eu/include/rtw_rf.h
index 7ec252fec054..b7267e75346c 100644
--- a/drivers/staging/r8188eu/include/rtw_rf.h
+++ b/drivers/staging/r8188eu/include/rtw_rf.h
@@ -6,33 +6,16 @@
#include "rtw_cmd.h"
-#define OFDM_PHY 1
-#define MIXED_PHY 2
-#define CCK_PHY 3
-
#define NumRates (13)
/* slot time for 11g */
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
-#define RTL8711_RF_MAX_SENS 6
-#define RTL8711_RF_DEF_SENS 4
-
-/* We now define the following channels as the max channels in each
- * channel plan. */
-/* 2G, total 14 chnls */
-/* {1,2,3,4,5,6,7,8,9,10,11,12,13,14} */
-#define MAX_CHANNEL_NUM_2G 14
#define MAX_CHANNEL_NUM 14 /* 2.4 GHz only */
#define NUM_REGULATORYS 1
-/* Country codes */
-#define USA 0x555320
-#define EUROPE 0x1 /* temp, should be provided later */
-#define JAPAN 0x2 /* temp, should be provided later */
-
struct regulatory_class {
u32 starting_freq; /* MHz, */
u8 channel_set[MAX_CHANNEL_NUM];
@@ -69,13 +52,6 @@ enum _REG_PREAMBLE_MODE {
PREAMBLE_SHORT = 3,
};
-enum rf90_radio_path {
- RF90_PATH_A = 0, /* Radio Path A */
- RF90_PATH_B = 1, /* Radio Path B */
- RF90_PATH_C = 2, /* Radio Path C */
- RF90_PATH_D = 3 /* Radio Path D */
-};
-
/* Bandwidth Offset */
#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
@@ -99,16 +75,6 @@ enum ht_extchnl_offset {
HT_EXTCHNL_OFFSET_LOWER = 3,
};
-/* 2007/11/15 MH Define different RF type. */
-enum rt_rf_type_def {
- RF_1T2R = 0,
- RF_2T4R = 1,
- RF_2T2R = 2,
- RF_1T1R = 3,
- RF_2T2R_GREEN = 4,
- RF_819X_MAX_TYPE = 5,
-};
-
u32 rtw_ch2freq(u32 ch);
#endif /* _RTL8711_RF_H_ */
diff --git a/drivers/staging/r8188eu/include/usb_osintf.h b/drivers/staging/r8188eu/include/usb_osintf.h
index 624298b4bd0b..3e777ca52745 100644
--- a/drivers/staging/r8188eu/include/usb_osintf.h
+++ b/drivers/staging/r8188eu/include/usb_osintf.h
@@ -16,7 +16,6 @@ extern int rtw_mc2u_disable;
u8 usbvendorrequest(struct dvobj_priv *pdvobjpriv, enum bt_usb_request brequest,
enum rt_usb_wvalue wvalue, u8 windex, void *data,
u8 datalen, u8 isdirectionin);
-int pm_netdev_open(struct net_device *pnetdev, u8 bnormal);
void netdev_br_init(struct net_device *netdev);
void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb);
void *scdb_findEntry(struct adapter *priv, unsigned char *ipAddr);
diff --git a/drivers/staging/r8188eu/include/wifi.h b/drivers/staging/r8188eu/include/wifi.h
index 193a557f0f47..0b0bd39a257f 100644
--- a/drivers/staging/r8188eu/include/wifi.h
+++ b/drivers/staging/r8188eu/include/wifi.h
@@ -13,32 +13,9 @@
#define BIT(x) (1 << (x))
#define WLAN_ETHHDR_LEN 14
-#define WLAN_ETHADDR_LEN 6
-#define WLAN_IEEE_OUI_LEN 3
-#define WLAN_ADDR_LEN 6
-#define WLAN_CRC_LEN 4
-#define WLAN_BSSID_LEN 6
-#define WLAN_BSS_TS_LEN 8
#define WLAN_HDR_A3_LEN 24
-#define WLAN_HDR_A4_LEN 30
#define WLAN_HDR_A3_QOS_LEN 26
-#define WLAN_HDR_A4_QOS_LEN 32
#define WLAN_SSID_MAXLEN 32
-#define WLAN_DATA_MAXLEN 2312
-
-#define WLAN_A3_PN_OFFSET 24
-#define WLAN_A4_PN_OFFSET 30
-
-#define WLAN_MIN_ETHFRM_LEN 60
-#define WLAN_MAX_ETHFRM_LEN 1514
-#define WLAN_ETHHDR_LEN 14
-
-#define P80211CAPTURE_VERSION 0x80211001
-
-/* This value is tested by WiFi 11n Test Plan 5.2.3. */
-/* This test verifies the WLAN NIC can update the NAV through sending
- * the CTS with large duration. */
-#define WiFiNavUpperUs 30000 /* 30 ms */
enum WIFI_FRAME_TYPE {
WIFI_MGT_TYPE = (0),
@@ -487,13 +464,6 @@ static inline int IsFrameTypeCtrl(unsigned char *pframe)
#define _STATUS_CODE_ 2
#define _TIMESTAMP_ 8
-#define AUTH_ODD_TO 0
-#define AUTH_EVEN_TO 1
-
-#define WLAN_ETHCONV_ENCAP 1
-#define WLAN_ETHCONV_RFC1042 2
-#define WLAN_ETHCONV_8021h 3
-
#define cap_ESS BIT(0)
#define cap_IBSS BIT(1)
#define cap_CFPollable BIT(2)
@@ -632,13 +602,6 @@ enum ht_cap_ampdu_factor {
#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8) BIT(3))
#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8) BIT(4))
-#define HT_INFO_STBC_PARAM_DUAL_BEACON ((u16) BIT(6))
-#define HT_INFO_STBC_PARAM_DUAL_STBC_PROTECT ((u16) BIT(7))
-#define HT_INFO_STBC_PARAM_SECONDARY_BC ((u16) BIT(8))
-#define HT_INFO_STBC_PARAM_LSIG_TXOP_PROTECT_ALLOWED ((u16) BIT(9))
-#define HT_INFO_STBC_PARAM_PCO_ACTIVE ((u16) BIT(10))
-#define HT_INFO_STBC_PARAM_PCO_PHASE ((u16) BIT(11))
-
/* ===============WPS Section=============== */
/* For WPSv1.0 */
#define WPSOUI 0x0050f204
@@ -930,7 +893,7 @@ enum P2P_PROTO_WK_ID {
P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
- P2P_AP_P2P_CH_SWITCH_PROCESS_WK =5,
+ P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
P2P_RO_CH_WK = 6,
};
@@ -949,26 +912,6 @@ enum P2P_PS_MODE {
P2P_PS_MIX = 3, /* CTWindow and NoA */
};
-/* =====================WFD Section===================== */
-/* For Wi-Fi Display */
-#define WFD_ATTR_DEVICE_INFO 0x00
-#define WFD_ATTR_ASSOC_BSSID 0x01
-#define WFD_ATTR_COUPLED_SINK_INFO 0x06
-#define WFD_ATTR_LOCAL_IP_ADDR 0x08
-#define WFD_ATTR_SESSION_INFO 0x09
-#define WFD_ATTR_ALTER_MAC 0x0a
-
-/* For WFD Device Information Attribute */
-#define WFD_DEVINFO_SOURCE 0x0000
-#define WFD_DEVINFO_PSINK 0x0001
-#define WFD_DEVINFO_SSINK 0x0002
-#define WFD_DEVINFO_DUAL 0x0003
-
-#define WFD_DEVINFO_SESSION_AVAIL 0x0010
-#define WFD_DEVINFO_WSD 0x0040
-#define WFD_DEVINFO_PC_TDLS 0x0080
-#define WFD_DEVINFO_HDCP_SUPPORT 0x0100
-
#define IP_MCAST_MAC(mac) \
((mac[0] == 0x01) && (mac[1] == 0x00) && (mac[2] == 0x5e))
#define ICMPV6_MCAST_MAC(mac) \
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 9404355726d0..41b457838a5b 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-#define _IOCTL_LINUX_C_
-
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
#include "../include/wlan_bssdef.h"
@@ -14,7 +12,7 @@
#include "../include/rtw_ioctl_set.h"
#include "../include/usb_ops.h"
#include "../include/rtl8188e_hal.h"
-#include "../include/rtl8188e_led.h"
+#include "../include/rtw_led.h"
#include "../include/rtw_iol.h"
@@ -60,7 +58,7 @@ void rtw_indicate_wx_assoc_event(struct adapter *padapter)
memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN);
- DBG_88E_LEVEL(_drv_always_, "assoc success\n");
+ netdev_dbg(padapter->pnetdev, "assoc success\n");
wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
}
@@ -73,7 +71,7 @@ void rtw_indicate_wx_disassoc_event(struct adapter *padapter)
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
- DBG_88E_LEVEL(_drv_always_, "indicate disassoc\n");
+ netdev_dbg(padapter->pnetdev, "indicate disassoc\n");
wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
}
@@ -2092,18 +2090,6 @@ static int rtw_wx_write_rf(struct net_device *dev,
return 0;
}
-static int rtw_wx_priv_null(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- return -1;
-}
-
-static int dummy(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- return -1;
-}
-
static int rtw_wx_set_channel_plan(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -2120,37 +2106,6 @@ static int rtw_wx_set_channel_plan(struct net_device *dev,
return 0;
}
-static int rtw_wx_set_mtk_wps_probe_ie(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- return 0;
-}
-
-static int rtw_wx_get_sensitivity(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *buf)
-{
- return 0;
-}
-
-static int rtw_wx_set_mtk_wps_ie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
-/*
- * For all data larger than 16 octets, we need to use a
- * pointer to memory allocated in user space.
- */
-static int rtw_drvext_hdl(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
static int rtw_get_ap_info(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -3043,7 +2998,7 @@ static int rtw_p2p_connect(struct net_device *dev,
struct list_head *plist, *phead;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
- uint uintPeerChannel = 0;
+ u32 peer_channel = 0;
/* Commented by Albert 20110304 */
/* The input data contains two pieces of information. */
@@ -3073,7 +3028,7 @@ static int rtw_p2p_connect(struct net_device *dev,
while (phead != plist) {
pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
- uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ peer_channel = pnetwork->network.Configuration.DSConfig;
break;
}
@@ -3082,11 +3037,11 @@ static int rtw_p2p_connect(struct net_device *dev,
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- if (uintPeerChannel) {
+ if (peer_channel) {
memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
- pwdinfo->nego_req_info.peer_channel_num[0] = uintPeerChannel;
+ pwdinfo->nego_req_info.peer_channel_num[0] = peer_channel;
memcpy(pwdinfo->nego_req_info.peerDevAddr, pnetwork->network.MacAddress, ETH_ALEN);
pwdinfo->nego_req_info.benable = true;
@@ -3121,7 +3076,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
struct list_head *plist, *phead;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
- uint uintPeerChannel = 0;
+ uint peer_channel = 0;
u8 attr_content[50] = {0x00};
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
@@ -3177,13 +3132,13 @@ static int rtw_p2p_invite_req(struct net_device *dev,
if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
/* Handle the P2P Device ID attribute of Beacon first */
if (!memcmp(attr_content, pinvite_req_info->peer_macaddr, ETH_ALEN)) {
- uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ peer_channel = pnetwork->network.Configuration.DSConfig;
break;
}
} else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
/* Handle the P2P Device Info attribute of probe response */
if (!memcmp(attr_content, pinvite_req_info->peer_macaddr, ETH_ALEN)) {
- uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ peer_channel = pnetwork->network.Configuration.DSConfig;
break;
}
}
@@ -3193,7 +3148,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- if (uintPeerChannel) {
+ if (peer_channel) {
/* Store the GO's bssid */
for (jj = 0, kk = 18; jj < ETH_ALEN; jj++, kk += 3)
pinvite_req_info->go_bssid[jj] = key_2char2num(extra[kk], extra[kk + 1]);
@@ -3202,12 +3157,12 @@ static int rtw_p2p_invite_req(struct net_device *dev,
pinvite_req_info->ssidlen = wrqu->data.length - 36;
memcpy(pinvite_req_info->go_ssid, &extra[36], (u32)pinvite_req_info->ssidlen);
pinvite_req_info->benable = true;
- pinvite_req_info->peer_ch = uintPeerChannel;
+ pinvite_req_info->peer_ch = peer_channel;
rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_INVITE_REQ);
- set_channel_bwmode(padapter, uintPeerChannel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ set_channel_bwmode(padapter, peer_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
@@ -3260,7 +3215,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
struct list_head *plist, *phead;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
- uint uintPeerChannel = 0;
+ uint peer_channel = 0;
u8 attr_content[100] = {0x00};
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
@@ -3310,7 +3265,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
plist = phead->next;
while (phead != plist) {
- if (uintPeerChannel != 0)
+ if (peer_channel != 0)
break;
pnetwork = container_of(plist, struct wlan_network, list);
@@ -3328,13 +3283,13 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
/* Handle the P2P Device ID attribute of Beacon first */
if (!memcmp(attr_content, peerMAC, ETH_ALEN)) {
- uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ peer_channel = pnetwork->network.Configuration.DSConfig;
break;
}
} else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
/* Handle the P2P Device Info attribute of probe response */
if (!memcmp(attr_content, peerMAC, ETH_ALEN)) {
- uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ peer_channel = pnetwork->network.Configuration.DSConfig;
break;
}
}
@@ -3349,11 +3304,11 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- if (uintPeerChannel) {
- DBG_88E("[%s] peer channel: %d!\n", __func__, uintPeerChannel);
+ if (peer_channel) {
+ DBG_88E("[%s] peer channel: %d!\n", __func__, peer_channel);
memcpy(pwdinfo->tx_prov_disc_info.peerIFAddr, pnetwork->network.MacAddress, ETH_ALEN);
memcpy(pwdinfo->tx_prov_disc_info.peerDevAddr, peerMAC, ETH_ALEN);
- pwdinfo->tx_prov_disc_info.peer_channel_num[0] = (u16)uintPeerChannel;
+ pwdinfo->tx_prov_disc_info.peer_channel_num[0] = (u16)peer_channel;
pwdinfo->tx_prov_disc_info.benable = true;
rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ);
@@ -3365,7 +3320,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
pwdinfo->tx_prov_disc_info.ssid.SsidLength = P2P_WILDCARD_SSID_LEN;
}
- set_channel_bwmode(padapter, uintPeerChannel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ set_channel_bwmode(padapter, peer_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
@@ -3617,27 +3572,18 @@ static void bb_reg_dump(struct adapter *padapter)
static void rf_reg_dump(struct adapter *padapter)
{
- int i, j = 1, path;
+ int i, j = 1, path = 0;
u32 value;
- u8 rf_type, path_nums = 0;
- GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
pr_info("\n ======= RF REG =======\n");
- if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type))
- path_nums = 1;
- else
- path_nums = 2;
-
- for (path = 0; path < path_nums; path++) {
- pr_info("\nRF_Path(%x)\n", path);
- for (i = 0; i < 0x100; i++) {
- value = rtl8188e_PHY_QueryRFReg(padapter, path, i, 0xffffffff);
- if (j % 4 == 1)
- pr_info("0x%02x ", i);
- pr_info(" 0x%08x ", value);
- if ((j++) % 4 == 0)
- pr_info("\n");
- }
+ pr_info("\nRF_Path(%x)\n", path);
+ for (i = 0; i < 0x100; i++) {
+ value = rtl8188e_PHY_QueryRFReg(padapter, path, i, 0xffffffff);
+ if (j % 4 == 1)
+ pr_info("0x%02x ", i);
+ pr_info(" 0x%08x ", value);
+ if ((j++) % 4 == 0)
+ pr_info("\n");
}
}
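
For reference, the four-values-per-line layout produced by the simplified loop can be reproduced in a standalone C sketch; this is an illustration only, with a dummy value standing in for rtl8188e_PHY_QueryRFReg():

#include <stdio.h>

/* Minimal sketch of the dump layout: the register offset starts every
 * group of four values and a newline ends the group. */
int main(void)
{
	int i, j = 1;

	for (i = 0; i < 0x10; i++) {			/* 0x100 in the driver */
		unsigned int value = i * 0x11111111u;	/* stand-in for the RF read */

		if (j % 4 == 1)
			printf("0x%02x ", i);
		printf(" 0x%08x ", value);
		if ((j++) % 4 == 0)
			printf("\n");
	}
	return 0;
}
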
@@ -4058,12 +4004,6 @@ static int rtw_dbg_port(struct net_device *dev,
DBG_88E("turn %s the bShowGetP2PState Variable\n", (extra_arg == 1) ? "on" : "off");
padapter->bShowGetP2PState = extra_arg;
break;
- case 0xaa:
- if (extra_arg > 0x13)
- extra_arg = 0xFF;
- DBG_88E("chang data rate to :0x%02x\n", extra_arg);
- padapter->fix_rate = extra_arg;
- break;
case 0xdd:/* registers dump, 0 for mac reg, 1 for bb reg, 2 for rf reg */
if (extra_arg == 0)
mac_reg_dump(padapter);
@@ -4230,81 +4170,21 @@ static int rtw_pm_set(struct net_device *dev,
return ret;
}
-extern int wifirate2_ratetbl_inx(unsigned char rate);
-
-static int rtw_tdls(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
-static int rtw_tdls_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
-static int rtw_test(
- struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u32 len;
- u8 *pbuf, *pch;
- char *ptmp;
- u8 *delim = ",";
-
- DBG_88E("+%s\n", __func__);
- len = wrqu->data.length;
-
- pbuf = kzalloc(len, GFP_KERNEL);
- if (!pbuf) {
- DBG_88E("%s: no memory!\n", __func__);
- return -ENOMEM;
- }
-
- if (copy_from_user(pbuf, wrqu->data.pointer, len)) {
- kfree(pbuf);
- DBG_88E("%s: copy from user fail!\n", __func__);
- return -EFAULT;
- }
- DBG_88E("%s: string =\"%s\"\n", __func__, pbuf);
-
- ptmp = (char *)pbuf;
- pch = strsep(&ptmp, delim);
- if (!pch || strlen(pch) == 0) {
- kfree(pbuf);
- DBG_88E("%s: parameter error(level 1)!\n", __func__);
- return -EFAULT;
- }
- kfree(pbuf);
- return 0;
-}
-
static iw_handler rtw_handlers[] = {
IW_HANDLER(SIOCGIWNAME, rtw_wx_get_name),
- IW_HANDLER(SIOCSIWNWID, dummy),
- IW_HANDLER(SIOCGIWNWID, dummy),
IW_HANDLER(SIOCGIWFREQ, rtw_wx_get_freq),
IW_HANDLER(SIOCSIWMODE, rtw_wx_set_mode),
IW_HANDLER(SIOCGIWMODE, rtw_wx_get_mode),
- IW_HANDLER(SIOCSIWSENS, dummy),
IW_HANDLER(SIOCGIWSENS, rtw_wx_get_sens),
IW_HANDLER(SIOCGIWRANGE, rtw_wx_get_range),
IW_HANDLER(SIOCSIWPRIV, rtw_wx_set_priv),
- IW_HANDLER(SIOCSIWSPY, dummy),
- IW_HANDLER(SIOCGIWSPY, dummy),
IW_HANDLER(SIOCSIWAP, rtw_wx_set_wap),
IW_HANDLER(SIOCGIWAP, rtw_wx_get_wap),
IW_HANDLER(SIOCSIWMLME, rtw_wx_set_mlme),
- IW_HANDLER(SIOCGIWAPLIST, dummy),
IW_HANDLER(SIOCSIWSCAN, rtw_wx_set_scan),
IW_HANDLER(SIOCGIWSCAN, rtw_wx_get_scan),
IW_HANDLER(SIOCSIWESSID, rtw_wx_set_essid),
IW_HANDLER(SIOCGIWESSID, rtw_wx_get_essid),
- IW_HANDLER(SIOCSIWNICKN, dummy),
IW_HANDLER(SIOCGIWNICKN, rtw_wx_get_nick),
IW_HANDLER(SIOCSIWRATE, rtw_wx_set_rate),
IW_HANDLER(SIOCGIWRATE, rtw_wx_get_rate),
@@ -4312,13 +4192,9 @@ static iw_handler rtw_handlers[] = {
IW_HANDLER(SIOCGIWRTS, rtw_wx_get_rts),
IW_HANDLER(SIOCSIWFRAG, rtw_wx_set_frag),
IW_HANDLER(SIOCGIWFRAG, rtw_wx_get_frag),
- IW_HANDLER(SIOCSIWTXPOW, dummy),
- IW_HANDLER(SIOCGIWTXPOW, dummy),
- IW_HANDLER(SIOCSIWRETRY, dummy),
IW_HANDLER(SIOCGIWRETRY, rtw_wx_get_retry),
IW_HANDLER(SIOCSIWENCODE, rtw_wx_set_enc),
IW_HANDLER(SIOCGIWENCODE, rtw_wx_get_enc),
- IW_HANDLER(SIOCSIWPOWER, dummy),
IW_HANDLER(SIOCGIWPOWER, rtw_wx_get_power),
IW_HANDLER(SIOCSIWGENIE, rtw_wx_set_gen_ie),
IW_HANDLER(SIOCSIWAUTH, rtw_wx_set_auth),
@@ -4340,9 +4216,6 @@ static const struct iw_priv_args rtw_private_args[] = {
SIOCIWFIRSTPRIV + 0x2, 0, 0, "driver_ext"
},
{
- SIOCIWFIRSTPRIV + 0x3, 0, 0, "mp_ioctl"
- },
- {
SIOCIWFIRSTPRIV + 0x4,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "apinfo"
},
@@ -4355,19 +4228,6 @@ static const struct iw_priv_args rtw_private_args[] = {
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_start"
},
{
- SIOCIWFIRSTPRIV + 0x7,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "get_sensitivity"
- },
- {
- SIOCIWFIRSTPRIV + 0x8,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_prob_req_ie"
- },
- {
- SIOCIWFIRSTPRIV + 0x9,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_assoc_req_ie"
- },
-
- {
SIOCIWFIRSTPRIV + 0xA,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "channel_plan"
},
@@ -4396,33 +4256,18 @@ static const struct iw_priv_args rtw_private_args[] = {
SIOCIWFIRSTPRIV + 0x12,
IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IFNAMSIZ, "p2p_get2"
},
- {SIOCIWFIRSTPRIV + 0x13, IW_PRIV_TYPE_CHAR | 128, 0, "NULL"},
- {
- SIOCIWFIRSTPRIV + 0x14,
- IW_PRIV_TYPE_CHAR | 64, 0, "tdls"
- },
- {
- SIOCIWFIRSTPRIV + 0x15,
- IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | P2P_PRIVATE_IOCTL_SET_LEN, "tdls_get"
- },
{
SIOCIWFIRSTPRIV + 0x16,
IW_PRIV_TYPE_CHAR | 64, 0, "pm_set"
},
{SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ, 0, "rereg_nd_name"},
-
- {SIOCIWFIRSTPRIV + 0x1D, IW_PRIV_TYPE_CHAR | 40, IW_PRIV_TYPE_CHAR | 0x7FF, "test"
- },
-
- {SIOCIWFIRSTPRIV + 0x0E, IW_PRIV_TYPE_CHAR | 1024, 0, ""}, /* set */
- {SIOCIWFIRSTPRIV + 0x0F, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, ""},/* get */
};
static iw_handler rtw_private_handler[] = {
rtw_wx_write32, /* 0x00 */
rtw_wx_read32, /* 0x01 */
-rtw_drvext_hdl, /* 0x02 */
+ NULL, /* 0x02 */
NULL, /* 0x03 */
/* for MM DTV platform */
rtw_get_ap_info, /* 0x04 */
@@ -4430,9 +4275,9 @@ NULL, /* 0x03 */
rtw_set_pid, /* 0x05 */
rtw_wps_start, /* 0x06 */
- rtw_wx_get_sensitivity, /* 0x07 */
- rtw_wx_set_mtk_wps_probe_ie, /* 0x08 */
- rtw_wx_set_mtk_wps_ie, /* 0x09 */
+ NULL, /* 0x07 */
+ NULL, /* 0x08 */
+ NULL, /* 0x09 */
/* Set Channel depend on the country code */
rtw_wx_set_channel_plan, /* 0x0A */
@@ -4448,18 +4293,12 @@ NULL, /* 0x03 */
rtw_p2p_get2, /* 0x12 */
NULL, /* 0x13 */
- rtw_tdls, /* 0x14 */
- rtw_tdls_get, /* 0x15 */
+ NULL, /* 0x14 */
+ NULL, /* 0x15 */
rtw_pm_set, /* 0x16 */
- rtw_wx_priv_null, /* 0x17 */
+ NULL, /* 0x17 */
rtw_rereg_nd_name, /* 0x18 */
- rtw_wx_priv_null, /* 0x19 */
-
- NULL, /* 0x1A */
- NULL, /* 0x1B */
- NULL, /* 0x1C is reserved for hostapd */
- rtw_test, /* 0x1D */
};
static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
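
A note on the trimmed handler table: wireless-extensions handler arrays are indexed by ioctl number, and a missing or NULL slot makes the kernel report the operation as unsupported, so the dummy stubs added nothing. A standalone sketch of the same designated-initializer pattern, with hypothetical command indices in place of the SIOCSIW*/IW_IOCTL_IDX() machinery:

#include <stdio.h>

typedef int (*handler_fn)(void);

/* Hypothetical command indices; the kernel derives these from the
 * ioctl number instead. */
#define CMD_GET_NAME 0
#define CMD_SET_MODE 1
#define CMD_GET_MODE 2

static int get_name(void) { return 0; }
static int get_mode(void) { return 0; }

static handler_fn handlers[] = {
	[CMD_GET_NAME] = get_name,
	[CMD_GET_MODE] = get_mode,
	/* CMD_SET_MODE left out -> NULL, i.e. "not supported" */
};

int main(void)
{
	unsigned int cmd = CMD_SET_MODE;

	if (cmd >= sizeof(handlers) / sizeof(handlers[0]) || !handlers[cmd])
		printf("cmd %u: not supported\n", cmd);
	else
		handlers[cmd]();
	return 0;
}
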
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index 10059240bf54..b65e44f97826 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -11,7 +11,7 @@
#include "../include/rtw_ioctl.h"
#include "../include/usb_osintf.h"
#include "../include/rtw_br_ext.h"
-#include "../include/rtl8188e_led.h"
+#include "../include/rtw_led.h"
#include "../include/rtl8188e_dm.h"
MODULE_LICENSE("GPL");
@@ -75,7 +75,6 @@ static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
static int rtw_lowrate_two_xmit = 1;/* Use 2 path Tx to transmit MCS0~7 and legacy mode */
-static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */
static int rtw_low_power;
static int rtw_wifi_spec;
static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
@@ -123,7 +122,6 @@ module_param(rtw_ampdu_enable, int, 0644);
module_param(rtw_rx_stbc, int, 0644);
module_param(rtw_ampdu_amsdu, int, 0644);
module_param(rtw_lowrate_two_xmit, int, 0644);
-module_param(rtw_rf_config, int, 0644);
module_param(rtw_power_mgnt, int, 0644);
module_param(rtw_smart_ps, int, 0644);
module_param(rtw_low_power, int, 0644);
@@ -153,7 +151,7 @@ MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P")
module_param_named(debug, rtw_debug, int, 0444);
MODULE_PARM_DESC(debug, "Set debug level (1-9) (default 1)");
-static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
+static uint loadparam(struct adapter *padapter)
{
struct registry_priv *registry_par = &padapter->registrypriv;
@@ -205,7 +203,6 @@ static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
registry_par->rx_stbc = (u8)rtw_rx_stbc;
registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu;
registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit;
- registry_par->rf_config = (u8)rtw_rf_config;
registry_par->low_power = (u8)rtw_low_power;
registry_par->wifi_spec = (u8)rtw_wifi_spec;
registry_par->channel_plan = (u8)rtw_channel_plan;
@@ -371,7 +368,7 @@ struct net_device *rtw_init_netdev(struct adapter *old_padapter)
pnetdev->wireless_handlers = (struct iw_handler_def *)&rtw_handlers_def;
/* step 2. */
- loadparam(padapter, pnetdev);
+ loadparam(padapter);
return pnetdev;
}
@@ -399,7 +396,7 @@ void rtw_stop_drv_threads(struct adapter *padapter)
wait_for_completion(&padapter->cmdpriv.stop_cmd_thread);
}
-static u8 rtw_init_default_value(struct adapter *padapter)
+static void rtw_init_default_value(struct adapter *padapter)
{
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -444,7 +441,6 @@ static u8 rtw_init_default_value(struct adapter *padapter)
padapter->bRxRSSIDisplay = 0;
padapter->bNotifyChannelChange = 0;
padapter->bShowGetP2PState = 1;
- return _SUCCESS;
}
u8 rtw_reset_drv_sw(struct adapter *padapter)
@@ -478,50 +474,37 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
u8 rtw_init_drv_sw(struct adapter *padapter)
{
- u8 ret8 = _SUCCESS;
-
- if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL)
+ return _FAIL;
padapter->cmdpriv.padapter = padapter;
- if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL)
+ return _FAIL;
- if (rtw_init_mlme_priv(padapter) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (rtw_init_mlme_priv(padapter) == _FAIL)
+ return _FAIL;
rtw_init_wifidirect_timers(padapter);
init_wifidirect_info(padapter, P2P_ROLE_DISABLE);
reset_global_wifidirect_info(padapter);
- if (init_mlme_ext_priv(padapter) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (init_mlme_ext_priv(padapter) == _FAIL)
+ return _FAIL;
if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
DBG_88E("Can't _rtw_init_xmit_priv\n");
- ret8 = _FAIL;
- goto exit;
+ return _FAIL;
}
if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL) {
DBG_88E("Can't _rtw_init_recv_priv\n");
- ret8 = _FAIL;
- goto exit;
+ return _FAIL;
}
if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL) {
DBG_88E("Can't _rtw_init_sta_priv\n");
- ret8 = _FAIL;
- goto exit;
+ return _FAIL;
}
padapter->stapriv.padapter = padapter;
@@ -530,15 +513,14 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
rtw_init_pwrctrl_priv(padapter);
- ret8 = rtw_init_default_value(padapter);
+ rtw_init_default_value(padapter);
rtl8188e_init_dm_priv(padapter);
rtl8188eu_InitSwLeds(padapter);
spin_lock_init(&padapter->br_ext_lock);
-exit:
- return ret8;
+ return _SUCCESS;
}
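
The rewrite above works because the old exit label performed no cleanup; each check can therefore return _FAIL directly instead of routing through a ret8 variable and a goto. A standalone sketch of the resulting shape, assuming the init steps are independent:

#include <stdio.h>

#define _SUCCESS 1
#define _FAIL 0

static int step_a(void) { return _SUCCESS; }
static int step_b(void) { return _SUCCESS; }

/* After the cleanup: nothing to undo, so fail fast at each step. */
static int init_sw(void)
{
	if (step_a() == _FAIL)
		return _FAIL;
	if (step_b() == _FAIL)
		return _FAIL;
	return _SUCCESS;
}

int main(void)
{
	printf("init: %s\n", init_sw() == _SUCCESS ? "ok" : "fail");
	return 0;
}
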
void rtw_cancel_all_timer(struct adapter *padapter)
@@ -585,8 +567,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
_rtw_free_recv_priv(&padapter->recvpriv);
- rtl8188e_free_hal_data(padapter);
-
/* free the old_pnetdev */
if (padapter->rereg_nd_name_priv.old_pnetdev) {
free_netdev(padapter->rereg_nd_name_priv.old_pnetdev);
@@ -775,26 +755,11 @@ void rtw_ips_dev_unload(struct adapter *padapter)
rtw_hal_deinit(padapter);
}
-int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
-{
- int status;
-
- if (bnormal)
- status = netdev_open(pnetdev);
- else
- status = (_SUCCESS == ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev))) ? (0) : (-1);
- return status;
-}
-
int netdev_close(struct net_device *pnetdev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
- if (padapter->pwrctrlpriv.bInternalAutoSuspend) {
- if (padapter->pwrctrlpriv.rf_pwrstate == rf_off)
- padapter->pwrctrlpriv.ps_flag = true;
- }
padapter->net_closed = true;
if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index 5a35d9fe3fc9..91792dfd3bbe 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -29,13 +29,12 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
/*=== Realtek demoboard ===*/
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
- {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xf179)}, /* 8188FU */
/*=== Customer ID ===*/
/****** 8188EUS ********/
{USB_DEVICE(0x07B8, 0x8179)}, /* Abocom - Abocom */
{USB_DEVICE(0x0DF6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
- {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
+ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x056E, 0x4008)}, /* Elecom WDC-150SU2M */
@@ -70,6 +69,7 @@ static struct rtw_usb_drv *usb_drv = &rtl8188e_usb_drv;
static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
{
int i;
+ u8 rt_num_in_pipes = 0;
struct dvobj_priv *pdvobjpriv;
struct usb_host_config *phost_conf;
struct usb_config_descriptor *pconf_desc;
@@ -80,14 +80,13 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
if (!pdvobjpriv)
- goto exit;
+ goto err;
pdvobjpriv->pusbintf = usb_intf;
pusbd = interface_to_usbdev(usb_intf);
pdvobjpriv->pusbdev = pusbd;
usb_set_intfdata(usb_intf, pdvobjpriv);
- pdvobjpriv->RtNumInPipes = 0;
pdvobjpriv->RtNumOutPipes = 0;
phost_conf = pusbd->actconfig;
@@ -98,28 +97,26 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
pdvobjpriv->InterfaceNumber = piface_desc->bInterfaceNumber;
- pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
- for (i = 0; i < pdvobjpriv->nr_endpoint; i++) {
+ for (i = 0; i < piface_desc->bNumEndpoints; i++) {
int ep_num;
pendp_desc = &phost_iface->endpoint[i].desc;
ep_num = usb_endpoint_num(pendp_desc);
if (usb_endpoint_is_bulk_in(pendp_desc)) {
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
- pdvobjpriv->RtNumInPipes++;
- } else if (usb_endpoint_is_int_in(pendp_desc)) {
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
- pdvobjpriv->RtNumInPipes++;
+ pdvobjpriv->RtInPipe = ep_num;
+ rt_num_in_pipes++;
} else if (usb_endpoint_is_bulk_out(pendp_desc)) {
pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
ep_num;
pdvobjpriv->RtNumOutPipes++;
}
- pdvobjpriv->ep_num[i] = ep_num;
}
+ if (rt_num_in_pipes != 1)
+ goto err;
+
if (pusbd->speed == USB_SPEED_HIGH) {
pdvobjpriv->ishighspeed = true;
DBG_88E("USB_SPEED_HIGH\n");
@@ -133,9 +130,11 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
rtw_reset_continual_urb_error(pdvobjpriv);
usb_get_dev(pusbd);
-
-exit:
return pdvobjpriv;
+
+err:
+ kfree(pdvobjpriv);
+ return NULL;
}
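
The reworked endpoint scan accepts exactly one bulk-in endpoint and fails the probe otherwise, replacing the old RtInPipe array with a single pipe number. A standalone sketch of that validation, with a simplified descriptor type standing in for struct usb_endpoint_descriptor and its usb_endpoint_is_bulk_in()/usb_endpoint_num() helpers:

#include <stdio.h>

/* Simplified stand-in for the USB endpoint descriptor. */
struct ep_desc {
	int bulk_in;	/* usb_endpoint_is_bulk_in() in the kernel */
	int num;	/* usb_endpoint_num() in the kernel */
};

int main(void)
{
	const struct ep_desc eps[] = { { 1, 1 }, { 0, 2 }, { 0, 3 } };
	int i, in_pipe = -1, num_in = 0;

	for (i = 0; i < 3; i++) {
		if (eps[i].bulk_in) {
			in_pipe = eps[i].num;
			num_in++;
		}
	}
	if (num_in != 1) {
		printf("probe failed: %d bulk-in endpoints\n", num_in);
		return 1;
	}
	printf("bulk-in pipe: %d\n", in_pipe);
	return 0;
}
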
static void usb_dvobj_deinit(struct usb_interface *usb_intf)
@@ -193,8 +192,7 @@ static void rtw_dev_unload(struct adapter *padapter)
if (padapter->intf_stop)
padapter->intf_stop(padapter);
/* s4. */
- if (!padapter->pwrctrlpriv.bInternalAutoSuspend)
- rtw_stop_drv_threads(padapter);
+ rtw_stop_drv_threads(padapter);
/* s5. */
if (!padapter->bSurpriseRemoved) {
@@ -298,7 +296,7 @@ static int rtw_resume(struct usb_interface *pusb_intf)
pwrpriv->bkeepfwalive = false;
DBG_88E("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
- if (pm_netdev_open(pnetdev, true) != 0) {
+ if (netdev_open(pnetdev) != 0) {
mutex_unlock(&pwrpriv->lock);
goto exit;
}
@@ -362,9 +360,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
padapter = rtw_netdev_priv(pnetdev);
- /* step 2. allocate HalData */
- rtl8188eu_alloc_haldata(padapter);
-
padapter->intf_start = &usb_intf_start;
padapter->intf_stop = &usb_intf_stop;
@@ -386,7 +381,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
/* step 5. */
if (rtw_init_drv_sw(padapter) == _FAIL)
- goto free_hal_data;
+ goto handle_dualmac;
#ifdef CONFIG_PM
if (padapter->pwrctrlpriv.bSupportRemoteWakeup) {
@@ -414,7 +409,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
/* step 6. Tell the network stack we exist */
if (register_netdev(pnetdev) != 0)
- goto free_hal_data;
+ goto handle_dualmac;
DBG_88E("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n"
, padapter->bDriverStopped
@@ -425,9 +420,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
status = _SUCCESS;
-free_hal_data:
- if (status != _SUCCESS)
- kfree(padapter->HalData);
handle_dualmac:
if (status != _SUCCESS)
rtw_handle_dualmac(padapter, 0);
diff --git a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
index ef2ea68ae873..d1cb33d3e6a7 100644
--- a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
@@ -12,11 +12,7 @@ unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr)
unsigned int pipe = 0, ep_num = 0;
struct usb_device *pusbd = pdvobj->pusbdev;
- if (addr == RECV_BULK_IN_ADDR) {
- pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
- } else if (addr == RECV_INT_IN_ADDR) {
- pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]);
- } else if (addr < HW_QUEUE_ENTRY) {
+ if (addr < HW_QUEUE_ENTRY) {
ep_num = pdvobj->Queue2Pipe[addr];
pipe = usb_sndbulkpipe(pusbd, ep_num);
}
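
With the interrupt-in branches gone, every transfer address below HW_QUEUE_ENTRY resolves through the Queue2Pipe table to a bulk-out endpoint. A table-lookup sketch (the mapping values here are hypothetical):

#include <stdio.h>

#define HW_QUEUE_ENTRY 8

int main(void)
{
	/* Hypothetical queue-to-endpoint mapping. */
	const unsigned int queue2pipe[HW_QUEUE_ENTRY] = { 2, 2, 3, 3, 4, 4, 4, 4 };
	unsigned int addr = 1, ep_num = 0;

	if (addr < HW_QUEUE_ENTRY)
		ep_num = queue2pipe[addr];	/* then usb_sndbulkpipe(pusbd, ep_num) */

	printf("addr %u -> ep %u\n", addr, ep_num);
	return 0;
}
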
diff --git a/drivers/staging/ralink-gdma/Kconfig b/drivers/staging/ralink-gdma/Kconfig
deleted file mode 100644
index 0017376234e2..000000000000
--- a/drivers/staging/ralink-gdma/Kconfig
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config DMA_RALINK
- tristate "RALINK DMA support"
- depends on RALINK && !SOC_RT288X
- depends on DMADEVICES
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
-
diff --git a/drivers/staging/ralink-gdma/Makefile b/drivers/staging/ralink-gdma/Makefile
deleted file mode 100644
index 5c4566b2e405..000000000000
--- a/drivers/staging/ralink-gdma/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
-
-ccflags-y += -I$(srctree)/drivers/dma
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
deleted file mode 100644
index b5229bc6eae5..000000000000
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ /dev/null
@@ -1,917 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * GDMA4740 DMAC support
- */
-
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/irq.h>
-#include <linux/of_dma.h>
-#include <linux/reset.h>
-#include <linux/of_device.h>
-
-#include "virt-dma.h"
-
-#define GDMA_REG_SRC_ADDR(x) (0x00 + (x) * 0x10)
-#define GDMA_REG_DST_ADDR(x) (0x04 + (x) * 0x10)
-
-#define GDMA_REG_CTRL0(x) (0x08 + (x) * 0x10)
-#define GDMA_REG_CTRL0_TX_MASK 0xffff
-#define GDMA_REG_CTRL0_TX_SHIFT 16
-#define GDMA_REG_CTRL0_CURR_MASK 0xff
-#define GDMA_REG_CTRL0_CURR_SHIFT 8
-#define GDMA_REG_CTRL0_SRC_ADDR_FIXED BIT(7)
-#define GDMA_REG_CTRL0_DST_ADDR_FIXED BIT(6)
-#define GDMA_REG_CTRL0_BURST_MASK 0x7
-#define GDMA_REG_CTRL0_BURST_SHIFT 3
-#define GDMA_REG_CTRL0_DONE_INT BIT(2)
-#define GDMA_REG_CTRL0_ENABLE BIT(1)
-#define GDMA_REG_CTRL0_SW_MODE BIT(0)
-
-#define GDMA_REG_CTRL1(x) (0x0c + (x) * 0x10)
-#define GDMA_REG_CTRL1_SEG_MASK 0xf
-#define GDMA_REG_CTRL1_SEG_SHIFT 22
-#define GDMA_REG_CTRL1_REQ_MASK 0x3f
-#define GDMA_REG_CTRL1_SRC_REQ_SHIFT 16
-#define GDMA_REG_CTRL1_DST_REQ_SHIFT 8
-#define GDMA_REG_CTRL1_NEXT_MASK 0x1f
-#define GDMA_REG_CTRL1_NEXT_SHIFT 3
-#define GDMA_REG_CTRL1_COHERENT BIT(2)
-#define GDMA_REG_CTRL1_FAIL BIT(1)
-#define GDMA_REG_CTRL1_MASK BIT(0)
-
-#define GDMA_REG_UNMASK_INT 0x200
-#define GDMA_REG_DONE_INT 0x204
-
-#define GDMA_REG_GCT 0x220
-#define GDMA_REG_GCT_CHAN_MASK 0x3
-#define GDMA_REG_GCT_CHAN_SHIFT 3
-#define GDMA_REG_GCT_VER_MASK 0x3
-#define GDMA_REG_GCT_VER_SHIFT 1
-#define GDMA_REG_GCT_ARBIT_RR BIT(0)
-
-#define GDMA_REG_REQSTS 0x2a0
-#define GDMA_REG_ACKSTS 0x2a4
-#define GDMA_REG_FINSTS 0x2a8
-
-/* for RT305X gdma registers */
-#define GDMA_RT305X_CTRL0_REQ_MASK 0xf
-#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT 12
-#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT 8
-
-#define GDMA_RT305X_CTRL1_FAIL BIT(4)
-#define GDMA_RT305X_CTRL1_NEXT_MASK 0x7
-#define GDMA_RT305X_CTRL1_NEXT_SHIFT 1
-
-#define GDMA_RT305X_STATUS_INT 0x80
-#define GDMA_RT305X_STATUS_SIGNAL 0x84
-#define GDMA_RT305X_GCT 0x88
-
-/* for MT7621 gdma registers */
-#define GDMA_REG_PERF_START(x) (0x230 + (x) * 0x8)
-#define GDMA_REG_PERF_END(x) (0x234 + (x) * 0x8)
-
-enum gdma_dma_transfer_size {
- GDMA_TRANSFER_SIZE_4BYTE = 0,
- GDMA_TRANSFER_SIZE_8BYTE = 1,
- GDMA_TRANSFER_SIZE_16BYTE = 2,
- GDMA_TRANSFER_SIZE_32BYTE = 3,
- GDMA_TRANSFER_SIZE_64BYTE = 4,
-};
-
-struct gdma_dma_sg {
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- u32 len;
-};
-
-struct gdma_dma_desc {
- struct virt_dma_desc vdesc;
-
- enum dma_transfer_direction direction;
- bool cyclic;
-
- u32 residue;
- unsigned int num_sgs;
- struct gdma_dma_sg sg[];
-};
-
-struct gdma_dmaengine_chan {
- struct virt_dma_chan vchan;
- unsigned int id;
- unsigned int slave_id;
-
- dma_addr_t fifo_addr;
- enum gdma_dma_transfer_size burst_size;
-
- struct gdma_dma_desc *desc;
- unsigned int next_sg;
-};
-
-struct gdma_dma_dev {
- struct dma_device ddev;
- struct device_dma_parameters dma_parms;
- struct gdma_data *data;
- void __iomem *base;
- struct tasklet_struct task;
- volatile unsigned long chan_issued;
- atomic_t cnt;
-
- struct gdma_dmaengine_chan chan[];
-};
-
-struct gdma_data {
- int chancnt;
- u32 done_int_reg;
- void (*init)(struct gdma_dma_dev *dma_dev);
- int (*start_transfer)(struct gdma_dmaengine_chan *chan);
-};
-
-static struct gdma_dma_dev *gdma_dma_chan_get_dev(
- struct gdma_dmaengine_chan *chan)
-{
- return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
- ddev);
-}
-
-static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
-{
- return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
-}
-
-static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
-{
- return container_of(vdesc, struct gdma_dma_desc, vdesc);
-}
-
-static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
- unsigned int reg)
-{
- return readl(dma_dev->base + reg);
-}
-
-static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
- unsigned int reg, uint32_t val)
-{
- writel(val, dma_dev->base + reg);
-}
-
-static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
-{
- if (maxburst < 2)
- return GDMA_TRANSFER_SIZE_4BYTE;
- else if (maxburst < 4)
- return GDMA_TRANSFER_SIZE_8BYTE;
- else if (maxburst < 8)
- return GDMA_TRANSFER_SIZE_16BYTE;
- else if (maxburst < 16)
- return GDMA_TRANSFER_SIZE_32BYTE;
- else
- return GDMA_TRANSFER_SIZE_64BYTE;
-}
-
-static int gdma_dma_config(struct dma_chan *c,
- struct dma_slave_config *config)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
-
- if (config->device_fc) {
- dev_err(dma_dev->ddev.dev, "not support flow controller\n");
- return -EINVAL;
- }
-
- switch (config->direction) {
- case DMA_MEM_TO_DEV:
- if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
- dev_err(dma_dev->ddev.dev, "only support 4 byte buswidth\n");
- return -EINVAL;
- }
- chan->slave_id = config->slave_id;
- chan->fifo_addr = config->dst_addr;
- chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
- break;
- case DMA_DEV_TO_MEM:
- if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
- dev_err(dma_dev->ddev.dev, "only support 4 byte buswidth\n");
- return -EINVAL;
- }
- chan->slave_id = config->slave_id;
- chan->fifo_addr = config->src_addr;
- chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
- break;
- default:
- dev_err(dma_dev->ddev.dev, "direction type %d error\n",
- config->direction);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int gdma_dma_terminate_all(struct dma_chan *c)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
- unsigned long flags, timeout;
- LIST_HEAD(head);
- int i = 0;
-
- spin_lock_irqsave(&chan->vchan.lock, flags);
- chan->desc = NULL;
- clear_bit(chan->id, &dma_dev->chan_issued);
- vchan_get_all_descriptors(&chan->vchan, &head);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
- vchan_dma_desc_free_list(&chan->vchan, &head);
-
- /* wait dma transfer complete */
- timeout = jiffies + msecs_to_jiffies(5000);
- while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
- GDMA_REG_CTRL0_ENABLE) {
- if (time_after_eq(jiffies, timeout)) {
- dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
- chan->id);
- /* restore to init value */
- gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
- break;
- }
- cpu_relax();
- i++;
- }
-
- if (i)
- dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
- chan->id, i);
-
- return 0;
-}
-
-static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
-{
- dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, intr %08x, signal %08x\n",
- id,
- gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
- gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
- gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
-}
-
-static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
-{
- struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
- dma_addr_t src_addr, dst_addr;
- struct gdma_dma_sg *sg;
- u32 ctrl0, ctrl1;
-
- /* verify chan is already stopped */
- ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
- if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
- dev_err(dma_dev->ddev.dev, "chan %d is start(%08x).\n",
- chan->id, ctrl0);
- rt305x_dump_reg(dma_dev, chan->id);
- return -EINVAL;
- }
-
- sg = &chan->desc->sg[chan->next_sg];
- if (chan->desc->direction == DMA_MEM_TO_DEV) {
- src_addr = sg->src_addr;
- dst_addr = chan->fifo_addr;
- ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED |
- (8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
- (chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
- } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
- src_addr = chan->fifo_addr;
- dst_addr = sg->dst_addr;
- ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED |
- (chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
- (8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
- } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
- /*
- * TODO: memcpy function have bugs. sometime it will copy
- * more 8 bytes data when using dmatest verify.
- */
- src_addr = sg->src_addr;
- dst_addr = sg->dst_addr;
- ctrl0 = GDMA_REG_CTRL0_SW_MODE |
- (8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
- (8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
- } else {
- dev_err(dma_dev->ddev.dev, "direction type %d error\n",
- chan->desc->direction);
- return -EINVAL;
- }
-
- ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
- (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
- GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
- ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
-
- chan->next_sg++;
- gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
- gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
- gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
-
- /* make sure next_sg is update */
- wmb();
- gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
-
- return 0;
-}
-
-static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
-{
- dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, unmask %08x, done %08x, req %08x, ack %08x, fin %08x\n",
- id,
- gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
- gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
- gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
- gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
- gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
- gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
-}
-
-static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
-{
- struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
- dma_addr_t src_addr, dst_addr;
- struct gdma_dma_sg *sg;
- u32 ctrl0, ctrl1;
-
- /* verify chan is already stopped */
- ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
- if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
- dev_err(dma_dev->ddev.dev, "chan %d is start(%08x).\n",
- chan->id, ctrl0);
- rt3883_dump_reg(dma_dev, chan->id);
- return -EINVAL;
- }
-
- sg = &chan->desc->sg[chan->next_sg];
- if (chan->desc->direction == DMA_MEM_TO_DEV) {
- src_addr = sg->src_addr;
- dst_addr = chan->fifo_addr;
- ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
- ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
- (chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
- } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
- src_addr = chan->fifo_addr;
- dst_addr = sg->dst_addr;
- ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
- ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
- (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
- GDMA_REG_CTRL1_COHERENT;
- } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
- src_addr = sg->src_addr;
- dst_addr = sg->dst_addr;
- ctrl0 = GDMA_REG_CTRL0_SW_MODE;
- ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
- (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
- GDMA_REG_CTRL1_COHERENT;
- } else {
- dev_err(dma_dev->ddev.dev, "direction type %d error\n",
- chan->desc->direction);
- return -EINVAL;
- }
-
- ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
- (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
- GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
- ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
-
- chan->next_sg++;
- gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
- gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
- gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
-
- /* make sure next_sg is update */
- wmb();
- gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
-
- return 0;
-}
-
-static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
- struct gdma_dmaengine_chan *chan)
-{
- return dma_dev->data->start_transfer(chan);
-}
-
-static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
-{
- struct virt_dma_desc *vdesc;
-
- vdesc = vchan_next_desc(&chan->vchan);
- if (!vdesc) {
- chan->desc = NULL;
- return 0;
- }
- chan->desc = to_gdma_dma_desc(vdesc);
- chan->next_sg = 0;
-
- return 1;
-}
-
-static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
- struct gdma_dmaengine_chan *chan)
-{
- struct gdma_dma_desc *desc;
- unsigned long flags;
- int chan_issued;
-
- chan_issued = 0;
- spin_lock_irqsave(&chan->vchan.lock, flags);
- desc = chan->desc;
- if (desc) {
- if (desc->cyclic) {
- vchan_cyclic_callback(&desc->vdesc);
- if (chan->next_sg == desc->num_sgs)
- chan->next_sg = 0;
- chan_issued = 1;
- } else {
- desc->residue -= desc->sg[chan->next_sg - 1].len;
- if (chan->next_sg == desc->num_sgs) {
- list_del(&desc->vdesc.node);
- vchan_cookie_complete(&desc->vdesc);
- chan_issued = gdma_next_desc(chan);
- } else {
- chan_issued = 1;
- }
- }
- } else {
- dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
- chan->id);
- }
- if (chan_issued)
- set_bit(chan->id, &dma_dev->chan_issued);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-}
-
-static irqreturn_t gdma_dma_irq(int irq, void *devid)
-{
- struct gdma_dma_dev *dma_dev = devid;
- u32 done, done_reg;
- unsigned int i;
-
- done_reg = dma_dev->data->done_int_reg;
- done = gdma_dma_read(dma_dev, done_reg);
- if (unlikely(!done))
- return IRQ_NONE;
-
- /* clean done bits */
- gdma_dma_write(dma_dev, done_reg, done);
-
- i = 0;
- while (done) {
- if (done & 0x1) {
- gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
- atomic_dec(&dma_dev->cnt);
- }
- done >>= 1;
- i++;
- }
-
- /* start only have work to do */
- if (dma_dev->chan_issued)
- tasklet_schedule(&dma_dev->task);
-
- return IRQ_HANDLED;
-}
-
-static void gdma_dma_issue_pending(struct dma_chan *c)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&chan->vchan.lock, flags);
- if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
- if (gdma_next_desc(chan)) {
- set_bit(chan->id, &dma_dev->chan_issued);
- tasklet_schedule(&dma_dev->task);
- } else {
- dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
- chan->id);
- }
- }
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-}
-
-static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
- struct dma_chan *c, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_desc *desc;
- struct scatterlist *sg;
- unsigned int i;
-
- desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
- if (!desc) {
- dev_err(c->device->dev, "alloc sg decs error\n");
- return NULL;
- }
- desc->residue = 0;
-
- for_each_sg(sgl, sg, sg_len, i) {
- if (direction == DMA_MEM_TO_DEV) {
- desc->sg[i].src_addr = sg_dma_address(sg);
- } else if (direction == DMA_DEV_TO_MEM) {
- desc->sg[i].dst_addr = sg_dma_address(sg);
- } else {
- dev_err(c->device->dev, "direction type %d error\n",
- direction);
- goto free_desc;
- }
-
- if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
- dev_err(c->device->dev, "sg len too large %d\n",
- sg_dma_len(sg));
- goto free_desc;
- }
- desc->sg[i].len = sg_dma_len(sg);
- desc->residue += sg_dma_len(sg);
- }
-
- desc->num_sgs = sg_len;
- desc->direction = direction;
- desc->cyclic = false;
-
- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-
-free_desc:
- kfree(desc);
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *gdma_dma_prep_dma_memcpy(
- struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_desc *desc;
- unsigned int num_periods, i;
- size_t xfer_count;
-
- if (len <= 0)
- return NULL;
-
- chan->burst_size = gdma_dma_maxburst(len >> 2);
-
- xfer_count = GDMA_REG_CTRL0_TX_MASK;
- num_periods = DIV_ROUND_UP(len, xfer_count);
-
- desc = kzalloc(struct_size(desc, sg, num_periods), GFP_ATOMIC);
- if (!desc) {
- dev_err(c->device->dev, "alloc memcpy decs error\n");
- return NULL;
- }
- desc->residue = len;
-
- for (i = 0; i < num_periods; i++) {
- desc->sg[i].src_addr = src;
- desc->sg[i].dst_addr = dest;
- if (len > xfer_count)
- desc->sg[i].len = xfer_count;
- else
- desc->sg[i].len = len;
- src += desc->sg[i].len;
- dest += desc->sg[i].len;
- len -= desc->sg[i].len;
- }
-
- desc->num_sgs = num_periods;
- desc->direction = DMA_MEM_TO_MEM;
- desc->cyclic = false;
-
- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-}
-
-static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
- struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_desc *desc;
- unsigned int num_periods, i;
-
- if (buf_len % period_len)
- return NULL;
-
- if (period_len > GDMA_REG_CTRL0_TX_MASK) {
- dev_err(c->device->dev, "cyclic len too large %d\n",
- period_len);
- return NULL;
- }
-
- num_periods = buf_len / period_len;
- desc = kzalloc(struct_size(desc, sg, num_periods), GFP_ATOMIC);
- if (!desc) {
- dev_err(c->device->dev, "alloc cyclic decs error\n");
- return NULL;
- }
- desc->residue = buf_len;
-
- for (i = 0; i < num_periods; i++) {
- if (direction == DMA_MEM_TO_DEV) {
- desc->sg[i].src_addr = buf_addr;
- } else if (direction == DMA_DEV_TO_MEM) {
- desc->sg[i].dst_addr = buf_addr;
- } else {
- dev_err(c->device->dev, "direction type %d error\n",
- direction);
- goto free_desc;
- }
- desc->sg[i].len = period_len;
- buf_addr += period_len;
- }
-
- desc->num_sgs = num_periods;
- desc->direction = direction;
- desc->cyclic = true;
-
- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-
-free_desc:
- kfree(desc);
- return NULL;
-}
-
-static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
- dma_cookie_t cookie,
- struct dma_tx_state *state)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct virt_dma_desc *vdesc;
- enum dma_status status;
- unsigned long flags;
- struct gdma_dma_desc *desc;
-
- status = dma_cookie_status(c, cookie, state);
- if (status == DMA_COMPLETE || !state)
- return status;
-
- spin_lock_irqsave(&chan->vchan.lock, flags);
- desc = chan->desc;
- if (desc && (cookie == desc->vdesc.tx.cookie)) {
- /*
- * We never update edesc->residue in the cyclic case, so we
- * can tell the remaining room to the end of the circular
- * buffer.
- */
- if (desc->cyclic)
- state->residue = desc->residue -
- ((chan->next_sg - 1) * desc->sg[0].len);
- else
- state->residue = desc->residue;
- } else {
- vdesc = vchan_find_desc(&chan->vchan, cookie);
- if (vdesc)
- state->residue = to_gdma_dma_desc(vdesc)->residue;
- }
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
- dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue);
-
- return status;
-}
-
-static void gdma_dma_free_chan_resources(struct dma_chan *c)
-{
- vchan_free_chan_resources(to_virt_chan(c));
-}
-
-static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
-{
- kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
-}
-
-static void gdma_dma_tasklet(struct tasklet_struct *t)
-{
- struct gdma_dma_dev *dma_dev = from_tasklet(dma_dev, t, task);
- struct gdma_dmaengine_chan *chan;
- static unsigned int last_chan;
- unsigned int i, chan_mask;
-
- /* record last chan to round robin all chans */
- i = last_chan;
- chan_mask = dma_dev->data->chancnt - 1;
- do {
- /*
- * on mt7621. when verify with dmatest with all
- * channel is enable. we need to limit only two
- * channel is working at the same time. otherwise the
- * data will have problem.
- */
- if (atomic_read(&dma_dev->cnt) >= 2) {
- last_chan = i;
- break;
- }
-
- if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
- chan = &dma_dev->chan[i];
- if (chan->desc) {
- atomic_inc(&dma_dev->cnt);
- gdma_start_transfer(dma_dev, chan);
- } else {
- dev_dbg(dma_dev->ddev.dev,
- "chan %d no desc to issue\n",
- chan->id);
- }
- if (!dma_dev->chan_issued)
- break;
- }
-
- i = (i + 1) & chan_mask;
- } while (i != last_chan);
-}
-
-static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
-{
- u32 gct;
-
- /* all chans round robin */
- gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);
-
- gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
- dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
- (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
- 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
- GDMA_REG_GCT_CHAN_MASK));
-}
-
-static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
-{
- u32 gct;
-
- /* all chans round robin */
- gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
-
- gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
- dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
- (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
- 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
- GDMA_REG_GCT_CHAN_MASK));
-}
-
-static struct gdma_data rt305x_gdma_data = {
- .chancnt = 8,
- .done_int_reg = GDMA_RT305X_STATUS_INT,
- .init = rt305x_gdma_init,
- .start_transfer = rt305x_gdma_start_transfer,
-};
-
-static struct gdma_data rt3883_gdma_data = {
- .chancnt = 16,
- .done_int_reg = GDMA_REG_DONE_INT,
- .init = rt3883_gdma_init,
- .start_transfer = rt3883_gdma_start_transfer,
-};
-
-static const struct of_device_id gdma_of_match_table[] = {
- { .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
- { .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
- { },
-};
-MODULE_DEVICE_TABLE(of, gdma_of_match_table);
-
-static int gdma_dma_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- struct gdma_dmaengine_chan *chan;
- struct gdma_dma_dev *dma_dev;
- struct dma_device *dd;
- unsigned int i;
- int ret;
- int irq;
- void __iomem *base;
- struct gdma_data *data;
-
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- match = of_match_device(gdma_of_match_table, &pdev->dev);
- if (!match)
- return -EINVAL;
- data = (struct gdma_data *)match->data;
-
- dma_dev = devm_kzalloc(&pdev->dev,
- struct_size(dma_dev, chan, data->chancnt),
- GFP_KERNEL);
- if (!dma_dev)
- return -EINVAL;
- dma_dev->data = data;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
- dma_dev->base = base;
- tasklet_setup(&dma_dev->task, gdma_dma_tasklet);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -EINVAL;
- ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
- 0, dev_name(&pdev->dev), dma_dev);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return ret;
- }
-
- ret = device_reset(&pdev->dev);
- if (ret)
- dev_err(&pdev->dev, "failed to reset: %d\n", ret);
-
- dd = &dma_dev->ddev;
- dma_cap_set(DMA_MEMCPY, dd->cap_mask);
- dma_cap_set(DMA_SLAVE, dd->cap_mask);
- dma_cap_set(DMA_CYCLIC, dd->cap_mask);
- dd->device_free_chan_resources = gdma_dma_free_chan_resources;
- dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
- dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
- dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
- dd->device_config = gdma_dma_config;
- dd->device_terminate_all = gdma_dma_terminate_all;
- dd->device_tx_status = gdma_dma_tx_status;
- dd->device_issue_pending = gdma_dma_issue_pending;
-
- dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
-
- dd->dev = &pdev->dev;
- dd->dev->dma_parms = &dma_dev->dma_parms;
- dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
- INIT_LIST_HEAD(&dd->channels);
-
- for (i = 0; i < data->chancnt; i++) {
- chan = &dma_dev->chan[i];
- chan->id = i;
- chan->vchan.desc_free = gdma_dma_desc_free;
- vchan_init(&chan->vchan, dd);
- }
-
- /* init hardware */
- data->init(dma_dev);
-
- ret = dma_async_device_register(dd);
- if (ret) {
- dev_err(&pdev->dev, "failed to register dma device\n");
- return ret;
- }
-
- ret = of_dma_controller_register(pdev->dev.of_node,
- of_dma_xlate_by_chan_id, dma_dev);
- if (ret) {
- dev_err(&pdev->dev, "failed to register of dma controller\n");
- goto err_unregister;
- }
-
- platform_set_drvdata(pdev, dma_dev);
-
- return 0;
-
-err_unregister:
- dma_async_device_unregister(dd);
- return ret;
-}
-
-static int gdma_dma_remove(struct platform_device *pdev)
-{
- struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
-
- tasklet_kill(&dma_dev->task);
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&dma_dev->ddev);
-
- return 0;
-}
-
-static struct platform_driver gdma_dma_driver = {
- .probe = gdma_dma_probe,
- .remove = gdma_dma_remove,
- .driver = {
- .name = "gdma-rt2880",
- .of_match_table = gdma_of_match_table,
- },
-};
-module_platform_driver(gdma_dma_driver);
-
-MODULE_DESCRIPTION("Ralink/MTK DMA driver");
-MODULE_LICENSE("GPL v2");
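
One detail worth noting from the removed driver: its tasklet round-robins over the channels with i = (i + 1) & (chancnt - 1), which wraps without a modulo but only works because both supported parts have a power-of-two channel count (8 and 16). A standalone sketch of the wrap:

#include <stdio.h>

int main(void)
{
	const unsigned int chancnt = 8;		/* must be a power of two */
	const unsigned int mask = chancnt - 1;
	unsigned int i, start = 6;

	/* Visit every channel exactly once, starting mid-array. */
	i = start;
	do {
		printf("chan %u\n", i);
		i = (i + 1) & mask;
	} while (i != start);
	return 0;
}
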
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index c6f8b772335c..c985e4ebc545 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1980,7 +1980,7 @@ void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn);
void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee);
void rtllib_start_ibss(struct rtllib_device *ieee);
-void rtllib_softmac_init(struct rtllib_device *ieee);
+int rtllib_softmac_init(struct rtllib_device *ieee);
void rtllib_softmac_free(struct rtllib_device *ieee);
void rtllib_disassociate(struct rtllib_device *ieee);
void rtllib_stop_scan(struct rtllib_device *ieee);
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 64d9feee1f39..41697ef55dbd 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -88,7 +88,7 @@ struct net_device *alloc_rtllib(int sizeof_priv)
err = rtllib_networks_allocate(ieee);
if (err) {
pr_err("Unable to allocate beacon storage: %d\n", err);
- goto failed;
+ goto free_netdev;
}
rtllib_networks_initialize(ieee);
@@ -121,11 +121,13 @@ struct net_device *alloc_rtllib(int sizeof_priv)
ieee->hwsec_active = 0;
memset(ieee->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
- rtllib_softmac_init(ieee);
+ err = rtllib_softmac_init(ieee);
+ if (err)
+ goto free_crypt_info;
ieee->pHTInfo = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL);
if (!ieee->pHTInfo)
- return NULL;
+ goto free_softmac;
HTUpdateDefaultSetting(ieee);
HTInitializeHTInfo(ieee);
@@ -141,8 +143,14 @@ struct net_device *alloc_rtllib(int sizeof_priv)
return dev;
- failed:
+free_softmac:
+ rtllib_softmac_free(ieee);
+free_crypt_info:
+ lib80211_crypt_info_free(&ieee->crypt_info);
+ rtllib_networks_free(ieee);
+free_netdev:
free_netdev(dev);
+
return NULL;
}
EXPORT_SYMBOL(alloc_rtllib);
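
The fix replaces a single failed: label with a ladder of labels so each failure point frees exactly what was set up before it; the old code returned NULL without unwinding the earlier allocations (and the netdev) when the pHTInfo allocation failed. A standalone sketch of the ladder, using plain malloc() in place of the driver's allocators:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto fail;

	b = malloc(16);
	if (!b)
		goto free_a;	/* unwind only what already exists */

	/* ... use a and b ... */
	free(b);
	free(a);
	return 0;

free_a:
	free(a);
fail:
	return -1;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}
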
@@ -153,7 +161,6 @@ void free_rtllib(struct net_device *dev)
netdev_priv_rsl(dev);
kfree(ieee->pHTInfo);
- ieee->pHTInfo = NULL;
rtllib_softmac_free(ieee);
lib80211_crypt_info_free(&ieee->crypt_info);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index d2726d01c757..4b6c2295a3cf 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -2515,7 +2515,7 @@ void rtllib_stop_all_queues(struct rtllib_device *ieee)
unsigned int i;
for (i = 0; i < ieee->dev->num_tx_queues; i++)
- netdev_get_tx_queue(ieee->dev, i)->trans_start = jiffies;
+ txq_trans_cond_update(netdev_get_tx_queue(ieee->dev, i));
netif_tx_stop_all_queues(ieee->dev);
}
@@ -2952,7 +2952,7 @@ void rtllib_start_protocol(struct rtllib_device *ieee)
}
}
-void rtllib_softmac_init(struct rtllib_device *ieee)
+int rtllib_softmac_init(struct rtllib_device *ieee)
{
int i;
@@ -2963,7 +2963,8 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
ieee->seq_ctrl[i] = 0;
ieee->dot11d_info = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
if (!ieee->dot11d_info)
- netdev_err(ieee->dev, "Can't alloc memory for DOT11D\n");
+ return -ENOMEM;
+
ieee->LinkDetectInfo.SlotIndex = 0;
ieee->LinkDetectInfo.SlotNum = 2;
ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
@@ -3029,6 +3030,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
tasklet_setup(&ieee->ps_task, rtllib_sta_ps);
+ return 0;
}
void rtllib_softmac_free(struct rtllib_device *ieee)
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 1a193f900779..1a43979939a8 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1346,7 +1346,7 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
short apset, ssidset, ssidbroad, apmatch, ssidmatch;
- /* we are interested in new new only if we are not associated
+ /* we are interested in new only if we are not associated
* and we are not associating / authenticating
*/
if (ieee->state != IEEE80211_NOLINK)
@@ -2027,7 +2027,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
* N = MAX_PACKET_SIZE / MIN_FRAG_THRESHOLD.
* In this way you need just one and the 802.11 stack
* will take care of buffering fragments and pass them to
- * to the driver later, when it wakes the queue.
+ * the driver later, when it wakes the queue.
*/
void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee)
{
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 726d7ad9408b..364e1ca94f70 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2531,13 +2531,13 @@ static short rtl8192_init(struct net_device *dev)
#ifdef PIPE12
{
int i = 0;
- u8 queuetopipe[] = {3, 2, 1, 0, 4, 8, 7, 6, 5};
+ static const u8 queuetopipe[] = {3, 2, 1, 0, 4, 8, 7, 6, 5};
memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
}
#else
{
- u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
+ const u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
}
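
Declaring the in-function table const (and, in the PIPE12 variant, static const) lets the compiler place it in read-only data rather than rebuilding it on the stack at every call. A standalone sketch of the shape, with the table contents copied from the hunk above:

#include <stdio.h>
#include <string.h>

static void init_map(unsigned char *out)
{
	/* Built once at compile time, shared by every call. */
	static const unsigned char queuetopipe[] = {3, 2, 1, 0, 4, 8, 7, 6, 5};

	memcpy(out, queuetopipe, sizeof(queuetopipe));
}

int main(void)
{
	unsigned char map[9];

	init_map(map);
	printf("queue 0 -> pipe %u\n", map[0]);
	return 0;
}
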
@@ -2666,14 +2666,7 @@ static bool rtl8192_adapter_start(struct net_device *dev)
/* config CPUReset Register */
/* Firmware Reset or not? */
read_nic_dword(dev, CPU_GEN, &dwRegRead);
- if (priv->pFirmware->firmware_status == FW_STATUS_0_INIT)
- dwRegRead |= CPU_GEN_SYSTEM_RESET; /* do nothing here? */
- else if (priv->pFirmware->firmware_status == FW_STATUS_5_READY)
- dwRegRead |= CPU_GEN_FIRMWARE_RESET;
- else
- RT_TRACE(COMP_ERR,
- "ERROR in %s(): undefined firmware state(%d)\n",
- __func__, priv->pFirmware->firmware_status);
+ dwRegRead |= CPU_GEN_SYSTEM_RESET; /* do nothing here? */
write_nic_dword(dev, CPU_GEN, dwRegRead);
/* config BB. */
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index 4f3b54a7c3be..c9400e40a1d6 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -298,25 +298,23 @@ static u8 fix_header(struct _adapter *adapter, u8 header, u16 header_addr)
continue;
}
for (i = 0; i < PGPKG_MAX_WORDS; i++) {
- if (BIT(i) & word_en) {
- if (BIT(i) & pkt.word_en) {
- if (efuse_one_byte_read(
- adapter, addr,
+ if (!(BIT(i) & word_en))
+ continue;
+ if (BIT(i) & pkt.word_en) {
+ if (efuse_one_byte_read(adapter,
+ addr,
&value))
- pkt.data[i * 2] = value;
- else
- return false;
- if (efuse_one_byte_read(
- adapter,
+ pkt.data[i * 2] = value;
+ else
+ return false;
+ if (efuse_one_byte_read(adapter,
addr + 1,
&value))
- pkt.data[i * 2 + 1] =
- value;
- else
- return false;
- }
- addr += 2;
+ pkt.data[i * 2 + 1] = value;
+ else
+ return false;
}
+ addr += 2;
}
}
if (addr != header_addr)
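The fix_header() rework is a pure de-indentation: each positive test becomes a negated guard with continue, dropping one brace level per guard and leaving the useful work at the top indentation of the loop body. The general shape of the idiom, with placeholder names:

for (i = 0; i < n; i++) {
	if (!(BIT(i) & mask))
		continue;	/* guard clause replaces one nesting level */
	do_word(i);		/* main path stays flat */
}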
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index bd24d913b464..b28351a97cd3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -1498,8 +1498,7 @@ static void rtw_lps_change_dtim_hdl(struct adapter *padapter, u8 dtim)
mutex_lock(&pwrpriv->lock);
- if (pwrpriv->dtim != dtim)
- pwrpriv->dtim = dtim;
+ pwrpriv->dtim = dtim;
if (pwrpriv->fw_current_in_ps_mode && (pwrpriv->pwr_mode > PS_MODE_ACTIVE)) {
u8 ps_mode = pwrpriv->pwr_mode;
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index 430e2d81924c..3d3c77273026 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -31,10 +31,7 @@ u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
static bool
-Efuse_Read1ByteFromFakeContent(
- struct adapter *padapter,
- u16 Offset,
- u8 *Value)
+Efuse_Read1ByteFromFakeContent(u16 Offset, u8 *Value)
{
if (Offset >= EFUSE_MAX_HW_SIZE)
return false;
@@ -46,10 +43,7 @@ Efuse_Read1ByteFromFakeContent(
}
static bool
-Efuse_Write1ByteToFakeContent(
- struct adapter *padapter,
- u16 Offset,
- u8 Value)
+Efuse_Write1ByteToFakeContent(u16 Offset, u8 Value)
{
if (Offset >= EFUSE_MAX_HW_SIZE)
return false;
@@ -250,7 +244,7 @@ bool bPseudoTest)
u8 readbyte;
if (bPseudoTest)
- return Efuse_Read1ByteFromFakeContent(padapter, addr, data);
+ return Efuse_Read1ByteFromFakeContent(addr, data);
/* <20130121, Kordan> For SMIC EFUSE specification. */
/* 0x34[11]: SW force PGMEN input of efuse to high. (for the bank selected by 0x34[9:8]) */
@@ -291,7 +285,7 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT
u32 efuseValue = 0;
if (bPseudoTest)
- return Efuse_Write1ByteToFakeContent(padapter, addr, data);
+ return Efuse_Write1ByteToFakeContent(addr, data);
/* -----------------e-fuse reg ctrl --------------------------------- */
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index 7fe3df863fe1..b5d5e922231c 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -31,9 +31,6 @@ static u8 rtw_sdio_wait_enough_TxOQT_space(struct adapter *padapter, u8 agg_num)
pHalData->SdioTxOQTFreeSpace -= agg_num;
- /* if (n > 1) */
- /* ++priv->pshare->nr_out_of_txoqt_space; */
-
return true;
}
@@ -310,8 +307,6 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
txlen = txdesc_size + pxmitframe->attrib.last_txcmdsz;
pxmitframe->pg_num = (txlen + 127) / 128;
pxmitbuf->pg_num += (txlen + 127) / 128;
- /* if (k != 1) */
- /* ((struct xmit_frame*)pxmitbuf->priv_data)->pg_num += pxmitframe->pg_num; */
pxmitbuf->ptail += _RND(txlen, 8); /* round to 8 bytes alignment */
pxmitbuf->len = _RND(pxmitbuf->len, 8) + txlen;
}
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 0868f56e2979..5157b5b12597 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2200,7 +2200,7 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
/* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for
- * for two MAC addresses
+ * two MAC addresses
*/
skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
pdata = (unsigned char *)skb->data;
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 91fcf85e150a..5a58dac76c88 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -450,13 +450,13 @@ skip_for_abort:
* after the down() -- that's necessary for the thread-shutdown
* case.
*
- * complete_and_exit() goes even further than this -- it is safe in
- * the case that the thread of the caller is going away (not just
- * the structure) -- this is necessary for the module-remove case.
- * This is important in preemption kernels, which transfer the flow
- * of execution immediately upon a complete().
+ * kthread_complete_and_exit() goes even further than this --
+ * it is safe in the case that the thread of the caller is going away
+ * (not just the structure) -- this is necessary for the module-remove
+ * case. This is important in preemption kernels, which transfer the
+ * flow of execution immediately upon a complete().
*/
- complete_and_exit(&dev->control_exit, 0);
+ kthread_complete_and_exit(&dev->control_exit, 0);
}
static int rtsx_polling_thread(void *__dev)
@@ -501,7 +501,7 @@ static int rtsx_polling_thread(void *__dev)
mutex_unlock(&dev->dev_mutex);
}
- complete_and_exit(&dev->polling_exit, 0);
+ kthread_complete_and_exit(&dev->polling_exit, 0);
}
/*
@@ -682,7 +682,7 @@ static int rtsx_scan_thread(void *__dev)
/* Should we unbind if no devices were detected? */
}
- complete_and_exit(&dev->scanning_done, 0);
+ kthread_complete_and_exit(&dev->scanning_done, 0);
}
static void rtsx_init_options(struct rtsx_chip *chip)
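kthread_complete_and_exit() is the 5.17 replacement for complete_and_exit(): it fires the completion and terminates the calling thread in one step, so the waiter may safely free the thread's resources, or unload the module, the moment wait_for_completion() returns. A sketch of the usual pairing, with assumed device-field names:

static int my_worker(void *data)
{
	struct my_dev *dev = data;

	while (!dev->shutting_down) {
		/* ... periodic work ... */
	}

	/* completes dev->worker_done and never returns */
	kthread_complete_and_exit(&dev->worker_done, 0);
}

/* teardown side: once this returns, the thread is truly gone */
wait_for_completion(&dev->worker_done);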
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 694644112447..8eee131e834d 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -266,7 +266,7 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
&notifyevent, &notifyresult);
if (ret) {
dev_dbg(&scsidev->sdev_gendev,
- "visorhba: setup_scsitaskmgmt_handles returned %d\n", ret);
+ "visorhba: setup_scsitaskmgmt_handles returned %d\n", ret);
return FAILED;
}
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 62cd9b783732..643432458105 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1590,7 +1590,7 @@ static void send_rcv_posts_if_needed(struct visornic_devdata *devdata)
netdev = devdata->netdev;
rcv_bufs_allocated = 0;
/* this code is trying to prevent getting stuck here forever,
- * but still retry it if you cant allocate them all this time.
+ * but still retry it if you can't allocate them all this time.
*/
cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
while (cur_num_rcv_bufs_to_alloc > 0) {
@@ -1759,13 +1759,11 @@ static void visornic_channel_interrupt(struct visor_device *dev)
if (!devdata)
return;
- if (!visorchannel_signalempty(
- devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART))
+ if (!visorchannel_signalempty(devdata->dev->visorchannel,
+ IOCHAN_FROM_IOPART))
napi_schedule(&devdata->napi);
atomic_set(&devdata->interrupt_rcvd, 0);
-
}
/* visornic_probe - probe function for visornic devices
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
index c250fbef2fa3..628732d7bf6a 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
@@ -52,20 +52,14 @@ static int bcm2835_devm_add_vchi_ctx(struct device *dev)
return 0;
}
-typedef int (*bcm2835_audio_newpcm_func)(struct bcm2835_chip *chip,
- const char *name,
- enum snd_bcm2835_route route,
- u32 numchannels);
-
-typedef int (*bcm2835_audio_newctl_func)(struct bcm2835_chip *chip);
-
struct bcm2835_audio_driver {
struct device_driver driver;
const char *shortname;
const char *longname;
int minchannels;
- bcm2835_audio_newpcm_func newpcm;
- bcm2835_audio_newctl_func newctl;
+ int (*newpcm)(struct bcm2835_chip *chip, const char *name,
+ enum snd_bcm2835_route route, u32 numchannels);
+ int (*newctl)(struct bcm2835_chip *chip);
enum snd_bcm2835_route route;
};
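Dropping the bcm2835_audio_newpcm_func/newctl typedefs follows kernel style: a function-pointer typedef hides the signature at the point of use, so the preferred form spells the type out in the struct member itself. A compact illustration with a made-up op:

struct my_ops {
	/* preferred: full signature visible in the member declaration */
	int (*probe)(struct my_chip *chip, const char *name);
};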
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 1b184d5c6b82..253d755e547f 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
@@ -64,7 +64,7 @@ MODULE_PARM_DESC(max_video_height, "Threshold for video mode");
static atomic_t camera_instance = ATOMIC_INIT(0);
/* global device data array */
-static struct bm2835_mmal_dev *gdev[MAX_BCM2835_CAMERAS];
+static struct bcm2835_mmal_dev *gdev[MAX_BCM2835_CAMERAS];
#define FPS_MIN 1
#define FPS_MAX 90
@@ -210,7 +210,7 @@ static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_ctxs[])
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
unsigned long size;
/* refuse queue setup if port is not configured */
@@ -265,7 +265,7 @@ static int queue_setup(struct vb2_queue *vq,
static int buffer_init(struct vb2_buffer *vb)
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2 = to_vb2_v4l2_buffer(vb);
struct vb2_mmal_buffer *buf =
container_of(vb2, struct vb2_mmal_buffer, vb);
@@ -280,7 +280,7 @@ static int buffer_init(struct vb2_buffer *vb)
static int buffer_prepare(struct vb2_buffer *vb)
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
unsigned long size;
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p, vb %p\n",
@@ -302,7 +302,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_cleanup(struct vb2_buffer *vb)
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2 = to_vb2_v4l2_buffer(vb);
struct vb2_mmal_buffer *buf =
container_of(vb2, struct vb2_mmal_buffer, vb);
@@ -313,7 +313,7 @@ static void buffer_cleanup(struct vb2_buffer *vb)
mmal_vchi_buffer_cleanup(&buf->mmal);
}
-static inline bool is_capturing(struct bm2835_mmal_dev *dev)
+static inline bool is_capturing(struct bcm2835_mmal_dev *dev)
{
return dev->capture.camera_port ==
&dev->component[COMP_CAMERA]->output[CAM_PORT_CAPTURE];
@@ -324,7 +324,7 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
int status,
struct mmal_buffer *mmal_buf)
{
- struct bm2835_mmal_dev *dev = port->cb_ctx;
+ struct bcm2835_mmal_dev *dev = port->cb_ctx;
struct vb2_mmal_buffer *buf =
container_of(mmal_buf, struct vb2_mmal_buffer, mmal);
@@ -416,7 +416,7 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
}
}
-static int enable_camera(struct bm2835_mmal_dev *dev)
+static int enable_camera(struct bcm2835_mmal_dev *dev)
{
int ret;
@@ -447,7 +447,7 @@ static int enable_camera(struct bm2835_mmal_dev *dev)
return 0;
}
-static int disable_camera(struct bm2835_mmal_dev *dev)
+static int disable_camera(struct bcm2835_mmal_dev *dev)
{
int ret;
@@ -482,7 +482,7 @@ static int disable_camera(struct bm2835_mmal_dev *dev)
static void buffer_queue(struct vb2_buffer *vb)
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2 = to_vb2_v4l2_buffer(vb);
struct vb2_mmal_buffer *buf =
container_of(vb2, struct vb2_mmal_buffer, vb);
@@ -501,7 +501,7 @@ static void buffer_queue(struct vb2_buffer *vb)
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
int ret;
u32 parameter_size;
@@ -596,7 +596,7 @@ static void stop_streaming(struct vb2_queue *vq)
{
int ret;
unsigned long timeout;
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
struct vchiq_mmal_port *port = dev->capture.port;
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
@@ -654,7 +654,7 @@ static void stop_streaming(struct vb2_queue *vq)
v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
}
-static const struct vb2_ops bm2835_mmal_video_qops = {
+static const struct vb2_ops bcm2835_mmal_video_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
@@ -671,7 +671,7 @@ static const struct vb2_ops bm2835_mmal_video_qops = {
* ------------------------------------------------------------------
*/
-static int set_overlay_params(struct bm2835_mmal_dev *dev,
+static int set_overlay_params(struct bcm2835_mmal_dev *dev,
struct vchiq_mmal_port *port)
{
struct mmal_parameter_displayregion prev_config = {
@@ -713,7 +713,7 @@ static int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
static int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
f->fmt.win = dev->overlay;
@@ -723,7 +723,7 @@ static int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
static int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
f->fmt.win.field = V4L2_FIELD_NONE;
f->fmt.win.chromakey = 0;
@@ -754,7 +754,7 @@ static int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
static int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
vidioc_try_fmt_vid_overlay(file, priv, f);
@@ -770,7 +770,7 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
static int vidioc_overlay(struct file *file, void *f, unsigned int on)
{
int ret;
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
struct vchiq_mmal_port *src;
struct vchiq_mmal_port *dst;
@@ -829,7 +829,7 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
/* The video overlay must stay within the framebuffer and can't be
* positioned independently.
*/
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
struct vchiq_mmal_port *preview_port =
&dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];
@@ -878,18 +878,16 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
u32 major;
u32 minor;
vchiq_mmal_version(dev->instance, &major, &minor);
- strscpy(cap->driver, "bm2835 mmal", sizeof(cap->driver));
- snprintf((char *)cap->card, sizeof(cap->card), "mmal service %d.%d",
- major, minor);
+ strscpy(cap->driver, "bcm2835 mmal", sizeof(cap->driver));
+ snprintf((char *)cap->card, sizeof(cap->card), "mmal service %d.%d", major, minor);
- snprintf((char *)cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", dev->v4l2_dev.name);
+ snprintf((char *)cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev->v4l2_dev.name);
return 0;
}
@@ -911,7 +909,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
f->fmt.pix.width = dev->capture.width;
f->fmt.pix.height = dev->capture.height;
@@ -936,7 +934,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
struct mmal_fmt *mfmt;
mfmt = get_format(f);
@@ -1010,7 +1008,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
-static int mmal_setup_video_component(struct bm2835_mmal_dev *dev,
+static int mmal_setup_video_component(struct bcm2835_mmal_dev *dev,
struct v4l2_format *f)
{
bool overlay_enabled = !!dev->component[COMP_PREVIEW]->enabled;
@@ -1056,7 +1054,7 @@ static int mmal_setup_video_component(struct bm2835_mmal_dev *dev,
return ret;
}
-static int mmal_setup_encode_component(struct bm2835_mmal_dev *dev,
+static int mmal_setup_encode_component(struct bcm2835_mmal_dev *dev,
struct v4l2_format *f,
struct vchiq_mmal_port *port,
struct vchiq_mmal_port *camera_port,
@@ -1144,7 +1142,7 @@ static int mmal_setup_encode_component(struct bm2835_mmal_dev *dev,
return 0;
}
-static int mmal_setup_components(struct bm2835_mmal_dev *dev,
+static int mmal_setup_components(struct bcm2835_mmal_dev *dev,
struct v4l2_format *f)
{
int ret;
@@ -1290,7 +1288,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
int ret;
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
struct mmal_fmt *mfmt;
/* try the format to set valid parameters */
@@ -1333,7 +1331,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
static const struct v4l2_frmsize_stepwise sizes = {
MIN_WIDTH, 0, 2,
MIN_HEIGHT, 0, 2
@@ -1358,7 +1356,7 @@ static int vidioc_enum_framesizes(struct file *file, void *fh,
static int vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fival)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
int i;
if (fival->index)
@@ -1388,7 +1386,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1402,7 +1400,7 @@ static int vidioc_g_parm(struct file *file, void *priv,
static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
struct v4l2_fract tpf;
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1530,7 +1528,7 @@ static int get_num_cameras(struct vchiq_mmal_instance *instance,
static int set_camera_parameters(struct vchiq_mmal_instance *instance,
struct vchiq_mmal_component *camera,
- struct bm2835_mmal_dev *dev)
+ struct bcm2835_mmal_dev *dev)
{
struct mmal_parameter_camera_config cam_config = {
.max_stills_w = dev->max_width,
@@ -1555,7 +1553,7 @@ static int set_camera_parameters(struct vchiq_mmal_instance *instance,
#define MAX_SUPPORTED_ENCODINGS 20
/* MMAL instance and component init */
-static int mmal_init(struct bm2835_mmal_dev *dev)
+static int mmal_init(struct bcm2835_mmal_dev *dev)
{
int ret;
struct mmal_es_format_local *format;
@@ -1735,7 +1733,7 @@ static int mmal_init(struct bm2835_mmal_dev *dev)
&enable,
sizeof(enable));
}
- ret = bm2835_mmal_set_all_camera_controls(dev);
+ ret = bcm2835_mmal_set_all_camera_controls(dev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "%s: failed to set all camera controls: %d\n",
__func__, ret);
@@ -1769,8 +1767,7 @@ unreg_mmal:
return ret;
}
-static int bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
- struct video_device *vfd)
+static int bcm2835_mmal_init_device(struct bcm2835_mmal_dev *dev, struct video_device *vfd)
{
int ret;
@@ -1798,7 +1795,7 @@ static int bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
return 0;
}
-static void bcm2835_cleanup_instance(struct bm2835_mmal_dev *dev)
+static void bcm2835_cleanup_instance(struct bcm2835_mmal_dev *dev)
{
if (!dev)
return;
@@ -1849,7 +1846,7 @@ static struct v4l2_format default_v4l2_format = {
static int bcm2835_mmal_probe(struct platform_device *pdev)
{
int ret;
- struct bm2835_mmal_dev *dev;
+ struct bcm2835_mmal_dev *dev;
struct vb2_queue *q;
int camera;
unsigned int num_cameras;
@@ -1908,7 +1905,7 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
}
/* setup v4l controls */
- ret = bm2835_mmal_init_controls(dev, &dev->ctrl_handler);
+ ret = bcm2835_mmal_init_controls(dev, &dev->ctrl_handler);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "%s: could not init controls: %d\n",
__func__, ret);
@@ -1931,7 +1928,7 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vb2_mmal_buffer);
- q->ops = &bm2835_mmal_video_qops;
+ q->ops = &bcm2835_mmal_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->mutex;
@@ -1940,7 +1937,7 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
goto unreg_dev;
/* initialise video devices */
- ret = bm2835_mmal_init_device(dev, &dev->vdev);
+ ret = bcm2835_mmal_init_device(dev, &dev->vdev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "%s: could not init device: %d\n",
__func__, ret);
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
index 75524adff0f5..0f0c6f7a3764 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
@@ -32,7 +32,7 @@ enum {
extern int bcm2835_v4l2_debug;
-struct bm2835_mmal_dev {
+struct bcm2835_mmal_dev {
/* v4l2 devices */
struct v4l2_device v4l2_dev;
struct video_device vdev;
@@ -110,12 +110,10 @@ struct bm2835_mmal_dev {
unsigned int rgb_bgr_swapped;
};
-int bm2835_mmal_init_controls(
- struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl_handler *hdl);
+int bcm2835_mmal_init_controls(struct bcm2835_mmal_dev *dev, struct v4l2_ctrl_handler *hdl);
-int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev);
-int set_framerate_params(struct bm2835_mmal_dev *dev);
+int bcm2835_mmal_set_all_camera_controls(struct bcm2835_mmal_dev *dev);
+int set_framerate_params(struct bcm2835_mmal_dev *dev);
/* Debug helpers */
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index b096a12387f7..eb722f16fb91 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
@@ -58,23 +58,16 @@ static const u32 iso_values[] = {
0, 100, 200, 400, 800,
};
-enum bm2835_mmal_ctrl_type {
+enum bcm2835_mmal_ctrl_type {
MMAL_CONTROL_TYPE_STD,
MMAL_CONTROL_TYPE_STD_MENU,
MMAL_CONTROL_TYPE_INT_MENU,
MMAL_CONTROL_TYPE_CLUSTER, /* special cluster entry */
};
-struct bm2835_mmal_v4l2_ctrl;
-
-typedef int(bm2835_mmal_v4l2_ctrl_cb)(
- struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl);
-
-struct bm2835_mmal_v4l2_ctrl {
+struct bcm2835_mmal_v4l2_ctrl {
u32 id; /* v4l2 control identifier */
- enum bm2835_mmal_ctrl_type type;
+ enum bcm2835_mmal_ctrl_type type;
/* control minimum value or
* mask for MMAL_CONTROL_TYPE_STD_MENU
*/
@@ -84,7 +77,8 @@ struct bm2835_mmal_v4l2_ctrl {
u64 step; /* step size of the control */
const s64 *imenu; /* integer menu array */
u32 mmal_id; /* mmal parameter id */
- bm2835_mmal_v4l2_ctrl_cb *setter;
+ int (*setter)(struct bcm2835_mmal_dev *dev, struct v4l2_ctrl *ctrl,
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl);
};
struct v4l2_to_mmal_effects_setting {
@@ -156,9 +150,9 @@ static const struct v4l2_mmal_scene_config scene_configs[] = {
/* control handlers*/
-static int ctrl_set_rational(struct bm2835_mmal_dev *dev,
+static int ctrl_set_rational(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
struct mmal_parameter_rational rational_value;
struct vchiq_mmal_port *control;
@@ -174,9 +168,9 @@ static int ctrl_set_rational(struct bm2835_mmal_dev *dev,
sizeof(rational_value));
}
-static int ctrl_set_value(struct bm2835_mmal_dev *dev,
+static int ctrl_set_value(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
@@ -190,9 +184,9 @@ static int ctrl_set_value(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_iso(struct bm2835_mmal_dev *dev,
+static int ctrl_set_iso(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
@@ -218,9 +212,9 @@ static int ctrl_set_iso(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_value_ev(struct bm2835_mmal_dev *dev,
+static int ctrl_set_value_ev(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
s32 s32_value;
struct vchiq_mmal_port *control;
@@ -234,9 +228,9 @@ static int ctrl_set_value_ev(struct bm2835_mmal_dev *dev,
&s32_value, sizeof(s32_value));
}
-static int ctrl_set_rotate(struct bm2835_mmal_dev *dev,
+static int ctrl_set_rotate(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret;
u32 u32_value;
@@ -263,9 +257,9 @@ static int ctrl_set_rotate(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_flip(struct bm2835_mmal_dev *dev,
+static int ctrl_set_flip(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret;
u32 u32_value;
@@ -304,9 +298,9 @@ static int ctrl_set_flip(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_exposure(struct bm2835_mmal_dev *dev,
+static int ctrl_set_exposure(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
enum mmal_parameter_exposuremode exp_mode = dev->exposure_mode_user;
u32 shutter_speed = 0;
@@ -360,9 +354,9 @@ static int ctrl_set_exposure(struct bm2835_mmal_dev *dev,
return ret;
}
-static int ctrl_set_metering_mode(struct bm2835_mmal_dev *dev,
+static int ctrl_set_metering_mode(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
switch (ctrl->val) {
case V4L2_EXPOSURE_METERING_AVERAGE:
@@ -396,9 +390,9 @@ static int ctrl_set_metering_mode(struct bm2835_mmal_dev *dev,
}
}
-static int ctrl_set_flicker_avoidance(struct bm2835_mmal_dev *dev,
+static int ctrl_set_flicker_avoidance(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
@@ -425,9 +419,9 @@ static int ctrl_set_flicker_avoidance(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_awb_mode(struct bm2835_mmal_dev *dev,
+static int ctrl_set_awb_mode(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
@@ -481,9 +475,9 @@ static int ctrl_set_awb_mode(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_awb_gains(struct bm2835_mmal_dev *dev,
+static int ctrl_set_awb_gains(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
struct vchiq_mmal_port *control;
struct mmal_parameter_awbgains gains;
@@ -504,9 +498,9 @@ static int ctrl_set_awb_gains(struct bm2835_mmal_dev *dev,
&gains, sizeof(gains));
}
-static int ctrl_set_image_effect(struct bm2835_mmal_dev *dev,
+static int ctrl_set_image_effect(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret = -EINVAL;
int i, j;
@@ -561,9 +555,9 @@ exit:
return (ret == 0 ? 0 : -EINVAL);
}
-static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
+static int ctrl_set_colfx(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret;
struct vchiq_mmal_port *control;
@@ -585,9 +579,9 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
return (ret == 0 ? 0 : -EINVAL);
}
-static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
+static int ctrl_set_bitrate(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret;
struct vchiq_mmal_port *encoder_out;
@@ -613,9 +607,9 @@ static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
return 0;
}
-static int ctrl_set_bitrate_mode(struct bm2835_mmal_dev *dev,
+static int ctrl_set_bitrate_mode(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 bitrate_mode;
struct vchiq_mmal_port *encoder_out;
@@ -640,9 +634,9 @@ static int ctrl_set_bitrate_mode(struct bm2835_mmal_dev *dev,
return 0;
}
-static int ctrl_set_image_encode_output(struct bm2835_mmal_dev *dev,
+static int ctrl_set_image_encode_output(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *jpeg_out;
@@ -656,9 +650,9 @@ static int ctrl_set_image_encode_output(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_video_encode_param_output(struct bm2835_mmal_dev *dev,
+static int ctrl_set_video_encode_param_output(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *vid_enc_ctl;
@@ -672,9 +666,9 @@ static int ctrl_set_video_encode_param_output(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_video_encode_profile_level(struct bm2835_mmal_dev *dev,
+static int ctrl_set_video_encode_profile_level(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
struct mmal_parameter_video_profile param;
int ret = 0;
@@ -783,9 +777,9 @@ static int ctrl_set_video_encode_profile_level(struct bm2835_mmal_dev *dev,
return ret;
}
-static int ctrl_set_scene_mode(struct bm2835_mmal_dev *dev,
+static int ctrl_set_scene_mode(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret = 0;
int shutter_speed;
@@ -890,12 +884,11 @@ static int ctrl_set_scene_mode(struct bm2835_mmal_dev *dev,
return 0;
}
-static int bm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl)
+static int bcm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct bm2835_mmal_dev *dev =
- container_of(ctrl->handler, struct bm2835_mmal_dev,
- ctrl_handler);
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl = ctrl->priv;
+ struct bcm2835_mmal_dev *dev = container_of(ctrl->handler, struct bcm2835_mmal_dev,
+ ctrl_handler);
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl = ctrl->priv;
int ret;
if (!mmal_ctrl || mmal_ctrl->id != ctrl->id || !mmal_ctrl->setter) {
@@ -910,11 +903,11 @@ static int bm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl)
return ret;
}
-static const struct v4l2_ctrl_ops bm2835_mmal_ctrl_ops = {
- .s_ctrl = bm2835_mmal_s_ctrl,
+static const struct v4l2_ctrl_ops bcm2835_mmal_ctrl_ops = {
+ .s_ctrl = bcm2835_mmal_s_ctrl,
};
-static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
+static const struct bcm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
{
.id = V4L2_CID_SATURATION,
.type = MMAL_CONTROL_TYPE_STD,
@@ -1253,7 +1246,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
},
};
-int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev)
+int bcm2835_mmal_set_all_camera_controls(struct bcm2835_mmal_dev *dev)
{
int c;
int ret = 0;
@@ -1273,7 +1266,7 @@ int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev)
return ret;
}
-int set_framerate_params(struct bm2835_mmal_dev *dev)
+int set_framerate_params(struct bcm2835_mmal_dev *dev)
{
struct mmal_parameter_fps_range fps_range;
int ret;
@@ -1318,11 +1311,10 @@ int set_framerate_params(struct bm2835_mmal_dev *dev)
return ret;
}
-int bm2835_mmal_init_controls(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl_handler *hdl)
+int bcm2835_mmal_init_controls(struct bcm2835_mmal_dev *dev, struct v4l2_ctrl_handler *hdl)
{
int c;
- const struct bm2835_mmal_v4l2_ctrl *ctrl;
+ const struct bcm2835_mmal_v4l2_ctrl *ctrl;
v4l2_ctrl_handler_init(hdl, V4L2_CTRL_COUNT);
@@ -1331,12 +1323,9 @@ int bm2835_mmal_init_controls(struct bm2835_mmal_dev *dev,
switch (ctrl->type) {
case MMAL_CONTROL_TYPE_STD:
- dev->ctrls[c] =
- v4l2_ctrl_new_std(hdl,
- &bm2835_mmal_ctrl_ops,
- ctrl->id, ctrl->min,
- ctrl->max, ctrl->step,
- ctrl->def);
+ dev->ctrls[c] = v4l2_ctrl_new_std(hdl, &bcm2835_mmal_ctrl_ops,
+ ctrl->id, ctrl->min, ctrl->max,
+ ctrl->step, ctrl->def);
break;
case MMAL_CONTROL_TYPE_STD_MENU:
@@ -1360,20 +1349,16 @@ int bm2835_mmal_init_controls(struct bm2835_mmal_dev *dev,
mask = ~mask;
}
- dev->ctrls[c] =
- v4l2_ctrl_new_std_menu(hdl,
- &bm2835_mmal_ctrl_ops,
- ctrl->id, ctrl->max,
- mask, ctrl->def);
+ dev->ctrls[c] = v4l2_ctrl_new_std_menu(hdl, &bcm2835_mmal_ctrl_ops,
+ ctrl->id, ctrl->max, mask,
+ ctrl->def);
break;
}
case MMAL_CONTROL_TYPE_INT_MENU:
- dev->ctrls[c] =
- v4l2_ctrl_new_int_menu(hdl,
- &bm2835_mmal_ctrl_ops,
- ctrl->id, ctrl->max,
- ctrl->def, ctrl->imenu);
+ dev->ctrls[c] = v4l2_ctrl_new_int_menu(hdl, &bcm2835_mmal_ctrl_ops,
+ ctrl->id, ctrl->max,
+ ctrl->def, ctrl->imenu);
break;
case MMAL_CONTROL_TYPE_CLUSTER:
diff --git a/drivers/staging/vc04_services/interface/TODO b/drivers/staging/vc04_services/interface/TODO
index 39810ce017cd..241ca004735c 100644
--- a/drivers/staging/vc04_services/interface/TODO
+++ b/drivers/staging/vc04_services/interface/TODO
@@ -80,11 +80,7 @@ vchiq-core.ko and vchiq-dev.ko. This would also ease the upstreaming process.
The code in vchiq_bcm2835_arm.c should fit in the generic platform file.
-12) Get rid of all the struct typedefs
-
-Most structs are typedefd, it's not encouraged in the kernel.
-
-13) Get rid of all non essential global structures and create a proper per
+11) Get rid of all non essential global structures and create a proper per
device structure
The first thing one generally sees in a probe function is a memory allocation
@@ -92,6 +88,6 @@ for all the device specific data. This structure is then passed all over the
driver. This is good practice since it makes the driver work regardless of the
number of devices probed.
-14) Clean up Sparse warnings from __user annotations. See
+12) Clean up Sparse warnings from __user annotations. See
vchiq_irq_queue_bulk_tx_rx(). Ensure that the address of "&waiter->bulk_waiter"
is never disclosed to userspace.
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index c650a32bcedf..6759a6261500 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1661,7 +1661,7 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
service_data[i].clientid, service_data[i].use_count,
service_data[i].use_count ? nz : "");
}
- vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count count %d", peer_count);
+ vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
vc_use_count);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 53a98949b294..55abaf02a196 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -74,8 +74,6 @@
((fourcc) >> 8) & 0xff, \
(fourcc) & 0xff
-static_assert((sizeof(u32) * 8) == 32);
-
#define BITSET_SIZE(b) ((b + 31) >> 5)
#define BITSET_WORD(b) (b >> 5)
#define BITSET_BIT(b) (1 << (b & 31))
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-common.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-common.h
index 5bd7410a034a..b33129403a30 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-common.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h
index 2be9941a1f30..e15ae7b24f73 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
index 342c9b670f7e..d77e15f25dda 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
index a118efd21d98..1e996d8cd283 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h
index 3fa3f2a578f0..6ee4c1ed7f19 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h
index b636e889c8a1..471413248a14 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
index a1e39b1b1701..2277e05b1e31 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 76d3f0399964..cb6cdbfaf6ec 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h
index 1dc81ecf9268..6006e29232b3 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 26e08fec6e6a..ee2d145778ed 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -183,7 +183,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
unsigned char bySlot = 0;
unsigned char bySIFS = 0;
unsigned char byDIFS = 0;
- unsigned char byData;
+ unsigned char data;
int i;
/* Set SIFS, DIFS, EIFS, SlotTime, CwMin */
@@ -194,15 +194,15 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x20;
priv->abyBBVGA[2] = 0x10;
priv->abyBBVGA[3] = 0x10;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x1C)
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x1C)
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
MACvSetBBType(priv->port_offset, BB_TYPE_11A);
priv->abyBBVGA[0] = 0x18;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x14) {
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x14) {
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
bb_write_embedded(priv, 0xE1, 0x57);
}
@@ -220,14 +220,14 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x1C;
priv->abyBBVGA[2] = 0x00;
priv->abyBBVGA[3] = 0x00;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x20)
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x20)
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
priv->abyBBVGA[0] = 0x14;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x18) {
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x18) {
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
bb_write_embedded(priv, 0xE1, 0xD3);
}
@@ -243,14 +243,14 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x1C;
priv->abyBBVGA[2] = 0x00;
priv->abyBBVGA[3] = 0x00;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x20)
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x20)
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
priv->abyBBVGA[0] = 0x14;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x18) {
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x18) {
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
bb_write_embedded(priv, 0xE1, 0xD3);
}
@@ -404,7 +404,7 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv,
*/
void CARDbRadioPowerOff(struct vnt_private *priv)
{
- if (priv->bRadioOff)
+ if (priv->radio_off)
return;
switch (priv->byRFType) {
@@ -429,7 +429,7 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
bb_set_deep_sleep(priv, priv->local_id);
- priv->bRadioOff = true;
+ priv->radio_off = true;
pr_debug("chester power off\n");
MACvRegBitsOn(priv->port_offset, MAC_REG_GPIOCTL0,
LED_ACTSET); /* LED issue */
@@ -798,12 +798,12 @@ bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
{
void __iomem *iobase = priv->port_offset;
unsigned short ww;
- unsigned char byData;
+ unsigned char data;
MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_TFTCTL, &byData);
- if (!(byData & TFTCTL_TSFCNTRRD))
+ VNSvInPortB(iobase + MAC_REG_TFTCTL, &data);
+ if (!(data & TFTCTL_TSFCNTRRD))
break;
}
if (ww == W_MAX_TIMEOUT)
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index b550a1a0844e..e37c8e35a45b 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -189,7 +189,7 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
/* Init Synthesizer Table */
if (priv->bEnablePSMode)
- RFvWriteWakeProgSyn(priv, priv->byRFType, ch->hw_value);
+ rf_write_wake_prog_syn(priv, priv->byRFType, ch->hw_value);
bb_software_reset(priv);
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 4706bde1ec1d..c272a4ab2fa0 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -128,8 +128,6 @@ struct vnt_private {
u32 memaddr;
u32 ioaddr;
- unsigned char byRxMode;
-
spinlock_t lock;
volatile int iTDUsed[TYPE_MAXTD];
@@ -157,7 +155,7 @@ struct vnt_private {
unsigned char local_id;
unsigned char byRFType;
- unsigned char byMaxPwrLevel;
+ unsigned char max_pwr_level;
unsigned char byZoneType;
bool bZoneRegExist;
unsigned char byOriginalZonetype;
@@ -165,7 +163,7 @@ struct vnt_private {
unsigned char abyCurrentNetAddr[ETH_ALEN]; __aligned(2)
bool bLinkPass; /* link status: OK or fail */
- unsigned int uCurrRSSI;
+ unsigned int current_rssi;
unsigned char byCurrSQ;
unsigned long dwTxAntennaSel;
@@ -221,7 +219,7 @@ struct vnt_private {
bool bBarkerPreambleMd;
bool bRadioControlOff;
- bool bRadioOff;
+ bool radio_off;
bool bEnablePSMode;
unsigned short wListenInterval;
bool bPWBitOn;
@@ -229,7 +227,7 @@ struct vnt_private {
/* GPIO Radio Control */
unsigned char byRadioCtl;
unsigned char byGPIO;
- bool bHWRadioOff;
+ bool hw_radio_off;
bool bPrvActive4RadioOFF;
bool bGPIOBlockRead;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 212d2a287b2c..897d70cf32b8 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -369,11 +369,11 @@ static void device_init_registers(struct vnt_private *priv)
/* Set Short Slot Time, xIFS, and RSPINF. */
priv->wCurrentRate = RATE_54M;
- priv->bRadioOff = false;
+ priv->radio_off = false;
priv->byRadioCtl = SROMbyReadEmbedded(priv->port_offset,
EEP_OFS_RADIOCTL);
- priv->bHWRadioOff = false;
+ priv->hw_radio_off = false;
if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
/* Get GPIO */
@@ -383,10 +383,10 @@ static void device_init_registers(struct vnt_private *priv)
!(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
(!(priv->byGPIO & GPIO0_DATA) &&
(priv->byRadioCtl & EEP_RADIOCTL_INV)))
- priv->bHWRadioOff = true;
+ priv->hw_radio_off = true;
}
- if (priv->bHWRadioOff || priv->bRadioControlOff)
+ if (priv->hw_radio_off || priv->bRadioControlOff)
CARDbRadioPowerOff(priv);
/* get Permanent network address */
@@ -980,10 +980,10 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
return;
- if (!(priv->vif->bss_conf.assoc && priv->uCurrRSSI))
+ if (!(priv->vif->bss_conf.assoc && priv->current_rssi))
return;
- RFvRSSITodBm(priv, (u8)priv->uCurrRSSI, &dbm);
+ RFvRSSITodBm(priv, (u8)priv->current_rssi, &dbm);
for (i = 0; i < BB_VGA_LEVEL; i++) {
if (dbm < priv->dbm_threshold[i]) {
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index a7d1d35de5d4..c6ed3537f439 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -80,7 +80,7 @@ static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
RFvRSSITodBm(priv, *rssi, &rx_dbm);
priv->byBBPreEDRSSI = (u8)rx_dbm + 1;
- priv->uCurrRSSI = *rssi;
+ priv->current_rssi = *rssi;
skb_pull(skb, 4);
skb_trim(skb, frame_size);
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index bc4abe77db7b..ba7056f5a5da 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -609,11 +609,11 @@ bool RFbInit(struct vnt_private *priv)
switch (priv->byRFType) {
case RF_AIROHA:
case RF_AL2230S:
- priv->byMaxPwrLevel = AL2230_PWR_IDX_LEN;
+ priv->max_pwr_level = AL2230_PWR_IDX_LEN;
ret = RFbAL2230Init(priv);
break;
case RF_AIROHA7230:
- priv->byMaxPwrLevel = AL7230_PWR_IDX_LEN;
+ priv->max_pwr_level = AL7230_PWR_IDX_LEN;
ret = s_bAL7230Init(priv);
break;
case RF_NOTHING:
@@ -669,20 +669,22 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
*
* Parameters:
* In:
- * iobase - I/O base address
- * channel - channel number
- * bySleepCnt - SleepProgSyn count
+ * priv - Device Structure
+ * rf_type - RF type
+ * channel - Channel number
*
- * Return Value: None.
+ * Return Value: true if succeeded; false if failed.
*
*/
-bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char rf_type,
- u16 channel)
+bool rf_write_wake_prog_syn(struct vnt_private *priv, unsigned char rf_type,
+ u16 channel)
{
void __iomem *iobase = priv->port_offset;
int i;
unsigned char init_count = 0;
unsigned char sleep_count = 0;
+ unsigned short idx = MISCFIFO_SYNDATA_IDX;
+ const unsigned long *init_table;
VNSvOutPortW(iobase + MAC_REG_MISCFFNDEX, 0);
switch (rf_type) {
@@ -695,15 +697,12 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char rf_type,
/* Init Reg + Channel Reg (2) */
init_count = CB_AL2230_INIT_SEQ + 2;
sleep_count = 0;
- if (init_count > (MISCFIFO_SYNDATASIZE - sleep_count))
- return false;
for (i = 0; i < CB_AL2230_INIT_SEQ; i++)
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al2230_init_table[i]);
+ MACvSetMISCFifo(priv, idx++, al2230_init_table[i]);
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al2230_channel_table0[channel - 1]);
- i++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al2230_channel_table1[channel - 1]);
+ MACvSetMISCFifo(priv, idx++, al2230_channel_table0[channel - 1]);
+ MACvSetMISCFifo(priv, idx++, al2230_channel_table1[channel - 1]);
break;
/* Need to check, PLLON need to be low for channel setting */
@@ -711,22 +710,15 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char rf_type,
/* Init Reg + Channel Reg (3) */
init_count = CB_AL7230_INIT_SEQ + 3;
sleep_count = 0;
- if (init_count > (MISCFIFO_SYNDATASIZE - sleep_count))
- return false;
- if (channel <= CB_MAX_CHANNEL_24G) {
- for (i = 0; i < CB_AL7230_INIT_SEQ; i++)
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_init_table[i]);
- } else {
- for (i = 0; i < CB_AL7230_INIT_SEQ; i++)
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_init_table_a_mode[i]);
- }
+ init_table = (channel <= CB_MAX_CHANNEL_24G) ?
+ al7230_init_table : al7230_init_table_a_mode;
+ for (i = 0; i < CB_AL7230_INIT_SEQ; i++)
+ MACvSetMISCFifo(priv, idx++, init_table[i]);
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_channel_table0[channel - 1]);
- i++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_channel_table1[channel - 1]);
- i++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_channel_table2[channel - 1]);
+ MACvSetMISCFifo(priv, idx++, al7230_channel_table0[channel - 1]);
+ MACvSetMISCFifo(priv, idx++, al7230_channel_table1[channel - 1]);
+ MACvSetMISCFifo(priv, idx++, al7230_channel_table2[channel - 1]);
break;
case RF_NOTHING:
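The rf_write_wake_prog_syn() rewrite replaces the repeated (MISCFIFO_SYNDATA_IDX + i) arithmetic and the manual i++ between channel writes with one running idx, and collapses the duplicated 2.4 GHz/5 GHz loops into a single loop over a table pointer chosen up front. The shape of the change, with hypothetical names:

unsigned short idx = MISCFIFO_SYNDATA_IDX;
const unsigned long *tbl = use_a_band ? table_a : table_bg;

for (i = 0; i < init_seq_len; i++)
	fifo_write(priv, idx++, tbl[i]);	/* idx advances once per write */
fifo_write(priv, idx++, chan_table0[channel - 1]);
fifo_write(priv, idx++, chan_table1[channel - 1]);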
@@ -786,8 +778,8 @@ bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH)
else
byDec = byPwr + 10;
- if (byDec >= priv->byMaxPwrLevel)
- byDec = priv->byMaxPwrLevel - 1;
+ if (byDec >= priv->max_pwr_level)
+ byDec = priv->max_pwr_level - 1;
byPwr = byDec;
break;
@@ -829,7 +821,7 @@ bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
bool ret = true;
unsigned long dwMax7230Pwr = 0;
- if (byPwr >= priv->byMaxPwrLevel)
+ if (byPwr >= priv->max_pwr_level)
return false;
switch (priv->byRFType) {
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 0939937d47a8..9fef81846a9f 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -60,7 +60,7 @@
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData);
bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, u16 byChannel);
bool RFbInit(struct vnt_private *priv);
-bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char rf_type, u16 channel);
+bool rf_write_wake_prog_syn(struct vnt_private *priv, unsigned char rf_type, u16 channel);
bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH);
bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
unsigned int rate);
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 24ba10d6bd0b..fcf8313870af 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -679,16 +679,8 @@ static int prism2mib_priv(struct mibrec *mib,
HFA384x_RID_CNFWPADATA,
(u8 *)&wpa,
sizeof(wpa));
- /*
- pstr->len = le16_to_cpu(wpa.datalen);
- memcpy(pstr->data, wpa.data, pstr->len);
- */
pstr->len = 0;
} else {
- /*
- wpa.datalen = cpu_to_le16(pstr->len);
- memcpy(wpa.data, pstr->data, pstr->len);
- */
wpa.datalen = 0;
hfa384x_drvr_setconfig(hw,
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 8075f60fd02c..2d5cf1714ae0 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -443,6 +443,9 @@ static bool iscsit_tpg_check_network_portal(
break;
}
spin_unlock(&tpg->tpg_np_lock);
+
+ if (match)
+ break;
}
spin_unlock(&tiqn->tiqn_tpg_lock);
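Because break only leaves the innermost loop in C, the fix latches the result in match and re-tests it after the inner spinlock is dropped, so the outer scan stops as soon as a portal matches instead of walking every remaining TPG. Generic form of the pattern, with placeholder names:

bool match = false;

list_for_each_entry(outer, &outers, link) {
	spin_lock(&outer->lock);
	list_for_each_entry(inner, &outer->list, link) {
		if (is_match(inner)) {
			match = true;
			break;			/* exits the inner loop only */
		}
	}
	spin_unlock(&outer->lock);

	if (match)
		break;				/* now exit the outer loop */
}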
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7fa57fb57bf2..807d06ecadee 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1005,7 +1005,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->timeout = PS_TIMEOUT_OTHER;
scsi_req(req)->retries = PS_RETRY;
- blk_execute_rq_nowait(NULL, req, (cmd->sam_task_attr == TCM_HEAD_TAG),
+ blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
pscsi_req_done);
return 0;
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 66b8a17f14c4..a6eff388d300 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_OPTEE) += optee.o
optee-objs += core.o
optee-objs += call.o
+optee-objs += notif.o
optee-objs += rpc.o
optee-objs += supp.o
optee-objs += device.o
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 2a66a5203d2f..1ca320885fad 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -157,6 +157,7 @@ void optee_remove_common(struct optee *optee)
/* Unregister OP-TEE specific client devices on TEE bus */
optee_unregister_devices();
+ optee_notif_uninit(optee);
/*
* The two devices have to be unregistered before we can free the
* other resources.
@@ -165,7 +166,6 @@ void optee_remove_common(struct optee *optee)
tee_device_unregister(optee->teedev);
tee_shm_pool_free(optee->pool);
- optee_wait_queue_exit(&optee->wait_queue);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
}
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index d8c8683863aa..20a1b1a3d965 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -855,9 +855,13 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
mutex_init(&optee->ffa.mutex);
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
- optee_wait_queue_init(&optee->wait_queue);
optee_supp_init(&optee->supp);
ffa_dev_set_drvdata(ffa_dev, optee);
+ rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
+ if (rc) {
+ optee_ffa_remove(ffa_dev);
+ return rc;
+ }
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
if (rc) {
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
new file mode 100644
index 000000000000..a28fa03dcd0e
--- /dev/null
+++ b/drivers/tee/optee/notif.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+
+struct notif_entry {
+ struct list_head link;
+ struct completion c;
+ u_int key;
+};
+
+static bool have_key(struct optee *optee, u_int key)
+{
+ struct notif_entry *entry;
+
+ list_for_each_entry(entry, &optee->notif.db, link)
+ if (entry->key == key)
+ return true;
+
+ return false;
+}
+
+int optee_notif_wait(struct optee *optee, u_int key)
+{
+ unsigned long flags;
+ struct notif_entry *entry;
+ int rc = 0;
+
+ if (key > optee->notif.max_key)
+ return -EINVAL;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ init_completion(&entry->c);
+ entry->key = key;
+
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ /*
+ * If the bit is already set, it means that the key has already
+ * been posted and we must not wait.
+ */
+ if (test_bit(key, optee->notif.bitmap)) {
+ clear_bit(key, optee->notif.bitmap);
+ goto out;
+ }
+
+ /*
+ * Check if someone is already waiting for this key. If there is,
+ * it's a programming error.
+ */
+ if (have_key(optee, key)) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ list_add_tail(&entry->link, &optee->notif.db);
+
+ /*
+ * Unlock temporarily and wait for completion.
+ */
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+ wait_for_completion(&entry->c);
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ list_del(&entry->link);
+out:
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+
+ kfree(entry);
+
+ return rc;
+}
+
+int optee_notif_send(struct optee *optee, u_int key)
+{
+ unsigned long flags;
+ struct notif_entry *entry;
+
+ if (key > optee->notif.max_key)
+ return -EINVAL;
+
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ list_for_each_entry(entry, &optee->notif.db, link)
+ if (entry->key == key) {
+ complete(&entry->c);
+ goto out;
+ }
+
+ /* Only set the bit in case there was nobody waiting */
+ set_bit(key, optee->notif.bitmap);
+out:
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+
+ return 0;
+}
+
+int optee_notif_init(struct optee *optee, u_int max_key)
+{
+ spin_lock_init(&optee->notif.lock);
+ INIT_LIST_HEAD(&optee->notif.db);
+ optee->notif.bitmap = bitmap_zalloc(max_key, GFP_KERNEL);
+ if (!optee->notif.bitmap)
+ return -ENOMEM;
+
+ optee->notif.max_key = max_key;
+
+ return 0;
+}
+
+void optee_notif_uninit(struct optee *optee)
+{
+ kfree(optee->notif.bitmap);
+}
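
Editor's note: the wait/send pair above is built so that a notification posted before anyone waits for it is parked in the bitmap rather than lost. The following standalone C sketch (an illustration only; none of these names are kernel APIs) models the same "send may precede wait" semantics with a pthread mutex and condition variable standing in for the spinlock and completion:

/* Userspace analogue of optee_notif_wait()/optee_notif_send() for one key.
 * The "posted" flag plays the role of test_bit()/set_bit()/clear_bit() on
 * optee->notif.bitmap. Hypothetical illustration, not driver code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool posted;

static void notif_send(void)
{
	pthread_mutex_lock(&lock);
	posted = true;		/* record the value even if nobody waits yet */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void notif_wait(void)
{
	pthread_mutex_lock(&lock);
	while (!posted)		/* if already posted, return without sleeping */
		pthread_cond_wait(&cond, &lock);
	posted = false;		/* consume it, like the clear_bit() above */
	pthread_mutex_unlock(&lock);
}

static void *sender(void *arg)
{
	notif_send();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sender, NULL);
	notif_wait();		/* correct even if sender() already ran */
	pthread_join(t, NULL);
	printf("notification received\n");
	return 0;
}

The kernel version cannot take a mutex here because optee_notif_send() may run from the interrupt handler added to smc_abi.c further down, hence the spinlock plus completion split.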
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index 2422e185d400..70e9cc2ee96b 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -318,6 +318,13 @@ struct optee_msg_arg {
* [in] param[0].u.rmem.shm_ref holds shared memory reference
* [in] param[0].u.rmem.offs 0
* [in] param[0].u.rmem.size 0
+ *
+ * OPTEE_MSG_CMD_DO_BOTTOM_HALF does the scheduled bottom half processing
+ * of a driver.
+ *
+ * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now
+ * on the normal world is unable to process asynchronous notifications.
+ * Typically used when the driver is shut down.
*/
#define OPTEE_MSG_CMD_OPEN_SESSION 0
#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
@@ -325,6 +332,8 @@ struct optee_msg_arg {
#define OPTEE_MSG_CMD_CANCEL 3
#define OPTEE_MSG_CMD_REGISTER_SHM 4
#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
+#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
+#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
#endif /* _OPTEE_MSG_H */
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 6660e05298db..46f74ab07c7e 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -28,6 +28,13 @@
#define TEEC_ORIGIN_COMMS 0x00000002
+/*
+ * This value should be larger than the number of threads in secure world
+ * to meet the need from secure world. The number of threads in secure
+ * world is usually not even close to 255, so we should be safe for now.
+ */
+#define OPTEE_DEFAULT_MAX_NOTIF_VALUE 255
+
typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
@@ -44,10 +51,13 @@ struct optee_call_queue {
struct list_head waiters;
};
-struct optee_wait_queue {
- /* Serializes access to this struct */
- struct mutex mu;
+struct optee_notif {
+ u_int max_key;
+ struct tee_context *ctx;
+ /* Serializes access to the elements below in this struct */
+ spinlock_t lock;
struct list_head db;
+ u_long *bitmap;
};
/**
@@ -79,6 +89,7 @@ struct optee_smc {
optee_invoke_fn *invoke_fn;
void *memremaped_shm;
u32 sec_caps;
+ unsigned int notif_irq;
};
/**
@@ -129,8 +140,7 @@ struct optee_ops {
* @smc: specific to SMC ABI
* @ffa: specific to FF-A ABI
* @call_queue: queue of threads waiting to call @invoke_fn
- * @wait_queue: queue of threads from secure world waiting for a
- * secure world sync object
+ * @notif: notification synchronization struct
* @supp: supplicant synchronization struct for RPC to supplicant
* @pool: shared memory pool
* @rpc_arg_count: If > 0 number of RPC parameters to make room for
@@ -147,7 +157,7 @@ struct optee {
struct optee_ffa ffa;
};
struct optee_call_queue call_queue;
- struct optee_wait_queue wait_queue;
+ struct optee_notif notif;
struct optee_supp supp;
struct tee_shm_pool *pool;
unsigned int rpc_arg_count;
@@ -185,8 +195,10 @@ struct optee_call_ctx {
size_t num_entries;
};
-void optee_wait_queue_init(struct optee_wait_queue *wq);
-void optee_wait_queue_exit(struct optee_wait_queue *wq);
+int optee_notif_init(struct optee *optee, u_int max_key);
+void optee_notif_uninit(struct optee *optee);
+int optee_notif_wait(struct optee *optee, u_int key);
+int optee_notif_send(struct optee *optee, u_int key);
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param);
diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h
index b8275140cef8..f3f06e0994a7 100644
--- a/drivers/tee/optee/optee_rpc_cmd.h
+++ b/drivers/tee/optee/optee_rpc_cmd.h
@@ -28,24 +28,27 @@
#define OPTEE_RPC_CMD_GET_TIME 3
/*
- * Wait queue primitive, helper for secure world to implement a wait queue.
+ * Notification from/to secure world.
*
- * If secure world needs to wait for a secure world mutex it issues a sleep
- * request instead of spinning in secure world. Conversely is a wakeup
- * request issued when a secure world mutex with a thread waiting thread is
- * unlocked.
+ * If secure world needs to wait for something, for instance a mutex, it
+ * does a notification wait request instead of spinning in secure world.
+ * Conversely, a synchronous notification can be sent when a secure
+ * world mutex with a waiting thread is unlocked.
*
- * Waiting on a key
- * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_SLEEP
- * [in] value[0].b Wait key
+ * This interface can also be used to wait for an asynchronous notification
+ * which instead is sent via a non-secure interrupt.
*
- * Waking up a key
- * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_WAKEUP
- * [in] value[0].b Wakeup key
+ * Waiting on notification
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT
+ * [in] value[0].b notification value
+ *
+ * Sending a synchronous notification
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND
+ * [in] value[0].b notification value
*/
-#define OPTEE_RPC_CMD_WAIT_QUEUE 4
-#define OPTEE_RPC_WAIT_QUEUE_SLEEP 0
-#define OPTEE_RPC_WAIT_QUEUE_WAKEUP 1
+#define OPTEE_RPC_CMD_NOTIFICATION 4
+#define OPTEE_RPC_NOTIFICATION_WAIT 0
+#define OPTEE_RPC_NOTIFICATION_SEND 1
/*
* Suspend execution
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 80eb763a8a80..d44a6ae994f8 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -107,6 +107,12 @@ struct optee_smc_call_get_os_revision_result {
/*
* Call with struct optee_msg_arg as argument
*
+ * When calling this function normal world has a few responsibilities:
+ * 1. It must be able to handle eventual RPCs
+ * 2. Non-secure interrupts should not be masked
+ * 3. If asynchronous notifications have been negotiated successfully, then
+ * asynchronous notifications should be unmasked during this call.
+ *
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
* a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
@@ -195,7 +201,8 @@ struct optee_smc_get_shm_config_result {
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
- * a2-7 Preserved
+ * a2 The maximum secure world notification number
+ * a3-7 Preserved
*
* Error return register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
@@ -218,6 +225,8 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_VIRTUALIZATION BIT(3)
/* Secure world supports Shared Memory with a NULL reference */
#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4)
+/* Secure world supports asynchronous notification of normal world */
+#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
@@ -226,8 +235,8 @@ struct optee_smc_get_shm_config_result {
struct optee_smc_exchange_capabilities_result {
unsigned long status;
unsigned long capabilities;
+ unsigned long max_notif_value;
unsigned long reserved0;
- unsigned long reserved1;
};
/*
@@ -320,6 +329,68 @@ struct optee_smc_disable_shm_cache_result {
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
/*
+ * Inform OP-TEE that normal world is able to receive asynchronous
+ * notifications.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_ASYNC_NOTIF
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not supported return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF 16
+#define OPTEE_SMC_ENABLE_ASYNC_NOTIF \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF)
+
+/*
+ * Retrieve a value of notifications pending since the last call of this
+ * function.
+ *
+ * OP-TEE keeps a record of all posted values. When an interrupt is
+ * received which indicates that there are posted values, this function
+ * should be called until all pending values have been retrieved. When a
+ * value is retrieved, it's cleared from the record in secure world.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 value
+ * a2 Bit[0]: OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID if the value in a1 is
+ * valid, else 0 if no values were pending
+ * a2 Bit[1]: OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING if another value is
+ * pending, else 0.
+ * Bit[31:2]: MBZ
+ * a3-7 Preserved
+ *
+ * Not supported return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID BIT(0)
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING BIT(1)
+
+/*
+ * Notification that OP-TEE expects a yielding call to do some bottom half
+ * work in a driver.
+ */
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF 0
+
+#define OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE 17
+#define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE)
+
+/*
* Resume from RPC (for example after processing a foreign interrupt)
*
* Call register usage:
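
Editor's note: the OPTEE_SMC_GET_ASYNC_NOTIF_VALUE description above amounts to a drain loop driven by the VALID and PENDING bits returned in a2. A minimal sketch of that loop, restating what notif_irq_handler() in smc_abi.c below implements (handle_value() is a hypothetical callback):

static void drain_async_notif(optee_invoke_fn *invoke_fn,
			      void (*handle_value)(unsigned long value))
{
	struct arm_smccc_res res;

	do {
		invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE,
			  0, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0)	/* OPTEE_SMC_RETURN_OK is 0 */
			break;
		/* a1 carries a value only when bit 0 of a2 is set */
		if (!(res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID))
			break;
		handle_value(res.a1);
		/* keep calling while secure world reports more pending */
	} while (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
}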
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index cd642e340eaf..e69bc6380683 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -12,23 +12,6 @@
#include "optee_private.h"
#include "optee_rpc_cmd.h"
-struct wq_entry {
- struct list_head link;
- struct completion c;
- u32 key;
-};
-
-void optee_wait_queue_init(struct optee_wait_queue *priv)
-{
- mutex_init(&priv->mu);
- INIT_LIST_HEAD(&priv->db);
-}
-
-void optee_wait_queue_exit(struct optee_wait_queue *priv)
-{
- mutex_destroy(&priv->mu);
-}
-
static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
{
struct timespec64 ts;
@@ -144,48 +127,6 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
}
#endif
-static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
-{
- struct wq_entry *w;
-
- mutex_lock(&wq->mu);
-
- list_for_each_entry(w, &wq->db, link)
- if (w->key == key)
- goto out;
-
- w = kmalloc(sizeof(*w), GFP_KERNEL);
- if (w) {
- init_completion(&w->c);
- w->key = key;
- list_add_tail(&w->link, &wq->db);
- }
-out:
- mutex_unlock(&wq->mu);
- return w;
-}
-
-static void wq_sleep(struct optee_wait_queue *wq, u32 key)
-{
- struct wq_entry *w = wq_entry_get(wq, key);
-
- if (w) {
- wait_for_completion(&w->c);
- mutex_lock(&wq->mu);
- list_del(&w->link);
- mutex_unlock(&wq->mu);
- kfree(w);
- }
-}
-
-static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
-{
- struct wq_entry *w = wq_entry_get(wq, key);
-
- if (w)
- complete(&w->c);
-}
-
static void handle_rpc_func_cmd_wq(struct optee *optee,
struct optee_msg_arg *arg)
{
@@ -197,11 +138,13 @@ static void handle_rpc_func_cmd_wq(struct optee *optee,
goto bad;
switch (arg->params[0].u.value.a) {
- case OPTEE_RPC_WAIT_QUEUE_SLEEP:
- wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
+ case OPTEE_RPC_NOTIFICATION_WAIT:
+ if (optee_notif_wait(optee, arg->params[0].u.value.b))
+ goto bad;
break;
- case OPTEE_RPC_WAIT_QUEUE_WAKEUP:
- wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
+ case OPTEE_RPC_NOTIFICATION_SEND:
+ if (optee_notif_send(optee, arg->params[0].u.value.b))
+ goto bad;
break;
default:
goto bad;
@@ -319,7 +262,7 @@ void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
case OPTEE_RPC_CMD_GET_TIME:
handle_rpc_func_cmd_get_time(arg);
break;
- case OPTEE_RPC_CMD_WAIT_QUEUE:
+ case OPTEE_RPC_CMD_NOTIFICATION:
handle_rpc_func_cmd_wq(optee, arg);
break;
case OPTEE_RPC_CMD_SUSPEND:
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index cf2e3293567d..449d6a72d289 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -8,13 +8,16 @@
#include <linux/arm-smccc.h>
#include <linux/errno.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/sched.h>
+#include <linux/irqdomain.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
@@ -35,7 +38,8 @@
* 2. Low level support functions to register shared memory in secure world
* 3. Dynamic shared memory pool based on alloc_pages()
* 4. Do a normal scheduled call into secure world
- * 5. Driver initialization.
+ * 5. Asynchronous notification
+ * 6. Driver initialization.
*/
#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
@@ -877,10 +881,137 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
return rc;
}
+static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
+{
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+
+ shm = optee_get_msg_arg(ctx, 0, &msg_arg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ msg_arg->cmd = cmd;
+ optee_smc_do_call_with_arg(ctx, shm);
+
+ tee_shm_free(shm);
+ return 0;
+}
+
+static int optee_smc_do_bottom_half(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
+}
+
+static int optee_smc_stop_async_notif(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
+}
+
/*
- * 5. Driver initialization
+ * 5. Asynchronous notification
+ */
+
+static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
+ bool *value_pending)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0)
+ return 0;
+ *value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
+ *value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
+ return res.a1;
+}
+
+static irqreturn_t notif_irq_handler(int irq, void *dev_id)
+{
+ struct optee *optee = dev_id;
+ bool do_bottom_half = false;
+ bool value_valid;
+ bool value_pending;
+ u32 value;
+
+ do {
+ value = get_async_notif_value(optee->smc.invoke_fn,
+ &value_valid, &value_pending);
+ if (!value_valid)
+ break;
+
+ if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
+ do_bottom_half = true;
+ else
+ optee_notif_send(optee, value);
+ } while (value_pending);
+
+ if (do_bottom_half)
+ return IRQ_WAKE_THREAD;
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
+{
+ struct optee *optee = dev_id;
+
+ optee_smc_do_bottom_half(optee->notif.ctx);
+
+ return IRQ_HANDLED;
+}
+
+static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
+{
+ struct tee_context *ctx;
+ int rc;
+
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ optee->notif.ctx = ctx;
+ rc = request_threaded_irq(irq, notif_irq_handler,
+ notif_irq_thread_fn,
+ 0, "optee_notification", optee);
+ if (rc)
+ goto err_close_ctx;
+
+ optee->smc.notif_irq = irq;
+
+ return 0;
+
+err_close_ctx:
+ teedev_close_context(optee->notif.ctx);
+ optee->notif.ctx = NULL;
+
+ return rc;
+}
+
+static void optee_smc_notif_uninit_irq(struct optee *optee)
+{
+ if (optee->notif.ctx) {
+ optee_smc_stop_async_notif(optee->notif.ctx);
+ if (optee->smc.notif_irq) {
+ free_irq(optee->smc.notif_irq, optee);
+ irq_dispose_mapping(optee->smc.notif_irq);
+ }
+
+ /*
+ * The thread normally working with optee->notif.ctx was
+ * stopped with free_irq() above.
+ *
+ * Note we're not using teedev_close_context() or
+ * tee_client_close_context() since we have already called
+ * tee_device_put() while initializing to avoid circular
+ * reference counting.
+ */
+ teedev_close_context(optee->notif.ctx);
+ }
+}
+
+/*
+ * 6. Driver initialization
*
- * During driver inititialization is secure world probed to find out which
+ * During driver initialization the secure world is probed to find out which
* features it supports so the driver can be initialized with a matching
* configuration. This involves for instance support for dynamic shared
* memory instead of a static memory carvout.
@@ -952,6 +1083,17 @@ static const struct optee_ops optee_ops = {
.from_msg_param = optee_from_msg_param,
};
+static int enable_async_notif(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0)
+ return -EINVAL;
+ return 0;
+}
+
static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
struct arm_smccc_res res;
@@ -1001,7 +1143,7 @@ static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
}
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
- u32 *sec_caps)
+ u32 *sec_caps, u32 *max_notif_value)
{
union {
struct arm_smccc_res smccc;
@@ -1024,6 +1166,11 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
return false;
*sec_caps = res.result.capabilities;
+ if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
+ *max_notif_value = res.result.max_notif_value;
+ else
+ *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
+
return true;
}
@@ -1188,6 +1335,8 @@ static int optee_smc_remove(struct platform_device *pdev)
*/
optee_disable_shm_cache(optee);
+ optee_smc_notif_uninit_irq(optee);
+
optee_remove_common(optee);
if (optee->smc.memremaped_shm)
@@ -1217,6 +1366,7 @@ static int optee_probe(struct platform_device *pdev)
struct optee *optee = NULL;
void *memremaped_shm = NULL;
struct tee_device *teedev;
+ u32 max_notif_value;
u32 sec_caps;
int rc;
@@ -1236,7 +1386,8 @@ static int optee_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
+ &max_notif_value)) {
pr_warn("capabilities mismatch\n");
return -EINVAL;
}
@@ -1259,7 +1410,7 @@ static int optee_probe(struct platform_device *pdev)
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee) {
rc = -ENOMEM;
- goto err;
+ goto err_free_pool;
}
optee->ops = &optee_ops;
@@ -1269,32 +1420,55 @@ static int optee_probe(struct platform_device *pdev)
teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_free_optee;
}
optee->teedev = teedev;
teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_unreg_teedev;
}
optee->supp_teedev = teedev;
rc = tee_device_register(optee->teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
rc = tee_device_register(optee->supp_teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
- optee_wait_queue_init(&optee->wait_queue);
optee_supp_init(&optee->supp);
optee->smc.memremaped_shm = memremaped_shm;
optee->pool = pool;
+ platform_set_drvdata(pdev, optee);
+ rc = optee_notif_init(optee, max_notif_value);
+ if (rc)
+ goto err_supp_uninit;
+
+ if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+ unsigned int irq;
+
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0) {
+ pr_err("platform_get_irq: ret %d\n", rc);
+ goto err_notif_uninit;
+ }
+ irq = rc;
+
+ rc = optee_smc_notif_init_irq(optee, irq);
+ if (rc) {
+ irq_dispose_mapping(irq);
+ goto err_notif_uninit;
+ }
+ enable_async_notif(optee->smc.invoke_fn);
+ pr_info("Asynchronous notifications enabled\n");
+ }
+
/*
* Ensure that there are no pre-existing shm objects before enabling
* the shm cache so that there's no chance of receiving an invalid
@@ -1309,29 +1483,30 @@ static int optee_probe(struct platform_device *pdev)
if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
- platform_set_drvdata(pdev, optee);
-
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
- if (rc) {
- optee_smc_remove(pdev);
- return rc;
- }
+ if (rc)
+ goto err_disable_shm_cache;
pr_info("initialized driver\n");
return 0;
-err:
- if (optee) {
- /*
- * tee_device_unregister() is safe to call even if the
- * devices hasn't been registered with
- * tee_device_register() yet.
- */
- tee_device_unregister(optee->supp_teedev);
- tee_device_unregister(optee->teedev);
- kfree(optee);
- }
- if (pool)
- tee_shm_pool_free(pool);
+
+err_disable_shm_cache:
+ optee_disable_shm_cache(optee);
+ optee_smc_notif_uninit_irq(optee);
+ optee_unregister_devices();
+err_notif_uninit:
+ optee_notif_uninit(optee);
+err_supp_uninit:
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
+ tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
+ tee_device_unregister(optee->teedev);
+err_free_optee:
+ kfree(optee);
+err_free_pool:
+ tee_shm_pool_free(pool);
if (memremaped_shm)
memunmap(memremaped_shm);
return rc;
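
Editor's note: the probe rework above is a textbook conversion from a single catch-all err: label to a descending ladder of labels, each undoing exactly one successfully completed step. A generic, runnable sketch of the idiom (the acquire_*/release_* resources are hypothetical, not the driver's API):

#include <stdio.h>

/* stub resources so the sketch compiles and runs */
static int acquire_a(void) { puts("A acquired"); return 0; }
static void release_a(void) { puts("A released"); }
static int acquire_b(void) { puts("B acquired"); return 0; }
static void release_b(void) { puts("B released"); }
static int acquire_c(void) { puts("C failed"); return -1; }

static int setup(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		return rc;	/* nothing to unwind yet */
	rc = acquire_b();
	if (rc)
		goto err_release_a;
	rc = acquire_c();
	if (rc)
		goto err_release_b;
	return 0;

err_release_b:			/* unwind in reverse order of setup */
	release_b();
err_release_a:
	release_a();
	return rc;
}

int main(void)
{
	return setup() ? 1 : 0;
}

Compared with the old "if (optee) { ... }" block, the ladder makes it impossible to release a resource that was never acquired, which is exactly what the new err_free_pool/err_free_optee/... labels encode.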
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 2b37bc408fc3..3fc426dad2df 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock);
static struct class *tee_class;
static dev_t tee_devt;
-static struct tee_context *teedev_open(struct tee_device *teedev)
+struct tee_context *teedev_open(struct tee_device *teedev)
{
int rc;
struct tee_context *ctx;
@@ -70,6 +70,7 @@ err:
return ERR_PTR(rc);
}
+EXPORT_SYMBOL_GPL(teedev_open);
void teedev_ctx_get(struct tee_context *ctx)
{
@@ -96,11 +97,14 @@ void teedev_ctx_put(struct tee_context *ctx)
kref_put(&ctx->refcount, teedev_ctx_release);
}
-static void teedev_close_context(struct tee_context *ctx)
+void teedev_close_context(struct tee_context *ctx)
{
- tee_device_put(ctx->teedev);
+ struct tee_device *teedev = ctx->teedev;
+
teedev_ctx_put(ctx);
+ tee_device_put(teedev);
}
+EXPORT_SYMBOL_GPL(teedev_close_context);
static int tee_open(struct inode *inode, struct file *filp)
{
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index d7f44deab5b1..e37691e0bf20 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -354,6 +354,15 @@ config RCAR_GEN3_THERMAL
Enable this to plug the R-Car Gen3 or RZ/G2 thermal sensor driver into
the Linux thermal framework.
+config RZG2L_THERMAL
+ tristate "Renesas RZ/G2L thermal driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ help
+ Enable this to plug the RZ/G2L thermal sensor driver into the Linux
+ thermal framework.
+
config KIRKWOOD_THERMAL
tristate "Temperature sensor on Marvell Kirkwood SoCs"
depends on MACH_KIRKWOOD || COMPILE_TEST
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 82fc3e616e54..f0c36a1530d5 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_SUN8I_THERMAL) += sun8i_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o
+obj-$(CONFIG_RZG2L_THERMAL) += rzg2l_thermal.o
obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
obj-y += samsung/
obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 43b1ae8a7789..0bfb8eebd126 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -462,7 +462,6 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpumask *cpus;
unsigned int frequency;
- unsigned long max_capacity, capacity;
int ret;
/* Request state should be less than max_level */
@@ -479,10 +478,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
if (ret >= 0) {
cpufreq_cdev->cpufreq_state = state;
cpus = cpufreq_cdev->policy->related_cpus;
- max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
- capacity = frequency * max_capacity;
- capacity /= cpufreq_cdev->policy->cpuinfo.max_freq;
- arch_set_thermal_pressure(cpus, max_capacity - capacity);
+ arch_update_thermal_pressure(cpus, frequency);
ret = 0;
}
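
Editor's note: the removed lines computed the thermal pressure by hand and passed the result to arch_set_thermal_pressure(); the replacement arch_update_thermal_pressure() takes the capped frequency directly and performs the scaling internally. A standalone sketch of the retired arithmetic, with made-up numbers (all values hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned long max_capacity = 1024;	/* arch_scale_cpu_capacity() */
	unsigned long max_freq = 2000000;	/* policy->cpuinfo.max_freq, kHz */
	unsigned long frequency = 1500000;	/* frequency after capping, kHz */
	unsigned long capacity, pressure;

	capacity = frequency * max_capacity / max_freq;	/* 768 */
	pressure = max_capacity - capacity;		/* 256 */
	printf("capacity=%lu pressure=%lu\n", capacity, pressure);
	return 0;
}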
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
index 7442e013738f..af666bd9e8d4 100644
--- a/drivers/thermal/imx8mm_thermal.c
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -21,6 +21,7 @@
#define TPS 0x4
#define TRITSR 0x20 /* TMU immediate temp */
+#define TER_ADC_PD BIT(30)
#define TER_EN BIT(31)
#define TRITSR_TEMP0_VAL_MASK 0xff
#define TRITSR_TEMP1_VAL_MASK 0xff0000
@@ -113,6 +114,8 @@ static void imx8mm_tmu_enable(struct imx8mm_tmu *tmu, bool enable)
val = readl_relaxed(tmu->base + TER);
val = enable ? (val | TER_EN) : (val & ~TER_EN);
+ if (tmu->socdata->version == TMU_VER2)
+ val = enable ? (val & ~TER_ADC_PD) : (val | TER_ADC_PD);
writel_relaxed(val, tmu->base + TER);
}
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 2c7473d86a59..16663373b682 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -15,6 +15,7 @@
#include <linux/regmap.h>
#include <linux/thermal.h>
#include <linux/nvmem-consumer.h>
+#include <linux/pm_runtime.h>
#define REG_SET 0x4
#define REG_CLR 0x8
@@ -194,6 +195,7 @@ static struct thermal_soc_data thermal_imx7d_data = {
};
struct imx_thermal_data {
+ struct device *dev;
struct cpufreq_policy *policy;
struct thermal_zone_device *tz;
struct thermal_cooling_device *cdev;
@@ -252,44 +254,15 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp)
const struct thermal_soc_data *soc_data = data->socdata;
struct regmap *map = data->tempmon;
unsigned int n_meas;
- bool wait, run_measurement;
u32 val;
+ int ret;
- run_measurement = !data->irq_enabled;
- if (!run_measurement) {
- /* Check if a measurement is currently in progress */
- regmap_read(map, soc_data->temp_data, &val);
- wait = !(val & soc_data->temp_valid_mask);
- } else {
- /*
- * Every time we measure the temperature, we will power on the
- * temperature sensor, enable measurements, take a reading,
- * disable measurements, power off the temperature sensor.
- */
- regmap_write(map, soc_data->sensor_ctrl + REG_CLR,
- soc_data->power_down_mask);
- regmap_write(map, soc_data->sensor_ctrl + REG_SET,
- soc_data->measure_temp_mask);
-
- wait = true;
- }
-
- /*
- * According to the temp sensor designers, it may require up to ~17us
- * to complete a measurement.
- */
- if (wait)
- usleep_range(20, 50);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
+ return ret;
regmap_read(map, soc_data->temp_data, &val);
- if (run_measurement) {
- regmap_write(map, soc_data->sensor_ctrl + REG_CLR,
- soc_data->measure_temp_mask);
- regmap_write(map, soc_data->sensor_ctrl + REG_SET,
- soc_data->power_down_mask);
- }
-
if ((val & soc_data->temp_valid_mask) == 0) {
dev_dbg(&tz->device, "temp measurement never finished\n");
return -EAGAIN;
@@ -328,6 +301,8 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp)
enable_irq(data->irq);
}
+ pm_runtime_put(data->dev);
+
return 0;
}
@@ -335,24 +310,16 @@ static int imx_change_mode(struct thermal_zone_device *tz,
enum thermal_device_mode mode)
{
struct imx_thermal_data *data = tz->devdata;
- struct regmap *map = data->tempmon;
- const struct thermal_soc_data *soc_data = data->socdata;
if (mode == THERMAL_DEVICE_ENABLED) {
- regmap_write(map, soc_data->sensor_ctrl + REG_CLR,
- soc_data->power_down_mask);
- regmap_write(map, soc_data->sensor_ctrl + REG_SET,
- soc_data->measure_temp_mask);
+ pm_runtime_get(data->dev);
if (!data->irq_enabled) {
data->irq_enabled = true;
enable_irq(data->irq);
}
} else {
- regmap_write(map, soc_data->sensor_ctrl + REG_CLR,
- soc_data->measure_temp_mask);
- regmap_write(map, soc_data->sensor_ctrl + REG_SET,
- soc_data->power_down_mask);
+ pm_runtime_put(data->dev);
if (data->irq_enabled) {
disable_irq(data->irq);
@@ -393,6 +360,11 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
int temp)
{
struct imx_thermal_data *data = tz->devdata;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
+ return ret;
/* do not allow changing critical threshold */
if (trip == IMX_TRIP_CRITICAL)
@@ -406,6 +378,8 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
imx_set_alarm_temp(data, temp);
+ pm_runtime_put(data->dev);
+
return 0;
}
@@ -681,6 +655,8 @@ static int imx_thermal_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ data->dev = &pdev->dev;
+
map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "fsl,tempmon");
if (IS_ERR(map)) {
ret = PTR_ERR(map);
@@ -800,6 +776,16 @@ static int imx_thermal_probe(struct platform_device *pdev)
data->socdata->power_down_mask);
regmap_write(map, data->socdata->sensor_ctrl + REG_SET,
data->socdata->measure_temp_mask);
+ /* After power up, we need a delay before the first access can be done. */
+ usleep_range(20, 50);
+
+ /* the core was configured and enabled just before */
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(data->dev);
+
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
+ goto disable_runtime_pm;
data->irq_enabled = true;
ret = thermal_zone_device_enable(data->tz);
@@ -814,10 +800,15 @@ static int imx_thermal_probe(struct platform_device *pdev)
goto thermal_zone_unregister;
}
+ pm_runtime_put(data->dev);
+
return 0;
thermal_zone_unregister:
thermal_zone_device_unregister(data->tz);
+disable_runtime_pm:
+ pm_runtime_put_noidle(data->dev);
+ pm_runtime_disable(data->dev);
clk_disable:
clk_disable_unprepare(data->thermal_clk);
legacy_cleanup:
@@ -829,13 +820,9 @@ legacy_cleanup:
static int imx_thermal_remove(struct platform_device *pdev)
{
struct imx_thermal_data *data = platform_get_drvdata(pdev);
- struct regmap *map = data->tempmon;
- /* Disable measurements */
- regmap_write(map, data->socdata->sensor_ctrl + REG_SET,
- data->socdata->power_down_mask);
- if (!IS_ERR(data->thermal_clk))
- clk_disable_unprepare(data->thermal_clk);
+ pm_runtime_put_noidle(data->dev);
+ pm_runtime_disable(data->dev);
thermal_zone_device_unregister(data->tz);
imx_thermal_unregister_legacy_cooling(data);
@@ -858,29 +845,79 @@ static int __maybe_unused imx_thermal_suspend(struct device *dev)
ret = thermal_zone_device_disable(data->tz);
if (ret)
return ret;
+
+ return pm_runtime_force_suspend(data->dev);
+}
+
+static int __maybe_unused imx_thermal_resume(struct device *dev)
+{
+ struct imx_thermal_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(data->dev);
+ if (ret)
+ return ret;
+ /* Enable the thermal sensor after resume */
+ return thermal_zone_device_enable(data->tz);
+}
+
+static int __maybe_unused imx_thermal_runtime_suspend(struct device *dev)
+{
+ struct imx_thermal_data *data = dev_get_drvdata(dev);
+ const struct thermal_soc_data *socdata = data->socdata;
+ struct regmap *map = data->tempmon;
+ int ret;
+
+ ret = regmap_write(map, socdata->sensor_ctrl + REG_CLR,
+ socdata->measure_temp_mask);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(map, socdata->sensor_ctrl + REG_SET,
+ socdata->power_down_mask);
+ if (ret)
+ return ret;
+
clk_disable_unprepare(data->thermal_clk);
return 0;
}
-static int __maybe_unused imx_thermal_resume(struct device *dev)
+static int __maybe_unused imx_thermal_runtime_resume(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
+ const struct thermal_soc_data *socdata = data->socdata;
+ struct regmap *map = data->tempmon;
int ret;
ret = clk_prepare_enable(data->thermal_clk);
if (ret)
return ret;
- /* Enabled thermal sensor after resume */
- ret = thermal_zone_device_enable(data->tz);
+
+ ret = regmap_write(map, socdata->sensor_ctrl + REG_CLR,
+ socdata->power_down_mask);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(map, socdata->sensor_ctrl + REG_SET,
+ socdata->measure_temp_mask);
if (ret)
return ret;
+ /*
+ * According to the temp sensor designers, it may require up to ~17us
+ * to complete a measurement.
+ */
+ usleep_range(20, 50);
+
return 0;
}
-static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops,
- imx_thermal_suspend, imx_thermal_resume);
+static const struct dev_pm_ops imx_thermal_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx_thermal_suspend, imx_thermal_resume)
+ SET_RUNTIME_PM_OPS(imx_thermal_runtime_suspend,
+ imx_thermal_runtime_resume, NULL)
+};
static struct platform_driver imx_thermal = {
.driver = {
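
Editor's note: the imx_thermal conversion above moves the sensor power-up/power-down register writes into runtime-PM callbacks, so every reader just brackets its register access with a get/put pair. A minimal sketch of that consumer-side shape, assuming a driver that already called pm_runtime_enable() in probe (struct my_dev and SENSOR_DATA are hypothetical; imx_get_temp() above follows the same pattern):

static int read_sensor(struct my_dev *md, u32 *val)
{
	int ret;

	/* runs the runtime_resume callback if the device was suspended */
	ret = pm_runtime_resume_and_get(md->dev);
	if (ret < 0)
		return ret;

	*val = readl(md->base + SENSOR_DATA);

	/* lets the runtime_suspend callback power the sensor down again */
	pm_runtime_put(md->dev);

	return 0;
}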
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index a478cff8162a..e90690a234c4 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -250,8 +250,9 @@ static int fill_art(char __user *ubuf)
get_single_name(arts[i].source, art_user[i].source_device);
get_single_name(arts[i].target, art_user[i].target_device);
/* copy the rest int data in addition to source and target */
- memcpy(&art_user[i].weight, &arts[i].weight,
- sizeof(u64) * (ACPI_NR_ART_ELEMENTS - 2));
+ BUILD_BUG_ON(sizeof(art_user[i].data) !=
+ sizeof(u64) * (ACPI_NR_ART_ELEMENTS - 2));
+ memcpy(&art_user[i].data, &arts[i].data, sizeof(art_user[i].data));
}
if (copy_to_user(ubuf, art_user, art_len))
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h
index 58822575fd54..78d942477035 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h
@@ -17,17 +17,19 @@
struct art {
acpi_handle source;
acpi_handle target;
- u64 weight;
- u64 ac0_max;
- u64 ac1_max;
- u64 ac2_max;
- u64 ac3_max;
- u64 ac4_max;
- u64 ac5_max;
- u64 ac6_max;
- u64 ac7_max;
- u64 ac8_max;
- u64 ac9_max;
+ struct_group(data,
+ u64 weight;
+ u64 ac0_max;
+ u64 ac1_max;
+ u64 ac2_max;
+ u64 ac3_max;
+ u64 ac4_max;
+ u64 ac5_max;
+ u64 ac6_max;
+ u64 ac7_max;
+ u64 ac8_max;
+ u64 ac9_max;
+ );
} __packed;
struct trt {
@@ -47,17 +49,19 @@ union art_object {
struct {
char source_device[8]; /* ACPI single name */
char target_device[8]; /* ACPI single name */
- u64 weight;
- u64 ac0_max_level;
- u64 ac1_max_level;
- u64 ac2_max_level;
- u64 ac3_max_level;
- u64 ac4_max_level;
- u64 ac5_max_level;
- u64 ac6_max_level;
- u64 ac7_max_level;
- u64 ac8_max_level;
- u64 ac9_max_level;
+ struct_group(data,
+ u64 weight;
+ u64 ac0_max_level;
+ u64 ac1_max_level;
+ u64 ac2_max_level;
+ u64 ac3_max_level;
+ u64 ac4_max_level;
+ u64 ac5_max_level;
+ u64 ac6_max_level;
+ u64 ac7_max_level;
+ u64 ac8_max_level;
+ u64 ac9_max_level;
+ );
};
u64 __data[ACPI_NR_ART_ELEMENTS];
};
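
Editor's note: struct_group() (from <linux/stddef.h>) wraps a run of members in an anonymous union so they remain addressable individually while also forming one named aggregate, which is what lets fill_art() above memcpy() exactly that range without tripping FORTIFY_SOURCE bounds checks. A simplified, runnable userspace model (the macro below drops the tag and attribute arguments the kernel version carries):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified model of the kernel's struct_group() */
#define struct_group(NAME, ...) \
	union { \
		struct { __VA_ARGS__ }; \
		struct { __VA_ARGS__ } NAME; \
	}

struct art {
	uint64_t source;
	struct_group(data,
		uint64_t weight;
		uint64_t ac0_max;
	);
};

int main(void)
{
	struct art a = { .weight = 5, .ac0_max = 9 }, b = { 0 };

	/* copies weight..ac0_max in one operation, like fill_art() */
	memcpy(&b.data, &a.data, sizeof(b.data));
	printf("%llu %llu\n", (unsigned long long)b.weight,
	       (unsigned long long)b.ac0_max);
	return 0;
}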
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 8502b7d8df89..72acb1f61849 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -596,6 +596,7 @@ static const struct acpi_device_id int3400_thermal_match[] = {
{"INT3400", 0},
{"INTC1040", 0},
{"INTC1041", 0},
+ {"INTC10A0", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index c3c4c4d34542..07e25321dfe3 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -285,6 +285,7 @@ static const struct acpi_device_id int3403_device_ids[] = {
{"INT3403", 0},
{"INTC1043", 0},
{"INTC1046", 0},
+ {"INTC10A1", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index be27f633e40a..49932a68abac 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -24,6 +24,7 @@
#define PCI_DEVICE_ID_INTEL_HSB_THERMAL 0x0A03
#define PCI_DEVICE_ID_INTEL_ICL_THERMAL 0x8a03
#define PCI_DEVICE_ID_INTEL_JSL_THERMAL 0x4E03
+#define PCI_DEVICE_ID_INTEL_RPL_THERMAL 0xA71D
#define PCI_DEVICE_ID_INTEL_SKL_THERMAL 0x1903
#define PCI_DEVICE_ID_INTEL_TGL_THERMAL 0x9A03
@@ -80,7 +81,8 @@ void proc_thermal_rfim_remove(struct pci_dev *pdev);
int proc_thermal_mbox_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
void proc_thermal_mbox_remove(struct pci_dev *pdev);
-int processor_thermal_send_mbox_cmd(struct pci_dev *pdev, u16 cmd_id, u32 cmd_data, u64 *cmd_resp);
+int processor_thermal_send_mbox_read_cmd(struct pci_dev *pdev, u16 id, u64 *resp);
+int processor_thermal_send_mbox_write_cmd(struct pci_dev *pdev, u16 id, u32 data);
int proc_thermal_add(struct device *dev, struct proc_thermal_device *priv);
void proc_thermal_remove(struct proc_thermal_device *proc_priv);
int proc_thermal_suspend(struct device *dev);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index b4bcd3fe9eb2..ca40b0967cdd 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -358,6 +358,7 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
+ { PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
{ },
};
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
index 01008ae00e7f..0b89a4340ff4 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
@@ -24,19 +24,15 @@
static DEFINE_MUTEX(mbox_lock);
-static int send_mbox_cmd(struct pci_dev *pdev, u16 cmd_id, u32 cmd_data, u64 *cmd_resp)
+static int wait_for_mbox_ready(struct proc_thermal_device *proc_priv)
{
- struct proc_thermal_device *proc_priv;
u32 retries, data;
int ret;
- mutex_lock(&mbox_lock);
- proc_priv = pci_get_drvdata(pdev);
-
/* Poll for rb bit == 0 */
retries = MBOX_RETRY_COUNT;
do {
- data = readl((void __iomem *) (proc_priv->mmio_base + MBOX_OFFSET_INTERFACE));
+ data = readl(proc_priv->mmio_base + MBOX_OFFSET_INTERFACE);
if (data & BIT_ULL(MBOX_BUSY_BIT)) {
ret = -EBUSY;
continue;
@@ -45,53 +41,78 @@ static int send_mbox_cmd(struct pci_dev *pdev, u16 cmd_id, u32 cmd_data, u64 *cm
break;
} while (--retries);
+ return ret;
+}
+
+static int send_mbox_write_cmd(struct pci_dev *pdev, u16 id, u32 data)
+{
+ struct proc_thermal_device *proc_priv;
+ u32 reg_data;
+ int ret;
+
+ proc_priv = pci_get_drvdata(pdev);
+
+ mutex_lock(&mbox_lock);
+
+ ret = wait_for_mbox_ready(proc_priv);
if (ret)
goto unlock_mbox;
- if (cmd_id == MBOX_CMD_WORKLOAD_TYPE_WRITE)
- writel(cmd_data, (void __iomem *) ((proc_priv->mmio_base + MBOX_OFFSET_DATA)));
-
+ writel(data, (proc_priv->mmio_base + MBOX_OFFSET_DATA));
/* Write command register */
- data = BIT_ULL(MBOX_BUSY_BIT) | cmd_id;
- writel(data, (void __iomem *) ((proc_priv->mmio_base + MBOX_OFFSET_INTERFACE)));
+ reg_data = BIT_ULL(MBOX_BUSY_BIT) | id;
+ writel(reg_data, (proc_priv->mmio_base + MBOX_OFFSET_INTERFACE));
- /* Poll for rb bit == 0 */
- retries = MBOX_RETRY_COUNT;
- do {
- data = readl((void __iomem *) (proc_priv->mmio_base + MBOX_OFFSET_INTERFACE));
- if (data & BIT_ULL(MBOX_BUSY_BIT)) {
- ret = -EBUSY;
- continue;
- }
+ ret = wait_for_mbox_ready(proc_priv);
- if (data) {
- ret = -ENXIO;
- goto unlock_mbox;
- }
+unlock_mbox:
+ mutex_unlock(&mbox_lock);
+ return ret;
+}
- ret = 0;
+static int send_mbox_read_cmd(struct pci_dev *pdev, u16 id, u64 *resp)
+{
+ struct proc_thermal_device *proc_priv;
+ u32 reg_data;
+ int ret;
- if (!cmd_resp)
- break;
+ proc_priv = pci_get_drvdata(pdev);
- if (cmd_id == MBOX_CMD_WORKLOAD_TYPE_READ)
- *cmd_resp = readl((void __iomem *) (proc_priv->mmio_base + MBOX_OFFSET_DATA));
- else
- *cmd_resp = readq((void __iomem *) (proc_priv->mmio_base + MBOX_OFFSET_DATA));
+ mutex_lock(&mbox_lock);
- break;
- } while (--retries);
+ ret = wait_for_mbox_ready(proc_priv);
+ if (ret)
+ goto unlock_mbox;
+
+ /* Write command register */
+ reg_data = BIT_ULL(MBOX_BUSY_BIT) | id;
+ writel(reg_data, (proc_priv->mmio_base + MBOX_OFFSET_INTERFACE));
+
+ ret = wait_for_mbox_ready(proc_priv);
+ if (ret)
+ goto unlock_mbox;
+
+ if (id == MBOX_CMD_WORKLOAD_TYPE_READ)
+ *resp = readl(proc_priv->mmio_base + MBOX_OFFSET_DATA);
+ else
+ *resp = readq(proc_priv->mmio_base + MBOX_OFFSET_DATA);
unlock_mbox:
mutex_unlock(&mbox_lock);
return ret;
}
-int processor_thermal_send_mbox_cmd(struct pci_dev *pdev, u16 cmd_id, u32 cmd_data, u64 *cmd_resp)
+int processor_thermal_send_mbox_read_cmd(struct pci_dev *pdev, u16 id, u64 *resp)
{
- return send_mbox_cmd(pdev, cmd_id, cmd_data, cmd_resp);
+ return send_mbox_read_cmd(pdev, id, resp);
}
-EXPORT_SYMBOL_GPL(processor_thermal_send_mbox_cmd);
+EXPORT_SYMBOL_NS_GPL(processor_thermal_send_mbox_read_cmd, INT340X_THERMAL);
+
+int processor_thermal_send_mbox_write_cmd(struct pci_dev *pdev, u16 id, u32 data)
+{
+ return send_mbox_write_cmd(pdev, id, data);
+}
+EXPORT_SYMBOL_NS_GPL(processor_thermal_send_mbox_write_cmd, INT340X_THERMAL);
/* List of workload types */
static const char * const workload_types[] = {
@@ -104,7 +125,6 @@ static const char * const workload_types[] = {
NULL
};
-
static ssize_t workload_available_types_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -146,7 +166,7 @@ static ssize_t workload_type_store(struct device *dev,
data |= ret;
- ret = send_mbox_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_WRITE, data, NULL);
+ ret = send_mbox_write_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_WRITE, data);
if (ret)
return false;
@@ -161,7 +181,7 @@ static ssize_t workload_type_show(struct device *dev,
u64 cmd_resp;
int ret;
- ret = send_mbox_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, 0, &cmd_resp);
+ ret = send_mbox_read_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, &cmd_resp);
if (ret)
return false;
@@ -186,8 +206,6 @@ static const struct attribute_group workload_req_attribute_group = {
.name = "workload_request"
};
-
-
static bool workload_req_created;
int proc_thermal_mbox_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv)
@@ -196,7 +214,7 @@ int proc_thermal_mbox_add(struct pci_dev *pdev, struct proc_thermal_device *proc
int ret;
/* Check if there is a mailbox support, if fails return success */
- ret = send_mbox_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, 0, &cmd_resp);
+ ret = send_mbox_read_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, &cmd_resp);
if (ret)
return 0;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index e693ec8234fb..8c42e7662033 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -9,6 +9,8 @@
#include <linux/pci.h>
#include "processor_thermal_device.h"
+MODULE_IMPORT_NS(INT340X_THERMAL);
+
struct mmio_reg {
int read_only;
u32 offset;
@@ -194,8 +196,7 @@ static ssize_t rfi_restriction_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- u16 cmd_id = 0x0008;
- u64 cmd_resp;
+ u16 id = 0x0008;
u32 input;
int ret;
@@ -203,7 +204,7 @@ static ssize_t rfi_restriction_store(struct device *dev,
if (ret)
return ret;
- ret = processor_thermal_send_mbox_cmd(to_pci_dev(dev), cmd_id, input, &cmd_resp);
+ ret = processor_thermal_send_mbox_write_cmd(to_pci_dev(dev), id, input);
if (ret)
return ret;
@@ -214,30 +215,30 @@ static ssize_t rfi_restriction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- u16 cmd_id = 0x0007;
- u64 cmd_resp;
+ u16 id = 0x0007;
+ u64 resp;
int ret;
- ret = processor_thermal_send_mbox_cmd(to_pci_dev(dev), cmd_id, 0, &cmd_resp);
+ ret = processor_thermal_send_mbox_read_cmd(to_pci_dev(dev), id, &resp);
if (ret)
return ret;
- return sprintf(buf, "%llu\n", cmd_resp);
+ return sprintf(buf, "%llu\n", resp);
}
static ssize_t ddr_data_rate_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- u16 cmd_id = 0x0107;
- u64 cmd_resp;
+ u16 id = 0x0107;
+ u64 resp;
int ret;
- ret = processor_thermal_send_mbox_cmd(to_pci_dev(dev), cmd_id, 0, &cmd_resp);
+ ret = processor_thermal_send_mbox_read_cmd(to_pci_dev(dev), id, &resp);
if (ret)
return ret;
- return sprintf(buf, "%llu\n", cmd_resp);
+ return sprintf(buf, "%llu\n", resp);
}
static DEVICE_ATTR_RW(rfi_restriction);
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 9b68489a2356..14256421d98c 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -641,7 +641,7 @@ exit_set:
}
/* bind to generic thermal layer as cooling device*/
-static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
+static const struct thermal_cooling_device_ops powerclamp_cooling_ops = {
.get_max_state = powerclamp_get_max_state,
.get_cur_state = powerclamp_get_cur_state,
.set_cur_state = powerclamp_set_cur_state,
diff --git a/drivers/thermal/rzg2l_thermal.c b/drivers/thermal/rzg2l_thermal.c
new file mode 100644
index 000000000000..7a9cdc1f37ca
--- /dev/null
+++ b/drivers/thermal/rzg2l_thermal.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L TSU Thermal Sensor Driver
+ *
+ * Copyright (C) 2021 Renesas Electronics Corporation
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+#include <linux/units.h>
+
+#include "thermal_hwmon.h"
+
+#define CTEMP_MASK 0xFFF
+
+/* default calibration values, if FUSE values are missing */
+#define SW_CALIB0_VAL 3148
+#define SW_CALIB1_VAL 503
+
+/* Register offsets */
+#define TSU_SM 0x00
+#define TSU_ST 0x04
+#define TSU_SAD 0x0C
+#define TSU_SS 0x10
+
+#define OTPTSUTRIM_REG(n) (0x18 + ((n) * 0x4))
+
+/* Sensor Mode Register(TSU_SM) */
+#define TSU_SM_EN_TS BIT(0)
+#define TSU_SM_ADC_EN_TS BIT(1)
+#define TSU_SM_NORMAL_MODE (TSU_SM_EN_TS | TSU_SM_ADC_EN_TS)
+
+/* TSU_ST bits */
+#define TSU_ST_START BIT(0)
+
+#define TSU_SS_CONV_RUNNING BIT(0)
+
+#define TS_CODE_AVE_SCALE(x) ((x) * 1000000)
+#define MCELSIUS(temp) ((temp) * MILLIDEGREE_PER_DEGREE)
+#define TS_CODE_CAP_TIMES 8 /* Capture times */
+
+#define RZG2L_THERMAL_GRAN 500 /* milli Celsius */
+#define RZG2L_TSU_SS_TIMEOUT_US 1000
+
+#define CURVATURE_CORRECTION_CONST 13
+
+struct rzg2l_thermal_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct thermal_zone_device *zone;
+ struct reset_control *rstc;
+ u32 calib0, calib1;
+};
+
+static inline u32 rzg2l_thermal_read(struct rzg2l_thermal_priv *priv, u32 reg)
+{
+ return ioread32(priv->base + reg);
+}
+
+static inline void rzg2l_thermal_write(struct rzg2l_thermal_priv *priv, u32 reg,
+ u32 data)
+{
+ iowrite32(data, priv->base + reg);
+}
+
+static int rzg2l_thermal_get_temp(void *devdata, int *temp)
+{
+ struct rzg2l_thermal_priv *priv = devdata;
+ u32 result = 0, dsensor, ts_code_ave;
+ int val, i;
+
+ for (i = 0; i < TS_CODE_CAP_TIMES; i++) {
+ /* TSU repeats the measurement at 20 microsecond intervals and
+ * automatically updates the measurement results. As per the HW
+ * manual, for measuring temperature we need to read 8 values
+ * consecutively and then take the average.
+ * ts_code_ave = (ts_code[0] + ⋯ + ts_code[7]) / 8
+ */
+ result += rzg2l_thermal_read(priv, TSU_SAD) & CTEMP_MASK;
+ usleep_range(20, 30);
+ }
+
+ ts_code_ave = result / TS_CODE_CAP_TIMES;
+
+ /* Calculate the actual sensor value by applying the curvature correction
+ * formula dsensor = ts_code_ave / (1 + ts_code_ave * 0.000013). Here we
+ * do the integer calculation by scaling all the values by 1000000.
+ */
+ dsensor = TS_CODE_AVE_SCALE(ts_code_ave) /
+ (TS_CODE_AVE_SCALE(1) + (ts_code_ave * CURVATURE_CORRECTION_CONST));
+
+ /* The temperature Tj is calculated by the formula
+ * Tj = (dsensor − calib1) * 165 / (calib0 − calib1) − 40
+ * where calib0 and calib1 are the calibration values.
+ */
+ val = ((dsensor - priv->calib1) * (MCELSIUS(165) /
+ (priv->calib0 - priv->calib1))) - MCELSIUS(40);
+
+ *temp = roundup(val, RZG2L_THERMAL_GRAN);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops rzg2l_tz_of_ops = {
+ .get_temp = rzg2l_thermal_get_temp,
+};
+
+static int rzg2l_thermal_init(struct rzg2l_thermal_priv *priv)
+{
+ u32 reg_val;
+
+ rzg2l_thermal_write(priv, TSU_SM, TSU_SM_NORMAL_MODE);
+ rzg2l_thermal_write(priv, TSU_ST, 0);
+
+ /* Before setting the START bit, TSU should be in normal operating
+ * mode. As per the HW manual, it will take 60 µs to place the TSU
+ * into normal operating mode.
+ */
+ usleep_range(60, 80);
+
+ reg_val = rzg2l_thermal_read(priv, TSU_ST);
+ reg_val |= TSU_ST_START;
+ rzg2l_thermal_write(priv, TSU_ST, reg_val);
+
+ return readl_poll_timeout(priv->base + TSU_SS, reg_val,
+ reg_val == TSU_SS_CONV_RUNNING, 50,
+ RZG2L_TSU_SS_TIMEOUT_US);
+}
+
+static void rzg2l_thermal_reset_assert_pm_disable_put(struct platform_device *pdev)
+{
+ struct rzg2l_thermal_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(priv->rstc);
+}
+
+static int rzg2l_thermal_remove(struct platform_device *pdev)
+{
+ struct rzg2l_thermal_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ thermal_remove_hwmon_sysfs(priv->zone);
+ rzg2l_thermal_reset_assert_pm_disable_put(pdev);
+
+ return 0;
+}
+
+static int rzg2l_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *zone;
+ struct rzg2l_thermal_priv *priv;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->dev = dev;
+ priv->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return dev_err_probe(dev, PTR_ERR(priv->rstc),
+ "failed to get cpg reset");
+
+ ret = reset_control_deassert(priv->rstc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to deassert");
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ priv->calib0 = rzg2l_thermal_read(priv, OTPTSUTRIM_REG(0));
+ if (!priv->calib0)
+ priv->calib0 = SW_CALIB0_VAL;
+
+ priv->calib1 = rzg2l_thermal_read(priv, OTPTSUTRIM_REG(1));
+ if (!priv->calib1)
+ priv->calib1 = SW_CALIB1_VAL;
+
+ platform_set_drvdata(pdev, priv);
+ ret = rzg2l_thermal_init(priv);
+ if (ret) {
+ dev_err(dev, "Failed to start TSU");
+ goto err;
+ }
+
+ zone = devm_thermal_zone_of_sensor_register(dev, 0, priv,
+ &rzg2l_tz_of_ops);
+ if (IS_ERR(zone)) {
+ dev_err(dev, "Can't register thermal zone");
+ ret = PTR_ERR(zone);
+ goto err;
+ }
+
+ priv->zone = zone;
+ priv->zone->tzp->no_hwmon = false;
+ ret = thermal_add_hwmon_sysfs(priv->zone);
+ if (ret)
+ goto err;
+
+ dev_dbg(dev, "TSU probed with %s calibration values",
+ rzg2l_thermal_read(priv, OTPTSUTRIM_REG(0)) ? "hw" : "sw");
+
+ return 0;
+
+err:
+ rzg2l_thermal_reset_assert_pm_disable_put(pdev);
+ return ret;
+}
+
+static const struct of_device_id rzg2l_thermal_dt_ids[] = {
+ { .compatible = "renesas,rzg2l-tsu", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzg2l_thermal_dt_ids);
+
+static struct platform_driver rzg2l_thermal_driver = {
+ .driver = {
+ .name = "rzg2l_thermal",
+ .of_match_table = rzg2l_thermal_dt_ids,
+ },
+ .probe = rzg2l_thermal_probe,
+ .remove = rzg2l_thermal_remove,
+};
+module_platform_driver(rzg2l_thermal_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/G2L TSU Thermal Sensor Driver");
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_LICENSE("GPL v2");
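
Editor's note: the conversion in rzg2l_thermal_get_temp() above is easy to check by hand. A standalone sketch with a hypothetical averaged sensor code and the driver's default calibration values (the integer math mirrors the scaled formulas in the comments above):

#include <stdio.h>

#define CALIB0 3148	/* SW_CALIB0_VAL */
#define CALIB1 503	/* SW_CALIB1_VAL */

int main(void)
{
	unsigned int ts_code_ave = 1800;	/* hypothetical 12-bit average */
	unsigned int dsensor;
	int val;

	/* curvature correction, scaled by 1000000 to stay integral */
	dsensor = (ts_code_ave * 1000000u) / (1000000u + ts_code_ave * 13u);

	/* Tj = (dsensor - calib1) * 165 / (calib0 - calib1) - 40, in mC */
	val = (dsensor - CALIB1) * (165000 / (CALIB0 - CALIB1)) - 40000;

	/* round up to the driver's 500 mC granularity */
	val = (val + 499) / 500 * 500;

	printf("Tj = %d mC\n", val);	/* prints Tj = 38000 mC, i.e. 38 C */
	return 0;
}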
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index b67e72d5644b..79b5abf9d042 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/pm_runtime.h>
#include "tb.h"
@@ -31,7 +32,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
return AE_OK;
/* It needs to reference this NHI */
- if (nhi->pdev->dev.fwnode != args.fwnode)
+ if (dev_fwnode(&nhi->pdev->dev) != args.fwnode)
goto out_put;
/*
@@ -74,8 +75,18 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
const struct device_link *link;
+ /*
+ * Make them both active first to make sure the NHI does
+ * not runtime suspend before the consumer. The
+ * pm_runtime_put() below then allows the consumer to
+ * runtime suspend again (which then allows NHI runtime
+ * suspend too now that the device link is established).
+ */
+ pm_runtime_get_sync(&pdev->dev);
+
link = device_link_add(&pdev->dev, &nhi->pdev->dev,
DL_FLAG_AUTOREMOVE_SUPPLIER |
+ DL_FLAG_RPM_ACTIVE |
DL_FLAG_PM_RUNTIME);
if (link) {
dev_dbg(&nhi->pdev->dev, "created link from %s\n",
@@ -84,6 +95,8 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
dev_name(&pdev->dev));
}
+
+ pm_runtime_put(&pdev->dev);
}
out_put:
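
The hunk above is an instance of the common runtime-PM device-link pattern;
a minimal sketch under stated assumptions (the helper name and the -ENODEV
fallback are illustrative, not part of this patch):

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	/* Resume the consumer, link it to its supplier, then drop our ref. */
	static int example_add_pm_link(struct device *consumer,
				       struct device *supplier)
	{
		struct device_link *link;

		pm_runtime_get_sync(consumer);
		link = device_link_add(consumer, supplier,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_RPM_ACTIVE |
				       DL_FLAG_PM_RUNTIME);
		pm_runtime_put(consumer);

		return link ? 0 : -ENODEV;
	}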
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 6255f1ef9599..fff0c740c8f3 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1741,8 +1741,13 @@ static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
if (!n)
return;
- INIT_WORK(&n->work, icm_handle_notification);
n->pkg = kmemdup(buf, size, GFP_KERNEL);
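+	/* Bail out and free @n if the packet could not be copied */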
+ if (!n->pkg) {
+ kfree(n);
+ return;
+ }
+
+ INIT_WORK(&n->work, icm_handle_notification);
n->tb = tb;
queue_work(tb->wq, &n->work);
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index c178f0d7beab..53495a38b4eb 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -193,6 +193,30 @@ int tb_lc_start_lane_initialization(struct tb_port *port)
return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}
+/**
+ * tb_lc_is_clx_supported() - Check whether CLx is supported by the lane adapter
+ * @port: Lane adapter
+ *
+ * The TB_LC_LINK_ATTR_CPS bit reflects whether the link supports CLx,
+ * including active cables (if connected on the link).
+ */
+bool tb_lc_is_clx_supported(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int cap, ret;
+ u32 val;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return false;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_ATTR, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TB_LC_LINK_ATTR_CPS);
+}
+
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
unsigned int flags)
{
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 564e2f42cebd..299712accfe9 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -85,11 +85,12 @@ static int tb_path_find_src_hopid(struct tb_port *src,
* @dst_hopid: HopID to the @dst (%-1 if don't care)
* @last: Last port is filled here if not %NULL
* @name: Name of the path
+ * @alloc_hopid: Allocate HopIDs for the ports
*
* Follows a path starting from @src and @src_hopid to the last output
- * port of the path. Allocates HopIDs for the visited ports. Call
- * tb_path_free() to release the path and allocated HopIDs when the path
- * is not needed anymore.
+ * port of the path. Allocates HopIDs for the visited ports (if
+ * @alloc_hopid is true). Call tb_path_free() to release the path and
+ * allocated HopIDs when the path is not needed anymore.
*
* Note that the function also discovers incomplete paths so the caller should check
* that the @dst port is the expected one. If it is not, the path can be
@@ -99,7 +100,8 @@ static int tb_path_find_src_hopid(struct tb_port *src,
*/
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
struct tb_port *dst, int dst_hopid,
- struct tb_port **last, const char *name)
+ struct tb_port **last, const char *name,
+ bool alloc_hopid)
{
struct tb_port *out_port;
struct tb_regs_hop hop;
@@ -156,6 +158,7 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
path->tb = src->sw->tb;
path->path_length = num_hops;
path->activated = true;
+ path->alloc_hopid = alloc_hopid;
path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
if (!path->hops) {
@@ -177,13 +180,14 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
goto err;
}
- if (tb_port_alloc_in_hopid(p, h, h) < 0)
+ if (alloc_hopid && tb_port_alloc_in_hopid(p, h, h) < 0)
goto err;
out_port = &sw->ports[hop.out_port];
next_hop = hop.next_hop;
- if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
+ if (alloc_hopid &&
+ tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
tb_port_release_in_hopid(p, h);
goto err;
}
@@ -263,6 +267,8 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
return NULL;
}
+ path->alloc_hopid = true;
+
in_hopid = src_hopid;
out_port = NULL;
@@ -345,17 +351,19 @@ err:
*/
void tb_path_free(struct tb_path *path)
{
- int i;
-
- for (i = 0; i < path->path_length; i++) {
- const struct tb_path_hop *hop = &path->hops[i];
-
- if (hop->in_port)
- tb_port_release_in_hopid(hop->in_port,
- hop->in_hop_index);
- if (hop->out_port)
- tb_port_release_out_hopid(hop->out_port,
- hop->next_hop_index);
+ if (path->alloc_hopid) {
+ int i;
+
+ for (i = 0; i < path->path_length; i++) {
+ const struct tb_path_hop *hop = &path->hops[i];
+
+ if (hop->in_port)
+ tb_port_release_in_hopid(hop->in_port,
+ hop->in_hop_index);
+ if (hop->out_port)
+ tb_port_release_out_hopid(hop->out_port,
+ hop->next_hop_index);
+ }
}
kfree(path->hops);
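
The new @alloc_hopid flag lets callers discover a path without consuming
HopIDs; the resume teardown added to tb.c later in this patch is the user.
Condensed from that hunk (sketch only, declarations added for completeness):

	struct tb_tunnel *tunnel, *n;
	LIST_HEAD(tunnels);

	/* Discover firmware-created tunnels without allocating HopIDs. */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}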
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 722694052f4a..8c29bd556ae0 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -324,15 +324,10 @@ struct device_type tb_retimer_type = {
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
- struct usb4_port *usb4;
struct tb_retimer *rt;
u32 vendor, device;
int ret;
- usb4 = port->usb4;
- if (!usb4)
- return -EINVAL;
-
ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
sizeof(vendor));
if (ret) {
@@ -374,7 +369,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
rt->port = port;
rt->tb = port->sw->tb;
- rt->dev.parent = &usb4->dev;
+ rt->dev.parent = &port->usb4->dev;
rt->dev.bus = &tb_bus_type;
rt->dev.type = &tb_retimer_type;
dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
@@ -453,6 +448,13 @@ int tb_retimer_scan(struct tb_port *port, bool add)
{
u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
int ret, i, last_idx = 0;
+ struct usb4_port *usb4;
+
+ usb4 = port->usb4;
+ if (!usb4)
+ return 0;
+
+ pm_runtime_get_sync(&usb4->dev);
/*
* Send broadcast RT to make sure retimer indices facing this
@@ -460,7 +462,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
*/
ret = usb4_port_enumerate_retimers(port);
if (ret)
- return ret;
+ goto out;
/*
* Enable sideband channel for each retimer. We can do this
@@ -490,8 +492,10 @@ int tb_retimer_scan(struct tb_port *port, bool add)
break;
}
- if (!last_idx)
- return 0;
+ if (!last_idx) {
+ ret = 0;
+ goto out;
+ }
/* Add on-board retimers if they do not exist already */
for (i = 1; i <= last_idx; i++) {
@@ -507,7 +511,11 @@ int tb_retimer_scan(struct tb_port *port, bool add)
}
}
- return 0;
+out:
+ pm_runtime_mark_last_busy(&usb4->dev);
+ pm_runtime_put_autosuspend(&usb4->dev);
+
+ return ret;
}
static int remove_retimer(struct device *dev, void *data)
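
The scan above is now bracketed by runtime-PM calls on the USB4 port; reduced
to its essentials the bracket looks like this (do_scan_work() stands in for
the retimer enumeration and is illustrative):

	pm_runtime_get_sync(&usb4->dev);
	ret = do_scan_work(port);
	pm_runtime_mark_last_busy(&usb4->dev);
	pm_runtime_put_autosuspend(&usb4->dev);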
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 3014146081c1..d026e305fe5d 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -13,6 +13,7 @@
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "tb.h"
@@ -26,6 +27,10 @@ struct nvm_auth_status {
u32 status;
};
+static bool clx_enabled = true;
+module_param_named(clx, clx_enabled, bool, 0444);
+MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
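+/*
+ * e.g. booting with thunderbolt.clx=0 keeps the high-speed lanes out of
+ * the low-power states
+ */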
+
/*
* Hold NVM authentication failure status per switch. This information
* needs to stay around even when the switch gets power cycled so we
@@ -623,6 +628,9 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
return 0;
nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
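+	/* Never subtract more NFC credits than the adapter currently has */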
+ if (credits < 0)
+ credits = max_t(int, -nfc_credits, credits);
+
nfc_credits += credits;
tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
@@ -1319,7 +1327,9 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
* @aux_tx: AUX TX Hop ID
* @aux_rx: AUX RX Hop ID
*
- * Programs specified Hop IDs for DP IN/OUT port.
+ * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
+ * router DP adapters too but does not program the values as the fields
+ * are read-only.
*/
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
unsigned int aux_tx, unsigned int aux_rx)
@@ -1327,6 +1337,9 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
u32 data[2];
int ret;
+ if (tb_switch_is_usb4(port->sw))
+ return 0;
+
ret = tb_port_read(port, data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
@@ -1449,6 +1462,40 @@ int tb_switch_reset(struct tb_switch *sw)
return res.err;
}
+/**
+ * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
+ * @sw: Router to read the offset value from
+ * @offset: Offset in the router config space to read from
+ * @bit: Bit mask in the offset to wait for
+ * @value: Value of the bits to wait for
+ * @timeout_msec: Timeout in milliseconds to wait
+ *
+ * Waits until the specified bits at the specified offset reach the given
+ * value. Returns %0 in case of success, %-ETIMEDOUT if @value was not
+ * reached within the given timeout, or a negative errno in case of failure.
+ */
+int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+ u32 value, int timeout_msec)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if ((val & bit) == value)
+ return 0;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
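+
+/*
+ * Example: tb_switch_pcie_bridge_write() below polls with
+ * tb_switch_wait_for_bit(sw, offset, TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK,
+ * 0, 100), i.e. it waits up to 100 ms for the router to ack the command.
+ */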
+
/*
* tb_plug_events_active() - enable/disable plug events on a switch
*
@@ -2186,10 +2233,18 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
if (ret > 0)
sw->cap_plug_events = ret;
+ ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
+ if (ret > 0)
+ sw->cap_vsec_tmu = ret;
+
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
if (ret > 0)
sw->cap_lc = ret;
+ ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
+ if (ret > 0)
+ sw->cap_lp = ret;
+
/* Root switch is always authorized */
if (!route)
sw->authorized = true;
@@ -2996,6 +3051,13 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
tb_sw_dbg(sw, "suspending switch\n");
+ /*
+	 * Actually this is needed only for Titan Ridge, but for simplicity
+	 * it can be done for USB4 devices too as CLx is re-enabled at resume.
+ */
+ if (tb_switch_disable_clx(sw, TB_CL0S))
+ tb_sw_warn(sw, "failed to disable CLx on upstream port\n");
+
err = tb_plug_events_active(sw, false);
if (err)
return;
@@ -3048,9 +3110,20 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
*/
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
+ int ret;
+
if (tb_switch_is_usb4(sw))
- return usb4_switch_alloc_dp_resource(sw, in);
- return tb_lc_dp_sink_alloc(sw, in);
+ ret = usb4_switch_alloc_dp_resource(sw, in);
+ else
+ ret = tb_lc_dp_sink_alloc(sw, in);
+
+ if (ret)
+ tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
+ in->port);
+ else
+ tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
+
+ return ret;
}
/**
@@ -3073,6 +3146,8 @@ void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
if (ret)
tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
in->port);
+ else
+ tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
}
struct tb_sw_lookup {
@@ -3202,3 +3277,415 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
return NULL;
}
+
+static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
+{
+ u32 phy;
+ int ret;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (secondary)
+ phy |= LANE_ADP_CS_1_PMS;
+ else
+ phy &= ~LANE_ADP_CS_1_PMS;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_pm_secondary_enable(struct tb_port *port)
+{
+ return __tb_port_pm_secondary_set(port, true);
+}
+
+static int tb_port_pm_secondary_disable(struct tb_port *port)
+{
+ return __tb_port_pm_secondary_set(port, false);
+}
+
+static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!tb_route(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+ ret = tb_port_pm_secondary_enable(up);
+ if (ret)
+ return ret;
+
+ return tb_port_pm_secondary_disable(down);
+}
+
+/* Called for USB4 or Titan Ridge routers only */
+static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
+{
+ u32 mask, val;
+ bool ret;
+
+ /* Don't enable CLx in case of two single-lane links */
+ if (!port->bonded && port->dual_link_port)
+ return false;
+
+ /* Don't enable CLx in case of inter-domain link */
+ if (port->xdomain)
+ return false;
+
+ if (tb_switch_is_usb4(port->sw)) {
+ if (!usb4_port_clx_supported(port))
+ return false;
+ } else if (!tb_lc_is_clx_supported(port)) {
+ return false;
+ }
+
+ switch (clx) {
+ case TB_CL0S:
+		/* CL0s support also requires CL1 support */
+ mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
+ break;
+
+	/* For now we support only CL0s, not CL1 or CL2 */
+ case TB_CL1:
+ case TB_CL2:
+ default:
+ return false;
+ }
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+ return false;
+
+ return !!(val & mask);
+}
+
+static inline bool tb_port_cl0s_supported(struct tb_port *port)
+{
+ return tb_port_clx_supported(port, TB_CL0S);
+}
+
+static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
+{
+ u32 phy, mask;
+ int ret;
+
+	/* Enabling CL0s also requires enabling CL1 */
+ mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ phy |= mask;
+ else
+ phy &= ~mask;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_cl0s_disable(struct tb_port *port)
+{
+ return __tb_port_cl0s_set(port, false);
+}
+
+static int tb_port_cl0s_enable(struct tb_port *port)
+{
+ return __tb_port_cl0s_set(port, true);
+}
+
+static int tb_switch_enable_cl0s(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ bool up_cl0s_support, down_cl0s_support;
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!tb_switch_is_clx_supported(sw))
+ return 0;
+
+ /*
+	 * CLx for the host router's downstream port is enabled as part
+	 * of the downstream router enabling procedure.
+ */
+ if (!tb_route(sw))
+ return 0;
+
+ /* Enable CLx only for first hop router (depth = 1) */
+ if (tb_route(parent))
+ return 0;
+
+ ret = tb_switch_pm_secondary_resolve(sw);
+ if (ret)
+ return ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ up_cl0s_support = tb_port_cl0s_supported(up);
+ down_cl0s_support = tb_port_cl0s_supported(down);
+
+ tb_port_dbg(up, "CL0s %ssupported\n",
+ up_cl0s_support ? "" : "not ");
+ tb_port_dbg(down, "CL0s %ssupported\n",
+ down_cl0s_support ? "" : "not ");
+
+ if (!up_cl0s_support || !down_cl0s_support)
+ return -EOPNOTSUPP;
+
+ ret = tb_port_cl0s_enable(up);
+ if (ret)
+ return ret;
+
+ ret = tb_port_cl0s_enable(down);
+ if (ret) {
+ tb_port_cl0s_disable(up);
+ return ret;
+ }
+
+ ret = tb_switch_mask_clx_objections(sw);
+ if (ret) {
+ tb_port_cl0s_disable(up);
+ tb_port_cl0s_disable(down);
+ return ret;
+ }
+
+ sw->clx = TB_CL0S;
+
+ tb_port_dbg(up, "CL0s enabled\n");
+ return 0;
+}
+
+/**
+ * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
+ * @sw: Router to enable CLx for
+ * @clx: The CLx state to enable
+ *
+ * Enables the CLx state only for the first hop router. That is the most
+ * common use case, intended for better thermal management, and helps to
+ * improve performance. CLx is enabled only if both sides of the link
+ * support CLx, neither side of the link is configured as two single-lane
+ * links, and the link is not an inter-domain link. The complete set of
+ * conditions is described in CM Guide 1.0 section 8.1.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
+{
+ struct tb_switch *root_sw = sw->tb->root_switch;
+
+ if (!clx_enabled)
+ return 0;
+
+ /*
+ * CLx is not enabled and validated on Intel USB4 platforms before
+ * Alder Lake.
+ */
+ if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
+ return 0;
+
+ switch (clx) {
+ case TB_CL0S:
+ return tb_switch_enable_cl0s(sw);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tb_switch_disable_cl0s(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!tb_switch_is_clx_supported(sw))
+ return 0;
+
+ /*
+	 * CLx for the host router's downstream port is disabled as part
+	 * of the downstream router disabling procedure.
+ */
+ if (!tb_route(sw))
+ return 0;
+
+ /* Disable CLx only for first hop router (depth = 1) */
+ if (tb_route(parent))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+ ret = tb_port_cl0s_disable(up);
+ if (ret)
+ return ret;
+
+ ret = tb_port_cl0s_disable(down);
+ if (ret)
+ return ret;
+
+ sw->clx = TB_CLX_DISABLE;
+
+ tb_port_dbg(up, "CL0s disabled\n");
+ return 0;
+}
+
+/**
+ * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
+ * @sw: Router to disable CLx for
+ * @clx: The CLx state to disable
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
+{
+ if (!clx_enabled)
+ return 0;
+
+ switch (clx) {
+ case TB_CL0S:
+ return tb_switch_disable_cl0s(sw);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * tb_switch_mask_clx_objections() - Mask CLx objections for a router
+ * @sw: Router to mask objections for
+ *
+ * Mask the objections coming from the second depth routers in order to
+ * stop these objections from interfering with the CLx states of the first
+ * depth link.
+ */
+int tb_switch_mask_clx_objections(struct tb_switch *sw)
+{
+ int up_port = sw->config.upstream_port_number;
+ u32 offset, val[2], mask_obj, unmask_obj;
+ int ret, i;
+
+ /* Only Titan Ridge of pre-USB4 devices support CLx states */
+ if (!tb_switch_is_titan_ridge(sw))
+ return 0;
+
+ if (!tb_route(sw))
+ return 0;
+
+ /*
+ * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
+ * Port A consists of lane adapters 1,2 and
+ * Port B consists of lane adapters 3,4
+	 * If the upstream port is A (lanes 1,2), we mask objections from
+	 * port B (lanes 3,4) and unmask objections from port A, and vice versa.
+ */
+ if (up_port == 1) {
+ mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
+ unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
+ offset = TB_LOW_PWR_C1_CL1;
+ } else {
+ mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
+ unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
+ offset = TB_LOW_PWR_C3_CL1;
+ }
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lp + offset, ARRAY_SIZE(val));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] |= mask_obj;
+ val[i] &= ~unmask_obj;
+ }
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lp + offset, ARRAY_SIZE(val));
+}
+
+/*
+ * Can be used to read/write a specified PCIe bridge of any Thunderbolt 3
+ * device. For now it is used only for Titan Ridge.
+ */
+static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
+ unsigned int pcie_offset, u32 value)
+{
+ u32 offset, command, val;
+ int ret;
+
+ if (sw->generation != 3)
+ return -EOPNOTSUPP;
+
+ offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
+ ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
+ command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
+ command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
+ command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
+ << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
+ command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
+
+ offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
+
+ ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_wait_for_bit(sw, offset,
+ TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
+ * @sw: Router to enable PCIe L1
+ *
+ * For a Titan Ridge switch to enter the CLx state, its PCIe bridges shall
+ * enable entry to the PCIe L1 state. Shall be called after the upstream
+ * PCIe tunnel has been configured. Due to an Intel platform limitation,
+ * shall be called only for the first hop switch.
+ */
+int tb_switch_pcie_l1_enable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ int ret;
+
+ if (!tb_route(sw))
+ return 0;
+
+ if (!tb_switch_is_titan_ridge(sw))
+ return 0;
+
+ /* Enable PCIe L1 enable only for first hop router (depth = 1) */
+ if (tb_route(parent))
+ return 0;
+
+ /* Write to downstream PCIe bridge #5 aka Dn4 */
+ ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
+ if (ret)
+ return ret;
+
+ /* Write to Upstream PCIe bridge #0 aka Up0 */
+ return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
+}
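
Taken together, the CLx bring-up for a first-depth Titan Ridge router added
in this file proceeds roughly as follows (condensed sketch; the real callers
with full error handling are in the tb.c hunks below):

	/* 1) Enable lane CL0s/CL1 on both ends of the first-hop link. */
	if (tb_switch_enable_clx(sw, TB_CL0S))
		tb_sw_warn(sw, "failed to enable CLx on upstream port\n");

	/* 2) Request uni-directional HiFi TMU when CLx got enabled. */
	tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
				tb_switch_is_clx_enabled(sw));

	/* 3) Once the PCIe tunnel is up, let the bridges enter L1. */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");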
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 2897a77d44c3..cbd0ad85ffb1 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -105,10 +105,11 @@ static void tb_remove_dp_resources(struct tb_switch *sw)
}
}
-static void tb_discover_tunnels(struct tb_switch *sw)
+static void tb_switch_discover_tunnels(struct tb_switch *sw,
+ struct list_head *list,
+ bool alloc_hopids)
{
struct tb *tb = sw->tb;
- struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
@@ -116,24 +117,41 @@ static void tb_discover_tunnels(struct tb_switch *sw)
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
- tunnel = tb_tunnel_discover_dp(tb, port);
+ tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
break;
case TB_TYPE_PCIE_DOWN:
- tunnel = tb_tunnel_discover_pci(tb, port);
+ tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
break;
case TB_TYPE_USB3_DOWN:
- tunnel = tb_tunnel_discover_usb3(tb, port);
+ tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
break;
default:
break;
}
- if (!tunnel)
- continue;
+ if (tunnel)
+ list_add_tail(&tunnel->list, list);
+ }
+
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port)) {
+ tb_switch_discover_tunnels(port->remote->sw, list,
+ alloc_hopids);
+ }
+ }
+}
+
+static void tb_discover_tunnels(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (tb_tunnel_is_pci(tunnel)) {
struct tb_switch *parent = tunnel->dst_port->sw;
@@ -146,13 +164,6 @@ static void tb_discover_tunnels(struct tb_switch *sw)
pm_runtime_get_sync(&tunnel->src_port->sw->dev);
pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
}
-
- list_add_tail(&tunnel->list, &tcm->tunnel_list);
- }
-
- tb_switch_for_each_port(sw, port) {
- if (tb_port_has_remote(port))
- tb_discover_tunnels(port->remote->sw);
}
}
@@ -210,7 +221,7 @@ static int tb_enable_tmu(struct tb_switch *sw)
int ret;
/* If it is already enabled in correct mode, don't touch it */
- if (tb_switch_tmu_is_enabled(sw))
+ if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
return 0;
ret = tb_switch_tmu_disable(sw);
@@ -658,6 +669,11 @@ static void tb_scan_port(struct tb_port *port)
tb_switch_lane_bonding_enable(sw);
/* Set the link configured */
tb_switch_configure_link(sw);
+ if (tb_switch_enable_clx(sw, TB_CL0S))
+ tb_sw_warn(sw, "failed to enable CLx on upstream port\n");
+
+ tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
+ tb_switch_is_clx_enabled(sw));
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to enable TMU\n");
@@ -1076,6 +1092,13 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
return -EIO;
}
+ /*
+ * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
+ * here.
+ */
+ if (tb_switch_pcie_l1_enable(sw))
+ tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
+
list_add_tail(&tunnel->list, &tcm->tunnel_list);
return 0;
}
@@ -1364,12 +1387,13 @@ static int tb_start(struct tb *tb)
return ret;
}
+ tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
- tb_discover_tunnels(tb->root_switch);
+ tb_discover_tunnels(tb);
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
@@ -1407,6 +1431,14 @@ static void tb_restore_children(struct tb_switch *sw)
if (sw->is_unplugged)
return;
+ if (tb_switch_enable_clx(sw, TB_CL0S))
+ tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");
+
+ /*
+ * tb_switch_tmu_configure() was already called when the switch was
+ * added before entering system sleep or runtime suspend,
+ * so no need to call it again before enabling TMU.
+ */
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to restore TMU configuration\n");
@@ -1429,6 +1461,8 @@ static int tb_resume_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel, *n;
+ unsigned int usb3_delay = 0;
+ LIST_HEAD(tunnels);
tb_dbg(tb, "resuming...\n");
@@ -1439,8 +1473,31 @@ static int tb_resume_noirq(struct tb *tb)
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
- list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+
+ /*
+	 * If we get here from suspend to disk, the boot firmware or the
+	 * restore kernel might have created tunnels of its own. Since we
+	 * cannot be sure they are usable for us, we find and tear them
+	 * down.
+ */
+ tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
+ list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
+ if (tb_tunnel_is_usb3(tunnel))
+ usb3_delay = 500;
+ tb_tunnel_deactivate(tunnel);
+ tb_tunnel_free(tunnel);
+ }
+
+ /* Re-create our tunnels now */
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
+ /* USB3 requires delay before it can be re-activated */
+ if (tb_tunnel_is_usb3(tunnel)) {
+ msleep(usb3_delay);
+ /* Only need to do it once */
+ usb3_delay = 0;
+ }
tb_tunnel_restart(tunnel);
+ }
if (!list_empty(&tcm->tunnel_list)) {
/*
* the pcie links need some time to get going.
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 725104c83e3d..74d3b14f004e 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -89,15 +89,31 @@ enum tb_switch_tmu_rate {
* @cap: Offset to the TMU capability (%0 if not found)
* @has_ucap: Does the switch support uni-directional mode
* @rate: TMU refresh rate related to upstream switch. In case of root
- * switch this holds the domain rate.
+ * switch this holds the domain rate. Reflects the HW setting.
* @unidirectional: Is the TMU in uni-directional or bi-directional mode
- * related to upstream switch. Don't case for root switch.
+ * related to upstream switch. Don't care for root switch.
+ * Reflects the HW setting.
+ * @unidirectional_request: The new TMU mode (uni-directional or bi-directional)
+ * that is requested to be set. Related to upstream switch.
+ * Don't care for root switch.
+ * @rate_request: TMU new refresh rate related to upstream switch that is
+ * requested to be set. In case of root switch, this holds
+ * the new domain rate that is requested to be set.
*/
struct tb_switch_tmu {
int cap;
bool has_ucap;
enum tb_switch_tmu_rate rate;
bool unidirectional;
+ bool unidirectional_request;
+ enum tb_switch_tmu_rate rate_request;
+};
+
+enum tb_clx {
+ TB_CLX_DISABLE,
+ TB_CL0S,
+ TB_CL1,
+ TB_CL2,
};
/**
@@ -122,7 +138,9 @@ struct tb_switch_tmu {
* @link_usb4: Upstream link is USB4
* @generation: Switch Thunderbolt generation
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
+ * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found)
* @cap_lc: Offset to the link controller capability (%0 if not found)
+ * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found)
* @is_unplugged: The switch is going away
* @drom: DROM of the switch (%NULL if not found)
* @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
@@ -148,6 +166,7 @@ struct tb_switch_tmu {
* @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN
* @max_pcie_credits: Router preferred number of buffers for PCIe
* @max_dma_credits: Router preferred number of buffers for DMA/P2P
+ * @clx: CLx state on the upstream link of the router
*
* When the switch is being added or removed to the domain (other
* switches) you need to have domain lock held.
@@ -172,7 +191,9 @@ struct tb_switch {
bool link_usb4;
unsigned int generation;
int cap_plug_events;
+ int cap_vsec_tmu;
int cap_lc;
+ int cap_lp;
bool is_unplugged;
u8 *drom;
struct tb_nvm *nvm;
@@ -196,6 +217,7 @@ struct tb_switch {
unsigned int min_dp_main_credits;
unsigned int max_pcie_credits;
unsigned int max_dma_credits;
+ enum tb_clx clx;
};
/**
@@ -354,6 +376,7 @@ enum tb_path_port {
* when deactivating this path
* @hops: Path hops
* @path_length: How many hops the path uses
+ * @alloc_hopid: Does this path consume port HopID
*
* A path consists of a number of hops (see &struct tb_path_hop). To
* establish a PCIe tunnel two paths have to be created between the two
@@ -374,6 +397,7 @@ struct tb_path {
bool clear_fc;
struct tb_path_hop *hops;
int path_length;
+ bool alloc_hopid;
};
/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
@@ -740,6 +764,8 @@ void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb_switch *sw);
+int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+ u32 value, int timeout_msec);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
enum tb_port_type type);
@@ -851,6 +877,20 @@ static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
return false;
}
+static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
+{
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
+ return true;
+ }
+ }
+ return false;
+}
+
/**
* tb_switch_is_usb4() - Is the switch USB4 compliant
* @sw: Switch to check
@@ -889,13 +929,64 @@ int tb_switch_tmu_init(struct tb_switch *sw);
int tb_switch_tmu_post_time(struct tb_switch *sw);
int tb_switch_tmu_disable(struct tb_switch *sw);
int tb_switch_tmu_enable(struct tb_switch *sw);
-
-static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
+void tb_switch_tmu_configure(struct tb_switch *sw,
+ enum tb_switch_tmu_rate rate,
+ bool unidirectional);
+/**
+ * tb_switch_tmu_hifi_is_enabled() - Checks if the specified TMU mode is enabled
+ * @sw: Router whose TMU mode to check
+ * @unidirectional: If uni-directional (bi-directional otherwise)
+ *
+ * Returns true if the hardware TMU configuration matches the one passed
+ * in as a parameter: HiFi rate, either uni-directional or bi-directional.
+ */
+static inline bool tb_switch_tmu_hifi_is_enabled(const struct tb_switch *sw,
+ bool unidirectional)
{
return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
- !sw->tmu.unidirectional;
+ sw->tmu.unidirectional == unidirectional;
}
+int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx);
+int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx);
+
+/**
+ * tb_switch_is_clx_enabled() - Checks if CLx is enabled
+ * @sw: Router to check the CLx state for
+ *
+ * Checks if CLx is enabled on the router's upstream link.
+ * Not applicable for a host router.
+ */
+static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw)
+{
+ return sw->clx != TB_CLX_DISABLE;
+}
+
+/**
+ * tb_switch_is_cl0s_enabled() - Checks if CL0s is enabled
+ * @sw: Router to check for the CL0s
+ *
+ * Checks if CL0s is enabled on the router's upstream link.
+ * Not applicable for a host router.
+ */
+static inline bool tb_switch_is_cl0s_enabled(const struct tb_switch *sw)
+{
+ return sw->clx == TB_CL0S;
+}
+
+/**
+ * tb_switch_is_clx_supported() - Is CLx supported on this type of router
+ * @sw: The router to check CLx support for
+ */
+static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
+{
+ return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
+int tb_switch_mask_clx_objections(struct tb_switch *sw);
+
+int tb_switch_pcie_l1_enable(struct tb_switch *sw);
+
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
@@ -957,7 +1048,8 @@ int tb_dp_port_enable(struct tb_port *port, bool enable);
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
struct tb_port *dst, int dst_hopid,
- struct tb_port **last, const char *name);
+ struct tb_port **last, const char *name,
+ bool alloc_hopid);
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
struct tb_port *dst, int dst_hopid, int link_nr,
const char *name);
@@ -988,6 +1080,7 @@ void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
void tb_lc_unconfigure_xdomain(struct tb_port *port);
int tb_lc_start_lane_initialization(struct tb_port *port);
+bool tb_lc_is_clx_supported(struct tb_port *port);
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
int tb_lc_set_sleep(struct tb_switch *sw);
bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
@@ -1074,6 +1167,7 @@ void usb4_port_unconfigure_xdomain(struct tb_port *port);
int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
+bool usb4_port_clx_supported(struct tb_port *port);
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index bcabfcb2fd03..fe1afa44c56d 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -535,15 +535,25 @@ struct tb_xdp_header {
u32 type;
};
+struct tb_xdp_error_response {
+ struct tb_xdp_header hdr;
+ u32 error;
+};
+
struct tb_xdp_uuid {
struct tb_xdp_header hdr;
};
struct tb_xdp_uuid_response {
- struct tb_xdp_header hdr;
- uuid_t src_uuid;
- u32 src_route_hi;
- u32 src_route_lo;
+ union {
+ struct tb_xdp_error_response err;
+ struct {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ u32 src_route_hi;
+ u32 src_route_lo;
+ };
+ };
};
struct tb_xdp_properties {
@@ -555,13 +565,18 @@ struct tb_xdp_properties {
};
struct tb_xdp_properties_response {
- struct tb_xdp_header hdr;
- uuid_t src_uuid;
- uuid_t dst_uuid;
- u16 offset;
- u16 data_length;
- u32 generation;
- u32 data[0];
+ union {
+ struct tb_xdp_error_response err;
+ struct {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ uuid_t dst_uuid;
+ u16 offset;
+ u16 data_length;
+ u32 generation;
+ u32 data[];
+ };
+ };
};
/*
@@ -580,7 +595,10 @@ struct tb_xdp_properties_changed {
};
struct tb_xdp_properties_changed_response {
- struct tb_xdp_header hdr;
+ union {
+ struct tb_xdp_error_response err;
+ struct tb_xdp_header hdr;
+ };
};
enum tb_xdp_error {
@@ -591,9 +609,4 @@ enum tb_xdp_error {
ERROR_NOT_READY,
};
-struct tb_xdp_error_response {
- struct tb_xdp_header hdr;
- u32 error;
-};
-
#endif
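
With the unions above, a reply buffer can be checked for an error response
before the success payload is trusted; a minimal sketch (the helper name is
illustrative and it assumes the existing ERROR_RESPONSE packet type):

	/* Sketch only: true if the reply overlays an XDP error response. */
	static bool example_xdp_reply_is_error(const struct tb_xdp_uuid_response *res)
	{
		return res->err.hdr.type == ERROR_RESPONSE;
	}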
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 484f25be2849..a74f4878d3e7 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -33,7 +33,7 @@ enum tb_switch_cap {
enum tb_switch_vse_cap {
TB_VSE_CAP_PLUG_EVENTS = 0x01, /* also EEPROM */
TB_VSE_CAP_TIME2 = 0x03,
- TB_VSE_CAP_IECS = 0x04,
+ TB_VSE_CAP_CP_LP = 0x04,
TB_VSE_CAP_LINK_CONTROLLER = 0x06, /* also IECS */
};
@@ -246,6 +246,7 @@ enum usb4_switch_op {
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
#define TMU_RTR_CS_22 0x16
#define TMU_RTR_CS_24 0x18
+#define TMU_RTR_CS_25 0x19
enum tb_port_type {
TB_TYPE_INACTIVE = 0x000000,
@@ -305,16 +306,22 @@ struct tb_regs_port_header {
/* TMU adapter registers */
#define TMU_ADP_CS_3 0x03
#define TMU_ADP_CS_3_UDM BIT(29)
+#define TMU_ADP_CS_6 0x06
+#define TMU_ADP_CS_6_DTS BIT(1)
/* Lane adapter registers */
#define LANE_ADP_CS_0 0x00
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20
+#define LANE_ADP_CS_0_CL0S_SUPPORT BIT(26)
+#define LANE_ADP_CS_0_CL1_SUPPORT BIT(27)
#define LANE_ADP_CS_1 0x01
#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
+#define LANE_ADP_CS_1_CL0S_ENABLE BIT(10)
+#define LANE_ADP_CS_1_CL1_ENABLE BIT(11)
#define LANE_ADP_CS_1_LD BIT(14)
#define LANE_ADP_CS_1_LB BIT(15)
#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
@@ -323,6 +330,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4
#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
+#define LANE_ADP_CS_1_PMS BIT(30)
/* USB4 port registers */
#define PORT_CS_1 0x01
@@ -338,6 +346,7 @@ struct tb_regs_port_header {
#define PORT_CS_18 0x12
#define PORT_CS_18_BE BIT(8)
#define PORT_CS_18_TCM BIT(9)
+#define PORT_CS_18_CPS BIT(10)
#define PORT_CS_18_WOU4S BIT(18)
#define PORT_CS_19 0x13
#define PORT_CS_19_PC BIT(3)
@@ -437,39 +446,79 @@ struct tb_regs_hop {
u32 unknown3:3; /* set to zero */
} __packed;
+/* TMU Thunderbolt 3 registers */
+#define TB_TIME_VSEC_3_CS_9 0x9
+#define TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK GENMASK(17, 16)
+#define TB_TIME_VSEC_3_CS_26 0x1a
+#define TB_TIME_VSEC_3_CS_26_TD BIT(22)
+
+/*
+ * Used for Titan Ridge only. The bits are part of the same TMU_ADP_CS_6
+ * register as in the USB4 spec (see above), but these specific bits are
+ * used only by Titan Ridge and are reserved in the USB4 spec.
+ */
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK GENMASK(3, 2)
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 BIT(2)
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2 BIT(3)
+
+/* Plug Events registers */
+#define TB_PLUG_EVENTS_PCIE_WR_DATA 0x1b
+#define TB_PLUG_EVENTS_PCIE_CMD 0x1c
+#define TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK GENMASK(9, 0)
+#define TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT 10
+#define TB_PLUG_EVENTS_PCIE_CMD_BR_MASK GENMASK(17, 10)
+#define TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK BIT(21)
+#define TB_PLUG_EVENTS_PCIE_CMD_WR 0x1
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT 22
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_MASK GENMASK(24, 22)
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL 0x2
+#define TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK BIT(30)
+#define TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK BIT(31)
+#define TB_PLUG_EVENTS_PCIE_CMD_RD_DATA 0x1d
+
+/* CP Low Power registers */
+#define TB_LOW_PWR_C1_CL1 0x1
+#define TB_LOW_PWR_C1_CL1_OBJ_MASK GENMASK(4, 1)
+#define TB_LOW_PWR_C1_CL2_OBJ_MASK GENMASK(4, 1)
+#define TB_LOW_PWR_C1_PORT_A_MASK GENMASK(2, 1)
+#define TB_LOW_PWR_C0_PORT_B_MASK GENMASK(4, 3)
+#define TB_LOW_PWR_C3_CL1 0x3
+
/* Common link controller registers */
-#define TB_LC_DESC 0x02
-#define TB_LC_DESC_NLC_MASK GENMASK(3, 0)
-#define TB_LC_DESC_SIZE_SHIFT 8
-#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8)
-#define TB_LC_DESC_PORT_SIZE_SHIFT 16
-#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
-#define TB_LC_FUSE 0x03
-#define TB_LC_SNK_ALLOCATION 0x10
-#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0)
-#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1
-#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4
-#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4)
-#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1
-#define TB_LC_POWER 0x740
+#define TB_LC_DESC 0x02
+#define TB_LC_DESC_NLC_MASK GENMASK(3, 0)
+#define TB_LC_DESC_SIZE_SHIFT 8
+#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8)
+#define TB_LC_DESC_PORT_SIZE_SHIFT 16
+#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
+#define TB_LC_FUSE 0x03
+#define TB_LC_SNK_ALLOCATION 0x10
+#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0)
+#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1
+#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4
+#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4)
+#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1
+#define TB_LC_POWER 0x740
/* Link controller registers */
-#define TB_LC_PORT_ATTR 0x8d
-#define TB_LC_PORT_ATTR_BE BIT(12)
-
-#define TB_LC_SX_CTRL 0x96
-#define TB_LC_SX_CTRL_WOC BIT(1)
-#define TB_LC_SX_CTRL_WOD BIT(2)
-#define TB_LC_SX_CTRL_WODPC BIT(3)
-#define TB_LC_SX_CTRL_WODPD BIT(4)
-#define TB_LC_SX_CTRL_WOU4 BIT(5)
-#define TB_LC_SX_CTRL_WOP BIT(6)
-#define TB_LC_SX_CTRL_L1C BIT(16)
-#define TB_LC_SX_CTRL_L1D BIT(17)
-#define TB_LC_SX_CTRL_L2C BIT(20)
-#define TB_LC_SX_CTRL_L2D BIT(21)
-#define TB_LC_SX_CTRL_SLI BIT(29)
-#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
-#define TB_LC_SX_CTRL_SLP BIT(31)
+#define TB_LC_PORT_ATTR 0x8d
+#define TB_LC_PORT_ATTR_BE BIT(12)
+
+#define TB_LC_SX_CTRL 0x96
+#define TB_LC_SX_CTRL_WOC BIT(1)
+#define TB_LC_SX_CTRL_WOD BIT(2)
+#define TB_LC_SX_CTRL_WODPC BIT(3)
+#define TB_LC_SX_CTRL_WODPD BIT(4)
+#define TB_LC_SX_CTRL_WOU4 BIT(5)
+#define TB_LC_SX_CTRL_WOP BIT(6)
+#define TB_LC_SX_CTRL_L1C BIT(16)
+#define TB_LC_SX_CTRL_L1D BIT(17)
+#define TB_LC_SX_CTRL_L2C BIT(20)
+#define TB_LC_SX_CTRL_L2D BIT(21)
+#define TB_LC_SX_CTRL_SLI BIT(29)
+#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
+#define TB_LC_SX_CTRL_SLP BIT(31)
+#define TB_LC_LINK_ATTR 0x97
+#define TB_LC_LINK_ATTR_CPS BIT(18)
#endif
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 039c42a06000..e4a07a26f693 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -115,6 +115,11 @@ static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
return tb_port_tmu_set_unidirectional(port, false);
}
+static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
+{
+ return tb_port_tmu_set_unidirectional(port, true);
+}
+
static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
{
int ret;
@@ -128,23 +133,46 @@ static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
return val & TMU_ADP_CS_3_UDM;
}
+static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
+{
+ u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;
+
+ return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
+}
+
+static int tb_port_tmu_time_sync_disable(struct tb_port *port)
+{
+ return tb_port_tmu_time_sync(port, true);
+}
+
+static int tb_port_tmu_time_sync_enable(struct tb_port *port)
+{
+ return tb_port_tmu_time_sync(port, false);
+}
+
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
+ u32 val, offset, bit;
int ret;
- u32 val;
- ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
- sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (tb_switch_is_usb4(sw)) {
+ offset = sw->tmu.cap + TMU_RTR_CS_0;
+ bit = TMU_RTR_CS_0_TD;
+ } else {
+ offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
+ bit = TB_TIME_VSEC_3_CS_26_TD;
+ }
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
if (set)
- val |= TMU_RTR_CS_0_TD;
+ val |= bit;
else
- val &= ~TMU_RTR_CS_0_TD;
+ val &= ~bit;
- return tb_sw_write(sw, &val, TB_CFG_SWITCH,
- sw->tmu.cap + TMU_RTR_CS_0, 1);
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
/**
@@ -207,7 +235,8 @@ int tb_switch_tmu_init(struct tb_switch *sw)
*/
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
- unsigned int post_local_time_offset, post_time_offset;
+ unsigned int post_time_high_offset, post_time_high = 0;
+ unsigned int post_local_time_offset, post_time_offset;
struct tb_switch *root_switch = sw->tb->root_switch;
u64 hi, mid, lo, local_time, post_time;
int i, ret, retries = 100;
@@ -247,6 +276,7 @@ int tb_switch_tmu_post_time(struct tb_switch *sw)
post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
+ post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;
/*
* Write the Grandmaster time to the Post Local Time registers
@@ -258,17 +288,24 @@ int tb_switch_tmu_post_time(struct tb_switch *sw)
goto out;
/*
- * Have the new switch update its local time (by writing 1 to
- * the post_time registers) and wait for the completion of the
- * same (post_time register becomes 0). This means the time has
- * been converged properly.
+ * Have the new switch update its local time by:
+	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
+	 *    the Post Time High register.
+	 * 2) writing 0 to the Post Time High register and then waiting
+	 *    for the Post Time register to become 0. This means the time
+	 *    has converged properly.
*/
- post_time = 1;
+ post_time = 0xffffffff00000001ULL;
ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
if (ret)
goto out;
+ ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
+ post_time_high_offset, 1);
+ if (ret)
+ goto out;
+
do {
usleep_range(5, 10);
ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
@@ -297,30 +334,54 @@ out:
*/
int tb_switch_tmu_disable(struct tb_switch *sw)
{
- int ret;
-
- if (!tb_switch_is_usb4(sw))
+ /*
+	 * No need to disable TMU on devices that don't support CLx since
+	 * on these devices, e.g. Alpine Ridge and earlier, the bi-directional
+	 * HiFi TMU mode is enabled by default and we don't change it.
+ */
+ if (!tb_switch_is_clx_supported(sw))
return 0;
/* Already disabled? */
if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
return 0;
- if (sw->tmu.unidirectional) {
+
+ if (tb_route(sw)) {
+ bool unidirectional = tb_switch_tmu_hifi_is_enabled(sw, true);
struct tb_switch *parent = tb_switch_parent(sw);
- struct tb_port *up, *down;
+ struct tb_port *down, *up;
+ int ret;
- up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
-
- /* The switch may be unplugged so ignore any errors */
- tb_port_tmu_unidirectional_disable(up);
- ret = tb_port_tmu_unidirectional_disable(down);
+ up = tb_upstream_port(sw);
+ /*
+		 * In case of uni-directional time sync, the TMU handshake is
+		 * initiated by the upstream router. In case of bi-directional
+		 * time sync, it is initiated by the downstream router.
+		 * Therefore, we turn the rate off in the respective router.
+ */
+ if (unidirectional)
+ tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
+ else
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+ tb_port_tmu_time_sync_disable(up);
+ ret = tb_port_tmu_time_sync_disable(down);
if (ret)
return ret;
- }
- tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+ if (unidirectional) {
+ /* The switch may be unplugged so ignore any errors */
+ tb_port_tmu_unidirectional_disable(up);
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+ }
+ } else {
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+ }
sw->tmu.unidirectional = false;
sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
@@ -329,55 +390,231 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
return 0;
}
-/**
- * tb_switch_tmu_enable() - Enable TMU on a switch
- * @sw: Switch whose TMU to enable
- *
- * Enables TMU of a switch to be in bi-directional, HiFi mode. In this mode
- * all tunneling should work.
+static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *down, *up;
+
+ down = tb_port_at(tb_route(sw), parent);
+ up = tb_upstream_port(sw);
+ /*
+	 * If any step of setting the bi-directional or uni-directional TMU
+	 * mode fails, fall back to the off-mode TMU configuration. Ignore
+	 * any additional failures in the functions below since the caller
+	 * already reports a failure.
+ */
+ tb_port_tmu_time_sync_disable(down);
+ tb_port_tmu_time_sync_disable(up);
+ if (unidirectional)
+ tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
+ else
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+ tb_port_tmu_unidirectional_disable(down);
+ tb_port_tmu_unidirectional_disable(up);
+}
+
+/*
+ * This function is called when the previous TMU mode was
+ * TB_SWITCH_TMU_RATE_OFF.
*/
-int tb_switch_tmu_enable(struct tb_switch *sw)
+static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
int ret;
- if (!tb_switch_is_usb4(sw))
- return 0;
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
- if (tb_switch_tmu_is_enabled(sw))
- return 0;
+ ret = tb_port_tmu_unidirectional_disable(up);
+ if (ret)
+ return ret;
- ret = tb_switch_tmu_set_time_disruption(sw, true);
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ goto out;
+
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(up);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(down);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ __tb_switch_tmu_off(sw, false);
+ return ret;
+}
+
+static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
+{
+ u32 val;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
if (ret)
return ret;
- /* Change mode to bi-directional */
- if (tb_route(sw) && sw->tmu.unidirectional) {
- struct tb_switch *parent = tb_switch_parent(sw);
- struct tb_port *up, *down;
+ val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
- up = tb_upstream_port(sw);
- down = tb_port_at(tb_route(sw), parent);
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
+}
- ret = tb_port_tmu_unidirectional_disable(down);
- if (ret)
- return ret;
+static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
+{
+ struct tb_port *up = tb_upstream_port(sw);
- ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ return tb_port_tmu_write(up, TMU_ADP_CS_6,
+ TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
+ TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
+}
+
+/*
+ * This function is called when the previous TMU mode was
+ * TB_SWITCH_TMU_RATE_OFF.
+ */
+static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+ int ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+ ret = tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ return ret;
+
+ ret = tb_port_tmu_unidirectional_enable(up);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(up);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_unidirectional_enable(down);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(down);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ __tb_switch_tmu_off(sw, true);
+ return ret;
+}
+
+static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
+{
+ bool unidirectional = sw->tmu.unidirectional_request;
+ int ret;
+
+ if (unidirectional && !sw->tmu.has_ucap)
+ return -EOPNOTSUPP;
+
+ /*
+	 * No need to enable TMU on devices that don't support CLx since on
+	 * these devices, e.g. Alpine Ridge and earlier, the bi-directional
+	 * HiFi TMU mode is enabled by default.
+ */
+ if (!tb_switch_is_clx_supported(sw))
+ return 0;
+
+ if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
+ return 0;
+
+ if (tb_switch_is_titan_ridge(sw) && unidirectional) {
+ /* Titan Ridge supports only CL0s */
+ if (!tb_switch_is_cl0s_enabled(sw))
+ return -EOPNOTSUPP;
+
+ ret = tb_switch_tmu_objection_mask(sw);
if (ret)
return ret;
- ret = tb_port_tmu_unidirectional_disable(up);
+ ret = tb_switch_tmu_unidirectional_enable(sw);
if (ret)
return ret;
+ }
+
+ ret = tb_switch_tmu_set_time_disruption(sw, true);
+ if (ret)
+ return ret;
+
+ if (tb_route(sw)) {
+		/* The only mode changes used are from OFF to HiFi-Uni/HiFi-BiDir */
+ if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
+ if (unidirectional)
+ ret = __tb_switch_tmu_enable_unidirectional(sw);
+ else
+ ret = __tb_switch_tmu_enable_bidirectional(sw);
+ if (ret)
+ return ret;
+ }
+ sw->tmu.unidirectional = unidirectional;
} else {
+ /*
+		 * Host router port configurations are written as part of
+		 * the configuration of the downstream port of the parent
+		 * of the child node - see above. Here only the host
+		 * router's rate configuration is written.
+ */
ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
if (ret)
return ret;
}
- sw->tmu.unidirectional = false;
sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
- tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
+ tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
return tb_switch_tmu_set_time_disruption(sw, false);
}
+
+/**
+ * tb_switch_tmu_enable() - Enable TMU on a router
+ * @sw: Router whose TMU to enable
+ *
+ * Enables the TMU of a router in uni-directional or bi-directional HiFi mode.
+ * Calling tb_switch_tmu_configure() is required before calling this function
+ * to select the HiFi rate and the directionality (uni-directional or
+ * bi-directional). In both modes all tunneling should work. Uni-directional
+ * mode is required for CLx (Link Low-Power) to work.
+ */
+int tb_switch_tmu_enable(struct tb_switch *sw)
+{
+ if (sw->tmu.rate_request == TB_SWITCH_TMU_RATE_NORMAL)
+ return -EOPNOTSUPP;
+
+ return tb_switch_tmu_hifi_enable(sw);
+}
+
+/**
+ * tb_switch_tmu_configure() - Configure the TMU rate and directionality
+ * @sw: Router whose mode to change
+ * @rate: Rate to configure Off/LowRes/HiFi
+ * @unidirectional: If uni-directional (bi-directional otherwise)
+ *
+ * Selects the rate of the TMU and directionality (uni-directional or
+ * bi-directional). Must be called before tb_switch_tmu_enable().
+ */
+void tb_switch_tmu_configure(struct tb_switch *sw,
+ enum tb_switch_tmu_rate rate, bool unidirectional)
+{
+ sw->tmu.unidirectional_request = unidirectional;
+ sw->tmu.rate_request = rate;
+}
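
For reference, the connection-manager side (tb_enable_tmu() as patched in
tb.c above) reduces to roughly this sequence once tb_switch_tmu_configure()
has been called (sketch, not part of the patch):

	static int example_enable_tmu(struct tb_switch *sw)
	{
		int ret;

		/* Already enabled in the requested mode? Don't touch it. */
		if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
			return 0;

		ret = tb_switch_tmu_disable(sw);
		if (ret)
			return ret;

		return tb_switch_tmu_enable(sw);
	}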
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index bb5cc480fc9a..a473cc7d9a8d 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -207,12 +207,14 @@ static int tb_pci_init_path(struct tb_path *path)
* tb_tunnel_discover_pci() - Discover existing PCIe tunnels
* @tb: Pointer to the domain structure
* @down: PCIe downstream adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
*
* If @down adapter is active, follows the tunnel to the PCIe upstream
* adapter and back. Returns the discovered tunnel or %NULL if there was
* no tunnel.
*/
-struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
+ bool alloc_hopid)
{
struct tb_tunnel *tunnel;
struct tb_path *path;
@@ -233,7 +235,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
* case.
*/
path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
- &tunnel->dst_port, "PCIe Up");
+ &tunnel->dst_port, "PCIe Up", alloc_hopid);
if (!path) {
/* Just disable the downstream port */
tb_pci_port_enable(down, false);
@@ -244,7 +246,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
goto err_free;
path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
- "PCIe Down");
+ "PCIe Down", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_PCI_PATH_DOWN] = path;
@@ -761,6 +763,7 @@ static int tb_dp_init_video_path(struct tb_path *path)
* tb_tunnel_discover_dp() - Discover existing Display Port tunnels
* @tb: Pointer to the domain structure
* @in: DP in adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
*
* If @in adapter is active, follows the tunnel to the DP out adapter
* and back. Returns the discovered tunnel or %NULL if there was no
@@ -768,7 +771,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
*
* Return: DP tunnel or %NULL if no tunnel found.
*/
-struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
+ bool alloc_hopid)
{
struct tb_tunnel *tunnel;
struct tb_port *port;
@@ -787,7 +791,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
tunnel->src_port = in;
path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
- &tunnel->dst_port, "Video");
+ &tunnel->dst_port, "Video", alloc_hopid);
if (!path) {
/* Just disable the DP IN port */
tb_dp_port_enable(in, false);
@@ -797,14 +801,15 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
goto err_free;
- path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
+ path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
+ alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
- &port, "AUX RX");
+ &port, "AUX RX", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_IN] = path;
@@ -1343,12 +1348,14 @@ static void tb_usb3_init_path(struct tb_path *path)
* tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
* @tb: Pointer to the domain structure
* @down: USB3 downstream adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
*
* If @down adapter is active, follows the tunnel to the USB3 upstream
* adapter and back. Returns the discovered tunnel or %NULL if there was
* no tunnel.
*/
-struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
+ bool alloc_hopid)
{
struct tb_tunnel *tunnel;
struct tb_path *path;
@@ -1369,7 +1376,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
* case.
*/
path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
- &tunnel->dst_port, "USB3 Down");
+ &tunnel->dst_port, "USB3 Down", alloc_hopid);
if (!path) {
/* Just disable the downstream port */
tb_usb3_port_enable(down, false);
@@ -1379,7 +1386,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
- "USB3 Up");
+ "USB3 Up", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_USB3_PATH_UP] = path;
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index eea14e24f7e0..03e56076b5bc 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -64,10 +64,12 @@ struct tb_tunnel {
int allocated_down;
};
-struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
+ bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down);
-struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
+ bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int max_up,
int max_down);
@@ -77,7 +79,8 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
int receive_ring);
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
int transmit_ring, int receive_path, int receive_ring);
-struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
+ bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up,
int max_down);
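
The new @alloc_hopid flag lets a caller walk an existing tunnel without
reserving HopIDs on the visited ports. A hypothetical read-only caller might
look like this (a sketch; tb_tunnel_free() is the existing release helper):

/* Inspect an existing PCIe tunnel without claiming its HopIDs. */
static void example_inspect_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_discover_pci(tb, down, false);
	if (!tunnel)
		return;		/* no active tunnel behind @down */

	/* ... look at tunnel->src_port / tunnel->dst_port ... */
	tb_tunnel_free(tunnel);
}
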
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index ceddbe7e9f93..3a2e7126db9d 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -50,28 +50,6 @@ enum usb4_ba_index {
#define USB4_BA_VALUE_MASK GENMASK(31, 16)
#define USB4_BA_VALUE_SHIFT 16
-static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
- u32 value, int timeout_msec)
-{
- ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
-
- do {
- u32 val;
- int ret;
-
- ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
- if (ret)
- return ret;
-
- if ((val & bit) == value)
- return 0;
-
- usleep_range(50, 100);
- } while (ktime_before(ktime_get(), timeout));
-
- return -ETIMEDOUT;
-}
-
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
u32 *metadata, u8 *status,
const void *tx_data, size_t tx_dwords,
@@ -97,7 +75,7 @@ static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
if (ret)
return ret;
- ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
+ ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
if (ret)
return ret;
@@ -303,8 +281,8 @@ int usb4_switch_setup(struct tb_switch *sw)
if (ret)
return ret;
- return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
- ROUTER_CS_6_CR, 50);
+ return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
+ ROUTER_CS_6_CR, 50);
}
/**
@@ -480,8 +458,8 @@ int usb4_switch_set_sleep(struct tb_switch *sw)
if (ret)
return ret;
- return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
- ROUTER_CS_6_SLPR, 500);
+ return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
+ ROUTER_CS_6_SLPR, 500);
}
/**
@@ -1386,6 +1364,26 @@ int usb4_port_enumerate_retimers(struct tb_port *port)
USB4_SB_OPCODE, &val, sizeof(val));
}
+/**
+ * usb4_port_clx_supported() - Check if CLx is supported by the link
+ * @port: Port to check for CLx support
+ *
+ * The PORT_CS_18_CPS bit reflects whether the link supports CLx, including
+ * active cables (if connected on the link).
+ */
+bool usb4_port_clx_supported(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_18, 1);
+ if (ret)
+ return false;
+
+ return !!(val & PORT_CS_18_CPS);
+}
+
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
enum usb4_sb_opcode opcode,
int timeout_msec)
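
The deleted usb4_switch_wait_for_bit() was a usb4.c-local copy of a generic
poll-until-bit-matches loop; the call sites now use a shared
tb_switch_wait_for_bit() introduced elsewhere in this series. Presumably the
shared helper keeps the same shape as the removed body (a sketch):

/* Poll @offset until (val & bit) == value or @timeout_msec expires. */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;
		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
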
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index eff32499610f..01d6b724ca51 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -214,16 +214,12 @@ static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
-static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
+static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
- const struct tb_xdp_error_response *error;
-
- if (hdr->type != ERROR_RESPONSE)
+ if (res->hdr.type != ERROR_RESPONSE)
return 0;
- error = (const struct tb_xdp_error_response *)hdr;
-
- switch (error->error) {
+ switch (res->error) {
case ERROR_UNKNOWN_PACKET:
case ERROR_UNKNOWN_DOMAIN:
return -EIO;
@@ -257,7 +253,7 @@ static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
if (ret)
return ret;
- ret = tb_xdp_handle_error(&res.hdr);
+ ret = tb_xdp_handle_error(&res.err);
if (ret)
return ret;
@@ -329,7 +325,7 @@ static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
if (ret)
goto err;
- ret = tb_xdp_handle_error(&res->hdr);
+ ret = tb_xdp_handle_error(&res->err);
if (ret)
goto err;
@@ -462,7 +458,7 @@ static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
if (ret)
return ret;
- return tb_xdp_handle_error(&res.hdr);
+ return tb_xdp_handle_error(&res.err);
}
static int
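
The tb_xdp_handle_error() rework trades a cast for a typed parameter: callers
hand over the error view of their response (res.err) and the function reads
the error code through the right type. Reduced to a self-contained sketch
(names and the type value are illustrative, not the driver's):

#include <errno.h>
#include <stdint.h>

enum { ERROR_RESPONSE = 3 };		/* illustrative value */

struct hdr { uint8_t type; };

struct error_response {			/* stand-in for tb_xdp_error_response */
	struct hdr hdr;
	uint8_t error;
};

static int handle_error(const struct error_response *res)
{
	if (res->hdr.type != ERROR_RESPONSE)
		return 0;
	return -EIO;			/* map res->error to an errno as above */
}
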
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index d24af649a8bb..5ed19a9857ad 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -151,7 +151,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
address = (unsigned long)(void *)buf;
goldfish_tty_rw(qtty, address, count, 0);
- tty_schedule_flip(&qtty->port);
+ tty_flip_buffer_push(&qtty->port);
return IRQ_HANDLED;
}
@@ -298,7 +298,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
struct resource *r;
struct device *ttydev;
void __iomem *base;
- u32 irq;
+ int irq;
unsigned int line;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -313,14 +313,12 @@ static int goldfish_tty_probe(struct platform_device *pdev)
return -ENOMEM;
}
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!r) {
- pr_err("goldfish_tty: No IRQ resource available!\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
goto err_unmap;
}
- irq = r->start;
-
mutex_lock(&goldfish_tty_lock);
if (pdev->id == PLATFORM_DEVID_NONE)
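
platform_get_irq() both performs the IORESOURCE_IRQ lookup and returns a
negative errno (reporting the failure itself), which is what allows the manual
resource walk and pr_err() to go. Note also the u32 -> int change for @irq: a
negative errno must be representable. The resulting probe-time pattern, as a
sketch:

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* already reported by the core */

	/* ... devm_request_irq(&pdev->dev, irq, ...) ... */
	return 0;
}
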
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 02c10a968de1..31dceb5039b5 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -955,19 +955,18 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
mips_ejtag_fdc_con.tty_drv = driver;
init_waitqueue_head(&priv->waitqueue);
- priv->thread = kthread_create(mips_ejtag_fdc_put, priv, priv->fdc_name);
- if (IS_ERR(priv->thread)) {
- ret = PTR_ERR(priv->thread);
- dev_err(priv->dev, "Couldn't create kthread (%d)\n", ret);
- goto err_destroy_ports;
- }
/*
* Bind the writer thread to the right CPU so it can't migrate.
* The channels are per-CPU and we want all channel I/O to be on a
* single predictable CPU.
*/
- kthread_bind(priv->thread, dev->cpu);
- wake_up_process(priv->thread);
+ priv->thread = kthread_run_on_cpu(mips_ejtag_fdc_put, priv,
+ dev->cpu, "ttyFDC/%u");
+ if (IS_ERR(priv->thread)) {
+ ret = PTR_ERR(priv->thread);
+ dev_err(priv->dev, "Couldn't create kthread (%d)\n", ret);
+ goto err_destroy_ports;
+ }
/* Look for an FDC IRQ */
priv->irq = get_c0_fdc_int();
@@ -1095,15 +1094,14 @@ static int mips_ejtag_fdc_tty_cpu_up(struct mips_cdmm_device *dev)
}
/* Restart the kthread */
- priv->thread = kthread_create(mips_ejtag_fdc_put, priv, priv->fdc_name);
+ /* Bind it back to the right CPU and set it off */
+ priv->thread = kthread_run_on_cpu(mips_ejtag_fdc_put, priv,
+ dev->cpu, "ttyFDC/%u");
if (IS_ERR(priv->thread)) {
ret = PTR_ERR(priv->thread);
dev_err(priv->dev, "Couldn't re-create kthread (%d)\n", ret);
goto out;
}
- /* Bind it back to the right CPU and set it off */
- kthread_bind(priv->thread, dev->cpu);
- wake_up_process(priv->thread);
out:
return ret;
}
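
kthread_run_on_cpu() collapses the create/bind/wake sequence into one call and
formats the CPU number into the thread name ("ttyFDC/%u" above). A sketch of
the pattern:

/* Create a worker, bind it to @cpu so it cannot migrate, and start it. */
static struct task_struct *example_worker(int (*fn)(void *), void *data,
					  unsigned int cpu)
{
	/* Callers check the result with IS_ERR(), as in the probe above. */
	return kthread_run_on_cpu(fn, data, cpu, "example/%u");
}
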
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index e37683e25055..f3c72ab1476c 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -1683,7 +1683,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
if (inited && !tty_throttled(tty) &&
MoxaPortRxQueue(p) > 0) { /* RX */
MoxaPortReadData(p);
- tty_schedule_flip(&p->port);
+ tty_flip_buffer_push(&p->port);
}
} else {
clear_bit(EMPTYWAIT, &p->statusflags);
@@ -1708,7 +1708,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */
tty_insert_flip_char(&p->port, 0, TTY_BREAK);
- tty_schedule_flip(&p->port);
+ tty_flip_buffer_push(&p->port);
}
if (intr & IntrLine)
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 93a95a135a71..c858aff721c4 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -159,14 +159,32 @@
#define MXSER_BAUD_BASE 921600
#define MXSER_CUSTOM_DIVISOR (MXSER_BAUD_BASE * 16)
-#define PCI_DEVICE_ID_POS104UL 0x1044
-#define PCI_DEVICE_ID_CB108 0x1080
-#define PCI_DEVICE_ID_CP102UF 0x1023
-#define PCI_DEVICE_ID_CP112UL 0x1120
-#define PCI_DEVICE_ID_CB114 0x1142
-#define PCI_DEVICE_ID_CP114UL 0x1143
-#define PCI_DEVICE_ID_CB134I 0x1341
-#define PCI_DEVICE_ID_CP138U 0x1380
+#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
+#define PCI_DEVICE_ID_MOXA_CP102 0x1020
+#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
+#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
+#define PCI_DEVICE_ID_MOXA_CP102UF 0x1023
+#define PCI_DEVICE_ID_MOXA_C104 0x1040
+#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
+#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
+#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043
+#define PCI_DEVICE_ID_MOXA_POS104UL 0x1044
+#define PCI_DEVICE_ID_MOXA_CB108 0x1080
+#define PCI_DEVICE_ID_MOXA_CP112UL 0x1120
+#define PCI_DEVICE_ID_MOXA_CT114 0x1140
+#define PCI_DEVICE_ID_MOXA_CP114 0x1141
+#define PCI_DEVICE_ID_MOXA_CB114 0x1142
+#define PCI_DEVICE_ID_MOXA_CP114UL 0x1143
+#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
+#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181
+#define PCI_DEVICE_ID_MOXA_CP132 0x1320
+#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
+#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
+#define PCI_DEVICE_ID_MOXA_CB134I 0x1341
+#define PCI_DEVICE_ID_MOXA_CP138U 0x1380
+#define PCI_DEVICE_ID_MOXA_C168 0x1680
+#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
+#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682
#define MXSER_NPORTS(ddata) ((ddata) & 0xffU)
#define MXSER_HIGHBAUD 0x0100
@@ -194,32 +212,32 @@ static const struct {
/* driver_data correspond to the lines in the structure above
see also ISA probe function before you change something */
static const struct pci_device_id mxser_pcibrds[] = {
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132), .driver_data = 2 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CT114), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102), .driver_data = 2 | MXSER_HIGHBAUD },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U), .driver_data = 2 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104JU),.driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_RC7000), .driver_data = 8 }, /* RC7000 */
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),.driver_data = 2 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U), .driver_data = 2 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL),.driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL),.driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL),.driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB108), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB114), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB134I), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_POS104UL), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP102UF), .driver_data = 2 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP112UL), .driver_data = 2 },
+ { PCI_DEVICE_DATA(MOXA, C168, 8) },
+ { PCI_DEVICE_DATA(MOXA, C104, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP132, 2) },
+ { PCI_DEVICE_DATA(MOXA, CP114, 4) },
+ { PCI_DEVICE_DATA(MOXA, CT114, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP102, 2 | MXSER_HIGHBAUD) },
+ { PCI_DEVICE_DATA(MOXA, CP104U, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP168U, 8) },
+ { PCI_DEVICE_DATA(MOXA, CP132U, 2) },
+ { PCI_DEVICE_DATA(MOXA, CP134U, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP104JU, 4) },
+ { PCI_DEVICE_DATA(MOXA, RC7000, 8) }, /* RC7000 */
+ { PCI_DEVICE_DATA(MOXA, CP118U, 8) },
+ { PCI_DEVICE_DATA(MOXA, CP102UL, 2) },
+ { PCI_DEVICE_DATA(MOXA, CP102U, 2) },
+ { PCI_DEVICE_DATA(MOXA, CP118EL, 8) },
+ { PCI_DEVICE_DATA(MOXA, CP168EL, 8) },
+ { PCI_DEVICE_DATA(MOXA, CP104EL, 4) },
+ { PCI_DEVICE_DATA(MOXA, CB108, 8) },
+ { PCI_DEVICE_DATA(MOXA, CB114, 4) },
+ { PCI_DEVICE_DATA(MOXA, CB134I, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP138U, 8) },
+ { PCI_DEVICE_DATA(MOXA, POS104UL, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP114UL, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP102UF, 2) },
+ { PCI_DEVICE_DATA(MOXA, CP112UL, 2) },
{ }
};
MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
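
PCI_DEVICE_DATA(MOXA, C168, 8) builds both the vendor and device fields by
token pasting, which is why every ID above had to be renamed to the
PCI_DEVICE_ID_MOXA_* pattern first. Roughly (paraphrasing include/linux/pci.h):

#define PCI_DEVICE_DATA(vend, dev, data)				\
	.vendor = PCI_VENDOR_ID_##vend,					\
	.device = PCI_DEVICE_ID_##vend##_##dev,				\
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,		\
	.driver_data = (kernel_ulong_t)(data)
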
@@ -251,8 +269,6 @@ struct mxser_port {
u8 MCR; /* Modem control register */
u8 FCR; /* FIFO control register */
- bool ldisc_stop_rx;
-
struct async_icount icount; /* kernel counters for 4 input interrupts */
unsigned int timeout;
@@ -262,7 +278,6 @@ struct mxser_port {
unsigned int xmit_head;
unsigned int xmit_tail;
unsigned int xmit_cnt;
- int closing;
spinlock_t slock;
};
@@ -684,27 +699,34 @@ static void mxser_change_speed(struct tty_struct *tty, struct ktermios *old_term
outb(cval, info->ioaddr + UART_LCR);
}
-static void mxser_check_modem_status(struct tty_struct *tty,
- struct mxser_port *port, int status)
+static u8 mxser_check_modem_status(struct tty_struct *tty,
+ struct mxser_port *port)
{
+ u8 msr = inb(port->ioaddr + UART_MSR);
+
+ if (!(msr & UART_MSR_ANY_DELTA))
+ return msr;
+
/* update input line counters */
- if (status & UART_MSR_TERI)
+ if (msr & UART_MSR_TERI)
port->icount.rng++;
- if (status & UART_MSR_DDSR)
+ if (msr & UART_MSR_DDSR)
port->icount.dsr++;
- if (status & UART_MSR_DDCD)
+ if (msr & UART_MSR_DDCD)
port->icount.dcd++;
- if (status & UART_MSR_DCTS)
+ if (msr & UART_MSR_DCTS)
port->icount.cts++;
wake_up_interruptible(&port->port.delta_msr_wait);
- if (tty_port_check_carrier(&port->port) && (status & UART_MSR_DDCD)) {
- if (status & UART_MSR_DCD)
+ if (tty_port_check_carrier(&port->port) && (msr & UART_MSR_DDCD)) {
+ if (msr & UART_MSR_DCD)
wake_up_interruptible(&port->port.open_wait);
}
if (tty_port_cts_enabled(&port->port))
- mxser_handle_cts(tty, port, status);
+ mxser_handle_cts(tty, port, msr);
+
+ return msr;
}
static void mxser_disable_and_clear_FIFO(struct mxser_port *info)
@@ -802,6 +824,20 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
}
/*
+ * To stop accepting input, we disable the receive line status interrupts, and
+ * tell the interrupt driver to stop checking the data ready bit in the line
+ * status register.
+ */
+static void mxser_stop_rx(struct mxser_port *info)
+{
+ info->IER &= ~UART_IER_RLSI;
+ if (info->board->must_hwid)
+ info->IER &= ~MOXA_MUST_RECV_ISR;
+
+ outb(info->IER, info->ioaddr + UART_IER);
+}
+
+/*
* This routine will shutdown a serial port
*/
static void mxser_shutdown_port(struct tty_port *port)
@@ -811,6 +847,8 @@ static void mxser_shutdown_port(struct tty_port *port)
spin_lock_irqsave(&info->slock, flags);
+ mxser_stop_rx(info);
+
/*
* clear delta_msr_wait queue to avoid mem leaks: we may free the irq
* here so the queue might never be waken up
@@ -874,64 +912,9 @@ static void mxser_flush_buffer(struct tty_struct *tty)
tty_wakeup(tty);
}
-
-static void mxser_close_port(struct tty_port *port)
-{
- struct mxser_port *info = container_of(port, struct mxser_port, port);
- unsigned long timeout;
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
- info->IER &= ~UART_IER_RLSI;
- if (info->board->must_hwid)
- info->IER &= ~MOXA_MUST_RECV_ISR;
-
- outb(info->IER, info->ioaddr + UART_IER);
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- timeout = jiffies + HZ;
- while (!(inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT)) {
- schedule_timeout_interruptible(5);
- if (time_after(jiffies, timeout))
- break;
- }
-}
-
-/*
- * This routine is called when the serial port gets closed. First, we
- * wait for the last remaining data to be sent. Then, we unlink its
- * async structure from the interrupt chain if necessary, and we free
- * that IRQ if nothing is left in the chain.
- */
static void mxser_close(struct tty_struct *tty, struct file *filp)
{
- struct mxser_port *info = tty->driver_data;
- struct tty_port *port = &info->port;
-
- if (info == NULL)
- return;
- if (tty_port_close_start(port, tty, filp) == 0)
- return;
- info->closing = 1;
- mutex_lock(&port->mutex);
- mxser_close_port(port);
- mxser_flush_buffer(tty);
- if (tty_port_initialized(port) && C_HUPCL(tty))
- tty_port_lower_dtr_rts(port);
- mxser_shutdown_port(port);
- tty_port_set_initialized(port, 0);
- mutex_unlock(&port->mutex);
- info->closing = 0;
- /* Right now the tty_port set is done outside of the close_end helper
- as we don't yet have everyone using refcounts */
- tty_port_close_end(port, tty);
- tty_port_tty_set(port, NULL);
+ tty_port_close(tty->port, tty, filp);
}
static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -940,9 +923,6 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
struct mxser_port *info = tty->driver_data;
unsigned long flags;
- if (!info->port.xmit_buf)
- return 0;
-
while (1) {
c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
SERIAL_XMIT_SIZE - info->xmit_head));
@@ -973,9 +953,6 @@ static int mxser_put_char(struct tty_struct *tty, unsigned char ch)
struct mxser_port *info = tty->driver_data;
unsigned long flags;
- if (!info->port.xmit_buf)
- return 0;
-
if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
return 0;
@@ -993,7 +970,7 @@ static void mxser_flush_chars(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
- if (!info->xmit_cnt || tty->flow.stopped || !info->port.xmit_buf ||
+ if (!info->xmit_cnt || tty->flow.stopped ||
(tty->hw_stopped && !mxser_16550A_or_MUST(info)))
return;
@@ -1153,25 +1130,24 @@ static int mxser_get_lsr_info(struct mxser_port *info,
static int mxser_tiocmget(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
- unsigned char control, status;
+ unsigned char control;
unsigned long flags;
+ u8 msr;
if (tty_io_error(tty))
return -EIO;
spin_lock_irqsave(&info->slock, flags);
control = info->MCR;
- status = inb(info->ioaddr + UART_MSR);
- if (status & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(tty, info, status);
+ msr = mxser_check_modem_status(tty, info);
spin_unlock_irqrestore(&info->slock, flags);
return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
- ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
- ((status & UART_MSR_RI) ? TIOCM_RNG : 0) |
- ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) |
- ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
+ ((msr & UART_MSR_DCD) ? TIOCM_CAR : 0) |
+ ((msr & UART_MSR_RI) ? TIOCM_RNG : 0) |
+ ((msr & UART_MSR_DSR) ? TIOCM_DSR : 0) |
+ ((msr & UART_MSR_CTS) ? TIOCM_CTS : 0);
}
static int mxser_tiocmset(struct tty_struct *tty,
@@ -1326,11 +1302,14 @@ static int mxser_get_icount(struct tty_struct *tty,
return 0;
}
-static void mxser_stoprx(struct tty_struct *tty)
+/*
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ */
+static void mxser_throttle(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
- info->ldisc_stop_rx = true;
if (I_IXOFF(tty)) {
if (info->board->must_hwid) {
info->IER &= ~MOXA_MUST_RECV_ISR;
@@ -1349,21 +1328,11 @@ static void mxser_stoprx(struct tty_struct *tty)
}
}
-/*
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- */
-static void mxser_throttle(struct tty_struct *tty)
-{
- mxser_stoprx(tty);
-}
-
static void mxser_unthrottle(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
/* startrx */
- info->ldisc_stop_rx = false;
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
@@ -1409,7 +1378,7 @@ static void mxser_start(struct tty_struct *tty)
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
- if (info->xmit_cnt && info->port.xmit_buf)
+ if (info->xmit_cnt)
__mxser_start_tx(info);
spin_unlock_irqrestore(&info->slock, flags);
}
@@ -1442,15 +1411,25 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
}
}
+static bool mxser_tx_empty(struct mxser_port *info)
+{
+ unsigned long flags;
+ u8 lsr;
+
+ spin_lock_irqsave(&info->slock, flags);
+ lsr = inb(info->ioaddr + UART_LSR);
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ return !(lsr & UART_LSR_TEMT);
+}
+
/*
* mxser_wait_until_sent() --- wait until the transmitter is empty
*/
static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct mxser_port *info = tty->driver_data;
- unsigned long orig_jiffies, char_time;
- unsigned long flags;
- int lsr;
+ unsigned long expire, char_time;
if (info->type == PORT_UNKNOWN)
return;
@@ -1458,7 +1437,6 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
if (info->xmit_fifo_size == 0)
return; /* Just in case.... */
- orig_jiffies = jiffies;
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
@@ -1473,6 +1451,9 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
char_time = 1;
if (timeout && timeout < char_time)
char_time = timeout;
+
+ char_time = jiffies_to_msecs(char_time);
+
/*
* If the transmitter hasn't cleared in twice the approximate
* amount of time to send the entire FIFO, it probably won't
@@ -1485,18 +1466,15 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
if (!timeout || timeout > 2 * info->timeout)
timeout = 2 * info->timeout;
- spin_lock_irqsave(&info->slock, flags);
- while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
- spin_unlock_irqrestore(&info->slock, flags);
- schedule_timeout_interruptible(char_time);
- spin_lock_irqsave(&info->slock, flags);
+ expire = jiffies + timeout;
+
+ while (mxser_tx_empty(info)) {
+ msleep_interruptible(char_time);
if (signal_pending(current))
break;
- if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ if (time_after(jiffies, expire))
break;
}
- spin_unlock_irqrestore(&info->slock, flags);
- set_current_state(TASK_RUNNING);
}
/*
@@ -1531,8 +1509,7 @@ static int mxser_rs_break(struct tty_struct *tty, int break_state)
return 0;
}
-static bool mxser_receive_chars_new(struct tty_struct *tty,
- struct mxser_port *port, u8 status)
+static bool mxser_receive_chars_new(struct mxser_port *port, u8 status)
{
enum mxser_must_hwid hwid = port->board->must_hwid;
u8 gdl;
@@ -1546,12 +1523,10 @@ static bool mxser_receive_chars_new(struct tty_struct *tty,
if (hwid == MOXA_MUST_MU150_HWID)
gdl &= MOXA_MUST_GDL_MASK;
- if (gdl >= tty->receive_room && !port->ldisc_stop_rx)
- mxser_stoprx(tty);
-
while (gdl--) {
u8 ch = inb(port->ioaddr + UART_RX);
- tty_insert_flip_char(&port->port, ch, 0);
+ if (!tty_insert_flip_char(&port->port, ch, 0))
+ port->icount.buf_overrun++;
}
return true;
@@ -1561,10 +1536,8 @@ static u8 mxser_receive_chars_old(struct tty_struct *tty,
struct mxser_port *port, u8 status)
{
enum mxser_must_hwid hwid = port->board->must_hwid;
- int recv_room = tty->receive_room;
int ignored = 0;
int max = 256;
- int cnt = 0;
u8 ch;
do {
@@ -1599,14 +1572,10 @@ static u8 mxser_receive_chars_old(struct tty_struct *tty,
port->icount.overrun++;
}
}
- tty_insert_flip_char(&port->port, ch, flag);
- cnt++;
- if (cnt >= recv_room) {
- if (!port->ldisc_stop_rx)
- mxser_stoprx(tty);
+ if (!tty_insert_flip_char(&port->port, ch, flag)) {
+ port->icount.buf_overrun++;
break;
}
-
}
if (hwid)
@@ -1621,10 +1590,7 @@ static u8 mxser_receive_chars_old(struct tty_struct *tty,
static u8 mxser_receive_chars(struct tty_struct *tty,
struct mxser_port *port, u8 status)
{
- if (tty->receive_room == 0 && !port->ldisc_stop_rx)
- mxser_stoprx(tty);
-
- if (!mxser_receive_chars_new(tty, port, status))
+ if (!mxser_receive_chars_new(port, status))
status = mxser_receive_chars_old(tty, port, status);
tty_flip_buffer_push(&port->port);
@@ -1634,7 +1600,7 @@ static u8 mxser_receive_chars(struct tty_struct *tty,
static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port)
{
- int count, cnt;
+ int count;
if (port->x_char) {
outb(port->x_char, port->ioaddr + UART_TX);
@@ -1643,27 +1609,22 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
return;
}
- if (port->port.xmit_buf == NULL)
- return;
-
if (!port->xmit_cnt || tty->flow.stopped ||
(tty->hw_stopped && !mxser_16550A_or_MUST(port))) {
__mxser_stop_tx(port);
return;
}
- cnt = port->xmit_cnt;
count = port->xmit_fifo_size;
do {
outb(port->port.xmit_buf[port->xmit_tail++],
port->ioaddr + UART_TX);
- port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE - 1);
+ port->xmit_tail &= SERIAL_XMIT_SIZE - 1;
+ port->icount.tx++;
if (!--port->xmit_cnt)
break;
} while (--count > 0);
- port->icount.tx += (cnt - port->xmit_cnt);
-
if (port->xmit_cnt < WAKEUP_CHARS)
tty_wakeup(tty);
@@ -1674,7 +1635,7 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
static bool mxser_port_isr(struct mxser_port *port)
{
struct tty_struct *tty;
- u8 iir, msr, status;
+ u8 iir, status;
bool error = false;
iir = inb(port->ioaddr + UART_IIR);
@@ -1683,7 +1644,7 @@ static bool mxser_port_isr(struct mxser_port *port)
iir &= MOXA_MUST_IIR_MASK;
tty = tty_port_tty_get(&port->port);
- if (!tty || port->closing || !tty_port_initialized(&port->port)) {
+ if (!tty) {
status = inb(port->ioaddr + UART_LSR);
outb(port->FCR | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
port->ioaddr + UART_FCR);
@@ -1707,9 +1668,7 @@ static bool mxser_port_isr(struct mxser_port *port)
status = mxser_receive_chars(tty, port, status);
}
- msr = inb(port->ioaddr + UART_MSR);
- if (msr & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(tty, port, msr);
+ mxser_check_modem_status(tty, port);
if (port->board->must_hwid) {
if (iir == 0x02 && (status & UART_LSR_THRE))
@@ -1836,7 +1795,6 @@ static void mxser_initbrd(struct mxser_board *brd, bool high_baud)
tty_port_init(&info->port);
info->port.ops = &mxser_port_ops;
info->board = brd;
- info->ldisc_stop_rx = false;
/* Enhance mode enabled here */
if (brd->must_hwid != MOXA_OTHER_UART)
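
With ldisc_stop_rx and the receive_room checks gone, the driver no longer
second-guesses the flip buffer: tty_insert_flip_char() returns the number of
characters actually stored, so a zero return now drives the new buf_overrun
accounting. The receive loops above reduce to this pattern (a sketch;
have_rx_data() is a placeholder for the GDL/LSR checks above):

while (have_rx_data(port)) {
	u8 ch = inb(port->ioaddr + UART_RX);

	if (!tty_insert_flip_char(&port->port, ch, 0))
		port->icount.buf_overrun++;	/* flip buffer full */
}
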
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 0b96b14bbfe1..0b1808e3a912 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -322,6 +322,7 @@ static int addr_cnt;
#define GSM1_ESCAPE_BITS 0x20
#define XON 0x11
#define XOFF 0x13
+#define ISO_IEC_646_MASK 0x7F
static const struct tty_port_operations gsm_port_ops;
@@ -531,7 +532,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
int olen = 0;
while (len--) {
if (*input == GSM1_SOF || *input == GSM1_ESCAPE
- || *input == XON || *input == XOFF) {
+ || (*input & ISO_IEC_646_MASK) == XON
+ || (*input & ISO_IEC_646_MASK) == XOFF) {
*output++ = GSM1_ESCAPE;
*output++ = *input++ ^ GSM1_ESCAPE_BITS;
olen++;
@@ -2074,8 +2076,6 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
/**
* gsm_error - handle tty error
* @gsm: ldisc data
- * @data: byte received (may be invalid)
- * @flag: error received
*
* Handle an error in the receipt of data for a frame. Currently we just
* go back to hunting for a SOF.
@@ -2083,8 +2083,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
* FIXME: better diagnostics ?
*/
-static void gsm_error(struct gsm_mux *gsm,
- unsigned char data, unsigned char flag)
+static void gsm_error(struct gsm_mux *gsm)
{
gsm->state = GSM_SEARCH;
gsm->io_error++;
@@ -2504,7 +2503,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
case TTY_BREAK:
case TTY_PARITY:
case TTY_FRAME:
- gsm_error(gsm, *cp, flags);
+ gsm_error(gsm);
break;
default:
WARN_ONCE(1, "%s: unknown flag %d\n",
@@ -2690,8 +2689,8 @@ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
return mask;
}
-static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int gsmld_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct gsm_config c;
struct gsm_mux *gsm = tty->disc_data;
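
The gsm_stuff_frame() change masks the comparison with ISO_IEC_646_MASK so
that XON/XOFF are recognized by their low 7 bits, i.e. also when the top bit
is set (presumably per the 3GPP TS 27.010 rule that the flow-control
characters are defined as ISO/IEC 646 values). A self-contained check:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define XON		 0x11
#define XOFF		 0x13
#define ISO_IEC_646_MASK 0x7F

static bool must_escape_flow_ctrl(uint8_t c)
{
	uint8_t low = c & ISO_IEC_646_MASK;	/* compare on 7 bits */

	return low == XON || low == XOFF;
}

int main(void)
{
	assert(must_escape_flow_ctrl(0x11) && must_escape_flow_ctrl(0x91));
	assert(must_escape_flow_ctrl(0x13) && must_escape_flow_ctrl(0x93));
	assert(!must_escape_flow_ctrl('A'));	/* ordinary data untouched */
	return 0;
}
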
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 23ba1fc99df8..94c1ec2dd754 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -593,14 +593,13 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
/**
* n_hdlc_tty_ioctl - process IOCTL system call for the tty device.
* @tty: pointer to tty instance data
- * @file: pointer to open file object for device
* @cmd: IOCTL command code
* @arg: argument for IOCTL call (cmd dependent)
*
* Returns command dependent result.
*/
-static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int n_hdlc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct n_hdlc *n_hdlc = tty->disc_data;
int error = 0;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 5be6d02dc690..8933ef1f83c0 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -186,17 +186,16 @@ static void tty_copy(struct tty_struct *tty, void *to, size_t tail, size_t n)
}
/**
- * n_tty_kick_worker - start input worker (if required)
- * @tty: terminal
+ * n_tty_kick_worker - start input worker (if required)
+ * @tty: terminal
*
- * Re-schedules the flip buffer work if it may have stopped
+ * Re-schedules the flip buffer work if it may have stopped.
*
- * Caller holds exclusive termios_rwsem
- * or
- * n_tty_read()/consumer path:
- * holds non-exclusive termios_rwsem
+ * Locking:
+ * * Caller holds exclusive %termios_rwsem, or
+ * * n_tty_read()/consumer path:
+ * holds non-exclusive %termios_rwsem
*/
-
static void n_tty_kick_worker(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -230,14 +229,12 @@ static ssize_t chars_in_buffer(struct tty_struct *tty)
}
/**
- * n_tty_write_wakeup - asynchronous I/O notifier
- * @tty: tty device
+ * n_tty_write_wakeup - asynchronous I/O notifier
+ * @tty: tty device
*
- * Required for the ptys, serial driver etc. since processes
- * that attach themselves to the master and rely on ASYNC
- * IO must be woken up
+ * Required for the ptys, serial driver etc., since processes that attach
+ * themselves to the master and rely on ASYNC IO must be woken up.
*/
-
static void n_tty_write_wakeup(struct tty_struct *tty)
{
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
@@ -300,16 +297,16 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
}
/**
- * put_tty_queue - add character to tty
- * @c: character
- * @ldata: n_tty data
+ * put_tty_queue - add character to tty
+ * @c: character
+ * @ldata: n_tty data
*
- * Add a character to the tty read_buf queue.
+ * Add a character to the tty read_buf queue.
*
- * n_tty_receive_buf()/producer path:
- * caller holds non-exclusive termios_rwsem
+ * Locking:
+ * * n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive %termios_rwsem
*/
-
static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
{
*read_buf_addr(ldata, ldata->read_head) = c;
@@ -317,16 +314,16 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
}
/**
- * reset_buffer_flags - reset buffer state
- * @ldata: line disc data to reset
+ * reset_buffer_flags - reset buffer state
+ * @ldata: line disc data to reset
*
- * Reset the read buffer counters and clear the flags.
- * Called from n_tty_open() and n_tty_flush_buffer().
+ * Reset the read buffer counters and clear the flags. Called from
+ * n_tty_open() and n_tty_flush_buffer().
*
- * Locking: caller holds exclusive termios_rwsem
- * (or locking is not required)
+ * Locking:
+ * * caller holds exclusive %termios_rwsem, or
+ * * (locking is not required)
*/
-
static void reset_buffer_flags(struct n_tty_data *ldata)
{
ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
@@ -351,19 +348,18 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
}
/**
- * n_tty_flush_buffer - clean input queue
- * @tty: terminal device
+ * n_tty_flush_buffer - clean input queue
+ * @tty: terminal device
*
- * Flush the input buffer. Called when the tty layer wants the
- * buffer flushed (eg at hangup) or when the N_TTY line discipline
- * internally has to clean the pending queue (for example some signals).
+ * Flush the input buffer. Called when the tty layer wants the buffer flushed
+ * (e.g. at hangup) or when the %N_TTY line discipline internally has to clean
+ * the pending queue (for example some signals).
*
- * Holds termios_rwsem to exclude producer/consumer while
- * buffer indices are reset.
+ * Holds %termios_rwsem to exclude producer/consumer while buffer indices are
+ * reset.
*
- * Locking: ctrl.lock, exclusive termios_rwsem
+ * Locking: %ctrl.lock, exclusive %termios_rwsem
*/
-
static void n_tty_flush_buffer(struct tty_struct *tty)
{
down_write(&tty->termios_rwsem);
@@ -376,55 +372,50 @@ static void n_tty_flush_buffer(struct tty_struct *tty)
}
/**
- * is_utf8_continuation - utf8 multibyte check
- * @c: byte to check
+ * is_utf8_continuation - utf8 multibyte check
+ * @c: byte to check
*
- * Returns true if the utf8 character 'c' is a multibyte continuation
- * character. We use this to correctly compute the on screen size
- * of the character when printing
+ * Returns: true if the utf8 character @c is a multibyte continuation
+ * character. We use this to correctly compute the on-screen size of the
+ * character when printing.
*/
-
static inline int is_utf8_continuation(unsigned char c)
{
return (c & 0xc0) == 0x80;
}
/**
- * is_continuation - multibyte check
- * @c: byte to check
- * @tty: terminal device
+ * is_continuation - multibyte check
+ * @c: byte to check
+ * @tty: terminal device
*
- * Returns true if the utf8 character 'c' is a multibyte continuation
- * character and the terminal is in unicode mode.
+ * Returns: true if the utf8 character @c is a multibyte continuation character
+ * and the terminal is in unicode mode.
*/
-
static inline int is_continuation(unsigned char c, struct tty_struct *tty)
{
return I_IUTF8(tty) && is_utf8_continuation(c);
}
/**
- * do_output_char - output one character
- * @c: character (or partial unicode symbol)
- * @tty: terminal device
- * @space: space available in tty driver write buffer
+ * do_output_char - output one character
+ * @c: character (or partial unicode symbol)
+ * @tty: terminal device
+ * @space: space available in tty driver write buffer
*
- * This is a helper function that handles one output character
- * (including special characters like TAB, CR, LF, etc.),
- * doing OPOST processing and putting the results in the
- * tty driver's write buffer.
+ * This is a helper function that handles one output character (including
+ * special characters like TAB, CR, LF, etc.), doing OPOST processing and
+ * putting the results in the tty driver's write buffer.
*
- * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY
- * and NLDLY. They simply aren't relevant in the world today.
- * If you ever need them, add them here.
+ * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY and NLDLY.
+ * They simply aren't relevant in the world today. If you ever need them, add
+ * them here.
*
- * Returns the number of bytes of buffer space used or -1 if
- * no space left.
+ * Returns: the number of bytes of buffer space used or -1 if no space left.
*
- * Locking: should be called under the output_lock to protect
- * the column state and space left in the buffer
+ * Locking: should be called under the %output_lock to protect the column state
+ * and space left in the buffer.
*/
-
static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -487,19 +478,18 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
}
/**
- * process_output - output post processor
- * @c: character (or partial unicode symbol)
- * @tty: terminal device
+ * process_output - output post processor
+ * @c: character (or partial unicode symbol)
+ * @tty: terminal device
*
- * Output one character with OPOST processing.
- * Returns -1 when the output device is full and the character
- * must be retried.
+ * Output one character with OPOST processing.
*
- * Locking: output_lock to protect column state and space left
- * (also, this is called from n_tty_write under the
- * tty layer write lock)
+ * Returns: -1 when the output device is full and the character must be
+ * retried.
+ *
+ * Locking: %output_lock to protect column state and space left (also, this is
+ * called from n_tty_write() under the tty layer write lock).
*/
-
static int process_output(unsigned char c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -518,24 +508,23 @@ static int process_output(unsigned char c, struct tty_struct *tty)
}
/**
- * process_output_block - block post processor
- * @tty: terminal device
- * @buf: character buffer
- * @nr: number of bytes to output
- *
- * Output a block of characters with OPOST processing.
- * Returns the number of characters output.
- *
- * This path is used to speed up block console writes, among other
- * things when processing blocks of output data. It handles only
- * the simple cases normally found and helps to generate blocks of
- * symbols for the console driver and thus improve performance.
- *
- * Locking: output_lock to protect column state and space left
- * (also, this is called from n_tty_write under the
- * tty layer write lock)
+ * process_output_block - block post processor
+ * @tty: terminal device
+ * @buf: character buffer
+ * @nr: number of bytes to output
+ *
+ * Output a block of characters with OPOST processing.
+ *
+ * This path is used to speed up block console writes, among other things when
+ * processing blocks of output data. It handles only the simple cases normally
+ * found and helps to generate blocks of symbols for the console driver and
+ * thus improve performance.
+ *
+ * Returns: the number of characters output.
+ *
+ * Locking: %output_lock to protect column state and space left (also, this is
+ * called from n_tty_write() under the tty layer write lock).
*/
-
static ssize_t process_output_block(struct tty_struct *tty,
const unsigned char *buf, unsigned int nr)
{
@@ -596,30 +585,27 @@ break_out:
}
/**
- * process_echoes - write pending echo characters
- * @tty: terminal device
+ * __process_echoes - write pending echo characters
+ * @tty: terminal device
*
- * Write previously buffered echo (and other ldisc-generated)
- * characters to the tty.
+ * Write previously buffered echo (and other ldisc-generated) characters to the
+ * tty.
*
- * Characters generated by the ldisc (including echoes) need to
- * be buffered because the driver's write buffer can fill during
- * heavy program output. Echoing straight to the driver will
- * often fail under these conditions, causing lost characters and
- * resulting mismatches of ldisc state information.
+ * Characters generated by the ldisc (including echoes) need to be buffered
+ * because the driver's write buffer can fill during heavy program output.
+ * Echoing straight to the driver will often fail under these conditions,
+ * causing lost characters and resulting mismatches of ldisc state information.
*
- * Since the ldisc state must represent the characters actually sent
- * to the driver at the time of the write, operations like certain
- * changes in column state are also saved in the buffer and executed
- * here.
+ * Since the ldisc state must represent the characters actually sent to the
+ * driver at the time of the write, operations like certain changes in column
+ * state are also saved in the buffer and executed here.
*
- * A circular fifo buffer is used so that the most recent characters
- * are prioritized. Also, when control characters are echoed with a
- * prefixed "^", the pair is treated atomically and thus not separated.
+ * A circular fifo buffer is used so that the most recent characters are
+ * prioritized. Also, when control characters are echoed with a prefixed "^",
+ * the pair is treated atomically and thus not separated.
*
- * Locking: callers must hold output_lock
+ * Locking: callers must hold %output_lock.
*/
-
static size_t __process_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -828,13 +814,12 @@ static void flush_echoes(struct tty_struct *tty)
}
/**
- * add_echo_byte - add a byte to the echo buffer
- * @c: unicode byte to echo
- * @ldata: n_tty data
+ * add_echo_byte - add a byte to the echo buffer
+ * @c: unicode byte to echo
+ * @ldata: n_tty data
*
- * Add a character or operation byte to the echo buffer.
+ * Add a character or operation byte to the echo buffer.
*/
-
static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
{
*echo_buf_addr(ldata, ldata->echo_head) = c;
@@ -843,12 +828,11 @@ static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
}
/**
- * echo_move_back_col - add operation to move back a column
- * @ldata: n_tty data
+ * echo_move_back_col - add operation to move back a column
+ * @ldata: n_tty data
*
- * Add an operation to the echo buffer to move back one column.
+ * Add an operation to the echo buffer to move back one column.
*/
-
static void echo_move_back_col(struct n_tty_data *ldata)
{
add_echo_byte(ECHO_OP_START, ldata);
@@ -856,13 +840,12 @@ static void echo_move_back_col(struct n_tty_data *ldata)
}
/**
- * echo_set_canon_col - add operation to set the canon column
- * @ldata: n_tty data
+ * echo_set_canon_col - add operation to set the canon column
+ * @ldata: n_tty data
*
- * Add an operation to the echo buffer to set the canon column
- * to the current column.
+ * Add an operation to the echo buffer to set the canon column to the current
+ * column.
*/
-
static void echo_set_canon_col(struct n_tty_data *ldata)
{
add_echo_byte(ECHO_OP_START, ldata);
@@ -870,20 +853,18 @@ static void echo_set_canon_col(struct n_tty_data *ldata)
}
/**
- * echo_erase_tab - add operation to erase a tab
- * @num_chars: number of character columns already used
- * @after_tab: true if num_chars starts after a previous tab
- * @ldata: n_tty data
- *
- * Add an operation to the echo buffer to erase a tab.
- *
- * Called by the eraser function, which knows how many character
- * columns have been used since either a previous tab or the start
- * of input. This information will be used later, along with
- * canon column (if applicable), to go back the correct number
- * of columns.
+ * echo_erase_tab - add operation to erase a tab
+ * @num_chars: number of character columns already used
+ * @after_tab: true if num_chars starts after a previous tab
+ * @ldata: n_tty data
+ *
+ * Add an operation to the echo buffer to erase a tab.
+ *
+ * Called by the eraser function, which knows how many character columns have
+ * been used since either a previous tab or the start of input. This
+ * information will be used later, along with canon column (if applicable), to
+ * go back the correct number of columns.
*/
-
static void echo_erase_tab(unsigned int num_chars, int after_tab,
struct n_tty_data *ldata)
{
@@ -901,16 +882,15 @@ static void echo_erase_tab(unsigned int num_chars, int after_tab,
}
/**
- * echo_char_raw - echo a character raw
- * @c: unicode byte to echo
- * @ldata: line disc data
+ * echo_char_raw - echo a character raw
+ * @c: unicode byte to echo
+ * @ldata: line disc data
*
- * Echo user input back onto the screen. This must be called only when
- * L_ECHO(tty) is true. Called from the driver receive_buf path.
+ * Echo user input back onto the screen. This must be called only when
+ * L_ECHO(tty) is true. Called from the &tty_driver.receive_buf() path.
*
- * This variant does not treat control characters specially.
+ * This variant does not treat control characters specially.
*/
-
static void echo_char_raw(unsigned char c, struct n_tty_data *ldata)
{
if (c == ECHO_OP_START) {
@@ -922,17 +902,16 @@ static void echo_char_raw(unsigned char c, struct n_tty_data *ldata)
}
/**
- * echo_char - echo a character
- * @c: unicode byte to echo
- * @tty: terminal device
+ * echo_char - echo a character
+ * @c: unicode byte to echo
+ * @tty: terminal device
*
- * Echo user input back onto the screen. This must be called only when
- * L_ECHO(tty) is true. Called from the driver receive_buf path.
+ * Echo user input back onto the screen. This must be called only when
+ * L_ECHO(tty) is true. Called from the &tty_driver.receive_buf() path.
*
- * This variant tags control characters to be echoed as "^X"
- * (where X is the letter representing the control char).
+ * This variant tags control characters to be echoed as "^X" (where X is the
+ * letter representing the control char).
*/
-
static void echo_char(unsigned char c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -948,10 +927,9 @@ static void echo_char(unsigned char c, struct tty_struct *tty)
}
/**
- * finish_erasing - complete erase
- * @ldata: n_tty data
+ * finish_erasing - complete erase
+ * @ldata: n_tty data
*/
-
static inline void finish_erasing(struct n_tty_data *ldata)
{
if (ldata->erasing) {
@@ -961,18 +939,17 @@ static inline void finish_erasing(struct n_tty_data *ldata)
}
/**
- * eraser - handle erase function
- * @c: character input
- * @tty: terminal device
+ * eraser - handle erase function
+ * @c: character input
+ * @tty: terminal device
*
- * Perform erase and necessary output when an erase character is
- * present in the stream from the driver layer. Handles the complexities
- * of UTF-8 multibyte symbols.
+ * Perform erase and necessary output when an erase character is present in the
+ * stream from the driver layer. Handles the complexities of UTF-8 multibyte
+ * symbols.
*
- * n_tty_receive_buf()/producer path:
- * caller holds non-exclusive termios_rwsem
+ * Locking: n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive %termios_rwsem
*/
-
static void eraser(unsigned char c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1091,20 +1068,6 @@ static void eraser(unsigned char c, struct tty_struct *tty)
finish_erasing(ldata);
}
-/**
- * isig - handle the ISIG optio
- * @sig: signal
- * @tty: terminal
- *
- * Called when a signal is being sent due to terminal input.
- * Called from the driver receive_buf path so serialized.
- *
- * Performs input and output flush if !NOFLSH. In this context, the echo
- * buffer is 'output'. The signal is processed first to alert any current
- * readers or writers to discontinue and exit their i/o loops.
- *
- * Locking: ctrl.lock
- */
static void __isig(int sig, struct tty_struct *tty)
{
@@ -1115,6 +1078,20 @@ static void __isig(int sig, struct tty_struct *tty)
}
}
+/**
+ * isig - handle the ISIG option
+ * @sig: signal
+ * @tty: terminal
+ *
+ * Called when a signal is being sent due to terminal input. Called from the
+ * &tty_driver.receive_buf() path, so serialized.
+ *
+ * Performs input and output flush if !NOFLSH. In this context, the echo
+ * buffer is 'output'. The signal is processed first to alert any current
+ * readers or writers to discontinue and exit their i/o loops.
+ *
+ * Locking: %ctrl.lock
+ */
static void isig(int sig, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1151,18 +1128,17 @@ static void isig(int sig, struct tty_struct *tty)
}
/**
- * n_tty_receive_break - handle break
- * @tty: terminal
+ * n_tty_receive_break - handle break
+ * @tty: terminal
*
- * An RS232 break event has been hit in the incoming bitstream. This
- * can cause a variety of events depending upon the termios settings.
+ * An RS232 break event has been hit in the incoming bitstream. This can cause
+ * a variety of events depending upon the termios settings.
*
- * n_tty_receive_buf()/producer path:
- * caller holds non-exclusive termios_rwsem
+ * Locking: n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive %termios_rwsem
*
- * Note: may get exclusive termios_rwsem if flushing input buffer
+ * Note: may get exclusive %termios_rwsem if flushing input buffer
*/
-
static void n_tty_receive_break(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1181,18 +1157,15 @@ static void n_tty_receive_break(struct tty_struct *tty)
}
/**
- * n_tty_receive_overrun - handle overrun reporting
- * @tty: terminal
+ * n_tty_receive_overrun - handle overrun reporting
+ * @tty: terminal
*
- * Data arrived faster than we could process it. While the tty
- * driver has flagged this the bits that were missed are gone
- * forever.
+ * Data arrived faster than we could process it. While the tty driver has
+ * flagged this, the bits that were missed are gone forever.
*
- * Called from the receive_buf path so single threaded. Does not
- * need locking as num_overrun and overrun_time are function
- * private.
+ * Called from the receive_buf path so single threaded. Does not need locking
+ * as num_overrun and overrun_time are function private.
*/
-
static void n_tty_receive_overrun(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1207,15 +1180,15 @@ static void n_tty_receive_overrun(struct tty_struct *tty)
}
/**
- * n_tty_receive_parity_error - error notifier
- * @tty: terminal device
- * @c: character
+ * n_tty_receive_parity_error - error notifier
+ * @tty: terminal device
+ * @c: character
*
- * Process a parity error and queue the right data to indicate
- * the error case if necessary.
+ * Process a parity error and queue the right data to indicate the error case
+ * if necessary.
*
- * n_tty_receive_buf()/producer path:
- * caller holds non-exclusive termios_rwsem
+ * Locking: n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive %termios_rwsem
*/
static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
{
@@ -1247,19 +1220,6 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
process_echoes(tty);
}
-/**
- * n_tty_receive_char - perform processing
- * @tty: terminal device
- * @c: character
- *
- * Process an individual character of input received from the driver.
- * This is serialized with respect to itself by the rules for the
- * driver above.
- *
- * n_tty_receive_buf()/producer path:
- * caller holds non-exclusive termios_rwsem
- * publishes canon_head if canonical mode is active
- */
static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1394,6 +1354,18 @@ handle_newline:
put_tty_queue(c, ldata);
}
+/**
+ * n_tty_receive_char - perform processing
+ * @tty: terminal device
+ * @c: character
+ *
+ * Process an individual character of input received from the driver. This is
+ * serialized with respect to itself by the rules for the driver above.
+ *
+ * Locking: n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive %termios_rwsem
+ * publishes canon_head if canonical mode is active
+ */
static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1594,38 +1566,37 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
}
/**
- * n_tty_receive_buf_common - process input
- * @tty: device to receive input
- * @cp: input chars
- * @fp: flags for each char (if NULL, all chars are TTY_NORMAL)
- * @count: number of input chars in @cp
- * @flow: enable flow control
- *
- * Called by the terminal driver when a block of characters has
- * been received. This function must be called from soft contexts
- * not from interrupt context. The driver is responsible for making
- * calls one at a time and in order (or using flush_to_ldisc)
- *
- * Returns the # of input chars from @cp which were processed.
- *
- * In canonical mode, the maximum line length is 4096 chars (including
- * the line termination char); lines longer than 4096 chars are
- * truncated. After 4095 chars, input data is still processed but
- * not stored. Overflow processing ensures the tty can always
- * receive more input until at least one line can be read.
- *
- * In non-canonical mode, the read buffer will only accept 4095 chars;
- * this provides the necessary space for a newline char if the input
- * mode is switched to canonical.
- *
- * Note it is possible for the read buffer to _contain_ 4096 chars
- * in non-canonical mode: the read buffer could already contain the
- * maximum canon line of 4096 chars when the mode is switched to
- * non-canonical.
- *
- * n_tty_receive_buf()/producer path:
- * claims non-exclusive termios_rwsem
- * publishes commit_head or canon_head
+ * n_tty_receive_buf_common - process input
+ * @tty: device to receive input
+ * @cp: input chars
+ * @fp: flags for each char (if %NULL, all chars are %TTY_NORMAL)
+ * @count: number of input chars in @cp
+ * @flow: enable flow control
+ *
+ * Called by the terminal driver when a block of characters has been received.
+ * This function must be called from soft contexts, not from interrupt context.
+ * The driver is responsible for making calls one at a time and in order (or
+ * using flush_to_ldisc()).
+ *
+ * Returns: the # of input chars from @cp which were processed.
+ *
+ * In canonical mode, the maximum line length is 4096 chars (including the line
+ * termination char); lines longer than 4096 chars are truncated. After 4095
+ * chars, input data is still processed but not stored. Overflow processing
+ * ensures the tty can always receive more input until at least one line can be
+ * read.
+ *
+ * In non-canonical mode, the read buffer will only accept 4095 chars; this
+ * provides the necessary space for a newline char if the input mode is
+ * switched to canonical.
+ *
+ * Note it is possible for the read buffer to _contain_ 4096 chars in
+ * non-canonical mode: the read buffer could already contain the maximum canon
+ * line of 4096 chars when the mode is switched to non-canonical.
+ *
+ * Locking: n_tty_receive_buf()/producer path:
+ * claims non-exclusive %termios_rwsem
+ * publishes commit_head or canon_head
*/
static int
n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
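The canonical-mode limits documented above are straightforward to observe from userspace. A minimal sketch (illustrative only, not part of this patch; assumes stdin is a tty in canonical mode):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char line[8192];
	/* read() returns at most one line; the ldisc has already capped it
	 * at 4096 chars including the terminating newline */
	ssize_t n = read(STDIN_FILENO, line, sizeof(line));

	if (n < 0)
		return 1;
	printf("got %zd bytes\n", n);
	return 0;
}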
@@ -1710,19 +1681,17 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
}
/**
- * n_tty_set_termios - termios data changed
- * @tty: terminal
- * @old: previous data
+ * n_tty_set_termios - termios data changed
+ * @tty: terminal
+ * @old: previous data
*
- * Called by the tty layer when the user changes termios flags so
- * that the line discipline can plan ahead. This function cannot sleep
- * and is protected from re-entry by the tty layer. The user is
- * guaranteed that this function will not be re-entered or in progress
- * when the ldisc is closed.
+ * Called by the tty layer when the user changes termios flags so that the line
+ * discipline can plan ahead. This function cannot sleep and is protected from
+ * re-entry by the tty layer. The user is guaranteed that this function will
+ * not be re-entered or in progress when the ldisc is closed.
*
- * Locking: Caller holds tty->termios_rwsem
+ * Locking: Caller holds @tty->termios_rwsem
*/
-
static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1808,15 +1777,13 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
}
/**
- * n_tty_close - close the ldisc for this tty
- * @tty: device
+ * n_tty_close - close the ldisc for this tty
+ * @tty: device
*
- * Called from the terminal layer when this line discipline is
- * being shut down, either because of a close or becsuse of a
- * discipline change. The function will not be called while other
- * ldisc methods are in progress.
+ * Called from the terminal layer when this line discipline is being shut down,
+ * either because of a close or because of a discipline change. The function
+ * will not be called while other ldisc methods are in progress.
*/
-
static void n_tty_close(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1831,15 +1798,13 @@ static void n_tty_close(struct tty_struct *tty)
}
/**
- * n_tty_open - open an ldisc
- * @tty: terminal to open
+ * n_tty_open - open an ldisc
+ * @tty: terminal to open
*
- * Called when this line discipline is being attached to the
- * terminal device. Can sleep. Called serialized so that no
- * other events will occur in parallel. No further open will occur
- * until a close.
+ * Called when this line discipline is being attached to the terminal device.
+ * Can sleep. Called serialized so that no other events will occur in parallel.
+ * No further open will occur until a close.
*/
-
static int n_tty_open(struct tty_struct *tty)
{
struct n_tty_data *ldata;
@@ -1874,24 +1839,23 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
}
/**
- * copy_from_read_buf - copy read data directly
- * @tty: terminal device
- * @kbp: data
- * @nr: size of data
- *
- * Helper function to speed up n_tty_read. It is only called when
- * ICANON is off; it copies characters straight from the tty queue.
+ * copy_from_read_buf - copy read data directly
+ * @tty: terminal device
+ * @kbp: data
+ * @nr: size of data
*
- * Called under the ldata->atomic_read_lock sem
+ * Helper function to speed up n_tty_read(). It is only called when %ICANON is
+ * off; it copies characters straight from the tty queue.
*
- * Returns true if it successfully copied data, but there is still
- * more data to be had.
+ * Returns: true if it successfully copied data, but there is still more data
+ * to be had.
*
- * n_tty_read()/consumer path:
- * caller holds non-exclusive termios_rwsem
+ * Locking:
+ * * called under the @ldata->atomic_read_lock sem
+ * * n_tty_read()/consumer path:
+ * caller holds non-exclusive %termios_rwsem;
* read_tail published
*/
-
static bool copy_from_read_buf(struct tty_struct *tty,
unsigned char **kbp,
size_t *nr)
@@ -1926,28 +1890,27 @@ static bool copy_from_read_buf(struct tty_struct *tty,
}
/**
- * canon_copy_from_read_buf - copy read data in canonical mode
- * @tty: terminal device
- * @kbp: data
- * @nr: size of data
- *
- * Helper function for n_tty_read. It is only called when ICANON is on;
- * it copies one line of input up to and including the line-delimiting
- * character into the result buffer.
- *
- * NB: When termios is changed from non-canonical to canonical mode and
- * the read buffer contains data, n_tty_set_termios() simulates an EOF
- * push (as if C-d were input) _without_ the DISABLED_CHAR in the buffer.
- * This causes data already processed as input to be immediately available
- * as input although a newline has not been received.
- *
- * Called under the atomic_read_lock mutex
- *
- * n_tty_read()/consumer path:
- * caller holds non-exclusive termios_rwsem
- * read_tail published
+ * canon_copy_from_read_buf - copy read data in canonical mode
+ * @tty: terminal device
+ * @kbp: data
+ * @nr: size of data
+ *
+ * Helper function for n_tty_read(). It is only called when %ICANON is on; it
+ * copies one line of input up to and including the line-delimiting character
+ * into the result buffer.
+ *
+ * Note: When termios is changed from non-canonical to canonical mode and the
+ * read buffer contains data, n_tty_set_termios() simulates an EOF push (as if
+ * C-d were input) _without_ the %DISABLED_CHAR in the buffer. This causes data
+ * already processed as input to be immediately available as input although a
+ * newline has not been received.
+ *
+ * Locking:
+ * * called under the %atomic_read_lock mutex
+ * * n_tty_read()/consumer path:
+ * caller holds non-exclusive %termios_rwsem;
+ * read_tail published
*/
-
static bool canon_copy_from_read_buf(struct tty_struct *tty,
unsigned char **kbp,
size_t *nr)
@@ -1975,7 +1938,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
more = n - (size - tail);
if (eol == N_TTY_BUF_SIZE && more) {
/* scan wrapped without finding set bit */
- eol = find_next_bit(ldata->read_flags, more, 0);
+ eol = find_first_bit(ldata->read_flags, more);
found = eol != more;
} else
found = eol != size;
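The EOF-push behavior described in the note above can be reproduced from userspace. A hedged sketch (assumes fd 0 is a tty; error handling omitted):

#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios t;
	char buf[256];

	tcgetattr(STDIN_FILENO, &t);
	t.c_lflag &= ~ICANON;			/* non-canonical: bytes queue up */
	tcsetattr(STDIN_FILENO, TCSANOW, &t);
	sleep(2);				/* type something, no newline */
	t.c_lflag |= ICANON;
	tcsetattr(STDIN_FILENO, TCSANOW, &t);	/* n_tty simulates an EOF push */

	/* the queued bytes are returned at once, although no newline arrived */
	return read(STDIN_FILENO, buf, sizeof(buf)) <= 0;
}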
@@ -2015,19 +1978,19 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
}
/**
- * job_control - check job control
- * @tty: tty
- * @file: file handle
- *
- * Perform job control management checks on this file/tty descriptor
- * and if appropriate send any needed signals and return a negative
- * error code if action should be taken.
- *
- * Locking: redirected write test is safe
- * current->signal->tty check is safe
- * ctrl.lock to safely reference tty->ctrl.pgrp
+ * job_control - check job control
+ * @tty: tty
+ * @file: file handle
+ *
+ * Perform job control management checks on this @file/@tty descriptor and if
+ * appropriate send any needed signals and return a negative error code if
+ * action should be taken.
+ *
+ * Locking:
+ * * redirected write test is safe
+ * * current->signal->tty check is safe
+ * * ctrl.lock to safely reference @tty->ctrl.pgrp
*/
-
static int job_control(struct tty_struct *tty, struct file *file)
{
/* Job control check -- must be done at start and after
@@ -2043,24 +2006,25 @@ static int job_control(struct tty_struct *tty, struct file *file)
/**
- * n_tty_read - read function for tty
- * @tty: tty device
- * @file: file object
- * @buf: userspace buffer pointer
- * @nr: size of I/O
- *
- * Perform reads for the line discipline. We are guaranteed that the
- * line discipline will not be closed under us but we may get multiple
- * parallel readers and must handle this ourselves. We may also get
- * a hangup. Always called in user context, may sleep.
- *
- * This code must be sure never to sleep through a hangup.
- *
- * n_tty_read()/consumer path:
- * claims non-exclusive termios_rwsem
- * publishes read_tail
+ * n_tty_read - read function for tty
+ * @tty: tty device
+ * @file: file object
+ * @kbuf: kernelspace buffer pointer
+ * @nr: size of I/O
+ * @cookie: if non-%NULL, this is a continuation read
+ * @offset: where to continue reading from (unused in n_tty)
+ *
+ * Perform reads for the line discipline. We are guaranteed that the line
+ * discipline will not be closed under us but we may get multiple parallel
+ * readers and must handle this ourselves. We may also get a hangup. Always
+ * called in user context, may sleep.
+ *
+ * This code must be sure never to sleep through a hangup.
+ *
+ * Locking: n_tty_read()/consumer path:
+ * claims non-exclusive termios_rwsem;
+ * publishes read_tail
*/
-
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
unsigned char *kbuf, size_t nr,
void **cookie, unsigned long offset)
@@ -2232,25 +2196,23 @@ more_to_be_read:
}
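The @cookie and @offset parameters documented above follow the tty_ldisc read contract. A hedged sketch of the caller side, modelled on the tty core's read loop (simplified, not the exact implementation; tty, file, ld and kbuf are assumed to be in scope):

	void *cookie = NULL;
	ssize_t n;

	do {
		n = ld->ops->read(tty, file, kbuf, sizeof(kbuf), &cookie, 0);
		if (n <= 0)
			break;
		/* consume n bytes of kbuf */
	} while (cookie);	/* a non-NULL cookie asks for another call */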
/**
- * n_tty_write - write function for tty
- * @tty: tty device
- * @file: file object
- * @buf: userspace buffer pointer
- * @nr: size of I/O
- *
- * Write function of the terminal device. This is serialized with
- * respect to other write callers but not to termios changes, reads
- * and other such events. Since the receive code will echo characters,
- * thus calling driver write methods, the output_lock is used in
- * the output processing functions called here as well as in the
- * echo processing function to protect the column state and space
- * left in the buffer.
- *
- * This code must be sure never to sleep through a hangup.
- *
- * Locking: output_lock to protect column state and space left
- * (note that the process_output*() functions take this
- * lock themselves)
+ * n_tty_write - write function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Write function of the terminal device. This is serialized with respect to
+ * other write callers but not to termios changes, reads and other such events.
+ * Since the receive code will echo characters, thus calling driver write
+ * methods, the %output_lock is used in the output processing functions called
+ * here as well as in the echo processing function to protect the column state
+ * and space left in the buffer.
+ *
+ * This code must be sure never to sleep through a hangup.
+ *
+ * Locking: output_lock to protect column state and space left
+ * (note that the process_output*() functions take this lock themselves)
*/
static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
@@ -2341,19 +2303,19 @@ break_out:
}
/**
- * n_tty_poll - poll method for N_TTY
- * @tty: terminal device
- * @file: file accessing it
- * @wait: poll table
+ * n_tty_poll - poll method for N_TTY
+ * @tty: terminal device
+ * @file: file accessing it
+ * @wait: poll table
+ *
+ * Called when the line discipline is asked to poll() for data or for special
+ * events. This code is not serialized with respect to other events save
+ * open/close.
*
- * Called when the line discipline is asked to poll() for data or
- * for special events. This code is not serialized with respect to
- * other events save open/close.
+ * This code must be sure never to sleep through a hangup.
*
- * This code must be sure never to sleep through a hangup.
- * Called without the kernel lock held - fine
+ * Locking: called without the kernel lock held -- fine.
*/
-
static __poll_t n_tty_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
@@ -2400,8 +2362,8 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
return nr;
}
-static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int n_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct n_tty_data *ldata = tty->disc_data;
int retval;
diff --git a/drivers/tty/rpmsg_tty.c b/drivers/tty/rpmsg_tty.c
index dae2a4e44f38..29db413bbc03 100644
--- a/drivers/tty/rpmsg_tty.c
+++ b/drivers/tty/rpmsg_tty.c
@@ -50,10 +50,17 @@ static int rpmsg_tty_cb(struct rpmsg_device *rpdev, void *data, int len, void *p
static int rpmsg_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct rpmsg_tty_port *cport = idr_find(&tty_idr, tty->index);
+ struct tty_port *port;
tty->driver_data = cport;
- return tty_port_install(&cport->port, driver, tty);
+ port = tty_port_get(&cport->port);
+ return tty_port_install(port, driver, tty);
+}
+
+static void rpmsg_tty_cleanup(struct tty_struct *tty)
+{
+ tty_port_put(tty->port);
}
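Together with the tty_port_put() calls added below, the install/cleanup hooks give the port a reference-counted lifecycle. Sketched here for clarity (assumes rpmsg_tty_alloc_cport() takes the initial reference via tty_port_init(), which this hunk does not show):

	rpmsg_tty_probe()    -> tty_port_init()   refcount = 1
	rpmsg_tty_install()  -> tty_port_get()    refcount = 2
	rpmsg_tty_cleanup()  -> tty_port_put()    refcount = 1
	rpmsg_tty_remove()   -> tty_port_put()    refcount = 0
	                         -> .destruct = rpmsg_tty_destruct_port() frees cport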
static int rpmsg_tty_open(struct tty_struct *tty, struct file *filp)
@@ -106,12 +113,19 @@ static unsigned int rpmsg_tty_write_room(struct tty_struct *tty)
return size;
}
+static void rpmsg_tty_hangup(struct tty_struct *tty)
+{
+ tty_port_hangup(tty->port);
+}
+
static const struct tty_operations rpmsg_tty_ops = {
.install = rpmsg_tty_install,
.open = rpmsg_tty_open,
.close = rpmsg_tty_close,
.write = rpmsg_tty_write,
.write_room = rpmsg_tty_write_room,
+ .hangup = rpmsg_tty_hangup,
+ .cleanup = rpmsg_tty_cleanup,
};
static struct rpmsg_tty_port *rpmsg_tty_alloc_cport(void)
@@ -137,8 +151,10 @@ static struct rpmsg_tty_port *rpmsg_tty_alloc_cport(void)
return cport;
}
-static void rpmsg_tty_release_cport(struct rpmsg_tty_port *cport)
+static void rpmsg_tty_destruct_port(struct tty_port *port)
{
+ struct rpmsg_tty_port *cport = container_of(port, struct rpmsg_tty_port, port);
+
mutex_lock(&idr_lock);
idr_remove(&tty_idr, cport->id);
mutex_unlock(&idr_lock);
@@ -146,7 +162,10 @@ static void rpmsg_tty_release_cport(struct rpmsg_tty_port *cport)
kfree(cport);
}
-static const struct tty_port_operations rpmsg_tty_port_ops = { };
+static const struct tty_port_operations rpmsg_tty_port_ops = {
+ .destruct = rpmsg_tty_destruct_port,
+};
+
static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
{
@@ -166,7 +185,8 @@ static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
cport->id, dev);
if (IS_ERR(tty_dev)) {
ret = dev_err_probe(dev, PTR_ERR(tty_dev), "Failed to register tty port\n");
- goto err_destroy;
+ tty_port_put(&cport->port);
+ return ret;
}
cport->rpdev = rpdev;
@@ -177,12 +197,6 @@ static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
rpdev->src, rpdev->dst, cport->id);
return 0;
-
-err_destroy:
- tty_port_destroy(&cport->port);
- rpmsg_tty_release_cport(cport);
-
- return ret;
}
static void rpmsg_tty_remove(struct rpmsg_device *rpdev)
@@ -192,13 +206,11 @@ static void rpmsg_tty_remove(struct rpmsg_device *rpdev)
dev_dbg(&rpdev->dev, "Removing rpmsg tty device %d\n", cport->id);
/* User hang up to release the tty */
- if (tty_port_initialized(&cport->port))
- tty_port_tty_hangup(&cport->port, false);
+ tty_port_tty_hangup(&cport->port, false);
tty_unregister_device(rpmsg_tty_driver, cport->id);
- tty_port_destroy(&cport->port);
- rpmsg_tty_release_cport(cport);
+ tty_port_put(&cport->port);
}
static struct rpmsg_device_id rpmsg_driver_tty_id_table[] = {
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index f1324fe99378..92e3433276f8 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -727,10 +727,24 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
{
acpi_status status;
+ bool skip;
+ int ret;
if (!has_acpi_companion(ctrl->dev.parent))
return -ENODEV;
+ /*
+ * Skip registration on boards where the ACPI tables are known to
+ * contain buggy devices. Note serdev_controller_add() must still
+ * succeed in this case, so that the proper serdev devices can be
+ * added "manually" later.
+ */
+ ret = acpi_quirk_skip_serdev_enumeration(ctrl->dev.parent, &skip);
+ if (ret)
+ return ret;
+ if (skip)
+ return 0;
+
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
SERDEV_ACPI_MAX_SCAN_DEPTH,
acpi_serdev_add_device, NULL, ctrl, NULL);
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 6473361525d1..db784ace25d8 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -241,16 +241,8 @@ static inline int serial8250_in_MCR(struct uart_8250_port *up)
return mctrl;
}
-#if defined(__alpha__) && !defined(CONFIG_PCI)
-/*
- * Digital did something really horribly wrong with the OUT1 and OUT2
- * lines on at least some ALPHA's. The failure mode is that if either
- * is cleared, the machine locks up with endless interrupts.
- */
-#define ALPHA_KLUDGE_MCR (UART_MCR_OUT2 | UART_MCR_OUT1)
-#else
-#define ALPHA_KLUDGE_MCR 0
-#endif
+bool alpha_jensen(void);
+void alpha_jensen_set_mctrl(struct uart_port *port, unsigned int mctrl);
#ifdef CONFIG_SERIAL_8250_PNP
int serial8250_pnp_init(void);
diff --git a/drivers/tty/serial/8250/8250_alpha.c b/drivers/tty/serial/8250/8250_alpha.c
new file mode 100644
index 000000000000..58e70328aa4d
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_alpha.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <asm/machvec.h>
+#include "8250.h"
+
+bool alpha_jensen(void)
+{
+ return !strcmp(alpha_mv.vector_name, "Jensen");
+}
+
+void alpha_jensen_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /*
+ * Digital did something really horribly wrong with the OUT1 and OUT2
+ * lines on Alpha Jensen. The failure mode is that if either is
+ * cleared, the machine locks up with endless interrupts.
+ */
+ mctrl |= TIOCM_OUT1 | TIOCM_OUT2;
+
+ serial8250_do_set_mctrl(port, mctrl);
+}
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 5163d60756b7..9b878d023dac 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -941,7 +941,7 @@ static int brcmuart_probe(struct platform_device *pdev)
struct brcmuart_priv *priv;
struct clk *baud_mux_clk;
struct uart_8250_port up;
- struct resource *irq;
+ int irq;
void __iomem *membase = NULL;
resource_size_t mapbase = 0;
u32 clk_rate = 0;
@@ -952,11 +952,9 @@ static int brcmuart_probe(struct platform_device *pdev)
"uart", "dma_rx", "dma_tx", "dma_intr2", "dma_arb"
};
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(dev, "missing irq\n");
- return -EINVAL;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
priv = devm_kzalloc(dev, sizeof(struct brcmuart_priv),
GFP_KERNEL);
if (!priv)
@@ -1044,7 +1042,7 @@ static int brcmuart_probe(struct platform_device *pdev)
up.port.dev = dev;
up.port.mapbase = mapbase;
up.port.membase = membase;
- up.port.irq = irq->start;
+ up.port.irq = irq;
up.port.handle_irq = brcmuart_handle_irq;
up.port.regshift = 2;
up.port.iotype = of_device_is_big_endian(np) ?
@@ -1076,14 +1074,18 @@ static int brcmuart_probe(struct platform_device *pdev)
priv->rx_bufs = dma_alloc_coherent(dev,
priv->rx_size,
&priv->rx_addr, GFP_KERNEL);
- if (!priv->rx_bufs)
+ if (!priv->rx_bufs) {
+ ret = -ENOMEM;
goto err;
+ }
priv->tx_size = UART_XMIT_SIZE;
priv->tx_buf = dma_alloc_coherent(dev,
priv->tx_size,
&priv->tx_addr, GFP_KERNEL);
- if (!priv->tx_buf)
+ if (!priv->tx_buf) {
+ ret = -ENOMEM;
goto err;
+ }
}
ret = serial8250_register_8250_port(&up);
@@ -1097,6 +1099,7 @@ static int brcmuart_probe(struct platform_device *pdev)
if (priv->dma_enabled) {
dma_irq = platform_get_irq_byname(pdev, "dma");
if (dma_irq < 0) {
+ ret = dma_irq;
dev_err(dev, "no IRQ resource info\n");
goto err1;
}
@@ -1116,7 +1119,7 @@ err1:
err:
brcmuart_free_bufs(dev, priv);
brcmuart_arbitration(priv, 0);
- return -ENODEV;
+ return ret;
}
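This probe path, like the altera_jtaguart, altera_uart and ar933x conversions later in this series, moves from raw IORESOURCE_IRQ lookup to platform_get_irq(). The idiom as a generic sketch (example_probe is a hypothetical name):

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* platform_get_irq() already logged the error */

	/* irq is a valid, mapped Linux IRQ number from here on */
	return 0;
}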
static int brcmuart_remove(struct platform_device *pdev)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 1ce193daea7f..01d30f6ed8fb 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -509,11 +509,10 @@ static void __init serial8250_isa_init_ports(void)
up->ops = &univ8250_driver_ops;
- /*
- * ALPHA_KLUDGE_MCR needs to be killed.
- */
- up->mcr_mask = ~ALPHA_KLUDGE_MCR;
- up->mcr_force = ALPHA_KLUDGE_MCR;
+ if (IS_ENABLED(CONFIG_ALPHA_JENSEN) ||
+ (IS_ENABLED(CONFIG_ALPHA_GENERIC) && alpha_jensen()))
+ port->set_mctrl = alpha_jensen_set_mctrl;
+
serial8250_set_defaults(up);
}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 53f57c3b9f42..1769808031c5 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -414,6 +414,8 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
if (of_device_is_compatible(np, "marvell,armada-38x-uart"))
p->serial_out = dw8250_serial_out38x;
+ if (of_device_is_compatible(np, "starfive,jh7100-uart"))
+ p->set_termios = dw8250_do_set_termios;
} else if (acpi_dev_present("APMC0D08", NULL, -1)) {
p->iotype = UPIO_MEM32;
@@ -696,6 +698,7 @@ static const struct of_device_id dw8250_of_match[] = {
{ .compatible = "cavium,octeon-3860-uart" },
{ .compatible = "marvell,armada-38x-uart" },
{ .compatible = "renesas,rzn1-uart" },
+ { .compatible = "starfive,jh7100-uart" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dw8250_of_match);
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index bce28729dd7b..be8626234627 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -83,8 +83,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->mapsize = resource_size(&resource);
/* Check for shifted address mapping */
- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
+ if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
+ if (prop >= port->mapsize) {
+ dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
+ prop, &port->mapsize);
+ ret = -EINVAL;
+ goto err_unprepare;
+ }
+
port->mapbase += prop;
+ port->mapsize -= prop;
+ }
port->iotype = UPIO_MEM;
if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
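A worked example of the new reg-offset bounds check (values are illustrative, added for clarity):

	/*
	 * reg = <0x1000 0x100> gives mapbase 0x1000, mapsize 0x100.
	 * reg-offset = 0x200 now fails with -EINVAL instead of mapping
	 * past the region; a valid reg-offset = 0x20 yields:
	 *
	 *   mapbase = 0x1000 + 0x20 = 0x1020
	 *   mapsize = 0x100  - 0x20 = 0xe0
	 */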
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 60f8fffdfd77..e17e97ea86fa 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1278,7 +1278,7 @@ static int pci_quatech_init(struct pci_dev *dev)
outl(inl(base + 0x38) | 0x00002000, base + 0x38);
tmp = inl(base + 0x3c);
outl(tmp | 0x01000000, base + 0x3c);
- outl(tmp &= ~0x01000000, base + 0x3c);
+ outl(tmp & ~0x01000000, base + 0x3c);
}
}
return 0;
@@ -1318,89 +1318,6 @@ static int pci_default_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, board->reg_shift);
}
-static void
-pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot, unsigned int quot_frac)
-{
- int scr;
- int lcr;
-
- for (scr = 16; scr > 4; scr--) {
- unsigned int maxrate = port->uartclk / scr;
- unsigned int divisor = max(maxrate / baud, 1U);
- int delta = maxrate / divisor - baud;
-
- if (baud > maxrate + baud / 50)
- continue;
-
- if (delta > baud / 50)
- divisor++;
-
- if (divisor > 0xffff)
- continue;
-
- /* Update delta due to possible divisor change */
- delta = maxrate / divisor - baud;
- if (abs(delta) < baud / 50) {
- lcr = serial_port_in(port, UART_LCR);
- serial_port_out(port, UART_LCR, lcr | 0x80);
- serial_port_out(port, UART_DLL, divisor & 0xff);
- serial_port_out(port, UART_DLM, divisor >> 8 & 0xff);
- serial_port_out(port, 2, 16 - scr);
- serial_port_out(port, UART_LCR, lcr);
- return;
- }
- }
-}
-static int pci_pericom_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *port, int idx)
-{
- unsigned int bar, offset = board->first_offset, maxnr;
-
- bar = FL_GET_BASE(board->flags);
- if (board->flags & FL_BASE_BARS)
- bar += idx;
- else
- offset += idx * board->uart_offset;
-
-
- maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
- (board->reg_shift + 3);
-
- if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
- return 1;
-
- port->port.set_divisor = pericom_do_set_divisor;
-
- return setup_port(priv, port, bar, offset, board->reg_shift);
-}
-
-static int pci_pericom_setup_four_at_eight(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *port, int idx)
-{
- unsigned int bar, offset = board->first_offset, maxnr;
-
- bar = FL_GET_BASE(board->flags);
- if (board->flags & FL_BASE_BARS)
- bar += idx;
- else
- offset += idx * board->uart_offset;
-
- if (idx==3)
- offset = 0x38;
-
- maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
- (board->reg_shift + 3);
-
- if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
- return 1;
-
- port->port.set_divisor = pericom_do_set_divisor;
-
- return setup_port(priv, port, bar, offset, board->reg_shift);
-}
static int
ce4100_serial_setup(struct serial_private *priv,
@@ -1886,42 +1803,6 @@ pci_moxa_setup(struct serial_private *priv,
#define PCIE_DEVICE_ID_WCH_CH384_8S 0x3853
#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
-#define PCI_VENDOR_ID_ACCESIO 0x494f
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB 0x105C
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S 0x105E
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB 0x1091
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2 0x1093
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB 0x1099
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4 0x109B
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB 0x10D1
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM 0x10D3
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB 0x10DA
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM 0x10DC
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1 0x1108
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2 0x1110
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2 0x1111
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4 0x1118
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4 0x1119
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S 0x1152
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S 0x115A
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2 0x1190
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2 0x1191
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4 0x1198
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4 0x1199
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM 0x11D0
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4 0x105A
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4 0x105B
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8 0x106A
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8 0x106B
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4 0x1098
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8 0x10A9
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM 0x10D9
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
-
-
#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
#define PCI_DEVICE_ID_MOXA_CP104EL_A 0x1045
@@ -2199,16 +2080,6 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.exit = pci_plx9050_exit,
},
/*
- * Pericom (Only 7954 - It have a offset jump for port 4)
- */
- {
- .vendor = PCI_VENDOR_ID_PERICOM,
- .device = PCI_DEVICE_ID_PERICOM_PI7C9X7954,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- /*
* PLX
*/
{
@@ -2238,125 +2109,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.setup = pci_default_setup,
.exit = pci_plx9050_exit,
},
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_ANY_ID,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup,
- }, /*
+ /*
* SBS Technologies, Inc., PMC-OCTALPRO 232
*/
{
@@ -2948,10 +2701,6 @@ enum pci_board_num_t {
pbn_wch382_2,
pbn_wch384_4,
pbn_wch384_8,
- pbn_pericom_PI7C9X7951,
- pbn_pericom_PI7C9X7952,
- pbn_pericom_PI7C9X7954,
- pbn_pericom_PI7C9X7958,
pbn_sunix_pci_1s,
pbn_sunix_pci_2s,
pbn_sunix_pci_4s,
@@ -3696,33 +3445,6 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 8,
.first_offset = 0x00,
},
- /*
- * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
- */
- [pbn_pericom_PI7C9X7951] = {
- .flags = FL_BASE0,
- .num_ports = 1,
- .base_baud = 921600,
- .uart_offset = 0x8,
- },
- [pbn_pericom_PI7C9X7952] = {
- .flags = FL_BASE0,
- .num_ports = 2,
- .base_baud = 921600,
- .uart_offset = 0x8,
- },
- [pbn_pericom_PI7C9X7954] = {
- .flags = FL_BASE0,
- .num_ports = 4,
- .base_baud = 921600,
- .uart_offset = 0x8,
- },
- [pbn_pericom_PI7C9X7958] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 921600,
- .uart_offset = 0x8,
- },
[pbn_sunix_pci_1s] = {
.num_ports = 1,
.base_baud = 921600,
@@ -3834,6 +3556,10 @@ static const struct pci_device_id blacklist[] = {
{ PCI_VDEVICE(EXAR, PCI_ANY_ID), },
{ PCI_VDEVICE(COMMTECH, PCI_ANY_ID), },
+ /* Pericom devices */
+ { PCI_VDEVICE(PERICOM, PCI_ANY_ID), },
+ { PCI_VDEVICE(ACCESSIO, PCI_ANY_ID), },
+
/* End of the black list */
{ }
};
@@ -5028,127 +4754,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b3_8_115200 },
/*
- * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
- */
- { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951,
- PCI_ANY_ID, PCI_ANY_ID,
- 0,
- 0, pbn_pericom_PI7C9X7951 },
- { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952,
- PCI_ANY_ID, PCI_ANY_ID,
- 0,
- 0, pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7954,
- PCI_ANY_ID, PCI_ANY_ID,
- 0,
- 0, pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7958,
- PCI_ANY_ID, PCI_ANY_ID,
- 0,
- 0, pbn_pericom_PI7C9X7958 },
- /*
- * ACCES I/O Products quad
- */
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7951 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- /*
* Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
*/
{ PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
@@ -5174,8 +4779,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
pbn_b2_4_115200 },
+ /* Brainboxes Devices */
+ /*
+ * Brainboxes UC-101
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-235/246
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_1_115200 },
+ /*
+ * Brainboxes UC-257
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
/*
- * BrainBoxes UC-260
+ * Brainboxes UC-260/271/701/756
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0D21,
PCI_ANY_ID, PCI_ANY_ID,
@@ -5183,7 +4810,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_b2_4_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0E34,
PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-268
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0841,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-275/279
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0881,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_8_115200 },
+ /*
+ * Brainboxes UC-302
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x08E1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-310
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x08C1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-313
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-320/324
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A61,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_1_115200 },
+ /*
+ * Brainboxes UC-346
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-357
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A81,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-368
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C41,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-420/431
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0921,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
pbn_b2_4_115200 },
/*
* Perle PCI-RAS cards
diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c
new file mode 100644
index 000000000000..025b055363c3
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_pericom.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for Pericom UART */
+
+#include <linux/bits.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/pci.h>
+
+#include "8250.h"
+
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SDB 0x1051
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2S 0x1053
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_4 0x105a
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_4 0x105b
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SDB 0x105c
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4S 0x105e
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_8 0x106a
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_8 0x106b
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_2DB 0x1091
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_2 0x1093
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4 0x1098
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4DB 0x1099
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_4 0x109b
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_8 0x10a9
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SMDB 0x10d1
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2SM 0x10d3
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SM 0x10d9
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SMDB 0x10da
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4SM 0x10dc
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_8SM 0x10e9
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_1 0x1108
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_2 0x1110
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_2 0x1111
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_4 0x1118
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_4 0x1119
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2S 0x1152
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4S 0x115a
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_2 0x1190
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_2 0x1191
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_4 0x1198
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_4 0x1199
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2SM 0x11d0
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4SM 0x11d8
+
+struct pericom8250 {
+ void __iomem *virt;
+ unsigned int nr;
+ int line[];
+};
+
+static void pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac)
+{
+ int scr;
+
+ for (scr = 16; scr > 4; scr--) {
+ unsigned int maxrate = port->uartclk / scr;
+ unsigned int divisor = max(maxrate / baud, 1U);
+ int delta = maxrate / divisor - baud;
+
+ if (baud > maxrate + baud / 50)
+ continue;
+
+ if (delta > baud / 50)
+ divisor++;
+
+ if (divisor > 0xffff)
+ continue;
+
+ /* Update delta due to possible divisor change */
+ delta = maxrate / divisor - baud;
+ if (abs(delta) < baud / 50) {
+ struct uart_8250_port *up = up_to_u8250p(port);
+ int lcr = serial_port_in(port, UART_LCR);
+
+ serial_port_out(port, UART_LCR, lcr | 0x80);
+ serial_dl_write(up, divisor);
+ serial_port_out(port, 2, 16 - scr);
+ serial_port_out(port, UART_LCR, lcr);
+ return;
+ }
+ }
+}
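+
+/*
+ * Worked example (illustrative, added for clarity; not part of the
+ * original patch): with this driver's uartclk = 921600 * 16 = 14745600
+ * and baud = 230400, the first pass already lands exactly:
+ *
+ *   scr = 16: maxrate = 14745600 / 16 = 921600
+ *             divisor = 921600 / 230400 = 4
+ *             delta   = 921600 / 4 - 230400 = 0
+ *
+ * so DLL/DLM get 4 and the register at offset 2 gets 16 - scr = 0.
+ */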
+
+static int pericom8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ unsigned int nr, i, bar = 0, maxnr;
+ struct pericom8250 *pericom;
+ struct uart_8250_port uart;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ maxnr = pci_resource_len(pdev, bar) >> 3;
+
+ if (pdev->vendor == PCI_VENDOR_ID_PERICOM)
+ nr = pdev->device & 0x0f;
+ else if (pdev->vendor == PCI_VENDOR_ID_ACCESSIO)
+ nr = BIT(((pdev->device & 0x38) >> 3) - 1);
+ else
+ nr = 1;
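+ /*
+ * Decode note (added for clarity, not in the original patch):
+ * ACCESSIO device IDs carry the port count in bits 5:3, e.g.
+ * 0x105c (PCIE_COM_4SDB): ((0x5c & 0x38) >> 3) - 1 = 2, BIT(2) = 4
+ * ports; 0x1053 (MPCIE_COM_2S): BIT(1) = 2 ports.
+ */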
+
+ pericom = devm_kzalloc(&pdev->dev, struct_size(pericom, line, nr), GFP_KERNEL);
+ if (!pericom)
+ return -ENOMEM;
+
+ pericom->virt = pcim_iomap(pdev, bar, 0);
+ if (!pericom->virt)
+ return -ENOMEM;
+
+ memset(&uart, 0, sizeof(uart));
+
+ uart.port.dev = &pdev->dev;
+ uart.port.irq = pdev->irq;
+ uart.port.private_data = pericom;
+ uart.port.iotype = UPIO_PORT;
+ uart.port.uartclk = 921600 * 16;
+ uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ | UPF_MAGIC_MULTIPLIER;
+ uart.port.set_divisor = pericom_do_set_divisor;
+ for (i = 0; i < nr && i < maxnr; i++) {
+ unsigned int offset = (i == 3 && nr == 4) ? 0x38 : i * 0x8;
+
+ uart.port.iobase = pci_resource_start(pdev, bar) + offset;
+
+ dev_dbg(&pdev->dev, "Setup PCI port: port %lx, irq %d, type %d\n",
+ uart.port.iobase, uart.port.irq, uart.port.iotype);
+
+ pericom->line[i] = serial8250_register_8250_port(&uart);
+ if (pericom->line[i] < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't register serial port %lx, irq %d, type %d, error %d\n",
+ uart.port.iobase, uart.port.irq,
+ uart.port.iotype, pericom->line[i]);
+ break;
+ }
+ }
+ pericom->nr = i;
+
+ pci_set_drvdata(pdev, pericom);
+ return 0;
+}
+
+static void pericom8250_remove(struct pci_dev *pdev)
+{
+ struct pericom8250 *pericom = pci_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < pericom->nr; i++)
+ serial8250_unregister_port(pericom->line[i]);
+}
+
+static const struct pci_device_id pericom8250_pci_ids[] = {
+ /*
+ * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
+ * (Only 7954 has an offset jump for port 4)
+ */
+ { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951) },
+ { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952) },
+ { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7954) },
+ { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7958) },
+
+ /*
+ * ACCES I/O Products quad
+ * (Only 7954 has an offset jump for port 4)
+ */
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SDB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2S) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SDB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4S) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_8) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_8) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_2DB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_2) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4DB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_8) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SMDB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2SM) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SM) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SMDB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4SM) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_8SM) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_1) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_2) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_2) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2S) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4S) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_2) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_2) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2SM) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4SM) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pericom8250_pci_ids);
+
+static struct pci_driver pericom8250_pci_driver = {
+ .name = "8250_pericom",
+ .id_table = pericom8250_pci_ids,
+ .probe = pericom8250_probe,
+ .remove = pericom8250_remove,
+};
+module_pci_driver(pericom8250_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Pericom UART driver");
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 46e2079ad1aa..3b12bfc1ed67 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2026,7 +2026,7 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
mcr = serial8250_TIOCM_to_MCR(mctrl);
- mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
+ mcr |= up->mcr;
serial8250_out_MCR(up, mcr);
}
@@ -3092,7 +3092,7 @@ static ssize_t rx_trig_bytes_show(struct device *dev,
if (rxtrig_bytes < 0)
return rxtrig_bytes;
- return snprintf(buf, PAGE_SIZE, "%d\n", rxtrig_bytes);
+ return sysfs_emit(buf, "%d\n", rxtrig_bytes);
}
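The snprintf() -> sysfs_emit() conversion above is the standard sysfs show() pattern. A generic sketch (example_show is a hypothetical attribute callback):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() knows @buf is a full PAGE_SIZE sysfs buffer and
	 * checks for misuse, unlike a bare snprintf() */
	return sysfs_emit(buf, "%d\n", 42);
}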
static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 8cd11aa63ed5..9d415a38cc71 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -498,6 +498,14 @@ config SERIAL_8250_MID
present on the UART found on Intel Medfield SOC and various other
Intel platforms.
+config SERIAL_8250_PERICOM
+ tristate "Support for Pericom and Acces I/O serial ports"
+ default SERIAL_8250
+ depends on SERIAL_8250 && PCI
+ help
+ Selecting this option will enable handling of the extra features
+ present on the Pericom and Acces I/O UARTs.
+
config SERIAL_8250_PXA
tristate "PXA serial port support"
depends on SERIAL_8250
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index b9bcd73c8997..bee908f99ea0 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -5,6 +5,8 @@
obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o
8250-y := 8250_core.o
+8250-$(CONFIG_ALPHA_GENERIC) += 8250_alpha.o
+8250-$(CONFIG_ALPHA_JENSEN) += 8250_alpha.o
8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
8250_base-y := 8250_port.o
8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
@@ -36,6 +38,7 @@ obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o
obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
+obj-$(CONFIG_SERIAL_8250_PERICOM) += 8250_pericom.o
obj-$(CONFIG_SERIAL_8250_PXA) += 8250_pxa.o
obj-$(CONFIG_SERIAL_8250_TEGRA) += 8250_tegra.o
obj-$(CONFIG_SERIAL_8250_BCM7271) += 8250_bcm7271.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index fc543ac97c13..0e5ccb25bdb1 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -263,7 +263,7 @@ config SERIAL_SAMSUNG_UARTS
config SERIAL_SAMSUNG_CONSOLE
bool "Support for console on Samsung SoC serial port"
- depends on SERIAL_SAMSUNG=y
+ depends on SERIAL_SAMSUNG
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 23c4e0e79694..37bffe406b18 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -418,8 +418,9 @@ static int altera_jtaguart_probe(struct platform_device *pdev)
struct altera_jtaguart_platform_uart *platp =
dev_get_platdata(&pdev->dev);
struct uart_port *port;
- struct resource *res_irq, *res_mem;
+ struct resource *res_mem;
int i = pdev->id;
+ int irq;
/* -1 emphasizes that the platform must have one port, no .N suffix */
if (i == -1)
@@ -438,9 +439,11 @@ static int altera_jtaguart_probe(struct platform_device *pdev)
else
return -ENODEV;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res_irq)
- port->irq = res_irq->start;
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0 && irq != -ENXIO)
+ return irq;
+ if (irq > 0)
+ port->irq = irq;
else if (platp)
port->irq = platp->irq;
else
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 7c5f4e966b59..64a352b40197 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -553,7 +553,6 @@ static int altera_uart_probe(struct platform_device *pdev)
struct altera_uart_platform_uart *platp = dev_get_platdata(&pdev->dev);
struct uart_port *port;
struct resource *res_mem;
- struct resource *res_irq;
int i = pdev->id;
int ret;
@@ -577,9 +576,11 @@ static int altera_uart_probe(struct platform_device *pdev)
else
return -EINVAL;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res_irq)
- port->irq = res_irq->start;
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ if (ret > 0)
+ port->irq = ret;
else if (platp)
port->irq = platp->irq;
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index e744b953ca34..47654073123d 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -446,14 +446,11 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
if ((termios->c_cflag & CREAD) == 0)
uap->port.ignore_status_mask |= UART_DUMMY_RSR_RX;
- /* first, disable everything */
old_cr = readb(uap->port.membase + UART010_CR) & ~UART010_CR_MSIE;
if (UART_ENABLE_MS(port, termios->c_cflag))
old_cr |= UART010_CR_MSIE;
- writel(0, uap->port.membase + UART010_CR);
-
/* Set baud rate */
quot -= 1;
writel((quot & 0xf00) >> 8, uap->port.membase + UART010_LCRM);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 52518a606c06..ba053a68529f 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -188,38 +188,6 @@ static struct vendor_data vendor_st = {
.get_fifosize = get_fifosize_st,
};
-static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
- [REG_DR] = ZX_UART011_DR,
- [REG_FR] = ZX_UART011_FR,
- [REG_LCRH_RX] = ZX_UART011_LCRH,
- [REG_LCRH_TX] = ZX_UART011_LCRH,
- [REG_IBRD] = ZX_UART011_IBRD,
- [REG_FBRD] = ZX_UART011_FBRD,
- [REG_CR] = ZX_UART011_CR,
- [REG_IFLS] = ZX_UART011_IFLS,
- [REG_IMSC] = ZX_UART011_IMSC,
- [REG_RIS] = ZX_UART011_RIS,
- [REG_MIS] = ZX_UART011_MIS,
- [REG_ICR] = ZX_UART011_ICR,
- [REG_DMACR] = ZX_UART011_DMACR,
-};
-
-static unsigned int get_fifosize_zte(struct amba_device *dev)
-{
- return 16;
-}
-
-static struct vendor_data vendor_zte = {
- .reg_offset = pl011_zte_offsets,
- .access_32b = true,
- .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
- .fr_busy = ZX_UART01x_FR_BUSY,
- .fr_dsr = ZX_UART01x_FR_DSR,
- .fr_cts = ZX_UART01x_FR_CTS,
- .fr_ri = ZX_UART011_FR_RI,
- .get_fifosize = get_fifosize_zte,
-};
-
/* Deals with DMA transactions */
struct pl011_sgbuf {
@@ -262,7 +230,6 @@ struct uart_amba_port {
unsigned int im; /* interrupt mask */
unsigned int old_status;
unsigned int fifosize; /* vendor-specific */
- unsigned int old_cr; /* state during shutdown */
unsigned int fixed_baud; /* vendor-set fixed baud rate */
char type[12];
bool rs485_tx_started;
@@ -1615,9 +1582,6 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
container_of(port, struct uart_amba_port, port);
unsigned int cr;
- if (port->rs485.flags & SER_RS485_ENABLED)
- mctrl &= ~TIOCM_RTS;
-
cr = pl011_read(uap, REG_CR);
#define TIOCMBIT(tiocmbit, uartbit) \
@@ -1837,18 +1801,12 @@ static int pl011_startup(struct uart_port *port)
spin_lock_irq(&uap->port.lock);
- /* restore RTS and DTR */
- cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
+ cr = pl011_read(uap, REG_CR);
+ cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_RXE;
- if (port->rs485.flags & SER_RS485_ENABLED) {
- if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
- cr &= ~UART011_CR_RTS;
- else
- cr |= UART011_CR_RTS;
- } else {
+ if (!(port->rs485.flags & SER_RS485_ENABLED))
cr |= UART011_CR_TXE;
- }
pl011_write(cr, uap, REG_CR);
@@ -1915,7 +1873,6 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
spin_lock_irq(&uap->port.lock);
cr = pl011_read(uap, REG_CR);
- uap->old_cr = cr;
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
pl011_write(cr, uap, REG_CR);
@@ -2105,9 +2062,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
if (port->rs485.flags & SER_RS485_ENABLED)
termios->c_cflag &= ~CRTSCTS;
- /* first, disable everything */
old_cr = pl011_read(uap, REG_CR);
- pl011_write(0, uap, REG_CR);
if (termios->c_cflag & CRTSCTS) {
if (old_cr & UART011_CR_RTS)
@@ -2184,31 +2139,12 @@ static const char *pl011_type(struct uart_port *port)
}
/*
- * Release the memory region(s) being used by 'port'
- */
-static void pl011_release_port(struct uart_port *port)
-{
- release_mem_region(port->mapbase, SZ_4K);
-}
-
-/*
- * Request the memory region(s) being used by 'port'
- */
-static int pl011_request_port(struct uart_port *port)
-{
- return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
- != NULL ? 0 : -EBUSY;
-}
-
-/*
* Configure/autoconfigure the port.
*/
static void pl011_config_port(struct uart_port *port, int flags)
{
- if (flags & UART_CONFIG_TYPE) {
+ if (flags & UART_CONFIG_TYPE)
port->type = PORT_AMBA;
- pl011_request_port(port);
- }
}
/*
@@ -2223,6 +2159,8 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
ret = -EINVAL;
if (ser->baud_base < 9600)
ret = -EINVAL;
+ if (port->mapbase != (unsigned long) ser->iomem_base)
+ ret = -EINVAL;
return ret;
}
@@ -2275,8 +2213,6 @@ static const struct uart_ops amba_pl011_pops = {
.flush_buffer = pl011_dma_flush_buffer,
.set_termios = pl011_set_termios,
.type = pl011_type,
- .release_port = pl011_release_port,
- .request_port = pl011_request_port,
.config_port = pl011_config_port,
.verify_port = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
@@ -2306,8 +2242,6 @@ static const struct uart_ops sbsa_uart_pops = {
.shutdown = sbsa_uart_shutdown,
.set_termios = sbsa_uart_set_termios,
.type = pl011_type,
- .release_port = pl011_release_port,
- .request_port = pl011_request_port,
.config_port = pl011_config_port,
.verify_port = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
@@ -2754,7 +2688,6 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
index = pl011_probe_dt_alias(index, dev);
- uap->old_cr = 0;
uap->port.dev = dev;
uap->port.mapbase = mmiobase->start;
uap->port.membase = base;
@@ -2975,11 +2908,6 @@ static const struct amba_id pl011_ids[] = {
.mask = 0x00ffffff,
.data = &vendor_st,
},
- {
- .id = AMBA_LINUX_ID(0x00, 0x1, 0xffe),
- .mask = 0x00ffffff,
- .data = &vendor_zte,
- },
{ 0, 0 },
};
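A condensed sketch of the pl011 startup path after the hunks above: rather than restoring a cached old_cr across shutdown, the control register is read back and only the modem lines are preserved (helper name hypothetical, register names from the driver):

static void pl011_enable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);
	/* keep only the current RTS/DTR state */
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE;
	/* for RS485, TX is enabled on demand when sending starts */
	if (!(uap->port.rs485.flags & SER_RS485_ENABLED))
		cr |= UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
}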
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 4379ca4842ae..8cabe50c4a33 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -707,11 +707,11 @@ static int ar933x_uart_probe(struct platform_device *pdev)
struct ar933x_uart_port *up;
struct uart_port *port;
struct resource *mem_res;
- struct resource *irq_res;
struct device_node *np;
unsigned int baud;
int id;
int ret;
+ int irq;
np = pdev->dev.of_node;
if (IS_ENABLED(CONFIG_OF) && np) {
@@ -730,11 +730,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
if (id >= CONFIG_SERIAL_AR933X_NR_UARTS)
return -EINVAL;
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
- dev_err(&pdev->dev, "no IRQ resource\n");
- return -EINVAL;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
up = devm_kzalloc(&pdev->dev, sizeof(struct ar933x_uart_port),
GFP_KERNEL);
@@ -766,7 +764,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
port->mapbase = mem_res->start;
port->line = id;
- port->irq = irq_res->start;
+ port->irq = irq;
port->dev = &pdev->dev;
port->type = PORT_AR933X;
port->iotype = UPIO_MEM32;
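The ar933x change follows a pattern repeated throughout this series (bcm63xx, meson, pmac_zilog, pxa, vt8500 below): IRQs are fetched with platform_get_irq() instead of platform_get_resource(IORESOURCE_IRQ, ...), which works when the IRQ resource is not pre-populated (e.g. hierarchical interrupt domains) and propagates -EPROBE_DEFER. A minimal probe sketch with a hypothetical driver name:

static int foo_uart_probe(struct platform_device *pdev)
{
	int irq;

	/* returns a negative errno on failure (possibly -EPROBE_DEFER)
	 * and already logs the error, so just propagate it */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* ... map registers, then assign 'irq' to port->irq ... */
	return 0;
}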
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 2c99a47a2535..c370eddc651b 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1004,6 +1004,13 @@ static void atmel_tx_dma(struct uart_port *port)
desc->callback = atmel_complete_tx_dma;
desc->callback_param = atmel_port;
atmel_port->cookie_tx = dmaengine_submit(desc);
+ if (dma_submit_error(atmel_port->cookie_tx)) {
+ dev_err(port->dev, "dma_submit_error %d\n",
+ atmel_port->cookie_tx);
+ return;
+ }
+
+ dma_async_issue_pending(chan);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1258,6 +1265,13 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
desc->callback_param = port;
atmel_port->desc_rx = desc;
atmel_port->cookie_rx = dmaengine_submit(desc);
+ if (dma_submit_error(atmel_port->cookie_rx)) {
+ dev_err(port->dev, "dma_submit_error %d\n",
+ atmel_port->cookie_rx);
+ goto chan_err;
+ }
+
+ dma_async_issue_pending(atmel_port->chan_rx);
return 0;
@@ -2479,7 +2493,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
port->fifosize = 1;
port->dev = &pdev->dev;
port->mapbase = mpdev->resource[0].start;
- port->irq = mpdev->resource[1].start;
+ port->irq = platform_get_irq(mpdev, 0);
port->rs485_config = atmel_config_rs485;
port->iso7816_config = atmel_config_iso7816;
port->membase = NULL;
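The atmel hunks add the submit-error check and the explicit issue-pending step the dmaengine API expects: dmaengine_submit() only queues the descriptor, and nothing is transferred until dma_async_issue_pending() runs. In outline:

	dma_cookie_t cookie;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		dev_err(port->dev, "dma_submit_error %d\n", cookie);
		return;
	}
	/* actually start (or schedule) the queued transfer */
	dma_async_issue_pending(chan);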
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 5fb0e84f7fd1..6471a54b616b 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -804,7 +804,7 @@ static struct uart_driver bcm_uart_driver = {
*/
static int bcm_uart_probe(struct platform_device *pdev)
{
- struct resource *res_mem, *res_irq;
+ struct resource *res_mem;
struct uart_port *port;
struct clk *clk;
int ret;
@@ -833,9 +833,10 @@ static int bcm_uart_probe(struct platform_device *pdev)
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq)
- return -ENODEV;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ port->irq = ret;
clk = clk_get(&pdev->dev, "refclk");
if (IS_ERR(clk) && pdev->dev.of_node)
@@ -845,7 +846,6 @@ static int bcm_uart_probe(struct platform_device *pdev)
return -ENODEV;
port->iotype = UPIO_MEM;
- port->irq = res_irq->start;
port->ops = &bcm_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->dev = &pdev->dev;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index ac5112def40d..ce3e26144689 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -247,6 +247,7 @@ enum lpuart_type {
LS1028A_LPUART,
IMX7ULP_LPUART,
IMX8QXP_LPUART,
+ IMXRT1050_LPUART,
};
struct lpuart_port {
@@ -310,6 +311,11 @@ static struct lpuart_soc_data imx8qxp_data = {
.iotype = UPIO_MEM32,
.reg_off = IMX_REG_OFF,
};
+static struct lpuart_soc_data imxrt1050_data = {
+ .devtype = IMXRT1050_LPUART,
+ .iotype = UPIO_MEM32,
+ .reg_off = IMX_REG_OFF,
+};
static const struct of_device_id lpuart_dt_ids[] = {
{ .compatible = "fsl,vf610-lpuart", .data = &vf_data, },
@@ -317,6 +323,7 @@ static const struct of_device_id lpuart_dt_ids[] = {
{ .compatible = "fsl,ls1028a-lpuart", .data = &ls1028a_data, },
{ .compatible = "fsl,imx7ulp-lpuart", .data = &imx7ulp_data, },
{ .compatible = "fsl,imx8qxp-lpuart", .data = &imx8qxp_data, },
+ { .compatible = "fsl,imxrt1050-lpuart", .data = &imxrt1050_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
@@ -1793,8 +1800,8 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
}
if (sport->lpuart_dma_tx_use) {
- if (wait_event_interruptible(sport->dma_wait,
- !sport->dma_tx_in_progress) != false) {
+ if (wait_event_interruptible_timeout(sport->dma_wait,
+ !sport->dma_tx_in_progress, msecs_to_jiffies(300)) <= 0) {
sport->dma_tx_in_progress = false;
dmaengine_terminate_all(sport->dma_tx_chan);
}
@@ -2626,6 +2633,7 @@ OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,imxrt1050-lpuart", lpuart32_imx_early_console_setup);
EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 90f82e6c54e4..df8a0c8b8b29 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -486,18 +486,21 @@ static void imx_uart_stop_tx(struct uart_port *port)
static void imx_uart_stop_rx(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
- u32 ucr1, ucr2;
+ u32 ucr1, ucr2, ucr4;
ucr1 = imx_uart_readl(sport, UCR1);
ucr2 = imx_uart_readl(sport, UCR2);
+ ucr4 = imx_uart_readl(sport, UCR4);
if (sport->dma_is_enabled) {
ucr1 &= ~(UCR1_RXDMAEN | UCR1_ATDMAEN);
} else {
ucr1 &= ~UCR1_RRDYEN;
ucr2 &= ~UCR2_ATEN;
+ ucr4 &= ~UCR4_OREN;
}
imx_uart_writel(sport, ucr1, UCR1);
+ imx_uart_writel(sport, ucr4, UCR4);
ucr2 &= ~UCR2_RXEN;
imx_uart_writel(sport, ucr2, UCR2);
@@ -1544,7 +1547,7 @@ static void imx_uart_shutdown(struct uart_port *port)
imx_uart_writel(sport, ucr1, UCR1);
ucr4 = imx_uart_readl(sport, UCR4);
- ucr4 &= ~(UCR4_OREN | UCR4_TCEN);
+ ucr4 &= ~UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -2482,10 +2485,12 @@ static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
if (sport->have_rtscts) {
u32 ucr1 = imx_uart_readl(sport, UCR1);
- if (on)
+ if (on) {
+ imx_uart_writel(sport, USR1_RTSD, USR1);
ucr1 |= UCR1_RTSDEN;
- else
+ } else {
ucr1 &= ~UCR1_RTSDEN;
+ }
imx_uart_writel(sport, ucr1, UCR1);
}
}
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 497b334bc845..3e324d3f0a6d 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -16,8 +16,6 @@
#include <linux/ioport.h>
#include <linux/lantiq.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
@@ -728,19 +726,23 @@ static struct uart_driver lqasc_reg = {
static int fetch_irq_lantiq(struct device *dev, struct ltq_uart_port *ltq_port)
{
struct uart_port *port = &ltq_port->port;
- struct resource irqres[3];
- int ret;
-
- ret = of_irq_to_resource_table(dev->of_node, irqres, 3);
- if (ret != 3) {
- dev_err(dev,
- "failed to get IRQs for serial port\n");
- return -ENODEV;
- }
- ltq_port->tx_irq = irqres[0].start;
- ltq_port->rx_irq = irqres[1].start;
- ltq_port->err_irq = irqres[2].start;
- port->irq = irqres[0].start;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ ltq_port->tx_irq = irq;
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ return irq;
+ ltq_port->rx_irq = irq;
+ irq = platform_get_irq(pdev, 2);
+ if (irq < 0)
+ return irq;
+ ltq_port->err_irq = irq;
+
+ port->irq = ltq_port->tx_irq;
return 0;
}
@@ -793,7 +795,7 @@ static int fetch_irq_intel(struct device *dev, struct ltq_uart_port *ltq_port)
struct uart_port *port = &ltq_port->port;
int ret;
- ret = of_irq_get(dev->of_node, 0);
+ ret = platform_get_irq(to_platform_device(dev), 0);
if (ret < 0) {
dev_err(dev, "failed to fetch IRQ for serial port\n");
return ret;
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index 2941659e5274..7f74bf7bdcff 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -436,4 +436,4 @@ module_exit(liteuart_exit);
MODULE_AUTHOR("Antmicro <www.antmicro.com>");
MODULE_DESCRIPTION("LiteUART serial driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform: liteuart");
+MODULE_ALIAS("platform:liteuart");
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index b199d7859961..07c4161eb4cc 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -341,7 +341,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
LPC32XX_HSUART_IIR(port->membase));
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- tty_schedule_flip(tport);
+ tty_flip_buffer_push(tport);
}
/* Data received? */
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index efee3935917f..45e00d928253 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -622,10 +622,7 @@ meson_serial_early_console_setup(struct earlycon_device *device, const char *opt
device->con->write = meson_serial_early_console_write;
return 0;
}
-/* Legacy bindings, should be removed when no more used */
-OF_EARLYCON_DECLARE(meson, "amlogic,meson-uart",
- meson_serial_early_console_setup);
-/* Stable bindings */
+
OF_EARLYCON_DECLARE(meson, "amlogic,meson-ao-uart",
meson_serial_early_console_setup);
@@ -668,25 +665,6 @@ static inline struct clk *meson_uart_probe_clock(struct device *dev,
return clk;
}
-/*
- * This function gets clocks in the legacy non-stable DT bindings.
- * This code will be remove once all the platforms switch to the
- * new DT bindings.
- */
-static int meson_uart_probe_clocks_legacy(struct platform_device *pdev,
- struct uart_port *port)
-{
- struct clk *clk = NULL;
-
- clk = meson_uart_probe_clock(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- port->uartclk = clk_get_rate(clk);
-
- return 0;
-}
-
static int meson_uart_probe_clocks(struct platform_device *pdev,
struct uart_port *port)
{
@@ -713,10 +691,11 @@ static int meson_uart_probe_clocks(struct platform_device *pdev,
static int meson_uart_probe(struct platform_device *pdev)
{
- struct resource *res_mem, *res_irq;
+ struct resource *res_mem;
struct uart_port *port;
u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
int ret = 0;
+ int irq;
if (pdev->dev.of_node)
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
@@ -739,9 +718,9 @@ static int meson_uart_probe(struct platform_device *pdev)
if (!res_mem)
return -ENODEV;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq)
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
@@ -754,19 +733,14 @@ static int meson_uart_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
- /* Use legacy way until all platforms switch to new bindings */
- if (of_device_is_compatible(pdev->dev.of_node, "amlogic,meson-uart"))
- ret = meson_uart_probe_clocks_legacy(pdev, port);
- else
- ret = meson_uart_probe_clocks(pdev, port);
-
+ ret = meson_uart_probe_clocks(pdev, port);
if (ret)
return ret;
port->iotype = UPIO_MEM;
port->mapbase = res_mem->start;
port->mapsize = resource_size(res_mem);
- port->irq = res_irq->start;
+ port->irq = irq;
port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
port->dev = &pdev->dev;
@@ -804,9 +778,6 @@ static int meson_uart_remove(struct platform_device *pdev)
}
static const struct of_device_id meson_uart_dt_match[] = {
- /* Legacy bindings, should be removed when no more used */
- { .compatible = "amlogic,meson-uart" },
- /* Stable bindings */
{ .compatible = "amlogic,meson6-uart" },
{ .compatible = "amlogic,meson8-uart" },
{ .compatible = "amlogic,meson8b-uart" },
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 489d19274f9a..23c94b927776 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/atomic.h>
+#include <linux/dma/qcom_adm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
@@ -290,6 +291,7 @@ static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
{
struct device *dev = msm_port->uart.dev;
struct dma_slave_config conf;
+ struct qcom_adm_peripheral_config periph_conf = {};
struct msm_dma *dma;
u32 crci = 0;
int ret;
@@ -308,7 +310,11 @@ static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
conf.device_fc = true;
conf.dst_addr = base + UARTDM_TF;
conf.dst_maxburst = UARTDM_BURST_SIZE;
- conf.slave_id = crci;
+ if (crci) {
+ conf.peripheral_config = &periph_conf;
+ conf.peripheral_size = sizeof(periph_conf);
+ periph_conf.crci = crci;
+ }
ret = dmaengine_slave_config(dma->chan, &conf);
if (ret)
@@ -333,6 +339,7 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
{
struct device *dev = msm_port->uart.dev;
struct dma_slave_config conf;
+ struct qcom_adm_peripheral_config periph_conf = {};
struct msm_dma *dma;
u32 crci = 0;
int ret;
@@ -355,7 +362,11 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
conf.device_fc = true;
conf.src_addr = base + UARTDM_RF;
conf.src_maxburst = UARTDM_BURST_SIZE;
- conf.slave_id = crci;
+ if (crci) {
+ conf.peripheral_config = &periph_conf;
+ conf.peripheral_size = sizeof(periph_conf);
+ periph_conf.crci = crci;
+ }
ret = dmaengine_slave_config(dma->chan, &conf);
if (ret)
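With the dma_slave_config.slave_id field on its way out, the ADM-specific CRCI value moves into a peripheral-specific configuration blob passed by pointer and size. A sketch of the msm_serial usage, with variables as in the surrounding code:

	struct qcom_adm_peripheral_config periph_conf = {};
	struct dma_slave_config conf = {};

	if (crci) {
		periph_conf.crci = crci;
		conf.peripheral_config = &periph_conf;
		conf.peripheral_size = sizeof(periph_conf);
	}
	/* consumed during the call, so a stack variable is fine here */
	ret = dmaengine_slave_config(dma->chan, &conf);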
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 12ce150b0ad4..5359236b32d6 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1702,17 +1702,21 @@ extern struct platform_device scc_a_pdev, scc_b_pdev;
static int __init pmz_init_port(struct uart_pmac_port *uap)
{
- struct resource *r_ports, *r_irq;
+ struct resource *r_ports;
+ int irq;
r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0);
- r_irq = platform_get_resource(uap->pdev, IORESOURCE_IRQ, 0);
- if (!r_ports || !r_irq)
+ if (!r_ports)
return -ENODEV;
+ irq = platform_get_irq(uap->pdev, 0);
+ if (irq < 0)
+ return irq;
+
uap->port.mapbase = r_ports->start;
uap->port.membase = (unsigned char __iomem *) r_ports->start;
uap->port.iotype = UPIO_MEM;
- uap->port.irq = r_irq->start;
+ uap->port.irq = irq;
uap->port.uartclk = ZS_CLOCK;
uap->port.fifosize = 1;
uap->port.ops = &pmz_pops;
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 41319ef96fa6..30b099746a75 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -842,14 +842,18 @@ static int serial_pxa_probe_dt(struct platform_device *pdev,
static int serial_pxa_probe(struct platform_device *dev)
{
struct uart_pxa_port *sport;
- struct resource *mmres, *irqres;
+ struct resource *mmres;
int ret;
+ int irq;
mmres = platform_get_resource(dev, IORESOURCE_MEM, 0);
- irqres = platform_get_resource(dev, IORESOURCE_IRQ, 0);
- if (!mmres || !irqres)
+ if (!mmres)
return -ENODEV;
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0)
+ return irq;
+
sport = kzalloc(sizeof(struct uart_pxa_port), GFP_KERNEL);
if (!sport)
return -ENOMEM;
@@ -869,7 +873,7 @@ static int serial_pxa_probe(struct platform_device *dev)
sport->port.type = PORT_PXA;
sport->port.iotype = UPIO_MEM;
sport->port.mapbase = mmres->start;
- sport->port.irq = irqres->start;
+ sport->port.irq = irq;
sport->port.fifosize = 64;
sport->port.ops = &serial_pxa_pops;
sport->port.dev = &dev->dev;
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index ca084c10d0bb..d002a4e48ed9 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -65,7 +65,6 @@ enum s3c24xx_port_type {
struct s3c24xx_uart_info {
char *name;
enum s3c24xx_port_type type;
- bool has_usi;
unsigned int port_type;
unsigned int fifosize;
unsigned long rx_fifomask;
@@ -1357,28 +1356,6 @@ static int apple_s5l_serial_startup(struct uart_port *port)
return ret;
}
-static void exynos_usi_init(struct uart_port *port)
-{
- struct s3c24xx_uart_port *ourport = to_ourport(port);
- struct s3c24xx_uart_info *info = ourport->info;
- unsigned int val;
-
- if (!info->has_usi)
- return;
-
- /* Clear the software reset of USI block (it's set at startup) */
- val = rd_regl(port, USI_CON);
- val &= ~USI_CON_RESET_MASK;
- wr_regl(port, USI_CON, val);
- udelay(1);
-
- /* Continuously provide the clock to USI IP w/o gating (for Rx mode) */
- val = rd_regl(port, USI_OPTION);
- val &= ~USI_OPTION_HWACG_MASK;
- val |= USI_OPTION_HWACG_CLKREQ_ON;
- wr_regl(port, USI_OPTION, val);
-}
-
/* power management control */
static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
@@ -1405,8 +1382,6 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
if (!IS_ERR(ourport->baudclk))
clk_prepare_enable(ourport->baudclk);
-
- exynos_usi_init(port);
break;
default:
dev_err(port->dev, "s3c24xx_serial: unknown pm %d\n", level);
@@ -1740,15 +1715,21 @@ s3c24xx_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
static struct console s3c24xx_serial_console;
-static int __init s3c24xx_serial_console_init(void)
+static void __init s3c24xx_serial_register_console(void)
{
register_console(&s3c24xx_serial_console);
- return 0;
}
-console_initcall(s3c24xx_serial_console_init);
+
+static void s3c24xx_serial_unregister_console(void)
+{
+ if (s3c24xx_serial_console.flags & CON_ENABLED)
+ unregister_console(&s3c24xx_serial_console);
+}
#define S3C24XX_SERIAL_CONSOLE &s3c24xx_serial_console
#else
+static inline void s3c24xx_serial_register_console(void) { }
+static inline void s3c24xx_serial_unregister_console(void) { }
#define S3C24XX_SERIAL_CONSOLE NULL
#endif
@@ -2130,8 +2111,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
if (ret)
pr_warn("uart: failed to enable baudclk\n");
- exynos_usi_init(port);
-
/* Keep all interrupts masked and cleared */
switch (ourport->info->type) {
case TYPE_S3C6400:
@@ -2521,7 +2500,8 @@ s3c24xx_serial_console_write(struct console *co, const char *s,
uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
}
-static void __init
+/* Shouldn't be __init, as it can be instantiated from another module */
+static void
s3c24xx_serial_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
@@ -2584,7 +2564,8 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
}
}
-static int __init
+/* Shouldn't be __init, as it can be instantiated from another module */
+static int
s3c24xx_serial_console_setup(struct console *co, char *options)
{
struct uart_port *port;
@@ -2780,11 +2761,10 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
#endif
#if defined(CONFIG_ARCH_EXYNOS)
-#define EXYNOS_COMMON_SERIAL_DRV_DATA(_has_usi) \
+#define EXYNOS_COMMON_SERIAL_DRV_DATA() \
.info = &(struct s3c24xx_uart_info) { \
.name = "Samsung Exynos UART", \
.type = TYPE_S3C6400, \
- .has_usi = _has_usi, \
.port_type = PORT_S3C6400, \
.has_divslot = 1, \
.rx_fifomask = S5PV210_UFSTAT_RXMASK, \
@@ -2805,17 +2785,17 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
} \
static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(false),
+ EXYNOS_COMMON_SERIAL_DRV_DATA(),
.fifosize = { 256, 64, 16, 16 },
};
static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(false),
+ EXYNOS_COMMON_SERIAL_DRV_DATA(),
.fifosize = { 64, 256, 16, 256 },
};
static struct s3c24xx_serial_drv_data exynos850_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(true),
+ EXYNOS_COMMON_SERIAL_DRV_DATA(),
.fifosize = { 256, 64, 64, 64 },
};
@@ -2926,7 +2906,29 @@ static struct platform_driver samsung_serial_driver = {
},
};
-module_platform_driver(samsung_serial_driver);
+static int __init samsung_serial_init(void)
+{
+ int ret;
+
+ s3c24xx_serial_register_console();
+
+ ret = platform_driver_register(&samsung_serial_driver);
+ if (ret) {
+ s3c24xx_serial_unregister_console();
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit samsung_serial_exit(void)
+{
+ platform_driver_unregister(&samsung_serial_driver);
+ s3c24xx_serial_unregister_console();
+}
+
+module_init(samsung_serial_init);
+module_exit(samsung_serial_exit);
#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
/*
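Replacing module_platform_driver() with explicit init/exit lets the samsung driver register its console up front and tear it down again if platform_driver_register() fails or the module is unloaded, avoiding a dangling console. The shape of the pattern, with hypothetical names:

static int __init my_serial_init(void)
{
	int ret;

	my_serial_register_console();

	ret = platform_driver_register(&my_serial_driver);
	if (ret)
		my_serial_unregister_console();

	return ret;
}

static void __exit my_serial_exit(void)
{
	platform_driver_unregister(&my_serial_driver);
	my_serial_unregister_console();
}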
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 61e3dd0222af..0db90be4c3bc 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -144,6 +144,11 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
unsigned long flags;
unsigned int old;
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ set &= ~TIOCM_RTS;
+ clear &= ~TIOCM_RTS;
+ }
+
spin_lock_irqsave(&port->lock, flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
@@ -157,23 +162,10 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
static void uart_port_dtr_rts(struct uart_port *uport, int raise)
{
- int rs485_on = uport->rs485_config &&
- (uport->rs485.flags & SER_RS485_ENABLED);
- int RTS_after_send = !!(uport->rs485.flags & SER_RS485_RTS_AFTER_SEND);
-
- if (raise) {
- if (rs485_on && !RTS_after_send) {
- uart_set_mctrl(uport, TIOCM_DTR);
- uart_clear_mctrl(uport, TIOCM_RTS);
- } else {
- uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
- }
- } else {
- unsigned int clear = TIOCM_DTR;
-
- clear |= (!rs485_on || !RTS_after_send) ? TIOCM_RTS : 0;
- uart_clear_mctrl(uport, clear);
- }
+ if (raise)
+ uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+ else
+ uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
/*
@@ -1075,11 +1067,6 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
goto out;
if (!tty_io_error(tty)) {
- if (uport->rs485.flags & SER_RS485_ENABLED) {
- set &= ~TIOCM_RTS;
- clear &= ~TIOCM_RTS;
- }
-
uart_update_mctrl(uport, set, clear);
ret = 0;
}
@@ -1701,17 +1688,13 @@ static void uart_port_shutdown(struct tty_port *port)
*/
wake_up_interruptible(&port->delta_msr_wait);
- /*
- * Free the IRQ and disable the port.
- */
- if (uport)
+ if (uport) {
+ /* Free the IRQ and disable the port. */
uport->ops->shutdown(uport);
- /*
- * Ensure that the IRQ handler isn't running on another CPU.
- */
- if (uport)
+ /* Ensure that the IRQ handler isn't running on another CPU. */
synchronize_irq(uport->irq);
+ }
}
static int uart_carrier_raised(struct tty_port *port)
@@ -2393,7 +2376,11 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
* We probably don't need a spinlock around this, but
*/
spin_lock_irqsave(&port->lock, flags);
- port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR);
+ port->mctrl &= TIOCM_DTR;
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ !(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
+ port->mctrl |= TIOCM_RTS;
+ port->ops->set_mctrl(port, port->mctrl);
spin_unlock_irqrestore(&port->lock, flags);
/*
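The serial_core hunks centralize the RS485 RTS policy: instead of each caller (the ioctl path, the DTR/RTS helper) special-casing RS485, uart_update_mctrl() masks RTS whenever RS485 is active, and uart_configure_port() derives the initial RTS level from SER_RS485_RTS_AFTER_SEND. The single filter now covers every caller:

	/* RTS is owned by the RS485 state machine; user-visible
	 * modem-control updates must leave it alone */
	if (port->rs485.flags & SER_RS485_ENABLED) {
		set &= ~TIOCM_RTS;
		clear &= ~TIOCM_RTS;
	}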
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 89ee43061d3a..968967d722d4 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -37,6 +37,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/scatterlist.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
@@ -895,11 +896,9 @@ static void sci_receive_chars(struct uart_port *port)
if (status & SCxSR_FER(port)) {
flag = TTY_FRAME;
port->icount.frame++;
- dev_notice(port->dev, "frame error\n");
} else if (status & SCxSR_PER(port)) {
flag = TTY_PARITY;
port->icount.parity++;
- dev_notice(port->dev, "parity error\n");
} else
flag = TTY_NORMAL;
@@ -939,8 +938,6 @@ static int sci_handle_errors(struct uart_port *port)
/* overrun error */
if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
copied++;
-
- dev_notice(port->dev, "overrun error\n");
}
if (status & SCxSR_FER(port)) {
@@ -949,8 +946,6 @@ static int sci_handle_errors(struct uart_port *port)
if (tty_insert_flip_char(tport, 0, TTY_FRAME))
copied++;
-
- dev_notice(port->dev, "frame error\n");
}
if (status & SCxSR_PER(port)) {
@@ -959,8 +954,6 @@ static int sci_handle_errors(struct uart_port *port)
if (tty_insert_flip_char(tport, 0, TTY_PARITY))
copied++;
-
- dev_notice(port->dev, "parity error\n");
}
if (copied)
@@ -990,8 +983,6 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
tty_flip_buffer_push(tport);
-
- dev_dbg(port->dev, "overrun error\n");
copied++;
}
@@ -1013,8 +1004,6 @@ static int sci_handle_breaks(struct uart_port *port)
/* Notify of BREAK */
if (tty_insert_flip_char(tport, 0, TTY_BREAK))
copied++;
-
- dev_dbg(port->dev, "BREAK detected\n");
}
if (copied)
@@ -2778,44 +2767,29 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
clk_names[SCI_SCK] = "hsck";
for (i = 0; i < SCI_NUM_CLKS; i++) {
- clk = devm_clk_get(dev, clk_names[i]);
- if (PTR_ERR(clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (IS_ERR(clk) && i == SCI_FCK) {
- /*
- * "fck" used to be called "sci_ick", and we need to
- * maintain DT backward compatibility.
- */
- clk = devm_clk_get(dev, "sci_ick");
- if (PTR_ERR(clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (!IS_ERR(clk))
- goto found;
+ clk = devm_clk_get_optional(dev, clk_names[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ if (!clk && i == SCI_FCK) {
/*
* Not all SH platforms declare a clock lookup entry
* for SCI devices, in which case we need to get the
* global "peripheral_clk" clock.
*/
clk = devm_clk_get(dev, "peripheral_clk");
- if (!IS_ERR(clk))
- goto found;
-
- dev_err(dev, "failed to get %s (%ld)\n", clk_names[i],
- PTR_ERR(clk));
- return PTR_ERR(clk);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "failed to get %s\n",
+ clk_names[i]);
}
-found:
- if (IS_ERR(clk))
- dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
- PTR_ERR(clk));
+ if (!clk)
+ dev_dbg(dev, "failed to get %s\n", clk_names[i]);
else
dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
clk, clk_get_rate(clk));
- sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
+ sci_port->clks[i] = clk;
}
return 0;
}
@@ -3180,6 +3154,9 @@ static const struct of_device_id of_sci_match[] = {
}, {
.compatible = "renesas,rcar-gen3-scif",
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
+ }, {
+ .compatible = "renesas,rcar-gen4-scif",
+ .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
},
/* Generic types */
{
@@ -3203,23 +3180,47 @@ static const struct of_device_id of_sci_match[] = {
};
MODULE_DEVICE_TABLE(of, of_sci_match);
+static void sci_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
unsigned int *dev_id)
{
struct device_node *np = pdev->dev.of_node;
+ struct reset_control *rstc;
struct plat_sci_port *p;
struct sci_port *sp;
const void *data;
- int id;
+ int id, ret;
if (!IS_ENABLED(CONFIG_OF) || !np)
- return NULL;
+ return ERR_PTR(-EINVAL);
data = of_device_get_match_data(&pdev->dev);
+ rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rstc))
+ return ERR_PTR(dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ "failed to get reset ctrl\n"));
+
+ ret = reset_control_deassert(rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to deassert reset %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, sci_reset_control_assert, rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register assert devm action, %d\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
if (!p)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Get the line number from the aliases node. */
id = of_alias_get_id(np, "serial");
@@ -3227,11 +3228,11 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
id = ffz(sci_ports_in_use);
if (id < 0) {
dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
if (id >= ARRAY_SIZE(sci_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n", id);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
sp = &sci_ports[id];
@@ -3318,8 +3319,8 @@ static int sci_probe(struct platform_device *dev)
if (dev->dev.of_node) {
p = sci_parse_dt(dev, &dev_id);
- if (p == NULL)
- return -EINVAL;
+ if (IS_ERR(p))
+ return PTR_ERR(p);
} else {
p = dev->dev.platform_data;
if (p == NULL) {
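Two patterns are worth noting in the sh-sci conversion: devm_clk_get_optional() folds the missing-clock case into a NULL return instead of an error pointer, and the optional reset line is deasserted with an automatic re-assert registered via devm_add_action_or_reset(), so probe-failure and unbind paths need no manual cleanup. In outline (fragment from the parse-dt path, which returns a pointer):

	rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return ERR_PTR(dev_err_probe(&pdev->dev, PTR_ERR(rstc),
					     "failed to get reset ctrl\n"));

	ret = reset_control_deassert(rstc);
	if (ret)
		return ERR_PTR(ret);

	/* re-asserts the reset automatically on any later failure */
	ret = devm_add_action_or_reset(&pdev->dev, sci_reset_control_assert,
				       rstc);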
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 3244e7f6818c..9570002d07e7 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -365,6 +365,31 @@ static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force
return size;
}
+static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
+{
+ dmaengine_terminate_async(stm32_port->tx_ch);
+ stm32_port->tx_dma_busy = false;
+}
+
+static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
+{
+ /*
+	 * We cannot use dmaengine_tx_status() to learn the state of the
+	 * transfer: it does not tell us whether the "dma complete"
+	 * callback of the DMA transaction has run. So we rely on the
+	 * "tx_dma_busy" flag to prevent two DMA transactions from being
+	 * in flight at the same time.
+ */
+ return stm32_port->tx_dma_busy;
+}
+
+static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port)
+{
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT);
+}
+
static void stm32_usart_tx_dma_complete(void *arg)
{
struct uart_port *port = arg;
@@ -372,9 +397,8 @@ static void stm32_usart_tx_dma_complete(void *arg)
const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
unsigned long flags;
- dmaengine_terminate_async(stm32port->tx_ch);
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
- stm32port->tx_dma_busy = false;
+ stm32_usart_tx_dma_terminate(stm32port);
/* Let's see if we have pending data to send */
spin_lock_irqsave(&port->lock, flags);
@@ -428,10 +452,8 @@ static void stm32_usart_transmit_chars_pio(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
- if (stm32_port->tx_dma_busy) {
+ if (stm32_usart_tx_dma_enabled(stm32_port))
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
- stm32_port->tx_dma_busy = false;
- }
while (!uart_circ_empty(xmit)) {
/* Check that TDR is empty before filling FIFO */
@@ -455,12 +477,13 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
struct dma_async_tx_descriptor *desc = NULL;
- unsigned int count, i;
+ unsigned int count;
- if (stm32port->tx_dma_busy)
+ if (stm32_usart_tx_dma_started(stm32port)) {
+ if (!stm32_usart_tx_dma_enabled(stm32port))
+ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
return;
-
- stm32port->tx_dma_busy = true;
+ }
count = uart_circ_chars_pending(xmit);
@@ -491,13 +514,21 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port)
if (!desc)
goto fallback_err;
+ /*
+	 * Set the "tx_dma_busy" flag. It is cleared when
+	 * dmaengine_terminate_async() is called, and it keeps
+	 * transmit_chars_dma from starting another DMA transaction
+	 * while the callback of the previous one has not yet run.
+ */
+ stm32port->tx_dma_busy = true;
+
desc->callback = stm32_usart_tx_dma_complete;
desc->callback_param = port;
/* Push current DMA TX transaction in the pending queue */
if (dma_submit_error(dmaengine_submit(desc))) {
/* DMA not yet started, safe to free resources */
- dmaengine_terminate_async(stm32port->tx_ch);
+ stm32_usart_tx_dma_terminate(stm32port);
goto fallback_err;
}
@@ -511,8 +542,7 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port)
return;
fallback_err:
- for (i = count; i > 0; i--)
- stm32_usart_transmit_chars_pio(port);
+ stm32_usart_transmit_chars_pio(port);
}
static void stm32_usart_transmit_chars(struct uart_port *port)
@@ -520,14 +550,27 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
+ u32 isr;
+ int ret;
if (port->x_char) {
- if (stm32_port->tx_dma_busy)
+ if (stm32_usart_tx_dma_started(stm32_port) &&
+ stm32_usart_tx_dma_enabled(stm32_port))
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+
+ /* Check that TDR is empty before filling FIFO */
+ ret =
+ readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
+ isr,
+ (isr & USART_SR_TXE),
+ 10, 1000);
+ if (ret)
+ dev_warn(port->dev, "1 character may be erased\n");
+
writel_relaxed(port->x_char, port->membase + ofs->tdr);
port->x_char = 0;
port->icount.tx++;
- if (stm32_port->tx_dma_busy)
+ if (stm32_usart_tx_dma_started(stm32_port))
stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
return;
}
@@ -675,8 +718,11 @@ static void stm32_usart_stop_tx(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
struct serial_rs485 *rs485conf = &port->rs485;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
stm32_usart_tx_interrupt_disable(port);
+ if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port))
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
if (rs485conf->flags & SER_RS485_ENABLED) {
if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
@@ -696,7 +742,7 @@ static void stm32_usart_start_tx(struct uart_port *port)
struct serial_rs485 *rs485conf = &port->rs485;
struct circ_buf *xmit = &port->state->xmit;
- if (uart_circ_empty(xmit))
+ if (uart_circ_empty(xmit) && !port->x_char)
return;
if (rs485conf->flags & SER_RS485_ENABLED) {
@@ -719,9 +765,8 @@ static void stm32_usart_flush_buffer(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
if (stm32_port->tx_ch) {
- dmaengine_terminate_async(stm32_port->tx_ch);
+ stm32_usart_tx_dma_terminate(stm32_port);
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
- stm32_port->tx_dma_busy = false;
}
}
@@ -883,6 +928,12 @@ static void stm32_usart_shutdown(struct uart_port *port)
u32 val, isr;
int ret;
+ if (stm32_usart_tx_dma_enabled(stm32_port))
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+
+ if (stm32_usart_tx_dma_started(stm32_port))
+ stm32_usart_tx_dma_terminate(stm32_port);
+
/* Disable modem control interrupts */
stm32_usart_disable_ms(port);
@@ -1419,8 +1470,6 @@ static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
struct dma_slave_config config;
int ret;
- stm32port->tx_dma_busy = false;
-
stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
&stm32port->tx_dma_buf,
GFP_KERNEL);
@@ -1570,7 +1619,6 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
writel_relaxed(cr3, port->membase + ofs->cr3);
if (stm32_port->tx_ch) {
- dmaengine_terminate_async(stm32_port->tx_ch);
stm32_usart_of_dma_tx_remove(stm32_port, pdev);
dma_release_channel(stm32_port->tx_ch);
}
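After the stm32-usart rework, tx_dma_busy tracks the descriptor's lifetime (set at submit, cleared only in the terminate helper), while the CR3 DMAT bit independently gates the DMA request, so a transfer can be paused and resumed without being torn down. The two query helpers make that split explicit:

static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
{
	/* a descriptor was submitted and its completion has not yet run */
	return stm32_port->tx_dma_busy;
}

static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port)
{
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* the USART is currently asserting its DMA TX request */
	return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) &
		  USART_CR3_DMAT);
}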
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index e23916bfbb60..feab952aec16 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -264,7 +264,7 @@ struct stm32_port {
u32 cr1_irq; /* USART_CR1_RXNEIE or RTOIE */
u32 cr3_irq; /* USART_CR3_RXFTIE */
int last_res;
- bool tx_dma_busy; /* dma tx busy */
+ bool tx_dma_busy; /* dma tx transaction in progress */
bool throttled; /* port throttled */
bool hw_flow_control;
bool swap; /* swap RX & TX pins */
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 425a016f9db7..98b2f4fb9a99 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -127,7 +127,8 @@ static void serial_out(struct uart_sunsu_port *up, int offset, int value)
* gate outputs a logical one. Since we use level triggered interrupts
* we have lockup and watchdog reset. We cannot mask IRQ because
* keyboard shares IRQ with us (Word has it as Bob Smelik's design).
- * This problem is similar to what Alpha people suffer, see serial.c.
+ * This problem is similar to what Alpha people suffer, see
+ * 8250_alpha.c.
*/
if (offset == UART_MCR)
value |= UART_MCR_OUT2;
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index d3d9566e5dbd..e1fa52d31474 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -626,7 +626,7 @@ static struct uart_driver ulite_uart_driver = {
*
* Returns: 0 on success, <0 otherwise
*/
-static int ulite_assign(struct device *dev, int id, u32 base, int irq,
+static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq,
struct uartlite_data *pdata)
{
struct uart_port *port;
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index e15b2bf69904..9adfe3dc970f 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -621,21 +621,25 @@ static const struct of_device_id wmt_dt_ids[] = {
static int vt8500_serial_probe(struct platform_device *pdev)
{
struct vt8500_port *vt8500_port;
- struct resource *mmres, *irqres;
+ struct resource *mmres;
struct device_node *np = pdev->dev.of_node;
const unsigned int *flags;
int ret;
int port;
+ int irq;
flags = of_device_get_match_data(&pdev->dev);
if (!flags)
return -EINVAL;
mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!mmres || !irqres)
+ if (!mmres)
return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
if (np) {
port = of_alias_get_id(np, "serial");
if (port >= VT8500_MAX_PORTS)
@@ -688,7 +692,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
vt8500_port->uart.type = PORT_VT8500;
vt8500_port->uart.iotype = UPIO_MEM;
vt8500_port->uart.mapbase = mmres->start;
- vt8500_port->uart.irq = irqres->start;
+ vt8500_port->uart.irq = irq;
vt8500_port->uart.fifosize = 16;
vt8500_port->uart.ops = &vt8500_uart_pops;
vt8500_port->uart.line = port;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 6c7e65b1d9a1..646510476c30 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -39,20 +39,15 @@
#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
/**
- * tty_buffer_lock_exclusive - gain exclusive access to buffer
- * tty_buffer_unlock_exclusive - release exclusive access
+ * tty_buffer_lock_exclusive - gain exclusive access to buffer
+ * @port: tty port owning the flip buffer
*
- * @port: tty port owning the flip buffer
+ * Guarantees safe use of the &tty_ldisc_ops.receive_buf() method by excluding
+ * the buffer work and any pending flush from using the flip buffer. Data can
+ * continue to be added concurrently to the flip buffer from the driver side.
*
- * Guarantees safe use of the line discipline's receive_buf() method by
- * excluding the buffer work and any pending flush from using the flip
- * buffer. Data can continue to be added concurrently to the flip buffer
- * from the driver side.
- *
- * On release, the buffer work is restarted if there is data in the
- * flip buffer
+ * See also tty_buffer_unlock_exclusive().
*/
-
void tty_buffer_lock_exclusive(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
@@ -62,6 +57,14 @@ void tty_buffer_lock_exclusive(struct tty_port *port)
}
EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
+/**
+ * tty_buffer_unlock_exclusive - release exclusive access
+ * @port: tty port owning the flip buffer
+ *
+ * The buffer work is restarted if there is data in the flip buffer.
+ *
+ * See also tty_buffer_lock_exclusive().
+ */
void tty_buffer_unlock_exclusive(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
@@ -77,17 +80,16 @@ void tty_buffer_unlock_exclusive(struct tty_port *port)
EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
/**
- * tty_buffer_space_avail - return unused buffer space
- * @port: tty port owning the flip buffer
+ * tty_buffer_space_avail - return unused buffer space
+ * @port: tty port owning the flip buffer
*
- * Returns the # of bytes which can be written by the driver without
- * reaching the buffer limit.
+ * Returns: the # of bytes which can be written by the driver without reaching
+ * the buffer limit.
*
- * Note: this does not guarantee that memory is available to write
- * the returned # of bytes (use tty_prepare_flip_string_xxx() to
- * pre-allocate if memory guarantee is required).
+ * Note: this does not guarantee that memory is available to write the returned
+ * # of bytes (use tty_prepare_flip_string() to pre-allocate if memory
+ * guarantee is required).
*/
-
unsigned int tty_buffer_space_avail(struct tty_port *port)
{
int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
@@ -107,13 +109,12 @@ static void tty_buffer_reset(struct tty_buffer *p, size_t size)
}
/**
- * tty_buffer_free_all - free buffers used by a tty
- * @port: tty port to free from
+ * tty_buffer_free_all - free buffers used by a tty
+ * @port: tty port to free from
*
- * Remove all the buffers pending on a tty whether queued with data
- * or in the free ring. Must be called when the tty is no longer in use
+ * Remove all the buffers pending on a tty whether queued with data or in the
+ * free ring. Must be called when the tty is no longer in use.
*/
-
void tty_buffer_free_all(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
@@ -142,17 +143,17 @@ void tty_buffer_free_all(struct tty_port *port)
}
/**
- * tty_buffer_alloc - allocate a tty buffer
- * @port: tty port
- * @size: desired size (characters)
- *
- * Allocate a new tty buffer to hold the desired number of characters.
- * We round our buffers off in 256 character chunks to get better
- * allocation behaviour.
- * Return NULL if out of memory or the allocation would exceed the
- * per device queue
+ * tty_buffer_alloc - allocate a tty buffer
+ * @port: tty port
+ * @size: desired size (characters)
+ *
+ * Allocate a new tty buffer to hold the desired number of characters. We
+ * round our buffers off in 256 character chunks to get better allocation
+ * behaviour.
+ *
+ * Returns: %NULL if out of memory or the allocation would exceed the per
+ * device queue.
*/
-
static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
struct llist_node *free;
@@ -185,14 +186,13 @@ found:
}
/**
- * tty_buffer_free - free a tty buffer
- * @port: tty port owning the buffer
- * @b: the buffer to free
+ * tty_buffer_free - free a tty buffer
+ * @port: tty port owning the buffer
+ * @b: the buffer to free
*
- * Free a tty buffer, or add it to the free list according to our
- * internal strategy
+ * Free a tty buffer, or add it to the free list according to our internal
+ * strategy.
*/
-
static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
{
struct tty_bufhead *buf = &port->buf;
@@ -207,17 +207,15 @@ static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
}
/**
- * tty_buffer_flush - flush full tty buffers
- * @tty: tty to flush
- * @ld: optional ldisc ptr (must be referenced)
+ * tty_buffer_flush - flush full tty buffers
+ * @tty: tty to flush
+ * @ld: optional ldisc ptr (must be referenced)
*
- * flush all the buffers containing receive data. If ld != NULL,
- * flush the ldisc input buffer.
+ * Flush all the buffers containing receive data. If @ld != %NULL, flush the
+ * ldisc input buffer.
*
- * Locking: takes buffer lock to ensure single-threaded flip buffer
- * 'consumer'
+ * Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'.
*/
-
void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
{
struct tty_port *port = tty->port;
@@ -244,17 +242,18 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
}
/**
- * __tty_buffer_request_room - grow tty buffer if needed
- * @port: tty port
- * @size: size desired
- * @flags: buffer flags if new buffer allocated (default = 0)
+ * __tty_buffer_request_room - grow tty buffer if needed
+ * @port: tty port
+ * @size: size desired
+ * @flags: buffer flags if new buffer allocated (default = 0)
+ *
+ * Make at least @size bytes of linear space available for the tty buffer.
*
- * Make at least size bytes of linear space available for the tty
- * buffer. If we fail return the size we managed to find.
+ * Will change over to a new buffer if the current buffer is encoded as
+ * %TTY_NORMAL (so has no flags buffer) and the new buffer requires a flags
+ * buffer.
*
- * Will change over to a new buffer if the current buffer is encoded as
- * TTY_NORMAL (so has no flags buffer) and the new buffer requires
- * a flags buffer.
+ * Returns: the size we managed to find.
*/
static int __tty_buffer_request_room(struct tty_port *port, size_t size,
int flags)
@@ -300,16 +299,17 @@ int tty_buffer_request_room(struct tty_port *port, size_t size)
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
/**
- * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
- * @port: tty port
- * @chars: characters
- * @flag: flag value for each character
- * @size: size
- *
- * Queue a series of bytes to the tty buffering. All the characters
- * passed are marked with the supplied flag. Returns the number added.
+ * tty_insert_flip_string_fixed_flag - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flag: flag value for each character
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. All the characters passed are
+ * marked with the supplied flag.
+ *
+ * Returns: the number added.
*/
-
int tty_insert_flip_string_fixed_flag(struct tty_port *port,
const unsigned char *chars, char flag, size_t size)
{
@@ -338,17 +338,17 @@ int tty_insert_flip_string_fixed_flag(struct tty_port *port,
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
/**
- * tty_insert_flip_string_flags - Add characters to the tty buffer
- * @port: tty port
- * @chars: characters
- * @flags: flag bytes
- * @size: size
- *
- * Queue a series of bytes to the tty buffering. For each character
- * the flags array indicates the status of the character. Returns the
- * number added.
+ * tty_insert_flip_string_flags - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flags: flag bytes
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. For each character the flags
+ * array indicates the status of the character.
+ *
+ * Returns: the number added.
*/
-
int tty_insert_flip_string_flags(struct tty_port *port,
const unsigned char *chars, const char *flags, size_t size)
{
@@ -376,13 +376,13 @@ int tty_insert_flip_string_flags(struct tty_port *port,
EXPORT_SYMBOL(tty_insert_flip_string_flags);
/**
- * __tty_insert_flip_char - Add one character to the tty buffer
- * @port: tty port
- * @ch: character
- * @flag: flag byte
+ * __tty_insert_flip_char - add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
*
- * Queue a single byte to the tty buffering, with an optional flag.
- * This is the slow path of tty_insert_flip_char.
+ * Queue a single byte @ch to the tty buffering, with an optional flag. This is
+ * the slow path of tty_insert_flip_char().
*/
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
{
@@ -402,39 +402,19 @@ int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
EXPORT_SYMBOL(__tty_insert_flip_char);
/**
- * tty_schedule_flip - push characters to ldisc
- * @port: tty port to push from
+ * tty_prepare_flip_string - make room for characters
+ * @port: tty port
+ * @chars: return pointer for character write area
+ * @size: desired size
*
- * Takes any pending buffers and transfers their ownership to the
- * ldisc side of the queue. It then schedules those characters for
- * processing by the line discipline.
- */
-
-void tty_schedule_flip(struct tty_port *port)
-{
- struct tty_bufhead *buf = &port->buf;
-
- /* paired w/ acquire in flush_to_ldisc(); ensures
- * flush_to_ldisc() sees buffer data.
- */
- smp_store_release(&buf->tail->commit, buf->tail->used);
- queue_work(system_unbound_wq, &buf->work);
-}
-EXPORT_SYMBOL(tty_schedule_flip);
-
-/**
- * tty_prepare_flip_string - make room for characters
- * @port: tty port
- * @chars: return pointer for character write area
- * @size: desired size
- *
- * Prepare a block of space in the buffer for data. Returns the length
- * available and buffer pointer to the space which is now allocated and
- * accounted for as ready for normal characters. This is used for drivers
- * that need their own block copy routines into the buffer. There is no
- * guarantee the buffer is a DMA target!
+ * Prepare a block of space in the buffer for data.
+ *
+ * This is used for drivers that need their own block copy routines into the
+ * buffer. There is no guarantee the buffer is a DMA target!
+ *
+ * Returns: the length available and buffer pointer (@chars) to the space which
+ * is now allocated and accounted for as ready for normal characters.
*/
-
int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
size_t size)
{
@@ -453,16 +433,16 @@ int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
/**
- * tty_ldisc_receive_buf - forward data to line discipline
- * @ld: line discipline to process input
- * @p: char buffer
- * @f: TTY_* flags buffer
- * @count: number of bytes to process
+ * tty_ldisc_receive_buf - forward data to line discipline
+ * @ld: line discipline to process input
+ * @p: char buffer
+ * @f: %TTY_NORMAL, %TTY_BREAK, etc. flags buffer
+ * @count: number of bytes to process
*
- * Callers other than flush_to_ldisc() need to exclude the kworker
- * from concurrent use of the line discipline, see paste_selection().
+ * Callers other than flush_to_ldisc() need to exclude the kworker from
+ * concurrent use of the line discipline, see paste_selection().
*
- * Returns the number of bytes processed
+ * Returns: the number of bytes processed.
*/
int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
const char *f, int count)
@@ -495,18 +475,16 @@ receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
}
/**
- * flush_to_ldisc
- * @work: tty structure passed from work queue.
+ * flush_to_ldisc - flush data from buffer to ldisc
+ * @work: tty structure passed from work queue.
*
- * This routine is called out of the software interrupt to flush data
- * from the buffer chain to the line discipline.
+ * This routine is called out of the software interrupt to flush data from the
+ * buffer chain to the line discipline.
*
- * The receive_buf method is single threaded for each tty instance.
+ * The receive_buf() method is single threaded for each tty instance.
*
- * Locking: takes buffer lock to ensure single-threaded flip buffer
- * 'consumer'
+ * Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'.
*/
-
static void flush_to_ldisc(struct work_struct *work)
{
struct tty_port *port = container_of(work, struct tty_port, buf.work);
@@ -554,30 +532,35 @@ static void flush_to_ldisc(struct work_struct *work)
}
/**
- * tty_flip_buffer_push - terminal
- * @port: tty port to push
+ * tty_flip_buffer_push - push terminal buffers
+ * @port: tty port to push
*
- * Queue a push of the terminal flip buffers to the line discipline.
- * Can be called from IRQ/atomic context.
+ * Queue a push of the terminal flip buffers to the line discipline. Can be
+ * called from IRQ/atomic context.
*
- * In the event of the queue being busy for flipping the work will be
- * held off and retried later.
+ * In the event of the queue being busy for flipping the work will be held off
+ * and retried later.
*/
-
void tty_flip_buffer_push(struct tty_port *port)
{
- tty_schedule_flip(port);
+ struct tty_bufhead *buf = &port->buf;
+
+ /*
+ * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
+ * buffer data.
+ */
+ smp_store_release(&buf->tail->commit, buf->tail->used);
+ queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
/**
- * tty_buffer_init - prepare a tty buffer structure
- * @port: tty port to initialise
+ * tty_buffer_init - prepare a tty buffer structure
+ * @port: tty port to initialise
*
- * Set up the initial state of the buffer management for a tty device.
- * Must be called before the other tty buffer functions are used.
+ * Set up the initial state of the buffer management for a tty device. Must be
+ * called before the other tty buffer functions are used.
*/
-
void tty_buffer_init(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
@@ -594,14 +577,14 @@ void tty_buffer_init(struct tty_port *port)
}
/**
- * tty_buffer_set_limit - change the tty buffer memory limit
- * @port: tty port to change
- * @limit: memory limit to set
+ * tty_buffer_set_limit - change the tty buffer memory limit
+ * @port: tty port to change
+ * @limit: memory limit to set
*
- * Change the tty buffer memory limit.
- * Must be called before the other tty buffer functions are used.
+ * Change the tty buffer memory limit.
+ *
+ * Must be called before the other tty buffer functions are used.
*/
-
int tty_buffer_set_limit(struct tty_port *port, int limit)
{
if (limit < MIN_TTYB_SIZE)
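With tty_schedule_flip() gone, tty_flip_buffer_push() absorbs its body: the smp_store_release() on ->commit pairs with an acquire in flush_to_ldisc(), so the worker is guaranteed to see the buffered data before processing it. Driver-side, the conversion is mechanical, as in lpc32xx_hs.c above:

	tty_insert_flip_char(tport, 0, TTY_OVERRUN);
	tty_flip_buffer_push(tport);	/* was tty_schedule_flip(tport) */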
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 6616d4a0d41d..7e8b3bd59c7b 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -158,19 +158,18 @@ static int tty_fasync(int fd, struct file *filp, int on);
static void release_tty(struct tty_struct *tty, int idx);
/**
- * free_tty_struct - free a disused tty
- * @tty: tty struct to free
+ * free_tty_struct - free a disused tty
+ * @tty: tty struct to free
*
- * Free the write buffers, tty queue and tty memory itself.
+ * Free the write buffers, tty queue and tty memory itself.
*
- * Locking: none. Must be called after tty is definitely unused
+ * Locking: none. Must be called after tty is definitely unused
*/
-
static void free_tty_struct(struct tty_struct *tty)
{
tty_ldisc_deinit(tty);
put_device(tty->dev);
- kfree(tty->write_buf);
+ kvfree(tty->write_buf);
tty->magic = 0xDEADDEAD;
kfree(tty);
}
@@ -206,8 +205,9 @@ void tty_add_file(struct tty_struct *tty, struct file *file)
spin_unlock(&tty->files_lock);
}
-/*
+/**
* tty_free_file - free file->private_data
+ * @file: to free private_data of
*
* This shall be used only for fail path handling when tty_add_file was not
* called yet.
@@ -233,15 +233,14 @@ static void tty_del_file(struct file *file)
}
/**
- * tty_name - return tty naming
- * @tty: tty structure
+ * tty_name - return tty naming
+ * @tty: tty structure
*
- * Convert a tty structure into a name. The name reflects the kernel
- * naming policy and if udev is in use may not reflect user space
+ * Convert a tty structure into a name. The name reflects the kernel naming
+ * policy and, if udev is in use, may not reflect user space.
*
- * Locking: none
+ * Locking: none
*/
-
const char *tty_name(const struct tty_struct *tty)
{
if (!tty) /* Hmm. NULL pointer. That's fun. */
@@ -303,16 +302,15 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
}
/**
- * get_tty_driver - find device of a tty
- * @device: device identifier
- * @index: returns the index of the tty
+ * get_tty_driver - find device of a tty
+ * @device: device identifier
+ * @index: returns the index of the tty
*
- * This routine returns a tty driver structure, given a device number
- * and also passes back the index number.
+ * This routine returns a tty driver structure, given a device number and also
+ * passes back the index number.
*
- * Locking: caller must hold tty_mutex
+ * Locking: caller must hold tty_mutex
*/
-
static struct tty_driver *get_tty_driver(dev_t device, int *index)
{
struct tty_driver *p;
@@ -329,17 +327,17 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index)
}
/**
- * tty_dev_name_to_number - return dev_t for device name
- * @name: user space name of device under /dev
- * @number: pointer to dev_t that this function will populate
+ * tty_dev_name_to_number - return dev_t for device name
+ * @name: user space name of device under /dev
+ * @number: pointer to dev_t that this function will populate
*
- * This function converts device names like ttyS0 or ttyUSB1 into dev_t
- * like (4, 64) or (188, 1). If no corresponding driver is registered then
- * the function returns -ENODEV.
+ * This function converts device names like ttyS0 or ttyUSB1 into dev_t like
+ * (4, 64) or (188, 1). If no corresponding driver is registered then the
+ * function returns -%ENODEV.
*
- * Locking: this acquires tty_mutex to protect the tty_drivers list from
- * being modified while we are traversing it, and makes sure to
- * release it before exiting.
+ * Locking: this acquires tty_mutex to protect the tty_drivers list from
+ * being modified while we are traversing it, and makes sure to
+ * release it before exiting.
*/
int tty_dev_name_to_number(const char *name, dev_t *number)
{
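A hypothetical caller sketch (names and error handling are ours, not the
patch's):

	dev_t devt;
	int err = tty_dev_name_to_number("ttyS0", &devt);

	if (!err)
		pr_info("ttyS0 is (%u, %u)\n", MAJOR(devt), MINOR(devt));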
@@ -381,13 +379,12 @@ EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
#ifdef CONFIG_CONSOLE_POLL
/**
- * tty_find_polling_driver - find device of a polled tty
- * @name: name string to match
- * @line: pointer to resulting tty line nr
+ * tty_find_polling_driver - find device of a polled tty
+ * @name: name string to match
+ * @line: pointer to resulting tty line nr
*
- * This routine returns a tty driver structure, given a name
- * and the condition that the tty driver is capable of polled
- * operation.
+ * This routine returns a tty driver structure, given a name and the condition
+ * that the tty driver is capable of polled operation.
*/
struct tty_driver *tty_find_polling_driver(char *name, int *line)
{
@@ -515,14 +512,13 @@ static DEFINE_SPINLOCK(redirect_lock);
static struct file *redirect;
/**
- * tty_wakeup - request more data
- * @tty: terminal
+ * tty_wakeup - request more data
+ * @tty: terminal
*
- * Internal and external helper for wakeups of tty. This function
- * informs the line discipline if present that the driver is ready
- * to receive more output data.
+ * Internal and external helper for wakeups of tty. This function informs the
+ * line discipline if present that the driver is ready to receive more output
+ * data.
*/
-
void tty_wakeup(struct tty_struct *tty)
{
struct tty_ldisc *ld;
@@ -540,11 +536,11 @@ void tty_wakeup(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_wakeup);
/**
- * tty_release_redirect - Release a redirect on a pty if present
- * @tty: tty device
+ * tty_release_redirect - Release a redirect on a pty if present
+ * @tty: tty device
*
- * This is available to the pty code so if the master closes, if the
- * slave is a redirect it can release the redirect.
+ * This is available to the pty code, so that if the master closes and the
+ * slave is a redirect, it can release the redirect.
*/
static struct file *tty_release_redirect(struct tty_struct *tty)
{
@@ -561,27 +557,29 @@ static struct file *tty_release_redirect(struct tty_struct *tty)
}
/**
- * __tty_hangup - actual handler for hangup events
- * @tty: tty device
- * @exit_session: if non-zero, signal all foreground group processes
+ * __tty_hangup - actual handler for hangup events
+ * @tty: tty device
+ * @exit_session: if non-zero, signal all foreground group processes
+ *
+ * This can be called by a "kworker" kernel thread. That is process synchronous
+ * but doesn't hold any locks, so we need to make sure we have the appropriate
+ * locks for what we're doing.
+ *
+ * The hangup event clears any pending redirections onto the hung up device. It
+ * ensures future writes will error and it does the needed line discipline
+ * hangup and signal delivery. The tty object itself remains intact.
+ *
+ * Locking:
+ * * BTM
*
- * This can be called by a "kworker" kernel thread. That is process
- * synchronous but doesn't hold any locks, so we need to make sure we
- * have the appropriate locks for what we're doing.
+ * * redirect lock for undoing redirection
+ * * file list lock for manipulating list of ttys
+ * * tty_ldiscs_lock from called functions
+ * * termios_rwsem resetting termios data
+ * * tasklist_lock to walk task list for hangup event
*
- * The hangup event clears any pending redirections onto the hung up
- * device. It ensures future writes will error and it does the needed
- * line discipline hangup and signal delivery. The tty object itself
- * remains intact.
+ * * ->siglock to protect ->signal/->sighand
*
- * Locking:
- * BTM
- * redirect lock for undoing redirection
- * file list lock for manipulating list of ttys
- * tty_ldiscs_lock from called functions
- * termios_rwsem resetting termios data
- * tasklist_lock to walk task list for hangup event
- * ->siglock to protect ->signal/->sighand
*/
static void __tty_hangup(struct tty_struct *tty, int exit_session)
{
@@ -682,13 +680,12 @@ static void do_tty_hangup(struct work_struct *work)
}
/**
- * tty_hangup - trigger a hangup event
- * @tty: tty to hangup
+ * tty_hangup - trigger a hangup event
+ * @tty: tty to hangup
*
- * A carrier loss (virtual or otherwise) has occurred on this like
- * schedule a hangup sequence to run after this event.
+ * A carrier loss (virtual or otherwise) has occurred on @tty. Schedule a
+ * hangup sequence to run after this event.
*/
-
void tty_hangup(struct tty_struct *tty)
{
tty_debug_hangup(tty, "hangup\n");
@@ -697,14 +694,13 @@ void tty_hangup(struct tty_struct *tty)
EXPORT_SYMBOL(tty_hangup);
/**
- * tty_vhangup - process vhangup
- * @tty: tty to hangup
+ * tty_vhangup - process vhangup
+ * @tty: tty to hangup
*
- * The user has asked via system call for the terminal to be hung up.
- * We do this synchronously so that when the syscall returns the process
- * is complete. That guarantee is necessary for security reasons.
+ * The user has asked via system call for the terminal to be hung up. We do
+ * this synchronously so that when the syscall returns the process is complete.
+ * That guarantee is necessary for security reasons.
*/
-
void tty_vhangup(struct tty_struct *tty)
{
tty_debug_hangup(tty, "vhangup\n");
@@ -714,11 +710,10 @@ EXPORT_SYMBOL(tty_vhangup);
/**
- * tty_vhangup_self - process vhangup for own ctty
+ * tty_vhangup_self - process vhangup for own ctty
*
- * Perform a vhangup on the current controlling tty
+ * Perform a vhangup on the current controlling tty
*/
-
void tty_vhangup_self(void)
{
struct tty_struct *tty;
@@ -731,16 +726,15 @@ void tty_vhangup_self(void)
}
/**
- * tty_vhangup_session - hangup session leader exit
- * @tty: tty to hangup
+ * tty_vhangup_session - hangup session leader exit
+ * @tty: tty to hangup
*
- * The session leader is exiting and hanging up its controlling terminal.
- * Every process in the foreground process group is signalled SIGHUP.
+ * The session leader is exiting and hanging up its controlling terminal.
+ * Every process in the foreground process group is signalled %SIGHUP.
*
- * We do this synchronously so that when the syscall returns the process
- * is complete. That guarantee is necessary for security reasons.
+ * We do this synchronously so that when the syscall returns the process is
+ * complete. That guarantee is necessary for security reasons.
*/
-
void tty_vhangup_session(struct tty_struct *tty)
{
tty_debug_hangup(tty, "session hangup\n");
@@ -748,13 +742,11 @@ void tty_vhangup_session(struct tty_struct *tty)
}
/**
- * tty_hung_up_p - was tty hung up
- * @filp: file pointer of tty
+ * tty_hung_up_p - was tty hung up
+ * @filp: file pointer of tty
*
- * Return true if the tty has been subject to a vhangup or a carrier
- * loss
+ * Return: true if the tty has been subject to a vhangup or a carrier loss
*/
-
int tty_hung_up_p(struct file *filp)
{
return (filp && filp->f_op == &hung_up_tty_fops);
@@ -771,20 +763,18 @@ void __stop_tty(struct tty_struct *tty)
}
/**
- * stop_tty - propagate flow control
- * @tty: tty to stop
+ * stop_tty - propagate flow control
+ * @tty: tty to stop
*
- * Perform flow control to the driver. May be called
- * on an already stopped device and will not re-call the driver
- * method.
+ * Perform flow control to the driver. May be called on an already stopped
+ * device and will not re-call the &tty_driver->stop() method.
*
- * This functionality is used by both the line disciplines for
- * halting incoming flow and by the driver. It may therefore be
- * called from any context, may be under the tty atomic_write_lock
- * but not always.
+ * This functionality is used by both the line disciplines for halting incoming
+ * flow and by the driver. It may therefore be called from any context, may be
+ * under the tty %atomic_write_lock but not always.
*
- * Locking:
- * flow.lock
+ * Locking:
+ * flow.lock
*/
void stop_tty(struct tty_struct *tty)
{
@@ -807,15 +797,15 @@ void __start_tty(struct tty_struct *tty)
}
/**
- * start_tty - propagate flow control
- * @tty: tty to start
+ * start_tty - propagate flow control
+ * @tty: tty to start
*
- * Start a tty that has been stopped if at all possible. If this
- * tty was previous stopped and is now being started, the driver
- * start method is invoked and the line discipline woken.
+ * Start a tty that has been stopped if at all possible. If @tty was previously
+ * stopped and is now being started, the &tty_driver->start() method is invoked
+ * and the line discipline woken.
*
- * Locking:
- * flow.lock
+ * Locking:
+ * flow.lock
*/
void start_tty(struct tty_struct *tty)
{
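As a usage sketch, software flow control in a line discipline might look like
the following; demo_handle_flow_char() is hypothetical, while
STOP_CHAR()/START_CHAR() are the standard termios accessors:

static void demo_handle_flow_char(struct tty_struct *tty, unsigned char c)
{
	if (c == STOP_CHAR(tty))
		stop_tty(tty);		/* takes tty->flow.lock internally */
	else if (c == START_CHAR(tty))
		start_tty(tty);		/* no-op unless previously stopped */
}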
@@ -908,18 +898,17 @@ static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
/**
- * tty_read - read method for tty device files
- * @iocb: kernel I/O control block
- * @to: destination for the data read
+ * tty_read - read method for tty device files
+ * @iocb: kernel I/O control block
+ * @to: destination for the data read
*
- * Perform the read system call function on this terminal device. Checks
- * for hung up devices before calling the line discipline method.
+ * Perform the read system call function on this terminal device. Checks
+ * for hung up devices before calling the line discipline method.
*
- * Locking:
- * Locks the line discipline internally while needed. Multiple
- * read calls may be outstanding in parallel.
+ * Locking:
+ * Locks the line discipline internally while needed. Multiple read calls
+ * may be outstanding in parallel.
*/
-
static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
{
int i;
@@ -997,9 +986,6 @@ static inline ssize_t do_tty_write(
* layer has problems with bigger chunks. It will
* claim to be able to handle more characters than
* it actually does.
- *
- * FIXME: This can probably go away now except that 64K chunks
- * are too likely to fail unless switched to vmalloc...
*/
chunk = 2048;
if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
@@ -1014,12 +1000,12 @@ static inline ssize_t do_tty_write(
if (chunk < 1024)
chunk = 1024;
- buf_chunk = kmalloc(chunk, GFP_KERNEL);
+ buf_chunk = kvmalloc(chunk, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!buf_chunk) {
ret = -ENOMEM;
goto out;
}
- kfree(tty->write_buf);
+ kvfree(tty->write_buf);
tty->write_cnt = chunk;
tty->write_buf = buf_chunk;
}
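The switch from kmalloc()/kfree() to kvmalloc()/kvfree() here (and the
matching kvfree() in free_tty_struct() above) matters because the write
buffer can be up to 64K: under memory fragmentation such an allocation can
fail outright, while kvmalloc() may fall back to vmalloc(). The pattern,
reduced to a hypothetical helper:

#include <linux/mm.h>

/* Prefer kmalloc, fall back to vmalloc for large or fragmented
 * allocations; __GFP_RETRY_MAYFAIL avoids invoking the OOM killer.
 * Free the result with kvfree(), which handles both backings.
 */
static void *demo_alloc_write_buf(size_t chunk)
{
	return kvmalloc(chunk, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}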
@@ -1069,13 +1055,12 @@ out:
* @tty: the destination tty_struct
* @msg: the message to write
*
- * This is used for messages that need to be redirected to a specific tty.
- * We don't put it into the syslog queue right now maybe in the future if
- * really needed.
+ * This is used for messages that need to be redirected to a specific tty. We
+ * don't put it into the syslog queue right now; maybe in the future, if
+ * really needed.
*
* We must still hold the BTM and test the CLOSING flag for the moment.
*/
-
void tty_write_message(struct tty_struct *tty, char *msg)
{
if (tty) {
@@ -1113,18 +1098,18 @@ static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_
}
/**
- * tty_write - write method for tty device file
- * @iocb: kernel I/O control block
- * @from: iov_iter with data to write
+ * tty_write - write method for tty device file
+ * @iocb: kernel I/O control block
+ * @from: iov_iter with data to write
*
- * Write data to a tty device via the line discipline.
+ * Write data to a tty device via the line discipline.
*
- * Locking:
- * Locks the line discipline as required
- * Writes to the tty driver are serialized by the atomic_write_lock
- * and are then processed in chunks to the device. The line
- * discipline write method will not be invoked in parallel for
- * each device.
+ * Locking:
+ * Locks the line discipline as required
+ * Writes to the tty driver are serialized by the atomic_write_lock
+ * and are then processed in chunks to the device. The line
+ * discipline write method will not be invoked in parallel for
+ * each device.
*/
static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
{
@@ -1154,14 +1139,15 @@ ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
return tty_write(iocb, iter);
}
-/*
- * tty_send_xchar - send priority character
+/**
+ * tty_send_xchar - send priority character
+ * @tty: the tty to send to
+ * @ch: xchar to send
*
- * Send a high priority character to the tty even if stopped
+ * Send a high priority character to the tty even if stopped.
*
- * Locking: none for xchar method, write ordering for write method.
+ * Locking: none for xchar method, write ordering for write method.
*/
-
int tty_send_xchar(struct tty_struct *tty, char ch)
{
bool was_stopped = tty->flow.stopped;
@@ -1188,15 +1174,15 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
}
/**
- * pty_line_name - generate name for a pty
- * @driver: the tty driver in use
- * @index: the minor number
- * @p: output buffer of at least 6 bytes
+ * pty_line_name - generate name for a pty
+ * @driver: the tty driver in use
+ * @index: the minor number
+ * @p: output buffer of at least 6 bytes
*
- * Generate a name from a driver reference and write it to the output
- * buffer.
+ * Generate a name from a @driver reference and write it to the output buffer
+ * @p.
*
- * Locking: None
+ * Locking: None
*/
static void pty_line_name(struct tty_driver *driver, int index, char *p)
{
@@ -1209,15 +1195,15 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
}
/**
- * tty_line_name - generate name for a tty
- * @driver: the tty driver in use
- * @index: the minor number
- * @p: output buffer of at least 7 bytes
+ * tty_line_name - generate name for a tty
+ * @driver: the tty driver in use
+ * @index: the minor number
+ * @p: output buffer of at least 7 bytes
*
- * Generate a name from a driver reference and write it to the output
- * buffer.
+ * Generate a name from a @driver reference and write it to the output buffer
+ * @p.
*
- * Locking: None
+ * Locking: None
*/
static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
{
@@ -1229,15 +1215,15 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
}
/**
- * tty_driver_lookup_tty() - find an existing tty, if any
- * @driver: the driver for the tty
- * @file: file object
- * @idx: the minor number
+ * tty_driver_lookup_tty() - find an existing tty, if any
+ * @driver: the driver for the tty
+ * @file: file object
+ * @idx: the minor number
*
- * Return the tty, if found. If not found, return NULL or ERR_PTR() if the
- * driver lookup() method returns an error.
+ * Return: the tty, if found. If not found, return %NULL or ERR_PTR() if the
+ * driver lookup() method returns an error.
*
- * Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
+ * Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
*/
static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
struct file *file, int idx)
@@ -1258,13 +1244,12 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
}
/**
- * tty_init_termios - helper for termios setup
- * @tty: the tty to set up
+ * tty_init_termios - helper for termios setup
+ * @tty: the tty to set up
*
- * Initialise the termios structure for this tty. This runs under
- * the tty_mutex currently so we can be relaxed about ordering.
+ * Initialise the termios structure for this tty. This runs under the
+ * %tty_mutex currently so we can be relaxed about ordering.
*/
-
void tty_init_termios(struct tty_struct *tty)
{
struct ktermios *tp;
@@ -1287,6 +1272,14 @@ void tty_init_termios(struct tty_struct *tty)
}
EXPORT_SYMBOL_GPL(tty_init_termios);
+/**
+ * tty_standard_install - usual tty->ops->install
+ * @driver: the driver for the tty
+ * @tty: the tty
+ *
+ * If the @driver overrides @tty->ops->install, it still can call this function
+ * to perform the standard install operations.
+ */
int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty)
{
tty_init_termios(tty);
@@ -1298,16 +1291,15 @@ int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_standard_install);
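A hypothetical ->install() hook built on top of it:

static int demo_install(struct tty_driver *driver, struct tty_struct *tty)
{
	int ret = tty_standard_install(driver, tty);

	if (ret)
		return ret;
	/* driver-private per-tty setup would go here */
	return 0;
}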
/**
- * tty_driver_install_tty() - install a tty entry in the driver
- * @driver: the driver for the tty
- * @tty: the tty
+ * tty_driver_install_tty() - install a tty entry in the driver
+ * @driver: the driver for the tty
+ * @tty: the tty
*
- * Install a tty object into the driver tables. The tty->index field
- * will be set by the time this is called. This method is responsible
- * for ensuring any need additional structures are allocated and
- * configured.
+ * Install a tty object into the driver tables. The @tty->index field will be
+ * set by the time this is called. This method is responsible for ensuring any
+ * needed additional structures are allocated and configured.
*
- * Locking: tty_mutex for now
+ * Locking: tty_mutex for now
*/
static int tty_driver_install_tty(struct tty_driver *driver,
struct tty_struct *tty)
@@ -1317,14 +1309,14 @@ static int tty_driver_install_tty(struct tty_driver *driver,
}
/**
- * tty_driver_remove_tty() - remove a tty from the driver tables
- * @driver: the driver for the tty
- * @tty: tty to remove
+ * tty_driver_remove_tty() - remove a tty from the driver tables
+ * @driver: the driver for the tty
+ * @tty: tty to remove
*
- * Remvoe a tty object from the driver tables. The tty->index field
- * will be set by the time this is called.
+ * Remove a tty object from the driver tables. The tty->index field will be set
+ * by the time this is called.
*
- * Locking: tty_mutex for now
+ * Locking: tty_mutex for now
*/
static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
{
@@ -1335,13 +1327,13 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
}
/**
- * tty_reopen() - fast re-open of an open tty
- * @tty: the tty to open
+ * tty_reopen() - fast re-open of an open tty
+ * @tty: the tty to open
*
- * Return 0 on success, -errno on error.
- * Re-opens on master ptys are not allowed and return -EIO.
+ * Re-opens on master ptys are not allowed and return -%EIO.
*
- * Locking: Caller must hold tty_lock
+ * Locking: Caller must hold tty_lock
+ * Return: 0 on success, -errno on error.
*/
static int tty_reopen(struct tty_struct *tty)
{
@@ -1379,30 +1371,28 @@ static int tty_reopen(struct tty_struct *tty)
}
/**
- * tty_init_dev - initialise a tty device
- * @driver: tty driver we are opening a device on
- * @idx: device index
+ * tty_init_dev - initialise a tty device
+ * @driver: tty driver we are opening a device on
+ * @idx: device index
*
- * Prepare a tty device. This may not be a "new" clean device but
- * could also be an active device. The pty drivers require special
- * handling because of this.
+ * Prepare a tty device. This may not be a "new" clean device but could also be
+ * an active device. The pty drivers require special handling because of this.
*
- * Locking:
- * The function is called under the tty_mutex, which
- * protects us from the tty struct or driver itself going away.
+ * Locking:
+ * The function is called under the tty_mutex, which protects us from the
+ * tty struct or driver itself going away.
*
- * On exit the tty device has the line discipline attached and
- * a reference count of 1. If a pair was created for pty/tty use
- * and the other was a pty master then it too has a reference count of 1.
+ * On exit the tty device has the line discipline attached and a reference
+ * count of 1. If a pair was created for pty/tty use and the other was a pty
+ * master then it too has a reference count of 1.
*
- * WSH 06/09/97: Rewritten to remove races and properly clean up after a
- * failed open. The new code protects the open with a mutex, so it's
- * really quite straightforward. The mutex locking can probably be
- * relaxed for the (most common) case of reopening a tty.
+ * WSH 06/09/97: Rewritten to remove races and properly clean up after a failed
+ * open. The new code protects the open with a mutex, so it's really quite
+ * straightforward. The mutex locking can probably be relaxed for the (most
+ * common) case of reopening a tty.
*
- * Return: returned tty structure
+ * Return: new tty structure
*/
-
struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
{
struct tty_struct *tty;
@@ -1503,10 +1493,10 @@ void tty_save_termios(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_save_termios);
/**
- * tty_flush_works - flush all works of a tty/pty pair
- * @tty: tty device to flush works for (or either end of a pty pair)
+ * tty_flush_works - flush all works of a tty/pty pair
+ * @tty: tty device to flush works for (or either end of a pty pair)
*
- * Sync flush all works belonging to @tty (and the 'other' tty).
+ * Sync flush all works belonging to @tty (and the 'other' tty).
*/
static void tty_flush_works(struct tty_struct *tty)
{
@@ -1519,19 +1509,19 @@ static void tty_flush_works(struct tty_struct *tty)
}
/**
- * release_one_tty - release tty structure memory
- * @work: work of tty we are obliterating
+ * release_one_tty - release tty structure memory
+ * @work: work of tty we are obliterating
*
- * Releases memory associated with a tty structure, and clears out the
- * driver table slots. This function is called when a device is no longer
- * in use. It also gets called when setup of a device fails.
+ * Releases memory associated with a tty structure, and clears out the
+ * driver table slots. This function is called when a device is no longer
+ * in use. It also gets called when setup of a device fails.
*
- * Locking:
- * takes the file list lock internally when working on the list
- * of ttys that the driver keeps.
+ * Locking:
+ * takes the file list lock internally when working on the list of ttys
+ * that the driver keeps.
*
- * This method gets called from a work queue so that the driver private
- * cleanup ops can sleep (needed for USB at least)
+ * This method gets called from a work queue so that the driver private
+ * cleanup ops can sleep (needed for USB at least)
*/
static void release_one_tty(struct work_struct *work)
{
@@ -1568,13 +1558,12 @@ static void queue_release_one_tty(struct kref *kref)
}
/**
- * tty_kref_put - release a tty kref
- * @tty: tty device
+ * tty_kref_put - release a tty kref
+ * @tty: tty device
*
- * Release a reference to a tty device and if need be let the kref
- * layer destruct the object for us
+ * Release a reference to the @tty device and if need be let the kref layer
+ * destruct the object for us.
*/
-
void tty_kref_put(struct tty_struct *tty)
{
if (tty)
@@ -1583,18 +1572,17 @@ void tty_kref_put(struct tty_struct *tty)
EXPORT_SYMBOL(tty_kref_put);
/**
- * release_tty - release tty structure memory
- * @tty: tty device release
- * @idx: index of the tty device release
+ * release_tty - release tty structure memory
+ * @tty: tty device release
+ * @idx: index of the tty device release
*
- * Release both @tty and a possible linked partner (think pty pair),
- * and decrement the refcount of the backing module.
- *
- * Locking:
- * tty_mutex
- * takes the file list lock internally when working on the list
- * of ttys that the driver keeps.
+ * Release both @tty and a possible linked partner (think pty pair),
+ * and decrement the refcount of the backing module.
*
+ * Locking:
+ * tty_mutex
+ * takes the file list lock internally when working on the list of ttys
+ * that the driver keeps.
*/
static void release_tty(struct tty_struct *tty, int idx)
{
@@ -1619,12 +1607,12 @@ static void release_tty(struct tty_struct *tty, int idx)
}
/**
- * tty_release_checks - check a tty before real release
- * @tty: tty to check
- * @idx: index of the tty
+ * tty_release_checks - check a tty before real release
+ * @tty: tty to check
+ * @idx: index of the tty
*
- * Performs some paranoid checking before true release of the @tty.
- * This is a no-op unless TTY_PARANOIA_CHECK is defined.
+ * Performs some paranoid checking before true release of the @tty. This is a
+ * no-op unless %TTY_PARANOIA_CHECK is defined.
*/
static int tty_release_checks(struct tty_struct *tty, int idx)
{
@@ -1661,12 +1649,12 @@ static int tty_release_checks(struct tty_struct *tty, int idx)
}
/**
- * tty_kclose - closes tty opened by tty_kopen
- * @tty: tty device
+ * tty_kclose - closes tty opened by tty_kopen
+ * @tty: tty device
*
- * Performs the final steps to release and free a tty device. It is the
- * same as tty_release_struct except that it also resets TTY_PORT_KOPENED
- * flag on tty->port.
+ * Performs the final steps to release and free a tty device. It is the same as
+ * tty_release_struct() except that it also resets %TTY_PORT_KOPENED flag on
+ * @tty->port.
*/
void tty_kclose(struct tty_struct *tty)
{
@@ -1691,12 +1679,12 @@ void tty_kclose(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_kclose);
/**
- * tty_release_struct - release a tty struct
- * @tty: tty device
- * @idx: index of the tty
+ * tty_release_struct - release a tty struct
+ * @tty: tty device
+ * @idx: index of the tty
*
- * Performs the final steps to release and free a tty device. It is
- * roughly the reverse of tty_init_dev.
+ * Performs the final steps to release and free a tty device. It is roughly the
+ * reverse of tty_init_dev().
*/
void tty_release_struct(struct tty_struct *tty, int idx)
{
@@ -1720,24 +1708,23 @@ void tty_release_struct(struct tty_struct *tty, int idx)
EXPORT_SYMBOL_GPL(tty_release_struct);
/**
- * tty_release - vfs callback for close
- * @inode: inode of tty
- * @filp: file pointer for handle to tty
+ * tty_release - vfs callback for close
+ * @inode: inode of tty
+ * @filp: file pointer for handle to tty
*
- * Called the last time each file handle is closed that references
- * this tty. There may however be several such references.
+ * Called the last time each file handle is closed that references this tty.
+ * There may however be several such references.
*
- * Locking:
- * Takes bkl. See tty_release_dev
+ * Locking:
+ * Takes BKL. See tty_release_dev().
*
- * Even releasing the tty structures is a tricky business.. We have
- * to be very careful that the structures are all released at the
- * same time, as interrupts might otherwise get the wrong pointers.
+ * Even releasing the tty structures is a tricky business. We have to be very
+ * careful that the structures are all released at the same time, as interrupts
+ * might otherwise get the wrong pointers.
*
* WSH 09/09/97: rewritten to avoid some nasty race conditions that could
* lead to double frees or releasing memory still in use.
*/
-
int tty_release(struct inode *inode, struct file *filp)
{
struct tty_struct *tty = file_tty(filp);
@@ -1880,15 +1867,15 @@ int tty_release(struct inode *inode, struct file *filp)
}
/**
- * tty_open_current_tty - get locked tty of current task
- * @device: device number
- * @filp: file pointer to tty
- * @return: locked tty of the current task iff @device is /dev/tty
+ * tty_open_current_tty - get locked tty of current task
+ * @device: device number
+ * @filp: file pointer to tty
+ * @return: locked tty of the current task iff @device is /dev/tty
*
- * Performs a re-open of the current task's controlling tty.
+ * Performs a re-open of the current task's controlling tty.
*
- * We cannot return driver and index like for the other nodes because
- * devpts will not work then. It expects inodes to be from devpts FS.
+ * We cannot return driver and index as we do for the other nodes, because
+ * devpts will not work then. It expects inodes to be from the devpts FS.
*/
static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
{
@@ -1916,16 +1903,17 @@ static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
}
/**
- * tty_lookup_driver - lookup a tty driver for a given device file
- * @device: device number
- * @filp: file pointer to tty
- * @index: index for the device in the @return driver
- * @return: driver for this inode (with increased refcount)
+ * tty_lookup_driver - lookup a tty driver for a given device file
+ * @device: device number
+ * @filp: file pointer to tty
+ * @index: index for the device in the @return driver
+ *
+ * If the returned value is not erroneous, the caller is responsible for
+ * decrementing the refcount with tty_driver_kref_put().
*
- * If @return is not erroneous, the caller is responsible to decrement the
- * refcount by tty_driver_kref_put.
+ * Locking: %tty_mutex protects get_tty_driver()
*
- * Locking: tty_mutex protects get_tty_driver
+ * Return: driver for this inode (with increased refcount)
*/
static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
int *index)
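The refcount rule above, as a hypothetical caller sketch:

	int idx;
	struct tty_driver *drv = tty_lookup_driver(device, filp, &idx);

	if (IS_ERR(drv))
		return PTR_ERR(drv);
	/* ... use drv and idx ... */
	tty_driver_kref_put(drv);	/* drop the lookup's reference */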
@@ -2001,19 +1989,18 @@ out:
}
/**
- * tty_kopen_exclusive - open a tty device for kernel
- * @device: dev_t of device to open
+ * tty_kopen_exclusive - open a tty device for kernel
+ * @device: dev_t of device to open
*
- * Opens tty exclusively for kernel. Performs the driver lookup,
- * makes sure it's not already opened and performs the first-time
- * tty initialization.
+ * Opens tty exclusively for kernel. Performs the driver lookup, makes sure
+ * it's not already opened and performs the first-time tty initialization.
*
- * Returns the locked initialized &tty_struct
+ * Claims the global %tty_mutex to serialize:
+ * * concurrent first-time tty initialization
+ * * concurrent tty driver removal w/ lookup
+ * * concurrent tty removal from driver table
*
- * Claims the global tty_mutex to serialize:
- * - concurrent first-time tty initialization
- * - concurrent tty driver removal w/ lookup
- * - concurrent tty removal from driver table
+ * Return: the locked initialized &tty_struct
*/
struct tty_struct *tty_kopen_exclusive(dev_t device)
{
@@ -2022,13 +2009,13 @@ struct tty_struct *tty_kopen_exclusive(dev_t device)
EXPORT_SYMBOL_GPL(tty_kopen_exclusive);
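A hedged sketch of an in-kernel user (error handling simplified; teardown
would go through tty_kclose()):

	struct tty_struct *tty = tty_kopen_exclusive(device);

	if (IS_ERR(tty))
		return PTR_ERR(tty);
	/* first-time setup runs while the tty is still locked */
	tty_unlock(tty);
	/* ... use the tty, eventually tty_kclose(tty) ... */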
/**
- * tty_kopen_shared - open a tty device for shared in-kernel use
- * @device: dev_t of device to open
+ * tty_kopen_shared - open a tty device for shared in-kernel use
+ * @device: dev_t of device to open
*
- * Opens an already existing tty for in-kernel use. Compared to
- * tty_kopen_exclusive() above it doesn't ensure to be the only user.
+ * Opens an already existing tty for in-kernel use. Compared to
+ * tty_kopen_exclusive() above, it doesn't ensure it is the only user.
*
- * Locking is identical to tty_kopen() above.
+ * Locking: identical to tty_kopen() above.
*/
struct tty_struct *tty_kopen_shared(dev_t device)
{
@@ -2037,19 +2024,20 @@ struct tty_struct *tty_kopen_shared(dev_t device)
EXPORT_SYMBOL_GPL(tty_kopen_shared);
/**
- * tty_open_by_driver - open a tty device
- * @device: dev_t of device to open
- * @filp: file pointer to tty
+ * tty_open_by_driver - open a tty device
+ * @device: dev_t of device to open
+ * @filp: file pointer to tty
*
- * Performs the driver lookup, checks for a reopen, or otherwise
- * performs the first-time tty initialization.
+ * Performs the driver lookup, checks for a reopen, or otherwise performs the
+ * first-time tty initialization.
*
- * Returns the locked initialized or re-opened &tty_struct
- * Claims the global tty_mutex to serialize:
- * - concurrent first-time tty initialization
- * - concurrent tty driver removal w/ lookup
- * - concurrent tty removal from driver table
+ * Claims the global tty_mutex to serialize:
+ * * concurrent first-time tty initialization
+ * * concurrent tty driver removal w/ lookup
+ * * concurrent tty removal from driver table
+ *
+ * Return: the locked initialized or re-opened &tty_struct
*/
static struct tty_struct *tty_open_by_driver(dev_t device,
struct file *filp)
@@ -2104,29 +2092,28 @@ out:
}
/**
- * tty_open - open a tty device
- * @inode: inode of device file
- * @filp: file pointer to tty
+ * tty_open - open a tty device
+ * @inode: inode of device file
+ * @filp: file pointer to tty
*
- * tty_open and tty_release keep up the tty count that contains the
- * number of opens done on a tty. We cannot use the inode-count, as
- * different inodes might point to the same tty.
+ * tty_open() and tty_release() keep up the tty count that contains the number
+ * of opens done on a tty. We cannot use the inode-count, as different inodes
+ * might point to the same tty.
*
- * Open-counting is needed for pty masters, as well as for keeping
- * track of serial lines: DTR is dropped when the last close happens.
- * (This is not done solely through tty->count, now. - Ted 1/27/92)
+ * Open-counting is needed for pty masters, as well as for keeping track of
+ * serial lines: DTR is dropped when the last close happens.
+ * (This is not done solely through tty->count, now. - Ted 1/27/92)
*
- * The termios state of a pty is reset on first open so that
- * settings don't persist across reuse.
+ * The termios state of a pty is reset on the first open so that settings don't
+ * persist across reuse.
*
- * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
- * tty->count should protect the rest.
- * ->siglock protects ->signal/->sighand
+ * Locking:
+ * * %tty_mutex protects tty, tty_lookup_driver() and tty_init_dev().
+ * * @tty->count should protect the rest.
+ * * ->siglock protects ->signal/->sighand
*
- * Note: the tty_unlock/lock cases without a ref are only safe due to
- * tty_mutex
+ * Note: the tty_unlock/lock cases without a ref are only safe due to %tty_mutex
*/
-
static int tty_open(struct inode *inode, struct file *filp)
{
struct tty_struct *tty;
@@ -2198,19 +2185,17 @@ retry_open:
}
-
/**
- * tty_poll - check tty status
- * @filp: file being polled
- * @wait: poll wait structures to update
+ * tty_poll - check tty status
+ * @filp: file being polled
+ * @wait: poll wait structures to update
*
- * Call the line discipline polling method to obtain the poll
- * status of the device.
+ * Call the line discipline polling method to obtain the poll status of the
+ * device.
*
- * Locking: locks called line discipline but ldisc poll method
- * may be re-entered freely by other callers.
+ * Locking: locks called line discipline but ldisc poll method may be
+ * re-entered freely by other callers.
*/
-
static __poll_t tty_poll(struct file *filp, poll_table *wait)
{
struct tty_struct *tty = file_tty(filp);
@@ -2278,20 +2263,18 @@ static int tty_fasync(int fd, struct file *filp, int on)
}
/**
- * tiocsti - fake input character
- * @tty: tty to fake input into
- * @p: pointer to character
+ * tiocsti - fake input character
+ * @tty: tty to fake input into
+ * @p: pointer to character
*
- * Fake input to a tty device. Does the necessary locking and
- * input management.
+ * Fake input to a tty device. Does the necessary locking and input management.
*
- * FIXME: does not honour flow control ??
+ * FIXME: does not honour flow control ??
*
- * Locking:
- * Called functions take tty_ldiscs_lock
- * current->signal->tty check is safe without locks
+ * Locking:
+ * * Called functions take tty_ldiscs_lock
+ * * current->signal->tty check is safe without locks
*/
-
static int tiocsti(struct tty_struct *tty, char __user *p)
{
char ch, mbz = 0;
@@ -2314,16 +2297,15 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
}
/**
- * tiocgwinsz - implement window query ioctl
- * @tty: tty
- * @arg: user buffer for result
+ * tiocgwinsz - implement window query ioctl
+ * @tty: tty
+ * @arg: user buffer for result
*
- * Copies the kernel idea of the window size into the user buffer.
+ * Copies the kernel idea of the window size into the user buffer.
*
- * Locking: tty->winsize_mutex is taken to ensure the winsize data
- * is consistent.
+ * Locking: @tty->winsize_mutex is taken to ensure the winsize data is
+ * consistent.
*/
-
static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
{
int err;
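For reference, the userspace side of this ioctl, as a self-contained,
hypothetical example:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;

	/* Ask the kernel for the current window size of stdin's tty. */
	if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == 0)
		printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
	return 0;
}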
@@ -2336,14 +2318,13 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
}
/**
- * tty_do_resize - resize event
- * @tty: tty being resized
- * @ws: new dimensions
+ * tty_do_resize - resize event
+ * @tty: tty being resized
+ * @ws: new dimensions
*
- * Update the termios variables and send the necessary signals to
- * peform a terminal resize correctly
+ * Update the termios variables and send the necessary signals to perform a
+ * terminal resize correctly.
*/
-
int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
{
struct pid *pgrp;
@@ -2367,20 +2348,19 @@ done:
EXPORT_SYMBOL(tty_do_resize);
/**
- * tiocswinsz - implement window size set ioctl
- * @tty: tty side of tty
- * @arg: user buffer for result
+ * tiocswinsz - implement window size set ioctl
+ * @tty: tty side of tty
+ * @arg: user buffer for result
*
- * Copies the user idea of the window size to the kernel. Traditionally
- * this is just advisory information but for the Linux console it
- * actually has driver level meaning and triggers a VC resize.
+ * Copies the user idea of the window size to the kernel. Traditionally this is
+ * just advisory information but for the Linux console it actually has driver
+ * level meaning and triggers a VC resize.
*
- * Locking:
- * Driver dependent. The default do_resize method takes the
- * tty termios mutex and ctrl.lock. The console takes its own lock
- * then calls into the default method.
+ * Locking:
+ * Driver dependent. The default do_resize method takes the tty termios
+ * mutex and ctrl.lock. The console takes its own lock then calls into the
+ * default method.
*/
-
static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
{
struct winsize tmp_ws;
@@ -2395,14 +2375,13 @@ static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
}
/**
- * tioccons - allow admin to move logical console
- * @file: the file to become console
+ * tioccons - allow admin to move logical console
+ * @file: the file to become console
*
- * Allow the administrator to move the redirected console device
+ * Allow the administrator to move the redirected console device.
*
- * Locking: uses redirect_lock to guard the redirect information
+ * Locking: uses redirect_lock to guard the redirect information
*/
-
static int tioccons(struct file *file)
{
if (!capable(CAP_SYS_ADMIN))
@@ -2435,15 +2414,14 @@ static int tioccons(struct file *file)
}
/**
- * tiocsetd - set line discipline
- * @tty: tty device
- * @p: pointer to user data
+ * tiocsetd - set line discipline
+ * @tty: tty device
+ * @p: pointer to user data
*
- * Set the line discipline according to user request.
+ * Set the line discipline according to user request.
*
- * Locking: see tty_set_ldisc, this function is just a helper
+ * Locking: see tty_set_ldisc(), this function is just a helper
*/
-
static int tiocsetd(struct tty_struct *tty, int __user *p)
{
int disc;
@@ -2458,16 +2436,15 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
}
/**
- * tiocgetd - get line discipline
- * @tty: tty device
- * @p: pointer to user data
+ * tiocgetd - get line discipline
+ * @tty: tty device
+ * @p: pointer to user data
*
- * Retrieves the line discipline id directly from the ldisc.
+ * Retrieves the line discipline id directly from the ldisc.
*
- * Locking: waits for ldisc reference (in case the line discipline
- * is changing or the tty is being hungup)
+ * Locking: waits for ldisc reference (in case the line discipline is changing
+ * or the @tty is being hung up)
*/
-
static int tiocgetd(struct tty_struct *tty, int __user *p)
{
struct tty_ldisc *ld;
@@ -2482,18 +2459,16 @@ static int tiocgetd(struct tty_struct *tty, int __user *p)
}
/**
- * send_break - performed time break
- * @tty: device to break on
- * @duration: timeout in mS
- *
- * Perform a timed break on hardware that lacks its own driver level
- * timed break functionality.
+ * send_break - perform a timed break
+ * @tty: device to break on
+ * @duration: timeout in ms
*
- * Locking:
- * atomic_write_lock serializes
+ * Perform a timed break on hardware that lacks its own driver level timed
+ * break functionality.
*
+ * Locking:
+ * @tty->atomic_write_lock serializes
*/
-
static int send_break(struct tty_struct *tty, unsigned int duration)
{
int retval;
@@ -2522,16 +2497,15 @@ out:
}
/**
- * tty_tiocmget - get modem status
- * @tty: tty device
- * @p: pointer to result
+ * tty_tiocmget - get modem status
+ * @tty: tty device
+ * @p: pointer to result
*
- * Obtain the modem status bits from the tty driver if the feature
- * is supported. Return -ENOTTY if it is not available.
+ * Obtain the modem status bits from the tty driver if the feature is
+ * supported. Return -%ENOTTY if it is not available.
*
- * Locking: none (up to the driver)
+ * Locking: none (up to the driver)
*/
-
static int tty_tiocmget(struct tty_struct *tty, int __user *p)
{
int retval = -ENOTTY;
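The matching userspace calls, sketched hypothetically (fd is assumed to be an
already open serial port descriptor):

	int status;

	if (ioctl(fd, TIOCMGET, &status) == 0) {	/* read modem bits */
		status |= TIOCM_DTR | TIOCM_RTS;	/* raise DTR and RTS */
		ioctl(fd, TIOCMSET, &status);		/* write them back */
	}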
@@ -2546,17 +2520,16 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
}
/**
- * tty_tiocmset - set modem status
- * @tty: tty device
- * @cmd: command - clear bits, set bits or set all
- * @p: pointer to desired bits
+ * tty_tiocmset - set modem status
+ * @tty: tty device
+ * @cmd: command - clear bits, set bits or set all
+ * @p: pointer to desired bits
*
- * Set the modem status bits from the tty driver if the feature
- * is supported. Return -ENOTTY if it is not available.
+ * Set the modem status bits from the tty driver if the feature
+ * is supported. Return -%ENOTTY if it is not available.
*
- * Locking: none (up to the driver)
+ * Locking: none (up to the driver)
*/
-
static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
unsigned __user *p)
{
@@ -2588,13 +2561,13 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
}
/**
- * tty_get_icount - get tty statistics
- * @tty: tty device
- * @icount: output parameter
+ * tty_get_icount - get tty statistics
+ * @tty: tty device
+ * @icount: output parameter
*
- * Gets a copy of the tty's icount statistics.
+ * Gets a copy of the @tty's icount statistics.
*
- * Locking: none (up to the driver)
+ * Locking: none (up to the driver)
*/
int tty_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
@@ -2811,7 +2784,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return hung_up_tty_ioctl(file, cmd, arg);
retval = -EINVAL;
if (ld->ops->ioctl) {
- retval = ld->ops->ioctl(tty, file, cmd, arg);
+ retval = ld->ops->ioctl(tty, cmd, arg);
if (retval == -ENOIOCTLCMD)
retval = -ENOTTY;
}
@@ -2990,10 +2963,10 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
if (!ld)
return hung_up_tty_compat_ioctl(file, cmd, arg);
if (ld->ops->compat_ioctl)
- retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
+ retval = ld->ops->compat_ioctl(tty, cmd, arg);
if (retval == -ENOIOCTLCMD && ld->ops->ioctl)
- retval = ld->ops->ioctl(tty, file,
- (unsigned long)compat_ptr(cmd), arg);
+ retval = ld->ops->ioctl(tty, (unsigned long)compat_ptr(cmd),
+ arg);
tty_ldisc_deref(ld);
return retval;
@@ -3028,17 +3001,11 @@ static int this_tty(const void *t, struct file *file, unsigned fd)
*/
void __do_SAK(struct tty_struct *tty)
{
-#ifdef TTY_SOFT_SAK
- tty_hangup(tty);
-#else
struct task_struct *g, *p;
struct pid *session;
- int i;
+ int i;
unsigned long flags;
- if (!tty)
- return;
-
spin_lock_irqsave(&tty->ctrl.lock, flags);
session = get_pid(tty->ctrl.session);
spin_unlock_irqrestore(&tty->ctrl.lock, flags);
@@ -3060,7 +3027,8 @@ void __do_SAK(struct tty_struct *tty)
if (p->signal->tty == tty) {
tty_notice(tty, "SAK: killed process %d (%s): by controlling tty\n",
task_pid_nr(p), p->comm);
- group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p,
+ PIDTYPE_SID);
continue;
}
task_lock(p);
@@ -3068,13 +3036,13 @@ void __do_SAK(struct tty_struct *tty)
if (i != 0) {
tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n",
task_pid_nr(p), p->comm, i - 1);
- group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p,
+ PIDTYPE_SID);
}
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
put_pid(session);
-#endif
}
static void do_SAK_work(struct work_struct *work)
@@ -3107,14 +3075,15 @@ static struct device *tty_get_device(struct tty_struct *tty)
}
-/*
- * alloc_tty_struct
+/**
+ * alloc_tty_struct - allocate a new tty
+ * @driver: driver which will handle the returned tty
+ * @idx: minor of the tty
*
- * This subroutine allocates and initializes a tty structure.
+ * This subroutine allocates and initializes a tty structure.
*
- * Locking: none - tty in question is not exposed at this point
+ * Locking: none - @tty in question is not exposed at this point
*/
-
struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
{
struct tty_struct *tty;
@@ -3156,17 +3125,18 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
}
/**
- * tty_put_char - write one character to a tty
- * @tty: tty
- * @ch: character
+ * tty_put_char - write one character to a tty
+ * @tty: tty
+ * @ch: character to write
+ *
+ * Write one byte to the @tty using the provided @tty->ops->put_char() method
+ * if present.
*
- * Write one byte to the tty using the provided put_char method
- * if present. Returns the number of characters successfully output.
+ * Note: the specific put_char operation in the driver layer may go away
+ * soon. Don't call it directly; use this method.
*
- * Note: the specific put_char operation in the driver layer may go
- * away soon. Don't call it directly, use this method
+ * Return: the number of characters successfully output.
*/
-
int tty_put_char(struct tty_struct *tty, unsigned char ch)
{
if (tty->ops->put_char)
@@ -3195,24 +3165,23 @@ static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
}
/**
- * tty_register_device - register a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
- * @device: a struct device that is associated with this tty device.
- * This field is optional, if there is no known struct device
- * for this tty device it can be set to NULL safely.
+ * tty_register_device - register a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ * @device: a struct device that is associated with this tty device.
+ * This field is optional, if there is no known struct device
+ * for this tty device it can be set to NULL safely.
*
- * Returns a pointer to the struct device for this tty device
- * (or ERR_PTR(-EFOO) on error).
+ * This call is required to be made to register an individual tty device
+ * if the tty driver's flags have the %TTY_DRIVER_DYNAMIC_DEV bit set. If
+ * that bit is not set, this function should not be called by a tty
+ * driver.
*
- * This call is required to be made to register an individual tty device
- * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
- * that bit is not set, this function should not be called by a tty
- * driver.
+ * Locking: ??
*
- * Locking: ??
+ * Return: A pointer to the struct device for this tty device (or
+ * ERR_PTR(-EFOO) on error).
*/
-
struct device *tty_register_device(struct tty_driver *driver, unsigned index,
struct device *device)
{
@@ -3227,24 +3196,23 @@ static void tty_device_create_release(struct device *dev)
}
/**
- * tty_register_device_attr - register a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
- * @device: a struct device that is associated with this tty device.
- * This field is optional, if there is no known struct device
- * for this tty device it can be set to NULL safely.
- * @drvdata: Driver data to be set to device.
- * @attr_grp: Attribute group to be set on device.
+ * tty_register_device_attr - register a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ * @device: a struct device that is associated with this tty device.
+ * This field is optional, if there is no known struct device
+ * for this tty device it can be set to %NULL safely.
+ * @drvdata: Driver data to be set to device.
+ * @attr_grp: Attribute group to be set on device.
*
- * Returns a pointer to the struct device for this tty device
- * (or ERR_PTR(-EFOO) on error).
+ * This call is required to be made to register an individual tty device if the
+ * tty driver's flags have the %TTY_DRIVER_DYNAMIC_DEV bit set. If that bit is
+ * not set, this function should not be called by a tty driver.
*
- * This call is required to be made to register an individual tty device
- * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
- * that bit is not set, this function should not be called by a tty
- * driver.
+ * Locking: ??
*
- * Locking: ??
+ * Return: A pointer to the struct device for this tty device (or
+ * ERR_PTR(-EFOO) on error).
*/
struct device *tty_register_device_attr(struct tty_driver *driver,
unsigned index, struct device *device,
@@ -3317,16 +3285,15 @@ err_put:
EXPORT_SYMBOL_GPL(tty_register_device_attr);
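Usage under %TTY_DRIVER_DYNAMIC_DEV, sketched with hypothetical names
(demo_driver, parent_dev):

	/* after tty_register_driver(demo_driver) succeeded */
	struct device *dev = tty_register_device(demo_driver, 0, parent_dev);

	if (IS_ERR(dev))
		return PTR_ERR(dev);
	/* ... line 0 is now visible to userspace ... */
	tty_unregister_device(demo_driver, 0);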
/**
- * tty_unregister_device - unregister a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
+ * tty_unregister_device - unregister a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
*
- * If a tty device is registered with a call to tty_register_device() then
- * this function must be called when the tty device is gone.
+ * If a tty device is registered with a call to tty_register_device() then
+ * this function must be called when the tty device is gone.
*
- * Locking: ??
+ * Locking: ??
*/
-
void tty_unregister_device(struct tty_driver *driver, unsigned index)
{
device_destroy(tty_class,
@@ -3342,10 +3309,10 @@ EXPORT_SYMBOL(tty_unregister_device);
* __tty_alloc_driver -- allocate tty driver
* @lines: count of lines this driver can handle at most
* @owner: module which is responsible for this driver
- * @flags: some of TTY_DRIVER_* flags, will be set in driver->flags
+ * @flags: some of %TTY_DRIVER_ flags, will be set in driver->flags
*
* This should not be called directly, some of the provided macros should be
- * used instead. Use IS_ERR and friends on @retval.
+ * used instead. Use IS_ERR() and friends on @retval.
*/
struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
unsigned long flags)
@@ -3432,13 +3399,22 @@ static void destruct_tty_driver(struct kref *kref)
kfree(driver);
}
+/**
+ * tty_driver_kref_put -- drop a reference to a tty driver
+ * @driver: driver of which to drop the reference
+ *
+ * The final put will destroy and free up the driver.
+ */
void tty_driver_kref_put(struct tty_driver *driver)
{
kref_put(&driver->kref, destruct_tty_driver);
}
EXPORT_SYMBOL(tty_driver_kref_put);
-/*
+/**
+ * tty_register_driver -- register a tty driver
+ * @driver: driver to register
+ *
* Called by a tty driver to register itself.
*/
int tty_register_driver(struct tty_driver *driver)
@@ -3500,7 +3476,10 @@ err:
}
EXPORT_SYMBOL(tty_register_driver);
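Putting __tty_alloc_driver() (via the usual tty_alloc_driver() wrapper),
tty_register_driver() and tty_driver_kref_put() together, a hypothetical
registration sequence; the demo names are ours:

	struct tty_driver *drv = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW |
						     TTY_DRIVER_DYNAMIC_DEV);

	if (IS_ERR(drv))
		return PTR_ERR(drv);

	drv->driver_name = "demo";
	drv->name = "ttyDEMO";
	drv->type = TTY_DRIVER_TYPE_SERIAL;
	drv->subtype = SERIAL_TYPE_NORMAL;
	drv->init_termios = tty_std_termios;
	tty_set_operations(drv, &demo_ops);

	if (tty_register_driver(drv)) {
		tty_driver_kref_put(drv);	/* final put frees it */
		return -ENODEV;
	}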
-/*
+/**
+ * tty_unregister_driver -- unregister a tty driver
+ * @driver: driver to unregister
+ *
* Called by a tty driver to unregister itself.
*/
void tty_unregister_driver(struct tty_driver *driver)
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 3e4e0b20b4bb..776d8a62f77c 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -47,17 +47,14 @@ static DEFINE_RAW_SPINLOCK(tty_ldiscs_lock);
static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
/**
- * tty_register_ldisc - install a line discipline
- * @new_ldisc: pointer to the ldisc object
+ * tty_register_ldisc - install a line discipline
+ * @new_ldisc: pointer to the ldisc object
*
- * Installs a new line discipline into the kernel. The discipline
- * is set up as unreferenced and then made available to the kernel
- * from this point onwards.
+ * Installs a new line discipline into the kernel. The discipline is set up as
+ * unreferenced and then made available to the kernel from this point onwards.
*
- * Locking:
- * takes tty_ldiscs_lock to guard against ldisc races
+ * Locking: takes %tty_ldiscs_lock to guard against ldisc races
*/
-
int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc)
{
unsigned long flags;
@@ -75,14 +72,13 @@ int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc)
EXPORT_SYMBOL(tty_register_ldisc);
/**
- * tty_unregister_ldisc - unload a line discipline
- * @ldisc: ldisc number
+ * tty_unregister_ldisc - unload a line discipline
+ * @ldisc: ldisc number
*
- * Remove a line discipline from the kernel providing it is not
- * currently in use.
+ * Remove a line discipline from the kernel providing it is not currently in
+ * use.
*
- * Locking:
- * takes tty_ldiscs_lock to guard against ldisc races
+ * Locking: takes %tty_ldiscs_lock to guard against ldisc races
*/
void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc)
@@ -122,27 +118,25 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
}
static int tty_ldisc_autoload = IS_BUILTIN(CONFIG_LDISC_AUTOLOAD);
+
/**
- * tty_ldisc_get - take a reference to an ldisc
- * @tty: tty device
- * @disc: ldisc number
- *
- * Takes a reference to a line discipline. Deals with refcounts and
- * module locking counts.
- *
- * Returns: -EINVAL if the discipline index is not [N_TTY..NR_LDISCS] or
- * if the discipline is not registered
- * -EAGAIN if request_module() failed to load or register the
- * discipline
- * -ENOMEM if allocation failure
- *
- * Otherwise, returns a pointer to the discipline and bumps the
- * ref count
- *
- * Locking:
- * takes tty_ldiscs_lock to guard against ldisc races
+ * tty_ldisc_get - take a reference to an ldisc
+ * @tty: tty device
+ * @disc: ldisc number
+ *
+ * Takes a reference to a line discipline. Deals with refcounts and module
+ * locking counts. If the discipline is not available, its module is loaded,
+ * if possible.
+ *
+ * Returns:
+ * * -%EINVAL if the discipline index is not [%N_TTY .. %NR_LDISCS] or if the
+ * discipline is not registered
+ * * -%EAGAIN if request_module() failed to load or register the discipline
+ * * -%ENOMEM if allocation failure
+ * * Otherwise, returns a pointer to the discipline and bumps the ref count
+ *
+ * Locking: takes %tty_ldiscs_lock to guard against ldisc races
*/
-
static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld;
@@ -176,10 +170,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
return ld;
}
-/*
- * tty_ldisc_put - release the ldisc
+/**
+ * tty_ldisc_put - release the ldisc
+ * @ld: ldisc to release
*
- * Complement of tty_ldisc_get().
+ * Complement of tty_ldisc_get().
*/
static void tty_ldisc_put(struct tty_ldisc *ld)
{
@@ -226,25 +221,22 @@ const struct seq_operations tty_ldiscs_seq_ops = {
};
/**
- * tty_ldisc_ref_wait - wait for the tty ldisc
- * @tty: tty device
+ * tty_ldisc_ref_wait - wait for the tty ldisc
+ * @tty: tty device
*
- * Dereference the line discipline for the terminal and take a
- * reference to it. If the line discipline is in flux then
- * wait patiently until it changes.
+ * Dereference the line discipline for the terminal and take a reference to it.
+ * If the line discipline is in flux then wait patiently until it changes.
*
- * Returns: NULL if the tty has been hungup and not re-opened with
- * a new file descriptor, otherwise valid ldisc reference
+ * Returns: %NULL if the tty has been hung up and not re-opened with a new file
+ * descriptor, otherwise valid ldisc reference
*
- * Note 1: Must not be called from an IRQ/timer context. The caller
- * must also be careful not to hold other locks that will deadlock
- * against a discipline change, such as an existing ldisc reference
- * (which we check for)
+ * Note 1: Must not be called from an IRQ/timer context. The caller must also
+ * be careful not to hold other locks that will deadlock against a discipline
+ * change, such as an existing ldisc reference (which we check for).
*
- * Note 2: a file_operations routine (read/poll/write) should use this
- * function to wait for any ldisc lifetime events to finish.
+ * Note 2: a file_operations routine (read/poll/write) should use this function
+ * to wait for any ldisc lifetime events to finish.
*/
-
struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
{
struct tty_ldisc *ld;
@@ -258,14 +250,13 @@ struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
/**
- * tty_ldisc_ref - get the tty ldisc
- * @tty: tty device
+ * tty_ldisc_ref - get the tty ldisc
+ * @tty: tty device
*
- * Dereference the line discipline for the terminal and take a
- * reference to it. If the line discipline is in flux then
- * return NULL. Can be called from IRQ and timer functions.
+ * Dereference the line discipline for the terminal and take a reference to it.
+ * If the line discipline is in flux then return %NULL. Can be called from IRQ
+ * and timer functions.
*/
-
struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
{
struct tty_ldisc *ld = NULL;
@@ -280,13 +271,12 @@ struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_ldisc_ref);
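The ref/deref pair is the usual pattern around any ldisc ops call; a minimal sketch (my_flush() is illustrative, mirroring what tty_ldisc_flush() itself does):

static void my_flush(struct tty_struct *tty)
{
        struct tty_ldisc *ld = tty_ldisc_ref(tty);      /* NULL if in flux */

        if (!ld)
                return;
        if (ld->ops->flush_buffer)
                ld->ops->flush_buffer(tty);
        tty_ldisc_deref(ld);            /* balance every successful ref */
}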
/**
- * tty_ldisc_deref - free a tty ldisc reference
- * @ld: reference to free up
+ * tty_ldisc_deref - free a tty ldisc reference
+ * @ld: reference to free up
*
- * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
- * be called in IRQ context.
+ * Undoes the effect of tty_ldisc_ref() or tty_ldisc_ref_wait(). May be called
+ * in IRQ context.
*/
-
void tty_ldisc_deref(struct tty_ldisc *ld)
{
ldsem_up_read(&ld->tty->ldisc_sem);
@@ -386,13 +376,12 @@ static void tty_ldisc_unlock_pair(struct tty_struct *tty,
}
/**
- * tty_ldisc_flush - flush line discipline queue
- * @tty: tty
+ * tty_ldisc_flush - flush line discipline queue
+ * @tty: tty to flush ldisc for
*
- * Flush the line discipline queue (if any) and the tty flip buffers
- * for this tty.
+ * Flush the line discipline queue (if any) and the tty flip buffers for this
+ * @tty.
*/
-
void tty_ldisc_flush(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_ref(tty);
@@ -404,21 +393,18 @@ void tty_ldisc_flush(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_ldisc_flush);
/**
- * tty_set_termios_ldisc - set ldisc field
- * @tty: tty structure
- * @disc: line discipline number
+ * tty_set_termios_ldisc - set ldisc field
+ * @tty: tty structure
+ * @disc: line discipline number
*
- * This is probably overkill for real world processors but
- * they are not on hot paths so a little discipline won't do
- * any harm.
+ * This is probably overkill for real world processors but they are not on hot
+ * paths so a little discipline won't do any harm.
*
- * The line discipline-related tty_struct fields are reset to
- * prevent the ldisc driver from re-using stale information for
- * the new ldisc instance.
+ * The line discipline-related tty_struct fields are reset to prevent the ldisc
+ * driver from re-using stale information for the new ldisc instance.
*
- * Locking: takes termios_rwsem
+ * Locking: takes termios_rwsem
*/
-
static void tty_set_termios_ldisc(struct tty_struct *tty, int disc)
{
down_write(&tty->termios_rwsem);
@@ -430,16 +416,14 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int disc)
}
/**
- * tty_ldisc_open - open a line discipline
- * @tty: tty we are opening the ldisc on
- * @ld: discipline to open
+ * tty_ldisc_open - open a line discipline
+ * @tty: tty we are opening the ldisc on
+ * @ld: discipline to open
*
- * A helper opening method. Also a convenient debugging and check
- * point.
+ * A helper opening method. Also a convenient debugging and check point.
*
- * Locking: always called with BTM already held.
+ * Locking: always called with BTM already held.
*/
-
static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
{
WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
@@ -457,14 +441,12 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
}
/**
- * tty_ldisc_close - close a line discipline
- * @tty: tty we are opening the ldisc on
- * @ld: discipline to close
+ * tty_ldisc_close - close a line discipline
+ * @tty: tty we are opening the ldisc on
+ * @ld: discipline to close
*
- * A helper close method. Also a convenient debugging and check
- * point.
+ * A helper close method. Also a convenient debugging and check point.
*/
-
static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
{
lockdep_assert_held_write(&tty->ldisc_sem);
@@ -476,14 +458,13 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
}
/**
- * tty_ldisc_failto - helper for ldisc failback
- * @tty: tty to open the ldisc on
- * @ld: ldisc we are trying to fail back to
+ * tty_ldisc_failto - helper for ldisc failback
+ * @tty: tty to open the ldisc on
+ * @ld: ldisc we are trying to fail back to
*
- * Helper to try and recover a tty when switching back to the old
- * ldisc fails and we need something attached.
+ * Helper to try and recover a tty when switching back to the old ldisc fails
+ * and we need something attached.
*/
-
static int tty_ldisc_failto(struct tty_struct *tty, int ld)
{
struct tty_ldisc *disc = tty_ldisc_get(tty, ld);
@@ -501,14 +482,13 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
}
/**
- * tty_ldisc_restore - helper for tty ldisc change
- * @tty: tty to recover
- * @old: previous ldisc
+ * tty_ldisc_restore - helper for tty ldisc change
+ * @tty: tty to recover
+ * @old: previous ldisc
*
- * Restore the previous line discipline or N_TTY when a line discipline
- * change fails due to an open error
+ * Restore the previous line discipline or %N_TTY when a line discipline change
+ * fails due to an open error
*/
-
static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
{
/* There is an outstanding reference here so this is safe */
@@ -528,16 +508,15 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
}
/**
- * tty_set_ldisc - set line discipline
- * @tty: the terminal to set
- * @disc: the line discipline number
- *
- * Set the discipline of a tty line. Must be called from a process
- * context. The ldisc change logic has to protect itself against any
- * overlapping ldisc change (including on the other end of pty pairs),
- * the close of one side of a tty/pty pair, and eventually hangup.
+ * tty_set_ldisc - set line discipline
+ * @tty: the terminal to set
+ * @disc: the line discipline number
+ *
+ * Set the discipline of a tty line. Must be called from a process context. The
+ * ldisc change logic has to protect itself against any overlapping ldisc
+ * change (including on the other end of pty pairs), the close of one side of a
+ * tty/pty pair, and eventually hangup.
*/
-
int tty_set_ldisc(struct tty_struct *tty, int disc)
{
int retval;
@@ -613,10 +592,10 @@ err:
EXPORT_SYMBOL_GPL(tty_set_ldisc);
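From userspace this path is reached through the TIOCSETD ioctl; a hedged illustration in plain C:

#include <sys/ioctl.h>
#include <errno.h>

/* Switch fd's line discipline, e.g. to N_SLIP from <linux/tty.h>. */
static int set_ldisc(int fd, int ldisc)
{
        if (ioctl(fd, TIOCSETD, &ldisc) < 0)
                return -errno;  /* -EINVAL for unknown disciplines */
        return 0;
}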
/**
- * tty_ldisc_kill - teardown ldisc
- * @tty: tty being released
+ * tty_ldisc_kill - teardown ldisc
+ * @tty: tty being released
*
- * Perform final close of the ldisc and reset tty->ldisc
+ * Perform final close of the ldisc and reset @tty->ldisc
*/
static void tty_ldisc_kill(struct tty_struct *tty)
{
@@ -633,12 +612,11 @@ static void tty_ldisc_kill(struct tty_struct *tty)
}
/**
- * tty_reset_termios - reset terminal state
- * @tty: tty to reset
+ * tty_reset_termios - reset terminal state
+ * @tty: tty to reset
*
- * Restore a terminal to the driver default state.
+ * Restore a terminal to the driver default state.
*/
-
static void tty_reset_termios(struct tty_struct *tty)
{
down_write(&tty->termios_rwsem);
@@ -650,19 +628,17 @@ static void tty_reset_termios(struct tty_struct *tty)
/**
- * tty_ldisc_reinit - reinitialise the tty ldisc
- * @tty: tty to reinit
- * @disc: line discipline to reinitialize
+ * tty_ldisc_reinit - reinitialise the tty ldisc
+ * @tty: tty to reinit
+ * @disc: line discipline to reinitialize
*
- * Completely reinitialize the line discipline state, by closing the
- * current instance, if there is one, and opening a new instance. If
- * an error occurs opening the new non-N_TTY instance, the instance
- * is dropped and tty->ldisc reset to NULL. The caller can then retry
- * with N_TTY instead.
+ * Completely reinitialize the line discipline state, by closing the current
+ * instance, if there is one, and opening a new instance. If an error occurs
+ * opening the new non-%N_TTY instance, the instance is dropped and @tty->ldisc
+ * reset to %NULL. The caller can then retry with %N_TTY instead.
*
- * Returns 0 if successful, otherwise error code < 0
+ * Returns: 0 if successful, otherwise a negative error code
*/
-
int tty_ldisc_reinit(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld;
@@ -692,21 +668,20 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
}
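The fallback the comment describes looks like this in a caller, and is essentially what tty_ldisc_hangup() below does; a sketch assuming ldisc_sem is already write-held as tty_ldisc_reinit() requires:

        if (tty_ldisc_reinit(tty, disc) < 0) {
                /* tty->ldisc is NULL here; N_TTY is the safe fallback */
                if (tty_ldisc_reinit(tty, N_TTY) < 0)
                        pr_err("ldisc: cannot restore N_TTY\n");
        }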
/**
- * tty_ldisc_hangup - hangup ldisc reset
- * @tty: tty being hung up
- * @reinit: whether to re-initialise the tty
+ * tty_ldisc_hangup - hangup ldisc reset
+ * @tty: tty being hung up
+ * @reinit: whether to re-initialise the tty
*
- * Some tty devices reset their termios when they receive a hangup
- * event. In that situation we must also switch back to N_TTY properly
- * before we reset the termios data.
+ * Some tty devices reset their termios when they receive a hangup event. In
+ * that situation we must also switch back to %N_TTY properly before we reset
+ * the termios data.
*
- * Locking: We can take the ldisc mutex as the rest of the code is
- * careful to allow for this.
+ * Locking: We can take the ldisc mutex as the rest of the code is careful to
+ * allow for this.
*
- * In the pty pair case this occurs in the close() path of the
- * tty itself so we must be careful about locking rules.
+ * In the pty pair case this occurs in the close() path of the tty itself so we
+ * must be careful about locking rules.
*/
-
void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
{
struct tty_ldisc *ld;
@@ -752,15 +727,14 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
}
/**
- * tty_ldisc_setup - open line discipline
- * @tty: tty being shut down
- * @o_tty: pair tty for pty/tty pairs
+ * tty_ldisc_setup - open line discipline
+ * @tty: tty being shut down
+ * @o_tty: pair tty for pty/tty pairs
*
- * Called during the initial open of a tty/pty pair in order to set up the
- * line disciplines and bind them to the tty. This has no locking issues
- * as the device isn't yet active.
+ * Called during the initial open of a tty/pty pair in order to set up the line
+ * disciplines and bind them to the @tty. This has no locking issues as the
+ * device isn't yet active.
*/
-
int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
{
int retval = tty_ldisc_open(tty, tty->ldisc);
@@ -783,13 +757,12 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
}
/**
- * tty_ldisc_release - release line discipline
- * @tty: tty being shut down (or one end of pty pair)
+ * tty_ldisc_release - release line discipline
+ * @tty: tty being shut down (or one end of pty pair)
*
- * Called during the final close of a tty or a pty pair in order to shut
- * down the line discpline layer. On exit, each tty's ldisc is NULL.
+ * Called during the final close of a tty or a pty pair in order to shut down
+ * the line discipline layer. On exit, each tty's ldisc is %NULL.
*/
-
void tty_ldisc_release(struct tty_struct *tty)
{
struct tty_struct *o_tty = tty->link;
@@ -814,13 +787,12 @@ void tty_ldisc_release(struct tty_struct *tty)
}
/**
- * tty_ldisc_init - ldisc setup for new tty
- * @tty: tty being allocated
+ * tty_ldisc_init - ldisc setup for new tty
+ * @tty: tty being allocated
*
- * Set up the line discipline objects for a newly allocated tty. Note that
- * the tty structure is not completely set up when this call is made.
+ * Set up the line discipline objects for a newly allocated tty. Note that the
+ * tty structure is not completely set up when this call is made.
*/
-
int tty_ldisc_init(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
@@ -832,11 +804,11 @@ int tty_ldisc_init(struct tty_struct *tty)
}
/**
- * tty_ldisc_deinit - ldisc cleanup for new tty
- * @tty: tty that was allocated recently
+ * tty_ldisc_deinit - ldisc cleanup for new tty
+ * @tty: tty that was allocated recently
*
- * The tty structure must not becompletely set up (tty_ldisc_setup) when
- * this call is made.
+ * The tty structure must not be completely set up (tty_ldisc_setup()) when
+ * this call is made.
*/
void tty_ldisc_deinit(struct tty_struct *tty)
{
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index ce8291053af3..3be428c16260 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -163,7 +163,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
/*
* Try to reverse the lock attempt but if the count has changed
- * so that reversing fails, check if there are are no waiters,
+ * so that reversing fails, check if there are no waiters,
* and early-out if not
*/
do {
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 2f1061a9d926..7709ce655f44 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -59,6 +59,15 @@ const struct tty_port_client_operations tty_port_default_client_ops = {
};
EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
+/**
+ * tty_port_init -- initialize tty_port
+ * @port: tty_port to initialize
+ *
+ * Initializes the state of struct tty_port. When a port was initialized using
+ * this function, one has to destroy the port by tty_port_destroy(), either
+ * indirectly by using &tty_port refcounting (tty_port_put()) or directly if
+ * refcounting is not used.
+ */
void tty_port_init(struct tty_port *port)
{
memset(port, 0, sizeof(*port));
@@ -82,9 +91,9 @@ EXPORT_SYMBOL(tty_port_init);
* @index: index of the tty
*
* Provide the tty layer with a link from a tty (specified by @index) to a
- * tty_port (@port). Use this only if neither tty_port_register_device nor
- * tty_port_install is used in the driver. If used, this has to be called before
- * tty_register_driver.
+ * tty_port (@port). Use this only if neither tty_port_register_device() nor
+ * tty_port_install() is used in the driver. If used, this has to be called
+ * before tty_register_driver().
*/
void tty_port_link_device(struct tty_port *port,
struct tty_driver *driver, unsigned index)
@@ -102,9 +111,9 @@ EXPORT_SYMBOL_GPL(tty_port_link_device);
* @index: index of the tty
* @device: parent if exists, otherwise NULL
*
- * It is the same as tty_register_device except the provided @port is linked to
- * a concrete tty specified by @index. Use this or tty_port_install (or both).
- * Call tty_port_link_device as a last resort.
+ * It is the same as tty_register_device() except the provided @port is linked
+ * to a concrete tty specified by @index. Use this or tty_port_install() (or
+ * both). Call tty_port_link_device() as a last resort.
*/
struct device *tty_port_register_device(struct tty_port *port,
struct tty_driver *driver, unsigned index,
@@ -123,9 +132,9 @@ EXPORT_SYMBOL_GPL(tty_port_register_device);
* @drvdata: Driver data to be set to device.
* @attr_grp: Attribute group to be set on device.
*
- * It is the same as tty_register_device_attr except the provided @port is
- * linked to a concrete tty specified by @index. Use this or tty_port_install
- * (or both). Call tty_port_link_device as a last resort.
+ * It is the same as tty_register_device_attr() except the provided @port is
+ * linked to a concrete tty specified by @index. Use this or tty_port_install()
+ * (or both). Call tty_port_link_device() as a last resort.
*/
struct device *tty_port_register_device_attr(struct tty_port *port,
struct tty_driver *driver, unsigned index,
@@ -240,9 +249,9 @@ EXPORT_SYMBOL(tty_port_free_xmit_buf);
* tty_port_destroy -- destroy inited port
* @port: tty port to be destroyed
*
- * When a port was initialized using tty_port_init, one has to destroy the
- * port by this function. Either indirectly by using tty_port refcounting
- * (tty_port_put) or directly if refcounting is not used.
+ * When a port was initialized using tty_port_init(), one has to destroy the
+ * port by this function, either indirectly by using &tty_port refcounting
+ * (tty_port_put()) or directly if refcounting is not used.
*/
void tty_port_destroy(struct tty_port *port)
{
@@ -267,6 +276,13 @@ static void tty_port_destructor(struct kref *kref)
kfree(port);
}
+/**
+ * tty_port_put -- drop a reference to tty_port
+ * @port: port to drop a reference to (can be %NULL)
+ *
+ * The final put will destroy and free up the @port using the
+ * @port->ops->destruct() hook, or kfree() if the hook is not provided.
+ */
void tty_port_put(struct tty_port *port)
{
if (port)
@@ -275,11 +291,11 @@ void tty_port_put(struct tty_port *port)
EXPORT_SYMBOL(tty_port_put);
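Taken together with tty_port_init() above, the refcounted lifecycle for an embedded port looks roughly as follows; struct my_serial and the helpers are hypothetical. The .destruct hook matters: without it the final put would kfree() the embedded tty_port itself rather than the containing structure.

struct my_serial {
        struct tty_port port;
        /* driver-private state ... */
};

static void my_port_destruct(struct tty_port *port)
{
        struct my_serial *s = container_of(port, struct my_serial, port);

        kfree(s);       /* the final tty_port_put() lands here */
}

static const struct tty_port_operations my_lifecycle_ops = {
        .destruct = my_port_destruct,
};

static struct my_serial *my_serial_alloc(void)
{
        struct my_serial *s = kzalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return NULL;
        tty_port_init(&s->port);        /* refcount starts at 1 */
        s->port.ops = &my_lifecycle_ops;
        return s;
}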
/**
- * tty_port_tty_get - get a tty reference
- * @port: tty port
+ * tty_port_tty_get - get a tty reference
+ * @port: tty port
*
- * Return a refcount protected tty instance or NULL if the port is not
- * associated with a tty (eg due to close or hangup)
+ * Return a refcount protected tty instance or %NULL if the port is not
+ * associated with a tty (e.g. due to close or hangup).
*/
struct tty_struct *tty_port_tty_get(struct tty_port *port)
{
@@ -294,12 +310,12 @@ struct tty_struct *tty_port_tty_get(struct tty_port *port)
EXPORT_SYMBOL(tty_port_tty_get);
/**
- * tty_port_tty_set - set the tty of a port
- * @port: tty port
- * @tty: the tty
+ * tty_port_tty_set - set the tty of a port
+ * @port: tty port
+ * @tty: the tty
*
- * Associate the port and tty pair. Manages any internal refcounts.
- * Pass NULL to deassociate a port
+ * Associate the port and tty pair. Manages any internal refcounts. Pass %NULL
+ * to deassociate a port.
*/
void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty)
{
@@ -312,6 +328,16 @@ void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty)
}
EXPORT_SYMBOL(tty_port_tty_set);
+/**
+ * tty_port_shutdown - internal helper to shut down the device
+ * @port: tty port to be shut down
+ * @tty: the associated tty
+ *
+ * It is used by tty_port_hangup() and tty_port_close(). Its task is to
+ * shut down the device if it was initialized (note that consoles remain
+ * functioning). It lowers DTR/RTS (if @tty has HUPCL set) and invokes
+ * @port->ops->shutdown().
+ */
static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
{
mutex_lock(&port->mutex);
@@ -335,13 +361,13 @@ out:
}
/**
- * tty_port_hangup - hangup helper
- * @port: tty port
+ * tty_port_hangup - hangup helper
+ * @port: tty port
*
- * Perform port level tty hangup flag and count changes. Drop the tty
- * reference.
+ * Perform port level tty hangup flag and count changes. Drop the tty
+ * reference.
*
- * Caller holds tty lock.
+ * Caller holds tty lock.
*/
void tty_port_hangup(struct tty_port *port)
{
@@ -365,9 +391,8 @@ EXPORT_SYMBOL(tty_port_hangup);
/**
* tty_port_tty_hangup - helper to hang up a tty
- *
* @port: tty port
- * @check_clocal: hang only ttys with CLOCAL unset?
+ * @check_clocal: hang only ttys with %CLOCAL unset?
*/
void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
{
@@ -381,7 +406,6 @@ EXPORT_SYMBOL_GPL(tty_port_tty_hangup);
/**
* tty_port_tty_wakeup - helper to wake up a tty
- *
* @port: tty port
*/
void tty_port_tty_wakeup(struct tty_port *port)
@@ -391,12 +415,12 @@ void tty_port_tty_wakeup(struct tty_port *port)
EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
/**
- * tty_port_carrier_raised - carrier raised check
- * @port: tty port
+ * tty_port_carrier_raised - carrier raised check
+ * @port: tty port
*
- * Wrapper for the carrier detect logic. For the moment this is used
- * to hide some internal details. This will eventually become entirely
- * internal to the tty port.
+ * Wrapper for the carrier detect logic. For the moment this is used
+ * to hide some internal details. This will eventually become entirely
+ * internal to the tty port.
*/
int tty_port_carrier_raised(struct tty_port *port)
{
@@ -407,12 +431,12 @@ int tty_port_carrier_raised(struct tty_port *port)
EXPORT_SYMBOL(tty_port_carrier_raised);
/**
- * tty_port_raise_dtr_rts - Raise DTR/RTS
- * @port: tty port
+ * tty_port_raise_dtr_rts - Raise DTR/RTS
+ * @port: tty port
*
- * Wrapper for the DTR/RTS raise logic. For the moment this is used
- * to hide some internal details. This will eventually become entirely
- * internal to the tty port.
+ * Wrapper for the DTR/RTS raise logic. For the moment this is used to hide
+ * some internal details. This will eventually become entirely internal to the
+ * tty port.
*/
void tty_port_raise_dtr_rts(struct tty_port *port)
{
@@ -422,12 +446,12 @@ void tty_port_raise_dtr_rts(struct tty_port *port)
EXPORT_SYMBOL(tty_port_raise_dtr_rts);
/**
- * tty_port_lower_dtr_rts - Lower DTR/RTS
- * @port: tty port
+ * tty_port_lower_dtr_rts - Lower DTR/RTS
+ * @port: tty port
*
- * Wrapper for the DTR/RTS raise logic. For the moment this is used
- * to hide some internal details. This will eventually become entirely
- * internal to the tty port.
+ * Wrapper for the DTR/RTS raise logic. For the moment this is used to hide
+ * some internal details. This will eventually become entirely internal to the
+ * tty port.
*/
void tty_port_lower_dtr_rts(struct tty_port *port)
{
@@ -437,28 +461,29 @@ void tty_port_lower_dtr_rts(struct tty_port *port)
EXPORT_SYMBOL(tty_port_lower_dtr_rts);
/**
- * tty_port_block_til_ready - Waiting logic for tty open
- * @port: the tty port being opened
- * @tty: the tty device being bound
- * @filp: the file pointer of the opener or NULL
- *
- * Implement the core POSIX/SuS tty behaviour when opening a tty device.
- * Handles:
- * - hangup (both before and during)
- * - non blocking open
- * - rts/dtr/dcd
- * - signals
- * - port flags and counts
- *
- * The passed tty_port must implement the carrier_raised method if it can
- * do carrier detect and the dtr_rts method if it supports software
- * management of these lines. Note that the dtr/rts raise is done each
- * iteration as a hangup may have previously dropped them while we wait.
- *
- * Caller holds tty lock.
- *
- * NB: May drop and reacquire tty lock when blocking, so tty and tty_port
- * may have changed state (eg., may have been hung up).
+ * tty_port_block_til_ready - Waiting logic for tty open
+ * @port: the tty port being opened
+ * @tty: the tty device being bound
+ * @filp: the file pointer of the opener or %NULL
+ *
+ * Implement the core POSIX/SuS tty behaviour when opening a tty device.
+ * Handles:
+ *
+ * - hangup (both before and during)
+ * - non blocking open
+ * - rts/dtr/dcd
+ * - signals
+ * - port flags and counts
+ *
+ * The passed @port must implement the @port->ops->carrier_raised method if it
+ * can do carrier detect and the @port->ops->dtr_rts method if it supports
+ * software management of these lines. Note that the dtr/rts raise is done each
+ * iteration as a hangup may have previously dropped them while we wait.
+ *
+ * Caller holds tty lock.
+ *
+ * Note: May drop and reacquire tty lock when blocking, so @tty and @port may
+ * have changed state (e.g., may have been hung up).
*/
int tty_port_block_til_ready(struct tty_port *port,
struct tty_struct *tty, struct file *filp)
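The two ops the comment references would be wired up roughly as below; my_read_msr(), my_set_mctrl(), my_clear_mctrl() and the MY_* bits are hypothetical stand-ins for a driver's register accessors:

static int my_carrier_raised(struct tty_port *port)
{
        return !!(my_read_msr(port) & MY_DCD);  /* non-zero when DCD is up */
}

static void my_dtr_rts(struct tty_port *port, int raise)
{
        if (raise)
                my_set_mctrl(port, MY_DTR | MY_RTS);
        else
                my_clear_mctrl(port, MY_DTR | MY_RTS);
}

static const struct tty_port_operations my_modem_ops = {
        .carrier_raised = my_carrier_raised,
        .dtr_rts        = my_dtr_rts,
};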
@@ -560,7 +585,21 @@ static void tty_port_drain_delay(struct tty_port *port, struct tty_struct *tty)
schedule_timeout_interruptible(timeout);
}
-/* Caller holds tty lock. */
+/**
+ * tty_port_close_start - helper for tty->ops->close, part 1/2
+ * @port: tty_port of the device
+ * @tty: tty being closed
+ * @filp: passed file pointer
+ *
+ * Decrements and checks open count. Flushes the port if this is the last
+ * close. That means dropping the data from the output buffer on the device and
+ * waiting for sending logic to finish. The rest of close handling is performed
+ * in tty_port_close_end().
+ *
+ * Locking: Caller holds tty lock.
+ *
+ * Return: 1 if this is the last close, otherwise 0
+ */
int tty_port_close_start(struct tty_port *port,
struct tty_struct *tty, struct file *filp)
{
@@ -606,7 +645,17 @@ int tty_port_close_start(struct tty_port *port,
}
EXPORT_SYMBOL(tty_port_close_start);
-/* Caller holds tty lock */
+/**
+ * tty_port_close_end - helper for tty->ops->close, part 2/2
+ * @port: tty_port of the device
+ * @tty: tty being closed
+ *
+ * This is a continuation of the first part: tty_port_close_start(). This
+ * should be called after turning off the device. It flushes the data from the
+ * line discipline and delays the close by @port->close_delay.
+ *
+ * Locking: Caller holds tty lock.
+ */
void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
{
unsigned long flags;
@@ -628,10 +677,18 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
}
EXPORT_SYMBOL(tty_port_close_end);
-/*
- * tty_port_close
+/**
+ * tty_port_close - generic tty->ops->close handler
+ * @port: tty_port of the device
+ * @tty: tty being closed
+ * @filp: passed file pointer
+ *
+ * It is a generic helper to be used in driver's @tty->ops->close. It wraps a
+ * sequence of tty_port_close_start(), tty_port_shutdown(), and
+ * tty_port_close_end(). The latter two are called only if this is the last
+ * close. See the respective functions for the details.
*
- * Caller holds tty lock
+ * Locking: Caller holds tty lock
*/
void tty_port_close(struct tty_port *port, struct tty_struct *tty,
struct file *filp)
@@ -652,9 +709,9 @@ EXPORT_SYMBOL(tty_port_close);
* @driver: tty_driver for this device
* @tty: tty to be installed
*
- * It is the same as tty_standard_install except the provided @port is linked
- * to a concrete tty specified by @tty. Use this or tty_port_register_device
- * (or both). Call tty_port_link_device as a last resort.
+ * It is the same as tty_standard_install() except the provided @port is linked
+ * to a concrete tty specified by @tty. Use this or tty_port_register_device()
+ * (or both). Call tty_port_link_device() as a last resort.
*/
int tty_port_install(struct tty_port *port, struct tty_driver *driver,
struct tty_struct *tty)
@@ -664,13 +721,21 @@ int tty_port_install(struct tty_port *port, struct tty_driver *driver,
}
EXPORT_SYMBOL_GPL(tty_port_install);
-/*
- * tty_port_open
+/**
+ * tty_port_open - generic tty->ops->open handler
+ * @port: tty_port of the device
+ * @tty: tty to be opened
+ * @filp: passed file pointer
*
- * Caller holds tty lock.
+ * It is a generic helper to be used in driver's @tty->ops->open. It activates
+ * the device using @port->ops->activate() if not active already, and waits
+ * for it to be ready using tty_port_block_til_ready() (e.g. raises
+ * DTR/RTS and waits for carrier).
+ *
+ * Locking: Caller holds tty lock.
*
- * NB: may drop and reacquire tty lock (in tty_port_block_til_ready()) so
- * tty and tty_port may have changed state (eg., may be hung up now)
+ * Note: may drop and reacquire tty lock (in tty_port_block_til_ready()) so
+ * @tty and @port may have changed state (e.g., may be hung up now).
*/
int tty_port_open(struct tty_port *port, struct tty_struct *tty,
struct file *filp)
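A driver using both generic handlers ends up with trivially thin open/close methods; a sketch reusing the hypothetical struct my_serial from the lifecycle example above:

static int my_open(struct tty_struct *tty, struct file *filp)
{
        struct my_serial *s = tty->driver_data;

        return tty_port_open(&s->port, tty, filp);
}

static void my_close(struct tty_struct *tty, struct file *filp)
{
        struct my_serial *s = tty->driver_data;

        tty_port_close(&s->port, tty, filp);
}

static const struct tty_operations my_tty_ops = {
        .open   = my_open,
        .close  = my_close,
        /* .install, .write, ... elided */
};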
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index c7fbbcdcc346..be8313cdbac3 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -153,6 +153,7 @@ static int shift_state = 0;
static unsigned int ledstate = -1U; /* undefined */
static unsigned char ledioctl;
+static bool vt_switch;
/*
* Notifier list for console keyboard events
@@ -324,13 +325,13 @@ int kbd_rate(struct kbd_repeat *rpt)
static void put_queue(struct vc_data *vc, int ch)
{
tty_insert_flip_char(&vc->port, ch, 0);
- tty_schedule_flip(&vc->port);
+ tty_flip_buffer_push(&vc->port);
}
static void puts_queue(struct vc_data *vc, const char *cp)
{
tty_insert_flip_string(&vc->port, cp, strlen(cp));
- tty_schedule_flip(&vc->port);
+ tty_flip_buffer_push(&vc->port);
}
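The insert-then-push shape these call sites now share is the usual receive pattern; a sketch for a hypothetical FIFO-draining interrupt handler (my_rx_ready() and my_rx_byte() are stand-ins):

static irqreturn_t my_rx_irq(int irq, void *dev_id)
{
        struct tty_port *port = dev_id;

        while (my_rx_ready())           /* drain the hardware FIFO */
                tty_insert_flip_char(port, my_rx_byte(), TTY_NORMAL);

        tty_flip_buffer_push(port);     /* hand the data to the ldisc */
        return IRQ_HANDLED;
}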
static void applkey(struct vc_data *vc, int key, char mode)
@@ -414,6 +415,12 @@ void vt_set_leds_compute_shiftstate(void)
{
unsigned long flags;
+ /*
+	 * When the VT is switched, the keyboard LED needs to be set once.
+ * Ensure that after the switch is completed, the state of the
+ * keyboard LED is consistent with the state of the keyboard lock.
+ */
+ vt_switch = true;
set_leds();
spin_lock_irqsave(&kbd_event_lock, flags);
@@ -584,7 +591,7 @@ static void fn_inc_console(struct vc_data *vc)
static void fn_send_intr(struct vc_data *vc)
{
tty_insert_flip_char(&vc->port, 0, TTY_BREAK);
- tty_schedule_flip(&vc->port);
+ tty_flip_buffer_push(&vc->port);
}
static void fn_scroll_forw(struct vc_data *vc)
@@ -1255,6 +1262,11 @@ static void kbd_bh(struct tasklet_struct *unused)
leds |= (unsigned int)kbd->lockstate << 8;
spin_unlock_irqrestore(&led_lock, flags);
+ if (vt_switch) {
+ ledstate = ~leds;
+ vt_switch = false;
+ }
+
if (leds != ledstate) {
kbd_propagate_led_state(ledstate, leds);
ledstate = leds;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 7359c3e80d63..f8c87c4d7399 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1833,7 +1833,7 @@ static void csi_m(struct vc_data *vc)
static void respond_string(const char *p, size_t len, struct tty_port *port)
{
tty_insert_flip_string(port, p, len);
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
}
static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ea96e319c8a0..43afbb7c5ab9 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -83,13 +83,14 @@ static struct map_sysfs_entry size_attribute =
static struct map_sysfs_entry offset_attribute =
__ATTR(offset, S_IRUGO, map_offset_show, NULL);
-static struct attribute *attrs[] = {
+static struct attribute *map_attrs[] = {
&name_attribute.attr,
&addr_attribute.attr,
&size_attribute.attr,
&offset_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
+ATTRIBUTE_GROUPS(map);
static void map_release(struct kobject *kobj)
{
@@ -119,7 +120,7 @@ static const struct sysfs_ops map_sysfs_ops = {
static struct kobj_type map_attr_type = {
.release = map_release,
.sysfs_ops = &map_sysfs_ops,
- .default_attrs = attrs,
+ .default_groups = map_groups,
};
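For reference, ATTRIBUTE_GROUPS(map) expands to roughly the following, which is what lets .default_groups replace the raw attribute array (a sketch of the macro's effect, not code from this patch):

static const struct attribute_group map_group = {
        .attrs = map_attrs,
};

static const struct attribute_group *map_groups[] = {
        &map_group,
        NULL,
};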
struct uio_portio {
@@ -178,6 +179,7 @@ static struct attribute *portio_attrs[] = {
&portio_porttype_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(portio);
static void portio_release(struct kobject *kobj)
{
@@ -207,7 +209,7 @@ static const struct sysfs_ops portio_sysfs_ops = {
static struct kobj_type portio_attr_type = {
.release = portio_release,
.sysfs_ops = &portio_sysfs_ops,
- .default_attrs = portio_attrs,
+ .default_groups = portio_groups,
};
static ssize_t name_show(struct device *dev,
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 6b5cfa5b0673..1106f3376404 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -188,7 +188,11 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
return -ENOMEM;
}
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "DMA enable failed\n");
+ return ret;
+ }
priv->uioinfo = uioinfo;
spin_lock_init(&priv->lock);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index da17be1ef64e..e3a49d837609 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -969,7 +969,7 @@ static int usbatm_do_heavy_init(void *arg)
instance->thread = NULL;
mutex_unlock(&instance->serialize);
- complete_and_exit(&instance->thread_exited, ret);
+ kthread_complete_and_exit(&instance->thread_exited, ret);
}
static int usbatm_heavy_init(struct usbatm_data *instance)
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
index 4d0f027e5bd3..dc068e940ed5 100644
--- a/drivers/usb/cdns3/cdns3-plat.c
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -13,6 +13,7 @@
*/
#include <linux/module.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -65,13 +66,14 @@ static int cdns3_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cdns);
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host");
- if (!res) {
- dev_err(dev, "missing host IRQ\n");
- return -ENODEV;
- }
+ ret = platform_get_irq_byname(pdev, "host");
+ if (ret < 0)
+ return ret;
- cdns->xhci_res[0] = *res;
+ cdns->xhci_res[0].start = ret;
+ cdns->xhci_res[0].end = ret;
+ cdns->xhci_res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(ret);
+ cdns->xhci_res[0].name = "host";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
if (!res) {
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index e85bf768c66d..5c9d07cc5410 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -81,7 +81,7 @@ int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
offset = HCC_EXT_CAPS(val) << 2;
if (!offset)
return 0;
- };
+ }
do {
val = readl(base + offset);
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index ab0cb68acd23..2d332a788871 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -8,12 +8,12 @@
* Authors: Peter Chen <peter.chen@nxp.com>
* Pawel Laszczak <pawell@cadence.com>
*/
-#include <linux/usb/otg.h>
-#include <linux/usb/role.h>
-
#ifndef __LINUX_CDNS3_CORE_H
#define __LINUX_CDNS3_CORE_H
+#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
+
struct cdns;
/**
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 55c73b1d8704..d00ff98dffab 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -483,11 +483,11 @@ int cdns_drd_exit(struct cdns *cdns)
/* Indicate the cdns3 core was power lost before */
bool cdns_power_is_lost(struct cdns *cdns)
{
- if (cdns->version == CDNS3_CONTROLLER_V1) {
- if (!(readl(&cdns->otg_v1_regs->simulate) & BIT(0)))
+ if (cdns->version == CDNS3_CONTROLLER_V0) {
+ if (!(readl(&cdns->otg_v0_regs->simulate) & BIT(0)))
return true;
} else {
- if (!(readl(&cdns->otg_v0_regs->simulate) & BIT(0)))
+ if (!(readl(&cdns->otg_v1_regs->simulate) & BIT(0)))
return true;
}
return false;
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 60361141ac04..a72a9474afea 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -7,6 +7,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/usb.h>
@@ -15,6 +16,8 @@
#include <linux/usb/of.h>
#include <linux/usb/phy.h>
+#include <soc/tegra/common.h>
+
#include "../host/ehci.h"
#include "ci.h"
@@ -278,6 +281,8 @@ static int tegra_usb_probe(struct platform_device *pdev)
if (!usb)
return -ENOMEM;
+ platform_set_drvdata(pdev, usb);
+
soc = of_device_get_match_data(&pdev->dev);
if (!soc) {
dev_err(&pdev->dev, "failed to match OF data\n");
@@ -296,11 +301,14 @@ static int tegra_usb_probe(struct platform_device *pdev)
return err;
}
- err = clk_prepare_enable(usb->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable clock: %d\n", err);
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
return err;
- }
if (device_property_present(&pdev->dev, "nvidia,needs-double-reset"))
usb->needs_double_reset = true;
@@ -320,8 +328,6 @@ static int tegra_usb_probe(struct platform_device *pdev)
if (err)
goto fail_power_off;
- platform_set_drvdata(pdev, usb);
-
/* setup and register ChipIdea HDRC device */
usb->soc = soc;
usb->data.name = "tegra-usb";
@@ -350,7 +356,9 @@ static int tegra_usb_probe(struct platform_device *pdev)
phy_shutdown:
usb_phy_shutdown(usb->phy);
fail_power_off:
- clk_disable_unprepare(usb->clk);
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
+
return err;
}
@@ -360,15 +368,46 @@ static int tegra_usb_remove(struct platform_device *pdev)
ci_hdrc_remove_device(usb->dev);
usb_phy_shutdown(usb->phy);
+
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_usb_runtime_resume(struct device *dev)
+{
+ struct tegra_usb *usb = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(usb->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_usb_runtime_suspend(struct device *dev)
+{
+ struct tegra_usb *usb = dev_get_drvdata(dev);
+
clk_disable_unprepare(usb->clk);
return 0;
}
+static const struct dev_pm_ops tegra_usb_pm = {
+ SET_RUNTIME_PM_OPS(tegra_usb_runtime_suspend, tegra_usb_runtime_resume,
+ NULL)
+};
+
static struct platform_driver tegra_usb_driver = {
.driver = {
.name = "tegra-usb",
.of_match_table = tegra_usb_of_match,
+ .pm = &tegra_usb_pm,
},
.probe = tegra_usb_probe,
.remove = tegra_usb_remove,
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index a56f06368d14..5359b2a2e4d2 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -864,6 +864,7 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
}
pdev->dev.parent = dev;
+ device_set_of_node_from_dev(&pdev->dev, dev);
ret = platform_device_add_resources(pdev, res, nres);
if (ret)
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 8dd59282827b..7b53274ef966 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -255,10 +255,9 @@ int ci_hdrc_otg_init(struct ci_hdrc *ci)
*/
void ci_hdrc_otg_destroy(struct ci_hdrc *ci)
{
- if (ci->wq) {
- flush_workqueue(ci->wq);
+ if (ci->wq)
destroy_workqueue(ci->wq);
- }
+
/* Disable all OTG irq and clear status */
hw_write_otgsc(ci, OTGSC_INT_EN_BITS | OTGSC_INT_STATUS_BITS,
OTGSC_INT_STATUS_BITS);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index b3ce7338cb6b..9b9aea24d58c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -685,10 +685,6 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
if (retval)
goto error_get_interface;
- /*
- * FIXME: Why do we need this? Allocating 64K of physically contiguous
- * memory is really nasty...
- */
set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
acm->control->needs_remote_wakeup = 1;
diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c
index a76a086b9c54..075f6b1b2a1a 100644
--- a/drivers/usb/common/debug.c
+++ b/drivers/usb/common/debug.c
@@ -8,6 +8,7 @@
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
+#include <linux/kernel.h>
#include <linux/usb/ch9.h>
static void usb_decode_get_status(__u8 bRequestType, __u16 wIndex,
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 4169cf40a03b..8f8405b0d608 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
struct ulpi *ulpi = to_ulpi_dev(dev);
const struct ulpi_device_id *id;
- /* Some ULPI devices don't have a vendor id so rely on OF match */
- if (ulpi->id.vendor == 0)
+ /*
+	 * Some ULPI devices don't have a vendor id, or their drivers don't
+	 * provide an id_table, so rely on OF matching in those cases.
+ */
+ if (ulpi->id.vendor == 0 || !drv->id_table)
return of_driver_match_device(dev, driver);
for (id = drv->id_table; id->vendor; id++)
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 072968c40ade..355ed33a2179 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/usb/driver.c - most of the driver model stuff for usb
+ * drivers/usb/core/driver.c - most of the driver model stuff for usb
*
* (C) Copyright 2005 Greg Kroah-Hartman <gregkh@suse.de>
*
@@ -834,6 +834,7 @@ const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
return NULL;
}
+EXPORT_SYMBOL_GPL(usb_device_match_id);
bool usb_driver_applicable(struct usb_device *udev,
struct usb_device_driver *udrv)
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 26f9fb9f67ca..740342a2812a 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/usb/generic.c - generic driver for USB devices (not interfaces)
+ * drivers/usb/core/generic.c - generic driver for USB devices (not interfaces)
*
* (C) Copyright 2005 Greg Kroah-Hartman <gregkh@suse.de>
*
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 4d326ee12c36..d9712c2602af 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -753,6 +753,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
{
struct urb *urb;
int length;
+ int status;
unsigned long flags;
char buffer[6]; /* Any root hubs with > 31 ports? */
@@ -770,11 +771,17 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
if (urb) {
clear_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
hcd->status_urb = NULL;
+ if (urb->transfer_buffer_length >= length) {
+ status = 0;
+ } else {
+ status = -EOVERFLOW;
+ length = urb->transfer_buffer_length;
+ }
urb->actual_length = length;
memcpy(urb->transfer_buffer, buffer, length);
usb_hcd_unlink_urb_from_ep(hcd, urb);
- usb_hcd_giveback_urb(hcd, urb, 0);
+ usb_hcd_giveback_urb(hcd, urb, status);
} else {
length = 0;
set_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
@@ -1281,7 +1288,7 @@ static int hcd_alloc_coherent(struct usb_bus *bus,
return -EFAULT;
}
- vaddr = hcd_buffer_alloc(bus, size + sizeof(vaddr),
+ vaddr = hcd_buffer_alloc(bus, size + sizeof(unsigned long),
mem_flags, dma_handle);
if (!vaddr)
return -ENOMEM;
@@ -1556,6 +1563,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
urb->hcpriv = NULL;
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
+ /*
+ * Order the write of urb->use_count above before the read
+ * of urb->reject below. Pairs with the memory barriers in
+ * usb_kill_urb() and usb_poison_urb().
+ */
+ smp_mb__after_atomic();
+
atomic_dec(&urb->dev->urbnum);
if (atomic_read(&urb->reject))
wake_up(&usb_kill_urb_queue);
@@ -1658,6 +1672,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
+ /*
+ * Order the write of urb->use_count above before the read
+ * of urb->reject below. Pairs with the memory barriers in
+ * usb_kill_urb() and usb_poison_urb().
+ */
+ smp_mb__after_atomic();
+
if (unlikely(atomic_read(&urb->reject)))
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 00070a8a6507..47a1c8bddf86 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1110,7 +1110,10 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
} else {
hub_power_on(hub, true);
}
- }
+		/* Give some time on remote wakeup to let links transition to U0 */
+ } else if (hub_is_superspeed(hub->hdev))
+ msleep(20);
+
init2:
/*
@@ -1225,7 +1228,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
*/
if (portchange || (hub_is_superspeed(hub->hdev) &&
port_resumed))
- set_bit(port1, hub->change_bits);
+ set_bit(port1, hub->event_bits);
} else if (udev->persist_enabled) {
#ifdef CONFIG_PM
@@ -2777,6 +2780,8 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define PORT_INIT_TRIES 4
#endif /* CONFIG_USB_FEW_INIT_RETRIES */
+#define DETECT_DISCONNECT_TRIES 5
+
#define HUB_ROOT_RESET_TIME 60 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
#define HUB_BH_RESET_TIME 50
@@ -3570,7 +3575,7 @@ static int finish_port_resume(struct usb_device *udev)
* This routine should only be called when persist is enabled.
*/
static int wait_for_connected(struct usb_device *udev,
- struct usb_hub *hub, int *port1,
+ struct usb_hub *hub, int port1,
u16 *portchange, u16 *portstatus)
{
int status = 0, delay_ms = 0;
@@ -3584,7 +3589,7 @@ static int wait_for_connected(struct usb_device *udev,
}
msleep(20);
delay_ms += 20;
- status = hub_port_status(hub, *port1, portstatus, portchange);
+ status = hub_port_status(hub, port1, portstatus, portchange);
}
dev_dbg(&udev->dev, "Waited %dms for CONNECT\n", delay_ms);
return status;
@@ -3690,7 +3695,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
}
if (udev->persist_enabled)
- status = wait_for_connected(udev, hub, &port1, &portchange,
+ status = wait_for_connected(udev, hub, port1, &portchange,
&portstatus);
status = check_port_resume_type(udev,
@@ -5543,6 +5548,7 @@ static void port_event(struct usb_hub *hub, int port1)
struct usb_device *udev = port_dev->child;
struct usb_device *hdev = hub->hdev;
u16 portstatus, portchange;
+ int i = 0;
connect_change = test_bit(port1, hub->change_bits);
clear_bit(port1, hub->event_bits);
@@ -5619,17 +5625,27 @@ static void port_event(struct usb_hub *hub, int port1)
connect_change = 1;
/*
- * Warm reset a USB3 protocol port if it's in
- * SS.Inactive state.
+ * Avoid trying to recover a USB3 SS.Inactive port with a warm reset if
+ * the device was disconnected. A 12ms disconnect detect timer in
+ * SS.Inactive state transitions the port to RxDetect automatically.
+ * SS.Inactive link error state is common during device disconnect.
*/
- if (hub_port_warm_reset_required(hub, port1, portstatus)) {
- dev_dbg(&port_dev->dev, "do warm reset\n");
- if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
+ while (hub_port_warm_reset_required(hub, port1, portstatus)) {
+ if ((i++ < DETECT_DISCONNECT_TRIES) && udev) {
+ u16 unused;
+
+ msleep(20);
+ hub_port_status(hub, port1, &portstatus, &unused);
+ dev_dbg(&port_dev->dev, "Wait for inactive link disconnect detect\n");
+ continue;
+ } else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
|| udev->state == USB_STATE_NOTATTACHED) {
+ dev_dbg(&port_dev->dev, "do warm reset, port only\n");
if (hub_port_reset(hub, port1, NULL,
HUB_BH_RESET_TIME, true) < 0)
hub_port_disable(hub, port1, 1);
} else {
+ dev_dbg(&port_dev->dev, "do warm reset, full device\n");
usb_unlock_port(port_dev);
usb_lock_device(udev);
usb_reset_device(udev);
@@ -5637,6 +5653,7 @@ static void port_event(struct usb_hub *hub, int port1)
usb_lock_port(port_dev);
connect_change = 0;
}
+ break;
}
if (connect_change)
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index dfcca9c876c7..c2bbf97a79be 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/pm_qos.h>
+#include <linux/component.h>
#include "hub.h"
@@ -528,6 +529,32 @@ static void find_and_link_peer(struct usb_hub *hub, int port1)
link_peers_report(port_dev, peer);
}
+static int connector_bind(struct device *dev, struct device *connector, void *data)
+{
+ int ret;
+
+ ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev));
+ if (ret)
+ sysfs_remove_link(&dev->kobj, "connector");
+
+ return ret;
+}
+
+static void connector_unbind(struct device *dev, struct device *connector, void *data)
+{
+ sysfs_remove_link(&connector->kobj, dev_name(dev));
+ sysfs_remove_link(&dev->kobj, "connector");
+}
+
+static const struct component_ops connector_ops = {
+ .bind = connector_bind,
+ .unbind = connector_unbind,
+};
+
int usb_hub_create_port_device(struct usb_hub *hub, int port1)
{
struct usb_port *port_dev;
@@ -577,6 +604,10 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
find_and_link_peer(hub, port1);
+ retval = component_add(&port_dev->dev, &connector_ops);
+ if (retval)
+ dev_warn(&port_dev->dev, "failed to add component\n");
+
/*
* Enable runtime pm and hold a refernce that hub_configure()
* will drop once the PM_QOS_NO_POWER_OFF flag state has been set
@@ -619,5 +650,6 @@ void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
peer = port_dev->peer;
if (peer)
unlink_peers(port_dev, peer);
+ component_del(&port_dev->dev, &connector_ops);
device_unregister(&port_dev->dev);
}
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 30727729a44c..33d62d7e3929 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -715,6 +715,12 @@ void usb_kill_urb(struct urb *urb)
if (!(urb && urb->dev && urb->ep))
return;
atomic_inc(&urb->reject);
+ /*
+ * Order the write of urb->reject above before the read
+ * of urb->use_count below. Pairs with the barriers in
+ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
+ */
+ smp_mb__after_atomic();
usb_hcd_unlink_urb(urb, -ENOENT);
wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
@@ -756,6 +762,12 @@ void usb_poison_urb(struct urb *urb)
if (!urb)
return;
atomic_inc(&urb->reject);
+ /*
+ * Order the write of urb->reject above before the read
+ * of urb->use_count below. Pairs with the barriers in
+ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
+ */
+ smp_mb__after_atomic();
if (!urb->dev || !urb->ep)
return;
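The pairing the comments describe is the classic store-then-load cross-over: each side publishes its atomic update, issues a full barrier, then reads the other side's counter, so at least one of the two is guaranteed to observe the other's store. In miniature, with generic names rather than the USB core's:

static atomic_t reject, use_count;

static void killer_side(void)           /* usb_kill_urb() shape */
{
        atomic_inc(&reject);
        smp_mb__after_atomic();         /* order the inc before the read */
        while (atomic_read(&use_count))
                cpu_relax();            /* the real code sleeps on a waitqueue */
}

static bool giveback_side(void)         /* __usb_hcd_giveback_urb() shape */
{
        atomic_dec(&use_count);
        smp_mb__after_atomic();         /* order the dec before the read */
        return atomic_read(&reject);    /* true: wake the killer up */
}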
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 62368c4ed37a..2ce3667ec6fa 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -398,52 +398,6 @@ int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *))
}
EXPORT_SYMBOL_GPL(usb_for_each_dev);
-struct each_hub_arg {
- void *data;
- int (*fn)(struct device *, void *);
-};
-
-static int __each_hub(struct usb_device *hdev, void *data)
-{
- struct each_hub_arg *arg = (struct each_hub_arg *)data;
- struct usb_hub *hub;
- int ret = 0;
- int i;
-
- hub = usb_hub_to_struct_hub(hdev);
- if (!hub)
- return 0;
-
- mutex_lock(&usb_port_peer_mutex);
-
- for (i = 0; i < hdev->maxchild; i++) {
- ret = arg->fn(&hub->ports[i]->dev, arg->data);
- if (ret)
- break;
- }
-
- mutex_unlock(&usb_port_peer_mutex);
-
- return ret;
-}
-
-/**
- * usb_for_each_port - interate over all USB ports in the system
- * @data: data pointer that will be handed to the callback function
- * @fn: callback function to be called for each USB port
- *
- * Iterate over all USB ports and call @fn for each, passing it @data. If it
- * returns anything other than 0, we break the iteration prematurely and return
- * that value.
- */
-int usb_for_each_port(void *data, int (*fn)(struct device *, void *))
-{
- struct each_hub_arg arg = {data, fn};
-
- return usb_for_each_dev(&arg, __each_hub);
-}
-EXPORT_SYMBOL_GPL(usb_for_each_port);
-
/**
* usb_release_dev - free a usb device structure when all users of it are finished.
* @dev: device that's been disconnected
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 37185eb66ae4..8a63da3ab39d 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -869,6 +869,8 @@ struct dwc2_hregs_backup {
* - USB_DR_MODE_HOST
* - USB_DR_MODE_OTG
* @role_sw: usb_role_switch handle
+ * @role_sw_default_mode: default operation mode of the controller while the
+ * usb role is USB_ROLE_NONE
* @hcd_enabled: Host mode sub-driver initialization indicator.
* @gadget_enabled: Peripheral mode sub-driver initialization indicator.
* @ll_hw_enabled: Status of low-level hardware resources.
@@ -1065,6 +1067,7 @@ struct dwc2_hsotg {
enum usb_otg_state op_state;
enum usb_dr_mode dr_mode;
struct usb_role_switch *role_sw;
+ enum usb_dr_mode role_sw_default_mode;
unsigned int hcd_enabled:1;
unsigned int gadget_enabled:1;
unsigned int ll_hw_enabled:1;
@@ -1151,8 +1154,7 @@ struct dwc2_hsotg {
struct list_head periodic_sched_queued;
struct list_head split_order;
u16 periodic_usecs;
- unsigned long hs_periodic_bitmap[
- DIV_ROUND_UP(DWC2_HS_SCHEDULE_US, BITS_PER_LONG)];
+ DECLARE_BITMAP(hs_periodic_bitmap, DWC2_HS_SCHEDULE_US);
u16 periodic_qh_count;
bool new_connection;
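For reference, DECLARE_BITMAP() in <linux/types.h> is exactly the open-coded array it replaces here, so the field layout is unchanged:

#define DECLARE_BITMAP(name, bits) \
        unsigned long name[BITS_TO_LONGS(bits)]

/* i.e. hs_periodic_bitmap is still: */
unsigned long hs_periodic_bitmap[
        DIV_ROUND_UP(DWC2_HS_SCHEDULE_US, BITS_PER_LONG)];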
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
index aa6eb76f64dd..1b39c4776369 100644
--- a/drivers/usb/dwc2/drd.c
+++ b/drivers/usb/dwc2/drd.c
@@ -13,6 +13,10 @@
#include <linux/usb/role.h>
#include "core.h"
+#define dwc2_ovr_gotgctl(gotgctl) \
+ ((gotgctl) |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN | GOTGCTL_VBVALOEN | \
+ GOTGCTL_DBNCE_FLTR_BYPASS)
+
static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
{
unsigned long flags;
@@ -21,9 +25,12 @@ static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
spin_lock_irqsave(&hsotg->lock, flags);
gotgctl = dwc2_readl(hsotg, GOTGCTL);
- gotgctl |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN | GOTGCTL_VBVALOEN;
- gotgctl |= GOTGCTL_DBNCE_FLTR_BYPASS;
+ dwc2_ovr_gotgctl(gotgctl);
gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL);
+ if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
+ gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL;
+ else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
+ gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL;
dwc2_writel(hsotg, gotgctl, GOTGCTL);
spin_unlock_irqrestore(&hsotg->lock, flags);
@@ -40,6 +47,9 @@ static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
(!valid && !(gotgctl & GOTGCTL_ASESVLD)))
return -EALREADY;
+ /* Always enable overrides to handle the resume case */
+ dwc2_ovr_gotgctl(gotgctl);
+
gotgctl &= ~GOTGCTL_BVALOVAL;
if (valid)
gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL;
@@ -59,6 +69,9 @@ static int dwc2_ovr_bvalid(struct dwc2_hsotg *hsotg, bool valid)
(!valid && !(gotgctl & GOTGCTL_BSESVLD)))
return -EALREADY;
+ /* Always enable overrides to handle the resume case */
+ dwc2_ovr_gotgctl(gotgctl);
+
gotgctl &= ~GOTGCTL_AVALOVAL;
if (valid)
gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL;
@@ -105,6 +118,14 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
spin_lock_irqsave(&hsotg->lock, flags);
+ if (role == USB_ROLE_NONE) {
+ /* default operation mode when usb role is USB_ROLE_NONE */
+ if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
+ role = USB_ROLE_HOST;
+ else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
+ role = USB_ROLE_DEVICE;
+ }
+
if (role == USB_ROLE_HOST) {
already = dwc2_ovr_avalid(hsotg, true);
} else if (role == USB_ROLE_DEVICE) {
@@ -146,6 +167,7 @@ int dwc2_drd_init(struct dwc2_hsotg *hsotg)
if (!device_property_read_bool(hsotg->dev, "usb-role-switch"))
return 0;
+ hsotg->role_sw_default_mode = usb_get_role_switch_default_mode(hsotg->dev);
role_sw_desc.driver_data = hsotg;
role_sw_desc.fwnode = dev_fwnode(hsotg->dev);
role_sw_desc.set = dwc2_drd_role_sw_set;
@@ -183,6 +205,31 @@ void dwc2_drd_suspend(struct dwc2_hsotg *hsotg)
void dwc2_drd_resume(struct dwc2_hsotg *hsotg)
{
u32 gintsts, gintmsk;
+ enum usb_role role;
+
+ if (hsotg->role_sw) {
+ /* get last known role (as the get ops isn't implemented by this driver) */
+ role = usb_role_switch_get_role(hsotg->role_sw);
+
+ if (role == USB_ROLE_NONE) {
+ if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
+ role = USB_ROLE_HOST;
+ else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
+ role = USB_ROLE_DEVICE;
+ }
+
+ /* restore last role that may have been lost */
+ if (role == USB_ROLE_HOST)
+ dwc2_ovr_avalid(hsotg, true);
+ else if (role == USB_ROLE_DEVICE)
+ dwc2_ovr_bvalid(hsotg, true);
+
+ dwc2_force_mode(hsotg, role == USB_ROLE_HOST);
+
+ dev_dbg(hsotg->dev, "resuming %s-session valid\n",
+ role == USB_ROLE_NONE ? "No" :
+ role == USB_ROLE_HOST ? "A" : "B");
+ }
if (hsotg->role_sw && !hsotg->params.external_id_pin_ctl) {
gintsts = dwc2_readl(hsotg, GINTSTS);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index ab8d7dad9f56..eee3504397e6 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4974,7 +4974,18 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
hsotg->params.g_np_tx_fifo_size);
dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
- hsotg->gadget.max_speed = USB_SPEED_HIGH;
+ switch (hsotg->params.speed) {
+ case DWC2_SPEED_PARAM_LOW:
+ hsotg->gadget.max_speed = USB_SPEED_LOW;
+ break;
+ case DWC2_SPEED_PARAM_FULL:
+ hsotg->gadget.max_speed = USB_SPEED_FULL;
+ break;
+ default:
+ hsotg->gadget.max_speed = USB_SPEED_HIGH;
+ break;
+ }
+
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
hsotg->gadget.name = dev_name(dev);
hsotg->gadget.otg_caps = &hsotg->params.otg_caps;
@@ -5086,7 +5097,7 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock_irqrestore(&hsotg->lock, flags);
- for (ep = 0; ep < hsotg->num_of_eps; ep++) {
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
@@ -5217,7 +5228,7 @@ int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
* as a result a BNA interrupt is asserted on hibernation exit
* by restoring from the saved area.
*/
- if (hsotg->params.g_dma_desc &&
+ if (using_desc_dma(hsotg) &&
(dr->diepctl[i] & DXEPCTL_EPENA))
dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
@@ -5229,7 +5240,7 @@ int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
* as a result a BNA interrupt is asserted on hibernation exit
* by restoring from the saved area.
*/
- if (hsotg->params.g_dma_desc &&
+ if (using_desc_dma(hsotg) &&
(dr->doepctl[i] & DXEPCTL_EPENA))
dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 13c779a28e94..f63a27d11fac 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4399,11 +4399,12 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
* If not hibernation nor partial power down are supported,
* clock gating is used to save power.
*/
- if (!hsotg->params.no_clock_gating)
+ if (!hsotg->params.no_clock_gating) {
dwc2_host_enter_clock_gating(hsotg);
- /* After entering suspend, hardware is not accessible */
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ /* After entering suspend, hardware is not accessible */
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ }
break;
default:
goto skip_power_saving;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index c331a5128c2c..c8ba87df7abe 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -222,20 +222,16 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
int i, ret;
hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
- if (IS_ERR(hsotg->reset)) {
- ret = PTR_ERR(hsotg->reset);
- dev_err(hsotg->dev, "error getting reset control %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hsotg->reset))
+ return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->reset),
+ "error getting reset control\n");
reset_control_deassert(hsotg->reset);
hsotg->reset_ecc = devm_reset_control_get_optional(hsotg->dev, "dwc2-ecc");
- if (IS_ERR(hsotg->reset_ecc)) {
- ret = PTR_ERR(hsotg->reset_ecc);
- dev_err(hsotg->dev, "error getting reset control for ecc %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hsotg->reset_ecc))
+ return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->reset_ecc),
+ "error getting reset control for ecc\n");
reset_control_deassert(hsotg->reset_ecc);
@@ -251,11 +247,8 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
case -ENOSYS:
hsotg->phy = NULL;
break;
- case -EPROBE_DEFER:
- return ret;
default:
- dev_err(hsotg->dev, "error getting phy %d\n", ret);
- return ret;
+ return dev_err_probe(hsotg->dev, ret, "error getting phy\n");
}
}
@@ -268,12 +261,8 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
case -ENXIO:
hsotg->uphy = NULL;
break;
- case -EPROBE_DEFER:
- return ret;
default:
- dev_err(hsotg->dev, "error getting usb phy %d\n",
- ret);
- return ret;
+ return dev_err_probe(hsotg->dev, ret, "error getting usb phy\n");
}
}
}
@@ -282,10 +271,8 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
/* Clock */
hsotg->clk = devm_clk_get_optional(hsotg->dev, "otg");
- if (IS_ERR(hsotg->clk)) {
- dev_err(hsotg->dev, "cannot get otg clock\n");
- return PTR_ERR(hsotg->clk);
- }
+ if (IS_ERR(hsotg->clk))
+ return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->clk), "cannot get otg clock\n");
/* Regulators */
for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
@@ -293,12 +280,9 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
ret = devm_regulator_bulk_get(hsotg->dev, ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(hsotg->dev, "failed to request supplies: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(hsotg->dev, ret, "failed to request supplies\n");
+
return 0;
}
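The dev_err_probe() conversions above collapse the usual "log unless the error is -EPROBE_DEFER, then return it" sequence into one call: dev_err_probe() returns the error it is given and records rather than prints the message for deferred probes. A minimal sketch of the resulting shape, using the real devm_clk_get_optional() and dev_err_probe() APIs on a hypothetical device:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Sketch: fetch an optional clock, reporting through dev_err_probe(). */
	static int sketch_get_otg_clk(struct device *dev, struct clk **clk)
	{
		*clk = devm_clk_get_optional(dev, "otg");
		if (IS_ERR(*clk))
			return dev_err_probe(dev, PTR_ERR(*clk),
					     "cannot get otg clock\n");
		return 0;
	}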
@@ -558,16 +542,12 @@ static int dwc2_driver_probe(struct platform_device *dev)
hsotg->usb33d = devm_regulator_get(hsotg->dev, "usb33d");
if (IS_ERR(hsotg->usb33d)) {
retval = PTR_ERR(hsotg->usb33d);
- if (retval != -EPROBE_DEFER)
- dev_err(hsotg->dev,
- "failed to request usb33d supply: %d\n",
- retval);
+ dev_err_probe(hsotg->dev, retval, "failed to request usb33d supply\n");
goto error;
}
retval = regulator_enable(hsotg->usb33d);
if (retval) {
- dev_err(hsotg->dev,
- "failed to enable usb33d supply: %d\n", retval);
+ dev_err_probe(hsotg->dev, retval, "failed to enable usb33d supply\n");
goto error;
}
@@ -582,8 +562,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
retval = dwc2_drd_init(hsotg);
if (retval) {
- if (retval != -EPROBE_DEFER)
- dev_err(hsotg->dev, "failed to initialize dual-role\n");
+ dev_err_probe(hsotg->dev, retval, "failed to initialize dual-role\n");
goto error_init;
}
@@ -751,10 +730,12 @@ static int __maybe_unused dwc2_resume(struct device *dev)
spin_unlock_irqrestore(&dwc2->lock, flags);
}
- /* Need to restore FORCEDEVMODE/FORCEHOSTMODE */
- dwc2_force_dr_mode(dwc2);
-
- dwc2_drd_resume(dwc2);
+ if (!dwc2->role_sw) {
+ /* Need to restore FORCEDEVMODE/FORCEHOSTMODE */
+ dwc2_force_dr_mode(dwc2);
+ } else {
+ dwc2_drd_resume(dwc2);
+ }
if (dwc2_is_device_mode(dwc2))
ret = dwc2_hsotg_resume(dwc2);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 5c491d0a19d7..e1cc3f7398fb 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -153,6 +153,7 @@
#define DWC3_DGCMDPAR 0xc710
#define DWC3_DGCMD 0xc714
#define DWC3_DALEPENA 0xc720
+#define DWC3_DCFG1 0xc740 /* DWC_usb32 only */
#define DWC3_DEP_BASE(n) (0xc800 + ((n) * 0x10))
#define DWC3_DEPCMDPAR2 0x00
@@ -382,6 +383,7 @@
/* Global HWPARAMS9 Register */
#define DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS BIT(0)
+#define DWC3_GHWPARAMS9_DEV_MST BIT(1)
/* Global Frame Length Adjustment Register */
#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
@@ -558,6 +560,9 @@
/* The EP number goes 0..31 so ep0 is always out and ep1 is always in */
#define DWC3_DALEPENA_EP(n) BIT(n)
+/* DWC_usb32 DCFG1 config */
+#define DWC3_DCFG1_DIS_MST_ENH BIT(1)
+
#define DWC3_DEPCMD_TYPE_CONTROL 0
#define DWC3_DEPCMD_TYPE_ISOC 1
#define DWC3_DEPCMD_TYPE_BULK 2
@@ -888,6 +893,10 @@ struct dwc3_hwparams {
/* HWPARAMS7 */
#define DWC3_RAM1_DEPTH(n) ((n) & 0xffff)
+/* HWPARAMS9 */
+#define DWC3_MST_CAPABLE(p) (!!((p)->hwparams9 & \
+ DWC3_GHWPARAMS9_DEV_MST))
+
/**
* struct dwc3_request - representation of a transfer request
* @request: struct usb_request to be transferred
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index d0f9b7c296b0..bd814df3bf8b 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -755,16 +755,16 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = dwc3_meson_g12a_get_phys(priv);
if (ret)
- goto err_disable_clks;
+ goto err_rearm;
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
- goto err_disable_clks;
+ goto err_rearm;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
if (ret)
- goto err_disable_clks;
+ goto err_rearm;
}
/* Get dr_mode */
@@ -825,6 +825,9 @@ err_disable_regulator:
if (priv->vbus)
regulator_disable(priv->vbus);
+err_rearm:
+ reset_control_rearm(priv->reset);
+
err_disable_clks:
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
@@ -852,6 +855,8 @@ static int dwc3_meson_g12a_remove(struct platform_device *pdev)
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
+ reset_control_rearm(priv->reset);
+
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
@@ -892,7 +897,7 @@ static int __maybe_unused dwc3_meson_g12a_suspend(struct device *dev)
phy_exit(priv->phys[i]);
}
- reset_control_assert(priv->reset);
+ reset_control_rearm(priv->reset);
return 0;
}
@@ -902,7 +907,9 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
int i, ret;
- reset_control_deassert(priv->reset);
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
ret = priv->drvdata->usb_init(priv);
if (ret)
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 3cb01cdd02c2..6cba990da32e 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -598,8 +598,10 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
qcom->dwc3->dev.coherent_dma_mask = dev->coherent_dma_mask;
child_res = kcalloc(2, sizeof(*child_res), GFP_KERNEL);
- if (!child_res)
+ if (!child_res) {
+ platform_device_put(qcom->dwc3);
return -ENOMEM;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -637,9 +639,13 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to add device\n");
device_remove_software_node(&qcom->dwc3->dev);
+ goto out;
}
+ kfree(child_res);
+ return 0;
out:
+ platform_device_put(qcom->dwc3);
kfree(child_res);
return ret;
}
@@ -769,9 +775,12 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (qcom->acpi_pdata->is_urs) {
qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev);
- if (!qcom->urs_usb) {
+ if (IS_ERR_OR_NULL(qcom->urs_usb)) {
dev_err(dev, "failed to create URS USB platdev\n");
- return -ENODEV;
+ if (!qcom->urs_usb)
+ return -ENODEV;
+ else
+ return PTR_ERR(qcom->urs_usb);
}
}
}
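dwc3_qcom_create_urs_usb_platdev() can fail by returning either NULL or an error pointer, so the probe path above now distinguishes the two when choosing a return value. A short sketch of that propagation with the kernel's IS_ERR_OR_NULL()/PTR_ERR() helpers and a hypothetical result pointer:

	#include <linux/err.h>

	/* Sketch: an error pointer becomes its errno, NULL becomes -ENODEV. */
	static int sketch_check_platdev(void *pdev)
	{
		if (IS_ERR_OR_NULL(pdev))
			return pdev ? PTR_ERR(pdev) : -ENODEV;
		return 0;
	}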
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 9cc3ad701a29..e14ac15e24c3 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -102,14 +102,26 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
int ret;
u32 reg;
- usb3_phy = devm_phy_get(dev, "usb3-phy");
- if (PTR_ERR(usb3_phy) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
+ if (IS_ERR(usb3_phy)) {
+ ret = PTR_ERR(usb3_phy);
+ dev_err_probe(dev, ret,
+ "failed to get USB3 PHY\n");
goto err;
- } else if (IS_ERR(usb3_phy)) {
- usb3_phy = NULL;
}
+ /*
+ * The following core resets are not required unless a USB3 PHY
+ * is used, and the subsequent register settings are not required
+ * unless a core reset is performed (they should be set properly
+ * by the first-stage boot loader, but may be reverted by a core
+ * reset). They may also break the configuration if USB3 is actually
+ * in use but the usb3-phy entry is missing from the device tree.
+ * Therefore, skip these operations in this case.
+ */
+ if (!usb3_phy)
+ goto skip_usb3_phy;
+
crst = devm_reset_control_get_exclusive(dev, "usb_crst");
if (IS_ERR(crst)) {
ret = PTR_ERR(crst);
@@ -188,6 +200,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
goto err;
}
+skip_usb3_phy:
/*
* This routes the USB DMA traffic to go through FPD path instead
* of reaching DDR directly. This traffic routing is needed to
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7e3db00e9759..520031ba38aa 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -331,9 +331,17 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
}
}
- dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
- dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
- dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
+ /*
+ * For some commands such as Update Transfer command, DEPCMDPARn
+ * registers are reserved. Since the driver often sends Update Transfer
+ * command, don't write to DEPCMDPARn to avoid register write delays and
+ * improve performance.
+ */
+ if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_UPDATETRANSFER) {
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
+ }
/*
* Synopsys Databook 2.60a states, in section 6.3.2.5.6, that if we're
@@ -357,6 +365,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
cmd |= DWC3_DEPCMD_CMDACT;
dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
+
+ if (!(cmd & DWC3_DEPCMD_CMDACT)) {
+ ret = 0;
+ goto skip_status;
+ }
+
do {
reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
if (!(reg & DWC3_DEPCMD_CMDACT)) {
@@ -398,6 +412,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
cmd_status = -ETIMEDOUT;
}
+skip_status:
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
@@ -1260,12 +1275,17 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
}
+ /* All TRBs setup for MST must set CSP=1 when LST=0 */
+ if (dep->stream_capable && DWC3_MST_CAPABLE(&dwc->hwparams))
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
if ((!no_interrupt && !chain) || must_interrupt)
trb->ctrl |= DWC3_TRB_CTRL_IOC;
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
- else if (dep->stream_capable && is_last)
+ else if (dep->stream_capable && is_last &&
+ !DWC3_MST_CAPABLE(&dwc->hwparams))
trb->ctrl |= DWC3_TRB_CTRL_LST;
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
@@ -1513,7 +1533,8 @@ static int dwc3_prepare_trbs(struct dwc3_ep *dep)
* burst capability may try to read and use TRBs beyond the
* active transfer instead of stopping.
*/
- if (dep->stream_capable && req->request.is_last)
+ if (dep->stream_capable && req->request.is_last &&
+ !DWC3_MST_CAPABLE(&dep->dwc->hwparams))
return ret;
}
@@ -1546,7 +1567,8 @@ static int dwc3_prepare_trbs(struct dwc3_ep *dep)
* burst capability may try to read and use TRBs beyond the
* active transfer instead of stopping.
*/
- if (dep->stream_capable && req->request.is_last)
+ if (dep->stream_capable && req->request.is_last &&
+ !DWC3_MST_CAPABLE(&dwc->hwparams))
return ret;
}
@@ -1623,7 +1645,8 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
return ret;
}
- if (dep->stream_capable && req->request.is_last)
+ if (dep->stream_capable && req->request.is_last &&
+ !DWC3_MST_CAPABLE(&dep->dwc->hwparams))
dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE;
return 0;
@@ -2638,6 +2661,13 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
reg |= DWC3_DCFG_IGNSTRMPP;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ /* Enable MST by default if the device is capable of MST */
+ if (DWC3_MST_CAPABLE(&dwc->hwparams)) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG1);
+ reg &= ~DWC3_DCFG1_DIS_MST_ENH;
+ dwc3_writel(dwc->regs, DWC3_DCFG1, reg);
+ }
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -3437,7 +3467,8 @@ static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
case DEPEVT_STREAM_NOSTREAM:
if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
!(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
- !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE))
+ (!DWC3_MST_CAPABLE(&dwc->hwparams) &&
+ !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)))
break;
/*
@@ -4067,7 +4098,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
struct dwc3 *dwc = evt->dwc;
irqreturn_t ret = IRQ_NONE;
int left;
- u32 reg;
left = evt->count;
@@ -4099,9 +4129,8 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
ret = IRQ_HANDLED;
/* Unmask interrupt */
- reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
- reg &= ~DWC3_GEVNTSIZ_INTMASK;
- dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
+ DWC3_GEVNTSIZ_SIZE(evt->length));
if (dwc->imod_interval) {
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
@@ -4130,7 +4159,6 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
struct dwc3 *dwc = evt->dwc;
u32 amount;
u32 count;
- u32 reg;
if (pm_runtime_suspended(dwc->dev)) {
pm_runtime_get(dwc->dev);
@@ -4157,9 +4185,8 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
evt->flags |= DWC3_EVENT_PENDING;
/* Mask interrupt */
- reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
- reg |= DWC3_GEVNTSIZ_INTMASK;
- dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
+ DWC3_GEVNTSIZ_INTMASK | DWC3_GEVNTSIZ_SIZE(evt->length));
amount = min(count, evt->length - evt->lpos);
memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
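Masking and unmasking the event interrupt above no longer needs a read-modify-write: apart from the mask bit, GEVNTSIZ only holds the event buffer size, which the driver already knows from evt->length. A sketch with stand-in macros mirroring that layout (bit 31 as the mask, low 16 bits as the size):

	#include <linux/types.h>

	#define SKETCH_EVNTSIZ_INTMASK	(1u << 31)
	#define SKETCH_EVNTSIZ_SIZE(n)	((n) & 0xffff)

	/* Value to write for "masked" vs. "unmasked"; no read needed. */
	static inline u32 sketch_evntsiz(unsigned int buf_len, bool masked)
	{
		return (masked ? SKETCH_EVNTSIZ_INTMASK : 0) |
		       SKETCH_EVNTSIZ_SIZE(buf_len);
	}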
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f29a264635aa..eda871973d6c 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -8,32 +8,55 @@
*/
#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include "core.h"
+static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
+ int irq, char *name)
+{
+ struct platform_device *pdev = to_platform_device(dwc->dev);
+ struct device_node *np = dev_of_node(&pdev->dev);
+
+ dwc->xhci_resources[1].start = irq;
+ dwc->xhci_resources[1].end = irq;
+ dwc->xhci_resources[1].flags = IORESOURCE_IRQ | irq_get_trigger_type(irq);
+ if (!name && np)
+ dwc->xhci_resources[1].name = of_node_full_name(pdev->dev.of_node);
+ else
+ dwc->xhci_resources[1].name = name;
+}
+
static int dwc3_host_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
irq = platform_get_irq_byname_optional(dwc3_pdev, "host");
- if (irq > 0)
+ if (irq > 0) {
+ dwc3_host_fill_xhci_irq_res(dwc, irq, "host");
goto out;
+ }
if (irq == -EPROBE_DEFER)
goto out;
irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
- if (irq > 0)
+ if (irq > 0) {
+ dwc3_host_fill_xhci_irq_res(dwc, irq, "dwc_usb3");
goto out;
+ }
if (irq == -EPROBE_DEFER)
goto out;
irq = platform_get_irq(dwc3_pdev, 0);
- if (irq > 0)
+ if (irq > 0) {
+ dwc3_host_fill_xhci_irq_res(dwc, irq, NULL);
goto out;
+ }
if (!irq)
irq = -EINVAL;
@@ -47,28 +70,12 @@ int dwc3_host_init(struct dwc3 *dwc)
struct property_entry props[4];
struct platform_device *xhci;
int ret, irq;
- struct resource *res;
- struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int prop_idx = 0;
irq = dwc3_host_get_irq(dwc);
if (irq < 0)
return irq;
- res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host");
- if (!res)
- res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
- "dwc_usb3");
- if (!res)
- res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0);
- if (!res)
- return -ENOMEM;
-
- dwc->xhci_resources[1].start = irq;
- dwc->xhci_resources[1].end = irq;
- dwc->xhci_resources[1].flags = res->flags;
- dwc->xhci_resources[1].name = res->name;
-
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
dev_err(dwc->dev, "couldn't allocate xHCI device\n");
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 3789c329183c..16f9e3423c9f 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -159,6 +159,8 @@ int config_ep_by_speed_and_alt(struct usb_gadget *g,
int want_comp_desc = 0;
struct usb_descriptor_header **d_spd; /* cursor for speed desc */
+ struct usb_composite_dev *cdev;
+ bool incomplete_desc = false;
if (!g || !f || !_ep)
return -EIO;
@@ -167,28 +169,43 @@ int config_ep_by_speed_and_alt(struct usb_gadget *g,
switch (g->speed) {
case USB_SPEED_SUPER_PLUS:
if (gadget_is_superspeed_plus(g)) {
- speed_desc = f->ssp_descriptors;
- want_comp_desc = 1;
- break;
+ if (f->ssp_descriptors) {
+ speed_desc = f->ssp_descriptors;
+ want_comp_desc = 1;
+ break;
+ }
+ incomplete_desc = true;
}
fallthrough;
case USB_SPEED_SUPER:
if (gadget_is_superspeed(g)) {
- speed_desc = f->ss_descriptors;
- want_comp_desc = 1;
- break;
+ if (f->ss_descriptors) {
+ speed_desc = f->ss_descriptors;
+ want_comp_desc = 1;
+ break;
+ }
+ incomplete_desc = true;
}
fallthrough;
case USB_SPEED_HIGH:
if (gadget_is_dualspeed(g)) {
- speed_desc = f->hs_descriptors;
- break;
+ if (f->hs_descriptors) {
+ speed_desc = f->hs_descriptors;
+ break;
+ }
+ incomplete_desc = true;
}
fallthrough;
default:
speed_desc = f->fs_descriptors;
}
+ cdev = get_gadget_data(g);
+ if (incomplete_desc)
+ WARNING(cdev,
+ "%s doesn't hold the descriptors for current speed\n",
+ f->name);
+
/* find correct alternate setting descriptor */
for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) {
int_desc = (struct usb_interface_descriptor *)*d_spd;
@@ -244,12 +261,8 @@ ep_found:
_ep->maxburst = comp_desc->bMaxBurst + 1;
break;
default:
- if (comp_desc->bMaxBurst != 0) {
- struct usb_composite_dev *cdev;
-
- cdev = get_gadget_data(g);
+ if (comp_desc->bMaxBurst != 0)
ERROR(cdev, "ep0 bMaxBurst must be 0\n");
- }
_ep->maxburst = 1;
break;
}
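config_ep_by_speed_and_alt() walks down from the connected speed to the first descriptor set the function provides, and the change above additionally warns when the exact-speed set is missing. A freestanding sketch of that fallback walk, with illustrative indices (0 = full speed up to 3 = SuperSpeed Plus):

	#include <stdbool.h>
	#include <stddef.h>

	/* Return the best available set at or below speed_idx, noting
	 * whether the exact match was absent. */
	static const void *sketch_pick_descs(const void *sets[4],
					     int speed_idx, bool *incomplete)
	{
		int i;

		*incomplete = false;
		for (i = speed_idx; i >= 0; i--) {
			if (sets[i])
				return sets[i];
			*incomplete = true;
		}
		return NULL;
	}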
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 36c611d1d8d0..d4a678c0806e 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -89,10 +89,6 @@ struct gadget_strings {
struct list_head list;
};
-struct os_desc {
- struct config_group group;
-};
-
struct gadget_config_name {
struct usb_gadget_strings stringtab_dev;
struct usb_string strings;
@@ -420,9 +416,8 @@ static int config_usb_cfg_link(
struct config_usb_cfg *cfg = to_config_usb_cfg(usb_cfg_ci);
struct gadget_info *gi = cfg_to_gadget_info(cfg);
- struct config_group *group = to_config_group(usb_func_ci);
- struct usb_function_instance *fi = container_of(group,
- struct usb_function_instance, group);
+ struct usb_function_instance *fi =
+ to_usb_function_instance(usb_func_ci);
struct usb_function_instance *a_fi;
struct usb_function *f;
int ret;
@@ -470,9 +465,8 @@ static void config_usb_cfg_unlink(
struct config_usb_cfg *cfg = to_config_usb_cfg(usb_cfg_ci);
struct gadget_info *gi = cfg_to_gadget_info(cfg);
- struct config_group *group = to_config_group(usb_func_ci);
- struct usb_function_instance *fi = container_of(group,
- struct usb_function_instance, group);
+ struct usb_function_instance *fi =
+ to_usb_function_instance(usb_func_ci);
struct usb_function *f;
/*
@@ -783,15 +777,11 @@ static void gadget_strings_attr_release(struct config_item *item)
USB_CONFIG_STRING_RW_OPS(gadget_strings);
USB_CONFIG_STRINGS_LANG(gadget_strings, gadget_info);
-static inline struct os_desc *to_os_desc(struct config_item *item)
-{
- return container_of(to_config_group(item), struct os_desc, group);
-}
-
static inline struct gadget_info *os_desc_item_to_gadget_info(
struct config_item *item)
{
- return to_gadget_info(to_os_desc(item)->group.cg_item.ci_parent);
+ return container_of(to_config_group(item),
+ struct gadget_info, os_desc_group);
}
static ssize_t os_desc_use_show(struct config_item *item, char *page)
@@ -886,21 +876,12 @@ static struct configfs_attribute *os_desc_attrs[] = {
NULL,
};
-static void os_desc_attr_release(struct config_item *item)
-{
- struct os_desc *os_desc = to_os_desc(item);
- kfree(os_desc);
-}
-
static int os_desc_link(struct config_item *os_desc_ci,
struct config_item *usb_cfg_ci)
{
- struct gadget_info *gi = container_of(to_config_group(os_desc_ci),
- struct gadget_info, os_desc_group);
+ struct gadget_info *gi = os_desc_item_to_gadget_info(os_desc_ci);
struct usb_composite_dev *cdev = &gi->cdev;
- struct config_usb_cfg *c_target =
- container_of(to_config_group(usb_cfg_ci),
- struct config_usb_cfg, group);
+ struct config_usb_cfg *c_target = to_config_usb_cfg(usb_cfg_ci);
struct usb_configuration *c;
int ret;
@@ -930,8 +911,7 @@ out:
static void os_desc_unlink(struct config_item *os_desc_ci,
struct config_item *usb_cfg_ci)
{
- struct gadget_info *gi = container_of(to_config_group(os_desc_ci),
- struct gadget_info, os_desc_group);
+ struct gadget_info *gi = os_desc_item_to_gadget_info(os_desc_ci);
struct usb_composite_dev *cdev = &gi->cdev;
mutex_lock(&gi->lock);
@@ -943,7 +923,6 @@ static void os_desc_unlink(struct config_item *os_desc_ci,
}
static struct configfs_item_operations os_desc_ops = {
- .release = os_desc_attr_release,
.allow_link = os_desc_link,
.drop_link = os_desc_unlink,
};
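With os_desc_group embedded directly in struct gadget_info, the separate struct os_desc and its release callback become unnecessary; a config group pointer converts back to its owner with a single container_of(). A freestanding sketch of that mapping with illustrative types:

	#include <stddef.h>

	#define sketch_container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct sketch_group { int cg_item; };

	struct sketch_gadget_info {
		int lock;
		struct sketch_group os_desc_group; /* embedded, not allocated */
	};

	static struct sketch_gadget_info *sketch_to_gi(struct sketch_group *g)
	{
		return sketch_container_of(g, struct sketch_gadget_info,
					   os_desc_group);
	}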
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index a7e069b18544..25ad1e97a458 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -614,7 +614,7 @@ static int ffs_ep0_open(struct inode *inode, struct file *file)
file->private_data = ffs;
ffs_data_opened(ffs);
- return 0;
+ return stream_open(inode, file);
}
static int ffs_ep0_release(struct inode *inode, struct file *file)
@@ -1154,7 +1154,7 @@ ffs_epfile_open(struct inode *inode, struct file *file)
file->private_data = epfile;
ffs_data_opened(epfile->ffs);
- return 0;
+ return stream_open(inode, file);
}
static int ffs_aio_cancel(struct kiocb *kiocb)
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 752439690fda..46dd11dcb3a8 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2547,7 +2547,7 @@ static int fsg_main_thread(void *common_)
up_write(&common->filesem);
/* Let fsg_unbind() know the thread has exited */
- complete_and_exit(&common->thread_notifier, 0);
+ kthread_complete_and_exit(&common->thread_notifier, 0);
}
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 71a1a26e85c7..fddf539008a9 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1097,7 +1097,7 @@ static ssize_t f_midi_opts_##name##_show(struct config_item *item, char *page) \
int result; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", opts->name); \
+ result = sprintf(page, "%u\n", opts->name); \
mutex_unlock(&opts->lock); \
\
return result; \
@@ -1134,7 +1134,51 @@ end: \
\
CONFIGFS_ATTR(f_midi_opts_, name);
-F_MIDI_OPT(index, true, SNDRV_CARDS);
+#define F_MIDI_OPT_SIGNED(name, test_limit, limit) \
+static ssize_t f_midi_opts_##name##_show(struct config_item *item, char *page) \
+{ \
+ struct f_midi_opts *opts = to_f_midi_opts(item); \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", opts->name); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_midi_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct f_midi_opts *opts = to_f_midi_opts(item); \
+ int ret; \
+ s32 num; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt > 1) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = kstrtos32(page, 0, &num); \
+ if (ret) \
+ goto end; \
+ \
+ if (test_limit && num > limit) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ opts->name = num; \
+ ret = len; \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+CONFIGFS_ATTR(f_midi_opts_, name);
+
+F_MIDI_OPT_SIGNED(index, true, SNDRV_CARDS);
F_MIDI_OPT(buflen, false, 0);
F_MIDI_OPT(qlen, false, 0);
F_MIDI_OPT(in_ports, true, MAX_PORTS);
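The signed variant above exists because the ALSA card index may legitimately be -1, meaning "use any free card", which the unsigned parse in F_MIDI_OPT() would reject. A sketch of the store step, assuming a hypothetical wrapper around the real kstrtos32():

	#include <linux/errno.h>
	#include <linux/kernel.h>

	/* Sketch: accept negative values, enforce only the upper limit. */
	static int sketch_parse_index(const char *page, s32 limit, s32 *out)
	{
		s32 num;
		int ret = kstrtos32(page, 0, &num);

		if (ret)
			return ret;
		if (num > limit)
			return -EINVAL;
		*out = num;	/* -1 (any card) passes through */
		return 0;
	}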
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 1abf08e5164a..6803cd60cc6d 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -584,6 +584,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
if (is_iso) {
switch (speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
size = ss->isoc_maxpacket *
(ss->isoc_mult + 1) *
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 64de9f1b874c..431d5a7d737e 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -1117,7 +1117,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- rndis_params *p = PDE_DATA(file_inode(file));
+ rndis_params *p = pde_data(file_inode(file));
u32 speed = 0;
int i, fl_speed = 0;
@@ -1161,7 +1161,7 @@ static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
static int rndis_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, rndis_proc_show, PDE_DATA(inode));
+ return single_open(file, rndis_proc_show, pde_data(inode));
}
static const struct proc_ops rndis_proc_ops = {
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index c46400be5464..4561d7a183ff 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -76,8 +76,8 @@ struct snd_uac_chip {
struct snd_pcm *pcm;
/* pre-calculated values for playback iso completion */
- unsigned long long p_interval_mil;
unsigned long long p_residue_mil;
+ unsigned int p_interval;
unsigned int p_framesize;
};
@@ -194,21 +194,24 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
* If there is a residue from this division, add it to the
* residue accumulator.
*/
+ unsigned long long p_interval_mil = uac->p_interval * 1000000ULL;
+
pitched_rate_mil = (unsigned long long)
params->p_srate * prm->pitch;
div_result = pitched_rate_mil;
- do_div(div_result, uac->p_interval_mil);
+ do_div(div_result, uac->p_interval);
+ do_div(div_result, 1000000);
frames = (unsigned int) div_result;
pr_debug("p_srate %d, pitch %d, interval_mil %llu, frames %d\n",
- params->p_srate, prm->pitch, uac->p_interval_mil, frames);
+ params->p_srate, prm->pitch, p_interval_mil, frames);
p_pktsize = min_t(unsigned int,
uac->p_framesize * frames,
ep->maxpacket);
if (p_pktsize < ep->maxpacket) {
- residue_frames_mil = pitched_rate_mil - frames * uac->p_interval_mil;
+ residue_frames_mil = pitched_rate_mil - frames * p_interval_mil;
p_pktsize_residue_mil = uac->p_framesize * residue_frames_mil;
} else
p_pktsize_residue_mil = 0;
@@ -222,11 +225,11 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
* size and decrease the accumulator.
*/
div_result = uac->p_residue_mil;
- do_div(div_result, uac->p_interval_mil);
+ do_div(div_result, uac->p_interval);
+ do_div(div_result, 1000000);
if ((unsigned int) div_result >= uac->p_framesize) {
req->length += uac->p_framesize;
- uac->p_residue_mil -= uac->p_framesize *
- uac->p_interval_mil;
+ uac->p_residue_mil -= uac->p_framesize * p_interval_mil;
pr_debug("increased req length to %d\n", req->length);
}
pr_debug("remains uac->p_residue_mil %llu\n", uac->p_residue_mil);
@@ -591,7 +594,7 @@ int u_audio_start_playback(struct g_audio *audio_dev)
unsigned int factor;
const struct usb_endpoint_descriptor *ep_desc;
int req_len, i;
- unsigned int p_interval, p_pktsize;
+ unsigned int p_pktsize;
ep = audio_dev->in_ep;
prm = &uac->p_prm;
@@ -612,11 +615,10 @@ int u_audio_start_playback(struct g_audio *audio_dev)
/* pre-compute some values for iso_complete() */
uac->p_framesize = params->p_ssize *
num_channels(params->p_chmask);
- p_interval = factor / (1 << (ep_desc->bInterval - 1));
- uac->p_interval_mil = (unsigned long long) p_interval * 1000000;
+ uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
p_pktsize = min_t(unsigned int,
uac->p_framesize *
- (params->p_srate / p_interval),
+ (params->p_srate / uac->p_interval),
ep->maxpacket);
req_len = p_pktsize;
@@ -1145,7 +1147,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
}
kctl->id.device = pcm->device;
- kctl->id.subdevice = i;
+ kctl->id.subdevice = 0;
err = snd_ctl_add(card, kctl);
if (err < 0)
@@ -1168,7 +1170,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
}
kctl->id.device = pcm->device;
- kctl->id.subdevice = i;
+ kctl->id.subdevice = 0;
kctl->tlv.c = u_audio_volume_tlv;
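The playback sizing above is fixed-point arithmetic in millionths: frames per packet is p_srate * pitch / (p_interval * 10^6), and whatever does not divide evenly accumulates in p_residue_mil until it amounts to a whole frame. A self-contained user-space sketch with illustrative numbers: at 44.1 kHz, 1000 packets per second and neutral pitch, each packet carries 44 frames plus 0.1 frame of residue, so every tenth packet gains an extra frame:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long srate = 44100, pitch = 1000000; /* neutral */
		unsigned long long interval = 1000;	/* packets per second */
		unsigned long long interval_mil = interval * 1000000ULL;
		unsigned long long rate_mil = srate * pitch, residue = 0;
		unsigned int i, extra = 0;

		for (i = 0; i < 1000; i++) {
			unsigned long long frames = rate_mil / interval_mil;

			residue += rate_mil - frames * interval_mil;
			if (residue >= interval_mil) { /* one whole frame */
				residue -= interval_mil;
				extra++;
			}
		}
		printf("extra frames in 1000 packets: %u\n", extra); /* 100 */
		return 0;
	}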
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 3b58f4fc0a80..51f9d96827b1 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1242,7 +1242,7 @@ out:
return mask;
}
-static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
+static long gadget_dev_ioctl (struct file *fd, unsigned code, unsigned long value)
{
struct dev_data *dev = fd->private_data;
struct usb_gadget *gadget = dev->gadget;
@@ -1826,8 +1826,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
spin_lock_irq (&dev->lock);
value = -EINVAL;
if (dev->buf) {
+ spin_unlock_irq(&dev->lock);
kfree(kbuf);
- goto fail;
+ return value;
}
dev->buf = kbuf;
@@ -1874,8 +1875,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
value = usb_gadget_probe_driver(&gadgetfs_driver);
if (value != 0) {
- kfree (dev->buf);
- dev->buf = NULL;
+ spin_lock_irq(&dev->lock);
+ goto fail;
} else {
/* at this point "good" hardware has for the first time
* let the USB the host see us. alternatively, if users
@@ -1892,6 +1893,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return value;
fail:
+ dev->config = NULL;
+ dev->hs_config = NULL;
+ dev->dev = NULL;
spin_unlock_irq (&dev->lock);
pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
kfree (dev->buf);
@@ -1900,7 +1904,7 @@ fail:
}
static int
-dev_open (struct inode *inode, struct file *fd)
+gadget_dev_open (struct inode *inode, struct file *fd)
{
struct dev_data *dev = inode->i_private;
int value = -EBUSY;
@@ -1920,12 +1924,12 @@ dev_open (struct inode *inode, struct file *fd)
static const struct file_operations ep0_operations = {
.llseek = no_llseek,
- .open = dev_open,
+ .open = gadget_dev_open,
.read = ep0_read,
.write = dev_config,
.fasync = ep0_fasync,
.poll = ep0_poll,
- .unlocked_ioctl = dev_ioctl,
+ .unlocked_ioctl = gadget_dev_ioctl,
.release = dev_release,
};
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
index d918e8b2af3c..b0dfca43fbdc 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
@@ -110,15 +110,26 @@ static int ast_vhub_dev_feature(struct ast_vhub_dev *d,
u16 wIndex, u16 wValue,
bool is_set)
{
+ u32 val;
+
DDBG(d, "%s_FEATURE(dev val=%02x)\n",
is_set ? "SET" : "CLEAR", wValue);
- if (wValue != USB_DEVICE_REMOTE_WAKEUP)
- return std_req_driver;
+ if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
+ d->wakeup_en = is_set;
+ return std_req_complete;
+ }
- d->wakeup_en = is_set;
+ if (wValue == USB_DEVICE_TEST_MODE) {
+ val = readl(d->vhub->regs + AST_VHUB_CTRL);
+ val &= ~GENMASK(10, 8);
+ val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7);
+ writel(val, d->vhub->regs + AST_VHUB_CTRL);
- return std_req_complete;
+ return std_req_complete;
+ }
+
+ return std_req_driver;
}
static int ast_vhub_ep_feature(struct ast_vhub_dev *d,
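Per USB 2.0 section 9.4.9, SET_FEATURE(TEST_MODE) carries the test selector in the high byte of wIndex; both handlers above shift it into bits 10:8 of the vhub control register. The extraction as a one-line sketch:

	/* Sketch: the USB test-mode selector lives in wIndex's high byte. */
	static inline unsigned int sketch_test_selector(unsigned short wIndex)
	{
		return (wIndex >> 8) & 0x7;
	}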
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
index 74ea36c19b1e..b4cf46249fea 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
@@ -251,6 +251,13 @@ static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req
len = remain;
rc = -EOVERFLOW;
}
+
+ /* The hardware may return a wrong data length */
+ if (len < ep->ep.maxpacket && len != remain) {
+ EPDBG(ep, "using expected data len instead\n");
+ len = remain;
+ }
+
if (len && req->req.buf)
memcpy(req->req.buf + req->req.actual, ep->buf, len);
req->req.actual += len;
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index b9960fdd8a51..65cd4e46f031 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -68,6 +68,18 @@ static const struct usb_device_descriptor ast_vhub_dev_desc = {
.bNumConfigurations = 1,
};
+static const struct usb_qualifier_descriptor ast_vhub_qual_desc = {
+ .bLength = 0xA,
+ .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
+ .bcdUSB = cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_HUB,
+ .bDeviceSubClass = 0,
+ .bDeviceProtocol = 0,
+ .bMaxPacketSize0 = 64,
+ .bNumConfigurations = 1,
+ .bRESERVED = 0,
+};
+
/*
* Configuration descriptor: same comments as above
* regarding handling USB1 mode.
@@ -200,17 +212,28 @@ static int ast_vhub_hub_dev_feature(struct ast_vhub_ep *ep,
u16 wIndex, u16 wValue,
bool is_set)
{
+ u32 val;
+
EPDBG(ep, "%s_FEATURE(dev val=%02x)\n",
is_set ? "SET" : "CLEAR", wValue);
- if (wValue != USB_DEVICE_REMOTE_WAKEUP)
- return std_req_stall;
+ if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
+ ep->vhub->wakeup_en = is_set;
+ EPDBG(ep, "Hub remote wakeup %s\n",
+ is_set ? "enabled" : "disabled");
+ return std_req_complete;
+ }
- ep->vhub->wakeup_en = is_set;
- EPDBG(ep, "Hub remote wakeup %s\n",
- is_set ? "enabled" : "disabled");
+ if (wValue == USB_DEVICE_TEST_MODE) {
+ val = readl(ep->vhub->regs + AST_VHUB_CTRL);
+ val &= ~GENMASK(10, 8);
+ val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7);
+ writel(val, ep->vhub->regs + AST_VHUB_CTRL);
- return std_req_complete;
+ return std_req_complete;
+ }
+
+ return std_req_stall;
}
static int ast_vhub_hub_ep_feature(struct ast_vhub_ep *ep,
@@ -271,9 +294,11 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
BUILD_BUG_ON(dsize > sizeof(vhub->vhub_dev_desc));
BUILD_BUG_ON(USB_DT_DEVICE_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
+ case USB_DT_OTHER_SPEED_CONFIG:
case USB_DT_CONFIG:
dsize = AST_VHUB_CONF_DESC_SIZE;
memcpy(ep->buf, &vhub->vhub_conf_desc, dsize);
+ ((u8 *)ep->buf)[1] = desc_type;
BUILD_BUG_ON(dsize > sizeof(vhub->vhub_conf_desc));
BUILD_BUG_ON(AST_VHUB_CONF_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
@@ -283,6 +308,10 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
BUILD_BUG_ON(dsize > sizeof(vhub->vhub_hub_desc));
BUILD_BUG_ON(AST_VHUB_HUB_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
+ case USB_DT_DEVICE_QUALIFIER:
+ dsize = sizeof(vhub->vhub_qual_desc);
+ memcpy(ep->buf, &vhub->vhub_qual_desc, dsize);
+ break;
default:
return std_req_stall;
}
@@ -428,6 +457,8 @@ enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep,
switch (wValue >> 8) {
case USB_DT_DEVICE:
case USB_DT_CONFIG:
+ case USB_DT_DEVICE_QUALIFIER:
+ case USB_DT_OTHER_SPEED_CONFIG:
return ast_vhub_rep_desc(ep, wValue >> 8,
wLength);
case USB_DT_STRING:
@@ -1033,6 +1064,10 @@ static int ast_vhub_init_desc(struct ast_vhub *vhub)
else
ret = ast_vhub_str_alloc_add(vhub, &ast_vhub_strings);
+ /* Initialize vhub Qualifier Descriptor. */
+ memcpy(&vhub->vhub_qual_desc, &ast_vhub_qual_desc,
+ sizeof(vhub->vhub_qual_desc));
+
return ret;
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 87a5dea12d3c..6b9dfa6e10eb 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -425,6 +425,7 @@ struct ast_vhub {
struct ast_vhub_full_cdesc vhub_conf_desc;
struct usb_hub_descriptor vhub_hub_desc;
struct list_head vhub_str_desc;
+ struct usb_qualifier_descriptor vhub_qual_desc;
};
/* Standard request handlers result codes */
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index d9ad9adf7348..9040a0561466 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -25,7 +25,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_data/atmel.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -1510,7 +1510,6 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc)
static void at91_vbus_update(struct at91_udc *udc, unsigned value)
{
- value ^= udc->board.vbus_active_low;
if (value != udc->vbus)
at91_vbus_session(&udc->gadget, value);
}
@@ -1521,7 +1520,7 @@ static irqreturn_t at91_vbus_irq(int irq, void *_udc)
/* vbus needs at least brief debouncing */
udelay(10);
- at91_vbus_update(udc, gpio_get_value(udc->board.vbus_pin));
+ at91_vbus_update(udc, gpiod_get_value(udc->board.vbus_pin));
return IRQ_HANDLED;
}
@@ -1531,7 +1530,7 @@ static void at91_vbus_timer_work(struct work_struct *work)
struct at91_udc *udc = container_of(work, struct at91_udc,
vbus_timer_work);
- at91_vbus_update(udc, gpio_get_value_cansleep(udc->board.vbus_pin));
+ at91_vbus_update(udc, gpiod_get_value_cansleep(udc->board.vbus_pin));
if (!timer_pending(&udc->vbus_timer))
mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
@@ -1595,7 +1594,6 @@ static void at91udc_shutdown(struct platform_device *dev)
static int at91rm9200_udc_init(struct at91_udc *udc)
{
struct at91_ep *ep;
- int ret;
int i;
for (i = 0; i < NUM_ENDPOINTS; i++) {
@@ -1615,32 +1613,23 @@ static int at91rm9200_udc_init(struct at91_udc *udc)
}
}
- if (!gpio_is_valid(udc->board.pullup_pin)) {
+ if (!udc->board.pullup_pin) {
DBG("no D+ pullup?\n");
return -ENODEV;
}
- ret = devm_gpio_request(&udc->pdev->dev, udc->board.pullup_pin,
- "udc_pullup");
- if (ret) {
- DBG("D+ pullup is busy\n");
- return ret;
- }
-
- gpio_direction_output(udc->board.pullup_pin,
- udc->board.pullup_active_low);
+ gpiod_direction_output(udc->board.pullup_pin,
+ gpiod_is_active_low(udc->board.pullup_pin));
return 0;
}
static void at91rm9200_udc_pullup(struct at91_udc *udc, int is_on)
{
- int active = !udc->board.pullup_active_low;
-
if (is_on)
- gpio_set_value(udc->board.pullup_pin, active);
+ gpiod_set_value(udc->board.pullup_pin, 1);
else
- gpio_set_value(udc->board.pullup_pin, !active);
+ gpiod_set_value(udc->board.pullup_pin, 0);
}
static const struct at91_udc_caps at91rm9200_udc_caps = {
@@ -1783,20 +1772,20 @@ static void at91udc_of_init(struct at91_udc *udc, struct device_node *np)
{
struct at91_udc_data *board = &udc->board;
const struct of_device_id *match;
- enum of_gpio_flags flags;
u32 val;
if (of_property_read_u32(np, "atmel,vbus-polled", &val) == 0)
board->vbus_polled = 1;
- board->vbus_pin = of_get_named_gpio_flags(np, "atmel,vbus-gpio", 0,
- &flags);
- board->vbus_active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
+ board->vbus_pin = gpiod_get_from_of_node(np, "atmel,vbus-gpio", 0,
+ GPIOD_IN, "udc_vbus");
+ if (IS_ERR(board->vbus_pin))
+ board->vbus_pin = NULL;
- board->pullup_pin = of_get_named_gpio_flags(np, "atmel,pullup-gpio", 0,
- &flags);
-
- board->pullup_active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
+ board->pullup_pin = gpiod_get_from_of_node(np, "atmel,pullup-gpio", 0,
+ GPIOD_ASIS, "udc_pullup");
+ if (IS_ERR(board->pullup_pin))
+ board->pullup_pin = NULL;
match = of_match_node(at91_udc_dt_ids, np);
if (match)
@@ -1886,22 +1875,14 @@ static int at91udc_probe(struct platform_device *pdev)
goto err_unprepare_iclk;
}
- if (gpio_is_valid(udc->board.vbus_pin)) {
- retval = devm_gpio_request(dev, udc->board.vbus_pin,
- "udc_vbus");
- if (retval) {
- DBG("request vbus pin failed\n");
- goto err_unprepare_iclk;
- }
-
- gpio_direction_input(udc->board.vbus_pin);
+ if (udc->board.vbus_pin) {
+ gpiod_direction_input(udc->board.vbus_pin);
/*
* Get the initial state of VBUS - we cannot expect
* a pending interrupt.
*/
- udc->vbus = gpio_get_value_cansleep(udc->board.vbus_pin) ^
- udc->board.vbus_active_low;
+ udc->vbus = gpiod_get_value_cansleep(udc->board.vbus_pin);
if (udc->board.vbus_polled) {
INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
@@ -1910,11 +1891,11 @@ static int at91udc_probe(struct platform_device *pdev)
jiffies + VBUS_POLL_TIMEOUT);
} else {
retval = devm_request_irq(dev,
- gpio_to_irq(udc->board.vbus_pin),
+ gpiod_to_irq(udc->board.vbus_pin),
at91_vbus_irq, 0, driver_name, udc);
if (retval) {
DBG("request vbus irq %d failed\n",
- udc->board.vbus_pin);
+ desc_to_gpio(udc->board.vbus_pin));
goto err_unprepare_iclk;
}
}
@@ -1988,8 +1969,8 @@ static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg)
enable_irq_wake(udc->udp_irq);
udc->active_suspend = wake;
- if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled && wake)
- enable_irq_wake(udc->board.vbus_pin);
+ if (udc->board.vbus_pin && !udc->board.vbus_polled && wake)
+ enable_irq_wake(gpiod_to_irq(udc->board.vbus_pin));
return 0;
}
@@ -1998,9 +1979,9 @@ static int at91udc_resume(struct platform_device *pdev)
struct at91_udc *udc = platform_get_drvdata(pdev);
unsigned long flags;
- if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled &&
+ if (udc->board.vbus_pin && !udc->board.vbus_polled &&
udc->active_suspend)
- disable_irq_wake(udc->board.vbus_pin);
+ disable_irq_wake(gpiod_to_irq(udc->board.vbus_pin));
/* maybe reconnect to host; if so, clocks on */
if (udc->active_suspend)
diff --git a/drivers/usb/gadget/udc/at91_udc.h b/drivers/usb/gadget/udc/at91_udc.h
index fd58c5b81826..28c1042f8623 100644
--- a/drivers/usb/gadget/udc/at91_udc.h
+++ b/drivers/usb/gadget/udc/at91_udc.h
@@ -109,11 +109,9 @@ struct at91_udc_caps {
};
struct at91_udc_data {
- int vbus_pin; /* high == host powering us */
- u8 vbus_active_low; /* vbus polarity */
- u8 vbus_polled; /* Use polling, not interrupt */
- int pullup_pin; /* active == D+ pulled up */
- u8 pullup_active_low; /* true == pullup_pin is active low */
+ struct gpio_desc *vbus_pin; /* high == host powering us */
+ u8 vbus_polled; /* Use polling, not interrupt */
+ struct gpio_desc *pullup_pin; /* active == D+ pulled up */
};
/*
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index a9f07c59fc37..2cdb07905bde 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -2321,8 +2321,10 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ if (irq < 0) {
+ rc = irq;
goto out_uninit;
+ }
if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
dev_name(dev), udc) < 0)
goto report_request_failure;
@@ -2330,8 +2332,10 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
irq = platform_get_irq(pdev, i + 1);
- if (irq < 0)
+ if (irq < 0) {
+ rc = irq;
goto out_uninit;
+ }
if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
dev_name(dev), &udc->iudma[i]) < 0)
goto report_request_failure;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index fa1a3908ec3b..9849e0c86e23 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -623,6 +623,7 @@ static int bdc_resume(struct device *dev)
ret = bdc_reinit(bdc);
if (ret) {
dev_err(bdc->dev, "err in bdc reinit\n");
+ clk_disable_unprepare(bdc->clk);
return ret;
}
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 7f24ce400b59..b6d34dda028b 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2084,10 +2084,8 @@ static int mv_udc_remove(struct platform_device *pdev)
usb_del_gadget_udc(&udc->gadget);
- if (udc->qwork) {
- flush_workqueue(udc->qwork);
+ if (udc->qwork)
destroy_workqueue(udc->qwork);
- }
/* free memory allocated in probe */
dma_pool_destroy(udc->dtd_pool);
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index 52cdfd8212d6..b38747fd3bb0 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -2364,7 +2364,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENODEV;
+ return irq;
dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs))
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 857159dd5ae0..6ce886fb7bfe 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2179,6 +2179,61 @@ static int xudc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int xudc_suspend(struct device *dev)
+{
+ struct xusb_udc *udc;
+ u32 crtlreg;
+ unsigned long flags;
+
+ udc = dev_get_drvdata(dev);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ crtlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+ crtlreg &= ~XUSB_CONTROL_USB_READY_MASK;
+
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, crtlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ if (udc->driver && udc->driver->suspend)
+ udc->driver->suspend(&udc->gadget);
+
+ clk_disable(udc->clk);
+
+ return 0;
+}
+
+static int xudc_resume(struct device *dev)
+{
+ struct xusb_udc *udc;
+ u32 crtlreg;
+ unsigned long flags;
+ int ret;
+
+ udc = dev_get_drvdata(dev);
+
+ ret = clk_enable(udc->clk);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ crtlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+ crtlreg |= XUSB_CONTROL_USB_READY_MASK;
+
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, crtlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xudc_suspend, xudc_resume)
+};
+
/* Match table for of_platform binding */
static const struct of_device_id usb_of_match[] = {
{ .compatible = "xlnx,usb2-device-4.00.a", },
@@ -2190,6 +2245,7 @@ static struct platform_driver xudc_driver = {
.driver = {
.name = driver_name,
.of_match_table = usb_of_match,
+ .pm = &xudc_pm_ops,
},
.probe = xudc_probe,
.remove = xudc_remove,
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d1d926f8f9c2..57ca5f97a3dc 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -772,3 +772,14 @@ config USB_HCD_TEST_MODE
This option is of interest only to developers who need to validate
their USB hardware designs. It is not needed for normal use. If
unsure, say N.
+
+config USB_XEN_HCD
+ tristate "Xen usb virtual host driver"
+ depends on XEN
+ select XEN_XENBUS_FRONTEND
+ help
+ The Xen USB virtual host driver serves as a frontend driver that
+ enables a Xen guest system to access USB devices passed through to
+ the guest by the Xen host (usually Dom0).
+ Only needed if the kernel is running in a Xen guest and generic
+ access to a USB device is needed.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 171de4df50bd..2948983618fb 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -85,3 +85,4 @@ obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o
+obj-$(CONFIG_USB_XEN_HCD) += xen-hcd.o
diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c
index d3626bfa966b..6a0f64c9e5e8 100644
--- a/drivers/usb/host/ehci-brcm.c
+++ b/drivers/usb/host/ehci-brcm.c
@@ -62,8 +62,12 @@ static int ehci_brcm_hub_control(
u32 __iomem *status_reg;
unsigned long flags;
int retval, irq_disabled = 0;
+ u32 temp;
- status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+ temp = (wIndex & 0xff) - 1;
+ if (temp >= HCS_N_PORTS_MAX) /* Avoid index-out-of-bounds warning */
+ temp = 0;
+ status_reg = &ehci->regs->port_status[temp];
/*
* RESUME is cleared when GetPortStatus() is called 20ms after start
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index b590995a6b3e..7af17c8e069b 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5576,14 +5576,9 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
pdev->dev.power.power_state = PMSG_ON;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "Found HC with no IRQ. Check %s setup!\n",
- dev_name(dev));
- return -ENODEV;
- }
-
- irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
dev_name(dev));
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index ded9738392e4..45dcf8292072 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -306,7 +306,7 @@ static int ohci_hcd_omap_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- retval = -ENXIO;
+ retval = irq;
goto err3;
}
retval = usb_add_hcd(hcd, irq, 0);
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 1bec9b585e2d..12264c048601 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -356,7 +356,7 @@ static int ohci_hcd_s3c2410_probe(struct platform_device *dev)
{
struct usb_hcd *hcd = NULL;
struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
- int retval;
+ int retval, irq;
s3c2410_usb_set_power(info, 1, 1);
s3c2410_usb_set_power(info, 2, 1);
@@ -388,9 +388,15 @@ static int ohci_hcd_s3c2410_probe(struct platform_device *dev)
goto err_put;
}
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ retval = irq;
+ goto err_put;
+ }
+
s3c2410_start_hc(dev, hcd);
- retval = usb_add_hcd(hcd, dev->resource[1].start, 0);
+ retval = usb_add_hcd(hcd, irq, 0);
if (retval != 0)
goto err_ioremap;
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index b4cd9e6c72fd..9b81f420656d 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -76,7 +76,7 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
goto err_put_hcd;
}
- hcd->rsrc_start = pdev->resource[0].start;
+ hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
sohci_p = to_spear_ohci(hcd);
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 3f3d62dc0674..49539b9f0e94 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -21,11 +21,6 @@
* usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc.
*/
-/*#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/namei.h>
-#include <linux/sched.h>*/
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index ae882d76612b..d879d6af5710 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3211,7 +3211,6 @@ static void __exit u132_hcd_exit(void)
platform_driver_unregister(&u132_platform_driver);
printk(KERN_INFO "u132-hcd driver deregistered\n");
wait_event(u132_hcd_wait, u132_instances == 0);
- flush_workqueue(workqueue);
destroy_workqueue(workqueue);
}
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 70dbd95c3f06..b2049b47a08d 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -113,7 +113,8 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
num_ports);
}
if (of_device_is_compatible(np, "aspeed,ast2400-uhci") ||
- of_device_is_compatible(np, "aspeed,ast2500-uhci")) {
+ of_device_is_compatible(np, "aspeed,ast2500-uhci") ||
+ of_device_is_compatible(np, "aspeed,ast2600-uhci")) {
uhci->is_aspeed = 1;
dev_info(&pdev->dev,
"Enabled Aspeed implementation workarounds\n");
@@ -132,7 +133,11 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
goto err_rmr;
}
- ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_clk;
+
+ ret = usb_add_hcd(hcd, ret, IRQF_SHARED);
if (ret)
goto err_clk;
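Several hunks in this series (fotg210-hcd, ohci-s3c2410, uhci-platform) replace raw peeks at pdev->resource[] with platform_get_irq(), which performs the IRQ mapping and returns a negative errno the caller can propagate as-is. A sketch of the shape around the real APIs:

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>
	#include <linux/usb/hcd.h>

	/* Sketch: propagate the errno from platform_get_irq() instead of
	 * substituting -ENODEV. */
	static int sketch_add_hcd(struct usb_hcd *hcd,
				  struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;
		return usb_add_hcd(hcd, irq, IRQF_SHARED);
	}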
diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c
new file mode 100644
index 000000000000..be09fd9bac58
--- /dev/null
+++ b/drivers/usb/host/xen-hcd.c
@@ -0,0 +1,1609 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * xen-hcd.c
+ *
+ * Xen USB Virtual Host Controller driver
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/list.h>
+#include <linux/usb/hcd.h>
+#include <linux/io.h>
+
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/io/usbif.h>
+
+/* Private per-URB data */
+struct urb_priv {
+ struct list_head list;
+ struct urb *urb;
+ int req_id; /* RING_REQUEST id for submitting */
+ int unlink_req_id; /* RING_REQUEST id for unlinking */
+ int status;
+ bool unlinked; /* dequeued marker */
+};
+
+/* virtual roothub port status */
+struct rhport_status {
+ __u32 status;
+ bool resuming; /* resume in progress */
+ bool c_connection; /* connection changed */
+ unsigned long timeout;
+};
+
+/* status of attached device */
+struct vdevice_status {
+ int devnum;
+ enum usb_device_state status;
+ enum usb_device_speed speed;
+};
+
+/* RING request shadow */
+struct usb_shadow {
+ struct xenusb_urb_request req;
+ struct urb *urb;
+};
+
+struct xenhcd_info {
+ /* the virtual host controller has 4 URB queues */
+ struct list_head pending_submit_list;
+ struct list_head pending_unlink_list;
+ struct list_head in_progress_list;
+ struct list_head giveback_waiting_list;
+
+ spinlock_t lock;
+
+ /* timer that kicks pending and giveback-waiting URBs */
+ struct timer_list watchdog;
+ unsigned long actions;
+
+ /* virtual root hub */
+ int rh_numports;
+ struct rhport_status ports[XENUSB_MAX_PORTNR];
+ struct vdevice_status devices[XENUSB_MAX_PORTNR];
+
+ /* Xen related stuff */
+ struct xenbus_device *xbdev;
+ int urb_ring_ref;
+ int conn_ring_ref;
+ struct xenusb_urb_front_ring urb_ring;
+ struct xenusb_conn_front_ring conn_ring;
+
+ unsigned int evtchn;
+ unsigned int irq;
+ struct usb_shadow shadow[XENUSB_URB_RING_SIZE];
+ unsigned int shadow_free;
+
+ bool error;
+};
+
+#define GRANT_INVALID_REF 0
+
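+/* watchdog intervals: ~5 ms for ring retries, one jiffy for URB queue scans */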
+#define XENHCD_RING_JIFFIES (HZ/200)
+#define XENHCD_SCAN_JIFFIES 1
+
+enum xenhcd_timer_action {
+ TIMER_RING_WATCHDOG,
+ TIMER_SCAN_PENDING_URBS,
+};
+
+static struct kmem_cache *xenhcd_urbp_cachep;
+
+static inline struct xenhcd_info *xenhcd_hcd_to_info(struct usb_hcd *hcd)
+{
+ return (struct xenhcd_info *)hcd->hcd_priv;
+}
+
+static inline struct usb_hcd *xenhcd_info_to_hcd(struct xenhcd_info *info)
+{
+ return container_of((void *)info, struct usb_hcd, hcd_priv);
+}
+
+static void xenhcd_set_error(struct xenhcd_info *info, const char *msg)
+{
+ info->error = true;
+
+ pr_alert("xen-hcd: protocol error: %s!\n", msg);
+}
+
+static inline void xenhcd_timer_action_done(struct xenhcd_info *info,
+ enum xenhcd_timer_action action)
+{
+ clear_bit(action, &info->actions);
+}
+
+static void xenhcd_timer_action(struct xenhcd_info *info,
+ enum xenhcd_timer_action action)
+{
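+	/*
+	 * A pending URB-scan already covers the ring watchdog work,
+	 * so there is no need to re-arm the timer in that case.
+	 */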
+ if (timer_pending(&info->watchdog) &&
+ test_bit(TIMER_SCAN_PENDING_URBS, &info->actions))
+ return;
+
+ if (!test_and_set_bit(action, &info->actions)) {
+ unsigned long t;
+
+ switch (action) {
+ case TIMER_RING_WATCHDOG:
+ t = XENHCD_RING_JIFFIES;
+ break;
+ default:
+ t = XENHCD_SCAN_JIFFIES;
+ break;
+ }
+ mod_timer(&info->watchdog, t + jiffies);
+ }
+}
+
+/*
+ * set virtual port connection status
+ */
+static void xenhcd_set_connect_state(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ if (info->ports[port].status & USB_PORT_STAT_POWER) {
+ switch (info->devices[port].speed) {
+ case XENUSB_SPEED_NONE:
+ info->ports[port].status &=
+ ~(USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_LOW_SPEED |
+ USB_PORT_STAT_HIGH_SPEED |
+ USB_PORT_STAT_SUSPEND);
+ break;
+ case XENUSB_SPEED_LOW:
+ info->ports[port].status |= USB_PORT_STAT_CONNECTION;
+ info->ports[port].status |= USB_PORT_STAT_LOW_SPEED;
+ break;
+ case XENUSB_SPEED_FULL:
+ info->ports[port].status |= USB_PORT_STAT_CONNECTION;
+ break;
+ case XENUSB_SPEED_HIGH:
+ info->ports[port].status |= USB_PORT_STAT_CONNECTION;
+ info->ports[port].status |= USB_PORT_STAT_HIGH_SPEED;
+ break;
+ default: /* error */
+ return;
+ }
+ info->ports[port].status |= (USB_PORT_STAT_C_CONNECTION << 16);
+ }
+}
+
+/*
+ * set virtual device connection status
+ */
+static int xenhcd_rhport_connect(struct xenhcd_info *info, __u8 portnum,
+ __u8 speed)
+{
+ int port;
+
+ if (portnum < 1 || portnum > info->rh_numports)
+ return -EINVAL; /* invalid port number */
+
+ port = portnum - 1;
+ if (info->devices[port].speed != speed) {
+ switch (speed) {
+ case XENUSB_SPEED_NONE: /* disconnect */
+ info->devices[port].status = USB_STATE_NOTATTACHED;
+ break;
+ case XENUSB_SPEED_LOW:
+ case XENUSB_SPEED_FULL:
+ case XENUSB_SPEED_HIGH:
+ info->devices[port].status = USB_STATE_ATTACHED;
+ break;
+ default: /* error */
+ return -EINVAL;
+ }
+ info->devices[port].speed = speed;
+ info->ports[port].c_connection = true;
+
+ xenhcd_set_connect_state(info, portnum);
+ }
+
+ return 0;
+}
+
+/*
+ * SetPortFeature(PORT_SUSPENDED)
+ */
+static void xenhcd_rhport_suspend(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ info->ports[port].status |= USB_PORT_STAT_SUSPEND;
+ info->devices[port].status = USB_STATE_SUSPENDED;
+}
+
+/*
+ * ClearPortFeature(PORT_SUSPENDED)
+ */
+static void xenhcd_rhport_resume(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ if (info->ports[port].status & USB_PORT_STAT_SUSPEND) {
+ info->ports[port].resuming = true;
+ info->ports[port].timeout = jiffies + msecs_to_jiffies(20);
+ }
+}
+
+/*
+ * SetPortFeature(PORT_POWER)
+ */
+static void xenhcd_rhport_power_on(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ if ((info->ports[port].status & USB_PORT_STAT_POWER) == 0) {
+ info->ports[port].status |= USB_PORT_STAT_POWER;
+ if (info->devices[port].status != USB_STATE_NOTATTACHED)
+ info->devices[port].status = USB_STATE_POWERED;
+ if (info->ports[port].c_connection)
+ xenhcd_set_connect_state(info, portnum);
+ }
+}
+
+/*
+ * ClearPortFeature(PORT_POWER)
+ * SetConfiguration(non-zero)
+ * Power_Source_Off
+ * Over-current
+ */
+static void xenhcd_rhport_power_off(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ if (info->ports[port].status & USB_PORT_STAT_POWER) {
+ info->ports[port].status = 0;
+ if (info->devices[port].status != USB_STATE_NOTATTACHED)
+ info->devices[port].status = USB_STATE_ATTACHED;
+ }
+}
+
+/*
+ * ClearPortFeature(PORT_ENABLE)
+ */
+static void xenhcd_rhport_disable(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ info->ports[port].status &= ~USB_PORT_STAT_ENABLE;
+ info->ports[port].status &= ~USB_PORT_STAT_SUSPEND;
+ info->ports[port].resuming = false;
+ if (info->devices[port].status != USB_STATE_NOTATTACHED)
+ info->devices[port].status = USB_STATE_POWERED;
+}
+
+/*
+ * SetPortFeature(PORT_RESET)
+ */
+static void xenhcd_rhport_reset(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ info->ports[port].status &= ~(USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_LOW_SPEED |
+ USB_PORT_STAT_HIGH_SPEED);
+ info->ports[port].status |= USB_PORT_STAT_RESET;
+
+ if (info->devices[port].status != USB_STATE_NOTATTACHED)
+ info->devices[port].status = USB_STATE_ATTACHED;
+
+ /* 10msec reset signaling */
+ info->ports[port].timeout = jiffies + msecs_to_jiffies(10);
+}
+
+#ifdef CONFIG_PM
+static int xenhcd_bus_suspend(struct usb_hcd *hcd)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ int ret = 0;
+ int i, ports;
+
+ ports = info->rh_numports;
+
+ spin_lock_irq(&info->lock);
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ ret = -ESHUTDOWN;
+ } else {
+		/* suspend any active ports */
+ for (i = 1; i <= ports; i++)
+ xenhcd_rhport_suspend(info, i);
+ }
+ spin_unlock_irq(&info->lock);
+
+ del_timer_sync(&info->watchdog);
+
+ return ret;
+}
+
+static int xenhcd_bus_resume(struct usb_hcd *hcd)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ int ret = 0;
+ int i, ports;
+
+ ports = info->rh_numports;
+
+ spin_lock_irq(&info->lock);
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ ret = -ESHUTDOWN;
+ } else {
+		/* resume any suspended ports */
+ for (i = 1; i <= ports; i++)
+ xenhcd_rhport_resume(info, i);
+ }
+ spin_unlock_irq(&info->lock);
+
+ return ret;
+}
+#endif
+
+static void xenhcd_hub_descriptor(struct xenhcd_info *info,
+ struct usb_hub_descriptor *desc)
+{
+ __u16 temp;
+ int ports = info->rh_numports;
+
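+	/* 0x29 is the USB 2.0 hub descriptor type */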
+ desc->bDescriptorType = 0x29;
+ desc->bPwrOn2PwrGood = 10; /* EHCI says 20ms max */
+ desc->bHubContrCurrent = 0;
+ desc->bNbrPorts = ports;
+
+ /* size of DeviceRemovable and PortPwrCtrlMask fields */
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
+
+ /* bitmaps for DeviceRemovable and PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+ /* per-port over current reporting and no power switching */
+ temp = 0x000a;
+ desc->wHubCharacteristics = cpu_to_le16(temp);
+}
+
+/* port status change mask for hub_status_data */
+#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \
+ USB_PORT_STAT_C_ENABLE | \
+ USB_PORT_STAT_C_SUSPEND | \
+ USB_PORT_STAT_C_OVERCURRENT | \
+ USB_PORT_STAT_C_RESET) << 16)
+
+/*
+ * See USB 2.0 Spec, 11.12.4 Hub and Port Status Change Bitmap.
+ * If any port status changed, write the bitmap to buf and return
+ * its length (number of bytes).
+ * If nothing changed, return 0.
+ */
+static int xenhcd_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ int ports;
+ int i;
+ unsigned long flags;
+ int ret;
+ int changed = 0;
+
+	/* initialize the status bitmap to "no changes" */
+ ports = info->rh_numports;
+ ret = 1 + (ports / 8);
+ memset(buf, 0, ret);
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ for (i = 0; i < ports; i++) {
+ /* check status for each port */
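+		/* bit 0 is reserved for the hub itself; port bits start at 1 */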
+ if (info->ports[i].status & PORT_C_MASK) {
+ buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
+ changed = 1;
+ }
+ }
+
+ if ((hcd->state == HC_STATE_SUSPENDED) && (changed == 1))
+ usb_hcd_resume_root_hub(hcd);
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ return changed ? ret : 0;
+}
+
+static int xenhcd_hub_control(struct usb_hcd *hcd, __u16 typeReq, __u16 wValue,
+ __u16 wIndex, char *buf, __u16 wLength)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ int ports = info->rh_numports;
+ unsigned long flags;
+ int ret = 0;
+ int i;
+ int changed = 0;
+
+ spin_lock_irqsave(&info->lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ /* ignore this request */
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ xenhcd_rhport_resume(info, wIndex);
+ break;
+ case USB_PORT_FEAT_POWER:
+ xenhcd_rhport_power_off(info, wIndex);
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ xenhcd_rhport_disable(info, wIndex);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ info->ports[wIndex - 1].c_connection = false;
+ fallthrough;
+ default:
+ info->ports[wIndex - 1].status &= ~(1 << wValue);
+ break;
+ }
+ break;
+ case GetHubDescriptor:
+ xenhcd_hub_descriptor(info, (struct usb_hub_descriptor *)buf);
+ break;
+ case GetHubStatus:
+		/* local power supply is always good and no over-current exists. */
+ *(__le32 *)buf = cpu_to_le32(0);
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+
+ wIndex--;
+
+ /* resume completion */
+ if (info->ports[wIndex].resuming &&
+ time_after_eq(jiffies, info->ports[wIndex].timeout)) {
+ info->ports[wIndex].status |=
+ USB_PORT_STAT_C_SUSPEND << 16;
+ info->ports[wIndex].status &= ~USB_PORT_STAT_SUSPEND;
+ }
+
+ /* reset completion */
+ if ((info->ports[wIndex].status & USB_PORT_STAT_RESET) != 0 &&
+ time_after_eq(jiffies, info->ports[wIndex].timeout)) {
+ info->ports[wIndex].status |=
+ USB_PORT_STAT_C_RESET << 16;
+ info->ports[wIndex].status &= ~USB_PORT_STAT_RESET;
+
+ if (info->devices[wIndex].status !=
+ USB_STATE_NOTATTACHED) {
+ info->ports[wIndex].status |=
+ USB_PORT_STAT_ENABLE;
+ info->devices[wIndex].status =
+ USB_STATE_DEFAULT;
+ }
+
+ switch (info->devices[wIndex].speed) {
+ case XENUSB_SPEED_LOW:
+ info->ports[wIndex].status |=
+ USB_PORT_STAT_LOW_SPEED;
+ break;
+ case XENUSB_SPEED_HIGH:
+ info->ports[wIndex].status |=
+ USB_PORT_STAT_HIGH_SPEED;
+ break;
+ default:
+ break;
+ }
+ }
+
+ *(__le32 *)buf = cpu_to_le32(info->ports[wIndex].status);
+ break;
+ case SetPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ xenhcd_rhport_power_on(info, wIndex);
+ break;
+ case USB_PORT_FEAT_RESET:
+ xenhcd_rhport_reset(info, wIndex);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ xenhcd_rhport_suspend(info, wIndex);
+ break;
+ default:
+ if (info->ports[wIndex-1].status & USB_PORT_STAT_POWER)
+ info->ports[wIndex-1].status |= (1 << wValue);
+ }
+ break;
+
+ case SetHubFeature:
+ /* not supported */
+ default:
+error:
+ ret = -EPIPE;
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ /* check status for each port */
+ for (i = 0; i < ports; i++) {
+ if (info->ports[i].status & PORT_C_MASK)
+ changed = 1;
+ }
+ if (changed)
+ usb_hcd_poll_rh_status(hcd);
+
+ return ret;
+}
+
+static void xenhcd_free_urb_priv(struct urb_priv *urbp)
+{
+ urbp->urb->hcpriv = NULL;
+ kmem_cache_free(xenhcd_urbp_cachep, urbp);
+}
+
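+/*
+ * The shadow array doubles as a free list threaded through req.id:
+ * shadow_free holds the first free index and each free entry's req.id
+ * points at the next one.
+ */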
+static inline unsigned int xenhcd_get_id_from_freelist(struct xenhcd_info *info)
+{
+ unsigned int free;
+
+ free = info->shadow_free;
+ info->shadow_free = info->shadow[free].req.id;
+ info->shadow[free].req.id = 0x0fff; /* debug */
+ return free;
+}
+
+static inline void xenhcd_add_id_to_freelist(struct xenhcd_info *info,
+ unsigned int id)
+{
+ info->shadow[id].req.id = info->shadow_free;
+ info->shadow[id].urb = NULL;
+ info->shadow_free = id;
+}
+
+static inline int xenhcd_count_pages(void *addr, int length)
+{
+ unsigned long vaddr = (unsigned long)addr;
+
+ return PFN_UP(vaddr + length) - PFN_DOWN(vaddr);
+}
+
+static void xenhcd_gnttab_map(struct xenhcd_info *info, void *addr, int length,
+ grant_ref_t *gref_head,
+ struct xenusb_request_segment *seg,
+ int nr_pages, int flags)
+{
+ grant_ref_t ref;
+ unsigned long buffer_mfn;
+ unsigned int offset;
+ unsigned int len = length;
+ unsigned int bytes;
+ int i;
+
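+	/* grant the backend access to each page of [addr, addr + length) */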
+ for (i = 0; i < nr_pages; i++) {
+ buffer_mfn = PFN_DOWN(arbitrary_virt_to_machine(addr).maddr);
+ offset = offset_in_page(addr);
+
+ bytes = PAGE_SIZE - offset;
+ if (bytes > len)
+ bytes = len;
+
+ ref = gnttab_claim_grant_reference(gref_head);
+ gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
+ buffer_mfn, flags);
+ seg[i].gref = ref;
+ seg[i].offset = (__u16)offset;
+ seg[i].length = (__u16)bytes;
+
+ addr += bytes;
+ len -= bytes;
+ }
+}
+
+static __u32 xenhcd_pipe_urb_to_xenusb(__u32 urb_pipe, __u8 port)
+{
+	__u32 pipe;	/* fully assigned below, no need for static */
+
+ pipe = usb_pipedevice(urb_pipe) << XENUSB_PIPE_DEV_SHIFT;
+ pipe |= usb_pipeendpoint(urb_pipe) << XENUSB_PIPE_EP_SHIFT;
+ if (usb_pipein(urb_pipe))
+ pipe |= XENUSB_PIPE_DIR;
+ switch (usb_pipetype(urb_pipe)) {
+ case PIPE_ISOCHRONOUS:
+ pipe |= XENUSB_PIPE_TYPE_ISOC << XENUSB_PIPE_TYPE_SHIFT;
+ break;
+ case PIPE_INTERRUPT:
+ pipe |= XENUSB_PIPE_TYPE_INT << XENUSB_PIPE_TYPE_SHIFT;
+ break;
+ case PIPE_CONTROL:
+ pipe |= XENUSB_PIPE_TYPE_CTRL << XENUSB_PIPE_TYPE_SHIFT;
+ break;
+ case PIPE_BULK:
+ pipe |= XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT;
+ break;
+ }
+ pipe = xenusb_setportnum_pipe(pipe, port);
+
+ return pipe;
+}
+
+static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
+ struct xenusb_urb_request *req)
+{
+ grant_ref_t gref_head;
+ int nr_buff_pages = 0;
+ int nr_isodesc_pages = 0;
+ int nr_grants = 0;
+
+ if (urb->transfer_buffer_length) {
+ nr_buff_pages = xenhcd_count_pages(urb->transfer_buffer,
+ urb->transfer_buffer_length);
+
+ if (usb_pipeisoc(urb->pipe))
+ nr_isodesc_pages = xenhcd_count_pages(
+ &urb->iso_frame_desc[0],
+ sizeof(struct usb_iso_packet_descriptor) *
+ urb->number_of_packets);
+
+ nr_grants = nr_buff_pages + nr_isodesc_pages;
+ if (nr_grants > XENUSB_MAX_SEGMENTS_PER_REQUEST) {
+ pr_err("xenhcd: error: %d grants\n", nr_grants);
+ return -E2BIG;
+ }
+
+ if (gnttab_alloc_grant_references(nr_grants, &gref_head)) {
+ pr_err("xenhcd: gnttab_alloc_grant_references() error\n");
+ return -ENOMEM;
+ }
+
+ xenhcd_gnttab_map(info, urb->transfer_buffer,
+ urb->transfer_buffer_length, &gref_head,
+ &req->seg[0], nr_buff_pages,
+ usb_pipein(urb->pipe) ? 0 : GTF_readonly);
+ }
+
+ req->pipe = xenhcd_pipe_urb_to_xenusb(urb->pipe, urb->dev->portnum);
+ req->transfer_flags = 0;
+ if (urb->transfer_flags & URB_SHORT_NOT_OK)
+ req->transfer_flags |= XENUSB_SHORT_NOT_OK;
+ req->buffer_length = urb->transfer_buffer_length;
+ req->nr_buffer_segs = nr_buff_pages;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_ISOCHRONOUS:
+ req->u.isoc.interval = urb->interval;
+ req->u.isoc.start_frame = urb->start_frame;
+ req->u.isoc.number_of_packets = urb->number_of_packets;
+ req->u.isoc.nr_frame_desc_segs = nr_isodesc_pages;
+
+ xenhcd_gnttab_map(info, &urb->iso_frame_desc[0],
+ sizeof(struct usb_iso_packet_descriptor) *
+ urb->number_of_packets,
+ &gref_head, &req->seg[nr_buff_pages],
+ nr_isodesc_pages, 0);
+ break;
+ case PIPE_INTERRUPT:
+ req->u.intr.interval = urb->interval;
+ break;
+ case PIPE_CONTROL:
+ if (urb->setup_packet)
+ memcpy(req->u.ctrl, urb->setup_packet, 8);
+ break;
+ case PIPE_BULK:
+ break;
+ default:
+ break;
+ }
+
+ if (nr_grants)
+ gnttab_free_grant_references(gref_head);
+
+ return 0;
+}
+
+static void xenhcd_gnttab_done(struct usb_shadow *shadow)
+{
+ int nr_segs = 0;
+ int i;
+
+ nr_segs = shadow->req.nr_buffer_segs;
+
+ if (xenusb_pipeisoc(shadow->req.pipe))
+ nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;
+
+ for (i = 0; i < nr_segs; i++)
+ gnttab_end_foreign_access(shadow->req.seg[i].gref, 0, 0UL);
+
+ shadow->req.nr_buffer_segs = 0;
+ shadow->req.u.isoc.nr_frame_desc_segs = 0;
+}
+
+static int xenhcd_translate_status(int status)
+{
+ switch (status) {
+ case XENUSB_STATUS_OK:
+ return 0;
+ case XENUSB_STATUS_NODEV:
+ return -ENODEV;
+ case XENUSB_STATUS_INVAL:
+ return -EINVAL;
+ case XENUSB_STATUS_STALL:
+ return -EPIPE;
+ case XENUSB_STATUS_IOERROR:
+ return -EPROTO;
+ case XENUSB_STATUS_BABBLE:
+ return -EOVERFLOW;
+ default:
+ return -ESHUTDOWN;
+ }
+}
+
+static void xenhcd_giveback_urb(struct xenhcd_info *info, struct urb *urb,
+ int status)
+{
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
+ int priv_status = urbp->status;
+
+ list_del_init(&urbp->list);
+ xenhcd_free_urb_priv(urbp);
+
+ if (urb->status == -EINPROGRESS)
+ urb->status = xenhcd_translate_status(status);
+
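+	/* drop the lock while handing the URB back to the USB core */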
+ spin_unlock(&info->lock);
+ usb_hcd_giveback_urb(xenhcd_info_to_hcd(info), urb,
+ priv_status <= 0 ? priv_status : urb->status);
+ spin_lock(&info->lock);
+}
+
+static int xenhcd_do_request(struct xenhcd_info *info, struct urb_priv *urbp)
+{
+ struct xenusb_urb_request *req;
+ struct urb *urb = urbp->urb;
+ unsigned int id;
+ int notify;
+ int ret;
+
+ id = xenhcd_get_id_from_freelist(info);
+ req = &info->shadow[id].req;
+ req->id = id;
+
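+	/* an unlink request references the ring id of the URB to cancel */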
+ if (unlikely(urbp->unlinked)) {
+ req->u.unlink.unlink_id = urbp->req_id;
+ req->pipe = xenusb_setunlink_pipe(xenhcd_pipe_urb_to_xenusb(
+ urb->pipe, urb->dev->portnum));
+ urbp->unlink_req_id = id;
+ } else {
+ ret = xenhcd_map_urb_for_request(info, urb, req);
+ if (ret) {
+ xenhcd_add_id_to_freelist(info, id);
+ return ret;
+ }
+ urbp->req_id = id;
+ }
+
+ req = RING_GET_REQUEST(&info->urb_ring, info->urb_ring.req_prod_pvt);
+ *req = info->shadow[id].req;
+
+ info->urb_ring.req_prod_pvt++;
+ info->shadow[id].urb = urb;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->urb_ring, notify);
+ if (notify)
+ notify_remote_via_irq(info->irq);
+
+ return 0;
+}
+
+static void xenhcd_kick_pending_urbs(struct xenhcd_info *info)
+{
+ struct urb_priv *urbp;
+
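+	/* move queued URBs onto the ring until it fills up again */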
+ while (!list_empty(&info->pending_submit_list)) {
+ if (RING_FULL(&info->urb_ring)) {
+ xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
+ return;
+ }
+
+ urbp = list_entry(info->pending_submit_list.next,
+ struct urb_priv, list);
+ if (!xenhcd_do_request(info, urbp))
+ list_move_tail(&urbp->list, &info->in_progress_list);
+ else
+ xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
+ }
+ xenhcd_timer_action_done(info, TIMER_SCAN_PENDING_URBS);
+}
+
+/*
+ * caller must lock info->lock
+ */
+static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
+{
+ struct urb_priv *urbp, *tmp;
+ int req_id;
+
+ list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
+ req_id = urbp->req_id;
+ if (!urbp->unlinked) {
+ xenhcd_gnttab_done(&info->shadow[req_id]);
+ if (urbp->urb->status == -EINPROGRESS)
+ /* not dequeued */
+ xenhcd_giveback_urb(info, urbp->urb,
+ -ESHUTDOWN);
+ else /* dequeued */
+ xenhcd_giveback_urb(info, urbp->urb,
+ urbp->urb->status);
+ }
+ info->shadow[req_id].urb = NULL;
+ }
+
+ list_for_each_entry_safe(urbp, tmp, &info->pending_submit_list, list)
+ xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
+}
+
+/*
+ * caller must lock info->lock
+ */
+static void xenhcd_giveback_unlinked_urbs(struct xenhcd_info *info)
+{
+ struct urb_priv *urbp, *tmp;
+
+ list_for_each_entry_safe(urbp, tmp, &info->giveback_waiting_list, list)
+ xenhcd_giveback_urb(info, urbp->urb, urbp->urb->status);
+}
+
+static int xenhcd_submit_urb(struct xenhcd_info *info, struct urb_priv *urbp)
+{
+ int ret;
+
+ if (RING_FULL(&info->urb_ring)) {
+ list_add_tail(&urbp->list, &info->pending_submit_list);
+ xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
+ return 0;
+ }
+
+ if (!list_empty(&info->pending_submit_list)) {
+ list_add_tail(&urbp->list, &info->pending_submit_list);
+ xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
+ return 0;
+ }
+
+ ret = xenhcd_do_request(info, urbp);
+ if (ret == 0)
+ list_add_tail(&urbp->list, &info->in_progress_list);
+
+ return ret;
+}
+
+static int xenhcd_unlink_urb(struct xenhcd_info *info, struct urb_priv *urbp)
+{
+ int ret;
+
+ /* already unlinked? */
+ if (urbp->unlinked)
+ return -EBUSY;
+
+ urbp->unlinked = true;
+
+ /* the urb is still in pending_submit queue */
+ if (urbp->req_id == ~0) {
+ list_move_tail(&urbp->list, &info->giveback_waiting_list);
+ xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
+ return 0;
+ }
+
+ /* send unlink request to backend */
+ if (RING_FULL(&info->urb_ring)) {
+ list_move_tail(&urbp->list, &info->pending_unlink_list);
+ xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
+ return 0;
+ }
+
+ if (!list_empty(&info->pending_unlink_list)) {
+ list_move_tail(&urbp->list, &info->pending_unlink_list);
+ xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
+ return 0;
+ }
+
+ ret = xenhcd_do_request(info, urbp);
+ if (ret == 0)
+ list_move_tail(&urbp->list, &info->in_progress_list);
+
+ return ret;
+}
+
+static int xenhcd_urb_request_done(struct xenhcd_info *info)
+{
+ struct xenusb_urb_response res;
+ struct urb *urb;
+ RING_IDX i, rp;
+ __u16 id;
+ int more_to_do = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ rp = info->urb_ring.sring->rsp_prod;
+ if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
+ xenhcd_set_error(info, "Illegal index on urb-ring");
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+ }
+ rmb(); /* ensure we see queued responses up to "rp" */
+
+ for (i = info->urb_ring.rsp_cons; i != rp; i++) {
+ RING_COPY_RESPONSE(&info->urb_ring, i, &res);
+ id = res.id;
+ if (id >= XENUSB_URB_RING_SIZE) {
+ xenhcd_set_error(info, "Illegal data on urb-ring");
+ continue;
+ }
+
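+		/* only submit requests hold grant references to release */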
+ if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
+ xenhcd_gnttab_done(&info->shadow[id]);
+ urb = info->shadow[id].urb;
+ if (likely(urb)) {
+ urb->actual_length = res.actual_length;
+ urb->error_count = res.error_count;
+ urb->start_frame = res.start_frame;
+ xenhcd_giveback_urb(info, urb, res.status);
+ }
+ }
+
+ xenhcd_add_id_to_freelist(info, id);
+ }
+ info->urb_ring.rsp_cons = i;
+
+ if (i != info->urb_ring.req_prod_pvt)
+ RING_FINAL_CHECK_FOR_RESPONSES(&info->urb_ring, more_to_do);
+ else
+ info->urb_ring.sring->rsp_event = i + 1;
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ return more_to_do;
+}
+
+static int xenhcd_conn_notify(struct xenhcd_info *info)
+{
+ struct xenusb_conn_response res;
+ struct xenusb_conn_request *req;
+ RING_IDX rc, rp;
+ __u16 id;
+ __u8 portnum, speed;
+ int more_to_do = 0;
+ int notify;
+ int port_changed = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ rc = info->conn_ring.rsp_cons;
+ rp = info->conn_ring.sring->rsp_prod;
+ if (RING_RESPONSE_PROD_OVERFLOW(&info->conn_ring, rp)) {
+ xenhcd_set_error(info, "Illegal index on conn-ring");
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+ }
+ rmb(); /* ensure we see queued responses up to "rp" */
+
+ while (rc != rp) {
+ RING_COPY_RESPONSE(&info->conn_ring, rc, &res);
+ id = res.id;
+ portnum = res.portnum;
+ speed = res.speed;
+ info->conn_ring.rsp_cons = ++rc;
+
+ if (xenhcd_rhport_connect(info, portnum, speed)) {
+ xenhcd_set_error(info, "Illegal data on conn-ring");
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+ }
+
+ if (info->ports[portnum - 1].c_connection)
+ port_changed = 1;
+
+ barrier();
+
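+		/* hand the request slot back so further changes can be reported */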
+ req = RING_GET_REQUEST(&info->conn_ring,
+ info->conn_ring.req_prod_pvt);
+ req->id = id;
+ info->conn_ring.req_prod_pvt++;
+ }
+
+ if (rc != info->conn_ring.req_prod_pvt)
+ RING_FINAL_CHECK_FOR_RESPONSES(&info->conn_ring, more_to_do);
+ else
+ info->conn_ring.sring->rsp_event = rc + 1;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
+ if (notify)
+ notify_remote_via_irq(info->irq);
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ if (port_changed)
+ usb_hcd_poll_rh_status(xenhcd_info_to_hcd(info));
+
+ return more_to_do;
+}
+
+static irqreturn_t xenhcd_int(int irq, void *dev_id)
+{
+ struct xenhcd_info *info = (struct xenhcd_info *)dev_id;
+
+ if (unlikely(info->error))
+ return IRQ_HANDLED;
+
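+	/* bitwise OR on purpose: always drain both rings per iteration */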
+ while (xenhcd_urb_request_done(info) | xenhcd_conn_notify(info))
+ /* Yield point for this unbounded loop. */
+ cond_resched();
+
+ return IRQ_HANDLED;
+}
+
+static void xenhcd_destroy_rings(struct xenhcd_info *info)
+{
+ if (info->irq)
+ unbind_from_irqhandler(info->irq, info);
+ info->irq = 0;
+
+ if (info->urb_ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(info->urb_ring_ref, 0,
+ (unsigned long)info->urb_ring.sring);
+ info->urb_ring_ref = GRANT_INVALID_REF;
+ }
+ info->urb_ring.sring = NULL;
+
+ if (info->conn_ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(info->conn_ring_ref, 0,
+ (unsigned long)info->conn_ring.sring);
+ info->conn_ring_ref = GRANT_INVALID_REF;
+ }
+ info->conn_ring.sring = NULL;
+}
+
+static int xenhcd_setup_rings(struct xenbus_device *dev,
+ struct xenhcd_info *info)
+{
+ struct xenusb_urb_sring *urb_sring;
+ struct xenusb_conn_sring *conn_sring;
+ grant_ref_t gref;
+ int err;
+
+ info->urb_ring_ref = GRANT_INVALID_REF;
+ info->conn_ring_ref = GRANT_INVALID_REF;
+
+ urb_sring = (struct xenusb_urb_sring *)get_zeroed_page(
+ GFP_NOIO | __GFP_HIGH);
+ if (!urb_sring) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating urb ring");
+ return -ENOMEM;
+ }
+ SHARED_RING_INIT(urb_sring);
+ FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, urb_sring, 1, &gref);
+ if (err < 0) {
+ free_page((unsigned long)urb_sring);
+ info->urb_ring.sring = NULL;
+ goto fail;
+ }
+ info->urb_ring_ref = gref;
+
+ conn_sring = (struct xenusb_conn_sring *)get_zeroed_page(
+ GFP_NOIO | __GFP_HIGH);
+ if (!conn_sring) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating conn ring");
+ err = -ENOMEM;
+ goto fail;
+ }
+ SHARED_RING_INIT(conn_sring);
+ FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, conn_sring, 1, &gref);
+ if (err < 0) {
+ free_page((unsigned long)conn_sring);
+ info->conn_ring.sring = NULL;
+ goto fail;
+ }
+ info->conn_ring_ref = gref;
+
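+	/* allocate an event channel and bind it to a threaded IRQ handler */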
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
+ goto fail;
+ }
+
+ err = bind_evtchn_to_irq(info->evtchn);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
+ goto fail;
+ }
+
+ info->irq = err;
+
+ err = request_threaded_irq(info->irq, NULL, xenhcd_int,
+ IRQF_ONESHOT, "xenhcd", info);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "request_threaded_irq");
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ unbind_from_irqhandler(info->irq, info);
+fail:
+ xenhcd_destroy_rings(info);
+ return err;
+}
+
+static int xenhcd_talk_to_backend(struct xenbus_device *dev,
+ struct xenhcd_info *info)
+{
+ const char *message;
+ struct xenbus_transaction xbt;
+ int err;
+
+ err = xenhcd_setup_rings(dev, info);
+ if (err)
+ return err;
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto destroy_ring;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "urb-ring-ref", "%u",
+ info->urb_ring_ref);
+ if (err) {
+ message = "writing urb-ring-ref";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "conn-ring-ref", "%u",
+ info->conn_ring_ref);
+ if (err) {
+ message = "writing conn-ring-ref";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+ info->evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto destroy_ring;
+ }
+
+ return 0;
+
+abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+
+destroy_ring:
+ xenhcd_destroy_rings(info);
+
+ return err;
+}
+
+static int xenhcd_connect(struct xenbus_device *dev)
+{
+ struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
+ struct xenusb_conn_request *req;
+ int idx, err;
+ int notify;
+ char name[TASK_COMM_LEN];
+ struct usb_hcd *hcd;
+
+ hcd = xenhcd_info_to_hcd(info);
+ snprintf(name, TASK_COMM_LEN, "xenhcd.%d", hcd->self.busnum);
+
+ err = xenhcd_talk_to_backend(dev, info);
+ if (err)
+ return err;
+
+ /* prepare ring for hotplug notification */
+ for (idx = 0; idx < XENUSB_CONN_RING_SIZE; idx++) {
+ req = RING_GET_REQUEST(&info->conn_ring, idx);
+ req->id = idx;
+ }
+ info->conn_ring.req_prod_pvt = idx;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
+ if (notify)
+ notify_remote_via_irq(info->irq);
+
+ return 0;
+}
+
+static void xenhcd_disconnect(struct xenbus_device *dev)
+{
+ struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
+ struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
+
+ usb_remove_hcd(hcd);
+ xenbus_frontend_closed(dev);
+}
+
+static void xenhcd_watchdog(struct timer_list *timer)
+{
+ struct xenhcd_info *info = from_timer(info, timer, watchdog);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (likely(HC_IS_RUNNING(xenhcd_info_to_hcd(info)->state))) {
+ xenhcd_timer_action_done(info, TIMER_RING_WATCHDOG);
+ xenhcd_giveback_unlinked_urbs(info);
+ xenhcd_kick_pending_urbs(info);
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/*
+ * one-time HC init
+ */
+static int xenhcd_setup(struct usb_hcd *hcd)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+
+ spin_lock_init(&info->lock);
+ INIT_LIST_HEAD(&info->pending_submit_list);
+ INIT_LIST_HEAD(&info->pending_unlink_list);
+ INIT_LIST_HEAD(&info->in_progress_list);
+ INIT_LIST_HEAD(&info->giveback_waiting_list);
+ timer_setup(&info->watchdog, xenhcd_watchdog, 0);
+
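+	/* a USB 2.0 root hub needs a TT for low-/full-speed devices */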
+ hcd->has_tt = (hcd->driver->flags & HCD_MASK) != HCD_USB11;
+
+ return 0;
+}
+
+/*
+ * start HC running
+ */
+static int xenhcd_run(struct usb_hcd *hcd)
+{
+ hcd->uses_new_polling = 1;
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ hcd->state = HC_STATE_RUNNING;
+ return 0;
+}
+
+/*
+ * stop running HC
+ */
+static void xenhcd_stop(struct usb_hcd *hcd)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+
+ del_timer_sync(&info->watchdog);
+ spin_lock_irq(&info->lock);
+ /* cancel all urbs */
+ hcd->state = HC_STATE_HALT;
+ xenhcd_cancel_all_enqueued_urbs(info);
+ xenhcd_giveback_unlinked_urbs(info);
+ spin_unlock_irq(&info->lock);
+}
+
+/*
+ * called as .urb_enqueue()
+ * a non-error return is a promise to give back the URB later
+ */
+static int xenhcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ struct urb_priv *urbp;
+ unsigned long flags;
+ int ret;
+
+ if (unlikely(info->error))
+ return -ESHUTDOWN;
+
+ urbp = kmem_cache_zalloc(xenhcd_urbp_cachep, mem_flags);
+ if (!urbp)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ urbp->urb = urb;
+ urb->hcpriv = urbp;
+ urbp->req_id = ~0;
+ urbp->unlink_req_id = ~0;
+ INIT_LIST_HEAD(&urbp->list);
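+	/* status > 0 means "not dequeued"; urb_dequeue() stores the real value */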
+ urbp->status = 1;
+ urb->unlinked = false;
+
+ ret = xenhcd_submit_urb(info, urbp);
+
+ if (ret)
+ xenhcd_free_urb_priv(urbp);
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ return ret;
+}
+
+/*
+ * called as .urb_dequeue()
+ */
+static int xenhcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ struct urb_priv *urbp;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ urbp = urb->hcpriv;
+ if (urbp) {
+ urbp->status = status;
+ ret = xenhcd_unlink_urb(info, urbp);
+ }
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ return ret;
+}
+
+/*
+ * called from usb_get_current_frame_number(),
+ * but almost no drivers use this function.
+ */
+static int xenhcd_get_frame(struct usb_hcd *hcd)
+{
+	/* returning 0 signals an error, but in practice it causes no problem :-) */
+ return 0;
+}
+
+static struct hc_driver xenhcd_usb20_hc_driver = {
+ .description = "xen-hcd",
+ .product_desc = "Xen USB2.0 Virtual Host Controller",
+ .hcd_priv_size = sizeof(struct xenhcd_info),
+ .flags = HCD_USB2,
+
+ /* basic HC lifecycle operations */
+ .reset = xenhcd_setup,
+ .start = xenhcd_run,
+ .stop = xenhcd_stop,
+
+ /* managing urb I/O */
+ .urb_enqueue = xenhcd_urb_enqueue,
+ .urb_dequeue = xenhcd_urb_dequeue,
+ .get_frame_number = xenhcd_get_frame,
+
+ /* root hub operations */
+ .hub_status_data = xenhcd_hub_status_data,
+ .hub_control = xenhcd_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = xenhcd_bus_suspend,
+ .bus_resume = xenhcd_bus_resume,
+#endif
+};
+
+static struct hc_driver xenhcd_usb11_hc_driver = {
+ .description = "xen-hcd",
+ .product_desc = "Xen USB1.1 Virtual Host Controller",
+ .hcd_priv_size = sizeof(struct xenhcd_info),
+ .flags = HCD_USB11,
+
+ /* basic HC lifecycle operations */
+ .reset = xenhcd_setup,
+ .start = xenhcd_run,
+ .stop = xenhcd_stop,
+
+ /* managing urb I/O */
+ .urb_enqueue = xenhcd_urb_enqueue,
+ .urb_dequeue = xenhcd_urb_dequeue,
+ .get_frame_number = xenhcd_get_frame,
+
+ /* root hub operations */
+ .hub_status_data = xenhcd_hub_status_data,
+ .hub_control = xenhcd_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = xenhcd_bus_suspend,
+ .bus_resume = xenhcd_bus_resume,
+#endif
+};
+
+static struct usb_hcd *xenhcd_create_hcd(struct xenbus_device *dev)
+{
+ int i;
+ int err = 0;
+ int num_ports;
+ int usb_ver;
+ struct usb_hcd *hcd = NULL;
+ struct xenhcd_info *info;
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "num-ports", "%d",
+ &num_ports);
+ if (err != 1) {
+ xenbus_dev_fatal(dev, err, "reading num-ports");
+ return ERR_PTR(-EINVAL);
+ }
+ if (num_ports < 1 || num_ports > XENUSB_MAX_PORTNR) {
+ xenbus_dev_fatal(dev, err, "invalid num-ports");
+ return ERR_PTR(-EINVAL);
+ }
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "usb-ver", "%d", &usb_ver);
+ if (err != 1) {
+ xenbus_dev_fatal(dev, err, "reading usb-ver");
+ return ERR_PTR(-EINVAL);
+ }
+ switch (usb_ver) {
+ case XENUSB_VER_USB11:
+ hcd = usb_create_hcd(&xenhcd_usb11_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ break;
+ case XENUSB_VER_USB20:
+ hcd = usb_create_hcd(&xenhcd_usb20_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ break;
+ default:
+ xenbus_dev_fatal(dev, err, "invalid usb-ver");
+ return ERR_PTR(-EINVAL);
+ }
+ if (!hcd) {
+ xenbus_dev_fatal(dev, err,
+ "fail to allocate USB host controller");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ info = xenhcd_hcd_to_info(hcd);
+ info->xbdev = dev;
+ info->rh_numports = num_ports;
+
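+	/* chain the shadow entries into the initial free list */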
+ for (i = 0; i < XENUSB_URB_RING_SIZE; i++) {
+ info->shadow[i].req.id = i + 1;
+ info->shadow[i].urb = NULL;
+ }
+ info->shadow[XENUSB_URB_RING_SIZE - 1].req.id = 0x0fff;
+
+ return hcd;
+}
+
+static void xenhcd_backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ switch (backend_state) {
+ case XenbusStateInitialising:
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
+ case XenbusStateUnknown:
+ break;
+
+ case XenbusStateInitWait:
+ case XenbusStateInitialised:
+ case XenbusStateConnected:
+ if (dev->state != XenbusStateInitialising)
+ break;
+ if (!xenhcd_connect(dev))
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ fallthrough; /* Missed the backend's Closing state. */
+ case XenbusStateClosing:
+ xenhcd_disconnect(dev);
+ break;
+
+ default:
+ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
+ backend_state);
+ break;
+ }
+}
+
+static int xenhcd_remove(struct xenbus_device *dev)
+{
+ struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
+ struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
+
+ xenhcd_destroy_rings(info);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static int xenhcd_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err;
+ struct usb_hcd *hcd;
+ struct xenhcd_info *info;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ hcd = xenhcd_create_hcd(dev);
+ if (IS_ERR(hcd)) {
+ err = PTR_ERR(hcd);
+ xenbus_dev_fatal(dev, err,
+ "fail to create usb host controller");
+ return err;
+ }
+
+ info = xenhcd_hcd_to_info(hcd);
+ dev_set_drvdata(&dev->dev, info);
+
+ err = usb_add_hcd(hcd, 0, 0);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "fail to add USB host controller");
+ usb_put_hcd(hcd);
+ dev_set_drvdata(&dev->dev, NULL);
+ }
+
+ return err;
+}
+
+static const struct xenbus_device_id xenhcd_ids[] = {
+ { "vusb" },
+ { "" },
+};
+
+static struct xenbus_driver xenhcd_driver = {
+ .ids = xenhcd_ids,
+ .probe = xenhcd_probe,
+ .otherend_changed = xenhcd_backend_changed,
+ .remove = xenhcd_remove,
+};
+
+static int __init xenhcd_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv",
+ sizeof(struct urb_priv), 0, 0, NULL);
+ if (!xenhcd_urbp_cachep) {
+ pr_err("xenhcd failed to create kmem cache\n");
+ return -ENOMEM;
+ }
+
+ return xenbus_register_frontend(&xenhcd_driver);
+}
+module_init(xenhcd_init);
+
+static void __exit xenhcd_exit(void)
+{
+ kmem_cache_destroy(xenhcd_urbp_cachep);
+ xenbus_unregister_driver(&xenhcd_driver);
+}
+module_exit(xenhcd_exit);
+
+MODULE_ALIAS("xen:vusb");
+MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
+MODULE_DESCRIPTION("Xen USB Virtual Host Controller driver (xen-hcd)");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 58a0eae4f41b..91738af0ab14 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -245,11 +245,12 @@ static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk)
/* wait for host ip to sleep */
ret = readl_poll_timeout(&ippc->ip_pw_sts1, value,
(value & STS1_IP_SLEEP_STS), 100, 100000);
- if (ret) {
+ if (ret)
dev_err(mtk->dev, "ip sleep failed!!!\n");
- return ret;
- }
- return 0;
+	else /* workaround for platforms using low-level latch */
+ usleep_range(100, 200);
+
+ return ret;
}
static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk)
@@ -300,7 +301,7 @@ static void usb_wakeup_ip_sleep_set(struct xhci_hcd_mtk *mtk, bool enable)
case SSUSB_UWK_V1_1:
reg = mtk->uwk_reg_base + PERI_WK_CTRL0;
msk = WC0_IS_EN | WC0_IS_C(0xf) | WC0_IS_P;
- val = enable ? (WC0_IS_EN | WC0_IS_C(0x8)) : 0;
+ val = enable ? (WC0_IS_EN | WC0_IS_C(0x1)) : 0;
break;
case SSUSB_UWK_V1_2:
reg = mtk->uwk_reg_base + PERI_WK_CTRL0;
@@ -437,11 +438,8 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
if (ret)
return ret;
- if (usb_hcd_is_primary_hcd(hcd)) {
+ if (usb_hcd_is_primary_hcd(hcd))
ret = xhci_mtk_sch_init(mtk);
- if (ret)
- return ret;
- }
return ret;
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index c1edcc9b13ce..dc570ce4e831 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -437,6 +437,9 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
+ if (pm_runtime_suspended(dev))
+ pm_runtime_resume(dev);
+
ret = xhci_priv_suspend_quirk(hcd);
if (ret)
return ret;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f5b1bcc875de..dc357cabb265 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4998,10 +4998,8 @@ static int calculate_max_exit_latency(struct usb_device *udev,
enabling_u2)
u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
- if (u1_mel_us > u2_mel_us)
- mel_us = u1_mel_us;
- else
- mel_us = u2_mel_us;
+ mel_us = max(u1_mel_us, u2_mel_us);
+
/* xHCI host controller max exit latency field is only 16 bits wide. */
if (mel_us > MAX_EXIT) {
dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 7cc349c0b2ad..65ba5aca2a4f 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -13,6 +13,7 @@
#include <linux/usb.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -191,17 +192,15 @@ static int isp1760_plat_probe(struct platform_device *pdev)
unsigned long irqflags;
unsigned int devflags = 0;
struct resource *mem_res;
- struct resource *irq_res;
+ int irq;
int ret;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
- pr_warn("isp1760: IRQ resource not available\n");
- return -ENODEV;
- }
- irqflags = irq_res->flags & IRQF_TRIGGER_MASK;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ irqflags = irq_get_trigger_type(irq);
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
struct device_node *dp = pdev->dev.of_node;
@@ -239,8 +238,7 @@ static int isp1760_plat_probe(struct platform_device *pdev)
return -ENXIO;
}
- ret = isp1760_register(mem_res, irq_res->start, irqflags, &pdev->dev,
- devflags);
+ ret = isp1760_register(mem_res, irq, irqflags, &pdev->dev, devflags);
if (ret < 0)
return ret;
diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
index f87890f9cd26..986d6589f053 100644
--- a/drivers/usb/misc/ehset.c
+++ b/drivers/usb/misc/ehset.c
@@ -18,6 +18,52 @@
#define TEST_SINGLE_STEP_GET_DEV_DESC 0x0107
#define TEST_SINGLE_STEP_SET_FEATURE 0x0108
+extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
+ const struct usb_device_id *id);
+
+/*
+ * A list of USB hubs which require the port power to be
+ * disabled before starting the testing procedures.
+ */
+static const struct usb_device_id ehset_hub_list[] = {
+ { USB_DEVICE(0x0424, 0x4502) },
+ { USB_DEVICE(0x0424, 0x4913) },
+ { USB_DEVICE(0x0451, 0x8027) },
+ { }
+};
+
+static int ehset_prepare_port_for_testing(struct usb_device *hub_udev, u16 portnum)
+{
+ int ret = 0;
+
+ /*
+ * The USB2.0 spec chapter 11.24.2.13 says that the USB port which is
+ * going under test needs to be put in suspend before sending the
+ * test command. Most hubs don't enforce this precondition, but there
+	 * are some hubs which need the port power disabled before
+ * starting the test.
+ */
+ if (usb_device_match_id(hub_udev, ehset_hub_list)) {
+ ret = usb_control_msg_send(hub_udev, 0, USB_REQ_CLEAR_FEATURE,
+ USB_RT_PORT, USB_PORT_FEAT_ENABLE,
+ portnum, NULL, 0, 1000, GFP_KERNEL);
+ /*
+		 * Wait for the port to be disabled. The 100 ms delay is an
+		 * arbitrary value that has worked reliably in testing.
+ */
+ msleep(100);
+ } else {
+ /*
+ * For the hubs which are compliant with the spec,
+ * put the port in SUSPEND.
+ */
+ ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
+ USB_RT_PORT, USB_PORT_FEAT_SUSPEND,
+ portnum, NULL, 0, 1000, GFP_KERNEL);
+ }
+ return ret;
+}
+
static int ehset_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -30,24 +76,36 @@ static int ehset_probe(struct usb_interface *intf,
switch (test_pid) {
case TEST_SE0_NAK_PID:
+ ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+ if (!ret)
+ break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_SE0_NAK << 8) | portnum,
NULL, 0, 1000, GFP_KERNEL);
break;
case TEST_J_PID:
+ ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+ if (!ret)
+ break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_J << 8) | portnum, NULL, 0,
1000, GFP_KERNEL);
break;
case TEST_K_PID:
+ ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+ if (!ret)
+ break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_K << 8) | portnum, NULL, 0,
1000, GFP_KERNEL);
break;
case TEST_PACKET_PID:
+ ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+ if (!ret)
+ break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_PACKET << 8) | portnum,
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index e5a8fcdbb78e..6c38c62d29b2 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -202,6 +202,7 @@ static void ftdi_elan_delete(struct kref *kref)
mutex_unlock(&ftdi_module_lock);
kfree(ftdi->bulk_in_buffer);
ftdi->bulk_in_buffer = NULL;
+ kfree(ftdi);
}
static void ftdi_elan_put_kref(struct usb_ftdi *ftdi)
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 660641ab1545..bf2c0fa6cb32 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -500,6 +500,8 @@ static int am35x_probe(struct platform_device *pdev)
pinfo.num_res = pdev->num_resources;
pinfo.data = pdata;
pinfo.size_data = sizeof(*pdata);
+ pinfo.fwnode = of_fwnode_handle(pdev->dev.of_node);
+ pinfo.of_node_reused = true;
glue->musb = musb = platform_device_register_full(&pinfo);
if (IS_ERR(musb)) {
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 1c023c0091c4..fd4ae2dd24e5 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -505,7 +505,6 @@ static struct of_dev_auxdata da8xx_auxdata_lookup[] = {
static int da8xx_probe(struct platform_device *pdev)
{
- struct resource musb_resources[2];
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct da8xx_glue *glue;
struct platform_device_info pinfo;
@@ -558,25 +557,14 @@ static int da8xx_probe(struct platform_device *pdev)
if (ret)
return ret;
- memset(musb_resources, 0x00, sizeof(*musb_resources) *
- ARRAY_SIZE(musb_resources));
-
- musb_resources[0].name = pdev->resource[0].name;
- musb_resources[0].start = pdev->resource[0].start;
- musb_resources[0].end = pdev->resource[0].end;
- musb_resources[0].flags = pdev->resource[0].flags;
-
- musb_resources[1].name = pdev->resource[1].name;
- musb_resources[1].start = pdev->resource[1].start;
- musb_resources[1].end = pdev->resource[1].end;
- musb_resources[1].flags = pdev->resource[1].flags;
-
pinfo = da8xx_dev_info;
pinfo.parent = &pdev->dev;
- pinfo.res = musb_resources;
- pinfo.num_res = ARRAY_SIZE(musb_resources);
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
pinfo.data = pdata;
pinfo.size_data = sizeof(*pdata);
+ pinfo.fwnode = of_fwnode_handle(np);
+ pinfo.of_node_reused = true;
glue->musb = platform_device_register_full(&pinfo);
ret = PTR_ERR_OR_ZERO(glue->musb);
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 5b7d576bf6ee..417c30bff9ca 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -231,6 +231,7 @@ static int jz4740_probe(struct platform_device *pdev)
musb->dev.parent = dev;
musb->dev.dma_mask = &musb->dev.coherent_dma_mask;
musb->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ device_set_of_node_from_dev(&musb->dev, dev);
glue->pdev = musb;
glue->clk = clk;
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index f5d97eb84cb5..1aeb34dbe24f 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -538,6 +538,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
pinfo.num_res = pdev->num_resources;
pinfo.data = pdata;
pinfo.size_data = sizeof(*pdata);
+ pinfo.fwnode = of_fwnode_handle(np);
+ pinfo.of_node_reused = true;
glue->musb_pdev = platform_device_register_full(&pinfo);
if (IS_ERR(glue->musb_pdev)) {
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index b5935834f9d2..f75cde0f2b43 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -15,6 +15,7 @@
*/
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -739,12 +740,14 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
}
resources[0] = *res;
- res = platform_get_resource_byname(parent, IORESOURCE_IRQ, "mc");
- if (!res) {
- dev_err(dev, "failed to get irq.\n");
- return -EINVAL;
- }
- resources[1] = *res;
+ ret = platform_get_irq_byname(parent, "mc");
+ if (ret < 0)
+ return ret;
+
+ resources[1].start = ret;
+ resources[1].end = ret;
+ resources[1].flags = IORESOURCE_IRQ | irq_get_trigger_type(ret);
+ resources[1].name = "mc";
/* allocate the child platform device */
musb = platform_device_alloc("musb-hdrc",
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index f086960fe2b5..7d4d0713f4f0 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -301,7 +301,6 @@ static u64 omap2430_dmamask = DMA_BIT_MASK(32);
static int omap2430_probe(struct platform_device *pdev)
{
- struct resource musb_resources[3];
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct omap_musb_board_data *data;
struct platform_device *musb;
@@ -328,6 +327,7 @@ static int omap2430_probe(struct platform_device *pdev)
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = &omap2430_dmamask;
musb->dev.coherent_dma_mask = omap2430_dmamask;
+ device_set_of_node_from_dev(&musb->dev, &pdev->dev);
glue->dev = &pdev->dev;
glue->musb = musb;
@@ -383,26 +383,7 @@ static int omap2430_probe(struct platform_device *pdev)
INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work);
- memset(musb_resources, 0x00, sizeof(*musb_resources) *
- ARRAY_SIZE(musb_resources));
-
- musb_resources[0].name = pdev->resource[0].name;
- musb_resources[0].start = pdev->resource[0].start;
- musb_resources[0].end = pdev->resource[0].end;
- musb_resources[0].flags = pdev->resource[0].flags;
-
- musb_resources[1].name = pdev->resource[1].name;
- musb_resources[1].start = pdev->resource[1].start;
- musb_resources[1].end = pdev->resource[1].end;
- musb_resources[1].flags = pdev->resource[1].flags;
-
- musb_resources[2].name = pdev->resource[2].name;
- musb_resources[2].start = pdev->resource[2].start;
- musb_resources[2].end = pdev->resource[2].end;
- musb_resources[2].flags = pdev->resource[2].flags;
-
- ret = platform_device_add_resources(musb, musb_resources,
- ARRAY_SIZE(musb_resources));
+ ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources);
if (ret) {
dev_err(&pdev->dev, "failed to add resources\n");
goto err2;
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 73538d1d0524..8ea62c344328 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -216,7 +216,6 @@ ux500_of_probe(struct platform_device *pdev, struct device_node *np)
static int ux500_probe(struct platform_device *pdev)
{
- struct resource musb_resources[2];
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct platform_device *musb;
@@ -263,6 +262,7 @@ static int ux500_probe(struct platform_device *pdev)
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+ device_set_of_node_from_dev(&musb->dev, &pdev->dev);
glue->dev = &pdev->dev;
glue->musb = musb;
@@ -273,21 +273,7 @@ static int ux500_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, glue);
- memset(musb_resources, 0x00, sizeof(*musb_resources) *
- ARRAY_SIZE(musb_resources));
-
- musb_resources[0].name = pdev->resource[0].name;
- musb_resources[0].start = pdev->resource[0].start;
- musb_resources[0].end = pdev->resource[0].end;
- musb_resources[0].flags = pdev->resource[0].flags;
-
- musb_resources[1].name = pdev->resource[1].name;
- musb_resources[1].start = pdev->resource[1].start;
- musb_resources[1].end = pdev->resource[1].end;
- musb_resources[1].flags = pdev->resource[1].flags;
-
- ret = platform_device_add_resources(musb, musb_resources,
- ARRAY_SIZE(musb_resources));
+ ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources);
if (ret) {
dev_err(&pdev->dev, "failed to add resources\n");
goto err2;
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 576d925af77c..86503b7d695c 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -648,10 +648,8 @@ static int mv_otg_remove(struct platform_device *pdev)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
- if (mvotg->qwork) {
- flush_workqueue(mvotg->qwork);
+ if (mvotg->qwork)
destroy_workqueue(mvotg->qwork);
- }
mv_otg_disable(mvotg);
@@ -825,7 +823,6 @@ static int mv_otg_probe(struct platform_device *pdev)
err_disable_clk:
mv_otg_disable_internal(mvotg);
err_destroy_workqueue:
- flush_workqueue(mvotg->qwork);
destroy_workqueue(mvotg->qwork);
return retval;
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 3af91b2b8f76..96f3939a65e2 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -589,11 +589,11 @@ static int usbhs_probe(struct platform_device *pdev)
{
const struct renesas_usbhs_platform_info *info;
struct usbhs_priv *priv;
- struct resource *irq_res;
struct device *dev = &pdev->dev;
struct gpio_desc *gpiod;
int ret;
u32 tmp;
+ int irq;
/* check device node */
if (dev_of_node(dev))
@@ -608,11 +608,9 @@ static int usbhs_probe(struct platform_device *pdev)
}
/* platform data */
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
- dev_err(dev, "Not enough Renesas USB platform resources.\n");
- return -ENODEV;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
/* usb private data */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -669,9 +667,7 @@ static int usbhs_probe(struct platform_device *pdev)
/*
* priv settings
*/
- priv->irq = irq_res->start;
- if (irq_res->flags & IORESOURCE_IRQ_SHAREABLE)
- priv->irqflags = IRQF_SHARED;
+ priv->irq = irq;
priv->pdev = pdev;
INIT_DELAYED_WORK(&priv->notify_hotplug_work, usbhsc_notify_hotplug);
spin_lock_init(usbhs_priv_to_lock(priv));
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index eb34d762a63d..3fb5bc94dc0d 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -252,7 +252,6 @@ struct usbhs_priv {
void __iomem *base;
unsigned int irq;
- unsigned long irqflags;
const struct renesas_usbhs_platform_callback *pfunc;
struct renesas_usbhs_driver_param dparam;
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index b98112cefaa4..f2ea3e1412d2 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -142,7 +142,7 @@ int usbhs_mod_probe(struct usbhs_priv *priv)
/* irq settings */
ret = devm_request_irq(dev, priv->irq, usbhs_interrupt,
- priv->irqflags, dev_name(dev), priv);
+ 0, dev_name(dev), priv);
if (ret) {
dev_err(dev, "irq request err\n");
goto mod_init_gadget_err;
@@ -219,18 +219,6 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
- /*
- * Check whether the irq enable registers and the irq status are set
- * when IRQF_SHARED is set.
- */
- if (priv->irqflags & IRQF_SHARED) {
- if (!(intenb0 & state->intsts0) &&
- !(intenb1 & state->intsts1) &&
- !(state->bempsts) &&
- !(state->brdysts))
- return -EIO;
- }
-
return 0;
}
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index b9f78ef3edc3..0774ba22fb66 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -130,8 +130,6 @@ int sierra_ms_init(struct us_data *us)
struct swoc_info *swocInfo;
struct usb_device *udev;
- retries = 3;
- result = 0;
udev = us->pusb_dev;
/* Force Modem mode */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 4c5a0a49035f..1928b3918242 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -551,7 +551,7 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
/* Did this command access the last sector? */
sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
(srb->cmnd[4] << 8) | (srb->cmnd[5]);
- disk = scsi_cmd_to_rq(srb)->rq_disk;
+ disk = scsi_cmd_to_rq(srb)->q->disk;
if (!disk)
goto done;
sdkp = scsi_disk(disk);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 29191d33c0e3..1a05e3dcfec8 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
+/*
+ * Reported by DocMAX <mail@vacharakis.de>
+ * and Thomas Weißschuh <linux@weissschuh.net>
+ */
+UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
+ "VIA Labs, Inc.",
+ "VL817 SATA Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index a0adb8947a30..57870a2bd787 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC) += typec.o
-typec-y := class.o mux.o bus.o port-mapper.o
+typec-y := class.o mux.o bus.o
+typec-$(CONFIG_ACPI) += port-mapper.o
obj-$(CONFIG_TYPEC) += altmodes/
obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index aeef453aa658..45a6f0c807cb 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -2039,8 +2039,6 @@ struct typec_port *typec_register_port(struct device *parent,
ida_init(&port->mode_ids);
mutex_init(&port->port_type_lock);
- mutex_init(&port->port_list_lock);
- INIT_LIST_HEAD(&port->port_list);
port->id = id;
port->ops = cap->ops;
diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h
index aef03eb7e152..0f1bd6d19d67 100644
--- a/drivers/usb/typec/class.h
+++ b/drivers/usb/typec/class.h
@@ -54,11 +54,6 @@ struct typec_port {
const struct typec_capability *cap;
const struct typec_operations *ops;
-
- struct list_head port_list;
- struct mutex port_list_lock; /* Port list lock */
-
- void *pld;
};
#define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev)
@@ -79,7 +74,12 @@ extern const struct device_type typec_port_dev_type;
extern struct class typec_mux_class;
extern struct class typec_class;
+#if defined(CONFIG_ACPI)
int typec_link_ports(struct typec_port *connector);
void typec_unlink_ports(struct typec_port *connector);
+#else
+static inline int typec_link_ports(struct typec_port *connector) { return 0; }
+static inline void typec_unlink_ports(struct typec_port *connector) { }
+#endif
#endif /* __USB_TYPEC_CLASS__ */
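Since port-mapper.c is now built only when CONFIG_ACPI is enabled (see the Makefile hunk above), class.h grows no-op inline stubs for the !ACPI case so callers stay free of #ifdef clutter. A minimal sketch of that conditional-stub pattern, with a hypothetical HAVE_BACKEND macro and illustrative names standing in for CONFIG_ACPI and the typec helpers:

struct conn;				/* opaque forward declaration */

#ifdef HAVE_BACKEND
int backend_link(struct conn *c);	/* real version compiled elsewhere */
void backend_unlink(struct conn *c);
#else
/* No-op stubs: the compiler discards them, callers need no #ifdefs. */
static inline int backend_link(struct conn *c) { return 0; }
static inline void backend_unlink(struct conn *c) { }
#endif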
diff --git a/drivers/usb/typec/port-mapper.c b/drivers/usb/typec/port-mapper.c
index 9b0991bdf391..a7d507802509 100644
--- a/drivers/usb/typec/port-mapper.c
+++ b/drivers/usb/typec/port-mapper.c
@@ -7,273 +7,78 @@
*/
#include <linux/acpi.h>
-#include <linux/usb.h>
-#include <linux/usb/typec.h>
+#include <linux/component.h>
#include "class.h"
-struct port_node {
- struct list_head list;
- struct device *dev;
- void *pld;
-};
-
-static int acpi_pld_match(const struct acpi_pld_info *pld1,
- const struct acpi_pld_info *pld2)
-{
- if (!pld1 || !pld2)
- return 0;
-
- /*
- * To speed things up, first checking only the group_position. It seems
- * to often have the first unique value in the _PLD.
- */
- if (pld1->group_position == pld2->group_position)
- return !memcmp(pld1, pld2, sizeof(struct acpi_pld_info));
-
- return 0;
-}
-
-static void *get_pld(struct device *dev)
-{
-#ifdef CONFIG_ACPI
- struct acpi_pld_info *pld;
- acpi_status status;
-
- if (!has_acpi_companion(dev))
- return NULL;
-
- status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
- if (ACPI_FAILURE(status))
- return NULL;
-
- return pld;
-#else
- return NULL;
-#endif
-}
-
-static void free_pld(void *pld)
-{
-#ifdef CONFIG_ACPI
- ACPI_FREE(pld);
-#endif
-}
-
-static int __link_port(struct typec_port *con, struct port_node *node)
-{
- int ret;
-
- ret = sysfs_create_link(&node->dev->kobj, &con->dev.kobj, "connector");
- if (ret)
- return ret;
-
- ret = sysfs_create_link(&con->dev.kobj, &node->dev->kobj,
- dev_name(node->dev));
- if (ret) {
- sysfs_remove_link(&node->dev->kobj, "connector");
- return ret;
- }
-
- list_add_tail(&node->list, &con->port_list);
-
- return 0;
-}
-
-static int link_port(struct typec_port *con, struct port_node *node)
-{
- int ret;
-
- mutex_lock(&con->port_list_lock);
- ret = __link_port(con, node);
- mutex_unlock(&con->port_list_lock);
-
- return ret;
-}
-
-static void __unlink_port(struct typec_port *con, struct port_node *node)
-{
- sysfs_remove_link(&con->dev.kobj, dev_name(node->dev));
- sysfs_remove_link(&node->dev->kobj, "connector");
- list_del(&node->list);
-}
-
-static void unlink_port(struct typec_port *con, struct port_node *node)
-{
- mutex_lock(&con->port_list_lock);
- __unlink_port(con, node);
- mutex_unlock(&con->port_list_lock);
-}
-
-static struct port_node *create_port_node(struct device *port)
-{
- struct port_node *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->dev = get_device(port);
- node->pld = get_pld(port);
-
- return node;
-}
-
-static void remove_port_node(struct port_node *node)
+static int typec_aggregate_bind(struct device *dev)
{
- put_device(node->dev);
- free_pld(node->pld);
- kfree(node);
+ return component_bind_all(dev, NULL);
}
-static int connector_match(struct device *dev, const void *data)
+static void typec_aggregate_unbind(struct device *dev)
{
- const struct port_node *node = data;
-
- if (!is_typec_port(dev))
- return 0;
-
- return acpi_pld_match(to_typec_port(dev)->pld, node->pld);
-}
-
-static struct device *find_connector(struct port_node *node)
-{
- if (!node->pld)
- return NULL;
-
- return class_find_device(&typec_class, NULL, node, connector_match);
-}
-
-/**
- * typec_link_port - Link a port to its connector
- * @port: The port device
- *
- * Find the connector of @port and create symlink named "connector" for it.
- * Returns 0 on success, or errno in case of a failure.
- *
- * NOTE. The function increments the reference count of @port on success.
- */
-int typec_link_port(struct device *port)
-{
- struct device *connector;
- struct port_node *node;
- int ret;
-
- node = create_port_node(port);
- if (IS_ERR(node))
- return PTR_ERR(node);
-
- connector = find_connector(node);
- if (!connector) {
- ret = 0;
- goto remove_node;
- }
-
- ret = link_port(to_typec_port(connector), node);
- if (ret)
- goto put_connector;
-
- return 0;
-
-put_connector:
- put_device(connector);
-remove_node:
- remove_port_node(node);
-
- return ret;
+ component_unbind_all(dev, NULL);
}
-EXPORT_SYMBOL_GPL(typec_link_port);
-
-static int port_match_and_unlink(struct device *connector, void *port)
-{
- struct port_node *node;
- struct port_node *tmp;
- int ret = 0;
-
- if (!is_typec_port(connector))
- return 0;
- mutex_lock(&to_typec_port(connector)->port_list_lock);
- list_for_each_entry_safe(node, tmp, &to_typec_port(connector)->port_list, list) {
- ret = node->dev == port;
- if (ret) {
- unlink_port(to_typec_port(connector), node);
- remove_port_node(node);
- put_device(connector);
- break;
- }
- }
- mutex_unlock(&to_typec_port(connector)->port_list_lock);
+static const struct component_master_ops typec_aggregate_ops = {
+ .bind = typec_aggregate_bind,
+ .unbind = typec_aggregate_unbind,
+};
- return ret;
-}
+struct each_port_arg {
+ struct typec_port *port;
+ struct component_match *match;
+};
-/**
- * typec_unlink_port - Unlink port from its connector
- * @port: The port device
- *
- * Removes the symlink "connector" and decrements the reference count of @port.
- */
-void typec_unlink_port(struct device *port)
+static int typec_port_compare(struct device *dev, void *fwnode)
{
- class_for_each_device(&typec_class, NULL, port, port_match_and_unlink);
+ return device_match_fwnode(dev, fwnode);
}
-EXPORT_SYMBOL_GPL(typec_unlink_port);
-static int each_port(struct device *port, void *connector)
+static int typec_port_match(struct device *dev, void *data)
{
- struct port_node *node;
- int ret;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct each_port_arg *arg = data;
+ struct acpi_device *con_adev;
- node = create_port_node(port);
- if (IS_ERR(node))
- return PTR_ERR(node);
-
- if (!connector_match(connector, node)) {
- remove_port_node(node);
+ con_adev = ACPI_COMPANION(&arg->port->dev);
+ if (con_adev == adev)
return 0;
- }
-
- ret = link_port(to_typec_port(connector), node);
- if (ret) {
- remove_port_node(node->pld);
- return ret;
- }
-
- get_device(connector);
+ if (con_adev->pld_crc == adev->pld_crc)
+ component_match_add(&arg->port->dev, &arg->match, typec_port_compare,
+ acpi_fwnode_handle(adev));
return 0;
}
int typec_link_ports(struct typec_port *con)
{
- int ret = 0;
+ struct each_port_arg arg = { .port = con, .match = NULL };
- con->pld = get_pld(&con->dev);
- if (!con->pld)
+ if (!has_acpi_companion(&con->dev))
return 0;
- ret = usb_for_each_port(&con->dev, each_port);
- if (ret)
- typec_unlink_ports(con);
+ bus_for_each_dev(&acpi_bus_type, NULL, &arg, typec_port_match);
+ if (!arg.match)
+ return 0;
- return ret;
+ /*
+ * REVISIT: For now, each connector can have only a single component
+ * master. So far only the USB ports connected to the USB Type-C
+ * connector share the _PLD with it, but if one day something else
+ * (like maybe the DisplayPort ACPI device object) also shares the _PLD
+ * with the connector, every one of those will need its own component
+ * master, because each different type of component needs to be bound
+ * to the connector independently of the others. That requires
+ * improvements to the component framework; right now a device can have
+ * only one master.
+ */
+ return component_master_add_with_match(&con->dev, &typec_aggregate_ops, arg.match);
}
void typec_unlink_ports(struct typec_port *con)
{
- struct port_node *node;
- struct port_node *tmp;
-
- mutex_lock(&con->port_list_lock);
-
- list_for_each_entry_safe(node, tmp, &con->port_list, list) {
- __unlink_port(con, node);
- remove_port_node(node);
- put_device(&con->dev);
- }
-
- mutex_unlock(&con->port_list_lock);
-
- free_pld(con->pld);
+ if (has_acpi_companion(&con->dev))
+ component_master_del(&con->dev, &typec_aggregate_ops);
}
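For reference, the component-framework flow the rewritten port-mapper relies on looks roughly like the sketch below. The component_* calls and device_match_fwnode() are the real kernel API; register_aggregate(), part_compare() and agg_* are illustrative names, not kernel symbols:

#include <linux/component.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/property.h>

static int agg_bind(struct device *dev)
{
	/* Runs once every matched component has been added. */
	return component_bind_all(dev, NULL);
}

static void agg_unbind(struct device *dev)
{
	component_unbind_all(dev, NULL);
}

static const struct component_master_ops agg_ops = {
	.bind = agg_bind,
	.unbind = agg_unbind,
};

static int part_compare(struct device *dev, void *fwnode)
{
	return device_match_fwnode(dev, fwnode);
}

/* Illustrative: make @master an aggregate that waits for @part. */
static int register_aggregate(struct device *master, struct device *part)
{
	struct component_match *match = NULL;

	component_match_add(master, &match, part_compare, dev_fwnode(part));
	if (IS_ERR(match))
		return PTR_ERR(match);

	return component_master_add_with_match(master, &agg_ops, match);
}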
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 35a1307349a2..e07d26a3cd8e 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -75,9 +75,25 @@ static int tcpci_write16(struct tcpci *tcpci, unsigned int reg, u16 val)
static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ bool vconn_pres;
+ enum typec_cc_polarity polarity = TYPEC_POLARITY_CC1;
unsigned int reg;
int ret;
+ ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
+ if (ret < 0)
+ return ret;
+
+ vconn_pres = !!(reg & TCPC_POWER_STATUS_VCONN_PRES);
+ if (vconn_pres) {
+ ret = regmap_read(tcpci->regmap, TCPC_TCPC_CTRL, &reg);
+ if (ret < 0)
+ return ret;
+
+ if (reg & TCPC_TCPC_CTRL_ORIENTATION)
+ polarity = TYPEC_POLARITY_CC2;
+ }
+
switch (cc) {
case TYPEC_CC_RA:
reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) |
@@ -112,6 +128,16 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
break;
}
+ if (vconn_pres) {
+ if (polarity == TYPEC_POLARITY_CC2) {
+ reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
+ reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT);
+ } else {
+ reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
+ }
+ }
+
ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
if (ret < 0)
return ret;
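The logic above keeps the VCONN-supplying CC pin open while VCONN is being sourced: with the ORIENTATION bit set, the active line is CC2, so CC1 carries VCONN and its role field is forced to open (and vice versa). A freestanding sketch of that read-modify-write, with illustrative field layout and encodings rather than the actual TCPCI register values:

#include <stdint.h>

#define CC1_SHIFT	0	/* illustrative field layout */
#define CC2_SHIFT	2
#define CC_FIELD_MASK	0x3
#define CC_OPEN		0x3	/* illustrative "open" encoding */

/* Force the VCONN-carrying pin open so CC changes cannot glitch VCONN. */
static uint8_t mask_vconn_pin(uint8_t role_ctrl, int active_line_is_cc2)
{
	int shift = active_line_is_cc2 ? CC1_SHIFT : CC2_SHIFT;

	role_ctrl &= ~(CC_FIELD_MASK << shift);
	role_ctrl |= CC_OPEN << shift;
	return role_ctrl;
}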
diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index 2be7a77d400e..b2edd45f13c6 100644
--- a/drivers/usb/typec/tcpm/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
@@ -98,6 +98,7 @@
#define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4)
#define TCPC_POWER_STATUS_VBUS_DET BIT(3)
#define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
+#define TCPC_POWER_STATUS_VCONN_PRES BIT(1)
#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0)
#define TCPC_FAULT_STATUS 0x1f
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 59d4fa2443f2..5fce795b69c7 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5156,7 +5156,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
case SNK_TRYWAIT_DEBOUNCE:
break;
case SNK_ATTACH_WAIT:
- tcpm_set_state(port, SNK_UNATTACHED, 0);
+ case SNK_DEBOUNCED:
+ /* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
break;
case SNK_NEGOTIATE_CAPABILITIES:
@@ -5263,6 +5264,10 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
case PR_SWAP_SNK_SRC_SOURCE_ON:
/* Do nothing, vsafe0v is expected during transition */
break;
+ case SNK_ATTACH_WAIT:
+ case SNK_DEBOUNCED:
+ /* Do nothing, still waiting for VSAFE5V to connect */
+ break;
default:
if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
tcpm_set_state(port, SNK_UNATTACHED, 0);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 08561bf7c40c..f0c2fa19f3e0 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -303,6 +303,17 @@ static int ucsi_next_altmode(struct typec_altmode **alt)
return -ENOENT;
}
+static int ucsi_get_num_altmode(struct typec_altmode **alt)
+{
+ int i;
+
+ for (i = 0; i < UCSI_MAX_ALTMODES; i++)
+ if (!alt[i])
+ break;
+
+ return i;
+}
+
static int ucsi_register_altmode(struct ucsi_connector *con,
struct typec_altmode_desc *desc,
u8 recipient)
@@ -607,7 +618,7 @@ static int ucsi_get_src_pdos(struct ucsi_connector *con)
static int ucsi_check_altmodes(struct ucsi_connector *con)
{
- int ret;
+ int ret, num_partner_am;
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
if (ret && ret != -ETIMEDOUT)
@@ -617,6 +628,9 @@ static int ucsi_check_altmodes(struct ucsi_connector *con)
/* Ignoring the errors in this case. */
if (con->partner_altmode[0]) {
+ num_partner_am = ucsi_get_num_altmode(con->partner_altmode);
+ if (num_partner_am > 0)
+ typec_partner_set_num_altmodes(con->partner, num_partner_am);
ucsi_altmode_update_active(con);
return 0;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index bff96d64dddf..6db7c8ddd51c 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -325,7 +325,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
if (status < 0)
return status;
- if (!data)
+ if (!(data & DEV_INT))
return 0;
status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index 086ca76dd053..26513540bcdb 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -137,7 +137,6 @@ int usbip_init_eh(void)
void usbip_finish_eh(void)
{
- flush_workqueue(usbip_queue);
destroy_workqueue(usbip_queue);
usbip_queue = NULL;
}
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
index 3f788794571a..f480d54f308c 100644
--- a/drivers/vdpa/alibaba/eni_vdpa.c
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -58,7 +58,7 @@ static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
return &eni_vdpa->ldev;
}
-static u64 eni_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
u64 features = vp_legacy_get_features(ldev);
@@ -69,7 +69,7 @@ static u64 eni_vdpa_get_features(struct vdpa_device *vdpa)
return features;
}
-static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
@@ -84,6 +84,13 @@ static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return vp_legacy_get_driver_features(ldev);
+}
+
static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
{
struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
@@ -401,8 +408,9 @@ static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
}
static const struct vdpa_config_ops eni_vdpa_ops = {
- .get_features = eni_vdpa_get_features,
- .set_features = eni_vdpa_set_features,
+ .get_device_features = eni_vdpa_get_device_features,
+ .set_driver_features = eni_vdpa_set_driver_features,
+ .get_driver_features = eni_vdpa_get_driver_features,
.get_status = eni_vdpa_get_status,
.set_status = eni_vdpa_set_status,
.reset = eni_vdpa_reset,
@@ -450,11 +458,6 @@ static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
return num;
}
-static void eni_vdpa_free_irq_vectors(void *data)
-{
- pci_free_irq_vectors(data);
-}
-
static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -488,13 +491,6 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
eni_vdpa->vdpa.dma_dev = &pdev->dev;
eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);
- ret = devm_add_action_or_reset(dev, eni_vdpa_free_irq_vectors, pdev);
- if (ret) {
- ENI_ERR(pdev,
- "failed for adding devres for freeing irq vectors\n");
- goto err;
- }
-
eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
sizeof(*eni_vdpa->vring),
GFP_KERNEL);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 2808f1ba9f7b..7d41dfe48ade 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -143,8 +143,8 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
- hw->net_cfg = get_cap_addr(hw, &cap);
- IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg);
+ hw->dev_cfg = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
break;
}
@@ -153,7 +153,7 @@ next:
}
if (hw->common_cfg == NULL || hw->notify_base == NULL ||
- hw->isr == NULL || hw->net_cfg == NULL) {
+ hw->isr == NULL || hw->dev_cfg == NULL) {
IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
return -EIO;
}
@@ -174,7 +174,7 @@ next:
IFCVF_DBG(pdev,
"PCI capability mapping: common cfg: %p, notify base: %p\n, isr cfg: %p, device cfg: %p, multiplier: %u\n",
hw->common_cfg, hw->notify_base, hw->isr,
- hw->net_cfg, hw->notify_off_multiplier);
+ hw->dev_cfg, hw->notify_off_multiplier);
return 0;
}
@@ -242,33 +242,54 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
return 0;
}
-void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
+{
+ struct ifcvf_adapter *adapter;
+ u32 config_size;
+
+ adapter = vf_to_adapter(hw);
+ switch (hw->dev_type) {
+ case VIRTIO_ID_NET:
+ config_size = sizeof(struct virtio_net_config);
+ break;
+ case VIRTIO_ID_BLOCK:
+ config_size = sizeof(struct virtio_blk_config);
+ break;
+ default:
+ config_size = 0;
+ IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
+ }
+
+ return config_size;
+}
+
+void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
void *dst, int length)
{
u8 old_gen, new_gen, *p;
int i;
- WARN_ON(offset + length > sizeof(struct virtio_net_config));
+ WARN_ON(offset + length > hw->config_size);
do {
old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
p = dst;
for (i = 0; i < length; i++)
- *p++ = ifc_ioread8(hw->net_cfg + offset + i);
+ *p++ = ifc_ioread8(hw->dev_cfg + offset + i);
new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
} while (old_gen != new_gen);
}
-void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
const void *src, int length)
{
const u8 *p;
int i;
p = src;
- WARN_ON(offset + length > sizeof(struct virtio_net_config));
+ WARN_ON(offset + length > hw->config_size);
for (i = 0; i < length; i++)
- ifc_iowrite8(*p++, hw->net_cfg + offset + i);
+ ifc_iowrite8(*p++, hw->dev_cfg + offset + i);
}
static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
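Apart from the widened WARN_ON bound, the read path above is the standard virtio-modern "config generation" loop: retry the byte-wise copy until the generation counter is unchanged, so the caller never sees a torn config snapshot. A freestanding sketch of the same pattern, where mmio_read8() is a hypothetical accessor standing in for ifc_ioread8():

#include <stdint.h>

extern uint8_t mmio_read8(const volatile uint8_t *addr);	/* hypothetical */

static void read_config_consistent(const volatile uint8_t *cfg,
				   const volatile uint8_t *generation,
				   uint8_t *dst, int len)
{
	uint8_t old_gen, new_gen;
	int i;

	do {
		old_gen = mmio_read8(generation);
		for (i = 0; i < len; i++)
			dst[i] = mmio_read8(cfg + i);
		new_gen = mmio_read8(generation);
	} while (old_gen != new_gen);	/* device changed config mid-copy */
}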
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 09918af3ecf8..c486873f370a 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -71,12 +71,14 @@ struct ifcvf_hw {
u64 hw_features;
u32 dev_type;
struct virtio_pci_common_cfg __iomem *common_cfg;
- void __iomem *net_cfg;
+ void __iomem *dev_cfg;
struct vring_info vring[IFCVF_MAX_QUEUES];
void __iomem * const *base;
char config_msix_name[256];
struct vdpa_callback config_cb;
unsigned int config_irq;
+ /* virtio-net or virtio-blk device config size */
+ u32 config_size;
};
struct ifcvf_adapter {
@@ -105,9 +107,9 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
int ifcvf_start_hw(struct ifcvf_hw *hw);
void ifcvf_stop_hw(struct ifcvf_hw *hw);
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
-void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
void *dst, int length);
-void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
const void *src, int length);
u8 ifcvf_get_status(struct ifcvf_hw *hw);
void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
@@ -120,4 +122,5 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
+u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 6dc75ca70b37..d1a6b5ab543c 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -169,7 +169,7 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
return &adapter->vf;
}
-static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
+static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
@@ -187,7 +187,7 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
return features;
}
-static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
+static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
int ret;
@@ -201,6 +201,13 @@ static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
return 0;
}
+static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->req_features;
+}
+
static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
@@ -366,24 +373,9 @@ static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
- struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- struct pci_dev *pdev = adapter->pdev;
- size_t size;
-
- switch (vf->dev_type) {
- case VIRTIO_ID_NET:
- size = sizeof(struct virtio_net_config);
- break;
- case VIRTIO_ID_BLOCK:
- size = sizeof(struct virtio_blk_config);
- break;
- default:
- size = 0;
- IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
- }
- return size;
+ return vf->config_size;
}
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
@@ -392,8 +384,7 @@ static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- WARN_ON(offset + len > sizeof(struct virtio_net_config));
- ifcvf_read_net_config(vf, offset, buf, len);
+ ifcvf_read_dev_config(vf, offset, buf, len);
}
static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
@@ -402,8 +393,7 @@ static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- WARN_ON(offset + len > sizeof(struct virtio_net_config));
- ifcvf_write_net_config(vf, offset, buf, len);
+ ifcvf_write_dev_config(vf, offset, buf, len);
}
static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
@@ -443,8 +433,9 @@ static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_devic
* implemented set_map()/dma_map()/dma_unmap()
*/
static const struct vdpa_config_ops ifc_vdpa_ops = {
- .get_features = ifcvf_vdpa_get_features,
- .set_features = ifcvf_vdpa_set_features,
+ .get_device_features = ifcvf_vdpa_get_device_features,
+ .set_driver_features = ifcvf_vdpa_set_driver_features,
+ .get_driver_features = ifcvf_vdpa_get_driver_features,
.get_status = ifcvf_vdpa_get_status,
.set_status = ifcvf_vdpa_set_status,
.reset = ifcvf_vdpa_reset,
@@ -542,6 +533,7 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
vf->vring[i].irq = -EINVAL;
vf->hw_features = ifcvf_get_hw_features(vf);
+ vf->config_size = ifcvf_get_config_size(vf);
adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 63813fbb5f62..f648f1c54a0f 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -131,25 +131,24 @@ struct mlx5_vdpa_virtqueue {
struct mlx5_vq_restore_info ri;
};
-/* We will remove this limitation once mlx5_vdpa_alloc_resources()
- * provides for driver space allocation
- */
-#define MLX5_MAX_SUPPORTED_VQS 16
-
static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
{
- if (unlikely(idx > mvdev->max_idx))
- return false;
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) {
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ return idx < 2;
+ else
+ return idx < 3;
+ }
- return true;
+ return idx <= mvdev->max_idx;
}
struct mlx5_vdpa_net {
struct mlx5_vdpa_dev mvdev;
struct mlx5_vdpa_net_resources res;
struct virtio_net_config config;
- struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
- struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
+ struct mlx5_vdpa_virtqueue *vqs;
+ struct vdpa_callback *event_cbs;
/* Serialize vq resources creation and destruction. This is required
* since memory map might change and we need to destroy and create
@@ -876,8 +875,6 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
- if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
- MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
if (err)
@@ -1218,7 +1215,7 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
{
int i;
- for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
+ for (i = 0; i < ndev->mvdev.max_vqs; i++)
suspend_vq(ndev, &ndev->vqs[i]);
}
@@ -1244,8 +1241,14 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
void *in;
int i, j;
int err;
+ int num;
- max_rqt = min_t(int, MLX5_MAX_SUPPORTED_VQS / 2,
+ if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+ num = 1;
+ else
+ num = ndev->cur_num_vqs / 2;
+
+ max_rqt = min_t(int, roundup_pow_of_two(num),
1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
if (max_rqt < 1)
return -EOPNOTSUPP;
@@ -1261,17 +1264,10 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; j < max_rqt; j++) {
- if (!ndev->vqs[j].initialized)
- continue;
-
- if (!vq_is_tx(ndev->vqs[j].index)) {
- list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
- i++;
- }
- }
- MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+ for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+ list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
@@ -1292,7 +1288,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
int i, j;
int err;
- max_rqt = min_t(int, ndev->cur_num_vqs / 2,
+ max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2),
1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
if (max_rqt < 1)
return -EOPNOTSUPP;
@@ -1308,16 +1304,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; j < num; j++) {
- if (!ndev->vqs[j].initialized)
- continue;
+ for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+ list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
- if (!vq_is_tx(ndev->vqs[j].index)) {
- list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
- i++;
- }
- }
- MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
@@ -1554,9 +1544,11 @@ static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
return 0;
clean_added:
- for (--i; i >= cur_qps; --i)
+ for (--i; i >= 2 * cur_qps; --i)
teardown_vq(ndev, &ndev->vqs[i]);
+ ndev->cur_num_vqs = 2 * cur_qps;
+
return err;
}
@@ -1581,9 +1573,6 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
break;
}
- if (newqps & (newqps - 1))
- break;
-
if (!change_num_qps(mvdev, newqps))
status = VIRTIO_NET_OK;
@@ -1880,21 +1869,29 @@ static u64 mlx_to_vritio_features(u16 dev_features)
return result;
}
-static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
+static u64 get_supported_features(struct mlx5_core_dev *mdev)
{
- struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
- struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ u64 mlx_vdpa_features = 0;
u16 dev_features;
- dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
- ndev->mvdev.mlx_features |= mlx_to_vritio_features(dev_features);
- if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
+ dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mdev, device_features_bits_mask);
+ mlx_vdpa_features |= mlx_to_vritio_features(dev_features);
+ if (MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_version_1_0))
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_F_VERSION_1);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU);
+
+ return mlx_vdpa_features;
+}
+
+static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
print_features(mvdev, ndev->mvdev.mlx_features, false);
return ndev->mvdev.mlx_features;
@@ -1972,7 +1969,7 @@ static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
}
}
-static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
+static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
@@ -1985,6 +1982,11 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
return err;
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
+ if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
+ ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
+ else
+ ndev->cur_num_vqs = 2;
+
update_cvq_info(mvdev);
return err;
}
@@ -2235,7 +2237,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
- memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
+ ndev->cur_num_vqs = 0;
+ memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
++mvdev->generation;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
@@ -2308,6 +2311,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
}
mlx5_vdpa_free_resources(&ndev->mvdev);
mutex_destroy(&ndev->reslock);
+ kfree(ndev->event_cbs);
+ kfree(ndev->vqs);
}
static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
@@ -2339,6 +2344,13 @@ static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
return -EOPNOTSUPP;
}
+static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ return mvdev->actual_features;
+}
+
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2351,8 +2363,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vq_notification = mlx5_get_vq_notification,
.get_vq_irq = mlx5_get_vq_irq,
.get_vq_align = mlx5_vdpa_get_vq_align,
- .get_features = mlx5_vdpa_get_features,
- .set_features = mlx5_vdpa_set_features,
+ .get_device_features = mlx5_vdpa_get_device_features,
+ .set_driver_features = mlx5_vdpa_set_driver_features,
+ .get_driver_features = mlx5_vdpa_get_driver_features,
.set_config_cb = mlx5_vdpa_set_config_cb,
.get_vq_num_max = mlx5_vdpa_get_vq_num_max,
.get_device_id = mlx5_vdpa_get_device_id,
@@ -2545,18 +2558,39 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
return -EOPNOTSUPP;
}
- /* we save one virtqueue for control virtqueue should we require it */
max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
- max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
+ if (max_vqs < 2) {
+ dev_warn(mdev->device,
+ "%d virtqueues are supported. At least 2 are required\n",
+ max_vqs);
+ return -EAGAIN;
+ }
+
+ if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) {
+ if (add_config->net.max_vq_pairs > max_vqs / 2)
+ return -EINVAL;
+ max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs);
+ } else {
+ max_vqs = 2;
+ }
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
+ ndev->mvdev.mlx_features = mgtdev->mgtdev.supported_features;
ndev->mvdev.max_vqs = max_vqs;
mvdev = &ndev->mvdev;
mvdev->mdev = mdev;
+
+ ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
+ ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
+ if (!ndev->vqs || !ndev->event_cbs) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
init_mvqs(ndev);
mutex_init(&ndev->reslock);
config = &ndev->config;
@@ -2612,9 +2646,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(mdev, &ndev->nb);
- ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs);
mvdev->vdev.mdev = &mgtdev->mgtdev;
- err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1);
+ err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
if (err)
goto err_reg;
@@ -2634,6 +2667,7 @@ err_mpfs:
mlx5_mpfs_del_mac(pfmdev, config->mac);
err_mtu:
mutex_destroy(&ndev->reslock);
+err_alloc:
put_device(&mvdev->vdev.dev);
return err;
}
@@ -2676,14 +2710,18 @@ static int mlx5v_probe(struct auxiliary_device *adev,
mgtdev->mgtdev.ops = &mdev_ops;
mgtdev->mgtdev.device = mdev->device;
mgtdev->mgtdev.id_table = id_table;
- mgtdev->mgtdev.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
+ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
+ mgtdev->mgtdev.max_supported_vqs =
+ MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
+ mgtdev->mgtdev.supported_features = get_supported_features(mdev);
mgtdev->madev = madev;
err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
if (err)
goto reg_err;
- dev_set_drvdata(&adev->dev, mgtdev);
+ auxiliary_set_drvdata(adev, mgtdev);
return 0;
@@ -2696,7 +2734,7 @@ static void mlx5v_remove(struct auxiliary_device *adev)
{
struct mlx5_vdpa_mgmtdev *mgtdev;
- mgtdev = dev_get_drvdata(&adev->dev);
+ mgtdev = auxiliary_get_drvdata(adev);
vdpa_mgmtdev_unregister(&mgtdev->mgtdev);
kfree(mgtdev);
}
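A worked example of the new RQT sizing in create_rqt()/modify_rqt() above: RQT sizes must be powers of two, so for, say, three active queue pairs the table is rounded up to four entries and the receive queues (the even vq indices) are filled in round-robin, wrapping past the last pair. The sketch below just prints that mapping:

#include <stdio.h>

/* Stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int pow2_roundup(unsigned int x)
{
	unsigned int p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int num = 3;			/* active queue pairs */
	unsigned int max_rqt = pow2_roundup(num);
	unsigned int i;

	/* Even vq indices are RX queues; wrap around past the last pair. */
	for (i = 0; i < max_rqt; i++)
		printf("list[%u] = vq %u\n", i, (2 * i) % (2 * num));

	return 0;	/* prints vq 0, 2, 4, 0 */
}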
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 09bbe53c3ac4..9846c9de4bfa 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -21,6 +21,14 @@ static LIST_HEAD(mdev_head);
static DEFINE_MUTEX(vdpa_dev_mutex);
static DEFINE_IDA(vdpa_index_ida);
+void vdpa_set_status(struct vdpa_device *vdev, u8 status)
+{
+ mutex_lock(&vdev->cf_mutex);
+ vdev->config->set_status(vdev, status);
+ mutex_unlock(&vdev->cf_mutex);
+}
+EXPORT_SYMBOL(vdpa_set_status);
+
static struct genl_family vdpa_nl_family;
static int vdpa_dev_probe(struct device *d)
@@ -52,8 +60,81 @@ static void vdpa_dev_remove(struct device *d)
drv->remove(vdev);
}
+static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(dev);
+
+ /* Check override first, and if set, only use the named driver */
+ if (vdev->driver_override)
+ return strcmp(vdev->driver_override, drv->name) == 0;
+
+ /* Currently devices must be supported by all vDPA bus drivers */
+ return 1;
+}
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(dev);
+ const char *driver_override, *old;
+ char *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = vdev->driver_override;
+ if (strlen(driver_override)) {
+ vdev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ vdev->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
+ device_unlock(dev);
+
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *vdpa_dev_attrs[] = {
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+static const struct attribute_group vdpa_dev_group = {
+ .attrs = vdpa_dev_attrs,
+};
+__ATTRIBUTE_GROUPS(vdpa_dev);
+
static struct bus_type vdpa_bus = {
.name = "vdpa",
+ .dev_groups = vdpa_dev_groups,
+ .match = vdpa_dev_match,
.probe = vdpa_dev_probe,
.remove = vdpa_dev_remove,
};
@@ -68,6 +149,7 @@ static void vdpa_release_dev(struct device *d)
ida_simple_remove(&vdpa_index_ida, vdev->index);
mutex_destroy(&vdev->cf_mutex);
+ kfree(vdev->driver_override);
kfree(vdev);
}
@@ -300,6 +382,21 @@ void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
+static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ const struct vdpa_config_ops *ops = vdev->config;
+
+ /*
+ * Config accesses aren't supposed to trigger before features are set.
+ * If they do happen, we assume a legacy guest.
+ */
+ if (!vdev->features_valid)
+ vdpa_set_features(vdev, 0, true);
+ ops->get_config(vdev, offset, buf, len);
+}
+
/**
* vdpa_get_config - Get one or more device configuration fields.
* @vdev: vdpa device to operate on
@@ -310,16 +407,8 @@ EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len)
{
- const struct vdpa_config_ops *ops = vdev->config;
-
mutex_lock(&vdev->cf_mutex);
- /*
- * Config accesses aren't supposed to trigger before features are set.
- * If it does happen we assume a legacy guest.
- */
- if (!vdev->features_valid)
- vdpa_set_features(vdev, 0);
- ops->get_config(vdev, offset, buf, len);
+ vdpa_get_config_unlocked(vdev, offset, buf, len);
mutex_unlock(&vdev->cf_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
@@ -414,6 +503,16 @@ static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *m
err = -EMSGSIZE;
goto msg_err;
}
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
+ mdev->max_supported_vqs)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
+ mdev->supported_features, VDPA_ATTR_PAD)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
genlmsg_end(msg, hdr);
return 0;
@@ -480,8 +579,9 @@ out:
return msg->len;
}
-#define VDPA_DEV_NET_ATTRS_MASK ((1 << VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
- (1 << VDPA_ATTR_DEV_NET_CFG_MTU))
+#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
+ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \
+ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
@@ -500,12 +600,22 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
- config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
}
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
config.net.mtu =
nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
- config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MTU);
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
+ }
+ if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
+ config.net.max_vq_pairs =
+ nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
+ if (!config.net.max_vq_pairs) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "At least one pair of VQs is required");
+ return -EINVAL;
+ }
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
}
/* Skip checking capability if user didn't prefer to configure any
@@ -707,7 +817,7 @@ static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
{
u16 val_u16;
- if ((features & (1ULL << VIRTIO_NET_F_MQ)) == 0)
+ if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
return 0;
val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
@@ -720,7 +830,7 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
u64 features;
u16 val_u16;
- vdpa_get_config(vdev, 0, &config, sizeof(config));
+ vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
config.mac))
@@ -734,7 +844,10 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
return -EMSGSIZE;
- features = vdev->config->get_features(vdev);
+ features = vdev->config->get_driver_features(vdev);
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
+ VDPA_ATTR_PAD))
+ return -EMSGSIZE;
return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
}
@@ -745,12 +858,23 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
{
u32 device_id;
void *hdr;
+ u8 status;
int err;
+ mutex_lock(&vdev->cf_mutex);
+ status = vdev->config->get_status(vdev);
+ if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
+ err = -EAGAIN;
+ goto out;
+ }
+
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
VDPA_CMD_DEV_CONFIG_GET);
- if (!hdr)
- return -EMSGSIZE;
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto out;
+ }
if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
err = -EMSGSIZE;
@@ -774,11 +898,14 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
if (err)
goto msg_err;
+ mutex_unlock(&vdev->cf_mutex);
genlmsg_end(msg, hdr);
return 0;
msg_err:
genlmsg_cancel(msg, hdr);
+out:
+ mutex_unlock(&vdev->cf_mutex);
return err;
}
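Two patterns are worth noting in the vdpa core hunks above. First, userspace can now pin a device to one bus driver by writing the driver name into the new sysfs attribute (for example, writing vhost_vdpa into /sys/bus/vdpa/devices/<dev>/driver_override; the device name is illustrative). Second, vdpa_get_config() is split into a locking wrapper around an unlocked helper so that vdpa_dev_config_fill(), which already holds cf_mutex, can read the config without self-deadlock. A small userspace sketch of that lock-splitting idiom:

#include <pthread.h>

struct cfg_dev {
	pthread_mutex_t cf_mutex;
	int config;			/* stands in for the config space */
};

/* Callers already holding cf_mutex use the unlocked helper... */
static int get_config_unlocked(struct cfg_dev *d)
{
	return d->config;
}

/* ...while the public entry point takes the lock itself. */
static int get_config(struct cfg_dev *d)
{
	int v;

	pthread_mutex_lock(&d->cf_mutex);
	v = get_config_unlocked(d);
	pthread_mutex_unlock(&d->cf_mutex);
	return v;
}

int main(void)
{
	struct cfg_dev d = { PTHREAD_MUTEX_INITIALIZER, 42 };

	return get_config(&d) == 42 ? 0 : 1;
}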
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 41b0cd17fcba..ddbe142af09a 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -399,14 +399,14 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
return VDPASIM_QUEUE_ALIGN;
}
-static u64 vdpasim_get_features(struct vdpa_device *vdpa)
+static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
return vdpasim->dev_attr.supported_features;
}
-static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
+static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -419,6 +419,13 @@ static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->features;
+}
+
static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
struct vdpa_callback *cb)
{
@@ -613,8 +620,9 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
- .get_features = vdpasim_get_features,
- .set_features = vdpasim_set_features,
+ .get_device_features = vdpasim_get_device_features,
+ .set_driver_features = vdpasim_set_driver_features,
+ .get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
.get_vq_num_max = vdpasim_get_vq_num_max,
.get_device_id = vdpasim_get_device_id,
@@ -642,8 +650,9 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
- .get_features = vdpasim_get_features,
- .set_features = vdpasim_set_features,
+ .get_device_features = vdpasim_get_device_features,
+ .set_driver_features = vdpasim_set_driver_features,
+ .get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
.get_vq_num_max = vdpasim_get_vq_num_max,
.get_device_id = vdpasim_get_device_id,
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 76dd24abc791..d5324f6fd8c7 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -191,6 +191,8 @@ static struct vdpa_mgmt_dev mgmt_dev = {
.ops = &vdpasim_net_mgmtdev_ops,
.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
1 << VDPA_ATTR_DEV_NET_CFG_MTU),
+ .max_supported_vqs = VDPASIM_NET_VQ_NUM,
+ .supported_features = VDPASIM_NET_FEATURES,
};
static int __init vdpasim_net_init(void)
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 1daae2608860..2b1143f11d8f 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -292,14 +292,6 @@ vduse_domain_alloc_iova(struct iova_domain *iovad,
unsigned long iova_len = iova_align(iovad, size) >> shift;
unsigned long iova_pfn;
- /*
- * Freeing non-power-of-two-sized allocations back into the IOVA caches
- * will come back to bite us badly, so we have to waste a bit of space
- * rounding up anything cacheable to make sure that can't happen. The
- * order of the unadjusted size will still match upon freeing.
- */
- if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
- iova_len = roundup_pow_of_two(iova_len);
iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
return iova_pfn << shift;
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index eddcb64a910a..f85d1a08ed87 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -573,14 +573,14 @@ static u32 vduse_vdpa_get_vq_align(struct vdpa_device *vdpa)
return dev->vq_align;
}
-static u64 vduse_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 vduse_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
return dev->device_features;
}
-static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int vduse_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
@@ -588,6 +588,13 @@ static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 vduse_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->driver_features;
+}
+
static void vduse_vdpa_set_config_cb(struct vdpa_device *vdpa,
struct vdpa_callback *cb)
{
@@ -721,8 +728,9 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
.set_vq_state = vduse_vdpa_set_vq_state,
.get_vq_state = vduse_vdpa_get_vq_state,
.get_vq_align = vduse_vdpa_get_vq_align,
- .get_features = vduse_vdpa_get_features,
- .set_features = vduse_vdpa_set_features,
+ .get_device_features = vduse_vdpa_get_device_features,
+ .set_driver_features = vduse_vdpa_set_driver_features,
+ .get_driver_features = vduse_vdpa_get_driver_features,
.set_config_cb = vduse_vdpa_set_config_cb,
.get_vq_num_max = vduse_vdpa_get_vq_num_max,
.get_device_id = vduse_vdpa_get_device_id,
@@ -1357,7 +1365,6 @@ err_domain:
err_str:
vduse_dev_destroy(dev);
err:
- kvfree(config_buf);
return ret;
}
@@ -1408,6 +1415,8 @@ static long vduse_ioctl(struct file *file, unsigned int cmd,
}
config.name[VDUSE_NAME_MAX - 1] = '\0';
ret = vduse_create_dev(&config, buf, control->api_version);
+ if (ret)
+ kvfree(buf);
break;
}
case VDUSE_DESTROY_DEV: {
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index e3ff7875e123..a57e381e830b 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -53,14 +53,14 @@ static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
return &vp_vdpa->mdev;
}
-static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return vp_modern_get_features(mdev);
}
-static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
@@ -69,6 +69,13 @@ static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_driver_features(mdev);
+}
+
static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
@@ -415,8 +422,9 @@ vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
}
static const struct vdpa_config_ops vp_vdpa_ops = {
- .get_features = vp_vdpa_get_features,
- .set_features = vp_vdpa_set_features,
+ .get_device_features = vp_vdpa_get_device_features,
+ .set_driver_features = vp_vdpa_set_driver_features,
+ .get_driver_features = vp_vdpa_get_driver_features,
.get_status = vp_vdpa_get_status,
.set_status = vp_vdpa_set_status,
.reset = vp_vdpa_reset,
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index 77e584093a23..7b428eac3d3e 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -67,7 +67,7 @@ static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev,
int hwirq;
int ret;
- hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
+ hwirq = vdev->mc_dev->irqs[index]->virq;
if (irq->trigger) {
free_irq(hwirq, irq);
kfree(irq->name);
@@ -137,7 +137,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
return vfio_set_trigger(vdev, index, fd);
}
- hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
+ hwirq = vdev->mc_dev->irqs[index]->virq;
irq = &vdev->mc_irqs[index];
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index 362f91ec8845..352c725ccf18 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -309,13 +309,14 @@ static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
if ((pos & 3) && size > 2) {
u16 val;
+ __le16 lval;
ret = pci_user_read_config_word(pdev, pos, &val);
if (ret)
return ret;
- val = cpu_to_le16(val);
- if (copy_to_user(buf + count - size, &val, 2))
+ lval = cpu_to_le16(val);
+ if (copy_to_user(buf + count - size, &lval, 2))
return -EFAULT;
pos += 2;
@@ -324,13 +325,14 @@ static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
while (size > 3) {
u32 val;
+ __le32 lval;
ret = pci_user_read_config_dword(pdev, pos, &val);
if (ret)
return ret;
- val = cpu_to_le32(val);
- if (copy_to_user(buf + count - size, &val, 4))
+ lval = cpu_to_le32(val);
+ if (copy_to_user(buf + count - size, &lval, 4))
return -EFAULT;
pos += 4;
@@ -339,13 +341,14 @@ static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
while (size >= 2) {
u16 val;
+ __le16 lval;
ret = pci_user_read_config_word(pdev, pos, &val);
if (ret)
return ret;
- val = cpu_to_le16(val);
- if (copy_to_user(buf + count - size, &val, 2))
+ lval = cpu_to_le16(val);
+ if (copy_to_user(buf + count - size, &lval, 2))
return -EFAULT;
pos += 2;
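The vfio_pci_igd_cfg_rw() change is purely a type fix: the converted value now lands in a dedicated __le16/__le32 temporary instead of being written back into the native-endian variable, which keeps sparse happy and makes it explicit that only wire-format bytes reach copy_to_user(). Roughly, in portable C, with uint16_t standing in for __le16 and the conversion spelled out byte by byte:

#include <stdint.h>
#include <string.h>

/* Portable stand-in for cpu_to_le16(): store bytes LSB-first. */
static uint16_t to_le16(uint16_t v)
{
	uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
	uint16_t out;

	memcpy(&out, b, sizeof(out));	/* out's memory is now LE */
	return out;
}

static void copy_word_le(uint16_t native, uint8_t *dst)
{
	uint16_t lval = to_le16(native);	/* distinct LE container */

	/* Copy the converted container, never the native variable. */
	memcpy(dst, &lval, sizeof(lval));
}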
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index f17490ab238f..9394aa9444c1 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -256,7 +256,7 @@ static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
static void vfio_dma_bitmap_free(struct vfio_dma *dma)
{
- kfree(dma->bitmap);
+ kvfree(dma->bitmap);
dma->bitmap = NULL;
}
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index a09dedc79f68..05740cba1cd8 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -166,6 +166,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_test_flush(n);
+ kfree(n->dev.vqs);
kfree(n);
return 0;
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index e3c4f059b21a..851539807bc9 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -170,7 +170,7 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
* Userspace shouldn't remove status bits unless reset the
* status to 0.
*/
- if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
+ if (status != 0 && (status_old & ~status) != 0)
return -EINVAL;
if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -178,11 +178,11 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
vhost_vdpa_unsetup_vq_irq(v, i);
if (status == 0) {
- ret = ops->reset(vdpa);
+ ret = vdpa_reset(vdpa);
if (ret)
return ret;
} else
- ops->set_status(vdpa, status);
+ vdpa_set_status(vdpa, status);
if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
for (i = 0; i < nvqs; i++)
@@ -195,7 +195,7 @@ static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
struct vhost_vdpa_config *c)
{
struct vdpa_device *vdpa = v->vdpa;
- long size = vdpa->config->get_config_size(vdpa);
+ size_t size = vdpa->config->get_config_size(vdpa);
if (c->len == 0 || c->off > size)
return -EINVAL;
@@ -262,7 +262,7 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
const struct vdpa_config_ops *ops = vdpa->config;
u64 features;
- features = ops->get_features(vdpa);
+ features = ops->get_device_features(vdpa);
if (copy_to_user(featurep, &features, sizeof(features)))
return -EFAULT;
@@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
- if (vdpa_set_features(vdpa, features))
+ if (vdpa_set_features(vdpa, features, false))
return -EINVAL;
return 0;
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index e94932c69f54..2b9e2bbbb03e 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -5,6 +5,7 @@
* Copyright (C) 2011 Texas Instruments
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -170,22 +171,6 @@ static int lp855x_configure(struct lp855x *lp)
int i, ret;
struct lp855x_platform_data *pd = lp->pdata;
- switch (lp->chip_id) {
- case LP8550:
- case LP8551:
- case LP8552:
- case LP8553:
- case LP8556:
- lp->cfg = &lp855x_dev_cfg;
- break;
- case LP8555:
- case LP8557:
- lp->cfg = &lp8557_dev_cfg;
- break;
- default:
- return -EINVAL;
- }
-
if (lp->cfg->pre_init_device) {
ret = lp->cfg->pre_init_device(lp);
if (ret) {
@@ -346,7 +331,7 @@ static int lp855x_parse_dt(struct lp855x *lp)
{
struct device *dev = lp->dev;
struct device_node *node = dev->of_node;
- struct lp855x_platform_data *pdata;
+ struct lp855x_platform_data *pdata = lp->pdata;
int rom_length;
if (!node) {
@@ -354,10 +339,6 @@ static int lp855x_parse_dt(struct lp855x *lp)
return -EINVAL;
}
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
of_property_read_string(node, "bl-name", &pdata->name);
of_property_read_u8(node, "dev-ctrl", &pdata->device_control);
of_property_read_u8(node, "init-brt", &pdata->initial_brightness);
@@ -384,8 +365,6 @@ static int lp855x_parse_dt(struct lp855x *lp)
pdata->rom_data = &rom[0];
}
- lp->pdata = pdata;
-
return 0;
}
#else
@@ -395,28 +374,89 @@ static int lp855x_parse_dt(struct lp855x *lp)
}
#endif
+static int lp855x_parse_acpi(struct lp855x *lp)
+{
+ int ret;
+
+ /*
+ * On ACPI the device has already been initialized by the firmware
+ * and is in register mode, so we can read back the settings from
+ * the registers.
+ */
+ ret = i2c_smbus_read_byte_data(lp->client, lp->cfg->reg_brightness);
+ if (ret < 0)
+ return ret;
+
+ lp->pdata->initial_brightness = ret;
+
+ ret = i2c_smbus_read_byte_data(lp->client, lp->cfg->reg_devicectrl);
+ if (ret < 0)
+ return ret;
+
+ lp->pdata->device_control = ret;
+ return 0;
+}
+
static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
+ const struct acpi_device_id *acpi_id = NULL;
+ struct device *dev = &cl->dev;
struct lp855x *lp;
int ret;
if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
return -EIO;
- lp = devm_kzalloc(&cl->dev, sizeof(struct lp855x), GFP_KERNEL);
+ lp = devm_kzalloc(dev, sizeof(struct lp855x), GFP_KERNEL);
if (!lp)
return -ENOMEM;
lp->client = cl;
- lp->dev = &cl->dev;
- lp->chipname = id->name;
- lp->chip_id = id->driver_data;
- lp->pdata = dev_get_platdata(&cl->dev);
+ lp->dev = dev;
+ lp->pdata = dev_get_platdata(dev);
+
+ if (id) {
+ lp->chipname = id->name;
+ lp->chip_id = id->driver_data;
+ } else {
+ acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!acpi_id)
+ return -ENODEV;
+
+ lp->chipname = acpi_id->id;
+ lp->chip_id = acpi_id->driver_data;
+ }
+
+ switch (lp->chip_id) {
+ case LP8550:
+ case LP8551:
+ case LP8552:
+ case LP8553:
+ case LP8556:
+ lp->cfg = &lp855x_dev_cfg;
+ break;
+ case LP8555:
+ case LP8557:
+ lp->cfg = &lp8557_dev_cfg;
+ break;
+ default:
+ return -EINVAL;
+ }
if (!lp->pdata) {
- ret = lp855x_parse_dt(lp);
- if (ret < 0)
- return ret;
+ lp->pdata = devm_kzalloc(dev, sizeof(*lp->pdata), GFP_KERNEL);
+ if (!lp->pdata)
+ return -ENOMEM;
+
+ if (id) {
+ ret = lp855x_parse_dt(lp);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = lp855x_parse_acpi(lp);
+ if (ret < 0)
+ return ret;
+ }
}
if (lp->pdata->period_ns > 0)
@@ -424,30 +464,27 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
else
lp->mode = REGISTER_BASED;
- lp->supply = devm_regulator_get(lp->dev, "power");
+ lp->supply = devm_regulator_get(dev, "power");
if (IS_ERR(lp->supply)) {
if (PTR_ERR(lp->supply) == -EPROBE_DEFER)
return -EPROBE_DEFER;
lp->supply = NULL;
}
- lp->enable = devm_regulator_get_optional(lp->dev, "enable");
+ lp->enable = devm_regulator_get_optional(dev, "enable");
if (IS_ERR(lp->enable)) {
ret = PTR_ERR(lp->enable);
if (ret == -ENODEV) {
lp->enable = NULL;
} else {
- if (ret != -EPROBE_DEFER)
- dev_err(lp->dev, "error getting enable regulator: %d\n",
- ret);
- return ret;
+ return dev_err_probe(dev, ret, "getting enable regulator\n");
}
}
if (lp->supply) {
ret = regulator_enable(lp->supply);
if (ret < 0) {
- dev_err(&cl->dev, "failed to enable supply: %d\n", ret);
+ dev_err(dev, "failed to enable supply: %d\n", ret);
return ret;
}
}
@@ -455,7 +492,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
if (lp->enable) {
ret = regulator_enable(lp->enable);
if (ret < 0) {
- dev_err(lp->dev, "failed to enable vddio: %d\n", ret);
+ dev_err(dev, "failed to enable vddio: %d\n", ret);
goto disable_supply;
}
@@ -470,20 +507,19 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
ret = lp855x_configure(lp);
if (ret) {
- dev_err(lp->dev, "device config err: %d", ret);
+ dev_err(dev, "device config err: %d", ret);
goto disable_vddio;
}
ret = lp855x_backlight_register(lp);
if (ret) {
- dev_err(lp->dev,
- "failed to register backlight. err: %d\n", ret);
+ dev_err(dev, "failed to register backlight. err: %d\n", ret);
goto disable_vddio;
}
- ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group);
+ ret = sysfs_create_group(&dev->kobj, &lp855x_attr_group);
if (ret) {
- dev_err(lp->dev, "failed to register sysfs. err: %d\n", ret);
+ dev_err(dev, "failed to register sysfs. err: %d\n", ret);
goto disable_vddio;
}
@@ -540,10 +576,20 @@ static const struct i2c_device_id lp855x_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, lp855x_ids);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id lp855x_acpi_match[] = {
+ /* Xiaomi-specific HID used for the LP8556 on the Mi Pad 2 */
+ { "XMCC0001", LP8556 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, lp855x_acpi_match);
+#endif
+
static struct i2c_driver lp855x_driver = {
.driver = {
.name = "lp855x",
.of_match_table = of_match_ptr(lp855x_dt_ids),
+ .acpi_match_table = ACPI_PTR(lp855x_acpi_match),
},
.probe = lp855x_probe,
.remove = lp855x_remove,
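
The regulator error path above adopts dev_err_probe(), which returns the
error it is given, logs it for real failures, and records a deferral reason
instead of logging when the error is -EPROBE_DEFER. A sketch of the idiom in
a hypothetical probe helper:

        static int example_get_clk(struct device *dev, struct clk **clk)
        {
                *clk = devm_clk_get(dev, NULL);
                if (IS_ERR(*clk))
                        return dev_err_probe(dev, PTR_ERR(*clk),
                                             "failed to get clock\n");
                return 0;
        }
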
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index d094299c2a48..306bcc6ccb92 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -231,14 +231,14 @@ struct wled {
static int wled3_set_brightness(struct wled *wled, u16 brightness)
{
int rc, i;
- u8 v[2];
+ __le16 v;
- v[0] = brightness & 0xff;
- v[1] = (brightness >> 8) & 0xf;
+ v = cpu_to_le16(brightness & WLED3_SINK_REG_BRIGHT_MAX);
- for (i = 0; i < wled->cfg.num_strings; ++i) {
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
rc = regmap_bulk_write(wled->regmap, wled->ctrl_addr +
- WLED3_SINK_REG_BRIGHT(i), v, 2);
+ WLED3_SINK_REG_BRIGHT(wled->cfg.enabled_strings[i]),
+ &v, sizeof(v));
if (rc < 0)
return rc;
}
@@ -250,18 +250,18 @@ static int wled4_set_brightness(struct wled *wled, u16 brightness)
{
int rc, i;
u16 low_limit = wled->max_brightness * 4 / 1000;
- u8 v[2];
+ __le16 v;
/* WLED4's lower limit of operation is 0.4% */
if (brightness > 0 && brightness < low_limit)
brightness = low_limit;
- v[0] = brightness & 0xff;
- v[1] = (brightness >> 8) & 0xf;
+ v = cpu_to_le16(brightness & WLED3_SINK_REG_BRIGHT_MAX);
- for (i = 0; i < wled->cfg.num_strings; ++i) {
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
rc = regmap_bulk_write(wled->regmap, wled->sink_addr +
- WLED4_SINK_REG_BRIGHT(i), v, 2);
+ WLED4_SINK_REG_BRIGHT(wled->cfg.enabled_strings[i]),
+ &v, sizeof(v));
if (rc < 0)
return rc;
}
@@ -273,21 +273,20 @@ static int wled5_set_brightness(struct wled *wled, u16 brightness)
{
int rc, offset;
u16 low_limit = wled->max_brightness * 1 / 1000;
- u8 v[2];
+ __le16 v;
/* WLED5's lower limit is 0.1% */
if (brightness < low_limit)
brightness = low_limit;
- v[0] = brightness & 0xff;
- v[1] = (brightness >> 8) & 0x7f;
+ v = cpu_to_le16(brightness & WLED5_SINK_REG_BRIGHT_MAX_15B);
offset = (wled->cfg.mod_sel == MOD_A) ?
WLED5_SINK_REG_MOD_A_BRIGHTNESS_LSB :
WLED5_SINK_REG_MOD_B_BRIGHTNESS_LSB;
rc = regmap_bulk_write(wled->regmap, wled->sink_addr + offset,
- v, 2);
+ &v, sizeof(v));
return rc;
}
@@ -572,7 +571,7 @@ unlock_mutex:
static void wled_auto_string_detection(struct wled *wled)
{
- int rc = 0, i, delay_time_us;
+ int rc = 0, i, j, delay_time_us;
u32 sink_config = 0;
u8 sink_test = 0, sink_valid = 0, val;
bool fault_set;
@@ -619,14 +618,15 @@ static void wled_auto_string_detection(struct wled *wled)
/* Iterate through the strings one by one */
for (i = 0; i < wled->cfg.num_strings; i++) {
- sink_test = BIT((WLED4_SINK_REG_CURR_SINK_SHFT + i));
+ j = wled->cfg.enabled_strings[i];
+ sink_test = BIT((WLED4_SINK_REG_CURR_SINK_SHFT + j));
/* Enable feedback control */
rc = regmap_write(wled->regmap, wled->ctrl_addr +
- WLED3_CTRL_REG_FEEDBACK_CONTROL, i + 1);
+ WLED3_CTRL_REG_FEEDBACK_CONTROL, j + 1);
if (rc < 0) {
dev_err(wled->dev, "Failed to enable feedback for SINK %d rc = %d\n",
- i + 1, rc);
+ j + 1, rc);
goto failed_detect;
}
@@ -635,7 +635,7 @@ static void wled_auto_string_detection(struct wled *wled)
WLED4_SINK_REG_CURR_SINK, sink_test);
if (rc < 0) {
dev_err(wled->dev, "Failed to configure SINK %d rc=%d\n",
- i + 1, rc);
+ j + 1, rc);
goto failed_detect;
}
@@ -662,7 +662,7 @@ static void wled_auto_string_detection(struct wled *wled)
if (fault_set)
dev_dbg(wled->dev, "WLED OVP fault detected with SINK %d\n",
- i + 1);
+ j + 1);
else
sink_valid |= sink_test;
@@ -702,15 +702,16 @@ static void wled_auto_string_detection(struct wled *wled)
/* Enable valid sinks */
if (wled->version == 4) {
for (i = 0; i < wled->cfg.num_strings; i++) {
+ j = wled->cfg.enabled_strings[i];
if (sink_config &
- BIT(WLED4_SINK_REG_CURR_SINK_SHFT + i))
+ BIT(WLED4_SINK_REG_CURR_SINK_SHFT + j))
val = WLED4_SINK_REG_STR_MOD_MASK;
else
/* Disable modulator_en for unused sink */
val = 0;
rc = regmap_write(wled->regmap, wled->sink_addr +
- WLED4_SINK_REG_STR_MOD_EN(i), val);
+ WLED4_SINK_REG_STR_MOD_EN(j), val);
if (rc < 0) {
dev_err(wled->dev, "Failed to configure MODULATOR_EN rc=%d\n",
rc);
@@ -949,7 +950,7 @@ static const struct wled_config wled3_config_defaults = {
.cs_out_en = false,
.ext_gen = false,
.cabc = false,
- .enabled_strings = {0, 1, 2, 3},
+ .enabled_strings = {0, 1, 2},
};
static int wled4_setup(struct wled *wled)
@@ -1080,6 +1081,7 @@ static const struct wled_config wled4_config_defaults = {
.cabc = false,
.external_pfet = false,
.auto_detection_enabled = false,
+ .enabled_strings = {0, 1, 2, 3},
};
static int wled5_setup(struct wled *wled)
@@ -1193,6 +1195,7 @@ static const struct wled_config wled5_config_defaults = {
.cabc = false,
.external_pfet = false,
.auto_detection_enabled = false,
+ .enabled_strings = {0, 1, 2, 3},
};
static const u32 wled3_boost_i_limit_values[] = {
@@ -1256,21 +1259,6 @@ static const struct wled_var_cfg wled5_ovp_cfg = {
.size = 16,
};
-static u32 wled3_num_strings_values_fn(u32 idx)
-{
- return idx + 1;
-}
-
-static const struct wled_var_cfg wled3_num_strings_cfg = {
- .fn = wled3_num_strings_values_fn,
- .size = 3,
-};
-
-static const struct wled_var_cfg wled4_num_strings_cfg = {
- .fn = wled3_num_strings_values_fn,
- .size = 4,
-};
-
static u32 wled3_switch_freq_values_fn(u32 idx)
{
return 19200 / (2 * (1 + idx));
@@ -1344,11 +1332,6 @@ static int wled_configure(struct wled *wled)
.val_ptr = &cfg->switch_freq,
.cfg = &wled3_switch_freq_cfg,
},
- {
- .name = "qcom,num-strings",
- .val_ptr = &cfg->num_strings,
- .cfg = &wled3_num_strings_cfg,
- },
};
const struct wled_u32_opts wled4_opts[] = {
@@ -1372,11 +1355,6 @@ static int wled_configure(struct wled *wled)
.val_ptr = &cfg->switch_freq,
.cfg = &wled3_switch_freq_cfg,
},
- {
- .name = "qcom,num-strings",
- .val_ptr = &cfg->num_strings,
- .cfg = &wled4_num_strings_cfg,
- },
};
const struct wled_u32_opts wled5_opts[] = {
@@ -1401,11 +1379,6 @@ static int wled_configure(struct wled *wled)
.cfg = &wled3_switch_freq_cfg,
},
{
- .name = "qcom,num-strings",
- .val_ptr = &cfg->num_strings,
- .cfg = &wled4_num_strings_cfg,
- },
- {
.name = "qcom,modulator-sel",
.val_ptr = &cfg->mod_sel,
.cfg = &wled5_mod_sel_cfg,
@@ -1523,16 +1496,57 @@ static int wled_configure(struct wled *wled)
*bool_opts[i].val_ptr = true;
}
- cfg->num_strings = cfg->num_strings + 1;
-
string_len = of_property_count_elems_of_size(dev->of_node,
"qcom,enabled-strings",
sizeof(u32));
- if (string_len > 0)
- of_property_read_u32_array(dev->of_node,
+ if (string_len > 0) {
+ if (string_len > wled->max_string_count) {
+ dev_err(dev, "Cannot have more than %d strings\n",
+ wled->max_string_count);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(dev->of_node,
"qcom,enabled-strings",
wled->cfg.enabled_strings,
- sizeof(u32));
+ string_len);
+ if (rc) {
+ dev_err(dev, "Failed to read %d elements from qcom,enabled-strings: %d\n",
+ string_len, rc);
+ return rc;
+ }
+
+ for (i = 0; i < string_len; ++i) {
+ if (wled->cfg.enabled_strings[i] >= wled->max_string_count) {
+ dev_err(dev,
+ "qcom,enabled-strings index %d at %d is out of bounds\n",
+ wled->cfg.enabled_strings[i], i);
+ return -EINVAL;
+ }
+ }
+
+ cfg->num_strings = string_len;
+ }
+
+ rc = of_property_read_u32(dev->of_node, "qcom,num-strings", &val);
+ if (!rc) {
+ if (val < 1 || val > wled->max_string_count) {
+ dev_err(dev, "qcom,num-strings must be between 1 and %d\n",
+ wled->max_string_count);
+ return -EINVAL;
+ }
+
+ if (string_len > 0) {
+ dev_warn(dev, "Only one of qcom,num-strings or qcom,enabled-strings"
+ " should be set\n");
+ if (val > string_len) {
+ dev_err(dev, "qcom,num-strings exceeds qcom,enabled-strings\n");
+ return -EINVAL;
+ }
+ }
+
+ cfg->num_strings = val;
+ }
return 0;
}
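
Two patterns recur in the qcom-wled changes above: the brightness value is
packed once with cpu_to_le16() and written with a single regmap_bulk_write(),
and every register index goes through cfg.enabled_strings[] rather than the
raw loop counter, so sparse string layouts address the correct sink. A small
sketch of the endianness point (assumed value; WLED3_SINK_REG_BRIGHT_MAX is
presumably 0xfff, matching the old per-byte masks):

        u16 brightness = 0xabc;
        __le16 v = cpu_to_le16(brightness);     /* bytes in memory: bc 0a */
        u8 old_style[2] = {
                brightness & 0xff,              /* 0xbc */
                (brightness >> 8) & 0xf,        /* 0x0a */
        };
        /* regmap_bulk_write(map, reg, &v, sizeof(v)) emits the LSB first
         * on any host endianness, exactly like the old two-byte array. */
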
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 9a49ea6b5112..576612f18d59 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -97,30 +97,9 @@ static int vga_video_font_height;
static int vga_scan_lines __read_mostly;
static unsigned int vga_rolled_over; /* last vc_origin offset before wrap */
-static bool vgacon_text_mode_force;
static bool vga_hardscroll_enabled;
static bool vga_hardscroll_user_enable = true;
-bool vgacon_text_force(void)
-{
- return vgacon_text_mode_force;
-}
-EXPORT_SYMBOL(vgacon_text_force);
-
-static int __init text_mode(char *str)
-{
- vgacon_text_mode_force = true;
-
- pr_warn("You have booted with nomodeset. This means your GPU drivers are DISABLED\n");
- pr_warn("Any video related functionality will be severely degraded, and you may not even be able to suspend the system properly\n");
- pr_warn("Unless you actually understand what nomodeset does, you should reboot without enabling it\n");
-
- return 1;
-}
-
-/* force text mode - used by kernel modesetting */
-__setup("nomodeset", text_mode);
-
static int __init no_scroll(char *str)
{
/*
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 65dae05fff8e..26892940c213 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -230,7 +230,7 @@ static ssize_t show_bpp(struct device *device, struct device_attribute *attr,
char *buf)
{
struct fb_info *fb_info = dev_get_drvdata(device);
- return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.bits_per_pixel);
+ return sysfs_emit(buf, "%d\n", fb_info->var.bits_per_pixel);
}
static ssize_t store_rotate(struct device *device,
@@ -257,7 +257,7 @@ static ssize_t show_rotate(struct device *device,
{
struct fb_info *fb_info = dev_get_drvdata(device);
- return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.rotate);
+ return sysfs_emit(buf, "%d\n", fb_info->var.rotate);
}
static ssize_t store_virtual(struct device *device,
@@ -285,7 +285,7 @@ static ssize_t show_virtual(struct device *device,
struct device_attribute *attr, char *buf)
{
struct fb_info *fb_info = dev_get_drvdata(device);
- return snprintf(buf, PAGE_SIZE, "%d,%d\n", fb_info->var.xres_virtual,
+ return sysfs_emit(buf, "%d,%d\n", fb_info->var.xres_virtual,
fb_info->var.yres_virtual);
}
@@ -293,7 +293,7 @@ static ssize_t show_stride(struct device *device,
struct device_attribute *attr, char *buf)
{
struct fb_info *fb_info = dev_get_drvdata(device);
- return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->fix.line_length);
+ return sysfs_emit(buf, "%d\n", fb_info->fix.line_length);
}
static ssize_t store_blank(struct device *device,
@@ -381,7 +381,7 @@ static ssize_t show_pan(struct device *device,
struct device_attribute *attr, char *buf)
{
struct fb_info *fb_info = dev_get_drvdata(device);
- return snprintf(buf, PAGE_SIZE, "%d,%d\n", fb_info->var.xoffset,
+ return sysfs_emit(buf, "%d,%d\n", fb_info->var.xoffset,
fb_info->var.yoffset);
}
@@ -390,7 +390,7 @@ static ssize_t show_name(struct device *device,
{
struct fb_info *fb_info = dev_get_drvdata(device);
- return snprintf(buf, PAGE_SIZE, "%s\n", fb_info->fix.id);
+ return sysfs_emit(buf, "%s\n", fb_info->fix.id);
}
static ssize_t store_fbstate(struct device *device,
@@ -418,7 +418,7 @@ static ssize_t show_fbstate(struct device *device,
struct device_attribute *attr, char *buf)
{
struct fb_info *fb_info = dev_get_drvdata(device);
- return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->state);
+ return sysfs_emit(buf, "%d\n", fb_info->state);
}
#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
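
sysfs_emit(), used throughout the conversion above, is the preferred
replacement for snprintf(buf, PAGE_SIZE, ...) in sysfs show() callbacks: it
knows the buffer is exactly one page, verifies the pointer is page-aligned,
and cannot overrun. Every converted handler follows the same shape
(hypothetical attribute):

        static ssize_t example_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
        {
                return sysfs_emit(buf, "%d\n", 42);
        }
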
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 23999df52739..c8e0ea27caf1 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -287,8 +287,6 @@ struct hvfb_par {
static uint screen_width = HVFB_WIDTH;
static uint screen_height = HVFB_HEIGHT;
-static uint screen_width_max = HVFB_WIDTH;
-static uint screen_height_max = HVFB_HEIGHT;
static uint screen_depth;
static uint screen_fb_size;
static uint dio_fb_size; /* FB size for deferred IO */
@@ -582,7 +580,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
int ret = 0;
unsigned long t;
u8 index;
- int i;
memset(msg, 0, sizeof(struct synthvid_msg));
msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
@@ -613,13 +610,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
goto out;
}
- for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
- screen_width_max = max_t(unsigned int, screen_width_max,
- msg->resolution_resp.supported_resolution[i].width);
- screen_height_max = max_t(unsigned int, screen_height_max,
- msg->resolution_resp.supported_resolution[i].height);
- }
-
screen_width =
msg->resolution_resp.supported_resolution[index].width;
screen_height =
@@ -941,7 +931,7 @@ static void hvfb_get_option(struct fb_info *info)
if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
(synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
- (x > screen_width_max || y > screen_height_max)) ||
+ (x * y * screen_depth / 8 > screen_fb_size)) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
@@ -1194,8 +1184,8 @@ static int hvfb_probe(struct hv_device *hdev,
}
hvfb_get_option(info);
- pr_info("Screen resolution: %dx%d, Color depth: %d\n",
- screen_width, screen_height, screen_depth);
+ pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: %d\n",
+ screen_width, screen_height, screen_depth, screen_fb_size);
ret = hvfb_getmem(hdev, info);
if (ret) {
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 3d090d2d9ed9..b495c09e6102 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1555,6 +1555,7 @@ static void omapfb_free_resources(struct omapfb_device *fbdev, int state)
case 1:
dev_set_drvdata(fbdev->dev, NULL);
kfree(fbdev);
+ break;
case 0:
/* nothing to free */
break;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index 6dbe265b312d..8f355d1caf86 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -19,14 +19,14 @@
static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return sysfs_emit(buf, "%s\n",
dssdev->name ?
dssdev->name : "");
}
static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
omapdss_device_is_enabled(dssdev));
}
@@ -59,7 +59,7 @@ static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
dssdev->driver->get_te ?
dssdev->driver->get_te(dssdev) : 0);
}
@@ -93,7 +93,7 @@ static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf)
dssdev->driver->get_timings(dssdev, &t);
- return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
+ return sysfs_emit(buf, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
t.pixelclock,
t.x_res, t.hfp, t.hbp, t.hsw,
t.y_res, t.vfp, t.vbp, t.vsw);
@@ -143,7 +143,7 @@ static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf)
if (!dssdev->driver->get_rotate)
return -ENOENT;
rotate = dssdev->driver->get_rotate(dssdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
+ return sysfs_emit(buf, "%u\n", rotate);
}
static ssize_t display_rotate_store(struct omap_dss_device *dssdev,
@@ -171,7 +171,7 @@ static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf)
if (!dssdev->driver->get_mirror)
return -ENOENT;
mirror = dssdev->driver->get_mirror(dssdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
+ return sysfs_emit(buf, "%u\n", mirror);
}
static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
@@ -203,7 +203,7 @@ static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf)
wss = dssdev->driver->get_wss(dssdev);
- return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
+ return sysfs_emit(buf, "0x%05x\n", wss);
}
static ssize_t display_wss_store(struct omap_dss_device *dssdev,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index b52cc1af0959..3ffb1fe4a38a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -22,14 +22,14 @@
static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name);
+ return sysfs_emit(buf, "%s\n", mgr->name);
}
static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf)
{
struct omap_dss_device *dssdev = mgr->get_device(mgr);
- return snprintf(buf, PAGE_SIZE, "%s\n", dssdev ?
+ return sysfs_emit(buf, "%s\n", dssdev ?
dssdev->name : "<none>");
}
@@ -120,7 +120,7 @@ static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
mgr->get_manager_info(mgr, &info);
- return snprintf(buf, PAGE_SIZE, "%#x\n", info.default_color);
+ return sysfs_emit(buf, "%#x\n", info.default_color);
}
static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
@@ -165,7 +165,7 @@ static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
key_type = info.trans_key_type;
BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));
- return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
+ return sysfs_emit(buf, "%s\n", trans_key_type_str[key_type]);
}
static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
@@ -200,7 +200,7 @@ static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
mgr->get_manager_info(mgr, &info);
- return snprintf(buf, PAGE_SIZE, "%#x\n", info.trans_key);
+ return sysfs_emit(buf, "%#x\n", info.trans_key);
}
static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
@@ -236,7 +236,7 @@ static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
mgr->get_manager_info(mgr, &info);
- return snprintf(buf, PAGE_SIZE, "%d\n", info.trans_enabled);
+ return sysfs_emit(buf, "%d\n", info.trans_enabled);
}
static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
@@ -275,7 +275,7 @@ static ssize_t manager_alpha_blending_enabled_show(
mgr->get_manager_info(mgr, &info);
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
info.partial_alpha_enabled);
}
@@ -316,7 +316,7 @@ static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
mgr->get_manager_info(mgr, &info);
- return snprintf(buf, PAGE_SIZE, "%d\n", info.cpr_enable);
+ return sysfs_emit(buf, "%d\n", info.cpr_enable);
}
static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
@@ -358,7 +358,7 @@ static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
mgr->get_manager_info(mgr, &info);
- return snprintf(buf, PAGE_SIZE,
+ return sysfs_emit(buf,
"%d %d %d %d %d %d %d %d %d\n",
info.cpr_coefs.rr,
info.cpr_coefs.rg,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 36acf366213a..421dcb7564ad 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -22,12 +22,12 @@
static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name);
+ return sysfs_emit(buf, "%s\n", ovl->name);
}
static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return sysfs_emit(buf, "%s\n",
ovl->manager ? ovl->manager->name : "<none>");
}
@@ -108,7 +108,7 @@ static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
ovl->get_overlay_info(ovl, &info);
- return snprintf(buf, PAGE_SIZE, "%d,%d\n",
+ return sysfs_emit(buf, "%d,%d\n",
info.width, info.height);
}
@@ -118,7 +118,7 @@ static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf)
ovl->get_overlay_info(ovl, &info);
- return snprintf(buf, PAGE_SIZE, "%d\n", info.screen_width);
+ return sysfs_emit(buf, "%d\n", info.screen_width);
}
static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
@@ -127,7 +127,7 @@ static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
ovl->get_overlay_info(ovl, &info);
- return snprintf(buf, PAGE_SIZE, "%d,%d\n",
+ return sysfs_emit(buf, "%d,%d\n",
info.pos_x, info.pos_y);
}
@@ -166,7 +166,7 @@ static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf)
ovl->get_overlay_info(ovl, &info);
- return snprintf(buf, PAGE_SIZE, "%d,%d\n",
+ return sysfs_emit(buf, "%d,%d\n",
info.out_width, info.out_height);
}
@@ -201,7 +201,7 @@ static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ovl->is_enabled(ovl));
+ return sysfs_emit(buf, "%d\n", ovl->is_enabled(ovl));
}
static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
@@ -231,7 +231,7 @@ static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf)
ovl->get_overlay_info(ovl, &info);
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
info.global_alpha);
}
@@ -273,7 +273,7 @@ static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl,
ovl->get_overlay_info(ovl, &info);
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
info.pre_mult_alpha);
}
@@ -314,7 +314,7 @@ static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf)
ovl->get_overlay_info(ovl, &info);
- return snprintf(buf, PAGE_SIZE, "%d\n", info.zorder);
+ return sysfs_emit(buf, "%d\n", info.zorder);
}
static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 2d39dbfa742e..06dc41aa0354 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -29,7 +29,7 @@ static ssize_t show_rotate_type(struct device *dev,
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
- return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type);
+ return sysfs_emit(buf, "%d\n", ofbi->rotation_type);
}
static ssize_t store_rotate_type(struct device *dev,
@@ -83,7 +83,7 @@ static ssize_t show_mirror(struct device *dev,
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
- return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror);
+ return sysfs_emit(buf, "%d\n", ofbi->mirror);
}
static ssize_t store_mirror(struct device *dev,
@@ -415,7 +415,7 @@ static ssize_t show_size(struct device *dev,
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
- return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region->size);
+ return sysfs_emit(buf, "%lu\n", ofbi->region->size);
}
static ssize_t store_size(struct device *dev, struct device_attribute *attr,
@@ -492,7 +492,7 @@ static ssize_t show_phys(struct device *dev,
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
- return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region->paddr);
+ return sysfs_emit(buf, "%0x\n", ofbi->region->paddr);
}
static ssize_t show_virt(struct device *dev,
@@ -501,7 +501,7 @@ static ssize_t show_virt(struct device *dev,
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
- return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr);
+ return sysfs_emit(buf, "%p\n", ofbi->region->vaddr);
}
static ssize_t show_upd_mode(struct device *dev,
@@ -516,7 +516,7 @@ static ssize_t show_upd_mode(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode);
+ return sysfs_emit(buf, "%u\n", (unsigned int)mode);
}
static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index b63074fd892e..57541887188b 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -541,26 +541,7 @@ static struct platform_driver simplefb_driver = {
.remove = simplefb_remove,
};
-static int __init simplefb_init(void)
-{
- int ret;
- struct device_node *np;
-
- ret = platform_driver_register(&simplefb_driver);
- if (ret)
- return ret;
-
- if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) {
- for_each_child_of_node(of_chosen, np) {
- if (of_device_is_compatible(np, "simple-framebuffer"))
- of_platform_device_create(np, NULL, NULL);
- }
- }
-
- return 0;
-}
-
-fs_initcall(simplefb_init);
+module_platform_driver(simplefb_driver);
MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>");
MODULE_DESCRIPTION("Simple framebuffer driver");
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index e2757ff1c23d..96e312a3eac7 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -184,6 +184,25 @@ static inline void setindex(int index)
vga_io_w(VGA_GFX_I, index);
}
+/* Check if the video mode is supported by the driver */
+static inline int check_mode_supported(void)
+{
+ /* non-x86 architectures treat orig_video_isVGA as a boolean flag */
+#if defined(CONFIG_X86)
+ /* only EGA and VGA in 16 color graphic mode are supported */
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC &&
+ screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC)
+ return -ENODEV;
+
+ if (screen_info.orig_video_mode != 0x0D && /* 320x200/4 (EGA) */
+ screen_info.orig_video_mode != 0x0E && /* 640x200/4 (EGA) */
+ screen_info.orig_video_mode != 0x10 && /* 640x350/4 (EGA) */
+ screen_info.orig_video_mode != 0x12) /* 640x480/4 (VGA) */
+ return -ENODEV;
+#endif
+ return 0;
+}
+
static void vga16fb_pan_var(struct fb_info *info,
struct fb_var_screeninfo *var)
{
@@ -1422,6 +1441,11 @@ static int __init vga16fb_init(void)
vga16fb_setup(option);
#endif
+
+ ret = check_mode_supported();
+ if (ret)
+ return ret;
+
ret = platform_driver_register(&vga16fb_driver);
if (!ret) {
diff --git a/drivers/virt/acrn/ioreq.c b/drivers/virt/acrn/ioreq.c
index 80b2e3f0e276..5ff1c53740c0 100644
--- a/drivers/virt/acrn/ioreq.c
+++ b/drivers/virt/acrn/ioreq.c
@@ -246,8 +246,7 @@ void acrn_ioreq_request_clear(struct acrn_vm *vm)
spin_lock_bh(&vm->ioreq_clients_lock);
client = vm->default_client;
if (client) {
- vcpu = find_next_bit(client->ioreqs_map,
- ACRN_IO_REQUEST_MAX, 0);
+ vcpu = find_first_bit(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
while (vcpu < ACRN_IO_REQUEST_MAX) {
acrn_ioreq_complete_request(client, vcpu, NULL);
vcpu = find_next_bit(client->ioreqs_map,
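
find_first_bit(map, size) is the dedicated form of find_next_bit(map, size,
0) and can be cheaper on some configurations. When a loop like the one above
visits every set bit, the usual shorthand is the iterator from
<linux/bitops.h>; a sketch assuming identical semantics:

        unsigned int vcpu;

        for_each_set_bit(vcpu, client->ioreqs_map, ACRN_IO_REQUEST_MAX)
                acrn_ioreq_complete_request(client, vcpu, NULL);
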
diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig
index f53740b941c0..2d3d98158121 100644
--- a/drivers/virt/nitro_enclaves/Kconfig
+++ b/drivers/virt/nitro_enclaves/Kconfig
@@ -14,3 +14,12 @@ config NITRO_ENCLAVES
To compile this driver as a module, choose M here.
The module will be called nitro_enclaves.
+
+config NITRO_ENCLAVES_MISC_DEV_TEST
+ bool "Tests for the misc device functionality of the Nitro Enclaves"
+ depends on NITRO_ENCLAVES && KUNIT=y
+ help
+ Enable KUnit tests for the misc device functionality of the Nitro
+ Enclaves. Select this option only if you will boot the kernel for
+ the purpose of running unit tests (e.g. under UML or qemu). If
+ unsure, say N.
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index 6894ccb868a6..20c881b6a4b6 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -24,6 +24,7 @@
#include <linux/nitro_enclaves.h>
#include <linux/pci.h>
#include <linux/poll.h>
+#include <linux/range.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <uapi/linux/vm_sockets.h>
@@ -126,6 +127,16 @@ struct ne_cpu_pool {
static struct ne_cpu_pool ne_cpu_pool;
/**
+ * struct ne_phys_contig_mem_regions - Contiguous physical memory regions.
+ * @num: The number of regions currently in use.
+ * @regions: The array of physical memory regions.
+ */
+struct ne_phys_contig_mem_regions {
+ unsigned long num;
+ struct range *regions;
+};
+
+/**
* ne_check_enclaves_created() - Verify if at least one enclave has been created.
* @void: No parameters provided.
*
@@ -825,6 +836,72 @@ static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave,
}
/**
+ * ne_sanity_check_phys_mem_region() - Sanity check the start address and the size
+ * of a physical memory region.
+ * @phys_mem_region_paddr : Physical start address of the region to be sanity checked.
+ * @phys_mem_region_size : Length of the region to be sanity checked.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_sanity_check_phys_mem_region(u64 phys_mem_region_paddr,
+ u64 phys_mem_region_size)
+{
+ if (phys_mem_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Physical mem region size is not multiple of 2 MiB\n");
+
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(phys_mem_region_paddr, NE_MIN_MEM_REGION_SIZE)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Physical mem region address is not 2 MiB aligned\n");
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_merge_phys_contig_memory_regions() - Add a memory region and merge the adjacent
+ * regions if they are physically contiguous.
+ * @phys_contig_regions : Private data associated with the contiguous physical memory regions.
+ * @page_paddr : Physical start address of the region to be added.
+ * @page_size : Length of the region to be added.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int
+ne_merge_phys_contig_memory_regions(struct ne_phys_contig_mem_regions *phys_contig_regions,
+ u64 page_paddr, u64 page_size)
+{
+ unsigned long num = phys_contig_regions->num;
+ int rc = 0;
+
+ rc = ne_sanity_check_phys_mem_region(page_paddr, page_size);
+ if (rc < 0)
+ return rc;
+
+ /* Physically contiguous, just merge */
+ if (num && (phys_contig_regions->regions[num - 1].end + 1) == page_paddr) {
+ phys_contig_regions->regions[num - 1].end += page_size;
+ } else {
+ phys_contig_regions->regions[num].start = page_paddr;
+ phys_contig_regions->regions[num].end = page_paddr + page_size - 1;
+ phys_contig_regions->num++;
+ }
+
+ return 0;
+}
+
+/**
* ne_set_user_memory_region_ioctl() - Add user space memory region to the slot
* associated with the current enclave.
* @ne_enclave : Private data associated with the current enclave.
@@ -843,9 +920,8 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
unsigned long max_nr_pages = 0;
unsigned long memory_size = 0;
struct ne_mem_region *ne_mem_region = NULL;
- unsigned long nr_phys_contig_mem_regions = 0;
struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
- struct page **phys_contig_mem_regions = NULL;
+ struct ne_phys_contig_mem_regions phys_contig_mem_regions = {};
int rc = -EINVAL;
rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region);
@@ -866,9 +942,10 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
goto free_mem_region;
}
- phys_contig_mem_regions = kcalloc(max_nr_pages, sizeof(*phys_contig_mem_regions),
- GFP_KERNEL);
- if (!phys_contig_mem_regions) {
+ phys_contig_mem_regions.regions = kcalloc(max_nr_pages,
+ sizeof(*phys_contig_mem_regions.regions),
+ GFP_KERNEL);
+ if (!phys_contig_mem_regions.regions) {
rc = -ENOMEM;
goto free_mem_region;
@@ -902,26 +979,18 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
if (rc < 0)
goto put_pages;
- /*
- * TODO: Update once handled non-contiguous memory regions
- * received from user space or contiguous physical memory regions
- * larger than 2 MiB e.g. 8 MiB.
- */
- phys_contig_mem_regions[i] = ne_mem_region->pages[i];
+ rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions,
+ page_to_phys(ne_mem_region->pages[i]),
+ page_size(ne_mem_region->pages[i]));
+ if (rc < 0)
+ goto put_pages;
memory_size += page_size(ne_mem_region->pages[i]);
ne_mem_region->nr_pages++;
} while (memory_size < mem_region.memory_size);
- /*
- * TODO: Update once handled non-contiguous memory regions received
- * from user space or contiguous physical memory regions larger than
- * 2 MiB e.g. 8 MiB.
- */
- nr_phys_contig_mem_regions = ne_mem_region->nr_pages;
-
- if ((ne_enclave->nr_mem_regions + nr_phys_contig_mem_regions) >
+ if ((ne_enclave->nr_mem_regions + phys_contig_mem_regions.num) >
ne_enclave->max_mem_regions) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Reached max memory regions %lld\n",
@@ -932,27 +1001,13 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
goto put_pages;
}
- for (i = 0; i < nr_phys_contig_mem_regions; i++) {
- u64 phys_region_addr = page_to_phys(phys_contig_mem_regions[i]);
- u64 phys_region_size = page_size(phys_contig_mem_regions[i]);
-
- if (phys_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
- dev_err_ratelimited(ne_misc_dev.this_device,
- "Physical mem region size is not multiple of 2 MiB\n");
-
- rc = -EINVAL;
-
- goto put_pages;
- }
-
- if (!IS_ALIGNED(phys_region_addr, NE_MIN_MEM_REGION_SIZE)) {
- dev_err_ratelimited(ne_misc_dev.this_device,
- "Physical mem region address is not 2 MiB aligned\n");
-
- rc = -EINVAL;
+ for (i = 0; i < phys_contig_mem_regions.num; i++) {
+ u64 phys_region_addr = phys_contig_mem_regions.regions[i].start;
+ u64 phys_region_size = range_len(&phys_contig_mem_regions.regions[i]);
+ rc = ne_sanity_check_phys_mem_region(phys_region_addr, phys_region_size);
+ if (rc < 0)
goto put_pages;
- }
}
ne_mem_region->memory_size = mem_region.memory_size;
@@ -960,13 +1015,13 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list);
- for (i = 0; i < nr_phys_contig_mem_regions; i++) {
+ for (i = 0; i < phys_contig_mem_regions.num; i++) {
struct ne_pci_dev_cmd_reply cmd_reply = {};
struct slot_add_mem_req slot_add_mem_req = {};
slot_add_mem_req.slot_uid = ne_enclave->slot_uid;
- slot_add_mem_req.paddr = page_to_phys(phys_contig_mem_regions[i]);
- slot_add_mem_req.size = page_size(phys_contig_mem_regions[i]);
+ slot_add_mem_req.paddr = phys_contig_mem_regions.regions[i].start;
+ slot_add_mem_req.size = range_len(&phys_contig_mem_regions.regions[i]);
rc = ne_do_request(pdev, SLOT_ADD_MEM,
&slot_add_mem_req, sizeof(slot_add_mem_req),
@@ -975,7 +1030,7 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in slot add mem [rc=%d]\n", rc);
- kfree(phys_contig_mem_regions);
+ kfree(phys_contig_mem_regions.regions);
/*
* Exit here without put pages as memory regions may
@@ -988,7 +1043,7 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
ne_enclave->nr_mem_regions++;
}
- kfree(phys_contig_mem_regions);
+ kfree(phys_contig_mem_regions.regions);
return 0;
@@ -996,7 +1051,7 @@ put_pages:
for (i = 0; i < ne_mem_region->nr_pages; i++)
put_page(ne_mem_region->pages[i]);
free_mem_region:
- kfree(phys_contig_mem_regions);
+ kfree(phys_contig_mem_regions.regions);
kfree(ne_mem_region->pages);
kfree(ne_mem_region);
@@ -1702,8 +1757,37 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+#if defined(CONFIG_NITRO_ENCLAVES_MISC_DEV_TEST)
+#include "ne_misc_dev_test.c"
+
+static inline int ne_misc_dev_test_init(void)
+{
+ return __kunit_test_suites_init(ne_misc_dev_test_suites);
+}
+
+static inline void ne_misc_dev_test_exit(void)
+{
+ __kunit_test_suites_exit(ne_misc_dev_test_suites);
+}
+#else
+static inline int ne_misc_dev_test_init(void)
+{
+ return 0;
+}
+
+static inline void ne_misc_dev_test_exit(void)
+{
+}
+#endif
+
static int __init ne_init(void)
{
+ int rc = 0;
+
+ rc = ne_misc_dev_test_init();
+ if (rc < 0)
+ return rc;
+
mutex_init(&ne_cpu_pool.mutex);
return pci_register_driver(&ne_pci_driver);
@@ -1714,6 +1798,8 @@ static void __exit ne_exit(void)
pci_unregister_driver(&ne_pci_driver);
ne_teardown_cpu_pool();
+
+ ne_misc_dev_test_exit();
}
module_init(ne_init);
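
The merge helper added above treats struct range as an inclusive
[start, end] pair: two regions are physically contiguous exactly when
prev.end + 1 == next.start, and range_len() is end - start + 1. A worked
example matching the KUnit cases in the next file:

        /*
         * After adding [0x600000, 0x9fffff], add paddr 0xa00000 with size
         * 0x400000:
         *   regions[num - 1].end + 1 == 0xa00000     -> contiguous, merge
         *   regions[num - 1].end += 0x400000         -> [0x600000, 0xdfffff]
         *   range_len() == 0xdfffff - 0x600000 + 1   == 0x800000 (8 MiB)
         */
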
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev_test.c b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c
new file mode 100644
index 000000000000..265797bed0ea
--- /dev/null
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+
+#define MAX_PHYS_REGIONS 16
+#define INVALID_VALUE (~0ull)
+
+struct ne_phys_regions_test {
+ u64 paddr;
+ u64 size;
+ int expect_rc;
+ unsigned long expect_num;
+ u64 expect_last_paddr;
+ u64 expect_last_size;
+} phys_regions_test_cases[] = {
+ /*
+ * Add the region from 0x1000 to (0x1000 + 0x200000 - 1):
+ * Expected result:
+ * Failed, start address is not 2M-aligned
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 0
+ * regions = {}
+ */
+ {0x1000, 0x200000, -EINVAL, 0, INVALID_VALUE, INVALID_VALUE},
+
+ /*
+ * Add the region from 0x200000 to (0x200000 + 0x1000 - 1):
+ * Expected result:
+ * Failed, size is not 2M-aligned
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 0
+ * regions = {}
+ */
+ {0x200000, 0x1000, -EINVAL, 0, INVALID_VALUE, INVALID_VALUE},
+
+ /*
+ * Add the region from 0x200000 to (0x200000 + 0x200000 - 1):
+ * Expected result:
+ * Successful
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 1
+ * regions = {
+ * {start=0x200000, end=0x3fffff}, // len=0x200000
+ * }
+ */
+ {0x200000, 0x200000, 0, 1, 0x200000, 0x200000},
+
+ /*
+ * Add the region from 0x0 to (0x0 + 0x200000 - 1):
+ * Expected result:
+ * Successful
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 2
+ * regions = {
+ * {start=0x200000, end=0x3fffff}, // len=0x200000
+ * {start=0x0, end=0x1fffff}, // len=0x200000
+ * }
+ */
+ {0x0, 0x200000, 0, 2, 0x0, 0x200000},
+
+ /*
+ * Add the region from 0x600000 to (0x600000 + 0x400000 - 1):
+ * Expected result:
+ * Successful
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 3
+ * regions = {
+ * {start=0x200000, end=0x3fffff}, // len=0x200000
+ * {start=0x0, end=0x1fffff}, // len=0x200000
+ * {start=0x600000, end=0x9fffff}, // len=0x400000
+ * }
+ */
+ {0x600000, 0x400000, 0, 3, 0x600000, 0x400000},
+
+ /*
+ * Add the region from 0xa00000 to (0xa00000 + 0x400000 - 1):
+ * Expected result:
+ * Successful, merging case!
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 3
+ * regions = {
+ * {start=0x200000, end=0x3fffff}, // len=0x200000
+ * {start=0x0, end=0x1fffff}, // len=0x200000
+ * {start=0x600000, end=0xdfffff}, // len=0x800000
+ * }
+ */
+ {0xa00000, 0x400000, 0, 3, 0x600000, 0x800000},
+
+ /*
+ * Add the region from 0x1000 to (0x1000 + 0x200000 - 1):
+ * Expected result:
+ * Failed, start address is not 2M-aligned
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 3
+ * regions = {
+ * {start=0x200000, end=0x3fffff}, // len=0x200000
+ * {start=0x0, end=0x1fffff}, // len=0x200000
+ * {start=0x600000, end=0xdfffff}, // len=0x800000
+ * }
+ */
+ {0x1000, 0x200000, -EINVAL, 3, 0x600000, 0x800000},
+};
+
+static void ne_misc_dev_test_merge_phys_contig_memory_regions(struct kunit *test)
+{
+ struct ne_phys_contig_mem_regions phys_contig_mem_regions = {};
+ int rc = 0;
+ int i = 0;
+
+ phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS,
+ sizeof(*phys_contig_mem_regions.regions),
+ GFP_KERNEL);
+ KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions);
+
+ for (i = 0; i < ARRAY_SIZE(phys_regions_test_cases); i++) {
+ struct ne_phys_regions_test *test_case = &phys_regions_test_cases[i];
+ unsigned long num = 0;
+
+ rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions,
+ test_case->paddr, test_case->size);
+ KUNIT_EXPECT_EQ(test, rc, test_case->expect_rc);
+ KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.num, test_case->expect_num);
+
+ if (test_case->expect_last_paddr == INVALID_VALUE)
+ continue;
+
+ num = phys_contig_mem_regions.num;
+ KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.regions[num - 1].start,
+ test_case->expect_last_paddr);
+ KUNIT_EXPECT_EQ(test, range_len(&phys_contig_mem_regions.regions[num - 1]),
+ test_case->expect_last_size);
+ }
+
+ kunit_kfree(test, phys_contig_mem_regions.regions);
+}
+
+static struct kunit_case ne_misc_dev_test_cases[] = {
+ KUNIT_CASE(ne_misc_dev_test_merge_phys_contig_memory_regions),
+ {}
+};
+
+static struct kunit_suite ne_misc_dev_test_suite = {
+ .name = "ne_misc_dev_test",
+ .test_cases = ne_misc_dev_test_cases,
+};
+
+static struct kunit_suite *ne_misc_dev_test_suites[] = {
+ &ne_misc_dev_test_suite,
+ NULL
+};
diff --git a/drivers/virt/nitro_enclaves/ne_pci_dev.c b/drivers/virt/nitro_enclaves/ne_pci_dev.c
index 40b49ec8e30b..6b81e8f3a5dc 100644
--- a/drivers/virt/nitro_enclaves/ne_pci_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_pci_dev.c
@@ -376,7 +376,6 @@ static void ne_teardown_msix(struct pci_dev *pdev)
free_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_pci_dev);
flush_work(&ne_pci_dev->notify_work);
- flush_workqueue(ne_pci_dev->event_wq);
destroy_workqueue(ne_pci_dev->event_wq);
free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev);
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 236081afe9a2..00ac9db792a4 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -204,6 +204,12 @@ int virtio_finalize_features(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_finalize_features);
+void virtio_reset_device(struct virtio_device *dev)
+{
+ dev->config->reset(dev);
+}
+EXPORT_SYMBOL_GPL(virtio_reset_device);
+
static int virtio_dev_probe(struct device *_d)
{
int err, i;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c22ff0117b46..f4c34a2a6b8e 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -1056,7 +1056,7 @@ static void remove_common(struct virtio_balloon *vb)
return_free_pages_to_mm(vb, ULONG_MAX);
/* Now we reset the device so we can clean up the queues. */
- vb->vdev->config->reset(vb->vdev);
+ virtio_reset_device(vb->vdev);
vb->vdev->config->del_vqs(vb->vdev);
}
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index ce51ae165943..3aa46703872d 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -347,7 +347,7 @@ static void virtinput_remove(struct virtio_device *vdev)
spin_unlock_irqrestore(&vi->lock, flags);
input_unregister_device(vi->idev);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
kfree(buf);
vdev->config->del_vqs(vdev);
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 96e5a8782769..38becd8d578c 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>
+#include <linux/log2.h>
#include <acpi/acpi_numa.h>
@@ -592,7 +593,7 @@ static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
return -ENOMEM;
mutex_lock(&vm->hotplug_mutex);
- if (new_bitmap)
+ if (vm->sbm.sb_states)
memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
old_bitmap = vm->sbm.sb_states;
@@ -1120,15 +1121,18 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
*/
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
- const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
+ unsigned long order = MAX_ORDER - 1;
unsigned long i;
/*
- * We are always called at least with MAX_ORDER_NR_PAGES
- * granularity/alignment (e.g., the way subblocks work). All pages
- * inside such a block are alike.
+ * We might get called for ranges that don't cover properly aligned
+ * MAX_ORDER - 1 pages; however, we can only online properly aligned
+ * pages with an order of MAX_ORDER - 1 at maximum.
*/
- for (i = 0; i < nr_pages; i += max_nr_pages) {
+ while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
+ order--;
+
+ for (i = 0; i < nr_pages; i += 1 << order) {
struct page *page = pfn_to_page(pfn + i);
/*
@@ -1138,14 +1142,12 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
* alike.
*/
if (PageDirty(page)) {
- virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
- false);
- generic_online_page(page, MAX_ORDER - 1);
+ virtio_mem_clear_fake_offline(pfn + i, 1 << order, false);
+ generic_online_page(page, order);
} else {
- virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
- true);
- free_contig_range(pfn + i, max_nr_pages);
- adjust_managed_page_count(page, max_nr_pages);
+ virtio_mem_clear_fake_offline(pfn + i, 1 << order, true);
+ free_contig_range(pfn + i, 1 << order);
+ adjust_managed_page_count(page, 1 << order);
}
}
}
@@ -1228,28 +1230,46 @@ static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
page_ref_inc(pfn_to_page(pfn + i));
}
-static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
+static void virtio_mem_online_page(struct virtio_mem *vm,
+ struct page *page, unsigned int order)
{
- const unsigned long addr = page_to_phys(page);
- unsigned long id, sb_id;
- struct virtio_mem *vm;
+ const unsigned long start = page_to_phys(page);
+ const unsigned long end = start + PFN_PHYS(1 << order);
+ unsigned long addr, next, id, sb_id, count;
bool do_online;
- rcu_read_lock();
- list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
- if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
- continue;
+ /*
+ * We can get called with any order up to MAX_ORDER - 1. If our
+ * subblock size is smaller than that and we have a mixture of plugged
+ * and unplugged subblocks within such a page, we have to process in
+ * smaller granularity. In that case we'll adjust the order exactly once
+ * within the loop.
+ */
+ for (addr = start; addr < end; ) {
+ next = addr + PFN_PHYS(1 << order);
if (vm->in_sbm) {
- /*
- * We exploit here that subblocks have at least
- * MAX_ORDER_NR_PAGES size/alignment - so we cannot
- * cross subblocks within one call.
- */
id = virtio_mem_phys_to_mb_id(addr);
sb_id = virtio_mem_phys_to_sb_id(vm, addr);
- do_online = virtio_mem_sbm_test_sb_plugged(vm, id,
- sb_id, 1);
+ count = virtio_mem_phys_to_sb_id(vm, next - 1) - sb_id + 1;
+
+ if (virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, count)) {
+ /* Fully plugged. */
+ do_online = true;
+ } else if (count == 1 ||
+ virtio_mem_sbm_test_sb_unplugged(vm, id, sb_id, count)) {
+ /* Fully unplugged. */
+ do_online = false;
+ } else {
+ /*
+ * Mixture, process sub-blocks instead. This
+ * will be at least the size of a pageblock.
+ * We'll run into this case exactly once.
+ */
+ order = ilog2(vm->sbm.sb_size) - PAGE_SHIFT;
+ do_online = virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, 1);
+ continue;
+ }
} else {
/*
* If the whole block is marked fake offline, keep
@@ -1260,18 +1280,38 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
}
+ if (do_online)
+ generic_online_page(pfn_to_page(PFN_DOWN(addr)), order);
+ else
+ virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
+ false);
+ addr = next;
+ }
+}
+
+static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
+{
+ const unsigned long addr = page_to_phys(page);
+ struct virtio_mem *vm;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
+ /*
+ * Pages we're onlining will never cross memory blocks and,
+ * therefore, not virtio-mem devices.
+ */
+ if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
+ continue;
+
/*
- * virtio_mem_set_fake_offline() might sleep, we don't need
- * the device anymore. See virtio_mem_remove() how races
+ * virtio_mem_set_fake_offline() might sleep. We can safely
+ * drop the RCU lock at this point because the device
+ * cannot go away. See virtio_mem_remove() for how races
* between memory onlining and device removal are handled.
*/
rcu_read_unlock();
- if (do_online)
- generic_online_page(page, order);
- else
- virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
- false);
+ virtio_mem_online_page(vm, page, order);
return;
}
rcu_read_unlock();
@@ -2438,8 +2478,6 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
/*
* We want subblocks to span at least MAX_ORDER_NR_PAGES and
* pageblock_nr_pages pages. This:
- * - Simplifies our page onlining code (virtio_mem_online_page_cb)
- * and fake page onlining code (virtio_mem_fake_online).
* - Is required for now for alloc_contig_range() to work reliably -
* it doesn't properly handle smaller granularity on ZONE_NORMAL.
*/
@@ -2850,7 +2888,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
virtio_mem_deinit_hotplug(vm);
/* reset the device and cleanup the queues */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
kfree(vm);
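
The order computation added to virtio_mem_fake_online() is worth unpacking:
OR-ing the start pfn with the page count means IS_ALIGNED() passes only when
1 << order divides both, so the loop settles on the largest page order that
tiles the range exactly. Worked example (assumed values):

        /*
         * pfn = 0x1200, nr_pages = 0x600:
         *   pfn | nr_pages = 0x1600, lowest set bit = 0x200 (bit 9)
         *   -> order = 9, i.e. chunks of 512 pages:
         *      0x1200 / 0x200 == 9 chunks in, 0x600 / 0x200 == 3 chunks long.
         */
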
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index b3f8128b7983..34141b9abe27 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -138,7 +138,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
if (q_pfn >> 32) {
dev_err(&vp_dev->pci_dev->dev,
- "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
+ "platform bug: legacy virtio-pci must not be used with RAM above 0x%llxGB\n",
0x1ULL << (32 + PAGE_SHIFT - 30));
err = -E2BIG;
goto out_del_vq;
diff --git a/drivers/virtio/virtio_pci_legacy_dev.c b/drivers/virtio/virtio_pci_legacy_dev.c
index 9b97680dd02b..677d1f68bc9b 100644
--- a/drivers/virtio/virtio_pci_legacy_dev.c
+++ b/drivers/virtio/virtio_pci_legacy_dev.c
@@ -45,8 +45,10 @@ int vp_legacy_probe(struct virtio_pci_legacy_device *ldev)
return rc;
ldev->ioaddr = pci_iomap(pci_dev, 0, 0);
- if (!ldev->ioaddr)
+ if (!ldev->ioaddr) {
+ rc = -EIO;
goto err_iomap;
+ }
ldev->isr = ldev->ioaddr + VIRTIO_PCI_ISR;
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index e11ed748e661..e8b3ff2b9fbc 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -345,7 +345,7 @@ err_map_common:
EXPORT_SYMBOL_GPL(vp_modern_probe);
/*
- * vp_modern_probe: remove and cleanup the modern virtio pci device
+ * vp_modern_remove: remove and cleanup the modern virtio pci device
* @mdev: the modern virtio-pci device
*/
void vp_modern_remove(struct virtio_pci_modern_device *mdev)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 028b05d44546..962f1477b1fa 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1197,8 +1197,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
if (virtqueue_use_indirect(_vq, total_sg)) {
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
in_sgs, data, gfp);
- if (err != -ENOMEM)
+ if (err != -ENOMEM) {
+ END_USE(vq);
return err;
+ }
/* fall back on direct */
}
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index f85f860bc10b..7767a7f0119b 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -91,9 +91,8 @@ static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- const struct vdpa_config_ops *ops = vdpa->config;
- return ops->set_status(vdpa, status);
+ return vdpa_set_status(vdpa, status);
}
static void virtio_vdpa_reset(struct virtio_device *vdev)
@@ -308,7 +307,7 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
- return ops->get_features(vdpa);
+ return ops->get_device_features(vdpa);
}
static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
@@ -318,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
- return vdpa_set_features(vdpa, vdev->features);
+ return vdpa_set_features(vdpa, vdev->features, false);
}
static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c
index e4f336111edc..6cef6e2edb89 100644
--- a/drivers/w1/slaves/w1_ds28e04.c
+++ b/drivers/w1/slaves/w1_ds28e04.c
@@ -32,7 +32,7 @@ static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
/* enable/disable CRC checking on DS28E04-100 memory accesses */
-static char w1_enable_crccheck = 1;
+static bool w1_enable_crccheck = true;
#define W1_EEPROM_SIZE 512
#define W1_PAGE_COUNT 16
@@ -339,32 +339,18 @@ static BIN_ATTR_RW(pio, 1);
static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- if (put_user(w1_enable_crccheck + 0x30, buf))
- return -EFAULT;
-
- return sizeof(w1_enable_crccheck);
+ return sysfs_emit(buf, "%d\n", w1_enable_crccheck);
}
static ssize_t crccheck_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- char val;
-
- if (count != 1 || !buf)
- return -EINVAL;
+ int err = kstrtobool(buf, &w1_enable_crccheck);
- if (get_user(val, buf))
- return -EFAULT;
+ if (err)
+ return err;
- /* convert to decimal */
- val = val - 0x30;
- if (val != 0 && val != 1)
- return -EINVAL;
-
- /* set the new value */
- w1_enable_crccheck = val;
-
- return sizeof(w1_enable_crccheck);
+ return count;
}
static DEVICE_ATTR_RW(crccheck);
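
Two things make this conversion correct rather than merely tidier: sysfs handlers receive kernel buffers, so the old get_user()/put_user() calls were operating on the wrong address space, and kstrtobool() accepts the usual spellings (0/1, y/n, on/off) instead of only the raw characters '0' and '1'. The result is the standard pattern for a boolean device attribute (hypothetical attribute name "enabled"):

	static bool enabled;

	static ssize_t enabled_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", enabled);
	}

	static ssize_t enabled_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		int err = kstrtobool(buf, &enabled);

		return err ? err : count;
	}
	static DEVICE_ATTR_RW(enabled);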
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index ca70c5f03206..565578002d79 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -1785,7 +1785,7 @@ static ssize_t alarms_store(struct device *device,
u8 new_config_register[3]; /* array of data to be written */
int temp, ret;
char *token = NULL;
- s8 tl, th, tt; /* 1 byte per value + temp ring order */
+ s8 tl, th; /* 1 byte per value */
char *p_args, *orig;
p_args = orig = kmalloc(size, GFP_KERNEL);
@@ -1836,9 +1836,8 @@ static ssize_t alarms_store(struct device *device,
th = int_to_short(temp);
/* Reorder if required th and tl */
- if (tl > th) {
- tt = tl; tl = th; th = tt;
- }
+ if (tl > th)
+ swap(tl, th);
/*
* Read the scratchpad to change only the required bits
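
swap() here is the generic helper from linux/minmax.h, which performs a type-checked three-move exchange and removes the need for the dedicated tt temporary. Its definition is essentially:

	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)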
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 9d222ba17ec6..c8fa79da23b3 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -207,6 +207,7 @@ config DA9055_WATCHDOG
config DA9063_WATCHDOG
tristate "Dialog DA9063 Watchdog"
depends on MFD_DA9063 || COMPILE_TEST
+ depends on I2C
select WATCHDOG_CORE
help
Support for the watchdog in the DA9063 PMIC.
@@ -680,10 +681,10 @@ config MAX77620_WATCHDOG
depends on MFD_MAX77620 || COMPILE_TEST
select WATCHDOG_CORE
help
- This is the driver for the Max77620 watchdog timer.
- Say 'Y' here to enable the watchdog timer support for
- MAX77620 chips. To compile this driver as a module,
- choose M here: the module will be called max77620_wdt.
+ This is the driver for the Max77620 watchdog timer.
+ Say 'Y' here to enable the watchdog timer support for
+ MAX77620 chips. To compile this driver as a module,
+ choose M here: the module will be called max77620_wdt.
config IMX2_WDT
tristate "IMX2+ Watchdog"
@@ -822,6 +823,7 @@ config MESON_WATCHDOG
config MEDIATEK_WATCHDOG
tristate "Mediatek SoCs watchdog support"
depends on ARCH_MEDIATEK || COMPILE_TEST
+ default ARCH_MEDIATEK
select WATCHDOG_CORE
select RESET_CONTROLLER
help
@@ -881,6 +883,14 @@ config RENESAS_RZAWDT
This driver adds watchdog support for the integrated watchdogs in the
Renesas RZ/A SoCs. These watchdogs can be used to reset a system.
+config RENESAS_RZG2LWDT
+ tristate "Renesas RZ/G2L WDT Watchdog"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This driver adds watchdog support for the integrated watchdogs in the
+ Renesas RZ/G2L SoCs. These watchdogs can be used to reset a system.
+
config ASPEED_WATCHDOG
tristate "Aspeed BMC watchdog support"
depends on ARCH_ASPEED || COMPILE_TEST
@@ -940,6 +950,19 @@ config RTD119X_WATCHDOG
Say Y here to include support for the watchdog timer in
Realtek RTD1295 SoCs.
+config REALTEK_OTTO_WDT
+ tristate "Realtek Otto MIPS watchdog support"
+ depends on MACH_REALTEK_RTL || COMPILE_TEST
+ depends on COMMON_CLK
+ select WATCHDOG_CORE
+ default MACH_REALTEK_RTL
+ help
+ Say Y here to include support for the watchdog timer on Realtek
+ RTL838x, RTL839x, RTL930x SoCs. This watchdog has pretimeout
+ notifications and system reset on timeout.
+
+ When built as a module this will be called realtek_otto_wdt.
+
config SPRD_WATCHDOG
tristate "Spreadtrum watchdog support"
depends on ARCH_SPRD || COMPILE_TEST
@@ -976,6 +999,18 @@ config MSC313E_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called msc313e_wdt.
+config APPLE_WATCHDOG
+ tristate "Apple SoC watchdog"
+ depends on ARCH_APPLE || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the Watchdog found in Apple
+ SoCs such as the M1. In addition to the common watchdog features,
+ this driver is also required in order to reboot these SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apple_wdt.
+
# X86 (i386 + ia64 + x86_64) Architecture
config ACQUIRE_WDT
@@ -1440,26 +1475,26 @@ config TQMX86_WDT
depends on X86
select WATCHDOG_CORE
help
- This is the driver for the hardware watchdog timer in the TQMX86 IO
- controller found on some of their ComExpress Modules.
+ This is the driver for the hardware watchdog timer in the TQMX86 IO
+ controller found on some of their ComExpress Modules.
- To compile this driver as a module, choose M here; the module
- will be called tqmx86_wdt.
+ To compile this driver as a module, choose M here; the module
+ will be called tqmx86_wdt.
- Most people will say N.
+ Most people will say N.
config VIA_WDT
tristate "VIA Watchdog Timer"
depends on X86 && PCI
select WATCHDOG_CORE
help
- This is the driver for the hardware watchdog timer on VIA
- southbridge chipset CX700, VX800/VX820 or VX855/VX875.
+ This is the driver for the hardware watchdog timer on VIA
+ southbridge chipset CX700, VX800/VX820 or VX855/VX875.
- To compile this driver as a module, choose M here; the module
- will be called via_wdt.
+ To compile this driver as a module, choose M here; the module
+ will be called via_wdt.
- Most people will say N.
+ Most people will say N.
config W83627HF_WDT
tristate "Watchdog timer for W83627HF/W83627DHG and compatibles"
@@ -1589,6 +1624,17 @@ config NIC7018_WDT
To compile this driver as a module, choose M here: the module will be
called nic7018_wdt.
+config SIEMENS_SIMATIC_IPC_WDT
+ tristate "Siemens Simatic IPC Watchdog"
+ depends on SIEMENS_SIMATIC_IPC
+ select WATCHDOG_CORE
+ help
+ This driver adds support for several watchdogs found in Industrial
+ PCs from Siemens.
+
+ To compile this driver as a module, choose M here: the module will be
+ called simatic-ipc-wdt.
+
# M68K Architecture
config M54xx_WATCHDOG
@@ -1696,16 +1742,6 @@ config OCTEON_WDT
from the first interrupt, it is then only poked when the
device is written.
-config BCM63XX_WDT
- tristate "Broadcom BCM63xx hardware watchdog"
- depends on BCM63XX
- help
- Watchdog driver for the built in watchdog hardware in Broadcom
- BCM63xx SoC.
-
- To compile this driver as a loadable module, choose M here.
- The module will be called bcm63xx_wdt.
-
config BCM2835_WDT
tristate "Broadcom BCM2835 hardware watchdog"
depends on ARCH_BCM2835 || (OF && COMPILE_TEST)
@@ -1740,15 +1776,16 @@ config BCM_KONA_WDT_DEBUG
If in doubt, say 'N'.
config BCM7038_WDT
- tristate "BCM7038 Watchdog"
+ tristate "BCM63xx/BCM7038 Watchdog"
select WATCHDOG_CORE
depends on HAS_IOMEM
- depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
help
- Watchdog driver for the built-in hardware in Broadcom 7038 and
- later SoCs used in set-top boxes. BCM7038 was made public
- during the 2004 CES, and since then, many Broadcom chips use this
- watchdog block, including some cable modem chips.
+ Watchdog driver for the built-in hardware in Broadcom 7038 and
+ later SoCs used in set-top boxes. BCM7038 was made public
+ during the 2004 CES, and since then, many Broadcom chips use this
+ watchdog block, including some cable modem chips and DSL (63xx)
+ chips.
config IMGPDC_WDT
tristate "Imagination Technologies PDC Watchdog Timer"
@@ -2109,12 +2146,12 @@ config KEEMBAY_WATCHDOG
depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
select WATCHDOG_CORE
help
- This option enable support for an In-secure watchdog timer driver for
- Intel Keem Bay SoC. This WDT has a 32 bit timer and decrements in every
- count unit. An interrupt will be triggered, when the count crosses
- the threshold configured in the register.
+ This option enables support for the non-secure watchdog timer driver
+ for the Intel Keem Bay SoC. This WDT has a 32-bit timer that decrements
+ on every count unit. An interrupt is triggered when the count crosses
+ the threshold configured in the register.
- To compile this driver as a module, choose M here: the
- module will be called keembay_wdt.
+ To compile this driver as a module, choose M here: the
+ module will be called keembay_wdt.
endif # WATCHDOG
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 2ee97064145b..f7da867e8782 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o
+obj-$(CONFIG_RENESAS_RZG2LWDT) += rzg2l_wdt.o
obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o
obj-$(CONFIG_STM32_WATCHDOG) += stm32_iwdg.o
obj-$(CONFIG_UNIPHIER_WATCHDOG) += uniphier_wdt.o
@@ -93,6 +94,7 @@ obj-$(CONFIG_PM8916_WATCHDOG) += pm8916_wdt.o
obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o
obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o
obj-$(CONFIG_MSC313E_WATCHDOG) += msc313e_wdt.o
+obj-$(CONFIG_APPLE_WATCHDOG) += apple_wdt.o
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
@@ -143,6 +145,7 @@ obj-$(CONFIG_NI903X_WDT) += ni903x_wdt.o
obj-$(CONFIG_NIC7018_WDT) += nic7018_wdt.o
obj-$(CONFIG_MLX_WDT) += mlx_wdt.o
obj-$(CONFIG_KEEMBAY_WATCHDOG) += keembay_wdt.o
+obj-$(CONFIG_SIEMENS_SIMATIC_IPC_WDT) += simatic-ipc-wdt.o
# M68K Architecture
obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
@@ -153,7 +156,6 @@ obj-$(CONFIG_XILINX_WATCHDOG) += of_xilinx_wdt.o
# MIPS Architecture
obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
-obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o
obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
@@ -170,6 +172,7 @@ obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o
obj-$(CONFIG_MT7621_WDT) += mt7621_wdt.o
obj-$(CONFIG_PIC32_WDT) += pic32-wdt.o
obj-$(CONFIG_PIC32_DMT) += pic32-dmt.o
+obj-$(CONFIG_REALTEK_OTTO_WDT) += realtek_otto_wdt.o
# PARISC Architecture
diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c
new file mode 100644
index 000000000000..16aca21f13d6
--- /dev/null
+++ b/drivers/watchdog/apple_wdt.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SoC Watchdog driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+/*
+ * Apple Watchdog MMIO registers
+ *
+ * This HW block has three separate watchdogs. WD0 resets the machine
+ * to recovery mode and is not very useful for us. WD1 and WD2 trigger a normal
+ * machine reset. WD0 additionally supports a configurable interrupt.
+ * This information can be used to implement pretimeout support at a later time.
+ *
+ * APPLE_WDT_WDx_CUR_TIME is a simple counter incremented for each tick of the
+ * reference clock. It can also be overwritten to any value.
+ * Whenever APPLE_WDT_CTRL_RESET_EN is set in APPLE_WDT_WDx_CTRL and
+ * APPLE_WDT_WDx_CUR_TIME >= APPLE_WDT_WDx_BITE_TIME the entire machine is
+ * reset.
+ * Whenever APPLE_WDT_CTRL_IRQ_EN is set and APPLE_WDT_WD0_CUR_TIME >=
+ * APPLE_WDT_WD0_BARK_TIME an interrupt is triggered and
+ * APPLE_WDT_CTRL_IRQ_STATUS is set. The interrupt can be cleared by writing
+ * 1 to APPLE_WDT_CTRL_IRQ_STATUS.
+ */
+#define APPLE_WDT_WD0_CUR_TIME 0x00
+#define APPLE_WDT_WD0_BITE_TIME 0x04
+#define APPLE_WDT_WD0_BARK_TIME 0x08
+#define APPLE_WDT_WD0_CTRL 0x0c
+
+#define APPLE_WDT_WD1_CUR_TIME 0x10
+#define APPLE_WDT_WD1_BITE_TIME 0x14
+#define APPLE_WDT_WD1_CTRL 0x1c
+
+#define APPLE_WDT_WD2_CUR_TIME 0x20
+#define APPLE_WDT_WD2_BITE_TIME 0x24
+#define APPLE_WDT_WD2_CTRL 0x2c
+
+#define APPLE_WDT_CTRL_IRQ_EN BIT(0)
+#define APPLE_WDT_CTRL_IRQ_STATUS BIT(1)
+#define APPLE_WDT_CTRL_RESET_EN BIT(2)
+
+#define APPLE_WDT_TIMEOUT_DEFAULT 30
+
+struct apple_wdt {
+ struct watchdog_device wdd;
+ void __iomem *regs;
+ unsigned long clk_rate;
+};
+
+static struct apple_wdt *to_apple_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct apple_wdt, wdd);
+}
+
+static int apple_wdt_start(struct watchdog_device *wdd)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+ writel_relaxed(APPLE_WDT_CTRL_RESET_EN, wdt->regs + APPLE_WDT_WD1_CTRL);
+
+ return 0;
+}
+
+static int apple_wdt_stop(struct watchdog_device *wdd)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CTRL);
+
+ return 0;
+}
+
+static int apple_wdt_ping(struct watchdog_device *wdd)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+
+ return 0;
+}
+
+static int apple_wdt_set_timeout(struct watchdog_device *wdd, unsigned int s)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+ writel_relaxed(wdt->clk_rate * s, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
+
+ wdd->timeout = s;
+
+ return 0;
+}
+
+static unsigned int apple_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+ u32 cur_time, reset_time;
+
+ cur_time = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+ reset_time = readl_relaxed(wdt->regs + APPLE_WDT_WD1_BITE_TIME);
+
+ return (reset_time - cur_time) / wdt->clk_rate;
+}
+
+static int apple_wdt_restart(struct watchdog_device *wdd, unsigned long mode,
+ void *cmd)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+
+ writel_relaxed(APPLE_WDT_CTRL_RESET_EN, wdt->regs + APPLE_WDT_WD1_CTRL);
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+
+ /*
+ * Flush writes and then wait for the SoC to reset. Even though the
+ * reset is queued almost immediately, experiments have shown that it
+ * can take up to ~20-25ms until the SoC is actually reset. Just wait
+ * 50ms here to be safe.
+ */
+ (void)readl_relaxed(wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+ mdelay(50);
+
+ return 0;
+}
+
+static void apple_wdt_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static struct watchdog_ops apple_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = apple_wdt_start,
+ .stop = apple_wdt_stop,
+ .ping = apple_wdt_ping,
+ .set_timeout = apple_wdt_set_timeout,
+ .get_timeleft = apple_wdt_get_timeleft,
+ .restart = apple_wdt_restart,
+};
+
+static struct watchdog_info apple_wdt_info = {
+ .identity = "Apple SoC Watchdog",
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+};
+
+static int apple_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_wdt *wdt;
+ struct clk *clk;
+ u32 wdt_ctrl;
+ int ret;
+
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(wdt->regs))
+ return PTR_ERR(wdt->regs);
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, apple_wdt_clk_disable_unprepare,
+ clk);
+ if (ret)
+ return ret;
+
+ wdt->clk_rate = clk_get_rate(clk);
+ if (!wdt->clk_rate)
+ return -EINVAL;
+
+ wdt->wdd.ops = &apple_wdt_ops;
+ wdt->wdd.info = &apple_wdt_info;
+ wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate;
+ wdt->wdd.timeout = APPLE_WDT_TIMEOUT_DEFAULT;
+
+ wdt_ctrl = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CTRL);
+ if (wdt_ctrl & APPLE_WDT_CTRL_RESET_EN)
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+
+ watchdog_init_timeout(&wdt->wdd, 0, dev);
+ apple_wdt_set_timeout(&wdt->wdd, wdt->wdd.timeout);
+ watchdog_stop_on_unregister(&wdt->wdd);
+ watchdog_set_restart_priority(&wdt->wdd, 128);
+
+ return devm_watchdog_register_device(dev, &wdt->wdd);
+}
+
+static const struct of_device_id apple_wdt_of_match[] = {
+ { .compatible = "apple,wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, apple_wdt_of_match);
+
+static struct platform_driver apple_wdt_driver = {
+ .driver = {
+ .name = "apple-watchdog",
+ .of_match_table = apple_wdt_of_match,
+ },
+ .probe = apple_wdt_probe,
+};
+module_platform_driver(apple_wdt_driver);
+
+MODULE_DESCRIPTION("Apple SoC watchdog driver");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("Dual MIT/GPL");
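
The conversions this driver performs between seconds and counter ticks all follow from the register description in the header comment. Restated as standalone helpers (same arithmetic as the probe and set_timeout paths above; function names are hypothetical):

	#include <linux/limits.h>

	/* BITE_TIME value for an s-second timeout at clk_rate ticks per second */
	static inline u32 apple_wdt_secs_to_ticks(unsigned long clk_rate,
						  unsigned int s)
	{
		return clk_rate * s;
	}

	/* Largest whole-second timeout representable in the 32-bit counter */
	static inline unsigned int apple_wdt_max_timeout(unsigned long clk_rate)
	{
		return U32_MAX / clk_rate;
	}

The restart handler relies on the same mechanism: with BITE_TIME set to 0 and the counter reset, the very next tick satisfies CUR_TIME >= BITE_TIME and the SoC resets.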
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
deleted file mode 100644
index 56cc262571a5..000000000000
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ /dev/null
@@ -1,317 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Broadcom BCM63xx SoC watchdog driver
- *
- * Copyright (C) 2007, Miguel Gaio <miguel.gaio@efixo.com>
- * Copyright (C) 2008, Florian Fainelli <florian@openwrt.org>
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/watchdog.h>
-#include <linux/timer.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/resource.h>
-#include <linux/platform_device.h>
-
-#include <bcm63xx_cpu.h>
-#include <bcm63xx_io.h>
-#include <bcm63xx_regs.h>
-#include <bcm63xx_timer.h>
-
-#define PFX KBUILD_MODNAME
-
-#define WDT_HZ 50000000 /* Fclk */
-#define WDT_DEFAULT_TIME 30 /* seconds */
-#define WDT_MAX_TIME 256 /* seconds */
-
-static struct {
- void __iomem *regs;
- struct timer_list timer;
- unsigned long inuse;
- atomic_t ticks;
-} bcm63xx_wdt_device;
-
-static int expect_close;
-
-static int wdt_time = WDT_DEFAULT_TIME;
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-/* HW functions */
-static void bcm63xx_wdt_hw_start(void)
-{
- bcm_writel(0xfffffffe, bcm63xx_wdt_device.regs + WDT_DEFVAL_REG);
- bcm_writel(WDT_START_1, bcm63xx_wdt_device.regs + WDT_CTL_REG);
- bcm_writel(WDT_START_2, bcm63xx_wdt_device.regs + WDT_CTL_REG);
-}
-
-static void bcm63xx_wdt_hw_stop(void)
-{
- bcm_writel(WDT_STOP_1, bcm63xx_wdt_device.regs + WDT_CTL_REG);
- bcm_writel(WDT_STOP_2, bcm63xx_wdt_device.regs + WDT_CTL_REG);
-}
-
-static void bcm63xx_wdt_isr(void *data)
-{
- struct pt_regs *regs = get_irq_regs();
-
- die(PFX " fire", regs);
-}
-
-static void bcm63xx_timer_tick(struct timer_list *unused)
-{
- if (!atomic_dec_and_test(&bcm63xx_wdt_device.ticks)) {
- bcm63xx_wdt_hw_start();
- mod_timer(&bcm63xx_wdt_device.timer, jiffies + HZ);
- } else
- pr_crit("watchdog will restart system\n");
-}
-
-static void bcm63xx_wdt_pet(void)
-{
- atomic_set(&bcm63xx_wdt_device.ticks, wdt_time);
-}
-
-static void bcm63xx_wdt_start(void)
-{
- bcm63xx_wdt_pet();
- bcm63xx_timer_tick(0);
-}
-
-static void bcm63xx_wdt_pause(void)
-{
- del_timer_sync(&bcm63xx_wdt_device.timer);
- bcm63xx_wdt_hw_stop();
-}
-
-static int bcm63xx_wdt_settimeout(int new_time)
-{
- if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
- return -EINVAL;
-
- wdt_time = new_time;
-
- return 0;
-}
-
-static int bcm63xx_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(0, &bcm63xx_wdt_device.inuse))
- return -EBUSY;
-
- bcm63xx_wdt_start();
- return stream_open(inode, file);
-}
-
-static int bcm63xx_wdt_release(struct inode *inode, struct file *file)
-{
- if (expect_close == 42)
- bcm63xx_wdt_pause();
- else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- bcm63xx_wdt_start();
- }
- clear_bit(0, &bcm63xx_wdt_device.inuse);
- expect_close = 0;
- return 0;
-}
-
-static ssize_t bcm63xx_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
-{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- /* In case it was set long ago */
- expect_close = 0;
-
- for (i = 0; i != len; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 42;
- }
- }
- bcm63xx_wdt_pet();
- }
- return len;
-}
-
-static struct watchdog_info bcm63xx_wdt_info = {
- .identity = PFX,
- .options = WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING |
- WDIOF_MAGICCLOSE,
-};
-
-
-static long bcm63xx_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_value, retval = -EINVAL;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &bcm63xx_wdt_info,
- sizeof(bcm63xx_wdt_info)) ? -EFAULT : 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_SETOPTIONS:
- if (get_user(new_value, p))
- return -EFAULT;
-
- if (new_value & WDIOS_DISABLECARD) {
- bcm63xx_wdt_pause();
- retval = 0;
- }
- if (new_value & WDIOS_ENABLECARD) {
- bcm63xx_wdt_start();
- retval = 0;
- }
-
- return retval;
-
- case WDIOC_KEEPALIVE:
- bcm63xx_wdt_pet();
- return 0;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_value, p))
- return -EFAULT;
-
- if (bcm63xx_wdt_settimeout(new_value))
- return -EINVAL;
-
- bcm63xx_wdt_pet();
-
- fallthrough;
-
- case WDIOC_GETTIMEOUT:
- return put_user(wdt_time, p);
-
- default:
- return -ENOTTY;
-
- }
-}
-
-static const struct file_operations bcm63xx_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = bcm63xx_wdt_write,
- .unlocked_ioctl = bcm63xx_wdt_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
- .open = bcm63xx_wdt_open,
- .release = bcm63xx_wdt_release,
-};
-
-static struct miscdevice bcm63xx_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &bcm63xx_wdt_fops,
-};
-
-
-static int bcm63xx_wdt_probe(struct platform_device *pdev)
-{
- int ret;
- struct resource *r;
-
- timer_setup(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "failed to get resources\n");
- return -ENODEV;
- }
-
- bcm63xx_wdt_device.regs = devm_ioremap(&pdev->dev, r->start,
- resource_size(r));
- if (!bcm63xx_wdt_device.regs) {
- dev_err(&pdev->dev, "failed to remap I/O resources\n");
- return -ENXIO;
- }
-
- ret = bcm63xx_timer_register(TIMER_WDT_ID, bcm63xx_wdt_isr, NULL);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register wdt timer isr\n");
- return ret;
- }
-
- if (bcm63xx_wdt_settimeout(wdt_time)) {
- bcm63xx_wdt_settimeout(WDT_DEFAULT_TIME);
- dev_info(&pdev->dev,
- ": wdt_time value must be 1 <= wdt_time <= 256, using %d\n",
- wdt_time);
- }
-
- ret = misc_register(&bcm63xx_wdt_miscdev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register watchdog device\n");
- goto unregister_timer;
- }
-
- dev_info(&pdev->dev, " started, timer margin: %d sec\n",
- WDT_DEFAULT_TIME);
-
- return 0;
-
-unregister_timer:
- bcm63xx_timer_unregister(TIMER_WDT_ID);
- return ret;
-}
-
-static int bcm63xx_wdt_remove(struct platform_device *pdev)
-{
- if (!nowayout)
- bcm63xx_wdt_pause();
-
- misc_deregister(&bcm63xx_wdt_miscdev);
- bcm63xx_timer_unregister(TIMER_WDT_ID);
- return 0;
-}
-
-static void bcm63xx_wdt_shutdown(struct platform_device *pdev)
-{
- bcm63xx_wdt_pause();
-}
-
-static struct platform_driver bcm63xx_wdt_driver = {
- .probe = bcm63xx_wdt_probe,
- .remove = bcm63xx_wdt_remove,
- .shutdown = bcm63xx_wdt_shutdown,
- .driver = {
- .name = "bcm63xx-wdt",
- }
-};
-
-module_platform_driver(bcm63xx_wdt_driver);
-
-MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_DESCRIPTION("Driver for the Broadcom BCM63xx SoC watchdog");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:bcm63xx-wdt");
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index acaaa0005d5b..8656a137e9a4 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/bcm7038_wdt.h>
#include <linux/pm.h>
#include <linux/watchdog.h>
@@ -133,8 +134,10 @@ static void bcm7038_clk_disable_unprepare(void *data)
static int bcm7038_wdt_probe(struct platform_device *pdev)
{
+ struct bcm7038_wdt_platform_data *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct bcm7038_watchdog *wdt;
+ const char *clk_name = NULL;
int err;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -147,7 +150,10 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- wdt->clk = devm_clk_get(dev, NULL);
+ if (pdata && pdata->clk_name)
+ clk_name = pdata->clk_name;
+
+ wdt->clk = devm_clk_get(dev, clk_name);
/* If unable to get clock, use default frequency */
if (!IS_ERR(wdt->clk)) {
err = clk_prepare_enable(wdt->clk);
@@ -217,8 +223,15 @@ static const struct of_device_id bcm7038_wdt_match[] = {
};
MODULE_DEVICE_TABLE(of, bcm7038_wdt_match);
+static const struct platform_device_id bcm7038_wdt_devtype[] = {
+ { .name = "bcm63xx-wdt" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, bcm7038_wdt_devtype);
+
static struct platform_driver bcm7038_wdt_driver = {
.probe = bcm7038_wdt_probe,
+ .id_table = bcm7038_wdt_devtype,
.driver = {
.name = "bcm7038-wdt",
.of_match_table = bcm7038_wdt_match,
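
With the id_table in place, legacy bcm63xx board code can instantiate this driver under the retired "bcm63xx-wdt" name and hand it the right clock via platform data. A hedged sketch of such a registration (the "periph" clock name is an assumption; this series only shows the clk_name field):

	#include <linux/platform_data/bcm7038_wdt.h>
	#include <linux/platform_device.h>

	static const struct bcm7038_wdt_platform_data bcm63xx_wdt_pdata = {
		.clk_name = "periph",	/* assumed board-specific clock name */
	};

	static struct platform_device bcm63xx_wdt_device = {
		.name	= "bcm63xx-wdt",
		.id	= -1,
		.dev	= {
			.platform_data = (void *)&bcm63xx_wdt_pdata,
		},
	};

	/* registered from board setup code with platform_device_register() */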
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index d79ce64e26a9..9adad1862bbd 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/mfd/da9063/core.h>
@@ -169,14 +170,19 @@ static int da9063_wdt_restart(struct watchdog_device *wdd, unsigned long action,
void *data)
{
struct da9063 *da9063 = watchdog_get_drvdata(wdd);
+ struct i2c_client *client = to_i2c_client(da9063->dev);
int ret;
- ret = regmap_write(da9063->regmap, DA9063_REG_CONTROL_F,
- DA9063_SHUTDOWN);
- if (ret)
+ /* Don't use regmap here because it is not safe in atomic context */
+ ret = i2c_smbus_write_byte_data(client, DA9063_REG_CONTROL_F,
+ DA9063_SHUTDOWN);
+ if (ret < 0)
dev_alert(da9063->dev, "Failed to shutdown (err = %d)\n",
ret);
+ /* wait for reset to assert... */
+ mdelay(500);
+
return ret;
}
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index e6eaba6bae5b..584a56893b81 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -134,7 +134,7 @@ static unsigned int davinci_wdt_get_timeleft(struct watchdog_device *wdd)
timer_counter = ioread32(davinci_wdt->base + TIM12);
timer_counter |= ((u64)ioread32(davinci_wdt->base + TIM34) << 32);
- do_div(timer_counter, freq);
+ timer_counter = div64_ul(timer_counter, freq);
return wdd->timeout - timer_counter;
}
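
The one-liner above hinges on the differing contracts of the two division helpers: do_div() takes a 32-bit divisor and rewrites its 64-bit dividend in place (returning the remainder), while div64_ul() takes an unsigned long divisor and returns the quotient. Since freq is an unsigned long, div64_ul() is the matching helper. Illustrative snippet (placeholder variables):

	#include <linux/math64.h>

	u64 q1 = timer_counter, q2;
	u32 rem;

	rem = do_div(q1, (u32)freq);		/* q1 is now the quotient */
	q2 = div64_ul(timer_counter, freq);	/* quotient returned directly */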
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index ee90c5f943f9..7f59c680de25 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -49,6 +49,7 @@
#define SIO_F81803_ID 0x1210 /* Chipset ID */
#define SIO_F81865_ID 0x0704 /* Chipset ID */
#define SIO_F81866_ID 0x1010 /* Chipset ID */
+#define SIO_F81966_ID 0x1502 /* F81804 chipset ID, same for f81966 */
#define F71808FG_REG_WDO_CONF 0xf0
#define F71808FG_REG_WDT_CONF 0xf5
@@ -105,7 +106,7 @@ MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
" given initial timeout. Zero (default) disables this feature.");
enum chips { f71808fg, f71858fg, f71862fg, f71868, f71869, f71882fg, f71889fg,
- f81803, f81865, f81866};
+ f81803, f81865, f81866, f81966};
static const char * const fintek_wdt_names[] = {
"f71808fg",
@@ -118,6 +119,7 @@ static const char * const fintek_wdt_names[] = {
"f81803",
"f81865",
"f81866",
+ "f81966"
};
/* Super-I/O Function prototypes */
@@ -347,6 +349,7 @@ static int fintek_wdt_start(struct watchdog_device *wdd)
break;
case f81866:
+ case f81966:
/*
* GPIO1 Control Register when 27h BIT3:2 = 01 & BIT0 = 0.
* The PIN 70(GPIO15/WDTRST) is controlled by 2Ch:
@@ -373,7 +376,7 @@ static int fintek_wdt_start(struct watchdog_device *wdd)
superio_select(wd->sioaddr, SIO_F71808FG_LD_WDT);
superio_set_bit(wd->sioaddr, SIO_REG_ENABLE, 0);
- if (wd->type == f81865 || wd->type == f81866)
+ if (wd->type == f81865 || wd->type == f81866 || wd->type == f81966)
superio_set_bit(wd->sioaddr, F81865_REG_WDO_CONF,
F81865_FLAG_WDOUT_EN);
else
@@ -580,6 +583,9 @@ static int __init fintek_wdt_find(int sioaddr)
case SIO_F81866_ID:
type = f81866;
break;
+ case SIO_F81966_ID:
+ type = f81966;
+ break;
default:
pr_info("Unrecognized Fintek device: %04x\n",
(unsigned int)devid);
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 945f5e65db57..d3c9e2f6e63b 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -198,7 +198,6 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
- watchdog_stop_on_reboot(&data->wdt_dev);
return devm_watchdog_register_device(dev, &data->wdt_dev);
}
diff --git a/drivers/watchdog/msc313e_wdt.c b/drivers/watchdog/msc313e_wdt.c
index 0d497aa0fb7d..90171431fc59 100644
--- a/drivers/watchdog/msc313e_wdt.c
+++ b/drivers/watchdog/msc313e_wdt.c
@@ -120,6 +120,10 @@ static int msc313e_wdt_probe(struct platform_device *pdev)
priv->wdev.max_timeout = U32_MAX / clk_get_rate(priv->clk);
priv->wdev.timeout = MSC313E_WDT_DEFAULT_TIMEOUT;
+ /* If the period is non-zero the WDT is running */
+ if (readw(priv->base + REG_WDT_MAX_PRD_L) | (readw(priv->base + REG_WDT_MAX_PRD_H) << 16))
+ set_bit(WDOG_HW_RUNNING, &priv->wdev.status);
+
watchdog_set_drvdata(&priv->wdev, priv);
watchdog_init_timeout(&priv->wdev, timeout, dev);
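
The WDOG_HW_RUNNING bit set above is how a driver tells the watchdog core that the hardware was left ticking, typically by the bootloader; the core then pings the device itself until userspace opens it, instead of letting it bite before boot finishes. Generic shape of the pattern (with a hypothetical readback helper):

	/* hypothetical helper standing in for the 32-bit period readback */
	if (wdt_hw_is_running(priv))
		set_bit(WDOG_HW_RUNNING, &priv->wdev.status);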
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 543cf38bd04e..4577a76dd464 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -339,7 +339,7 @@ static int mtk_wdt_probe(struct platform_device *pdev)
if (IS_ERR(mtk_wdt->wdt_base))
return PTR_ERR(mtk_wdt->wdt_base);
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
err = devm_request_irq(&pdev->dev, irq, mtk_wdt_isr, 0, "wdt_bark",
&mtk_wdt->wdt_dev);
diff --git a/drivers/watchdog/realtek_otto_wdt.c b/drivers/watchdog/realtek_otto_wdt.c
new file mode 100644
index 000000000000..60058a0c3ec4
--- /dev/null
+++ b/drivers/watchdog/realtek_otto_wdt.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Realtek Otto MIPS platform watchdog
+ *
+ * Watchdog timer that will reset the system after timeout, using the selected
+ * reset mode.
+ *
+ * Counter scaling and timeouts:
+ * - Base prescale of (1 << 25), providing tick duration T_0: 168ms @ 200MHz
+ * - PRESCALE: logarithmic prescaler adding a factor of {1, 2, 4, 8}
+ * - Phase 1: Times out after (PHASE1 + 1) × PRESCALE × T_0
+ * Generates an interrupt; the WDT cannot be stopped after phase 1
+ * - Phase 2: starts after phase 1, times out after (PHASE2 + 1) × PRESCALE × T_0
+ * Resets the system according to RST_MODE
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define OTTO_WDT_REG_CNTR 0x0
+#define OTTO_WDT_CNTR_PING BIT(31)
+
+#define OTTO_WDT_REG_INTR 0x4
+#define OTTO_WDT_INTR_PHASE_1 BIT(31)
+#define OTTO_WDT_INTR_PHASE_2 BIT(30)
+
+#define OTTO_WDT_REG_CTRL 0x8
+#define OTTO_WDT_CTRL_ENABLE BIT(31)
+#define OTTO_WDT_CTRL_PRESCALE GENMASK(30, 29)
+#define OTTO_WDT_CTRL_PHASE1 GENMASK(26, 22)
+#define OTTO_WDT_CTRL_PHASE2 GENMASK(19, 15)
+#define OTTO_WDT_CTRL_RST_MODE GENMASK(1, 0)
+#define OTTO_WDT_MODE_SOC 0
+#define OTTO_WDT_MODE_CPU 1
+#define OTTO_WDT_MODE_SOFTWARE 2
+#define OTTO_WDT_CTRL_DEFAULT OTTO_WDT_MODE_CPU
+
+#define OTTO_WDT_PRESCALE_MAX 3
+
+/*
+ * One higher than the max values contained in PHASE{1,2}, since a value of 0
+ * corresponds to one tick.
+ */
+#define OTTO_WDT_PHASE_TICKS_MAX 32
+
+/*
+ * The maximum reset delay is actually 2×32 ticks, but that would require large
+ * pretimeout values for timeouts longer than 32 ticks. Limit the maximum timeout
+ * to 32 + 1 to ensure small pretimeout values can be configured as expected.
+ */
+#define OTTO_WDT_TIMEOUT_TICKS_MAX (OTTO_WDT_PHASE_TICKS_MAX + 1)
+
+struct otto_wdt_ctrl {
+ struct watchdog_device wdev;
+ struct device *dev;
+ void __iomem *base;
+ unsigned int clk_rate_khz;
+ int irq_phase1;
+};
+
+static int otto_wdt_start(struct watchdog_device *wdev)
+{
+ struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev);
+ u32 v;
+
+ v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL);
+ v |= OTTO_WDT_CTRL_ENABLE;
+ iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ return 0;
+}
+
+static int otto_wdt_stop(struct watchdog_device *wdev)
+{
+ struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev);
+ u32 v;
+
+ v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL);
+ v &= ~OTTO_WDT_CTRL_ENABLE;
+ iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ return 0;
+}
+
+static int otto_wdt_ping(struct watchdog_device *wdev)
+{
+ struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev);
+
+ iowrite32(OTTO_WDT_CNTR_PING, ctrl->base + OTTO_WDT_REG_CNTR);
+
+ return 0;
+}
+
+static int otto_wdt_tick_ms(struct otto_wdt_ctrl *ctrl, int prescale)
+{
+ return DIV_ROUND_CLOSEST(1 << (25 + prescale), ctrl->clk_rate_khz);
+}
+
+/*
+ * The timer asserts the PHASE1/PHASE2 IRQs when the number of ticks exceeds
+ * the value stored in those fields. This means each phase will run for at least
+ * one tick, so small values need to be clamped to correctly reflect the timeout.
+ */
+static inline unsigned int div_round_ticks(unsigned int val, unsigned int tick_duration,
+ unsigned int min_ticks)
+{
+ return max(min_ticks, DIV_ROUND_UP(val, tick_duration));
+}
+
+static int otto_wdt_determine_timeouts(struct watchdog_device *wdev, unsigned int timeout,
+ unsigned int pretimeout)
+{
+ struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev);
+ unsigned int pretimeout_ms = pretimeout * 1000;
+ unsigned int timeout_ms = timeout * 1000;
+ unsigned int prescale_next = 0;
+ unsigned int phase1_ticks;
+ unsigned int phase2_ticks;
+ unsigned int total_ticks;
+ unsigned int prescale;
+ unsigned int tick_ms;
+ u32 v;
+
+ do {
+ prescale = prescale_next;
+ if (prescale > OTTO_WDT_PRESCALE_MAX)
+ return -EINVAL;
+
+ tick_ms = otto_wdt_tick_ms(ctrl, prescale);
+ total_ticks = div_round_ticks(timeout_ms, tick_ms, 2);
+ phase1_ticks = div_round_ticks(timeout_ms - pretimeout_ms, tick_ms, 1);
+ phase2_ticks = total_ticks - phase1_ticks;
+
+ prescale_next++;
+ } while (phase1_ticks > OTTO_WDT_PHASE_TICKS_MAX
+ || phase2_ticks > OTTO_WDT_PHASE_TICKS_MAX);
+
+ v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL);
+
+ v &= ~(OTTO_WDT_CTRL_PRESCALE | OTTO_WDT_CTRL_PHASE1 | OTTO_WDT_CTRL_PHASE2);
+ v |= FIELD_PREP(OTTO_WDT_CTRL_PHASE1, phase1_ticks - 1);
+ v |= FIELD_PREP(OTTO_WDT_CTRL_PHASE2, phase2_ticks - 1);
+ v |= FIELD_PREP(OTTO_WDT_CTRL_PRESCALE, prescale);
+
+ iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ timeout_ms = total_ticks * tick_ms;
+ ctrl->wdev.timeout = timeout_ms / 1000;
+
+ pretimeout_ms = phase2_ticks * tick_ms;
+ ctrl->wdev.pretimeout = pretimeout_ms / 1000;
+
+ return 0;
+}
+
+static int otto_wdt_set_timeout(struct watchdog_device *wdev, unsigned int val)
+{
+ return otto_wdt_determine_timeouts(wdev, val, min(wdev->pretimeout, val - 1));
+}
+
+static int otto_wdt_set_pretimeout(struct watchdog_device *wdev, unsigned int val)
+{
+ return otto_wdt_determine_timeouts(wdev, wdev->timeout, val);
+}
+
+static int otto_wdt_restart(struct watchdog_device *wdev, unsigned long reboot_mode,
+ void *data)
+{
+ struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev);
+ u32 reset_mode;
+ u32 v;
+
+ disable_irq(ctrl->irq_phase1);
+
+ switch (reboot_mode) {
+ case REBOOT_SOFT:
+ reset_mode = OTTO_WDT_MODE_SOFTWARE;
+ break;
+ case REBOOT_WARM:
+ reset_mode = OTTO_WDT_MODE_CPU;
+ break;
+ default:
+ reset_mode = OTTO_WDT_MODE_SOC;
+ break;
+ }
+
+ /* Configure for shortest timeout and wait for reset to occur */
+ v = FIELD_PREP(OTTO_WDT_CTRL_RST_MODE, reset_mode) | OTTO_WDT_CTRL_ENABLE;
+ iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ mdelay(3 * otto_wdt_tick_ms(ctrl, 0));
+
+ return 0;
+}
+
+static irqreturn_t otto_wdt_phase1_isr(int irq, void *dev_id)
+{
+ struct otto_wdt_ctrl *ctrl = dev_id;
+
+ iowrite32(OTTO_WDT_INTR_PHASE_1, ctrl->base + OTTO_WDT_REG_INTR);
+ dev_crit(ctrl->dev, "phase 1 timeout\n");
+ watchdog_notify_pretimeout(&ctrl->wdev);
+
+ return IRQ_HANDLED;
+}
+
+static const struct watchdog_ops otto_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = otto_wdt_start,
+ .stop = otto_wdt_stop,
+ .ping = otto_wdt_ping,
+ .set_timeout = otto_wdt_set_timeout,
+ .set_pretimeout = otto_wdt_set_pretimeout,
+ .restart = otto_wdt_restart,
+};
+
+static const struct watchdog_info otto_wdt_info = {
+ .identity = "Realtek Otto watchdog timer",
+ .options = WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE |
+ WDIOF_SETTIMEOUT |
+ WDIOF_PRETIMEOUT,
+};
+
+static void otto_wdt_clock_action(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int otto_wdt_probe_clk(struct otto_wdt_ctrl *ctrl)
+{
+ struct clk *clk = devm_clk_get(ctrl->dev, NULL);
+ int ret;
+
+ if (IS_ERR(clk))
+ return dev_err_probe(ctrl->dev, PTR_ERR(clk), "Failed to get clock\n");
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return dev_err_probe(ctrl->dev, ret, "Failed to enable clock\n");
+
+ ret = devm_add_action_or_reset(ctrl->dev, otto_wdt_clock_action, clk);
+ if (ret)
+ return ret;
+
+ ctrl->clk_rate_khz = clk_get_rate(clk) / 1000;
+ if (ctrl->clk_rate_khz == 0)
+ return dev_err_probe(ctrl->dev, -ENXIO, "Failed to get clock rate\n");
+
+ return 0;
+}
+
+static int otto_wdt_probe_reset_mode(struct otto_wdt_ctrl *ctrl)
+{
+ static const char *mode_property = "realtek,reset-mode";
+ const struct fwnode_handle *node = ctrl->dev->fwnode;
+ int mode_count;
+ u32 mode;
+ u32 v;
+
+ if (!node)
+ return -ENXIO;
+
+ mode_count = fwnode_property_string_array_count(node, mode_property);
+ if (mode_count < 0)
+ return mode_count;
+ else if (mode_count == 0)
+ return 0;
+ else if (mode_count != 1)
+ return -EINVAL;
+
+ if (fwnode_property_match_string(node, mode_property, "soc") == 0)
+ mode = OTTO_WDT_MODE_SOC;
+ else if (fwnode_property_match_string(node, mode_property, "cpu") == 0)
+ mode = OTTO_WDT_MODE_CPU;
+ else if (fwnode_property_match_string(node, mode_property, "software") == 0)
+ mode = OTTO_WDT_MODE_SOFTWARE;
+ else
+ return -EINVAL;
+
+ v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL);
+ v &= ~OTTO_WDT_CTRL_RST_MODE;
+ v |= FIELD_PREP(OTTO_WDT_CTRL_RST_MODE, mode);
+ iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ return 0;
+}
+
+static int otto_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct otto_wdt_ctrl *ctrl;
+ unsigned int max_tick_ms;
+ int ret;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = dev;
+ ctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
+
+ /* Clear any old interrupts and reset initial state */
+ iowrite32(OTTO_WDT_INTR_PHASE_1 | OTTO_WDT_INTR_PHASE_2,
+ ctrl->base + OTTO_WDT_REG_INTR);
+ iowrite32(OTTO_WDT_CTRL_DEFAULT, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ ret = otto_wdt_probe_clk(ctrl);
+ if (ret)
+ return ret;
+
+ ctrl->irq_phase1 = platform_get_irq_byname(pdev, "phase1");
+ if (ctrl->irq_phase1 < 0)
+ return ctrl->irq_phase1;
+
+ ret = devm_request_irq(dev, ctrl->irq_phase1, otto_wdt_phase1_isr, 0,
+ "realtek-otto-wdt", ctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get IRQ for phase1\n");
+
+ ret = otto_wdt_probe_reset_mode(ctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "Invalid reset mode specified\n");
+
+ ctrl->wdev.parent = dev;
+ ctrl->wdev.info = &otto_wdt_info;
+ ctrl->wdev.ops = &otto_wdt_ops;
+
+ /*
+ * Since pretimeout cannot be disabled, min. timeout is twice the
+ * subsystem resolution. Max. timeout is ca. 43s at a bus clock of 200MHz.
+ */
+ ctrl->wdev.min_timeout = 2;
+ max_tick_ms = otto_wdt_tick_ms(ctrl, OTTO_WDT_PRESCALE_MAX);
+ ctrl->wdev.max_hw_heartbeat_ms = max_tick_ms * OTTO_WDT_TIMEOUT_TICKS_MAX;
+ ctrl->wdev.timeout = min(30U, ctrl->wdev.max_hw_heartbeat_ms / 1000);
+
+ watchdog_set_drvdata(&ctrl->wdev, ctrl);
+ watchdog_init_timeout(&ctrl->wdev, 0, dev);
+ watchdog_stop_on_reboot(&ctrl->wdev);
+ watchdog_set_restart_priority(&ctrl->wdev, 128);
+
+ ret = otto_wdt_determine_timeouts(&ctrl->wdev, ctrl->wdev.timeout, 1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set timeout\n");
+
+ return devm_watchdog_register_device(dev, &ctrl->wdev);
+}
+
+static const struct of_device_id otto_wdt_ids[] = {
+ { .compatible = "realtek,rtl8380-wdt" },
+ { .compatible = "realtek,rtl8390-wdt" },
+ { .compatible = "realtek,rtl9300-wdt" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, otto_wdt_ids);
+
+static struct platform_driver otto_wdt_driver = {
+ .probe = otto_wdt_probe,
+ .driver = {
+ .name = "realtek-otto-watchdog",
+ .of_match_table = otto_wdt_ids,
+ },
+};
+module_platform_driver(otto_wdt_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
+MODULE_DESCRIPTION("Realtek Otto watchdog timer driver");
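
The header comment's scaling rules are easy to sanity-check. A sketch of the tick computation together with a worked example (same formula as otto_wdt_tick_ms() above):

	#include <linux/kernel.h>

	static unsigned int otto_tick_ms(unsigned int clk_rate_khz, int prescale)
	{
		return DIV_ROUND_CLOSEST(1U << (25 + prescale), clk_rate_khz);
	}

	/*
	 * At 200 MHz (200000 kHz) and prescale 0: 2^25 / 200000 ≈ 168 ms per
	 * tick. A PHASE1 field of 29 then expires after (29 + 1) * 168 ms
	 * ≈ 5.0 s, at which point the phase 1 (pretimeout) interrupt fires;
	 * phase 2 follows with its own tick budget before the configured
	 * reset is asserted.
	 */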
diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
new file mode 100644
index 000000000000..6b426df34fd6
--- /dev/null
+++ b/drivers/watchdog/rzg2l_wdt.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L WDT Watchdog Driver
+ *
+ * Copyright (C) 2021 Renesas Electronics Corporation
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/units.h>
+#include <linux/watchdog.h>
+
+#define WDTCNT 0x00
+#define WDTSET 0x04
+#define WDTTIM 0x08
+#define WDTINT 0x0C
+#define WDTCNT_WDTEN BIT(0)
+#define WDTINT_INTDISP BIT(0)
+
+#define WDT_DEFAULT_TIMEOUT 60U
+
+/* Only the 12-bit field WDTSET[31:20] of the period setting register is used */
+#define WDTSET_COUNTER_MASK (0xFFF00000)
+#define WDTSET_COUNTER_VAL(f) ((f) << 20)
+
+#define F2CYCLE_NSEC(f) (1000000000 / (f))
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct rzg2l_wdt_priv {
+ void __iomem *base;
+ struct watchdog_device wdev;
+ struct reset_control *rstc;
+ unsigned long osc_clk_rate;
+ unsigned long delay;
+};
+
+static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
+{
+ /* delay timer when change the setting register */
+ ndelay(priv->delay);
+}
+
+static u32 rzg2l_wdt_get_cycle_usec(unsigned long cycle, u32 wdttime)
+{
+ u64 timer_cycle_us = 1024 * 1024 * (wdttime + 1) * MICRO;
+
+ return div64_ul(timer_cycle_us, cycle);
+}
+
+static void rzg2l_wdt_write(struct rzg2l_wdt_priv *priv, u32 val, unsigned int reg)
+{
+ if (reg == WDTSET)
+ val &= WDTSET_COUNTER_MASK;
+
+ writel_relaxed(val, priv->base + reg);
+ /* Registers other than WDTINT are always synchronized with WDT_CLK */
+ if (reg != WDTINT)
+ rzg2l_wdt_wait_delay(priv);
+}
+
+static void rzg2l_wdt_init_timeout(struct watchdog_device *wdev)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u32 time_out;
+
+ /* Clear Lapsed Time Register and clear Interrupt */
+ rzg2l_wdt_write(priv, WDTINT_INTDISP, WDTINT);
+ /* Two consecutive overflow cycles are needed to trigger a reset */
+ time_out = (wdev->timeout * (MICRO / 2)) /
+ rzg2l_wdt_get_cycle_usec(priv->osc_clk_rate, 0);
+ rzg2l_wdt_write(priv, WDTSET_COUNTER_VAL(time_out), WDTSET);
+}
+
+static int rzg2l_wdt_start(struct watchdog_device *wdev)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ reset_control_deassert(priv->rstc);
+ pm_runtime_get_sync(wdev->parent);
+
+ /* Initialize timeout */
+ rzg2l_wdt_init_timeout(wdev);
+
+ /* Initialize watchdog counter register */
+ rzg2l_wdt_write(priv, 0, WDTTIM);
+
+ /* Enable watchdog timer */
+ rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT);
+
+ return 0;
+}
+
+static int rzg2l_wdt_stop(struct watchdog_device *wdev)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ pm_runtime_put(wdev->parent);
+ reset_control_assert(priv->rstc);
+
+ return 0;
+}
+
+static int rzg2l_wdt_restart(struct watchdog_device *wdev,
+ unsigned long action, void *data)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ /* Reset the module before we modify any register */
+ reset_control_reset(priv->rstc);
+ pm_runtime_get_sync(wdev->parent);
+
+ /* Smallest counter value so the reboot triggers as soon as possible */
+ rzg2l_wdt_write(priv, WDTSET_COUNTER_VAL(1), WDTSET);
+
+ /* Enable watchdog timer */
+ rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT);
+
+ return 0;
+}
+
+static const struct watchdog_info rzg2l_wdt_ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+ .identity = "Renesas RZ/G2L WDT Watchdog",
+};
+
+static int rzg2l_wdt_ping(struct watchdog_device *wdev)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ rzg2l_wdt_write(priv, WDTINT_INTDISP, WDTINT);
+
+ return 0;
+}
+
+static const struct watchdog_ops rzg2l_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rzg2l_wdt_start,
+ .stop = rzg2l_wdt_stop,
+ .ping = rzg2l_wdt_ping,
+ .restart = rzg2l_wdt_restart,
+};
+
+static void rzg2l_wdt_reset_assert_pm_disable_put(void *data)
+{
+ struct watchdog_device *wdev = data;
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ pm_runtime_put(wdev->parent);
+ pm_runtime_disable(wdev->parent);
+ reset_control_assert(priv->rstc);
+}
+
+static int rzg2l_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rzg2l_wdt_priv *priv;
+ unsigned long pclk_rate;
+ struct clk *wdt_clk;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /* Get watchdog main clock */
+ wdt_clk = clk_get(&pdev->dev, "oscclk");
+ if (IS_ERR(wdt_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no oscclk");
+
+ priv->osc_clk_rate = clk_get_rate(wdt_clk);
+ clk_put(wdt_clk);
+ if (!priv->osc_clk_rate)
+ return dev_err_probe(&pdev->dev, -EINVAL, "oscclk rate is 0");
+
+ /* Get Peripheral clock */
+ wdt_clk = clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(wdt_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no pclk");
+
+ pclk_rate = clk_get_rate(wdt_clk);
+ clk_put(wdt_clk);
+ if (!pclk_rate)
+ return dev_err_probe(&pdev->dev, -EINVAL, "pclk rate is 0");
+
+ priv->delay = F2CYCLE_NSEC(priv->osc_clk_rate) * 6 + F2CYCLE_NSEC(pclk_rate) * 9;
+
+ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
+ "failed to get cpg reset");
+
+ reset_control_deassert(priv->rstc);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_resume_and_get failed ret=%pe", ERR_PTR(ret));
+ goto out_pm_get;
+ }
+
+ priv->wdev.info = &rzg2l_wdt_ident;
+ priv->wdev.ops = &rzg2l_wdt_ops;
+ priv->wdev.parent = dev;
+ priv->wdev.min_timeout = 1;
+ priv->wdev.max_timeout = rzg2l_wdt_get_cycle_usec(priv->osc_clk_rate, 0xfff) /
+ USEC_PER_SEC;
+ priv->wdev.timeout = WDT_DEFAULT_TIMEOUT;
+
+ watchdog_set_drvdata(&priv->wdev, priv);
+ ret = devm_add_action_or_reset(&pdev->dev,
+ rzg2l_wdt_reset_assert_pm_disable_put,
+ &priv->wdev);
+ if (ret < 0)
+ return ret;
+
+ watchdog_set_nowayout(&priv->wdev, nowayout);
+ watchdog_stop_on_unregister(&priv->wdev);
+
+ ret = watchdog_init_timeout(&priv->wdev, 0, dev);
+ if (ret)
+ dev_warn(dev, "Specified timeout invalid, using default");
+
+ return devm_watchdog_register_device(&pdev->dev, &priv->wdev);
+
+out_pm_get:
+ pm_runtime_disable(dev);
+ reset_control_assert(priv->rstc);
+
+ return ret;
+}
+
+static const struct of_device_id rzg2l_wdt_ids[] = {
+ { .compatible = "renesas,rzg2l-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzg2l_wdt_ids);
+
+static struct platform_driver rzg2l_wdt_driver = {
+ .driver = {
+ .name = "rzg2l_wdt",
+ .of_match_table = rzg2l_wdt_ids,
+ },
+ .probe = rzg2l_wdt_probe,
+};
+module_platform_driver(rzg2l_wdt_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/G2L WDT Watchdog Driver");
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_LICENSE("GPL v2");
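
The WDTSET arithmetic above is easier to follow with numbers plugged in. A worked example, assuming a 24 MHz oscillator (the OSCCLK rate is board-specific):

	/*
	 * cycle_us(wdttime) = 1024 * 1024 * (wdttime + 1) * MICRO / osc_clk_rate
	 *
	 * With osc_clk_rate = 24 MHz:
	 *   cycle_us(0)     = 1048576 * 1000000 / 24000000 ≈ 43691 us
	 *   time_out        = 60 s * (MICRO / 2) / 43691   ≈ 686, written
	 *                     to WDTSET[31:20]
	 *   cycle_us(0xfff) = 4096 * 43691 us ≈ 179 s, the max_timeout bound
	 *
	 * The MICRO / 2 factor encodes the two consecutive overflow cycles
	 * the hardware requires before asserting the reset.
	 */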
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 2395f353e52d..6db22f2e3a4f 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -56,13 +56,58 @@
#define EXYNOS5_RST_STAT_REG_OFFSET 0x0404
#define EXYNOS5_WDT_DISABLE_REG_OFFSET 0x0408
#define EXYNOS5_WDT_MASK_RESET_REG_OFFSET 0x040c
-#define QUIRK_HAS_PMU_CONFIG (1 << 0)
-#define QUIRK_HAS_RST_STAT (1 << 1)
-#define QUIRK_HAS_WTCLRINT_REG (1 << 2)
+#define EXYNOS850_CLUSTER0_NONCPU_OUT 0x1220
+#define EXYNOS850_CLUSTER0_NONCPU_INT_EN 0x1244
+#define EXYNOS850_CLUSTER1_NONCPU_OUT 0x1620
+#define EXYNOS850_CLUSTER1_NONCPU_INT_EN 0x1644
+
+#define EXYNOS850_CLUSTER0_WDTRESET_BIT 24
+#define EXYNOS850_CLUSTER1_WDTRESET_BIT 23
+
+/**
+ * DOC: Quirk flags for different Samsung watchdog IP-cores
+ *
+ * This driver supports multiple Samsung SoCs, each of which might have a
+ * different set of registers and supported features. As the watchdog block
+ * sometimes requires modifying PMU registers for proper functioning, register
+ * differences in both the watchdog and PMU IP-cores must be accounted for. The
+ * quirk flags described below tell the driver about these SoC traits, and can
+ * be specified in the driver data for each supported device.
+ *
+ * %QUIRK_HAS_WTCLRINT_REG: Watchdog block has WTCLRINT register. It's used to
+ * clear the interrupt once the interrupt service routine is complete. It's
+ * write-only, writing any values to this register clears the interrupt, but
+ * reading is not permitted.
+ *
+ * %QUIRK_HAS_PMU_MASK_RESET: PMU block has the register for disabling/enabling
+ * WDT reset request. On old SoCs it's usually called MASK_WDT_RESET_REQUEST,
+ * newer SoCs have a CLUSTERx_NONCPU_INT_EN register, whose 'mask_bit' value
+ * is inverted compared to the former one.
+ *
+ * %QUIRK_HAS_PMU_RST_STAT: PMU block has RST_STAT (reset status) register,
+ * which contains bits indicating the reason for the most recent CPU reset. If
+ * present, the driver will use this register to check whether the previous
+ * reboot was due to a watchdog timer reset.
+ *
+ * %QUIRK_HAS_PMU_AUTO_DISABLE: PMU block has AUTOMATIC_WDT_RESET_DISABLE
+ * register. If the 'mask_bit' bit is set, the PMU will disable the WDT reset
+ * while the corresponding processor is in reset state.
+ *
+ * %QUIRK_HAS_PMU_CNT_EN: PMU block has a register (e.g. CLUSTERx_NONCPU_OUT)
+ * with a "watchdog counter enable" bit. That bit must be set to keep the
+ * watchdog counter running.
+ */
+#define QUIRK_HAS_WTCLRINT_REG (1 << 0)
+#define QUIRK_HAS_PMU_MASK_RESET (1 << 1)
+#define QUIRK_HAS_PMU_RST_STAT (1 << 2)
+#define QUIRK_HAS_PMU_AUTO_DISABLE (1 << 3)
+#define QUIRK_HAS_PMU_CNT_EN (1 << 4)
/* These quirks require that we have a PMU register map */
-#define QUIRKS_HAVE_PMUREG (QUIRK_HAS_PMU_CONFIG | \
- QUIRK_HAS_RST_STAT)
+#define QUIRKS_HAVE_PMUREG \
+ (QUIRK_HAS_PMU_MASK_RESET | QUIRK_HAS_PMU_RST_STAT | \
+ QUIRK_HAS_PMU_AUTO_DISABLE | QUIRK_HAS_PMU_CNT_EN)
static bool nowayout = WATCHDOG_NOWAYOUT;
static int tmr_margin;
@@ -90,26 +135,33 @@ MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, 0 to
* timer reset functionality.
* @mask_reset_reg: Offset in pmureg for the register that masks the watchdog
* timer reset functionality.
+ * @mask_reset_inv: If set, mask_reset_reg value will have inverted meaning.
* @mask_bit: Bit number for the watchdog timer in the disable register and the
* mask reset register.
* @rst_stat_reg: Offset in pmureg for the register that has the reset status.
* @rst_stat_bit: Bit number in the rst_stat register indicating a watchdog
* reset.
+ * @cnt_en_reg: Offset in pmureg for the register that enables WDT counter.
+ * @cnt_en_bit: Bit number for "watchdog counter enable" in cnt_en register.
* @quirks: A bitfield of quirks.
*/
struct s3c2410_wdt_variant {
int disable_reg;
int mask_reset_reg;
+ bool mask_reset_inv;
int mask_bit;
int rst_stat_reg;
int rst_stat_bit;
+ int cnt_en_reg;
+ int cnt_en_bit;
u32 quirks;
};
struct s3c2410_wdt {
struct device *dev;
- struct clk *clock;
+ struct clk *bus_clk; /* for register interface (PCLK) */
+ struct clk *src_clk; /* for WDT counter */
void __iomem *reg_base;
unsigned int count;
spinlock_t lock;
@@ -136,8 +188,8 @@ static const struct s3c2410_wdt_variant drv_data_exynos5250 = {
.mask_bit = 20,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
.rst_stat_bit = 20,
- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
- | QUIRK_HAS_WTCLRINT_REG,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_AUTO_DISABLE,
};
static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
@@ -146,8 +198,8 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
.mask_bit = 0,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
.rst_stat_bit = 9,
- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
- | QUIRK_HAS_WTCLRINT_REG,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_AUTO_DISABLE,
};
static const struct s3c2410_wdt_variant drv_data_exynos7 = {
@@ -156,8 +208,32 @@ static const struct s3c2410_wdt_variant drv_data_exynos7 = {
.mask_bit = 23,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
.rst_stat_bit = 23, /* A57 WDTRESET */
- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
- | QUIRK_HAS_WTCLRINT_REG,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_AUTO_DISABLE,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos850_cl0 = {
+ .mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOS850_CLUSTER0_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOS850_CLUSTER0_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = {
+ .mask_reset_reg = EXYNOS850_CLUSTER1_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOS850_CLUSTER1_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOS850_CLUSTER1_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
};
static const struct of_device_id s3c2410_wdt_match[] = {
@@ -171,6 +247,8 @@ static const struct of_device_id s3c2410_wdt_match[] = {
.data = &drv_data_exynos5420 },
{ .compatible = "samsung,exynos7-wdt",
.data = &drv_data_exynos7 },
+ { .compatible = "samsung,exynos850-wdt",
+ .data = &drv_data_exynos850_cl0 },
{},
};
MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
@@ -187,9 +265,14 @@ MODULE_DEVICE_TABLE(platform, s3c2410_wdt_ids);
/* functions */
-static inline unsigned int s3c2410wdt_max_timeout(struct clk *clock)
+static inline unsigned long s3c2410wdt_get_freq(struct s3c2410_wdt *wdt)
{
- unsigned long freq = clk_get_rate(clock);
+ return clk_get_rate(wdt->src_clk ? wdt->src_clk : wdt->bus_clk);
+}
+
+static inline unsigned int s3c2410wdt_max_timeout(struct s3c2410_wdt *wdt)
+{
+ const unsigned long freq = s3c2410wdt_get_freq(wdt);
return S3C2410_WTCNT_MAXCNT / (freq / (S3C2410_WTCON_PRESCALE_MAX + 1)
/ S3C2410_WTCON_MAXDIV);
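
For intuition on the bound computed above, a minimal sketch of the arithmetic, assuming the S3C2410 register limits (a 16-bit counter, an 8-bit prescaler, and a maximum divider of 128):

	/* Illustrative sketch, not part of the patch: the slowest tick rate
	 * is freq / 256 / 128, and the counter holds at most 0xffff ticks. */
	static unsigned int max_timeout_sketch(unsigned long freq)
	{
		return 0xffff / (freq / 256 / 128); /* e.g. 1 MHz -> 2184 s */
	}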
@@ -200,35 +283,74 @@ static inline struct s3c2410_wdt *freq_to_wdt(struct notifier_block *nb)
return container_of(nb, struct s3c2410_wdt, freq_transition);
}
-static int s3c2410wdt_mask_and_disable_reset(struct s3c2410_wdt *wdt, bool mask)
+static int s3c2410wdt_disable_wdt_reset(struct s3c2410_wdt *wdt, bool mask)
{
+ const u32 mask_val = BIT(wdt->drv_data->mask_bit);
+ const u32 val = mask ? mask_val : 0;
int ret;
- u32 mask_val = 1 << wdt->drv_data->mask_bit;
- u32 val = 0;
- /* No need to do anything if no PMU CONFIG needed */
- if (!(wdt->drv_data->quirks & QUIRK_HAS_PMU_CONFIG))
- return 0;
+ ret = regmap_update_bits(wdt->pmureg, wdt->drv_data->disable_reg,
+ mask_val, val);
+ if (ret < 0)
+ dev_err(wdt->dev, "failed to update reg(%d)\n", ret);
- if (mask)
- val = mask_val;
+ return ret;
+}
+
+static int s3c2410wdt_mask_wdt_reset(struct s3c2410_wdt *wdt, bool mask)
+{
+ const u32 mask_val = BIT(wdt->drv_data->mask_bit);
+ const bool val_inv = wdt->drv_data->mask_reset_inv;
+ const u32 val = (mask ^ val_inv) ? mask_val : 0;
+ int ret;
- ret = regmap_update_bits(wdt->pmureg,
- wdt->drv_data->disable_reg,
- mask_val, val);
+ ret = regmap_update_bits(wdt->pmureg, wdt->drv_data->mask_reset_reg,
+ mask_val, val);
if (ret < 0)
- goto error;
+ dev_err(wdt->dev, "failed to update reg(%d)\n", ret);
+
+ return ret;
+}
- ret = regmap_update_bits(wdt->pmureg,
- wdt->drv_data->mask_reset_reg,
- mask_val, val);
- error:
+static int s3c2410wdt_enable_counter(struct s3c2410_wdt *wdt, bool en)
+{
+ const u32 mask_val = BIT(wdt->drv_data->cnt_en_bit);
+ const u32 val = en ? mask_val : 0;
+ int ret;
+
+ ret = regmap_update_bits(wdt->pmureg, wdt->drv_data->cnt_en_reg,
+ mask_val, val);
if (ret < 0)
dev_err(wdt->dev, "failed to update reg(%d)\n", ret);
return ret;
}
+static int s3c2410wdt_enable(struct s3c2410_wdt *wdt, bool en)
+{
+ int ret;
+
+ if (wdt->drv_data->quirks & QUIRK_HAS_PMU_AUTO_DISABLE) {
+ ret = s3c2410wdt_disable_wdt_reset(wdt, !en);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (wdt->drv_data->quirks & QUIRK_HAS_PMU_MASK_RESET) {
+ ret = s3c2410wdt_mask_wdt_reset(wdt, !en);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (wdt->drv_data->quirks & QUIRK_HAS_PMU_CNT_EN) {
+ ret = s3c2410wdt_enable_counter(wdt, en);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
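
The three helpers above each map one quirk onto a single PMU register write; the only subtle case is QUIRK_HAS_PMU_MASK_RESET with mask_reset_inv set, where the PMU register is an enable rather than a mask, so the written value is flipped with XOR. A sketch of the resulting truth table for s3c2410wdt_mask_wdt_reset():

	/* Illustrative sketch, not part of the patch:
	 *   mask   mask_reset_inv   value written to mask_reset_reg
	 *   true   false            BIT(mask_bit)  reset masked (classic mask reg)
	 *   true   true             0              enable bit cleared (Exynos850)
	 *   false  false            0              reset delivered
	 *   false  true             BIT(mask_bit)  enable bit set, reset delivered
	 */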
static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
@@ -300,7 +422,7 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd,
unsigned int timeout)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
- unsigned long freq = clk_get_rate(wdt->clock);
+ unsigned long freq = s3c2410wdt_get_freq(wdt);
unsigned int count;
unsigned int divisor = 1;
unsigned long wtcon;
@@ -482,7 +604,7 @@ static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt)
unsigned int rst_stat;
int ret;
- if (!(wdt->drv_data->quirks & QUIRK_HAS_RST_STAT))
+ if (!(wdt->drv_data->quirks & QUIRK_HAS_PMU_RST_STAT))
return 0;
ret = regmap_read(wdt->pmureg, wdt->drv_data->rst_stat_reg, &rst_stat);
@@ -498,14 +620,40 @@ static inline const struct s3c2410_wdt_variant *
s3c2410_get_wdt_drv_data(struct platform_device *pdev)
{
const struct s3c2410_wdt_variant *variant;
+ struct device *dev = &pdev->dev;
- variant = of_device_get_match_data(&pdev->dev);
+ variant = of_device_get_match_data(dev);
if (!variant) {
/* Device matched by platform_device_id */
variant = (struct s3c2410_wdt_variant *)
platform_get_device_id(pdev)->driver_data;
}
+#ifdef CONFIG_OF
+ /* Choose the Exynos850 driver data according to the cluster index */
+ if (variant == &drv_data_exynos850_cl0) {
+ u32 index;
+ int err;
+
+ err = of_property_read_u32(dev->of_node,
+ "samsung,cluster-index", &index);
+ if (err) {
+ dev_err(dev, "failed to get cluster index\n");
+ return NULL;
+ }
+
+ switch (index) {
+ case 0:
+ return &drv_data_exynos850_cl0;
+ case 1:
+ return &drv_data_exynos850_cl1;
+ default:
+ dev_err(dev, "wrong cluster index: %u\n", index);
+ return NULL;
+ }
+ }
+#endif
+
return variant;
}
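
The cluster-index dispatch above is keyed off a DT property read from the matched node. A hypothetical binding fragment (node name and unit address are illustrative, not taken from the patch):

	/*
	 *   watchdog@10060000 {
	 *       compatible = "samsung,exynos850-wdt";
	 *       samsung,cluster-index = <1>;    // selects drv_data_exynos850_cl1
	 *   };
	 */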
@@ -513,9 +661,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct s3c2410_wdt *wdt;
- struct resource *wdt_irq;
unsigned int wtcon;
- int started = 0;
+ int wdt_irq;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -527,6 +674,9 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
wdt->wdt_device = s3c2410_wdd;
wdt->drv_data = s3c2410_get_wdt_drv_data(pdev);
+ if (!wdt->drv_data)
+ return -EINVAL;
+
if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) {
wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,syscon-phandle");
@@ -536,40 +686,52 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
}
}
- wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (wdt_irq == NULL) {
- dev_err(dev, "no irq resource specified\n");
- ret = -ENOENT;
- goto err;
- }
+ wdt_irq = platform_get_irq(pdev, 0);
+ if (wdt_irq < 0)
+ return wdt_irq;
/* get the memory region for the watchdog timer */
wdt->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(wdt->reg_base)) {
- ret = PTR_ERR(wdt->reg_base);
- goto err;
- }
+ if (IS_ERR(wdt->reg_base))
+ return PTR_ERR(wdt->reg_base);
- wdt->clock = devm_clk_get(dev, "watchdog");
- if (IS_ERR(wdt->clock)) {
- dev_err(dev, "failed to find watchdog clock source\n");
- ret = PTR_ERR(wdt->clock);
- goto err;
+ wdt->bus_clk = devm_clk_get(dev, "watchdog");
+ if (IS_ERR(wdt->bus_clk)) {
+ dev_err(dev, "failed to find bus clock\n");
+ return PTR_ERR(wdt->bus_clk);
}
- ret = clk_prepare_enable(wdt->clock);
+ ret = clk_prepare_enable(wdt->bus_clk);
if (ret < 0) {
- dev_err(dev, "failed to enable clock\n");
+ dev_err(dev, "failed to enable bus clock\n");
return ret;
}
+ /*
+ * "watchdog_src" clock is optional; if it's not present -- just skip it
+ * and use "watchdog" clock as both bus and source clock.
+ */
+ wdt->src_clk = devm_clk_get_optional(dev, "watchdog_src");
+ if (IS_ERR(wdt->src_clk)) {
+ dev_err_probe(dev, PTR_ERR(wdt->src_clk),
+ "failed to get source clock\n");
+ ret = PTR_ERR(wdt->src_clk);
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(wdt->src_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable source clock\n");
+ goto err_bus_clk;
+ }
+
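
A note on why the optional clock needs no special-casing later: devm_clk_get_optional() returns NULL rather than an error pointer when the clock is simply absent, and the clk API accepts a NULL clock as a no-op, so the enable above still succeeds and the frequency helper only has to test wdt->src_clk.

	/* Illustrative, not part of the patch: with no "watchdog_src" clock,
	 * wdt->src_clk == NULL, clk_prepare_enable(NULL) == 0, and
	 * s3c2410wdt_get_freq() returns clk_get_rate(wdt->bus_clk). */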
wdt->wdt_device.min_timeout = 1;
- wdt->wdt_device.max_timeout = s3c2410wdt_max_timeout(wdt->clock);
+ wdt->wdt_device.max_timeout = s3c2410wdt_max_timeout(wdt);
ret = s3c2410wdt_cpufreq_register(wdt);
if (ret < 0) {
dev_err(dev, "failed to register cpufreq\n");
- goto err_clk;
+ goto err_src_clk;
}
watchdog_set_drvdata(&wdt->wdt_device, wdt);
@@ -581,19 +743,19 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
ret = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
wdt->wdt_device.timeout);
if (ret) {
- started = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
- S3C2410_WATCHDOG_DEFAULT_TIME);
-
- if (started == 0)
- dev_info(dev,
- "tmr_margin value out of range, default %d used\n",
+ ret = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
+ S3C2410_WATCHDOG_DEFAULT_TIME);
+ if (ret == 0) {
+ dev_warn(dev, "tmr_margin value out of range, default %d used\n",
S3C2410_WATCHDOG_DEFAULT_TIME);
- else
- dev_info(dev, "default timer value is out of range, cannot start\n");
+ } else {
+ dev_err(dev, "failed to use default timeout\n");
+ goto err_cpufreq;
+ }
}
- ret = devm_request_irq(dev, wdt_irq->start, s3c2410wdt_irq, 0,
- pdev->name, pdev);
+ ret = devm_request_irq(dev, wdt_irq, s3c2410wdt_irq, 0,
+ pdev->name, pdev);
if (ret != 0) {
dev_err(dev, "failed to install irq (%d)\n", ret);
goto err_cpufreq;
@@ -605,25 +767,29 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
wdt->wdt_device.parent = dev;
+ /*
+ * If "tmr_atboot" param is non-zero, start the watchdog right now. Also
+ * set WDOG_HW_RUNNING bit, so that watchdog core can kick the watchdog.
+ *
+ * If we're not enabling the watchdog, then ensure it is disabled if it
+ * has been left running from the bootloader or other source.
+ */
+ if (tmr_atboot) {
+ dev_info(dev, "starting watchdog timer\n");
+ s3c2410wdt_start(&wdt->wdt_device);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdt_device.status);
+ } else {
+ s3c2410wdt_stop(&wdt->wdt_device);
+ }
+
ret = watchdog_register_device(&wdt->wdt_device);
if (ret)
goto err_cpufreq;
- ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+ ret = s3c2410wdt_enable(wdt, true);
if (ret < 0)
goto err_unregister;
- if (tmr_atboot && started == 0) {
- dev_info(dev, "starting watchdog timer\n");
- s3c2410wdt_start(&wdt->wdt_device);
- } else if (!tmr_atboot) {
- /* if we're not enabling the watchdog, then ensure it is
- * disabled if it has been left running from the bootloader
- * or other source */
-
- s3c2410wdt_stop(&wdt->wdt_device);
- }
-
platform_set_drvdata(pdev, wdt);
/* print out a statement of readiness */
@@ -643,10 +809,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
err_cpufreq:
s3c2410wdt_cpufreq_deregister(wdt);
- err_clk:
- clk_disable_unprepare(wdt->clock);
+ err_src_clk:
+ clk_disable_unprepare(wdt->src_clk);
+
+ err_bus_clk:
+ clk_disable_unprepare(wdt->bus_clk);
- err:
return ret;
}
@@ -655,7 +823,7 @@ static int s3c2410wdt_remove(struct platform_device *dev)
int ret;
struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
- ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+ ret = s3c2410wdt_enable(wdt, false);
if (ret < 0)
return ret;
@@ -663,7 +831,8 @@ static int s3c2410wdt_remove(struct platform_device *dev)
s3c2410wdt_cpufreq_deregister(wdt);
- clk_disable_unprepare(wdt->clock);
+ clk_disable_unprepare(wdt->src_clk);
+ clk_disable_unprepare(wdt->bus_clk);
return 0;
}
@@ -672,8 +841,7 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
{
struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
- s3c2410wdt_mask_and_disable_reset(wdt, true);
-
+ s3c2410wdt_enable(wdt, false);
s3c2410wdt_stop(&wdt->wdt_device);
}
@@ -688,7 +856,7 @@ static int s3c2410wdt_suspend(struct device *dev)
wdt->wtcon_save = readl(wdt->reg_base + S3C2410_WTCON);
wdt->wtdat_save = readl(wdt->reg_base + S3C2410_WTDAT);
- ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+ ret = s3c2410wdt_enable(wdt, false);
if (ret < 0)
return ret;
@@ -708,7 +876,7 @@ static int s3c2410wdt_resume(struct device *dev)
writel(wdt->wtdat_save, wdt->reg_base + S3C2410_WTCNT);/* Reset count */
writel(wdt->wtcon_save, wdt->reg_base + S3C2410_WTCON);
- ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+ ret = s3c2410wdt_enable(wdt, true);
if (ret < 0)
return ret;
diff --git a/drivers/watchdog/simatic-ipc-wdt.c b/drivers/watchdog/simatic-ipc-wdt.c
new file mode 100644
index 000000000000..8bac793c63fb
--- /dev/null
+++ b/drivers/watchdog/simatic-ipc-wdt.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for Watchdogs
+ *
+ * Copyright (c) Siemens AG, 2020-2021
+ *
+ * Authors:
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/util_macros.h>
+#include <linux/watchdog.h>
+
+#define WD_ENABLE_IOADR 0x62
+#define WD_TRIGGER_IOADR 0x66
+#define GPIO_COMMUNITY0_PORT_ID 0xaf
+#define PAD_CFG_DW0_GPP_A_23 0x4b8
+#define SAFE_EN_N_427E 0x01
+#define SAFE_EN_N_227E 0x04
+#define WD_ENABLED 0x01
+#define WD_TRIGGERED 0x80
+#define WD_MACROMODE 0x02
+
+#define TIMEOUT_MIN 2
+#define TIMEOUT_DEF 64
+#define TIMEOUT_MAX 64
+
+#define GP_STATUS_REG_227E 0x404D /* IO PORT for SAFE_EN_N on 227E */
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0000);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static struct resource gp_status_reg_227e_res =
+ DEFINE_RES_IO_NAMED(GP_STATUS_REG_227E, SZ_1, KBUILD_MODNAME);
+
+static struct resource io_resource_enable =
+ DEFINE_RES_IO_NAMED(WD_ENABLE_IOADR, SZ_1,
+ KBUILD_MODNAME " WD_ENABLE_IOADR");
+
+static struct resource io_resource_trigger =
+ DEFINE_RES_IO_NAMED(WD_TRIGGER_IOADR, SZ_1,
+ KBUILD_MODNAME " WD_TRIGGER_IOADR");
+
+/* the actual start will be discovered via PCI; 0 is a placeholder */
+static struct resource mem_resource =
+ DEFINE_RES_MEM_NAMED(0, SZ_4, "WD_RESET_BASE_ADR");
+
+static u32 wd_timeout_table[] = { 2, 4, 6, 8, 16, 32, 48, 64 };
+static void __iomem *wd_reset_base_addr;
+
+static int wd_start(struct watchdog_device *wdd)
+{
+ outb(inb(WD_ENABLE_IOADR) | WD_ENABLED, WD_ENABLE_IOADR);
+ return 0;
+}
+
+static int wd_stop(struct watchdog_device *wdd)
+{
+ outb(inb(WD_ENABLE_IOADR) & ~WD_ENABLED, WD_ENABLE_IOADR);
+ return 0;
+}
+
+static int wd_ping(struct watchdog_device *wdd)
+{
+ inb(WD_TRIGGER_IOADR);
+ return 0;
+}
+
+static int wd_set_timeout(struct watchdog_device *wdd, unsigned int t)
+{
+ int timeout_idx = find_closest(t, wd_timeout_table,
+ ARRAY_SIZE(wd_timeout_table));
+
+ outb((inb(WD_ENABLE_IOADR) & 0xc7) | timeout_idx << 3, WD_ENABLE_IOADR);
+ wdd->timeout = wd_timeout_table[timeout_idx];
+ return 0;
+}
+
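
For intuition on the port encoding used by wd_set_timeout() above: the 3-bit timeout index lives in bits 3-5 of the enable port, and the & 0xc7 keeps bits 0-2 and 6-7 intact. A worked example, assuming a requested timeout of 10 seconds:

	/* Illustrative sketch, not part of the patch:
	 * find_closest(10, wd_timeout_table, 8) picks 8 (index 3, since
	 * |10 - 8| < |10 - 16|), so the driver writes
	 * (inb(WD_ENABLE_IOADR) & 0xc7) | (3 << 3) and sets
	 * wdd->timeout = 8. */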
+static const struct watchdog_info wdt_ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT,
+ .identity = KBUILD_MODNAME,
+};
+
+static const struct watchdog_ops wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wd_start,
+ .stop = wd_stop,
+ .ping = wd_ping,
+ .set_timeout = wd_set_timeout,
+};
+
+static void wd_secondary_enable(u32 wdtmode)
+{
+ u16 resetbit;
+
+ /* set safe_en_n so we are not just WDIOF_ALARMONLY */
+ if (wdtmode == SIMATIC_IPC_DEVICE_227E) {
+ /* enable SAFE_EN_N on GP_STATUS_REG_227E */
+ resetbit = inb(GP_STATUS_REG_227E);
+ outb(resetbit & ~SAFE_EN_N_227E, GP_STATUS_REG_227E);
+ } else {
+ /* enable SAFE_EN_N on PCH D1600 */
+ resetbit = ioread16(wd_reset_base_addr);
+ iowrite16(resetbit & ~SAFE_EN_N_427E, wd_reset_base_addr);
+ }
+}
+
+static int wd_setup(u32 wdtmode)
+{
+ unsigned int bootstatus = 0;
+ int timeout_idx;
+
+ timeout_idx = find_closest(TIMEOUT_DEF, wd_timeout_table,
+ ARRAY_SIZE(wd_timeout_table));
+
+ if (inb(WD_ENABLE_IOADR) & WD_TRIGGERED)
+ bootstatus |= WDIOF_CARDRESET;
+
+ /* reset alarm bit, set macro mode, and set timeout */
+ outb(WD_TRIGGERED | WD_MACROMODE | timeout_idx << 3, WD_ENABLE_IOADR);
+
+ wd_secondary_enable(wdtmode);
+
+ return bootstatus;
+}
+
+static struct watchdog_device wdd_data = {
+ .info = &wdt_ident,
+ .ops = &wdt_ops,
+ .min_timeout = TIMEOUT_MIN,
+ .max_timeout = TIMEOUT_MAX
+};
+
+static int simatic_ipc_wdt_probe(struct platform_device *pdev)
+{
+ struct simatic_ipc_platform *plat = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ switch (plat->devmode) {
+ case SIMATIC_IPC_DEVICE_227E:
+ if (!devm_request_region(dev, gp_status_reg_227e_res.start,
+ resource_size(&gp_status_reg_227e_res),
+ KBUILD_MODNAME)) {
+ dev_err(dev,
+ "Unable to register IO resource at %pR\n",
+ &gp_status_reg_227e_res);
+ return -EBUSY;
+ }
+ fallthrough;
+ case SIMATIC_IPC_DEVICE_427E:
+ wdd_data.parent = dev;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!devm_request_region(dev, io_resource_enable.start,
+ resource_size(&io_resource_enable),
+ io_resource_enable.name)) {
+ dev_err(dev,
+ "Unable to register IO resource at %#x\n",
+ WD_ENABLE_IOADR);
+ return -EBUSY;
+ }
+
+ if (!devm_request_region(dev, io_resource_trigger.start,
+ resource_size(&io_resource_trigger),
+ io_resource_trigger.name)) {
+ dev_err(dev,
+ "Unable to register IO resource at %#x\n",
+ WD_TRIGGER_IOADR);
+ return -EBUSY;
+ }
+
+ if (plat->devmode == SIMATIC_IPC_DEVICE_427E) {
+ res = &mem_resource;
+
+ /* get GPIO base from PCI */
+ res->start = simatic_ipc_get_membase0(PCI_DEVFN(0x1f, 1));
+ if (res->start == 0)
+ return -ENODEV;
+
+ /* do the final address calculation */
+ res->start = res->start + (GPIO_COMMUNITY0_PORT_ID << 16) +
+ PAD_CFG_DW0_GPP_A_23;
+ res->end += res->start;
+
+ wd_reset_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wd_reset_base_addr))
+ return PTR_ERR(wd_reset_base_addr);
+ }
+
+ wdd_data.bootstatus = wd_setup(plat->devmode);
+ if (wdd_data.bootstatus)
+ dev_warn(dev, "last reboot caused by watchdog reset\n");
+
+ watchdog_set_nowayout(&wdd_data, nowayout);
+ watchdog_stop_on_reboot(&wdd_data);
+ return devm_watchdog_register_device(dev, &wdd_data);
+}
+
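
On the 427E the reset control pad is reached through the PCH GPIO community, so the address computed in the probe above reduces to a fixed offset from the PCI BAR. A sketch of the arithmetic (values from the constants defined in this file):

	/* Illustrative, not part of the patch:
	 *   pad = membase0 of PCI 00:1f.1
	 *         + (GPIO_COMMUNITY0_PORT_ID << 16)   -> + 0xaf0000
	 *         + PAD_CFG_DW0_GPP_A_23              -> + 0x4b8
	 * with res->end = res->start + 3, i.e. a 4-byte window. */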
+static struct platform_driver simatic_ipc_wdt_driver = {
+ .probe = simatic_ipc_wdt_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+module_platform_driver(simatic_ipc_wdt_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 33e941e40082..120d32f164ac 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -327,7 +327,7 @@ config XEN_FRONT_PGDIR_SHBUF
config XEN_UNPOPULATED_ALLOC
bool "Use unpopulated memory ranges for guest mappings"
- depends on X86 && ZONE_DEVICE
+ depends on ZONE_DEVICE
default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
help
Use unpopulated memory ranges in order to create mappings for guest
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index ba2ea11e0d3d..a2c4fc49c483 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -581,7 +581,6 @@ void balloon_set_new_target(unsigned long target)
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
-#ifndef CONFIG_XEN_UNPOPULATED_ALLOC
static int add_ballooned_pages(unsigned int nr_pages)
{
enum bp_state st;
@@ -610,12 +609,12 @@ static int add_ballooned_pages(unsigned int nr_pages)
}
/**
- * xen_alloc_unpopulated_pages - get pages that have been ballooned out
+ * xen_alloc_ballooned_pages - get pages that have been ballooned out
* @nr_pages: Number of pages to get
* @pages: pages returned
* @return 0 on success, error otherwise
*/
-int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
unsigned int pgno = 0;
struct page *page;
@@ -652,23 +651,23 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
return 0;
out_undo:
mutex_unlock(&balloon_mutex);
- xen_free_unpopulated_pages(pgno, pages);
+ xen_free_ballooned_pages(pgno, pages);
/*
- * NB: free_xenballooned_pages will only subtract pgno pages, but since
+ * NB: xen_free_ballooned_pages will only subtract pgno pages, but since
* target_unpopulated is incremented with nr_pages at the start we need
* to remove the remaining ones also, or accounting will be screwed.
*/
balloon_stats.target_unpopulated -= nr_pages - pgno;
return ret;
}
-EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
+EXPORT_SYMBOL(xen_alloc_ballooned_pages);
/**
- * xen_free_unpopulated_pages - return pages retrieved with get_ballooned_pages
+ * xen_free_ballooned_pages - return pages retrieved with xen_alloc_ballooned_pages
* @nr_pages: Number of pages
* @pages: pages to return
*/
-void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
unsigned int i;
@@ -687,9 +686,9 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
mutex_unlock(&balloon_mutex);
}
-EXPORT_SYMBOL(xen_free_unpopulated_pages);
+EXPORT_SYMBOL(xen_free_ballooned_pages);
-#if defined(CONFIG_XEN_PV)
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
static void __init balloon_add_region(unsigned long start_pfn,
unsigned long pages)
{
@@ -712,7 +711,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
#endif
-#endif
static int __init balloon_init(void)
{
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index fec1b6537166..59ffea800079 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -250,13 +250,13 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
if (!refcount_dec_and_test(&map->users))
return;
+ if (map->pages && !use_ptemod)
+ unmap_grant_pages(map, 0, map->count);
+
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
}
-
- if (map->pages && !use_ptemod)
- unmap_grant_pages(map, 0, map->count);
gntdev_free_map(map);
}
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
index 87e6b7db892f..a8b41057c382 100644
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -8,6 +8,7 @@
#include <asm/page.h>
+#include <xen/balloon.h>
#include <xen/page.h>
#include <xen/xen.h>
@@ -15,13 +16,29 @@ static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;
+static struct resource *target_resource;
+
+/*
+ * If the arch is not happy with the system "iomem_resource" being used
+ * for the region allocation, it can provide its own view by creating a
+ * specific Xen resource with unused regions of the guest physical
+ * address space provided by the hypervisor.
+ */
+int __weak __init arch_xen_unpopulated_init(struct resource **res)
+{
+ *res = &iomem_resource;
+
+ return 0;
+}
+
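
A hypothetical override of the weak hook above (the resource name is illustrative): an architecture that cannot safely carve guest mappings out of iomem_resource would fill a dedicated resource with unused guest physical address ranges and hand that back instead:

	/* Hypothetical arch code, not part of the patch. */
	extern struct resource xen_resource;	/* filled with unused GPA ranges */

	int __init arch_xen_unpopulated_init(struct resource **res)
	{
		*res = &xen_resource;
		return 0;
	}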
static int fill_list(unsigned int nr_pages)
{
struct dev_pagemap *pgmap;
- struct resource *res;
+ struct resource *res, *tmp_res = NULL;
void *vaddr;
unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
- int ret = -ENOMEM;
+ struct range mhp_range;
+ int ret;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
@@ -30,14 +47,40 @@ static int fill_list(unsigned int nr_pages)
res->name = "Xen scratch";
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- ret = allocate_resource(&iomem_resource, res,
- alloc_pages * PAGE_SIZE, 0, -1,
+ mhp_range = mhp_get_pluggable_range(true);
+
+ ret = allocate_resource(target_resource, res,
+ alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
if (ret < 0) {
pr_err("Cannot allocate new IOMEM resource\n");
goto err_resource;
}
+ /*
+ * Reserve the region previously allocated from the Xen resource so
+ * that it is not re-used by someone else.
+ */
+ if (target_resource != &iomem_resource) {
+ tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
+ if (!tmp_res) {
+ ret = -ENOMEM;
+ goto err_insert;
+ }
+
+ tmp_res->name = res->name;
+ tmp_res->start = res->start;
+ tmp_res->end = res->end;
+ tmp_res->flags = res->flags;
+
+ ret = request_resource(&iomem_resource, tmp_res);
+ if (ret < 0) {
+ pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret);
+ kfree(tmp_res);
+ goto err_insert;
+ }
+ }
+
pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
if (!pgmap) {
ret = -ENOMEM;
@@ -85,7 +128,6 @@ static int fill_list(unsigned int nr_pages)
for (i = 0; i < alloc_pages; i++) {
struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
- BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
pg->zone_device_data = page_list;
page_list = pg;
list_count++;
@@ -96,6 +138,11 @@ static int fill_list(unsigned int nr_pages)
err_memremap:
kfree(pgmap);
err_pgmap:
+ if (tmp_res) {
+ release_resource(tmp_res);
+ kfree(tmp_res);
+ }
+err_insert:
release_resource(res);
err_resource:
kfree(res);
@@ -113,6 +160,14 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
unsigned int i;
int ret = 0;
+ /*
+ * Fall back to the default behavior if we do not have any suitable
+ * resource to allocate the required region from, in which case we
+ * won't be able to construct pages.
+ */
+ if (!target_resource)
+ return xen_alloc_ballooned_pages(nr_pages, pages);
+
mutex_lock(&list_lock);
if (list_count < nr_pages) {
ret = fill_list(nr_pages - list_count);
@@ -160,6 +215,11 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
unsigned int i;
+ if (!target_resource) {
+ xen_free_ballooned_pages(nr_pages, pages);
+ return;
+ }
+
mutex_lock(&list_lock);
for (i = 0; i < nr_pages; i++) {
pages[i]->zone_device_data = page_list;
@@ -202,3 +262,20 @@ static int __init init(void)
}
subsys_initcall(init);
#endif
+
+static int __init unpopulated_init(void)
+{
+ int ret;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ ret = arch_xen_unpopulated_init(&target_resource);
+ if (ret) {
+ pr_err("xen:unpopulated: Cannot initialize target resource\n");
+ target_resource = NULL;
+ }
+
+ return ret;
+}
+early_initcall(unpopulated_init);
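
From a caller's point of view the fallback is transparent. A minimal usage sketch (error handling elided, not part of the patch):

	struct page *pages[4];

	/* If unpopulated_init() found no usable target resource, this
	 * call degrades to xen_alloc_ballooned_pages() internally. */
	if (!xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages))
		xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);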
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 1c9ae08225d8..f916bf60b312 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -30,7 +30,7 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
static ssize_t
proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- struct zorro_dev *z = PDE_DATA(file_inode(file));
+ struct zorro_dev *z = pde_data(file_inode(file));
struct ConfigDev cd;
loff_t pos = *ppos;